aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorrobot-piglet <robot-piglet@yandex-team.com>2023-12-04 15:32:14 +0300
committerrobot-piglet <robot-piglet@yandex-team.com>2023-12-05 01:22:50 +0300
commitc21ed9eedf73010bc81342518177dfdfb0d56bd7 (patch)
tree72f8fde4463080cfe5a38eb0babc051cfe32c51e
parentec1311bf2e8cc231723b8b5e484ca576663a1309 (diff)
downloadydb-c21ed9eedf73010bc81342518177dfdfb0d56bd7.tar.gz
Intermediate changes
-rw-r--r--.mapping.json315
-rw-r--r--build/long-path.manifest8
-rw-r--r--build/prebuilt/tools/event2cpp/ya.make.induced_deps10
-rw-r--r--contrib/deprecated/galloc/basictypes.h33
-rw-r--r--contrib/deprecated/galloc/commandlineflags.h111
-rw-r--r--contrib/deprecated/galloc/galloc.cpp55
-rw-r--r--contrib/deprecated/galloc/hack.cpp20
-rw-r--r--contrib/deprecated/galloc/hack.h21
-rw-r--r--contrib/deprecated/galloc/internal_logging.cc67
-rw-r--r--contrib/deprecated/galloc/internal_logging.h101
-rw-r--r--contrib/deprecated/galloc/internal_spinlock.h47
-rw-r--r--contrib/deprecated/galloc/malloc_extension.cc129
-rw-r--r--contrib/deprecated/galloc/malloc_extension.h154
-rw-r--r--contrib/deprecated/galloc/malloc_hook.h13
-rw-r--r--contrib/deprecated/galloc/pagemap.h250
-rw-r--r--contrib/deprecated/galloc/spinlock.h21
-rw-r--r--contrib/deprecated/galloc/stacktrace.h9
-rw-r--r--contrib/deprecated/galloc/system-alloc.cc296
-rw-r--r--contrib/deprecated/galloc/system-alloc.h46
-rw-r--r--contrib/deprecated/galloc/tcmalloc.cc2661
-rw-r--r--contrib/deprecated/galloc/ya.make22
-rw-r--r--contrib/java/antlr/antlr4/antlr.jarbin3547867 -> 0 bytes
-rw-r--r--contrib/libs/antlr4_cpp_runtime/CHANGES.txt582
-rw-r--r--contrib/libs/antlr4_cpp_runtime/CONTRIBUTING.md22
-rw-r--r--contrib/libs/antlr4_cpp_runtime/LICENSE.txt28
-rw-r--r--contrib/libs/antlr4_cpp_runtime/README-cpp.md72
-rw-r--r--contrib/libs/antlr4_cpp_runtime/README.md84
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.cpp10
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.h167
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.cpp10
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.h121
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.cpp23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.h30
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.cpp180
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.h79
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.cpp61
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.h59
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.cpp25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.h36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.cpp414
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.h200
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CharStream.cpp11
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CharStream.h37
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonToken.cpp193
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonToken.h158
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.cpp39
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.h74
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.cpp78
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.h79
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.cpp15
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.h35
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.cpp336
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.h466
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.cpp84
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.h80
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Exceptions.cpp64
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Exceptions.h99
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.cpp52
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.h32
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/FlatHashMap.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/FlatHashSet.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.cpp18
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.h24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/IntStream.cpp12
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/IntStream.h218
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.cpp19
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.h45
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Lexer.cpp294
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Lexer.h196
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.cpp60
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.h46
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.h31
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.cpp92
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.h88
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.cpp46
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.h42
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Parser.cpp670
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Parser.h461
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.cpp294
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.h173
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.cpp138
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.h147
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.cpp53
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.h38
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RecognitionException.cpp65
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RecognitionException.h98
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Recognizer.cpp157
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Recognizer.h160
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuleContext.cpp144
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuleContext.h141
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.cpp27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.h32
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.cpp54
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.h155
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Token.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Token.h92
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenFactory.h30
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenSource.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenSource.h85
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenStream.cpp11
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenStream.h137
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.cpp425
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.h295
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.cpp208
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.h117
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.cpp270
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.h115
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Version.h42
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Vocabulary.cpp64
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h177
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/WritableToken.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/WritableToken.h23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/antlr4-common.h101
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h168
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATN.cpp159
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATN.h133
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.cpp106
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.h157
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.cpp232
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.h157
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.cpp39
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.h48
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.cpp628
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.h32
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.cpp33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.h71
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.cpp56
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.h139
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.cpp33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.h36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ATNType.h20
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.cpp29
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.h35
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.cpp16
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.h68
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.cpp109
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.h51
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.cpp27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.h33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/BasicBlockStartState.h24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/BasicState.h23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/BlockEndState.h26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/BlockStartState.h30
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.cpp14
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.h47
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.cpp14
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.h70
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.cpp25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.h227
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.cpp12
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.h34
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.cpp31
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.h42
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.cpp15
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.h43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.cpp189
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.h76
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.cpp67
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.h44
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.cpp617
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.h199
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.cpp15
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.h100
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.cpp111
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.h128
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionType.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.cpp43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.h59
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.cpp45
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.h75
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.cpp57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.h76
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.cpp43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.h53
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.h53
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.cpp43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.h51
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.cpp43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.h51
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.cpp16
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.h42
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/LoopEndState.h26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.cpp22
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.h27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.cpp16
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.h25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.cpp102
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.h102
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.cpp1387
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.h911
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulatorOptions.h50
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PlusBlockStartState.h29
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PlusLoopbackState.h25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.cpp23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.h35
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.cpp17
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.h62
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.cpp24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.h50
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.cpp579
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.h225
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.cpp56
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.h63
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.cpp167
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.h101
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCacheOptions.h71
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextType.h21
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.cpp202
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.h436
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.cpp179
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.h60
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.cpp26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.h31
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RuleStartState.h26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RuleStopState.h27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.cpp33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.h42
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.cpp418
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.h237
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContextType.h23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SerializedATNView.h101
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.cpp28
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.h38
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.cpp86
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.h43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/StarBlockStartState.h24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopEntryState.h37
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.cpp19
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.h25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/TokensStartState.h24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/Transition.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/Transition.h65
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.cpp27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.h33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.cpp21
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.h27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.cpp115
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.h96
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.cpp60
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.h32
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.cpp59
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.h154
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.cpp17
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.h22
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.cpp100
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.h154
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.cpp124
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.h33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/Interval.cpp61
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/Interval.h84
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.cpp501
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.h188
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.cpp120
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.h102
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.cpp4
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.h21
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Any.cpp8
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Any.h16
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Arrays.cpp43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Arrays.h149
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/BitSet.h76
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.cpp207
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.h65
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Casts.h34
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Declarations.h161
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.cpp38
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.h16
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Unicode.h28
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Utf8.cpp242
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/support/Utf8.h54
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/AbstractParseTreeVisitor.h129
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNode.h24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.cpp54
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.h43
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.cpp66
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.h53
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.cpp12
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.h111
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.h39
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeProperty.h50
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeType.h22
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.h57
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.cpp48
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.h55
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNode.h40
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.cpp54
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.h32
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/Trees.cpp241
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/Trees.h78
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.cpp9
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.h44
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.cpp69
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.h132
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.cpp64
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.h105
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.cpp370
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.h185
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.cpp77
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.h117
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.cpp39
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.h86
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.cpp28
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.h51
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.cpp36
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.h80
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.cpp154
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.h86
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.cpp31
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.h40
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.cpp182
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.h47
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.cpp13
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.h22
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp20
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.h27
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.cpp30
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.h26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp20
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.h25
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.cpp33
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.h26
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.h23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.cpp24
-rw-r--r--contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.h23
-rw-r--r--contrib/libs/antlr4_cpp_runtime/ya.make164
-rw-r--r--contrib/libs/libmagic/AUTHORS1
-rw-r--r--contrib/libs/libmagic/COPYING29
-rw-r--r--contrib/libs/libmagic/ChangeLog2129
-rw-r--r--contrib/libs/libmagic/INSTALL365
-rw-r--r--contrib/libs/libmagic/NEWS1
-rw-r--r--contrib/libs/libmagic/README.DEVELOPER49
-rw-r--r--contrib/libs/libmagic/README.md156
-rw-r--r--contrib/libs/libmagic/config-linux.h519
-rw-r--r--contrib/libs/libmagic/config-osx.h9
-rw-r--r--contrib/libs/libmagic/config.h7
-rw-r--r--contrib/libs/libmagic/file/0/ya.make12
-rw-r--r--contrib/libs/libmagic/include/magic.h1
-rw-r--r--contrib/libs/libmagic/magic/Magdir/acorn102
-rw-r--r--contrib/libs/libmagic/magic/Magdir/adi13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/adventure122
-rw-r--r--contrib/libs/libmagic/magic/Magdir/aes29
-rw-r--r--contrib/libs/libmagic/magic/Magdir/algol6835
-rw-r--r--contrib/libs/libmagic/magic/Magdir/allegro9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/alliant18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/amanda12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/amigaos218
-rw-r--r--contrib/libs/libmagic/magic/Magdir/android259
-rw-r--r--contrib/libs/libmagic/magic/Magdir/animation1206
-rw-r--r--contrib/libs/libmagic/magic/Magdir/aout46
-rwxr-xr-xcontrib/libs/libmagic/magic/Magdir/apache28
-rw-r--r--contrib/libs/libmagic/magic/Magdir/apl7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/apple773
-rw-r--r--contrib/libs/libmagic/magic/Magdir/application7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/applix13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/apt52
-rw-r--r--contrib/libs/libmagic/magic/Magdir/archive2607
-rw-r--r--contrib/libs/libmagic/magic/Magdir/aria38
-rw-r--r--contrib/libs/libmagic/magic/Magdir/arm50
-rw-r--r--contrib/libs/libmagic/magic/Magdir/asf132
-rw-r--r--contrib/libs/libmagic/magic/Magdir/assembler18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/asterix18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/att3b41
-rw-r--r--contrib/libs/libmagic/magic/Magdir/audio1291
-rw-r--r--contrib/libs/libmagic/magic/Magdir/avm33
-rw-r--r--contrib/libs/libmagic/magic/Magdir/basis18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/beetle7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ber65
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bflt14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bhl10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bioinformatics178
-rw-r--r--contrib/libs/libmagic/magic/Magdir/biosig154
-rw-r--r--contrib/libs/libmagic/magic/Magdir/blackberry8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/blcr25
-rw-r--r--contrib/libs/libmagic/magic/Magdir/blender50
-rw-r--r--contrib/libs/libmagic/magic/Magdir/blit24
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bm10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bout11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bsdi33
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bsi10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/btsnoop13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/burp7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/bytecode41
-rw-r--r--contrib/libs/libmagic/magic/Magdir/c-lang110
-rw-r--r--contrib/libs/libmagic/magic/Magdir/c64549
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cad437
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cafebabe107
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cbor21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ccf14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cddb12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/chord15
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cisco12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/citrus12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/clarion27
-rw-r--r--contrib/libs/libmagic/magic/Magdir/claris48
-rw-r--r--contrib/libs/libmagic/magic/Magdir/clipper65
-rw-r--r--contrib/libs/libmagic/magic/Magdir/clojure30
-rw-r--r--contrib/libs/libmagic/magic/Magdir/coff98
-rw-r--r--contrib/libs/libmagic/magic/Magdir/commands201
-rw-r--r--contrib/libs/libmagic/magic/Magdir/communications22
-rw-r--r--contrib/libs/libmagic/magic/Magdir/compress461
-rw-r--r--contrib/libs/libmagic/magic/Magdir/console1226
-rw-r--r--contrib/libs/libmagic/magic/Magdir/convex69
-rw-r--r--contrib/libs/libmagic/magic/Magdir/coverage91
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cracklib14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/crypto49
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ctags6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ctf23
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cubemap8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/cups56
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dact11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/database886
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dataone47
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dbpf15
-rw-r--r--contrib/libs/libmagic/magic/Magdir/der146
-rw-r--r--contrib/libs/libmagic/magic/Magdir/diamond12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dif33
-rw-r--r--contrib/libs/libmagic/magic/Magdir/diff41
-rw-r--r--contrib/libs/libmagic/magic/Magdir/digital59
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dolby69
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dump96
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dwarfs45
-rw-r--r--contrib/libs/libmagic/magic/Magdir/dyadic61
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ebml8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/edid11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/editors43
-rw-r--r--contrib/libs/libmagic/magic/Magdir/efi15
-rw-r--r--contrib/libs/libmagic/magic/Magdir/elf379
-rw-r--r--contrib/libs/libmagic/magic/Magdir/encore22
-rw-r--r--contrib/libs/libmagic/magic/Magdir/epoc62
-rw-r--r--contrib/libs/libmagic/magic/Magdir/erlang21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/espressif57
-rw-r--r--contrib/libs/libmagic/magic/Magdir/esri28
-rw-r--r--contrib/libs/libmagic/magic/Magdir/etf33
-rw-r--r--contrib/libs/libmagic/magic/Magdir/fcs9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/filesystems2694
-rw-r--r--contrib/libs/libmagic/magic/Magdir/finger16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/firmware133
-rw-r--r--contrib/libs/libmagic/magic/Magdir/flash62
-rw-r--r--contrib/libs/libmagic/magic/Magdir/flif36
-rw-r--r--contrib/libs/libmagic/magic/Magdir/fonts449
-rw-r--r--contrib/libs/libmagic/magic/Magdir/forth82
-rw-r--r--contrib/libs/libmagic/magic/Magdir/fortran9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/frame62
-rw-r--r--contrib/libs/libmagic/magic/Magdir/freebsd164
-rw-r--r--contrib/libs/libmagic/magic/Magdir/fsav128
-rw-r--r--contrib/libs/libmagic/magic/Magdir/fusecompress12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/games696
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gcc17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gconv10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gentoo85
-rw-r--r--contrib/libs/libmagic/magic/Magdir/geo166
-rw-r--r--contrib/libs/libmagic/magic/Magdir/geos20
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gimp77
-rw-r--r--contrib/libs/libmagic/magic/Magdir/git13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/glibc21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gnome59
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gnu173
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gnumeric8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gpt240
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gpu28
-rw-r--r--contrib/libs/libmagic/magic/Magdir/grace21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/graphviz12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/gringotts48
-rw-r--r--contrib/libs/libmagic/magic/Magdir/hardware12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/hitachi-sh30
-rw-r--r--contrib/libs/libmagic/magic/Magdir/hp433
-rw-r--r--contrib/libs/libmagic/magic/Magdir/human68k26
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ibm37052
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ibm600035
-rw-r--r--contrib/libs/libmagic/magic/Magdir/icc214
-rw-r--r--contrib/libs/libmagic/magic/Magdir/iff80
-rw-r--r--contrib/libs/libmagic/magic/Magdir/images4219
-rw-r--r--contrib/libs/libmagic/magic/Magdir/inform9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/intel310
-rw-r--r--contrib/libs/libmagic/magic/Magdir/interleaf9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/island10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ispell63
-rw-r--r--contrib/libs/libmagic/magic/Magdir/isz15
-rw-r--r--contrib/libs/libmagic/magic/Magdir/java52
-rw-r--r--contrib/libs/libmagic/magic/Magdir/javascript171
-rw-r--r--contrib/libs/libmagic/magic/Magdir/jpeg252
-rw-r--r--contrib/libs/libmagic/magic/Magdir/karma9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/kde11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/keepass20
-rw-r--r--contrib/libs/libmagic/magic/Magdir/kerberos45
-rw-r--r--contrib/libs/libmagic/magic/Magdir/kicad85
-rw-r--r--contrib/libs/libmagic/magic/Magdir/kml34
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lammps64
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lecter6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lex12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lif50
-rw-r--r--contrib/libs/libmagic/magic/Magdir/linux627
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lisp78
-rw-r--r--contrib/libs/libmagic/magic/Magdir/llvm22
-rw-r--r--contrib/libs/libmagic/magic/Magdir/locoscript12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/lua31
-rw-r--r--contrib/libs/libmagic/magic/Magdir/luks126
-rw-r--r--contrib/libs/libmagic/magic/Magdir/m411
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mach303
-rw-r--r--contrib/libs/libmagic/magic/Magdir/macintosh505
-rw-r--r--contrib/libs/libmagic/magic/Magdir/macos7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/magic71
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mail.news132
-rw-r--r--contrib/libs/libmagic/magic/Magdir/make21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/map413
-rw-r--r--contrib/libs/libmagic/magic/Magdir/maple109
-rw-r--r--contrib/libs/libmagic/magic/Magdir/marc2130
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mathcad8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mathematica192
-rw-r--r--contrib/libs/libmagic/magic/Magdir/matroska17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mcrypt52
-rw-r--r--contrib/libs/libmagic/magic/Magdir/measure44
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mercurial13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/metastore8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/meteorological53
-rw-r--r--contrib/libs/libmagic/magic/Magdir/microfocus21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mime9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mips120
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mirage8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/misctools140
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mkid11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mlssa8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mmdf6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/modem92
-rw-r--r--contrib/libs/libmagic/magic/Magdir/modulefile9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/motorola71
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mozilla37
-rw-r--r--contrib/libs/libmagic/magic/Magdir/msdos2304
-rw-r--r--contrib/libs/libmagic/magic/Magdir/msooxml68
-rw-r--r--contrib/libs/libmagic/magic/Magdir/msvc222
-rw-r--r--contrib/libs/libmagic/magic/Magdir/msx309
-rw-r--r--contrib/libs/libmagic/magic/Magdir/mup24
-rw-r--r--contrib/libs/libmagic/magic/Magdir/music17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/nasa7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/natinst24
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ncr49
-rw-r--r--contrib/libs/libmagic/magic/Magdir/netbsd251
-rw-r--r--contrib/libs/libmagic/magic/Magdir/netscape26
-rw-r--r--contrib/libs/libmagic/magic/Magdir/netware11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/news13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/nifty202
-rw-r--r--contrib/libs/libmagic/magic/Magdir/nim-lang29
-rw-r--r--contrib/libs/libmagic/magic/Magdir/nitpicker14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/numpy9
-rw-r--r--contrib/libs/libmagic/magic/Magdir/oasis12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ocaml14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/octave6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ole2compounddocs760
-rw-r--r--contrib/libs/libmagic/magic/Magdir/olf98
-rw-r--r--contrib/libs/libmagic/magic/Magdir/openfst17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/opentimestamps16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/oric16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/os2186
-rw-r--r--contrib/libs/libmagic/magic/Magdir/os40039
-rw-r--r--contrib/libs/libmagic/magic/Magdir/os980
-rw-r--r--contrib/libs/libmagic/magic/Magdir/osf110
-rw-r--r--contrib/libs/libmagic/magic/Magdir/palm156
-rw-r--r--contrib/libs/libmagic/magic/Magdir/parix13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/parrot22
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pascal39
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pbf11
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pbm8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pc8824
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pc9877
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pci_ids116
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pcjr8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pdf51
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pdp42
-rw-r--r--contrib/libs/libmagic/magic/Magdir/perl100
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pgf52
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pgp581
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pgp-binary-keys388
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pkgadd7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/plan925
-rw-r--r--contrib/libs/libmagic/magic/Magdir/playdate57
-rw-r--r--contrib/libs/libmagic/magic/Magdir/plus518
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pmem46
-rw-r--r--contrib/libs/libmagic/magic/Magdir/polyml23
-rw-r--r--contrib/libs/libmagic/magic/Magdir/printer278
-rw-r--r--contrib/libs/libmagic/magic/Magdir/project10
-rw-r--r--contrib/libs/libmagic/magic/Magdir/psdbms14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/psl14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pulsar13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/puzzle17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pwsafe14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/pyramid12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/python305
-rw-r--r--contrib/libs/libmagic/magic/Magdir/qt30
-rw-r--r--contrib/libs/libmagic/magic/Magdir/revision66
-rw-r--r--contrib/libs/libmagic/magic/Magdir/riff841
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ringdove45
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rpi52
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rpm45
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rpmsg7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rst13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rtf94
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ruby55
-rw-r--r--contrib/libs/libmagic/magic/Magdir/rust21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sc7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sccs24
-rw-r--r--contrib/libs/libmagic/magic/Magdir/scientific144
-rw-r--r--contrib/libs/libmagic/magic/Magdir/securitycerts6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/selinux24
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sendmail37
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sequent42
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sereal35
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sgi144
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sgml163
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sharc23
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sinclair40
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sisu18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sketch6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/smalltalk25
-rw-r--r--contrib/libs/libmagic/magic/Magdir/smile34
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sniffer482
-rw-r--r--contrib/libs/libmagic/magic/Magdir/softquad40
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sosi40
-rw-r--r--contrib/libs/libmagic/magic/Magdir/spec21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/spectrum184
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sql288
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ssh42
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ssl20
-rw-r--r--contrib/libs/libmagic/magic/Magdir/statistics45
-rw-r--r--contrib/libs/libmagic/magic/Magdir/subtitle38
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sun141
-rw-r--r--contrib/libs/libmagic/magic/Magdir/svf5
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sylk36
-rw-r--r--contrib/libs/libmagic/magic/Magdir/symbos42
-rw-r--r--contrib/libs/libmagic/magic/Magdir/sysex429
-rw-r--r--contrib/libs/libmagic/magic/Magdir/tcl29
-rw-r--r--contrib/libs/libmagic/magic/Magdir/teapot6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/terminfo63
-rw-r--r--contrib/libs/libmagic/magic/Magdir/tex141
-rw-r--r--contrib/libs/libmagic/magic/Magdir/tgif7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/ti-8x239
-rw-r--r--contrib/libs/libmagic/magic/Magdir/timezone42
-rw-r--r--contrib/libs/libmagic/magic/Magdir/tplink95
-rw-r--r--contrib/libs/libmagic/magic/Magdir/troff44
-rw-r--r--contrib/libs/libmagic/magic/Magdir/tuxedo8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/typeset8
-rw-r--r--contrib/libs/libmagic/magic/Magdir/uf272
-rw-r--r--contrib/libs/libmagic/magic/Magdir/unicode15
-rw-r--r--contrib/libs/libmagic/magic/Magdir/unisig12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/unknown34
-rw-r--r--contrib/libs/libmagic/magic/Magdir/usd21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/uterus16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/uuencode28
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vacuum-cleaner54
-rw-r--r--contrib/libs/libmagic/magic/Magdir/varied.out46
-rw-r--r--contrib/libs/libmagic/magic/Magdir/varied.script21
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vax32
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vicar17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/virtual307
-rw-r--r--contrib/libs/libmagic/magic/Magdir/virtutech12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/visx32
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vms30
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vmware6
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vorbis155
-rw-r--r--contrib/libs/libmagic/magic/Magdir/vxl14
-rw-r--r--contrib/libs/libmagic/magic/Magdir/warc16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/weak16
-rw-r--r--contrib/libs/libmagic/magic/Magdir/web18
-rw-r--r--contrib/libs/libmagic/magic/Magdir/webassembly17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/windows1822
-rw-r--r--contrib/libs/libmagic/magic/Magdir/wireless7
-rw-r--r--contrib/libs/libmagic/magic/Magdir/wordprocessors630
-rw-r--r--contrib/libs/libmagic/magic/Magdir/wsdl23
-rw-r--r--contrib/libs/libmagic/magic/Magdir/x6800025
-rw-r--r--contrib/libs/libmagic/magic/Magdir/xdelta13
-rw-r--r--contrib/libs/libmagic/magic/Magdir/xenix106
-rw-r--r--contrib/libs/libmagic/magic/Magdir/xilinx58
-rw-r--r--contrib/libs/libmagic/magic/Magdir/xo6537
-rw-r--r--contrib/libs/libmagic/magic/Magdir/xwindows43
-rw-r--r--contrib/libs/libmagic/magic/Magdir/yara17
-rw-r--r--contrib/libs/libmagic/magic/Magdir/zfs96
-rw-r--r--contrib/libs/libmagic/magic/Magdir/zilog12
-rw-r--r--contrib/libs/libmagic/magic/Magdir/zip126
-rw-r--r--contrib/libs/libmagic/magic/Magdir/zyxel17
-rw-r--r--contrib/libs/libmagic/magic/ya.make44
-rw-r--r--contrib/libs/libmagic/src/apprentice.c3743
-rw-r--r--contrib/libs/libmagic/src/apptype.c169
-rw-r--r--contrib/libs/libmagic/src/ascmagic.c389
-rw-r--r--contrib/libs/libmagic/src/buffer.c95
-rw-r--r--contrib/libs/libmagic/src/cdf.c1676
-rw-r--r--contrib/libs/libmagic/src/cdf.h353
-rw-r--r--contrib/libs/libmagic/src/cdf_time.c203
-rw-r--r--contrib/libs/libmagic/src/compress.c1227
-rw-r--r--contrib/libs/libmagic/src/der.c458
-rw-r--r--contrib/libs/libmagic/src/der.h28
-rw-r--r--contrib/libs/libmagic/src/elfclass.h82
-rw-r--r--contrib/libs/libmagic/src/encoding.c658
-rw-r--r--contrib/libs/libmagic/src/file.c859
-rw-r--r--contrib/libs/libmagic/src/file.h729
-rw-r--r--contrib/libs/libmagic/src/file/ya.make34
-rw-r--r--contrib/libs/libmagic/src/file_opts.h89
-rw-r--r--contrib/libs/libmagic/src/fmtcheck.c254
-rw-r--r--contrib/libs/libmagic/src/fsmagic.c435
-rw-r--r--contrib/libs/libmagic/src/funcs.c932
-rw-r--r--contrib/libs/libmagic/src/is_csv.c198
-rw-r--r--contrib/libs/libmagic/src/is_json.c500
-rw-r--r--contrib/libs/libmagic/src/is_simh.c209
-rw-r--r--contrib/libs/libmagic/src/is_tar.c179
-rw-r--r--contrib/libs/libmagic/src/magic.c686
-rw-r--r--contrib/libs/libmagic/src/magic.h166
-rw-r--r--contrib/libs/libmagic/src/print.c368
-rw-r--r--contrib/libs/libmagic/src/readcdf.c682
-rw-r--r--contrib/libs/libmagic/src/readelf.c1899
-rw-r--r--contrib/libs/libmagic/src/readelf.h545
-rw-r--r--contrib/libs/libmagic/src/res.cpp7
-rw-r--r--contrib/libs/libmagic/src/seccomp.c290
-rw-r--r--contrib/libs/libmagic/src/softmagic.c2522
-rw-r--r--contrib/libs/libmagic/src/tar.h73
-rw-r--r--contrib/libs/libmagic/src/ya.make60
-rw-r--r--contrib/libs/libmagic/ya.make28
-rw-r--r--contrib/libs/python/Include/opcode.h7
-rw-r--r--contrib/python/coverage/plugins/coveragerc.txt29
-rw-r--r--contrib/python/coverage/plugins/ya.make19
-rw-r--r--contrib/python/coverage/plugins/yarcadia/plugin.py114
-rw-r--r--contrib/python/coverage/py2/.dist-info/METADATA190
-rw-r--r--contrib/python/coverage/py2/.dist-info/entry_points.txt5
-rw-r--r--contrib/python/coverage/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/coverage/py2/LICENSE.txt177
-rw-r--r--contrib/python/coverage/py2/NOTICE.txt14
-rw-r--r--contrib/python/coverage/py2/README.rst151
-rw-r--r--contrib/python/coverage/py2/coverage/__init__.py36
-rw-r--r--contrib/python/coverage/py2/coverage/__main__.py8
-rw-r--r--contrib/python/coverage/py2/coverage/annotate.py108
-rw-r--r--contrib/python/coverage/py2/coverage/backward.py267
-rw-r--r--contrib/python/coverage/py2/coverage/bytecode.py19
-rw-r--r--contrib/python/coverage/py2/coverage/cmdline.py910
-rw-r--r--contrib/python/coverage/py2/coverage/collector.py455
-rw-r--r--contrib/python/coverage/py2/coverage/config.py605
-rw-r--r--contrib/python/coverage/py2/coverage/context.py91
-rw-r--r--contrib/python/coverage/py2/coverage/control.py1162
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/datastack.c50
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/datastack.h45
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/filedisp.c85
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/filedisp.h26
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/module.c108
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/stats.h31
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/tracer.c1149
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/tracer.h75
-rw-r--r--contrib/python/coverage/py2/coverage/ctracer/util.h67
-rw-r--r--contrib/python/coverage/py2/coverage/data.py125
-rw-r--r--contrib/python/coverage/py2/coverage/debug.py406
-rw-r--r--contrib/python/coverage/py2/coverage/disposition.py37
-rw-r--r--contrib/python/coverage/py2/coverage/env.py130
-rw-r--r--contrib/python/coverage/py2/coverage/execfile.py362
-rw-r--r--contrib/python/coverage/py2/coverage/files.py441
-rw-r--r--contrib/python/coverage/py2/coverage/fullcoverage/encodings.py60
-rw-r--r--contrib/python/coverage/py2/coverage/html.py539
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/coverage_html.js616
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/favicon_32.pngbin1732 -> 0 bytes
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/index.html119
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js9
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/jquery.hotkeys.js99
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/jquery.isonscreen.js53
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/jquery.min.js4
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/jquery.tablesorter.min.js2
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/keybd_closed.pngbin112 -> 0 bytes
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/keybd_open.pngbin112 -> 0 bytes
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/pyfile.html113
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/style.css291
-rw-r--r--contrib/python/coverage/py2/coverage/htmlfiles/style.scss660
-rw-r--r--contrib/python/coverage/py2/coverage/inorout.py513
-rw-r--r--contrib/python/coverage/py2/coverage/jsonreport.py103
-rw-r--r--contrib/python/coverage/py2/coverage/misc.py361
-rw-r--r--contrib/python/coverage/py2/coverage/multiproc.py117
-rw-r--r--contrib/python/coverage/py2/coverage/numbits.py163
-rw-r--r--contrib/python/coverage/py2/coverage/parser.py1276
-rw-r--r--contrib/python/coverage/py2/coverage/phystokens.py297
-rw-r--r--contrib/python/coverage/py2/coverage/plugin.py533
-rw-r--r--contrib/python/coverage/py2/coverage/plugin_support.py281
-rw-r--r--contrib/python/coverage/py2/coverage/python.py261
-rw-r--r--contrib/python/coverage/py2/coverage/pytracer.py274
-rw-r--r--contrib/python/coverage/py2/coverage/report.py86
-rw-r--r--contrib/python/coverage/py2/coverage/results.py343
-rw-r--r--contrib/python/coverage/py2/coverage/sqldata.py1123
-rw-r--r--contrib/python/coverage/py2/coverage/summary.py152
-rw-r--r--contrib/python/coverage/py2/coverage/templite.py302
-rw-r--r--contrib/python/coverage/py2/coverage/tomlconfig.py168
-rw-r--r--contrib/python/coverage/py2/coverage/version.py33
-rw-r--r--contrib/python/coverage/py2/coverage/xmlreport.py234
-rw-r--r--contrib/python/coverage/py2/ya.make98
-rw-r--r--contrib/python/coverage/py3/.dist-info/METADATA190
-rw-r--r--contrib/python/coverage/py3/.dist-info/entry_points.txt5
-rw-r--r--contrib/python/coverage/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/coverage/py3/LICENSE.txt177
-rw-r--r--contrib/python/coverage/py3/NOTICE.txt14
-rw-r--r--contrib/python/coverage/py3/README.rst151
-rw-r--r--contrib/python/coverage/py3/coverage/__init__.py36
-rw-r--r--contrib/python/coverage/py3/coverage/__main__.py8
-rw-r--r--contrib/python/coverage/py3/coverage/annotate.py108
-rw-r--r--contrib/python/coverage/py3/coverage/backward.py267
-rw-r--r--contrib/python/coverage/py3/coverage/bytecode.py19
-rw-r--r--contrib/python/coverage/py3/coverage/cmdline.py910
-rw-r--r--contrib/python/coverage/py3/coverage/collector.py455
-rw-r--r--contrib/python/coverage/py3/coverage/config.py605
-rw-r--r--contrib/python/coverage/py3/coverage/context.py91
-rw-r--r--contrib/python/coverage/py3/coverage/control.py1162
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/datastack.c50
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/datastack.h45
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/filedisp.c85
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/filedisp.h26
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/module.c108
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/stats.h31
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/tracer.c1149
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/tracer.h75
-rw-r--r--contrib/python/coverage/py3/coverage/ctracer/util.h103
-rw-r--r--contrib/python/coverage/py3/coverage/data.py125
-rw-r--r--contrib/python/coverage/py3/coverage/debug.py406
-rw-r--r--contrib/python/coverage/py3/coverage/disposition.py37
-rw-r--r--contrib/python/coverage/py3/coverage/env.py130
-rw-r--r--contrib/python/coverage/py3/coverage/execfile.py362
-rw-r--r--contrib/python/coverage/py3/coverage/files.py441
-rw-r--r--contrib/python/coverage/py3/coverage/fullcoverage/encodings.py60
-rw-r--r--contrib/python/coverage/py3/coverage/html.py539
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/coverage_html.js616
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/favicon_32.pngbin1732 -> 0 bytes
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/index.html119
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js9
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/jquery.hotkeys.js99
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/jquery.isonscreen.js53
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/jquery.min.js4
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/jquery.tablesorter.min.js2
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/keybd_closed.pngbin112 -> 0 bytes
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/keybd_open.pngbin112 -> 0 bytes
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/pyfile.html113
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/style.css291
-rw-r--r--contrib/python/coverage/py3/coverage/htmlfiles/style.scss660
-rw-r--r--contrib/python/coverage/py3/coverage/inorout.py513
-rw-r--r--contrib/python/coverage/py3/coverage/jsonreport.py103
-rw-r--r--contrib/python/coverage/py3/coverage/misc.py361
-rw-r--r--contrib/python/coverage/py3/coverage/multiproc.py117
-rw-r--r--contrib/python/coverage/py3/coverage/numbits.py163
-rw-r--r--contrib/python/coverage/py3/coverage/parser.py1276
-rw-r--r--contrib/python/coverage/py3/coverage/phystokens.py297
-rw-r--r--contrib/python/coverage/py3/coverage/plugin.py533
-rw-r--r--contrib/python/coverage/py3/coverage/plugin_support.py281
-rw-r--r--contrib/python/coverage/py3/coverage/python.py261
-rw-r--r--contrib/python/coverage/py3/coverage/pytracer.py274
-rw-r--r--contrib/python/coverage/py3/coverage/report.py86
-rw-r--r--contrib/python/coverage/py3/coverage/results.py343
-rw-r--r--contrib/python/coverage/py3/coverage/sqldata.py1123
-rw-r--r--contrib/python/coverage/py3/coverage/summary.py152
-rw-r--r--contrib/python/coverage/py3/coverage/templite.py302
-rw-r--r--contrib/python/coverage/py3/coverage/tomlconfig.py168
-rw-r--r--contrib/python/coverage/py3/coverage/version.py33
-rw-r--r--contrib/python/coverage/py3/coverage/xmlreport.py234
-rw-r--r--contrib/python/coverage/py3/ya.make98
-rw-r--r--contrib/python/coverage/ya.make19
-rw-r--r--contrib/python/diff-match-patch/py2/.dist-info/METADATA112
-rw-r--r--contrib/python/diff-match-patch/py2/.dist-info/top_level.txt1
-rw-r--r--contrib/python/diff-match-patch/py2/AUTHORS10
-rw-r--r--contrib/python/diff-match-patch/py2/LICENSE202
-rw-r--r--contrib/python/diff-match-patch/py2/README.md84
-rw-r--r--contrib/python/diff-match-patch/py2/diff_match_patch/__init__.py9
-rw-r--r--contrib/python/diff-match-patch/py2/diff_match_patch/diff_match_patch_py2.py2037
-rw-r--r--contrib/python/diff-match-patch/py2/ya.make27
-rw-r--r--contrib/python/diff-match-patch/py3/.dist-info/METADATA108
-rw-r--r--contrib/python/diff-match-patch/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/diff-match-patch/py3/AUTHORS10
-rw-r--r--contrib/python/diff-match-patch/py3/LICENSE202
-rw-r--r--contrib/python/diff-match-patch/py3/README.md84
-rw-r--r--contrib/python/diff-match-patch/py3/diff_match_patch/__init__.py10
-rw-r--r--contrib/python/diff-match-patch/py3/diff_match_patch/__version__.py7
-rw-r--r--contrib/python/diff-match-patch/py3/diff_match_patch/diff_match_patch.py2019
-rw-r--r--contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest1.txt230
-rw-r--r--contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest2.txt188
-rw-r--r--contrib/python/diff-match-patch/py3/ya.make28
-rw-r--r--contrib/python/diff-match-patch/ya.make18
-rw-r--r--contrib/python/humanfriendly/py2/LICENSE.txt20
-rw-r--r--contrib/python/humanfriendly/py2/README.rst170
-rw-r--r--contrib/python/humanfriendly/py3/.dist-info/METADATA216
-rw-r--r--contrib/python/humanfriendly/py3/.dist-info/entry_points.txt3
-rw-r--r--contrib/python/humanfriendly/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/humanfriendly/py3/LICENSE.txt20
-rw-r--r--contrib/python/humanfriendly/py3/README.rst170
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/__init__.py838
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/case.py157
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/cli.py291
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/compat.py146
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/decorators.py43
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/deprecation.py251
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/prompts.py376
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/sphinx.py315
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/tables.py341
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/terminal/__init__.py776
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/terminal/html.py423
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/terminal/spinners.py310
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/testing.py669
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/text.py449
-rw-r--r--contrib/python/humanfriendly/py3/humanfriendly/usage.py351
-rw-r--r--contrib/python/humanfriendly/py3/ya.make41
-rw-r--r--contrib/python/humanfriendly/ya.make18
-rw-r--r--contrib/python/marisa-trie/agent.pxd22
-rw-r--r--contrib/python/marisa-trie/base.pxd63
-rw-r--r--contrib/python/marisa-trie/iostream.pxd7
-rw-r--r--contrib/python/marisa-trie/key.pxd22
-rw-r--r--contrib/python/marisa-trie/keyset.pxd30
-rw-r--r--contrib/python/marisa-trie/marisa/agent.cc51
-rw-r--r--contrib/python/marisa-trie/marisa/agent.h75
-rw-r--r--contrib/python/marisa-trie/marisa/base.h196
-rw-r--r--contrib/python/marisa-trie/marisa/exception.h84
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/algorithm.h27
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/algorithm/sort.h197
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/intrin.h116
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io.h19
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/mapper.cc163
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/mapper.h68
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/reader.cc147
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/reader.h67
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/writer.cc148
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/io/writer.h66
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie.h17
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/cache.h82
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/config.h156
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/entry.h83
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/header.h62
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/history.h66
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/key.h227
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.cc877
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.h135
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/range.h116
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/state.h118
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/tail.cc218
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/trie/tail.h73
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector.h19
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.cc825
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.h180
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/flat-vector.h206
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/pop-count.h111
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/rank-index.h83
-rw-r--r--contrib/python/marisa-trie/marisa/grimoire/vector/vector.h257
-rw-r--r--contrib/python/marisa-trie/marisa/iostream.h19
-rw-r--r--contrib/python/marisa-trie/marisa/key.h86
-rw-r--r--contrib/python/marisa-trie/marisa/keyset.cc181
-rw-r--r--contrib/python/marisa-trie/marisa/keyset.h81
-rw-r--r--contrib/python/marisa-trie/marisa/query.h72
-rw-r--r--contrib/python/marisa-trie/marisa/scoped-array.h49
-rw-r--r--contrib/python/marisa-trie/marisa/scoped-ptr.h53
-rw-r--r--contrib/python/marisa-trie/marisa/stdio.h16
-rw-r--r--contrib/python/marisa-trie/marisa/trie.cc249
-rw-r--r--contrib/python/marisa-trie/marisa/trie.h65
-rw-r--r--contrib/python/marisa-trie/marisa_trie.pyx763
-rw-r--r--contrib/python/marisa-trie/query.pxd20
-rw-r--r--contrib/python/marisa-trie/std_iostream.pxd18
-rw-r--r--contrib/python/marisa-trie/trie.pxd41
-rw-r--r--contrib/python/marisa-trie/ya.make33
-rw-r--r--contrib/python/path.py/py2/LICENSE7
-rw-r--r--contrib/python/path.py/py2/README.rst134
-rw-r--r--contrib/python/path.py/py3/.dist-info/METADATA36
-rw-r--r--contrib/python/path.py/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/path.py/py3/LICENSE7
-rw-r--r--contrib/python/path.py/py3/README.rst1
-rw-r--r--contrib/python/path.py/py3/ya.make21
-rw-r--r--contrib/python/path.py/ya.make18
-rw-r--r--contrib/python/path/.dist-info/METADATA201
-rw-r--r--contrib/python/path/.dist-info/top_level.txt1
-rw-r--r--contrib/python/path/LICENSE17
-rw-r--r--contrib/python/path/README.rst163
-rw-r--r--contrib/python/path/path/__init__.py1665
-rw-r--r--contrib/python/path/path/classes.py27
-rw-r--r--contrib/python/path/path/masks.py159
-rw-r--r--contrib/python/path/path/matchers.py59
-rw-r--r--contrib/python/path/path/py.typed0
-rw-r--r--contrib/python/path/ya.make30
-rw-r--r--contrib/python/pygtrie/py2/LICENSE202
-rw-r--r--contrib/python/pygtrie/py2/README.rst66
-rw-r--r--contrib/python/pygtrie/py3/.dist-info/METADATA220
-rw-r--r--contrib/python/pygtrie/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pygtrie/py3/LICENSE202
-rw-r--r--contrib/python/pygtrie/py3/README.rst66
-rw-r--r--contrib/python/pygtrie/py3/pygtrie.py1939
-rw-r--r--contrib/python/pygtrie/py3/ya.make26
-rw-r--r--contrib/python/pygtrie/ya.make18
-rw-r--r--contrib/python/pyre2/py2/AUTHORS12
-rw-r--r--contrib/python/pyre2/py2/LICENSE9
-rw-r--r--contrib/python/pyre2/py2/README.rst250
-rw-r--r--contrib/python/pyre2/py2/tests/test_charliterals.txt47
-rw-r--r--contrib/python/pyre2/py2/tests/test_count.txt40
-rw-r--r--contrib/python/pyre2/py2/tests/test_emptygroups.txt36
-rw-r--r--contrib/python/pyre2/py2/tests/test_findall.txt42
-rw-r--r--contrib/python/pyre2/py2/tests/test_finditer.txt28
-rw-r--r--contrib/python/pyre2/py2/tests/test_match_expand.txt29
-rw-r--r--contrib/python/pyre2/py2/tests/test_mmap.txt18
-rw-r--r--contrib/python/pyre2/py2/tests/test_namedgroups.txt56
-rw-r--r--contrib/python/pyre2/py2/tests/test_pattern.txt12
-rw-r--r--contrib/python/pyre2/py2/tests/test_search.txt29
-rw-r--r--contrib/python/pyre2/py2/tests/test_split.txt17
-rw-r--r--contrib/python/pyre2/py2/tests/test_sub.txt31
-rw-r--r--contrib/python/pyre2/py2/tests/test_unicode.txt71
-rw-r--r--contrib/python/pyre2/py3/.dist-info/METADATA275
-rw-r--r--contrib/python/pyre2/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/pyre2/py3/AUTHORS12
-rw-r--r--contrib/python/pyre2/py3/LICENSE9
-rw-r--r--contrib/python/pyre2/py3/README.rst250
-rw-r--r--contrib/python/pyre2/py3/src/_re2macros.h13
-rw-r--r--contrib/python/pyre2/py3/src/compile.pxi234
-rw-r--r--contrib/python/pyre2/py3/src/includes.pxi109
-rw-r--r--contrib/python/pyre2/py3/src/match.pxi280
-rw-r--r--contrib/python/pyre2/py3/src/pattern.pxi650
-rw-r--r--contrib/python/pyre2/py3/src/re2.pyx458
-rw-r--r--contrib/python/pyre2/py3/tests/test_charliterals.txt47
-rw-r--r--contrib/python/pyre2/py3/tests/test_count.txt40
-rw-r--r--contrib/python/pyre2/py3/tests/test_emptygroups.txt36
-rw-r--r--contrib/python/pyre2/py3/tests/test_findall.txt42
-rw-r--r--contrib/python/pyre2/py3/tests/test_finditer.txt28
-rw-r--r--contrib/python/pyre2/py3/tests/test_match_expand.txt29
-rw-r--r--contrib/python/pyre2/py3/tests/test_mmap.txt18
-rw-r--r--contrib/python/pyre2/py3/tests/test_namedgroups.txt56
-rw-r--r--contrib/python/pyre2/py3/tests/test_pattern.txt12
-rw-r--r--contrib/python/pyre2/py3/tests/test_search.txt29
-rw-r--r--contrib/python/pyre2/py3/tests/test_split.txt17
-rw-r--r--contrib/python/pyre2/py3/tests/test_sub.txt31
-rw-r--r--contrib/python/pyre2/py3/tests/test_unicode.txt71
-rw-r--r--contrib/python/pyre2/py3/ya.make39
-rw-r--r--contrib/python/pyre2/ya.make18
-rw-r--r--contrib/python/python-magic/py2/LICENSE58
-rw-r--r--contrib/python/python-magic/py2/README.md144
-rw-r--r--contrib/python/python-magic/py3/.dist-info/METADATA171
-rw-r--r--contrib/python/python-magic/py3/.dist-info/top_level.txt1
-rw-r--r--contrib/python/python-magic/py3/LICENSE58
-rw-r--r--contrib/python/python-magic/py3/README.md144
-rw-r--r--contrib/python/python-magic/py3/magic/__init__.py469
-rw-r--r--contrib/python/python-magic/py3/magic/compat.py287
-rw-r--r--contrib/python/python-magic/py3/magic/loader.py50
-rw-r--r--contrib/python/python-magic/py3/magic/py.typed0
-rw-r--r--contrib/python/python-magic/py3/ya.make31
-rw-r--r--contrib/python/python-magic/ya.make18
-rw-r--r--contrib/tools/ragel5/common/buffer.h55
-rw-r--r--contrib/tools/ragel5/common/common.cpp296
-rw-r--r--contrib/tools/ragel5/common/common.h308
-rw-r--r--contrib/tools/ragel5/common/config.h39
-rw-r--r--contrib/tools/ragel5/common/pcheck.h51
-rw-r--r--contrib/tools/ragel5/common/version.h2
-rw-r--r--contrib/tools/ragel5/common/ya.make20
-rw-r--r--contrib/tools/ragel5/ragel/fsmap.cpp840
-rw-r--r--contrib/tools/ragel5/ragel/fsmattach.cpp425
-rw-r--r--contrib/tools/ragel5/ragel/fsmbase.cpp598
-rw-r--r--contrib/tools/ragel5/ragel/fsmgraph.cpp1426
-rw-r--r--contrib/tools/ragel5/ragel/fsmgraph.h1482
-rw-r--r--contrib/tools/ragel5/ragel/fsmmin.cpp732
-rw-r--r--contrib/tools/ragel5/ragel/fsmstate.cpp463
-rw-r--r--contrib/tools/ragel5/ragel/main.cpp355
-rw-r--r--contrib/tools/ragel5/ragel/parsedata.cpp1505
-rw-r--r--contrib/tools/ragel5/ragel/parsedata.h401
-rw-r--r--contrib/tools/ragel5/ragel/parsetree.cpp2089
-rw-r--r--contrib/tools/ragel5/ragel/parsetree.h755
-rw-r--r--contrib/tools/ragel5/ragel/ragel.h74
-rw-r--r--contrib/tools/ragel5/ragel/rlparse.cpp6088
-rw-r--r--contrib/tools/ragel5/ragel/rlparse.h184
-rw-r--r--contrib/tools/ragel5/ragel/rlscan.cpp4876
-rw-r--r--contrib/tools/ragel5/ragel/rlscan.h161
-rw-r--r--contrib/tools/ragel5/ragel/xmlcodegen.cpp713
-rw-r--r--contrib/tools/ragel5/ragel/xmlcodegen.h137
-rw-r--r--contrib/tools/ragel5/ragel/ya.make26
-rw-r--r--contrib/tools/ragel5/redfsm/gendata.cpp717
-rw-r--r--contrib/tools/ragel5/redfsm/gendata.h167
-rw-r--r--contrib/tools/ragel5/redfsm/phash.h10
-rw-r--r--contrib/tools/ragel5/redfsm/redfsm.cpp559
-rw-r--r--contrib/tools/ragel5/redfsm/redfsm.h534
-rw-r--r--contrib/tools/ragel5/redfsm/xmlparse.cpp3549
-rw-r--r--contrib/tools/ragel5/redfsm/xmlparse.h228
-rw-r--r--contrib/tools/ragel5/redfsm/xmlscan.cpp925
-rw-r--r--contrib/tools/ragel5/redfsm/xmltags.cpp244
-rw-r--r--contrib/tools/ragel5/redfsm/ya.make25
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fflatcodegen.cpp351
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fflatcodegen.h76
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fgotocodegen.cpp262
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fgotocodegen.h76
-rw-r--r--contrib/tools/ragel5/rlgen-cd/flatcodegen.cpp766
-rw-r--r--contrib/tools/ragel5/rlgen-cd/flatcodegen.h108
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fsmcodegen.cpp749
-rw-r--r--contrib/tools/ragel5/rlgen-cd/fsmcodegen.h218
-rw-r--r--contrib/tools/ragel5/rlgen-cd/ftabcodegen.cpp405
-rw-r--r--contrib/tools/ragel5/rlgen-cd/ftabcodegen.h78
-rw-r--r--contrib/tools/ragel5/rlgen-cd/gotocodegen.cpp742
-rw-r--r--contrib/tools/ragel5/rlgen-cd/gotocodegen.h111
-rw-r--r--contrib/tools/ragel5/rlgen-cd/ipgotocodegen.cpp414
-rw-r--r--contrib/tools/ragel5/rlgen-cd/ipgotocodegen.h97
-rw-r--r--contrib/tools/ragel5/rlgen-cd/main.cpp394
-rw-r--r--contrib/tools/ragel5/rlgen-cd/rlgen-cd.h60
-rw-r--r--contrib/tools/ragel5/rlgen-cd/splitcodegen.cpp521
-rw-r--r--contrib/tools/ragel5/rlgen-cd/splitcodegen.h71
-rw-r--r--contrib/tools/ragel5/rlgen-cd/tabcodegen.cpp988
-rw-r--r--contrib/tools/ragel5/rlgen-cd/tabcodegen.h115
-rw-r--r--contrib/tools/ragel5/rlgen-cd/ya.make25
-rw-r--r--contrib/tools/swig/Lib/exception.i332
-rw-r--r--contrib/tools/swig/Lib/go/exception.i7
-rw-r--r--contrib/tools/swig/Lib/go/std_common.i4
-rw-r--r--contrib/tools/swig/Lib/go/std_except.i31
-rw-r--r--contrib/tools/swig/Lib/go/std_string.i162
-rw-r--r--contrib/tools/swig/Lib/go/std_vector.i92
-rw-r--r--contrib/tools/swig/Lib/java/std_common.i5
-rw-r--r--contrib/tools/swig/Lib/java/std_except.i32
-rw-r--r--contrib/tools/swig/Lib/java/std_string.i121
-rw-r--r--contrib/tools/swig/Lib/java/std_vector.i185
-rw-r--r--contrib/tools/swig/Lib/perl5/exception.i5
-rw-r--r--contrib/tools/swig/Lib/perl5/std_common.i28
-rw-r--r--contrib/tools/swig/Lib/perl5/std_except.i1
-rw-r--r--contrib/tools/swig/Lib/perl5/std_string.i2
-rw-r--r--contrib/tools/swig/Lib/perl5/std_vector.i592
-rw-r--r--contrib/tools/swig/Lib/python/exception.i6
-rw-r--r--contrib/tools/swig/Lib/python/pycontainer.swg1082
-rw-r--r--contrib/tools/swig/Lib/python/pyiterators.swg458
-rw-r--r--contrib/tools/swig/Lib/python/pystdcommon.swg265
-rw-r--r--contrib/tools/swig/Lib/python/pywstrings.swg85
-rw-r--r--contrib/tools/swig/Lib/python/std_alloc.i1
-rw-r--r--contrib/tools/swig/Lib/python/std_char_traits.i1
-rw-r--r--contrib/tools/swig/Lib/python/std_common.i74
-rw-r--r--contrib/tools/swig/Lib/python/std_container.i2
-rw-r--r--contrib/tools/swig/Lib/python/std_except.i1
-rw-r--r--contrib/tools/swig/Lib/python/std_string.i1
-rw-r--r--contrib/tools/swig/Lib/python/std_vector.i34
-rw-r--r--contrib/tools/swig/Lib/python/wchar.i21
-rw-r--r--contrib/tools/swig/Lib/std/std_alloc.i77
-rw-r--r--contrib/tools/swig/Lib/std/std_basic_string.i276
-rw-r--r--contrib/tools/swig/Lib/std/std_char_traits.i140
-rw-r--r--contrib/tools/swig/Lib/std/std_common.i250
-rw-r--r--contrib/tools/swig/Lib/std/std_container.i169
-rw-r--r--contrib/tools/swig/Lib/std/std_except.i73
-rw-r--r--contrib/tools/swig/Lib/std/std_string.i13
-rw-r--r--contrib/tools/swig/Lib/std/std_vector.i225
-rw-r--r--contrib/tools/swig/Lib/typemaps/std_except.swg37
-rw-r--r--contrib/tools/swig/Lib/typemaps/std_string.swg25
-rw-r--r--contrib/tools/swig/Lib/typemaps/std_strings.swg78
-rw-r--r--contrib/tools/swig/Lib/wchar.i11
-rw-r--r--devtools/dummy_arcadia/hello_world/hello.cpp3
-rw-r--r--devtools/dummy_arcadia/hello_world/ya.make15
-rw-r--r--devtools/ya/chameleon_bin/__main__.py59
-rw-r--r--devtools/ya/chameleon_bin/recipe.inc32
-rw-r--r--devtools/ya/chameleon_bin/ya.make12
-rw-r--r--devtools/ya/handlers/dump/__init__.py1161
-rw-r--r--devtools/ya/handlers/dump/gen_conf_docs.py384
-rw-r--r--devtools/ya/handlers/dump/ya.make52
-rw-r--r--devtools/ya/handlers/gc/__init__.py342
-rw-r--r--devtools/ya/handlers/gc/ya.make37
-rw-r--r--devtools/ya/handlers/gen_config/__init__.py48
-rw-r--r--devtools/ya/handlers/gen_config/gen_config.py282
-rw-r--r--devtools/ya/handlers/gen_config/ya.make23
-rw-r--r--devtools/ya/handlers/ide/__init__.py734
-rw-r--r--devtools/ya/handlers/ide/ya.make31
-rw-r--r--devtools/ya/handlers/java/__init__.py49
-rw-r--r--devtools/ya/handlers/java/helpers.py250
-rw-r--r--devtools/ya/handlers/java/ya.make24
-rw-r--r--devtools/ya/handlers/make/__init__.py32
-rw-r--r--devtools/ya/handlers/make/ya.make26
-rw-r--r--devtools/ya/handlers/maven_import/__init__.py31
-rw-r--r--devtools/ya/handlers/maven_import/ya.make28
-rw-r--r--devtools/ya/handlers/package/__init__.py88
-rw-r--r--devtools/ya/handlers/package/opts/__init__.py691
-rw-r--r--devtools/ya/handlers/package/opts/ya.make19
-rw-r--r--devtools/ya/handlers/package/ya.make27
-rw-r--r--devtools/ya/handlers/test/__init__.py40
-rw-r--r--devtools/ya/handlers/test/ya.make21
-rw-r--r--devtools/ya/handlers/tool/__init__.py281
-rw-r--r--devtools/ya/handlers/tool/ya.make33
-rw-r--r--devtools/ya/handlers/ya.make151
-rw-r--r--devtools/ya/opensource/ya.conf92
-rw-r--r--library/cpp/CMakeLists.darwin-arm64.txt1
-rw-r--r--library/cpp/CMakeLists.darwin-x86_64.txt1
-rw-r--r--library/cpp/CMakeLists.linux-aarch64.txt1
-rw-r--r--library/cpp/CMakeLists.linux-x86_64.txt1
-rw-r--r--library/cpp/CMakeLists.windows-x86_64.txt1
-rw-r--r--library/cpp/actors/CMakeLists.txt24
-rw-r--r--library/cpp/actors/README.md107
-rw-r--r--library/cpp/actors/actor_type/CMakeLists.darwin-arm64.txt33
-rw-r--r--library/cpp/actors/actor_type/CMakeLists.darwin-x86_64.txt33
-rw-r--r--library/cpp/actors/actor_type/CMakeLists.linux-aarch64.txt34
-rw-r--r--library/cpp/actors/actor_type/CMakeLists.linux-x86_64.txt34
-rw-r--r--library/cpp/actors/actor_type/CMakeLists.windows-x86_64.txt33
-rw-r--r--library/cpp/actors/actor_type/common.cpp5
-rw-r--r--library/cpp/actors/actor_type/common.h34
-rw-r--r--library/cpp/actors/actor_type/index_constructor.cpp5
-rw-r--r--library/cpp/actors/actor_type/index_constructor.h12
-rw-r--r--library/cpp/actors/actor_type/indexes.cpp5
-rw-r--r--library/cpp/actors/actor_type/indexes.h31
-rw-r--r--library/cpp/actors/actor_type/ya.make16
-rw-r--r--library/cpp/actors/core/CMakeLists.darwin-arm64.txt101
-rw-r--r--library/cpp/actors/core/CMakeLists.darwin-x86_64.txt101
-rw-r--r--library/cpp/actors/core/CMakeLists.linux-aarch64.txt102
-rw-r--r--library/cpp/actors/core/CMakeLists.linux-x86_64.txt102
-rw-r--r--library/cpp/actors/core/CMakeLists.windows-x86_64.txt101
-rw-r--r--library/cpp/actors/core/README.md51
-rw-r--r--library/cpp/actors/core/actor.cpp250
-rw-r--r--library/cpp/actors/core/actor.h999
-rw-r--r--library/cpp/actors/core/actor_benchmark_helper.h763
-rw-r--r--library/cpp/actors/core/actor_bootstrapped.cpp4
-rw-r--r--library/cpp/actors/core/actor_bootstrapped.h48
-rw-r--r--library/cpp/actors/core/actor_coroutine.cpp182
-rw-r--r--library/cpp/actors/core/actor_coroutine.h230
-rw-r--r--library/cpp/actors/core/actor_coroutine_ut.cpp145
-rw-r--r--library/cpp/actors/core/actor_ut.cpp511
-rw-r--r--library/cpp/actors/core/actor_virtual.cpp6
-rw-r--r--library/cpp/actors/core/actor_virtual.h88
-rw-r--r--library/cpp/actors/core/actorid.cpp34
-rw-r--r--library/cpp/actors/core/actorid.h198
-rw-r--r--library/cpp/actors/core/actorsystem.cpp323
-rw-r--r--library/cpp/actors/core/actorsystem.h311
-rw-r--r--library/cpp/actors/core/actorsystem_ut.cpp45
-rw-r--r--library/cpp/actors/core/ask.cpp76
-rw-r--r--library/cpp/actors/core/ask.h18
-rw-r--r--library/cpp/actors/core/ask_ut.cpp131
-rw-r--r--library/cpp/actors/core/av_bootstrapped.cpp17
-rw-r--r--library/cpp/actors/core/av_bootstrapped.h18
-rw-r--r--library/cpp/actors/core/balancer.cpp311
-rw-r--r--library/cpp/actors/core/balancer.h30
-rw-r--r--library/cpp/actors/core/balancer_ut.cpp225
-rw-r--r--library/cpp/actors/core/benchmark_ut.cpp1111
-rw-r--r--library/cpp/actors/core/buffer.cpp93
-rw-r--r--library/cpp/actors/core/buffer.h62
-rw-r--r--library/cpp/actors/core/callstack.cpp93
-rw-r--r--library/cpp/actors/core/callstack.h58
-rw-r--r--library/cpp/actors/core/config.h260
-rw-r--r--library/cpp/actors/core/cpu_manager.cpp148
-rw-r--r--library/cpp/actors/core/cpu_manager.h55
-rw-r--r--library/cpp/actors/core/cpu_state.h215
-rw-r--r--library/cpp/actors/core/defs.h77
-rw-r--r--library/cpp/actors/core/event.cpp51
-rw-r--r--library/cpp/actors/core/event.h389
-rw-r--r--library/cpp/actors/core/event_load.cpp5
-rw-r--r--library/cpp/actors/core/event_load.h133
-rw-r--r--library/cpp/actors/core/event_local.h74
-rw-r--r--library/cpp/actors/core/event_pb.cpp224
-rw-r--r--library/cpp/actors/core/event_pb.h654
-rw-r--r--library/cpp/actors/core/event_pb_payload_ut.cpp154
-rw-r--r--library/cpp/actors/core/event_pb_ut.cpp71
-rw-r--r--library/cpp/actors/core/events.h224
-rw-r--r--library/cpp/actors/core/events_undelivered.cpp59
-rw-r--r--library/cpp/actors/core/executelater.h86
-rw-r--r--library/cpp/actors/core/executor_pool.h154
-rw-r--r--library/cpp/actors/core/executor_pool_base.cpp271
-rw-r--r--library/cpp/actors/core/executor_pool_base.h62
-rw-r--r--library/cpp/actors/core/executor_pool_basic.cpp749
-rw-r--r--library/cpp/actors/core/executor_pool_basic.h291
-rw-r--r--library/cpp/actors/core/executor_pool_basic_feature_flags.h50
-rw-r--r--library/cpp/actors/core/executor_pool_basic_ut.cpp666
-rw-r--r--library/cpp/actors/core/executor_pool_io.cpp157
-rw-r--r--library/cpp/actors/core/executor_pool_io.h49
-rw-r--r--library/cpp/actors/core/executor_pool_united.cpp1455
-rw-r--r--library/cpp/actors/core/executor_pool_united.h48
-rw-r--r--library/cpp/actors/core/executor_pool_united_ut.cpp341
-rw-r--r--library/cpp/actors/core/executor_pool_united_workers.h105
-rw-r--r--library/cpp/actors/core/executor_thread.cpp714
-rw-r--r--library/cpp/actors/core/executor_thread.h126
-rw-r--r--library/cpp/actors/core/harmonizer.cpp700
-rw-r--r--library/cpp/actors/core/harmonizer.h51
-rw-r--r--library/cpp/actors/core/hfunc.h116
-rw-r--r--library/cpp/actors/core/interconnect.cpp181
-rw-r--r--library/cpp/actors/core/interconnect.h268
-rw-r--r--library/cpp/actors/core/invoke.h145
-rw-r--r--library/cpp/actors/core/io_dispatcher.cpp236
-rw-r--r--library/cpp/actors/core/io_dispatcher.h38
-rw-r--r--library/cpp/actors/core/lease.h56
-rw-r--r--library/cpp/actors/core/log.cpp768
-rw-r--r--library/cpp/actors/core/log.h627
-rw-r--r--library/cpp/actors/core/log_buffer.cpp108
-rw-r--r--library/cpp/actors/core/log_buffer.h40
-rw-r--r--library/cpp/actors/core/log_iface.h117
-rw-r--r--library/cpp/actors/core/log_metrics.h152
-rw-r--r--library/cpp/actors/core/log_settings.cpp232
-rw-r--r--library/cpp/actors/core/log_settings.h175
-rw-r--r--library/cpp/actors/core/log_ut.cpp251
-rw-r--r--library/cpp/actors/core/mailbox.cpp590
-rw-r--r--library/cpp/actors/core/mailbox.h571
-rw-r--r--library/cpp/actors/core/mailbox_queue_revolving.h214
-rw-r--r--library/cpp/actors/core/mailbox_queue_simple.h34
-rw-r--r--library/cpp/actors/core/mon.h267
-rw-r--r--library/cpp/actors/core/mon_stats.h191
-rw-r--r--library/cpp/actors/core/mon_ut.cpp29
-rw-r--r--library/cpp/actors/core/monotonic.cpp1
-rw-r--r--library/cpp/actors/core/monotonic.h12
-rw-r--r--library/cpp/actors/core/monotonic_provider.cpp1
-rw-r--r--library/cpp/actors/core/monotonic_provider.h11
-rw-r--r--library/cpp/actors/core/performance_ut.cpp182
-rw-r--r--library/cpp/actors/core/probes.cpp28
-rw-r--r--library/cpp/actors/core/probes.h221
-rw-r--r--library/cpp/actors/core/process_stats.cpp358
-rw-r--r--library/cpp/actors/core/process_stats.h67
-rw-r--r--library/cpp/actors/core/scheduler_actor.cpp279
-rw-r--r--library/cpp/actors/core/scheduler_actor.h29
-rw-r--r--library/cpp/actors/core/scheduler_actor_ut.cpp100
-rw-r--r--library/cpp/actors/core/scheduler_basic.cpp275
-rw-r--r--library/cpp/actors/core/scheduler_basic.h81
-rw-r--r--library/cpp/actors/core/scheduler_cookie.cpp84
-rw-r--r--library/cpp/actors/core/scheduler_cookie.h78
-rw-r--r--library/cpp/actors/core/scheduler_queue.h123
-rw-r--r--library/cpp/actors/core/servicemap.h168
-rw-r--r--library/cpp/actors/core/thread_context.h30
-rw-r--r--library/cpp/actors/core/ut/CMakeLists.darwin-arm64.txt88
-rw-r--r--library/cpp/actors/core/ut/CMakeLists.darwin-x86_64.txt89
-rw-r--r--library/cpp/actors/core/ut/CMakeLists.linux-aarch64.txt92
-rw-r--r--library/cpp/actors/core/ut/CMakeLists.linux-x86_64.txt94
-rw-r--r--library/cpp/actors/core/ut/CMakeLists.windows-x86_64.txt82
-rw-r--r--library/cpp/actors/core/ut/ya.make43
-rw-r--r--library/cpp/actors/core/ut_fat/CMakeLists.darwin-arm64.txt70
-rw-r--r--library/cpp/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt71
-rw-r--r--library/cpp/actors/core/ut_fat/CMakeLists.linux-aarch64.txt74
-rw-r--r--library/cpp/actors/core/ut_fat/CMakeLists.linux-x86_64.txt76
-rw-r--r--library/cpp/actors/core/ut_fat/CMakeLists.windows-x86_64.txt64
-rw-r--r--library/cpp/actors/core/ut_fat/actor_benchmark.cpp46
-rw-r--r--library/cpp/actors/core/ut_fat/ya.make32
-rw-r--r--library/cpp/actors/core/worker_context.cpp7
-rw-r--r--library/cpp/actors/core/worker_context.h192
-rw-r--r--library/cpp/actors/core/ya.make133
-rw-r--r--library/cpp/actors/cppcoro/CMakeLists.darwin-arm64.txt24
-rw-r--r--library/cpp/actors/cppcoro/CMakeLists.darwin-x86_64.txt24
-rw-r--r--library/cpp/actors/cppcoro/CMakeLists.linux-aarch64.txt25
-rw-r--r--library/cpp/actors/cppcoro/CMakeLists.linux-x86_64.txt25
-rw-r--r--library/cpp/actors/cppcoro/CMakeLists.windows-x86_64.txt24
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt30
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt31
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt34
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt36
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt24
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/main.cpp76
-rw-r--r--library/cpp/actors/cppcoro/corobenchmark/ya.make11
-rw-r--r--library/cpp/actors/cppcoro/task_actor.cpp183
-rw-r--r--library/cpp/actors/cppcoro/task_actor.h107
-rw-r--r--library/cpp/actors/cppcoro/task_actor_ut.cpp93
-rw-r--r--library/cpp/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt68
-rw-r--r--library/cpp/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt69
-rw-r--r--library/cpp/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt72
-rw-r--r--library/cpp/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt74
-rw-r--r--library/cpp/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt62
-rw-r--r--library/cpp/actors/cppcoro/ut/ya.make12
-rw-r--r--library/cpp/actors/cppcoro/ya.make25
-rw-r--r--library/cpp/actors/dnscachelib/CMakeLists.darwin-arm64.txt21
-rw-r--r--library/cpp/actors/dnscachelib/CMakeLists.darwin-x86_64.txt21
-rw-r--r--library/cpp/actors/dnscachelib/CMakeLists.linux-aarch64.txt22
-rw-r--r--library/cpp/actors/dnscachelib/CMakeLists.linux-x86_64.txt22
-rw-r--r--library/cpp/actors/dnscachelib/CMakeLists.windows-x86_64.txt21
-rw-r--r--library/cpp/actors/dnscachelib/dnscache.cpp458
-rw-r--r--library/cpp/actors/dnscachelib/dnscache.h139
-rw-r--r--library/cpp/actors/dnscachelib/probes.cpp3
-rw-r--r--library/cpp/actors/dnscachelib/probes.h35
-rw-r--r--library/cpp/actors/dnscachelib/timekeeper.h70
-rw-r--r--library/cpp/actors/dnscachelib/ya.make23
-rw-r--r--library/cpp/actors/dnsresolver/CMakeLists.darwin-arm64.txt22
-rw-r--r--library/cpp/actors/dnsresolver/CMakeLists.darwin-x86_64.txt22
-rw-r--r--library/cpp/actors/dnsresolver/CMakeLists.linux-aarch64.txt23
-rw-r--r--library/cpp/actors/dnsresolver/CMakeLists.linux-x86_64.txt23
-rw-r--r--library/cpp/actors/dnsresolver/CMakeLists.windows-x86_64.txt22
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver.cpp485
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver.h128
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver_caching.cpp694
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp648
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp64
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp24
-rw-r--r--library/cpp/actors/dnsresolver/dnsresolver_ut.cpp98
-rw-r--r--library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt70
-rw-r--r--library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt71
-rw-r--r--library/cpp/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt74
-rw-r--r--library/cpp/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt76
-rw-r--r--library/cpp/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt64
-rw-r--r--library/cpp/actors/dnsresolver/ut/ya.make18
-rw-r--r--library/cpp/actors/dnsresolver/ya.make18
-rw-r--r--library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt29
-rw-r--r--library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt30
-rw-r--r--library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt33
-rw-r--r--library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt34
-rw-r--r--library/cpp/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt23
-rw-r--r--library/cpp/actors/examples/01_ping_pong/main.cpp129
-rw-r--r--library/cpp/actors/examples/01_ping_pong/ya.make13
-rw-r--r--library/cpp/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt66
-rw-r--r--library/cpp/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt67
-rw-r--r--library/cpp/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt70
-rw-r--r--library/cpp/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt71
-rw-r--r--library/cpp/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt60
-rw-r--r--library/cpp/actors/examples/02_discovery/endpoint.cpp116
-rw-r--r--library/cpp/actors/examples/02_discovery/lookup.cpp132
-rw-r--r--library/cpp/actors/examples/02_discovery/main.cpp136
-rw-r--r--library/cpp/actors/examples/02_discovery/publish.cpp110
-rw-r--r--library/cpp/actors/examples/02_discovery/replica.cpp181
-rw-r--r--library/cpp/actors/examples/02_discovery/services.h85
-rw-r--r--library/cpp/actors/examples/02_discovery/ya.make25
-rw-r--r--library/cpp/actors/helpers/CMakeLists.darwin-arm64.txt22
-rw-r--r--library/cpp/actors/helpers/CMakeLists.darwin-x86_64.txt22
-rw-r--r--library/cpp/actors/helpers/CMakeLists.linux-aarch64.txt23
-rw-r--r--library/cpp/actors/helpers/CMakeLists.linux-x86_64.txt23
-rw-r--r--library/cpp/actors/helpers/CMakeLists.windows-x86_64.txt22
-rw-r--r--library/cpp/actors/helpers/activeactors.cpp2
-rw-r--r--library/cpp/actors/helpers/activeactors.h42
-rw-r--r--library/cpp/actors/helpers/flow_controlled_queue.cpp215
-rw-r--r--library/cpp/actors/helpers/flow_controlled_queue.h19
-rw-r--r--library/cpp/actors/helpers/future_callback.h33
-rw-r--r--library/cpp/actors/helpers/mon_histogram_helper.h86
-rw-r--r--library/cpp/actors/helpers/selfping_actor.cpp208
-rw-r--r--library/cpp/actors/helpers/selfping_actor.h16
-rw-r--r--library/cpp/actors/helpers/selfping_actor_ut.cpp49
-rw-r--r--library/cpp/actors/helpers/ut/CMakeLists.darwin-arm64.txt76
-rw-r--r--library/cpp/actors/helpers/ut/CMakeLists.darwin-x86_64.txt77
-rw-r--r--library/cpp/actors/helpers/ut/CMakeLists.linux-aarch64.txt80
-rw-r--r--library/cpp/actors/helpers/ut/CMakeLists.linux-x86_64.txt82
-rw-r--r--library/cpp/actors/helpers/ut/CMakeLists.windows-x86_64.txt70
-rw-r--r--library/cpp/actors/helpers/ut/ya.make31
-rw-r--r--library/cpp/actors/helpers/ya.make23
-rw-r--r--library/cpp/actors/http/CMakeLists.darwin-arm64.txt32
-rw-r--r--library/cpp/actors/http/CMakeLists.darwin-x86_64.txt32
-rw-r--r--library/cpp/actors/http/CMakeLists.linux-aarch64.txt33
-rw-r--r--library/cpp/actors/http/CMakeLists.linux-x86_64.txt33
-rw-r--r--library/cpp/actors/http/CMakeLists.windows-x86_64.txt32
-rw-r--r--library/cpp/actors/http/http.cpp823
-rw-r--r--library/cpp/actors/http/http.h877
-rw-r--r--library/cpp/actors/http/http_cache.cpp608
-rw-r--r--library/cpp/actors/http/http_cache.h27
-rw-r--r--library/cpp/actors/http/http_compress.cpp76
-rw-r--r--library/cpp/actors/http/http_config.h21
-rw-r--r--library/cpp/actors/http/http_proxy.cpp364
-rw-r--r--library/cpp/actors/http/http_proxy.h246
-rw-r--r--library/cpp/actors/http/http_proxy_acceptor.cpp158
-rw-r--r--library/cpp/actors/http/http_proxy_incoming.cpp310
-rw-r--r--library/cpp/actors/http/http_proxy_outgoing.cpp335
-rw-r--r--library/cpp/actors/http/http_proxy_sock64.h147
-rw-r--r--library/cpp/actors/http/http_proxy_sock_impl.h274
-rw-r--r--library/cpp/actors/http/http_proxy_ssl.h133
-rw-r--r--library/cpp/actors/http/http_static.cpp97
-rw-r--r--library/cpp/actors/http/http_static.h9
-rw-r--r--library/cpp/actors/http/http_ut.cpp509
-rw-r--r--library/cpp/actors/http/ut/CMakeLists.darwin-arm64.txt67
-rw-r--r--library/cpp/actors/http/ut/CMakeLists.darwin-x86_64.txt68
-rw-r--r--library/cpp/actors/http/ut/CMakeLists.linux-aarch64.txt71
-rw-r--r--library/cpp/actors/http/ut/CMakeLists.linux-x86_64.txt73
-rw-r--r--library/cpp/actors/http/ut/CMakeLists.windows-x86_64.txt58
-rw-r--r--library/cpp/actors/http/ut/ya.make16
-rw-r--r--library/cpp/actors/http/ya.make36
-rw-r--r--library/cpp/actors/interconnect/CMakeLists.darwin-arm64.txt61
-rw-r--r--library/cpp/actors/interconnect/CMakeLists.darwin-x86_64.txt61
-rw-r--r--library/cpp/actors/interconnect/CMakeLists.linux-aarch64.txt63
-rw-r--r--library/cpp/actors/interconnect/CMakeLists.linux-x86_64.txt63
-rw-r--r--library/cpp/actors/interconnect/CMakeLists.windows-x86_64.txt61
-rw-r--r--library/cpp/actors/interconnect/channel_scheduler.h116
-rw-r--r--library/cpp/actors/interconnect/event_filter.h72
-rw-r--r--library/cpp/actors/interconnect/event_holder_pool.h127
-rw-r--r--library/cpp/actors/interconnect/events_local.h438
-rw-r--r--library/cpp/actors/interconnect/handshake_broker.h156
-rw-r--r--library/cpp/actors/interconnect/interconnect.h189
-rw-r--r--library/cpp/actors/interconnect/interconnect_address.cpp106
-rw-r--r--library/cpp/actors/interconnect/interconnect_address.h47
-rw-r--r--library/cpp/actors/interconnect/interconnect_channel.cpp360
-rw-r--r--library/cpp/actors/interconnect/interconnect_channel.h159
-rw-r--r--library/cpp/actors/interconnect/interconnect_common.h140
-rw-r--r--library/cpp/actors/interconnect/interconnect_counters.cpp703
-rw-r--r--library/cpp/actors/interconnect/interconnect_counters.h57
-rw-r--r--library/cpp/actors/interconnect/interconnect_handshake.cpp1237
-rw-r--r--library/cpp/actors/interconnect/interconnect_handshake.h26
-rw-r--r--library/cpp/actors/interconnect/interconnect_impl.h44
-rw-r--r--library/cpp/actors/interconnect/interconnect_mon.cpp279
-rw-r--r--library/cpp/actors/interconnect/interconnect_mon.h15
-rw-r--r--library/cpp/actors/interconnect/interconnect_nameserver_base.h83
-rw-r--r--library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp178
-rw-r--r--library/cpp/actors/interconnect/interconnect_nameserver_table.cpp86
-rw-r--r--library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp47
-rw-r--r--library/cpp/actors/interconnect/interconnect_proxy_wrapper.h12
-rw-r--r--library/cpp/actors/interconnect/interconnect_resolve.cpp208
-rw-r--r--library/cpp/actors/interconnect/interconnect_stream.cpp680
-rw-r--r--library/cpp/actors/interconnect/interconnect_stream.h145
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp1139
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp944
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_proxy.h570
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_server.cpp119
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_server.h58
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_session.cpp1322
-rw-r--r--library/cpp/actors/interconnect/interconnect_tcp_session.h692
-rw-r--r--library/cpp/actors/interconnect/load.cpp405
-rw-r--r--library/cpp/actors/interconnect/load.h24
-rw-r--r--library/cpp/actors/interconnect/logging.h69
-rw-r--r--library/cpp/actors/interconnect/mock/CMakeLists.darwin-arm64.txt18
-rw-r--r--library/cpp/actors/interconnect/mock/CMakeLists.darwin-x86_64.txt18
-rw-r--r--library/cpp/actors/interconnect/mock/CMakeLists.linux-aarch64.txt19
-rw-r--r--library/cpp/actors/interconnect/mock/CMakeLists.linux-x86_64.txt19
-rw-r--r--library/cpp/actors/interconnect/mock/CMakeLists.windows-x86_64.txt18
-rw-r--r--library/cpp/actors/interconnect/mock/ic_mock.cpp385
-rw-r--r--library/cpp/actors/interconnect/mock/ic_mock.h19
-rw-r--r--library/cpp/actors/interconnect/mock/ya.make14
-rw-r--r--library/cpp/actors/interconnect/outgoing_stream.h272
-rw-r--r--library/cpp/actors/interconnect/packet.cpp31
-rw-r--r--library/cpp/actors/interconnect/packet.h304
-rw-r--r--library/cpp/actors/interconnect/poller.h23
-rw-r--r--library/cpp/actors/interconnect/poller_actor.cpp318
-rw-r--r--library/cpp/actors/interconnect/poller_actor.h72
-rw-r--r--library/cpp/actors/interconnect/poller_actor_darwin.h103
-rw-r--r--library/cpp/actors/interconnect/poller_actor_linux.h132
-rw-r--r--library/cpp/actors/interconnect/poller_actor_win.h111
-rw-r--r--library/cpp/actors/interconnect/poller_tcp.cpp35
-rw-r--r--library/cpp/actors/interconnect/poller_tcp.h25
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit.cpp126
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit.h67
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp124
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit_epoll.h33
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit_select.cpp86
-rw-r--r--library/cpp/actors/interconnect/poller_tcp_unit_select.h19
-rw-r--r--library/cpp/actors/interconnect/profiler.h142
-rw-r--r--library/cpp/actors/interconnect/slowpoke_actor.h47
-rw-r--r--library/cpp/actors/interconnect/types.cpp564
-rw-r--r--library/cpp/actors/interconnect/types.h72
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.darwin-arm64.txt85
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt86
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.linux-aarch64.txt89
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.linux-x86_64.txt91
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.txt19
-rw-r--r--library/cpp/actors/interconnect/ut/CMakeLists.windows-x86_64.txt79
-rw-r--r--library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp117
-rw-r--r--library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp179
-rw-r--r--library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp59
-rw-r--r--library/cpp/actors/interconnect/ut/interconnect_ut.cpp177
-rw-r--r--library/cpp/actors/interconnect/ut/large.cpp85
-rw-r--r--library/cpp/actors/interconnect/ut/lib/CMakeLists.txt19
-rw-r--r--library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h87
-rw-r--r--library/cpp/actors/interconnect/ut/lib/interrupter.h249
-rw-r--r--library/cpp/actors/interconnect/ut/lib/node.h149
-rw-r--r--library/cpp/actors/interconnect/ut/lib/test_actors.h83
-rw-r--r--library/cpp/actors/interconnect/ut/lib/test_events.h54
-rw-r--r--library/cpp/actors/interconnect/ut/lib/ya.make10
-rw-r--r--library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp147
-rw-r--r--library/cpp/actors/interconnect/ut/poller_actor_ut.cpp264
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt43
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt43
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt44
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt44
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.txt19
-rw-r--r--library/cpp/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt43
-rw-r--r--library/cpp/actors/interconnect/ut/protos/interconnect_test.proto28
-rw-r--r--library/cpp/actors/interconnect/ut/protos/ya.make9
-rw-r--r--library/cpp/actors/interconnect/ut/sticking_ut.cpp108
-rw-r--r--library/cpp/actors/interconnect/ut/ya.make33
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt69
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt70
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt73
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt75
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.txt19
-rw-r--r--library/cpp/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt63
-rw-r--r--library/cpp/actors/interconnect/ut_fat/main.cpp133
-rw-r--r--library/cpp/actors/interconnect/ut_fat/ya.make21
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt75
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt76
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt79
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt81
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.txt19
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt69
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp167
-rw-r--r--library/cpp/actors/interconnect/ut_huge_cluster/ya.make34
-rw-r--r--library/cpp/actors/interconnect/watchdog_timer.h69
-rw-r--r--library/cpp/actors/interconnect/ya.make97
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.darwin-arm64.txt19
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.darwin-x86_64.txt19
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.linux-aarch64.txt20
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.linux-x86_64.txt20
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.txt19
-rw-r--r--library/cpp/actors/log_backend/CMakeLists.windows-x86_64.txt19
-rw-r--r--library/cpp/actors/log_backend/actor_log_backend.cpp42
-rw-r--r--library/cpp/actors/log_backend/actor_log_backend.h20
-rw-r--r--library/cpp/actors/log_backend/ya.make12
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.darwin-arm64.txt21
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.darwin-x86_64.txt21
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.linux-aarch64.txt22
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.linux-x86_64.txt22
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.txt19
-rw-r--r--library/cpp/actors/memory_log/CMakeLists.windows-x86_64.txt21
-rw-r--r--library/cpp/actors/memory_log/memlog.cpp367
-rw-r--r--library/cpp/actors/memory_log/memlog.h211
-rw-r--r--library/cpp/actors/memory_log/mmap.cpp63
-rw-r--r--library/cpp/actors/memory_log/ya.make15
-rw-r--r--library/cpp/actors/prof/CMakeLists.darwin-arm64.txt22
-rw-r--r--library/cpp/actors/prof/CMakeLists.darwin-x86_64.txt22
-rw-r--r--library/cpp/actors/prof/CMakeLists.linux-aarch64.txt23
-rw-r--r--library/cpp/actors/prof/CMakeLists.linux-x86_64.txt23
-rw-r--r--library/cpp/actors/prof/CMakeLists.txt19
-rw-r--r--library/cpp/actors/prof/CMakeLists.windows-x86_64.txt22
-rw-r--r--library/cpp/actors/prof/tag.cpp132
-rw-r--r--library/cpp/actors/prof/tag.h84
-rw-r--r--library/cpp/actors/prof/tcmalloc.cpp32
-rw-r--r--library/cpp/actors/prof/tcmalloc.h9
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.darwin-arm64.txt66
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.darwin-x86_64.txt67
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.linux-aarch64.txt70
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.linux-x86_64.txt72
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.txt19
-rw-r--r--library/cpp/actors/prof/ut/CMakeLists.windows-x86_64.txt60
-rw-r--r--library/cpp/actors/prof/ut/tag_ut.cpp68
-rw-r--r--library/cpp/actors/prof/ut/ya.make7
-rw-r--r--library/cpp/actors/prof/ya.make28
-rw-r--r--library/cpp/actors/protos/CMakeLists.darwin-arm64.txt82
-rw-r--r--library/cpp/actors/protos/CMakeLists.darwin-x86_64.txt82
-rw-r--r--library/cpp/actors/protos/CMakeLists.linux-aarch64.txt83
-rw-r--r--library/cpp/actors/protos/CMakeLists.linux-x86_64.txt83
-rw-r--r--library/cpp/actors/protos/CMakeLists.txt19
-rw-r--r--library/cpp/actors/protos/CMakeLists.windows-x86_64.txt82
-rw-r--r--library/cpp/actors/protos/actors.proto41
-rw-r--r--library/cpp/actors/protos/interconnect.proto133
-rw-r--r--library/cpp/actors/protos/services_common.proto22
-rw-r--r--library/cpp/actors/protos/unittests.proto20
-rw-r--r--library/cpp/actors/protos/ya.make12
-rw-r--r--library/cpp/actors/testlib/CMakeLists.darwin-arm64.txt23
-rw-r--r--library/cpp/actors/testlib/CMakeLists.darwin-x86_64.txt23
-rw-r--r--library/cpp/actors/testlib/CMakeLists.linux-aarch64.txt24
-rw-r--r--library/cpp/actors/testlib/CMakeLists.linux-x86_64.txt24
-rw-r--r--library/cpp/actors/testlib/CMakeLists.txt19
-rw-r--r--library/cpp/actors/testlib/CMakeLists.windows-x86_64.txt23
-rw-r--r--library/cpp/actors/testlib/decorator_ut.cpp327
-rw-r--r--library/cpp/actors/testlib/test_runtime.cpp1968
-rw-r--r--library/cpp/actors/testlib/test_runtime.h814
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.darwin-arm64.txt67
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.darwin-x86_64.txt68
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.linux-aarch64.txt71
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.linux-x86_64.txt73
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.txt19
-rw-r--r--library/cpp/actors/testlib/ut/CMakeLists.windows-x86_64.txt61
-rw-r--r--library/cpp/actors/testlib/ut/ya.make15
-rw-r--r--library/cpp/actors/testlib/ya.make23
-rw-r--r--library/cpp/actors/util/CMakeLists.darwin-arm64.txt28
-rw-r--r--library/cpp/actors/util/CMakeLists.darwin-x86_64.txt28
-rw-r--r--library/cpp/actors/util/CMakeLists.linux-aarch64.txt29
-rw-r--r--library/cpp/actors/util/CMakeLists.linux-x86_64.txt29
-rw-r--r--library/cpp/actors/util/CMakeLists.txt19
-rw-r--r--library/cpp/actors/util/CMakeLists.windows-x86_64.txt28
-rw-r--r--library/cpp/actors/util/README.md99
-rw-r--r--library/cpp/actors/util/affinity.cpp93
-rw-r--r--library/cpp/actors/util/affinity.h49
-rw-r--r--library/cpp/actors/util/cpu_load_log.h227
-rw-r--r--library/cpp/actors/util/cpu_load_log_ut.cpp275
-rw-r--r--library/cpp/actors/util/cpumask.h133
-rw-r--r--library/cpp/actors/util/datetime.h82
-rw-r--r--library/cpp/actors/util/defs.h16
-rw-r--r--library/cpp/actors/util/funnel_queue.h240
-rw-r--r--library/cpp/actors/util/futex.h13
-rw-r--r--library/cpp/actors/util/intrinsics.h97
-rw-r--r--library/cpp/actors/util/local_process_key.h157
-rw-r--r--library/cpp/actors/util/memory_track.cpp38
-rw-r--r--library/cpp/actors/util/memory_track.h293
-rw-r--r--library/cpp/actors/util/memory_tracker.cpp103
-rw-r--r--library/cpp/actors/util/memory_tracker.h53
-rw-r--r--library/cpp/actors/util/memory_tracker_ut.cpp263
-rw-r--r--library/cpp/actors/util/named_tuple.h30
-rw-r--r--library/cpp/actors/util/queue_chunk.h29
-rw-r--r--library/cpp/actors/util/queue_oneone_inplace.h118
-rw-r--r--library/cpp/actors/util/rc_buf.cpp6
-rw-r--r--library/cpp/actors/util/rc_buf.h1120
-rw-r--r--library/cpp/actors/util/rc_buf_backend.h230
-rw-r--r--library/cpp/actors/util/rc_buf_ut.cpp207
-rw-r--r--library/cpp/actors/util/recentwnd.h67
-rw-r--r--library/cpp/actors/util/rope.cpp13
-rw-r--r--library/cpp/actors/util/rope.h1148
-rw-r--r--library/cpp/actors/util/rope_cont_embedded_list.h391
-rw-r--r--library/cpp/actors/util/rope_ut.cpp418
-rw-r--r--library/cpp/actors/util/shared_data.cpp49
-rw-r--r--library/cpp/actors/util/shared_data.h227
-rw-r--r--library/cpp/actors/util/shared_data_backtracing_owner.h88
-rw-r--r--library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp231
-rw-r--r--library/cpp/actors/util/shared_data_rope_backend.h41
-rw-r--r--library/cpp/actors/util/shared_data_rope_backend_ut.cpp231
-rw-r--r--library/cpp/actors/util/shared_data_ut.cpp205
-rw-r--r--library/cpp/actors/util/should_continue.cpp23
-rw-r--r--library/cpp/actors/util/should_continue.h22
-rw-r--r--library/cpp/actors/util/thread.h26
-rw-r--r--library/cpp/actors/util/thread_load_log.h363
-rw-r--r--library/cpp/actors/util/thread_load_log_ut.cpp966
-rw-r--r--library/cpp/actors/util/threadparkpad.cpp152
-rw-r--r--library/cpp/actors/util/threadparkpad.h21
-rw-r--r--library/cpp/actors/util/ticket_lock.h48
-rw-r--r--library/cpp/actors/util/timerfd.h65
-rw-r--r--library/cpp/actors/util/unordered_cache.h201
-rw-r--r--library/cpp/actors/util/unordered_cache_ut.cpp138
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.darwin-arm64.txt74
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.darwin-x86_64.txt75
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.linux-aarch64.txt78
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.linux-x86_64.txt80
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.txt19
-rw-r--r--library/cpp/actors/util/ut/CMakeLists.windows-x86_64.txt68
-rw-r--r--library/cpp/actors/util/ut/ya.make20
-rw-r--r--library/cpp/actors/util/ut_helpers.h12
-rw-r--r--library/cpp/actors/util/ya.make50
-rw-r--r--library/cpp/actors/wilson/CMakeLists.darwin-arm64.txt25
-rw-r--r--library/cpp/actors/wilson/CMakeLists.darwin-x86_64.txt25
-rw-r--r--library/cpp/actors/wilson/CMakeLists.linux-aarch64.txt26
-rw-r--r--library/cpp/actors/wilson/CMakeLists.linux-x86_64.txt26
-rw-r--r--library/cpp/actors/wilson/CMakeLists.txt19
-rw-r--r--library/cpp/actors/wilson/CMakeLists.windows-x86_64.txt25
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.darwin-arm64.txt15
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.darwin-x86_64.txt15
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.linux-aarch64.txt16
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.linux-x86_64.txt16
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.txt19
-rw-r--r--library/cpp/actors/wilson/protos/CMakeLists.windows-x86_64.txt15
-rw-r--r--library/cpp/actors/wilson/protos/ya.make12
-rw-r--r--library/cpp/actors/wilson/wilson_event.cpp4
-rw-r--r--library/cpp/actors/wilson/wilson_event.h21
-rw-r--r--library/cpp/actors/wilson/wilson_profile_span.cpp178
-rw-r--r--library/cpp/actors/wilson/wilson_profile_span.h75
-rw-r--r--library/cpp/actors/wilson/wilson_span.cpp62
-rw-r--r--library/cpp/actors/wilson/wilson_span.h244
-rw-r--r--library/cpp/actors/wilson/wilson_trace.cpp4
-rw-r--r--library/cpp/actors/wilson/wilson_trace.h234
-rw-r--r--library/cpp/actors/wilson/wilson_uploader.cpp193
-rw-r--r--library/cpp/actors/wilson/wilson_uploader.h24
-rw-r--r--library/cpp/actors/wilson/ya.make21
-rw-r--r--library/cpp/deprecated/autoarray/README.md3
-rw-r--r--library/cpp/deprecated/autoarray/autoarray.cpp1
-rw-r--r--library/cpp/deprecated/autoarray/autoarray.h264
-rw-r--r--library/cpp/deprecated/autoarray/ya.make7
-rw-r--r--library/cpp/deprecated/fgood/README.md15
-rw-r--r--library/cpp/deprecated/fgood/ffb.cpp407
-rw-r--r--library/cpp/deprecated/fgood/ffb.h264
-rw-r--r--library/cpp/deprecated/fgood/fgood.cpp70
-rw-r--r--library/cpp/deprecated/fgood/fgood.h328
-rw-r--r--library/cpp/deprecated/fgood/fput.h79
-rw-r--r--library/cpp/deprecated/fgood/ya.make8
-rw-r--r--library/cpp/deprecated/mapped_file/mapped_file.cpp64
-rw-r--r--library/cpp/deprecated/mapped_file/ya.make7
-rw-r--r--library/cpp/eventlog/common.h10
-rw-r--r--library/cpp/eventlog/evdecoder.cpp112
-rw-r--r--library/cpp/eventlog/evdecoder.h16
-rw-r--r--library/cpp/eventlog/event_field_output.cpp68
-rw-r--r--library/cpp/eventlog/event_field_output.h29
-rw-r--r--library/cpp/eventlog/event_field_printer.cpp27
-rw-r--r--library/cpp/eventlog/event_field_printer.h38
-rw-r--r--library/cpp/eventlog/eventlog.cpp554
-rw-r--r--library/cpp/eventlog/eventlog.h623
-rw-r--r--library/cpp/eventlog/eventlog_int.cpp12
-rw-r--r--library/cpp/eventlog/eventlog_int.h72
-rw-r--r--library/cpp/eventlog/events_extension.h161
-rw-r--r--library/cpp/eventlog/iterator.cpp88
-rw-r--r--library/cpp/eventlog/iterator.h51
-rw-r--r--library/cpp/eventlog/logparser.cpp814
-rw-r--r--library/cpp/eventlog/logparser.h343
-rw-r--r--library/cpp/eventlog/proto/events_extension.proto22
-rw-r--r--library/cpp/eventlog/proto/internal.proto9
-rw-r--r--library/cpp/eventlog/proto/ya.make12
-rw-r--r--library/cpp/eventlog/threaded_eventlog.cpp1
-rw-r--r--library/cpp/eventlog/threaded_eventlog.h154
-rw-r--r--library/cpp/eventlog/ya.make29
-rw-r--r--library/cpp/fieldcalc/field_calc.cpp1136
-rw-r--r--library/cpp/fieldcalc/field_calc.h136
-rw-r--r--library/cpp/fieldcalc/field_calc_int.h593
-rw-r--r--library/cpp/fieldcalc/lossy_types.h52
-rw-r--r--library/cpp/fieldcalc/ya.make13
-rw-r--r--library/cpp/malloc/galloc/malloc-info.cpp9
-rw-r--r--library/cpp/malloc/galloc/ya.make15
-rw-r--r--library/cpp/on_disk/multi_blob/multiblob.cpp67
-rw-r--r--library/cpp/on_disk/multi_blob/multiblob.h77
-rw-r--r--library/cpp/on_disk/multi_blob/multiblob_builder.cpp146
-rw-r--r--library/cpp/on_disk/multi_blob/multiblob_builder.h64
-rw-r--r--library/cpp/on_disk/multi_blob/ya.make13
-rw-r--r--library/cpp/on_disk/st_hash/fake.cpp4
-rw-r--r--library/cpp/on_disk/st_hash/save_stl.h84
-rw-r--r--library/cpp/on_disk/st_hash/static_hash.h420
-rw-r--r--library/cpp/on_disk/st_hash/static_hash_map.h59
-rw-r--r--library/cpp/on_disk/st_hash/sthash_iterators.h334
-rw-r--r--library/cpp/on_disk/st_hash/ya.make15
-rw-r--r--library/cpp/remmap/remmap.cpp138
-rw-r--r--library/cpp/remmap/remmap.h64
-rw-r--r--library/cpp/remmap/ya.make7
-rw-r--r--library/cpp/sqlite3/sqlite.cpp288
-rw-r--r--library/cpp/sqlite3/sqlite.h136
-rw-r--r--library/cpp/sqlite3/ya.make13
-rw-r--r--library/cpp/streams/growing_file_input/growing_file_input.cpp40
-rw-r--r--library/cpp/streams/growing_file_input/growing_file_input.h23
-rw-r--r--library/cpp/streams/growing_file_input/ya.make11
-rw-r--r--library/cpp/string_utils/subst_buf/substbuf.cpp1
-rw-r--r--library/cpp/string_utils/subst_buf/substbuf.h63
-rw-r--r--library/cpp/string_utils/subst_buf/ya.make7
-rw-r--r--library/cpp/ucompress/README.md1
-rw-r--r--library/cpp/ucompress/common.h8
-rw-r--r--library/cpp/ucompress/reader.cpp58
-rw-r--r--library/cpp/ucompress/reader.h25
-rw-r--r--library/cpp/ucompress/writer.cpp95
-rw-r--r--library/cpp/ucompress/writer.h31
-rw-r--r--library/cpp/ucompress/ya.make18
-rw-r--r--library/cpp/zipatch/reader.cpp173
-rw-r--r--library/cpp/zipatch/reader.h48
-rw-r--r--library/cpp/zipatch/writer.cpp232
-rw-r--r--library/cpp/zipatch/writer.h51
-rw-r--r--library/cpp/zipatch/ya.make16
-rw-r--r--library/python/mlockall/__init__.py10
-rw-r--r--library/python/mlockall/mlockall.pyx19
-rw-r--r--library/python/mlockall/ya.make14
-rw-r--r--library/python/nstools/__init__.py6
-rw-r--r--library/python/nstools/nstools.pyx28
-rw-r--r--library/python/nstools/ya.make14
-rw-r--r--library/python/symbols/libmagic/syms.cpp19
-rw-r--r--library/python/symbols/libmagic/ya.make12
-rw-r--r--library/python/testing/coverage_utils/__init__.py14
-rw-r--r--library/python/testing/coverage_utils/ya.make5
-rw-r--r--library/python/testing/system_info/__init__.py204
-rw-r--r--library/python/testing/system_info/ya.make15
-rw-r--r--library/recipes/docker_compose/example/Dockerfile5
-rw-r--r--library/recipes/docker_compose/example/app.py17
-rw-r--r--library/recipes/docker_compose/example/docker-compose.yml10
-rw-r--r--library/recipes/docker_compose/example/requirements.txt2
-rw-r--r--library/recipes/docker_compose/example/test.py7
-rw-r--r--library/recipes/docker_compose/example/ya.make19
-rw-r--r--library/recipes/docker_compose/example_network_go/Dockerfile1
-rw-r--r--library/recipes/docker_compose/example_network_go/docker-compose.yml13
-rw-r--r--library/recipes/docker_compose/example_network_go/go_test.go21
-rw-r--r--library/recipes/docker_compose/example_network_go/recipe-config.yml5
-rw-r--r--library/recipes/docker_compose/example_network_go/ya.make19
-rw-r--r--library/recipes/docker_compose/example_test_container/Dockerfile2
-rw-r--r--library/recipes/docker_compose/example_test_container/docker-compose.yml7
-rw-r--r--library/recipes/docker_compose/example_test_container/test.py5
-rw-r--r--library/recipes/docker_compose/example_test_container/ya.make19
-rw-r--r--library/recipes/docker_compose/example_test_container_go/Dockerfile2
-rw-r--r--library/recipes/docker_compose/example_test_container_go/docker-compose.yml7
-rw-r--r--library/recipes/docker_compose/example_test_container_go/go_test.go13
-rw-r--r--library/recipes/docker_compose/example_test_container_go/ya.make19
-rw-r--r--library/recipes/docker_compose/example_with_context/docker-compose.yml10
-rw-r--r--library/recipes/docker_compose/example_with_context/docker-context.yml5
-rw-r--r--library/recipes/docker_compose/example_with_context/test.py9
-rw-r--r--library/recipes/docker_compose/example_with_context/ya.make29
-rw-r--r--library/recipes/docker_compose/example_with_recipe_config/Dockerfile2
-rw-r--r--library/recipes/docker_compose/example_with_recipe_config/docker-compose.yml6
-rw-r--r--library/recipes/docker_compose/example_with_recipe_config/recipe-config.yml10
-rw-r--r--library/recipes/docker_compose/example_with_recipe_config/test.py12
-rw-r--r--library/recipes/docker_compose/example_with_recipe_config/ya.make27
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/Dockerfile2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/docker-compose.yml8
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/test.py5
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/ya.make24
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/Dockerfile2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/docker-compose.yml9
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/test.py5
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/ya.make24
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/Dockerfile2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/docker-compose.yml8
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/test.py5
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/ya.make24
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/docker-compose.yml6
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/Dockerfile4
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/app.py2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/Dockerfile4
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/app.py6
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/test.py2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/ya.make21
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/docker-compose.yml6
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/Dockerfile4
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/app.py5
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/Dockerfile4
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/app.py6
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/test.py2
-rw-r--r--library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/ya.make21
-rw-r--r--library/recipes/docker_compose/test/acceptance/test_docker_compose.py92
-rw-r--r--library/recipes/docker_compose/test/acceptance/ya.make40
-rw-r--r--library/recipes/docker_compose/test/ut/context.yml9
-rw-r--r--library/recipes/docker_compose/test/ut/data/dir1/file3.txt0
-rw-r--r--library/recipes/docker_compose/test/ut/data/dir2/file4.txt0
-rw-r--r--library/recipes/docker_compose/test/ut/data/file1.txt0
-rw-r--r--library/recipes/docker_compose/test/ut/data/file2.txt0
-rw-r--r--library/recipes/docker_compose/test/ut/init_dir/init.txt1
-rw-r--r--library/recipes/docker_compose/test/ut/test_docker_context.py31
-rw-r--r--library/recipes/docker_compose/test/ut/ya.make16
-rw-r--r--library/recipes/docker_compose/test/ya.make4
-rw-r--r--library/recipes/docker_compose/ya.make20
-rw-r--r--tools/event2cpp/bin/ya.make21
-rw-r--r--tools/event2cpp/proto_events.cpp893
-rw-r--r--tools/event2cpp/proto_events.h20
-rw-r--r--tools/event2cpp/ya.make13
-rw-r--r--tools/struct2fieldcalc/parsestruct.rl669
-rw-r--r--tools/struct2fieldcalc/ya.make16
-rw-r--r--vendor/github.com/dgryski/go-rendezvous/rdv.go79
-rw-r--r--vendor/github.com/dgryski/go-rendezvous/ya.make11
-rw-r--r--vendor/github.com/go-redis/redis/v8/cluster.go1750
-rw-r--r--vendor/github.com/go-redis/redis/v8/cluster_commands.go109
-rw-r--r--vendor/github.com/go-redis/redis/v8/command.go3478
-rw-r--r--vendor/github.com/go-redis/redis/v8/commands.go3475
-rw-r--r--vendor/github.com/go-redis/redis/v8/doc.go4
-rw-r--r--vendor/github.com/go-redis/redis/v8/error.go144
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/arg.go56
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go78
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make11
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go201
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go93
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make14
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/internal.go29
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/log.go26
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/once.go60
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/pool/conn.go121
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/pool/pool.go557
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go58
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go201
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/pool/ya.make22
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/proto/reader.go332
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/proto/scan.go180
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/proto/writer.go155
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/proto/ya.make20
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/rand/rand.go50
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/rand/ya.make7
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/unsafe.go21
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/util.go46
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/util/strconv.go19
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go23
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/util/ya.make10
-rw-r--r--vendor/github.com/go-redis/redis/v8/internal/ya.make26
-rw-r--r--vendor/github.com/go-redis/redis/v8/iterator.go77
-rw-r--r--vendor/github.com/go-redis/redis/v8/options.go429
-rw-r--r--vendor/github.com/go-redis/redis/v8/pipeline.go147
-rw-r--r--vendor/github.com/go-redis/redis/v8/pubsub.go668
-rw-r--r--vendor/github.com/go-redis/redis/v8/redis.go773
-rw-r--r--vendor/github.com/go-redis/redis/v8/result.go180
-rw-r--r--vendor/github.com/go-redis/redis/v8/ring.go736
-rw-r--r--vendor/github.com/go-redis/redis/v8/script.go65
-rw-r--r--vendor/github.com/go-redis/redis/v8/sentinel.go796
-rw-r--r--vendor/github.com/go-redis/redis/v8/tx.go149
-rw-r--r--vendor/github.com/go-redis/redis/v8/universal.go215
-rw-r--r--vendor/github.com/go-redis/redis/v8/version.go6
-rw-r--r--vendor/github.com/go-redis/redis/v8/ya.make58
-rw-r--r--ydb/library/actors/CMakeLists.txt2
-rw-r--r--ydb/library/actors/README.md107
-rw-r--r--ydb/library/actors/core/CMakeLists.darwin-arm64.txt2
-rw-r--r--ydb/library/actors/core/CMakeLists.darwin-x86_64.txt2
-rw-r--r--ydb/library/actors/core/CMakeLists.linux-aarch64.txt2
-rw-r--r--ydb/library/actors/core/CMakeLists.linux-x86_64.txt2
-rw-r--r--ydb/library/actors/core/CMakeLists.windows-x86_64.txt2
-rw-r--r--ydb/library/actors/core/tsan.supp (renamed from library/cpp/actors/core/tsan.supp)0
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.darwin-arm64.txt88
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.darwin-x86_64.txt89
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.linux-aarch64.txt92
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.linux-x86_64.txt94
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.txt (renamed from library/cpp/actors/actor_type/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/core/ut/CMakeLists.windows-x86_64.txt82
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.darwin-arm64.txt70
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt71
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.linux-aarch64.txt74
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.linux-x86_64.txt76
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.txt (renamed from library/cpp/actors/core/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/core/ut_fat/CMakeLists.windows-x86_64.txt64
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.darwin-arm64.txt24
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.darwin-x86_64.txt24
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.linux-aarch64.txt25
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.linux-x86_64.txt25
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.txt (renamed from library/cpp/actors/core/ut/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/cppcoro/CMakeLists.windows-x86_64.txt24
-rw-r--r--ydb/library/actors/cppcoro/await_callback.cpp (renamed from library/cpp/actors/cppcoro/await_callback.cpp)0
-rw-r--r--ydb/library/actors/cppcoro/await_callback.h (renamed from library/cpp/actors/cppcoro/await_callback.h)0
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt30
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt31
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt34
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt36
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.txt (renamed from library/cpp/actors/core/ut_fat/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt24
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/main.cpp76
-rw-r--r--ydb/library/actors/cppcoro/corobenchmark/ya.make11
-rw-r--r--ydb/library/actors/cppcoro/task.cpp (renamed from library/cpp/actors/cppcoro/task.cpp)0
-rw-r--r--ydb/library/actors/cppcoro/task.h (renamed from library/cpp/actors/cppcoro/task.h)0
-rw-r--r--ydb/library/actors/cppcoro/task_actor.cpp183
-rw-r--r--ydb/library/actors/cppcoro/task_actor.h107
-rw-r--r--ydb/library/actors/cppcoro/task_actor_ut.cpp93
-rw-r--r--ydb/library/actors/cppcoro/task_group.cpp (renamed from library/cpp/actors/cppcoro/task_group.cpp)0
-rw-r--r--ydb/library/actors/cppcoro/task_group.h (renamed from library/cpp/actors/cppcoro/task_group.h)0
-rw-r--r--ydb/library/actors/cppcoro/task_result.cpp (renamed from library/cpp/actors/cppcoro/task_result.cpp)0
-rw-r--r--ydb/library/actors/cppcoro/task_result.h (renamed from library/cpp/actors/cppcoro/task_result.h)0
-rw-r--r--ydb/library/actors/cppcoro/task_ut.cpp (renamed from library/cpp/actors/cppcoro/task_ut.cpp)0
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt68
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt69
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt72
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt74
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.txt (renamed from library/cpp/actors/cppcoro/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt62
-rw-r--r--ydb/library/actors/cppcoro/ut/ya.make12
-rw-r--r--ydb/library/actors/cppcoro/ya.make25
-rw-r--r--ydb/library/actors/dnsresolver/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/dnsresolver/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/dnsresolver/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt70
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt71
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt74
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt76
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.txt (renamed from library/cpp/actors/cppcoro/corobenchmark/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt64
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt29
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt30
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt33
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt34
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.txt (renamed from library/cpp/actors/cppcoro/ut/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt23
-rw-r--r--ydb/library/actors/examples/01_ping_pong/main.cpp129
-rw-r--r--ydb/library/actors/examples/01_ping_pong/ya.make13
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt66
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt67
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt70
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt71
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.txt (renamed from library/cpp/actors/dnscachelib/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt60
-rw-r--r--ydb/library/actors/examples/02_discovery/endpoint.cpp116
-rw-r--r--ydb/library/actors/examples/02_discovery/lookup.cpp132
-rw-r--r--ydb/library/actors/examples/02_discovery/main.cpp136
-rw-r--r--ydb/library/actors/examples/02_discovery/protocol.proto (renamed from library/cpp/actors/examples/02_discovery/protocol.proto)0
-rw-r--r--ydb/library/actors/examples/02_discovery/publish.cpp110
-rw-r--r--ydb/library/actors/examples/02_discovery/replica.cpp181
-rw-r--r--ydb/library/actors/examples/02_discovery/services.h85
-rw-r--r--ydb/library/actors/examples/02_discovery/ya.make25
-rw-r--r--ydb/library/actors/examples/CMakeLists.txt (renamed from library/cpp/actors/examples/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/examples/ya.make (renamed from library/cpp/actors/examples/ya.make)0
-rw-r--r--ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.darwin-arm64.txt76
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.darwin-x86_64.txt77
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.linux-aarch64.txt80
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.linux-x86_64.txt82
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.txt (renamed from library/cpp/actors/dnsresolver/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/helpers/ut/CMakeLists.windows-x86_64.txt70
-rw-r--r--ydb/library/actors/http/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/http/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/http/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/http/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/http/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.darwin-arm64.txt67
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.darwin-x86_64.txt68
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.linux-aarch64.txt71
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.linux-x86_64.txt73
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.txt (renamed from library/cpp/actors/dnsresolver/ut/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/http/ut/CMakeLists.windows-x86_64.txt58
-rw-r--r--ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt3
-rw-r--r--ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt3
-rw-r--r--ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt3
-rw-r--r--ydb/library/actors/interconnect/CMakeLists.linux-x86_64.txt3
-rw-r--r--ydb/library/actors/interconnect/CMakeLists.windows-x86_64.txt3
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.darwin-arm64.txt85
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt86
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.linux-aarch64.txt89
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.linux-x86_64.txt91
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.txt (renamed from library/cpp/actors/examples/01_ping_pong/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/CMakeLists.windows-x86_64.txt79
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt (renamed from library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt (renamed from library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt (renamed from library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt (renamed from library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.txt (renamed from library/cpp/actors/examples/02_discovery/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt (renamed from library/cpp/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt43
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt43
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt44
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt44
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.txt (renamed from library/cpp/actors/helpers/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt43
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt69
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt70
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt73
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt75
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.txt (renamed from library/cpp/actors/helpers/ut/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt63
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt75
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt76
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt79
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt81
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.txt (renamed from library/cpp/actors/http/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt69
-rw-r--r--ydb/library/actors/prof/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/prof/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/prof/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/prof/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/prof/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.darwin-arm64.txt66
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.darwin-x86_64.txt67
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.linux-aarch64.txt70
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.linux-x86_64.txt72
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.txt (renamed from library/cpp/actors/http/ut/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/prof/ut/CMakeLists.windows-x86_64.txt60
-rw-r--r--ydb/library/actors/testlib/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/testlib/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/testlib/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.darwin-arm64.txt67
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.darwin-x86_64.txt68
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.linux-aarch64.txt71
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.linux-x86_64.txt73
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.txt (renamed from library/cpp/actors/interconnect/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/testlib/ut/CMakeLists.windows-x86_64.txt61
-rw-r--r--ydb/library/actors/util/CMakeLists.darwin-arm64.txt1
-rw-r--r--ydb/library/actors/util/CMakeLists.darwin-x86_64.txt1
-rw-r--r--ydb/library/actors/util/CMakeLists.linux-aarch64.txt1
-rw-r--r--ydb/library/actors/util/CMakeLists.linux-x86_64.txt1
-rw-r--r--ydb/library/actors/util/CMakeLists.windows-x86_64.txt1
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.darwin-arm64.txt74
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.darwin-x86_64.txt75
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.linux-aarch64.txt78
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.linux-x86_64.txt80
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.txt (renamed from library/cpp/actors/interconnect/mock/CMakeLists.txt)0
-rw-r--r--ydb/library/actors/util/ut/CMakeLists.windows-x86_64.txt68
-rw-r--r--ydb/library/actors/ya.make (renamed from library/cpp/actors/ya.make)0
2094 files changed, 7128 insertions, 315587 deletions
diff --git a/.mapping.json b/.mapping.json
index bdc5d552a7..3aa914c3a8 100644
--- a/.mapping.json
+++ b/.mapping.json
@@ -1993,212 +1993,6 @@
"library/cpp/accurate_accumulate/CMakeLists.linux-x86_64.txt":"",
"library/cpp/accurate_accumulate/CMakeLists.txt":"",
"library/cpp/accurate_accumulate/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/CMakeLists.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.txt":"",
- "library/cpp/actors/actor_type/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/core/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/core/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/core/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/core/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/core/CMakeLists.txt":"",
- "library/cpp/actors/core/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.txt":"",
- "library/cpp/actors/core/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.txt":"",
- "library/cpp/actors/core/ut_fat/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.txt":"",
- "library/cpp/actors/cppcoro/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.txt":"",
- "library/cpp/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.txt":"",
- "library/cpp/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.txt":"",
- "library/cpp/actors/dnscachelib/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.txt":"",
- "library/cpp/actors/dnsresolver/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.txt":"",
- "library/cpp/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.txt":"",
- "library/cpp/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.txt":"",
- "library/cpp/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/examples/CMakeLists.txt":"",
- "library/cpp/actors/helpers/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/helpers/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/helpers/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/helpers/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/helpers/CMakeLists.txt":"",
- "library/cpp/actors/helpers/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.txt":"",
- "library/cpp/actors/helpers/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/http/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/http/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/http/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/http/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/http/CMakeLists.txt":"",
- "library/cpp/actors/http/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.txt":"",
- "library/cpp/actors/http/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/mock/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.txt":"",
- "library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.txt":"",
- "library/cpp/actors/log_backend/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.txt":"",
- "library/cpp/actors/memory_log/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/prof/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/prof/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/prof/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/prof/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/prof/CMakeLists.txt":"",
- "library/cpp/actors/prof/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.txt":"",
- "library/cpp/actors/prof/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/protos/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/protos/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/protos/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/protos/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/protos/CMakeLists.txt":"",
- "library/cpp/actors/protos/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/testlib/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/testlib/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/testlib/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/testlib/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/testlib/CMakeLists.txt":"",
- "library/cpp/actors/testlib/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.txt":"",
- "library/cpp/actors/testlib/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/util/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/util/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/util/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/util/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/util/CMakeLists.txt":"",
- "library/cpp/actors/util/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.txt":"",
- "library/cpp/actors/util/ut/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/wilson/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/wilson/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/wilson/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/wilson/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/wilson/CMakeLists.txt":"",
- "library/cpp/actors/wilson/CMakeLists.windows-x86_64.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.darwin-arm64.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.darwin-x86_64.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.linux-aarch64.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.linux-x86_64.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.txt":"",
- "library/cpp/actors/wilson/protos/CMakeLists.windows-x86_64.txt":"",
"library/cpp/archive/CMakeLists.darwin-arm64.txt":"",
"library/cpp/archive/CMakeLists.darwin-x86_64.txt":"",
"library/cpp/archive/CMakeLists.linux-aarch64.txt":"",
@@ -7701,6 +7495,36 @@
"ydb/library/actors/core/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/core/CMakeLists.txt":"",
"ydb/library/actors/core/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.txt":"",
+ "ydb/library/actors/core/ut/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.txt":"",
+ "ydb/library/actors/core/ut_fat/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.txt":"",
+ "ydb/library/actors/cppcoro/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.txt":"",
+ "ydb/library/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.txt":"",
+ "ydb/library/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/dnscachelib/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/dnscachelib/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/dnscachelib/CMakeLists.linux-aarch64.txt":"",
@@ -7713,18 +7537,49 @@
"ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/dnsresolver/CMakeLists.txt":"",
"ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.txt":"",
+ "ydb/library/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.txt":"",
+ "ydb/library/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.txt":"",
+ "ydb/library/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/examples/CMakeLists.txt":"",
"ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt":"",
"ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/helpers/CMakeLists.txt":"",
"ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.txt":"",
+ "ydb/library/actors/helpers/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/http/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/http/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/http/CMakeLists.linux-aarch64.txt":"",
"ydb/library/actors/http/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/http/CMakeLists.txt":"",
"ydb/library/actors/http/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.txt":"",
+ "ydb/library/actors/http/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt":"",
@@ -7737,6 +7592,36 @@
"ydb/library/actors/interconnect/mock/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/interconnect/mock/CMakeLists.txt":"",
"ydb/library/actors/interconnect/mock/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.txt":"",
+ "ydb/library/actors/interconnect/ut/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.txt":"",
+ "ydb/library/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.txt":"",
+ "ydb/library/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.txt":"",
+ "ydb/library/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.txt":"",
+ "ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/log_backend/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/log_backend/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/log_backend/CMakeLists.linux-aarch64.txt":"",
@@ -7755,6 +7640,12 @@
"ydb/library/actors/prof/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/prof/CMakeLists.txt":"",
"ydb/library/actors/prof/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.txt":"",
+ "ydb/library/actors/prof/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/protos/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/protos/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/protos/CMakeLists.linux-aarch64.txt":"",
@@ -7767,12 +7658,24 @@
"ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/testlib/CMakeLists.txt":"",
"ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.txt":"",
+ "ydb/library/actors/testlib/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/util/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/util/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/util/CMakeLists.linux-aarch64.txt":"",
"ydb/library/actors/util/CMakeLists.linux-x86_64.txt":"",
"ydb/library/actors/util/CMakeLists.txt":"",
"ydb/library/actors/util/CMakeLists.windows-x86_64.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.darwin-arm64.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.darwin-x86_64.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.linux-aarch64.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.linux-x86_64.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.txt":"",
+ "ydb/library/actors/util/ut/CMakeLists.windows-x86_64.txt":"",
"ydb/library/actors/wilson/CMakeLists.darwin-arm64.txt":"",
"ydb/library/actors/wilson/CMakeLists.darwin-x86_64.txt":"",
"ydb/library/actors/wilson/CMakeLists.linux-aarch64.txt":"",
diff --git a/build/long-path.manifest b/build/long-path.manifest
deleted file mode 100644
index 1195fcc597..0000000000
--- a/build/long-path.manifest
+++ /dev/null
@@ -1,8 +0,0 @@
-<?xml version='1.0' encoding='UTF-8' standalone='yes'?>
-<assembly xmlns="urn:schemas-microsoft-com:asm.v1" manifestVersion="1.0" xmlns:asmv3="urn:schemas-microsoft-com:asm.v3">
- <asmv3:application>
- <asmv3:windowsSettings xmlns:ws2="http://schemas.microsoft.com/SMI/2016/WindowsSettings">
- <ws2:longPathAware>true</ws2:longPathAware>
- </asmv3:windowsSettings>
- </asmv3:application>
-</assembly>
diff --git a/build/prebuilt/tools/event2cpp/ya.make.induced_deps b/build/prebuilt/tools/event2cpp/ya.make.induced_deps
deleted file mode 100644
index ede8def8d7..0000000000
--- a/build/prebuilt/tools/event2cpp/ya.make.induced_deps
+++ /dev/null
@@ -1,10 +0,0 @@
-INDUCED_DEPS(h+cpp
- ${ARCADIA_ROOT}/contrib/libs/protobuf/src/google/protobuf/io/printer.h
- ${ARCADIA_ROOT}/contrib/libs/protobuf/src/google/protobuf/io/zero_copy_stream_impl_lite.h
- ${ARCADIA_ROOT}/contrib/libs/protobuf/src/google/protobuf/stubs/strutil.h
- ${ARCADIA_ROOT}/library/cpp/eventlog/event_field_output.h
- ${ARCADIA_ROOT}/library/cpp/eventlog/event_field_printer.h
- ${ARCADIA_ROOT}/library/cpp/eventlog/events_extension.h
- ${ARCADIA_ROOT}/util/generic/cast.h
- ${ARCADIA_ROOT}/util/stream/output.h
-)
diff --git a/contrib/deprecated/galloc/basictypes.h b/contrib/deprecated/galloc/basictypes.h
deleted file mode 100644
index 985ba38531..0000000000
--- a/contrib/deprecated/galloc/basictypes.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#pragma once
-
-#include <inttypes.h>
-
-typedef signed char schar;
-
-typedef int8_t int8;
-typedef int16_t int16;
-typedef int32_t int32;
-typedef int64_t int64;
-
-typedef uint8_t uint8;
-typedef uint16_t uint16;
-typedef uint32_t uint32;
-typedef uint64_t uint64;
-
-const uint16 kuint16max = ( (uint16) 0xFFFF);
-const uint32 kuint32max = ( (uint32) 0xFFFFFFFF);
-const uint64 kuint64max = ( (((uint64) kuint32max) << 32) | kuint32max );
-
-const int8 kint8max = ( ( int8) 0x7F);
-const int16 kint16max = ( ( int16) 0x7FFF);
-const int32 kint32max = ( ( int32) 0x7FFFFFFF);
-const int64 kint64max = ( ((( int64) kint32max) << 32) | kuint32max );
-
-const int8 kint8min = ( ( int8) 0x80);
-const int16 kint16min = ( ( int16) 0x8000);
-const int32 kint32min = ( ( int32) 0x80000000);
-const int64 kint64min = ( ((( int64) kint32min) << 32) | 0 );
-
-#define DISALLOW_EVIL_CONSTRUCTORS(TypeName) \
- TypeName(const TypeName&); \
- void operator=(const TypeName&)
diff --git a/contrib/deprecated/galloc/commandlineflags.h b/contrib/deprecated/galloc/commandlineflags.h
deleted file mode 100644
index 3666eaaf86..0000000000
--- a/contrib/deprecated/galloc/commandlineflags.h
+++ /dev/null
@@ -1,111 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// This file is a compatibility layer that defines Google's version of
-// command line flags that are used for configuration.
-//
-// We put flags into their own namespace. It is purposefully
-// named in an opaque way that people should have trouble typing
-// directly. The idea is that DEFINE puts the flag in the weird
-// namespace, and DECLARE imports the flag from there into the
-// current namespace. The net result is to force people to use
-// DECLARE to get access to a flag, rather than saying
-// extern bool FLAGS_logtostderr;
-// or some such instead. We want this so we can put extra
-// functionality (like sanity-checking) in DECLARE if we want,
-// and make sure it is picked up everywhere.
-//
-// We also put the type of the variable in the namespace, so that
-// people can't DECLARE_int32 something that they DEFINE_bool'd
-// elsewhere.
-#ifndef BASE_COMMANDLINEFLAGS_H__
-#define BASE_COMMANDLINEFLAGS_H__
-
-#include <string>
-
-#include "basictypes.h"
-
-#define DECLARE_VARIABLE(type, name) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
- extern type FLAGS_##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
-
-#define DEFINE_VARIABLE(type, name, value, meaning) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
- type FLAGS_##name(value); \
- char FLAGS_no##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
-
-// bool specialization
-#define DECLARE_bool(name) \
- DECLARE_VARIABLE(bool, name)
-#define DEFINE_bool(name, value, meaning) \
- DEFINE_VARIABLE(bool, name, value, meaning)
-
-// int32 specialization
-#define DECLARE_int32(name) \
- DECLARE_VARIABLE(int32, name)
-#define DEFINE_int32(name, value, meaning) \
- DEFINE_VARIABLE(int32, name, value, meaning)
-
-// int64 specialization
-#define DECLARE_int64(name) \
- DECLARE_VARIABLE(int64, name)
-#define DEFINE_int64(name, value, meaning) \
- DEFINE_VARIABLE(int64, name, value, meaning)
-
-#define DECLARE_uint64(name) \
- DECLARE_VARIABLE(uint64, name)
-#define DEFINE_uint64(name, value, meaning) \
- DEFINE_VARIABLE(uint64, name, value, meaning)
-
-// double specialization
-#define DECLARE_double(name) \
- DECLARE_VARIABLE(double, name)
-#define DEFINE_double(name, value, meaning) \
- DEFINE_VARIABLE(double, name, value, meaning)
-
-// Special case for string, because we have to specify the namespace
-// std::string, which doesn't play nicely with our FLAG__namespace hackery.
-#define DECLARE_string(name) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
- extern std::string FLAGS_##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
-#define DEFINE_string(name, value, meaning) \
- namespace FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead { \
- std::string FLAGS_##name(value); \
- char FLAGS_no##name; \
- } \
- using FLAG__namespace_do_not_use_directly_use_DECLARE_string_instead::FLAGS_##name
-
-#endif // BASE_COMMANDLINEFLAGS_H__
diff --git a/contrib/deprecated/galloc/galloc.cpp b/contrib/deprecated/galloc/galloc.cpp
deleted file mode 100644
index 4d7fc14f49..0000000000
--- a/contrib/deprecated/galloc/galloc.cpp
+++ /dev/null
@@ -1,55 +0,0 @@
-#include <util/system/defaults.h>
-
-#ifndef USE_GOOGLE_ALLOCATOR
- #define USE_GOOGLE_ALLOCATOR 1
-#endif
-
-#if defined(_MSC_VER) && !defined(__MWERKS__) && !defined (__ICL) && !defined (__COMO__)
- #define USE_VISUALCC
-#elif defined(__INTEL_COMPILER)
- #define USE_INTELCC
-#elif defined(__GNUC__)
- #define USE_GNUCC
-#elif defined(__SUNPRO_C) || defined(__SUNPRO_CC)
- #define USE_SUNCC
-#else
- //#error your compiler does not supported
-#endif
-
-#if defined(USE_INTELCC)
- #pragma warning(disable 177)
- #pragma warning(disable 869)
- #pragma warning(disable 810)
- #pragma warning(disable 967)
- #pragma warning(disable 1599)
- #pragma warning(disable 1469)
-#endif
-
-#if defined(_linux_) || defined(_freebsd_)
- #define GOOGLE_ALLOCATOR_IS_USABLE
-#endif
-
-#if defined(GOOGLE_ALLOCATOR_IS_USABLE) && USE_GOOGLE_ALLOCATOR
- #undef NDEBUG
- #define NDEBUG
-
- #define HAVE_INTTYPES_H 1
- #define HAVE_MMAP 1
- #define HAVE_MUNMAP 1
- #define HAVE_PTHREAD 1
- #define HAVE_SBRK 1
- #define HAVE_UNWIND_H 1
-
- #if defined(USE_INTELCC) || defined(USE_GNUCC)
- #undef HAVE___ATTRIBUTE__
- #define HAVE___ATTRIBUTE__
- #endif
-
- #define PRIuS PRISZT
- #define LLU PRIu64
-
- #include "malloc_extension.cc"
- #include "internal_logging.cc"
- #include "system-alloc.cc"
- #include "tcmalloc.cc"
-#endif
diff --git a/contrib/deprecated/galloc/hack.cpp b/contrib/deprecated/galloc/hack.cpp
deleted file mode 100644
index 3ba36dacea..0000000000
--- a/contrib/deprecated/galloc/hack.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-#include "hack.h"
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/spin_wait.h>
-
-#include "spinlock.h"
-
-void SPIN_L(spinlock_t* l) {
- if (!AtomicTryLock(l)) {
- TSpinWait sw;
-
- while (!AtomicTryAndTryLock(l)) {
- sw.Sleep();
- }
- }
-}
-
-void SPIN_U(spinlock_t* l) {
- AtomicUnlock(l);
-}
diff --git a/contrib/deprecated/galloc/hack.h b/contrib/deprecated/galloc/hack.h
deleted file mode 100644
index 3b172a2da2..0000000000
--- a/contrib/deprecated/galloc/hack.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include <sys/types.h>
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
-
-#define AcquireAdaptiveLockSlow AllocAcquireAdaptiveLockSlow
-#define SchedYield AllocSchedYield
-#define ThreadYield AllocThreadYield
-#define NSystemInfo NAllocSystemInfo
-
-#ifdef _MSC_VER
-# define __restrict__ __restrict
-# define JEMALLOC_EXPORT
-#endif
-
-#if defined(__cplusplus)
-};
-#endif
diff --git a/contrib/deprecated/galloc/internal_logging.cc b/contrib/deprecated/galloc/internal_logging.cc
deleted file mode 100644
index 00e5928fe2..0000000000
--- a/contrib/deprecated/galloc/internal_logging.cc
+++ /dev/null
@@ -1,67 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Sanjay Ghemawat <opensource@google.com>
-
-#include <stdio.h>
-#include <stdarg.h>
-#include <string.h>
-
-#include "internal_logging.h"
-
-int TCMallocDebug::level;
-
-void TCMalloc_MESSAGE(const char* format, ...) {
- va_list ap;
- va_start(ap, format);
- char buf[800];
- vsnprintf(buf, sizeof(buf), format, ap);
- va_end(ap);
- write(STDERR_FILENO, buf, strlen(buf));
-}
-
-void TCMalloc_Printer::printf(const char* format, ...) {
- if (left_ > 0) {
- va_list ap;
- va_start(ap, format);
- const int r = vsnprintf(buf_, left_, format, ap);
- va_end(ap);
- if (r < 0) {
- // Perhaps an old glibc that returns -1 on truncation?
- left_ = 0;
- } else if (r > left_) {
- // Truncation
- left_ = 0;
- } else {
- left_ -= r;
- buf_ += r;
- }
- }
-}
diff --git a/contrib/deprecated/galloc/internal_logging.h b/contrib/deprecated/galloc/internal_logging.h
deleted file mode 100644
index 6a4e365723..0000000000
--- a/contrib/deprecated/galloc/internal_logging.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// Internal logging and related utility routines.
-
-#ifndef TCMALLOC_INTERNAL_LOGGING_H__
-#define TCMALLOC_INTERNAL_LOGGING_H__
-
-#include <stdarg.h>
-#include <stdlib.h>
-#include <unistd.h>
-
-//-------------------------------------------------------------------
-// Utility routines
-//-------------------------------------------------------------------
-
-struct TCMallocDebug {
- static int level;
-
- enum { kNone, kInfo, kVerbose };
-};
-
-// Safe debugging routine: we write directly to the stderr file
-// descriptor and avoid FILE buffering because that may invoke
-// malloc()
-extern void TCMalloc_MESSAGE(const char* format, ...)
-#ifdef HAVE___ATTRIBUTE__
- __attribute__ ((__format__ (__printf__, 1, 2)))
-#endif
-;
-
-// Short form for convenience
-#define MESSAGE TCMalloc_MESSAGE
-
-// Like assert(), but executed even in NDEBUG mode
-#undef CHECK_CONDITION
-#define CHECK_CONDITION(cond) \
-do { \
- if (!(cond)) { \
- MESSAGE("%s:%d: assertion failed: %s\n", __FILE__, __LINE__, #cond); \
- abort(); \
- } \
-} while (0)
-
-// Our own version of assert() so we can avoid hanging by trying to do
-// all kinds of goofy printing while holding the malloc lock.
-#ifndef NDEBUG
-#define ASSERT(cond) CHECK_CONDITION(cond)
-#else
-#define ASSERT(cond) ((void) 0)
-#endif
-
-// Print into buffer
-class TCMalloc_Printer {
- private:
- char* buf_; // Where should we write next
- int left_; // Space left in buffer (including space for \0)
-
- public:
- // REQUIRES: "length > 0"
- TCMalloc_Printer(char* buf, int length) : buf_(buf), left_(length) {
- buf[0] = '\0';
- }
-
- void printf(const char* format, ...)
-#ifdef HAVE___ATTRIBUTE__
- __attribute__ ((__format__ (__printf__, 2, 3)))
-#endif
-;
-};
-
-#endif // TCMALLOC_INTERNAL_LOGGING_H__
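A minimal usage sketch of the two facilities declared above (not from the original file; the function name is illustrative). TCMalloc_Printer appends formatted text to a caller-supplied buffer and silently drops output once the buffer is full, while CHECK_CONDITION aborts even in NDEBUG builds.

#include "internal_logging.h"

// Sketch: collect several formatted lines into one fixed-size buffer.
static void FormatStats(char* buf, int len) {
  CHECK_CONDITION(len > 0);              // fires even with NDEBUG defined
  TCMalloc_Printer printer(buf, len);
  printer.printf("in use: %d bytes\n", 12345);
  printer.printf("free:   %d bytes\n", 678);
  // Calls made after the buffer fills up become no-ops; no overflow occurs.
}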
diff --git a/contrib/deprecated/galloc/internal_spinlock.h b/contrib/deprecated/galloc/internal_spinlock.h
deleted file mode 100644
index e7086f2da2..0000000000
--- a/contrib/deprecated/galloc/internal_spinlock.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#pragma once
-
-extern "C" {
- #include "hack.h"
- #include "spinlock.h"
-}
-
-#define SPINLOCK_INITIALIZER { _SPINLOCK_INITIALIZER }
-
-struct TCMalloc_SpinLock {
- volatile spinlock_t private_lockword_;
-
- inline void Init() noexcept {
- private_lockword_ = _SPINLOCK_INITIALIZER;
- }
-
- inline void Finalize() noexcept {
- }
-
- inline void Lock() noexcept {
- _SPINLOCK(&private_lockword_);
- }
-
- inline void Unlock() noexcept {
- _SPINUNLOCK(&private_lockword_);
- }
-};
-
-class TCMalloc_SpinLockHolder {
- private:
- TCMalloc_SpinLock* lock_;
-
- public:
- inline explicit TCMalloc_SpinLockHolder(TCMalloc_SpinLock* l)
- : lock_(l)
- {
- l->Lock();
- }
-
- inline ~TCMalloc_SpinLockHolder() {
- lock_->Unlock();
- }
-};
-
-// Short-hands for convenient use by tcmalloc.cc
-typedef TCMalloc_SpinLock SpinLock;
-typedef TCMalloc_SpinLockHolder SpinLockHolder;
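A minimal sketch of the RAII pattern above, assuming internal_spinlock.h is on the include path: the holder acquires the lock in its constructor and releases it in its destructor, so every return path unlocks.

#include "internal_spinlock.h"

static SpinLock counter_lock = SPINLOCK_INITIALIZER;
static int counter = 0;

// Sketch: the guard unlocks automatically when it goes out of scope.
static int BumpCounter() {
  SpinLockHolder holder(&counter_lock);
  return ++counter;                      // counter_lock released here
}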
diff --git a/contrib/deprecated/galloc/malloc_extension.cc b/contrib/deprecated/galloc/malloc_extension.cc
deleted file mode 100644
index 4b20b72bdc..0000000000
--- a/contrib/deprecated/galloc/malloc_extension.cc
+++ /dev/null
@@ -1,129 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-
-#include <assert.h>
-#include <string.h>
-#include <pthread.h>
-#include <stdio.h>
-#include <stdlib.h>
-
-#include <new>
-#include <string>
-
-#include "malloc_extension.h"
-
-// Note: this routine is meant to be called before threads are spawned.
-void MallocExtension::Initialize() {
- static bool initialize_called = false;
-
- if (initialize_called) return;
- initialize_called = true;
-
- // GNU libc++ versions 3.3 and 3.4 obey the environment variables
- // GLIBCPP_FORCE_NEW and GLIBCXX_FORCE_NEW respectively. Setting
- // one of these variables forces the STL default allocator to call
- // new() or delete() for each allocation or deletion. Otherwise
- // the STL allocator tries to avoid the high cost of doing
- // allocations by pooling memory internally. However, tcmalloc
- // does allocations really fast, especially for the types of small
- // items one sees in STL, so it's better off just using us.
- // TODO: control whether we do this via an environment variable?
- setenv("GLIBCPP_FORCE_NEW", "1", false /* no overwrite*/);
- setenv("GLIBCXX_FORCE_NEW", "1", false /* no overwrite*/);
-
- // Now we need to make the setenv 'stick', which it may not do since
- // the env is flaky before main() is called. But luckily the STL only
- // looks at this env var the first time it tries to do an alloc, and
- // caches what it finds. So we just cause an stl alloc here.
- std::string dummy("I need to be allocated");
- dummy += "!"; // so the definition of dummy isn't optimized out
-}
-
-// Default implementation -- does nothing
-MallocExtension::~MallocExtension() { }
-bool MallocExtension::VerifyAllMemory() { return true; }
-bool MallocExtension::VerifyNewMemory(void* /*p*/) { return true; }
-bool MallocExtension::VerifyArrayNewMemory(void* /*p*/) { return true; }
-bool MallocExtension::VerifyMallocMemory(void* /*p*/) { return true; }
-
-bool MallocExtension::GetNumericProperty(const char* /*property*/, size_t* /*value*/) {
- return false;
-}
-
-bool MallocExtension::SetNumericProperty(const char* /*property*/, size_t /*value*/) {
- return false;
-}
-
-void MallocExtension::GetStats(char* buffer, int length) {
- assert(length > 0);
- (void)length;
- buffer[0] = '\0';
-}
-
-bool MallocExtension::MallocMemoryStats(int* blocks, size_t* total,
- int histogram[kMallocHistogramSize]) {
- *blocks = 0;
- *total = 0;
- // "histogram" has decayed to a pointer here, so sizeof(histogram) would be wrong.
- memset(histogram, 0, kMallocHistogramSize * sizeof(int));
- return true;
-}
-
-void** MallocExtension::ReadStackTraces() {
- return NULL;
-}
-
-void** MallocExtension::ReadHeapGrowthStackTraces() {
- return NULL;
-}
-
-// The current malloc extension object. We also keep a pointer to
-// the default implementation so that the heap-leak checker does not
-// complain about a memory leak.
-
-static pthread_once_t module_init = PTHREAD_ONCE_INIT;
-static MallocExtension* default_instance = NULL;
-static MallocExtension* current_instance = NULL;
-
-static void InitModule() {
- default_instance = new MallocExtension;
- current_instance = default_instance;
-}
-
-MallocExtension* MallocExtension::instance() {
- pthread_once(&module_init, InitModule);
- return current_instance;
-}
-
-void MallocExtension::Register(MallocExtension* implementation) {
- pthread_once(&module_init, InitModule);
- current_instance = implementation;
-}
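A sketch of how an allocator is expected to hook itself in (the class and function names here are illustrative, not part of the library): subclass MallocExtension, override the properties you support, call Register() during initialization, and let clients go through instance().

#include <string.h>
#include "malloc_extension.h"

// Illustrative subclass that supports a single read-only property.
class ToyExtension : public MallocExtension {
 public:
  bool GetNumericProperty(const char* property, size_t* value) override {
    if (strcmp(property, "generic.current_allocated_bytes") == 0) {
      *value = 0;                        // a real malloc would report live bytes
      return true;
    }
    return false;                        // unknown property
  }
};

static ToyExtension toy_extension;       // must outlive every caller

static void InstallToyExtension() {
  MallocExtension::Register(&toy_extension);
}

static size_t CurrentAllocatedBytes() {
  size_t v = 0;
  MallocExtension::instance()->GetNumericProperty(
      "generic.current_allocated_bytes", &v);
  return v;
}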
diff --git a/contrib/deprecated/galloc/malloc_extension.h b/contrib/deprecated/galloc/malloc_extension.h
deleted file mode 100644
index 3ed8a91e35..0000000000
--- a/contrib/deprecated/galloc/malloc_extension.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// Extra interfaces exported by some malloc implementations. These
-// interfaces are accessed through a virtual base class so an
-// application can link against a malloc that does not implement these
-// interfaces, and it will get default versions that do nothing.
-
-#ifndef _GOOGLE_MALLOC_EXTENSION_H__
-#define _GOOGLE_MALLOC_EXTENSION_H__
-
-#include <stddef.h>
-
-static const int kMallocHistogramSize = 64;
-
-// The default implementations of the following routines do nothing.
-class MallocExtension {
- public:
- virtual ~MallocExtension();
-
- // Call this very early in the program execution -- say, in a global
- // constructor -- to set up parameters and state needed by all
- // instrumented malloc implementations. One example: this routine
- // sets environment variables to tell STL to use libc's malloc()
- // instead of doing its own memory management. This is safe to call
- // multiple times, as long as each time is before threads start up.
- static void Initialize();
-
- // See "verify_memory.h" to see what these routines do
- virtual bool VerifyAllMemory();
- virtual bool VerifyNewMemory(void* p);
- virtual bool VerifyArrayNewMemory(void* p);
- virtual bool VerifyMallocMemory(void* p);
- virtual bool MallocMemoryStats(int* blocks, size_t* total,
- int histogram[kMallocHistogramSize]);
-
- // Get a human readable description of the current state of the malloc
- // data structures. The state is stored as a null-terminated string
- // in a prefix of "buffer[0,buffer_length-1]".
- // REQUIRES: buffer_length > 0.
- virtual void GetStats(char* buffer, int buffer_length);
-
- // -------------------------------------------------------------------
- // Control operations for getting and setting malloc implementation
- // specific parameters. Some currently useful properties:
- //
- // generic
- // -------
- // "generic.current_allocated_bytes"
- // Number of bytes currently allocated by application
- // This property is not writable.
- //
- // "generic.heap_size"
- // Number of bytes in the heap ==
- // current_allocated_bytes +
- // fragmentation +
- // freed memory regions
- // This property is not writable.
- //
- // tcmalloc
- // --------
- // "tcmalloc.max_total_thread_cache_bytes"
- // Upper limit on total number of bytes stored across all
- // per-thread caches. Default: 16MB.
- //
- // "tcmalloc.current_total_thread_cache_bytes"
- // Number of bytes used across all thread caches.
- // This property is not writable.
- //
- // "tcmalloc.slack_bytes"
- // Number of bytes allocated from system, but not currently
- // in use by malloced objects. I.e., bytes available for
- // allocation without needing more bytes from system.
- // This property is not writable.
- //
- // TODO: Add more properties as necessary
- // -------------------------------------------------------------------
-
- // Get the named "property"'s value. Returns true if the property
- // is known. Returns false if the property is not a valid property
- // name for the current malloc implementation.
- // REQUIRES: property != NULL; value != NULL
- virtual bool GetNumericProperty(const char* property, size_t* value);
-
- // Set the named "property"'s value. Returns true if the property
- // is known and writable. Returns false if the property is not a
- // valid property name for the current malloc implementation, or
- // is not writable.
- // REQUIRES: property != NULL
- virtual bool SetNumericProperty(const char* property, size_t value);
-
- // The current malloc implementation. Always non-NULL.
- static MallocExtension* instance();
-
- // Change the malloc implementation. Typically called by the
- // malloc implementation during initialization.
- static void Register(MallocExtension* implementation);
-
- protected:
- // Get a list of stack traces of sampled allocation points.
- // Returns a pointer to a "new[]-ed" result array.
- //
- // The state is stored as a sequence of adjacent entries
- // in the returned array. Each entry has the following form:
- // uintptr_t count; // Number of objects with following trace
- // uintptr_t size; // Size of object
- // uintptr_t depth; // Number of PC values in stack trace
- // void* stack[depth]; // PC values that form the stack trace
- //
- // The list of entries is terminated by a "count" of 0.
- //
- // It is the responsibility of the caller to "delete[]" the returned array.
- //
- // May return NULL to indicate no results.
- //
- // This is an internal extension. Callers should use the more
- // convenient "GetHeapSample(string*)" method defined above.
- virtual void** ReadStackTraces();
-
- // Like ReadStackTraces(), but returns stack traces that caused growth
- // in the address space size.
- virtual void** ReadHeapGrowthStackTraces();
-};
-
-#endif // _GOOGLE_MALLOC_EXTENSION_H__
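The ReadStackTraces() record layout documented above can be walked as in the following sketch (the function name is illustrative); each record is {count, size, depth, stack[depth]} and a count of zero terminates the array.

#include <stdint.h>
#include <stdio.h>

// Sketch: decode a flattened heap sample and release it afterwards.
static void PrintHeapSample(void** entries) {
  if (entries == NULL) return;                       // no results
  void** p = entries;
  while (reinterpret_cast<uintptr_t>(p[0]) != 0) {
    const uintptr_t count = reinterpret_cast<uintptr_t>(p[0]);
    const uintptr_t size  = reinterpret_cast<uintptr_t>(p[1]);
    const uintptr_t depth = reinterpret_cast<uintptr_t>(p[2]);
    fprintf(stderr, "%lu object(s) of %lu bytes, %lu frame(s)\n",
            (unsigned long)count, (unsigned long)size, (unsigned long)depth);
    p += 3 + depth;                                  // skip the PC values
  }
  delete[] entries;                                  // caller owns the array
}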
diff --git a/contrib/deprecated/galloc/malloc_hook.h b/contrib/deprecated/galloc/malloc_hook.h
deleted file mode 100644
index 1e109180b6..0000000000
--- a/contrib/deprecated/galloc/malloc_hook.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#pragma once
-
-class MallocHook {
- public:
- typedef void (*NewHook)(void* ptr, size_t size);
- typedef void (*DeleteHook)(void* ptr);
-
- inline static void InvokeNewHook(void*, size_t) {
- }
-
- inline static void InvokeDeleteHook(void*) {
- }
-};
diff --git a/contrib/deprecated/galloc/pagemap.h b/contrib/deprecated/galloc/pagemap.h
deleted file mode 100644
index 7be23ff90c..0000000000
--- a/contrib/deprecated/galloc/pagemap.h
+++ /dev/null
@@ -1,250 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A data structure used by the caching malloc. It maps from page# to
-// a pointer that contains info about that page. We use two
-// representations: one for 32-bit addresses, and another for 64 bit
-// addresses. Both representations provide the same interface. The
-// first representation is implemented as a flat array, the second as
-// a three-level radix tree that strips away approximately 1/3rd of
-// the bits every time.
-//
-// The BITS parameter should be the number of bits required to hold
-// a page number. E.g., with 32 bit pointers and 4K pages (i.e.,
-// page offset fits in lower 12 bits), BITS == 20.
-
-#ifndef TCMALLOC_PAGEMAP_H__
-#define TCMALLOC_PAGEMAP_H__
-
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-#include "internal_logging.h"
-
-// Single-level array
-template <int BITS>
-class TCMalloc_PageMap1 {
- private:
- void** array_;
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap1(void* (*allocator)(size_t)) {
- array_ = reinterpret_cast<void**>((*allocator)(sizeof(void*) << BITS));
- memset(array_, 0, sizeof(void*) << BITS);
- }
-
- // Ensure that the map contains initialized entries "x .. x+n-1".
- // Returns true if successful, false if we could not allocate memory.
- bool Ensure(Number x, size_t n) {
- // Nothing to do since the flat array was allocated at start
- return true;
- }
-
- void PreallocateMoreMemory() {}
-
- // REQUIRES "k" is in range "[0,2^BITS-1]".
- // REQUIRES "k" has been ensured before.
- //
- // Return the current value for KEY. Returns "Value()" if not
- // yet set.
- void* get(Number k) const {
- return array_[k];
- }
-
- // REQUIRES "k" is in range "[0,2^BITS-1]".
- // REQUIRES "k" has been ensured before.
- //
- // Sets the value for KEY.
- void set(Number k, void* v) {
- array_[k] = v;
- }
-};
-
-// Two-level radix tree
-template <int BITS>
-class TCMalloc_PageMap2 {
- private:
- // Put 32 entries in the root and (2^BITS)/32 entries in each leaf.
- static const int ROOT_BITS = 5;
- static const int ROOT_LENGTH = 1 << ROOT_BITS;
-
- static const int LEAF_BITS = BITS - ROOT_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Leaf* root_[ROOT_LENGTH]; // Pointers to 32 child nodes
- void* (*allocator_)(size_t); // Memory allocator
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap2(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- memset(root_, 0, sizeof(root_));
- }
-
- void* get(Number k) const {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- return root_[i1]->values[i2];
- }
-
- void set(Number k, void* v) {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> LEAF_BITS;
- const Number i2 = k & (LEAF_LENGTH-1);
- root_[i1]->values[i2] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> LEAF_BITS;
-
- // Make 2nd level node if necessary
- if (root_[i1] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_[i1] = leaf;
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- // Allocate enough to keep track of all possible pages
- Ensure(0, 1 << BITS);
- }
-};
-
-// Three-level radix tree
-template <int BITS>
-class TCMalloc_PageMap3 {
- private:
- // How many bits should we consume at each interior level
- static const int INTERIOR_BITS = (BITS + 2) / 3; // Round-up
- static const int INTERIOR_LENGTH = 1 << INTERIOR_BITS;
-
- // How many bits should we consume at leaf level
- static const int LEAF_BITS = BITS - 2*INTERIOR_BITS;
- static const int LEAF_LENGTH = 1 << LEAF_BITS;
-
- // Interior node
- struct Node {
- Node* ptrs[INTERIOR_LENGTH];
- };
-
- // Leaf node
- struct Leaf {
- void* values[LEAF_LENGTH];
- };
-
- Node* root_; // Root of radix tree
- void* (*allocator_)(size_t); // Memory allocator
-
- Node* NewNode() {
- Node* result = reinterpret_cast<Node*>((*allocator_)(sizeof(Node)));
- if (result != NULL) {
- memset(result, 0, sizeof(*result));
- }
- return result;
- }
-
- public:
- typedef uintptr_t Number;
-
- explicit TCMalloc_PageMap3(void* (*allocator)(size_t)) {
- allocator_ = allocator;
- root_ = NewNode();
- }
-
- void* get(Number k) const {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- return reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3];
- }
-
- void set(Number k, void* v) {
- ASSERT(k >> BITS == 0);
- const Number i1 = k >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (k >> LEAF_BITS) & (INTERIOR_LENGTH-1);
- const Number i3 = k & (LEAF_LENGTH-1);
- reinterpret_cast<Leaf*>(root_->ptrs[i1]->ptrs[i2])->values[i3] = v;
- }
-
- bool Ensure(Number start, size_t n) {
- for (Number key = start; key <= start + n - 1; ) {
- const Number i1 = key >> (LEAF_BITS + INTERIOR_BITS);
- const Number i2 = (key >> LEAF_BITS) & (INTERIOR_LENGTH-1);
-
- // Make 2nd level node if necessary
- if (root_->ptrs[i1] == NULL) {
- Node* n = NewNode();
- if (n == NULL) return false;
- root_->ptrs[i1] = n;
- }
-
- // Make leaf node if necessary
- if (root_->ptrs[i1]->ptrs[i2] == NULL) {
- Leaf* leaf = reinterpret_cast<Leaf*>((*allocator_)(sizeof(Leaf)));
- if (leaf == NULL) return false;
- memset(leaf, 0, sizeof(*leaf));
- root_->ptrs[i1]->ptrs[i2] = reinterpret_cast<Node*>(leaf);
- }
-
- // Advance key past whatever is covered by this leaf node
- key = ((key >> LEAF_BITS) + 1) << LEAF_BITS;
- }
- return true;
- }
-
- void PreallocateMoreMemory() {
- }
-};
-
-#endif // TCMALLOC_PAGEMAP_H__
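For 64-bit pointers and 4 KiB pages the three-level split above works out to BITS = 64 - 12 = 52, INTERIOR_BITS = 18 and LEAF_BITS = 16. A small self-contained check of the index arithmetic (a sketch, not part of the original file):

#include <assert.h>
#include <stdint.h>

int main() {
  const int BITS = 52;
  const int INTERIOR_BITS = (BITS + 2) / 3;            // 18
  const int LEAF_BITS = BITS - 2 * INTERIOR_BITS;      // 16
  const uint64_t k = 0x123456789AULL;                  // an arbitrary page number
  const uint64_t i1 = k >> (LEAF_BITS + INTERIOR_BITS);
  const uint64_t i2 = (k >> LEAF_BITS) & ((1u << INTERIOR_BITS) - 1);
  const uint64_t i3 = k & ((1u << LEAF_BITS) - 1);
  // Recombining the three indices reproduces the original key.
  assert(((((i1 << INTERIOR_BITS) | i2) << LEAF_BITS) | i3) == k);
  return 0;
}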
diff --git a/contrib/deprecated/galloc/spinlock.h b/contrib/deprecated/galloc/spinlock.h
deleted file mode 100644
index 93fcf10e12..0000000000
--- a/contrib/deprecated/galloc/spinlock.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include <util/system/defaults.h>
-
-typedef volatile intptr_t spinlock_t;
-
-#define SPIN_L AllocAcquireAdaptiveLock
-#define SPIN_U AllocReleaseAdaptiveLock
-
-#define _SPINLOCK_INITIALIZER 0
-#define _SPINUNLOCK(_lck) SPIN_U(_lck)
-#define _SPINLOCK(_lck) SPIN_L(_lck)
-
-#if defined(__cplusplus)
-extern "C" {
-#endif
- void SPIN_L(spinlock_t* lock);
- void SPIN_U(spinlock_t* lock);
-#if defined(__cplusplus)
-};
-#endif
diff --git a/contrib/deprecated/galloc/stacktrace.h b/contrib/deprecated/galloc/stacktrace.h
deleted file mode 100644
index ac393ca600..0000000000
--- a/contrib/deprecated/galloc/stacktrace.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#pragma once
-
-static inline int GetStackTrace(void** /*result*/, int /*max_depth*/, int /*skip_count*/) noexcept {
- return 0;
-}
-
-static inline bool GetStackExtent(void* /*sp*/, void** /*stack_top*/, void** /*stack_bottom*/ ) noexcept {
- return false;
-}
diff --git a/contrib/deprecated/galloc/system-alloc.cc b/contrib/deprecated/galloc/system-alloc.cc
deleted file mode 100644
index 6fea9dfa24..0000000000
--- a/contrib/deprecated/galloc/system-alloc.cc
+++ /dev/null
@@ -1,296 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-
-#if defined HAVE_STDINT_H
-#include <stdint.h>
-#elif defined HAVE_INTTYPES_H
-#include <inttypes.h>
-#else
-#include <sys/types.h>
-#endif
-
-#include <unistd.h>
-#include <fcntl.h>
-#include <sys/mman.h>
-
-#include "system-alloc.h"
-#include "internal_spinlock.h"
-#include "internal_logging.h"
-#include "commandlineflags.h"
-
-#if !defined(MAP_ANONYMOUS) && defined(MAP_ANON)
- #define MAP_ANONYMOUS MAP_ANON
-#endif
-
-// Structure for discovering alignment
-union MemoryAligner {
- void* p;
- double d;
- size_t s;
-};
-
-static SpinLock spinlock = SPINLOCK_INITIALIZER;
-
-// Page size is initialized on demand
-static size_t pagesize = 0;
-
-// Configuration parameters.
-//
-// if use_devmem is true, either use_sbrk or use_mmap must also be true.
-// For 2.2 kernels, it looks like the sbrk address space (500MBish) and
-// the mmap address space (1300MBish) are disjoint, so we need both allocators
-// to get as much virtual memory as possible.
-static bool use_devmem = true;
-static bool use_sbrk = true;
-static bool use_mmap = true;
-
-// Flags to keep us from retrying allocators that failed.
-static bool devmem_failure = false;
-static bool sbrk_failure = false;
-static bool mmap_failure = false;
-
-DEFINE_int32(malloc_devmem_start, 0,
- "Physical memory starting location in MB for /dev/mem allocation."
- " Setting this to 0 disables /dev/mem allocation");
-DEFINE_int32(malloc_devmem_limit, 0,
- "Physical memory limit location in MB for /dev/mem allocation."
- " Setting this to 0 means no limit.");
-
-#ifdef HAVE_SBRK
-
-static void* TrySbrk(size_t size, size_t alignment) {
- // sbrk will release memory if passed a negative number, so we do
- // a strict check here
- if (static_cast<ptrdiff_t>(size + alignment) < 0) return NULL;
-
- size = ((size + alignment - 1) / alignment) * alignment;
- void* result = sbrk(size);
- if (result == reinterpret_cast<void*>(-1)) {
- sbrk_failure = true;
- return NULL;
- }
-
- // Is it aligned?
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) == 0) return result;
-
- // Try to get more memory for alignment
- size_t extra = alignment - (ptr & (alignment-1));
- void* r2 = sbrk(extra);
- if (reinterpret_cast<uintptr_t>(r2) == (ptr + size)) {
- // Contiguous with previous result
- return reinterpret_cast<void*>(ptr + extra);
- }
-
- // Give up and ask for "size + alignment - 1" bytes so
- // that we can find an aligned region within it.
- result = sbrk(size + alignment - 1);
- if (result == reinterpret_cast<void*>(-1)) {
- sbrk_failure = true;
- return NULL;
- }
- ptr = reinterpret_cast<uintptr_t>(result);
- if ((ptr & (alignment-1)) != 0) {
- ptr += alignment - (ptr & (alignment-1));
- }
- return reinterpret_cast<void*>(ptr);
-}
-
-#endif /* HAVE_SBRK */
-
-#ifdef HAVE_MMAP
-
-static void* TryMmap(size_t size, size_t alignment) {
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
-
- // Note: size + extra does not overflow since:
- // size + alignment < (1<<NBITS).
- // and extra <= alignment
- // therefore size + extra < (1<<NBITS)
- void* result = mmap(NULL, size + extra,
- PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_ANONYMOUS,
- -1, 0);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- mmap_failure = true;
- return NULL;
- }
-
- // Adjust the return memory so it is aligned
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- return reinterpret_cast<void*>(ptr);
-}
-
-#endif /* HAVE_MMAP */
-
-static void* TryDevMem(size_t size, size_t alignment) {
- static bool initialized = false;
- static off_t physmem_base; // next physical memory address to allocate
- static off_t physmem_limit; // maximum physical address allowed
- static int physmem_fd; // file descriptor for /dev/mem
-
- // Check if we should use /dev/mem allocation. Note that it may take
- // a while to get this flag initialized, so meanwhile we fall back to
- // the next allocator. (It looks like 7MB gets allocated before
- // this flag gets initialized -khr.)
- if (FLAGS_malloc_devmem_start == 0) {
- // NOTE: not a devmem_failure - we'd like TCMalloc_SystemAlloc to
- // try us again next time.
- return NULL;
- }
-
- if (!initialized) {
- physmem_fd = open("/dev/mem", O_RDWR);
- if (physmem_fd < 0) {
- devmem_failure = true;
- return NULL;
- }
- physmem_base = FLAGS_malloc_devmem_start*1024LL*1024LL;
- physmem_limit = FLAGS_malloc_devmem_limit*1024LL*1024LL;
- initialized = true;
- }
-
- // Enforce page alignment
- if (pagesize == 0) pagesize = getpagesize();
- if (alignment < pagesize) alignment = pagesize;
- size = ((size + alignment - 1) / alignment) * alignment;
-
- // Ask for extra memory if alignment > pagesize
- size_t extra = 0;
- if (alignment > pagesize) {
- extra = alignment - pagesize;
- }
-
- // check to see if we have any memory left
- if (physmem_limit != 0 &&
- (off_t(size + extra) > (physmem_limit - physmem_base))) {
- devmem_failure = true;
- return NULL;
- }
-
- // Note: size + extra does not overflow since:
- // size + alignment < (1<<NBITS).
- // and extra <= alignment
- // therefore size + extra < (1<<NBITS)
- void *result = mmap(0, size + extra, PROT_WRITE|PROT_READ,
- MAP_SHARED, physmem_fd, physmem_base);
- if (result == reinterpret_cast<void*>(MAP_FAILED)) {
- devmem_failure = true;
- return NULL;
- }
- uintptr_t ptr = reinterpret_cast<uintptr_t>(result);
-
- // Adjust the return memory so it is aligned
- size_t adjust = 0;
- if ((ptr & (alignment - 1)) != 0) {
- adjust = alignment - (ptr & (alignment - 1));
- }
-
- // Return the unused virtual memory to the system
- if (adjust > 0) {
- munmap(reinterpret_cast<void*>(ptr), adjust);
- }
- if (adjust < extra) {
- munmap(reinterpret_cast<void*>(ptr + adjust + size), extra - adjust);
- }
-
- ptr += adjust;
- physmem_base += adjust + size;
-
- return reinterpret_cast<void*>(ptr);
-}
-
-void* TCMalloc_SystemAlloc(size_t size, size_t alignment) {
- // Discard requests that overflow
- if (size + alignment < size) return NULL;
-
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose) {
- MESSAGE("TCMalloc_SystemAlloc(%" PRIuS ", %" PRIuS")\n",
- size, alignment);
- }
- SpinLockHolder lock_holder(&spinlock);
-
- // Enforce minimum alignment
- if (alignment < sizeof(MemoryAligner)) alignment = sizeof(MemoryAligner);
-
- // Try twice, once avoiding allocators that failed before, and once
- // more trying all allocators even if they failed before.
- for (int i = 0; i < 2; i++) {
- if (use_devmem && !devmem_failure) {
- void* result = TryDevMem(size, alignment);
- if (result != NULL) return result;
- }
-
-#ifdef HAVE_SBRK
- if (use_sbrk && !sbrk_failure) {
- void* result = TrySbrk(size, alignment);
- if (result != NULL) return result;
- }
-#endif
-
-#ifdef HAVE_MMAP
- if (use_mmap && !mmap_failure) {
- void* result = TryMmap(size, alignment);
- if (result != NULL) return result;
- }
-#endif
-
- // nothing worked - reset failure flags and try again
- devmem_failure = false;
- sbrk_failure = false;
- mmap_failure = false;
- }
- return NULL;
-}
diff --git a/contrib/deprecated/galloc/system-alloc.h b/contrib/deprecated/galloc/system-alloc.h
deleted file mode 100644
index c72d5e00be..0000000000
--- a/contrib/deprecated/galloc/system-alloc.h
+++ /dev/null
@@ -1,46 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat
-//
-// Routine that uses sbrk/mmap to allocate memory from the system.
-// Useful for implementing malloc.
-
-#ifndef TCMALLOC_SYSTEM_ALLOC_H__
-#define TCMALLOC_SYSTEM_ALLOC_H__
-
-// REQUIRES: "alignment" is a power of two or "0" to indicate default alignment
-//
-// Allocate and return "N" bytes of zeroed memory. The returned
-// pointer is a multiple of "alignment" if non-zero. Returns NULL
-// when out of memory.
-extern void* TCMalloc_SystemAlloc(size_t bytes, size_t alignment = 0);
-
-#endif /* TCMALLOC_SYSTEM_ALLOC_H__ */
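A usage sketch for the declaration above: the alignment must be a power of two (or 0 for the default), and NULL signals that the system is out of memory.

#include "system-alloc.h"

// Sketch: grab 1 MiB from the system, aligned to 64 KiB.
static void* GrabAlignedChunk() {
  return TCMalloc_SystemAlloc(1 << 20, 1 << 16);       // NULL on failure
}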
diff --git a/contrib/deprecated/galloc/tcmalloc.cc b/contrib/deprecated/galloc/tcmalloc.cc
deleted file mode 100644
index a980effa8b..0000000000
--- a/contrib/deprecated/galloc/tcmalloc.cc
+++ /dev/null
@@ -1,2661 +0,0 @@
-// Copyright (c) 2005, Google Inc.
-// All rights reserved.
-//
-// Redistribution and use in source and binary forms, with or without
-// modification, are permitted provided that the following conditions are
-// met:
-//
-// * Redistributions of source code must retain the above copyright
-// notice, this list of conditions and the following disclaimer.
-// * Redistributions in binary form must reproduce the above
-// copyright notice, this list of conditions and the following disclaimer
-// in the documentation and/or other materials provided with the
-// distribution.
-// * Neither the name of Google Inc. nor the names of its
-// contributors may be used to endorse or promote products derived from
-// this software without specific prior written permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-// ---
-// Author: Sanjay Ghemawat <opensource@google.com>
-//
-// A malloc that uses a per-thread cache to satisfy small malloc requests.
-// (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
-//
-// See doc/tcmalloc.html for a high-level
-// description of how this malloc works.
-//
-// SYNCHRONIZATION
-// 1. The thread-specific lists are accessed without acquiring any locks.
-// This is safe because each such list is only accessed by one thread.
-// 2. We have a lock per central free-list, and hold it while manipulating
-// the central free list for a particular size.
-// 3. The central page allocator is protected by "pageheap_lock".
-// 4. The pagemap (which maps from page-number to descriptor)
-// can be read without holding any locks, and written while holding
-// the "pageheap_lock".
-//
-// This multi-threaded access to the pagemap is safe for fairly
-// subtle reasons. We basically assume that when an object X is
-// allocated by thread A and deallocated by thread B, there must
-// have been appropriate synchronization in the handoff of object
-// X from thread A to thread B.
-//
-// TODO: Bias reclamation to larger addresses
-// TODO: implement mallinfo/mallopt
-// TODO: Better testing
-// TODO: Return memory to system
-//
-// 9/28/2003 (new page-level allocator replaces ptmalloc2):
-// * malloc/free of small objects goes from ~300 ns to ~50 ns.
-// * allocation of a reasonably complicated struct
-// goes from about 1100 ns to about 300 ns.
-
-#include <new>
-
-#include <stdio.h>
-#include <stddef.h>
-
-#if defined(_linux_)
- #include <malloc.h>
-#endif
-
-#if defined(_darwin_)
- #include <malloc/malloc.h>
-#endif
-
-#include <string.h>
-#include <pthread.h>
-#include <unistd.h>
-#include <errno.h>
-#include <stdarg.h>
-
-#include "commandlineflags.h"
-#include "malloc_hook.h"
-#include "malloc_extension.h"
-#include "stacktrace.h"
-#include "internal_logging.h"
-#include "internal_spinlock.h"
-#include "pagemap.h"
-#include "system-alloc.h"
-
-#include <util/system/tls.h>
-
-#if defined(Y_HAVE_FAST_POD_TLS)
-Y_POD_STATIC_THREAD(void*) my_heap((void*)0);
-#endif
-
-static inline void SetHeap(pthread_key_t key, const void* pointer) {
- pthread_setspecific(key, pointer);
-
-#if defined(Y_HAVE_FAST_POD_TLS)
- my_heap = (void*)pointer;
-#endif
-}
-
-static inline void* GetHeap(pthread_key_t key) {
-#if defined(Y_HAVE_FAST_POD_TLS)
- return my_heap;
-#else
- return pthread_getspecific(key);
-#endif
-}
-
-//-------------------------------------------------------------------
-// Configuration
-//-------------------------------------------------------------------
-
-// Not all possible combinations of the following parameters make
-// sense. In particular, if kMaxSize increases, you may have to
-// increase kNumClasses as well.
-static const size_t kPageShift = 12;
-static const size_t kPageSize = 1 << kPageShift;
-static const size_t kMaxSize = 8u * kPageSize;
-static const size_t kAlignShift = 4;
-static const size_t kAlignment = 1 << kAlignShift;
-static const size_t kNumClasses = 170;
-
-// Allocates a big block of memory for the pagemap once we reach more than
-// 128MB
-static const size_t kPageMapBigAllocationThreshold = 128 << 20;
-
-// Minimum number of pages to fetch from system at a time. Must be
-// significantly bigger than kBlockSize to amortize system-call
-// overhead, and also to reduce external fragmentation. Also, we
-// should keep this value big because various incarnations of Linux
-// have small limits on the number of mmap() regions per
-// address-space.
-static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
-
-// Number of objects to move between a per-thread list and a central
-// list in one shot. We want this to be not too small so we can
-// amortize the lock overhead for accessing the central list. Making
-// it too big may temporarily cause unnecessary memory wastage in the
-// per-thread free list until the scavenger cleans up the list.
-static int num_objects_to_move[kNumClasses];
-
-// Maximum length we allow a per-thread free-list to have before we
-// move objects from it into the corresponding central free-list. We
-// want this big to avoid locking the central free-list too often. It
-// should not hurt to make this list somewhat big because the
-// scavenging code will shrink it down when its contents are not in use.
-static const int kMaxFreeListLength = 256;
-
-// Lower and upper bounds on the per-thread cache sizes
-static const size_t kMinThreadCacheSize = kMaxSize * 2;
-static const size_t kMaxThreadCacheSize = 2 << 20;
-
-// Default bound on the total amount of thread caches
-static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
-
-// For all span-lengths < kMaxPages we keep an exact-size list.
-// REQUIRED: kMaxPages >= kMinSystemAlloc;
-static const size_t kMaxPages = kMinSystemAlloc;
-
-/* The smallest prime > 2^n */
-static int primes_list[] = {
- // Small values might cause high rates of sampling
- // and hence are commented out.
- // 2, 5, 11, 17, 37, 67, 131, 257,
- // 521, 1031, 2053, 4099, 8209, 16411,
- 32771, 65537, 131101, 262147, 524309, 1048583,
- 2097169, 4194319, 8388617, 16777259, 33554467 };
-
-// Twice the approximate gap between sampling actions.
-// I.e., we take one sample approximately once every
-// tcmalloc_sample_parameter/2
-// bytes of allocation, i.e., ~ once every 128KB.
-// Must be a prime number.
-DEFINE_int64(tcmalloc_sample_parameter, 262147,
- "Twice the approximate gap between sampling actions."
- " Must be a prime number. Otherwise will be rounded up to a "
- " larger prime number");
-static size_t sample_period = 262147;
-// Protects sample_period above
-static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
-
-//-------------------------------------------------------------------
-// Mapping from size to size_class and vice versa
-//-------------------------------------------------------------------
-
-// A pair of arrays we use for implementing the mapping from a size to
-// its size class. Indexed by "floor(lg(size))".
-static const int kSizeBits = 8 * sizeof(size_t);
-static unsigned char size_base[kSizeBits];
-static unsigned char size_shift[kSizeBits];
-
-// Mapping from size class to size
-static size_t class_to_size[kNumClasses];
-
-// Mapping from size class to number of pages to allocate at a time
-static size_t class_to_pages[kNumClasses];
-
-
-
-// TransferCache is used to cache transfers of num_objects_to_move[size_class]
-// back and forth between thread caches and the central cache for a given size
-// class.
-struct TCEntry {
- void *head; // Head of chain of objects.
- void *tail; // Tail of chain of objects.
-};
-// A central cache freelist can have anywhere from 0 to kNumTransferEntries
-// slots to put link list chains into. To keep memory usage bounded the total
-// number of TCEntries across size classes is fixed. Currently each size
-// class is initially given one TCEntry which also means that the maximum any
-// one class can have is kNumClasses.
-static const int kNumTransferEntries = kNumClasses;
-
-// Return floor(log2(n)) for n > 0.
-#if (defined __i386__ || defined __x86_64__) && defined __GNUC__
-static inline int LgFloor(size_t n) {
- // "ro" for the input spec means the input can come from either a
- // register ("r") or offsetable memory ("o").
- size_t result;
- __asm__("bsr %1, %0"
- : "=r" (result) // Output spec
- : "ro" (n) // Input spec
- : "cc" // Clobbers condition-codes
- );
- return result;
-}
-#else
-// Note: the following only works for "n"s that fit in 32-bits, but
-// that is fine since we only use it for small sizes.
-static inline int LgFloor(size_t n) {
- int log = 0;
- for (int i = 4; i >= 0; --i) {
- int shift = (1 << i);
- size_t x = n >> shift;
- if (x != 0) {
- n = x;
- log += shift;
- }
- }
- ASSERT(n == 1);
- return log;
-}
-#endif
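On GCC/Clang the same floor(log2(n)) could also be obtained from a builtin instead of inline asm or a loop; a hedged equivalent sketch (not part of the original file), likewise undefined for n == 0:

#include <stddef.h>

// Sketch: __builtin_clzll counts leading zero bits of a 64-bit value,
// so 63 - clz gives floor(log2(n)) for any n > 0.
static inline int LgFloorBuiltin(size_t n) {
  return 63 - __builtin_clzll(static_cast<unsigned long long>(n));
}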
-
-
-// Some very basic linked list functions for dealing with using void * as
-// storage.
-
-static inline void *SLL_Next(void *t) {
- return *(reinterpret_cast<void**>(t));
-}
-
-static inline void SLL_SetNext(void *t, void *n) {
- *(reinterpret_cast<void**>(t)) = n;
-}
-
-static inline void SLL_Push(void **list, void *element) {
- SLL_SetNext(element, *list);
- *list = element;
-}
-
-static inline void *SLL_Pop(void **list) {
- void *result = *list;
- *list = SLL_Next(*list);
- return result;
-}
-
-
-// Remove N elements from a linked list to which head points. head will be
-// modified to point to the new head. start and end will point to the first
-// and last nodes of the range. Note that end will point to NULL after this
-// function is called.
-static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
- if (N == 0) {
- *start = NULL;
- *end = NULL;
- return;
- }
-
- void *tmp = *head;
- for (int i = 1; i < N; ++i) {
- tmp = SLL_Next(tmp);
- }
-
- *start = *head;
- *end = tmp;
- *head = SLL_Next(tmp);
- // Unlink range from list.
- SLL_SetNext(tmp, NULL);
-}
-
-static inline void SLL_PushRange(void **head, void *start, void *end) {
- if (!start) return;
- SLL_SetNext(end, *head);
- *head = start;
-}
-
-static inline size_t SLL_Size(void *head) {
- int count = 0;
- while (head) {
- count++;
- head = SLL_Next(head);
- }
- return count;
-}
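The helpers above implement an intrusive freelist: the first word of each free block doubles as the "next" pointer, so no extra nodes are allocated. A small usage sketch (illustrative, assuming the SLL_* functions above are in scope):

// Sketch: thread three raw chunks into a LIFO freelist.
static void SllDemo() {
  static void* chunks[3][4];             // each chunk is at least one pointer wide
  void* list = NULL;
  SLL_Push(&list, chunks[0]);
  SLL_Push(&list, chunks[1]);
  SLL_Push(&list, chunks[2]);
  ASSERT(SLL_Size(list) == 3);
  ASSERT(SLL_Pop(&list) == static_cast<void*>(chunks[2]));   // LIFO order
  ASSERT(SLL_Size(list) == 2);
}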
-
-// Setup helper functions.
-
-static inline size_t SizeClass(size_t size) {
- if (size == 0) size = 1;
- const size_t lg = LgFloor(size);
- const size_t align = size_shift[lg];
- return static_cast<size_t>(size_base[lg]) + ((size-1) >> align);
-}
-
-// Get the byte-size for a specified class
-static inline size_t ByteSizeForClass(size_t cl) {
- return class_to_size[cl];
-}
-
-
-static int NumMoveSize(size_t size) {
- if (size == 0) return 0;
- // Use approx 64k transfers between thread and central caches.
- int num = static_cast<int>(64.0 * 1024.0 / size);
- if (num < 2) num = 2;
- // Clamp well below kMaxFreeListLength to avoid ping pong between central
- // and thread caches.
- if (num > static_cast<int>(0.8 * kMaxFreeListLength))
- num = static_cast<int>(0.8 * kMaxFreeListLength);
-
- // Also, avoid bringing in too many objects into small object free
- // lists. There are lots of such lists, and if we allow each one to
- // fetch too many at a time, we end up having to scavenge too often
- // (especially when there are lots of threads and each thread gets a
- // small allowance for its thread cache).
- //
- // TODO: Make thread cache free list sizes dynamic so that we do not
- // have to equally divide a fixed resource amongst lots of threads.
- if (num > 32) num = 32;
-
- return num;
-}
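As a spot check of the clamping above with kMaxFreeListLength = 256: a 16-byte class computes 65536/16 = 4096, which is clamped first to 0.8*256 = 204 and then to the final cap of 32; an 8 KiB class gets 65536/8192 = 8; a 64 KiB class falls below the lower bound and gets 2.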
-
-// Initialize the mapping arrays
-static void InitSizeClasses() {
- // Special initialization for small sizes
- for (unsigned int lg = 0; lg < kAlignShift; lg++) {
- size_base[lg] = 1;
- size_shift[lg] = kAlignShift;
- }
-
- int next_class = 1;
- int alignshift = kAlignShift;
- int last_lg = -1;
- for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
- int lg = LgFloor(size);
- if (lg > last_lg) {
- // Increase alignment every so often.
- //
- // Since we double the alignment every time size doubles and
- // size >= 128, this means that space wasted due to alignment is
- // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
- // bytes, so the space wasted as a percentage starts falling for
- // sizes > 2K.
- if ((lg >= 7) && (alignshift < 8)) {
- alignshift++;
- }
- size_base[lg] = next_class - ((size-1) >> alignshift);
- size_shift[lg] = alignshift;
- }
-
- class_to_size[next_class] = size;
- last_lg = lg;
-
- next_class++;
- }
- if ((size_t)next_class >= kNumClasses) {
- MESSAGE("used up too many size classes: %d\n", next_class);
- abort();
- }
-
- // Initialize the number of pages we should allocate to split into
- // small objects for a given class.
- for (size_t cl = 1; cl < (size_t)next_class; cl++) {
- // Allocate enough pages so leftover is less than 1/8 of total.
- // This bounds wasted space to at most 12.5%.
- size_t psize = kPageSize;
- const size_t s = class_to_size[cl];
- while ((psize % s) > (psize >> 3)) {
- psize += kPageSize;
- }
- class_to_pages[cl] = psize >> kPageShift;
- }
-
- // Double-check sizes just to be safe
- for (size_t size = 0; size <= kMaxSize; size++) {
- const unsigned int sc = SizeClass(size);
- if (sc == 0) {
- MESSAGE("Bad size class %u for %" PRIuS "\n", sc, size);
- abort();
- }
- if (sc > 1 && size <= class_to_size[sc-1]) {
- MESSAGE("Allocating unnecessarily large class %u for %" PRIuS
- "\n", sc, size);
- abort();
- }
- if (sc >= kNumClasses) {
- MESSAGE("Bad size class %u for %" PRIuS "\n", sc, size);
- abort();
- }
- const size_t s = class_to_size[sc];
- if (size > s) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %u)\n", s, size, sc);
- abort();
- }
- if (s == 0) {
- MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %u)\n", s, size, sc);
- abort();
- }
- }
-
- // Initialize the num_objects_to_move array.
- for (size_t cl = 1; cl < kNumClasses; ++cl) {
- num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
- }
-}
-
-// -------------------------------------------------------------------------
-// Simple allocator for objects of a specified type. External locking
-// is required before accessing one of these objects.
-// -------------------------------------------------------------------------
-
-// Metadata allocator -- keeps stats about how many bytes allocated
-static uint64_t metadata_system_bytes = 0;
-static void* MetaDataAlloc(size_t bytes) {
- void* result = TCMalloc_SystemAlloc(bytes);
- if (result != NULL) {
- metadata_system_bytes += bytes;
- }
- return result;
-}
-
-template <class T>
-class PageHeapAllocator {
- private:
- // How much to allocate from system at a time
- static const int kAllocIncrement = 128 << 10;
-
- // Aligned size of T
- static const size_t kAlignedSize
- = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
-
- // Free area from which to carve new objects
- char* free_area_;
- size_t free_avail_;
-
- // Free list of already carved objects
- void* free_list_;
-
- // Number of allocated but unfreed objects
- int inuse_;
-
- public:
- void Init() {
- ASSERT(kAlignedSize <= kAllocIncrement);
- inuse_ = 0;
- free_area_ = NULL;
- free_avail_ = 0;
- free_list_ = NULL;
- // Reserve some space at the beginning to avoid fragmentation.
- Delete(New());
- }
-
- T* New() {
- // Consult free list
- void* result;
- if (free_list_ != NULL) {
- result = free_list_;
- free_list_ = *(reinterpret_cast<void**>(result));
- } else {
- if (free_avail_ < kAlignedSize) {
- // Need more room
- free_area_ = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
- if (free_area_ == NULL) abort();
- free_avail_ = kAllocIncrement;
- }
- result = free_area_;
- free_area_ += kAlignedSize;
- free_avail_ -= kAlignedSize;
- }
- inuse_++;
- return reinterpret_cast<T*>(result);
- }
-
- void Delete(T* p) {
- *(reinterpret_cast<void**>(p)) = free_list_;
- free_list_ = p;
- inuse_--;
- }
-
- int inuse() const { return inuse_; }
-};
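A usage sketch for the metadata allocator above (the record type and function names are illustrative); external locking is the caller's responsibility, typically via the page-heap lock.

// Sketch: a fixed-type arena carved out of TCMalloc_SystemAlloc memory.
struct ToyRecord {
  int id;
  ToyRecord* next;
};

static PageHeapAllocator<ToyRecord> toy_allocator;

static void ToyAllocatorDemo() {
  toy_allocator.Init();                  // caller must provide its own locking
  ToyRecord* r = toy_allocator.New();    // aborts instead of returning NULL on OOM
  r->id = 1;
  toy_allocator.Delete(r);               // back onto the internal freelist
  ASSERT(toy_allocator.inuse() == 0);
}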
-
-// -------------------------------------------------------------------------
-// Span - a contiguous run of pages
-// -------------------------------------------------------------------------
-
-// Type that can hold a page number
-typedef uintptr_t PageID;
-
-// Type that can hold the length of a run of pages
-typedef uintptr_t Length;
-
-// Convert byte size into pages
-static inline Length pages(size_t bytes) {
- return ((bytes + kPageSize - 1) >> kPageShift);
-}
-
-// Convert a user size into the number of bytes that will actually be
-// allocated
-static size_t AllocationSize(size_t bytes) {
- if (bytes > kMaxSize) {
- // Large object: we allocate an integral number of pages
- return pages(bytes) << kPageShift;
- } else {
- // Small object: find the size class to which it belongs
- return ByteSizeForClass(SizeClass(bytes));
- }
-}
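As a spot check with kPageShift = 12 and kMaxSize = 32768: a 100000-byte request is a large object, so pages(100000) = ceil(100000/4096) = 25 and AllocationSize(100000) = 25 * 4096 = 102400 bytes; a 100-byte request instead rounds up to the byte size of its size class.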
-
-// Information kept for a span (a contiguous run of pages).
-struct Span {
- PageID start; // Starting page number
- Length length; // Number of pages in span
- Span* next; // Used when in link list
- Span* prev; // Used when in link list
- void* objects; // Linked list of free objects
- unsigned int free : 1; // Is the span free
- unsigned int sample : 1; // Sampled object?
- unsigned int sizeclass : 8; // Size-class for small objects (or 0)
- unsigned int refcount : 11; // Number of non-free objects
-
-#undef SPAN_HISTORY
-#ifdef SPAN_HISTORY
- // For debugging, we can keep a log of events per span
- int nexthistory;
- char history[64];
- int value[64];
-#endif
-};
-
-#ifdef SPAN_HISTORY
-void Event(Span* span, char op, int v = 0) {
- span->history[span->nexthistory] = op;
- span->value[span->nexthistory] = v;
- span->nexthistory++;
- if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
-}
-#else
-#define Event(s,o,v) ((void) 0)
-#endif
-
-// Allocator/deallocator for spans
-static PageHeapAllocator<Span> span_allocator;
-static Span* NewSpan(PageID p, Length len) {
- Span* result = span_allocator.New();
- memset(result, 0, sizeof(*result));
- result->start = p;
- result->length = len;
-#ifdef SPAN_HISTORY
- result->nexthistory = 0;
-#endif
- return result;
-}
-
-static void DeleteSpan(Span* span) {
-#ifndef NDEBUG
- // In debug mode, trash the contents of deleted Spans
- memset(span, 0x3f, sizeof(*span));
-#endif
- span_allocator.Delete(span);
-}
-
-// -------------------------------------------------------------------------
-// Doubly linked list of spans.
-// -------------------------------------------------------------------------
-
-static void DLL_Init(Span* list) {
- list->next = list;
- list->prev = list;
-}
-
-static void DLL_Remove(Span* span) {
- span->prev->next = span->next;
- span->next->prev = span->prev;
- span->prev = NULL;
- span->next = NULL;
-}
-
-static inline bool DLL_IsEmpty(const Span* list) {
- return list->next == list;
-}
-
-static unsigned int DLL_Length(const Span* list) {
- unsigned int result = 0;
- for (Span* s = list->next; s != list; s = s->next) {
- result++;
- }
- return result;
-}
-
-#if 0 /* Not needed at the moment -- causes compiler warnings if not used */
-static void DLL_Print(const char* label, const Span* list) {
- MESSAGE("%-10s %p:", label, list);
- for (const Span* s = list->next; s != list; s = s->next) {
- MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
- }
- MESSAGE("\n");
-}
-#endif
-
-static void DLL_Prepend(Span* list, Span* span) {
- ASSERT(span->next == NULL);
- ASSERT(span->prev == NULL);
- span->next = list->next;
- span->prev = list;
- list->next->prev = span;
- list->next = span;
-}
-
-static void DLL_InsertOrdered(Span* list, Span* span) {
- ASSERT(span->next == NULL);
- ASSERT(span->prev == NULL);
- // Look for appropriate place to insert
- Span* x = list;
- while ((x->next != list) && (x->next->start < span->start)) {
- x = x->next;
- }
- span->next = x->next;
- span->prev = x;
- x->next->prev = span;
- x->next = span;
-}
-
-// -------------------------------------------------------------------------
-// Stack traces kept for sampled allocations
-// The following state is protected by pageheap_lock_.
-// -------------------------------------------------------------------------
-
-static const int kMaxStackDepth = 31;
-struct StackTrace {
- uintptr_t size; // Size of object
- int depth; // Number of PC values stored in array below
- void* stack[kMaxStackDepth];
-};
-static PageHeapAllocator<StackTrace> stacktrace_allocator;
-static Span sampled_objects;
-
-// Linked list of stack traces recorded every time we allocated memory
-// from the system. Useful for finding allocation sites that cause
-// increase in the footprint of the system. The linked list pointer
-// is stored in trace->stack[kMaxStackDepth-1].
-static StackTrace* growth_stacks = NULL;
-
-// -------------------------------------------------------------------------
-// Map from page-id to per-page data
-// -------------------------------------------------------------------------
-
-// We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
-
-// Selector class -- general selector uses 3-level map
-template <int BITS> class MapSelector {
- public:
- typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
-};
-
-// A two-level map for 32-bit machines
-template <> class MapSelector<32> {
- public:
- typedef TCMalloc_PageMap2<32-kPageShift> Type;
-};
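-// For example, assuming kPageShift == 12 (4 KiB pages), a 32-bit address
-// space has only 2^20 page ids, which the two-level map covers with little
-// metadata; 64-bit pointers use the general three-level map to avoid an
-// impractically large flat array.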
-
-// -------------------------------------------------------------------------
-// Page-level allocator
-// * Eager coalescing
-//
-// Heap for page-level allocation. We allow allocating and freeing
-// contiguous runs of pages (called "spans").
-// -------------------------------------------------------------------------
-
-class TCMalloc_PageHeap {
- public:
- TCMalloc_PageHeap();
-
- // Allocate a run of "n" pages. Returns NULL if out of memory.
- // Caller should not pass "n == 0" -- instead, n should have
- // been rounded up already.
- Span* New(Length n);
-
- // Delete the span, which covers pages [span->start .. span->start+span->length-1].
- // REQUIRES: span was returned by earlier call to New() and
- // has not yet been deleted.
- void Delete(Span* span);
-
- // Mark an allocated span as being used for small objects of the
- // specified size-class.
- // REQUIRES: span was returned by an earlier call to New()
- // and has not yet been deleted.
- void RegisterSizeClass(Span* span, size_t sc);
-
- // Split an allocated span into two spans: one of length "n" pages
- // followed by another span of length "span->length - n" pages.
- // Modifies "*span" to point to the first span of length "n" pages.
- // Returns a pointer to the second span.
- //
- // REQUIRES: "0 < n < span->length"
- // REQUIRES: !span->free
- // REQUIRES: span->sizeclass == 0
- Span* Split(Span* span, Length n);
-
- // Return the descriptor for the specified page.
- inline Span* GetDescriptor(PageID p) const {
- return reinterpret_cast<Span*>(pagemap_.get(p));
- }
-
- // Dump state to stderr
- void Dump(TCMalloc_Printer* out);
-
- // Return number of bytes allocated from system
- inline uint64_t SystemBytes() const { return system_bytes_; }
-
- // Return number of free bytes in heap
- uint64_t FreeBytes() const {
- return (static_cast<uint64_t>(free_pages_) << kPageShift);
- }
-
- bool Check();
- bool CheckList(Span* list, Length min_pages, Length max_pages);
-
- private:
- // Pick the appropriate map type based on pointer size
- typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
- PageMap pagemap_;
-
- // List of free spans of length >= kMaxPages
- Span large_;
-
- // Array mapping from span length to a doubly linked list of free spans
- Span free_[kMaxPages];
-
- // Number of pages kept in free lists
- uintptr_t free_pages_;
-
- // Bytes allocated from system
- uint64_t system_bytes_;
-
- bool GrowHeap(Length n);
-
- // REQUIRES span->length >= n
- // Remove span from its free list, and move any leftover part of
- // span into appropriate free lists. Also update "span" to have
- // length exactly "n" and mark it as non-free so it can be returned
- // to the client.
- void Carve(Span* span, Length n);
-
- void RecordSpan(Span* span) {
- pagemap_.set(span->start, span);
- if (span->length > 1) {
- pagemap_.set(span->start + span->length - 1, span);
- }
- }
-};
-
-TCMalloc_PageHeap::TCMalloc_PageHeap() : pagemap_(MetaDataAlloc),
- free_pages_(0),
- system_bytes_(0) {
- DLL_Init(&large_);
- for (unsigned int i = 0; i < kMaxPages; i++) {
- DLL_Init(&free_[i]);
- }
-}
-
-Span* TCMalloc_PageHeap::New(Length n) {
- ASSERT(Check());
-
- // n==0 occurs iff pages() overflowed when kPageSize-1 was added to the byte count
- if (n == 0) return NULL;
-
- // Find first size >= n that has a non-empty list
- for (Length s = n; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) {
- Span* result = free_[s].next;
- Carve(result, n);
- ASSERT(Check());
- free_pages_ -= n;
- return result;
- }
- }
-
- // Look in the large list. If we do not find anything on the first pass,
- // we grow the heap and try again.
- for (int i = 0; i < 2; i++) {
- // find the best span (closest to n in size)
- Span *best = NULL;
- for (Span* span = large_.next; span != &large_; span = span->next) {
- if (span->length >= n &&
- (best == NULL || span->length < best->length)) {
- best = span;
- }
- }
- if (best != NULL) {
- Carve(best, n);
- ASSERT(Check());
- free_pages_ -= n;
- return best;
- }
- if (i == 0) {
- // Nothing suitable in large list. Grow the heap and look again.
- if (!GrowHeap(n)) {
- ASSERT(Check());
- return NULL;
- }
- }
- }
- return NULL;
-}
-
-Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
- ASSERT(0 < n);
- ASSERT(n < span->length);
- ASSERT(!span->free);
- ASSERT(span->sizeclass == 0);
- Event(span, 'T', n);
-
- const int extra = span->length - n;
- Span* leftover = NewSpan(span->start + n, extra);
- Event(leftover, 'U', extra);
- RecordSpan(leftover);
- pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
- span->length = n;
-
- return leftover;
-}
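-// Note that Split() only refreshes the pagemap entry for the new last page of
-// the shrunk span (plus the boundaries of the leftover via RecordSpan());
-// interior pages may stay stale. That is sufficient because Delete() only
-// consults the pagemap entries at span boundaries when coalescing.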
-
-void TCMalloc_PageHeap::Carve(Span* span, Length n) {
- ASSERT(n > 0);
- DLL_Remove(span);
- span->free = 0;
- Event(span, 'A', n);
-
- const int extra = span->length - n;
- ASSERT(extra >= 0);
- if (extra > 0) {
- Span* leftover = NewSpan(span->start + n, extra);
- leftover->free = 1;
- Event(leftover, 'S', extra);
- RecordSpan(leftover);
- if ((unsigned int)extra < kMaxPages) {
- DLL_Prepend(&free_[extra], leftover);
- } else {
- DLL_InsertOrdered(&large_, leftover);
- }
- span->length = n;
- pagemap_.set(span->start + n - 1, span);
- }
-}
-
-void TCMalloc_PageHeap::Delete(Span* span) {
- ASSERT(Check());
- ASSERT(!span->free);
- ASSERT(span->length > 0);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start + span->length - 1) == span);
- span->sizeclass = 0;
- span->sample = 0;
-
- // Coalesce -- we guarantee that "p" != 0, so no bounds checking is
- // necessary. We do not bother resetting the stale pagemap
- // entries for the pieces we are merging together because we only
- // care about the pagemap entries for the boundaries.
- const PageID p = span->start;
- const Length n = span->length;
- Span* prev = GetDescriptor(p-1);
- if (prev != NULL && prev->free) {
- // Merge preceding span into this span
- ASSERT(prev->start + prev->length == p);
- const Length len = prev->length;
- DLL_Remove(prev);
- DeleteSpan(prev);
- span->start -= len;
- span->length += len;
- pagemap_.set(span->start, span);
- Event(span, 'L', len);
- }
- Span* next = GetDescriptor(p+n);
- if (next != NULL && next->free) {
- // Merge next span into this span
- ASSERT(next->start == p+n);
- const Length len = next->length;
- DLL_Remove(next);
- DeleteSpan(next);
- span->length += len;
- pagemap_.set(span->start + span->length - 1, span);
- Event(span, 'R', len);
- }
-
- Event(span, 'D', span->length);
- span->free = 1;
- if (span->length < kMaxPages) {
- DLL_Prepend(&free_[span->length], span);
- } else {
- DLL_InsertOrdered(&large_, span);
- }
- free_pages_ += n;
-
- ASSERT(Check());
-}
-
-void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
- // Associate span object with all interior pages as well
- ASSERT(!span->free);
- ASSERT(GetDescriptor(span->start) == span);
- ASSERT(GetDescriptor(span->start+span->length-1) == span);
- Event(span, 'C', sc);
- span->sizeclass = sc;
- for (Length i = 1; i < span->length-1; i++) {
- pagemap_.set(span->start+i, span);
- }
-}
-
-void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
- int nonempty_sizes = 0;
- for (unsigned int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) nonempty_sizes++;
- }
- out->printf("------------------------------------------------\n");
- out->printf("PageHeap: %d sizes; %6.1f MB free\n", nonempty_sizes,
- (static_cast<double>(free_pages_) * kPageSize) / 1048576.0);
- out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
- for (unsigned int s = 0; s < kMaxPages; s++) {
- if (!DLL_IsEmpty(&free_[s])) {
- const unsigned int list_length = DLL_Length(&free_[s]);
- uint64_t s_pages = s * list_length;
- cumulative += s_pages;
- out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
- s, list_length,
- (s_pages << kPageShift) / 1048576.0,
- (cumulative << kPageShift) / 1048576.0);
- }
- }
-
- uint64_t large_pages = 0;
- unsigned int large_spans = 0;
- for (Span* s = large_.next; s != &large_; s = s->next) {
- out->printf(" [ %6" PRIuS " pages ]\n", s->length);
- large_pages += s->length;
- large_spans++;
- }
- cumulative += large_pages;
- out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum\n",
- large_spans,
- (large_pages << kPageShift) / 1048576.0,
- (cumulative << kPageShift) / 1048576.0);
-}
-
-static void RecordGrowth(size_t growth) {
- StackTrace* t = stacktrace_allocator.New();
- t->depth = GetStackTrace(t->stack, kMaxStackDepth-1, 3);
- t->size = growth;
- t->stack[kMaxStackDepth-1] = reinterpret_cast<void*>(growth_stacks);
- growth_stacks = t;
-}
-
-bool TCMalloc_PageHeap::GrowHeap(Length n) {
- ASSERT(kMaxPages >= kMinSystemAlloc);
- Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
- void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
- if (ptr == NULL) {
- if (n < ask) {
- // Try growing just "n" pages
- ask = n;
- ptr = TCMalloc_SystemAlloc(ask << kPageShift, kPageSize);
- }
- if (ptr == NULL) return false;
- }
- RecordGrowth(ask << kPageShift);
-
- uint64_t old_system_bytes = system_bytes_;
- system_bytes_ += (ask << kPageShift);
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- ASSERT(p > 0);
-
- // If we already have a lot of pages allocated, just preallocate a bunch of
- // memory for the page map. This prevents fragmentation by pagemap metadata
- // when a program keeps allocating and freeing large blocks.
-
- if (old_system_bytes < kPageMapBigAllocationThreshold
- && system_bytes_ >= kPageMapBigAllocationThreshold) {
- pagemap_.PreallocateMoreMemory();
- }
-
- // Make sure pagemap_ has entries for all of the new pages.
- // Plus ensure one before and one after so coalescing code
- // does not need bounds-checking.
- if (pagemap_.Ensure(p-1, ask+2)) {
- // Pretend the new area is allocated and then Delete() it to
- // cause any necessary coalescing to occur.
- //
- // We do not adjust free_pages_ here since Delete() will do it for us.
- Span* span = NewSpan(p, ask);
- RecordSpan(span);
- Delete(span);
- ASSERT(Check());
- return true;
- } else {
- // We could not allocate memory within "pagemap_"
- // TODO: Once we can return memory to the system, return the new span
- return false;
- }
-}
-
-bool TCMalloc_PageHeap::Check() {
- ASSERT(free_[0].next == &free_[0]);
- CheckList(&large_, kMaxPages, 1000000000);
- for (Length s = 1; s < kMaxPages; s++) {
- CheckList(&free_[s], s, s);
- }
- return true;
-}
-
-bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
- for (Span* s = list->next; s != list; s = s->next) {
- CHECK_CONDITION(s->free);
- CHECK_CONDITION(s->length >= min_pages);
- CHECK_CONDITION(s->length <= max_pages);
- CHECK_CONDITION(GetDescriptor(s->start) == s);
- CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
- }
- return true;
-}
-
-//-------------------------------------------------------------------
-// Free list
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache_FreeList {
- private:
- void* list_; // Linked list of nodes
- uint16_t length_; // Current length
- uint16_t lowater_; // Low water mark for list length
-
- public:
- void Init() {
- list_ = NULL;
- length_ = 0;
- lowater_ = 0;
- }
-
- // Return current length of list
- int length() const {
- return length_;
- }
-
- // Is list empty?
- bool empty() const {
- return list_ == NULL;
- }
-
- // Low-water mark management
- int lowwatermark() const { return lowater_; }
- void clear_lowwatermark() { lowater_ = length_; }
-
- void Push(void* ptr) {
- SLL_Push(&list_, ptr);
- length_++;
- }
-
- void* Pop() {
- ASSERT(list_ != NULL);
- length_--;
- if (length_ < lowater_) lowater_ = length_;
- return SLL_Pop(&list_);
- }
-
- void PushRange(int N, void *start, void *end) {
- SLL_PushRange(&list_, start, end);
- length_ += N;
- }
-
- void PopRange(int N, void **start, void **end) {
- SLL_PopRange(&list_, N, start, end);
- ASSERT(length_ >= N);
- length_ -= N;
- if (length_ < lowater_) lowater_ = length_;
- }
-};
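-// The low-water mark (lowater_) records the shortest this list has been since
-// clear_lowwatermark() was last called; Scavenge() uses it to decide how many
-// objects can be returned to the central cache without hurting this thread's
-// cache hit rate.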
-
-//-------------------------------------------------------------------
-// Data kept per thread
-//-------------------------------------------------------------------
-
-class TCMalloc_ThreadCache {
- private:
- typedef TCMalloc_ThreadCache_FreeList FreeList;
-
- size_t size_; // Combined size of data
- pthread_t tid_; // Which thread owns it
- bool in_setspecific_; // In call to pthread_setspecific?
- FreeList list_[kNumClasses]; // Array indexed by size-class
-
- // We sample allocations, biased by the size of the allocation
- uint32_t rnd_; // Cheap random number generator
- size_t bytes_until_sample_; // Bytes until we sample next
-
- public:
- // All ThreadCache objects are kept in a linked list (for stats collection)
- TCMalloc_ThreadCache* next_;
- TCMalloc_ThreadCache* prev_;
-
- void Init(pthread_t tid);
- void Cleanup();
-
- // Accessors (mostly just for printing stats)
- int freelist_length(size_t cl) const { return list_[cl].length(); }
-
- // Total byte size in cache
- size_t Size() const { return size_; }
-
- void* Allocate(size_t size);
- void Deallocate(void* ptr, size_t size_class);
-
- void FetchFromCentralCache(size_t cl);
- void ReleaseToCentralCache(size_t cl, int N);
- void Scavenge();
- void Print() const;
-
- // Record allocation of "k" bytes. Return true iff allocation
- // should be sampled
- bool SampleAllocation(size_t k);
-
- // Pick next sampling point
- void PickNextSample();
-
- static void InitModule();
- static void InitTSD();
- static TCMalloc_ThreadCache* GetCache();
- static TCMalloc_ThreadCache* GetCacheIfPresent();
- static void* CreateCacheIfNecessary();
- static void DeleteCache(void* ptr);
- static void RecomputeThreadCacheSize();
-};
-
-//-------------------------------------------------------------------
-// Data kept per size-class in central cache
-//-------------------------------------------------------------------
-
-class TCMalloc_Central_FreeList {
- public:
- void Init(size_t cl);
-
- // These methods all do internal locking.
-
- // Insert the specified range into the central freelist. N is the number of
- // elements in the range.
- void InsertRange(void *start, void *end, int N);
-
- // Removes up to *N elements; the actual number fetched is returned in *N.
- void RemoveRange(void **start, void **end, int *N);
-
- // Returns the number of free objects in cache.
- int length() {
- SpinLockHolder h(&lock_);
- return counter_;
- }
-
- // Returns the number of free objects in the transfer cache.
- int tc_length() {
- SpinLockHolder h(&lock_);
- return used_slots_ * num_objects_to_move[size_class_];
- }
-
- private:
- // REQUIRES: lock_ is held
- // Remove object from cache and return.
- // Return NULL if no free entries in cache.
- void* FetchFromSpans();
-
- // REQUIRES: lock_ is held
- // Remove object from cache and return. Fetches
- // from pageheap if cache is empty. Only returns
- // NULL on allocation failure.
- void* FetchFromSpansSafe();
-
- // REQUIRES: lock_ is held
- // Release a linked list of objects to spans.
- // May temporarily release lock_.
- void ReleaseListToSpans(void *start);
-
- // REQUIRES: lock_ is held
- // Release an object to spans.
- // May temporarily release lock_.
- void ReleaseToSpans(void* object);
-
- // REQUIRES: lock_ is held
- // Populate cache by fetching from the page heap.
- // May temporarily release lock_.
- void Populate();
-
- // REQUIRES: lock is held.
- // Tries to make room for a TCEntry. If the cache is full, it will try to
- // expand it at the expense of some other size class's cache. Returns false
- // if there is no space.
- bool MakeCacheSpace();
-
- // REQUIRES: lock_ for locked_size_class is held.
- // Picks a "random" size class to steal a TCEntry slot from. In reality it
- // just iterates over the sizeclasses but does so without taking a lock.
- // Returns true on success.
- // May temporarily lock a "random" size class.
- static bool EvictRandomSizeClass(size_t locked_size_class, bool force);
-
- // REQUIRES: lock_ is *not* held.
- // Tries to shrink the cache. If force is true it will release objects to
- // spans if doing so allows it to shrink the cache. Returns false if it
- // failed to shrink the cache. Decrements cache_size_ on success.
- // May temporarily take lock_. If it takes lock_, the locked_size_class
- // lock is released to keep the thread from holding two size class locks
- // concurrently, which could lead to a deadlock.
- bool ShrinkCache(int locked_size_class, bool force);
-
- // This lock protects all the data members. used_slots_ and cache_size_
- // may be looked at without holding the lock.
- SpinLock lock_;
-
- // We keep linked lists of empty and non-empty spans.
- size_t size_class_; // My size class
- Span empty_; // Dummy header for list of empty spans
- Span nonempty_; // Dummy header for list of non-empty spans
- size_t counter_; // Number of free objects in cache entry
-
- // Here we reserve space for TCEntry cache slots. Since one size class can
- // end up getting all of the TCEntry quota in the system, we just preallocate
- // a sufficient number of entries here.
- TCEntry tc_slots_[kNumTransferEntries];
-
- // Number of currently used cached entries in tc_slots_. This variable is
- // updated under a lock but can be read without one.
- int32_t used_slots_;
- // The current number of slots for this size class. This is an
- // adaptive value that is increased if there is lots of traffic
- // on a given size class.
- int32_t cache_size_;
-};
-
-// Pad each CentralCache object to a multiple of 64 bytes
-class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
- private:
- char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
-};
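-// The padding expression ((64 - (sizeof(...) % 64)) % 64) computes how many
-// bytes are needed to reach the next multiple of 64 (zero if the size is
-// already a multiple), presumably so the per-size-class locks in
-// central_cache[] land on separate cache lines and do not falsely share.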
-
-//-------------------------------------------------------------------
-// Global variables
-//-------------------------------------------------------------------
-
-// Central cache -- a collection of free-lists, one per size-class.
-// We have a separate lock per free-list to reduce contention.
-static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
-
-// Page-level allocator
-static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
-static char pageheap_memory[sizeof(TCMalloc_PageHeap)];
-static bool phinited = false;
-
-// Avoid extra level of indirection by making "pageheap" be just an alias
-// of pageheap_memory.
-#define pageheap ((TCMalloc_PageHeap*) pageheap_memory)
-
-// Thread-specific key. Initialization here is somewhat tricky
-// because some Linux startup code invokes malloc() before it
-// is in a good enough state to handle pthread_key_create().
-// Therefore, we use TSD keys only after tsd_inited is set to true.
-// Until then, we use a slow path to get the heap object.
-static bool tsd_inited = false;
-static pthread_key_t heap_key;
-
-// Allocator for thread heaps
-static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
-
-// Linked list of heap objects. Protected by pageheap_lock.
-static TCMalloc_ThreadCache* thread_heaps = NULL;
-static int thread_heap_count = 0;
-
-// Overall thread cache size. Protected by pageheap_lock.
-static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
-
-// Global per-thread cache size. Writes are protected by
-// pageheap_lock. Reads are done without any locking, which should be
-// fine as long as size_t can be written atomically and we don't place
-// invariants between this variable and other pieces of state.
-static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
-
-//-------------------------------------------------------------------
-// Central cache implementation
-//-------------------------------------------------------------------
-
-void TCMalloc_Central_FreeList::Init(size_t cl) {
- lock_.Init();
- size_class_ = cl;
- DLL_Init(&empty_);
- DLL_Init(&nonempty_);
- counter_ = 0;
-
- cache_size_ = 1;
- used_slots_ = 0;
- ASSERT(cache_size_ <= kNumTransferEntries);
-}
-
-void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
- while (start) {
- void *next = SLL_Next(start);
- ReleaseToSpans(start);
- start = next;
- }
-}
-
-void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
- const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
- ASSERT(span != NULL);
- ASSERT(span->refcount > 0);
-
- // If span is empty, move it to non-empty list
- if (span->objects == NULL) {
- DLL_Remove(span);
- DLL_Prepend(&nonempty_, span);
- Event(span, 'N', 0);
- }
-
- // The following check is expensive, so it is disabled by default
- if (false) {
- // Check that object does not occur in list
- int got = 0;
- for (void* p = span->objects; p != NULL; p = *((void**) p)) {
- ASSERT(p != object);
- got++;
- }
- ASSERT(got + span->refcount ==
- (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
- }
-
- counter_++;
- span->refcount--;
- if (span->refcount == 0) {
- Event(span, '#', 0);
- counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
- DLL_Remove(span);
-
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- {
- SpinLockHolder h(&pageheap_lock);
- pageheap->Delete(span);
- }
- lock_.Lock();
- } else {
- *(reinterpret_cast<void**>(object)) = span->objects;
- span->objects = object;
- }
-}
-
-bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
- size_t locked_size_class, bool force) {
- static unsigned int race_counter = 0;
- unsigned int t = race_counter++; // Updated without a lock, but who cares.
- if (t >= kNumClasses) {
- while (t >= kNumClasses) {
- t -= kNumClasses;
- }
- race_counter = t;
- }
- ASSERT(t >= 0);
- ASSERT(t < kNumClasses);
- if (t == locked_size_class) return false;
- return central_cache[t].ShrinkCache(locked_size_class, force);
-}
-
-bool TCMalloc_Central_FreeList::MakeCacheSpace() {
- // Is there room in the cache?
- if (used_slots_ < cache_size_) return true;
- // Check whether we can expand this cache.
- if (cache_size_ == kNumTransferEntries) return false;
- // Ok, we'll try to grab an entry from some other size class.
- if (EvictRandomSizeClass(size_class_, false) ||
- EvictRandomSizeClass(size_class_, true)) {
- // Succeeded in evicting, we're going to make our cache larger.
- cache_size_++;
- return true;
- }
- return false;
-}
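-// MakeCacheSpace() first attempts a non-forcing eviction, which only shrinks
-// caches that have unused slots; if that fails it retries with force=true,
-// which may push a full batch from some other size class back to its spans.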
-
-
-namespace {
-class LockInverter {
- private:
- TCMalloc_SpinLock *held_, *temp_;
- public:
- inline explicit LockInverter(TCMalloc_SpinLock* held, TCMalloc_SpinLock *temp)
- : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
- inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
-};
-}
-
-bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
- // Start with a quick check without taking a lock.
- if (cache_size_ == 0) return false;
- // We don't evict from a full cache unless we are 'forcing'.
- if (force == false && used_slots_ == cache_size_) return false;
-
- // Grab lock, but first release the other lock held by this thread. We use
- // the lock inverter to ensure that we never hold two size class locks
- // concurrently. That can create a deadlock because there is no well
- // defined nesting order.
- LockInverter li(&central_cache[locked_size_class].lock_, &lock_);
- ASSERT(used_slots_ <= cache_size_);
- ASSERT(0 <= cache_size_);
- if (cache_size_ == 0) return false;
- if (used_slots_ == cache_size_) {
- if (force == false) return false;
- // ReleaseListToSpans releases the lock, so we have to make all the
- // updates to the central list before calling it.
- cache_size_--;
- used_slots_--;
- ReleaseListToSpans(tc_slots_[used_slots_].head);
- return true;
- }
- cache_size_--;
- return true;
-}
-
-void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
- SpinLockHolder h(&lock_);
- if (N == num_objects_to_move[size_class_] &&
- MakeCacheSpace()) {
- int slot = used_slots_++;
- ASSERT(slot >=0);
- ASSERT(slot < kNumTransferEntries);
- TCEntry *entry = &tc_slots_[slot];
- entry->head = start;
- entry->tail = end;
- return;
- }
- ReleaseListToSpans(start);
-}
-
-void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
- int num = *N;
- ASSERT(num > 0);
-
- SpinLockHolder h(&lock_);
- if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
- int slot = --used_slots_;
- ASSERT(slot >= 0);
- TCEntry *entry = &tc_slots_[slot];
- *start = entry->head;
- *end = entry->tail;
- return;
- }
-
- // TODO: Prefetch multiple TCEntries?
- void *tail = FetchFromSpansSafe();
- if (!tail) {
- // We are completely out of memory.
- *start = *end = NULL;
- *N = 0;
- return;
- }
-
- SLL_SetNext(tail, NULL);
- void *head = tail;
- int count = 1;
- while (count < num) {
- void *t = FetchFromSpans();
- if (!t) break;
- SLL_Push(&head, t);
- count++;
- }
- *start = head;
- *end = tail;
- *N = count;
-}
-
-
-void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
- void *t = FetchFromSpans();
- if (!t) {
- Populate();
- t = FetchFromSpans();
- }
- return t;
-}
-
-void* TCMalloc_Central_FreeList::FetchFromSpans() {
- if (DLL_IsEmpty(&nonempty_)) return NULL;
- Span* span = nonempty_.next;
-
- ASSERT(span->objects != NULL);
- span->refcount++;
- void* result = span->objects;
- span->objects = *(reinterpret_cast<void**>(result));
- if (span->objects == NULL) {
- // Move to empty list
- DLL_Remove(span);
- DLL_Prepend(&empty_, span);
- Event(span, 'E', 0);
- }
- counter_--;
- return result;
-}
-
-// Fetch memory from the page heap (growing it from the system if needed)
-// and add it to the central cache freelist.
-void TCMalloc_Central_FreeList::Populate() {
- // Release central list lock while operating on pageheap
- lock_.Unlock();
- const size_t npages = class_to_pages[size_class_];
-
- Span* span;
- {
- SpinLockHolder h(&pageheap_lock);
- span = pageheap->New(npages);
- if (span) pageheap->RegisterSizeClass(span, size_class_);
- }
- if (span == NULL) {
- MESSAGE("allocation failed: %d\n", errno);
- lock_.Lock();
- return;
- }
-
- // Split the block into pieces and add to the free-list
- // TODO: coloring of objects to avoid cache conflicts?
- void** tail = &span->objects;
- char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
- char* limit = ptr + (npages << kPageShift);
- const size_t size = ByteSizeForClass(size_class_);
- int num = 0;
- while (ptr + size <= limit) {
- *tail = ptr;
- tail = reinterpret_cast<void**>(ptr);
- ptr += size;
- num++;
- }
- ASSERT(ptr <= limit);
- *tail = NULL;
- span->refcount = 0; // No sub-object in use yet
-
- // Add span to list of non-empty spans
- lock_.Lock();
- DLL_Prepend(&nonempty_, span);
- counter_ += num;
-}
-
-//-------------------------------------------------------------------
-// TCMalloc_ThreadCache implementation
-//-------------------------------------------------------------------
-
-inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
- if (bytes_until_sample_ < k) {
- PickNextSample();
- return true;
- } else {
- bytes_until_sample_ -= k;
- return false;
- }
-}
-
-void TCMalloc_ThreadCache::Init(pthread_t tid) {
- size_ = 0;
- next_ = NULL;
- prev_ = NULL;
- tid_ = tid;
- in_setspecific_ = false;
- for (size_t cl = 0; cl < kNumClasses; ++cl) {
- list_[cl].Init();
- }
-
- // Initialize RNG -- run it for a bit to get to good values
- rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
- for (int i = 0; i < 100; i++) {
- PickNextSample();
- }
-}
-
-void TCMalloc_ThreadCache::Cleanup() {
- // Put unused memory back into central cache
- for (unsigned int cl = 0; cl < kNumClasses; ++cl) {
- if (list_[cl].length() > 0) {
- ReleaseToCentralCache(cl, list_[cl].length());
- }
- }
-}
-
-inline void* TCMalloc_ThreadCache::Allocate(size_t size) {
- ASSERT(size <= kMaxSize);
- const size_t cl = SizeClass(size);
- FreeList* list = &list_[cl];
- if (list->empty()) {
- FetchFromCentralCache(cl);
- if (list->empty()) return NULL;
- }
- size_ -= ByteSizeForClass(cl);
- return list->Pop();
-}
-
-inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
- size_ += ByteSizeForClass(cl);
- FreeList* list = &list_[cl];
- list->Push(ptr);
- // If enough data is free, put back into central cache
- if (list->length() > kMaxFreeListLength) {
- ReleaseToCentralCache(cl, num_objects_to_move[cl]);
- }
- if (size_ >= per_thread_cache_size) Scavenge();
-}
-
-// Remove some objects of class "cl" from central cache and add to thread heap
-void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl) {
- int fetch_count = num_objects_to_move[cl];
- void *start, *end;
- central_cache[cl].RemoveRange(&start, &end, &fetch_count);
- list_[cl].PushRange(fetch_count, start, end);
- size_ += ByteSizeForClass(cl) * fetch_count;
-}
-
-// Remove some objects of class "cl" from thread heap and add to central cache
-void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
- ASSERT(N > 0);
- FreeList* src = &list_[cl];
- if (N > src->length()) N = src->length();
- size_ -= N*ByteSizeForClass(cl);
-
- // We return prepackaged chains of the correct size to the central cache.
- // TODO: Use the same format internally in the thread caches?
- int batch_size = num_objects_to_move[cl];
- while (N > batch_size) {
- void *tail, *head;
- src->PopRange(batch_size, &head, &tail);
- central_cache[cl].InsertRange(head, tail, batch_size);
- N -= batch_size;
- }
- void *tail, *head;
- src->PopRange(N, &head, &tail);
- central_cache[cl].InsertRange(head, tail, N);
-}
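-// ReleaseToCentralCache() hands back full batches of num_objects_to_move[cl]
-// objects; only the final InsertRange() call may carry a partial batch, and
-// InsertRange() sends partial batches straight to spans rather than into the
-// transfer cache.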
-
-// Release idle memory to the central cache
-void TCMalloc_ThreadCache::Scavenge() {
- // If the low-water mark for the free list is L, it means we would
- // not have had to allocate anything from the central cache even if
- // we had reduced the free list size by L. We aim to get closer to
- // that situation by dropping L/2 nodes from the free list. This
- // may not release much memory, but if so we will call scavenge again
- // pretty soon and the low-water marks will be high on that call.
- //int64 start = CycleClock::Now();
-
- for (unsigned int cl = 0; cl < kNumClasses; cl++) {
- FreeList* list = &list_[cl];
- const int lowmark = list->lowwatermark();
- if (lowmark > 0) {
- const int drop = (lowmark > 1) ? lowmark/2 : 1;
- ReleaseToCentralCache(cl, drop);
- }
- list->clear_lowwatermark();
- }
-
- //int64 finish = CycleClock::Now();
- //CycleTimer ct;
- //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
-}
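-// For example, if a size class's free list never dropped below 8 entries
-// since the last scavenge, Scavenge() returns 8/2 = 4 objects from that list
-// to the central cache and then resets its low-water mark.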
-
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
- void* ptr = NULL;
- if (!tsd_inited) {
- InitModule();
- } else {
- ptr = GetHeap(heap_key);
- }
- if (ptr == NULL) ptr = CreateCacheIfNecessary();
- return reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
-}
-
-// In deletion paths, we do not try to create a thread-cache. This is
-// because we may be in the thread destruction code and may have
-// already cleaned up the cache for this thread.
-inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
- if (!tsd_inited) return NULL;
- return reinterpret_cast<TCMalloc_ThreadCache*>
- (GetHeap(heap_key));
-}
-
-void TCMalloc_ThreadCache::PickNextSample() {
- // Make next "random" number
- // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
- static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
- uint32_t r = rnd_;
- rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
-
- // Next point is "rnd_ % (sample_period)". I.e., average
- // increment is "sample_period/2".
- const int flag_value = FLAGS_tcmalloc_sample_parameter;
- static int last_flag_value = -1;
-
- if (flag_value != last_flag_value) {
- SpinLockHolder h(&sample_period_lock);
- unsigned int i;
- for (i = 0; i < (sizeof(primes_list)/sizeof(primes_list[0]) - 1); i++) {
- if (primes_list[i] >= flag_value) {
- break;
- }
- }
- sample_period = primes_list[i];
- last_flag_value = flag_value;
- }
- bytes_until_sample_ = rnd_ % sample_period;
-}
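-// The update above is one step of a Galois LFSR for the 32-bit polynomial
-// x^32+x^22+x^2+x^1+1: shift left by one and, if the bit shifted out was set,
-// XOR in kPoly. bytes_until_sample_ is then drawn (roughly uniformly) from
-// [0, sample_period), giving the average increment of sample_period/2 noted
-// above.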
-
-void TCMalloc_ThreadCache::InitModule() {
- // There is a slight potential race here because of the double-checked
- // locking idiom. However, as long as the program does a small
- // allocation before switching to multi-threaded mode, we will be
- // fine. We increase the chances of doing such a small allocation
- // by doing one in the constructor of the module_enter_exit_hook
- // object declared below.
- SpinLockHolder h(&pageheap_lock);
- if (!phinited) {
- InitSizeClasses();
- threadheap_allocator.Init();
- span_allocator.Init();
- span_allocator.New(); // Reduce cache conflicts
- span_allocator.New(); // Reduce cache conflicts
- stacktrace_allocator.Init();
- DLL_Init(&sampled_objects);
- for (unsigned int i = 0; i < kNumClasses; ++i) {
- central_cache[i].Init(i);
- }
- new ((void*)pageheap_memory) TCMalloc_PageHeap;
- phinited = 1;
- }
-}
-
-void TCMalloc_ThreadCache::InitTSD() {
- ASSERT(!tsd_inited);
- pthread_key_create(&heap_key, DeleteCache);
- tsd_inited = true;
-
- // We may have used a fake pthread_t for the main thread. Fix it.
- pthread_t zero;
- memset(&zero, 0, sizeof(zero));
- SpinLockHolder h(&pageheap_lock);
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- if (h->tid_ == zero) {
- h->tid_ = pthread_self();
- }
- }
-}
-
-void* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
- // Initialize per-thread data if necessary
- TCMalloc_ThreadCache* heap = NULL;
- {
- SpinLockHolder h(&pageheap_lock);
-
- // Early on in glibc's life, we cannot even call pthread_self()
- pthread_t me;
- if (!tsd_inited) {
- memset(&me, 0, sizeof(me));
- } else {
- me = pthread_self();
- }
-
- // This may be a recursive malloc call from pthread_setspecific()
- // In that case, the heap for this thread has already been created
- // and added to the linked list. So we search for that first.
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- if (h->tid_ == me) {
- heap = h;
- break;
- }
- }
-
- if (heap == NULL) {
- // Create the heap and add it to the linked list
- heap = threadheap_allocator.New();
- heap->Init(me);
- heap->next_ = thread_heaps;
- heap->prev_ = NULL;
- if (thread_heaps != NULL) thread_heaps->prev_ = heap;
- thread_heaps = heap;
- thread_heap_count++;
- RecomputeThreadCacheSize();
- }
- }
-
- // We call pthread_setspecific() outside the lock because it may
- // call malloc() recursively. We check for the recursive call using
- // the "in_setspecific_" flag so that we can avoid calling
- // pthread_setspecific() if we are already inside pthread_setspecific().
- if (!heap->in_setspecific_ && tsd_inited) {
- heap->in_setspecific_ = true;
- SetHeap(heap_key, heap);
- heap->in_setspecific_ = false;
- }
- return heap;
-}
-
-void TCMalloc_ThreadCache::DeleteCache(void* ptr) {
-#if defined(Y_HAVE_FAST_POD_TLS)
- my_heap = 0;
-#endif
-
- // Remove all memory from heap
- TCMalloc_ThreadCache* heap;
- heap = reinterpret_cast<TCMalloc_ThreadCache*>(ptr);
- heap->Cleanup();
-
- // Remove from linked list
- SpinLockHolder h(&pageheap_lock);
- if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
- if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
- if (thread_heaps == heap) thread_heaps = heap->next_;
- thread_heap_count--;
- RecomputeThreadCacheSize();
-
- threadheap_allocator.Delete(heap);
-}
-
-void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
- // Divide available space across threads
- int n = thread_heap_count > 0 ? thread_heap_count : 1;
- size_t space = overall_thread_cache_size / n;
-
- // Limit to allowed range
- if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
- if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
-
- per_thread_cache_size = space;
-}
-
-void TCMalloc_ThreadCache::Print() const {
- for (unsigned int cl = 0; cl < kNumClasses; ++cl) {
- MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
- ByteSizeForClass(cl),
- list_[cl].length(),
- list_[cl].lowwatermark());
- }
-}
-
-// Extract interesting stats
-struct TCMallocStats {
- uint64_t system_bytes; // Bytes alloced from system
- uint64_t thread_bytes; // Bytes in thread caches
- uint64_t central_bytes; // Bytes in central cache
- uint64_t transfer_bytes; // Bytes in central transfer cache
- uint64_t pageheap_bytes; // Bytes in page heap
- uint64_t metadata_bytes; // Bytes alloced for metadata
-};
-
-// Get stats into "r". Also get per-size-class counts if class_count != NULL
-static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
- r->central_bytes = 0;
- r->transfer_bytes = 0;
- for (unsigned int cl = 0; cl < kNumClasses; ++cl) {
- const int length = central_cache[cl].length();
- const int tc_length = central_cache[cl].tc_length();
- r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
- r->transfer_bytes +=
- static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
- if (class_count) class_count[cl] = length + tc_length;
- }
-
- // Add stats from per-thread heaps
- r->thread_bytes = 0;
- { // scope
- SpinLockHolder h(&pageheap_lock);
- for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
- r->thread_bytes += h->Size();
- if (class_count) {
- for (unsigned int cl = 0; cl < kNumClasses; ++cl) {
- class_count[cl] += h->freelist_length(cl);
- }
- }
- }
- }
-
- { //scope
- SpinLockHolder h(&pageheap_lock);
- r->system_bytes = pageheap->SystemBytes();
- r->metadata_bytes = metadata_system_bytes;
- r->pageheap_bytes = pageheap->FreeBytes();
- }
-}
-
-// WRITE stats to "out"
-static void DumpStats(TCMalloc_Printer* out, int level) {
- TCMallocStats stats;
- uint64_t class_count[kNumClasses];
- ExtractStats(&stats, (level >= 2 ? class_count : NULL));
-
- if (level >= 2) {
- out->printf("------------------------------------------------\n");
- uint64_t cumulative = 0;
- for (unsigned int cl = 0; cl < kNumClasses; ++cl) {
- if (class_count[cl] > 0) {
- uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
- cumulative += class_bytes;
- out->printf("class %3u [ %8" PRIuS " bytes ] : "
- "%8" LLU " objs; %5.1f MB; %5.1f cum MB\n",
- cl, ByteSizeForClass(cl),
- class_count[cl],
- class_bytes / 1048576.0,
- cumulative / 1048576.0);
- }
- }
-
- SpinLockHolder h(&pageheap_lock);
- pageheap->Dump(out);
- }
-
- const uint64_t bytes_in_use = stats.system_bytes
- - stats.pageheap_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.thread_bytes;
-
- out->printf("------------------------------------------------\n"
- "MALLOC: %12" LLU " Heap size\n"
- "MALLOC: %12" LLU " Bytes in use by application\n"
- "MALLOC: %12" LLU " Bytes free in page heap\n"
- "MALLOC: %12" LLU " Bytes free in central cache\n"
- "MALLOC: %12" LLU " Bytes free in transfer cache\n"
- "MALLOC: %12" LLU " Bytes free in thread caches\n"
- "MALLOC: %12" LLU " Spans in use\n"
- "MALLOC: %12" LLU " Thread heaps in use\n"
- "MALLOC: %12" LLU " Metadata allocated\n"
- "------------------------------------------------\n",
- stats.system_bytes,
- bytes_in_use,
- stats.pageheap_bytes,
- stats.central_bytes,
- stats.transfer_bytes,
- stats.thread_bytes,
- uint64_t(span_allocator.inuse()),
- uint64_t(threadheap_allocator.inuse()),
- stats.metadata_bytes);
-}
-
-static void PrintStats(int level) {
- const int kBufferSize = 16 << 10;
- char* buffer = new char[kBufferSize];
- TCMalloc_Printer printer(buffer, kBufferSize);
- DumpStats(&printer, level);
- write(STDERR_FILENO, buffer, strlen(buffer));
- delete[] buffer;
-}
-
-static void** DumpStackTraces() {
- // Count how much space we need
- int needed_slots = 0;
- {
- SpinLockHolder h(&pageheap_lock);
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
- StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
- needed_slots += 3 + stack->depth;
- }
- needed_slots += 100; // Slop in case sample grows
- needed_slots += needed_slots/8; // An extra 12.5% slop
- }
-
- void** result = new void*[needed_slots];
- if (result == NULL) {
- MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
- needed_slots);
- return NULL;
- }
-
- SpinLockHolder h(&pageheap_lock);
- int used_slots = 0;
- for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
- ASSERT(used_slots < needed_slots); // Need to leave room for terminator
- StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
- if (used_slots + 3 + stack->depth >= needed_slots) {
- // No more room
- break;
- }
-
- result[used_slots+0] = reinterpret_cast<void*>(1);
- result[used_slots+1] = reinterpret_cast<void*>(stack->size);
- result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
- for (int d = 0; d < stack->depth; d++) {
- result[used_slots+3+d] = stack->stack[d];
- }
- used_slots += 3 + stack->depth;
- }
- result[used_slots] = reinterpret_cast<void*>(0);
- return result;
-}
-
-static void** DumpHeapGrowthStackTraces() {
- // Count how much space we need
- int needed_slots = 0;
- {
- SpinLockHolder h(&pageheap_lock);
- for (StackTrace* t = growth_stacks;
- t != NULL;
- t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
- needed_slots += 3 + t->depth;
- }
- needed_slots += 100; // Slop in case list grows
- needed_slots += needed_slots/8; // An extra 12.5% slop
- }
-
- void** result = new void*[needed_slots];
- if (result == NULL) {
- MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
- needed_slots);
- return NULL;
- }
-
- SpinLockHolder h(&pageheap_lock);
- int used_slots = 0;
- for (StackTrace* t = growth_stacks;
- t != NULL;
- t = reinterpret_cast<StackTrace*>(t->stack[kMaxStackDepth-1])) {
- ASSERT(used_slots < needed_slots); // Need to leave room for terminator
- if (used_slots + 3 + t->depth >= needed_slots) {
- // No more room
- break;
- }
-
- result[used_slots+0] = reinterpret_cast<void*>(1);
- result[used_slots+1] = reinterpret_cast<void*>(t->size);
- result[used_slots+2] = reinterpret_cast<void*>(t->depth);
- for (int d = 0; d < t->depth; d++) {
- result[used_slots+3+d] = t->stack[d];
- }
- used_slots += 3 + t->depth;
- }
- result[used_slots] = reinterpret_cast<void*>(0);
- return result;
-}
-
-// TCMalloc's support for extra malloc interfaces
-class TCMallocImplementation : public MallocExtension {
- public:
- virtual void GetStats(char* buffer, int buffer_length) {
- ASSERT(buffer_length > 0);
- TCMalloc_Printer printer(buffer, buffer_length);
-
- // Print level one stats unless lots of space is available
- if (buffer_length < 10000) {
- DumpStats(&printer, 1);
- } else {
- DumpStats(&printer, 2);
- }
- }
-
- virtual void** ReadStackTraces() {
- return DumpStackTraces();
- }
-
- virtual void** ReadHeapGrowthStackTraces() {
- return DumpHeapGrowthStackTraces();
- }
-
- virtual bool GetNumericProperty(const char* name, size_t* value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "generic.current_allocated_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.pageheap_bytes;
- return true;
- }
-
- if (strcmp(name, "generic.heap_size") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.system_bytes;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
- // We assume that bytes in the page heap are not fragmented too
- // badly, and are therefore available for allocation.
- SpinLockHolder l(&pageheap_lock);
- *value = pageheap->FreeBytes();
- return true;
- }
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- SpinLockHolder l(&pageheap_lock);
- *value = overall_thread_cache_size;
- return true;
- }
-
- if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
- *value = stats.thread_bytes;
- return true;
- }
-
- return false;
- }
-
- virtual bool SetNumericProperty(const char* name, size_t value) {
- ASSERT(name != NULL);
-
- if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
- // Clip the value to a reasonable range
- if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
- if (value > (1<<30)) value = (1<<30); // Limit to 1GB
-
- SpinLockHolder l(&pageheap_lock);
- overall_thread_cache_size = static_cast<size_t>(value);
- TCMalloc_ThreadCache::RecomputeThreadCacheSize();
- return true;
- }
-
- return false;
- }
-};
-
-//-------------------------------------------------------------------
-// Helpers for the exported routines below
-//-------------------------------------------------------------------
-
-static Span* DoSampledAllocation(size_t size) {
- SpinLockHolder h(&pageheap_lock);
-
- // Allocate span
- Span* span = pageheap->New(pages(size == 0 ? 1 : size));
- if (span == NULL) {
- return NULL;
- }
-
- // Allocate stack trace
- StackTrace* stack = stacktrace_allocator.New();
- if (stack == NULL) {
- // Sampling failed because of lack of memory
- return span;
- }
-
- // Fill stack trace and record properly
- stack->depth = GetStackTrace(stack->stack, kMaxStackDepth, 1);
- stack->size = size;
- span->sample = 1;
- span->objects = stack;
- DLL_Prepend(&sampled_objects, span);
-
- return span;
-}
-
-static inline void* do_malloc(size_t size) {
- void* ret = NULL;
-
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose) {
- MESSAGE("In tcmalloc do_malloc(%" PRIuS")\n", size);
- }
- // The following call forces module initialization
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
- if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
- Span* span = DoSampledAllocation(size);
- if (span != NULL) {
- ret = reinterpret_cast<void*>(span->start << kPageShift);
- }
- } else if (size > kMaxSize) {
- // Use page-level allocator
- SpinLockHolder h(&pageheap_lock);
- Span* span = pageheap->New(pages(size));
- if (span != NULL) {
- ret = reinterpret_cast<void*>(span->start << kPageShift);
- }
- } else {
- ret = heap->Allocate(size);
- }
- if (ret == NULL) errno = ENOMEM;
- return ret;
-}
-
-static inline void do_free(void* ptr) {
- if (TCMallocDebug::level >= TCMallocDebug::kVerbose)
- MESSAGE("In tcmalloc do_free(%p)\n", ptr);
- if (ptr == NULL) return;
- ASSERT(pageheap != NULL); // Should not call free() before malloc()
- const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
-
- ASSERT(span != NULL);
- ASSERT(!span->free);
- const size_t cl = span->sizeclass;
- if (cl != 0) {
- ASSERT(!span->sample);
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
- if (heap != NULL) {
- heap->Deallocate(ptr, cl);
- } else {
- // Delete directly into central cache
- SLL_SetNext(ptr, NULL);
- central_cache[cl].InsertRange(ptr, ptr, 1);
- }
- } else {
- SpinLockHolder h(&pageheap_lock);
- ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
- ASSERT(span->start == p);
- if (span->sample) {
- DLL_Remove(span);
- stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
- span->objects = NULL;
- }
- pageheap->Delete(span);
- }
-}
-
-// For use by exported routines below that want specific alignments
-//
-// Note: this code can be slow, and can significantly fragment memory.
-// The expectation is that memalign/posix_memalign/valloc/pvalloc will
-// not be invoked very often. This requirement simplifies our
-// implementation and allows us to tune for expected allocation
-// patterns.
-static void* do_memalign(size_t align, size_t size) {
- ASSERT((align & (align - 1)) == 0);
- ASSERT(align > 0);
- if (size + align < size) return NULL; // Overflow
-
- if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
-
- // Allocate at least one byte to avoid boundary conditions below
- if (size == 0) size = 1;
-
- if (size <= kMaxSize && align < kPageSize) {
- // Search through acceptable size classes looking for one with
- // enough alignment. This depends on the fact that
- // InitSizeClasses() currently produces several size classes that
- // are aligned at powers of two. We will waste time and space if
- // we miss in the size class array, but that is deemed acceptable
- // since memalign() should be used rarely.
- unsigned int cl = SizeClass(size);
- while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
- cl++;
- }
- if (cl < kNumClasses) {
- TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
- return heap->Allocate(class_to_size[cl]);
- }
- }
-
- // We will allocate directly from the page heap
- SpinLockHolder h(&pageheap_lock);
-
- if (align <= kPageSize) {
- // Any page-level allocation will be fine
- // TODO: We could put the rest of this page in the appropriate
- // TODO: cache but it does not seem worth it.
- Span* span = pageheap->New(pages(size));
- if (span == NULL) return NULL;
- return reinterpret_cast<void*>(span->start << kPageShift);
- }
-
- // Allocate extra pages and carve off an aligned portion
- const int alloc = pages(size + align);
- Span* span = pageheap->New(alloc);
- if (span == NULL) return NULL;
-
- // Skip starting portion so that we end up aligned
- int skip = 0;
- while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
- skip++;
- }
- ASSERT(skip < alloc);
- if (skip > 0) {
- Span* rest = pageheap->Split(span, skip);
- pageheap->Delete(span);
- span = rest;
- }
-
- // Skip trailing portion that we do not need to return
- const unsigned int needed = pages(size);
- ASSERT(span->length >= needed);
- if (span->length > needed) {
- Span* trailer = pageheap->Split(span, needed);
- pageheap->Delete(trailer);
- }
- return reinterpret_cast<void*>(span->start << kPageShift);
-}
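-// For alignments larger than a page, do_memalign() over-allocates
-// pages(size + align) pages, splits off and frees the misaligned prefix, and
-// then splits off and frees any pages beyond pages(size), keeping only the
-// aligned middle portion.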
-
-
-
-// The constructor allocates an object to ensure that initialization
-// runs before main(), and therefore we do not have a chance to become
-// multi-threaded before initialization. We also create the TSD key
-// here. Presumably by the time this constructor runs, glibc is in
-// good enough shape to handle pthread_key_create().
-//
-// The constructor also takes the opportunity to tell STL to use
-// tcmalloc. We want to do this early, before construct time, so
-// all user STL allocations go through tcmalloc (which works really
-// well for STL).
-//
-// The destructor prints stats when the program exits.
-
-class TCMallocGuard {
- public:
- TCMallocGuard() {
- char *envval;
- if ((envval = getenv("TCMALLOC_DEBUG"))) {
- TCMallocDebug::level = atoi(envval);
- MESSAGE("Set tcmalloc debugging level to %d\n", TCMallocDebug::level);
- }
- do_free(do_malloc(1));
- TCMalloc_ThreadCache::InitTSD();
- do_free(do_malloc(1));
- MallocExtension::Register(new TCMallocImplementation);
- }
-
- ~TCMallocGuard() {
- const char* env = getenv("MALLOCSTATS");
- if (env != NULL) {
- int level = atoi(env);
- if (level < 1) level = 1;
- PrintStats(level);
- }
- }
-};
-
-static TCMallocGuard module_enter_exit_hook;
-
-
-//-------------------------------------------------------------------
-// Exported routines
-//-------------------------------------------------------------------
-
-// CAVEAT: The code structure below ensures that MallocHook methods are always
-// called from the stack frame of the invoked allocation function.
-// heap-checker.cc depends on this to start a stack trace from
-// the call to the (de)allocation function.
-
-extern "C" void* malloc(size_t size) {
- void* result = do_malloc(size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void free(void* ptr) {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" void* calloc(size_t n, size_t elem_size) {
- // Overflow check
- const size_t size = n * elem_size;
- if (elem_size != 0 && size / elem_size != n) return NULL;
-
- void* result = do_malloc(size);
- if (result != NULL) {
- memset(result, 0, size);
- }
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void cfree(void* ptr) {
- MallocHook::InvokeDeleteHook(ptr);
- do_free(ptr);
-}
-
-extern "C" void* realloc(void* old_ptr, size_t new_size) {
- if (old_ptr == NULL) {
- void* result = do_malloc(new_size);
- MallocHook::InvokeNewHook(result, new_size);
- return result;
- }
- if (new_size == 0) {
- MallocHook::InvokeDeleteHook(old_ptr);
- do_free(old_ptr);
- return NULL;
- }
-
- // Get the size of the old entry
- const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
- Span* span = pageheap->GetDescriptor(p);
- size_t old_size;
- if (span->sizeclass != 0) {
- old_size = ByteSizeForClass(span->sizeclass);
- } else {
- old_size = span->length << kPageShift;
- }
-
- // Reallocate if the new size is larger than the old size,
- // or if the new size is significantly smaller than the old size.
- if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
- // Need to reallocate
- void* new_ptr = do_malloc(new_size);
- if (new_ptr == NULL) {
- return NULL;
- }
- MallocHook::InvokeNewHook(new_ptr, new_size);
- memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
- MallocHook::InvokeDeleteHook(old_ptr);
- do_free(old_ptr);
- return new_ptr;
- } else {
- return old_ptr;
- }
-}
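-// "Significantly smaller" above means the new request maps to a strictly
-// smaller allocation (a smaller size class, or fewer pages), i.e.
-// AllocationSize(new_size) < old_size; shrinking within the same size class
-// or page count keeps the existing block.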
-
-/*
- * TODO
- */
-#if defined(USE_INTELCC) || defined(_darwin_) || defined(_freebsd_) || defined(_STLPORT_VERSION)
- #define OP_THROWNOTHING noexcept
- #define OP_THROWBADALLOC
-#else
- #define OP_THROWNOTHING
- #define OP_THROWBADALLOC
-#endif
-
-static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
-
-static inline void* cpp_alloc(size_t size, bool nothrow) {
- for (;;) {
- void* p = do_malloc(size);
-#ifdef PREANSINEW
- MallocHook::InvokeNewHook(p, size);
- return p;
-#else
- if (p == NULL) { // allocation failed
- // Get the current new handler. NB: this function is not
- // thread-safe. We make a feeble stab at making it so here, but
- // this lock only protects against tcmalloc interfering with
- // itself, not with other libraries calling set_new_handler.
- std::new_handler nh;
- {
- SpinLockHolder h(&set_new_handler_lock);
- nh = std::set_new_handler(0);
- (void) std::set_new_handler(nh);
- }
- // If no new_handler is established, the allocation failed.
- if (!nh) {
- if (nothrow) return 0;
- throw std::bad_alloc();
- }
- // Otherwise, try the new_handler. If it returns, retry the
- // allocation. If it throws std::bad_alloc, fail the allocation.
- // if it throws something else, don't interfere.
- try {
- (*nh)();
- } catch (const std::bad_alloc&) {
- if (!nothrow) throw;
- MallocHook::InvokeNewHook(p, size);
- return p;
- }
- } else { // allocation success
- MallocHook::InvokeNewHook(p, size);
- return p;
- }
-#endif
- }
-}
-
-#if !defined(YMAKE)
-void* operator new(size_t size) OP_THROWBADALLOC {
- return cpp_alloc(size, false);
-}
-
-void* operator new(size_t size, const std::nothrow_t&) OP_THROWNOTHING {
- return cpp_alloc(size, true);
-}
-
-void operator delete(void* p) OP_THROWNOTHING {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void operator delete(void* p, const std::nothrow_t&) OP_THROWNOTHING {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void* operator new[](size_t size) OP_THROWBADALLOC {
- return cpp_alloc(size, false);
-}
-
-void* operator new[](size_t size, const std::nothrow_t&) OP_THROWNOTHING {
- return cpp_alloc(size, true);
-}
-
-void operator delete[](void* p) OP_THROWNOTHING {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-
-void operator delete[](void* p, const std::nothrow_t&) OP_THROWNOTHING {
- MallocHook::InvokeDeleteHook(p);
- do_free(p);
-}
-#endif
-
-extern "C" void* memalign(size_t align, size_t size) {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size) {
- if (((align % sizeof(void*)) != 0) ||
- ((align & (align - 1)) != 0) ||
- (align == 0)) {
- return EINVAL;
- }
-
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- if (result == NULL) {
- return ENOMEM;
- } else {
- *result_ptr = result;
- return 0;
- }
-}
-
-extern "C" void* valloc(size_t size) {
- // Allocate page-aligned object of length >= size bytes
- if (pagesize == 0) pagesize = getpagesize();
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void* pvalloc(size_t size) {
- // Round up size to a multiple of pagesize
- if (pagesize == 0) pagesize = getpagesize();
- size = (size + pagesize - 1) & ~(pagesize - 1);
- void* result = do_memalign(pagesize, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-extern "C" void malloc_stats(void) {
- PrintStats(1);
-}
-
-extern "C" int mallopt(int /*cmd*/, int /*value*/) {
- return 1; // Indicates error
-}
-
-#if defined(__GLIBC__)
-extern "C" struct mallinfo mallinfo(void) {
- TCMallocStats stats;
- ExtractStats(&stats, NULL);
-
- // Just some of the fields are filled in.
- struct mallinfo info;
- memset(&info, 0, sizeof(info));
-
- // Unfortunately, the struct contains "int" fields, so some of the
- // size values will be truncated.
- info.arena = static_cast<int>(stats.system_bytes);
- info.fsmblks = static_cast<int>(stats.thread_bytes
- + stats.central_bytes
- + stats.transfer_bytes);
- info.fordblks = static_cast<int>(stats.pageheap_bytes);
- info.uordblks = static_cast<int>(stats.system_bytes
- - stats.thread_bytes
- - stats.central_bytes
- - stats.transfer_bytes
- - stats.pageheap_bytes);
-
- return info;
-}
-#endif
-
-#if defined(_darwin_)
-extern "C" struct mstats mstats()
-{
- TCMallocStats stats;
- ExtractStats( &stats, NULL );
-
- struct mstats info;
- memset( &info, 0, sizeof( info ) );
-
- return info;
-}
-#endif
-
-//-------------------------------------------------------------------
-// Some library routines on RedHat 9 allocate memory using malloc()
-// and free it using __libc_free() (or vice-versa). Since we provide
-// our own implementations of malloc/free, we need to make sure that
-// the __libc_XXX variants also point to the same implementations.
-//-------------------------------------------------------------------
-
-extern "C" {
-#if (defined(HAVE___ATTRIBUTE__) && !defined(_darwin_)) && 0
- // Potentially faster variants that use the gcc alias extension
-#define ALIAS(x) __attribute__ ((weak, alias (x)))
- void* __libc_malloc(size_t size) ALIAS("malloc");
- void __libc_free(void* ptr) ALIAS("free");
- void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
- void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
- void __libc_cfree(void* ptr) ALIAS("cfree");
- void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
- void* __libc_valloc(size_t size) ALIAS("valloc");
- void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
- int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
-#undef ALIAS
-#else
- // Portable wrappers
- void* __libc_malloc(size_t size) { return malloc(size); }
- void __libc_free(void* ptr) { free(ptr); }
- void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
- void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
- void __libc_cfree(void* ptr) { cfree(ptr); }
- void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
- void* __libc_valloc(size_t size) { return valloc(size); }
- void* __libc_pvalloc(size_t size) { return pvalloc(size); }
- int __posix_memalign(void** r, size_t a, size_t s) {
- return posix_memalign(r, a, s);
- }
-#endif
-}
-
-// Override __libc_memalign in libc on Linux boxes specially.
-// Their libc has a bug that causes it to (very rarely) allocate
-// with __libc_memalign() yet deallocate with free(), and the
-// definitions above don't catch that case.
-// This function is an exception to the rule of calling a MallocHook method
-// from the stack frame of the allocation function;
-// the heap-checker handles this special case explicitly.
-static void *MemalignOverride(size_t align, size_t size, const void* /*caller*/) {
- void* result = do_memalign(align, size);
- MallocHook::InvokeNewHook(result, size);
- return result;
-}
-
-#if !defined(__MALLOC_HOOK_VOLATILE)
- #define __MALLOC_HOOK_VOLATILE
-#endif
-
-void *(* __MALLOC_HOOK_VOLATILE __memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
diff --git a/contrib/deprecated/galloc/ya.make b/contrib/deprecated/galloc/ya.make
deleted file mode 100644
index 8b823b1d0e..0000000000
--- a/contrib/deprecated/galloc/ya.make
+++ /dev/null
@@ -1,22 +0,0 @@
-LIBRARY()
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-NO_UTIL()
-
-NO_COMPILER_WARNINGS()
-
-IF (OS_DARWIN)
- PEERDIR(
- contrib/libs/gperftools
- )
-ELSE()
- SRCS(
- galloc.cpp
- hack.cpp
- )
-ENDIF()
-
-END()
diff --git a/contrib/java/antlr/antlr4/antlr.jar b/contrib/java/antlr/antlr4/antlr.jar
deleted file mode 100644
index bb96df9510..0000000000
--- a/contrib/java/antlr/antlr4/antlr.jar
+++ /dev/null
Binary files differ
diff --git a/contrib/libs/antlr4_cpp_runtime/CHANGES.txt b/contrib/libs/antlr4_cpp_runtime/CHANGES.txt
deleted file mode 100644
index b2eef10540..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/CHANGES.txt
+++ /dev/null
@@ -1,582 +0,0 @@
-****************************************************************************
-As of ANTLR 4.2.1, March 25 2014, we are no longer updating this file. Instead,
-we are using the github release mechanism. For example, here is
-4.2.1 release notes:
-
-https://github.com/antlr/antlr4/releases/tag/4.2.1
-****************************************************************************
-
-ANTLR v4 Honey Badger
-
-January 15, 2014
-
-* Unit tests for lexer actions from yesterday.
-* Refactored TreeView so we can refresh tree externally w/o creating new one.
- Needed for intellij plugin.
-
-January 14, 2014
-
-* Updated serialized ATN representation of lexer actions, allowing the lexer
- interpreter to execute the majority of lexer commands (#408)
-
-January 12, 2014
-
-* Support executing precedence predicates during the SLL phase of
- adaptivePredict (#401). The result is a massive performance boost for grammars
- containing direct left-recursion (improvements of 5% to 1000+% have been
- observed, depending on the grammar and input).
-
-December 29, 2013
-
-* Internal change: Tool.loadGrammar() -> parseGrammar(). Tool.load()->parse()
-
-* Added Tool.loadGrammar(fileName) that completely parses, extracts implicit lexer,
- and processes it into a Grammar object. Does not generate code. Use
- Grammar.getImplicitLexer() to get the lexer created during processing of
- combined grammar.
-
-* Added Grammar.load(fileName) that creates Tool object for you. loadGrammar()
- lets you create your own Tool for setting error handlers etc...
-
- final Grammar g = Grammar.load("/tmp/MyGrammar.g4");
-
-December 19, 2013
-
-* Sam:
- Improved documentation for tree patterns classes
- Refactored parts of the tree patterns API to simplify classes and improve encapsulation
- Move ATN serializer to runtime
- Use ATNDeserializer methods instead of ATNSimulator methods which are now deprecated
-
-* parrt: fix null pointer bug with rule "a : a;"
-
-November 24, 2013
-
-* Ter adds tree pattern matching. Preferred interface:
-
- ParseTree t = parser.expr();
- ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
- ParseTreeMatch m = p.match(t);
- String id = m.get("ID");
-
- or
-
- String xpath = "//blockStatement/*";
- String treePattern = "int <Identifier> = <expression>;";
- ParseTreePattern p =
- parser.compileParseTreePattern(treePattern,
- JavaParser.RULE_localVariableDeclarationStatement);
- List<ParseTreeMatch> matches = p.findAll(tree, xpath);
-
-November 20, 2013
-
-* Sam added method stuff like expr() that calls expr(0). Makes it possible
- to call expr rule from TestRig (grun).
-
-November 14, 2013
-
-* Added Sam's ParserInterpreter implementation that uses ATN after
- deserialization.
-
- LexerGrammar lg = new LexerGrammar(
- "lexer grammar L;\n" +
- "A : 'a' ;\n" +
- "B : 'b' ;\n" +
- "C : 'c' ;\n");
- Grammar g = new Grammar(
- "parser grammar T;\n" +
- "s : (A{;}|B)* C ;\n",
- lg);
-
- LexerInterpreter lexEngine = lg.createLexerInterpreter(new ANTLRInputStream(input));
- CommonTokenStream tokens = new CommonTokenStream(lexEngine);
- ParserInterpreter parser = g.createParserInterpreter(tokens);
- ParseTree t = parser.parse(g.rules.get(startRule).index);
-
-November 13, 2013
-
-* move getChildren() from Tree into Trees (to avoid breaking change)
-* Notation:
- /prog/func, -> all funcs under prog at root
- /prog/*, -> all children of prog at root
- /*/func, -> all func kids of any root node
- prog, -> prog must be root node
- /prog, -> prog must be root node
- /*, -> any root
- *, -> any root
- //ID, -> any ID in tree
- //expr/primary/ID, -> any ID child of a primary under any expr
- //body//ID, -> any ID under a body
- //'return', -> any 'return' literal in tree
- //primary/*, -> all kids of any primary
- //func/*/stat, -> all stat nodes grandkids of any func node
- /prog/func/'def', -> all def literal kids of func kid of prog
- //stat/';', -> all ';' under any stat node
- //expr/primary/!ID, -> anything but ID under primary under any expr node
- //expr/!primary, -> anything but primary under any expr node
- //!*, -> nothing anywhere
- /!*, -> nothing at root
-
-September 16, 2013
-
-* Updated build.xml to support v4 grammars in v4 itself; compiles XPathLexer.g4
-* Add to XPath:
- Collection<ParseTree> findAll(String xpath);
-
-September 11, 2013
-
-* Add ! operator to XPath
-* Use ANTLR v4 XPathLexer.g4 not regex
-* Copy lots of find node stuff from v3 GrammarAST to Trees class in runtime.
-
-September 10, 2013
-
-* Adding in XPath stuff.
-
-August 31, 2013
-
-* Lots of little fixes thanks to Coverity Scan
-
-August 7, 2013
-
-* [BREAKING CHANGE] Altered left-recursion elimination to be simpler. Now,
- we use the following patterns:
-
- * Binary expressions are expressions which contain a recursive invocation of
- the rule as the first and last element of the alternative.
-
- * Suffix expressions contain a recursive invocation of the rule as the first
- element of the alternative, but not as the last element.
-
- * Prefix expressions contain a recursive invocation of the rule as the last
- element of the alternative, but not as the first element.
-
-There is no such thing as a "ternary" expression--they are just binary
-expressions in disguise.
-
-The right-associativity specifier no longer goes on the individual tokens because
-associativity is handled on a per-alternative basis anyway. The option is now on the individual
-alternative; e.g.,
-
- e : e '*' e
- | e '+' e
- |<assoc=right> e '?' e ':' e
- |<assoc=right> e '=' e
- | INT
- ;
-
-If your language uses a right-associative ternary operator, you will need
-to update your grammar to include <assoc=right> on the alternative operator.
-
-This also fixes #245 and fixes #268:
-
-https://github.com/antlr/antlr4/issues/245
-https://github.com/antlr/antlr4/issues/268
-
-To smooth the transition, <assoc=right> is still allowed on token references
-but it is ignored.
-
-June 30, 2013 -- 4.1 release
-
-June 24, 2013
-
-* Resize ANTLRInputStream.data after reading a file with fewer characters than
- bytes
-* Fix ATN created for non-greedy optional block with multiple alternatives
-* Support Unicode escape sequences with indirection in JavaUnicodeInputStream
- (fixes #287)
-* Remove the ParserRuleContext.altNum field (fixes #288)
-* PredictionContext no longer implements Iterable<SingletonPredictionContext>
-* PredictionContext no longer implements Comparable<PredictionContext>
-* Add the EPSILON_CLOSURE error and EPSILON_OPTIONAL warning
-* Optimized usage of closureBusy set (fixes #282)
-
-June 9, 2013
-
-* Add regression test for #239 (already passes)
-
-June 8, 2013
-
-* Support list labels on a set of tokens (fixes #270)
-* Fix associativity of XOR in Java LR grammar (fixes #280)
-
-June 1, 2013
-
-* DiagnosticErrorListener includes rule names for each decision in its reports
-* Document ANTLRErrorListener and DiagnosticErrorListener (fixes #265)
-* Support '\uFFFF' (fixes #267)
-* Optimize serialized ATN
-
-May 26, 2013
-
-* Report errors that occur while lexing a grammar (fixes #262)
-* Improved error message for unterminated string literals (fixes #243)
-
-May 24, 2013
-
-* Significantly improve performance of JavaUnicodeInputStream.LA(1)
-
-May 20, 2013
-
-* Generate Javadoc for generated visitor and listener interfaces and classes
-* Fix unit tests
-
-May 18, 2013
-
-* Group terminals in Java grammars so ATN can collapse sets
-* Improved Java 7 support in Java grammars (numeric literals)
-* Updated error listener interfaces
-* Support detailed statistics in TestPerformance
-
-May 17, 2013
-
-* Add JavaUnicodeInputStream to handle Unicode escapes in Java code
-* Proper Unicode identifier handling in Java grammars
-* Report file names with lexer errors in TestPerformance
-
-May 14, 2013
-
-* Use a called rule stack to prevent stack overflow in LL1Analyzer
-* Use 0-based indexing for several arrays in the tool
-* Code simplification, assertions, documentation
-
-May 13, 2013
-
-* Unit test updates to ensure exceptions are not hidden
-
-May 12, 2013
-
-* Updates to TestPerformance
-
-May 5, 2013
-
-* Updated several classes to use MurmurHash 3 hashing
-
-May 1, 2013
-
-* Added parse tree JTree to TreeViewer (Bart Kiers)
-
-April 30, 2013
-
-* Updated TestPerformance to support parallelization across passes
-
-April 24, 2013
-
-* Remove unused stub class ParserATNPathFinder
-* Remove ParserInterpreter.predictATN
-* Remove DFA.getATNStatesAlongPath
-* Encapsulate implementation methods in LexerATNSimulator and ParserATNSimulator
-* Updated documentation
-* Simplify creation of new DFA edges
-* Fix handling of previously cached error edges
-* Fix DFA created during forced-SLL parsing (PredictionMode.SLL)
-* Extract methods ParserATNSimulator.getExistingTargetState and
- ParserATNSimulator.computeTargetState.
-
-April 22, 2013
-
-* Lazy initialization of ParserATNSimulator.mergeCache
-* Improved hash code for DFAState
-* Improved hash code with caching for ATNConfigSet
-* Add new configuration parameters to TestPerformance
-* Update Java LR and Java Std to support Java 7 syntax
-
-April 21, 2013
-
-* Add new configuration parameters to TestPerformance
-
-April 18, 2013
-
-* Must check rule transition follow states before eliminating states in
- the ATN (fixes #224)
-* Simplify ParserATNSimulator and improve performance by combining execDFA and
- execATN and using DFA edges even after edge computation is required
-
-April 15, 2013
-
-* Fix code in TestPerformance that clears the DFA
-
-April 12, 2013
-
-* Improved initialization and concurrency control in DFA updates
-* Fix EOF handling in edge case (fixes #218)
-
-April 4, 2013
-
-* Improved testing of error reporting
-* Fix NPE revealed by updated testing method
-* Strict handling of redefined rules - prevents code generation (fixes #210)
-* Updated documentation in Tool
-
-March 27, 2013
-
-* Avoid creating empty action methods in lexer (fixes #202)
-* Split serialized ATN when it exceeds Java's 65535 byte limit (fixes #76)
-* Fix incorrect reports of label type conflicts across separated labeled outer
- alternatives (fixes #195)
-* Update Maven plugin site documentation
-
-March 26, 2013
-
-* Fix bugs with the closureBusy set in ParserATNSimulator.closure
-* Fix handling of empty options{} block (fixes #194)
-* Add error 149 INVALID_LEXER_COMMAND (fixes #190)
-* Add error 150 MISSING_LEXER_COMMAND_ARGUMENT
-* Add error 151 UNWANTED_LEXER_COMMAND_ARGUMENT
-* Updated documentation in the Parser and RecognitionException classes
-* Refactored and extensively documented the ANTLRErrorStrategy interface and
- DefaultErrorStrategy default implementation
-* Track the number of syntax errors in Parser.notifyErrorListeners instead of in
- the error strategy
-* Move primary implementation of getExpectedTokens to ATN, fixes #191
-* Updated ATN documentation
-* Use UUID instead of incremented integer for serialized ATN versioning
-
-March 7, 2013
-
-* Added export to PNG feature to the parse tree viewer
-
-March 6, 2013
-
-* Allow direct calls to left-recursive rules (fixes #161)
-* Change error type 146 (EPSILON_TOKEN) to a warning (fixes #180)
-* Specify locale for all format operations (fixes #158)
-* Fix generation of invalid Unicode escape sequences in Java code (fixes #164)
-* Do not require escape for $ in action when not followed by an ID start char
- (fixes #176)
-
-February 23, 2013
-
-* Refactoring Target-related classes to improve support for additional language
- targets
-
-February 22, 2013
-
-* Do not allow raw newline characters in literals
-* Pair and Triple are immutable; Triple is not a Pair
-
-February 5, 2013
-
-* Fix IntervalSet.add when multiple merges are required (fixes #153)
-
-January 29, 2013
-
-* don't call process() if args aren't specified (Dave Parfitt)
-
-January 21, 2013 -- Release 4.0
-
-* Updated PredictionContext Javadocs
-* Updated Maven site documentation
-* Minor tweaks in Java.stg
-
-January 15, 2013
-
-* Tweak error messages
-* (Tool) Make TokenVocabParser fields `protected final`
-* Fix generated escape sequences for literals containing backslashes
-
-January 14, 2013
-
-* Relax parser in favor of errors during semantic analysis
-* Add error 145: lexer mode must contain at least one non-fragment rule
-* Add error 146: non-fragment lexer rule can match the empty string
-
-January 11, 2013
-
-* Updated error 72, 76; added 73-74 and 136-143: detailed errors about name
- conflicts
-* Report exact location for parameter/retval/local name conflicts
-* Add error 144: multi-character literals are not allowed in lexer sets
-* Error 134 now only applies to rule references in lexer sets
-* Updated error messages (cleanup)
-* Reduce size of _serializedATN by adding 2 to each element: new representation
- avoids embedded values 0 and 0xFFFF which are common and have multi-byte
- representations in Java's modified UTF-8
-
-January 10, 2013
-
-* Add error 135: cannot assign a value to list label: $label
- (fixes antlr/antlr4#128)
-
-January 2, 2013
-
-* Fix EOF handling (antlr/antlr4#110)
-* Remove TREE_PARSER reference
-* Additional validation checks in ATN deserialization
-* Fix potential NPE in parser predicate evaluation
-* Fix termination condition detection in full-context parsing
-
-January 1, 2013
-
-* Updated documentation
-* Minor code cleanup
-* Added the `-XdbgSTWait` command line option for the Tool
-* Removed method override since bug was fixed in V3 runtime
-
-December 31, 2012
-
-* I altered Target.getTargetStringLiteralFromANTLRStringLiteral() so that
- it converts \uXXXX in an ANTLR string to \\uXXXX, thus, avoiding Java's
- conversion to a single character before compilation.
-
-December 16, 2012
-
-* Encapsulate some fields in ANTLRMessage
-* Remove ErrorType.INVALID
-* Update error/warning messages, show all v3 compatibility messages
-
-December 12, 2012
-
-* Use arrays instead of HashSet to save memory in SemanticContext.AND/OR
-* Use arrays instead of HashSet to save memory in cached DFA
-* Reduce single-operand SemanticContext.and/or operations
-
-December 11, 2012
-
-* Add -long-messages option; only show exceptions with errors when set
-* "warning treated as error" is a one-off error
-* Listen for issues reported by StringTemplate, report them as warnings
-* Fix template issues
-* GrammarASTWithOptions.getOptions never returns null
-* Use EnumSet instead of HashSet
-* Use new STGroup.GROUP_FILE_EXTENSION value
-
-December 2, 2012
-
-* Remove -Xverbose-dfa option
-* Create the ParseTreeVisitor interface for all visitors, rename previous base
- visitor class to AbstractParseTreeVisitor
-
-December 1, 2012
-
-* escape [\n\r\t] in lexical error messages; e.g.:
- line 2:3 token recognition error at: '\t'
- line 2:4 token recognition error at: '\n'
-
-* added error for bad sets in lexer; e.g.:
- lexer set element A is invalid (either rule ref or literal with > 1 char)
- some tests in TestSets appeared to allow ~('a'|B) but it was randomly working.
- ('a'|B) works, though doesn't collapse to a set.
-
-* label+='foo' wasn't generating good code. It was generating the token type as
- the variable name. Now, I gen "s<ttype>" for implicit labels on string literals.
-
-* tokens now have token and char source to draw from.
-
-* remove -Xsave-lexer option; log file as implicit lexer AST.
-
-November 30, 2012
-
-* Maven updates (cleanup, unification, and specify Java 6 bootstrap classpath)
-
-November 28, 2012
-
-* Maven updates (uber-jar, manifest details)
-
-November 27, 2012
-
-* Maven updates (prepare for deploying to Sonatype OSS)
-* Use efficient bitset tests instead of long chains of operator ==
-
-November 26, 2012
-
-* Maven updates (include sources and javadocs, fix warnings)
-* Don't generate action methods for lexer rules not containing an action
-* Generated action and sempred methods are private
-* Remove unused / problematic methods:
-** (unused) TerminalNodeImpl.isErrorNode
-** (unused) RuleContext.conflictsWith, RuleContext.suffix.
-** (problematic) RuleContext.hashCode, RuleContext.equals.
-
-November 23, 2012
-
-* Updated Maven build (added master POM, cleaned up module POMs)
-
-November 22, 2012
-
-* make sure left-recur rule translation uses token stream from correct imported file.
-* actions like @after in imported rules caused inf loop.
-* This misidentified scope lexer/parser: @lexer::members { } @parser::members { }
-
-November 18, 2012
-
-* fixed: undefined rule refs caused exception
-* cleanup, rm dead etypes, add check for ids that cause code gen issues
-* added notion of one-off error
-* added check for v3 backward incompatibilities:
-** tree grammars
-** labels in lexer rules
-** tokens {A;B;} syntax
-** tokens {A='C';} syntax
-** {...}?=> gate semantic predicates
-** (...)=> syntactic predicates
-* Detect EOF in lexer rule
-
-November 17, 2012
-
-* .tokens file goes in the output dir, like the parser file.
-* added check: action in lexer rules must be last element of outermost alt
-* properly check for grammar/filename difference
-* if labels, don't allow set collapse for
- a : A # X | B ;
-* wasn't checking soon enough for rule redef; now it sets a dead flag in
- AST so no more walking dup.
- error(51): T.g:7:0: rule s redefinition (ignoring); previous at line 3
-
-November 11, 2012
-
-* Change version to 4.0b4 (btw, forgot to push 4.0b3 in build.properties when
- I made git tag 4.0b3...ooops).
-
-November 4, 2012
-
-* Kill box in tree dialog box makes dialog dispose of itself
-
-October 29, 2012
-
-* Sam fixes nongreedy more.
-* -Werror added.
-* Sam made speed improvement re preds in lexer.
-
-October 20, 2012
-
-* Merged Sam's fix for nongreedy lexer/parser. lots of unit tests. A fix in
- prediction ctx merge. https://github.com/parrt/antlr4/pull/99
-
-October 14, 2012
-
-* Rebuild how ANTLR detects SLL conflict and failover to full LL. LL is
- a bit slower but correct now. Added ability to ask for exact ambiguity
- detection.
-
-October 8, 2012
-
-* Fixed a bug where labeling the alternatives of the start rule caused
- a null pointer exception.
-
-October 1, 2012 -- 4.0b2 release
-
-September 30, 2012
-
-* Fixed the unbuffered streams, which actually buffered everything
- up by mistake. Tweaked a few comments.
-
-* Added a getter to IntStream for the token factory
-
-* Added -depend cmd-line option.
-
-September 29, 2012
-
-* no nongreedy or wildcard in parser.
-
-September 28, 2012
-
-* empty "tokens {}" is ok now.
-
-September 22, 2012
-
-* Rule exception handlers weren't passed to the generated code
-* $ruleattribute.foo weren't handled properly
-* Added -package option
-
-September 18, 2012 -- 4.0b1 release
diff --git a/contrib/libs/antlr4_cpp_runtime/CONTRIBUTING.md b/contrib/libs/antlr4_cpp_runtime/CONTRIBUTING.md
deleted file mode 100644
index 0a2317bab3..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/CONTRIBUTING.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Contributing to ANTLR 4
-
-1. [Fork](https://help.github.com/articles/fork-a-repo) the [antlr/antlr4 repo](https://github.com/antlr/antlr4), which will give you both key branches, `master` and `dev`
-2. Make sure to `git checkout dev` in your fork so that you are working from the latest development branch
-3. Create and work from a branch derived from `dev` such as `git checkout -b your-branch-name`
-4. Install and configure [EditorConfig](http://editorconfig.org/) so your text editor or IDE uses the ANTLR 4 coding style
-5. [Build ANTLR 4](doc/building-antlr.md)
-6. [Run the ANTLR project unit tests](doc/antlr-project-testing.md)
-7. Create a [pull request](https://help.github.com/articles/using-pull-requests/) with your changes and make sure you're comparing your `dev`-derived branch in your fork to the `dev` branch from the `antlr/antlr4` repo:
-
-<img src="doc/images/PR-on-dev.png" width="600">
-
-**Note:** Each commit requires a "signature", which is as simple as passing `-s` (not
-`-S`) to the git commit command:
-
-```
-git commit -s -m 'This is my commit message'
-```
-
-GitHub's pull request process enforces the signature and gives instructions on how to
-fix any commits that lack it. See the [GitHub DCO app](https://github.com/apps/dco)
-for more info.
diff --git a/contrib/libs/antlr4_cpp_runtime/LICENSE.txt b/contrib/libs/antlr4_cpp_runtime/LICENSE.txt
deleted file mode 100644
index 5d27694155..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/LICENSE.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Copyright (c) 2012-2022 The ANTLR Project. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-
-1. Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright
-notice, this list of conditions and the following disclaimer in the
-documentation and/or other materials provided with the distribution.
-
-3. Neither name of copyright holders nor the names of its contributors
-may be used to endorse or promote products derived from this software
-without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR
-CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
-LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
-NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
-SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/libs/antlr4_cpp_runtime/README-cpp.md b/contrib/libs/antlr4_cpp_runtime/README-cpp.md
deleted file mode 100644
index 622289ba77..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/README-cpp.md
+++ /dev/null
@@ -1,72 +0,0 @@
-# C++ target for ANTLR 4
-
-This folder contains the C++ runtime support for ANTLR. See [the canonical antlr4 repository](https://github.com/antlr/antlr4) for in depth detail about how to use ANTLR 4.
-
-## Authors and major contributors
-
-ANTLR 4 is the result of substantial effort of the following people:
-
-* [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu
- ANTLR project lead and supreme dictator for life
- [University of San Francisco](http://www.usfca.edu/)
-* [Sam Harwell](http://tunnelvisionlabs.com/)
- (Tool co-author, Java and C# target)
-
-The C++ target has been the work of the following people:
-
-* Dan McLaughlin, dan.mclaughlin@gmail.com (initial port, got code to compile)
-* David Sisson, dsisson@google.com (initial port, made the runtime C++ tests runnable)
-* [Mike Lischke](http://www.soft-gems.net), mike@lischke-online.de (brought the initial port to a working library, made most runtime tests passing)
-
-## Other contributors
-
-* Marcin Szalowicz, mszalowicz@mailplus.pl (cmake build setup)
-* Tim O'Callaghan, timo@linux.com (additional superbuild cmake pattern script)
-
-## Project Status
-
-* Building on macOS, Windows, Android and Linux
-* No errors and warnings
-* Library linking
-* Some unit tests in the macOS project, for important base classes with almost 100% code coverage.
-* All memory allocations checked
-* Simple command line demo application working on all supported platforms.
-* All runtime tests pass.
-
-### Build + Usage Notes
-
-The minimum C++ version required to compile the ANTLR C++ runtime is C++11. The supplied projects can build the runtime either as a static or a dynamic library, for both 32-bit and 64-bit architectures. The macOS project contains a target for iOS and can also be built using CMake (instead of Xcode).
-
-Include the antlr4-runtime.h umbrella header in your target application to get everything needed to use the library.
-
-If you are compiling with CMake, the minimum version required is CMake 2.8.
-By default, the libraries produced by the CMake build target C++11. If you want to target a different C++ standard, you can pass the standard explicitly, e.g. `-DCMAKE_CXX_STANDARD=17`.
-
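-To illustrate the umbrella header mentioned above, here is a minimal sketch (assuming the runtime headers are on the include path and the library is linked; it uses only `ANTLRInputStream` from this runtime, no generated lexer or parser, and the input string is just an arbitrary example):
-
-```cpp
-// Decode a UTF-8 string into the runtime's internal UTF-32 stream.
-#include <iostream>
-#include "antlr4-runtime.h"
-
-int main() {
-  antlr4::ANTLRInputStream input("int x = 1;");
-  std::cout << "code points: " << input.size() << std::endl; // length in code points
-  std::cout << input.toString() << std::endl;                // re-encoded as UTF-8
-  return 0;
-}
-```
-
-In a real application the stream would typically be handed to a generated lexer and a `CommonTokenStream` rather than inspected directly.
-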
-#### Compiling on Windows with Visual Studio using the Visual Studio projects
-Simply open the VS project from the runtime folder (VS 2019+) and build it.
-
-#### Compiling on Windows using cmake with Visual Studio VS2019 and later
-Use the "Open Folder" Feature from the File->Open->Folder menu to open the runtime/Cpp directory.
-It will automatically use the CMake description to open up a Visual Studio Solution.
-
-#### Compiling on macOS
-Either open the included Xcode project and build that, or use the CMake compilation as described for Linux.
-
-#### Compiling on Android
-Try running cmake -DCMAKE_ANDROID_NDK=/folder/of/android_ndkr17_and_above -DCMAKE_SYSTEM_NAME=Android -DCMAKE_ANDROID_API=14 -DCMAKE_ANDROID_ARCH_ABI=x86 -DCMAKE_ANDROID_STL_TYPE=c++_shared -DCMAKE_ANDROID_NDK_TOOLCHAIN_VERSION=clang -DCMAKE_BUILD_TYPE=Release /folder/antlr4_src_dir -G Ninja.
-
-#### Compiling on Linux
-- cd \<antlr4-dir\>/runtime/Cpp (this is where this readme is located)
-- mkdir build && mkdir run && cd build
-- cmake .. -DANTLR_JAR_LOCATION=full/path/to/antlr4-4.5.4-SNAPSHOT.jar -DWITH_DEMO=True
-- make
-- DESTDIR=\<antlr4-dir\>/runtime/Cpp/run make install
-
-If you don't want to build the demo then replace the "cmake .. -DANTLR_JAR_LOCATION<...>" command in the above recipe with "cmake .." without any further parameters.
-There is another cmake script available in the subfolder cmake/ for those who prefer the superbuild cmake pattern.
-
-#### CMake Package support
-If the CMake variable 'ANTLR4_INSTALL' is set, CMake packages will be built and installed during the install step.
-They expose two packages, antlr4_runtime and antlr4_generator, which can be referenced to simplify the use of the
-ANTLR generator and runtime.
-Usage notes and a sample can be found [here](cmake/Antlr4Package.md).
diff --git a/contrib/libs/antlr4_cpp_runtime/README.md b/contrib/libs/antlr4_cpp_runtime/README.md
deleted file mode 100644
index 5566fa224d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# ANTLR v4
-
-[![Java 7+](https://img.shields.io/badge/java-7+-4c7e9f.svg)](http://java.oracle.com)
-[![License](https://img.shields.io/badge/license-BSD-blue.svg)](https://raw.githubusercontent.com/antlr/antlr4/master/LICENSE.txt)
-
-
-**ANTLR** (ANother Tool for Language Recognition) is a powerful parser generator for reading, processing, executing, or translating structured text or binary files. It's widely used to build languages, tools, and frameworks. From a grammar, ANTLR generates a parser that can build parse trees and also generates a listener interface (or visitor) that makes it easy to respond to the recognition of phrases of interest.
-
-**Dev branch build status**
-
-[![MacOSX, Windows, Linux](https://github.com/antlr/antlr4/actions/workflows/hosted.yml/badge.svg)](https://github.com/antlr/antlr4/actions/workflows/hosted.yml) (github actions)
-
-<!--
-* [![Windows](https://github.com/antlr/antlr4/actions/workflows/windows.yml/badge.svg?branch=dev)](https://github.com/antlr/antlr4/actions/workflows/windows.yml) (github actions)
-
-* [![Circle CI Build Status (Linux)](https://img.shields.io/circleci/build/gh/antlr/antlr4/master?label=Linux)](https://app.circleci.com/pipelines/github/antlr/antlr4) (CircleCI)
-
-[![AppVeyor CI Build Status (Windows)](https://img.shields.io/appveyor/build/parrt/antlr4?label=Windows)](https://ci.appveyor.com/project/parrt/antlr4)
-[![Travis-CI Build Status (Swift-Linux)](https://img.shields.io/travis/antlr/antlr4.svg?label=Linux-Swift&branch=master)](https://travis-ci.com/github/antlr/antlr4)
--->
-
-## Repo branch structure
-
-The default branch for this repo is [`master`](https://github.com/antlr/antlr4/tree/master), which is the latest stable release and has tags for the various releases; e.g., see release tag [4.9.3](https://github.com/antlr/antlr4/tree/4.9.3). Branch [`dev`](https://github.com/antlr/antlr4/tree/dev) is where development occurs between releases and all pull requests should be derived from that branch. The `dev` branch is merged back into `master` to cut a release and the release state is tagged (e.g., with `4.10-rc1` or `4.10`.) Visually our process looks roughly like this:
-
-<img src="doc/images/new-antlr-branches.png" width="500">
-
-Targets such as Go that pull directly from the repository can use the default `master` branch but can also pull from the active `dev` branch:
-
-```bash
-$ go get github.com/antlr/antlr4/runtime/Go/antlr@dev
-```
-
-## Authors and major contributors
-
-* [Terence Parr](http://www.cs.usfca.edu/~parrt/), parrt@cs.usfca.edu
-ANTLR project lead and supreme dictator for life
-[University of San Francisco](http://www.usfca.edu/)
-* [Sam Harwell](http://tunnelvisionlabs.com/) (Tool co-author, Java and original C# target)
-* [Eric Vergnaud](https://github.com/ericvergnaud) (Javascript, Python2, Python3 targets and maintenance of C# target)
-* [Peter Boyer](https://github.com/pboyer) (Go target)
-* [Mike Lischke](http://www.soft-gems.net/) (C++ completed target)
-* Dan McLaughlin (C++ initial target)
-* David Sisson (C++ initial target and test)
-* [Janyou](https://github.com/janyou) (Swift target)
-* [Ewan Mellor](https://github.com/ewanmellor), [Hanzhou Shi](https://github.com/hanjoes) (Swift target merging)
-* [Ben Hamilton](https://github.com/bhamiltoncx) (Full Unicode support in serialized ATN and all languages' runtimes for code points > U+FFFF)
-* [Marcos Passos](https://github.com/marcospassos) (PHP target)
-* [Lingyu Li](https://github.com/lingyv-li) (Dart target)
-* [Ivan Kochurkin](https://github.com/KvanTTT) has made major contributions to overall quality, error handling, and Target performance.
-* [Justin King](https://github.com/jcking) has done a huge amount of work across multiple targets, but especially for C++.
-* [Ken Domino](https://github.com/kaby76) has a knack for finding bugs/issues and analysis; also a major contributor on the [grammars-v4 repo](https://github.com/antlr/grammars-v4).
-* [Jim Idle](https://github.com/jimidle) has contributed to previous versions of ANTLR and recently jumped back in to solve a major problem with the Go target.
-
-
-## Useful information
-
-* [Release notes](https://github.com/antlr/antlr4/releases)
-* [Getting started with v4](https://github.com/antlr/antlr4/blob/master/doc/getting-started.md)
-* [Official site](http://www.antlr.org/)
-* [Documentation](https://github.com/antlr/antlr4/blob/master/doc/index.md)
-* [FAQ](https://github.com/antlr/antlr4/blob/master/doc/faq/index.md)
-* [ANTLR code generation targets](https://github.com/antlr/antlr4/blob/master/doc/targets.md)<br>(Currently: Java, C#, Python2|3, JavaScript, Go, C++, Swift, Dart, PHP)
-* [Java API](http://www.antlr.org/api/Java/index.html)
-* [ANTLR v3](http://www.antlr3.org/)
-* [v3 to v4 Migration, differences](https://github.com/antlr/antlr4/blob/master/doc/faq/general.md)
-
-You might also find the following pages useful, particularly if you want to mess around with the various target languages.
-
-* [How to build ANTLR itself](https://github.com/antlr/antlr4/blob/master/doc/building-antlr.md)
-* [How we create and deploy an ANTLR release](https://github.com/antlr/antlr4/blob/master/doc/releasing-antlr.md)
-
-## The Definitive ANTLR 4 Reference
-
-Programmers run into parsing problems all the time. Whether it’s a data format like JSON, a network protocol like SMTP, a server configuration file for Apache, a PostScript/PDF file, or a simple spreadsheet macro language—ANTLR v4 and this book will demystify the process. ANTLR v4 has been rewritten from scratch to make it easier than ever to build parsers and the language applications built on top. This completely rewritten new edition of the bestselling Definitive ANTLR Reference shows you how to take advantage of these new features.
-
-You can buy the book [The Definitive ANTLR 4 Reference](http://amzn.com/1934356999) at amazon or an [electronic version at the publisher's site](https://pragprog.com/book/tpantlr2/the-definitive-antlr-4-reference).
-
-You will find the [Book source code](http://pragprog.com/titles/tpantlr2/source_code) useful.
-
-## Additional grammars
-[This repository](https://github.com/antlr/grammars-v4) is a collection of grammars without actions where the
-root directory name is the all-lowercase name of the language parsed
-by the grammar. For example, java, cpp, csharp, c, etc...
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.cpp
deleted file mode 100644
index 6ceadb87f9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorListener.h"
-
-antlr4::ANTLRErrorListener::~ANTLRErrorListener()
-{
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.h
deleted file mode 100644
index 6dc66237e4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorListener.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlrcpp {
- class BitSet;
-}
-
-namespace antlr4 {
-
- /// How to emit recognition errors (an interface in Java).
- class ANTLR4CPP_PUBLIC ANTLRErrorListener {
- public:
- virtual ~ANTLRErrorListener();
-
- /// <summary>
- /// Upon syntax error, notify any interested parties. This is not how to
- /// recover from errors or compute error messages. <seealso cref="ANTLRErrorStrategy"/>
- /// specifies how to recover from syntax errors and how to compute error
- /// messages. This listener's job is simply to emit a computed message,
- /// though it has enough information to create its own message in many cases.
- /// <p/>
- /// The <seealso cref="RecognitionException"/> is non-null for all syntax errors except
- /// when we discover mismatched token errors that we can recover from
- /// in-line, without returning from the surrounding rule (via the single
- /// token insertion and deletion mechanism).
- /// </summary>
- /// <param name="recognizer">
- /// What parser got the error. From this
- /// object, you can access the context as well
- /// as the input stream. </param>
- /// <param name="offendingSymbol">
- /// The offending token in the input token
-    /// stream, unless recognizer is a lexer (then it's null). In the case of a
-    /// no-viable-alternative error, {@code e} has the token at which we
-    /// started the production for the decision. </param>
- /// <param name="line">
- /// The line number in the input where the error occurred. </param>
- /// <param name="charPositionInLine">
- /// The character position within that line where the error occurred. </param>
- /// <param name="msg">
- /// The message to emit. </param>
- /// <param name="e">
- /// The exception generated by the parser that led to
- /// the reporting of an error. It is null in the case where
- /// the parser was able to recover in line without exiting the
- /// surrounding rule. </param>
- virtual void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line,
- size_t charPositionInLine, const std::string &msg, std::exception_ptr e) = 0;
-
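-    // For illustration, a minimal concrete listener might look like the sketch
-    // below (this assumes the runtime's BaseErrorListener, which provides no-op
-    // defaults for the other callbacks, plus <iostream> for std::cerr):
-    //
-    //   class VerboseErrorListener : public BaseErrorListener {
-    //   public:
-    //     void syntaxError(Recognizer * /*recognizer*/, Token * /*offendingSymbol*/,
-    //                      size_t line, size_t charPositionInLine,
-    //                      const std::string &msg, std::exception_ptr /*e*/) override {
-    //       std::cerr << "line " << line << ":" << charPositionInLine << " " << msg << std::endl;
-    //     }
-    //   };
-    //
-    // Such a listener is typically registered with removeErrorListeners() followed
-    // by addErrorListener() on the parser or lexer.
-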
- /**
- * This method is called by the parser when a full-context prediction
- * results in an ambiguity.
- *
- * <p>Each full-context prediction which does not result in a syntax error
- * will call either {@link #reportContextSensitivity} or
- * {@link #reportAmbiguity}.</p>
- *
- * <p>When {@code ambigAlts} is not null, it contains the set of potentially
- * viable alternatives identified by the prediction algorithm. When
- * {@code ambigAlts} is null, use {@link ATNConfigSet#getAlts} to obtain the
- * represented alternatives from the {@code configs} argument.</p>
- *
- * <p>When {@code exact} is {@code true}, <em>all</em> of the potentially
- * viable alternatives are truly viable, i.e. this is reporting an exact
- * ambiguity. When {@code exact} is {@code false}, <em>at least two</em> of
- * the potentially viable alternatives are viable for the current input, but
- * the prediction algorithm terminated as soon as it determined that at
- * least the <em>minimum</em> potentially viable alternative is truly
- * viable.</p>
- *
- * <p>When the {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} prediction
- * mode is used, the parser is required to identify exact ambiguities so
- * {@code exact} will always be {@code true}.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
-     * @param stopIndex the input index where the ambiguity was identified
- * @param exact {@code true} if the ambiguity is exactly known, otherwise
- * {@code false}. This is always {@code true} when
- * {@link PredictionMode#LL_EXACT_AMBIG_DETECTION} is used.
- * @param ambigAlts the potentially ambiguous alternatives, or {@code null}
- * to indicate that the potentially ambiguous alternatives are the complete
- * set of represented alternatives in {@code configs}
- * @param configs the ATN configuration set where the ambiguity was
- * identified
- */
- virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) = 0;
-
- /**
- * This method is called when an SLL conflict occurs and the parser is about
- * to use the full context information to make an LL decision.
- *
- * <p>If one or more configurations in {@code configs} contains a semantic
- * predicate, the predicates are evaluated before this method is called. The
- * subset of alternatives which are still viable after predicates are
- * evaluated is reported in {@code conflictingAlts}.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
- * @param stopIndex the input index where the SLL conflict occurred
- * @param conflictingAlts The specific conflicting alternatives. If this is
- * {@code null}, the conflicting alternatives are all alternatives
- * represented in {@code configs}. At the moment, conflictingAlts is non-null
- * (for the reference implementation, but Sam's optimized version can see this
- * as null).
- * @param configs the ATN configuration set where the SLL conflict was
- * detected
- */
- virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) = 0;
-
- /**
- * This method is called by the parser when a full-context prediction has a
- * unique result.
- *
- * <p>Each full-context prediction which does not result in a syntax error
- * will call either {@link #reportContextSensitivity} or
- * {@link #reportAmbiguity}.</p>
- *
- * <p>For prediction implementations that only evaluate full-context
- * predictions when an SLL conflict is found (including the default
- * {@link ParserATNSimulator} implementation), this method reports cases
- * where SLL conflicts were resolved to unique full-context predictions,
- * i.e. the decision was context-sensitive. This report does not necessarily
- * indicate a problem, and it may appear even in completely unambiguous
- * grammars.</p>
- *
- * <p>{@code configs} may have more than one represented alternative if the
- * full-context prediction algorithm does not evaluate predicates before
- * beginning the full-context prediction. In all cases, the final prediction
- * is passed as the {@code prediction} argument.</p>
- *
- * <p>Note that the definition of "context sensitivity" in this method
- * differs from the concept in {@link DecisionInfo#contextSensitivities}.
- * This method reports all instances where an SLL conflict occurred but LL
- * parsing produced a unique result, whether or not that unique result
- * matches the minimum alternative in the SLL conflicting set.</p>
- *
- * <p>This method is not used by lexers.</p>
- *
- * @param recognizer the parser instance
- * @param dfa the DFA for the current decision
- * @param startIndex the input index where the decision started
- * @param stopIndex the input index where the context sensitivity was
- * finally determined
- * @param prediction the unambiguous result of the full-context prediction
- * @param configs the ATN configuration set where the unambiguous prediction
- * was determined
- */
- virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- size_t prediction, atn::ATNConfigSet *configs) = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.cpp b/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.cpp
deleted file mode 100644
index 1655a5731d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.cpp
+++ /dev/null
@@ -1,10 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRErrorStrategy.h"
-
-antlr4::ANTLRErrorStrategy::~ANTLRErrorStrategy()
-{
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.h b/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.h
deleted file mode 100644
index a3eecd14c4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRErrorStrategy.h
+++ /dev/null
@@ -1,121 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// The interface for defining strategies to deal with syntax errors encountered
- /// during a parse by ANTLR-generated parsers. We distinguish between three
- /// different kinds of errors:
- ///
- /// <ul>
- /// <li>The parser could not figure out which path to take in the ATN (none of
- /// the available alternatives could possibly match)</li>
- /// <li>The current input does not match what we were looking for</li>
- /// <li>A predicate evaluated to false</li>
- /// </ul>
- ///
- /// Implementations of this interface report syntax errors by calling
- /// <seealso cref="Parser#notifyErrorListeners"/>.
- /// <p/>
- /// TODO: what to do about lexers
- /// </summary>
- class ANTLR4CPP_PUBLIC ANTLRErrorStrategy {
- public:
-
-    virtual ~ANTLRErrorStrategy();
-
-    /// <summary>
-    /// Reset the error handler state for the specified {@code recognizer}. </summary>
-    /// <param name="recognizer"> the parser instance </param>
-    virtual void reset(Parser *recognizer) = 0;
-
- /**
- * This method is called when an unexpected symbol is encountered during an
- * inline match operation, such as {@link Parser#match}. If the error
- * strategy successfully recovers from the match failure, this method
- * returns the {@link Token} instance which should be treated as the
- * successful result of the match.
- *
- * <p>This method handles the consumption of any tokens - the caller should
- * <b>not</b> call {@link Parser#consume} after a successful recovery.</p>
- *
- * <p>Note that the calling code will not report an error if this method
- * returns successfully. The error strategy implementation is responsible
- * for calling {@link Parser#notifyErrorListeners} as appropriate.</p>
- *
- * @param recognizer the parser instance
- * @throws RecognitionException if the error strategy was not able to
- * recover from the unexpected input symbol
- */
- virtual Token* recoverInline(Parser *recognizer) = 0;
-
- /// <summary>
- /// This method is called to recover from exception {@code e}. This method is
- /// called after <seealso cref="#reportError"/> by the default exception handler
- /// generated for a rule method.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception to recover from </param>
- /// <exception cref="RecognitionException"> if the error strategy could not recover from
- /// the recognition exception </exception>
- virtual void recover(Parser *recognizer, std::exception_ptr e) = 0;
-
- /// <summary>
- /// This method provides the error handler with an opportunity to handle
- /// syntactic or semantic errors in the input stream before they result in a
- /// <seealso cref="RecognitionException"/>.
- /// <p/>
- /// The generated code currently contains calls to <seealso cref="#sync"/> after
- /// entering the decision state of a closure block ({@code (...)*} or
- /// {@code (...)+}).
- /// <p/>
- /// For an implementation based on Jim Idle's "magic sync" mechanism, see
- /// <seealso cref="DefaultErrorStrategy#sync"/>.
- /// </summary>
- /// <seealso cref= DefaultErrorStrategy#sync
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <exception cref="RecognitionException"> if an error is detected by the error
- /// strategy but cannot be automatically recovered at the current state in
- /// the parsing process </exception>
- virtual void sync(Parser *recognizer) = 0;
-
- /// <summary>
- /// Tests whether or not {@code recognizer} is in the process of recovering
- /// from an error. In error recovery mode, <seealso cref="Parser#consume"/> adds
- /// symbols to the parse tree by calling
- /// {@link Parser#createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)} instead of
- /// {@link Parser#createTerminalNode(ParserRuleContext, Token)}.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> {@code true} if the parser is currently recovering from a parse
- /// error, otherwise {@code false} </returns>
- virtual bool inErrorRecoveryMode(Parser *recognizer) = 0;
-
- /// <summary>
-    /// This method is called when the parser successfully matches an input
- /// symbol.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- virtual void reportMatch(Parser *recognizer) = 0;
-
- /// <summary>
- /// Report any kind of <seealso cref="RecognitionException"/>. This method is called by
- /// the default exception handler generated for a rule method.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception to report </param>
- virtual void reportError(Parser *recognizer, const RecognitionException &e) = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.cpp
deleted file mode 100644
index 674817ac0e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ANTLRFileStream.h"
-
-using namespace antlr4;
-
-void ANTLRFileStream::loadFromFile(const std::string &fileName) {
- _fileName = fileName;
- if (_fileName.empty()) {
- return;
- }
-
- std::ifstream stream(fileName, std::ios::binary);
-
- ANTLRInputStream::load(stream);
-}
-
-std::string ANTLRFileStream::getSourceName() const {
- return _fileName;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.h b/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.h
deleted file mode 100644
index 6c7d619a00..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRFileStream.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRInputStream.h"
-
-namespace antlr4 {
-
-  /// This is an ANTLRInputStream whose content is loaded from a file all at once
-  /// by calling loadFromFile().
- // TODO: this class needs testing.
- class ANTLR4CPP_PUBLIC ANTLRFileStream : public ANTLRInputStream {
- public:
- ANTLRFileStream() = default;
- ANTLRFileStream(const std::string &) = delete;
- ANTLRFileStream(const char *data, size_t length) = delete;
- ANTLRFileStream(std::istream &stream) = delete;
-
- // Assumes a file name encoded in UTF-8 and file content in the same encoding (with or w/o BOM).
- virtual void loadFromFile(const std::string &fileName);
- virtual std::string getSourceName() const override;
-
- private:
- std::string _fileName; // UTF-8 encoded file name.
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.cpp
deleted file mode 100644
index b6470af9b7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.cpp
+++ /dev/null
@@ -1,180 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include <string.h>
-
-#include "Exceptions.h"
-#include "misc/Interval.h"
-#include "IntStream.h"
-
-#include "support/Utf8.h"
-#include "support/CPPUtils.h"
-
-#include "ANTLRInputStream.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-using misc::Interval;
-
-ANTLRInputStream::ANTLRInputStream() {
- InitializeInstanceFields();
-}
-
-ANTLRInputStream::ANTLRInputStream(std::string_view input): ANTLRInputStream() {
- load(input.data(), input.length());
-}
-
-ANTLRInputStream::ANTLRInputStream(const char *data, size_t length) {
- load(data, length);
-}
-
-ANTLRInputStream::ANTLRInputStream(std::istream &stream): ANTLRInputStream() {
- load(stream);
-}
-
-void ANTLRInputStream::load(const std::string &input, bool lenient) {
- load(input.data(), input.size(), lenient);
-}
-
-void ANTLRInputStream::load(const char *data, size_t length, bool lenient) {
- // Remove the UTF-8 BOM if present.
- const char *bom = "\xef\xbb\xbf";
- if (length >= 3 && strncmp(data, bom, 3) == 0) {
- data += 3;
- length -= 3;
- }
- if (lenient) {
- _data = Utf8::lenientDecode(std::string_view(data, length));
- } else {
- auto maybe_utf32 = Utf8::strictDecode(std::string_view(data, length));
- if (!maybe_utf32.has_value()) {
- throw IllegalArgumentException("UTF-8 string contains an illegal byte sequence");
- }
- _data = std::move(maybe_utf32).value();
- }
- p = 0;
-}
-
-void ANTLRInputStream::load(std::istream &stream, bool lenient) {
-  if (!stream.good() || stream.eof()) // Bail out on fail/bad bits or EOF.
- return;
-
- _data.clear();
-
- std::string s((std::istreambuf_iterator<char>(stream)), std::istreambuf_iterator<char>());
- load(s.data(), s.length(), lenient);
-}
-
-void ANTLRInputStream::reset() {
- p = 0;
-}
-
-void ANTLRInputStream::consume() {
- if (p >= _data.size()) {
- assert(LA(1) == IntStream::EOF);
- throw IllegalStateException("cannot consume EOF");
- }
-
- if (p < _data.size()) {
- p++;
- }
-}
-
-size_t ANTLRInputStream::LA(ssize_t i) {
- if (i == 0) {
- return 0; // undefined
- }
-
- ssize_t position = static_cast<ssize_t>(p);
- if (i < 0) {
- i++; // e.g., translate LA(-1) to use offset i=0; then _data[p+0-1]
- if ((position + i - 1) < 0) {
- return IntStream::EOF; // invalid; no char before first char
- }
- }
-
- if ((position + i - 1) >= static_cast<ssize_t>(_data.size())) {
- return IntStream::EOF;
- }
-
- return _data[static_cast<size_t>((position + i - 1))];
-}
-
-size_t ANTLRInputStream::LT(ssize_t i) {
- return LA(i);
-}
-
-size_t ANTLRInputStream::index() {
- return p;
-}
-
-size_t ANTLRInputStream::size() {
- return _data.size();
-}
-
-// Mark/release do nothing. We have entire buffer.
-ssize_t ANTLRInputStream::mark() {
- return -1;
-}
-
-void ANTLRInputStream::release(ssize_t /* marker */) {
-}
-
-void ANTLRInputStream::seek(size_t index) {
- if (index <= p) {
- p = index; // just jump; don't update stream state (line, ...)
- return;
- }
- // seek forward, consume until p hits index or n (whichever comes first)
- index = std::min(index, _data.size());
- while (p < index) {
- consume();
- }
-}
-
-std::string ANTLRInputStream::getText(const Interval &interval) {
- if (interval.a < 0 || interval.b < 0) {
- return "";
- }
-
- size_t start = static_cast<size_t>(interval.a);
- size_t stop = static_cast<size_t>(interval.b);
-
-
- if (stop >= _data.size()) {
- stop = _data.size() - 1;
- }
-
- size_t count = stop - start + 1;
- if (start >= _data.size()) {
- return "";
- }
-
- auto maybeUtf8 = Utf8::strictEncode(std::u32string_view(_data).substr(start, count));
- if (!maybeUtf8.has_value()) {
- throw IllegalArgumentException("Input stream contains invalid Unicode code points");
- }
- return std::move(maybeUtf8).value();
-}
-
-std::string ANTLRInputStream::getSourceName() const {
- if (name.empty()) {
- return IntStream::UNKNOWN_SOURCE_NAME;
- }
- return name;
-}
-
-std::string ANTLRInputStream::toString() const {
- auto maybeUtf8 = Utf8::strictEncode(_data);
- if (!maybeUtf8.has_value()) {
- throw IllegalArgumentException("Input stream contains invalid Unicode code points");
- }
- return std::move(maybeUtf8).value();
-}
-
-void ANTLRInputStream::InitializeInstanceFields() {
- p = 0;
-}
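A minimal sketch of the load() behaviour removed above: the UTF-8 BOM is stripped, strict decoding throws IllegalArgumentException on malformed input, and lenient decoding accepts it. Only APIs from this file are used; the include paths are assumptions.

#include <iostream>
#include "ANTLRInputStream.h"
#include "Exceptions.h"

int main() {
  antlr4::ANTLRInputStream in;

  std::string withBom = "\xef\xbb\xbfselect 1;";
  in.load(withBom);                      // strict decode; the BOM is removed first
  std::cout << in.toString() << "\n";    // "select 1;"

  std::string malformed = "id\xff";      // 0xFF is never valid in UTF-8
  try {
    in.load(malformed);                  // strict decode -> throws
  } catch (antlr4::IllegalArgumentException &e) {
    std::cout << "strict: " << e.what() << "\n";
  }
  in.load(malformed, /*lenient=*/true);  // lenient decode accepts the input
  std::cout << in.size() << " code points after lenient load\n";
  return 0;
}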
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.h b/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.h
deleted file mode 100644
index 413eadefa4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ANTLRInputStream.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <string_view>
-
-#include "CharStream.h"
-
-namespace antlr4 {
-
- // Vacuum all input from a stream and then treat it
- // like a string. Can also pass in a string or char[] to use.
- // Input is expected to be encoded in UTF-8 and converted to UTF-32 internally.
- class ANTLR4CPP_PUBLIC ANTLRInputStream : public CharStream {
- protected:
- /// The data being scanned.
- // UTF-32
- std::u32string _data;
-
- /// 0..n-1 index into the string of the next char.
- size_t p;
-
- public:
- /// What is name or source of this char stream?
- std::string name;
-
- ANTLRInputStream();
-
- ANTLRInputStream(std::string_view input);
-
- ANTLRInputStream(const char *data, size_t length);
- ANTLRInputStream(std::istream &stream);
-
- virtual void load(const std::string &input, bool lenient);
- virtual void load(const char *data, size_t length, bool lenient);
- virtual void load(std::istream &stream, bool lenient);
-
- virtual void load(const std::string &input) { load(input, false); }
- virtual void load(const char *data, size_t length) { load(data, length, false); }
- virtual void load(std::istream &stream) { load(stream, false); }
-
- /// Reset the stream so that it's in the same state it was
- /// when the object was created *except* the data array is not
- /// touched.
- virtual void reset();
- virtual void consume() override;
- virtual size_t LA(ssize_t i) override;
- virtual size_t LT(ssize_t i);
-
- /// <summary>
- /// Return the current input symbol index 0..n where n indicates the
- /// last symbol has been read. The index is the index of char to
- /// be returned from LA(1).
- /// </summary>
- virtual size_t index() override;
- virtual size_t size() override;
-
- /// <summary>
- /// mark/release do nothing; we have entire buffer </summary>
- virtual ssize_t mark() override;
- virtual void release(ssize_t marker) override;
-
- /// <summary>
- /// Seek forward by calling consume() until p == index; if we seek backwards,
- /// just set p (this stream does not track line/charPositionInLine).
- /// </summary>
- virtual void seek(size_t index) override;
- virtual std::string getText(const misc::Interval &interval) override;
- virtual std::string getSourceName() const override;
- virtual std::string toString() const override;
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
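A small sketch of the lookahead contract documented in the header above: LA(1) peeks at the next code point, LA(-1) looks back, consume() advances, and getText() takes an inclusive interval of code-point indexes. Only APIs from this diff are used; include paths are assumptions.

#include <iostream>
#include "ANTLRInputStream.h"
#include "misc/Interval.h"

int main() {
  antlr4::ANTLRInputStream in("abc");
  std::cout << static_cast<char>(in.LA(1)) << "\n";    // 'a' - next char, nothing consumed
  in.consume();
  std::cout << static_cast<char>(in.LA(1)) << "\n";    // 'b'
  std::cout << static_cast<char>(in.LA(-1)) << "\n";   // 'a' - one code point back
  std::cout << in.index() << "/" << in.size() << "\n"; // 1/3

  size_t start = 0, stop = 2;                          // inclusive bounds
  std::cout << in.getText(antlr4::misc::Interval(start, stop)) << "\n"; // "abc"
  return 0;
}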
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.cpp b/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.cpp
deleted file mode 100644
index 781a13b547..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "ParserRuleContext.h"
-#include "InputMismatchException.h"
-#include "Parser.h"
-
-#include "BailErrorStrategy.h"
-
-using namespace antlr4;
-
-void BailErrorStrategy::recover(Parser *recognizer, std::exception_ptr e) {
- ParserRuleContext *context = recognizer->getContext();
- do {
- context->exception = e;
- if (context->parent == nullptr)
- break;
- context = static_cast<ParserRuleContext *>(context->parent);
- } while (true);
-
- try {
- std::rethrow_exception(e); // Throw the exception to be able to catch and rethrow nested.
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (RecognitionException &inner) {
- throw ParseCancellationException(inner.what());
-#else
- } catch (RecognitionException & /*inner*/) {
- std::throw_with_nested(ParseCancellationException());
-#endif
- }
-}
-
-Token* BailErrorStrategy::recoverInline(Parser *recognizer) {
- InputMismatchException e(recognizer);
- std::exception_ptr exception = std::make_exception_ptr(e);
-
- ParserRuleContext *context = recognizer->getContext();
- do {
- context->exception = exception;
- if (context->parent == nullptr)
- break;
- context = static_cast<ParserRuleContext *>(context->parent);
- } while (true);
-
- try {
- throw e;
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (InputMismatchException &inner) {
- throw ParseCancellationException(inner.what());
-#else
- } catch (InputMismatchException & /*inner*/) {
- std::throw_with_nested(ParseCancellationException());
-#endif
- }
-}
-
-void BailErrorStrategy::sync(Parser * /*recognizer*/) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.h b/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.h
deleted file mode 100644
index 598f993022..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BailErrorStrategy.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "DefaultErrorStrategy.h"
-
-namespace antlr4 {
-
- /**
- * This implementation of {@link ANTLRErrorStrategy} responds to syntax errors
- * by immediately canceling the parse operation with a
- * {@link ParseCancellationException}. The implementation ensures that the
- * {@link ParserRuleContext#exception} field is set for all parse tree nodes
- * that were not completed prior to encountering the error.
- *
- * <p>
- * This error strategy is useful in the following scenarios.</p>
- *
- * <ul>
- * <li><strong>Two-stage parsing:</strong> This error strategy allows the first
- * stage of two-stage parsing to immediately terminate if an error is
- * encountered, and immediately fall back to the second stage. In addition to
- * avoiding wasted work by attempting to recover from errors here, the empty
- * implementation of {@link BailErrorStrategy#sync} improves the performance of
- * the first stage.</li>
- * <li><strong>Silent validation:</strong> When syntax errors are not being
- * reported or logged, and the parse result is simply ignored if errors occur,
- * the {@link BailErrorStrategy} avoids wasting work on recovering from errors
- * when the result will be ignored either way.</li>
- * </ul>
- *
- * <p>
- * {@code myparser.setErrorHandler(new BailErrorStrategy());}</p>
- *
- * @see Parser#setErrorHandler(ANTLRErrorStrategy)
- */
- class ANTLR4CPP_PUBLIC BailErrorStrategy : public DefaultErrorStrategy {
- /// <summary>
- /// Instead of recovering from exception {@code e}, re-throw it wrapped
- /// in a <seealso cref="ParseCancellationException"/> so it is not caught by the
- /// rule functions' catch clauses. Use <seealso cref="Exception#getCause()"/> to get the
- /// original <seealso cref="RecognitionException"/>.
- /// </summary>
- public:
- virtual void recover(Parser *recognizer, std::exception_ptr e) override;
-
- /// Make sure we don't attempt to recover inline; if the parser
- /// successfully recovers, it won't throw an exception.
- virtual Token* recoverInline(Parser *recognizer) override;
-
- /// <summary>
- /// Make sure we don't attempt to recover from problems in subrules. </summary>
- virtual void sync(Parser *recognizer) override;
- };
-
-} // namespace antlr4
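The two-stage parsing pattern described in the class comment above, as a hedged sketch: MyParser and its start rule stand in for ANTLR-generated code, and the SLL/LL prediction-mode switch follows the runtime's usual companion pattern rather than anything defined in this file.

#include <memory>
#include "CommonTokenStream.h"
#include "BailErrorStrategy.h"
#include "DefaultErrorStrategy.h"
#include "Exceptions.h"
#include "atn/ParserATNSimulator.h"
#include "atn/PredictionMode.h"
#include "MyParser.h"   // hypothetical generated parser with a 'start' rule

MyParser::StartContext *parseTwoStage(antlr4::CommonTokenStream &tokens, MyParser &parser) {
  // Stage 1: fast SLL prediction, bail out on the first syntax error.
  parser.setErrorHandler(std::make_shared<antlr4::BailErrorStrategy>());
  parser.getInterpreter<antlr4::atn::ParserATNSimulator>()
      ->setPredictionMode(antlr4::atn::PredictionMode::SLL);
  try {
    return parser.start();
  } catch (antlr4::ParseCancellationException &) {
    // Stage 2: rewind and reparse with full LL prediction and default recovery.
    tokens.reset();
    parser.reset();
    parser.setErrorHandler(std::make_shared<antlr4::DefaultErrorStrategy>());
    parser.getInterpreter<antlr4::atn::ParserATNSimulator>()
        ->setPredictionMode(antlr4::atn::PredictionMode::LL);
    return parser.start();
  }
}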
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.cpp
deleted file mode 100644
index cdcca8bc5c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "BaseErrorListener.h"
-#include "RecognitionException.h"
-
-using namespace antlr4;
-
-void BaseErrorListener::syntaxError(Recognizer * /*recognizer*/, Token * /*offendingSymbol*/, size_t /*line*/,
- size_t /*charPositionInLine*/, const std::string &/*msg*/, std::exception_ptr /*e*/) {
-}
-
-void BaseErrorListener::reportAmbiguity(Parser * /*recognizer*/, const dfa::DFA &/*dfa*/, size_t /*startIndex*/,
- size_t /*stopIndex*/, bool /*exact*/, const antlrcpp::BitSet &/*ambigAlts*/, atn::ATNConfigSet * /*configs*/) {
-}
-
-void BaseErrorListener::reportAttemptingFullContext(Parser * /*recognizer*/, const dfa::DFA &/*dfa*/, size_t /*startIndex*/,
- size_t /*stopIndex*/, const antlrcpp::BitSet &/*conflictingAlts*/, atn::ATNConfigSet * /*configs*/) {
-}
-
-void BaseErrorListener::reportContextSensitivity(Parser * /*recognizer*/, const dfa::DFA &/*dfa*/, size_t /*startIndex*/,
- size_t /*stopIndex*/, size_t /*prediction*/, atn::ATNConfigSet * /*configs*/) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.h
deleted file mode 100644
index 317785aa64..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BaseErrorListener.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorListener.h"
-
-namespace antlrcpp {
- class BitSet;
-}
-
-namespace antlr4 {
-
- /**
- * Provides an empty default implementation of {@link ANTLRErrorListener}. The
- * default implementation of each method does nothing, but can be overridden as
- * necessary.
- */
- class ANTLR4CPP_PUBLIC BaseErrorListener : public ANTLRErrorListener {
-
- virtual void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine,
- const std::string &msg, std::exception_ptr e) override;
-
- virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- size_t prediction, atn::ATNConfigSet *configs) override;
- };
-
-} // namespace antlr4
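Since all four callbacks above are empty defaults, a listener normally overrides only what it needs. A minimal sketch that collects syntax errors instead of printing them; the wiring comment assumes Recognizer's addErrorListener()/removeErrorListeners(), which are not part of this file.

#include <string>
#include <vector>
#include "BaseErrorListener.h"

class CollectingErrorListener : public antlr4::BaseErrorListener {
public:
  std::vector<std::string> messages;

  void syntaxError(antlr4::Recognizer * /*recognizer*/, antlr4::Token * /*offendingSymbol*/,
                   size_t line, size_t charPositionInLine, const std::string &msg,
                   std::exception_ptr /*e*/) override {
    // Only this callback is overridden; the ambiguity reports keep their empty defaults.
    messages.push_back(std::to_string(line) + ":" + std::to_string(charPositionInLine) + " " + msg);
  }
};

// Typical wiring on any lexer or parser:
//   CollectingErrorListener listener;
//   recognizer.removeErrorListeners();
//   recognizer.addErrorListener(&listener);  // the listener is not owned by the recognizer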
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.cpp
deleted file mode 100644
index 4eaff2c852..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.cpp
+++ /dev/null
@@ -1,414 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "WritableToken.h"
-#include "Lexer.h"
-#include "RuleContext.h"
-#include "misc/Interval.h"
-#include "Exceptions.h"
-#include "support/CPPUtils.h"
-
-#include "BufferedTokenStream.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-BufferedTokenStream::BufferedTokenStream(TokenSource *tokenSource) : _tokenSource(tokenSource){
- InitializeInstanceFields();
-}
-
-TokenSource* BufferedTokenStream::getTokenSource() const {
- return _tokenSource;
-}
-
-size_t BufferedTokenStream::index() {
- return _p;
-}
-
-ssize_t BufferedTokenStream::mark() {
- return 0;
-}
-
-void BufferedTokenStream::release(ssize_t /*marker*/) {
- // no resources to release
-}
-
-void BufferedTokenStream::reset() {
- seek(0);
-}
-
-void BufferedTokenStream::seek(size_t index) {
- lazyInit();
- _p = adjustSeekIndex(index);
-}
-
-size_t BufferedTokenStream::size() {
- return _tokens.size();
-}
-
-void BufferedTokenStream::consume() {
- bool skipEofCheck = false;
- if (!_needSetup) {
- if (_fetchedEOF) {
- // the last token in tokens is EOF. skip check if p indexes any
- // fetched token except the last.
- skipEofCheck = _p < _tokens.size() - 1;
- } else {
- // no EOF token in tokens. skip check if p indexes a fetched token.
- skipEofCheck = _p < _tokens.size();
- }
- } else {
- // not yet initialized
- skipEofCheck = false;
- }
-
- if (!skipEofCheck && LA(1) == Token::EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- if (sync(_p + 1)) {
- _p = adjustSeekIndex(_p + 1);
- }
-}
-
-bool BufferedTokenStream::sync(size_t i) {
- if (i + 1 < _tokens.size())
- return true;
- size_t n = i - _tokens.size() + 1; // how many more elements we need?
-
- if (n > 0) {
- size_t fetched = fetch(n);
- return fetched >= n;
- }
-
- return true;
-}
-
-size_t BufferedTokenStream::fetch(size_t n) {
- if (_fetchedEOF) {
- return 0;
- }
-
- size_t i = 0;
- while (i < n) {
- std::unique_ptr<Token> t(_tokenSource->nextToken());
-
- if (is<WritableToken *>(t.get())) {
- (static_cast<WritableToken *>(t.get()))->setTokenIndex(_tokens.size());
- }
-
- _tokens.push_back(std::move(t));
- ++i;
-
- if (_tokens.back()->getType() == Token::EOF) {
- _fetchedEOF = true;
- break;
- }
- }
-
- return i;
-}
-
-Token* BufferedTokenStream::get(size_t i) const {
- if (i >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::string("token index ") +
- std::to_string(i) +
- std::string(" out of range 0..") +
- std::to_string(_tokens.size() - 1));
- }
- return _tokens[i].get();
-}
-
-std::vector<Token *> BufferedTokenStream::get(size_t start, size_t stop) {
- std::vector<Token *> subset;
-
- lazyInit();
-
- if (_tokens.empty()) {
- return subset;
- }
-
- if (stop >= _tokens.size()) {
- stop = _tokens.size() - 1;
- }
- for (size_t i = start; i <= stop; i++) {
- Token *t = _tokens[i].get();
- if (t->getType() == Token::EOF) {
- break;
- }
- subset.push_back(t);
- }
- return subset;
-}
-
-size_t BufferedTokenStream::LA(ssize_t i) {
- return LT(i)->getType();
-}
-
-Token* BufferedTokenStream::LB(size_t k) {
- if (k > _p) {
- return nullptr;
- }
- return _tokens[_p - k].get();
-}
-
-Token* BufferedTokenStream::LT(ssize_t k) {
- lazyInit();
- if (k == 0) {
- return nullptr;
- }
- if (k < 0) {
- return LB(-k);
- }
-
- size_t i = _p + k - 1;
- sync(i);
- if (i >= _tokens.size()) { // return EOF token
- // EOF must be last token
- return _tokens.back().get();
- }
-
- return _tokens[i].get();
-}
-
-ssize_t BufferedTokenStream::adjustSeekIndex(size_t i) {
- return i;
-}
-
-void BufferedTokenStream::lazyInit() {
- if (_needSetup) {
- setup();
- }
-}
-
-void BufferedTokenStream::setup() {
- _needSetup = false;
- sync(0);
- _p = adjustSeekIndex(0);
-}
-
-void BufferedTokenStream::setTokenSource(TokenSource *tokenSource) {
- _tokenSource = tokenSource;
- _tokens.clear();
- _fetchedEOF = false;
- _needSetup = true;
-}
-
-std::vector<Token *> BufferedTokenStream::getTokens() {
- std::vector<Token *> result;
- for (auto &t : _tokens)
- result.push_back(t.get());
- return result;
-}
-
-std::vector<Token *> BufferedTokenStream::getTokens(size_t start, size_t stop) {
- return getTokens(start, stop, std::vector<size_t>());
-}
-
-std::vector<Token *> BufferedTokenStream::getTokens(size_t start, size_t stop, const std::vector<size_t> &types) {
- lazyInit();
- if (stop >= _tokens.size() || start >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::string("start ") +
- std::to_string(start) +
- std::string(" or stop ") +
- std::to_string(stop) +
- std::string(" not in 0..") +
- std::to_string(_tokens.size() - 1));
- }
-
- std::vector<Token *> filteredTokens;
-
- if (start > stop) {
- return filteredTokens;
- }
-
- for (size_t i = start; i <= stop; i++) {
- Token *tok = _tokens[i].get();
-
- if (types.empty() || std::find(types.begin(), types.end(), tok->getType()) != types.end()) {
- filteredTokens.push_back(tok);
- }
- }
- return filteredTokens;
-}
-
-std::vector<Token *> BufferedTokenStream::getTokens(size_t start, size_t stop, size_t ttype) {
- std::vector<size_t> s;
- s.push_back(ttype);
- return getTokens(start, stop, s);
-}
-
-ssize_t BufferedTokenStream::nextTokenOnChannel(size_t i, size_t channel) {
- sync(i);
- if (i >= size()) {
- return size() - 1;
- }
-
- Token *token = _tokens[i].get();
- while (token->getChannel() != channel) {
- if (token->getType() == Token::EOF) {
- return i;
- }
- i++;
- sync(i);
- token = _tokens[i].get();
- }
- return i;
-}
-
-ssize_t BufferedTokenStream::previousTokenOnChannel(size_t i, size_t channel) {
- sync(i);
- if (i >= size()) {
- // the EOF token is on every channel
- return size() - 1;
- }
-
- while (true) {
- Token *token = _tokens[i].get();
- if (token->getType() == Token::EOF || token->getChannel() == channel) {
- return i;
- }
-
- if (i == 0)
- return -1;
- i--;
- }
- return i;
-}
-
-std::vector<Token *> BufferedTokenStream::getHiddenTokensToRight(size_t tokenIndex, ssize_t channel) {
- lazyInit();
- if (tokenIndex >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::to_string(tokenIndex) + " not in 0.." + std::to_string(_tokens.size() - 1));
- }
-
- ssize_t nextOnChannel = nextTokenOnChannel(tokenIndex + 1, Lexer::DEFAULT_TOKEN_CHANNEL);
- size_t to;
- size_t from = tokenIndex + 1;
- // if none on-channel to the right, nextOnChannel == -1, so set 'to' to the last token
- if (nextOnChannel == -1) {
- to = static_cast<ssize_t>(size() - 1);
- } else {
- to = nextOnChannel;
- }
-
- return filterForChannel(from, to, channel);
-}
-
-std::vector<Token *> BufferedTokenStream::getHiddenTokensToRight(size_t tokenIndex) {
- return getHiddenTokensToRight(tokenIndex, -1);
-}
-
-std::vector<Token *> BufferedTokenStream::getHiddenTokensToLeft(size_t tokenIndex, ssize_t channel) {
- lazyInit();
- if (tokenIndex >= _tokens.size()) {
- throw IndexOutOfBoundsException(std::to_string(tokenIndex) + " not in 0.." + std::to_string(_tokens.size() - 1));
- }
-
- if (tokenIndex == 0) {
- // Obviously no tokens can appear before the first token.
- return { };
- }
-
- ssize_t prevOnChannel = previousTokenOnChannel(tokenIndex - 1, Lexer::DEFAULT_TOKEN_CHANNEL);
- if (prevOnChannel == static_cast<ssize_t>(tokenIndex - 1)) {
- return { };
- }
- // if none on-channel to the left, prevOnChannel == -1, so from = 0
- size_t from = static_cast<size_t>(prevOnChannel + 1);
- size_t to = tokenIndex - 1;
-
- return filterForChannel(from, to, channel);
-}
-
-std::vector<Token *> BufferedTokenStream::getHiddenTokensToLeft(size_t tokenIndex) {
- return getHiddenTokensToLeft(tokenIndex, -1);
-}
-
-std::vector<Token *> BufferedTokenStream::filterForChannel(size_t from, size_t to, ssize_t channel) {
- std::vector<Token *> hidden;
- for (size_t i = from; i <= to; i++) {
- Token *t = _tokens[i].get();
- if (channel == -1) {
- if (t->getChannel() != Lexer::DEFAULT_TOKEN_CHANNEL) {
- hidden.push_back(t);
- }
- } else {
- if (t->getChannel() == static_cast<size_t>(channel)) {
- hidden.push_back(t);
- }
- }
- }
-
- return hidden;
-}
-
-bool BufferedTokenStream::isInitialized() const {
- return !_needSetup;
-}
-
-/**
- * Get the text of all tokens in this buffer.
- */
-std::string BufferedTokenStream::getSourceName() const
-{
- return _tokenSource->getSourceName();
-}
-
-std::string BufferedTokenStream::getText() {
- fill();
- return getText(misc::Interval(0U, size() - 1));
-}
-
-std::string BufferedTokenStream::getText(const misc::Interval &interval) {
- lazyInit();
- size_t start = interval.a;
- size_t stop = interval.b;
- if (start == INVALID_INDEX || stop == INVALID_INDEX) {
- return "";
- }
- sync(stop);
- if (stop >= _tokens.size()) {
- stop = _tokens.size() - 1;
- }
-
- std::stringstream ss;
- for (size_t i = start; i <= stop; i++) {
- Token *t = _tokens[i].get();
- if (t->getType() == Token::EOF) {
- break;
- }
- ss << t->getText();
- }
- return ss.str();
-}
-
-std::string BufferedTokenStream::getText(RuleContext *ctx) {
- return getText(ctx->getSourceInterval());
-}
-
-std::string BufferedTokenStream::getText(Token *start, Token *stop) {
- if (start != nullptr && stop != nullptr) {
- return getText(misc::Interval(start->getTokenIndex(), stop->getTokenIndex()));
- }
-
- return "";
-}
-
-void BufferedTokenStream::fill() {
- lazyInit();
- const size_t blockSize = 1000;
- while (true) {
- size_t fetched = fetch(blockSize);
- if (fetched < blockSize) {
- return;
- }
- }
-}
-
-void BufferedTokenStream::InitializeInstanceFields() {
- _needSetup = true;
- _fetchedEOF = false;
-}
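A sketch of the hidden-token helpers defined above, useful for recovering comments or whitespace that the lexer put on a non-default channel. MyLexer is a hypothetical generated lexer; everything else is the API shown in this file.

#include <iostream>
#include "BufferedTokenStream.h"
#include "MyLexer.h"  // hypothetical generated lexer that routes comments to a hidden channel

void dumpSurroundingHiddenTokens(antlr4::BufferedTokenStream &tokens, size_t tokenIndex) {
  tokens.fill();  // pull every token from the lexer up to EOF
  for (antlr4::Token *t : tokens.getHiddenTokensToLeft(tokenIndex)) {   // any off-default channel
    std::cout << "before #" << tokenIndex << ": '" << t->getText() << "'\n";
  }
  for (antlr4::Token *t : tokens.getHiddenTokensToRight(tokenIndex)) {
    std::cout << "after  #" << tokenIndex << ": '" << t->getText() << "'\n";
  }
}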
diff --git a/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.h b/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.h
deleted file mode 100644
index 2161471241..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/BufferedTokenStream.h
+++ /dev/null
@@ -1,200 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenStream.h"
-
-namespace antlr4 {
-
- /**
- * This implementation of {@link TokenStream} loads tokens from a
- * {@link TokenSource} on-demand, and places the tokens in a buffer to provide
- * access to any previous token by index.
- *
- * <p>
- * This token stream ignores the value of {@link Token#getChannel}. If your
- * parser requires the token stream to filter tokens to only those on a particular
- * channel, such as {@link Token#DEFAULT_CHANNEL} or
- * {@link Token#HIDDEN_CHANNEL}, use a filtering token stream such as
- * {@link CommonTokenStream}.</p>
- */
- class ANTLR4CPP_PUBLIC BufferedTokenStream : public TokenStream {
- public:
- BufferedTokenStream(TokenSource *tokenSource);
- BufferedTokenStream(const BufferedTokenStream& other) = delete;
-
- BufferedTokenStream& operator = (const BufferedTokenStream& other) = delete;
-
- virtual TokenSource* getTokenSource() const override;
- virtual size_t index() override;
- virtual ssize_t mark() override;
-
- virtual void release(ssize_t marker) override;
- virtual void reset();
- virtual void seek(size_t index) override;
-
- virtual size_t size() override;
- virtual void consume() override;
-
- virtual Token* get(size_t i) const override;
-
- /// Get all tokens from start..stop inclusively.
- virtual std::vector<Token *> get(size_t start, size_t stop);
-
- virtual size_t LA(ssize_t i) override;
- virtual Token* LT(ssize_t k) override;
-
- /// Reset this token stream by setting its token source.
- virtual void setTokenSource(TokenSource *tokenSource);
- virtual std::vector<Token *> getTokens();
- virtual std::vector<Token *> getTokens(size_t start, size_t stop);
-
- /// <summary>
- /// Given a start and stop index, return a vector of all tokens whose
- /// type is in the given set. Return an empty vector if no tokens were
- /// found. This method looks at both on- and off-channel tokens.
- /// </summary>
- virtual std::vector<Token *> getTokens(size_t start, size_t stop, const std::vector<size_t> &types);
- virtual std::vector<Token *> getTokens(size_t start, size_t stop, size_t ttype);
-
- /// Collect all tokens on the specified channel to the right of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL or
- /// EOF. If channel is -1, find any non-default channel token.
- virtual std::vector<Token *> getHiddenTokensToRight(size_t tokenIndex, ssize_t channel);
-
- /// <summary>
- /// Collect all hidden tokens (any off-default channel) to the right of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL
- /// or EOF.
- /// </summary>
- virtual std::vector<Token *> getHiddenTokensToRight(size_t tokenIndex);
-
- /// <summary>
- /// Collect all tokens on the specified channel to the left of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
- /// If channel is -1, find any non-default channel token.
- /// </summary>
- virtual std::vector<Token *> getHiddenTokensToLeft(size_t tokenIndex, ssize_t channel);
-
- /// <summary>
- /// Collect all hidden tokens (any off-default channel) to the left of
- /// the current token up until we see a token on DEFAULT_TOKEN_CHANNEL.
- /// </summary>
- virtual std::vector<Token *> getHiddenTokensToLeft(size_t tokenIndex);
-
- virtual std::string getSourceName() const override;
- virtual std::string getText() override;
- virtual std::string getText(const misc::Interval &interval) override;
- virtual std::string getText(RuleContext *ctx) override;
- virtual std::string getText(Token *start, Token *stop) override;
-
- /// Get all tokens from lexer until EOF.
- virtual void fill();
-
- protected:
- /**
- * The {@link TokenSource} from which tokens for this stream are fetched.
- */
- TokenSource *_tokenSource;
-
- /**
- * A collection of all tokens fetched from the token source. The list is
- * considered a complete view of the input once {@link #fetchedEOF} is set
- * to {@code true}.
- */
- std::vector<std::unique_ptr<Token>> _tokens;
-
- /**
- * The index into {@link #tokens} of the current token (next token to
- * {@link #consume}). {@link #tokens}{@code [}{@link #p}{@code ]} should be
- * {@link #LT LT(1)}.
- *
- * <p>This field is set to -1 when the stream is first constructed or when
- * {@link #setTokenSource} is called, indicating that the first token has
- * not yet been fetched from the token source. For additional information,
- * see the documentation of {@link IntStream} for a description of
- * Initializing Methods.</p>
- */
- // ml: since -1 would require making this member signed for just this single aspect, we use a member _needSetup instead.
- // Use bool isInitialized() to find out if this stream has started reading.
- size_t _p;
-
- /**
- * Indicates whether the {@link Token#EOF} token has been fetched from
- * {@link #tokenSource} and added to {@link #tokens}. This field improves
- * performance for the following cases:
- *
- * <ul>
- * <li>{@link #consume}: The lookahead check in {@link #consume} to prevent
- * consuming the EOF symbol is optimized by checking the values of
- * {@link #fetchedEOF} and {@link #p} instead of calling {@link #LA}.</li>
- * <li>{@link #fetch}: The check to prevent adding multiple EOF symbols into
- * {@link #tokens} is trivial with this field.</li>
- * </ul>
- */
- bool _fetchedEOF;
-
- /// <summary>
- /// Make sure index {@code i} in tokens has a token.
- /// </summary>
- /// <returns> {@code true} if a token is located at index {@code i}, otherwise
- /// {@code false}. </returns>
- /// <seealso cref= #get(int i) </seealso>
- virtual bool sync(size_t i);
-
- /// <summary>
- /// Add {@code n} elements to buffer.
- /// </summary>
- /// <returns> The actual number of elements added to the buffer. </returns>
- virtual size_t fetch(size_t n);
-
- virtual Token* LB(size_t k);
-
- /// Allows derived classes to modify the behavior of operations which change
- /// the current stream position by adjusting the target token index of a seek
- /// operation. The default implementation simply returns {@code i}. If an
- /// exception is thrown in this method, the current stream index should not be
- /// changed.
- /// <p/>
- /// For example, <seealso cref="CommonTokenStream"/> overrides this method to ensure that
- /// the seek target is always an on-channel token.
- ///
- /// <param name="i"> The target token index. </param>
- /// <returns> The adjusted target token index. </returns>
- virtual ssize_t adjustSeekIndex(size_t i);
- void lazyInit();
- virtual void setup();
-
- /**
- * Given a starting index, return the index of the next token on channel.
- * Return {@code i} if {@code tokens[i]} is on channel. Return the index of
- * the EOF token if there are no tokens on channel between {@code i} and
- * EOF.
- */
- virtual ssize_t nextTokenOnChannel(size_t i, size_t channel);
-
- /**
- * Given a starting index, return the index of the previous token on
- * channel. Return {@code i} if {@code tokens[i]} is on channel. Return -1
- * if there are no tokens on channel between {@code i} and 0.
- *
- * <p>
- * If {@code i} specifies an index at or after the EOF token, the EOF token
- * index is returned. This is due to the fact that the EOF token is treated
- * as though it were on every channel.</p>
- */
- virtual ssize_t previousTokenOnChannel(size_t i, size_t channel);
-
- virtual std::vector<Token *> filterForChannel(size_t from, size_t to, ssize_t channel);
-
- bool isInitialized() const;
-
- private:
- bool _needSetup;
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CharStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/CharStream.cpp
deleted file mode 100644
index b05874c8bf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CharStream.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "CharStream.h"
-
-using namespace antlr4;
-
-CharStream::~CharStream() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CharStream.h b/contrib/libs/antlr4_cpp_runtime/src/CharStream.h
deleted file mode 100644
index a9952dbbac..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CharStream.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-#include "misc/Interval.h"
-
-namespace antlr4 {
-
- /// A source of characters for an ANTLR lexer.
- class ANTLR4CPP_PUBLIC CharStream : public IntStream {
- public:
- virtual ~CharStream();
-
- /// This method returns the text for a range of characters within this input
- /// stream. This method is guaranteed to not throw an exception if the
- /// specified interval lies entirely within a marked range. For more
- /// information about marked ranges, see IntStream::mark.
- ///
- /// <param name="interval"> an interval within the stream </param>
- /// <returns> the text of the specified interval
- /// </returns>
- /// <exception cref="NullPointerException"> if {@code interval} is {@code null} </exception>
- /// <exception cref="IllegalArgumentException"> if {@code interval.a < 0}, or if
- /// {@code interval.b < interval.a - 1}, or if {@code interval.b} lies at or
- /// past the end of the stream </exception>
- /// <exception cref="UnsupportedOperationException"> if the stream does not support
- /// getting the text of the specified interval </exception>
- virtual std::string getText(const misc::Interval &interval) = 0;
-
- virtual std::string toString() const = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonToken.cpp b/contrib/libs/antlr4_cpp_runtime/src/CommonToken.cpp
deleted file mode 100644
index 6e9f06a249..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonToken.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "TokenSource.h"
-#include "CharStream.h"
-#include "Recognizer.h"
-#include "Vocabulary.h"
-
-#include "misc/Interval.h"
-
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "CommonToken.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-
-using namespace antlrcpp;
-
-const std::pair<TokenSource*, CharStream*> CommonToken::EMPTY_SOURCE;
-
-CommonToken::CommonToken(size_t type) {
- InitializeInstanceFields();
- _type = type;
-}
-
-CommonToken::CommonToken(std::pair<TokenSource*, CharStream*> source, size_t type, size_t channel, size_t start, size_t stop) {
- InitializeInstanceFields();
- _source = source;
- _type = type;
- _channel = channel;
- _start = start;
- _stop = stop;
- if (_source.first != nullptr) {
- _line = static_cast<int>(source.first->getLine());
- _charPositionInLine = source.first->getCharPositionInLine();
- }
-}
-
-CommonToken::CommonToken(size_t type, const std::string &text) {
- InitializeInstanceFields();
- _type = type;
- _channel = DEFAULT_CHANNEL;
- _text = text;
- _source = EMPTY_SOURCE;
-}
-
-CommonToken::CommonToken(Token *oldToken) {
- InitializeInstanceFields();
- _type = oldToken->getType();
- _line = oldToken->getLine();
- _index = oldToken->getTokenIndex();
- _charPositionInLine = oldToken->getCharPositionInLine();
- _channel = oldToken->getChannel();
- _start = oldToken->getStartIndex();
- _stop = oldToken->getStopIndex();
-
- if (is<CommonToken *>(oldToken)) {
- _text = (static_cast<CommonToken *>(oldToken))->_text;
- _source = (static_cast<CommonToken *>(oldToken))->_source;
- } else {
- _text = oldToken->getText();
- _source = { oldToken->getTokenSource(), oldToken->getInputStream() };
- }
-}
-
-size_t CommonToken::getType() const {
- return _type;
-}
-
-void CommonToken::setLine(size_t line) {
- _line = line;
-}
-
-std::string CommonToken::getText() const {
- if (!_text.empty()) {
- return _text;
- }
-
- CharStream *input = getInputStream();
- if (input == nullptr) {
- return "";
- }
- size_t n = input->size();
- if (_start < n && _stop < n) {
- return input->getText(misc::Interval(_start, _stop));
- } else {
- return "<EOF>";
- }
-}
-
-void CommonToken::setText(const std::string &text) {
- _text = text;
-}
-
-size_t CommonToken::getLine() const {
- return _line;
-}
-
-size_t CommonToken::getCharPositionInLine() const {
- return _charPositionInLine;
-}
-
-void CommonToken::setCharPositionInLine(size_t charPositionInLine) {
- _charPositionInLine = charPositionInLine;
-}
-
-size_t CommonToken::getChannel() const {
- return _channel;
-}
-
-void CommonToken::setChannel(size_t channel) {
- _channel = channel;
-}
-
-void CommonToken::setType(size_t type) {
- _type = type;
-}
-
-size_t CommonToken::getStartIndex() const {
- return _start;
-}
-
-void CommonToken::setStartIndex(size_t start) {
- _start = start;
-}
-
-size_t CommonToken::getStopIndex() const {
- return _stop;
-}
-
-void CommonToken::setStopIndex(size_t stop) {
- _stop = stop;
-}
-
-size_t CommonToken::getTokenIndex() const {
- return _index;
-}
-
-void CommonToken::setTokenIndex(size_t index) {
- _index = index;
-}
-
-antlr4::TokenSource *CommonToken::getTokenSource() const {
- return _source.first;
-}
-
-antlr4::CharStream *CommonToken::getInputStream() const {
- return _source.second;
-}
-
-std::string CommonToken::toString() const {
- return toString(nullptr);
-}
-
-std::string CommonToken::toString(Recognizer *r) const {
- std::stringstream ss;
-
- std::string channelStr;
- if (_channel > 0) {
- channelStr = ",channel=" + std::to_string(_channel);
- }
- std::string txt = getText();
- if (!txt.empty()) {
- txt = antlrcpp::escapeWhitespace(txt);
- } else {
- txt = "<no text>";
- }
-
- std::string typeString = std::to_string(symbolToNumeric(_type));
- if (r != nullptr)
- typeString = r->getVocabulary().getDisplayName(_type);
-
- ss << "[@" << symbolToNumeric(getTokenIndex()) << "," << symbolToNumeric(_start) << ":" << symbolToNumeric(_stop)
- << "='" << txt << "',<" << typeString << ">" << channelStr << "," << _line << ":"
- << getCharPositionInLine() << "]";
-
- return ss.str();
-}
-
-void CommonToken::InitializeInstanceFields() {
- _type = 0;
- _line = 0;
- _charPositionInLine = INVALID_INDEX;
- _channel = DEFAULT_CHANNEL;
- _index = INVALID_INDEX;
- _start = 0;
- _stop = 0;
- _source = EMPTY_SOURCE;
-}
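A short sketch of the getText() fallback implemented above: with no explicit text, the token slices the inclusive [start, stop] range out of its attached CharStream; with explicit text, the input is never consulted. Everything used here appears in this diff; include paths are assumptions.

#include <iostream>
#include "ANTLRInputStream.h"
#include "CommonToken.h"
#include "Token.h"

int main() {
  antlr4::ANTLRInputStream input("select 42");

  // No explicit text: getText() reads code points 7..8 from the CharStream.
  antlr4::CommonToken fromStream({nullptr, &input}, /*type=*/1,
                                 antlr4::Token::DEFAULT_CHANNEL, /*start=*/7, /*stop=*/8);
  std::cout << fromStream.getText() << "\n";  // "42"

  // Explicit text: stored in _text and returned as-is.
  antlr4::CommonToken withText(/*type=*/1, "42");
  std::cout << withText.getText() << "\n";    // "42"
  return 0;
}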
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonToken.h b/contrib/libs/antlr4_cpp_runtime/src/CommonToken.h
deleted file mode 100644
index 3fbc2ae4f5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonToken.h
+++ /dev/null
@@ -1,158 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "WritableToken.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC CommonToken : public WritableToken {
- protected:
- /**
- * An empty {@link Pair} which is used as the default value of
- * {@link #source} for tokens that do not have a source.
- */
- static const std::pair<TokenSource *, CharStream *> EMPTY_SOURCE;
-
- /**
- * This is the backing field for {@link #getType} and {@link #setType}.
- */
- size_t _type;
-
- /**
- * This is the backing field for {@link #getLine} and {@link #setLine}.
- */
- size_t _line;
-
- /**
- * This is the backing field for {@link #getCharPositionInLine} and
- * {@link #setCharPositionInLine}.
- */
- size_t _charPositionInLine; // set to invalid position
-
- /**
- * This is the backing field for {@link #getChannel} and
- * {@link #setChannel}.
- */
- size_t _channel;
-
- /**
- * This is the backing field for {@link #getTokenSource} and
- * {@link #getInputStream}.
- *
- * <p>
- * These properties share a field to reduce the memory footprint of
- * {@link CommonToken}. Tokens created by a {@link CommonTokenFactory} from
- * the same source and input stream share a reference to the same
- * {@link Pair} containing these values.</p>
- */
-
- std::pair<TokenSource *, CharStream *> _source; // ml: pure references, usually from statically allocated classes.
-
- /**
- * This is the backing field for {@link #getText} when the token text is
- * explicitly set in the constructor or via {@link #setText}.
- *
- * @see #getText()
- */
- std::string _text;
-
- /**
- * This is the backing field for {@link #getTokenIndex} and
- * {@link #setTokenIndex}.
- */
- size_t _index;
-
- /**
- * This is the backing field for {@link #getStartIndex} and
- * {@link #setStartIndex}.
- */
- size_t _start;
-
- /**
- * This is the backing field for {@link #getStopIndex} and
- * {@link #setStopIndex}.
- */
- size_t _stop;
-
- public:
- /**
- * Constructs a new {@link CommonToken} with the specified token type.
- *
- * @param type The token type.
- */
- CommonToken(size_t type);
- CommonToken(std::pair<TokenSource*, CharStream*> source, size_t type, size_t channel, size_t start, size_t stop);
-
- /**
- * Constructs a new {@link CommonToken} with the specified token type and
- * text.
- *
- * @param type The token type.
- * @param text The text of the token.
- */
- CommonToken(size_t type, const std::string &text);
-
- /**
- * Constructs a new {@link CommonToken} as a copy of another {@link Token}.
- *
- * <p>
- * If {@code oldToken} is also a {@link CommonToken} instance, the newly
- * constructed token will share a reference to the {@link #text} field and
- * the {@link Pair} stored in {@link #source}. Otherwise, {@link #text} will
- * be assigned the result of calling {@link #getText}, and {@link #source}
- * will be constructed from the result of {@link Token#getTokenSource} and
- * {@link Token#getInputStream}.</p>
- *
- * @param oldToken The token to copy.
- */
- CommonToken(Token *oldToken);
-
- virtual size_t getType() const override;
-
- /**
- * Explicitly set the text for this token. If {@code text} is not
- * {@code null}, then {@link #getText} will return this value rather than
- * extracting the text from the input.
- *
- * @param text The explicit text of the token, or {@code null} if the text
- * should be obtained from the input along with the start and stop indexes
- * of the token.
- */
- virtual void setText(const std::string &text) override;
- virtual std::string getText() const override;
-
- virtual void setLine(size_t line) override;
- virtual size_t getLine() const override;
-
- virtual size_t getCharPositionInLine() const override;
- virtual void setCharPositionInLine(size_t charPositionInLine) override;
-
- virtual size_t getChannel() const override;
- virtual void setChannel(size_t channel) override;
-
- virtual void setType(size_t type) override;
-
- virtual size_t getStartIndex() const override;
- virtual void setStartIndex(size_t start);
-
- virtual size_t getStopIndex() const override;
- virtual void setStopIndex(size_t stop);
-
- virtual size_t getTokenIndex() const override;
- virtual void setTokenIndex(size_t index) override;
-
- virtual TokenSource *getTokenSource() const override;
- virtual CharStream *getInputStream() const override;
-
- virtual std::string toString() const override;
-
- virtual std::string toString(Recognizer *r) const;
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.cpp b/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.cpp
deleted file mode 100644
index 23d8f7003a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-#include "CommonToken.h"
-#include "CharStream.h"
-
-#include "CommonTokenFactory.h"
-
-using namespace antlr4;
-
-const std::unique_ptr<TokenFactory<CommonToken>> CommonTokenFactory::DEFAULT(new CommonTokenFactory);
-
-CommonTokenFactory::CommonTokenFactory(bool copyText_) : copyText(copyText_) {
-}
-
-CommonTokenFactory::CommonTokenFactory() : CommonTokenFactory(false) {
-}
-
-std::unique_ptr<CommonToken> CommonTokenFactory::create(std::pair<TokenSource*, CharStream*> source, size_t type,
- const std::string &text, size_t channel, size_t start, size_t stop, size_t line, size_t charPositionInLine) {
-
- std::unique_ptr<CommonToken> t(new CommonToken(source, type, channel, start, stop));
- t->setLine(line);
- t->setCharPositionInLine(charPositionInLine);
- if (text != "") {
- t->setText(text);
- } else if (copyText && source.second != nullptr) {
- t->setText(source.second->getText(misc::Interval(start, stop)));
- }
-
- return t;
-}
-
-std::unique_ptr<CommonToken> CommonTokenFactory::create(size_t type, const std::string &text) {
- return std::unique_ptr<CommonToken>(new CommonToken(type, text));
-}
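A minimal sketch of the factory removed above: the shared DEFAULT instance (copyText = false) is what lexers use unless told otherwise, while a copyText factory snapshots the matched text at creation time when a CharStream is attached. Only APIs from this diff are used.

#include <iostream>
#include "CommonToken.h"
#include "CommonTokenFactory.h"

int main() {
  // The shared DEFAULT factory does not copy text out of the input stream.
  auto token = antlr4::CommonTokenFactory::DEFAULT->create(/*type=*/1, "hello");
  std::cout << token->getText() << "\n";  // "hello"

  // A copyText factory eagerly copies token text from the CharStream when one is attached,
  // so getText() keeps working even if the stream cannot serve substrings later.
  antlr4::CommonTokenFactory copying(/*copyText=*/true);
  auto other = copying.create(/*type=*/1, "world");
  std::cout << other->getText() << "\n";  // "world"
  return 0;
}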
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.h b/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.h
deleted file mode 100644
index 0ae1a0353c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenFactory.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenFactory.h"
-
-namespace antlr4 {
-
- /**
- * This default implementation of {@link TokenFactory} creates
- * {@link CommonToken} objects.
- */
- class ANTLR4CPP_PUBLIC CommonTokenFactory : public TokenFactory<CommonToken> {
- public:
- /**
- * The default {@link CommonTokenFactory} instance.
- *
- * <p>
- * This token factory does not explicitly copy token text when constructing
- * tokens.</p>
- */
- static const std::unique_ptr<TokenFactory<CommonToken>> DEFAULT;
-
- protected:
- /**
- * Indicates whether {@link CommonToken#setText} should be called after
- * constructing tokens to explicitly set the text. This is useful for cases
- * where the input stream might not be able to provide arbitrary substrings
- * of text from the input after the lexer creates a token (e.g. the
- * implementation of {@link CharStream#getText} in
- * {@link UnbufferedCharStream} throws an
- * {@link UnsupportedOperationException}). Explicitly setting the token text
- * allows {@link Token#getText} to be called at any time regardless of the
- * input stream implementation.
- *
- * <p>
- * The default value is {@code false} to avoid the performance and memory
- * overhead of copying text for every token unless explicitly requested.</p>
- */
- const bool copyText;
-
- public:
- /**
- * Constructs a {@link CommonTokenFactory} with the specified value for
- * {@link #copyText}.
- *
- * <p>
- * When {@code copyText} is {@code false}, the {@link #DEFAULT} instance
- * should be used instead of constructing a new instance.</p>
- *
- * @param copyText The value for {@link #copyText}.
- */
- CommonTokenFactory(bool copyText);
-
- /**
- * Constructs a {@link CommonTokenFactory} with {@link #copyText} set to
- * {@code false}.
- *
- * <p>
- * The {@link #DEFAULT} instance should be used instead of calling this
- * directly.</p>
- */
- CommonTokenFactory();
-
- virtual std::unique_ptr<CommonToken> create(std::pair<TokenSource*, CharStream*> source, size_t type,
- const std::string &text, size_t channel, size_t start, size_t stop, size_t line, size_t charPositionInLine) override;
-
- virtual std::unique_ptr<CommonToken> create(size_t type, const std::string &text) override;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.cpp
deleted file mode 100644
index 02a2e55af3..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.cpp
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-#include "CommonTokenStream.h"
-
-using namespace antlr4;
-
-CommonTokenStream::CommonTokenStream(TokenSource *tokenSource) : CommonTokenStream(tokenSource, Token::DEFAULT_CHANNEL) {
-}
-
-CommonTokenStream::CommonTokenStream(TokenSource *tokenSource, size_t channel_)
-: BufferedTokenStream(tokenSource), channel(channel_) {
-}
-
-ssize_t CommonTokenStream::adjustSeekIndex(size_t i) {
- return nextTokenOnChannel(i, channel);
-}
-
-Token* CommonTokenStream::LB(size_t k) {
- if (k == 0 || k > _p) {
- return nullptr;
- }
-
- ssize_t i = static_cast<ssize_t>(_p);
- size_t n = 1;
- // find k good tokens looking backwards
- while (n <= k) {
- // skip off-channel tokens
- i = previousTokenOnChannel(i - 1, channel);
- n++;
- }
- if (i < 0) {
- return nullptr;
- }
-
- return _tokens[i].get();
-}
-
-Token* CommonTokenStream::LT(ssize_t k) {
- lazyInit();
- if (k == 0) {
- return nullptr;
- }
- if (k < 0) {
- return LB(static_cast<size_t>(-k));
- }
- size_t i = _p;
- ssize_t n = 1; // we know tokens[p] is a good one
- // find k good tokens
- while (n < k) {
- // skip off-channel tokens, but make sure to not look past EOF
- if (sync(i + 1)) {
- i = nextTokenOnChannel(i + 1, channel);
- }
- n++;
- }
-
- return _tokens[i].get();
-}
-
-int CommonTokenStream::getNumberOfOnChannelTokens() {
- int n = 0;
- fill();
- for (size_t i = 0; i < _tokens.size(); i++) {
- Token *t = _tokens[i].get();
- if (t->getChannel() == channel) {
- n++;
- }
- if (t->getType() == Token::EOF) {
- break;
- }
- }
- return n;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.h b/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.h
deleted file mode 100644
index fde72c7386..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/CommonTokenStream.h
+++ /dev/null
@@ -1,79 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BufferedTokenStream.h"
-
-namespace antlr4 {
-
- /**
- * This class extends {@link BufferedTokenStream} with functionality to filter
- * token streams to tokens on a particular channel (tokens where
- * {@link Token#getChannel} returns a particular value).
- *
- * <p>
- * This token stream provides access to all tokens by index or when calling
- * methods like {@link #getText}. The channel filtering is only used for code
- * accessing tokens via the lookahead methods {@link #LA}, {@link #LT}, and
- * {@link #LB}.</p>
- *
- * <p>
- * By default, tokens are placed on the default channel
- * ({@link Token#DEFAULT_CHANNEL}), but may be reassigned by using the
- * {@code ->channel(HIDDEN)} lexer command, or by using an embedded action to
- * call {@link Lexer#setChannel}.
- * </p>
- *
- * <p>
- * Note: lexer rules which use the {@code ->skip} lexer command or call
- * {@link Lexer#skip} do not produce tokens at all, so input text matched by
- * such a rule will not be available as part of the token stream, regardless of
- * channel.</p>
- */
- class ANTLR4CPP_PUBLIC CommonTokenStream : public BufferedTokenStream {
- public:
- /**
- * Constructs a new {@link CommonTokenStream} using the specified token
- * source and the default token channel ({@link Token#DEFAULT_CHANNEL}).
- *
- * @param tokenSource The token source.
- */
- CommonTokenStream(TokenSource *tokenSource);
-
- /**
- * Constructs a new {@link CommonTokenStream} using the specified token
- * source and filtering tokens to the specified channel. Only tokens whose
- * {@link Token#getChannel} matches {@code channel} or have the
- * {@link Token#getType} equal to {@link Token#EOF} will be returned by the
- * token stream lookahead methods.
- *
- * @param tokenSource The token source.
- * @param channel The channel to use for filtering tokens.
- */
- CommonTokenStream(TokenSource *tokenSource, size_t channel);
-
- virtual Token* LT(ssize_t k) override;
-
- /// Count EOF just once.
- virtual int getNumberOfOnChannelTokens();
-
- protected:
- /**
- * Specifies the channel to use for filtering tokens.
- *
- * <p>
- * The default value is {@link Token#DEFAULT_CHANNEL}, which matches the
- * default channel assigned to tokens created by the lexer.</p>
- */
- size_t channel;
-
- virtual ssize_t adjustSeekIndex(size_t i) override;
-
- virtual Token* LB(size_t k) override;
-
- };
-
-} // namespace antlr4
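A sketch of the channel filtering described above: LT()/LA() skip off-channel tokens, while size() and index-based access still see every buffered token. MyLexer is a hypothetical generated lexer whose comment rule sends tokens to the hidden channel; the rest is the API from this file and BufferedTokenStream.

#include <iostream>
#include "ANTLRInputStream.h"
#include "CommonTokenStream.h"
#include "MyLexer.h"  // hypothetical generated lexer: comments -> hidden channel

int main() {
  antlr4::ANTLRInputStream input("a /* note */ b");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);  // lookahead filtered to Token::DEFAULT_CHANNEL
  tokens.fill();

  std::cout << "buffered tokens:   " << tokens.size() << "\n";                       // includes the comment
  std::cout << "on-channel tokens: " << tokens.getNumberOfOnChannelTokens() << "\n"; // skips it
  std::cout << "LT(1): " << tokens.LT(1)->getText() << "\n";  // "a" (off-channel tokens are skipped by LT/LA)
  return 0;
}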
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.cpp
deleted file mode 100644
index c16f949cd2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ConsoleErrorListener.h"
-
-using namespace antlr4;
-
-ConsoleErrorListener ConsoleErrorListener::INSTANCE;
-
-void ConsoleErrorListener::syntaxError(Recognizer * /*recognizer*/, Token * /*offendingSymbol*/,
- size_t line, size_t charPositionInLine, const std::string &msg, std::exception_ptr /*e*/) {
- std::cerr << "line " << line << ":" << charPositionInLine << " " << msg << std::endl;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.h
deleted file mode 100644
index f1d1188667..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ConsoleErrorListener.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC ConsoleErrorListener : public BaseErrorListener {
- public:
- /**
- * Provides a default instance of {@link ConsoleErrorListener}.
- */
- static ConsoleErrorListener INSTANCE;
-
- /**
- * {@inheritDoc}
- *
- * <p>
- * This implementation prints messages to {@link System#err} containing the
- * values of {@code line}, {@code charPositionInLine}, and {@code msg} using
- * the following format.</p>
- *
- * <pre>
- * line <em>line</em>:<em>charPositionInLine</em> <em>msg</em>
- * </pre>
- */
- virtual void syntaxError(Recognizer *recognizer, Token * offendingSymbol, size_t line, size_t charPositionInLine,
- const std::string &msg, std::exception_ptr e) override;
- };
-
-} // namespace antlr4
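For reference, a hedged sketch of how the shared INSTANCE above is managed; it assumes Recognizer's removeErrorListeners()/addErrorListener(), which are not part of this file but are how the runtime attaches this listener by default.

#include "ConsoleErrorListener.h"
#include "Recognizer.h"

// Every recognizer starts with ConsoleErrorListener::INSTANCE attached. To return to
// console-only reporting after installing custom listeners:
void resetToConsoleReporting(antlr4::Recognizer &recognizer) {
  recognizer.removeErrorListeners();                                      // drops all listeners
  recognizer.addErrorListener(&antlr4::ConsoleErrorListener::INSTANCE);   // "line <line>:<pos> <msg>" to stderr
}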
diff --git a/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.cpp b/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.cpp
deleted file mode 100644
index e5a7327859..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.cpp
+++ /dev/null
@@ -1,336 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "NoViableAltException.h"
-#include "misc/IntervalSet.h"
-#include "atn/ParserATNSimulator.h"
-#include "InputMismatchException.h"
-#include "FailedPredicateException.h"
-#include "ParserRuleContext.h"
-#include "atn/RuleTransition.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "support/StringUtils.h"
-#include "support/Casts.h"
-#include "Parser.h"
-#include "CommonToken.h"
-#include "Vocabulary.h"
-
-#include "DefaultErrorStrategy.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-using namespace antlrcpp;
-
-DefaultErrorStrategy::DefaultErrorStrategy() {
- InitializeInstanceFields();
-}
-
-DefaultErrorStrategy::~DefaultErrorStrategy() {
-}
-
-void DefaultErrorStrategy::reset(Parser *recognizer) {
- _errorSymbols.clear();
- endErrorCondition(recognizer);
-}
-
-void DefaultErrorStrategy::beginErrorCondition(Parser * /*recognizer*/) {
- errorRecoveryMode = true;
-}
-
-bool DefaultErrorStrategy::inErrorRecoveryMode(Parser * /*recognizer*/) {
- return errorRecoveryMode;
-}
-
-void DefaultErrorStrategy::endErrorCondition(Parser * /*recognizer*/) {
- errorRecoveryMode = false;
- lastErrorIndex = -1;
-}
-
-void DefaultErrorStrategy::reportMatch(Parser *recognizer) {
- endErrorCondition(recognizer);
-}
-
-void DefaultErrorStrategy::reportError(Parser *recognizer, const RecognitionException &e) {
- // If we've already reported an error and have not matched a token
- // yet successfully, don't report any errors.
- if (inErrorRecoveryMode(recognizer)) {
- return; // don't report spurious errors
- }
-
- beginErrorCondition(recognizer);
- if (is<const NoViableAltException *>(&e)) {
- reportNoViableAlternative(recognizer, static_cast<const NoViableAltException &>(e));
- } else if (is<const InputMismatchException *>(&e)) {
- reportInputMismatch(recognizer, static_cast<const InputMismatchException &>(e));
- } else if (is<const FailedPredicateException *>(&e)) {
- reportFailedPredicate(recognizer, static_cast<const FailedPredicateException &>(e));
- } else if (is<const RecognitionException *>(&e)) {
- recognizer->notifyErrorListeners(e.getOffendingToken(), e.what(), std::current_exception());
- }
-}
-
-void DefaultErrorStrategy::recover(Parser *recognizer, std::exception_ptr /*e*/) {
- if (lastErrorIndex == static_cast<int>(recognizer->getInputStream()->index()) &&
- lastErrorStates.contains(recognizer->getState())) {
-
- // uh oh, another error at same token index and previously-visited
- // state in ATN; must be a case where LT(1) is in the recovery
- // token set so nothing got consumed. Consume a single token
- // at least to prevent an infinite loop; this is a failsafe.
- recognizer->consume();
- }
- lastErrorIndex = static_cast<int>(recognizer->getInputStream()->index());
- lastErrorStates.add(recognizer->getState());
- misc::IntervalSet followSet = getErrorRecoverySet(recognizer);
- consumeUntil(recognizer, followSet);
-}
-
-void DefaultErrorStrategy::sync(Parser *recognizer) {
- atn::ATNState *s = recognizer->getInterpreter<atn::ATNSimulator>()->atn.states[recognizer->getState()];
-
- // If already recovering, don't try to sync
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- TokenStream *tokens = recognizer->getTokenStream();
- size_t la = tokens->LA(1);
-
- // try cheaper subset first; might get lucky. seems to shave a wee bit off
- auto nextTokens = recognizer->getATN().nextTokens(s);
- if (nextTokens.contains(Token::EPSILON) || nextTokens.contains(la)) {
- return;
- }
-
- switch (s->getStateType()) {
- case atn::ATNStateType::BLOCK_START:
- case atn::ATNStateType::STAR_BLOCK_START:
- case atn::ATNStateType::PLUS_BLOCK_START:
- case atn::ATNStateType::STAR_LOOP_ENTRY:
- // report error and recover if possible
- if (singleTokenDeletion(recognizer) != nullptr) {
- return;
- }
-
- throw InputMismatchException(recognizer);
-
- case atn::ATNStateType::PLUS_LOOP_BACK:
- case atn::ATNStateType::STAR_LOOP_BACK: {
- reportUnwantedToken(recognizer);
- misc::IntervalSet expecting = recognizer->getExpectedTokens();
- misc::IntervalSet whatFollowsLoopIterationOrRule = expecting.Or(getErrorRecoverySet(recognizer));
- consumeUntil(recognizer, whatFollowsLoopIterationOrRule);
- }
- break;
-
- default:
- // do nothing if we can't identify the exact kind of ATN state
- break;
- }
-}
-
-void DefaultErrorStrategy::reportNoViableAlternative(Parser *recognizer, const NoViableAltException &e) {
- TokenStream *tokens = recognizer->getTokenStream();
- std::string input;
- if (tokens != nullptr) {
- if (e.getStartToken()->getType() == Token::EOF) {
- input = "<EOF>";
- } else {
- input = tokens->getText(e.getStartToken(), e.getOffendingToken());
- }
- } else {
- input = "<unknown input>";
- }
- std::string msg = "no viable alternative at input " + escapeWSAndQuote(input);
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg, std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportInputMismatch(Parser *recognizer, const InputMismatchException &e) {
- std::string msg = "mismatched input " + getTokenErrorDisplay(e.getOffendingToken()) +
- " expecting " + e.getExpectedTokens().toString(recognizer->getVocabulary());
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg, std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportFailedPredicate(Parser *recognizer, const FailedPredicateException &e) {
- const std::string& ruleName = recognizer->getRuleNames()[recognizer->getContext()->getRuleIndex()];
- std::string msg = "rule " + ruleName + " " + e.what();
- recognizer->notifyErrorListeners(e.getOffendingToken(), msg, std::make_exception_ptr(e));
-}
-
-void DefaultErrorStrategy::reportUnwantedToken(Parser *recognizer) {
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- beginErrorCondition(recognizer);
-
- Token *t = recognizer->getCurrentToken();
- std::string tokenName = getTokenErrorDisplay(t);
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
-
- std::string msg = "extraneous input " + tokenName + " expecting " + expecting.toString(recognizer->getVocabulary());
- recognizer->notifyErrorListeners(t, msg, nullptr);
-}
-
-void DefaultErrorStrategy::reportMissingToken(Parser *recognizer) {
- if (inErrorRecoveryMode(recognizer)) {
- return;
- }
-
- beginErrorCondition(recognizer);
-
- Token *t = recognizer->getCurrentToken();
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- std::string expectedText = expecting.toString(recognizer->getVocabulary());
- std::string msg = "missing " + expectedText + " at " + getTokenErrorDisplay(t);
-
- recognizer->notifyErrorListeners(t, msg, nullptr);
-}
-
-Token* DefaultErrorStrategy::recoverInline(Parser *recognizer) {
- // Single token deletion.
- Token *matchedSymbol = singleTokenDeletion(recognizer);
- if (matchedSymbol) {
- // We have deleted the extra token.
- // Now, move past ttype token as if all were ok.
- recognizer->consume();
- return matchedSymbol;
- }
-
- // Single token insertion.
- if (singleTokenInsertion(recognizer)) {
- return getMissingSymbol(recognizer);
- }
-
- // Even that didn't work; must throw the exception.
- throw InputMismatchException(recognizer);
-}
-
-bool DefaultErrorStrategy::singleTokenInsertion(Parser *recognizer) {
- ssize_t currentSymbolType = recognizer->getInputStream()->LA(1);
-
- // if current token is consistent with what could come after current
- // ATN state, then we know we're missing a token; error recovery
- // is free to conjure up and insert the missing token
- atn::ATNState *currentState = recognizer->getInterpreter<atn::ATNSimulator>()->atn.states[recognizer->getState()];
- atn::ATNState *next = currentState->transitions[0]->target;
- const atn::ATN &atn = recognizer->getInterpreter<atn::ATNSimulator>()->atn;
- misc::IntervalSet expectingAtLL2 = atn.nextTokens(next, recognizer->getContext());
- if (expectingAtLL2.contains(currentSymbolType)) {
- reportMissingToken(recognizer);
- return true;
- }
- return false;
-}
-
-Token* DefaultErrorStrategy::singleTokenDeletion(Parser *recognizer) {
- size_t nextTokenType = recognizer->getInputStream()->LA(2);
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- if (expecting.contains(nextTokenType)) {
- reportUnwantedToken(recognizer);
- recognizer->consume(); // simply delete extra token
- // we want to return the token we're actually matching
- Token *matchedSymbol = recognizer->getCurrentToken();
- reportMatch(recognizer); // we know current token is correct
- return matchedSymbol;
- }
- return nullptr;
-}
-
-Token* DefaultErrorStrategy::getMissingSymbol(Parser *recognizer) {
- Token *currentSymbol = recognizer->getCurrentToken();
- misc::IntervalSet expecting = getExpectedTokens(recognizer);
- size_t expectedTokenType = expecting.getMinElement(); // get any element
- std::string tokenText;
- if (expectedTokenType == Token::EOF) {
- tokenText = "<missing EOF>";
- } else {
- tokenText = "<missing " + recognizer->getVocabulary().getDisplayName(expectedTokenType) + ">";
- }
- Token *current = currentSymbol;
- Token *lookback = recognizer->getTokenStream()->LT(-1);
- if (current->getType() == Token::EOF && lookback != nullptr) {
- current = lookback;
- }
-
- _errorSymbols.push_back(recognizer->getTokenFactory()->create(
- { current->getTokenSource(), current->getTokenSource()->getInputStream() },
- expectedTokenType, tokenText, Token::DEFAULT_CHANNEL, INVALID_INDEX, INVALID_INDEX,
- current->getLine(), current->getCharPositionInLine()));
-
- return _errorSymbols.back().get();
-}
-
-misc::IntervalSet DefaultErrorStrategy::getExpectedTokens(Parser *recognizer) {
- return recognizer->getExpectedTokens();
-}
-
-std::string DefaultErrorStrategy::getTokenErrorDisplay(Token *t) {
- if (t == nullptr) {
- return "<no Token>";
- }
- std::string s = getSymbolText(t);
- if (s == "") {
- if (getSymbolType(t) == Token::EOF) {
- s = "<EOF>";
- } else {
- s = "<" + std::to_string(getSymbolType(t)) + ">";
- }
- }
- return escapeWSAndQuote(s);
-}
-
-std::string DefaultErrorStrategy::getSymbolText(Token *symbol) {
- return symbol->getText();
-}
-
-size_t DefaultErrorStrategy::getSymbolType(Token *symbol) {
- return symbol->getType();
-}
-
-std::string DefaultErrorStrategy::escapeWSAndQuote(const std::string &s) const {
- std::string result;
- result.reserve(s.size() + 2);
- result.push_back('\'');
- antlrcpp::escapeWhitespace(result, s);
- result.push_back('\'');
- result.shrink_to_fit();
- return result;
-}
-
-misc::IntervalSet DefaultErrorStrategy::getErrorRecoverySet(Parser *recognizer) {
- const atn::ATN &atn = recognizer->getInterpreter<atn::ATNSimulator>()->atn;
- RuleContext *ctx = recognizer->getContext();
- misc::IntervalSet recoverSet;
- while (ctx->invokingState != ATNState::INVALID_STATE_NUMBER) {
- // compute what follows who invoked us
- atn::ATNState *invokingState = atn.states[ctx->invokingState];
- const atn::RuleTransition *rt = downCast<const atn::RuleTransition*>(invokingState->transitions[0].get());
- misc::IntervalSet follow = atn.nextTokens(rt->followState);
- recoverSet.addAll(follow);
-
- if (ctx->parent == nullptr)
- break;
- ctx = static_cast<RuleContext *>(ctx->parent);
- }
- recoverSet.remove(Token::EPSILON);
-
- return recoverSet;
-}
-
-void DefaultErrorStrategy::consumeUntil(Parser *recognizer, const misc::IntervalSet &set) {
- size_t ttype = recognizer->getInputStream()->LA(1);
- while (ttype != Token::EOF && !set.contains(ttype)) {
- recognizer->consume();
- ttype = recognizer->getInputStream()->LA(1);
- }
-}
-
-void DefaultErrorStrategy::InitializeInstanceFields() {
- errorRecoveryMode = false;
- lastErrorIndex = -1;
-}
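A hedged usage sketch of how an error strategy is installed on a generated parser; MyLexer, MyParser, and startRule are hypothetical generated names, and antlr4-runtime.h is the runtime's umbrella header. The default strategy implemented above is preinstalled, so swapping strategies is a single call.

#include <memory>
#include "antlr4-runtime.h"
#include "MyLexer.h"   // hypothetical ANTLR-generated lexer
#include "MyParser.h"  // hypothetical ANTLR-generated parser

int main() {
  antlr4::ANTLRInputStream input("i=(3;");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);

  // DefaultErrorStrategy is preinstalled; replacing it with BailErrorStrategy turns the
  // recovery behaviour implemented above into an immediate abort on the first error.
  parser.setErrorHandler(std::make_shared<antlr4::BailErrorStrategy>());

  parser.startRule();  // hypothetical entry rule
  return 0;
}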
diff --git a/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.h b/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.h
deleted file mode 100644
index 7b914468cf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/DefaultErrorStrategy.h
+++ /dev/null
@@ -1,466 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorStrategy.h"
-#include "misc/IntervalSet.h"
-
-namespace antlr4 {
-
- /**
- * This is the default implementation of {@link ANTLRErrorStrategy} used for
- * error reporting and recovery in ANTLR parsers.
- */
- class ANTLR4CPP_PUBLIC DefaultErrorStrategy : public ANTLRErrorStrategy {
- public:
- DefaultErrorStrategy();
- DefaultErrorStrategy(DefaultErrorStrategy const& other) = delete;
- virtual ~DefaultErrorStrategy();
-
- DefaultErrorStrategy& operator = (DefaultErrorStrategy const& other) = delete;
-
- protected:
- /**
- * Indicates whether the error strategy is currently "recovering from an
- * error". This is used to suppress reporting multiple error messages while
- * attempting to recover from a detected syntax error.
- *
- * @see #inErrorRecoveryMode
- */
- bool errorRecoveryMode;
-
- /** The index into the input stream where the last error occurred.
- * This is used to prevent infinite loops where an error is found
- * but no token is consumed during recovery...another error is found,
-   * ad nauseam. This is a failsafe mechanism to guarantee that at least
- * one token/tree node is consumed for two errors.
- */
- int lastErrorIndex;
-
- misc::IntervalSet lastErrorStates;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation simply calls <seealso cref="#endErrorCondition"/> to
- /// ensure that the handler is not in error recovery mode.
- /// </summary>
- public:
- virtual void reset(Parser *recognizer) override;
-
- /// <summary>
- /// This method is called to enter error recovery mode when a recognition
- /// exception is reported.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- protected:
- virtual void beginErrorCondition(Parser *recognizer);
-
- /// <summary>
- /// {@inheritDoc}
- /// </summary>
- public:
- virtual bool inErrorRecoveryMode(Parser *recognizer) override;
-
- /// <summary>
- /// This method is called to leave error recovery mode after recovering from
- /// a recognition exception.
- /// </summary>
- /// <param name="recognizer"> </param>
- protected:
- virtual void endErrorCondition(Parser *recognizer);
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation simply calls <seealso cref="#endErrorCondition"/>.
- /// </summary>
- public:
- virtual void reportMatch(Parser *recognizer) override;
-
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation returns immediately if the handler is already
- /// in error recovery mode. Otherwise, it calls <seealso cref="#beginErrorCondition"/>
- /// and dispatches the reporting task based on the runtime type of {@code e}
- /// according to the following table.
- ///
- /// <ul>
- /// <li><seealso cref="NoViableAltException"/>: Dispatches the call to
- /// <seealso cref="#reportNoViableAlternative"/></li>
- /// <li><seealso cref="InputMismatchException"/>: Dispatches the call to
- /// <seealso cref="#reportInputMismatch"/></li>
- /// <li><seealso cref="FailedPredicateException"/>: Dispatches the call to
- /// <seealso cref="#reportFailedPredicate"/></li>
- /// <li>All other types: calls <seealso cref="Parser#notifyErrorListeners"/> to report
- /// the exception</li>
- /// </ul>
- virtual void reportError(Parser *recognizer, const RecognitionException &e) override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The default implementation resynchronizes the parser by consuming tokens
- /// until we find one in the resynchronization set--loosely the set of tokens
- /// that can follow the current rule.
- /// </summary>
- virtual void recover(Parser *recognizer, std::exception_ptr e) override;
-
- /**
- * The default implementation of {@link ANTLRErrorStrategy#sync} makes sure
-     * that the current lookahead symbol is consistent with what we were expecting
-     * at this point in the ATN. You can call this at any time, but ANTLR only
-     * generates code to check it before subrules/loops and at each iteration.
- *
- * <p>Implements Jim Idle's magic sync mechanism in closures and optional
- * subrules. E.g.,</p>
- *
- * <pre>
- * a : sync ( stuff sync )* ;
- * sync : {consume to what can follow sync} ;
- * </pre>
- *
- * At the start of a sub rule upon error, {@link #sync} performs single
- * token deletion, if possible. If it can't do that, it bails on the current
- * rule and uses the default error recovery, which consumes until the
- * resynchronization set of the current rule.
- *
- * <p>If the sub rule is optional ({@code (...)?}, {@code (...)*}, or block
- * with an empty alternative), then the expected set includes what follows
- * the subrule.</p>
- *
- * <p>During loop iteration, it consumes until it sees a token that can start a
-     * sub rule or what follows the loop. Yes, that is pretty aggressive. We opt to
- * stay in the loop as long as possible.</p>
- *
- * <p><strong>ORIGINS</strong></p>
- *
- * <p>Previous versions of ANTLR did a poor job of their recovery within loops.
-     * A single mismatched token or missing token would force the parser to bail
-     * out of the entire rule surrounding the loop. So, for rule</p>
- *
- * <pre>
- * classDef : 'class' ID '{' member* '}'
- * </pre>
- *
- * input with an extra token between members would force the parser to
- * consume until it found the next class definition rather than the next
- * member definition of the current class.
- *
-     * <p>This functionality costs a little bit of effort because the parser has to
-     * compare the token set at the start of the loop and at each iteration. If for
- * some reason speed is suffering for you, you can turn off this
- * functionality by simply overriding this method as a blank { }.</p>
- */
- virtual void sync(Parser *recognizer) override;
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is a
- /// <seealso cref="NoViableAltException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- protected:
- virtual void reportNoViableAlternative(Parser *recognizer, const NoViableAltException &e);
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is an
- /// <seealso cref="InputMismatchException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- virtual void reportInputMismatch(Parser *recognizer, const InputMismatchException &e);
-
- /// <summary>
- /// This is called by <seealso cref="#reportError"/> when the exception is a
- /// <seealso cref="FailedPredicateException"/>.
- /// </summary>
- /// <seealso cref= #reportError
- /// </seealso>
- /// <param name="recognizer"> the parser instance </param>
- /// <param name="e"> the recognition exception </param>
- virtual void reportFailedPredicate(Parser *recognizer, const FailedPredicateException &e);
-
- /**
- * This method is called to report a syntax error which requires the removal
- * of a token from the input stream. At the time this method is called, the
-     * erroneous symbol is the current {@code LT(1)} symbol and has not yet been
- * removed from the input stream. When this method returns,
- * {@code recognizer} is in error recovery mode.
- *
- * <p>This method is called when {@link #singleTokenDeletion} identifies
- * single-token deletion as a viable recovery strategy for a mismatched
- * input error.</p>
- *
- * <p>The default implementation simply returns if the handler is already in
- * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
- * enter error recovery mode, followed by calling
- * {@link Parser#notifyErrorListeners}.</p>
- *
- * @param recognizer the parser instance
- */
- virtual void reportUnwantedToken(Parser *recognizer);
-
- /**
- * This method is called to report a syntax error which requires the
- * insertion of a missing token into the input stream. At the time this
- * method is called, the missing token has not yet been inserted. When this
- * method returns, {@code recognizer} is in error recovery mode.
- *
- * <p>This method is called when {@link #singleTokenInsertion} identifies
- * single-token insertion as a viable recovery strategy for a mismatched
- * input error.</p>
- *
- * <p>The default implementation simply returns if the handler is already in
- * error recovery mode. Otherwise, it calls {@link #beginErrorCondition} to
- * enter error recovery mode, followed by calling
- * {@link Parser#notifyErrorListeners}.</p>
- *
- * @param recognizer the parser instance
- */
- virtual void reportMissingToken(Parser *recognizer);
-
- public:
- /**
- * {@inheritDoc}
- *
- * <p>The default implementation attempts to recover from the mismatched input
- * by using single token insertion and deletion as described below. If the
- * recovery attempt fails, this method throws an
- * {@link InputMismatchException}.</p>
- *
- * <p><strong>EXTRA TOKEN</strong> (single token deletion)</p>
- *
- * <p>{@code LA(1)} is not what we are looking for. If {@code LA(2)} has the
- * right token, however, then assume {@code LA(1)} is some extra spurious
- * token and delete it. Then consume and return the next token (which was
- * the {@code LA(2)} token) as the successful result of the match operation.</p>
- *
- * <p>This recovery strategy is implemented by {@link #singleTokenDeletion}.</p>
- *
- * <p><strong>MISSING TOKEN</strong> (single token insertion)</p>
- *
- * <p>If current token (at {@code LA(1)}) is consistent with what could come
- * after the expected {@code LA(1)} token, then assume the token is missing
- * and use the parser's {@link TokenFactory} to create it on the fly. The
- * "insertion" is performed by returning the created token as the successful
- * result of the match operation.</p>
- *
- * <p>This recovery strategy is implemented by {@link #singleTokenInsertion}.</p>
- *
- * <p><strong>EXAMPLE</strong></p>
- *
-     * <p>For example, input {@code i=(3;} is clearly missing the {@code ')'}. When
- * the parser returns from the nested call to {@code expr}, it will have
- * call chain:</p>
- *
- * <pre>
- * stat &rarr; expr &rarr; atom
- * </pre>
- *
- * and it will be trying to match the {@code ')'} at this point in the
- * derivation:
- *
- * <pre>
- * =&gt; ID '=' '(' INT ')' ('+' atom)* ';'
- * ^
- * </pre>
- *
- * The attempt to match {@code ')'} will fail when it sees {@code ';'} and
- * call {@link #recoverInline}. To recover, it sees that {@code LA(1)==';'}
- * is in the set of tokens that can follow the {@code ')'} token reference
- * in rule {@code atom}. It can assume that you forgot the {@code ')'}.
- */
- virtual Token* recoverInline(Parser *recognizer) override;
-
- /// <summary>
- /// This method implements the single-token insertion inline error recovery
- /// strategy. It is called by <seealso cref="#recoverInline"/> if the single-token
- /// deletion strategy fails to recover from the mismatched input. If this
- /// method returns {@code true}, {@code recognizer} will be in error recovery
- /// mode.
- /// <p/>
- /// This method determines whether or not single-token insertion is viable by
- /// checking if the {@code LA(1)} input symbol could be successfully matched
- /// if it were instead the {@code LA(2)} symbol. If this method returns
- /// {@code true}, the caller is responsible for creating and inserting a
- /// token with the correct type to produce this behavior.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> {@code true} if single-token insertion is a viable recovery
- /// strategy for the current mismatched input, otherwise {@code false} </returns>
- protected:
- virtual bool singleTokenInsertion(Parser *recognizer);
-
- /// <summary>
- /// This method implements the single-token deletion inline error recovery
- /// strategy. It is called by <seealso cref="#recoverInline"/> to attempt to recover
- /// from mismatched input. If this method returns null, the parser and error
- /// handler state will not have changed. If this method returns non-null,
- /// {@code recognizer} will <em>not</em> be in error recovery mode since the
- /// returned token was a successful match.
- /// <p/>
- /// If the single-token deletion is successful, this method calls
- /// <seealso cref="#reportUnwantedToken"/> to report the error, followed by
- /// <seealso cref="Parser#consume"/> to actually "delete" the extraneous token. Then,
- /// before returning <seealso cref="#reportMatch"/> is called to signal a successful
- /// match.
- /// </summary>
- /// <param name="recognizer"> the parser instance </param>
- /// <returns> the successfully matched <seealso cref="Token"/> instance if single-token
- /// deletion successfully recovers from the mismatched input, otherwise
- /// {@code null} </returns>
- virtual Token* singleTokenDeletion(Parser *recognizer);
-
- /// <summary>
- /// Conjure up a missing token during error recovery.
- ///
- /// The recognizer attempts to recover from single missing
- /// symbols. But, actions might refer to that missing symbol.
- /// For example, x=ID {f($x);}. The action clearly assumes
- /// that there has been an identifier matched previously and that
- /// $x points at that token. If that token is missing, but
-    ///  the next token in the stream is what we want, we assume that
- /// this token is missing and we keep going. Because we
- /// have to return some token to replace the missing token,
- /// we have to conjure one up. This method gives the user control
- /// over the tokens returned for missing tokens. Mostly,
- /// you will want to create something special for identifier
- /// tokens. For literals such as '{' and ',', the default
- /// action in the parser or tree parser works. It simply creates
- /// a CommonToken of the appropriate type. The text will be the token.
- /// If you change what tokens must be created by the lexer,
- /// override this method to create the appropriate tokens.
- /// </summary>
- virtual Token* getMissingSymbol(Parser *recognizer);
-
- virtual misc::IntervalSet getExpectedTokens(Parser *recognizer);
-
- /// <summary>
- /// How should a token be displayed in an error message? The default
- /// is to display just the text, but during development you might
- /// want to have a lot of information spit out. Override in that case
- /// to use t.toString() (which, for CommonToken, dumps everything about
- /// the token). This is better than forcing you to override a method in
- /// your token objects because you don't have to go modify your lexer
- /// so that it creates a new class.
- /// </summary>
- virtual std::string getTokenErrorDisplay(Token *t);
-
- virtual std::string getSymbolText(Token *symbol);
-
- virtual size_t getSymbolType(Token *symbol);
-
- virtual std::string escapeWSAndQuote(const std::string &s) const;
-
- /* Compute the error recovery set for the current rule. During
- * rule invocation, the parser pushes the set of tokens that can
- * follow that rule reference on the stack; this amounts to
- * computing FIRST of what follows the rule reference in the
- * enclosing rule. See LinearApproximator.FIRST().
- * This local follow set only includes tokens
- * from within the rule; i.e., the FIRST computation done by
- * ANTLR stops at the end of a rule.
- *
- * EXAMPLE
- *
- * When you find a "no viable alt exception", the input is not
- * consistent with any of the alternatives for rule r. The best
- * thing to do is to consume tokens until you see something that
- * can legally follow a call to r *or* any rule that called r.
- * You don't want the exact set of viable next tokens because the
- * input might just be missing a token--you might consume the
- * rest of the input looking for one of the missing tokens.
- *
- * Consider grammar:
- *
- * a : '[' b ']'
- * | '(' b ')'
- * ;
- * b : c '^' INT ;
- * c : ID
- * | INT
- * ;
- *
- * At each rule invocation, the set of tokens that could follow
- * that rule is pushed on a stack. Here are the various
- * context-sensitive follow sets:
- *
- * FOLLOW(b1_in_a) = FIRST(']') = ']'
- * FOLLOW(b2_in_a) = FIRST(')') = ')'
- * FOLLOW(c_in_b) = FIRST('^') = '^'
- *
- * Upon erroneous input "[]", the call chain is
- *
- * a -> b -> c
- *
- * and, hence, the follow context stack is:
- *
- * depth follow set start of rule execution
- * 0 <EOF> a (from main())
- * 1 ']' b
- * 2 '^' c
- *
- * Notice that ')' is not included, because b would have to have
- * been called from a different context in rule a for ')' to be
- * included.
- *
- * For error recovery, we cannot consider FOLLOW(c)
- * (context-sensitive or otherwise). We need the combined set of
- * all context-sensitive FOLLOW sets--the set of all tokens that
- * could follow any reference in the call chain. We need to
- * resync to one of those tokens. Note that FOLLOW(c)='^' and if
- * we resync'd to that token, we'd consume until EOF. We need to
- * sync to context-sensitive FOLLOWs for a, b, and c: {']','^'}.
- * In this case, for input "[]", LA(1) is ']' and in the set, so we would
- * not consume anything. After printing an error, rule c would
- * return normally. Rule b would not find the required '^' though.
- * At this point, it gets a mismatched token error and throws an
- * exception (since LA(1) is not in the viable following token
- * set). The rule exception handler tries to recover, but finds
- * the same recovery set and doesn't consume anything. Rule b
- * exits normally returning to rule a. Now it finds the ']' (and
- * with the successful match exits errorRecovery mode).
- *
- * So, you can see that the parser walks up the call chain looking
- * for the token that was a member of the recovery set.
- *
- * Errors are not generated in errorRecovery mode.
- *
- * ANTLR's error recovery mechanism is based upon original ideas:
- *
- * "Algorithms + Data Structures = Programs" by Niklaus Wirth
- *
- * and
- *
- * "A note on error recovery in recursive descent parsers":
- * http://portal.acm.org/citation.cfm?id=947902.947905
- *
- * Later, Josef Grosch had some good ideas:
- *
- * "Efficient and Comfortable Error Recovery in Recursive Descent
- * Parsers":
- * ftp://www.cocolab.com/products/cocktail/doca4.ps/ell.ps.zip
- *
- * Like Grosch I implement context-sensitive FOLLOW sets that are combined
- * at run-time upon error to avoid overhead during parsing.
- */
- virtual misc::IntervalSet getErrorRecoverySet(Parser *recognizer);
-
- /// <summary>
- /// Consume tokens until one matches the given token set. </summary>
- virtual void consumeUntil(Parser *recognizer, const misc::IntervalSet &set);
-
- private:
- std::vector<std::unique_ptr<Token>> _errorSymbols; // Temporarily created token.
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
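The getErrorRecoverySet comment above combines context-sensitive FOLLOW sets up the rule-invocation stack. Below is a standalone toy model of that walk (plain std::set standing in for misc::IntervalSet, a hand-written stack standing in for the ATN), reproducing the "[]" example from the comment; it is only an illustration of the set arithmetic, not runtime code.

#include <iostream>
#include <set>
#include <string>
#include <vector>

int main() {
  // Follow sets pushed at each rule invocation for input "[]" with call chain a -> b -> c,
  // matching the table in the comment above: depth 0 -> <EOF>, depth 1 -> ']', depth 2 -> '^'.
  std::vector<std::set<std::string>> followStack = {{"<EOF>"}, {"]"}, {"^"}};

  // Error recovery set = union of every context-sensitive FOLLOW set on the stack.
  std::set<std::string> recoverySet;
  for (const auto &follow : followStack) {
    recoverySet.insert(follow.begin(), follow.end());
  }

  // LA(1) after the erroneous "[]" is ']', which is already in the recovery set,
  // so the consumeUntil loop would not consume anything.
  const std::string la1 = "]";
  std::cout << "recovery set = { ";
  for (const auto &t : recoverySet) std::cout << t << " ";
  std::cout << "}\nLA(1)=" << la1
            << (recoverySet.count(la1) ? " is in the set: consume nothing\n"
                                       : " is not in the set: keep consuming\n");
  return 0;
}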
diff --git a/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.cpp
deleted file mode 100644
index ef6f64372d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PredictionContext.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "Parser.h"
-#include "misc/Interval.h"
-#include "dfa/DFA.h"
-
-#include "DiagnosticErrorListener.h"
-
-using namespace antlr4;
-
-DiagnosticErrorListener::DiagnosticErrorListener() : DiagnosticErrorListener(true) {
-}
-
-DiagnosticErrorListener::DiagnosticErrorListener(bool exactOnly_) : exactOnly(exactOnly_) {
-}
-
-void DiagnosticErrorListener::reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) {
- if (exactOnly && !exact) {
- return;
- }
-
- std::string decision = getDecisionDescription(recognizer, dfa);
- antlrcpp::BitSet conflictingAlts = getConflictingAlts(ambigAlts, configs);
- std::string text = recognizer->getTokenStream()->getText(misc::Interval(startIndex, stopIndex));
- std::string message = "reportAmbiguity d=" + decision + ": ambigAlts=" + conflictingAlts.toString() +
- ", input='" + text + "'";
-
- recognizer->notifyErrorListeners(message);
-}
-
-void DiagnosticErrorListener::reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet &/*conflictingAlts*/, atn::ATNConfigSet * /*configs*/) {
- std::string decision = getDecisionDescription(recognizer, dfa);
- std::string text = recognizer->getTokenStream()->getText(misc::Interval(startIndex, stopIndex));
- std::string message = "reportAttemptingFullContext d=" + decision + ", input='" + text + "'";
- recognizer->notifyErrorListeners(message);
-}
-
-void DiagnosticErrorListener::reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex,
- size_t stopIndex, size_t /*prediction*/, atn::ATNConfigSet * /*configs*/) {
- std::string decision = getDecisionDescription(recognizer, dfa);
- std::string text = recognizer->getTokenStream()->getText(misc::Interval(startIndex, stopIndex));
- std::string message = "reportContextSensitivity d=" + decision + ", input='" + text + "'";
- recognizer->notifyErrorListeners(message);
-}
-
-std::string DiagnosticErrorListener::getDecisionDescription(Parser *recognizer, const dfa::DFA &dfa) {
- size_t decision = dfa.decision;
- size_t ruleIndex = (reinterpret_cast<atn::ATNState*>(dfa.atnStartState))->ruleIndex;
-
- const std::vector<std::string>& ruleNames = recognizer->getRuleNames();
- if (ruleIndex == INVALID_INDEX || ruleIndex >= ruleNames.size()) {
- return std::to_string(decision);
- }
-
- std::string ruleName = ruleNames[ruleIndex];
- if (ruleName == "" || ruleName.empty()) {
- return std::to_string(decision);
- }
-
- return std::to_string(decision) + " (" + ruleName + ")";
-}
-
-antlrcpp::BitSet DiagnosticErrorListener::getConflictingAlts(const antlrcpp::BitSet &reportedAlts,
- atn::ATNConfigSet *configs) {
- if (reportedAlts.count() > 0) { // Not exactly like the original Java code, but this listener is only used
- // in the TestRig (where it never provides a good alt set), so it's probably ok so.
- return reportedAlts;
- }
-
- antlrcpp::BitSet result;
- for (auto &config : configs->configs) {
- result.set(config->alt);
- }
-
- return result;
-}
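A hedged sketch of wiring this listener into a generated parser; MyLexer, MyParser, and startRule are hypothetical names, and the prediction-mode call assumes the usual ParserATNSimulator API for enabling full-context ambiguity reporting.

#include "antlr4-runtime.h"
#include "MyLexer.h"   // hypothetical generated lexer
#include "MyParser.h"  // hypothetical generated parser

int main() {
  antlr4::ANTLRInputStream input("a + b * c");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);

  // Report grammar ambiguities while parsing; exactOnly=false also reports inexact ones.
  antlr4::DiagnosticErrorListener diagnostics(/*exactOnly=*/false);
  parser.addErrorListener(&diagnostics);

  // Full-context prediction surfaces the context-sensitivity reports described above.
  parser.getInterpreter<antlr4::atn::ParserATNSimulator>()->setPredictionMode(
      antlr4::atn::PredictionMode::LL_EXACT_AMBIG_DETECTION);

  parser.startRule();  // hypothetical entry rule
  return 0;
}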
diff --git a/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.h
deleted file mode 100644
index ed6d749429..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/DiagnosticErrorListener.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// This implementation of <seealso cref="ANTLRErrorListener"/> can be used to identify
- /// certain potential correctness and performance problems in grammars. "Reports"
- /// are made by calling <seealso cref="Parser#notifyErrorListeners"/> with the appropriate
- /// message.
- ///
- /// <ul>
- /// <li><b>Ambiguities</b>: These are cases where more than one path through the
- /// grammar can match the input.</li>
- /// <li><b>Weak context sensitivity</b>: These are cases where full-context
- /// prediction resolved an SLL conflict to a unique alternative which equaled the
- /// minimum alternative of the SLL conflict.</li>
- /// <li><b>Strong (forced) context sensitivity</b>: These are cases where the
- /// full-context prediction resolved an SLL conflict to a unique alternative,
- /// <em>and</em> the minimum alternative of the SLL conflict was found to not be
- /// a truly viable alternative. Two-stage parsing cannot be used for inputs where
- /// this situation occurs.</li>
- /// </ul>
- ///
- /// @author Sam Harwell
- /// </summary>
- class ANTLR4CPP_PUBLIC DiagnosticErrorListener : public BaseErrorListener {
- /// <summary>
- /// When {@code true}, only exactly known ambiguities are reported.
- /// </summary>
- protected:
- const bool exactOnly;
-
- /// <summary>
- /// Initializes a new instance of <seealso cref="DiagnosticErrorListener"/> which only
- /// reports exact ambiguities.
- /// </summary>
- public:
- DiagnosticErrorListener();
-
- /// <summary>
- /// Initializes a new instance of <seealso cref="DiagnosticErrorListener"/>, specifying
- /// whether all ambiguities or only exact ambiguities are reported.
- /// </summary>
- /// <param name="exactOnly"> {@code true} to report only exact ambiguities, otherwise
- /// {@code false} to report all ambiguities. </param>
- DiagnosticErrorListener(bool exactOnly);
-
- virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- size_t prediction, atn::ATNConfigSet *configs) override;
-
- protected:
- virtual std::string getDecisionDescription(Parser *recognizer, const dfa::DFA &dfa);
-
- /// <summary>
- /// Computes the set of conflicting or ambiguous alternatives from a
- /// configuration set, if that information was not already provided by the
- /// parser.
- /// </summary>
- /// <param name="reportedAlts"> The set of conflicting or ambiguous alternatives, as
- /// reported by the parser. </param>
- /// <param name="configs"> The conflicting or ambiguous configuration set. </param>
- /// <returns> Returns {@code reportedAlts} if it is not {@code null}, otherwise
- /// returns the set of alternatives represented in {@code configs}. </returns>
- virtual antlrcpp::BitSet getConflictingAlts(const antlrcpp::BitSet &reportedAlts, atn::ATNConfigSet *configs);
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Exceptions.cpp b/contrib/libs/antlr4_cpp_runtime/src/Exceptions.cpp
deleted file mode 100644
index 24aea29b0c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Exceptions.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-using namespace antlr4;
-
-RuntimeException::RuntimeException(const std::string &msg) : std::exception(), _message(msg) {
-}
-
-const char* RuntimeException::what() const noexcept {
- return _message.c_str();
-}
-
-//------------------ IOException ---------------------------------------------------------------------------------------
-
-IOException::IOException(const std::string &msg) : std::exception(), _message(msg) {
-}
-
-const char* IOException::what() const noexcept {
- return _message.c_str();
-}
-
-//------------------ IllegalStateException -----------------------------------------------------------------------------
-
-IllegalStateException::~IllegalStateException() {
-}
-
-//------------------ IllegalArgumentException --------------------------------------------------------------------------
-
-IllegalArgumentException::~IllegalArgumentException() {
-}
-
-//------------------ NullPointerException ------------------------------------------------------------------------------
-
-NullPointerException::~NullPointerException() {
-}
-
-//------------------ IndexOutOfBoundsException -------------------------------------------------------------------------
-
-IndexOutOfBoundsException::~IndexOutOfBoundsException() {
-}
-
-//------------------ UnsupportedOperationException ---------------------------------------------------------------------
-
-UnsupportedOperationException::~UnsupportedOperationException() {
-}
-
-//------------------ EmptyStackException -------------------------------------------------------------------------------
-
-EmptyStackException::~EmptyStackException() {
-}
-
-//------------------ CancellationException -----------------------------------------------------------------------------
-
-CancellationException::~CancellationException() {
-}
-
-//------------------ ParseCancellationException ------------------------------------------------------------------------
-
-ParseCancellationException::~ParseCancellationException() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Exceptions.h b/contrib/libs/antlr4_cpp_runtime/src/Exceptions.h
deleted file mode 100644
index 35d72b52ee..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Exceptions.h
+++ /dev/null
@@ -1,99 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
- // An exception hierarchy modelled loosely after java.lang.* exceptions.
- class ANTLR4CPP_PUBLIC RuntimeException : public std::exception {
- private:
- std::string _message;
- public:
- RuntimeException(const std::string &msg = "");
-
- virtual const char* what() const noexcept override;
- };
-
- class ANTLR4CPP_PUBLIC IllegalStateException : public RuntimeException {
- public:
- IllegalStateException(const std::string &msg = "") : RuntimeException(msg) {}
- IllegalStateException(IllegalStateException const&) = default;
- ~IllegalStateException();
- IllegalStateException& operator=(IllegalStateException const&) = default;
- };
-
- class ANTLR4CPP_PUBLIC IllegalArgumentException : public RuntimeException {
- public:
- IllegalArgumentException(IllegalArgumentException const&) = default;
- IllegalArgumentException(const std::string &msg = "") : RuntimeException(msg) {}
- ~IllegalArgumentException();
- IllegalArgumentException& operator=(IllegalArgumentException const&) = default;
- };
-
- class ANTLR4CPP_PUBLIC NullPointerException : public RuntimeException {
- public:
- NullPointerException(const std::string &msg = "") : RuntimeException(msg) {}
- NullPointerException(NullPointerException const&) = default;
- ~NullPointerException();
- NullPointerException& operator=(NullPointerException const&) = default;
- };
-
- class ANTLR4CPP_PUBLIC IndexOutOfBoundsException : public RuntimeException {
- public:
- IndexOutOfBoundsException(const std::string &msg = "") : RuntimeException(msg) {}
- IndexOutOfBoundsException(IndexOutOfBoundsException const&) = default;
- ~IndexOutOfBoundsException();
- IndexOutOfBoundsException& operator=(IndexOutOfBoundsException const&) = default;
- };
-
- class ANTLR4CPP_PUBLIC UnsupportedOperationException : public RuntimeException {
- public:
- UnsupportedOperationException(const std::string &msg = "") : RuntimeException(msg) {}
- UnsupportedOperationException(UnsupportedOperationException const&) = default;
- ~UnsupportedOperationException();
- UnsupportedOperationException& operator=(UnsupportedOperationException const&) = default;
-
- };
-
- class ANTLR4CPP_PUBLIC EmptyStackException : public RuntimeException {
- public:
- EmptyStackException(const std::string &msg = "") : RuntimeException(msg) {}
- EmptyStackException(EmptyStackException const&) = default;
- ~EmptyStackException();
- EmptyStackException& operator=(EmptyStackException const&) = default;
- };
-
- // IOException is not a runtime exception (in the java hierarchy).
- // Hence we have to duplicate the RuntimeException implementation.
- class ANTLR4CPP_PUBLIC IOException : public std::exception {
- private:
- std::string _message;
-
- public:
- IOException(const std::string &msg = "");
-
- virtual const char* what() const noexcept override;
- };
-
- class ANTLR4CPP_PUBLIC CancellationException : public IllegalStateException {
- public:
- CancellationException(const std::string &msg = "") : IllegalStateException(msg) {}
- CancellationException(CancellationException const&) = default;
- ~CancellationException();
- CancellationException& operator=(CancellationException const&) = default;
- };
-
- class ANTLR4CPP_PUBLIC ParseCancellationException : public CancellationException {
- public:
- ParseCancellationException(const std::string &msg = "") : CancellationException(msg) {}
- ParseCancellationException(ParseCancellationException const&) = default;
- ~ParseCancellationException();
- ParseCancellationException& operator=(ParseCancellationException const&) = default;
- };
-
-} // namespace antlr4
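A sketch of the exception hierarchy in use, assuming hypothetical generated MyLexer/MyParser classes: with BailErrorStrategy installed, the first recognition error is surfaced to the caller as the ParseCancellationException declared above.

#include <iostream>
#include <memory>
#include "antlr4-runtime.h"
#include "MyLexer.h"   // hypothetical generated lexer
#include "MyParser.h"  // hypothetical generated parser

int main() {
  antlr4::ANTLRInputStream input("not a valid sentence");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);

  // BailErrorStrategy reports the first syntax error by throwing instead of recovering.
  parser.setErrorHandler(std::make_shared<antlr4::BailErrorStrategy>());

  try {
    parser.startRule();  // hypothetical entry rule
  } catch (const antlr4::ParseCancellationException &e) {
    std::cerr << "parse aborted: " << e.what() << std::endl;
  }
  return 0;
}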
diff --git a/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.cpp b/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.cpp
deleted file mode 100644
index ca2537b300..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.cpp
+++ /dev/null
@@ -1,52 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ParserATNSimulator.h"
-#include "Parser.h"
-#include "atn/PredicateTransition.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "support/Casts.h"
-#include "support/CPPUtils.h"
-
-#include "FailedPredicateException.h"
-
-using namespace antlr4;
-using namespace antlrcpp;
-
-FailedPredicateException::FailedPredicateException(Parser *recognizer) : FailedPredicateException(recognizer, "", "") {
-}
-
-FailedPredicateException::FailedPredicateException(Parser *recognizer, const std::string &predicate): FailedPredicateException(recognizer, predicate, "") {
-}
-
-FailedPredicateException::FailedPredicateException(Parser *recognizer, const std::string &predicate, const std::string &message)
- : RecognitionException(!message.empty() ? message : "failed predicate: " + predicate + "?", recognizer,
- recognizer->getInputStream(), recognizer->getContext(), recognizer->getCurrentToken()) {
-
- atn::ATNState *s = recognizer->getInterpreter<atn::ATNSimulator>()->atn.states[recognizer->getState()];
- const atn::Transition *transition = s->transitions[0].get();
- if (transition->getTransitionType() == atn::TransitionType::PREDICATE) {
- _ruleIndex = downCast<const atn::PredicateTransition&>(*transition).getRuleIndex();
- _predicateIndex = downCast<const atn::PredicateTransition&>(*transition).getPredIndex();
- } else {
- _ruleIndex = 0;
- _predicateIndex = 0;
- }
-
- _predicate = predicate;
-}
-
-size_t FailedPredicateException::getRuleIndex() {
- return _ruleIndex;
-}
-
-size_t FailedPredicateException::getPredIndex() {
- return _predicateIndex;
-}
-
-std::string FailedPredicateException::getPredicate() {
- return _predicate;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.h b/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.h
deleted file mode 100644
index 89bec0fd0b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/FailedPredicateException.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlr4 {
-
- /// A semantic predicate failed during validation. Validation of predicates
-  /// occurs when parsing the alternative normally, just like matching a token.
- /// Disambiguating predicate evaluation occurs when we test a predicate during
- /// prediction.
- class ANTLR4CPP_PUBLIC FailedPredicateException : public RecognitionException {
- public:
- explicit FailedPredicateException(Parser *recognizer);
- FailedPredicateException(Parser *recognizer, const std::string &predicate);
- FailedPredicateException(Parser *recognizer, const std::string &predicate, const std::string &message);
-
- virtual size_t getRuleIndex();
- virtual size_t getPredIndex();
- virtual std::string getPredicate();
-
- private:
- size_t _ruleIndex;
- size_t _predicateIndex;
- std::string _predicate;
- };
-
-} // namespace antlr4
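A sketch of a strategy subclass that adds detail when the exception above is reported; it overrides the reportFailedPredicate hook declared in DefaultErrorStrategy earlier in this patch, and would be installed with parser.setErrorHandler(std::make_shared<VerbosePredicateStrategy>()). Only standard runtime calls are used, but the class itself is illustrative.

#include <iostream>
#include "antlr4-runtime.h"

class VerbosePredicateStrategy : public antlr4::DefaultErrorStrategy {
protected:
  void reportFailedPredicate(antlr4::Parser *recognizer,
                             const antlr4::FailedPredicateException &e) override {
    // Log which rule the rejected predicate belongs to, then keep the default report.
    const auto &ruleName =
        recognizer->getRuleNames()[recognizer->getContext()->getRuleIndex()];
    std::cerr << "semantic predicate rejected input in rule '" << ruleName
              << "': " << e.what() << std::endl;
    antlr4::DefaultErrorStrategy::reportFailedPredicate(recognizer, e);
  }
};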
diff --git a/contrib/libs/antlr4_cpp_runtime/src/FlatHashMap.h b/contrib/libs/antlr4_cpp_runtime/src/FlatHashMap.h
deleted file mode 100644
index ad5ffa2432..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/FlatHashMap.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#if ANTLR4CPP_USING_ABSEIL
-#error #include "absl/container/flat_hash_map.h"
-#else
-#include <unordered_map>
-#endif
-
-// By default ANTLRv4 uses containers provided by the C++ standard library. In most deployments this
-// is fine; however, in some cases using custom containers may be preferred. This header allows that by
-// optionally supporting some alternative implementations and by making it easier to patch in
-// other alternatives.
-
-namespace antlr4 {
-
-#if ANTLR4CPP_USING_ABSEIL
- template <typename Key, typename Value,
- typename Hash = typename absl::flat_hash_map<Key, Value>::hasher,
- typename Equal = typename absl::flat_hash_map<Key, Value>::key_equal,
- typename Allocator = typename absl::flat_hash_map<Key, Value>::allocator_type>
- using FlatHashMap = absl::flat_hash_map<Key, Value, Hash, Equal, Allocator>;
-#else
- template <typename Key, typename Value,
- typename Hash = typename std::unordered_map<Key, Value>::hasher,
- typename Equal = typename std::unordered_map<Key, Value>::key_equal,
- typename Allocator = typename std::unordered_map<Key, Value>::allocator_type>
- using FlatHashMap = std::unordered_map<Key, Value, Hash, Equal, Allocator>;
-#endif
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/FlatHashSet.h b/contrib/libs/antlr4_cpp_runtime/src/FlatHashSet.h
deleted file mode 100644
index 5396c2bd5d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/FlatHashSet.h
+++ /dev/null
@@ -1,57 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#if ANTLR4CPP_USING_ABSEIL
-#error #include "absl/container/flat_hash_set.h"
-#else
-#include <unordered_set>
-#endif
-
-// By default ANTLRv4 uses containers provided by the C++ standard library. In most deployments this
-// is fine; however, in some cases using custom containers may be preferred. This header allows that by
-// optionally supporting some alternative implementations and by making it easier to patch in
-// other alternatives.
-
-namespace antlr4 {
-
-#if ANTLR4CPP_USING_ABSEIL
- template <typename Key,
- typename Hash = typename absl::flat_hash_set<Key>::hasher,
- typename Equal = typename absl::flat_hash_set<Key>::key_equal,
- typename Allocator = typename absl::flat_hash_set<Key>::allocator_type>
- using FlatHashSet = absl::flat_hash_set<Key, Hash, Equal, Allocator>;
-#else
- template <typename Key,
- typename Hash = typename std::unordered_set<Key>::hasher,
- typename Equal = typename std::unordered_set<Key>::key_equal,
- typename Allocator = typename std::unordered_set<Key>::allocator_type>
- using FlatHashSet = std::unordered_set<Key, Hash, Equal, Allocator>;
-#endif
-
-} // namespace antlr4
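A short example of the aliases defined by the two headers above; which concrete container they resolve to is decided entirely by ANTLR4CPP_USING_ABSEIL, so caller code is written once against the alias. The include paths assume the runtime's src directory is on the include path.

#include <iostream>
#include <string>
#include "FlatHashMap.h"
#include "FlatHashSet.h"

int main() {
  // These resolve to absl::flat_hash_map/set or std::unordered_map/set depending on the macro.
  antlr4::FlatHashMap<std::string, size_t> tokenCounts;
  antlr4::FlatHashSet<std::string> seenRules;

  tokenCounts["ID"] += 3;
  seenRules.insert("expr");

  std::cout << "ID count: " << tokenCounts["ID"]
            << ", expr seen: " << (seenRules.count("expr") ? "yes" : "no") << std::endl;
  return 0;
}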
diff --git a/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.cpp b/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.cpp
deleted file mode 100644
index 4f4947985d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-
-#include "InputMismatchException.h"
-
-using namespace antlr4;
-
-InputMismatchException::InputMismatchException(Parser *recognizer)
- : RecognitionException(recognizer, recognizer->getInputStream(), recognizer->getContext(),
- recognizer->getCurrentToken()) {
-}
-
-InputMismatchException::~InputMismatchException() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.h b/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.h
deleted file mode 100644
index 8b75420968..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/InputMismatchException.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// This signifies any kind of mismatched input exceptions such as
- /// when the current input does not match the expected token.
- /// </summary>
- class ANTLR4CPP_PUBLIC InputMismatchException : public RecognitionException {
- public:
- InputMismatchException(Parser *recognizer);
- InputMismatchException(InputMismatchException const&) = default;
- ~InputMismatchException();
- InputMismatchException& operator=(InputMismatchException const&) = default;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/IntStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/IntStream.cpp
deleted file mode 100644
index 37a90a7cd9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/IntStream.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "IntStream.h"
-
-using namespace antlr4;
-
-const std::string IntStream::UNKNOWN_SOURCE_NAME = "<unknown>";
-
-IntStream::~IntStream() = default;
diff --git a/contrib/libs/antlr4_cpp_runtime/src/IntStream.h b/contrib/libs/antlr4_cpp_runtime/src/IntStream.h
deleted file mode 100644
index 40a0f2a9e8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/IntStream.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// A simple stream of symbols whose values are represented as integers. This
- /// interface provides <em>marked ranges</em> with support for a minimum level
- /// of buffering necessary to implement arbitrary lookahead during prediction.
- /// For more information on marked ranges, see <seealso cref="#mark"/>.
- /// <p/>
- /// <strong>Initializing Methods:</strong> Some methods in this interface have
- /// unspecified behavior if no call to an initializing method has occurred after
- /// the stream was constructed. The following is a list of initializing methods:
- ///
- /// <ul>
- /// <li><seealso cref="#LA"/></li>
- /// <li><seealso cref="#consume"/></li>
- /// <li><seealso cref="#size"/></li>
- /// </ul>
- /// </summary>
- class ANTLR4CPP_PUBLIC IntStream {
- public:
- static constexpr size_t EOF = std::numeric_limits<size_t>::max();
-
- /// The value returned by <seealso cref="#LA LA()"/> when the end of the stream is
- /// reached.
-    /// No explicit EOF definition is needed; EOF is available on all platforms.
- //static const size_t _EOF = std::ios::eofbit;
-
- /// <summary>
- /// The value returned by <seealso cref="#getSourceName"/> when the actual name of the
- /// underlying source is not known.
- /// </summary>
- static const std::string UNKNOWN_SOURCE_NAME;
-
- virtual ~IntStream();
-
- /// <summary>
- /// Consumes the current symbol in the stream. This method has the following
- /// effects:
- ///
- /// <ul>
- /// <li><strong>Forward movement:</strong> The value of <seealso cref="#index index()"/>
- /// before calling this method is less than the value of {@code index()}
- /// after calling this method.</li>
- /// <li><strong>Ordered lookahead:</strong> The value of {@code LA(1)} before
- /// calling this method becomes the value of {@code LA(-1)} after calling
- /// this method.</li>
- /// </ul>
- ///
- /// Note that calling this method does not guarantee that {@code index()} is
- /// incremented by exactly 1, as that would preclude the ability to implement
- /// filtering streams (e.g. <seealso cref="CommonTokenStream"/> which distinguishes
- /// between "on-channel" and "off-channel" tokens).
- /// </summary>
- /// <exception cref="IllegalStateException"> if an attempt is made to consume the the
- /// end of the stream (i.e. if {@code LA(1)==}<seealso cref="#EOF EOF"/> before calling
- /// {@code consume}). </exception>
- virtual void consume() = 0;
-
- /// <summary>
- /// Gets the value of the symbol at offset {@code i} from the current
- /// position. When {@code i==1}, this method returns the value of the current
- /// symbol in the stream (which is the next symbol to be consumed). When
- /// {@code i==-1}, this method returns the value of the previously read
- /// symbol in the stream. It is not valid to call this method with
- /// {@code i==0}, but the specific behavior is unspecified because this
- /// method is frequently called from performance-critical code.
- /// <p/>
- /// This method is guaranteed to succeed if any of the following are true:
- ///
- /// <ul>
- /// <li>{@code i>0}</li>
- /// <li>{@code i==-1} and <seealso cref="#index index()"/> returns a value greater
- /// than the value of {@code index()} after the stream was constructed
- /// and {@code LA(1)} was called in that order. Specifying the current
- /// {@code index()} relative to the index after the stream was created
- /// allows for filtering implementations that do not return every symbol
- /// from the underlying source. Specifying the call to {@code LA(1)}
- /// allows for lazily initialized streams.</li>
- /// <li>{@code LA(i)} refers to a symbol consumed within a marked region
- /// that has not yet been released.</li>
- /// </ul>
- ///
- /// If {@code i} represents a position at or beyond the end of the stream,
- /// this method returns <seealso cref="#EOF"/>.
- /// <p/>
- /// The return value is unspecified if {@code i<0} and fewer than {@code -i}
- /// calls to <seealso cref="#consume consume()"/> have occurred from the beginning of
- /// the stream before calling this method.
- /// </summary>
- /// <exception cref="UnsupportedOperationException"> if the stream does not support
- /// retrieving the value of the specified symbol </exception>
- virtual size_t LA(ssize_t i) = 0;
-
- /// <summary>
- /// A mark provides a guarantee that <seealso cref="#seek seek()"/> operations will be
- /// valid over a "marked range" extending from the index where {@code mark()}
- /// was called to the current <seealso cref="#index index()"/>. This allows the use of
- /// streaming input sources by specifying the minimum buffering requirements
- /// to support arbitrary lookahead during prediction.
- /// <p/>
- /// The returned mark is an opaque handle (type {@code int}) which is passed
- /// to <seealso cref="#release release()"/> when the guarantees provided by the marked
- /// range are no longer necessary. When calls to
- /// {@code mark()}/{@code release()} are nested, the marks must be released
- /// in reverse order of which they were obtained. Since marked regions are
- /// used during performance-critical sections of prediction, the specific
- /// behavior of invalid usage is unspecified (i.e. a mark is not released, or
- /// a mark is released twice, or marks are not released in reverse order from
- /// which they were created).
- /// <p/>
- /// The behavior of this method is unspecified if no call to an
- /// <seealso cref="IntStream initializing method"/> has occurred after this stream was
- /// constructed.
- /// <p/>
- /// This method does not change the current position in the input stream.
- /// <p/>
- /// The following example shows the use of <seealso cref="#mark mark()"/>,
- /// <seealso cref="#release release(mark)"/>, <seealso cref="#index index()"/>, and
- /// <seealso cref="#seek seek(index)"/> as part of an operation to safely work within a
- /// marked region, then restore the stream position to its original value and
- /// release the mark.
- /// <pre>
- /// IntStream stream = ...;
- /// int index = -1;
- /// int mark = stream.mark();
- /// try {
- /// index = stream.index();
- /// // perform work here...
- /// } finally {
- /// if (index != -1) {
- /// stream.seek(index);
- /// }
- /// stream.release(mark);
- /// }
- /// </pre>
- /// </summary>
- /// <returns> An opaque marker which should be passed to
- /// <seealso cref="#release release()"/> when the marked range is no longer required. </returns>
- virtual ssize_t mark() = 0;
-
- /// <summary>
- /// This method releases a marked range created by a call to
- /// <seealso cref="#mark mark()"/>. Calls to {@code release()} must appear in the
- /// reverse order of the corresponding calls to {@code mark()}. If a mark is
- /// released twice, or if marks are not released in reverse order of the
- /// corresponding calls to {@code mark()}, the behavior is unspecified.
- /// <p/>
- /// For more information and an example, see <seealso cref="#mark"/>.
- /// </summary>
- /// <param name="marker"> A marker returned by a call to {@code mark()}. </param>
- /// <seealso cref= #mark </seealso>
- virtual void release(ssize_t marker) = 0;
-
- /// <summary>
- /// Return the index into the stream of the input symbol referred to by
- /// {@code LA(1)}.
- /// <p/>
- /// The behavior of this method is unspecified if no call to an
- /// <seealso cref="IntStream initializing method"/> has occurred after this stream was
- /// constructed.
- /// </summary>
- virtual size_t index() = 0;
-
- /// <summary>
- /// Set the input cursor to the position indicated by {@code index}. If the
- /// specified index lies past the end of the stream, the operation behaves as
- /// though {@code index} was the index of the EOF symbol. After this method
- /// returns without throwing an exception, at least one of the following
- /// will be true.
- ///
- /// <ul>
- /// <li><seealso cref="#index index()"/> will return the index of the first symbol
- /// appearing at or after the specified {@code index}. Specifically,
- /// implementations which filter their sources should automatically
- /// adjust {@code index} forward the minimum amount required for the
- /// operation to target a non-ignored symbol.</li>
- /// <li>{@code LA(1)} returns <seealso cref="#EOF"/></li>
- /// </ul>
- ///
- /// This operation is guaranteed to not throw an exception if {@code index}
- /// lies within a marked region. For more information on marked regions, see
- /// <seealso cref="#mark"/>. The behavior of this method is unspecified if no call to
- /// an <seealso cref="IntStream initializing method"/> has occurred after this stream
- /// was constructed.
- /// </summary>
- /// <param name="index"> The absolute index to seek to.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code index} is less than 0 </exception>
- /// <exception cref="UnsupportedOperationException"> if the stream does not support
- /// seeking to the specified index </exception>
- virtual void seek(size_t index) = 0;
-
- /// <summary>
- /// Returns the total number of symbols in the stream, including a single EOF
- /// symbol.
- /// </summary>
- /// <exception cref="UnsupportedOperationException"> if the size of the stream is
- /// unknown. </exception>
- virtual size_t size() = 0;
-
- /// <summary>
- /// Gets the name of the underlying symbol source. This method returns a
- /// non-null, non-empty string. If such a name is not known, this method
- /// returns <seealso cref="#UNKNOWN_SOURCE_NAME"/>.
- /// </summary>
- virtual std::string getSourceName() const = 0;
- };
-
-} // namespace antlr4
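
The mark()/seek()/release() contract documented above is easier to see in C++ form. The following is a minimal sketch that mirrors the <pre> example from the comment, assuming `stream` is any already-initialized IntStream implementation; it uses the runtime's antlrcpp::finally helper, the same idiom Lexer::nextToken (later in this patch) relies on.

    #include "IntStream.h"
    #include "support/CPPUtils.h"

    // Work inside a marked region, then restore the original position and
    // release the mark, even if the work throws.
    void workWithinMarkedRegion(antlr4::IntStream &stream) {
      size_t index = INVALID_INDEX;                  // "not captured yet" sentinel
      ssize_t marker = stream.mark();                // open a marked range
      auto onExit = antlrcpp::finally([&stream, &index, marker] {
        if (index != INVALID_INDEX) {
          stream.seek(index);                        // restore the original position
        }
        stream.release(marker);                      // release in reverse order of mark()
      });
      index = stream.index();
      // ... arbitrary lookahead is now safe, e.g. stream.LA(1), stream.consume() ...
    }
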
diff --git a/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.cpp
deleted file mode 100644
index f2812ba910..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "InterpreterRuleContext.h"
-
-using namespace antlr4;
-
-InterpreterRuleContext::InterpreterRuleContext() : ParserRuleContext() {
-}
-
-InterpreterRuleContext::InterpreterRuleContext(ParserRuleContext *parent, size_t invokingStateNumber, size_t ruleIndex)
- : ParserRuleContext(parent, invokingStateNumber), _ruleIndex(ruleIndex) {
-}
-
-size_t InterpreterRuleContext::getRuleIndex() const {
- return _ruleIndex;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.h b/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.h
deleted file mode 100644
index a34d06b1f1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/InterpreterRuleContext.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ParserRuleContext.h"
-
-namespace antlr4 {
-
- /**
- * This class extends {@link ParserRuleContext} by allowing the value of
- * {@link #getRuleIndex} to be explicitly set for the context.
- *
- * <p>
- * {@link ParserRuleContext} does not include field storage for the rule index
- * since the context classes created by the code generator override the
- * {@link #getRuleIndex} method to return the correct value for that context.
- * Since the parser interpreter does not use the context classes generated for a
- * parser, this class (with slightly more memory overhead per node) is used to
- * provide equivalent functionality.</p>
- */
- class ANTLR4CPP_PUBLIC InterpreterRuleContext : public ParserRuleContext {
- public:
- InterpreterRuleContext();
-
- /**
- * Constructs a new {@link InterpreterRuleContext} with the specified
- * parent, invoking state, and rule index.
- *
- * @param parent The parent context.
- * @param invokingStateNumber The invoking state number.
- * @param ruleIndex The rule index for the current context.
- */
- InterpreterRuleContext(ParserRuleContext *parent, size_t invokingStateNumber, size_t ruleIndex);
-
- virtual size_t getRuleIndex() const override;
-
- protected:
- /** This is the backing field for {@link #getRuleIndex}. */
- const size_t _ruleIndex = INVALID_INDEX;
-};
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Lexer.cpp b/contrib/libs/antlr4_cpp_runtime/src/Lexer.cpp
deleted file mode 100644
index b0385c56ba..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Lexer.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LexerATNSimulator.h"
-#include "Exceptions.h"
-#include "misc/Interval.h"
-#include "CommonTokenFactory.h"
-#include "LexerNoViableAltException.h"
-#include "ANTLRErrorListener.h"
-#include "support/CPPUtils.h"
-#include "CommonToken.h"
-
-#include "Lexer.h"
-
-#define DEBUG_LEXER 0
-
-using namespace antlrcpp;
-using namespace antlr4;
-
-Lexer::Lexer() : Recognizer() {
- InitializeInstanceFields();
- _input = nullptr;
-}
-
-Lexer::Lexer(CharStream *input) : Recognizer(), _input(input) {
- InitializeInstanceFields();
-}
-
-void Lexer::reset() {
- // wack Lexer state variables
- _input->seek(0); // rewind the input
-
- _syntaxErrors = 0;
- token.reset();
- type = Token::INVALID_TYPE;
- channel = Token::DEFAULT_CHANNEL;
- tokenStartCharIndex = INVALID_INDEX;
- tokenStartCharPositionInLine = 0;
- tokenStartLine = 0;
- type = 0;
- _text = "";
-
- hitEOF = false;
- mode = Lexer::DEFAULT_MODE;
- modeStack.clear();
-
- getInterpreter<atn::LexerATNSimulator>()->reset();
-}
-
-std::unique_ptr<Token> Lexer::nextToken() {
- // Mark start location in char stream so unbuffered streams are
- // guaranteed to at least have the text of the current token
- ssize_t tokenStartMarker = _input->mark();
-
- auto onExit = finally([this, tokenStartMarker]{
- // make sure we release marker after match or
- // unbuffered char stream will keep buffering
- _input->release(tokenStartMarker);
- });
-
- while (true) {
- outerContinue:
- if (hitEOF) {
- emitEOF();
- return std::move(token);
- }
-
- token.reset();
- channel = Token::DEFAULT_CHANNEL;
- tokenStartCharIndex = _input->index();
- tokenStartCharPositionInLine = getInterpreter<atn::LexerATNSimulator>()->getCharPositionInLine();
- tokenStartLine = getInterpreter<atn::LexerATNSimulator>()->getLine();
- _text = "";
- do {
- type = Token::INVALID_TYPE;
- size_t ttype;
- try {
- ttype = getInterpreter<atn::LexerATNSimulator>()->match(_input, mode);
- } catch (LexerNoViableAltException &e) {
- notifyListeners(e); // report error
- recover(e);
- ttype = SKIP;
- }
- if (_input->LA(1) == EOF) {
- hitEOF = true;
- }
- if (type == Token::INVALID_TYPE) {
- type = ttype;
- }
- if (type == SKIP) {
- goto outerContinue;
- }
- } while (type == MORE);
- if (token == nullptr) {
- emit();
- }
- return std::move(token);
- }
-}
-
-void Lexer::skip() {
- type = SKIP;
-}
-
-void Lexer::more() {
- type = MORE;
-}
-
-void Lexer::setMode(size_t m) {
- mode = m;
-}
-
-void Lexer::pushMode(size_t m) {
-#if DEBUG_LEXER == 1
- std::cout << "pushMode " << m << std::endl;
-#endif
-
- modeStack.push_back(mode);
- setMode(m);
-}
-
-size_t Lexer::popMode() {
- if (modeStack.empty()) {
- throw EmptyStackException();
- }
-#if DEBUG_LEXER == 1
- std::cout << std::string("popMode back to ") << modeStack.back() << std::endl;
-#endif
-
- setMode(modeStack.back());
- modeStack.pop_back();
- return mode;
-}
-
-
-TokenFactory<CommonToken>* Lexer::getTokenFactory() {
- return _factory;
-}
-
-void Lexer::setInputStream(IntStream *input) {
- reset();
- _input = dynamic_cast<CharStream*>(input);
-}
-
-std::string Lexer::getSourceName() {
- return _input->getSourceName();
-}
-
-CharStream* Lexer::getInputStream() {
- return _input;
-}
-
-void Lexer::emit(std::unique_ptr<Token> newToken) {
- token = std::move(newToken);
-}
-
-Token* Lexer::emit() {
- emit(_factory->create({ this, _input }, type, _text, channel,
- tokenStartCharIndex, getCharIndex() - 1, tokenStartLine, tokenStartCharPositionInLine));
- return token.get();
-}
-
-Token* Lexer::emitEOF() {
- size_t cpos = getCharPositionInLine();
- size_t line = getLine();
- emit(_factory->create({ this, _input }, EOF, "", Token::DEFAULT_CHANNEL, _input->index(), _input->index() - 1, line, cpos));
- return token.get();
-}
-
-size_t Lexer::getLine() const {
- return getInterpreter<atn::LexerATNSimulator>()->getLine();
-}
-
-size_t Lexer::getCharPositionInLine() {
- return getInterpreter<atn::LexerATNSimulator>()->getCharPositionInLine();
-}
-
-void Lexer::setLine(size_t line) {
- getInterpreter<atn::LexerATNSimulator>()->setLine(line);
-}
-
-void Lexer::setCharPositionInLine(size_t charPositionInLine) {
- getInterpreter<atn::LexerATNSimulator>()->setCharPositionInLine(charPositionInLine);
-}
-
-size_t Lexer::getCharIndex() {
- return _input->index();
-}
-
-std::string Lexer::getText() {
- if (!_text.empty()) {
- return _text;
- }
- return getInterpreter<atn::LexerATNSimulator>()->getText(_input);
-}
-
-void Lexer::setText(const std::string &text) {
- _text = text;
-}
-
-std::unique_ptr<Token> Lexer::getToken() {
- return std::move(token);
-}
-
-void Lexer::setToken(std::unique_ptr<Token> newToken) {
- token = std::move(newToken);
-}
-
-void Lexer::setType(size_t ttype) {
- type = ttype;
-}
-
-size_t Lexer::getType() {
- return type;
-}
-
-void Lexer::setChannel(size_t newChannel) {
- channel = newChannel;
-}
-
-size_t Lexer::getChannel() {
- return channel;
-}
-
-std::vector<std::unique_ptr<Token>> Lexer::getAllTokens() {
- std::vector<std::unique_ptr<Token>> tokens;
- std::unique_ptr<Token> t = nextToken();
- while (t->getType() != EOF) {
- tokens.push_back(std::move(t));
- t = nextToken();
- }
- return tokens;
-}
-
-void Lexer::recover(const LexerNoViableAltException &/*e*/) {
- if (_input->LA(1) != EOF) {
- // skip a char and try again
- getInterpreter<atn::LexerATNSimulator>()->consume(_input);
- }
-}
-
-void Lexer::notifyListeners(const LexerNoViableAltException & /*e*/) {
- ++_syntaxErrors;
- std::string text = _input->getText(misc::Interval(tokenStartCharIndex, _input->index()));
- std::string msg = std::string("token recognition error at: '") + getErrorDisplay(text) + std::string("'");
-
- ProxyErrorListener &listener = getErrorListenerDispatch();
- listener.syntaxError(this, nullptr, tokenStartLine, tokenStartCharPositionInLine, msg, std::current_exception());
-}
-
-std::string Lexer::getErrorDisplay(const std::string &s) {
- std::stringstream ss;
- for (auto c : s) {
- switch (c) {
- case '\n':
- ss << "\\n";
- break;
- case '\t':
- ss << "\\t";
- break;
- case '\r':
- ss << "\\r";
- break;
- default:
- ss << c;
- break;
- }
- }
- return ss.str();
-}
-
-void Lexer::recover(RecognitionException * /*re*/) {
- // TODO: Do we lose character or line position information?
- _input->consume();
-}
-
-size_t Lexer::getNumberOfSyntaxErrors() {
- return _syntaxErrors;
-}
-
-void Lexer::InitializeInstanceFields() {
- _syntaxErrors = 0;
- token = nullptr;
- _factory = CommonTokenFactory::DEFAULT.get();
- tokenStartCharIndex = INVALID_INDEX;
- tokenStartLine = 0;
- tokenStartCharPositionInLine = 0;
- hitEOF = false;
- channel = 0;
- type = 0;
- mode = Lexer::DEFAULT_MODE;
-}
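
A usage sketch of the token loop implemented above: getAllTokens() simply drives nextToken() until EOF. Here MyLexer is a placeholder for any lexer class generated by the ANTLR tool, and the input string is arbitrary.

    #include <iostream>
    #include "antlr4-runtime.h"
    #include "MyLexer.h"   // hypothetical: any ANTLR-generated lexer

    int main() {
      antlr4::ANTLRInputStream input("text to tokenize");
      MyLexer lexer(&input);                       // Lexer(CharStream *input)
      // getAllTokens() calls nextToken() in a loop and stops at EOF.
      for (auto &token : lexer.getAllTokens()) {
        std::cout << token->toString() << std::endl;
      }
      return 0;
    }
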
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Lexer.h b/contrib/libs/antlr4_cpp_runtime/src/Lexer.h
deleted file mode 100644
index 77033ad9e6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Lexer.h
+++ /dev/null
@@ -1,196 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "TokenSource.h"
-#include "CharStream.h"
-#include "Token.h"
-
-namespace antlr4 {
-
- /// A lexer is a recognizer that draws input symbols from a character stream.
- /// Lexer grammars result in a subclass of this object. A Lexer object
- /// uses simplified match() and error recovery mechanisms in the interest
- /// of speed.
- class ANTLR4CPP_PUBLIC Lexer : public Recognizer, public TokenSource {
- public:
- static constexpr size_t DEFAULT_MODE = 0;
- static constexpr size_t MORE = std::numeric_limits<size_t>::max() - 1;
- static constexpr size_t SKIP = std::numeric_limits<size_t>::max() - 2;
-
- static constexpr size_t DEFAULT_TOKEN_CHANNEL = Token::DEFAULT_CHANNEL;
- static constexpr size_t HIDDEN = Token::HIDDEN_CHANNEL;
- static constexpr size_t MIN_CHAR_VALUE = 0;
- static constexpr size_t MAX_CHAR_VALUE = 0x10FFFF;
-
- CharStream *_input; // Pure reference, usually from statically allocated instance.
-
- protected:
- /// How to create token objects.
- TokenFactory<CommonToken> *_factory;
-
- public:
- /// The goal of all lexer rules/methods is to create a token object.
- /// This is an instance variable as multiple rules may collaborate to
- /// create a single token. nextToken will return this object after
- /// matching lexer rule(s). If you subclass to allow multiple token
- /// emissions, then set this to the last token to be matched or
- /// something nonnull so that the auto token emit mechanism will not
- /// emit another token.
-
- // Life cycle of a token is this:
- // Created by emit() (via the token factory) or by action code, holding ownership of it.
- // Ownership is handed over to the token stream when calling nextToken().
- std::unique_ptr<Token> token;
-
- /// <summary>
- /// What character index in the stream did the current token start at?
- /// Needed, for example, to get the text for current token. Set at
- /// the start of nextToken.
- /// </summary>
- size_t tokenStartCharIndex;
-
- /// <summary>
- /// The line on which the first character of the token resides </summary>
- size_t tokenStartLine;
-
- /// The character position of first character within the line.
- size_t tokenStartCharPositionInLine;
-
- /// Once we see EOF on char stream, next token will be EOF.
- /// If you have DONE : EOF ; then you see DONE EOF.
- bool hitEOF;
-
- /// The channel number for the current token.
- size_t channel;
-
- /// The token type for the current token.
- size_t type;
-
- // Use the vector as a stack.
- std::vector<size_t> modeStack;
- size_t mode;
-
- Lexer();
- Lexer(CharStream *input);
- virtual ~Lexer() {}
-
- virtual void reset();
-
- /// Return a token from this source; i.e., match a token on the char stream.
- virtual std::unique_ptr<Token> nextToken() override;
-
- /// Instruct the lexer to skip creating a token for current lexer rule
- /// and look for another token. nextToken() knows to keep looking when
- /// a lexer rule finishes with token set to SKIP_TOKEN. Recall that
- /// if token == null at end of any token rule, it creates one for you
- /// and emits it.
- virtual void skip();
- virtual void more();
- virtual void setMode(size_t m);
- virtual void pushMode(size_t m);
- virtual size_t popMode();
-
- template<typename T1>
- void setTokenFactory(TokenFactory<T1> *factory) {
- this->_factory = factory;
- }
-
- virtual TokenFactory<CommonToken>* getTokenFactory() override;
-
- /// Set the char stream and reset the lexer
- virtual void setInputStream(IntStream *input) override;
-
- virtual std::string getSourceName() override;
-
- virtual CharStream* getInputStream() override;
-
- /// By default does not support multiple emits per nextToken invocation
- /// for efficiency reasons. Subclasses can override this method, nextToken,
- /// and getToken (to push tokens into a list and pull from that list
- /// rather than a single variable as this implementation does).
- virtual void emit(std::unique_ptr<Token> newToken);
-
- /// The standard method called to automatically emit a token at the
- /// outermost lexical rule. The token object should point into the
- /// char buffer start..stop. If there is a text override in 'text',
- /// use that to set the token's text. Override this method to emit
- /// custom Token objects or provide a new factory.
- virtual Token* emit();
-
- virtual Token* emitEOF();
-
- virtual size_t getLine() const override;
-
- virtual size_t getCharPositionInLine() override;
-
- virtual void setLine(size_t line);
-
- virtual void setCharPositionInLine(size_t charPositionInLine);
-
- /// What is the index of the current character of lookahead?
- virtual size_t getCharIndex();
-
- /// Return the text matched so far for the current token or any
- /// text override.
- virtual std::string getText();
-
- /// Set the complete text of this token; it wipes any previous
- /// changes to the text.
- virtual void setText(const std::string &text);
-
- /// Override if emitting multiple tokens.
- virtual std::unique_ptr<Token> getToken();
-
- virtual void setToken(std::unique_ptr<Token> newToken);
-
- virtual void setType(size_t ttype);
-
- virtual size_t getType();
-
- virtual void setChannel(size_t newChannel);
-
- virtual size_t getChannel();
-
- virtual const std::vector<std::string>& getChannelNames() const = 0;
-
- virtual const std::vector<std::string>& getModeNames() const = 0;
-
- /// Return a list of all Token objects in input char stream.
- /// Forces load of all tokens. Does not include EOF token.
- virtual std::vector<std::unique_ptr<Token>> getAllTokens();
-
- virtual void recover(const LexerNoViableAltException &e);
-
- virtual void notifyListeners(const LexerNoViableAltException &e);
-
- virtual std::string getErrorDisplay(const std::string &s);
-
- /// Lexers can normally match any char in its vocabulary after matching
- /// a token, so do the easy thing and just kill a character and hope
- /// it all works out. You can instead use the rule invocation stack
- /// to do sophisticated error recovery if you are in a fragment rule.
- virtual void recover(RecognitionException *re);
-
- /// <summary>
- /// Gets the number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- /// <seealso cref= #notifyListeners </seealso>
- virtual size_t getNumberOfSyntaxErrors();
-
- protected:
- /// You can set the text for the current token to override what is in
- /// the input char buffer (via setText()).
- std::string _text;
-
- private:
- size_t _syntaxErrors;
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
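
One note on the templated setTokenFactory() above: it accepts a raw pointer to any compatible factory, which the lexer does not own. A sketch, assuming CommonTokenFactory keeps the copy-text constructor it has in the Java runtime:

    #include "antlr4-runtime.h"

    // Install a factory that copies token text eagerly instead of referring back
    // into the char stream. The factory must outlive the lexer's use of it.
    void useEagerTextCopy(antlr4::Lexer &lexer) {
      static antlr4::CommonTokenFactory factory(true);  // copyText = true (assumed ctor)
      lexer.setTokenFactory(&factory);
    }
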
diff --git a/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.cpp b/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.cpp
deleted file mode 100644
index 38acd09ddd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNType.h"
-#include "atn/LexerATNSimulator.h"
-#include "dfa/DFA.h"
-#include "Exceptions.h"
-#include "Vocabulary.h"
-
-#include "LexerInterpreter.h"
-
-using namespace antlr4;
-
-LexerInterpreter::LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
- const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames, const std::vector<std::string> &modeNames,
- const atn::ATN &atn, CharStream *input)
- : Lexer(input), _grammarFileName(grammarFileName), _atn(atn), _ruleNames(ruleNames),
- _channelNames(channelNames), _modeNames(modeNames),
- _vocabulary(vocabulary) {
-
- if (_atn.grammarType != atn::ATNType::LEXER) {
- throw IllegalArgumentException("The ATN must be a lexer ATN.");
- }
-
- for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) {
- _decisionToDFA.push_back(dfa::DFA(_atn.getDecisionState(i), i));
- }
- _interpreter = new atn::LexerATNSimulator(this, _atn, _decisionToDFA, _sharedContextCache); /* mem-check: deleted in d-tor */
-}
-
-LexerInterpreter::~LexerInterpreter()
-{
- delete _interpreter;
-}
-
-const atn::ATN& LexerInterpreter::getATN() const {
- return _atn;
-}
-
-std::string LexerInterpreter::getGrammarFileName() const {
- return _grammarFileName;
-}
-
-const std::vector<std::string>& LexerInterpreter::getRuleNames() const {
- return _ruleNames;
-}
-
-const std::vector<std::string>& LexerInterpreter::getChannelNames() const {
- return _channelNames;
-}
-
-const std::vector<std::string>& LexerInterpreter::getModeNames() const {
- return _modeNames;
-}
-
-const dfa::Vocabulary& LexerInterpreter::getVocabulary() const {
- return _vocabulary;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.h b/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.h
deleted file mode 100644
index 3787c1d0d5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/LexerInterpreter.h
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Lexer.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextCache.h"
-#include "Vocabulary.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC LexerInterpreter : public Lexer {
- public:
- LexerInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
- const std::vector<std::string> &ruleNames, const std::vector<std::string> &channelNames,
- const std::vector<std::string> &modeNames, const atn::ATN &atn, CharStream *input);
-
- ~LexerInterpreter();
-
- virtual const atn::ATN& getATN() const override;
- virtual std::string getGrammarFileName() const override;
- virtual const std::vector<std::string>& getRuleNames() const override;
- virtual const std::vector<std::string>& getChannelNames() const override;
- virtual const std::vector<std::string>& getModeNames() const override;
-
- virtual const dfa::Vocabulary& getVocabulary() const override;
-
- protected:
- const std::string _grammarFileName;
- const atn::ATN &_atn;
-
- const std::vector<std::string> &_ruleNames;
- const std::vector<std::string> &_channelNames;
- const std::vector<std::string> &_modeNames;
- std::vector<dfa::DFA> _decisionToDFA;
-
- atn::PredictionContextCache _sharedContextCache;
-
- private:
- dfa::Vocabulary _vocabulary;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.cpp b/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.cpp
deleted file mode 100644
index 3304b82b40..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-#include "support/CPPUtils.h"
-#include "CharStream.h"
-#include "Lexer.h"
-
-#include "LexerNoViableAltException.h"
-
-using namespace antlr4;
-
-LexerNoViableAltException::LexerNoViableAltException(Lexer *lexer, CharStream *input, size_t startIndex,
- atn::ATNConfigSet *deadEndConfigs)
- : RecognitionException(lexer, input, nullptr, nullptr), _startIndex(startIndex), _deadEndConfigs(deadEndConfigs) {
-}
-
-size_t LexerNoViableAltException::getStartIndex() {
- return _startIndex;
-}
-
-atn::ATNConfigSet* LexerNoViableAltException::getDeadEndConfigs() {
- return _deadEndConfigs;
-}
-
-std::string LexerNoViableAltException::toString() {
- std::string symbol;
- if (_startIndex < getInputStream()->size()) {
- symbol = static_cast<CharStream *>(getInputStream())->getText(misc::Interval(_startIndex, _startIndex));
- symbol = antlrcpp::escapeWhitespace(symbol, false);
- }
- std::string format = "LexerNoViableAltException('" + symbol + "')";
- return format;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.h b/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.h
deleted file mode 100644
index 52eada7cfa..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/LexerNoViableAltException.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC LexerNoViableAltException : public RecognitionException {
- public:
- LexerNoViableAltException(Lexer *lexer, CharStream *input, size_t startIndex,
- atn::ATNConfigSet *deadEndConfigs);
-
- virtual size_t getStartIndex();
- virtual atn::ATNConfigSet* getDeadEndConfigs();
- virtual std::string toString();
-
- private:
- /// Matching attempted at what input index?
- const size_t _startIndex;
-
- /// Which configurations did we try at input.index() that couldn't match input.LA(1)?
- atn::ATNConfigSet *_deadEndConfigs;
-
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.cpp b/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.cpp
deleted file mode 100644
index 45372808e5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.cpp
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-#include "CommonToken.h"
-#include "CharStream.h"
-
-#include "ListTokenSource.h"
-
-using namespace antlr4;
-
-ListTokenSource::ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_) : ListTokenSource(std::move(tokens_), "") {
-}
-
-ListTokenSource::ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_, const std::string &sourceName_)
- : tokens(std::move(tokens_)), sourceName(sourceName_) {
- InitializeInstanceFields();
- if (tokens.empty()) {
- throw "tokens cannot be null";
- }
-
- // Check if there is an eof token and create one if not.
- if (tokens.back()->getType() != Token::EOF) {
- Token *lastToken = tokens.back().get();
- size_t start = INVALID_INDEX;
- size_t previousStop = lastToken->getStopIndex();
- if (previousStop != INVALID_INDEX) {
- start = previousStop + 1;
- }
-
- size_t stop = std::max(INVALID_INDEX, start - 1);
- tokens.emplace_back((_factory->create({ this, getInputStream() }, Token::EOF, "EOF",
- Token::DEFAULT_CHANNEL, start, stop, static_cast<int>(lastToken->getLine()), lastToken->getCharPositionInLine())));
- }
-}
-
-size_t ListTokenSource::getCharPositionInLine() {
- if (i < tokens.size()) {
- return tokens[i]->getCharPositionInLine();
- }
- return 0;
-}
-
-std::unique_ptr<Token> ListTokenSource::nextToken() {
- if (i < tokens.size()) {
- return std::move(tokens[i++]);
- }
- return nullptr;
-}
-
-size_t ListTokenSource::getLine() const {
- if (i < tokens.size()) {
- return tokens[i]->getLine();
- }
-
- return 1;
-}
-
-CharStream *ListTokenSource::getInputStream() {
- if (i < tokens.size()) {
- return tokens[i]->getInputStream();
- } else if (!tokens.empty()) {
- return tokens.back()->getInputStream();
- }
-
- // no input stream information is available
- return nullptr;
-}
-
-std::string ListTokenSource::getSourceName() {
- if (sourceName != "") {
- return sourceName;
- }
-
- CharStream *inputStream = getInputStream();
- if (inputStream != nullptr) {
- return inputStream->getSourceName();
- }
-
- return "List";
-}
-
-TokenFactory<CommonToken>* ListTokenSource::getTokenFactory() {
- return _factory;
-}
-
-void ListTokenSource::InitializeInstanceFields() {
- i = 0;
- _factory = CommonTokenFactory::DEFAULT.get();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.h b/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.h
deleted file mode 100644
index 542b05cb5a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ListTokenSource.h
+++ /dev/null
@@ -1,88 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenSource.h"
-#include "CommonTokenFactory.h"
-
-namespace antlr4 {
-
- /// Provides an implementation of <seealso cref="TokenSource"/> as a wrapper around a list
- /// of <seealso cref="Token"/> objects.
- ///
- /// If the final token in the list is an <seealso cref="Token#EOF"/> token, it will be used
- /// as the EOF token for every call to <seealso cref="#nextToken"/> after the end of the
- /// list is reached. Otherwise, an EOF token will be created.
- class ANTLR4CPP_PUBLIC ListTokenSource : public TokenSource {
- protected:
- // This list will be emptied token by token as we call nextToken().
- // Token streams can be used to buffer tokens for a while.
- std::vector<std::unique_ptr<Token>> tokens;
-
- private:
- /// <summary>
- /// The name of the input source. If this value is {@code null}, a call to
- /// <seealso cref="#getSourceName"/> should return the source name used to create the
- /// the next token in <seealso cref="#tokens"/> (or the previous token if the end of
- /// the input has been reached).
- /// </summary>
- const std::string sourceName;
-
- protected:
- /// The index into <seealso cref="#tokens"/> of token to return by the next call to
- /// <seealso cref="#nextToken"/>. The end of the input is indicated by this value
- /// being greater than or equal to the number of items in <seealso cref="#tokens"/>.
- size_t i;
-
- private:
- /// This is the backing field for <seealso cref="#getTokenFactory"/> and
- /// <seealso cref="setTokenFactory"/>.
- TokenFactory<CommonToken> *_factory = CommonTokenFactory::DEFAULT.get();
-
- public:
- /// Constructs a new <seealso cref="ListTokenSource"/> instance from the specified
- /// collection of <seealso cref="Token"/> objects.
- ///
- /// <param name="tokens"> The collection of <seealso cref="Token"/> objects to provide as a
- /// <seealso cref="TokenSource"/>. </param>
- /// <exception cref="NullPointerException"> if {@code tokens} is {@code null} </exception>
- ListTokenSource(std::vector<std::unique_ptr<Token>> tokens);
- ListTokenSource(const ListTokenSource& other) = delete;
-
- ListTokenSource& operator = (const ListTokenSource& other) = delete;
-
- /// <summary>
- /// Constructs a new <seealso cref="ListTokenSource"/> instance from the specified
- /// collection of <seealso cref="Token"/> objects and source name.
- /// </summary>
- /// <param name="tokens"> The collection of <seealso cref="Token"/> objects to provide as a
- /// <seealso cref="TokenSource"/>. </param>
- /// <param name="sourceName"> The name of the <seealso cref="TokenSource"/>. If this value is
- /// {@code null}, <seealso cref="#getSourceName"/> will attempt to infer the name from
- /// the next <seealso cref="Token"/> (or the previous token if the end of the input has
- /// been reached).
- /// </param>
- /// <exception cref="NullPointerException"> if {@code tokens} is {@code null} </exception>
- ListTokenSource(std::vector<std::unique_ptr<Token>> tokens_, const std::string &sourceName_);
-
- virtual size_t getCharPositionInLine() override;
- virtual std::unique_ptr<Token> nextToken() override;
- virtual size_t getLine() const override;
- virtual CharStream* getInputStream() override;
- virtual std::string getSourceName() override;
-
- template<typename T1>
- void setTokenFactory(TokenFactory<T1> *factory) {
- this->_factory = factory;
- }
-
- virtual TokenFactory<CommonToken>* getTokenFactory() override;
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
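
A sketch of the wrapper in use: build a token vector by hand, wrap it in a ListTokenSource, and buffer it with a CommonTokenStream. The token type constant MY_ID is an assumption standing in for a generated lexer's constants.

    #include "antlr4-runtime.h"

    void replayTokens() {
      constexpr size_t MY_ID = 1;  // hypothetical token type (>= Token::MIN_USER_TOKEN_TYPE)
      std::vector<std::unique_ptr<antlr4::Token>> tokens;
      tokens.push_back(std::make_unique<antlr4::CommonToken>(MY_ID, "hello"));
      tokens.push_back(std::make_unique<antlr4::CommonToken>(MY_ID, "world"));

      antlr4::ListTokenSource source(std::move(tokens));  // appends an EOF token if missing
      antlr4::CommonTokenStream stream(&source);
      stream.fill();                                      // pull every token up to EOF
      // The stream can now be handed to a parser, e.g. MyParser parser(&stream);
    }
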
diff --git a/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.cpp b/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.cpp
deleted file mode 100644
index 273c208c74..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Parser.h"
-
-#include "NoViableAltException.h"
-
-using namespace antlr4;
-
-namespace {
-
-// Create a normal shared pointer if the configurations are to be deleted. If not, then
-// the shared pointer is created with a deleter that does nothing.
-Ref<atn::ATNConfigSet> buildConfigsRef(atn::ATNConfigSet *configs, bool deleteConfigs) {
- if (deleteConfigs) {
- return Ref<atn::ATNConfigSet>(configs);
- } else {
- return Ref<atn::ATNConfigSet>(configs, [](atn::ATNConfigSet *){});
- }
-}
-
-}
-
-NoViableAltException::NoViableAltException(Parser *recognizer)
- : NoViableAltException(recognizer, recognizer->getTokenStream(), recognizer->getCurrentToken(),
- recognizer->getCurrentToken(), nullptr, recognizer->getContext(), false) {
-}
-
-NoViableAltException::NoViableAltException(Parser *recognizer, TokenStream *input, Token *startToken,
- Token *offendingToken, atn::ATNConfigSet *deadEndConfigs, ParserRuleContext *ctx, bool deleteConfigs)
- : RecognitionException("No viable alternative", recognizer, input, ctx, offendingToken),
- _deadEndConfigs(buildConfigsRef(deadEndConfigs, deleteConfigs)), _startToken(startToken) {
-}
-
-NoViableAltException::~NoViableAltException() {
-}
-
-Token* NoViableAltException::getStartToken() const {
- return _startToken;
-}
-
-atn::ATNConfigSet* NoViableAltException::getDeadEndConfigs() const {
- return _deadEndConfigs.get();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.h b/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.h
deleted file mode 100644
index b15039d0cb..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/NoViableAltException.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RecognitionException.h"
-#include "Token.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-
- /// Indicates that the parser could not decide which of two or more paths
- /// to take based upon the remaining input. It tracks the starting token
- /// of the offending input and also knows where the parser was
- /// in the various paths when the error occurred. Reported by reportNoViableAlternative().
- class ANTLR4CPP_PUBLIC NoViableAltException : public RecognitionException {
- public:
- NoViableAltException(Parser *recognizer); // LL(1) error
- NoViableAltException(Parser *recognizer, TokenStream *input, Token *startToken,
- Token *offendingToken, atn::ATNConfigSet *deadEndConfigs, ParserRuleContext *ctx, bool deleteConfigs);
- ~NoViableAltException();
-
- virtual Token* getStartToken() const;
- virtual atn::ATNConfigSet* getDeadEndConfigs() const;
-
- private:
- /// Which configurations did we try at input.index() that couldn't match input.LT(1)?
- /// Shared pointer that conditionally deletes the configurations (based on flag
- /// passed during construction)
- Ref<atn::ATNConfigSet> _deadEndConfigs;
-
- /// The token object at the start index; the input stream might
- /// not be buffering tokens so get a reference to it. (At the
- /// time the error occurred, of course the stream needs to keep a
- /// buffer of all of the tokens, but later we might not have access to those.)
- Token *_startToken;
-
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Parser.cpp b/contrib/libs/antlr4_cpp_runtime/src/Parser.cpp
deleted file mode 100644
index 337bcba17a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Parser.cpp
+++ /dev/null
@@ -1,670 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNDeserializationOptions.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-#include "dfa/DFA.h"
-#include "ParserRuleContext.h"
-#include "tree/TerminalNode.h"
-#include "tree/ErrorNodeImpl.h"
-#include "Lexer.h"
-#include "atn/ParserATNSimulator.h"
-#include "misc/IntervalSet.h"
-#include "atn/RuleStartState.h"
-#include "DefaultErrorStrategy.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/RuleTransition.h"
-#include "atn/ATN.h"
-#include "Exceptions.h"
-#include "ANTLRErrorListener.h"
-#include "tree/pattern/ParseTreePattern.h"
-#include "internal/Synchronization.h"
-
-#include "atn/ProfilingATNSimulator.h"
-#include "atn/ParseInfo.h"
-
-#include "Parser.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::internal;
-using namespace antlrcpp;
-
-namespace {
-
-struct BypassAltsAtnCache final {
- std::shared_mutex mutex;
- /// This field maps from the serialized ATN string to the deserialized <seealso cref="ATN"/> with
- /// bypass alternatives.
- ///
- /// <seealso cref= ATNDeserializationOptions#isGenerateRuleBypassTransitions() </seealso>
- std::map<std::vector<int32_t>, std::unique_ptr<const atn::ATN>, std::less<>> map;
-};
-
-BypassAltsAtnCache* getBypassAltsAtnCache() {
- static BypassAltsAtnCache* const instance = new BypassAltsAtnCache();
- return instance;
-}
-
-}
-
-Parser::TraceListener::TraceListener(Parser *outerInstance_) : outerInstance(outerInstance_) {
-}
-
-Parser::TraceListener::~TraceListener() {
-}
-
-void Parser::TraceListener::enterEveryRule(ParserRuleContext *ctx) {
- std::cout << "enter " << outerInstance->getRuleNames()[ctx->getRuleIndex()]
- << ", LT(1)=" << outerInstance->_input->LT(1)->getText() << std::endl;
-}
-
-void Parser::TraceListener::visitTerminal(tree::TerminalNode *node) {
- std::cout << "consume " << node->getSymbol() << " rule "
- << outerInstance->getRuleNames()[outerInstance->getContext()->getRuleIndex()] << std::endl;
-}
-
-void Parser::TraceListener::visitErrorNode(tree::ErrorNode * /*node*/) {
-}
-
-void Parser::TraceListener::exitEveryRule(ParserRuleContext *ctx) {
- std::cout << "exit " << outerInstance->getRuleNames()[ctx->getRuleIndex()]
- << ", LT(1)=" << outerInstance->_input->LT(1)->getText() << std::endl;
-}
-
-Parser::TrimToSizeListener Parser::TrimToSizeListener::INSTANCE;
-
-Parser::TrimToSizeListener::~TrimToSizeListener() {
-}
-
-void Parser::TrimToSizeListener::enterEveryRule(ParserRuleContext * /*ctx*/) {
-}
-
-void Parser::TrimToSizeListener::visitTerminal(tree::TerminalNode * /*node*/) {
-}
-
-void Parser::TrimToSizeListener::visitErrorNode(tree::ErrorNode * /*node*/) {
-}
-
-void Parser::TrimToSizeListener::exitEveryRule(ParserRuleContext * ctx) {
- ctx->children.shrink_to_fit();
-}
-
-Parser::Parser(TokenStream *input) {
- InitializeInstanceFields();
- setInputStream(input);
-}
-
-Parser::~Parser() {
- _tracker.reset();
- delete _tracer;
-}
-
-void Parser::reset() {
- if (getInputStream() != nullptr) {
- getInputStream()->seek(0);
- }
- _errHandler->reset(this); // Watch out, this is not shared_ptr.reset().
-
- _matchedEOF = false;
- _syntaxErrors = 0;
- setTrace(false);
- _precedenceStack.clear();
- _precedenceStack.push_back(0);
- _ctx = nullptr;
- _tracker.reset();
-
- atn::ATNSimulator *interpreter = getInterpreter<atn::ParserATNSimulator>();
- if (interpreter != nullptr) {
- interpreter->reset();
- }
-}
-
-Token* Parser::match(size_t ttype) {
- Token *t = getCurrentToken();
- if (t->getType() == ttype) {
- if (ttype == EOF) {
- _matchedEOF = true;
- }
- _errHandler->reportMatch(this);
- consume();
- } else {
- t = _errHandler->recoverInline(this);
- if (_buildParseTrees && t->getTokenIndex() == INVALID_INDEX) {
- // we must have conjured up a new token during single token insertion
- // if it's not the current symbol
- _ctx->addChild(createErrorNode(t));
- }
- }
- return t;
-}
-
-Token* Parser::matchWildcard() {
- Token *t = getCurrentToken();
- if (t->getType() > 0) {
- _errHandler->reportMatch(this);
- consume();
- } else {
- t = _errHandler->recoverInline(this);
- if (_buildParseTrees && t->getTokenIndex() == INVALID_INDEX) {
- // we must have conjured up a new token during single token insertion
- // if it's not the current symbol
- _ctx->addChild(createErrorNode(t));
- }
- }
-
- return t;
-}
-
-void Parser::setBuildParseTree(bool buildParseTrees) {
- this->_buildParseTrees = buildParseTrees;
-}
-
-bool Parser::getBuildParseTree() {
- return _buildParseTrees;
-}
-
-void Parser::setTrimParseTree(bool trimParseTrees) {
- if (trimParseTrees) {
- if (getTrimParseTree()) {
- return;
- }
- addParseListener(&TrimToSizeListener::INSTANCE);
- } else {
- removeParseListener(&TrimToSizeListener::INSTANCE);
- }
-}
-
-bool Parser::getTrimParseTree() {
- return std::find(getParseListeners().begin(), getParseListeners().end(), &TrimToSizeListener::INSTANCE) != getParseListeners().end();
-}
-
-std::vector<tree::ParseTreeListener *> Parser::getParseListeners() {
- return _parseListeners;
-}
-
-void Parser::addParseListener(tree::ParseTreeListener *listener) {
- if (!listener) {
- throw NullPointerException("listener");
- }
-
- this->_parseListeners.push_back(listener);
-}
-
-void Parser::removeParseListener(tree::ParseTreeListener *listener) {
- if (!_parseListeners.empty()) {
- auto it = std::find(_parseListeners.begin(), _parseListeners.end(), listener);
- if (it != _parseListeners.end()) {
- _parseListeners.erase(it);
- }
- }
-}
-
-void Parser::removeParseListeners() {
- _parseListeners.clear();
-}
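
The listener registration above is the same hook the TraceListener (earlier in this file) uses; a custom listener only needs the four ParseTreeListener callbacks. A minimal sketch, with MyParser and startRule() standing in for a generated parser and its entry rule:

    #include "antlr4-runtime.h"

    // Counts how many rule contexts the parser enters while it parses.
    class RuleCountingListener : public antlr4::tree::ParseTreeListener {
    public:
      void enterEveryRule(antlr4::ParserRuleContext * /*ctx*/) override { ++_rules; }
      void exitEveryRule(antlr4::ParserRuleContext * /*ctx*/) override {}
      void visitTerminal(antlr4::tree::TerminalNode * /*node*/) override {}
      void visitErrorNode(antlr4::tree::ErrorNode * /*node*/) override {}
      size_t ruleCount() const { return _rules; }
    private:
      size_t _rules = 0;
    };

    // Usage (MyParser and startRule() are hypothetical):
    //   RuleCountingListener counter;
    //   parser.addParseListener(&counter);
    //   parser.startRule();
    //   // counter.ruleCount() now holds the number of rules entered.
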
-
-void Parser::triggerEnterRuleEvent() {
- for (auto *listener : _parseListeners) {
- listener->enterEveryRule(_ctx);
- _ctx->enterRule(listener);
- }
-}
-
-void Parser::triggerExitRuleEvent() {
- // reverse order walk of listeners
- for (auto it = _parseListeners.rbegin(); it != _parseListeners.rend(); ++it) {
- _ctx->exitRule(*it);
- (*it)->exitEveryRule(_ctx);
- }
-}
-
-size_t Parser::getNumberOfSyntaxErrors() {
- return _syntaxErrors;
-}
-
-TokenFactory<CommonToken>* Parser::getTokenFactory() {
- return _input->getTokenSource()->getTokenFactory();
-}
-
-const atn::ATN& Parser::getATNWithBypassAlts() {
- auto serializedAtn = getSerializedATN();
- if (serializedAtn.empty()) {
- throw UnsupportedOperationException("The current parser does not support an ATN with bypass alternatives.");
- }
- // XXX: using the entire serialized ATN as key into the map is a big resource waste.
- // How large can that thing become?
- auto *cache = getBypassAltsAtnCache();
- {
- std::shared_lock<std::shared_mutex> lock(cache->mutex);
- auto existing = cache->map.find(serializedAtn);
- if (existing != cache->map.end()) {
- return *existing->second;
- }
- }
-
- std::unique_lock<std::shared_mutex> lock(cache->mutex);
- auto existing = cache->map.find(serializedAtn);
- if (existing != cache->map.end()) {
- return *existing->second;
- }
- atn::ATNDeserializationOptions deserializationOptions;
- deserializationOptions.setGenerateRuleBypassTransitions(true);
- atn::ATNDeserializer deserializer(deserializationOptions);
- auto atn = deserializer.deserialize(serializedAtn);
- return *cache->map.insert(std::make_pair(std::vector<int32_t>(serializedAtn.begin(), serializedAtn.end()), std::move(atn))).first->second;
-}
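
getATNWithBypassAlts() above follows a read-mostly, double-checked pattern on a std::shared_mutex: probe the cache under a shared lock, and only on a miss take the unique lock and re-check before inserting. A generic sketch of the same idea, with illustrative names and an int payload standing in for the deserialized ATN:

    #include <map>
    #include <memory>
    #include <mutex>
    #include <shared_mutex>
    #include <string>

    struct Cache {
      std::shared_mutex mutex;
      std::map<std::string, std::unique_ptr<int>> map;
    };

    const int& getOrCreate(Cache &cache, const std::string &key) {
      {
        std::shared_lock<std::shared_mutex> lock(cache.mutex);  // fast path: readers only
        auto it = cache.map.find(key);
        if (it != cache.map.end()) {
          return *it->second;
        }
      }
      std::unique_lock<std::shared_mutex> lock(cache.mutex);    // slow path: writer
      auto it = cache.map.find(key);                            // re-check under the
      if (it != cache.map.end()) {                              // exclusive lock
        return *it->second;
      }
      auto value = std::make_unique<int>(static_cast<int>(key.size()));  // stands in for the expensive build
      return *cache.map.emplace(key, std::move(value)).first->second;
    }
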
-
-tree::pattern::ParseTreePattern Parser::compileParseTreePattern(const std::string &pattern, int patternRuleIndex) {
- if (getTokenStream() != nullptr) {
- TokenSource *tokenSource = getTokenStream()->getTokenSource();
- if (is<Lexer*>(tokenSource)) {
- Lexer *lexer = dynamic_cast<Lexer *>(tokenSource);
- return compileParseTreePattern(pattern, patternRuleIndex, lexer);
- }
- }
- throw UnsupportedOperationException("Parser can't discover a lexer to use");
-}
-
-tree::pattern::ParseTreePattern Parser::compileParseTreePattern(const std::string &pattern, int patternRuleIndex,
- Lexer *lexer) {
- tree::pattern::ParseTreePatternMatcher m(lexer, this);
- return m.compile(pattern, patternRuleIndex);
-}
-
-Ref<ANTLRErrorStrategy> Parser::getErrorHandler() {
- return _errHandler;
-}
-
-void Parser::setErrorHandler(Ref<ANTLRErrorStrategy> const& handler) {
- _errHandler = handler;
-}
-
-IntStream* Parser::getInputStream() {
- return getTokenStream();
-}
-
-void Parser::setInputStream(IntStream *input) {
- setTokenStream(static_cast<TokenStream*>(input));
-}
-
-TokenStream* Parser::getTokenStream() {
- return _input;
-}
-
-void Parser::setTokenStream(TokenStream *input) {
- _input = nullptr; // Just a reference we don't own.
- reset();
- _input = input;
-}
-
-Token* Parser::getCurrentToken() {
- return _input->LT(1);
-}
-
-void Parser::notifyErrorListeners(const std::string &msg) {
- notifyErrorListeners(getCurrentToken(), msg, nullptr);
-}
-
-void Parser::notifyErrorListeners(Token *offendingToken, const std::string &msg, std::exception_ptr e) {
- _syntaxErrors++;
- size_t line = offendingToken->getLine();
- size_t charPositionInLine = offendingToken->getCharPositionInLine();
-
- ProxyErrorListener &listener = getErrorListenerDispatch();
- listener.syntaxError(this, offendingToken, line, charPositionInLine, msg, e);
-}
-
-Token* Parser::consume() {
- Token *o = getCurrentToken();
- if (o->getType() != EOF) {
- getInputStream()->consume();
- }
-
- bool hasListener = !_parseListeners.empty();
- if (_buildParseTrees || hasListener) {
- if (_errHandler->inErrorRecoveryMode(this)) {
- tree::ErrorNode *node = createErrorNode(o);
- _ctx->addChild(node);
- if (_parseListeners.size() > 0) {
- for (auto *listener : _parseListeners) {
- listener->visitErrorNode(node);
- }
- }
- } else {
- tree::TerminalNode *node = _ctx->addChild(createTerminalNode(o));
- if (_parseListeners.size() > 0) {
- for (auto *listener : _parseListeners) {
- listener->visitTerminal(node);
- }
- }
- }
- }
- return o;
-}
-
-void Parser::addContextToParseTree() {
- // Add current context to parent if we have a parent.
- if (_ctx->parent == nullptr)
- return;
-
- downCast<ParserRuleContext*>(_ctx->parent)->addChild(_ctx);
-}
-
-void Parser::enterRule(ParserRuleContext *localctx, size_t state, size_t /*ruleIndex*/) {
- setState(state);
- _ctx = localctx;
- _ctx->start = _input->LT(1);
- if (_buildParseTrees) {
- addContextToParseTree();
- }
- if (_parseListeners.size() > 0) {
- triggerEnterRuleEvent();
- }
-}
-
-void Parser::exitRule() {
- if (_matchedEOF) {
- // if we have matched EOF, it cannot consume past EOF so we use LT(1) here
- _ctx->stop = _input->LT(1); // LT(1) will be end of file
- } else {
- _ctx->stop = _input->LT(-1); // stop node is what we just matched
- }
-
- // trigger event on ctx, before it reverts to parent
- if (_parseListeners.size() > 0) {
- triggerExitRuleEvent();
- }
- setState(_ctx->invokingState);
- _ctx = downCast<ParserRuleContext*>(_ctx->parent);
-}
-
-void Parser::enterOuterAlt(ParserRuleContext *localctx, size_t altNum) {
- localctx->setAltNumber(altNum);
-
- // if we have new localctx, make sure we replace existing ctx
- // that is previous child of parse tree
- if (_buildParseTrees && _ctx != localctx) {
- if (_ctx->parent != nullptr) {
- ParserRuleContext *parent = downCast<ParserRuleContext*>(_ctx->parent);
- parent->removeLastChild();
- parent->addChild(localctx);
- }
- }
- _ctx = localctx;
-}
-
-int Parser::getPrecedence() const {
- if (_precedenceStack.empty()) {
- return -1;
- }
-
- return _precedenceStack.back();
-}
-
-void Parser::enterRecursionRule(ParserRuleContext *localctx, size_t ruleIndex) {
- enterRecursionRule(localctx, getATN().ruleToStartState[ruleIndex]->stateNumber, ruleIndex, 0);
-}
-
-void Parser::enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t /*ruleIndex*/, int precedence) {
- setState(state);
- _precedenceStack.push_back(precedence);
- _ctx = localctx;
- _ctx->start = _input->LT(1);
- if (!_parseListeners.empty()) {
- triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
- }
-}
-
-void Parser::pushNewRecursionContext(ParserRuleContext *localctx, size_t state, size_t /*ruleIndex*/) {
- ParserRuleContext *previous = _ctx;
- previous->parent = localctx;
- previous->invokingState = state;
- previous->stop = _input->LT(-1);
-
- _ctx = localctx;
- _ctx->start = previous->start;
- if (_buildParseTrees) {
- _ctx->addChild(previous);
- }
-
- if (_parseListeners.size() > 0) {
- triggerEnterRuleEvent(); // simulates rule entry for left-recursive rules
- }
-}
-
-void Parser::unrollRecursionContexts(ParserRuleContext *parentctx) {
- _precedenceStack.pop_back();
- _ctx->stop = _input->LT(-1);
- ParserRuleContext *retctx = _ctx; // save current ctx (return value)
-
- // unroll so ctx is as it was before call to recursive method
- if (_parseListeners.size() > 0) {
- while (_ctx != parentctx) {
- triggerExitRuleEvent();
- _ctx = downCast<ParserRuleContext*>(_ctx->parent);
- }
- } else {
- _ctx = parentctx;
- }
-
- // hook into tree
- retctx->parent = parentctx;
-
- if (_buildParseTrees && parentctx != nullptr) {
- // add return ctx into invoking rule's tree
- parentctx->addChild(retctx);
- }
-}
-
-ParserRuleContext* Parser::getInvokingContext(size_t ruleIndex) {
- ParserRuleContext *p = _ctx;
- while (p) {
- if (p->getRuleIndex() == ruleIndex) {
- return p;
- }
- if (p->parent == nullptr)
- break;
- p = downCast<ParserRuleContext*>(p->parent);
- }
- return nullptr;
-}
-
-ParserRuleContext* Parser::getContext() {
- return _ctx;
-}
-
-void Parser::setContext(ParserRuleContext *ctx) {
- _ctx = ctx;
-}
-
-bool Parser::precpred(RuleContext * /*localctx*/, int precedence) {
- return precedence >= _precedenceStack.back();
-}
-
-bool Parser::inContext(const std::string &/*context*/) {
- // TODO: useful in parser?
- return false;
-}
-
-bool Parser::isExpectedToken(size_t symbol) {
- const atn::ATN &atn = getInterpreter<atn::ParserATNSimulator>()->atn;
- ParserRuleContext *ctx = _ctx;
- atn::ATNState *s = atn.states[getState()];
- misc::IntervalSet following = atn.nextTokens(s);
-
- if (following.contains(symbol)) {
- return true;
- }
-
- if (!following.contains(Token::EPSILON)) {
- return false;
- }
-
- while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER && following.contains(Token::EPSILON)) {
- atn::ATNState *invokingState = atn.states[ctx->invokingState];
- const atn::RuleTransition *rt = static_cast<const atn::RuleTransition*>(invokingState->transitions[0].get());
- following = atn.nextTokens(rt->followState);
- if (following.contains(symbol)) {
- return true;
- }
-
- ctx = downCast<ParserRuleContext*>(ctx->parent);
- }
-
- if (following.contains(Token::EPSILON) && symbol == EOF) {
- return true;
- }
-
- return false;
-}
-
-bool Parser::isMatchedEOF() const {
- return _matchedEOF;
-}
-
-misc::IntervalSet Parser::getExpectedTokens() {
- return getATN().getExpectedTokens(getState(), getContext());
-}
-
-misc::IntervalSet Parser::getExpectedTokensWithinCurrentRule() {
- const atn::ATN &atn = getInterpreter<atn::ParserATNSimulator>()->atn;
- atn::ATNState *s = atn.states[getState()];
- return atn.nextTokens(s);
-}
-
-size_t Parser::getRuleIndex(const std::string &ruleName) {
- const std::map<std::string, size_t> &m = getRuleIndexMap();
- auto iterator = m.find(ruleName);
- if (iterator == m.end()) {
- return INVALID_INDEX;
- }
- return iterator->second;
-}
-
-ParserRuleContext* Parser::getRuleContext() {
- return _ctx;
-}
-
-std::vector<std::string> Parser::getRuleInvocationStack() {
- return getRuleInvocationStack(_ctx);
-}
-
-std::vector<std::string> Parser::getRuleInvocationStack(RuleContext *p) {
- std::vector<std::string> const& ruleNames = getRuleNames();
- std::vector<std::string> stack;
- RuleContext *run = p;
- while (run != nullptr) {
- // compute what follows who invoked us
- size_t ruleIndex = run->getRuleIndex();
- if (ruleIndex == INVALID_INDEX ) {
- stack.push_back("n/a");
- } else {
- stack.push_back(ruleNames[ruleIndex]);
- }
- if (!RuleContext::is(run->parent)) {
- break;
- }
- run = downCast<RuleContext*>(run->parent);
- }
- return stack;
-}
-
-std::vector<std::string> Parser::getDFAStrings() {
- atn::ParserATNSimulator *simulator = getInterpreter<atn::ParserATNSimulator>();
- if (!simulator->decisionToDFA.empty()) {
- UniqueLock<Mutex> lck(_mutex);
-
- std::vector<std::string> s;
- for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) {
- dfa::DFA &dfa = simulator->decisionToDFA[d];
- s.push_back(dfa.toString(getVocabulary()));
- }
- return s;
- }
- return std::vector<std::string>();
-}
-
-void Parser::dumpDFA() {
- atn::ParserATNSimulator *simulator = getInterpreter<atn::ParserATNSimulator>();
- if (!simulator->decisionToDFA.empty()) {
- UniqueLock<Mutex> lck(_mutex);
- bool seenOne = false;
- for (size_t d = 0; d < simulator->decisionToDFA.size(); d++) {
- dfa::DFA &dfa = simulator->decisionToDFA[d];
- if (!dfa.states.empty()) {
- if (seenOne) {
- std::cout << std::endl;
- }
- std::cout << "Decision " << dfa.decision << ":" << std::endl;
- std::cout << dfa.toString(getVocabulary());
- seenOne = true;
- }
- }
- }
-}
-
-std::string Parser::getSourceName() {
- return _input->getSourceName();
-}
-
-atn::ParseInfo Parser::getParseInfo() const {
- atn::ParserATNSimulator *simulator = getInterpreter<atn::ParserATNSimulator>();
- return atn::ParseInfo(dynamic_cast<atn::ProfilingATNSimulator*>(simulator));
-}
-
-void Parser::setProfile(bool profile) {
- atn::ParserATNSimulator *interp = getInterpreter<atn::ParserATNSimulator>();
- atn::PredictionMode saveMode = interp != nullptr ? interp->getPredictionMode() : atn::PredictionMode::LL;
- if (profile) {
- if (!is<atn::ProfilingATNSimulator *>(interp)) {
- setInterpreter(new atn::ProfilingATNSimulator(this)); /* mem-check: replacing existing interpreter which gets deleted. */
- }
- } else if (is<atn::ProfilingATNSimulator *>(interp)) {
- /* mem-check: replacing existing interpreter which gets deleted. */
- atn::ParserATNSimulator *sim = new atn::ParserATNSimulator(this, getATN(), interp->decisionToDFA, interp->getSharedContextCache());
- setInterpreter(sim);
- }
- getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(saveMode);
-}
-
-void Parser::setTrace(bool trace) {
- if (!trace) {
- if (_tracer)
- removeParseListener(_tracer);
- delete _tracer;
- _tracer = nullptr;
- } else {
- if (_tracer)
- removeParseListener(_tracer); // Just in case this is triggered multiple times.
- _tracer = new TraceListener(this);
- addParseListener(_tracer);
- }
-}
-
-bool Parser::isTrace() const {
- return _tracer != nullptr;
-}
-
-tree::TerminalNode *Parser::createTerminalNode(Token *t) {
- return _tracker.createInstance<tree::TerminalNodeImpl>(t);
-}
-
-tree::ErrorNode *Parser::createErrorNode(Token *t) {
- return _tracker.createInstance<tree::ErrorNodeImpl>(t);
-}
-
-void Parser::InitializeInstanceFields() {
- _errHandler = std::make_shared<DefaultErrorStrategy>();
- _precedenceStack.clear();
- _precedenceStack.push_back(0);
- _buildParseTrees = true;
- _syntaxErrors = 0;
- _matchedEOF = false;
- _input = nullptr;
- _tracer = nullptr;
- _ctx = nullptr;
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Parser.h b/contrib/libs/antlr4_cpp_runtime/src/Parser.h
deleted file mode 100644
index f490b00c38..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Parser.h
+++ /dev/null
@@ -1,461 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "tree/ParseTreeListener.h"
-#include "tree/ParseTree.h"
-#include "TokenStream.h"
-#include "TokenSource.h"
-#include "misc/Interval.h"
-
-namespace antlr4 {
-
- /// This is all the parsing support code essentially; most of it is error recovery stuff.
- class ANTLR4CPP_PUBLIC Parser : public Recognizer {
- public:
-
- class TraceListener : public tree::ParseTreeListener {
- public:
- TraceListener(Parser *outerInstance);
- virtual ~TraceListener();
-
- virtual void enterEveryRule(ParserRuleContext *ctx) override;
- virtual void visitTerminal(tree::TerminalNode *node) override;
- virtual void visitErrorNode(tree::ErrorNode *node) override;
- virtual void exitEveryRule(ParserRuleContext *ctx) override;
-
- private:
- Parser *const outerInstance;
- };
-
- class TrimToSizeListener : public tree::ParseTreeListener {
- public:
- static TrimToSizeListener INSTANCE;
-
- virtual ~TrimToSizeListener();
-
- virtual void enterEveryRule(ParserRuleContext *ctx) override;
- virtual void visitTerminal(tree::TerminalNode *node) override;
- virtual void visitErrorNode(tree::ErrorNode *node) override;
- virtual void exitEveryRule(ParserRuleContext *ctx) override;
- };
-
- Parser(TokenStream *input);
- virtual ~Parser();
-
- /// reset the parser's state
- virtual void reset();
-
- /// <summary>
- /// Match current input symbol against {@code ttype}. If the symbol type
- /// matches, <seealso cref="ANTLRErrorStrategy#reportMatch"/> and <seealso cref="#consume"/> are
- /// called to complete the match process.
- ///
- /// If the symbol type does not match,
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is called on the current error
- /// strategy to attempt recovery. If <seealso cref="#getBuildParseTree"/> is
- /// {@code true} and the token index of the symbol returned by
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is -1, the symbol is added to
- /// the parse tree by calling {@link #createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)}.
- /// </summary>
- /// <param name="ttype"> the token type to match </param>
- /// <returns> the matched symbol </returns>
- /// <exception cref="RecognitionException"> if the current input symbol did not match
- /// {@code ttype} and the error strategy could not recover from the
- /// mismatched symbol </exception>
- virtual Token* match(size_t ttype);
-
- /// <summary>
- /// Match current input symbol as a wildcard. If the symbol type matches
- /// (i.e. has a value greater than 0), <seealso cref="ANTLRErrorStrategy#reportMatch"/>
- /// and <seealso cref="#consume"/> are called to complete the match process.
- /// <p/>
- /// If the symbol type does not match,
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is called on the current error
- /// strategy to attempt recovery. If <seealso cref="#getBuildParseTree"/> is
- /// {@code true} and the token index of the symbol returned by
- /// <seealso cref="ANTLRErrorStrategy#recoverInline"/> is -1, the symbol is added to
- /// the parse tree by calling <seealso cref="ParserRuleContext#addErrorNode"/>.
- /// </summary>
- /// <returns> the matched symbol </returns>
- /// <exception cref="RecognitionException"> if the current input symbol did not match
- /// a wildcard and the error strategy could not recover from the mismatched
- /// symbol </exception>
- virtual Token* matchWildcard();
-
- /// <summary>
- /// Track the <seealso cref="ParserRuleContext"/> objects during the parse and hook
- /// them up using the <seealso cref="ParserRuleContext#children"/> list so that it
- /// forms a parse tree. The <seealso cref="ParserRuleContext"/> returned from the start
- /// rule represents the root of the parse tree.
- /// <p/>
- /// Note that if we are not building parse trees, rule contexts only point
- /// upwards. When a rule exits, it returns the context but that gets garbage
- /// collected if nobody holds a reference. It points upwards but nobody
- /// points at it.
- /// <p/>
- /// When we build parse trees, we are adding all of these contexts to
- /// <seealso cref="ParserRuleContext#children"/> list. Contexts are then not candidates
- /// for garbage collection.
- /// </summary>
- virtual void setBuildParseTree(bool buildParseTrees);
-
- /// <summary>
- /// Gets whether or not a complete parse tree will be constructed while
- /// parsing. This property is {@code true} for a newly constructed parser.
- /// </summary>
- /// <returns> {@code true} if a complete parse tree will be constructed while
- /// parsing, otherwise {@code false} </returns>
- virtual bool getBuildParseTree();
-
- /// <summary>
- /// Trim the internal lists of the parse tree during parsing to conserve memory.
- /// This property is set to {@code false} by default for a newly constructed parser.
- /// </summary>
- /// <param name="trimParseTrees"> {@code true} to trim the capacity of the <seealso cref="ParserRuleContext#children"/>
- /// list to its size after a rule is parsed. </param>
- virtual void setTrimParseTree(bool trimParseTrees);
-
- /// <returns> {@code true} if the <seealso cref="ParserRuleContext#children"/> list is trimmed
- /// using the default <seealso cref="Parser.TrimToSizeListener"/> during the parse process. </returns>
- virtual bool getTrimParseTree();
-
- virtual std::vector<tree::ParseTreeListener *> getParseListeners();
-
- /// <summary>
- /// Registers {@code listener} to receive events during the parsing process.
- /// <p/>
- /// To support output-preserving grammar transformations (including but not
- /// limited to left-recursion removal, automated left-factoring, and
- /// optimized code generation), calls to listener methods during the parse
- /// may differ substantially from calls made by
- /// <seealso cref="ParseTreeWalker#DEFAULT"/> used after the parse is complete. In
- /// particular, rule entry and exit events may occur in a different order
- /// during the parse than after the parse. In addition, calls to certain
- /// rule entry methods may be omitted.
- /// <p/>
- /// With the following specific exceptions, calls to listener events are
- /// <em>deterministic</em>, i.e. for identical input the calls to listener
- /// methods will be the same.
- ///
- /// <ul>
- /// <li>Alterations to the grammar used to generate code may change the
- /// behavior of the listener calls.</li>
- /// <li>Alterations to the command line options passed to ANTLR 4 when
- /// generating the parser may change the behavior of the listener calls.</li>
- /// <li>Changing the version of the ANTLR Tool used to generate the parser
- /// may change the behavior of the listener calls.</li>
- /// </ul>
- /// </summary>
- /// <param name="listener"> the listener to add
- /// </param>
- /// <exception cref="NullPointerException"> if {@code listener} is {@code null} </exception>
- virtual void addParseListener(tree::ParseTreeListener *listener);
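// [Editorial sketch -- not part of the deleted file] A minimal listener
// registered via addParseListener(). It overrides the four callbacks that
// tree::ParseTreeListener declares (the same ones TraceListener above uses);
// the counter and the commented usage are illustrative assumptions only.
#include "tree/ParseTreeListener.h"
#include "ParserRuleContext.h"
class RuleCountingListener : public antlr4::tree::ParseTreeListener {
public:
  void enterEveryRule(antlr4::ParserRuleContext * /*ctx*/) override { ++_rulesEntered; }
  void exitEveryRule(antlr4::ParserRuleContext * /*ctx*/) override {}
  void visitTerminal(antlr4::tree::TerminalNode * /*node*/) override {}
  void visitErrorNode(antlr4::tree::ErrorNode * /*node*/) override {}
  size_t rulesEntered() const { return _rulesEntered; }
private:
  size_t _rulesEntered = 0;
};
// Usage (the callbacks fire during the parse itself, in the order described above):
//   RuleCountingListener counter;
//   parser.addParseListener(&counter);
//   parser.prog();                        // hypothetical start rule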
-
- /// <summary>
- /// Remove {@code listener} from the list of parse listeners.
- /// <p/>
- /// If {@code listener} is {@code null} or has not been added as a parse
- /// listener, this method does nothing.
- /// </summary>
- /// <seealso cref= #addParseListener
- /// </seealso>
- /// <param name="listener"> the listener to remove </param>
- virtual void removeParseListener(tree::ParseTreeListener *listener);
-
- /// <summary>
- /// Remove all parse listeners.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void removeParseListeners();
-
- /// <summary>
- /// Notify any parse listeners of an enter rule event.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void triggerEnterRuleEvent();
-
- /// <summary>
- /// Notify any parse listeners of an exit rule event.
- /// </summary>
- /// <seealso cref= #addParseListener </seealso>
- virtual void triggerExitRuleEvent();
-
- /// <summary>
- /// Gets the number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- /// <seealso cref= #notifyErrorListeners </seealso>
- virtual size_t getNumberOfSyntaxErrors();
-
- virtual TokenFactory<CommonToken>* getTokenFactory() override;
-
- /// <summary>
- /// Tell our token source and error strategy about a new way to create tokens. </summary>
- template<typename T1>
- void setTokenFactory(TokenFactory<T1> *factory) {
- _input->getTokenSource()->setTokenFactory(factory);
- }
-
- /// The ATN with bypass alternatives is expensive to create so we create it
- /// lazily. The ATN is owned by us.
- virtual const atn::ATN& getATNWithBypassAlts();
-
- /// <summary>
- /// The preferred method of getting a tree pattern. For example, here's a
- /// sample use:
- ///
- /// <pre>
- /// ParseTree t = parser.expr();
- /// ParseTreePattern p = parser.compileParseTreePattern("<ID>+0", MyParser.RULE_expr);
- /// ParseTreeMatch m = p.match(t);
- /// String id = m.get("ID");
- /// </pre>
- /// </summary>
- virtual tree::pattern::ParseTreePattern compileParseTreePattern(const std::string &pattern, int patternRuleIndex);
-
- /// <summary>
- /// The same as <seealso cref="#compileParseTreePattern(String, int)"/> but specify a
- /// <seealso cref="Lexer"/> rather than trying to deduce it from this parser.
- /// </summary>
- virtual tree::pattern::ParseTreePattern compileParseTreePattern(const std::string &pattern, int patternRuleIndex,
- Lexer *lexer);
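// [Editorial sketch -- not part of the deleted file] A C++ rendition of the
// Java-flavoured example in the comment above. "MyParser", its expr() rule
// and MyParser::RuleExpr are hypothetical generated names, and the
// ParseTreeMatch accessors are assumed to mirror the Java API.
void demoTreePattern(MyParser &parser) {
  antlr4::tree::ParseTree *t = parser.expr();
  antlr4::tree::pattern::ParseTreePattern p =
      parser.compileParseTreePattern("<ID>+0", MyParser::RuleExpr);
  antlr4::tree::pattern::ParseTreeMatch m = p.match(t);
  if (m.succeeded()) {
    antlr4::tree::ParseTree *id = m.get("ID");   // the subtree bound to <ID>
    (void)id;
  }
}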
-
- virtual Ref<ANTLRErrorStrategy> getErrorHandler();
- virtual void setErrorHandler(Ref<ANTLRErrorStrategy> const& handler);
-
- virtual IntStream* getInputStream() override;
- void setInputStream(IntStream *input) override;
-
- virtual TokenStream* getTokenStream();
-
- /// Set the token stream and reset the parser.
- virtual void setTokenStream(TokenStream *input);
-
- /// <summary>
- /// Match needs to return the current input symbol, which gets put
- /// into the label for the associated token ref; e.g., x=ID.
- /// </summary>
- virtual Token* getCurrentToken();
-
- void notifyErrorListeners(const std::string &msg);
-
- virtual void notifyErrorListeners(Token *offendingToken, const std::string &msg, std::exception_ptr e);
-
- /// Consume and return the <seealso cref="#getCurrentToken current symbol"/>.
- /// <p/>
- /// E.g., given the following input with {@code A} being the current
- /// lookahead symbol, this function moves the cursor to {@code B} and returns
- /// {@code A}.
- ///
- /// <pre>
- /// A B
- /// ^
- /// </pre>
- ///
- /// If the parser is not in error recovery mode, the consumed symbol is added
- /// to the parse tree using <seealso cref="ParserRuleContext#addChild(TerminalNode)"/>, and
- /// <seealso cref="ParseTreeListener#visitTerminal"/> is called on any parse listeners.
- /// If the parser <em>is</em> in error recovery mode, the consumed symbol is
- /// added to the parse tree using {@link #createErrorNode(ParserRuleContext, Token)} then
- /// {@link ParserRuleContext#addErrorNode(ErrorNode)} and
- /// <seealso cref="ParseTreeListener#visitErrorNode"/> is called on any parse
- /// listeners.
- virtual Token* consume();
-
- /// Always called by generated parsers upon entry to a rule. Access field
- /// <seealso cref="#_ctx"/> to get the current context.
- virtual void enterRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex);
-
- void exitRule();
-
- virtual void enterOuterAlt(ParserRuleContext *localctx, size_t altNum);
-
- /**
- * Get the precedence level for the top-most precedence rule.
- *
- * @return The precedence level for the top-most precedence rule, or -1 if
- * the parser context is not nested within a precedence rule.
- */
- int getPrecedence() const;
-
- /// @deprecated Use
- /// <seealso cref="#enterRecursionRule(ParserRuleContext, int, int, int)"/> instead.
- virtual void enterRecursionRule(ParserRuleContext *localctx, size_t ruleIndex);
- virtual void enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex, int precedence);
-
- /** Like {@link #enterRule} but for recursive rules.
- * Make the current context the child of the incoming localctx.
- */
- virtual void pushNewRecursionContext(ParserRuleContext *localctx, size_t state, size_t ruleIndex);
- virtual void unrollRecursionContexts(ParserRuleContext *parentctx);
- virtual ParserRuleContext* getInvokingContext(size_t ruleIndex);
- virtual ParserRuleContext* getContext();
- virtual void setContext(ParserRuleContext *ctx);
- virtual bool precpred(RuleContext *localctx, int precedence) override;
- virtual bool inContext(const std::string &context);
-
- /// <summary>
- /// Checks whether or not {@code symbol} can follow the current state in the
- /// ATN. The behavior of this method is equivalent to the following, but is
- /// implemented such that the complete context-sensitive follow set does not
- /// need to be explicitly constructed.
- ///
- /// <pre>
- /// return getExpectedTokens().contains(symbol);
- /// </pre>
- /// </summary>
- /// <param name="symbol"> the symbol type to check </param>
- /// <returns> {@code true} if {@code symbol} can follow the current state in
- /// the ATN, otherwise {@code false}. </returns>
- virtual bool isExpectedToken(size_t symbol);
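// [Editorial sketch -- not part of the deleted file] The equivalence stated
// above, written out as a small helper.
bool canFollow(antlr4::Parser &parser, size_t symbol) {
  // Same answer as parser.getExpectedTokens().contains(symbol), but without
  // constructing the complete context-sensitive follow set.
  return parser.isExpectedToken(symbol);
}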
-
- bool isMatchedEOF() const;
-
- /// <summary>
- /// Computes the set of input symbols which could follow the current parser
- /// state and context, as given by <seealso cref="#getState"/> and <seealso cref="#getContext"/>,
- /// respectively.
- /// </summary>
- /// <seealso cref= ATN#getExpectedTokens(int, RuleContext) </seealso>
- virtual misc::IntervalSet getExpectedTokens();
-
- virtual misc::IntervalSet getExpectedTokensWithinCurrentRule();
-
- /// Get a rule's index (i.e., {@code RULE_ruleName} field) or INVALID_INDEX if not found.
- virtual size_t getRuleIndex(const std::string &ruleName);
-
- virtual ParserRuleContext* getRuleContext();
-
- /// <summary>
- /// Return List&lt;String&gt; of the rule names in your parser instance
- /// leading up to a call to the current rule. You could override if
- /// you want more details such as the file/line info of where
- /// in the ATN a rule is invoked.
- ///
- /// This is very useful for error messages.
- /// </summary>
- virtual std::vector<std::string> getRuleInvocationStack();
-
- virtual std::vector<std::string> getRuleInvocationStack(RuleContext *p);
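// [Editorial sketch -- not part of the deleted file] Typical use when
// building an error message; arrayToString() is the same helper that
// ParserRuleContext::toInfoString() uses.
#include <algorithm>
#include <iostream>
#include "support/CPPUtils.h"
void printRuleStack(antlr4::Parser &parser) {
  std::vector<std::string> stack = parser.getRuleInvocationStack();
  std::reverse(stack.begin(), stack.end());                  // outermost rule first
  std::cerr << "rule stack: " << antlrcpp::arrayToString(stack) << std::endl;
}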
-
- /// <summary>
- /// For debugging and other purposes. </summary>
- virtual std::vector<std::string> getDFAStrings();
-
- /// <summary>
- /// For debugging and other purposes. </summary>
- virtual void dumpDFA();
-
- virtual std::string getSourceName();
-
- atn::ParseInfo getParseInfo() const;
-
- /**
- * @since 4.3
- */
- void setProfile(bool profile);
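// [Editorial sketch -- not part of the deleted file] Profiling is opt-in:
// switch it on before the parse and read the statistics back afterwards.
// "MyParser"/prog() are hypothetical generated names, and the ParseInfo
// accessors are assumed to mirror the Java runtime.
void profileOneParse(MyParser &parser) {
  parser.setProfile(true);                  // installs a ProfilingATNSimulator
  parser.prog();
  antlr4::atn::ParseInfo info = parser.getParseInfo();
  (void)info;                               // inspect per-decision statistics here
}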
-
- /// <summary>
- /// During a parse it is sometimes useful to listen in on the rule entry and exit
- /// events as well as token matches. This is for quick and dirty debugging.
- /// </summary>
- virtual void setTrace(bool trace);
-
- /**
- * Gets whether a {@link TraceListener} is registered as a parse listener
- * for the parser.
- *
- * @see #setTrace(boolean)
- */
- bool isTrace() const;
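// [Editorial sketch -- not part of the deleted file] Tracing can be toggled
// around the region of interest; it reports rule entry/exit and token
// matches through the TraceListener above. "MyParser"/prog() are
// hypothetical generated names.
void traceOneParse(MyParser &parser) {
  parser.setTrace(true);                    // registers a TraceListener
  parser.prog();
  parser.setTrace(false);                   // removes and frees the listener
}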
-
- tree::ParseTreeTracker& getTreeTracker() { return _tracker; }
-
- /** How to create a token leaf node associated with a parent.
- * Typically, the terminal node to create is not a function of the parent
- * but this method must still set the parent pointer of the terminal node
- * returned. I would prefer having {@link ParserRuleContext#addAnyChild(ParseTree)}
- * set the parent pointer, but the parent pointer is implementation dependent
- * and currently there is no setParent() in {@link TerminalNode} (and we can't
- * add a method in Java 1.7 without breaking backward compatibility).
- *
- * @since 4.7
- */
- tree::TerminalNode *createTerminalNode(Token *t);
-
- /** How to create an error node, given a token, associated with a parent.
- * Typically, the error node to create is not a function of the parent
- * but this method must still set the parent pointer of the terminal node
- * returned. I would prefer having {@link ParserRuleContext#addAnyChild(ParseTree)}
- * set the parent pointer, but the parent pointer is implementation dependent
- * and currently there is no setParent() in {@link ErrorNode} (and we can't
- * add a method in Java 1.7 without breaking backward compatibility).
- *
- * @since 4.7
- */
- tree::ErrorNode *createErrorNode(Token *t);
-
- protected:
- /// The ParserRuleContext object for the currently executing rule.
- /// This is always non-null during the parsing process.
- // ml: this is one of the contexts tracked in _allocatedContexts.
- ParserRuleContext *_ctx;
-
- /// The error handling strategy for the parser. The default is DefaultErrorStrategy.
- /// See also getErrorHandler.
- Ref<ANTLRErrorStrategy> _errHandler;
-
- /// <summary>
- /// The input stream.
- /// </summary>
- /// <seealso cref= #getInputStream </seealso>
- /// <seealso cref= #setInputStream </seealso>
- TokenStream *_input;
-
- std::vector<int> _precedenceStack;
-
- /// <summary>
- /// Specifies whether or not the parser should construct a parse tree during
- /// the parsing process. The default value is {@code true}.
- /// </summary>
- /// <seealso cref= #getBuildParseTree </seealso>
- /// <seealso cref= #setBuildParseTree </seealso>
- bool _buildParseTrees;
-
- /// The list of <seealso cref="ParseTreeListener"/> listeners registered to receive
- /// events during the parse.
- /// <seealso cref= #addParseListener </seealso>
- std::vector<tree::ParseTreeListener *> _parseListeners;
-
- /// <summary>
- /// The number of syntax errors reported during parsing. This value is
- /// incremented each time <seealso cref="#notifyErrorListeners"/> is called.
- /// </summary>
- size_t _syntaxErrors;
-
- /** Indicates parser has match()ed EOF token. See {@link #exitRule()}. */
- bool _matchedEOF;
-
- virtual void addContextToParseTree();
-
- // All rule contexts created during a parse run. This is cleared when calling reset().
- tree::ParseTreeTracker _tracker;
-
- private:
- /// When setTrace(true) is called, a reference to the
- /// TraceListener is stored here so it can be easily removed in a
- /// later call to setTrace(false). The listener itself is
- /// implemented as a parser listener so this field is not directly used by
- /// other parser methods.
- TraceListener *_tracer;
-
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.cpp b/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.cpp
deleted file mode 100644
index e1c54a0eb1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.cpp
+++ /dev/null
@@ -1,294 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "dfa/DFA.h"
-#include "atn/RuleStartState.h"
-#include "InterpreterRuleContext.h"
-#include "atn/ParserATNSimulator.h"
-#include "ANTLRErrorStrategy.h"
-#include "atn/LoopEndState.h"
-#include "FailedPredicateException.h"
-#include "atn/StarLoopEntryState.h"
-#include "atn/AtomTransition.h"
-#include "atn/RuleTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/ActionTransition.h"
-#include "atn/ATN.h"
-#include "atn/RuleStopState.h"
-#include "Lexer.h"
-#include "Token.h"
-#include "Vocabulary.h"
-#include "InputMismatchException.h"
-#include "CommonToken.h"
-#include "tree/ErrorNode.h"
-
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "ParserInterpreter.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-using namespace antlrcpp;
-
-ParserInterpreter::ParserInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
- const std::vector<std::string> &ruleNames, const atn::ATN &atn, TokenStream *input)
- : Parser(input), _grammarFileName(grammarFileName), _atn(atn), _ruleNames(ruleNames), _vocabulary(vocabulary) {
-
- // init decision DFA
- for (size_t i = 0; i < atn.getNumberOfDecisions(); ++i) {
- atn::DecisionState *decisionState = atn.getDecisionState(i);
- _decisionToDFA.push_back(dfa::DFA(decisionState, i));
- }
-
- // get atn simulator that knows how to do predictions
- _interpreter = new atn::ParserATNSimulator(this, atn, _decisionToDFA, _sharedContextCache); /* mem-check: deleted in d-tor */
-}
-
-ParserInterpreter::~ParserInterpreter() {
- delete _interpreter;
-}
-
-void ParserInterpreter::reset() {
- Parser::reset();
- _overrideDecisionReached = false;
- _overrideDecisionRoot = nullptr;
-}
-
-const atn::ATN& ParserInterpreter::getATN() const {
- return _atn;
-}
-
-const dfa::Vocabulary& ParserInterpreter::getVocabulary() const {
- return _vocabulary;
-}
-
-const std::vector<std::string>& ParserInterpreter::getRuleNames() const {
- return _ruleNames;
-}
-
-std::string ParserInterpreter::getGrammarFileName() const {
- return _grammarFileName;
-}
-
-ParserRuleContext* ParserInterpreter::parse(size_t startRuleIndex) {
- atn::RuleStartState *startRuleStartState = _atn.ruleToStartState[startRuleIndex];
-
- _rootContext = createInterpreterRuleContext(nullptr, atn::ATNState::INVALID_STATE_NUMBER, startRuleIndex);
-
- if (startRuleStartState->isLeftRecursiveRule) {
- enterRecursionRule(_rootContext, startRuleStartState->stateNumber, startRuleIndex, 0);
- } else {
- enterRule(_rootContext, startRuleStartState->stateNumber, startRuleIndex);
- }
-
- while (true) {
- atn::ATNState *p = getATNState();
- switch (p->getStateType()) {
- case atn::ATNStateType::RULE_STOP :
- // pop; return from rule
- if (_ctx->isEmpty()) {
- if (startRuleStartState->isLeftRecursiveRule) {
- ParserRuleContext *result = _ctx;
- auto parentContext = _parentContextStack.top();
- _parentContextStack.pop();
- unrollRecursionContexts(parentContext.first);
- return result;
- } else {
- exitRule();
- return _rootContext;
- }
- }
-
- visitRuleStopState(p);
- break;
-
- default :
- try {
- visitState(p);
- }
- catch (RecognitionException &e) {
- setState(_atn.ruleToStopState[p->ruleIndex]->stateNumber);
- getErrorHandler()->reportError(this, e);
- getContext()->exception = std::current_exception();
- recover(e);
- }
-
- break;
- }
- }
-}
-
-void ParserInterpreter::enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex, int precedence) {
- _parentContextStack.push({ _ctx, localctx->invokingState });
- Parser::enterRecursionRule(localctx, state, ruleIndex, precedence);
-}
-
-void ParserInterpreter::addDecisionOverride(int decision, int tokenIndex, int forcedAlt) {
- _overrideDecision = decision;
- _overrideDecisionInputIndex = tokenIndex;
- _overrideDecisionAlt = forcedAlt;
-}
-
-Ref<InterpreterRuleContext> ParserInterpreter::getOverrideDecisionRoot() const {
- return _overrideDecisionRoot;
-}
-
-InterpreterRuleContext* ParserInterpreter::getRootContext() {
- return _rootContext;
-}
-
-atn::ATNState* ParserInterpreter::getATNState() {
- return _atn.states[getState()];
-}
-
-void ParserInterpreter::visitState(atn::ATNState *p) {
- size_t predictedAlt = 1;
- if (DecisionState::is(p)) {
- predictedAlt = visitDecisionState(downCast<DecisionState*>(p));
- }
-
- const atn::Transition *transition = p->transitions[predictedAlt - 1].get();
- switch (transition->getTransitionType()) {
- case atn::TransitionType::EPSILON:
- if (p->getStateType() == ATNStateType::STAR_LOOP_ENTRY &&
- (downCast<StarLoopEntryState *>(p))->isPrecedenceDecision &&
- !LoopEndState::is(transition->target)) {
- // We are at the start of a left recursive rule's (...)* loop
- // and we're not taking the exit branch of loop.
- InterpreterRuleContext *localctx = createInterpreterRuleContext(_parentContextStack.top().first,
- _parentContextStack.top().second, static_cast<int>(_ctx->getRuleIndex()));
- pushNewRecursionContext(localctx, _atn.ruleToStartState[p->ruleIndex]->stateNumber, static_cast<int>(_ctx->getRuleIndex()));
- }
- break;
-
- case atn::TransitionType::ATOM:
- match(static_cast<int>(static_cast<const atn::AtomTransition*>(transition)->_label));
- break;
-
- case atn::TransitionType::RANGE:
- case atn::TransitionType::SET:
- case atn::TransitionType::NOT_SET:
- if (!transition->matches(static_cast<int>(_input->LA(1)), Token::MIN_USER_TOKEN_TYPE, Lexer::MAX_CHAR_VALUE)) {
- recoverInline();
- }
- matchWildcard();
- break;
-
- case atn::TransitionType::WILDCARD:
- matchWildcard();
- break;
-
- case atn::TransitionType::RULE:
- {
- atn::RuleStartState *ruleStartState = static_cast<atn::RuleStartState*>(transition->target);
- size_t ruleIndex = ruleStartState->ruleIndex;
- InterpreterRuleContext *newctx = createInterpreterRuleContext(_ctx, p->stateNumber, ruleIndex);
- if (ruleStartState->isLeftRecursiveRule) {
- enterRecursionRule(newctx, ruleStartState->stateNumber, ruleIndex, static_cast<const atn::RuleTransition*>(transition)->precedence);
- } else {
- enterRule(newctx, transition->target->stateNumber, ruleIndex);
- }
- }
- break;
-
- case atn::TransitionType::PREDICATE:
- {
- const atn::PredicateTransition *predicateTransition = static_cast<const atn::PredicateTransition*>(transition);
- if (!sempred(_ctx, predicateTransition->getRuleIndex(), predicateTransition->getPredIndex())) {
- throw FailedPredicateException(this);
- }
- }
- break;
-
- case atn::TransitionType::ACTION:
- {
- const atn::ActionTransition *actionTransition = static_cast<const atn::ActionTransition*>(transition);
- action(_ctx, actionTransition->ruleIndex, actionTransition->actionIndex);
- }
- break;
-
- case atn::TransitionType::PRECEDENCE:
- {
- if (!precpred(_ctx, static_cast<const atn::PrecedencePredicateTransition*>(transition)->getPrecedence())) {
- throw FailedPredicateException(this, "precpred(_ctx, " + std::to_string(static_cast<const atn::PrecedencePredicateTransition*>(transition)->getPrecedence()) + ")");
- }
- }
- break;
-
- default:
- throw UnsupportedOperationException("Unrecognized ATN transition type.");
- }
-
- setState(transition->target->stateNumber);
-}
-
-size_t ParserInterpreter::visitDecisionState(DecisionState *p) {
- size_t predictedAlt = 1;
- if (p->transitions.size() > 1) {
- getErrorHandler()->sync(this);
- int decision = p->decision;
- if (decision == _overrideDecision && _input->index() == _overrideDecisionInputIndex && !_overrideDecisionReached) {
- predictedAlt = _overrideDecisionAlt;
- _overrideDecisionReached = true;
- } else {
- predictedAlt = getInterpreter<ParserATNSimulator>()->adaptivePredict(_input, decision, _ctx);
- }
- }
- return predictedAlt;
-}
-
-InterpreterRuleContext* ParserInterpreter::createInterpreterRuleContext(ParserRuleContext *parent,
- size_t invokingStateNumber, size_t ruleIndex) {
- return _tracker.createInstance<InterpreterRuleContext>(parent, invokingStateNumber, ruleIndex);
-}
-
-void ParserInterpreter::visitRuleStopState(atn::ATNState *p) {
- atn::RuleStartState *ruleStartState = _atn.ruleToStartState[p->ruleIndex];
- if (ruleStartState->isLeftRecursiveRule) {
- std::pair<ParserRuleContext *, size_t> parentContext = _parentContextStack.top();
- _parentContextStack.pop();
-
- unrollRecursionContexts(parentContext.first);
- setState(parentContext.second);
- } else {
- exitRule();
- }
-
- const atn::RuleTransition *ruleTransition = static_cast<const atn::RuleTransition*>(_atn.states[getState()]->transitions[0].get());
- setState(ruleTransition->followState->stateNumber);
-}
-
-void ParserInterpreter::recover(RecognitionException &e) {
- size_t i = _input->index();
- getErrorHandler()->recover(this, std::make_exception_ptr(e));
-
- if (_input->index() == i) {
- // no input consumed, better add an error node
- if (is<InputMismatchException *>(&e)) {
- InputMismatchException &ime = static_cast<InputMismatchException&>(e);
- Token *tok = e.getOffendingToken();
- size_t expectedTokenType = ime.getExpectedTokens().getMinElement(); // get any element
- _errorToken = getTokenFactory()->create({ tok->getTokenSource(), tok->getTokenSource()->getInputStream() },
- expectedTokenType, tok->getText(), Token::DEFAULT_CHANNEL, INVALID_INDEX, INVALID_INDEX, // invalid start/stop
- tok->getLine(), tok->getCharPositionInLine());
- _ctx->addChild(createErrorNode(_errorToken.get()));
- }
- else { // NoViableAlt
- Token *tok = e.getOffendingToken();
- _errorToken = getTokenFactory()->create({ tok->getTokenSource(), tok->getTokenSource()->getInputStream() },
- Token::INVALID_TYPE, tok->getText(), Token::DEFAULT_CHANNEL, INVALID_INDEX, INVALID_INDEX, // invalid start/stop
- tok->getLine(), tok->getCharPositionInLine());
- _ctx->addChild(createErrorNode(_errorToken.get()));
- }
- }
-}
-
-Token* ParserInterpreter::recoverInline() {
- return _errHandler->recoverInline(this);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.h b/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.h
deleted file mode 100644
index 6d4a679e5b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ParserInterpreter.h
+++ /dev/null
@@ -1,173 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Parser.h"
-#include "atn/ATN.h"
-#include "support/BitSet.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextCache.h"
-#include "Vocabulary.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// A parser simulator that mimics what ANTLR's generated
- /// parser code does. A ParserATNSimulator is used to make
- /// predictions via adaptivePredict but this class moves a pointer through the
- /// ATN to simulate parsing. ParserATNSimulator just
- /// makes us efficient rather than having to backtrack, for example.
- ///
- /// This properly creates parse trees even for left recursive rules.
- ///
- /// We rely on the left recursive rule invocation and special predicate
- /// transitions to make left recursive rules work.
- ///
- /// See TestParserInterpreter for examples.
- /// </summary>
- class ANTLR4CPP_PUBLIC ParserInterpreter : public Parser {
- public:
- ParserInterpreter(const std::string &grammarFileName, const dfa::Vocabulary &vocabulary,
- const std::vector<std::string> &ruleNames, const atn::ATN &atn, TokenStream *input);
- ~ParserInterpreter();
-
- virtual void reset() override;
-
- virtual const atn::ATN& getATN() const override;
-
- virtual const dfa::Vocabulary& getVocabulary() const override;
-
- virtual const std::vector<std::string>& getRuleNames() const override;
- virtual std::string getGrammarFileName() const override;
-
- /// Begin parsing at startRuleIndex
- virtual ParserRuleContext* parse(size_t startRuleIndex);
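// [Editorial sketch -- not part of the deleted file] One way to drive the
// interpreter is to borrow the ATN, vocabulary and rule names of an already
// generated parser. "MyLexer"/"MyParser"/MyParser::RuleExpr are hypothetical
// generated names.
#include <iostream>
#include "ANTLRInputStream.h"
#include "CommonTokenStream.h"
#include "ParserInterpreter.h"
void interpretExpr(const std::string &text) {
  antlr4::ANTLRInputStream input(text);
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser generated(&tokens);              // used only as an ATN/vocabulary source
  antlr4::ParserInterpreter interp("MyGrammar.g4", generated.getVocabulary(),
                                   generated.getRuleNames(), generated.getATN(), &tokens);
  antlr4::ParserRuleContext *tree = interp.parse(MyParser::RuleExpr);
  std::cout << tree->toStringTree(&interp) << std::endl;   // tree is owned by interp
}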
-
- virtual void enterRecursionRule(ParserRuleContext *localctx, size_t state, size_t ruleIndex, int precedence) override;
-
-
- /** Override this parser interpreter's normal decision-making process
- * at a particular decision and input token index. Instead of
- * allowing the adaptive prediction mechanism to choose the
- * first alternative within a block that leads to a successful parse,
- * force it to take the alternative, 1..n for n alternatives.
- *
- * As an implementation limitation right now, you can only specify one
- * override. This is sufficient to allow construction of different
- * parse trees for ambiguous input. It means re-parsing the entire input
- * in general because you're never sure where an ambiguous sequence would
- * live in the various parse trees. For example, in one interpretation,
- * an ambiguous input sequence would be matched completely in expression
- * but in another it could match all the way back to the root.
- *
- * s : e '!'? ;
- * e : ID
- * | ID '!'
- * ;
- *
- * Here, x! can be matched as (s (e ID) !) or (s (e ID !)). In the first
- * case, the ambiguous sequence is fully contained only by the root.
- * In the second case, the ambiguous sequence is fully contained within just
- * e, as in: (e ID !).
- *
- * Rather than trying to optimize this and make
- * some intelligent decisions for optimization purposes, I settled on
- * just re-parsing the whole input and then using
- * {@link Trees#getRootOfSubtreeEnclosingRegion} to find the minimal
- * subtree that contains the ambiguous sequence. I originally tried to
- * record the call stack at the point the parser detected an ambiguity, but
- * left recursive rules create a parse tree stack that does not reflect
- * the actual call stack. That impedance mismatch was enough to make
- * it challenging to restart the parser at a deeply nested rule
- * invocation.
- *
- * Only parser interpreters can override decisions so as to avoid inserting
- * override checking code in the critical ALL(*) prediction execution path.
- *
- * @since 4.5.1
- */
- void addDecisionOverride(int decision, int tokenIndex, int forcedAlt);
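// [Editorial sketch -- not part of the deleted file] Re-parsing the same
// input with a forced alternative, as described above; decision and
// startIndex would normally come from a reported ambiguity. Note that
// reset() clears the one-shot override latch (see ParserInterpreter.cpp) and,
// in this runtime, rewinds the token stream.
void reparseWithForcedAlt(antlr4::ParserInterpreter &interp,
                          size_t startRuleIndex, int decision, int startIndex, int alt) {
  interp.reset();
  interp.addDecisionOverride(decision, startIndex, alt);
  antlr4::ParserRuleContext *altTree = interp.parse(startRuleIndex);
  (void)altTree;                            // one parse tree per forced alternative
}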
-
- Ref<InterpreterRuleContext> getOverrideDecisionRoot() const;
-
- /** Return the root of the parse, which can be useful if the parser
- * bails out. You can still access the top node. Note that,
- * because of the way left recursive rules add children, it's possible
- * that the root will not have any children if the start rule immediately
- * called a left recursive rule that fails.
- *
- * @since 4.5.1
- */
- InterpreterRuleContext* getRootContext();
-
- protected:
- const std::string _grammarFileName;
- const atn::ATN &_atn;
-
- std::vector<std::string> _ruleNames;
-
- std::vector<dfa::DFA> _decisionToDFA; // not shared like it is for generated parsers
- atn::PredictionContextCache _sharedContextCache;
-
- /** This stack corresponds to the _parentctx, _parentState pair of locals
- * that would exist on call stack frames with a recursive descent parser;
- * in the generated function for a left-recursive rule you'd see:
- *
- * private EContext e(int _p) throws RecognitionException {
- * ParserRuleContext _parentctx = _ctx; // Pair.a
- * int _parentState = getState(); // Pair.b
- * ...
- * }
- *
- * Those values are used to create new recursive rule invocation contexts
- * associated with left operand of an alt like "expr '*' expr".
- */
- std::stack<std::pair<ParserRuleContext *, size_t>> _parentContextStack;
-
- /** We need a map from (decision,inputIndex)->forced alt for computing ambiguous
- * parse trees. For now, we allow exactly one override.
- */
- int _overrideDecision = -1;
- size_t _overrideDecisionInputIndex = INVALID_INDEX;
- size_t _overrideDecisionAlt = INVALID_INDEX;
- bool _overrideDecisionReached = false; // latch and only override once; error might trigger infinite loop
-
- /** What is the current context when we override a decision? This tells
- * us what the root of the parse tree is when using override
- * for an ambiguity/lookahead check.
- */
- Ref<InterpreterRuleContext> _overrideDecisionRoot;
- InterpreterRuleContext* _rootContext;
-
- virtual atn::ATNState *getATNState();
- virtual void visitState(atn::ATNState *p);
-
- /** Method visitDecisionState() is called when the interpreter reaches
- * a decision state (instance of DecisionState). It gives an opportunity
- * for subclasses to track interesting things.
- */
- size_t visitDecisionState(atn::DecisionState *p);
-
- /** Provide simple "factory" for InterpreterRuleContext's.
- * @since 4.5.1
- */
- InterpreterRuleContext* createInterpreterRuleContext(ParserRuleContext *parent, size_t invokingStateNumber, size_t ruleIndex);
-
- virtual void visitRuleStopState(atn::ATNState *p);
-
- /** Rely on the error handler for this parser but, if no tokens are consumed
- * to recover, add an error node. Otherwise, nothing is seen in the parse
- * tree.
- */
- void recover(RecognitionException &e);
- Token* recoverInline();
-
- private:
- const dfa::Vocabulary &_vocabulary;
- std::unique_ptr<Token> _errorToken;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.cpp
deleted file mode 100644
index 7eb3e6577f..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/TerminalNode.h"
-#include "tree/ErrorNode.h"
-#include "misc/Interval.h"
-#include "Parser.h"
-#include "Token.h"
-
-#include "support/Casts.h"
-#include "support/CPPUtils.h"
-
-#include "ParserRuleContext.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-
-using namespace antlrcpp;
-
-ParserRuleContext ParserRuleContext::EMPTY;
-
-ParserRuleContext::ParserRuleContext()
- : start(nullptr), stop(nullptr) {
-}
-
-ParserRuleContext::ParserRuleContext(ParserRuleContext *parent, size_t invokingStateNumber)
-: RuleContext(parent, invokingStateNumber), start(nullptr), stop(nullptr) {
-}
-
-void ParserRuleContext::copyFrom(ParserRuleContext *ctx) {
- // from RuleContext
- this->parent = ctx->parent;
- this->invokingState = ctx->invokingState;
-
- this->start = ctx->start;
- this->stop = ctx->stop;
-
- // copy any error nodes to alt label node
- if (!ctx->children.empty()) {
- for (auto *child : ctx->children) {
- if (ErrorNode::is(child)) {
- downCast<ErrorNode*>(child)->setParent(this);
- children.push_back(child);
- }
- }
-
- // Remove the just reparented error nodes from the source context.
- ctx->children.erase(std::remove_if(ctx->children.begin(), ctx->children.end(), [this](tree::ParseTree *e) -> bool {
- return std::find(children.begin(), children.end(), e) != children.end();
- }), ctx->children.end());
- }
-}
-
-void ParserRuleContext::enterRule(tree::ParseTreeListener * /*listener*/) {
-}
-
-void ParserRuleContext::exitRule(tree::ParseTreeListener * /*listener*/) {
-}
-
-tree::TerminalNode* ParserRuleContext::addChild(tree::TerminalNode *t) {
- t->setParent(this);
- children.push_back(t);
- return t;
-}
-
-RuleContext* ParserRuleContext::addChild(RuleContext *ruleInvocation) {
- children.push_back(ruleInvocation);
- return ruleInvocation;
-}
-
-void ParserRuleContext::removeLastChild() {
- if (!children.empty()) {
- children.pop_back();
- }
-}
-
-tree::TerminalNode* ParserRuleContext::getToken(size_t ttype, size_t i) const {
- if (i >= children.size()) {
- return nullptr;
- }
- size_t j = 0; // what token with ttype have we found?
- for (auto *child : children) {
- if (TerminalNode::is(child)) {
- tree::TerminalNode *typedChild = downCast<tree::TerminalNode*>(child);
- Token *symbol = typedChild->getSymbol();
- if (symbol->getType() == ttype) {
- if (j++ == i) {
- return typedChild;
- }
- }
- }
- }
- return nullptr;
-}
-
-std::vector<tree::TerminalNode *> ParserRuleContext::getTokens(size_t ttype) const {
- std::vector<tree::TerminalNode*> tokens;
- for (auto *child : children) {
- if (TerminalNode::is(child)) {
- tree::TerminalNode *typedChild = downCast<tree::TerminalNode*>(child);
- Token *symbol = typedChild->getSymbol();
- if (symbol->getType() == ttype) {
- tokens.push_back(typedChild);
- }
- }
- }
- return tokens;
-}
-
-misc::Interval ParserRuleContext::getSourceInterval() {
- if (start == nullptr) {
- return misc::Interval::INVALID;
- }
-
- if (stop == nullptr || stop->getTokenIndex() < start->getTokenIndex()) {
- return misc::Interval(start->getTokenIndex(), start->getTokenIndex() - 1); // empty
- }
- return misc::Interval(start->getTokenIndex(), stop->getTokenIndex());
-}
-
-Token* ParserRuleContext::getStart() const {
- return start;
-}
-
-Token* ParserRuleContext::getStop() const {
- return stop;
-}
-
-std::string ParserRuleContext::toInfoString(Parser *recognizer) {
- std::vector<std::string> rules = recognizer->getRuleInvocationStack(this);
- std::reverse(rules.begin(), rules.end());
- std::string rulesStr = antlrcpp::arrayToString(rules);
- return "ParserRuleContext" + rulesStr + "{start=" + std::to_string(start->getTokenIndex()) + ", stop=" +
- std::to_string(stop->getTokenIndex()) + '}';
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.h b/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.h
deleted file mode 100644
index 63a8466e59..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ParserRuleContext.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-#include "support/CPPUtils.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// A rule invocation record for parsing.
- ///
- /// Contains all of the information about the current rule not stored in the
- /// RuleContext. It handles the parse tree children list, any ATN state
- /// tracing, and the default values available for rule invocations:
- /// start, stop, rule index, current alt number.
- ///
- /// Subclasses made for each rule and grammar track the parameters,
- /// return values, locals, and labels specific to that rule. These
- /// are the objects that are returned from rules.
- ///
- /// Note text is not an actual field of a rule return value; it is computed
- /// from start and stop using the input stream's toString() method. I
- /// could add a ctor to this so that we can pass in and store the input
- /// stream, but I'm not sure we want to do that. It would seem to be undefined
- /// to get the .text property anyway if the rule matches tokens from multiple
- /// input streams.
- ///
- /// I do not use getters for fields of objects that are used simply to
- /// group values such as this aggregate. The getters/setters are there to
- /// satisfy the superclass interface.
- /// </summary>
- class ANTLR4CPP_PUBLIC ParserRuleContext : public RuleContext {
- public:
- static ParserRuleContext EMPTY;
-
- /// <summary>
- /// For debugging/tracing purposes, we want to track all of the nodes in
- /// the ATN traversed by the parser for a particular rule.
- /// This list indicates the sequence of ATN nodes used to match
- /// the elements of the children list. This list does not include
- /// ATN nodes and other rules used to match rule invocations. It
- /// traces the rule invocation node itself but nothing inside that
- /// other rule's ATN submachine.
- ///
- /// There is NOT a one-to-one correspondence between the children and
- /// states list. There are typically many nodes in the ATN traversed
- /// for each element in the children list. For example, for a rule
- /// invocation there is the invoking state and the following state.
- ///
- /// The parser setState() method updates field s and adds it to this list
- /// if we are debugging/tracing.
- ///
- /// This does not trace states visited during prediction.
- /// </summary>
- // public List<Integer> states;
-
- Token *start;
- Token *stop;
-
- /// The exception that forced this rule to return. If the rule successfully
- /// completed, this is "null exception pointer".
- std::exception_ptr exception;
-
- ParserRuleContext();
- ParserRuleContext(ParserRuleContext *parent, size_t invokingStateNumber);
-
- /** COPY a ctx (I'm deliberately not using copy constructor) to avoid
- * confusion with creating node with parent. Does not copy children
- * (except error leaves).
- */
- virtual void copyFrom(ParserRuleContext *ctx);
-
-
- // Double dispatch methods for listeners
-
- virtual void enterRule(tree::ParseTreeListener *listener);
- virtual void exitRule(tree::ParseTreeListener *listener);
-
- /** Add a token leaf node child and force its parent to be this node. */
- tree::TerminalNode* addChild(tree::TerminalNode *t);
- RuleContext* addChild(RuleContext *ruleInvocation);
-
- /// Used by enterOuterAlt to toss out a RuleContext previously added as
- /// we entered a rule. If we have a # label, we will need to remove the
- /// generic ruleContext object.
- void removeLastChild();
-
- tree::TerminalNode* getToken(size_t ttype, std::size_t i) const;
-
- std::vector<tree::TerminalNode*> getTokens(size_t ttype) const;
-
- template<typename T>
- T* getRuleContext(size_t i) const {
- static_assert(std::is_base_of_v<RuleContext, T>, "T must be derived from RuleContext");
- size_t j = 0; // what element have we found with ctxType?
- for (auto *child : children) {
- if (RuleContext::is(child)) {
- if (auto *typedChild = dynamic_cast<T*>(child); typedChild != nullptr) {
- if (j++ == i) {
- return typedChild;
- }
- }
- }
- }
- return nullptr;
- }
-
- template<typename T>
- std::vector<T*> getRuleContexts() const {
- static_assert(std::is_base_of_v<RuleContext, T>, "T must be derived from RuleContext");
- std::vector<T*> contexts;
- for (auto *child : children) {
- if (RuleContext::is(child)) {
- if (auto *typedChild = dynamic_cast<T*>(child); typedChild != nullptr) {
- contexts.push_back(typedChild);
- }
- }
- }
- return contexts;
- }
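// [Editorial sketch -- not part of the deleted file] How generated contexts
// typically use these helpers; "AddContext", "ExprContext" and MyParser::PLUS
// are hypothetical names from a generated parser.
void inspect(AddContext *ctx) {
  antlr4::tree::TerminalNode *plus = ctx->getToken(MyParser::PLUS, 0);        // first '+' leaf
  std::vector<ExprContext *> operands = ctx->getRuleContexts<ExprContext>();  // every nested expr
  ExprContext *rhs = ctx->getRuleContext<ExprContext>(1);                     // second operand, or nullptr
  (void)plus; (void)operands; (void)rhs;
}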
-
- virtual misc::Interval getSourceInterval() override;
-
- /**
- * Get the initial token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do not consume anything
- * (for example, zero length or error productions) this token may exceed stop.
- */
- Token* getStart() const;
-
- /**
- * Get the final token in this context.
- * Note that the range from start to stop is inclusive, so for rules that do not consume anything
- * (for example, zero length or error productions) this token may precede start.
- */
- Token* getStop() const;
-
- /// <summary>
- /// Used for rule context info debugging during parse-time, not so much for ATN debugging </summary>
- virtual std::string toInfoString(Parser *recognizer);
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.cpp
deleted file mode 100644
index 34bfd73e26..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.cpp
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ProxyErrorListener.h"
-
-using namespace antlr4;
-
-void ProxyErrorListener::addErrorListener(ANTLRErrorListener *listener) {
- if (listener == nullptr) {
- throw "listener cannot be null.";
- }
-
- _delegates.insert(listener);
-}
-
-void ProxyErrorListener::removeErrorListener(ANTLRErrorListener *listener) {
- _delegates.erase(listener);
-}
-
-void ProxyErrorListener::removeErrorListeners() {
- _delegates.clear();
-}
-
-void ProxyErrorListener::syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line,
- size_t charPositionInLine, const std::string &msg, std::exception_ptr e) {
-
- for (auto *listener : _delegates) {
- listener->syntaxError(recognizer, offendingSymbol, line, charPositionInLine, msg, e);
- }
-}
-
-void ProxyErrorListener::reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- bool exact, const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) {
- for (auto *listener : _delegates) {
- listener->reportAmbiguity(recognizer, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
- }
-}
-
-void ProxyErrorListener::reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex,
- size_t stopIndex, const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) {
- for (auto *listener : _delegates) {
- listener->reportAttemptingFullContext(recognizer, dfa, startIndex, stopIndex, conflictingAlts, configs);
- }
-}
-
-void ProxyErrorListener::reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- size_t prediction, atn::ATNConfigSet *configs) {
- for (auto *listener : _delegates) {
- listener->reportContextSensitivity(recognizer, dfa, startIndex, stopIndex, prediction, configs);
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.h
deleted file mode 100644
index 04630ce12c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/ProxyErrorListener.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ANTLRErrorListener.h"
-#include "Exceptions.h"
-
-namespace antlr4 {
-
- /// This implementation of ANTLRErrorListener dispatches all calls to a
- /// collection of delegate listeners. This reduces the effort required to support multiple
- /// listeners.
- class ANTLR4CPP_PUBLIC ProxyErrorListener : public ANTLRErrorListener {
- private:
- std::set<ANTLRErrorListener *> _delegates; // Not owned.
-
- public:
- void addErrorListener(ANTLRErrorListener *listener);
- void removeErrorListener(ANTLRErrorListener *listener);
- void removeErrorListeners();
-
- void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line, size_t charPositionInLine,
- const std::string &msg, std::exception_ptr e) override;
-
- virtual void reportAmbiguity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet &ambigAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportAttemptingFullContext(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- const antlrcpp::BitSet &conflictingAlts, atn::ATNConfigSet *configs) override;
-
- virtual void reportContextSensitivity(Parser *recognizer, const dfa::DFA &dfa, size_t startIndex, size_t stopIndex,
- size_t prediction, atn::ATNConfigSet *configs) override;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.cpp b/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.cpp
deleted file mode 100644
index 5b37f9d2f0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.cpp
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-#include "Recognizer.h"
-#include "ParserRuleContext.h"
-#include "misc/IntervalSet.h"
-
-#include "RecognitionException.h"
-
-using namespace antlr4;
-
-RecognitionException::RecognitionException(Recognizer *recognizer, IntStream *input, ParserRuleContext *ctx,
- Token *offendingToken)
- : RecognitionException("", recognizer, input, ctx, offendingToken) {
-}
-
-RecognitionException::RecognitionException(const std::string &message, Recognizer *recognizer, IntStream *input,
- ParserRuleContext *ctx, Token *offendingToken)
- : RuntimeException(message), _recognizer(recognizer), _input(input), _ctx(ctx), _offendingToken(offendingToken) {
- InitializeInstanceFields();
- if (recognizer != nullptr) {
- _offendingState = recognizer->getState();
- }
-}
-
-RecognitionException::~RecognitionException() {
-}
-
-size_t RecognitionException::getOffendingState() const {
- return _offendingState;
-}
-
-void RecognitionException::setOffendingState(size_t offendingState) {
- _offendingState = offendingState;
-}
-
-misc::IntervalSet RecognitionException::getExpectedTokens() const {
- if (_recognizer) {
- return _recognizer->getATN().getExpectedTokens(_offendingState, _ctx);
- }
- return misc::IntervalSet::EMPTY_SET;
-}
-
-RuleContext* RecognitionException::getCtx() const {
- return _ctx;
-}
-
-IntStream* RecognitionException::getInputStream() const {
- return _input;
-}
-
-Token* RecognitionException::getOffendingToken() const {
- return _offendingToken;
-}
-
-Recognizer* RecognitionException::getRecognizer() const {
- return _recognizer;
-}
-
-void RecognitionException::InitializeInstanceFields() {
- _offendingState = INVALID_INDEX;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.h b/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.h
deleted file mode 100644
index 9397ab20c8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RecognitionException.h
+++ /dev/null
@@ -1,98 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Exceptions.h"
-
-namespace antlr4 {
-
- /// The root of the ANTLR exception hierarchy. In general, ANTLR tracks just
- /// 3 kinds of errors: prediction errors, failed predicate errors, and
- /// mismatched input errors. In each case, the parser knows where it is
- /// in the input, where it is in the ATN, the rule invocation stack,
- /// and what kind of problem occurred.
- class ANTLR4CPP_PUBLIC RecognitionException : public RuntimeException {
- private:
- /// The Recognizer where this exception originated.
- Recognizer *_recognizer;
- IntStream *_input;
- ParserRuleContext *_ctx;
-
- /// The current Token when an error occurred. Since not all streams
- /// support accessing symbols by index, we have to track the Token
- /// instance itself.
- Token *_offendingToken;
-
- size_t _offendingState;
-
- public:
- RecognitionException(Recognizer *recognizer, IntStream *input, ParserRuleContext *ctx,
- Token *offendingToken = nullptr);
- RecognitionException(const std::string &message, Recognizer *recognizer, IntStream *input,
- ParserRuleContext *ctx, Token *offendingToken = nullptr);
- RecognitionException(RecognitionException const&) = default;
- ~RecognitionException();
- RecognitionException& operator=(RecognitionException const&) = default;
-
- /// Get the ATN state number the parser was in at the time the error
- /// occurred. For NoViableAltException and
- /// LexerNoViableAltException exceptions, this is the
- /// DecisionState number. For others, it is the state whose outgoing
- /// edge we couldn't match.
- ///
- /// If the state number is not known, this method returns -1.
- virtual size_t getOffendingState() const;
-
- protected:
- void setOffendingState(size_t offendingState);
-
- /// Gets the set of input symbols which could potentially follow the
- /// previously matched symbol at the time this exception was thrown.
- ///
- /// If the set of expected tokens is not known and could not be computed,
- /// this method returns an empty set.
- ///
- /// @returns The set of token types that could potentially follow the current
- /// state in the ATN, or an empty set if the information is not available.
- public:
- virtual misc::IntervalSet getExpectedTokens() const;
-
- /// <summary>
- /// Gets the <seealso cref="RuleContext"/> at the time this exception was thrown.
- /// <p/>
- /// If the context is not available, this method returns {@code null}.
- /// </summary>
- /// <returns> The <seealso cref="RuleContext"/> at the time this exception was thrown.
- /// If the context is not available, this method returns {@code null}. </returns>
- virtual RuleContext* getCtx() const;
-
- /// <summary>
- /// Gets the input stream which is the symbol source for the recognizer where
- /// this exception was thrown.
- /// <p/>
- /// If the input stream is not available, this method returns {@code null}.
- /// </summary>
- /// <returns> The input stream which is the symbol source for the recognizer
- /// where this exception was thrown, or {@code null} if the stream is not
- /// available. </returns>
- virtual IntStream* getInputStream() const;
-
- virtual Token* getOffendingToken() const;
-
- /// <summary>
- /// Gets the <seealso cref="Recognizer"/> where this exception occurred.
- /// <p/>
- /// If the recognizer is not available, this method returns {@code null}.
- /// </summary>
- /// <returns> The recognizer where this exception occurred, or {@code null} if
- /// the recognizer is not available. </returns>
- virtual Recognizer* getRecognizer() const;
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
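
Because syntaxError receives the exception as a std::exception_ptr, the RecognitionException accessors declared above are reached by rethrowing it inside a listener. A minimal sketch, not tied to any particular grammar; the listener is illustrative only.

#include <iostream>
#include "antlr4-runtime.h"

class ExpectedTokensListener : public antlr4::BaseErrorListener {
public:
  void syntaxError(antlr4::Recognizer * /*recognizer*/, antlr4::Token * /*offendingSymbol*/,
                   size_t line, size_t charPositionInLine,
                   const std::string &msg, std::exception_ptr e) override {
    std::cerr << "line " << line << ":" << charPositionInLine << " " << msg << "\n";
    if (!e) return;  // not every report carries an exception
    try {
      std::rethrow_exception(e);
    } catch (const antlr4::RecognitionException &ex) {
      auto expected = ex.getExpectedTokens();       // misc::IntervalSet of viable token types
      antlr4::Token *bad = ex.getOffendingToken();  // the token the recognizer choked on
      (void)expected; (void)bad;                    // use these to build a richer message
    } catch (...) {
      // other exception types can arrive here; ignore them in this sketch
    }
  }
};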
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Recognizer.cpp b/contrib/libs/antlr4_cpp_runtime/src/Recognizer.cpp
deleted file mode 100644
index c8a183324c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Recognizer.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ConsoleErrorListener.h"
-#include "RecognitionException.h"
-#include "support/CPPUtils.h"
-#include "Token.h"
-#include "atn/ATN.h"
-#include "atn/ATNSimulator.h"
-#include "support/CPPUtils.h"
-#include "support/StringUtils.h"
-
-#include "Vocabulary.h"
-
-#include "Recognizer.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::internal;
-
-std::map<const dfa::Vocabulary*, std::map<std::string_view, size_t>> Recognizer::_tokenTypeMapCache;
-std::map<std::vector<std::string>, std::map<std::string, size_t>> Recognizer::_ruleIndexMapCache;
-
-Recognizer::Recognizer() {
- InitializeInstanceFields();
- _proxListener.addErrorListener(&ConsoleErrorListener::INSTANCE);
-}
-
-Recognizer::~Recognizer() {
-}
-
-std::map<std::string_view, size_t> Recognizer::getTokenTypeMap() {
- const dfa::Vocabulary& vocabulary = getVocabulary();
-
- UniqueLock<Mutex> lck(_mutex);
- std::map<std::string_view, size_t> result;
- auto iterator = _tokenTypeMapCache.find(&vocabulary);
- if (iterator != _tokenTypeMapCache.end()) {
- result = iterator->second;
- } else {
- for (size_t i = 0; i <= getATN().maxTokenType; ++i) {
- std::string_view literalName = vocabulary.getLiteralName(i);
- if (!literalName.empty()) {
- result[literalName] = i;
- }
-
- std::string_view symbolicName = vocabulary.getSymbolicName(i);
- if (!symbolicName.empty()) {
- result[symbolicName] = i;
- }
- }
- result["EOF"] = EOF;
- _tokenTypeMapCache[&vocabulary] = result;
- }
-
- return result;
-}
-
-std::map<std::string, size_t> Recognizer::getRuleIndexMap() {
- const std::vector<std::string>& ruleNames = getRuleNames();
- if (ruleNames.empty()) {
- throw "The current recognizer does not provide a list of rule names.";
- }
-
- UniqueLock<Mutex> lck(_mutex);
- std::map<std::string, size_t> result;
- auto iterator = _ruleIndexMapCache.find(ruleNames);
- if (iterator != _ruleIndexMapCache.end()) {
- result = iterator->second;
- } else {
- result = antlrcpp::toMap(ruleNames);
- _ruleIndexMapCache[ruleNames] = result;
- }
- return result;
-}
-
-size_t Recognizer::getTokenType(std::string_view tokenName) {
- const std::map<std::string_view, size_t> &map = getTokenTypeMap();
- auto iterator = map.find(tokenName);
- if (iterator == map.end())
- return Token::INVALID_TYPE;
-
- return iterator->second;
-}
-
-void Recognizer::setInterpreter(atn::ATNSimulator *interpreter) {
-  // Usually the interpreter is set by the descendant (lexer or parser simulator), but it can also be exchanged
-  // by the profiling ATN simulator.
- delete _interpreter;
- _interpreter = interpreter;
-}
-
-std::string Recognizer::getErrorHeader(RecognitionException *e) {
-  // We're having issues with cross-header dependencies; these two classes will need to be
-  // rewritten to remove that.
- size_t line = e->getOffendingToken()->getLine();
- size_t charPositionInLine = e->getOffendingToken()->getCharPositionInLine();
- return std::string("line ") + std::to_string(line) + ":" + std::to_string(charPositionInLine);
-
-}
-
-std::string Recognizer::getTokenErrorDisplay(Token *t) {
- if (t == nullptr) {
- return "<no Token>";
- }
- std::string s = t->getText();
- if (s == "") {
- if (t->getType() == EOF) {
- s = "<EOF>";
- } else {
- s = std::string("<") + std::to_string(t->getType()) + std::string(">");
- }
- }
-
- std::string result;
- result.reserve(s.size() + 2);
- result.push_back('\'');
- antlrcpp::escapeWhitespace(result, s);
- result.push_back('\'');
- result.shrink_to_fit();
- return result;
-}
-
-void Recognizer::addErrorListener(ANTLRErrorListener *listener) {
- _proxListener.addErrorListener(listener);
-}
-
-void Recognizer::removeErrorListener(ANTLRErrorListener *listener) {
- _proxListener.removeErrorListener(listener);
-}
-
-void Recognizer::removeErrorListeners() {
- _proxListener.removeErrorListeners();
-}
-
-ProxyErrorListener& Recognizer::getErrorListenerDispatch() {
- return _proxListener;
-}
-
-bool Recognizer::sempred(RuleContext * /*localctx*/, size_t /*ruleIndex*/, size_t /*actionIndex*/) {
- return true;
-}
-
-bool Recognizer::precpred(RuleContext * /*localctx*/, int /*precedence*/) {
- return true;
-}
-
-void Recognizer::action(RuleContext * /*localctx*/, size_t /*ruleIndex*/, size_t /*actionIndex*/) {
-}
-
-void Recognizer::InitializeInstanceFields() {
- _stateNumber = ATNState::INVALID_STATE_NUMBER;
- _interpreter = nullptr;
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Recognizer.h b/contrib/libs/antlr4_cpp_runtime/src/Recognizer.h
deleted file mode 100644
index 0226a612e1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Recognizer.h
+++ /dev/null
@@ -1,160 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ProxyErrorListener.h"
-#include "support/Casts.h"
-#include "atn/SerializedATNView.h"
-#include "internal/Synchronization.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC Recognizer {
- public:
- static constexpr size_t EOF = std::numeric_limits<size_t>::max();
-
- Recognizer();
- Recognizer(Recognizer const&) = delete;
- virtual ~Recognizer();
-
- Recognizer& operator=(Recognizer const&) = delete;
-
- virtual std::vector<std::string> const& getRuleNames() const = 0;
-
- /**
- * Get the vocabulary used by the recognizer.
- *
- * @return A {@link Vocabulary} instance providing information about the
- * vocabulary used by the grammar.
- */
- virtual dfa::Vocabulary const& getVocabulary() const = 0;
-
- /// <summary>
- /// Get a map from token names to token types.
- /// <p/>
- /// Used for XPath and tree pattern compilation.
- /// </summary>
- virtual std::map<std::string_view, size_t> getTokenTypeMap();
-
- /// <summary>
- /// Get a map from rule names to rule indexes.
- /// <p/>
- /// Used for XPath and tree pattern compilation.
- /// </summary>
- virtual std::map<std::string, size_t> getRuleIndexMap();
-
- virtual size_t getTokenType(std::string_view tokenName);
-
- /// <summary>
- /// If this recognizer was generated, it will have a serialized ATN
- /// representation of the grammar.
- /// <p/>
- /// For interpreters, we don't know their serialized ATN despite having
- /// created the interpreter from it.
- /// </summary>
- virtual atn::SerializedATNView getSerializedATN() const {
- throw "there is no serialized ATN";
- }
-
- /// <summary>
- /// For debugging and other purposes, might want the grammar name.
- /// Have ANTLR generate an implementation for this method.
- /// </summary>
- virtual std::string getGrammarFileName() const = 0;
-
-    /// Get the ATN interpreter (in fact one of its descendants) used by the recognizer for prediction.
- /// @returns The ATN interpreter used by the recognizer for prediction.
- template <class T>
- T* getInterpreter() const {
- return antlrcpp::downCast<T *>(_interpreter);
- }
-
- /**
- * Set the ATN interpreter used by the recognizer for prediction.
- *
- * @param interpreter The ATN interpreter used by the recognizer for
- * prediction.
- */
- void setInterpreter(atn::ATNSimulator *interpreter);
-
- /// What is the error header, normally line/character position information?
- virtual std::string getErrorHeader(RecognitionException *e);
-
- /** How should a token be displayed in an error message? The default
- * is to display just the text, but during development you might
- * want to have a lot of information spit out. Override in that case
- * to use t.toString() (which, for CommonToken, dumps everything about
- * the token). This is better than forcing you to override a method in
- * your token objects because you don't have to go modify your lexer
- * so that it creates a new Java type.
- *
- * @deprecated This method is not called by the ANTLR 4 Runtime. Specific
- * implementations of {@link ANTLRErrorStrategy} may provide a similar
- * feature when necessary. For example, see
- * {@link DefaultErrorStrategy#getTokenErrorDisplay}.
- */
- virtual std::string getTokenErrorDisplay(Token *t);
-
- /// <exception cref="NullPointerException"> if {@code listener} is {@code null}. </exception>
- virtual void addErrorListener(ANTLRErrorListener *listener);
-
- virtual void removeErrorListener(ANTLRErrorListener *listener);
-
- virtual void removeErrorListeners();
-
- virtual ProxyErrorListener& getErrorListenerDispatch();
-
- // subclass needs to override these if there are sempreds or actions
- // that the ATN interp needs to execute
- virtual bool sempred(RuleContext *localctx, size_t ruleIndex, size_t actionIndex);
-
- virtual bool precpred(RuleContext *localctx, int precedence);
-
- virtual void action(RuleContext *localctx, size_t ruleIndex, size_t actionIndex);
-
- size_t getState() const { return _stateNumber; }
-
- // Get the ATN used by the recognizer for prediction.
- virtual const atn::ATN& getATN() const = 0;
-
- /// <summary>
- /// Indicate that the recognizer has changed internal state that is
- /// consistent with the ATN state passed in. This way we always know
- /// where we are in the ATN as the parser goes along. The rule
- /// context objects form a stack that lets us see the stack of
- /// invoking rules. Combine this and we have complete ATN
- /// configuration information.
- /// </summary>
- void setState(size_t atnState) { _stateNumber = atnState; }
-
- virtual IntStream* getInputStream() = 0;
-
- virtual void setInputStream(IntStream *input) = 0;
-
- virtual TokenFactory<CommonToken>* getTokenFactory() = 0;
-
- template<typename T1>
- void setTokenFactory(TokenFactory<T1> *input);
-
- protected:
- atn::ATNSimulator *_interpreter; // Set and deleted in descendants (or the profiler).
-
- // Mutex to manage synchronized access for multithreading.
- internal::Mutex _mutex;
-
- private:
- static std::map<const dfa::Vocabulary*, std::map<std::string_view, size_t>> _tokenTypeMapCache;
- static std::map<std::vector<std::string>, std::map<std::string, size_t>> _ruleIndexMapCache;
-
- ProxyErrorListener _proxListener; // Manages a collection of listeners.
-
- size_t _stateNumber;
-
- void InitializeInstanceFields();
-
- };
-
-} // namespace antlr4
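
The name-to-type and name-to-index maps declared above back XPath and tree-pattern compilation, but they are also convenient for plain lookups. A small sketch against antlr4::Parser (any Recognizer works the same way).

#include <map>
#include <string_view>
#include "antlr4-runtime.h"

// Look a token type up by its literal ('+') or symbolic (PLUS) name.
// Returns Token::INVALID_TYPE when the vocabulary does not know the name.
size_t lookupTokenType(antlr4::Parser &parser, std::string_view name) {
  std::map<std::string_view, size_t> types = parser.getTokenTypeMap();  // cached per Vocabulary
  // (parser.getTokenType(name) performs the same lookup in one call.)
  auto it = types.find(name);
  return it != types.end() ? it->second : antlr4::Token::INVALID_TYPE;
}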
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuleContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/RuleContext.cpp
deleted file mode 100644
index 6d67f9a29a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuleContext.cpp
+++ /dev/null
@@ -1,144 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/Trees.h"
-#include "misc/Interval.h"
-#include "Parser.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "tree/ParseTreeVisitor.h"
-
-#include "RuleContext.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::tree;
-
-RuleContext::RuleContext() : ParseTree(ParseTreeType::RULE) {
- InitializeInstanceFields();
-}
-
-RuleContext::RuleContext(RuleContext *parent_, size_t invokingState_) : ParseTree(ParseTreeType::RULE) {
- InitializeInstanceFields();
- this->parent = parent_;
- this->invokingState = invokingState_;
-}
-
-int RuleContext::depth() {
- int n = 1;
- RuleContext *p = this;
- while (true) {
- if (p->parent == nullptr)
- break;
- p = static_cast<RuleContext *>(p->parent);
- n++;
- }
- return n;
-}
-
-bool RuleContext::isEmpty() {
- return invokingState == ATNState::INVALID_STATE_NUMBER;
-}
-
-misc::Interval RuleContext::getSourceInterval() {
- return misc::Interval::INVALID;
-}
-
-std::string RuleContext::getText() {
- if (children.empty()) {
- return "";
- }
-
- std::stringstream ss;
- for (size_t i = 0; i < children.size(); i++) {
- ParseTree *tree = children[i];
- if (tree != nullptr)
- ss << tree->getText();
- }
-
- return ss.str();
-}
-
-size_t RuleContext::getRuleIndex() const {
- return INVALID_INDEX;
-}
-
-size_t RuleContext::getAltNumber() const {
- return atn::ATN::INVALID_ALT_NUMBER;
-}
-
-void RuleContext::setAltNumber(size_t /*altNumber*/) {
-}
-
-std::any RuleContext::accept(tree::ParseTreeVisitor *visitor) {
- return visitor->visitChildren(this);
-}
-
-std::string RuleContext::toStringTree(Parser *recog, bool pretty) {
- return tree::Trees::toStringTree(this, recog, pretty);
-}
-
-std::string RuleContext::toStringTree(std::vector<std::string> &ruleNames, bool pretty) {
- return tree::Trees::toStringTree(this, ruleNames, pretty);
-}
-
-std::string RuleContext::toStringTree(bool pretty) {
- return toStringTree(nullptr, pretty);
-}
-
-
-std::string RuleContext::toString(const std::vector<std::string> &ruleNames) {
- return toString(ruleNames, nullptr);
-}
-
-
-std::string RuleContext::toString(const std::vector<std::string> &ruleNames, RuleContext *stop) {
- std::stringstream ss;
-
- RuleContext *currentParent = this;
- ss << "[";
- while (currentParent != stop) {
- if (ruleNames.empty()) {
- if (!currentParent->isEmpty()) {
- ss << currentParent->invokingState;
- }
- } else {
- size_t ruleIndex = currentParent->getRuleIndex();
-
- std::string ruleName = (ruleIndex < ruleNames.size()) ? ruleNames[ruleIndex] : std::to_string(ruleIndex);
- ss << ruleName;
- }
-
- if (currentParent->parent == nullptr) // No parent anymore.
- break;
- currentParent = static_cast<RuleContext *>(currentParent->parent);
- if (!ruleNames.empty() || !currentParent->isEmpty()) {
- ss << " ";
- }
- }
-
- ss << "]";
-
- return ss.str();
-}
-
-std::string RuleContext::toString() {
- return toString(nullptr);
-}
-
-std::string RuleContext::toString(Recognizer *recog) {
- return toString(recog, &ParserRuleContext::EMPTY);
-}
-
-std::string RuleContext::toString(Recognizer *recog, RuleContext *stop) {
- if (recog == nullptr)
-    return toString(std::vector<std::string>(), stop); // Don't use an initializer {} here or we end up calling ourselves recursively.
- return toString(recog->getRuleNames(), stop);
-}
-
-void RuleContext::InitializeInstanceFields() {
- invokingState = INVALID_INDEX;
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuleContext.h b/contrib/libs/antlr4_cpp_runtime/src/RuleContext.h
deleted file mode 100644
index a0effa2a02..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuleContext.h
+++ /dev/null
@@ -1,141 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTree.h"
-
-namespace antlr4 {
-
- /** A rule context is a record of a single rule invocation.
- *
- * We form a stack of these context objects using the parent
- * pointer. A parent pointer of null indicates that the current
-   * context is the bottom of the stack. The ParserRuleContext subclass
-   * has a children list so that we can turn this data structure into a
- * tree.
- *
-   * The root node always has a null parent pointer and an invokingState of -1.
- *
- * Upon entry to parsing, the first invoked rule function creates a
-   * context object (a subclass specialized for that rule, such as
- * SContext) and makes it the root of a parse tree, recorded by field
- * Parser._ctx.
- *
- * public final SContext s() throws RecognitionException {
- * SContext _localctx = new SContext(_ctx, getState()); <-- create new node
- * enterRule(_localctx, 0, RULE_s); <-- push it
- * ...
- * exitRule(); <-- pop back to _localctx
- * return _localctx;
- * }
- *
- * A subsequent rule invocation of r from the start rule s pushes a
-   * new context object for r whose parent points at s and whose invoking
- * state is the state with r emanating as edge label.
- *
- * The invokingState fields from a context object to the root
-   * together form a stack of rule invocation states where the root
- * (bottom of the stack) has a -1 sentinel value. If we invoke start
-   * symbol s then call r1, which calls r2, the stack would look like
- * this:
- *
- * SContext[-1] <- root node (bottom of the stack)
- * R1Context[p] <- p in rule s called r1
- * R2Context[q] <- q in rule r1 called r2
- *
- * So the top of the stack, _ctx, represents a call to the current
-   * rule and it holds the return address from another rule that invoked
-   * this rule. To invoke a rule, we must always have a current context.
- *
- * The parent contexts are useful for computing lookahead sets and
- * getting error information.
- *
- * These objects are used during parsing and prediction.
- * For the special case of parsers, we use the subclass
- * ParserRuleContext.
- *
- * @see ParserRuleContext
- */
- class ANTLR4CPP_PUBLIC RuleContext : public tree::ParseTree {
- public:
- static bool is(const tree::ParseTree &parseTree) { return parseTree.getTreeType() == tree::ParseTreeType::RULE; }
-
- static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); }
-
- /// What state invoked the rule associated with this context?
-    /// The "return address" is the followState of invokingState.
- /// If parent is null, this should be -1 and this context object represents the start rule.
- size_t invokingState;
-
- RuleContext();
- RuleContext(RuleContext *parent, size_t invokingState);
-
- virtual int depth();
-
-    /// A context is empty if there is no invoking state, meaning nobody called the current context.
- virtual bool isEmpty();
-
- // satisfy the ParseTree / SyntaxTree interface
-
- virtual misc::Interval getSourceInterval() override;
-
- virtual std::string getText() override;
-
- virtual size_t getRuleIndex() const;
-
-    /** For the rule associated with this parse tree internal node, return
-     * the outer alternative number used to match the input. The default
-     * implementation does not compute nor store this alt num. Create
-     * a subclass of ParserRuleContext with a backing field and set
-     * option contextSuperClass to set it.
- *
- * @since 4.5.3
- */
- virtual size_t getAltNumber() const;
-
- /** Set the outer alternative number for this context node. Default
- * implementation does nothing to avoid backing field overhead for
- * trees that don't need it. Create
- * a subclass of ParserRuleContext with backing field and set
- * option contextSuperClass.
- *
- * @since 4.5.3
- */
- virtual void setAltNumber(size_t altNumber);
-
- virtual std::any accept(tree::ParseTreeVisitor *visitor) override;
-
- /// <summary>
- /// Print out a whole tree, not just a node, in LISP format
- /// (root child1 .. childN). Print just a node if this is a leaf.
- /// We have to know the recognizer so we can get rule names.
- /// </summary>
- virtual std::string toStringTree(Parser *recog, bool pretty = false) override;
-
- /// <summary>
- /// Print out a whole tree, not just a node, in LISP format
- /// (root child1 .. childN). Print just a node if this is a leaf.
- /// </summary>
- virtual std::string toStringTree(std::vector<std::string> &ruleNames, bool pretty = false);
-
- virtual std::string toStringTree(bool pretty = false) override;
- virtual std::string toString() override;
- std::string toString(Recognizer *recog);
- std::string toString(const std::vector<std::string> &ruleNames);
-
- // recog null unless ParserRuleContext, in which case we use subclass toString(...)
- std::string toString(Recognizer *recog, RuleContext *stop);
-
- virtual std::string toString(const std::vector<std::string> &ruleNames, RuleContext *stop);
-
- bool operator == (const RuleContext &other) { return this == &other; } // Simple address comparison.
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
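
The invocation stack described in the class comment is directly visible from user code: toString(recognizer) prints the chain of rule names from the current context back to the start rule, and toStringTree renders the whole subtree. A short sketch; the generated classes in the comment (MyLexer, MyParser, its start rule) are hypothetical names.

#include <iostream>
#include "antlr4-runtime.h"

// With a generated pair (hypothetical names):
//   MyLexer lexer(&input);  antlr4::CommonTokenStream tokens(&lexer);
//   MyParser parser(&tokens);  antlr4::RuleContext *ctx = parser.start();
void dumpContext(antlr4::RuleContext *ctx, antlr4::Parser *parser) {
  std::cout << "depth: " << ctx->depth() << "\n";                    // 1 for the start rule's context
  std::cout << ctx->toString(parser) << "\n";                        // rule invocation stack, e.g. [expr stat prog]
  std::cout << ctx->toStringTree(parser, /*pretty=*/true) << "\n";   // whole subtree in LISP form
}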
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.cpp b/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.cpp
deleted file mode 100644
index 250859fdc0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-
-#include "RuleContextWithAltNum.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-RuleContextWithAltNum::RuleContextWithAltNum() : ParserRuleContext() {
- altNum = ATN::INVALID_ALT_NUMBER;
-}
-
-RuleContextWithAltNum::RuleContextWithAltNum(ParserRuleContext *parent, int invokingStateNumber)
- : ParserRuleContext(parent, invokingStateNumber) {
-}
-
-size_t RuleContextWithAltNum::getAltNumber() const {
- return altNum;
-}
-
-void RuleContextWithAltNum::setAltNumber(size_t number) {
- altNum = number;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.h b/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.h
deleted file mode 100644
index 995d9aa7b1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuleContextWithAltNum.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "ParserRuleContext.h"
-
-namespace antlr4 {
-
- /// A handy class for use with
- ///
- /// options {contextSuperClass=org.antlr.v4.runtime.RuleContextWithAltNum;}
- ///
- /// that provides a backing field / impl for the outer alternative number
- /// matched for an internal parse tree node.
- ///
-  /// I'm only putting this into the runtime as I'm certain I'm the only one that
-  /// will really ever use this.
- class ANTLR4CPP_PUBLIC RuleContextWithAltNum : public ParserRuleContext {
- public:
- size_t altNum = 0;
-
- RuleContextWithAltNum();
- RuleContextWithAltNum(ParserRuleContext *parent, int invokingStateNumber);
-
- virtual size_t getAltNumber() const override;
- virtual void setAltNumber(size_t altNum) override;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.cpp b/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.cpp
deleted file mode 100644
index cf30d68587..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "RuntimeMetaData.h"
-#include "Version.h"
-
-using namespace antlr4;
-
-const std::string RuntimeMetaData::VERSION = ANTLRCPP_VERSION_STRING;
-
-std::string RuntimeMetaData::getRuntimeVersion() {
- return VERSION;
-}
-
-void RuntimeMetaData::checkVersion(const std::string &generatingToolVersion, const std::string &compileTimeVersion) {
- std::string runtimeVersion = VERSION;
- bool runtimeConflictsWithGeneratingTool = false;
- bool runtimeConflictsWithCompileTimeTool = false;
-
- if (generatingToolVersion != "") {
- runtimeConflictsWithGeneratingTool = runtimeVersion != generatingToolVersion
- && getMajorMinorVersion(runtimeVersion) != getMajorMinorVersion(generatingToolVersion);
- }
-
- runtimeConflictsWithCompileTimeTool = runtimeVersion != compileTimeVersion
- && getMajorMinorVersion(runtimeVersion) != getMajorMinorVersion(compileTimeVersion);
-
- if (runtimeConflictsWithGeneratingTool) {
- std::cerr << "ANTLR Tool version " << generatingToolVersion << " used for code generation does not match "
- "the current runtime version " << runtimeVersion << std::endl;
- }
- if (runtimeConflictsWithCompileTimeTool) {
- std::cerr << "ANTLR Runtime version " << compileTimeVersion << " used for parser compilation does not match "
- "the current runtime version " << runtimeVersion << std::endl;
- }
-}
-
-std::string RuntimeMetaData::getMajorMinorVersion(const std::string &version) {
- size_t firstDot = version.find('.');
- size_t secondDot = firstDot != std::string::npos ? version.find('.', firstDot + 1) : std::string::npos;
- size_t firstDash = version.find('-');
- size_t referenceLength = version.size();
- if (secondDot != std::string::npos) {
- referenceLength = std::min(referenceLength, secondDot);
- }
-
- if (firstDash != std::string::npos) {
- referenceLength = std::min(referenceLength, firstDash);
- }
-
- return version.substr(0, referenceLength);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.h b/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.h
deleted file mode 100644
index f178cfe9e8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/RuntimeMetaData.h
+++ /dev/null
@@ -1,155 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// This class provides access to the current version of the ANTLR 4 runtime
- /// library as compile-time and runtime constants, along with methods for
- /// checking for matching version numbers and notifying listeners in the case
- /// where a version mismatch is detected.
- ///
- /// <para>
- /// The runtime version information is provided by <seealso cref="#VERSION"/> and
- /// <seealso cref="#getRuntimeVersion()"/>. Detailed information about these values is
- /// provided in the documentation for each member.</para>
- ///
- /// <para>
- /// The runtime version check is implemented by <seealso cref="#checkVersion"/>. Detailed
- /// information about incorporating this call into user code, as well as its use
- /// in generated code, is provided in the documentation for the method.</para>
- ///
- /// <para>
- /// Version strings x.y and x.y.z are considered "compatible" and no error
- /// would be generated. Likewise, version strings x.y-SNAPSHOT and x.y.z are
- /// considered "compatible" because the major and minor components x.y
- /// are the same in each.</para>
- ///
- /// <para>
-  /// To trap any error messages issued by this code, redirect std::cerr
-  /// in your main() startup code.
- /// </para>
- ///
- /// @since 4.3
- /// </summary>
- class ANTLR4CPP_PUBLIC RuntimeMetaData {
- public:
- /// A compile-time constant containing the current version of the ANTLR 4
- /// runtime library.
- ///
- /// <para>
- /// This compile-time constant value allows generated parsers and other
- /// libraries to include a literal reference to the version of the ANTLR 4
- /// runtime library the code was compiled against. At each release, we
- /// change this value.</para>
- ///
- /// <para>Version numbers are assumed to have the form
- ///
- /// <em>major</em>.<em>minor</em>.<em>patch</em>.<em>revision</em>-<em>suffix</em>,
- ///
- /// with the individual components defined as follows.</para>
- ///
- /// <ul>
- /// <li><em>major</em> is a required non-negative integer, and is equal to
- /// {@code 4} for ANTLR 4.</li>
- /// <li><em>minor</em> is a required non-negative integer.</li>
- /// <li><em>patch</em> is an optional non-negative integer. When
- /// <em>patch</em> is omitted, the {@code .} (dot) appearing before it is
- /// also omitted.</li>
- /// <li><em>revision</em> is an optional non-negative integer, and may only
- /// be included when <em>patch</em> is also included. When <em>revision</em>
- /// is omitted, the {@code .} (dot) appearing before it is also omitted.</li>
- /// <li><em>suffix</em> is an optional string. When <em>suffix</em> is
- /// omitted, the {@code -} (hyphen-minus) appearing before it is also
- /// omitted.</li>
- /// </ul>
- static const std::string VERSION;
-
- /// <summary>
- /// Gets the currently executing version of the ANTLR 4 runtime library.
- ///
- /// <para>
- /// This method provides runtime access to the <seealso cref="#VERSION"/> field, as
- /// opposed to directly referencing the field as a compile-time constant.</para>
- /// </summary>
- /// <returns> The currently executing version of the ANTLR 4 library </returns>
-
- static std::string getRuntimeVersion();
-
- /// <summary>
- /// This method provides the ability to detect mismatches between the version
- /// of ANTLR 4 used to generate a parser, the version of the ANTLR runtime a
- /// parser was compiled against, and the version of the ANTLR runtime which
- /// is currently executing.
- ///
- /// <para>
- /// The version check is designed to detect the following two specific
- /// scenarios.</para>
- ///
- /// <ul>
- /// <li>The ANTLR Tool version used for code generation does not match the
- /// currently executing runtime version.</li>
- /// <li>The ANTLR Runtime version referenced at the time a parser was
- /// compiled does not match the currently executing runtime version.</li>
- /// </ul>
- ///
- /// <para>
- /// Starting with ANTLR 4.3, the code generator emits a call to this method
- /// using two constants in each generated lexer and parser: a hard-coded
- /// constant indicating the version of the tool used to generate the parser
- /// and a reference to the compile-time constant <seealso cref="#VERSION"/>. At
- /// runtime, this method is called during the initialization of the generated
- /// parser to detect mismatched versions, and notify the registered listeners
- /// prior to creating instances of the parser.</para>
- ///
- /// <para>
- /// This method does not perform any detection or filtering of semantic
- /// changes between tool and runtime versions. It simply checks for a
- /// version match and emits an error to stderr if a difference
- /// is detected.</para>
- ///
- /// <para>
- /// Note that some breaking changes between releases could result in other
- /// types of runtime exceptions, such as a <seealso cref="LinkageError"/>, prior to
- /// calling this method. In these cases, the underlying version mismatch will
- /// not be reported here. This method is primarily intended to
- /// notify users of potential semantic changes between releases that do not
- /// result in binary compatibility problems which would be detected by the
- /// class loader. As with semantic changes, changes that break binary
- /// compatibility between releases are mentioned in the release notes
- /// accompanying the affected release.</para>
- ///
- /// <para>
- /// <strong>Additional note for target developers:</strong> The version check
- /// implemented by this class is designed to address specific compatibility
- /// concerns that may arise during the execution of Java applications. Other
- /// targets should consider the implementation of this method in the context
- /// of that target's known execution environment, which may or may not
- /// resemble the design provided for the Java target.</para>
- /// </summary>
- /// <param name="generatingToolVersion"> The version of the tool used to generate a parser.
- /// This value may be null when called from user code that was not generated
- /// by, and does not reference, the ANTLR 4 Tool itself. </param>
- /// <param name="compileTimeVersion"> The version of the runtime the parser was
- /// compiled against. This should always be passed using a direct reference
- /// to <seealso cref="#VERSION"/>. </param>
- static void checkVersion(const std::string &generatingToolVersion, const std::string &compileTimeVersion);
-
- /// <summary>
- /// Gets the major and minor version numbers from a version string. For
-    /// details about the syntax of the input {@code version}, see the documentation for <seealso cref="#VERSION"/>.
- /// E.g., from x.y.z return x.y.
- /// </summary>
- /// <param name="version"> The complete version string. </param>
- /// <returns> A string of the form <em>major</em>.<em>minor</em> containing
- /// only the major and minor components of the version string. </returns>
- static std::string getMajorMinorVersion(const std::string &version);
- };
-
-} // namespace antlr4
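
The version helpers above need no generated code, so they can be exercised directly; the tool-version literal below is made up for illustration.

#include <iostream>
#include "antlr4-runtime.h"

int main() {
  std::cout << antlr4::RuntimeMetaData::getRuntimeVersion() << "\n";
  // Major.minor extraction: patch, revision and "-suffix" parts are stripped.
  std::cout << antlr4::RuntimeMetaData::getMajorMinorVersion("4.13.1-SNAPSHOT") << "\n";  // prints 4.13
  // Generated recognizers call this during initialization; mismatches only warn on stderr.
  antlr4::RuntimeMetaData::checkVersion("4.13.1", antlr4::RuntimeMetaData::VERSION);
  return 0;
}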
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Token.cpp b/contrib/libs/antlr4_cpp_runtime/src/Token.cpp
deleted file mode 100644
index 31266b42d1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Token.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-antlr4::Token::~Token() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Token.h b/contrib/libs/antlr4_cpp_runtime/src/Token.h
deleted file mode 100644
index 832db740b3..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Token.h
+++ /dev/null
@@ -1,92 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-
-namespace antlr4 {
-
- /// A token has properties: text, type, line, character position in the line
- /// (so we can ignore tabs), token channel, index, and source from which
- /// we obtained this token.
- class ANTLR4CPP_PUBLIC Token {
- public:
- static constexpr size_t INVALID_TYPE = 0;
-
- /// During lookahead operations, this "token" signifies we hit rule end ATN state
- /// and did not follow it despite needing to.
- static constexpr size_t EPSILON = std::numeric_limits<size_t>::max() - 1;
- static constexpr size_t MIN_USER_TOKEN_TYPE = 1;
- static constexpr size_t EOF = IntStream::EOF;
-
- virtual ~Token();
-
- /// All tokens go to the parser (unless skip() is called in that rule)
- /// on a particular "channel". The parser tunes to a particular channel
- /// so that whitespace etc... can go to the parser on a "hidden" channel.
- static constexpr size_t DEFAULT_CHANNEL = 0;
-
- /// Anything on different channel than DEFAULT_CHANNEL is not parsed
- /// by parser.
- static constexpr size_t HIDDEN_CHANNEL = 1;
-
- /**
- * This is the minimum constant value which can be assigned to a
- * user-defined token channel.
- *
- * <p>
- * The non-negative numbers less than {@link #MIN_USER_CHANNEL_VALUE} are
- * assigned to the predefined channels {@link #DEFAULT_CHANNEL} and
- * {@link #HIDDEN_CHANNEL}.</p>
- *
- * @see Token#getChannel()
- */
- static constexpr size_t MIN_USER_CHANNEL_VALUE = 2;
-
- /// Get the text of the token.
- virtual std::string getText() const = 0;
-
- /// Get the token type of the token
- virtual size_t getType() const = 0;
-
- /// The line number on which the 1st character of this token was matched, line=1..n
- virtual size_t getLine() const = 0;
-
- /// The index of the first character of this token relative to the
- /// beginning of the line at which it occurs, 0..n-1
- virtual size_t getCharPositionInLine() const = 0;
-
-    /// Return the channel of this token. Each token can arrive at the parser
- /// on a different channel, but the parser only "tunes" to a single channel.
- /// The parser ignores everything not on DEFAULT_CHANNEL.
- virtual size_t getChannel() const = 0;
-
- /// An index from 0..n-1 of the token object in the input stream.
- /// This must be valid in order to print token streams and
- /// use TokenRewriteStream.
- ///
- /// Return INVALID_INDEX to indicate that this token was conjured up since
- /// it doesn't have a valid index.
- virtual size_t getTokenIndex() const = 0;
-
- /// The starting character index of the token
- /// This method is optional; return INVALID_INDEX if not implemented.
- virtual size_t getStartIndex() const = 0;
-
- /// The last character index of the token.
- /// This method is optional; return INVALID_INDEX if not implemented.
- virtual size_t getStopIndex() const = 0;
-
- /// Gets the <seealso cref="TokenSource"/> which created this token.
- virtual TokenSource *getTokenSource() const = 0;
-
- /// Gets the <seealso cref="CharStream"/> from which this token was derived.
- virtual CharStream *getInputStream() const = 0;
-
- virtual std::string toString() const = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenFactory.h b/contrib/libs/antlr4_cpp_runtime/src/TokenFactory.h
deleted file mode 100644
index 4eef044329..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenFactory.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
- /// The default mechanism for creating tokens. It's used by default in Lexer and
- /// the error handling strategy (to create missing tokens). Notifying the parser
-  /// of a new factory means that it notifies its token source and error strategy.
- template<typename Symbol>
- class ANTLR4CPP_PUBLIC TokenFactory {
- public:
- virtual ~TokenFactory() {}
-
- /// This is the method used to create tokens in the lexer and in the
-    /// error handling strategy. If text != null, then the start and stop positions
-    /// are wiped to -1 and the text override is set in the CommonToken.
- virtual std::unique_ptr<Symbol> create(std::pair<TokenSource *, CharStream *> source, size_t type, const std::string &text,
- size_t channel, size_t start, size_t stop, size_t line, size_t charPositionInLine) = 0;
-
- /// Generically useful
- virtual std::unique_ptr<Symbol> create(size_t type, const std::string &text) = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenSource.cpp b/contrib/libs/antlr4_cpp_runtime/src/TokenSource.cpp
deleted file mode 100644
index 6b9d7af2f7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenSource.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "TokenSource.h"
-
-antlr4::TokenSource::~TokenSource() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenSource.h b/contrib/libs/antlr4_cpp_runtime/src/TokenSource.h
deleted file mode 100644
index f05c27efac..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenSource.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenFactory.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// A source of tokens must provide a sequence of tokens via <seealso cref="#nextToken()"/>
-  /// and also must reveal its source of characters; <seealso cref="CommonToken"/>'s text is
-  /// computed from a <seealso cref="CharStream"/>; it only stores indices into the char
- /// stream.
- /// <p/>
- /// Errors from the lexer are never passed to the parser. Either you want to keep
- /// going or you do not upon token recognition error. If you do not want to
- /// continue lexing then you do not want to continue parsing. Just throw an
- /// exception not under <seealso cref="RecognitionException"/> and Java will naturally toss
- /// you all the way out of the recognizers. If you want to continue lexing then
- /// you should not throw an exception to the parser--it has already requested a
- /// token. Keep lexing until you get a valid one. Just report errors and keep
- /// going, looking for a valid token.
- /// </summary>
- class ANTLR4CPP_PUBLIC TokenSource {
- public:
- virtual ~TokenSource();
-
- /// Return a <seealso cref="Token"/> object from your input stream (usually a
- /// <seealso cref="CharStream"/>). Do not fail/return upon lexing error; keep chewing
- /// on the characters until you get a good one; errors are not passed through
- /// to the parser.
- virtual std::unique_ptr<Token> nextToken() = 0;
-
- /// <summary>
- /// Get the line number for the current position in the input stream. The
- /// first line in the input is line 1.
- /// </summary>
- /// <returns> The line number for the current position in the input stream, or
- /// 0 if the current token source does not track line numbers. </returns>
- virtual size_t getLine() const = 0;
-
- /// <summary>
- /// Get the index into the current line for the current position in the input
- /// stream. The first character on a line has position 0.
- /// </summary>
-    /// <returns> The character position within the current line in the input stream, or
-    /// (size_t)-1 if the current token source does not track character positions. </returns>
- virtual size_t getCharPositionInLine() = 0;
-
- /// <summary>
- /// Get the <seealso cref="CharStream"/> from which this token source is currently
- /// providing tokens.
- /// </summary>
- /// <returns> The <seealso cref="CharStream"/> associated with the current position in
- /// the input, or {@code null} if no input stream is available for the token
- /// source. </returns>
- virtual CharStream* getInputStream() = 0;
-
- /// <summary>
- /// Gets the name of the underlying input source. This method returns a
- /// non-null, non-empty string. If such a name is not known, this method
- /// returns <seealso cref="IntStream#UNKNOWN_SOURCE_NAME"/>.
- /// </summary>
- virtual std::string getSourceName() = 0;
-
- /// <summary>
- /// Set the <seealso cref="TokenFactory"/> this token source should use for creating
- /// <seealso cref="Token"/> objects from the input.
- /// </summary>
- /// <param name="factory"> The <seealso cref="TokenFactory"/> to use for creating tokens. </param>
- template<typename T1>
- void setTokenFactory(TokenFactory<T1> * /*factory*/) {}
-
- /// <summary>
- /// Gets the <seealso cref="TokenFactory"/> this token source is currently using for
- /// creating <seealso cref="Token"/> objects from the input.
- /// </summary>
- /// <returns> The <seealso cref="TokenFactory"/> currently used by this token source. </returns>
- virtual TokenFactory<CommonToken>* getTokenFactory() = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/TokenStream.cpp
deleted file mode 100644
index fbb1ab788a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenStream.cpp
+++ /dev/null
@@ -1,11 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "TokenStream.h"
-
-using namespace antlr4;
-
-TokenStream::~TokenStream() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenStream.h b/contrib/libs/antlr4_cpp_runtime/src/TokenStream.h
deleted file mode 100644
index 15b4f367a6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenStream.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "IntStream.h"
-
-namespace antlr4 {
-
- /// <summary>
- /// An <seealso cref="IntStream"/> whose symbols are <seealso cref="Token"/> instances.
- /// </summary>
- class ANTLR4CPP_PUBLIC TokenStream : public IntStream {
- /// <summary>
- /// Get the <seealso cref="Token"/> instance associated with the value returned by
- /// <seealso cref="#LA LA(k)"/>. This method has the same pre- and post-conditions as
- /// <seealso cref="IntStream#LA"/>. In addition, when the preconditions of this method
- /// are met, the return value is non-null and the value of
- /// {@code LT(k).getType()==LA(k)}.
- /// </summary>
- /// <seealso cref= IntStream#LA </seealso>
- public:
- virtual ~TokenStream();
-
- virtual Token* LT(ssize_t k) = 0;
-
- /// <summary>
- /// Gets the <seealso cref="Token"/> at the specified {@code index} in the stream. When
- /// the preconditions of this method are met, the return value is non-null.
- /// <p/>
- /// The preconditions for this method are the same as the preconditions of
- /// <seealso cref="IntStream#seek"/>. If the behavior of {@code seek(index)} is
- /// unspecified for the current state and given {@code index}, then the
- /// behavior of this method is also unspecified.
- /// <p/>
- /// The symbol referred to by {@code index} differs from {@code seek()} only
- /// in the case of filtering streams where {@code index} lies before the end
- /// of the stream. Unlike {@code seek()}, this method does not adjust
- /// {@code index} to point to a non-ignored symbol.
- /// </summary>
-    /// <exception cref="IllegalArgumentException"> if {@code index} is less than 0 </exception>
- /// <exception cref="UnsupportedOperationException"> if the stream does not support
- /// retrieving the token at the specified index </exception>
- virtual Token* get(size_t index) const = 0;
-
- /// Gets the underlying TokenSource which provides tokens for this stream.
- virtual TokenSource* getTokenSource() const = 0;
-
- /// <summary>
- /// Return the text of all tokens within the specified {@code interval}. This
- /// method behaves like the following code (including potential exceptions
-    /// for violating preconditions of <seealso cref="#get"/>), but may be optimized by the
- /// specific implementation.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = "";
- /// for (int i = interval.a; i <= interval.b; i++) {
- /// text += stream.get(i).getText();
- /// }
- /// </pre>
- /// </summary>
- /// <param name="interval"> The interval of tokens within this stream to get text
- /// for. </param>
- /// <returns> The text of all tokens within the specified interval in this
- /// stream.
- /// </returns>
- /// <exception cref="NullPointerException"> if {@code interval} is {@code null} </exception>
- virtual std::string getText(const misc::Interval &interval) = 0;
-
- /// <summary>
- /// Return the text of all tokens in the stream. This method behaves like the
- /// following code, including potential exceptions from the calls to
- /// <seealso cref="IntStream#size"/> and <seealso cref="#getText(Interval)"/>, but may be
- /// optimized by the specific implementation.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = stream.getText(new Interval(0, stream.size()));
- /// </pre>
- /// </summary>
- /// <returns> The text of all tokens in the stream. </returns>
- virtual std::string getText() = 0;
-
- /// <summary>
- /// Return the text of all tokens in the source interval of the specified
- /// context. This method behaves like the following code, including potential
- /// exceptions from the call to <seealso cref="#getText(Interval)"/>, but may be
- /// optimized by the specific implementation.
-    /// <p/>
- /// If {@code ctx.getSourceInterval()} does not return a valid interval of
- /// tokens provided by this stream, the behavior is unspecified.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = stream.getText(ctx.getSourceInterval());
- /// </pre>
- /// </summary>
- /// <param name="ctx"> The context providing the source interval of tokens to get
- /// text for. </param>
- /// <returns> The text of all tokens within the source interval of {@code ctx}. </returns>
- virtual std::string getText(RuleContext *ctx) = 0;
-
- /// <summary>
- /// Return the text of all tokens in this stream between {@code start} and
- /// {@code stop} (inclusive).
- /// <p/>
- /// If the specified {@code start} or {@code stop} token was not provided by
- /// this stream, or if the {@code stop} occurred before the {@code start}
- /// token, the behavior is unspecified.
- /// <p/>
- /// For streams which ensure that the <seealso cref="Token#getTokenIndex"/> method is
- /// accurate for all of its provided tokens, this method behaves like the
- /// following code. Other streams may implement this method in other ways
- /// provided the behavior is consistent with this at a high level.
- ///
- /// <pre>
- /// TokenStream stream = ...;
- /// String text = "";
- /// for (int i = start.getTokenIndex(); i <= stop.getTokenIndex(); i++) {
- /// text += stream.get(i).getText();
- /// }
- /// </pre>
- /// </summary>
- /// <param name="start"> The first token in the interval to get text for. </param>
- /// <param name="stop"> The last token in the interval to get text for (inclusive). </param>
- /// <returns> The text of all tokens lying between the specified {@code start}
- /// and {@code stop} tokens.
- /// </returns>
- /// <exception cref="UnsupportedOperationException"> if this stream does not support
- /// this method for the specified tokens </exception>
- virtual std::string getText(Token *start, Token *stop) = 0;
- };
-
-} // namespace antlr4
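
Once a concrete stream (for example antlr4::CommonTokenStream) has been filled from a lexer, the getText overloads above recover source text for the whole stream or for a token range. A minimal sketch; the generated lexer in the comment is a hypothetical name.

#include <iostream>
#include "antlr4-runtime.h"

// Typical setup (MyLexer is a hypothetical generated lexer):
//   antlr4::ANTLRInputStream input("a = b + c;");
//   MyLexer lexer(&input);
//   antlr4::CommonTokenStream tokens(&lexer);
//   tokens.fill();
void showText(antlr4::TokenStream &tokens) {
  if (tokens.size() == 0) return;
  std::cout << tokens.getText() << "\n";             // text of every token in the stream
  antlr4::Token *first = tokens.get(0);
  antlr4::Token *last = tokens.get(tokens.size() - 1);
  std::cout << tokens.getText(first, last) << "\n";  // inclusive token range, same result here
}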
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.cpp b/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.cpp
deleted file mode 100644
index 9050eb5c91..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.cpp
+++ /dev/null
@@ -1,425 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "misc/Interval.h"
-#include "Token.h"
-#include "TokenStream.h"
-
-#include "TokenStreamRewriter.h"
-
-using namespace antlr4;
-
-using antlr4::misc::Interval;
-
-TokenStreamRewriter::RewriteOperation::RewriteOperation(TokenStreamRewriter *outerInstance_, size_t index_)
- : outerInstance(outerInstance_) {
-
- InitializeInstanceFields();
- this->index = index_;
-}
-
-TokenStreamRewriter::RewriteOperation::RewriteOperation(TokenStreamRewriter *outerInstance_, size_t index_,
- const std::string& text_) : outerInstance(outerInstance_) {
-
- InitializeInstanceFields();
- this->index = index_;
- this->text = text_;
-}
-
-TokenStreamRewriter::RewriteOperation::~RewriteOperation()
-{
-}
-
-size_t TokenStreamRewriter::RewriteOperation::execute(std::string * /*buf*/) {
- return index;
-}
-
-std::string TokenStreamRewriter::RewriteOperation::toString() {
- std::string opName = "TokenStreamRewriter";
- size_t dollarIndex = opName.find('$');
- opName = opName.substr(dollarIndex + 1, opName.length() - (dollarIndex + 1));
- return "<" + opName + "@" + outerInstance->tokens->get(dollarIndex)->getText() + ":\"" + text + "\">";
-}
-
-void TokenStreamRewriter::RewriteOperation::InitializeInstanceFields() {
- instructionIndex = 0;
- index = 0;
-}
-
-TokenStreamRewriter::InsertBeforeOp::InsertBeforeOp(TokenStreamRewriter *outerInstance_, size_t index_, const std::string& text_)
-: RewriteOperation(outerInstance_, index_, text_), outerInstance(outerInstance_) {
-}
-
-size_t TokenStreamRewriter::InsertBeforeOp::execute(std::string *buf) {
- buf->append(text);
- if (outerInstance->tokens->get(index)->getType() != Token::EOF) {
- buf->append(outerInstance->tokens->get(index)->getText());
- }
- return index + 1;
-}
-
-TokenStreamRewriter::ReplaceOp::ReplaceOp(TokenStreamRewriter *outerInstance_, size_t from, size_t to, const std::string& text)
-: RewriteOperation(outerInstance_, from, text), outerInstance(outerInstance_) {
-
- InitializeInstanceFields();
- lastIndex = to;
-}
-
-size_t TokenStreamRewriter::ReplaceOp::execute(std::string *buf) {
- buf->append(text);
- return lastIndex + 1;
-}
-
-std::string TokenStreamRewriter::ReplaceOp::toString() {
- if (text.empty()) {
- return "<DeleteOp@" + outerInstance->tokens->get(index)->getText() + ".." + outerInstance->tokens->get(lastIndex)->getText() + ">";
- }
- return "<ReplaceOp@" + outerInstance->tokens->get(index)->getText() + ".." + outerInstance->tokens->get(lastIndex)->getText() + ":\"" + text + "\">";
-}
-
-void TokenStreamRewriter::ReplaceOp::InitializeInstanceFields() {
- lastIndex = 0;
-}
-
-//------------------ TokenStreamRewriter -------------------------------------------------------------------------------
-
-const std::string TokenStreamRewriter::DEFAULT_PROGRAM_NAME = "default";
-
-TokenStreamRewriter::TokenStreamRewriter(TokenStream *tokens_) : tokens(tokens_) {
- _programs[DEFAULT_PROGRAM_NAME].reserve(PROGRAM_INIT_SIZE);
-}
-
-TokenStreamRewriter::~TokenStreamRewriter() {
- for (const auto &program : _programs) {
- for (auto *operation : program.second) {
- delete operation;
- }
- }
-}
-
-TokenStream *TokenStreamRewriter::getTokenStream() {
- return tokens;
-}
-
-void TokenStreamRewriter::rollback(size_t instructionIndex) {
- rollback(DEFAULT_PROGRAM_NAME, instructionIndex);
-}
-
-void TokenStreamRewriter::rollback(const std::string &programName, size_t instructionIndex) {
- std::vector<RewriteOperation*> is = _programs[programName];
- if (is.size() > 0) {
- // std::map::insert() keeps an existing entry, so assign the truncated program instead.
- _programs[programName] = std::vector<RewriteOperation*>(is.begin() + MIN_TOKEN_INDEX, is.begin() + instructionIndex);
- }
-}
-
-void TokenStreamRewriter::deleteProgram() {
- deleteProgram(DEFAULT_PROGRAM_NAME);
-}
-
-void TokenStreamRewriter::deleteProgram(const std::string &programName) {
- rollback(programName, MIN_TOKEN_INDEX);
-}
-
-void TokenStreamRewriter::insertAfter(Token *t, const std::string& text) {
- insertAfter(DEFAULT_PROGRAM_NAME, t, text);
-}
-
-void TokenStreamRewriter::insertAfter(size_t index, const std::string& text) {
- insertAfter(DEFAULT_PROGRAM_NAME, index, text);
-}
-
-void TokenStreamRewriter::insertAfter(const std::string &programName, Token *t, const std::string& text) {
- insertAfter(programName, t->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::insertAfter(const std::string &programName, size_t index, const std::string& text) {
- // to insert after, just insert before next index (even if past end)
- insertBefore(programName, index + 1, text);
-}
-
-void TokenStreamRewriter::insertBefore(Token *t, const std::string& text) {
- insertBefore(DEFAULT_PROGRAM_NAME, t, text);
-}
-
-void TokenStreamRewriter::insertBefore(size_t index, const std::string& text) {
- insertBefore(DEFAULT_PROGRAM_NAME, index, text);
-}
-
-void TokenStreamRewriter::insertBefore(const std::string &programName, Token *t, const std::string& text) {
- insertBefore(programName, t->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::insertBefore(const std::string &programName, size_t index, const std::string& text) {
- RewriteOperation *op = new InsertBeforeOp(this, index, text); /* mem-check: deleted in d-tor */
- std::vector<RewriteOperation*> &rewrites = getProgram(programName);
- op->instructionIndex = rewrites.size();
- rewrites.push_back(op);
-}
-
-void TokenStreamRewriter::replace(size_t index, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, index, index, text);
-}
-
-void TokenStreamRewriter::replace(size_t from, size_t to, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, from, to, text);
-}
-
-void TokenStreamRewriter::replace(Token *indexT, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, indexT, indexT, text);
-}
-
-void TokenStreamRewriter::replace(Token *from, Token *to, const std::string& text) {
- replace(DEFAULT_PROGRAM_NAME, from, to, text);
-}
-
-void TokenStreamRewriter::replace(const std::string &programName, size_t from, size_t to, const std::string& text) {
- if (from > to || to >= tokens->size()) {
- throw IllegalArgumentException("replace: range invalid: " + std::to_string(from) + ".." + std::to_string(to) +
- "(size = " + std::to_string(tokens->size()) + ")");
- }
- RewriteOperation *op = new ReplaceOp(this, from, to, text); /* mem-check: deleted in d-tor */
- std::vector<RewriteOperation*> &rewrites = getProgram(programName);
- op->instructionIndex = rewrites.size();
- rewrites.push_back(op);
-}
-
-void TokenStreamRewriter::replace(const std::string &programName, Token *from, Token *to, const std::string& text) {
- replace(programName, from->getTokenIndex(), to->getTokenIndex(), text);
-}
-
-void TokenStreamRewriter::Delete(size_t index) {
- Delete(DEFAULT_PROGRAM_NAME, index, index);
-}
-
-void TokenStreamRewriter::Delete(size_t from, size_t to) {
- Delete(DEFAULT_PROGRAM_NAME, from, to);
-}
-
-void TokenStreamRewriter::Delete(Token *indexT) {
- Delete(DEFAULT_PROGRAM_NAME, indexT, indexT);
-}
-
-void TokenStreamRewriter::Delete(Token *from, Token *to) {
- Delete(DEFAULT_PROGRAM_NAME, from, to);
-}
-
-void TokenStreamRewriter::Delete(const std::string &programName, size_t from, size_t to) {
- std::string nullString;
- replace(programName, from, to, nullString);
-}
-
-void TokenStreamRewriter::Delete(const std::string &programName, Token *from, Token *to) {
- std::string nullString;
- replace(programName, from, to, nullString);
-}
-
-size_t TokenStreamRewriter::getLastRewriteTokenIndex() {
- return getLastRewriteTokenIndex(DEFAULT_PROGRAM_NAME);
-}
-
-size_t TokenStreamRewriter::getLastRewriteTokenIndex(const std::string &programName) {
- if (_lastRewriteTokenIndexes.find(programName) == _lastRewriteTokenIndexes.end()) {
- return INVALID_INDEX;
- }
- return _lastRewriteTokenIndexes[programName];
-}
-
-void TokenStreamRewriter::setLastRewriteTokenIndex(const std::string &programName, size_t i) {
- _lastRewriteTokenIndexes[programName] = i; // insert() would keep an existing entry, so assign
-}
-
-std::vector<TokenStreamRewriter::RewriteOperation*>& TokenStreamRewriter::getProgram(const std::string &name) {
- auto iterator = _programs.find(name);
- if (iterator == _programs.end()) {
- return initializeProgram(name);
- }
- return iterator->second;
-}
-
-std::vector<TokenStreamRewriter::RewriteOperation*>& TokenStreamRewriter::initializeProgram(const std::string &name) {
- _programs[name].reserve(PROGRAM_INIT_SIZE);
- return _programs[name];
-}
-
-std::string TokenStreamRewriter::getText() {
- return getText(DEFAULT_PROGRAM_NAME, Interval(0UL, tokens->size() - 1));
-}
-
-std::string TokenStreamRewriter::getText(std::string programName) {
- return getText(programName, Interval(0UL, tokens->size() - 1));
-}
-
-std::string TokenStreamRewriter::getText(const Interval &interval) {
- return getText(DEFAULT_PROGRAM_NAME, interval);
-}
-
-std::string TokenStreamRewriter::getText(const std::string &programName, const Interval &interval) {
- std::vector<TokenStreamRewriter::RewriteOperation*> &rewrites = _programs[programName];
- size_t start = interval.a;
- size_t stop = interval.b;
-
- // ensure start/end are in range
- if (stop > tokens->size() - 1) {
- stop = tokens->size() - 1;
- }
- if (start == INVALID_INDEX) {
- start = 0;
- }
-
- if (rewrites.empty()) {
- return tokens->getText(interval); // no instructions to execute
- }
- std::string buf;
-
- // First, optimize instruction stream
- std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> indexToOp = reduceToSingleOperationPerIndex(rewrites);
-
- // Walk buffer, executing instructions and emitting tokens
- size_t i = start;
- while (i <= stop && i < tokens->size()) {
- RewriteOperation *op = indexToOp[i];
- indexToOp.erase(i); // remove so any left have index size-1
- Token *t = tokens->get(i);
- if (op == nullptr) {
- // no operation at that index, just dump token
- if (t->getType() != Token::EOF) {
- buf.append(t->getText());
- }
- i++; // move to next token
- }
- else {
- i = op->execute(&buf); // execute operation and skip
- }
- }
-
- // include stuff after end if it's last index in buffer
- // So, if they did an insertAfter(lastValidIndex, "foo"), include
- // foo if end==lastValidIndex.
- if (stop == tokens->size() - 1) {
- // Scan any remaining operations after last token
- // should be included (they will be inserts).
- for (auto op : indexToOp) {
- if (op.second->index >= tokens->size() - 1) {
- buf.append(op.second->text);
- }
- }
- }
- return buf;
-}
-
-std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> TokenStreamRewriter::reduceToSingleOperationPerIndex(
- std::vector<TokenStreamRewriter::RewriteOperation*> &rewrites) {
-
-
- // WALK REPLACES
- for (size_t i = 0; i < rewrites.size(); ++i) {
- TokenStreamRewriter::RewriteOperation *op = rewrites[i];
- ReplaceOp *rop = dynamic_cast<ReplaceOp *>(op);
- if (rop == nullptr)
- continue;
-
- // Wipe prior inserts within range
- std::vector<InsertBeforeOp *> inserts = getKindOfOps<InsertBeforeOp>(rewrites, i);
- for (auto *iop : inserts) {
- if (iop->index == rop->index) {
- // E.g., insert before 2, delete 2..2; update replace
- // text to include insert before, kill insert
- rop->text = iop->text + (!rop->text.empty() ? rop->text : ""); // combine before freeing iop
- delete rewrites[iop->instructionIndex];
- rewrites[iop->instructionIndex] = nullptr;
- }
- else if (iop->index > rop->index && iop->index <= rop->lastIndex) {
- // delete insert as it's a no-op.
- delete rewrites[iop->instructionIndex];
- rewrites[iop->instructionIndex] = nullptr;
- }
- }
- // Drop any prior replaces contained within
- std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
- for (auto *prevRop : prevReplaces) {
- if (prevRop->index >= rop->index && prevRop->lastIndex <= rop->lastIndex) {
- // delete replace as it's a no-op.
- delete rewrites[prevRop->instructionIndex];
- rewrites[prevRop->instructionIndex] = nullptr;
- continue;
- }
- // throw exception unless disjoint or identical
- bool disjoint = prevRop->lastIndex < rop->index || prevRop->index > rop->lastIndex;
- // Delete special case of replace (text==null):
- // D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- if (prevRop->text.empty() && rop->text.empty() && !disjoint) {
- delete rewrites[prevRop->instructionIndex];
- rewrites[prevRop->instructionIndex] = nullptr; // kill first delete
- rop->index = std::min(prevRop->index, rop->index);
- rop->lastIndex = std::max(prevRop->lastIndex, rop->lastIndex);
- std::cout << "new rop " << rop << std::endl;
- }
- else if (!disjoint) {
- throw IllegalArgumentException("replace op boundaries of " + rop->toString() +
- " overlap with previous " + prevRop->toString());
- }
- }
- }
-
- // WALK INSERTS
- for (size_t i = 0; i < rewrites.size(); i++) {
- InsertBeforeOp *iop = dynamic_cast<InsertBeforeOp *>(rewrites[i]);
- if (iop == nullptr)
- continue;
-
- // combine current insert with prior if any at same index
-
- std::vector<InsertBeforeOp *> prevInserts = getKindOfOps<InsertBeforeOp>(rewrites, i);
- for (auto *prevIop : prevInserts) {
- if (prevIop->index == iop->index) { // combine objects
- // convert to strings...we're in process of toString'ing
- // whole token buffer so no lazy eval issue with any templates
- iop->text = catOpText(&iop->text, &prevIop->text);
- // delete redundant prior insert
- delete rewrites[prevIop->instructionIndex];
- rewrites[prevIop->instructionIndex] = nullptr;
- }
- }
- // look for replaces where iop.index is in range; error
- std::vector<ReplaceOp*> prevReplaces = getKindOfOps<ReplaceOp>(rewrites, i);
- for (auto *rop : prevReplaces) {
- if (iop->index == rop->index) {
- rop->text = catOpText(&iop->text, &rop->text);
- delete rewrites[i];
- rewrites[i] = nullptr; // delete current insert
- break; // iop was just freed; do not touch it in later iterations
- }
- if (iop->index >= rop->index && iop->index <= rop->lastIndex) {
- throw IllegalArgumentException("insert op " + iop->toString() + " within boundaries of previous " + rop->toString());
- }
- }
- }
-
- std::unordered_map<size_t, TokenStreamRewriter::RewriteOperation*> m;
- for (TokenStreamRewriter::RewriteOperation *op : rewrites) {
- if (op == nullptr) { // ignore deleted ops
- continue;
- }
- if (m.count(op->index) > 0) {
- throw RuntimeException("should only be one op per index");
- }
- m[op->index] = op;
- }
-
- return m;
-}
-
-std::string TokenStreamRewriter::catOpText(std::string *a, std::string *b) {
- std::string x = "";
- std::string y = "";
- if (a != nullptr) {
- x = *a;
- }
- if (b != nullptr) {
- y = *b;
- }
- return x + y;
-}
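The removed TokenStreamRewriter.cpp above implements lazy rewriting: insertBefore/replace/Delete only queue RewriteOperation objects, and reduceToSingleOperationPerIndex() merges them per token index when getText() runs. A minimal sketch of that merging behaviour, assuming the caller already has a CommonTokenStream with at least three real tokens (the stream and the inserted texts are illustrative, not part of the removed code):

    #include "antlr4-runtime.h"

    // Sketch only: "tokens" is assumed to come from some generated lexer.
    std::string mergeExample(antlr4::CommonTokenStream &tokens) {
      tokens.fill(); // make sure size() reflects the whole input
      antlr4::TokenStreamRewriter rewriter(&tokens);

      rewriter.insertBefore(1, "<<"); // queues an InsertBeforeOp at index 1
      rewriter.replace(1, 2, "X");    // queues a ReplaceOp over indexes 1..2

      // getText() folds the insert into the replace, so tokens 1..2
      // render as "<<X" while the rest of the stream is emitted verbatim.
      return rewriter.getText();
    }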
diff --git a/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.h b/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.h
deleted file mode 100644
index 929056a3f9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/TokenStreamRewriter.h
+++ /dev/null
@@ -1,295 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-
- /**
- * Useful for rewriting out a buffered input token stream after doing some
- * augmentation or other manipulations on it.
- *
- * <p>
- * You can insert stuff, replace, and delete chunks. Note that the operations
- * are done lazily--only if you convert the buffer to a {@link String} with
- * {@link TokenStream#getText()}. This is very efficient because you are not
- * moving data around all the time. As the buffer of tokens is converted to
- * strings, the {@link #getText()} method(s) scan the input token stream and
- * check to see if there is an operation at the current index. If so, the
- * operation is done and then normal {@link String} rendering continues on the
- * buffer. This is like having multiple Turing machine instruction streams
- * (programs) operating on a single input tape. :)</p>
- *
- * <p>
- * This rewriter makes no modifications to the token stream. It does not ask the
- * stream to fill itself up nor does it advance the input cursor. The token
- * stream {@link TokenStream#index()} will return the same value before and
- * after any {@link #getText()} call.</p>
- *
- * <p>
- * The rewriter only works on tokens that you have in the buffer and ignores the
- * current input cursor. If you are buffering tokens on-demand, calling
- * {@link #getText()} halfway through the input will only do rewrites for those
- * tokens in the first half of the file.</p>
- *
- * <p>
- * Since the operations are done lazily at {@link #getText}-time, operations do
- * not screw up the token index values. That is, an insert operation at token
- * index {@code i} does not change the index values for tokens
- * {@code i}+1..n-1.</p>
- *
- * <p>
- * Because operations never actually alter the buffer, you may always get the
- * original token stream back without undoing anything. Since the instructions
- * are queued up, you can easily simulate transactions and roll back any changes
- * if there is an error just by removing instructions. For example,</p>
- *
- * <pre>
- * CharStream input = new ANTLRFileStream("input");
- * TLexer lex = new TLexer(input);
- * CommonTokenStream tokens = new CommonTokenStream(lex);
- * T parser = new T(tokens);
- * TokenStreamRewriter rewriter = new TokenStreamRewriter(tokens);
- * parser.startRule();
- * </pre>
- *
- * <p>
- * Then in the rules, you can execute (assuming rewriter is visible):</p>
- *
- * <pre>
- * Token t,u;
- * ...
- * rewriter.insertAfter(t, "text to put after t");
- * rewriter.insertAfter(u, "text after u");
- * System.out.println(rewriter.getText());
- * </pre>
- *
- * <p>
- * You can also have multiple "instruction streams" and get multiple rewrites
- * from a single pass over the input. Just name the instruction streams and use
- * that name again when printing the buffer. This could be useful for generating
- * a C file and also its header file--all from the same buffer:</p>
- *
- * <pre>
- * rewriter.insertAfter("pass1", t, "text to put after t");}
- * rewriter.insertAfter("pass2", u, "text after u");}
- * System.out.println(rewriter.getText("pass1"));
- * System.out.println(rewriter.getText("pass2"));
- * </pre>
- *
- * <p>
- * If you don't use named rewrite streams, a "default" stream is used as the
- * first example shows.</p>
- */
- class ANTLR4CPP_PUBLIC TokenStreamRewriter {
- public:
- static const std::string DEFAULT_PROGRAM_NAME;
- static constexpr size_t PROGRAM_INIT_SIZE = 100;
- static constexpr size_t MIN_TOKEN_INDEX = 0;
-
- TokenStreamRewriter(TokenStream *tokens);
- virtual ~TokenStreamRewriter();
-
- TokenStream *getTokenStream();
-
- virtual void rollback(size_t instructionIndex);
-
- /// Rollback the instruction stream for a program so that
- /// the indicated instruction (via instructionIndex) is no
- /// longer in the stream. UNTESTED!
- virtual void rollback(const std::string &programName, size_t instructionIndex);
-
- virtual void deleteProgram();
-
- /// Reset the program so that no instructions exist.
- virtual void deleteProgram(const std::string &programName);
- virtual void insertAfter(Token *t, const std::string& text);
- virtual void insertAfter(size_t index, const std::string& text);
- virtual void insertAfter(const std::string &programName, Token *t, const std::string& text);
- virtual void insertAfter(const std::string &programName, size_t index, const std::string& text);
-
- virtual void insertBefore(Token *t, const std::string& text);
- virtual void insertBefore(size_t index, const std::string& text);
- virtual void insertBefore(const std::string &programName, Token *t, const std::string& text);
- virtual void insertBefore(const std::string &programName, size_t index, const std::string& text);
-
- virtual void replace(size_t index, const std::string& text);
- virtual void replace(size_t from, size_t to, const std::string& text);
- virtual void replace(Token *indexT, const std::string& text);
- virtual void replace(Token *from, Token *to, const std::string& text);
- virtual void replace(const std::string &programName, size_t from, size_t to, const std::string& text);
- virtual void replace(const std::string &programName, Token *from, Token *to, const std::string& text);
-
- virtual void Delete(size_t index);
- virtual void Delete(size_t from, size_t to);
- virtual void Delete(Token *indexT);
- virtual void Delete(Token *from, Token *to);
- virtual void Delete(const std::string &programName, size_t from, size_t to);
- virtual void Delete(const std::string &programName, Token *from, Token *to);
-
- virtual size_t getLastRewriteTokenIndex();
-
- /// Return the text from the original tokens altered per the
- /// instructions given to this rewriter.
- virtual std::string getText();
-
- /** Return the text from the original tokens altered per the
- * instructions given to this rewriter in programName.
- */
- std::string getText(std::string programName);
-
- /// Return the text associated with the tokens in the interval from the
- /// original token stream but with the alterations given to this rewriter.
- /// The interval refers to the indexes in the original token stream.
- /// We do not alter the token stream in any way, so the indexes
- /// and intervals are still consistent. Includes any operations done
- /// to the first and last token in the interval. So, if you did an
- /// insertBefore on the first token, you would get that insertion.
- /// The same is true if you do an insertAfter the stop token.
- virtual std::string getText(const misc::Interval &interval);
-
- virtual std::string getText(const std::string &programName, const misc::Interval &interval);
-
- protected:
- class RewriteOperation {
- public:
- /// Token buffer index.
- size_t index;
- std::string text;
-
- /// What index into rewrites List are we?
- size_t instructionIndex;
-
- RewriteOperation(TokenStreamRewriter *outerInstance, size_t index);
- RewriteOperation(TokenStreamRewriter *outerInstance, size_t index, const std::string& text);
- virtual ~RewriteOperation();
-
- /// Execute the rewrite operation by possibly adding to the buffer.
- /// Return the index of the next token to operate on.
-
- virtual size_t execute(std::string *buf);
- virtual std::string toString();
-
- private:
- TokenStreamRewriter *const outerInstance;
- void InitializeInstanceFields();
- };
-
- class InsertBeforeOp : public RewriteOperation {
- private:
- TokenStreamRewriter *const outerInstance;
-
- public:
- InsertBeforeOp(TokenStreamRewriter *outerInstance, size_t index, const std::string& text);
-
- virtual size_t execute(std::string *buf) override;
- };
-
- class ReplaceOp : public RewriteOperation {
- private:
- TokenStreamRewriter *const outerInstance;
-
- public:
- size_t lastIndex;
-
- ReplaceOp(TokenStreamRewriter *outerInstance, size_t from, size_t to, const std::string& text);
- virtual size_t execute(std::string *buf) override;
- virtual std::string toString() override;
-
- private:
- void InitializeInstanceFields();
- };
-
- /// Our source stream
- TokenStream *const tokens;
-
- /// You may have multiple, named streams of rewrite operations.
- /// I'm calling these things "programs."
- /// Maps String (name) -> rewrite (List)
- std::map<std::string, std::vector<RewriteOperation*>> _programs;
-
- /// <summary>
- /// Map String (program name) -> Integer index </summary>
- std::map<std::string, size_t> _lastRewriteTokenIndexes;
- virtual size_t getLastRewriteTokenIndex(const std::string &programName);
- virtual void setLastRewriteTokenIndex(const std::string &programName, size_t i);
- virtual std::vector<RewriteOperation*>& getProgram(const std::string &name);
-
- /// <summary>
- /// We need to combine operations and report invalid operations (like
- /// overlapping replaces that are not completely nested). Inserts to
- /// same index need to be combined etc... Here are the cases:
- ///
- /// I.i.u I.j.v leave alone, nonoverlapping
- /// I.i.u I.i.v combine: Iivu
- ///
- /// R.i-j.u R.x-y.v | i-j in x-y delete first R
- /// R.i-j.u R.i-j.v delete first R
- /// R.i-j.u R.x-y.v | x-y in i-j ERROR
- /// R.i-j.u R.x-y.v | boundaries overlap ERROR
- ///
- /// Delete special case of replace (text==null):
- /// D.i-j.u D.x-y.v | boundaries overlap combine to max(min)..max(right)
- ///
- /// I.i.u R.x-y.v | i in (x+1)-y delete I (since insert before
- /// we're not deleting i)
- /// I.i.u R.x-y.v | i not in (x+1)-y leave alone, nonoverlapping
- /// R.x-y.v I.i.u | i in x-y ERROR
- /// R.x-y.v I.x.u R.x-y.uv (combine, delete I)
- /// R.x-y.v I.i.u | i not in x-y leave alone, nonoverlapping
- ///
- /// I.i.u = insert u before op @ index i
- /// R.x-y.u = replace x-y indexed tokens with u
- ///
- /// First we need to examine replaces. For any replace op:
- ///
- /// 1. wipe out any insertions before op within that range.
- /// 2. Drop any replace op before that is contained completely within
- /// that range.
- /// 3. Throw exception upon boundary overlap with any previous replace.
- ///
- /// Then we can deal with inserts:
- ///
- /// 1. for any inserts to same index, combine even if not adjacent.
- /// 2. for any prior replace with same left boundary, combine this
- /// insert with replace and delete this replace.
- /// 3. throw exception if index in same range as previous replace
- ///
- /// Don't actually delete; make op null in list. Easier to walk list.
- /// Later we can throw as we add to index -> op map.
- ///
- /// Note that I.2 R.2-2 will wipe out I.2 even though, technically, the
- /// inserted stuff would be before the replace range. But, if you
- /// add tokens in front of a method body '{' and then delete the method
- /// body, I think the stuff before the '{' you added should disappear too.
- ///
- /// Return a map from token index to operation.
- /// </summary>
- virtual std::unordered_map<size_t, RewriteOperation*> reduceToSingleOperationPerIndex(std::vector<RewriteOperation*> &rewrites);
-
- virtual std::string catOpText(std::string *a, std::string *b);
-
- /// Get all operations before an index of a particular kind.
- template <typename T>
- std::vector<T *> getKindOfOps(std::vector<RewriteOperation *> rewrites, size_t before) {
- std::vector<T *> ops;
- for (size_t i = 0; i < before && i < rewrites.size(); i++) {
- T *op = dynamic_cast<T *>(rewrites[i]);
- if (op == nullptr) { // ignore deleted or non matching entries
- continue;
- }
- ops.push_back(op);
- }
- return ops;
- }
-
- private:
- std::vector<RewriteOperation *>& initializeProgram(const std::string &name);
-
- };
-
-} // namespace antlr4
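The class comment above keeps the Java examples from the original ANTLR documentation. In the C++ runtime the same "named instruction stream" idea looks roughly like the sketch below; the program names and inserted texts are illustrative, and the token stream is assumed to come from a generated lexer:

    #include <string>
    #include <utility>
    #include "antlr4-runtime.h"

    // Two independent rewrite programs over one token stream.
    std::pair<std::string, std::string> twoPrograms(antlr4::CommonTokenStream &tokens) {
      tokens.fill();
      antlr4::TokenStreamRewriter rewriter(&tokens);

      const size_t firstToken = 0;
      rewriter.insertBefore("header", firstToken, "// header version\n");
      rewriter.insertBefore("source", firstToken, "// source version\n");

      // Each named program is rendered independently from the same tokens.
      return { rewriter.getText("header"), rewriter.getText("source") };
    }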
diff --git a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.cpp
deleted file mode 100644
index bbfb8848fd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-#include "Exceptions.h"
-#include "support/Utf8.h"
-
-#include "UnbufferedCharStream.h"
-
-using namespace antlrcpp;
-using namespace antlr4;
-using namespace antlr4::misc;
-
-UnbufferedCharStream::UnbufferedCharStream(std::wistream &input)
- : _p(0), _numMarkers(0), _lastChar(0), _lastCharBufferStart(0), _currentCharIndex(0), _input(input) {
- // The vector's size is what used to be n in Java code.
- fill(1); // prime
-}
-
-void UnbufferedCharStream::consume() {
- if (LA(1) == EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- // buf always has at least data[p==0] in this method due to ctor
- _lastChar = _data[_p]; // track last char for LA(-1)
-
- if (_p == _data.size() - 1 && _numMarkers == 0) {
- size_t capacity = _data.capacity();
- _data.clear();
- _data.reserve(capacity);
-
- _p = 0;
- _lastCharBufferStart = _lastChar;
- } else {
- _p++;
- }
-
- _currentCharIndex++;
- sync(1);
-}
-
-void UnbufferedCharStream::sync(size_t want) {
- if (_p + want <= _data.size()) // Already enough data loaded?
- return;
-
- fill(_p + want - _data.size());
-}
-
-size_t UnbufferedCharStream::fill(size_t n) {
- for (size_t i = 0; i < n; i++) {
- if (_data.size() > 0 && _data.back() == 0xFFFF) {
- return i;
- }
-
- try {
- char32_t c = nextChar();
- add(c);
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (IOException &ioe) {
- // throw_with_nested is not available before VS 2015.
- throw ioe;
-#else
- } catch (IOException & /*ioe*/) {
- std::throw_with_nested(RuntimeException());
-#endif
- }
- }
-
- return n;
-}
-
-char32_t UnbufferedCharStream::nextChar() {
- return _input.get();
-}
-
-void UnbufferedCharStream::add(char32_t c) {
- _data += c;
-}
-
-size_t UnbufferedCharStream::LA(ssize_t i) {
- if (i == -1) { // special case
- return _lastChar;
- }
-
- // We can look back only as many chars as we have buffered.
- ssize_t index = static_cast<ssize_t>(_p) + i - 1;
- if (index < 0) {
- throw IndexOutOfBoundsException();
- }
-
- if (i > 0) {
- sync(static_cast<size_t>(i)); // No need to sync if we look back.
- }
- if (static_cast<size_t>(index) >= _data.size()) {
- return EOF;
- }
-
- if (_data[static_cast<size_t>(index)] == std::char_traits<wchar_t>::eof()) {
- return EOF;
- }
-
- return _data[static_cast<size_t>(index)];
-}
-
-ssize_t UnbufferedCharStream::mark() {
- if (_numMarkers == 0) {
- _lastCharBufferStart = _lastChar;
- }
-
- ssize_t mark = -static_cast<ssize_t>(_numMarkers) - 1;
- _numMarkers++;
- return mark;
-}
-
-void UnbufferedCharStream::release(ssize_t marker) {
- ssize_t expectedMark = -static_cast<ssize_t>(_numMarkers);
- if (marker != expectedMark) {
- throw IllegalStateException("release() called with an invalid marker.");
- }
-
- _numMarkers--;
- if (_numMarkers == 0 && _p > 0) {
- _data.erase(0, _p);
- _p = 0;
- _lastCharBufferStart = _lastChar;
- }
-}
-
-size_t UnbufferedCharStream::index() {
- return _currentCharIndex;
-}
-
-void UnbufferedCharStream::seek(size_t index) {
- if (index == _currentCharIndex) {
- return;
- }
-
- if (index > _currentCharIndex) {
- sync(index - _currentCharIndex);
- index = std::min(index, getBufferStartIndex() + _data.size() - 1);
- }
-
- // index == bufferStartIndex should set p to 0
- ssize_t i = static_cast<ssize_t>(index) - static_cast<ssize_t>(getBufferStartIndex());
- if (i < 0) {
- throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index));
- } else if (i >= static_cast<ssize_t>(_data.size())) {
- throw UnsupportedOperationException("Seek to index outside buffer: " + std::to_string(index) +
- " not in " + std::to_string(getBufferStartIndex()) + ".." +
- std::to_string(getBufferStartIndex() + _data.size()));
- }
-
- _p = static_cast<size_t>(i);
- _currentCharIndex = index;
- if (_p == 0) {
- _lastChar = _lastCharBufferStart;
- } else {
- _lastChar = _data[_p - 1];
- }
-}
-
-size_t UnbufferedCharStream::size() {
- throw UnsupportedOperationException("Unbuffered stream cannot know its size");
-}
-
-std::string UnbufferedCharStream::getSourceName() const {
- if (name.empty()) {
- return UNKNOWN_SOURCE_NAME;
- }
-
- return name;
-}
-
-std::string UnbufferedCharStream::getText(const misc::Interval &interval) {
- if (interval.a < 0 || interval.b < interval.a - 1) {
- throw IllegalArgumentException("invalid interval");
- }
-
- size_t bufferStartIndex = getBufferStartIndex();
- if (!_data.empty() && _data.back() == 0xFFFF) {
- if (interval.a + interval.length() > bufferStartIndex + _data.size()) {
- throw IllegalArgumentException("the interval extends past the end of the stream");
- }
- }
-
- if (interval.a < static_cast<ssize_t>(bufferStartIndex) || interval.b >= ssize_t(bufferStartIndex + _data.size())) {
- throw UnsupportedOperationException("interval " + interval.toString() + " outside buffer: " +
- std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _data.size() - 1));
- }
- // convert from absolute to local index
- size_t i = interval.a - bufferStartIndex;
- auto maybeUtf8 = Utf8::strictEncode(std::u32string_view(_data).substr(i, interval.length()));
- if (!maybeUtf8.has_value()) {
- throw IllegalArgumentException("Unbuffered stream contains invalid Unicode code points");
- }
- return std::move(maybeUtf8).value();
-}
-
-std::string UnbufferedCharStream::toString() const {
- throw UnsupportedOperationException("Unbuffered stream cannot be materialized to a string");
-}
-
-size_t UnbufferedCharStream::getBufferStartIndex() const {
- return _currentCharIndex - _p;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.h b/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.h
deleted file mode 100644
index 5b05834f85..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedCharStream.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-
-namespace antlr4 {
-
- /// Do not buffer up the entire char stream. It does keep a small buffer
- /// for efficiency and also buffers while a mark exists (set by the
- /// lookahead prediction in parser). "Unbuffered" here refers to the fact
- /// that it doesn't buffer all of the data, not that it loads characters on demand.
- class ANTLR4CPP_PUBLIC UnbufferedCharStream : public CharStream {
- public:
- /// The name or source of this char stream.
- std::string name;
-
- explicit UnbufferedCharStream(std::wistream &input);
-
- void consume() override;
- size_t LA(ssize_t i) override;
-
- /// <summary>
- /// Return a marker that we can release later.
- /// <p/>
- /// The specific marker value used for this class allows for some level of
- /// protection against misuse where {@code seek()} is called on a mark or
- /// {@code release()} is called in the wrong order.
- /// </summary>
- ssize_t mark() override;
-
- /// <summary>
- /// Decrement number of markers, resetting buffer if we hit 0. </summary>
- /// <param name="marker"> </param>
- void release(ssize_t marker) override;
- size_t index() override;
-
- /// <summary>
- /// Seek to absolute character index, which might not be in the current
- /// sliding window. Move {@code p} to {@code index-bufferStartIndex}.
- /// </summary>
- void seek(size_t index) override;
- size_t size() override;
- std::string getSourceName() const override;
- std::string getText(const misc::Interval &interval) override;
-
- std::string toString() const override;
-
- protected:
- /// A moving window buffer of the data being scanned. While there's a marker,
- /// we keep adding to buffer. Otherwise, <seealso cref="#consume consume()"/> resets so
- /// we start filling at index 0 again.
- // UTF-32 encoded.
- std::u32string _data;
- typedef char32_t storage_type;
-
- /// <summary>
- /// 0..n-1 index into <seealso cref="#data data"/> of next character.
- /// <p/>
- /// The {@code LA(1)} character is {@code data[p]}. If {@code p == n}, we are
- /// out of buffered characters.
- /// </summary>
- size_t _p;
-
- /// <summary>
- /// Count up with <seealso cref="#mark mark()"/> and down with
- /// <seealso cref="#release release()"/>. When we {@code release()} the last mark,
- /// {@code numMarkers} reaches 0 and we reset the buffer. Copy
- /// {@code data[p]..data[n-1]} to {@code data[0]..data[(n-1)-p]}.
- /// </summary>
- size_t _numMarkers;
-
- /// This is the {@code LA(-1)} character for the current position.
- size_t _lastChar; // UTF-32
-
- /// <summary>
- /// When {@code numMarkers > 0}, this is the {@code LA(-1)} character for the
- /// first character in <seealso cref="#data data"/>. Otherwise, this is unspecified.
- /// </summary>
- size_t _lastCharBufferStart; // UTF-32
-
- /// <summary>
- /// Absolute character index. It's the index of the character about to be
- /// read via {@code LA(1)}. Goes from 0 to the number of characters in the
- /// entire stream, although the stream size is unknown before the end is
- /// reached.
- /// </summary>
- size_t _currentCharIndex;
-
- std::wistream &_input;
-
- /// <summary>
- /// Make sure we have 'want' elements from current position <seealso cref="#p p"/>.
- /// Last valid {@code p} index is {@code data.length-1}. {@code p+need-1} is
- /// the char index 'need' elements ahead. If we need 1 element,
- /// {@code (p+1-1)==p} must be less than {@code data.length}.
- /// </summary>
- virtual void sync(size_t want);
-
- /// <summary>
- /// Add {@code n} characters to the buffer. Returns the number of characters
- /// actually added to the buffer. If the return value is less than {@code n},
- /// then EOF was reached before {@code n} characters could be added.
- /// </summary>
- virtual size_t fill(size_t n);
-
- /// Override to provide different source of characters than
- /// <seealso cref="#input input"/>.
- virtual char32_t nextChar();
- virtual void add(char32_t c);
- size_t getBufferStartIndex() const;
- };
-
-} // namespace antlr4
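UnbufferedCharStream above is normally driven by a lexer, but its mark/consume protocol can also be exercised directly. A small sketch under that assumption (the input text is arbitrary and error handling is omitted):

    #include <sstream>
    #include "antlr4-runtime.h"

    // Count the characters in a wide stream using the sliding-window buffer.
    size_t countChars() {
      std::wistringstream source(L"abc");
      antlr4::UnbufferedCharStream stream(source);

      ssize_t marker = stream.mark();   // keeps the window from being trimmed
      size_t n = 0;
      while (stream.LA(1) != antlr4::IntStream::EOF) {
        stream.consume();               // sync() transparently refills the buffer
        ++n;
      }
      stream.release(marker);           // releasing the last mark trims the buffer again
      return n;                         // 3 for this input
    }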
diff --git a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.cpp b/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.cpp
deleted file mode 100644
index 16ff49e332..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.cpp
+++ /dev/null
@@ -1,270 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-#include "Exceptions.h"
-#include "assert.h"
-#include "TokenSource.h"
-#include "support/Arrays.h"
-#include "misc/Interval.h"
-#include "RuleContext.h"
-#include "WritableToken.h"
-
-#include "UnbufferedTokenStream.h"
-
-using namespace antlr4;
-
-UnbufferedTokenStream::UnbufferedTokenStream(TokenSource *tokenSource) : UnbufferedTokenStream(tokenSource, 256) {
-}
-
-UnbufferedTokenStream::UnbufferedTokenStream(TokenSource *tokenSource, int /*bufferSize*/)
- : _tokenSource(tokenSource), _lastToken(nullptr), _lastTokenBufferStart(nullptr)
-{
- InitializeInstanceFields();
- fill(1); // prime the pump
-}
-
-UnbufferedTokenStream::~UnbufferedTokenStream() {
-}
-
-Token* UnbufferedTokenStream::get(size_t i) const
-{ // get absolute index
- size_t bufferStartIndex = getBufferStartIndex();
- if (i < bufferStartIndex || i >= bufferStartIndex + _tokens.size()) {
- throw IndexOutOfBoundsException(std::string("get(") + std::to_string(i) + std::string(") outside buffer: ")
- + std::to_string(bufferStartIndex) + std::string("..") + std::to_string(bufferStartIndex + _tokens.size()));
- }
- return _tokens[i - bufferStartIndex].get();
-}
-
-Token* UnbufferedTokenStream::LT(ssize_t i)
-{
- if (i == -1) {
- return _lastToken;
- }
-
- sync(i);
- ssize_t index = static_cast<ssize_t>(_p) + i - 1;
- if (index < 0) {
- throw IndexOutOfBoundsException(std::string("LT(") + std::to_string(i) + std::string(") gives negative index"));
- }
-
- if (index >= static_cast<ssize_t>(_tokens.size())) {
- assert(_tokens.size() > 0 && _tokens.back()->getType() == EOF);
- return _tokens.back().get();
- }
-
- return _tokens[static_cast<size_t>(index)].get();
-}
-
-size_t UnbufferedTokenStream::LA(ssize_t i)
-{
- return LT(i)->getType();
-}
-
-TokenSource* UnbufferedTokenStream::getTokenSource() const
-{
- return _tokenSource;
-}
-
-std::string UnbufferedTokenStream::getText()
-{
- return "";
-}
-
-std::string UnbufferedTokenStream::getText(RuleContext* ctx)
-{
- return getText(ctx->getSourceInterval());
-}
-
-std::string UnbufferedTokenStream::getText(Token *start, Token *stop)
-{
- return getText(misc::Interval(start->getTokenIndex(), stop->getTokenIndex()));
-}
-
-void UnbufferedTokenStream::consume()
-{
- if (LA(1) == EOF) {
- throw IllegalStateException("cannot consume EOF");
- }
-
- // buf always has at least tokens[p==0] in this method due to ctor
- _lastToken = _tokens[_p].get(); // track last token for LT(-1)
-
- // if we're at last token and no markers, opportunity to flush buffer
- if (_p == _tokens.size() - 1 && _numMarkers == 0) {
- _tokens.clear();
- _p = 0;
- _lastTokenBufferStart = _lastToken;
- } else {
- ++_p;
- }
-
- ++_currentTokenIndex;
- sync(1);
-}
-
-/// <summary>
-/// Make sure we have 'need' elements from current position <seealso cref="#p p"/>. Last valid
-/// {@code p} index is {@code tokens.length-1}. {@code p+need-1} is the tokens index 'need' elements
-/// ahead. If we need 1 element, {@code (p+1-1)==p} must be less than {@code tokens.length}.
-/// </summary>
-void UnbufferedTokenStream::sync(ssize_t want)
-{
- ssize_t need = (static_cast<ssize_t>(_p) + want - 1) - static_cast<ssize_t>(_tokens.size()) + 1; // how many more elements do we need?
- if (need > 0) {
- fill(static_cast<size_t>(need));
- }
-}
-
-/// <summary>
-/// Add {@code n} elements to the buffer. Returns the number of tokens
-/// actually added to the buffer. If the return value is less than {@code n},
-/// then EOF was reached before {@code n} tokens could be added.
-/// </summary>
-size_t UnbufferedTokenStream::fill(size_t n)
-{
- for (size_t i = 0; i < n; i++) {
- if (_tokens.size() > 0 && _tokens.back()->getType() == EOF) {
- return i;
- }
-
- add(_tokenSource->nextToken());
- }
-
- return n;
-}
-
-void UnbufferedTokenStream::add(std::unique_ptr<Token> t)
-{
- WritableToken *writable = dynamic_cast<WritableToken *>(t.get());
- if (writable != nullptr) {
- writable->setTokenIndex(int(getBufferStartIndex() + _tokens.size()));
- }
-
- _tokens.push_back(std::move(t));
-}
-
-/// <summary>
-/// Return a marker that we can release later.
-/// <p/>
-/// The specific marker value used for this class allows for some level of
-/// protection against misuse where {@code seek()} is called on a mark or
-/// {@code release()} is called in the wrong order.
-/// </summary>
-ssize_t UnbufferedTokenStream::mark()
-{
- if (_numMarkers == 0) {
- _lastTokenBufferStart = _lastToken;
- }
-
- int mark = -_numMarkers - 1;
- _numMarkers++;
- return mark;
-}
-
-void UnbufferedTokenStream::release(ssize_t marker)
-{
- ssize_t expectedMark = -_numMarkers;
- if (marker != expectedMark) {
- throw IllegalStateException("release() called with an invalid marker.");
- }
-
- _numMarkers--;
- if (_numMarkers == 0) { // can we release buffer?
- if (_p > 0) {
- // Copy tokens[p]..tokens[n-1] to tokens[0]..tokens[(n-1)-p], reset ptrs
- // p is last valid token; move nothing if p==n as we have no valid token
- _tokens.erase(_tokens.begin(), _tokens.begin() + static_cast<ssize_t>(_p));
- _p = 0;
- }
-
- _lastTokenBufferStart = _lastToken;
- }
-}
-
-size_t UnbufferedTokenStream::index()
-{
- return _currentTokenIndex;
-}
-
-void UnbufferedTokenStream::seek(size_t index)
-{ // seek to absolute index
- if (index == _currentTokenIndex) {
- return;
- }
-
- if (index > _currentTokenIndex) {
- sync(ssize_t(index - _currentTokenIndex));
- index = std::min(index, getBufferStartIndex() + _tokens.size() - 1);
- }
-
- size_t bufferStartIndex = getBufferStartIndex();
- if (bufferStartIndex > index) {
- throw IllegalArgumentException(std::string("cannot seek to negative index ") + std::to_string(index));
- }
-
- size_t i = index - bufferStartIndex;
- if (i >= _tokens.size()) {
- throw UnsupportedOperationException(std::string("seek to index outside buffer: ") + std::to_string(index) +
- " not in " + std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStartIndex + _tokens.size()));
- }
-
- _p = i;
- _currentTokenIndex = index;
- if (_p == 0) {
- _lastToken = _lastTokenBufferStart;
- } else {
- _lastToken = _tokens[_p - 1].get();
- }
-}
-
-size_t UnbufferedTokenStream::size()
-{
- throw UnsupportedOperationException("Unbuffered stream cannot know its size");
-}
-
-std::string UnbufferedTokenStream::getSourceName() const
-{
- return _tokenSource->getSourceName();
-}
-
-std::string UnbufferedTokenStream::getText(const misc::Interval &interval)
-{
- size_t bufferStartIndex = getBufferStartIndex();
- size_t bufferStopIndex = bufferStartIndex + _tokens.size() - 1;
-
- size_t start = interval.a;
- size_t stop = interval.b;
- if (start < bufferStartIndex || stop > bufferStopIndex) {
- throw UnsupportedOperationException(std::string("interval ") + interval.toString() +
- " not in token buffer window: " + std::to_string(bufferStartIndex) + ".." + std::to_string(bufferStopIndex));
- }
-
- size_t a = start - bufferStartIndex;
- size_t b = stop - bufferStartIndex;
-
- std::stringstream ss;
- for (size_t i = a; i <= b; i++) {
- Token *t = _tokens[i].get();
- if (i > 0)
- ss << ", ";
- ss << t->getText();
- }
-
- return ss.str();
-}
-
-size_t UnbufferedTokenStream::getBufferStartIndex() const
-{
- return _currentTokenIndex - _p;
-}
-
-void UnbufferedTokenStream::InitializeInstanceFields()
-{
- _p = 0;
- _numMarkers = 0;
- _currentTokenIndex = 0;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.h b/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.h
deleted file mode 100644
index 0c67ec8610..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/UnbufferedTokenStream.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "TokenStream.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC UnbufferedTokenStream : public TokenStream {
- public:
- UnbufferedTokenStream(TokenSource *tokenSource);
- UnbufferedTokenStream(TokenSource *tokenSource, int bufferSize);
- UnbufferedTokenStream(const UnbufferedTokenStream& other) = delete;
- virtual ~UnbufferedTokenStream();
-
- UnbufferedTokenStream& operator = (const UnbufferedTokenStream& other) = delete;
-
- virtual Token* get(size_t i) const override;
- virtual Token* LT(ssize_t i) override;
- virtual size_t LA(ssize_t i) override;
-
- virtual TokenSource* getTokenSource() const override;
-
- virtual std::string getText(const misc::Interval &interval) override;
- virtual std::string getText() override;
- virtual std::string getText(RuleContext *ctx) override;
- virtual std::string getText(Token *start, Token *stop) override;
-
- virtual void consume() override;
-
- /// <summary>
- /// Return a marker that we can release later.
- /// <p/>
- /// The specific marker value used for this class allows for some level of
- /// protection against misuse where {@code seek()} is called on a mark or
- /// {@code release()} is called in the wrong order.
- /// </summary>
- virtual ssize_t mark() override;
- virtual void release(ssize_t marker) override;
- virtual size_t index() override;
- virtual void seek(size_t index) override;
- virtual size_t size() override;
- virtual std::string getSourceName() const override;
-
- protected:
- /// The token source this stream pulls from.
- TokenSource *_tokenSource;
-
- /// <summary>
- /// A moving window buffer of the data being scanned. While there's a marker,
- /// we keep adding to buffer. Otherwise, <seealso cref="#consume consume()"/> resets so
- /// we start filling at index 0 again.
- /// </summary>
-
- std::vector<std::unique_ptr<Token>> _tokens;
-
- /// <summary>
- /// 0..n-1 index into <seealso cref="#tokens tokens"/> of next token.
- /// <p/>
- /// The {@code LT(1)} token is {@code tokens[p]}. If {@code p == n}, we are
- /// out of buffered tokens.
- /// </summary>
- size_t _p;
-
- /// <summary>
- /// Count up with <seealso cref="#mark mark()"/> and down with
- /// <seealso cref="#release release()"/>. When we {@code release()} the last mark,
- /// {@code numMarkers} reaches 0 and we reset the buffer. Copy
- /// {@code tokens[p]..tokens[n-1]} to {@code tokens[0]..tokens[(n-1)-p]}.
- /// </summary>
- int _numMarkers;
-
- /// <summary>
- /// This is the {@code LT(-1)} token for the current position.
- /// </summary>
- Token *_lastToken;
-
- /// <summary>
- /// When {@code numMarkers > 0}, this is the {@code LT(-1)} token for the
- /// first token in <seealso cref="#tokens"/>. Otherwise, this is {@code null}.
- /// </summary>
- Token *_lastTokenBufferStart;
-
- /// <summary>
- /// Absolute token index. It's the index of the token about to be read via
- /// {@code LT(1)}. Goes from 0 to the number of tokens in the entire stream,
- /// although the stream size is unknown before the end is reached.
- /// <p/>
- /// This value is used to set the token indexes if the stream provides tokens
- /// that implement <seealso cref="WritableToken"/>.
- /// </summary>
- size_t _currentTokenIndex;
-
- virtual void sync(ssize_t want);
-
- /// <summary>
- /// Add {@code n} elements to the buffer. Returns the number of tokens
- /// actually added to the buffer. If the return value is less than {@code n},
- /// then EOF was reached before {@code n} tokens could be added.
- /// </summary>
- virtual size_t fill(size_t n);
- virtual void add(std::unique_ptr<Token> t);
-
- size_t getBufferStartIndex() const;
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace antlr4
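Analogously to the char stream, the token stream above keeps only a sliding window of tokens. A sketch of pulling every token through it; any generated lexer can serve as the TokenSource, none is named here:

    #include "antlr4-runtime.h"

    // Count non-EOF tokens delivered by a TokenSource via the unbuffered stream.
    size_t countTokens(antlr4::TokenSource &source) {
      antlr4::UnbufferedTokenStream stream(&source);

      size_t n = 0;
      while (stream.LA(1) != antlr4::Token::EOF) {
        // LT(1) stays valid only while it is inside the window, so use it now.
        antlr4::Token *t = stream.LT(1);
        (void)t; // e.g. inspect t->getType() / t->getText() here
        stream.consume();
        ++n;
      }
      return n;
    }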
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Version.h b/contrib/libs/antlr4_cpp_runtime/src/Version.h
deleted file mode 100644
index 43f00ea65c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Version.h
+++ /dev/null
@@ -1,42 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#define ANTLRCPP_VERSION_MAJOR 4
-#define ANTLRCPP_VERSION_MINOR 11
-#define ANTLRCPP_VERSION_PATCH 1
-
-#define ANTLRCPP_MAKE_VERSION(major, minor, patch) ((major) * 100000 + (minor) * 1000 + (patch))
-
-#define ANTLRCPP_VERSION \
- ANTLRCPP_MAKE_VERSION(ANTLRCPP_VERSION_MAJOR, ANTLRCPP_VERSION_MINOR, ANTLRCPP_VERSION_PATCH)
-
-#define ANTLRCPP_VERSION_STRING \
- ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_MAJOR) "." \
- ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_MINOR) "." \
- ANTLR4CPP_STRINGIFY(ANTLRCPP_VERSION_PATCH)
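With the values above, ANTLRCPP_MAKE_VERSION(4, 11, 1) evaluates to 4 * 100000 + 11 * 1000 + 1 = 411001, so the runtime version can be gated with an ordinary integer comparison. A sketch (the include path is whatever brings Version.h into scope in a given build):

    #include "Version.h"

    #if ANTLRCPP_VERSION >= ANTLRCPP_MAKE_VERSION(4, 11, 0)
    // Code that relies on behaviour introduced in runtime 4.11 or newer.
    #endif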
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.cpp b/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.cpp
deleted file mode 100644
index 0f783d5d79..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-
-#include "Vocabulary.h"
-
-using namespace antlr4::dfa;
-
-const Vocabulary Vocabulary::EMPTY_VOCABULARY;
-
-Vocabulary::Vocabulary(std::vector<std::string> literalNames, std::vector<std::string> symbolicNames)
-: Vocabulary(std::move(literalNames), std::move(symbolicNames), {}) {
-}
-
-Vocabulary::Vocabulary(std::vector<std::string> literalNames,
- std::vector<std::string> symbolicNames, std::vector<std::string> displayNames)
- : _literalNames(std::move(literalNames)), _symbolicNames(std::move(symbolicNames)), _displayNames(std::move(displayNames)),
- _maxTokenType(std::max(_displayNames.size(), std::max(_literalNames.size(), _symbolicNames.size())) - 1) {
- // See note here on -1 part: https://github.com/antlr/antlr4/pull/1146
-}
-
-std::string_view Vocabulary::getLiteralName(size_t tokenType) const {
- if (tokenType < _literalNames.size()) {
- return _literalNames[tokenType];
- }
-
- return "";
-}
-
-std::string_view Vocabulary::getSymbolicName(size_t tokenType) const {
- if (tokenType == Token::EOF) {
- return "EOF";
- }
-
- if (tokenType < _symbolicNames.size()) {
- return _symbolicNames[tokenType];
- }
-
- return "";
-}
-
-std::string Vocabulary::getDisplayName(size_t tokenType) const {
- if (tokenType < _displayNames.size()) {
- std::string_view displayName = _displayNames[tokenType];
- if (!displayName.empty()) {
- return std::string(displayName);
- }
- }
-
- std::string_view literalName = getLiteralName(tokenType);
- if (!literalName.empty()) {
- return std::string(literalName);
- }
-
- std::string_view symbolicName = getSymbolicName(tokenType);
- if (!symbolicName.empty()) {
- return std::string(symbolicName);
- }
-
- return std::to_string(tokenType);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h b/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h
deleted file mode 100644
index af5b243880..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/Vocabulary.h
+++ /dev/null
@@ -1,177 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace dfa {
-
- /// This class provides a default implementation of the <seealso cref="Vocabulary"/>
- /// interface.
- class ANTLR4CPP_PUBLIC Vocabulary final {
- public:
- /// Gets an empty <seealso cref="Vocabulary"/> instance.
- ///
- /// <para>
- /// No literal or symbol names are assigned to token types, so
- /// <seealso cref="#getDisplayName(int)"/> returns the numeric value for all tokens
- /// except <seealso cref="Token#EOF"/>.</para>
- [[deprecated("Use the default constructor of Vocabulary instead.")]] static const Vocabulary EMPTY_VOCABULARY;
-
- Vocabulary() {}
-
- Vocabulary(const Vocabulary&) = default;
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="Vocabulary"/> from the specified
- /// literal and symbolic token names.
- /// </summary>
- /// <param name="literalNames"> The literal names assigned to tokens, or {@code null}
- /// if no literal names are assigned. </param>
- /// <param name="symbolicNames"> The symbolic names assigned to tokens, or
- /// {@code null} if no symbolic names are assigned.
- /// </param>
- /// <seealso cref= #getLiteralName(int) </seealso>
- /// <seealso cref= #getSymbolicName(int) </seealso>
- Vocabulary(std::vector<std::string> literalNames, std::vector<std::string> symbolicNames);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="Vocabulary"/> from the specified
- /// literal, symbolic, and display token names.
- /// </summary>
- /// <param name="literalNames"> The literal names assigned to tokens, or {@code null}
- /// if no literal names are assigned. </param>
- /// <param name="symbolicNames"> The symbolic names assigned to tokens, or
- /// {@code null} if no symbolic names are assigned. </param>
- /// <param name="displayNames"> The display names assigned to tokens, or {@code null}
- /// to use the values in {@code literalNames} and {@code symbolicNames} as
- /// the source of display names, as described in
- /// <seealso cref="#getDisplayName(int)"/>.
- /// </param>
- /// <seealso cref= #getLiteralName(int) </seealso>
- /// <seealso cref= #getSymbolicName(int) </seealso>
- /// <seealso cref= #getDisplayName(int) </seealso>
- Vocabulary(std::vector<std::string> literalNames, std::vector<std::string> symbolicNames,
- std::vector<std::string> displayNames);
-
- /// <summary>
- /// Returns the highest token type value. It can be used to iterate from
- /// zero to that number, inclusively, thus querying all stored entries. </summary>
- /// <returns> the highest token type value </returns>
- constexpr size_t getMaxTokenType() const { return _maxTokenType; }
-
- /// <summary>
- /// Gets the string literal associated with a token type. The string returned
- /// by this method, when not {@code null}, can be used unaltered in a parser
- /// grammar to represent this token type.
- ///
- /// <para>The following table shows examples of lexer rules and the literal
- /// names assigned to the corresponding token types.</para>
- ///
- /// <table>
- /// <tr>
- /// <th>Rule</th>
- /// <th>Literal Name</th>
- /// <th>Java String Literal</th>
- /// </tr>
- /// <tr>
- /// <td>{@code THIS : 'this';}</td>
- /// <td>{@code 'this'}</td>
- /// <td>{@code "'this'"}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code SQUOTE : '\'';}</td>
- /// <td>{@code '\''}</td>
- /// <td>{@code "'\\''"}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code ID : [A-Z]+;}</td>
- /// <td>n/a</td>
- /// <td>{@code null}</td>
- /// </tr>
- /// </table>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The string literal associated with the specified token type, or
- /// {@code null} if no string literal is associated with the type. </returns>
- std::string_view getLiteralName(size_t tokenType) const;
-
- /// <summary>
- /// Gets the symbolic name associated with a token type. The string returned
- /// by this method, when not {@code null}, can be used unaltered in a parser
- /// grammar to represent this token type.
- ///
- /// <para>This method supports token types defined by any of the following
- /// methods:</para>
- ///
- /// <ul>
- /// <li>Tokens created by lexer rules.</li>
- /// <li>Tokens defined in a <code>tokens{}</code> block in a lexer or parser
- /// grammar.</li>
- /// <li>The implicitly defined {@code EOF} token, which has the token type
- /// <seealso cref="Token#EOF"/>.</li>
- /// </ul>
- ///
- /// <para>The following table shows examples of lexer rules and the literal
- /// names assigned to the corresponding token types.</para>
- ///
- /// <table>
- /// <tr>
- /// <th>Rule</th>
- /// <th>Symbolic Name</th>
- /// </tr>
- /// <tr>
- /// <td>{@code THIS : 'this';}</td>
- /// <td>{@code THIS}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code SQUOTE : '\'';}</td>
- /// <td>{@code SQUOTE}</td>
- /// </tr>
- /// <tr>
- /// <td>{@code ID : [A-Z]+;}</td>
- /// <td>{@code ID}</td>
- /// </tr>
- /// </table>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The symbolic name associated with the specified token type, or
- /// {@code null} if no symbolic name is associated with the type. </returns>
- std::string_view getSymbolicName(size_t tokenType) const;
-
- /// <summary>
- /// Gets the display name of a token type.
- ///
- /// <para>ANTLR provides a default implementation of this method, but
- /// applications are free to override the behavior in any manner which makes
- /// sense for the application. The default implementation returns the first
- /// result from the following list which produces a non-{@code null}
- /// result.</para>
- ///
- /// <ol>
- /// <li>The result of <seealso cref="#getLiteralName"/></li>
- /// <li>The result of <seealso cref="#getSymbolicName"/></li>
- /// <li>The result of <seealso cref="Integer#toString"/></li>
- /// </ol>
- /// </summary>
- /// <param name="tokenType"> The token type.
- /// </param>
- /// <returns> The display name of the token type, for use in error reporting or
- /// other user-visible messages which reference specific token types. </returns>
- std::string getDisplayName(size_t tokenType) const;
-
- private:
- std::vector<std::string> const _literalNames;
- std::vector<std::string> const _symbolicNames;
- std::vector<std::string> const _displayNames;
- const size_t _maxTokenType = 0;
- };
-
-} // namespace atn
-} // namespace antlr4
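For reference, a minimal sketch of how the Vocabulary interface deleted above can be consumed; it assumes only the methods declared in that header (getMaxTokenType, getDisplayName), and the helper name dumpTokenNames is illustrative rather than part of the runtime.

#include <cstddef>
#include <ostream>
#include <string>

// Sketch only: Vocab stands for any type exposing the interface above.
template <typename Vocab>
void dumpTokenNames(const Vocab &vocabulary, std::ostream &out) {
  // getMaxTokenType() is inclusive, so <= visits every stored entry.
  for (std::size_t type = 0; type <= vocabulary.getMaxTokenType(); ++type) {
    // getDisplayName() falls back from literal name to symbolic name to the
    // numeric token type, per the documentation above.
    out << type << ": " << vocabulary.getDisplayName(type) << '\n';
  }
}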
diff --git a/contrib/libs/antlr4_cpp_runtime/src/WritableToken.cpp b/contrib/libs/antlr4_cpp_runtime/src/WritableToken.cpp
deleted file mode 100644
index a30cd96f19..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/WritableToken.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "WritableToken.h"
-
-antlr4::WritableToken::~WritableToken() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/WritableToken.h b/contrib/libs/antlr4_cpp_runtime/src/WritableToken.h
deleted file mode 100644
index 28856f25b9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/WritableToken.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-
- class ANTLR4CPP_PUBLIC WritableToken : public Token {
- public:
- virtual ~WritableToken();
- virtual void setText(const std::string &text) = 0;
- virtual void setType(size_t ttype) = 0;
- virtual void setLine(size_t line) = 0;
- virtual void setCharPositionInLine(size_t pos) = 0;
- virtual void setChannel(size_t channel) = 0;
- virtual void setTokenIndex(size_t index) = 0;
- };
-
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/antlr4-common.h b/contrib/libs/antlr4_cpp_runtime/src/antlr4-common.h
deleted file mode 100644
index d7f9a65fa1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/antlr4-common.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <algorithm>
-#include <any>
-#include <atomic>
-#include <bitset>
-#include <cassert>
-#include <climits>
-#include <cstddef>
-#include <cstdint>
-#include <cstdlib>
-#include <exception>
-#include <fstream>
-#include <iostream>
-#include <iterator>
-#include <limits>
-#include <map>
-#include <memory>
-#include <set>
-#include <sstream>
-#include <stack>
-#include <string>
-#include <string_view>
-#include <typeinfo>
-#include <type_traits>
-#include <unordered_map>
-#include <unordered_set>
-#include <utility>
-#include <vector>
-
-// Defines for the Guid class and other platform dependent stuff.
-#ifdef _WIN32
- #ifdef _MSC_VER
- #pragma warning (disable: 4250) // Class inherits by dominance.
- #pragma warning (disable: 4512) // assignment operator could not be generated
-
- #if _MSC_VER < 1900
- // Before VS 2015 code like "while (true)" will create a (useless) warning in level 4.
- #pragma warning (disable: 4127) // conditional expression is constant
- #endif
- #endif
-
- #ifdef _WIN64
- typedef __int64 ssize_t;
- #else
- typedef __int32 ssize_t;
- #endif
-
- #ifdef ANTLR4CPP_EXPORTS
- #define ANTLR4CPP_PUBLIC __declspec(dllexport)
- #else
- #ifdef ANTLR4CPP_STATIC
- #define ANTLR4CPP_PUBLIC
- #else
- #define ANTLR4CPP_PUBLIC __declspec(dllimport)
- #endif
- #endif
-
-#elif defined(__APPLE__)
- #if __GNUC__ >= 4
- #define ANTLR4CPP_PUBLIC __attribute__ ((visibility ("default")))
- #else
- #define ANTLR4CPP_PUBLIC
- #endif
-#else
- #if __GNUC__ >= 6
- #define ANTLR4CPP_PUBLIC __attribute__ ((visibility ("default")))
- #else
- #define ANTLR4CPP_PUBLIC
- #endif
-#endif
-
-#ifdef __has_builtin
-#define ANTLR4CPP_HAVE_BUILTIN(x) __has_builtin(x)
-#else
-#define ANTLR4CPP_HAVE_BUILTIN(x) 0
-#endif
-
-#define ANTLR4CPP_INTERNAL_STRINGIFY(x) #x
-#define ANTLR4CPP_STRINGIFY(x) ANTLR4CPP_INTERNAL_STRINGIFY(x)
-
-// We use everything from the C++ standard library by default.
-#ifndef ANTLR4CPP_USING_ABSEIL
-#define ANTLR4CPP_USING_ABSEIL 0
-#endif
-
-#include "support/Declarations.h"
-
-// We have to undefine this symbol as ANTLR will use this name for own members and even
-// generated functions. Because EOF is a global macro we cannot use e.g. a namespace scope to disambiguate.
-#ifdef EOF
-#undef EOF
-#endif
-
-#define INVALID_INDEX std::numeric_limits<size_t>::max()
-template<class T> using Ref = std::shared_ptr<T>;
diff --git a/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h b/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h
deleted file mode 100644
index 50b85aa4fc..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/antlr4-runtime.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-// This is the umbrella header for all ANTLR4 C++ runtime headers.
-
-#include "antlr4-common.h"
-
-#include "ANTLRErrorListener.h"
-#include "ANTLRErrorStrategy.h"
-#include "ANTLRFileStream.h"
-#include "ANTLRInputStream.h"
-#include "BailErrorStrategy.h"
-#include "BaseErrorListener.h"
-#include "BufferedTokenStream.h"
-#include "CharStream.h"
-#include "CommonToken.h"
-#include "CommonTokenFactory.h"
-#include "CommonTokenStream.h"
-#include "ConsoleErrorListener.h"
-#include "DefaultErrorStrategy.h"
-#include "DiagnosticErrorListener.h"
-#include "Exceptions.h"
-#include "FailedPredicateException.h"
-#include "InputMismatchException.h"
-#include "IntStream.h"
-#include "InterpreterRuleContext.h"
-#include "Lexer.h"
-#include "LexerInterpreter.h"
-#include "LexerNoViableAltException.h"
-#include "ListTokenSource.h"
-#include "NoViableAltException.h"
-#include "Parser.h"
-#include "ParserInterpreter.h"
-#include "ParserRuleContext.h"
-#include "ProxyErrorListener.h"
-#include "RecognitionException.h"
-#include "Recognizer.h"
-#include "RuleContext.h"
-#include "RuleContextWithAltNum.h"
-#include "RuntimeMetaData.h"
-#include "Token.h"
-#include "TokenFactory.h"
-#include "TokenSource.h"
-#include "TokenStream.h"
-#include "TokenStreamRewriter.h"
-#include "UnbufferedCharStream.h"
-#include "UnbufferedTokenStream.h"
-#include "Version.h"
-#include "Vocabulary.h"
-#include "Vocabulary.h"
-#include "WritableToken.h"
-#include "atn/ATN.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNDeserializationOptions.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/ATNSimulator.h"
-#include "atn/ATNState.h"
-#include "atn/ATNType.h"
-#include "atn/ActionTransition.h"
-#include "atn/AmbiguityInfo.h"
-#include "atn/ArrayPredictionContext.h"
-#include "atn/AtomTransition.h"
-#include "atn/BasicBlockStartState.h"
-#include "atn/BasicState.h"
-#include "atn/BlockEndState.h"
-#include "atn/BlockStartState.h"
-#include "atn/ContextSensitivityInfo.h"
-#include "atn/DecisionEventInfo.h"
-#include "atn/DecisionInfo.h"
-#include "atn/DecisionState.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/ErrorInfo.h"
-#include "atn/LL1Analyzer.h"
-#include "atn/LexerATNConfig.h"
-#include "atn/LexerATNSimulator.h"
-#include "atn/LexerAction.h"
-#include "atn/LexerActionExecutor.h"
-#include "atn/LexerActionType.h"
-#include "atn/LexerChannelAction.h"
-#include "atn/LexerCustomAction.h"
-#include "atn/LexerIndexedCustomAction.h"
-#include "atn/LexerModeAction.h"
-#include "atn/LexerMoreAction.h"
-#include "atn/LexerPopModeAction.h"
-#include "atn/LexerPushModeAction.h"
-#include "atn/LexerSkipAction.h"
-#include "atn/LexerTypeAction.h"
-#include "atn/LookaheadEventInfo.h"
-#include "atn/LoopEndState.h"
-#include "atn/NotSetTransition.h"
-#include "atn/OrderedATNConfigSet.h"
-#include "atn/ParseInfo.h"
-#include "atn/ParserATNSimulator.h"
-#include "atn/ParserATNSimulatorOptions.h"
-#include "atn/PlusBlockStartState.h"
-#include "atn/PlusLoopbackState.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/PredicateEvalInfo.h"
-#include "atn/PredicateTransition.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextCache.h"
-#include "atn/PredictionContextMergeCache.h"
-#include "atn/PredictionContextMergeCacheOptions.h"
-#include "atn/PredictionMode.h"
-#include "atn/ProfilingATNSimulator.h"
-#include "atn/RangeTransition.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/SemanticContext.h"
-#include "atn/SerializedATNView.h"
-#include "atn/SetTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/StarBlockStartState.h"
-#include "atn/StarLoopEntryState.h"
-#include "atn/StarLoopbackState.h"
-#include "atn/TokensStartState.h"
-#include "atn/Transition.h"
-#include "atn/WildcardTransition.h"
-#include "dfa/DFA.h"
-#include "dfa/DFASerializer.h"
-#include "dfa/DFAState.h"
-#include "dfa/LexerDFASerializer.h"
-#include "misc/InterpreterDataReader.h"
-#include "misc/Interval.h"
-#include "misc/IntervalSet.h"
-#include "misc/MurmurHash.h"
-#include "misc/Predicate.h"
-#include "support/Any.h"
-#include "support/Arrays.h"
-#include "support/BitSet.h"
-#include "support/Casts.h"
-#include "support/CPPUtils.h"
-#include "tree/AbstractParseTreeVisitor.h"
-#include "tree/ErrorNode.h"
-#include "tree/ErrorNodeImpl.h"
-#include "tree/ParseTree.h"
-#include "tree/ParseTreeListener.h"
-#include "tree/ParseTreeProperty.h"
-#include "tree/ParseTreeVisitor.h"
-#include "tree/ParseTreeWalker.h"
-#include "tree/TerminalNode.h"
-#include "tree/TerminalNodeImpl.h"
-#include "tree/Trees.h"
-#include "tree/pattern/Chunk.h"
-#include "tree/pattern/ParseTreeMatch.h"
-#include "tree/pattern/ParseTreePattern.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-#include "tree/pattern/RuleTagToken.h"
-#include "tree/pattern/TagChunk.h"
-#include "tree/pattern/TextChunk.h"
-#include "tree/pattern/TokenTagToken.h"
-#include "tree/xpath/XPath.h"
-#include "tree/xpath/XPathElement.h"
-#include "tree/xpath/XPathLexer.h"
-#include "tree/xpath/XPathLexerErrorListener.h"
-#include "tree/xpath/XPathRuleAnywhereElement.h"
-#include "tree/xpath/XPathRuleElement.h"
-#include "tree/xpath/XPathTokenAnywhereElement.h"
-#include "tree/xpath/XPathTokenElement.h"
-#include "tree/xpath/XPathWildcardAnywhereElement.h"
-#include "tree/xpath/XPathWildcardElement.h"
-#include "internal/Synchronization.h"
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.cpp
deleted file mode 100644
index 339515cc9c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.cpp
+++ /dev/null
@@ -1,159 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LL1Analyzer.h"
-#include "Token.h"
-#include "atn/RuleTransition.h"
-#include "misc/IntervalSet.h"
-#include "RuleContext.h"
-#include "atn/DecisionState.h"
-#include "Recognizer.h"
-#include "atn/ATNType.h"
-#include "Exceptions.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ATN.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::internal;
-using namespace antlrcpp;
-
-ATN::ATN() : ATN(ATNType::LEXER, 0) {}
-
-ATN::ATN(ATNType grammarType_, size_t maxTokenType_) : grammarType(grammarType_), maxTokenType(maxTokenType_) {}
-
-ATN::~ATN() {
- for (ATNState *state : states) {
- delete state;
- }
-}
-
-misc::IntervalSet ATN::nextTokens(ATNState *s, RuleContext *ctx) const {
- LL1Analyzer analyzer(*this);
-  return analyzer.LOOK(s, ctx);
-}
-
-misc::IntervalSet const& ATN::nextTokens(ATNState *s) const {
- if (!s->_nextTokenUpdated) {
- UniqueLock<Mutex> lock(_mutex);
- if (!s->_nextTokenUpdated) {
- s->_nextTokenWithinRule = nextTokens(s, nullptr);
- s->_nextTokenUpdated = true;
- }
- }
- return s->_nextTokenWithinRule;
-}
-
-void ATN::addState(ATNState *state) {
- if (state != nullptr) {
- //state->atn = this;
- state->stateNumber = static_cast<int>(states.size());
- }
-
- states.push_back(state);
-}
-
-void ATN::removeState(ATNState *state) {
- delete states.at(state->stateNumber);// just free mem, don't shift states in list
- states.at(state->stateNumber) = nullptr;
-}
-
-int ATN::defineDecisionState(DecisionState *s) {
- decisionToState.push_back(s);
- s->decision = static_cast<int>(decisionToState.size() - 1);
- return s->decision;
-}
-
-DecisionState *ATN::getDecisionState(size_t decision) const {
- if (!decisionToState.empty()) {
- return decisionToState[decision];
- }
- return nullptr;
-}
-
-size_t ATN::getNumberOfDecisions() const {
- return decisionToState.size();
-}
-
-misc::IntervalSet ATN::getExpectedTokens(size_t stateNumber, RuleContext *context) const {
- if (stateNumber == ATNState::INVALID_STATE_NUMBER || stateNumber >= states.size()) {
- throw IllegalArgumentException("Invalid state number.");
- }
-
- RuleContext *ctx = context;
- ATNState *s = states.at(stateNumber);
- misc::IntervalSet following = nextTokens(s);
- if (!following.contains(Token::EPSILON)) {
- return following;
- }
-
- misc::IntervalSet expected;
- expected.addAll(following);
- expected.remove(Token::EPSILON);
- while (ctx && ctx->invokingState != ATNState::INVALID_STATE_NUMBER && following.contains(Token::EPSILON)) {
- ATNState *invokingState = states.at(ctx->invokingState);
- const RuleTransition *rt = static_cast<const RuleTransition*>(invokingState->transitions[0].get());
- following = nextTokens(rt->followState);
- expected.addAll(following);
- expected.remove(Token::EPSILON);
-
- if (ctx->parent == nullptr) {
- break;
- }
- ctx = static_cast<RuleContext *>(ctx->parent);
- }
-
- if (following.contains(Token::EPSILON)) {
- expected.add(Token::EOF);
- }
-
- return expected;
-}
-
-std::string ATN::toString() const {
- std::stringstream ss;
- std::string type;
- switch (grammarType) {
- case ATNType::LEXER:
- type = "LEXER ";
- break;
-
- case ATNType::PARSER:
- type = "PARSER ";
- break;
-
- default:
- break;
- }
- ss << "(" << type << "ATN " << std::hex << this << std::dec << ") maxTokenType: " << maxTokenType << std::endl;
- ss << "states (" << states.size() << ") {" << std::endl;
-
- size_t index = 0;
- for (auto *state : states) {
- if (state == nullptr) {
- ss << " " << index++ << ": nul" << std::endl;
- } else {
- std::string text = state->toString();
- ss << " " << index++ << ": " << indent(text, " ", false) << std::endl;
- }
- }
-
- index = 0;
- for (auto *state : decisionToState) {
- if (state == nullptr) {
- ss << " " << index++ << ": nul" << std::endl;
- } else {
- std::string text = state->toString();
- ss << " " << index++ << ": " << indent(text, " ", false) << std::endl;
- }
- }
-
- ss << "}";
-
- return ss.str();
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.h
deleted file mode 100644
index f12476358a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATN.h
+++ /dev/null
@@ -1,133 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-#include "internal/Synchronization.h"
-
-// GCC generates a warning when forward-declaring ATN if ATN has already been
-// declared due to the attributes added by ANTLR4CPP_PUBLIC.
-// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39159
-// Add constant that can be checked so forward-declarations can be omitted.
-#define ANTLR4CPP_ATN_DECLARED
-
-namespace antlr4 {
-namespace atn {
-
- class LexerATNSimulator;
- class ParserATNSimulator;
-
- class ANTLR4CPP_PUBLIC ATN {
- public:
- static constexpr size_t INVALID_ALT_NUMBER = 0;
-
- /// Used for runtime deserialization of ATNs from strings.
- ATN();
-
- ATN(ATNType grammarType, size_t maxTokenType);
-
- ATN(const ATN&) = delete;
-
- ATN(ATN&&) = delete;
-
- ~ATN();
-
- ATN& operator=(const ATN&) = delete;
-
- ATN& operator=(ATN&&) = delete;
-
- std::vector<ATNState *> states;
-
- /// Each subrule/rule is a decision point and we must track them so we
- /// can go back later and build DFA predictors for them. This includes
- /// all the rules, subrules, optional blocks, ()+, ()* etc...
- std::vector<DecisionState *> decisionToState;
-
- /// Maps from rule index to starting state number.
- std::vector<RuleStartState *> ruleToStartState;
-
- /// Maps from rule index to stop state number.
- std::vector<RuleStopState *> ruleToStopState;
-
- /// The type of the ATN.
- ATNType grammarType;
-
- /// The maximum value for any symbol recognized by a transition in the ATN.
- size_t maxTokenType;
-
- /// <summary>
- /// For lexer ATNs, this maps the rule index to the resulting token type.
- /// For parser ATNs, this maps the rule index to the generated bypass token
- /// type if the
- /// <seealso cref="ATNDeserializationOptions#isGenerateRuleBypassTransitions"/>
- /// deserialization option was specified; otherwise, this is {@code null}.
- /// </summary>
- std::vector<size_t> ruleToTokenType;
-
- /// For lexer ATNs, this is an array of {@link LexerAction} objects which may
- /// be referenced by action transitions in the ATN.
- std::vector<Ref<const LexerAction>> lexerActions;
-
- std::vector<TokensStartState *> modeToStartState;
-
- /// <summary>
- /// Compute the set of valid tokens that can occur starting in state {@code s}.
- /// If {@code ctx} is null, the set of tokens will not include what can follow
- /// the rule surrounding {@code s}. In other words, the set will be
- /// restricted to tokens reachable staying within {@code s}'s rule.
- /// </summary>
- misc::IntervalSet nextTokens(ATNState *s, RuleContext *ctx) const;
-
- /// <summary>
- /// Compute the set of valid tokens that can occur starting in {@code s} and
- /// staying in same rule. <seealso cref="Token#EPSILON"/> is in set if we reach end of
- /// rule.
- /// </summary>
- misc::IntervalSet const& nextTokens(ATNState *s) const;
-
- void addState(ATNState *state);
-
- void removeState(ATNState *state);
-
- int defineDecisionState(DecisionState *s);
-
- DecisionState *getDecisionState(size_t decision) const;
-
- size_t getNumberOfDecisions() const;
-
- /// <summary>
- /// Computes the set of input symbols which could follow ATN state number
- /// {@code stateNumber} in the specified full {@code context}. This method
- /// considers the complete parser context, but does not evaluate semantic
- /// predicates (i.e. all predicates encountered during the calculation are
- /// assumed true). If a path in the ATN exists from the starting state to the
- /// <seealso cref="RuleStopState"/> of the outermost context without matching any
- /// symbols, <seealso cref="Token#EOF"/> is added to the returned set.
- /// <p/>
- /// If {@code context} is {@code null}, it is treated as
- /// <seealso cref="ParserRuleContext#EMPTY"/>.
- /// </summary>
- /// <param name="stateNumber"> the ATN state number </param>
- /// <param name="context"> the full parse context </param>
- /// <returns> The set of potentially valid input symbols which could follow the
- /// specified state in the specified context. </returns>
- /// <exception cref="IllegalArgumentException"> if the ATN does not contain a state with
- /// number {@code stateNumber} </exception>
- misc::IntervalSet getExpectedTokens(size_t stateNumber, RuleContext *context) const;
-
- std::string toString() const;
-
- private:
- friend class LexerATNSimulator;
- friend class ParserATNSimulator;
-
- mutable internal::Mutex _mutex;
- mutable internal::SharedMutex _stateMutex;
- mutable internal::SharedMutex _edgeMutex;
- };
-
-} // namespace atn
-} // namespace antlr4
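A hedged sketch of how the nextTokens/getExpectedTokens API documented above is typically used during error handling; only calls visible in the deleted sources are relied on, and the helper name eofIsExpected is illustrative.

#include "Token.h"
#include "atn/ATN.h"
#include "misc/IntervalSet.h"

// Illustrative helper: would end-of-input be acceptable at this ATN state
// under the given (possibly null) rule context? Note getExpectedTokens()
// throws IllegalArgumentException for an invalid state number.
bool eofIsExpected(const antlr4::atn::ATN &atn, size_t stateNumber,
                   antlr4::RuleContext *context) {
  antlr4::misc::IntervalSet expected = atn.getExpectedTokens(stateNumber, context);
  return expected.contains(antlr4::Token::EOF);
}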
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.cpp
deleted file mode 100644
index be4d5bfa8c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "atn/PredictionContext.h"
-#include "SemanticContext.h"
-
-#include "atn/ATNConfig.h"
-
-using namespace antlr4::atn;
-
-namespace {
-
-/**
- * This field stores the bit mask for implementing the
- * {@link #isPrecedenceFilterSuppressed} property as a bit within the
- * existing {@link #reachesIntoOuterContext} field.
- */
-inline constexpr size_t SUPPRESS_PRECEDENCE_FILTER = 0x40000000;
-
-}
-
-ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context)
- : ATNConfig(state, alt, std::move(context), 0, SemanticContext::Empty::Instance) {}
-
-ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context, Ref<const SemanticContext> semanticContext)
- : ATNConfig(state, alt, std::move(context), 0, std::move(semanticContext)) {}
-
-ATNConfig::ATNConfig(ATNConfig const& other, Ref<const SemanticContext> semanticContext)
- : ATNConfig(other.state, other.alt, other.context, other.reachesIntoOuterContext, std::move(semanticContext)) {}
-
-ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state)
- : ATNConfig(state, other.alt, other.context, other.reachesIntoOuterContext, other.semanticContext) {}
-
-ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref<const SemanticContext> semanticContext)
- : ATNConfig(state, other.alt, other.context, other.reachesIntoOuterContext, std::move(semanticContext)) {}
-
-ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref<const PredictionContext> context)
- : ATNConfig(state, other.alt, std::move(context), other.reachesIntoOuterContext, other.semanticContext) {}
-
-ATNConfig::ATNConfig(ATNConfig const& other, ATNState *state, Ref<const PredictionContext> context, Ref<const SemanticContext> semanticContext)
- : ATNConfig(state, other.alt, std::move(context), other.reachesIntoOuterContext, std::move(semanticContext)) {}
-
-ATNConfig::ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context, size_t reachesIntoOuterContext, Ref<const SemanticContext> semanticContext)
- : state(state), alt(alt), context(std::move(context)), reachesIntoOuterContext(reachesIntoOuterContext), semanticContext(std::move(semanticContext)) {}
-
-size_t ATNConfig::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, alt);
- hashCode = misc::MurmurHash::update(hashCode, context);
- hashCode = misc::MurmurHash::update(hashCode, semanticContext);
- hashCode = misc::MurmurHash::finish(hashCode, 4);
- return hashCode;
-}
-
-size_t ATNConfig::getOuterContextDepth() const {
- return reachesIntoOuterContext & ~SUPPRESS_PRECEDENCE_FILTER;
-}
-
-bool ATNConfig::isPrecedenceFilterSuppressed() const {
- return (reachesIntoOuterContext & SUPPRESS_PRECEDENCE_FILTER) != 0;
-}
-
-void ATNConfig::setPrecedenceFilterSuppressed(bool value) {
- if (value) {
- reachesIntoOuterContext |= SUPPRESS_PRECEDENCE_FILTER;
- } else {
- reachesIntoOuterContext &= ~SUPPRESS_PRECEDENCE_FILTER;
- }
-}
-
-bool ATNConfig::operator==(const ATNConfig &other) const {
- return state->stateNumber == other.state->stateNumber && alt == other.alt &&
- ((context == other.context) || (*context == *other.context)) &&
- *semanticContext == *other.semanticContext &&
- isPrecedenceFilterSuppressed() == other.isPrecedenceFilterSuppressed();
-}
-
-std::string ATNConfig::toString() const {
- return toString(true);
-}
-
-std::string ATNConfig::toString(bool showAlt) const {
- std::stringstream ss;
- ss << "(";
-
- ss << state->toString();
- if (showAlt) {
- ss << "," << alt;
- }
- if (context) {
- ss << ",[" << context->toString() << "]";
- }
- if (semanticContext != nullptr && semanticContext != SemanticContext::Empty::Instance) {
- ss << ",[" << semanticContext->toString() << "]";
- }
- if (getOuterContextDepth() > 0) {
- ss << ",up=" << getOuterContextDepth();
- }
- ss << ")";
-
- return ss.str();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.h
deleted file mode 100644
index 1d2e7ae163..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfig.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cassert>
-
-#include "antlr4-common.h"
-#include "atn/SemanticContext.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// A tuple: (ATN state, predicted alt, syntactic, semantic context).
- /// The syntactic context is a graph-structured stack node whose
- /// path(s) to the root is the rule invocation(s)
- /// chain used to arrive at the state. The semantic context is
- /// the tree of semantic predicates encountered before reaching
- /// an ATN state.
- /// </summary>
- class ANTLR4CPP_PUBLIC ATNConfig {
- public:
- struct Hasher
- {
- size_t operator()(Ref<ATNConfig> const& k) const {
- return k->hashCode();
- }
-
- size_t operator()(ATNConfig const& k) const {
- return k.hashCode();
- }
- };
-
- struct Comparer {
- bool operator()(Ref<ATNConfig> const& lhs, Ref<ATNConfig> const& rhs) const {
- return (lhs == rhs) || (*lhs == *rhs);
- }
-
- bool operator()(ATNConfig const& lhs, ATNConfig const& rhs) const {
- return (&lhs == &rhs) || (lhs == rhs);
- }
- };
-
- using Set = std::unordered_set<Ref<ATNConfig>, Hasher, Comparer>;
-
- /// The ATN state associated with this configuration.
- ATNState *state = nullptr;
-
- /// What alt (or lexer rule) is predicted by this configuration.
- const size_t alt = 0;
-
- /// The stack of invoking states leading to the rule/states associated
- /// with this config. We track only those contexts pushed during
- /// execution of the ATN simulator.
- ///
-    /// Can be shared between multiple ATNConfig instances.
- Ref<const PredictionContext> context;
-
- /**
- * We cannot execute predicates dependent upon local context unless
- * we know for sure we are in the correct context. Because there is
- * no way to do this efficiently, we simply cannot evaluate
- * dependent predicates unless we are in the rule that initially
- * invokes the ATN simulator.
- *
- * <p>
- * closure() tracks the depth of how far we dip into the outer context:
-     * depth > 0. Note that it may not be a totally accurate depth, since I
-     * don't ever decrement it. TODO: make it a boolean then</p>
- *
- * <p>
- * For memory efficiency, the {@link #isPrecedenceFilterSuppressed} method
- * is also backed by this field. Since the field is publicly accessible, the
- * highest bit which would not cause the value to become negative is used to
- * store this field. This choice minimizes the risk that code which only
- * compares this value to 0 would be affected by the new purpose of the
- * flag. It also ensures the performance of the existing {@link ATNConfig}
- * constructors as well as certain operations like
- * {@link ATNConfigSet#add(ATNConfig, DoubleKeyMap)} method are
- * <em>completely</em> unaffected by the change.</p>
- */
- size_t reachesIntoOuterContext = 0;
-
- /// Can be shared between multiple ATNConfig instances.
- Ref<const SemanticContext> semanticContext;
-
- ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context);
- ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context, Ref<const SemanticContext> semanticContext);
-
- ATNConfig(ATNConfig const& other, Ref<const SemanticContext> semanticContext);
- ATNConfig(ATNConfig const& other, ATNState *state);
- ATNConfig(ATNConfig const& other, ATNState *state, Ref<const SemanticContext> semanticContext);
- ATNConfig(ATNConfig const& other, ATNState *state, Ref<const PredictionContext> context);
- ATNConfig(ATNConfig const& other, ATNState *state, Ref<const PredictionContext> context, Ref<const SemanticContext> semanticContext);
-
- ATNConfig(ATNConfig const&) = default;
-
- ATNConfig(ATNConfig&&) = default;
-
- virtual ~ATNConfig() = default;
-
- virtual size_t hashCode() const;
-
- /**
- * This method gets the value of the {@link #reachesIntoOuterContext} field
- * as it existed prior to the introduction of the
- * {@link #isPrecedenceFilterSuppressed} method.
- */
- size_t getOuterContextDepth() const;
- bool isPrecedenceFilterSuppressed() const;
- void setPrecedenceFilterSuppressed(bool value);
-
- /// An ATN configuration is equal to another if both have
- /// the same state, they predict the same alternative, and
- /// syntactic/semantic contexts are the same.
- bool operator==(const ATNConfig &other) const;
- bool operator!=(const ATNConfig &other) const;
-
- virtual std::string toString() const;
- std::string toString(bool showAlt) const;
-
- private:
- ATNConfig(ATNState *state, size_t alt, Ref<const PredictionContext> context, size_t reachesIntoOuterContext, Ref<const SemanticContext> semanticContext);
- };
-
-} // namespace atn
-} // namespace antlr4
-
-
-// Hash function for ATNConfig.
-
-namespace std {
- using antlr4::atn::ATNConfig;
-
- template <> struct hash<ATNConfig>
- {
- size_t operator() (const ATNConfig &x) const
- {
- return x.hashCode();
- }
- };
-
- template <> struct hash<std::vector<Ref<ATNConfig>>>
- {
- size_t operator() (const std::vector<Ref<ATNConfig>> &vector) const
- {
- std::size_t seed = 0;
- for (const auto &config : vector) {
- seed ^= config->hashCode() + 0x9e3779b9 + (seed << 6) + (seed >> 2);
- }
- return seed;
- }
- };
-}
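A small sketch of what the Hasher/Comparer pair and the std::hash specialization above provide: value-based lookup of configurations held through Ref (shared_ptr). Only declarations from the deleted header are used; containsEquivalent is an illustrative name.

#include "atn/ATNConfig.h"

// ATNConfig::Set deduplicates by (state, alt, context, semantic predicate)
// value rather than by pointer identity, because Hasher and Comparer
// dereference the Ref<ATNConfig> keys.
bool containsEquivalent(const antlr4::atn::ATNConfig::Set &set,
                        const Ref<antlr4::atn::ATNConfig> &config) {
  return set.find(config) != set.end();
}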
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.cpp
deleted file mode 100644
index 4ebdf8882b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PredictionContext.h"
-#include "atn/ATNConfig.h"
-#include "atn/ATNSimulator.h"
-#include "Exceptions.h"
-#include "atn/SemanticContext.h"
-#include "support/Arrays.h"
-
-#include "atn/ATNConfigSet.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
-}
-
-ATNConfigSet::ATNConfigSet() : ATNConfigSet(true) {}
-
-ATNConfigSet::ATNConfigSet(const ATNConfigSet &other)
- : fullCtx(other.fullCtx), _configLookup(other._configLookup.bucket_count(), ATNConfigHasher{this}, ATNConfigComparer{this}) {
- addAll(other);
- uniqueAlt = other.uniqueAlt;
- conflictingAlts = other.conflictingAlts;
- hasSemanticContext = other.hasSemanticContext;
- dipsIntoOuterContext = other.dipsIntoOuterContext;
-}
-
-ATNConfigSet::ATNConfigSet(bool fullCtx)
- : fullCtx(fullCtx), _configLookup(0, ATNConfigHasher{this}, ATNConfigComparer{this}) {}
-
-bool ATNConfigSet::add(const Ref<ATNConfig> &config) {
- return add(config, nullptr);
-}
-
-bool ATNConfigSet::add(const Ref<ATNConfig> &config, PredictionContextMergeCache *mergeCache) {
- assert(config);
-
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- if (config->semanticContext != SemanticContext::Empty::Instance) {
- hasSemanticContext = true;
- }
- if (config->getOuterContextDepth() > 0) {
- dipsIntoOuterContext = true;
- }
-
- auto existing = _configLookup.find(config.get());
- if (existing == _configLookup.end()) {
- _configLookup.insert(config.get());
- _cachedHashCode = 0;
- configs.push_back(config); // track order here
-
- return true;
- }
-
- // a previous (s,i,pi,_), merge with it and save result
- bool rootIsWildcard = !fullCtx;
- Ref<const PredictionContext> merged = PredictionContext::merge((*existing)->context, config->context, rootIsWildcard, mergeCache);
- // no need to check for existing.context, config.context in cache
- // since only way to create new graphs is "call rule" and here. We
- // cache at both places.
- (*existing)->reachesIntoOuterContext = std::max((*existing)->reachesIntoOuterContext, config->reachesIntoOuterContext);
-
- // make sure to preserve the precedence filter suppression during the merge
- if (config->isPrecedenceFilterSuppressed()) {
- (*existing)->setPrecedenceFilterSuppressed(true);
- }
-
- (*existing)->context = std::move(merged); // replace context; no need to alt mapping
-
- return true;
-}
-
-bool ATNConfigSet::addAll(const ATNConfigSet &other) {
- for (const auto &c : other.configs) {
- add(c);
- }
- return false;
-}
-
-std::vector<ATNState*> ATNConfigSet::getStates() const {
- std::vector<ATNState*> states;
- states.reserve(configs.size());
- for (const auto &c : configs) {
- states.push_back(c->state);
- }
- return states;
-}
-
-/**
- * Gets the complete set of represented alternatives for the configuration
- * set.
- *
- * @return the set of represented alternatives in this configuration set
- *
- * @since 4.3
- */
-
-BitSet ATNConfigSet::getAlts() const {
- BitSet alts;
- for (const auto &config : configs) {
- alts.set(config->alt);
- }
- return alts;
-}
-
-std::vector<Ref<const SemanticContext>> ATNConfigSet::getPredicates() const {
- std::vector<Ref<const SemanticContext>> preds;
- preds.reserve(configs.size());
- for (const auto &c : configs) {
- if (c->semanticContext != SemanticContext::Empty::Instance) {
- preds.push_back(c->semanticContext);
- }
- }
- return preds;
-}
-
-const Ref<ATNConfig>& ATNConfigSet::get(size_t i) const {
- return configs[i];
-}
-
-void ATNConfigSet::optimizeConfigs(ATNSimulator *interpreter) {
- assert(interpreter);
-
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- if (_configLookup.empty())
- return;
-
- for (const auto &config : configs) {
- config->context = interpreter->getCachedContext(config->context);
- }
-}
-
-bool ATNConfigSet::equals(const ATNConfigSet &other) const {
- if (&other == this) {
- return true;
- }
-
- if (configs.size() != other.configs.size())
- return false;
-
- if (fullCtx != other.fullCtx || uniqueAlt != other.uniqueAlt ||
- conflictingAlts != other.conflictingAlts || hasSemanticContext != other.hasSemanticContext ||
- dipsIntoOuterContext != other.dipsIntoOuterContext) // includes stack context
- return false;
-
- return Arrays::equals(configs, other.configs);
-}
-
-size_t ATNConfigSet::hashCode() const {
- size_t cachedHashCode = _cachedHashCode.load(std::memory_order_relaxed);
- if (!isReadonly() || cachedHashCode == 0) {
- cachedHashCode = 1;
- for (const auto &i : configs) {
- cachedHashCode = 31 * cachedHashCode + i->hashCode(); // Same as Java's list hashCode impl.
- }
- _cachedHashCode.store(cachedHashCode, std::memory_order_relaxed);
- }
- return cachedHashCode;
-}
-
-size_t ATNConfigSet::size() const {
- return configs.size();
-}
-
-bool ATNConfigSet::isEmpty() const {
- return configs.empty();
-}
-
-void ATNConfigSet::clear() {
- if (_readonly) {
- throw IllegalStateException("This set is readonly");
- }
- configs.clear();
- _cachedHashCode = 0;
- _configLookup.clear();
-}
-
-bool ATNConfigSet::isReadonly() const {
- return _readonly;
-}
-
-void ATNConfigSet::setReadonly(bool readonly) {
- _readonly = readonly;
- LookupContainer(0, ATNConfigHasher{this}, ATNConfigComparer{this}).swap(_configLookup);
-}
-
-std::string ATNConfigSet::toString() const {
- std::stringstream ss;
- ss << "[";
- for (size_t i = 0; i < configs.size(); i++) {
- ss << configs[i]->toString();
- }
- ss << "]";
-
- if (hasSemanticContext) {
- ss << ",hasSemanticContext = " << hasSemanticContext;
- }
- if (uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- ss << ",uniqueAlt = " << uniqueAlt;
- }
-
- if (conflictingAlts.size() > 0) {
- ss << ",conflictingAlts = ";
- ss << conflictingAlts.toString();
- }
-
- if (dipsIntoOuterContext) {
- ss << ", dipsIntoOuterContext";
- }
- return ss.str();
-}
-
-size_t ATNConfigSet::hashCode(const ATNConfig &other) const {
- size_t hashCode = 7;
- hashCode = 31 * hashCode + other.state->stateNumber;
- hashCode = 31 * hashCode + other.alt;
- hashCode = 31 * hashCode + other.semanticContext->hashCode();
- return hashCode;
-}
-
-bool ATNConfigSet::equals(const ATNConfig &lhs, const ATNConfig &rhs) const {
- return lhs.state->stateNumber == rhs.state->stateNumber && lhs.alt == rhs.alt && *lhs.semanticContext == *rhs.semanticContext;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.h
deleted file mode 100644
index d147f183a0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNConfigSet.h
+++ /dev/null
@@ -1,157 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cassert>
-
-#include "support/BitSet.h"
-#include "atn/PredictionContext.h"
-#include "atn/ATNConfig.h"
-#include "FlatHashSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Specialized set that can track info about the set, with support for combining similar configurations using a
- /// graph-structured stack.
- class ANTLR4CPP_PUBLIC ATNConfigSet {
- public:
- /// Track the elements as they are added to the set; supports get(i)
- std::vector<Ref<ATNConfig>> configs;
-
- // TODO: these fields make me pretty uncomfortable but nice to pack up info together, saves recomputation
- // TODO: can we track conflicts as they are added to save scanning configs later?
- size_t uniqueAlt = 0;
-
- /** Currently this is only used when we detect SLL conflict; this does
- * not necessarily represent the ambiguous alternatives. In fact,
- * I should also point out that this seems to include predicated alternatives
- * that have predicates that evaluate to false. Computed in computeTargetState().
- */
- antlrcpp::BitSet conflictingAlts;
-
- // Used in parser and lexer. In lexer, it indicates we hit a pred
- // while computing a closure operation. Don't make a DFA state from this.
- bool hasSemanticContext = false;
- bool dipsIntoOuterContext = false;
-
- /// Indicates that this configuration set is part of a full context
- /// LL prediction. It will be used to determine how to merge $. With SLL
- /// it's a wildcard whereas it is not for LL context merge.
- const bool fullCtx = true;
-
- ATNConfigSet();
-
- ATNConfigSet(const ATNConfigSet &other);
-
- ATNConfigSet(ATNConfigSet&&) = delete;
-
- explicit ATNConfigSet(bool fullCtx);
-
- virtual ~ATNConfigSet() = default;
-
- bool add(const Ref<ATNConfig> &config);
-
- /// <summary>
- /// Adding a new config means merging contexts with existing configs for
- /// {@code (s, i, pi, _)}, where {@code s} is the
- /// <seealso cref="ATNConfig#state"/>, {@code i} is the <seealso cref="ATNConfig#alt"/>, and
- /// {@code pi} is the <seealso cref="ATNConfig#semanticContext"/>. We use
- /// {@code (s,i,pi)} as key.
- /// <p/>
- /// This method updates <seealso cref="#dipsIntoOuterContext"/> and
- /// <seealso cref="#hasSemanticContext"/> when necessary.
- /// </summary>
- bool add(const Ref<ATNConfig> &config, PredictionContextMergeCache *mergeCache);
-
- bool addAll(const ATNConfigSet &other);
-
- std::vector<ATNState*> getStates() const;
-
- /**
- * Gets the complete set of represented alternatives for the configuration
- * set.
- *
- * @return the set of represented alternatives in this configuration set
- *
- * @since 4.3
- */
- antlrcpp::BitSet getAlts() const;
- std::vector<Ref<const SemanticContext>> getPredicates() const;
-
- const Ref<ATNConfig>& get(size_t i) const;
-
- void optimizeConfigs(ATNSimulator *interpreter);
-
- size_t size() const;
- bool isEmpty() const;
- void clear();
- bool isReadonly() const;
- void setReadonly(bool readonly);
-
- virtual size_t hashCode() const;
-
- virtual bool equals(const ATNConfigSet &other) const;
-
- virtual std::string toString() const;
-
- private:
- struct ATNConfigHasher final {
- const ATNConfigSet* atnConfigSet;
-
- size_t operator()(const ATNConfig *other) const {
- assert(other != nullptr);
- return atnConfigSet->hashCode(*other);
- }
- };
-
- struct ATNConfigComparer final {
- const ATNConfigSet* atnConfigSet;
-
- bool operator()(const ATNConfig *lhs, const ATNConfig *rhs) const {
- assert(lhs != nullptr);
- assert(rhs != nullptr);
- return atnConfigSet->equals(*lhs, *rhs);
- }
- };
-
- mutable std::atomic<size_t> _cachedHashCode = 0;
-
- /// Indicates that the set of configurations is read-only. Do not
- /// allow any code to manipulate the set; DFA states will point at
- /// the sets and they must not change. This does not protect the other
- /// fields; in particular, conflictingAlts is set after
- /// we've made this readonly.
- bool _readonly = false;
-
- virtual size_t hashCode(const ATNConfig &atnConfig) const;
-
- virtual bool equals(const ATNConfig &lhs, const ATNConfig &rhs) const;
-
- using LookupContainer = FlatHashSet<ATNConfig*, ATNConfigHasher, ATNConfigComparer>;
-
- /// All configs but hashed by (s, i, _, pi) not including context. Wiped out
- /// when we go readonly as this set becomes a DFA state.
- LookupContainer _configLookup;
- };
-
- inline bool operator==(const ATNConfigSet &lhs, const ATNConfigSet &rhs) { return lhs.equals(rhs); }
-
- inline bool operator!=(const ATNConfigSet &lhs, const ATNConfigSet &rhs) { return !operator==(lhs, rhs); }
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
-template <>
-struct hash<::antlr4::atn::ATNConfigSet> {
- size_t operator()(const ::antlr4::atn::ATNConfigSet &atnConfigSet) const {
- return atnConfigSet.hashCode();
- }
-};
-
-} // namespace std
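For orientation, a sketch of read-only inspection of an ATNConfigSet using only the accessors declared above (size, getAlts, toString); the dumpConfigSet helper is illustrative and not part of the runtime.

#include <ostream>
#include <string>

#include "atn/ATNConfigSet.h"

// Dump a configuration set, e.g. while tracing prediction. getAlts() returns
// the represented alternatives; toString() also reports the conflict and
// context flags tracked by the set.
void dumpConfigSet(const antlr4::atn::ATNConfigSet &configs, std::ostream &out) {
  out << configs.size() << " config(s), alts " << configs.getAlts().toString()
      << ": " << configs.toString() << '\n';
}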
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.cpp
deleted file mode 100644
index e0a7cb2b27..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNDeserializationOptions.h"
-#include "Exceptions.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-ATNDeserializationOptions::ATNDeserializationOptions(ATNDeserializationOptions *options)
- : _readOnly(false), _verifyATN(options->_verifyATN),
- _generateRuleBypassTransitions(options->_generateRuleBypassTransitions) {}
-
-const ATNDeserializationOptions& ATNDeserializationOptions::getDefaultOptions() {
- static const ATNDeserializationOptions* const defaultOptions = new ATNDeserializationOptions();
- return *defaultOptions;
-}
-
-void ATNDeserializationOptions::makeReadOnly() {
- _readOnly = true;
-}
-
-void ATNDeserializationOptions::setVerifyATN(bool verify) {
- throwIfReadOnly();
- _verifyATN = verify;
-}
-
-void ATNDeserializationOptions::setGenerateRuleBypassTransitions(bool generate) {
- throwIfReadOnly();
- _generateRuleBypassTransitions = generate;
-}
-
-void ATNDeserializationOptions::throwIfReadOnly() const {
- if (isReadOnly()) {
- throw IllegalStateException("ATNDeserializationOptions is read only.");
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.h
deleted file mode 100644
index 595f918649..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializationOptions.h
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
-class ANTLR4CPP_PUBLIC ATNDeserializationOptions final {
-public:
- ATNDeserializationOptions()
- : _readOnly(false), _verifyATN(true), _generateRuleBypassTransitions(false) {}
-
- // TODO: Is this useful? If so we should mark it as explicit, otherwise remove it.
- ATNDeserializationOptions(ATNDeserializationOptions *options);
-
- ATNDeserializationOptions(const ATNDeserializationOptions&) = default;
-
- ATNDeserializationOptions& operator=(const ATNDeserializationOptions&) = default;
-
- static const ATNDeserializationOptions& getDefaultOptions();
-
- bool isReadOnly() const { return _readOnly; }
-
- void makeReadOnly();
-
- bool isVerifyATN() const { return _verifyATN; }
-
- void setVerifyATN(bool verify);
-
- bool isGenerateRuleBypassTransitions() const { return _generateRuleBypassTransitions; }
-
- void setGenerateRuleBypassTransitions(bool generate);
-
-private:
- void throwIfReadOnly() const;
-
- bool _readOnly;
- bool _verifyATN;
- bool _generateRuleBypassTransitions;
-};
-
-} // namespace atn
-} // namespace antlr4
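A minimal sketch of configuring these options before passing them to a deserializer; it relies only on the setters shown, and makeVerboseOptions is an illustrative name. The setters throw IllegalStateException once makeReadOnly() has been called, so configuration has to happen first.

#include "atn/ATNDeserializationOptions.h"

// Keep ATN verification on (the default) and additionally request
// rule-bypass transitions, which are off by default.
antlr4::atn::ATNDeserializationOptions makeVerboseOptions() {
  antlr4::atn::ATNDeserializationOptions options;
  options.setVerifyATN(true);
  options.setGenerateRuleBypassTransitions(true);
  return options;
}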
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.cpp
deleted file mode 100644
index 2da3c32357..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.cpp
+++ /dev/null
@@ -1,628 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNDeserializationOptions.h"
-
-#include "atn/ATNType.h"
-#include "atn/ATNState.h"
-#include "atn/ATN.h"
-
-#include "atn/LoopEndState.h"
-#include "atn/DecisionState.h"
-#include "atn/RuleStartState.h"
-#include "atn/RuleStopState.h"
-#include "atn/TokensStartState.h"
-#include "atn/RuleTransition.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/PlusLoopbackState.h"
-#include "atn/PlusBlockStartState.h"
-#include "atn/StarLoopbackState.h"
-#include "atn/BasicBlockStartState.h"
-#include "atn/BasicState.h"
-#include "atn/BlockEndState.h"
-#include "atn/StarLoopEntryState.h"
-
-#include "atn/AtomTransition.h"
-#include "atn/StarBlockStartState.h"
-#include "atn/RangeTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/ActionTransition.h"
-#include "atn/SetTransition.h"
-#include "atn/NotSetTransition.h"
-#include "atn/WildcardTransition.h"
-#include "atn/TransitionType.h"
-#include "Token.h"
-
-#include "misc/IntervalSet.h"
-#include "Exceptions.h"
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "atn/LexerCustomAction.h"
-#include "atn/LexerChannelAction.h"
-#include "atn/LexerModeAction.h"
-#include "atn/LexerMoreAction.h"
-#include "atn/LexerPopModeAction.h"
-#include "atn/LexerPushModeAction.h"
-#include "atn/LexerSkipAction.h"
-#include "atn/LexerTypeAction.h"
-
-#include "atn/ATNDeserializer.h"
-
-#include <cassert>
-#include <string>
-#include <vector>
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
- void checkCondition(bool condition, std::string_view message) {
- if (!condition) {
- throw IllegalStateException(std::string(message));
- }
- }
-
- void checkCondition(bool condition) {
- checkCondition(condition, "");
- }
-
- /**
- * Analyze the {@link StarLoopEntryState} states in the specified ATN to set
- * the {@link StarLoopEntryState#isPrecedenceDecision} field to the
- * correct value.
- *
- * @param atn The ATN.
- */
- void markPrecedenceDecisions(const ATN &atn) {
- for (ATNState *state : atn.states) {
- if (!StarLoopEntryState::is(state)) {
- continue;
- }
-
- /* We analyze the ATN to determine if this ATN decision state is the
- * decision for the closure block that determines whether a
- * precedence rule should continue or complete.
- */
- if (atn.ruleToStartState[state->ruleIndex]->isLeftRecursiveRule) {
- ATNState *maybeLoopEndState = state->transitions[state->transitions.size() - 1]->target;
- if (LoopEndState::is(maybeLoopEndState)) {
- if (maybeLoopEndState->epsilonOnlyTransitions && RuleStopState::is(maybeLoopEndState->transitions[0]->target)) {
- downCast<StarLoopEntryState*>(state)->isPrecedenceDecision = true;
- }
- }
- }
- }
- }
-
- Ref<const LexerAction> lexerActionFactory(LexerActionType type, int data1, int data2) {
- switch (type) {
- case LexerActionType::CHANNEL:
- return std::make_shared<LexerChannelAction>(data1);
-
- case LexerActionType::CUSTOM:
- return std::make_shared<LexerCustomAction>(data1, data2);
-
- case LexerActionType::MODE:
- return std::make_shared< LexerModeAction>(data1);
-
- case LexerActionType::MORE:
- return LexerMoreAction::getInstance();
-
- case LexerActionType::POP_MODE:
- return LexerPopModeAction::getInstance();
-
- case LexerActionType::PUSH_MODE:
- return std::make_shared<LexerPushModeAction>(data1);
-
- case LexerActionType::SKIP:
- return LexerSkipAction::getInstance();
-
- case LexerActionType::TYPE:
- return std::make_shared<LexerTypeAction>(data1);
-
- default:
- throw IllegalArgumentException("The specified lexer action type " + std::to_string(static_cast<size_t>(type)) +
- " is not valid.");
- }
- }
-
- ConstTransitionPtr edgeFactory(const ATN &atn, TransitionType type, size_t trg, size_t arg1, size_t arg2,
- size_t arg3, const std::vector<misc::IntervalSet> &sets) {
- ATNState *target = atn.states[trg];
- switch (type) {
- case TransitionType::EPSILON:
- return std::make_unique<EpsilonTransition>(target);
- case TransitionType::RANGE:
- if (arg3 != 0) {
- return std::make_unique<RangeTransition>(target, Token::EOF, arg2);
- } else {
- return std::make_unique<RangeTransition>(target, arg1, arg2);
- }
- case TransitionType::RULE:
- return std::make_unique<RuleTransition>(downCast<RuleStartState*>(atn.states[arg1]), arg2, (int)arg3, target);
- case TransitionType::PREDICATE:
- return std::make_unique<PredicateTransition>(target, arg1, arg2, arg3 != 0);
- case TransitionType::PRECEDENCE:
- return std::make_unique<PrecedencePredicateTransition>(target, (int)arg1);
- case TransitionType::ATOM:
- if (arg3 != 0) {
- return std::make_unique<AtomTransition>(target, Token::EOF);
- } else {
- return std::make_unique<AtomTransition>(target, arg1);
- }
- case TransitionType::ACTION:
- return std::make_unique<ActionTransition>(target, arg1, arg2, arg3 != 0);
- case TransitionType::SET:
- return std::make_unique<SetTransition>(target, sets[arg1]);
- case TransitionType::NOT_SET:
- return std::make_unique<NotSetTransition>(target, sets[arg1]);
- case TransitionType::WILDCARD:
- return std::make_unique<WildcardTransition>(target);
- }
-
- throw IllegalArgumentException("The specified transition type is not valid.");
- }
-
- /* mem check: all created instances are freed in the d-tor of the ATN. */
- ATNState* stateFactory(ATNStateType type, size_t ruleIndex) {
- ATNState *s;
- switch (type) {
- case ATNStateType::INVALID:
- return nullptr;
- case ATNStateType::BASIC :
- s = new BasicState();
- break;
- case ATNStateType::RULE_START :
- s = new RuleStartState();
- break;
- case ATNStateType::BLOCK_START :
- s = new BasicBlockStartState();
- break;
- case ATNStateType::PLUS_BLOCK_START :
- s = new PlusBlockStartState();
- break;
- case ATNStateType::STAR_BLOCK_START :
- s = new StarBlockStartState();
- break;
- case ATNStateType::TOKEN_START :
- s = new TokensStartState();
- break;
- case ATNStateType::RULE_STOP :
- s = new RuleStopState();
- break;
- case ATNStateType::BLOCK_END :
- s = new BlockEndState();
- break;
- case ATNStateType::STAR_LOOP_BACK :
- s = new StarLoopbackState();
- break;
- case ATNStateType::STAR_LOOP_ENTRY :
- s = new StarLoopEntryState();
- break;
- case ATNStateType::PLUS_LOOP_BACK :
- s = new PlusLoopbackState();
- break;
- case ATNStateType::LOOP_END :
- s = new LoopEndState();
- break;
- default :
- std::string message = "The specified state type " + std::to_string(static_cast<size_t>(type)) + " is not valid.";
- throw IllegalArgumentException(message);
- }
- assert(s->getStateType() == type);
- s->ruleIndex = ruleIndex;
- return s;
- }
-
- ssize_t readUnicodeInt32(SerializedATNView data, int& p) {
- return static_cast<ssize_t>(data[p++]);
- }
-
- void deserializeSets(
- SerializedATNView data,
- int& p,
- std::vector<misc::IntervalSet>& sets) {
- size_t nsets = data[p++];
- sets.reserve(sets.size() + nsets);
- for (size_t i = 0; i < nsets; i++) {
- size_t nintervals = data[p++];
- misc::IntervalSet set;
-
- bool containsEof = data[p++] != 0;
- if (containsEof) {
- set.add(-1);
- }
-
- for (size_t j = 0; j < nintervals; j++) {
- auto a = readUnicodeInt32(data, p);
- auto b = readUnicodeInt32(data, p);
- set.add(a, b);
- }
- sets.push_back(set);
- }
- }
-
-}
-
-ATNDeserializer::ATNDeserializer() : ATNDeserializer(ATNDeserializationOptions::getDefaultOptions()) {}
-
-ATNDeserializer::ATNDeserializer(ATNDeserializationOptions deserializationOptions) : _deserializationOptions(std::move(deserializationOptions)) {}
-
-std::unique_ptr<ATN> ATNDeserializer::deserialize(SerializedATNView data) const {
- int p = 0;
- int version = data[p++];
- if (version != SERIALIZED_VERSION) {
- std::string reason = "Could not deserialize ATN with version" + std::to_string(version) + "(expected " + std::to_string(SERIALIZED_VERSION) + ").";
-
- throw UnsupportedOperationException(reason);
- }
-
- ATNType grammarType = (ATNType)data[p++];
- size_t maxTokenType = data[p++];
- auto atn = std::make_unique<ATN>(grammarType, maxTokenType);
-
- //
- // STATES
- //
- {
- std::vector<std::pair<LoopEndState*, size_t>> loopBackStateNumbers;
- std::vector<std::pair<BlockStartState*, size_t>> endStateNumbers;
- size_t nstates = data[p++];
- atn->states.reserve(nstates);
-    loopBackStateNumbers.reserve(nstates); // Reserve worst case size, it's short lived.
-    endStateNumbers.reserve(nstates); // Reserve worst case size, it's short lived.
- for (size_t i = 0; i < nstates; i++) {
- ATNStateType stype = static_cast<ATNStateType>(data[p++]);
- // ignore bad type of states
- if (stype == ATNStateType::INVALID) {
- atn->addState(nullptr);
- continue;
- }
-
- size_t ruleIndex = data[p++];
- ATNState *s = stateFactory(stype, ruleIndex);
- if (stype == ATNStateType::LOOP_END) { // special case
- int loopBackStateNumber = data[p++];
- loopBackStateNumbers.push_back({ downCast<LoopEndState*>(s), loopBackStateNumber });
- } else if (BlockStartState::is(s)) {
- int endStateNumber = data[p++];
- endStateNumbers.push_back({ downCast<BlockStartState*>(s), endStateNumber });
- }
- atn->addState(s);
- }
-
- // delay the assignment of loop back and end states until we know all the state instances have been initialized
- for (auto &pair : loopBackStateNumbers) {
- pair.first->loopBackState = atn->states[pair.second];
- }
-
- for (auto &pair : endStateNumbers) {
- pair.first->endState = downCast<BlockEndState*>(atn->states[pair.second]);
- }
- }
-
- size_t numNonGreedyStates = data[p++];
- for (size_t i = 0; i < numNonGreedyStates; i++) {
- size_t stateNumber = data[p++];
-    // The serialized ATN must specify the right states, so that the
-    // cast below is correct.
- downCast<DecisionState*>(atn->states[stateNumber])->nonGreedy = true;
- }
-
- size_t numPrecedenceStates = data[p++];
- for (size_t i = 0; i < numPrecedenceStates; i++) {
- size_t stateNumber = data[p++];
- downCast<RuleStartState*>(atn->states[stateNumber])->isLeftRecursiveRule = true;
- }
-
- //
- // RULES
- //
- size_t nrules = data[p++];
- atn->ruleToStartState.reserve(nrules);
- for (size_t i = 0; i < nrules; i++) {
- size_t s = data[p++];
-    // Here too, the serialized ATN must point to the correct class type.
- RuleStartState *startState = downCast<RuleStartState*>(atn->states[s]);
- atn->ruleToStartState.push_back(startState);
- if (atn->grammarType == ATNType::LEXER) {
- size_t tokenType = data[p++];
- atn->ruleToTokenType.push_back(tokenType);
- }
- }
-
- atn->ruleToStopState.resize(nrules);
- for (ATNState *state : atn->states) {
- if (!RuleStopState::is(state)) {
- continue;
- }
-
- RuleStopState *stopState = downCast<RuleStopState*>(state);
- atn->ruleToStopState[state->ruleIndex] = stopState;
- atn->ruleToStartState[state->ruleIndex]->stopState = stopState;
- }
-
- //
- // MODES
- //
- size_t nmodes = data[p++];
- atn->modeToStartState.reserve(nmodes);
- for (size_t i = 0; i < nmodes; i++) {
- size_t s = data[p++];
- atn->modeToStartState.push_back(downCast<TokensStartState*>(atn->states[s]));
- }
-
- //
- // SETS
- //
- {
- std::vector<misc::IntervalSet> sets;
-
- deserializeSets(data, p, sets);
- sets.shrink_to_fit();
-
- //
- // EDGES
- //
- int nedges = data[p++];
- for (int i = 0; i < nedges; i++) {
- size_t src = data[p];
- size_t trg = data[p + 1];
- TransitionType ttype = static_cast<TransitionType>(data[p + 2]);
- size_t arg1 = data[p + 3];
- size_t arg2 = data[p + 4];
- size_t arg3 = data[p + 5];
- ConstTransitionPtr trans = edgeFactory(*atn, ttype, trg, arg1, arg2, arg3, sets);
- ATNState *srcState = atn->states[src];
- srcState->addTransition(std::move(trans));
- p += 6;
- }
- }
- // edges for rule stop states can be derived, so they aren't serialized
- for (ATNState *state : atn->states) {
- for (size_t i = 0; i < state->transitions.size(); i++) {
- const Transition *t = state->transitions[i].get();
- if (!RuleTransition::is(t)) {
- continue;
- }
-
- const RuleTransition *ruleTransition = downCast<const RuleTransition*>(t);
- size_t outermostPrecedenceReturn = INVALID_INDEX;
- if (atn->ruleToStartState[ruleTransition->target->ruleIndex]->isLeftRecursiveRule) {
- if (ruleTransition->precedence == 0) {
- outermostPrecedenceReturn = ruleTransition->target->ruleIndex;
- }
- }
-
- ConstTransitionPtr returnTransition = std::make_unique<EpsilonTransition>(ruleTransition->followState, outermostPrecedenceReturn);
- atn->ruleToStopState[ruleTransition->target->ruleIndex]->addTransition(std::move(returnTransition));
- }
- }
-
- for (ATNState *state : atn->states) {
- if (BlockStartState::is(state)) {
- BlockStartState *startState = downCast<BlockStartState*>(state);
-
- // we need to know the end state to set its start state
- if (startState->endState == nullptr) {
- throw IllegalStateException();
- }
-
- // block end states can only be associated to a single block start state
- if (startState->endState->startState != nullptr) {
- throw IllegalStateException();
- }
-
- startState->endState->startState = downCast<BlockStartState*>(state);
- }
-
- if (PlusLoopbackState::is(state)) {
- PlusLoopbackState *loopbackState = downCast<PlusLoopbackState*>(state);
- for (size_t i = 0; i < loopbackState->transitions.size(); i++) {
- ATNState *target = loopbackState->transitions[i]->target;
- if (PlusBlockStartState::is(target)) {
- (downCast<PlusBlockStartState*>(target))->loopBackState = loopbackState;
- }
- }
- } else if (StarLoopbackState::is(state)) {
- StarLoopbackState *loopbackState = downCast<StarLoopbackState*>(state);
- for (size_t i = 0; i < loopbackState->transitions.size(); i++) {
- ATNState *target = loopbackState->transitions[i]->target;
- if (StarLoopEntryState::is(target)) {
- downCast<StarLoopEntryState*>(target)->loopBackState = loopbackState;
- }
- }
- }
- }
-
- //
- // DECISIONS
- //
- size_t ndecisions = data[p++];
- atn->decisionToState.reserve(ndecisions);
- for (size_t i = 0; i < ndecisions; i++) {
- size_t s = data[p++];
- DecisionState *decState = downCast<DecisionState*>(atn->states[s]);
- if (decState == nullptr)
- throw IllegalStateException();
-
- atn->decisionToState.push_back(decState);
- decState->decision = static_cast<int>(i);
- }
-
- //
- // LEXER ACTIONS
- //
- if (atn->grammarType == ATNType::LEXER) {
- atn->lexerActions.resize(data[p++]);
- for (size_t i = 0; i < atn->lexerActions.size(); i++) {
- LexerActionType actionType = static_cast<LexerActionType>(data[p++]);
- int data1 = data[p++];
- int data2 = data[p++];
- atn->lexerActions[i] = lexerActionFactory(actionType, data1, data2);
- }
- }
-
- markPrecedenceDecisions(*atn);
-
- if (_deserializationOptions.isVerifyATN()) {
- verifyATN(*atn);
- }
-
- if (_deserializationOptions.isGenerateRuleBypassTransitions() && atn->grammarType == ATNType::PARSER) {
- atn->ruleToTokenType.resize(atn->ruleToStartState.size());
- for (size_t i = 0; i < atn->ruleToStartState.size(); i++) {
- atn->ruleToTokenType[i] = static_cast<int>(atn->maxTokenType + i + 1);
- }
-
- for (std::vector<RuleStartState*>::size_type i = 0; i < atn->ruleToStartState.size(); i++) {
- BasicBlockStartState *bypassStart = new BasicBlockStartState(); /* mem check: freed in ATN d-tor */
- bypassStart->ruleIndex = static_cast<int>(i);
- atn->addState(bypassStart);
-
- BlockEndState *bypassStop = new BlockEndState(); /* mem check: freed in ATN d-tor */
- bypassStop->ruleIndex = static_cast<int>(i);
- atn->addState(bypassStop);
-
- bypassStart->endState = bypassStop;
- atn->defineDecisionState(bypassStart);
-
- bypassStop->startState = bypassStart;
-
- ATNState *endState;
- const Transition *excludeTransition = nullptr;
- if (atn->ruleToStartState[i]->isLeftRecursiveRule) {
- // wrap from the beginning of the rule to the StarLoopEntryState
- endState = nullptr;
- for (ATNState *state : atn->states) {
- if (state->ruleIndex != i) {
- continue;
- }
-
- if (!StarLoopEntryState::is(state)) {
- continue;
- }
-
- ATNState *maybeLoopEndState = state->transitions[state->transitions.size() - 1]->target;
- if (!LoopEndState::is(maybeLoopEndState)) {
- continue;
- }
-
- if (maybeLoopEndState->epsilonOnlyTransitions && RuleStopState::is(maybeLoopEndState->transitions[0]->target)) {
- endState = state;
- break;
- }
- }
-
- if (endState == nullptr) {
- throw UnsupportedOperationException("Couldn't identify final state of the precedence rule prefix section.");
- }
-
- excludeTransition = (static_cast<StarLoopEntryState*>(endState))->loopBackState->transitions[0].get();
- } else {
- endState = atn->ruleToStopState[i];
- }
-
- // all non-excluded transitions that currently target end state need to target blockEnd instead
- for (ATNState *state : atn->states) {
- for (auto &transition : state->transitions) {
- if (transition.get() == excludeTransition) {
- continue;
- }
-
- if (transition->target == endState) {
- const_cast<Transition*>(transition.get())->target = bypassStop;
- }
- }
- }
-
- // all transitions leaving the rule start state need to leave blockStart instead
- while (atn->ruleToStartState[i]->transitions.size() > 0) {
- ConstTransitionPtr transition = atn->ruleToStartState[i]->removeTransition(atn->ruleToStartState[i]->transitions.size() - 1);
- bypassStart->addTransition(std::move(transition));
- }
-
- // link the new states
- atn->ruleToStartState[i]->addTransition(std::make_unique<EpsilonTransition>(bypassStart));
- bypassStop->addTransition(std::make_unique<EpsilonTransition>(endState));
-
- ATNState *matchState = new BasicState(); /* mem check: freed in ATN d-tor */
- atn->addState(matchState);
- matchState->addTransition(std::make_unique<AtomTransition>(bypassStop, atn->ruleToTokenType[i]));
- bypassStart->addTransition(std::make_unique<EpsilonTransition>(matchState));
- }
-
- if (_deserializationOptions.isVerifyATN()) {
- // reverify after modification
- verifyATN(*atn);
- }
- }
-
- return atn;
-}
-
-void ATNDeserializer::verifyATN(const ATN &atn) const {
- // verify assumptions
- for (ATNState *state : atn.states) {
- if (state == nullptr) {
- continue;
- }
-
- checkCondition(state->epsilonOnlyTransitions || state->transitions.size() <= 1);
-
- if (PlusBlockStartState::is(state)) {
- checkCondition((downCast<PlusBlockStartState*>(state))->loopBackState != nullptr);
- }
-
- if (StarLoopEntryState::is(state)) {
- StarLoopEntryState *starLoopEntryState = downCast<StarLoopEntryState*>(state);
- checkCondition(starLoopEntryState->loopBackState != nullptr);
- checkCondition(starLoopEntryState->transitions.size() == 2);
-
- if (StarBlockStartState::is(starLoopEntryState->transitions[0]->target)) {
- checkCondition(downCast<LoopEndState*>(starLoopEntryState->transitions[1]->target) != nullptr);
- checkCondition(!starLoopEntryState->nonGreedy);
- } else if (LoopEndState::is(starLoopEntryState->transitions[0]->target)) {
- checkCondition(StarBlockStartState::is(starLoopEntryState->transitions[1]->target));
- checkCondition(starLoopEntryState->nonGreedy);
- } else {
- throw IllegalStateException();
- }
- }
-
- if (StarLoopbackState::is(state)) {
- checkCondition(state->transitions.size() == 1);
- checkCondition(StarLoopEntryState::is(state->transitions[0]->target));
- }
-
- if (LoopEndState::is(state)) {
- checkCondition((downCast<LoopEndState*>(state))->loopBackState != nullptr);
- }
-
- if (RuleStartState::is(state)) {
- checkCondition((downCast<RuleStartState*>(state))->stopState != nullptr);
- }
-
- if (BlockStartState::is(state)) {
- checkCondition((downCast<BlockStartState*>(state))->endState != nullptr);
- }
-
- if (BlockEndState::is(state)) {
- checkCondition((downCast<BlockEndState*>(state))->startState != nullptr);
- }
-
- if (DecisionState::is(state)) {
- DecisionState *decisionState = downCast<DecisionState*>(state);
- checkCondition(decisionState->transitions.size() <= 1 || decisionState->decision >= 0);
- } else {
- checkCondition(state->transitions.size() <= 1 || RuleStopState::is(state));
- }
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.h
deleted file mode 100644
index 3cd56b9cdf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNDeserializer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNDeserializationOptions.h"
-#include "atn/SerializedATNView.h"
-#include "atn/LexerAction.h"
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC ATNDeserializer final {
- public:
- static constexpr size_t SERIALIZED_VERSION = 4;
-
- ATNDeserializer();
-
- explicit ATNDeserializer(ATNDeserializationOptions deserializationOptions);
-
- std::unique_ptr<ATN> deserialize(SerializedATNView input) const;
- void verifyATN(const ATN &atn) const;
-
- private:
- const ATNDeserializationOptions _deserializationOptions;
- };
-
-} // namespace atn
-} // namespace antlr4
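
A minimal usage sketch for the deserializer API above. It assumes the serialized ATN is the std::vector<int32_t> blob that ANTLR-generated parsers and lexers embed (SerializedATNView is constructible from such a vector); the data itself has to come from the ANTLR tool and is not reproduced here.

#include <cstdint>
#include <memory>
#include <vector>

#include "atn/ATN.h"
#include "atn/ATNDeserializer.h"

using namespace antlr4::atn;

// serializedATN: the int32_t blob emitted by the ANTLR tool into generated code.
std::unique_ptr<ATN> loadATN(const std::vector<int32_t> &serializedATN) {
  ATNDeserializer deserializer;  // uses ATNDeserializationOptions::getDefaultOptions()
  return deserializer.deserialize(serializedATN);
}
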
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.cpp
deleted file mode 100644
index 04e1af992e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNSimulator.h"
-
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNDeserializer.h"
-#include "atn/ATNType.h"
-#include "dfa/DFAState.h"
-
-using namespace antlr4;
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-
-const Ref<DFAState> ATNSimulator::ERROR = std::make_shared<DFAState>(std::numeric_limits<int>::max());
-
-ATNSimulator::ATNSimulator(const ATN &atn, PredictionContextCache &sharedContextCache)
- : atn(atn), _sharedContextCache(sharedContextCache) {}
-
-void ATNSimulator::clearDFA() {
- throw UnsupportedOperationException("This ATN simulator does not support clearing the DFA.");
-}
-
-PredictionContextCache& ATNSimulator::getSharedContextCache() const {
- return _sharedContextCache;
-}
-
-Ref<const PredictionContext> ATNSimulator::getCachedContext(const Ref<const PredictionContext> &context) {
- // This function must only be called with an active state lock, as we are going to change a shared structure.
- return PredictionContext::getCachedContext(context, getSharedContextCache());
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.h
deleted file mode 100644
index b14939e219..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNSimulator.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATN.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextCache.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC ATNSimulator {
- public:
- /// Must distinguish between missing edge and edge we know leads nowhere.
- static const Ref<dfa::DFAState> ERROR;
- const ATN &atn;
-
- ATNSimulator(const ATN &atn, PredictionContextCache &sharedContextCache);
-
- virtual ~ATNSimulator() = default;
-
- virtual void reset() = 0;
-
- /**
- * Clear the DFA cache used by the current instance. Since the DFA cache may
- * be shared by multiple ATN simulators, this method may affect the
- * performance (but not accuracy) of other parsers which are being used
- * concurrently.
- *
- * @throws UnsupportedOperationException if the current instance does not
- * support clearing the DFA.
- *
- * @since 4.3
- */
- virtual void clearDFA();
-
- PredictionContextCache& getSharedContextCache() const;
- Ref<const PredictionContext> getCachedContext(const Ref<const PredictionContext> &context);
-
- protected:
- /// <summary>
- /// The context cache maps all PredictionContext objects that are equals()
- /// to a single cached copy. This cache is shared across all contexts
- /// in all ATNConfigs in all DFA states. We rebuild each ATNConfigSet
- /// to use only cached nodes/graphs in addDFAState(). We don't want to
- /// fill this during closure() since there are lots of contexts that
- /// pop up but are not used ever again. It also greatly slows down closure().
- /// <p/>
- /// This cache makes a huge difference in memory and a little bit in speed.
- /// For the Java grammar on java.*, it dropped the memory requirements
- /// at the end from 25M to 16M. We don't store any of the full context
- /// graphs in the DFA because they are limited to local context only,
- /// but apparently there's a lot of repetition there as well. We optimize
- /// the config contexts before storing the config set in the DFA states
- /// by literally rebuilding them with cached subgraphs only.
- /// <p/>
- /// I tried a cache for use during closure operations, that was
- /// whacked after each adaptivePredict(). It cost a little bit
- /// more time I think and doesn't save on the overall footprint
- /// so it's not worth the complexity.
- /// </summary>
- PredictionContextCache &_sharedContextCache;
- };
-
-} // namespace atn
-} // namespace antlr4
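
Because the base implementation of clearDFA() above throws, code that only holds an ATNSimulator reference may want to guard the call. A small sketch, assuming UnsupportedOperationException from Exceptions.h as used elsewhere in this runtime:

#include <iostream>

#include "Exceptions.h"
#include "atn/ATNSimulator.h"

// Clears the simulator's DFA cache if the concrete simulator supports it.
// As documented above, this can slow down other parsers sharing the cache,
// but does not affect their correctness.
void tryClearDFA(antlr4::atn::ATNSimulator &simulator) {
  try {
    simulator.clearDFA();
  } catch (const antlr4::UnsupportedOperationException &) {
    std::cerr << "simulator does not support clearing its DFA\n";
  }
}
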
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.cpp
deleted file mode 100644
index 29911901be..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-#include "atn/Transition.h"
-#include "misc/IntervalSet.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ATNState.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-size_t ATNState::hashCode() const {
- return stateNumber;
-}
-
-bool ATNState::equals(const ATNState &other) const {
- return stateNumber == other.stateNumber;
-}
-
-bool ATNState::isNonGreedyExitState() const {
- return false;
-}
-
-std::string ATNState::toString() const {
- return std::to_string(stateNumber);
-}
-
-void ATNState::addTransition(ConstTransitionPtr e) {
- addTransition(transitions.size(), std::move(e));
-}
-
-void ATNState::addTransition(size_t index, ConstTransitionPtr e) {
- for (const auto &transition : transitions)
- if (transition->target->stateNumber == e->target->stateNumber) {
- return;
- }
-
- if (transitions.empty()) {
- epsilonOnlyTransitions = e->isEpsilon();
- } else if (epsilonOnlyTransitions != e->isEpsilon()) {
- std::cerr << "ATN state " << stateNumber << " has both epsilon and non-epsilon transitions.\n";
- epsilonOnlyTransitions = false;
- }
-
- transitions.insert(transitions.begin() + index, std::move(e));
-}
-
-ConstTransitionPtr ATNState::removeTransition(size_t index) {
- ConstTransitionPtr result = std::move(transitions[index]);
- transitions.erase(transitions.begin() + index);
- return result;
-}
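
addTransition() above silently drops a transition whose target state number is already present and tracks whether the state stays epsilon-only. A minimal sketch; in real use states are owned and numbered by an ATN, here they live on the stack purely for illustration:

#include <memory>

#include "atn/BasicState.h"
#include "atn/EpsilonTransition.h"

using namespace antlr4::atn;

int main() {
  BasicState source;
  BasicState target;

  source.addTransition(std::make_unique<EpsilonTransition>(&target));
  // A second transition to the same target state number is silently dropped.
  source.addTransition(std::make_unique<EpsilonTransition>(&target));

  bool ok = source.transitions.size() == 1 && source.epsilonOnlyTransitions;
  return ok ? 0 : 1;
}
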
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.h
deleted file mode 100644
index 7613f40eee..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNState.h
+++ /dev/null
@@ -1,139 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/IntervalSet.h"
-#include "atn/Transition.h"
-#include "atn/ATNStateType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// The following images show the relation of states and
- /// <seealso cref="ATNState#transitions"/> for various grammar constructs.
- ///
- /// <ul>
- ///
- /// <li>Solid edges marked with an &#0949; indicate a required
- /// <seealso cref="EpsilonTransition"/>.</li>
- ///
- /// <li>Dashed edges indicate locations where any transition derived from
- /// <seealso cref="Transition"/> might appear.</li>
- ///
- /// <li>Dashed nodes are placeholders for either a sequence of linked
- /// <seealso cref="BasicState"/> states or the inclusion of a block representing a nested
- /// construct in one of the forms below.</li>
- ///
- /// <li>Nodes showing multiple outgoing alternatives with a {@code ...} support
- /// any number of alternatives (one or more). Nodes without the {@code ...} only
- /// support the exact number of alternatives shown in the diagram.</li>
- ///
- /// </ul>
- ///
- /// <h2>Basic Blocks</h2>
- ///
- /// <h3>Rule</h3>
- ///
- /// <embed src="images/Rule.svg" type="image/svg+xml"/>
- ///
- /// <h3>Block of 1 or more alternatives</h3>
- ///
- /// <embed src="images/Block.svg" type="image/svg+xml"/>
- ///
- /// <h2>Greedy Loops</h2>
- ///
- /// <h3>Greedy Closure: {@code (...)*}</h3>
- ///
- /// <embed src="images/ClosureGreedy.svg" type="image/svg+xml"/>
- ///
- /// <h3>Greedy Positive Closure: {@code (...)+}</h3>
- ///
- /// <embed src="images/PositiveClosureGreedy.svg" type="image/svg+xml"/>
- ///
- /// <h3>Greedy Optional: {@code (...)?}</h3>
- ///
- /// <embed src="images/OptionalGreedy.svg" type="image/svg+xml"/>
- ///
- /// <h2>Non-Greedy Loops</h2>
- ///
- /// <h3>Non-Greedy Closure: {@code (...)*?}</h3>
- ///
- /// <embed src="images/ClosureNonGreedy.svg" type="image/svg+xml"/>
- ///
- /// <h3>Non-Greedy Positive Closure: {@code (...)+?}</h3>
- ///
- /// <embed src="images/PositiveClosureNonGreedy.svg" type="image/svg+xml"/>
- ///
- /// <h3>Non-Greedy Optional: {@code (...)??}</h3>
- ///
- /// <embed src="images/OptionalNonGreedy.svg" type="image/svg+xml"/>
- /// </summary>
-
-// GCC generates a warning here if ATN has already been declared due to the
-// attributes added by ANTLR4CPP_PUBLIC.
-// See: https://gcc.gnu.org/bugzilla/show_bug.cgi?id=39159
-// Only forward-declare if it hasn't already been declared.
-#ifndef ANTLR4CPP_ATN_DECLARED
- class ANTLR4CPP_PUBLIC ATN;
-#endif
-
- class ANTLR4CPP_PUBLIC ATNState {
- public:
- static constexpr size_t INITIAL_NUM_TRANSITIONS = 4;
- static constexpr size_t INVALID_STATE_NUMBER = std::numeric_limits<size_t>::max();
-
- size_t stateNumber = INVALID_STATE_NUMBER;
- size_t ruleIndex = 0; // at runtime, we don't have Rule objects
- bool epsilonOnlyTransitions = false;
-
- /// Track the transitions emanating from this ATN state.
- std::vector<ConstTransitionPtr> transitions;
-
- ATNState() = delete;
-
- ATNState(ATNState const&) = delete;
-
- ATNState(ATNState&&) = delete;
-
- virtual ~ATNState() = default;
-
- ATNState& operator=(ATNState const&) = delete;
-
- ATNState& operator=(ATNState&&) = delete;
-
- void addTransition(ConstTransitionPtr e);
- void addTransition(size_t index, ConstTransitionPtr e);
- ConstTransitionPtr removeTransition(size_t index);
-
- virtual size_t hashCode() const;
- virtual bool equals(const ATNState &other) const;
-
- virtual bool isNonGreedyExitState() const;
- virtual std::string toString() const;
-
- ATNStateType getStateType() const { return _stateType; }
-
- protected:
- explicit ATNState(ATNStateType stateType) : _stateType(stateType) {}
-
- private:
- /// Used to cache lookahead during parsing, not used during construction.
-
- misc::IntervalSet _nextTokenWithinRule;
- std::atomic<bool> _nextTokenUpdated { false };
-
- const ATNStateType _stateType;
-
- friend class ATN;
- };
-
- inline bool operator==(const ATNState &lhs, const ATNState &rhs) { return lhs.equals(rhs); }
-
- inline bool operator!=(const ATNState &lhs, const ATNState &rhs) { return !operator==(lhs, rhs); }
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.cpp
deleted file mode 100644
index 577e2af87c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-#include "atn/ATNStateType.h"
-
-std::string antlr4::atn::atnStateTypeName(ATNStateType atnStateType) {
- switch (atnStateType) {
- case ATNStateType::INVALID:
- return "INVALID";
- case ATNStateType::BASIC:
- return "BASIC";
- case ATNStateType::RULE_START:
- return "RULE_START";
- case ATNStateType::BLOCK_START:
- return "BLOCK_START";
- case ATNStateType::PLUS_BLOCK_START:
- return "PLUS_BLOCK_START";
- case ATNStateType::STAR_BLOCK_START:
- return "STAR_BLOCK_START";
- case ATNStateType::TOKEN_START:
- return "TOKEN_START";
- case ATNStateType::RULE_STOP:
- return "RULE_STOP";
- case ATNStateType::BLOCK_END:
- return "BLOCK_END";
- case ATNStateType::STAR_LOOP_BACK:
- return "STAR_LOOP_BACK";
- case ATNStateType::STAR_LOOP_ENTRY:
- return "STAR_LOOP_ENTRY";
- case ATNStateType::PLUS_LOOP_BACK:
- return "PLUS_LOOP_BACK";
- case ATNStateType::LOOP_END:
- return "LOOP_END";
- }
- return "UNKNOWN";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.h
deleted file mode 100644
index e19b2cce92..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNStateType.h
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-#include <string>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- // Constants for ATNState serialization.
- enum class ATNStateType : size_t {
- INVALID = 0,
- BASIC = 1,
- RULE_START = 2,
- BLOCK_START = 3,
- PLUS_BLOCK_START = 4,
- STAR_BLOCK_START = 5,
- TOKEN_START = 6,
- RULE_STOP = 7,
- BLOCK_END = 8,
- STAR_LOOP_BACK = 9,
- STAR_LOOP_ENTRY = 10,
- PLUS_LOOP_BACK = 11,
- LOOP_END = 12,
- };
-
- ANTLR4CPP_PUBLIC std::string atnStateTypeName(ATNStateType atnStateType);
-
-} // namespace atn
-} // namespace antlr4
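
atnStateTypeName() simply maps each enumerator to its name, which is convenient when logging states. For example:

#include <iostream>
#include <string>

#include "atn/ATNStateType.h"

int main() {
  using antlr4::atn::ATNStateType;
  using antlr4::atn::atnStateTypeName;

  std::cout << atnStateTypeName(ATNStateType::RULE_START) << "\n";  // prints RULE_START
  std::cout << atnStateTypeName(ATNStateType::LOOP_END) << "\n";    // prints LOOP_END
  return 0;
}
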
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ATNType.h
deleted file mode 100644
index 3530ef6051..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ATNType.h
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Represents the type of recognizer an ATN applies to.
- enum class ATNType {
- LEXER = 0,
- PARSER = 1,
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.cpp
deleted file mode 100644
index 1886b7e169..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ActionTransition.h"
-
-using namespace antlr4::atn;
-
-ActionTransition::ActionTransition(ATNState *target, size_t ruleIndex)
- : Transition(TransitionType::ACTION, target), ruleIndex(ruleIndex), actionIndex(INVALID_INDEX), isCtxDependent(false) {
-}
-
-ActionTransition::ActionTransition(ATNState *target, size_t ruleIndex, size_t actionIndex, bool isCtxDependent)
- : Transition(TransitionType::ACTION, target), ruleIndex(ruleIndex), actionIndex(actionIndex), isCtxDependent(isCtxDependent) {
-}
-
-bool ActionTransition::isEpsilon() const {
- return true; // action transitions are ignored by analysis, except for predicates
-}
-
-bool ActionTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string ActionTransition::toString() const {
- return " ACTION " + Transition::toString() + " { ruleIndex: " + std::to_string(ruleIndex) + ", actionIndex: " +
- std::to_string(actionIndex) + ", isCtxDependent: " + std::to_string(isCtxDependent) + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.h
deleted file mode 100644
index 1700297a78..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ActionTransition.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC ActionTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::ACTION; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- const size_t ruleIndex;
- const size_t actionIndex;
- const bool isCtxDependent; // e.g., $i ref in action
-
- ActionTransition(ATNState *target, size_t ruleIndex);
-
- ActionTransition(ATNState *target, size_t ruleIndex, size_t actionIndex, bool isCtxDependent);
-
- virtual bool isEpsilon() const override;
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
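
As the implementation above shows, an action transition reports itself as epsilon and never matches an input symbol. A minimal check:

#include "atn/ActionTransition.h"
#include "atn/BasicState.h"

using namespace antlr4::atn;

int main() {
  BasicState target;
  ActionTransition action(&target, /*ruleIndex=*/0, /*actionIndex=*/3, /*isCtxDependent=*/false);

  bool ok = action.isEpsilon()               // always true for action transitions
         && !action.matches('a', 0, 0xFFFF); // actions never match input symbols
  return ok ? 0 : 1;
}
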
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.cpp
deleted file mode 100644
index 72ce922633..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/AmbiguityInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-AmbiguityInfo::AmbiguityInfo(size_t decision, ATNConfigSet *configs, const antlrcpp::BitSet &ambigAlts,
- TokenStream *input, size_t startIndex, size_t stopIndex, bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex, fullCtx) {
-
- this->ambigAlts = ambigAlts;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.h
deleted file mode 100644
index db594a1f48..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/AmbiguityInfo.h
+++ /dev/null
@@ -1,68 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This class represents profiling event information for an ambiguity.
- /// Ambiguities are decisions where a particular input resulted in an SLL
- /// conflict, followed by LL prediction also reaching a conflict state
- /// (indicating a true ambiguity in the grammar).
- ///
- /// <para>
- /// This event may be reported during SLL prediction in cases where the
- /// conflicting SLL configuration set provides sufficient information to
- /// determine that the SLL conflict is truly an ambiguity. For example, if none
- /// of the ATN configurations in the conflicting SLL configuration set have
- /// traversed a global follow transition (i.e.
- /// <seealso cref="ATNConfig#reachesIntoOuterContext"/> is 0 for all configurations), then
- /// the result of SLL prediction for that input is known to be equivalent to the
- /// result of LL prediction for that input.</para>
- ///
- /// <para>
- /// In some cases, the minimum represented alternative in the conflicting LL
- /// configuration set is not equal to the minimum represented alternative in the
- /// conflicting SLL configuration set. Grammars and inputs which result in this
- /// scenario are unable to use <seealso cref="PredictionMode#SLL"/>, which in turn means
- /// they cannot use the two-stage parsing strategy to improve parsing performance
- /// for that input.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#reportAmbiguity </seealso>
- /// <seealso cref= ANTLRErrorListener#reportAmbiguity
- ///
- /// @since 4.3 </seealso>
- class ANTLR4CPP_PUBLIC AmbiguityInfo : public DecisionEventInfo {
- public:
- /// The set of alternative numbers for this decision event that lead to a valid parse.
- antlrcpp::BitSet ambigAlts;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="AmbiguityInfo"/> class with the
- /// specified detailed ambiguity information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set identifying the ambiguous
- /// alternatives for the current input </param>
- /// <param name="ambigAlts"> The set of alternatives in the decision that lead to a valid parse.
- /// The predicted alt is the min(ambigAlts) </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the ambiguity was identified during
- /// prediction </param>
- /// <param name="fullCtx"> {@code true} if the ambiguity was identified during LL
- /// prediction; otherwise, {@code false} if the ambiguity was identified
- /// during SLL prediction </param>
- AmbiguityInfo(size_t decision, ATNConfigSet *configs, const antlrcpp::BitSet &ambigAlts, TokenStream *input,
- size_t startIndex, size_t stopIndex, bool fullCtx);
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.cpp
deleted file mode 100644
index e9478001b4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.cpp
+++ /dev/null
@@ -1,109 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ArrayPredictionContext.h"
-
-#include <cstring>
-
-#include "atn/SingletonPredictionContext.h"
-#include "misc/MurmurHash.h"
-#include "support/Casts.h"
-
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-namespace {
-
- bool cachedHashCodeEqual(size_t lhs, size_t rhs) {
- return lhs == rhs || lhs == 0 || rhs == 0;
- }
-
- bool predictionContextEqual(const Ref<const PredictionContext> &lhs, const Ref<const PredictionContext> &rhs) {
- return *lhs == *rhs;
- }
-
-}
-
-ArrayPredictionContext::ArrayPredictionContext(const SingletonPredictionContext &predictionContext)
- : ArrayPredictionContext({ predictionContext.parent }, { predictionContext.returnState }) {}
-
-ArrayPredictionContext::ArrayPredictionContext(std::vector<Ref<const PredictionContext>> parents,
- std::vector<size_t> returnStates)
- : PredictionContext(PredictionContextType::ARRAY), parents(std::move(parents)), returnStates(std::move(returnStates)) {
- assert(this->parents.size() > 0);
- assert(this->returnStates.size() > 0);
- assert(this->parents.size() == this->returnStates.size());
-}
-
-bool ArrayPredictionContext::isEmpty() const {
- // Since EMPTY_RETURN_STATE can only appear in the last position, we don't need to verify that size == 1.
- return returnStates[0] == EMPTY_RETURN_STATE;
-}
-
-size_t ArrayPredictionContext::size() const {
- return returnStates.size();
-}
-
-const Ref<const PredictionContext>& ArrayPredictionContext::getParent(size_t index) const {
- return parents[index];
-}
-
-size_t ArrayPredictionContext::getReturnState(size_t index) const {
- return returnStates[index];
-}
-
-size_t ArrayPredictionContext::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getContextType()));
- for (const auto &parent : parents) {
- hash = MurmurHash::update(hash, parent);
- }
- for (const auto &returnState : returnStates) {
- hash = MurmurHash::update(hash, returnState);
- }
- return MurmurHash::finish(hash, 1 + parents.size() + returnStates.size());
-}
-
-bool ArrayPredictionContext::equals(const PredictionContext &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const auto &array = downCast<const ArrayPredictionContext&>(other);
- return returnStates.size() == array.returnStates.size() &&
- parents.size() == array.parents.size() &&
- cachedHashCodeEqual(cachedHashCode(), array.cachedHashCode()) &&
- std::memcmp(returnStates.data(), array.returnStates.data(), returnStates.size() * sizeof(decltype(returnStates)::value_type)) == 0 &&
- std::equal(parents.begin(), parents.end(), array.parents.begin(), predictionContextEqual);
-}
-
-std::string ArrayPredictionContext::toString() const {
- if (isEmpty()) {
- return "[]";
- }
-
- std::stringstream ss;
- ss << "[";
- for (size_t i = 0; i < returnStates.size(); i++) {
- if (i > 0) {
- ss << ", ";
- }
- if (returnStates[i] == EMPTY_RETURN_STATE) {
- ss << "$";
- continue;
- }
- ss << returnStates[i];
- if (parents[i] != nullptr) {
- ss << " " << parents[i]->toString();
- } else {
- ss << "null";
- }
- }
- ss << "]";
- return ss.str();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.h
deleted file mode 100644
index f43db98a01..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ArrayPredictionContext.h
+++ /dev/null
@@ -1,51 +0,0 @@
-
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-
-namespace antlr4 {
-namespace atn {
-
- class SingletonPredictionContext;
-
- class ANTLR4CPP_PUBLIC ArrayPredictionContext final : public PredictionContext {
- public:
- static bool is(const PredictionContext &predictionContext) { return predictionContext.getContextType() == PredictionContextType::ARRAY; }
-
- static bool is(const PredictionContext *predictionContext) { return predictionContext != nullptr && is(*predictionContext); }
-
- /// A parent can be null only in full-context mode, when we build an array
- /// from EMPTY and a non-empty context. EMPTY is merged in by using a null parent
- /// and returnState == EMPTY_RETURN_STATE.
- // As elsewhere, we keep a strong reference to our parents to avoid having them freed prematurely.
- // See also SingletonPredictionContext.
- std::vector<Ref<const PredictionContext>> parents;
-
- /// Sorted for merge, no duplicates; if present, EMPTY_RETURN_STATE is always last.
- std::vector<size_t> returnStates;
-
- explicit ArrayPredictionContext(const SingletonPredictionContext &predictionContext);
-
- ArrayPredictionContext(std::vector<Ref<const PredictionContext>> parents, std::vector<size_t> returnStates);
-
- ArrayPredictionContext(ArrayPredictionContext&&) = default;
-
- bool isEmpty() const override;
- size_t size() const override;
- const Ref<const PredictionContext>& getParent(size_t index) const override;
- size_t getReturnState(size_t index) const override;
- bool equals(const PredictionContext &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.cpp
deleted file mode 100644
index 74153bf5cd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/IntervalSet.h"
-#include "atn/Transition.h"
-
-#include "atn/AtomTransition.h"
-
-using namespace antlr4::misc;
-using namespace antlr4::atn;
-
-AtomTransition::AtomTransition(ATNState *target, size_t label) : Transition(TransitionType::ATOM, target), _label(label) {
-}
-
-IntervalSet AtomTransition::label() const {
- return IntervalSet::of((int)_label);
-}
-
-bool AtomTransition::matches(size_t symbol, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return _label == symbol;
-}
-
-std::string AtomTransition::toString() const {
- return "ATOM " + Transition::toString() + " { label: " + std::to_string(_label) + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.h
deleted file mode 100644
index db62a7feab..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/AtomTransition.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// TODO: make all transitions sets? no, should remove set edges.
- class ANTLR4CPP_PUBLIC AtomTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::ATOM; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- /// The token type or character value, or a value that signifies a special label.
- /// TODO: rename this to label
- const size_t _label;
-
- AtomTransition(ATNState *target, size_t label);
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
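
An atom transition matches exactly its single label and exposes it through label() as a one-element IntervalSet. A minimal sketch, assuming IntervalSet::contains() from misc/IntervalSet.h:

#include "atn/AtomTransition.h"
#include "atn/BasicState.h"
#include "misc/IntervalSet.h"

using namespace antlr4::atn;

int main() {
  BasicState target;
  AtomTransition atom(&target, /*label=*/42);

  bool ok = atom.matches(42, 0, 0xFFFF)      // only the exact label matches
         && !atom.matches(43, 0, 0xFFFF)
         && atom.label().contains(42);       // label() is the single-element set {42}
  return ok ? 0 : 1;
}
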
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/BasicBlockStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/BasicBlockStartState.h
deleted file mode 100644
index 1c462ec0eb..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/BasicBlockStartState.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC BasicBlockStartState final : public BlockStartState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BLOCK_START; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- BasicBlockStartState() : BlockStartState(ATNStateType::BLOCK_START) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/BasicState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/BasicState.h
deleted file mode 100644
index 7f8a9ef0dd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/BasicState.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC BasicState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BASIC; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- BasicState() : ATNState(ATNStateType::BASIC) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/BlockEndState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/BlockEndState.h
deleted file mode 100644
index 11ef5499ba..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/BlockEndState.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Terminal node of a simple {@code (a|b|c)} block.
- class ANTLR4CPP_PUBLIC BlockEndState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::BLOCK_END; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- BlockStartState *startState = nullptr;
-
- BlockEndState() : ATNState(ATNStateType::BLOCK_END) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/BlockStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/BlockStartState.h
deleted file mode 100644
index 3475115894..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/BlockStartState.h
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// The start of a regular {@code (...)} block.
- class ANTLR4CPP_PUBLIC BlockStartState : public DecisionState {
- public:
- static bool is(const ATNState &atnState) {
- const auto stateType = atnState.getStateType();
- return stateType >= ATNStateType::BLOCK_START && stateType <= ATNStateType::STAR_BLOCK_START;
- }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- BlockEndState *endState = nullptr;
-
- protected:
- using DecisionState::DecisionState;
- };
-
-} // namespace atn
-} // namespace antlr4
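
The range check in is() relies on BLOCK_START, PLUS_BLOCK_START and STAR_BLOCK_START being contiguous in ATNStateType (values 3 through 5 above). A minimal sketch:

#include "atn/BasicBlockStartState.h"
#include "atn/BlockEndState.h"
#include "atn/BlockStartState.h"

using namespace antlr4::atn;

int main() {
  BasicBlockStartState start;   // state type BLOCK_START
  BlockEndState end;            // state type BLOCK_END

  bool ok = BlockStartState::is(&start)     // BLOCK_START falls inside the range check
         && !BlockStartState::is(&end)      // BLOCK_END does not
         && !BlockStartState::is(nullptr);  // the pointer overload is null-safe
  return ok ? 0 : 1;
}
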
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.cpp
deleted file mode 100644
index 12442a9bc0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ContextSensitivityInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-ContextSensitivityInfo::ContextSensitivityInfo(size_t decision, ATNConfigSet *configs, TokenStream *input,
- size_t startIndex, size_t stopIndex)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex, true) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.h
deleted file mode 100644
index 430ce3b6e8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ContextSensitivityInfo.h
+++ /dev/null
@@ -1,47 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This class represents profiling event information for a context sensitivity.
- /// Context sensitivities are decisions where a particular input resulted in an
- /// SLL conflict, but LL prediction produced a single unique alternative.
- ///
- /// <para>
- /// In some cases, the unique alternative identified by LL prediction is not
- /// equal to the minimum represented alternative in the conflicting SLL
- /// configuration set. Grammars and inputs which result in this scenario are
- /// unable to use <seealso cref="PredictionMode#SLL"/>, which in turn means they cannot use
- /// the two-stage parsing strategy to improve parsing performance for that
- /// input.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#reportContextSensitivity </seealso>
- /// <seealso cref= ANTLRErrorListener#reportContextSensitivity
- ///
- /// @since 4.3 </seealso>
- class ANTLR4CPP_PUBLIC ContextSensitivityInfo : public DecisionEventInfo {
- public:
- /// <summary>
- /// Constructs a new instance of the <seealso cref="ContextSensitivityInfo"/> class
- /// with the specified detailed context sensitivity information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set containing the unique
- /// alternative identified by full-context prediction </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the context sensitivity was
- /// identified during full-context prediction </param>
- ContextSensitivityInfo(size_t decision, ATNConfigSet *configs, TokenStream *input, size_t startIndex, size_t stopIndex);
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.cpp
deleted file mode 100644
index bca6c778c0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.cpp
+++ /dev/null
@@ -1,14 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/DecisionEventInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-DecisionEventInfo::DecisionEventInfo(size_t decision, ATNConfigSet *configs, TokenStream *input, size_t startIndex,
- size_t stopIndex, bool fullCtx)
- : decision(decision), configs(configs), input(input), startIndex(startIndex), stopIndex(stopIndex), fullCtx(fullCtx) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.h
deleted file mode 100644
index af7f5f4b17..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionEventInfo.h
+++ /dev/null
@@ -1,70 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This is the base class for gathering detailed information about prediction
- /// events which occur during parsing.
- ///
- /// Note that we could record the parser call stack at the time this event
- /// occurred but in the presence of left recursive rules, the stack is kind of
- /// meaningless. It's better to look at the individual configurations for their
- /// individual stacks. Of course, that is a <seealso cref="PredictionContext"/> object,
- /// not a parse tree node, and so it does not have information about the extent
- /// (start...stop) of the various subtrees. Examining the stack tops of all
- /// configurations provides the return states for the rule invocations.
- /// From there you can get the enclosing rule.
- ///
- /// @since 4.3
- /// </summary>
- class ANTLR4CPP_PUBLIC DecisionEventInfo {
- public:
- /// <summary>
- /// The invoked decision number which this event is related to.
- /// </summary>
- /// <seealso cref= ATN#decisionToState </seealso>
- const size_t decision;
-
- /// <summary>
- /// The configuration set containing additional information relevant to the
- /// prediction state when the current event occurred, or {@code null} if no
- /// additional information is relevant or available.
- /// </summary>
- const ATNConfigSet *configs;
-
- /// <summary>
- /// The input token stream which is being parsed.
- /// </summary>
- const TokenStream *input;
-
- /// <summary>
- /// The token index in the input stream at which the current prediction was
- /// originally invoked.
- /// </summary>
- const size_t startIndex;
-
- /// <summary>
- /// The token index in the input stream at which the current event occurred.
- /// </summary>
- const size_t stopIndex;
-
- /// <summary>
- /// {@code true} if the current event occurred during LL prediction;
- /// otherwise, {@code false} if the event occurred during SLL prediction.
- /// </summary>
- const bool fullCtx;
-
- DecisionEventInfo(size_t decision, ATNConfigSet *configs, TokenStream *input, size_t startIndex,
- size_t stopIndex, bool fullCtx);
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.cpp
deleted file mode 100644
index ee9b1aac34..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.cpp
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ErrorInfo.h"
-#include "atn/LookaheadEventInfo.h"
-
-#include "atn/DecisionInfo.h"
-
-using namespace antlr4::atn;
-
-DecisionInfo::DecisionInfo(size_t decision) : decision(decision) {
-}
-
-std::string DecisionInfo::toString() const {
- std::stringstream ss;
-
- ss << "{decision=" << decision << ", contextSensitivities=" << contextSensitivities.size() << ", errors=";
- ss << errors.size() << ", ambiguities=" << ambiguities.size() << ", SLL_lookahead=" << SLL_TotalLook;
- ss << ", SLL_ATNTransitions=" << SLL_ATNTransitions << ", SLL_DFATransitions=" << SLL_DFATransitions;
- ss << ", LL_Fallback=" << LL_Fallback << ", LL_lookahead=" << LL_TotalLook << ", LL_ATNTransitions=" << LL_ATNTransitions << '}';
-
- return ss.str();
-}
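
toString() flattens the decision statistics into a single line; a freshly constructed DecisionInfo prints all-zero counters, roughly as shown in the comment in the sketch below:

#include <iostream>

#include "atn/DecisionInfo.h"

int main() {
  antlr4::atn::DecisionInfo info(/*decision=*/5);
  // Prints something like:
  // {decision=5, contextSensitivities=0, errors=0, ambiguities=0, SLL_lookahead=0,
  //  SLL_ATNTransitions=0, SLL_DFATransitions=0, LL_Fallback=0, LL_lookahead=0, LL_ATNTransitions=0}
  std::cout << info.toString() << std::endl;
  return 0;
}
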
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.h
deleted file mode 100644
index 2b43ad8be9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionInfo.h
+++ /dev/null
@@ -1,227 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ContextSensitivityInfo.h"
-#include "atn/AmbiguityInfo.h"
-#include "atn/PredicateEvalInfo.h"
-#include "atn/ErrorInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- class LookaheadEventInfo;
-
- /// <summary>
- /// This class contains profiling information gathered for a particular decision.
- ///
- /// <para>
- /// Parsing performance in ANTLR 4 is heavily influenced by both static factors
- /// (e.g. the form of the rules in the grammar) and dynamic factors (e.g. the
- /// choice of input and the state of the DFA cache at the time profiling
- /// operations are started). For best results, gather and use aggregate
- /// statistics from a large sample of inputs representing the inputs expected in
- /// production before using the results to make changes in the grammar.</para>
- ///
- /// @since 4.3
- /// </summary>
- class ANTLR4CPP_PUBLIC DecisionInfo {
- public:
- /// <summary>
- /// The decision number, which is an index into <seealso cref="ATN#decisionToState"/>.
- /// </summary>
- const size_t decision;
-
- /// <summary>
- /// The total number of times <seealso cref="ParserATNSimulator#adaptivePredict"/> was
- /// invoked for this decision.
- /// </summary>
- long long invocations = 0;
-
- /// <summary>
- /// The total time spent in <seealso cref="ParserATNSimulator#adaptivePredict"/> for
- /// this decision, in nanoseconds.
- ///
- /// <para>
- /// The value of this field contains the sum of differential results obtained
- /// by <seealso cref="System#nanoTime()"/>, and is not adjusted to compensate for JIT
- /// and/or garbage collection overhead. For best accuracy, use a modern JVM
- /// implementation that provides precise results from
- /// <seealso cref="System#nanoTime()"/>, and perform profiling in a separate process
- /// which is warmed up by parsing the input prior to profiling. If desired,
- /// call <seealso cref="ATNSimulator#clearDFA"/> to reset the DFA cache to its initial
- /// state before starting the profiling measurement pass.</para>
- /// </summary>
- long long timeInPrediction = 0;
-
- /// <summary>
- /// The sum of the lookahead required for SLL prediction for this decision.
- /// Note that SLL prediction is used before LL prediction for performance
- /// reasons even when <seealso cref="PredictionMode#LL"/> or
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/> is used.
- /// </summary>
- long long SLL_TotalLook = 0;
-
- /// <summary>
- /// Gets the minimum lookahead required for any single SLL prediction to
- /// complete for this decision, by reaching a unique prediction, reaching an
- /// SLL conflict state, or encountering a syntax error.
- /// </summary>
- long long SLL_MinLook = 0;
-
- /// <summary>
- /// Gets the maximum lookahead required for any single SLL prediction to
- /// complete for this decision, by reaching a unique prediction, reaching an
- /// SLL conflict state, or encountering a syntax error.
- /// </summary>
- long long SLL_MaxLook = 0;
-
- /// Gets the <seealso cref="LookaheadEventInfo"/> associated with the event where the
- /// <seealso cref="#SLL_MaxLook"/> value was set.
- Ref<LookaheadEventInfo> SLL_MaxLookEvent;
-
- /// <summary>
- /// The sum of the lookahead required for LL prediction for this decision.
- /// Note that LL prediction is only used when SLL prediction reaches a
- /// conflict state.
- /// </summary>
- long long LL_TotalLook = 0;
-
- /// <summary>
- /// Gets the minimum lookahead required for any single LL prediction to
- /// complete for this decision. An LL prediction completes when the algorithm
- /// reaches a unique prediction, a conflict state (for
- /// <seealso cref="PredictionMode#LL"/>), an ambiguity state (for
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/>), or a syntax error.
- /// </summary>
- long long LL_MinLook = 0;
-
- /// <summary>
- /// Gets the maximum lookahead required for any single LL prediction to
- /// complete for this decision. An LL prediction completes when the algorithm
- /// reaches a unique prediction, a conflict state (for
- /// <seealso cref="PredictionMode#LL"/>), an ambiguity state (for
- /// <seealso cref="PredictionMode#LL_EXACT_AMBIG_DETECTION"/>), or a syntax error.
- /// </summary>
- long long LL_MaxLook = 0;
-
- /// <summary>
- /// Gets the <seealso cref="LookaheadEventInfo"/> associated with the event where the
- /// <seealso cref="#LL_MaxLook"/> value was set.
- /// </summary>
- Ref<LookaheadEventInfo> LL_MaxLookEvent;
-
- /// <summary>
- /// A collection of <seealso cref="ContextSensitivityInfo"/> instances describing the
- /// context sensitivities encountered during LL prediction for this decision.
- /// </summary>
- /// <seealso cref= ContextSensitivityInfo </seealso>
- std::vector<ContextSensitivityInfo> contextSensitivities;
-
- /// <summary>
- /// A collection of <seealso cref="ErrorInfo"/> instances describing the parse errors
- /// identified during calls to <seealso cref="ParserATNSimulator#adaptivePredict"/> for
- /// this decision.
- /// </summary>
- /// <seealso cref= ErrorInfo </seealso>
- std::vector<ErrorInfo> errors;
-
- /// <summary>
- /// A collection of <seealso cref="AmbiguityInfo"/> instances describing the
- /// ambiguities encountered during LL prediction for this decision.
- /// </summary>
- /// <seealso cref= AmbiguityInfo </seealso>
- std::vector<AmbiguityInfo> ambiguities;
-
- /// <summary>
- /// A collection of <seealso cref="PredicateEvalInfo"/> instances describing the
- /// results of evaluating individual predicates during prediction for this
- /// decision.
- /// </summary>
- /// <seealso cref= PredicateEvalInfo </seealso>
- std::vector<PredicateEvalInfo> predicateEvals;
-
- /// <summary>
- /// The total number of ATN transitions required during SLL prediction for
- /// this decision. An ATN transition is determined by the number of times the
- /// DFA does not contain an edge that is required for prediction, resulting
- /// in on-the-fly computation of that edge.
- ///
- /// <para>
- /// If DFA caching of SLL transitions is employed by the implementation, ATN
- /// computation may cache the computed edge for efficient lookup during
- /// future parsing of this decision. Otherwise, the SLL parsing algorithm
- /// will use ATN transitions exclusively.</para>
- /// </summary>
- /// <seealso cref= #SLL_ATNTransitions </seealso>
- /// <seealso cref= ParserATNSimulator#computeTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#computeTargetState </seealso>
- long long SLL_ATNTransitions = 0;
-
- /// <summary>
- /// The total number of DFA transitions required during SLL prediction for
- /// this decision.
- ///
- /// <para>If the ATN simulator implementation does not use DFA caching for SLL
- /// transitions, this value will be 0.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#getExistingTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#getExistingTargetState </seealso>
- long long SLL_DFATransitions = 0;
-
- /// <summary>
- /// Gets the total number of times SLL prediction completed in a conflict
- /// state, resulting in fallback to LL prediction.
- ///
- /// <para>Note that this value is not related to whether or not
- /// <seealso cref="PredictionMode#SLL"/> may be used successfully with a particular
- /// grammar. If the ambiguity resolution algorithm applied to the SLL
-    /// conflicts for this decision produces the same result as LL prediction for
- /// this decision, <seealso cref="PredictionMode#SLL"/> would produce the same overall
- /// parsing result as <seealso cref="PredictionMode#LL"/>.</para>
- /// </summary>
- long long LL_Fallback = 0;
-
- /// <summary>
- /// The total number of ATN transitions required during LL prediction for
- /// this decision. An ATN transition is determined by the number of times the
- /// DFA does not contain an edge that is required for prediction, resulting
- /// in on-the-fly computation of that edge.
- ///
- /// <para>
- /// If DFA caching of LL transitions is employed by the implementation, ATN
- /// computation may cache the computed edge for efficient lookup during
- /// future parsing of this decision. Otherwise, the LL parsing algorithm will
- /// use ATN transitions exclusively.</para>
- /// </summary>
- /// <seealso cref= #LL_DFATransitions </seealso>
- /// <seealso cref= ParserATNSimulator#computeTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#computeTargetState </seealso>
- long long LL_ATNTransitions = 0;
-
- /// <summary>
- /// The total number of DFA transitions required during LL prediction for
- /// this decision.
- ///
- /// <para>If the ATN simulator implementation does not use DFA caching for LL
- /// transitions, this value will be 0.</para>
- /// </summary>
- /// <seealso cref= ParserATNSimulator#getExistingTargetState </seealso>
- /// <seealso cref= LexerATNSimulator#getExistingTargetState </seealso>
- long long LL_DFATransitions = 0;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="DecisionInfo"/> class to contain
- /// statistics for a particular decision.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- explicit DecisionInfo(size_t decision);
-
- std::string toString() const;
- };
-
-} // namespace atn
-} // namespace antlr4
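
The DecisionInfo fields above are plain counters that a profiling parse fills in. The helper below is a minimal, illustrative sketch (its name is not part of the runtime or of this change) showing how the SLL/LL statistics of one decision might be summarized; it only touches members declared in this header.

    #include <iostream>
    #include "atn/DecisionInfo.h"

    // Illustrative helper: prints the SLL vs. LL work recorded for one decision.
    // `info` is assumed to come from a profiling run.
    void printDecisionStats(const antlr4::atn::DecisionInfo &info) {
      std::cout << info.toString() << '\n'
                << "  invocations:        " << info.invocations << '\n'
                << "  time in prediction: " << info.timeInPrediction << " ns\n"
                << "  SLL lookahead:      " << info.SLL_TotalLook
                << " (min " << info.SLL_MinLook << ", max " << info.SLL_MaxLook << ")\n"
                << "  LL fallbacks:       " << info.LL_Fallback << '\n'
                << "  SLL ATN/DFA edges:  " << info.SLL_ATNTransitions << " / " << info.SLL_DFATransitions << '\n'
                << "  LL ATN/DFA edges:   " << info.LL_ATNTransitions << " / " << info.LL_DFATransitions << '\n'
                << "  ambiguities:        " << info.ambiguities.size() << '\n'
                << "  errors:             " << info.errors.size() << '\n';
    }
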
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.cpp
deleted file mode 100644
index 72adb210f5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/DecisionState.h"
-
-using namespace antlr4::atn;
-
-std::string DecisionState::toString() const {
- return "DECISION " + ATNState::toString();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.h
deleted file mode 100644
index b7341ac6c9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/DecisionState.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC DecisionState : public ATNState {
- public:
- static bool is(const ATNState &atnState) {
- const auto stateType = atnState.getStateType();
- return (stateType >= ATNStateType::BLOCK_START && stateType <= ATNStateType::TOKEN_START) ||
- stateType == ATNStateType::PLUS_LOOP_BACK ||
- stateType == ATNStateType::STAR_LOOP_ENTRY;
- }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- int decision = -1;
- bool nonGreedy = false;
-
- virtual std::string toString() const override;
-
- protected:
- using ATNState::ATNState;
- };
-
-} // namespace atn
-} // namespace antlr4
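
DecisionState::is() gives a cheap state-type test before downcasting, the same pattern LexerATNConfig::checkNonGreedyDecision uses further down in this diff. A small illustrative sketch (the helper name is not part of the runtime):

    #include "atn/ATNState.h"
    #include "atn/DecisionState.h"

    // Illustrative helper: the decision number of `s` if it is a decision state,
    // -1 otherwise. Shows the is() check followed by a static downcast.
    int decisionNumberOf(const antlr4::atn::ATNState *s) {
      using antlr4::atn::DecisionState;
      return DecisionState::is(s) ? static_cast<const DecisionState *>(s)->decision : -1;
    }
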
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.cpp
deleted file mode 100644
index 503fb1630e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/EpsilonTransition.h"
-
-using namespace antlr4::atn;
-
-EpsilonTransition::EpsilonTransition(ATNState *target) : EpsilonTransition(target, INVALID_INDEX) {
-}
-
-EpsilonTransition::EpsilonTransition(ATNState *target, size_t outermostPrecedenceReturn)
- : Transition(TransitionType::EPSILON, target), _outermostPrecedenceReturn(outermostPrecedenceReturn) {
-}
-
-size_t EpsilonTransition::outermostPrecedenceReturn() const {
- return _outermostPrecedenceReturn;
-}
-
-bool EpsilonTransition::isEpsilon() const {
- return true;
-}
-
-bool EpsilonTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string EpsilonTransition::toString() const {
- return "EPSILON " + Transition::toString() + " {}";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.h
deleted file mode 100644
index 21bc812822..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/EpsilonTransition.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC EpsilonTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::EPSILON; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- explicit EpsilonTransition(ATNState *target);
- EpsilonTransition(ATNState *target, size_t outermostPrecedenceReturn);
-
- /**
-     * @return the rule index of the precedence rule from which this transition is
-     * returning, where the precedence value is 0; otherwise, INVALID_INDEX.
- *
- * @see ATNConfig#isPrecedenceFilterSuppressed()
- * @see ParserATNSimulator#applyPrecedenceFilter(ATNConfigSet)
- * @since 4.4.1
- */
- size_t outermostPrecedenceReturn() const;
-
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-
- private:
- const size_t _outermostPrecedenceReturn; // A rule index.
- };
-
-} // namespace atn
-} // namespace antlr4
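
The outermostPrecedenceReturn() accessor above distinguishes ordinary epsilon edges from the epsilon edge that returns from the outermost (precedence 0) invocation of a precedence rule. A hedged sketch of that check, assuming the runtime's INVALID_INDEX constant; the helper itself is illustrative:

    #include "atn/EpsilonTransition.h"

    // Illustrative: true if `t` is an epsilon transition recording a return from
    // the outermost precedence-rule invocation, as described in the header above.
    bool isOutermostPrecedenceReturn(const antlr4::atn::Transition *t) {
      using antlr4::atn::EpsilonTransition;
      return EpsilonTransition::is(t) &&
             static_cast<const EpsilonTransition *>(t)->outermostPrecedenceReturn() != antlr4::INVALID_INDEX;
    }
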
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.cpp
deleted file mode 100644
index efe8507124..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfigSet.h"
-
-#include "atn/ErrorInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-ErrorInfo::ErrorInfo(size_t decision, ATNConfigSet *configs, TokenStream *input, size_t startIndex, size_t stopIndex, bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex, fullCtx) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.h
deleted file mode 100644
index d34642a195..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ErrorInfo.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This class represents profiling event information for a syntax error
- /// identified during prediction. Syntax errors occur when the prediction
- /// algorithm is unable to identify an alternative which would lead to a
- /// successful parse.
- /// </summary>
- /// <seealso cref= Parser#notifyErrorListeners(Token, String, RecognitionException) </seealso>
-  /// <seealso cref= ANTLRErrorListener#syntaxError </seealso>
-  ///
-  /// @since 4.3
- class ANTLR4CPP_PUBLIC ErrorInfo : public DecisionEventInfo {
- public:
- /// <summary>
- /// Constructs a new instance of the <seealso cref="ErrorInfo"/> class with the
- /// specified detailed syntax error information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set reached during prediction
- /// prior to reaching the <seealso cref="ATNSimulator#ERROR"/> state </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the syntax error was identified </param>
- /// <param name="fullCtx"> {@code true} if the syntax error was identified during LL
- /// prediction; otherwise, {@code false} if the syntax error was identified
- /// during SLL prediction </param>
- ErrorInfo(size_t decision, ATNConfigSet *configs, TokenStream *input, size_t startIndex, size_t stopIndex,
- bool fullCtx);
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.cpp
deleted file mode 100644
index 1d43697584..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.cpp
+++ /dev/null
@@ -1,189 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleStopState.h"
-#include "atn/Transition.h"
-#include "atn/RuleTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/WildcardTransition.h"
-#include "atn/NotSetTransition.h"
-#include "misc/IntervalSet.h"
-#include "atn/ATNConfig.h"
-
-#include "support/CPPUtils.h"
-
-#include "atn/LL1Analyzer.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
- struct ATNConfigHasher final {
- size_t operator()(const ATNConfig& atn_config) const {
- return atn_config.hashCode();
- }
- };
-
- struct ATNConfigComparer final {
- bool operator()(const ATNConfig& lhs, const ATNConfig& rhs) const {
- return lhs == rhs;
- }
- };
-
- class LL1AnalyzerImpl final {
- public:
- LL1AnalyzerImpl(const ATN& atn, misc::IntervalSet& look, bool seeThruPreds, bool addEOF) : _atn(atn), _look(look), _seeThruPreds(seeThruPreds), _addEOF(addEOF) {}
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and {@code stopState} or the end of the
- /// rule containing {@code s} is reached, <seealso cref="Token#EPSILON"/> is added to
- /// the result set. If {@code ctx} is not {@code null} and {@code addEOF} is
- /// {@code true} and {@code stopState} or the end of the outermost rule is
- /// reached, <seealso cref="Token#EOF"/> is added to the result set.
- /// </summary>
- /// <param name="s"> the ATN state. </param>
- /// <param name="stopState"> the ATN state to stop at. This can be a
- /// <seealso cref="BlockEndState"/> to detect epsilon paths through a closure. </param>
- /// <param name="ctx"> The outer context, or {@code null} if the outer context should
- /// not be used. </param>
- /// <param name="look"> The result lookahead set. </param>
- /// <param name="lookBusy"> A set used for preventing epsilon closures in the ATN
- /// from causing a stack overflow. Outside code should pass
- /// {@code new HashSet<ATNConfig>} for this argument. </param>
- /// <param name="calledRuleStack"> A set used for preventing left recursion in the
- /// ATN from causing a stack overflow. Outside code should pass
- /// {@code new BitSet()} for this argument. </param>
-    /// <param name="seeThruPreds"> {@code true} to treat semantic predicates as
- /// implicitly {@code true} and "see through them", otherwise {@code false}
- /// to treat semantic predicates as opaque and add <seealso cref="#HIT_PRED"/> to the
- /// result if one is encountered. </param>
- /// <param name="addEOF"> Add <seealso cref="Token#EOF"/> to the result if the end of the
- /// outermost context is reached. This parameter has no effect if {@code ctx}
- /// is {@code null}. </param>
- void LOOK(ATNState *s, ATNState *stopState, Ref<const PredictionContext> const& ctx) {
- if (!_lookBusy.insert(ATNConfig(s, 0, ctx)).second) {
- return;
- }
-
- // ml: s can never be null, hence no need to check if stopState is != null.
- if (s == stopState) {
- if (ctx == nullptr) {
- _look.add(Token::EPSILON);
- return;
- } else if (ctx->isEmpty() && _addEOF) {
- _look.add(Token::EOF);
- return;
- }
- }
-
- if (s->getStateType() == ATNStateType::RULE_STOP) {
- if (ctx == nullptr) {
- _look.add(Token::EPSILON);
- return;
- } else if (ctx->isEmpty() && _addEOF) {
- _look.add(Token::EOF);
- return;
- }
-
- if (ctx != PredictionContext::EMPTY) {
- bool removed = _calledRuleStack.test(s->ruleIndex);
- _calledRuleStack[s->ruleIndex] = false;
- // run thru all possible stack tops in ctx
- for (size_t i = 0; i < ctx->size(); i++) {
- ATNState *returnState = _atn.states[ctx->getReturnState(i)];
- LOOK(returnState, stopState, ctx->getParent(i));
- }
- if (removed) {
- _calledRuleStack.set(s->ruleIndex);
- }
- return;
- }
- }
-
- size_t n = s->transitions.size();
- for (size_t i = 0; i < n; i++) {
- const Transition *t = s->transitions[i].get();
- const auto tType = t->getTransitionType();
-
- if (tType == TransitionType::RULE) {
- if (_calledRuleStack[(static_cast<const RuleTransition*>(t))->target->ruleIndex]) {
- continue;
- }
-
- Ref<const PredictionContext> newContext = SingletonPredictionContext::create(ctx, (static_cast<const RuleTransition*>(t))->followState->stateNumber);
-
- _calledRuleStack.set((static_cast<const RuleTransition*>(t))->target->ruleIndex);
- LOOK(t->target, stopState, newContext);
- _calledRuleStack[(static_cast<const RuleTransition*>(t))->target->ruleIndex] = false;
-
- } else if (tType == TransitionType::PREDICATE || tType == TransitionType::PRECEDENCE) {
- if (_seeThruPreds) {
- LOOK(t->target, stopState, ctx);
- } else {
- _look.add(LL1Analyzer::HIT_PRED);
- }
- } else if (t->isEpsilon()) {
- LOOK(t->target, stopState, ctx);
- } else if (tType == TransitionType::WILDCARD) {
- _look.addAll(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast<ssize_t>(_atn.maxTokenType)));
- } else {
- misc::IntervalSet set = t->label();
- if (!set.isEmpty()) {
- if (tType == TransitionType::NOT_SET) {
- set = set.complement(misc::IntervalSet::of(Token::MIN_USER_TOKEN_TYPE, static_cast<ssize_t>(_atn.maxTokenType)));
- }
- _look.addAll(set);
- }
- }
- }
- }
-
- private:
- const ATN& _atn;
- misc::IntervalSet& _look;
- antlrcpp::BitSet _calledRuleStack;
- std::unordered_set<ATNConfig, ATNConfigHasher, ATNConfigComparer> _lookBusy;
- bool _seeThruPreds;
- bool _addEOF;
- };
-
-}
-
-std::vector<misc::IntervalSet> LL1Analyzer::getDecisionLookahead(ATNState *s) const {
- std::vector<misc::IntervalSet> look;
-
- if (s == nullptr) {
- return look;
- }
-
- look.resize(s->transitions.size()); // Fills all interval sets with defaults.
- for (size_t alt = 0; alt < s->transitions.size(); alt++) {
- LL1AnalyzerImpl impl(_atn, look[alt], false, false);
- impl.LOOK(s->transitions[alt]->target, nullptr, PredictionContext::EMPTY);
- // Wipe out lookahead for this alternative if we found nothing
- // or we had a predicate when we !seeThruPreds
- if (look[alt].size() == 0 || look[alt].contains(LL1Analyzer::HIT_PRED)) {
- look[alt].clear();
- }
- }
- return look;
-}
-
-misc::IntervalSet LL1Analyzer::LOOK(ATNState *s, RuleContext *ctx) const {
- return LOOK(s, nullptr, ctx);
-}
-
-misc::IntervalSet LL1Analyzer::LOOK(ATNState *s, ATNState *stopState, RuleContext *ctx) const {
- Ref<const PredictionContext> lookContext = ctx != nullptr ? PredictionContext::fromRuleContext(_atn, ctx) : nullptr;
- misc::IntervalSet r;
- LL1AnalyzerImpl impl(_atn, r, true, true);
- impl.LOOK(s, stopState, lookContext);
- return r;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.h
deleted file mode 100644
index 7d47c7610f..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LL1Analyzer.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-#include "atn/ATNConfig.h"
-#include "atn/PredictionContext.h"
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC LL1Analyzer final {
- public:
- /// Special value added to the lookahead sets to indicate that we hit
- /// a predicate during analysis if {@code seeThruPreds==false}.
- static constexpr size_t HIT_PRED = Token::INVALID_TYPE;
-
- explicit LL1Analyzer(const atn::ATN &atn) : _atn(atn) {}
-
- /// <summary>
- /// Calculates the SLL(1) expected lookahead set for each outgoing transition
- /// of an <seealso cref="ATNState"/>. The returned array has one element for each
- /// outgoing transition in {@code s}. If the closure from transition
- /// <em>i</em> leads to a semantic predicate before matching a symbol, the
- /// element at index <em>i</em> of the result will be {@code null}.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <returns> the expected symbols for each outgoing transition of {@code s}. </returns>
- std::vector<misc::IntervalSet> getDecisionLookahead(ATNState *s) const;
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and the end of the rule containing
- /// {@code s} is reached, <seealso cref="Token#EPSILON"/> is added to the result set.
- /// If {@code ctx} is not {@code null} and the end of the outermost rule is
- /// reached, <seealso cref="Token#EOF"/> is added to the result set.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <param name="ctx"> the complete parser context, or {@code null} if the context
- /// should be ignored
- /// </param>
- /// <returns> The set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}. </returns>
- misc::IntervalSet LOOK(ATNState *s, RuleContext *ctx) const;
-
- /// <summary>
- /// Compute set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}.
- /// <p/>
- /// If {@code ctx} is {@code null} and the end of the rule containing
- /// {@code s} is reached, <seealso cref="Token#EPSILON"/> is added to the result set.
- /// If {@code ctx} is not {@code null} and the end of the outermost rule is
- /// reached, <seealso cref="Token#EOF"/> is added to the result set.
- /// </summary>
- /// <param name="s"> the ATN state </param>
- /// <param name="stopState"> the ATN state to stop at. This can be a
- /// <seealso cref="BlockEndState"/> to detect epsilon paths through a closure. </param>
- /// <param name="ctx"> the complete parser context, or {@code null} if the context
- /// should be ignored
- /// </param>
- /// <returns> The set of tokens that can follow {@code s} in the ATN in the
- /// specified {@code ctx}. </returns>
- misc::IntervalSet LOOK(ATNState *s, ATNState *stopState, RuleContext *ctx) const;
-
- private:
- const atn::ATN &_atn;
- };
-
-} // namespace atn
-} // namespace antlr4
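
LL1Analyzer computes lookahead sets on demand from the ATN. A hedged usage sketch: it assumes the ATN exposes ruleToStartState (as in the Java runtime), and passing a null context means the caller's rule stack is ignored.

    #include <cstddef>
    #include "atn/ATN.h"
    #include "atn/LL1Analyzer.h"
    #include "misc/IntervalSet.h"

    // Illustrative: the set of tokens that can start rule `ruleIndex`, ignoring
    // the parser call stack (ctx == nullptr). Token::EPSILON in the result means
    // the rule can be exited without consuming input.
    antlr4::misc::IntervalSet firstSetOfRule(const antlr4::atn::ATN &atn, size_t ruleIndex) {
      antlr4::atn::LL1Analyzer analyzer(atn);
      return analyzer.LOOK(atn.ruleToStartState[ruleIndex], /*ctx=*/nullptr);
    }
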
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.cpp
deleted file mode 100644
index e70cfac2ca..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "atn/DecisionState.h"
-#include "atn/PredictionContext.h"
-#include "SemanticContext.h"
-#include "atn/LexerActionExecutor.h"
-
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "atn/LexerATNConfig.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref<const PredictionContext> context)
- : ATNConfig(state, alt, std::move(context)) {}
-
-LexerATNConfig::LexerATNConfig(ATNState *state, int alt, Ref<const PredictionContext> context, Ref<const LexerActionExecutor> lexerActionExecutor)
- : ATNConfig(state, alt, std::move(context)), _lexerActionExecutor(std::move(lexerActionExecutor)) {}
-
-LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state)
- : ATNConfig(other, state), _lexerActionExecutor(other._lexerActionExecutor), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {}
-
-LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref<const LexerActionExecutor> lexerActionExecutor)
- : ATNConfig(other, state), _lexerActionExecutor(std::move(lexerActionExecutor)), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {}
-
-LexerATNConfig::LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref<const PredictionContext> context)
- : ATNConfig(other, state, std::move(context)), _lexerActionExecutor(other._lexerActionExecutor), _passedThroughNonGreedyDecision(checkNonGreedyDecision(other, state)) {}
-
-size_t LexerATNConfig::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, alt);
- hashCode = misc::MurmurHash::update(hashCode, context);
- hashCode = misc::MurmurHash::update(hashCode, semanticContext);
- hashCode = misc::MurmurHash::update(hashCode, _passedThroughNonGreedyDecision ? 1 : 0);
- hashCode = misc::MurmurHash::update(hashCode, _lexerActionExecutor);
- hashCode = misc::MurmurHash::finish(hashCode, 6);
- return hashCode;
-}
-
-bool LexerATNConfig::operator==(const LexerATNConfig& other) const
-{
- if (this == &other)
- return true;
-
- if (_passedThroughNonGreedyDecision != other._passedThroughNonGreedyDecision)
- return false;
-
- if (_lexerActionExecutor == nullptr)
- return other._lexerActionExecutor == nullptr;
- if (*_lexerActionExecutor != *(other._lexerActionExecutor)) {
- return false;
- }
-
- return ATNConfig::operator==(other);
-}
-
-bool LexerATNConfig::checkNonGreedyDecision(LexerATNConfig const& source, ATNState *target) {
- return source._passedThroughNonGreedyDecision ||
- (DecisionState::is(target) && downCast<DecisionState*>(target)->nonGreedy);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.h
deleted file mode 100644
index 7d1d6b40e2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNConfig.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNConfig.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC LexerATNConfig final : public ATNConfig {
- public:
- LexerATNConfig(ATNState *state, int alt, Ref<const PredictionContext> context);
- LexerATNConfig(ATNState *state, int alt, Ref<const PredictionContext> context, Ref<const LexerActionExecutor> lexerActionExecutor);
-
- LexerATNConfig(LexerATNConfig const& other, ATNState *state);
- LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref<const LexerActionExecutor> lexerActionExecutor);
- LexerATNConfig(LexerATNConfig const& other, ATNState *state, Ref<const PredictionContext> context);
-
- /**
- * Gets the {@link LexerActionExecutor} capable of executing the embedded
- * action(s) for the current configuration.
- */
- const Ref<const LexerActionExecutor>& getLexerActionExecutor() const { return _lexerActionExecutor; }
- bool hasPassedThroughNonGreedyDecision() const { return _passedThroughNonGreedyDecision; }
-
- virtual size_t hashCode() const override;
-
- bool operator==(const LexerATNConfig& other) const;
-
- private:
- /**
- * This is the backing field for {@link #getLexerActionExecutor}.
- */
- const Ref<const LexerActionExecutor> _lexerActionExecutor;
- const bool _passedThroughNonGreedyDecision = false;
-
- static bool checkNonGreedyDecision(LexerATNConfig const& source, ATNState *target);
- };
-
-} // namespace atn
-} // namespace antlr4
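
hashCode() and operator== above exist so configurations can be de-duplicated in hash-based containers; the LL1Analyzer implementation earlier in this diff does exactly that for plain ATNConfig. A sketch of the same pattern applied to LexerATNConfig (the functor and alias names are illustrative):

    #include <unordered_set>
    #include "atn/LexerATNConfig.h"

    // Illustrative functors mirroring the ATNConfigHasher/ATNConfigComparer pair
    // used in LL1Analyzer.cpp, here for LexerATNConfig.
    struct LexerATNConfigHasher {
      size_t operator()(const antlr4::atn::LexerATNConfig &c) const { return c.hashCode(); }
    };
    struct LexerATNConfigComparer {
      bool operator()(const antlr4::atn::LexerATNConfig &a,
                      const antlr4::atn::LexerATNConfig &b) const { return a == b; }
    };
    using LexerConfigSet = std::unordered_set<antlr4::atn::LexerATNConfig,
                                              LexerATNConfigHasher, LexerATNConfigComparer>;
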
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.cpp
deleted file mode 100644
index ef1b1cf2f1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.cpp
+++ /dev/null
@@ -1,617 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "IntStream.h"
-#include "atn/OrderedATNConfigSet.h"
-#include "Token.h"
-#include "LexerNoViableAltException.h"
-#include "atn/RuleStopState.h"
-#include "atn/RuleTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/PredicateTransition.h"
-#include "atn/ActionTransition.h"
-#include "atn/TokensStartState.h"
-#include "misc/Interval.h"
-#include "dfa/DFA.h"
-#include "Lexer.h"
-#include "internal/Synchronization.h"
-
-#include "dfa/DFAState.h"
-#include "atn/LexerATNConfig.h"
-#include "atn/LexerActionExecutor.h"
-
-#include "atn/LexerATNSimulator.h"
-
-#define DEBUG_ATN 0
-#define DEBUG_DFA 0
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::internal;
-using namespace antlrcpp;
-
-void LexerATNSimulator::SimState::reset() {
- *this = SimState();
-}
-
-LexerATNSimulator::LexerATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache)
- : LexerATNSimulator(nullptr, atn, decisionToDFA, sharedContextCache) {
-}
-
-LexerATNSimulator::LexerATNSimulator(Lexer *recog, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache)
- : ATNSimulator(atn, sharedContextCache), _recog(recog), _decisionToDFA(decisionToDFA) {
- InitializeInstanceFields();
-}
-
-void LexerATNSimulator::copyState(LexerATNSimulator *simulator) {
- _charPositionInLine = simulator->_charPositionInLine;
- _line = simulator->_line;
- _mode = simulator->_mode;
- _startIndex = simulator->_startIndex;
-}
-
-size_t LexerATNSimulator::match(CharStream *input, size_t mode) {
- _mode = mode;
- ssize_t mark = input->mark();
-
- auto onExit = finally([input, mark] {
- input->release(mark);
- });
-
- _startIndex = input->index();
- _prevAccept.reset();
- const dfa::DFA &dfa = _decisionToDFA[mode];
- dfa::DFAState* s0;
- {
- SharedLock<SharedMutex> stateLock(atn._stateMutex);
- s0 = dfa.s0;
- }
- if (s0 == nullptr) {
- return matchATN(input);
- } else {
- return execATN(input, s0);
- }
-}
-
-void LexerATNSimulator::reset() {
- _prevAccept.reset();
- _startIndex = 0;
- _line = 1;
- _charPositionInLine = 0;
- _mode = Lexer::DEFAULT_MODE;
-}
-
-void LexerATNSimulator::clearDFA() {
- size_t size = _decisionToDFA.size();
- _decisionToDFA.clear();
- for (size_t d = 0; d < size; ++d) {
- _decisionToDFA.emplace_back(atn.getDecisionState(d), d);
- }
-}
-
-size_t LexerATNSimulator::matchATN(CharStream *input) {
- ATNState *startState = atn.modeToStartState[_mode];
-
- std::unique_ptr<ATNConfigSet> s0_closure = computeStartState(input, startState);
-
- bool suppressEdge = s0_closure->hasSemanticContext;
- s0_closure->hasSemanticContext = false;
-
- dfa::DFAState *next = addDFAState(s0_closure.release(), suppressEdge);
-
- size_t predict = execATN(input, next);
-
- return predict;
-}
-
-size_t LexerATNSimulator::execATN(CharStream *input, dfa::DFAState *ds0) {
- if (ds0->isAcceptState) {
- // allow zero-length tokens
- // ml: in Java code this method uses 3 params. The first is a member var of the class anyway (_prevAccept), so why pass it here?
- captureSimState(input, ds0);
- }
-
- size_t t = input->LA(1);
- dfa::DFAState *s = ds0; // s is current/from DFA state
-
- while (true) { // while more work
- // As we move src->trg, src->trg, we keep track of the previous trg to
- // avoid looking up the DFA state again, which is expensive.
- // If the previous target was already part of the DFA, we might
- // be able to avoid doing a reach operation upon t. If s!=null,
- // it means that semantic predicates didn't prevent us from
- // creating a DFA state. Once we know s!=null, we check to see if
- // the DFA state has an edge already for t. If so, we can just reuse
-    // its configuration set; there's no point in re-computing it.
- // This is kind of like doing DFA simulation within the ATN
- // simulation because DFA simulation is really just a way to avoid
- // computing reach/closure sets. Technically, once we know that
- // we have a previously added DFA state, we could jump over to
- // the DFA simulator. But, that would mean popping back and forth
- // a lot and making things more complicated algorithmically.
- // This optimization makes a lot of sense for loops within DFA.
- // A character will take us back to an existing DFA state
- // that already has lots of edges out of it. e.g., .* in comments.
- dfa::DFAState *target = getExistingTargetState(s, t);
- if (target == nullptr) {
- target = computeTargetState(input, s, t);
- }
-
- if (target == ERROR.get()) {
- break;
- }
-
- // If this is a consumable input element, make sure to consume before
- // capturing the accept state so the input index, line, and char
- // position accurately reflect the state of the interpreter at the
- // end of the token.
- if (t != Token::EOF) {
- consume(input);
- }
-
- if (target->isAcceptState) {
- captureSimState(input, target);
- if (t == Token::EOF) {
- break;
- }
- }
-
- t = input->LA(1);
- s = target; // flip; current DFA target becomes new src/from state
- }
-
- return failOrAccept(input, s->configs.get(), t);
-}
-
-dfa::DFAState *LexerATNSimulator::getExistingTargetState(dfa::DFAState *s, size_t t) {
- dfa::DFAState* retval = nullptr;
- SharedLock<SharedMutex> edgeLock(atn._edgeMutex);
- if (t <= MAX_DFA_EDGE) {
- auto iterator = s->edges.find(t - MIN_DFA_EDGE);
-#if DEBUG_ATN == 1
- if (iterator != s->edges.end()) {
- std::cout << std::string("reuse state ") << s->stateNumber << std::string(" edge to ") << iterator->second->stateNumber << std::endl;
- }
-#endif
-
- if (iterator != s->edges.end())
- retval = iterator->second;
- }
- return retval;
-}
-
-dfa::DFAState *LexerATNSimulator::computeTargetState(CharStream *input, dfa::DFAState *s, size_t t) {
- OrderedATNConfigSet *reach = new OrderedATNConfigSet(); /* mem-check: deleted on error or managed by new DFA state. */
-
- // if we don't find an existing DFA state
- // Fill reach starting from closure, following t transitions
- getReachableConfigSet(input, s->configs.get(), reach, t);
-
- if (reach->isEmpty()) { // we got nowhere on t from s
- if (!reach->hasSemanticContext) {
- // we got nowhere on t, don't throw out this knowledge; it'd
- // cause a failover from DFA later.
- addDFAEdge(s, t, ERROR.get());
- }
- delete reach;
-
- // stop when we can't match any more char
- return ERROR.get();
- }
-
- // Add an edge from s to target DFA found/created for reach
- return addDFAEdge(s, t, reach);
-}
-
-size_t LexerATNSimulator::failOrAccept(CharStream *input, ATNConfigSet *reach, size_t t) {
- if (_prevAccept.dfaState != nullptr) {
- accept(input, _prevAccept.dfaState->lexerActionExecutor, _startIndex, _prevAccept.index, _prevAccept.line, _prevAccept.charPos);
- return _prevAccept.dfaState->prediction;
- } else {
- // if no accept and EOF is first char, return EOF
- if (t == Token::EOF && input->index() == _startIndex) {
- return Token::EOF;
- }
-
- throw LexerNoViableAltException(_recog, input, _startIndex, reach);
- }
-}
-
-void LexerATNSimulator::getReachableConfigSet(CharStream *input, ATNConfigSet *closure_, ATNConfigSet *reach, size_t t) {
- // this is used to skip processing for configs which have a lower priority
- // than a config that already reached an accept state for the same rule
- size_t skipAlt = ATN::INVALID_ALT_NUMBER;
-
- for (const auto &c : closure_->configs) {
- bool currentAltReachedAcceptState = c->alt == skipAlt;
- if (currentAltReachedAcceptState && (std::static_pointer_cast<LexerATNConfig>(c))->hasPassedThroughNonGreedyDecision()) {
- continue;
- }
-
-#if DEBUG_ATN == 1
- std::cout << "testing " << getTokenName((int)t) << " at " << c->toString(true) << std::endl;
-#endif
-
- size_t n = c->state->transitions.size();
- for (size_t ti = 0; ti < n; ti++) { // for each transition
- const Transition *trans = c->state->transitions[ti].get();
- ATNState *target = getReachableTarget(trans, (int)t);
- if (target != nullptr) {
- auto lexerActionExecutor = downCast<const LexerATNConfig&>(*c).getLexerActionExecutor();
- if (lexerActionExecutor != nullptr) {
- lexerActionExecutor = lexerActionExecutor->fixOffsetBeforeMatch((int)input->index() - (int)_startIndex);
- }
-
- bool treatEofAsEpsilon = t == Token::EOF;
- Ref<LexerATNConfig> config = std::make_shared<LexerATNConfig>(downCast<const LexerATNConfig&>(*c),
- target, std::move(lexerActionExecutor));
-
- if (closure(input, config, reach, currentAltReachedAcceptState, true, treatEofAsEpsilon)) {
- // any remaining configs for this alt have a lower priority than
- // the one that just reached an accept state.
- skipAlt = c->alt;
- break;
- }
- }
- }
- }
-}
-
-void LexerATNSimulator::accept(CharStream *input, const Ref<const LexerActionExecutor> &lexerActionExecutor, size_t /*startIndex*/,
- size_t index, size_t line, size_t charPos) {
-#if DEBUG_ATN == 1
- std::cout << "ACTION ";
- std::cout << toString(lexerActionExecutor) << std::endl;
-#endif
-
- // seek to after last char in token
- input->seek(index);
- _line = line;
- _charPositionInLine = (int)charPos;
-
- if (lexerActionExecutor != nullptr && _recog != nullptr) {
- lexerActionExecutor->execute(_recog, input, _startIndex);
- }
-}
-
-atn::ATNState *LexerATNSimulator::getReachableTarget(const Transition *trans, size_t t) {
- if (trans->matches(t, Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE)) {
- return trans->target;
- }
-
- return nullptr;
-}
-
-std::unique_ptr<ATNConfigSet> LexerATNSimulator::computeStartState(CharStream *input, ATNState *p) {
- Ref<const PredictionContext> initialContext = PredictionContext::EMPTY; // ml: the purpose of this assignment is unclear
- std::unique_ptr<ATNConfigSet> configs(new OrderedATNConfigSet());
- for (size_t i = 0; i < p->transitions.size(); i++) {
- ATNState *target = p->transitions[i]->target;
- Ref<LexerATNConfig> c = std::make_shared<LexerATNConfig>(target, (int)(i + 1), initialContext);
- closure(input, c, configs.get(), false, false, false);
- }
-
- return configs;
-}
-
-bool LexerATNSimulator::closure(CharStream *input, const Ref<LexerATNConfig> &config, ATNConfigSet *configs,
- bool currentAltReachedAcceptState, bool speculative, bool treatEofAsEpsilon) {
-#if DEBUG_ATN == 1
- std::cout << "closure(" << config->toString(true) << ")" << std::endl;
-#endif
-
- if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) {
-#if DEBUG_ATN == 1
- if (_recog != nullptr) {
- std::cout << "closure at " << _recog->getRuleNames()[config->state->ruleIndex] << " rule stop " << config << std::endl;
- } else {
- std::cout << "closure at rule stop " << config << std::endl;
- }
-#endif
-
- if (config->context == nullptr || config->context->hasEmptyPath()) {
- if (config->context == nullptr || config->context->isEmpty()) {
- configs->add(config);
- return true;
- } else {
- configs->add(std::make_shared<LexerATNConfig>(*config, config->state, PredictionContext::EMPTY));
- currentAltReachedAcceptState = true;
- }
- }
-
- if (config->context != nullptr && !config->context->isEmpty()) {
- for (size_t i = 0; i < config->context->size(); i++) {
- if (config->context->getReturnState(i) != PredictionContext::EMPTY_RETURN_STATE) {
- Ref<const PredictionContext> newContext = config->context->getParent(i); // "pop" return state
- ATNState *returnState = atn.states[config->context->getReturnState(i)];
- Ref<LexerATNConfig> c = std::make_shared<LexerATNConfig>(*config, returnState, newContext);
- currentAltReachedAcceptState = closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon);
- }
- }
- }
-
- return currentAltReachedAcceptState;
- }
-
- // optimization
- if (!config->state->epsilonOnlyTransitions) {
- if (!currentAltReachedAcceptState || !config->hasPassedThroughNonGreedyDecision()) {
- configs->add(config);
- }
- }
-
- ATNState *p = config->state;
- for (size_t i = 0; i < p->transitions.size(); i++) {
- const Transition *t = p->transitions[i].get();
- Ref<LexerATNConfig> c = getEpsilonTarget(input, config, t, configs, speculative, treatEofAsEpsilon);
- if (c != nullptr) {
- currentAltReachedAcceptState = closure(input, c, configs, currentAltReachedAcceptState, speculative, treatEofAsEpsilon);
- }
- }
-
- return currentAltReachedAcceptState;
-}
-
-Ref<LexerATNConfig> LexerATNSimulator::getEpsilonTarget(CharStream *input, const Ref<LexerATNConfig> &config, const Transition *t,
- ATNConfigSet *configs, bool speculative, bool treatEofAsEpsilon) {
-
- Ref<LexerATNConfig> c = nullptr;
- switch (t->getTransitionType()) {
- case TransitionType::RULE: {
- const RuleTransition *ruleTransition = static_cast<const RuleTransition*>(t);
- Ref<const PredictionContext> newContext = SingletonPredictionContext::create(config->context, ruleTransition->followState->stateNumber);
- c = std::make_shared<LexerATNConfig>(*config, t->target, newContext);
- break;
- }
-
- case TransitionType::PRECEDENCE:
- throw UnsupportedOperationException("Precedence predicates are not supported in lexers.");
-
- case TransitionType::PREDICATE: {
- /* Track traversing semantic predicates. If we traverse,
- we cannot add a DFA state for this "reach" computation
- because the DFA would not test the predicate again in the
- future. Rather than creating collections of semantic predicates
- like v3 and testing them on prediction, v4 will test them on the
- fly all the time using the ATN not the DFA. This is slower but
- semantically it's not used that often. One of the key elements to
- this predicate mechanism is not adding DFA states that see
- predicates immediately afterwards in the ATN. For example,
-
- a : ID {p1}? | ID {p2}? ;
-
- should create the start state for rule 'a' (to save start state
- competition), but should not create target of ID state. The
- collection of ATN states the following ID references includes
- states reached by traversing predicates. Since this is when we
-       test them, we cannot cache the DFA state target of ID.
- */
- const PredicateTransition *pt = static_cast<const PredicateTransition*>(t);
-
-#if DEBUG_ATN == 1
- std::cout << "EVAL rule " << pt->getRuleIndex() << ":" << pt->getPredIndex() << std::endl;
-#endif
-
- configs->hasSemanticContext = true;
- if (evaluatePredicate(input, pt->getRuleIndex(), pt->getPredIndex(), speculative)) {
- c = std::make_shared<LexerATNConfig>(*config, t->target);
- }
- break;
- }
-
- case TransitionType::ACTION:
- if (config->context == nullptr|| config->context->hasEmptyPath()) {
- // execute actions anywhere in the start rule for a token.
- //
- // TODO: if the entry rule is invoked recursively, some
- // actions may be executed during the recursive call. The
- // problem can appear when hasEmptyPath() is true but
- // isEmpty() is false. In this case, the config needs to be
- // split into two contexts - one with just the empty path
- // and another with everything but the empty path.
- // Unfortunately, the current algorithm does not allow
- // getEpsilonTarget to return two configurations, so
- // additional modifications are needed before we can support
- // the split operation.
- auto lexerActionExecutor = LexerActionExecutor::append(config->getLexerActionExecutor(),
- atn.lexerActions[static_cast<const ActionTransition *>(t)->actionIndex]);
- c = std::make_shared<LexerATNConfig>(*config, t->target, std::move(lexerActionExecutor));
- break;
- }
- else {
- // ignore actions in referenced rules
- c = std::make_shared<LexerATNConfig>(*config, t->target);
- break;
- }
-
- case TransitionType::EPSILON:
- c = std::make_shared<LexerATNConfig>(*config, t->target);
- break;
-
- case TransitionType::ATOM:
- case TransitionType::RANGE:
- case TransitionType::SET:
- if (treatEofAsEpsilon) {
- if (t->matches(Token::EOF, Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE)) {
- c = std::make_shared<LexerATNConfig>(*config, t->target);
- break;
- }
- }
-
- break;
-
- default: // To silence the compiler. Other transition types are not used here.
- break;
- }
-
- return c;
-}
-
-bool LexerATNSimulator::evaluatePredicate(CharStream *input, size_t ruleIndex, size_t predIndex, bool speculative) {
- // assume true if no recognizer was provided
- if (_recog == nullptr) {
- return true;
- }
-
- if (!speculative) {
- return _recog->sempred(nullptr, ruleIndex, predIndex);
- }
-
- size_t savedCharPositionInLine = _charPositionInLine;
- size_t savedLine = _line;
- size_t index = input->index();
- ssize_t marker = input->mark();
-
- auto onExit = finally([this, input, savedCharPositionInLine, savedLine, index, marker] {
- _charPositionInLine = savedCharPositionInLine;
- _line = savedLine;
- input->seek(index);
- input->release(marker);
- });
-
- consume(input);
- return _recog->sempred(nullptr, ruleIndex, predIndex);
-}
-
-void LexerATNSimulator::captureSimState(CharStream *input, dfa::DFAState *dfaState) {
- _prevAccept.index = input->index();
- _prevAccept.line = _line;
- _prevAccept.charPos = _charPositionInLine;
- _prevAccept.dfaState = dfaState;
-}
-
-dfa::DFAState *LexerATNSimulator::addDFAEdge(dfa::DFAState *from, size_t t, ATNConfigSet *q) {
- /* leading to this call, ATNConfigSet.hasSemanticContext is used as a
- * marker indicating dynamic predicate evaluation makes this edge
- * dependent on the specific input sequence, so the static edge in the
- * DFA should be omitted. The target DFAState is still created since
- * execATN has the ability to resynchronize with the DFA state cache
- * following the predicate evaluation step.
- *
- * TJP notes: next time through the DFA, we see a pred again and eval.
- * If that gets us to a previously created (but dangling) DFA
- * state, we can continue in pure DFA mode from there.
- */
- bool suppressEdge = q->hasSemanticContext;
- q->hasSemanticContext = false;
-
- dfa::DFAState *to = addDFAState(q);
-
- if (suppressEdge) {
- return to;
- }
-
- addDFAEdge(from, t, to);
- return to;
-}
-
-void LexerATNSimulator::addDFAEdge(dfa::DFAState *p, size_t t, dfa::DFAState *q) {
- if (/*t < MIN_DFA_EDGE ||*/ t > MAX_DFA_EDGE) { // MIN_DFA_EDGE is 0
- // Only track edges within the DFA bounds
- return;
- }
-
- UniqueLock<SharedMutex> edgeLock(atn._edgeMutex);
- p->edges[t - MIN_DFA_EDGE] = q; // connect
-}
-
-dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs) {
- return addDFAState(configs, true);
-}
-
-dfa::DFAState *LexerATNSimulator::addDFAState(ATNConfigSet *configs, bool suppressEdge) {
- /* the lexer evaluates predicates on-the-fly; by this point configs
- * should not contain any configurations with unevaluated predicates.
- */
- assert(!configs->hasSemanticContext);
-
- dfa::DFAState *proposed = new dfa::DFAState(std::unique_ptr<ATNConfigSet>(configs)); /* mem-check: managed by the DFA or deleted below */
- Ref<ATNConfig> firstConfigWithRuleStopState = nullptr;
- for (const auto &c : configs->configs) {
- if (RuleStopState::is(c->state)) {
- firstConfigWithRuleStopState = c;
- break;
- }
- }
-
- if (firstConfigWithRuleStopState != nullptr) {
- proposed->isAcceptState = true;
- proposed->lexerActionExecutor = downCast<const LexerATNConfig&>(*firstConfigWithRuleStopState).getLexerActionExecutor();
- proposed->prediction = atn.ruleToTokenType[firstConfigWithRuleStopState->state->ruleIndex];
- }
-
- dfa::DFA &dfa = _decisionToDFA[_mode];
-
- {
- UniqueLock<SharedMutex> stateLock(atn._stateMutex);
- auto [existing, inserted] = dfa.states.insert(proposed);
- if (!inserted) {
- delete proposed;
- proposed = *existing;
- } else {
-      // Previously we did a lookup, set the fields, and then inserted, using `dfa.states.size()`
-      // as the state number; since the state has already been inserted here, subtract one.
- proposed->stateNumber = static_cast<int>(dfa.states.size() - 1);
- proposed->configs->setReadonly(true);
- }
- if (!suppressEdge) {
- dfa.s0 = proposed;
- }
- }
-
- return proposed;
-}
-
-dfa::DFA& LexerATNSimulator::getDFA(size_t mode) {
- return _decisionToDFA[mode];
-}
-
-std::string LexerATNSimulator::getText(CharStream *input) {
- // index is first lookahead char, don't include.
- return input->getText(misc::Interval(_startIndex, input->index() - 1));
-}
-
-size_t LexerATNSimulator::getLine() const {
- return _line;
-}
-
-void LexerATNSimulator::setLine(size_t line) {
- _line = line;
-}
-
-size_t LexerATNSimulator::getCharPositionInLine() {
- return _charPositionInLine;
-}
-
-void LexerATNSimulator::setCharPositionInLine(size_t charPositionInLine) {
- _charPositionInLine = charPositionInLine;
-}
-
-void LexerATNSimulator::consume(CharStream *input) {
- size_t curChar = input->LA(1);
- if (curChar == '\n') {
- _line++;
- _charPositionInLine = 0;
- } else {
- _charPositionInLine++;
- }
- input->consume();
-}
-
-std::string LexerATNSimulator::getTokenName(size_t t) {
- if (t == Token::EOF) {
- return "EOF";
- }
- return std::string("'") + static_cast<char>(t) + std::string("'");
-}
-
-void LexerATNSimulator::InitializeInstanceFields() {
- _startIndex = 0;
- _line = 1;
- _charPositionInLine = 0;
- _mode = antlr4::Lexer::DEFAULT_MODE;
-}
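
addDFAEdge() and getExistingTargetState() above only cache edges for input symbols in [MIN_DFA_EDGE, MAX_DFA_EDGE] = [0, 127], so anything outside the ASCII range is always resolved through the ATN. The bounds check in isolation (illustrative, mirroring the in-code comment "MIN_DFA_EDGE is 0"):

    #include <cstddef>

    // Illustrative: symbols outside 0..127 never get a cached DFA edge and fall
    // back to ATN simulation (computeTargetState) on every occurrence.
    constexpr std::size_t MIN_DFA_EDGE = 0;
    constexpr std::size_t MAX_DFA_EDGE = 127;

    constexpr bool isCacheableEdge(std::size_t t) {
      return /*t >= MIN_DFA_EDGE &&*/ t <= MAX_DFA_EDGE;  // MIN_DFA_EDGE is 0
    }
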
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.h
deleted file mode 100644
index 304430b04d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerATNSimulator.h
+++ /dev/null
@@ -1,199 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <atomic>
-
-#include "atn/ATNSimulator.h"
-#include "atn/LexerATNConfig.h"
-#include "atn/ATNConfigSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// "dup" of ParserInterpreter
- class ANTLR4CPP_PUBLIC LexerATNSimulator : public ATNSimulator {
- protected:
- struct ANTLR4CPP_PUBLIC SimState final {
- size_t index = INVALID_INDEX;
- size_t line = 0;
- size_t charPos = INVALID_INDEX;
- dfa::DFAState *dfaState = nullptr;
-
- void reset();
- };
-
- public:
- static constexpr size_t MIN_DFA_EDGE = 0;
- static constexpr size_t MAX_DFA_EDGE = 127; // forces unicode to stay in ATN
-
- protected:
- /// <summary>
- /// When we hit an accept state in either the DFA or the ATN, we
- /// have to notify the character stream to start buffering characters
- /// via <seealso cref="IntStream#mark"/> and record the current state. The current sim state
- /// includes the current index into the input, the current line,
- /// and current character position in that line. Note that the Lexer is
- /// tracking the starting line and characterization of the token. These
- /// variables track the "state" of the simulator when it hits an accept state.
- /// <p/>
- /// We track these variables separately for the DFA and ATN simulation
- /// because the DFA simulation often has to fail over to the ATN
- /// simulation. If the ATN simulation fails, we need the DFA to fall
- /// back to its previously accepted state, if any. If the ATN succeeds,
- /// then the ATN does the accept and the DFA simulator that invoked it
- /// can simply return the predicted token type.
- /// </summary>
- Lexer *const _recog;
-
- /// The current token's starting index into the character stream.
- /// Shared across DFA to ATN simulation in case the ATN fails and the
- /// DFA did not have a previous accept state. In this case, we use the
- /// ATN-generated exception object.
- size_t _startIndex;
-
- /// line number 1..n within the input.
- size_t _line;
-
- /// The index of the character relative to the beginning of the line 0..n-1.
- size_t _charPositionInLine;
-
- public:
- std::vector<dfa::DFA> &_decisionToDFA;
-
- protected:
- size_t _mode;
-
- /// Used during DFA/ATN exec to record the most recent accept configuration info.
- SimState _prevAccept;
-
- public:
- LexerATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA, PredictionContextCache &sharedContextCache);
- LexerATNSimulator(Lexer *recog, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA, PredictionContextCache &sharedContextCache);
- virtual ~LexerATNSimulator() = default;
-
- virtual void copyState(LexerATNSimulator *simulator);
- virtual size_t match(CharStream *input, size_t mode);
- virtual void reset() override;
-
- virtual void clearDFA() override;
-
- protected:
- virtual size_t matchATN(CharStream *input);
- virtual size_t execATN(CharStream *input, dfa::DFAState *ds0);
-
- /// <summary>
- /// Get an existing target state for an edge in the DFA. If the target state
- /// for the edge has not yet been computed or is otherwise not available,
- /// this method returns {@code null}.
- /// </summary>
- /// <param name="s"> The current DFA state </param>
- /// <param name="t"> The next input symbol </param>
- /// <returns> The existing target DFA state for the given input symbol
- /// {@code t}, or {@code null} if the target state for this edge is not
- /// already cached </returns>
- virtual dfa::DFAState *getExistingTargetState(dfa::DFAState *s, size_t t);
-
- /// <summary>
- /// Compute a target state for an edge in the DFA, and attempt to add the
- /// computed state and corresponding edge to the DFA.
- /// </summary>
- /// <param name="input"> The input stream </param>
- /// <param name="s"> The current DFA state </param>
- /// <param name="t"> The next input symbol
- /// </param>
- /// <returns> The computed target DFA state for the given input symbol
- /// {@code t}. If {@code t} does not lead to a valid DFA state, this method
- /// returns <seealso cref="#ERROR"/>. </returns>
- virtual dfa::DFAState *computeTargetState(CharStream *input, dfa::DFAState *s, size_t t);
-
- virtual size_t failOrAccept(CharStream *input, ATNConfigSet *reach, size_t t);
-
- /// <summary>
- /// Given a starting configuration set, figure out all ATN configurations
- /// we can reach upon input {@code t}. Parameter {@code reach} is a return
- /// parameter.
- /// </summary>
- void getReachableConfigSet(CharStream *input, ATNConfigSet *closure_, // closure_ as we have a closure() already
- ATNConfigSet *reach, size_t t);
-
- virtual void accept(CharStream *input, const Ref<const LexerActionExecutor> &lexerActionExecutor, size_t startIndex, size_t index,
- size_t line, size_t charPos);
-
- virtual ATNState *getReachableTarget(const Transition *trans, size_t t);
-
- virtual std::unique_ptr<ATNConfigSet> computeStartState(CharStream *input, ATNState *p);
-
- /// <summary>
- /// Since the alternatives within any lexer decision are ordered by
- /// preference, this method stops pursuing the closure as soon as an accept
- /// state is reached. After the first accept state is reached by depth-first
- /// search from {@code config}, all other (potentially reachable) states for
- /// this rule would have a lower priority.
- /// </summary>
- /// <returns> {@code true} if an accept state is reached, otherwise
- /// {@code false}. </returns>
- virtual bool closure(CharStream *input, const Ref<LexerATNConfig> &config, ATNConfigSet *configs,
- bool currentAltReachedAcceptState, bool speculative, bool treatEofAsEpsilon);
-
- // side-effect: can alter configs.hasSemanticContext
- virtual Ref<LexerATNConfig> getEpsilonTarget(CharStream *input, const Ref<LexerATNConfig> &config, const Transition *t,
- ATNConfigSet *configs, bool speculative, bool treatEofAsEpsilon);
-
- /// <summary>
- /// Evaluate a predicate specified in the lexer.
- /// <p/>
- /// If {@code speculative} is {@code true}, this method was called before
- /// <seealso cref="#consume"/> for the matched character. This method should call
- /// <seealso cref="#consume"/> before evaluating the predicate to ensure position
- /// sensitive values, including <seealso cref="Lexer#getText"/>, <seealso cref="Lexer#getLine"/>,
- /// and <seealso cref="Lexer#getCharPositionInLine"/>, properly reflect the current
- /// lexer state. This method should restore {@code input} and the simulator
- /// to the original state before returning (i.e. undo the actions made by the
-    /// call to <seealso cref="#consume"/>).
- /// </summary>
- /// <param name="input"> The input stream. </param>
- /// <param name="ruleIndex"> The rule containing the predicate. </param>
- /// <param name="predIndex"> The index of the predicate within the rule. </param>
- /// <param name="speculative"> {@code true} if the current index in {@code input} is
- /// one character before the predicate's location.
- /// </param>
- /// <returns> {@code true} if the specified predicate evaluates to
- /// {@code true}. </returns>
- virtual bool evaluatePredicate(CharStream *input, size_t ruleIndex, size_t predIndex, bool speculative);
-
- virtual void captureSimState(CharStream *input, dfa::DFAState *dfaState);
- virtual dfa::DFAState* addDFAEdge(dfa::DFAState *from, size_t t, ATNConfigSet *q);
- virtual void addDFAEdge(dfa::DFAState *p, size_t t, dfa::DFAState *q);
-
- /// <summary>
- /// Add a new DFA state if there isn't one with this set of
- /// configurations already. This method also detects the first
- /// configuration containing an ATN rule stop state. Later, when
- /// traversing the DFA, we will know which rule to accept.
- /// </summary>
- virtual dfa::DFAState *addDFAState(ATNConfigSet *configs);
-
- virtual dfa::DFAState *addDFAState(ATNConfigSet *configs, bool suppressEdge);
-
- public:
- dfa::DFA& getDFA(size_t mode);
-
- /// Get the text matched so far for the current token.
- virtual std::string getText(CharStream *input);
- virtual size_t getLine() const;
- virtual void setLine(size_t line);
- virtual size_t getCharPositionInLine();
- virtual void setCharPositionInLine(size_t charPositionInLine);
- virtual void consume(CharStream *input);
- virtual std::string getTokenName(size_t t);
-
- private:
- void InitializeInstanceFields();
- };
-
-} // namespace atn
-} // namespace antlr4
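The computeTargetState/addDFAEdge pair documented above is essentially an edge cache: the first time input symbol t is seen in DFA state s, the target is computed from the ATN and recorded, and later lookups reuse the cached edge, with the ERROR sentinel cached for dead ends. A minimal, self-contained C++ sketch of that memoization idea follows; State, ERROR_STATE, computeFromAtn and getTarget are illustrative stand-ins, not ANTLR names.

    // Hypothetical sketch of the DFA edge cache described above.
    #include <cstddef>
    #include <functional>
    #include <unordered_map>

    struct State { int id; };
    static State errorState{-1};
    static State *const ERROR_STATE = &errorState;   // stand-in for the ERROR sentinel

    // Stand-in for the expensive ATN closure computation; nullptr means dead end.
    static State *computeFromAtn(State * /*s*/, size_t t) {
      static State even{0}, odd{1};
      return t > 127 ? nullptr : (t % 2 == 0 ? &even : &odd);
    }

    struct EdgeKey {
      State *from;
      size_t symbol;
      bool operator==(const EdgeKey &o) const { return from == o.from && symbol == o.symbol; }
    };
    struct EdgeKeyHash {
      size_t operator()(const EdgeKey &k) const {
        return std::hash<State *>()(k.from) ^ std::hash<size_t>()(k.symbol * 31);
      }
    };

    // First lookup computes and caches the edge; later lookups reuse it.
    static State *getTarget(std::unordered_map<EdgeKey, State *, EdgeKeyHash> &edges,
                            State *s, size_t t) {
      auto it = edges.find(EdgeKey{s, t});
      if (it != edges.end()) return it->second;     // existing edge (fast path)
      State *target = computeFromAtn(s, t);         // computeTargetState equivalent
      if (target == nullptr) target = ERROR_STATE;  // dead end: cache the ERROR sentinel
      edges.emplace(EdgeKey{s, t}, target);         // addDFAEdge equivalent
      return target;
    }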
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.cpp
deleted file mode 100644
index a9d9a6771b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.cpp
+++ /dev/null
@@ -1,15 +0,0 @@
-#include "LexerAction.h"
-
-using namespace antlr4::atn;
-
-size_t LexerAction::hashCode() const {
- auto hash = cachedHashCode();
- if (hash == 0) {
- hash = hashCodeImpl();
- if (hash == 0) {
- hash = std::numeric_limits<size_t>::max();
- }
- _hashCode.store(hash, std::memory_order_relaxed);
- }
- return hash;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.h
deleted file mode 100644
index 5c30a89608..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerAction.h
+++ /dev/null
@@ -1,100 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerActionType.h"
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Represents a single action which can be executed following the successful
- /// match of a lexer rule. Lexer actions are used for both embedded action syntax
- /// and ANTLR 4's new lexer command syntax.
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerAction {
- public:
- virtual ~LexerAction() = default;
-
- /// <summary>
- /// Gets the serialization type of the lexer action.
- /// </summary>
- /// <returns> The serialization type of the lexer action. </returns>
- ///
- /// IMPORTANT: Unlike Java, this returns LexerActionType::INDEXED_CUSTOM for instances of
- /// LexerIndexedCustomAction. If you need the wrapped action type, use
- /// LexerIndexedCustomAction::getAction()->getActionType().
- LexerActionType getActionType() const { return _actionType; }
-
- /// <summary>
- /// Gets whether the lexer action is position-dependent. Position-dependent
- /// actions may have different semantics depending on the <seealso cref="CharStream"/>
- /// index at the time the action is executed.
- ///
- /// <para>Many lexer commands, including {@code type}, {@code skip}, and
- /// {@code more}, do not check the input index during their execution.
- /// Actions like this are position-independent, and may be stored more
- /// efficiently as part of the <seealso cref="LexerATNConfig#lexerActionExecutor"/>.</para>
- /// </summary>
- /// <returns> {@code true} if the lexer action semantics can be affected by the
- /// position of the input <seealso cref="CharStream"/> at the time it is executed;
- /// otherwise, {@code false}. </returns>
- bool isPositionDependent() const { return _positionDependent; }
-
- /// <summary>
- /// Execute the lexer action in the context of the specified <seealso cref="Lexer"/>.
- ///
- /// <para>For position-dependent actions, the input stream must already be
- /// positioned correctly prior to calling this method.</para>
- /// </summary>
- /// <param name="lexer"> The lexer instance. </param>
- virtual void execute(Lexer *lexer) const = 0;
-
- size_t hashCode() const;
-
- virtual bool equals(const LexerAction &other) const = 0;
-
- virtual std::string toString() const = 0;
-
- protected:
- LexerAction(LexerActionType actionType, bool positionDependent)
- : _actionType(actionType), _hashCode(0), _positionDependent(positionDependent) {}
-
- virtual size_t hashCodeImpl() const = 0;
-
- size_t cachedHashCode() const { return _hashCode.load(std::memory_order_relaxed); }
-
- private:
- const LexerActionType _actionType;
- mutable std::atomic<size_t> _hashCode;
- const bool _positionDependent;
- };
-
- inline bool operator==(const LexerAction &lhs, const LexerAction &rhs) {
- return lhs.equals(rhs);
- }
-
- inline bool operator!=(const LexerAction &lhs, const LexerAction &rhs) {
- return !operator==(lhs, rhs);
- }
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::atn::LexerAction> {
- size_t operator()(const ::antlr4::atn::LexerAction &lexerAction) const {
- return lexerAction.hashCode();
- }
- };
-
-} // namespace std
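LexerAction::hashCode() above (and LexerActionExecutor::hashCode() below) share a lock-free caching trick: the mutable atomic starts at 0, 0 means "not computed yet", and a genuinely zero hash is remapped to the maximum size_t so it can never be confused with the sentinel; racing threads all store the same value, so relaxed ordering suffices. A self-contained sketch of the pattern, with CachedHashed and computeHash as illustrative names:

    #include <atomic>
    #include <cstddef>
    #include <limits>

    class CachedHashed {
     public:
      explicit CachedHashed(size_t value) : _value(value), _hashCode(0) {}

      size_t hashCode() const {
        size_t hash = _hashCode.load(std::memory_order_relaxed);
        if (hash == 0) {                       // 0 is the "not computed yet" sentinel
          hash = computeHash();
          if (hash == 0) {                     // never cache the sentinel itself
            hash = std::numeric_limits<size_t>::max();
          }
          _hashCode.store(hash, std::memory_order_relaxed);  // benign race: same value
        }
        return hash;
      }

     private:
      size_t computeHash() const { return _value * 2654435761u; }  // illustrative hash

      const size_t _value;
      mutable std::atomic<size_t> _hashCode;
    };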
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.cpp
deleted file mode 100644
index 490351b892..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.cpp
+++ /dev/null
@@ -1,111 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "atn/LexerIndexedCustomAction.h"
-#include "support/CPPUtils.h"
-#include "support/Arrays.h"
-#include "support/Casts.h"
-
-#include "atn/LexerActionExecutor.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-namespace {
-
- bool cachedHashCodeEqual(size_t lhs, size_t rhs) {
- return lhs == rhs || lhs == 0 || rhs == 0;
- }
-
- bool lexerActionEqual(const Ref<const LexerAction> &lhs, const Ref<const LexerAction> &rhs) {
- return *lhs == *rhs;
- }
-
-}
-
-LexerActionExecutor::LexerActionExecutor(std::vector<Ref<const LexerAction>> lexerActions)
- : _lexerActions(std::move(lexerActions)), _hashCode(0) {}
-
-Ref<const LexerActionExecutor> LexerActionExecutor::append(const Ref<const LexerActionExecutor> &lexerActionExecutor,
- Ref<const LexerAction> lexerAction) {
- if (lexerActionExecutor == nullptr) {
- return std::make_shared<LexerActionExecutor>(std::vector<Ref<const LexerAction>>{ std::move(lexerAction) });
- }
- std::vector<Ref<const LexerAction>> lexerActions;
- lexerActions.reserve(lexerActionExecutor->_lexerActions.size() + 1);
- lexerActions.insert(lexerActions.begin(), lexerActionExecutor->_lexerActions.begin(), lexerActionExecutor->_lexerActions.end());
- lexerActions.push_back(std::move(lexerAction));
- return std::make_shared<LexerActionExecutor>(std::move(lexerActions));
-}
-
-Ref<const LexerActionExecutor> LexerActionExecutor::fixOffsetBeforeMatch(int offset) const {
- std::vector<Ref<const LexerAction>> updatedLexerActions;
- for (size_t i = 0; i < _lexerActions.size(); i++) {
- if (_lexerActions[i]->isPositionDependent() && !LexerIndexedCustomAction::is(*_lexerActions[i])) {
- if (updatedLexerActions.empty()) {
- updatedLexerActions = _lexerActions; // Make a copy.
- }
- updatedLexerActions[i] = std::make_shared<LexerIndexedCustomAction>(offset, _lexerActions[i]);
- }
- }
- if (updatedLexerActions.empty()) {
- return shared_from_this();
- }
- return std::make_shared<LexerActionExecutor>(std::move(updatedLexerActions));
-}
-
-const std::vector<Ref<const LexerAction>>& LexerActionExecutor::getLexerActions() const {
- return _lexerActions;
-}
-
-void LexerActionExecutor::execute(Lexer *lexer, CharStream *input, size_t startIndex) const {
- bool requiresSeek = false;
- size_t stopIndex = input->index();
-
- auto onExit = finally([requiresSeek, input, stopIndex]() {
- if (requiresSeek) {
- input->seek(stopIndex);
- }
- });
- for (const auto &lexerAction : _lexerActions) {
- if (LexerIndexedCustomAction::is(*lexerAction)) {
- int offset = downCast<const LexerIndexedCustomAction&>(*lexerAction).getOffset();
- input->seek(startIndex + offset);
- requiresSeek = (startIndex + offset) != stopIndex;
- } else if (lexerAction->isPositionDependent()) {
- input->seek(stopIndex);
- requiresSeek = false;
- }
- lexerAction->execute(lexer);
- }
-}
-
-size_t LexerActionExecutor::hashCode() const {
- auto hash = _hashCode.load(std::memory_order_relaxed);
- if (hash == 0) {
- hash = MurmurHash::initialize();
- for (const auto &lexerAction : _lexerActions) {
- hash = MurmurHash::update(hash, lexerAction);
- }
- hash = MurmurHash::finish(hash, _lexerActions.size());
- if (hash == 0) {
- hash = std::numeric_limits<size_t>::max();
- }
- _hashCode.store(hash, std::memory_order_relaxed);
- }
- return hash;
-}
-
-bool LexerActionExecutor::equals(const LexerActionExecutor &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- return cachedHashCodeEqual(_hashCode.load(std::memory_order_relaxed), other._hashCode.load(std::memory_order_relaxed)) &&
- _lexerActions.size() == other._lexerActions.size() &&
- std::equal(_lexerActions.begin(), _lexerActions.end(), other._lexerActions.begin(), lexerActionEqual);
-}
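execute() above uses the finally() scope guard from support/CPPUtils.h to put the CharStream back where it started once the position-dependent actions have run. The same restore-on-exit idea can be expressed as a small RAII type; this is an illustrative sketch, not the runtime's implementation, and FakeStream/StreamGuard are made-up names:

    #include <cstddef>

    // Minimal stand-in for CharStream's index()/seek() surface.
    struct FakeStream {
      size_t pos = 0;
      size_t index() const { return pos; }
      void seek(size_t p) { pos = p; }
    };

    // RAII guard: remembers the current position and restores it on scope exit.
    class StreamGuard {
     public:
      explicit StreamGuard(FakeStream &input) : _input(input), _saved(input.index()) {}
      ~StreamGuard() { _input.seek(_saved); }
      StreamGuard(const StreamGuard &) = delete;
      StreamGuard &operator=(const StreamGuard &) = delete;

     private:
      FakeStream &_input;
      size_t _saved;
    };

    void runPositionDependentActions(FakeStream &input, size_t startIndex, int offset) {
      StreamGuard restore(input);       // position is restored no matter how we leave
      input.seek(startIndex + offset);  // position the stream for an indexed action
      // ... execute the action against the repositioned stream ...
    }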
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.h
deleted file mode 100644
index 28bb1e28ec..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionExecutor.h
+++ /dev/null
@@ -1,128 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CharStream.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Represents an executor for a sequence of lexer actions which were traversed during
- /// the matching operation of a lexer rule (token).
- ///
- /// <para>The executor tracks position information for position-dependent lexer actions
- /// efficiently, ensuring that actions appearing only at the end of the rule do
- /// not cause bloating of the <seealso cref="DFA"/> created for the lexer.</para>
- class ANTLR4CPP_PUBLIC LexerActionExecutor final : public std::enable_shared_from_this<LexerActionExecutor> {
- public:
- /// <summary>
- /// Constructs an executor for a sequence of <seealso cref="LexerAction"/> actions. </summary>
- /// <param name="lexerActions"> The lexer actions to execute. </param>
- explicit LexerActionExecutor(std::vector<Ref<const LexerAction>> lexerActions);
-
- /// <summary>
- /// Creates a <seealso cref="LexerActionExecutor"/> which executes the actions for
- /// the input {@code lexerActionExecutor} followed by a specified
- /// {@code lexerAction}.
- /// </summary>
- /// <param name="lexerActionExecutor"> The executor for actions already traversed by
- /// the lexer while matching a token within a particular
- /// <seealso cref="LexerATNConfig"/>. If this is {@code null}, the method behaves as
- /// though it were an empty executor. </param>
- /// <param name="lexerAction"> The lexer action to execute after the actions
- /// specified in {@code lexerActionExecutor}.
- /// </param>
- /// <returns> A <seealso cref="LexerActionExecutor"/> for executing the combined actions
- /// of {@code lexerActionExecutor} and {@code lexerAction}. </returns>
- static Ref<const LexerActionExecutor> append(const Ref<const LexerActionExecutor> &lexerActionExecutor,
- Ref<const LexerAction> lexerAction);
-
- /// <summary>
- /// Creates a <seealso cref="LexerActionExecutor"/> which encodes the current offset
- /// for position-dependent lexer actions.
- ///
- /// <para>Normally, when the executor encounters lexer actions where
- /// <seealso cref="LexerAction#isPositionDependent"/> returns {@code true}, it calls
- /// <seealso cref="IntStream#seek"/> on the input <seealso cref="CharStream"/> to set the input
- /// position to the <em>end</em> of the current token. This behavior provides
- /// for efficient DFA representation of lexer actions which appear at the end
- /// of a lexer rule, even when the lexer rule matches a variable number of
- /// characters.</para>
- ///
- /// <para>Prior to traversing a match transition in the ATN, the current offset
- /// from the token start index is assigned to all position-dependent lexer
- /// actions which have not already been assigned a fixed offset. By storing
- /// the offsets relative to the token start index, the DFA representation of
- /// lexer actions which appear in the middle of tokens remains efficient due
- /// to sharing among tokens of the same length, regardless of their absolute
- /// position in the input stream.</para>
- ///
- /// <para>If the current executor already has offsets assigned to all
- /// position-dependent lexer actions, the method returns {@code this}.</para>
- /// </summary>
- /// <param name="offset"> The current offset to assign to all position-dependent
- /// lexer actions which do not already have offsets assigned.
- /// </param>
- /// <returns> A <seealso cref="LexerActionExecutor"/> which stores input stream offsets
- /// for all position-dependent lexer actions. </returns>
- Ref<const LexerActionExecutor> fixOffsetBeforeMatch(int offset) const;
-
- /// <summary>
- /// Gets the lexer actions to be executed by this executor. </summary>
- /// <returns> The lexer actions to be executed by this executor. </returns>
- const std::vector<Ref<const LexerAction>>& getLexerActions() const;
-
- /// <summary>
- /// Execute the actions encapsulated by this executor within the context of a
- /// particular <seealso cref="Lexer"/>.
- ///
- /// <para>This method calls <seealso cref="IntStream#seek"/> to set the position of the
- /// {@code input} <seealso cref="CharStream"/> prior to calling
- /// <seealso cref="LexerAction#execute"/> on a position-dependent action. Before the
- /// method returns, the input position will be restored to the same position
- /// it was in when the method was invoked.</para>
- /// </summary>
- /// <param name="lexer"> The lexer instance. </param>
- /// <param name="input"> The input stream which is the source for the current token.
- /// When this method is called, the current <seealso cref="IntStream#index"/> for
- /// {@code input} should be the start of the following token, i.e. 1
- /// character past the end of the current token. </param>
- /// <param name="startIndex"> The token start index. This value may be passed to
- /// <seealso cref="IntStream#seek"/> to set the {@code input} position to the beginning
- /// of the token. </param>
- void execute(Lexer *lexer, CharStream *input, size_t startIndex) const;
-
- size_t hashCode() const;
-
- bool equals(const LexerActionExecutor &other) const;
-
- private:
- const std::vector<Ref<const LexerAction>> _lexerActions;
- mutable std::atomic<size_t> _hashCode;
- };
-
- inline bool operator==(const LexerActionExecutor &lhs, const LexerActionExecutor &rhs) {
- return lhs.equals(rhs);
- }
-
- inline bool operator!=(const LexerActionExecutor &lhs, const LexerActionExecutor &rhs) {
- return !operator==(lhs, rhs);
- }
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::atn::LexerActionExecutor> {
- size_t operator()(const ::antlr4::atn::LexerActionExecutor &lexerActionExecutor) const {
- return lexerActionExecutor.hashCode();
- }
- };
-
-} // namespace std
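As a hedged usage sketch of the interface declared above (assuming the runtime's usual include paths and that Ref is its shared-pointer alias): an executor is grown one action at a time with append(), and fixOffsetBeforeMatch() pins position-dependent actions to their offset from the token start when matching continues past them.

    #include <memory>

    #include "atn/LexerActionExecutor.h"
    #include "atn/LexerCustomAction.h"
    #include "atn/LexerSkipAction.h"

    using namespace antlr4;
    using namespace antlr4::atn;

    Ref<const LexerActionExecutor> buildExecutor() {
      // Start from "no executor": a null executor behaves like an empty one.
      Ref<const LexerActionExecutor> exec =
          LexerActionExecutor::append(nullptr, LexerSkipAction::getInstance());

      // Chain a position-dependent custom action (rule 0, action 0); append()
      // copies the existing action list rather than mutating it.
      exec = LexerActionExecutor::append(exec, std::make_shared<LexerCustomAction>(0, 0));

      // Before traversing a further match transition, record the current offset
      // from the token start (2 is purely illustrative) so the custom action is
      // executed at the right input position later.
      return exec->fixOffsetBeforeMatch(2);
    }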
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionType.h
deleted file mode 100644
index aab4033415..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerActionType.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Represents the serialization type of a <seealso cref="LexerAction"/>.
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- enum class LexerActionType : size_t {
- /// <summary>
- /// The type of a <seealso cref="LexerChannelAction"/> action.
- /// </summary>
- CHANNEL = 0,
- /// <summary>
- /// The type of a <seealso cref="LexerCustomAction"/> action.
- /// </summary>
- CUSTOM,
- /// <summary>
- /// The type of a <seealso cref="LexerModeAction"/> action.
- /// </summary>
- MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerMoreAction"/> action.
- /// </summary>
- MORE,
- /// <summary>
- /// The type of a <seealso cref="LexerPopModeAction"/> action.
- /// </summary>
- POP_MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerPushModeAction"/> action.
- /// </summary>
- PUSH_MODE,
- /// <summary>
- /// The type of a <seealso cref="LexerSkipAction"/> action.
- /// </summary>
- SKIP,
- /// <summary>
- /// The type of a <seealso cref="LexerTypeAction"/> action.
- /// </summary>
- TYPE,
-
- INDEXED_CUSTOM,
- };
-
-} // namespace atn
-} // namespace antlr4
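The enum above is what LexerAction::getActionType() returns, and, as noted in LexerAction.h, INDEXED_CUSTOM wraps another action rather than being a serialized type of its own. A short hedged example of unwrapping it with the is()/downCast pattern used throughout these sources (underlyingType is an illustrative helper, not part of the runtime):

    #include "atn/LexerAction.h"
    #include "atn/LexerActionType.h"
    #include "atn/LexerIndexedCustomAction.h"
    #include "support/Casts.h"

    using namespace antlr4::atn;

    // Returns the underlying serialization type, unwrapping the INDEXED_CUSTOM
    // wrapper as recommended by the comment on LexerAction::getActionType().
    LexerActionType underlyingType(const LexerAction &action) {
      if (LexerIndexedCustomAction::is(action)) {
        const auto &indexed = antlrcpp::downCast<const LexerIndexedCustomAction &>(action);
        return indexed.getAction()->getActionType();
      }
      return action.getActionType();
    }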
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.cpp
deleted file mode 100644
index b6cda6cff0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/Casts.h"
-
-#include "atn/LexerChannelAction.h"
-
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerChannelAction::LexerChannelAction(int channel)
- : LexerAction(LexerActionType::CHANNEL, false), _channel(channel) {}
-
-void LexerChannelAction::execute(Lexer *lexer) const {
- lexer->setChannel(getChannel());
-}
-
-size_t LexerChannelAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getChannel());
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerChannelAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerChannelAction&>(other);
- return getChannel() == lexerAction.getChannel();
-}
-
-std::string LexerChannelAction::toString() const {
- return "channel(" + std::to_string(getChannel()) + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.h
deleted file mode 100644
index 1a5c53efef..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerChannelAction.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- using antlr4::Lexer;
-
- /// <summary>
- /// Implements the {@code channel} lexer action by calling
- /// <seealso cref="Lexer#setChannel"/> with the assigned channel.
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerChannelAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::CHANNEL; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a new {@code channel} action with the specified channel value. </summary>
- /// <param name="channel"> The channel value to pass to <seealso cref="Lexer#setChannel"/>. </param>
- explicit LexerChannelAction(int channel);
-
- /// <summary>
- /// Gets the channel to use for the <seealso cref="Token"/> created by the lexer.
- /// </summary>
- /// <returns> The channel to use for the <seealso cref="Token"/> created by the lexer. </returns>
- int getChannel() const { return _channel; }
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#setChannel"/> with the
- /// value provided by <seealso cref="#getChannel"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const int _channel;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.cpp
deleted file mode 100644
index b6edd89ea1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/Casts.h"
-
-#include "atn/LexerCustomAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerCustomAction::LexerCustomAction(size_t ruleIndex, size_t actionIndex)
- : LexerAction(LexerActionType::CUSTOM, true), _ruleIndex(ruleIndex), _actionIndex(actionIndex) {}
-
-void LexerCustomAction::execute(Lexer *lexer) const {
- lexer->action(nullptr, getRuleIndex(), getActionIndex());
-}
-
-size_t LexerCustomAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getRuleIndex());
- hash = MurmurHash::update(hash, getActionIndex());
- return MurmurHash::finish(hash, 3);
-}
-
-bool LexerCustomAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerCustomAction&>(other);
- return getRuleIndex() == lexerAction.getRuleIndex() && getActionIndex() == lexerAction.getActionIndex();
-}
-
-std::string LexerCustomAction::toString() const {
- return "custom(" + std::to_string(getRuleIndex()) + ", " + std::to_string(getActionIndex()) + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.h
deleted file mode 100644
index 7973271c62..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerCustomAction.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Executes a custom lexer action by calling <seealso cref="Recognizer#action"/> with the
- /// rule and action indexes assigned to the custom action. The implementation of
- /// a custom action is added to the generated code for the lexer in an override
- /// of <seealso cref="Recognizer#action"/> when the grammar is compiled.
- ///
- /// <para>This class may represent embedded actions created with the <code>{...}</code>
- /// syntax in ANTLR 4, as well as actions created for lexer commands where the
- /// command argument could not be evaluated when the grammar was compiled.</para>
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerCustomAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::CUSTOM; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a custom lexer action with the specified rule and action
- /// indexes.
- /// </summary>
- /// <param name="ruleIndex"> The rule index to use for calls to
- /// <seealso cref="Recognizer#action"/>. </param>
- /// <param name="actionIndex"> The action index to use for calls to
- /// <seealso cref="Recognizer#action"/>. </param>
- LexerCustomAction(size_t ruleIndex, size_t actionIndex);
-
- /// <summary>
- /// Gets the rule index to use for calls to <seealso cref="Recognizer#action"/>.
- /// </summary>
- /// <returns> The rule index for the custom action. </returns>
- size_t getRuleIndex() const { return _ruleIndex; }
-
- /// <summary>
- /// Gets the action index to use for calls to <seealso cref="Recognizer#action"/>.
- /// </summary>
- /// <returns> The action index for the custom action. </returns>
- size_t getActionIndex() const { return _actionIndex; }
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>Custom actions are implemented by calling <seealso cref="Lexer#action"/> with the
- /// appropriate rule and action indexes.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const size_t _ruleIndex;
- const size_t _actionIndex;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.cpp
deleted file mode 100644
index 114863702c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.cpp
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "atn/LexerIndexedCustomAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-namespace {
-
- bool cachedHashCodeEqual(size_t lhs, size_t rhs) {
- return lhs == rhs || lhs == 0 || rhs == 0;
- }
-
-}
-
-LexerIndexedCustomAction::LexerIndexedCustomAction(int offset, Ref<const LexerAction> action)
- : LexerAction(LexerActionType::INDEXED_CUSTOM, true), _action(std::move(action)), _offset(offset) {}
-
-void LexerIndexedCustomAction::execute(Lexer *lexer) const {
- // assume the input stream position was properly set by the calling code
- getAction()->execute(lexer);
-}
-
-size_t LexerIndexedCustomAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getOffset());
- hash = MurmurHash::update(hash, getAction());
- return MurmurHash::finish(hash, 3);
-}
-
-bool LexerIndexedCustomAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerIndexedCustomAction&>(other);
- return getOffset() == lexerAction.getOffset() &&
- cachedHashCodeEqual(cachedHashCode(), lexerAction.cachedHashCode()) &&
- *getAction() == *lexerAction.getAction();
-}
-
-std::string LexerIndexedCustomAction::toString() const {
- return "indexedCustom(" + std::to_string(getOffset()) + ", " + getAction()->toString() + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.h
deleted file mode 100644
index 5693bac62b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerIndexedCustomAction.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "RuleContext.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This implementation of <seealso cref="LexerAction"/> is used for tracking input offsets
- /// for position-dependent actions within a <seealso cref="LexerActionExecutor"/>.
- ///
- /// <para>This action is not serialized as part of the ATN, and is only required for
- /// position-dependent lexer actions which appear at a location other than the
- /// end of a rule. For more information about DFA optimizations employed for
- /// lexer actions, see <seealso cref="LexerActionExecutor#append"/> and
- /// <seealso cref="LexerActionExecutor#fixOffsetBeforeMatch"/>.</para>
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerIndexedCustomAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::INDEXED_CUSTOM; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a new indexed custom action by associating a character offset
- /// with a <seealso cref="LexerAction"/>.
- ///
- /// <para>Note: This class is only required for lexer actions for which
- /// <seealso cref="LexerAction#isPositionDependent"/> returns {@code true}.</para>
- /// </summary>
- /// <param name="offset"> The offset into the input <seealso cref="CharStream"/>, relative to
- /// the token start index, at which the specified lexer action should be
- /// executed. </param>
- /// <param name="action"> The lexer action to execute at a particular offset in the
- /// input <seealso cref="CharStream"/>. </param>
- LexerIndexedCustomAction(int offset, Ref<const LexerAction> action);
-
- /// <summary>
- /// Gets the location in the input <seealso cref="CharStream"/> at which the lexer
- /// action should be executed. The value is interpreted as an offset relative
- /// to the token start index.
- /// </summary>
- /// <returns> The location in the input <seealso cref="CharStream"/> at which the lexer
- /// action should be executed. </returns>
- int getOffset() const { return _offset; }
-
- /// <summary>
- /// Gets the lexer action to execute.
- /// </summary>
- /// <returns> A <seealso cref="LexerAction"/> object which executes the lexer action. </returns>
- const Ref<const LexerAction>& getAction() const { return _action; }
-
- void execute(Lexer *lexer) const override;
- bool equals(const LexerAction &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const Ref<const LexerAction> _action;
- const int _offset;
- };
-
-} // namespace atn
-} // namespace antlr4
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.cpp
deleted file mode 100644
index a4ca3b3d79..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/Casts.h"
-
-#include "atn/LexerModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerModeAction::LexerModeAction(int mode) : LexerAction(LexerActionType::MODE, false), _mode(mode) {}
-
-void LexerModeAction::execute(Lexer *lexer) const {
- lexer->setMode(getMode());
-}
-
-size_t LexerModeAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getMode());
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerModeAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerModeAction&>(other);
- return getMode() == lexerAction.getMode();
-}
-
-std::string LexerModeAction::toString() const {
- return "mode(" + std::to_string(getMode()) + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.h
deleted file mode 100644
index 6fa61a2e67..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerModeAction.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Implements the {@code mode} lexer action by calling <seealso cref="Lexer#mode"/> with
- /// the assigned mode.
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerModeAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::MODE; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a new {@code mode} action with the specified mode value. </summary>
- /// <param name="mode"> The mode value to pass to <seealso cref="Lexer#mode"/>. </param>
- explicit LexerModeAction(int mode);
-
- /// <summary>
- /// Get the lexer mode this action should transition the lexer to.
- /// </summary>
- /// <returns> The lexer mode for this {@code mode} command. </returns>
- int getMode() const { return _mode; }
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#mode"/> with the
- /// value provided by <seealso cref="#getMode"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &obj) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const int _mode;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.cpp
deleted file mode 100644
index 30df87b7b6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-
-#include "atn/LexerMoreAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<const LexerMoreAction>& LexerMoreAction::getInstance() {
- static const Ref<const LexerMoreAction> instance(new LexerMoreAction());
- return instance;
-}
-
-void LexerMoreAction::execute(Lexer *lexer) const {
- lexer->more();
-}
-
-size_t LexerMoreAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerMoreAction::equals(const LexerAction &other) const {
- return this == std::addressof(other);
-}
-
-std::string LexerMoreAction::toString() const {
- return "more";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.h
deleted file mode 100644
index fc4b8fcbfc..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerMoreAction.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Implements the {@code more} lexer action by calling <seealso cref="Lexer#more"/>.
- ///
- /// <para>The {@code more} command does not have any parameters, so this action is
- /// implemented as a singleton instance exposed by <seealso cref="#INSTANCE"/>.</para>
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerMoreAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::MORE; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Provides a singleton instance of this parameterless lexer action.
- /// </summary>
- static const Ref<const LexerMoreAction>& getInstance();
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#more"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &obj) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code more} command.
- LexerMoreAction() : LexerAction(LexerActionType::MORE, false) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.cpp
deleted file mode 100644
index 5192049348..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-
-#include "atn/LexerPopModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<const LexerPopModeAction>& LexerPopModeAction::getInstance() {
- static const Ref<const LexerPopModeAction> instance(new LexerPopModeAction());
- return instance;
-}
-
-void LexerPopModeAction::execute(Lexer *lexer) const {
- lexer->popMode();
-}
-
-size_t LexerPopModeAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerPopModeAction::equals(const LexerAction &other) const {
- return this == std::addressof(other);
-}
-
-std::string LexerPopModeAction::toString() const {
- return "popMode";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.h
deleted file mode 100644
index 8d712cad8c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPopModeAction.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Implements the {@code popMode} lexer action by calling <seealso cref="Lexer#popMode"/>.
- ///
- /// <para>The {@code popMode} command does not have any parameters, so this action is
- /// implemented as a singleton instance exposed by <seealso cref="#INSTANCE"/>.</para>
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerPopModeAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::POP_MODE; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Provides a singleton instance of this parameterless lexer action.
- /// </summary>
- static const Ref<const LexerPopModeAction>& getInstance();
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#popMode"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code popMode} command.
- LexerPopModeAction() : LexerAction(LexerActionType::POP_MODE, false) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.cpp
deleted file mode 100644
index 3ebd21fab2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/Casts.h"
-
-#include "atn/LexerPushModeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerPushModeAction::LexerPushModeAction(int mode) : LexerAction(LexerActionType::PUSH_MODE, false), _mode(mode) {}
-
-void LexerPushModeAction::execute(Lexer *lexer) const {
- lexer->pushMode(getMode());
-}
-
-size_t LexerPushModeAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getMode());
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerPushModeAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerPushModeAction&>(other);
- return getMode() == lexerAction.getMode();
-}
-
-std::string LexerPushModeAction::toString() const {
- return "pushMode(" + std::to_string(getMode()) + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.h
deleted file mode 100644
index 32b706b583..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerPushModeAction.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Implements the {@code pushMode} lexer action by calling
- /// <seealso cref="Lexer#pushMode"/> with the assigned mode.
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerPushModeAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::PUSH_MODE; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a new {@code pushMode} action with the specified mode value. </summary>
- /// <param name="mode"> The mode value to pass to <seealso cref="Lexer#pushMode"/>. </param>
- explicit LexerPushModeAction(int mode);
-
- /// <summary>
- /// Get the lexer mode this action should transition the lexer to.
- /// </summary>
- /// <returns> The lexer mode for this {@code pushMode} command. </returns>
- int getMode() const { return _mode; }
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#pushMode"/> with the
- /// value provided by <seealso cref="#getMode"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &obj) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const int _mode;
- };
-
-} // namespace atn
-} // namespace antlr4
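The mode, pushMode and popMode actions above ultimately drive a mode stack inside the Lexer: setMode replaces the current mode, pushMode saves it and switches, and popMode restores the saved one. A standalone sketch of that stack discipline under those assumptions (ModeStack is an illustrative name; the real Lexer keeps this state itself):

    #include <stdexcept>
    #include <vector>

    // Illustrative mode stack mirroring setMode/pushMode/popMode semantics.
    class ModeStack {
     public:
      void setMode(int mode) { _mode = mode; }   // LexerModeAction

      void pushMode(int mode) {                  // LexerPushModeAction
        _stack.push_back(_mode);
        _mode = mode;
      }

      int popMode() {                            // LexerPopModeAction
        if (_stack.empty()) throw std::logic_error("mode stack is empty");
        _mode = _stack.back();
        _stack.pop_back();
        return _mode;
      }

      int currentMode() const { return _mode; }

     private:
      int _mode = 0;                             // default mode
      std::vector<int> _stack;
    };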
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.cpp
deleted file mode 100644
index 72f9de3e1f..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-
-#include "atn/LexerSkipAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-const Ref<const LexerSkipAction>& LexerSkipAction::getInstance() {
- static const Ref<const LexerSkipAction> instance(new LexerSkipAction());
- return instance;
-}
-
-void LexerSkipAction::execute(Lexer *lexer) const {
- lexer->skip();
-}
-
-size_t LexerSkipAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- return MurmurHash::finish(hash, 1);
-}
-
-bool LexerSkipAction::equals(const LexerAction &other) const {
- return this == std::addressof(other);
-}
-
-std::string LexerSkipAction::toString() const {
- return "skip";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.h
deleted file mode 100644
index afdf4702f2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerSkipAction.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerAction.h"
-#include "atn/LexerActionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// Implements the {@code skip} lexer action by calling <seealso cref="Lexer#skip"/>.
- ///
- /// <para>The {@code skip} command does not have any parameters, so this action is
- /// implemented as a singleton instance exposed by <seealso cref="#INSTANCE"/>.</para>
- ///
- /// @author Sam Harwell
- /// @since 4.2
- /// </summary>
- class ANTLR4CPP_PUBLIC LexerSkipAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::SKIP; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// Provides a singleton instance of this parameterless lexer action.
- static const Ref<const LexerSkipAction>& getInstance();
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#skip"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &obj) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- /// Constructs the singleton instance of the lexer {@code skip} command.
- LexerSkipAction() : LexerAction(LexerActionType::SKIP, false) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.cpp
deleted file mode 100644
index 55ccf358ba..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "support/Casts.h"
-
-#include "atn/LexerTypeAction.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-using namespace antlrcpp;
-
-LexerTypeAction::LexerTypeAction(int type) : LexerAction(LexerActionType::TYPE, false), _type(type) {}
-
-void LexerTypeAction::execute(Lexer *lexer) const {
- lexer->setType(getType());
-}
-
-size_t LexerTypeAction::hashCodeImpl() const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, static_cast<size_t>(getActionType()));
- hash = MurmurHash::update(hash, getType());
- return MurmurHash::finish(hash, 2);
-}
-
-bool LexerTypeAction::equals(const LexerAction &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getActionType() != other.getActionType()) {
- return false;
- }
- const auto &lexerAction = downCast<const LexerTypeAction&>(other);
- return getType() == lexerAction.getType();
-}
-
-std::string LexerTypeAction::toString() const {
- return "type(" + std::to_string(getType()) + ")";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.h
deleted file mode 100644
index 1cd7d71fd3..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LexerTypeAction.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/LexerActionType.h"
-#include "atn/LexerAction.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Implements the {@code type} lexer action by calling <seealso cref="Lexer#setType"/>
- /// with the assigned type.
- class ANTLR4CPP_PUBLIC LexerTypeAction final : public LexerAction {
- public:
- static bool is(const LexerAction &lexerAction) { return lexerAction.getActionType() == LexerActionType::TYPE; }
-
- static bool is(const LexerAction *lexerAction) { return lexerAction != nullptr && is(*lexerAction); }
-
- /// <summary>
- /// Constructs a new {@code type} action with the specified token type value. </summary>
- /// <param name="type"> The type to assign to the token using <seealso cref="Lexer#setType"/>. </param>
- explicit LexerTypeAction(int type);
-
- /// <summary>
- /// Gets the type to assign to a token created by the lexer. </summary>
- /// <returns> The type to assign to a token created by the lexer. </returns>
- int getType() const { return _type; }
-
- /// <summary>
- /// {@inheritDoc}
- ///
- /// <para>This action is implemented by calling <seealso cref="Lexer#setType"/> with the
- /// value provided by <seealso cref="#getType"/>.</para>
- /// </summary>
- void execute(Lexer *lexer) const override;
-
- bool equals(const LexerAction &obj) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
-
- private:
- const int _type;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.cpp
deleted file mode 100644
index aa3f9124c7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/LookaheadEventInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-LookaheadEventInfo::LookaheadEventInfo(size_t decision, ATNConfigSet *configs, size_t predictedAlt,
- TokenStream *input, size_t startIndex, size_t stopIndex, bool fullCtx)
- : DecisionEventInfo(decision, configs, input, startIndex, stopIndex, fullCtx) {
-
- this->predictedAlt = predictedAlt;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.h
deleted file mode 100644
index f5fc24fde2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LookaheadEventInfo.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// This class represents profiling event information for tracking the lookahead
- /// depth required in order to make a prediction.
- class ANTLR4CPP_PUBLIC LookaheadEventInfo : public DecisionEventInfo {
- public:
- /// The alternative chosen by adaptivePredict(), not necessarily
- /// the outermost alt shown for a rule; left-recursive rules have
- /// user-level alts that differ from the rewritten rule with a (...) block
- /// and a (..)* loop.
- size_t predictedAlt = 0;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="LookaheadEventInfo"/> class with
- /// the specified detailed lookahead information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="configs"> The final configuration set containing the necessary
- /// information to determine the result of a prediction, or {@code null} if
- /// the final configuration set is not available </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the prediction was finally made </param>
- /// <param name="fullCtx"> {@code true} if the current lookahead is part of an LL
- /// prediction; otherwise, {@code false} if the current lookahead is part of
- /// an SLL prediction </param>
- LookaheadEventInfo(size_t decision, ATNConfigSet *configs, size_t predictedAlt, TokenStream *input, size_t startIndex,
- size_t stopIndex, bool fullCtx);
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/LoopEndState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/LoopEndState.h
deleted file mode 100644
index 2616b1c4b8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/LoopEndState.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Mark the end of a * or + loop.
- class ANTLR4CPP_PUBLIC LoopEndState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::LOOP_END; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- ATNState *loopBackState = nullptr;
-
- LoopEndState() : ATNState(ATNStateType::LOOP_END) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.cpp
deleted file mode 100644
index ba796d7188..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.cpp
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/NotSetTransition.h"
-#include "atn/ATNState.h"
-#include "misc/IntervalSet.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-NotSetTransition::NotSetTransition(ATNState *target, misc::IntervalSet set) : SetTransition(TransitionType::NOT_SET, target, std::move(set)) {}
-
-bool NotSetTransition::matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol
- && !SetTransition::matches(symbol, minVocabSymbol, maxVocabSymbol);
-}
-
-std::string NotSetTransition::toString() const {
- return "NOT_SET " + Transition::toString() + " { " + SetTransition::toString() + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.h
deleted file mode 100644
index ef937a60fe..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/NotSetTransition.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/SetTransition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC NotSetTransition final : public SetTransition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::NOT_SET; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- NotSetTransition(ATNState *target, misc::IntervalSet set);
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.cpp
deleted file mode 100644
index 48655424d8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.cpp
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/OrderedATNConfigSet.h"
-
-using namespace antlr4::atn;
-
-size_t OrderedATNConfigSet::hashCode(const ATNConfig &atnConfig) const {
- return atnConfig.hashCode();
-}
-
-bool OrderedATNConfigSet::equals(const ATNConfig &lhs, const ATNConfig &rhs) const {
- return lhs == rhs;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.h b/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.h
deleted file mode 100644
index 18bf6bcb21..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/OrderedATNConfigSet.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNConfig.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC OrderedATNConfigSet final : public ATNConfigSet {
- public:
- OrderedATNConfigSet() = default;
-
- private:
- size_t hashCode(const ATNConfig &atnConfig) const override;
-
- bool equals(const ATNConfig &lhs, const ATNConfig &rhs) const override;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.cpp
deleted file mode 100644
index 95a89ac855..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.cpp
+++ /dev/null
@@ -1,102 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ProfilingATNSimulator.h"
-#include "dfa/DFA.h"
-
-#include "atn/ParseInfo.h"
-
-using namespace antlr4::atn;
-
-ParseInfo::ParseInfo(ProfilingATNSimulator *atnSimulator) : _atnSimulator(atnSimulator) {
-}
-
-ParseInfo::~ParseInfo() {
-}
-
-std::vector<DecisionInfo> ParseInfo::getDecisionInfo() {
- return _atnSimulator->getDecisionInfo();
-}
-
-std::vector<size_t> ParseInfo::getLLDecisions() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- std::vector<size_t> LL;
- for (size_t i = 0; i < decisions.size(); ++i) {
- long long fallBack = decisions[i].LL_Fallback;
- if (fallBack > 0) {
- LL.push_back(i);
- }
- }
- return LL;
-}
-
-long long ParseInfo::getTotalTimeInPrediction() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long t = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- t += decisions[i].timeInPrediction;
- }
- return t;
-}
-
-long long ParseInfo::getTotalSLLLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_TotalLook;
- }
- return k;
-}
-
-long long ParseInfo::getTotalLLLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); i++) {
- k += decisions[i].LL_TotalLook;
- }
- return k;
-}
-
-long long ParseInfo::getTotalSLLATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_ATNTransitions;
- }
- return k;
-}
-
-long long ParseInfo::getTotalLLATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].LL_ATNTransitions;
- }
- return k;
-}
-
-long long ParseInfo::getTotalATNLookaheadOps() {
- std::vector<DecisionInfo> decisions = _atnSimulator->getDecisionInfo();
- long long k = 0;
- for (size_t i = 0; i < decisions.size(); ++i) {
- k += decisions[i].SLL_ATNTransitions;
- k += decisions[i].LL_ATNTransitions;
- }
- return k;
-}
-
-size_t ParseInfo::getDFASize() {
- size_t n = 0;
- std::vector<dfa::DFA> &decisionToDFA = _atnSimulator->decisionToDFA;
- for (size_t i = 0; i < decisionToDFA.size(); ++i) {
- n += getDFASize(i);
- }
- return n;
-}
-
-size_t ParseInfo::getDFASize(size_t decision) {
- dfa::DFA &decisionToDFA = _atnSimulator->decisionToDFA[decision];
- return decisionToDFA.states.size();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.h
deleted file mode 100644
index 7ced7de433..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ParseInfo.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ProfilingATNSimulator;
-
- /// This class provides access to specific and aggregate statistics gathered
- /// during profiling of a parser.
- class ANTLR4CPP_PUBLIC ParseInfo {
- public:
- ParseInfo(ProfilingATNSimulator *atnSimulator);
- ParseInfo(ParseInfo const&) = default;
- virtual ~ParseInfo();
-
- ParseInfo& operator=(ParseInfo const&) = default;
-
- /// <summary>
- /// Gets an array of <seealso cref="DecisionInfo"/> instances containing the profiling
- /// information gathered for each decision in the ATN.
- /// </summary>
- /// <returns> An array of <seealso cref="DecisionInfo"/> instances, indexed by decision
- /// number. </returns>
- virtual std::vector<DecisionInfo> getDecisionInfo();
-
- /// <summary>
- /// Gets the decision numbers for decisions that required one or more
- /// full-context predictions during parsing. These are decisions for which
- /// <seealso cref="DecisionInfo#LL_Fallback"/> is non-zero.
- /// </summary>
- /// <returns> A list of decision numbers which required one or more
- /// full-context predictions during parsing. </returns>
- virtual std::vector<size_t> getLLDecisions();
-
- /// <summary>
- /// Gets the total time spent during prediction across all decisions made
- /// during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#timeInPrediction"/> for all decisions.
- /// </summary>
- virtual long long getTotalTimeInPrediction();
-
- /// <summary>
- /// Gets the total number of SLL lookahead operations across all decisions
- /// made during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#SLL_TotalLook"/> for all decisions.
- /// </summary>
- virtual long long getTotalSLLLookaheadOps();
-
- /// <summary>
- /// Gets the total number of LL lookahead operations across all decisions
- /// made during parsing. This value is the sum of
- /// <seealso cref="DecisionInfo#LL_TotalLook"/> for all decisions.
- /// </summary>
- virtual long long getTotalLLLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for SLL prediction
- /// across all decisions made during parsing.
- /// </summary>
- virtual long long getTotalSLLATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for LL prediction
- /// across all decisions made during parsing.
- /// </summary>
- virtual long long getTotalLLATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of ATN lookahead operations for SLL and LL
- /// prediction across all decisions made during parsing.
- ///
- /// <para>
- /// This value is the sum of <seealso cref="#getTotalSLLATNLookaheadOps"/> and
- /// <seealso cref="#getTotalLLATNLookaheadOps"/>.</para>
- /// </summary>
- virtual long long getTotalATNLookaheadOps();
-
- /// <summary>
- /// Gets the total number of DFA states stored in the DFA cache for all
- /// decisions in the ATN.
- /// </summary>
- virtual size_t getDFASize();
-
- /// <summary>
- /// Gets the total number of DFA states stored in the DFA cache for a
- /// particular decision.
- /// </summary>
- virtual size_t getDFASize(size_t decision);
-
- protected:
- const ProfilingATNSimulator *_atnSimulator; // non-owning, we are created by this simulator.
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.cpp
deleted file mode 100644
index ad1da03570..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.cpp
+++ /dev/null
@@ -1,1387 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "dfa/DFA.h"
-#include "NoViableAltException.h"
-#include "atn/DecisionState.h"
-#include "ParserRuleContext.h"
-#include "misc/IntervalSet.h"
-#include "Parser.h"
-#include "CommonTokenStream.h"
-#include "atn/NotSetTransition.h"
-#include "atn/AtomTransition.h"
-#include "atn/RuleTransition.h"
-#include "atn/PredicateTransition.h"
-#include "atn/PrecedencePredicateTransition.h"
-#include "atn/SingletonPredictionContext.h"
-#include "atn/ActionTransition.h"
-#include "atn/EpsilonTransition.h"
-#include "atn/RuleStopState.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNConfig.h"
-#include "internal/Synchronization.h"
-
-#include "atn/StarLoopEntryState.h"
-#include "atn/BlockStartState.h"
-#include "atn/BlockEndState.h"
-
-#include "misc/Interval.h"
-#include "ANTLRErrorListener.h"
-
-#include "Vocabulary.h"
-#include "support/Arrays.h"
-#include "support/Casts.h"
-
-#include "atn/ParserATNSimulator.h"
-
-#define DEBUG_ATN 0
-#define DEBUG_LIST_ATN_DECISIONS 0
-#define DEBUG_DFA 0
-#define RETRY_DEBUG 0
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::internal;
-using namespace antlrcpp;
-
-const bool ParserATNSimulator::TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT = ParserATNSimulator::getLrLoopSetting();
-
-ParserATNSimulator::ParserATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache)
-: ParserATNSimulator(nullptr, atn, decisionToDFA, sharedContextCache) {
-}
-
-ParserATNSimulator::ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache)
-: ParserATNSimulator(parser, atn, decisionToDFA, sharedContextCache, ParserATNSimulatorOptions()) {}
-
-ParserATNSimulator::ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache,
- const ParserATNSimulatorOptions &options)
-: ATNSimulator(atn, sharedContextCache), decisionToDFA(decisionToDFA), parser(parser),
- mergeCache(options.getPredictionContextMergeCacheOptions()) {
- InitializeInstanceFields();
-}
-
-void ParserATNSimulator::reset() {
-}
-
-void ParserATNSimulator::clearDFA() {
- int size = (int)decisionToDFA.size();
- decisionToDFA.clear();
- for (int d = 0; d < size; ++d) {
- decisionToDFA.push_back(dfa::DFA(atn.getDecisionState(d), d));
- }
-}
-
-size_t ParserATNSimulator::adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) {
-
-#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1
- std::cout << "adaptivePredict decision " << decision << " exec LA(1)==" << getLookaheadName(input) << " line "
- << input->LT(1)->getLine() << ":" << input->LT(1)->getCharPositionInLine() << std::endl;
-#endif
-
- _input = input;
- _startIndex = input->index();
- _outerContext = outerContext;
- dfa::DFA &dfa = decisionToDFA[decision];
- _dfa = &dfa;
-
- ssize_t m = input->mark();
- size_t index = _startIndex;
-
- // Now we are certain to have a specific decision's DFA
- // But, do we still need an initial state?
- auto onExit = finally([this, input, index, m] {
- if (mergeCache.getOptions().getClearEveryN() != 0) {
- if (++_mergeCacheCounter == mergeCache.getOptions().getClearEveryN()) {
- mergeCache.clear();
- _mergeCacheCounter = 0;
- }
- }
- _dfa = nullptr;
- input->seek(index);
- input->release(m);
- });
-
- dfa::DFAState *s0;
- {
- SharedLock<SharedMutex> stateLock(atn._stateMutex);
- if (dfa.isPrecedenceDfa()) {
- // the start state for a precedence DFA depends on the current
- // parser precedence, and is provided by a DFA method.
- SharedLock<SharedMutex> edgeLock(atn._edgeMutex);
- s0 = dfa.getPrecedenceStartState(parser->getPrecedence());
- } else {
- // the start state for a "regular" DFA is just s0
- s0 = dfa.s0;
- }
- }
-
- if (s0 == nullptr) {
- auto s0_closure = computeStartState(dfa.atnStartState, &ParserRuleContext::EMPTY, false);
- std::unique_ptr<dfa::DFAState> newState;
- std::unique_ptr<dfa::DFAState> oldState;
- UniqueLock<SharedMutex> stateLock(atn._stateMutex);
- dfa::DFAState* ds0 = dfa.s0;
- if (dfa.isPrecedenceDfa()) {
- /* If this is a precedence DFA, we use applyPrecedenceFilter
- * to convert the computed start state to a precedence start
- * state. We then use DFA.setPrecedenceStartState to set the
- * appropriate start state for the precedence level rather
- * than simply setting DFA.s0.
- */
- ds0->configs = std::move(s0_closure); // not used for prediction but useful to know start configs anyway
- newState = std::make_unique<dfa::DFAState>(applyPrecedenceFilter(ds0->configs.get()));
- s0 = addDFAState(dfa, newState.get());
- UniqueLock<SharedMutex> edgeLock(atn._edgeMutex);
- dfa.setPrecedenceStartState(parser->getPrecedence(), s0);
- } else {
- newState = std::make_unique<dfa::DFAState>(std::move(s0_closure));
- s0 = addDFAState(dfa, newState.get());
- if (ds0 != s0) {
- oldState.reset(ds0);
- dfa.s0 = s0;
- }
- }
- if (s0 == newState.get()) {
- newState.release();
- }
- }
-
- // We can start with an existing DFA.
- size_t alt = execATN(dfa, s0, input, index, outerContext != nullptr ? outerContext : &ParserRuleContext::EMPTY);
-
- return alt;
-}
-
-size_t ParserATNSimulator::execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream *input, size_t startIndex,
- ParserRuleContext *outerContext) {
-
-#if DEBUG_ATN == 1 || DEBUG_LIST_ATN_DECISIONS == 1
- std::cout << "execATN decision " << dfa.decision << " exec LA(1)==" << getLookaheadName(input) <<
- " line " << input->LT(1)->getLine() << ":" << input->LT(1)->getCharPositionInLine() << std::endl;
-#endif
-
- dfa::DFAState *previousD = s0;
-
-#if DEBUG_ATN == 1
- std::cout << "s0 = " << s0 << std::endl;
-#endif
-
- size_t t = input->LA(1);
-
- while (true) { // while more work
- dfa::DFAState *D = getExistingTargetState(previousD, t);
- if (D == nullptr) {
- D = computeTargetState(dfa, previousD, t);
- }
-
- if (D == ERROR.get()) {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for SLL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision; better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- NoViableAltException e = noViableAlt(input, outerContext, previousD->configs.get(), startIndex, false);
- input->seek(startIndex);
- size_t alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previousD->configs.get(), outerContext);
- if (alt != ATN::INVALID_ALT_NUMBER) {
- return alt;
- }
-
- throw e;
- }
-
- if (D->requiresFullContext && _mode != PredictionMode::SLL) {
- // IF PREDS, MIGHT RESOLVE TO SINGLE ALT => SLL (or syntax error)
- BitSet conflictingAlts;
- if (D->predicates.size() != 0) {
-#if DEBUG_ATN == 1
- std::cout << "DFA state has preds in DFA sim LL failover" << std::endl;
-#endif
-
- size_t conflictIndex = input->index();
- if (conflictIndex != startIndex) {
- input->seek(startIndex);
- }
-
- conflictingAlts = evalSemanticContext(D->predicates, outerContext, true);
- if (conflictingAlts.count() == 1) {
-#if DEBUG_ATN == 1
- std::cout << "Full LL avoided" << std::endl;
-#endif
-
- return conflictingAlts.nextSetBit(0);
- }
-
- if (conflictIndex != startIndex) {
- // restore the index so reporting the fallback to full
- // context occurs with the index at the correct spot
- input->seek(conflictIndex);
- }
- }
-
-#if DEBUG_DFA == 1
- std::cout << "ctx sensitive state " << outerContext << " in " << D << std::endl;
-#endif
-
- bool fullCtx = true;
- std::unique_ptr<ATNConfigSet> s0_closure = computeStartState(dfa.atnStartState, outerContext, fullCtx);
- reportAttemptingFullContext(dfa, conflictingAlts, D->configs.get(), startIndex, input->index());
- size_t alt = execATNWithFullContext(dfa, D, s0_closure.get(), input, startIndex, outerContext);
- return alt;
- }
-
- if (D->isAcceptState) {
- if (D->predicates.empty()) {
- return D->prediction;
- }
-
- size_t stopIndex = input->index();
- input->seek(startIndex);
- BitSet alts = evalSemanticContext(D->predicates, outerContext, true);
- switch (alts.count()) {
- case 0:
- throw noViableAlt(input, outerContext, D->configs.get(), startIndex, false);
-
- case 1:
- return alts.nextSetBit(0);
-
- default:
- // report ambiguity after predicate evaluation to make sure the correct
- // set of ambig alts is reported.
- reportAmbiguity(dfa, D, startIndex, stopIndex, false, alts, D->configs.get());
- return alts.nextSetBit(0);
- }
- }
-
- previousD = D;
-
- if (t != Token::EOF) {
- input->consume();
- t = input->LA(1);
- }
- }
-}
-
-dfa::DFAState *ParserATNSimulator::getExistingTargetState(dfa::DFAState *previousD, size_t t) {
- dfa::DFAState* retval;
- SharedLock<SharedMutex> edgeLock(atn._edgeMutex);
- auto iterator = previousD->edges.find(t);
- retval = (iterator == previousD->edges.end()) ? nullptr : iterator->second;
- return retval;
-}
-
-dfa::DFAState *ParserATNSimulator::computeTargetState(dfa::DFA &dfa, dfa::DFAState *previousD, size_t t) {
- std::unique_ptr<ATNConfigSet> reach = computeReachSet(previousD->configs.get(), t, false);
- if (reach == nullptr) {
- addDFAEdge(dfa, previousD, t, ERROR.get());
- return ERROR.get();
- }
-
- // create new target state; we'll add to DFA after it's complete
- dfa::DFAState *D = new dfa::DFAState(std::move(reach)); /* mem-check: managed by the DFA or deleted below, "reach" is no longer valid now. */
- size_t predictedAlt = getUniqueAlt(D->configs.get());
-
- if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
- // NO CONFLICT, UNIQUELY PREDICTED ALT
- D->isAcceptState = true;
- D->configs->uniqueAlt = predictedAlt;
- D->prediction = predictedAlt;
- } else if (PredictionModeClass::hasSLLConflictTerminatingPrediction(_mode, D->configs.get())) {
- // MORE THAN ONE VIABLE ALTERNATIVE
- D->configs->conflictingAlts = getConflictingAlts(D->configs.get());
- D->requiresFullContext = true;
- // in SLL-only mode, we will stop at this state and return the minimum alt
- D->isAcceptState = true;
- D->prediction = D->configs->conflictingAlts.nextSetBit(0);
- }
-
- if (D->isAcceptState && D->configs->hasSemanticContext) {
- predicateDFAState(D, atn.getDecisionState(dfa.decision));
- if (D->predicates.size() != 0) {
- D->prediction = ATN::INVALID_ALT_NUMBER;
- }
- }
-
- // all adds to dfa are done after we've created full D state
- dfa::DFAState *state = addDFAEdge(dfa, previousD, t, D);
- if (state != D) {
- delete D; // If the new state exists already we don't need it and use the existing one instead.
- }
- return state;
-}
-
-void ParserATNSimulator::predicateDFAState(dfa::DFAState *dfaState, DecisionState *decisionState) {
- // We need to test all predicates, even in DFA states that
- // uniquely predict an alternative.
- size_t nalts = decisionState->transitions.size();
-
- // Update DFA so reach becomes accept state with (predicate,alt)
- // pairs if preds found for conflicting alts
- BitSet altsToCollectPredsFrom = getConflictingAltsOrUniqueAlt(dfaState->configs.get());
- std::vector<Ref<const SemanticContext>> altToPred = getPredsForAmbigAlts(altsToCollectPredsFrom, dfaState->configs.get(), nalts);
- if (!altToPred.empty()) {
- dfaState->predicates = getPredicatePredictions(altsToCollectPredsFrom, altToPred);
- dfaState->prediction = ATN::INVALID_ALT_NUMBER; // make sure we use preds
- } else {
- // There are preds in configs but they might go away
- // when OR'd together like {p}? || NONE == NONE. If neither
- // alt has preds, resolve to min alt
- dfaState->prediction = altsToCollectPredsFrom.nextSetBit(0);
- }
-}
-
-size_t ParserATNSimulator::execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0,
- TokenStream *input, size_t startIndex, ParserRuleContext *outerContext) {
-
- bool fullCtx = true;
- bool foundExactAmbig = false;
-
- std::unique_ptr<ATNConfigSet> reach;
- ATNConfigSet *previous = s0;
- input->seek(startIndex);
- size_t t = input->LA(1);
- size_t predictedAlt;
-
- while (true) {
- reach = computeReachSet(previous, t, fullCtx);
- if (reach == nullptr) {
- // if any configs in previous dipped into outer context, that
- // means that input up to t actually finished entry rule
- // at least for LL decision. Full LL doesn't dip into outer
- // so don't need special case.
- // We will get an error no matter what so delay until after
- // decision; better error message. Also, no reachable target
- // ATN states in SLL implies LL will also get nowhere.
- // If conflict in states that dip out, choose min since we
- // will get error no matter what.
- NoViableAltException e = noViableAlt(input, outerContext, previous, startIndex, previous != s0);
- input->seek(startIndex);
- size_t alt = getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(previous, outerContext);
- if (alt != ATN::INVALID_ALT_NUMBER) {
- return alt;
- }
- throw e;
- }
- if (previous != s0) // Don't delete the start set.
- delete previous;
- previous = nullptr;
-
- std::vector<BitSet> altSubSets = PredictionModeClass::getConflictingAltSubsets(reach.get());
- reach->uniqueAlt = getUniqueAlt(reach.get());
- // unique prediction?
- if (reach->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- predictedAlt = reach->uniqueAlt;
- break;
- }
- if (_mode != PredictionMode::LL_EXACT_AMBIG_DETECTION) {
- predictedAlt = PredictionModeClass::resolvesToJustOneViableAlt(altSubSets);
- if (predictedAlt != ATN::INVALID_ALT_NUMBER) {
- break;
- }
- } else {
- // In exact ambiguity mode, we never try to terminate early.
- // We just keep scarfing until we know what the conflict is.
- if (PredictionModeClass::allSubsetsConflict(altSubSets) && PredictionModeClass::allSubsetsEqual(altSubSets)) {
- foundExactAmbig = true;
- predictedAlt = PredictionModeClass::getSingleViableAlt(altSubSets);
- break;
- }
- // else there are multiple non-conflicting subsets or
- // we're not sure what the ambiguity is yet.
- // So, keep going.
- }
- previous = reach.release();
-
- if (t != Token::EOF) {
- input->consume();
- t = input->LA(1);
- }
- }
-
- if (previous != s0) // Don't delete the start set
- delete previous;
-
- // If the configuration set uniquely predicts an alternative,
- // without conflict, then we know that it's a full LL decision
- // not SLL.
- if (reach->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- reportContextSensitivity(dfa, predictedAlt, reach.get(), startIndex, input->index());
- return predictedAlt;
- }
-
- // We do not check predicates here because we have checked them
- // on-the-fly when doing full context prediction.
-
- /*
- In non-exact ambiguity detection mode, we might actually be able to
- detect an exact ambiguity, but I'm not going to spend the cycles
- needed to check. We only emit ambiguity warnings in exact ambiguity
- mode.
-
- For example, we might know that we have conflicting configurations.
- But, that does not mean that there is no way forward without a
- conflict. It's possible to have nonconflicting alt subsets as in:
-
- LL altSubSets=[{1, 2}, {1, 2}, {1}, {1, 2}]
-
- from
-
- [(17,1,[5 $]), (13,1,[5 10 $]), (21,1,[5 10 $]), (11,1,[$]),
- (13,2,[5 10 $]), (21,2,[5 10 $]), (11,2,[$])]
-
- In this case, (17,1,[5 $]) indicates there is some next sequence that
- would resolve this without conflict to alternative 1. Any other viable
- next sequence, however, is associated with a conflict. We stop
- looking for input because no amount of further lookahead will alter
- the fact that we should predict alternative 1. We just can't say for
- sure that there is an ambiguity without looking further.
- */
- reportAmbiguity(dfa, D, startIndex, input->index(), foundExactAmbig, reach->getAlts(), reach.get());
-
- return predictedAlt;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::computeReachSet(ATNConfigSet *closure_, size_t t, bool fullCtx) {
-
- std::unique_ptr<ATNConfigSet> intermediate(new ATNConfigSet(fullCtx));
-
- /* Configurations already in a rule stop state indicate reaching the end
- * of the decision rule (local context) or end of the start rule (full
- * context). Once reached, these configurations are never updated by a
- * closure operation, so they are handled separately for the performance
- * advantage of having a smaller intermediate set when calling closure.
- *
- * For full-context reach operations, separate handling is required to
- * ensure that the alternative matching the longest overall sequence is
- * chosen when multiple such configurations can match the input.
- */
- std::vector<Ref<ATNConfig>> skippedStopStates;
-
- // First figure out where we can reach on input t
- for (const auto &c : closure_->configs) {
- if (RuleStopState::is(c->state)) {
- assert(c->context->isEmpty());
-
- if (fullCtx || t == Token::EOF) {
- skippedStopStates.push_back(c);
- }
-
- continue;
- }
-
- size_t n = c->state->transitions.size();
- for (size_t ti = 0; ti < n; ti++) { // for each transition
- const Transition *trans = c->state->transitions[ti].get();
- ATNState *target = getReachableTarget(trans, (int)t);
- if (target != nullptr) {
- intermediate->add(std::make_shared<ATNConfig>(*c, target), &mergeCache);
- }
- }
- }
-
- // Now figure out where the reach operation can take us...
- std::unique_ptr<ATNConfigSet> reach;
-
- /* This block optimizes the reach operation for intermediate sets which
- * trivially indicate a termination state for the overall
- * adaptivePredict operation.
- *
- * The conditions assume that intermediate
- * contains all configurations relevant to the reach set, but this
- * condition is not true when one or more configurations have been
- * withheld in skippedStopStates, or when the current symbol is EOF.
- */
- if (skippedStopStates.empty() && t != Token::EOF) {
- if (intermediate->size() == 1) {
- // Don't pursue the closure if there is just one state.
- // It can only have one alternative; just add to result
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = std::move(intermediate);
- } else if (getUniqueAlt(intermediate.get()) != ATN::INVALID_ALT_NUMBER) {
- // Also don't pursue the closure if there is unique alternative
- // among the configurations.
- reach = std::move(intermediate);
- }
- }
-
- /* If the reach set could not be trivially determined, perform a closure
- * operation on the intermediate set to compute its initial value.
- */
- if (reach == nullptr) {
- reach.reset(new ATNConfigSet(fullCtx));
- ATNConfig::Set closureBusy;
-
- bool treatEofAsEpsilon = t == Token::EOF;
- for (const auto &c : intermediate->configs) {
- closure(c, reach.get(), closureBusy, false, fullCtx, treatEofAsEpsilon);
- }
- }
-
- if (t == IntStream::EOF) {
- /* After consuming EOF no additional input is possible, so we are
- * only interested in configurations which reached the end of the
- * decision rule (local context) or end of the start rule (full
- * context). Update reach to contain only these configurations. This
- * handles both explicit EOF transitions in the grammar and implicit
- * EOF transitions following the end of the decision or start rule.
- *
- * When reach==intermediate, no closure operation was performed. In
- * this case, removeAllConfigsNotInRuleStopState needs to check for
- * reachable rule stop states as well as configurations already in
- * a rule stop state.
- *
- * This is handled before the configurations in skippedStopStates,
- * because any configurations potentially added from that list are
- * already guaranteed to meet this condition whether or not it's
- * required.
- */
- ATNConfigSet *temp = removeAllConfigsNotInRuleStopState(reach.get(), *reach == *intermediate);
- if (temp != reach.get())
- reach.reset(temp); // We got a new set, so use that.
- }
-
- /* If skippedStopStates is not null, then it contains at least one
- * configuration. For full-context reach operations, these
- * configurations reached the end of the start rule, in which case we
- * only add them back to reach if no configuration during the current
- * closure operation reached such a state. This ensures adaptivePredict
- * chooses an alternative matching the longest overall sequence when
- * multiple alternatives are viable.
- */
- if (skippedStopStates.size() > 0 && (!fullCtx || !PredictionModeClass::hasConfigInRuleStopState(reach.get()))) {
- assert(!skippedStopStates.empty());
-
- for (const auto &c : skippedStopStates) {
- reach->add(c, &mergeCache);
- }
- }
-
- if (reach->isEmpty()) {
- return nullptr;
- }
- return reach;
-}
-
-ATNConfigSet* ParserATNSimulator::removeAllConfigsNotInRuleStopState(ATNConfigSet *configs,
- bool lookToEndOfRule) {
- if (PredictionModeClass::allConfigsInRuleStopStates(configs)) {
- return configs;
- }
-
- ATNConfigSet *result = new ATNConfigSet(configs->fullCtx); /* mem-check: released by caller */
-
- for (const auto &config : configs->configs) {
- if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) {
- result->add(config, &mergeCache);
- continue;
- }
-
- if (lookToEndOfRule && config->state->epsilonOnlyTransitions) {
- misc::IntervalSet nextTokens = atn.nextTokens(config->state);
- if (nextTokens.contains(Token::EPSILON)) {
- ATNState *endOfRuleState = atn.ruleToStopState[config->state->ruleIndex];
- result->add(std::make_shared<ATNConfig>(*config, endOfRuleState), &mergeCache);
- }
- }
- }
-
- return result;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::computeStartState(ATNState *p, RuleContext *ctx, bool fullCtx) {
- // always at least the implicit call to start rule
- Ref<const PredictionContext> initialContext = PredictionContext::fromRuleContext(atn, ctx);
- std::unique_ptr<ATNConfigSet> configs(new ATNConfigSet(fullCtx));
-
- for (size_t i = 0; i < p->transitions.size(); i++) {
- ATNState *target = p->transitions[i]->target;
- Ref<ATNConfig> c = std::make_shared<ATNConfig>(target, (int)i + 1, initialContext);
- ATNConfig::Set closureBusy;
- closure(c, configs.get(), closureBusy, true, fullCtx, false);
- }
-
- return configs;
-}
-
-std::unique_ptr<ATNConfigSet> ParserATNSimulator::applyPrecedenceFilter(ATNConfigSet *configs) {
- std::map<size_t, Ref<const PredictionContext>> statesFromAlt1;
- std::unique_ptr<ATNConfigSet> configSet(new ATNConfigSet(configs->fullCtx));
- for (const auto &config : configs->configs) {
- // handle alt 1 first
- if (config->alt != 1) {
- continue;
- }
-
- Ref<const SemanticContext> updatedContext = config->semanticContext->evalPrecedence(parser, _outerContext);
- if (updatedContext == nullptr) {
- // the configuration was eliminated
- continue;
- }
-
- statesFromAlt1[config->state->stateNumber] = config->context;
- if (updatedContext != config->semanticContext) {
- configSet->add(std::make_shared<ATNConfig>(*config, updatedContext), &mergeCache);
- }
- else {
- configSet->add(config, &mergeCache);
- }
- }
-
- for (const auto &config : configs->configs) {
- if (config->alt == 1) {
- // already handled
- continue;
- }
-
- if (!config->isPrecedenceFilterSuppressed()) {
- /* In the future, this elimination step could be updated to also
- * filter the prediction context for alternatives predicting alt>1
- * (basically a graph subtraction algorithm).
- */
- auto iterator = statesFromAlt1.find(config->state->stateNumber);
- if (iterator != statesFromAlt1.end() && *iterator->second == *config->context) {
- // eliminated
- continue;
- }
- }
-
- configSet->add(config, &mergeCache);
- }
-
- return configSet;
-}
-
-atn::ATNState* ParserATNSimulator::getReachableTarget(const Transition *trans, size_t ttype) {
- if (trans->matches(ttype, 0, atn.maxTokenType)) {
- return trans->target;
- }
-
- return nullptr;
-}
-
-// Note that caller must memory manage the returned value from this function
-std::vector<Ref<const SemanticContext>> ParserATNSimulator::getPredsForAmbigAlts(const BitSet &ambigAlts,
- ATNConfigSet *configs, size_t nalts) {
- // REACH=[1|1|[]|0:0, 1|2|[]|0:1]
- /* altToPred starts as an array of all null contexts. The entry at index i
- * corresponds to alternative i. altToPred[i] may have one of three values:
- * 1. null: no ATNConfig c is found such that c.alt==i
- * 2. SemanticContext.NONE: At least one ATNConfig c exists such that
- * c.alt==i and c.semanticContext==SemanticContext.NONE. In other words,
- * alt i has at least one un-predicated config.
- * 3. Non-NONE Semantic Context: There exists at least one, and for all
- * ATNConfig c such that c.alt==i, c.semanticContext!=SemanticContext.NONE.
- *
- * From this, it is clear that NONE||anything==NONE.
- */
- std::vector<Ref<const SemanticContext>> altToPred(nalts + 1);
-
- for (const auto &c : configs->configs) {
- if (ambigAlts.test(c->alt)) {
- altToPred[c->alt] = SemanticContext::Or(altToPred[c->alt], c->semanticContext);
- }
- }
-
- size_t nPredAlts = 0;
- for (size_t i = 1; i <= nalts; i++) {
- if (altToPred[i] == nullptr) {
- altToPred[i] = SemanticContext::Empty::Instance;
- } else if (altToPred[i] != SemanticContext::Empty::Instance) {
- nPredAlts++;
- }
- }
-
- // nonambig alts are null in altToPred
- if (nPredAlts == 0) {
- altToPred.clear();
- }
-#if DEBUG_ATN == 1
- std::cout << "getPredsForAmbigAlts result " << Arrays::toString(altToPred) << std::endl;
-#endif
-
- return altToPred;
-}
-
-std::vector<dfa::DFAState::PredPrediction> ParserATNSimulator::getPredicatePredictions(const antlrcpp::BitSet &ambigAlts,
- const std::vector<Ref<const SemanticContext>> &altToPred) {
- bool containsPredicate = std::find_if(altToPred.begin(), altToPred.end(), [](const Ref<const SemanticContext> &context) {
- return context != SemanticContext::Empty::Instance;
- }) != altToPred.end();
- std::vector<dfa::DFAState::PredPrediction> pairs;
- if (containsPredicate) {
- for (size_t i = 1; i < altToPred.size(); i++) {
- const auto &pred = altToPred[i];
- assert(pred != nullptr); // unpredicted is indicated by SemanticContext.NONE
- if (ambigAlts.test(i)) {
- pairs.emplace_back(pred, static_cast<int>(i));
- }
- }
- }
- return pairs;
-}
-
-size_t ParserATNSimulator::getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs,
- ParserRuleContext *outerContext)
-{
- std::pair<ATNConfigSet *, ATNConfigSet *> sets = splitAccordingToSemanticValidity(configs, outerContext);
- std::unique_ptr<ATNConfigSet> semValidConfigs(sets.first);
- std::unique_ptr<ATNConfigSet> semInvalidConfigs(sets.second);
- size_t alt = getAltThatFinishedDecisionEntryRule(semValidConfigs.get());
- if (alt != ATN::INVALID_ALT_NUMBER) { // semantically/syntactically viable path exists
- return alt;
- }
- // Is there a syntactically valid path with a failed pred?
- if (!semInvalidConfigs->configs.empty()) {
- alt = getAltThatFinishedDecisionEntryRule(semInvalidConfigs.get());
- if (alt != ATN::INVALID_ALT_NUMBER) { // syntactically viable path exists
- return alt;
- }
- }
- return ATN::INVALID_ALT_NUMBER;
-}
-
-size_t ParserATNSimulator::getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs) {
- misc::IntervalSet alts;
- for (const auto &c : configs->configs) {
- if (c->getOuterContextDepth() > 0 || (c->state != nullptr && c->state->getStateType() == ATNStateType::RULE_STOP && c->context->hasEmptyPath())) {
- alts.add(c->alt);
- }
- }
- if (alts.size() == 0) {
- return ATN::INVALID_ALT_NUMBER;
- }
- return alts.getMinElement();
-}
-
-std::pair<ATNConfigSet *, ATNConfigSet *> ParserATNSimulator::splitAccordingToSemanticValidity(ATNConfigSet *configs,
- ParserRuleContext *outerContext) {
-
- // mem-check: both pointers must be freed by the caller.
- ATNConfigSet *succeeded(new ATNConfigSet(configs->fullCtx));
- ATNConfigSet *failed(new ATNConfigSet(configs->fullCtx));
- for (const auto &c : configs->configs) {
- if (c->semanticContext != SemanticContext::Empty::Instance) {
- bool predicateEvaluationResult = evalSemanticContext(c->semanticContext, outerContext, c->alt, configs->fullCtx);
- if (predicateEvaluationResult) {
- succeeded->add(c);
- } else {
- failed->add(c);
- }
- } else {
- succeeded->add(c);
- }
- }
- return { succeeded, failed };
-}
-
-BitSet ParserATNSimulator::evalSemanticContext(const std::vector<dfa::DFAState::PredPrediction> &predPredictions,
- ParserRuleContext *outerContext, bool complete) {
- BitSet predictions;
- for (const auto &prediction : predPredictions) {
- if (prediction.pred == SemanticContext::Empty::Instance) {
- predictions.set(prediction.alt);
- if (!complete) {
- break;
- }
- continue;
- }
-
- bool fullCtx = false; // in dfa
- bool predicateEvaluationResult = evalSemanticContext(prediction.pred, outerContext, prediction.alt, fullCtx);
-#if DEBUG_ATN == 1 || DEBUG_DFA == 1
- std::cout << "eval pred " << prediction.toString() << " = " << predicateEvaluationResult << std::endl;
-#endif
-
- if (predicateEvaluationResult) {
-#if DEBUG_ATN == 1 || DEBUG_DFA == 1
- std::cout << "PREDICT " << prediction.alt << std::endl;
-#endif
-
- predictions.set(prediction.alt);
- if (!complete) {
- break;
- }
- }
- }
-
- return predictions;
-}
-
-bool ParserATNSimulator::evalSemanticContext(Ref<const SemanticContext> const& pred, ParserRuleContext *parserCallStack,
- size_t /*alt*/, bool /*fullCtx*/) {
- return pred->eval(parser, parserCallStack);
-}
-
-void ParserATNSimulator::closure(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
- bool collectPredicates, bool fullCtx, bool treatEofAsEpsilon) {
- const int initialDepth = 0;
- closureCheckingStopState(config, configs, closureBusy, collectPredicates, fullCtx, initialDepth, treatEofAsEpsilon);
-
- assert(!fullCtx || !configs->dipsIntoOuterContext);
-}
-
-void ParserATNSimulator::closureCheckingStopState(Ref<ATNConfig> const& config, ATNConfigSet *configs,
- ATNConfig::Set &closureBusy, bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon) {
-
-#if DEBUG_ATN == 1
- std::cout << "closure(" << config->toString(true) << ")" << std::endl;
-#endif
-
- if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) {
- // We hit rule end. If we have context info, use it
- // run thru all possible stack tops in ctx
- if (!config->context->isEmpty()) {
- for (size_t i = 0; i < config->context->size(); i++) {
- if (config->context->getReturnState(i) == PredictionContext::EMPTY_RETURN_STATE) {
- if (fullCtx) {
- configs->add(std::make_shared<ATNConfig>(*config, config->state, PredictionContext::EMPTY), &mergeCache);
- continue;
- } else {
- // we have no context info, just chase follow links (if greedy)
-#if DEBUG_ATN == 1
- std::cout << "FALLING off rule " << getRuleName(config->state->ruleIndex) << std::endl;
-#endif
- closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon);
- }
- continue;
- }
- ATNState *returnState = atn.states[config->context->getReturnState(i)];
- Ref<const PredictionContext> newContext = config->context->getParent(i); // "pop" return state
- Ref<ATNConfig> c = std::make_shared<ATNConfig>(returnState, config->alt, newContext, config->semanticContext);
- // While we have context to pop back from, we may have
- // gotten that context AFTER having fallen off a rule.
- // Make sure we track that we are now out of context.
- //
- // This assignment also propagates the
- // isPrecedenceFilterSuppressed() value to the new
- // configuration.
- c->reachesIntoOuterContext = config->reachesIntoOuterContext;
- assert(depth > INT_MIN);
-
- closureCheckingStopState(c, configs, closureBusy, collectPredicates, fullCtx, depth - 1, treatEofAsEpsilon);
- }
- return;
- } else if (fullCtx) {
- // reached end of start rule
- configs->add(config, &mergeCache);
- return;
- } else {
- // else if we have no context info, just chase follow links (if greedy)
- }
- }
-
- closure_(config, configs, closureBusy, collectPredicates, fullCtx, depth, treatEofAsEpsilon);
-}
-
-void ParserATNSimulator::closure_(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
- bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon) {
- ATNState *p = config->state;
- // optimization
- if (!p->epsilonOnlyTransitions) {
- // make sure to not return here, because EOF transitions can act as
- // both epsilon transitions and non-epsilon transitions.
- configs->add(config, &mergeCache);
- }
-
- for (size_t i = 0; i < p->transitions.size(); i++) {
- if (i == 0 && canDropLoopEntryEdgeInLeftRecursiveRule(config.get()))
- continue;
-
- const Transition *t = p->transitions[i].get();
- bool continueCollecting = !(t != nullptr && t->getTransitionType() == TransitionType::ACTION) && collectPredicates;
- Ref<ATNConfig> c = getEpsilonTarget(config, t, continueCollecting, depth == 0, fullCtx, treatEofAsEpsilon);
- if (c != nullptr) {
- int newDepth = depth;
- if (config->state != nullptr && config->state->getStateType() == ATNStateType::RULE_STOP) {
- assert(!fullCtx);
-
- // target fell off end of rule; mark resulting c as having dipped into outer context
- // We can't get here if incoming config was rule stop and we had context
- // track how far we dip into outer context. Might
- // come in handy and we avoid evaluating context dependent
- // preds if this is > 0.
-
- if (closureBusy.count(c) > 0) {
- // avoid infinite recursion for right-recursive rules
- continue;
- }
- closureBusy.insert(c);
-
- if (_dfa != nullptr && _dfa->isPrecedenceDfa()) {
- size_t outermostPrecedenceReturn = downCast<const EpsilonTransition *>(t)->outermostPrecedenceReturn();
- if (outermostPrecedenceReturn == _dfa->atnStartState->ruleIndex) {
- c->setPrecedenceFilterSuppressed(true);
- }
- }
-
- c->reachesIntoOuterContext++;
-
- if (!t->isEpsilon()) {
- // avoid infinite recursion for EOF* and EOF+
- if (closureBusy.count(c) == 0) {
- closureBusy.insert(c);
- } else {
- continue;
- }
- }
-
- configs->dipsIntoOuterContext = true; // TODO: can remove? only care when we add to set per middle of this method
- assert(newDepth > INT_MIN);
-
- newDepth--;
-#if DEBUG_DFA == 1
- std::cout << "dips into outer ctx: " << c << std::endl;
-#endif
-
- } else if (!t->isEpsilon()) {
- // avoid infinite recursion for EOF* and EOF+
- if (closureBusy.count(c) == 0) {
- closureBusy.insert(c);
- } else {
- continue;
- }
- }
-
- if (t != nullptr && t->getTransitionType() == TransitionType::RULE) {
- // latch when newDepth goes negative - once we step out of the entry context we can't return
- if (newDepth >= 0) {
- newDepth++;
- }
- }
-
- closureCheckingStopState(c, configs, closureBusy, continueCollecting, fullCtx, newDepth, treatEofAsEpsilon);
- }
- }
-}
-
-bool ParserATNSimulator::canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const {
- if (TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT)
- return false;
-
- ATNState *p = config->state;
-
- // First check to see if we are in StarLoopEntryState generated during
- // left-recursion elimination. For efficiency, also check if
- // the context has an empty stack case. If so, it would mean
- // global FOLLOW so we can't perform optimization
- if (p->getStateType() != ATNStateType::STAR_LOOP_ENTRY ||
- !((StarLoopEntryState *)p)->isPrecedenceDecision || // Are we the special loop entry/exit state?
- config->context->isEmpty() || // If SLL wildcard
- config->context->hasEmptyPath())
- {
- return false;
- }
-
- // Require all return states to return back to the same rule
- // that p is in.
- size_t numCtxs = config->context->size();
- for (size_t i = 0; i < numCtxs; i++) { // for each stack context
- ATNState *returnState = atn.states[config->context->getReturnState(i)];
- if (returnState->ruleIndex != p->ruleIndex)
- return false;
- }
-
- BlockStartState *decisionStartState = (BlockStartState *)p->transitions[0]->target;
- size_t blockEndStateNum = decisionStartState->endState->stateNumber;
- BlockEndState *blockEndState = (BlockEndState *)atn.states[blockEndStateNum];
-
- // Verify that the top of each stack context leads to loop entry/exit
- // state through epsilon edges and w/o leaving rule.
- for (size_t i = 0; i < numCtxs; i++) { // for each stack context
- size_t returnStateNumber = config->context->getReturnState(i);
- ATNState *returnState = atn.states[returnStateNumber];
- // All states must have single outgoing epsilon edge.
- if (returnState->transitions.size() != 1 || !returnState->transitions[0]->isEpsilon())
- {
- return false;
- }
-
- // Look for prefix op case like 'not expr', '(' type ')' expr
- ATNState *returnStateTarget = returnState->transitions[0]->target;
- if (returnState->getStateType() == ATNStateType::BLOCK_END && returnStateTarget == p) {
- continue;
- }
-
- // Look for 'expr op expr' or case where expr's return state is block end
- // of (...)* internal block; the block end points to loop back
- // which points to p but we don't need to check that
- if (returnState == blockEndState) {
- continue;
- }
-
- // Look for ternary expr ? expr : expr. The return state points at block end,
- // which points at loop entry state
- if (returnStateTarget == blockEndState) {
- continue;
- }
-
- // Look for complex prefix 'between expr and expr' case where 2nd expr's
- // return state points at block end state of (...)* internal block
- if (returnStateTarget->getStateType() == ATNStateType::BLOCK_END &&
- returnStateTarget->transitions.size() == 1 &&
- returnStateTarget->transitions[0]->isEpsilon() &&
- returnStateTarget->transitions[0]->target == p)
- {
- continue;
- }
-
- // Anything else ain't conforming.
- return false;
- }
-
- return true;
-}
-
-std::string ParserATNSimulator::getRuleName(size_t index) {
- if (parser != nullptr) {
- return parser->getRuleNames()[index];
- }
- return "<rule " + std::to_string(index) + ">";
-}
-
-Ref<ATNConfig> ParserATNSimulator::getEpsilonTarget(Ref<ATNConfig> const& config, const Transition *t, bool collectPredicates,
- bool inContext, bool fullCtx, bool treatEofAsEpsilon) {
- switch (t->getTransitionType()) {
- case TransitionType::RULE:
- return ruleTransition(config, static_cast<const RuleTransition*>(t));
-
- case TransitionType::PRECEDENCE:
- return precedenceTransition(config, static_cast<const PrecedencePredicateTransition*>(t), collectPredicates, inContext, fullCtx);
-
- case TransitionType::PREDICATE:
- return predTransition(config, static_cast<const PredicateTransition*>(t), collectPredicates, inContext, fullCtx);
-
- case TransitionType::ACTION:
- return actionTransition(config, static_cast<const ActionTransition*>(t));
-
- case TransitionType::EPSILON:
- return std::make_shared<ATNConfig>(*config, t->target);
-
- case TransitionType::ATOM:
- case TransitionType::RANGE:
- case TransitionType::SET:
- // EOF transitions act like epsilon transitions after the first EOF
- // transition is traversed
- if (treatEofAsEpsilon) {
- if (t->matches(Token::EOF, 0, 1)) {
- return std::make_shared<ATNConfig>(*config, t->target);
- }
- }
-
- return nullptr;
-
- default:
- return nullptr;
- }
-}
-
-Ref<ATNConfig> ParserATNSimulator::actionTransition(Ref<ATNConfig> const& config, const ActionTransition *t) {
-#if DEBUG_DFA == 1
- std::cout << "ACTION edge " << t->ruleIndex << ":" << t->actionIndex << std::endl;
-#endif
-
- return std::make_shared<ATNConfig>(*config, t->target);
-}
-
-Ref<ATNConfig> ParserATNSimulator::precedenceTransition(Ref<ATNConfig> const& config, const PrecedencePredicateTransition *pt,
- bool collectPredicates, bool inContext, bool fullCtx) {
-#if DEBUG_DFA == 1
- std::cout << "PRED (collectPredicates=" << collectPredicates << ") " << pt->getPrecedence() << ">=_p" << ", ctx dependent=true" << std::endl;
- if (parser != nullptr) {
- std::cout << "context surrounding pred is " << Arrays::listToString(parser->getRuleInvocationStack(), ", ") << std::endl;
- }
-#endif
-
- Ref<ATNConfig> c;
- if (collectPredicates && inContext) {
- const auto &predicate = pt->getPredicate();
-
- if (fullCtx) {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- size_t currentPosition = _input->index();
- _input->seek(_startIndex);
- bool predSucceeds = evalSemanticContext(predicate, _outerContext, config->alt, fullCtx);
- _input->seek(currentPosition);
- if (predSucceeds) {
- c = std::make_shared<ATNConfig>(*config, pt->target); // no pred context
- }
- } else {
- Ref<const SemanticContext> newSemCtx = SemanticContext::And(config->semanticContext, predicate);
- c = std::make_shared<ATNConfig>(*config, pt->target, std::move(newSemCtx));
- }
- } else {
- c = std::make_shared<ATNConfig>(*config, pt->target);
- }
-
-#if DEBUG_DFA == 1
- std::cout << "config from pred transition=" << c << std::endl;
-#endif
-
- return c;
-}
-
-Ref<ATNConfig> ParserATNSimulator::predTransition(Ref<ATNConfig> const& config, const PredicateTransition *pt,
- bool collectPredicates, bool inContext, bool fullCtx) {
-#if DEBUG_DFA == 1
- std::cout << "PRED (collectPredicates=" << collectPredicates << ") " << pt->getRuleIndex() << ":" << pt->getPredIndex() << ", ctx dependent=" << pt->isCtxDependent() << std::endl;
- if (parser != nullptr) {
- std::cout << "context surrounding pred is " << Arrays::listToString(parser->getRuleInvocationStack(), ", ") << std::endl;
- }
-#endif
-
- Ref<ATNConfig> c = nullptr;
- if (collectPredicates && (!pt->isCtxDependent() || (pt->isCtxDependent() && inContext))) {
- const auto &predicate = pt->getPredicate();
- if (fullCtx) {
- // In full context mode, we can evaluate predicates on-the-fly
- // during closure, which dramatically reduces the size of
- // the config sets. It also obviates the need to test predicates
- // later during conflict resolution.
- size_t currentPosition = _input->index();
- _input->seek(_startIndex);
- bool predSucceeds = evalSemanticContext(predicate, _outerContext, config->alt, fullCtx);
- _input->seek(currentPosition);
- if (predSucceeds) {
- c = std::make_shared<ATNConfig>(*config, pt->target); // no pred context
- }
- } else {
- Ref<const SemanticContext> newSemCtx = SemanticContext::And(config->semanticContext, predicate);
- c = std::make_shared<ATNConfig>(*config, pt->target, std::move(newSemCtx));
- }
- } else {
- c = std::make_shared<ATNConfig>(*config, pt->target);
- }
-
-#if DEBUG_DFA == 1
- std::cout << "config from pred transition=" << c << std::endl;
-#endif
-
- return c;
-}
-
-Ref<ATNConfig> ParserATNSimulator::ruleTransition(Ref<ATNConfig> const& config, const RuleTransition *t) {
-#if DEBUG_DFA == 1
- std::cout << "CALL rule " << getRuleName(t->target->ruleIndex) << ", ctx=" << config->context << std::endl;
-#endif
-
- atn::ATNState *returnState = t->followState;
- Ref<const PredictionContext> newContext = SingletonPredictionContext::create(config->context, returnState->stateNumber);
- return std::make_shared<ATNConfig>(*config, t->target, newContext);
-}
-
-BitSet ParserATNSimulator::getConflictingAlts(ATNConfigSet *configs) {
- std::vector<BitSet> altsets = PredictionModeClass::getConflictingAltSubsets(configs);
- return PredictionModeClass::getAlts(altsets);
-}
-
-BitSet ParserATNSimulator::getConflictingAltsOrUniqueAlt(ATNConfigSet *configs) {
- BitSet conflictingAlts;
- if (configs->uniqueAlt != ATN::INVALID_ALT_NUMBER) {
- conflictingAlts.set(configs->uniqueAlt);
- } else {
- conflictingAlts = configs->conflictingAlts;
- }
- return conflictingAlts;
-}
-
-std::string ParserATNSimulator::getTokenName(size_t t) {
- if (t == Token::EOF) {
- return "EOF";
- }
-
- const dfa::Vocabulary &vocabulary = parser != nullptr ? parser->getVocabulary() : dfa::Vocabulary();
- std::string displayName = vocabulary.getDisplayName(t);
- if (displayName == std::to_string(t)) {
- return displayName;
- }
-
- return displayName + "<" + std::to_string(t) + ">";
-}
-
-std::string ParserATNSimulator::getLookaheadName(TokenStream *input) {
- return getTokenName(input->LA(1));
-}
-
-void ParserATNSimulator::dumpDeadEndConfigs(NoViableAltException &nvae) {
- std::cerr << "dead end configs: ";
- for (const auto &c : nvae.getDeadEndConfigs()->configs) {
- std::string trans = "no edges";
- if (c->state->transitions.size() > 0) {
- const Transition *t = c->state->transitions[0].get();
- if (t != nullptr && t->getTransitionType() == TransitionType::ATOM) {
- const AtomTransition *at = static_cast<const AtomTransition*>(t);
- trans = "Atom " + getTokenName(at->_label);
- } else if (t != nullptr && t->getTransitionType() == TransitionType::SET) {
- const SetTransition *st = static_cast<const SetTransition*>(t);
- trans = "Set ";
- trans += st->set.toString();
- } else if (t != nullptr && t->getTransitionType() == TransitionType::NOT_SET) {
- const SetTransition *st = static_cast<const NotSetTransition*>(t);
- trans = "~Set ";
- trans += st->set.toString();
- }
- }
- std::cerr << c->toString(true) + ":" + trans;
- }
-}
-
-NoViableAltException ParserATNSimulator::noViableAlt(TokenStream *input, ParserRuleContext *outerContext,
- ATNConfigSet *configs, size_t startIndex, bool deleteConfigs) {
- return NoViableAltException(parser, input, input->get(startIndex), input->LT(1), configs, outerContext, deleteConfigs);
-}
-
-size_t ParserATNSimulator::getUniqueAlt(ATNConfigSet *configs) {
- size_t alt = ATN::INVALID_ALT_NUMBER;
- for (const auto &c : configs->configs) {
- if (alt == ATN::INVALID_ALT_NUMBER) {
- alt = c->alt; // found first alt
- } else if (c->alt != alt) {
- return ATN::INVALID_ALT_NUMBER;
- }
- }
- return alt;
-}
-
-dfa::DFAState *ParserATNSimulator::addDFAEdge(dfa::DFA &dfa, dfa::DFAState *from, ssize_t t, dfa::DFAState *to) {
-#if DEBUG_DFA == 1
- std::cout << "EDGE " << from << " -> " << to << " upon " << getTokenName(t) << std::endl;
-#endif
-
- if (to == nullptr) {
- return nullptr;
- }
-
- {
- UniqueLock<SharedMutex> stateLock(atn._stateMutex);
- to = addDFAState(dfa, to); // reuse an existing equivalent state if one is present, not the incoming one
- }
- if (from == nullptr || t > (int)atn.maxTokenType) {
- return to;
- }
-
- {
- UniqueLock<SharedMutex> edgeLock(atn._edgeMutex);
- from->edges[t] = to; // connect
- }
-
-#if DEBUG_DFA == 1
- std::string dfaText;
- if (parser != nullptr) {
- dfaText = dfa.toString(parser->getVocabulary());
- } else {
- dfaText = dfa.toString(dfa::Vocabulary());
- }
- std::cout << "DFA=\n" << dfaText << std::endl;
-#endif
-
- return to;
-}
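
The edge update above is the writer half of the locking protocol described later in ParserATNSimulator.h: canonicalize the target state under the state mutex, then publish the edge under the edge mutex, so concurrent prediction threads only ever observe no edge or a fully constructed target. A stripped-down stand-in for illustration only (none of these names are runtime API; std::shared_mutex plays the role of the runtime's SharedMutex):

    #include <mutex>
    #include <shared_mutex>
    #include <unordered_map>

    // Placeholder for dfa::DFAState; only identity matters in this sketch.
    struct State { int id; };

    class EdgeTable {
    public:
        // Writer side, as in addDFAEdge above: publish the edge under an exclusive lock.
        void connect(State *from, int symbol, State *to) {
            std::unique_lock<std::shared_mutex> lock(_edgeMutex);
            _edges[from][symbol] = to;
        }

        // Reader side (illustrative): lookups take the shared side of the lock,
        // so many prediction threads can follow edges concurrently.
        State *follow(State *from, int symbol) const {
            std::shared_lock<std::shared_mutex> lock(_edgeMutex);
            auto it = _edges.find(from);
            if (it == _edges.end()) return nullptr;
            auto jt = it->second.find(symbol);
            return jt == it->second.end() ? nullptr : jt->second;
        }

    private:
        mutable std::shared_mutex _edgeMutex;
        std::unordered_map<State *, std::unordered_map<int, State *>> _edges;
    };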
-
-dfa::DFAState *ParserATNSimulator::addDFAState(dfa::DFA &dfa, dfa::DFAState *D) {
- if (D == ERROR.get()) {
- return D;
- }
-
- // Optimizing the configs below should not alter the hash code. Thus we can just do an insert
- // which will only succeed if an equivalent DFAState does not already exist.
- auto [existing, inserted] = dfa.states.insert(D);
- if (!inserted) {
- return *existing;
- }
-
- // Previously we did a lookup, then set fields, then inserted, so the state number was
- // `dfa.states.size()`. Since we have already inserted here, we need to subtract one.
- D->stateNumber = static_cast<int>(dfa.states.size() - 1);
- if (!D->configs->isReadonly()) {
- D->configs->optimizeConfigs(this);
- D->configs->setReadonly(true);
- }
-
-#if DEBUG_DFA == 1
- std::cout << "adding new DFA state: " << D << std::endl;
-#endif
-
- return D;
-}
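
A note on the insert-first pattern above: set insertion returns an iterator to the equivalent element already stored when the insertion fails, which is exactly what lets addDFAState reuse an existing DFA state. A self-contained illustration with string keys (nothing here is runtime API):

    #include <iostream>
    #include <string>
    #include <unordered_set>

    int main() {
        std::unordered_set<std::string> canonical;

        auto intern = [&canonical](const std::string &candidate) -> const std::string & {
            // insert() returns {iterator, bool}; on a collision the iterator points at the
            // element already stored, mirroring `dfa.states.insert(D)` above.
            auto [it, inserted] = canonical.insert(candidate);
            std::cout << candidate << (inserted ? " added" : " reused") << std::endl;
            return *it;
        };

        intern("config-set-A"); // added
        intern("config-set-B"); // added
        intern("config-set-A"); // reused
        return 0;
    }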
-
-void ParserATNSimulator::reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts,
- ATNConfigSet *configs, size_t startIndex, size_t stopIndex) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex);
- std::cout << "reportAttemptingFullContext decision=" << dfa.decision << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportAttemptingFullContext(parser, dfa, startIndex, stopIndex, conflictingAlts, configs);
- }
-}
-
-void ParserATNSimulator::reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval(startIndex, stopIndex);
- std::cout << "reportContextSensitivity decision=" << dfa.decision << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportContextSensitivity(parser, dfa, startIndex, stopIndex, prediction, configs);
- }
-}
-
-void ParserATNSimulator::reportAmbiguity(dfa::DFA &dfa, dfa::DFAState * /*D*/, size_t startIndex, size_t stopIndex,
- bool exact, const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs) {
-#if DEBUG_DFA == 1 || RETRY_DEBUG == 1
- misc::Interval interval = misc::Interval((int)startIndex, (int)stopIndex);
- std::cout << "reportAmbiguity " << ambigAlts << ":" << configs << ", input=" << parser->getTokenStream()->getText(interval) << std::endl;
-#endif
-
- if (parser != nullptr) {
- parser->getErrorListenerDispatch().reportAmbiguity(parser, dfa, startIndex, stopIndex, exact, ambigAlts, configs);
- }
-}
-
-void ParserATNSimulator::setPredictionMode(PredictionMode newMode) {
- _mode = newMode;
-}
-
-atn::PredictionMode ParserATNSimulator::getPredictionMode() {
- return _mode;
-}
-
-Parser* ParserATNSimulator::getParser() {
- return parser;
-}
-
-#ifdef _MSC_VER
-#pragma warning (disable:4996) // 'getenv': This function or variable may be unsafe. Consider using _dupenv_s instead.
-#endif
-
-bool ParserATNSimulator::getLrLoopSetting() {
- char *var = std::getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT");
- if (var == nullptr)
- return false;
- std::string value(var);
- return value == "true" || value == "1";
-}
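
The helper above treats only the exact strings "true" and "1" as enabling the switch, and its result presumably initializes the static TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT flag declared in ParserATNSimulator.h below (the definition is not shown in this hunk), so the variable would need to be set before the program starts. A minimal standalone sketch of the same check; the helper name here is made up:

    #include <cstdlib>
    #include <iostream>
    #include <string>

    // Mirrors getLrLoopSetting(): absent, or any value other than "true"/"1", means "off".
    static bool lrLoopEntryOptDisabled() {
        const char *var = std::getenv("TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT");
        return var != nullptr && (std::string(var) == "true" || std::string(var) == "1");
    }

    int main() {
        // Run as: TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT=1 ./a.out
        std::cout << std::boolalpha
                  << "LR loop entry optimization disabled: " << lrLoopEntryOptDisabled()
                  << std::endl;
        return 0;
    }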
-
-#ifdef _MSC_VER
-#pragma warning (default:4996)
-#endif
-
-void ParserATNSimulator::InitializeInstanceFields() {
- _mode = PredictionMode::LL;
- _startIndex = 0;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.h
deleted file mode 100644
index 28fd059dd2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulator.h
+++ /dev/null
@@ -1,911 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "PredictionMode.h"
-#include "dfa/DFAState.h"
-#include "atn/ATNSimulator.h"
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextMergeCache.h"
-#include "atn/ParserATNSimulatorOptions.h"
-#include "SemanticContext.h"
-#include "atn/ATNConfig.h"
-
-namespace antlr4 {
-namespace atn {
-
- /**
- * The embodiment of the adaptive LL(*), ALL(*), parsing strategy.
- *
- * <p>
- * The basic complexity of the adaptive strategy makes it harder to understand.
- * We begin with ATN simulation to build paths in a DFA. Subsequent prediction
- * requests go through the DFA first. If they reach a state without an edge for
- * the current symbol, the algorithm fails over to the ATN simulation to
- * complete the DFA path for the current input (until it finds a conflict state
- * or uniquely predicting state).</p>
- *
- * <p>
- * All of that is done without using the outer context because we want to create
- * a DFA that is not dependent upon the rule invocation stack when we do a
- * prediction. One DFA works in all contexts. We avoid using context not
- * necessarily because it's slower, although it can be, but because of the DFA
- * caching problem. The closure routine only considers the rule invocation stack
- * created during prediction beginning in the decision rule. For example, if
- * prediction occurs without invoking another rule's ATN, there are no context
- * stacks in the configurations. When lack of context leads to a conflict, we
- * don't know if it's an ambiguity or a weakness in the strong LL(*) parsing
- * strategy (versus full LL(*)).</p>
- *
- * <p>
- * When SLL yields a configuration set with conflict, we rewind the input and
- * retry the ATN simulation, this time using full outer context without adding
- * to the DFA. Configuration context stacks will be the full invocation stacks
- * from the start rule. If we get a conflict using full context, then we can
- * definitively say we have a true ambiguity for that input sequence. If we
- * don't get a conflict, it implies that the decision is sensitive to the outer
- * context. (It is not context-sensitive in the sense of context-sensitive
- * grammars.)</p>
- *
- * <p>
- * The next time we reach this DFA state with an SLL conflict, through DFA
- * simulation, we will again retry the ATN simulation using full context mode.
- * This is slow because we can't save the results and have to "interpret" the
- * ATN each time we get that input.</p>
- *
- * <p>
- * <strong>CACHING FULL CONTEXT PREDICTIONS</strong></p>
- *
- * <p>
- * We could cache results from full context to predicted alternative easily and
- * that saves a lot of time but doesn't work in presence of predicates. The set
- * of visible predicates from the ATN start state changes depending on the
- * context, because closure can fall off the end of a rule. I tried to cache
- * tuples (stack context, semantic context, predicted alt) but it was slower
- * than interpreting and much more complicated. Also required a huge amount of
- * memory. The goal is not to create the world's fastest parser anyway. I'd like
- * to keep this algorithm simple. By launching multiple threads, we can improve
- * the speed of parsing across a large number of files.</p>
- *
- * <p>
- * There is no strict ordering between the amount of input used by SLL vs LL,
- * which makes it really hard to build a cache for full context. Let's say that
- * we have input A B C that leads to an SLL conflict with full context X. That
- * implies that using X we might only use A B but we could also use A B C D to
- * resolve conflict. Input A B C D could predict alternative 1 in one position
- * in the input and A B C E could predict alternative 2 in another position in
- * input. The conflicting SLL configurations could still be non-unique in the
- * full context prediction, which would lead us to requiring more input than the
- * original A B C. To make a prediction cache work, we have to track the exact
- * input used during the previous prediction. That amounts to a cache that maps
- * X to a specific DFA for that context.</p>
- *
- * <p>
- * Something should be done for left-recursive expression predictions. They are
- * likely LL(1) + pred eval. It is easier to do the "whole SLL unless error, then retry
- * with full LL" approach that Sam does.</p>
- *
- * <p>
- * <strong>AVOIDING FULL CONTEXT PREDICTION</strong></p>
- *
- * <p>
- * We avoid doing a full context retry when the outer context is empty, when we did
- * not dip into the outer context by falling off the end of the decision state rule,
- * or when we force SLL mode.</p>
- *
- * <p>
- * As an example of the "not dip into outer context" case, consider super
- * constructor calls versus function calls. One grammar might look like
- * this:</p>
- *
- * <pre>
- * ctorBody
- * : '{' superCall? stat* '}'
- * ;
- * </pre>
- *
- * <p>
- * Or, you might see something like</p>
- *
- * <pre>
- * stat
- * : superCall ';'
- * | expression ';'
- * | ...
- * ;
- * </pre>
- *
- * <p>
- * In both cases I believe that no closure operations will dip into the outer
- * context. In the first case ctorBody in the worst case will stop at the '}'.
- * In the 2nd case it should stop at the ';'. Both cases should stay within the
- * entry rule and not dip into the outer context.</p>
- *
- * <p>
- * <strong>PREDICATES</strong></p>
- *
- * <p>
- * Predicates are always evaluated if present, in both SLL and LL. SLL and
- * LL simulation deal with predicates differently. SLL collects predicates as
- * it performs closure operations, like ANTLR v3 did. It delays predicate
- * evaluation until it reaches an accept state. This allows us to cache the SLL
- * ATN simulation whereas, if we had evaluated predicates on-the-fly during
- * closure, the DFA state configuration sets would be different and we couldn't
- * build up a suitable DFA.</p>
- *
- * <p>
- * When building a DFA accept state during ATN simulation, we evaluate any
- * predicates and return the sole semantically valid alternative. If there is
- * more than 1 alternative, we report an ambiguity. If there are 0 alternatives,
- * we throw an exception. Alternatives without predicates act like they have
- * true predicates. The simple way to think about it is to strip away all
- * alternatives with false predicates and choose the minimum alternative that
- * remains.</p>
- *
- * <p>
- * When we start in the DFA and reach an accept state that's predicated, we test
- * those and return the minimum semantically viable alternative. If no
- * alternatives are viable, we throw an exception.</p>
- *
- * <p>
- * During full LL ATN simulation, closure always evaluates predicates
- * on-the-fly. This is crucial to reducing the configuration set size during
- * closure. It hits a landmine when parsing with the Java grammar, for example,
- * without this on-the-fly evaluation.</p>
- *
- * <p>
- * <strong>SHARING DFA</strong></p>
- *
- * <p>
- * All instances of the same parser share the same decision DFAs through a
- * static field. Each instance gets its own ATN simulator but they share the
- * same {@link #decisionToDFA} field. They also share a
- * {@link PredictionContextCache} object that makes sure that all
- * {@link PredictionContext} objects are shared among the DFA states. This makes
- * a big size difference.</p>
- *
- * <p>
- * <strong>THREAD SAFETY</strong></p>
- *
- * <p>
- * The {@link ParserATNSimulator} locks on the {@link #decisionToDFA} field when
- * it adds a new DFA object to that array. {@link #addDFAEdge}
- * locks on the DFA for the current decision when setting the
- * {@link DFAState#edges} field. {@link #addDFAState} locks on
- * the DFA for the current decision when looking up a DFA state to see if it
- * already exists. We must make sure that all requests to add DFA states that
- * are equivalent result in the same shared DFA object. This is because lots of
- * threads will be trying to update the DFA at once. The
- * {@link #addDFAState} method also locks inside the DFA lock
- * but this time on the shared context cache when it rebuilds the
- * configurations' {@link PredictionContext} objects using cached
- * subgraphs/nodes. No other locking occurs, even during DFA simulation. This is
- * safe as long as we can guarantee that all threads referencing
- * {@code s.edge[t]} get the same physical target {@link DFAState}, or
- * {@code null}. Once into the DFA, the DFA simulation does not reference the
- * {@link DFA#states} map. It follows the {@link DFAState#edges} field to new
- * targets. The DFA simulator will either find {@link DFAState#edges} to be
- * {@code null}, to be non-{@code null} and {@code dfa.edges[t]} null, or
- * {@code dfa.edges[t]} to be non-null. The
- * {@link #addDFAEdge} method could be racing to set the field
- * but in either case the DFA simulator works; if {@code null}, and requests ATN
- * simulation. It could also race trying to get {@code dfa.edges[t]}, but either
- * way it will work because it's not doing a test and set operation.</p>
- *
- * <p>
- * <strong>Starting with SLL then failing over to combined SLL/LL (Two-Stage
- * Parsing)</strong></p>
- *
- * <p>
- * Sam pointed out that if SLL does not give a syntax error, then there is no
- * point in doing full LL, which is slower. We only have to try LL if we get a
- * syntax error. For maximum speed, Sam starts the parser set to pure SLL
- * mode with the {@link BailErrorStrategy}:</p>
- *
- * <pre>
- * parser.{@link Parser#getInterpreter() getInterpreter()}.{@link #setPredictionMode setPredictionMode}{@code (}{@link PredictionMode#SLL}{@code )};
- * parser.{@link Parser#setErrorHandler setErrorHandler}(new {@link BailErrorStrategy}());
- * </pre>
- *
- * <p>
- * If it does not get a syntax error, then we're done. If it does get a syntax
- * error, we need to retry with the combined SLL/LL strategy.</p>
- *
- * <p>
- * The reason this works is as follows. If there are no SLL conflicts, then the
- * grammar is SLL (at least for that input set). If there is an SLL conflict,
- * the full LL analysis must yield a set of viable alternatives which is a
- * subset of the alternatives reported by SLL. If the LL set is a singleton,
- * then the grammar is LL but not SLL. If the LL set is the same size as the SLL
- * set, the decision is SLL. If the LL set has size &gt; 1, then that decision
- * is truly ambiguous on the current input. If the LL set is smaller, then the
- * SLL conflict resolution might choose an alternative that the full LL would
- * rule out as a possibility based upon better context information. If that's
- * the case, then the SLL parse will definitely get an error because the full LL
- * analysis says it's not viable. If SLL conflict resolution chooses an
- * alternative within the LL set, then both SLL and LL would choose the same
- * alternative because they both choose the minimum of multiple conflicting
- * alternatives.</p>
- *
- * <p>
- * Let's say we have a set of SLL conflicting alternatives {@code {1, 2, 3}} and
- * a smaller LL set called <em>s</em>. If <em>s</em> is {@code {2, 3}}, then SLL
- * parsing will get an error because SLL will pursue alternative 1. If
- * <em>s</em> is {@code {1, 2}} or {@code {1, 3}} then both SLL and LL will
- * choose the same alternative because alternative one is the minimum of either
- * set. If <em>s</em> is {@code {2}} or {@code {3}} then SLL will get a syntax
- * error. If <em>s</em> is {@code {1}} then SLL will succeed.</p>
- *
- * <p>
- * Of course, if the input is invalid, then we will get an error for sure in
- * both SLL and LL parsing. Erroneous input will therefore require 2 passes over
- * the input.</p>
- */
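
The two-stage strategy sketched in the snippet above translates to application code roughly as follows. MyParser, the tokens stream and startRule() are placeholders for a generated grammar; this is an illustrative sketch of the pattern, not runtime code:

    #include "antlr4-runtime.h"
    // #include "MyParser.h"   // hypothetical generated parser

    using namespace antlr4;

    tree::ParseTree *parseTwoStage(MyParser &parser, CommonTokenStream &tokens) {
        // Stage 1: pure SLL with a bail-out error strategy (the fast path).
        parser.getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(atn::PredictionMode::SLL);
        parser.setErrorHandler(std::make_shared<BailErrorStrategy>());
        try {
            return parser.startRule();
        } catch (const ParseCancellationException &) {
            // Stage 2: rewind and reparse with full LL and normal error reporting.
            tokens.seek(0);
            parser.reset();
            parser.setErrorHandler(std::make_shared<DefaultErrorStrategy>());
            parser.getInterpreter<atn::ParserATNSimulator>()->setPredictionMode(atn::PredictionMode::LL);
            return parser.startRule();
        }
    }

Only inputs that fail stage 1 pay for the slower full-LL pass, so valid input is parsed at SLL speed.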
- class ANTLR4CPP_PUBLIC ParserATNSimulator : public ATNSimulator {
- public:
- /// Testing only!
- ParserATNSimulator(const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache);
-
- ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache);
-
- ParserATNSimulator(Parser *parser, const ATN &atn, std::vector<dfa::DFA> &decisionToDFA,
- PredictionContextCache &sharedContextCache,
- const ParserATNSimulatorOptions &options);
-
- virtual void reset() override;
- virtual void clearDFA() override;
- virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext);
-
- static const bool TURN_OFF_LR_LOOP_ENTRY_BRANCH_OPT;
-
- std::vector<dfa::DFA> &decisionToDFA;
-
- /** Implements first-edge (loop entry) elimination as an optimization
- * during closure operations. See antlr/antlr4#1398.
- *
- * The optimization is to avoid adding the loop entry config when
- * the exit path can only lead back to the same
- * StarLoopEntryState after popping context at the rule end state
- * (traversing only epsilon edges, so we're still in closure, in
- * this same rule).
- *
- * We need to detect any state that can reach loop entry on
- * epsilon w/o exiting rule. We don't have to look at FOLLOW
- * links, just ensure that all stack tops for config refer to key
- * states in LR rule.
- *
- * To verify we are in the right situation we must first check
- * closure is at a StarLoopEntryState generated during LR removal.
- * Then we check that each stack top of context is a return state
- * from one of these cases:
- *
- * 1. 'not' expr, '(' type ')' expr. The return state points at loop entry state
- * 2. expr op expr. The return state is the block end of internal block of (...)*
- * 3. 'between' expr 'and' expr. The return state of 2nd expr reference.
- * That state points at block end of internal block of (...)*.
- * 4. expr '?' expr ':' expr. The return state points at block end,
- * which points at loop entry state.
- *
- * If any is true for each stack top, then closure does not add a
- * config to the current config set for edge[0], the loop entry branch.
- *
- * Conditions fail if any context for the current config is:
- *
- * a. empty (we'd fall out of expr to do a global FOLLOW which could
- * even be to some weird spot in expr) or,
- * b. lies outside of expr or,
- * c. lies within expr but at a state not the BlockEndState
- * generated during LR removal
- *
- * Do we need to evaluate predicates ever in closure for this case?
- *
- * No. Predicates, including precedence predicates, are only
- * evaluated when computing a DFA start state. I.e., only before
- * the lookahead (but not parser) consumes a token.
- *
- * There are no epsilon edges allowed in LR rule alt blocks or in
- * the "primary" part (ID here). If closure is in
- * StarLoopEntryState any lookahead operation will have consumed a
- * token as there are no epsilon-paths that lead to
- * StarLoopEntryState. We do not have to evaluate predicates
- * therefore if we are in the generated StarLoopEntryState of an LR
- * rule. Note that when making a prediction starting at that
- * decision point, decision d=2, compute-start-state performs
- * closure starting at edges[0], edges[1] emanating from
- * StarLoopEntryState. That means it is not performing closure on
- * StarLoopEntryState during compute-start-state.
- *
- * How do we know this always gives same prediction answer?
- *
- * Without predicates, loop entry and exit paths are ambiguous
- * upon remaining input +b (in, say, a+b). Either path leads to a
- * valid parse. Closure can lead to consuming + immediately or by
- * falling out of this call to expr back into expr and loop back
- * again to StarLoopEntryState to match +b. In this special case,
- * we choose the more efficient path, which is to take the bypass
- * path.
- *
- * The lookahead language has not changed because closure chooses
- * one path over the other. Both paths lead to consuming the same
- * remaining input during a lookahead operation. If the next token
- * is an operator, lookahead will enter the choice block with
- * operators. If it is not, lookahead will exit expr. Same as if
- * closure had chosen to enter the choice block immediately.
- *
- * Closure is examining one config (some loopentrystate, some alt,
- * context) which means it is considering exactly one alt. Closure
- * always copies the same alt to any derived configs.
- *
- * How do we know this optimization doesn't mess up precedence in
- * our parse trees?
- *
- * Looking through expr from left edge of stat only has to confirm
- * that an input, say, a+b+c; begins with any valid interpretation
- * of an expression. The precedence actually doesn't matter when
- * making a decision in stat seeing through expr. It is only when
- * parsing rule expr that we must use the precedence to get the
- * right interpretation and, hence, parse tree.
- */
- bool canDropLoopEntryEdgeInLeftRecursiveRule(ATNConfig *config) const;
- virtual std::string getRuleName(size_t index);
-
- virtual Ref<ATNConfig> precedenceTransition(Ref<ATNConfig> const& config, const PrecedencePredicateTransition *pt,
- bool collectPredicates, bool inContext, bool fullCtx);
-
- void setPredictionMode(PredictionMode newMode);
- PredictionMode getPredictionMode();
-
- Parser* getParser();
-
- virtual std::string getTokenName(size_t t);
-
- virtual std::string getLookaheadName(TokenStream *input);
-
- /// <summary>
- /// Used for debugging in adaptivePredict around execATN but I cut
- /// it out for clarity now that the algorithm works well. We can leave this
- /// "dead" code for a bit.
- /// </summary>
- virtual void dumpDeadEndConfigs(NoViableAltException &nvae);
-
- protected:
- Parser *const parser;
-
- /// <summary>
- /// Each prediction operation uses a cache for merge of prediction contexts.
- /// Don't keep around as it wastes huge amounts of memory. The merge cache
- /// isn't synchronized but we're ok since two threads shouldn't reuse same
- /// parser/atnsim object because it can only handle one input at a time.
- /// This maps graphs a and b to merged result c. (a,b)->c. We can avoid
- /// the merge if we ever see a and b again. Note that (b,a)->c should
- /// also be examined during cache lookup.
- /// </summary>
- PredictionContextMergeCache mergeCache;
- size_t _mergeCacheCounter = 0;
-
- // LAME globals to avoid parameters!!!!! I need these down deep in predTransition
- TokenStream *_input;
- size_t _startIndex;
- ParserRuleContext *_outerContext;
- dfa::DFA *_dfa; // Reference into the decisionToDFA vector.
-
- /// <summary>
- /// Performs ATN simulation to compute a predicted alternative based
- /// upon the remaining input, but also updates the DFA cache to avoid
- /// having to traverse the ATN again for the same input sequence.
- ///
- /// There are some key conditions we're looking for after computing a new
- /// set of ATN configs (proposed DFA state):
- /// if the set is empty, there is no viable alternative for current symbol
- /// does the state uniquely predict an alternative?
- /// does the state have a conflict that would prevent us from
- /// putting it on the work list?
- ///
- /// We also have some key operations to do:
- /// add an edge from previous DFA state to potentially new DFA state, D,
- /// upon current symbol but only if adding to work list, which means in all
- /// cases except no viable alternative (and possibly non-greedy decisions?)
- /// collecting predicates and adding semantic context to DFA accept states
- /// adding rule context to context-sensitive DFA accept states
- /// consuming an input symbol
- /// reporting a conflict
- /// reporting an ambiguity
- /// reporting a context sensitivity
- /// reporting insufficient predicates
- ///
- /// cover these cases:
- /// dead end
- /// single alt
- /// single alt + preds
- /// conflict
- /// conflict + preds
- /// </summary>
- virtual size_t execATN(dfa::DFA &dfa, dfa::DFAState *s0, TokenStream *input, size_t startIndex,
- ParserRuleContext *outerContext);
-
- /// <summary>
- /// Get an existing target state for an edge in the DFA. If the target state
- /// for the edge has not yet been computed or is otherwise not available,
- /// this method returns {@code null}.
- /// </summary>
- /// <param name="previousD"> The current DFA state </param>
- /// <param name="t"> The next input symbol </param>
- /// <returns> The existing target DFA state for the given input symbol
- /// {@code t}, or {@code null} if the target state for this edge is not
- /// already cached </returns>
- virtual dfa::DFAState* getExistingTargetState(dfa::DFAState *previousD, size_t t);
-
- /// <summary>
- /// Compute a target state for an edge in the DFA, and attempt to add the
- /// computed state and corresponding edge to the DFA.
- /// </summary>
- /// <param name="dfa"> The DFA </param>
- /// <param name="previousD"> The current DFA state </param>
- /// <param name="t"> The next input symbol
- /// </param>
- /// <returns> The computed target DFA state for the given input symbol
- /// {@code t}. If {@code t} does not lead to a valid DFA state, this method
- /// returns <seealso cref="#ERROR"/>. </returns>
- virtual dfa::DFAState *computeTargetState(dfa::DFA &dfa, dfa::DFAState *previousD, size_t t);
-
- virtual void predicateDFAState(dfa::DFAState *dfaState, DecisionState *decisionState);
-
- // comes back with reach.uniqueAlt set to a valid alt
- virtual size_t execATNWithFullContext(dfa::DFA &dfa, dfa::DFAState *D, ATNConfigSet *s0,
- TokenStream *input, size_t startIndex, ParserRuleContext *outerContext); // how far we got before failing over
-
- virtual std::unique_ptr<ATNConfigSet> computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx);
-
- /// <summary>
- /// Return a configuration set containing only the configurations from
- /// {@code configs} which are in a <seealso cref="RuleStopState"/>. If all
- /// configurations in {@code configs} are already in a rule stop state, this
- /// method simply returns {@code configs}.
- /// <p/>
- /// When {@code lookToEndOfRule} is true, this method uses
- /// <seealso cref="ATN#nextTokens"/> for each configuration in {@code configs} which is
- /// not already in a rule stop state to see if a rule stop state is reachable
- /// from the configuration via epsilon-only transitions.
- /// </summary>
- /// <param name="configs"> the configuration set to update </param>
- /// <param name="lookToEndOfRule"> when true, this method checks for rule stop states
- /// reachable by epsilon-only transitions from each configuration in
- /// {@code configs}.
- /// </param>
- /// <returns> {@code configs} if all configurations in {@code configs} are in a
- /// rule stop state, otherwise return a new configuration set containing only
- /// the configurations from {@code configs} which are in a rule stop state </returns>
- virtual ATNConfigSet* removeAllConfigsNotInRuleStopState(ATNConfigSet *configs, bool lookToEndOfRule);
-
- virtual std::unique_ptr<ATNConfigSet> computeStartState(ATNState *p, RuleContext *ctx, bool fullCtx);
-
- /* parrt internal source braindump that doesn't mess up
- * external API spec.
-
- applyPrecedenceFilter is an optimization to avoid highly
- nonlinear prediction of expressions and other left recursive
- rules. The precedence predicates such as {3>=prec}? are highly
- context-sensitive in that they can only be properly evaluated
- in the context of the proper prec argument. Without pruning,
- these predicates are normal predicates evaluated when we reach
- conflict state (or unique prediction). As we cannot evaluate
- these predicates out of context, the resulting conflict leads
- to full LL evaluation and nonlinear prediction which shows up
- very clearly with fairly large expressions.
-
- Example grammar:
-
- e : e '*' e
- | e '+' e
- | INT
- ;
-
- We convert that to the following:
-
- e[int prec]
- : INT
- ( {3>=prec}? '*' e[4]
- | {2>=prec}? '+' e[3]
- )*
- ;
-
- The (..)* loop has a decision for the inner block as well as
- an enter or exit decision, which is what concerns us here. At
- the 1st + of input 1+2+3, the loop entry sees both predicates
- and the loop exit also sees both predicates by falling off the
- edge of e. This is because we have no stack information with
- SLL and find the follow of e, which will hit the return states
- inside the loop after e[4] and e[3], which brings it back to
- the enter or exit decision. In this case, we know that we
- cannot evaluate those predicates because we have fallen off
- the edge of the stack and will in general not know which prec
- parameter is the right one to use in the predicate.
-
- Because we have special information, that these are precedence
- predicates, we can resolve them without failing over to full
- LL despite their context sensitive nature. We make an
- assumption that prec[-1] <= prec[0], meaning that the current
- precedence level is greater than or equal to the precedence
- level of recursive invocations above us in the stack. For
- example, if predicate {3>=prec}? is true of the current prec,
- then one option is to enter the loop to match it now. The
- other option is to exit the loop and the left recursive rule
- to match the current operator in rule invocation further up
- the stack. But, we know that all of those prec are lower or
- the same value and so we can decide to enter the loop instead
- of matching it later. That means we can strip out the other
- configuration for the exit branch.
-
- So imagine we have (14,1,$,{2>=prec}?) and then
- (14,2,$-dipsIntoOuterContext,{2>=prec}?). The optimization
- allows us to collapse these two configurations. We know that
- if {2>=prec}? is true for the current prec parameter, it will
- also be true for any prec from an invoking e call, indicated
- by dipsIntoOuterContext. As the predicates are both true, we
- have the option to evaluate them early in the decision start
- state. We do this by stripping both predicates and choosing to
- enter the loop as it is consistent with the notion of operator
- precedence. It's also how the full LL conflict resolution
- would work.
-
- The solution requires a different DFA start state for each
- precedence level.
-
- The basic filter mechanism is to remove configurations of the
- form (p, 2, pi) if (p, 1, pi) exists for the same p and pi. In
- other words, for the same ATN state and predicate context,
- remove any configuration associated with an exit branch if
- there is a configuration associated with the enter branch.
-
- It's also the case that the filter evaluates precedence
- predicates and resolves conflicts according to precedence
- levels. For example, for input 1+2+3 at the first +, we see
- prediction filtering
-
- [(11,1,[$],{3>=prec}?), (14,1,[$],{2>=prec}?), (5,2,[$],up=1),
- (11,2,[$],up=1), (14,2,[$],up=1)],hasSemanticContext=true,dipsIntoOuterContext
-
- to
-
- [(11,1,[$]), (14,1,[$]), (5,2,[$],up=1)],dipsIntoOuterContext
-
- This filters because {3>=prec}? evals to true and collapses
- (11,1,[$],{3>=prec}?) and (11,2,[$],up=1) since early conflict
- resolution based upon rules of operator precedence fits with
- our usual match first alt upon conflict.
-
- We noticed a problem where a recursive call resets precedence
- to 0. Sam's fix: each config has flag indicating if it has
- returned from an expr[0] call. then just don't filter any
- config with that flag set. flag is carried along in
- closure(). so to avoid adding field, set bit just under sign
- bit of dipsIntoOuterContext (SUPPRESS_PRECEDENCE_FILTER).
- With the change you filter "unless (p, 2, pi) was reached
- after leaving the rule stop state of the LR rule containing
- state p, corresponding to a rule invocation with precedence
- level 0"
- */
-
- /**
- * This method transforms the start state computed by
- * {@link #computeStartState} to the special start state used by a
- * precedence DFA for a particular precedence value. The transformation
- * process applies the following changes to the start state's configuration
- * set.
- *
- * <ol>
- * <li>Evaluate the precedence predicates for each configuration using
- * {@link SemanticContext#evalPrecedence}.</li>
- * <li>When {@link ATNConfig#isPrecedenceFilterSuppressed} is {@code false},
- * remove all configurations which predict an alternative greater than 1,
- * for which another configuration that predicts alternative 1 is in the
- * same ATN state with the same prediction context. This transformation is
- * valid for the following reasons:
- * <ul>
- * <li>The closure block cannot contain any epsilon transitions which bypass
- * the body of the closure, so all states reachable via alternative 1 are
- * part of the precedence alternatives of the transformed left-recursive
- * rule.</li>
- * <li>The "primary" portion of a left recursive rule cannot contain an
- * epsilon transition, so the only way an alternative other than 1 can exist
- * in a state that is also reachable via alternative 1 is by nesting calls
- * to the left-recursive rule, with the outer calls not being at the
- * preferred precedence level. The
- * {@link ATNConfig#isPrecedenceFilterSuppressed} property marks ATN
- * configurations which do not meet this condition, and therefore are not
- * eligible for elimination during the filtering process.</li>
- * </ul>
- * </li>
- * </ol>
- *
- * <p>
- * The prediction context must be considered by this filter to address
- * situations like the following.
- * </p>
- * <code>
- * <pre>
- * grammar TA;
- * prog: statement* EOF;
- * statement: letterA | statement letterA 'b' ;
- * letterA: 'a';
- * </pre>
- * </code>
- * <p>
- * In the above grammar, the ATN state immediately before the token
- * reference {@code 'a'} in {@code letterA} is reachable from the left edge
- * of both the primary and closure blocks of the left-recursive rule
- * {@code statement}. The prediction context associated with each of these
- * configurations distinguishes between them, and prevents the alternative
- * which stepped out to {@code prog} (and then back in to {@code statement})
- * from being eliminated by the filter.
- * </p>
- *
- * @param configs The configuration set computed by
- * {@link #computeStartState} as the start state for the DFA.
- * @return The transformed configuration set representing the start state
- * for a precedence DFA at a particular precedence level (determined by
- * calling {@link Parser#getPrecedence}).
- */
- std::unique_ptr<ATNConfigSet> applyPrecedenceFilter(ATNConfigSet *configs);
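
Stripped of the ATN machinery, the basic filter described above is: after evaluating the precedence predicates, drop any exit-branch configuration (p, alt > 1, ctx) for which an enter-branch configuration (p, 1, ctx) with the same state and context survived (the isPrecedenceFilterSuppressed refinement is ignored here). A toy illustration on plain structs, not the runtime's types:

    #include <set>
    #include <utility>
    #include <vector>

    // Toy stand-in for an ATN configuration: (state, alternative, context id).
    struct Config { int state; int alt; int ctx; };

    std::vector<Config> precedenceFilter(const std::vector<Config> &configs) {
        // Record every (state, context) pair reached through the enter branch (alt 1).
        std::set<std::pair<int, int>> enterBranch;
        for (const Config &c : configs) {
            if (c.alt == 1) enterBranch.insert({c.state, c.ctx});
        }
        // Keep a config unless it predicts alt > 1 and an alt-1 twin exists.
        std::vector<Config> result;
        for (const Config &c : configs) {
            if (c.alt != 1 && enterBranch.count({c.state, c.ctx}) != 0) continue; // filtered out
            result.push_back(c);
        }
        return result;
    }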
-
- virtual ATNState *getReachableTarget(const Transition *trans, size_t ttype);
-
- virtual std::vector<Ref<const SemanticContext>> getPredsForAmbigAlts(const antlrcpp::BitSet &ambigAlts,
- ATNConfigSet *configs, size_t nalts);
-
- std::vector<dfa::DFAState::PredPrediction> getPredicatePredictions(const antlrcpp::BitSet &ambigAlts,
- const std::vector<Ref<const SemanticContext>> &altToPred);
-
- /**
- * This method is used to improve the localization of error messages by
- * choosing an alternative rather than throwing a
- * {@link NoViableAltException} in particular prediction scenarios where the
- * {@link #ERROR} state was reached during ATN simulation.
- *
- * <p>
- * The default implementation of this method uses the following
- * algorithm to identify an ATN configuration which successfully parsed the
- * decision entry rule. Choosing such an alternative ensures that the
- * {@link ParserRuleContext} returned by the calling rule will be complete
- * and valid, and the syntax error will be reported later at a more
- * localized location.</p>
- *
- * <ul>
- * <li>If a syntactically valid path or paths reach the end of the decision rule and
- * they are semantically valid if predicated, return the min associated alt.</li>
- * <li>Else, if one or more semantically invalid but syntactically valid
- * paths exist, return the minimum associated alt.</li>
- * <li>Otherwise, return {@link ATN#INVALID_ALT_NUMBER}.</li>
- * </ul>
- *
- * <p>
- * In some scenarios, the algorithm described above could predict an
- * alternative which will result in a {@link FailedPredicateException} in
- * the parser. Specifically, this could occur if the <em>only</em> configuration
- * capable of successfully parsing to the end of the decision rule is
- * blocked by a semantic predicate. By choosing this alternative within
- * {@link #adaptivePredict} instead of throwing a
- * {@link NoViableAltException}, the resulting
- * {@link FailedPredicateException} in the parser will identify the specific
- * predicate which is preventing the parser from successfully parsing the
- * decision rule, which helps developers identify and correct logic errors
- * in semantic predicates.
- * </p>
- *
- * @param configs The ATN configurations which were valid immediately before
- * the {@link #ERROR} state was reached
- * @param outerContext This is the \gamma_0 initial parser context from the paper
- * or the parser stack at the instant before prediction commences.
- *
- * @return The value to return from {@link #adaptivePredict}, or
- * {@link ATN#INVALID_ALT_NUMBER} if a suitable alternative was not
- * identified and {@link #adaptivePredict} should report an error instead.
- */
- size_t getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule(ATNConfigSet *configs,
- ParserRuleContext *outerContext);
-
- virtual size_t getAltThatFinishedDecisionEntryRule(ATNConfigSet *configs);
-
- /** Walk the list of configurations and split them according to
- * those that have preds evaluating to true/false. If no pred, assume
- * true pred and include in succeeded set. Returns Pair of sets.
- *
- * Create a new set so as not to alter the incoming parameter.
- *
- * Assumption: the input stream has been restored to the starting point of
- * prediction, which is where predicates need to evaluate.
- */
- std::pair<ATNConfigSet *, ATNConfigSet *> splitAccordingToSemanticValidity(ATNConfigSet *configs,
- ParserRuleContext *outerContext);
-
- /// <summary>
- /// Look through a list of predicate/alt pairs, returning alts for the
- /// pairs that win. A {@code NONE} predicate indicates an alt containing an
- /// unpredicated config which behaves as "always true." If !complete
- /// then we stop at the first predicate that evaluates to true. This
- /// includes pairs with null predicates.
- /// </summary>
- antlrcpp::BitSet evalSemanticContext(const std::vector<dfa::DFAState::PredPrediction> &predPredictions,
- ParserRuleContext *outerContext, bool complete);
-
- /**
- * Evaluate a semantic context within a specific parser context.
- *
- * <p>
- * This method might not be called for every semantic context evaluated
- * during the prediction process. In particular, we currently do not
- * evaluate the following but it may change in the future:</p>
- *
- * <ul>
- * <li>Precedence predicates (represented by
- * {@link SemanticContext.PrecedencePredicate}) are not currently evaluated
- * through this method.</li>
- * <li>Operator predicates (represented by {@link SemanticContext.AND} and
- * {@link SemanticContext.OR}) are evaluated as a single semantic
- * context, rather than evaluating the operands individually.
- * Implementations which require evaluation results from individual
- * predicates should override this method to explicitly handle evaluation of
- * the operands within operator predicates.</li>
- * </ul>
- *
- * @param pred The semantic context to evaluate
- * @param parserCallStack The parser context in which to evaluate the
- * semantic context
- * @param alt The alternative which is guarded by {@code pred}
- * @param fullCtx {@code true} if the evaluation is occurring during LL
- * prediction; otherwise, {@code false} if the evaluation is occurring
- * during SLL prediction
- *
- * @since 4.3
- */
- virtual bool evalSemanticContext(Ref<const SemanticContext> const& pred, ParserRuleContext *parserCallStack,
- size_t alt, bool fullCtx);
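
As the note above says, operator predicates are evaluated as a single semantic context; an implementation that wants per-predicate visibility would override this hook. A hedged sketch of the override mechanics (the class name is made up, the override only traces each call and defers to the base implementation, and a real per-operand variant would also decompose AND/OR contexts; wiring the simulator into a parser is not shown):

    #include <iostream>
    #include "antlr4-runtime.h"

    using namespace antlr4;
    using namespace antlr4::atn;

    class TracingParserATNSimulator : public ParserATNSimulator {
    public:
        using ParserATNSimulator::ParserATNSimulator; // reuse the base constructors

    protected:
        bool evalSemanticContext(Ref<const SemanticContext> const &pred,
                                 ParserRuleContext *parserCallStack,
                                 size_t alt, bool fullCtx) override {
            // Let the base class do the actual evaluation, then report the result.
            bool result = ParserATNSimulator::evalSemanticContext(pred, parserCallStack, alt, fullCtx);
            std::cerr << "predicate guarding alt " << alt << " -> " << (result ? "true" : "false")
                      << (fullCtx ? " [LL]" : " [SLL]") << std::endl;
            return result;
        }
    };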
-
- /* TODO: If we are doing predicates, there is no point in pursuing
- closure operations if we reach a DFA state that uniquely predicts
- alternative. We will not be caching that DFA state and it is a
- waste to pursue the closure. Might have to advance when we do
- ambig detection though :(
- */
- virtual void closure(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
- bool collectPredicates, bool fullCtx, bool treatEofAsEpsilon);
-
- virtual void closureCheckingStopState(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
- bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
-
- /// Do the actual work of walking epsilon edges.
- virtual void closure_(Ref<ATNConfig> const& config, ATNConfigSet *configs, ATNConfig::Set &closureBusy,
- bool collectPredicates, bool fullCtx, int depth, bool treatEofAsEpsilon);
-
- virtual Ref<ATNConfig> getEpsilonTarget(Ref<ATNConfig> const& config, const Transition *t, bool collectPredicates,
- bool inContext, bool fullCtx, bool treatEofAsEpsilon);
- virtual Ref<ATNConfig> actionTransition(Ref<ATNConfig> const& config, const ActionTransition *t);
-
- virtual Ref<ATNConfig> predTransition(Ref<ATNConfig> const& config, const PredicateTransition *pt, bool collectPredicates,
- bool inContext, bool fullCtx);
-
- virtual Ref<ATNConfig> ruleTransition(Ref<ATNConfig> const& config, const RuleTransition *t);
-
- /**
- * Gets a {@link BitSet} containing the alternatives in {@code configs}
- * which are part of one or more conflicting alternative subsets.
- *
- * @param configs The {@link ATNConfigSet} to analyze.
- * @return The alternatives in {@code configs} which are part of one or more
- * conflicting alternative subsets. If {@code configs} does not contain any
- * conflicting subsets, this method returns an empty {@link BitSet}.
- */
- virtual antlrcpp::BitSet getConflictingAlts(ATNConfigSet *configs);
-
- /// <summary>
- /// Sam pointed out a problem with the previous definition, v3, of
- /// ambiguous states. If we have another state associated with conflicting
- /// alternatives, we should keep going. For example, the following grammar
- ///
- /// s : (ID | ID ID?) ';' ;
- ///
- /// When the ATN simulation reaches the state before ';', it has a DFA
- /// state that looks like: [12|1|[], 6|2|[], 12|2|[]]. Naturally
- /// 12|1|[] and 12|2|[] conflict, but we cannot stop processing this node
- /// because alternative two has another way to continue, via [6|2|[]].
- /// The key is that we have a single state that has configs only associated
- /// with a single alternative, 2, and crucially the state transitions
- /// among the configurations are all non-epsilon transitions. That means
- /// we don't consider any conflicts that include alternative 2. So, we
- /// ignore the conflict between alts 1 and 2. We ignore a set of
- /// conflicting alts when there is an intersection with an alternative
- /// associated with a single alt state in the state->config-list map.
- ///
- /// It's also the case that we might have two conflicting configurations but
- /// also a 3rd nonconflicting configuration for a different alternative:
- /// [1|1|[], 1|2|[], 8|3|[]]. This can come about from grammar:
- ///
- /// a : A | A | A B ;
- ///
- /// After matching input A, we reach the stop state for rule A, state 1.
- /// State 8 is the state right before B. Clearly alternatives 1 and 2
- /// conflict and no amount of further lookahead will separate the two.
- /// However, alternative 3 will be able to continue and so we do not
- /// stop working on this state. In the previous example, we're concerned
- /// with states associated with the conflicting alternatives. Here alt
- /// 3 is not associated with the conflicting configs, but since we can continue
- /// looking for input reasonably, I don't declare the state done. We
- /// ignore a set of conflicting alts when we have an alternative
- /// that we still need to pursue.
- /// </summary>
-
- virtual antlrcpp::BitSet getConflictingAltsOrUniqueAlt(ATNConfigSet *configs);
-
- virtual NoViableAltException noViableAlt(TokenStream *input, ParserRuleContext *outerContext,
- ATNConfigSet *configs, size_t startIndex, bool deleteConfigs);
-
- static size_t getUniqueAlt(ATNConfigSet *configs);
-
- /// <summary>
- /// Add an edge to the DFA, if possible. This method calls
- /// <seealso cref="#addDFAState"/> to ensure the {@code to} state is present in the
- /// DFA. If {@code from} is {@code null}, or if {@code t} is outside the
- /// range of edges that can be represented in the DFA tables, this method
- /// returns without adding the edge to the DFA.
- /// <p/>
- /// If {@code to} is {@code null}, this method returns {@code null}.
- /// Otherwise, this method returns the <seealso cref="DFAState"/> returned by calling
- /// <seealso cref="#addDFAState"/> for the {@code to} state.
- /// </summary>
- /// <param name="dfa"> The DFA </param>
- /// <param name="from"> The source state for the edge </param>
- /// <param name="t"> The input symbol </param>
- /// <param name="to"> The target state for the edge
- /// </param>
- /// <returns> If {@code to} is {@code null}, this method returns {@code null};
- /// otherwise this method returns the result of calling <seealso cref="#addDFAState"/>
- /// on {@code to} </returns>
- virtual dfa::DFAState *addDFAEdge(dfa::DFA &dfa, dfa::DFAState *from, ssize_t t, dfa::DFAState *to);
-
- /// <summary>
- /// Add state {@code D} to the DFA if it is not already present, and return
- /// the actual instance stored in the DFA. If a state equivalent to {@code D}
- /// is already in the DFA, the existing state is returned. Otherwise this
- /// method returns {@code D} after adding it to the DFA.
- /// <p/>
- /// If {@code D} is <seealso cref="#ERROR"/>, this method returns <seealso cref="#ERROR"/> and
- /// does not change the DFA.
- /// </summary>
- /// <param name="dfa"> The dfa </param>
- /// <param name="D"> The DFA state to add </param>
- /// <returns> The state stored in the DFA. This will be either the existing
- /// state if {@code D} is already in the DFA, or {@code D} itself if the
- /// state was not already present. </returns>
- virtual dfa::DFAState *addDFAState(dfa::DFA &dfa, dfa::DFAState *D);
-
- virtual void reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts,
- ATNConfigSet *configs, size_t startIndex, size_t stopIndex);
-
- virtual void reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex);
-
- /// If context-sensitive parsing, we know it's an ambiguity, not a conflict.
- virtual void reportAmbiguity(dfa::DFA &dfa,
- dfa::DFAState *D, // the DFA state from execATN() that had SLL conflicts
- size_t startIndex, size_t stopIndex,
- bool exact,
- const antlrcpp::BitSet &ambigAlts,
- ATNConfigSet *configs); // configs that LL not SLL considered conflicting
-
- private:
- // SLL, LL, or LL + exact ambig detection?
- PredictionMode _mode;
-
- static bool getLrLoopSetting();
- void InitializeInstanceFields();
- };
-
-} // namespace atn
-} // namespace antlr4
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulatorOptions.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulatorOptions.h
deleted file mode 100644
index ea31226d25..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ParserATNSimulatorOptions.h
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "atn/PredictionContextMergeCacheOptions.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC ParserATNSimulatorOptions final {
- public:
- ParserATNSimulatorOptions& setPredictionContextMergeCacheOptions(
- PredictionContextMergeCacheOptions predictionContextMergeCacheOptions) {
- _predictionContextMergeCacheOptions = std::move(predictionContextMergeCacheOptions);
- return *this;
- }
-
- const PredictionContextMergeCacheOptions& getPredictionContextMergeCacheOptions() const {
- return _predictionContextMergeCacheOptions;
- }
-
- private:
- PredictionContextMergeCacheOptions _predictionContextMergeCacheOptions;
- };
-
-} // namespace atn
-} // namespace antlr4
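
A small usage sketch for the builder-style options class above. PredictionContextMergeCacheOptions is default-constructed here, which the member declaration at the bottom of the class already relies on; the resulting object would be handed to the ParserATNSimulator constructor that accepts options, declared in ParserATNSimulator.h above. The function name is a placeholder:

    #include "antlr4-runtime.h"

    using namespace antlr4::atn;

    ParserATNSimulatorOptions makeSimulatorOptions() {
        ParserATNSimulatorOptions options;
        // The fluent setter returns *this, so further setters could be chained here.
        options.setPredictionContextMergeCacheOptions(PredictionContextMergeCacheOptions());
        return options;
    }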
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PlusBlockStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PlusBlockStartState.h
deleted file mode 100644
index b6103dc4d0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PlusBlockStartState.h
+++ /dev/null
@@ -1,29 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Start of {@code (A|B|...)+} loop. Technically a decision state, but
- /// we don't use it for code generation; somebody might need it, so I'm defining
- /// it for completeness. In reality, the <seealso cref="PlusLoopbackState"/> node is the
- /// real decision-making node for {@code A+}.
- class ANTLR4CPP_PUBLIC PlusBlockStartState final : public BlockStartState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::PLUS_BLOCK_START; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- PlusLoopbackState *loopBackState = nullptr;
-
- PlusBlockStartState() : BlockStartState(ATNStateType::PLUS_BLOCK_START) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PlusLoopbackState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PlusLoopbackState.h
deleted file mode 100644
index 07f25aa0c9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PlusLoopbackState.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// Decision state for {@code A+} and {@code (A|B)+}. It has two transitions:
- /// one to loop back to the start of the block and one to exit.
- class ANTLR4CPP_PUBLIC PlusLoopbackState final : public DecisionState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::PLUS_LOOP_BACK; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- PlusLoopbackState() : DecisionState(ATNStateType::PLUS_LOOP_BACK) {}
- };
-
-} // namespace atn
-} // namespace antlr4
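
The static is() helpers in these state classes give an RTTI-free way to test the concrete kind of an ATNState via getStateType(). A short sketch using the two plus-loop states above; it assumes an ATN is already available from a generated recognizer, and the function name is made up:

    #include <iostream>
    #include "antlr4-runtime.h"

    using namespace antlr4::atn;

    // Classify a state without dynamic_cast, using the helpers declared above.
    void describePlusState(const ATNState *state) {
        if (PlusBlockStartState::is(state)) {
            std::cout << "block start of an (A|B|...)+ loop" << std::endl;
        } else if (PlusLoopbackState::is(state)) {
            std::cout << "loop-back decision of an A+ / (A|B)+ loop" << std::endl;
        } else {
            std::cout << "some other ATN state" << std::endl;
        }
    }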
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.cpp
deleted file mode 100644
index b8685e9516..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PrecedencePredicateTransition.h"
-
-using namespace antlr4::atn;
-
-PrecedencePredicateTransition::PrecedencePredicateTransition(ATNState *target, int precedence)
- : Transition(TransitionType::PRECEDENCE, target), _predicate(std::make_shared<SemanticContext::PrecedencePredicate>(precedence)) {}
-
-bool PrecedencePredicateTransition::isEpsilon() const {
- return true;
-}
-
-bool PrecedencePredicateTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string PrecedencePredicateTransition::toString() const {
- return "PRECEDENCE " + Transition::toString() + " { precedence: " + std::to_string(getPrecedence()) + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.h
deleted file mode 100644
index 3db79a9b73..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PrecedencePredicateTransition.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-#include "atn/SemanticContext.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC PrecedencePredicateTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::PRECEDENCE; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- PrecedencePredicateTransition(ATNState *target, int precedence);
-
- int getPrecedence() const { return _predicate->precedence; }
-
- bool isEpsilon() const override;
- bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
- std::string toString() const override;
-
- const Ref<const SemanticContext::PrecedencePredicate>& getPredicate() const { return _predicate; }
-
- private:
- const std::shared_ptr<const SemanticContext::PrecedencePredicate> _predicate;
- };
-
-} // namespace atn
-} // namespace antlr4
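
Putting the two files above together: a precedence predicate transition is an epsilon edge that never matches a symbol and only carries its precedence test, and code walking the ATN can detect it with the is() helper and read the precedence back via getPrecedence(). A sketch that assumes an already-loaded ATN; the function name is made up:

    #include <iostream>
    #include "antlr4-runtime.h"

    using namespace antlr4::atn;

    // Report every precedence predicate leaving a given state.
    void reportPrecedencePredicates(const ATNState *state) {
        for (const auto &transition : state->transitions) {
            const Transition *t = transition.get();
            if (PrecedencePredicateTransition::is(t)) {
                const auto *pt = static_cast<const PrecedencePredicateTransition *>(t);
                std::cout << "{" << pt->getPrecedence() << " >= prec}? -> state "
                          << pt->target->stateNumber << std::endl;
            }
        }
    }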
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.cpp
deleted file mode 100644
index 73ee2a2b97..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "SemanticContext.h"
-
-#include "atn/PredicateEvalInfo.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-PredicateEvalInfo::PredicateEvalInfo(size_t decision, TokenStream *input, size_t startIndex, size_t stopIndex,
- Ref<const SemanticContext> semctx, bool evalResult, size_t predictedAlt, bool fullCtx)
- : DecisionEventInfo(decision, nullptr, input, startIndex, stopIndex, fullCtx),
- semctx(std::move(semctx)), predictedAlt(predictedAlt), evalResult(evalResult) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.h
deleted file mode 100644
index f343f541cb..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateEvalInfo.h
+++ /dev/null
@@ -1,62 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionEventInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// This class represents profiling event information for semantic predicate
- /// evaluations which occur during prediction.
- /// </summary>
- /// <seealso cref= ParserATNSimulator#evalSemanticContext
- ///
- /// @since 4.3 </seealso>
- class ANTLR4CPP_PUBLIC PredicateEvalInfo : public DecisionEventInfo {
- public:
- /// The semantic context which was evaluated.
- const Ref<const SemanticContext> semctx;
-
- /// <summary>
- /// The alternative number for the decision which is guarded by the semantic
- /// context <seealso cref="#semctx"/>. Note that other ATN
- /// configurations may predict the same alternative which are guarded by
- /// other semantic contexts and/or <seealso cref="SemanticContext#NONE"/>.
- /// </summary>
- const size_t predictedAlt;
-
- /// The result of evaluating the semantic context <seealso cref="#semctx"/>.
- const bool evalResult;
-
- /// <summary>
- /// Constructs a new instance of the <seealso cref="PredicateEvalInfo"/> class with the
- /// specified detailed predicate evaluation information.
- /// </summary>
- /// <param name="decision"> The decision number </param>
- /// <param name="input"> The input token stream </param>
- /// <param name="startIndex"> The start index for the current prediction </param>
- /// <param name="stopIndex"> The index at which the predicate evaluation was
- /// triggered. Note that the input stream may be reset to other positions for
- /// the actual evaluation of individual predicates. </param>
- /// <param name="semctx"> The semantic context which was evaluated </param>
- /// <param name="evalResult"> The results of evaluating the semantic context </param>
- /// <param name="predictedAlt"> The alternative number for the decision which is
- /// guarded by the semantic context {@code semctx}. See <seealso cref="#predictedAlt"/>
- /// for more information. </param>
- /// <param name="fullCtx"> {@code true} if the semantic context was
- /// evaluated during LL prediction; otherwise, {@code false} if the semantic
- /// context was evaluated during SLL prediction
- /// </param>
- /// <seealso cref= ParserATNSimulator#evalSemanticContext(SemanticContext, ParserRuleContext, int, boolean) </seealso>
- /// <seealso cref= SemanticContext#eval(Recognizer, RuleContext) </seealso>
- PredicateEvalInfo(size_t decision, TokenStream *input, size_t startIndex, size_t stopIndex,
- Ref<const SemanticContext> semctx, bool evalResult, size_t predictedAlt, bool fullCtx);
- };
-
-} // namespace atn
-} // namespace antlr4
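
`PredicateEvalInfo` is produced by the profiling simulator rather than by user code, and that wiring is outside this diff. A minimal sketch of consuming its public fields, assuming an instance obtained from profiling (the reporting function is hypothetical):

```cpp
#include <iostream>

#include "atn/PredicateEvalInfo.h"

using namespace antlr4::atn;

// Print whether the alternative guarded by the evaluated semantic context
// passed or failed its predicate during prediction.
void reportPredicateEvaluation(const PredicateEvalInfo &info) {
  std::cout << "alt " << info.predictedAlt
            << (info.evalResult ? " passed" : " failed")
            << " its semantic predicate\n";
}
```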
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.cpp
deleted file mode 100644
index d76dbd203a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PredicateTransition.h"
-
-using namespace antlr4::atn;
-
-PredicateTransition::PredicateTransition(ATNState *target, size_t ruleIndex, size_t predIndex, bool isCtxDependent)
- : Transition(TransitionType::PREDICATE, target), _predicate(std::make_shared<SemanticContext::Predicate>(ruleIndex, predIndex, isCtxDependent)) {}
-
-bool PredicateTransition::isEpsilon() const {
- return true;
-}
-
-bool PredicateTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string PredicateTransition::toString() const {
- return "PREDICATE " + Transition::toString() + " { ruleIndex: " + std::to_string(getRuleIndex()) +
- ", predIndex: " + std::to_string(getPredIndex()) + ", isCtxDependent: " + std::to_string(isCtxDependent()) + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.h
deleted file mode 100644
index e889b1c198..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredicateTransition.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-#include "atn/SemanticContext.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// TODO: this is an old comment:
- /// A tree of semantic predicates from the grammar AST if label==SEMPRED.
- /// In the ATN, labels will always be exactly one predicate, but the DFA
- /// may have to combine a bunch of them as it collects predicates from
- /// multiple ATN configurations into a single DFA state.
- class ANTLR4CPP_PUBLIC PredicateTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::PREDICATE; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- PredicateTransition(ATNState *target, size_t ruleIndex, size_t predIndex, bool isCtxDependent);
-
- size_t getRuleIndex() const {
- return _predicate->ruleIndex;
- }
-
- size_t getPredIndex() const {
- return _predicate->predIndex;
- }
-
- bool isCtxDependent() const {
- return _predicate->isCtxDependent;
- }
-
- bool isEpsilon() const override;
- bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
- std::string toString() const override;
-
- const Ref<const SemanticContext::Predicate>& getPredicate() const { return _predicate; }
-
- private:
- const std::shared_ptr<const SemanticContext::Predicate> _predicate;
- };
-
-} // namespace atn
-} // namespace antlr4
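
Same shape as the precedence sketch earlier, but for ordinary sempred edges; again only an illustration of the accessors declared above, with an assumed helper name:

```cpp
#include <iostream>

#include "atn/PredicateTransition.h"
#include "atn/Transition.h"

using namespace antlr4::atn;

// Hypothetical helper: report which sempred (ruleIndex, predIndex) guards an
// edge, and whether the predicate depends on the surrounding rule context.
void describePredicateTransition(const Transition *t) {
  if (!PredicateTransition::is(t)) {
    return;
  }
  const auto *p = static_cast<const PredicateTransition *>(t);
  std::cout << "sempred #" << p->getPredIndex()
            << " of rule " << p->getRuleIndex()
            << (p->isCtxDependent() ? " (context dependent)" : "")
            << '\n';
}
```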
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.cpp
deleted file mode 100644
index 704408f04d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.cpp
+++ /dev/null
@@ -1,579 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/SingletonPredictionContext.h"
-#include "misc/MurmurHash.h"
-#include "atn/ArrayPredictionContext.h"
-#include "atn/PredictionContextCache.h"
-#include "atn/PredictionContextMergeCache.h"
-#include "RuleContext.h"
-#include "ParserRuleContext.h"
-#include "atn/RuleTransition.h"
-#include "support/Arrays.h"
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "atn/PredictionContext.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
- void combineCommonParents(std::vector<Ref<const PredictionContext>> &parents) {
- std::unordered_set<Ref<const PredictionContext>> uniqueParents;
- uniqueParents.reserve(parents.size());
- for (const auto &parent : parents) {
- uniqueParents.insert(parent);
- }
- for (auto &parent : parents) {
- parent = *uniqueParents.find(parent);
- }
- }
-
- Ref<const PredictionContext> getCachedContextImpl(const Ref<const PredictionContext> &context,
- PredictionContextCache &contextCache,
- std::unordered_map<Ref<const PredictionContext>,
- Ref<const PredictionContext>> &visited) {
- if (context->isEmpty()) {
- return context;
- }
-
- {
- auto iterator = visited.find(context);
- if (iterator != visited.end()) {
- return iterator->second; // Not necessarily the same as context.
- }
- }
-
- auto cached = contextCache.get(context);
- if (cached) {
- visited[context] = cached;
- return cached;
- }
-
- bool changed = false;
-
- std::vector<Ref<const PredictionContext>> parents(context->size());
- for (size_t i = 0; i < parents.size(); i++) {
- auto parent = getCachedContextImpl(context->getParent(i), contextCache, visited);
- if (changed || parent != context->getParent(i)) {
- if (!changed) {
- parents.clear();
- for (size_t j = 0; j < context->size(); j++) {
- parents.push_back(context->getParent(j));
- }
-
- changed = true;
- }
-
- parents[i] = std::move(parent);
- }
- }
-
- if (!changed) {
- visited[context] = context;
- contextCache.put(context);
- return context;
- }
-
- Ref<const PredictionContext> updated;
- if (parents.empty()) {
- updated = PredictionContext::EMPTY;
- } else if (parents.size() == 1) {
- updated = SingletonPredictionContext::create(std::move(parents[0]), context->getReturnState(0));
- contextCache.put(updated);
- } else {
- updated = std::make_shared<ArrayPredictionContext>(std::move(parents), downCast<const ArrayPredictionContext*>(context.get())->returnStates);
- contextCache.put(updated);
- }
-
- visited[updated] = updated;
- visited[context] = updated;
-
- return updated;
- }
-
- void getAllContextNodesImpl(const Ref<const PredictionContext> &context,
- std::vector<Ref<const PredictionContext>> &nodes,
- std::unordered_set<const PredictionContext*> &visited) {
-
- if (visited.find(context.get()) != visited.end()) {
- return; // Already done.
- }
-
- visited.insert(context.get());
- nodes.push_back(context);
-
- for (size_t i = 0; i < context->size(); i++) {
- getAllContextNodesImpl(context->getParent(i), nodes, visited);
- }
- }
-
- size_t insertOrAssignNodeId(std::unordered_map<const PredictionContext*, size_t> &nodeIds, size_t &nodeId, const PredictionContext *node) {
- auto existing = nodeIds.find(node);
- if (existing != nodeIds.end()) {
- return existing->second;
- }
- return nodeIds.insert({node, nodeId++}).first->second;
- }
-
-}
-
-const Ref<const PredictionContext> PredictionContext::EMPTY = std::make_shared<SingletonPredictionContext>(nullptr, PredictionContext::EMPTY_RETURN_STATE);
-
-//----------------- PredictionContext ----------------------------------------------------------------------------------
-
-PredictionContext::PredictionContext(PredictionContextType contextType) : _contextType(contextType), _hashCode(0) {}
-
-PredictionContext::PredictionContext(PredictionContext&& other) : _contextType(other._contextType), _hashCode(other._hashCode.exchange(0, std::memory_order_relaxed)) {}
-
-Ref<const PredictionContext> PredictionContext::fromRuleContext(const ATN &atn, RuleContext *outerContext) {
- if (outerContext == nullptr) {
- return PredictionContext::EMPTY;
- }
-
- // if we are in RuleContext of start rule, s, then PredictionContext
- // is EMPTY. Nobody called us. (if we are empty, return empty)
- if (outerContext->parent == nullptr || outerContext == &ParserRuleContext::EMPTY) {
- return PredictionContext::EMPTY;
- }
-
- // If we have a parent, convert it to a PredictionContext graph
- auto parent = PredictionContext::fromRuleContext(atn, RuleContext::is(outerContext->parent) ? downCast<RuleContext*>(outerContext->parent) : nullptr);
- const auto *transition = downCast<const RuleTransition*>(atn.states[outerContext->invokingState]->transitions[0].get());
- return SingletonPredictionContext::create(std::move(parent), transition->followState->stateNumber);
-}
-
-bool PredictionContext::hasEmptyPath() const {
- // since EMPTY_RETURN_STATE can only appear in the last position, we check last one
- return getReturnState(size() - 1) == EMPTY_RETURN_STATE;
-}
-
-size_t PredictionContext::hashCode() const {
- auto hash = cachedHashCode();
- if (hash == 0) {
- hash = hashCodeImpl();
- if (hash == 0) {
- hash = std::numeric_limits<size_t>::max();
- }
- _hashCode.store(hash, std::memory_order_relaxed);
- }
- return hash;
-}
-
-Ref<const PredictionContext> PredictionContext::merge(Ref<const PredictionContext> a, Ref<const PredictionContext> b,
- bool rootIsWildcard, PredictionContextMergeCache *mergeCache) {
- assert(a && b);
-
- // share same graph if both same
- if (a == b || *a == *b) {
- return a;
- }
-
- const auto aType = a->getContextType();
- const auto bType = b->getContextType();
-
- if (aType == PredictionContextType::SINGLETON && bType == PredictionContextType::SINGLETON) {
- return mergeSingletons(std::static_pointer_cast<const SingletonPredictionContext>(std::move(a)),
- std::static_pointer_cast<const SingletonPredictionContext>(std::move(b)), rootIsWildcard, mergeCache);
- }
-
- // At least one of a or b is array.
- // If one is $ and rootIsWildcard, return $ as * wildcard.
- if (rootIsWildcard) {
- if (a == PredictionContext::EMPTY) {
- return a;
- }
- if (b == PredictionContext::EMPTY) {
- return b;
- }
- }
-
- // convert singleton so both are arrays to normalize
- Ref<const ArrayPredictionContext> left;
- if (aType == PredictionContextType::SINGLETON) {
- left = std::make_shared<ArrayPredictionContext>(downCast<const SingletonPredictionContext&>(*a));
- } else {
- left = std::static_pointer_cast<const ArrayPredictionContext>(std::move(a));
- }
- Ref<const ArrayPredictionContext> right;
- if (bType == PredictionContextType::SINGLETON) {
- right = std::make_shared<ArrayPredictionContext>(downCast<const SingletonPredictionContext&>(*b));
- } else {
- right = std::static_pointer_cast<const ArrayPredictionContext>(std::move(b));
- }
- return mergeArrays(std::move(left), std::move(right), rootIsWildcard, mergeCache);
-}
-
-Ref<const PredictionContext> PredictionContext::mergeSingletons(Ref<const SingletonPredictionContext> a, Ref<const SingletonPredictionContext> b,
- bool rootIsWildcard, PredictionContextMergeCache *mergeCache) {
-
- if (mergeCache) {
- auto existing = mergeCache->get(a, b);
- if (existing) {
- return existing;
- }
- existing = mergeCache->get(b, a);
- if (existing) {
- return existing;
- }
- }
-
- auto rootMerge = mergeRoot(a, b, rootIsWildcard);
- if (rootMerge) {
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(rootMerge));
- }
- return rootMerge;
- }
-
- const auto& parentA = a->parent;
- const auto& parentB = b->parent;
- if (a->returnState == b->returnState) { // a == b
- auto parent = merge(parentA, parentB, rootIsWildcard, mergeCache);
-
- // If parent is same as existing a or b parent or reduced to a parent, return it.
- if (parent == parentA) { // ax + bx = ax, if a=b
- return a;
- }
- if (parent == parentB) { // ax + bx = bx, if a=b
- return b;
- }
-
- // else: ax + ay = a'[x,y]
- // merge parents x and y, giving array node with x,y then remainders
- // of those graphs. dup a, a' points at merged array
- // new joined parent so create new singleton pointing to it, a'
- auto c = SingletonPredictionContext::create(std::move(parent), a->returnState);
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
- }
- // a != b payloads differ
- // see if we can collapse parents due to $+x parents if local ctx
- Ref<const PredictionContext> singleParent;
- if (a == b || (*parentA == *parentB)) { // ax + bx = [a,b]x
- singleParent = parentA;
- }
- if (singleParent) { // parents are same, sort payloads and use same parent
- std::vector<size_t> payloads = { a->returnState, b->returnState };
- if (a->returnState > b->returnState) {
- payloads[0] = b->returnState;
- payloads[1] = a->returnState;
- }
- std::vector<Ref<const PredictionContext>> parents = { singleParent, singleParent };
- auto c = std::make_shared<ArrayPredictionContext>(std::move(parents), std::move(payloads));
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
- }
-
- // parents differ and we can't merge them; just pack them together
- // into an array.
- // ax + by = [ax,by]
- if (a->returnState > b->returnState) { // sort by payload
- std::vector<size_t> payloads = { b->returnState, a->returnState };
- std::vector<Ref<const PredictionContext>> parents = { b->parent, a->parent };
- auto c = std::make_shared<ArrayPredictionContext>(std::move(parents), std::move(payloads));
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
- }
- std::vector<size_t> payloads = {a->returnState, b->returnState};
- std::vector<Ref<const PredictionContext>> parents = { a->parent, b->parent };
- auto c = std::make_shared<ArrayPredictionContext>(std::move(parents), std::move(payloads));
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
-}
-
-Ref<const PredictionContext> PredictionContext::mergeRoot(Ref<const SingletonPredictionContext> a, Ref<const SingletonPredictionContext> b,
- bool rootIsWildcard) {
- if (rootIsWildcard) {
- if (a == EMPTY) { // * + b = *
- return EMPTY;
- }
- if (b == EMPTY) { // a + * = *
- return EMPTY;
- }
- } else {
- if (a == EMPTY && b == EMPTY) { // $ + $ = $
- return EMPTY;
- }
- if (a == EMPTY) { // $ + x = [$,x]
- std::vector<size_t> payloads = { b->returnState, EMPTY_RETURN_STATE };
- std::vector<Ref<const PredictionContext>> parents = { b->parent, nullptr };
- return std::make_shared<ArrayPredictionContext>(std::move(parents), std::move(payloads));
- }
- if (b == EMPTY) { // x + $ = [$,x] ($ is always first if present)
- std::vector<size_t> payloads = { a->returnState, EMPTY_RETURN_STATE };
- std::vector<Ref<const PredictionContext>> parents = { a->parent, nullptr };
- return std::make_shared<ArrayPredictionContext>(std::move(parents), std::move(payloads));
- }
- }
- return nullptr;
-}
-
-Ref<const PredictionContext> PredictionContext::mergeArrays(Ref<const ArrayPredictionContext> a, Ref<const ArrayPredictionContext> b,
- bool rootIsWildcard, PredictionContextMergeCache *mergeCache) {
-
- if (mergeCache) {
- auto existing = mergeCache->get(a, b);
- if (existing) {
- return existing;
- }
- existing = mergeCache->get(b, a);
- if (existing) {
- return existing;
- }
- }
-
- // merge sorted payloads a + b => M
- size_t i = 0; // walks a
- size_t j = 0; // walks b
- size_t k = 0; // walks target M array
-
- std::vector<size_t> mergedReturnStates(a->returnStates.size() + b->returnStates.size());
- std::vector<Ref<const PredictionContext>> mergedParents(a->returnStates.size() + b->returnStates.size());
-
- // walk and merge to yield mergedParents, mergedReturnStates
- while (i < a->returnStates.size() && j < b->returnStates.size()) {
- const auto& parentA = a->parents[i];
- const auto& parentB = b->parents[j];
- if (a->returnStates[i] == b->returnStates[j]) {
- // same payload (stack tops are equal), must yield merged singleton
- size_t payload = a->returnStates[i];
- // $+$ = $
- bool both$ = payload == EMPTY_RETURN_STATE && !parentA && !parentB;
- bool ax_ax = (parentA && parentB) && *parentA == *parentB; // ax+ax -> ax
- if (both$ || ax_ax) {
- mergedParents[k] = parentA; // choose left
- mergedReturnStates[k] = payload;
- } else { // ax+ay -> a'[x,y]
- mergedParents[k] = merge(parentA, parentB, rootIsWildcard, mergeCache);
- mergedReturnStates[k] = payload;
- }
- i++; // hop over left one as usual
- j++; // but also skip one in right side since we merge
- } else if (a->returnStates[i] < b->returnStates[j]) { // copy a[i] to M
- mergedParents[k] = parentA;
- mergedReturnStates[k] = a->returnStates[i];
- i++;
- } else { // b > a, copy b[j] to M
- mergedParents[k] = parentB;
- mergedReturnStates[k] = b->returnStates[j];
- j++;
- }
- k++;
- }
-
- // copy over any payloads remaining in either array
- if (i < a->returnStates.size()) {
- for (auto p = i; p < a->returnStates.size(); p++) {
- mergedParents[k] = a->parents[p];
- mergedReturnStates[k] = a->returnStates[p];
- k++;
- }
- } else {
- for (auto p = j; p < b->returnStates.size(); p++) {
- mergedParents[k] = b->parents[p];
- mergedReturnStates[k] = b->returnStates[p];
- k++;
- }
- }
-
- // trim merged if we combined a few that had same stack tops
- if (k < mergedParents.size()) { // write index < last position; trim
- if (k == 1) { // for just one merged element, return singleton top
- auto c = SingletonPredictionContext::create(std::move(mergedParents[0]), mergedReturnStates[0]);
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
- }
- mergedParents.resize(k);
- mergedReturnStates.resize(k);
- }
-
- ArrayPredictionContext m(std::move(mergedParents), std::move(mergedReturnStates));
-
- // if we created same array as a or b, return that instead
- // TODO: track whether this is possible above during merge sort for speed
- if (m == *a) {
- if (mergeCache) {
- return mergeCache->put(a, b, a);
- }
- return a;
- }
- if (m == *b) {
- if (mergeCache) {
- return mergeCache->put(a, b, b);
- }
- return b;
- }
-
- combineCommonParents(m.parents);
- auto c = std::make_shared<ArrayPredictionContext>(std::move(m));
- if (mergeCache) {
- return mergeCache->put(a, b, std::move(c));
- }
- return c;
-}
-
-std::string PredictionContext::toDOTString(const Ref<const PredictionContext> &context) {
- if (context == nullptr) {
- return "";
- }
-
- std::stringstream ss;
- ss << "digraph G {\n" << "rankdir=LR;\n";
-
- std::vector<Ref<const PredictionContext>> nodes = getAllContextNodes(context);
- std::unordered_map<const PredictionContext*, size_t> nodeIds;
- size_t nodeId = 0;
-
- for (const auto &current : nodes) {
- if (current->getContextType() == PredictionContextType::SINGLETON) {
- std::string s = std::to_string(insertOrAssignNodeId(nodeIds, nodeId, current.get()));
- ss << " s" << s;
- std::string returnState = std::to_string(current->getReturnState(0));
- if (current == PredictionContext::EMPTY) {
- returnState = "$";
- }
- ss << " [label=\"" << returnState << "\"];\n";
- continue;
- }
- Ref<const ArrayPredictionContext> arr = std::static_pointer_cast<const ArrayPredictionContext>(current);
- ss << " s" << insertOrAssignNodeId(nodeIds, nodeId, arr.get()) << " [shape=box, label=\"" << "[";
- bool first = true;
- for (auto inv : arr->returnStates) {
- if (!first) {
- ss << ", ";
- }
- if (inv == EMPTY_RETURN_STATE) {
- ss << "$";
- } else {
- ss << inv;
- }
- first = false;
- }
- ss << "]";
- ss << "\"];\n";
- }
-
- for (const auto &current : nodes) {
- if (current == EMPTY) {
- continue;
- }
- for (size_t i = 0; i < current->size(); i++) {
- if (!current->getParent(i)) {
- continue;
- }
- ss << " s" << insertOrAssignNodeId(nodeIds, nodeId, current.get()) << "->" << "s" << insertOrAssignNodeId(nodeIds, nodeId, current->getParent(i).get());
- if (current->size() > 1) {
- ss << " [label=\"parent[" << i << "]\"];\n";
- } else {
- ss << ";\n";
- }
- }
- }
-
- ss << "}\n";
- return ss.str();
-}
-
-// The "visited" map is just a temporary structure to control the retrieval process (which is recursive).
-Ref<const PredictionContext> PredictionContext::getCachedContext(const Ref<const PredictionContext> &context,
- PredictionContextCache &contextCache) {
- std::unordered_map<Ref<const PredictionContext>, Ref<const PredictionContext>> visited;
- return getCachedContextImpl(context, contextCache, visited);
-}
-
-std::vector<Ref<const PredictionContext>> PredictionContext::getAllContextNodes(const Ref<const PredictionContext> &context) {
- std::vector<Ref<const PredictionContext>> nodes;
- std::unordered_set<const PredictionContext*> visited;
- getAllContextNodesImpl(context, nodes, visited);
- return nodes;
-}
-
-std::vector<std::string> PredictionContext::toStrings(Recognizer *recognizer, int currentState) const {
- return toStrings(recognizer, EMPTY, currentState);
-}
-
-std::vector<std::string> PredictionContext::toStrings(Recognizer *recognizer, const Ref<const PredictionContext> &stop, int currentState) const {
-
- std::vector<std::string> result;
-
- for (size_t perm = 0; ; perm++) {
- size_t offset = 0;
- bool last = true;
- const PredictionContext *p = this;
- size_t stateNumber = currentState;
-
- std::stringstream ss;
- ss << "[";
- bool outerContinue = false;
- while (!p->isEmpty() && p != stop.get()) {
- size_t index = 0;
- if (p->size() > 0) {
- size_t bits = 1;
- while ((1ULL << bits) < p->size()) {
- bits++;
- }
-
- size_t mask = (1 << bits) - 1;
- index = (perm >> offset) & mask;
- last &= index >= p->size() - 1;
- if (index >= p->size()) {
- outerContinue = true;
- break;
- }
- offset += bits;
- }
-
- if (recognizer != nullptr) {
- if (ss.tellp() > 1) {
- // the first char is '['; anything beyond that means this isn't the first rule
- ss << ' ';
- }
-
- const ATN &atn = recognizer->getATN();
- ATNState *s = atn.states[stateNumber];
- std::string ruleName = recognizer->getRuleNames()[s->ruleIndex];
- ss << ruleName;
- } else if (p->getReturnState(index) != EMPTY_RETURN_STATE) {
- if (!p->isEmpty()) {
- if (ss.tellp() > 1) {
- // the first char is '['; anything beyond that means this isn't the first rule
- ss << ' ';
- }
-
- ss << p->getReturnState(index);
- }
- }
- stateNumber = p->getReturnState(index);
- p = p->getParent(index).get();
- }
-
- if (outerContinue)
- continue;
-
- ss << "]";
- result.push_back(ss.str());
-
- if (last) {
- break;
- }
- }
-
- return result;
-}
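
The merge routines above implement the `ax + ay = a'[x,y]` and `* + b = *` algebra spelled out in their comments. A minimal sketch of that behaviour, assuming the runtime headers are available; the return states 10 and 20 are arbitrary placeholders, not real ATN state numbers:

```cpp
#include <iostream>

#include "atn/PredictionContext.h"
#include "atn/SingletonPredictionContext.h"

using namespace antlr4::atn;

int main() {
  // Two stacks with the same parent ($) but different return states:
  // ax + ay, which mergeSingletons() turns into a'[x, y].
  auto ax = SingletonPredictionContext::create(PredictionContext::EMPTY, 10);
  auto ay = SingletonPredictionContext::create(PredictionContext::EMPTY, 20);

  // Local-context merge (rootIsWildcard = true), no merge cache.
  auto merged = PredictionContext::merge(ax, ay, /*rootIsWildcard=*/true, nullptr);
  std::cout << "merged size = " << merged->size() << '\n';  // expected: 2
  std::cout << PredictionContext::toDOTString(merged);      // graph as DOT

  // Under rootIsWildcard, EMPTY is the wildcard: * + b = * (see mergeRoot()).
  auto star = PredictionContext::merge(PredictionContext::EMPTY, ax, true, nullptr);
  std::cout << std::boolalpha
            << "collapsed to EMPTY: " << (star == PredictionContext::EMPTY) << '\n';
  return 0;
}
```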
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.h
deleted file mode 100644
index 967355af17..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContext.h
+++ /dev/null
@@ -1,225 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <atomic>
-
-#include "Recognizer.h"
-#include "atn/ATN.h"
-#include "atn/ATNState.h"
-#include "atn/PredictionContextType.h"
-
-namespace antlr4 {
-
- class RuleContext;
-
-namespace atn {
-
- class ATN;
- class ArrayPredictionContext;
- class SingletonPredictionContext;
- class PredictionContextCache;
- class PredictionContextMergeCache;
-
- class ANTLR4CPP_PUBLIC PredictionContext {
- public:
- /// Represents $ in local context prediction, which means wildcard.
- /// *+x = *.
- static const Ref<const PredictionContext> EMPTY;
-
- /// Represents $ in an array in full context mode, when $
- /// doesn't mean wildcard: $ + x = [$,x]. Here,
- /// $ = EMPTY_RETURN_STATE.
- // ml: originally Integer.MAX_VALUE, which would be -1 for us, but this is already used in places where
- // -1 is converted to unsigned, so we use a different value here. Any value does the job provided it doesn't
- // conflict with real return states.
- static constexpr size_t EMPTY_RETURN_STATE = std::numeric_limits<size_t>::max() - 9;
-
- // dispatch
- static Ref<const PredictionContext> merge(Ref<const PredictionContext> a,
- Ref<const PredictionContext> b,
- bool rootIsWildcard,
- PredictionContextMergeCache *mergeCache);
-
- /// <summary>
- /// Merge two <seealso cref="SingletonPredictionContext"/> instances.
- ///
- /// <p/>
- ///
- /// Stack tops equal, parents merge is same; return left graph.<br/>
- /// <embed src="images/SingletonMerge_SameRootSamePar.svg" type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Same stack top, parents differ; merge parents giving array node, then
- /// remainders of those graphs. A new root node is created to point to the
- /// merged parents.<br/>
- /// <embed src="images/SingletonMerge_SameRootDiffPar.svg" type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Different stack tops pointing to same parent. Make array node for the
- /// root where both element in the root point to the same (original)
- /// parent.<br/>
- /// <embed src="images/SingletonMerge_DiffRootSamePar.svg" type="image/svg+xml"/>
- ///
- /// <p/>
- ///
- /// Different stack tops pointing to different parents. Make array node for
- /// the root where each element points to the corresponding original
- /// parent.<br/>
- /// <embed src="images/SingletonMerge_DiffRootDiffPar.svg" type="image/svg+xml"/>
- /// </summary>
- /// <param name="a"> the first <seealso cref="SingletonPredictionContext"/> </param>
- /// <param name="b"> the second <seealso cref="SingletonPredictionContext"/> </param>
- /// <param name="rootIsWildcard"> {@code true} if this is a local-context merge,
- /// otherwise false to indicate a full-context merge </param>
- /// <param name="mergeCache"> </param>
- static Ref<const PredictionContext> mergeSingletons(Ref<const SingletonPredictionContext> a,
- Ref<const SingletonPredictionContext> b,
- bool rootIsWildcard,
- PredictionContextMergeCache *mergeCache);
-
- /**
- * Handle case where at least one of {@code a} or {@code b} is
- * {@link #EMPTY}. In the following diagrams, the symbol {@code $} is used
- * to represent {@link #EMPTY}.
- *
- * <h2>Local-Context Merges</h2>
- *
- * <p>These local-context merge operations are used when {@code rootIsWildcard}
- * is true.</p>
- *
- * <p>{@link #EMPTY} is superset of any graph; return {@link #EMPTY}.<br>
- * <embed src="images/LocalMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
- *
- * <p>{@link #EMPTY} and anything is {@code #EMPTY}, so merged parent is
- * {@code #EMPTY}; return left graph.<br>
- * <embed src="images/LocalMerge_EmptyParent.svg" type="image/svg+xml"/></p>
- *
- * <p>Special case of last merge if local context.<br>
- * <embed src="images/LocalMerge_DiffRoots.svg" type="image/svg+xml"/></p>
- *
- * <h2>Full-Context Merges</h2>
- *
- * <p>These full-context merge operations are used when {@code rootIsWildcard}
- * is false.</p>
- *
- * <p><embed src="images/FullMerge_EmptyRoots.svg" type="image/svg+xml"/></p>
- *
- * <p>Must keep all contexts; {@link #EMPTY} in array is a special value (and
- * null parent).<br>
- * <embed src="images/FullMerge_EmptyRoot.svg" type="image/svg+xml"/></p>
- *
- * <p><embed src="images/FullMerge_SameRoot.svg" type="image/svg+xml"/></p>
- *
- * @param a the first {@link SingletonPredictionContext}
- * @param b the second {@link SingletonPredictionContext}
- * @param rootIsWildcard {@code true} if this is a local-context merge,
- * otherwise false to indicate a full-context merge
- */
- static Ref<const PredictionContext> mergeRoot(Ref<const SingletonPredictionContext> a,
- Ref<const SingletonPredictionContext> b,
- bool rootIsWildcard);
-
- /**
- * Merge two {@link ArrayPredictionContext} instances.
- *
- * <p>Different tops, different parents.<br>
- * <embed src="images/ArrayMerge_DiffTopDiffPar.svg" type="image/svg+xml"/></p>
- *
- * <p>Shared top, same parents.<br>
- * <embed src="images/ArrayMerge_ShareTopSamePar.svg" type="image/svg+xml"/></p>
- *
- * <p>Shared top, different parents.<br>
- * <embed src="images/ArrayMerge_ShareTopDiffPar.svg" type="image/svg+xml"/></p>
- *
- * <p>Shared top, all shared parents.<br>
- * <embed src="images/ArrayMerge_ShareTopSharePar.svg" type="image/svg+xml"/></p>
- *
- * <p>Equal tops, merge parents and reduce top to
- * {@link SingletonPredictionContext}.<br>
- * <embed src="images/ArrayMerge_EqualTop.svg" type="image/svg+xml"/></p>
- */
- static Ref<const PredictionContext> mergeArrays(Ref<const ArrayPredictionContext> a,
- Ref<const ArrayPredictionContext> b,
- bool rootIsWildcard,
- PredictionContextMergeCache *mergeCache);
-
- static std::string toDOTString(const Ref<const PredictionContext> &context);
-
- static Ref<const PredictionContext> getCachedContext(const Ref<const PredictionContext> &context,
- PredictionContextCache &contextCache);
-
- static std::vector<Ref<const PredictionContext>> getAllContextNodes(const Ref<const PredictionContext> &context);
-
- /// Convert a RuleContext tree to a PredictionContext graph.
- /// Return EMPTY if outerContext is empty.
- static Ref<const PredictionContext> fromRuleContext(const ATN &atn, RuleContext *outerContext);
-
- PredictionContext(const PredictionContext&) = delete;
-
- virtual ~PredictionContext() = default;
-
- PredictionContext& operator=(const PredictionContext&) = delete;
- PredictionContext& operator=(PredictionContext&&) = delete;
-
- PredictionContextType getContextType() const { return _contextType; }
-
- virtual size_t size() const = 0;
- virtual const Ref<const PredictionContext>& getParent(size_t index) const = 0;
- virtual size_t getReturnState(size_t index) const = 0;
-
- /// This means only the EMPTY (wildcard? not sure) context is in set.
- virtual bool isEmpty() const = 0;
- bool hasEmptyPath() const;
-
- size_t hashCode() const;
-
- virtual bool equals(const PredictionContext &other) const = 0;
-
- virtual std::string toString() const = 0;
-
- std::vector<std::string> toStrings(Recognizer *recognizer, int currentState) const;
- std::vector<std::string> toStrings(Recognizer *recognizer,
- const Ref<const PredictionContext> &stop,
- int currentState) const;
-
- protected:
- explicit PredictionContext(PredictionContextType contextType);
-
- PredictionContext(PredictionContext&& other);
-
- virtual size_t hashCodeImpl() const = 0;
-
- size_t cachedHashCode() const { return _hashCode.load(std::memory_order_relaxed); }
-
- private:
- const PredictionContextType _contextType;
- mutable std::atomic<size_t> _hashCode;
- };
-
- inline bool operator==(const PredictionContext &lhs, const PredictionContext &rhs) {
- return lhs.equals(rhs);
- }
-
- inline bool operator!=(const PredictionContext &lhs, const PredictionContext &rhs) {
- return !operator==(lhs, rhs);
- }
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::atn::PredictionContext> {
- size_t operator()(const ::antlr4::atn::PredictionContext &predictionContext) const {
- return predictionContext.hashCode();
- }
- };
-
-} // namespace std
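
The `operator==`/`std::hash` pair declared above is what lets structurally equal stacks be deduplicated by the caches that follow. A small consistency sketch under the same placeholder-state assumption as before:

```cpp
#include <iostream>

#include "atn/PredictionContext.h"
#include "atn/SingletonPredictionContext.h"

using namespace antlr4::atn;

int main() {
  // Two distinct objects describing the same stack: parent $ and return state 42.
  auto a = SingletonPredictionContext::create(PredictionContext::EMPTY, 42);
  auto b = SingletonPredictionContext::create(PredictionContext::EMPTY, 42);

  // operator== dispatches to the virtual equals(); hashCode() caches the
  // lazily computed hash, so equal contexts also hash identically.
  std::cout << std::boolalpha
            << "equal: " << (*a == *b) << '\n'
            << "same hash: " << (a->hashCode() == b->hashCode()) << '\n'
            << "std::hash agrees: "
            << (std::hash<PredictionContext>{}(*a) ==
                std::hash<PredictionContext>{}(*b)) << '\n';
  return 0;
}
```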
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.cpp
deleted file mode 100644
index 031a35cbf7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.cpp
+++ /dev/null
@@ -1,56 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "atn/PredictionContextCache.h"
-
-using namespace antlr4::atn;
-
-void PredictionContextCache::put(const Ref<const PredictionContext> &value) {
- assert(value);
-
- _data.insert(value);
-}
-
-Ref<const PredictionContext> PredictionContextCache::get(
- const Ref<const PredictionContext> &value) const {
- assert(value);
-
- auto iterator = _data.find(value);
- if (iterator == _data.end()) {
- return nullptr;
- }
- return *iterator;
-}
-
-size_t PredictionContextCache::PredictionContextHasher::operator()(
- const Ref<const PredictionContext> &predictionContext) const {
- return predictionContext->hashCode();
-}
-
-bool PredictionContextCache::PredictionContextComparer::operator()(
- const Ref<const PredictionContext> &lhs,
- const Ref<const PredictionContext> &rhs) const {
- return *lhs == *rhs;
-}
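
`PredictionContextCache` is a by-value set: the hasher uses `hashCode()` and the comparer dereferences before comparing, so a structurally equal context built elsewhere resolves to the instance already in the cache. A sketch, again with placeholder return states:

```cpp
#include <iostream>

#include "atn/PredictionContextCache.h"
#include "atn/SingletonPredictionContext.h"

using namespace antlr4::atn;

int main() {
  PredictionContextCache cache;

  auto original = SingletonPredictionContext::create(PredictionContext::EMPTY, 7);
  cache.put(original);

  // Structurally equal, but a different object.
  auto duplicate = SingletonPredictionContext::create(PredictionContext::EMPTY, 7);
  auto shared = cache.get(duplicate);

  std::cout << std::boolalpha
            << "hit: " << (shared != nullptr) << '\n'
            << "deduplicated to the original: " << (shared == original) << '\n';
  return 0;
}
```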
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.h
deleted file mode 100644
index 78c8210d97..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextCache.h
+++ /dev/null
@@ -1,63 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-#include "FlatHashSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC PredictionContextCache final {
- public:
- PredictionContextCache() = default;
-
- PredictionContextCache(const PredictionContextCache&) = delete;
- PredictionContextCache(PredictionContextCache&&) = delete;
-
- PredictionContextCache& operator=(const PredictionContextCache&) = delete;
- PredictionContextCache& operator=(PredictionContextCache&&) = delete;
-
- void put(const Ref<const PredictionContext> &value);
-
- Ref<const PredictionContext> get(const Ref<const PredictionContext> &value) const;
-
- private:
- struct ANTLR4CPP_PUBLIC PredictionContextHasher final {
- size_t operator()(const Ref<const PredictionContext> &predictionContext) const;
- };
-
- struct ANTLR4CPP_PUBLIC PredictionContextComparer final {
- bool operator()(const Ref<const PredictionContext> &lhs,
- const Ref<const PredictionContext> &rhs) const;
- };
-
- FlatHashSet<Ref<const PredictionContext>,
- PredictionContextHasher, PredictionContextComparer> _data;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.cpp
deleted file mode 100644
index 7160b59998..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "atn/PredictionContextMergeCache.h"
-
-#include "misc/MurmurHash.h"
-
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-PredictionContextMergeCache::PredictionContextMergeCache(
- const PredictionContextMergeCacheOptions &options) : _options(options) {}
-
-Ref<const PredictionContext> PredictionContextMergeCache::put(
- const Ref<const PredictionContext> &key1,
- const Ref<const PredictionContext> &key2,
- Ref<const PredictionContext> value) {
- assert(key1);
- assert(key2);
-
- if (getOptions().getMaxSize() == 0) {
- // Cache is effectively disabled.
- return value;
- }
-
- auto [existing, inserted] = _entries.try_emplace(std::make_pair(key1.get(), key2.get()));
- if (inserted) {
- try {
- existing->second.reset(new Entry());
- } catch (...) {
- _entries.erase(existing);
- throw;
- }
- existing->second->key = std::make_pair(key1, key2);
- existing->second->value = std::move(value);
- pushToFront(existing->second.get());
- } else {
- if (existing->second->value != value) {
- existing->second->value = std::move(value);
- }
- moveToFront(existing->second.get());
- }
- compact(existing->second.get());
- return existing->second->value;
-}
-
-Ref<const PredictionContext> PredictionContextMergeCache::get(
- const Ref<const PredictionContext> &key1,
- const Ref<const PredictionContext> &key2) const {
- assert(key1);
- assert(key2);
-
- if (getOptions().getMaxSize() == 0) {
- // Cache is effectively disabled.
- return nullptr;
- }
-
- auto iterator = _entries.find(std::make_pair(key1.get(), key2.get()));
- if (iterator == _entries.end()) {
- return nullptr;
- }
- moveToFront(iterator->second.get());
- return iterator->second->value;
-}
-
-void PredictionContextMergeCache::clear() {
- Container().swap(_entries);
- _head = _tail = nullptr;
- _size = 0;
-}
-
-void PredictionContextMergeCache::moveToFront(Entry *entry) const {
- if (entry->prev == nullptr) {
- assert(entry == _head);
- return;
- }
- entry->prev->next = entry->next;
- if (entry->next != nullptr) {
- entry->next->prev = entry->prev;
- } else {
- assert(entry == _tail);
- _tail = entry->prev;
- }
- entry->prev = nullptr;
- entry->next = _head;
- _head->prev = entry;
- _head = entry;
- assert(entry->prev == nullptr);
-}
-
-void PredictionContextMergeCache::pushToFront(Entry *entry) {
- ++_size;
- entry->prev = nullptr;
- entry->next = _head;
- if (_head != nullptr) {
- _head->prev = entry;
- _head = entry;
- } else {
- assert(entry->next == nullptr);
- _head = entry;
- _tail = entry;
- }
- assert(entry->prev == nullptr);
-}
-
-void PredictionContextMergeCache::remove(Entry *entry) {
- if (entry->prev != nullptr) {
- entry->prev->next = entry->next;
- } else {
- assert(entry == _head);
- _head = entry->next;
- }
- if (entry->next != nullptr) {
- entry->next->prev = entry->prev;
- } else {
- assert(entry == _tail);
- _tail = entry->prev;
- }
- --_size;
- _entries.erase(std::make_pair(entry->key.first.get(), entry->key.second.get()));
-}
-
-void PredictionContextMergeCache::compact(const Entry *preserve) {
- Entry *entry = _tail;
- while (entry != nullptr && _size > getOptions().getMaxSize()) {
- Entry *next = entry->prev;
- if (entry != preserve) {
- remove(entry);
- }
- entry = next;
- }
-}
-
-size_t PredictionContextMergeCache::PredictionContextHasher::operator()(
- const PredictionContextPair &value) const {
- size_t hash = MurmurHash::initialize();
- hash = MurmurHash::update(hash, value.first->hashCode());
- hash = MurmurHash::update(hash, value.second->hashCode());
- return MurmurHash::finish(hash, 2);
-}
-
-bool PredictionContextMergeCache::PredictionContextComparer::operator()(
- const PredictionContextPair &lhs, const PredictionContextPair &rhs) const {
- return *lhs.first == *rhs.first && *lhs.second == *rhs.second;
-}
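
The merge cache above keeps an intrusive LRU list (`_head`/`_tail`) over a flat hash map and trims it in `compact()` whenever the configured maximum size is exceeded, always preserving the entry just touched. A sketch of that eviction behaviour; the cached value would normally be the result of `PredictionContext::merge`, with `EMPTY` standing in here as a placeholder:

```cpp
#include <iostream>

#include "atn/PredictionContext.h"
#include "atn/PredictionContextMergeCache.h"
#include "atn/PredictionContextMergeCacheOptions.h"
#include "atn/SingletonPredictionContext.h"

using namespace antlr4::atn;

int main() {
  // Keep at most one merge result so the eviction in compact() is visible.
  PredictionContextMergeCache cache(
      PredictionContextMergeCacheOptions().setMaxSize(1));

  auto a = SingletonPredictionContext::create(PredictionContext::EMPTY, 1);
  auto b = SingletonPredictionContext::create(PredictionContext::EMPTY, 2);
  auto c = SingletonPredictionContext::create(PredictionContext::EMPTY, 3);

  cache.put(a, b, PredictionContext::EMPTY);
  cache.put(b, c, PredictionContext::EMPTY);  // pushes (a, b) out of the cache

  std::cout << std::boolalpha
            << "(a, b) still cached: " << (cache.get(a, b) != nullptr) << '\n'
            << "(b, c) still cached: " << (cache.get(b, c) != nullptr) << '\n';
  return 0;
}
```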
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.h
deleted file mode 100644
index efaeaef578..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCache.h
+++ /dev/null
@@ -1,101 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include <utility>
-
-#include "atn/PredictionContext.h"
-#include "atn/PredictionContextMergeCacheOptions.h"
-#include "FlatHashMap.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC PredictionContextMergeCache final {
- public:
- PredictionContextMergeCache()
- : PredictionContextMergeCache(PredictionContextMergeCacheOptions()) {}
-
- explicit PredictionContextMergeCache(const PredictionContextMergeCacheOptions &options);
-
- PredictionContextMergeCache(const PredictionContextMergeCache&) = delete;
- PredictionContextMergeCache(PredictionContextMergeCache&&) = delete;
-
- PredictionContextMergeCache& operator=(const PredictionContextMergeCache&) = delete;
- PredictionContextMergeCache& operator=(PredictionContextMergeCache&&) = delete;
-
- Ref<const PredictionContext> put(const Ref<const PredictionContext> &key1,
- const Ref<const PredictionContext> &key2,
- Ref<const PredictionContext> value);
-
- Ref<const PredictionContext> get(const Ref<const PredictionContext> &key1,
- const Ref<const PredictionContext> &key2) const;
-
- const PredictionContextMergeCacheOptions& getOptions() const { return _options; }
-
- void clear();
-
- private:
- using PredictionContextPair = std::pair<const PredictionContext*, const PredictionContext*>;
-
- struct ANTLR4CPP_PUBLIC PredictionContextHasher final {
- size_t operator()(const PredictionContextPair &value) const;
- };
-
- struct ANTLR4CPP_PUBLIC PredictionContextComparer final {
- bool operator()(const PredictionContextPair &lhs, const PredictionContextPair &rhs) const;
- };
-
- struct ANTLR4CPP_PUBLIC Entry final {
- std::pair<Ref<const PredictionContext>, Ref<const PredictionContext>> key;
- Ref<const PredictionContext> value;
- Entry *prev = nullptr;
- Entry *next = nullptr;
- };
-
- void moveToFront(Entry *entry) const;
-
- void pushToFront(Entry *entry);
-
- void remove(Entry *entry);
-
- void compact(const Entry *preserve);
-
- using Container = FlatHashMap<PredictionContextPair, std::unique_ptr<Entry>,
- PredictionContextHasher, PredictionContextComparer>;
-
- const PredictionContextMergeCacheOptions _options;
-
- Container _entries;
-
- mutable Entry *_head = nullptr;
- mutable Entry *_tail = nullptr;
-
- size_t _size = 0;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCacheOptions.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCacheOptions.h
deleted file mode 100644
index 7331cc17e0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextMergeCacheOptions.h
+++ /dev/null
@@ -1,71 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-#include <limits>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC PredictionContextMergeCacheOptions final {
- public:
- PredictionContextMergeCacheOptions() = default;
-
- size_t getMaxSize() const { return _maxSize; }
-
- bool hasMaxSize() const { return getMaxSize() != std::numeric_limits<size_t>::max(); }
-
- PredictionContextMergeCacheOptions& setMaxSize(size_t maxSize) {
- _maxSize = maxSize;
- return *this;
- }
-
- size_t getClearEveryN() const {
- return _clearEveryN;
- }
-
- bool hasClearEveryN() const { return getClearEveryN() != 0; }
-
- PredictionContextMergeCacheOptions& setClearEveryN(uint64_t clearEveryN) {
- _clearEveryN = clearEveryN;
- return *this;
- }
-
- PredictionContextMergeCacheOptions& neverClear() {
- return setClearEveryN(0);
- }
-
- private:
- size_t _maxSize = std::numeric_limits<size_t>::max();
- uint64_t _clearEveryN = 1;
- };
-
-} // namespace atn
-} // namespace antlr4
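
The options type above is a small fluent builder; everything shown is taken from the header, with one caveat: a max size of 0 effectively disables the merge cache (its `put`/`get` bail out early), and where the runtime consumes `clearEveryN` is outside this diff.

```cpp
#include <iostream>

#include "atn/PredictionContextMergeCacheOptions.h"

using namespace antlr4::atn;

int main() {
  // Defaults per the header: unbounded size, clear interval of 1.
  PredictionContextMergeCacheOptions defaults;
  std::cout << std::boolalpha
            << "bounded by default: " << defaults.hasMaxSize() << '\n'
            << "default clearEveryN: " << defaults.getClearEveryN() << '\n';

  // A bounded cache that is never cleared wholesale.
  auto options = PredictionContextMergeCacheOptions()
                     .setMaxSize(1024)
                     .neverClear();
  std::cout << "maxSize: " << options.getMaxSize()
            << ", hasClearEveryN: " << options.hasClearEveryN() << '\n';
  return 0;
}
```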
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextType.h
deleted file mode 100644
index c8c4473e13..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionContextType.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- enum class PredictionContextType : size_t {
- SINGLETON = 1,
- ARRAY = 2,
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.cpp
deleted file mode 100644
index 9db0b8bdb9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.cpp
+++ /dev/null
@@ -1,202 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleStopState.h"
-#include "atn/ATNConfigSet.h"
-#include "atn/ATNConfig.h"
-#include "misc/MurmurHash.h"
-#include "SemanticContext.h"
-
-#include "PredictionMode.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-struct AltAndContextConfigHasher
-{
- /**
- * The hash code is only a function of the {@link ATNState#stateNumber}
- * and {@link ATNConfig#context}.
- */
- size_t operator () (ATNConfig *o) const {
- size_t hashCode = misc::MurmurHash::initialize(7);
- hashCode = misc::MurmurHash::update(hashCode, o->state->stateNumber);
- hashCode = misc::MurmurHash::update(hashCode, o->context);
- return misc::MurmurHash::finish(hashCode, 2);
- }
-};
-
-struct AltAndContextConfigComparer {
- bool operator()(ATNConfig *a, ATNConfig *b) const
- {
- if (a == b) {
- return true;
- }
- return a->state->stateNumber == b->state->stateNumber && *a->context == *b->context;
- }
-};
-
-bool PredictionModeClass::hasSLLConflictTerminatingPrediction(PredictionMode mode, ATNConfigSet *configs) {
- /* Configs in rule stop states indicate reaching the end of the decision
- * rule (local context) or end of start rule (full context). If all
- * configs meet this condition, then none of the configurations is able
- * to match additional input so we terminate prediction.
- */
- if (allConfigsInRuleStopStates(configs)) {
- return true;
- }
-
- bool heuristic;
-
- // Pure SLL mode parsing or SLL+LL if:
- // Don't bother with combining configs from different semantic
- // contexts if we can fail over to full LL; costs more time
- // since we'll often fail over anyway.
- if (mode == PredictionMode::SLL || !configs->hasSemanticContext) {
- std::vector<antlrcpp::BitSet> altsets = getConflictingAltSubsets(configs);
- heuristic = hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(configs);
- } else {
- // dup configs, tossing out semantic predicates
- ATNConfigSet dup(true);
- for (auto &config : configs->configs) {
- Ref<ATNConfig> c = std::make_shared<ATNConfig>(*config, SemanticContext::Empty::Instance);
- dup.add(c);
- }
- std::vector<antlrcpp::BitSet> altsets = getConflictingAltSubsets(&dup);
- heuristic = hasConflictingAltSet(altsets) && !hasStateAssociatedWithOneAlt(&dup);
- }
-
- return heuristic;
-}
-
-bool PredictionModeClass::hasConfigInRuleStopState(ATNConfigSet *configs) {
- for (const auto &config : configs->configs) {
- if (RuleStopState::is(config->state)) {
- return true;
- }
- }
-
- return false;
-}
-
-bool PredictionModeClass::allConfigsInRuleStopStates(ATNConfigSet *configs) {
- for (const auto &config : configs->configs) {
- if (!RuleStopState::is(config->state)) {
- return false;
- }
- }
-
- return true;
-}
-
-size_t PredictionModeClass::resolvesToJustOneViableAlt(const std::vector<antlrcpp::BitSet>& altsets) {
- return getSingleViableAlt(altsets);
-}
-
-bool PredictionModeClass::allSubsetsConflict(const std::vector<antlrcpp::BitSet>& altsets) {
- return !hasNonConflictingAltSet(altsets);
-}
-
-bool PredictionModeClass::hasNonConflictingAltSet(const std::vector<antlrcpp::BitSet>& altsets) {
- for (antlrcpp::BitSet alts : altsets) {
- if (alts.count() == 1) {
- return true;
- }
- }
- return false;
-}
-
-bool PredictionModeClass::hasConflictingAltSet(const std::vector<antlrcpp::BitSet>& altsets) {
- for (antlrcpp::BitSet alts : altsets) {
- if (alts.count() > 1) {
- return true;
- }
- }
- return false;
-}
-
-bool PredictionModeClass::allSubsetsEqual(const std::vector<antlrcpp::BitSet>& altsets) {
- if (altsets.empty()) {
- return true;
- }
-
- const antlrcpp::BitSet& first = *altsets.begin();
- for (const antlrcpp::BitSet& alts : altsets) {
- if (alts != first) {
- return false;
- }
- }
- return true;
-}
-
-size_t PredictionModeClass::getUniqueAlt(const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet all = getAlts(altsets);
- if (all.count() == 1) {
- return all.nextSetBit(0);
- }
- return ATN::INVALID_ALT_NUMBER;
-}
-
-antlrcpp::BitSet PredictionModeClass::getAlts(const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet all;
- for (const auto &alts : altsets) {
- all |= alts;
- }
-
- return all;
-}
-
-antlrcpp::BitSet PredictionModeClass::getAlts(ATNConfigSet *configs) {
- antlrcpp::BitSet alts;
- for (const auto &config : configs->configs) {
- alts.set(config->alt);
- }
- return alts;
-}
-
-std::vector<antlrcpp::BitSet> PredictionModeClass::getConflictingAltSubsets(ATNConfigSet *configs) {
- std::unordered_map<ATNConfig*, antlrcpp::BitSet, AltAndContextConfigHasher, AltAndContextConfigComparer> configToAlts;
- for (auto &config : configs->configs) {
- configToAlts[config.get()].set(config->alt);
- }
- std::vector<antlrcpp::BitSet> values;
- values.reserve(configToAlts.size());
- for (const auto &pair : configToAlts) {
- values.push_back(pair.second);
- }
- return values;
-}
-
-std::unordered_map<ATNState*, antlrcpp::BitSet> PredictionModeClass::getStateToAltMap(ATNConfigSet *configs) {
- std::unordered_map<ATNState*, antlrcpp::BitSet> m;
- for (const auto &c : configs->configs) {
- m[c->state].set(c->alt);
- }
- return m;
-}
-
-bool PredictionModeClass::hasStateAssociatedWithOneAlt(ATNConfigSet *configs) {
- auto x = getStateToAltMap(configs);
- for (const auto &pair : x){
- if (pair.second.count() == 1) return true;
- }
- return false;
-}
-
-size_t PredictionModeClass::getSingleViableAlt(const std::vector<antlrcpp::BitSet>& altsets) {
- antlrcpp::BitSet viableAlts;
- for (const auto &alts : altsets) {
- size_t minAlt = alts.nextSetBit(0);
-
- viableAlts.set(minAlt);
- if (viableAlts.count() > 1) // more than 1 viable alt
- {
- return ATN::INVALID_ALT_NUMBER;
- }
- }
-
- return viableAlts.nextSetBit(0);
-}
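
Editorial note: for readers skimming the deleted PredictionMode.cpp above, here is a minimal, self-contained C++ sketch of the grouping that getConflictingAltSubsets() and getSingleViableAlt() perform: configurations are bucketed by (state, context), each bucket becomes an alternative subset, and prediction may stop only when every subset resolves to the same minimum alternative. MiniConfig and the plain STL containers are illustrative stand-ins, not the runtime's ATNConfig/BitSet types.

#include <cstddef>
#include <iostream>
#include <map>
#include <set>
#include <utility>
#include <vector>

struct MiniConfig {
  int state;        // stands in for ATNConfig::state->stateNumber
  int contextId;    // stands in for the (merged) prediction context
  std::size_t alt;  // predicted alternative
};

// Group alternatives by (state, context); each resulting set is one
// "conflicting alt subset".
static std::vector<std::set<std::size_t>>
conflictingAltSubsets(const std::vector<MiniConfig> &configs) {
  std::map<std::pair<int, int>, std::set<std::size_t>> byStateAndCtx;
  for (const auto &c : configs) {
    byStateAndCtx[{c.state, c.contextId}].insert(c.alt);
  }
  std::vector<std::set<std::size_t>> subsets;
  for (const auto &entry : byStateAndCtx) {
    subsets.push_back(entry.second);
  }
  return subsets;
}

// Resolve every subset to its minimum alternative; prediction may stop only
// if all subsets resolve to the same single alternative (0 plays the role of
// ATN::INVALID_ALT_NUMBER in this sketch).
static std::size_t singleViableAlt(const std::vector<std::set<std::size_t>> &subsets) {
  std::set<std::size_t> viable;
  for (const auto &alts : subsets) {
    viable.insert(*alts.begin());
    if (viable.size() > 1) {
      return 0;
    }
  }
  return viable.empty() ? 0 : *viable.begin();
}

int main() {
  // (s,1,x) and (s,2,x) conflict; (s',1,y) and (s',2,y) conflict; both
  // subsets resolve to alternative 1, so lookahead can stop.
  std::vector<MiniConfig> configs = {{12, 7, 1}, {12, 7, 2}, {6, 9, 1}, {6, 9, 2}};
  std::cout << "viable alt: " << singleViableAlt(conflictingAltSubsets(configs)) << "\n";
  return 0;
}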
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.h b/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.h
deleted file mode 100644
index 4868ea2ff2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/PredictionMode.h
+++ /dev/null
@@ -1,436 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/BitSet.h"
-
-namespace antlr4 {
-namespace atn {
-
- /**
- * This enumeration defines the prediction modes available in ANTLR 4 along with
- * utility methods for analyzing configuration sets for conflicts and/or
- * ambiguities.
- */
- enum class PredictionMode {
- /**
- * The SLL(*) prediction mode. This prediction mode ignores the current
- * parser context when making predictions. This is the fastest prediction
- * mode, and provides correct results for many grammars. This prediction
- * mode is more powerful than the prediction mode provided by ANTLR 3, but
- * may result in syntax errors for grammar and input combinations which are
- * not SLL.
- *
- * <p>
- * When using this prediction mode, the parser will either return a correct
- * parse tree (i.e. the same parse tree that would be returned with the
- * {@link #LL} prediction mode), or it will report a syntax error. If a
- * syntax error is encountered when using the {@link #SLL} prediction mode,
- * it may be due to either an actual syntax error in the input or indicate
- * that the particular combination of grammar and input requires the more
- * powerful {@link #LL} prediction abilities to complete successfully.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- SLL,
-
- /**
- * The LL(*) prediction mode. This prediction mode allows the current parser
- * context to be used for resolving SLL conflicts that occur during
- * prediction. This is the fastest prediction mode that guarantees correct
- * parse results for all combinations of grammars with syntactically correct
- * inputs.
- *
- * <p>
- * When using this prediction mode, the parser will make correct decisions
- * for all syntactically-correct grammar and input combinations. However, in
- * cases where the grammar is truly ambiguous this prediction mode might not
- * report a precise answer for <em>exactly which</em> alternatives are
- * ambiguous.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- LL,
-
- /**
- * The LL(*) prediction mode with exact ambiguity detection. In addition to
- * the correctness guarantees provided by the {@link #LL} prediction mode,
- * this prediction mode instructs the prediction algorithm to determine the
- * complete and exact set of ambiguous alternatives for every ambiguous
- * decision encountered while parsing.
- *
- * <p>
- * This prediction mode may be used for diagnosing ambiguities during
- * grammar development. Due to the performance overhead of calculating sets
- * of ambiguous alternatives, this prediction mode should be avoided when
- * the exact results are not necessary.</p>
- *
- * <p>
- * This prediction mode does not provide any guarantees for prediction
- * behavior for syntactically-incorrect inputs.</p>
- */
- LL_EXACT_AMBIG_DETECTION
- };
-
- class ANTLR4CPP_PUBLIC PredictionModeClass {
- public:
- /**
- * Computes the SLL prediction termination condition.
- *
- * <p>
- * This method computes the SLL prediction termination condition for both of
- * the following cases.</p>
- *
- * <ul>
- * <li>The usual SLL+LL fallback upon SLL conflict</li>
- * <li>Pure SLL without LL fallback</li>
- * </ul>
- *
- * <p><strong>COMBINED SLL+LL PARSING</strong></p>
- *
- * <p>When LL-fallback is enabled upon SLL conflict, correct predictions are
- * ensured regardless of how the termination condition is computed by this
- * method. Due to the substantially higher cost of LL prediction, the
- * prediction should only fall back to LL when the additional lookahead
- * cannot lead to a unique SLL prediction.</p>
- *
- * <p>Assuming combined SLL+LL parsing, an SLL configuration set with only
- * conflicting subsets should fall back to full LL, even if the
- * configuration sets don't resolve to the same alternative (e.g.
- * {@code {1,2}} and {@code {3,4}}). If there is at least one non-conflicting
- * configuration, SLL could continue with the hopes that more lookahead will
- * resolve via one of those non-conflicting configurations.</p>
- *
- * <p>Here's the prediction termination rule, then: SLL (for SLL+LL parsing)
- * stops when it sees only conflicting configuration subsets. In contrast,
- * full LL keeps going when there is uncertainty.</p>
- *
- * <p><strong>HEURISTIC</strong></p>
- *
- * <p>As a heuristic, we stop prediction when we see any conflicting subset
- * unless we see a state that only has one alternative associated with it.
- * The single-alt-state thing lets prediction continue upon rules like
- * (otherwise, it would admit defeat too soon):</p>
- *
- * <p>{@code [12|1|[], 6|2|[], 12|2|[]]. s : (ID | ID ID?) ';' ;}</p>
- *
- * <p>When the ATN simulation reaches the state before {@code ';'}, it has a
- * DFA state that looks like: {@code [12|1|[], 6|2|[], 12|2|[]]}. Naturally
- * {@code 12|1|[]} and {@code 12|2|[]} conflict, but we cannot stop
- * processing this node because alternative two has another way to continue,
- * via {@code [6|2|[]]}.</p>
- *
- * <p>It also lets us continue for this rule:</p>
- *
- * <p>{@code [1|1|[], 1|2|[], 8|3|[]] a : A | A | A B ;}</p>
- *
- * <p>After matching input A, we reach the stop state for rule A, state 1.
- * State 8 is the state right before B. Clearly alternatives 1 and 2
- * conflict and no amount of further lookahead will separate the two.
- * However, alternative 3 will be able to continue and so we do not stop
- * working on this state. In the previous example, we're concerned with
- * states associated with the conflicting alternatives. Here alt 3 is not
- * associated with the conflicting configs, but since we can continue
- * looking for input reasonably, don't declare the state done.</p>
- *
- * <p><strong>PURE SLL PARSING</strong></p>
- *
- * <p>To handle pure SLL parsing, all we have to do is make sure that we
- * combine stack contexts for configurations that differ only by semantic
- * predicate. From there, we can do the usual SLL termination heuristic.</p>
- *
- * <p><strong>PREDICATES IN SLL+LL PARSING</strong></p>
- *
- * <p>SLL decisions don't evaluate predicates until after they reach DFA stop
- * states because they need to create the DFA cache that works in all
- * semantic situations. In contrast, full LL evaluates predicates collected
- * during start state computation so it can ignore predicates thereafter.
- * This means that SLL termination detection can totally ignore semantic
- * predicates.</p>
- *
- * <p>Implementation-wise, {@link ATNConfigSet} combines stack contexts but not
- * semantic predicate contexts so we might see two configurations like the
- * following.</p>
- *
- * <p>{@code (s, 1, x, {}), (s, 1, x', {p})}</p>
- *
- * <p>Before testing these configurations against others, we have to merge
- * {@code x} and {@code x'} (without modifying the existing configurations).
- * For example, we test {@code (x+x')==x''} when looking for conflicts in
- * the following configurations.</p>
- *
- * <p>{@code (s, 1, x, {}), (s, 1, x', {p}), (s, 2, x'', {})}</p>
- *
- * <p>If the configuration set has predicates (as indicated by
- * {@link ATNConfigSet#hasSemanticContext}), this algorithm makes a copy of
- * the configurations to strip out all of the predicates so that a standard
- * {@link ATNConfigSet} will merge everything ignoring predicates.</p>
- */
- static bool hasSLLConflictTerminatingPrediction(PredictionMode mode, ATNConfigSet *configs);
-
- /// <summary>
- /// Checks if any configuration in {@code configs} is in a
- /// <seealso cref="RuleStopState"/>. Configurations meeting this condition have
- /// reached
- /// the end of the decision rule (local context) or end of start rule (full
- /// context).
- /// </summary>
- /// <param name="configs"> the configuration set to test </param>
- /// <returns> {@code true} if any configuration in {@code configs} is in a
- /// <seealso cref="RuleStopState"/>, otherwise {@code false} </returns>
- static bool hasConfigInRuleStopState(ATNConfigSet *configs);
-
- /// <summary>
- /// Checks if all configurations in {@code configs} are in a
- /// <seealso cref="RuleStopState"/>. Configurations meeting this condition have
- /// reached
- /// the end of the decision rule (local context) or end of start rule (full
- /// context).
- /// </summary>
- /// <param name="configs"> the configuration set to test </param>
- /// <returns> {@code true} if all configurations in {@code configs} are in a
- /// <seealso cref="RuleStopState"/>, otherwise {@code false} </returns>
- static bool allConfigsInRuleStopStates(ATNConfigSet *configs);
-
- /**
- * Full LL prediction termination.
- *
- * <p>Can we stop looking ahead during ATN simulation or is there some
- * uncertainty as to which alternative we will ultimately pick, after
- * consuming more input? Even if there are partial conflicts, we might know
- * that everything is going to resolve to the same minimum alternative. That
- * means we can stop since no more lookahead will change that fact. On the
- * other hand, there might be multiple conflicts that resolve to different
- * minimums. That means we need more look ahead to decide which of those
- * alternatives we should predict.</p>
- *
- * <p>The basic idea is to split the set of configurations {@code C}, into
- * conflicting subsets {@code (s, _, ctx, _)} and singleton subsets with
- * non-conflicting configurations. Two configurations conflict if they have
- * identical {@link ATNConfig#state} and {@link ATNConfig#context} values
- * but different {@link ATNConfig#alt} value, e.g. {@code (s, i, ctx, _)}
- * and {@code (s, j, ctx, _)} for {@code i!=j}.</p>
- *
- * <p>Reduce these configuration subsets to the set of possible alternatives.
- * You can compute the alternative subsets in one pass as follows:</p>
- *
- * <p>{@code A_s,ctx = {i | (s, i, ctx, _)}} for each configuration in
- * {@code C} holding {@code s} and {@code ctx} fixed.</p>
- *
- * <p>Or in pseudo-code, for each configuration {@code c} in {@code C}:</p>
- *
- * <pre>
- * map[c] U= c.{@link ATNConfig#alt alt} # map hash/equals uses s and x, not
- * alt and not pred
- * </pre>
- *
- * <p>The values in {@code map} are the set of {@code A_s,ctx} sets.</p>
- *
- * <p>If {@code |A_s,ctx|=1} then there is no conflict associated with
- * {@code s} and {@code ctx}.</p>
- *
- * <p>Reduce the subsets to singletons by choosing a minimum of each subset. If
- * the union of these alternative subsets is a singleton, then no amount of
- * more lookahead will help us. We will always pick that alternative. If,
- * however, there is more than one alternative, then we are uncertain which
- * alternative to predict and must continue looking for resolution. We may
- * or may not discover an ambiguity in the future, even if there are no
- * conflicting subsets this round.</p>
- *
- * <p>The biggest sin is to terminate early because it means we've made a
- * decision but were uncertain as to the eventual outcome. We haven't used
- * enough lookahead. On the other hand, announcing a conflict too late is no
- * big deal; you will still have the conflict. It's just inefficient. It
- * might even look until the end of file.</p>
- *
- * <p>No special consideration for semantic predicates is required because
- * predicates are evaluated on-the-fly for full LL prediction, ensuring that
- * no configuration contains a semantic context during the termination
- * check.</p>
- *
- * <p><strong>CONFLICTING CONFIGS</strong></p>
- *
- * <p>Two configurations {@code (s, i, x)} and {@code (s, j, x')}, conflict
- * when {@code i!=j} but {@code x=x'}. Because we merge all
- * {@code (s, i, _)} configurations together, that means that there are at
- * most {@code n} configurations associated with state {@code s} for
- * {@code n} possible alternatives in the decision. The merged stacks
- * complicate the comparison of configuration contexts {@code x} and
- * {@code x'}. Sam checks to see if one is a subset of the other by calling
- * merge and checking to see if the merged result is either {@code x} or
- * {@code x'}. If the {@code x} associated with lowest alternative {@code i}
- * is the superset, then {@code i} is the only possible prediction since the
- * others resolve to {@code min(i)} as well. However, if {@code x} is
- * associated with {@code j>i} then at least one stack configuration for
- * {@code j} is not in conflict with alternative {@code i}. The algorithm
- * should keep going, looking for more lookahead due to the uncertainty.</p>
- *
- * <p>For simplicity, I'm doing an equality check between {@code x} and
- * {@code x'} that lets the algorithm continue to consume lookahead longer
- * than necessary. The reason I like the equality is of course the
- * simplicity but also because that is the test you need to detect the
- * alternatives that are actually in conflict.</p>
- *
- * <p><strong>CONTINUE/STOP RULE</strong></p>
- *
- * <p>Continue if union of resolved alternative sets from non-conflicting and
- * conflicting alternative subsets has more than one alternative. We are
- * uncertain about which alternative to predict.</p>
- *
- * <p>The complete set of alternatives, {@code [i for (_,i,_)]}, tells us which
- * alternatives are still in the running for the amount of input we've
- * consumed at this point. The conflicting sets let us strip away
- * configurations that won't lead to more states because we resolve
- * conflicts to the configuration with a minimum alternate for the
- * conflicting set.</p>
- *
- * <p><strong>CASES</strong></p>
- *
- * <ul>
- *
- * <li>no conflicts and more than 1 alternative in set =&gt; continue</li>
- *
- * <li> {@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s, 3, z)},
- * {@code (s', 1, y)}, {@code (s', 2, y)} yields non-conflicting set
- * {@code {3}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
- * {@code {1,3}} =&gt; continue
- * </li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
- * {@code (s', 2, y)}, {@code (s'', 1, z)} yields non-conflicting set
- * {@code {1}} U conflicting sets {@code min({1,2})} U {@code min({1,2})} =
- * {@code {1}} =&gt; stop and predict 1</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 1, y)},
- * {@code (s', 2, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {1}} = {@code {1}} =&gt; stop and predict 1, can announce
- * ambiguity {@code {1,2}}</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 2, y)},
- * {@code (s', 3, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {2}} = {@code {1,2}} =&gt; continue</li>
- *
- * <li>{@code (s, 1, x)}, {@code (s, 2, x)}, {@code (s', 3, y)},
- * {@code (s', 4, y)} yields conflicting, reduced sets {@code {1}} U
- * {@code {3}} = {@code {1,3}} =&gt; continue</li>
- *
- * </ul>
- *
- * <p><strong>EXACT AMBIGUITY DETECTION</strong></p>
- *
- * <p>If all states report the same conflicting set of alternatives, then we
- * know we have the exact ambiguity set.</p>
- *
- * <p><code>|A_<em>i</em>|&gt;1</code> and
- * <code>A_<em>i</em> = A_<em>j</em></code> for all <em>i</em>, <em>j</em>.</p>
- *
- * <p>In other words, we continue examining lookahead until all {@code A_i}
- * have more than one alternative and all {@code A_i} are the same. If
- * {@code A={{1,2}, {1,3}}}, then regular LL prediction would terminate
- * because the resolved set is {@code {1}}. To determine what the real
- * ambiguity is, we have to know whether the ambiguity is between one and
- * two or one and three so we keep going. We can only stop prediction when
- * we need exact ambiguity detection when the sets look like
- * {@code A={{1,2}}} or {@code {{1,2},{1,2}}}, etc...</p>
- */
- static size_t resolvesToJustOneViableAlt(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Determines if every alternative subset in {@code altsets} contains more
- /// than one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if every <seealso cref="BitSet"/> in {@code altsets}
- /// has
- /// <seealso cref="BitSet#cardinality cardinality"/> &gt; 1, otherwise {@code
- /// false} </returns>
- static bool allSubsetsConflict(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Determines if any single alternative subset in {@code altsets} contains
- /// exactly one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if {@code altsets} contains a <seealso
- /// cref="BitSet"/> with
- /// <seealso cref="BitSet#cardinality cardinality"/> 1, otherwise {@code false}
- /// </returns>
- static bool hasNonConflictingAltSet(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Determines if any single alternative subset in {@code altsets} contains
- /// more than one alternative.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if {@code altsets} contains a <seealso
- /// cref="BitSet"/> with
- /// <seealso cref="BitSet#cardinality cardinality"/> &gt; 1, otherwise {@code
- /// false} </returns>
- static bool hasConflictingAltSet(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Determines if every alternative subset in {@code altsets} is equivalent.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> {@code true} if every member of {@code altsets} is equal to the
- /// others, otherwise {@code false} </returns>
- static bool allSubsetsEqual(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Returns the unique alternative predicted by all alternative subsets in
- /// {@code altsets}. If no such alternative exists, this method returns
- /// <seealso cref="ATN#INVALID_ALT_NUMBER"/>.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- static size_t getUniqueAlt(const std::vector<antlrcpp::BitSet> &altsets);
-
- /// <summary>
- /// Gets the complete set of represented alternatives for a collection of
- /// alternative subsets. This method returns the union of each <seealso
- /// cref="BitSet"/>
- /// in {@code altsets}.
- /// </summary>
- /// <param name="altsets"> a collection of alternative subsets </param>
- /// <returns> the set of represented alternatives in {@code altsets} </returns>
- static antlrcpp::BitSet getAlts(const std::vector<antlrcpp::BitSet> &altsets);
-
- /** Get union of all alts from configs. @since 4.5.1 */
- static antlrcpp::BitSet getAlts(ATNConfigSet *configs);
-
- /// <summary>
- /// This function gets the conflicting alt subsets from a configuration set.
- /// For each configuration {@code c} in {@code configs}:
- ///
- /// <pre>
- /// map[c] U= c.<seealso cref="ATNConfig#alt alt"/> # map hash/equals uses s and
- /// x, not
- /// alt and not pred
- /// </pre>
- /// </summary>
- static std::vector<antlrcpp::BitSet> getConflictingAltSubsets(ATNConfigSet *configs);
-
- /// <summary>
- /// Get a map from state to alt subset from a configuration set. For each
- /// configuration {@code c} in {@code configs}:
- ///
- /// <pre>
- /// map[c.<seealso cref="ATNConfig#state state"/>] U= c.<seealso
- /// cref="ATNConfig#alt alt"/>
- /// </pre>
- /// </summary>
- static std::unordered_map<ATNState*, antlrcpp::BitSet> getStateToAltMap(ATNConfigSet *configs);
-
- static bool hasStateAssociatedWithOneAlt(ATNConfigSet *configs);
-
- static size_t getSingleViableAlt(const std::vector<antlrcpp::BitSet> &altsets);
- };
-
-} // namespace atn
-} // namespace antlr4
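
Editorial note: as a companion to the LL_EXACT_AMBIG_DETECTION documentation in the header above, this compact sketch shows the stopping rule in isolation: every alternative subset must conflict (contain more than one alternative) and all subsets must be identical. std::set<int> stands in for antlrcpp::BitSet; this is an illustration under those assumptions, not the runtime implementation.

#include <algorithm>
#include <functional>
#include <iostream>
#include <set>
#include <vector>

// Every subset has cardinality > 1 (mirrors allSubsetsConflict()).
static bool allConflict(const std::vector<std::set<int>> &altsets) {
  return std::all_of(altsets.begin(), altsets.end(),
                     [](const std::set<int> &s) { return s.size() > 1; });
}

// Every subset equals its neighbours, i.e. all are the same (mirrors allSubsetsEqual()).
static bool allEqual(const std::vector<std::set<int>> &altsets) {
  return std::adjacent_find(altsets.begin(), altsets.end(),
                            std::not_equal_to<std::set<int>>()) == altsets.end();
}

int main() {
  std::vector<std::set<int>> exact = {{1, 2}, {1, 2}};   // exact ambiguity {1,2}: stop
  std::vector<std::set<int>> unsure = {{1, 2}, {1, 3}};  // keep consuming lookahead
  std::cout << (allConflict(exact) && allEqual(exact)) << "\n";    // prints 1
  std::cout << (allConflict(unsure) && allEqual(unsure)) << "\n";  // prints 0
  return 0;
}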
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.cpp
deleted file mode 100644
index 9fd86d67d4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/PredicateEvalInfo.h"
-#include "atn/LookaheadEventInfo.h"
-#include "Parser.h"
-#include "atn/ATNConfigSet.h"
-#include "support/CPPUtils.h"
-
-#include "atn/ProfilingATNSimulator.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlr4::dfa;
-using namespace antlrcpp;
-
-using namespace std::chrono;
-
-ProfilingATNSimulator::ProfilingATNSimulator(Parser *parser)
- : ParserATNSimulator(parser, parser->getInterpreter<ParserATNSimulator>()->atn,
- parser->getInterpreter<ParserATNSimulator>()->decisionToDFA,
- parser->getInterpreter<ParserATNSimulator>()->getSharedContextCache()) {
- for (size_t i = 0; i < atn.decisionToState.size(); i++) {
- _decisions.push_back(DecisionInfo(i));
- }
-}
-
-size_t ProfilingATNSimulator::adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) {
- auto onExit = finally([this](){
- _currentDecision = 0; // Originally -1, but that makes no sense (index into a vector and init value is also 0).
- });
-
- _sllStopIndex = -1;
- _llStopIndex = -1;
- _currentDecision = decision;
- high_resolution_clock::time_point start = high_resolution_clock::now();
- size_t alt = ParserATNSimulator::adaptivePredict(input, decision, outerContext);
- high_resolution_clock::time_point stop = high_resolution_clock::now();
- _decisions[decision].timeInPrediction += duration_cast<nanoseconds>(stop - start).count();
- _decisions[decision].invocations++;
-
- long long SLL_k = _sllStopIndex - _startIndex + 1;
- _decisions[decision].SLL_TotalLook += SLL_k;
- _decisions[decision].SLL_MinLook = _decisions[decision].SLL_MinLook == 0 ? SLL_k : std::min(_decisions[decision].SLL_MinLook, SLL_k);
- if (SLL_k > _decisions[decision].SLL_MaxLook) {
- _decisions[decision].SLL_MaxLook = SLL_k;
- _decisions[decision].SLL_MaxLookEvent = std::make_shared<LookaheadEventInfo>(decision, nullptr, alt, input, _startIndex, _sllStopIndex, false);
- }
-
- if (_llStopIndex >= 0) {
- long long LL_k = _llStopIndex - _startIndex + 1;
- _decisions[decision].LL_TotalLook += LL_k;
- _decisions[decision].LL_MinLook = _decisions[decision].LL_MinLook == 0 ? LL_k : std::min(_decisions[decision].LL_MinLook, LL_k);
- if (LL_k > _decisions[decision].LL_MaxLook) {
- _decisions[decision].LL_MaxLook = LL_k;
- _decisions[decision].LL_MaxLookEvent = std::make_shared<LookaheadEventInfo>(decision, nullptr, alt, input, _startIndex, _llStopIndex, true);
- }
- }
-
- return alt;
-}
-
-DFAState* ProfilingATNSimulator::getExistingTargetState(DFAState *previousD, size_t t) {
- // this method is called after each time the input position advances
- // during SLL prediction
- _sllStopIndex = (int)_input->index();
-
- DFAState *existingTargetState = ParserATNSimulator::getExistingTargetState(previousD, t);
- if (existingTargetState != nullptr) {
- _decisions[_currentDecision].SLL_DFATransitions++; // count only if we transition over a DFA state
- if (existingTargetState == ERROR.get()) {
- _decisions[_currentDecision].errors.push_back(
- ErrorInfo(_currentDecision, previousD->configs.get(), _input, _startIndex, _sllStopIndex, false)
- );
- }
- }
-
- _currentState = existingTargetState;
- return existingTargetState;
-}
-
-DFAState* ProfilingATNSimulator::computeTargetState(DFA &dfa, DFAState *previousD, size_t t) {
- DFAState *state = ParserATNSimulator::computeTargetState(dfa, previousD, t);
- _currentState = state;
- return state;
-}
-
-std::unique_ptr<ATNConfigSet> ProfilingATNSimulator::computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx) {
- if (fullCtx) {
- // this method is called after each time the input position advances
- // during full context prediction
- _llStopIndex = (int)_input->index();
- }
-
- std::unique_ptr<ATNConfigSet> reachConfigs = ParserATNSimulator::computeReachSet(closure, t, fullCtx);
- if (fullCtx) {
- _decisions[_currentDecision].LL_ATNTransitions++; // count computation even if error
- if (reachConfigs != nullptr) {
- } else { // no reach on current lookahead symbol. ERROR.
- // TODO: does not handle delayed errors per getSynValidOrSemInvalidAltThatFinishedDecisionEntryRule()
- _decisions[_currentDecision].errors.push_back(ErrorInfo(_currentDecision, closure, _input, _startIndex, _llStopIndex, true));
- }
- } else {
- ++_decisions[_currentDecision].SLL_ATNTransitions;
- if (reachConfigs != nullptr) {
- } else { // no reach on current lookahead symbol. ERROR.
- _decisions[_currentDecision].errors.push_back(ErrorInfo(_currentDecision, closure, _input, _startIndex, _sllStopIndex, false));
- }
- }
- return reachConfigs;
-}
-
-bool ProfilingATNSimulator::evalSemanticContext(Ref<const SemanticContext> const& pred, ParserRuleContext *parserCallStack,
- size_t alt, bool fullCtx) {
- bool result = ParserATNSimulator::evalSemanticContext(pred, parserCallStack, alt, fullCtx);
- if (!(std::dynamic_pointer_cast<const SemanticContext::PrecedencePredicate>(pred) != nullptr)) {
- bool fullContext = _llStopIndex >= 0;
- int stopIndex = fullContext ? _llStopIndex : _sllStopIndex;
- _decisions[_currentDecision].predicateEvals.push_back(
- PredicateEvalInfo(_currentDecision, _input, _startIndex, stopIndex, pred, result, alt, fullCtx));
- }
-
- return result;
-}
-
-void ProfilingATNSimulator::reportAttemptingFullContext(DFA &dfa, const BitSet &conflictingAlts, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex) {
- if (conflictingAlts.count() > 0) {
- conflictingAltResolvedBySLL = conflictingAlts.nextSetBit(0);
- } else {
- conflictingAltResolvedBySLL = configs->getAlts().nextSetBit(0);
- }
- _decisions[_currentDecision].LL_Fallback++;
- ParserATNSimulator::reportAttemptingFullContext(dfa, conflictingAlts, configs, startIndex, stopIndex);
-}
-
-void ProfilingATNSimulator::reportContextSensitivity(DFA &dfa, size_t prediction, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex) {
- if (prediction != conflictingAltResolvedBySLL) {
- _decisions[_currentDecision].contextSensitivities.push_back(
- ContextSensitivityInfo(_currentDecision, configs, _input, startIndex, stopIndex)
- );
- }
- ParserATNSimulator::reportContextSensitivity(dfa, prediction, configs, startIndex, stopIndex);
-}
-
-void ProfilingATNSimulator::reportAmbiguity(DFA &dfa, DFAState *D, size_t startIndex, size_t stopIndex, bool exact,
- const BitSet &ambigAlts, ATNConfigSet *configs) {
- size_t prediction;
- if (ambigAlts.count() > 0) {
- prediction = ambigAlts.nextSetBit(0);
- } else {
- prediction = configs->getAlts().nextSetBit(0);
- }
- if (configs->fullCtx && prediction != conflictingAltResolvedBySLL) {
- // Even though this is an ambiguity we are reporting, we can
- // still detect some context sensitivities. Both SLL and LL
- // are showing a conflict, hence an ambiguity, but if they resolve
- // to different minimum alternatives we have also identified a
- // context sensitivity.
- _decisions[_currentDecision].contextSensitivities.push_back(
- ContextSensitivityInfo(_currentDecision, configs, _input, startIndex, stopIndex)
- );
- }
- _decisions[_currentDecision].ambiguities.push_back(
- AmbiguityInfo(_currentDecision, configs, ambigAlts, _input, startIndex, stopIndex, configs->fullCtx)
- );
- ParserATNSimulator::reportAmbiguity(dfa, D, startIndex, stopIndex, exact, ambigAlts, configs);
-}
-
-std::vector<DecisionInfo> ProfilingATNSimulator::getDecisionInfo() const {
- return _decisions;
-}
-
-DFAState* ProfilingATNSimulator::getCurrentState() const {
- return _currentState;
-}
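
Editorial note: the deleted adaptivePredict() override above wraps the base prediction call in a wall-clock measurement and accumulates per-decision totals. The following standalone sketch reproduces just that bookkeeping pattern with std::chrono; DecisionStats and fakePredict are hypothetical stand-ins, not ANTLR API.

#include <chrono>
#include <cstddef>
#include <iostream>
#include <thread>
#include <vector>

struct DecisionStats {
  long long invocations = 0;
  long long timeInPredictionNs = 0;
};

// Pretend prediction work so the timing loop has something to measure.
static std::size_t fakePredict(std::size_t decision) {
  std::this_thread::sleep_for(std::chrono::microseconds(50));
  return decision % 3 + 1;  // pretend alternative
}

int main() {
  std::vector<DecisionStats> decisions(4);
  for (std::size_t d = 0; d < decisions.size(); ++d) {
    auto start = std::chrono::steady_clock::now();
    std::size_t alt = fakePredict(d);
    auto stop = std::chrono::steady_clock::now();
    decisions[d].invocations++;
    decisions[d].timeInPredictionNs +=
        std::chrono::duration_cast<std::chrono::nanoseconds>(stop - start).count();
    std::cout << "decision " << d << " -> alt " << alt << ", "
              << decisions[d].timeInPredictionNs << " ns\n";
  }
  return 0;
}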
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.h b/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.h
deleted file mode 100644
index 551efb8556..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/ProfilingATNSimulator.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ParserATNSimulator.h"
-#include "atn/DecisionInfo.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC ProfilingATNSimulator : public ParserATNSimulator {
- public:
- explicit ProfilingATNSimulator(Parser *parser);
-
- virtual size_t adaptivePredict(TokenStream *input, size_t decision, ParserRuleContext *outerContext) override;
-
- virtual std::vector<DecisionInfo> getDecisionInfo() const;
- virtual dfa::DFAState* getCurrentState() const;
-
- protected:
- std::vector<DecisionInfo> _decisions;
-
- int _sllStopIndex = 0;
- int _llStopIndex = 0;
-
- size_t _currentDecision = 0;
- dfa::DFAState *_currentState;
-
- /// <summary>
- /// At the point of LL failover, we record how SLL would resolve the conflict so that
- /// we can determine whether or not a decision / input pair is context-sensitive.
- /// If LL gives a different result than SLL's predicted alternative, we have a
- /// context sensitivity for sure. The converse is not necessarily true, however.
- /// It's possible that after conflict resolution chooses minimum alternatives,
- /// SLL could get the same answer as LL. Regardless of whether or not the result indicates
- /// an ambiguity, it is not treated as a context sensitivity because LL prediction
- /// was not required in order to produce a correct prediction for this decision and input sequence.
- /// It may in fact still be a context sensitivity but we don't know by looking at the
- /// minimum alternatives for the current input.
- /// </summary>
- size_t conflictingAltResolvedBySLL = 0;
-
- virtual dfa::DFAState* getExistingTargetState(dfa::DFAState *previousD, size_t t) override;
- virtual dfa::DFAState* computeTargetState(dfa::DFA &dfa, dfa::DFAState *previousD, size_t t) override;
- virtual std::unique_ptr<ATNConfigSet> computeReachSet(ATNConfigSet *closure, size_t t, bool fullCtx) override;
- virtual bool evalSemanticContext(Ref<const SemanticContext> const& pred, ParserRuleContext *parserCallStack,
- size_t alt, bool fullCtx) override;
- virtual void reportAttemptingFullContext(dfa::DFA &dfa, const antlrcpp::BitSet &conflictingAlts, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex) override;
- virtual void reportContextSensitivity(dfa::DFA &dfa, size_t prediction, ATNConfigSet *configs,
- size_t startIndex, size_t stopIndex) override;
- virtual void reportAmbiguity(dfa::DFA &dfa, dfa::DFAState *D, size_t startIndex, size_t stopIndex, bool exact,
- const antlrcpp::BitSet &ambigAlts, ATNConfigSet *configs) override;
- };
-
-} // namespace atn
-} // namespace antlr4
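
Editorial note: the conflictingAltResolvedBySLL comment above boils down to a single comparison at LL-fallback time, spelled out below with illustrative names (FallbackRecord is not part of the runtime).

#include <cstddef>
#include <iostream>

// Recorded at the moment prediction falls back from SLL to full LL.
struct FallbackRecord {
  std::size_t sllResolvedAlt;  // minimum alternative SLL would have chosen
};

// A differing LL result proves the decision needed the full parser context.
static bool isContextSensitive(const FallbackRecord &record, std::size_t llPrediction) {
  return llPrediction != record.sllResolvedAlt;
}

int main() {
  FallbackRecord record{1};                            // SLL resolved the conflict to alt 1
  std::cout << isContextSensitive(record, 2) << "\n";  // prints 1: LL picked alt 2 instead
  std::cout << isContextSensitive(record, 1) << "\n";  // prints 0: same answer as SLL
  return 0;
}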
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.cpp
deleted file mode 100644
index 342e550de9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.cpp
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/IntervalSet.h"
-
-#include "atn/RangeTransition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-RangeTransition::RangeTransition(ATNState *target, size_t from, size_t to) : Transition(TransitionType::RANGE, target), from(from), to(to) {
-}
-
-misc::IntervalSet RangeTransition::label() const {
- return misc::IntervalSet::of((int)from, (int)to);
-}
-
-bool RangeTransition::matches(size_t symbol, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return symbol >= from && symbol <= to;
-}
-
-std::string RangeTransition::toString() const {
- return "RANGE " + Transition::toString() + " { from: " + std::to_string(from) + ", to: " + std::to_string(to) + " }";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.h
deleted file mode 100644
index b75c60e247..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RangeTransition.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC RangeTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::RANGE; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- const size_t from;
- const size_t to;
-
- RangeTransition(ATNState *target, size_t from, size_t to);
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStartState.h
deleted file mode 100644
index 549491514b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStartState.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC RuleStartState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::RULE_START; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- RuleStopState *stopState = nullptr;
- bool isLeftRecursiveRule = false;
-
- RuleStartState() : ATNState(ATNStateType::RULE_START) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStopState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStopState.h
deleted file mode 100644
index 7792a1265c..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleStopState.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// The last node in the ATN for a rule, unless that rule is the start symbol.
- /// In that case, there is one transition to EOF. Later, we might encode
- /// references to all calls to this rule to compute FOLLOW sets for
- /// error handling.
- class ANTLR4CPP_PUBLIC RuleStopState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::RULE_STOP; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- RuleStopState() : ATNState(ATNStateType::RULE_STOP) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.cpp
deleted file mode 100644
index ba50dd03dd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/RuleStartState.h"
-#include "atn/RuleTransition.h"
-
-using namespace antlr4::atn;
-
-RuleTransition::RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, ATNState *followState)
- : RuleTransition(ruleStart, ruleIndex, 0, followState) {
-}
-
-RuleTransition::RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, int precedence, ATNState *followState)
- : Transition(TransitionType::RULE, ruleStart), ruleIndex(ruleIndex), precedence(precedence) {
- this->followState = followState;
-}
-
-bool RuleTransition::isEpsilon() const {
- return true;
-}
-
-bool RuleTransition::matches(size_t /*symbol*/, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return false;
-}
-
-std::string RuleTransition::toString() const {
- std::stringstream ss;
- ss << "RULE " << Transition::toString() << " { ruleIndex: " << ruleIndex << ", precedence: " << precedence <<
- ", followState: " << std::hex << followState << " }";
- return ss.str();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.h
deleted file mode 100644
index 396ef700f2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/RuleTransition.h
+++ /dev/null
@@ -1,42 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC RuleTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::RULE; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- /// Ptr to the rule definition object for this rule ref.
- const size_t ruleIndex; // no Rule object at runtime
-
- const int precedence;
-
- /// What node to begin computations following ref to rule.
- ATNState *followState;
-
- /// @deprecated Use
- /// <seealso cref="#RuleTransition(RuleStartState, size_t, int, ATNState)"/> instead.
- RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, ATNState *followState);
-
- RuleTransition(RuleStartState *ruleStart, size_t ruleIndex, int precedence, ATNState *followState);
- RuleTransition(RuleTransition const&) = delete;
- RuleTransition& operator=(RuleTransition const&) = delete;
-
- virtual bool isEpsilon() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.cpp
deleted file mode 100644
index 7d7fe068df..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.cpp
+++ /dev/null
@@ -1,418 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include <functional>
-#include <unordered_set>
-
-#include "misc/MurmurHash.h"
-#include "support/Casts.h"
-#include "support/CPPUtils.h"
-#include "support/Arrays.h"
-
-#include "SemanticContext.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
- struct SemanticContextHasher final {
- size_t operator()(const SemanticContext *semanticContext) const {
- return semanticContext->hashCode();
- }
- };
-
- struct SemanticContextComparer final {
- bool operator()(const SemanticContext *lhs, const SemanticContext *rhs) const {
- return *lhs == *rhs;
- }
- };
-
- template <typename Comparer>
- void insertSemanticContext(const Ref<const SemanticContext> &semanticContext,
- std::unordered_set<const SemanticContext*, SemanticContextHasher, SemanticContextComparer> &operandSet,
- std::vector<Ref<const SemanticContext>> &operandList,
- Ref<const SemanticContext::PrecedencePredicate> &precedencePredicate,
- Comparer comparer) {
- if (semanticContext != nullptr) {
- if (semanticContext->getContextType() == SemanticContextType::PRECEDENCE) {
- if (precedencePredicate == nullptr || comparer(downCast<const SemanticContext::PrecedencePredicate*>(semanticContext.get())->precedence, precedencePredicate->precedence)) {
- precedencePredicate = std::static_pointer_cast<const SemanticContext::PrecedencePredicate>(semanticContext);
- }
- } else {
- auto [existing, inserted] = operandSet.insert(semanticContext.get());
- if (inserted) {
- operandList.push_back(semanticContext);
- }
- }
- }
- }
-
- template <typename Comparer>
- void insertSemanticContext(Ref<const SemanticContext> &&semanticContext,
- std::unordered_set<const SemanticContext*, SemanticContextHasher, SemanticContextComparer> &operandSet,
- std::vector<Ref<const SemanticContext>> &operandList,
- Ref<const SemanticContext::PrecedencePredicate> &precedencePredicate,
- Comparer comparer) {
- if (semanticContext != nullptr) {
- if (semanticContext->getContextType() == SemanticContextType::PRECEDENCE) {
- if (precedencePredicate == nullptr || comparer(downCast<const SemanticContext::PrecedencePredicate*>(semanticContext.get())->precedence, precedencePredicate->precedence)) {
- precedencePredicate = std::static_pointer_cast<const SemanticContext::PrecedencePredicate>(std::move(semanticContext));
- }
- } else {
- auto [existing, inserted] = operandSet.insert(semanticContext.get());
- if (inserted) {
- operandList.push_back(std::move(semanticContext));
- }
- }
- }
- }
-
- size_t predictOperandCapacity(const Ref<const SemanticContext> &x) {
- switch (x->getContextType()) {
- case SemanticContextType::AND:
- return downCast<const SemanticContext::AND&>(*x).getOperands().size();
- case SemanticContextType::OR:
- return downCast<const SemanticContext::OR&>(*x).getOperands().size();
- default:
- return 1;
- }
- }
-
- size_t predictOperandCapacity(const Ref<const SemanticContext> &a, const Ref<const SemanticContext> &b) {
- return predictOperandCapacity(a) + predictOperandCapacity(b);
- }
-
-}
-
-//------------------ Predicate -----------------------------------------------------------------------------------------
-
-SemanticContext::Predicate::Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent)
- : SemanticContext(SemanticContextType::PREDICATE), ruleIndex(ruleIndex), predIndex(predIndex), isCtxDependent(isCtxDependent) {}
-
-bool SemanticContext::Predicate::eval(Recognizer *parser, RuleContext *parserCallStack) const {
- RuleContext *localctx = nullptr;
- if (isCtxDependent) {
- localctx = parserCallStack;
- }
- return parser->sempred(localctx, ruleIndex, predIndex);
-}
-
-size_t SemanticContext::Predicate::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize();
- hashCode = misc::MurmurHash::update(hashCode, static_cast<size_t>(getContextType()));
- hashCode = misc::MurmurHash::update(hashCode, ruleIndex);
- hashCode = misc::MurmurHash::update(hashCode, predIndex);
- hashCode = misc::MurmurHash::update(hashCode, isCtxDependent ? 1 : 0);
- hashCode = misc::MurmurHash::finish(hashCode, 4);
- return hashCode;
-}
-
-bool SemanticContext::Predicate::equals(const SemanticContext &other) const {
- if (this == &other) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const Predicate &p = downCast<const Predicate&>(other);
- return ruleIndex == p.ruleIndex && predIndex == p.predIndex && isCtxDependent == p.isCtxDependent;
-}
-
-std::string SemanticContext::Predicate::toString() const {
- return std::string("{") + std::to_string(ruleIndex) + std::string(":") + std::to_string(predIndex) + std::string("}?");
-}
-
-//------------------ PrecedencePredicate -------------------------------------------------------------------------------
-
-SemanticContext::PrecedencePredicate::PrecedencePredicate(int precedence) : SemanticContext(SemanticContextType::PRECEDENCE), precedence(precedence) {}
-
-bool SemanticContext::PrecedencePredicate::eval(Recognizer *parser, RuleContext *parserCallStack) const {
- return parser->precpred(parserCallStack, precedence);
-}
-
-Ref<const SemanticContext> SemanticContext::PrecedencePredicate::evalPrecedence(Recognizer *parser,
- RuleContext *parserCallStack) const {
- if (parser->precpred(parserCallStack, precedence)) {
- return SemanticContext::Empty::Instance;
- }
- return nullptr;
-}
-
-size_t SemanticContext::PrecedencePredicate::hashCode() const {
- size_t hashCode = misc::MurmurHash::initialize();
- hashCode = misc::MurmurHash::update(hashCode, static_cast<size_t>(getContextType()));
- hashCode = misc::MurmurHash::update(hashCode, static_cast<size_t>(precedence));
- return misc::MurmurHash::finish(hashCode, 2);
-}
-
-bool SemanticContext::PrecedencePredicate::equals(const SemanticContext &other) const {
- if (this == &other) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const PrecedencePredicate &predicate = downCast<const PrecedencePredicate&>(other);
- return precedence == predicate.precedence;
-}
-
-std::string SemanticContext::PrecedencePredicate::toString() const {
- return "{" + std::to_string(precedence) + ">=prec}?";
-}
-
-//------------------ AND -----------------------------------------------------------------------------------------------
-
-SemanticContext::AND::AND(Ref<const SemanticContext> a, Ref<const SemanticContext> b) : Operator(SemanticContextType::AND) {
- std::unordered_set<const SemanticContext*, SemanticContextHasher, SemanticContextComparer> operands;
- Ref<const SemanticContext::PrecedencePredicate> precedencePredicate;
-
- _opnds.reserve(predictOperandCapacity(a, b) + 1);
-
- if (a->getContextType() == SemanticContextType::AND) {
- for (const auto &operand : downCast<const AND*>(a.get())->getOperands()) {
- insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::less<int>{});
- }
- } else {
- insertSemanticContext(std::move(a), operands, _opnds, precedencePredicate, std::less<int>{});
- }
-
- if (b->getContextType() == SemanticContextType::AND) {
- for (const auto &operand : downCast<const AND*>(b.get())->getOperands()) {
- insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::less<int>{});
- }
- } else {
- insertSemanticContext(std::move(b), operands, _opnds, precedencePredicate, std::less<int>{});
- }
-
- if (precedencePredicate != nullptr) {
- // interested in the transition with the lowest precedence
- auto [existing, inserted] = operands.insert(precedencePredicate.get());
- if (inserted) {
- _opnds.push_back(std::move(precedencePredicate));
- }
- }
-}
-
-const std::vector<Ref<const SemanticContext>>& SemanticContext::AND::getOperands() const {
- return _opnds;
-}
-
-bool SemanticContext::AND::equals(const SemanticContext &other) const {
- if (this == &other) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const AND &context = downCast<const AND&>(other);
- return Arrays::equals(getOperands(), context.getOperands());
-}
-
-size_t SemanticContext::AND::hashCode() const {
- size_t hash = misc::MurmurHash::initialize();
- hash = misc::MurmurHash::update(hash, static_cast<size_t>(getContextType()));
- return misc::MurmurHash::hashCode(getOperands(), hash);
-}
-
-bool SemanticContext::AND::eval(Recognizer *parser, RuleContext *parserCallStack) const {
- for (const auto &opnd : getOperands()) {
- if (!opnd->eval(parser, parserCallStack)) {
- return false;
- }
- }
- return true;
-}
-
-Ref<const SemanticContext> SemanticContext::AND::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const {
- bool differs = false;
- std::vector<Ref<const SemanticContext>> operands;
- for (const auto &context : getOperands()) {
- auto evaluated = context->evalPrecedence(parser, parserCallStack);
- differs |= (evaluated != context);
- if (evaluated == nullptr) {
- // The AND context is false if any element is false.
- return nullptr;
- }
- if (evaluated != Empty::Instance) {
- // Reduce the result by skipping true elements.
- operands.push_back(std::move(evaluated));
- }
- }
-
- if (!differs) {
- return shared_from_this();
- }
-
- if (operands.empty()) {
- // All elements were true, so the AND context is true.
- return Empty::Instance;
- }
-
- Ref<const SemanticContext> result = std::move(operands[0]);
- for (size_t i = 1; i < operands.size(); ++i) {
- result = SemanticContext::And(std::move(result), std::move(operands[i]));
- }
-
- return result;
-}
-
-std::string SemanticContext::AND::toString() const {
- std::string tmp;
- for (const auto &var : getOperands()) {
- tmp += var->toString() + " && ";
- }
- return tmp;
-}
-
-//------------------ OR ------------------------------------------------------------------------------------------------
-
-SemanticContext::OR::OR(Ref<const SemanticContext> a, Ref<const SemanticContext> b) : Operator(SemanticContextType::OR) {
- std::unordered_set<const SemanticContext*, SemanticContextHasher, SemanticContextComparer> operands;
- Ref<const SemanticContext::PrecedencePredicate> precedencePredicate;
-
- _opnds.reserve(predictOperandCapacity(a, b) + 1);
-
- if (a->getContextType() == SemanticContextType::OR) {
- for (const auto &operand : downCast<const OR*>(a.get())->getOperands()) {
- insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::greater<int>{});
- }
- } else {
- insertSemanticContext(std::move(a), operands, _opnds, precedencePredicate, std::greater<int>{});
- }
-
- if (b->getContextType() == SemanticContextType::OR) {
- for (const auto &operand : downCast<const OR*>(b.get())->getOperands()) {
- insertSemanticContext(operand, operands, _opnds, precedencePredicate, std::greater<int>{});
- }
- } else {
- insertSemanticContext(std::move(b), operands, _opnds, precedencePredicate, std::greater<int>{});
- }
-
- if (precedencePredicate != nullptr) {
- // interested in the transition with the highest precedence
- auto [existing, inserted] = operands.insert(precedencePredicate.get());
- if (inserted) {
- _opnds.push_back(std::move(precedencePredicate));
- }
- }
-}
-
-const std::vector<Ref<const SemanticContext>>& SemanticContext::OR::getOperands() const {
- return _opnds;
-}
-
-bool SemanticContext::OR::equals(const SemanticContext &other) const {
- if (this == &other) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const OR &context = downCast<const OR&>(other);
- return Arrays::equals(getOperands(), context.getOperands());
-}
-
-size_t SemanticContext::OR::hashCode() const {
- size_t hash = misc::MurmurHash::initialize();
- hash = misc::MurmurHash::update(hash, static_cast<size_t>(getContextType()));
- return misc::MurmurHash::hashCode(getOperands(), hash);
-}
-
-bool SemanticContext::OR::eval(Recognizer *parser, RuleContext *parserCallStack) const {
- for (const auto &opnd : getOperands()) {
- if (opnd->eval(parser, parserCallStack)) {
- return true;
- }
- }
- return false;
-}
-
-Ref<const SemanticContext> SemanticContext::OR::evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const {
- bool differs = false;
- std::vector<Ref<const SemanticContext>> operands;
- for (const auto &context : getOperands()) {
- auto evaluated = context->evalPrecedence(parser, parserCallStack);
- differs |= (evaluated != context);
- if (evaluated == Empty::Instance) {
- // The OR context is true if any element is true.
- return Empty::Instance;
- }
- if (evaluated != nullptr) {
- // Reduce the result by skipping false elements.
- operands.push_back(std::move(evaluated));
- }
- }
-
- if (!differs) {
- return shared_from_this();
- }
-
- if (operands.empty()) {
- // All elements were false, so the OR context is false.
- return nullptr;
- }
-
- Ref<const SemanticContext> result = std::move(operands[0]);
- for (size_t i = 1; i < operands.size(); ++i) {
- result = SemanticContext::Or(std::move(result), std::move(operands[i]));
- }
-
- return result;
-}
-
-std::string SemanticContext::OR::toString() const {
- std::string tmp;
- for(const auto &var : getOperands()) {
- tmp += var->toString() + " || ";
- }
- return tmp;
-}
-
-//------------------ SemanticContext -----------------------------------------------------------------------------------
-
-const Ref<const SemanticContext> SemanticContext::Empty::Instance = std::make_shared<Predicate>(INVALID_INDEX, INVALID_INDEX, false);
-
-Ref<const SemanticContext> SemanticContext::evalPrecedence(Recognizer * /*parser*/, RuleContext * /*parserCallStack*/) const {
- return shared_from_this();
-}
-
-Ref<const SemanticContext> SemanticContext::And(Ref<const SemanticContext> a, Ref<const SemanticContext> b) {
- if (!a || a == Empty::Instance) {
- return b;
- }
-
- if (!b || b == Empty::Instance) {
- return a;
- }
-
- Ref<AND> result = std::make_shared<AND>(std::move(a), std::move(b));
- if (result->getOperands().size() == 1) {
- return result->getOperands()[0];
- }
-
- return result;
-}
-
-Ref<const SemanticContext> SemanticContext::Or(Ref<const SemanticContext> a, Ref<const SemanticContext> b) {
- if (!a) {
- return b;
- }
- if (!b) {
- return a;
- }
-
- if (a == Empty::Instance || b == Empty::Instance) {
- return Empty::Instance;
- }
-
- Ref<OR> result = std::make_shared<OR>(std::move(a), std::move(b));
- if (result->getOperands().size() == 1) {
- return result->getOperands()[0];
- }
-
- return result;
-}
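
Editorial note: the AND::evalPrecedence() logic deleted above reduces a conjunction by three rules: any false operand makes the whole context false, true operands are dropped, and a surviving predicate keeps the result undecided. Below is a minimal sketch of that reduction, using std::optional<bool> as a stand-in for Ref<const SemanticContext> (false ~ nullptr, true ~ Empty::Instance, nullopt ~ a remaining predicate); it is illustrative only.

#include <iostream>
#include <optional>
#include <vector>

// Returns false if any operand is false, true if all operands are true,
// and nullopt if at least one operand is still an unresolved predicate.
static std::optional<bool> reduceAnd(const std::vector<std::optional<bool>> &operands) {
  bool sawUnresolved = false;
  for (const auto &op : operands) {
    if (op.has_value() && !*op) {
      return false;          // the AND context is false if any element is false
    }
    if (!op.has_value()) {
      sawUnresolved = true;  // keep the predicate; cannot decide yet
    }
    // a true operand is simply skipped (reduced away)
  }
  return sawUnresolved ? std::optional<bool>{} : std::optional<bool>{true};
}

int main() {
  std::cout << reduceAnd({true, true}).value() << "\n";              // prints 1: all true
  std::cout << reduceAnd({true, false}).value() << "\n";             // prints 0: one false
  std::cout << reduceAnd({true, std::nullopt}).has_value() << "\n";  // prints 0: undecided
  return 0;
}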
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.h b/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.h
deleted file mode 100644
index 8116fc0b56..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContext.h
+++ /dev/null
@@ -1,237 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Recognizer.h"
-#include "support/CPPUtils.h"
-#include "atn/SemanticContextType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// A tree structure used to record the semantic context in which
- /// an ATN configuration is valid. It's either a single predicate,
- /// a conjunction "p1 && p2", or a sum of products "p1||p2".
- ///
- /// I have scoped the AND, OR, and Predicate subclasses of
- /// SemanticContext within the scope of this outer class.
- class ANTLR4CPP_PUBLIC SemanticContext : public std::enable_shared_from_this<SemanticContext> {
- public:
- virtual ~SemanticContext() = default;
-
- SemanticContextType getContextType() const { return _contextType; }
-
- /// <summary>
- /// For context independent predicates, we evaluate them without a local
- /// context (i.e., null context). That way, we can evaluate them without
- /// having to create proper rule-specific context during prediction (as
- /// opposed to the parser, which creates them naturally). In a practical
- /// sense, this avoids a cast exception from RuleContext to myruleContext.
- /// <p/>
- /// For context dependent predicates, we must pass in a local context so that
- /// references such as $arg evaluate properly as _localctx.arg. We only
- /// capture context dependent predicates in the context in which we begin
- /// prediction, so we passed in the outer context here in case of context
- /// dependent predicate evaluation.
- /// </summary>
- virtual bool eval(Recognizer *parser, RuleContext *parserCallStack) const = 0;
-
- /**
- * Evaluate the precedence predicates for the context and reduce the result.
- *
- * @param parser The parser instance.
- * @param parserCallStack
- * @return The simplified semantic context after precedence predicates are
- * evaluated, which will be one of the following values.
- * <ul>
- * <li>{@link #NONE}: if the predicate simplifies to {@code true} after
- * precedence predicates are evaluated.</li>
- * <li>{@code null}: if the predicate simplifies to {@code false} after
- * precedence predicates are evaluated.</li>
- * <li>{@code this}: if the semantic context is not changed as a result of
- * precedence predicate evaluation.</li>
- * <li>A non-{@code null} {@link SemanticContext}: the new simplified
- * semantic context after precedence predicates are evaluated.</li>
- * </ul>
- */
- virtual Ref<const SemanticContext> evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const;
-
- virtual size_t hashCode() const = 0;
-
- virtual bool equals(const SemanticContext &other) const = 0;
-
- virtual std::string toString() const = 0;
-
- static Ref<const SemanticContext> And(Ref<const SemanticContext> a, Ref<const SemanticContext> b);
-
- /// See also: ParserATNSimulator::getPredsForAmbigAlts.
- static Ref<const SemanticContext> Or(Ref<const SemanticContext> a, Ref<const SemanticContext> b);
-
- class Empty;
- class Predicate;
- class PrecedencePredicate;
- class Operator;
- class AND;
- class OR;
-
- protected:
- explicit SemanticContext(SemanticContextType contextType) : _contextType(contextType) {}
-
- private:
- const SemanticContextType _contextType;
- };
-
- inline bool operator==(const SemanticContext &lhs, const SemanticContext &rhs) {
- return lhs.equals(rhs);
- }
-
- inline bool operator!=(const SemanticContext &lhs, const SemanticContext &rhs) {
- return !operator==(lhs, rhs);
- }
-
- class ANTLR4CPP_PUBLIC SemanticContext::Empty : public SemanticContext {
- public:
- /**
- * The default {@link SemanticContext}, which is semantically equivalent to
- * a predicate of the form {@code {true}?}.
- */
- static const Ref<const SemanticContext> Instance;
- };
-
- class ANTLR4CPP_PUBLIC SemanticContext::Predicate final : public SemanticContext {
- public:
- static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::PREDICATE; }
-
- static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); }
-
- const size_t ruleIndex;
- const size_t predIndex;
- const bool isCtxDependent; // e.g., $i ref in pred
-
- Predicate(size_t ruleIndex, size_t predIndex, bool isCtxDependent);
-
- bool eval(Recognizer *parser, RuleContext *parserCallStack) const override;
- size_t hashCode() const override;
- bool equals(const SemanticContext &other) const override;
- std::string toString() const override;
- };
-
- class ANTLR4CPP_PUBLIC SemanticContext::PrecedencePredicate final : public SemanticContext {
- public:
- static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::PRECEDENCE; }
-
- static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); }
-
- const int precedence;
-
- explicit PrecedencePredicate(int precedence);
-
- bool eval(Recognizer *parser, RuleContext *parserCallStack) const override;
- Ref<const SemanticContext> evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override;
- size_t hashCode() const override;
- bool equals(const SemanticContext &other) const override;
- std::string toString() const override;
- };
-
- /**
- * This is the base class for semantic context "operators", which operate on
- * a collection of semantic context "operands".
- *
- * @since 4.3
- */
- class ANTLR4CPP_PUBLIC SemanticContext::Operator : public SemanticContext {
- public:
- static bool is(const SemanticContext &semanticContext) {
- const auto contextType = semanticContext.getContextType();
- return contextType == SemanticContextType::AND || contextType == SemanticContextType::OR;
- }
-
- static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); }
-
- /**
- * Gets the operands for the semantic context operator.
- *
- * @return a collection of {@link SemanticContext} operands for the
- * operator.
- *
- * @since 4.3
- */
-
- virtual const std::vector<Ref<const SemanticContext>>& getOperands() const = 0;
-
- protected:
- using SemanticContext::SemanticContext;
- };
-
- /**
- * A semantic context which is true whenever none of the contained contexts
- * is false.
- */
- class ANTLR4CPP_PUBLIC SemanticContext::AND final : public SemanticContext::Operator {
- public:
- static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::AND; }
-
- static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); }
-
- AND(Ref<const SemanticContext> a, Ref<const SemanticContext> b);
-
- const std::vector<Ref<const SemanticContext>>& getOperands() const override;
-
- /**
- * The evaluation of predicates by this context is short-circuiting, but
- * unordered.
- */
- bool eval(Recognizer *parser, RuleContext *parserCallStack) const override;
- Ref<const SemanticContext> evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override;
- size_t hashCode() const override;
- bool equals(const SemanticContext &other) const override;
- std::string toString() const override;
-
- private:
- std::vector<Ref<const SemanticContext>> _opnds;
- };
-
- /**
- * A semantic context which is true whenever at least one of the contained
- * contexts is true.
- */
- class ANTLR4CPP_PUBLIC SemanticContext::OR final : public SemanticContext::Operator {
- public:
- static bool is(const SemanticContext &semanticContext) { return semanticContext.getContextType() == SemanticContextType::OR; }
-
- static bool is(const SemanticContext *semanticContext) { return semanticContext != nullptr && is(*semanticContext); }
-
- OR(Ref<const SemanticContext> a, Ref<const SemanticContext> b);
-
- const std::vector<Ref<const SemanticContext>>& getOperands() const override;
-
- /**
- * The evaluation of predicates by this context is short-circuiting, but
- * unordered.
- */
- bool eval(Recognizer *parser, RuleContext *parserCallStack) const override;
- Ref<const SemanticContext> evalPrecedence(Recognizer *parser, RuleContext *parserCallStack) const override;
- size_t hashCode() const override;
- bool equals(const SemanticContext &other) const override;
- std::string toString() const override;
-
- private:
- std::vector<Ref<const SemanticContext>> _opnds;
- };
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::atn::SemanticContext> {
- size_t operator()(const ::antlr4::atn::SemanticContext &semanticContext) const {
- return semanticContext.hashCode();
- }
- };
-
-} // namespace std
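
The operator==/operator!= helpers and the std::hash specialization at the bottom of this header route through the virtual equals()/hashCode() methods. A small sketch of what that gives callers; the indices are sample values, and it assumes Predicate::equals() compares rule index, predicate index and context dependence as in the upstream runtime (its body lives in the .cpp removed above, outside this hunk).

    #include <cassert>
    #include <functional>
    #include "atn/SemanticContext.h"

    using namespace antlr4::atn;

    int main() {
      // Same rule index, predicate index and context dependence -> equal predicates.
      SemanticContext::Predicate p(3, 0, false);
      SemanticContext::Predicate q(3, 0, false);
      SemanticContext::Predicate r(4, 0, false);

      assert(p == q);   // operator== forwards to the virtual equals()
      assert(p != r);
      // std::hash<SemanticContext> forwards to the virtual hashCode().
      assert(std::hash<SemanticContext>{}(p) == std::hash<SemanticContext>{}(q));
      return 0;
    }
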
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContextType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContextType.h
deleted file mode 100644
index bca6e421d2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SemanticContextType.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- enum class SemanticContextType : size_t {
- PREDICATE = 1,
- PRECEDENCE = 2,
- AND = 3,
- OR = 4,
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SerializedATNView.h b/contrib/libs/antlr4_cpp_runtime/src/atn/SerializedATNView.h
deleted file mode 100644
index a723589bc3..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SerializedATNView.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-#include <iterator>
-#include <vector>
-
-#include "antlr4-common.h"
-#include "misc/MurmurHash.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC SerializedATNView final {
- public:
- using value_type = int32_t;
- using size_type = size_t;
- using difference_type = ptrdiff_t;
- using reference = int32_t&;
- using const_reference = const int32_t&;
- using pointer = int32_t*;
- using const_pointer = const int32_t*;
- using iterator = const_pointer;
- using const_iterator = const_pointer;
- using reverse_iterator = std::reverse_iterator<iterator>;
- using const_reverse_iterator = std::reverse_iterator<const_iterator>;
-
- SerializedATNView() = default;
-
- SerializedATNView(const_pointer data, size_type size) : _data(data), _size(size) {}
-
- SerializedATNView(const std::vector<int32_t> &serializedATN) : _data(serializedATN.data()), _size(serializedATN.size()) {}
-
- SerializedATNView(const SerializedATNView&) = default;
-
- SerializedATNView& operator=(const SerializedATNView&) = default;
-
- const_iterator begin() const { return data(); }
-
- const_iterator cbegin() const { return data(); }
-
- const_iterator end() const { return data() + size(); }
-
- const_iterator cend() const { return data() + size(); }
-
- const_reverse_iterator rbegin() const { return const_reverse_iterator(end()); }
-
- const_reverse_iterator crbegin() const { return const_reverse_iterator(cend()); }
-
- const_reverse_iterator rend() const { return const_reverse_iterator(begin()); }
-
- const_reverse_iterator crend() const { return const_reverse_iterator(cbegin()); }
-
- bool empty() const { return size() == 0; }
-
- const_pointer data() const { return _data; }
-
- size_type size() const { return _size; }
-
- size_type size_bytes() const { return size() * sizeof(value_type); }
-
- const_reference operator[](size_type index) const { return _data[index]; }
-
- private:
- const_pointer _data = nullptr;
- size_type _size = 0;
- };
-
- inline bool operator==(const SerializedATNView &lhs, const SerializedATNView &rhs) {
- return (lhs.data() == rhs.data() && lhs.size() == rhs.size()) ||
- (lhs.size() == rhs.size() && std::memcmp(lhs.data(), rhs.data(), lhs.size_bytes()) == 0);
- }
-
- inline bool operator!=(const SerializedATNView &lhs, const SerializedATNView &rhs) {
- return !operator==(lhs, rhs);
- }
-
- inline bool operator<(const SerializedATNView &lhs, const SerializedATNView &rhs) {
- int diff = std::memcmp(lhs.data(), rhs.data(), std::min(lhs.size_bytes(), rhs.size_bytes()));
- return diff < 0 || (diff == 0 && lhs.size() < rhs.size());
- }
-
-} // namespace atn
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::atn::SerializedATNView> {
- size_t operator()(const ::antlr4::atn::SerializedATNView &serializedATNView) const {
- return ::antlr4::misc::MurmurHash::hashCode(serializedATNView.data(), serializedATNView.size());
- }
- };
-
-} // namespace std
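
SerializedATNView is a non-owning, trivially copyable window over serialized ATN data with value-style comparison and hashing. A short sketch of the intended use; the sample numbers are arbitrary and the vector merely stands in for real serialized ATN words.

    #include <cassert>
    #include <cstdint>
    #include <vector>
    #include "atn/SerializedATNView.h"

    using antlr4::atn::SerializedATNView;

    int main() {
      std::vector<int32_t> data{4, 8, 15, 16, 23, 42};   // stand-in for serialized ATN words

      SerializedATNView whole(data);             // non-owning view over the whole vector
      SerializedATNView prefix(data.data(), 3);  // view over the first three values

      assert(whole.size() == data.size());
      assert(!prefix.empty() && prefix[2] == 15);
      assert(prefix < whole);                    // memcmp ties are broken by length
      assert(whole == SerializedATNView(data));  // same pointer and size compare equal
      return 0;
    }
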
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.cpp
deleted file mode 100644
index 95ec514edb..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Token.h"
-#include "misc/IntervalSet.h"
-
-#include "atn/SetTransition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-
-SetTransition::SetTransition(TransitionType transitionType, ATNState *target, misc::IntervalSet aSet)
- : Transition(transitionType, target), set(aSet.isEmpty() ? misc::IntervalSet::of(Token::INVALID_TYPE) : std::move(aSet)) {
-}
-
-misc::IntervalSet SetTransition::label() const {
- return set;
-}
-
-bool SetTransition::matches(size_t symbol, size_t /*minVocabSymbol*/, size_t /*maxVocabSymbol*/) const {
- return set.contains(symbol);
-}
-
-std::string SetTransition::toString() const {
- return "SET " + Transition::toString() + " { set: " + set.toString() + "}";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.h
deleted file mode 100644
index 3a3343ec25..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SetTransition.h
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// A transition containing a set of values. </summary>
- class ANTLR4CPP_PUBLIC SetTransition : public Transition {
- public:
- static bool is(const Transition &transition) {
- const auto transitionType = transition.getTransitionType();
- return transitionType == TransitionType::SET || transitionType == TransitionType::NOT_SET;
- }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- const misc::IntervalSet set;
-
- SetTransition(ATNState *target, misc::IntervalSet set) : SetTransition(TransitionType::SET, target, std::move(set)) {}
-
- virtual misc::IntervalSet label() const override;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
-
- protected:
- SetTransition(TransitionType transitionType, ATNState *target, misc::IntervalSet set);
- };
-
-} // namespace atn
-} // namespace antlr4
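
A sketch of how a SetTransition answers matches() against its IntervalSet label. The token type 5 and the vocabulary bounds are arbitrary sample values, and TokensStartState is used only because any concrete ATNState works as a transition target.

    #include <cassert>
    #include <iostream>
    #include "atn/SetTransition.h"
    #include "atn/TokensStartState.h"
    #include "misc/IntervalSet.h"

    using namespace antlr4::atn;

    int main() {
      TokensStartState target;  // transitions require a non-null target state

      // The transition matches exactly the token types contained in its IntervalSet.
      SetTransition set(&target, antlr4::misc::IntervalSet::of(5));
      assert(!set.label().isEmpty());
      assert(set.matches(5, /*minVocabSymbol=*/1, /*maxVocabSymbol=*/100));
      assert(!set.matches(6, 1, 100));

      std::cout << set.toString() << "\n";  // "SET (Transition ...) { set: ... }"
      return 0;
    }
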
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.cpp
deleted file mode 100644
index 66a91936e9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/SingletonPredictionContext.h"
-
-#include "support/Casts.h"
-#include "misc/MurmurHash.h"
-
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-namespace {
-
- bool cachedHashCodeEqual(size_t lhs, size_t rhs) {
- return lhs == rhs || lhs == 0 || rhs == 0;
- }
-
-}
-
-SingletonPredictionContext::SingletonPredictionContext(Ref<const PredictionContext> parent, size_t returnState)
- : PredictionContext(PredictionContextType::SINGLETON), parent(std::move(parent)), returnState(returnState) {
- assert(returnState != ATNState::INVALID_STATE_NUMBER);
-}
-
-Ref<const SingletonPredictionContext> SingletonPredictionContext::create(Ref<const PredictionContext> parent, size_t returnState) {
- if (returnState == EMPTY_RETURN_STATE && parent == nullptr) {
- // someone can pass in the bits of an array ctx that mean $
- return std::dynamic_pointer_cast<const SingletonPredictionContext>(EMPTY);
- }
- return std::make_shared<SingletonPredictionContext>(std::move(parent), returnState);
-}
-
-bool SingletonPredictionContext::isEmpty() const {
- return parent == nullptr && returnState == EMPTY_RETURN_STATE;
-}
-
-size_t SingletonPredictionContext::size() const {
- return 1;
-}
-
-const Ref<const PredictionContext>& SingletonPredictionContext::getParent(size_t index) const {
- assert(index == 0);
- static_cast<void>(index);
- return parent;
-}
-
-size_t SingletonPredictionContext::getReturnState(size_t index) const {
- assert(index == 0);
- static_cast<void>(index);
- return returnState;
-}
-
-size_t SingletonPredictionContext::hashCodeImpl() const {
- size_t hash = misc::MurmurHash::initialize();
- hash = misc::MurmurHash::update(hash, static_cast<size_t>(getContextType()));
- hash = misc::MurmurHash::update(hash, parent);
- hash = misc::MurmurHash::update(hash, returnState);
- return misc::MurmurHash::finish(hash, 3);
-}
-
-bool SingletonPredictionContext::equals(const PredictionContext &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- if (getContextType() != other.getContextType()) {
- return false;
- }
- const auto &singleton = downCast<const SingletonPredictionContext&>(other);
- return returnState == singleton.returnState &&
- cachedHashCodeEqual(cachedHashCode(), singleton.cachedHashCode()) &&
- (parent == singleton.parent || (parent != nullptr && singleton.parent != nullptr && *parent == *singleton.parent));
-}
-
-std::string SingletonPredictionContext::toString() const {
- //std::string up = !parent.expired() ? parent.lock()->toString() : "";
- std::string up = parent != nullptr ? parent->toString() : "";
- if (up.length() == 0) {
- if (returnState == EMPTY_RETURN_STATE) {
- return "$";
- }
- return std::to_string(returnState);
- }
- return std::to_string(returnState) + " " + up;
-}
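
A sketch of the two paths through create(): the "empty" bits collapse to the shared EMPTY context, while anything else becomes a new node that keeps its parent alive through the shared_ptr member described in the header. Return state 7 is an arbitrary sample value, and the sketch assumes PredictionContext::EMPTY and PredictionContext::EMPTY_RETURN_STATE are publicly accessible, as in the upstream runtime.

    #include <cassert>
    #include <iostream>
    #include "atn/SingletonPredictionContext.h"

    using namespace antlr4::atn;

    int main() {
      // The "empty" bits collapse to the shared EMPTY context instead of a new node.
      auto empty = SingletonPredictionContext::create(nullptr, PredictionContext::EMPTY_RETURN_STATE);
      assert(empty->isEmpty());

      // A regular node keeps its parent alive through the shared_ptr member.
      auto ctx = SingletonPredictionContext::create(empty, 7);
      assert(!ctx->isEmpty() && ctx->size() == 1);
      assert(ctx->getReturnState(0) == 7);
      assert(ctx->getParent(0) == empty);

      std::cout << ctx->toString() << "\n";  // return state, then the parent chain ("7 $")
      return 0;
    }
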
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.h b/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.h
deleted file mode 100644
index 1784c4f045..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/SingletonPredictionContext.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/PredictionContext.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC SingletonPredictionContext final : public PredictionContext {
- public:
- static bool is(const PredictionContext &predictionContext) { return predictionContext.getContextType() == PredictionContextType::SINGLETON; }
-
- static bool is(const PredictionContext *predictionContext) { return predictionContext != nullptr && is(*predictionContext); }
-
- static Ref<const SingletonPredictionContext> create(Ref<const PredictionContext> parent, size_t returnState);
-
- // Usually a parent is linked via a weak pointer. Not so here, as we have a kind of reverse reference chain.
- // There are no child contexts stored here, and often the parent context is left dangling when its
- // owning ATNState is released. In order to avoid having this context released as well (leaving all other
- // contexts which got this one as parent with a null reference), we use a shared_ptr here instead, to keep
- // those otherwise orphaned parent contexts alive.
- const Ref<const PredictionContext> parent;
- const size_t returnState;
-
- SingletonPredictionContext(Ref<const PredictionContext> parent, size_t returnState);
-
- bool isEmpty() const override;
- size_t size() const override;
- const Ref<const PredictionContext>& getParent(size_t index) const override;
- size_t getReturnState(size_t index) const override;
- bool equals(const PredictionContext &other) const override;
- std::string toString() const override;
-
- protected:
- size_t hashCodeImpl() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/StarBlockStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/StarBlockStartState.h
deleted file mode 100644
index 17fd43fde8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/StarBlockStartState.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/BlockStartState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// The block that begins a closure loop.
- class ANTLR4CPP_PUBLIC StarBlockStartState final : public BlockStartState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_BLOCK_START; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- StarBlockStartState() : BlockStartState(ATNStateType::STAR_BLOCK_START) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopEntryState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopEntryState.h
deleted file mode 100644
index a62eb812b1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopEntryState.h
+++ /dev/null
@@ -1,37 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC StarLoopEntryState final : public DecisionState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_LOOP_ENTRY; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- /**
- * Indicates whether this state can benefit from a precedence DFA during SLL
- * decision making.
- *
- * <p>This is a computed property that is calculated during ATN deserialization
- * and stored for use in {@link ParserATNSimulator} and
- * {@link ParserInterpreter}.</p>
- *
- * @see DFA#isPrecedenceDfa()
- */
- bool isPrecedenceDecision = false;
-
- StarLoopbackState *loopBackState = nullptr;
-
- StarLoopEntryState() : DecisionState(ATNStateType::STAR_LOOP_ENTRY) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.cpp
deleted file mode 100644
index 6dddbc0d4e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/StarLoopEntryState.h"
-#include "atn/Transition.h"
-#include "support/Casts.h"
-
-#include "atn/StarLoopbackState.h"
-
-using namespace antlr4::atn;
-
-StarLoopEntryState *StarLoopbackState::getLoopEntryState() const {
- if (transitions[0]->target != nullptr && transitions[0]->target->getStateType() == ATNStateType::STAR_LOOP_ENTRY) {
- return antlrcpp::downCast<StarLoopEntryState*>(transitions[0]->target);
- }
- return nullptr;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.h
deleted file mode 100644
index 04ef9db095..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/StarLoopbackState.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/ATNState.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC StarLoopbackState final : public ATNState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::STAR_LOOP_BACK; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- StarLoopbackState() : ATNState(ATNStateType::STAR_LOOP_BACK) {}
-
- StarLoopEntryState *getLoopEntryState() const;
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/TokensStartState.h b/contrib/libs/antlr4_cpp_runtime/src/atn/TokensStartState.h
deleted file mode 100644
index 8e41636283..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/TokensStartState.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/DecisionState.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// The Tokens rule start state linking to each lexer rule start state.
- class ANTLR4CPP_PUBLIC TokensStartState final : public DecisionState {
- public:
- static bool is(const ATNState &atnState) { return atnState.getStateType() == ATNStateType::TOKEN_START; }
-
- static bool is(const ATNState *atnState) { return atnState != nullptr && is(*atnState); }
-
- TokensStartState() : DecisionState(ATNStateType::TOKEN_START) {}
- };
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.cpp
deleted file mode 100644
index b918cddfcf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-#include "support/Arrays.h"
-
-#include "atn/Transition.h"
-
-using namespace antlr4;
-using namespace antlr4::atn;
-using namespace antlrcpp;
-
-Transition::Transition(TransitionType transitionType, ATNState *target) : _transitionType(transitionType) {
- if (target == nullptr) {
- throw NullPointerException("target cannot be null.");
- }
-
- this->target = target;
-}
-
-bool Transition::isEpsilon() const {
- return false;
-}
-
-misc::IntervalSet Transition::label() const {
- return misc::IntervalSet::EMPTY_SET;
-}
-
-std::string Transition::toString() const {
- std::stringstream ss;
- ss << "(Transition " << std::hex << this << ", target: " << std::hex << target << ')';
-
- return ss.str();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.h
deleted file mode 100644
index 4c88d698ae..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/Transition.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/IntervalSet.h"
-#include "atn/TransitionType.h"
-
-namespace antlr4 {
-namespace atn {
-
- /// <summary>
- /// An ATN transition between any two ATN states. Subclasses define
- /// atom, set, epsilon, action, predicate, rule transitions.
- /// <p/>
- /// This is a one way link. It emanates from a state (usually via a list of
- /// transitions) and has a target state.
- /// <p/>
- /// Since we never have to change the ATN transitions once we construct it,
- /// we can fix these transitions as specific classes. The DFA, on the other
- /// hand, needs to update the labels as it adds transitions to its states.
- /// We'll use the term Edge for the DFA to distinguish them from
- /// ATN transitions.
- /// </summary>
- class ANTLR4CPP_PUBLIC Transition {
- public:
- /// The target of this transition.
- // ml: this is a reference into the ATN.
- ATNState *target;
-
- virtual ~Transition() = default;
-
- TransitionType getTransitionType() const { return _transitionType; }
-
- /**
- * Determines if the transition is an "epsilon" transition.
- *
- * <p>The default implementation returns {@code false}.</p>
- *
- * @return {@code true} if traversing this transition in the ATN does not
- * consume an input symbol; otherwise, {@code false} if traversing this
- * transition consumes (matches) an input symbol.
- */
- virtual bool isEpsilon() const;
- virtual misc::IntervalSet label() const;
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const = 0;
-
- virtual std::string toString() const;
-
- Transition(Transition const&) = delete;
- Transition& operator=(Transition const&) = delete;
-
- protected:
- Transition(TransitionType transitionType, ATNState *target);
-
- private:
- const TransitionType _transitionType;
- };
-
- using ConstTransitionPtr = std::unique_ptr<const Transition>;
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.cpp
deleted file mode 100644
index 78769b2ada..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-#include "atn/TransitionType.h"
-
-std::string antlr4::atn::transitionTypeName(TransitionType transitionType) {
- switch (transitionType) {
- case TransitionType::EPSILON:
- return "EPSILON";
- case TransitionType::RANGE:
- return "RANGE";
- case TransitionType::RULE:
- return "RULE";
- case TransitionType::PREDICATE:
- return "PREDICATE";
- case TransitionType::ATOM:
- return "ATOM";
- case TransitionType::ACTION:
- return "ACTION";
- case TransitionType::SET:
- return "SET";
- case TransitionType::NOT_SET:
- return "NOT_SET";
- case TransitionType::WILDCARD:
- return "WILDCARD";
- case TransitionType::PRECEDENCE:
- return "PRECEDENCE";
- }
- return "UNKNOWN";
-}
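
A trivial sketch that walks the serialization constants and prints their symbolic names via transitionTypeName(); the 1..10 range mirrors the enum declared in the TransitionType.h removed just below.

    #include <cstddef>
    #include <iostream>
    #include "atn/TransitionType.h"

    int main() {
      using antlr4::atn::TransitionType;
      using antlr4::atn::transitionTypeName;

      // Walk the serialization constants (1..10) and print their symbolic names.
      for (std::size_t value = 1; value <= 10; ++value) {
        std::cout << value << " -> " << transitionTypeName(static_cast<TransitionType>(value)) << "\n";
      }
      return 0;
    }
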
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.h b/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.h
deleted file mode 100644
index d5d5f3bd97..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/TransitionType.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-#include <string>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace atn {
-
- // Constants for transition serialization.
- enum class TransitionType : size_t {
- EPSILON = 1,
- RANGE = 2,
- RULE = 3,
- PREDICATE = 4, // e.g., {isType(input.LT(1))}?
- ATOM = 5,
- ACTION = 6,
- SET = 7, // ~(A|B) or ~atom, wildcard, which convert to next 2
- NOT_SET = 8,
- WILDCARD = 9,
- PRECEDENCE = 10,
- };
-
- ANTLR4CPP_PUBLIC std::string transitionTypeName(TransitionType transitionType);
-
-} // namespace atn
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.cpp b/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.cpp
deleted file mode 100644
index 03ec00d399..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNState.h"
-
-#include "atn/WildcardTransition.h"
-
-using namespace antlr4::atn;
-
-WildcardTransition::WildcardTransition(ATNState *target) : Transition(TransitionType::WILDCARD, target) {
-}
-
-bool WildcardTransition::matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const {
- return symbol >= minVocabSymbol && symbol <= maxVocabSymbol;
-}
-
-std::string WildcardTransition::toString() const {
- return "WILDCARD " + Transition::toString() + " {}";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.h b/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.h
deleted file mode 100644
index d8d663f1fd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/atn/WildcardTransition.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "atn/Transition.h"
-
-namespace antlr4 {
-namespace atn {
-
- class ANTLR4CPP_PUBLIC WildcardTransition final : public Transition {
- public:
- static bool is(const Transition &transition) { return transition.getTransitionType() == TransitionType::WILDCARD; }
-
- static bool is(const Transition *transition) { return transition != nullptr && is(*transition); }
-
- explicit WildcardTransition(ATNState *target);
-
- virtual bool matches(size_t symbol, size_t minVocabSymbol, size_t maxVocabSymbol) const override;
-
- virtual std::string toString() const override;
- };
-
-} // namespace atn
-} // namespace antlr4
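
A sketch of using a transition polymorphically through ConstTransitionPtr: the base class supplies the isEpsilon()/label() defaults, and the tag-based is() helpers avoid RTTI. The target state and symbol values are arbitrary.

    #include <iostream>
    #include <memory>
    #include "atn/TokensStartState.h"
    #include "atn/WildcardTransition.h"

    using namespace antlr4::atn;

    int main() {
      TokensStartState target;  // any concrete ATNState will do; transitions reject null targets

      // ATN transitions are immutable and usually held through ConstTransitionPtr.
      ConstTransitionPtr t = std::make_unique<const WildcardTransition>(&target);

      std::cout << transitionTypeName(t->getTransitionType()) << "\n";  // WILDCARD
      std::cout << std::boolalpha
                << t->isEpsilon() << " "                     // false: the base-class default
                << t->matches(6, 1, 100) << " "              // true: inside the vocabulary bounds
                << WildcardTransition::is(t.get()) << "\n";  // tag-based check, no RTTI
      return 0;
    }
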
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.cpp b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.cpp
deleted file mode 100644
index 4cc0ab7cc1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.cpp
+++ /dev/null
@@ -1,115 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "dfa/DFASerializer.h"
-#include "dfa/LexerDFASerializer.h"
-#include "support/CPPUtils.h"
-#include "atn/StarLoopEntryState.h"
-#include "atn/ATNConfigSet.h"
-#include "support/Casts.h"
-
-#include "dfa/DFA.h"
-
-using namespace antlr4;
-using namespace antlr4::dfa;
-using namespace antlrcpp;
-
-DFA::DFA(atn::DecisionState *atnStartState) : DFA(atnStartState, 0) {
-}
-
-DFA::DFA(atn::DecisionState *atnStartState, size_t decision)
- : atnStartState(atnStartState), s0(nullptr), decision(decision) {
-
- _precedenceDfa = false;
- if (atn::StarLoopEntryState::is(atnStartState)) {
- if (downCast<atn::StarLoopEntryState*>(atnStartState)->isPrecedenceDecision) {
- _precedenceDfa = true;
- s0 = new DFAState(std::unique_ptr<atn::ATNConfigSet>(new atn::ATNConfigSet()));
- s0->isAcceptState = false;
- s0->requiresFullContext = false;
- }
- }
-}
-
-DFA::DFA(DFA &&other) : atnStartState(other.atnStartState), s0(other.s0), decision(other.decision) {
- // Source states are implicitly cleared by the move.
- states = std::move(other.states);
-
- other.atnStartState = nullptr;
- other.decision = 0;
- other.s0 = nullptr;
- _precedenceDfa = other._precedenceDfa;
- other._precedenceDfa = false;
-}
-
-DFA::~DFA() {
- bool s0InList = (s0 == nullptr);
- for (auto *state : states) {
- if (state == s0)
- s0InList = true;
- delete state;
- }
-
- if (!s0InList) {
- delete s0;
- }
-}
-
-bool DFA::isPrecedenceDfa() const {
- return _precedenceDfa;
-}
-
-DFAState* DFA::getPrecedenceStartState(int precedence) const {
- assert(_precedenceDfa); // Only precedence DFAs may contain a precedence start state.
-
- auto iterator = s0->edges.find(precedence);
- if (iterator == s0->edges.end())
- return nullptr;
-
- return iterator->second;
-}
-
-void DFA::setPrecedenceStartState(int precedence, DFAState *startState) {
- if (!isPrecedenceDfa()) {
- throw IllegalStateException("Only precedence DFAs may contain a precedence start state.");
- }
-
- if (precedence < 0) {
- return;
- }
-
- s0->edges[precedence] = startState;
-}
-
-std::vector<DFAState *> DFA::getStates() const {
- std::vector<DFAState *> result;
- for (auto *state : states)
- result.push_back(state);
-
- std::sort(result.begin(), result.end(), [](DFAState *o1, DFAState *o2) -> bool {
- return o1->stateNumber < o2->stateNumber;
- });
-
- return result;
-}
-
-std::string DFA::toString(const Vocabulary &vocabulary) const {
- if (s0 == nullptr) {
- return "";
- }
-
- DFASerializer serializer(this, vocabulary);
- return serializer.toString();
-}
-
-std::string DFA::toLexerString() const {
- if (s0 == nullptr) {
- return "";
- }
- LexerDFASerializer serializer(this);
-
- return serializer.toString();
-}
-
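For reference, a sketch of the precedence-DFA plumbing deleted above: a DecisionState flagged with isPrecedenceDecision turns the DFA into a precedence DFA whose synthetic s0 maps precedence levels to per-precedence start states. The state numbers and precedence values are invented, and note that the DFA deletes whatever is placed in its public states set.

    #include <cassert>
    #include <iostream>
    #include "atn/StarLoopEntryState.h"
    #include "dfa/DFA.h"

    using namespace antlr4;

    int main() {
      // A decision state flagged as a precedence decision turns the DFA into a precedence DFA.
      atn::StarLoopEntryState decision;
      decision.isPrecedenceDecision = true;

      dfa::DFA dfa(&decision, /*decision=*/0);
      assert(dfa.isPrecedenceDfa());

      // Register a start state for precedence level 2. The DFA owns states placed in `states`,
      // so it will delete this one in its destructor.
      auto *start = new dfa::DFAState(1);
      dfa.states.insert(start);
      dfa.setPrecedenceStartState(2, start);

      assert(dfa.getPrecedenceStartState(2) == start);
      assert(dfa.getPrecedenceStartState(7) == nullptr);  // no entry for this precedence
      std::cout << "precedence DFA with " << dfa.getStates().size() << " tracked state(s)\n";
      return 0;
    }
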
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.h b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.h
deleted file mode 100644
index 360eda8ba7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFA.h
+++ /dev/null
@@ -1,96 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "dfa/DFAState.h"
-
-namespace antlr4 {
-namespace dfa {
-
- class ANTLR4CPP_PUBLIC DFA final {
- private:
- struct DFAStateHasher final {
- size_t operator()(const DFAState *dfaState) const {
- return dfaState->hashCode();
- }
- };
-
- struct DFAStateComparer final {
- bool operator()(const DFAState *lhs, const DFAState *rhs) const {
- return lhs == rhs || *lhs == *rhs;
- }
- };
-
- public:
- /// A set of all DFA states. The Java runtime uses a map so the existing (equal) state can be
- /// retrieved; a plain set only tells you whether such a state is present.
-
- /// From which ATN state did we create this DFA?
- atn::DecisionState *atnStartState;
- std::unordered_set<DFAState*, DFAStateHasher, DFAStateComparer> states; // States are owned by this class.
- DFAState *s0;
- size_t decision;
-
- explicit DFA(atn::DecisionState *atnStartState);
- DFA(atn::DecisionState *atnStartState, size_t decision);
- DFA(const DFA &other) = delete;
- DFA(DFA &&other);
- ~DFA();
-
- /**
- * Gets whether this DFA is a precedence DFA. Precedence DFAs use a special
- * start state {@link #s0} which is not stored in {@link #states}. The
- * {@link DFAState#edges} array for this start state contains outgoing edges
- * supplying individual start states corresponding to specific precedence
- * values.
- *
- * @return {@code true} if this is a precedence DFA; otherwise,
- * {@code false}.
- * @see Parser#getPrecedence()
- */
- bool isPrecedenceDfa() const;
-
- /**
- * Get the start state for a specific precedence value.
- *
- * @param precedence The current precedence.
- * @return The start state corresponding to the specified precedence, or
- * {@code null} if no start state exists for the specified precedence.
- *
- * @throws IllegalStateException if this is not a precedence DFA.
- * @see #isPrecedenceDfa()
- */
- DFAState* getPrecedenceStartState(int precedence) const;
-
- /**
- * Set the start state for a specific precedence value.
- *
- * @param precedence The current precedence.
- * @param startState The start state corresponding to the specified
- * precedence.
- *
- * @throws IllegalStateException if this is not a precedence DFA.
- * @see #isPrecedenceDfa()
- */
- void setPrecedenceStartState(int precedence, DFAState *startState);
-
- /// Return a list of all states in this DFA, ordered by state number.
- std::vector<DFAState *> getStates() const;
-
- std::string toString(const Vocabulary &vocabulary) const;
-
- std::string toLexerString() const;
-
- private:
- /**
- * {@code true} if this DFA is for a precedence decision; otherwise,
- * {@code false}. This is the backing field for {@link #isPrecedenceDfa}.
- */
- bool _precedenceDfa;
- };
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.cpp b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.cpp
deleted file mode 100644
index 64d01769de..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.cpp
+++ /dev/null
@@ -1,60 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "dfa/DFA.h"
-#include "Vocabulary.h"
-
-#include "dfa/DFASerializer.h"
-
-using namespace antlr4::dfa;
-
-DFASerializer::DFASerializer(const DFA *dfa, const Vocabulary &vocabulary) : _dfa(dfa), _vocabulary(vocabulary) {
-}
-
-std::string DFASerializer::toString() const {
- if (_dfa->s0 == nullptr) {
- return "";
- }
-
- std::stringstream ss;
- std::vector<DFAState *> states = _dfa->getStates();
- for (auto *s : states) {
- for (size_t i = 0; i < s->edges.size(); i++) {
- DFAState *t = s->edges[i];
- if (t != nullptr && t->stateNumber != INT32_MAX) {
- ss << getStateString(s);
- std::string label = getEdgeLabel(i);
- ss << "-" << label << "->" << getStateString(t) << "\n";
- }
- }
- }
-
- return ss.str();
-}
-
-std::string DFASerializer::getEdgeLabel(size_t i) const {
- return _vocabulary.getDisplayName(i); // ml: no longer needed -1 as we use a map for edges, without offset.
-}
-
-std::string DFASerializer::getStateString(DFAState *s) const {
- size_t n = s->stateNumber;
-
- const std::string baseStateStr = std::string(s->isAcceptState ? ":" : "") + "s" + std::to_string(n) +
- (s->requiresFullContext ? "^" : "");
-
- if (s->isAcceptState) {
- if (!s->predicates.empty()) {
- std::string buf;
- for (size_t i = 0; i < s->predicates.size(); i++) {
- buf.append(s->predicates[i].toString());
- }
- return baseStateStr + "=>" + buf;
- } else {
- return baseStateStr + "=>" + std::to_string(s->prediction);
- }
- } else {
- return baseStateStr;
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.h b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.h
deleted file mode 100644
index b541714078..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFASerializer.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Vocabulary.h"
-
-namespace antlr4 {
-namespace dfa {
-
- /// A DFA walker that knows how to dump them to serialized strings.
- class ANTLR4CPP_PUBLIC DFASerializer {
- public:
- DFASerializer(const DFA *dfa, const Vocabulary &vocabulary);
-
- virtual ~DFASerializer() = default;
-
- std::string toString() const;
-
- protected:
- virtual std::string getEdgeLabel(size_t i) const;
- std::string getStateString(DFAState *s) const;
-
- private:
- const DFA *_dfa;
- const Vocabulary &_vocabulary;
- };
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.cpp b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.cpp
deleted file mode 100644
index e591b204c7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATNConfigSet.h"
-#include "atn/SemanticContext.h"
-#include "atn/ATNConfig.h"
-#include "misc/MurmurHash.h"
-
-#include "dfa/DFAState.h"
-
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-
-std::string DFAState::PredPrediction::toString() const {
- return std::string("(") + pred->toString() + ", " + std::to_string(alt) + ")";
-}
-
-std::set<size_t> DFAState::getAltSet() const {
- std::set<size_t> alts;
- if (configs != nullptr) {
- for (size_t i = 0; i < configs->size(); i++) {
- alts.insert(configs->get(i)->alt);
- }
- }
- return alts;
-}
-
-size_t DFAState::hashCode() const {
- return configs != nullptr ? configs->hashCode() : 0;
-}
-
-bool DFAState::equals(const DFAState &other) const {
- if (this == std::addressof(other)) {
- return true;
- }
- return configs == other.configs ||
- (configs != nullptr && other.configs != nullptr && *configs == *other.configs);
-}
-
-std::string DFAState::toString() const {
- std::stringstream ss;
- ss << stateNumber;
- if (configs) {
- ss << ":" << configs->toString();
- }
- if (isAcceptState) {
- ss << " => ";
- if (!predicates.empty()) {
- for (size_t i = 0; i < predicates.size(); i++) {
- ss << predicates[i].toString();
- }
- } else {
- ss << prediction;
- }
- }
- return ss.str();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.h b/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.h
deleted file mode 100644
index f555cc45cf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/DFAState.h
+++ /dev/null
@@ -1,154 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#include "atn/ATNConfigSet.h"
-#include "FlatHashMap.h"
-
-namespace antlr4 {
-namespace dfa {
-
- /// <summary>
- /// A DFA state represents a set of possible ATN configurations.
- /// As Aho, Sethi, Ullman p. 117 says "The DFA uses its state
- /// to keep track of all possible states the ATN can be in after
- /// reading each input symbol. That is to say, after reading
- /// input a1a2..an, the DFA is in a state that represents the
- /// subset T of the states of the ATN that are reachable from the
- /// ATN's start state along some path labeled a1a2..an."
- /// In conventional NFA->DFA conversion, therefore, the subset T
- /// would be a bitset representing the set of states the
- /// ATN could be in. We need to track the alt predicted by each
- /// state as well, however. More importantly, we need to maintain
- /// a stack of states, tracking the closure operations as they
- /// jump from rule to rule, emulating rule invocations (method calls).
- /// I have to add a stack to simulate the proper lookahead sequences for
- /// the underlying LL grammar from which the ATN was derived.
- /// <p/>
- /// I use a set of ATNConfig objects not simple states. An ATNConfig
- /// is both a state (ala normal conversion) and a RuleContext describing
- /// the chain of rules (if any) followed to arrive at that state.
- /// <p/>
- /// A DFA state may have multiple references to a particular state,
- /// but with different ATN contexts (with same or different alts)
- /// meaning that state was reached via a different set of rule invocations.
- /// </summary>
- class ANTLR4CPP_PUBLIC DFAState final {
- public:
- struct ANTLR4CPP_PUBLIC PredPrediction final {
- public:
- Ref<const atn::SemanticContext> pred; // never null; at least SemanticContext.NONE
- int alt;
-
- PredPrediction() = delete;
-
- PredPrediction(const PredPrediction&) = default;
- PredPrediction(PredPrediction&&) = default;
-
- PredPrediction(Ref<const atn::SemanticContext> pred, int alt) : pred(std::move(pred)), alt(alt) {}
-
- PredPrediction& operator=(const PredPrediction&) = default;
- PredPrediction& operator=(PredPrediction&&) = default;
-
- std::string toString() const;
- };
-
- std::unique_ptr<atn::ATNConfigSet> configs;
-
- /// {@code edges[symbol]} points to target of symbol. Shift up by 1 so (-1)
- /// <seealso cref="Token#EOF"/> maps to {@code edges[0]}.
- // ml: this is a sparse list, so we use a map instead of a vector.
- // Watch out: we no longer have the -1 offset, as it isn't needed anymore.
- FlatHashMap<size_t, DFAState*> edges;
-
- /// if accept state, what ttype do we match or alt do we predict?
- /// This is set to <seealso cref="ATN#INVALID_ALT_NUMBER"/> when <seealso cref="#predicates"/>{@code !=null} or
- /// <seealso cref="#requiresFullContext"/>.
- size_t prediction = 0;
-
- Ref<const atn::LexerActionExecutor> lexerActionExecutor;
-
- /// <summary>
- /// During SLL parsing, this is a list of predicates associated with the
- /// ATN configurations of the DFA state. When we have predicates,
- /// <seealso cref="#requiresFullContext"/> is {@code false} since full context prediction evaluates predicates
- /// on-the-fly. If this is not null, then <seealso cref="#prediction"/> is
- /// <seealso cref="ATN#INVALID_ALT_NUMBER"/>.
- /// <p/>
- /// We only use these for non-<seealso cref="#requiresFullContext"/> but conflicting states. That
- /// means we know from the context (it's $ or we don't dip into outer
- /// context) that it's an ambiguity not a conflict.
- /// <p/>
- /// This list is computed by <seealso cref="ParserATNSimulator#predicateDFAState"/>.
- /// </summary>
- std::vector<PredPrediction> predicates;
-
- int stateNumber = -1;
-
- bool isAcceptState = false;
-
- /// <summary>
- /// Indicates that this state was created during SLL prediction that
- /// discovered a conflict between the configurations in the state. Future
- /// <seealso cref="ParserATNSimulator#execATN"/> invocations immediately jump to
- /// full-context prediction if this field is true.
- /// </summary>
- bool requiresFullContext = false;
-
- /// Map a predicate to a predicted alternative.
- DFAState() = default;
-
- explicit DFAState(int stateNumber) : stateNumber(stateNumber) {}
-
- explicit DFAState(std::unique_ptr<atn::ATNConfigSet> configs) : configs(std::move(configs)) {}
-
- /// <summary>
- /// Get the set of all alts mentioned by all ATN configurations in this
- /// DFA state.
- /// </summary>
- std::set<size_t> getAltSet() const;
-
- size_t hashCode() const;
-
- /// Two DFAState instances are equal if their ATN configuration sets
- /// are the same. This method is used to see if a state already exists.
- ///
- /// Because the number of alternatives and number of ATN configurations are
- /// finite, there is a finite number of DFA states that can be processed.
- /// This is necessary to show that the algorithm terminates.
- ///
- /// Cannot test the DFA state numbers here because in
- /// ParserATNSimulator#addDFAState we need to know if any other state
- /// exists that has this exact set of ATN configurations. The
- /// stateNumber is irrelevant.
- bool equals(const DFAState &other) const;
-
- std::string toString() const;
- };
-
- inline bool operator==(const DFAState &lhs, const DFAState &rhs) {
- return lhs.equals(rhs);
- }
-
- inline bool operator!=(const DFAState &lhs, const DFAState &rhs) {
- return !operator==(lhs, rhs);
- }
-
-} // namespace dfa
-} // namespace antlr4
-
-namespace std {
-
- template <>
- struct hash<::antlr4::dfa::DFAState> {
- size_t operator()(const ::antlr4::dfa::DFAState &dfaState) const {
- return dfaState.hashCode();
- }
- };
-
-} // namespace std
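
A sketch of the equality contract spelled out in the comments above: DFAState identity is the ATN configuration set, not the state number, and the std::hash specialization forwards to hashCode(). Both states here have no config set, which is the degenerate case (hash 0, equal to each other); the state numbers are arbitrary.

    #include <cassert>
    #include <functional>
    #include "dfa/DFAState.h"

    using antlr4::dfa::DFAState;

    int main() {
      // Equality and hashing ignore stateNumber; only the ATN configuration set matters.
      DFAState a(1);
      DFAState b(2);

      assert(a == b);  // both have no config set, so they compare equal despite the numbers
      assert(std::hash<DFAState>{}(a) == std::hash<DFAState>{}(b));
      assert(a.getAltSet().empty());  // no configurations -> no predicted alternatives
      return 0;
    }
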
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.cpp b/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.cpp
deleted file mode 100644
index 20ed734743..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Vocabulary.h"
-
-#include "dfa/LexerDFASerializer.h"
-
-using namespace antlr4::dfa;
-
-LexerDFASerializer::LexerDFASerializer(const DFA *dfa) : DFASerializer(dfa, Vocabulary()) {
-}
-
-std::string LexerDFASerializer::getEdgeLabel(size_t i) const {
- return std::string("'") + static_cast<char>(i) + "'";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.h b/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.h
deleted file mode 100644
index eed7f4f0c5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/dfa/LexerDFASerializer.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "dfa/DFASerializer.h"
-
-namespace antlr4 {
-namespace dfa {
-
- class ANTLR4CPP_PUBLIC LexerDFASerializer final : public DFASerializer {
- public:
- explicit LexerDFASerializer(const DFA *dfa);
-
- protected:
- std::string getEdgeLabel(size_t i) const override;
- };
-
-} // namespace dfa
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.cpp b/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.cpp
deleted file mode 100644
index dd30ef971b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#include "internal/Synchronization.h"
-
-using namespace antlr4::internal;
-
-void Mutex::lock() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.Lock();
-#else
- _impl.lock();
-#endif
-}
-
-bool Mutex::try_lock() {
-#if ANTLR4CPP_USING_ABSEIL
- return _impl.TryLock();
-#else
- return _impl.try_lock();
-#endif
-}
-
-void Mutex::unlock() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.Unlock();
-#else
- _impl.unlock();
-#endif
-}
-
-void SharedMutex::lock() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.WriterLock();
-#else
- _impl.lock();
-#endif
-}
-
-bool SharedMutex::try_lock() {
-#if ANTLR4CPP_USING_ABSEIL
- return _impl.WriterTryLock();
-#else
- return _impl.try_lock();
-#endif
-}
-
-void SharedMutex::unlock() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.WriterUnlock();
-#else
- _impl.unlock();
-#endif
-}
-
-void SharedMutex::lock_shared() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.ReaderLock();
-#else
- _impl.lock_shared();
-#endif
-}
-
-bool SharedMutex::try_lock_shared() {
-#if ANTLR4CPP_USING_ABSEIL
- return _impl.ReaderTryLock();
-#else
- return _impl.try_lock_shared();
-#endif
-}
-
-void SharedMutex::unlock_shared() {
-#if ANTLR4CPP_USING_ABSEIL
- _impl.ReaderUnlock();
-#else
- _impl.unlock_shared();
-#endif
-}
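
A sketch of the wrappers' intended use. Synchronization.h is an internal header, so normal runtime users never include it directly; the mutex names, the table value, and initTable() are invented for illustration.

    #include <iostream>
    #include "internal/Synchronization.h"

    using namespace antlr4::internal;

    namespace {
      Mutex cacheMutex;        // exclusive-only lock
      SharedMutex tableMutex;  // reader/writer lock
      OnceFlag initFlag;
      int table = 0;

      void initTable() { table = 42; }  // runs exactly once via call_once
    }

    int main() {
      call_once(initFlag, initTable);

      {
        UniqueLock<Mutex> lock(cacheMutex);        // exclusive section
        // ... mutate some shared cache ...
      }
      {
        SharedLock<SharedMutex> lock(tableMutex);  // shared (reader) section
        std::cout << table << "\n";
      }
      return 0;
    }
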
diff --git a/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.h b/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.h
deleted file mode 100644
index 0f1ff9587d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/internal/Synchronization.h
+++ /dev/null
@@ -1,154 +0,0 @@
-// Copyright 2012-2022 The ANTLR Project
-//
-// Redistribution and use in source and binary forms, with or without modification, are permitted
-// provided that the following conditions are met:
-//
-// 1. Redistributions of source code must retain the above copyright notice, this list of conditions
-// and the following disclaimer.
-//
-// 2. Redistributions in binary form must reproduce the above copyright notice, this list of
-// conditions and the following disclaimer in the documentation and/or other materials provided
-// with the distribution.
-//
-// 3. Neither the name of the copyright holder nor the names of its contributors may be used to
-// endorse or promote products derived from this software without specific prior written
-// permission.
-//
-// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
-// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND
-// FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
-// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-// DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-// WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
-// WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#include <mutex>
-#include <shared_mutex>
-#include <utility>
-
-#if ANTLR4CPP_USING_ABSEIL
-#error #include "absl/base/call_once.h"
-#error #include "absl/base/thread_annotations.h"
-#error #include "absl/synchronization/mutex.h"
-#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS ABSL_NO_THREAD_SAFETY_ANALYSIS
-#else
-#define ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS
-#endif
-
-// By default ANTLRv4 uses synchronization primitives provided by the C++ standard library. In most
-// deployments this is fine, but in some, custom synchronization primitives may be preferred. This
-// header allows that by optionally supporting alternative implementations and by making it easier
-// to patch in other alternatives.
-
-namespace antlr4::internal {
-
- // Must be compatible with the C++ standard library Mutex requirement.
- class ANTLR4CPP_PUBLIC Mutex final {
- public:
- Mutex() = default;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- Mutex(const Mutex&) = delete;
- Mutex(Mutex&&) = delete;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- Mutex& operator=(const Mutex&) = delete;
- Mutex& operator=(Mutex&&) = delete;
-
- void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- private:
-#if ANTLR4CPP_USING_ABSEIL
- absl::Mutex _impl;
-#else
- std::mutex _impl;
-#endif
- };
-
- template <typename Mutex>
- using UniqueLock = std::unique_lock<Mutex>;
-
- // Must be compatible with the C++ standard library SharedMutex requirement.
- class ANTLR4CPP_PUBLIC SharedMutex final {
- public:
- SharedMutex() = default;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- SharedMutex(const SharedMutex&) = delete;
- SharedMutex(SharedMutex&&) = delete;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- SharedMutex& operator=(const SharedMutex&) = delete;
- SharedMutex& operator=(SharedMutex&&) = delete;
-
- void lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- bool try_lock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- void unlock() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- void lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- bool try_lock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- void unlock_shared() ANTLR4CPP_NO_THREAD_SAFTEY_ANALYSIS;
-
- private:
-#if ANTLR4CPP_USING_ABSEIL
- absl::Mutex _impl;
-#else
- std::shared_mutex _impl;
-#endif
- };
-
- template <typename Mutex>
- using SharedLock = std::shared_lock<Mutex>;
-
- class OnceFlag;
-
- template <typename Callable, typename... Args>
- void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args);
-
- // Must be compatible with std::once_flag.
- class ANTLR4CPP_PUBLIC OnceFlag final {
- public:
- constexpr OnceFlag() = default;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- OnceFlag(const OnceFlag&) = delete;
- OnceFlag(OnceFlag&&) = delete;
-
- // No copying or moving, we are as strict as possible to support other implementations.
- OnceFlag& operator=(const OnceFlag&) = delete;
- OnceFlag& operator=(OnceFlag&&) = delete;
-
- private:
- template <typename Callable, typename... Args>
- friend void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args);
-
-#if ANTLR4CPP_USING_ABSEIL
- absl::once_flag _impl;
-#else
- std::once_flag _impl;
-#endif
- };
-
- template <typename Callable, typename... Args>
- void call_once(OnceFlag &onceFlag, Callable &&callable, Args&&... args) {
-#if ANTLR4CPP_USING_ABSEIL
- absl::call_once(onceFlag._impl, std::forward<Callable>(callable), std::forward<Args>(args)...);
-#else
- std::call_once(onceFlag._impl, std::forward<Callable>(callable), std::forward<Args>(args)...);
-#endif
- }
-
-} // namespace antlr4::internal
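For orientation, the wrappers deleted above deliberately mirror the standard-library API, so call sites stay the same whichever backend (Abseil or std) is compiled in. A minimal usage sketch, assuming the header is available as internal/Synchronization.h; the Cache class and its members are illustrative only:

#include "internal/Synchronization.h"

#include <vector>

using namespace antlr4::internal;

class Cache {
public:
  void put(int value) {
    UniqueLock<Mutex> lock(_mutex);  // std::unique_lock over the Mutex wrapper
    _values.push_back(value);
  }

  void initOnce() {
    // Runs the lambda at most once, dispatching to absl::call_once or std::call_once.
    call_once(_once, [this] { _values.reserve(1024); });
  }

private:
  Mutex _mutex;
  OnceFlag _once;
  std::vector<int> _values;
};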
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.cpp b/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.cpp
deleted file mode 100644
index 1a236eccfb..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "atn/ATN.h"
-#include "atn/ATNDeserializer.h"
-#include "Vocabulary.h"
-
-#include "misc/InterpreterDataReader.h"
-
-using namespace antlr4::dfa;
-using namespace antlr4::atn;
-using namespace antlr4::misc;
-
-InterpreterData::InterpreterData(std::vector<std::string> const& literalNames, std::vector<std::string> const& symbolicNames)
-: vocabulary(literalNames, symbolicNames) {
-}
-
-InterpreterData InterpreterDataReader::parseFile(std::string const& fileName) {
- // The structure of the data file is very simple. Everything is line based with empty lines
- // separating the different parts. For lexers the layout is:
- // token literal names:
- // ...
- //
- // token symbolic names:
- // ...
- //
- // rule names:
- // ...
- //
- // channel names:
- // ...
- //
- // mode names:
- // ...
- //
- // atn:
-  //   <a single line with comma-separated int values> enclosed in a pair of square brackets.
- //
- // Data for a parser does not contain channel and mode names.
-
- std::ifstream input(fileName);
- if (!input.good())
- return {};
-
- std::vector<std::string> literalNames;
- std::vector<std::string> symbolicNames;
-
- std::string line;
-
- std::getline(input, line, '\n');
- assert(line == "token literal names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty())
- break;
-
- literalNames.push_back(line == "null" ? "" : line);
- };
-
- std::getline(input, line, '\n');
- assert(line == "token symbolic names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty())
- break;
-
- symbolicNames.push_back(line == "null" ? "" : line);
- };
- InterpreterData result(literalNames, symbolicNames);
-
- std::getline(input, line, '\n');
- assert(line == "rule names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty())
- break;
-
- result.ruleNames.push_back(line);
- };
-
- std::getline(input, line, '\n');
- if (line == "channel names:") {
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty())
- break;
-
- result.channels.push_back(line);
- };
-
- std::getline(input, line, '\n');
- assert(line == "mode names:");
- while (true) {
- std::getline(input, line, '\n');
- if (line.empty())
- break;
-
- result.modes.push_back(line);
- };
- }
-
- std::vector<int32_t> serializedATN;
-
- std::getline(input, line, '\n');
- assert(line == "atn:");
- std::getline(input, line, '\n');
- std::stringstream tokenizer(line);
- std::string value;
- while (tokenizer.good()) {
- std::getline(tokenizer, value, ',');
- unsigned long number;
- if (value[0] == '[')
- number = std::strtoul(&value[1], nullptr, 10);
- else
- number = std::strtoul(value.c_str(), nullptr, 10);
- serializedATN.push_back(static_cast<int32_t>(number));
- }
-
- ATNDeserializer deserializer;
- result.atn = deserializer.deserialize(serializedATN);
- return result;
-}
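A hedged sketch of how the reader above is driven; "MyGrammar.interp" is a placeholder for the interpreter data file that the ANTLR tool writes next to the generated parser:

#include "misc/InterpreterDataReader.h"

#include <iostream>

int main() {
  antlr4::misc::InterpreterData data =
      antlr4::misc::InterpreterDataReader::parseFile("MyGrammar.interp");

  // An unreadable file yields a default-constructed (invalid) InterpreterData.
  for (const auto &rule : data.ruleNames) {
    std::cout << rule << '\n';
  }
  return 0;
}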
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.h b/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.h
deleted file mode 100644
index 4b83dd129d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/InterpreterDataReader.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-#include "atn/ATN.h"
-#include "Vocabulary.h"
-
-namespace antlr4 {
-namespace misc {
-
- struct InterpreterData {
- std::unique_ptr<atn::ATN> atn;
- dfa::Vocabulary vocabulary;
- std::vector<std::string> ruleNames;
- std::vector<std::string> channels; // Only valid for lexer grammars.
- std::vector<std::string> modes; // ditto
-
- InterpreterData() {}; // For invalid content.
- InterpreterData(std::vector<std::string> const& literalNames, std::vector<std::string> const& symbolicNames);
- };
-
- // A class to read plain text interpreter data produced by ANTLR.
- class ANTLR4CPP_PUBLIC InterpreterDataReader {
- public:
- static InterpreterData parseFile(std::string const& fileName);
- };
-
-} // namespace misc
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.cpp b/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.cpp
deleted file mode 100644
index f0d0bfb491..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.cpp
+++ /dev/null
@@ -1,61 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-
-using namespace antlr4::misc;
-
-const Interval Interval::INVALID;
-
-size_t Interval::hashCode() const {
- size_t hash = 23;
- hash = hash * 31 + static_cast<size_t>(a);
- hash = hash * 31 + static_cast<size_t>(b);
- return hash;
-}
-
-bool Interval::startsBeforeDisjoint(const Interval &other) const {
- return a < other.a && b < other.a;
-}
-
-bool Interval::startsBeforeNonDisjoint(const Interval &other) const {
- return a <= other.a && b >= other.a;
-}
-
-bool Interval::startsAfter(const Interval &other) const {
- return a > other.a;
-}
-
-bool Interval::startsAfterDisjoint(const Interval &other) const {
- return a > other.b;
-}
-
-bool Interval::startsAfterNonDisjoint(const Interval &other) const {
- return a > other.a && a <= other.b; // b >= other.b implied
-}
-
-bool Interval::disjoint(const Interval &other) const {
- return startsBeforeDisjoint(other) || startsAfterDisjoint(other);
-}
-
-bool Interval::adjacent(const Interval &other) const {
- return a == other.b + 1 || b == other.a - 1;
-}
-
-bool Interval::properlyContains(const Interval &other) const {
- return other.a >= a && other.b <= b;
-}
-
-Interval Interval::Union(const Interval &other) const {
- return Interval(std::min(a, other.a), std::max(b, other.b));
-}
-
-Interval Interval::intersection(const Interval &other) const {
- return Interval(std::max(a, other.a), std::min(b, other.b));
-}
-
-std::string Interval::toString() const {
- return std::to_string(a) + ".." + std::to_string(b);
-}
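A small worked example of the interval algebra above, useful for checking the disjoint/adjacent/union semantics; the values are arbitrary:

#include "misc/Interval.h"

#include <cassert>

int main() {
  using antlr4::misc::Interval;

  Interval a(ssize_t{0}, ssize_t{41});
  Interval b(ssize_t{42}, ssize_t{42});
  Interval c(ssize_t{10}, ssize_t{50});

  assert(a.adjacent(b));   // 0..41 touches 42..42
  assert(!a.disjoint(c));  // 0..41 overlaps 10..50
  assert(a.Union(c) == Interval(ssize_t{0}, ssize_t{50}));
  assert(a.intersection(c) == Interval(ssize_t{10}, ssize_t{41}));
  assert(Interval(ssize_t{5}, ssize_t{5}).length() == 1u);  // x..x has length 1
  return 0;
}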
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.h b/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.h
deleted file mode 100644
index 32abf629a8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/Interval.h
+++ /dev/null
@@ -1,84 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
- // Helpers to convert certain unsigned symbols (e.g. Token::EOF) to their original numeric value (e.g. -1)
- // and vice versa. This is needed mostly for intervals to keep their original order and for toString()
- // methods to print the original numeric value (e.g. for tests).
- constexpr size_t numericToSymbol(ssize_t v) { return static_cast<size_t>(v); }
- constexpr ssize_t symbolToNumeric(size_t v) { return static_cast<ssize_t>(v); }
-
- /// An immutable inclusive interval a..b
- class ANTLR4CPP_PUBLIC Interval final {
- public:
- static const Interval INVALID;
-
- // Must stay signed to guarantee the correct sort order.
- ssize_t a;
- ssize_t b;
-
- constexpr Interval() : Interval(static_cast<ssize_t>(-1), static_cast<ssize_t>(-2)) {}
-
- constexpr explicit Interval(size_t a_, size_t b_) : Interval(symbolToNumeric(a_), symbolToNumeric(b_)) {}
-
- constexpr Interval(ssize_t a_, ssize_t b_) : a(a_), b(b_) {}
-
- /// return number of elements between a and b inclusively. x..x is length 1.
- /// if b < a, then length is 0. 9..10 has length 2.
- constexpr size_t length() const { return b >= a ? static_cast<size_t>(b - a + 1) : 0; }
-
- constexpr bool operator==(const Interval &other) const { return a == other.a && b == other.b; }
-
- size_t hashCode() const;
-
- /// <summary>
- /// Does this start completely before other? Disjoint </summary>
- bool startsBeforeDisjoint(const Interval &other) const;
-
- /// <summary>
- /// Does this start at or before other? Nondisjoint </summary>
- bool startsBeforeNonDisjoint(const Interval &other) const;
-
- /// <summary>
- /// Does this.a start after other.b? May or may not be disjoint </summary>
- bool startsAfter(const Interval &other) const;
-
- /// <summary>
- /// Does this start completely after other? Disjoint </summary>
- bool startsAfterDisjoint(const Interval &other) const;
-
- /// <summary>
- /// Does this start after other? NonDisjoint </summary>
- bool startsAfterNonDisjoint(const Interval &other) const;
-
- /// <summary>
- /// Are both ranges disjoint? I.e., no overlap? </summary>
- bool disjoint(const Interval &other) const;
-
- /// <summary>
- /// Are two intervals adjacent such as 0..41 and 42..42? </summary>
- bool adjacent(const Interval &other) const;
-
- bool properlyContains(const Interval &other) const;
-
- /// <summary>
- /// Return the interval computed from combining this and other </summary>
- Interval Union(const Interval &other) const;
-
- /// <summary>
- /// Return the interval in common between this and o </summary>
- Interval intersection(const Interval &other) const;
-
- std::string toString() const;
- };
-
-} // namespace misc
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.cpp b/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.cpp
deleted file mode 100644
index d230bf45f6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.cpp
+++ /dev/null
@@ -1,501 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/MurmurHash.h"
-#include "Lexer.h"
-#include "Exceptions.h"
-#include "Vocabulary.h"
-
-#include "misc/IntervalSet.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-
-IntervalSet const IntervalSet::COMPLETE_CHAR_SET =
- IntervalSet::of(Lexer::MIN_CHAR_VALUE, Lexer::MAX_CHAR_VALUE);
-
-IntervalSet const IntervalSet::EMPTY_SET;
-
-IntervalSet::IntervalSet() : _intervals() {
-}
-
-IntervalSet::IntervalSet(const IntervalSet &set) : IntervalSet() {
- _intervals = set._intervals;
-}
-
-IntervalSet::IntervalSet(IntervalSet&& set) : IntervalSet(std::move(set._intervals)) {
-}
-
-IntervalSet::IntervalSet(std::vector<Interval>&& intervals) : _intervals(std::move(intervals)) {
-}
-
-IntervalSet& IntervalSet::operator=(const IntervalSet& other) {
- _intervals = other._intervals;
- return *this;
-}
-
-IntervalSet& IntervalSet::operator=(IntervalSet&& other) {
-  _intervals = std::move(other._intervals);
- return *this;
-}
-
-IntervalSet IntervalSet::of(ssize_t a) {
- return IntervalSet({ Interval(a, a) });
-}
-
-IntervalSet IntervalSet::of(ssize_t a, ssize_t b) {
- return IntervalSet({ Interval(a, b) });
-}
-
-void IntervalSet::clear() {
- _intervals.clear();
-}
-
-void IntervalSet::add(ssize_t el) {
- add(el, el);
-}
-
-void IntervalSet::add(ssize_t a, ssize_t b) {
- add(Interval(a, b));
-}
-
-void IntervalSet::add(const Interval &addition) {
- if (addition.b < addition.a) {
- return;
- }
-
- // find position in list
- for (auto iterator = _intervals.begin(); iterator != _intervals.end(); ++iterator) {
- Interval r = *iterator;
- if (addition == r) {
- return;
- }
-
- if (addition.adjacent(r) || !addition.disjoint(r)) {
- // next to each other, make a single larger interval
- Interval bigger = addition.Union(r);
- *iterator = bigger;
-
- // make sure we didn't just create an interval that
- // should be merged with next interval in list
- while (iterator + 1 != _intervals.end()) {
- Interval next = *++iterator;
- if (!bigger.adjacent(next) && bigger.disjoint(next)) {
- break;
- }
-
- // if we bump up against or overlap next, merge
- iterator = _intervals.erase(iterator);// remove this one
- --iterator; // move backwards to what we just set
- *iterator = bigger.Union(next); // set to 3 merged ones
- // ml: no need to advance iterator, we do that in the next round anyway. ++iterator; // first call to next after previous duplicates the result
- }
- return;
- }
-
- if (addition.startsBeforeDisjoint(r)) {
- // insert before r
- //--iterator;
- _intervals.insert(iterator, addition);
- return;
- }
-
- // if disjoint and after r, a future iteration will handle it
- }
-
- // ok, must be after last interval (and disjoint from last interval)
- // just add it
- _intervals.push_back(addition);
-}
-
-IntervalSet IntervalSet::Or(const std::vector<IntervalSet> &sets) {
- IntervalSet result;
- for (const auto &s : sets) {
- result.addAll(s);
- }
- return result;
-}
-
-IntervalSet& IntervalSet::addAll(const IntervalSet &set) {
- // walk set and add each interval
- for (auto const& interval : set._intervals) {
- add(interval);
- }
- return *this;
-}
-
-IntervalSet IntervalSet::complement(ssize_t minElement, ssize_t maxElement) const {
- return complement(IntervalSet::of(minElement, maxElement));
-}
-
-IntervalSet IntervalSet::complement(const IntervalSet &vocabulary) const {
- return vocabulary.subtract(*this);
-}
-
-IntervalSet IntervalSet::subtract(const IntervalSet &other) const {
- return subtract(*this, other);
-}
-
-IntervalSet IntervalSet::subtract(const IntervalSet &left, const IntervalSet &right) {
- if (left.isEmpty()) {
- return IntervalSet();
- }
-
- if (right.isEmpty()) {
-    // right set has no elements; just return a copy of the left set
- return left;
- }
-
- IntervalSet result(left);
- size_t resultI = 0;
- size_t rightI = 0;
- while (resultI < result._intervals.size() && rightI < right._intervals.size()) {
- Interval &resultInterval = result._intervals[resultI];
- const Interval &rightInterval = right._intervals[rightI];
-
- // operation: (resultInterval - rightInterval) and update indexes
-
- if (rightInterval.b < resultInterval.a) {
- rightI++;
- continue;
- }
-
- if (rightInterval.a > resultInterval.b) {
- resultI++;
- continue;
- }
-
- Interval beforeCurrent;
- Interval afterCurrent;
- if (rightInterval.a > resultInterval.a) {
- beforeCurrent = Interval(resultInterval.a, rightInterval.a - 1);
- }
-
- if (rightInterval.b < resultInterval.b) {
- afterCurrent = Interval(rightInterval.b + 1, resultInterval.b);
- }
-
- if (beforeCurrent.a > -1) { // -1 is the default value
- if (afterCurrent.a > -1) {
- // split the current interval into two
- result._intervals[resultI] = beforeCurrent;
- result._intervals.insert(result._intervals.begin() + resultI + 1, afterCurrent);
- resultI++;
- rightI++;
- } else {
- // replace the current interval
- result._intervals[resultI] = beforeCurrent;
- resultI++;
- }
- } else {
- if (afterCurrent.a > -1) {
- // replace the current interval
- result._intervals[resultI] = afterCurrent;
- rightI++;
- } else {
- // remove the current interval (thus no need to increment resultI)
- result._intervals.erase(result._intervals.begin() + resultI);
- }
- }
- }
-
- // If rightI reached right.intervals.size(), no more intervals to subtract from result.
- // If resultI reached result.intervals.size(), we would be subtracting from an empty set.
- // Either way, we are done.
- return result;
-}
-
-IntervalSet IntervalSet::Or(const IntervalSet &a) const {
- IntervalSet result;
- result.addAll(*this);
- result.addAll(a);
- return result;
-}
-
-IntervalSet IntervalSet::And(const IntervalSet &other) const {
- IntervalSet intersection;
- size_t i = 0;
- size_t j = 0;
-
- // iterate down both interval lists looking for nondisjoint intervals
- while (i < _intervals.size() && j < other._intervals.size()) {
- Interval mine = _intervals[i];
- Interval theirs = other._intervals[j];
-
- if (mine.startsBeforeDisjoint(theirs)) {
- // move this iterator looking for interval that might overlap
- i++;
- } else if (theirs.startsBeforeDisjoint(mine)) {
- // move other iterator looking for interval that might overlap
- j++;
- } else if (mine.properlyContains(theirs)) {
- // overlap, add intersection, get next theirs
- intersection.add(mine.intersection(theirs));
- j++;
- } else if (theirs.properlyContains(mine)) {
- // overlap, add intersection, get next mine
- intersection.add(mine.intersection(theirs));
- i++;
- } else if (!mine.disjoint(theirs)) {
- // overlap, add intersection
- intersection.add(mine.intersection(theirs));
-
- // Move the iterator of lower range [a..b], but not
- // the upper range as it may contain elements that will collide
- // with the next iterator. So, if mine=[0..115] and
- // theirs=[115..200], then intersection is 115 and move mine
- // but not theirs as theirs may collide with the next range
- // in thisIter.
- // move both iterators to next ranges
- if (mine.startsAfterNonDisjoint(theirs)) {
- j++;
- } else if (theirs.startsAfterNonDisjoint(mine)) {
- i++;
- }
- }
- }
-
- return intersection;
-}
-
-
-bool IntervalSet::contains(ssize_t el) const {
- if (_intervals.empty() || el < _intervals.front().a || el > _intervals.back().b) {
- return false;
- }
-
- return std::binary_search(_intervals.begin(), _intervals.end(), Interval(el, el), [](const Interval &lhs, const Interval &rhs) {
- return lhs.b < rhs.a;
- });
-}
-
-bool IntervalSet::isEmpty() const {
- return _intervals.empty();
-}
-
-ssize_t IntervalSet::getSingleElement() const {
- if (_intervals.size() == 1) {
- if (_intervals[0].a == _intervals[0].b) {
- return _intervals[0].a;
- }
- }
-
- return Token::INVALID_TYPE; // XXX: this value is 0, but 0 is a valid interval range, how can that work?
-}
-
-ssize_t IntervalSet::getMaxElement() const {
- if (_intervals.empty()) {
- return Token::INVALID_TYPE;
- }
-
- return _intervals.back().b;
-}
-
-ssize_t IntervalSet::getMinElement() const {
- if (_intervals.empty()) {
- return Token::INVALID_TYPE;
- }
-
- return _intervals.front().a;
-}
-
-std::vector<Interval> const& IntervalSet::getIntervals() const {
- return _intervals;
-}
-
-size_t IntervalSet::hashCode() const {
- size_t hash = MurmurHash::initialize();
- for (const auto &interval : _intervals) {
- hash = MurmurHash::update(hash, interval.a);
- hash = MurmurHash::update(hash, interval.b);
- }
-
- return MurmurHash::finish(hash, _intervals.size() * 2);
-}
-
-bool IntervalSet::operator == (const IntervalSet &other) const {
- if (_intervals.empty() && other._intervals.empty())
- return true;
-
- if (_intervals.size() != other._intervals.size())
- return false;
-
- return std::equal(_intervals.begin(), _intervals.end(), other._intervals.begin());
-}
-
-std::string IntervalSet::toString() const {
- return toString(false);
-}
-
-std::string IntervalSet::toString(bool elemAreChar) const {
- if (_intervals.empty()) {
- return "{}";
- }
-
- std::stringstream ss;
- size_t effectiveSize = size();
- if (effectiveSize > 1) {
- ss << "{";
- }
-
- bool firstEntry = true;
- for (const auto &interval : _intervals) {
- if (!firstEntry)
- ss << ", ";
- firstEntry = false;
-
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (a == b) {
- if (a == -1) {
- ss << "<EOF>";
- } else if (elemAreChar) {
- ss << "'" << static_cast<char>(a) << "'";
- } else {
- ss << a;
- }
- } else {
- if (elemAreChar) {
- ss << "'" << static_cast<char>(a) << "'..'" << static_cast<char>(b) << "'";
- } else {
- ss << a << ".." << b;
- }
- }
- }
- if (effectiveSize > 1) {
- ss << "}";
- }
-
- return ss.str();
-}
-
-std::string IntervalSet::toString(const dfa::Vocabulary &vocabulary) const {
- if (_intervals.empty()) {
- return "{}";
- }
-
- std::stringstream ss;
- size_t effectiveSize = size();
- if (effectiveSize > 1) {
- ss << "{";
- }
-
- bool firstEntry = true;
- for (const auto &interval : _intervals) {
- if (!firstEntry)
- ss << ", ";
- firstEntry = false;
-
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (a == b) {
- ss << elementName(vocabulary, a);
- } else {
- for (ssize_t i = a; i <= b; i++) {
- if (i > a) {
- ss << ", ";
- }
- ss << elementName(vocabulary, i);
- }
- }
- }
- if (effectiveSize > 1) {
- ss << "}";
- }
-
- return ss.str();
-}
-
-std::string IntervalSet::elementName(const dfa::Vocabulary &vocabulary, ssize_t a) const {
- if (a == -1) {
- return "<EOF>";
- } else if (a == -2) {
- return "<EPSILON>";
- } else {
- return vocabulary.getDisplayName(a);
- }
-}
-
-size_t IntervalSet::size() const {
- size_t result = 0;
- for (const auto &interval : _intervals) {
- result += size_t(interval.b - interval.a + 1);
- }
- return result;
-}
-
-std::vector<ssize_t> IntervalSet::toList() const {
- std::vector<ssize_t> result;
- for (const auto &interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- result.push_back(v);
- }
- }
- return result;
-}
-
-std::set<ssize_t> IntervalSet::toSet() const {
- std::set<ssize_t> result;
- for (const auto &interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- result.insert(v);
- }
- }
- return result;
-}
-
-ssize_t IntervalSet::get(size_t i) const {
- size_t index = 0;
- for (const auto &interval : _intervals) {
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- for (ssize_t v = a; v <= b; v++) {
- if (index == i) {
- return v;
- }
- index++;
- }
- }
- return -1;
-}
-
-void IntervalSet::remove(ssize_t el) {
- for (size_t i = 0; i < _intervals.size(); ++i) {
- Interval &interval = _intervals[i];
- ssize_t a = interval.a;
- ssize_t b = interval.b;
- if (el < a) {
- break; // list is sorted and el is before this interval; not here
- }
-
- // if whole interval x..x, rm
- if (el == a && el == b) {
- _intervals.erase(_intervals.begin() + (long)i);
- break;
- }
- // if on left edge x..b, adjust left
- if (el == a) {
- interval.a++;
- break;
- }
- // if on right edge a..x, adjust right
- if (el == b) {
- interval.b--;
- break;
- }
- // if in middle a..x..b, split interval
- if (el > a && el < b) { // found in this interval
- ssize_t oldb = interval.b;
- interval.b = el - 1; // [a..x-1]
- add(el + 1, oldb); // add [x+1..b]
-
- break; // ml: not in the Java code but I believe we also should stop searching here, as we found x.
- }
- }
-}
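The merging behaviour of add() above can be exercised directly; this is a hedged sketch, with the expected output given in comments rather than verified against a build:

#include "misc/IntervalSet.h"

#include <cassert>
#include <iostream>

int main() {
  antlr4::misc::IntervalSet set;
  set.add(1, 5);
  set.add(10, 20);
  set.add(4, 8);                 // overlaps 1..5, so the two merge into 1..8

  assert(set.contains(7));
  assert(!set.contains(9));
  assert(set.size() == 19u);     // 8 elements in 1..8 plus 11 in 10..20

  std::cout << set.toString() << '\n';  // expected "{1..8, 10..20}"
  return 0;
}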
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.h b/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.h
deleted file mode 100644
index 49565dc691..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/IntervalSet.h
+++ /dev/null
@@ -1,188 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "misc/Interval.h"
-#include "Exceptions.h"
-
-namespace antlr4 {
-namespace misc {
-
- /**
- * This class implements the {@link IntSet} backed by a sorted array of
- * non-overlapping intervals. It is particularly efficient for representing
- * large collections of numbers, where the majority of elements appear as part
- * of a sequential range of numbers that are all part of the set. For example,
- * the set { 1, 2, 3, 4, 7, 8 } may be represented as { [1, 4], [7, 8] }.
- *
- * <p>
- * This class is able to represent sets containing any combination of values in
- * the range {@link Integer#MIN_VALUE} to {@link Integer#MAX_VALUE}
- * (inclusive).</p>
- */
- class ANTLR4CPP_PUBLIC IntervalSet final {
- public:
- static IntervalSet const COMPLETE_CHAR_SET;
- static IntervalSet const EMPTY_SET;
-
- private:
- /// The list of sorted, disjoint intervals.
- std::vector<Interval> _intervals;
-
- explicit IntervalSet(std::vector<Interval>&& intervals);
-
- public:
- IntervalSet();
- IntervalSet(IntervalSet const& set);
- IntervalSet(IntervalSet&& set);
-
- template<typename T1, typename... T_NEXT>
- IntervalSet(int, T1 t1, T_NEXT&&... next) : IntervalSet() {
- // The first int argument is an ignored count for compatibility
- // with the previous varargs based interface.
- addItems(t1, std::forward<T_NEXT>(next)...);
- }
-
- IntervalSet& operator=(IntervalSet const& set);
- IntervalSet& operator=(IntervalSet&& set);
-
- /// Create a set with a single element, el.
- static IntervalSet of(ssize_t a);
-
- /// Create a set with all ints within range [a..b] (inclusive)
- static IntervalSet of(ssize_t a, ssize_t b);
-
- void clear();
-
- /// Add a single element to the set. An isolated element is stored
- /// as a range el..el.
- void add(ssize_t el);
-
- /// Add interval; i.e., add all integers from a to b to set.
- /// If b<a, do nothing.
- /// Keep list in sorted order (by left range value).
- /// If overlap, combine ranges. For example,
- /// If this is {1..5, 10..20}, adding 6..7 yields
- /// {1..5, 6..7, 10..20}. Adding 4..8 yields {1..8, 10..20}.
- void add(ssize_t a, ssize_t b);
-
-    /// Combine all sets in the array, returning the or'd value.
- static IntervalSet Or(const std::vector<IntervalSet> &sets);
-
- // Copy on write so we can cache a..a intervals and sets of that.
- void add(const Interval &addition);
- IntervalSet& addAll(const IntervalSet &set);
-
- template<typename T1, typename... T_NEXT>
- void addItems(T1 t1, T_NEXT&&... next) {
- add(t1);
- addItems(std::forward<T_NEXT>(next)...);
- }
-
- IntervalSet complement(ssize_t minElement, ssize_t maxElement) const;
-
- /// Given the set of possible values (rather than, say UNICODE or MAXINT),
- /// return a new set containing all elements in vocabulary, but not in
- /// this. The computation is (vocabulary - this).
- ///
- /// 'this' is assumed to be either a subset or equal to vocabulary.
- IntervalSet complement(const IntervalSet &vocabulary) const;
-
- /// Compute this-other via this&~other.
- /// Return a new set containing all elements in this but not in other.
- /// other is assumed to be a subset of this;
- /// anything that is in other but not in this will be ignored.
- IntervalSet subtract(const IntervalSet &other) const;
-
- /**
- * Compute the set difference between two interval sets. The specific
- * operation is {@code left - right}. If either of the input sets is
- * {@code null}, it is treated as though it was an empty set.
- */
- static IntervalSet subtract(const IntervalSet &left, const IntervalSet &right);
-
- IntervalSet Or(const IntervalSet &a) const;
-
- /// Return a new set with the intersection of this set with other. Because
- /// the intervals are sorted, we can use an iterator for each list and
- /// just walk them together. This is roughly O(min(n,m)) for interval
- /// list lengths n and m.
- IntervalSet And(const IntervalSet &other) const;
-
- /// Is el in any range of this set?
- bool contains(ssize_t el) const;
-
- /// return true if this set has no members
- bool isEmpty() const;
-
- /// If this set is a single integer, return it otherwise Token.INVALID_TYPE.
- ssize_t getSingleElement() const;
-
- /**
- * Returns the maximum value contained in the set.
- *
- * @return the maximum value contained in the set. If the set is empty, this
- * method returns {@link Token#INVALID_TYPE}.
- */
- ssize_t getMaxElement() const;
-
- /**
- * Returns the minimum value contained in the set.
- *
- * @return the minimum value contained in the set. If the set is empty, this
- * method returns {@link Token#INVALID_TYPE}.
- */
- ssize_t getMinElement() const;
-
- /// <summary>
- /// Return a list of Interval objects. </summary>
- std::vector<Interval> const& getIntervals() const;
-
- size_t hashCode() const;
-
- /// Are two IntervalSets equal? Because all intervals are sorted
- /// and disjoint, equals is a simple linear walk over both lists
- /// to make sure they are the same.
- bool operator == (const IntervalSet &other) const;
- std::string toString() const;
- std::string toString(bool elemAreChar) const;
-
- std::string toString(const dfa::Vocabulary &vocabulary) const;
-
- protected:
- std::string elementName(const dfa::Vocabulary &vocabulary, ssize_t a) const;
-
- public:
- size_t size() const;
- std::vector<ssize_t> toList() const;
- std::set<ssize_t> toSet() const;
-
- /// Get the ith element of ordered set. Used only by RandomPhrase so
- /// don't bother to implement if you're not doing that for a new
- /// ANTLR code gen target.
- ssize_t get(size_t i) const;
- void remove(ssize_t el);
-
- private:
- void addItems() { /* No-op */ }
- };
-
-} // namespace misc
-} // namespace antlr4
-
-// Hash function for IntervalSet.
-
-namespace std {
- using antlr4::misc::IntervalSet;
-
- template <> struct hash<IntervalSet>
- {
- size_t operator() (const IntervalSet &x) const
- {
- return x.hashCode();
- }
- };
-}
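As a follow-up to the complement() declaration above, a hedged sketch under the documented assumption that this set is a subset of the supplied vocabulary range:

#include "misc/IntervalSet.h"

#include <iostream>

int main() {
  antlr4::misc::IntervalSet tokens;
  tokens.add(3, 5);

  // Everything in 1..10 that is not in 3..5.
  antlr4::misc::IntervalSet rest = tokens.complement(1, 10);
  std::cout << rest.toString() << '\n';  // expected "{1..2, 6..10}"
  return 0;
}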
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.cpp b/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.cpp
deleted file mode 100644
index 09072c9f7e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.cpp
+++ /dev/null
@@ -1,120 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include <cstddef>
-#include <cstdint>
-#include <cstring>
-
-#include "misc/MurmurHash.h"
-
-using namespace antlr4::misc;
-
-// A variation of the MurmurHash3 implementation (https://github.com/aappleby/smhasher/blob/master/src/MurmurHash3.cpp)
-// Here we unrolled the loop used there into individual calls to update(), as we usually hash object fields
-// instead of entire buffers.
-
-// Platform-specific functions and macros
-
-// Microsoft Visual Studio
-
-#if defined(_MSC_VER)
-
-#include <stdlib.h>
-
-#define ROTL32(x,y) _rotl(x,y)
-#define ROTL64(x,y) _rotl64(x,y)
-
-#elif ANTLR4CPP_HAVE_BUILTIN(__builtin_rotateleft32) && ANTLR4CPP_HAVE_BUILTIN(__builtin_rotateleft64)
-
-#define ROTL32(x, y) __builtin_rotateleft32(x, y)
-#define ROTL64(x, y) __builtin_rotateleft64(x, y)
-
-#else // defined(_MSC_VER)
-
-// Other compilers
-
-namespace {
-
-constexpr uint32_t ROTL32(uint32_t x, int r) {
- return (x << r) | (x >> (32 - r));
-}
-constexpr uint64_t ROTL64(uint64_t x, int r) {
- return (x << r) | (x >> (64 - r));
-}
-
-}
-
-#endif // !defined(_MSC_VER)
-
-#if SIZE_MAX == UINT64_MAX
-
-size_t MurmurHash::update(size_t hash, size_t value) {
- size_t k1 = value;
- k1 *= UINT64_C(0x87c37b91114253d5);
- k1 = ROTL64(k1, 31);
- k1 *= UINT64_C(0x4cf5ad432745937f);
-
- hash ^= k1;
- hash = ROTL64(hash, 27);
- hash = hash * 5 + UINT64_C(0x52dce729);
-
- return hash;
-}
-
-size_t MurmurHash::finish(size_t hash, size_t entryCount) {
- hash ^= entryCount * 8;
- hash ^= hash >> 33;
- hash *= UINT64_C(0xff51afd7ed558ccd);
- hash ^= hash >> 33;
- hash *= UINT64_C(0xc4ceb9fe1a85ec53);
- hash ^= hash >> 33;
- return hash;
-}
-
-#elif SIZE_MAX == UINT32_MAX
-
-size_t MurmurHash::update(size_t hash, size_t value) {
- size_t k1 = value;
- k1 *= UINT32_C(0xCC9E2D51);
- k1 = ROTL32(k1, 15);
- k1 *= UINT32_C(0x1B873593);
-
- hash ^= k1;
- hash = ROTL32(hash, 13);
- hash = hash * 5 + UINT32_C(0xE6546B64);
-
- return hash;
-}
-
-size_t MurmurHash::finish(size_t hash, size_t entryCount) {
- hash ^= entryCount * 4;
- hash ^= hash >> 16;
- hash *= UINT32_C(0x85EBCA6B);
- hash ^= hash >> 13;
- hash *= UINT32_C(0xC2B2AE35);
- hash ^= hash >> 16;
- return hash;
-}
-
-#else
-#error "Expected sizeof(size_t) to be 4 or 8."
-#endif
-
-size_t MurmurHash::update(size_t hash, const void *data, size_t size) {
- size_t value;
- const uint8_t *bytes = static_cast<const uint8_t*>(data);
- while (size >= sizeof(size_t)) {
- std::memcpy(&value, bytes, sizeof(size_t));
- hash = update(hash, value);
- bytes += sizeof(size_t);
- size -= sizeof(size_t);
- }
- if (size != 0) {
- value = 0;
- std::memcpy(&value, bytes, size);
- hash = update(hash, value);
- }
- return hash;
-}
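A hedged sketch of the initialize/update/finish protocol implemented above, hashing two scalar fields and one raw buffer; the values are arbitrary:

#include "misc/MurmurHash.h"

#include <cstddef>
#include <iostream>

int main() {
  using antlr4::misc::MurmurHash;

  const char buffer[] = "grammar";

  size_t hash = MurmurHash::initialize();                       // default seed 0
  hash = MurmurHash::update(hash, size_t{42});                  // first field
  hash = MurmurHash::update(hash, size_t{7});                   // second field
  hash = MurmurHash::update(hash, buffer, sizeof(buffer) - 1);  // raw bytes
  hash = MurmurHash::finish(hash, 3);                           // 3 update() calls

  std::cout << std::hex << hash << '\n';
  return 0;
}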
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.h b/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.h
deleted file mode 100644
index cde7ac7906..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/MurmurHash.h
+++ /dev/null
@@ -1,102 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstdint>
-#include <type_traits>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
- class ANTLR4CPP_PUBLIC MurmurHash final {
- private:
- static constexpr size_t DEFAULT_SEED = 0;
-
- /// Initialize the hash using the default seed value.
- /// Returns the intermediate hash value.
- public:
- static size_t initialize() { return initialize(DEFAULT_SEED); }
-
- /// Initialize the hash using the specified seed.
- static size_t initialize(size_t seed) { return seed; }
-
- /// Update the intermediate hash value for the next input {@code value}.
- /// <param name="hash"> the intermediate hash value </param>
- /// <param name="value"> the value to add to the current hash </param>
- /// Returns the updated intermediate hash value.
- static size_t update(size_t hash, size_t value);
-
- /**
- * Update the intermediate hash value for the next input {@code value}.
- *
- * @param hash the intermediate hash value
- * @param value the value to add to the current hash
- * @return the updated intermediate hash value
- */
- template <class T>
- static size_t update(size_t hash, Ref<T> const& value) {
- return update(hash, value != nullptr ? value->hashCode() : 0);
- }
-
- template <class T>
- static size_t update(size_t hash, T *value) {
- return update(hash, value != nullptr ? value->hashCode() : 0);
- }
-
- static size_t update(size_t hash, const void *data, size_t size);
-
- template <typename T>
- static size_t update(size_t hash, const T *data, size_t size) {
- return update(hash, static_cast<const void*>(data), size * sizeof(std::remove_reference_t<T>));
- }
-
- /// <summary>
- /// Apply the final computation steps to the intermediate value {@code hash}
- /// to form the final result of the MurmurHash 3 hash function.
- /// </summary>
- /// <param name="hash"> the intermediate hash value </param>
- /// <param name="entryCount"> the number of calls to update() before calling finish() </param>
- /// <returns> the final hash result </returns>
- static size_t finish(size_t hash, size_t entryCount);
-
- /// Utility function to compute the hash code of an array using the MurmurHash3 algorithm.
- ///
- /// @param <T> the array element type </param>
- /// <param name="data"> the array data </param>
- /// <param name="seed"> the seed for the MurmurHash algorithm </param>
- /// <returns> the hash code of the data </returns>
- template<typename T> // where T is C array type
- static size_t hashCode(const std::vector<Ref<T>> &data, size_t seed = DEFAULT_SEED) {
- size_t hash = initialize(seed);
- for (auto &entry : data) {
- hash = update(hash, entry);
- }
- return finish(hash, data.size());
- }
-
- static size_t hashCode(const void *data, size_t size, size_t seed = DEFAULT_SEED) {
- size_t hash = initialize(seed);
- hash = update(hash, data, size);
- return finish(hash, size);
- }
-
- template <typename T>
- static size_t hashCode(const T *data, size_t size, size_t seed = DEFAULT_SEED) {
- return hashCode(static_cast<const void*>(data), size * sizeof(std::remove_reference_t<T>), seed);
- }
-
- private:
- MurmurHash() = delete;
-
- MurmurHash(const MurmurHash&) = delete;
-
- MurmurHash& operator=(const MurmurHash&) = delete;
- };
-
-} // namespace misc
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.cpp b/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.cpp
deleted file mode 100644
index c35f1921c4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "misc/Predicate.h"
-
-antlr4::misc::Predicate::~Predicate() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.h b/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.h
deleted file mode 100644
index 1032d53fed..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/misc/Predicate.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace misc {
-
- class ANTLR4CPP_PUBLIC Predicate {
- public:
- virtual ~Predicate();
-
- virtual bool test(tree::ParseTree *t) = 0;
- };
-
-} // namespace misc
-} // namespace antlr4
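A minimal, purely illustrative Predicate implementation (NonEmptyPredicate is not a runtime class); it assumes tree/ParseTree.h with its public children vector:

#include "misc/Predicate.h"
#include "tree/ParseTree.h"

// Matches any node that has at least one child.
class NonEmptyPredicate final : public antlr4::misc::Predicate {
public:
  bool test(antlr4::tree::ParseTree *t) override {
    return t != nullptr && !t->children.empty();
  }
};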
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Any.cpp b/contrib/libs/antlr4_cpp_runtime/src/support/Any.cpp
deleted file mode 100644
index a1ed50d456..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Any.cpp
+++ /dev/null
@@ -1,8 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Any.h"
-
-using namespace antlrcpp;
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Any.h b/contrib/libs/antlr4_cpp_runtime/src/support/Any.h
deleted file mode 100644
index fa5df58946..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Any.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-// A standard C++ class loosely modeled after boost::Any.
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- using Any = std::any;
-
-} // namespace antlrcpp
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.cpp b/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.cpp
deleted file mode 100644
index b3c4f94f2f..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.cpp
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "Exceptions.h"
-
-#include "support/Arrays.h"
-
-using namespace antlrcpp;
-
-std::string Arrays::listToString(const std::vector<std::string> &list, const std::string &separator)
-{
- std::stringstream ss;
- bool firstEntry = true;
-
- ss << '[';
-  for (const auto &entry : list) {
-    if (!firstEntry) {
-      ss << separator;
-    }
-    firstEntry = false;
-    ss << entry;
- }
-
- ss << ']';
- return ss.str();
-}
-
-template <>
-std::string Arrays::toString(const std::vector<antlr4::tree::ParseTree*> &source) {
- std::string result = "[";
- bool firstEntry = true;
-  for (auto *value : source) {
-    if (!firstEntry) {
-      result += ", ";
-    }
-    firstEntry = false;
-    result += value->toStringTree();
- }
- return result + "]";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.h b/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.h
deleted file mode 100644
index 04b852d986..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Arrays.h
+++ /dev/null
@@ -1,149 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- class ANTLR4CPP_PUBLIC Arrays {
- public:
-
- static std::string listToString(const std::vector<std::string> &list, const std::string &separator);
-
- template <typename T>
- static bool equals(const std::vector<T> &a, const std::vector<T> &b) {
- if (a.size() != b.size())
- return false;
-
- for (size_t i = 0; i < a.size(); ++i)
- if (!(a[i] == b[i]))
- return false;
-
- return true;
- }
-
- template <typename T>
- static bool equals(const std::vector<T *> &a, const std::vector<T *> &b) {
- if (a.size() != b.size())
- return false;
-
- for (size_t i = 0; i < a.size(); ++i) {
- if (!a[i] && !b[i])
- continue;
- if (!a[i] || !b[i])
- return false;
- if (a[i] == b[i])
- continue;
-
- if (!(*a[i] == *b[i]))
- return false;
- }
-
- return true;
- }
-
- template <typename T>
- static bool equals(const std::vector<Ref<T>> &a, const std::vector<Ref<T>> &b) {
- if (a.size() != b.size())
- return false;
-
- for (size_t i = 0; i < a.size(); ++i) {
- if (!a[i] && !b[i])
- continue;
- if (!a[i] || !b[i])
- return false;
- if (a[i] == b[i])
- continue;
-
- if (!(*a[i] == *b[i]))
- return false;
- }
-
- return true;
- }
-
- template <typename T>
- static bool equals(const std::vector<std::unique_ptr<T>> &a, const std::vector<std::unique_ptr<T>> &b) {
- if (a.size() != b.size())
- return false;
-
- for (size_t i = 0; i < a.size(); ++i) {
- if (!a[i] && !b[i])
- continue;
- if (!a[i] || !b[i])
- return false;
- if (a[i] == b[i])
- continue;
-
- if (!(*a[i] == *b[i]))
- return false;
- }
-
- return true;
- }
-
- template <typename T>
- static std::string toString(const std::vector<T> &source) {
- std::string result = "[";
- bool firstEntry = true;
-      for (auto &value : source) {
-        if (!firstEntry) {
-          result += ", ";
-        }
-        firstEntry = false;
-        result += value.toString();
- }
- return result + "]";
- }
-
- template <typename T>
- static std::string toString(const std::vector<Ref<T>> &source) {
- std::string result = "[";
- bool firstEntry = true;
-      for (auto &value : source) {
-        if (!firstEntry) {
-          result += ", ";
-        }
-        firstEntry = false;
-        result += value->toString();
- }
- return result + "]";
- }
-
- template <typename T>
- static std::string toString(const std::vector<std::unique_ptr<T>> &source) {
- std::string result = "[";
- bool firstEntry = true;
-      for (auto &value : source) {
-        if (!firstEntry) {
-          result += ", ";
-        }
-        firstEntry = false;
-        result += value->toString();
- }
- return result + "]";
- }
-
- template <typename T>
- static std::string toString(const std::vector<T *> &source) {
- std::string result = "[";
- bool firstEntry = true;
-      for (auto value : source) {
-        if (!firstEntry) {
-          result += ", ";
-        }
-        firstEntry = false;
-        result += value->toString();
- }
- return result + "]";
- }
-
- };
-
- template <>
- std::string Arrays::toString(const std::vector<antlr4::tree::ParseTree *> &source);
-}
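A short usage sketch for the helpers above; element-wise equality and the bracketed list rendering are the two common entry points:

#include "support/Arrays.h"

#include <cassert>
#include <iostream>
#include <string>
#include <vector>

int main() {
  std::vector<std::string> names = { "expr", "term", "factor" };
  // Bracketed, separator-joined rendering of the list.
  std::cout << antlrcpp::Arrays::listToString(names, ", ") << '\n';

  std::vector<int> a = { 1, 2, 3 };
  std::vector<int> b = { 1, 2, 3 };
  assert(antlrcpp::Arrays::equals(a, b));  // element-wise comparison
  return 0;
}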
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/BitSet.h b/contrib/libs/antlr4_cpp_runtime/src/support/BitSet.h
deleted file mode 100644
index bb30364be0..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/BitSet.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- class ANTLR4CPP_PUBLIC BitSet : public std::bitset<2048> {
- public:
- size_t nextSetBit(size_t pos) const {
- for (size_t i = pos; i < size(); i++){
- if (test(i)) {
- return i;
- }
- }
-
- return INVALID_INDEX;
- }
-
-    // Prints a list of every index at which the bitset has a set bit.
- friend std::wostream& operator << (std::wostream& os, const BitSet& obj)
- {
- os << "{";
- size_t total = obj.count();
- for (size_t i = 0; i < obj.size(); i++){
- if (obj.test(i)){
- os << i;
- --total;
-          if (total > 0){
- os << ", ";
- }
- }
- }
-
- os << "}";
- return os;
- }
-
- static std::string subStringRepresentation(const std::vector<BitSet>::iterator &begin,
- const std::vector<BitSet>::iterator &end) {
- std::string result;
- std::vector<BitSet>::iterator vectorIterator;
-
- for (vectorIterator = begin; vectorIterator != end; vectorIterator++) {
- result += vectorIterator->toString();
- }
- // Grab the end
- result += end->toString();
-
- return result;
- }
-
- std::string toString() const {
- std::stringstream stream;
- stream << "{";
- bool valueAdded = false;
- for (size_t i = 0; i < size(); ++i){
- if (test(i)){
- if (valueAdded) {
- stream << ", ";
- }
- stream << i;
- valueAdded = true;
- }
- }
-
- stream << "}";
- return stream.str();
- }
-
- };
-}
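A hedged sketch of iterating a BitSet with nextSetBit(), which returns INVALID_INDEX once no further bit is set:

#include "support/BitSet.h"

#include <iostream>

int main() {
  antlrcpp::BitSet alts;
  alts.set(1);
  alts.set(3);
  alts.set(7);

  // Walk every set bit; INVALID_INDEX marks the end of the iteration.
  for (size_t i = alts.nextSetBit(0); i != INVALID_INDEX; i = alts.nextSetBit(i + 1)) {
    std::cout << i << ' ';
  }
  std::cout << '\n' << alts.toString() << '\n';  // "{1, 3, 7}"
  return 0;
}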
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.cpp b/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.cpp
deleted file mode 100644
index 95321b3dc1..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.cpp
+++ /dev/null
@@ -1,207 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-
-namespace antlrcpp {
-
- std::string join(const std::vector<std::string> &strings, const std::string &separator) {
- std::string str;
- bool firstItem = true;
- for (const std::string &s : strings) {
- if (!firstItem) {
- str.append(separator);
- }
- firstItem = false;
- str.append(s);
- }
- return str;
- }
-
- std::map<std::string, size_t> toMap(const std::vector<std::string> &keys) {
- std::map<std::string, size_t> result;
- for (size_t i = 0; i < keys.size(); ++i) {
- result.insert({ keys[i], i });
- }
- return result;
- }
-
- std::string escapeWhitespace(std::string str, bool escapeSpaces) {
- std::string result;
- for (auto c : str) {
- switch (c) {
- case '\n':
- result += "\\n";
- break;
-
- case '\r':
- result += "\\r";
- break;
-
- case '\t':
- result += "\\t";
- break;
-
- case ' ':
- if (escapeSpaces) {
- result += "\u00B7";
- break;
- }
- result += c;
- break;
-
- default:
- result += c;
- break;
- }
- }
-
- return result;
- }
-
- std::string toHexString(const int t) {
- std::stringstream stream;
- stream << std::uppercase << std::hex << t;
- return stream.str();
- }
-
- std::string arrayToString(const std::vector<std::string> &data) {
- std::string answer;
- size_t toReserve = 0;
- for (const auto &sub : data) {
- toReserve += sub.size();
- }
- answer.reserve(toReserve);
- for (const auto &sub: data) {
- answer.append(sub);
- }
- return answer;
- }
-
- std::string replaceString(const std::string &s, const std::string &from, const std::string &to) {
- std::string::size_type p;
- std::string ss, res;
-
- ss = s;
- p = ss.find(from);
- while (p != std::string::npos) {
- if (p > 0)
- res.append(ss.substr(0, p)).append(to);
- else
- res.append(to);
- ss = ss.substr(p + from.size());
- p = ss.find(from);
- }
- res.append(ss);
-
- return res;
- }
-
- std::vector<std::string> split(const std::string &s, const std::string &sep, int count) {
- std::vector<std::string> parts;
- std::string ss = s;
-
- std::string::size_type p;
-
- if (s.empty())
- return parts;
-
- if (count == 0)
- count= -1;
-
- p = ss.find(sep);
- while (!ss.empty() && p != std::string::npos && (count < 0 || count > 0)) {
- parts.push_back(ss.substr(0, p));
- ss = ss.substr(p+sep.size());
-
- --count;
- p = ss.find(sep);
- }
- parts.push_back(ss);
-
- return parts;
- }
-
- //--------------------------------------------------------------------------------------------------
-
- // Debugging helper. Adds indentation to all lines in the given string.
- std::string indent(const std::string &s, const std::string &indentation, bool includingFirst) {
- std::vector<std::string> parts = split(s, "\n", -1);
- for (size_t i = 0; i < parts.size(); ++i) {
- if (i == 0 && !includingFirst)
- continue;
- parts[i].insert(0, indentation);
- }
-
- return join(parts, "\n");
- }
-
- //--------------------------------------------------------------------------------------------------
-
- // Recursively get the error from a, possibly nested, exception.
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- // No nested exceptions before VS 2015.
- template <typename T>
- std::exception_ptr get_nested(const T &/*e*/) {
- try {
- return nullptr;
- }
- catch (const std::bad_cast &) {
- return nullptr;
- }
- }
-#else
- template <typename T>
- std::exception_ptr get_nested(const T &e) {
- try {
- auto nested = dynamic_cast<const std::nested_exception&>(e);
- return nested.nested_ptr();
- }
- catch (const std::bad_cast &) {
- return nullptr;
- }
- }
-#endif
-
- std::string what(std::exception_ptr eptr) {
- if (!eptr) {
- throw std::bad_exception();
- }
-
- std::string result;
- std::size_t nestCount = 0;
-
- next: {
- try {
- std::exception_ptr yeptr;
- std::swap(eptr, yeptr);
- std::rethrow_exception(yeptr);
- }
- catch (const std::exception &e) {
- result += e.what();
- eptr = get_nested(e);
- }
- catch (const std::string &e) {
- result += e;
- }
- catch (const char *e) {
- result += e;
- }
- catch (...) {
- result += "cannot be determined";
- }
-
- if (eptr) {
- result += " (";
- ++nestCount;
- goto next;
- }
- }
-
- result += std::string(nestCount, ')');
- return result;
- }
-
-} // namespace antlrcpp
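The nested-exception handling in what() above flattens a chain of exceptions into a single string; a hedged sketch of that behaviour:

#include "support/CPPUtils.h"

#include <exception>
#include <iostream>
#include <stdexcept>

int main() {
  try {
    try {
      throw std::runtime_error("token stream exhausted");
    } catch (...) {
      std::throw_with_nested(std::runtime_error("parse failed"));
    }
  } catch (...) {
    // Expected to print: parse failed (token stream exhausted)
    std::cout << antlrcpp::what(std::current_exception()) << '\n';
  }
  return 0;
}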
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.h b/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.h
deleted file mode 100644
index 2eb1a36037..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/CPPUtils.h
+++ /dev/null
@@ -1,65 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- ANTLR4CPP_PUBLIC std::string join(const std::vector<std::string> &strings, const std::string &separator);
- ANTLR4CPP_PUBLIC std::map<std::string, size_t> toMap(const std::vector<std::string> &keys);
- ANTLR4CPP_PUBLIC std::string escapeWhitespace(std::string str, bool escapeSpaces);
- ANTLR4CPP_PUBLIC std::string toHexString(const int t);
- ANTLR4CPP_PUBLIC std::string arrayToString(const std::vector<std::string> &data);
- ANTLR4CPP_PUBLIC std::string replaceString(const std::string &s, const std::string &from, const std::string &to);
- ANTLR4CPP_PUBLIC std::vector<std::string> split(const std::string &s, const std::string &sep, int count);
- ANTLR4CPP_PUBLIC std::string indent(const std::string &s, const std::string &indentation, bool includingFirst = true);
-
- // Using RAII + a lambda to implement a "finally" replacement.
- template <typename OnEnd>
- struct FinalAction {
- FinalAction(OnEnd f) : _cleanUp { std::move(f) } {}
- FinalAction(FinalAction &&other) :
- _cleanUp(std::move(other._cleanUp)), _enabled(other._enabled) {
- other._enabled = false; // Don't trigger the lambda after ownership has moved.
- }
- ~FinalAction() { if (_enabled) _cleanUp(); }
-
- void disable() { _enabled = false; }
- private:
- OnEnd _cleanUp;
- bool _enabled {true};
- };
-
- template <typename OnEnd>
- FinalAction<OnEnd> finally(OnEnd f) {
- return FinalAction<OnEnd>(std::move(f));
- }
-
- // Convenience functions to avoid lengthy dynamic_cast() != nullptr checks in many places.
- template <typename T1, typename T2>
- inline bool is(T2 *obj) { // For pointer types.
- return dynamic_cast<typename std::add_const<T1>::type>(obj) != nullptr;
- }
-
- template <typename T1, typename T2>
- inline bool is(Ref<T2> const& obj) { // For shared pointers.
- return dynamic_cast<T1 *>(obj.get()) != nullptr;
- }
-
- template <typename T>
- std::string toString(const T &o) {
- std::stringstream ss;
-    // typeid gives the mangled class name, but that's all that is possible
-    // in a portable way.
- ss << typeid(o).name() << "@" << std::hex << reinterpret_cast<uintptr_t>(&o);
- return ss.str();
- }
-
- // Get the error text from an exception pointer or the current exception.
- ANTLR4CPP_PUBLIC std::string what(std::exception_ptr eptr = std::current_exception());
-
-} // namespace antlrcpp
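A brief sketch of the finally() helper declared above, plus join(); the file name is hypothetical:

#include "support/CPPUtils.h"

#include <cstdio>
#include <iostream>

int main() {
  std::FILE *f = std::fopen("tokens.txt", "w");  // hypothetical file name
  {
    // Close the file on every exit path, mirroring a "finally" block.
    auto onExit = antlrcpp::finally([&] {
      if (f != nullptr) {
        std::fclose(f);
      }
    });
    if (f != nullptr) {
      std::fputs("ID NUMBER WS\n", f);
    }
  }  // onExit's destructor runs the lambda here

  std::cout << antlrcpp::join({ "a", "b", "c" }, "-") << '\n';  // "a-b-c"
  return 0;
}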
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Casts.h b/contrib/libs/antlr4_cpp_runtime/src/support/Casts.h
deleted file mode 100644
index 2ded955dcd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Casts.h
+++ /dev/null
@@ -1,34 +0,0 @@
-/* Copyright (c) 2012-2021 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cassert>
-#include <memory>
-#include <type_traits>
-
-namespace antlrcpp {
-
- template <typename To, typename From>
- To downCast(From* from) {
- static_assert(std::is_pointer_v<To>, "Target type not a pointer.");
- static_assert(std::is_base_of_v<From, std::remove_pointer_t<To>>, "Target type not derived from source type.");
- #if !defined(__GNUC__) || defined(__GXX_RTTI)
- assert(from == nullptr || dynamic_cast<To>(from) != nullptr);
- #endif
- return static_cast<To>(from);
- }
-
- template <typename To, typename From>
- To downCast(From& from) {
- static_assert(std::is_lvalue_reference_v<To>, "Target type not a lvalue reference.");
- static_assert(std::is_base_of_v<From, std::remove_reference_t<To>>, "Target type not derived from source type.");
- #if !defined(__GNUC__) || defined(__GXX_RTTI)
- assert(dynamic_cast<std::add_pointer_t<std::remove_reference_t<To>>>(std::addressof(from)) != nullptr);
- #endif
- return static_cast<To>(from);
- }
-
-}
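A small sketch of how the removed downCast() overloads are meant to be used; the Base/Derived hierarchy here is hypothetical and exists only for illustration:

#include "support/Casts.h"

struct Base { virtual ~Base() = default; };
struct Derived : Base { int value = 42; };

int main() {
  Derived d;
  Base *base = &d;
  // The pointer overload static_asserts that Derived derives from Base and,
  // when RTTI is available, asserts the dynamic type before the static_cast.
  Derived *p = antlrcpp::downCast<Derived*>(base);
  // The reference overload does the same for lvalue references.
  Derived &r = antlrcpp::downCast<Derived&>(*base);
  return (p->value == r.value) ? 0 : 1;
}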
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Declarations.h b/contrib/libs/antlr4_cpp_runtime/src/support/Declarations.h
deleted file mode 100644
index 8e960676cf..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Declarations.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-namespace antlr4 {
- class ANTLRErrorListener;
- class ANTLRErrorStrategy;
- class ANTLRFileStream;
- class ANTLRInputStream;
- class BailErrorStrategy;
- class BaseErrorListener;
- class BufferedTokenStream;
- class CharStream;
- class CommonToken;
- class CommonTokenFactory;
- class CommonTokenStream;
- class ConsoleErrorListener;
- class DefaultErrorStrategy;
- class DiagnosticErrorListener;
- class EmptyStackException;
- class FailedPredicateException;
- class IllegalArgumentException;
- class IllegalStateException;
- class InputMismatchException;
- class IntStream;
- class InterpreterRuleContext;
- class Lexer;
- class LexerInterpreter;
- class LexerNoViableAltException;
- class ListTokenSource;
- class NoSuchElementException;
- class NoViableAltException;
- class NullPointerException;
- class ParseCancellationException;
- class Parser;
- class ParserInterpreter;
- class ParserRuleContext;
- class ProxyErrorListener;
- class RecognitionException;
- class Recognizer;
- class RuleContext;
- class Token;
- template<typename Symbol> class TokenFactory;
- class TokenSource;
- class TokenStream;
- class TokenStreamRewriter;
- class UnbufferedCharStream;
- class UnbufferedTokenStream;
- class WritableToken;
-
- namespace misc {
- class InterpreterDataReader;
- class Interval;
- class IntervalSet;
- class MurmurHash;
- class Utils;
- class Predicate;
- }
- namespace atn {
- class ATN;
- class ATNConfig;
- class ATNConfigSet;
- class ATNDeserializationOptions;
- class ATNDeserializer;
- class ATNSerializer;
- class ATNSimulator;
- class ATNState;
- enum class ATNType;
- class ActionTransition;
- class ArrayPredictionContext;
- class AtomTransition;
- class BasicBlockStartState;
- class BasicState;
- class BlockEndState;
- class BlockStartState;
- class DecisionState;
- class EpsilonTransition;
- class LL1Analyzer;
- class LexerAction;
- class LexerActionExecutor;
- class LexerATNConfig;
- class LexerATNSimulator;
- class LexerMoreAction;
- class LexerPopModeAction;
- class LexerSkipAction;
- class LookaheadEventInfo;
- class LoopEndState;
- class NotSetTransition;
- class OrderedATNConfigSet;
- class ParseInfo;
- class ParserATNSimulator;
- class PlusBlockStartState;
- class PlusLoopbackState;
- class PrecedencePredicateTransition;
- class PredicateTransition;
- class PredictionContext;
- enum class PredictionMode;
- class PredictionModeClass;
- class RangeTransition;
- class RuleStartState;
- class RuleStopState;
- class RuleTransition;
- class SemanticContext;
- class SetTransition;
- class SingletonPredictionContext;
- class StarBlockStartState;
- class StarLoopEntryState;
- class StarLoopbackState;
- class TokensStartState;
- class Transition;
- class WildcardTransition;
- }
- namespace dfa {
- class DFA;
- class DFASerializer;
- class DFAState;
- class LexerDFASerializer;
- class Vocabulary;
- }
- namespace tree {
- class AbstractParseTreeVisitor;
- class ErrorNode;
- class ErrorNodeImpl;
- class ParseTree;
- class ParseTreeListener;
- template<typename T> class ParseTreeProperty;
- class ParseTreeVisitor;
- class ParseTreeWalker;
- class SyntaxTree;
- class TerminalNode;
- class TerminalNodeImpl;
- class Tree;
- class Trees;
-
- namespace pattern {
- class Chunk;
- class ParseTreeMatch;
- class ParseTreePattern;
- class ParseTreePatternMatcher;
- class RuleTagToken;
- class TagChunk;
- class TextChunk;
- class TokenTagToken;
- }
-
- namespace xpath {
- class XPath;
- class XPathElement;
- class XPathLexerErrorListener;
- class XPathRuleAnywhereElement;
- class XPathRuleElement;
- class XPathTokenAnywhereElement;
- class XPathTokenElement;
- class XPathWildcardAnywhereElement;
- class XPathWildcardElement;
- }
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.cpp b/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.cpp
deleted file mode 100644
index 9ee274c8de..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/StringUtils.h"
-
-namespace antlrcpp {
-
- std::string escapeWhitespace(std::string_view in) {
- std::string out;
- escapeWhitespace(out, in);
- out.shrink_to_fit();
- return out;
- }
-
- std::string& escapeWhitespace(std::string& out, std::string_view in) {
- out.reserve(in.size()); // Best case, no escaping.
- for (const auto &c : in) {
- switch (c) {
- case '\t':
- out.append("\\t");
- break;
- case '\r':
- out.append("\\r");
- break;
- case '\n':
- out.append("\\n");
- break;
- default:
- out.push_back(c);
- break;
- }
- }
- return out;
- }
-
-} // namespace antlrcpp
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.h b/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.h
deleted file mode 100644
index aee0d46d6e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/StringUtils.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- ANTLR4CPP_PUBLIC std::string escapeWhitespace(std::string_view in);
-
- ANTLR4CPP_PUBLIC std::string& escapeWhitespace(std::string& out, std::string_view in);
-
-}
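A short sketch of the escapeWhitespace() overloads declared in the removed StringUtils.h; the one-argument form allocates a new string, the two-argument form appends into a caller-supplied buffer:

#include <iostream>
#include <string>

#include "support/StringUtils.h"

int main() {
  // Tabs, carriage returns and newlines are replaced by their escape
  // sequences; all other characters are copied through unchanged.
  std::string escaped = antlrcpp::escapeWhitespace("a\tb\nc");
  std::cout << escaped << '\n';  // prints a\tb\nc with literal backslashes

  std::string buffer;
  antlrcpp::escapeWhitespace(buffer, "x\ry");  // append into an existing buffer
  std::cout << buffer << '\n';
  return 0;
}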
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Unicode.h b/contrib/libs/antlr4_cpp_runtime/src/support/Unicode.h
deleted file mode 100644
index f0f84375ad..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Unicode.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2021 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- class ANTLR4CPP_PUBLIC Unicode final {
- public:
- static constexpr char32_t REPLACEMENT_CHARACTER = 0xfffd;
-
- static constexpr bool isValid(char32_t codePoint) {
- return codePoint < 0xd800 || (codePoint > 0xdfff && codePoint <= 0x10ffff);
- }
-
- private:
- Unicode() = delete;
- Unicode(const Unicode&) = delete;
- Unicode(Unicode&&) = delete;
- Unicode& operator=(const Unicode&) = delete;
- Unicode& operator=(Unicode&&) = delete;
- };
-
-}
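Since Unicode::isValid() in the removed header is constexpr, the surrogate gap and the upper bound of the code space can be checked at compile time; a tiny sketch:

#include "support/Unicode.h"

// isValid() rejects the surrogate range U+D800..U+DFFF and anything above U+10FFFF.
static_assert(antlrcpp::Unicode::isValid(U'A'));
static_assert(!antlrcpp::Unicode::isValid(0xd800));    // unpaired surrogate
static_assert(!antlrcpp::Unicode::isValid(0x110000));  // beyond U+10FFFF

int main() { return 0; }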
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.cpp b/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.cpp
deleted file mode 100644
index 294e9f1b21..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.cpp
+++ /dev/null
@@ -1,242 +0,0 @@
-/* Copyright (c) 2021 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include <cassert>
-#include <cstdint>
-
-#include "support/Utf8.h"
-#include "support/Unicode.h"
-
-// The below implementation is based off of https://github.com/google/cel-cpp/internal/utf8.cc,
-// which is itself based off of https://go.googlesource.com/go/+/refs/heads/master/src/unicode/utf8/utf8.go.
-// If for some reason you feel the need to copy this implementation, please retain a comment
-// referencing the two source files and giving credit, as well as maintaining any and all
-// obligations required by the BSD 3-clause license that governs this file.
-
-namespace antlrcpp {
-
-namespace {
-
-#undef SELF
- constexpr uint8_t SELF = 0x80;
-
-#undef LOW
- constexpr uint8_t LOW = 0x80;
-#undef HIGH
- constexpr uint8_t HIGH = 0xbf;
-
-#undef MASKX
- constexpr uint8_t MASKX = 0x3f;
-#undef MASK2
- constexpr uint8_t MASK2 = 0x1f;
-#undef MASK3
- constexpr uint8_t MASK3 = 0xf;
-#undef MASK4
- constexpr uint8_t MASK4 = 0x7;
-
-#undef TX
- constexpr uint8_t TX = 0x80;
-#undef T2
- constexpr uint8_t T2 = 0xc0;
-#undef T3
- constexpr uint8_t T3 = 0xe0;
-#undef T4
- constexpr uint8_t T4 = 0xf0;
-
-#undef XX
- constexpr uint8_t XX = 0xf1;
-#undef AS
- constexpr uint8_t AS = 0xf0;
-#undef S1
- constexpr uint8_t S1 = 0x02;
-#undef S2
- constexpr uint8_t S2 = 0x13;
-#undef S3
- constexpr uint8_t S3 = 0x03;
-#undef S4
- constexpr uint8_t S4 = 0x23;
-#undef S5
- constexpr uint8_t S5 = 0x34;
-#undef S6
- constexpr uint8_t S6 = 0x04;
-#undef S7
- constexpr uint8_t S7 = 0x44;
-
- // NOLINTBEGIN
- // clang-format off
-#undef LEADING
- constexpr uint8_t LEADING[256] = {
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x00-0x0F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x10-0x1F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x20-0x2F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x30-0x3F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x40-0x4F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x50-0x5F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x60-0x6F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x70-0x7F
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x80-0x8F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x90-0x9F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xA0-0xAF
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xB0-0xBF
- XX, XX, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xC0-0xCF
- S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xD0-0xDF
- S2, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S4, S3, S3, // 0xE0-0xEF
- S5, S6, S6, S6, S7, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xF0-0xFF
- };
- // clang-format on
- // NOLINTEND
-
-#undef ACCEPT
- constexpr std::pair<uint8_t, uint8_t> ACCEPT[16] = {
- {LOW, HIGH}, {0xa0, HIGH}, {LOW, 0x9f}, {0x90, HIGH},
- {LOW, 0x8f}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
- {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
- {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0}, {0x0, 0x0},
- };
-
-} // namespace
-
- std::pair<char32_t, size_t> Utf8::decode(std::string_view input) {
- assert(!input.empty());
- const auto b = static_cast<uint8_t>(input.front());
- input.remove_prefix(1);
- if (b < SELF) {
- return {static_cast<char32_t>(b), 1};
- }
- const auto leading = LEADING[b];
- if (leading == XX) {
- return {Unicode::REPLACEMENT_CHARACTER, 1};
- }
- auto size = static_cast<size_t>(leading & 7) - 1;
- if (size > input.size()) {
- return {Unicode::REPLACEMENT_CHARACTER, 1};
- }
- const auto& accept = ACCEPT[leading >> 4];
- const auto b1 = static_cast<uint8_t>(input.front());
- input.remove_prefix(1);
- if (b1 < accept.first || b1 > accept.second) {
- return {Unicode::REPLACEMENT_CHARACTER, 1};
- }
- if (size <= 1) {
- return {(static_cast<char32_t>(b & MASK2) << 6) |
- static_cast<char32_t>(b1 & MASKX),
- 2};
- }
- const auto b2 = static_cast<uint8_t>(input.front());
- input.remove_prefix(1);
- if (b2 < LOW || b2 > HIGH) {
- return {Unicode::REPLACEMENT_CHARACTER, 1};
- }
- if (size <= 2) {
- return {(static_cast<char32_t>(b & MASK3) << 12) |
- (static_cast<char32_t>(b1 & MASKX) << 6) |
- static_cast<char32_t>(b2 & MASKX),
- 3};
- }
- const auto b3 = static_cast<uint8_t>(input.front());
- input.remove_prefix(1);
- if (b3 < LOW || b3 > HIGH) {
- return {Unicode::REPLACEMENT_CHARACTER, 1};
- }
- return {(static_cast<char32_t>(b & MASK4) << 18) |
- (static_cast<char32_t>(b1 & MASKX) << 12) |
- (static_cast<char32_t>(b2 & MASKX) << 6) |
- static_cast<char32_t>(b3 & MASKX),
- 4};
- }
-
- std::optional<std::u32string> Utf8::strictDecode(std::string_view input) {
- std::u32string output;
- char32_t codePoint;
- size_t codeUnits;
- output.reserve(input.size()); // Worst case is each byte is a single Unicode code point.
- for (size_t index = 0; index < input.size(); index += codeUnits) {
- std::tie(codePoint, codeUnits) = Utf8::decode(input.substr(index));
- if (codePoint == Unicode::REPLACEMENT_CHARACTER && codeUnits == 1) {
- // Condition is only met when an illegal byte sequence is encountered. See Utf8::decode.
- return std::nullopt;
- }
- output.push_back(codePoint);
- }
- output.shrink_to_fit();
- return output;
- }
-
- std::u32string Utf8::lenientDecode(std::string_view input) {
- std::u32string output;
- char32_t codePoint;
- size_t codeUnits;
- output.reserve(input.size()); // Worst case is each byte is a single Unicode code point.
- for (size_t index = 0; index < input.size(); index += codeUnits) {
- std::tie(codePoint, codeUnits) = Utf8::decode(input.substr(index));
- output.push_back(codePoint);
- }
- output.shrink_to_fit();
- return output;
- }
-
- std::string& Utf8::encode(std::string* buffer, char32_t codePoint) {
- assert(buffer != nullptr);
- if (!Unicode::isValid(codePoint)) {
- codePoint = Unicode::REPLACEMENT_CHARACTER;
- }
- if (codePoint <= 0x7f) {
- buffer->push_back(static_cast<char>(static_cast<uint8_t>(codePoint)));
- } else if (codePoint <= 0x7ff) {
- buffer->push_back(
- static_cast<char>(T2 | static_cast<uint8_t>(codePoint >> 6)));
- buffer->push_back(
- static_cast<char>(TX | (static_cast<uint8_t>(codePoint) & MASKX)));
- } else if (codePoint <= 0xffff) {
- buffer->push_back(
- static_cast<char>(T3 | static_cast<uint8_t>(codePoint >> 12)));
- buffer->push_back(static_cast<char>(
- TX | (static_cast<uint8_t>(codePoint >> 6) & MASKX)));
- buffer->push_back(
- static_cast<char>(TX | (static_cast<uint8_t>(codePoint) & MASKX)));
- } else {
- buffer->push_back(
- static_cast<char>(T4 | static_cast<uint8_t>(codePoint >> 18)));
- buffer->push_back(static_cast<char>(
- TX | (static_cast<uint8_t>(codePoint >> 12) & MASKX)));
- buffer->push_back(static_cast<char>(
- TX | (static_cast<uint8_t>(codePoint >> 6) & MASKX)));
- buffer->push_back(
- static_cast<char>(TX | (static_cast<uint8_t>(codePoint) & MASKX)));
- }
- return *buffer;
- }
-
- std::optional<std::string> Utf8::strictEncode(std::u32string_view input) {
- std::string output;
- output.reserve(input.size() * 4); // Worst case is each Unicode code point encodes to 4 bytes.
- for (size_t index = 0; index < input.size(); index++) {
- char32_t codePoint = input[index];
- if (!Unicode::isValid(codePoint)) {
- return std::nullopt;
- }
- Utf8::encode(&output, codePoint);
- }
- output.shrink_to_fit();
- return output;
- }
-
- std::string Utf8::lenientEncode(std::u32string_view input) {
- std::string output;
- output.reserve(input.size() * 4); // Worst case is each Unicode code point encodes to 4 bytes.
- for (size_t index = 0; index < input.size(); index++) {
- char32_t codePoint = input[index];
- if (!Unicode::isValid(codePoint)) {
- codePoint = Unicode::REPLACEMENT_CHARACTER;
- }
- Utf8::encode(&output, codePoint);
- }
- output.shrink_to_fit();
- return output;
- }
-
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.h b/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.h
deleted file mode 100644
index e4828441cd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/support/Utf8.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2021 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <optional>
-#include <string>
-#include <string_view>
-#include <tuple>
-
-#include "antlr4-common.h"
-
-namespace antlrcpp {
-
- class ANTLR4CPP_PUBLIC Utf8 final {
- public:
- // Decodes the next code point, returning the decoded code point and the number
- // of code units (a.k.a. bytes) consumed. In the event that an invalid code unit
- // sequence is encountered, the replacement character, U+FFFD, is returned with a
- // code unit count of 1. As U+FFFD requires 3 code units when encoded, this can
- // be used to differentiate valid input from malformed input.
- static std::pair<char32_t, size_t> decode(std::string_view input);
-
- // Decodes the given UTF-8 encoded input into a string of code points.
- static std::optional<std::u32string> strictDecode(std::string_view input);
-
- // Decodes the given UTF-8 encoded input into a string of code points. Unlike strictDecode(),
- // each byte in an illegal byte sequence is replaced with the Unicode replacement character,
- // U+FFFD.
- static std::u32string lenientDecode(std::string_view input);
-
- // Encodes the given code point and appends it to the buffer. If the code point
- // is an unpaired surrogate or outside of the valid Unicode range it is replaced
- // with the replacement character, U+FFFD.
- static std::string& encode(std::string *buffer, char32_t codePoint);
-
- // Encodes the given Unicode code point string as UTF-8.
- static std::optional<std::string> strictEncode(std::u32string_view input);
-
- // Encodes the given Unicode code point string as UTF-8. Unlike strictEncode(),
- // each invalid Unicode code point is replaced with the Unicode replacement character, U+FFFD.
- static std::string lenientEncode(std::u32string_view input);
-
- private:
- Utf8() = delete;
- Utf8(const Utf8&) = delete;
- Utf8(Utf8&&) = delete;
- Utf8& operator=(const Utf8&) = delete;
- Utf8& operator=(Utf8&&) = delete;
- };
-
-}
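A usage sketch for the removed Utf8 helpers, assuming a byte string that is already UTF-8 encoded (written with explicit \xC3\xA9 escapes to avoid source-encoding issues):

#include <iostream>
#include <string>

#include "support/Utf8.h"

int main() {
  std::string input = "h\xC3\xA9llo";  // "héllo" as UTF-8 bytes

  // strictDecode() returns std::nullopt on malformed input;
  // lenientDecode() would substitute U+FFFD instead.
  auto decoded = antlrcpp::Utf8::strictDecode(input);
  if (!decoded.has_value()) {
    std::cerr << "invalid UTF-8\n";
    return 1;
  }
  std::cout << "code points: " << decoded->size() << '\n';  // 5

  // encode() appends one code point at a time, replacing invalid
  // code points with U+FFFD.
  std::string roundTripped;
  for (char32_t codePoint : *decoded) {
    antlrcpp::Utf8::encode(&roundTripped, codePoint);
  }
  std::cout << "bytes: " << roundTripped.size() << '\n';  // 6
  return 0;
}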
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/AbstractParseTreeVisitor.h b/contrib/libs/antlr4_cpp_runtime/src/tree/AbstractParseTreeVisitor.h
deleted file mode 100644
index 25505278f2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/AbstractParseTreeVisitor.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTree.h"
-#include "tree/ParseTreeVisitor.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ANTLR4CPP_PUBLIC AbstractParseTreeVisitor : public ParseTreeVisitor {
- public:
- /// The default implementation calls <seealso cref="ParseTree#accept"/> on the
- /// specified tree.
- virtual std::any visit(ParseTree *tree) override {
- return tree->accept(this);
- }
-
- /**
- * <p>The default implementation initializes the aggregate result to
- * {@link #defaultResult defaultResult()}. Before visiting each child, it
- * calls {@link #shouldVisitNextChild shouldVisitNextChild}; if the result
- * is {@code false} no more children are visited and the current aggregate
- * result is returned. After visiting a child, the aggregate result is
- * updated by calling {@link #aggregateResult aggregateResult} with the
- * previous aggregate result and the result of visiting the child.</p>
- *
- * <p>The default implementation is not safe for use in visitors that modify
- * the tree structure. Visitors that modify the tree should override this
- * method to behave properly in respect to the specific algorithm in use.</p>
- */
- virtual std::any visitChildren(ParseTree *node) override {
- std::any result = defaultResult();
- size_t n = node->children.size();
- for (size_t i = 0; i < n; i++) {
- if (!shouldVisitNextChild(node, result)) {
- break;
- }
-
- std::any childResult = node->children[i]->accept(this);
- result = aggregateResult(std::move(result), std::move(childResult));
- }
-
- return result;
- }
-
- /// The default implementation returns the result of
- /// <seealso cref="#defaultResult defaultResult"/>.
- virtual std::any visitTerminal(TerminalNode * /*node*/) override {
- return defaultResult();
- }
-
- /// The default implementation returns the result of
- /// <seealso cref="#defaultResult defaultResult"/>.
- virtual std::any visitErrorNode(ErrorNode * /*node*/) override {
- return defaultResult();
- }
-
- protected:
- /// <summary>
- /// Gets the default value returned by visitor methods. This value is
- /// returned by the default implementations of
- /// <seealso cref="#visitTerminal visitTerminal"/>, <seealso cref="#visitErrorNode visitErrorNode"/>.
- /// The default implementation of <seealso cref="#visitChildren visitChildren"/>
- /// initializes its aggregate result to this value.
- /// <p/>
- /// The base implementation returns {@code std::any()}.
- /// </summary>
- /// <returns> The default value returned by visitor methods. </returns>
- virtual std::any defaultResult() {
- return std::any();
- }
-
- /// <summary>
- /// Aggregates the results of visiting multiple children of a node. After
- /// either all children are visited or <seealso cref="#shouldVisitNextChild"/> returns
- /// {@code false}, the aggregate value is returned as the result of
- /// <seealso cref="#visitChildren"/>.
- /// <p/>
- /// The default implementation returns {@code nextResult}, meaning
- /// <seealso cref="#visitChildren"/> will return the result of the last child visited
- /// (or return the initial value if the node has no children).
- /// </summary>
- /// <param name="aggregate"> The previous aggregate value. In the default
- /// implementation, the aggregate value is initialized to
- /// <seealso cref="#defaultResult"/>, which is passed as the {@code aggregate} argument
- /// to this method after the first child node is visited. </param>
- /// <param name="nextResult"> The result of the immediately preceding call to visit
- /// a child node.
- /// </param>
- /// <returns> The updated aggregate result. </returns>
- virtual std::any aggregateResult(std::any /*aggregate*/, std::any nextResult) {
- return nextResult;
- }
-
- /// <summary>
- /// This method is called after visiting each child in
- /// <seealso cref="#visitChildren"/>. This method is first called before the first
- /// child is visited; at that point {@code currentResult} will be the initial
- /// value (in the default implementation, the initial value is returned by a
- /// call to <seealso cref="#defaultResult"/>). This method is not called after the last
- /// child is visited.
- /// <p/>
- /// The default implementation always returns {@code true}, indicating that
- /// {@code visitChildren} should only return after all children are visited.
- /// One reason to override this method is to provide a "short circuit"
- /// evaluation option for situations where the result of visiting a single
- /// child has the potential to determine the result of the visit operation as
- /// a whole.
- /// </summary>
- /// <param name="node"> The <seealso cref="ParseTree"/> whose children are currently being
- /// visited. </param>
- /// <param name="currentResult"> The current aggregate result of the children visited
- /// to the current point.
- /// </param>
- /// <returns> {@code true} to continue visiting children. Otherwise return
- /// {@code false} to stop visiting children and immediately return the
- /// current aggregate result from <seealso cref="#visitChildren"/>. </returns>
- virtual bool shouldVisitNextChild(ParseTree * /*node*/, const std::any &/*currentResult*/) {
- return true;
- }
-
- };
-
-} // namespace tree
-} // namespace antlr4
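As a sketch of the aggregation hooks documented in the removed header above: a visitor that counts terminal nodes by overriding defaultResult() and aggregateResult(); the tree pointer in the usage comment would come from a generated parser:

#include <any>

#include "tree/AbstractParseTreeVisitor.h"
#include "tree/TerminalNode.h"

// Counts terminal nodes: defaultResult() provides the neutral value and
// aggregateResult() folds each child's result into the running total.
class TerminalCounter : public antlr4::tree::AbstractParseTreeVisitor {
public:
  std::any visitTerminal(antlr4::tree::TerminalNode * /*node*/) override {
    return 1;
  }

protected:
  std::any defaultResult() override {
    return 0;
  }

  std::any aggregateResult(std::any aggregate, std::any nextResult) override {
    return std::any_cast<int>(aggregate) + std::any_cast<int>(nextResult);
  }
};

// Usage, given a ParseTree *tree produced by a parser:
//   TerminalCounter counter;
//   int terminals = std::any_cast<int>(counter.visit(tree));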
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNode.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNode.h
deleted file mode 100644
index 319ce39e0d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNode.h
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/TerminalNode.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ANTLR4CPP_PUBLIC ErrorNode : public TerminalNode {
- public:
- static bool is(const tree::ParseTree &parseTree) { return parseTree.getTreeType() == tree::ParseTreeType::ERROR; }
-
- static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); }
-
- protected:
- using TerminalNode::TerminalNode;
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.cpp
deleted file mode 100644
index 142791dd96..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-#include "Token.h"
-#include "RuleContext.h"
-#include "tree/ParseTreeVisitor.h"
-
-#include "tree/ErrorNodeImpl.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-
-Token* ErrorNodeImpl::getSymbol() const {
- return symbol;
-}
-
-void ErrorNodeImpl::setParent(RuleContext *parent_) {
- this->parent = parent_;
-}
-
-misc::Interval ErrorNodeImpl::getSourceInterval() {
- if (symbol == nullptr) {
- return misc::Interval::INVALID;
- }
-
- size_t tokenIndex = symbol->getTokenIndex();
- return misc::Interval(tokenIndex, tokenIndex);
-}
-
-std::any ErrorNodeImpl::accept(ParseTreeVisitor *visitor) {
- return visitor->visitErrorNode(this);
-}
-
-std::string ErrorNodeImpl::getText() {
- return symbol->getText();
-}
-
-std::string ErrorNodeImpl::toStringTree(Parser * /*parser*/, bool /*pretty*/) {
- return toString();
-}
-
-std::string ErrorNodeImpl::toString() {
- if (symbol->getType() == Token::EOF) {
- return "<EOF>";
- }
- return symbol->getText();
-}
-
-std::string ErrorNodeImpl::toStringTree(bool /*pretty*/) {
- return toString();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.h
deleted file mode 100644
index 8bafb62552..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ErrorNodeImpl.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ErrorNode.h"
-#include "tree/TerminalNodeImpl.h"
-#include "misc/Interval.h"
-
-#include "support/Any.h"
-
-namespace antlr4 {
-namespace tree {
-
- /// <summary>
- /// Represents a token that was consumed during resynchronization
- /// rather than during a valid match operation. For example,
- /// we will create this kind of a node during single token insertion
- /// and deletion as well as during "consume until error recovery set"
- /// upon no viable alternative exceptions.
- /// </summary>
- class ANTLR4CPP_PUBLIC ErrorNodeImpl : public ErrorNode {
- public:
- Token *symbol;
-
- explicit ErrorNodeImpl(Token *symbol) : ErrorNode(ParseTreeType::ERROR), symbol(symbol) {}
-
- virtual Token* getSymbol() const override;
- virtual void setParent(RuleContext *parent) override;
- virtual misc::Interval getSourceInterval() override;
-
- virtual std::any accept(ParseTreeVisitor *visitor) override;
-
- virtual std::string getText() override;
- virtual std::string toStringTree(Parser *parser, bool pretty = false) override;
- virtual std::string toString() override;
- virtual std::string toStringTree(bool pretty = false) override;
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.cpp
deleted file mode 100644
index 83e6339518..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.cpp
+++ /dev/null
@@ -1,66 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "tree/ParseTreeListener.h"
-#include "tree/ParseTree.h"
-#include "tree/ErrorNode.h"
-
-#include "IterativeParseTreeWalker.h"
-
-using namespace antlr4::tree;
-using namespace antlrcpp;
-
-void IterativeParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) const {
- std::vector<std::pair<ParseTree*, size_t>> stack;
- ParseTree *currentNode = t;
- size_t currentIndex = 0;
-
- while (currentNode != nullptr) {
- // pre-order visit
- if (ErrorNode::is(*currentNode)) {
- listener->visitErrorNode(downCast<ErrorNode*>(currentNode));
- } else if (TerminalNode::is(*currentNode)) {
- listener->visitTerminal(downCast<TerminalNode*>(currentNode));
- } else {
- enterRule(listener, currentNode);
- }
-
- // Move down to first child, if it exists.
- if (!currentNode->children.empty()) {
- stack.push_back(std::make_pair(currentNode, currentIndex));
- currentIndex = 0;
- currentNode = currentNode->children[0];
- continue;
- }
-
- // No child nodes, so walk tree.
- do {
- // post-order visit
- if (!TerminalNode::is(*currentNode)) {
- exitRule(listener, currentNode);
- }
-
- // No parent, so no siblings.
- if (stack.empty()) {
- currentNode = nullptr;
- currentIndex = 0;
- break;
- }
-
- // Move to next sibling if possible.
- if (stack.back().first->children.size() > ++currentIndex) {
- currentNode = stack.back().first->children[currentIndex];
- break;
- }
-
- // No next sibling, so move up.
- std::tie(currentNode, currentIndex) = stack.back();
- stack.pop_back();
- } while (currentNode != nullptr);
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.h b/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.h
deleted file mode 100644
index 8957d87e44..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/IterativeParseTreeWalker.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * [The "BSD license"]
- * Copyright (c) 2012 Terence Parr
- * Copyright (c) 2012 Sam Harwell
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. The name of the author may not be used to endorse or promote products
- * derived from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
- * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
- * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
- * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
- * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
- * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-#include "tree/ParseTreeWalker.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ParseTreeListener;
-
- /**
- * An iterative (read: non-recursive) pre-order and post-order tree walker that
- * doesn't use the thread stack but heap-based stacks. Makes it possible to
- * process deeply nested parse trees.
- */
- class ANTLR4CPP_PUBLIC IterativeParseTreeWalker : public ParseTreeWalker {
- public:
- virtual void walk(ParseTreeListener *listener, ParseTree *t) const override;
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.cpp
deleted file mode 100644
index 8756398d88..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-
-using namespace antlr4::tree;
-
-bool ParseTree::operator == (const ParseTree &other) const {
- return &other == this;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.h
deleted file mode 100644
index cf8027b8fd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTree.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/Any.h"
-#include "tree/ParseTreeType.h"
-
-namespace antlr4 {
-namespace tree {
-
- /// An interface to access the tree of <seealso cref="RuleContext"/> objects created
- /// during a parse that makes the data structure look like a simple parse tree.
- /// This node represents both internal nodes, rule invocations,
- /// and leaf nodes, token matches.
- ///
- /// The payload is either a <seealso cref="Token"/> or a <seealso cref="RuleContext"/> object.
- // ml: This class unites 4 Java classes: RuleNode, ParseTree, SyntaxTree and Tree.
- class ANTLR4CPP_PUBLIC ParseTree {
- public:
- ParseTree(ParseTree const&) = delete;
-
- virtual ~ParseTree() = default;
-
- ParseTree& operator=(ParseTree const&) = delete;
-
- /// The parent of this node. If the return value is null, then this
- /// node is the root of the tree.
- ParseTree *parent = nullptr;
-
- /// If we are debugging or building a parse tree for a visitor,
- /// we need to track all of the tokens and rule invocations associated
- /// with this rule's context. This is empty for parsing w/o tree constr.
- /// operation because we don't the need to track the details about
- /// how we parse this rule.
- // ml: memory is not managed here, but by the owning class. This is just for the structure.
- std::vector<ParseTree *> children;
-
- /// Print out a whole tree, not just a node, in LISP format
- /// {@code (root child1 .. childN)}. Print just a node if this is a leaf.
- virtual std::string toStringTree(bool pretty = false) = 0;
- virtual std::string toString() = 0;
-
- /// Specialize toStringTree so that it can print out more information
- /// based upon the parser.
- virtual std::string toStringTree(Parser *parser, bool pretty = false) = 0;
-
- virtual bool operator == (const ParseTree &other) const;
-
- /// The <seealso cref="ParseTreeVisitor"/> needs a double dispatch method.
- // ml: This has been changed to use Any instead of a template parameter, to avoid the need of a virtual template function.
- virtual std::any accept(ParseTreeVisitor *visitor) = 0;
-
- /// Return the combined text of all leaf nodes. Does not get any
- /// off-channel tokens (if any) so won't return whitespace and
- /// comments if they are sent to parser on hidden channel.
- virtual std::string getText() = 0;
-
- /**
- * Return an {@link Interval} indicating the index in the
- * {@link TokenStream} of the first and last token associated with this
- * subtree. If this node is a leaf, then the interval represents a single
- * token and has interval i..i for token index i.
- *
- * <p>An interval of i..i-1 indicates an empty interval at position
- * i in the input stream, where 0 &lt;= i &lt;= the size of the input
- * token stream. Currently, the code base can only have i=0..n-1 but
- * in concept one could have an empty interval after EOF. </p>
- *
- * <p>If source interval is unknown, this returns {@link Interval#INVALID}.</p>
- *
- * <p>As a weird special case, the source interval for rules matched after
- * EOF is unspecified.</p>
- */
- virtual misc::Interval getSourceInterval() = 0;
-
- ParseTreeType getTreeType() const { return _treeType; }
-
- protected:
- explicit ParseTree(ParseTreeType treeType) : _treeType(treeType) {}
-
- private:
- const ParseTreeType _treeType;
- };
-
- // A class to help manage ParseTree instances without the need for a shared_ptr.
- class ANTLR4CPP_PUBLIC ParseTreeTracker {
- public:
- template<typename T, typename ... Args>
- T* createInstance(Args&& ... args) {
- static_assert(std::is_base_of<ParseTree, T>::value, "Argument must be a parse tree type");
- T* result = new T(args...);
- _allocated.push_back(result);
- return result;
- }
-
- void reset() {
- for (auto * entry : _allocated)
- delete entry;
- _allocated.clear();
- }
-
- private:
- std::vector<ParseTree *> _allocated;
- };
-
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.cpp
deleted file mode 100644
index ce12297586..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParseTreeListener.h"
-
-antlr4::tree::ParseTreeListener::~ParseTreeListener() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.h
deleted file mode 100644
index 60c7d8861a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeListener.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
- /** This interface describes the minimal core of methods triggered
- * by {@link ParseTreeWalker}. E.g.,
- *
- * ParseTreeWalker walker = new ParseTreeWalker();
- * walker.walk(myParseTreeListener, myParseTree); <-- triggers events in your listener
- *
- * If you want to trigger events in multiple listeners during a single
- * tree walk, you can use the ParseTreeDispatcher object available at
- *
- * https://github.com/antlr/antlr4/issues/841
- */
- class ANTLR4CPP_PUBLIC ParseTreeListener {
- public:
- virtual ~ParseTreeListener();
-
- virtual void visitTerminal(TerminalNode *node) = 0;
- virtual void visitErrorNode(ErrorNode *node) = 0;
- virtual void enterEveryRule(ParserRuleContext *ctx) = 0;
- virtual void exitEveryRule(ParserRuleContext *ctx) = 0;
-
- bool operator == (const ParseTreeListener &other) {
- return this == &other;
- }
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeProperty.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeProperty.h
deleted file mode 100644
index efd5e73bf8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeProperty.h
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
- /// <summary>
- /// Associate a property with a parse tree node. Useful with parse tree listeners
- /// that need to associate values with particular tree nodes, kind of like
- /// specifying a return value for the listener event method that visited a
- /// particular node. Example:
- ///
- /// <pre>
- /// ParseTreeProperty&lt;Integer&gt; values = new ParseTreeProperty&lt;Integer&gt;();
- /// values.put(tree, 36);
- /// int x = values.get(tree);
- /// values.removeFrom(tree);
- /// </pre>
- ///
- /// You would make one decl (values here) in the listener and use lots of times
- /// in your event methods.
- /// </summary>
- template<typename V>
- class ANTLR4CPP_PUBLIC ParseTreeProperty {
- public:
- virtual ~ParseTreeProperty() {}
- virtual V get(ParseTree *node) {
- return _annotations[node];
- }
- virtual void put(ParseTree *node, V value) {
- _annotations[node] = value;
- }
- virtual V removeFrom(ParseTree *node) {
- auto value = _annotations[node];
- _annotations.erase(node);
- return value;
- }
-
- protected:
- std::map<ParseTree*, V> _annotations;
- };
-
-} // namespace tree
-} // namespace antlr4
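A C++ counterpart of the Java snippet in the comment of the removed header above; the annotate() function and the int payload are only illustrative:

#include "tree/ParseTree.h"
#include "tree/ParseTreeProperty.h"

// Attach a value to a node, read it back, then drop the association again.
void annotate(antlr4::tree::ParseTree *tree) {
  antlr4::tree::ParseTreeProperty<int> values;
  values.put(tree, 36);
  int x = values.get(tree);
  values.removeFrom(tree);
  (void)x;
}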
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeType.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeType.h
deleted file mode 100644
index 17e0512b00..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeType.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include <cstddef>
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
- enum class ParseTreeType : size_t {
- TERMINAL = 1,
- ERROR = 2,
- RULE = 3,
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.cpp
deleted file mode 100644
index a329919c13..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "ParseTreeVisitor.h"
-
-antlr4::tree::ParseTreeVisitor::~ParseTreeVisitor() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.h
deleted file mode 100644
index 02d9dc9b95..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeVisitor.h
+++ /dev/null
@@ -1,57 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "support/Any.h"
-
-namespace antlr4 {
-namespace tree {
-
- /// <summary>
- /// This interface defines the basic notion of a parse tree visitor. Generated
- /// visitors implement this interface and the {@code XVisitor} interface for
- /// grammar {@code X}.
- /// </summary>
- /// @param <T> The return type of the visit operation. Use <seealso cref="Void"/> for
- /// operations with no return type. </param>
- // ml: no template parameter here, to avoid the need for virtual template functions. Instead we have our Any class.
- class ANTLR4CPP_PUBLIC ParseTreeVisitor {
- public:
- virtual ~ParseTreeVisitor();
-
- /// <summary>
- /// Visit a parse tree, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="tree"> The <seealso cref="ParseTree"/> to visit. </param>
- /// <returns> The result of visiting the parse tree. </returns>
- virtual std::any visit(ParseTree *tree) = 0;
-
- /// <summary>
- /// Visit the children of a node, and return a user-defined result of the
- /// operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="ParseTree"/> whose children should be visited. </param>
- /// <returns> The result of visiting the children of the node. </returns>
- virtual std::any visitChildren(ParseTree *node) = 0;
-
- /// <summary>
- /// Visit a terminal node, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="TerminalNode"/> to visit. </param>
- /// <returns> The result of visiting the node. </returns>
- virtual std::any visitTerminal(TerminalNode *node) = 0;
-
- /// <summary>
- /// Visit an error node, and return a user-defined result of the operation.
- /// </summary>
- /// <param name="node"> The <seealso cref="ErrorNode"/> to visit. </param>
- /// <returns> The result of visiting the node. </returns>
- virtual std::any visitErrorNode(ErrorNode *node) = 0;
-
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.cpp
deleted file mode 100644
index 3da4bec5c5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.cpp
+++ /dev/null
@@ -1,48 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ErrorNode.h"
-#include "ParserRuleContext.h"
-#include "tree/ParseTreeListener.h"
-#include "support/CPPUtils.h"
-#include "support/Casts.h"
-
-#include "tree/IterativeParseTreeWalker.h"
-#include "tree/ParseTreeWalker.h"
-
-using namespace antlr4::tree;
-using namespace antlrcpp;
-
-static IterativeParseTreeWalker defaultWalker;
-ParseTreeWalker &ParseTreeWalker::DEFAULT = defaultWalker;
-
-void ParseTreeWalker::walk(ParseTreeListener *listener, ParseTree *t) const {
- if (ErrorNode::is(*t)) {
- listener->visitErrorNode(downCast<ErrorNode*>(t));
- return;
- }
- if (TerminalNode::is(*t)) {
- listener->visitTerminal(downCast<TerminalNode*>(t));
- return;
- }
-
- enterRule(listener, t);
- for (auto &child : t->children) {
- walk(listener, child);
- }
- exitRule(listener, t);
-}
-
-void ParseTreeWalker::enterRule(ParseTreeListener *listener, ParseTree *r) const {
- auto *ctx = downCast<ParserRuleContext*>(r);
- listener->enterEveryRule(ctx);
- ctx->enterRule(listener);
-}
-
-void ParseTreeWalker::exitRule(ParseTreeListener *listener, ParseTree *r) const {
- auto *ctx = downCast<ParserRuleContext*>(r);
- ctx->exitRule(listener);
- listener->exitEveryRule(ctx);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.h b/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.h
deleted file mode 100644
index 718cbbd1e4..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/ParseTreeWalker.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ANTLR4CPP_PUBLIC ParseTreeWalker {
- public:
- static ParseTreeWalker &DEFAULT;
-
- virtual ~ParseTreeWalker() = default;
-
- /**
- * <summary>
- * Performs a walk on the given parse tree starting at the root and going down recursively
- * with depth-first search. On each node, <seealso cref="ParseTreeWalker#enterRule"/> is called before
- * recursively walking down into child nodes, then
- * <seealso cref="ParseTreeWalker#exitRule"/> is called after the recursive call to wind up.
- * </summary>
- * <param name='listener'> The listener used by the walker to process grammar rules </param>
- * <param name='t'> The parse tree to be walked on </param>
- */
- virtual void walk(ParseTreeListener *listener, ParseTree *t) const;
-
- protected:
-
- /**
- * <summary>
- * Enters a grammar rule by first triggering the generic event <seealso cref="ParseTreeListener#enterEveryRule"/>
- * then by triggering the event specific to the given parse tree node
- * </summary>
- * <param name='listener'> The listener responding to the trigger events </param>
- * <param name='r'> The grammar rule containing the rule context </param>
- */
- virtual void enterRule(ParseTreeListener *listener, ParseTree *r) const;
-
- /**
- * <summary>
- * Exits a grammar rule by first triggering the event specific to the given parse tree node
- * then by triggering the generic event <seealso cref="ParseTreeListener#exitEveryRule"/>
- * </summary>
- * <param name='listener'> The listener responding to the trigger events </param>
- * <param name='r'> The grammar rule containing the rule context </param>
- */
- virtual void exitRule(ParseTreeListener *listener, ParseTree *r) const;
- };
-
-} // namespace tree
-} // namespace antlr4
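A minimal listener sketch to go with the walker interface above; ParseTreeListener's callbacks are pure virtual, so all four must be implemented even when they do nothing, and the tree in the usage comment would come from a generated parser:

#include <cstddef>

#include "ParserRuleContext.h"
#include "tree/ErrorNode.h"
#include "tree/ParseTreeListener.h"
#include "tree/ParseTreeWalker.h"
#include "tree/TerminalNode.h"

class CountingListener : public antlr4::tree::ParseTreeListener {
public:
  std::size_t rules = 0;
  std::size_t terminals = 0;

  void enterEveryRule(antlr4::ParserRuleContext * /*ctx*/) override { ++rules; }
  void exitEveryRule(antlr4::ParserRuleContext * /*ctx*/) override {}
  void visitTerminal(antlr4::tree::TerminalNode * /*node*/) override { ++terminals; }
  void visitErrorNode(antlr4::tree::ErrorNode * /*node*/) override {}
};

// Usage, given a ParseTree *tree produced by a parser:
//   CountingListener listener;
//   antlr4::tree::ParseTreeWalker::DEFAULT.walk(&listener, tree);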
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNode.h b/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNode.h
deleted file mode 100644
index 9f7466edc5..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNode.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/ParseTree.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ANTLR4CPP_PUBLIC TerminalNode : public ParseTree {
- public:
- static bool is(const tree::ParseTree &parseTree) {
- const auto treeType = parseTree.getTreeType();
- return treeType == ParseTreeType::TERMINAL || treeType == ParseTreeType::ERROR;
- }
-
- static bool is(const tree::ParseTree *parseTree) { return parseTree != nullptr && is(*parseTree); }
-
- virtual Token* getSymbol() const = 0;
-
- /** Set the parent for this leaf node.
- *
- * Technically, this is not backward compatible as it changes
- * the interface but no one was able to create custom
- * TerminalNodes anyway, so I'm adding it, as it improves internal
- * code quality.
- *
- * @since 4.7
- */
- virtual void setParent(RuleContext *parent) = 0;
-
- protected:
- using ParseTree::ParseTree;
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.cpp
deleted file mode 100644
index 8eeb299fee..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "misc/Interval.h"
-#include "Token.h"
-#include "RuleContext.h"
-#include "tree/ParseTreeVisitor.h"
-
-#include "tree/TerminalNodeImpl.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-
-Token* TerminalNodeImpl::getSymbol() const {
- return symbol;
-}
-
-void TerminalNodeImpl::setParent(RuleContext *parent_) {
- this->parent = parent_;
-}
-
-misc::Interval TerminalNodeImpl::getSourceInterval() {
- if (symbol == nullptr) {
- return misc::Interval::INVALID;
- }
-
- size_t tokenIndex = symbol->getTokenIndex();
- return misc::Interval(tokenIndex, tokenIndex);
-}
-
-std::any TerminalNodeImpl::accept(ParseTreeVisitor *visitor) {
- return visitor->visitTerminal(this);
-}
-
-std::string TerminalNodeImpl::getText() {
- return symbol->getText();
-}
-
-std::string TerminalNodeImpl::toStringTree(Parser * /*parser*/, bool /*pretty*/) {
- return toString();
-}
-
-std::string TerminalNodeImpl::toString() {
- if (symbol->getType() == Token::EOF) {
- return "<EOF>";
- }
- return symbol->getText();
-}
-
-std::string TerminalNodeImpl::toStringTree(bool /*pretty*/) {
- return toString();
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.h b/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.h
deleted file mode 100644
index 1f8adacc6a..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/TerminalNodeImpl.h
+++ /dev/null
@@ -1,32 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/TerminalNode.h"
-
-namespace antlr4 {
-namespace tree {
-
- class ANTLR4CPP_PUBLIC TerminalNodeImpl : public TerminalNode {
- public:
- Token *symbol;
-
- explicit TerminalNodeImpl(Token *symbol) : TerminalNode(ParseTreeType::TERMINAL), symbol(symbol) {}
-
- virtual Token* getSymbol() const override;
- virtual void setParent(RuleContext *parent) override;
- virtual misc::Interval getSourceInterval() override;
-
- virtual std::any accept(ParseTreeVisitor *visitor) override;
-
- virtual std::string getText() override;
- virtual std::string toStringTree(Parser *parser, bool pretty = false) override;
- virtual std::string toString() override;
- virtual std::string toStringTree(bool pretty = false) override;
- };
-
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.cpp
deleted file mode 100644
index f4065949b2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.cpp
+++ /dev/null
@@ -1,241 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ErrorNode.h"
-#include "Parser.h"
-#include "ParserRuleContext.h"
-#include "support/CPPUtils.h"
-#include "tree/TerminalNodeImpl.h"
-#include "atn/ATN.h"
-#include "misc/Interval.h"
-#include "Token.h"
-#include "CommonToken.h"
-#include "misc/Predicate.h"
-
-#include "tree/Trees.h"
-
-using namespace antlr4;
-using namespace antlr4::misc;
-using namespace antlr4::tree;
-
-using namespace antlrcpp;
-
-Trees::Trees() {
-}
-
-std::string Trees::toStringTree(ParseTree *t, bool pretty) {
- return toStringTree(t, nullptr, pretty);
-}
-
-std::string Trees::toStringTree(ParseTree *t, Parser *recog, bool pretty) {
- if (recog == nullptr)
- return toStringTree(t, std::vector<std::string>(), pretty);
- return toStringTree(t, recog->getRuleNames(), pretty);
-}
-
-std::string Trees::toStringTree(ParseTree *t, const std::vector<std::string> &ruleNames, bool pretty) {
- std::string temp = antlrcpp::escapeWhitespace(Trees::getNodeText(t, ruleNames), false);
- if (t->children.empty()) {
- return temp;
- }
-
- std::stringstream ss;
- ss << "(" << temp << ' ';
-
- // Implement the recursive walk as iteration to avoid trouble with deep nesting.
- std::stack<size_t> stack;
- size_t childIndex = 0;
- ParseTree *run = t;
- size_t indentationLevel = 1;
- while (childIndex < run->children.size()) {
- if (childIndex > 0) {
- ss << ' ';
- }
- ParseTree *child = run->children[childIndex];
- temp = antlrcpp::escapeWhitespace(Trees::getNodeText(child, ruleNames), false);
- if (!child->children.empty()) {
- // Go deeper one level.
- stack.push(childIndex);
- run = child;
- childIndex = 0;
- if (pretty) {
- ++indentationLevel;
- ss << std::endl;
- for (size_t i = 0; i < indentationLevel; ++i) {
- ss << " ";
- }
- }
- ss << "(" << temp << " ";
- } else {
- ss << temp;
- while (++childIndex == run->children.size()) {
- if (stack.size() > 0) {
- // Reached the end of the current level. See if we can step up from here.
- childIndex = stack.top();
- stack.pop();
- run = run->parent;
- if (pretty) {
- --indentationLevel;
- }
- ss << ")";
- } else {
- break;
- }
- }
- }
- }
-
- ss << ")";
- return ss.str();
-}
-
-std::string Trees::getNodeText(ParseTree *t, Parser *recog) {
- return getNodeText(t, recog->getRuleNames());
-}
-
-std::string Trees::getNodeText(ParseTree *t, const std::vector<std::string> &ruleNames) {
- if (ruleNames.size() > 0) {
- if (is<RuleContext *>(t)) {
- size_t ruleIndex = dynamic_cast<RuleContext *>(t)->getRuleIndex();
- std::string ruleName = ruleNames[ruleIndex];
- size_t altNumber = dynamic_cast<RuleContext *>(t)->getAltNumber();
- if (altNumber != atn::ATN::INVALID_ALT_NUMBER) {
- return ruleName + ":" + std::to_string(altNumber);
- }
- return ruleName;
- } else if (is<ErrorNode *>(t)) {
- return t->toString();
- } else if (is<TerminalNode *>(t)) {
- Token *symbol = dynamic_cast<TerminalNode *>(t)->getSymbol();
- if (symbol != nullptr) {
- std::string s = symbol->getText();
- return s;
- }
- }
- }
- // no recog for rule names
- if (is<RuleContext *>(t)) {
- return dynamic_cast<RuleContext *>(t)->getText();
- }
-
- if (is<TerminalNodeImpl *>(t)) {
- return dynamic_cast<TerminalNodeImpl *>(t)->getSymbol()->getText();
- }
-
- return "";
-}
-
-std::vector<ParseTree *> Trees::getAncestors(ParseTree *t) {
- std::vector<ParseTree *> ancestors;
- ParseTree *parent = t->parent;
- while (parent != nullptr) {
- ancestors.insert(ancestors.begin(), parent); // insert at start
- parent = parent->parent;
- }
- return ancestors;
-}
-
-template<typename T>
-static void _findAllNodes(ParseTree *t, size_t index, bool findTokens, std::vector<T> &nodes) {
- // check this node (the root) first
- if (findTokens && is<TerminalNode *>(t)) {
- TerminalNode *tnode = dynamic_cast<TerminalNode *>(t);
- if (tnode->getSymbol()->getType() == index) {
- nodes.push_back(t);
- }
- } else if (!findTokens && is<ParserRuleContext *>(t)) {
- ParserRuleContext *ctx = dynamic_cast<ParserRuleContext *>(t);
- if (ctx->getRuleIndex() == index) {
- nodes.push_back(t);
- }
- }
- // check children
- for (size_t i = 0; i < t->children.size(); i++) {
- _findAllNodes(t->children[i], index, findTokens, nodes);
- }
-}
-
-bool Trees::isAncestorOf(ParseTree *t, ParseTree *u) {
- if (t == nullptr || u == nullptr || t->parent == nullptr) {
- return false;
- }
-
- ParseTree *p = u->parent;
- while (p != nullptr) {
- if (t == p) {
- return true;
- }
- p = p->parent;
- }
- return false;
-}
-
-std::vector<ParseTree *> Trees::findAllTokenNodes(ParseTree *t, size_t ttype) {
- return findAllNodes(t, ttype, true);
-}
-
-std::vector<ParseTree *> Trees::findAllRuleNodes(ParseTree *t, size_t ruleIndex) {
- return findAllNodes(t, ruleIndex, false);
-}
-
-std::vector<ParseTree *> Trees::findAllNodes(ParseTree *t, size_t index, bool findTokens) {
- std::vector<ParseTree *> nodes;
- _findAllNodes<ParseTree *>(t, index, findTokens, nodes);
- return nodes;
-}
-
-std::vector<ParseTree *> Trees::getDescendants(ParseTree *t) {
- std::vector<ParseTree *> nodes;
- nodes.push_back(t);
- std::size_t n = t->children.size();
- for (size_t i = 0 ; i < n ; i++) {
- auto descendants = getDescendants(t->children[i]);
- for (auto *entry: descendants) {
- nodes.push_back(entry);
- }
- }
- return nodes;
-}
-
-std::vector<ParseTree *> Trees::descendants(ParseTree *t) {
- return getDescendants(t);
-}
-
-ParserRuleContext* Trees::getRootOfSubtreeEnclosingRegion(ParseTree *t, size_t startTokenIndex, size_t stopTokenIndex) {
- size_t n = t->children.size();
- for (size_t i = 0; i < n; i++) {
- ParserRuleContext *r = getRootOfSubtreeEnclosingRegion(t->children[i], startTokenIndex, stopTokenIndex);
- if (r != nullptr) {
- return r;
- }
- }
-
- if (is<ParserRuleContext *>(t)) {
- ParserRuleContext *r = dynamic_cast<ParserRuleContext *>(t);
- if (startTokenIndex >= r->getStart()->getTokenIndex() && // is range fully contained in t?
- (r->getStop() == nullptr || stopTokenIndex <= r->getStop()->getTokenIndex())) {
- // note: r.getStop()==null likely implies that we bailed out of parser and there's nothing to the right
- return r;
- }
- }
- return nullptr;
-}
-
-ParseTree * Trees::findNodeSuchThat(ParseTree *t, Ref<Predicate> const& pred) {
- if (pred->test(t)) {
- return t;
- }
-
- size_t n = t->children.size();
- for (size_t i = 0 ; i < n ; ++i) {
- ParseTree *u = findNodeSuchThat(t->children[i], pred);
- if (u != nullptr) {
- return u;
- }
- }
-
- return nullptr;
-}
-
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.h b/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.h
deleted file mode 100644
index f779158d01..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/Trees.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "tree/TerminalNode.h"
-#include "ParserRuleContext.h"
-#include "Recognizer.h"
-
-namespace antlr4 {
-namespace tree {
-
- /// A set of utility routines useful for all kinds of ANTLR trees.
- class ANTLR4CPP_PUBLIC Trees {
- public:
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree *t, bool pretty = false);
-
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree *t, Parser *recog, bool pretty = false);
-
- /// Print out a whole tree in LISP form. getNodeText is used on the
- /// node payloads to get the text for the nodes. Detect
- /// parse trees and extract data appropriately.
- static std::string toStringTree(ParseTree *t, const std::vector<std::string> &ruleNames, bool pretty = false);
- static std::string getNodeText(ParseTree *t, Parser *recog);
- static std::string getNodeText(ParseTree *t, const std::vector<std::string> &ruleNames);
-
- /// Return a list of all ancestors of this node. The first node of
- /// the list is the root and the last is the parent of this node.
- static std::vector<ParseTree *> getAncestors(ParseTree *t);
-
- /** Return true if t is u's parent or a node on path to root from u.
- * Use == not equals().
- *
- * @since 4.5.1
- */
- static bool isAncestorOf(ParseTree *t, ParseTree *u);
- static std::vector<ParseTree *> findAllTokenNodes(ParseTree *t, size_t ttype);
- static std::vector<ParseTree *> findAllRuleNodes(ParseTree *t, size_t ruleIndex);
- static std::vector<ParseTree *> findAllNodes(ParseTree *t, size_t index, bool findTokens);
-
- /** Get all descendants; includes t itself.
- *
- * @since 4.5.1
- */
- static std::vector<ParseTree *> getDescendants(ParseTree *t);
-
- /** @deprecated */
- static std::vector<ParseTree *> descendants(ParseTree *t);
-
- /** Find smallest subtree of t enclosing range startTokenIndex..stopTokenIndex
- * inclusively using postorder traversal. Recursive depth-first-search.
- *
- * @since 4.5.1
- */
- static ParserRuleContext* getRootOfSubtreeEnclosingRegion(ParseTree *t,
- size_t startTokenIndex, // inclusive
- size_t stopTokenIndex); // inclusive
-
- /** Return the first node satisfying the predicate
- *
- * @since 4.5.1
- */
- static ParseTree* findNodeSuchThat(ParseTree *t, Ref<misc::Predicate> const& pred);
-
- private:
- Trees();
- };
-
-} // namespace tree
-} // namespace antlr4
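
(Illustrative sketch, not part of the deleted sources: a minimal driver for the Trees helpers declared above. MyLexer, MyParser, the statement start rule and the RuleExpr constant are assumed names for some ANTLR-generated grammar.)

#include "ANTLRInputStream.h"
#include "CommonTokenStream.h"
#include "tree/Trees.h"
#include "MyLexer.h"   // hypothetical generated lexer
#include "MyParser.h"  // hypothetical generated parser
#include <iostream>

int main() {
  antlr4::ANTLRInputStream input("x = 0;");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);
  antlr4::tree::ParseTree *tree = parser.statement();   // hypothetical start rule

  // LISP-style dump; rule indexes are resolved to names through the parser.
  std::cout << antlr4::tree::Trees::toStringTree(tree, &parser, /*pretty=*/true) << std::endl;

  // Collect every subtree rooted at a particular rule (rule constant assumed).
  auto exprs = antlr4::tree::Trees::findAllRuleNodes(tree, MyParser::RuleExpr);
  std::cout << exprs.size() << " expr subtrees" << std::endl;
  return 0;
}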
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.cpp
deleted file mode 100644
index 5320f910b9..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/pattern/Chunk.h"
-
-antlr4::tree::pattern::Chunk::~Chunk() {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.h
deleted file mode 100644
index 61079a8ca8..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/Chunk.h
+++ /dev/null
@@ -1,44 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// A chunk is either a token tag, a rule tag, or a span of literal text within a
- /// tree pattern.
- /// <p/>
- /// The method <seealso cref="ParseTreePatternMatcher#split(String)"/> returns a list of
- /// chunks in preparation for creating a token stream by
- /// <seealso cref="ParseTreePatternMatcher#tokenize(String)"/>. From there, we get a parse
- /// tree from with <seealso cref="ParseTreePatternMatcher#compile(String, int)"/>. These
- /// chunks are converted to <seealso cref="RuleTagToken"/>, <seealso cref="TokenTagToken"/>, or the
- /// regular tokens of the text surrounding the tags.
- /// </summary>
- class ANTLR4CPP_PUBLIC Chunk {
- public:
- Chunk() = default;
- Chunk(Chunk const&) = default;
- virtual ~Chunk();
-
- Chunk& operator=(Chunk const&) = default;
-
- /// This method returns a text representation of the tag chunk. Labeled tags
- /// are returned in the form {@code label:tag}, and unlabeled tags are
- /// returned as just the tag name.
- virtual std::string toString() {
- std::string str;
- return str;
- }
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.cpp
deleted file mode 100644
index 41896d6df7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.cpp
+++ /dev/null
@@ -1,69 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/ParseTreeMatch.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-
-ParseTreeMatch::ParseTreeMatch(ParseTree *tree, const ParseTreePattern &pattern,
- const std::map<std::string, std::vector<ParseTree *>> &labels,
- ParseTree *mismatchedNode)
- : _tree(tree), _pattern(pattern), _labels(labels), _mismatchedNode(mismatchedNode) {
- if (tree == nullptr) {
- throw IllegalArgumentException("tree cannot be nul");
- }
-}
-
-ParseTreeMatch::~ParseTreeMatch() {
-}
-
-ParseTree* ParseTreeMatch::get(const std::string &label) {
- auto iterator = _labels.find(label);
- if (iterator == _labels.end() || iterator->second.empty()) {
- return nullptr;
- }
-
- return iterator->second.back(); // return last if multiple
-}
-
-std::vector<ParseTree *> ParseTreeMatch::getAll(const std::string &label) {
- auto iterator = _labels.find(label);
- if (iterator == _labels.end()) {
- return {};
- }
-
- return iterator->second;
-}
-
-std::map<std::string, std::vector<ParseTree *>>& ParseTreeMatch::getLabels() {
- return _labels;
-}
-
-ParseTree *ParseTreeMatch::getMismatchedNode() {
- return _mismatchedNode;
-}
-
-bool ParseTreeMatch::succeeded() {
- return _mismatchedNode == nullptr;
-}
-
-const ParseTreePattern& ParseTreeMatch::getPattern() {
- return _pattern;
-}
-
-ParseTree * ParseTreeMatch::getTree() {
- return _tree;
-}
-
-std::string ParseTreeMatch::toString() {
- if (succeeded()) {
- return "Match succeeded; found " + std::to_string(_labels.size()) + " labels";
- } else {
- return "Match failed; found " + std::to_string(_labels.size()) + " labels";
- }
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.h
deleted file mode 100644
index eefde46c83..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreeMatch.h
+++ /dev/null
@@ -1,132 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// Represents the result of matching a ParseTree against a tree pattern.
- class ANTLR4CPP_PUBLIC ParseTreeMatch {
- private:
- /// This is the backing field for getTree().
- ParseTree *_tree;
-
- /// This is the backing field for getPattern().
- const ParseTreePattern &_pattern;
-
- /// This is the backing field for getLabels().
- std::map<std::string, std::vector<ParseTree *>> _labels;
-
- /// This is the backing field for getMismatchedNode().
- ParseTree *_mismatchedNode;
-
- public:
- /// <summary>
- /// Constructs a new instance of <seealso cref="ParseTreeMatch"/> from the specified
- /// parse tree and pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against the pattern. </param>
- /// <param name="pattern"> The parse tree pattern. </param>
- /// <param name="labels"> A mapping from label names to collections of
- /// <seealso cref="ParseTree"/> objects located by the tree pattern matching process. </param>
- /// <param name="mismatchedNode"> The first node which failed to match the tree
- /// pattern during the matching process.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tree} is {@code null} </exception>
- /// <exception cref="IllegalArgumentException"> if {@code pattern} is {@code null} </exception>
- /// <exception cref="IllegalArgumentException"> if {@code labels} is {@code null} </exception>
- ParseTreeMatch(ParseTree *tree, ParseTreePattern const& pattern,
- const std::map<std::string, std::vector<ParseTree *>> &labels, ParseTree *mismatchedNode);
- ParseTreeMatch(ParseTreeMatch const&) = default;
- virtual ~ParseTreeMatch();
-
- /// <summary>
- /// Get the last node associated with a specific {@code label}.
- /// <p/>
- /// For example, for pattern {@code <id:ID>}, {@code get("id")} returns the
- /// node matched for that {@code ID}. If more than one node
- /// matched the specified label, only the last is returned. If there is
- /// no node associated with the label, this returns {@code null}.
- /// <p/>
- /// Pattern tags like {@code <ID>} and {@code <expr>} without labels are
- /// considered to be labeled with {@code ID} and {@code expr}, respectively.
- /// </summary>
- /// <param name="labe"> The label to check.
- /// </param>
- /// <returns> The last <seealso cref="ParseTree"/> to match a tag with the specified
- /// label, or {@code null} if no parse tree matched a tag with the label. </returns>
- virtual ParseTree* get(const std::string &label);
-
- /// <summary>
- /// Return all nodes matching a rule or token tag with the specified label.
- /// <p/>
- /// If the {@code label} is the name of a parser rule or token in the
- /// grammar, the resulting list will contain both the parse trees matching
- /// rule or tags explicitly labeled with the label and the complete set of
- /// parse trees matching the labeled and unlabeled tags in the pattern for
- /// the parser rule or token. For example, if {@code label} is {@code "foo"},
- /// the result will contain <em>all</em> of the following.
- ///
- /// <ul>
- /// <li>Parse tree nodes matching tags of the form {@code <foo:anyRuleName>} and
- /// {@code <foo:AnyTokenName>}.</li>
- /// <li>Parse tree nodes matching tags of the form {@code <anyLabel:foo>}.</li>
- /// <li>Parse tree nodes matching tags of the form {@code <foo>}.</li>
- /// </ul>
- /// </summary>
- /// <param name="labe"> The label.
- /// </param>
- /// <returns> A collection of all <seealso cref="ParseTree"/> nodes matching tags with
- /// the specified {@code label}. If no nodes matched the label, an empty list
- /// is returned. </returns>
- virtual std::vector<ParseTree *> getAll(const std::string &label);
-
- /// <summary>
- /// Return a mapping from label &rarr; [list of nodes].
- /// <p/>
- /// The map includes special entries corresponding to the names of rules and
- /// tokens referenced in tags in the original pattern. For additional
- /// information, see the description of <seealso cref="#getAll(String)"/>.
- /// </summary>
- /// <returns> A mapping from labels to parse tree nodes. If the parse tree
- /// pattern did not contain any rule or token tags, this map will be empty. </returns>
- virtual std::map<std::string, std::vector<ParseTree *>>& getLabels();
-
- /// <summary>
- /// Get the node at which we first detected a mismatch.
- /// </summary>
- /// <returns> the node at which we first detected a mismatch, or {@code null}
- /// if the match was successful. </returns>
- virtual ParseTree* getMismatchedNode();
-
- /// <summary>
- /// Gets a value indicating whether the match operation succeeded.
- /// </summary>
- /// <returns> {@code true} if the match operation succeeded; otherwise,
- /// {@code false}. </returns>
- virtual bool succeeded();
-
- /// <summary>
- /// Get the tree pattern we are matching against.
- /// </summary>
- /// <returns> The tree pattern we are matching against. </returns>
- virtual const ParseTreePattern& getPattern();
-
- /// <summary>
- /// Get the parse tree we are trying to match to a pattern.
- /// </summary>
- /// <returns> The <seealso cref="ParseTree"/> we are trying to match to a pattern. </returns>
- virtual ParseTree* getTree();
-
- virtual std::string toString();
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
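
(Hypothetical sketch of inspecting a ParseTreeMatch; the "id" and "expr" labels assume a pattern such as "<id:ID> = <expr>;" and are not taken from this diff.)

#include "tree/ParseTree.h"
#include "tree/pattern/ParseTreeMatch.h"
#include <iostream>
#include <string>

void report(antlr4::tree::pattern::ParseTreeMatch &m) {
  if (!m.succeeded()) {
    // The first node that failed to match; non-null whenever succeeded() is false.
    std::cout << "failed at: " << m.getMismatchedNode()->toString() << std::endl;
    return;
  }
  antlr4::tree::ParseTree *id = m.get("id");   // last node bound to label "id"
  std::cout << "id = " << (id != nullptr ? id->getText() : std::string("<none>")) << std::endl;
  std::cout << "expr nodes: " << m.getAll("expr").size() << std::endl;
}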
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.cpp
deleted file mode 100644
index ca7f8f20d6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/pattern/ParseTreePatternMatcher.h"
-#include "tree/pattern/ParseTreeMatch.h"
-
-#include "tree/xpath/XPath.h"
-#include "tree/xpath/XPathElement.h"
-
-#include "tree/pattern/ParseTreePattern.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-
-using namespace antlrcpp;
-
-ParseTreePattern::ParseTreePattern(ParseTreePatternMatcher *matcher, const std::string &pattern, int patternRuleIndex_,
- ParseTree *patternTree)
- : patternRuleIndex(patternRuleIndex_), _pattern(pattern), _patternTree(patternTree), _matcher(matcher) {
-}
-
-ParseTreePattern::~ParseTreePattern() {
-}
-
-ParseTreeMatch ParseTreePattern::match(ParseTree *tree) {
- return _matcher->match(tree, *this);
-}
-
-bool ParseTreePattern::matches(ParseTree *tree) {
- return _matcher->match(tree, *this).succeeded();
-}
-
-std::vector<ParseTreeMatch> ParseTreePattern::findAll(ParseTree *tree, const std::string &xpath) {
- xpath::XPath finder(_matcher->getParser(), xpath);
- std::vector<ParseTree *> subtrees = finder.evaluate(tree);
- std::vector<ParseTreeMatch> matches;
- for (auto *t : subtrees) {
- ParseTreeMatch aMatch = match(t);
- if (aMatch.succeeded()) {
- matches.push_back(aMatch);
- }
- }
- return matches;
-}
-
-
-ParseTreePatternMatcher *ParseTreePattern::getMatcher() const {
- return _matcher;
-}
-
-std::string ParseTreePattern::getPattern() const {
- return _pattern;
-}
-
-int ParseTreePattern::getPatternRuleIndex() const {
- return patternRuleIndex;
-}
-
-ParseTree* ParseTreePattern::getPatternTree() const {
- return _patternTree;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.h
deleted file mode 100644
index d5b86ff473..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePattern.h
+++ /dev/null
@@ -1,105 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// A pattern like {@code <ID> = <expr>;} converted to a <seealso cref="ParseTree"/> by
- /// <seealso cref="ParseTreePatternMatcher#compile(String, int)"/>.
- /// </summary>
- class ANTLR4CPP_PUBLIC ParseTreePattern {
- public:
- /// <summary>
- /// Construct a new instance of the <seealso cref="ParseTreePattern"/> class.
- /// </summary>
- /// <param name="matcher"> The <seealso cref="ParseTreePatternMatcher"/> which created this
- /// tree pattern. </param>
- /// <param name="pattern"> The tree pattern in concrete syntax form. </param>
- /// <param name="patternRuleIndex"> The parser rule which serves as the root of the
- /// tree pattern. </param>
- /// <param name="patternTree"> The tree pattern in <seealso cref="ParseTree"/> form. </param>
- ParseTreePattern(ParseTreePatternMatcher *matcher, const std::string &pattern, int patternRuleIndex,
- ParseTree *patternTree);
- ParseTreePattern(ParseTreePattern const&) = default;
- virtual ~ParseTreePattern();
-
- /// <summary>
- /// Match a specific parse tree against this tree pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against this tree pattern. </param>
- /// <returns> A <seealso cref="ParseTreeMatch"/> object describing the result of the
- /// match operation. The <seealso cref="ParseTreeMatch#succeeded()"/> method can be
- /// used to determine whether or not the match was successful. </returns>
- virtual ParseTreeMatch match(ParseTree *tree);
-
- /// <summary>
- /// Determine whether or not a parse tree matches this tree pattern.
- /// </summary>
- /// <param name="tree"> The parse tree to match against this tree pattern. </param>
- /// <returns> {@code true} if {@code tree} is a match for the current tree
- /// pattern; otherwise, {@code false}. </returns>
- virtual bool matches(ParseTree *tree);
-
- /// Find all nodes using XPath and then try to match those subtrees against
- /// this tree pattern.
- /// @param tree The ParseTree to match against this pattern.
- /// @param xpath An expression matching the nodes
- ///
- /// @returns A collection of ParseTreeMatch objects describing the
- /// successful matches. Unsuccessful matches are omitted from the result,
- /// regardless of the reason for the failure.
- virtual std::vector<ParseTreeMatch> findAll(ParseTree *tree, const std::string &xpath);
-
- /// <summary>
- /// Get the <seealso cref="ParseTreePatternMatcher"/> which created this tree pattern.
- /// </summary>
- /// <returns> The <seealso cref="ParseTreePatternMatcher"/> which created this tree
- /// pattern. </returns>
- virtual ParseTreePatternMatcher *getMatcher() const;
-
- /// <summary>
- /// Get the tree pattern in concrete syntax form.
- /// </summary>
- /// <returns> The tree pattern in concrete syntax form. </returns>
- virtual std::string getPattern() const;
-
- /// <summary>
- /// Get the parser rule which serves as the outermost rule for the tree
- /// pattern.
- /// </summary>
- /// <returns> The parser rule which serves as the outermost rule for the tree
- /// pattern. </returns>
- virtual int getPatternRuleIndex() const;
-
- /// <summary>
- /// Get the tree pattern as a <seealso cref="ParseTree"/>. The rule and token tags from
- /// the pattern are present in the parse tree as terminal nodes with a symbol
- /// of type <seealso cref="RuleTagToken"/> or <seealso cref="TokenTagToken"/>.
- /// </summary>
- /// <returns> The tree pattern as a <seealso cref="ParseTree"/>. </returns>
- virtual ParseTree* getPatternTree() const;
-
- private:
- const int patternRuleIndex;
-
- /// This is the backing field for <seealso cref="#getPattern()"/>.
- const std::string _pattern;
-
- /// This is the backing field for <seealso cref="#getPatternTree()"/>.
- ParseTree *_patternTree;
-
- /// This is the backing field for <seealso cref="#getMatcher()"/>.
- ParseTreePatternMatcher *const _matcher;
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
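
(Sketch of findAll() combining an XPath query with pattern matching; the "//assign" path, the "id" label and the helper function itself are assumptions about a hypothetical grammar, not part of the deleted code.)

#include "tree/ParseTree.h"
#include "tree/pattern/ParseTreeMatch.h"
#include "tree/pattern/ParseTreePattern.h"
#include <vector>

// Collects the node bound to the "id" label in every subtree under 'root'
// that the XPath selects and the compiled pattern accepts.
std::vector<antlr4::tree::ParseTree *> collectAssignTargets(
    antlr4::tree::pattern::ParseTreePattern &pattern, antlr4::tree::ParseTree *root) {
  std::vector<antlr4::tree::ParseTree *> targets;
  for (auto &match : pattern.findAll(root, "//assign")) {
    targets.push_back(match.get("id"));   // assumes the pattern was compiled from "<id:ID> = <expr>;"
  }
  return targets;
}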
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.cpp
deleted file mode 100644
index 4c28658954..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.cpp
+++ /dev/null
@@ -1,370 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/pattern/ParseTreePattern.h"
-#include "tree/pattern/ParseTreeMatch.h"
-#include "tree/TerminalNode.h"
-#include "CommonTokenStream.h"
-#include "ParserInterpreter.h"
-#include "tree/pattern/TokenTagToken.h"
-#include "ParserRuleContext.h"
-#include "tree/pattern/RuleTagToken.h"
-#include "tree/pattern/TagChunk.h"
-#include "atn/ATN.h"
-#include "Lexer.h"
-#include "BailErrorStrategy.h"
-
-#include "ListTokenSource.h"
-#include "tree/pattern/TextChunk.h"
-#include "ANTLRInputStream.h"
-#include "support/Arrays.h"
-#include "Exceptions.h"
-#include "support/CPPUtils.h"
-
-#include "tree/pattern/ParseTreePatternMatcher.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::pattern;
-using namespace antlrcpp;
-
-ParseTreePatternMatcher::CannotInvokeStartRule::CannotInvokeStartRule(const RuntimeException &e) : RuntimeException(e.what()) {
-}
-
-ParseTreePatternMatcher::CannotInvokeStartRule::~CannotInvokeStartRule() {
-}
-
-ParseTreePatternMatcher::StartRuleDoesNotConsumeFullPattern::~StartRuleDoesNotConsumeFullPattern() {
-}
-
-ParseTreePatternMatcher::ParseTreePatternMatcher(Lexer *lexer, Parser *parser) : _lexer(lexer), _parser(parser) {
- InitializeInstanceFields();
-}
-
-ParseTreePatternMatcher::~ParseTreePatternMatcher() {
-}
-
-void ParseTreePatternMatcher::setDelimiters(const std::string &start, const std::string &stop, const std::string &escapeLeft) {
- if (start.empty()) {
- throw IllegalArgumentException("start cannot be null or empty");
- }
-
- if (stop.empty()) {
- throw IllegalArgumentException("stop cannot be null or empty");
- }
-
- _start = start;
- _stop = stop;
- _escape = escapeLeft;
-}
-
-bool ParseTreePatternMatcher::matches(ParseTree *tree, const std::string &pattern, int patternRuleIndex) {
- ParseTreePattern p = compile(pattern, patternRuleIndex);
- return matches(tree, p);
-}
-
-bool ParseTreePatternMatcher::matches(ParseTree *tree, const ParseTreePattern &pattern) {
- std::map<std::string, std::vector<ParseTree *>> labels;
- ParseTree *mismatchedNode = matchImpl(tree, pattern.getPatternTree(), labels);
- return mismatchedNode == nullptr;
-}
-
-ParseTreeMatch ParseTreePatternMatcher::match(ParseTree *tree, const std::string &pattern, int patternRuleIndex) {
- ParseTreePattern p = compile(pattern, patternRuleIndex);
- return match(tree, p);
-}
-
-ParseTreeMatch ParseTreePatternMatcher::match(ParseTree *tree, const ParseTreePattern &pattern) {
- std::map<std::string, std::vector<ParseTree *>> labels;
- tree::ParseTree *mismatchedNode = matchImpl(tree, pattern.getPatternTree(), labels);
- return ParseTreeMatch(tree, pattern, labels, mismatchedNode);
-}
-
-ParseTreePattern ParseTreePatternMatcher::compile(const std::string &pattern, int patternRuleIndex) {
- ListTokenSource tokenSrc(tokenize(pattern));
- CommonTokenStream tokens(&tokenSrc);
-
- ParserInterpreter parserInterp(_parser->getGrammarFileName(), _parser->getVocabulary(),
- _parser->getRuleNames(), _parser->getATNWithBypassAlts(), &tokens);
-
- ParserRuleContext *tree = nullptr;
- try {
- parserInterp.setErrorHandler(std::make_shared<BailErrorStrategy>());
- tree = parserInterp.parse(patternRuleIndex);
- } catch (ParseCancellationException &e) {
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- // rethrow_if_nested is not available before VS 2015.
- throw e;
-#else
- std::rethrow_if_nested(e); // Unwrap the nested exception.
-#endif
- } catch (RecognitionException &re) {
- throw re;
-#if defined(_MSC_FULL_VER) && _MSC_FULL_VER < 190023026
- } catch (std::exception &e) {
- // throw_with_nested is not available before VS 2015.
- throw e;
-#else
- } catch (std::exception & /*e*/) {
- std::throw_with_nested(RuntimeException("Cannot invoke start rule")); // Wrap any other exception.
-#endif
- }
-
- // Make sure tree pattern compilation checks for a complete parse
- if (tokens.LA(1) != Token::EOF) {
- throw StartRuleDoesNotConsumeFullPattern();
- }
-
- return ParseTreePattern(this, pattern, patternRuleIndex, tree);
-}
-
-Lexer* ParseTreePatternMatcher::getLexer() {
- return _lexer;
-}
-
-Parser* ParseTreePatternMatcher::getParser() {
- return _parser;
-}
-
-ParseTree* ParseTreePatternMatcher::matchImpl(ParseTree *tree, ParseTree *patternTree,
- std::map<std::string, std::vector<ParseTree *>> &labels) {
- if (tree == nullptr) {
- throw IllegalArgumentException("tree cannot be nul");
- }
-
- if (patternTree == nullptr) {
- throw IllegalArgumentException("patternTree cannot be nul");
- }
-
- // x and <ID>, x and y, or x and x; or could be mismatched types
- if (is<TerminalNode *>(tree) && is<TerminalNode *>(patternTree)) {
- TerminalNode *t1 = dynamic_cast<TerminalNode *>(tree);
- TerminalNode *t2 = dynamic_cast<TerminalNode *>(patternTree);
-
- ParseTree *mismatchedNode = nullptr;
- // both are tokens and they have same type
- if (t1->getSymbol()->getType() == t2->getSymbol()->getType()) {
- if (is<TokenTagToken *>(t2->getSymbol())) { // x and <ID>
- TokenTagToken *tokenTagToken = dynamic_cast<TokenTagToken *>(t2->getSymbol());
-
- // track label->list-of-nodes for both token name and label (if any)
- labels[tokenTagToken->getTokenName()].push_back(tree);
- if (tokenTagToken->getLabel() != "") {
- labels[tokenTagToken->getLabel()].push_back(tree);
- }
- } else if (t1->getText() == t2->getText()) {
- // x and x
- } else {
- // x and y
- if (mismatchedNode == nullptr) {
- mismatchedNode = t1;
- }
- }
- } else {
- if (mismatchedNode == nullptr) {
- mismatchedNode = t1;
- }
- }
-
- return mismatchedNode;
- }
-
- if (is<ParserRuleContext *>(tree) && is<ParserRuleContext *>(patternTree)) {
- ParserRuleContext *r1 = dynamic_cast<ParserRuleContext *>(tree);
- ParserRuleContext *r2 = dynamic_cast<ParserRuleContext *>(patternTree);
- ParseTree *mismatchedNode = nullptr;
-
- // (expr ...) and <expr>
- RuleTagToken *ruleTagToken = getRuleTagToken(r2);
- if (ruleTagToken != nullptr) {
- //ParseTreeMatch *m = nullptr; // unused?
- if (r1->getRuleIndex() == r2->getRuleIndex()) {
- // track label->list-of-nodes for both rule name and label (if any)
- labels[ruleTagToken->getRuleName()].push_back(tree);
- if (ruleTagToken->getLabel() != "") {
- labels[ruleTagToken->getLabel()].push_back(tree);
- }
- } else {
- if (!mismatchedNode) {
- mismatchedNode = r1;
- }
- }
-
- return mismatchedNode;
- }
-
- // (expr ...) and (expr ...)
- if (r1->children.size() != r2->children.size()) {
- if (mismatchedNode == nullptr) {
- mismatchedNode = r1;
- }
-
- return mismatchedNode;
- }
-
- std::size_t n = r1->children.size();
- for (size_t i = 0; i < n; i++) {
- ParseTree *childMatch = matchImpl(r1->children[i], patternTree->children[i], labels);
- if (childMatch) {
- return childMatch;
- }
- }
-
- return mismatchedNode;
- }
-
- // if nodes aren't both tokens or both rule nodes, can't match
- return tree;
-}
-
-RuleTagToken* ParseTreePatternMatcher::getRuleTagToken(ParseTree *t) {
- if (t->children.size() == 1 && is<TerminalNode *>(t->children[0])) {
- TerminalNode *c = dynamic_cast<TerminalNode *>(t->children[0]);
- if (is<RuleTagToken *>(c->getSymbol())) {
- return dynamic_cast<RuleTagToken *>(c->getSymbol());
- }
- }
- return nullptr;
-}
-
-std::vector<std::unique_ptr<Token>> ParseTreePatternMatcher::tokenize(const std::string &pattern) {
- // split pattern into chunks: sea (raw input) and islands (<ID>, <expr>)
- std::vector<Chunk> chunks = split(pattern);
-
- // create token stream from text and tags
- std::vector<std::unique_ptr<Token>> tokens;
- for (auto chunk : chunks) {
- if (is<TagChunk *>(&chunk)) {
- TagChunk &tagChunk = (TagChunk&)chunk;
- // add special rule token or conjure up new token from name
- if (isupper(tagChunk.getTag()[0])) {
- size_t ttype = _parser->getTokenType(tagChunk.getTag());
- if (ttype == Token::INVALID_TYPE) {
- throw IllegalArgumentException("Unknown token " + tagChunk.getTag() + " in pattern: " + pattern);
- }
- tokens.emplace_back(new TokenTagToken(tagChunk.getTag(), (int)ttype, tagChunk.getLabel()));
- } else if (islower(tagChunk.getTag()[0])) {
- size_t ruleIndex = _parser->getRuleIndex(tagChunk.getTag());
- if (ruleIndex == INVALID_INDEX) {
- throw IllegalArgumentException("Unknown rule " + tagChunk.getTag() + " in pattern: " + pattern);
- }
- size_t ruleImaginaryTokenType = _parser->getATNWithBypassAlts().ruleToTokenType[ruleIndex];
- tokens.emplace_back(new RuleTagToken(tagChunk.getTag(), ruleImaginaryTokenType, tagChunk.getLabel()));
- } else {
- throw IllegalArgumentException("invalid tag: " + tagChunk.getTag() + " in pattern: " + pattern);
- }
- } else {
- TextChunk &textChunk = (TextChunk&)chunk;
- ANTLRInputStream input(textChunk.getText());
- _lexer->setInputStream(&input);
- std::unique_ptr<Token> t(_lexer->nextToken());
- while (t->getType() != Token::EOF) {
- tokens.push_back(std::move(t));
- t = _lexer->nextToken();
- }
- _lexer->setInputStream(nullptr);
- }
- }
-
- return tokens;
-}
-
-std::vector<Chunk> ParseTreePatternMatcher::split(const std::string &pattern) {
- size_t p = 0;
- size_t n = pattern.length();
- std::vector<Chunk> chunks;
-
- // find all start and stop indexes first, then collect
- std::vector<size_t> starts;
- std::vector<size_t> stops;
- while (p < n) {
- if (p == pattern.find(_escape + _start,p)) {
- p += _escape.length() + _start.length();
- } else if (p == pattern.find(_escape + _stop,p)) {
- p += _escape.length() + _stop.length();
- } else if (p == pattern.find(_start,p)) {
- starts.push_back(p);
- p += _start.length();
- } else if (p == pattern.find(_stop,p)) {
- stops.push_back(p);
- p += _stop.length();
- } else {
- p++;
- }
- }
-
- if (starts.size() > stops.size()) {
- throw IllegalArgumentException("unterminated tag in pattern: " + pattern);
- }
-
- if (starts.size() < stops.size()) {
- throw IllegalArgumentException("missing start tag in pattern: " + pattern);
- }
-
- size_t ntags = starts.size();
- for (size_t i = 0; i < ntags; i++) {
- if (starts[i] >= stops[i]) {
- throw IllegalArgumentException("tag delimiters out of order in pattern: " + pattern);
- }
- }
-
- // collect into chunks now
- if (ntags == 0) {
- std::string text = pattern.substr(0, n);
- chunks.push_back(TextChunk(text));
- }
-
- if (ntags > 0 && starts[0] > 0) { // copy text up to first tag into chunks
- std::string text = pattern.substr(0, starts[0]);
- chunks.push_back(TextChunk(text));
- }
-
- for (size_t i = 0; i < ntags; i++) {
- // copy inside of <tag>
- std::string tag = pattern.substr(starts[i] + _start.length(), stops[i] - (starts[i] + _start.length()));
- std::string ruleOrToken = tag;
- std::string label = "";
- size_t colon = tag.find(':');
- if (colon != std::string::npos) {
- label = tag.substr(0,colon);
- ruleOrToken = tag.substr(colon + 1, tag.length() - (colon + 1));
- }
- chunks.push_back(TagChunk(label, ruleOrToken));
- if (i + 1 < ntags) {
- // copy from end of <tag> to start of next
- std::string text = pattern.substr(stops[i] + _stop.length(), starts[i + 1] - (stops[i] + _stop.length()));
- chunks.push_back(TextChunk(text));
- }
- }
-
- if (ntags > 0) {
- size_t afterLastTag = stops[ntags - 1] + _stop.length();
- if (afterLastTag < n) { // copy text from end of last tag to end
- std::string text = pattern.substr(afterLastTag, n - afterLastTag);
- chunks.push_back(TextChunk(text));
- }
- }
-
- // strip out all backslashes from text chunks but not tags
- for (size_t i = 0; i < chunks.size(); i++) {
- Chunk &c = chunks[i];
- if (is<TextChunk *>(&c)) {
- TextChunk &tc = (TextChunk&)c;
- std::string unescaped = tc.getText();
- unescaped.erase(std::remove(unescaped.begin(), unescaped.end(), '\\'), unescaped.end());
- if (unescaped.length() < tc.getText().length()) {
- chunks[i] = TextChunk(unescaped);
- }
- }
- }
-
- return chunks;
-}
-
-void ParseTreePatternMatcher::InitializeInstanceFields() {
- _start = "<";
- _stop = ">";
- _escape = "\\";
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.h
deleted file mode 100644
index 8641fc9a00..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/ParseTreePatternMatcher.h
+++ /dev/null
@@ -1,185 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Exceptions.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// A tree pattern matching mechanism for ANTLR <seealso cref="ParseTree"/>s.
- /// <p/>
- /// Patterns are strings of source input text with special tags representing
- /// token or rule references such as:
- /// <p/>
- /// {@code <ID> = <expr>;}
- /// <p/>
- /// Given a pattern start rule such as {@code statement}, this object constructs
- /// a <seealso cref="ParseTree"/> with placeholders for the {@code ID} and {@code expr}
- /// subtree. Then the <seealso cref="#match"/> routines can compare an actual
- /// <seealso cref="ParseTree"/> from a parse with this pattern. Tag {@code <ID>} matches
- /// any {@code ID} token and tag {@code <expr>} references the result of the
- /// {@code expr} rule (generally an instance of {@code ExprContext}).
- /// <p/>
- /// Pattern {@code x = 0;} is a similar pattern that matches the same pattern
- /// except that it requires the identifier to be {@code x} and the expression to
- /// be {@code 0}.
- /// <p/>
- /// The <seealso cref="#matches"/> routines return {@code true} or {@code false} based
- /// upon a match for the tree rooted at the parameter sent in. The
- /// <seealso cref="#match"/> routines return a <seealso cref="ParseTreeMatch"/> object that
- /// contains the parse tree, the parse tree pattern, and a map from tag name to
- /// matched nodes (more below). A subtree that fails to match, returns with
- /// <seealso cref="ParseTreeMatch#mismatchedNode"/> set to the first tree node that did not
- /// match.
- /// <p/>
- /// For efficiency, you can compile a tree pattern in string form to a
- /// <seealso cref="ParseTreePattern"/> object.
- /// <p/>
- /// See {@code TestParseTreeMatcher} for lots of examples.
- /// <seealso cref="ParseTreePattern"/> has two static helper methods:
- /// <seealso cref="ParseTreePattern#findAll"/> and <seealso cref="ParseTreePattern#match"/> that
- /// are easy to use but not super efficient because they create new
- /// <seealso cref="ParseTreePatternMatcher"/> objects each time and have to compile the
- /// pattern in string form before using it.
- /// <p/>
- /// The lexer and parser that you pass into the <seealso cref="ParseTreePatternMatcher"/>
- /// constructor are used to parse the pattern in string form. The lexer converts
- /// the {@code <ID> = <expr>;} into a sequence of four tokens (assuming lexer
- /// throws out whitespace or puts it on a hidden channel). Be aware that the
- /// input stream is reset for the lexer (but not the parser; a
- /// <seealso cref="ParserInterpreter"/> is created to parse the input.). Any user-defined
- /// fields you have put into the lexer might get changed when this mechanism asks
- /// it to scan the pattern string.
- /// <p/>
- /// Normally a parser does not accept token {@code <expr>} as a valid
- /// {@code expr} but, from the parser passed in, we create a special version of
- /// the underlying grammar representation (an <seealso cref="ATN"/>) that allows imaginary
- /// tokens representing rules ({@code <expr>}) to match entire rules. We call
- /// these <em>bypass alternatives</em>.
- /// <p/>
- /// Delimiters are {@code <} and {@code >}, with {@code \} as the escape string
- /// by default, but you can set them to whatever you want using
- /// <seealso cref="#setDelimiters"/>. You must escape both start and stop strings
- /// {@code \<} and {@code \>}.
- /// </summary>
- class ANTLR4CPP_PUBLIC ParseTreePatternMatcher {
- public:
- class CannotInvokeStartRule : public RuntimeException {
- public:
- CannotInvokeStartRule(const RuntimeException &e);
- ~CannotInvokeStartRule();
- };
-
- // Fixes https://github.com/antlr/antlr4/issues/413
- // "Tree pattern compilation doesn't check for a complete parse"
- class StartRuleDoesNotConsumeFullPattern : public RuntimeException {
- public:
- StartRuleDoesNotConsumeFullPattern() = default;
- StartRuleDoesNotConsumeFullPattern(StartRuleDoesNotConsumeFullPattern const&) = default;
- ~StartRuleDoesNotConsumeFullPattern();
-
- StartRuleDoesNotConsumeFullPattern& operator=(StartRuleDoesNotConsumeFullPattern const&) = default;
- };
-
- /// Constructs a <seealso cref="ParseTreePatternMatcher"/> or from a <seealso cref="Lexer"/> and
- /// <seealso cref="Parser"/> object. The lexer input stream is altered for tokenizing
- /// the tree patterns. The parser is used as a convenient mechanism to get
- /// the grammar name, plus token, rule names.
- ParseTreePatternMatcher(Lexer *lexer, Parser *parser);
- virtual ~ParseTreePatternMatcher();
-
- /// <summary>
- /// Set the delimiters used for marking rule and token tags within concrete
- /// syntax used by the tree pattern parser.
- /// </summary>
- /// <param name="start"> The start delimiter. </param>
- /// <param name="stop"> The stop delimiter. </param>
- /// <param name="escapeLeft"> The escape sequence to use for escaping a start or stop delimiter.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code start} is {@code null} or empty. </exception>
- /// <exception cref="IllegalArgumentException"> if {@code stop} is {@code null} or empty. </exception>
- virtual void setDelimiters(const std::string &start, const std::string &stop, const std::string &escapeLeft);
-
- /// <summary>
- /// Does {@code pattern}, matched as rule {@code patternRuleIndex}, match {@code tree}? </summary>
- virtual bool matches(ParseTree *tree, const std::string &pattern, int patternRuleIndex);
-
- /// <summary>
- /// Does the compiled {@code pattern} match {@code tree}? Pass in a
- /// compiled pattern instead of a string representation of a tree pattern.
- /// </summary>
- virtual bool matches(ParseTree *tree, const ParseTreePattern &pattern);
-
- /// <summary>
- /// Compare {@code pattern} matched as rule {@code patternRuleIndex} against
- /// {@code tree} and return a <seealso cref="ParseTreeMatch"/> object that contains the
- /// matched elements, or the node at which the match failed.
- /// </summary>
- virtual ParseTreeMatch match(ParseTree *tree, const std::string &pattern, int patternRuleIndex);
-
- /// <summary>
- /// Compare {@code pattern} matched against {@code tree} and return a
- /// <seealso cref="ParseTreeMatch"/> object that contains the matched elements, or the
- /// node at which the match failed. Pass in a compiled pattern instead of a
- /// string representation of a tree pattern.
- /// </summary>
- virtual ParseTreeMatch match(ParseTree *tree, const ParseTreePattern &pattern);
-
- /// <summary>
- /// For repeated use of a tree pattern, compile it to a
- /// <seealso cref="ParseTreePattern"/> using this method.
- /// </summary>
- virtual ParseTreePattern compile(const std::string &pattern, int patternRuleIndex);
-
- /// <summary>
- /// Used to convert the tree pattern string into a series of tokens. The
- /// input stream is reset.
- /// </summary>
- virtual Lexer* getLexer();
-
- /// <summary>
- /// Used to collect the grammar file name, token names and rule names
- /// needed to parse the pattern into a parse tree.
- /// </summary>
- virtual Parser* getParser();
-
- // ---- SUPPORT CODE ----
-
- virtual std::vector<std::unique_ptr<Token>> tokenize(const std::string &pattern);
-
- /// Split "<ID> = <e:expr>;" into 4 chunks for tokenizing by tokenize().
- virtual std::vector<Chunk> split(const std::string &pattern);
-
- protected:
- std::string _start;
- std::string _stop;
- std::string _escape; // e.g., \< and \> must escape BOTH!
-
- /// Recursively walk {@code tree} against {@code patternTree}, filling
- /// {@code match.}<seealso cref="ParseTreeMatch#labels labels"/>.
- ///
- /// <returns> the first node encountered in {@code tree} which does not match
- /// a corresponding node in {@code patternTree}, or {@code null} if the match
- /// was successful. The specific node returned depends on the matching
- /// algorithm used by the implementation, and may be overridden. </returns>
- virtual ParseTree* matchImpl(ParseTree *tree, ParseTree *patternTree, std::map<std::string, std::vector<ParseTree *>> &labels);
-
- /// Is {@code t} a {@code <expr>} subtree?
- virtual RuleTagToken* getRuleTagToken(ParseTree *t);
-
- private:
- Lexer *_lexer;
- Parser *_parser;
-
- void InitializeInstanceFields();
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
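
(A hedged end-to-end sketch of the matcher API above; MyLexer, MyParser, the statement start rule and the RuleStatement constant are assumed names for a generated grammar and do not appear in this diff.)

#include "ANTLRInputStream.h"
#include "CommonTokenStream.h"
#include "tree/pattern/ParseTreeMatch.h"
#include "tree/pattern/ParseTreePattern.h"
#include "tree/pattern/ParseTreePatternMatcher.h"
#include "MyLexer.h"   // hypothetical generated lexer
#include "MyParser.h"  // hypothetical generated parser

int main() {
  antlr4::ANTLRInputStream input("x = 0;");
  MyLexer lexer(&input);
  antlr4::CommonTokenStream tokens(&lexer);
  MyParser parser(&tokens);
  antlr4::tree::ParseTree *tree = parser.statement();   // hypothetical start rule

  // The matcher borrows the lexer to tokenize pattern strings and the parser
  // for the grammar, token and rule names.
  antlr4::tree::pattern::ParseTreePatternMatcher matcher(&lexer, &parser);

  // One-shot check against a pattern string...
  bool ok = matcher.matches(tree, "<ID> = <expr>;", MyParser::RuleStatement);   // rule index assumed

  // ...or compile once and reuse the pattern for many trees.
  antlr4::tree::pattern::ParseTreePattern pattern =
      matcher.compile("<ID> = <expr>;", MyParser::RuleStatement);
  antlr4::tree::pattern::ParseTreeMatch m = pattern.match(tree);
  return (ok && m.succeeded()) ? 0 : 1;
}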
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.cpp
deleted file mode 100644
index 6f3fb73446..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.cpp
+++ /dev/null
@@ -1,77 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/RuleTagToken.h"
-
-using namespace antlr4::tree::pattern;
-
-RuleTagToken::RuleTagToken(const std::string &/*ruleName*/, int _bypassTokenType) : bypassTokenType(_bypassTokenType) {
-}
-
-RuleTagToken::RuleTagToken(const std::string &ruleName, size_t bypassTokenType, const std::string &label)
- : ruleName(ruleName), bypassTokenType(bypassTokenType), label(label) {
- if (ruleName.empty()) {
- throw IllegalArgumentException("ruleName cannot be null or empty.");
- }
-
-}
-
-std::string RuleTagToken::getRuleName() const {
- return ruleName;
-}
-
-std::string RuleTagToken::getLabel() const {
- return label;
-}
-
-size_t RuleTagToken::getChannel() const {
- return DEFAULT_CHANNEL;
-}
-
-std::string RuleTagToken::getText() const {
- if (label != "") {
- return std::string("<") + label + std::string(":") + ruleName + std::string(">");
- }
-
- return std::string("<") + ruleName + std::string(">");
-}
-
-size_t RuleTagToken::getType() const {
- return bypassTokenType;
-}
-
-size_t RuleTagToken::getLine() const {
- return 0;
-}
-
-size_t RuleTagToken::getCharPositionInLine() const {
- return INVALID_INDEX;
-}
-
-size_t RuleTagToken::getTokenIndex() const {
- return INVALID_INDEX;
-}
-
-size_t RuleTagToken::getStartIndex() const {
- return INVALID_INDEX;
-}
-
-size_t RuleTagToken::getStopIndex() const {
- return INVALID_INDEX;
-}
-
-antlr4::TokenSource *RuleTagToken::getTokenSource() const {
- return nullptr;
-}
-
-antlr4::CharStream *RuleTagToken::getInputStream() const {
- return nullptr;
-}
-
-std::string RuleTagToken::toString() const {
- return ruleName + ":" + std::to_string(bypassTokenType);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.h
deleted file mode 100644
index cb0e50399e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/RuleTagToken.h
+++ /dev/null
@@ -1,117 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Token.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// A <seealso cref="Token"/> object representing an entire subtree matched by a parser
- /// rule; e.g., {@code <expr>}. These tokens are created for <seealso cref="TagChunk"/>
- /// chunks where the tag corresponds to a parser rule.
- /// </summary>
- class ANTLR4CPP_PUBLIC RuleTagToken : public Token {
- /// <summary>
- /// This is the backing field for <seealso cref="#getRuleName"/>.
- /// </summary>
- private:
- const std::string ruleName;
-
- /// The token type for the current token. This is the token type assigned to
- /// the bypass alternative for the rule during ATN deserialization.
- const size_t bypassTokenType;
-
- /// This is the backing field for <seealso cref="#getLabe"/>.
- const std::string label;
-
- public:
- /// <summary>
- /// Constructs a new instance of <seealso cref="RuleTagToken"/> with the specified rule
- /// name and bypass token type and no label.
- /// </summary>
- /// <param name="ruleName"> The name of the parser rule this rule tag matches. </param>
- /// <param name="bypassTokenType"> The bypass token type assigned to the parser rule.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code ruleName} is {@code null}
- /// or empty. </exception>
- RuleTagToken(const std::string &ruleName, int bypassTokenType); //this(ruleName, bypassTokenType, nullptr);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="RuleTagToken"/> with the specified rule
- /// name, bypass token type, and label.
- /// </summary>
- /// <param name="ruleName"> The name of the parser rule this rule tag matches. </param>
- /// <param name="bypassTokenType"> The bypass token type assigned to the parser rule. </param>
- /// <param name="label"> The label associated with the rule tag, or {@code null} if
- /// the rule tag is unlabeled.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code ruleName} is {@code null}
- /// or empty. </exception>
- RuleTagToken(const std::string &ruleName, size_t bypassTokenType, const std::string &label);
-
- /// <summary>
- /// Gets the name of the rule associated with this rule tag.
- /// </summary>
- /// <returns> The name of the parser rule associated with this rule tag. </returns>
- std::string getRuleName() const;
-
- /// <summary>
- /// Gets the label associated with the rule tag.
- /// </summary>
- /// <returns> The name of the label associated with the rule tag, or
- /// {@code null} if this is an unlabeled rule tag. </returns>
- std::string getLabel() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// Rule tag tokens are always placed on the <seealso cref="#DEFAULT_CHANNEL"/>.
- /// </summary>
- virtual size_t getChannel() const override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// This method returns the rule tag formatted with {@code <} and {@code >}
- /// delimiters.
- /// </summary>
- virtual std::string getText() const override;
-
- /// Rule tag tokens have types assigned according to the rule bypass
- /// transitions created during ATN deserialization.
- virtual size_t getType() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns 0.
- virtual size_t getLine() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns INVALID_INDEX.
- virtual size_t getCharPositionInLine() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns INVALID_INDEX.
- virtual size_t getTokenIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns INVALID_INDEX.
- virtual size_t getStartIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns INVALID_INDEX.
- virtual size_t getStopIndex() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns {@code null}.
- virtual TokenSource *getTokenSource() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> always returns {@code null}.
- virtual CharStream *getInputStream() const override;
-
- /// The implementation for <seealso cref="RuleTagToken"/> returns a string of the form {@code ruleName:bypassTokenType}.
- virtual std::string toString() const override;
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.cpp
deleted file mode 100644
index 63e97aeaa2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.cpp
+++ /dev/null
@@ -1,39 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/TagChunk.h"
-
-using namespace antlr4::tree::pattern;
-
-TagChunk::TagChunk(const std::string &tag) : TagChunk("", tag) {
-}
-
-TagChunk::TagChunk(const std::string &label, const std::string &tag) : _tag(tag), _label(label) {
- if (tag.empty()) {
- throw IllegalArgumentException("tag cannot be null or empty");
- }
-
-}
-
-TagChunk::~TagChunk() {
-}
-
-std::string TagChunk::getTag() {
- return _tag;
-}
-
-std::string TagChunk::getLabel() {
- return _label;
-}
-
-std::string TagChunk::toString() {
- if (!_label.empty()) {
- return _label + ":" + _tag;
- }
-
- return _tag;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.h
deleted file mode 100644
index 1cdae78995..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TagChunk.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Chunk.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// Represents a placeholder tag in a tree pattern. A tag can have any of the
- /// following forms.
- ///
- /// <ul>
- /// <li>{@code expr}: An unlabeled placeholder for a parser rule {@code expr}.</li>
- /// <li>{@code ID}: An unlabeled placeholder for a token of type {@code ID}.</li>
- /// <li>{@code e:expr}: A labeled placeholder for a parser rule {@code expr}.</li>
- /// <li>{@code id:ID}: A labeled placeholder for a token of type {@code ID}.</li>
- /// </ul>
- ///
- /// This class does not perform any validation on the tag or label names aside
- /// from ensuring that the tag is a non-null, non-empty string.
- /// </summary>
- class ANTLR4CPP_PUBLIC TagChunk : public Chunk {
- public:
- /// <summary>
- /// Construct a new instance of <seealso cref="TagChunk"/> using the specified tag and
- /// no label.
- /// </summary>
- /// <param name="tag"> The tag, which should be the name of a parser rule or token
- /// type.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tag} is {@code null} or
- /// empty. </exception>
- TagChunk(const std::string &tag);
- virtual ~TagChunk();
-
- /// <summary>
- /// Construct a new instance of <seealso cref="TagChunk"/> using the specified label
- /// and tag.
- /// </summary>
- /// <param name="label"> The label for the tag. If this is {@code null}, the
- /// <seealso cref="TagChunk"/> represents an unlabeled tag. </param>
- /// <param name="tag"> The tag, which should be the name of a parser rule or token
- /// type.
- /// </param>
- /// <exception cref="IllegalArgumentException"> if {@code tag} is {@code null} or
- /// empty. </exception>
- TagChunk(const std::string &label, const std::string &tag);
-
- /// <summary>
- /// Get the tag for this chunk.
- /// </summary>
- /// <returns> The tag for the chunk. </returns>
- std::string getTag();
-
- /// <summary>
- /// Get the label, if any, assigned to this chunk.
- /// </summary>
- /// <returns> The label assigned to this chunk, or {@code null} if no label is
- /// assigned to the chunk. </returns>
- std::string getLabel();
-
- /// <summary>
- /// This method returns a text representation of the tag chunk. Labeled tags
- /// are returned in the form {@code label:tag}, and unlabeled tags are
- /// returned as just the tag name.
- /// </summary>
- virtual std::string toString() override;
-
- private:
- /// This is the backing field for <seealso cref="#getTag"/>.
- const std::string _tag;
- /// <summary>
- /// This is the backing field for <seealso cref="#getLabe"/>.
- /// </summary>
- const std::string _label;
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.cpp
deleted file mode 100644
index 8e2e6689d7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "Exceptions.h"
-
-#include "tree/pattern/TextChunk.h"
-
-using namespace antlr4::tree::pattern;
-
-TextChunk::TextChunk(const std::string &text) : text(text) {
- if (text == "") {
-    throw IllegalArgumentException("text cannot be null");
- }
-
-}
-
-TextChunk::~TextChunk() {
-}
-
-std::string TextChunk::getText() {
- return text;
-}
-
-std::string TextChunk::toString() {
- return std::string("'") + text + std::string("'");
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.h
deleted file mode 100644
index bb7fc7f966..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TextChunk.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "Chunk.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// Represents a span of raw text (concrete syntax) between tags in a tree
- /// pattern string.
- /// </summary>
- class ANTLR4CPP_PUBLIC TextChunk : public Chunk {
- private:
- /// <summary>
- /// This is the backing field for <seealso cref="#getText"/>.
- /// </summary>
- const std::string text;
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TextChunk"/> with the specified text.
- /// </summary>
- /// <param name="text"> The text of this chunk. </param>
- /// <exception cref="IllegalArgumentException"> if {@code text} is {@code null}. </exception>
- public:
- TextChunk(const std::string &text);
- virtual ~TextChunk();
-
- /// <summary>
- /// Gets the raw text of this chunk.
- /// </summary>
- /// <returns> The text of the chunk. </returns>
- std::string getText();
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TextChunk"/> returns the result of
- /// <seealso cref="#getText()"/> in single quotes.
- /// </summary>
- virtual std::string toString() override;
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
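
Similarly, a small sketch of the removed TextChunk, exercising only the documented surface (hypothetical helper, arbitrary literal):

    #include <string>
    #include "tree/pattern/TextChunk.h"

    void textChunkSketch() {
      antlr4::tree::pattern::TextChunk chunk("return");
      std::string raw = chunk.getText();      // "return"
      std::string quoted = chunk.toString();  // "'return'", i.e. the text in single quotes
    }
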
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.cpp
deleted file mode 100644
index f5153c8357..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.cpp
+++ /dev/null
@@ -1,36 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/pattern/TokenTagToken.h"
-
-using namespace antlr4::tree::pattern;
-
-TokenTagToken::TokenTagToken(const std::string &/*tokenName*/, int type)
- : CommonToken(type), tokenName(""), label("") {
-}
-
-TokenTagToken::TokenTagToken(const std::string &tokenName, int type, const std::string &label)
- : CommonToken(type), tokenName(tokenName), label(label) {
-}
-
-std::string TokenTagToken::getTokenName() const {
- return tokenName;
-}
-
-std::string TokenTagToken::getLabel() const {
- return label;
-}
-
-std::string TokenTagToken::getText() const {
- if (!label.empty()) {
- return "<" + label + ":" + tokenName + ">";
- }
-
- return "<" + tokenName + ">";
-}
-
-std::string TokenTagToken::toString() const {
- return tokenName + ":" + std::to_string(_type);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.h b/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.h
deleted file mode 100644
index da9e11cd36..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/pattern/TokenTagToken.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "CommonToken.h"
-
-namespace antlr4 {
-namespace tree {
-namespace pattern {
-
- /// <summary>
- /// A <seealso cref="Token"/> object representing a token of a particular type; e.g.,
- /// {@code <ID>}. These tokens are created for <seealso cref="TagChunk"/> chunks where the
- /// tag corresponds to a lexer rule or token type.
- /// </summary>
- class ANTLR4CPP_PUBLIC TokenTagToken : public CommonToken {
- /// <summary>
- /// This is the backing field for <seealso cref="#getTokenName"/>.
- /// </summary>
- private:
- const std::string tokenName;
- /// <summary>
-    /// This is the backing field for <seealso cref="#getLabel"/>.
- /// </summary>
- const std::string label;
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TokenTagToken"/> for an unlabeled tag
- /// with the specified token name and type.
- /// </summary>
- /// <param name="tokenName"> The token name. </param>
- /// <param name="type"> The token type. </param>
- public:
- TokenTagToken(const std::string &tokenName, int type); //this(tokenName, type, nullptr);
-
- /// <summary>
- /// Constructs a new instance of <seealso cref="TokenTagToken"/> with the specified
- /// token name, type, and label.
- /// </summary>
- /// <param name="tokenName"> The token name. </param>
- /// <param name="type"> The token type. </param>
- /// <param name="label"> The label associated with the token tag, or {@code null} if
- /// the token tag is unlabeled. </param>
- TokenTagToken(const std::string &tokenName, int type, const std::string &label);
-
- /// <summary>
- /// Gets the token name. </summary>
- /// <returns> The token name. </returns>
- std::string getTokenName() const;
-
- /// <summary>
- /// Gets the label associated with the rule tag.
- /// </summary>
- /// <returns> The name of the label associated with the rule tag, or
- /// {@code null} if this is an unlabeled rule tag. </returns>
- std::string getLabel() const;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TokenTagToken"/> returns the token tag
- /// formatted with {@code <} and {@code >} delimiters.
- /// </summary>
- virtual std::string getText() const override;
-
- /// <summary>
- /// {@inheritDoc}
- /// <p/>
- /// The implementation for <seealso cref="TokenTagToken"/> returns a string of the form
- /// {@code tokenName:type}.
- /// </summary>
- virtual std::string toString() const override;
- };
-
-} // namespace pattern
-} // namespace tree
-} // namespace antlr4
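
A sketch of the removed TokenTagToken; note that the two-argument constructor in the .cpp above discards its tokenName argument, so only the labeled three-argument form is shown. The token type value is a placeholder:

    #include <string>
    #include "tree/pattern/TokenTagToken.h"

    void tokenTagTokenSketch() {
      const int kIdTokenType = 7;  // hypothetical token type for an ID token
      antlr4::tree::pattern::TokenTagToken token("ID", kIdTokenType, "name");
      std::string text = token.getText();    // "<name:ID>" (angle-bracket delimiters)
      std::string debug = token.toString();  // "ID:7" (token name and numeric type)
    }
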
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.cpp
deleted file mode 100644
index c0398962ec..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPathLexer.h"
-#include "XPathLexerErrorListener.h"
-#include "XPathElement.h"
-#include "XPathWildcardAnywhereElement.h"
-#include "XPathWildcardElement.h"
-#include "XPathTokenAnywhereElement.h"
-#include "XPathTokenElement.h"
-#include "XPathRuleAnywhereElement.h"
-#include "XPathRuleElement.h"
-
-#include "XPath.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-const std::string XPath::WILDCARD = "*";
-const std::string XPath::NOT = "!";
-
-XPath::XPath(Parser *parser, const std::string &path) {
- _parser = parser;
- _path = path;
-}
-
-std::vector<std::unique_ptr<XPathElement>> XPath::split(const std::string &path) {
- ANTLRInputStream in(path);
- XPathLexer lexer(&in);
- lexer.removeErrorListeners();
- XPathLexerErrorListener listener;
- lexer.addErrorListener(&listener);
- CommonTokenStream tokenStream(&lexer);
- try {
- tokenStream.fill();
- } catch (LexerNoViableAltException &) {
- size_t pos = lexer.getCharPositionInLine();
- std::string msg = "Invalid tokens or characters at index " + std::to_string(pos) + " in path '" + path + "'";
- throw IllegalArgumentException(msg);
- }
-
- std::vector<Token *> tokens = tokenStream.getTokens();
- std::vector<std::unique_ptr<XPathElement>> elements;
- size_t n = tokens.size();
- size_t i = 0;
- bool done = false;
- while (!done && i < n) {
- Token *el = tokens[i];
- Token *next = nullptr;
- switch (el->getType()) {
- case XPathLexer::ROOT:
- case XPathLexer::ANYWHERE: {
- bool anywhere = el->getType() == XPathLexer::ANYWHERE;
- i++;
- next = tokens[i];
- bool invert = next->getType() == XPathLexer::BANG;
- if (invert) {
- i++;
- next = tokens[i];
- }
- std::unique_ptr<XPathElement> pathElement = getXPathElement(next, anywhere);
- pathElement->setInvert(invert);
- elements.push_back(std::move(pathElement));
- i++;
- break;
-
- }
- case XPathLexer::TOKEN_REF:
- case XPathLexer::RULE_REF:
- case XPathLexer::WILDCARD:
- elements.push_back(getXPathElement(el, false));
- i++;
- break;
-
- case Token::EOF:
- done = true;
- break;
-
- default :
- throw IllegalArgumentException("Unknown path element " + el->toString());
- }
- }
-
- return elements;
-}
-
-std::unique_ptr<XPathElement> XPath::getXPathElement(Token *wordToken, bool anywhere) {
- if (wordToken->getType() == Token::EOF) {
- throw IllegalArgumentException("Missing path element at end of path");
- }
-
- std::string word = wordToken->getText();
- size_t ttype = _parser->getTokenType(word);
- ssize_t ruleIndex = _parser->getRuleIndex(word);
- switch (wordToken->getType()) {
- case XPathLexer::WILDCARD :
- if (anywhere)
- return std::unique_ptr<XPathWildcardAnywhereElement>(new XPathWildcardAnywhereElement());
- return std::unique_ptr<XPathWildcardElement>(new XPathWildcardElement());
-
- case XPathLexer::TOKEN_REF:
- case XPathLexer::STRING :
- if (ttype == Token::INVALID_TYPE) {
- throw IllegalArgumentException(word + " at index " + std::to_string(wordToken->getStartIndex()) + " isn't a valid token name");
- }
- if (anywhere)
- return std::unique_ptr<XPathTokenAnywhereElement>(new XPathTokenAnywhereElement(word, (int)ttype));
- return std::unique_ptr<XPathTokenElement>(new XPathTokenElement(word, (int)ttype));
-
- default :
- if (ruleIndex == -1) {
- throw IllegalArgumentException(word + " at index " + std::to_string(wordToken->getStartIndex()) + " isn't a valid rule name");
- }
- if (anywhere)
- return std::unique_ptr<XPathRuleAnywhereElement>(new XPathRuleAnywhereElement(word, (int)ruleIndex));
- return std::unique_ptr<XPathRuleElement>(new XPathRuleElement(word, (int)ruleIndex));
- }
-}
-
-static ParserRuleContext dummyRoot;
-
-std::vector<ParseTree *> XPath::findAll(ParseTree *tree, std::string const& xpath, Parser *parser) {
- XPath p(parser, xpath);
- return p.evaluate(tree);
-}
-
-std::vector<ParseTree *> XPath::evaluate(ParseTree *t) {
- dummyRoot.children = { t }; // don't set t's parent.
-
- std::vector<ParseTree *> work = { &dummyRoot };
-
- size_t i = 0;
- std::vector<std::unique_ptr<XPathElement>> elements = split(_path);
-
- while (i < elements.size()) {
- std::vector<ParseTree *> next;
- for (auto *node : work) {
- if (!node->children.empty()) {
- // only try to match next element if it has children
- // e.g., //func/*/stat might have a token node for which
- // we can't go looking for stat nodes.
- auto matching = elements[i]->evaluate(node);
- next.insert(next.end(), matching.begin(), matching.end());
- }
- }
- i++;
- work = next;
- }
-
- return work;
-}
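
For orientation, a walk-through (written as comments, since it only restates the code removed above) of how split() and evaluate() cooperate on the path "//func/*/stat" mentioned in evaluate()'s comment; the rule names are placeholders:

    // split("//func/*/stat") produces, in order:
    //   an XPathRuleAnywhereElement for "func"  -- from "//func": ANYWHERE + rule ref
    //   an XPathWildcardElement                 -- from "/*":     ROOT + wildcard
    //   an XPathRuleElement for "stat"          -- from "/stat":  ROOT + rule ref
    // evaluate() then seeds a work list with a dummy root wrapping t and applies
    // each element left to right, feeding every stage's matches into the next;
    // nodes without children are skipped, as the comment inside the loop explains.
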
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.h
deleted file mode 100644
index e38d482d58..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPath.h
+++ /dev/null
@@ -1,86 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- /// Represent a subset of XPath XML path syntax for use in identifying nodes in
- /// parse trees.
- ///
- /// <para>
- /// Split path into words and separators {@code /} and {@code //} via ANTLR
- /// itself then walk path elements from left to right. At each separator-word
- /// pair, find set of nodes. Next stage uses those as work list.</para>
- ///
- /// <para>
- /// The basic interface is
- /// <seealso cref="XPath#findAll ParseTree.findAll"/>{@code (tree, pathString, parser)}.
- /// But that is just shorthand for:</para>
- ///
- /// <pre>
- /// <seealso cref="XPath"/> p = new <seealso cref="XPath#XPath XPath"/>(parser, pathString);
- /// return p.<seealso cref="#evaluate evaluate"/>(tree);
- /// </pre>
- ///
- /// <para>
- /// See {@code org.antlr.v4.test.TestXPath} for descriptions. In short, this
- /// allows operators:</para>
- ///
- /// <dl>
- /// <dt>/</dt> <dd>root</dd>
- /// <dt>//</dt> <dd>anywhere</dd>
- /// <dt>!</dt> <dd>invert; this must appear directly after root or anywhere
- /// operator</dd>
- /// </dl>
- ///
- /// <para>
- /// and path elements:</para>
- ///
- /// <dl>
- /// <dt>ID</dt> <dd>token name</dd>
- /// <dt>'string'</dt> <dd>any string literal token from the grammar</dd>
- /// <dt>expr</dt> <dd>rule name</dd>
- /// <dt>*</dt> <dd>wildcard matching any node</dd>
- /// </dl>
- ///
- /// <para>
- /// Whitespace is not allowed.</para>
-
- class ANTLR4CPP_PUBLIC XPath {
- public:
- static const std::string WILDCARD; // word not operator/separator
- static const std::string NOT; // word for invert operator
-
- XPath(Parser *parser, const std::string &path);
- virtual ~XPath() {}
-
- // TODO: check for invalid token/rule names, bad syntax
- virtual std::vector<std::unique_ptr<XPathElement>> split(const std::string &path);
-
- static std::vector<ParseTree *> findAll(ParseTree *tree, std::string const& xpath, Parser *parser);
-
- /// Return a list of all nodes starting at {@code t} as root that satisfy the
- /// path. The root {@code /} is relative to the node passed to
- /// <seealso cref="#evaluate"/>.
- virtual std::vector<ParseTree *> evaluate(ParseTree *t);
-
- protected:
- std::string _path;
- Parser *_parser;
-
- /// Convert word like {@code *} or {@code ID} or {@code expr} to a path
- /// element. {@code anywhere} is {@code true} if {@code //} precedes the
- /// word.
- virtual std::unique_ptr<XPathElement> getXPathElement(Token *wordToken, bool anywhere);
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
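
The documentation block above describes the public entry point; here is a minimal sketch of it from caller code (the "//ID" path and the function name are illustrative, not part of the removed sources):

    #include <vector>
    #include "antlr4-runtime.h"
    #include "tree/xpath/XPath.h"

    std::vector<antlr4::tree::ParseTree *>
    collectIdTokens(antlr4::tree::ParseTree *tree, antlr4::Parser *parser) {
      // Shorthand documented above: find every ID token node anywhere in the tree.
      return antlr4::tree::xpath::XPath::findAll(tree, "//ID", parser);
      // Equivalent long form per the same documentation:
      //   antlr4::tree::xpath::XPath p(parser, "//ID");
      //   return p.evaluate(tree);
    }

Per the evaluate() documentation, a leading "/" is interpreted relative to the node passed in, not to an absolute parse-tree root.
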
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.cpp
deleted file mode 100644
index 64b122df13..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "support/CPPUtils.h"
-
-#include "XPathElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathElement::XPathElement(const std::string &nodeName) {
- _nodeName = nodeName;
-}
-
-XPathElement::~XPathElement() {
-}
-
-std::vector<ParseTree *> XPathElement::evaluate(ParseTree * /*t*/) {
- return {};
-}
-
-std::string XPathElement::toString() const {
- std::string inv = _invert ? "!" : "";
- return antlrcpp::toString(*this) + "[" + inv + _nodeName + "]";
-}
-
-void XPathElement::setInvert(bool value) {
- _invert = value;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.h
deleted file mode 100644
index f339117d7f..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathElement.h
+++ /dev/null
@@ -1,40 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "antlr4-common.h"
-
-namespace antlr4 {
-namespace tree {
- class ParseTree;
-
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathElement {
- public:
- /// Construct element like {@code /ID} or {@code ID} or {@code /*} etc...
- /// op is null if just node
- XPathElement(const std::string &nodeName);
- XPathElement(XPathElement const&) = default;
- virtual ~XPathElement();
-
- XPathElement& operator=(XPathElement const&) = default;
-
- /// Given tree rooted at {@code t} return all nodes matched by this path
- /// element.
- virtual std::vector<ParseTree *> evaluate(ParseTree *t);
- virtual std::string toString() const;
-
- void setInvert(bool value);
-
- protected:
- std::string _nodeName;
- bool _invert = false;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.cpp
deleted file mode 100644
index 506d2e1179..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-
-// Generated from XPathLexer.g4 by ANTLR 4.9.3
-
-
-#include "XPathLexer.h"
-
-
-using namespace antlr4;
-
-namespace {
-
-struct XPathLexerStaticData final {
- XPathLexerStaticData(std::vector<std::string> ruleNames,
- std::vector<std::string> channelNames,
- std::vector<std::string> modeNames,
- std::vector<std::string> literalNames,
- std::vector<std::string> symbolicNames)
- : ruleNames(std::move(ruleNames)), channelNames(std::move(channelNames)),
- modeNames(std::move(modeNames)), literalNames(std::move(literalNames)),
- symbolicNames(std::move(symbolicNames)),
- vocabulary(this->literalNames, this->symbolicNames) {}
-
- XPathLexerStaticData(const XPathLexerStaticData&) = delete;
- XPathLexerStaticData(XPathLexerStaticData&&) = delete;
- XPathLexerStaticData& operator=(const XPathLexerStaticData&) = delete;
- XPathLexerStaticData& operator=(XPathLexerStaticData&&) = delete;
-
- std::vector<antlr4::dfa::DFA> decisionToDFA;
- antlr4::atn::PredictionContextCache sharedContextCache;
- const std::vector<std::string> ruleNames;
- const std::vector<std::string> channelNames;
- const std::vector<std::string> modeNames;
- const std::vector<std::string> literalNames;
- const std::vector<std::string> symbolicNames;
- const antlr4::dfa::Vocabulary vocabulary;
- antlr4::atn::SerializedATNView serializedATN;
- std::unique_ptr<antlr4::atn::ATN> atn;
-};
-
-::antlr4::internal::OnceFlag xpathLexerOnceFlag;
-XPathLexerStaticData *xpathLexerStaticData = nullptr;
-
-void xpathLexerInitialize() {
- assert(xpathLexerStaticData == nullptr);
- auto staticData = std::make_unique<XPathLexerStaticData>(
- std::vector<std::string>{
- "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID", "NameChar", "NameStartChar",
- "STRING"
- },
- std::vector<std::string>{
- "DEFAULT_TOKEN_CHANNEL", "HIDDEN"
- },
- std::vector<std::string>{
- "DEFAULT_MODE"
- },
- std::vector<std::string>{
- "", "", "", "'//'", "'/'", "'*'", "'!'"
- },
- std::vector<std::string>{
- "", "TOKEN_REF", "RULE_REF", "ANYWHERE", "ROOT", "WILDCARD", "BANG", "ID",
- "STRING"
- }
- );
- static const int32_t serializedATNSegment[] = {
- 0x4, 0x0, 0x8, 0x32, 0x6, -1, 0x2, 0x0, 0x7, 0x0, 0x2, 0x1, 0x7,
- 0x1, 0x2, 0x2, 0x7, 0x2, 0x2, 0x3, 0x7, 0x3, 0x2, 0x4, 0x7, 0x4,
- 0x2, 0x5, 0x7, 0x5, 0x2, 0x6, 0x7, 0x6, 0x2, 0x7, 0x7, 0x7, 0x1,
- 0x0, 0x1, 0x0, 0x1, 0x0, 0x1, 0x1, 0x1, 0x1, 0x1, 0x2, 0x1, 0x2,
- 0x1, 0x3, 0x1, 0x3, 0x1, 0x4, 0x1, 0x4, 0x5, 0x4, 0x1d, 0x8, 0x4,
- 0xa, 0x4, 0xc, 0x4, 0x20, 0x9, 0x4, 0x1, 0x4, 0x1, 0x4, 0x1, 0x5,
- 0x1, 0x5, 0x3, 0x5, 0x26, 0x8, 0x5, 0x1, 0x6, 0x1, 0x6, 0x1, 0x7,
- 0x1, 0x7, 0x5, 0x7, 0x2c, 0x8, 0x7, 0xa, 0x7, 0xc, 0x7, 0x2f, 0x9,
- 0x7, 0x1, 0x7, 0x1, 0x7, 0x1, 0x2d, 0x0, 0x8, 0x1, 0x3, 0x3, 0x4,
- 0x5, 0x5, 0x7, 0x6, 0x9, 0x7, 0xb, 0x0, 0xd, 0x0, 0xf, 0x8, 0x1,
- 0x0, 0x2, 0x5, 0x0, 0x30, 0x39, 0x5f, 0x5f, 0xb7, 0xb7, 0x300, 0x36f,
- 0x203f, 0x2040, 0xd, 0x0, 0x41, 0x5a, 0x61, 0x7a, 0xc0, 0xd6, 0xd8,
- 0xf6, 0xf8, 0x2ff, 0x370, 0x37d, 0x37f, 0x1fff, 0x200c, 0x200d, 0x2070,
- 0x218f, 0x2c00, 0x2fef, 0x3001, 0xd7ff, 0xf900, 0xfdcf, 0xfdf0, -1,
- 0x0, 0x32, 0x0, 0x1, 0x1, 0x0, 0x0, 0x0, 0x0, 0x3, 0x1, 0x0, 0x0,
- 0x0, 0x0, 0x5, 0x1, 0x0, 0x0, 0x0, 0x0, 0x7, 0x1, 0x0, 0x0, 0x0,
- 0x0, 0x9, 0x1, 0x0, 0x0, 0x0, 0x0, 0xf, 0x1, 0x0, 0x0, 0x0, 0x1,
- 0x11, 0x1, 0x0, 0x0, 0x0, 0x3, 0x14, 0x1, 0x0, 0x0, 0x0, 0x5, 0x16,
- 0x1, 0x0, 0x0, 0x0, 0x7, 0x18, 0x1, 0x0, 0x0, 0x0, 0x9, 0x1a, 0x1,
- 0x0, 0x0, 0x0, 0xb, 0x25, 0x1, 0x0, 0x0, 0x0, 0xd, 0x27, 0x1, 0x0,
- 0x0, 0x0, 0xf, 0x29, 0x1, 0x0, 0x0, 0x0, 0x11, 0x12, 0x5, 0x2f, 0x0,
- 0x0, 0x12, 0x13, 0x5, 0x2f, 0x0, 0x0, 0x13, 0x2, 0x1, 0x0, 0x0, 0x0,
- 0x14, 0x15, 0x5, 0x2f, 0x0, 0x0, 0x15, 0x4, 0x1, 0x0, 0x0, 0x0, 0x16,
- 0x17, 0x5, 0x2a, 0x0, 0x0, 0x17, 0x6, 0x1, 0x0, 0x0, 0x0, 0x18, 0x19,
- 0x5, 0x21, 0x0, 0x0, 0x19, 0x8, 0x1, 0x0, 0x0, 0x0, 0x1a, 0x1e, 0x3,
- 0xd, 0x6, 0x0, 0x1b, 0x1d, 0x3, 0xb, 0x5, 0x0, 0x1c, 0x1b, 0x1, 0x0,
- 0x0, 0x0, 0x1d, 0x20, 0x1, 0x0, 0x0, 0x0, 0x1e, 0x1c, 0x1, 0x0, 0x0,
- 0x0, 0x1e, 0x1f, 0x1, 0x0, 0x0, 0x0, 0x1f, 0x21, 0x1, 0x0, 0x0, 0x0,
- 0x20, 0x1e, 0x1, 0x0, 0x0, 0x0, 0x21, 0x22, 0x6, 0x4, 0x0, 0x0, 0x22,
- 0xa, 0x1, 0x0, 0x0, 0x0, 0x23, 0x26, 0x3, 0xd, 0x6, 0x0, 0x24, 0x26,
- 0x7, 0x0, 0x0, 0x0, 0x25, 0x23, 0x1, 0x0, 0x0, 0x0, 0x25, 0x24, 0x1,
- 0x0, 0x0, 0x0, 0x26, 0xc, 0x1, 0x0, 0x0, 0x0, 0x27, 0x28, 0x7, 0x1,
- 0x0, 0x0, 0x28, 0xe, 0x1, 0x0, 0x0, 0x0, 0x29, 0x2d, 0x5, 0x27, 0x0,
- 0x0, 0x2a, 0x2c, 0x9, 0x0, 0x0, 0x0, 0x2b, 0x2a, 0x1, 0x0, 0x0, 0x0,
- 0x2c, 0x2f, 0x1, 0x0, 0x0, 0x0, 0x2d, 0x2e, 0x1, 0x0, 0x0, 0x0, 0x2d,
- 0x2b, 0x1, 0x0, 0x0, 0x0, 0x2e, 0x30, 0x1, 0x0, 0x0, 0x0, 0x2f, 0x2d,
- 0x1, 0x0, 0x0, 0x0, 0x30, 0x31, 0x5, 0x27, 0x0, 0x0, 0x31, 0x10,
- 0x1, 0x0, 0x0, 0x0, 0x4, 0x0, 0x1e, 0x25, 0x2d, 0x1, 0x1, 0x4, 0x0,
- };
-
- staticData->serializedATN = antlr4::atn::SerializedATNView(serializedATNSegment, sizeof(serializedATNSegment) / sizeof(serializedATNSegment[0]));
-
- atn::ATNDeserializer deserializer;
- staticData->atn = deserializer.deserialize(staticData->serializedATN);
-
- size_t count = staticData->atn->getNumberOfDecisions();
- staticData->decisionToDFA.reserve(count);
- for (size_t i = 0; i < count; i++) {
- staticData->decisionToDFA.emplace_back(staticData->atn->getDecisionState(i), i);
- }
- xpathLexerStaticData = staticData.release();
-}
-
-}
-
-XPathLexer::XPathLexer(CharStream *input) : Lexer(input) {
- XPathLexer::initialize();
- _interpreter = new atn::LexerATNSimulator(this, *xpathLexerStaticData->atn, xpathLexerStaticData->decisionToDFA, xpathLexerStaticData->sharedContextCache);
-}
-
-XPathLexer::~XPathLexer() {
- delete _interpreter;
-}
-
-std::string XPathLexer::getGrammarFileName() const {
- return "XPathLexer.g4";
-}
-
-const std::vector<std::string>& XPathLexer::getRuleNames() const {
- return xpathLexerStaticData->ruleNames;
-}
-
-const std::vector<std::string>& XPathLexer::getChannelNames() const {
- return xpathLexerStaticData->channelNames;
-}
-
-const std::vector<std::string>& XPathLexer::getModeNames() const {
- return xpathLexerStaticData->modeNames;
-}
-
-const dfa::Vocabulary& XPathLexer::getVocabulary() const {
- return xpathLexerStaticData->vocabulary;
-}
-
-antlr4::atn::SerializedATNView XPathLexer::getSerializedATN() const {
- return xpathLexerStaticData->serializedATN;
-}
-
-const atn::ATN& XPathLexer::getATN() const {
- return *xpathLexerStaticData->atn;
-}
-
-void XPathLexer::action(RuleContext *context, size_t ruleIndex, size_t actionIndex) {
- switch (ruleIndex) {
- case 4: IDAction(antlrcpp::downCast<antlr4::RuleContext *>(context), actionIndex); break;
-
- default:
- break;
- }
-}
-
-void XPathLexer::IDAction(antlr4::RuleContext *context, size_t actionIndex) {
- switch (actionIndex) {
- case 0:
- if (isupper(getText()[0]))
- setType(TOKEN_REF);
- else
- setType(RULE_REF);
- break;
-
- default:
- break;
- }
-}
-
-void XPathLexer::initialize() {
- ::antlr4::internal::call_once(xpathLexerOnceFlag, xpathLexerInitialize);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.h
deleted file mode 100644
index 6926d2161e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexer.h
+++ /dev/null
@@ -1,47 +0,0 @@
-
-// Generated from XPathLexer.g4 by ANTLR 4.9.3
-
-#pragma once
-
-
-#include "antlr4-runtime.h"
-
-
-class XPathLexer : public antlr4::Lexer {
-public:
- enum {
- TOKEN_REF = 1, RULE_REF = 2, ANYWHERE = 3, ROOT = 4, WILDCARD = 5, BANG = 6,
- ID = 7, STRING = 8
- };
-
- explicit XPathLexer(antlr4::CharStream *input);
-
- ~XPathLexer() override;
-
- virtual std::string getGrammarFileName() const override;
-
- virtual const std::vector<std::string>& getRuleNames() const override;
-
- virtual const std::vector<std::string>& getChannelNames() const override;
-
- virtual const std::vector<std::string>& getModeNames() const override;
-
- virtual const antlr4::dfa::Vocabulary& getVocabulary() const override;
-
- virtual antlr4::atn::SerializedATNView getSerializedATN() const override;
-
- virtual const antlr4::atn::ATN& getATN() const override;
-
- virtual void action(antlr4::RuleContext *context, size_t ruleIndex, size_t actionIndex) override;
-
- // By default the static state used to implement the lexer is lazily initialized during the first
- // call to the constructor. You can call this function if you wish to initialize the static state
- // ahead of time.
- static void initialize();
-private:
- // Individual action functions triggered by action() above.
- void IDAction(antlr4::RuleContext *context, size_t actionIndex);
-
- // Individual semantic predicate functions triggered by sempred() above.
-};
-
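
Following the note in the removed header about lazy static initialization, a sketch of warming the lexer up ahead of time (the wrapper function name is illustrative):

    #include "XPathLexer.h"

    void warmUpXPathLexer() {
      // Builds the lexer's static ATN/DFA state once; later constructor calls
      // find it already initialized (guarded internally by call_once).
      XPathLexer::initialize();
    }
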
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.cpp
deleted file mode 100644
index 2804c8ee3d..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPathLexerErrorListener.h"
-
-using namespace antlr4;
-using namespace antlr4::tree::xpath;
-
-void XPathLexerErrorListener::syntaxError(Recognizer * /*recognizer*/, Token * /*offendingSymbol*/,
- size_t /*line*/, size_t /*charPositionInLine*/, const std::string &/*msg*/, std::exception_ptr /*e*/) {
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.h
deleted file mode 100644
index c0c3eaaca7..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathLexerErrorListener.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "BaseErrorListener.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathLexerErrorListener : public BaseErrorListener {
- public:
- virtual void syntaxError(Recognizer *recognizer, Token *offendingSymbol, size_t line,
- size_t charPositionInLine, const std::string &msg, std::exception_ptr e) override;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp
deleted file mode 100644
index 9ca910df2e..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "tree/xpath/XPathRuleAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathRuleAnywhereElement::XPathRuleAnywhereElement(const std::string &ruleName, int ruleIndex) : XPathElement(ruleName) {
- _ruleIndex = ruleIndex;
-}
-
-std::vector<ParseTree *> XPathRuleAnywhereElement::evaluate(ParseTree *t) {
- return Trees::findAllRuleNodes(t, _ruleIndex);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.h
deleted file mode 100644
index 2ceb75ceed..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleAnywhereElement.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- /// Either {@code ID} at start of path or {@code ...//ID} in middle of path.
- class ANTLR4CPP_PUBLIC XPathRuleAnywhereElement : public XPathElement {
- public:
- XPathRuleAnywhereElement(const std::string &ruleName, int ruleIndex);
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
-
- protected:
- int _ruleIndex = 0;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.cpp
deleted file mode 100644
index 1d145fb575..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.cpp
+++ /dev/null
@@ -1,30 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathRuleElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathRuleElement::XPathRuleElement(const std::string &ruleName, size_t ruleIndex) : XPathElement(ruleName) {
- _ruleIndex = ruleIndex;
-}
-
-std::vector<ParseTree *> XPathRuleElement::evaluate(ParseTree *t) {
- // return all children of t that match nodeName
- std::vector<ParseTree *> nodes;
- for (auto *c : t->children) {
- if (antlrcpp::is<ParserRuleContext *>(c)) {
- ParserRuleContext *ctx = dynamic_cast<ParserRuleContext *>(c);
- if ((ctx->getRuleIndex() == _ruleIndex && !_invert) || (ctx->getRuleIndex() != _ruleIndex && _invert)) {
- nodes.push_back(ctx);
- }
- }
- }
- return nodes;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.h
deleted file mode 100644
index b57276f033..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathRuleElement.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathRuleElement : public XPathElement {
- public:
- XPathRuleElement(const std::string &ruleName, size_t ruleIndex);
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
-
- protected:
- size_t _ruleIndex = 0;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp
deleted file mode 100644
index c557c9d675..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.cpp
+++ /dev/null
@@ -1,20 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathTokenAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathTokenAnywhereElement::XPathTokenAnywhereElement(const std::string &tokenName, int tokenType) : XPathElement(tokenName) {
- this->tokenType = tokenType;
-}
-
-std::vector<ParseTree *> XPathTokenAnywhereElement::evaluate(ParseTree *t) {
- return Trees::findAllTokenNodes(t, tokenType);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.h
deleted file mode 100644
index 2045d91b32..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenAnywhereElement.h
+++ /dev/null
@@ -1,25 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathTokenAnywhereElement : public XPathElement {
- protected:
- int tokenType = 0;
- public:
- XPathTokenAnywhereElement(const std::string &tokenName, int tokenType);
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.cpp
deleted file mode 100644
index d52fc26afd..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.cpp
+++ /dev/null
@@ -1,33 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-#include "support/CPPUtils.h"
-#include "Token.h"
-
-#include "XPathTokenElement.h"
-
-using namespace antlr4;
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathTokenElement::XPathTokenElement(const std::string &tokenName, size_t tokenType) : XPathElement(tokenName) {
- _tokenType = tokenType;
-}
-
-std::vector<ParseTree *> XPathTokenElement::evaluate(ParseTree *t) {
- // return all children of t that match nodeName
- std::vector<ParseTree *> nodes;
- for (auto *c : t->children) {
- if (antlrcpp::is<TerminalNode *>(c)) {
- TerminalNode *tnode = dynamic_cast<TerminalNode *>(c);
- if ((tnode->getSymbol()->getType() == _tokenType && !_invert) || (tnode->getSymbol()->getType() != _tokenType && _invert)) {
- nodes.push_back(tnode);
- }
- }
- }
- return nodes;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.h
deleted file mode 100644
index 7221530ce6..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathTokenElement.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathTokenElement : public XPathElement {
- public:
- XPathTokenElement(const std::string &tokenName, size_t tokenType);
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
-
- protected:
- size_t _tokenType = 0;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp
deleted file mode 100644
index 4ff424f056..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPath.h"
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathWildcardAnywhereElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathWildcardAnywhereElement::XPathWildcardAnywhereElement() : XPathElement(XPath::WILDCARD) {
-}
-
-std::vector<ParseTree *> XPathWildcardAnywhereElement::evaluate(ParseTree *t) {
- if (_invert) {
- return {}; // !* is weird but valid (empty)
- }
- return Trees::getDescendants(t);
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.h
deleted file mode 100644
index dc5d1e5a29..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardAnywhereElement.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathWildcardAnywhereElement : public XPathElement {
- public:
- XPathWildcardAnywhereElement();
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.cpp b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.cpp
deleted file mode 100644
index aabda5a9be..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#include "XPath.h"
-#include "tree/ParseTree.h"
-#include "tree/Trees.h"
-
-#include "XPathWildcardElement.h"
-
-using namespace antlr4::tree;
-using namespace antlr4::tree::xpath;
-
-XPathWildcardElement::XPathWildcardElement() : XPathElement(XPath::WILDCARD) {
-}
-
-std::vector<ParseTree *> XPathWildcardElement::evaluate(ParseTree *t) {
- if (_invert) {
- return {}; // !* is weird but valid (empty)
- }
-
- return t->children;
-}
diff --git a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.h b/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.h
deleted file mode 100644
index accb461de2..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/src/tree/xpath/XPathWildcardElement.h
+++ /dev/null
@@ -1,23 +0,0 @@
-/* Copyright (c) 2012-2017 The ANTLR Project. All rights reserved.
- * Use of this file is governed by the BSD 3-clause license that
- * can be found in the LICENSE.txt file in the project root.
- */
-
-#pragma once
-
-#include "XPathElement.h"
-
-namespace antlr4 {
-namespace tree {
-namespace xpath {
-
- class ANTLR4CPP_PUBLIC XPathWildcardElement : public XPathElement {
- public:
- XPathWildcardElement();
-
- virtual std::vector<ParseTree *> evaluate(ParseTree *t) override;
- };
-
-} // namespace xpath
-} // namespace tree
-} // namespace antlr4
diff --git a/contrib/libs/antlr4_cpp_runtime/ya.make b/contrib/libs/antlr4_cpp_runtime/ya.make
deleted file mode 100644
index 6a0db5139b..0000000000
--- a/contrib/libs/antlr4_cpp_runtime/ya.make
+++ /dev/null
@@ -1,164 +0,0 @@
-# Generated by devtools/yamaker from nixpkgs 22.05.
-
-LIBRARY()
-
-LICENSE(BSD-3-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-VERSION(4.11.1)
-
-ORIGINAL_SOURCE(https://github.com/antlr/antlr4/archive/4.11.1.tar.gz)
-
-ADDINCL(
- GLOBAL contrib/libs/antlr4_cpp_runtime/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_UTIL()
-
-CFLAGS(
- GLOBAL -DANTLR4CPP_STATIC
-)
-
-SRCS(
- src/ANTLRErrorListener.cpp
- src/ANTLRErrorStrategy.cpp
- src/ANTLRFileStream.cpp
- src/ANTLRInputStream.cpp
- src/BailErrorStrategy.cpp
- src/BaseErrorListener.cpp
- src/BufferedTokenStream.cpp
- src/CharStream.cpp
- src/CommonToken.cpp
- src/CommonTokenFactory.cpp
- src/CommonTokenStream.cpp
- src/ConsoleErrorListener.cpp
- src/DefaultErrorStrategy.cpp
- src/DiagnosticErrorListener.cpp
- src/Exceptions.cpp
- src/FailedPredicateException.cpp
- src/InputMismatchException.cpp
- src/IntStream.cpp
- src/InterpreterRuleContext.cpp
- src/Lexer.cpp
- src/LexerInterpreter.cpp
- src/LexerNoViableAltException.cpp
- src/ListTokenSource.cpp
- src/NoViableAltException.cpp
- src/Parser.cpp
- src/ParserInterpreter.cpp
- src/ParserRuleContext.cpp
- src/ProxyErrorListener.cpp
- src/RecognitionException.cpp
- src/Recognizer.cpp
- src/RuleContext.cpp
- src/RuleContextWithAltNum.cpp
- src/RuntimeMetaData.cpp
- src/Token.cpp
- src/TokenSource.cpp
- src/TokenStream.cpp
- src/TokenStreamRewriter.cpp
- src/UnbufferedCharStream.cpp
- src/UnbufferedTokenStream.cpp
- src/Vocabulary.cpp
- src/WritableToken.cpp
- src/atn/ATN.cpp
- src/atn/ATNConfig.cpp
- src/atn/ATNConfigSet.cpp
- src/atn/ATNDeserializationOptions.cpp
- src/atn/ATNDeserializer.cpp
- src/atn/ATNSimulator.cpp
- src/atn/ATNState.cpp
- src/atn/ATNStateType.cpp
- src/atn/ActionTransition.cpp
- src/atn/AmbiguityInfo.cpp
- src/atn/ArrayPredictionContext.cpp
- src/atn/AtomTransition.cpp
- src/atn/ContextSensitivityInfo.cpp
- src/atn/DecisionEventInfo.cpp
- src/atn/DecisionInfo.cpp
- src/atn/DecisionState.cpp
- src/atn/EpsilonTransition.cpp
- src/atn/ErrorInfo.cpp
- src/atn/LL1Analyzer.cpp
- src/atn/LexerATNConfig.cpp
- src/atn/LexerATNSimulator.cpp
- src/atn/LexerAction.cpp
- src/atn/LexerActionExecutor.cpp
- src/atn/LexerChannelAction.cpp
- src/atn/LexerCustomAction.cpp
- src/atn/LexerIndexedCustomAction.cpp
- src/atn/LexerModeAction.cpp
- src/atn/LexerMoreAction.cpp
- src/atn/LexerPopModeAction.cpp
- src/atn/LexerPushModeAction.cpp
- src/atn/LexerSkipAction.cpp
- src/atn/LexerTypeAction.cpp
- src/atn/LookaheadEventInfo.cpp
- src/atn/NotSetTransition.cpp
- src/atn/OrderedATNConfigSet.cpp
- src/atn/ParseInfo.cpp
- src/atn/ParserATNSimulator.cpp
- src/atn/PrecedencePredicateTransition.cpp
- src/atn/PredicateEvalInfo.cpp
- src/atn/PredicateTransition.cpp
- src/atn/PredictionContext.cpp
- src/atn/PredictionContextCache.cpp
- src/atn/PredictionContextMergeCache.cpp
- src/atn/PredictionMode.cpp
- src/atn/ProfilingATNSimulator.cpp
- src/atn/RangeTransition.cpp
- src/atn/RuleTransition.cpp
- src/atn/SemanticContext.cpp
- src/atn/SetTransition.cpp
- src/atn/SingletonPredictionContext.cpp
- src/atn/StarLoopbackState.cpp
- src/atn/Transition.cpp
- src/atn/TransitionType.cpp
- src/atn/WildcardTransition.cpp
- src/dfa/DFA.cpp
- src/dfa/DFASerializer.cpp
- src/dfa/DFAState.cpp
- src/dfa/LexerDFASerializer.cpp
- src/internal/Synchronization.cpp
- src/misc/InterpreterDataReader.cpp
- src/misc/Interval.cpp
- src/misc/IntervalSet.cpp
- src/misc/MurmurHash.cpp
- src/misc/Predicate.cpp
- src/support/Any.cpp
- src/support/Arrays.cpp
- src/support/CPPUtils.cpp
- src/support/StringUtils.cpp
- src/support/Utf8.cpp
- src/tree/ErrorNodeImpl.cpp
- src/tree/IterativeParseTreeWalker.cpp
- src/tree/ParseTree.cpp
- src/tree/ParseTreeListener.cpp
- src/tree/ParseTreeVisitor.cpp
- src/tree/ParseTreeWalker.cpp
- src/tree/TerminalNodeImpl.cpp
- src/tree/Trees.cpp
- src/tree/pattern/Chunk.cpp
- src/tree/pattern/ParseTreeMatch.cpp
- src/tree/pattern/ParseTreePattern.cpp
- src/tree/pattern/ParseTreePatternMatcher.cpp
- src/tree/pattern/RuleTagToken.cpp
- src/tree/pattern/TagChunk.cpp
- src/tree/pattern/TextChunk.cpp
- src/tree/pattern/TokenTagToken.cpp
- src/tree/xpath/XPath.cpp
- src/tree/xpath/XPathElement.cpp
- src/tree/xpath/XPathLexer.cpp
- src/tree/xpath/XPathLexerErrorListener.cpp
- src/tree/xpath/XPathRuleAnywhereElement.cpp
- src/tree/xpath/XPathRuleElement.cpp
- src/tree/xpath/XPathTokenAnywhereElement.cpp
- src/tree/xpath/XPathTokenElement.cpp
- src/tree/xpath/XPathWildcardAnywhereElement.cpp
- src/tree/xpath/XPathWildcardElement.cpp
-)
-
-END()
diff --git a/contrib/libs/libmagic/AUTHORS b/contrib/libs/libmagic/AUTHORS
deleted file mode 100644
index bac5d5bcc9..0000000000
--- a/contrib/libs/libmagic/AUTHORS
+++ /dev/null
@@ -1 +0,0 @@
-See COPYING.
diff --git a/contrib/libs/libmagic/COPYING b/contrib/libs/libmagic/COPYING
deleted file mode 100644
index 16410a17f2..0000000000
--- a/contrib/libs/libmagic/COPYING
+++ /dev/null
@@ -1,29 +0,0 @@
-$File: COPYING,v 1.2 2018/09/09 20:33:28 christos Exp $
-Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
-Software written by Ian F. Darwin and others;
-maintained 1994- Christos Zoulas.
-
-This software is not subject to any export provision of the United States
-Department of Commerce, and may be exported to any country or planet.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
- notice immediately at the beginning of the file, without modification,
- this list of conditions, and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/contrib/libs/libmagic/ChangeLog b/contrib/libs/libmagic/ChangeLog
deleted file mode 100644
index fdf1cff6e2..0000000000
--- a/contrib/libs/libmagic/ChangeLog
+++ /dev/null
@@ -1,2129 +0,0 @@
-2023-07-27 15:45 Christos Zoulas <christos@zoulas.com>
-
- * release 5.45
-
-2023-07-17 11:53 Christos Zoulas <christos@zoulas.com>
-
- * PR/465: psrok1: Avoid muslc asctime_r crash
-
-2023-05-21 13:05 Christos Zoulas <christos@zoulas.com>
-
- * add SIMH tape format support
-
-2023-02-09 12:50 Christos Zoulas <christos@zoulas.com>
-
- * bump the max size of the elf section notes to be read to 128K
- and make it configurable
-
-2023-01-08 1:08 Christos Zoulas <christos@zoulas.com>
-
- * PR/415: Fix decompression with program returning empty
-
-2022-12-26 1:47 Christos Zoulas <christos@zoulas.com>
-
- * PR/408: fix -p with seccomp
- * PR/412: fix MinGW compilation
-
-2022-12-26 12:26 Christos Zoulas <christos@zoulas.com>
-
- * release 5.44
-
-2022-12-14 9:24 Christos Zoulas <christos@zoulas.com>
-
- * Handle nan's so that we don't get internal floating point exceptions
- when they are enabled (Vincent Mihalkovic)
-
-2022-10-23 10:21 Christos Zoulas <christos@zoulas.com>
-
- * PR/397: Restore the ability to process files from stdin immediately.
-
-2022-09-20 17:12 Christos Zoulas <christos@zoulas.com>
-
-	* fixed various clusterfuzz issues
-
-2022-09-19 15:54 Christos Zoulas <christos@zoulas.com>
-
- * Fix error detection for decompression code (Vincent Mihalkovic)
-
-2022-09-15 13:50 Christos Zoulas <christos@zoulas.com>
-
- * Add MAGIC_NO_COMPRESS_FORK and use it to produce a more
- meaningful error message if we are sandboxing.
-
-2022-09-15 10:45 Christos Zoulas <christos@zoulas.com>
-
- * Add built-in lzip decompression support (Michal Gorny)
-
-2022-09-14 10:35 Christos Zoulas <christos@zoulas.com>
-
- * Add built-in zstd decompression support (Martin Rodriguez Reboredo)
-
-2022-09-13 14:55 Christos Zoulas <christos@zoulas.com>
-
- * release 5.43
-
-2022-09-10 9:17 Christos Zoulas <christos@zoulas.com>
-
- * Add octal indirect magic (Michal Gorny)
-
-2022-08-17 11:43 Christos Zoulas <christos@zoulas.com>
-
- * PR/374: avoid infinite loop in non-wide code (piru)
- * PR/373: Obey MAGIC_CONTINUE with multiple magic files (vismarli)
-
-2022-07-26 11:10 Christos Zoulas <christos@zoulas.com>
-
- * Fix bug with large flist (Florian Weimer)
-
-2022-07-07 13:21 Christos Zoulas <christos@zoulas.com>
-
- * PR/364: Detect non-nul-terminated core filenames from QEMU
- (mam-ableton)
-
-2022-07-04 15:45 Christos Zoulas <christos@zoulas.com>
-
- * PR/359: Add support for http://ndjson.org/ (darose)
- * PR/362: Fix wide printing (ro-ee)
- * PR/358: Fix width for -f - (jpalus)
- * PR/356: Fix JSON constant parsing (davewhite)
-
-2022-06-10 9:40 Christos Zoulas <christos@zoulas.com>
-
- * release 5.42
-
-2022-05-31 14:50 Christos Zoulas <christos@zoulas.com>
-
- * PR/348: add missing cases to prevent file from aborting on
- random magic files.
-
-2022-05-27 21:05 Christos Zoulas <christos@zoulas.com>
-
- * PR/351: octalify filenames when not raw before printing.
-
-2022-04-18 17:51 Christos Zoulas <christos@zoulas.com>
-
- * fix regex cacheing bug (Dirk Mueller)
- * merge file_regcomp and file_regerror() to simplify the code
- and reduce memory requirements for storing regexes (Dirk Mueller)
-
-2022-03-19 12:56 Christos Zoulas <christos@zoulas.com>
-
- * cache regex (Dirk Mueller)
- * detect filesystem full by flushing output (Dirk Mueller)
-
-2021-11-19 12:36 Christos Zoulas <christos@zoulas.com>
-
- * implement running decompressor programs using
- posix_spawnp(2) instead of vfork(2)
-
-2021-10-24 11:51 Christos Zoulas <christos@zoulas.com>
-
- * Add support for msdos dates and times
-
-2021-10-20 9:55 Christos Zoulas <christos@zoulas.com>
-
- * use the system byte swapping functions if available (Werner Fink)
-
-2021-10-18 11:57 Christos Zoulas <christos@zoulas.com>
-
- * release 5.41
-
-2021-09-23 03:51 Christos Zoulas <christos@zoulas.com>
-
- * Avinash Sonawane: Fix tzname detection
-
-2021-09-03 09:17 Christos Zoulas <christos@zoulas.com>
-
- * Fix relationship tests with "search" magic, don't short circuit
- logic
-
-2021-07-13 01:06 Christos Zoulas <christos@zoulas.com>
-
- * Fix memory leak in compile mode
-
-2021-07-01 03:51 Christos Zoulas <christos@zoulas.com>
-
- * PR/272: kiefermat: Only set returnval = 1 when we printed something
- (in all cases print or !print). This simplifies the logic and fixes
-	  the issue in the PR where, with -k and --mime-type, there was no continuation
- printed before the default case.
-
-2021-06-30 13:07 Christos Zoulas <christos@zoulas.com>
-
- * PR/270: Don't translate unprintable characters in %s magic formats
- when -r
- * PR/269: Avoid undefined behavior with clang (adding offset to NULL)
-
-2021-05-09 18:38 Christos Zoulas <christos@zoulas.com>
-
- * Add a new flag (f) that requires that the match is a full word,
- not a partial word match.
- * Add varint types (unused)
-
-2021-04-19 17:17 Christos Zoulas <christos@zoulas.com>
-
- * PR/256: mutableVoid: If the file is less than 3 bytes, use the file
- length to determine type
- * PR/259: aleksandr.v.novichkov: mime printing through indirect magic
- is not taken into account, use match directly so that it does.
-
-2021-04-04 17:02 Christos Zoulas <christos@zoulas.com>
-
- * count the total bytes found not the total byte positions
- in order to determine encoding (Anatol Belski)
-
-2021-03-30 20:21 Christos Zoulas <christos@zoulas.com>
-
- * release 5.40
-
-2021-02-05 16:31 Christos Zoulas <christos@zoulas.com>
-
- * PR/234: Add limit to the number of bytes to scan for encoding
- * PR/230: Fix /T (trim flag) for regex
-
-2021-02-01 12:31 Christos Zoulas <christos@zoulas.com>
- * PR/77: Trim trailing separator.
-
-2020-12-17 15:44 Christos Zoulas <christos@zoulas.com>
-
- * PR/211: Convert system read errors from corrupt ELF
- files into human readable error messages
-
-2020-12-08 16:24 Christos Zoulas <christos@zoulas.com>
-
- * fix multithreaded decompression file descriptor issue
- by using close-on-exec (Denys Vlasenko)
-
-2020-06-27 11:58 Christos Zoulas <christos@zoulas.com>
-
- * Exclude surrogate pairs from utf-8 detection (Michael Liu)
-
-2020-06-25 12:53 Christos Zoulas <christos@zoulas.com>
-
- * Include # to the list of ignored format chars (Werner Fink)
-
-2020-06-14 20:02 Christos Zoulas <christos@zoulas.com>
-
- * release 5.39
-
-2020-06-07 20:00 Christos Zoulas <christos@zoulas.com>
-
- * Remove unused subtype_mime (Steve Grubb)
- * Remove unused check in okstat (Steve Grubb)
- * Fix mime-type in elf binaries by making sure $x is set
- * Fix indirect negative offsets broken by OFFNEGATIVE
- * Fix GUID equality check
- * PR/165: Handle empty array and strings in JSON
- * PR/162: Add --exclude-quiet
-
-2020-06-06 15:33 Christos Zoulas <christos@zoulas.com>
-
- * Fix memory leak in ascmagic (Steve Grubb)
-
-2020-06-04 00:21 Christos Zoulas <christos@zoulas.com>
-
- * Fix string comparison length with ignore whitespace
-
-2020-05-31 00:11 Christos Zoulas <christos@zoulas.com>
-
- * Fix mingwin 64 compilation
-
-2020-05-30 23:56 Christos Zoulas <christos@zoulas.com>
-
- * PR/159: whitelist getpid needed for file_pipe2file()
-
-2020-05-09 18:57 Christos Zoulas <christos@zoulas.com>
-
- * Indicate negative offsets with a flag OFFNEGATIVE
- so that -0 works.
- * Introduce "offset" magic type that can be used to
- detect the file size, and bail on short files.
- * document DER better in the magic man page.
-
-2020-03-11 21:53 Christos Zoulas <christos@zoulas.com>
-
- * fix memory leaks (SonarQube)
-
-2020-03-08 21:33 Christos Zoulas <christos@zoulas.com>
-
- * fix memory leaks (SonarQube)
- * rewrite confusing loops (SonarQube)
- * fix bogus test (SonarQube)
- * pass a sized buffer to file_fmttime() (SonarQube)
-
- * fix memory leaks (SonarQube)
-
-2020-02-20 15:50 Christos Zoulas <christos@zoulas.com>
-
- * Don't allow * in printf formats, or the code itself (Christoph Biedl)
- * Introduce a printf output size checker to avoid DoS attacks
-
-2020-02-17 17:22 Christos Zoulas <christos@zoulas.com>
-
- * Avoid memory leak on error (oss-fuzz)
-	* Check length of string on DER before dereferencing and add new types
- * Add missing DER string (oss-fuzz)
-
-2020-02-16 20:45 Christos Zoulas <christos@zoulas.com>
-
- * Add missing DER types, and debugging
-
-2020-02-13 13:10 Christos Zoulas <christos@zoulas.com>
-
- * PR/140: Avoid abort with hand-crafted magic file (gockelhahn)
- * PR/139 Avoid DoS in printf with hand-crafted magic file (gockelhahn)
- * PR/138: Avoid crash with hand-crafted magic file (gockelhahn)
-
-2020-02-12 17:30 Christos Zoulas <christos@zoulas.com>
-
- * PR/136: Fix static build by adding a libmagic.pc (Fabrice Fontaine)
-
-2019-12-24 14:16 Christos Zoulas <christos@zoulas.com>
-
- * add guid support
-
-2019-12-16 21:11 Christos Zoulas <christos@zoulas.com>
-
- * release 5.38
-
-2019-12-15 22:13 Christos Zoulas <christos@zoulas.com>
- Document changes since the previous release:
- - Always accept -S (no sandbox) even if we don't support sandboxing
- - More syscalls elided for sandboxing
- - For ELF dynamic means having an interpreter not just PT_DYNAMIC
- - Check for large ELF session header offset
- - When saving and restoring a locale, keep the locale name in our
- own storage.
- - Add a flag to disable CSV file detection.
- - Don't pass NULL/0 to memset to appease sanitizers.
- - Avoid spurious prints when looks for extensions or apple strings
- in fsmagic.
-	   - Add builtin decompressors for xz and bzip.
- - Add a limit for the number of CDF elements.
- - More checks for overflow in CDF.
-
-2019-05-14 22:26 Christos Zoulas <christos@zoulas.com>
-
- * release 5.37
-
-2019-05-09 22:27 Christos Zoulas <christos@zoulas.com>
-
- * Make sure that continuation separators are printed
- with -k within softmagic
-
-2019-05-06 22:27 Christos Zoulas <christos@zoulas.com>
-
- * Change SIGPIPE saving and restoring during compression to use
- sigaction(2) instead of signal(3) and cache it. (Denys Vlasenko)
- * Cache stat(2) calls more to reduce number of calls (Denys Vlasenko)
-
-2019-05-06 17:25 Christos Zoulas <christos@zoulas.com>
-
- * PR/77: Handle --mime-type and -k correctly.
-
-2019-05-03 15:26 Christos Zoulas <christos@zoulas.com>
-
- * Switch decompression code to use vfork() because
- tools like rpmdiff and rpmbuild call libmagic
- with large process footprints (Denys Vlasenko)
-
-2019-04-07 14:05 Christos Zoulas <christos@zoulas.com>
-
- * PR/75: --enable-zlib, did not work.
-
-2019-02-27 11:54 Christos Zoulas <christos@zoulas.com>
-
- * Improve regex efficiency (Michael Schroeder) by:
- 1. Prefixing regex searches with regular search
- for keywords where possible
- 2. Using memmem(3) where available
-
-2019-02-20 10:16 Christos Zoulas <christos@zoulas.com>
-
- * release 5.36
-
-2019-02-19 15:30 Christos Zoulas <christos@zoulas.com>
-
- * Fix cast to use cast macros
- * Add UCS-32 builtin detection (PR/61) reported by tmc
-
-2019-02-18 18:24 Christos Zoulas <christos@zoulas.com>
-
- * Fix read (PR/62) and write (PR/64) stack overflows
- reported by spinpx
-
-2018-10-18 19:32 Christos Zoulas <christos@zoulas.com>
-
- * release 5.35
-
-2018-09-10 20:38 Christos Zoulas <christos@zoulas.com>
-
- * Add FreeBSD ELF core file support (John Baldwin)
-
-2018-08-20 18:40 Christos Zoulas <christos@zoulas.com>
-
- * PR/30: Allow all parameter values to be set (don't treat 0 specially)
- * handle default annotations on the softmagic match instead of at
-   the end.
-
-2018-07-25 10:17 Christos Zoulas <christos@zoulas.com>
-
- * PR/23: Recognize JSON files
-
-2018-07-25 10:17 Christos Zoulas <christos@zoulas.com>
-
- * PR/18: file --mime-encoding should not print mime-type
-
-2018-07-25 8:50 Christos Zoulas <christos@zoulas.com>
-
- * release 5.34
-
-2018-06-22 16:38 Christos Zoulas <christos@zoulas.com>
-
- * Add Quad indirect offsets
-
-2018-05-24 14:10 Christos Zoulas <christos@zoulas.com>
-
- * Enable parsing of ELF dynamic sections to handle PIE better
-
-2018-04-15 14:52 Christos Zoulas <christos@zoulas.com>
-
- * release 5.33
-
-2018-02-24 14:50 Christos Zoulas <christos@zoulas.com>
-
- * extend the support for ${x?:} expansions for magic descriptions
-
-2018-02-21 16:25 Christos Zoulas <christos@zoulas.com>
-
- * add support for ${x?:} in mime types to handle
- pie binaries.
-
-2017-11-03 9:23 Christos Zoulas <christos@zoulas.com>
-
- * add support for negative offsets (offsets from the end of file)
-
-2017-09-26 8:22 Christos Zoulas <christos@zoulas.com>
-
- * close the file on error when writing magic (Steve Grubb)
-
-2017-09-24 12:02 Christos Zoulas <christos@zoulas.com>
-
- * seccomp support (Paul Moore)
-
-2017-09-02 11:53 Christos Zoulas <christos@zoulas.com>
-
- * release 5.32
-
-2017-08-28 16:37 Christos Zoulas <christos@zoulas.com>
-
- * Always reset state in {file,buffer}_apprentice (Krzysztof Wilczynski)
-
-2017-08-27 03:55 Christos Zoulas <christos@zoulas.com>
-
- * Fix always true condition (Thomas Jarosch)
-
-2017-05-24 17:30 Christos Zoulas <christos@zoulas.com>
-
- * pickier parsing of numeric values in magic files.
-
-2017-05-23 17:55 Christos Zoulas <christos@zoulas.com>
-
- * PR/615: add magic_getflags()
-
-2017-05-23 13:55 Christos Zoulas <christos@zoulas.com>
-
- * release 5.31
-
-2017-03-17 20:32 Christos Zoulas <christos@zoulas.com>
-
- * remove trailing spaces from magic files
- * refactor is_tar
- * better bounds checks for cdf
-
-2017-02-10 12:24 Christos Zoulas <christos@zoulas.com>
-
- * release 5.30
-
-2017-02-07 23:27 Christos Zoulas <christos@zoulas.com>
-
- * If we exceeded the offset in a search return no match
- (Christoph Biedl)
- * Be more lenient on corrupt CDF files (Christoph Biedl)
-
-2017-02-04 16:46 Christos Zoulas <christos@zoulas.com>
-
- * pacify ubsan sign extension (oss-fuzz/524)
-
-2017-02-01 12:42 Christos Zoulas <christos@zoulas.com>
-
- * off by one in cdf parsing (PR/593)
- * report debugging sections in elf (PR/591)
-
-2016-11-06 10:52 Christos Zoulas <christos@zoulas.com>
-
- * Allow @@@ in extensions
- * Add missing overflow check in der magic (Jonas Wagner)
-
-2016-10-25 10:40 Christos Zoulas <christos@zoulas.com>
-
- * release 5.29
-
-2016-10-24 11:20 Christos Zoulas <christos@zoulas.com>
-
- * der getlength overflow (Jonas Wagner)
- * multiple magic file load failure (Christoph Biedl)
-
-2016-10-17 11:26 Christos Zoulas <christos@zoulas.com>
-
- * CDF parsing improvements (Guy Helmer)
-
-2016-07-20 7:26 Christos Zoulas <christos@zoulas.com>
-
- * Add support for signed indirect offsets
-
-2016-07-18 7:41 Christos Zoulas <christos@zoulas.com>
-
- * cat /dev/null | file - should print empty (Christoph Biedl)
-
-2016-07-05 15:20 Christos Zoulas <christos@zoulas.com>
-
- * Bump string size from 64 to 96.
-
-2016-06-13 20:20 Christos Zoulas <christos@zoulas.com>
-
- * PR/556: Fix separators on annotations.
-
-2016-06-13 19:40 Christos Zoulas <christos@zoulas.com>
-
- * release 5.28
- * fix leak on allocation failure
-
-2016-06-01 1:20 Christos Zoulas <christos@zoulas.com>
-
- * PR/555: Avoid overflow for offset > nbytes
- * PR/550: Segv on DER parsing:
- - use the correct variable for length
- - set offset to 0 on failure.
-
-2016-05-13 12:00 Christos Zoulas <christos@zoulas.com>
-
- * release 5.27
-
-2016-04-18 9:35 Christos Zoulas <christos@zoulas.com>
-
- * Errors comparing DER entries or computing offsets
- are just indications of malformed non-DER files.
- Don't print them.
- * Offset comparison was off-by-one.
- * Fix compression code (Werner Fink)
- * Put new bytes constant in the right file (not the generated one)
-
-2016-04-16 18:34 Christos Zoulas <christos@zoulas.com>
-
- * release 5.26
-
-2016-03-31 13:50 Christos Zoulas <christos@zoulas.com>
-
- * make the number of bytes read from files configurable.
-
-2016-03-21 13:40 Christos Zoulas <christos@zoulas.com>
-
- * Add bounds checks for DER code (discovered by Thomas Jarosch)
- * Change indirect recursion limit to indirect use count and
- bump from 15 to 50 to prevent abuse.
-
-2016-03-13 20:39 Christos Zoulas <christos@zoulas.com>
-
- * Add -00 which prints filename\0description\0
-
-2016-03-01 13:28 Christos Zoulas <christos@zoulas.com>
-
- * Fix ID3 indirect parsing
-
-2016-01-19 10:18 Christos Zoulas <christos@zoulas.com>
-
- * add DER parsing capability
-
-2015-11-13 10:35 Christos Zoulas <christos@zoulas.com>
-
- * provide dprintf(3) for the OS's that don't have it.
-
-2015-11-11 16:25 Christos Zoulas <christos@zoulas.com>
-
- * redo the compression code to report decompression errors
-
-2015-11-10 23:25 Christos Zoulas <christos@zoulas.com>
-
- * REG_STARTEND code is not working as expected, delete it.
-
-2015-11-09 16:05 Christos Zoulas <christos@zoulas.com>
-
- * Add zlib support if we have it.
-
-2015-11-05 11:22 Christos Zoulas <christos@zoulas.com>
-
- * PR/492: compression forking was broken with magic_buffer.
-
-2015-09-16 9:50 Christos Zoulas <christos@zoulas.com>
-
- * release 5.25
-
-2015-09-11 13:25 Christos Zoulas <christos@zoulas.com>
-
- * add a limit to the length of regex searches
-
-2015-09-08 9:50 Christos Zoulas <christos@zoulas.com>
-
- * fix problems with --parameter (Christoph Biedl)
-
-2015-07-11 10:35 Christos Zoulas <christos@zoulas.com>
-
- * Windows fixes PR/466 (Jason Hood)
-
-2015-07-09 10:35 Christos Zoulas <christos@zoulas.com>
-
- * release 5.24
-
-2015-06-11 8:52 Christos Zoulas <christos@zoulas.com>
-
- * redo long option encoding to fix off-by-one in 5.23
-
-2015-06-10 13:50 Christos Zoulas <christos@zoulas.com>
-
- * release 5.23
-
-2015-06-09 16:10 Christos Zoulas <christos@zoulas.com>
-
- * Fix issue with regex range for magic with offset
- * Always return true from mget with USE (success of mget, not match
- indication). Fixes mime evaluation after USE magic
- * PR/459: Don't insert magic entries into the list if there are parsing
- errors for them.
-
-2015-06-03 16:00 Christos Zoulas <christos@zoulas.com>
-
- * PR/455: Add utf-7 encoding
-
-2015-06-03 14:30 Christos Zoulas <christos@zoulas.com>
-
- * PR/455: Implement -Z, look inside, but don't report on compression
- * PR/454: Fix allocation error on bad magic.
-
-2015-05-29 10:30 Christos Zoulas <christos@zoulas.com>
-
- * handle MAGIC_CONTINUE everywhere, not just in softmagic
-
-2015-05-21 14:30 Christos Zoulas <christos@zoulas.com>
-
- * don't print descriptions for NAME types when mime.
-
-2015-04-09 15:59 Christos Zoulas <christos@zoulas.com>
-
- * Add --extension to list the known extensions for this file type
- Idea by Andrew J Roazen
-
-2015-02-14 12:23 Christos Zoulas <christos@zoulas.com>
-
- * Bump file search buffer size to 1M.
-
-2015-01-09 14:35 Christos Zoulas <christos@zoulas.com>
-
- * Fix multiple issues with date formats reported by Christoph Biedl:
- - T_LOCAL meaning was reversed
- - Arithmetic did not work
- Also stop adjusting daylight savings for gmt printing.
-
-2015-01-05 13:00 Christos Zoulas <christos@zoulas.com>
-
- * PR/411: Fix memory corruption from corrupt cdf file.
-
-2015-01-02 15:15 Christos Zoulas <christos@zoulas.com>
-
- * release 5.22
-
-2015-01-01 12:01 Christos Zoulas <christos@zoulas.com>
-
- * add indirect relative for TIFF/Exif
-
-2014-12-16 18:10 Christos Zoulas <christos@zoulas.com>
-
- * restructure elf note printing to avoid repeated messages
- * add note limit, suggested by Alexander Cherepanov
-
-2014-12-16 16:53 Christos Zoulas <christos@zoulas.com>
-
- * Bail out on partial pread()'s (Alexander Cherepanov)
- * Fix incorrect bounds check in file_printable (Alexander Cherepanov)
-
-2014-12-11 20:01 Christos Zoulas <christos@zoulas.com>
-
- * PR/405: ignore SIGPIPE from uncompress programs
- * change printable -> file_printable and use it in
- more places for safety
- * in ELF, when PT_INTERP is present, print the interpreter name
-   instead of "(uses dynamic libraries)".
-
-2014-12-10 20:01 Christos Zoulas <christos@zoulas.com>
-
- * release 5.21
-
-2014-11-27 18:40 Christos Zoulas <christos@zoulas.com>
-
- * Allow setting more parameters from the command line.
- * Split name/use and indirect magic recursion limits.
-
-2014-11-27 11:12 Christos Zoulas <christos@zoulas.com>
-
- * Adjust ELF parameters and the default recursion
- level.
- * Allow setting the recursion level dynamically.
-
-2014-11-24 8:55 Christos Zoulas <christos@zoulas.com>
-
- * The following fixes resulted from Thomas Jarosch's fuzzing
- tests that revealed severe performance issues on pathological
- input:
- - limit number of elf program and sections processing
- - abort elf note processing quickly
- - reduce the number of recursion levels from 20 to 10
- - preserve error messages in indirect magic handling
-
- This is tracked as CVE-2014-8116 and CVE-2014-8117
-
-2014-11-12 10:30 Christos Zoulas <christos@zoulas.com>
-
- * fix bogus free in the user buffer case.
-
-2014-11-11 12:35 Christos Zoulas <christos@zoulas.com>
-
- * fix out of bounds read for pascal strings
- * fix memory leak (not freeing the head of each mlist)
-
-2014-11-07 10:25 Christos Zoulas <christos@zoulas.com>
-
- * When printing strings from a file, convert them to printable
-   on a byte-by-byte basis, so that we don't get issues with
-   locales trying to interpret random byte streams as UTF-8 and
- having printf error out with EILSEQ.
-
-2014-10-17 11:48 Christos Zoulas <christos@zoulas.com>
-
- * fix bounds in note reading (Francisco Alonso / Red Hat)
-
-2014-10-11 15:02 Christos Zoulas <christos@zoulas.com>
-
- * fix autoconf glue for setlocale and locale_t; some OS's
- have locale_t in xlocale.h
-
-2014-10-10 15:01 Christos Zoulas <christos@zoulas.com>
-
- * release 5.20
-
-2014-08-17 10:01 Christos Zoulas <christos@zoulas.com>
-
- * recognize encrypted CDF documents
-
-2014-08-04 9:18 Christos Zoulas <christos@zoulas.com>
-
- * add magic_load_buffers from Brooks Davis
-
-2014-07-24 16:40 Christos Zoulas <christos@zoulas.com>
-
- * add thumbs.db support
-
-2014-06-12 12:28 Christos Zoulas <christos@zoulas.com>
-
- * release 5.19
-
-2014-06-09 9:04 Christos Zoulas <christos@zoulas.com>
-
- * Misc buffer overruns and missing buffer size tests in cdf parsing
- (Francisco Alonso, Jan Kaluza)
-
-2014-06-02 14:50 Christos Zoulas <christos@zoulas.com>
-
- * Enforce limit of 8K on regex searches that have no limits
- * Allow the l modifier for regex to mean line count. Default
- to byte count. If line count is specified, assume a max
- of 80 characters per line to limit the byte count.
- * Don't allow conversions to be used for dates, allowing
- the mask field to be used as an offset.
-
-2014-05-30 12:51 Christos Zoulas <christos@zoulas.com>
-
- * Make the range operator limit the length of the
- regex search.
-
-2014-05-14 19:23 Christos Zoulas <christos@zoulas.com>
-
- * PR/347: Windows fixes
- * PR/352: Hangul word processor recognition
- * PR/354: Encoding irregularities in text files
-
-2014-05-06 6:12 Christos Zoulas <christos@zoulas.com>
-
- * Fix uninitialized title in CDF files (Jan Kaluza)
-
-2014-05-04 14:55 Christos Zoulas <christos@zoulas.com>
-
- * PR/351: Fix compilation of empty files
-
-2014-04-30 17:39 Christos Zoulas <christos@zoulas.com>
-
- * Fix integer formats: We don't specify 'l' or
- 'h' and 'hh' specifiers anymore, only 'll' for
- quads and nothing for the rest. This is so that
- magic writing is simpler.
-
-2014-04-01 15:25 Christos Zoulas <christos@zoulas.com>
-
- * PR/341: Jan Kaluza, fix memory leak
- * PR/342: Jan Kaluza, fix out of bounds read
-
-2014-03-28 15:25 Christos Zoulas <christos@zoulas.com>
-
- * Fix issue with long formats not matching fmtcheck
-
-2014-03-26 11:25 Christos Zoulas <christos@zoulas.com>
-
- * release 5.18
-
-2014-03-15 17:45 Christos Zoulas <christos@zoulas.com>
-
- * add fmtcheck(3) for those who don't have it
-
-2014-03-14 15:12 Christos Zoulas <christos@zoulas.com>
-
- * prevent mime entries from being attached to magic
- entries with no descriptions
-
- * adjust magic strength for regex type
-
- * remove superfluous ascmagic with encoding test
-
-2014-03-06 12:01 Christos Zoulas <christos@zoulas.com>
-
- * fix regression: echo -ne "\012\013\014" | file -i -
- which printed "binary" instead of "application/octet-stream"
-
- * add size_t overflow check for magic file size
-
-2014-02-27 16:01 Christos Zoulas <christos@zoulas.com>
-
- * experimental support for matching with CDF CLSID
-
-2014-02-18 13:04 Kimmo Suominen (kimmo@suominen.com)
-
- * Cache old LC_CTYPE locale before setting it to "C", so
- we can use it to restore LC_CTYPE instead of asking
- setlocale() to scan the environment variables.
-
-2014-02-12 18:21 Christos Zoulas <christos@zoulas.com>
-
- * Count recursion levels through indirect magic
-
-2014-02-11 10:40 Christos Zoulas <christos@zoulas.com>
-
- * Prevent infinite recursion on files with indirect offsets of 0
-
-2014-01-30 21:00 Christos Zoulas <christos@zoulas.com>
-
- * Add -E flag that makes file print filesystem errors to stderr
- and exit.
-
-2014-01-08 17:20 Christos Zoulas <christos@zoulas.com>
-
- * mime printing could print results from multiple magic entries
- if there were multiple matches.
- * in some cases overflow was not detected when computing offsets
- in softmagic.
-
-2013-12-05 12:00 Christos Zoulas <christos@zoulas.com>
-
- * use strcasestr() for cdf strings
- * reset to the "C" locale while doing regex operations, or case
- insensitive comparisons; this is provisional
-
-2013-11-19 20:10 Christos Zoulas <christos@zoulas.com>
-
- * always leave magic file loaded, don't unload for magic_check, etc.
- * fix default encoding to binary instead of unknown which broke recently
- * handle empty and one-byte files less specially, so that
- --mime-encoding does not break completely.
-
-2013-11-06 14:40 Christos Zoulas <christos@zoulas.com>
-
- * fix erroneous non-zero exit code from non-existent file and message
-
-2013-10-29 14:25 Christos Zoulas <christos@zoulas.com>
-
- * add CDF MSI file detection (Guy Helmer)
-
-2013-09-03 11:56 Christos Zoulas <christos@zoulas.com>
-
- * Don't mix errors and regular output if there was an error
- * in magic_descriptor() don't close the file and try to restore
- its position
-
-2013-05-30 17:25 Christos Zoulas <christos@zoulas.com>
-
- * Don't treat magic as an error if offset was past EOF (Christoph Biedl)
-
-2013-05-28 17:25 Christos Zoulas <christos@zoulas.com>
-
- * Fix spacing issues in softmagic and elf (Jan Kaluza)
-
-2013-05-02 18:00 Christos Zoulas <christos@zoulas.com>
-
- * Fix segmentation fault with multiple magic_load commands.
-
-2013-04-22 11:20 Christos Zoulas <christos@zoulas.com>
-
- * The way "default" was implemented was not very useful
- because the "if something was printed at that level"
- was not easily controlled by the user, and the format
- was bound to a string which is too restrictive. Add
-   a "clear" keyword for that level and make "default"
- void. This way one can do:
-
- >>13 clear x
- >>13 lelong 1 foo
- >>13 lelong 2 bar
- >>13 default x
- >>>13 lelong x unknown %x
-
-2013-03-25 13:20 Christos Zoulas <christos@zoulas.com>
-
- * disallow strength setting in "name" entries
-
-2013-03-06 21:24 Christos Zoulas <christos@zoulas.com>
-
- * fix recursive magic separator printing
-
-2013-02-26 19:28 Christos Zoulas <christos@zoulas.com>
-
- * limit recursion level for mget
- * fix pread() related breakage in cdf
- * handle offsets properly in recursive "use"
-
-2013-02-18 10:39 Christos Zoulas <christos@zoulas.com>
-
- * add elf reading of debug info to determine if file is stripped
- (Jan Kaluza)
- * use pread()
-
-2013-01-25 18:05 Christos Zoulas <christos@zoulas.com>
-
- * change mime description size from 64 to 80 to accommodate OOXML.
-
-2013-01-11 14:50 Christos Zoulas <christos@zoulas.com>
-
- * Warn about inconsistent continuation levels.
- * Change fsmagic to add a space after it prints.
-
-2013-01-10 21:00 Christos Zoulas <christos@zoulas.com>
-
- * Make getline public so that file can link against it.
- Perhaps it is better to rename it, or hide it differently.
- Fixes builds on platforms that do not provide it.
-
-2013-01-07 16:30 Christos Zoulas <christos@zoulas.com>
-
- * Add SuS d{,1,2,4,8}, u{,1,2,4,8} and document
- what long, int, short, etc is (Guy Harris)
-
-2013-01-06 11:20 Christos Zoulas <christos@zoulas.com>
-
- * add magic_version function and constant
- * Redo memory allocation and de-allocation.
- (prevents double frees on non mmap platforms)
- * Fix bug with name/use having to do with passing
- found state from the parent to the child and back.
-
-2012-12-19 8:47 Christos Zoulas <christos@zoulas.com>
-
- * Only print elf capabilities for archs we know (Jan Kaluza)
-
-2012-10-30 19:14 Christos Zoulas <christos@zoulas.com>
-
- * Add "name" and "use" file types in order to look
- inside mach-o files.
-
-2012-09-06 10:40 Christos Zoulas <christos@zoulas.com>
-
- * make --version exit 0 (Matthew Schultz)
- * add string/T (Jan Kaluza)
-
-2012-08-09 2:15 Christos Zoulas <christos@zoulas.com>
-
- * add z and t modifiers for our own vasprintf
- * search for $HOME/.magic.mgc if it is there first
- * fix reads from a pipe, and preserve errno
-
-2012-05-15 13:12 Christos Zoulas <christos@zoulas.com>
-
- * use ctime_r, asctime_r
-
-2012-04-06 17:18 Christos Zoulas <christos@zoulas.com>
-
- * Fixes for indirect offsets to handle apple disk formats
-
-2012-04-03 18:26 Christos Zoulas <christos@zoulas.com>
-
- * Add windows date field types
- * More info for windows shortcuts (incomplete)
-
-2012-02-20 17:33 Christos Zoulas <christos@zoulas.com>
-
- * Fix CDF parsing issues found by CERT's fuzzing tool (Will Dormann)
-
-2011-12-15 12:17 Chris Metcalf <cmetcalf@tilera.com>
-
- * Support Tilera architectures (tile64, tilepro, tilegx).
-
-2011-12-16 16:33 Reuben Thomas <rrt@sc3d.org>
-
- * Add magic for /usr/bin/env Perl scripts
- * Weaken generic script magic to avoid clashing with
- language-specific magic.
-
-2011-12-08 13:37 Reuben Thomas <rrt@sc3d.org>
-
- * Simplify if (p) free(p) to free(p).
-
-2011-12-08 13:07 Reuben Thomas <rrt@sc3d.org>
-
- * Remove hardwired token finding (names.h), turning it into soft
-   magic. Patterns are either anchored regexes or search/8192. English
- language detection and PL/1 detection have been removed as they
- were too fragile. -e tokens is still accepted for backwards
- compatibility.
- * Move 3ds patterns (which are commented out anyway) into autodesk
- (they were, oddly, in c-lang).
-
-2011-12-06 00:16 Reuben Thomas <rrt@sc3d.org>
-
- * Tweak strength of generic hash-bang detectors to be less than
- specific ones.
- * Make an inconsistent description of Python scripts consistent.
-
-2011-12-05 23:58 Reuben Thomas <rrt@sc3d.org>
-
- * Fix minor error in file(1).
-
-2011-11-05 00:00 Reuben Thomas <rrt@sc3d.org>
-
- * Fix issue #150 (I hope).
-
-2011-09-22 12:57 Christos Zoulas <christos@zoulas.com>
-
- * Python3 binding fixes from Kelly Anderson
-
-2011-09-20 11:32 Christos Zoulas <christos@zoulas.com>
-
- * If a string type magic entry is marked as text or binary
- only match text files against text entries and binary
- files against binary entries.
-
-2011-09-01 12:12 Christos Zoulas <christos@zoulas.com>
-
- * Don't wait for any subprocess, just the one we forked.
-
-2011-08-26 16:40 Christos Zoulas <christos@zoulas.com>
-
- * If the application name is not set in a cdf file, try to see
- if it has a directory with the application name on it.
-
-2011-08-17 14:32 Christos Zoulas <christos@zoulas.com>
-
- * Fix ELF lseek(2) madness. Inspired by PR/134 by Jan Kaluza
-
-2011-08-14 09:03 Christos Zoulas <christos@zoulas.com>
-
- * Don't use variable string formats.
-
-2011-07-12 12:32 Reuben Thomas <rrt@sc3d.org>
-
- * Fix detection of Zip files (Mantis #128).
- * Make some minor improvements to file(1).
- * Rename MIME types for filesystem objects for consistency with
- xdg-utils. Typically this means that application/x-foo becomes
- inode/foo, but some names also change slightly, e.g.
- application/x-character-device becomes inode/chardevice.
-
-2011-05-10 20:57 Christos Zoulas <christos@zoulas.com>
-
- * fix mingw compilation (Abradoks)
-
-2011-05-10 20:57 Christos Zoulas <christos@zoulas.com>
-
- * remove patchlevel.h
- * Fix read past allocated memory caused by double-incrementing
- a pointer in a loop (reported by Roberto Maar)
-
-2011-03-30 15:45 Christos Zoulas <christos@zoulas.com>
-
- * Fix cdf string buffer setting (Sven Anders)
-
-2011-03-20 16:35 Christos Zoulas <christos@zoulas.com>
-
- * Eliminate MAXPATHLEN and use dynamic allocation for
- path and file buffers.
-
-2011-03-15 18:15 Christos Zoulas <christos@zoulas.com>
-
- * binary tests on magic entries with masks could spuriously
- get converted to ascii.
-
-2011-03-12 18:06 Reuben Thomas <rrt@sc3d.org>
-
- * Improve file.man (remove BUGS, present email addresses consistently).
-
-2011-03-07 19:38 Christos Zoulas <christos@zoulas.com>
-
- * add lrzip support (from Ville Skytta)
-
-2011-02-10 16:36 Christos Zoulas <christos@zoulas.com>
-
- * fix CDF bounds checking (Guy Helmer)
-
-2011-02-10 12:03 Christos Zoulas <christos@zoulas.com>
-
- * add cdf_ctime() that prints a meaningful error when time cannot
- be converted.
-
-2011-02-02 20:40 Christos Zoulas <christos@zoulas.com>
-
- * help and version output to stdout.
-
- * When matching softmagic for ascii files, don't just print
- the softmagic classification, keep going and print the
- text classification too. This fixes broken troff files when
- we moved them from keyword recognition to softmagic
- (they stopped printing "with CRLF" etc.)
- Reported by Doug McIlroy.
-
-2011-01-16 19:31 Reuben Thomas <rrt@sc3d.org>
-
- * Fix two potential buffer overruns in apprentice_list.
-
-2011-01-14 22:33 Reuben Thomas <rrt@sc3d.org>
-
- * New Python binding in pure Python.
- * Update libmagic(3).
-
-2011-01-06 21:40 Reuben Thomas <rrt@sc3d.org>
-
- * Fix Python bindings (including recent Python 3 compatibility
- update).
-
-2011-01-04 18:43 Reuben Thomas <rrt@sc3d.org>
-
- * magic/Makefile.am: make it easier to recover from magic build failures.
- * Fix pstring length specifier parsing to avoid generating invalid
- magic files.
- * Add pstring length "J" (for "JPEG") to specify that the length
- include itself.
- * Fix JPEG comment parsing at last using pstring/HJ!
- * Ignore section 5 man pages in doc/.cvsignore.
-
-2010-12-22 13:12 Christos Zoulas <christos@zoulas.com>
-
- * Add pstring/BHhLl to specify the type of the length of pascal
- strings.
-
-2010-11-26 18:39 Reuben Thomas <rrt@sc3d.org>
-
- * Fix "-e soft": it was ignored when softmagic was called
- during asciimagic.
- * Improve comments and use "unsigned char" in tar.h/is_tar.c.
-
-2010-11-05 17:26 Reuben Thomas <rrt@sc3d.org>
-
- * Make bug reporting addresses more visible.
-
-2010-11-01 18:35 Reuben Thomas <rrt@sc3d.org>
-
- * Add tcl magic from Gustaf Neumann
-
-2010-10-24 10:42 Christos Zoulas <christos@zoulas.com>
-
- * Fix the whitespace comparing code (Christopher Chittleborough)
-
-2010-10-06 21:05 Christos Zoulas <christos@zoulas.com>
-
- * allow string/t to work (Jan Kaluza)
-
-2010-09-20 22:11 Reuben Thomas <rrt@sc3d.org>
-
- * Apply some patches from Ubuntu and Fedora.
-
-2010-09-20 21:16 Reuben Thomas <rrt@sc3d.org>
-
- * Apply all patches from Debian package 5.04-6 which have not
- already been applied and are not Debian-specific.
-
-2010-09-20 15:24 Reuben Thomas <rrt@sc3d.org>
-
- * Minor security fix to softmagic.c (don't use untrusted
- string as printf format).
-
-2010-07-21 12:20 Christos Zoulas <christos@zoulas.com>
-
- * MINGW32 portability from LRN
-
- * Don't warn about escaping magic regex chars when we are in a regex.
-
-2010-07-19 10:55 Christos Zoulas <christos@zoulas.com>
-
- * Only try to print prpsinfo for core files. (Jan Kaluza)
-
-2010-04-22 12:55 Christos Zoulas <christos@zoulas.com>
-
- * Try more elf offsets for Debian core files. (Arnaud Giersch)
-
-2010-02-20 15:18 Reuben Thomas <rrt@sc3d.org>
-
- * Clarify which sort of CDF we mean.
-
-2010-02-14 22:58 Reuben Thomas <rrt@sc3d.org>
-
- * Re-jig Zip file type magic so that unsupported special
- Zip types (those with "mimetype" at offset 30) can be
- recognized.
-
-2010-02-02 21:50 Reuben Thomas <rrt@sc3d.org>
-
- * Add support for OCF (EPUB) files (application/epub+zip)
-
-2010-01-28 18:25 Christos Zoulas <christos@zoulas.com>
-
- * Fix core-dump from unbound loop:
- https://bugzilla.redhat.com/show_bug.cgi?id=533245
-
-2010-01-22 15:45 Christos Zoulas <christos@zoulas.com>
-
- * print proper mime for crystal reports file
-
- * print the last summary information of a cdf document, not the
- first so that nested documents print the right info
-
-2010-01-16 18:42 Charles Longeau <chl@tuxfamily.org>
-
- * bring back some fixes from OpenBSD:
- - make gcc2 build file
- - fix typos in a magic file comment
-
-2009-11-17 18:35 Christos Zoulas <christos@zoulas.com>
-
- * ctime/asctime can return NULL on some OS's although
- they should not (Toshit Antani)
-
-2009-09-14 13:49 Christos Zoulas <christos@zoulas.com>
-
- * Centralize magic path handling routines and remove the
- special-casing from file.c so that the python module for
- example comes up with the same magic path (Fixes ~/.magic
- handling) (from Gab)
-
-2009-09-11 23:38 Reuben Thomas <rrt@sc3d.org>
-
- * When magic argument is a directory, read the files in
- strcmp-sorted order (fixes Debian bug #488562 and our own FIXME).
-
-2009-09-11 13:11 Reuben Thomas <rrt@sc3d.org>
-
- * Combine overlapping epoc and psion magic files into one (epoc).
-
- * Add some more EPOC MIME types.
-
-2009-08-19 15:55 Christos Zoulas <christos@zoulas.com>
-
- * Fix 3 bugs (From Ian Darwin):
- - file_showstr could move one past the end of the array
- - parse_apple did not nul terminate the string in the overflow case
- - parse_mime truncated the wrong string in the overflow case
-
-2009-08-12 12:28 Robert Byrnes <byrnes@wildpumpkin.net>
-
- * Include Localstuff when compiling magic.
-
-2009-07-15 10:05 Christos Zoulas <christos@zoulas.com>
-
- * Fix logic for including mygetopts.h
-
- * Make cdf.c compile again with debugging
-
- * Add the necessary field handling for crystal reports files to work
-
-2009-06-23 01:34 Reuben Thomas <rrt@sc3d.org>
-
- * Stop "(if" identifying Lisp files; that's plain dumb!
-
-2009-06-09 22:13 Reuben Thomas <rrt@sc3d.org>
-
- * Add a couple of missing MP3 MIME types.
-
-2009-05-27 23:00 Reuben Thomas <rrt@sc3d.org>
-
- * Add full range of hash-bang tests for Python and Ruby.
-
- * Add MIME types for Python and Ruby scripts.
-
-2009-05-13 10:44 Christos Zoulas <christos@zoulas.com>
-
- * off by one in parsing hw capabilities in elf
- (Cheng Renquan)
-
-2009-05-08 13:40 Christos Zoulas <christos@zoulas.com>
-
- * lint fixes and more from NetBSD
-
-2009-05-06 10:25 Christos Zoulas <christos@zoulas.com>
-
- * Avoid null dereference in cdf code (Drew Yao)
-
- * More cdf bounds checks and overflow checks
-
-2009-05-01 18:37 Christos Zoulas <christos@zoulas.com>
-
- * Buffer overflow fixes from Drew Yao
-
-2009-04-30 17:10 Christos Zoulas <christos@zoulas.com>
-
- * Fix more cdf lossage. All the documents I have
- right now print the correct information.
-
-2009-03-27 18:43 Christos Zoulas <christos@zoulas.com>
-
- * don't print \012- separators in the same magic entry
- if it consists of multiple magic printing lines.
-
-2009-03-23 10:20 Christos Zoulas <christos@zoulas.com>
-
- * Avoid file descriptor leak in compress code
-   (from Daniel Novotny)
-
-2009-03-18 16:50 Christos Zoulas <christos@zoulas.com>
-
- * Allow escaping of relation characters, so that we can say \^[A-Z]
- and the ^ is not eaten as a relation char.
-
- * Fix troff and fortran to their previous glory using
-   regex. This was broken since their removal from ascmagic.
-
-2009-03-10 16:50 Christos Zoulas <christos@zoulas.com>
-
- * don't use strlen in strndup() (Toby Peterson)
-
-2009-03-10 7:45 Christos Zoulas <christos@zoulas.com>
-
- * avoid c99 syntax.
-
-2009-02-23 15:45 Christos Zoulas <christos@zoulas.com>
-
- * make the cdf code use the buffer first if available,
- and then the fd code.
-
-2009-02-13 13:45 Christos Zoulas <christos@zoulas.com>
-
- * look for struct option to determine if getopt.h is usable for IRIX.
-
- * sanitize cdf document strings
-
-2009-02-04 13:25 Christos Zoulas <christos@zoulas.com>
-
- * fix OS/2 warnings.
-
-2008-12-12 15:50 Christos Zoulas <christos@zoulas.com>
-
- * fix initial offset calculation for non 4K sector files
-
- * add loop limits to avoid DoS attacks by constructing
- looping sector references.
-
-2008-12-03 13:05 Christos Zoulas <christos@zoulas.com>
-
- * fix memory botches on cdf file parsing.
-
- * exit with non-zero value for any error, not just for the last
- file processed.
-
-2008-11-09 20:42 Charles Longeau <chl@tuxfamily.org>
-
- * Replace all str{cpy,cat} functions with strl{cpy,cat}
- * Ensure that strl{cpy,cat} are included in libmagic,
- as needed.
-
-2008-11-06 18:18 Christos Zoulas <christos@zoulas.com>
-
- * Handle ID3 format files.
-
-2008-11-06 23:00 Reuben Thomas <rrt@sc3d.org>
-
- * Fix --mime, --mime-type and --mime-encoding under new scheme.
-
- * Rename "ascii" to "text" and add "encoding" test.
-
- * Return a precise ("utf-16le" or "utf-16be") MIME charset for
- UTF-16.
-
- * Fix error in comment caused by automatic indentation adding
- words!
-
-2008-11-06 10:35 Christos Zoulas <christos@astron.com>
-
- * use memchr instead of strchr because the string
- might not be NUL terminated (Scott MacVicar)
-
-2008-11-03 07:31 Reuben Thomas <rrt@sc3d.org>
-
- * Fix a printf with a non-literal format string.
-
- * Fix formatting and punctuation of help for "--apple".
-
-2008-10-30 11:00 Reuben Thomas <rrt@sc3d.org>
-
- * Correct word counts in comments of struct magic.
-
- * Fix handle_annotation to allow both Apple and MIME types to be
- printed, and to return correct code if MIME type is
- printed (1, not 0) or if there's an error (-1 not 1).
-
- * Fix output of charset for MIME type (precede with semi-colon;
- fixes Debian bug #501460).
-
- * Fix potential attacks via conversion specifications in magic
- strings.
-
- * Add a FIXME for Debian bug #488562 (magic files should be
- read in a defined order, by sorting the names).
-
-2008-10-18 16:45 Christos Zoulas <christos@astron.com>
-
- * Added APPLE file creator/type
-
-2008-10-12 10:20 Christos Zoulas <christos@astron.com>
-
- * Added CDF parsing
-
-2008-10-09 16:40 Christos Zoulas <christos@astron.com>
-
- * filesystem and msdos patches (Joerg Jenderek)
-
-2008-10-09 13:20 Christos Zoulas <christos@astron.com>
-
- * correct --exclude documentation issues: remove troff and fortran
- and rename "token" to "tokens". (Randy McMurchy)
-
-2008-10-01 10:30 Christos Zoulas <christos@astron.com>
-
- * Read ~/.magic in addition to the default magic file, not instead
-   of it, as documented in the man page.
-
-2008-09-10 21:30 Reuben Thomas <rrt@sc3d.org>
-
- * Comment out graphviz patterns, as they match too many files.
-
-2008-08-30 12:54 Christos Zoulas <christos@astron.com>
-
- * Don't eat trailing \n in magic entries.
-
- * Cast defines to allow compilation using a c++ compiler.
-
-2008-08-25 23:56 Reuben Thomas <rrt@sc3d.org>
-
- * Add text/x-lua MIME type for Lua scripts.
-
- * Escape { in regex in graphviz patterns.
-
-2008-07-26 00:59 Reuben Thomas <rrt@sc3d.org>
-
- * Add MIME types for special files.
-
- * Use access to give more accurate information for files that
- can't be opened.
-
- * Add a TODO list.
-
-2008-07-02 11:15 Christos Zoulas <christos@astron.com>
-
- * add !:strength op to adjust magic strength (experimental)
-
-2008-06-16 21:41 Reuben Thomas <rrt@sc3d.org>
-
- * Fix automake error in configure.ac.
-
- * Add MIME type for Psion Sketch files.
-
-2008-06-05 08:59 Christos Zoulas <christos@astron.com>
-
- * Don't print warnings about bad namesize in stripped
-   binaries where PT_NOTE is still there but the actual
-   note is gone (Jakub Jelinek)
-
-2008-05-28 15:12 Robert Byrnes <byrnes@wildpumpkin.net>
-
- * magic/Magdir/elf:
- Note invalid byte order for little-endian SPARC32PLUS.
- Add SPARC V9 vendor extensions and memory model.
-
- * src/elfclass.h:
- Pass target machine to doshn (for Solaris hardware capabilities).
-
- * src/readelf.c (doshn):
- Add support for Solaris hardware/software capabilities.
-
- * src/readelf.h:
- Ditto.
-
- * src/vasprintf.c (dispatch):
- Add support for ll modifier.
-
-2008-05-16 10:25 Christos Zoulas <christos@astron.com>
-
- * Fix compiler warnings.
-
- * remove stray printf, and fix a vprintf bug. (Martin Dorey)
-
-2008-05-06 00:13 Robert Byrnes <byrnes@wildpumpkin.net>
-
- * src/Makefile.am:
- Ensure that getopt_long and [v]asprintf are included in libmagic,
- as needed.
-
- Remove unnecessary EXTRA_DIST.
-
- * src/Makefile.in:
- Rerun automake.
-
- * src/vasprintf.c (dispatch):
- Fix variable precision bug: be sure to step past '*'.
-
- * src/vasprintf.c (core):
- Remove unreachable code.
-
- * src/apprentice.c (set_test_type):
- Add cast to avoid compiler warning.
-
-2008-04-22 23:45 Christos Zoulas <christos@astron.com>
-
- * Add magic submission guidelines (Abel Cheung)
-
- * split msdos and windows magic (Abel Cheung)
-
-2008-04-04 11:00 Christos Zoulas <christos@astron.com>
-
- * >= and <= are not supported, so fix the magic and warn about it.
- reported by: Thien-Thi Nguyen <ttn@gnuvola.org>
-
-2008-03-27 16:16 Robert Byrnes <byrnes@wildpumpkin.net>
-
- * src/readelf.c (donote):
- ELF core file command name/line bug fixes and enhancements:
-
- Try larger offsets first to avoid false matches
- from earlier data that happen to look like strings;
- this primarily affected SunOS 5.x 32-bit Intel core files.
-
- Add support for command line (instead of just short name)
- for SunOS 5.x.
-
- Add information about NT_PSINFO for SunOS 5.x.
-
- Only trim whitespace from end of command line.
-
-2007-02-11 01:36 Reuben Thomas <rrt@sc3d.org>
-
- * Change strength of ! from MULT to 0, as it matches almost
- anything (Reuben Thomas)
-
- * Debian fixes (Reuben Thomas)
-
-2007-02-11 00:17 Reuben Thomas <rrt@sc3d.org>
-
- * Clarify UTF-8 BOM message (Reuben Thomas)
-
- * Add HTML comment to token list in names.h
-
-2007-02-04 15:50 Christos Zoulas <christos@astron.com>
-
- * Debian fixes (Reuben Thomas)
-
-2007-02-04 11:31 Christos Zoulas <christos@astron.com>
-
- * !:mime annotations in magic files (Reuben Thomas)
-
-2007-01-29 15:35 Christos Zoulas <christos@astron.com>
-
- * zero out utime/utimes structs (Gavin Atkinson)
-
-2007-01-26 13:45 Christos Zoulas <christos@astron.com>
-
- * reduce writable data from Diego "Flameeyes" Pettenò
-
-2007-12-28 15:06 Christos Zoulas <christos@astron.com>
-
- * strtof detection
-
- * remove bogus regex magic that could cause a DoS
-
- * better mismatch version message
-
-2007-12-27 11:35 Christos Zoulas <christos@astron.com>
-
- * bring back some fixes from OpenBSD
-
- * treat ELF dynamic objects as executables
-
- * fix gcc warnings
-
-2007-12-01 19:55 Christos Zoulas <christos@astron.com>
-
- * make sure we have zlib.h and libz to compile the builtin
- decompress code
-
-2007-10-28 20:48 Christos Zoulas <christos@astron.com>
-
- * float and double magic support (Behan Webster)
-
-2007-10-28 20:48 Christos Zoulas <christos@astron.com>
-
- * Convert fortran to a soft test (Reuben Thomas)
-
-2007-10-23 5:25 Christos Zoulas <christos@astron.com>
-
- * Add --with-filename, and --no-filename (Reuben Thomas)
-
-2007-10-23 3:59 Christos Zoulas <christos@astron.com>
-
- * Rest of the mime split (Reuben Thomas)
-
- * Make usage message generated from the flags so that
- they stay consistent (Reuben Thomas)
-
-2007-10-20 3:06 Christos Zoulas <christos@astron.com>
-
- * typo in comment, missing ifdef QUICK, remove unneeded code
- (Charles Longeau)
-
-2007-10-17 3:33 Christos Zoulas <christos@astron.com>
-
- * Fix problem printing -\012 in some entries
-
- * Separate magic type and encoding flags (Reuben Thomas)
-
-2007-10-09 3:55 Christos Zoulas <christos@astron.com>
-
- * configure fix for int64 and strndup (Reuben Thomas)
-
-2007-09-26 4:45 Christos Zoulas <christos@astron.com>
-
- * Add magic_descriptor() function.
-
- * Fix regression in elf reading code where the core name was
- not being printed.
-
- * Don't convert NUL's to spaces in {l,b}estring16 (Daniel Dawson)
-
-2007-08-19 6:30 Christos Zoulas <christos@astron.com>
-
- * Make mime format consistent so that it can
- be easily parsed:
- mimetype [charset=character-set] [encoding=encoding-mime-type]
-
- Remove spurious extra text from some MIME type printouts
- (mostly in is_tar).
-
- Fix one case where -i produced nothing at all (for a 1-byte file,
- which is now classed as application/octet-stream).
-
- Remove 7/8bit classifications, since they were arbitrary
- and not based on the file data.
-
- This work was done by Reuben Thomas
-
-2007-05-24 10:00 Christos Zoulas <christos@astron.com>
-
- * Fix another integer overflow (Colin Percival)
-
-2007-03-26 13:58 Christos Zoulas <christos@astron.com>
-
- * make sure that all of struct magic_set is initialized appropriately
- (Brett)
-
-2007-03-25 17:44 Christos Zoulas <christos@astron.com>
-
- * reset left bytes in the buffer (Dmitry V. Levin)
-
- * compilation failed with COMPILE_ONLY and ENABLE_CONDITIONALS
- (Peter Avalos)
-
-2007-03-15 10:51 Christos Zoulas <christos@astron.com>
-
- * fix fortran and nroff reversed tests (Dmitry V. Levin)
-
- * fix exclude option (Dmitry V. Levin)
-
-2007-02-08 17:30 Christos Zoulas <christos@astron.com>
-
- * fix integer underflow in file_printf which can lead to
-   exploitable heap overflow (Jean-Sebastien Guay-Lero)
-
-2007-02-05 11:35 Christos Zoulas <christos@astron.com>
-
- * make socket/pipe reading more robust
-
-2007-01-25 16:01 Christos Zoulas <christos@astron.com>
-
- * Centralize all the tests in file_buffer.
-
- * Add exclude flag.
-
-2007-01-18 05:29 Anon Ymous <do@not.spam.me>
-
- * Move the "type" detection code from parse() into its own table
- driven routine. This avoids maintaining multiple lists in
- file.h.
-
- * Add an optional conditional field (just before the type field).
- This code is wrapped in "#ifdef ENABLE_CONDITIONALS" as it is
- likely to go away.
-
-2007-01-16 23:24 Anon Ymous <do@not.spam.me>
-
- * Fix an initialization bug in check_mem().
-
-2007-01-16 14:58 Anon Ymous <do@not.spam.me>
-
- * Add a "default" type to print a message if nothing previously
- matched at that level or since the last default at that
- level. This is useful for setting up switch-like statements.
- It can also be used to do if/else constructions without a
- redundant second test.
-
- * Fix the "x" special case test so that one can test for that
- string with "=x".
-
- * Allow "search" to search the entire buffer if the "/N"
- search count is missing.
-
- * Make "regex" work! It now starts its search at the
- specified offset and takes an (optional) "/N" line count to
- specify the search range; otherwise it searches to the end
- of the file. The match is now grabbed correctly for format
- strings and the offset set to the end of the match.
-
- * Add a "/s" flag to "regex" and "search" to set the offset to
- the start of the match. By default the offset is set to the
- end of the match, as it is with other tests. This is mostly
- useful for "regex".
-
- * Make "search", "string" and "pstring" use the same
- file_strncmp() routine so that they support the same flags;
- "bestring16" and "lestring16" call the same routine, but
- with flags = 0. Also add a "/C" flag (in analogy to "/c")
- to ignore the case on uppercase (lowercase) characters in
- the test string.
-
- * Strict adherence to C-style string escapes. Warnings are
- printed when compiling. Note: previously "\a" was
- incorrectly translated to 'a' instead of an <alert> (i.e.,
- BELL, typically 0x07).
-
- * Make this compile with "-Wall -Wextra" and all the warning
- flags used with WARNS=4 in the NetBSD source. Also make it
- pass lint.
-
- * Many "cleanups" and hopefully not too many new bugs!
-
-2007-01-16 14:56 Anon Ymous <do@not.spam.me>
-
- * make several more files compile with gcc warnings
- on and also make them pass lint.
-
-2007-01-16 14:54 Anon Ymous <do@not.spam.me>
-
- * fix a puts()/putc() usage goof in file.c
-
- * make file.c compile with gcc warnings and pass lint
-
-2006-12-11 16:49 Christos Zoulas <christos@astron.com>
-
- * fix byteswapping issue
-
- * report the number of bytes we tried to
- allocate when allocation fails
-
- * add a few missed cases in the strength routine
-
-2006-12-08 16:32 Christos Zoulas <christos@astron.com>
-
- * store and print the line number of the magic
- entry for debugging.
-
- * if the magic entry did not print anything,
- don't treat it as a match
-
- * change the magic strength algorithm to take
- into account the relationship op.
-
- * fix a bug in search where we could accidentally
- return a match.
-
- * propagate the error return from match to
- file_softmagic.
-
-2006-11-25 13:35 Christos Zoulas <christos@astron.com>
-
- * Don't store the current offset in the magic
- struct, because it needs to be restored and
- it was not done properly all the time. Bug
- found by: Arkadiusz Miskiewicz
-
- * Fix problem in the '\0' separator; and don't
- print it as an additional separator; print
- it as the only separator.
-
-2006-11-17 10:51 Christos Zoulas <christos@astron.com>
-
- * Added a -0 option to print a '\0' separator
- Etienne Buira <etienne.buira@free.fr>
-
-2006-10-31 15:14 Christos Zoulas <christos@astron.com>
-
- * Check offset before copying (Mike Frysinger)
-
- * merge duplicated code
-
- * add quad date support
-
- * make sure that we nul terminate desc (Ryoji Kanai)
-
- * don't process elf notes multiple times
-
- * allow -z to report empty compressed files
-
- * use calloc to initialize the ascii buffers (Jos van den Oever)
-
-2006-06-08 11:11 Christos Zoulas <christos@astron.com>
-
- * QNX fixes (Mike Gorchak)
-
- * Add quad support.
-
- * FIFO checks (Dr. Werner Fink)
-
- * Linux ELF fixes (Dr. Werner Fink)
-
- * Magic format checks (Dr. Werner Fink)
-
- * Magic format function improvement (Karl Chen)
-
-2006-05-03 11:11 Christos Zoulas <christos@astron.com>
-
- * Pick up some elf changes and some constant fixes from SUSE
-
- * Identify gnu tar vs. posix tar
-
- * With keep-going (-k), don't print spurious newlines (Radek Vokal)
-
-2006-04-01 12:02 Christos Zoulas <christos@astron.com>
-
- * Use calloc instead of malloc (Mike Frysinger)
-
- * Fix configure script to detect wctypes.h (Mike Frysinger)
-
-2006-03-02 16:06 Christos Zoulas <christos@astron.com>
-
- * Print "empty" if the file is empty (Mike Frysinger)
-
- * Don't try to read past the end of the buffer (Mike Frysinger)
-
- * Sort magic entries by strength [experimental]
-
-2005-11-29 13:26 Christos Zoulas <christos@astron.com>
-
- * Use iswprint() to convert the output string.
- (Bastien Nocera)
-
-2005-10-31 8:54 Christos Zoulas <christos@astron.com>
-
- * Fix regression where the core info was not completely processed
- (Radek Vokal)
-
-2005-10-20 11:15 Christos Zoulas <christos@astron.com>
-
- * Middle Endian magic (Diomidis Spinellis)
-
-2005-10-17 11:15 Christos Zoulas <christos@astron.com>
-
- * Open with O_BINARY for CYGWIN (Corinna Vinschen)
-
- * Don't close stdin (Arkadiusz Miskiewicz)
-
- * Look for note sections in non executables.
-
-2005-09-20 13:33 Christos Zoulas <christos@astron.com>
-
- * Don't print SVR4 Style in core files multiple times
- (Radek Vokal)
-
-2005-08-27 04:09 Christos Zoulas <christos@astron.com>
-
- * Cygwin changes (Corinna Vinschen)
-
-2005-08-18 09:53 Christos Zoulas <christos@astron.com>
-
- * Remove erroneous mention of /etc/magic in the file man page.
- This is gentoo bug 101639. (Mike Frysinger)
-
- * Cross-compile support and detection (Mike Frysinger)
-
-2005-08-12 10:17 Christos Zoulas <christos@astron.com>
-
- * Add -h flag and dereference symlinks if POSIXLY_CORRECT
- is set.
-
-2005-07-29 13:57 Christos Zoulas <christos@astron.com>
-
- * Avoid search and regex buffer overflows (Kelledin)
-
-2005-07-12 11:48 Christos Zoulas <christos@astron.com>
-
- * Provide stub implementations for {v,}snprintf() for older
- OS's that don't have them.
- * Change mbstate_t autoconf detection macro from AC_MBSTATE_T
- to AC_TYPE_MBSTATE_T.
-
-2005-06-25 11:48 Christos Zoulas <christos@astron.com>
-
- * Dynamically allocate the string buffers and make the
- default read size 256K.
-
-2005-06-01 00:00 Joerg Sonnenberger <joerg@britannica.bec.de>
-
- * Dragonfly ELF note support
-
-2005-03-14 00:00 Giuliano Bertoletti <gb@symbolic.it>
-
- * Avoid NULL pointer dereference in time conversion.
-
-2005-03-06 00:00 Joerg Walter <jwalt@mail.garni.ch>
-
- * Add indirect magic offset support, and search mode.
-
-2005-01-12 00:00 Stepan Kasal <kasal@ucw.cz>
-
- * src/ascmagic.c (file_ascmagic): Fix three bugs about text files:
- If a CRLF text file happens to have CR at offset HOWMANY - 1
- (currently 0xffff), it should not be counted as CR line
- terminator.
- If a line has length exactly MAXLINELEN, it should not yet be
- treated as a ``very long line'', as MAXLINELEN is ``longest sane
- line length''.
- With CRLF, the line length was not computed correctly, and even
- lines of length MAXLINELEN - 1 were treated as ``very long''.
-
-2004-12-07 14:15 Christos Zoulas <christos@astron.com>
-
- * bzip2 needs a lot of input buffer space on some files
- before it can begin uncompressing. This makes file -z
- fail on some bz2 files. Fix it by giving it a copy of
- the file descriptor to read as much as it wants if we
- have access to it. <christos@astron.com>
-
-2004-11-24 12:39 Christos Zoulas <christos@astron.com>
-
- * Stack smash fix, and ELF more conservative reading.
- Jakub Bogusz <qboosh@pld-linux.org>
-
-2004-11-20 18:50 Christos Zoulas <christos@astron.com>
-
- * New FreeBSD version parsing code:
- Jon Noack <noackjr@alumni.rice.edu>
-
- * Hackish support for ucs16 strings <christos@astron.com>
-
-2004-11-13 03:07 Christos Zoulas <christos@astron.com>
-
- * print the file name and line number in syntax errors.
-
-2004-10-12 10:50 Christos Zoulas <christos@astron.com>
-
- * Fix stack overwriting on 0 length strings: Tim Waugh
- <twaugh@redhat.com> Ned Ludd <solar@gentoo.org>
-
-2004-09-27 11:30 Christos Zoulas <christos@astron.com>
-
- * Remove 3rd and 4th copyright clause; approved by Ian Darwin.
-
- * Fix small memory leaks; caught by: Tamas Sarlos
- <stamas@csillag.ilab.sztaki.hu>
-
-2004-07-24 16:33 Christos Zoulas <christos@astron.com>
-
- * magic.mime update Danny Milosavljevic <danny.milo@gmx.net>
-
- * FreeBSD version update Oliver Eikemeier <eikemeier@fillmore-labs.com>
-
- * utime/utimes detection Ian Lance Taylor <ian@wasabisystems.com>
-
- * errors reading elf magic Jakub Bogusz <qboosh@pld-linux.org>
-
-2004-04-12 10:55 Christos Zoulas <christos@astron.com>
-
- * make sure that magic formats match magic types during compilation
-
- * fix broken sgi magic file
-
-2004-04-06 20:36 Christos Zoulas <christos@astron.com>
-
- * detect presence of mbstate_t Petter Reinholdtsen <pere@hungry.com>
-
- * magic fixes
-
-2004-03-22 15:25 Christos Zoulas <christos@astron.com>
-
- * Lots of mime fixes
- (Joerg Ostertag) <ostertag@rechengilde.de>
-
- * FreeBSD ELF version handling
- (Edwin Groothuis) <edwin@mavetju.org>
-
- * correct cleanup in all cases; don't just close the file.
- (Christos Zoulas) <christos@astron.com>
-
- * add gettext message catalogue support
- (Michael Piefel) <piefel@debian.org>
-
- * better printout for unreadable files
- (Michael Piefel) <piefel@debian.org>
-
- * compensate for missing MAXPATHLEN
- (Michael Piefel) <piefel@debian.org>
-
- * add wide character string length computation
- (Michael Piefel) <piefel@debian.org>
-
- * Avoid infinite loops caused by bad elf alignments
- or name and description note sizes. Reported by
- (Mikael Magnusson) <mmikael@comhem.se>
-
-2004-03-09 13:55 Christos Zoulas <christos@astron.com>
-
- * Fix possible memory leak on error and add missing regfree
- (Dmitry V. Levin) <ldv@altlinux.org>
-
-2003-12-23 12:12 Christos Zoulas <christos@astron.com>
-
- * fix -k flag (Maciej W. Rozycki)
-
-2003-11-18 14:10 Christos Zoulas <christos@astron.com>
-
- * Try to give as much info as possible on corrupt elf files.
- (Willy Tarreau) <willy@w.ods.org>
- * Updated python bindings (Brett Funderburg)
- <brettf@deepfile.com>
-
-2003-11-11 15:03 Christos Zoulas <christos@astron.com>
-
- * Include file.h first, because it includes config.h;
-   it breaks the largefile test macros otherwise.
- (Paul Eggert <eggert@CS.UCLA.EDU> via
- Lars Hecking <lhecking@nmrc.ie>)
-
-2003-10-14 21:39 Christos Zoulas <christos@astron.com>
-
- * Python bindings (Brett Funderburg) <brettf@deepfile.com>
- * Don't lookup past the end of the buffer
- (Chad Hanson) <chanson@tcs-sec.com>
- * Add MAGIC_ERROR and api on magic_errno()
-
-2003-10-08 12:40 Christos Zoulas <christos@astron.com>
-
- * handle error conditions from compile as fatal
- (Antti Kantee) <pooka@netbsd.org>
- * handle magic filename parsing sanely
- * more magic fixes.
- * fix a memory leak (Illes Marton) <illes.marton@balabit.hu>
- * describe magic file handling
- (Bryan Henderson) <bryanh@giraffe-data.com>
-
-2003-09-12 15:09 Christos Zoulas <christos@astron.com>
-
- * update magic files.
- * remove largefile support from file.h; it breaks things on most OS's
-
-2003-08-10 10:25 Christos Zoulas <christos@astron.com>
-
- * fix unmapping of mmapped files.
-
-2003-07-10 12:03 Christos Zoulas <christos@astron.com>
-
- * don't exit with -1 on error; always exit 1 (Marty Leisner)
- * restore utimes code.
-
-2003-06-10 17:03 Christos Zoulas <christos@astron.com>
-
- * make sure we don't access uninitialized memory.
- * pass lint
- * #ifdef __cplusplus in magic.h
-
-2003-05-25 19:23 Christos Zoulas <christos@astron.com>
-
- * rename cvs magic file to revision to deal with
- case insensitive filesystems.
-
-2003-05-23 17:03 Christos Zoulas <christos@astron.com>
-
- * documentation fixes from Michael Piefel <piefel@debian.org>
- * magic fixes (various)
- * revert basename magic in .mgc name determination
- * buffer protection in uncompress,
- signness issues,
- close files
-   Maciej W. Rozycki <macro@ds2.pg.gda.pl>
-
-2003-04-21 20:12 Christos Zoulas <christos@astron.com>
-
- * fix zsh magic
-
-2003-04-04 16:59 Christos Zoulas <christos@astron.com>
-
- * fix operand sort order in string.
-
-2003-04-02 17:30 Christos Zoulas <christos@astron.com>
-
- * cleanup namespace in magic.h
-
-2003-04-02 13:50 Christos Zoulas <christos@astron.com>
-
- * Magic additions (Alex Ott)
- * Fix bug that broke VPATH compilation (Peter Breitenlohner)
-
-2003-03-28 16:03 Christos Zoulas <christos@astron.com>
-
- * remove packed attribute from magic struct.
- * make the magic struct properly aligned.
- * bump version number of compiled files to 2.
-
-2003-03-27 13:10 Christos Zoulas <christos@astron.com>
-
- * separate tar detection and run it before softmagic.
- * fix reversed symlink test.
- * fix version printing.
- * make separator a string instead of a char.
- * update manual page and sort options.
-
-2003-03-26 11:00 Christos Zoulas <christos@astron.com>
-
- * Pass lint
- * make NULL in magic_file mean stdin
- * Fix "-" argument to file to pass NULL to magic_file
- * avoid pointer casts by using memcpy
- * rename magic_buf -> magic_buffer
- * keep only the first error
- * manual page: new sentence, new line
- * fix typo in api function (magic_buf -> magic_buffer)
diff --git a/contrib/libs/libmagic/INSTALL b/contrib/libs/libmagic/INSTALL
deleted file mode 100644
index 7d1c323bea..0000000000
--- a/contrib/libs/libmagic/INSTALL
+++ /dev/null
@@ -1,365 +0,0 @@
-Installation Instructions
-*************************
-
-Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002, 2004, 2005,
-2006, 2007, 2008, 2009 Free Software Foundation, Inc.
-
- Copying and distribution of this file, with or without modification,
-are permitted in any medium without royalty provided the copyright
-notice and this notice are preserved. This file is offered as-is,
-without warranty of any kind.
-
-Basic Installation
-==================
-
- Briefly, the shell commands `./configure; make; make install' should
-configure, build, and install this package. The following
-more-detailed instructions are generic; see the `README' file for
-instructions specific to this package. Some packages provide this
-`INSTALL' file but do not implement all of the features documented
-below. The lack of an optional feature in a given package is not
-necessarily a bug. More recommendations for GNU packages can be found
-in *note Makefile Conventions: (standards)Makefile Conventions.
-
- The `configure' shell script attempts to guess correct values for
-various system-dependent variables used during compilation. It uses
-those values to create a `Makefile' in each directory of the package.
-It may also create one or more `.h' files containing system-dependent
-definitions. Finally, it creates a shell script `config.status' that
-you can run in the future to recreate the current configuration, and a
-file `config.log' containing compiler output (useful mainly for
-debugging `configure').
-
- It can also use an optional file (typically called `config.cache'
-and enabled with `--cache-file=config.cache' or simply `-C') that saves
-the results of its tests to speed up reconfiguring. Caching is
-disabled by default to prevent problems with accidental use of stale
-cache files.
-
- If you need to do unusual things to compile the package, please try
-to figure out how `configure' could check whether to do them, and mail
-diffs or instructions to the address given in the `README' so they can
-be considered for the next release. If you are using the cache, and at
-some point `config.cache' contains results you don't want to keep, you
-may remove or edit it.
-
- The file `configure.ac' (or `configure.in') is used to create
-`configure' by a program called `autoconf'. You need `configure.ac' if
-you want to change it or regenerate `configure' using a newer version
-of `autoconf'.
-
- The simplest way to compile this package is:
-
- 1. `cd' to the directory containing the package's source code and type
- `./configure' to configure the package for your system.
-
- Running `configure' might take a while. While running, it prints
- some messages telling which features it is checking for.
-
- 2. Type `make' to compile the package.
-
- 3. Optionally, type `make check' to run any self-tests that come with
- the package, generally using the just-built uninstalled binaries.
-
- 4. Type `make install' to install the programs and any data files and
- documentation. When installing into a prefix owned by root, it is
- recommended that the package be configured and built as a regular
- user, and only the `make install' phase executed with root
- privileges.
-
- 5. Optionally, type `make installcheck' to repeat any self-tests, but
- this time using the binaries in their final installed location.
- This target does not install anything. Running this target as a
- regular user, particularly if the prior `make install' required
- root privileges, verifies that the installation completed
- correctly.
-
- 6. You can remove the program binaries and object files from the
- source code directory by typing `make clean'. To also remove the
- files that `configure' created (so you can compile the package for
- a different kind of computer), type `make distclean'. There is
- also a `make maintainer-clean' target, but that is intended mainly
- for the package's developers. If you use it, you may have to get
- all sorts of other programs in order to regenerate files that came
- with the distribution.
-
- 7. Often, you can also type `make uninstall' to remove the installed
- files again. In practice, not all packages have tested that
- uninstallation works correctly, even though it is required by the
- GNU Coding Standards.
-
- 8. Some packages, particularly those that use Automake, provide `make
- distcheck', which can be used by developers to test that all other
- targets like `make install' and `make uninstall' work correctly.
- This target is generally not run by end users.
-
-Compilers and Options
-=====================
-
- Some systems require unusual options for compilation or linking that
-the `configure' script does not know about. Run `./configure --help'
-for details on some of the pertinent environment variables.
-
- You can give `configure' initial values for configuration parameters
-by setting variables in the command line or in the environment. Here
-is an example:
-
- ./configure CC=c99 CFLAGS=-g LIBS=-lposix
-
- *Note Defining Variables::, for more details.
-
-Compiling For Multiple Architectures
-====================================
-
- You can compile the package for more than one kind of computer at the
-same time, by placing the object files for each architecture in their
-own directory. To do this, you can use GNU `make'. `cd' to the
-directory where you want the object files and executables to go and run
-the `configure' script. `configure' automatically checks for the
-source code in the directory that `configure' is in and in `..'. This
-is known as a "VPATH" build.
-
- With a non-GNU `make', it is safer to compile the package for one
-architecture at a time in the source code directory. After you have
-installed the package for one architecture, use `make distclean' before
-reconfiguring for another architecture.
-
- On MacOS X 10.5 and later systems, you can create libraries and
-executables that work on multiple system types--known as "fat" or
-"universal" binaries--by specifying multiple `-arch' options to the
-compiler but only a single `-arch' option to the preprocessor. Like
-this:
-
- ./configure CC="gcc -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
- CXX="g++ -arch i386 -arch x86_64 -arch ppc -arch ppc64" \
- CPP="gcc -E" CXXCPP="g++ -E"
-
- This is not guaranteed to produce working output in all cases; you
-may have to build one architecture at a time and combine the results
-using the `lipo' tool if you have problems.
-
-Installation Names
-==================
-
- By default, `make install' installs the package's commands under
-`/usr/local/bin', include files under `/usr/local/include', etc. You
-can specify an installation prefix other than `/usr/local' by giving
-`configure' the option `--prefix=PREFIX', where PREFIX must be an
-absolute file name.
-
- You can specify separate installation prefixes for
-architecture-specific files and architecture-independent files. If you
-pass the option `--exec-prefix=PREFIX' to `configure', the package uses
-PREFIX as the prefix for installing programs and libraries.
-Documentation and other data files still use the regular prefix.
-
- In addition, if you use an unusual directory layout you can give
-options like `--bindir=DIR' to specify different values for particular
-kinds of files. Run `configure --help' for a list of the directories
-you can set and what kinds of files go in them. In general, the
-default for these options is expressed in terms of `${prefix}', so that
-specifying just `--prefix' will affect all of the other directory
-specifications that were not explicitly provided.
-
- The most portable way to affect installation locations is to pass the
-correct locations to `configure'; however, many packages provide one or
-both of the following shortcuts of passing variable assignments to the
-`make install' command line to change installation locations without
-having to reconfigure or recompile.
-
- The first method involves providing an override variable for each
-affected directory. For example, `make install
-prefix=/alternate/directory' will choose an alternate location for all
-directory configuration variables that were expressed in terms of
-`${prefix}'. Any directories that were specified during `configure',
-but not in terms of `${prefix}', must each be overridden at install
-time for the entire installation to be relocated. The approach of
-makefile variable overrides for each directory variable is required by
-the GNU Coding Standards, and ideally causes no recompilation.
-However, some platforms have known limitations with the semantics of
-shared libraries that end up requiring recompilation when using this
-method, particularly noticeable in packages that use GNU Libtool.
-
- The second method involves providing the `DESTDIR' variable. For
-example, `make install DESTDIR=/alternate/directory' will prepend
-`/alternate/directory' before all installation names. The approach of
-`DESTDIR' overrides is not required by the GNU Coding Standards, and
-does not work on platforms that have drive letters. On the other hand,
-it does better at avoiding recompilation issues, and works well even
-when some directory options were not specified in terms of `${prefix}'
-at `configure' time.
-
-Optional Features
-=================
-
- If the package supports it, you can cause programs to be installed
-with an extra prefix or suffix on their names by giving `configure' the
-option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
-
- Some packages pay attention to `--enable-FEATURE' options to
-`configure', where FEATURE indicates an optional part of the package.
-They may also pay attention to `--with-PACKAGE' options, where PACKAGE
-is something like `gnu-as' or `x' (for the X Window System). The
-`README' should mention any `--enable-' and `--with-' options that the
-package recognizes.
-
- For packages that use the X Window System, `configure' can usually
-find the X include and library files automatically, but if it doesn't,
-you can use the `configure' options `--x-includes=DIR' and
-`--x-libraries=DIR' to specify their locations.
-
- Some packages offer the ability to configure how verbose the
-execution of `make' will be. For these packages, running `./configure
---enable-silent-rules' sets the default to minimal output, which can be
-overridden with `make V=1'; while running `./configure
---disable-silent-rules' sets the default to verbose, which can be
-overridden with `make V=0'.
-
-Particular systems
-==================
-
- On HP-UX, the default C compiler is not ANSI C compatible. If GNU
-CC is not installed, it is recommended to use the following options in
-order to use an ANSI C compiler:
-
- ./configure CC="cc -Ae -D_XOPEN_SOURCE=500"
-
-and if that doesn't work, install pre-built binaries of GCC for HP-UX.
-
- On OSF/1 a.k.a. Tru64, some versions of the default C compiler cannot
-parse its `<wchar.h>' header file. The option `-nodtk' can be used as
-a workaround. If GNU CC is not installed, it is therefore recommended
-to try
-
- ./configure CC="cc"
-
-and if that doesn't work, try
-
- ./configure CC="cc -nodtk"
-
- On Solaris, don't put `/usr/ucb' early in your `PATH'. This
-directory contains several dysfunctional programs; working variants of
-these programs are available in `/usr/bin'. So, if you need `/usr/ucb'
-in your `PATH', put it _after_ `/usr/bin'.
-
- On Haiku, software installed for all users goes in `/boot/common',
-not `/usr/local'. It is recommended to use the following options:
-
- ./configure --prefix=/boot/common
-
-Specifying the System Type
-==========================
-
- There may be some features `configure' cannot figure out
-automatically, but needs to determine by the type of machine the package
-will run on. Usually, assuming the package is built to be run on the
-_same_ architectures, `configure' can figure that out, but if it prints
-a message saying it cannot guess the machine type, give it the
-`--build=TYPE' option. TYPE can either be a short name for the system
-type, such as `sun4', or a canonical name which has the form:
-
- CPU-COMPANY-SYSTEM
-
-where SYSTEM can have one of these forms:
-
- OS
- KERNEL-OS
-
- See the file `config.sub' for the possible values of each field. If
-`config.sub' isn't included in this package, then this package doesn't
-need to know the machine type.
-
- If you are _building_ compiler tools for cross-compiling, you should
-use the option `--target=TYPE' to select the type of system they will
-produce code for.
-
- If you want to _use_ a cross compiler, that generates code for a
-platform different from the build platform, you should specify the
-"host" platform (i.e., that on which the generated programs will
-eventually be run) with `--host=TYPE'.
-
-Sharing Defaults
-================
-
- If you want to set default values for `configure' scripts to share,
-you can create a site shell script called `config.site' that gives
-default values for variables like `CC', `cache_file', and `prefix'.
-`configure' looks for `PREFIX/share/config.site' if it exists, then
-`PREFIX/etc/config.site' if it exists. Or, you can set the
-`CONFIG_SITE' environment variable to the location of the site script.
-A warning: not all `configure' scripts look for a site script.
-
-Defining Variables
-==================
-
- Variables not defined in a site shell script can be set in the
-environment passed to `configure'. However, some packages may run
-configure again during the build, and the customized values of these
-variables may be lost. In order to avoid this problem, you should set
-them in the `configure' command line, using `VAR=value'. For example:
-
- ./configure CC=/usr/local2/bin/gcc
-
-causes the specified `gcc' to be used as the C compiler (unless it is
-overridden in the site shell script).
-
-Unfortunately, this technique does not work for `CONFIG_SHELL' due to
-an Autoconf bug. Until the bug is fixed you can use this workaround:
-
- CONFIG_SHELL=/bin/bash /bin/bash ./configure CONFIG_SHELL=/bin/bash
-
-`configure' Invocation
-======================
-
- `configure' recognizes the following options to control how it
-operates.
-
-`--help'
-`-h'
- Print a summary of all of the options to `configure', and exit.
-
-`--help=short'
-`--help=recursive'
- Print a summary of the options unique to this package's
- `configure', and exit. The `short' variant lists options used
- only in the top level, while the `recursive' variant lists options
- also present in any nested packages.
-
-`--version'
-`-V'
- Print the version of Autoconf used to generate the `configure'
- script, and exit.
-
-`--cache-file=FILE'
- Enable the cache: use and save the results of the tests in FILE,
- traditionally `config.cache'. FILE defaults to `/dev/null' to
- disable caching.
-
-`--config-cache'
-`-C'
- Alias for `--cache-file=config.cache'.
-
-`--quiet'
-`--silent'
-`-q'
- Do not print messages saying which checks are being made. To
- suppress all normal output, redirect it to `/dev/null' (any error
- messages will still be shown).
-
-`--srcdir=DIR'
- Look for the package's source code in directory DIR. Usually
- `configure' can determine that directory automatically.
-
-`--prefix=DIR'
- Use DIR as the installation prefix. *note Installation Names::
- for more details, including other options available for fine-tuning
- the installation locations.
-
-`--no-create'
-`-n'
- Run the configure checks, but stop before creating any output
- files.
-
-`configure' also accepts some other, not widely useful, options. Run
-`configure --help' for more details.
-
diff --git a/contrib/libs/libmagic/NEWS b/contrib/libs/libmagic/NEWS
deleted file mode 100644
index 898a3dab34..0000000000
--- a/contrib/libs/libmagic/NEWS
+++ /dev/null
@@ -1 +0,0 @@
-See ChangeLog.
diff --git a/contrib/libs/libmagic/README.DEVELOPER b/contrib/libs/libmagic/README.DEVELOPER
deleted file mode 100644
index dfe27b973e..0000000000
--- a/contrib/libs/libmagic/README.DEVELOPER
+++ /dev/null
@@ -1,49 +0,0 @@
-# How to get started developing
-
-@(#) $File: README.DEVELOPER,v 1.9 2021/09/20 14:04:39 christos Exp $
-
-## Auto files
-
-After checking out the source, run the following:
-
- autoreconf -f -i
- make distclean # this can fail if you have not built before
- ./configure --disable-silent-rules
- make -j4
- make -C tests check
-
-If you see errors, make sure you have the latest libtool and autoconf.
-This has been tested with autoconf-2.69 and libtool-2.4.2.
-
-## Installing dependencies
-
-If your platform doesn't have the above tools, install the following
-packages first.
-
-### Debian
-
- apt-get install \
- automake \
- gcc \
- libtool \
- make \
- python \
- zlib1g-dev \
-
-See also `.travis.yml`.
-
-### Mac OS X (MacPorts)
-
- port install \
- autoconf \
- automake \
- libtool \
-
-### Mac OS X (HomeBrew)
-
- brew install autoconf automake libtool
-
-Tested with:
- autoconf 2.69
- automake 1.16.1
- libtool 2.4.6
diff --git a/contrib/libs/libmagic/README.md b/contrib/libs/libmagic/README.md
deleted file mode 100644
index 26e3804581..0000000000
--- a/contrib/libs/libmagic/README.md
+++ /dev/null
@@ -1,156 +0,0 @@
-## README for file(1) Command and the libmagic(3) library ##
-
- @(#) $File: README.md,v 1.5 2023/05/28 13:59:47 christos Exp $
-
-- Bug Tracker: <https://bugs.astron.com/>
-- Build Status: <https://travis-ci.org/file/file>
-- Download link: <ftp://ftp.astron.com/pub/file/>
-- E-mail: <christos@astron.com>
-- Fuzzing link: <https://bugs.chromium.org/p/oss-fuzz/issues/list?sort=-opened&can=1&q=proj:file>
-- Home page: https://www.darwinsys.com/file/
-- Mailing List archives: <https://mailman.astron.com/pipermail/file/>
-- Mailing List: <file@astron.com>
-- Public repo: <https://github.com/file/file>
-- Test framework: <https://github.com/file/file-tests>
-
-Phone: Do not even think of telephoning me about this program. Send
-cash first!
-
-This is Release 5.x of Ian Darwin's (copyright but distributable)
-file(1) command, an implementation of the Unix File(1) command.
-It knows the 'magic number' of several thousands of file types.
-This version is the standard "file" command for Linux, *BSD, and
-other systems. (See "patchlevel.h" for the exact release number).
-
-The major changes for 5.x are CDF file parsing, indirect magic,
-name/use (recursion) and overhaul in mime and ascii encoding
-handling.
-
-The major feature of 4.x is the refactoring of the code into a
-library, and the re-write of the file command in terms of that
-library. The library itself, libmagic can be used by 3rd party
-programs that wish to identify file types without having to fork()
-and exec() file. The prime contributor for 4.0 was Mans Rullgard.
-
-UNIX is a trademark of UNIX System Laboratories.
-
-The prime contributor to Release 3.8 was Guy Harris, who put in
-megachanges including byte-order independence.
-
-The prime contributor to Release 3.0 was Christos Zoulas, who put
-in hundreds of lines of source code changes, including his own
-ANSIfication of the code (I liked my own ANSIfication better, but
-his (__P()) is the "Berkeley standard" way of doing it, and I wanted
-UCB to include the code...), his HP-like "indirection" (a feature
-of the HP file command, I think), and his mods that finally got
-the uncompress (-z) mode finished and working.
-
-This release has been compiled in numerous environments; see PORTING
-for a list and known problems.
-
-This fine freeware file(1) follows the USG (System V) model of the
-file command, rather than the Research (V7) version or the V7-derived
-4.[23] Berkeley one. That is, the file /etc/magic contains much of
-the ritual information that is the source of this program's power.
-My version knows a little more magic (including tar archives) than
-System V; the /etc/magic parsing seems to be compatible with the
-(poorly documented) System V /etc/magic format (with one exception;
-see the man page).
-
-In addition, the /etc/magic file is built from a subdirectory
-for easier(?) maintenance. I will act as a clearinghouse for
-magic numbers assigned to all sorts of data files that
-are in reasonable circulation. Send your magic numbers,
-in magic(5) format please, to the maintainer, Christos Zoulas.
-
-* `COPYING` - read this first.
-* `README` - read this second (you are currently reading this file).
-* `INSTALL` - read on how to install
-* `src/apprentice.c` - parses /etc/magic to learn magic
-* `src/apptype.c` - used for OS/2 specific application type magic
-* `src/ascmagic.c` - third & last set of tests, based on hardwired assumptions.
-* `src/asctime_r.c` - replacement for OS's that don't have it.
-* `src/asprintf.c` - replacement for OS's that don't have it.
-* `src/buffer.c` - buffer handling functions.
-* `src/cdf.[ch]` - parser for Microsoft Compound Document Files
-* `src/cdf_time.c` - time converter for CDF.
-* `src/compress.c` - handles decompressing files to look inside.
-* `src/ctime_r.c` - replacement for OS's that don't have it.
-* `src/der.[ch]` - parser for Distinguished Encoding Rules
-* `src/dprintf.c` - replacement for OS's that don't have it.
-* `src/elfclass.h` - common code for elf 32/64.
-* `src/encoding.c` - handles unicode encodings
-* `src/file.c` - the main program
-* `src/file.h` - header file
-* `src/file_opts.h` - list of options
-* `src/fmtcheck.c` - replacement for OS's that don't have it.
-* `src/fsmagic.c` - first set of tests the program runs, based on filesystem info
-* `src/funcs.c` - utility functions
-* `src/getline.c` - replacement for OS's that don't have it.
-* `src/getopt_long.c` - replacement for OS's that don't have it.
-* `src/gmtime_r.c` - replacement for OS's that don't have it.
-* `src/is_csv.c` - knows about Comma Separated Value file format (RFC 4180).
-* `src/is_json.c` - knows about JavaScript Object Notation format (RFC 8259).
-* `src/is_simh.c` - knows about SIMH tape file format.
-* `src/is_tar.c, tar.h` - knows about Tape ARchive format (courtesy John Gilmore).
-* `src/localtime_r.c` - replacement for OS's that don't have it.
-* `src/magic.h.in` - source file for magic.h
-* `src/mygetopt.h` - replacement for OS's that don't have it.
-* `src/magic.c` - the libmagic api
-* `src/names.h` - header file for ascmagic.c
-* `src/pread.c` - replacement for OS's that don't have it.
-* `src/print.c` - print results, errors, warnings.
-* `src/readcdf.c` - CDF wrapper.
-* `src/readelf.[ch]` - Stand-alone elf parsing code.
-* `src/softmagic.c` - 2nd set of tests, based on /etc/magic
-* `src/strcasestr.c` - replacement for OS's that don't have it.
-* `src/strlcat.c` - replacement for OS's that don't have it.
-* `src/strlcpy.c` - replacement for OS's that don't have it.
-* `src/strndup.c` - replacement for OS's that don't have it.
-* `src/tar.h` - tar file definitions
-* `src/vasprintf.c` - for systems that don't have it.
-* `doc/file.man` - man page for the command
-* `doc/magic.man` - man page for the magic file, courtesy Guy Harris.
- Install as magic.4 on USG and magic.5 on V7 or Berkeley; cf Makefile.
-
-Magdir - directory of /etc/magic pieces
-------------------------------------------------------------------------------
-
-If you submit a new magic entry please make sure you read the following
-guidelines:
-
-- Initial match is preferably at least 32 bits long, and is a _unique_ match
-- If this is not feasible, use additional checks
-- Matches of <= 16 bits are not accepted
-- Delay printing strings as much as possible; don't print output too early
-- Avoid printing arbitrary bytes as a string with printf, which can be a source of
- crashes and buffer overflows
-
-- Provide complete information with entry:
- * One line short summary
- * Optional long description
- * File extension, if applicable
- * Full name and contact method (for discussion when entry has problem)
- * Further reference, such as documentation of format
-
-gpg for dummies:
-------------------------------------------------------------------------------
-
-```
-$ gpg --verify file-X.YY.tar.gz.asc file-X.YY.tar.gz
-gpg: assuming signed data in `file-X.YY.tar.gz'
-gpg: Signature made WWW MMM DD HH:MM:SS YYYY ZZZ using DSA key ID KKKKKKKK
-```
-
-To download the key:
-
-```
-$ gpg --keyserver hkp://keys.gnupg.net --recv-keys KKKKKKKK
-```
-------------------------------------------------------------------------------
-
-
-Parts of this software were developed at SoftQuad Inc., developers
-of SGML/HTML/XML publishing software, in Toronto, Canada.
-SoftQuad was swallowed up by Corel in 2002 and does not exist any longer.
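
The README above notes that libmagic exists precisely so that third-party programs can identify file types without fork()/exec()'ing file(1). As a hedged illustration of that public API (a minimal sketch, not code from the deleted tree; `magic_open`, `magic_load`, `magic_file`, `magic_error` and `magic_close` are the entry points declared in `src/magic.h`):

    #include <stdio.h>
    #include <magic.h>

    int main(int argc, char **argv)
    {
        if (argc != 2) {
            fprintf(stderr, "usage: %s <file>\n", argv[0]);
            return 1;
        }
        /* MAGIC_MIME_TYPE requests "image/png"-style output; MAGIC_NONE
         * would give the textual description instead. */
        magic_t m = magic_open(MAGIC_MIME_TYPE);
        if (m == NULL || magic_load(m, NULL) != 0) {   /* NULL = default magic database */
            fprintf(stderr, "magic: %s\n", m ? magic_error(m) : "magic_open failed");
            if (m)
                magic_close(m);
            return 1;
        }
        const char *type = magic_file(m, argv[1]);     /* magic_buffer() does the same for a memory buffer */
        printf("%s: %s\n", argv[1], type ? type : magic_error(m));
        magic_close(m);
        return 0;
    }

Built roughly as `cc probe.c -lmagic` on a system where the library is installed.
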
diff --git a/contrib/libs/libmagic/config-linux.h b/contrib/libs/libmagic/config-linux.h
deleted file mode 100644
index bf766ac512..0000000000
--- a/contrib/libs/libmagic/config-linux.h
+++ /dev/null
@@ -1,519 +0,0 @@
-/* config.h. Generated from config.h.in by configure. */
-/* config.h.in. Generated from configure.ac by autoheader. */
-
-/* Define if building universal (internal helper macro) */
-/* #undef AC_APPLE_UNIVERSAL_BUILD */
-
-/* Define if built-in ELF support is used */
-#define BUILTIN_ELF 1
-
-/* Enable bzlib compression support */
-/* #undef BZLIBSUPPORT */
-
-/* Define for ELF core file support */
-#define ELFCORE 1
-
-/* Define to 1 if you have the `asctime_r' function. */
-#define HAVE_ASCTIME_R 1
-
-/* Define to 1 if you have the `asprintf' function. */
-#define HAVE_ASPRINTF 1
-
-/* Define to 1 if you have the <byteswap.h> header file. */
-#define HAVE_BYTESWAP_H 1
-
-/* Define to 1 if you have the <bzlib.h> header file. */
-/* #undef HAVE_BZLIB_H */
-
-/* Define to 1 if you have the `ctime_r' function. */
-#define HAVE_CTIME_R 1
-
-/* HAVE_DAYLIGHT */
-#define HAVE_DAYLIGHT 1
-
-/* Define to 1 if you have the declaration of `daylight', and to 0 if you
- don't. */
-#define HAVE_DECL_DAYLIGHT 1
-
-/* Define to 1 if you have the declaration of `tzname', and to 0 if you don't.
- */
-#define HAVE_DECL_TZNAME 1
-
-/* Define to 1 if you have the <dlfcn.h> header file. */
-#define HAVE_DLFCN_H 1
-
-/* Define to 1 if you have the `dprintf' function. */
-#define HAVE_DPRINTF 1
-
-/* Define to 1 if you have the <err.h> header file. */
-#define HAVE_ERR_H 1
-
-/* Define to 1 if you have the <fcntl.h> header file. */
-#define HAVE_FCNTL_H 1
-
-/* Define to 1 if you have the `fmtcheck' function. */
-/* #undef HAVE_FMTCHECK */
-
-/* Define to 1 if you have the `fork' function. */
-#define HAVE_FORK 1
-
-/* Define to 1 if you have the `freelocale' function. */
-#define HAVE_FREELOCALE 1
-
-/* Define to 1 if fseeko (and presumably ftello) exists and is declared. */
-#define HAVE_FSEEKO 1
-
-/* Define to 1 if you have the `getline' function. */
-#define HAVE_GETLINE 1
-
-/* Define to 1 if you have the <getopt.h> header file. */
-#define HAVE_GETOPT_H 1
-
-/* Define to 1 if you have the `getopt_long' function. */
-#define HAVE_GETOPT_LONG 1
-
-/* Define to 1 if you have the `getpagesize' function. */
-#define HAVE_GETPAGESIZE 1
-
-/* Define to 1 if you have the `gmtime_r' function. */
-#define HAVE_GMTIME_R 1
-
-/* Define to 1 if the system has the type `intptr_t'. */
-#define HAVE_INTPTR_T 1
-
-/* Define to 1 if you have the <inttypes.h> header file. */
-#define HAVE_INTTYPES_H 1
-
-/* Define to 1 if you have the `bz2' library (-lbz2). */
-/* #undef HAVE_LIBBZ2 */
-
-/* Define to 1 if you have the `gnurx' library (-lgnurx). */
-/* #undef HAVE_LIBGNURX */
-
-/* Define to 1 if you have the `lz' library (-llz). */
-/* #undef HAVE_LIBLZ */
-
-/* Define to 1 if you have the `lzma' library (-llzma). */
-/* #undef HAVE_LIBLZMA */
-
-/* Define to 1 if you have the `seccomp' library (-lseccomp). */
-/* #undef HAVE_LIBSECCOMP */
-
-/* Define to 1 if you have the `z' library (-lz). */
-#define HAVE_LIBZ 1
-
-/* Define to 1 if you have the `zstd' library (-lzstd). */
-/* #undef HAVE_LIBZSTD */
-
-/* Define to 1 if you have the `localtime_r' function. */
-#define HAVE_LOCALTIME_R 1
-
-/* Define to 1 if you have the <lzlib.h> header file. */
-/* #undef HAVE_LZLIB_H */
-
-/* Define to 1 if you have the <lzma.h> header file. */
-/* #undef HAVE_LZMA_H */
-
-/* Define to 1 if mbrtowc and mbstate_t are properly declared. */
-#define HAVE_MBRTOWC 1
-
-/* Define to 1 if <wchar.h> declares mbstate_t. */
-#define HAVE_MBSTATE_T 1
-
-/* Define to 1 if you have the `memmem' function. */
-#define HAVE_MEMMEM 1
-
-/* Define to 1 if you have the <minix/config.h> header file. */
-/* #undef HAVE_MINIX_CONFIG_H */
-
-/* Define to 1 if you have the `mkostemp' function. */
-#define HAVE_MKOSTEMP 1
-
-/* Define to 1 if you have the `mkstemp' function. */
-#define HAVE_MKSTEMP 1
-
-/* Define to 1 if you have a working `mmap' system call. */
-#define HAVE_MMAP 1
-
-/* Define to 1 if you have the `newlocale' function. */
-#define HAVE_NEWLOCALE 1
-
-/* Define to 1 if you have the `pipe2' function. */
-#define HAVE_PIPE2 1
-
-/* Define to 1 if you have the `posix_spawnp' function. */
-#define HAVE_POSIX_SPAWNP 1
-
-/* Define to 1 if you have the `pread' function. */
-#define HAVE_PREAD 1
-
-/* Have sig_t type */
-#define HAVE_SIG_T 1
-
-/* Define to 1 if you have the <spawn.h> header file. */
-#define HAVE_SPAWN_H 1
-
-/* Define to 1 if you have the <stdint.h> header file. */
-#define HAVE_STDINT_H 1
-
-/* Define to 1 if you have the <stdio.h> header file. */
-#define HAVE_STDIO_H 1
-
-/* Define to 1 if you have the <stdlib.h> header file. */
-#define HAVE_STDLIB_H 1
-
-/* Define to 1 if you have the `strcasestr' function. */
-#define HAVE_STRCASESTR 1
-
-/* Define to 1 if you have the <strings.h> header file. */
-#define HAVE_STRINGS_H 1
-
-/* Define to 1 if you have the <string.h> header file. */
-#define HAVE_STRING_H 1
-
-/* Define to 1 if you have the `strlcat' function. */
-/* #undef HAVE_STRLCAT */
-
-/* Define to 1 if you have the `strlcpy' function. */
-/* #undef HAVE_STRLCPY */
-
-/* Define to 1 if you have the `strndup' function. */
-#define HAVE_STRNDUP 1
-
-/* Define to 1 if you have the `strtof' function. */
-#define HAVE_STRTOF 1
-
-/* HAVE_STRUCT_OPTION */
-#define HAVE_STRUCT_OPTION 1
-
-/* Define to 1 if `st_rdev' is a member of `struct stat'. */
-#define HAVE_STRUCT_STAT_ST_RDEV 1
-
-/* Define to 1 if `tm_gmtoff' is a member of `struct tm'. */
-#define HAVE_STRUCT_TM_TM_GMTOFF 1
-
-/* Define to 1 if `tm_zone' is a member of `struct tm'. */
-#define HAVE_STRUCT_TM_TM_ZONE 1
-
-/* Define to 1 if you have the <sys/bswap.h> header file. */
-/* #undef HAVE_SYS_BSWAP_H */
-
-/* Define to 1 if you have the <sys/ioctl.h> header file. */
-#define HAVE_SYS_IOCTL_H 1
-
-/* Define to 1 if you have the <sys/mman.h> header file. */
-#define HAVE_SYS_MMAN_H 1
-
-/* Define to 1 if you have the <sys/param.h> header file. */
-#define HAVE_SYS_PARAM_H 1
-
-/* Define to 1 if you have the <sys/stat.h> header file. */
-#define HAVE_SYS_STAT_H 1
-
-/* Define to 1 if you have the <sys/sysmacros.h> header file. */
-#define HAVE_SYS_SYSMACROS_H 1
-
-/* Define to 1 if you have the <sys/time.h> header file. */
-#define HAVE_SYS_TIME_H 1
-
-/* Define to 1 if you have the <sys/types.h> header file. */
-#define HAVE_SYS_TYPES_H 1
-
-/* Define to 1 if you have the <sys/utime.h> header file. */
-/* #undef HAVE_SYS_UTIME_H */
-
-/* Define to 1 if you have <sys/wait.h> that is POSIX.1 compatible. */
-#define HAVE_SYS_WAIT_H 1
-
-/* HAVE_TM_ISDST */
-#define HAVE_TM_ISDST 1
-
-/* HAVE_TM_ZONE */
-#define HAVE_TM_ZONE 1
-
-/* HAVE_TZNAME */
-#define HAVE_TZNAME 1
-
-/* Define to 1 if the system has the type `uintptr_t'. */
-#define HAVE_UINTPTR_T 1
-
-/* Define to 1 if you have the <unistd.h> header file. */
-#define HAVE_UNISTD_H 1
-
-/* Define to 1 if you have the `uselocale' function. */
-#define HAVE_USELOCALE 1
-
-/* Define to 1 if you have the `utime' function. */
-#define HAVE_UTIME 1
-
-/* Define to 1 if you have the `utimes' function. */
-#define HAVE_UTIMES 1
-
-/* Define to 1 if you have the <utime.h> header file. */
-#define HAVE_UTIME_H 1
-
-/* Define to 1 if you have the `vasprintf' function. */
-#define HAVE_VASPRINTF 1
-
-/* Define to 1 if you have the `vfork' function. */
-#define HAVE_VFORK 1
-
-/* Define to 1 if you have the <vfork.h> header file. */
-/* #undef HAVE_VFORK_H */
-
-/* Define to 1 or 0, depending whether the compiler supports simple visibility
- declarations. */
-#define HAVE_VISIBILITY 1
-
-/* Define to 1 if you have the <wchar.h> header file. */
-#define HAVE_WCHAR_H 1
-
-/* Define to 1 if you have the <wctype.h> header file. */
-#define HAVE_WCTYPE_H 1
-
-/* Define to 1 if you have the `wcwidth' function. */
-#define HAVE_WCWIDTH 1
-
-/* Define to 1 if `fork' works. */
-#define HAVE_WORKING_FORK 1
-
-/* Define to 1 if `vfork' works. */
-#define HAVE_WORKING_VFORK 1
-
-/* Define to 1 if you have the <xlocale.h> header file. */
-/* #undef HAVE_XLOCALE_H */
-
-/* Define to 1 if you have the <zlib.h> header file. */
-#define HAVE_ZLIB_H 1
-
-/* Define to 1 if you have the <zstd_errors.h> header file. */
-/* #undef HAVE_ZSTD_ERRORS_H */
-
-/* Define to 1 if you have the <zstd.h> header file. */
-/* #undef HAVE_ZSTD_H */
-
-/* Define to the sub-directory where libtool stores uninstalled libraries. */
-#define LT_OBJDIR ".libs/"
-
-/* Enable lzlib compression support */
-/* #undef LZLIBSUPPORT */
-
-/* Define to 1 if `major', `minor', and `makedev' are declared in <mkdev.h>.
- */
-/* #undef MAJOR_IN_MKDEV */
-
-/* Define to 1 if `major', `minor', and `makedev' are declared in
- <sysmacros.h>. */
-#define MAJOR_IN_SYSMACROS 1
-
-/* Name of package */
-#define PACKAGE "file"
-
-/* Define to the address where bug reports for this package should be sent. */
-#define PACKAGE_BUGREPORT "christos@astron.com"
-
-/* Define to the full name of this package. */
-#define PACKAGE_NAME "file"
-
-/* Define to the full name and version of this package. */
-#define PACKAGE_STRING "file 5.45"
-
-/* Define to the one symbol short name of this package. */
-#define PACKAGE_TARNAME "file"
-
-/* Define to the home page for this package. */
-#define PACKAGE_URL ""
-
-/* Define to the version of this package. */
-#define PACKAGE_VERSION "5.45"
-
-/* Define to 1 if all of the C90 standard headers exist (not just the ones
- required in a freestanding environment). This macro is provided for
- backward compatibility; new code need not use it. */
-#define STDC_HEADERS 1
-
-/* Define to 1 if your <sys/time.h> declares `struct tm'. */
-/* #undef TM_IN_SYS_TIME */
-
-/* Enable extensions on AIX 3, Interix. */
-#ifndef _ALL_SOURCE
-# define _ALL_SOURCE 1
-#endif
-/* Enable general extensions on macOS. */
-#ifndef _DARWIN_C_SOURCE
-# define _DARWIN_C_SOURCE 1
-#endif
-/* Enable general extensions on Solaris. */
-#ifndef __EXTENSIONS__
-# define __EXTENSIONS__ 1
-#endif
-/* Enable GNU extensions on systems that have them. */
-#ifndef _GNU_SOURCE
-# define _GNU_SOURCE 1
-#endif
-/* Enable X/Open compliant socket functions that do not require linking
- with -lxnet on HP-UX 11.11. */
-#ifndef _HPUX_ALT_XOPEN_SOCKET_API
-# define _HPUX_ALT_XOPEN_SOCKET_API 1
-#endif
-/* Identify the host operating system as Minix.
- This macro does not affect the system headers' behavior.
- A future release of Autoconf may stop defining this macro. */
-#ifndef _MINIX
-/* # undef _MINIX */
-#endif
-/* Enable general extensions on NetBSD.
- Enable NetBSD compatibility extensions on Minix. */
-#ifndef _NETBSD_SOURCE
-# define _NETBSD_SOURCE 1
-#endif
-/* Enable OpenBSD compatibility extensions on NetBSD.
- Oddly enough, this does nothing on OpenBSD. */
-#ifndef _OPENBSD_SOURCE
-# define _OPENBSD_SOURCE 1
-#endif
-/* Define to 1 if needed for POSIX-compatible behavior. */
-#ifndef _POSIX_SOURCE
-/* # undef _POSIX_SOURCE */
-#endif
-/* Define to 2 if needed for POSIX-compatible behavior. */
-#ifndef _POSIX_1_SOURCE
-/* # undef _POSIX_1_SOURCE */
-#endif
-/* Enable POSIX-compatible threading on Solaris. */
-#ifndef _POSIX_PTHREAD_SEMANTICS
-# define _POSIX_PTHREAD_SEMANTICS 1
-#endif
-/* Enable extensions specified by ISO/IEC TS 18661-5:2014. */
-#ifndef __STDC_WANT_IEC_60559_ATTRIBS_EXT__
-# define __STDC_WANT_IEC_60559_ATTRIBS_EXT__ 1
-#endif
-/* Enable extensions specified by ISO/IEC TS 18661-1:2014. */
-#ifndef __STDC_WANT_IEC_60559_BFP_EXT__
-# define __STDC_WANT_IEC_60559_BFP_EXT__ 1
-#endif
-/* Enable extensions specified by ISO/IEC TS 18661-2:2015. */
-#ifndef __STDC_WANT_IEC_60559_DFP_EXT__
-# define __STDC_WANT_IEC_60559_DFP_EXT__ 1
-#endif
-/* Enable extensions specified by ISO/IEC TS 18661-4:2015. */
-#ifndef __STDC_WANT_IEC_60559_FUNCS_EXT__
-# define __STDC_WANT_IEC_60559_FUNCS_EXT__ 1
-#endif
-/* Enable extensions specified by ISO/IEC TS 18661-3:2015. */
-#ifndef __STDC_WANT_IEC_60559_TYPES_EXT__
-# define __STDC_WANT_IEC_60559_TYPES_EXT__ 1
-#endif
-/* Enable extensions specified by ISO/IEC TR 24731-2:2010. */
-#ifndef __STDC_WANT_LIB_EXT2__
-# define __STDC_WANT_LIB_EXT2__ 1
-#endif
-/* Enable extensions specified by ISO/IEC 24747:2009. */
-#ifndef __STDC_WANT_MATH_SPEC_FUNCS__
-# define __STDC_WANT_MATH_SPEC_FUNCS__ 1
-#endif
-/* Enable extensions on HP NonStop. */
-#ifndef _TANDEM_SOURCE
-# define _TANDEM_SOURCE 1
-#endif
-/* Enable X/Open extensions. Define to 500 only if necessary
- to make mbstate_t available. */
-#ifndef _XOPEN_SOURCE
-/* # undef _XOPEN_SOURCE */
-#endif
-
-
-/* Version number of package */
-#define VERSION "5.45"
-
-/* Define WORDS_BIGENDIAN to 1 if your processor stores words with the most
- significant byte first (like Motorola and SPARC, unlike Intel). */
-#if defined AC_APPLE_UNIVERSAL_BUILD
-# if defined __BIG_ENDIAN__
-# define WORDS_BIGENDIAN 1
-# endif
-#else
-# ifndef WORDS_BIGENDIAN
-/* # undef WORDS_BIGENDIAN */
-# endif
-#endif
-
-/* Enable xzlib compression support */
-/* #undef XZLIBSUPPORT */
-
-/* Enable zlib compression support */
-#define ZLIBSUPPORT 1
-
-/* Enable zstdlib compression support */
-/* #undef ZSTDLIBSUPPORT */
-
-/* Number of bits in a file offset, on hosts where this is settable. */
-/* #undef _FILE_OFFSET_BITS */
-
-/* Define to 1 to make fseeko visible on some hosts (e.g. glibc 2.2). */
-/* #undef _LARGEFILE_SOURCE */
-
-/* Define for large files, on AIX-style hosts. */
-/* #undef _LARGE_FILES */
-
-/* Define for Solaris 2.5.1 so the uint32_t typedef from <sys/synch.h>,
- <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
- #define below would cause a syntax error. */
-/* #undef _UINT32_T */
-
-/* Define for Solaris 2.5.1 so the uint64_t typedef from <sys/synch.h>,
- <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
- #define below would cause a syntax error. */
-/* #undef _UINT64_T */
-
-/* Define for Solaris 2.5.1 so the uint8_t typedef from <sys/synch.h>,
- <pthread.h>, or <semaphore.h> is not used. If the typedef were allowed, the
- #define below would cause a syntax error. */
-/* #undef _UINT8_T */
-
-/* Define to the type of a signed integer type of width exactly 32 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef int32_t */
-
-/* Define to the type of a signed integer type of width exactly 64 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef int64_t */
-
-/* Define to the type of a signed integer type wide enough to hold a pointer,
- if such a type exists, and if the system does not define it. */
-/* #undef intptr_t */
-
-/* Define to a type if <wchar.h> does not define. */
-/* #undef mbstate_t */
-
-/* Define to `long int' if <sys/types.h> does not define. */
-/* #undef off_t */
-
-/* Define as a signed integer type capable of holding a process identifier. */
-/* #undef pid_t */
-
-/* Define to `unsigned int' if <sys/types.h> does not define. */
-/* #undef size_t */
-
-/* Define to the type of an unsigned integer type of width exactly 16 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef uint16_t */
-
-/* Define to the type of an unsigned integer type of width exactly 32 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef uint32_t */
-
-/* Define to the type of an unsigned integer type of width exactly 64 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef uint64_t */
-
-/* Define to the type of an unsigned integer type of width exactly 8 bits if
- such a type exists and the standard includes do not define it. */
-/* #undef uint8_t */
-
-/* Define to the type of an unsigned integer type wide enough to hold a
- pointer, if such a type exists, and if the system does not define it. */
-/* #undef uintptr_t */
-
-/* Define as `fork' if `vfork' does not work. */
-/* #undef vfork */
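
The WORDS_BIGENDIAN block near the end of this header exists because magic fields such as `lelong`/`belong` are stored with a fixed byte order, while the host CPU may use either. A minimal sketch of how such a macro is typically consumed (the helper name `le32_to_host` is illustrative and not a function from the deleted sources):

    #include <stdint.h>
    #include "config.h"   /* defines WORDS_BIGENDIAN on big-endian hosts */

    /* Convert a 32-bit value read verbatim from a little-endian field into
     * host byte order. On little-endian hosts this is a no-op; on
     * big-endian hosts the four bytes are swapped. */
    static uint32_t le32_to_host(uint32_t v)
    {
    #ifdef WORDS_BIGENDIAN
        return ((v & 0x000000ffu) << 24) | ((v & 0x0000ff00u) << 8) |
               ((v & 0x00ff0000u) >> 8)  | ((v & 0xff000000u) >> 24);
    #else
        return v;
    #endif
    }
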
diff --git a/contrib/libs/libmagic/config-osx.h b/contrib/libs/libmagic/config-osx.h
deleted file mode 100644
index de0960d03b..0000000000
--- a/contrib/libs/libmagic/config-osx.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#include "config-linux.h"
-
-#define HAVE_FMTCHECK 1
-#define HAVE_STRLCAT 1
-#define HAVE_STRLCPY 1
-#define HAVE_XLOCALE_H 1
-#undef HAVE_SYS_SYSMACROS_H
-#undef HAVE_PIPE2
-#undef HAVE_BYTESWAP_H
diff --git a/contrib/libs/libmagic/config.h b/contrib/libs/libmagic/config.h
deleted file mode 100644
index 24eccf047e..0000000000
--- a/contrib/libs/libmagic/config.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#pragma once
-
-#if defined(__APPLE__)
-# include "config-osx.h"
-#else
-# include "config-linux.h"
-#endif
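
Taken together, the three headers above form a small dispatch: `config.h` selects `config-osx.h` or `config-linux.h`, and the platform variant flips individual HAVE_* switches (for example, HAVE_STRLCPY is defined in config-osx.h but left undefined in config-linux.h). The README deleted earlier lists `src/strlcpy.c` as a "replacement for OS's that don't have it"; the usual wiring looks roughly like the following sketch (the fallback body is illustrative, not copied from the deleted `src/strlcpy.c`):

    #include <stddef.h>
    #include "config.h"          /* picks the per-platform HAVE_* set */

    #ifndef HAVE_STRLCPY
    /* BSD-style strlcpy fallback, compiled only where libc lacks one:
     * always NUL-terminates (if siz > 0) and returns strlen(src) so the
     * caller can detect truncation. */
    size_t strlcpy(char *dst, const char *src, size_t siz)
    {
        size_t i = 0;
        if (siz != 0) {
            for (; i + 1 < siz && src[i] != '\0'; i++)
                dst[i] = src[i];
            dst[i] = '\0';
        }
        while (src[i] != '\0')
            i++;
        return i;
    }
    #endif
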
diff --git a/contrib/libs/libmagic/file/0/ya.make b/contrib/libs/libmagic/file/0/ya.make
deleted file mode 100644
index f2b2d67f6a..0000000000
--- a/contrib/libs/libmagic/file/0/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-# Generated by devtools/yamaker.
-
-PROGRAM(file0)
-
-WITHOUT_LICENSE_TEXTS()
-
-PEERDIR(
- contrib/libs/libmagic/src
- contrib/libs/libmagic/src/file
-)
-
-END()
diff --git a/contrib/libs/libmagic/include/magic.h b/contrib/libs/libmagic/include/magic.h
deleted file mode 100644
index 0c283e98d6..0000000000
--- a/contrib/libs/libmagic/include/magic.h
+++ /dev/null
@@ -1 +0,0 @@
-#include "../src/magic.h" /* inclink generated by yamaker */
diff --git a/contrib/libs/libmagic/magic/Magdir/acorn b/contrib/libs/libmagic/magic/Magdir/acorn
deleted file mode 100644
index 37a4ed79e5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/acorn
+++ /dev/null
@@ -1,102 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: acorn,v 1.8 2021/04/26 15:56:00 christos Exp $
-# acorn: file(1) magic for files found on Acorn systems
-#
-
-# RISC OS Chunk File Format
-# From RISC OS Programmer's Reference Manual, Appendix D
-# We guess the file type from the type of the first chunk.
-0 lelong 0xc3cbc6c5 RISC OS Chunk data
->12 string OBJ_ \b, AOF object
->12 string LIB_ \b, ALF library
-
-# RISC OS AIF, contains "SWI OS_Exit" at offset 16.
-16 lelong 0xef000011 RISC OS AIF executable
-
-# RISC OS Draw files
-# From RISC OS Programmer's Reference Manual, Appendix E
-0 string Draw RISC OS Draw file data
-
-# RISC OS new format font files
-# From RISC OS Programmer's Reference Manual, Appendix E
-0 string FONT\0 RISC OS outline font data,
->5 byte x version %d
-0 string FONT\1 RISC OS 1bpp font data,
->5 byte x version %d
-0 string FONT\4 RISC OS 4bpp font data
->5 byte x version %d
-
-# RISC OS Music files
-# From RISC OS Programmer's Reference Manual, Appendix E
-0 string Maestro\r RISC OS music file
->8 byte x version %d
-
->8 byte x type %d
-
-# Digital Symphony data files
-# From: Bernard Jungen (bern8817@euphonynet.be)
-0 string \x02\x01\x13\x13\x13\x01\x0d\x10 Digital Symphony sound sample (RISC OS),
->8 byte x version %d,
->9 pstring x named "%s",
->(9.b+19) byte =0 8-bit logarithmic
->(9.b+19) byte =1 LZW-compressed linear
->(9.b+19) byte =2 8-bit linear signed
->(9.b+19) byte =3 16-bit linear signed
->(9.b+19) byte =4 SigmaDelta-compressed linear
->(9.b+19) byte =5 SigmaDelta-compressed logarithmic
->(9.b+19) byte >5 unknown format
-
-0 string \x02\x01\x13\x13\x14\x12\x01\x0b Digital Symphony song (RISC OS),
->8 byte x version %d,
->9 byte =1 1 voice,
->9 byte !1 %d voices,
->10 leshort =1 1 track,
->10 leshort !1 %d tracks,
->12 leshort =1 1 pattern
->12 leshort !1 %d patterns
-
-0 string \x02\x01\x13\x13\x10\x14\x12\x0e
->9 byte =0 Digital Symphony sequence (RISC OS),
->>8 byte x version %d,
->>10 byte =1 1 line,
->>10 byte !1 %d lines,
->>11 leshort =1 1 position
->>11 leshort !1 %d positions
->9 byte =1 Digital Symphony pattern data (RISC OS),
->>8 byte x version %d,
->>10 leshort =1 1 pattern
->>10 leshort !1 %d patterns
-
-# From: Joerg Jenderek
-# URL: https://www.kyzer.me.uk/pack/xad/#PackDir
-# reference: https://www.kyzer.me.uk/pack/xad/xad_PackDir.lha/PackDir.c
-# GRR: line below is too general as it also matches "Git pack" in ./revision
-0 string PACK\0
-# check for valid compression method 0-4
->5 ulelong <5
-# https://www.riscosopen.org/wiki/documentation/show/Introduction%20To%20Filing%20Systems
-# To skip "Git pack" version 0 test for root directory object like
-# ADFS::RPC.$.websitezip.FONTFIX
->>9 string >ADFS\ PackDir archive (RISC OS)
-# TrID labels above as "Acorn PackDir compressed Archive"
-# compression mode y (0 - 4) for GIF LZW with a maximum n bits
-# (y~n,0~12,1~13,2~14,3~15,4~16)
->>>5 ulelong+12 x \b, LZW %u-bits compression
-# https://www.filebase.org.uk/filetypes
-# !Packdir compressed archive has three hexadecimal digits code 68E
-!:mime application/x-acorn-68E
-!:ext pkd/bin
-# null terminated root directory object like IDEFS::IDE-4.$.Apps.GRAPHICS.!XFMPdemo
->>>9 string x \b, root "%s"
-# load address 0xFFFtttdd, ttt is the object filetype and dddddddddd is time
->>>>&1 ulelong x \b, load address %#x
-# execution address 0xdddddddd dddddddddd is 40 bit unsigned centiseconds since 1.1.1900 UTC
->>>>&5 ulelong x \b, exec address %#x
-# attributes (bits: 0~owner read,1~owner write,3~no delete,4~public read,5~public write)
->>>>&9 ulelong x \b, attributes %#x
-# number of entries in this directory. for root dir 0
-#>>>&13 ulelong x \b, entries %#x
-# the entries start here with object name
->>>>&17 string x \b, 1st object "%s"
-
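
Every line in a Magdir fragment such as the one above is an (offset, type, value, message) test, and a leading `>` marks a continuation that only runs once its parent matched. As a hedged C rendering of just the first RISC OS entry (`0 lelong 0xc3cbc6c5` with its `>12 string OBJ_`/`LIB_` continuations; the function name is illustrative, not from the deleted sources):

    #include <stdio.h>
    #include <string.h>
    #include <stdint.h>

    static void describe_riscos_chunk(const unsigned char *buf, size_t len)
    {
        if (len < 16)
            return;
        /* 0 lelong 0xc3cbc6c5 : 32-bit little-endian word at offset 0 */
        uint32_t v = (uint32_t)buf[0] | ((uint32_t)buf[1] << 8) |
                     ((uint32_t)buf[2] << 16) | ((uint32_t)buf[3] << 24);
        if (v != 0xc3cbc6c5u)
            return;
        fputs("RISC OS Chunk data", stdout);
        if (memcmp(buf + 12, "OBJ_", 4) == 0)      /* >12 string OBJ_ */
            fputs(", AOF object", stdout);
        else if (memcmp(buf + 12, "LIB_", 4) == 0) /* >12 string LIB_ */
            fputs(", ALF library", stdout);
        putchar('\n');
    }
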
diff --git a/contrib/libs/libmagic/magic/Magdir/adi b/contrib/libs/libmagic/magic/Magdir/adi
deleted file mode 100644
index 2fe79d4431..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/adi
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: adi,v 1.4 2009/09/19 16:28:07 christos Exp $
-# adi: file(1) magic for ADi's objects
-# From Gregory McGarry <g.mcgarry@ieee.org>
-#
-0 leshort 0x521c COFF DSP21k
->18 lelong &02 executable,
->18 lelong ^02
->>18 lelong &01 static object,
->>18 lelong ^01 relocatable object,
->18 lelong &010 stripped
->18 lelong ^010 not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/adventure b/contrib/libs/libmagic/magic/Magdir/adventure
deleted file mode 100644
index bd7f863be2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/adventure
+++ /dev/null
@@ -1,122 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: adventure,v 1.18 2019/04/19 00:42:27 christos Exp $
-# adventure: file(1) magic for Adventure game files
-#
-# from Allen Garvin <earendil@faeryland.tamu-commerce.edu>
-# Edited by Dave Chapeskie <dchapes@ddm.on.ca> Jun 28, 1998
-# Edited by Chris Chittleborough <cchittleborough@yahoo.com.au>, March 2002
-#
-# ALAN
-# I assume there are other, lower versions, but these are the only ones I
-# saw in the archive.
-0 beshort 0x0206 ALAN game data
->2 byte <10 version 2.6%d
-
-
-# Infocom (see z-machine)
-#------------------------------------------------------------------------------
-# Z-machine: file(1) magic for Z-machine binaries.
-# Sanity checks by David Griffith <dave@661.org>
-# Updated by Adam Buchbinder <adam.buchbinder@gmail.com>
-#
-#http://www.gnelson.demon.co.uk/zspec/sect11.html
-#https://www.jczorkmid.net/~jpenney/ZSpec11-latest.txt
-#https://en.wikipedia.org/wiki/Z-machine
-# The first byte is the Z-machine revision; it is always between 1 and 8. We
-# had false matches (for instance, inbig5.ocp from the Omega TeX extension as
-# well as an occasional MP3 file), so we sanity-check the version number.
-#
-# It might be possible to sanity-check the release number as well, as it seems
-# (at least in classic Infocom games) to always be a relatively small number,
-# always under 150 or so, but as this isn't rigorous, we'll wait on that until
-# it becomes clear that it's needed.
-#
-0 ubyte >0
->0 ubyte <9
->>16 belong&0xfe00f0f0 0x3030
->>>0 ubyte < 10
->>>>2 ubeshort x
->>>>>18 regex [0-9][0-9][0-9][0-9][0-9][0-9]
->>>>>>0 ubyte < 10 Infocom (Z-machine %d
->>>>>>>2 ubeshort x \b, Release %d
->>>>>>>>18 string >\0 \b, Serial %.6s
->>>>>>>>18 string x \b)
-!:strength + 40
-!:mime application/x-zmachine
-
-#------------------------------------------------------------------------------
-# Glulx: file(1) magic for Glulx binaries.
-#
-# David Griffith <dave@661.org>
-# I haven't checked for false matches yet.
-#
-0 string Glul Glulx game data
->4 beshort x (Version %d
->>6 byte x \b.%d
->>8 byte x \b.%d)
->36 string Info Compiled by Inform
-!:mime application/x-glulx
-
-
-# For Quetzal and blorb magic see iff
-
-
-# TADS (Text Adventure Development System) version 2
-# All files are machine-independent (games compile to byte-code) and are tagged
-# with a version string of the form "V2.<digit>.<digit>\0".
-# Game files start with "TADS2 bin\n\r\032\0" then the compiler version.
-0 string TADS2\ bin TADS
->9 belong !0x0A0D1A00 game data, CORRUPTED
->9 belong 0x0A0D1A00
->>13 string >\0 %s game data
-!:mime application/x-tads
-# Resource files start with "TADS2 rsc\n\r\032\0" then the compiler version.
-0 string TADS2\ rsc TADS
->9 belong !0x0A0D1A00 resource data, CORRUPTED
->9 belong 0x0A0D1A00
->>13 string >\0 %s resource data
-!:mime application/x-tads
-# Some saved game files start with "TADS2 save/g\n\r\032\0", a little-endian
-# 2-byte length N, the N-char name of the game file *without* a NUL (darn!),
-# "TADS2 save\n\r\032\0" and the interpreter version.
-0 string TADS2\ save/g TADS
->12 belong !0x0A0D1A00 saved game data, CORRUPTED
->12 belong 0x0A0D1A00
->>(16.s+32) string >\0 %s saved game data
-!:mime application/x-tads
-# Other saved game files start with "TADS2 save\n\r\032\0" and the interpreter
-# version.
-0 string TADS2\ save TADS
->10 belong !0x0A0D1A00 saved game data, CORRUPTED
->10 belong 0x0A0D1A00
->>14 string >\0 %s saved game data
-!:mime application/x-tads
-
-# TADS (Text Adventure Development System) version 3
-# Game files start with "T3-image\015\012\032"
-0 string T3-image\015\012\032
->11 leshort x TADS 3 game data (format version %d)
-# Saved game files start with "T3-state-v####\015\012\032"
-# where #### is a format version number
-0 string T3-state-v
->14 string \015\012\032 TADS 3 saved game data (format version
->>10 byte x %c
->>11 byte x \b%c
->>12 byte x \b%c
->>13 byte x \b%c)
-!:mime application/x-t3vm-image
-
-# edited by David Griffith <dave@661.org>
-# Danny Milosavljevic <danny.milo@gmx.net>
-# These are ADRIFT (adventure game standard) game files, extension .taf
-# Checked from source at (http://www.adrift.co/) and various taf files
-# found at the Interactive Fiction Archive (https://ifarchive.org/)
-0 belong 0x3C423FC9
->4 belong 0x6A87C2CF Adrift game file version
->>8 belong 0x94453661 3.80
->>8 belong 0x94453761 3.90
->>8 belong 0x93453E61 4.0
->>8 belong 0x92453E61 5.0
->>8 default x unknown
-!:mime application/x-adrift
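
The Z-machine comment block above spells out the heuristic: the first byte is the Z-machine revision and is always 1 through 8, and because that single byte caused false positives, the entry additionally requires the six serial-number bytes at offset 18 to be ASCII digits (plus a masked flags check at offset 16). A C sketch of the same idea (illustrative only; the function name and interface are not taken from file's sources):

    #include <ctype.h>
    #include <stddef.h>

    /* Returns non-zero when buf plausibly holds a Z-machine story file:
     * version byte 1..8 at offset 0, six-digit serial at offset 18.
     * The release number is the big-endian 16-bit value at offset 2. */
    static int looks_like_zmachine(const unsigned char *buf, size_t len,
                                   unsigned *version, unsigned *release)
    {
        if (len < 24)
            return 0;
        if (buf[0] < 1 || buf[0] > 8)
            return 0;
        for (int i = 18; i < 24; i++)
            if (!isdigit(buf[i]))
                return 0;
        *version = buf[0];
        *release = ((unsigned)buf[2] << 8) | buf[3];
        return 1;
    }
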
diff --git a/contrib/libs/libmagic/magic/Magdir/aes b/contrib/libs/libmagic/magic/Magdir/aes
deleted file mode 100644
index e5e1edcb13..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/aes
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: aes,v 1.1 2020/08/18 21:20:22 christos Exp $
-#
-# aes: magic file for AES encrypted files
-
-# Summary: AES Crypt Encrypted Data File
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Advanced_Encryption_Standard
-# Reference: https://www.aescrypt.com/aes_file_format.html
-0 string AES
->3 ubyte <3 AES encrypted data, version %u
-#!:mime application/aes
-!:mime application/x-aes-encrypted
-!:ext aes
-# For Version 2 the encrypted file can have text tags
->>3 ubyte =2
-# length of an extension identifier and contents like: 0 24 33 38
-#>>5 ubeshort x \b, tag length %u
-#>>5 pstring/H x '%s'
-# standard extension tags like CREATED_BY
->>>7 string CREATED_BY \b, created by
-# software product, manufacturer like "SharpAESCrypt v1.3.3.0" "aescrypt (Windows GUI) 3.10" ...
->>>>&1 string x "%s"
-# TODO: more other tags
-# tag CREATED_DATE like YYYY-MM-DD
-# tag CREATED_TIME like HH:MM:SS
-#
-
diff --git a/contrib/libs/libmagic/magic/Magdir/algol68 b/contrib/libs/libmagic/magic/Magdir/algol68
deleted file mode 100644
index 1ca1fad211..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/algol68
+++ /dev/null
@@ -1,35 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: algol68,v 1.6 2022/11/06 18:36:55 christos Exp $
-# algol68: file(1) magic for Algol 68 source
-#
-# URL: https://en.wikipedia.org/wiki/ALGOL_68
-# Reference: http://www.softwarepreservation.org/projects/ALGOL/report/Algol68_revised_report-AB.pdf
-# Update: Joerg Jenderek
-0 search/8192 (input,
->0 use algol_68
-# graph_2d.a68
-0 regex/4006 \^PROC[[:space:]][a-zA-Z0-9_[:space:]]*[[:space:]]=
->0 use algol_68
-0 regex/1024 \bMODE[\t\ ]
->0 use algol_68
-0 regex/1024 \bMODE[\t\ ]
->0 use algol_68
-0 regex/1024 \bREF[\t\ ]
->0 use algol_68
-0 regex/1024 \bFLEX[\t\ ]\*\\[
->0 use algol_68
-
-# display information like mime type and file name extension of Algol 68 source text
-0 name algol_68 Algol 68 source text
-!:mime text/x-Algol68
-# https://file-extension.net/seeker/file_extension_a68
-!:ext a68
-#!:ext a68/alg
-
-#0 regex [\t\ ]OD Algol 68 source text
-#>0 use algol_68
-#!:mime text/x-Algol68
-#0 regex [\t\ ]FI Algol 68 source text
-#>0 use algol_68
-#!:mime text/x-Algol68
diff --git a/contrib/libs/libmagic/magic/Magdir/allegro b/contrib/libs/libmagic/magic/Magdir/allegro
deleted file mode 100644
index b937c9cb02..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/allegro
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: allegro,v 1.4 2009/09/19 16:28:07 christos Exp $
-# allegro: file(1) magic for Allegro datafiles
-# Toby Deshane <hac@shoelace.digivill.net>
-#
-0 belong 0x736C6821 Allegro datafile (packed)
-0 belong 0x736C682E Allegro datafile (not packed/autodetect)
-0 belong 0x736C682B Allegro datafile (appended exe data)
diff --git a/contrib/libs/libmagic/magic/Magdir/alliant b/contrib/libs/libmagic/magic/Magdir/alliant
deleted file mode 100644
index 962020238e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/alliant
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: alliant,v 1.7 2009/09/19 16:28:07 christos Exp $
-# alliant: file(1) magic for Alliant FX series a.out files
-#
-# If the FX series is the one that had a processor with a 68K-derived
-# instruction set, the "short" should probably become "beshort" and the
-# "long" should probably become "belong".
-# If it's the i860-based one, they should probably become either the
-# big-endian or little-endian versions, depending on the mode they ran
-# the 860 in....
-#
-0 short 0420 0420 Alliant virtual executable
->2 short &0x0020 common library
->16 long >0 not stripped
-0 short 0421 0421 Alliant compact executable
->2 short &0x0020 common library
->16 long >0 not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/amanda b/contrib/libs/libmagic/magic/Magdir/amanda
deleted file mode 100644
index e7fa539013..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/amanda
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: amanda,v 1.6 2017/03/17 21:35:28 christos Exp $
-# amanda: file(1) magic for amanda file format
-#
-0 string AMANDA:\ AMANDA
->8 string TAPESTART\ DATE tape header file,
->>23 string X
->>>25 string >\ Unused %s
->>23 string >\ DATE %s
->8 string FILE\ dump file,
->>13 string >\ DATE %s
diff --git a/contrib/libs/libmagic/magic/Magdir/amigaos b/contrib/libs/libmagic/magic/Magdir/amigaos
deleted file mode 100644
index fdd947fdf7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/amigaos
+++ /dev/null
@@ -1,218 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: amigaos,v 1.20 2021/09/20 00:42:19 christos Exp $
-# amigaos: file(1) magic for AmigaOS binary formats:
-
-#
-# From ignatios@cs.uni-bonn.de (Ignatios Souvatzis)
-#
-0 belong 0x000003fa AmigaOS shared library
-0 belong 0x000003f3 AmigaOS loadseg()ble executable/binary
-0 belong 0x000003e7 AmigaOS object/library data
-#
-0 beshort 0xe310 Amiga Workbench
->2 beshort 1
->>48 byte 1 disk icon
->>48 byte 2 drawer icon
->>48 byte 3 tool icon
->>48 byte 4 project icon
->>48 byte 5 garbage icon
->>48 byte 6 device icon
->>48 byte 7 kickstart icon
->>48 byte 8 workbench application icon
->2 beshort >1 icon, vers. %d
-#
-# various sound formats from the Amiga
-# Götz Waschk <waschk@informatik.uni-rostock.de>
-#
-0 string FC14 Future Composer 1.4 Module sound file
-0 string SMOD Future Composer 1.3 Module sound file
-0 string AON4artofnoise Art Of Noise Module sound file
-1 string MUGICIAN/SOFTEYES Mugician Module sound file
-58 string SIDMON\ II\ -\ THE Sidmon 2.0 Module sound file
-0 string Synth4.0 Synthesis Module sound file
-0 string ARP. The Holy Noise Module sound file
-0 string BeEp\0 JamCracker Module sound file
-0 string COSO\0 Hippel-COSO Module sound file
-# Too simple (short, pure ASCII, deep), MPi
-#26 string V.3 Brian Postma's Soundmon Module sound file v3
-#26 string BPSM Brian Postma's Soundmon Module sound file v3
-#26 string V.2 Brian Postma's Soundmon Module sound file v2
-
-# The following are from: "Stefan A. Haubenthal" <polluks@web.de>
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Amiga_bitmap_font
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/f/font-amiga.trid.xml
-# https://wiki.amigaos.net/wiki/Graphics_Library_and_Text
-# fch_FileID=FCH_ID=0x0f00
-0 beshort 0x0f00
-# skip some AVM powerline firmware images by check for positive number of font elements
-# https://download.avm.de/fritzpowerline/fritzpowerline-1000e-t/other/fritz.os/fritz.powerline_1000ET_01_05.image
->2 ubeshort >0 AmigaOS bitmap font
-#!:mime application/octet-stream
-!:mime font/x-amiga-font
-!:ext font
-# struct FontContents fch_FC; 1st fc_FileName [MAXFONTPATH=256]; ~ filename "/" fc_YSize
-# like: topazb/6 suits/8 Excel/9e emerald/17 Franklin/23 DIAMONDS/60.8C
->>4 string x "%.256s"
-# fc_YSize ~number after slash in fc_FileName; like: 6 7 8 9 11 12 16 17 21 23 45 60
->>260 beshort x \b, fc_YSize %u
-# fch_NumEntries; number of FontContents elements like:
-# 1 (often) 2 3 (IconCondensed.font tempfont.font) 4 (Franklin.font) 6 (mcoop.font)
->>2 ubeshort >1 \b, %u elements
-#>>2 beshort x \b, %u element
-# plural s
-#>>2 beshort !1 \bs
-# like: 6 7 8 9 11 12 16 17 21 23 45 60
-#>>262 beshort x \b, FLAGS_STYLE
->>2 beshort >1 \b, 2nd
-# 2nd fc_FileName like: Franklin/36
->>>264 string x "%.256s"
->>2 beshort >2 \b, 3rd
-# 3rd fc_FileName like: Franklin/18
->>>524 string x "%.256s"
-# URL: http://fileformats.archiveteam.org/wiki/Amiga_bitmap_font
-# Reference: https://wiki.amigaos.net/wiki/Graphics_Library_and_Text
-# http://mark0.net/download/triddefs_xml.7z/defs/f/font-amiga-var2.trid.xml
-# Note: called by TrID "Amiga bitmap Font (var.2)"
-# fch_FileID=TFCH_ID=0x0f02
-0 beshort 0x0f02
-# skip possible misidentified foo by check for positive number of font elements
->2 ubeshort >0 AmigaOS bitmap font (TFCH)
-#!:mime application/octet-stream
-!:mime font/x-amiga-font
-!:ext font
-# struct TFontContents fch_TFC[]; 1st tfc_FileName [254]; ~ filename "/" fc_YSize
-# like: Abbey/45 XScript/75 XTriumvirate/45
->>4 string x "%.254s"
-# tfc_TagCount; including the TAG_END tag like: 4
->>258 ubeshort x \b, tfc_TagCount %u
-# tfc_YSize ~number after slash in tfc_FileName; like: 45 75
->>260 beshort x \b, tfc_YSize %u
-# tfc_Style; tfc_Flags like: 8022h 8222h
-#>>262 ubeshort x \b, FLAGS_STYLE %#x
-# fch_NumEntries; number of FontContents elements like: 1 (abbey.font) 2 (xscript.font xtriumvirate.font)
->>2 ubeshort >1 \b, %u elements
->>2 beshort >1 \b, 2nd
-# 2nd tfc_FileName like: XScript/45 XTriumvirate/30
->>>264 string x "%.254s"
-0 beshort 0x0f03 AmigaOS outline font
-0 belong 0x80001001 AmigaOS outline tag
-0 string ##\ version catalog translation
-0 string EMOD\0 Amiga E module
-8 string ECXM\0 ECX module
-0 string/c @database AmigaGuide file
-
-# Amiga disk types
-# display information like volume name of root block on Amiga (floppy) disk
-0 name adf-rootblock
-# block primary type = T_HEADER (value 2)
->0x000 ubelong !2 \b, type %u
-# header_key; unused in rootblock (value 0)
->0x004 ubelong !0 \b, header_key %u
-# high_seq; unused (value 0)
->0x008 ubelong !0 \b, high_seq %u
-# ht_size; hash table size; 0x48 for floppies
->0x00c ubelong !0x48 \b, hash table size %#x
-# bm_flag; bitmap flag, -1 means VALID
->0x138 belong !-1 \b, bitmap flag %#x
-# bm_ext; first bitmap extension block (Hard disks only)
->0x1A0 ubelong !0 \b, bitmap extension block %#x
-# name_len; volume name length; diskname[30]; volume name
->0x1B0 pstring >\0 \b, "%s"
-# first directory cache block for FFS; otherwise 0
->0x1F8 ubelong !0 \b, directory cache block %#x
-# block secondary type = ST_ROOT (value 1)
->0x1FC ubelong !1 \b, sec_type %#x
-#
-0 string RDSK Rigid Disk Block
->160 string x on %.24s
-# URL: http://fileformats.archiveteam.org/wiki/ADF_(Amiga)
-# https://en.wikipedia.org/wiki/Amiga_Fast_File_System
-# Reference: http://lclevy.free.fr/adflib/adf_info.html
-# Update: Joerg Jenderek
-# Note: created by ADFOpus.exe
-# and verified by `unadf -l TURBO_SILVER_SV.ADF`
-0 string DOS
-# skip DOS Client Message Files like IPXODI.MSG DOSRQSTR.MSG
->3 ubyte <8 Amiga
-# https://reposcope.com/mimetype/application/x-amiga-disk-format
-!:mime application/x-amiga-disk-format
-!:ext adf
->>3 ubyte 0 DOS disk
->>3 ubyte 1 FFS disk
->>3 ubyte 2 Inter DOS disk
->>3 ubyte 3 Inter FFS disk
-# For Fastdir mode the international mode is also enabled,
->>3 ubyte 4 Fastdir DOS disk
->>3 ubyte 5 Fastdir FFS disk
-# called by TrID "Amiga Disk image File (OFS+INTL+DIRC)"
->>3 ubyte 6 Inter Fastdir DOS disk
-# called by TrID "Amiga Disk image File (FFS+INTL+DIRC)"
->>3 ubyte 7 Inter Fastdir FFS disk
-# but according to Wikipedia variants with long name support
-#>>3 ubyte 6 long name DOS disk
-#>>3 ubyte 7 long name FFS disk
-# Does NOT work in general! Only partly, for file sizes ~< FILE_BYTES_MAX=1 MiB defined in ../../src/file.h
-#>>-0 offset x \b, %lld bytes
-# Correct file size, but next lines are NOT executed
-#>>-0 offset 901120 (DD 880 KiB floppy)
-# 880 KiB Double Density floppy disk by characteristic hash table size 0x48 and T_HEADER=2
->>0x6E00C ubelong 0x48
->>>0x6E000 ubelong 2 (DD 880 KiB)
-# 1760 KiB High Density floppy disk (1802240 bytes) by characteristic hash table size 0x48
->>0xDC00C ubelong 0x48
->>>0xDC000 ubelong 2 (HD 1760 KiB)
-# Chksum; special block checksum like: 0 0x44ccf4c0 0x51f32cac 0xe33d0e7d ...
-#>>4 ubelong x \b, CRC %#x
-# Rootblock: 0 880 (often for DD and HD) 1146049280 (IMAGINE_1_0_DISK_01.ADF TURBO_SILVER_SV.ADF)
->>8 ubelong >0 \b, probably root block %d
-# bootblock code
->>12 quad !0 \b, bootable
-# assembler instructions: lea exp(pc),a1; moveq 25h,d0; jsr -552(a6)
->>>12 ubequad =0x43fa003e70254eae AmigaDOS 3.0
->>>12 default x
->>>>12 ubequad !0x43fa003e70254eae %#llx..
-# 880 KiB Double Density floppy disk (901120 bytes)
->>0x6E00C ubelong 0x48
->>>0x6E000 ubelong 2
->>>>0x6E000 use adf-rootblock
-# 1760 KiB High Density floppy disk (1802240 bytes)
->>0xDC00C ubelong 0x48
->>>0xDC000 ubelong 2
->>>>0xDC000 use adf-rootblock
-# 1 MiB hard disc by test for T_HEADER=2 and header_key=0=high_seq
->>0x80000 ubelong 2
->>>0x80004 quad 0
->>>>0x80000 use adf-rootblock
-# 2 MiB hard disc; only works if FILE_BYTES_MAX in ../../src/file.h is raised to 2 MiB
-#>>0x100000 ubelong x 2 MiB TEST
-#>>0x100000 ubelong 2 \b, 2 MiB hard disc rootblock
-#>>>0x100000 use adf-rootblock
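For illustration only, the ADF tests above reduce to a little arithmetic: the boot-block flags byte at offset 3 selects the filesystem variant, and the root block sits at sector 880 (DD, 901120-byte images) or sector 1760 (HD, 1802240-byte images), where the volume name is stored as a length-prefixed string at +0x1B0. A minimal Python sketch of that logic, with the variant labels copied from the rules above (not an exhaustive ADF parser):

import struct, sys

# Flag byte 0..7, labelled as in the magic entries above.
VARIANTS = ["DOS disk", "FFS disk", "Inter DOS disk", "Inter FFS disk",
            "Fastdir DOS disk", "Fastdir FFS disk",
            "Inter Fastdir DOS disk", "Inter Fastdir FFS disk"]

def describe_adf(path):
    with open(path, "rb") as f:
        data = f.read()
    if data[:3] != b"DOS" or data[3] > 7:
        return None
    # Root block: sector 880 for DD (901120 bytes), sector 1760 for HD (1802240 bytes).
    root = 0x6E000 if len(data) <= 901120 else 0xDC000
    block_type = struct.unpack_from(">I", data, root)[0]     # T_HEADER == 2
    name_len = data[root + 0x1B0]                            # pstring length byte
    volume = data[root + 0x1B1:root + 0x1B1 + name_len].decode("latin-1", "replace")
    return VARIANTS[data[3]], block_type, volume

if __name__ == "__main__":
    print(describe_adf(sys.argv[1]))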
-0 string KICK Kickstart disk
-
-# From: Alex Beregszaszi <alex@fsn.hu>
-0 string LZX LZX compressed archive (Amiga)
-
-# From: Przemek Kramarczyk <pkramarczyk@gmail.com>
-0 string .KEY AmigaDOS script
-0 string .key AmigaDOS script
-
-# AMOS Basic file formats
-# https://www.exotica.org.uk/wiki/AMOS_file_formats
-0 string AMOS\040Basic\040 AMOS Basic source code
->11 byte =0x56 \b, tested
->11 byte =0x76 \b, untested
-0 string AMOS\040Pro AMOS Basic source code
->11 byte =0x56 \b, tested
->11 byte =0x76 \b, untested
-0 string AmSp AMOS Basic sprite bank
->4 beshort x \b, %d sprites
-0 string AmIc AMOS Basic icon bank
->4 beshort x \b, %d icons
-0 string AmBk AMOS Basic memory bank
->4 beshort x \b, bank number %d
->8 belong&0xFFFFFFF x \b, length %d
->12 regex .{8} \b, type %s
-0 string AmBs AMOS Basic memory banks
->4 beshort x \b, %d banks
diff --git a/contrib/libs/libmagic/magic/Magdir/android b/contrib/libs/libmagic/magic/Magdir/android
deleted file mode 100644
index 8a2dedf3d2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/android
+++ /dev/null
@@ -1,259 +0,0 @@
-
-#------------------------------------------------------------
-# $File: android,v 1.24 2023/02/20 16:51:59 christos Exp $
-# Various android related magic entries
-#------------------------------------------------------------
-
-# Dalvik .dex format. http://retrodev.com/android/dexformat.html
-# From <mkf@google.com> "Mike Fleming"
-# Fixed to avoid regexec 17 errors on some dex files
-# From <diff@lookout.com> "Tim Strazzere"
-0 string dex\n
->0 regex dex\n[0-9]{2}\0 Dalvik dex file
->4 string >000 version %s
-0 string dey\n
->0 regex dey\n[0-9]{2}\0 Dalvik dex file (optimized for host)
->4 string >000 version %s
-
-# Android bootimg format
-# From https://android.googlesource.com/\
-# platform/system/core/+/master/mkbootimg/bootimg.h
-# https://github.com/djrbliss/loki/blob/master/loki.h#L43
-0 string ANDROID! Android bootimg
->1024 string LOKI \b, LOKI'd
->>1028 lelong 0 \b (boot)
->>1028 lelong 1 \b (recovery)
->8 lelong >0 \b, kernel
->>12 lelong >0 \b (%#x)
->16 lelong >0 \b, ramdisk
->>20 lelong >0 \b (%#x)
->24 lelong >0 \b, second stage
->>28 lelong >0 \b (%#x)
->36 lelong >0 \b, page size: %d
->38 string >0 \b, name: %s
->64 string >0 \b, cmdline (%s)
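For reference, the header fields those bootimg tests touch can be read with a few struct.unpack calls. The sketch below assumes the classic v0/v1 header layout (magic, kernel/ramdisk/second size+address pairs, tags address, page size, command line at offset 64) and is illustrative only:

import struct

def bootimg_summary(path):
    with open(path, "rb") as f:
        hdr = f.read(1024)
    if hdr[:8] != b"ANDROID!":
        return None
    kernel_size, kernel_addr, ramdisk_size, ramdisk_addr = struct.unpack_from("<4I", hdr, 8)
    second_size, second_addr, tags_addr, page_size = struct.unpack_from("<4I", hdr, 24)
    cmdline = hdr[64:64 + 512].split(b"\0", 1)[0].decode("ascii", "replace")
    return {"kernel": kernel_size, "ramdisk": ramdisk_size,
            "second": second_size, "page_size": page_size, "cmdline": cmdline}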
-
-# Android Backup archive
-# From: Ariel Shkedi
-# Update: Joerg Jenderek
-# URL: https://github.com/android/platform_frameworks_base/blob/\
-# 0bacfd2ba68d21a68a3df345b830bc2a1e515b5a/services/java/com/\
-# android/server/BackupManagerService.java#L2367
-# Reference: https://sourceforge.net/projects/adbextractor/
-# android-backup-extractor/perl/backupencrypt.pl
-# Note: only unix line feeds "\n" found
-# After the header comes a tar file
-# If compressed, the entire tar file is compressed with JAVA deflate
-#
-# Include the version number hardcoded with the magic string to avoid
-# false positives
-0 string/b ANDROID\ BACKUP\n Android Backup
-# maybe look for some more characteristics like linefeed '\n' or version
-#>16 string \n
-# No mime-type defined officially
-!:mime application/x-google-ab
-!:ext ab
-# on 2nd line version (often 1, 2 on kitkat 4.4.3+, 4 on 7.1.2)
->15 string >\0 \b, version %s
-# "1" on 3rd line means compressed
->17 string 0\n \b, Not-Compressed
->17 string 1\n \b, Compressed
-# The 4th line is encryption "none" or "AES-256"
-# any string as long as it's not the word none (which is matched below)
->19 string none\n \b, Not-Encrypted
-# look for backup content after line with encryption info
-#>>19 search/7 \n
-# data part after header for not encrypted Android Backup
-#>>>&0 ubequad x \b, content %#16.16llx...
-# look for zlib compressed by ./compress after message with 1 space at end
-#>>>&0 indirect x \b; contains
-# look for tar archive block by ./archive for package name manifest
->>288 string ustar \b; contains
->>>31 use tar-file
-# look for zip/jar archive by ./archive ./zip after message with 1 space at end
-#>>2079 search/1025/s PK\003\004 \b; contains
-#>>>&0 indirect x
->19 string !none
->>19 regex/1l \^([^n\n]|n[^o]|no[^n]|non[^e]|none.+).* \b, Encrypted (%s)
-# Commented out because they don't seem useful to print
-# (but they are part of the header - the tar file comes after them):
-# The 5th line is User Password Salt (128 Hex)
-# string length too high with standard src configuration
-#>>>&1 string >\0 \b, PASSWORD salt: "%-128.128s"
-#>>>&1 regex/1l .* \b, Password salt: %s
-# The 6th line is Master Key Checksum Salt (128 Hex)
-#>>>>&1 regex/1l .* \b, Master salt: %s
-# The 7th line is Number of PBKDF2 Rounds (10000)
-#>>>>>&1 regex/1l .* \b, PBKDF2 rounds: %s
-# The 8th line is User key Initialization Vector (IV) (32 Hex)
-#>>>>>>&1 regex/1l .* \b, IV: %s
-#>>>>>>&1 regex/1l .* \b, IV: %s
-# The 9th line is Master IV+Key+Checksum (192 Hex)
-#>>>>>>>&1 regex/1l .* \b, Key: %s
-# look for new line separator char after line number 9
-#>>>0x204 ubyte 0x0a NL found
-#>>>>&1 ubequad x \b, Content magic %16.16llx
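As the notes above say, an unencrypted backup is a 4-line text header followed by the tar archive, optionally run through java.util.zip.Deflater (a plain zlib stream). A hedged Python sketch that turns such a backup back into a .tar; encrypted backups are deliberately rejected:

import zlib

def ab_to_tar(ab_path, tar_path):
    with open(ab_path, "rb") as f:
        magic = f.readline()        # b"ANDROID BACKUP\n"
        version = f.readline()      # e.g. b"5\n"
        compressed = f.readline()   # b"1\n" compressed, b"0\n" plain
        encryption = f.readline()   # b"none\n" for unencrypted backups
        payload = f.read()
    if magic != b"ANDROID BACKUP\n" or encryption.strip() != b"none":
        raise ValueError("not an unencrypted Android backup")
    data = zlib.decompress(payload) if compressed.strip() == b"1" else payload
    with open(tar_path, "wb") as out:
        out.write(data)

# usage: ab_to_tar("backup.ab", "backup.tar")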
-
-# *.pit files by Joerg Jenderek
-# https://forum.xda-developers.com/showthread.php?p=9122369
-# https://forum.xda-developers.com/showthread.php?t=816449
-# Partition Information Table for Samsung's smartphone with Android
-# used by flash software Odin
-0 ulelong 0x12349876
-# 1st pit entry marker
->0x01C ulequad&0xFFFFFFFCFFFFFFFC =0x0000000000000000
-# minimal 13 and maximal 18 PIT entries found
->>4 ulelong <128 Partition Information Table for Samsung smartphone
->>>4 ulelong x \b, %d entries
-# 1. pit entry
->>>4 ulelong >0 \b; #1
->>>0x01C use PIT-entry
->>>4 ulelong >1 \b; #2
->>>0x0A0 use PIT-entry
->>>4 ulelong >2 \b; #3
->>>0x124 use PIT-entry
->>>4 ulelong >3 \b; #4
->>>0x1A8 use PIT-entry
->>>4 ulelong >4 \b; #5
->>>0x22C use PIT-entry
->>>4 ulelong >5 \b; #6
->>>0x2B0 use PIT-entry
->>>4 ulelong >6 \b; #7
->>>0x334 use PIT-entry
->>>4 ulelong >7 \b; #8
->>>0x3B8 use PIT-entry
->>>4 ulelong >8 \b; #9
->>>0x43C use PIT-entry
->>>4 ulelong >9 \b; #10
->>>0x4C0 use PIT-entry
->>>4 ulelong >10 \b; #11
->>>0x544 use PIT-entry
->>>4 ulelong >11 \b; #12
->>>0x5C8 use PIT-entry
->>>4 ulelong >12 \b; #13
->>>>0x64C use PIT-entry
-# 14. pit entry
->>>4 ulelong >13 \b; #14
->>>>0x6D0 use PIT-entry
->>>4 ulelong >14 \b; #15
->>>0x754 use PIT-entry
->>>4 ulelong >15 \b; #16
->>>0x7D8 use PIT-entry
->>>4 ulelong >16 \b; #17
->>>0x85C use PIT-entry
-# 18. pit entry
->>>4 ulelong >17 \b; #18
->>>0x8E0 use PIT-entry
-
-0 name PIT-entry
-# garbage value implies end of pit entries
->0x00 ulequad&0xFFFFFFFCFFFFFFFC =0x0000000000000000
-# skip empty partition name
->>0x24 ubyte !0
-# partition name
->>>0x24 string >\0 %-.32s
-# flags
->>>0x0C ulelong&0x00000002 2 \b+RW
-# partition ID:
-# 0~IPL,MOVINAND,GANG;1~PIT,GPT;2~HIDDEN;3~SBL,HIDDEN;4~SBL2,HIDDEN;5~BOOT;6~kernel,RECOVER,misc;7~RECOVER
-# ;11~MODEM;20~efs;21~PARAM;22~FACTORY,SYSTEM;23~DBDATAFS,USERDATA;24~CACHE;80~BOOTLOADER;81~TZSW
->>>0x08 ulelong x (%#x)
-# filename
->>>0x44 string >\0 "%-.64s"
-#>>>0x18 ulelong >0
-# blocksize in 512 byte units ?
-#>>>>0x18 ulelong x \b, %db
-# partition size in blocks ?
-#>>>>0x22 ulelong x \b*%d
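The PIT entry offsets above step by 0x84 bytes starting at 0x1C, with the entry count at offset 4; the per-field offsets (ID at +0x08, flags at +0x0C, partition name at +0x24, filename at +0x44) are taken from the PIT-entry rule, not from any official Samsung documentation. A rough Python walk over the table under those assumptions:

import struct

def read_pit(path):
    with open(path, "rb") as f:
        data = f.read()
    if struct.unpack_from("<I", data, 0)[0] != 0x12349876:
        return []
    count = struct.unpack_from("<I", data, 4)[0]
    entries = []
    for i in range(min(count, 128)):            # sane upper bound; 13..18 seen in practice
        base = 0x1C + i * 0x84                  # entries are 0x84 bytes apart
        part_id = struct.unpack_from("<I", data, base + 0x08)[0]
        flags = struct.unpack_from("<I", data, base + 0x0C)[0]
        name = data[base + 0x24:base + 0x44].split(b"\0", 1)[0].decode("ascii", "replace")
        fname = data[base + 0x44:base + 0x84].split(b"\0", 1)[0].decode("ascii", "replace")
        entries.append((part_id, flags, name, fname))
    return entries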
-
-# Android sparse img format
-# From https://android.googlesource.com/\
-# platform/system/core/+/master/libsparse/sparse_format.h
-0 lelong 0xed26ff3a Android sparse image
->4 leshort x \b, version: %d
->6 leshort x \b.%d
->16 lelong x \b, Total of %d
->12 lelong x \b %d-byte output blocks in
->20 lelong x \b %d input chunks.
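The 28-byte header fingerprinted above follows libsparse's sparse_format.h (magic, major/minor version, header sizes, block size, total blocks, total chunks, checksum). A small Python reader, shown here as an illustration only:

import struct

def sparse_header(path):
    with open(path, "rb") as f:
        raw = f.read(28)
    (magic, major, minor, file_hdr_sz, chunk_hdr_sz,
     blk_sz, total_blks, total_chunks, checksum) = struct.unpack("<IHHHHIIII", raw)
    if magic != 0xED26FF3A:
        return None
    return {"version": "%d.%d" % (major, minor), "block_size": blk_sz,
            "total_blocks": total_blks, "chunks": total_chunks, "checksum": checksum}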
-
-# Android binary XML magic
-# In include/androidfw/ResourceTypes.h:
-# RES_XML_TYPE = 0x0003 followed by the size of the header (ResXMLTree_header),
-# which is 8 bytes (2 bytes type + 2 bytes header size + 4 bytes size).
-# The strength is increased to avoid misidentifying as Targa image data
-0 lelong 0x00080003 Android binary XML
-!:strength +1
-
-# Android cryptfs footer
-# From https://android.googlesource.com/\
-# platform/system/vold/+/refs/heads/master/cryptfs.h
-0 lelong 0xd0b5b1c4 Android cryptfs footer
->4 leshort x \b, version: %d
->6 leshort x \b.%d
-
-# Android Vdex format
-# From https://android.googlesource.com/\
-# platform/art/+/master/runtime/vdex_file.h
-0 string vdex Android vdex file,
->4 string >000 verifier deps version: %s,
->8 string >000 dex section version: %s,
->12 lelong >0 number of dex files: %d,
->16 lelong >0 verifier deps size: %d
-
-# Android Vdex format, dexfile is currently being updated
-# by android system
-# From https://android.googlesource.com/\
-# platform/art/+/master/dex2oat/dex2oat.cc
-0 string wdex Android vdex file, being processed by dex2oat,
->4 string >000 verifier deps version: %s,
->8 string >000 dex section version: %s,
->12 lelong >0 number of dex files: %d,
->16 lelong >0 verifier deps size: %d
-
-# Disassembled DEX files
-0 string/t .class\x20
->&0 regex/512 \^\\.super\x20L.*;$ disassembled Android DEX Java class (smali/baksmali)
-!:ext smali
-
-# Android ART (baseline) profile + metadata: baseline.prof, baseline.profm
-# Reference: https://android.googlesource.com/platform/frameworks/support/\
-# +/refs/heads/androidx-main/profileinstaller/profileinstaller/\
-# src/main/java/androidx/profileinstaller/ProfileTranscoder.java
-# Reference: https://android.googlesource.com/platform/frameworks/support/\
-# +/refs/heads/androidx-main/profileinstaller/profileinstaller/\
-# src/main/java/androidx/profileinstaller/ProfileVersion.java
-0 string pro\x00
->0 regex pro\x000[0-9][0-9]\x00 Android ART profile
-!:ext prof
->>4 string 001\x00 \b, version 001 N
->>4 string 005\x00 \b, version 005 O
->>4 string 009\x00 \b, version 009 O MR1
->>4 string 010\x00 \b, version 010 P
->>4 string 015\x00 \b, version 015 S
-0 string prm\x00
->0 regex prm\x000[0-9][0-9]\x00 Android ART profile metadata
-!:ext profm
->>4 string 001\x00 \b, version 001 N
->>4 string 002\x00 \b, version 002
-
-# Android package resource table (ARSC): resources.arsc
-# Reference: https://android.googlesource.com/platform/tools/base/\
-# +/refs/heads/mirror-goog-studio-main/apkparser/binary-resources/\
-# src/main/java/com/google/devrel/gmscore/tools/apk/arsc
-# 00: resource table type = 0x0002 (2) + header size = 12 (2)
-# 04: chunk size (4, skipped)
-# 08: #packages (4)
-0 ulelong 0x000c0002 Android package resource table (ARSC)
-!:ext arsc
->8 ulelong !1 \b, %d packages
-# 12: string pool type = 0x0001 (2) + header size = 28 (2)
-# 16: chunk size (4, skipped)
-# 20: #strings (4), #styles (4), flags (4)
->12 ulelong 0x001c0001
->>20 ulelong !0 \b, %d string(s)
->>24 ulelong !0 \b, %d style(s)
->>28 ulelong &1 \b, sorted
->>28 ulelong &256 \b, utf8
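Those fixed offsets mirror the chunk headers at the start of resources.arsc: a RES_TABLE_TYPE chunk (type 0x0002, header size 12, package count at offset 8) followed by the global RES_STRING_POOL_TYPE chunk (type 0x0001, header size 28, string/style counts and flags). A sketch of the same checks in Python, for illustration:

import struct

def arsc_summary(path):
    with open(path, "rb") as f:
        head = f.read(32)
    if struct.unpack_from("<I", head, 0)[0] != 0x000C0002:     # RES_TABLE_TYPE + header size 12
        return None
    info = {"packages": struct.unpack_from("<I", head, 8)[0]}
    if struct.unpack_from("<I", head, 12)[0] == 0x001C0001:    # RES_STRING_POOL_TYPE + header size 28
        strings, styles, flags = struct.unpack_from("<III", head, 20)
        info.update(strings=strings, styles=styles,
                    sorted=bool(flags & 1), utf8=bool(flags & 0x100))
    return info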
-
-# extracted APK Signing Block
--16 string APK\x20Sig\x20Block\x2042 APK Signing Block
diff --git a/contrib/libs/libmagic/magic/Magdir/animation b/contrib/libs/libmagic/magic/Magdir/animation
deleted file mode 100644
index aab93ca34a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/animation
+++ /dev/null
@@ -1,1206 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: animation,v 1.94 2023/06/16 20:06:50 christos Exp $
-# animation: file(1) magic for animation/movie formats
-#
-# animation formats
-# MPEG, FLI, DL originally from vax@ccwf.cc.utexas.edu (VaX#n8)
-# FLC, SGI, Apple originally from Daniel Quinlan (quinlan@yggdrasil.com)
-
-# SGI and Apple formats
-0 string MOVI Silicon Graphics movie file
-!:mime video/x-sgi-movie
-4 string moov Apple QuickTime
-!:mime video/quicktime
->12 string mvhd \b movie (fast start)
->12 string mdra \b URL
->12 string cmov \b movie (fast start, compressed header)
->12 string rmra \b multiple URLs
-4 string mdat Apple QuickTime movie (unoptimized)
-!:mime video/quicktime
-4 string wide Apple QuickTime movie (unoptimized)
-!:mime video/quicktime
-#4 string skip Apple QuickTime movie (modified)
-#!:mime video/quicktime
-#4 string free Apple QuickTime movie (modified)
-#!:mime video/quicktime
-4 string idsc Apple QuickTime image (fast start)
-!:mime image/x-quicktime
-#4 string idat Apple QuickTime image (unoptimized)
-#!:mime image/x-quicktime
-4 string pckg Apple QuickTime compressed archive
-!:mime application/x-quicktime-player
-
-#### MP4 ####
-# https://www.ftyps.com/ with local additions
-# https://cconcolato.github.io/mp4ra/filetype.html
-4 string ftyp ISO Media
-# https://aeroquartet.com/wordpress/2016/03/05/3-xavc-s/
->8 string XAVC \b, MPEG v4 system, Sony XAVC Codec
-!:mime video/mp4
->>96 string x \b, Audio "%.4s"
->>118 beshort x at %dHz
->>140 string x \b, Video "%.4s"
->>168 beshort x %d
->>170 beshort x \bx%d
->8 string 3g2 \b, MPEG v4 system, 3GPP2
-!:mime video/3gpp2
->>11 byte 4 \b v4 (H.263/AMR GSM 6.10)
->>11 byte 5 \b v5 (H.263/AMR GSM 6.10)
->>11 byte 6 \b v6 (ITU H.264/AMR GSM 6.10)
-# https://www.3gpp2.org/Public_html/Specs/C.S0050-B_v1.0_070521.pdf
-# Section 8.1.1, corresponds to a, b, c
->>11 byte 0x61 \b C.S0050-0 V1.0
->>11 byte 0x62 \b C.S0050-0-A V1.0.0
->>11 byte 0x63 \b C.S0050-0-B V1.0
->8 string 3ge \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 6 \b, Release %d MBMS Extended Presentations
->>11 byte 7 \b, Release %d MBMS Extended Presentations
->>11 byte 9 \b, Release %d MBMS Extended Presentations
->8 string 3gf \b, MPEG v4 system, 3GPP
->>11 byte 9 \b, Release %d File-delivery profile
->8 string 3gg \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 6 \b, Release %d General Profile
->>11 byte 9 \b, Release %d General Profile
->8 string 3gh \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 9 \b, Release %d Adaptive Streaming Profile
->8 string 3gm \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 9 \b, Release %d Media Segment Profile
->8 string 3gp \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 1 \b, Release %d (non existent)
->>11 byte 2 \b, Release %d (non existent)
->>11 byte 3 \b, Release %d (non existent)
->>11 byte 4 \b, Release %d
->>11 byte 5 \b, Release %d
->>11 byte 6 \b, Release %d
->>11 byte 7 \b, Release %d Streaming Servers
->8 string 3gr \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 6 \b, Release %d Progressive Download Profile
->>11 byte 9 \b, Release %d Progressive Download Profile
->8 string 3gs \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 6 \b, Release %d Streaming Servers
->>11 byte 7 \b, Release %d Streaming Servers
->>11 byte 9 \b, Release %d Streaming Servers
->8 string 3gt \b, MPEG v4 system, 3GPP
-!:mime video/3gpp
->>11 byte 8 \b, Release %d Media Stream Recording Profile
->>11 byte 9 \b, Release %d Media Stream Recording Profile
->8 string ARRI \b, MPEG v4 system, ARRI Digital Camera
-!:mime video/mp4
->8 string avc1 \b, MPEG v4 system, 3GPP JVT AVC [ISO 14496-12:2005]
-!:mime video/mp4
->8 string bbxm \b, Blinkbox Master File: H.264 video/16-bit LE LPCM audio
-!:mime video/mp4
->8 string/W qt \b, Apple QuickTime movie
-!:mime video/quicktime
->8 string CAEP \b, Canon Digital Camera
->8 string caqv \b, Casio Digital Camera
->8 string CDes \b, Convergent Design
->8 string caaa \b, CMAF Media Profile - AAC Adaptive Audio
->8 string caac \b, CMAF Media Profile - AAC Core
->8 string caqv \b, Casio Digital Camera Casio
->8 string ccea \b, CMAF Supplemental Data - CEA-608/708
->8 string ccff \b, Common container file format
->8 string cfhd \b, CMAF Media Profile - AVC HD
->8 string cfsd \b, CMAF Media Profile - AVC SD
->8 string chd1 \b, CMAF Media Profile - HEVC HDR10
->8 string chdf \b, CMAF Media Profile - AVC HDHF
->8 string chhd \b, CMAF Media Profile - HEVC HHD8
->8 string chh1 \b, CMAF Media Profile - HEVC HHD10
->8 string clg1 \b, CMAF Media Profile - HEVC HLG10
->8 string cmfc \b, CMAF Track Format
->8 string cmff \b, CMAF Fragment Format
->8 string cmfl \b, CMAF Chunk Format
->8 string cmfs \b, CMAF Segment Format
->8 string cud1 \b, CMAF Media Profile - HEVC UHD10
->8 string cud8 \b, CMAF Media Profile - HEVC UHD8
->8 string cwvt \b, CMAF Media Profile - WebVTT
->8 string da0a \b, DMB MAF w/ MPEG Layer II aud, MOT slides, DLS, JPG/PNG/MNG
->8 string da0b \b, DMB MAF, ext DA0A, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string da1a \b, DMB MAF audio with ER-BSAC audio, JPG/PNG/MNG images
->8 string da1b \b, DMB MAF, ext da1a, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string da2a \b, DMB MAF aud w/ HE-AAC v2 aud, MOT slides, DLS, JPG/PNG/MNG
->8 string da2b \b, DMB MAF, ext da2a, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string da3a \b, DMB MAF aud with HE-AAC aud, JPG/PNG/MNG images
->8 string da3b \b, DMB MAF, ext da3a w/ BIFS, 3GPP, DID, TVA, REL, IPMP
->8 string dash \b, MPEG v4 system, Dynamic Adaptive Streaming over HTTP
-!:mime video/mp4
->8 string dby1 \b, MP4 files with Dolby content
->8 string dsms \b, Media Segment DASH conformant
->8 string dts1 \b, MP4 track file with audio codecs dtsc dtsh or dtse
->8 string dts2 \b, MP4 track file with audio codec dtsx
->8 string dts3 \b, MP4 track file with audio codec dtsy
->8 string dxo$20 \b, DxO ONE camera
->8 string dmb1 \b, DMB MAF supporting all the components defined in the spec
->8 string dmpf \b, Digital Media Project
->8 string drc1 \b, Dirac (wavelet compression), encap in ISO base media (MP4)
->8 string dv1a \b, DMB MAF vid w/ AVC vid, ER-BSAC aud, BIFS, JPG/PNG/MNG, TS
->8 string dv1b \b, DMB MAF, ext dv1a, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string dv2a \b, DMB MAF vid w/ AVC vid, HE-AAC v2 aud, BIFS, JPG/PNG/MNG, TS
->8 string dv2b \b, DMB MAF, ext dv2a, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string dv3a \b, DMB MAF vid w/ AVC vid, HE-AAC aud, BIFS, JPG/PNG/MNG, TS
->8 string dv3b \b, DMB MAF, ext dv3a, with 3GPP timed text, DID, TVA, REL, IPMP
->8 string dvr1 \b, DVB (.DVB) over RTP
-!:mime video/vnd.dvb.file
->8 string dvt1 \b, DVB (.DVB) over MPEG-2 Transport Stream
->8 string emsg \b, Event message box present
-!:mime video/vnd.dvb.file
->8 string F4V \b, Video for Adobe Flash Player 9+ (.F4V)
-!:mime video/mp4
->8 string F4P \b, Protected Video for Adobe Flash Player 9+ (.F4P)
-!:mime video/mp4
->8 string F4A \b, Audio for Adobe Flash Player 9+ (.F4A)
-!:mime audio/mp4
->8 string F4B \b, Audio Book for Adobe Flash Player 9+ (.F4B)
-!:mime audio/mp4
->8 string ifrm \b, Apple iFrame Specification, Version 8.1 Jan 2013
->8 string im1i \b, CMAF Media Profile - IMSC1 Image
->8 string im1t \b, CMAF Media Profile - IMSC1 Text
->8 string isc2 \b, ISMACryp 2.0 Encrypted File
-# ?/enc-isoff-generic
->8 string iso \b, MP4 Base Media
-!:mime video/mp4
-!:ext mp4
->>11 string m v1 [ISO 14496-12:2003]
->>11 string 2 v2 [ISO 14496-12:2005]
->>11 string 4 v4
->>11 string 5 v5
->>11 string 6 v6
->8 string isml \b, MP4 Base Media v2 [ISO 14496-12:2005]
-!:mime video/mp4
->8 string J2P0 \b, JPEG2000 Profile 0
->8 string J2P1 \b, JPEG2000 Profile 1
->8 string/W jp2 \b, JPEG 2000
-!:mime image/jp2
->8 string JP2 \b, JPEG 2000 Image (.JP2) [ISO 15444-1 ?]
-!:mime image/jp2
->8 string JP20 \b, Unknown, from GPAC samples (prob non-existent)
->8 string jpm \b, JPEG 2000 Compound Image (.JPM) [ISO 15444-6]
-!:mime image/jpm
->8 string jpsi \b, The JPSearch data interchange format
->8 string jpx \b, JPEG 2000 w/ extensions (.JPX) [ISO 15444-2]
-!:mime image/jpx
->8 string KDDI \b, 3GPP2 EZmovie for KDDI 3G cellphones
-!:mime video/3gpp2
->8 string LCAG \b, Leica digital camera
->8 string lmsg \b, Last Media Segment indicator for ISO base media file format.
->8 string M4A \b, Apple iTunes ALAC/AAC-LC (.M4A) Audio
-!:mime audio/x-m4a
->8 string M4B \b, Apple iTunes ALAC/AAC-LC (.M4B) Audio Book
-!:mime audio/mp4
->8 string M4P \b, Apple iTunes ALAC/AAC-LC (.M4P) AES Protected Audio
-!:mime video/mp4
->8 string M4V \b, Apple iTunes Video (.M4V) Video
-!:mime video/x-m4v
->8 string M4VH \b, Apple TV (.M4V)
-!:mime video/x-m4v
->8 string M4VP \b, Apple iPhone (.M4V)
-!:mime video/x-m4v
->8 string mj2s \b, Motion JPEG 2000 [ISO 15444-3] Simple Profile
-!:mime video/mj2
->8 string mjp2 \b, Motion JPEG 2000 [ISO 15444-3] General Profile
->8 string MFSM \b, Media File for Samsung video Metadata
->8 string MGSV \b, Sony Home and Mobile Multimedia Platform (HMMP)
-!:mime video/mj2
->8 string mmp4 \b, MPEG-4/3GPP Mobile Profile (.MP4 / .3GP) (for NTT)
-!:mime video/mp4
->8 string mobi \b, MPEG-4, MOBI format
-!:mime video/mp4
->8 string mp21 \b, MPEG-21 [ISO/IEC 21000-9]
->8 string mp41 \b, MP4 v1 [ISO 14496-1:ch13]
-!:mime video/mp4
->8 string mp42 \b, MP4 v2 [ISO 14496-14]
-!:mime video/mp4
->8 string mp71 \b, MP4 w/ MPEG-7 Metadata [per ISO 14496-12]
->8 string mp7t \b, MPEG v4 system, MPEG v7 XML
->8 string mp7b \b, MPEG v4 system, MPEG v7 binary XML
->8 string mpuf \b, Compliance with the MMT Processing Unit format
->8 string msdh \b, Media Segment conforming to ISO base media file format.
->8 string msix \b, Media Segment conforming to ISO base media file format.
->8 string mmp4 \b, MPEG v4 system, 3GPP Mobile
-!:mime video/mp4
->8 string MPPI \b, Photo Player, MAF [ISO/IEC 23000-3]
->8 string mqt \b, Sony / Mobile QuickTime (.MQV) US Pat 7,477,830
-!:mime video/quicktime
->8 string MSNV \b, MPEG-4 (.MP4) for SonyPSP
-!:mime audio/mp4
->8 string NDAS \b, MP4 v2 [ISO 14496-14] Nero Digital AAC Audio
-!:mime audio/mp4
->8 string NDSC \b, MPEG-4 (.MP4) Nero Cinema Profile
-!:mime video/mp4
->8 string NDSH \b, MPEG-4 (.MP4) Nero HDTV Profile
-!:mime video/mp4
->8 string NDSM \b, MPEG-4 (.MP4) Nero Mobile Profile
-!:mime video/mp4
->8 string NDSP \b, MPEG-4 (.MP4) Nero Portable Profile
-!:mime video/mp4
->8 string NDSS \b, MPEG-4 (.MP4) Nero Standard Profile
-!:mime video/mp4
->8 string NDXC \b, H.264/MPEG-4 AVC (.MP4) Nero Cinema Profile
-!:mime video/mp4
->8 string NDXH \b, H.264/MPEG-4 AVC (.MP4) Nero HDTV Profile
-!:mime video/mp4
->8 string NDXM \b, H.264/MPEG-4 AVC (.MP4) Nero Mobile Profile
-!:mime video/mp4
->8 string NDXP \b, H.264/MPEG-4 AVC (.MP4) Nero Portable Profile
-!:mime video/mp4
->8 string NDXS \b, H.264/MPEG-4 AVC (.MP4) Nero Standard Profile
->8 string niko \b, Nikon Digital Camera
-!:mime video/mp4
->8 string odcf \b, OMA DCF DRM Format 2.0 (OMA-TS-DRM-DCF-V2_0-20060303-A)
->8 string opf2 \b, OMA PDCF DRM Format 2.1 (OMA-TS-DRM-DCF-V2_1-20070724-C)
->8 string opx2 \b, OMA PDCF DRM + XBS ext (OMA-TS-DRM_XBS-V1_0-20070529-C)
->8 string pana \b, Panasonic Digital Camera
->8 string piff \b, Protected Interoperable File Format
->8 string pnvi \b, Panasonic Video Intercom
->8 string qt \b, Apple QuickTime (.MOV/QT)
-!:mime video/quicktime
-# HEIF image format
-# see https://nokiatech.github.io/heif/technical.html
->8 string mif1 \b, HEIF Image
-!:mime image/heif
->8 string msf1 \b, HEIF Image Sequence
-!:mime image/heif-sequence
->8 string heic \b, HEIF Image HEVC Main or Main Still Picture Profile
-!:mime image/heic
->8 string heix \b, HEIF Image HEVC Main 10 Profile
-!:mime image/heic
->8 string hevc \b, HEIF Image Sequence HEVC Main or Main Still Picture Profile
-!:mime image/heic-sequence
->8 string hevx \b, HEIF Image Sequence HEVC Main 10 Profile
-!:mime image/heic-sequence
-# following HEIF brands are not mentioned in the heif technical info currently (Oct 2017)
-# but used in the reference implementation:
-# https://github.com/nokiatech/heif/blob/d5e9a21c8ba8df712bdf643021dd9f6518134776/Srcs/reader/hevcimagefilereader.cpp
->8 string heim \b, HEIF Image L-HEVC
-!:mime image/heif
->8 string heis \b, HEIF Image L-HEVC
-!:mime image/heif
->8 string avic \b, HEIF Image AVC
-!:mime image/heif
->8 string hevm \b, HEIF Image Sequence L-HEVC
-!:mime image/heif-sequence
->8 string hevs \b, HEIF Image Sequence L-HEVC
-!:mime image/heif-sequence
->8 string avcs \b, HEIF Image Sequence AVC
-!:mime image/heif-sequence
-# AVIF image format
-# see https://aomediacodec.github.io/av1-avif/
->8 string avif \b, AVIF Image
-!:mime image/avif
->8 string avis \b, AVIF Image Sequence
-!:mime image/avif
->8 string risx \b, Representation Index Segment for MPEG-2 TS Segments
->8 string ROSS \b, Ross Video
->8 string sdv \b, SD Memory Card Video
->8 string ssc1 \b, Samsung stereo, single stream (patent pending)
->8 string ssc2 \b, Samsung stereo, dual stream (patent pending)
->8 string SEAU \b, Sony Home and Mobile Multimedia Platform (HMMP)
->8 string SEBK \b, Sony Home and Mobile Multimedia Platform (HMMP)
->8 string senv \b, Video contents Sony Entertainment Network
->8 string sims \b, Media Segment for Sub-Indexed Media Segment format
->8 string sisx \b, Single Index Segment used to index MPEG-2 TS
->8 string ssss \b, Subsegment Index Segment used to index MPEG-2 Segments
->8 string uvvu \b, UltraViolet file brand for DECE Common Format
-
-# MPEG sequences
-# Scans for all common MPEG header start codes
-0 belong 0x00000001
->4 byte&0x1F 0x07 JVT NAL sequence, H.264 video
->>5 byte 66 \b, baseline
->>5 byte 77 \b, main
->>5 byte 88 \b, extended
->>7 byte x \b @ L %u
-0 belong&0xFFFFFF00 0x00000100
->3 byte 0xBA MPEG sequence
-!:mime video/mpeg
-# http://fileformats.archiveteam.org/wiki/Enhanced_VOB
-# https://reposcope.com/mimetype/video/mpeg
-!:ext vob/evo/mpg/mpeg
->>4 byte &0x40 \b, v2, program multiplex
->>4 byte ^0x40 \b, v1, system multiplex
->3 byte 0xBB MPEG sequence, v1/2, multiplex (missing pack header)
->3 byte&0x1F 0x07 MPEG sequence, H.264 video
->>4 byte 66 \b, baseline
->>4 byte 77 \b, main
->>4 byte 88 \b, extended
->>6 byte x \b @ L %u
-# GRR too general as it catches also FoxPro Memo example NG.FPT
->3 byte 0xB0 MPEG sequence, v4
-# TODO: maybe this extra line exclude FoxPro Memo example NG.FPT starting with 000001b0 00000100 00000000
-#>>4 byte !0 MPEG sequence, v4
-!:mime video/mpeg4-generic
->>5 belong 0x000001B5
->>>9 byte &0x80
->>>>10 byte&0xF0 16 \b, video
->>>>10 byte&0xF0 32 \b, still texture
->>>>10 byte&0xF0 48 \b, mesh
->>>>10 byte&0xF0 64 \b, face
->>>9 byte&0xF8 8 \b, video
->>>9 byte&0xF8 16 \b, still texture
->>>9 byte&0xF8 24 \b, mesh
->>>9 byte&0xF8 32 \b, face
->>4 byte 1 \b, simple @ L1
->>4 byte 2 \b, simple @ L2
->>4 byte 3 \b, simple @ L3
->>4 byte 4 \b, simple @ L0
->>4 byte 17 \b, simple scalable @ L1
->>4 byte 18 \b, simple scalable @ L2
->>4 byte 33 \b, core @ L1
->>4 byte 34 \b, core @ L2
->>4 byte 50 \b, main @ L2
->>4 byte 51 \b, main @ L3
->>4 byte 53 \b, main @ L4
->>4 byte 66 \b, n-bit @ L2
->>4 byte 81 \b, scalable texture @ L1
->>4 byte 97 \b, simple face animation @ L1
->>4 byte 98 \b, simple face animation @ L2
->>4 byte 99 \b, simple face basic animation @ L1
->>4 byte 100 \b, simple face basic animation @ L2
->>4 byte 113 \b, basic animation text @ L1
->>4 byte 114 \b, basic animation text @ L2
->>4 byte 129 \b, hybrid @ L1
->>4 byte 130 \b, hybrid @ L2
->>4 byte 145 \b, advanced RT simple @ L1
->>4 byte 146 \b, advanced RT simple @ L2
->>4 byte 147 \b, advanced RT simple @ L3
->>4 byte 148 \b, advanced RT simple @ L4
->>4 byte 161 \b, core scalable @ L1
->>4 byte 162 \b, core scalable @ L2
->>4 byte 163 \b, core scalable @ L3
->>4 byte 177 \b, advanced coding efficiency @ L1
->>4 byte 178 \b, advanced coding efficiency @ L2
->>4 byte 179 \b, advanced coding efficiency @ L3
->>4 byte 180 \b, advanced coding efficiency @ L4
->>4 byte 193 \b, advanced core @ L1
->>4 byte 194 \b, advanced core @ L2
->>4 byte 209 \b, advanced scalable texture @ L1
->>4 byte 210 \b, advanced scalable texture @ L2
->>4 byte 211 \b, advanced scalable texture @ L3
->>4 byte 225 \b, simple studio @ L1
->>4 byte 226 \b, simple studio @ L2
->>4 byte 227 \b, simple studio @ L3
->>4 byte 228 \b, simple studio @ L4
->>4 byte 229 \b, core studio @ L1
->>4 byte 230 \b, core studio @ L2
->>4 byte 231 \b, core studio @ L3
->>4 byte 232 \b, core studio @ L4
->>4 byte 240 \b, advanced simple @ L0
->>4 byte 241 \b, advanced simple @ L1
->>4 byte 242 \b, advanced simple @ L2
->>4 byte 243 \b, advanced simple @ L3
->>4 byte 244 \b, advanced simple @ L4
->>4 byte 245 \b, advanced simple @ L5
->>4 byte 247 \b, advanced simple @ L3b
->>4 byte 248 \b, FGS @ L0
->>4 byte 249 \b, FGS @ L1
->>4 byte 250 \b, FGS @ L2
->>4 byte 251 \b, FGS @ L3
->>4 byte 252 \b, FGS @ L4
->>4 byte 253 \b, FGS @ L5
->3 byte 0xB5 MPEG sequence, v4
-!:mime video/mpeg4-generic
->>4 byte &0x80
->>>5 byte&0xF0 16 \b, video (missing profile header)
->>>5 byte&0xF0 32 \b, still texture (missing profile header)
->>>5 byte&0xF0 48 \b, mesh (missing profile header)
->>>5 byte&0xF0 64 \b, face (missing profile header)
->>4 byte&0xF8 8 \b, video (missing profile header)
->>4 byte&0xF8 16 \b, still texture (missing profile header)
->>4 byte&0xF8 24 \b, mesh (missing profile header)
->>4 byte&0xF8 32 \b, face (missing profile header)
->3 byte 0xB3 MPEG sequence
-!:mime video/mpeg
->>12 belong 0x000001B8 \b, v1, progressive Y'CbCr 4:2:0 video
->>12 belong 0x000001B2 \b, v1, progressive Y'CbCr 4:2:0 video
->>12 belong 0x000001B5 \b, v2,
->>>16 byte&0x0F 1 \b HP
->>>16 byte&0x0F 2 \b Spt
->>>16 byte&0x0F 3 \b SNR
->>>16 byte&0x0F 4 \b MP
->>>16 byte&0x0F 5 \b SP
->>>17 byte&0xF0 64 \b@HL
->>>17 byte&0xF0 96 \b@H-14
->>>17 byte&0xF0 128 \b@ML
->>>17 byte&0xF0 160 \b@LL
->>>17 byte &0x08 \b progressive
->>>17 byte ^0x08 \b interlaced
->>>17 byte&0x06 2 \b Y'CbCr 4:2:0 video
->>>17 byte&0x06 4 \b Y'CbCr 4:2:2 video
->>>17 byte&0x06 6 \b Y'CbCr 4:4:4 video
->>11 byte &0x02
->>>75 byte &0x01
->>>>140 belong 0x000001B8 \b, v1, progressive Y'CbCr 4:2:0 video
->>>>140 belong 0x000001B2 \b, v1, progressive Y'CbCr 4:2:0 video
->>>>140 belong 0x000001B5 \b, v2,
->>>>>144 byte&0x0F 1 \b HP
->>>>>144 byte&0x0F 2 \b Spt
->>>>>144 byte&0x0F 3 \b SNR
->>>>>144 byte&0x0F 4 \b MP
->>>>>144 byte&0x0F 5 \b SP
->>>>>145 byte&0xF0 64 \b@HL
->>>>>145 byte&0xF0 96 \b@H-14
->>>>>145 byte&0xF0 128 \b@ML
->>>>>145 byte&0xF0 160 \b@LL
->>>>>145 byte &0x08 \b progressive
->>>>>145 byte ^0x08 \b interlaced
->>>>>145 byte&0x06 2 \b Y'CbCr 4:2:0 video
->>>>>145 byte&0x06 4 \b Y'CbCr 4:2:2 video
->>>>>145 byte&0x06 6 \b Y'CbCr 4:4:4 video
->>76 belong 0x000001B8 \b, v1, progressive Y'CbCr 4:2:0 video
->>76 belong 0x000001B2 \b, v1, progressive Y'CbCr 4:2:0 video
->>76 belong 0x000001B5 \b, v2,
->>>80 byte&0x0F 1 \b HP
->>>80 byte&0x0F 2 \b Spt
->>>80 byte&0x0F 3 \b SNR
->>>80 byte&0x0F 4 \b MP
->>>80 byte&0x0F 5 \b SP
->>>81 byte&0xF0 64 \b@HL
->>>81 byte&0xF0 96 \b@H-14
->>>81 byte&0xF0 128 \b@ML
->>>81 byte&0xF0 160 \b@LL
->>>81 byte &0x08 \b progressive
->>>81 byte ^0x08 \b interlaced
->>>81 byte&0x06 2 \b Y'CbCr 4:2:0 video
->>>81 byte&0x06 4 \b Y'CbCr 4:2:2 video
->>>81 byte&0x06 6 \b Y'CbCr 4:4:4 video
->>4 belong&0xFFFFFF00 0x78043800 \b, HD-TV 1920P
->>>7 byte&0xF0 0x10 \b, 16:9
->>4 belong&0xFFFFFF00 0x50002D00 \b, SD-TV 1280I
->>>7 byte&0xF0 0x10 \b, 16:9
->>4 belong&0xFFFFFF00 0x30024000 \b, PAL Capture
->>>7 byte&0xF0 0x10 \b, 4:3
->>4 beshort&0xFFF0 0x2C00 \b, 4CIF
->>>5 beshort&0x0FFF 0x01E0 \b NTSC
->>>5 beshort&0x0FFF 0x0240 \b PAL
->>>7 byte&0xF0 0x20 \b, 4:3
->>>7 byte&0xF0 0x30 \b, 16:9
->>>7 byte&0xF0 0x40 \b, 11:5
->>>7 byte&0xF0 0x80 \b, PAL 4:3
->>>7 byte&0xF0 0xC0 \b, NTSC 4:3
->>4 belong&0xFFFFFF00 0x2801E000 \b, LD-TV 640P
->>>7 byte&0xF0 0x10 \b, 4:3
->>4 belong&0xFFFFFF00 0x1400F000 \b, 320x240
->>>7 byte&0xF0 0x10 \b, 4:3
->>4 belong&0xFFFFFF00 0x0F00A000 \b, 240x160
->>>7 byte&0xF0 0x10 \b, 4:3
->>4 belong&0xFFFFFF00 0x0A007800 \b, 160x120
->>>7 byte&0xF0 0x10 \b, 4:3
->>4 beshort&0xFFF0 0x1600 \b, CIF
->>>5 beshort&0x0FFF 0x00F0 \b NTSC
->>>5 beshort&0x0FFF 0x0120 \b PAL
->>>7 byte&0xF0 0x20 \b, 4:3
->>>7 byte&0xF0 0x30 \b, 16:9
->>>7 byte&0xF0 0x40 \b, 11:5
->>>7 byte&0xF0 0x80 \b, PAL 4:3
->>>7 byte&0xF0 0xC0 \b, NTSC 4:3
->>>5 beshort&0x0FFF 0x0240 \b PAL 625
->>>>7 byte&0xF0 0x20 \b, 4:3
->>>>7 byte&0xF0 0x30 \b, 16:9
->>>>7 byte&0xF0 0x40 \b, 11:5
->>4 beshort&0xFFF0 0x2D00 \b, CCIR/ITU
->>>5 beshort&0x0FFF 0x01E0 \b NTSC 525
->>>5 beshort&0x0FFF 0x0240 \b PAL 625
->>>7 byte&0xF0 0x20 \b, 4:3
->>>7 byte&0xF0 0x30 \b, 16:9
->>>7 byte&0xF0 0x40 \b, 11:5
->>4 beshort&0xFFF0 0x1E00 \b, SVCD
->>>5 beshort&0x0FFF 0x01E0 \b NTSC 525
->>>5 beshort&0x0FFF 0x0240 \b PAL 625
->>>7 byte&0xF0 0x20 \b, 4:3
->>>7 byte&0xF0 0x30 \b, 16:9
->>>7 byte&0xF0 0x40 \b, 11:5
->>7 byte&0x0F 1 \b, 23.976 fps
->>7 byte&0x0F 2 \b, 24 fps
->>7 byte&0x0F 3 \b, 25 fps
->>7 byte&0x0F 4 \b, 29.97 fps
->>7 byte&0x0F 5 \b, 30 fps
->>7 byte&0x0F 6 \b, 50 fps
->>7 byte&0x0F 7 \b, 59.94 fps
->>7 byte&0x0F 8 \b, 60 fps
->>11 byte &0x04 \b, Constrained
-
-# MPEG ADTS Audio (*.mpx/mxa/aac)
-# from dreesen@math.fu-berlin.de
-# modified to fully support MPEG ADTS
-
-# MP3, M1A
-# modified by Joerg Jenderek
-# GRR the original tests are too common for many DOS files,
-# so don't accept as MP3 until we've tested the rate;
-# the test must also beat GEMDOS fonts
-0 beshort&0xFFFE 0xFFFA
-# rates
->2 byte&0xF0 !0
->>2 byte&0xF0 !0xF0 MPEG ADTS, layer III, v1
-!:strength +20
-!:mime audio/mpeg
->2 byte&0xF0 0x10 \b, 32 kbps
->2 byte&0xF0 0x20 \b, 40 kbps
->2 byte&0xF0 0x30 \b, 48 kbps
->2 byte&0xF0 0x40 \b, 56 kbps
->2 byte&0xF0 0x50 \b, 64 kbps
->2 byte&0xF0 0x60 \b, 80 kbps
->2 byte&0xF0 0x70 \b, 96 kbps
->2 byte&0xF0 0x80 \b, 112 kbps
->2 byte&0xF0 0x90 \b, 128 kbps
->2 byte&0xF0 0xA0 \b, 160 kbps
->2 byte&0xF0 0xB0 \b, 192 kbps
->2 byte&0xF0 0xC0 \b, 224 kbps
->2 byte&0xF0 0xD0 \b, 256 kbps
->2 byte&0xF0 0xE0 \b, 320 kbps
-# timing
->2 byte&0x0C 0x00 \b, 44.1 kHz
->2 byte&0x0C 0x04 \b, 48 kHz
->2 byte&0x0C 0x08 \b, 32 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
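The bitrate, sample-rate and channel-mode branches above are table lookups on the second and third header bytes of an MPEG-1 layer III frame. A toy Python decode of one 4-byte frame header using the same tables (not a general MP3 parser):

BITRATES = [None, 32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192, 224, 256, 320, None]
SAMPLE_RATES = {0: 44100, 1: 48000, 2: 32000}
CHANNEL_MODES = ["Stereo", "JntStereo", "2x Monaural", "Monaural"]

def mp3_frame_info(header):
    b0, b1, b2, b3 = header[:4]
    if b0 != 0xFF or (b1 & 0xFE) != 0xFA:        # sync word + MPEG-1 layer III
        return None
    return (BITRATES[b2 >> 4],                   # kbps; indices 0 and 15 are invalid
            SAMPLE_RATES.get((b2 >> 2) & 0x03),  # Hz
            CHANNEL_MODES[b3 >> 6])

# mp3_frame_info(b"\xff\xfb\x90\x00") -> (128, 44100, 'Stereo')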
-
-# MP2, M1A
-0 beshort&0xFFFE 0xFFFC MPEG ADTS, layer II, v1
-!:mime audio/mpeg
-# rates
->2 byte&0xF0 0x10 \b, 32 kbps
->2 byte&0xF0 0x20 \b, 48 kbps
->2 byte&0xF0 0x30 \b, 56 kbps
->2 byte&0xF0 0x40 \b, 64 kbps
->2 byte&0xF0 0x50 \b, 80 kbps
->2 byte&0xF0 0x60 \b, 96 kbps
->2 byte&0xF0 0x70 \b, 112 kbps
->2 byte&0xF0 0x80 \b, 128 kbps
->2 byte&0xF0 0x90 \b, 160 kbps
->2 byte&0xF0 0xA0 \b, 192 kbps
->2 byte&0xF0 0xB0 \b, 224 kbps
->2 byte&0xF0 0xC0 \b, 256 kbps
->2 byte&0xF0 0xD0 \b, 320 kbps
->2 byte&0xF0 0xE0 \b, 384 kbps
-# timing
->2 byte&0x0C 0x00 \b, 44.1 kHz
->2 byte&0x0C 0x04 \b, 48 kHz
->2 byte&0x0C 0x08 \b, 32 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# MPA, M1A
-# updated by Joerg Jenderek
-# GRR the original tests are too common for many DOS files, so test 32 <= kbits <= 448
-# GRR this test is still too general as it catches a BOM of UTF-16 files (0xFFFE)
-# FIXME: Almost all little-endian UTF-16 text with a BOM is clobbered by these entries
-#0 beshort&0xFFFE 0xFFFE
-#>2 ubyte&0xF0 >0x0F
-#>>2 ubyte&0xF0 <0xE1 MPEG ADTS, layer I, v1
-## rate
-#>>>2 byte&0xF0 0x10 \b, 32 kbps
-#>>>2 byte&0xF0 0x20 \b, 64 kbps
-#>>>2 byte&0xF0 0x30 \b, 96 kbps
-#>>>2 byte&0xF0 0x40 \b, 128 kbps
-#>>>2 byte&0xF0 0x50 \b, 160 kbps
-#>>>2 byte&0xF0 0x60 \b, 192 kbps
-#>>>2 byte&0xF0 0x70 \b, 224 kbps
-#>>>2 byte&0xF0 0x80 \b, 256 kbps
-#>>>2 byte&0xF0 0x90 \b, 288 kbps
-#>>>2 byte&0xF0 0xA0 \b, 320 kbps
-#>>>2 byte&0xF0 0xB0 \b, 352 kbps
-#>>>2 byte&0xF0 0xC0 \b, 384 kbps
-#>>>2 byte&0xF0 0xD0 \b, 416 kbps
-#>>>2 byte&0xF0 0xE0 \b, 448 kbps
-## timing
-#>>>2 byte&0x0C 0x00 \b, 44.1 kHz
-#>>>2 byte&0x0C 0x04 \b, 48 kHz
-#>>>2 byte&0x0C 0x08 \b, 32 kHz
-## channels/options
-#>>>3 byte&0xC0 0x00 \b, Stereo
-#>>>3 byte&0xC0 0x40 \b, JntStereo
-#>>>3 byte&0xC0 0x80 \b, 2x Monaural
-#>>>3 byte&0xC0 0xC0 \b, Monaural
-##>1 byte ^0x01 \b, Data Verify
-##>2 byte &0x02 \b, Packet Pad
-##>2 byte &0x01 \b, Custom Flag
-##>3 byte &0x08 \b, Copyrighted
-##>3 byte &0x04 \b, Original Source
-##>3 byte&0x03 1 \b, NR: 50/15 ms
-##>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# MP3, M2A
-0 beshort&0xFFFE 0xFFF2 MPEG ADTS, layer III, v2
-!:mime audio/mpeg
-# rate
->2 byte&0xF0 0x10 \b, 8 kbps
->2 byte&0xF0 0x20 \b, 16 kbps
->2 byte&0xF0 0x30 \b, 24 kbps
->2 byte&0xF0 0x40 \b, 32 kbps
->2 byte&0xF0 0x50 \b, 40 kbps
->2 byte&0xF0 0x60 \b, 48 kbps
->2 byte&0xF0 0x70 \b, 56 kbps
->2 byte&0xF0 0x80 \b, 64 kbps
->2 byte&0xF0 0x90 \b, 80 kbps
->2 byte&0xF0 0xA0 \b, 96 kbps
->2 byte&0xF0 0xB0 \b, 112 kbps
->2 byte&0xF0 0xC0 \b, 128 kbps
->2 byte&0xF0 0xD0 \b, 144 kbps
->2 byte&0xF0 0xE0 \b, 160 kbps
-# timing
->2 byte&0x0C 0x00 \b, 22.05 kHz
->2 byte&0x0C 0x04 \b, 24 kHz
->2 byte&0x0C 0x08 \b, 16 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# MP2, M2A
-0 beshort&0xFFFE 0xFFF4 MPEG ADTS, layer II, v2
-!:mime audio/mpeg
-# rate
->2 byte&0xF0 0x10 \b, 8 kbps
->2 byte&0xF0 0x20 \b, 16 kbps
->2 byte&0xF0 0x30 \b, 24 kbps
->2 byte&0xF0 0x40 \b, 32 kbps
->2 byte&0xF0 0x50 \b, 40 kbps
->2 byte&0xF0 0x60 \b, 48 kbps
->2 byte&0xF0 0x70 \b, 56 kbps
->2 byte&0xF0 0x80 \b, 64 kbps
->2 byte&0xF0 0x90 \b, 80 kbps
->2 byte&0xF0 0xA0 \b, 96 kbps
->2 byte&0xF0 0xB0 \b, 112 kbps
->2 byte&0xF0 0xC0 \b, 128 kbps
->2 byte&0xF0 0xD0 \b, 144 kbps
->2 byte&0xF0 0xE0 \b, 160 kbps
-# timing
->2 byte&0x0C 0x00 \b, 22.05 kHz
->2 byte&0x0C 0x04 \b, 24 kHz
->2 byte&0x0C 0x08 \b, 16 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# MPA, M2A
-0 beshort&0xFFFE 0xFFF6 MPEG ADTS, layer I, v2
-!:mime audio/mpeg
-# rate
->2 byte&0xF0 0x10 \b, 32 kbps
->2 byte&0xF0 0x20 \b, 48 kbps
->2 byte&0xF0 0x30 \b, 56 kbps
->2 byte&0xF0 0x40 \b, 64 kbps
->2 byte&0xF0 0x50 \b, 80 kbps
->2 byte&0xF0 0x60 \b, 96 kbps
->2 byte&0xF0 0x70 \b, 112 kbps
->2 byte&0xF0 0x80 \b, 128 kbps
->2 byte&0xF0 0x90 \b, 144 kbps
->2 byte&0xF0 0xA0 \b, 160 kbps
->2 byte&0xF0 0xB0 \b, 176 kbps
->2 byte&0xF0 0xC0 \b, 192 kbps
->2 byte&0xF0 0xD0 \b, 224 kbps
->2 byte&0xF0 0xE0 \b, 256 kbps
-# timing
->2 byte&0x0C 0x00 \b, 22.05 kHz
->2 byte&0x0C 0x04 \b, 24 kHz
->2 byte&0x0C 0x08 \b, 16 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# MP3, M25A
-0 beshort&0xFFFE 0xFFE2 MPEG ADTS, layer III, v2.5
-!:mime audio/mpeg
-# rate
->2 byte&0xF0 0x10 \b, 8 kbps
->2 byte&0xF0 0x20 \b, 16 kbps
->2 byte&0xF0 0x30 \b, 24 kbps
->2 byte&0xF0 0x40 \b, 32 kbps
->2 byte&0xF0 0x50 \b, 40 kbps
->2 byte&0xF0 0x60 \b, 48 kbps
->2 byte&0xF0 0x70 \b, 56 kbps
->2 byte&0xF0 0x80 \b, 64 kbps
->2 byte&0xF0 0x90 \b, 80 kbps
->2 byte&0xF0 0xA0 \b, 96 kbps
->2 byte&0xF0 0xB0 \b, 112 kbps
->2 byte&0xF0 0xC0 \b, 128 kbps
->2 byte&0xF0 0xD0 \b, 144 kbps
->2 byte&0xF0 0xE0 \b, 160 kbps
-# timing
->2 byte&0x0C 0x00 \b, 11.025 kHz
->2 byte&0x0C 0x04 \b, 12 kHz
->2 byte&0x0C 0x08 \b, 8 kHz
-# channels/options
->3 byte&0xC0 0x00 \b, Stereo
->3 byte&0xC0 0x40 \b, JntStereo
->3 byte&0xC0 0x80 \b, 2x Monaural
->3 byte&0xC0 0xC0 \b, Monaural
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Packet Pad
-#>2 byte &0x01 \b, Custom Flag
-#>3 byte &0x08 \b, Copyrighted
-#>3 byte &0x04 \b, Original Source
-#>3 byte&0x03 1 \b, NR: 50/15 ms
-#>3 byte&0x03 3 \b, NR: CCIT J.17
-
-# AAC (aka MPEG-2 NBC audio) and MPEG-4 audio
-
-# Stored AAC streams (instead of the MP4 format)
-0 string ADIF MPEG ADIF, AAC
-!:mime audio/x-hx-aac-adif
->4 byte &0x80
->>13 byte &0x10 \b, VBR
->>13 byte ^0x10 \b, CBR
->>16 byte&0x1E 0x02 \b, single stream
->>16 byte&0x1E 0x04 \b, 2 streams
->>16 byte&0x1E 0x06 \b, 3 streams
->>16 byte &0x08 \b, 4 or more streams
->>16 byte &0x10 \b, 8 or more streams
->>4 byte &0x80 \b, Copyrighted
->>13 byte &0x40 \b, Original Source
->>13 byte &0x20 \b, Home Flag
->4 byte ^0x80
->>4 byte &0x10 \b, VBR
->>4 byte ^0x10 \b, CBR
->>7 byte&0x1E 0x02 \b, single stream
->>7 byte&0x1E 0x04 \b, 2 streams
->>7 byte&0x1E 0x06 \b, 3 streams
->>7 byte &0x08 \b, 4 or more streams
->>7 byte &0x10 \b, 8 or more streams
->>4 byte &0x40 \b, Original Stream(s)
->>4 byte &0x20 \b, Home Source
-
-# Live or stored single AAC stream (used with MPEG-2 systems)
-0 beshort&0xFFF6 0xFFF0 MPEG ADTS, AAC
-!:mime audio/x-hx-aac-adts
->1 byte &0x08 \b, v2
->1 byte ^0x08 \b, v4
-# profile
->>2 byte &0xC0 \b LTP
->2 byte&0xc0 0x00 \b Main
->2 byte&0xc0 0x40 \b LC
->2 byte&0xc0 0x80 \b SSR
-# timing
->2 byte&0x3c 0x00 \b, 96 kHz
->2 byte&0x3c 0x04 \b, 88.2 kHz
->2 byte&0x3c 0x08 \b, 64 kHz
->2 byte&0x3c 0x0c \b, 48 kHz
->2 byte&0x3c 0x10 \b, 44.1 kHz
->2 byte&0x3c 0x14 \b, 32 kHz
->2 byte&0x3c 0x18 \b, 24 kHz
->2 byte&0x3c 0x1c \b, 22.05 kHz
->2 byte&0x3c 0x20 \b, 16 kHz
->2 byte&0x3c 0x24 \b, 12 kHz
->2 byte&0x3c 0x28 \b, 11.025 kHz
->2 byte&0x3c 0x2c \b, 8 kHz
-# channels
->2 beshort&0x01c0 0x0040 \b, monaural
->2 beshort&0x01c0 0x0080 \b, stereo
->2 beshort&0x01c0 0x00c0 \b, stereo + center
->2 beshort&0x01c0 0x0100 \b, stereo+center+LFE
->2 beshort&0x01c0 0x0140 \b, surround
->2 beshort&0x01c0 0x0180 \b, surround + LFE
->2 beshort &0x01C0 \b, surround + side
-#>1 byte ^0x01 \b, Data Verify
-#>2 byte &0x02 \b, Custom Flag
-#>3 byte &0x20 \b, Original Stream
-#>3 byte &0x10 \b, Home Source
-#>3 byte &0x08 \b, Copyrighted
-
-# Live MPEG-4 audio streams (instead of RTP FlexMux)
-0 beshort&0xFFE0 0x56E0 MPEG-4 LOAS
-!:mime audio/x-mp4a-latm
-#>1 beshort&0x1FFF x \b, %hu byte packet
->3 byte&0xE0 0x40
->>4 byte&0x3C 0x04 \b, single stream
->>4 byte&0x3C 0x08 \b, 2 streams
->>4 byte&0x3C 0x0C \b, 3 streams
->>4 byte &0x08 \b, 4 or more streams
->>4 byte &0x20 \b, 8 or more streams
->3 byte&0xC0 0
->>4 byte&0x78 0x08 \b, single stream
->>4 byte&0x78 0x10 \b, 2 streams
->>4 byte&0x78 0x18 \b, 3 streams
->>4 byte &0x20 \b, 4 or more streams
->>4 byte &0x40 \b, 8 or more streams
-# This magic isn't strong enough (matches plausible ISO-8859-1 text)
-#0 beshort 0x4DE1 MPEG-4 LO-EP audio stream
-#!:mime audio/x-mp4a-latm
-
-# Summary: FLI animation format
-# Created by: Daniel Quinlan <quinlan@yggdrasil.com>
-# Modified by (1): Abel Cheung <abelcheung@gmail.com> (avoid over-generic detection)
-4 leshort 0xAF11
-# standard FLI always has 320x200 resolution and 8 bit color
->8 leshort 320
->>10 leshort 200
->>>12 leshort 8 FLI animation, 320x200x8
-!:mime video/x-fli
->>>>6 leshort x \b, %d frames
-# frame speed is multiple of 1/70s
->>>>16 leshort x \b, %d/70s per frame
-
-# Summary: FLC animation format
-# Created by: Daniel Quinlan <quinlan@yggdrasil.com>
-# Modified by (1): Abel Cheung <abelcheung@gmail.com> (avoid over-generic detection)
-4 leshort 0xAF12
-# standard FLC always use 8 bit color
->12 leshort 8 FLC animation
-!:mime video/x-flc
->>8 leshort x \b, %d
->>10 leshort x \bx%dx8
->>6 uleshort x \b, %d frames
->>16 uleshort x \b, %dms per frame
-
-# DL animation format
-# XXX - collision with most `mips' magic
-#
-# I couldn't find a real magic number for these, however, this
-# -appears- to work. Note that it might catch other files, too, so be
-# careful!
-#
-# Note that title and author appear in the two 20-byte chunks
-# at decimal offsets 2 and 22, respectively, but they are XOR'ed with
-# 255 (hex FF)! The DL format is really bad.
-#
-#0 byte 1 DL version 1, medium format (160x100, 4 images/screen)
-#!:mime video/x-unknown
-#>42 byte x - %d screens,
-#>43 byte x %d commands
-#0 byte 2 DL version 2
-#!:mime video/x-unknown
-#>1 byte 1 - large format (320x200,1 image/screen),
-#>1 byte 2 - medium format (160x100,4 images/screen),
-#>1 byte >2 - unknown format,
-#>42 byte x %d screens,
-#>43 byte x %d commands
-# Based on empirical evidence, DL version 3 files have several nulls following the
-# \003. Most of them start with non-null values at hex offset 0x34 or so.
-#0 string \3\0\0\0\0\0\0\0\0\0\0\0 DL version 3
-
-# iso 13818 transport stream
-#
-# from Oskar Schirmer <schirmer@scara.com> Feb 3, 2001 (ISO 13818.1)
-# syncbyte 8 bit 0x47
-# error_ind 1 bit -
-# payload_start 1 bit 1
-# priority 1 bit -
-# PID 13 bit 0x0000
-# scrambling 2 bit -
-# adaptfld_ctrl 2 bit 1 or 3
-# conti_count 4 bit -
-0 belong&0xFF5FFF10 0x47400010
->188 byte 0x47 MPEG transport stream data
-!:mime video/MP2T
-!:ext ts
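The transport-stream test above encodes exactly the bit layout in the comment: sync byte 0x47, payload_unit_start set, PID 0x0000, a non-reserved adaptation_field_control, and a second sync byte one 188-byte packet later. The same check, plus PID extraction, as a small Python sketch:

def looks_like_ts(buf):
    if len(buf) < 189 or buf[0] != 0x47 or buf[188] != 0x47:
        return None
    payload_start = bool(buf[1] & 0x40)              # payload_unit_start_indicator
    pid = ((buf[1] & 0x1F) << 8) | buf[2]            # 13-bit PID
    adaptation = (buf[3] >> 4) & 0x03                # adaptation_field_control
    return payload_start, pid, adaptation

# usage: with open("capture.ts", "rb") as f: print(looks_like_ts(f.read(189)))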
-
-# Blu-ray disc Audio-Video MPEG-2 transport stream
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://en.wikipedia.org/wiki/MPEG_transport_stream
-# Note: similar to ISO 13818.1 but with 4 extra bytes per packet
-4 belong&0xFF5FFF10 =0x47400010
->196 byte =0x47 BDAV MPEG-2 Transport Stream (M2TS)
-!:mime video/MP2T
-!:ext m2ts/mts
-
-# DIF digital video file format <mpruett@sgi.com>
-0 belong&0xffffff00 0x1f070000 DIF
-!:mime video/x-dv
->4 byte &0x01 (DVCPRO) movie file
->4 byte ^0x01 (DV) movie file
->3 byte &0x80 (PAL)
->3 byte ^0x80 (NTSC)
-
-# MNG Video Format, <URL:http://www.libpng.org/pub/mng/spec/>
-0 string \x8aMNG MNG video data,
-!:mime video/x-mng
->4 belong !0x0d0a1a0a CORRUPTED,
->4 belong 0x0d0a1a0a
->>16 belong x %d x
->>20 belong x %d
-
-# JNG Video Format, <URL:http://www.libpng.org/pub/mng/spec/>
-0 string \x8bJNG JNG video data,
-!:mime video/x-jng
->4 belong !0x0d0a1a0a CORRUPTED,
->4 belong 0x0d0a1a0a
->>16 belong x %d x
->>20 belong x %d
-
-# Vivo video (Wolfram Kleff)
-3 string \x0D\x0AVersion:Vivo Vivo video data
-
-# ABC (alembic.io 3d models)
-0 string 0gawa ABC 3d model
-
-#---------------------------------------------------------------------------
-# HVQM4: compressed movie format designed by Hudson for Nintendo GameCube
-# From Mark Sheppard <msheppard@climax.co.uk>, 2002-10-03
-#
-0 string HVQM4 %s
->6 string >\0 v%s
->0 byte x GameCube movie,
->0x34 ubeshort x %d x
->0x36 ubeshort x %d,
->0x26 ubeshort x %dus,
->0x42 ubeshort 0 no audio
->0x42 ubeshort >0 %dHz audio
-
-# From: Stefan A. Haubenthal <polluks@sdf.lonestar.org>
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/VOB
-0 string DVDVIDEO-VTS Video title set,
-!:mime video/x-ifo
-!:ext ifo/bup
->0x21 byte x v%x
-0 string DVDVIDEO-VMG Video manager,
-!:mime video/x-ifo
-!:ext ifo/bup
->0x21 byte x v%x
-
-# From: Stefan A. Haubenthal <polluks@sdf.lonestar.org>
-0 string xMovieSetter MovieSetter movie
-0 string xSceneEditor MovieSetter movie
-
-# From: Behan Webster <behanw@websterwood.com>
-# NuppelVideo used by Mythtv (*.nuv)
-# Note: there are two identical stanzas here differing only in the
-# initial string matched. It used to be done with a regex, but we're
-# trying to get rid of those.
-0 string NuppelVideo MythTV NuppelVideo
->12 string x v%s
->20 lelong x (%d
->24 lelong x \bx%d),
->36 string P \bprogressive,
->36 string I \binterlaced,
->40 ledouble x \baspect:%.2f,
->48 ledouble x \bfps:%.2f
-0 string MythTV MythTV NuppelVideo
->12 string x v%s
->20 lelong x (%d
->24 lelong x \bx%d),
->36 string P \bprogressive,
->36 string I \binterlaced,
->40 ledouble x \baspect:%.2f,
->48 ledouble x \bfps:%.2f
-
-# MPEG file
-# MPEG sequences
-# FIXME: This section is from the old magic.mime file and needs
-# integrating with the rest
-#0 belong 0x000001BA
-#>4 byte &0x40
-#!:mime video/mp2p
-#>4 byte ^0x40
-#!:mime video/mpeg
-#0 belong 0x000001BB
-#!:mime video/mpeg
-#0 belong 0x000001B0
-#!:mime video/mp4v-es
-#0 belong 0x000001B5
-#!:mime video/mp4v-es
-#0 belong 0x000001B3
-#!:mime video/mpv
-#0 belong&0xFF5FFF10 0x47400010
-#!:mime video/mp2t
-#0 belong 0x00000001
-#>4 byte&0x1F 0x07
-#!:mime video/h264
-
-# Type: Bink Video
-# Extension: .bik
-# URL: https://wiki.multimedia.cx/index.php?title=Bink_Container
-# From: <hoehle@users.sourceforge.net> 2008-07-18
-0 name bik
-#>4 ulelong x size %d
->20 ulelong x \b, %d
->24 ulelong x \bx%d
->8 ulelong x \b, %d frames
->32 ulelong x at rate %d/
->28 ulelong >1 \b%d
->40 ulelong =0 \b, no audio
->40 ulelong !0 \b, %d audio track
->>40 ulelong !1 \bs
-# follow properties of the first audio track only
->>48 uleshort x %dHz
->>51 byte&0x20 0 mono
->>51 byte&0x20 !0 stereo
-#>>51 byte&0x10 0 FFT
-#>>51 byte&0x10 !0 DCT
-
-0 string BIK
->3 regex =[bdfghi] Bink Video rev.%s
->>0 use bik
-
-0 string KB2
->3 regex =[adfghi] Bink Video 2 rev.%s
->>0 use bik
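The named bik rule reads its numbers at fixed little-endian offsets (frame count at 8, width/height at 20/24, frame-rate numerator/divisor at 32/28, audio-track count at 40). The sketch below simply mirrors those offsets; they are taken from the rule above, not re-derived from the Bink container spec:

import struct

def bik_info(path):
    with open(path, "rb") as f:
        hdr = f.read(52)
    if hdr[:3] not in (b"BIK", b"KB2"):
        return None
    frames = struct.unpack_from("<I", hdr, 8)[0]
    width, height = struct.unpack_from("<II", hdr, 20)
    fps_div = struct.unpack_from("<I", hdr, 28)[0] or 1
    fps_num = struct.unpack_from("<I", hdr, 32)[0]
    audio_tracks = struct.unpack_from("<I", hdr, 40)[0]
    return width, height, frames, fps_num / fps_div, audio_tracks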
-
-# Type: NUT Container
-# URL: https://wiki.multimedia.cx/index.php?title=NUT
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-0 string nut/multimedia\ container\0 NUT multimedia container
-
-# Type: Nullsoft Video (NSV)
-# URL: https://wiki.multimedia.cx/index.php?title=Nullsoft_Video
-# From: Mike Melanson <mike@multimedia.cx>
-0 string NSVf Nullsoft Video
-
-# Type: REDCode Video
-# URL: https://www.red.com/ ; https://wiki.multimedia.cx/index.php?title=REDCode
-# From: Mike Melanson <mike@multimedia.cx>
-4 string RED1 REDCode Video
-
-# Type: MTV Multimedia File
-# URL: https://wiki.multimedia.cx/index.php?title=MTV
-# From: Mike Melanson <mike@multimedia.cx>
-0 string AMVS MTV Multimedia File
-
-# Type: ARMovie
-# URL: https://wiki.multimedia.cx/index.php?title=ARMovie
-# From: Mike Melanson <mike@multimedia.cx>
-0 string ARMovie\012 ARMovie
-
-# Type: Interplay MVE Movie
-# URL: https://wiki.multimedia.cx/index.php?title=Interplay_MVE
-# From: Mike Melanson <mike@multimedia.cx>
-0 string Interplay\040MVE\040File\032 Interplay MVE Movie
-
-# Type: Windows Television DVR File
-# URL: https://wiki.multimedia.cx/index.php?title=WTV
-# From: Mike Melanson <mike@multimedia.cx>
-# This takes the form of a Windows-style GUID
-0 bequad 0xB7D800203749DA11
->8 bequad 0xA64E0007E95EAD8D Windows Television DVR Media
-
-# Type: Sega FILM/CPK Multimedia
-# URL: https://wiki.multimedia.cx/index.php?title=Sega_FILM
-# From: Mike Melanson <mike@multimedia.cx>
-0 string FILM Sega FILM/CPK Multimedia,
->32 belong x %d x
->28 belong x %d
-
-# Type: Nintendo THP Multimedia
-# URL: https://wiki.multimedia.cx/index.php?title=THP
-# From: Mike Melanson <mike@multimedia.cx>
-0 string THP\0 Nintendo THP Multimedia
-
-# Type: BBC Dirac Video
-# URL: https://wiki.multimedia.cx/index.php?title=Dirac
-# From: Mike Melanson <mike@multimedia.cx>
-0 string BBCD BBC Dirac Video
-
-# Type: RAD Game Tools Smacker Multimedia
-# URL: https://wiki.multimedia.cx/index.php?title=Smacker
-# From: Mike Melanson <mike@multimedia.cx>
-0 string SMK RAD Game Tools Smacker Multimedia
->3 byte x version %c,
->4 lelong x %d x
->8 lelong x %d,
->12 lelong x %d frames
-
-# Material Exchange Format
-# More information:
-# https://en.wikipedia.org/wiki/Material_Exchange_Format
-# http://www.freemxf.org/
-0 string \x06\x0e\x2b\x34\x02\x05\x01\x01\x0d\x01\x02\x01\x01\x02 Material exchange container format
-!:ext mxf
-!:mime application/mxf
-
-# Recognize LucasArts Smush video files (cf.
-# https://wiki.multimedia.cx/index.php/Smush)
-0 string ANIM
->8 string AHDR LucasArts Smush Animation Format (SAN) video
-0 string SANM
->8 string SHDR LucasArts Smush v2 (SANM) video
-
-# Type: Scaleform video
-# Extension: .usm
-# URL: https://wiki.multimedia.cx/index.php/USM
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-0 string CRID
->32 string @UTF Scaleform video
-
-# http://www.jerrysguide.com/tips/demystify-tvs-file-format.html
-0 string TVS\015\012
->&0 string Version\040 TeamViewer Session File
->>&0 string x \b, version %s
-
-# SER file format - simple uncompressed video format for astronomical use
-# Initially developed by Lucam Recorder,
-# as of 2021 maintained by Heiko Wilkens, Grischa Hahn
-# Typical extensions: .SER
-# http://www.grischa-hahn.homepage.t-online.de/astro/ser/SER%20Doc%20V3b.pdf
-0 string LUCAM-RECORDER SER video sequence
-!:ext ser
->18 lelong 0 \b, bayer: mono
->18 lelong 8 \b, bayer: RGGB
->18 lelong 9 \b, bayer: GRBG
->18 lelong 10 \b, bayer: GBRG
->18 lelong 11 \b, bayer: BGGR
->18 lelong 16 \b, bayer: CYYM
->18 lelong 17 \b, bayer: YCMY
->18 lelong 18 \b, bayer: YMCY
->18 lelong 19 \b, bayer: MYYC
->18 lelong 100 \b, bayer: RGB
->18 lelong 101 \b, bayer: BGR
->22 lelong 0 \b, big-endian
->22 lelong 1 \b, little-endian
->26 lelong x \b, width: %d
->30 lelong x \b, height: %d
->34 lelong x \b, %d bit
->38 lelong x \b, frames: %d
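The SER header tested above is a run of little-endian 32-bit fields after the 14-byte "LUCAM-RECORDER" signature: color/bayer ID at 18, endianness flag at 22, then width, height, bit depth and frame count. A compact Python mirror of those offsets, for illustration:

import struct

def ser_info(path):
    with open(path, "rb") as f:
        hdr = f.read(42)
    if not hdr.startswith(b"LUCAM-RECORDER"):
        return None
    color_id, little_endian, width, height, depth, frames = struct.unpack_from("<6i", hdr, 18)
    return {"color_id": color_id, "little_endian": bool(little_endian),
            "width": width, "height": height, "bits_per_pixel": depth, "frames": frames}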
-
-# https://wiki.multimedia.cx/index.php/Duck_IVF
-0 string DKIF Duck IVF video file
-!:mime video/x-ivf
->4 leshort >0 \b, version %d
->8 string x \b, codec %s
->12 leshort x \b, %d
->14 leshort x \bx%d
->24 lelong >0 \b, %d frames
diff --git a/contrib/libs/libmagic/magic/Magdir/aout b/contrib/libs/libmagic/magic/Magdir/aout
deleted file mode 100644
index 69b6ec60d8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/aout
+++ /dev/null
@@ -1,46 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: aout,v 1.1 2013/01/09 22:37:23 christos Exp $
-# aout: file(1) magic for a.out executable/object/etc entries that
-# handle executables on multiple platforms.
-#
-
-#
-# Little-endian 32-bit-int a.out, merged from bsdi (for BSD/OS, from
-# BSDI), netbsd, and vax (for UNIX/32V and BSD)
-#
-# XXX - is there anything we can look at to distinguish BSD/OS 386 from
-# NetBSD 386 from various VAX binaries? The BSD/OS shared library flag
-# works only for binaries using shared libraries. Grabbing the entry
-# point from the a.out header, using it to find the first code executed
-# in the program, and looking at that might help.
-#
-0 lelong 0407 a.out little-endian 32-bit executable
->16 lelong >0 not stripped
->32 byte 0x6a (uses BSD/OS shared libs)
-
-0 lelong 0410 a.out little-endian 32-bit pure executable
->16 lelong >0 not stripped
->32 byte 0x6a (uses BSD/OS shared libs)
-
-0 lelong 0413 a.out little-endian 32-bit demand paged pure executable
->16 lelong >0 not stripped
->32 byte 0x6a (uses BSD/OS shared libs)
-
-#
-# Big-endian 32-bit-int a.out, merged from sun (for old 68010 SunOS a.out),
-# mips (for old 68020(!) SGI a.out), and netbsd (for old big-endian a.out).
-#
-# XXX - is there anything we can look at to distinguish old SunOS 68010
-# from old 68020 IRIX from old NetBSD? Again, I guess we could look at
-# the first instruction or instructions in the program.
-#
-0 belong 0407 a.out big-endian 32-bit executable
->16 belong >0 not stripped
-
-0 belong 0410 a.out big-endian 32-bit pure executable
->16 belong >0 not stripped
-
-0 belong 0413 a.out big-endian 32-bit demand paged executable
->16 belong >0 not stripped
-
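The a.out entries above only distinguish the three classic magic numbers (0407, 0410, 0413) in either byte order and report "not stripped" when the symbol table size at offset 16 is non-zero. A rough Python equivalent of that check (the OMAGIC/NMAGIC/ZMAGIC names are the traditional ones, not taken from this file):

import struct

AOUT_KINDS = {0o407: "executable (OMAGIC)",
              0o410: "pure executable (NMAGIC)",
              0o413: "demand paged executable (ZMAGIC)"}

def classify_aout(buf):
    # Try both byte orders of the 32-bit magic word, as the entries above do.
    for fmt, label in (("<I", "little-endian"), (">I", "big-endian")):
        magic = struct.unpack_from(fmt, buf, 0)[0]
        if magic in AOUT_KINDS:
            stripped = struct.unpack_from(fmt.lower(), buf, 16)[0] == 0
            return "a.out %s 32-bit %s%s" % (label, AOUT_KINDS[magic],
                                             "" if stripped else ", not stripped")
    return None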
diff --git a/contrib/libs/libmagic/magic/Magdir/apache b/contrib/libs/libmagic/magic/Magdir/apache
deleted file mode 100755
index d896b50551..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/apache
+++ /dev/null
@@ -1,28 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: apache,v 1.1 2017/04/11 14:52:15 christos Exp $
-# apache: file(1) magic for Apache Big Data formats
-
-# Avro files
-0 string Obj Apache Avro
->3 byte x version %d
-
-# ORC files
-# Important information is in file footer, which we can't index to :(
-0 string ORC Apache ORC
-
-# Parquet files
-0 string PAR1 Apache Parquet
-
-# Hive RC files
-0 string RCF Apache Hive RC file
->3 byte x version %d
-
-# Sequence files (and the careless first version of RC file)
-
-0 string SEQ
->3 byte <6 Apache Hadoop Sequence file version %d
->3 byte >6 Apache Hadoop Sequence file version %d
->3 byte =6
->>5 string org.apache.hadoop.hive.ql.io.RCFile$KeyBuffer Apache Hive RC file version 0
->>3 default x Apache Hadoop Sequence file version 6
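These Apache tests are all short prefix checks, plus one disambiguation: a SEQ file with version 6 whose key class at offset 5 is RCFile$KeyBuffer is really an early Hive RC file. A small Python sketch of the same decision tree (an illustration of the logic above, not the file(1) implementation):

def apache_big_data_kind(buf):
    # Mirrors the prefix tests above; buf is the start of the file.
    if buf.startswith(b"Obj"):
        return "Apache Avro, version %d" % buf[3]
    if buf.startswith(b"ORC"):
        return "Apache ORC"
    if buf.startswith(b"PAR1"):
        return "Apache Parquet"
    if buf.startswith(b"RCF"):
        return "Apache Hive RC file, version %d" % buf[3]
    if buf.startswith(b"SEQ"):
        version = buf[3]
        if version == 6 and buf[5:].startswith(b"org.apache.hadoop.hive.ql.io.RCFile$KeyBuffer"):
            return "Apache Hive RC file, version 0"
        return "Apache Hadoop Sequence file, version %d" % version
    return None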
diff --git a/contrib/libs/libmagic/magic/Magdir/apl b/contrib/libs/libmagic/magic/Magdir/apl
deleted file mode 100644
index d717e377dc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/apl
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: apl,v 1.6 2009/09/19 16:28:07 christos Exp $
-# apl: file(1) magic for APL (see also "pdp" and "vax" for other APL
-# workspaces)
-#
-0 long 0100554 APL workspace (Ken's original?)
diff --git a/contrib/libs/libmagic/magic/Magdir/apple b/contrib/libs/libmagic/magic/Magdir/apple
deleted file mode 100644
index 547b0ac20a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/apple
+++ /dev/null
@@ -1,773 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: apple,v 1.48 2023/05/01 14:20:21 christos Exp $
-# apple: file(1) magic for Apple file formats
-#
-0 search/1/t FiLeStArTfIlEsTaRt binscii (apple ][) text
-0 string \x0aGL Binary II (apple ][) data
-0 string \x76\xff Squeezed (apple ][) data
-0 string NuFile NuFile archive (apple ][) data
-0 string N\xf5F\xe9l\xe5 NuFile archive (apple ][) data
-0 belong 0x00051600 AppleSingle encoded Macintosh file
-0 belong 0x00051607 AppleDouble encoded Macintosh file
-
-# Type: Apple Emulator A2R format
-# From: Greg Wildman <greg@apple2.org.za>
-# Ref: https://applesaucefdc.com/a2r2-reference/
-# Ref: https://applesaucefdc.com/a2r/
-0 string A2R
->3 string \x31\xFF\x0A\x0D\x0A Applesauce A2R 1.x Disk Image
->3 string \x32\xFF\x0A\x0D\x0A Applesauce A2R 2.x Disk Image
->3 string \x33\xFF\x0A\x0D\x0A Applesauce A2R 3.x Disk Image
->8 string INFO
->>49 byte 01 \b, 5.25″ SS 40trk
->>49 byte 02 \b, 3.5″ DS 80trk
->>49 byte 03 \b, 5.25″ DS 80trk
->>49 byte 04 \b, 5.25″ DS 40trk
->>49 byte 05 \b, 3.5″ DS 80trk
->>49 byte 06 \b, 8″ DS
->>50 byte 01 \b, write protected
->>51 byte 01 \b, cross track synchronized
->>17 string/T x \b, %.32s
-
-# Type: Apple Emulator WOZ format
-# From: Greg Wildman <greg@apple2.org.za>
-# Ref: https://applesaucefdc.com/woz/reference/
-# Ref: https://applesaucefdc.com/woz/reference2/
-0 string WOZ
->3 string \x31\xFF\x0A\x0D\x0A Apple ][ WOZ 1.0 Disk Image
->3 string \x32\xFF\x0A\x0D\x0A Apple ][ WOZ 2.0 Disk Image
->12 string INFO
->>21 byte 01 \b, 5.25 inch
->>21 byte 02 \b, 3.5 inch
->>22 byte 01 \b, write protected
->>23 byte 01 \b, cross track synchronized
->>25 string/T x \b, %.32s
-
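The WOZ test above anchors on "WOZ" plus an ASCII version digit and the \xFF\x0A\x0D\x0A guard bytes, then peeks into the INFO chunk that follows at offset 12. A hedged Python sketch of those same offsets (not a full WOZ parser):

def woz_summary(buf):
    # "WOZ" + version digit + FF 0A 0D 0A, as in the entry above.
    if buf[0:3] != b"WOZ" or buf[4:8] != b"\xFF\x0A\x0D\x0A":
        return None
    info = {"version": chr(buf[3])}
    if buf[12:16] == b"INFO":
        info["disk"] = {1: "5.25 inch", 2: "3.5 inch"}.get(buf[21], "unknown")
        info["write_protected"] = bool(buf[22])
        info["cross_track_sync"] = bool(buf[23])
        info["creator"] = buf[25:57].decode("ascii", "replace").rstrip()
    return info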
-# Type: Apple Macintosh Emulator MOOF format
-# From: Greg Wildman <greg@apple2.org.za>
-# Ref: https://applesaucefdc.com/moof-reference/
-0 string MOOF
->4 string \xFF\x0A\x0D\x0A Apple Macintosh MOOF Disk Image
->12 string INFO
->>21 byte 01 \b, SSDD GCR (400K)
->>21 byte 02 \b, DSDD GCR (800K)
->>21 byte 03 \b, DSHD MFM (1.44M)
->>22 byte 01 \b, write protected
->>23 byte 01 \b, cross track synchronized
->>25 string/T x \b, %.32s
-
-# Type: Apple Emulator disk images
-# From: Greg Wildman <greg@apple2.org.za>
-# ProDOS boot loader?
-0 string \x01\x38\xB0\x03\x4C Apple ProDOS Image
-# Detect Volume Directory block ($02)
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
-# ProDOS ordered ?
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-#
-# Proboot HD
-0 string \x01\x8A\x48\xD8\x2C\x82\xC0\x8D\x0E\xC0\x8D\x0C Apple ProDOS ProBoot Image
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-0 string \x01\xA8\x8A\x20\x7B\xF8\x29\x07\x09\xC0\x99\x30 Apple ProDOS ProBoot Image
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-0 string \x01\x4A\xD0\x34\xE6\x3D\x8A\x20\x7B\xF8\x09\xC0 Apple ProDOS ProBoot Image
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-#
-# ProDOS formatted
-0 string \x01\xBD\x88\xC0\x20\x2F\xFB\x20\x58\xFC\x20\x40 Apple ProDOS Unbootable Image
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-0 string \x01\x38\xB0\x03\x4C\x1C\x09\x78\x86\x43\xC9\x03 Apple ProDOS Unbootable Image
->0x400 string \x00\x00\x03\x00
->>0x404 byte &0xF0
->>>0x405 string x \b, Volume /%s
->>>0x429 uleshort x \b, %u Blocks
->0xb00 string \x00\x00\x03\x00
->>0xb04 byte &0xF0
->>>0xb05 string x \b, Volume /%s
->>>0xb29 uleshort x \b, %u Blocks
-#
-# DOS3 boot loader
-0 string \x01\xA5\x27\xC9\x09\xD0
->0x11001 byte 0x11
->>0x11003 ubyte x Apple DOS 3.%u Image
->>0x11006 ubyte x \b, Volume #%03u
->>0x11034 ubyte x \b, %u Tracks
->>0x11035 ubyte x \b, %u Sectors
->>0x11036 uleshort x \b, %u bytes per sector
-#
-# DOS3 uninitialized disk
-0 string \x01\xA6\x2B\xBD\x88\xC0\x8A\x4A\x4A
->0x11001 byte 0x11
->>0x11003 ubyte x Apple DOS 3.%u Unbootable Image
->>>0x11006 ubyte x \b, Volume #%03u
->>>0x11034 ubyte x \b, %u Tracks
->>>0x11035 ubyte x \b, %u Sectors
->>>0x11036 uleshort x \b, %u bytes per sector
-#
-# Pascal boot loader?
-0 string \x01\xE0\x60\xF0\x03\x4C\xE3\x08\xAD
->0xd6 pstring SYSTEM.APPLE
->>0xb00 leshort 0x0000
->>>0xb04 leshort 0x0000 Apple Pascal Image
->>>>0xb06 pstring x \b, Volume %s:
->>>>0xb0e leshort x \b, %u Blocks
->>>>0xb10 leshort x \b, %u Files
-#
-# Diversi Dos boot loader?
-0 string \x01\xA8\xAD\x81\xC0\xEE\x09\x08\xAD
->0x11001 string \x11\x0F\x03 Apple Diversi Dos Image
->>0x11006 byte x \b, Volume %u
->>0x11034 byte x \b, %u Tracks
->>0x11035 byte x \b, %u Sectors
->>0x11036 leshort x \b, %u bytes per sector
-
-# Type: Apple Emulator 2IMG format
-# From: Radek Vokal <rvokal@redhat.com>
-# Update: Greg Wildman <greg@apple2.org.za>
-0 string 2IMG Apple ][ 2IMG Disk Image
->4 clear x
->4 string XGS! \b, XGS
->4 string CTKG \b, Catakig
->4 string ShIm \b, Sheppy's ImageMaker
->4 string SHEP \b, Sheppy's ImageMaker
->4 string WOOF \b, Sweet 16
->4 string B2TR \b, Bernie ][ the Rescue
->4 string \!nfc \b, ASIMOV2
->4 string \>BD\< \b, Brutal Deluxe's Cadius
->4 string CdrP \b, CiderPress
->4 string Vi][ \b, Virtual ][
->4 string PRFS \b, ProFUSE
->4 string FISH \b, FishWings
->4 string RVLW \b, Revival for Windows
->4 default x
->>4 string x \b, Creator tag "%-4.4s"
->0xc byte 00 \b, DOS 3.3 sector order
->>0x10 byte 00 \b, Volume 254
->>0x10 byte&0x7f x \b, Volume %u
->0xc byte 01 \b, ProDOS sector order
-# Detect Volume Directory block ($02) + 2mg header offset
->>0x440 string \x00\x00\x03\x00
->>>0x444 byte &0xF0
->>>>0x445 string x \b, Volume /%s
->>>>0x469 uleshort x \b, %u Blocks
->0xc byte 02 \b, NIB data
-
-# Type: Peter Ferrie QBoot
-# From: Greg Wildman <greg@apple2.org.za>
-# Ref: https://github.com/peterferrie/qboot
-0 string \x01\x4A\xA8\x69\x0F\x85\x27\xC9
->8 string \x12\xF0\x10\xE6\x3D\x86\xDA\x8A Apple ][ QBoot Image
-
-# Type: Peter Ferrie 0Boot
-# From: Greg Wildman <greg@apple2.org.za>
-# Ref: https://github.com/peterferrie/0boot
-0 string \x01\x4A\xA8\x69\x0F\x85\x27\xC9
->8 string \x12\xF0\x10\xE6\x3D\x86\xDA\x8A Apple ][ 0Boot Image
-
-# Different proprietary boot sectors
-0 string \x01\x0F\x21\x74\x00\x01\x6B\x00\x02\x30\x81\x5D Apple ][ Disk Image
-0 string \x01\x20\x58\xFC\xA2\x00\x8E\x78\x04\x8E\xF4\x03 Apple ][ Disk Image
-0 string \x01\x20\x58\xFC\xAD\x51\xC0\xAD\x54\xC0\xA6\x2B Apple ][ Disk Image
-0 string \x01\x20\x89\xFE\x20\x93\xFE\xA6\x2B\xBD\x88\xC0 Apple ][ Disk Image
-0 string \x01\x20\x93\xFE\x20\x89\xFE\x4C\x25\x08\x68\x85 Apple ][ Disk Image
-0 string \x01\x20\x93\xFE\x20\x89\xFE\x4C\x2D\x08\x68\x85 Apple ][ Disk Image
-0 string \x01\x38\x90\x2A\xC9\x01\xF0\x33\xA8\xC8\xC0\x10 Apple ][ Disk Image
-0 string \x01\x38\xB0\x03\x4C\x32\xA1\x87\x43\xC9\x03\x08 Apple ][ Disk Image
-0 string \x01\x4C\x04\x08\xA9\x2A\x8D\x02\x08\x86\x2B\xEE Apple ][ Disk Image
-0 string \x01\x4C\x60\x08\x09\xD0\x18\xA5\x2B\x4A\x4A\x4A Apple ][ Disk Image
-0 string \x01\x4C\x92\x08\x01\x08\xA2\x00\xB5\x00\x9D\x00 Apple ][ Disk Image
-0 string \x01\x4C\xB3\x08\x09\xD0\x18\xA5\x2B\x4A\x4A\x4A Apple ][ Disk Image
-0 string \x01\x8D\xFB\x03\x8E\xFC\x03\x8C\xFD\x03\x8A\x29 Apple ][ Disk Image
-0 string \x01\xA2\xFF\x9A\xD8\x20\x20\x08\x20\x34\x08\xAD Apple ][ Disk Image
-0 string \x01\xA5\x27\xBD\x88\xC0\x2C\x10\xC0\xA2\x00\xA9 Apple ][ Disk Image
-0 string \x01\xA5\x2B\xAE\x51\xC0\xEA\xAA\xBD\x88\xC0\x20 Apple ][ Disk Image
-0 string \x01\xA6\x27\xBD\x0B\x08\x48\xBD\x0A\x08\x48\x85 Apple ][ Disk Image
-0 string \x01\xA6\x2B\xBD\x88\xC0\x20\x58\xFC\xA9\x01\x85 Apple ][ Disk Image
-0 string \x01\xA6\x2B\xBD\x88\xC0\x20\x58\xFC\xA9\x25\x85 Apple ][ Disk Image
-0 string \x01\xA8\xC0\x0F\x90\x16\xF0\x12\xA0\xFF\x18\xAD Apple ][ Disk Image
-0 string \x01\xA9\x00\x85\xF0\xA9\x04\x85\xF1\xA0\x00\xA9 Apple ][ Disk Image
-0 string \x01\xA9\x5C\x8D\xF2\x03\xA9\xC6\x8D\xF3\x03\x49 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\x20\x2F\xFB\x20\x58\xFC Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\x20\x49\x08\xA9\x0A\x85 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\x2C\x82\xC0\xBD\x88\xC0 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\x86\x43\x8A\x4A\x4A\x4A Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\xA2\x00\x86\xFF\xB5\x00 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\xA2\x00\xB5\x00\x9D\x00 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\xA9\xB2\x8D\xF2\x03\xA9 Apple ][ Disk Image
-0 string \x01\xA9\x60\x8D\x01\x08\xA9\xFF\x8D\xF3\x03\x8D Apple ][ Disk Image
-0 string \x01\xAC\x00\x08\xF0\x19\xB9\x30\x08\x85\x3D\xCE Apple ][ Disk Image
-0 string \x01\xAC\x23\x08\x30\x2E\xB9\x24\x08\x85\x3D\xCE Apple ][ Disk Image
-0 string \x01\xAD\x00\x08\xC9\x09\xB0\x20\x69\x02\x8D\x00 Apple ][ Disk Image
-0 string \x01\xB0\x00\xA9\x3C\x8D\x02\x08\x86\x2B\x8A\x4A Apple ][ Disk Image
-0 string \x01\xB0\x00\xA9\x3C\x8D\x02\x08\xA9\xF5\x8D\xF2 Apple ][ Disk Image
-0 string \x01\xB0\x00\xA9\x3F\x8D\x02\x08\x86\x2B\x8E\xF4 Apple ][ Disk Image
-0 string \x01\xB0\x00\xA9\x48\x8D\x02\x08\x86\x2B\x8E\xF4 Apple ][ Disk Image
-0 string \x01\xBD\x88\xC0\x8A\x4A\x4A\x4A\x4A\x09\xC0\x8D Apple ][ Disk Image
-0 string \x01\xBD\x88\xC0\x8A\x4A\x4A\x4A\x4A\x8D\x2F\x08 Apple ][ Disk Image
-0 string \x01\xD8\x2C\x81\xC0\xA9\x60\x4D\x58\xFF\xD0\xFE Apple ][ Disk Image
-0 string \x01\xD8\x78\xBD\x88\xC0\xA9\xFD\x85\x37\x85\x39 Apple ][ Disk Image
-0 string \x01\xE0\x60\xF0\x03\x4C\x16\x09\xAD\x00\x08\xC9 Apple ][ Disk Image
-0 string \x01\xE0\x60\xF0\x03\x4C\xCB\x08\xAD\x00\x08\xC9 Apple ][ Disk Image
-0 string \x01\xE0\x60\xF0\x03\x4C\xEE\x08\xAD\x00\x08\xC9 Apple ][ Disk Image
-0 string \x01\xE0\x60\xF0\x03\x4C\xEF\x08\xAD\x00\x08\xC9 Apple ][ Disk Image
-0 string \x01\xE0\x70\xB0\x04\xE0\x40\xB0\x39\xBD\x88\xC0 Apple ][ Disk Image
-0 string \x01\xEA\x8D\xF4\x03\xA9\x60\x9D\x88\xC0\x8D\x51 Apple ][ Disk Image
-
-# magic for Newton PDA package formats
-# from Ruda Moura <ruda@helllabs.org>
-0 string package0 Newton package, NOS 1.x,
->12 belong &0x80000000 AutoRemove,
->12 belong &0x40000000 CopyProtect,
->12 belong &0x10000000 NoCompression,
->12 belong &0x04000000 Relocation,
->12 belong &0x02000000 UseFasterCompression,
->16 belong x version %d
-
-0 string package1 Newton package, NOS 2.x,
->12 belong &0x80000000 AutoRemove,
->12 belong &0x40000000 CopyProtect,
->12 belong &0x10000000 NoCompression,
->12 belong &0x04000000 Relocation,
->12 belong &0x02000000 UseFasterCompression,
->16 belong x version %d
-
-0 string package4 Newton package,
->8 byte 8 NOS 1.x,
->8 byte 9 NOS 2.x,
->12 belong &0x80000000 AutoRemove,
->12 belong &0x40000000 CopyProtect,
->12 belong &0x10000000 NoCompression,
-
-# The following entries for the Apple II are for files that have
-# been transferred as raw binary data from an Apple, without having
-# been encapsulated by any of the above archivers.
-#
-# In general, Apple II formats are hard to identify because Apple DOS
-# and especially Apple ProDOS have strong typing in the file system and
-# therefore programmers never felt much need to include type information
-# in the files themselves.
-#
-# Eric Fischer <enf@pobox.com>
-
-# AppleWorks word processor:
-# URL: https://en.wikipedia.org/wiki/AppleWorks
-# Reference: http://www.gno.org/pub/apple2/doc/apple/filetypes/ftn.1a.xxxx
-# Update: Joerg Jenderek
-# NOTE:
-# The "O" is really the magic number, but that's so common that it's
-# necessary to check the tab stops that follow it to avoid false positives.
-# and/or look for unused bits of boolean bytes like zoom, paginated, mail merge
-# the newer AppleWorks is from Claris, with extension CWK
-4 string O
-# test for unused bits of zoom- , paginated-boolean bytes
->84 ubequad ^0x00Fe00000000Fe00
-# look for tabstop definitions "=" no tab, "|" no tab
-# "<" left tab,"^" center tab,">" right tab, "." decimal tab,
-# unofficial "!" other , "\x8a" other
-# official only if SFMinVers is nonzero
->>5 regex/s [=.<>|!^\x8a]{79} AppleWorks Word Processor
-# AppleWorks Word Processor File (Apple II)
-# ./apple (version 5.25) labeled the entry as "AppleWorks word processor data"
-# application/x-appleworks is mime type for claris version with cwk extension
-!:mime application/x-appleworks3
-# http://home.earthlink.net/~hughhood/appleiiworksenvoy/
-# ('p' + 1-byte ProDOS File Type + 2-byte ProDOS Aux Type')
-# $70 $1A $F8 $FF is this the apple type ?
-#:apple pdosp^Z\xf8\xff
-!:ext awp
-# minimum version needed to read these files. SFMinVers (0, 30~3.0)
->>>183 ubyte 30 3.0
->>>183 ubyte !30
->>>>183 ubyte !0 %#x
-# usual tabstop start sequence "=====<"
->>>5 string x \b, tabstop ruler "%6.6s"
-# tabstop ruler
-#>>>5 string >\0 \b, tabstops "%-79s"
-# zoom switch
->>>85 byte&0x01 >0 \b, zoomed
-# whether paginated
->>>90 byte&0x01 >0 \b, paginated
-# contains any mail-merge commands
->>>92 byte&0x01 >0 \b, with mail merge
-# left margin in 1/10 inches ( normally 0 or 10 )
->>>91 ubyte >0
->>>>91 ubyte x \b, %d/10 inch left margin
-
-# AppleWorks database:
-#
-# This isn't really a magic number, but it's the closest thing to one
-# that I could find. The 1 and 2 really mean "order in which you defined
-# categories" and "left to right, top to bottom," respectively; the D and R
-# mean that the cursor should move either down or right when you press Return.
-
-#30 string \x01D AppleWorks database data
-#30 string \x02D AppleWorks database data
-#30 string \x01R AppleWorks database data
-#30 string \x02R AppleWorks database data
-
-# AppleWorks spreadsheet:
-#
-# Likewise, this isn't really meant as a magic number. The R or C means
-# row- or column-order recalculation; the A or M means automatic or manual
-# recalculation.
-
-#131 string RA AppleWorks spreadsheet data
-#131 string RM AppleWorks spreadsheet data
-#131 string CA AppleWorks spreadsheet data
-#131 string CM AppleWorks spreadsheet data
-
-# Applesoft BASIC:
-#
-# This is incredibly sloppy, but will be true if the program was
-# written at its usual memory location of 2048 and its first line
-# number is less than 256. Yuck.
-# update by Joerg Jenderek at Feb 2013
-
-# GRR: this test is still too general, as it also catches Gujin BOOT144.SYS (0xfa080000)
-#0 belong&0xff00ff 0x80000 Applesoft BASIC program data
-0 belong&0x00ff00ff 0x00080000
-# assuming that line number must be positive
->2 leshort >0 Applesoft BASIC program data, first line number %d
-#>2 leshort x \b, first line number %d
-
-# ORCA/EZ assembler:
-#
-# This will not identify ORCA/M source files, since those have
-# some sort of date code instead of the two zero bytes at 6 and 7
-# XXX Conflicts with ELF
-#4 belong&0xff00ffff 0x01000000 ORCA/EZ assembler source data
-#>5 byte x \b, build number %d
-
-# Broderbund Fantavision
-#
-# I don't know what these values really mean, but they seem to recur.
-# Will they cause too many conflicts?
-
-# Probably :-)
-#2 belong&0xFF00FF 0x040008 Fantavision movie data
-
-# Some attempts at images.
-#
-# These are actually just bit-for-bit dumps of the frame buffer, so
-# there's really no reasonable way to distinguish them except for their
-# address (if preserved) -- 8192 or 16384 -- and their length -- 8192
-# or, occasionally, 8184.
-#
-# Nevertheless this will manage to catch a lot of images that happen
-# to have a solid-colored line at the bottom of the screen.
-
-# GRR: Magic too weak
-#8144 string \x7F\x7F\x7F\x7F\x7F\x7F\x7F\x7F Apple II image with white background
-#8144 string \x55\x2A\x55\x2A\x55\x2A\x55\x2A Apple II image with purple background
-#8144 string \x2A\x55\x2A\x55\x2A\x55\x2A\x55 Apple II image with green background
-#8144 string \xD5\xAA\xD5\xAA\xD5\xAA\xD5\xAA Apple II image with blue background
-#8144 string \xAA\xD5\xAA\xD5\xAA\xD5\xAA\xD5 Apple II image with orange background
-
-# Beagle Bros. Apple Mechanic fonts
-
-0 belong&0xFF00FFFF 0x6400D000 Apple Mechanic font
-
-# Apple Universal Disk Image Format (UDIF) - dmg files.
-# From Johan Gade.
-# These entries are disabled for now until we fix the following issues.
-#
-# Note that there might be some problems with the "VAX COFF executable"
-# entry. Also note that this entry should be placed before the mac filesystem
-# section, particularly the "Apple Partition data" entry.
-#
-# The intended meaning of these tests is, that the file is only of the
-# specified type if both of the lines are correct - i.e. if the first
-# line matches and the second doesn't then it is not of that type.
-#
-#0 long 0x7801730d
-#>4 long 0x62626060 UDIF read-only zlib-compressed image (UDZO)
-#
-# Note that this entry is recognized correctly by the "Apple Partition
-# data" entry - however since this entry is more specific - this
-# information seems to be more useful.
-#0 long 0x45520200
-#>0x410 string disk\ image UDIF read/write image (UDRW)
-
-# From: Toby Peterson <toby@apple.com>
-# From https://www.nationalarchives.gov.uk/pronom/fmt/866
-0 string bplist00
->8 search/500 WebMainResource Apple Safari Webarchive
-!:mime application/x-webarchive
-!:strength +50
-0 string bplist00 Apple binary property list
-!:mime application/x-bplist
-
-# Apple binary property list (bplist)
-# Assumes version bytes are hex.
-# Provides content hints for version 0 files. Assumes that the root
-# object is the first object (true for CoreFoundation implementation).
-# From: David Remahl <dremahl@apple.com>
-0 string bplist
->6 byte x \bCoreFoundation binary property list data, version %#c
->>7 byte x \b%c
->6 string 00 \b
->>8 byte&0xF0 0x00 \b
->>>8 byte&0x0F 0x00 \b, root type: null
->>>8 byte&0x0F 0x08 \b, root type: false boolean
->>>8 byte&0x0F 0x09 \b, root type: true boolean
->>8 byte&0xF0 0x10 \b, root type: integer
->>8 byte&0xF0 0x20 \b, root type: real
->>8 byte&0xF0 0x30 \b, root type: date
->>8 byte&0xF0 0x40 \b, root type: data
->>8 byte&0xF0 0x50 \b, root type: ascii string
->>8 byte&0xF0 0x60 \b, root type: unicode string
->>8 byte&0xF0 0x80 \b, root type: uid (CORRUPT)
->>8 byte&0xF0 0xa0 \b, root type: array
->>8 byte&0xF0 0xd0 \b, root type: dictionary
-
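The root-type hints above rely on the stated assumption that the root object is the first object, i.e. the marker byte right after the 8-byte "bplist00" header, whose high nibble encodes the object type. A short Python sketch of that lookup (our own helper, not CoreFoundation code):

BPLIST_ROOT_TYPES = {0x0: "null or boolean", 0x1: "integer", 0x2: "real", 0x3: "date",
                     0x4: "data", 0x5: "ascii string", 0x6: "unicode string",
                     0x8: "uid", 0xA: "array", 0xD: "dictionary"}

def bplist_root_hint(path):
    with open(path, "rb") as f:
        header = f.read(9)
    if not header.startswith(b"bplist"):
        return None
    version = header[6:8].decode("ascii", "replace")
    marker = header[8] >> 4 if len(header) > 8 else None
    return version, BPLIST_ROOT_TYPES.get(marker, "unknown")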
-# Apple/NeXT typedstream data
-# Serialization format used by NeXT and Apple for various
-# purposes in YellowStep/Cocoa, including some nib files.
-# From: David Remahl <dremahl@apple.com>
-2 string typedstream NeXT/Apple typedstream data, big endian
->0 byte x \b, version %d
->0 byte <5 \b
->>13 byte 0x81 \b
->>>14 ubeshort x \b, system %d
-2 string streamtyped NeXT/Apple typedstream data, little endian
->0 byte x \b, version %d
->0 byte <5 \b
->>13 byte 0x81 \b
->>>14 uleshort x \b, system %d
-
-#------------------------------------------------------------------------------
-# CAF: Apple CoreAudio File Format
-#
-# Container format for high-end audio purposes.
-# From: David Remahl <dremahl@apple.com>
-#
-0 string caff CoreAudio Format audio file
->4 beshort <10 version %d
->6 beshort x
-
-
-#------------------------------------------------------------------------------
-# Keychain database files
-0 string kych Mac OS X Keychain File
-
-#------------------------------------------------------------------------------
-# Code Signing related file types
-0 belong 0xfade0c00 Mac OS X Code Requirement
->8 belong 1 (opExpr)
->4 belong x - %d bytes
-
-0 belong 0xfade0c01 Mac OS X Code Requirement Set
->8 belong >1 containing %d items
->4 belong x - %d bytes
-
-0 belong 0xfade0c02 Mac OS X Code Directory
->8 belong x version %x
->12 belong >0 flags %#x
->4 belong x - %d bytes
-
-0 belong 0xfade0cc0 Mac OS X Detached Code Signature (non-executable)
->4 belong x - %d bytes
-
-0 belong 0xfade0cc1 Mac OS X Detached Code Signature
->8 belong >1 (%d elements)
->4 belong x - %d bytes
-
-# From: "Nelson A. de Oliveira" <naoliv@gmail.com>
-# .vdi
-4 string innotek\ VirtualBox\ Disk\ Image %s
-
-# Apple disk partition stuff
-# URL: https://en.wikipedia.org/wiki/Apple_Partition_Map
-# Reference: https://ftp.netbsd.org/pub/NetBSD/NetBSD-current/src/sys/sys/bootblock.h
-# Update: Joerg Jenderek
-# "ER" is APPLE_DRVR_MAP_MAGIC signature
-0 beshort 0x4552
-# display Apple Driver Map (strength=50) after Syslinux bootloader (71)
-#!:strength +0
-# strengthen the magic by looking for used blocksizes 512 2048
->2 ubeshort&0xf1FF 0 Apple Driver Map
-# last 6 bytes for padding found are 0 or end with 55AAh marker for MBR hybrid
-#>>504 ubequad&0x0000FFffFFff0000 0
-!:mime application/x-apple-diskimage
-!:apple ????devr
-# https://en.wikipedia.org/wiki/Apple_Disk_Image
-!:ext dmg/iso
-# sbBlkSize for driver descriptor map 512 2048
->>2 beshort x \b, blocksize %d
-# sbBlkCount is sometimes garbage, like
-# 0xb0200000 for unzlibed install_flash_player_19.0.0.245_osx.dmg
-# 0xf2720100 for bunziped Firefox 48.0-2.dmg
-# 0xeb02ffff for super_grub2_disk_hybrid_2.02s3.iso
-# 0x00009090 by syslinux-6.03/utils/isohybrid.c
->>4 ubelong x \b, blockcount %u
-# following device/driver information not very useful
-# device type 0 1 (37008 garbage for super_grub2_disk_hybrid_2.02s3.iso)
->>8 ubeshort x \b, devtype %u
-# device id 0 1 (37008 garbage for super_grub2_disk_hybrid_2.02s3.iso)
->>10 ubeshort x \b, devid %u
-# driver data 0 (2425393296 garbage for super_grub2_disk_hybrid_2.02s3.iso)
->>12 ubelong >0
->>>12 ubelong x \b, driver data %u
-# number of driver descriptors sbDrvrCount <= 61
-# (37008 garbage for super_grub2_disk_hybrid_2.02s3.iso)
->>16 ubeshort x \b, driver count %u
-# 61 * apple_drvr_descriptor[8]. information not very useful or same as in partition map
-# >>18 use apple-driver-map
-# >>26 use apple-driver-map
-# # ...
-# >>500 use apple-driver-map
-# number of partitions is always same in every partition (map block count)
-#>>0x0204 ubelong x \b, %u partitions
->>0x0204 ubelong >0 \b, contains[@0x200]:
->>>0x0200 use apple-apm
->>0x0204 ubelong >1 \b, contains[@0x400]:
->>>0x0400 use apple-apm
->>0x0204 ubelong >2 \b, contains[@0x600]:
->>>0x0600 use apple-apm
->>0x0204 ubelong >3 \b, contains[@0x800]:
->>>0x0800 use apple-apm
->>0x0204 ubelong >4 \b, contains[@0xA00]:
->>>0x0A00 use apple-apm
->>0x0204 ubelong >5 \b, contains[@0xC00]:
->>>0x0C00 use apple-apm
->>0x0204 ubelong >6 \b, contains[@0xE00]:
->>>0x0E00 use apple-apm
->>0x0204 ubelong >7 \b, contains[@0x1000]:
->>>0x1000 use apple-apm
-# display apple driver descriptor map (start-block, # blocks in sbBlkSize sizes, type)
-0 name apple-driver-map
->0 ubequad !0
-# descBlock first block of driver
->>0 ubelong x \b, driver start block %u
-# descSize driver size in blocks
->>4 ubeshort x \b, size %u
-# descType driver system type 1 701h F8FFh FFFFh
->>6 ubeshort x \b, type %#x
-
-# URL: https://en.wikipedia.org/wiki/Apple_Partition_Map
-# Reference: https://opensource.apple.com/source/IOStorageFamily/IOStorageFamily-116/IOApplePartitionScheme.h
-# Update: Joerg Jenderek
-# Yes, the 3rd and 4th bytes pmSigPad are reserved, but we use them to make the
-# magic stronger.
-# for apple partition map stored as a single file
-0 belong 0x504d0000
-# to display Apple Partition Map (strength=70) after Syslinux bootloader (71)
-#!:strength +0
->0 use apple-apm
-# magic/Magdir/apple14.test, 365: Warning: Current entry does not yet have a description for adding a EXTENSION type
-# file: could not find any valid magic files!
-#!:ext bin
-# display apple partition map. Normally called after Apple driver map
-0 name apple-apm
->0 belong 0x504d0000 Apple Partition Map
-# number of partitions
->>4 ubelong x \b, map block count %u
-# logical block (512 bytes) start of partition
->>8 ubelong x \b, start block %u
->>12 ubelong x \b, block count %u
->>16 string >0 \b, name %s
->>48 string >0 \b, type %s
-# processor type dpme_process_id[16] e.g. "68000" "68020"
->>120 string >0 \b, processor %s
-# A/UX boot arguments BootArgs[128]
->>136 string >0 \b, boot arguments %s
-# status of partition dpme_flags
->>88 belong & 1 \b, valid
->>88 belong & 2 \b, allocated
->>88 belong & 4 \b, in use
->>88 belong & 8 \b, has boot info
->>88 belong & 16 \b, readable
->>88 belong & 32 \b, writable
->>88 belong & 64 \b, pic boot code
->>88 belong & 128 \b, chain compatible driver
->>88 belong & 256 \b, real driver
->>88 belong & 512 \b, chain driver
-# mount automatically at startup APPLE_PS_AUTO_MOUNT
->>88 ubelong &0x40000000 \b, mount at startup
-# is the startup partition APPLE_PS_STARTUP
->>88 ubelong &0x80000000 \b, is the startup partition
-
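Taken together, the two stanzas above describe a driver descriptor map in block 0 ("ER", block size, block count) followed by 512-byte partition map entries starting at 0x200, each beginning with "PM\0\0". A rough Python sketch of that walk, assuming 512-byte blocks as the entries above do:

import struct

def read_apple_partition_map(buf):
    # Driver Descriptor Map: "ER", sbBlkSize at 2, sbBlkCount at 4 (big-endian).
    if buf[0:2] != b"ER":
        return None
    blocksize, blockcount = struct.unpack_from(">HI", buf, 2)
    entries = []
    offset = 0x200                       # first apple-apm entry, as used above
    while offset + 0x200 <= len(buf) and buf[offset:offset + 4] == b"PM\x00\x00":
        map_blocks, start, length = struct.unpack_from(">3I", buf, offset + 4)
        name = buf[offset + 16:offset + 48].split(b"\0", 1)[0].decode("ascii", "replace")
        ptype = buf[offset + 48:offset + 80].split(b"\0", 1)[0].decode("ascii", "replace")
        entries.append({"name": name, "type": ptype, "start": start, "blocks": length})
        offset += 0x200
    return {"blocksize": blocksize, "blockcount": blockcount, "partitions": entries}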
-#https://wiki.mozilla.org/DS_Store_File_Format
-#https://en.wikipedia.org/wiki/.DS_Store
-0 string \0\0\0\1Bud1\0 Apple Desktop Services Store
-
-# HFS/HFS+ Resource fork files (andrew.roazen@nau.edu Apr 13 2015)
-# Usually not in separate files, but have either filename rsrc with
-# no extension, or a filename corresponding to another file, with
-# extensions rsr/rsrc
-# URL: http://fileformats.archiveteam.org/wiki/Macintosh_resource_file
-# https://en.wikipedia.org/wiki/Resource_fork
-# Reference: https://github.com/kreativekorp/ksfl/wiki/Macintosh-Resource-File-Format
-# http://developer.apple.com/legacy/mac/library/documentation/mac/pdf/MoreMacintoshToolbox.pdf
-# https://formats.kaitai.io/resource_fork/
-# Update: Joerg Jenderek
-# Note: verified often by command like `deark -m macrsrc Icon_.rsrc`
-# offset of resource data; usually starts at offset 0x0100
-0 string \000\000\001\000
-# skip NPETraceSession.etl with invalid "low" map offset 0
->4 ubelong >0xFF
-# skip a few Atari DEGAS Elite bitmaps (eil2.pi1 nastro.pi1) with invalid "high" 0x6550766 0x7510763 map lengths
->>12 ubelong <0x8001
-# most examples with zeroed system reserved field
->>>16 lelong =0
->>>>0 use apple-rsr
-# a few samples with a non-zeroed system reserved field, like: Empty.rsrc.rsr OpenSans-CondBold.dfont
->>>16 lelong !0
-# resource fork variant with not zeroed system reserved field and copy of header
->>>>(4.L) ubelong 0x100
-# GRR: the line above only works if in ../../src/file.h FILE_BYTES_MAX is raised from 1 MiB above 0x6ab0f4 (HelveticaNeue.dfont)
->>>>>0 use apple-rsr
-# data fork variant with not zeroed system reserved field and no copy of header
->>>>(4.L) ubelong 0
->>>>>0 use apple-rsr
-# Note: moved and merged from ./macintosh
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# URL: https://en.wikipedia.org/wiki/Datafork_TrueType
-# Derived from the 'fondu' and 'ufond' source code (fondu.sf.net). 'sfnt' is
-# TrueType; 'POST' is PostScript. 'FONT' and 'NFNT' sometimes appear, but I
-# don't know what they mean.
-# display information about Mac OSX datafork font DFONT
-0 name apple-dfont
->(4.L+30) ubelong x Mac OSX datafork font,
-# https://en.wikipedia.org/wiki/Datafork_TrueType
-!:mime application/x-dfont
-!:ext dfont
-# https://exiftool.org/TagNames/RSRC.html
->(4.L+30) ubelong 0x73666e74 TrueType
->(4.L+30) ubelong 0x464f4e54 'FONT'
->(4.L+30) ubelong 0x4e464e54 'NFNT'
->(4.L+30) ubelong 0x504f5354 PostScript
->(4.L+30) ubelong 0x464f4e44 'FOND'
->(4.L+30) ubelong 0x76657273 'vers'
-# display information about Macintosh resource
-0 name apple-rsr
->(4.L+30) ubelong 0x73666e74
->>0 use apple-dfont
->(4.L+30) ubelong 0x464f4e54
->>0 use apple-dfont
->(4.L+30) ubelong 0x4e464e54
->>0 use apple-dfont
->(4.L+30) ubelong 0x504f5354
->>0 use apple-dfont
->(4.L+30) ubelong 0x464f4e44
->>0 use apple-dfont
->(4.L+30) ubelong 0x76657273
->>0 use apple-dfont
->(4.L+30) default x Apple HFS/HFS+ resource fork
-#!:mime application/octet-stream
-!:mime application/x-apple-rsr
-!:ext rsrc/rsr
-# offset to resource data; usually starts at offset 0x0100
->0 ubelong !0x100 \b, data offset %#x
-# offset to resource map; positive but not nil like in NPETraceSession.etl
->4 ubelong x \b, map offset %#x
-# length of resource map; positive with 32K limitation but not
-# nil like in NPETraceSession.etl or high like 0x7510763 in nastro.pi1
->12 ubelong x \b, map length %#x
-# length of resource data; positive but not nil like in NPETraceSession.etl
->8 ubelong x \b, data length %#x
-# reserved 112 bytes for system use; apparently often nil, but 8fd20000h in Empty.rsrc.rsr and 0x00768c2b in OpenSans-CondBold.dfont
->16 ubelong !0 \b, at 16 %#8.8x
-# https://fontforge.org/docs/techref/macformats.html
-# jump to resource map
-# a copy of resource header or 16 bytes of zeros for data fork
-#>(4.L) ubelong x \b, DATA offset %#x
-#>(4.L+4) ubelong x \b, MAP offset %#x
-#>(4.L+8) ubelong x \b, DATA length %#x
-#>(4.L+12) ubelong x \b, MAP length %#x
-# nextResourceMap; handle to next resource map; used by the Resource Manager for internal bookkeeping; should be zero
->(4.L+16) ubelong !0 \b, nextResourceMap %#x
-# fileRef; file reference number; used by the Resource Manager for internal bookkeeping; should be zero
->(4.L+20) ubeshort !0 \b, fileRef %#x
-# attributes; Resource fork attributes (80h~read-only 40h~compression needed 20h~changed); other bits are reserved and should be zero
->(4.L+22) ubeshort !0 \b, attributes %#x
-# typeListOffset; offset from resource map to start of type list like: 1Ch
->(4.L+24) ubeshort x \b, list offset %#x
-# nameListOffset; offset from resource map to start of name list like: 32h 46h 56h (XLISP.RSR XLISPTIN.RSR) 13Eh (HelveticaNeue.dfont)
->(4.L+26) ubeshort x \b, name offset %#x
-# typeCount; number of types in the map minus 1; If there are no resources, this is 0xFFFF
->(4.L+28) beshort+1 >0 \b, %u type
-# plural s
->>(4.L+28) beshort+1 >1 \bs
-# resource type list array; 1st resource type like: ALRT CODE FOND MPSR icns scsz
->>(4.L+30) ubelong x \b, %#x
->>(4.L+30) string x '%-.4s'
-# resourceCount; number of this type resources minus one. If there is one resource of this type, this is 0x0000
->>(4.L+34) beshort+1 x * %d
-# resourceListOffset; offset from type list to resource list like: Ah 12h DAh
->(4.L+36) ubeshort x resource offset %#x
-
-#https://en.wikipedia.org/wiki/AppleScript
-0 string FasdUAS AppleScript compiled
-
-# AppleWorks/ClarisWorks
-# https://github.com/joshenders/appleworks_format
-# http://fileformats.archiveteam.org/wiki/AppleWorks
-0 name appleworks
->0 belong&0x00ffffff 0x07e100 AppleWorks CWK Document
->0 belong&0x00ffffff 0x008803 ClarisWorks CWK Document
->0 default x
->>0 belong x AppleWorks/ClarisWorks CWK Document
->0 byte x \b, version %d
->30 beshort x \b, %d
->32 beshort x \bx%d
-!:ext cwk
-
-4 string BOBO
->0 byte >4
->>12 belong 0
->>>26 belong 0
->>>>0 use appleworks
->0 belong 0x0481ad00
->>0 use appleworks
-
-# magic for Apple File System (APFS)
-# from Alex Myczko <alex@aiei.ch>
-32 string NXSB Apple File System (APFS)
->36 ulelong x \b, blocksize %u
-
-# iTunes cover art (versions 1 and 2)
-4 string itch
->24 string artw
->>0x1e8 string data iTunes cover art
->>>0x1ed string PNG (PNG)
->>>0x1ec beshort 0xffd8 (JPEG)
-
-# MacPaint image
-65 string PNTGMPNT MacPaint image data
-#0 belong 2 MacPaint image data
diff --git a/contrib/libs/libmagic/magic/Magdir/application b/contrib/libs/libmagic/magic/Magdir/application
deleted file mode 100644
index f316608081..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/application
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: application,v 1.1 2016/10/17 12:13:01 christos Exp $
-# application: file(1) magic for applications on small devices
-#
-# Pebble Application
-0 string PBLAPP\000\000 Pebble application
diff --git a/contrib/libs/libmagic/magic/Magdir/applix b/contrib/libs/libmagic/magic/Magdir/applix
deleted file mode 100644
index f3f362eec7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/applix
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: applix,v 1.5 2009/09/19 16:28:08 christos Exp $
-# applix: file(1) magic for Applixware
-# From: Peter Soos <sp@osb.hu>
-#
-0 string *BEGIN Applixware
->7 string WORDS Words Document
->7 string GRAPHICS Graphic
->7 string RASTER Bitmap
->7 string SPREADSHEETS Spreadsheet
->7 string MACRO Macro
->7 string BUILDER Builder Object
diff --git a/contrib/libs/libmagic/magic/Magdir/apt b/contrib/libs/libmagic/magic/Magdir/apt
deleted file mode 100644
index 2d9f15901b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/apt
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: apt,v 1.1 2016/10/17 19:51:57 christos Exp $
-# apt: file(1) magic for APT Cache files
-# <http://www.fifi.org/doc/libapt-pkg-doc/cache.html/ch2.html>
-# <https://anonscm.debian.org/cgit/apt/apt.git/tree/apt-pkg/pkgcache.h#n292>
-
-# before version 10 ("old format"), data was in arch-specific long/short
-
-# old format 64 bit
-0 name apt-cache-64bit-be
->12 beshort 1 \b, dirty
->40 bequad x \b, %llu packages
->48 bequad x \b, %llu versions
-
-# old format 32 bit
-0 name apt-cache-32bit-be
->8 beshort 1 \b, dirty
->40 belong x \b, %u packages
->44 belong x \b, %u versions
-
-# new format
-0 name apt-cache-be
->6 byte 1 \b, dirty
->24 belong x \b, %u packages
->28 belong x \b, %u versions
-
-0 bequad 0x98FE76DC
->8 ubeshort <10 APT cache data, version %u
->>10 beshort x \b.%u, 64 bit big-endian
->>0 use apt-cache-64bit-be
-
-0 lequad 0x98FE76DC
->8 uleshort <10 APT cache data, version %u
->>10 leshort x \b.%u, 64 bit little-endian
->>0 use \^apt-cache-64bit-be
-
-0 belong 0x98FE76DC
->4 ubeshort <10 APT cache data, version %u
->>6 ubeshort x \b.%u, 32 bit big-endian
->>0 use apt-cache-32bit-be
->4 ubyte >9 APT cache data, version %u
->>5 ubyte x \b.%u, big-endian
->>0 use apt-cache-be
-
-0 lelong 0x98FE76DC
->4 uleshort <10 APT cache data, version %u
->>6 uleshort x \b.%u, 32 bit little-endian
->>0 use \^apt-cache-32bit-be
->4 ubyte >9 APT cache data, version %u
->>5 ubyte x \b.%u, little-endian
->>0 use \^apt-cache-be
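All four branches above look for the same 0x98FE76DC signature, only in different widths and byte orders, which is what distinguishes the old arch-specific layouts from one another and from the new format. A compact Python sketch of that probe (it returns a label only, no header parsing):

import struct

APT_CACHE_SIGNATURE = 0x98FE76DC

def apt_cache_flavour(buf):
    # Same four interpretations as the entries above.
    if struct.unpack_from(">Q", buf, 0)[0] == APT_CACHE_SIGNATURE:
        return "64-bit big-endian (old format)"
    if struct.unpack_from("<Q", buf, 0)[0] == APT_CACHE_SIGNATURE:
        return "64-bit little-endian (old format)"
    if struct.unpack_from(">I", buf, 0)[0] == APT_CACHE_SIGNATURE:
        return "32-bit big-endian (old format) or big-endian new format"
    if struct.unpack_from("<I", buf, 0)[0] == APT_CACHE_SIGNATURE:
        return "32-bit little-endian (old format) or little-endian new format"
    return None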
diff --git a/contrib/libs/libmagic/magic/Magdir/archive b/contrib/libs/libmagic/magic/Magdir/archive
deleted file mode 100644
index 6e1f9678e7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/archive
+++ /dev/null
@@ -1,2607 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: archive,v 1.193 2023/07/27 17:55:58 christos Exp $
-# archive: file(1) magic for archive formats (see also "msdos" for self-
-# extracting compressed archives)
-#
-# cpio, ar, arc, arj, hpack, lha/lharc, rar, squish, uc2, zip, zoo, etc.
-# pre-POSIX "tar" archives are also handled in the C code ../../src/is_tar.c.
-
-# POSIX tar archives
-# URL: https://en.wikipedia.org/wiki/Tar_(computing)
-# Reference: https://www.freebsd.org/cgi/man.cgi?query=tar&sektion=5&manpath=FreeBSD+8-current
-# header mainly padded with nul bytes
-500 quad 0
-!:strength /2
-# filename or extended attribute printable strings in the range from space/null up to umlaut ue
->0 ubeshort >0x1F00
->>0 ubeshort <0xFCFD
-# last 4 header bytes often null but tar\0 in gtarfail2.tar gtarfail.tar-bad
-# at https://sourceforge.net/projects/s-tar/files/testscripts/
->>>508 ubelong&0x8B9E8DFF 0
-# nul, space or ascii digit 0-7 at start of mode
->>>>100 ubyte&0xC8 =0
->>>>>101 ubyte&0xC8 =0
-# nul, space at end of check sum
->>>>>>155 ubyte&0xDF =0
-# space or ascii digit 0 at start of check sum
->>>>>>>148 ubyte&0xEF =0x20
-# FOR DEBUGGING:
-#>>>>>>>>0 regex \^[0-9]{2,4}[.](png|jpg|jpeg|tif|tiff|gif|bmp) NAME "%s"
-# check for 1st image main name with digits used for sorting
-# and for name extension case insensitive like: PNG JPG JPEG TIF TIFF GIF BMP
->>>>>>>>0 regex \^[0-9]{2,4}[.](png|jpg|jpeg|tif|tiff|gif|bmp)
->>>>>>>>>0 use tar-cbt
-# check for 1st member name with ovf suffix
->>>>>>>>0 regex \^.{1,96}[.](ovf)
->>>>>>>>>0 use tar-ova
-# if the 1st member name has no digits, no known image suffix and no *.ovf, then it is a TAR archive
->>>>>>>>0 default x
->>>>>>>>>0 use tar-file
-# minimal check and then display tar archive information which can also be
-# embedded inside others like Android Backup, Clam AntiVirus database
-0 name tar-file
->257 string !ustar
-# header padded with nuls
->>257 ulong =0
-# GNU tar version 1.29 with a non-pax format option creates, without refusing,
-# a misleading V7 header for Long path, Multi-volume and Volume types
->>>156 ubyte 0x4c GNU tar archive
-!:mime application/x-gtar
-!:ext tar/gtar
->>>156 ubyte 0x4d GNU tar archive
-!:mime application/x-gtar
-!:ext tar/gtar
->>>156 ubyte 0x56 GNU tar archive
-!:mime application/x-gtar
-!:ext tar/gtar
->>>156 default x tar archive (V7)
-!:mime application/x-tar
-!:ext tar
-# other stuff in padding
-# some implementations add new fields to the blank area at the end of the header record
-# created for example by DOS TAR 3.20g 1994 Tim V.Shapore with -j option
->>257 ulong !0 tar archive (old)
-!:mime application/x-tar
-!:ext tar
-# magic in newer, GNU, posix variants
->257 string =ustar
-# 2 last char of magic and UStar version because string expression does not work
-# 2 space characters followed by a null for GNU variant
->>261 ubelong =0x72202000 POSIX tar archive (GNU)
-!:mime application/x-gtar
-!:ext tar/gtar
-# UStar version with ASCII "00"
->>261 ubelong 0x72003030 POSIX
-# gLOBAL and ExTENSION type only found in POSIX.1-2001 format
->>>156 ubyte 0x67 \b.1-2001
->>>156 ubyte 0x78 \b.1-2001
->>>156 ubyte x tar archive
-!:mime application/x-ustar
-!:ext tar/ustar
-# version with 2 binary nuls embedded in Android Backup like com.android.settings.ab
->>261 ubelong 0x72000000 tar archive (ustar)
-!:mime application/x-ustar
-!:ext tar/ustar
-# unseen ustar variant with a garbage version
->>261 default x tar archive (unknown ustar)
-!:mime application/x-ustar
-!:ext tar/ustar
-# type flag of 1st tar archive member
-#>156 ubyte x \b, %c-type
->156 ubyte x
->>156 ubyte 0 \b, file
->>156 ubyte 0x30 \b, file
->>156 ubyte 0x31 \b, hard link
->>156 ubyte 0x32 \b, symlink
->>156 ubyte 0x33 \b, char device
->>156 ubyte 0x34 \b, block device
->>156 ubyte 0x35 \b, directory
->>156 ubyte 0x36 \b, fifo
->>156 ubyte 0x37 \b, reserved
->>156 ubyte 0x4c \b, long path
->>156 ubyte 0x4d \b, multi volume
->>156 ubyte 0x56 \b, volume
->>156 ubyte 0x67 \b, global
->>156 ubyte 0x78 \b, extension
->>156 default x \b, type
->>>156 ubyte x '%c'
-# name[100]
->0 string >\0 %-.60s
-# mode mainly stored as an octal number in ASCII null or space terminated
->100 string >\0 \b, mode %-.7s
-# user id mainly as octal numbers in ASCII null or space terminated
->108 string >\0 \b, uid %-.7s
-# group id mainly as octal numbers in ASCII null or space terminated
->116 string >\0 \b, gid %-.7s
-# size mainly as octal number in ASCII
->124 ubyte <0x38
->>124 string >\0 \b, size %-.12s
-# coding indicated by setting the high-order bit of the leftmost byte
->124 ubyte >0xEF \b, size 0x
->>124 ubyte !0xff \b%2.2x
->>125 ubyte !0xff \b%2.2x
->>126 ubyte !0xff \b%2.2x
->>127 ubyte !0xff \b%2.2x
->>128 ubyte !0xff \b%2.2x
->>129 ubyte !0xff \b%2.2x
->>130 ubyte !0xff \b%2.2x
->>131 ubyte !0xff \b%2.2x
->>132 ubyte !0xff \b%2.2x
->>133 ubyte !0xff \b%2.2x
->>134 ubyte !0xff \b%2.2x
->>135 ubyte !0xff \b%2.2x
-# seconds since 0:0:0 1 jan 1970 UTC as octal number mainly in ASCII null or space terminated
->136 string >\0 \b, seconds %-.11s
-# header checksum stored as an octal number in ASCII null or space terminated
-#>148 string x \b, cksum %.7s
-# linkname[100]
->157 string >\0 \b, linkname %-.40s
-# additional fields for ustar
->257 string =ustar
-# owner user name null terminated
->>265 string >\0 \b, user %-.32s
-# group name null terminated
->>297 string >\0 \b, group %-.32s
-# device major minor if not zero
->>329 ubequad&0xCFCFCFCFcFcFcFdf !0
->>>329 string x \b, devmaj %-.7s
->>337 ubequad&0xCFCFCFCFcFcFcFdf !0
->>>337 string x \b, devmin %-.7s
-# prefix[155]
->>345 string >\0 \b, prefix %-.155s
-# old non ustar/POSIX tar
->257 string !ustar
->>508 string =tar\0
-# padding[255] in old star
->>>257 string >\0 \b, padding: %-.40s
->>508 default x
-# padding[255] in old tar sometimes comment field
->>>257 string >\0 \b, comment: %-.40s
-# Summary: Comic Book Archive *.CBT with TAR format
-# URL: https://en.wikipedia.org/wiki/Comic_book_archive
-# http://fileformats.archiveteam.org/wiki/Comic_Book_Archive
-# Note: there exist also RAR, ZIP, ACE and 7Z packed variants
-0 name tar-cbt
->0 string x Comic Book archive, tar archive
-#!:mime application/x-tar
-!:mime application/vnd.comicbook
-#!:mime application/vnd.comicbook+tar
-!:ext cbt
-# name[100] probably like: 19.jpg 0001.png 0002.png
-# or maybe like ComicInfo.xml
->0 string >\0 \b, 1st image %-.60s
-# Summary: Open Virtualization Format *.OVF with disk images and more packed as TAR archive *.OVA
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Open_Virtualization_Format
-# http://fileformats.archiveteam.org/wiki/OVF_(Open_Virtualization_Format)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/o/ova.trid.xml
-# Note: called "Open Virtualization Format package" by TrID
-# assuming *.ovf comes first
-0 name tar-ova
->0 string x Open Virtualization Format Archive
-#!:mime application/x-ustar
-# http://extension.nirsoft.net/ova
-!:mime application/x-virtualbox-ova
-!:ext ova
-# assuming name[100] like: DOS-0.9.ovf FreeDOS_1.ovf Win98SE_DE.ovf
->0 string >\0 \b, with %-.60s
-
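Everything in the tar block above works off a handful of fixed offsets in the first 512-byte header: name[100] at 0, mode at 100, the octal size string at 124, mtime at 136, the type flag at 156, linkname at 157 and the "ustar" magic at 257. A simplified Python sketch of those checks (nowhere near the full set of heuristics above):

def describe_tar_header(block):
    # block is the first 512 bytes of the archive.
    if len(block) < 512:
        raise ValueError("short read")
    flavour = "POSIX/ustar" if block[257:262] == b"ustar" else "old or V7"
    name = block[0:100].split(b"\0", 1)[0].decode("ascii", "replace")
    size_field = block[124:136].split(b"\0", 1)[0].strip()
    # octal ASCII only; ignores the base-256 (high bit set) encoding handled above
    size = int(size_field, 8) if size_field else 0
    typeflag = chr(block[156]) if block[156] else "0"   # NUL also means regular file
    linkname = block[157:257].split(b"\0", 1)[0].decode("ascii", "replace")
    return {"flavour": flavour, "name": name, "size": size,
            "type": typeflag, "linkname": linkname}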
-# Incremental snapshot gnu-tar format from:
-# https://www.gnu.org/software/tar/manual/html_node/Snapshot-Files.html
-0 string GNU\ tar- GNU tar incremental snapshot data
->&0 regex [0-9]\\.[0-9]+-[0-9]+ version %s
-
-# cpio archives
-#
-# Yes, the top two "cpio archive" formats *are* supposed to just be "short".
-# The idea is to indicate archives produced on machines with the same
-# byte order as the machine running "file" with "cpio archive", and
-# to indicate archives produced on machines with the opposite byte order
-# from the machine running "file" with "byte-swapped cpio archive".
-#
-# The SVR4 "cpio(4)" hints that there are additional formats, but they
-# are defined as "short"s; I think all the new formats are
-# character-header formats and thus are strings, not numbers.
-# URL: http://fileformats.archiveteam.org/wiki/Cpio
-# https://en.wikipedia.org/wiki/Cpio
-# Reference: https://people.freebsd.org/~kientzle/libarchive/man/cpio.5.txt
-# Update: Joerg Jenderek
-#
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-cpio-bin.trid.xml
-# Note: called "CPIO archive (binary)" by TrID, "cpio/Binary LE" by 7-Zip and "CPIO" by DROID via PUID fmt/635
-0 short 070707
-# skip DROID fmt-635-signature-id-960.cpio by looking for pathname of 1st entry
->26 string >\0 cpio archive
-!:mime application/x-cpio
-# https://download.opensuse.org/distribution/leap/15.4/iso/openSUSE-Leap-15.4-NET-x86_64-Media.iso
-# boot/x86_64/loader/bootlogo
-# message.cpi
-!:ext /cpio/cpi
->>0 use cpio-bin
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-cpio-bin-sw.trid.xml
-# Note: called "CPIO archive (byte swapped binary)" by TrID and "Cpio/Binary BE" by 7-Zip
-0 short 0143561 byte-swapped cpio archive
-!:mime application/x-cpio # encoding: swapped
-# https://telparia.com/fileFormatSamples/archive/cpio/skeleton2.cpio
-!:ext cpio
->0 use cpio-bin-be
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-cpio.trid.xml
-# Note: called "CPIO archive (portable)" by TrID, "cpio/Portable ASCII" by 7-Zip and "cpio/odc" by GNU cpio
-0 string 070707 ASCII cpio archive (pre-SVR4 or odc)
-!:mime application/x-cpio
-# https://telparia.com/fileFormatSamples/archive/cpio/ pthreads-1.60B5.osr5src.cpio cinema.cpi VOL.000.008 VOL.000.012
-!:ext cpio/cpi/008/012
-# Note: called "CPIO archive (portable)" by TrID, "cpio/New ASCII" by 7-Zip and "cpio/newc" by GNU cpio
-0 string 070701 ASCII cpio archive (SVR4 with no CRC)
-!:mime application/x-cpio
-# https://telparia.com/fileFormatSamples/archive/cpio/MainActor-2.06.3.cpio
-!:ext cpio
-# Note: called "CPIO archive (portable)" by TrID, "cpio/New CRC" by 7-Zip and "cpio/crc" by GNU cpio
-0 string 070702 ASCII cpio archive (SVR4 with CRC)
-!:mime application/x-cpio
-# http://ftp.gnu.org/gnu/tar/tar-1.27.cpio.gz
-# https://telparia.com/fileFormatSamples/archive/cpio/pcmcia
-!:ext /cpio
-# display information of old binary cpio archive
-# Note: verified by 7-Zip `7z l -tcpio -slt *.cpio` and
-# `cpio -ivt --numeric-uid-gid --file=clam.bin-le.cpio`
-0 name cpio-bin
-# c_dev; device number; WHAT IS THAT?
->2 uleshort x \b; device %u
-# c_ino; truncated inode number; use `ls --inode`
->4 uleshort x \b, inode %u
-# c_mode; mode specifies permissions and file type like: ?622~?rw-r--r-- by `ls -l`
->6 uleshort x \b, mode %o
-# c_uid; numeric user id; use `ls --numeric-uid-gid`
->8 uleshort x \b, uid %u
-# c_gid; numeric group id
->10 uleshort x \b, gid %u
-# c_nlink; links to this file; directories at least 2
->12 uleshort >1 \b, %u links
-# c_rdev; device number for block and character entries; zero for all other entries by writers
-# like 0x0440 for /dev/ttyS0
->14 uleshort >0 \b, device %#4.4x
-# c_mtime[2]; modification time in seconds since 1 January 1970; most-significant 16 bits first
->16 medate x \b, modified %s
-# c_filesize[2]; size of pathname; most-significant 16 bits first like: 544
->22 melong x \b, %u bytes
-# c_namesize; bytes in the pathname that follows the header like: 9
-#>20 uleshort x \b, namesize %u
-# pathname of entry like: "clam.exe"
->26 string x "%s"
-# display information of old binary byte swapped cpio archive
-# Note: verified by 7-Zip `7z l -tcpio -slt *.cpio` and
-# `LANGUAGE=C cpio -ivt --numeric-uid-gid --file=clam.bin-be.cpio`
-0 name cpio-bin-be
->2 ubeshort x \b; device %u
->4 ubeshort x \b, inode %u
->6 ubeshort x \b, mode %o
->8 ubeshort x \b, uid %u
->10 ubeshort x \b, gid %u
->12 ubeshort >1 \b, %u links
->14 ubeshort >0 \b, device %#4.4x
->16 bedate x \b, modified %s
->22 ubelong x \b, %u bytes
-#>20 ubeshort x \b, namesize %u
->26 string x "%s"
-
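The cpio-bin/cpio-bin-be stanzas print the thirteen 16-bit words of the old binary cpio header (magic, dev, ino, mode, uid, gid, nlink, rdev, a two-word mtime stored most-significant half first, namesize and a two-word filesize) followed by the pathname at offset 26. A Python sketch of that layout, assuming the header and name are already in buf:

import struct

def read_cpio_bin_header(buf):
    # Old binary cpio; the magic tells us which byte order the producer used.
    if struct.unpack_from("<H", buf, 0)[0] == 0o070707:
        endian = "<"
    elif struct.unpack_from(">H", buf, 0)[0] == 0o070707:
        endian = ">"
    else:
        raise ValueError("not an old binary cpio header")
    dev, ino, mode, uid, gid, nlink, rdev = struct.unpack_from(endian + "7H", buf, 2)
    mtime_hi, mtime_lo, namesize, size_hi, size_lo = struct.unpack_from(endian + "5H", buf, 16)
    mtime = (mtime_hi << 16) | mtime_lo            # most-significant 16 bits first
    filesize = (size_hi << 16) | size_lo
    name = buf[26:26 + namesize].rstrip(b"\0").decode("ascii", "replace")
    return {"device": dev, "inode": ino, "mode": oct(mode), "uid": uid, "gid": gid,
            "links": nlink, "rdev": rdev, "mtime": mtime, "size": filesize, "name": name}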
-#
-# Various archive formats used by various versions of the "ar"
-# command.
-#
-
-#
-# Original UNIX archive formats.
-# They were written with binary values in host byte order, and
-# the magic number was a host "int", which might have been 16 bits
-# or 32 bits. We don't say "PDP-11" or "VAX", as there might have
-# been ports to little-endian 16-bit-int or 32-bit-int platforms
-# (x86?) using some of those formats; if none existed, feel free
-# to use "PDP-11" for little-endian 16-bit and "VAX" for little-endian
-# 32-bit. There might have been big-endian ports of that sort as
-# well.
-#
-0 leshort 0177555 very old 16-bit-int little-endian archive
-0 beshort 0177555 very old 16-bit-int big-endian archive
-0 lelong 0177555 very old 32-bit-int little-endian archive
-0 belong 0177555 very old 32-bit-int big-endian archive
-
-0 leshort 0177545 old 16-bit-int little-endian archive
->2 string __.SYMDEF random library
-0 beshort 0177545 old 16-bit-int big-endian archive
->2 string __.SYMDEF random library
-0 lelong 0177545 old 32-bit-int little-endian archive
->4 string __.SYMDEF random library
-0 belong 0177545 old 32-bit-int big-endian archive
->4 string __.SYMDEF random library
-
-#
-# From "pdp" (but why a 4-byte quantity?)
-#
-0 lelong 0x39bed PDP-11 old archive
-0 lelong 0x39bee PDP-11 4.0 archive
-
-#
-# XXX - what flavor of APL used this, and was it a variant of
-# some ar archive format? It's similar to, but not the same
-# as, the APL workspace magic numbers in pdp.
-#
-0 long 0100554 apl workspace
-
-#
-# System V Release 1 portable(?) archive format.
-#
-0 string =<ar> System V Release 1 ar archive
-!:mime application/x-archive
-
-#
-# Debian package; it's in the portable archive format, and needs to go
-# before the entry for regular portable archives, as it's recognized as
-# a portable archive whose first member has a name beginning with
-# "debian".
-#
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Deb_(file_format)
-0 string =!<arch>\ndebian
-# https://manpages.debian.org/testing/dpkg/dpkg-split.1.en.html
->14 string -split part of multipart Debian package
-!:mime application/vnd.debian.binary-package
-# udeb is used for stripped down deb file
-!:ext deb/udeb
->14 string -binary Debian binary package
-!:mime application/vnd.debian.binary-package
-# For ipk packager see also https://en.wikipedia.org/wiki/Opkg
-!:ext deb/udeb/ipk
-# This should not happen
->14 default x Unknown Debian package
-# NL terminated version; for most Debian cases this is 2.0 or 2.1 for split
->68 string >\0 (format %s)
-#>68 string !2.0\n
-#>>68 string x (format %.3s)
->68 string =2.0\n
-# 2nd archive name=control archive name like control.tar.gz or control.tar.xz
-# or control.tar.zst
->>72 string >\0 \b, with %.15s
-# look for 3rd archive name=data archive name like data.tar.{gz,xz,bz2,lzma}
->>0 search/0x93e4f data.tar. \b, data compression
-# the above line only works if FILE_BYTES_MAX in ../../src/file.h is raised
-# for example like libreoffice-dev-doc_1%3a5.2.7-1+rpi1+deb9u3_all.deb
->>>&0 string x %.2s
-# skip space (0x20 BSD) and slash (0x2f System V) character marking end of name
->>>&2 ubyte !0x20
->>>>&-1 ubyte !0x2f
-# display 3rd character of file name extension like 2 of bz2 or m of lzma
->>>>>&-1 ubyte x \b%c
->>>>>>&0 ubyte !0x20
->>>>>>>&-1 ubyte !0x2f
-# display 4th character of file name extension like a of lzma
->>>>>>>>&-1 ubyte x \b%c
-# split debian package case
->68 string =2.1\n
-# dpkg-1.18.25/dpkg-split/info.c
-# NL terminated ASCII package name like ckermit
->>&0 string x \b, %s
-# NL terminated package version like 302-5.3
->>>&1 string x %s
-# NL terminated MD5 checksum
->>>>&1 string x \b, MD5 %s
-# NL terminated original package length
->>>>>&1 string x \b, unsplitted size %s
-# NL terminated part length
->>>>>>&1 string x \b, part length %s
-# NL terminated package part like n/m
->>>>>>>&1 string x \b, part %s
-# NL terminated package architecture like armhf since dpkg 1.16.1 or later
->>>>>>>>&1 string x \b, %s
-
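The Debian tests above treat a .deb as an ar archive whose first member (debian-binary) carries the format version at offset 68, with control.tar.* and data.tar.* following. A minimal ar member walker in Python, just enough to see that structure (a sketch, not dpkg logic):

def list_ar_members(path):
    # Each ar member has a 60-byte header: name[16] ... size[10] at offset 48.
    members = []
    with open(path, "rb") as f:
        if f.read(8) != b"!<arch>\n":
            raise ValueError("not an ar archive")
        while True:
            header = f.read(60)
            if len(header) < 60:
                break
            name = header[0:16].rstrip(b" /").decode("ascii", "replace")
            size = int(header[48:58].strip() or b"0")
            members.append((name, size))
            f.seek(size + (size & 1), 1)   # member data is padded to an even offset
    return members

On a typical binary package this yields debian-binary, control.tar.* and data.tar.*, in that order.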
-#
-# MIPS archive; they're in the portable archive format, and need to go
-# before the entry for regular portable archives, as it's recognized as
-# a portable archive whose first member has a name beginning with
-# "__________E".
-#
-0 string =!<arch>\n__________E MIPS archive
-!:mime application/x-archive
->20 string U with MIPS Ucode members
->21 string L with MIPSEL members
->21 string B with MIPSEB members
->19 string L and an EL hash table
->19 string B and an EB hash table
->22 string X -- out of date
-
-#
-# BSD/SVR2-and-later portable archive formats.
-#
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/AR
-# Reference: https://www.unix.com/man-page/opensolaris/3HEAD/ar.h/
-# Note: Mach-O universal binary in ./cafebabe is dependent
-# TODO: unify current ar archive, MIPS archive, Debian package
-# distinguish BSD, SVR; 32, 64 bit; HP from other 32-bit SVR;
-# *.ar packages from *.a libraries. handle empty archive
-0 string =!<arch>\n current ar archive
-# print first and possibly second ar_name[16] for debugging purpose
-#>8 string x \b, 1st "%.16s"
-#>68 string x \b, 2nd "%.16s"
-!:mime application/x-archive
-# a in most case for libraries; lib for Microsoft libraries; ar else cases
-!:ext a/lib/ar
->8 string __.SYMDEF random library
-# first member with long marked name __.SYMDEF SORTED implies BSD library
->68 string __.SYMDEF\ SORTED random library
-# Reference: https://parisc.wiki.kernel.org/images-parisc/b/b2/Rad_11_0_32.pdf
-# "archive file" entry moved from ./hp
-# LST header system_id 0210h~PA-RISC 1.1,... identifies the target architecture
-# LST header a_magic 0619h~relocatable library
->68 belong 0x020b0619 - PA-RISC1.0 relocatable library
->68 belong 0x02100619 - PA-RISC1.1 relocatable library
->68 belong 0x02110619 - PA-RISC1.2 relocatable library
->68 belong 0x02140619 - PA-RISC2.0 relocatable library
-#EOF for common ar archives
-
-#
-# "Thin" archive, as can be produced by GNU ar.
-#
-0 string =!<thin>\n thin archive with
->68 belong 0 no symbol entries
->68 belong 1 %d symbol entry
->68 belong >1 %d symbol entries
-
-0 search/1 -h- Software Tools format archive text
-
-# ARC archiver, from Daniel Quinlan (quinlan@yggdrasil.com)
-#
-# The first byte is the magic (0x1a), byte 2 is the compression type for
-# the first file (0x01 through 0x09), and bytes 3 to 15 are the MS-DOS
-# filename of the first file (null terminated). Since some types collide
-# we only test some types on basis of frequency: 0x08 (83%), 0x09 (5%),
-# 0x02 (5%), 0x03 (3%), 0x04 (2%), 0x06 (2%). 0x01 collides with terminfo.
-0 lelong&0x8080ffff 0x0000081a ARC archive data, dynamic LZW
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000091a ARC archive data, squashed
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000021a ARC archive data, uncompressed
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000031a ARC archive data, packed
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000041a ARC archive data, squeezed
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000061a ARC archive data, crunched
-!:mime application/x-arc
-# [JW] stuff taken from idarc, obviously ARC successors:
-0 lelong&0x8080ffff 0x00000a1a PAK archive data
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000141a ARC+ archive data
-!:mime application/x-arc
-0 lelong&0x8080ffff 0x0000481a HYP archive data
-!:mime application/x-arc
-
-# Acorn archive formats (Disaster prone simpleton, m91dps@ecs.ox.ac.uk)
-# I can't create either SPARK or ArcFS archives so I have not tested this stuff
-# [GRR: the original entries collide with ARC, above; replaced with combined
-# version (not tested)]
-#0 byte 0x1a RISC OS archive (spark format)
-0 string \032archive RISC OS archive (ArcFS format)
-0 string Archive\000 RISC OS archive (ArcFS format)
-
-# All these were taken from idarc, many could not be verified. Unfortunately,
-# there were many low-quality sigs, i.e. ones that easily trigger false positives.
-# Please notify me of any real-world fishy/ambiguous signatures and I'll try
-# to get my hands on the actual archiver and see if I find something better. [JW]
-# probably many can be enhanced by finding some 0-byte or control char near the start
-
-# idarc calls this Crush/Uncompressed... *shrug*
-0 string CRUSH Crush archive data
-# Squeeze It (.sqz)
-0 string HLSQZ Squeeze It archive data
-# SQWEZ
-0 string SQWEZ SQWEZ archive data
-# HPack (.hpk)
-0 string HPAK HPack archive data
-# HAP
-0 string \x91\x33HF HAP archive data
-# MD/MDCD
-0 string MDmd MDCD archive data
-# LIM
-0 string LIM\x1a LIM archive data
-# SAR
-3 string LH5 SAR archive data
-# BSArc/BS2
-0 string \212\3SB\020\0 BSArc/BS2 archive data
-# Bethesda Softworks Archive (Oblivion)
-0 string BSA\0 BSArc archive data
->4 lelong x version %d
-# MAR
-2 string =-ah MAR archive data
-# ACB
-#0 belong&0x00f800ff 0x00800000 ACB archive data
-# CPZ
-# TODO, this is what idarc says: 0 string \0\0\0 CPZ archive data
-# JRC
-0 string JRchive JRC archive data
-# Quantum
-0 string DS\0 Quantum archive data
-# ReSOF
-0 string PK\3\6 ReSOF archive data
-# QuArk
-0 string 7\4 QuArk archive data
-# YAC
-14 string YC YAC archive data
-# X1
-0 string X1 X1 archive data
-0 string XhDr X1 archive data
-# CDC Codec (.dqt)
-0 belong&0xffffe000 0x76ff2000 CDC Codec archive data
-# AMGC
-0 string \xad6" AMGC archive data
-# NuLIB
-0 string N\xc3\xb5F\xc3\xa9lx\xc3\xa5 NuLIB archive data
-# PakLeo
-0 string LEOLZW PAKLeo archive data
-# ChArc
-0 string SChF ChArc archive data
-# PSA
-0 string PSA PSA archive data
-# CrossePAC
-0 string DSIGDCC CrossePAC archive data
-# Freeze
-0 string \x1f\x9f\x4a\x10\x0a Freeze archive data
-# KBoom
-0 string \xc2\xa8MP\xc2\xa8 KBoom archive data
-# NSQ, must go after CDC Codec
-0 string \x76\xff NSQ archive data
-# DPA
-0 string Dirk\ Paehl DPA archive data
-# BA
-# TODO: idarc says "bytes 0-2 == bytes 3-5"
-# TTComp
-# URL: http://fileformats.archiveteam.org/wiki/TTComp_archive
-# Update: Joerg Jenderek
-# GRR: line below is too general as it also matches Panorama databases like "TCDB 2003-10 demo.pan" and others
-0 string \0\6
-# look for first keyword of Panorama database *.pan
->12 search/261 DESIGN
-# skip keyword with low entropy
->12 default x
-# skip DOS 2.0 backup id file, sequence 6 with many nils like BACKUPID_xx6.@@@ handled by ./msdos
->>8 quad !0
->>>0 use ttcomp
-# variant ASCII, 4K dictionary (strength=48=50-2). With strength=49 wrong order! WHY?
-0 string \1\6
-# TODO:
-# skip VAX-order 68k Blit mpx/mux executable (strength=50) handled by ./blit
-!:strength -2
->0 use ttcomp
-0 string \0\5
-# skip some DOS 2.0 backup id file, sequence 5 with many nils like BACKUPID_075.@@@ handled by ./msdos
->8 quad !0
->>0 use ttcomp
-0 string \1\5
-# TODO:
-# variant ASCII, 2K dictionary (strength=48=50-2). With strength=49 wrong order! WHY?
-# skip ctab data (strength=50) handled by ./ibm6000
-# skip locale data table (strength=50) handled by ./digital
-!:strength -2
->0 use ttcomp
-0 string \0\4
-# skip many Maple help database *.hdb with version tag handled by ./maple
->1028 string !version
-# skip veclib maple.hdb by looking for Maple keyword
->>4 search/1091 Maple\040
-#>4 search/34090 Maple\040
->>4 default x
-# skip DOS 2.0-3.2 backed up sequence 4 with many nils like LOTUS5.RAR handled by ./msdos
-# skip xBASE Compound Index file *.CDX with many nils
->>>0x54 quad !0
->>>>0 use ttcomp
-0 string \1\4
-# TODO:
-# skip shared library (strength=50) handled by ./ibm6000
-!:strength -2
-# skip Commodore PET BASIC programs (Mastermind.prg) with last 3 nil bytes (\0~end of line followed by 0000h line offset)
-#>-4 ubelong x LAST_BYTES=%8.8x
->-4 ubelong&0x00FFffFF !0
->>0 use ttcomp
-# display information of TTComp archive
-0 name ttcomp
-# file version 5.25 labeled the entry as "TTComp archive data"
->0 ubyte x TTComp archive data
-!:mime application/x-compress-ttcomp
-# PBACKSCR.PI1
-!:ext $xe/$ts/pi1/__d
-# compression type: 0~binary compression 1~ASCII compression
->0 ubyte 0 \b, binary
->0 ubyte 1 \b, ASCII
-# size of the dictionary: 4~1024 bytes 5~2048 bytes 6~4096 bytes
->1 ubyte 4 \b, 1K
->1 ubyte 5 \b, 2K
->1 ubyte 6 \b, 4K
->1 ubyte x dictionary
-# https://mark0.net/forum/index.php?topic=848
-# last 3 bytes probably have only 8 possible bit sequences
-# xxxxxxxx 0000000x 11111111 ____FFh
-# xxxxxxxx 10000000 01111111 __807Fh
-# 0xxxxxxx 11000000 00111111 __C03Fh
-# 00xxxxxx 11100000 00011111 __E01Fh
-# 000xxxxx 11110000 00001111 __F00Fh
-# 0000xxxx 11111000 00000111 __F807h
-# 00000xxx 11111100 00000011 __FC03h
-# 000000xx 11111110 00000001 __FE01h
-# but for quickgif.__d 0A7DD4h
-#>-3 ubyte x \b, last 3 bytes 0x%2.2x
-#>-2 ubeshort x \b%4.4x
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Disk_Copy
-# reference: http://nulib.com/library/FTN.e00005.htm
-0x52 ubeshort 0x0100
-# test for disk image size equal or above 400k
->0x40 ubelong >409599
-# test also for disk image size equal or below 1440k to skip
-# windows7en.mbr UNICODE.DAT
-#>>0x40 ubelong <1474561
-# test now for "low" disk image size equal or below 64 MiB to skip
-# windows7en.mbr (B441BBAAh) UNICODE.DAT (0400AF05h)
->>0x40 ubelong <0x04000001
-# To skip Flags$StringJoiner.class with size 00106A61h test also for valid disk image sizes
-# 00064000 for 400k GCR disks dc42-400k-gcr.trid.xml
-# 000c8000 for 800k GCR disks dc42-800k-gcr.trid.xml
-# 000b4000 for 720k MFM disks dc42-720k-mfm.trid.xml
-# 00168000 for 1440k MFM disks dc42-1440k-mfm.trid.xml
-# https://lisaem.sunder.net/LisaProjectDocs.txt
-# 00500000 05M available
-# 00A00000 10M available
-# 01800000 24M possible
-# 02000000 32M uncertain
-# 04000000 64M uncertain
->>>0x40 ubelong&0xf8003fFF 0
-# skip samples with invalid disk name length like:
-# 181 (biosmd80.rom) 202 (Flags$StringJoiner.class) 90 (UNICODE.DAT)
->>>>0x0 ubyte <64
->>>>>0 use dc42-floppy
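-# Sketch of why the 0xf8003fFF mask works: all valid data sizes listed above,
-# from 0x00064000 (400k GCR) up to 0x04000000 (64M), are multiples of 0x4000
-# and well below the top bits of the mask, so the AND yields 0, while a size
-# like 00106A61h (Flags$StringJoiner.class) leaves 0x2a61 and is skipped.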
-# display information of Apple DiskCopy 4.2 floppy image
-0 name dc42-floppy
-# disk name length; maximal 63
-#>0 ubyte x DISK NAME LENGTH %u
-# ASCII Pascal-style image name (maximal 63 bytes) padded with NULs like:
-# "Microsoft Mail" "Disquette 2" "IIe Installer Disk"
-# "-lisaem.sunder.net hd-" (dc42-lisaem.trid.xml) "-not a Macintosh disk" (dc42-nonmac.trid.xml)
->00 pstring/B x Apple DiskCopy 4.2 image %s
-#!:mime application/octet-stream
-!:mime application/x-dc42-floppy-image
-!:apple dCpydImg
-# probably also img like: "Utilitaires 2.img" "Installation 7.img"
-!:ext image/dc42/img
-# data size in bytes like: 409600 737280 819200 1474560
->0x40 ubelong x \b, %u bytes
-# for debugging purpose size in hexadecimal
-#>0x40 ubelong x (%#8.8x)
-# tag size in bytes like: 0 (often) 2580h (PUID fmt/625) 4B00h (Microsoft Mail.image)
->0x44 ubelong >0 \b, %#x tag size
-# data checksum
-#>0x48 ubelong x \b, %#x checksum
-# tag checksum
-#>0x4c ubelong x \b, %#x tag checksum
-# disk encoding like: 0 1 2 3 (PUID: fmt/625)
->0x50 ubyte 0 \b, GCR CLV ssdd (400k)
->0x50 ubyte 1 \b, GCR CLV dsdd (800k)
->0x50 ubyte 2 \b, MFM CAV dsdd (720k)
->0x50 ubyte 3 \b, MFM CAV dshd (1440k)
->0x50 ubyte >3 \b, %#x encoding
-# format byte like: 12h (Lisa 400K) 24h (400K Macintosh) 96h (800K Apple II disk)
-# 2 (Mac 400k "Disquette Installation 13.image")
-# 22h (double-sided MFM or Mac 800k "Disco 12.image" "IIe Installer Disk.image")
->0x51 ubyte x \b, %#x format
-#>0x54 ubequad x \b, data %#16.16llx
-# ESP, could this conflict with Easy Software Products' (e.g. ESP Ghostscript) documentation?
-0 string ESP ESP archive data
-# ZPack
-0 string \1ZPK\1 ZPack archive data
-# Sky
-0 string \xbc\x40 Sky archive data
-# UFA
-0 string UFA UFA archive data
-# Dry
-0 string =-H2O DRY archive data
-# FoxSQZ
-0 string FOXSQZ FoxSQZ archive data
-# AR7
-0 string ,AR7 AR7 archive data
-# PPMZ
-0 string PPMZ PPMZ archive data
-# MS Compress
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/MS-DOS_installation_compression
-# Reference: https://hwiegman.home.xs4all.nl/fileformats/compress/szdd_kwaj_format.html
-# Note: use the correct version of an extraction tool such as EXPAND, UNPACK, DECOMP or 7Z
-4 string \x88\xf0\x27
-# KWAJ variant
->0 string KWAJ MS Compress archive data, KWAJ variant
-!:mime application/x-ms-compress-kwaj
-# extension not working in version 5.32
-# magic/Magdir/archive, 284: Warning: EXTENSION type ` ??_' has bad char '?'
-# file: line 284: Bad magic entry ' ??_'
-!:ext ??_
-# compression method (0-4)
->>8 uleshort x \b, %u method
-# offset of compressed data
->>10 uleshort x \b, %#x offset
-#>>(10.s) uleshort x
-#>>>&-6 string x \b, TEST extension %-.3s
-# header flags to mark header extensions
->>12 uleshort >0 \b, %#x flags
-# 4 bytes: decompressed length of file
->>12 uleshort &0x01
->>>14 ulelong x \b, original size: %u bytes
-# 2 bytes: unknown purpose
-# 2 bytes: length of unknown data + mentioned bytes
-# 1-9 bytes: null-terminated file name
-# 1-4 bytes: null-terminated file extension
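-# Example of the offset arithmetic below (illustrative): with flags 0x0009
-# only the 4-byte decompressed length (bit 0x01) precedes the null-terminated
-# file name (bit 0x08), so the name is printed from offset 14+4=18; the
-# additional bits 0x02 and 0x04 shift it further, which is what the nested
-# tests on the flags at offset 12 work out.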
->>12 uleshort &0x08
->>>12 uleshort ^0x01
->>>>12 uleshort ^0x02
->>>>>12 uleshort ^0x04
->>>>>>12 uleshort ^0x10
->>>>>>>14 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>14 string x \b, %-.8s
->>>>>>>>&1 string x \b.%-.3s
->>>>>12 uleshort &0x04
->>>>>>12 uleshort ^0x10
->>>>>>>(14.s) uleshort x
->>>>>>>>&14 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>(14.s) uleshort x
->>>>>>>>&14 string x \b, %-.8s
->>>>>>>>>&1 string x \b.%-.3s
->>>>12 uleshort &0x02
->>>>>12 uleshort ^0x04
->>>>>>12 uleshort ^0x10
->>>>>>>16 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>16 string x \b, %-.8s
->>>>>>>>&1 string x \b.%-.3s
->>>>>12 uleshort &0x04
->>>>>>12 uleshort ^0x10
->>>>>>>(16.s) uleshort x
->>>>>>>>&16 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>(16.s) uleshort x
->>>>>>>&16 string x %-.8s
->>>>>>>>&1 string x \b.%-.3s
->>>12 uleshort &0x01
->>>>12 uleshort ^0x02
->>>>>12 uleshort ^0x04
->>>>>>12 uleshort ^0x10
->>>>>>>18 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>18 string x \b, %-.8s
->>>>>>>>&1 string x \b.%-.3s
->>>>>12 uleshort &0x04
->>>>>>12 uleshort ^0x10
->>>>>>>(18.s) uleshort x
->>>>>>>>&18 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>(18.s) uleshort x
->>>>>>>>&18 string x \b, %-.8s
->>>>>>>>>&1 string x \b.%-.3s
->>>>12 uleshort &0x02
->>>>>12 uleshort ^0x04
->>>>>>12 uleshort ^0x10
->>>>>>>20 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>20 string x \b, %-.8s
->>>>>>>>&1 string x \b.%-.3s
->>>>>12 uleshort &0x04
->>>>>>12 uleshort ^0x10
->>>>>>>(20.s) uleshort x
->>>>>>>>&20 string x \b, %-.8s
->>>>>>12 uleshort &0x10
->>>>>>>(20.s) uleshort x
->>>>>>>>&20 string x \b, %-.8s
->>>>>>>>>&1 string x \b.%-.3s
-# 2 bytes: length of data + mentioned bytes
-#
-# SZDD variant Haruhiko Okumura's LZSS or 7z type MsLZ
-# URL: http://fileformats.archiveteam.org/wiki/MS-DOS_installation_compression
-# Reference: http://www.cabextract.org.uk/libmspack/doc/szdd_kwaj_format.html
-# http://mark0.net/download/triddefs_xml.7z/defs/s/szdd.trid.xml
-# Note: called "Microsoft SZDD compressed (Haruhiko Okumura's LZSS)" by TrID
-# verified by 7-Zip `7z l -tMsLZ -slt *.??_` as MsLZ
-# `deark -l -m lzss_oku -d2 setup-1-41.bin` as "LZSS.C by Haruhiko Okumura"
->0 string SZDD MS Compress archive data, SZDD variant
-# 2nd part of signature
-#>>4 ubelong 0x88F02733 \b, SIGNATURE OK
-!:mime application/x-ms-compress-szdd
-!:ext ??_
-# The character missing from the end of the filename (0=unknown)
->>9 string >\0 \b, %-.1s is last character of original name
-# https://www.betaarchive.com/forum/viewtopic.php?t=26161
-# Compression mode: "A" (0x41) found but sometimes "B" in Windows 3.1 builds 026 and 034e
->>8 string !A \b, %-.1s method
->>10 ulelong >0 \b, original size: %u bytes
-# Summary: InstallShield archive with SZDD compressed
-# URL: https://community.flexera.com/t5/InstallShield-Knowledge-Base/InstallShield-Redistributable-Files/ta-p/5647
-# From: Joerg Jenderek
-1 search/48/bs SZDD\x88\xF0\x27\x33 InstallShield archive
-#!:mime application/octet-stream
-!:mime application/x-installshield-compress-szdd
-!:ext ibt
-# name of compressed archive member like: setup.dl_ _setup7int.dl_ _setup2k.dl_ _igdi.dl_ cabinet.dl_
->0 string x %s
-# name of uncompressed archive member like: setup.dll _Setup.dll IGdi.dll CABINET.DLL
->>&1 string x (%s)
-# probably version like: 9.0.0.333 9.1.0.429 11.50.0.42618
->>>&1 string x \b, version %s
-# SZDD member length like: 168048 169333 181842
->>>>&1 string x \b, %s bytes
-# MS Compress archive data
-#>&0 string SZDD \b, SIGNATURE FOUND
->&0 indirect x
-# QBasic SZDD variant
-3 string \x88\xf0\x27
->0 string SZ\x20 MS Compress archive data, QBasic variant
-!:mime application/x-ms-compress-sz
-!:ext ??$
->>8 ulelong >0 \b, original size: %u bytes
-
-# Summary: lzss compressed/EDI Pack
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/EDI_Install_packed_file
-# Note: called "EDI Install LZS compressed data" by TrID and verified by
-# command like `deark -l -m edi_pack -d2 BOOK01A.IC$` as "EDI Pack LZSS1"
-0 string EDILZSS
->7 string 1
-# look for point character before original file name extension
->>8 search/9/b .
-# check suffix of possible original file name
-#>>>&0 ubelong x SUFFIX=%8.8x
-# samples without valid character after point in original file name field like: FENNEL.LZS PLANTAIN.LZS
->>>&0 ubyte <0x20
->>>>0 use edi-lzs
-# samples with valid character after point in original file name field
->>>&0 ubyte >0x1F
-# check 2nd character of suffix
-#>>>>&0 ubyte x 2ND_SUFFIX=%x
-# sample with one valid character after point followed by \0 in original file name field like: SPELMATE.H$
->>>>&0 ubyte =0
->>>>>0 use edi-pack
->>>>&0 ubyte >0x1F
-# check 3rd character of suffix
-#>>>>>&0 ubyte x 3RD_SUFFIX=%x
-# no sample with 2 valid characters after point followed by \0 in original file name field
->>>>>&0 ubyte =0
->>>>>>0 use edi-pack
-# samples with valid 3rd character after point in original file name field
->>>>>&0 ubyte >0x1F
-# sample with 3 valid characters after point followed by \0 in original file name field like: BOOK01A.IC$ CTL3D.DL$
->>>>>>&0 ubyte =0
->>>>>>>0 use edi-pack
-# sample with 3 valid characters after point not followed by \0 in original file name field like: HERBTEXT.LZS
->>>>>>&0 ubyte !0
->>>>>>>0 use edi-lzs
-# no sample with invalid 3rd character after point in original file name field
->>>>>&0 default x
->>>>>>0 use edi-lzs
-# sample with invalid 2nd character after point in original file name field like: LACERATE.LZS SPLINTER.LZS
->>>>&0 default x
->>>>>0 use edi-lzs
-# sample without point character in original file name field like GUNSHOT.LZS
->>8 default x
->>>0 use edi-lzs
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/e/edi-lzss2.trid.xml
-# Note: called "EDI Install Pro LZSS2 compressed data" by TrID and verified by
-# command like `deark -l -m edi_pack -d2 4WAY.WA$` as "EDI Pack LZSS2"
->7 string 2 EDI LZSS2 packed
-#!:mime application/octet-stream
-!:mime application/x-edi-pack-lzss
-# the name of a compressed file often ends in character '$' or '_'
-!:ext ??$/??_
-# original filename, NUL-terminated, padded to 13 bytes like: mci.vbx 4way.wav skymap.exe cmdialog.vbx
->>8 string x "%-0.13s"
-# original file size, as a 4-byte integer.
->>21 ulelong x \b, %u bytes
-# compressed data like: ff5249464606ec00 ff4d5aa601010000
->>>25 ubequad x \b, data %#16.16llx...
-0 name edi-pack
-# Note: verified by command like `deark -l -d2 SPELMATE.H$` as "EDI Pack LZSS1"
-# original filename, NUL-terminated, padded to 13 bytes like: ctl3d.dll spelmate.h filemenu.rc owl.def index-it.exe
-# but not like \377Aloe.lzs\273 (HERBTEXT.LZS)
->8 string x EDI LZSS packed "%-.13s"
-#!:mime application/octet-stream
-!:mime application/x-edi-pack-lzss
-# the name of a compressed file often ends in character '$' or '_'
-!:ext ??$/?$
-# compressed data like: f7000001eff02020 ff4d5aa900020000 ff2f2a207370656c
->21 ubequad x \b, data %#16.16llx...
-# URL: http://fileformats.archiveteam.org/wiki/EDI_LZSSLib
-# Note: verified partly by command like `deark -l -m edi_pack -d2 GUNSHOT.LZS` as "EDI LZSSLib"
-0 name edi-lzs
-# Note: verified by command like `deark -l -d2 GUNSHOT.LZS` as "EDI LZSSLib"
-# no original filename; the field looks like: \277BM\226.\0 \277BM.n\001 \277BM\226.\0 \277BM.g\001 \377Aloe.lzs\273
->8 string x EDI LZSSLib packed
-#!:mime application/octet-stream
-!:mime application/x-edi-pack-lzss
-# The name of a compressed file ends with LZS suffix
-!:ext lzs
-# compressed data like: bf424df6e10100f3 ff416c6f652e6c7a ff416c6f652e6c7a
->8 ubequad x \b, data %#16.16llx...
-
-# Summary: CAZIP compressed file
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/CAZIP
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/caz.trid.xml
-# Note: Format is distinct from CAZIPXP compressed
-0 string \x0D\x0A\x1ACAZIP CAZIP compressed file
-#!:mime application/octet-stream
-!:mime application/x-compress-cazip
-# like: BLINKER.WR_ CLIPDEFS._ CAOSETUP.EX_ CLIPPER.EX_ FILEIO.C_
-!:ext ??_/?_/_
-
-# Summary: FTCOMP compressed archive
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/FTCOMP
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-ftcomp.trid.xml
-# Note: called by TrID "FTCOMP compressed archive"
-# extracted by `unpack seahelp.hl_`
-24 string/b FTCOMP FTCOMP compressed archive
-#!:mime application/octet-stream
-!:mime application/x-compress-ftcomp
-!:ext ??_/??@/dll/drv/pk2/
-# probably A596FDFF magic at the beginning
->0 ubelong !0xA596FDFF \b, at beginning %#x
-# probably original file name with directory like: \OS2\unpack.exe \SYSTEM\8514.DRV MAHJONGG.EXE
->41 string x "%s"
-
-# MP3 (archiver, not lossy audio compression)
-0 string MP3\x1a MP3-Archiver archive data
-# ZET
-0 string OZ\xc3\x9d ZET archive data
-# TSComp
-0 string \x65\x5d\x13\x8c\x08\x01\x03\x00 TSComp archive data
-# ARQ
-0 string gW\4\1 ARQ archive data
-# Squash
-3 string OctSqu Squash archive data
-# Terse
-0 string \5\1\1\0 Terse archive data
-# UHarc
-0 string UHA UHarc archive data
-# ABComp
-0 string \2AB ABComp archive data
-0 string \3AB2 ABComp archive data
-# CMP
-0 string CO\0 CMP archive data
-# Splint
-0 string \x93\xb9\x06 Splint archive data
-# InstallShield
-0 string \x13\x5d\x65\x8c InstallShield Z archive Data
-# Gather
-1 string GTH Gather archive data
-# BOA
-0 string BOA BOA archive data
-# RAX
-0 string ULEB\xa RAX archive data
-# Xtreme
-0 string ULEB\0 Xtreme archive data
-# Pack Magic
-0 string @\xc3\xa2\1\0 Pack Magic archive data
-# BTS
-0 belong&0xfeffffff 0x1a034465 BTS archive data
-# ELI 5750
-0 string Ora\ ELI 5750 archive data
-# QFC
-0 string \x1aFC\x1a QFC archive data
-0 string \x1aQF\x1a QFC archive data
-# PRO-PACK https://www.segaretro.org/Rob_Northen_compression
-0 string RNC
->3 byte 1 PRO-PACK archive data (compression 1)
->3 byte 2 PRO-PACK archive data (compression 2)
-# 777
-0 string 777 777 archive data
-# LZS221
-0 string sTaC LZS221 archive data
-# HPA
-0 string HPA HPA archive data
-# Arhangel
-0 string LG Arhangel archive data
-# EXP1, uses bzip2
-0 string 0123456789012345BZh EXP1 archive data
-# IMP
-0 string IMP\xa IMP archive data
-# NRV
-0 string \x00\x9E\x6E\x72\x76\xFF NRV archive data
-# Squish
-0 string \x73\xb2\x90\xf4 Squish archive data
-# Par
-0 string PHILIPP Par archive data
-0 string PAR Par archive data
-# HIT
-0 string UB HIT archive data
-# SBX
-0 belong&0xfffff000 0x53423000 SBX archive data
-# NaShrink
-0 string NSK NaShrink archive data
-# SAPCAR
-0 string #\ CAR\ archive\ header SAPCAR archive data
-0 string CAR\ 2.00 SAPCAR archive data
-0 string CAR\ 2.01 SAPCAR archive data
-#!:mime application/octet-stream
-!:mime application/vnd.sar
-!:ext sar
-# Disintegrator
-0 string DST Disintegrator archive data
-# ASD
-0 string ASD ASD archive data
-# InstallShield CAB
-# Update: Joerg Jenderek at Nov 2021
-# URL: https://en.wikipedia.org/wiki/InstallShield
-# Reference: https://github.com/twogood/unshield/blob/master/lib/cabfile.h
-# Note: Not compatible with Microsoft CAB files
-# http://mark0.net/download/triddefs_xml.7z/defs/a/ark-cab-ishield.trid.xml
-# CAB_SIGNATURE 0x28635349
-0 string ISc( InstallShield
-#!:mime application/octet-stream
-!:mime application/x-installshield
-# http://mark0.net/download/triddefs_xml.7z/defs/a/ark-cab-ishield-hdr.trid.xml
->16 ulelong !0 setup header
-# like: _SYS1.HDR _USER1.HDR data1.hdr
-!:ext hdr
->16 ulelong =0 CAB
-# like: _SYS1.CAB _USER1.CAB DATA1.CAB data2.cab
-!:ext cab
-# https://github.com/twogood/unshield/blob/master/lib/helper.c
-# version like: 0x1005201 0x100600c 0x1007000 0x1009500
-# 0x2000578 0x20005dc 0x2000640 0x40007d0 0x4000834
->4 ulelong x \b, version %#x
-# volume_info like: 0
->8 ulelong !0 \b, volume_info %#x
-# cab_descriptor_offset like: 0x200
->12 ulelong !0x200 \b, offset %#x
-#>0x200 ubequad x \b, at 0x200 %#16.16llx
-# cab_descriptor_size like: 0 (*.cab) BD5 C8B DA5 E2A E36 116C 251D 4DA9 56F0 5CC2 6E4B 777D 779E 1F7C2
->16 ulelong !0 \b, descriptor size %#x
-# TOP4
-0 string T4\x1a TOP4 archive data
-# BatComp left out: sig looks like COM executable
-# so TODO: get real 4dos batcomp file and find sig
-# BlakHole
-0 string BH\5\7 BlakHole archive data
-# BIX
-0 string BIX0 BIX archive data
-# ChiefLZA
-0 string ChfLZ ChiefLZA archive data
-# Blink
-0 string Blink Blink archive data
-# Logitech Compress
-0 string \xda\xfa Logitech Compress archive data
-# ARS-Sfx (FIXME: really an SFX? then goto COM/EXE)
-1 string (C)\ STEPANYUK ARS-Sfx archive data
-# AKT/AKT32
-0 string AKT32 AKT32 archive data
-0 string AKT AKT archive data
-# NPack
-0 string MSTSM NPack archive data
-# PFT
-0 string \0\x50\0\x14 PFT archive data
-# SemOne
-0 string SEM SemOne archive data
-# PPMD
-0 string \x8f\xaf\xac\x84 PPMD archive data
-# FIZ
-0 string FIZ FIZ archive data
-# MSXiE
-0 belong&0xfffff0f0 0x4d530000 MSXiE archive data
-# DeepFreezer
-0 belong&0xfffffff0 0x797a3030 DeepFreezer archive data
-# DC
-0 string =<DC- DC archive data
-# TPac
-0 string \4TPAC\3 TPac archive data
-# Ai
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Ai_Archiver
-0 string Ai\1\1\0 Ai archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-ai
-!:ext ai
-0 string Ai\1\0\0 Ai archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-ai
-!:ext ai
-# Ai32
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-ai.trid.xml
-# Note: called "Ai Archivator compressed archive" by TrID
-0 string Ai\2\0 Ai32 archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-ai
-!:ext ai
-# original file name
->8 pstring/h x "%s"
-# according to TrID the next 3 bytes are nil
->5 ubyte !0 \b, at 5 %#x
->6 ubyte !0 \b, at 6 %#x
->7 ubyte !0 \b, at 7 %#x
-# the fourth byte with value 0 is probably a flag for "non solid" mode
-#>3 ubyte =0x00 \b, unsolid mode
-0 string Ai\2\1 Ai32 archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-ai
-!:ext ai
-# original file name
->8 pstring/h x "%s"
-# the fourth byte with value 0x01 is probably a flag for "solid" mode; this is not the default
->3 ubyte =0x01 \b, solid mode
-# SBC
-0 string SBC SBC archive data
-# Ybs
-0 string YBS Ybs archive data
-# DitPack
-0 string \x9e\0\0 DitPack archive data
-# DMS
-0 string DMS! DMS archive data
-# EPC
-0 string \x8f\xaf\xac\x8c EPC archive data
-# VSARC
-0 string VS\x1a VSARC archive data
-# PDZ
-0 string PDZ PDZ archive data
-# ReDuq
-0 string rdqx ReDuq archive data
-# GCA
-0 string GCAX GCA archive data
-# PPMN
-0 string pN PPMN archive data
-# WinImage
-3 string WINIMAGE WinImage archive data
-# Compressia
-0 string CMP0CMP Compressia archive data
-# UHBC
-0 string UHB UHBC archive data
-# WinHKI
-0 string \x61\x5C\x04\x05 WinHKI archive data
-# WWPack data file
-0 string WWP WWPack archive data
-# BSN (BSA, PTS-DOS)
-0 string \xffBSG BSN archive data
-1 string \xffBSG BSN archive data
-3 string \xffBSG BSN archive data
-1 string \0\xae\2 BSN archive data
-1 string \0\xae\3 BSN archive data
-1 string \0\xae\7 BSN archive data
-# AIN
-0 string \x33\x18 AIN archive data
-0 string \x33\x17 AIN archive data
-# XPA32 test moved and merged with XPA by Joerg Jenderek at Sep 2015
-# SZip (TODO: doesn't catch all versions)
-0 string SZ\x0a\4 SZip archive data
-# XPack DiskImage
-# *.XDI updated by Joerg Jenderek Sep 2015
-# ftp://ftp.sac.sk/pub/sac/pack/0index.txt
-# GRR: this test is still too general as it also catches text files starting with jm
-0 string jm
-# only found examples with these 2 additional characteristic bytes
->2 string \x2\x4 Xpack DiskImage archive data
-#!:ext xdi
-# XPack Data
-# *.xpa updated by Joerg Jenderek Sep 2015
-# ftp://ftp.elf.stuba.sk/pub/pc/pack/
-0 string xpa XPA
-!:ext xpa
-# XPA32
-# ftp://ftp.elf.stuba.sk/pub/pc/pack/xpa32.zip
-# created by XPA32.EXE version 1.0.2 for Windows
->0 string xpa\0\1 \b32 archive data
-# created by XPACK.COM version 1.67m or 1.67r with short 0x1800
->3 ubeshort !0x0001 \bck archive data
-# XPack Single Data
-# changed by Joerg Jenderek Sep 2015 back to the form used in version 5.12
-# letter 'I'+ acute accent is equivalent to \xcd
-0 string \xcd\ jm Xpack single archive data
-#!:mime application/x-xpa-compressed
-!:ext xpa
-
-# TODO: missing due to unknown magic/magic at end of file:
-#DWC
-#ARG
-#ZAR
-#PC/3270
-#InstallIt
-#RKive
-#RK
-#XPack Diskimage
-
-# These were inspired by idarc, but actually verified
-# Dzip archiver (.dz)
-# Update: Joerg Jenderek
-# URL: http://speeddemosarchive.com/dzip/
-# reference: http://speeddemosarchive.com/dzip/dz29src.zip/main.c
-# GRR: line below is too general as it also matches ASCII texts like the Doszip Commander help dz.txt
-0 string DZ
-# latest version is 2.9 dated 7 may 2003
->2 byte <4 Dzip archive data
-!:mime application/x-dzip
-!:ext dz
->>2 byte x \b, version %i
->>3 byte x \b.%i
->>4 ulelong x \b, offset %#x
->>8 ulelong x \b, %u files
-# ZZip archiver (.zz)
-0 string ZZ\ \0\0 ZZip archive data
-0 string ZZ0 ZZip archive data
-# PAQ archiver (.paq)
-0 string \xaa\x40\x5f\x77\x1f\xe5\x82\x0d PAQ archive data
-0 string PAQ PAQ archive data
->3 byte&0xf0 0x30
->>3 byte x (v%c)
-# JAR archiver (.j), this is the successor to ARJ, not Java's JAR (which is essentially ZIP)
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/JAR_(ARJ_Software)
-# reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-jar.trid.xml
-# https://www.sac.sk/download/pack/jar102x.exe/TECHNOTE.DOC
-# Note: called "JAR compressed archive" by TrID
-0xe string \x1aJar\x1b JAR (ARJ Software, Inc.) archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-j
->0 ulelong x \b, CRC32 %#x
-# standard suffix is ".j"; multi-volume parts follow the order j01 j02 ... j99 100 ... 990
-!:ext j/j01/j02
-# URL: http://fileformats.archiveteam.org/wiki/JARCS
-# reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-jarcs.trid.xml
-# Note: called "JARCS compressed archive" by TrID
-0 string JARCS JAR (ARJ Software, Inc.) archive data
-#!:mime application/octet-stream
-!:mime application/x-compress-jar
-!:ext jar
-
-# ARJ archiver (jason@jarthur.Claremont.EDU)
-# URL: http://fileformats.archiveteam.org/wiki/ARJ
-# reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-arj.trid.xml
-# https://github.com/FarGroup/FarManager/
-# blob/master/plugins/multiarc/arc.doc/arj.txt
-# Note: called "ARJ compressed archive" by TrID and
-# "ARJ File Format" by DROID via PUID fmt/610
-# verified by `7z l -tarj PHRACK1.ARJ` and
-# `arj.exe l TEST-hk9.ARJ`
-0 leshort 0xea60
-# skip DROID fmt-610-signature-id-946.arj by checking for a valid file type in the main header
->0xA ubyte 2
->>0 use arj-archive
-0 name arj-archive
->0 leshort x ARJ archive
-!:mime application/x-arj
-# look for terminating 0-character of filename
->0x26 search/1024 \0
-# file name extension is normally .arj but not for parts of a multi-volume archive
-#>>&-5 string x extension %.4s
->>&-5 string/c .arj data
-!:ext arj
->>&-5 default x
-# for multi-volume archives the first part is named archive.arj, the following parts archive.a01 archive.a02 ...
->>>8 byte &0x04 data
-!:ext a01/a02
-# for SFX archives the first part is named archive.exe, the following parts archive.e01 archive.e02 ...
->>>8 byte ^0x04 data, SFX multi-volume
-!:ext e01/e02
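-# Example of the relative test above (illustrative): for a stored name ending
-# in ".ARJ" the search finds the terminating 0-character, and &-5 backs up
-# over that terminator plus the four extension characters, so the
-# case-insensitive ".arj" test matches; any other ending falls through to the
-# default branch, where the volume flag selects the a01/a02 or e01/e02
-# extensions.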
-# basic header size like: 0x002b 0x002c 0x04e0 0x04e3 0x04e7
-#>2 uleshort x basic header size %#4.4x
-# next fragment content like: 0x0a200a003a8fc713 0x524a000010bb3471 0x524a0000c73c70f9
-#>(2.s) ubequad x NEXT FRAGMENT CONTENT %#16.16llx
-# first_hdr_size; seems to be same as basic header size
-#>2 uleshort x 1st header size %#x
-# archiver version number like: 3 4 6 11 102
->5 byte x \b, v%d
-# minimum archiver version to extract like: 1
->6 ubyte !1 \b, minimum %u to extract
-# FOR DEBUGGING
-#>8 byte x \b, FLAGS %#x
-# GARBLED_FLAG1; garble with password; g switch
->8 byte &0x01 \b, password-protected
-# encryption version: 0~old 1~old 2~new 3~reserved 4~40 bit key GOST
->>0x20 ubyte x (v%u)
-#>8 byte &0x02 \b, secured
-# ANSIPAGE_FLAG; indicates ANSI codepage used by ARJ32; hy switch
->8 byte &0x02 \b, ANSI codepage
-# VOLUME_FLAG indicates presence of succeeding volume; but apparently not for SFX
->8 byte &0x04 \b, multi-volume
-#>8 byte &0x08 \b, file-offset
-# ARJPROT_FLAG; build with data protection record; hk switch
->8 byte &0x08 \b, recoverable
-# arj protection factor; maximal 10; switch hky -> factor=y+1
->>0x22 byte x (factor %u)
->8 byte &0x10 \b, slash-switched
-# BACKUP_FLAG; obsolete
->8 byte &0x20 \b, backup
-# SECURED_FLAG;
->8 byte &0x40 \b, secured,
-# ALTNAME_FLAG; indicates dual-name archive
->8 byte &0x80 \b, dual-name
-# security version; 0~old 2~current
->9 ubyte !0
->>9 ubyte !2 \b, security version %u
-# file type; 2 in main header; 0~binary 1~7-bitText 2~comment 3~directory 4~VolumeLabel 5~ChapterLabel
->0xA ubyte !2 \b, file type %u
-# date+time when original archive was created in MS-DOS format via ./msdos
->0xC ulelong x \b, created
->0xC use dos-date
-# or date and time by new internal function
-#>0xE lemsdosdate x %s
-#>0xC lemsdostime x %s
-# FOR DEBUGGING
-#>0x12 uleshort x RAW DATE %#4.4x
-#>0x10 uleshort x RAW TIME %#4.4x
-# date+time when archive was last modified; sometimes nil or
-# maybe wrong like in HP4DRVR.ARJ
-#>0x10 ulelong >0 \b, modified
-#>>0x10 use dos-date
-# or date and time by new internal function
-#>>0x12 lemsdosdate x %s
-#>>0x10 lemsdostime x %s
-# archive size (currently used only for secured archives); MAYBE?
-#>0x14 ulelong !0 \b, file size %u
-# security envelope file position; MAYBE?
-#>0x18 ulelong !0 \b, at %#x security envelope
-# filespec position in filename; WHAT IS THAT?
-#>0x1C uleshort >0 \b, filespec position %#x
-# length in bytes of security envelope data like: 2CAh 301h 364h 471h
->0x1E uleshort !0 \b, security envelope length %#x
-# last chapter like: 0 1
->0x21 ubyte !0 \b, last chapter %u
-# filename (null-terminated string); sometimes at 0x26 when 4 bytes of extra data are present
->34 byte x \b, original name:
-# with extra data
->34 byte <0x0B
->>38 string x %s
-# without extra data
->34 byte >0x0A
->>34 string x %s
-# host OS: 0~MSDOS ... 11~WIN32
->7 byte 0 \b, os: MS-DOS
->7 byte 1 \b, os: PRIMOS
->7 byte 2 \b, os: Unix
->7 byte 3 \b, os: Amiga
->7 byte 4 \b, os: Macintosh
->7 byte 5 \b, os: OS/2
->7 byte 6 \b, os: Apple ][ GS
->7 byte 7 \b, os: Atari ST
->7 byte 8 \b, os: NeXT
->7 byte 9 \b, os: VAX/VMS
->7 byte 10 \b, os: WIN95
->7 byte 11 \b, os: WIN32
-# [JW] idarc says this is also possible
-2 leshort 0xea60 ARJ archive data
-#2 leshort 0xea60
-#>2 use arj-archive
-
-# HA archiver (Greg Roelofs, newt@uchicago.edu)
-# This is a really bad format. A file containing HAWAII will match this...
-#0 string HA HA archive data,
-#>2 leshort =1 1 file,
-#>2 leshort >1 %hu files,
-#>4 byte&0x0f =0 first is type CPY
-#>4 byte&0x0f =1 first is type ASC
-#>4 byte&0x0f =2 first is type HSC
-#>4 byte&0x0f =0x0e first is type DIR
-#>4 byte&0x0f =0x0f first is type SPECIAL
-# suggestion: at least identify small archives (<1024 files)
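-# Sketch of the masked test below: the big-endian long keeps the "HA" magic
-# bytes in full and, through the final 0xfc of the mask, only the upper bits
-# of the high byte of the little-endian file count at offset 2; counts up to
-# 1023 therefore still yield 0x48410000, while a file starting with "HAWAII"
-# (byte 3 = 'A' = 0x41) no longer matches.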
-0 belong&0xffff00fc 0x48410000 HA archive data
->2 leshort =1 1 file,
->2 leshort >1 %u files,
->4 byte&0x0f =0 first is type CPY
->4 byte&0x0f =1 first is type ASC
->4 byte&0x0f =2 first is type HSC
->4 byte&0x0f =0x0e first is type DIR
->4 byte&0x0f =0x0f first is type SPECIAL
-
-# HPACK archiver (Peter Gutmann, pgut1@cs.aukuni.ac.nz)
-0 string HPAK HPACK archive data
-
-# JAM Archive volume format, by Dmitry.Kohmanyuk@UA.net
-0 string \351,\001JAM\ JAM archive,
->7 string >\0 version %.4s
->0x26 byte =0x27 -
->>0x2b string >\0 label %.11s,
->>0x27 lelong x serial %08x,
->>0x36 string >\0 fstype %.8s
-
-# LHARC/LHA archiver (Greg Roelofs, newt@uchicago.edu)
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/LHA_(file_format)
-# Reference: https://web.archive.org/web/20021005080911/http://www.osirusoft.com/joejared/lzhformat.html
-#
-# check and display information of lharc (LHa,PMarc) file
-0 name lharc-file
-# check 1st character of method id like -lz4- -lh5- or -pm2-
->2 string -
-# check 5th character of method id
->>6 string -
-# check header level 0 1 2 3
->>>20 ubyte <4
-# check 2nd, 3rd and 4th character of method id
->>>>3 regex \^(lh[0-9a-ex]|lz[s2-8]|pm[012]|pc1) \b
-!:mime application/x-lzh-compressed
-# creator type "LHA "
-!:apple ????LHA
-# display archive type name like "LHa/LZS archive data" or "LArc archive"
->>>>>2 string -lz \b
-!:ext lzs
-# already known -lzs- -lz4- -lz5- with old names
->>>>>>2 string -lzs LHa/LZS archive data
->>>>>>3 regex \^lz[45] LHarc 1.x archive data
-# missing -lz?- with wikipedia names
->>>>>>3 regex \^lz[2378] LArc archive
-# display archive type name like "LHa (2.x) archive data"
->>>>>2 string -lh \b
-# already known -lh0- -lh1- -lh2- -lh3- -lh4- -lh5- -lh6- -lh7- -lhd- variants with old names
->>>>>>3 regex \^lh[01] LHarc 1.x/ARX archive data
-# LHice archiver uses ".ICE" as name extension instead of the usual ".lzh"
-# FOOBAR archiver uses ".foo" as name extension instead of the usual one
-# "Florian Orjanov's and Olga Bachetska's ARchiver" not found at the moment
->>>>>>>2 string -lh1 \b
-!:ext lha/lzh/ice
->>>>>>3 regex \^lh[23d] LHa 2.x? archive data
->>>>>>3 regex \^lh[7] LHa (2.x)/LHark archive data
->>>>>>3 regex \^lh[456] LHa (2.x) archive data
->>>>>>>2 string -lh5 \b
-# https://en.wikipedia.org/wiki/BIOS
-# Some mainboard BIOSes like Award use LHa compression, so archives with unusual extensions are found, like
-# bios.rom, kd7_v14.bin, 1010.004, ...
-!:ext lha/lzh/rom/bin
-# missing -lh?- variants (Joe Jared)
->>>>>>3 regex \^lh[89a-ce] LHa (Joe Jared) archive
-# UNLHA32 2.67a
->>>>>>2 string -lhx LHa (UNLHA32) archive
-# lha archives with standard file name extensions ".lha" ".lzh"
->>>>>>3 regex !\^(lh1|lh5) \b
-!:ext lha/lzh
-# this should not happen if all -lh variants are described
->>>>>>2 default x LHa (unknown) archive
-#!:ext lha
-# PMarc
->>>>>3 regex \^pm[012] PMarc archive data
-!:ext pma
-# append method id without leading and trailing minus character
->>>>>3 string x [%3.3s]
->>>>>>0 use lharc-header
-#
-# check and display information of lharc header
-0 name lharc-header
-# header size 0x4 , 0x1b-0x61
->0 ubyte x
-# compressed data size != compressed file size
-#>7 ulelong x \b, data size %d
-# attribute: 0x2~?? 0x10~symlink|target 0x20~normal
-#>19 ubyte x \b, 19_%#x
-# level identifier 0 1 2 3
-#>20 ubyte x \b, level %d
-# time stamp
-#>15 ubelong x DATE %#8.8x
-# OS ID for level 1
->20 ubyte 1
-# types below 0x21 found for *.rom files
->>(21.b+24) ubyte <0x21 \b, %#x OS
-# ascii type like M for MSDOS
->>(21.b+24) ubyte >0x20 \b, '%c' OS
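-# Sketch of the (21.b+24) indirection: the level 1 header stores the name
-# length at offset 21 and the name itself from 22 on, apparently followed by
-# a 2-byte CRC, so the OS ID byte lands at 22 + name length + 2, i.e. name
-# length + 24.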
-# OS ID for level 2
->20 ubyte 2
-#>>23 ubyte x \b, OS ID %#x
->>23 ubyte <0x21 \b, %#x OS
->>23 ubyte >0x20 \b, '%c' OS
-# filename only for level 0 and 1
->20 ubyte <2
-# length of filename
->>21 ubyte >0 \b, with
-# filename
->>>21 pstring x "%s"
-#
-#2 string -lh0- LHarc 1.x/ARX archive data [lh0]
-#!:mime application/x-lharc
-2 string -lh0-
->0 use lharc-file
-#2 string -lh1- LHarc 1.x/ARX archive data [lh1]
-#!:mime application/x-lharc
-2 string -lh1-
->0 use lharc-file
-# NEW -lz2- ... -lz8-
-2 string -lz2-
->0 use lharc-file
-2 string -lz3-
->0 use lharc-file
-2 string -lz4-
->0 use lharc-file
-2 string -lz5-
->0 use lharc-file
-2 string -lz7-
->0 use lharc-file
-2 string -lz8-
->0 use lharc-file
-# [never seen any but the last; -lh4- reported in comp.compression:]
-#2 string -lzs- LHa/LZS archive data [lzs]
-2 string -lzs-
->0 use lharc-file
-# According to wikipedia and others such a version does not exist
-#2 string -lh\40- LHa 2.x? archive data [lh ]
-#2 string -lhd- LHa 2.x? archive data [lhd]
-2 string -lhd-
->0 use lharc-file
-#2 string -lh2- LHa 2.x? archive data [lh2]
-2 string -lh2-
->0 use lharc-file
-#2 string -lh3- LHa 2.x? archive data [lh3]
-2 string -lh3-
->0 use lharc-file
-#2 string -lh4- LHa (2.x) archive data [lh4]
-2 string -lh4-
->0 use lharc-file
-#2 string -lh5- LHa (2.x) archive data [lh5]
-2 string -lh5-
->0 use lharc-file
-#2 string -lh6- LHa (2.x) archive data [lh6]
-2 string -lh6-
->0 use lharc-file
-#2 string -lh7- LHa (2.x)/LHark archive data [lh7]
-2 string -lh7-
-# !:mime application/x-lha
-# >20 byte x - header level %d
->0 use lharc-file
-# NEW -lh8- ... -lhe- , -lhx-
-2 string -lh8-
->0 use lharc-file
-2 string -lh9-
->0 use lharc-file
-2 string -lha-
->0 use lharc-file
-2 string -lhb-
->0 use lharc-file
-2 string -lhc-
->0 use lharc-file
-2 string -lhe-
->0 use lharc-file
-2 string -lhx-
->0 use lharc-file
-# taken from idarc [JW]
-2 string -lZ PUT archive data
-# already done by LHarc magics
-# this should never happen if all sub types of LZS archive are identified
-#2 string -lz LZS archive data
-2 string -sw1- Swag archive data
-
-0 name rar-file-header
->24 byte 15 \b, v1.5
->24 byte 20 \b, v2.0
->24 byte 29 \b, v4
->15 byte 0 \b, os: MS-DOS
->15 byte 1 \b, os: OS/2
->15 byte 2 \b, os: Win32
->15 byte 3 \b, os: Unix
->15 byte 4 \b, os: Mac OS
->15 byte 5 \b, os: BeOS
-
-0 name rar-archive-header
->3 leshort&0x1ff >0 \b, flags:
->>3 leshort &0x01 ArchiveVolume
->>3 leshort &0x02 Commented
->>3 leshort &0x04 Locked
->>3 leshort &0x10 NewVolumeNaming
->>3 leshort &0x08 Solid
->>3 leshort &0x20 Authenticated
->>3 leshort &0x40 RecoveryRecordPresent
->>3 leshort &0x80 EncryptedBlockHeader
->>3 leshort &0x100 FirstVolume
-
-# RAR (Roshal Archive) archive
-0 string Rar!\x1a\7\0 RAR archive data
-!:mime application/x-rar
-!:ext rar/cbr
-# file header
->(0xc.l+9) byte 0x74
->>(0xc.l+7) use rar-file-header
-# subblock seems to share information with file header
->(0xc.l+9) byte 0x7a
->>(0xc.l+7) use rar-file-header
->9 byte 0x73
->>7 use rar-archive-header
-
-0 string Rar!\x1a\7\1\0 RAR archive data, v5
-!:mime application/x-rar
-!:ext rar
-
-# Very old RAR archive
-# https://jasonblanks.com/wp-includes/images/papers/KnowyourarchiveRAR.pdf
-0 string RE\x7e\x5e RAR archive data (<v1.5)
-!:mime application/x-rar
-!:ext rar/cbr
-
-# SQUISH archiver (Greg Roelofs, newt@uchicago.edu)
-0 string SQSH squished archive data (Acorn RISCOS)
-
-# UC2 archiver (Greg Roelofs, newt@uchicago.edu)
-# [JW] see exe section for self-extracting version
-0 string UC2\x1a UC2 archive data
-
-# PKZIP multi-volume archive
-0 string PK\x07\x08PK\x03\x04 Zip multi-volume archive data, at least PKZIP v2.50 to extract
-!:mime application/zip
-!:ext zip/cbz
-
-# Android APK file (Zip archive)
-0 string PK\003\004
-!:strength +1
-# Starts with AndroidManifest.xml (file name length = 19)
->26 uleshort 19
->>30 string AndroidManifest.xml Android package (APK), with AndroidManifest.xml
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>-22 string PK\005\006
->>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
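-# Sketch of the (-6.l-16) indirection used here and below: with no archive
-# comment the end-of-central-directory record starts 22 bytes from the end,
-# its central directory offset field sits 6 bytes from the end, and the APK
-# Signing Block ends with the 16-byte "APK Sig Block 42" magic immediately
-# before the central directory.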
-# Starts with META-INF/com/android/build/gradle/app-metadata.properties
->26 uleshort 57
->>30 string META-INF/com/android/build/gradle/
->>>&0 string app-metadata.properties Android package (APK), with gradle app-metadata.properties
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>-22 string PK\005\006
->>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
-# Starts with classes.dex (file name length = 11)
->26 uleshort 11
->>30 string classes.dex Android package (APK), with classes.dex
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>-22 string PK\005\006
->>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
-# Starts with META-INF/MANIFEST.MF (file name length = 20)
-# NB: checks for resources.arsc, classes.dex, etc. as well to avoid matching JAR files
->26 uleshort 20
->>30 string META-INF/MANIFEST.MF
-# Contains resources.arsc (near the end, in the central directory)
->>>-512 search resources.arsc Android package (APK), with MANIFEST.MF and resources.arsc
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>-22 string PK\005\006
->>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
->>>-512 default x
-# Contains classes.dex (near the end, in the central directory)
->>>>-512 search classes.dex Android package (APK), with MANIFEST.MF and classes.dex
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>>-22 string PK\005\006
->>>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
->>>>-512 default x
-# Contains lib/armeabi (near the end, in the central directory)
->>>>>-512 search lib/armeabi Android package (APK), with MANIFEST.MF and armeabi lib
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>>>-22 string PK\005\006
->>>>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
->>>>>-512 default x
-# Contains drawables (near the end, in the central directory)
->>>>>>-512 search res/drawable Android package (APK), with MANIFEST.MF and drawables
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>>>>-22 string PK\005\006
->>>>>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
-# It may or may not be an APK file, but it's definitely a Java JAR file
->>>>>>-512 default x Java archive data (JAR)
-!:mime application/java-archive
-!:ext jar
-# Starts with zipflinger virtual entry (28 + 104 = 132 bytes)
-# See https://github.com/obfusk/apksigcopier/blob/666f5b7/apksigcopier/__init__.py#L230
->4 string \x00\x00\x00\x00\x00\x00
->>&0 string \x21\x08\x21\x02
->>>&0 string \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00
->>>>&0 string \x00\x00 Android package (APK), with zipflinger virtual entry
-!:mime application/vnd.android.package-archive
-!:ext apk
->>>>>-22 string PK\005\006
->>>>>>(-6.l-16) string APK\x20Sig\x20Block\x2042 \b, with APK Signing Block
-# APK Signing Block
->0 default x
->>-22 string PK\005\006
->>>(-6.l-16) string APK\x20Sig\x20Block\x2042 Android package (APK), with APK Signing Block
-!:mime application/vnd.android.package-archive
-!:ext apk
-
-# Zip archives (Greg Roelofs, c/o zip-bugs@wkuvx1.wku.edu)
-0 string PK\005\006 Zip archive data (empty)
-!:mime application/zip
-!:ext zip/cbz
-!:strength +1
-0 string PK\003\004
-!:strength +1
-
-# Specialised zip formats which start with a member named 'mimetype'
-# (stored uncompressed, with no 'extra field') containing the file's MIME type.
-# Check for an 8-byte name, 0-byte extra field, name "mimetype", and
-# contents starting with "application/":
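-# Worked example of the test below (no additional rule): in the local file
-# header of the first member, offset 26 holds the name length (08 00), offset
-# 28 the extra field length (00 00), offset 30 the name "mimetype" and offset
-# 38 the start of its stored contents, so "application/" ends at offset 50
-# where the vnd.* subtests pick up.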
->26 string \x8\0\0\0mimetypeapplication/
-
-# KOffice / OpenOffice & StarOffice / OpenDocument formats
-# From: Abel Cheung <abel@oaka.org>
-
-# KOffice (1.2 or above) formats
-# (mimetype contains "application/vnd.kde.<SUBTYPE>")
->>50 string vnd.kde. KOffice (>=1.2)
->>>58 string karbon Karbon document
->>>58 string kchart KChart document
->>>58 string kformula KFormula document
->>>58 string kivio Kivio document
->>>58 string kontour Kontour document
->>>58 string kpresenter KPresenter document
->>>58 string kspread KSpread document
->>>58 string kword KWord document
-
-# OpenOffice formats (for OpenOffice 1.x / StarOffice 6/7)
-# (mimetype contains "application/vnd.sun.xml.<SUBTYPE>")
-# URL: https://en.wikipedia.org/wiki/OpenOffice.org_XML
-# reference: http://fileformats.archiveteam.org/wiki/OpenOffice.org_XML
->>50 string vnd.sun.xml. OpenOffice.org 1.x
->>>62 string writer Writer
->>>>68 byte !0x2e document
-!:mime application/vnd.sun.xml.writer
-!:ext sxw
->>>>68 string .template template
-!:mime application/vnd.sun.xml.writer.template
-!:ext stw
->>>>68 string .web Web template
-!:mime application/vnd.sun.xml.writer.web
-!:ext stw
->>>>68 string .global global document
-!:mime application/vnd.sun.xml.writer.global
-!:ext sxg
->>>62 string calc Calc
->>>>66 byte !0x2e spreadsheet
-!:mime application/vnd.sun.xml.calc
-!:ext sxc
->>>>66 string .template template
-!:mime application/vnd.sun.xml.calc.template
-!:ext stc
->>>62 string draw Draw
->>>>66 byte !0x2e document
-!:mime application/vnd.sun.xml.draw
-!:ext sxd
->>>>66 string .template template
-!:mime application/vnd.sun.xml.draw.template
-!:ext std
->>>62 string impress Impress
->>>>69 byte !0x2e presentation
-!:mime application/vnd.sun.xml.impress
-!:ext sxi
->>>>69 string .template template
-!:mime application/vnd.sun.xml.impress.template
-!:ext sti
->>>62 string math Math document
-!:mime application/vnd.sun.xml.math
-!:ext sxm
->>>62 string base Database file
-!:mime application/vnd.sun.xml.base
-!:ext sdb
-
-# URL: https://wiki.openoffice.org/wiki/Documentation/DevGuide/Extensions/File_Format
-# From: Joerg Jenderek
-# Note: only a few OXT samples are detected here by the mimetype member; the format
-# is used by OpenOffice and LibreOffice and probably also NeoOffice
-# verified by `unzip -Zv *.oxt` or `7z l -slt *.oxt`
->>50 string vnd.openofficeorg. OpenOffice
->>>68 string extension \b/LibreOffice Extension
-# http://extension.nirsoft.net/oxt
-!:mime application/vnd.openofficeorg.extension
-# like: Gallery-Puzzle.2.1.0.1.oxt
-!:ext oxt
-
-# OpenDocument formats (for OpenOffice 2.x / StarOffice >= 8)
-# URL: http://fileformats.archiveteam.org/wiki/OpenDocument
-# https://lists.oasis-open.org/archives/office/200505/msg00006.html
-# (mimetype contains "application/vnd.oasis.opendocument.<SUBTYPE>")
->>50 string vnd.oasis.opendocument. OpenDocument
->>>73 string text
->>>>77 byte !0x2d Text
-!:mime application/vnd.oasis.opendocument.text
-!:ext odt
->>>>77 string -template Text Template
-!:mime application/vnd.oasis.opendocument.text-template
-!:ext ott
->>>>77 string -web HTML Document Template
-!:mime application/vnd.oasis.opendocument.text-web
-!:ext oth
->>>>77 string -master
->>>>>84 byte !0x2d Master Document
-!:mime application/vnd.oasis.opendocument.text-master
-!:ext odm
->>>>>84 string -template Master Template
-!:mime application/vnd.oasis.opendocument.text-master-template
-!:ext otm
->>>73 string graphics
->>>>81 byte !0x2d Drawing
-!:mime application/vnd.oasis.opendocument.graphics
-!:ext odg
->>>>81 string -template Drawing Template
-!:mime application/vnd.oasis.opendocument.graphics-template
-!:ext otg
->>>73 string presentation
->>>>85 byte !0x2d Presentation
-!:mime application/vnd.oasis.opendocument.presentation
-!:ext odp
->>>>85 string -template Presentation Template
-!:mime application/vnd.oasis.opendocument.presentation-template
-!:ext otp
->>>73 string spreadsheet
->>>>84 byte !0x2d Spreadsheet
-!:mime application/vnd.oasis.opendocument.spreadsheet
-!:ext ods
->>>>84 string -template Spreadsheet Template
-!:mime application/vnd.oasis.opendocument.spreadsheet-template
-!:ext ots
->>>73 string chart
->>>>78 byte !0x2d Chart
-!:mime application/vnd.oasis.opendocument.chart
-!:ext odc
->>>>78 string -template Chart Template
-!:mime application/vnd.oasis.opendocument.chart-template
-!:ext otc
->>>73 string formula
->>>>80 byte !0x2d Formula
-!:mime application/vnd.oasis.opendocument.formula
-!:ext odf
->>>>80 string -template Formula Template
-!:mime application/vnd.oasis.opendocument.formula-template
-!:ext otf
-# https://www.loc.gov/preservation/digital/formats/fdd/fdd000441.shtml
->>>73 string database Database
-!:mime application/vnd.oasis.opendocument.database
-!:ext odb
-# Valid for LibreOffice Base 6.0.1.1 at least
->>>73 string base Database
-# https://bugs.documentfoundation.org/show_bug.cgi?id=45854
-!:mime application/vnd.oasis.opendocument.base
-!:ext odb
->>>73 string image
->>>>78 byte !0x2d Image
-!:mime application/vnd.oasis.opendocument.image
-!:ext odi
->>>>78 string -template Image Template
-!:mime application/vnd.oasis.opendocument.image-template
-!:ext oti
-
-# EPUB (OEBPS) books using OCF (OEBPS Container Format)
-# https://www.idpf.org/ocf/ocf1.0/download/ocf10.htm, section 4.
-# From: Ralf Brown <ralf.brown@gmail.com>
->>50 string epub+zip EPUB document
-!:mime application/epub+zip
-
-# From: Hajin Jang <jb6804@naver.com>
-# hwpx (OWPML) document format follows OCF specification.
-# Hangul Word Processor 2010+ supports HWPX format.
-# URL: https://www.hancom.com/etc/hwpDownload.do
-# https://standard.go.kr/KSCI/standardIntro/getStandardSearchView.do?menuId=503&topMenuId=502&ksNo=KSX6101
-# https://e-ks.kr/streamdocs/view/sd;streamdocsId=72059197557727331
->>50 string hwp+zip Hancom HWP (Hangul Word Processor) file, HWPX
-!:mime application/x-hwp+zip
-!:ext hwpx
-
-# From: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/CorelDRAW
-# NOTE: versions up to 2 are WL-based; versions 3 to 13 are handled by ./riff; from 14 on ZIP-based
->>50 string x-vnd.corel. Corel
->>>62 string draw.document+zip Draw drawing, version 14-16
-!:mime application/x-vnd.corel.draw.document+zip
-!:ext cdr
->>>62 string draw.template+zip Draw template, version 14-16
-!:mime application/x-vnd.corel.draw.template+zip
-!:ext cdrt
->>>62 string zcf.draw.document+zip Draw drawing, version 17-22
-!:mime application/x-vnd.corel.zcf.draw.document+zip
-!:ext cdr
->>>62 string zcf.draw.template+zip Draw template, version 17-22
-!:mime application/x-vnd.corel.zcf.draw.template+zip
-!:ext cdt/cdrt
-# URL: http://product.corel.com/help/CorelDRAW/540240626/Main/EN/Doc/CorelDRAW-Other-file-formats.html
->>>62 string zcf.pattern+zip Draw pattern, version 22
-!:mime application/x-vnd.corel.zcf.pattern+zip
-!:ext pat
-# URL: https://en.wikipedia.org/wiki/Corel_Designer
-# Reference: http://fileformats.archiveteam.org/wiki/Corel_Designer
-# Note: called by TrID "Corel DESIGN graphics"
->>>62 string designer.document+zip DESIGNER graphics, version 14-16
-!:mime application/x-vnd.corel.designer.document+zip
-!:ext des
->>>62 string zcf.designer.document+zip DESIGNER graphics, version 17-21
-!:mime application/x-vnd.corel.zcf.designer.document+zip
-!:ext des
-# URL: http://product.corel.com/help/CorelDRAW/540223850/Main/EN/Documentation/
-# CorelDRAW-Corel-Symbol-Library-CSL.html
->>>62 string symbol.library+zip Symbol Library, version 6-16.3
-!:mime application/x-vnd.corel.symbol.library+zip
-!:ext csl
->>>62 string zcf.symbol.library+zip Symbol Library, version 17-22
-!:mime application/x-vnd.corel.zcf.symbol.library+zip
-!:ext csl
-
-# Catch other ZIP-with-mimetype formats
-# In a ZIP file, the bytes immediately after a member's contents are
-# always "PK". The 2 regex rules here print the "mimetype" member's
-# contents up to the first 'P'. Luckily, most MIME types don't contain
-# any capital 'P's. This is a kludge.
-# (mimetype contains "application/<OTHER>")
->>50 default x Zip data
->>>38 regex [!-OQ-~]+ (MIME type "%s"?)
-!:mime application/zip
-# (mimetype contents other than "application/*")
->26 string \x8\0\0\0mimetype
->>38 string !application/
->>>38 regex [!-OQ-~]+ Zip data (MIME type "%s"?)
-!:mime application/zip
-
-# Java Jar files (see also APK files above)
->(26.s+30) leshort 0xcafe Java archive data (JAR)
-!:mime application/java-archive
-!:ext jar
-
-# iOS App
->(26.s+30) leshort !0xcafe
->>26 string !\x8\0\0\0mimetype
->>>30 string Payload/
->>>>38 search/64 .app/ iOS App
-!:mime application/x-ios-app
-
-# Dup, see above.
-#>30 search/100/b application/epub+zip EPUB document
-#!:mime application/epub+zip
-
-# Generic zip archives (Greg Roelofs, c/o zip-bugs@wkuvx1.wku.edu)
-# Next line excludes specialized formats:
->(26.s+30) leshort !0xcafe
->>30 search/100/b !application/epub+zip
->>>26 string !\x8\0\0\0mimetype Zip archive data
-!:mime application/zip
->>>>4 beshort x \b, at least
->>>>4 use zipversion
->>>>4 beshort x to extract
->>>>8 beshort x \b, compression method=
->>>>8 use zipcompression
->>>>0x161 string WINZIP \b, WinZIP self-extracting
-
-# StarView Metafile
-# From Pierre Ducroquet <pinaraf@pinaraf.info>
-0 string VCLMTF StarView MetaFile
->6 beshort x \b, version %d
->8 belong x \b, size %d
-
-# Zoo archiver
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Zoo_(file_format)
-# http://fileformats.archiveteam.org/wiki/Zoo
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-zoo-strict.trid.xml
-# http://distcache.freebsd.org/ports-distfiles/zoo-2.10pl1.tar.gz/zoo.h
-# Note: called "ZOO compressed archive (strict)" by TrID and "ZOO Compressed Archive" by DROID via PUID x-fmt/269
-# verified by command like `deark -m zoo -l -d2 WHRCGA.ZOO`
-20 lelong 0xfdc4a7dc
-# skip DROID x-fmt-269-signature-id-621.zoo by looking for valid major version to manipulate archive
->32 byte >0 Zoo archive data
-!:mime application/x-zoo
-# bak is the extension of a backed-up zoo archive
-!:ext zoo/bak
-# version in text form like: 1.50 2.00 2.10
->>4 byte >48 \b, v%c.
->>>6 byte >47 \b%c
->>>>7 byte >47 \b%c
-# ZOO files typically start with "ZOO ?.?? Archive.", followed by the bytes 0x1a 0x0 0x0; these are not used by Zoo and may be anything
->>8 string !\040Archive.\032 \b, at 8
->>>8 string x text "%0.10s"
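-# Illustrative header start: the 20 text bytes, e.g. "ZOO 2.10 Archive." plus
-# 0x1a 0x0 0x0, are followed at offset 20 by the little-endian tag bytes
-# dc a7 c4 fd, i.e. the 0xfdc4a7dc key of this entry.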
-# major_ver.minor_ver; minimum version needed to manipulate archive like: 1.0 2.0
->>32 byte >0 \b, modify: v%d
->>>33 byte x \b.%d+
-# major_ver.minor_ver; minimum version needed to extract after modify like in old versions
->>(24.l+28) ubyte x \b, extract: v%u
->>(24.l+29) ubyte x \b.%u+
-# with zoo 2.00 additional fields have been added in the archive header
->>32 byte >1
-# type; type of archive header like: 1 2
->>>34 ubyte !1 \b, header type %u
-# acmt_pos; position of archive comment like: 6258 30599 61369 149501
->>>35 lelong >0 \b, at %d
-# acmt_len; length of archive comment like: 258
->>>>39 uleshort x %u bytes comment
-#>>>>(35.l) ubequad x COMMENT=%16.16llx
-# 1st character of comment may be CarriageReturn (0x0d)
->>>>(35.l) ubyte <040
-# 2nd character of comment may be LineFeed (0x0a)
->>>>>(35.l+1) ubyte <040
-# comment string after CRLF like "Anonymous ftp site garbo.uwasa.fi 128.214.87.1 moderated by"
->>>>>>(35.l+2) string x %s
-# next character of remaining comment may be CarriageReturn (0x0d)
->>>>>>>&0 ubyte <040
->>>>>>>>&0 ubyte <040
-# 2nd comment part like: Timo Salmi ts@chyde.uwasa.fi PC directories and uploads\015\012Harri Valkama hv@chyde.uwasa.fi PC, Mac, Unix files, and upload
->>>>>>>>>&0 string >037 %s
-# vdata; archive-level versioning byte like: 1 3
->>>41 ubyte !1 \b, vdata %#x
-# zoo_start; pointer to 1st entry header
->>24 lelong x \b; at %u
-# zoo_minus; zoo_start -1 for consistency checking
-#>>28 lelong x \b, zoo_minus %#x
-# zoo_tag; tag for check
-#>>(24.l+0) ulelong !0xfdc4a7dc \b, zoo_tag=%8.8x
-# type; type of directory entry like: 1 2
->>(24.l+4) ubyte !2 type=%u
-# packing_method; 0~no packing 1~normal LZW 2~lzh
->>(24.l+5) ubyte x method=
->>>(24.l+5) ubyte 0 \bnot-compressed
->>>(24.l+5) ubyte 1 \blzd
->>>(24.l+5) ubyte 2 \blzh
-# next; position of next directory entry
->>(24.l+6) ulelong x \b, next entry at %u
-# offset; position of file data for this entry
-#>>(24.l+10) ulelong x \b, data at %u
-# file_crc; CRC-16 of file data
->>(24.l+18) uleshort x \b, CRC %#4.4x
-# comment; zero if none or points to entry comment like ADD9h (WHRCGA.ZOO)
->>(24.l+32) lelong >0 \b, at %#x
-# cmt_size; 0 if none, otherwise length of entry comment like: 46
->>>(24.l+36) uleshort >0 %u bytes comment
-# entry comment itself like: "CGA .GL file showing menu input from keyboard"
->>>>(&-6.l) string x "%s"
-# org_size; original size of file
->>(24.l+20) ulelong x \b, size %u
-# size_now; compressed size of file
->>(24.l+24) ulelong x (%u compressed)
-# major_ver.minor_ver; minimum version needed to extract (already shown above)
-# deleted; will be 1 if deleted, 0 if not
->>(24.l+30) ubyte =1 \b, deleted
-# struc; file structure if any; WHAT IS THAT?
->>(24.l+31) ubyte !0 \b, structured
-# fname[13]; short/DOS file name like 12345678.012
->>(24.l+38) string x \b, %0.13s
-# for directory entry type 2 with variable part
->>(24.l+4) ubyte =2
-# var_dir_len; length of variable part of dir entry
->>>(24.l+51) uleshort >0
-#>>>(24.l+51) uleshort >0 \b, variable part length %u
-# namlen; length of long filename
-#>>>>(24.l+56) ubyte x \b, namlen %u
-# dirlen; length of directory name
-#>>>>(24.l+57) ubyte x \b, dirlen %u
-# if long file name length is positive then show long file name
->>>>(24.l+56) ubyte >0
-# lfname[256]; long file name \0-terminated
->>>>>(24.l+58) string x "%s"
-# if directory length is positive then jump before the file name field and then jump this additional length plus 2 (\0-terminator + dirlen field) to the following directory name
->>>>(24.l+57) ubyte >0
->>>>>(24.l+55) ubyte x
-# dirname[256]; directory name \0-terminated
->>>>>>&(&0.b+2) string x in "%s"
-# dir_crc; CRC of directory entry
-#>>>(24.l+54) uleshort x \b, entry CRC %#4.4x
-# tz; timezone where file was archived; 7Fh~unknown 4~1.00hoursWestOfUTC 12 16 20~5.00hoursWestOfUTC -107~26.75hoursEastOfUTC -4~1.00hoursEastOfUTC
->>>(24.l+53) byte !0x7f \b, time zone %d/4
-# date; last mod file date in DOS format
->>>(24.l+14) lemsdosdate x \b, modified %s
-# time; last mod file time in DOS format
->>>(24.l+16) lemsdostime x %s
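The comments above spell out the Zoo archive header layout (text banner, tag at offset 20, zoo_start at 24, modify version at 32/33). Below is a minimal Python sketch of those checks, assuming the zoo.h offsets cited above; the function name and returned fields are illustrative only.

import struct

def inspect_zoo(path):
    # Read the fixed archive header; offsets follow the zoo.h layout cited above.
    with open(path, "rb") as f:
        head = f.read(42)
    if len(head) < 42 or struct.unpack_from("<I", head, 20)[0] != 0xFDC4A7DC:
        return None  # not a Zoo archive (tag mismatch)
    major, minor = head[32], head[33]                    # minimum version to manipulate
    zoo_start = struct.unpack_from("<I", head, 24)[0]    # offset of 1st entry header
    banner = head[:20].rstrip(b"\x1a\x00").decode("ascii", "replace")
    return {"banner": banner, "modify_version": f"{major}.{minor}",
            "first_entry_offset": zoo_start}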
-
-# Shell archives
-10 string #\ This\ is\ a\ shell\ archive shell archive text
-!:mime application/octet-stream
-
-#
-# LBR. NB: May conflict with the questionable
-# "binary Computer Graphics Metafile" format.
-#
-0 string \0\ \ \ \ \ \ \ \ \ \ \ \0\0 LBR archive data
-#
-# PMA (CP/M derivative of LHA)
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/LHA_(file_format)
-#
-#2 string -pm0- PMarc archive data [pm0]
-2 string -pm0-
->0 use lharc-file
-#2 string -pm1- PMarc archive data [pm1]
-2 string -pm1-
->0 use lharc-file
-#2 string -pm2- PMarc archive data [pm2]
-2 string -pm2-
->0 use lharc-file
-2 string -pms- PMarc SFX archive (CP/M, DOS)
-#!:mime application/x-foobar-exec
-!:ext com
-5 string -pc1- PopCom compressed executable (CP/M)
-#!:mime application/x-
-#!:ext com
-
-# From Rafael Laboissiere <rafael@laboissiere.net>
-# The Project Revision Control System (see
-# http://prcs.sourceforge.net) generates a packaged project
-# file which is recognized by the following entry:
-0 leshort 0xeb81 PRCS packaged project
-
-# Microsoft cabinets
-# by David Necas (Yeti) <yeti@physics.muni.cz>
-#0 string MSCF\0\0\0\0 Microsoft cabinet file data,
-#>25 byte x v%d
-#>24 byte x \b.%d
-# MPi: All CABs have version 1.3, so this is pointless.
-# Better magic in debian-additions.
-
-# GTKtalog catalogs
-# by David Necas (Yeti) <yeti@physics.muni.cz>
-4 string gtktalog\ GTKtalog catalog data,
->13 string 3 version 3
->>14 beshort 0x677a (gzipped)
->>14 beshort !0x677a (not gzipped)
->13 string >3 version %s
-
-############################################################################
-# Parity archive reconstruction file, the 'par' file format now used on Usenet.
-0 string PAR\0 PARity archive data
->48 leshort =0 - Index file
->48 leshort >0 - file number %d
-
-# Felix von Leitner <felix-file@fefe.de>
-0 string d8:announce BitTorrent file
-!:mime application/x-bittorrent
-!:ext torrent
-# Durval Menezes, <jmgthbfile at durval dot com>
-0 string d13:announce-list BitTorrent file
-!:mime application/x-bittorrent
-!:ext torrent
-0 string d7:comment BitTorrent file
-!:mime application/x-bittorrent
-!:ext torrent
-0 string d4:info BitTorrent file
-!:mime application/x-bittorrent
-!:ext torrent
-
-# Atari MSA archive - Teemu Hukkanen <tjhukkan@iki.fi>
-# URL: http://fileformats.archiveteam.org/wiki/MSA_(Magic_Shadow_Archiver)
-# Reference: http://info-coach.fr/atari/documents/_mydoc/FD_Image_File_Format.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/m/msa.trid.xml
-# Update: Joerg Jenderek
-# Note: called by TrID "Atari MSA Disk Image" and verified by
-# command like `deark -l -m msa -d2 PDATS578.msa` as " Atari ST floppy disk image"
-# GRR: line below is too general as it matches setup.skin
-0 beshort 0x0e0f
-# skip foo setup.skin with unrealistic high number 52255 of sides by check for valid "low" value
->4 ubeshort <2 Atari MSA archive data
-#!:mime application/octet-stream
-!:mime application/x-atari-msa
-!:ext msa
-# sectors per track like: 9 10
->>2 beshort x \b, %d sectors per track
-# sides (0 or 1; add 1 to this to get correct number of sides)
->>4 beshort 0 \b, 1 sided
->>4 beshort 1 \b, 2 sided
-# starting track like: 0
->>6 beshort x \b, starting track: %d
-# ending track like: 39 79 80 81
->>8 beshort x \b, ending track: %d
-# tracks content
-#>>10 ubequad x \b, track content %#16.16llx
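A hedged sketch of the MSA header fields listed above (five big-endian 16-bit words: magic, sectors per track, sides, starting track, ending track); the function name is illustrative.

import struct

def msa_info(head):
    # 'head' is the first 10 bytes of the file.
    if len(head) < 10:
        return None
    magic, spt, sides, start, end = struct.unpack(">5H", head[:10])
    if magic != 0x0E0F or sides > 1:   # same sanity check as the rule above
        return None
    return {"sectors_per_track": spt, "sides": sides + 1,
            "starting_track": start, "ending_track": end}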
-
-# Alternate ZIP string (amc@arwen.cs.berkeley.edu)
-0 string PK00PK\003\004 Zip archive data
-!:mime application/zip
-!:ext zip/cbz
-
-# Recognize ZIP archives with prepended data by end-of-central-directory record
-# https://en.wikipedia.org/wiki/ZIP_(file_format)#End_of_central_directory_record_(EOCD)
-# by Michal Gorny <mgorny@gentoo.org>
--2 uleshort 0
->&-22 string PK\005\006
-# without #!
->>0 string !#! Zip archive, with extra data prepended
-!:mime application/zip
-!:ext zip/cbz
-# with #!
->>0 string/w #!\ a
->>>&-1 string/T x %s script executable (Zip archive)
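The two tests above amount to: if the archive comment length (the last two bytes of the file) is zero, the end-of-central-directory signature sits exactly 22 bytes before the end. A small sketch of that check (function name illustrative):

import struct

def has_zip_eocd(path):
    with open(path, "rb") as f:
        f.seek(0, 2)
        if f.tell() < 22:
            return False
        f.seek(-22, 2)
        tail = f.read(22)
    # 'PK\x05\x06' signature plus a zero comment length in the last two bytes
    return tail[:4] == b"PK\x05\x06" and struct.unpack("<H", tail[20:])[0] == 0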
-
-# ACE archive (from http://www.wotsit.org/download.asp?f=ace)
-# by Stefan `Sec` Zehl <sec@42.org>
-7 string **ACE** ACE archive data
-!:mime application/x-ace-compressed
-!:ext ace
->15 byte >0 version %d
->16 byte =0x00 \b, from MS-DOS
->16 byte =0x01 \b, from OS/2
->16 byte =0x02 \b, from Win/32
->16 byte =0x03 \b, from Unix
->16 byte =0x04 \b, from MacOS
->16 byte =0x05 \b, from WinNT
->16 byte =0x06 \b, from Primos
->16 byte =0x07 \b, from AppleGS
->16 byte =0x08 \b, from Atari
->16 byte =0x09 \b, from Vax/VMS
->16 byte =0x0A \b, from Amiga
->16 byte =0x0B \b, from Next
->14 byte x \b, version %d to extract
->5 leshort &0x0080 \b, multiple volumes,
->>17 byte x \b (part %d),
->5 leshort &0x0002 \b, contains comment
->5 leshort &0x0200 \b, sfx
->5 leshort &0x0400 \b, small dictionary
->5 leshort &0x0800 \b, multi-volume
->5 leshort &0x1000 \b, contains AV-String
->>30 string \x16*UNREGISTERED\x20VERSION* (unregistered)
->5 leshort &0x2000 \b, with recovery record
->5 leshort &0x4000 \b, locked
->5 leshort &0x8000 \b, solid
-# Date in MS-DOS format (whatever that is)
-#>18 lelong x Created on
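The flag tests above all read the same little-endian 16-bit word at offset 5; a sketch decoding it in one pass (the flag subset and names are taken from the rules above, the function name is illustrative):

ACE_FLAGS = {0x0002: "comment", 0x0080: "multiple volumes", 0x0200: "sfx",
             0x0400: "small dictionary", 0x0800: "multi-volume",
             0x1000: "AV-string", 0x2000: "recovery record",
             0x4000: "locked", 0x8000: "solid"}

def ace_flags(head):
    # 'head' is the start of the file; the '**ACE**' signature sits at offset 7.
    if head[7:14] != b"**ACE**":
        return []
    flags = int.from_bytes(head[5:7], "little")
    return [name for bit, name in ACE_FLAGS.items() if flags & bit]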
-
-# sfArk : compression program for Soundfonts (sf2) by Dirk Jagdmann
-# <doj@cubic.org>
-0x1A string sfArk sfArk compressed Soundfont
->0x15 string 2
->>0x1 string >\0 Version %s
->>0x2A string >\0 : %s
-
-# DR-DOS 7.03 Packed File *.??_
-# Reference: http://www.antonis.de/dos/dos-tuts/mpdostip/html/nwdostip.htm
-# Note: unpacked by PNUNPACK.EXE
-0 string Packed\ File\
-# skip ASCII text starting with "Packed File" by looking for Control-Z
->0x18 ubyte 0x1a Personal NetWare Packed File
-!:mime application/x-novell-compress
-!:ext ??_
->>12 string x \b, was "%.12s"
-# 1 or 2
-#>>0x19 ubyte x \b, at 0x19 %u
->>0x1b ulelong x with %u bytes
-
-# EET archive
-# From: Tilman Sauerbeck <tilman@code-monkey.de>
-0 belong 0x1ee7ff00 EET archive
-!:mime application/x-eet
-
-# rzip archives
-0 string RZIP rzip compressed data
->4 byte x - version %d
->5 byte x \b.%d
->6 belong x (%d bytes)
-
-# From: Joerg Jenderek
-# URL: https://help.foxitsoftware.com/kb/install-fzip-file.php
-# reference: http://mark0.net/download/triddefs_xml.7z/
-# defs/f/fzip.trid.xml
-# Note: unknown compression; No "PK" zip magic; normally in directory like
-# "%APPDATA%\Foxit Software\Addon\Foxit Reader\Install"
-0 ubequad 0x2506781901010000 Foxit add-on/update
-!:mime application/x-fzip
-!:ext fzip
-
-# From: "Robert Dale" <robdale@gmail.com>
-0 belong 123 dar archive,
->4 belong x label "%.8x
->>8 belong x %.8x
->>>12 beshort x %.4x"
->14 byte 0x54 end slice
->14 beshort 0x4e4e multi-part
->14 beshort 0x4e53 multi-part, with -S
-
-# Symbian installation files
-# https://www.thouky.co.uk/software/psifs/sis.html
-# http://developer.symbian.com/main/downloads/papers/SymbianOSv91/softwareinstallsis.pdf
-8 lelong 0x10000419 Symbian installation file
-!:mime application/vnd.symbian.install
->4 lelong 0x1000006D (EPOC release 3/4/5)
->4 lelong 0x10003A12 (EPOC release 6)
-0 lelong 0x10201A7A Symbian installation file (Symbian OS 9.x)
-!:mime x-epoc/x-sisx-app
-
-# From "Nelson A. de Oliveira" <naoliv@gmail.com>
-0 string MPQ\032 MoPaQ (MPQ) archive
-
-# From: "Nelson A. de Oliveira" <naoliv@gmail.com>
-# .kgb
-0 string KGB_arch KGB Archiver file
->10 string x with compression level %.1s
-
-# xar (eXtensible ARchiver) archive
-# URL: https://en.wikipedia.org/wiki/Xar_(archiver)
-# xar archive format: https://code.google.com/p/xar/
-# From: "David Remahl" <dremahl@apple.com>
-# Update: Joerg Jenderek
-# TODO: lzma compression; X509Data for pkg and xip
-# Note: verified by `xar --dump-header -f FullBundleUpdate.xar` or
-# `7z t -txar Xcode_10.2_beta_4.xip`
-0 string xar! xar archive
-!:mime application/x-xar
-# pkg for Mac OSX installer package like FullBundleUpdate.pkg
-# xip for signed Apple software like Xcode_10.2_beta_4.xip
-!:ext xar/pkg/xip
-# always 28 in older archives
->4 ubeshort >28 \b, header size %u
-# currently only version 1 exists (since about 2014)
->6 ubeshort >1 version %u,
->8 ubequad x compressed TOC: %llu,
-#>16 ubequad x uncompressed TOC: %llu,
-# cksum_alg 0-2 in older and also 3-4 in newer
->24 belong 0 no checksum
->24 belong 1 SHA-1 checksum
->24 belong 2 MD5 checksum
->24 belong 3 SHA-256 checksum
->24 belong 4 SHA-512 checksum
->24 belong >4 unknown %#x checksum
-#>24 belong >4 checksum
-# For no compression jump 0 bytes
->24 belong 0
->>0 ubyte x
-# jump more bytes forward by header size
->>>&(4.S) ubyte x
-# jump more bytes forward by compressed table of contents size
-#>>>>&(8.Q) ubequad x \b, heap data %#llx
->>>>&(8.Q) ubyte x
-# look for data handled by ./compress after the message, which ends with 1 space
->>>>>&-3 indirect x \b, contains
-# For SHA-1 jump 20 minus 2 bytes
->24 belong 1
->>18 ubyte x
-# jump more bytes forward by header size
->>>&(4.S) ubyte x
-# jump more bytes forward by compressed table of contents size
->>>>&(8.Q) ubyte x
-# data compressed by gzip, bzip, lzma or none
->>>>>&-1 indirect x \b, contains
-# For SHA-256 jump 32 minus 2 bytes
->24 belong 3
->>30 ubyte x
-# jump more bytes forward by header size
->>>&(4.S) ubyte x
-# jump more bytes forward by compressed table of contents size
->>>>&(8.Q) ubyte x
->>>>>&-1 indirect x \b, contains
-# For SHA-512 jump 64 minus 2 bytes
->24 belong 4
->>62 ubyte x
-# jump more bytes forward by header size
->>>&(4.S) ubyte x
-# jump more bytes forward by compressed table of contents size
->>>>&(8.Q) ubyte x
->>>>>&-1 indirect x \b, contains
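The chained jumps above (header size at offset 4, compressed TOC length at offset 8, then a checksum whose length depends on cksum_alg) just locate the start of the heap data, where ./compress identifies the first archived payload. A sketch of that arithmetic, assuming the TOC checksum is stored at the start of the heap; the function name is illustrative.

import struct

XAR_CKSUM_LEN = {0: 0, 1: 20, 2: 16, 3: 32, 4: 64}  # none, SHA-1, MD5, SHA-256, SHA-512

def xar_first_payload_offset(path):
    with open(path, "rb") as f:
        hdr = f.read(28)
    if len(hdr) < 28 or hdr[:4] != b"xar!":
        return None
    header_size, version = struct.unpack_from(">HH", hdr, 4)
    toc_compressed_len = struct.unpack_from(">Q", hdr, 8)[0]
    cksum_alg = struct.unpack_from(">I", hdr, 24)[0]
    return header_size + toc_compressed_len + XAR_CKSUM_LEN.get(cksum_alg, 0)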
-
-# Type: Parity Archive
-# From: Daniel van Eeden <daniel_e@dds.nl>
-0 string PAR2 Parity Archive Volume Set
-
-# Bacula volume format. (Volumes always start with a block header.)
-# URL: https://bacula.org/3.0.x-manuals/en/developers/developers/Block_Header.html
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-12 string BB02 Bacula volume
->20 bedate x \b, started %s
-
-# ePub is XHTML + XML inside a ZIP archive. The first member of the
-# archive must be an uncompressed file called 'mimetype' with contents
-# 'application/epub+zip'
-
-
-# From: "Michael Gorny" <mgorny@gentoo.org>
-# ZPAQ: http://mattmahoney.net/dc/zpaq.html
-0 string zPQ ZPAQ stream
->3 byte x \b, level %d
-# From: Barry Carter <carter.barry@gmail.com>
-# https://encode.ru/threads/456-zpaq-updates/page32
-0 string 7kSt ZPAQ file
-
-# BBeB ebook, unencrypted (LRF format)
-# URL: https://www.sven.de/librie/Librie/LrfFormat
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-0 string L\0R\0F\0\0\0 BBeB ebook data, unencrypted
->8 beshort x \b, version %d
->36 byte 1 \b, front-to-back
->36 byte 16 \b, back-to-front
->42 beshort x \b, (%dx,
->44 beshort x %d)
-
-# Symantec GHOST image by Joerg Jenderek at May 2014
-# https://us.norton.com/ghost/
-# https://www.garykessler.net/library/file_sigs.html
-0 ubelong&0xFFFFf7f0 0xFEEF0100 Norton GHost image
-# *.GHO
->2 ubyte&0x08 0x00 \b, first file
-# *.GHS or *.[0-9] with cns program option
->2 ubyte&0x08 0x08 \b, split file
-# part of split index interesting for *.ghs
->>4 ubyte x id=%#x
-# compression tag minus one equals numeric compression command line switch z[1-9]
->3 ubyte 0 \b, no compression
->3 ubyte 2 \b, fast compression (Z1)
->3 ubyte 3 \b, medium compression (Z2)
->3 ubyte >3
->>3 ubyte <11 \b, compression (Z%d-1)
->2 ubyte&0x08 0x00
-# ~ 30 byte password field only for *.gho
->>12 ubequad !0 \b, password protected
->>44 ubyte !1
-# 1~Image All, sector-by-sector only for *.gho
->>>10 ubyte 1 \b, sector copy
-# 1~Image Boot track only for *.gho
->>>43 ubyte 1 \b, boot track
-# 1~Image Disc only for *.gho implies Image Boot track and sector copy
->>44 ubyte 1 \b, disc sector copy
-# optional image description only *.gho
->>0xff string >\0 "%-.254s"
-# look for DOS sector end sequence
->0xE08 search/7776 \x55\xAA
->>&-512 indirect x \b; contains
-
-# Google Chrome extensions
-# https://developer.chrome.com/extensions/crx
-# https://developer.chrome.com/extensions/hosting
-0 string Cr24 Google Chrome extension
-!:mime application/x-chrome-extension
->4 ulong x \b, version %u
-
-# SeqBox - Sequenced container
-# ext: sbx, seqbox
-# Marco Pontello marcopon@gmail.com
-# reference: https://github.com/MarcoPon/SeqBox
-0 string SBx SeqBox,
->3 byte x version %d
-
-# LyNX archive
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Lynx_archive
-# Reference: http://ist.uwaterloo.ca/~schepers/formats/LNX.TXT
-# http://mark0.net/download/triddefs_xml.7z/defs/a/ark-lnx.trid.xml
-# Note: called "Lynx archive" by TrID and "Commodore C64 BASIC program" with "POKE 53280" by ./c64
-# TODO: merge and unify with Commodore C64 BASIC program
-56 string USE\040LYNX\040TO\040DISSOLVE\040THIS\040FILE LyNX archive
-# display "Lynx archive" (strength=330) before Commodore C64 BASIC program (strength=50) handled by ./c64
-#!:strength +0
-#!:mime application/octet-stream
-!:mime application/x-commodore-lnx
-!:ext lnx
-# afterwards look for BASIC tokenized GOTO (89h) 10, line terminator \0, end of program tag \0\0 and carriage return
->86 search/10 \x8910\0\0\0\r \b,
-# for DEBUGGING
-#>>&0 string x STRING="%s"
-# number of directory blocks in ASCII, with spaces on both sides, like: 1 2 3 5
->>&0 regex [0-9]{1,5} %s directory blocks
-# signature like: "*LYNX XII BY WILL CORLEY" " LYNX IX BY WILL CORLEY" "*LYNX BY CBMCONVERT 2.0*"
->>>&2 regex [^\r]{1,24} \b, signature "%s"
-# number of files in ASCII surrounded by spaces and delimited by CR like: 2 3 6 13 69 144 (maximum?)
->>>>&1 regex [0-9]{1,3} \b, %s files
-
-# From: Joerg Jenderek
-# URL: https://www.acronis.com/
-# Reference: https://en.wikipedia.org/wiki/TIB_(file_format)
-# Note: only tested with True Image 2013 Build 5962 and 2019 Build 14110
-0 ubequad 0xce24b9a220000000 Acronis True Image backup
-!:mime application/x-acronis-tib
-!:ext tib
-# 01000000
-#>20 ubelong x \b, at 20 %#x
-# 20000000
-#>28 ubelong x \b, at 28 %#x
-# strings like "Generic- SD/MMC 1.00" "Unknown Disk" "Msft Virtual Disk 1.0"
-# ???
-# strings like "\Device\0000011e" "\Device\0000015a"
-#>0 search/0x6852300/cs \\Device\\
-#>>&-1 pstring x \b, %s
-# "\Device\HarddiskVolume30" "\Device\HarddiskVolume39"
-#>>>&1 search/180/cs \\Device\\
-#>>>>&-1 pstring x \b, %s
-#>>>>>&0 search/29/cs \0\0\xc8\0
-# disk label
-#>>>>>>&10 lestring16 x \b, disk label %11.11s
-#>>>>>>&9 plestring16 x \b, disk label "%11.11s"
-#>>>>>>&10 ubequad x %16.16llx
-
-
-# Gentoo XPAK binary package
-# by Michal Gorny <mgorny@gentoo.org>
-# https://gitweb.gentoo.org/proj/portage.git/tree/man/xpak.5
--4 string STOP
->-16 string XPAKSTOP Gentoo binary package (XPAK)
-!:mime application/vnd.gentoo.xpak
-
-# From: Joerg Jenderek
-# URL: https://kodi.wiki/view/TexturePacker
-# Reference: https://mirrors.kodi.tv/releases/source/17.3-Krypton.tar.gz
-# /xbmc-Krypton/xbmc/guilib/XBTF.h
-# /xbmc-Krypton/xbmc/guilib/XBTF.cpp
-0 string XBTF
-# skip ASCII text by looking for terminating \0 of path
->264 ubyte 0 XBMC texture package
-!:mime application/x-xbmc-xbt
-!:ext xbt
-# XBTF_VERSION 2
->>4 string !2 \b, version %-.1s
-# nofFiles /xbmc-Krypton/xbmc/guilib/XBTFReader.cpp
->>5 ulelong x \b, %u file
-# plural s
->>5 ulelong >1 \bs
-# path[CXBTFFile[MaximumPathLength=256]
->>9 string x \b, 1st %s
-
-# ALZIP archive
-# by Hyungjun Park <hyungjun.park@worksmobile.com>, Hajin Jang <hajin_jang@worksmobile.com>
-# http://kippler.com/win/unalz/
-# https://salsa.debian.org/l10n-korean-team/unalz
-0 string ALZ\001 ALZ archive data
-!:ext alz
-
-# https://cf-aldn.altools.co.kr/setup/EGG_Specification.zip
-0 string EGGA EGG archive data,
-!:ext egg
->5 byte x version %u
->4 byte x \b.%u
->>0x0E ulelong =0x08E28222
->>0x0E ulelong =0x24F5A262 \b, split
->>0x0E ulelong =0x24E5A060 \b, solid
->>0x0E default x \b, unknown
-
-# PAQ9A archive
-# URL: http://mattmahoney.net/dc/#paq9a
-# Note: Line 1186 of paq9a.cpp gives the magic bytes
-0 string pQ9\001 PAQ9A archive
-
-# From wof (wof@stachelkaktus.net)
-0 string Unison\ archive\ format Unison archive format
-
-# https://ankiweb.net
-30 string collection.anki2 Anki APKG file
-#!:ext .apkg
-
-# Synology archive (DiskStation Manager 7.0+)
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# Note: These archives are signed and encrypted.
-0 ulelong&0xFFFFFF00 0xEFBEAD00
-# MessagePack header (fixarray of 5 elements starting with a bin of 32 bytes)
->8 ulelong&0x00FFFFFF 0x20C495 Synology archive
-!:ext spk
-# Extract some properties from MessagePack third item
->>43 search/0x10000 package=
->>>&0 string x \b, package %s
->>43 search/0x10000 arch=
->>>&0 string x %s
->>43 search/0x10000 version=
->>>&0 string x %s
->>43 search/0x10000 create_time=
->>>&0 string x \b, created on %s
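A sketch of the two byte-level tests above: a little-endian magic whose low byte is masked off, and the MessagePack fixarray(5)/bin8(32) prefix 0x95 0xC4 0x20 at offset 8. The function name is illustrative.

def looks_like_spk(head):
    # 'head' is the first dozen or so bytes of the file.
    magic = int.from_bytes(head[0:4], "little")
    return (magic & 0xFFFFFF00) == 0xEFBEAD00 and head[8:11] == b"\x95\xC4\x20"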
-
-# MonoGame/XNA processed assets archive
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/MonoGame/MonoGame/blob/v3.8.1/MonoGame.Framework/Content/ContentManager.cs
-0 string XNB
-# XNB must be version 4 or 5
->4 byte <6
->>4 byte >3
-# Size must be positive
->>>6 lelong >0 MonoGame/XNA processed assets
-!:ext xnb
->>>>3 string =w \b, for Windows
->>>>3 string =x \b, for Xbox360
->>>>3 string =i \b, for iOS
->>>>3 string =a \b, for Android
->>>>3 string =d \b, for DesktopGL
->>>>3 string =X \b, for MacOSX
->>>>3 string =W \b, for WindowsStoreApp
->>>>3 string =n \b, for NativeClient
->>>>3 string =M \b, for WindowsPhone8
->>>>3 string =r \b, for RaspberryPi
->>>>3 string =P \b, for PlayStation4
->>>>3 string =5 \b, for PlayStation5
->>>>3 string =O \b, for XboxOne
->>>>3 string =S \b, for Nintendo Switch
->>>>3 string =G \b, for Google Stadia
->>>>3 string =b \b, for WebAssembly and Bridge.NET
->>>>3 string =m \b, for WindowsPhone7.0 (XNA)
->>>>3 string =p \b, for PlayStationMobile
->>>>3 string =v \b, for PSVita
->>>>3 string =g \b, for Windows (OpenGL)
->>>>3 string =l \b, for Linux
->>>>4 byte x \b, version %d
->>>>5 byte &0x80 \b, LZX compressed
->>>>>10 lelong x \b, decompressed size: %d bytes
->>>>5 byte &0x40 \b, LZ4 compressed
->>>>>10 lelong x \b, decompressed size: %d bytes
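A sketch of the XNB header checks above (platform letter at offset 3, version at 4, compression flags at 5, sizes at 6 and 10, all little-endian). PLATFORMS is only a subset of the table above and the function name is illustrative.

import struct

PLATFORMS = {"w": "Windows", "x": "Xbox360", "i": "iOS", "a": "Android",
             "d": "DesktopGL", "X": "MacOSX", "l": "Linux"}

def xnb_info(head):
    # 'head' is the first 14+ bytes of the file.
    if head[:3] != b"XNB" or not (3 < head[4] < 6):
        return None
    size = struct.unpack_from("<i", head, 6)[0]
    if size <= 0:
        return None
    info = {"platform": PLATFORMS.get(chr(head[3]), chr(head[3])), "version": head[4]}
    if head[5] & 0xC0:  # 0x80 = LZX, 0x40 = LZ4
        info["decompressed_size"] = struct.unpack_from("<i", head, 10)[0]
    return info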
-
-# Electron ASAR archive
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/electron/asar
-0 ulelong 4
-# Match JSON header start and end
->16 string {"files":{"
->>(12.l+12) string }}}} Electron ASAR archive
-!:ext asar
->>>12 ulelong x \b, header length: %d bytes
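A sketch of the layout implied by the rules above: a constant 4 at offset 0, the JSON index length at offset 12, and the JSON text starting at offset 16. The function name is illustrative and error handling is omitted.

import json, struct

def asar_index(path):
    with open(path, "rb") as f:
        head = f.read(16)
        if len(head) < 16 or struct.unpack_from("<I", head, 0)[0] != 4:
            return None
        (json_len,) = struct.unpack_from("<I", head, 12)
        return json.loads(f.read(json_len))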
diff --git a/contrib/libs/libmagic/magic/Magdir/aria b/contrib/libs/libmagic/magic/Magdir/aria
deleted file mode 100644
index c3a6bf57e4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/aria
+++ /dev/null
@@ -1,38 +0,0 @@
-
-#------------------------------------------------------------------------------
-# URL: https://de.wikipedia.org/wiki/Aria_(Software)
-# Reference: https://github.com/aria2/aria2/blob/master/doc/manual-src/en/technical-notes.rst
-# From: Joerg Jenderek
-# Note: only version 1 is handled
-# check for valid version one
-0 beshort 0x0001
-# skip most uncompressed DEGAS med-res bitmap *.PI2 and GEM bitmap (v1) *.IMG
-# by testing for a valid infoHashCheck extension
->2 ubelong&0xffFFffFE 0x00000000
-# skip DEGAS med-res bitmap DIAGRAM1.PI2 by testing for a valid download length
->>(6.L+14) ubequad >0
->>>0 use aria
-0 name aria
-# version; (0x0000) or (0x0001); for 0 all multi-byte fields are in host byte order, for 1 they are big endian
->0 beshort x aria2 control file, version %u
-#!:mime application/octet-stream
-!:mime application/x-aria
-!:ext aria2
-# EXTension; if EXT[3]&1 == 1, check whether the saved InfoHash and the one currently being downloaded are the same; infoHashCheck extension
->2 ubelong !0 \b, infoHashCheck %#x
-# info hash length like: 0 14h
->6 ubelong !0 \b, %#x bytes info hash
-# info hash; BitTorrent InfoHash
->>10 ubequad x %#16.16llx...
-# piece length; the length of the piece like: 400h 100000h
->(6.L+10) ubelong x \b, piece length 0x%x
-# total length; the total length of the download
->(6.L+14) ubequad x \b, total length %llu
-#>(6.L+14) ubequad x \b, total length %#llx
-# upload length; the uploaded length of download like: 0 400h
->(6.L+22) ubequad !0 \b, upload length %#llx
-# bitfield length; the length of bitfield like: 4 6 Ah 10h 13h 167h
->(6.L+30) ubelong x \b, %#x bytes bitfield
-# bitfield; bitfield which represents current download progress
->(6.L+34) ubequad !0 %#llx...
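A sketch of the version-1 control-file layout described above (all fields big-endian; the variable-length info hash at offset 10 shifts everything after it). The function name and returned keys are illustrative.

import struct

def aria2_control(path):
    with open(path, "rb") as f:
        data = f.read()
    if struct.unpack_from(">H", data, 0)[0] != 1:
        return None                      # only version 1, as noted above
    hash_len = struct.unpack_from(">I", data, 6)[0]
    base = 10 + hash_len                 # piece length sits right after the hash
    piece_len = struct.unpack_from(">I", data, base)[0]
    total_len, upload_len = struct.unpack_from(">QQ", data, base + 4)
    bitfield_len = struct.unpack_from(">I", data, base + 20)[0]
    return {"info_hash": data[10:base].hex(), "piece_length": piece_len,
            "total_length": total_len, "upload_length": upload_len,
            "bitfield": data[base + 24:base + 24 + bitfield_len]}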
-
diff --git a/contrib/libs/libmagic/magic/Magdir/arm b/contrib/libs/libmagic/magic/Magdir/arm
deleted file mode 100644
index c514320354..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/arm
+++ /dev/null
@@ -1,50 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: arm,v 1.3 2022/10/31 14:35:39 christos Exp $
-# arm: file(1) magic for ARM COFF
-#
-# https://docs.microsoft.com/en-us/windows/win32/debug/pe-format
-
-# Aarch64
-0 leshort 0xaa64
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
-
-# ARM
-0 leshort 0x01c0
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
-
-# ARM Thumb
-0 leshort 0x01c2
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
-
-# ARMv7 Thumb
-0 leshort 0x01c4
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
-
-# ARM64EC
-0 leshort 0xa641
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
diff --git a/contrib/libs/libmagic/magic/Magdir/asf b/contrib/libs/libmagic/magic/Magdir/asf
deleted file mode 100644
index 744a0afc2c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/asf
+++ /dev/null
@@ -1,132 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: asf,v 1.4 2022/10/31 13:22:26 christos Exp $
-# asf: file(1) magic for Microsoft Advanced Systems Format (ASF) files
-# http://www.staroceans.org/e-book/ASF_Specification.pdf
-
-0 name asf-name
-# ASF_Data_Object
-#>0 guid 75B22636-668E-11CF-A6D9-00AA0062CE6C
-#>16 lequad >0
-#>>(16.q) use asf-object
-# ASF_Simple_Index_Object
->0 guid 33000890-E5B1-11CF-89F4-00A0C90349CB
->0 guid D6E229D3-35DA-11D1-9034-00A0C90349BE ASF_Index_Object
->0 guid FEB103F8-12AD-4C64-840F-2A1D2F7AD48C ASF_Media_Object_Index_Object
->0 guid 3CB73FD0-0C4A-4803-953D-EDF7B6228F0C ASF_Timecode_Index_Object
-
-# ASF_File_Properties_Object
->0 guid 8CABDCA1-A947-11CF-8EE4-00C00C205365
-
-# ASF_Stream_Properties_Object
->0 guid B7DC0791-A9B7-11CF-8EE6-00C00C205365
-#>>56 lequad x Time Offset %lld
-#>>64 lelong x Type-Specific Data Length %d
-#>>68 lelong x Error Correction Data Length %d
-#>>72 leshort x Flags %#x
-#>>74 lelong x Reserved %x
-# ASF_Audio_Media
->>24 guid F8699E40-5B4D-11CF-A8FD-00805F5C442B \b, Audio Media (
->>>78 leshort x \bCodec Id %d
->>>80 leshort x \b, Number of channels %d
->>>82 lelong x \b, Samples Per Second %d
->>>86 lelong x \b, Average Number of Bytes Per Second %d
->>>90 lelong x \b, Block Alignment %d
->>>94 leshort x \b, Bits Per Sample %d
-# ASF_Video_Media
->>24 guid BC19EFC0-5B4D-11CF-A8FD-00805F5C442B \b, Video Media (
->>>78 lelong x \bEncoded Image Width %d
->>>82 lelong x \b, Encoded Image Height %d
-#>>>85 leshort x \b, Format Data Size %x
->>>93 lelong x \b, Image Width %d
->>>97 lelong x \b, Image Height %d
-#>>>101 leshort x \b, Reserved %#x
->>>103 leshort x \b, Bits Per Pixel Count %d
-#>>>105 lelong x \b, Compression ID %d
-#>>>109 lelong x \b, Image Size %d
-#>>>113 lelong x \b, Horizontal Pixels Per Meter %d
-#>>>117 lelong x \b, Vertical Pixels Per Meter %d
-#>>>121 lelong x \b, Colors Used Count %d
-#>>>125 lelong x \b, Important Colors Count %d
->>0 lelong x \b, Error correction type
->>40 use asf-name
->>0 lelong x \b)
-#ASF_Header_Extension_Object
->0 guid 5FBF03B5-A92E-11CF-8EE3-00C00C205365
-# ASF_Codec_List_Object
->0 guid 86D15240-311D-11D0-A3A4-00A0C90348F6
->0 guid 1EFB1A30-0B62-11D0-A39B-00A0C90348F6 ASF_Script_Command_Object
->0 guid F487CD01-A951-11CF-8EE6-00C00C205365 ASF_Marker_Object
->0 guid D6E229DC-35DA-11D1-9034-00A0C90349BE ASF_Bitrate_Mutual_Exclusion_Object
->0 guid 75B22635-668E-11CF-A6D9-00AA0062CE6C ASF_Error_Correction_Object
-# ASF_Content_Description_Object
->0 guid 75B22633-668E-11CF-A6D9-00AA0062CE6C
-#>>24 leshort title length %d
-#>>26 leshort author length %d
-#>>28 leshort copyright length %d
-#>>30 leshort descriptor length %d
-#>>32 leshort rating length %d
->0 guid D2D0A440-E307-11D2-97F0-00A0C95EA850 ASF_Extended_Content_Description_Object
->0 guid 2211B3FA-BD23-11D2-B4B7-00A0C955FC6E ASF_Content_Branding_Object
->0 guid 7BF875CE-468D-11D1-8D82-006097C9A2B2 ASF_Stream_Bitrate_Properties_Object
->0 guid 2211B3FB-BD23-11D2-B4B7-00A0C955FC6E ASF_Content_Encryption_Object
->0 guid 298AE614-2622-4C17-B935-DAE07EE9289C ASF_Extended_Content_Encryption_Object
->0 guid 2211B3FC-BD23-11D2-B4B7-00A0C955FC6E ASF_Digital_Signature_Object
-# ASF_Padding_Object
->0 guid 1806D474-CADF-4509-A4BA-9AABCB96AAE8
->0 guid 14E6A5CB-C672-4332-8399-A96952065B5A ASF_Extended_Stream_Properties_Object
->0 guid A08649CF-4775-4670-8A16-6E35357566CD ASF_Advanced_Mutual_Exclusion_Object
->0 guid D1465A40-5A79-4338-B71B-E36B8FD6C249 ASF_Group_Mutual_Exclusion_Object
->0 guid D4FED15B-88D3-454F-81F0-ED5C45999E24 ASF_Stream_Prioritization_Object
->0 guid A69609E6-517B-11D2-B6AF-00C04FD908E9 ASF_Bandwidth_Sharing_Object
->0 guid 7C4346A9-EFE0-4BFC-B229-393EDE415C85 ASF_Language_List_Object
->0 guid C5F8CBEA-5BAF-4877-8467-AA8C44FA4CCA ASF_Metadata_Object
->0 guid 44231C94-9498-49D1-A141-1D134E457054 ASF_Metadata_Library_Object
->0 guid D6E229DF-35DA-11D1-9034-00A0C90349BE ASF_Index_Parameters_Object
->0 guid 6B203BAD-3F11-48E4-ACA8-D7613DE2CFA7 ASF_Media_Object_Index_Parameters_Object
->0 guid F55E496D-9797-4B5D-8C8B-604DFE9BFB24 ASF_Timecode_Index_Parameters_Object
->0 guid 26F18B5D-4584-47EC-9F5F-0E651F0452C9 ASF_Compatibility_Object
->0 guid 43058533-6981-49E6-9B74-AD12CB86D58C ASF_Advanced_Content_Encryption_Object
->0 guid 59DACFC0-59E6-11D0-A3AC-00A0C90348F6 ASF_Command_Media
->0 guid B61BE100-5B4E-11CF-A8FD-00805F5C442B ASF_JFIF_Media
->0 guid 35907DE0-E415-11CF-A917-00805F5C442B ASF_Degradable_JPEG_Media
->0 guid 91BD222C-F21C-497A-8B6D-5AA86BFC0185 ASF_File_Transfer_Media
->0 guid 3AFB65E2-47EF-40F2-AC2C-70A90D71D343 ASF_Binary_Media
->0 guid 776257D4-C627-41CB-8F81-7AC7FF1C40CC ASF_Web_Stream_Media_Subtype
->0 guid DA1E6B13-8359-4050-B398-388E965BF00C ASF_Web_Stream_Format
->0 guid 20FB5700-5B55-11CF-A8FD-00805F5C442B ASF_No_Error_Correction
->0 guid BFC3CD50-618F-11CF-8BB2-00AA00B4E220 ASF_Audio_Spread
->0 guid ABD3D211-A9BA-11cf-8EE6-00C00C205365 ASF_Reserved_1
->0 guid 7A079BB6-DAA4-4e12-A5CA-91D38DC11A8D ASF_Content_Encryption_System_Windows_Media_DRM
-# _Network_Devices
->0 guid 86D15241-311D-11D0-A3A4-00A0C90348F6 ASF_Reserved_2
->0 guid 4B1ACBE3-100B-11D0-A39B-00A0C90348F6 ASF_Reserved_3
->0 guid 4CFEDB20-75F6-11CF-9C0F-00A0C90349CB ASF_Reserved_4
->0 guid D6E22A00-35DA-11D1-9034-00A0C90349BE ASF_Mutex_Language
->0 guid D6E22A01-35DA-11D1-9034-00A0C90349BE ASF_Mutex_Bitrate
->0 guid D6E22A02-35DA-11D1-9034-00A0C90349BE ASF_Mutex_Unknown
->0 guid AF6060AA-5197-11D2-B6AF-00C04FD908E9 ASF_Bandwidth_Sharing_Exclusive
->0 guid AF6060AB-5197-11D2-B6AF-00C04FD908E9 ASF_Bandwidth_Sharing_Partial
->0 guid 399595EC-8667-4E2D-8FDB-98814CE76C1E ASF_Payload_Extension_System_Timecode
->0 guid E165EC0E-19ED-45D7-B4A7-25CBD1E28E9B ASF_Payload_Extension_System_File_Name
->0 guid D590DC20-07BC-436C-9CF7-F3BBFBF1A4DC ASF_Payload_Extension_System_Content_Type
->0 guid 1B1EE554-F9EA-4BC8-821A-376B74E4C4B8 ASF_Payload_Extension_System_Pixel_Aspect_Ratio
->0 guid C6BD9450-867F-4907-83A3-C77921B733AD ASF_Payload_Extension_System_Sample_Duration
->0 guid 6698B84E-0AFA-4330-AEB2-1C0A98D7A44D ASF_Payload_Extension_System_Encryption_Sample_ID
->0 guid 00E1AF06-7BEC-11D1-A582-00C04FC29CFB ASF_Payload_Extension_System_Degradable_JPEG
-
-0 name asf-object
->0 use asf-name
-#>>16 lequad >0 (size %lld) [
->>16 lequad >0
->>>(16.q) use asf-object
-#>>16 lequad 0 ]
-
-# Microsoft Advanced Streaming Format (ASF) <mpruett@sgi.com>
-0 guid 75B22630-668E-11CF-A6D9-00AA0062CE6C Microsoft ASF
-!:mime video/x-ms-asf
-#>16 lequad >0 (size %lld
-#>>24 lelong x \b, %d header objects)
->16 lequad >0
->>30 use asf-object
->>(16.q) use asf-object
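All of the guid tests above compare 16 raw bytes against a textual GUID; ASF stores them in the usual Microsoft layout with the first three fields little-endian, so e.g. the on-disk bytes 30 26 B2 75 8E 66 CF 11 ... render as 75B22630-668E-11CF-.... A one-line sketch of that conversion (function name illustrative):

import uuid

def guid_text(raw16):
    # 16 raw bytes -> canonical GUID string, first three fields byte-swapped
    return str(uuid.UUID(bytes_le=bytes(raw16))).upper()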
diff --git a/contrib/libs/libmagic/magic/Magdir/assembler b/contrib/libs/libmagic/magic/Magdir/assembler
deleted file mode 100644
index 805a326beb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/assembler
+++ /dev/null
@@ -1,18 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: assembler,v 1.6 2013/12/11 14:14:20 christos Exp $
-# make: file(1) magic for assembler source
-#
-0 regex \^[\040\t]{0,50}\\.asciiz assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.byte assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.even assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.globl assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.text assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.file assembler source text
-!:mime text/x-asm
-0 regex \^[\040\t]{0,50}\\.type assembler source text
-!:mime text/x-asm
diff --git a/contrib/libs/libmagic/magic/Magdir/asterix b/contrib/libs/libmagic/magic/Magdir/asterix
deleted file mode 100644
index a9ea885cdb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/asterix
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: asterix,v 1.5 2009/09/19 16:28:08 christos Exp $
-# asterix: file(1) magic for Aster*x; SunOS 5.5.1 gave the 4-character
-# strings as "long" - we assume they're just strings:
-# From: guy@netapp.com (Guy Harris)
-#
-0 string *STA Aster*x
->7 string WORD Words Document
->7 string GRAP Graphic
->7 string SPRE Spreadsheet
->7 string MACR Macro
-0 string 2278 Aster*x Version 2
->29 byte 0x36 Words Document
->29 byte 0x35 Graphic
->29 byte 0x32 Spreadsheet
->29 byte 0x38 Macro
-
diff --git a/contrib/libs/libmagic/magic/Magdir/att3b b/contrib/libs/libmagic/magic/Magdir/att3b
deleted file mode 100644
index b83ae2ec08..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/att3b
+++ /dev/null
@@ -1,41 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: att3b,v 1.10 2017/03/17 21:35:28 christos Exp $
-# att3b: file(1) magic for AT&T 3B machines
-#
-# The `versions' should be un-commented if they work for you.
-# (Was the problem just one of endianness?)
-#
-# 3B20
-#
-# The 3B20 conflicts with SCCS.
-#0 beshort 0550 3b20 COFF executable
-#>12 belong >0 not stripped
-#>22 beshort >0 - version %d
-#0 beshort 0551 3b20 COFF executable (TV)
-#>12 belong >0 not stripped
-#>22 beshort >0 - version %d
-#
-# WE32K
-#
-0 beshort 0560 WE32000 COFF
->18 beshort ^00000020 object
->18 beshort &00000020 executable
->12 belong >0 not stripped
->18 beshort ^00010000 N/A on 3b2/300 w/paging
->18 beshort &00020000 32100 required
->18 beshort &00040000 and MAU hardware required
->20 beshort 0407 (impure)
->20 beshort 0410 (pure)
->20 beshort 0413 (demand paged)
->20 beshort 0443 (target shared library)
->22 beshort >0 - version %d
-0 beshort 0561 WE32000 COFF executable (TV)
->12 belong >0 not stripped
-#>18 beshort &00020000 - 32100 required
-#>18 beshort &00040000 and MAU hardware required
-#>22 beshort >0 - version %d
-#
-# core file for 3b2
-0 string \000\004\036\212\200 3b2 core file
->364 string >\0 of '%s'
diff --git a/contrib/libs/libmagic/magic/Magdir/audio b/contrib/libs/libmagic/magic/Magdir/audio
deleted file mode 100644
index 55c5cd0ad2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/audio
+++ /dev/null
@@ -1,1291 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: audio,v 1.127 2023/03/05 20:15:49 christos Exp $
-# audio: file(1) magic for sound formats (see also "iff")
-#
-# Jan Nicolai Langfeldt (janl@ifi.uio.no), Dan Quinlan (quinlan@yggdrasil.com),
-# and others
-#
-
-# Sun/NeXT audio data
-0 string .snd Sun/NeXT audio data:
->12 belong 1 8-bit ISDN mu-law,
-!:mime audio/basic
->12 belong 2 8-bit linear PCM [REF-PCM],
-!:mime audio/basic
->12 belong 3 16-bit linear PCM,
-!:mime audio/basic
->12 belong 4 24-bit linear PCM,
-!:mime audio/basic
->12 belong 5 32-bit linear PCM,
-!:mime audio/basic
->12 belong 6 32-bit IEEE floating point,
-!:mime audio/basic
->12 belong 7 64-bit IEEE floating point,
-!:mime audio/basic
->12 belong 8 Fragmented sample data,
->12 belong 10 DSP program,
->12 belong 11 8-bit fixed point,
->12 belong 12 16-bit fixed point,
->12 belong 13 24-bit fixed point,
->12 belong 14 32-bit fixed point,
->12 belong 18 16-bit linear with emphasis,
->12 belong 19 16-bit linear compressed,
->12 belong 20 16-bit linear with emphasis and compression,
->12 belong 21 Music kit DSP commands,
->12 belong 23 8-bit ISDN mu-law compressed (CCITT G.721 ADPCM voice enc.),
-!:mime audio/x-adpcm
->12 belong 24 compressed (8-bit CCITT G.722 ADPCM)
->12 belong 25 compressed (3-bit CCITT G.723.3 ADPCM),
->12 belong 26 compressed (5-bit CCITT G.723.5 ADPCM),
->12 belong 27 8-bit A-law (CCITT G.711),
->20 belong 1 mono,
->20 belong 2 stereo,
->20 belong 4 quad,
->16 belong >0 %d Hz
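A sketch of the big-endian .snd/.au header fields tested above (encoding at offset 12, sample rate at 16, channels at 20). ENCODINGS is only a subset of the table above and the function name is illustrative.

import struct

ENCODINGS = {1: "8-bit mu-law", 2: "8-bit linear PCM", 3: "16-bit linear PCM",
             6: "32-bit IEEE float", 27: "8-bit A-law"}

def au_info(head):
    # 'head' is the first 24+ bytes of the file.
    if head[:4] != b".snd" or len(head) < 24:
        return None
    data_offset, data_size, encoding, rate, channels = struct.unpack_from(">5I", head, 4)
    return {"encoding": ENCODINGS.get(encoding, encoding),
            "sample_rate": rate, "channels": channels}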
-
-# DEC systems (e.g. DECstation 5000) use a variant of the Sun/NeXT format
-# that uses little-endian encoding and has a different magic number
-0 lelong 0x0064732E DEC audio data:
->12 lelong 1 8-bit ISDN mu-law,
-!:mime audio/x-dec-basic
->12 lelong 2 8-bit linear PCM [REF-PCM],
-!:mime audio/x-dec-basic
->12 lelong 3 16-bit linear PCM,
-!:mime audio/x-dec-basic
->12 lelong 4 24-bit linear PCM,
-!:mime audio/x-dec-basic
->12 lelong 5 32-bit linear PCM,
-!:mime audio/x-dec-basic
->12 lelong 6 32-bit IEEE floating point,
-!:mime audio/x-dec-basic
->12 lelong 7 64-bit IEEE floating point,
-!:mime audio/x-dec-basic
->12 belong 8 Fragmented sample data,
->12 belong 10 DSP program,
->12 belong 11 8-bit fixed point,
->12 belong 12 16-bit fixed point,
->12 belong 13 24-bit fixed point,
->12 belong 14 32-bit fixed point,
->12 belong 18 16-bit linear with emphasis,
->12 belong 19 16-bit linear compressed,
->12 belong 20 16-bit linear with emphasis and compression,
->12 belong 21 Music kit DSP commands,
->12 lelong 23 8-bit ISDN mu-law compressed (CCITT G.721 ADPCM voice enc.),
-!:mime audio/x-dec-basic
->12 belong 24 compressed (8-bit CCITT G.722 ADPCM)
->12 belong 25 compressed (3-bit CCITT G.723.3 ADPCM),
->12 belong 26 compressed (5-bit CCITT G.723.5 ADPCM),
->12 belong 27 8-bit A-law (CCITT G.711),
->20 lelong 1 mono,
->20 lelong 2 stereo,
->20 lelong 4 quad,
->16 lelong >0 %d Hz
-
-# Creative Labs AUDIO stuff
-0 string MThd Standard MIDI data
-!:mime audio/midi
->8 beshort x (format %d)
->10 beshort x using %d track
->10 beshort >1 \bs
->12 beshort&0x7fff x at 1/%d
->12 beshort&0x8000 >0 SMPTE
-
-0 string CTMF Creative Music (CMF) data
-!:mime audio/x-unknown
-0 string SBI SoundBlaster instrument data
-!:mime audio/x-unknown
-0 string Creative\ Voice\ File Creative Labs voice data
-!:mime audio/x-unknown
-# is this next line right? it came this way...
->19 byte 0x1A
->23 byte >0 - version %d
->22 byte >0 \b.%d
-
-# first entry is also the string "NTRK"
-0 belong 0x4e54524b MultiTrack sound data
->4 belong x - version %d
-
-# Extended MOD format (*.emd) (Greg Roelofs, newt@uchicago.edu); NOT TESTED
-# [based on posting 940824 by "Dirk/Elastik", husberg@lehtori.cc.tut.fi]
-0 string EMOD Extended MOD sound data,
->4 byte&0xf0 x version %d
->4 byte&0x0f x \b.%d,
->45 byte x %d instruments
->83 byte 0 (module)
->83 byte 1 (song)
-
-# Real Audio (Magic .ra\0375)
-0 belong 0x2e7261fd RealAudio sound file
-!:mime audio/x-pn-realaudio
-0 string .RMF\0\0\0 RealMedia file
-!:mime application/vnd.rn-realmedia
-#video/x-pn-realvideo
-#video/vnd.rn-realvideo
-#application/vnd.rn-realmedia
-# sigh, there are many mimes for that but the above are the most common.
-
-# MTM/669/FAR/S3M/ULT/XM format checking [Aaron Eppert, aeppert@dialin.ind.net]
-# Oct 31, 1995
-# fixed by <doj@cubic.org> 2003-06-24
-# Too short...
-#0 string MTM MultiTracker Module sound file
-#0 string if Composer 669 Module sound data
-#0 string JN Composer 669 Module sound data (extended format)
-0 string MAS_U ULT(imate) Module sound data
-
-#0 string FAR Module sound data
-#>4 string >\15 Title: "%s"
-
-0x2c string SCRM ScreamTracker III Module sound data
->0 string >\0 Title: "%s"
-!:mime audio/x-s3m
-
-# .stm before it got above .s3m extension
-0x16 string \!Scream\! ScreamTracker Module sound data
->0 string >\0 Title: "%s"
-
-# Gravis UltraSound patches
-# From <ache@nagual.ru>
-
-0 string GF1PATCH110\0ID#000002\0 GUS patch
-0 string GF1PATCH100\0ID#000002\0 Old GUS patch
-
-# mime types according to http://www.geocities.com/nevilo/mod.htm:
-# audio/it .it
-# audio/x-zipped-it .itz
-# audio/xm fasttracker modules
-# audio/x-s3m screamtracker modules
-# audio/s3m screamtracker modules
-# audio/x-zipped-mod mdz
-# audio/mod mod
-# audio/x-mod All modules (mod, s3m, 669, mtm, med, xm, it, mdz, stm, itz, xmz, s3z)
-
-#
-# Taken from loader code from mikmod version 2.14
-# by Steve McIntyre (stevem@chiark.greenend.org.uk)
-# <doj@cubic.org> added title printing on 2003-06-24
-0 string MAS_UTrack_V00
->14 string >/0 ultratracker V1.%.1s module sound data
-!:mime audio/x-mod
-#audio/x-tracker-module
-
-0 string UN05 MikMod UNI format module sound data
-
-0 string Extended\ Module: Fasttracker II module sound data
-!:mime audio/x-mod
-#audio/x-tracker-module
->17 string >\0 Title: "%s"
-
-21 string/c =!SCREAM! Screamtracker 2 module sound data
-!:mime audio/x-mod
-#audio/x-screamtracker-module
-21 string BMOD2STM Screamtracker 2 module sound data
-!:mime audio/x-mod
-#audio/x-screamtracker-module
-
-1080 string \!PM! 4-channel Protracker module sound data
-!:mime audio/x-mod
-#audio/x-protracker-module
->0 string >\0 Title: "%s"
-
-1080 string M.K. 4-channel Protracker module sound data
-!:mime audio/x-mod
-#audio/x-protracker-module
->0 string >\0 Title: "%s"
-
-1080 string M!K! 4-channel Protracker module sound data
-!:mime audio/x-mod
-#audio/x-protracker-module
->0 string >\0 Title: "%s"
-
-1080 string FLT4 4-channel Startracker module sound data
-!:mime audio/x-mod
-#audio/x-startracker-module
->0 string >\0 Title: "%s"
-
-1080 string FLT8 8-channel Startracker module sound data
-!:mime audio/x-mod
-#audio/x-startracker-module
->0 string >\0 Title: "%s"
-
-1080 string 4CHN 4-channel Fasttracker module sound data
-!:mime audio/x-mod
-#audio/x-fasttracker-module
->0 string >\0 Title: "%s"
-
-1080 string 6CHN 6-channel Fasttracker module sound data
-!:mime audio/x-mod
-#audio/x-fasttracker-module
->0 string >\0 Title: "%s"
-
-1080 string 8CHN 8-channel Fasttracker module sound data
-!:mime audio/x-mod
-#audio/x-fasttracker-module
->0 string >\0 Title: "%s"
-
-1080 string CD81 8-channel Octalyser module sound data
-!:mime audio/x-mod
-#audio/x-octalysertracker-module
->0 string >\0 Title: "%s"
-
-1080 string OKTA 8-channel Octalyzer module sound data
-!:mime audio/x-mod
-#audio/x-octalysertracker-module
->0 string >\0 Title: "%s"
-
-# Not good enough.
-#1082 string CH
-#>1080 string >/0 %.2s-channel Fasttracker "oktalyzer" module sound data
-1080 string 16CN 16-channel Taketracker module sound data
-!:mime audio/x-mod
-#audio/x-taketracker-module
->0 string >\0 Title: "%s"
-1080 string 32CN 32-channel Taketracker module sound data
-!:mime audio/x-mod
-#audio/x-taketracker-module
->0 string >\0 Title: "%s"
-
-# TOC sound files -Trevor Johnson <trevor@jpj.net>
-#
-0 string TOC TOC sound file
-
-# sidfiles <pooka@iki.fi>
-# added name,author,(c) and new RSID type by <doj@cubic.org> 2003-06-24
-0 string SIDPLAY\ INFOFILE Sidplay info file
-
-0 string PSID PlaySID v2.2+ (AMIGA) sidtune
->4 beshort >0 w/ header v%d,
->14 beshort =1 single song,
->14 beshort >1 %d songs,
->16 beshort >0 default song: %d
->0x16 string >\0 name: "%s"
->0x36 string >\0 author: "%s"
->0x56 string >\0 copyright: "%s"
-
-0 string RSID RSID sidtune PlaySID compatible
->4 beshort >0 w/ header v%d,
->14 beshort =1 single song,
->14 beshort >1 %d songs,
->16 beshort >0 default song: %d
->0x16 string >\0 name: "%s"
->0x36 string >\0 author: "%s"
->0x56 string >\0 copyright: "%s"
-
-# IRCAM sound files - Michael Pruett <michael@68k.org>
-# http://www-mmsp.ece.mcgill.ca/documents/AudioFormats/IRCAM/IRCAM.html
-0 belong 0x64a30100 IRCAM file (VAX little-endian)
-0 belong 0x0001a364 IRCAM file (VAX big-endian)
-0 belong 0x64a30200 IRCAM file (Sun big-endian)
-0 belong 0x0002a364 IRCAM file (Sun little-endian)
-0 belong 0x64a30300 IRCAM file (MIPS little-endian)
-0 belong 0x0003a364 IRCAM file (MIPS big-endian)
-0 belong 0x64a30400 IRCAM file (NeXT big-endian)
-0 belong 0x0004a364 IRCAM file (NeXT little-endian)
-
-# NIST SPHERE <mpruett@sgi.com>
-0 string NIST_1A\n\ \ \ 1024\n NIST SPHERE file
-
-# Sample Vision <mpruett@sgi.com>
-0 string SOUND\ SAMPLE\ DATA\ Sample Vision file
-
-# Audio Visual Research <tonigonenstein@users.sourceforge.net>
-0 string 2BIT Audio Visual Research file,
->12 beshort =0 mono,
->12 beshort =-1 stereo,
->14 beshort x %d bits
->16 beshort =0 unsigned,
->16 beshort =-1 signed,
->22 belong&0x00ffffff x %d Hz,
->18 beshort =0 no loop,
->18 beshort =-1 loop,
->21 ubyte <128 note %d,
->22 byte =0 replay 5.485 KHz
->22 byte =1 replay 8.084 KHz
->22 byte =2 replay 10.971 KHz
->22 byte =3 replay 16.168 KHz
->22 byte =4 replay 21.942 KHz
->22 byte =5 replay 32.336 KHz
->22 byte =6 replay 43.885 KHz
->22 byte =7 replay 47.261 KHz
-
-# SGI SoundTrack <mpruett@sgi.com>
-0 string _SGI_SoundTrack SGI SoundTrack project file
-# ID3 version 2 tags <waschk@informatik.uni-rostock.de>
-0 string ID3 Audio file with ID3 version 2
->3 byte x \b.%d
->4 byte x \b.%d
->>5 byte &0x80 \b, unsynchronized frames
->>5 byte &0x40 \b, extended header
->>5 byte &0x20 \b, experimental
->>5 byte &0x10 \b, footer present
->(6.I+10) indirect x \b, contains:
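The (6.I+10) indirect offset above skips the whole ID3v2 tag: the size at offset 6 is a 4-byte syncsafe integer (7 bits per byte), plus the 10-byte header. A sketch of that decoding (function name illustrative):

def id3v2_tag_end(header10):
    # 'header10' is the 10-byte ID3v2 header.
    if header10[:3] != b"ID3":
        return None
    size = 0
    for b in header10[6:10]:
        size = (size << 7) | (b & 0x7F)   # syncsafe: high bit of each byte unused
    return size + 10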
-
-# NSF (NES sound file) magic
-0 string NESM\x1a NES Sound File
->14 string >\0 ("%s" by
->46 string >\0 %s, copyright
->78 string >\0 %s),
->5 byte x version %d,
->6 byte x %d tracks,
->122 byte&0x2 =1 dual PAL/NTSC
->122 byte&0x1 =1 PAL
->122 byte&0x1 =0 NTSC
-
-# NSFE (Extended NES sound file) magic
-# http://slickproductions.org/docs/NSF/nsfespec.txt
-# From: David Pflug <david@pflug.email>
-0 string NSFE Extended NES Sound File
->48 search/0x1000 auth
->>&0 string >\0 ("%s"
->>>&1 string >\0 by %s
->>>>&1 string >\0 \b, copyright %s
->>>>>&1 string >\0 \b, ripped by %s
->20 byte x \b), %d tracks,
->18 byte&0x2 =1 dual PAL/NTSC
->18 byte&0x2 =0
->>18 byte&0x1 =1 PAL
->>18 byte&0x1 =0 NTSC
-
-# Type: SNES SPC700 sound files
-# From: Josh Triplett <josh@freedesktop.org>
-0 string SNES-SPC700\ Sound\ File\ Data\ v SNES SPC700 sound file
->&0 string 0.30 \b, version %s
->>0x23 byte 0x1B \b, without ID666 tag
->>0x23 byte 0x1A \b, with ID666 tag
->>>0x2E string >\0 \b, song "%.32s"
->>>0x4E string >\0 \b, game "%.32s"
-
-# Impulse tracker module (audio/x-it)
-0 string IMPM Impulse Tracker module sound data -
-!:mime audio/x-mod
->4 string >\0 "%s"
->40 leshort !0 compatible w/ITv%x
->42 leshort !0 created w/ITv%x
-
-# Imago Orpheus module (audio/x-imf)
-60 string IM10 Imago Orpheus module sound data -
->0 string >\0 "%s"
-
-# From <collver1@attbi.com>
-# These are the /etc/magic entries to decode modules, instruments, and
-# samples in Impulse Tracker's native format.
-
-0 string IMPS Impulse Tracker Sample
->18 byte &2 16 bit
->18 byte ^2 8 bit
->18 byte &4 stereo
->18 byte ^4 mono
-0 string IMPI Impulse Tracker Instrument
->28 leshort !0 ITv%x
->30 byte !0 %d samples
-
-# Yamaha TX Wave: file(1) magic for Yamaha TX Wave audio files
-# From <collver1@attbi.com>
-0 string LM8953 Yamaha TX Wave
->22 byte 0x49 looped
->22 byte 0xC9 non-looped
->23 byte 1 33kHz
->23 byte 2 50kHz
->23 byte 3 16kHz
-
-# scream tracker: file(1) magic for Scream Tracker sample files
-#
-# From <collver1@attbi.com>
-76 string SCRS Scream Tracker Sample
->0 byte 1 sample
->0 byte 2 adlib melody
->0 byte >2 adlib drum
->31 byte &2 stereo
->31 byte ^2 mono
->31 byte &4 16bit little endian
->31 byte ^4 8bit
->30 byte 0 unpacked
->30 byte 1 packed
-
-# audio
-# From: Cory Dikkers <cdikkers@swbell.net>
-0 string MMD0 MED music file, version 0
-0 string MMD1 OctaMED Pro music file, version 1
-0 string MMD3 OctaMED Soundstudio music file, version 3
-0 string OctaMEDCmpr OctaMED Soundstudio compressed file
-0 string MED MED_Song
-0 string SymM Symphonie SymMOD music file
-#
-# Track Length (TRL), Tracks (TRK), Samples (SMP), Subsongs (SS)
-# http://lclevy.free.fr/exotica/ahx/ahxformat.txt
-0 string THX AHX version
->3 byte =0 1 module data
->3 byte =1 2 module data
->11 ubyte x TRK: %u
->10 ubyte x TRL: %u
->12 ubyte x SMP: %u
->13 ubyte x SS: %u
->(4.H) string x Title: "%.128s"
-
-# header is mostly AHX format
-0 string HVL
->3 byte <2 Hively Tracker Song
->3 byte =0 v1 module data
->3 byte =1 v2 module data
->11 ubyte x TRK: %u
->10 ubyte x TRL: %u
->12 ubyte x SMP: %u
->13 ubyte x SS: %u
->8 ubyte/4 =0 CHN: 4
->8 ubyte/4 >0 CHN: 4+%u
-#>-0 offset <0xffff
->(4.H) string x Title: "%.128s"
-
-#
-0 string OKTASONG Oktalyzer module data
-#
-0 string DIGI\ Booster\ module\0 %s
->20 byte >0 %c
->>21 byte >0 \b%c
->>>22 byte >0 \b%c
->>>>23 byte >0 \b%c
->610 string >\0 \b, "%s"
-#
-0 string DBM0 DIGI Booster Pro Module
->4 byte >0 V%X.
->>5 byte x \b%02X
->16 string >\0 \b, "%s"
-#
-0 string FTMN FaceTheMusic module
->16 string >\0d \b, "%s"
-
-# From: <doj@cubic.org> 2003-06-24
-0 string AMShdr\32 Velvet Studio AMS Module v2.2
-0 string Extreme Extreme Tracker AMS Module v1.3
-0 string DDMF Xtracker DMF Module
->4 byte x v%i
->0xD string >\0 Title: "%s"
->0x2B string >\0 Composer: "%s"
-0 string DSM\32 Dynamic Studio Module DSM
-0 string SONG DigiTrekker DTM Module
-0 string DMDL DigiTrakker MDL Module
-0 string PSM\32 Protracker Studio PSM Module
-44 string PTMF Poly Tracker PTM Module
->0 string >\32 Title: "%s"
-0 string MT20 MadTracker 2.0 Module MT2
-0 string RAD\40by\40REALiTY!! RAD Adlib Tracker Module RAD
-0 string RTMM RTM Module
-0x426 string MaDoKaN96 XMS Adlib Module
->0 string >\0 Composer: "%s"
-0 string AMF AMF Module
->4 string >\0 Title: "%s"
-0 string MODINFO1 Open Cubic Player Module Information MDZ
-0 string Extended\40Instrument: Fast Tracker II Instrument
-
-# From: Takeshi Hamasaki <hma@syd.odn.ne.jp>
-# NOA Nancy Codec file
-0 string \210NOA\015\012\032 NOA Nancy Codec Movie file
-# Yamaha SMAF format
-0 string MMMD Yamaha SMAF file
-# Sharp Jisaku Melody format for PDC
-0 string \001Sharp\040JisakuMelody SHARP Cell-Phone ringing Melody
->20 string Ver01.00 Ver. 1.00
->>32 byte x , %d tracks
-
-# Free lossless audio codec <http://flac.sourceforge.net>
-# From: Przemyslaw Augustyniak <silvathraec@rpg.pl>
-0 string fLaC FLAC audio bitstream data
-!:mime audio/flac
->4 byte&0x7f >0 \b, unknown version
->4 byte&0x7f 0 \b
-# some common bits/sample values
->>20 beshort&0x1f0 0x030 \b, 4 bit
->>20 beshort&0x1f0 0x050 \b, 6 bit
->>20 beshort&0x1f0 0x070 \b, 8 bit
->>20 beshort&0x1f0 0x0b0 \b, 12 bit
->>20 beshort&0x1f0 0x0f0 \b, 16 bit
->>20 beshort&0x1f0 0x170 \b, 24 bit
->>20 byte&0xe 0x0 \b, mono
->>20 byte&0xe 0x2 \b, stereo
->>20 byte&0xe 0x4 \b, 3 channels
->>20 byte&0xe 0x6 \b, 4 channels
->>20 byte&0xe 0x8 \b, 5 channels
->>20 byte&0xe 0xa \b, 6 channels
->>20 byte&0xe 0xc \b, 7 channels
->>20 byte&0xe 0xe \b, 8 channels
-# sample rates derived from known oscillator frequencies;
-# 24.576 MHz (video/fs=48kHz), 22.5792 (audio/fs=44.1kHz) and
-# 16.384 (other/fs=32kHz).
->>17 belong&0xfffff0 0x02b110 \b, 11.025 kHz
->>17 belong&0xfffff0 0x03e800 \b, 16 kHz
->>17 belong&0xfffff0 0x056220 \b, 22.05 kHz
->>17 belong&0xfffff0 0x05dc00 \b, 24 kHz
->>17 belong&0xfffff0 0x07d000 \b, 32 kHz
->>17 belong&0xfffff0 0x0ac440 \b, 44.1 kHz
->>17 belong&0xfffff0 0x0bb800 \b, 48 kHz
->>17 belong&0xfffff0 0x0fa000 \b, 64 kHz
->>17 belong&0xfffff0 0x158880 \b, 88.2 kHz
->>17 belong&0xfffff0 0x177000 \b, 96 kHz
->>17 belong&0xfffff0 0x1f4000 \b, 128 kHz
->>17 belong&0xfffff0 0x2b1100 \b, 176.4 kHz
->>17 belong&0xfffff0 0x2ee000 \b, 192 kHz
->>17 belong&0xfffff0 0x3e8000 \b, 256 kHz
->>17 belong&0xfffff0 0x562200 \b, 352.8 kHz
->>17 belong&0xfffff0 0x5dc000 \b, 384 kHz
->>21 byte&0xf >0 \b, >4G samples
->>21 byte&0xf 0 \b
->>>22 belong >0 \b, %u samples
->>>22 belong 0 \b, length unknown
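The masked tests above pick bit fields out of the STREAMINFO block, which starts at offset 8 (after 'fLaC' and the 4-byte metadata block header). A sketch of the same decoding (function name illustrative):

def flac_streaminfo(path):
    with open(path, "rb") as f:
        head = f.read(26)
    if len(head) < 26 or head[:4] != b"fLaC":
        return None
    packed = int.from_bytes(head[18:26], "big")       # 64 bits of packed fields
    sample_rate = packed >> 44                        # 20 bits
    channels = ((packed >> 41) & 0x7) + 1             # 3 bits, stored minus one
    bits_per_sample = ((packed >> 36) & 0x1F) + 1     # 5 bits, stored minus one
    total_samples = packed & 0xFFFFFFFFF              # 36 bits
    return sample_rate, channels, bits_per_sample, total_samples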
-
-# (ISDN) VBOX voice message file (Wolfram Kleff)
-0 string VBOX VBOX voice message data
-
-# ReBorn Song Files (.rbs)
-# David J. Singer <doc@deadvirgins.org.uk>
-8 string RB40 RBS Song file
->29 string ReBorn created by ReBorn
->37 string Propellerhead created by ReBirth
-
-# Synthesizer Generator and Kimwitu share their file format
-0 string A#S#C#S#S#L#V#3 Synthesizer Generator or Kimwitu data
-# Kimwitu++ uses a slightly different magic
-0 string A#S#C#S#S#L#HUB Kimwitu++ data
-
-# From "Simon Hosie
-0 string TFMX-SONG TFMX module sound data
-
-# Monkey's Audio compressed audio format (.ape)
-# From danny.milo@gmx.net (Danny Milosavljevic)
-# New version from Abel Cheung <abel (@) oaka.org>
-0 string MAC\040 Monkey's Audio compressed format
-!:mime audio/x-ape
->4 uleshort >0x0F8B version %d
->>(0x08.l) uleshort =1000 with fast compression
->>(0x08.l) uleshort =2000 with normal compression
->>(0x08.l) uleshort =3000 with high compression
->>(0x08.l) uleshort =4000 with extra high compression
->>(0x08.l) uleshort =5000 with insane compression
->>(0x08.l+18) uleshort =1 \b, mono
->>(0x08.l+18) uleshort =2 \b, stereo
->>(0x08.l+20) ulelong x \b, sample rate %d
->4 uleshort <0x0F8C version %d
->>6 uleshort =1000 with fast compression
->>6 uleshort =2000 with normal compression
->>6 uleshort =3000 with high compression
->>6 uleshort =4000 with extra high compression
->>6 uleshort =5000 with insane compression
->>10 uleshort =1 \b, mono
->>10 uleshort =2 \b, stereo
->>12 ulelong x \b, sample rate %d
-
-# adlib sound files
-# From: Alex Myczko <alex@aiei.ch>
-
-# https://github.com/rerrahkr/BambooTracker
-0 string BambooTracker BambooTracker
->13 string Mod Module
->13 string Ist Instrument
->13 string Bnk Bank
->22 byte x \b, version %u
->21 byte x \b.%u
->20 byte x \b.%u
-
-0 string CC2x CheeseCutter 2 song
-
-0 string RAWADATA RdosPlay RAW
-
-1068 string RoR AMUSIC Adlib Tracker
-
-0 string JCH EdLib
-
-0 string mpu401tr MPU-401 Trakker
-
-0 string SAdT Surprise! Adlib Tracker
->4 byte x Version %d
-
-0 string XAD! eXotic ADlib
-
-0 string ofTAZ! eXtra Simple Music
-
-0 string FMK! FM Kingtracker Song
-
-0 string DFM DFM Song
-
-0 string \<CUD-FM-File\> CFF Song
-
-0 string _A2module A2M Song
-
-# Spectrum 128 tunes (.ay files).
-# From: Emanuel Haupt <ehaupt@critical.ch>
-0 string ZXAYEMUL Spectrum 128 tune
-
-0 string \0BONK BONK,
-#>5 byte x version %d
->14 byte x %d channel(s),
->15 byte =1 lossless,
->15 byte =0 lossy,
->16 byte x mid-side
-
-384 string LockStream LockStream Embedded file (mostly MP3 on old Nokia phones)
-
-# format VQF (proprietary codec for sound)
-# some infos on the header file available at :
-# http://www.twinvq.org/english/technology_format.html
-0 string TWIN97012000 VQF data
->27 short 0 \b, Mono
->27 short 1 \b, Stereo
->31 short >0 \b, %d kbit/s
->35 short >0 \b, %d kHz
-
-# Nelson A. de Oliveira (naoliv@gmail.com)
-# .eqf
-0 string Winamp\ EQ\ library\ file %s
-# it will match only versions like v<digit>.<digit>
-# Since I saw only eqf files with version v1.1 I think that it's OK
->23 string x \b%.4s
-# .preset
-0 string [Equalizer\ preset] XMMS equalizer preset
-# .m3u
-0 search/1 #EXTM3U M3U playlist text
-# .pls
-0 search/1 [playlist] PLS playlist text
-# licq.conf
-1 string [licq] LICQ configuration file
-
-# Atari ST audio files by Dirk Jagdmann <doj@cubic.org>
-# NOTE: Most SNDH music is packed using ICE, which has
-# magic numbers "ICE!" and "Ice!". Some SNDH music is
-# not packed, so we check for both packed and unpacked.
-12 string SNDH SNDH Atari ST music
-0 belong&0xFFDFDFFF 0x49434521
->14 search/40 NDH SNDH Atari ST music
->14 search/40 TITL SNDH Atari ST music
-0 string SC68\ Music-file\ /\ (c)\ (BeN)jami sc68 Atari ST music
-
-# musepak support From: "Jiri Pejchal" <jiri.pejchal@gmail.com>
-0 string MP+ Musepack audio (MP+)
-!:mime audio/x-musepack
->3 byte 255 \b, SV pre8
->3 byte&0xF 0x6 \b, SV 6
->3 byte&0xF 0x8 \b, SV 8
->3 byte&0xF 0x7 \b, SV 7
->>3 byte&0xF0 0x0 \b.0
->>3 byte&0xF0 0x10 \b.1
->>3 byte&0xF0 240 \b.15
->>10 byte&0xF0 0x0 \b, no profile
->>10 byte&0xF0 0x10 \b, profile 'Unstable/Experimental'
->>10 byte&0xF0 0x50 \b, quality 0
->>10 byte&0xF0 0x60 \b, quality 1
->>10 byte&0xF0 0x70 \b, quality 2 (Telephone)
->>10 byte&0xF0 0x80 \b, quality 3 (Thumb)
->>10 byte&0xF0 0x90 \b, quality 4 (Radio)
->>10 byte&0xF0 0xA0 \b, quality 5 (Standard)
->>10 byte&0xF0 0xB0 \b, quality 6 (Xtreme)
->>10 byte&0xF0 0xC0 \b, quality 7 (Insane)
->>10 byte&0xF0 0xD0 \b, quality 8 (BrainDead)
->>10 byte&0xF0 0xE0 \b, quality 9
->>10 byte&0xF0 0xF0 \b, quality 10
->>27 byte 0x0 \b, Buschmann 1.7.0-9, Klemm 0.90-1.05
->>27 byte 102 \b, Beta 1.02
->>27 byte 104 \b, Beta 1.04
->>27 byte 105 \b, Alpha 1.05
->>27 byte 106 \b, Beta 1.06
->>27 byte 110 \b, Release 1.1
->>27 byte 111 \b, Alpha 1.11
->>27 byte 112 \b, Beta 1.12
->>27 byte 113 \b, Alpha 1.13
->>27 byte 114 \b, Beta 1.14
->>27 byte 115 \b, Alpha 1.15
-
-0 string MPCK Musepack audio (MPCK)
-!:mime audio/x-musepack
-
-# IMY
-# from http://filext.com/detaillist.php?extdetail=IMY
-# https://cellphones.about.com/od/cellularfaqs/f/rf_imelody.htm
-# http://download.ncl.ie/doc/api/ie/ncl/media/music/IMelody.html
-# http://www.wx800.com/msg/download/irda/iMelody.pdf
-0 string BEGIN:IMELODY iMelody Ringtone Format
-
-# From: "Mateus Caruccio" <mateus@caruccio.com>
-# guitar pro v3,4,5 from http://filext.com/file-extension/gp3
-0 string \030FICHIER\ GUITAR\ PRO\ v3. Guitar Pro Ver. 3 Tablature
-
-# From: "Leslie P. Polzer" <leslie.polzer@gmx.net>
-60 string SONG SoundFX Module sound file
-
-# Type: Adaptive Multi-Rate Codec
-# URL: http://filext.com/detaillist.php?extdetail=AMR
-# From: Russell Coker <russell@coker.com.au>
-0 string #!AMR Adaptive Multi-Rate Codec (GSM telephony)
-!:mime audio/amr
-!:ext amr
-
-# Type: SuperCollider 3 Synth Definition File Format
-# From: Mario Lang <mlang@debian.org>
-0 string SCgf SuperCollider3 Synth Definition file,
->4 belong x version %d
-
-# Type: True Audio Lossless Audio
-# URL: https://wiki.multimedia.cx/index.php?title=True_Audio
-# From: Mike Melanson <mike@multimedia.cx>
-0 string TTA1 True Audio Lossless Audio
-
-# Type: WavPack Lossless Audio
-# URL: https://wiki.multimedia.cx/index.php?title=WavPack
-# From: Mike Melanson <mike@multimedia.cx>
-0 string wvpk WavPack Lossless Audio
-
-# From Fabio R. Schmidlin <frs@pop.com.br>
-# VGM music file
-0 string Vgm\040
->9 ubyte >0 VGM Video Game Music dump v
-!:mime audio/x-vgm
-!:ext vgm
->>9 ubyte/16 >0 \b%d
->>9 ubyte&0x0F x \b%d
->>8 ubyte/16 x \b.%d
->>8 ubyte&0x0F >0 \b%d
-#Get soundchips
->>8 ubyte x \b, soundchip(s)=
->>0x0C ulelong >0 SN76489 (PSG),
->>0x10 ulelong >0 YM2413 (OPLL),
->>0x2C ulelong >0 YM2612 (OPN2),
->>0x30 ulelong >0 YM2151 (OPM),
->>0x38 ulelong >0 Sega PCM,
->>0x34 ulelong >0xC
->>>0x40 ulelong >0 RF5C68 (PCM),
->>0x34 ulelong >0x10
->>>0x44 ulelong >0 YM2203 (OPN),
->>0x34 ulelong >0x14
->>>0x48 ulelong >0 YM2608 (OPNA),
->>0x34 ulelong >0x18
->>>0x4C lelong >0 YM2610 (OPNB),
->>>0x4C lelong <0 YM2610B (OPNB+2FM),
->>0x34 ulelong >0x1C
->>>0x50 ulelong >0 YM3812 (OPL2),
->>0x34 ulelong >0x20
->>>0x54 ulelong >0 YM3526 (OPL),
->>0x34 ulelong >0x24
->>>0x58 ulelong >0 Y8950 (MSX-Audio),
->>0x34 ulelong >0x28
->>>0x5C ulelong >0 YMF262 (OPL3),
->>0x34 ulelong >0x2C
->>>0x60 ulelong >0 YMF278B (OPL4),
->>0x34 ulelong >0x30
->>>0x64 ulelong >0 YMF271 (OPX),
->>0x34 ulelong >0x34
->>>0x68 ulelong >0 YMZ280B (PCMD8),
->>0x34 ulelong >0x38
->>>0x6C ulelong >0 RF5C164 (PCM),
->>0x34 ulelong >0x3C
->>>0x70 ulelong >0 PWM,
->>0x34 ulelong >0x40
->>>0x74 ulelong >0
->>>>0x78 ubyte 0x00 AY-3-8910,
->>>>0x78 ubyte 0x01 AY-3-8912,
->>>>0x78 ubyte 0x02 AY-3-8913,
->>>>0x78 ubyte 0x03 AY-3-8930,
->>>>0x78 ubyte 0x10 YM2149,
->>>>0x78 ubyte 0x11 YM3439,
->>>>0x78 ubyte 0x12 YMZ284,
->>>>0x78 ubyte 0x13 YMZ294,
-# VGM 1.61
->>0x34 ulelong >0x4C
->>>0x80 ulelong >0 DMG,
->>0x34 ulelong >0x50
->>>0x84 lelong >0 NES APU,
->>>0x84 lelong <0 NES APU with FDS,
->>0x34 ulelong >0x54
->>>0x88 ulelong >0 MultiPCM,
->>0x34 ulelong >0x58
->>>0x8C ulelong >0 uPD7759 (ADPCM Speech),
->>0x34 ulelong >0x5C
->>>0x90 ulelong >0 OKIM6258 (ADPCM Speech),
->>0x34 ulelong >0x64
->>>0x98 ulelong >0 OKIM6295 (ADPCM),
->>0x34 ulelong >0x68
->>>0x9C ulelong >0 K051649,
->>0x34 ulelong >0x6C
->>>0xA0 ulelong >0 K054539,
->>0x34 ulelong >0x70
->>>0xA4 ulelong >0 HuC6280,
->>0x34 ulelong >0x74
->>>0xA8 ulelong >0 C140,
->>0x34 ulelong >0x78
->>>0xAC ulelong >0 K053260,
->>0x34 ulelong >0x7C
->>>0xB0 ulelong >0 Pokey,
->>0x34 ulelong >0x80
->>>0xB4 ulelong >0 QSound,
-# VGM 1.71
->>0x34 ulelong >0x84
->>>0xB8 ulelong >0 SCSP,
->>0x34 ulelong >0x8C
->>>0xC0 ulelong >0 WonderSwan,
->>0x34 ulelong >0x90
->>>0xC4 ulelong >0 VSU,
->>0x34 ulelong >0x94
->>>0xC8 ulelong >0 SAA1099,
->>0x34 ulelong >0x98
->>>0xCC ulelong >0 ES5503 (DOC),
->>0x34 ulelong >0x9C
->>>0xD0 lelong >0 ES5505 (OTIS),
->>>0xD0 lelong <0 ES5506 (OTTO),
->>0x34 ulelong >0xA4
->>>0xD8 ulelong >0 X1-010,
->>0x34 ulelong >0xA8
->>>0xDC ulelong >0 C352,
->>0x34 ulelong >0xAC
->>>0xE0 ulelong >0 GA20,
-
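For orientation, a minimal sketch (illustration only, not part of the magic database) of the VGM header arithmetic used above: the version is BCD in bytes 8-9, and the relative data offset at 0x34 decides whether an extended clock field such as RF5C68 at 0x40 is present at all. The helper name and file path are placeholders, and only two chips are shown.

import struct

def vgm_info(path):
    with open(path, "rb") as f:
        hdr = f.read(0x100)
    if hdr[:4] != b"Vgm ":
        return None
    major = (hdr[9] >> 4) * 10 + (hdr[9] & 0x0F)      # BCD major version
    minor = (hdr[8] >> 4) * 10 + (hdr[8] & 0x0F)      # BCD minor version
    rel_ofs = struct.unpack_from("<I", hdr, 0x34)[0]  # data offset relative to 0x34
    def clock(abs_ofs):
        # A clock field exists only if the VGM data starts after it,
        # mirroring the ">0x34 ulelong >N" guards in the rules above.
        if abs_ofs >= 0x38 and rel_ofs <= abs_ofs - 0x34:
            return 0
        return struct.unpack_from("<I", hdr, abs_ofs)[0]
    chips = []
    if clock(0x0C):
        chips.append("SN76489 (PSG)")
    if clock(0x40):
        chips.append("RF5C68 (PCM)")
    return f"VGM v{major}.{minor:02d}, soundchip(s): {', '.join(chips) or 'none detected'}"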
-# GVOX Encore file format
-# Since this is a proprietary file format and there is no publicly available
-# format specification, this is just based on induction
-#
-0 string SCOW
->4 byte 0xc4 GVOX Encore music, version 5.0 or above
->4 byte 0xc2 GVOX Encore music, version < 5.0
-
-0 string ZBOT
->4 byte 0xc5 GVOX Encore music, version < 5.0
-
-# Summary: Garmin Voice Processing Module (WAVE audios)
-# From: Joerg Jenderek
-# URL: https://www.garmin.com/
-# Reference: http://www.poi-factory.com/node/19580
-# NOTE: there exist 2 other Garmin VPM formats
-0 string AUDIMG
-# skip text files starting with string "AUDIMG"
->13 ubyte <13 Garmin Voice Processing Module
-!:mime audio/x-vpm-wav-garmin
-!:ext vpm
-# 3 bytes indicating the voice version (200,220)
->>6 string x \b, version %3.3s
-# day of release (01-31)
->>12 ubyte x \b, %.2d
-# month of release (01-12)
->>13 ubyte x \b.%.2d
-# year of release (like 2006, 2007, 2008)
->>14 uleshort x \b.%.4d
-# hour of release (0-23)
->>11 ubyte x %.2d
-# minute of release (0-59)
->>10 ubyte x \b:%.2d
-# second of release (0-59)
->>9 ubyte x \b:%.2d
-# if you select a language like german on your garmin device
-# you can only select voice modules with corresponding language byte ID like 1
->>18 ubyte x \b, language ID %d
-# structure for phrases/sentences?
-# number of voice sample in the 1st phrase?
-#>>19 uleshort x \b, %#x samples
-#>>>21 uleshort >0 \b, at %#4.4x
-#>>>(21.s) ubequad x %#llx
-# 2nd phrase?
-#>>23 uleshort x \b, %#x samples
-#>>>25 uleshort >0 \b, at %#4.4x
-#>>>(25.s) ubequad x %#llx
-# pointer to 1st audio WAV sample
->>16 uleshort >0
->>>(16.s) ulelong >0 \b, at %#x
-# WAV length
-# 1 space char after "bytes" to get phrase "bytes RIFF"
->>>>(16.s+4) ulelong >0 %u bytes
-# look for magic
->>>>>(&-8.l) string RIFF
-# determine type by ./riff
->>>>>>&-4 indirect x
-# 2 - ~ 131 WAV samples following same way
-#
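A hedged sketch (illustration only) of the pointer chain the Garmin VPM rules above walk to reach the first embedded RIFF/WAV sample; the offsets mirror the rules, while the helper name and file path are placeholders and no error handling is attempted.

import struct

def first_vpm_wav(path):
    with open(path, "rb") as f:
        data = f.read()
    if not data.startswith(b"AUDIMG"):
        return None
    ptr = struct.unpack_from("<H", data, 16)[0]           # 16-bit pointer stored at offset 16
    wav_ofs = struct.unpack_from("<I", data, ptr)[0]      # absolute offset of the 1st sample
    wav_len = struct.unpack_from("<I", data, ptr + 4)[0]  # its length in bytes
    if data[wav_ofs:wav_ofs + 4] == b"RIFF":              # the rules then hand off to ./riff
        return wav_ofs, wav_len
    return None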
-# Summary: encrypted Garmin Voice Processing Module
-# From: Joerg Jenderek
-# URL: https://www.garmin.com/us/products/ontheroad/voicestudio
-# NOTE: Encrypted variant used in voices like DrNightmare, Elfred, Yeti.
-# There exist 2 other Garmin VPM formats
-0 ubequad 0xa141190fecc8ced6 Garmin Voice Processing Module (encrypted)
-!:mime audio/x-vpm-garmin
-!:ext vpm
-
-# From Martin Mueller Skarbiniks Pedersen
-0 string GDM
->0x3 byte 0xFE General Digital Music.
->0x4 string >\0 title: "%s"
->0x24 string >\0 musician: "%s"
->>0x44 beshort 0x0D0A
->>>0x46 byte 0x1A
->>>>0x47 string GMFS Version
->>>>0x4B byte x %d.
->>>>0x4C byte x \b%02d
->>>>0x4D beshort 0x000 (2GDM v
->>>>0x4F byte x \b%d.
->>>>>0x50 byte x \b%d)
-
-0 string MTM Multitracker
->0x3 byte/16 x Version %d.
->0x3 byte&0x0F x \b%02d
->>0x4 string >\0 title: "%s"
-
-0 string MO3
->3 ubyte <6 MOdule with MP3
->>3 byte 0 Version 0 (With MP3 and lossless)
->>3 byte 1 Version 1 (With ogg and lossless)
->>3 byte 3 Version 2.2
->>3 byte 4 (With no LAME header)
->>3 byte 5 Version 2.4
-
-0 string ADRVPACK AProSys module
-
-# ftp://ftp.modland.com/pub/documents/format_documentation/\
-# Art%20Of%20Noise%20(.aon).txt
-0 string AON
->4 string "ArtOfNoise by Bastian Spiegel(twice/lego)"
->0x2e string NAME Art of Noise Tracker Song
->3 string <9
->3 string 4 (4 voices)
->3 string 8 (8 voices)
->>0x36 string >\0 Title: "%s"
-
-0 string FAR
->0x2c byte 0x0d
->0x2d byte 0x0a
->0x2e byte 0x1a
->>0x3 byte 0xFE Farandole Tracker Song
->>>0x31 byte/16 x Version %d.
->>>0x31 byte&0x0F x \b%02d
->>>>0x4 string >\0 \b, title: "%s"
-
-# magic for Klystrack, https://kometbomb.github.io/klystrack/
-# from Alex Myczko <alex@aiei.ch>
-0 string cyd!song Klystrack song
->8 byte >0 \b, version %u
->8 byte >26
-#>>9 byte x \b, channels %u
-#>>10 leshort x \b, time signature %u
-#>>12 leshort x \b, sequence step %u
-#>>14 byte x \b, instruments %u
-#>>15 leshort x \b, patterns %u
-#>>17 leshort x \b, sequences %u
-#>>19 leshort x \b, length %u
-#>>21 leshort x \b, loop point %u
-#>>23 byte x \b, master volume %u
-#>>24 byte x \b, song speed %u
-#>>25 byte x \b, song speed2 %u
-#>>26 byte x \b, song rate %u
-#>>27 belong x \b, flags %#x
-#>>31 byte x \b, multiplex period %u
-#>>32 byte x \b, pitch inaccuracy %u
->>149 pstring x \b, title %s
-
-0 string cyd!inst Klystrack instrument
-
-# magic for WOPL instrument files, https://github.com/Wohlstand/OPL3BankEditor
-# see Specifications/WOPL-and-OPLI-Specification.txt
-
-0 string WOPL3-INST\0 WOPL instrument
->11 leshort x \b, version %u
-0 string WOPL3-BANK\0 WOPL instrument bank
->11 leshort x \b, version %u
-
-# AdLib/OPL instrument files. Format specifications on
-# http://www.shikadi.net/moddingwiki
-0 string Junglevision\ Patch\ File Junglevision instrument data
-0 string #OPL_II# DMX OP2 instrument data
-0 string IBK\x1a IBK instrument data
-0 string 2OP\x1a IBK instrument data, 2 operators
-0 string 4OP\x1a IBK instrument data, 4 operators
-2 string ADLIB- AdLib instrument data
->0 byte x \b, version %u
->1 byte x \b.%u
-
-# CRI ADX ADPCM audio
-# Used by various Sega games.
-# https://en.wikipedia.org/wiki/ADX_(file_format)
-# https://wiki.multimedia.cx/index.php/CRI_ADX_file
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0x00 beshort 0x8000
->(2.S-2) string (c)CRI CRI ADX ADPCM audio
-!:ext adx
-!:mime audio/x-adx
-!:strength +50
->>0x12 byte x v%u
->>0x04 byte 0x02 \b, pre-set prediction coefficients
->>0x04 byte 0x03 \b, standard ADX
->>0x04 byte 0x04 \b, exponential scale
->>0x04 byte 0x10 \b, AHX (Dreamcast)
->>0x04 byte 0x11 \b, AHX
->>0x08 belong x \b, %u Hz
->>0x12 byte 0x03
->>>0x02 beshort >0x2B
->>>>0x18 belong !0 \b, looping
->>0x12 byte 0x04
->>>0x02 beshort >0x37
->>>>0x24 belong !0 \b, looping
->>0x13 byte&0x08 0x08 \b, encrypted
-
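A minimal sketch (illustration only) of the indirection the ADX rule above performs: the big-endian 16-bit value at offset 2 points just past the "(c)CRI" copyright string, so the string itself is checked at that offset minus 2. Helper name and file path are placeholders.

import struct

def is_adx(path):
    with open(path, "rb") as f:
        head = f.read(4096)
    if len(head) < 4 or struct.unpack_from(">H", head, 0)[0] != 0x8000:
        return False
    copyright_ofs = struct.unpack_from(">H", head, 2)[0]   # the "(2.S-2)" offset
    return head[copyright_ofs - 2:copyright_ofs + 4] == b"(c)CRI"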
-# Lossless audio (.la) (http://www.lossless-audio.com/)
-0 string LA
->2 string 03 Lossless audio version 0.3
->2 string 04 Lossless audio version 0.4
-
-# Sony PlayStation Audio (.xa)
-0 leshort 0x4158 Sony PlayStation Audio
-
-# Portable Sound Format
-# Used for audio rips for various consoles.
-# http://fileformats.archiveteam.org/wiki/Portable_Sound_Format
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string PSF
->3 byte 0x01
->3 byte 0x02
->3 byte 0x11
->3 byte 0x12
->3 byte 0x13
->3 byte 0x21
->3 byte 0x22
->3 byte 0x23
->3 byte 0x41
->>0 string PSF Portable Sound Format
-!:mime audio/x-psf
->>>3 byte 0x01 (Sony PlayStation)
->>>3 byte 0x02 (Sony PlayStation 2)
->>>3 byte 0x11 (Sega Saturn)
->>>3 byte 0x12 (Sega Dreamcast)
->>>3 byte 0x13 (Sega Mega Drive)
->>>3 byte 0x21 (Nintendo 64)
->>>3 byte 0x22 (Game Boy Advance)
->>>3 byte 0x23 (Super NES)
->>>3 byte 0x41 (Capcom QSound)
-
-# Atari 8-bit SAP audio format
-# http://asap.sourceforge.net/sap-format.html
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string SAP\r\n Atari 8-bit SAP audio file
-!:mime audio/x-sap
-!:ext sap
->5 search/1024 NAME
->>&1 string x \b: %s
->>5 search/1024 AUTHOR
->>>&1 string x by %s
-
-# Nintendo Wii BRSTM audio format (fields)
-# NOTE: Assuming HEAD starts at 0x40.
-# FIXME: Replace 0x48 with HEAD offset plus 8.
-0 name nintendo-wii-brstm-fields
->(0x10.L) string HEAD \b:
->>(0x10.L+0x0C) belong x
->>>(&-4.L+0x48) belong x
->>>>&-4 byte 0 PCM, signed 8-bit,
->>>>&-4 byte 1 PCM, signed 16-bit,
->>>>&-4 byte 2 THP ADPCM,
->>>>&-3 byte !0 looping,
->>>>&-2 byte 1 mono
->>>>&-2 byte 2 stereo
->>>>&-2 byte 3 3 channels
->>>>&-2 byte 4 quad
->>>>&-2 byte >4 %u channels
->>>>&0 beshort !0 %u Hz
-
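A hedged sketch (illustration only) of what the named rule above extracts: the big-endian offset at 0x10 points at the HEAD block, the first HEAD sub-chunk offset sits at HEAD+0x0C, and that sub-chunk holds codec, loop flag, channel count and sample rate. Like the FIXME, this assumes HEAD starts at 0x40 so sub-chunk offsets resolve relative to 0x48; names and path are placeholders.

import struct

BRSTM_CODECS = {0: "PCM, signed 8-bit", 1: "PCM, signed 16-bit", 2: "THP ADPCM"}

def brstm_fields(path):
    with open(path, "rb") as f:
        data = f.read()
    if data[:4] != b"RSTM":
        return None
    head_ofs = struct.unpack_from(">I", data, 0x10)[0]            # usually 0x40
    sub1_rel = struct.unpack_from(">I", data, head_ofs + 0x0C)[0] # 1st sub-chunk offset
    info = head_ofs + 8 + sub1_rel                                # the rule hardcodes 0x48
    codec, looping, channels = data[info], data[info + 1], data[info + 2]
    rate = struct.unpack_from(">H", data, info + 4)[0]
    return BRSTM_CODECS.get(codec, "unknown codec"), bool(looping), channels, rate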
-# Nintendo Wii BRSTM audio format
-# https://wiibrew.org/wiki/BRSTM_file
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string RSTM Nintendo Wii BRSTM audio file
-!:mime audio/x-brstm
-!:ext brstm
-# Wii is big-endian, so default to BE.
->4 beshort 0xFEFF
->>0 use nintendo-wii-brstm-fields
->4 leshort 0xFEFF
->>0 use \^nintendo-wii-brstm-fields
-
-# Nintendo 3DS BCSTM audio format (fields)
-0 name nintendo-3ds-bcstm-fields
->(0x18.l) string INFO \b:
-# INFO block: Stream information starts at 0x20 (minus 4 for the 'INFO' magic)
->>&0x1C byte 0 PCM, signed 8-bit,
->>&0x1C byte 1 PCM, signed 16-bit,
->>&0x1C byte 2 DSP ADPCM,
->>&0x1C byte 3 IMA ADPCM,
->>&0x1D byte !0 looping,
->>&0x1E byte 1 mono
->>&0x1E byte 2 stereo
->>&0x1E byte 3 3 channels
->>&0x1E byte 4 quad
->>&0x1E byte >4 %u channels
->>&0x20 lelong !0 %u Hz
-
-# Nintendo 3DS BCSTM audio format
-# https://www.3dbrew.org/wiki/BCSTM
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string CSTM Nintendo 3DS BCSTM audio file
-!:mime audio/x-bcstm
-!:ext bcstm
-# 3DS is little-endian, so default to LE.
->4 leshort 0xFEFF
->>0 use nintendo-3ds-bcstm-fields
->4 beshort 0xFEFF
->>0 use \^nintendo-3ds-bcstm-fields
-
-# Nintendo Wii U BFSTM audio format
-# http://mk8.tockdom.com/wiki/BFSTM_(File_Format)
-# NOTE: This format is very similar to BCSTM.
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string FSTM Nintendo Wii U BFSTM audio file
-!:mime audio/x-bfstm
-!:ext bfstm
-# BFSTM is used on both Wii U (BE) and Switch (LE),
-# so default to LE.
->4 leshort 0xFEFF
->>0 use nintendo-3ds-bcstm-fields
->4 beshort 0xFEFF
->>0 use \^nintendo-3ds-bcstm-fields
-
-# Nintendo 3DS BCWAV audio format (fields)
-0 name nintendo-3ds-bcwav-fields
->(0x18.l) string INFO \b:
-# INFO block (minus 4 for INFO magic)
->>&0x4 byte 0 PCM, signed 8-bit,
->>&0x4 byte 1 PCM, signed 16-bit,
->>&0x4 byte 2 DSP ADPCM,
->>&0x4 byte 3 IMA ADPCM,
->>&0x5 byte !0 looping,
->>&0x8 lelong x stereo
->>&0x8 lelong !0 %u Hz
-
-# Nintendo 3DS BCWAV audio format
-# https://www.3dbrew.org/wiki/BCWAV
-# Added by David Korth <gerbilsoft@gerbilsoft.com>
-0 string CWAV Nintendo 3DS BCWAV audio file
-!:mime audio/x-bcwav
-!:ext bcwav
-# 3DS is little-endian, so default to LE.
->4 leshort 0xFEFF
->>0 use nintendo-3ds-bcwav-fields
->4 beshort 0xFEFF
->>0 use \^nintendo-3ds-bcwav-fields
-
-# Philips DSDIFF audio format (Direct Stream Digital Interchange File Format)
-# Used for DSD audio recordings and Super Audio CD (SACD) mastering annotations
-# https://dsd-guide.com/sites/default/files/white-papers/DSDIFF_1.5_Spec.pdf
-# From: Toni Ruottu <toni.ruottu@iki.fi>
-0 string FRM8
-12 string DSD\x20 DSDIFF audio bitstream data
-!:mime audio/x-dff
-!:ext dff
-
-# format version chunk
->&0 string FVER
-# version 1
->>&8 byte 1
-
-# v1 / sampling resolution ( 1 bit PDM only )
->>>&0 string x \b, 1 bit
-
-# v1 / sound property chunk
->>>&0 search/0xff PROP
->>>>&8 string SND
-
-# v1 / sound property chunk / channel configuration chunk
->>>>>&0 search/0xff CHNL
->>>>>>&8 ubeshort 1 \b, mono
->>>>>>&8 ubeshort 2
->>>>>>>&0 string SLFTSRGT \b, stereo
->>>>>>>&0 default x \b, 2 channels
->>>>>>&8 ubeshort 3
->>>>>>>&0 string SLFTSRGTLFE\x20 \b, 2.1 stereo
->>>>>>>&0 string SLFTSRGTC\x20\x20\x20 \b, 3.0 stereo
->>>>>>>&0 default x \b, 3 channels
->>>>>>&8 ubeshort 4
->>>>>>>&0 string MLFTMRGTLS\x20\x20RS\x20\x20 \b, 4.0 surround
->>>>>>>&0 string SLFTSRGTC\x20\x20\x20LFE\x20 \b, 3.1 stereo
->>>>>>>&0 default x \b, 4 channels
->>>>>>&8 ubeshort 5
->>>>>>>&0 string MLFTMRGTC\x20\x20\x20LS\x20\x20RS\x20\x20 \b, 5.0 surround
->>>>>>>&0 string MLFTMRGTLFE\x20LS\x20\x20RS\x20\x20 \b, 4.1 surround
->>>>>>>&0 default x \b, 5 channels
->>>>>>&8 ubeshort 6
->>>>>>>&0 string MLFTMRGTC\x20\x20\x20LFE\x20LS\x20\x20RS\x20\x20 \b, 5.1 surround
->>>>>>>&0 default x \b, 6 channels
->>>>>>&8 ubeshort >6 \b, %u channels
-
-# v1 / sound property chunk / sample rate chunk
->>>>>&0 search/0xff FS\x20\x20
->>>>>>&0 string x \b,
->>>>>>&8 ubelong%44100 0
->>>>>>>&-4 ubelong/44100 x "DSD %u"
->>>>>>>&-4 ubelong x %u Hz
-
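The two sample-rate rules above implement a small arithmetic trick: DSD rates are multiples of 44100 Hz, so a rate that divides evenly is reported as "DSD <multiple>" (2822400 Hz -> DSD 64, 5644800 Hz -> DSD 128). A one-function sketch, for illustration only:

def dsd_label(rate_hz):
    if rate_hz % 44100 == 0:
        return f'"DSD {rate_hz // 44100}" {rate_hz} Hz'
    return f"{rate_hz} Hz"

# dsd_label(2822400) -> '"DSD 64" 2822400 Hz'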
-# v1 / sound property chunk / compression type chunk
->>>>>&0 search/0xff CMPR
->>>>>>&8 string DSD\x20 \b, no compression
->>>>>>&8 string DST\x20 \b, DST compression
->>>>>>&8 default x \b, unknown compression
-
-# v1 / quest for metadata
->>>&0 string x
-
-# v1 / quest for metadata / edited master information chunk
->>>>&0 search DIIN
->>>>>&0 ubequad >0 \b, "edited master" metadata
-
-# v1 / quest for metadata / ID3 chunk ( defacto standard )
->>>>&0 search ID3\x20
->>>>>&8 string ID3 \b, ID3 version 2
->>>>>&0 byte x \b.%u
->>>>>&1 byte x \b.%u
-
-# v1 / quest for metadata / failure ( possibly due to -P bytes=... being too low )
->>>>&0 default x \b, ID3 missing (or unreachable)
-
-# version > 1 or 0
->>&0 default x \b, unknown version
-
-# Sony DSF audio format (Direct Stream Digital Stream File)
-# Used for lossless digital storage of songs produced as DSD audio
-# Portable analog of a track stored on a Super Audio CD (SACD)
-# https://dsd-guide.com/sites/default/files/white-papers/DSFFileFormatSpec_E.pdf
-# From: Toni Ruottu <toni.ruottu@iki.fi>
-0 string DSD\x20 DSF audio bitstream data
-!:mime audio/x-dsf
-!:ext dsf
-
-# format chunk
->28 string fmt\x20
-# version 1
->>&8 ulelong 1
-
-# v1 / sampling resolution ( 1 bit PDM only )
-# NOTE: the spec incorrectly uses "bits per sample" instead of "bits per byte"
->>>&0 string x \b, 1 bit
-
-# v1 / channel configuration
->>>>&4 ulelong 1 \b, mono
->>>>&4 ulelong 2 \b, stereo
->>>>&4 ulelong 3 \b, 3.0 stereo
->>>>&4 ulelong 4 \b, 4.0 surround
->>>>&4 ulelong 5 \b, 3.1 stereo
->>>>&4 ulelong 6 \b, 5.0 surround
->>>>&4 ulelong 7 \b, 5.1 surround
->>>>&0 default x
->>>>>&4 ulelong x \b, %u channels
-
-# v1 / sample rate chunk
->>>>&0 string x \b,
->>>>&12 ulelong%44100 0
->>>>>&-4 ulelong/44100 x "DSD %u"
->>>>&12 ulelong x %u Hz
-
-# v1 / compression
->>>>&0 string x
->>>>>&0 ulelong 0 \b, no compression
->>>>>&0 default x \b, unknown compression
-
-# v1 / embedded ID3v2 metadata
->>>0 string x \b, ID3
->>>>20 ulequad !0
->>>>>(20.q) string ID3 version 2
->>>>>>&0 byte x \b.%u
->>>>>>&1 byte x \b.%u
-# unable to verify ID3 ( possibly due to -P bytes=... being too low )
->>>>>&0 default x unreachable
->>>>&0 default x missing
-
-# version > 1 or 0
->>&0 default x \b, unknown version
diff --git a/contrib/libs/libmagic/magic/Magdir/avm b/contrib/libs/libmagic/magic/Magdir/avm
deleted file mode 100644
index 86e96d110e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/avm
+++ /dev/null
@@ -1,33 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: avm,v 1.1 2020/08/28 20:37:58 christos Exp $
-# avm: file(1) magic for avm files; this is not used
-
-# Summary: FRITZ!Box router configuration backup
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Fritz!Box
-# Reference: http://www.mengelke.de/Projekte/FritzBoxTools2
-# Note: only tested with models 4040 and 6490 Cable (lgi)
-0 string ****\ FRITZ!Box\ FRITZ!Box configuration backup
-#!:mime text/plain
-!:mime application/x-avm-export
-!:ext export
-# router model name like "4040" , "6490 Cable (lgi)" followed by " CONFIGURATION EXPORT"
->15 string x of %-.4s
-# on 2nd line hashed password
-#>41 search/54 Password= \b, password
-# on 3rd line firmware version like: 141.06.24 141.06.50 141.07.10 ... 155.06.83
->41 search/172 FirmwareVersion= \b, firmware version
->>&0 string x %s
-# on 5th line oem like: avme lgi
->41 search/285 OEM= \b, oem
->>&0 string x %s
-# on 7th line language like: de en
->41 search/305 Language= \b, language
->>&0 string x %s
-# on 10th line cfg file name like: /var/tmp.cfg
->41 search/349 tmp.cfg
-# on 11th line date inside c-comment like: Thu Jun 4 22:25:19 2015
->>&4 string x \b, %s
-#
-
diff --git a/contrib/libs/libmagic/magic/Magdir/basis b/contrib/libs/libmagic/magic/Magdir/basis
deleted file mode 100644
index 19dd463b41..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/basis
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#----------------------------------------------------------------
-# $File: basis,v 1.5 2019/04/19 00:42:27 christos Exp $
-# basis: file(1) magic for BBx/Pro5-files
-# Oliver Dammer <dammer@olida.de> 2005/11/07
-# https://www.basis.com business-basic-files.
-#
-0 string \074\074bbx\076\076 BBx
->7 string \000 indexed file
->7 string \001 serial file
->7 string \002 keyed file
->>13 short 0 (sort)
->7 string \004 program
->>18 byte x (LEVEL %d)
->>>23 string >\000 psaved
->7 string \006 mkeyed file
->>13 short 0 (sort)
->>8 string \000 (mkey)
diff --git a/contrib/libs/libmagic/magic/Magdir/beetle b/contrib/libs/libmagic/magic/Magdir/beetle
deleted file mode 100644
index 94a835ccc4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/beetle
+++ /dev/null
@@ -1,7 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: beetle,v 1.2 2018/02/05 23:42:17 rrt Exp $
-# beetle: file(1) magic for Beetle VM object files
-# https://github.com/rrthomas/beetle/
-
-# Beetle object module
-0 string BEETLE\000 Beetle VM object file
diff --git a/contrib/libs/libmagic/magic/Magdir/ber b/contrib/libs/libmagic/magic/Magdir/ber
deleted file mode 100644
index 15288c6824..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ber
+++ /dev/null
@@ -1,65 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ber,v 1.2 2019/04/19 00:42:27 christos Exp $
-# ber: file(1) magic for several BER formats used in the mobile
-# telecommunications industry (Georg Sauthoff)
-
-# The file formats are standardized by the GSMA (GSM association).
-# They are specified via ASN.1 schemas and some prose. Basic encoding
-# rules (BER) is the used encoding. The formats are used for exchanging
-# call data records (CDRs) between mobile operators and associated
-# parties for roaming clearing purposes and fraud detection.
-
-# The magic file covers:
-
-# - TAP files (TD.57) - CDR batches and notifications
-# - RAP files (TD.32) - return batches and acknowledgements
-# - NRT files (TD.35) - CDR batches for 'near real time' processing
-
-#
-# TAP 3 Files
-# TAP -> Transferred Account Procedure
-# cf. https://www.gsma.com/newsroom/wp-content/uploads/TD.57-v32.31.pdf
-# TransferBatch short tag
-0 byte 0x61
-# BatchControlInfo short tag
->&1 search/b5 \x64
-# Sender long tag #TAP 3.x (BER encoded)
->>&1 search/b8 \x5f\x81\x44
-# <SpecificationVersionNumber>3</><ReleaseVersionNumber> block
->>>&64 search/b64 \x5f\x81\x49\x01\x03\x5f\x81\x3d\x01
->>>>&0 byte x TAP 3.%d Batch (TD.57, Transferred Account)
-
-# Notification short tag
-0 byte 0x62
-# Sender long tag
->2 search/b8 \x5f\x81\x44
-# <SpecificationVersionNumber>3</><ReleaseVersionNumber> block
->>&64 search/b64 \x5f\x81\x49\x01\x03\x5f\x81\x3d\x01
->>>&0 byte x TAP 3.%d Notification (TD.57, Transferred Account)
-
-
-# NRT Files
-# NRT a.k.a. NRTRDE
-0 byte 0x61
-# <SpecificationVersionNumber>2</><ReleaseVersionNumber> block
->&1 search/b8 \x5f\x29\x01\x02\x5f\x25\x01
->>&0 byte x NRT 2.%d (TD.35, Near Real Time Roaming Data Exchange)
-
-# RAP Files
-# cf. https://www.gsma.com/newsroom/wp-content/uploads/TD.32-v6.11.pdf
-# Long ReturnBatch tag
-0 string \x7f\x84\x16
-# Long RapBatchControlInfo tag
->&1 search/b8 \x7f\x84\x19
-# <SpecificationVersionNumber>3</><ReleaseVersionNumber> block
->>&64 search/b64 \x5f\x81\x49\x01\x03\x5f\x81\x3d\x01
-# <RapSpecificationVersionNumber>1</><RapReleaseVersionNumber> block
->>>&1 string/b \x5f\x84\x20\x01\x01\x5f\x84\x1f\x01
->>>>&0 byte x RAP 1.%d Batch (TD.32, Returned Account Procedure),
->>>&0 byte x TAP 3.%d
-
-# Long Acknowledgement tag
-0 string \x7f\x84\x17
-# Long Sender tag
->&1 search/b5 \x5f\x81\x44 RAP Acknowledgement (TD.32, Returned Account Procedure)
diff --git a/contrib/libs/libmagic/magic/Magdir/bflt b/contrib/libs/libmagic/magic/Magdir/bflt
deleted file mode 100644
index c46b4dbb4b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bflt
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bflt,v 1.5 2014/04/30 21:41:02 christos Exp $
-# bFLT: file(1) magic for BFLT uclinux binary files
-#
-# From Philippe De Muyter <phdm@macqel.be>
-#
-0 string bFLT BFLT executable
->4 belong x - version %d
->4 belong 4
->>36 belong&0x1 0x1 ram
->>36 belong&0x2 0x2 gotpic
->>36 belong&0x4 0x4 gzip
->>36 belong&0x8 0x8 gzdata
diff --git a/contrib/libs/libmagic/magic/Magdir/bhl b/contrib/libs/libmagic/magic/Magdir/bhl
deleted file mode 100644
index 6f57f03433..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bhl
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bhl,v 1.1 2017/06/11 22:20:02 christos Exp $
-# BlockHashLoc
-# ext: bhl
-# Marco Pontello marcopon@gmail.com
-# reference: https://github.com/MarcoPon/BlockHashLoc
-0 string BlockHashLoc\x1a BlockHashLoc recovery info,
->13 byte x version %d
-!:ext bhl
diff --git a/contrib/libs/libmagic/magic/Magdir/bioinformatics b/contrib/libs/libmagic/magic/Magdir/bioinformatics
deleted file mode 100644
index 2966fa6e49..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bioinformatics
+++ /dev/null
@@ -1,178 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bioinformatics,v 1.5 2019/04/19 00:42:27 christos Exp $
-# bioinformatics: file(1) magic for bioinformatics file formats
-
-###############################################################################
-# BGZF (Blocked GNU Zip Format) - gzip compatible, but also indexable
-# used by SAMtools bgzip/tabix (http://samtools.sourceforge.net/tabix.shtml)
-###############################################################################
-0 string \037\213
->3 byte &0x04
->>12 string BC
->>>14 leshort &0x02 Blocked GNU Zip Format (BGZF; gzip compatible)
->>>>16 leshort x \b, block length %d
-!:mime application/x-gzip
-
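A minimal sketch (illustration only) of the BGZF test encoded above: a gzip member whose FEXTRA flag is set and whose extra field carries a "BC" subfield holding the value the rule prints as the block length. Helper name and file path are placeholders.

import struct

def bgzf_block_size(path):
    with open(path, "rb") as f:
        head = f.read(18)
    if head[:2] != b"\x1f\x8b" or not (head[3] & 0x04):  # gzip magic + FEXTRA flag
        return None
    if head[12:14] != b"BC":                             # BGZF "BC" extra subfield
        return None
    return struct.unpack_from("<H", head, 16)[0]         # value printed as block length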
-
-###############################################################################
-# Tabix index file
-# used by SAMtools bgzip/tabix (http://samtools.sourceforge.net/tabix.shtml)
-###############################################################################
-0 string TBI\1 SAMtools TBI (Tabix index format)
->0x04 lelong =1 \b, with %d reference sequence
->0x04 lelong >1 \b, with %d reference sequences
->0x08 lelong &0x10000 \b, using half-closed-half-open coordinates (BED style)
->0x08 lelong ^0x10000
->>0x08 lelong =0 \b, using closed and one based coordinates (GFF style)
->>0x08 lelong =1 \b, using SAM format
->>0x08 lelong =2 \b, using VCF format
->0x0c lelong x \b, sequence name column: %d
->0x10 lelong x \b, region start column: %d
->0x08 lelong =0
->>0x14 lelong x \b, region end column: %d
->0x18 byte x \b, comment character: %c
->0x1c lelong x \b, skip line count: %d
-
-
-###############################################################################
-# BAM (Binary Sequence Alignment/Map format)
-# used by SAMtools (http://samtools.sourceforge.net/SAM1.pdf)
-# data is normally present only within compressed BGZF blocks (CDATA), so use file -z to examine it
-###############################################################################
-0 string BAM\1 SAMtools BAM (Binary Sequence Alignment/Map)
->0x04 lelong >0
->>&0x00 regex =^[@]HD\t.*VN: \b, with SAM header
->>>&0 regex =[0-9.]+ \b version %s
->>&(0x04) lelong >0 \b, with %d reference sequences
-
-
-###############################################################################
-# BAI (BAM indexing format)
-# used by SAMtools (http://samtools.sourceforge.net/SAM1.pdf)
-###############################################################################
-0 string BAI\1 SAMtools BAI (BAM indexing format)
->0x04 lelong >0 \b, with %d reference sequences
-
-
-###############################################################################
-# CRAM (Binary Sequence Alignment/Map format)
-###############################################################################
-0 string CRAM CRAM
->0x04 byte >-1 version %d.
->0x05 byte >-1 \b%d
->0x06 string >\0 (identified as %s)
-
-
-###############################################################################
-# BCF (Binary Call Format), version 1
-# used by SAMtools & VCFtools (http://vcftools.sourceforge.net/bcf.pdf)
-# data is normally present only within compressed BGZF blocks (CDATA), so use file -z to examine it
-###############################################################################
-0 string BCF\4
-# length of seqnm data in bytes is positive
->&0x00 lelong >0
-# length of smpl data in bytes is positive
->>&(&-0x04) lelong >0 SAMtools BCF (Binary Call Format)
-# length of meta in bytes
->>>&(&-0x04) lelong >0
-# have meta text string
->>>>&0x00 search ##samtoolsVersion=
->>>>>&0x00 string x \b, generated by SAMtools version %s
-
-
-###############################################################################
-# BCF (Binary Call Format), version 2.1
-# used by SAMtools (https://samtools.github.io/hts-specs/BCFv2_qref.pdf)
-# data is normally present only within compressed BGZF blocks (CDATA), so use file -z to examine it
-###############################################################################
-0 string BCF\2\1 Binary Call Format (BCF) version 2.1
-# length of header text
->&0x00 lelong >0
-# have header string
->>&0x00 search ##samtoolsVersion=
->>>&0x00 string x \b, generated by SAMtools version %s
-
-
-###############################################################################
-# BCF (Binary Call Format), version 2.2
-# used by SAMtools (https://samtools.github.io/hts-specs/BCFv2_qref.pdf)
-# data is normally present only within compressed BGZF blocks (CDATA), so use file -z to examine it
-###############################################################################
-0 string BCF\2\2 Binary Call Format (BCF) version 2.2
-# length of header text
->&0x00 lelong >0
-# have header string
->>&0x00 search ##samtoolsVersion=
->>>&0x00 string x \b, generated by SAMtools version %s
-
-###############################################################################
-# VCF (Variant Call Format)
-# used by VCFtools (http://vcftools.sourceforge.net/)
-###############################################################################
-0 search ##fileformat=VCFv Variant Call Format (VCF)
->&0 string x \b version %s
-
-###############################################################################
-# FASTQ
-# used by MAQ (http://maq.sourceforge.net/fastq.shtml)
-###############################################################################
-# XXX Broken?
-# @<seqname>
-#0 regex =^@[A-Za-z0-9_.:-]+\?\n
-# <seq>
-#>&1 regex =^[A-Za-z\n.~]++
-# +[<seqname>]
-#>>&1 regex =^[A-Za-z0-9_.:-]*\?\n
-# <qual>
-#>>>&1 regex =^[!-~\n]+\n FASTQ
-
-###############################################################################
-# FASTA
-# used by FASTA (https://fasta.bioch.virginia.edu/fasta_www2/fasta_guide.pdf)
-###############################################################################
-#0 byte 0x3e
-# q>0 regex =^[>][!-~\t\ ]+$
-# Amino Acid codes: [A-IK-Z*-]+
-#>>1 regex !=[!-'Jj;:=?@^`|~\\] FASTA
-# IUPAC codes/gaps: [ACGTURYKMSWBDHVNX-]+
-# not in IUPAC codes/gaps: [EFIJLOPQZ]
-#>>>1 regex !=[EFIJLOPQZefijlopqz] \b, with IUPAC nucleotide codes
-#>>>1 regex =^[EFIJLOPQZefijlopqz]+$ \b, with Amino Acid codes
-
-###############################################################################
-# SAM (Sequence Alignment/Map format)
-# used by SAMtools (http://samtools.sourceforge.net/SAM1.pdf)
-###############################################################################
-# Short-cut version to recognise SAM files with (optional) header at beginning
-###############################################################################
-0 string @HD\t
->4 search VN: Sequence Alignment/Map (SAM), with header
->>&0 regex [0-9.]+ \b version %s
-###############################################################################
-# Longer version to recognise SAM alignment lines using (many) regexes
-###############################################################################
-# SAM Alignment QNAME
-0 regex =^[!-?A-~]{1,255}(\t[^\t]+){11}
-# SAM Alignment FLAG
->0 regex =^([^\t]+\t){1}[0-9]{1,5}\t
-# SAM Alignment RNAME
->>0 regex =^([^\t]+\t){2}\\*|[^*=]*\t
-# SAM Alignment POS
->>>0 regex =^([^\t]+\t){3}[0-9]{1,9}\t
-# SAM Alignment MAPQ
->>>>0 regex =^([^\t]+\t){4}[0-9]{1,3}\t
-# SAM Alignment CIGAR
->>>>>0 regex =\t(\\*|([0-9]+[MIDNSHPX=])+)\t
-# SAM Alignment RNEXT
->>>>>>0 regex =\t(\\*|=|[!-()+->?-~][!-~]*)\t
-# SAM Alignment PNEXT
->>>>>>>0 regex =^([^\t]+\t){7}[0-9]{1,9}\t
-# SAM Alignment TLEN
->>>>>>>>0 regex =\t[+-]{0,1}[0-9]{1,9}\t.*\t
-# SAM Alignment SEQ
->>>>>>>>>0 regex =^([^\t]+\t){9}(\\*|[A-Za-z=.]+)\t
-# SAM Alignment QUAL
->>>>>>>>>>0 regex =^([^\t]+\t){10}[!-~]+ Sequence Alignment/Map (SAM)
->>>>>>>>>>>0 regex =^[@]HD\t.*VN: \b, with header
->>>>>>>>>>>>&0 regex =[0-9.]+ \b version %s
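A short sketch (illustration only) of the "short-cut" SAM check above: an "@HD" header line whose VN tag yields the format version. The helper name is a placeholder; the regex cascade for headerless alignment lines is not reproduced here.

import re

def sam_version(first_line: bytes):
    if not first_line.startswith(b"@HD\t"):
        return None
    m = re.search(rb"VN:([0-9.]+)", first_line)
    return m.group(1).decode() if m else None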
diff --git a/contrib/libs/libmagic/magic/Magdir/biosig b/contrib/libs/libmagic/magic/Magdir/biosig
deleted file mode 100644
index 7d41713f24..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/biosig
+++ /dev/null
@@ -1,154 +0,0 @@
-
-##############################################################################
-#
-# Magic ids for biomedical signal file formats
-# Copyright (C) 2018 Alois Schloegl <alois.schloegl@gmail.com>
-#
-# The list has been derived from biosig projects
-# http://biosig.sourceforge.net
-# https://pub.ist.ac.at/~schloegl/matlab/eeg/
-# https://pub.ist.ac.at/~schloegl/biosig/TESTED
-#
-##############################################################################
-#
-0 string ABF\x20 Biosig/Axon Binary format
-!:mime biosig/abf2
-0 string ABF2\0\0 Biosig/Axon Binary format
-!:mime biosig/abf2
-#
-0 string ATES\x20MEDICA\x20SOFT.\x20EEG\x20for\x20Windows Biosig/ATES MEDICA SOFT. EEG for Windows
-!:mime biosig/ates
-#
-0 string ATF\x09 Biosig/Axon Text format
-!:mime biosig/atf
-#
-0 string ADU1 Biosig/Axona file format
-!:mime biosig/axona
-0 string ADU2 Biosig/Axona file format
-!:mime biosig/axona
-#
-0 string ALPHA-TRACE-MEDICAL Biosig/alpha trace
-!:mime biosig/alpha
-#
-0 string AxGr Biosig/AXG
-0 string axgx Biosig/AXG
-!:mime biosig/axg
-#
-0 string HeaderLen= Biosig/BCI2000
-0 string BCI2000V Biosig/BCI2000
-!:mime biosig/bci2000
-#
-### Specification: https://www.biosemi.com/faq/file_format.htm
-0 string \xffBIOSEMI Biosig/Biosemi data format
-!:mime biosig/bdf
-#
-0 string Brain\x20Vision\x20Data\x20Exchange\x20Header\x20File Biosig/Brainvision data file
-0 string Brain\x20Vision\x20V-Amp\x20Data\x20Header\x20File\x20Version Biosig/Brainvision V-Amp file
-0 string Brain\x20Vision\x20Data\x20Exchange\x20Marker\x20File,\x20Version Biosig/Brainvision Marker file
-!:mime biosig/brainvision
-#
-0 string CEDFILE Biosig/CFS: Cambridge Electronic devices File format
-!:mime biosig/ced
-#
-### Specification: https://www.edfplus.info/specs/index.html
-0 string 0\x20\x20\x20\x20\x20\x20\x20 Biosig/EDF: European Data format
-!:mime biosig/edf
-#
-### Specifications: https://arxiv.org/abs/cs/0608052
-0 string GDF Biosig/GDF: General data format for biosignals
-!:mime biosig/gdf
-#
-0 string DATA\0\0\0\0 Biosig/Heka Patchmaster
-0 string DAT1\0\0\0\0 Biosig/Heka Patchmaster
-0 string DAT2\0\0\0\0 Biosig/Heka Patchmaster
-!:mime biosig/heka
-#
-0 string (C)\x20CED\x2087 Biosig/CED SMR
-!:mime biosig/ced-smr
-#
-0 string CFWB\1\0\0\0 Biosig/CFWB
-!:mime biosig/cfwb
-#
-0 string DEMG Biosig/DEMG
-!:mime biosig/demg
-#
-0 string EBS\x94\x0a\x13\x1a\x0d Biosig/EBS
-!:mime biosig/ebs
-#
-0 string Embla\x20data\x20file Biosig/Embla
-!:mime biosig/embla
-#
-0 string Header\r\nFile Version Biosig/ETG4000
-!:mime biosig/etg4000
-#
-0 string GALILEO\x20EEG\x20TRACE\x20FILE Biosig/Galileo
-!:mime biosig/galileo
-#
-0 string IGOR Biosig/IgorPro ITX file
-!:mime biosig/igorpro
-#
-# Specification: http://www.ampsmedical.com/uploads/2017-12-7/The_ISHNE_Format.pdf
-0 string ISHNE1.0 Biosig/ISHNE
-!:mime biosig/ishne
-#
-# CEN/ISO 11073/22077 series, http://www.mfer.org/en/document.htm
-0 string @\x20\x20MFER\x20 Biosig/MFER
-0 string @\x20MFR\x20 Biosig/MFER
-!:mime biosig/mfer
-#
-0 string NEURALEV Biosig/NEV
-0 string N.EV.\0 Biosig/NEV
-!:mime biosig/nev
-#
-0 string NEX1 Biosig/NEX
-!:mime biosig/nex1
-#
-0 string PLEX Biosig/Plexon v1.0
-10 string PLEXON Biosig/Plexon v2.0
-!:mime biosig/plexon
-#
-0 string \x02\x27\x91\xC6 Biosig/RHD2000: Intan RHD2000 format
-#
-# Specification: CEN 1064:2005/ISO 11073:91064
-16 string SCPECG\0\0 Biosig/SCP-ECG format CEN 1064:2005/ISO 11073:91064
-!:mime biosig/scpecg
-#
-0 string IAvSFo Biosig/SIGIF
-!:mime biosig/sigif
-#
-0 string POLY\x20SAMPLE\x20FILEversion\x20 Biosig/TMS32
-!:mime biosig/tms32
-#
-0 string FileId=TMSi\x20PortiLab\x20sample\x20log\x20file\x0a\x0dVersion= Biosig/TMSiLOG
-!:mime biosig/tmsilog
-#
-4 string Synergy\0\48\49\50\46\48\48\51\46\48\48\48\46\48\48\48\0\28\0\0\0\2\0\0\0
->63 string CRawDataElement
->>85 string CRawDataBuffer Biosig/SYNERGY
-!:mime biosig/synergy
-#
-4 string \40\0\4\1\44\1\102\2\146\3\44\0\190\3 Biosig/UNIPRO
-!:mime biosig/unipro
-#
-0 string VER=9\r\nCTIME= Biosig/WCP
-!:mime biosig/wcp
-#
-0 string \xAF\xFE\xDA\xDA Biosig/Walter Graphtek
-0 string \xDA\xDA\xFE\xAF Biosig/Walter Graphtek
-0 string \x55\x55\xFE\xAF Biosig/Walter Graphtek
-!:mime biosig/walter-graphtek
-#
-0 string V3.0\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20\x20
->32 string [PatInfo] Biosig/Sigma
-!:mime biosig/sigma
-#
-0 string \067\069\078\013\010\0x1a\04\0x84 Biosig/File exchange format (FEF)
-!:mime biosig/fef
-0 string \67\69\78\0x13\0x10\0x1a\4\0x84 Biosig/File exchange format (FEF)
-!:mime biosig/fef
-#
-0 string \0\0\0\x64\0\0\0\x1f\0\0\0\x14\0\0\0\0\0\1
->36 string \0\0\0\x65\0\0\0\3\0\0\0\4\0\0
->>56 string \0\0\0\x6a\0\0\0\3\0\0\0\4\0\0\0\0\xff\xff\xff\xff\0\0 Biosig/FIFF
-!:mime biosig/fiff
-#
diff --git a/contrib/libs/libmagic/magic/Magdir/blackberry b/contrib/libs/libmagic/magic/Magdir/blackberry
deleted file mode 100644
index 2e38a54f42..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/blackberry
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: blackberry,v 1.2 2017/03/17 21:35:28 christos Exp $
-# blackberry: file(1) magic for BlackBerry file formats
-#
-5 belong 0
->8 belong 010010010 BlackBerry RIM ETP file
->>22 string x \b for %s
diff --git a/contrib/libs/libmagic/magic/Magdir/blcr b/contrib/libs/libmagic/magic/Magdir/blcr
deleted file mode 100644
index d2f901ae92..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/blcr
+++ /dev/null
@@ -1,25 +0,0 @@
-# Berkeley Lab Checkpoint Restart (BLCR) checkpoint context files
-# https://ftg.lbl.gov/checkpoint
-0 string C\0\0\0R\0\0\0 BLCR
->16 lelong 1 x86
->16 lelong 3 alpha
->16 lelong 5 x86-64
->16 lelong 7 ARM
->8 lelong x context data (little endian, version %d)
-# Uncomment the following only if your "file" program supports "search"
-#>0 search/1024 VMA\06 for kernel
-#>>&1 byte x %d.
-#>>&2 byte x %d.
-#>>&3 byte x %d
-0 string \0\0\0C\0\0\0R BLCR
->16 belong 2 SPARC
->16 belong 4 ppc
->16 belong 6 ppc64
->16 belong 7 ARMEB
->16 belong 8 SPARC64
->8 belong x context data (big endian, version %d)
-# Uncomment the following only if your "file" program supports "search"
-#>0 search/1024 VMA\06 for kernel
-#>>&1 byte x %d.
-#>>&2 byte x \b%d.
-#>>&3 byte x \b%d
diff --git a/contrib/libs/libmagic/magic/Magdir/blender b/contrib/libs/libmagic/magic/Magdir/blender
deleted file mode 100644
index 5a897113e0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/blender
+++ /dev/null
@@ -1,50 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: blender,v 1.9 2022/12/21 15:53:27 christos Exp $
-# blender: file(1) magic for Blender 3D related files
-#
-# Native format rule v1.2. For questions use the developers list
-# https://lists.blender.org/mailman/listinfo/bf-committers
-# GLOB chunk was moved near start and provides subversion info since 2.42
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/BLEND
-# http://www.blender.org/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/blend.trid.xml
-# http://formats.kaitai.io/blender_blend/index.html
-# Note: called "Blender 3D data" by TrID
-# and gzip compressed variant handled by ./compress
-0 string =BLENDER Blender3D,
-#!:mime application/octet-stream
-!:mime application/x-blender
-!:ext blend
-# no sample found with extension blender
-#!:ext blend/blender
->7 string =_ saved as 32-bits
->>8 string =v little endian
->>>9 byte x with version %c.
->>>10 byte x \b%c
->>>11 byte x \b%c
->>>0x40 string =GLOB \b.
->>>>0x58 leshort x \b%.4d
->>8 string =V big endian
->>>9 byte x with version %c.
->>>10 byte x \b%c
->>>11 byte x \b%c
->>>0x40 string =GLOB \b.
->>>>0x58 beshort x \b%.4d
->7 string =- saved as 64-bits
->>8 string =v little endian
->>9 byte x with version %c.
->>10 byte x \b%c
->>11 byte x \b%c
->>0x44 string =GLOB \b.
->>>0x60 leshort x \b%.4d
->>8 string =V big endian
->>>9 byte x with version %c.
->>>10 byte x \b%c
->>>11 byte x \b%c
->>>0x44 string =GLOB \b.
->>>>0x60 beshort x \b%.4d
-
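A minimal sketch (illustration only) of the .blend header fields the rules above report: pointer size from byte 7, endianness from byte 8, and three ASCII version digits; the GLOB subversion lookup is omitted. Helper name and file path are placeholders.

def blend_header(path):
    with open(path, "rb") as f:
        head = f.read(12)
    if head[:7] != b"BLENDER":
        return None
    pointer_size = 32 if head[7:8] == b"_" else 64     # '_' = 32-bit, '-' = 64-bit
    endian = "little" if head[8:9] == b"v" else "big"  # 'v' = LE, 'V' = BE
    digits = head[9:12].decode("ascii")                # e.g. "293"
    return f"Blender3D, saved as {pointer_size}-bit {endian} endian, version {digits[0]}.{digits[1:]}"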
-# Scripts that run in the embedded Python interpreter
-0 string #!BPY Blender3D BPython script
diff --git a/contrib/libs/libmagic/magic/Magdir/blit b/contrib/libs/libmagic/magic/Magdir/blit
deleted file mode 100644
index 5ce7870706..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/blit
+++ /dev/null
@@ -1,24 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: blit,v 1.9 2021/07/03 14:01:46 christos Exp $
-# blit: file(1) magic for 68K Blit stuff as seen from 680x0 machine
-#
-# Note that this 0407 conflicts with several other a.out formats...
-#
-# XXX - should this be redone with "be" and "le", so that it works on
-# little-endian machines as well? If so, what's the deal with
-# "VAX-order" and "VAX-order2"?
-#
-#0 long 0407 68K Blit (standalone) executable
-#0 short 0407 VAX-order2 68K Blit (standalone) executable
-0 short 03401 VAX-order 68K Blit (standalone) executable
-0 long 0406 68k Blit mpx/mux executable
-0 short 0406 VAX-order2 68k Blit mpx/mux executable
-# GRR: line below is too general as it matches also TTComp archive, ASCII, 4K handled by ./archive
-0 short 03001 VAX-order 68k Blit mpx/mux executable
-# TODO:
-# skip TTComp archive, ASCII, 4K by looking for executable keyword like main
-#>0 search/5536 main\0 VAX-order 68k Blit mpx/mux executable
-# Need more values for WE32 DMD executables.
-# Note that 0520 is the same as COFF
-#0 short 0520 tty630 layers executable
diff --git a/contrib/libs/libmagic/magic/Magdir/bm b/contrib/libs/libmagic/magic/Magdir/bm
deleted file mode 100644
index a9a1d5bb3f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bm
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bm,v 1.2 2021/03/14 16:56:51 christos Exp $
-# bm: file(1) magic for "Birtual Machine", cf. https://github.com/tsoding/bm
-
-0 string bm\001\244 Birtual Machine
->4 leshort x \b, version %d
->6 lelong x \b, program size %u
->14 lelong x \b, memory size %u
->22 lelong x \b, memory capacity %u
diff --git a/contrib/libs/libmagic/magic/Magdir/bout b/contrib/libs/libmagic/magic/Magdir/bout
deleted file mode 100644
index 693cc2a4bd..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bout
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bout,v 1.5 2009/09/19 16:28:08 christos Exp $
-# i80960 b.out objects and archives
-#
-0 long 0x10d i960 b.out relocatable object
->16 long >0 not stripped
-#
-# b.out archive (hp-rt on i960)
-0 string =!<bout> b.out archive
->8 string __.SYMDEF random library
diff --git a/contrib/libs/libmagic/magic/Magdir/bsdi b/contrib/libs/libmagic/magic/Magdir/bsdi
deleted file mode 100644
index 8499b0c903..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bsdi
+++ /dev/null
@@ -1,33 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: bsdi,v 1.7 2014/03/29 15:40:34 christos Exp $
-# bsdi: file(1) magic for BSD/OS (from BSDI) objects
-# Some object/executable formats use the same magic numbers as are used
-# in other OSes; those are handled by entries in aout.
-#
-
-0 lelong 0314 386 compact demand paged pure executable
->16 lelong >0 not stripped
->32 byte 0x6a (uses shared libs)
-
-# same as in SunOS 4.x, except for static shared libraries
-0 belong&077777777 0600413 SPARC demand paged
->0 byte &0x80
->>20 belong <4096 shared library
->>20 belong =4096 dynamically linked executable
->>20 belong >4096 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
->36 belong 0xb4100001 (uses shared libs)
-
-0 belong&077777777 0600410 SPARC pure
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
->36 belong 0xb4100001 (uses shared libs)
-
-0 belong&077777777 0600407 SPARC
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
->36 belong 0xb4100001 (uses shared libs)
diff --git a/contrib/libs/libmagic/magic/Magdir/bsi b/contrib/libs/libmagic/magic/Magdir/bsi
deleted file mode 100644
index 87e0fec76e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bsi
+++ /dev/null
@@ -1,10 +0,0 @@
-# Chiasmus is an encryption standard developed by the German Federal
-# Office for Information Security (Bundesamt fuer Sicherheit in der
-# Informationstechnik).
-
-# https://www.bsi.bund.de/EN/Topics/OtherTopics/Chiasmus/Chiasmus_node.html
-0 string XIA1\r Chiasmus Encrypted data
-!:ext xia
-
-0 string XIS Chiasmus key
-!:ext xis
diff --git a/contrib/libs/libmagic/magic/Magdir/btsnoop b/contrib/libs/libmagic/magic/Magdir/btsnoop
deleted file mode 100644
index d72daad877..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/btsnoop
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: btsnoop,v 1.5 2009/09/19 16:28:08 christos Exp $
-# BTSnoop: file(1) magic for BTSnoop files
-#
-# From <marcel@holtmann.org>
-0 string btsnoop\0 BTSnoop
->8 belong x version %d,
->12 belong 1001 Unencapsulated HCI
->12 belong 1002 HCI UART (H4)
->12 belong 1003 HCI BCSP
->12 belong 1004 HCI Serial (H5)
->>12 belong x type %d
diff --git a/contrib/libs/libmagic/magic/Magdir/burp b/contrib/libs/libmagic/magic/Magdir/burp
deleted file mode 100644
index 460d18c4c2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/burp
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------
-# $File: burp,v 1.1 2022/07/04 17:15:09 christos Exp $
-# Burp file, I don't know the version
-#------------------------------------------------------------
-# From wof (wof@stachelkaktus.net)
-0 bequad 0x6685828000000001 Burp project save file
diff --git a/contrib/libs/libmagic/magic/Magdir/bytecode b/contrib/libs/libmagic/magic/Magdir/bytecode
deleted file mode 100644
index dca961c264..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/bytecode
+++ /dev/null
@@ -1,41 +0,0 @@
-
-#------------------------------------------------------------
-# $File: bytecode,v 1.5 2023/02/20 16:25:05 christos Exp $
-# magic for various bytecodes
-
-# From: Mikhail Gusarov <dottedmag@dottedmag.net>
-# NekoVM (https://nekovm.org/) bytecode
-0 string NEKO NekoVM bytecode
->4 lelong x (%d global symbols,
->8 lelong x %d global fields,
->12 lelong x %d bytecode ops)
-!:mime application/x-nekovm-bytecode
-
-# https://www.iana.org/assignments/media-types/application/vnd.resilient.logic
-# From: Benedikt Muessig <benedikt@resilient-group.de>
-0 belong 0x07524c4d Resilient Logic bytecode
-!:mime application/vnd.resilient.logic
->4 byte/16 x \b, version %d
->4 byte&0x0f x \b.%d
-
-# Guile file magic from <dalepsmith@gmail.com>
-# https://www.gnu.org/s/guile/
-# https://git.savannah.gnu.org/gitweb/?p=guile.git;f=libguile/_scm.h;hb=HEAD#l250
-
-0 string GOOF---- Guile Object
->8 string LE \b, little endian
->8 string BE \b, big endian
->11 string 4 \b, 32bit
->11 string 8 \b, 64bit
->13 regex .\\.. \b, bytecode v%s
-
-# Racket file magic
-# From: Haelwenn (lanodan) Monnier <contact+libmagic@hacktivis.me>
-# https://racket-lang.org/
-# https://github.com/racket/racket/blob/master/racket/src/expander/compile/write-linklet.rkt
-0 string #~
->&0 pstring x
->>&0 pstring racket
->>>0 string #~ Racket bytecode
->>>>&0 pstring x (version %s)
-
diff --git a/contrib/libs/libmagic/magic/Magdir/c-lang b/contrib/libs/libmagic/magic/Magdir/c-lang
deleted file mode 100644
index 6e375a06a7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/c-lang
+++ /dev/null
@@ -1,110 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: c-lang,v 1.32 2023/06/16 19:57:19 christos Exp $
-# c-lang: file(1) magic for C and related languages programs
-#
-# The strength is raised so these rules beat standard HTML
-
-# BCPL
-0 search/8192 "libhdr" BCPL source text
-!:mime text/x-bcpl
-0 search/8192 "LIBHDR" BCPL source text
-!:mime text/x-bcpl
-
-# C
-# Check for class if include is found, otherwise class is beaten by include because of lowered strength
-0 search/8192 #include
->0 regex \^#include C
->>0 regex \^class[[:space:]]+
->>>&0 regex \\{[\.\*]\\}(;)?$ \b++
->>&0 clear x source text
-!:strength + 15
-!:mime text/x-c
-0 search/8192 pragma
->0 regex \^#[[:space:]]*pragma C source text
-!:mime text/x-c
-0 search/8192 endif
->0 regex \^#[[:space:]]*(if\|ifn)def
->>&0 regex \^#[[:space:]]*endif$ C source text
-!:mime text/x-c
-0 search/8192 define
->0 regex \^#[[:space:]]*(if\|ifn)def
->>&0 regex \^#[[:space:]]*define C source text
-!:mime text/x-c
-0 search/8192 char
->0 regex \^[[:space:]]*char(\ \\*|\\*)(.+)(=.*)?;[[:space:]]*$ C source text
-!:mime text/x-c
-0 search/8192 double
->0 regex \^[[:space:]]*double(\ \\*|\\*)(.+)(=.*)?;[[:space:]]*$ C source text
-!:mime text/x-c
-0 search/8192 extern
->0 regex \^[[:space:]]*extern[[:space:]]+ C source text
-!:mime text/x-c
-0 search/8192 float
->0 regex \^[[:space:]]*float(\ \\*|\\*)(.+)(=.*)?;[[:space:]]*$ C source text
-!:mime text/x-c
-0 search/8192 struct
->0 regex \^struct[[:space:]]+ C source text
-!:mime text/x-c
-0 search/8192 union
->0 regex \^union[[:space:]]+ C source text
-!:mime text/x-c
-0 search/8192 main(
->&0 search/64 String Java source text
-!:mime text/x-java
->&0 default x
->>&0 regex \\)[[:space:]]*\\{ C source text
-!:mime text/x-c
-
-# C++
-# The strength of these rules is increased so they beat the C rules above
-0 search/8192 namespace
->0 regex \^namespace[[:space:]]+[_[:alpha:]]{1,30}[[:space:]]*\\{ C++ source text
-!:strength + 30
-!:mime text/x-c++
-# using namespace [namespace] or using std::[lib]
-0 search/8192 using
->0 regex \^using[[:space:]]+(namespace\ )?std(::)?[[:alpha:]]*[[:space:]]*; C++ source text
-!:strength + 30
-!:mime text/x-c++
-0 search/8192 template
->0 regex \^[[:space:]]*template[[:space:]]*<.*>[[:space:]]*$ C++ source text
-!:strength + 30
-!:mime text/x-c++
-0 search/8192 virtual
->0 regex \^[[:space:]]*virtual[[:space:]]+.*[};][[:space:]]*$ C++ source text
-!:strength + 30
-!:mime text/x-c++
-# But class alone is reduced to avoid beating php (Jens Schleusener)
-0 search/8192 class
->0 regex \^[[:space:]]*class[[:space:]]+[[:digit:][:alpha:]:_]+[[:space:]]*\\{(.*[\n]*)*\\}(;)?$ C++ source text
-!:strength + 13
-!:mime text/x-c++
-0 search/8192 public
->0 regex \^[[:space:]]*public: C++ source text
-!:strength + 30
-!:mime text/x-c++
-0 search/8192 private
->0 regex \^[[:space:]]*private: C++ source text
-!:strength + 30
-!:mime text/x-c++
-0 search/8192 protected
->0 regex \^[[:space:]]*protected: C++ source text
-!:strength + 30
-!:mime text/x-c++
-
-# Objective-C
-0 search/8192 #import
->0 regex \^#import[[:space:]]+["<] Objective-C source text
-!:strength + 25
-!:mime text/x-objective-c
-
-# From: Mikhail Teterin <mi@aldan.algebra.com>
-0 string cscope cscope reference data
->7 string x version %.2s
-# We skip the path here, because it is often long (so file will
-# truncate it) and mostly redundant.
-# The inverted index functionality was added some time between
-# versions 11 and 15, so look for -q if version is above 14:
->7 string >14
->>10 search/100 \ -q\ with inverted index
->10 search/100 \ -c\ text (non-compressed)
diff --git a/contrib/libs/libmagic/magic/Magdir/c64 b/contrib/libs/libmagic/magic/Magdir/c64
deleted file mode 100644
index 6c8732090f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/c64
+++ /dev/null
@@ -1,549 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: c64,v 1.14 2023/06/16 19:24:06 christos Exp $
-# c64: file(1) magic for various commodore 64 related files
-#
-# From: Dirk Jagdmann <doj@cubic.org>
-
-0x16500 belong 0x12014100 D64 Image
-0x16500 belong 0x12014180 D71 Image
-0x61800 belong 0x28034400 D81 Image
-0 belong 0x43154164 X64 Image
-
-# C64 (and other CBM) cartridges
-# Extended by David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://vice-emu.sourceforge.io/vice_17.html#SEC391
-
-0 string C64\40CARTRIDGE Commodore 64 cartridge
->0x20 ubyte 0 \b,
->0x20 ubyte !0
->>0x20 string/T x \b: "%.32s",
->0x16 beshort 0
->>0x18 beshort 0x0000 16 KB game
->>0x18 beshort 0x0001 8 KB game
->>0x18 beshort 0x0100 UltiMax mode
->>0x18 beshort 0x0101 RAM/disabled
->0x16 beshort 1 Action Replay
->0x16 beshort 2 KCS Power Cartridge
->0x16 beshort 3 Final Cartridge III
->0x16 beshort 4 Simons' BASIC
->0x16 beshort 5 Ocean type 1
->0x16 beshort 6 Expert Cartridge
->0x16 beshort 7 Fun Play, Power Play
->0x16 beshort 8 Super Games
->0x16 beshort 9 Atomic Power
->0x16 beshort 10 Epyx Fastload
->0x16 beshort 11 Westermann Learning
->0x16 beshort 12 Rex Utility
->0x16 beshort 13 Final Cartridge I
->0x16 beshort 14 Magic Formel
->0x16 beshort 15 C64 Game System, System 3
->0x16 beshort 16 Warp Speed
->0x16 beshort 17 Dinamic
->0x16 beshort 18 Zaxxon / Super Zaxxon (Sega)
->0x16 beshort 19 Magic Desk, Domark, HES Australia
->0x16 beshort 20 Super Snapshot V5
->0x16 beshort 21 Comal-80
->0x16 beshort 22 Structured BASIC
->0x16 beshort 23 Ross
->0x16 beshort 24 Dela EP64
->0x16 beshort 25 Dela EP7x8
->0x16 beshort 26 Dela EP256
->0x16 beshort 27 Rex EP256
->0x16 beshort 28 Mikro Assembler
->0x16 beshort 29 Final Cartridge Plus
->0x16 beshort 30 Action Replay 4
->0x16 beshort 31 Stardos
->0x16 beshort 32 EasyFlash
->0x16 beshort 33 EasyFlash Xbank
->0x16 beshort 34 Capture
->0x16 beshort 35 Action Replay 3
->0x16 beshort 36
->>0x1A ubyte 1 Nordic Replay
->>0x1A ubyte !1 Retro Replay
->0x16 beshort 37 MMC64
->0x16 beshort 38 MMC Replay
->0x16 beshort 39 IDE64
->0x16 beshort 40 Super Snapshot V4
->0x16 beshort 41 IEEE-488
->0x16 beshort 42 Game Killer
->0x16 beshort 43 Prophet64
->0x16 beshort 44 EXOS
->0x16 beshort 45 Freeze Frame
->0x16 beshort 46 Freeze Machine
->0x16 beshort 47 Snapshot64
->0x16 beshort 48 Super Explode V5.0
->0x16 beshort 49 Magic Voice
->0x16 beshort 50 Action Replay 2
->0x16 beshort 51 MACH 5
->0x16 beshort 52 Diashow-Maker
->0x16 beshort 53 Pagefox
->0x16 beshort 54 Kingsoft
->0x16 beshort 55 Silverrock 128K Cartridge
->0x16 beshort 56 Formel 64
->0x16 beshort 57
->>0x1A ubyte 1 Hucky
->>0x1A ubyte !1 RGCD
->0x16 beshort 58 RR-Net MK3
->0x16 beshort 59 EasyCalc
->0x16 beshort 60 GMod2
->0x16 beshort 61 MAX Basic
->0x16 beshort 62 GMod3
->0x16 beshort 63 ZIPP-CODE 48
->0x16 beshort 64 Blackbox V8
->0x16 beshort 65 Blackbox V3
->0x16 beshort 66 Blackbox V4
->0x16 beshort 67 REX RAM-Floppy
->0x16 beshort 68 BIS-Plus
->0x16 beshort 69 SD-BOX
->0x16 beshort 70 MultiMAX
->0x16 beshort 71 Blackbox V9
->0x16 beshort 72 Lt. Kernal Host Adaptor
->0x16 beshort 73 RAMLink
->0x16 beshort 74 H.E.R.O.
->0x16 beshort 75 IEEE Flash! 64
->0x16 beshort 76 Turtle Graphics II
->0x16 beshort 77 Freeze Frame MK2
-
-0 string C128\40CARTRIDGE Commodore 128 cartridge
->0x20 ubyte 0 \b,
->0x20 ubyte !0
->>0x20 string/T x \b: "%.32s",
->0x16 beshort 0 generic cartridge
->0x16 beshort 1 Warpspeed128
->>0x1A ubyte 1 \b, REU support
->>0x1A ubyte 2 \b, REU support, with I/O and ROM banking
-
-0 string CBM2\40CARTRIDGE Commodore CBM-II cartridge
->0x20 ubyte !0
->>0x20 string/T x \b: "%.32s"
-
-0 string VIC20\40CARTRIDGE Commodore VIC-20 cartridge
->0x20 ubyte 0 \b,
->0x20 ubyte !0
->>0x20 string/T x \b: "%.32s",
->0x16 beshort 0 generic cartridge
->0x16 beshort 1 Mega-Cart
->0x16 beshort 2 Behr Bonz
->0x16 beshort 3 Vic Flash Plugin
->0x16 beshort 4 UltiMem
->0x16 beshort 5 Final Expansion
-
-0 string PLUS4\40CARTRIDGE Commodore 16/Plus4 cartridge
->0x20 ubyte !0
->>0x20 string/T x \b: "%.32s"
-
-
-# DreamLoad archives see:
-# https://www.lemon64.com/forum/viewtopic.php?t=37415\
-# &sid=494dc2ca91289e05dadf80a7f8a968fe (at the bottom).
-# https://www.c64-wiki.com/wiki/DreamLoad.
-# Example HVSC Commodore 64 music collection:
-# https://kohina.duckdns.org/HVSC/C64Music/10_Years_HVSC.dfi
-
-0 byte 0
->1 string DREAMLOAD\40FILE\40ARCHIVE
->>0x17 byte 0 DFI Image
->>>0x1a leshort x version: %d.
->>>0x18 leshort x \b%d
->>>0x1c lelong x tracks: %d
-
-0 string GCR-1541 GCR Image
->8 byte x version: %i
->9 byte x tracks: %i
-
-9 string PSUR ARC archive (c64)
-2 string -LH1- LHA archive (c64)
-
-0 string C64File PC64 Emulator file
->8 string >\0 "%s"
-0 string C64Image PC64 Freezer Image
-
-0 beshort 0x38CD C64 PCLink Image
-0 string CBM\144\0\0 Power 64 C64 Emulator Snapshot
-
-0 belong 0xFF424CFF WRAptor packer (c64)
-
-0 string C64S\x20tape\x20file T64 tape Image
->32 leshort x Version:%#x
->36 leshort !0 Entries:%i
->40 string x Name:%.24s
-
-0 string C64\x20tape\x20image\x20file\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0 T64 tape Image
->32 leshort x Version:%#x
->36 leshort !0 Entries:%i
->40 string x Name:%.24s
-
-0 string C64S\x20tape\x20image\x20file\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0\x0 T64 tape Image
->32 leshort x Version:%#x
->36 leshort !0 Entries:%i
->40 string x Name:%.24s
-
-# Raw tape file format (.tap files)
-# Esa Hyyti <esa@netlab.tkk.fi>
-0 string C64-TAPE-RAW C64 Raw Tape File (.tap),
->0x0c byte x Version:%u,
->0x10 lelong x Length:%u cycles
-
-# magic for Goattracker2, http://covertbitops.c64.org/
-# from Alex Myczko <alex@aiei.ch>
-0 string GTS5 GoatTracker 2 song
->4 string >\0 \b, "%s"
->36 string >\0 \b by %s
->68 string >\0 \b (C) %s
->100 byte >0 \b, %u subsong(s)
-
-# CBM BASIC (cc65 compiled)
-# Summary: binary executable or Basic program for Commodore C64 computers
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Commodore_BASIC_tokenized_file
-# Reference: https://www.c64-wiki.com/wiki/BASIC_token
-# https://github.com/thezerobit/bastext/blob/master/bastext.doc
-# http://mark0.net/download/triddefs_xml.7z/defs/p/prg-c64.trid.xml
-# TODO: unify Commodore BASIC/program sub routines
-# Note: "PUCrunch archive data" moved from ./archive and merged with c64-exe
-0 leshort 0x0801
-# display Commodore C64 BASIC program (strength=50) after "Lynx archive" (strength=330) handled by ./archive
-#!:strength +0
-# if first token is not SYS this implies BASIC program in most cases
->6 ubyte !0x9e
-# but sELF-ExTRACTING-zIP executable unzp6420.prg contains SYS token at end of second BASIC line (at 0x35)
->>23 search/30 \323ELF-E\330TRACTING-\332IP
->>>0 use c64-exe
->>23 default x
->>>0 use c64-prg
-# if first token is SYS this implies binary executable
->6 ubyte =0x9e
->>0 use c64-exe
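The 0x0801 entry above implements the dispatch the comments describe: a tokenized program starts with a little-endian load address, followed by the first BASIC line (pointer to the next line, line number, then token bytes), and a first token of 0x9e (SYS) marks a binary executable rather than a BASIC program. A rough Python equivalent of that field layout, intended only as an illustration (file name hypothetical):

    import struct

    def classify_c64_prg(data):
        load_addr, next_ptr, line_no = struct.unpack_from("<HHH", data, 0)
        first_token = data[6]
        kind = "binary executable (SYS)" if first_token == 0x9e else "BASIC program"
        return {"load": hex(load_addr), "line": line_no, "kind": kind}

    # classify_c64_prg(open("example.prg", "rb").read())   # hypothetical sample file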
-# display information about C64 binary executable (memory address, line number, token)
-0 name c64-exe
->0 uleshort x Commodore C64
-# http://a1bert.kapsi.fi/Dev/pucrunch/
-# start address 0801h; next offset 080bh; BASIC line number is 239=00EFh; BASIC instruction is SYS 2061
-# the above combination apparently also occurs for other Commodore programs like: gunzip111.c64.prg
-# and there exist PUCrunch archives for other machines like the C16 with other magics
->0 string \x01\x08\x0b\x08\xef\x00\x9e\x32\x30\x36\x31 program, probably PUCrunch archive data
-!:mime application/x-compress-pucrunch
-!:ext prg/pck
->0 string !\x01\x08\x0b\x08\xef\x00\x9e\x32\x30\x36\x31 program
-!:mime application/x-commodore-exec
-!:ext prg/
-# start address like: 801h
->0 uleshort !0x0801 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x800) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# valid 2nd BASIC fragment found only in sELF-ExTRACTING-zIP executable unzp6420.prg
->>23 search/30 \323ELF-E\330TRACTING-\332IP
-# jump again from beginning
->>>(2.s-0x800) ubyte x
->>>>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about tokenized C64 BASIC program (memory address, line number, token)
-0 name c64-prg
->0 uleshort x Commodore C64 BASIC program
-!:mime application/x-commodore-basic
-# Tokenized BASIC programs were stored by Commodore with file type "PRG" kept in a separate field of the directory structure.
-# So the file name may have no suffix, as in saveroms; when transferred to other platforms, such files are often saved with a .prg extension.
-# The BAS suffix is typically used for BASIC source code but is also found in programs like pods.bas
-!:ext prg/bas/
-# start address like: 801h
->0 uleshort !0x0801 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x0800) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# 2nd BASIC fragment
->>&0 use basic-line
-# zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# Summary: binary executable or Basic program for Commodore C128 computers
-# URL: https://en.wikipedia.org/wiki/Commodore_128
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/prg-c128.trid.xml
-# From: Joerg Jenderek
-# Note: Commodore 128 BASIC 7.0 variant; there exist variants with different start addresses
-0 leshort 0x1C01
-!:strength +1
-# GRR: line above with strength 51 (50+1) is too generic because it matches SVr3 curses screen image, big-endian with strength (50) handled by ./terminfo
-# probably skip SVr3 curses images with "invalid high" second line offset
->2 uleshort <0x1D02
-# skip foo with "invalid low" second line offset
->>2 uleshort >0x1C06
-# if first token is not SYS this implies BASIC program
->>>6 ubyte !0x9e
->>>>0 use c128-prg
-# if first token is SYS this implies binary executable
->>>6 ubyte =0x9e
->>>>0 use c128-exe
-# Summary: binary executable or Basic program for Commodore C128 computers
-# Note: Commodore 128 BASIC 7.1 extension by Rick Simon
-# start address 132Dh
-#0 leshort 0x132D THIS_IS_C128_7.1
-#>0 use c128-prg
-# Summary: binary executable or Basic program for Commodore C128 computers
-# Note: Commodore 128 BASIC 7.0 saved with graphics mode enabled
-# start address 4001h
-#0 leshort 0x4001 THIS_IS_C128_GRAPHIC
-#>0 use c128-prg
-# display information about tokenized C128 BASIC program (memory address, line number, token)
-0 name c128-prg
->0 uleshort x Commodore C128 BASIC program
-!:mime application/x-commodore-basic
-!:ext prg
-# start address like: 1C01h
->0 uleshort !0x1C01 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x1C00) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# 2nd BASIC fragment
->>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about C128 program (memory address, line number, token)
-0 name c128-exe
->0 uleshort x Commodore C128 program
-!:mime application/x-commodore-exec
-!:ext prg/
-# start address like: 1C01h
->0 uleshort !0x1C01 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x1C00) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# no valid 2nd BASIC fragment in Commodore executables
-#>>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# Summary: binary executable or Basic program for Commodore C16/VIC-20/Plus4 computers
-# URL: https://en.wikipedia.org/wiki/Commodore_Plus/4
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/prg-vic20.trid.xml
-# defs/p/prg-plus4.trid.xml
-# From: Joerg Jenderek
-# Note: there exist VIC-20 variants with different start addresses
-# GRR: line below is too generic because it matches Novell LANalyzer capture
-# with regular trace header record handled by ./sniffer
-0 leshort 0x1001
-# skip regular Novell LANalyzer capture (novell-2.tr1 novell-lanalyzer.tr1 novell-win10.tr1) with "invalid low" token value 54h
->6 ubyte >0x7F
-# skip regular Novell LANalyzer capture (novell-2.tr1 novell-lanalyzer.tr1 novell-win10.tr1) with "invalid low" second line offset 4Ch
-#>>2 uleshort >0x1006 OFFSET_NOT_TOO_LOW
-# skip foo with "invalid high" second line offset but not for 0x123b (Minefield.prg)
-#>>>2 uleshort <0x1102 OFFSET_NOT_TOO_HIGH
-# if first token is not SYS this implies BASIC program
->>6 ubyte !0x9e
-# valid second end of line separator implies BASIC program
->>>(2.s-0x1000) ubyte =0
->>>>0 use c16-prg
-# invalid second end of line separator !=0 implies binary executable like: Minefield.prg
->>>(2.s-0x1000) ubyte !0
->>>>0 use c16-exe
-# if first token is SYS this implies binary executable
->>6 ubyte =0x9e
->>>0 use c16-exe
-# display information about C16 program (memory address, line number, token)
-0 name c16-exe
->0 uleshort x Commodore C16/VIC-20/Plus4 program
-!:mime application/x-commodore-exec
-!:ext prg/
-# start address like: 1001h
->0 uleshort !0x1001 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x1000) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# no valid 2nd BASIC fragment in executables
-#>>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about tokenized C16 BASIC program (memory address, line number, token)
-0 name c16-prg
->0 uleshort x Commodore C16/VIC-20/Plus4 BASIC program
-!:mime application/x-commodore-basic
-!:ext prg
-# start address like: 1001h
->0 uleshort !0x1001 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x1000) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# 2nd BASIC fragment
->>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# Summary: binary executable or Basic program for Commodore VIC-20 computer with 8K RAM expansion
-# URL: https://en.wikipedia.org/wiki/VIC-20
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/prg-vic20-8k.trid.xml
-# From: Joerg Jenderek
-# Note: Basic v2.0 with Basic v4.0 extension (VIC20); there exist VIC-20 variants with different start addresses
-# start address 1201h
-0 leshort 0x1201
-# if first token is not SYS this implies BASIC program
->6 ubyte !0x9e
->>0 use vic-prg
-# if first token is SYS this implies binary executable
->6 ubyte =0x9e
->>0 use vic-exe
-# display information about Commodore VIC-20 BASIC+8K program (memory address, line number, token)
-0 name vic-prg
->0 uleshort x Commodore VIC-20 +8K BASIC program
-!:mime application/x-commodore-basic
-!:ext prg
-# start address like: 1201h
->0 uleshort !0x1201 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x1200) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# 2nd BASIC fragment
->>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about Commodore VIC-20 +8K program (memory address, line number, token)
-0 name vic-exe
->0 uleshort x Commodore VIC-20 +8K program
-!:mime application/x-commodore-exec
-!:ext prg/
-# start address like: 1201h
->0 uleshort !0x1201 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x0400) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# no valid 2nd BASIC fragment in executables
-#>>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# Summary: binary executable or Basic program for Commodore PET computers
-# URL: https://en.wikipedia.org/wiki/Commodore_PET
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/prg-pet.trid.xml
-# From: Joerg Jenderek
-# start address 0401h
-0 leshort 0x0401
-!:strength +1
-# GRR: line above with strength 51 (50+1) is too generic because it matches TTComp archive data, ASCII, 1K dictionary
-# (strength=48=50-2) handled by ./archive and shared library (strength=50) handled by ./ibm6000
-# skip TTComp archive data, ASCII, 1K dictionary ttcomp-ascii-1k.bin with "invalid high" second line offset 4162h
->2 uleshort <0x0502
-# skip foo with "invalid low" second line offset
-#>>2 uleshort >0x0406 OFFSET_NOT_TOO_LOW
-# skip bar with "invalid end of line"
-#>>>(2.s-0x0400) ubyte =0 END_OF_LINE_OK
-# if first token is not SYS this implies BASIC program
->>6 ubyte !0x9e
->>>0 use pet-prg
-# if first token is SYS this implies binary executable
->>6 ubyte =0x9e
->>>0 use pet-exe
-# display information about Commodore PET BASIC program (memory address, line number, token)
-0 name pet-prg
->0 uleshort x Commodore PET BASIC program
-!:mime application/x-commodore-basic
-!:ext prg
-# start address like: 0401h
->0 uleshort !0x0401 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x0400) ubyte x
-# 2nd BASIC fragment
->>&0 use basic-line
-# zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about Commodore PET program (memory address, line number, token)
-0 name pet-exe
->0 uleshort x Commodore PET program
-!:mime application/x-commodore-exec
-!:ext prg/
-# start address like: 0401h
->0 uleshort !0x0401 \b, start address %#4.4x
-# 1st BASIC fragment
->2 use basic-line
-# jump to 1 byte before next BASIC fragment; this must be zero-byte marking the end of line
->(2.s-0x0400) ubyte x
->>&-1 ubyte !0 \b, no EOL=%#x
-# no valid 2nd BASIC fragment in executables
-#>>&0 use basic-line
-# Zero-byte marking the end of the BASIC line
->-3 ubyte !0 \b, 3 last bytes %#2.2x
-# Two zero-bytes in place of the pointer to next BASIC line indicates the end of the program
->>-2 ubeshort x \b%4.4x
-# display information about tokenized BASIC line (memory address, line number, Token)
-0 name basic-line
-# pointer to memory address of beginning of "next" BASIC line
-# greater than the previous offset, but by at most 100h
->0 uleshort x \b, offset %#4.4x
-# offset 0x0000 indicates the end of BASIC program; so bytes afterwards may be some other data
->0 uleshort 0
-# not line number but first 2 data bytes
->>2 ubeshort x \b, data %#4.4x
-# not token but next 2 data bytes
->>4 ubeshort x \b%4.4x
-# not token arguments but next data bytes
->>6 ubequad x \b%16.16llx
->>14 ubequad x \b%16.16llx...
-# like 0x0d20352020204c594e5820495820204259205749 "\r 5 LYNX IX BY WILL CORLEY" for LyNX archive Darkon.lnx handled by ./archive
-#>>3 string x "%-0.30s"
->0 uleshort >0
-# BASIC line number in the range 0 to 65520; common practice is to increment line numbers by some step (5, 10 or 100)
->>2 uleshort x \b, line %u
-# https://www.c64-wiki.com/wiki/BASIC_token
-# The "high-bit" bytes from #128-#254 stood for the various BASIC commands and mathematical operators
->>4 ubyte x \b, token (%#x)
-# https://www.c64-wiki.com/wiki/REM
->>4 string \x8f REM
-# remark string like: ** SYNTHESIZER BY RICOCHET **
->>>5 string >\0 %s
-#>>>>&1 uleshort x \b, NEXT OFFSET %#4.4x
-# https://www.c64-wiki.com/wiki/PRINT
->>4 string \x99 PRINT
-# string like: "Hello world" "\021 \323ELF-E\330TRACTING-\332IP (64 ONLY)\016\231":\2362141
->>>5 string x %s
-#>>>>&0 ubequad x AFTER_PRINT=%#16.16llx
-# https://www.c64-wiki.com/wiki/POKE
->>4 string \x97 POKE
-# <Memory address>,<number>
->>>5 regex \^[0-9,\040]+ %s
-# BASIC command delimiter colon (:=3Ah)
->>>>&-2 ubyte =0x3A
-# after BASIC command delimiter colon remaining (<255) other tokenized BASIC commands
->>>>>&0 string x "%s"
-# https://www.c64-wiki.com/wiki/SYS 0x9e=\236
->>4 string \x9e SYS
-# SYS <Address> parameter is a 16-bit unsigned integer; in the range 0 - 65535
->>>5 regex \^[0-9]{1,5} %s
-# maybe followed by spaces, "control characters" or a colon (:) followed by further commands, or, as in victracker.prg,
-# (\302(43)\252256\254\302(44)\25236) /T.L.R/
-#>>>5 string x SYS_STRING="%s"
-# https://www.c64-wiki.com/wiki/GOSUB
->>4 string \x8d GOSUB
-# <line>
->>>5 string >\0 %s
diff --git a/contrib/libs/libmagic/magic/Magdir/cad b/contrib/libs/libmagic/magic/Magdir/cad
deleted file mode 100644
index 0bead6eeb4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cad
+++ /dev/null
@@ -1,437 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cad,v 1.31 2022/12/09 15:36:23 christos Exp $
-# autocad: file(1) magic for cad files
-#
-
-# Microstation DGN/CIT Files (www.bentley.com)
-# Last updated July 29, 2005 by Lester Hightower
-# DGN is the default file extension of Microstation/Intergraph CAD files.
-# CIT is the proprietary raster format (similar to TIFF) used to attach
-# raster underlays to Microstation DGN (vector) drawings.
-#
-# http://www.wotsit.org/search.asp
-# https://filext.com/detaillist.php?extdetail=DGN
-# https://filext.com/detaillist.php?extdetail=CIT
-#
-# https://www.bentley.com/products/default.cfm?objectid=97F351F5-9C35-4E5E-89C2
-# 3F86C928&method=display&p_objectid=97F351F5-9C35-4E5E-89C280A93F86C928
-# https://www.bentley.com/products/default.cfm?objectid=A5C2FD43-3AC9-4C71-B682
-# 721C479F&method=display&p_objectid=A5C2FD43-3AC9-4C71-B682C7BE721C479F
-#
-# URL: https://en.wikipedia.org/wiki/MicroStation
-# reference: http://dgnlib.maptools.org/dgn.html
-# http://dgnlib.maptools.org/dl/ref18.pdf
-# Update: Joerg Jenderek
-# Note: verified by command like `dgndump seed2d_b.dgn`
-# test for level 8 and type 5 or 9
-0 beshort&0x3F73 0x0801
-# level of element like 8
-#>0 ubyte&0x3F x \b, level %u
-#>0 ubyte &0x80 \b, complex
-#>0 ubyte &0x40 \b, reserved
-# type of element 9~TCB 8~Digitizer setup 5~Group Data Elements
-#>1 ubyte&0x7F x \b, type %u
-# words to follow in element: 17H~CEL library 2FEh~DGN 9FEh,DFEh~CIT
-#>2 uleshort x \b, words %#4.4x to follow
-# test for 3 reserved 0 bytes in CIT or "conversion" in ViewInfo structure (DGN CEL)
-#>508 ubelong x \b, RESERVED %8.8x
->508 ubelong&0xFFffFF00 =0
-# test for level 8 and type 9 for INGR raster image
->>0 beshort 0x0809
-# test for length of 1st element is multiple of blocks a 512 bytes
->>>2 ubyte 0xfe
->>>>0 use ingr-image
-# test for DGN or CEL by jump words (uleshort) forward to next element
->(2.s*2) ulong x
-# 2nd element type: 8~Digitizer~DesiGNfile 1~library cell header other~CIT
-#>>&1 ubyte&0x7F x \b, 2nd type %u
-# DGN
->>&1 ubyte&0x7F 8
->>>2 uleshort =0x02FE Bentley/Intergraph Microstation CAD drawing
-!:mime application/x-bentley-dgn
-!:ext dgn
-# The 0x40 bit of this byte is 1 if the file is 3D, otherwise 0
->>>>1214 ubyte &0x40 3D
->>>>1214 ubyte ^0x40 2D
-# 2 chars for name of subunits like ft FT in IN mu m mm '\0 '\040
->>>>1120 string x \b, units %-.2s
-# 2 chars for name of master unit like IN in ML SU tn th TH HU mm "\0 "\040 \0\0
->>>>1122 string >\0 %-.2s
-#>>>>1120 ubelong x \b, units %#8.8x
-# element range low,high x y z like xlow=0 08010000h 01080000h
-#>>>>4 ubelong !0 \b, xlow %8.8x
-#>>>>8 ubelong !0 \b, ylow %8.8x
-#>>>>12 ubelong !0 \b, zlow %8.8x
-#>>>>16 ubelong !0 \b, xhigh %8.8x
-#>>>>20 ubelong !0 \b, yhigh %8.8x
-#>>>>24 ubelong !0 \b, zhigh %8.8x
-# graphic group number; all other elements in that group have same non-0 number
-#>>>>28 leshort x \b, grphgrp %#4.4x
-# words to optional attribute linkage
-#>>>>30 ubyte x \b, attindx \%o
-#>>>>31 ubyte x \b\%o
-# >>30 string \026\105 DGNFile
-# >>30 string \034\105 DGNFile
-# >>30 string \073\107 DGNFile
-# >>30 string \073\110 DGNFile
-# >>30 string \106\107 DGNFile
-# >>30 string \110\103 DGNFile
-# >>30 string \120\104 DGNFile
-# >>30 string \172\104 DGNFile
-# >>30 string \172\105 DGNFile
-# >>30 string \172\106 DGNFile
-# >>30 string \234\106 DGNFile
-# >>30 string \273\105 DGNFile
-# >>30 string \306\106 DGNFile
-# >>30 string \310\104 DGNFile
-# >>30 string \341\104 DGNFile
-# >>30 string \372\103 DGNFile
-# >>30 string \372\104 DGNFile
-# >>30 string \372\106 DGNFile
-# >>30 string \376\103 DGNFile
-# elements properties indicator
-#>>>>32 uleshort !0 \b, properties %#4.4x
-# class 0~Primary
-#>>>>>32 uleshort&0x000F !0 \b, class %#4.4x
-# Symbology
-#>>>>>34 uleshort x \b, Symbology %#4.4x
-# test for 2nd element type 1~library cell header
->>&1 ubyte&0x7F 1
-# test for 1st element with level 8 and type 5 for cell library
->>>0 beshort 0x0805 Bentley/Intergraph Microstation CAD cell library
-!:mime application/x-bentley-cel
-!:ext cel
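The DGN branch above reduces to a handful of fixed-offset checks: a level-8 element of type 5 or 9 at the start, the 3D flag in bit 0x40 of byte 1214, and two-character unit names at offsets 1120 and 1122. A hedged sketch of just those checks (the subset of fields the rules print, not a DGN parser):

    def dgn_summary(data):
        if int.from_bytes(data[0:2], "big") & 0x3F73 != 0x0801:
            return None                                  # not level 8, type 5 or 9
        dims = "3D" if data[1214] & 0x40 else "2D"
        subunits = data[1120:1122].decode("latin-1")     # e.g. "ft", "in", "mm"
        master = data[1122:1124].decode("latin-1")       # master unit name, may be blank
        return dims, master, subunits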
-#
-# URL: http://fileformats.archiveteam.org/wiki/Intergraph_Raster
-# reference: https://web.archive.org/web/20140903185431/
-# http://oreilly.com/www/centers/gff/formats/ingr/index.htm
-# note: verified by command like `nconvert -fullinfo LONGLAT.CIT`
-# display information for intergraph raster bitmap
-0 name ingr-image
-# in 5.37 "Microstation CITFile" "Bentley/Intergraph MicroStation CIT raster CAD"
-# DataTypeCode indicates format, depth of the pixel data and used compression
->4 uleshort x Intergraph raster image
->>4 uleshort 0x0009 \b, Run-Length Encoded 1-bit
-!:mime image/x-intergraph-rle
-!:ext rel
->>4 uleshort 0x0018 \b, CCITT Group 4 1-bit
-!:mime image/x-intergraph-cit
-!:ext cit
->>4 uleshort 27 \b, Adaptive RLE RGB
-!:mime image/x-intergraph-rgb
-!:ext rgb
->>4 default x
->>>4 uleshort x \b, Type %u
-!:mime image/x-intergraph
-# TODO:
-#>4 uleshort 0 \b, no data
-# ...
-#>4 uleshort 0x0045 \b, Continuous Tone CMKY (Uncompressed)
-# ApplicationType: 0~generic raster image 3~drawing, scanning
-# 8~I/IMAGE and MicroStation Imager 9~ModelView
->6 uleshort !0 \b, ApplicationType %u
-#>6 uleshort x \b, ApplicationType %u
-# XViewOrigin; Raster grid data X origin
-#>8 ulequad !0 \b, XViewOrigin %llx
-# PixelsPerLine is the number of pixels in a scan line of the bitmap
->184 ulelong x \b, %u x
-# NumberOfLines is height of the raster data in scanlines
->188 ulelong x %u
-# DeviceResolution; resolution of scanning device
-# positive indicates the number of microns between lines; negative indicates DPI
-#>192 leshort x \b, DeviceResolution %d
-# ScanlineOrient indicates the origin and the orientation of the scan lines
-#>194 ubyte x \b, ScanlineOrient %x
->194 ubyte x \b, orientation
->194 ubyte &0x01 right
->194 ubyte ^0x01 left
->194 ubyte &0x02 down
->194 ubyte ^0x02 top
->194 ubyte &0x04 horizontal
->194 ubyte ^0x04 vertical
-# ScannableFlag; Scanline indexing method used
-#>195 ubyte !0 \b, ScannableFlag %#x
-# RotationAngle; Rotation angle of raster data
-#>196 ubequad !0 \b, RotationAngle %#llx
-# SkewAngle; Skew angle of raster data
-#>204 ubequad !0 \b, SkewAngle %llx
-# DataTypeModifier; Additional raster data format info
-#>212 uleshort !0 \b, DataTypeModifier %#4.4x
-# DesignFile[66]; Name of the design file
->214 string >\0 \b, DesignFile %-.66s
-# DatabaseFile[66]; Name of the database file
->280 string >\0 \b, DatabaseFile %-.66s
-# ParentGridFile[66]; Name of parent grid file
->346 string >\0 \b, ParentGridFile %-.66s
-# FileDescription[80]; Text description of file and contents
->412 string >\0 \b, FileDescription %-.80s
-# MinValue
-#>492 ubequad !0 \b, MinValue %#llx
-# MaxValue
-#>500 ubequad !0 \b, MaxValue %#llx
-# Reserved[3]; Unused (always 0)
-#>508 ubelong&0xFFffFF00 x \b, RESERVED %8.8x
-# GridFileVersion; Grid File Version like 2 3
-#>511 ubyte x \b, GridFileVersion %x
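For reference, the fields that the ingr-image routine prints sit at fixed little-endian offsets in the 512-byte header. A small struct-based sketch covering only those fields, with type names taken from the rules above and nothing beyond them:

    import struct

    INGR_TYPES = {9: "Run-Length Encoded 1-bit", 24: "CCITT Group 4 1-bit", 27: "Adaptive RLE RGB"}

    def ingr_summary(data):
        data_type, app_type = struct.unpack_from("<HH", data, 4)   # DataTypeCode, ApplicationType
        width, height = struct.unpack_from("<II", data, 184)       # PixelsPerLine, NumberOfLines
        orient = data[194]                                          # ScanlineOrient bit flags
        return INGR_TYPES.get(data_type, "Type %u" % data_type), app_type, (width, height), orient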
-
-# AutoCAD
-# Merge of the different contributions and updates from https://en.wikipedia.org/wiki/Dwg
-# and https://www.iana.org/assignments/media-types/image/vnd.dwg
-0 string MC0.0 DWG AutoDesk AutoCAD Release 1.0
-!:mime image/vnd.dwg
-0 string AC1.2 DWG AutoDesk AutoCAD Release 1.2
-!:mime image/vnd.dwg
-0 string AC1.3 DWG AutoDesk AutoCAD Release 1.3
-!:mime image/vnd.dwg
-0 string AC1.40 DWG AutoDesk AutoCAD Release 1.40
-!:mime image/vnd.dwg
-0 string AC1.50 DWG AutoDesk AutoCAD Release 2.05
-!:mime image/vnd.dwg
-0 string AC2.10 DWG AutoDesk AutoCAD Release 2.10
-!:mime image/vnd.dwg
-0 string AC2.21 DWG AutoDesk AutoCAD Release 2.21
-!:mime image/vnd.dwg
-0 string AC2.22 DWG AutoDesk AutoCAD Release 2.22
-!:mime image/vnd.dwg
-0 string AC1001 DWG AutoDesk AutoCAD Release 2.22
-!:mime image/vnd.dwg
-0 string AC1002 DWG AutoDesk AutoCAD Release 2.50
-!:mime image/vnd.dwg
-0 string AC1003 DWG AutoDesk AutoCAD Release 2.60
-!:mime image/vnd.dwg
-0 string AC1004 DWG AutoDesk AutoCAD Release 9
-!:mime image/vnd.dwg
-0 string AC1006 DWG AutoDesk AutoCAD Release 10
-!:mime image/vnd.dwg
-0 string AC1009 DWG AutoDesk AutoCAD Release 11/12
-!:mime image/vnd.dwg
-# AutoCAD DWG versions R13/R14 (www.autodesk.com)
-# Written December 01, 2003 by Lester Hightower
-# Based on the DWG File Format Specifications at http://www.opendwg.org/
-# AutoCad, from Nahuel Greco
-# AutoCAD DWG versions R12/R13/R14 (www.autodesk.com)
-0 string AC1012 DWG AutoDesk AutoCAD Release 13
-!:mime image/vnd.dwg
-0 string AC1013 DWG AutoDesk AutoCAD Release 13c3
-!:mime image/vnd.dwg
-0 string AC1014 DWG AutoDesk AutoCAD Release 14
-!:mime image/vnd.dwg
-0 string AC1015 DWG AutoDesk AutoCAD 2000
-!:mime image/vnd.dwg
-
-# A new version of AutoCAD DWG
-# Sergey Zaykov (mail_of_sergey@mail.ru, sergey_zaikov@rambler.ru,
-# ICQ 358572321)
-# From various sources like:
-# https://autodesk.blogs.com/between_the_lines/autocad-release-history.html
-0 string AC1018 DWG AutoDesk AutoCAD 2004/2005/2006
-!:mime image/vnd.dwg
-0 string AC1021 DWG AutoDesk AutoCAD 2007/2008/2009
-!:mime image/vnd.dwg
-0 string AC1024 DWG AutoDesk AutoCAD 2010/2011/2012
-!:mime image/vnd.dwg
-0 string AC1027 DWG AutoDesk AutoCAD 2013-2017
-!:mime image/vnd.dwg
-
-# From GNU LibreDWG
-0 string AC1032 DWG AutoDesk AutoCAD 2018/2019/2020
-!:mime image/vnd.dwg
-0 string AC1035 DWG AutoDesk AutoCAD 2021
-!:mime image/vnd.dwg
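All of the DWG entries above key on a short ASCII version tag at offset 0, so the whole table can be expressed as a simple lookup. A partial sketch containing only a subset of the tags listed above:

    DWG_TAGS = {
        b"AC1009": "Release 11/12",
        b"AC1015": "AutoCAD 2000",
        b"AC1018": "AutoCAD 2004/2005/2006",
        b"AC1027": "AutoCAD 2013-2017",
        b"AC1032": "AutoCAD 2018/2019/2020",
    }

    def dwg_release(header6):
        # header6 = first 6 bytes of the file
        return DWG_TAGS.get(header6, "unknown or older release")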
-
-# KOMPAS 2D drawing from ASCON
-# This matches a KOMPAS 2D drawing or a fragment of a drawing; no detailed
-# specification has been gathered
-# ASCON https://ascon.net/main/ in English,
-# https://ascon.ru/ main site in Russian
-# Extension is CDW for drawing and FRW for fragment of drawing
-# Sergey Zaykov (mail_of_sergey@mail.ru, sergey_zaikov@rambler.ru,
-# ICQ 358572321, https://vkontakte.ru/id16076543)
-# From:
-# https://sd.ascon.ru/otrs/customer.pl?Action=CustomerFAQ&CategoryID=4&ItemID=292
-# (in russian) and my experiments
-0 string KF
->2 belong 0x4E00000C Kompas drawing 12.0 SP1
->2 belong 0x4D00000C Kompas drawing 12.0
->2 belong 0x3200000B Kompas drawing 11.0 SP1
->2 belong 0x3100000B Kompas drawing 11.0
->2 belong 0x2310000A Kompas drawing 10.0 SP1
->2 belong 0x2110000A Kompas drawing 10.0
->2 belong 0x08000009 Kompas drawing 9.0 SP1
->2 belong 0x05000009 Kompas drawing 9.0
->2 belong 0x33010008 Kompas drawing 8+
->2 belong 0x1A000008 Kompas drawing 8.0
->2 belong 0x2C010107 Kompas drawing 7+
->2 belong 0x05000007 Kompas drawing 7.0
->2 belong 0x32000006 Kompas drawing 6+
->2 belong 0x09000006 Kompas drawing 6.0
->2 belong 0x5C009005 Kompas drawing 5.11R03
->2 belong 0x54009005 Kompas drawing 5.11R02
->2 belong 0x51009005 Kompas drawing 5.11R01
->2 belong 0x22009005 Kompas drawing 5.10R03
->2 belong 0x22009005 Kompas drawing 5.10R02 mar
->2 belong 0x21009005 Kompas drawing 5.10R02 febr
->2 belong 0x19009005 Kompas drawing 5.10R01
->2 belong 0xF4008005 Kompas drawing 5.9R01.003
->2 belong 0x1C008005 Kompas drawing 5.9R01.002
->2 belong 0x11008005 Kompas drawing 5.8R01.003
-
-# CAD: file(1) magic for computer aided design files
-# Phillip Griffith <phillip dot griffith at gmail dot com>
-# AutoCAD magic taken from the Open Design Alliance's OpenDWG specifications.
-#
-
-# 3DS (3d Studio files)
-0 leshort 0x4d4d
->6 leshort 0x2
->>8 lelong 0xa
->>>16 leshort 0x3d3d 3D Studio model
-# Beat sgi MMV
-!:strength +20
-!:mime image/x-3ds
-!:ext 3ds
-
-# MegaCAD 2D/3D drawing (.prt)
-# https://megacad.de/
-# From: Markus Heidelberg <markus.heidelberg@web.de>
-0 string MegaCad23\0 MegaCAD 2D/3D drawing
-
-# Hoops CAD files
-# https://docs.techsoft3d.com/visualize/3df/latest/build/general/hsf/\
-# HSF_architecture.html
-# Stephane Charette <stephane.charette@gmail.com>
-0 string ;;\040HSF\040V OpenHSF (Hoops Stream Format)
->7 regex/9 V[.0-9]{4,5}\040 %s
-!:ext hsf
-
-# AutoCAD Drawing Exchange Format
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/DXF
-# https://en.wikipedia.org/wiki/AutoCAD_DXF
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/
-# dxf-var0.trid.xml dxf-var0u.trid.xml dxf-var2.trid.xml dxf-var2u.trid.xml
-# Note: called "AutoCAD Drawing eXchange Format" by TrID and
-# "Drawing Interchange File Format (ASCII)" by DROID
-# GRR: some samples do not match the 1st test, like: abydos.dxf
-0 regex \^[\ \t]*0\r?\000$
->1 regex \^[\ \t]*SECTION\r?$
->>2 regex \^[\ \t]*2\r?$
-# GRR: some samples have no HEADER section, like: airplan2.dxf
->>>3 regex \^[\ \t]*HEADER\r?$ AutoCAD Drawing Exchange Format
-#!:mime application/x-dxf
-!:mime image/vnd.dxf
-!:ext dxf
-# DROID PUID fmt/64 fmt-64-signature-id-99.dxf
->>>>&1 search/8192 MC0.0 \b, 1.0
-# DROID PUID fmt/65 fmt-65-signature-id-100.dxf
->>>>&1 search/8192 AC1.2 \b, 1.2
-# DROID PUID fmt/66 fmt-66-signature-id-101.dxf
->>>>&1 search/8192 AC1.3 \b, 1.3
-# DROID PUID fmt/67 fmt-67-signature-id-102.dxf
->>>>&1 search/8192 AC1.40 \b, 1.4
-# DROID PUID fmt/68 fmt-68-signature-id-103.dxf
->>>>&1 search/8192 AC1.50 \b, 2.0
-# DROID PUID fmt/69 fmt-69-signature-id-104.dxf
->>>>&1 search/8192 AC2.10 \b, 2.1
-# DROID PUID fmt/70 fmt-70-signature-id-105.dxf
->>>>&1 search/8192 AC2.21 \b, 2.2
-# DROID PUID fmt/71 fmt-71-signature-id-106.dxf
->>>>&1 search/8192 AC1002 \b, 2.5
-# DROID PUID fmt/72 fmt-72-signature-id-107.dxf
->>>>&1 search/8192 AC1003 \b, 2.6
-# DROID PUID fmt/73 fmt-73-signature-id-108.dxf
->>>>&1 search/8192 AC1004 \b, R9
->>>>&1 search/8192 AC1006 \b, R10
-# http://cd.textfiles.com/amigaenv/DXF/OBJEKTE/LASTMINUTE/apple.dxf
-#>>>>&1 search/8192 AC1008 \b, Rfoo
->>>>&1 search/8192 AC1009 \b, R11/R12
->>>>&1 search/8192 AC1012 \b, R13
->>>>&1 search/8192 AC1013 \b, R13c3
->>>>&1 search/8192 AC1014 \b, R14
->>>>&1 search/8192 AC1015 \b, version 2000
->>>>&1 search/8192 AC1018 \b, version 2004
->>>>&1 search/8192 AC1021 \b, version 2007
->>>>&1 search/8192 AC1024 \b, version 2010
->>>>&1 search/8192 AC1027 \b, version 2013
->>>>&1 search/8192 AC1032 \b, version 2018
->>>>&1 search/8192 AC1035 \b, version 2021
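The ASCII DXF test above reads the first four group-code lines (0 / SECTION / 2 / HEADER) and then searches the next 8 KiB for the same ACxxxx tags used for DWG. A loose Python approximation, hedged because, as the GRR notes say, real samples do not always match the strict form:

    import re

    def dxf_acad_tag(text):
        lines = [l.strip() for l in text.splitlines()[:8]]
        if lines[:4] != ["0", "SECTION", "2", "HEADER"]:
            return None                                   # e.g. airplan2.dxf has no HEADER section
        m = re.search(r"\b(MC0\.0|AC[12]\.\d+|AC1\d{3})\b", text[:8192])
        return m.group(1) if m else None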
-
-# The Sketchup 3D model format https://www.sketchup.com/
-0 string \xff\xfe\xff\x0e\x53\x00\x6b\x00\x65\x00\x74\x00\x63\x00\x68\x00\x55\x00\x70\x00\x20\x00\x4d\x00\x6f\x00\x64\x00\x65\x00\x6c\x00 SketchUp Model
-!:mime application/vnd.sketchup.skp
-!:ext skp
-
-4 regex/b P[0-9][0-9]\\.[0-9][0-9][0-9][0-9]\\.[0-9][0-9][0-9][0-9]\\.[0-9] NAXOS CAD System file from version %s
-!:strength +40
-
-# glTF (GL Transmission Format) - by the Khronos Group
-# Reference: https://github.com/KhronosGroup/glTF/tree/master/specification/2.0#glb-file-format-specification
-0 string glTF glTF binary model
->4 ulelong x \b, version %d
->8 ulelong x \b, length %d bytes
-!:mime model/gltf-binary
-!:ext glb
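The GLB test reads a 12-byte header: the "glTF" magic followed by two little-endian 32-bit words for version and total length, exactly the two values printed above. A tiny sketch:

    import struct

    def glb_header(blob):
        magic, version, length = struct.unpack_from("<4sII", blob, 0)
        if magic != b"glTF":
            raise ValueError("not a binary glTF file")
        return version, length          # e.g. (2, total file size in bytes)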
-
-# FBX (FilmBoX) - by Kaydara/Autodesk
-# Reference: https://code.blender.org/2013/08/fbx-binary-file-format-specification
-0 string Kaydara\ FBX\ Binary\ \ \0 Kaydara FBX model,
->&2 ulelong x version %d
-!:ext fbx
-
-# PLY (Polygon File Format/Stanford Triangle Format) - by Greg Turk
-# Reference: https://web.archive.org/web/20161204152348/http://www.dcs.ed.ac.uk/teaching/cs4/www/graphics/Web/ply.html
-0 string ply\n PLY model,
-!:ext ply
->4 string format\ ascii\ ASCII,
->>&0 regex/6 [0-9.]+ version %s
->4 string format\ binary binary,
->>&0 string _little_endian\ little endian,
->>>&0 regex/6 [0-9.]+ version %s
->>&0 string _big_endian\ big endian,
->>>&0 regex/6 [0-9.]+ version %s
-
-# VRML (Virtual Reality Modeling Language) - by the Web3D Consortium
-# From: Michel Briand <michelbriand@free.fr>
-# Reference: https://www.web3d.org/standards
-0 string/w #VRML\ V1.0\ ascii VRML 1 file
-!:mime model/vrml
-!:ext wrl
-0 string/w #VRML\ V2.0\ utf8 ISO/IEC 14772 VRML 97 file
-!:mime model/vrml
-!:ext wrl
-# X3D, VRML encoded
-0 string #X3D X3D (Extensible 3D) model, VRML format
->4 string V
->>5 regex/6 [0-9.]+ \b, version %s
-!:mime model/x3d+vrml
-!:ext x3dv
-
-## XML-based 3D CAD Formats
-# From: Michel Briand <michelbriand@free.fr>, Oliver Galvin <odg@riseup.net>
-0 string/w \<?xml\ version=
-!:strength + 5
-# X3D (Extensible 3D)
-# Schema: https://www.web3d.org/specifications/x3d-3.2.dtd
-# MIME Type: https://www.iana.org/assignments/media-types/model/x3d+xml
-# Example: https://www.web3d.org/x3d/content/examples/Basic/course/CreateX3DFromStringRandomSpheres.x3d
->20 search/1000/w \<!DOCTYPE\ X3D X3D (Extensible 3D) model, XML document
-!:mime model/x3d+xml
-!:ext x3d
-# COLLADA (COLLAborative Design Activity) - by the Khronos Group
-# Schema: http://www.collada.org/2005/11/COLLADASchema
-# Reference: https://www.khronos.org/collada
->20 search/1000/w \<COLLADA COLLADA model, XML document
-!:mime model/vnd.collada+xml
-!:ext dae
-# 3MF (3D Manufacturing Format) - by the 3MF Consortium
-# Schema: http://schemas.microsoft.com/3dmanufacturing/core/2015/02
-# Reference: https://3mf.io/specification
->20 search/1000/w xmlns="http://schemas.microsoft.com/3dmanufacturing 3MF (3D Manufacturing Format) model, XML document
-!:mime model/3mf
-!:ext 3mf
-# AMF (Additive Manufacturing File)
-# Reference: https://www.astm.org/Standards/ISOASTM52915.htm
->20 search/1000/w \<amf AMF (Additive Manufacturing Format) model, XML document
-!:mime application/x-amf
-!:ext amf
diff --git a/contrib/libs/libmagic/magic/Magdir/cafebabe b/contrib/libs/libmagic/magic/Magdir/cafebabe
deleted file mode 100644
index 4f97cc0345..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cafebabe
+++ /dev/null
@@ -1,107 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cafebabe,v 1.28 2022/07/01 23:24:47 christos Exp $
-# Cafe Babes unite!
-#
-# Since Java bytecode and Mach-O universal binaries have the same magic number,
-# the test must be performed in the same "magic" sequence to get both right.
-# The long at offset 4 in a Mach-O universal binary tells the number of
-# architectures; the short at offset 4 in a Java bytecode file is the JVM minor
-# version and the short at offset 6 is the JVM major version. Since there are
-# only 18 labeled Mach-O architectures at present, and the first released
-# Java class format was version 43.0, we can safely choose any number
-# between 18 and 39 to test the number of architectures against
-# (and use as a hack). Let's not use 18, because the Mach-O people
-# might add another one or two as time goes by...
-#
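In other words, the value at offset 4 disambiguates the shared 0xcafebabe magic: a small count means a Mach-O fat header, a large value is a Java class-file version. A direct restatement of that heuristic in Python, using the same 20/30 thresholds as the rules below (values in the 20-30 gap are left undecided):

    import struct

    def classify_cafebabe(data):
        magic, word = struct.unpack_from(">II", data, 0)
        if magic != 0xCAFEBABE:
            return "not a cafebabe file"
        if word > 30:                                   # JVM minor.major, big-endian
            minor, major = struct.unpack_from(">HH", data, 4)
            return "compiled Java class data, version %d.%d" % (major, minor)
        if 1 <= word < 20:                              # plausible nfat_arch
            return "Mach-O universal binary with %d architecture(s)" % word
        return "ambiguous"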
-### JAVA START ###
-# Reference: http://en.wikipedia.org/wiki/Java_class_file
-# Update: Joerg Jenderek
-0 belong 0xcafebabe
->4 ubelong >30 compiled Java class data,
-!:mime application/x-java-applet
-#!:mime application/java-byte-code
-!:ext class
->>6 ubeshort x version %d.
->>4 ubeshort x \b%d
-# for debugging purpose version as hexadecimal to compare with Mach-O universal binary
-#>>4 ubelong x (%#8.8x)
-# Which is which?
-# https://docs.oracle.com/javase/specs/jvms/se6/html/ClassFile.doc.html
-#>>4 belong 0x002b (Java 0.?)
-#>>4 belong 0x032d (Java 1.0)
-#>>4 belong 0x032d (Java 1.1)
->>4 belong 0x002e (Java 1.2)
->>4 belong 0x002f (Java 1.3)
->>4 belong 0x0030 (Java 1.4)
->>4 belong 0x0031 (Java 1.5)
->>4 belong 0x0032 (Java 1.6)
->>4 belong 0x0033 (Java 1.7)
->>4 belong 0x0034 (Java 1.8)
->>4 belong 0x0035 (Java SE 9)
->>4 belong 0x0036 (Java SE 10)
->>4 belong 0x0037 (Java SE 11)
->>4 belong 0x0038 (Java SE 12)
->>4 belong 0x0039 (Java SE 13)
->>4 belong 0x003A (Java SE 14)
->>4 belong 0x003B (Java SE 15)
->>4 belong 0x003C (Java SE 16)
->>4 belong 0x003D (Java SE 17)
->>4 belong 0x003E (Java SE 18)
->>4 belong 0x003F (Java SE 19)
->>4 belong 0x0040 (Java SE 20)
-# pool count unequal zero
-#>>8 beshort x \b, pool count %#x
-# pool table
-#>>10 ubequad x \b, pool %#16.16llx...
-
-0 belong 0xcafed00d JAR compressed with pack200,
->5 byte x version %d.
->4 byte x \b%d
-!:mime application/x-java-pack200
-
-### JAVA END ###
-### MACH-O START ###
-# URL: https://en.wikipedia.org/wiki/Mach-O
-
-0 name mach-o \b [
-# for debugging purpose CPU type as hexadecimal
-#>0 ubequad x CPU=%16.16llx
-# display CPU type as string like: i386 x86_64 ... armv7 armv7k ...
->0 use mach-o-cpu \b
-# for debugging purpose print offset to 1st mach_header like:
-# 1000h 4000h seldom 2d000h 88000h 5b000h 10e000h
-#>8 ubelong x at %#x offset
->(8.L) indirect x \b:
->0 belong x \b]
-
-# Reference: https://opensource.apple.com/source/cctools/cctools-949.0.1/
-# include/mach-o/fat.h
-# include/mach/machine.h
-0 belong 0xcafebabe
->4 belong 1 Mach-O universal binary with 1 architecture:
-!:mime application/x-mach-binary
->>8 use mach-o \b
-# nfat_arch; number of CPU architectures; highest is 18 for CPU_TYPE_POWERPC in 2020
->4 ubelong >1
->>4 ubelong <20 Mach-O universal binary with %d architectures:
-!:mime application/x-mach-binary
->>>8 use mach-o \b
->>>4 ubelong >1
->>>>28 use mach-o \b
->>>4 ubelong >2
->>>>48 use mach-o \b
->>>4 ubelong >3
->>>>68 use mach-o \b
->>>4 ubelong >4
->>>>88 use mach-o \b
->>>4 ubelong >5
->>>>108 use mach-o \b
-
-### MACH-O END ###
diff --git a/contrib/libs/libmagic/magic/Magdir/cbor b/contrib/libs/libmagic/magic/Magdir/cbor
deleted file mode 100644
index c780dc6594..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cbor
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cbor,v 1.1 2015/01/28 01:05:21 christos Exp $
-# cbor: file(1) magic for CBOR files as defined in RFC 7049
-
-0 string \xd9\xd9\xf7 Concise Binary Object Representation (CBOR) container
-!:mime application/cbor
->3 ubyte <0x20 (positive integer)
->3 ubyte <0x40
->>3 ubyte >0x1f (negative integer)
->3 ubyte <0x60
->>3 ubyte >0x3f (byte string)
->3 ubyte <0x80
->>3 ubyte >0x5f (text string)
->3 ubyte <0xa0
->3 ubyte >0x7f (array)
->3 ubyte <0xc0
->>3 ubyte >0x9f (map)
->3 ubyte <0xe0
->>3 ubyte >0xbf (tagged)
->3 ubyte >0xdf (other)
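The byte ranges above are simply the eight CBOR major types spelled out; the major type of the item following the d9 d9 f7 self-describe tag is its initial byte shifted right by five bits. A compact equivalent:

    CBOR_MAJOR = ("positive integer", "negative integer", "byte string", "text string",
                  "array", "map", "tagged", "other")

    def cbor_first_item(data):
        if data[:3] != b"\xd9\xd9\xf7":
            return None
        return CBOR_MAJOR[data[3] >> 5]     # 0x00-0x1f -> 0, 0x20-0x3f -> 1, ... 0xe0-0xff -> 7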
diff --git a/contrib/libs/libmagic/magic/Magdir/ccf b/contrib/libs/libmagic/magic/Magdir/ccf
deleted file mode 100644
index 1d5ba19e00..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ccf
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ccf,v 1.1 2022/02/15 12:57:45 christos Exp $
-# file(1) magic(5) data for Philips remote controls
-
-# Exchange format for Philips Pronto universal infrared remote controls
-# A CCF file describes a learned/customized remote control,
-# i.e. it contains button UI and infrared pulse code definitions
-# (Georg Sauthoff)
-# http://files.remotecentral.com/download/45/pan-air-csakr.zip.html
-# https://github.com/gsauthof/pronto-ccf/blob/
-
-8 string @\xa5Z@_CCF
->32 string CCF\x00 Philips Pronto IR remote control CCF
diff --git a/contrib/libs/libmagic/magic/Magdir/cddb b/contrib/libs/libmagic/magic/Magdir/cddb
deleted file mode 100644
index 5d8a8517e2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cddb
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cddb,v 1.4 2009/09/19 16:28:08 christos Exp $
-# CDDB: file(1) magic for CDDB(tm) format CD text data files
-#
-# From <steve@gracenote.com>
-#
-# This is the /etc/magic entry to decode datafiles as used by
-# CDDB-enabled CD player applications.
-#
-
-0 search/1/w #\040xmcd CDDB(tm) format CD text data
diff --git a/contrib/libs/libmagic/magic/Magdir/chord b/contrib/libs/libmagic/magic/Magdir/chord
deleted file mode 100644
index 00d0bec65a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/chord
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: chord,v 1.5 2010/09/20 19:19:16 rrt Exp $
-# chord: file(1) magic for Chord music sheet typesetting utility input files
-#
-# From Philippe De Muyter <phdm@macqel.be>
-# File format is actually free, but many distributed files begin with `{title'
-#
-0 string {title Chord text file
-
-# Type: PowerTab file format
-# URL: http://www.power-tab.net/
-# From: Jelmer Vernooij <jelmer@samba.org>
-0 string ptab\003\000 Power-Tab v3 Tablature File
-0 string ptab\004\000 Power-Tab v4 Tablature File
diff --git a/contrib/libs/libmagic/magic/Magdir/cisco b/contrib/libs/libmagic/magic/Magdir/cisco
deleted file mode 100644
index 0279bbb5b5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cisco
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cisco,v 1.4 2009/09/19 16:28:08 christos Exp $
-# cisco: file(1) magic for cisco Systems routers
-#
-# Most cisco file-formats are covered by the generic elf code
-#
-# Microcode files are non-ELF, 0x8501 conflicts with NetBSD/alpha.
-0 belong&0xffffff00 0x85011400 cisco IOS microcode
->7 string >\0 for '%s'
-0 belong&0xffffff00 0x8501cb00 cisco IOS experimental microcode
->7 string >\0 for '%s'
diff --git a/contrib/libs/libmagic/magic/Magdir/citrus b/contrib/libs/libmagic/magic/Magdir/citrus
deleted file mode 100644
index 1801a55fa6..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/citrus
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: citrus,v 1.5 2021/01/04 19:48:31 christos Exp $
-# citrus locale declaration
-#
-
-0 string RuneCT Citrus locale declaration for LC_CTYPE
-0 string CtrsME Citrus locale declaration for LC_MESSAGES
-0 string CtrsMO Citrus locale declaration for LC_MONETARY
-0 string CtrsNU Citrus locale declaration for LC_NUMERIC
-0 string CtrsTI Citrus locale declaration for LC_TIME
-
diff --git a/contrib/libs/libmagic/magic/Magdir/clarion b/contrib/libs/libmagic/magic/Magdir/clarion
deleted file mode 100644
index 9fa0049dab..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/clarion
+++ /dev/null
@@ -1,27 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: clarion,v 1.5 2014/04/30 21:41:02 christos Exp $
-# clarion: file(1) magic for # Clarion Personal/Professional Developer
-# (v2 and above)
-# From: Julien Blache <jb@jblache.org>
-
-# Database files
-# signature
-0 leshort 0x3343 Clarion Developer (v2 and above) data file
-# attributes
->2 leshort &0x0001 \b, locked
->2 leshort &0x0004 \b, encrypted
->2 leshort &0x0008 \b, memo file exists
->2 leshort &0x0010 \b, compressed
->2 leshort &0x0040 \b, read only
-# number of records
->5 lelong x \b, %d records
-
-# Memo files
-0 leshort 0x334d Clarion Developer (v2 and above) memo data
-
-# Key/Index files
-# No magic? :(
-
-# Help files
-0 leshort 0x49e0 Clarion Developer (v2 and above) help data
diff --git a/contrib/libs/libmagic/magic/Magdir/claris b/contrib/libs/libmagic/magic/Magdir/claris
deleted file mode 100644
index 6a1b68fb22..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/claris
+++ /dev/null
@@ -1,48 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: claris,v 1.8 2016/07/18 19:23:38 christos Exp $
-# claris: file(1) magic for claris
-# "H. Nanosecond" <aldomel@ix.netcom.com>
-# Claris Works a word processor, etc.
-# Version 3.0
-
-# .pct claris works clip art files
-#0000000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000 000
-#*
-#0001000 #010 250 377 377 377 377 000 213 000 230 000 021 002 377 014 000
-#null to byte 1000 octal
-514 string \377\377\377\377\000
->0 string \0\0\0\0\0\0\0\0\0\0\0\0\0 Claris clip art
-514 string \377\377\377\377\001
->0 string \0\0\0\0\0\0\0\0\0\0\0\0\0 Claris clip art
-
-# Claris works files
-# .cwk
-# Moved to Apple AppleWorks document
-#0 string \002\000\210\003\102\117\102\117\000\001\206 Claris works document
-# .plt
-0 string \020\341\000\000\010\010 Claris Works palette files .plt
-
-# .msp a dictionary file I am not sure about this I have only one .msp file
-0 string \002\271\262\000\040\002\000\164 Claris works dictionary
-
-# .usp are user dictionary bits
-# I am not sure about a magic header:
-#0000000 001 123 160 146 070 125 104 040 136 123 015 012 160 157 144 151
-# soh S p f 8 U D sp ^ S cr nl p o d i
-#0000020 141 164 162 151 163 164 040 136 123 015 012 144 151 166 040 043
-# a t r i s t sp ^ S cr nl d i v sp #
-
-# .mth Thesaurus
-# starts with \0 but no magic header
-
-# .chy Hyphenation file
-# I am not sure: 000 210 034 000 000
-
-# other claris files
-#./windows/claris/useng.ndx: data
-#./windows/claris/xtndtran.l32: data
-#./windows/claris/xtndtran.lst: data
-#./windows/claris/clworks.lbl: data
-#./windows/claris/clworks.prf: data
-#./windows/claris/userd.spl: data
diff --git a/contrib/libs/libmagic/magic/Magdir/clipper b/contrib/libs/libmagic/magic/Magdir/clipper
deleted file mode 100644
index 484caeb89e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/clipper
+++ /dev/null
@@ -1,65 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: clipper,v 1.9 2020/12/15 23:57:27 christos Exp $
-# clipper: file(1) magic for Intergraph (formerly Fairchild) Clipper.
-#
-# XXX - what byte order does the Clipper use?
-#
-# XXX - what's the "!" stuff:
-#
-# >18 short !074000,000000 C1 R1
-# >18 short !074000,004000 C2 R1
-# >18 short !074000,010000 C3 R1
-# >18 short !074000,074000 TEST
-#
-# I shall assume it's ANDing the field with the first value and
-# comparing it with the second, and rewrite it as:
-#
-# >18 short&074000 000000 C1 R1
-# >18 short&074000 004000 C2 R1
-# >18 short&074000 010000 C3 R1
-# >18 short&074000 074000 TEST
-#
-# as SVR3.1's "file" doesn't support anything of the "!074000,000000"
-# sort, nor does SunOS 4.x, so either it's something Intergraph added
-# in CLIX, or something AT&T added in SVR3.2 or later, or something
-# somebody else thought was a good idea; it's not documented in the
-# man page for this version of "magic", nor does it appear to be
-# implemented (at least not after I blew off the bogus code to turn
-# old-style "&"s into new-style "&"s, which just didn't work at all).
-#
-0 short 0575 CLIPPER COFF executable (VAX #)
->20 short 0407 (impure)
->20 short 0410 (5.2 compatible)
->20 short 0411 (pure)
->20 short 0413 (demand paged)
->20 short 0443 (target shared library)
->12 long >0 not stripped
->22 short >0 - version %d
-0 short 0577 CLIPPER COFF executable
->18 short&074000 000000 C1 R1
->18 short&074000 004000 C2 R1
->18 short&074000 010000 C3 R1
->18 short&074000 074000 TEST
->20 short 0407 (impure)
->20 short 0410 (pure)
->20 short 0411 (separate I&D)
->20 short 0413 (paged)
->20 short 0443 (target shared library)
->12 long >0 not stripped
->22 short >0 - version %d
->48 long&01 01 alignment trap enabled
->52 byte 1 -Ctnc
->52 byte 2 -Ctsw
->52 byte 3 -Ctpw
->52 byte 4 -Ctcb
->53 byte 1 -Cdnc
->53 byte 2 -Cdsw
->53 byte 3 -Cdpw
->53 byte 4 -Cdcb
->54 byte 1 -Csnc
->54 byte 2 -Cssw
->54 byte 3 -Cspw
->54 byte 4 -Cscb
-#4 string pipe CLIPPER instruction trace
-#4 string prof CLIPPER instruction profile
diff --git a/contrib/libs/libmagic/magic/Magdir/clojure b/contrib/libs/libmagic/magic/Magdir/clojure
deleted file mode 100644
index 1f1cddf9a2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/clojure
+++ /dev/null
@@ -1,30 +0,0 @@
-#------------------------------------------------------------------------------
-# file: file(1) magic for Clojure
-# URL: https://clojure.org/
-# From: Jason Felice <jason.m.felice@gmail.com>
-
-0 string/w #!\ /usr/bin/clj Clojure script text executable
-!:mime text/x-clojure
-0 string/w #!\ /usr/local/bin/clj Clojure script text executable
-!:mime text/x-clojure
-0 string/w #!\ /usr/bin/clojure Clojure script text executable
-!:mime text/x-clojure
-0 string/w #!\ /usr/local/bin/clojure Clojure script text executable
-!:mime text/x-clojure
-0 string/W #!/usr/bin/env\ clj Clojure script text executable
-!:mime text/x-clojure
-0 string/W #!/usr/bin/env\ clojure Clojure script text executable
-!:mime text/x-clojure
-0 string/W #!\ /usr/bin/env\ clj Clojure script text executable
-!:mime text/x-clojure
-0 string/W #!\ /usr/bin/env\ clojure Clojure script text executable
-!:mime text/x-clojure
-
-0 regex \^\\\(ns[[:space:]]+[a-z] Clojure module source text
-!:mime text/x-clojure
-
-0 regex \^\\\(ns[[:space:]]+\\\^\\{: Clojure module source text
-!:mime text/x-clojure
-
-0 regex \^\\\(defn-?[[:space:]] Clojure module source text
-!:mime text/x-clojure
diff --git a/contrib/libs/libmagic/magic/Magdir/coff b/contrib/libs/libmagic/magic/Magdir/coff
deleted file mode 100644
index 5123b7213c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/coff
+++ /dev/null
@@ -1,98 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: coff,v 1.7 2022/11/21 22:30:22 christos Exp $
-# coff: file(1) magic for Common Object Files not specific to known cpu types or manufacturers
-#
-# COFF
-#
-# by Joerg Jenderek at Oct 2015, Feb 2021
-# https://en.wikipedia.org/wiki/COFF
-# https://de.wikipedia.org/wiki/Common_Object_File_Format
-# http://www.delorie.com/djgpp/doc/coff/filhdr.html
-
-# display name+variables+flags of Common Object Files Format (32bit)
-# May also be used in adi,att3b,clipper,hitachi-sh,hp,ibm6000,intel,
-# mips,motorola,msdos,osf1,sharc,varied.out,vax
-0 name display-coff
-# test for unused flag bits (0x8000,0x0800,0x0400,0x0200,0x0080) in f_flags
->18 uleshort&0x8E80 0
-# skip DOCTOR.DAILY READER.NDA REDBOX.ROOT by looking for positive number of sections
->>2 uleshort >0
-# skip ega80woa.fnt svgafix.fnt HP3FNTS1.DAT HP3FNTS2.DAT INTRO.ACT LEARN.PIF by looking for low number of sections
->>>2 uleshort <4207
->>>>0 clear x
-# f_magic - magic number
-# DJGPP, 80386 COFF executable, MS Windows COFF Intel 80386 object file (./intel)
->>>>0 uleshort 0x014C Intel 80386
-# Hitachi SH big-endian COFF (./hitachi-sh)
->>>>0 uleshort 0x0500 Hitachi SH big-endian
-# Hitachi SH little-endian COFF (./hitachi-sh)
->>>>0 uleshort 0x0550 Hitachi SH little-endian
-# executable (RISC System/6000 V3.1) or obj module (./ibm6000)
-#>>>>0 uleshort 0x01DF
-# MS Windows COFF Intel Itanium, AMD64
-# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680313(v=vs.85).aspx
->>>>0 uleshort 0x0200 Intel ia64
->>>>0 uleshort 0x8664 Intel amd64
-# ARM COFF (./arm)
->>>>0 uleshort 0xaa64 Aarch64
->>>>0 uleshort 0x01c0 ARM
->>>>0 uleshort 0xa641 ARM64EC
->>>>0 uleshort 0x01c2 ARM Thumb
->>>>0 uleshort 0x01c4 ARMv7 Thumb
-# TODO for other COFFs
-#>>>>0 uleshort 0xABCD COFF_TEMPLATE
->>>>0 default x
->>>>>0 uleshort x type %#04x
->>>>0 uleshort x COFF
-# F_EXEC flag bit
->>>>18 leshort ^0x0002 object file
-!:mime application/x-coff
-!:ext o/obj/lib
-# no cof sample found
-#!:ext cof/o/obj/lib
->>>>18 leshort &0x0002 executable
-#!:mime application/x-coffexec
-# F_RELFLG flag bit,static object
->>>>18 leshort &0x0001 \b, no relocation info
-# F_LNNO flag bit
->>>>18 leshort &0x0004 \b, no line number info
-# F_LSYMS flag bit
->>>>18 leshort &0x0008 \b, stripped
->>>>18 leshort ^0x0008 \b, not stripped
-# flags in other COFF versions
-#0x0010 F_FDPR_PROF
-#0x0020 F_FDPR_OPTI
-#0x0040 F_DSA
-# F_AR32WR flag bit
-#>>>>18 leshort &0x0100 \b, 32 bit little endian
-#0x1000 F_DYNLOAD
-#0x2000 F_SHROBJ
-#0x4000 F_LOADONLY
-# f_nscns - number of sections like: 1 2 3 4 5 7 8 9 11 12 15 16 19 20 21 22 26 30 36 40 42 56 80 89 96 124
->>>>2 uleshort <2 \b, %u section
->>>>2 uleshort >1 \b, %u sections
-# f_symptr - symbol table pointer, only for not stripped
-# like: 0 0x7c 0xf4 0x104 0x182 0x1c2 0x1c6 0x468 0x948 0x416e 0x149a6 0x1c9d8 0x23a68 0x35120 0x7afa0
->>>>8 ulelong >0 \b, symbol offset=%#x
-# f_nsyms - number of symbols, only for not stripped
-# like: 0 2 7 9 10 11 20 35 41 63 71 80 105 146 153 158 170 208 294 572 831 1546
->>>>12 ulelong >0 \b, %d symbols
-# f_opthdr - optional header size. An object file should have a value of 0
->>>>16 uleshort >0 \b, optional header size %u
-# f_timdat - file time & date stamp only for little endian
->>>>4 ledate >0 \b, created %s
-# at offset 20 can be optional header, extra bytes FILHSZ-20 because
-# do not rely on sizeof(FILHDR) to give the correct size for header.
-# or first section header
-# additional variables for other COFF files
->>>>16 uleshort =0
-# first section name s_name[8] like: .text .data .debug$S .drectve .testseg
->>>>>20 string x \b, 1st section name "%.8s"
-# >20 beshort 0407 (impure)
-# >20 beshort 0410 (pure)
-# >20 beshort 0413 (demand paged)
-# >20 beshort 0421 (standalone)
-# >22 leshort >0 - version %d
-# >168 string .lowmem Apple toolbox
-
diff --git a/contrib/libs/libmagic/magic/Magdir/commands b/contrib/libs/libmagic/magic/Magdir/commands
deleted file mode 100644
index 6ad87fd757..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/commands
+++ /dev/null
@@ -1,201 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: commands,v 1.73 2022/11/06 18:39:23 christos Exp $
-# commands: file(1) magic for various shells and interpreters
-#
-#0 string/w : shell archive or script for antique kernel text
-0 string/fwt #!\ /bin/sh POSIX shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /bin/sh POSIX shell script executable (binary data)
-!:mime text/x-shellscript
->10 string #\040This\040script\040was\040generated\040using\040Makeself \b, self-executable archive
->>53 string x \b, Makeself %s
-
-0 string/fwt #!\ /bin/csh C shell script text executable
-!:mime text/x-shellscript
-
-# korn shell magic, sent by George Wu, gwu@clyde.att.com
-0 string/fwt #!\ /bin/ksh Korn shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /bin/ksh Korn shell script executable (binary data)
-!:mime text/x-shellscript
-
-0 string/fwt #!\ /bin/tcsh Tenex C shell script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/tcsh Tenex C shell script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/tcsh Tenex C shell script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bin/tcsh Tenex C shell script text executable
-!:mime text/x-shellscript
-
-#
-# zsh/ash/ae/nawk/gawk magic from cameron@cs.unsw.oz.au (Cameron Simpson)
-0 string/fwt #!\ /bin/zsh Paul Falstad's zsh script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/zsh Paul Falstad's zsh script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bin/zsh Paul Falstad's zsh script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/env\ zsh Paul Falstad's zsh script text executable
-!:mime text/x-shellscript
-
-0 string/fwt #!\ /bin/ash Neil Brown's ash script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/ash Neil Brown's ash script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bin/ash Neil Brown's ash script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bin/ae Neil Brown's ae script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /bin/nawk new awk script text executable
-!:mime text/x-nawk
-0 string/fwt #!\ /usr/bin/nawk new awk script text executable
-!:mime text/x-nawk
-0 string/fwt #!\ /usr/local/bin/nawk new awk script text executable
-!:mime text/x-nawk
-0 string/fwt #!\ /bin/gawk GNU awk script text executable
-!:mime text/x-gawk
-0 string/wt #!\ /usr/bin/gawk GNU awk script text executable
-!:mime text/x-gawk
-0 string/fwt #!\ /usr/local/bin/gawk GNU awk script text executable
-!:mime text/x-gawk
-#
-0 string/fwt #!\ /bin/awk awk script text executable
-!:mime text/x-awk
-0 string/fwt #!\ /usr/bin/awk awk script text executable
-!:mime text/x-awk
-0 regex/4096 =^[\040\t\f\r\n]{0,100}BEGIN[\040\t\f\r\n]{0,100}[{] awk or perl script text
-
-# AT&T Bell Labs' Plan 9 shell
-0 string/fwt #!\ /bin/rc Plan 9 rc shell script text executable
-
-# bash shell magic, from Peter Tobias (tobias@server.et-inf.fho-emden.de)
-0 string/fwt #!\ /bin/bash Bourne-Again shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /bin/bash Bourne-Again shell script executable (binary data)
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/bash Bourne-Again shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /usr/bin/bash Bourne-Again shell script executable (binary data)
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bash Bourne-Again shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /usr/local/bash Bourne-Again shell script executable (binary data)
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/local/bin/bash Bourne-Again shell script text executable
-!:mime text/x-shellscript
-0 string/fwb #!\ /usr/local/bin/bash Bourne-Again shell script executable (binary data)
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/env\ bash Bourne-Again shell script text executable
-!:mime text/x-shellscript
-
-# Fish shell magic
-# From: Benjamin Lowry <ben@ben.gmbh>
-0 string/fwt #!\ /usr/local/bin/fish fish shell script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/fish fish shell script text executable
-!:mime text/x-shellscript
-0 string/fwt #!\ /usr/bin/env\ fish fish shell script text executable
-!:mime text/x-shellscript
-
-0 search/1/fwt #!\ /usr/bin/tclsh Tcl/Tk script text executable
-!:mime text/x-tcl
-
-0 search/1/fwt #!\ /usr/bin/texlua LuaTex script text executable
-!:mime text/x-luatex
-
-0 search/1/fwt #!\ /usr/bin/luatex LuaTex script text executable
-!:mime text/x-luatex
-
-0 search/1/fwt #!\ /usr/bin/stap Systemtap script text executable
-!:mime text/x-systemtap
-
-# From: Kylie McClain <kylie@somas.is>
-# Type: execline scripts
-# URL: https://skarnet.org/software/execline/
-0 string/fwt #!\ /command/execlineb execline script text executable
-!:mime text/x-execline
-0 string/fwt #!\ /bin/execlineb execline script text executable
-!:mime text/x-execline
-0 string/fwt #!\ /usr/bin/execlineb execline script text executable
-!:mime text/x-execline
-0 string/fwt #!\ /usr/bin/env\ execlineb execline script text executable
-!:mime text/x-execline
-
-0 string #!
->0 regex \^#!.*/bin/execlineb([[:space:]].*)*$ execline script text executable
-!:mime text/x-execline
-
-# PHP scripts
-# Ulf Harnhammar <ulfh@update.uu.se>
-0 search/1/c =<?php PHP script text
-!:strength + 30
-!:mime text/x-php
-0 search/1 =<?\n PHP script text
-!:mime text/x-php
-0 search/1 =<?\r PHP script text
-!:mime text/x-php
-0 search/1/w #!\ /usr/local/bin/php PHP script text executable
-!:strength + 10
-!:mime text/x-php
-0 search/1/w #!\ /usr/bin/php PHP script text executable
-!:strength + 10
-!:mime text/x-php
-# Smarty compiled template, https://www.smarty.net/
-# Elan Ruusamae <glen@delfi.ee>
-0 string =<?php
->5 regex [\ \n]
->>6 string /*\ Smarty\ version Smarty compiled template
->>>24 regex [0-9.]+ \b, version %s
-!:mime text/x-php
-
-0 string Zend\x00 PHP script Zend Optimizer data
-
-# From: Anatol Belski <ab@php.net>
-0 string OPCACHE
->7 ubyte 0 PHP opcache filecache data
-
-0 search/64 --TEST--
->16 search/64 --FILE--
->24 search/8192 --EXPECT PHP core test
-!:ext phpt
-
-# https://www.php.net/manual/en/phar.fileformat.signature.php
--4 string GBMB PHP phar archive
->-8 ubyte 0x1 with MD5 signature
-!:ext phar
->-8 ubyte 0x2 with SHA1 signature
-!:ext phar
->-8 ubyte 0x3 with SHA256 signature
-!:ext phar
->-8 ubyte 0x4 with SHA512 signature
-!:ext phar
->-8 ubyte 0x10 with OpenSSL signature
-!:ext phar
->-8 ubyte 0x11 with OpenSSL SHA256 signature
-!:ext phar
->-8 ubyte 0x12 with OpenSSL SHA512 signature
-!:ext phar
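
The phar rules above key on the trailing "GBMB" marker and read the byte eight bytes from the end of the file to name the signature algorithm. A minimal Python sketch of the same check (the function name and the mapping dict are mine, not part of the magic file; only the low byte of the 4-byte signature flag is inspected, mirroring ">-8 ubyte"):

SIG_NAMES = {0x1: "MD5", 0x2: "SHA1", 0x3: "SHA256", 0x4: "SHA512",
             0x10: "OpenSSL", 0x11: "OpenSSL SHA256", 0x12: "OpenSSL SHA512"}

def phar_signature(path):
    with open(path, "rb") as f:
        f.seek(-8, 2)              # last 8 bytes: signature flag + "GBMB"
        tail = f.read(8)
    if tail[4:] != b"GBMB":
        return None                # not a phar archive
    return SIG_NAMES.get(tail[0], "unknown")
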
-
-0 string/t $! DCL command file
-
-# Type: Pdmenu
-# URL: https://packages.debian.org/pdmenu
-# From: Edward Betts <edward@debian.org>
-0 string #!/usr/bin/pdmenu Pdmenu configuration file text
-
-# From Danny Weldon
-0 string \x0b\x13\x08\x00
->0x04 uleshort <4 ksh byte-code version %d
-
-# From: arno <arenevier@fdn.fr>
-# mozilla xpconnect typelib
-# see https://www.mozilla.org/scriptable/typelib_file.html
-0 string XPCOM\nTypeLib\r\n\032 XPConnect Typelib
->0x10 byte x version %d
->>0x11 byte x \b.%d
-
-0 string/fwt #!\ /usr/bin/env\ runghc GHC script executable
-0 string/fwt #!\ /usr/bin/env\ runhaskell Haskell script executable
-0 string/fwt #!\ /usr/bin/env\ julia Julia script executable
diff --git a/contrib/libs/libmagic/magic/Magdir/communications b/contrib/libs/libmagic/magic/Magdir/communications
deleted file mode 100644
index 8e1d908b67..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/communications
+++ /dev/null
@@ -1,22 +0,0 @@
-
-#----------------------------------------------------------------------------
-# $File: communications,v 1.5 2009/09/19 16:28:08 christos Exp $
-# communication
-
-# TTCN is the Tree and Tabular Combined Notation described in ISO 9646-3.
-# It is used for conformance testing of communication protocols.
-# Added by W. Borgert <debacle@debian.org>.
-0 string $Suite TTCN Abstract Test Suite
->&1 string $SuiteId
->>&1 string >\n %s
->&2 string $SuiteId
->>&1 string >\n %s
->&3 string $SuiteId
->>&1 string >\n %s
-
-# MSC (message sequence charts) are a formal description technique,
-# described in ITU-T Z.120, mainly used for communication protocols.
-# Added by W. Borgert <debacle@debian.org>.
-0 string mscdocument Message Sequence Chart (document)
-0 string msc Message Sequence Chart (chart)
-0 string submsc Message Sequence Chart (subchart)
diff --git a/contrib/libs/libmagic/magic/Magdir/compress b/contrib/libs/libmagic/magic/Magdir/compress
deleted file mode 100644
index c3f93fa3be..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/compress
+++ /dev/null
@@ -1,461 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: compress,v 1.91 2023/06/16 19:37:47 christos Exp $
-# compress: file(1) magic for pure-compression formats (no archives)
-#
-# compress, gzip, pack, compact, huf, squeeze, crunch, freeze, yabba, etc.
-#
-# Formats for various forms of compressed data
-# Formats for "compress" proper have been moved into "compress.c",
-# because it tries to uncompress it to figure out what's inside.
-
-# standard unix compress
-0 string \037\235 compress'd data
-!:mime application/x-compress
-!:apple LZIVZIVU
-!:ext Z
->2 byte&0x80 >0 block compressed
->2 byte&0x1f x %d bits
-
-# gzip (GNU zip, not to be confused with Info-ZIP or PKWARE zip archiver)
-# URL: https://en.wikipedia.org/wiki/Gzip
-# Reference: https://tools.ietf.org/html/rfc1952
-# Update: Joerg Jenderek, Apr 2019, Dec 2022
-# Edited by Chris Chittleborough <cchittleborough@yahoo.com.au>, March 2002
-# * Original filename is only at offset 10 if "extra field" absent
-# * Produce shorter output - notably, only report compression methods
-# other than 8 ("deflate", the only method defined in RFC 1952).
-# Note: find defs -iname '*.trid.xml' -exec grep -q '<Bytes>1F8B08' {} \; -ls
-# TODO:
-# FBR Blueberry FlashBack screen Record https://www.flashbackrecorder.com/
-# KPR KOffice/Calligra KPresenter application/x-kpresenter
-# KPT KOffice/Calligra KPresenter template? application/x-kpresenter
-# SAV Diggles Saved Game File http://www.innonics.com
-# SAV FarCry (demo) saved game http://www.farcry-thegame.com
-# DAT ZOAGZIP game data format http://en.wikipedia.org/wiki/SD_Gundam_Capsule_Fighter
-0 string \037\213
-# to display gzip compressed (strength=100=2*50) before other (strength=50)?
-#!:strength * 2
-# no FNAME and FCOMMENT bit implies no file name/comment. That means only binary
->3 byte&0x18 =0
-# For binary gzipped no ASCII text should occur
-# mcd-monu-cad.trid.xml
->>10 string MCD Monu-Cad Drawing, Component or Font
-#>>36 string Created\ with\ MONU-CAD
-#!:mime application/octet-stream
-# http://fileformats.archiveteam.org/wiki/Monu-CAD
-# http://www.monucad.com/downloads/FullDemo-2005.EXE
-# /HANDS96.MCC Component
-# /DEMO_DD01.MCD Drawing
-# /MCALF020.FNT Font
-!:ext mcc/mcd/fnt
-# http://www.generalcadd.com
->>10 string GXD General CADD, Drawing or Component
-#!:mime application/octet-stream
-# /gxc/BUILDINGEDGE.gxc Component
-# /gxd/HOCKETT-STPAUL-WRHSE.gxd Drawing
-# /gxd/POWERLAND-MILL-ADD-11.gxd Drawing v9.1.06
-!:ext gxc/gxd
-#>>>13 ubyte 0 \b, version 0
->>>13 string 09 \b, version 9
-# other gzipped binary like gzipped tar, VirtualBox extension package,...
->>10 default x gzip compressed data
-!:mime application/gzip
->>>0 use gzip-info
-# size of the original (uncompressed) input data modulo 2^32
-# TODO: check for GXD MCD cad the reported size
->>>-4 ulelong x \b, original size modulo 2^32 %u
-# gzipped TAR or VirtualBox extension package
-#!:mime application/x-compressed-tar
-#!:mime application/x-virtualbox-vbox-extpack
-# https://www.w3.org/TR/SVG/mimereg.html
-#!:mime image/svg+xml-compressed
-# zlib.3.gz
-# microcode-20180312.tgz
-# tpz same as tgz
-# lua-md5_1.2-1_i386_i486.ipk https://en.wikipedia.org/wiki/Opkg
-# Oracle_VM_VirtualBox_Extension_Pack-5.0.12-104815.vbox-extpack
-# trees.blend http://fileformats.archiveteam.org/wiki/BLEND
-# 2020-07-19-Note-16-24.xoj https://xournal.sourceforge.net/manual.html
-# MYgnucash-gz.gnucash https://wiki.gnucash.org/wiki/GnuCash_XML_format
-# text-rotate.dia https://en.wikipedia.org/wiki/Dia_(software)
-# MYrdata.RData https://en.wikipedia.org/wiki/R_(programming_language)
-!:ext gz/tgz/tpz/ipk/vbox-extpack/svgz/blend/dia/gnucash/rdata/xoj
-# FNAME/FCOMMENT bit implies file name/comment as iso-8859-1 text
->3 byte&0x18 >0 gzip compressed data
-!:mime application/gzip
-# gzipped tar, gzipped Abiword document
-#!:mime application/x-compressed-tar
-#!:mime application/x-abiword-compressed
-#!:mime image/image/svg+xml-compressed
-# kleopatra_splashscreen.svgz gzipped .svg
-# RSI-Mega-Demo_Disk1.adz gzipped .adf http://fileformats.archiveteam.org/wiki/ADF_(Amiga)
-# PostbankTest.kmy gzipped XML https://docs.kde.org/stable5/en/kmymoney/kmymoney/details.formats.compressed.html
-# Logo.xcfgz gzipped .xcf http://fileformats.archiveteam.org/wiki/XCF
-!:ext gz/tgz/tpz/zabw/svgz/adz/kmy/xcfgz
->>0 use gzip-info
-# size of the original (uncompressed) input data modulo 2^32
->>-4 ulelong x \b, original size modulo 2^32 %u
-# display information of gzip compressed files
-0 name gzip-info
-#>2 byte x THIS iS GZIP
->2 byte <8 \b, reserved method
->2 byte >8 \b, unknown method
->3 byte &0x01 \b, ASCII
->3 byte &0x02 \b, has CRC
->3 byte &0x04 \b, extra field
->3 byte&0xC =0x08
->>10 string x \b, was "%s"
->3 byte &0x10 \b, has comment
->3 byte &0x20 \b, encrypted
->4 ledate >0 \b, last modified: %s
->8 byte 2 \b, max compression
->8 byte 4 \b, max speed
->9 byte =0x00 \b, from FAT filesystem (MS-DOS, OS/2, NT)
->9 byte =0x01 \b, from Amiga
->9 byte =0x02 \b, from VMS
->9 byte =0x03 \b, from Unix
->9 byte =0x04 \b, from VM/CMS
->9 byte =0x05 \b, from Atari
->9 byte =0x06 \b, from HPFS filesystem (OS/2, NT)
->9 byte =0x07 \b, from MacOS
->9 byte =0x08 \b, from Z-System
->9 byte =0x09 \b, from CP/M
->9 byte =0x0A \b, from TOPS/20
->9 byte =0x0B \b, from NTFS filesystem (NT)
->9 byte =0x0C \b, from QDOS
->9 byte =0x0D \b, from Acorn RISCOS
-# size of the original (uncompressed) input data modulo 2^32
-#>-4 ulelong x \b, original size modulo 2^32 %u
-#ERROR: line 114: non zero offset 1048572 at level 1
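
The gzip-info subroutine above walks the fixed 10-byte gzip header: the FLG byte at offset 3, MTIME at offset 4, the XFL byte at 8, the OS byte at 9, and the ISIZE trailer in the last four bytes. A rough Python equivalent for a single-member file, as a hedged sketch (helper name and dictionary keys are mine):

import struct, datetime

OS_NAMES = {0: "FAT filesystem (MS-DOS, OS/2, NT)", 1: "Amiga", 2: "VMS",
            3: "Unix", 4: "VM/CMS", 5: "Atari", 6: "HPFS filesystem (OS/2, NT)",
            7: "MacOS", 8: "Z-System", 9: "CP/M", 10: "TOPS/20",
            11: "NTFS filesystem (NT)", 12: "QDOS", 13: "Acorn RISCOS"}

def gzip_info(path):
    with open(path, "rb") as f:
        hdr = f.read(10)
        if hdr[:2] != b"\x1f\x8b":
            return None
        flg = hdr[3]
        mtime = struct.unpack("<I", hdr[4:8])[0]
        info = {"ascii": bool(flg & 0x01), "has_crc": bool(flg & 0x02),
                "extra_field": bool(flg & 0x04), "has_comment": bool(flg & 0x10),
                "encrypted": bool(flg & 0x20), "os": OS_NAMES.get(hdr[9], "unknown")}
        # The original name follows the header only when FNAME is set and FEXTRA
        # is not, which is what ">3 byte&0xC =0x08" tests above.
        if flg & 0x0C == 0x08:
            name = b""
            while (c := f.read(1)) not in (b"", b"\x00"):
                name += c
            info["was"] = name.decode("latin-1")
        if mtime:
            info["last_modified"] = datetime.datetime.fromtimestamp(mtime)
        info["max_compression"] = hdr[8] == 2
        info["max_speed"] = hdr[8] == 4
        # ISIZE: size of the original input modulo 2**32, stored in the last 4 bytes.
        f.seek(-4, 2)
        info["original_size_mod_2_32"] = struct.unpack("<I", f.read(4))[0]
        return info
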
-
-# packed data, Huffman (minimum redundancy) codes on a byte-by-byte basis
-0 string \037\036 packed data
-!:mime application/octet-stream
-!:ext z
->2 belong >1 \b, %d characters originally
->2 belong =1 \b, %d character originally
-#
-# This magic number is byte-order-independent.
-0 short 0x1f1f old packed data
-!:mime application/octet-stream
-
-# XXX - why *two* entries for "compacted data", one of which is
-# byte-order independent, and one of which is byte-order dependent?
-#
-0 short 0x1fff compacted data
-!:mime application/octet-stream
-# This string is valid for SunOS (BE) and a matching "short" is listed
-# in the Ultrix (LE) magic file.
-0 string \377\037 compacted data
-!:mime application/octet-stream
-0 short 0145405 huf output
-!:mime application/octet-stream
-
-# bzip2
-0 string BZh bzip2 compressed data
-!:mime application/x-bzip2
-!:ext bz2
->3 byte >47 \b, block size = %c00k
-
-# bzip a block-sorting file compressor
-# by Julian Seward <sewardj@cs.man.ac.uk> and others
-0 string BZ0 bzip compressed data
-!:mime application/x-bzip
->3 byte >47 \b, block size = %c00k
-
-# lzip
-0 string LZIP lzip compressed data
-!:mime application/x-lzip
-!:ext lz
->4 byte x \b, version: %d
-
-# squeeze and crunch
-# Michael Haardt <michael@cantor.informatik.rwth-aachen.de>
-0 beshort 0x76FF squeezed data,
->4 string x original name %s
-0 beshort 0x76FE crunched data,
->2 string x original name %s
-0 beshort 0x76FD LZH compressed data,
->2 string x original name %s
-
-# Freeze
-0 string \037\237 frozen file 2.1
-0 string \037\236 frozen file 1.0 (or gzip 0.5)
-
-# SCO compress -H (LZH)
-0 string \037\240 SCO compress -H (LZH) data
-
-# European GSM 06.10 is a provisional standard for full-rate speech
-# transcoding, prI-ETS 300 036, which uses RPE/LTP (residual pulse
-# excitation/long term prediction) coding at 13 kbit/s.
-#
-# There's only a magic nibble (4 bits); that nibble repeats every 33
-# bytes. This isn't suited for use, but maybe we can use it someday.
-#
-# This will cause very short GSM files to be declared as data and
-# mismatches to be declared as data too!
-#0 byte&0xF0 0xd0 data
-#>33 byte&0xF0 0xd0
-#>66 byte&0xF0 0xd0
-#>99 byte&0xF0 0xd0
-#>132 byte&0xF0 0xd0 GSM 06.10 compressed audio
-
-# lzop from <markus.oberhumer@jk.uni-linz.ac.at>
-0 string \x89\x4c\x5a\x4f\x00\x0d\x0a\x1a\x0a lzop compressed data
-!:ext lzo
->9 beshort <0x0940
->>9 byte&0xf0 =0x00 - version 0.
->>9 beshort&0x0fff x \b%03x,
->>13 byte 1 LZO1X-1,
->>13 byte 2 LZO1X-1(15),
->>13 byte 3 LZO1X-999,
-## >>22 bedate >0 last modified: %s,
->>14 byte =0x00 os: MS-DOS
->>14 byte =0x01 os: Amiga
->>14 byte =0x02 os: VMS
->>14 byte =0x03 os: Unix
->>14 byte =0x05 os: Atari
->>14 byte =0x06 os: OS/2
->>14 byte =0x07 os: MacOS
->>14 byte =0x0A os: Tops/20
->>14 byte =0x0B os: WinNT
->>14 byte =0x0E os: Win32
->9 beshort >0x0939
->>9 byte&0xf0 =0x00 - version 0.
->>9 byte&0xf0 =0x10 - version 1.
->>9 byte&0xf0 =0x20 - version 2.
->>9 beshort&0x0fff x \b%03x,
->>15 byte 1 LZO1X-1,
->>15 byte 2 LZO1X-1(15),
->>15 byte 3 LZO1X-999,
-## >>25 bedate >0 last modified: %s,
->>17 byte =0x00 os: MS-DOS
->>17 byte =0x01 os: Amiga
->>17 byte =0x02 os: VMS
->>17 byte =0x03 os: Unix
->>17 byte =0x05 os: Atari
->>17 byte =0x06 os: OS/2
->>17 byte =0x07 os: MacOS
->>17 byte =0x0A os: Tops/20
->>17 byte =0x0B os: WinNT
->>17 byte =0x0E os: Win32
-
-# 4.3BSD-Quasijarus Strong Compression
-# https://minnie.tuhs.org/Quasijarus/compress.html
-0 string \037\241 Quasijarus strong compressed data
-
-# From: Cory Dikkers <cdikkers@swbell.net>
-0 string XPKF Amiga xpkf.library compressed data
-0 string PP11 Power Packer 1.1 compressed data
-0 string PP20 Power Packer 2.0 compressed data,
->4 belong 0x09090909 fast compression
->4 belong 0x090A0A0A mediocre compression
->4 belong 0x090A0B0B good compression
->4 belong 0x090A0C0C very good compression
->4 belong 0x090A0C0D best compression
-
-# 7-zip archiver, from Thomas Klausner (wiz@danbala.tuwien.ac.at)
-# https://www.7-zip.org or DOC/7zFormat.txt
-#
-0 string 7z\274\257\047\034 7-zip archive data,
->6 byte x version %d
->7 byte x \b.%d
-!:mime application/x-7z-compressed
-!:ext 7z/cb7
-
-0 name lzma LZMA compressed data,
-!:mime application/x-lzma
-!:ext lzma
->5 lequad =0xffffffffffffffff streamed
->5 lequad !0xffffffffffffffff non-streamed, size %lld
-
-# Type: LZMA
-0 lelong&0xffffff =0x5d
->12 leshort 0xff
->>0 use lzma
->12 leshort 0
->>0 use lzma
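
An LZMA_alone header is 13 bytes: one properties byte, a 4-byte little-endian dictionary size, and an 8-byte little-endian uncompressed size, where all-ones means "size unknown / streamed"; that last distinction is what the two lequad tests in the lzma subroutine above make. A small illustrative parser under those assumptions (names and the lc/lp/pb breakdown are mine, not from the magic file):

import struct

def lzma_alone_header(buf):
    # 13-byte .lzma header: props (1), dict size (4, LE), uncompressed size (8, LE)
    props, dict_size, usize = struct.unpack("<BIQ", buf[:13])
    lc = props % 9                      # literal context bits
    lp = (props // 9) % 5               # literal position bits
    pb = props // 45                    # position bits (0x5D decodes to 3/0/2)
    streamed = usize == 0xFFFFFFFFFFFFFFFF
    return {"lc": lc, "lp": lp, "pb": pb, "dict_size": dict_size,
            "streamed": streamed, "size": None if streamed else usize}
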
-
-# http://tukaani.org/xz/xz-file-format.txt
-0 ustring \xFD7zXZ\x00 XZ compressed data, checksum
-!:strength * 2
-!:mime application/x-xz
-!:ext xz
->7 byte&0xf 0x0 NONE
->7 byte&0xf 0x1 CRC32
->7 byte&0xf 0x4 CRC64
->7 byte&0xf 0xa SHA-256
-
-# https://github.com/ckolivas/lrzip/blob/master/doc/magic.header.txt
-0 string LRZI LRZIP compressed data
-!:mime application/x-lrzip
->4 byte x - version %d
->5 byte x \b.%d
->22 byte 1 \b, encrypted
-
-# https://fastcompression.blogspot.fi/2013/04/lz4-streaming-format-final.html
-0 lelong 0x184d2204 LZ4 compressed data (v1.4+)
-!:mime application/x-lz4
-!:ext lz4
-# Added by osm0sis@xda-developers.com
-0 lelong 0x184c2103 LZ4 compressed data (v1.0-v1.3)
-!:mime application/x-lz4
-0 lelong 0x184c2102 LZ4 compressed data (v0.1-v0.9)
-!:mime application/x-lz4
-
-# Zstandard/LZ4 skippable frames
-# https://github.com/facebook/zstd/blob/dev/zstd_compression_format.md
-0 lelong&0xFFFFFFF0 0x184D2A50
->(4.l+8) indirect x
-
-# Zstandard Dictionary ID subroutine
-0 name zstd-dictionary-id
-# Single Segment = True
->0 byte &0x20 \b, Dictionary ID:
->>0 byte&0x03 0 None
->>0 byte&0x03 1
->>>1 byte x %u
->>0 byte&0x03 2
->>>1 leshort x %u
->>0 byte&0x03 3
->>>1 lelong x %u
-# Single Segment = False
->0 byte ^0x20 \b, Dictionary ID:
->>0 byte&0x03 0 None
->>0 byte&0x03 1
->>>2 byte x %u
->>0 byte&0x03 2
->>>2 leshort x %u
->>0 byte&0x03 3
->>>2 lelong x %u
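
The skippable-frame entry above jumps over 8 + Frame_Size bytes and re-runs the magic ("indirect"), while the zstd-dictionary-id subroutine decodes the frame header: the low two bits of the Frame_Header_Descriptor give the Dictionary_ID field width (0, 1, 2 or 4 bytes), and the Single_Segment bit (0x20) decides whether a Window_Descriptor byte sits before it. A hedged Python sketch of the same walk (function name is mine):

import struct

ZSTD_MAGIC = 0xFD2FB528
SKIPPABLE_MASK, SKIPPABLE_MAGIC = 0xFFFFFFF0, 0x184D2A50

def zstd_dictionary_id(buf):
    """Return the Dictionary ID of the first real zstd frame, or None."""
    pos = 0
    while True:
        magic = struct.unpack_from("<I", buf, pos)[0]
        if magic & SKIPPABLE_MASK == SKIPPABLE_MAGIC:
            # skippable frame: 4-byte magic, 4-byte size, then payload
            size = struct.unpack_from("<I", buf, pos + 4)[0]
            pos += 8 + size
            continue
        if magic != ZSTD_MAGIC:
            return None
        fhd = buf[pos + 4]                    # Frame_Header_Descriptor
        did_size = (0, 1, 2, 4)[fhd & 0x03]   # Dictionary_ID field width
        off = pos + 5
        if not fhd & 0x20:                    # Single_Segment not set:
            off += 1                          # a Window_Descriptor byte follows
        if did_size == 0:
            return None
        return int.from_bytes(buf[off:off + did_size], "little")
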
-
-# Zstandard compressed data
-# https://github.com/facebook/zstd/blob/dev/zstd_compression_format.md
-0 lelong 0xFD2FB522 Zstandard compressed data (v0.2)
-!:mime application/zstd
-!:ext zst
-0 lelong 0xFD2FB523 Zstandard compressed data (v0.3)
-!:mime application/zstd
-!:ext zst
-0 lelong 0xFD2FB524 Zstandard compressed data (v0.4)
-!:mime application/zstd
-!:ext zst
-0 lelong 0xFD2FB525 Zstandard compressed data (v0.5)
-!:mime application/zstd
-!:ext zst
-0 lelong 0xFD2FB526 Zstandard compressed data (v0.6)
-!:mime application/zstd
-!:ext zst
-0 lelong 0xFD2FB527 Zstandard compressed data (v0.7)
-!:mime application/zstd
-!:ext zst
->4 use zstd-dictionary-id
-0 lelong 0xFD2FB528 Zstandard compressed data (v0.8+)
-!:mime application/zstd
-!:ext zst
->4 use zstd-dictionary-id
-
-# https://github.com/facebook/zstd/blob/dev/zstd_compression_format.md
-0 lelong 0xEC30A437 Zstandard dictionary
-!:mime application/x-std-dictionary
->4 lelong x (ID %u)
-
-# AFX compressed files (Wolfram Kleff)
-2 string -afx- AFX compressed file data
-
-# Supplementary magic data for the file(1) command to support
-# rzip(1). The format is described in magic(5).
-#
-# Copyright (C) 2003 by Andrew Tridgell. You may do whatever you want with
-# this file.
-#
-0 string RZIP rzip compressed data
->4 byte x - version %d
->5 byte x \b.%d
->6 belong x (%d bytes)
-
-0 string ArC\x01 FreeArc archive <http://freearc.org>
-
-# Type: DACT compressed files
-0 long 0x444354C3 DACT compressed data
->4 byte >-1 (version %i.
->5 byte >-1 %i.
->6 byte >-1 %i)
->7 long >0 , original size: %i bytes
->15 long >30 , block size: %i bytes
-
-# Valve Pack (VPK) files
-0 lelong 0x55aa1234 Valve Pak file
->0x4 lelong x \b, version %u
->0x8 lelong x \b, %u entries
-
-# Snappy framing format
-# https://code.google.com/p/snappy/source/browse/trunk/framing_format.txt
-0 string \377\006\0\0sNaPpY snappy framed data
-!:mime application/x-snappy-framed
-
-# qpress, https://www.quicklz.com/
-0 string qpress10 qpress compressed data
-!:mime application/x-qpress
-
-# Zlib https://www.ietf.org/rfc/rfc6713.txt
-0 string/b x
->0 beshort%31 =0
->>0 byte&0xf =8
->>>0 byte&0x80 =0 zlib compressed data
-!:mime application/zlib
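
These three nested tests implement the RFC 1950 header check: the first two bytes, read big-endian as CMF then FLG, must satisfy (CMF*256 + FLG) mod 31 == 0, the low nibble of CMF must be 8 (deflate), and the top bit of CMF must be clear so the window size stays within spec. The same test as a worked Python example (function name is mine):

def looks_like_zlib(data: bytes) -> bool:
    """Mirror the three tests above on the first two header bytes."""
    if len(data) < 2:
        return False
    cmf, flg = data[0], data[1]
    return ((cmf << 8) + flg) % 31 == 0 \
        and cmf & 0x0F == 8 \
        and cmf & 0x80 == 0

# e.g. the common zlib header 0x78 0x9C passes all three checks:
assert looks_like_zlib(bytes([0x78, 0x9C]))
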
-
-# BWC compression
-0 string BWC
->3 byte 0 BWC compressed data
-
-# UCL compression
-0 bequad 0x00e955434cff011a UCL compressed data
-
-# Softlib archive
-0 string SLIB Softlib archive
->4 leshort x \b, version %d
->6 leshort x (contains %d files)
-
-# URL: https://github.com/lzfse/lzfse/blob/master/src/lzfse_internal.h#L276
-# From: Eric Hall <eric.hall@darkart.com>
-0 string bvx- lzfse encoded, no compression
-0 string bvx1 lzfse compressed, uncompressed tables
-0 string bvx2 lzfse compressed, compressed tables
-0 string bvxn lzfse encoded, lzvn compressed
-
-# pcxLib.exe compression program
-# http://www.shikadi.net/moddingwiki/PCX_Library
-0 string/b pcxLib
->0x0A string/b Copyright\020(c)\020Genus\020Microprogramming,\020Inc. pcxLib compressed
-
-# https://support-docs.illumina.com/SW/ORA_Format_Specification/Content/SW/ORA/ORAFormatSpecification.htm
-0 uleshort 0x7c49
->2 lelong 0x80 ORA FASTQ compressed file
->>6 ulelong x \b, DNA size %u
->>10 ulelong x \b, read names size %u
->>14 ulelong x \b, quality buffer 1 size %u
->>18 ulelong x \b, quality buffer 2 size %u
->>22 ulelong x \b, sequence buffer size %u
->>26 ulelong x \b, N-position buffer size %u
->>30 ulelong x \b, crypto buffer size %u
->>34 ulelong x \b, misc buffer 1 size %u
->>38 ulelong x \b, misc buffer 2 size %u
->>42 ulelong x \b, flags %#x
->>46 lelong x \b, read size %d
->>50 lelong x \b, number of reads %d
->>54 leshort x \b, version %d
-
-# https://github.com/kspalaiologos/bzip3/blob/master/doc/file_format.md
-0 string/b BZ3v1 bzip3 compressed data
->5 ulelong x \b, blocksize %u
-
-
-# https://support-docs.illumina.com/SW/ORA_Format_Specification/Content/\
-# SW/ORA/ORAFormatSpecification.htm
-# From Guillaume Rizk
-0 short =0x7C49 DRAGEN ORA file,
->-261 short =0x7C49 with metadata:
->-125 u8 x NB reads: %llu,
->-109 u8 x NB bases: %llu.
->-219 u4&0x02 2 File contains interleaved paired reads
diff --git a/contrib/libs/libmagic/magic/Magdir/console b/contrib/libs/libmagic/magic/Magdir/console
deleted file mode 100644
index 0ed53fe34d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/console
+++ /dev/null
@@ -1,1226 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: console,v 1.72 2023/06/16 19:24:06 christos Exp $
-# Console game magic
-# Toby Deshane <hac@shoelace.digivill.net>
-
-# ines: file(1) magic for Marat's iNES Nintendo Entertainment System ROM dump format
-# Updated by David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://wiki.nesdev.com/w/index.php/INES
-# - https://wiki.nesdev.com/w/index.php/NES_2.0
-
-# Common header for iNES, NES 2.0, and Wii U iNES.
-0 name nes-rom-image-ines
->7 byte&0x0C =0x8 (NES 2.0)
->4 byte x \b: %ux16k PRG
->5 byte x \b, %ux8k CHR
->6 byte&0x08 =0x8 [4-Scr]
->6 byte&0x09 =0x0 [H-mirror]
->6 byte&0x09 =0x1 [V-mirror]
->6 byte&0x02 =0x2 [SRAM]
->6 byte&0x04 =0x4 [Trainer]
->7 byte&0x03 =0x2 [PC10]
->7 byte&0x03 =0x1 [VS]
->>7 byte&0x0C =0x8
-# NES 2.0: VS PPU
->>>13 byte&0x0F =0x0 \b, RP2C03B
->>>13 byte&0x0F =0x1 \b, RP2C03G
->>>13 byte&0x0F =0x2 \b, RP2C04-0001
->>>13 byte&0x0F =0x3 \b, RP2C04-0002
->>>13 byte&0x0F =0x4 \b, RP2C04-0003
->>>13 byte&0x0F =0x5 \b, RP2C04-0004
->>>13 byte&0x0F =0x6 \b, RP2C03B
->>>13 byte&0x0F =0x7 \b, RP2C03C
->>>13 byte&0x0F =0x8 \b, RP2C05-01
->>>13 byte&0x0F =0x9 \b, RP2C05-02
->>>13 byte&0x0F =0xA \b, RP2C05-03
->>>13 byte&0x0F =0xB \b, RP2C05-04
->>>13 byte&0x0F =0xC \b, RP2C05-05
-# TODO: VS protection hardware?
->>7 byte x \b]
-# NES 2.0-specific flags.
->7 byte&0x0C =0x8
->>12 byte&0x03 =0x0 [NTSC]
->>12 byte&0x03 =0x1 [PAL]
->>12 byte&0x02 =0x2 [NTSC+PAL]
-
-# Standard iNES ROM header.
-0 string NES\x1A NES ROM image (iNES)
-!:mime application/x-nes-rom
->0 use nes-rom-image-ines
-
-# Wii U Virtual Console iNES ROM header.
-0 belong 0x4E455300 NES ROM image (Wii U Virtual Console)
-!:mime application/x-nes-rom
->0 use nes-rom-image-ines
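
The nes-rom-image-ines subroutine above reads the 16-byte iNES header: PRG-ROM size in 16 KB units at byte 4, CHR-ROM size in 8 KB units at byte 5, mirroring/SRAM/trainer flags in byte 6, and bits 2-3 of byte 7 equal to 2 for NES 2.0. A compact Python rendering of those fields (dictionary keys are mine; the simplification on mirroring is noted inline):

def ines_header(hdr: bytes):
    """Decode the fields the nes-rom-image-ines rules above print."""
    if hdr[:4] not in (b"NES\x1a", b"NES\x00"):   # iNES, or the Wii U VC variant
        return None
    f6, f7 = hdr[6], hdr[7]
    return {
        "nes2": (f7 & 0x0C) == 0x08,              # ">7 byte&0x0C =0x8"
        "prg_16k_banks": hdr[4],
        "chr_8k_banks": hdr[5],
        "four_screen": bool(f6 & 0x08),
        # the rules above use mask 0x09, i.e. they suppress H/V when four-screen is set
        "mirroring": "V" if f6 & 0x01 else "H",
        "sram": bool(f6 & 0x02),
        "trainer": bool(f6 & 0x04),
        "vs_unisystem": (f7 & 0x03) == 0x01,
        "playchoice10": (f7 & 0x03) == 0x02,
    }
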
-
-#------------------------------------------------------------------------------
-# unif: file(1) magic for UNIF-format Nintendo Entertainment System ROM images
-# Reference: https://wiki.nesdev.com/w/index.php/UNIF
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-#
-# NOTE: The UNIF format uses chunks instead of a fixed header,
-# so most of the data isn't easily parseable.
-#
-0 string UNIF
->4 lelong <16 NES ROM image (UNIF v%d format)
-!:mime application/x-nes-rom
-
-#------------------------------------------------------------------------------
-# fds: file(1) magic for Famicom Disk System disk images
-# Reference: https://wiki.nesdev.com/w/index.php/Family_Computer_Disk_System#.FDS_format
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# TODO: Check "Disk info block" and get info from that in addition to the optional header.
-
-# Disk info block. (block 1)
-0 name nintendo-fds-disk-info-block
->23 byte !1 FMC-
->23 byte 1 FSC-
->16 string x \b%.3s
->15 ubyte x \b, mfr %02X
->20 ubyte x (Rev.%02u)
-
-# Headered version.
-0 string FDS\x1A
->0x11 string *NINTENDO-HVC* Famicom Disk System disk image:
-!:mime application/x-fds-disk
->>0x10 use nintendo-fds-disk-info-block
->4 byte 1 (%u side)
->4 byte !1 (%u sides)
-
-# Unheadered version.
-1 string *NINTENDO-HVC* Famicom Disk System disk image:
-!:mime application/x-fds-disk
->0 use nintendo-fds-disk-info-block
-
-#------------------------------------------------------------------------------
-# tnes: file(1) magic for TNES-format Nintendo Entertainment System ROM images
-# Used by Nintendo 3DS NES Virtual Console games.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-#
-0 string TNES NES ROM image (Nintendo 3DS Virtual Console)
-!:mime application/x-nes-rom
->4 byte 100 \b: FDS,
->>0x2010 use nintendo-fds-disk-info-block
->4 byte !100 \b: TNES mapper %u
->>5 byte x \b, %ux8k PRG
->>6 byte x \b, %ux8k CHR
->>7 byte&0x08 =1 [WRAM]
->>8 byte&0x09 =1 [H-mirror]
->>8 byte&0x09 =2 [V-mirror]
->>8 byte&0x02 =3 [VRAM]
-
-#------------------------------------------------------------------------------
-# gameboy: file(1) magic for the Nintendo (Color) Gameboy raw ROM format
-# Reference: http://gbdev.gg8.se/wiki/articles/The_Cartridge_Header
-#
-0x104 bequad 0xCEED6666CC0D000B Game Boy ROM image
-# TODO: application/x-gameboy-color-rom for GBC.
-!:mime application/x-gameboy-rom
->0x143 byte&0x80 0x80
->>0x134 string >\0 \b: "%.15s"
->0x143 byte&0x80 !0x80
->>0x134 string >\0 \b: "%.16s"
->0x14c byte x (Rev.%02u)
-
-# Machine type. (SGB, CGB, SGB+CGB)
-# Old licensee code 0x33 is required for SGB, but not CGB.
->0x14b byte 0x33
->>0x146 byte 0x03
->>>0x143 byte&0x80 0x80 [SGB+CGB]
->>>0x143 byte&0x80 !0x80 [SGB]
->>0x146 byte !0x03
->>>0x143 byte&0xC0 0x80 [CGB]
->>>0x143 byte&0xC0 0xC0 [CGB ONLY]
->0x14b byte !0x33
->>0x143 byte&0xC0 0x80 [CGB]
->>0x143 byte&0xC0 0xC0 [CGB ONLY]
-
-# Mapper.
->0x147 byte 0x00 [ROM ONLY]
->0x147 byte 0x01 [MBC1]
->0x147 byte 0x02 [MBC1+RAM]
->0x147 byte 0x03 [MBC1+RAM+BATT]
->0x147 byte 0x05 [MBC2]
->0x147 byte 0x06 [MBC2+BATTERY]
->0x147 byte 0x08 [ROM+RAM]
->0x147 byte 0x09 [ROM+RAM+BATTERY]
->0x147 byte 0x0B [MMM01]
->0x147 byte 0x0C [MMM01+SRAM]
->0x147 byte 0x0D [MMM01+SRAM+BATT]
->0x147 byte 0x0F [MBC3+TIMER+BATT]
->0x147 byte 0x10 [MBC3+TIMER+RAM+BATT]
->0x147 byte 0x11 [MBC3]
->0x147 byte 0x12 [MBC3+RAM]
->0x147 byte 0x13 [MBC3+RAM+BATT]
->0x147 byte 0x19 [MBC5]
->0x147 byte 0x1A [MBC5+RAM]
->0x147 byte 0x1B [MBC5+RAM+BATT]
->0x147 byte 0x1C [MBC5+RUMBLE]
->0x147 byte 0x1D [MBC5+RUMBLE+SRAM]
->0x147 byte 0x1E [MBC5+RUMBLE+SRAM+BATT]
->0x147 byte 0xFC [Pocket Camera]
->0x147 byte 0xFD [Bandai TAMA5]
->0x147 byte 0xFE [Hudson HuC-3]
->0x147 byte 0xFF [Hudson HuC-1]
-
-# ROM size.
->0x148 byte 0 \b, ROM: 256Kbit
->0x148 byte 1 \b, ROM: 512Kbit
->0x148 byte 2 \b, ROM: 1Mbit
->0x148 byte 3 \b, ROM: 2Mbit
->0x148 byte 4 \b, ROM: 4Mbit
->0x148 byte 5 \b, ROM: 8Mbit
->0x148 byte 6 \b, ROM: 16Mbit
->0x148 byte 7 \b, ROM: 32Mbit
->0x148 byte 0x52 \b, ROM: 9Mbit
->0x148 byte 0x53 \b, ROM: 10Mbit
->0x148 byte 0x54 \b, ROM: 12Mbit
-
-# RAM size.
->0x149 byte 1 \b, RAM: 16Kbit
->0x149 byte 2 \b, RAM: 64Kbit
->0x149 byte 3 \b, RAM: 256Kbit
->0x149 byte 4 \b, RAM: 1Mbit
->0x149 byte 5 \b, RAM: 512Kbit
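
The Game Boy entry keys on the Nintendo logo at 0x104 (only its first eight bytes are tested), then reads the title at 0x134, the CGB flag at 0x143, the revision at 0x14C, the cartridge/mapper type at 0x147, and the ROM/RAM size codes at 0x148/0x149. A reduced sketch of the same header walk (mapper table trimmed; names are mine):

ROM_SIZES = {0: "256Kbit", 1: "512Kbit", 2: "1Mbit", 3: "2Mbit", 4: "4Mbit",
             5: "8Mbit", 6: "16Mbit", 7: "32Mbit",
             0x52: "9Mbit", 0x53: "10Mbit", 0x54: "12Mbit"}
RAM_SIZES = {1: "16Kbit", 2: "64Kbit", 3: "256Kbit", 4: "1Mbit", 5: "512Kbit"}

def gameboy_header(rom: bytes):
    if rom[0x104:0x10C] != bytes.fromhex("CEED6666CC0D000B"):
        return None                          # logo prefix tested at 0x104 above
    cgb = rom[0x143] & 0xC0
    title_len = 15 if rom[0x143] & 0x80 else 16
    return {
        "title": rom[0x134:0x134 + title_len].split(b"\0")[0].decode("ascii", "replace"),
        "revision": rom[0x14C],
        "cgb": {0x80: "CGB", 0xC0: "CGB ONLY"}.get(cgb, "DMG"),
        "mapper_code": rom[0x147],           # e.g. 0x00 ROM ONLY, 0x01 MBC1, 0x13 MBC3+RAM+BATT
        "rom_size": ROM_SIZES.get(rom[0x148], "unknown"),
        "ram_size": RAM_SIZES.get(rom[0x149], "none/unknown"),
    }
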
-
-#------------------------------------------------------------------------------
-# genesis: file(1) magic for various Sega Mega Drive / Genesis ROM image and disc formats
-# Updated by David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://www.retrodev.com/segacd.html
-# - http://devster.monkeeh.com/sega/32xguide1.txt
-#
-
-# Common Sega Mega Drive header format.
-# FIXME: Name fields are 48 bytes, but have spaces for padding instead of 00s.
-0 name sega-mega-drive-header
-# ROM title. (Use domestic if present; if not, use international.)
->0x120 byte >0x20
->>0x120 string >\0 \b: "%.16s"
->0x120 byte <0x21
->>0x150 string >\0 \b: "%.16s"
-# Other information.
->0x180 string >\0 (%.14s
->>0x110 string >\0 \b, %.16s
->0x180 byte 0
->>0x110 string >\0 (%.16s
->0 byte x \b)
-
-# TODO: Check for 32X CD?
-# Sega Mega CD disc images: 2048-byte sectors.
-0 string SEGADISCSYSTEM\ \ Sega Mega CD disc image
-!:mime application/x-sega-cd-rom
->0 use sega-mega-drive-header
->0 byte x \b, 2048-byte sectors
-0 string SEGABOOTDISC\ \ \ \ Sega Mega CD disc image
-!:mime application/x-sega-cd-rom
->0 use sega-mega-drive-header
->0 byte x \b, 2048-byte sectors
-# Sega Mega CD disc images: 2352-byte sectors.
-0x10 string SEGADISCSYSTEM\ \ Sega Mega CD disc image
-!:mime application/x-sega-cd-rom
->0x10 use sega-mega-drive-header
->0 byte x \b, 2352-byte sectors
-0x10 string SEGABOOTDISC\ \ \ \ Sega Mega CD disc image
-!:mime application/x-sega-cd-rom
->0x10 use sega-mega-drive-header
->0 byte x \b, 2352-byte sectors
-
-# Sega Mega Drive: Identify the system ID.
-0x100 string SEGA
->0x3C0 string MARS\ CHECK\ MODE Sega 32X ROM image
-!:mime application/x-genesis-32x-rom
->>0 use sega-mega-drive-header
->0x104 string \ PICO Sega Pico ROM image
-!:mime application/x-sega-pico-rom
->>0 use sega-mega-drive-header
->0x104 string TOYS\ PICO Sega Pico ROM image
-!:mime application/x-sega-pico-rom
->>0 use sega-mega-drive-header
->0x104 string \ TOYS\ PICO Sega Pico ROM image
-!:mime application/x-sega-pico-rom
->>0 use sega-mega-drive-header
->0x104 string \ IAC Sega Pico ROM image
-!:mime application/x-sega-pico-rom
->>0 use sega-mega-drive-header
->0x104 string \ TERA68K Sega Teradrive (68K) ROM image
-!:mime application/x-sega-teradrive-rom
->>0 use sega-mega-drive-header
->0x104 string \ TERA286 Sega Teradrive (286) ROM image
-!:mime application/x-sega-teradrive-rom
->>0 use sega-mega-drive-header
->0x180 string BR Sega Mega CD Boot ROM image
-!:mime application/x-genesis-rom
->>0 use sega-mega-drive-header
->0x104 default x Sega Mega Drive / Genesis ROM image
-!:mime application/x-genesis-rom
->>0 use sega-mega-drive-header
-
-# Sega Mega Drive: Some ROMs have "SEGA" at 0x101, not 0x100.
-0x100 string \ SEGA Sega Mega Drive / Genesis ROM image
->0 use sega-mega-drive-header
-
-# Sega Pico ROMs that don't start with "SEGA".
-0x100 string SAMSUNG\ PICO Samsung Pico ROM image
-!:mime application/x-sega-pico-rom
->0 use sega-mega-drive-header
-0x100 string IMA\ IKUNOUJYUKU Samsung Pico ROM image
-!:mime application/x-sega-pico-rom
->0 use sega-mega-drive-header
-0x100 string IMA\ IKUNOJYUKU Samsung Pico ROM image

-!:mime application/x-sega-pico-rom
->0 use sega-mega-drive-header
-
-# Sega Picture Magic (modified 32X)
-0x100 string Picture\ Magic
->0x3C0 string PICTURE\ MAGIC-01 Sega 32X ROM image
-!:mime application/x-genesis-32x-rom
->>0 use sega-mega-drive-header
-
-#------------------------------------------------------------------------------
-# genesis: file(1) magic for the Super MegaDrive ROM dump format
-#
-
-# NOTE: Due to interleaving, we can't display anything
-# other than the copier header information.
-0 name sega-genesis-smd-header
->0 byte x %dx16k blocks
->2 byte 0 \b, last in series or standalone
->2 byte >0 \b, split ROM
-
-# "Sega Genesis" header.
-0x280 string EAGN
->8 beshort 0xAABB Sega Mega Drive / Genesis ROM image (SMD format):
-!:mime application/x-genesis-rom
->>0 use sega-genesis-smd-header
-
-# "Sega Mega Drive" header.
-0x280 string EAMG
->8 beshort 0xAABB Sega Mega Drive / Genesis ROM image (SMD format):
-!:mime application/x-genesis-rom
->>0 use sega-genesis-smd-header
-
-#------------------------------------------------------------------------------
-# smsgg: file(1) magic for Sega Master System and Game Gear ROM images
-# Detects all Game Gear and export Sega Master System ROM images,
-# and some Japanese Sega Master System ROM images.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://www.smspower.org/Development/ROMHeader
-#
-
-# General SMS header rule.
-# The SMS boot ROM checks the header at three locations.
-0 name sega-master-system-rom-header
-# Machine type.
->0x0F byte&0xF0 0x30 Sega Master System
-!:mime application/x-sms-rom
->0x0F byte&0xF0 0x40 Sega Master System
-!:mime application/x-sms-rom
->0x0F byte&0xF0 0x50 Sega Game Gear
-!:mime application/x-gamegear-rom
->0x0F byte&0xF0 0x60 Sega Game Gear
-!:mime application/x-gamegear-rom
->0x0F byte&0xF0 0x70 Sega Game Gear
-!:mime application/x-gamegear-rom
->0x0F default x Sega Master System / Game Gear
-!:mime application/x-sms-rom
->0 byte x ROM image:
-# Product code.
->0x0E byte&0xF0 0x10 1
->0x0E byte&0xF0 0x20 2
->0x0E byte&0xF0 0x30 3
->0x0E byte&0xF0 0x40 4
->0x0E byte&0xF0 0x50 5
->0x0E byte&0xF0 0x60 6
->0x0E byte&0xF0 0x70 7
->0x0E byte&0xF0 0x80 8
->0x0E byte&0xF0 0x90 9
->0x0E byte&0xF0 0xA0 10
->0x0E byte&0xF0 0xB0 11
->0x0E byte&0xF0 0xC0 12
->0x0E byte&0xF0 0xD0 13
->0x0E byte&0xF0 0xE0 14
->0x0E byte&0xF0 0xF0 15
-# If the product code is 5 digits, we'll need to backspace here.
->0x0E byte&0xF0 !0
->>0x0C leshort x \b%04x
->0x0E byte&0xF0 0
->>0x0C leshort x %04x
-# Revision.
->0x0E byte&0x0F x (Rev.%02d)
-# ROM size. (Used for the boot ROM checksum routine.)
->0x0F byte&0x0F 0x0A (8 KB)
->0x0F byte&0x0F 0x0B (16 KB)
->0x0F byte&0x0F 0x0C (32 KB)
->0x0F byte&0x0F 0x0D (48 KB)
->0x0F byte&0x0F 0x0E (64 KB)
->0x0F byte&0x0F 0x0F (128 KB)
->0x0F byte&0x0F 0x00 (256 KB)
->0x0F byte&0x0F 0x01 (512 KB)
->0x0F byte&0x0F 0x02 (1 MB)
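
The sega-master-system-rom-header subroutine decodes the "TMR SEGA" header: the high nibble of byte 0x0F selects the machine (3/4 = Master System, 5-7 = Game Gear), its low nibble encodes the ROM size used by the boot-ROM checksum, the product code is BCD spread over bytes 0x0C-0x0E with the high nibble of 0x0E as the leading digit, and the low nibble of 0x0E is the revision. A rough decoder under those assumptions (names mine):

ROM_SIZES = {0x0A: "8 KB", 0x0B: "16 KB", 0x0C: "32 KB", 0x0D: "48 KB",
             0x0E: "64 KB", 0x0F: "128 KB", 0x00: "256 KB",
             0x01: "512 KB", 0x02: "1 MB"}

def sms_header(hdr: bytes):
    """hdr is the 16-byte block starting at the 'TMR SEGA' signature."""
    if hdr[:8] != b"TMR SEGA":
        return None
    system = {3: "Sega Master System", 4: "Sega Master System",
              5: "Sega Game Gear", 6: "Sega Game Gear",
              7: "Sega Game Gear"}.get(hdr[0x0F] >> 4,
                                       "Sega Master System / Game Gear")
    # Product code: leading digit in the high nibble of 0x0E (may be 10-15),
    # remaining four BCD digits in the little-endian word at 0x0C.
    leading = hdr[0x0E] >> 4
    word = hdr[0x0C] | (hdr[0x0D] << 8)
    product = (f"{leading}" if leading else "") + f"{word:04x}"
    return {"system": system, "product_code": product,
            "revision": hdr[0x0E] & 0x0F,
            "rom_size": ROM_SIZES.get(hdr[0x0F] & 0x0F, "unknown")}
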
-
-# SMS/GG header locations.
-0x7FF0 string TMR\ SEGA
->0x7FF0 use sega-master-system-rom-header
-0x3FF0 string TMR\ SEGA
->0x3FF0 use sega-master-system-rom-header
-0x1FF0 string TMR\ SEGA
->0x1FF0 use sega-master-system-rom-header
-
-#------------------------------------------------------------------------------
-# saturn: file(1) magic for the Sega Saturn disc image format.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-#
-
-# Common Sega Saturn disc header format.
-# NOTE: Title is 112 bytes, but we're only showing 32 due to space padding.
-# TODO: Release date, device information, region code, others?
-0 name sega-saturn-disc-header
->0x60 string >\0 \b: "%.32s"
->0x20 string >\0 (%.10s
->>0x2A string >\0 \b, %.6s)
->>0x2A byte 0 \b)
-
-# 2048-byte sector version.
-0 string SEGA\ SEGASATURN\ Sega Saturn disc image
-!:mime application/x-saturn-rom
->0 use sega-saturn-disc-header
->0 byte x (2048-byte sectors)
-# 2352-byte sector version.
-0x10 string SEGA\ SEGASATURN\ Sega Saturn disc image
-!:mime application/x-saturn-rom
->0x10 use sega-saturn-disc-header
->0 byte x (2352-byte sectors)
-
-#------------------------------------------------------------------------------
-# dreamcast: file(1) magic for the Sega Dreamcast disc image format.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://mc.pp.se/dc/ip0000.bin.html
-#
-
-# Common Sega Dreamcast disc header format.
-# NOTE: Title is 128 bytes, but we're only showing 32 due to space padding.
-# TODO: Release date, device information, region code, others?
-0 name sega-dreamcast-disc-header
->0x80 string >\0 \b: "%.32s"
->0x40 string >\0 (%.10s
->>0x4A string >\0 \b, %.6s)
->>0x4A byte 0 \b)
-
-# 2048-byte sector version.
-0 string SEGA\ SEGAKATANA\ Sega Dreamcast disc image
-!:mime application/x-dc-rom
->0 use sega-dreamcast-disc-header
->0 byte x (2048-byte sectors)
-# 2352-byte sector version.
-0x10 string SEGA\ SEGAKATANA\ Sega Dreamcast disc image
-!:mime application/x-dc-rom
->0x10 use sega-dreamcast-disc-header
->0 byte x (2352-byte sectors)
-
-#------------------------------------------------------------------------------
-# dreamcast: file(1) uncertain magic for the Sega Dreamcast VMU image format
-#
-0 belong 0x21068028 Sega Dreamcast VMU game image
-0 string LCDi Dream Animator file
-
-#------------------------------------------------------------------------------
-# z64: file(1) magic for the Z64 format N64 ROM dumps
-# Reference: http://forum.pj64-emu.com/showthread.php?t=2239
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-#
-0 bequad 0x803712400000000F Nintendo 64 ROM image
-!:mime application/x-n64-rom
->0x20 string >\0 \b: "%.20s"
->0x3B string x (%.4s
->0x3F byte x \b, Rev.%02u)
-
-#------------------------------------------------------------------------------
-# v64: file(1) magic for the V64 format N64 ROM dumps
-# Same as z64 format, but with 16-bit byteswapping.
-#
-0 bequad 0x3780401200000F00 Nintendo 64 ROM image (V64)
-!:mime application/x-n64-rom
-
-#------------------------------------------------------------------------------
-# n64-swap2: file(1) magic for the swap2 format N64 ROM dumps
-# Same as z64 format, but with swapped 16-bit words.
-#
-0 bequad 0x12408037000F0000 Nintendo 64 ROM image (wordswapped)
-!:mime application/x-n64-rom
-
-#------------------------------------------------------------------------------
-# n64-le32: file(1) magic for the 32-bit byteswapped format N64 ROM dumps
-# Same as z64 format, but with 32-bit byteswapping.
-#
-0 bequad 0x401237800F000000 Nintendo 64 ROM image (32-bit byteswapped)
-!:mime application/x-n64-rom
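
The four entries above are the same Nintendo 64 header seen through different byte orderings of the dump: the native big-endian word 0x80371240, its 16-bit byteswap 0x37804012, the word-swapped 0x12408037, and the 32-bit byteswap 0x40123780. Reading the first four bytes is therefore enough to tell the variants apart, as in this sketch (format labels follow the descriptions above; the helper is mine):

N64_FORMATS = {
    bytes.fromhex("80371240"): "z64 (native, big-endian)",
    bytes.fromhex("37804012"): "v64 (16-bit byteswapped)",
    bytes.fromhex("12408037"): "wordswapped",
    bytes.fromhex("40123780"): "32-bit byteswapped",
}

def n64_dump_format(path):
    with open(path, "rb") as f:
        return N64_FORMATS.get(f.read(4))
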
-
-#------------------------------------------------------------------------------
-# gba: file(1) magic for the Nintendo Game Boy Advance raw ROM format
-# Reference: https://problemkaputt.de/gbatek.htm#gbacartridgeheader
-#
-# Original version from: "Nelson A. de Oliveira" <naoliv@gmail.com>
-# Updated version from: David Korth <gerbilsoft@gerbilsoft.com>
-#
-4 bequad 0x24FFAE51699AA221 Game Boy Advance ROM image
-!:mime application/x-gba-rom
->0xA0 string >\0 \b: "%.12s"
->0xAC string x (%.6s
->0xBC byte x \b, Rev.%02u)
-
-#------------------------------------------------------------------------------
-# nds: file(1) magic for the Nintendo DS(i) raw ROM format
-# Reference: https://problemkaputt.de/gbatek.htm#dscartridgeheader
-#
-# Original version from: "Nelson A. de Oliveira" <naoliv@gmail.com>
-# Updated version from: David Korth <gerbilsoft@gerbilsoft.com>
-#
-0xC0 bequad 0x24FFAE51699AA221 Nintendo DS ROM image
-!:mime application/x-nintendo-ds-rom
->0x00 string >\0 \b: "%.12s"
->0x0C string x (%.6s
->0x1E byte x \b, Rev.%02u)
->0x12 byte 2 (DSi enhanced)
->0x12 byte 3 (DSi only)
-# Secure Area check.
->0x20 lelong <0x4000 (homebrew)
->0x20 lelong >0x3FFF
->>0x4000 lequad 0x0000000000000000 (multiboot)
->>0x4000 lequad !0x0000000000000000
->>>0x4000 lequad 0xE7FFDEFFE7FFDEFF (decrypted)
->>>0x4000 lequad !0xE7FFDEFFE7FFDEFF
->>>>0x1000 lequad 0x0000000000000000 (encrypted)
->>>>0x1000 lequad !0x0000000000000000 (mask ROM)
-
-#------------------------------------------------------------------------------
-# nds_passme: file(1) magic for Nintendo DS ROM images for GBA cartridge boot.
-# This is also used for loading .nds files using the MSET exploit on 3DS.
-# Reference: https://github.com/devkitPro/ndstool/blob/master/source/ndscreate.cpp
-0xC0 bequad 0xC8604FE201708FE2 Nintendo DS Slot-2 ROM image (PassMe)
-!:mime application/x-nintendo-ds-rom
-
-#------------------------------------------------------------------------------
-# ngp: file(1) magic for the Neo Geo Pocket (Color) raw ROM format.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://neogpc.googlecode.com/svn-history/r10/trunk/src/core/neogpc.cpp
-# - https://www.devrs.com/ngp/files/ngpctech.txt
-#
-0x0A string BY\ SNK\ CORPORATION Neo Geo Pocket
-!:mime application/x-neo-geo-pocket-rom
->0x23 byte 0x10 Color
->0 byte x ROM image
->0x24 string >\0 \b: "%.12s"
->0x21 uleshort x \b, NEOP%04X
->0x1F ubyte 0xFF (debug mode enabled)
-
-#------------------------------------------------------------------------------
-# msx: file(1) magic for MSX game cartridge dumps
-# Too simple - MPi
-#0 beshort 0x4142 MSX game cartridge dump
-
-#------------------------------------------------------------------------------
-# Sony Playstation executables (Adam Sjoegren <asjo@diku.dk>) :
-0 string PS-X\ EXE Sony Playstation executable
->16 lelong x PC=%#08x,
->20 lelong !0 GP=%#08x,
->24 lelong !0 .text=[%#08x,
->>28 lelong x \b%#x],
->32 lelong !0 .data=[%#08x,
->>36 lelong x \b%#x],
->40 lelong !0 .bss=[%#08x,
->>44 lelong x \b%#x],
->48 lelong !0 Stack=%#08x,
->48 lelong =0 No Stack!,
->52 lelong !0 StackSize=%#x,
-#>76 string >\0 (%s)
-# Area:
->113 string x (%s)
-
-# CPE executables
-0 string CPE CPE executable
->3 byte x (version %d)
-
-# Sony PlayStation archive (PSARC)
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://www.psdevwiki.com/ps3/PlayStation_archive_(PSARC)
-0 string PSAR Sony PlayStation Archive
-!:ext psarc
->4 ubeshort x \b, version %d.
->6 ubeshort x \b%d
->8 string zlib \b, zlib compression
->8 string lzma \b, LZMA compression
->28 ubeshort&2 0 \b, relative paths
->28 ubeshort&2 2 \b, absolute paths
->28 ubeshort&1 1 \b, ignore case
-
-#------------------------------------------------------------------------------
-# Microsoft Xbox executables .xbe (Esa Hyytia <ehyytia@cc.hut.fi>)
-0 string XBEH Microsoft Xbox executable
-!:mime audio/x-xbox-executable
-!:ext xbe
-# expect base address of 0x10000
->0x0104 ulelong =0x10000
->>(0x0118.l-0x0FFF4) lestring16 x \b: "%.40s"
->>(0x0118.l-0x0FFF5) byte x (%c
->>(0x0118.l-0x0FFF6) byte x \b%c-
->>(0x0118.l-0x0FFF8) uleshort x \b%03u)
->>(0x0118.l-0x0FF60) ulelong&0x80000007 0x80000007 \b, all regions
->>(0x0118.l-0x0FF60) ulelong&0x80000007 !0x80000007
->>>(0x0118.l-0x0FF60) ulelong >0 (regions:
->>>>(0x0118.l-0x0FF60) ulelong &0x00000001 NA
->>>>(0x0118.l-0x0FF60) ulelong &0x00000002 Japan
->>>>(0x0118.l-0x0FF60) ulelong &0x00000004 Rest_of_World
->>>>(0x0118.l-0x0FF60) ulelong &0x80000000 Manufacturer
->>>(0x0118.l-0x0FF60) ulelong >0 \b)
-# probabilistic checks whether signed or not
->0x0004 ulelong =0x0
->>&2 ulelong =0x0
->>>&2 ulelong =0x0 \b, not signed
->0x0004 ulelong >0
->>&2 ulelong >0
->>>&2 ulelong >0 \b, signed
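
The indirect offsets in the XBE entry are address arithmetic: the header is mapped at the base address stored at 0x104 (checked to be 0x10000 above), the certificate's virtual address is stored at 0x118, and the certificate holds a UTF-16 title name at +0x0C, so "(0x0118.l-0x0FFF4)" is cert_address - base + 0x0C with the base folded into the constant. A sketch of the same computation (field names are my own labels):

import struct

def xbe_title(path, base=0x10000):
    """Read the UTF-16 title name via the certificate pointer, as the rules above do."""
    with open(path, "rb") as f:
        if f.read(4) != b"XBEH":
            return None
        f.seek(0x104)
        base_addr, = struct.unpack("<I", f.read(4))
        if base_addr != base:                 # the rules above only handle base 0x10000
            return None
        f.seek(0x118)
        cert_addr, = struct.unpack("<I", f.read(4))
        f.seek(cert_addr - base_addr + 0x0C)  # certificate file offset + title field
        return f.read(80).decode("utf-16-le").split("\0")[0]
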
-
-# --------------------------------
-# Microsoft Xbox data file formats
-0 string XIP0 XIP, Microsoft Xbox data
-0 string XTF0 XTF, Microsoft Xbox data
-
-#------------------------------------------------------------------------------
-# Microsoft Xbox 360 executables (.xex)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://free60project.github.io/wiki/XEX.html
-# - https://github.com/xenia-project/xenia/blob/HEAD/src/xenia/kernel/util/xex2_info.h
-
-# Title ID (part of Execution ID section)
-0 name xbox-360-xex-execution-id
->(0.L+0xC) byte x (%c
->(0.L+0xD) byte x \b%c
->(0.L+0xE) beshort x \b-%04u, media ID:
->(0.L) belong x %08X)
-
-# Region code (part of Security Info)
-0 name xbox-360-xex-region-code
->0 ubelong 0xFFFFFFFF \b, all regions
->0 ubelong !0xFFFFFFFF
->>0 ubelong >0 (regions:
->>0 ubelong&0x000000FF 0x000000FF USA
->>0 ubelong&0x00000100 0x00000100 Japan
->>0 ubelong&0x00000200 0x00000200 China
->>0 ubelong&0x0000FC00 0x0000FC00 Asia
->>0 ubelong&0x00FF0000 0x00FF0000 PAL
->>0 ubelong&0x00FF0000 0x00FE0000 PAL [except AU/NZ]
->>0 ubelong&0x00FF0000 0x00010000 AU/NZ
->>0 ubelong&0xFF000000 0xFF000000 Other
->>0 ubelong >0 \b)
-
-0 string XEX2 Microsoft Xbox 360 executable
-!:mime audio/x-xbox360-executable
-!:ext xex
->0x18 search/0x100 \x00\x04\x00\x06
->>&0 use xbox-360-xex-execution-id
->(0x010.L+0x178) use xbox-360-xex-region-code
-
-0 string XEX1 Microsoft Xbox 360 executable (XEX1)
-!:mime audio/x-xbox360-executable
-!:ext xex
->0x18 search/0x100 \x00\x04\x00\x06
->>&0 use xbox-360-xex-execution-id
->(0x010.L+0x154) use xbox-360-xex-region-code
-
-#------------------------------------------------------------------------------
-# Microsoft Xbox 360 packages
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://free60project.github.io/wiki/STFS.html
-# - https://github.com/xenia-project/xenia/blob/HEAD/src/xenia/kernel/util/xex2_info.h
-
-# TODO: More information for console-signed packages.
-
-0 name xbox-360-package
->0x360 byte x (%c
->0x361 byte x \b%c
->0x362 beshort x \b-%04u, media ID:
->0x354 belong x %08X)
->0x344 belong x \b, content type:
->>0x344 belong 0x1 Saved Game
->>0x344 belong 0x2 Marketplace Content
->>0x344 belong 0x3 Publisher
->>0x344 belong 0x1000 Xbox 360 Title
->>0x344 belong 0x2000 IPTV Pause Buffer
->>0x344 belong 0x4000 Installed Game
->>0x344 belong 0x5000 Original Xbox Game
->>0x344 belong 0x9000 Avatar Item
->>0x344 belong 0x10000 Profile
->>0x344 belong 0x20000 Gamer Picture
->>0x344 belong 0x30000 Theme
->>0x344 belong 0x40000 Cache File
->>0x344 belong 0x50000 Storage Download
->>0x344 belong 0x60000 Xbox Saved Game
->>0x344 belong 0x70000 Xbox Download
->>0x344 belong 0x80000 Game Demo
->>0x344 belong 0x90000 Video
->>0x344 belong 0xA0000 Game
->>0x344 belong 0xB0000 Installer
->>0x344 belong 0xC0000 Game Trailer
->>0x344 belong 0xD0000 Arcade Title
->>0x344 belong 0xE0000 XNA
->>0x344 belong 0xF0000 License Store
->>0x344 belong 0x100000 Movie
->>0x344 belong 0x200000 TV
->>0x344 belong 0x300000 Music Video
->>0x344 belong 0x400000 Game Video
->>0x344 belong 0x500000 Podcast Video
->>0x344 belong 0x600000 Viral Video
->>0x344 belong 0x2000000 Community Game
-
-0 string CON\x20 Microsoft Xbox 360 package (console-signed)
->0 use xbox-360-package
-0 string PIRS
->0 belong 0 Microsoft Xbox 360 package (non-Xbox Live)
->>0 use xbox-360-package
-0 string LIVE
->0x104 belong 0 Microsoft Xbox 360 package (Xbox Live)
->>0 use xbox-360-package
-
-# Atari Lynx cartridge dump (EXE/BLL header)
-# From: "Stefan A. Haubenthal" <polluks@sdf.lonestar.org>
-# Reference:
-# https://raw.githubusercontent.com/cc65/cc65/master/libsrc/lynx/exehdr.s
-# Double-check that the image type matches too, 0x8008 conflicts with
-# 8 character OMF-86 object file headers.
-0 beshort 0x8008
->6 string BS93 Lynx homebrew cartridge
-!:mime application/x-atari-lynx-rom
->>2 beshort x \b, RAM start $%04x
-# Update: Joerg Jenderek
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lnx.trid.xml
-# Note: called "Atari Lynx ROM" by TrID
-0 string LYNX Lynx cartridge
-!:mime application/x-atari-lynx-rom
-!:ext lnx
-# bank 0 page size like: 128 256 512
->4 leshort/4 >0 \b, bank 0 %dk
->6 leshort/4 >0 \b, bank 1 %dk
-# 32 bytes cart name like: "jconnort.lyx" "viking~1.lyx" "Eye of the Beholder" "C:\EMU\LYNX\ROMS\ULTCHESS.LYX"
->10 string >\0 \b, "%.32s"
-# 16 bytes manufacturer like: "Atari" "NuFX Inc." "Matthias Domin"
->42 string >\0 \b, "%.16s"
-# version number
-#>8 leshort !1 \b, version number %u
-# rotation: 1~left Lexis (NA).lnx 2~right Centipede (Prototype).lnx
->58 ubyte >0 \b, rotation %u
-# spare
-#>59 lelong !0 \b, spare %#x
-
-# Opera file system that is used on the 3DO console
-# From: Serge van den Boom <svdb@stack.nl>
-0 string \x01ZZZZZ\x01 3DO "Opera" file system
-
-# From: Alex Myczko <alex@aiei.ch>
-# From: David Pflug <david@pflug.email>
-# is the offset 12 or the offset 16 correct?
-# GBS (Game Boy Sound) magic
-# ftp://ftp.modland.com/pub/documents/format_documentation/\
-# Gameboy%20Sound%20System%20(.gbs).txt
-0 string GBS Nintendo Gameboy Music/Audio Data
-#12 string GameBoy\ Music\ Module Nintendo Gameboy Music Module
->16 string >\0 ("%.32s" by
->48 string >\0 %.32s, copyright
->80 string >\0 %.32s),
->3 byte x version %u,
->4 byte x %u tracks
-
-# IPS Patch Files from: From: Thomas Klausner <tk@giga.or.at>
-# see https://zerosoft.zophar.net/ips.php
-0 string PATCH IPS patch file
-!:ext ips
-
-# BPS Patch Files - from: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://www.romhacking.net/documents/746/
-0 string BPS1 BPS patch file
-!:ext bps
-
-# APS Patch Files - from: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://github.com/btimofeev/UniPatcher/wiki/APS-(N64)
-0 string APS10 APS patch file
-!:ext aps
->5 byte 0 \b, simple patch
->5 byte 1 \b, N64-specific patch for
->>58 byte x N%c
->>59 byte x \b%c
->>60 byte x \b%c
->7 byte !0x20
-# FIXME: /T specifier isn't working with a fixed-length string.
->>7 string x \b: "%.50s"
-
-# UPS Patch Files - from: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: http://fileformats.archiveteam.org/wiki/UPS_(binary_patch_format)
-0 string UPS1 UPS patch file
-!:ext ups
-
-# Playstations Patch Files from: From: Thomas Klausner <tk@giga.or.at>
-0 string PPF30 Playstation Patch File version 3.0
->5 byte 0 \b, PPF 1.0 patch
->5 byte 1 \b, PPF 2.0 patch
->5 byte 2 \b, PPF 3.0 patch
->>56 byte 0 \b, Imagetype BIN (any)
->>56 byte 1 \b, Imagetype GI (PrimoDVD)
->>57 byte 0 \b, Blockcheck disabled
->>57 byte 1 \b, Blockcheck enabled
->>58 byte 0 \b, Undo data not available
->>58 byte 1 \b, Undo data available
->6 string x \b, description: %s
-
-0 string PPF20 Playstation Patch File version 2.0
->5 byte 0 \b, PPF 1.0 patch
->5 byte 1 \b, PPF 2.0 patch
->>56 lelong >0 \b, size of file to patch %d
->6 string x \b, description: %s
-
-0 string PPF10 Playstation Patch File version 1.0
->5 byte 0 \b, Simple Encoding
->6 string x \b, description: %s
-
-# Compressed ISO disc image (used mostly by PSP, PS2 and MegaDrive)
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://en.wikipedia.org/wiki/.CSO
-# NOTE: This is NOT the same as Compact ISO or GameCube/Wii disc image,
-# though it has the same magic number.
-0 string CISO
-# Match CISO version 1 with ISO-9660 sector size
->20 ubyte <2
->>16 ulelong =2048 CSO v1 disk image
-!:mime application/x-compressed-iso
-!:ext ciso/cso
->>>8 ulequad x \b, original size %llu bytes
->>>16 ulelong x \b, datablock size %u bytes
-# Match CISO version 2
->20 ubyte =2
->>22 uleshort =0
->>>4 ulelong =24 CSO v2 disk image
-!:mime application/x-compressed-iso
-!:ext ciso/cso
->>>>8 ulequad x \b, original size %llu bytes
->>>>16 ulelong x \b, datablock size %u bytes
-
-# From: Daniel Dawson <ddawson@icehouse.net>
-# SNES9x .smv "movie" file format.
-0 string SMV\x1A SNES9x input recording
->0x4 lelong x \b, version %d
-# version 4 is latest so far
->0x4 lelong <5
->>0x8 ledate x \b, recorded at %s
->>0xc lelong >0 \b, rerecorded %d times
->>0x10 lelong x \b, %d frames long
->>0x14 byte >0 \b, data for controller(s):
->>>0x14 byte &0x1 #1
->>>0x14 byte &0x2 #2
->>>0x14 byte &0x4 #3
->>>0x14 byte &0x8 #4
->>>0x14 byte &0x10 #5
->>0x15 byte ^0x1 \b, begins from snapshot
->>0x15 byte &0x1 \b, begins from reset
->>0x15 byte ^0x2 \b, NTSC standard
->>0x15 byte &0x2 \b, PAL standard
->>0x17 byte &0x1 \b, settings:
-# WIP1Timing not used as of version 4
->>>0x4 lelong <4
->>>>0x17 byte &0x2 WIP1Timing
->>>0x17 byte &0x4 Left+Right
->>>0x17 byte &0x8 VolumeEnvX
->>>0x17 byte &0x10 FakeMute
->>>0x17 byte &0x20 SyncSound
-# New flag as of version 4
->>>0x4 lelong >3
->>>>0x17 byte &0x80 NoCPUShutdown
->>0x4 lelong <4
->>>0x18 lelong >0x23
->>>>0x20 leshort !0
->>>>>0x20 lestring16 x \b, metadata: "%s"
->>0x4 lelong >3
->>>0x24 byte >0 \b, port 1:
->>>>0x24 byte 1 joypad
->>>>0x24 byte 2 mouse
->>>>0x24 byte 3 SuperScope
->>>>0x24 byte 4 Justifier
->>>>0x24 byte 5 multitap
->>>0x24 byte >0 \b, port 2:
->>>>0x25 byte 1 joypad
->>>>0x25 byte 2 mouse
->>>>0x25 byte 3 SuperScope
->>>>0x25 byte 4 Justifier
->>>>0x25 byte 5 multitap
->>>0x18 lelong >0x43
->>>>0x40 leshort !0
->>>>>0x40 lestring16 x \b, metadata: "%s"
->>0x17 byte &0x40 \b, ROM:
->>>(0x18.l-26) lelong x CRC32 %#08x
->>>(0x18.l-23) string x "%s"
-
-# Type: scummVM savegame files
-# From: Sven Hartge <debian@ds9.argh.org>
-0 string SCVM ScummVM savegame
->12 string >\0 "%s"
-
-#------------------------------------------------------------------------------
-# Nintendo GameCube / Wii file formats.
-#
-
-# Type: Nintendo GameCube/Wii common disc header data.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://wiibrew.org/wiki/Wii_Disc
-0 name nintendo-gcn-disc-common
->0x20 string x "%.64s"
->0x00 string x (%.6s
->0x06 byte >0
->>0x06 byte 1 \b, Disc 2
->>0x06 byte 2 \b, Disc 3
->>0x06 byte 3 \b, Disc 4
->0x07 byte x \b, Rev.%02u)
->0x18 belong 0x5D1C9EA3
->>0x60 beshort 0x0101 \b (Unencrypted)
->0x200 string NKIT \b (NKit compressed)
-
-
-# Type: Nintendo GameCube disc image
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://wiibrew.org/wiki/Wii_Disc
-0x1C belong 0xC2339F3D Nintendo GameCube disc image:
-!:mime application/x-gamecube-rom
->0 use nintendo-gcn-disc-common
-
-# Type: Nintendo GameCube embedded disc image
-# Commonly found on demo discs.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: http://hitmen.c02.at/files/yagcd/yagcd/index.html#idx14.8
-0 belong 0xAE0F38A2
->0x0C belong 0x00100000
->>(8.L+0x1C) belong 0xC2339F3D Nintendo GameCube embedded disc image:
-!:mime application/x-gamecube-rom
->>>(8.L) use nintendo-gcn-disc-common
-
-# Type: Nintendo Wii disc image
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://wiibrew.org/wiki/Wii_Disc
-0x18 belong 0x5D1C9EA3 Nintendo Wii disc image:
->0 use nintendo-gcn-disc-common
-
-# Type: Nintendo Wii disc image (WBFS format)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://wiibrew.org/wiki/Wii_Disc
-0 string WBFS
->0x218 belong 0x5D1C9EA3 Nintendo Wii disc image (WBFS format):
-!:mime application/x-wii-rom
->>0x200 use nintendo-gcn-disc-common
-
-# Type: Nintendo GameCube/Wii disc image (CISO format)
-# NOTE: This is NOT the same as Compact ISO or PSP CISO,
-# though it has the same magic number.
-0 string CISO
-# Other fields are used to determine what type of CISO this is:
-# - 0x04 == 0x00200000: GameCube/Wii CISO (block_size)
-# - 0x10 == 0x00000800: PSP CISO (ISO-9660 sector size)
-# - None of the above: Compact ISO.
->4 lelong 0x200000
->>8 byte 1
->>>0x801C belong 0xC2339F3D Nintendo GameCube disc image (CISO format):
-!:mime application/x-wii-rom
->>>>0x8000 use nintendo-gcn-disc-common
->>>0x8018 belong 0x5D1C9EA3 Nintendo Wii disc image (CISO format):
-!:mime application/x-wii-rom
->>>>0x8000 use nintendo-gcn-disc-common
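
As the comments above note, three unrelated formats share the "CISO" magic; the rules separate them by the 32-bit field at offset 4 (0x00200000 block size for GameCube/Wii) versus the field at offset 16 (0x800 sector size for PSP CISO v1), falling back to Compact ISO otherwise. A sketch of that decision, combining this entry with the PSP entry earlier in this file (labels and function name are mine):

import struct

def ciso_kind(hdr: bytes):
    """Disambiguate the three users of the 'CISO' magic from the first 32 bytes."""
    if hdr[:4] != b"CISO":
        return None
    at4, = struct.unpack_from("<I", hdr, 4)
    at16, = struct.unpack_from("<I", hdr, 16)
    if at4 == 0x200000:
        return "Nintendo GameCube/Wii CISO (2 MiB block size)"
    if at16 == 2048:
        return "PSP CSO v1 (ISO-9660 sector size)"
    return "Compact ISO (or CSO v2 and other variants)"
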
-
-# Type: Nintendo GameCube/Wii disc image (GCZ format)
-# Due to zlib compression, we can't get the actual disc information.
-0 lelong 0xB10BC001
->4 lelong 0 Nintendo GameCube disc image (GCZ format)
-!:mime application/x-gamecube-rom
->4 lelong 1 Nintendo Wii disc image (GCZ format)
-!:mime application/x-wii-rom
->4 default x Nintendo GameCube/Wii disc image (GCZ format)
-
-# Type: Nintendo GameCube/Wii disc image (WDF format)
-0 string WII\001DISC
->8 belong 1
-# WDFv1
->>0x54 belong 0xC2339F3D Nintendo GameCube disc image (WDFv1 format):
-!:mime application/x-gamecube-rom
->>>0x38 use nintendo-gcn-disc-common
->>0x58 belong 0x5D1C9EA3 Nintendo Wii disc image (WDFv1 format):
-!:mime application/x-wii-rom
->>>0x38 use nintendo-gcn-disc-common
->8 belong 2
-# WDFv2
->>(12.L+0x1C) belong 0xC2339F3D Nintendo GameCube disc image (WDFv2 format):
-!:mime application/x-gamecube-rom
->>>(12.L) use nintendo-gcn-disc-common
->>(12.L+0x18) belong 0x5D1C9EA3 Nintendo Wii disc image (WDFv2 format):
-!:mime application/x-wii-rom
->>>(12.L) use nintendo-gcn-disc-common
-
-# Type: Nintendo GameCube/Wii disc image (WIA format)
-0 string WIA\001 Nintendo
->0x48 belong 1 GameCube
-!:mime application/x-gamecube-rom
->0x48 belong 2 Wii
-!:mime application/x-wii-rom
->0x48 default x GameCube/Wii
->0x48 belong x disc image (WIA format):
->>0x58 use nintendo-gcn-disc-common
-
-# Type: Nintendo GameCube/Wii disc image (with SDK header)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://wiibrew.org/wiki/Wii_Disc
-0 belong 0xFFFF0000
->0x18 belong 0x00000000
->>0x1C belong 0x00000000
->>>0x8018 belong 0x5D1C9EA3 Nintendo Wii SDK disc image:
-!:mime application/x-wii-rom
->>>>0x8000 use nintendo-gcn-disc-common
->>>0x801C belong 0xC2339F3D Nintendo GameCube SDK disc image:
-!:mime application/x-gamecube-rom
->>>>0x8000 use nintendo-gcn-disc-common
-
-# Type: Nintendo GameCube/Wii disc image (RVZ format)
-0 string RVZ\001 Nintendo
->0x48 belong 1 GameCube
-!:mime application/x-gamecube-rom
->0x48 belong 2 Wii
-!:mime application/x-wii-rom
->0x48 default x GameCube/Wii
->0x48 belong x disc image (RVZ format):
->>0x58 use nintendo-gcn-disc-common
-
-#------------------------------------------------------------------------------
-# Nintendo 3DS file formats.
-#
-
-# Type: Nintendo 3DS "NCSD" image. (game cards and eMMC)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://www.3dbrew.org/wiki/NCSD
-0x100 string NCSD
->0x118 lequad 0 Nintendo 3DS Game Card image
-# NCCH header for partition 0. (game data)
->>0x1150 string >\0 \b: "%.16s"
->>0x312 byte x (Rev.%02u)
->>0x118C byte 2 (New3DS only)
->>0x18D byte 0 (inner device)
->>0x18D byte 1 (Card1)
->>0x18D byte 2 (Card2)
->>0x18D byte 3 (extended device)
->0x118 bequad 0x0102020202000000 Nintendo 3DS eMMC dump (Old3DS)
->0x118 bequad 0x0102020203000000 Nintendo 3DS eMMC dump (New3DS)
-
-# Nintendo 3DS version code.
-# Reference: https://www.3dbrew.org/wiki/Titles
-# Format: leshort containing three fields:
-# - 6-bit: Major
-# - 6-bit: Minor
-# - 4-bit: Revision
-# NOTE: Only supporting major/minor versions from 0-15 right now.
-# NOTE: Should be prefixed with "v".
-0 name nintendo-3ds-version-code
-# Raw version.
->0 leshort x \b%u,
-# Major version.
->0 leshort&0xFC00 0x0000 0
->0 leshort&0xFC00 0x0400 1
->0 leshort&0xFC00 0x0800 2
->0 leshort&0xFC00 0x0C00 3
->0 leshort&0xFC00 0x1000 4
->0 leshort&0xFC00 0x1400 5
->0 leshort&0xFC00 0x1800 6
->0 leshort&0xFC00 0x1C00 7
->0 leshort&0xFC00 0x2000 8
->0 leshort&0xFC00 0x2400 9
->0 leshort&0xFC00 0x2800 10
->0 leshort&0xFC00 0x2C00 11
->0 leshort&0xFC00 0x3000 12
->0 leshort&0xFC00 0x3400 13
->0 leshort&0xFC00 0x3800 14
->0 leshort&0xFC00 0x3C00 15
-# Minor version.
->0 leshort&0x03F0 0x0000 \b.0
->0 leshort&0x03F0 0x0010 \b.1
->0 leshort&0x03F0 0x0020 \b.2
->0 leshort&0x03F0 0x0030 \b.3
->0 leshort&0x03F0 0x0040 \b.4
->0 leshort&0x03F0 0x0050 \b.5
->0 leshort&0x03F0 0x0060 \b.6
->0 leshort&0x03F0 0x0070 \b.7
->0 leshort&0x03F0 0x0080 \b.8
->0 leshort&0x03F0 0x0090 \b.9
->0 leshort&0x03F0 0x00A0 \b.10
->0 leshort&0x03F0 0x00B0 \b.11
->0 leshort&0x03F0 0x00C0 \b.12
->0 leshort&0x03F0 0x00D0 \b.13
->0 leshort&0x03F0 0x00E0 \b.14
->0 leshort&0x03F0 0x00F0 \b.15
-# Revision.
->0 leshort&0x000F x \b.%u
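
An aside on the rules above: the version code is one little-endian 16-bit word split into a 6-bit major, 6-bit minor and 4-bit revision, which is exactly what the &0xFC00 / &0x03F0 / &0x000F enumerations spell out by hand. A minimal Python sketch of the same split (function name and example value are illustrative only):

    import struct

    def decode_3ds_version(raw: bytes) -> str:
        """Split a little-endian 16-bit 3DS version code into major.minor.revision."""
        (v,) = struct.unpack("<H", raw[:2])
        major = (v & 0xFC00) >> 10   # top 6 bits
        minor = (v & 0x03F0) >> 4    # middle 6 bits
        revision = v & 0x000F        # low 4 bits
        return f"{major}.{minor}.{revision}"

    print(decode_3ds_version(b"\x10\x24"))   # 0x2410 -> "9.1.0"

The rules only enumerate majors and minors 0-15 because magic(5) cannot print a shifted bit field directly; code has no such limit.
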
-
-# Type: Nintendo 3DS "NCCH" container.
-# https://www.3dbrew.org/wiki/NCCH
-0x100 string NCCH Nintendo 3DS
->0x18D byte&2 0 File Archive (CFA)
->0x18D byte&2 2 Executable Image (CXI)
->0x150 string >\0 \b: "%.16s"
->0x18D byte 0x05
->>0x10E leshort x (Old3DS System Update v
->>0x10E use nintendo-3ds-version-code
->>0x10E leshort x \b)
->0x18D byte 0x15
->>0x10E leshort x (New3DS System Update v
->>0x10E use nintendo-3ds-version-code
->>0x10E leshort x \b)
->0x18D byte !0x05
->>0x18D byte !0x15
->>>0x112 byte x (v
->>>0x112 use nintendo-3ds-version-code
->>>0x112 byte x \b)
->0x18C byte 2 (New3DS only)
-
-# Type: Nintendo 3DS "SMDH" file. (application description)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://3dbrew.org/wiki/SMDH
-0 string SMDH Nintendo 3DS SMDH file
->0x208 leshort !0
->>0x208 lestring16 x \b: "%.128s"
->>0x388 leshort !0
->>>0x388 lestring16 x by %.128s
->0x208 leshort 0
->>0x008 leshort !0
->>>0x008 lestring16 x \b: "%.128s"
->>>0x188 leshort !0
->>>>0x188 lestring16 x by %.128s
-
-# Type: Nintendo 3DS Homebrew Application.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://3dbrew.org/wiki/3DSX_Format
-0 string 3DSX Nintendo 3DS Homebrew Application (3DSX)
-
-# Type: Nintendo 3DS Banner Model Data.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://3dbrew.org/wiki/CBMD
-0 string CBMD\0\0\0\0 Nintendo 3DS Banner Model Data
-
-#------------------------------------------------------------------------------
-# a7800: file(1) magic for the Atari 7800 raw ROM format.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://sites.google.com/site/atari7800wiki/a78-header
-
-0 byte >0
->0 byte <3
->>1 string ATARI7800 Atari 7800 ROM image
-!:mime application/x-atari-7800-rom
->>>0x11 string >\0 \b: "%.32s"
-# Display type.
->>>0x39 byte 0 (NTSC)
->>>0x39 byte 1 (PAL)
->>>0x36 byte&1 1 (POKEY)
-
-#------------------------------------------------------------------------------
-# vectrex: file(1) magic for the GCE Vectrex raw ROM format.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: http://www.playvectrex.com/designit/chrissalo/hello1.htm
-#
-# NOTE: Title is terminated with 0x80, not 0.
-# The header is terminated with a 0, so that will
-# terminate the title as well.
-#
-0 string g\ GCE Vectrex ROM image
->0x11 string >\0 \b: "%.16s"
-
-#------------------------------------------------------------------------------
-# amiibo: file(1) magic for Nintendo amiibo NFC dumps.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://www.3dbrew.org/wiki/Amiibo
-0x00 byte 0x04
->0x0A beshort 0x0FE0
->>0x0C belong 0xF110FFEE
->>>0x208 beshort 0x0100
->>>>0x020A byte 0x0F
->>>>>0x020C bequad 0x000000045F000000
->>>>>>0x5B byte 0x02
->>>>>>>0x54 belong x Nintendo amiibo NFC dump - amiibo ID: %08X-
->>>>>>>0x58 belong x \b%08X
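
The chain above narrows a dump down to NTAG215 data and then prints the amiibo ID from two big-endian 32-bit words at 0x54 and 0x58. A minimal Python sketch that keeps only the first two guards and the ID read; everything else is illustrative:

    import struct

    def amiibo_id(dump: bytes):
        """Extract the amiibo ID the rules above print: two big-endian 32-bit
        words at 0x54 and 0x58.  Only the first two guards are reproduced here."""
        if dump[0x00] != 0x04 or struct.unpack_from(">H", dump, 0x0A)[0] != 0x0FE0:
            return None
        hi, lo = struct.unpack_from(">II", dump, 0x54)
        return f"{hi:08X}-{lo:08X}"
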
-
-#------------------------------------------------------------------------------
-# Type: Nintendo Switch XCI (Game Cartridge Image)
-# From: Benjamin Lowry <ben@ben.gmbh>
-# Reference: https://switchbrew.org/wiki/Gamecard_Format
-0x100 string HEAD
->0x10D byte 0xFA Nintendo Switch cartridge image (XCI), 1GB
->0x10D byte 0xF8 Nintendo Switch cartridge image (XCI), 2GB
->0x10D byte 0xF0 Nintendo Switch cartridge image (XCI), 4GB
->0x10D byte 0xE0 Nintendo Switch cartridge image (XCI), 8GB
->0x10D byte 0xE1 Nintendo Switch cartridge image (XCI), 16GB
->0x10D byte 0xE2 Nintendo Switch cartridge image (XCI), 32GB
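
The byte at 0x10D encodes the cartridge ROM size behind the HEAD magic at 0x100. The sketch below simply restates the values from the rules above (not independently verified):

    # Cartridge-size byte values restated from the rules above; not independently verified.
    XCI_SIZES = {0xFA: "1GB", 0xF8: "2GB", 0xF0: "4GB", 0xE0: "8GB", 0xE1: "16GB", 0xE2: "32GB"}

    def xci_size(image: bytes):
        """Return the cartridge size if the 'HEAD' magic sits at offset 0x100."""
        if image[0x100:0x104] != b"HEAD":
            return None
        return XCI_SIZES.get(image[0x10D], f"unknown size byte {image[0x10D]:#x}")
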
-
-#------------------------------------------------------------------------------
-# Type: Nintendo Switch Executable
-# From: Benjamin Lowry <ben@ben.gmbh>
-# Reference: https://switchbrew.org/wiki/NSO
-0x00 string NSO0 Nintendo Switch executable (NSO)
-
-#------------------------------------------------------------------------------
-# Type: Nintendo Switch PFS0
-# From: Benjamin Lowry <ben@ben.gmbh>
-# Reference: https://switchbrew.org/wiki/NCA_Format#PFS0
-0x00 string PFS0 Nintendo Switch partition filesystem (PFS0)
->0x04 ulelong x \b, %d files
-
-#------------------------------------------------------------------------------
-# amiibo: file(1) magic for Nintendo Badge Arcade files.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/GerbilSoft/rom-properties/issues/92
-# - https://github.com/CaitSith2/BadgeArcadeTool
-# - https://github.com/TheMachinumps/Advanced-badge-editor
-
-# PRBS: Individual badge and/or mega badge.
-0 string PRBS
->0x44 byte >0x20 Nintendo Badge Arcade
->>0xB8 ulelong <2
->>>0xBC ulelong <2 badge:
->>>0xBC ulelong >1 Mega Badge
->>>>0xB8 ulelong x (%ux
->>>>0xBC ulelong x \b%u):
->>0xB8 ulelong >1 Mega Badge
->>>0xB8 ulelong x (%ux
->>>0xBC ulelong x \b%u):
->0x44 string x "%s"
->0x3C ulelong x \b, badge ID: %u
->0x74 byte >0x20
->>0x74 string x \b, set: "%s"
->0xA8 ulelong !0xFFFFFFFF
->>0xA8 ulelong x \b, launch title ID: %08X
->>0xA4 ulelong x \b-%08X
-
-# CABS: Badge set.
-0 string CABS
->0x2C byte >0x20 Nintendo Badge Arcade badge set:
->>0x2C string x "%.48s"
->>0x24 ulelong x \b, set ID: %u
-
-#------------------------------------------------------------------------------
-# sufami: file(1) magic for Sufami Turbo ROM images.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://problemkaputt.de/fullsnes.htm#snescartsufamiturbominicartridgeadaptor
-0 string BANDAI\ SFC-ADX
->0x10 string !SFC-ADX\ BACKUP Sufami Turbo ROM image:
->>0x10 string/T x "%.14s"
->>0x30 byte x \b, ID %02X
->>0x31 byte x \b%02X
->>0x32 byte x \b%02X
->>0x33 ubyte >0 \b, series index %u
->>0x34 ubyte 0 [SlowROM]
->>0x34 ubyte 1 [FastROM]
->>0x35 ubyte 1 [SRAM]
->>0x35 ubyte 3 [Special]
diff --git a/contrib/libs/libmagic/magic/Magdir/convex b/contrib/libs/libmagic/magic/Magdir/convex
deleted file mode 100644
index 6b28f768cc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/convex
+++ /dev/null
@@ -1,69 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: convex,v 1.8 2012/10/03 23:44:43 christos Exp $
-# convex: file(1) magic for Convex boxes
-#
-# Convexes are big-endian.
-#
-# /*\
-# * Below are the magic numbers and tests added for Convex.
-# * Added at beginning, because they are expected to be used most.
-# \*/
-0 belong 0507 Convex old-style object
->16 belong >0 not stripped
-0 belong 0513 Convex old-style demand paged executable
->16 belong >0 not stripped
-0 belong 0515 Convex old-style pre-paged executable
->16 belong >0 not stripped
-0 belong 0517 Convex old-style pre-paged, non-swapped executable
->16 belong >0 not stripped
-0 belong 0x011257 Core file
-#
-# The following are a series of dump format magic numbers. Each one
-# corresponds to a drastically different dump format. The first one is
-# the original dump format on a 4.1 BSD or earlier file system. The
-# second marks the change between the 4.1 file system and the 4.2 file
-# system. The third marks the change of the block size from 1K
-# to 2K to be compatible with an IDC file system. The fourth indicates
-# a dump that is dependent on Convex Storage Manager, because data in
-# secondary storage is not physically contained within the dump.
-# The restore program uses these numbers to determine how the data is
-# to be extracted.
-#
-24 belong =60013 dump format, 4.2 or 4.3 BSD (IDC compatible)
-24 belong =60014 dump format, Convex Storage Manager by-reference dump
-#
-# what follows is a bunch of bit-mask checks on the flags field of the opthdr.
-# If there is no `=' sign, assume just checking for whether the bit is set?
-#
-0 belong 0601 Convex SOFF
->88 belong&0x000f0000 =0x00000000 c1
->88 belong &0x00010000 c2
->88 belong &0x00020000 c2mp
->88 belong &0x00040000 parallel
->88 belong &0x00080000 intrinsic
->88 belong &0x00000001 demand paged
->88 belong &0x00000002 pre-paged
->88 belong &0x00000004 non-swapped
->88 belong &0x00000008 POSIX
-#
->84 belong &0x80000000 executable
->84 belong &0x40000000 object
->84 belong&0x20000000 =0 not stripped
->84 belong&0x18000000 =0x00000000 native fpmode
->84 belong&0x18000000 =0x10000000 ieee fpmode
->84 belong&0x18000000 =0x18000000 undefined fpmode
-#
-0 belong 0605 Convex SOFF core
-#
-0 belong 0607 Convex SOFF checkpoint
->88 belong&0x000f0000 =0x00000000 c1
->88 belong &0x00010000 c2
->88 belong &0x00020000 c2mp
->88 belong &0x00040000 parallel
->88 belong &0x00080000 intrinsic
->88 belong &0x00000008 POSIX
-#
->84 belong&0x18000000 =0x00000000 native fpmode
->84 belong&0x18000000 =0x10000000 ieee fpmode
->84 belong&0x18000000 =0x18000000 undefined fpmode
diff --git a/contrib/libs/libmagic/magic/Magdir/coverage b/contrib/libs/libmagic/magic/Magdir/coverage
deleted file mode 100644
index 9f2c3dc91b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/coverage
+++ /dev/null
@@ -1,91 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: coverage,v 1.3 2021/02/23 00:51:10 christos Exp $
-# coverage: file(1) magic for test coverage data
-
-# File formats used to store test coverage data
-# 2016-05-21, Georg Sauthoff <mail@georg.so>
-
-
-# - GCC gcno - written by GCC at compile time when compiling with
-# gcc -ftest-coverage
-# - GCC gcda - written by a program that was compiled with
-# gcc -fprofile-arcs
-# - LLVM raw profiles - generated by a program compiled with
-# clang -fprofile-instr-generate -fcoverage-mapping ...
-# - LLVM indexed profiles - generated by
-# llvm-profdata
-# - GCOV reports, i.e. the annotated source code
-# - LCOV trace files, i.e. aggregated GCC profiles
-#
-# GCC coverage tracefiles
-# .gcno file are created during compile time,
-# while data collected during runtime is stored in .gcda files
-# cf. gcov-io.h
-# https://gcc.gnu.org/onlinedocs/gcc-5.3.0/gcc/Gcov-Data-Files.html
-# Examples:
-# Fedora 23/x86-64/gcc-5.3.1: 6f 6e 63 67 52 33 30 35
-# Debian 8 PPC64/gcc-4.9.2 : 67 63 6e 6f 34 30 39 2a
-0 lelong 0x67636e6f GCC gcno coverage (-ftest-coverage),
->&3 byte x version %c.
->&1 byte x \b%c
-
-# big endian
-0 belong 0x67636e6f GCC gcno coverage (-ftest-coverage),
->&0 byte x version %c.
->&2 byte x \b%c (big-endian)
-
-# Examples:
-# Fedora 23/x86-64/gcc-5.3.1: 61 64 63 67 52 33 30 35
-# Debian 8 PPC64/gcc-4.9.2 : 67 63 64 61 34 30 39 2a
-0 lelong 0x67636461 GCC gcda coverage (-fprofile-arcs),
->&3 byte x version %c.
->&1 byte x \b%c
-
-# big endian
-0 belong 0x67636461 GCC gcda coverage (-fprofile-arcs),
->&0 byte x version %c.
->&2 byte x \b%c (big-endian)
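
Little-endian producers store both the magic and the four version characters byte-reversed on disk, which is why the rules above pick the version digits from different offsets per byte order. A small Python sketch of the same check, looking at nothing beyond the first eight bytes; the hex example is the Debian one quoted in the comments:

    import struct

    GCOV_MAGIC = {0x67636E6F: "GCC gcno coverage (-ftest-coverage)",
                  0x67636461: "GCC gcda coverage (-fprofile-arcs)"}

    def gcov_info(head: bytes):
        """Match 'gcno'/'gcda' in either byte order and pick the two version
        characters the rules above print (LE files store the version reversed)."""
        le, = struct.unpack_from("<I", head, 0)
        be, = struct.unpack_from(">I", head, 0)
        if le in GCOV_MAGIC:
            kind, order, ver = GCOV_MAGIC[le], "little-endian", head[4:8][::-1]
        elif be in GCOV_MAGIC:
            kind, order, ver = GCOV_MAGIC[be], "big-endian", head[4:8]
        else:
            return None
        return f"{kind}, version {chr(ver[0])}.{chr(ver[2])} ({order})"

    # Debian 8 PPC64 example from the comment above: 67 63 6e 6f 34 30 39 2a
    print(gcov_info(bytes.fromhex("67636e6f3430392a")))   # ... version 4.9 (big-endian)
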
-
-
-# LCOV tracefiles
-# cf. http://ltp.sourceforge.net/coverage/lcov/geninfo.1.php
-0 string TN:
->&0 search/64 \nSF:/ LCOV coverage tracefile
-
-
-# Coverage reports generated by gcov
-# i.e. source code annotated with coverage information
-0 string \x20\x20\x20\x20\x20\x20\x20\x20-:\x20\x20\x20\ 0:Source:
->&0 search/128 \x20\x20\x20\x20\x20\x20\x20\x20-:\x20\x20\x20\ 0:Graph:
->>&0 search/128 \x20\x20\x20\x20\x20\x20\x20\x20-:\x20\x20\x20\ 0:Data: GCOV coverage report
-
-
-# LLVM coverage files
-
-# raw data after running a program compiled with:
-# `clang -fprofile-instr-generate -fcoverage-mapping ...`
-# default name: default.profraw
-# magic is: \xFF lprofr \x81
-# cf. https://llvm.org/docs/doxygen/html/InstrProfData_8inc_source.html
-0 lequad 0xff6c70726f667281 LLVM raw profile data,
->&0 byte x version %d
-
-# big endian
-0 bequad 0xff6c70726f667281 LLVM raw profile data,
->&7 byte x version %d (big-endian)
-
-
-# LLVM indexed instruction profile (as generated by llvm-profdata)
-# magic is: reverse(\xFF lprofi \x81)
-# cf. https://llvm.org/docs/CoverageMappingFormat.html
-# https://llvm.org/docs/doxygen/html/namespacellvm_1_1IndexedInstrProf.html
-# https://llvm.org/docs/CommandGuide/llvm-cov.html
-# https://llvm.org/docs/CommandGuide/llvm-profdata.html
-0 lequad 0x8169666f72706cff LLVM indexed profile data,
->&0 byte x version %d
-
-# big endian
-0 bequad 0x8169666f72706cff LLVM indexed profile data,
->&7 byte x version %d (big-endian)
-
diff --git a/contrib/libs/libmagic/magic/Magdir/cracklib b/contrib/libs/libmagic/magic/Magdir/cracklib
deleted file mode 100644
index 167659670d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cracklib
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cracklib,v 1.7 2009/09/19 16:28:08 christos Exp $
-# cracklib: file (1) magic for cracklib v2.7
-
-0 lelong 0x70775631 Cracklib password index, little endian
->4 long >0 (%i words)
->4 long 0 ("64-bit")
->>8 long >-1 (%i words)
-0 belong 0x70775631 Cracklib password index, big endian
->4 belong >-1 (%i words)
-# really bellong 0x0000000070775631
-0 search/1 \0\0\0\0pwV1 Cracklib password index, big endian ("64-bit")
->12 belong >0 (%i words)
diff --git a/contrib/libs/libmagic/magic/Magdir/crypto b/contrib/libs/libmagic/magic/Magdir/crypto
deleted file mode 100644
index 910df8dd49..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/crypto
+++ /dev/null
@@ -1,49 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: crypto,v 1.4 2023/07/17 16:41:48 christos Exp $
-# crypto: file(1) magic for crypto formats
-#
-# Bitcoin block files
-0 lelong 0xD9B4BEF9 Bitcoin
->(4.l+40) lelong 0xD9B4BEF9 reverse block
->>4 lelong x \b, size %u
-# normal block below
->0 default x block
->>4 lelong x \b, size %u
->>8 lelong&0xE0000000 0x20000000
->>>8 lelong x \b, BIP9 0x%x
->>8 lelong&0xE0000000 !0x20000000
->>>8 lelong x \b, version 0x%x
->>76 ledate x \b, %s UTC
-# VarInt counter
->>88 ubyte <0xfd \b, txcount %u
->>88 ubyte 0xfd
->>>89 leshort x \b, txcount %u
->>88 ubyte 0xfe
->>>89 lelong x \b, txcount %u
->>88 ubyte 0xff
->>>89 lequad x \b, txcount %llu
-!:ext dat
-# option to find more blocks in the file
-#>>(4.l+8) indirect x ;
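
The txcount branches above are Bitcoin's CompactSize ("VarInt") encoding: a leading byte below 0xfd is the count itself, while 0xfd/0xfe/0xff announce a 16-, 32- or 64-bit little-endian integer. A rough Python sketch of the same header walk, with the offsets (4, 8, 76, 88) taken from the rules above and everything else illustrative:

    import struct

    def read_varint(buf: bytes, off: int):
        """Bitcoin CompactSize: <0xfd inline; 0xfd/0xfe/0xff prefix a 16/32/64-bit LE integer."""
        first = buf[off]
        if first < 0xFD:
            return first, off + 1
        if first == 0xFD:
            return struct.unpack_from("<H", buf, off + 1)[0], off + 3
        if first == 0xFE:
            return struct.unpack_from("<I", buf, off + 1)[0], off + 5
        return struct.unpack_from("<Q", buf, off + 1)[0], off + 9

    def describe_block(record: bytes) -> str:
        """Read the same fields the rules above print from one blk*.dat record."""
        magic, size = struct.unpack_from("<II", record, 0)
        if magic != 0xD9B4BEF9:
            raise ValueError("not a mainnet block record")
        version, = struct.unpack_from("<I", record, 8)
        timestamp, = struct.unpack_from("<I", record, 76)
        txcount, _ = read_varint(record, 88)
        return f"size {size}, version {version:#x}, time {timestamp} (unix), txcount {txcount}"
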
-
-# LevelDB
--8 lequad 0xdb4775248b80fb57 LevelDB table data
-
-# http://www.tarsnap.com/scrypt.html
-# see scryptenc_setup() in lib/scryptenc/scryptenc.c
-0 string scrypt\0 scrypt encrypted file
->7 byte x \b, N=2**%d
->8 belong x \b, r=%d
->12 belong x \b, p=%d
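
Per the header layout referenced above (scryptenc_setup), byte 7 holds log2(N) and the big-endian words at offsets 8 and 12 hold r and p. A small, purely illustrative sketch reading the same fields:

    import struct

    def scrypt_params(head: bytes):
        """Read the parameters the rules above print from a tarsnap scrypt header:
        log2(N) at byte 7, big-endian r at 8 and p at 12."""
        if not head.startswith(b"scrypt\0"):
            return None
        r, p = struct.unpack_from(">II", head, 8)
        return {"N": 1 << head[7], "r": r, "p": p}
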
-
-# https://age-encryption.org/
-# Only the first recipient is printed in detail to prevent repetitive output
-# in extreme cases ("ssh-rsa, ssh-rsa, ssh-rsa, ...").
-0 string age-encryption.org/v1\n age encrypted file
->25 regex/128 \^[^\040]+ \b, %s recipient
->>25 string scrypt
->>>&0 regex/64 [0-9]+\$ (N=2**%s)
->>&0 search/256 \n->\040 \b, among others
-
-0 string -----BEGIN\040AGE\040ENCRYPTED\040FILE----- age encrypted file, ASCII armored
diff --git a/contrib/libs/libmagic/magic/Magdir/ctags b/contrib/libs/libmagic/magic/Magdir/ctags
deleted file mode 100644
index f480d32338..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ctags
+++ /dev/null
@@ -1,6 +0,0 @@
-
-# ----------------------------------------------------------------------------
-# $File: ctags,v 1.6 2009/09/19 16:28:08 christos Exp $
-# ctags: file (1) magic for Exuberant Ctags files
-# From: Alexander Mai <mai@migdal.ikp.physik.tu-darmstadt.de>
-0 search/1 =!_TAG Exuberant Ctags tag file text
diff --git a/contrib/libs/libmagic/magic/Magdir/ctf b/contrib/libs/libmagic/magic/Magdir/ctf
deleted file mode 100644
index d91684d18c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ctf
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#--------------------------------------------------------------
-# ctf: file(1) magic for CTF (Common Trace Format) trace files
-#
-# Specs. available here: <https://www.efficios.com/ctf>
-#--------------------------------------------------------------
-
-# CTF trace data
-0 lelong 0xc1fc1fc1 Common Trace Format (CTF) trace data (LE)
-0 belong 0xc1fc1fc1 Common Trace Format (CTF) trace data (BE)
-
-# CTF metadata (packetized)
-0 lelong 0x75d11d57 Common Trace Format (CTF) packetized metadata (LE)
->35 byte x \b, v%d
->36 byte x \b.%d
-0 belong 0x75d11d57 Common Trace Format (CTF) packetized metadata (BE)
->35 byte x \b, v%d
->36 byte x \b.%d
-
-# CTF metadata (plain text)
-0 string /*\x20CTF\x20 Common Trace Format (CTF) plain text metadata
-!:strength + 5 # this is to make sure we beat C
->&0 regex [0-9]+\\.[0-9]+ \b, v%s
diff --git a/contrib/libs/libmagic/magic/Magdir/cubemap b/contrib/libs/libmagic/magic/Magdir/cubemap
deleted file mode 100644
index e2f87d8542..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cubemap
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cubemap,v 1.1 2012/06/06 13:03:20 christos Exp $
-# file(1) magic(5) data for cubemaps Martin Erik Werner <martinerikwerner@gmail.com>
-#
-0 string ACMP Map file for the AssaultCube FPS game
-0 string CUBE Map file for cube and cube2 engine games
-0 string MAPZ) Map file for the Blood Frontier/Red Eclipse FPS games
diff --git a/contrib/libs/libmagic/magic/Magdir/cups b/contrib/libs/libmagic/magic/Magdir/cups
deleted file mode 100644
index 6dd14ac5a5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/cups
+++ /dev/null
@@ -1,56 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: cups,v 1.6 2019/04/19 00:42:27 christos Exp $
-# Cups: file(1) magic for the cups raster file format
-# From: Laurent Martelli <martellilaurent@gmail.com>
-# https://www.cups.org/documentation.php/spec-raster.html
-#
-
-0 name cups-le
->280 lelong x \b, %d
->284 lelong x \bx%d dpi
->376 lelong x \b, %dx
->380 lelong x \b%d pixels
->388 lelong x %d bits/color
->392 lelong x %d bits/pixel
->400 lelong 0 ColorOrder=Chunky
->400 lelong 1 ColorOrder=Banded
->400 lelong 2 ColorOrder=Planar
->404 lelong 0 ColorSpace=gray
->404 lelong 1 ColorSpace=RGB
->404 lelong 2 ColorSpace=RGBA
->404 lelong 3 ColorSpace=black
->404 lelong 4 ColorSpace=CMY
->404 lelong 5 ColorSpace=YMC
->404 lelong 6 ColorSpace=CMYK
->404 lelong 7 ColorSpace=YMCK
->404 lelong 8 ColorSpace=KCMY
->404 lelong 9 ColorSpace=KCMYcm
->404 lelong 10 ColorSpace=GMCK
->404 lelong 11 ColorSpace=GMCS
->404 lelong 12 ColorSpace=WHITE
->404 lelong 13 ColorSpace=GOLD
->404 lelong 14 ColorSpace=SILVER
->404 lelong 15 ColorSpace=CIE XYZ
->404 lelong 16 ColorSpace=CIE Lab
->404 lelong 17 ColorSpace=RGBW
->404 lelong 18 ColorSpace=sGray
->404 lelong 19 ColorSpace=sRGB
->404 lelong 20 ColorSpace=AdobeRGB
-
-# Cups Raster image format, Big Endian
-0 string RaS
->3 string t Cups Raster version 1, Big Endian
->3 string 2 Cups Raster version 2, Big Endian
->3 string 3 Cups Raster version 3, Big Endian
-!:mime application/vnd.cups-raster
->0 use \^cups-le
-
-
-# Cups Raster image format, Little Endian
-1 string SaR
->0 string t Cups Raster version 1, Little Endian
->0 string 2 Cups Raster version 2, Little Endian
->0 string 3 Cups Raster version 3, Little Endian
-!:mime application/vnd.cups-raster
->0 use cups-le
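
The little-endian variant is just the big-endian sync word written byte-reversed, which is why the second block above matches "SaR" at offset 1 and takes the version character from offset 0. A tiny sketch of the same idea; the sync words ("RaSt", "RaS2", "RaS3" and their reversals) are as implied by the rules:

    SYNC_BE = {b"RaSt": 1, b"RaS2": 2, b"RaS3": 3}

    def cups_raster_kind(head: bytes):
        """Identify a CUPS raster stream from its 4-byte sync word; little-endian
        writers emit the word byte-reversed ('tSaR', '2SaR', '3SaR')."""
        word = bytes(head[:4])
        if word in SYNC_BE:
            return f"CUPS raster v{SYNC_BE[word]}, big-endian"
        if word[::-1] in SYNC_BE:
            return f"CUPS raster v{SYNC_BE[word[::-1]]}, little-endian"
        return None

    print(cups_raster_kind(b"2SaR...header..."))   # CUPS raster v2, little-endian
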
diff --git a/contrib/libs/libmagic/magic/Magdir/dact b/contrib/libs/libmagic/magic/Magdir/dact
deleted file mode 100644
index 04627c9703..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dact
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dact,v 1.4 2009/09/19 16:28:08 christos Exp $
-# dact: file(1) magic for DACT compressed files
-#
-0 long 0x444354C3 DACT compressed data
->4 byte >-1 (version %i.
->5 byte >-1 $BS%i.
->6 byte >-1 $BS%i)
->7 long >0 $BS, original size: %i bytes
->15 long >30 $BS, block size: %i bytes
diff --git a/contrib/libs/libmagic/magic/Magdir/database b/contrib/libs/libmagic/magic/Magdir/database
deleted file mode 100644
index 03ac4235f7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/database
+++ /dev/null
@@ -1,886 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: database,v 1.69 2023/01/12 00:14:04 christos Exp $
-# database: file(1) magic for various databases
-#
-# extracted from header/code files by Graeme Wilford (eep2gw@ee.surrey.ac.uk)
-#
-#
-# GDBM magic numbers
-# Will be maintained as part of the GDBM distribution in the future.
-# <downsj@teeny.org>
-0 belong 0x13579acd GNU dbm 1.x or ndbm database, big endian, 32-bit
-!:mime application/x-gdbm
-0 belong 0x13579ace GNU dbm 1.x or ndbm database, big endian, old
-!:mime application/x-gdbm
-0 belong 0x13579acf GNU dbm 1.x or ndbm database, big endian, 64-bit
-!:mime application/x-gdbm
-0 lelong 0x13579acd GNU dbm 1.x or ndbm database, little endian, 32-bit
-!:mime application/x-gdbm
-0 lelong 0x13579ace GNU dbm 1.x or ndbm database, little endian, old
-!:mime application/x-gdbm
-0 lelong 0x13579acf GNU dbm 1.x or ndbm database, little endian, 64-bit
-!:mime application/x-gdbm
-0 string GDBM GNU dbm 2.x database
-!:mime application/x-gdbm
-#
-# Berkeley DB
-#
-# Ian Darwin's file /etc/magic files: big/little-endian version.
-#
-# Hash 1.85/1.86 databases store metadata in network byte order.
-# Btree 1.85/1.86 databases store the metadata in host byte order.
-# Hash and Btree 2.X and later databases store the metadata in host byte order.
-
-0 long 0x00061561 Berkeley DB
-!:mime application/x-dbm
->8 belong 4321
->>4 belong >2 1.86
->>4 belong <3 1.85
->>4 belong >0 (Hash, version %d, native byte-order)
->8 belong 1234
->>4 belong >2 1.86
->>4 belong <3 1.85
->>4 belong >0 (Hash, version %d, little-endian)
-
-0 belong 0x00061561 Berkeley DB
->8 belong 4321
->>4 belong >2 1.86
->>4 belong <3 1.85
->>4 belong >0 (Hash, version %d, big-endian)
->8 belong 1234
->>4 belong >2 1.86
->>4 belong <3 1.85
->>4 belong >0 (Hash, version %d, native byte-order)
-
-0 long 0x00053162 Berkeley DB 1.85/1.86
->4 long >0 (Btree, version %d, native byte-order)
-0 belong 0x00053162 Berkeley DB 1.85/1.86
->4 belong >0 (Btree, version %d, big-endian)
-0 lelong 0x00053162 Berkeley DB 1.85/1.86
->4 lelong >0 (Btree, version %d, little-endian)
-
-12 long 0x00061561 Berkeley DB
->16 long >0 (Hash, version %d, native byte-order)
-12 belong 0x00061561 Berkeley DB
->16 belong >0 (Hash, version %d, big-endian)
-12 lelong 0x00061561 Berkeley DB
->16 lelong >0 (Hash, version %d, little-endian)
-
-12 long 0x00053162 Berkeley DB
->16 long >0 (Btree, version %d, native byte-order)
-12 belong 0x00053162 Berkeley DB
->16 belong >0 (Btree, version %d, big-endian)
-12 lelong 0x00053162 Berkeley DB
->16 lelong >0 (Btree, version %d, little-endian)
-
-12 long 0x00042253 Berkeley DB
->16 long >0 (Queue, version %d, native byte-order)
-12 belong 0x00042253 Berkeley DB
->16 belong >0 (Queue, version %d, big-endian)
-12 lelong 0x00042253 Berkeley DB
->16 lelong >0 (Queue, version %d, little-endian)
-
-# From Max Bowsher.
-12 long 0x00040988 Berkeley DB
->16 long >0 (Log, version %d, native byte-order)
-12 belong 0x00040988 Berkeley DB
->16 belong >0 (Log, version %d, big-endian)
-12 lelong 0x00040988 Berkeley DB
->16 lelong >0 (Log, version %d, little-endian)
-
-#
-#
-# Round Robin Database Tool by Tobias Oetiker <oetiker@ee.ethz.ch>
-0 string/b RRD\0 RRDTool DB
->4 string/b x version %s
-
->>10 short !0 16bit aligned
->>>10 bedouble 8.642135e+130 big-endian
->>>>18 short x 32bit long (m68k)
-
->>10 short 0
->>>12 long !0 32bit aligned
->>>>12 bedouble 8.642135e+130 big-endian
->>>>>20 long 0 64bit long
->>>>>20 long !0 32bit long
->>>>12 ledouble 8.642135e+130 little-endian
->>>>>24 long 0 64bit long
->>>>>24 long !0 32bit long (i386)
->>>>12 string \x43\x2b\x1f\x5b\x2f\x25\xc0\xc7 middle-endian
->>>>>24 short !0 32bit long (arm)
-
->>8 quad 0 64bit aligned
->>>16 bedouble 8.642135e+130 big-endian
->>>>24 long 0 64bit long (s390x)
->>>>24 long !0 32bit long (hppa/mips/ppc/s390/SPARC)
->>>16 ledouble 8.642135e+130 little-endian
->>>>28 long 0 64bit long (alpha/amd64/ia64)
->>>>28 long !0 32bit long (armel/mipsel)
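
What the block above is doing: RRD files carry a FLOAT_COOKIE of 8.642135e+130, and the way those eight bytes land on disk reveals the writer's endianness (the later offset probes then infer alignment and long size). A sketch of just the endianness part, assuming the cookie field is handed in as raw bytes; the "middle-endian" byte pattern is copied from the rule above, which attributes it to arm:

    import struct

    FLOAT_COOKIE = 8.642135e130
    ARM_MIXED = bytes.fromhex("432b1f5b2f25c0c7")   # "middle-endian" pattern from the rule above

    def rrd_cookie_order(cookie: bytes) -> str:
        """Guess the byte order of the 8-byte FLOAT_COOKIE field of an RRD file."""
        if struct.unpack(">d", cookie)[0] == FLOAT_COOKIE:
            return "big-endian"
        if struct.unpack("<d", cookie)[0] == FLOAT_COOKIE:
            return "little-endian"
        if cookie == ARM_MIXED:
            return "middle-endian"
        return "unknown"

    # Round trip: a cookie packed little-endian is recognised as little-endian.
    print(rrd_cookie_order(struct.pack("<d", FLOAT_COOKIE)))
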
-
-#----------------------------------------------------------------------
-# ROOT: file(1) magic for ROOT databases
-#
-0 string root\0 ROOT file
->4 belong x Version %d
->33 belong x (Compression: %d)
-
-# XXX: Weak magic.
-# Alex Ott <ott@jet.msk.su>
-## Paradox file formats
-#2 leshort 0x0800 Paradox
-#>0x39 byte 3 v. 3.0
-#>0x39 byte 4 v. 3.5
-#>0x39 byte 9 v. 4.x
-#>0x39 byte 10 v. 5.x
-#>0x39 byte 11 v. 5.x
-#>0x39 byte 12 v. 7.x
-#>>0x04 byte 0 indexed .DB data file
-#>>0x04 byte 1 primary index .PX file
-#>>0x04 byte 2 non-indexed .DB data file
-#>>0x04 byte 3 non-incrementing secondary index .Xnn file
-#>>0x04 byte 4 secondary index .Ynn file
-#>>0x04 byte 5 incrementing secondary index .Xnn file
-#>>0x04 byte 6 non-incrementing secondary index .XGn file
-#>>0x04 byte 7 secondary index .YGn file
-#>>>0x04 byte 8 incrementing secondary index .XGn file
-
-## XBase database files
-# updated by Joerg Jenderek at Feb 2013
-# https://www.dbase.com/Knowledgebase/INT/db7_file_fmt.htm
-# https://www.clicketyclick.dk/databases/xbase/format/dbf.html
-# inspect VVYYMMDD , where 1<= MM <= 12 and 1<= DD <= 31
-0 ubelong&0x0000FFFF <0x00000C20
-!:strength +10
-# skip Infocom game Z-machine
->2 ubyte >0
-# skip Androids *.xml
->>3 ubyte >0
->>>3 ubyte <32
-# 1 < version VV
->>>>0 ubyte >1
-# skip HELP.CA3 by test for reserved byte ( NULL )
->>>>>27 ubyte 0
-# reserved bytes not always 0 ; also found 0x3901 (T4.DBF) ,0x7101 (T5.DBF,T6.DBF)
-#>>>>>30 ubeshort x 30NULL?%x
-# possible production flag,tag numbers(<=0x30),tag length(<=0x20), reserved (NULL)
->>>>>>24 ubelong&0xffFFFFff >0x01302000
-# .DBF or .MDX
->>>>>>24 ubelong&0xffFFFFff <0x01302001
-# for Xbase Database file (*.DBF) reserved (NULL) for multi-user
->>>>>>>24 ubelong&0xffFFFFff =0
-# test for 2 reserved NULL bytes,transaction and encryption byte flag
->>>>>>>>12 ubelong&0xFFFFfEfE 0
-# test for MDX flag
->>>>>>>>>28 ubyte x
->>>>>>>>>28 ubyte&0xf8 0
-# header size >= 32
->>>>>>>>>>8 uleshort >31
-# skip PIC15736.PCX by test for language driver name or field name
->>>>>>>>>>>32 ubyte >0
-#!:mime application/x-dbf; charset=unknown-8bit ??
-#!:mime application/x-dbase
->>>>>>>>>>>>0 use xbase-type
-# database file
->>>>>>>>>>>>28 ubyte&0x04 =0 \b DBF
-!:ext dbf
->>>>>>>>>>>>28 ubyte&0x04 =4 \b DataBaseContainer
-!:ext dbc
->>>>>>>>>>>>4 lelong 0 \b, no records
->>>>>>>>>>>>4 lelong >0 \b, %d record
-# plural s appended
->>>>>>>>>>>>>4 lelong >1 \bs
-# https://www.clicketyclick.dk/databases/xbase/format/dbf_check.html#CHECK_DBF
-# 1 <= record size <= 4000 (dBase 3,4) or 32 * KB (=0x8000)
->>>>>>>>>>>>10 uleshort x * %d
-# file size = records * record size + header size
->>>>>>>>>>>>1 ubyte x \b, update-date
->>>>>>>>>>>>1 use xbase-date
-# https://msdn.microsoft.com/de-de/library/cc483186(v=vs.71).aspx
-#>>>>>>>>>>>>29 ubyte =0 \b, codepage ID=%#x
-# 2~cp850 , 3~cp1252 , 0x1b~?? ; what code page is 0x1b ?
->>>>>>>>>>>>29 ubyte >0 \b, codepage ID=%#x
-#>>>>>>>>>>>>28 ubyte&0x01 0 \b, no index file
-# MDX or CDX index
->>>>>>>>>>>>28 ubyte&0x01 1 \b, with index file .MDX
->>>>>>>>>>>>28 ubyte&0x02 2 \b, with memo .FPT
-#>>>>>>>>>>>>28 ubyte&0x04 4 \b, DataBaseContainer
-# 1st record offset + 1 = header size
->>>>>>>>>>>>8 uleshort >0
->>>>>>>>>>>>(8.s+1) ubyte >0
->>>>>>>>>>>>>8 uleshort >0 \b, at offset %d
->>>>>>>>>>>>>(8.s+1) ubyte >0
->>>>>>>>>>>>>>&-1 string >\0 1st record "%s"
-# for multiple index files (*.MDX) Production flag,tag numbers(<=0x30),tag length(<=0x20), reserved (NULL)
->>>>>>>24 ubelong&0x0133f7ff >0
-# test for reserved NULL byte
->>>>>>>>47 ubyte 0
-# test for valid TAG key format (0x10 or 0)
->>>>>>>>>559 ubyte&0xeF 0
-# test MM <= 12
->>>>>>>>>>45 ubeshort <0x0C20
->>>>>>>>>>>45 ubyte >0
->>>>>>>>>>>>46 ubyte <32
->>>>>>>>>>>>>46 ubyte >0
-#!:mime application/x-mdx
->>>>>>>>>>>>>>0 use xbase-type
->>>>>>>>>>>>>>0 ubyte x \b MDX
->>>>>>>>>>>>>>1 ubyte x \b, creation-date
->>>>>>>>>>>>>>1 use xbase-date
->>>>>>>>>>>>>>44 ubyte x \b, update-date
->>>>>>>>>>>>>>44 use xbase-date
-# No.of tags in use (1,2,5,12)
->>>>>>>>>>>>>>28 uleshort x \b, %d
-# No. of entries in tag (0x30)
->>>>>>>>>>>>>>25 ubyte x \b/%d tags
-# Length of tag
->>>>>>>>>>>>>>26 ubyte x * %d
-# 1st tag name
->>>>>>>>>>>>>548 string x \b, 1st tag "%.11s"
-# 2nd tag name
-#>>>>>>>>>>>>(26.b+548) string x \b, 2nd tag "%.11s"
-#
-# Print the xBase names of different version variants
-0 name xbase-type
->0 ubyte <2
-# 1 < version
->0 ubyte >1
->>0 ubyte 0x02 FoxBase
-!:mime application/x-dbf
-# like: ACCESS.DBF USER.DBF dbase3date.dbf mitarbei.dbf produkte.dbf umlaut-test-v2.dbf
-# FoxBase+/dBaseIII+, no memo
->>0 ubyte 0x03 FoxBase+/dBase III
-!:mime application/x-dbf
-# like: 92DATA.DBF MSCATLOG.DBF SYLLABI2.DBF SYLLABUS.DBF T4.DBF Teleadr.dbf us_city.dbf
-# dBASE IV no memo file
->>0 ubyte 0x04 dBase IV
-!:mime application/x-dbf
-# like: Quattro-test11.dbf umlaut-test-v4.dbf
-# dBASE V no memo file
->>0 ubyte 0x05 dBase V
-!:mime application/x-dbf
-# like: dbase4double.dbf Quattro-test2.dbf umlaut-test7.dbf
-!:ext dbf
-# probably Apollo Database Server 9.7? xBase (0x6)
->>0 ubyte 0x06 Apollo
-!:mime application/x-dbf
-# like: ALIAS.DBF CRYPT.DBF PROCS.DBF USERS.DBF
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0x2F FoxBase+/Dbase III plus, no memo
-!:mime application/x-dbf
-# no example
->>0 ubyte 0x30 Visual FoxPro
-!:mime application/x-dbf
-# like: 26FRX.DBF 30DBC.DBF 30DBCPRO.DBF BEHINDSC.DBF USER_LEV.DBF
-# Microsoft Visual FoxPro Database Container File like: FOXPRO-DB-TEST.DBC TESTDATA.DBC TASTRADE.DBC
->>0 ubyte 0x31 Visual FoxPro, autoincrement
-!:mime application/x-dbf
-# like: AI_Table.DBF dbase_31.dbf w_cityFoxpro.dbf
-# Visual FoxPro, with field type Varchar or Varbinary
->>0 ubyte 0x32 Visual FoxPro, with field type Varchar
-!:mime application/x-dbf
-# like: dbase_32.dbf
-# dBASE IV SQL, no memo;dbv memo var size (Flagship)
->>0 ubyte 0x43 dBase IV, with SQL table
-!:mime application/x-dbf
-# like: ASSEMBLY.DBF INVENTRY.DBF STAFF.DBF
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0x62 dBase IV, with SQL table
-#!:mime application/x-dbf
-# no example
-# dBASE IV, with memo!!
->>0 ubyte 0x7b dBase IV, with memo
-!:mime application/x-dbf
-# like: test3memo.DBF dbase5.DBF
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0x82 dBase IV, with SQL system
-#!:mime application/x-dbf
-# no example
-# FoxBase+/dBaseIII+ with memo .DBT!
->>0 ubyte 0x83 FoxBase+/dBase III, with memo .DBT
-!:mime application/x-dbf
-# like: T2.DBF t3.DBF biblio.dbf dbase_83.dbf dbase3dbt0_4.dbf fsadress.dbf stop.dbf
-# VISUAL OBJECTS (first 1.0 versions) for the Dbase III files (NTX clipper driver); memo file
->>0 ubyte 0x87 VISUAL OBJECTS, with memo file
-!:mime application/x-dbf
-# like: ACCESS.DBF dbase3date.dbf dbase3float.dbf holdings.dbf mitarbei.dbf
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0x8A FoxBase+/dBase III, with memo .DBT
-#!:mime application/x-dbf
-# no example
-# dBASE IV with memo!
->>0 ubyte 0x8B dBase IV, with memo .DBT
-!:mime application/x-dbf
-# like: animals.dbf archive.dbf callin.dbf dbase_8b.dbf phnebook.dbf t6.dbf
-# dBase IV with SQL Table,no memo?
->>0 ubyte 0x8E dBase IV, with SQL table
-!:mime application/x-dbf
-# like: dbase5.DBF test3memo.DBF test-memo.DBF
-# .dbv and .dbt memo (Flagship)?
->>0 ubyte 0xB3 Flagship
-!:mime application/x-dbf
-# no example
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0xCA dBase IV with memo .DBT
-#!:mime application/x-dbf
-# no example
-# dBASE IV with SQL table, with memo .DBT
->>0 ubyte 0xCB dBase IV with SQL table, with memo .DBT
-!:mime application/x-dbf
-# like: dbase5.DBF test3memo.DBF test-memo.DBF
-# HiPer-Six format;Clipper SIX, with SMT memo file
->>0 ubyte 0xE5 Clipper SIX with memo
-!:mime application/x-dbf
-# like: dbase5.DBF test3memo.DBF test-memo.DBF testClipper.dbf DATA.DBF
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
->>0 ubyte 0xF4 dBase IV, with SQL table, with memo
-#!:mime application/x-dbf
-# no example
->>0 ubyte 0xF5 FoxPro with memo
-!:mime application/x-dbf
-# like: CUSTOMER.DBF FOXUSER1.DBF Invoice.DBF NG.DBF OBJSAMP.DBF dbase_f5.dbf kunde.dbf
-# probably Apollo Database Server 9.7 with SQL and memo mask? xBase (0xF6)
->>0 ubyte 0xF6 Apollo, with SQL table with memo
-!:mime application/x-dbf
-# like: SCRIPTS.DBF
-# https://docs.microsoft.com/en-us/previous-versions/visualstudio/foxpro/st4a0s68(v=vs.80)
-#>>0 ubyte 0xFA FoxPro 2.x, with memo
-#!:mime application/x-dbf
-# no example
-# unknown version (should not happen)
->>0 default x xBase
-!:mime application/x-dbf
->>>0 ubyte x (%#x)
-# flags in version byte
-# DBT flag (with dBASE III memo .DBT)!!
-# >>0 ubyte&0x80 >0 DBT_FLAG=%x
-# memo flag ??
-# >>0 ubyte&0x08 >0 MEMO_FLAG=%x
-# SQL flag ??
-# >>0 ubyte&0x70 >0 SQL_FLAG=%x
-# test and print the date of xBase .DBF .MDX
-0 name xbase-date
-# inspect YYMMDD , where 1<= MM <= 12 and 1<= DD <= 31
->0 ubelong x
->1 ubyte <13
->>1 ubyte >0
->>>2 ubyte >0
->>>>2 ubyte <32
->>>>>0 ubyte x
-# YY is interpreted as 20YY or 19YY
->>>>>>0 ubyte <100 \b %.2d
-# YY is interpreted as 1900+YY; TODO: display yy or 20yy instead of 1YY
->>>>>>0 ubyte >99 \b %d
->>>>>1 ubyte x \b-%d
->>>>>2 ubyte x \b-%d
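
For the .DBF branch above, the header fields it reports sit at fixed offsets: version byte at 0, last-update date as YY/MM/DD at 1-3, record count as a 32-bit LE at 4, header and record sizes as 16-bit LE at 8 and 10, and the MDX flag bit in the byte at 28. A compact sketch of that layout; it deliberately skips the long chain of guards above that rules out PCX, ZIP and other false positives:

    import struct
    from datetime import date

    def dbf_summary(header: bytes) -> str:
        """Report the fixed-offset fields the .DBF branch above prints.
        Expects at least the 32-byte header and month/day bytes that are already
        sane (1-12 / 1-31), as the rules require."""
        version = header[0]
        yy, mm, dd = header[1], header[2], header[3]
        records, = struct.unpack_from("<I", header, 4)
        header_size, record_size = struct.unpack_from("<HH", header, 8)
        # YY counts years since 1900 (the rules print small values as-is,
        # leaving the 19YY/20YY question open).
        updated = date(1900 + yy, mm, dd)
        mdx = ", with index file .MDX" if header[28] & 0x01 else ""
        return (f"version {version:#x}, {records} records * {record_size} bytes, "
                f"1st record at offset {header_size}, update-date {updated}{mdx}")
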
-
-# dBase memo files .DBT or .FPT
-# https://msdn.microsoft.com/en-us/library/8599s21w(v=vs.80).aspx
-16 ubyte <4
->16 ubyte !2
->>16 ubyte !1
-# next free block index is positive
->>>0 ulelong >0
-# skip many JPG, ZIP, BZ2 by test for reserved bytes NULL , 0|2 , 0|1 , low byte of block size
->>>>17 ubelong&0xFFfdFEff 0x00000000
-# skip many RAR by test for low byte 0 ,high byte 0|2|even of block size, 0|a|e|d7 , 0|64h
->>>>>20 ubelong&0xFF01209B 0x00000000
-# dBASE III
->>>>>>16 ubyte 3
-# skip with invalid "low" 1st item "\0\0\0\0" StateRepository-Deployment.srd-shm "\001\010\0\0" gcry_cast5.mod
->>>>>>>512 ubyte >040
-# skip with valid 1st item "rintf" keylayouts.mod
-# by looking for valid terminating character Ctrl-Z like in test.dbt
->>>>>>>>513 search/3308 \032
-# skip GRUB plan9.mod with invalid second terminating character 007
-# by checking second terminating character Ctrl-Z like in test.dbt
->>>>>>>>>&0 ubyte 032
-# dBASE III DBT with two Ctrl-Z terminating characters
->>>>>>>>>>0 use dbase3-memo-print
-# second terminating character \0 like in dbase-memo.dbt or GRUB nativedisk.mod
->>>>>>>>>&0 ubyte 0
-# skip GRUB nativedisk.mod with grub_mod_init\0grub_mod_fini\0grub_fs_autoload_hook\0
->>>>>>>>>>0x1ad string !grub_mod_init
-# like dbase-memo.dbt
->>>>>>>>>>>0 use dbase3-memo-print
-# dBASE III DBT without version, dBASE IV DBT , FoxPro FPT , or many ZIP , DBF garbage
->>>>>>16 ubyte 0
-# unusual dBASE III DBT like angest.dbt, dBASE IV DBT with block size 0 , FoxPro FPT , or garbage PCX DBF
->>>>>>>20 uleshort 0
-# FoxPro FPT , unusual dBASE III DBT like biblio.dbt or garbage
->>>>>>>>8 ulong =0
->>>>>>>>>6 ubeshort >0
-# skip emacs.PIF
->>>>>>>>>>4 ushort 0
-# check for valid FoxPro field type
->>>>>>>>>>>512 ubelong <3
-# skip LXMDCLN4.OUT LXMDCLN6.OUT LXMDALG6.OUT with invalid blocksize 170=AAh
->>>>>>>>>>>>6 ubeshort&0x002f 0
->>>>>>>>>>>>>0 use foxpro-memo-print
-# dBASE III DBT , garbage
-# skip WORD1XW.DOC with improbably high free block index
->>>>>>>>>0 ulelong <0x400000
-# skip WinStore.App.exe by looking for printable 2nd character of 1st memo item
->>>>>>>>>>513 ubyte >037
-# skip DOS executables CPQ0TD.DRV E30ODI.COM IBM0MONO.DRV by looking for printable 1st character of 1st memo item
->>>>>>>>>>>512 ubyte >037
-# skip few (14/758) Microsoft Event Trace Logs (boot_BASE+CSWITCH_1.etl DlTel-Merge.etl UpdateUx.006.etl) with invalid "high" 1st item \377\377
->>>>>>>>>>>>512 ubyte <0377
-# skip some Commodore 64 Art Studio (Deep_Strike.aas dragon's_lair_ii.aas), some Atari DEGAS Elite bitmap (ELEPHANT.PC3 ST.PC2)
-# some probably old GRUB modules (part_sun.mod) and virtual-boy-wario-land.vb.
-# by looking for valid terminating character Ctrl-Z
->>>>>>>>>>>>>513 search/523 \032
-# Atari DEGAS bitmap ST.PC2 with 0370 as second terminating character
-#>>>>>>>>>>>>>>&0 ubyte x 2ND_CHAR_IS=%o
-# dBASE III DBT with two Ctrl-Z terminating characters like dbase3dbt0_1.dbt dbase_83.dbt
->>>>>>>>>>>>>>&0 ubyte 032
->>>>>>>>>>>>>>>0 use dbase3-memo-print
-# second terminating character \0 like in pcidump.mod or fsadress.dbt umlaut-dbf-cmd.dbt
->>>>>>>>>>>>>>&0 ubyte 0
-# look for old GRUB module pcidump.mod with specific content "pcidump\0Show raw dump of the PCI configuration space"
->>>>>>>>>>>>>>>514 search/0x11E pcidump\0Show
-# dBASE III DBT with Ctr-Z + \0 terminating characters like fsadress.dbt
->>>>>>>>>>>>>>>514 default x
-# unusual dBASE III DBT like fsadress.dbt umlaut-dbf-cmd.dbt
->>>>>>>>>>>>>>>>0 use dbase3-memo-print
-# dBASE III DBT like angest.dbt, or garbage PCX DBF
->>>>>>>>8 ubelong !0
-# skip PCX and some DBF by test for for reserved NULL bytes
->>>>>>>>>510 ubeshort 0
-# skip bad samples with improbably high free block index above 2 GiB file limit
->>>>>>>>>>0 ulelong <0x400000
-# skip AI070GEP.EPS by printable 1st character of 1st memo item
->>>>>>>>>>>512 ubyte >037
-# skip some Microsoft Visual C, OMF library like: BZ2.LIB WATTCPWL.LIB ZLIB.LIB
->>>>>>>>>>>>512 ubyte <0200
-# skip gluon-ffhat-1.0-tp-link-tl-wr1043n-nd-v2-sysupgrade.bin by printable 2nd character
->>>>>>>>>>>>>513 ubyte >037
-# skip few (8/758) Microsoft Event Trace Logs (WBEngine.3.etl Wifi.etl) with valid 1st item like
-# "9600.20369.amd64fre.winblue_ltsb_escrow.220427-1727"
-# "9600.19846.amd64fre.winblue_ltsb_escrow.200923-1735"
-# "10586.494.amd64fre.th2_release_sec.160630-1736"
-# by looking for valid terminating character Ctrl-Z
->>>>>>>>>>>>>>513 search/0x11E \032
-# followed by second character Ctrl-Z implies typical DBT
->>>>>>>>>>>>>>>&0 ubyte 032
-# examples like: angest.dbt
->>>>>>>>>>>>>>>>0 use dbase3-memo-print
->>>>>>>>>>>>>>>&0 ubyte 0
-# no example found here with terminating sequence CTRL-Z + \0
->>>>>>>>>>>>>>>>0 use dbase3-memo-print
-# dBASE IV DBT with positive block size
->>>>>>>20 uleshort >0
-# dBASE IV DBT with valid block length like 512, 1024
-# multiple of 2 in between 16 and 16 K ,implies upper and lower bits are zero
-# skip also 3600h 3E00h size
->>>>>>>>20 uleshort&0xE00f 0
->>>>>>>>>0 use dbase4-memo-print
-
-# Print the information of dBase III DBT memo file
-0 name dbase3-memo-print
->0 ubyte x dBase III DBT
-!:mime application/x-dbt
-!:ext dbt
-# instead 3 as version number 0 for unusual examples like biblio.dbt
->16 ubyte !3 \b, version number %u
-# Number of next available block for appending data
-#>0 lelong =0 \b, next free block index %u
->0 lelong !0 \b, next free block index %u
-# no positive block length
-#>20 uleshort =0 \b, block length %u
->20 uleshort !0 \b, block length %u
-# dBase III memo field terminated often by \032\032
-# like: "WHAT IS XBASE" test.dbt "Borges, Malte" biblio.dbt "First memo\032\032" T2.DBT
->512 string >\0 \b, 1st item "%s"
-# For DEBUGGING
-#>512 ubelong x \b, 1ST item %#8.8x
-#>513 search/0x225 \032 FOUND_TERMINATOR
-#>>&0 ubyte 032 2xCTRL_Z
-# fsadress.dbt has 1 Ctrl-Z terminator followed by nil byte
-#>>&0 ubyte 0 1xCTRL_Z
-
-# https://www.clicketyclick.dk/databases/xbase/format/dbt.html
-# Print the information of dBase IV DBT memo file
-0 name dbase4-memo-print
->0 lelong x dBase IV DBT
-!:mime application/x-dbt
-!:ext dbt
-# 8-character shortened main name of the corresponding dBASE IV DBF file
->8 ubelong >0x20000000
-# skip unusual like for angest.dbt
->>20 uleshort >0
->>>8 string >\0 \b of %-.8s.DBF
-# value 0 implies 512 as size
-#>4 ulelong =0 \b, blocks size %u
-# size of blocks not reliable like 0x2020204C in angest.dbt
->4 ulelong !0
->>4 ulelong&0x0000003f 0 \b, blocks size %u
-# dBase IV DBT with positive block length (found 512 , 1024)
->20 uleshort >0 \b, block length %u
-# next available block
-#>0 lelong =0 \b, next free block index %u
->0 lelong !0 \b, next free block index %u
->20 uleshort >0
->>(20.s) ubelong x
->>>&-4 use dbase4-memofield-print
-# unusual dBase IV DBT without block length (implies 512 as length)
->20 uleshort =0
->>512 ubelong x
->>>&-4 use dbase4-memofield-print
-# Print the information of dBase IV memo field
-0 name dbase4-memofield-print
-# free dBase IV memo field
->0 ubelong !0xFFFF0800
->>0 lelong x \b, next free block %u
->>4 lelong x \b, next used block %u
-# used dBase IV memo field
->0 ubelong =0xFFFF0800
-# length of memo field
->>4 lelong x \b, field length %d
->>>8 string >\0 \b, 1st used item "%s"
-# http://www.dbfree.org/webdocs/1-documentation/0018-developers_stuff_(advanced)/os_related_stuff/xbase_file_format.htm
-# Print the information of FoxPro FPT memo file
-0 name foxpro-memo-print
->0 belong x FoxPro FPT
-!:mime application/x-fpt
-!:ext fpt
-# Size of blocks for FoxPro ( 64,256 ); probably a multiple of two
->6 ubeshort x \b, blocks size %u
-# next available block
-#>0 belong =0 \b, next free block index %u
->0 belong !0 \b, next free block index %u
-# field type ( 0~picture, 1~memo, 2~object )
->512 ubelong <3 \b, field type %u
-# length of memo field
->512 ubelong 1
->>516 belong >0 \b, field length %d
->>>520 string >\0 \b, 1st item "%s"
-
-# Summary: DBASE Compound Index file *.CDX and FoxPro index *.IDX
-# From: Joerg Jenderek
-# URL: https://www.clicketyclick.dk/databases/xbase/format/cdx.html
-# https://www.clicketyclick.dk/databases/xbase/format/idx.html
-# https://www.clicketyclick.dk/databases/xbase/format/idx_comp.html
-# Reference: https://mark0.net/download/triddefs_xml.7z/defs/s/sybase-ianywhere-cdx.trid.xml
-# https://mark0.net/download/triddefs_xml.7z/defs/c/cdx-vfp7.trid.xml
-# like: kunde.cdx
-0 ulelong 0x1C00
->0 use xbase-index
-# like: SYLLABI2.CDX SYLLABUS.CDX
-0 ulelong 0x0800
->0 use xbase-index
-# often in xBase index pointer to root node 400h
-0 ulelong 0x0400
-# skip most Maple help database *.hdb with version tag handled by ./maple
->1028 string !version
-# skip Maple help database hsum.hdb checking for valid reserved area
->>492 quad =0
-# skip remaining Maple help database *.hdb by checking key length
-#>>>12 uleshort !0x000F KEY_LENGTHVALID
->>>0 use xbase-index
-# display information about dBase/FoxPro index
-0 name xbase-index
->0 ulelong x xBase
-!:mime application/x-dbase-index
->14 ubyte &0x40 compound index
-# DCX for FoxPro database index like: TESTDATA.DCX
-!:ext cdx/dcx
->14 ubyte ^0x40 index
-# only 1 example like: TEST.IDX
-!:ext idx
-# pointer to root node like: 1C00h 800h often 400h
->0 ulelong !0x400 \b, root pointer %#x
-# Pointer to free node list: often 0 but -1 if not present
->4 ulelong !0 \b, free node pointer %#x
-# MAYBE number of pages in file (Foxbase, FoxPro 1.x) or
-# http://www.foxpert.com/foxpro/knowlbits/files/knowlbits_200708_1.HTM
-# Whenever Visual FoxPro updates the index file it increments this reserved field
-# Reserved for internal use like: 02000000h 03000000h 460c0000h 780f0000h 89000000h 9fdc0100h often 0
->8 ulelong !0 \b, reserved counter %#x
-# length of key like: mostly 000Ah 0028h (TEST.IDX)
->12 uleshort !0x000A \b, key length %#x
-# index options like: 24h E0h E8h
-# 1~a unique index 8~index has FOR clause 32~compact index format 64~compound index header
-# 16~Bit vector (SoftC) 128~Structure index (FoxPro)
->14 ubyte x \b, index options (%#x
->14 ubyte &0x01 \b, unique
->14 ubyte &0x08 \b, has FOR clause
->14 ubyte &0x10 \b, bit vector (SoftC)
->14 ubyte &0x20 \b, compact format
-#>14 ubyte &0x40 \b, compound header
->14 ubyte &0x80 \b, structure
->14 ubyte x \b)
-# WHAT EXACTLY IS THAT? index signature like: 0 (sybase-ianywhere-cdx.trid.xml) 1 (cdx-vfp7.trid.xml)
->15 ubyte !0 \b, index signature %u
-# reserved area (0-bytes) til about 500, but not for uncompressed Index files *.idx
->16 quad !0 \b, at 16 reserved %#llx
->492 quad !0 \b, at 492 reserved %#llx
-# for IDX variant
-#>14 ubyte ^0x40 IDX
-# for CDX variant
->14 ubyte &0x40
-# Ascending or descending: 0~ascending 1~descending
->>502 uleshort x \b, sort order %u
-# Total expression length (FoxPro 2) like: 0 1
->>504 uleshort !0 \b, expression length %u
-# FOR expression pool length like: 1
->>506 uleshort !1 \b, FOR expression pool length %#x
-# reserved for internal use like: 0
->>508 uleshort !0 \b, at 0x508 reserved %#x
-# Key expression pool length like: 1
->>510 uleshort !1 \b, key expression pool length %#x
-# 512 - 1023 Key & FOR expression pool (uncompiled)
->>512 quad !0 \b, key expression pool %#llx
-#>>520 quad !0 \b, key expression pool %#llx
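
The option byte at offset 14 is a plain bit field; the sketch below expands it with the same bit meanings listed in the comment above (1 unique, 8 FOR clause, 16 bit vector, 32 compact, 64 compound header, 128 structure):

    INDEX_OPTION_BITS = {
        0x01: "unique",
        0x08: "has FOR clause",
        0x10: "bit vector (SoftC)",
        0x20: "compact format",
        0x40: "compound index header",
        0x80: "structure index (FoxPro)",
    }

    def describe_index_options(byte_at_14: int) -> str:
        """Expand the option byte at offset 14 the same way the rules above do."""
        names = [name for bit, name in INDEX_OPTION_BITS.items() if byte_at_14 & bit]
        return f"index options ({byte_at_14:#x}: {', '.join(names) or 'none'})"

    print(describe_index_options(0xE0))   # compact format, compound index header, structure index (FoxPro)
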
-
-# Summary: dBASE IV Printer Form *.PRF
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/.dbf#Other_file_types_found_in_dBASE
-# Reference: https://mark0.net/download/triddefs_xml.7z/defs/p/prf-dbase.trid.xml
-0 ubeshort 0x0400
-# skip some Xbase Index files *.ndx and Infocom (Z-machine 4) *.z4 handled by ./adventure
-# by looking for valid printer driver name extension
->0x58 search/8 .PR2
->>0 use xbase-prf
-# display information of dbase print form like printer driver *.PR2
-0 name xbase-prf dBase Printer Form
-!:mime application/x-dbase-prf
-!:ext prf
-# MAYBE version? like: 4~DBASE IV
-#>0 ubyte x \b, version %u
-# MAYBE flag like: 1~with output file name 0~not
-#>2 ubyte !0 \b, flag %u
-# optional printer text output file name like E:\DBASE\IV\T6.txt
->3 string >\0 \b, output file %s
-# probably padding with nils til 0x53
-#>0x48 uquad !0 \b, at 0x48 padding %#llx
-# dBASE IV printer driver name like: Generic.PR2 ASCII.PR2
->0x56 string >\0 \b, using printer driver %s
-# 2 is probably last character of previous dBASE printer driver name
-#>0x60 ubyte !0x32 \b, at 0x60 %#x
-# probably padding with nils til 0xa8
-#>0x61 uquad !0 \b, at 0x61 padding %#llx
-# unknown 0x03020300 0x03020100 at 0xa8
->0xa8 ubelong x \b, at 0xa8 unknown %#8.8x
-# probably padding with nils til 0x2aa
-#>0x2a0 uquad !0 \b, at 0x2a0 padding %#llx
-# unknown 0x100ff7f01000001 at 0x2AB
->0x2ab ubequad !0x100ff7f01000001 \b, at 0x2ab unknown %#llx
-# unknown 0x0042 at 0x2b3
->0x2b3 ubeshort !0x0042 \b, at 0x2b3 unknown %#4.4x
-# unknown last 4 bytes at 0x2b6 like: 0 0x23
->0x2b6 ubelong !0 \b, at 0x2b6 unknown %#8.8x
-
-# TODO:
-# DBASE index file *.NDX
-# dBASE compiled Format *.FMO
-# FoxPro Database memo file *.DCT
-# FoxPro Forms Memo *.SCT
-# FoxPro Generated Menu Program *.MPR
-# FoxPro Report *.FRX
-# FoxPro Report Memo *.FRT
-# Foxpro Generated Screen Program *.SPR
-# Foxpro memo *.PJT
-## End of XBase database stuff
-
-# MS Access database
-4 string Standard\ Jet\ DB Microsoft Access Database
-!:mime application/x-msaccess
-4 string Standard\ ACE\ DB Microsoft Access Database
-!:mime application/x-msaccess
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Extensible_Storage_Engine
-# Reference: https://github.com/libyal/libesedb/archive/master.zip
-# libesedb-master/documentation/
-# Extensible Storage Engine (ESE) Database File (EDB) format.asciidoc
-# Note: also known as "JET Blue". Used by numerous Windows components such as
-# Windows Search, Mail, Exchange and Active Directory.
-4 ubelong 0xefcdab89
-# unknown1
->132 ubelong 0 Extensible storage engine
-!:mime application/x-ms-ese
-# file_type 0~database 1~stream
->>12 ulelong 0 DataBase
-# Security DataBase (sdb)
-!:ext edb/sdb
->>12 ulelong 1 STreaMing
-!:ext stm
-# format_version 620h
->>8 uleshort x \b, version %#x
->>10 uleshort >0 revision %#4.4x
->>0 ubelong x \b, checksum %#8.8x
-# Page size 4096 8192 32768
->>236 ulequad x \b, page size %lld
-# database_state
->>52 ulelong 1 \b, JustCreated
->>52 ulelong 2 \b, DirtyShutdown
-#>>52 ulelong 3 \b, CleanShutdown
->>52 ulelong 4 \b, BeingConverted
->>52 ulelong 5 \b, ForceDetach
-# Windows NT major version when the databases indexes were updated.
->>216 ulelong x \b, Windows version %d
-# Windows NT minor version
->>220 ulelong x \b.%d
-
-# From: Joerg Jenderek
-# URL: https://forensicswiki.org/wiki/Windows_Application_Compatibility
-# Note: files contain application compatibility fixes, application compatibility modes and application help messages.
-8 string sdbf
->7 ubyte 0
-# TAG_TYPE_LIST+TAG_INDEXES
->>12 uleshort 0x7802 Windows application compatibility Shim DataBase
-# version? 2 3
-#>>>0 ulelong x \b, version %d
-!:mime application/x-ms-sdb
-!:ext sdb
-
-# TDB database from Samba et al - Martin Pool <mbp@samba.org>
-0 string TDB\ file TDB database
->32 lelong 0x2601196D version 6, little-endian
->>36 lelong x hash size %d bytes
-
-# SE Linux policy database
-0 lelong 0xf97cff8c SE Linux policy
->16 lelong x v%d
->20 lelong 1 MLS
->24 lelong x %d symbols
->28 lelong x %d ocons
-
-# ICE authority file data (Wolfram Kleff)
-2 string ICE ICE authority data
-
-# X11 Xauthority file (Wolfram Kleff)
-10 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-11 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-12 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-13 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-14 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-15 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-16 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-17 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-18 string MIT-MAGIC-COOKIE-1 X11 Xauthority data
-
-# From: Maxime Henrion <mux@FreeBSD.org>
-# PostgreSQL's custom dump format, Maxime Henrion <mux@FreeBSD.org>
-0 string PGDMP PostgreSQL custom database dump
->5 byte x - v%d
->6 byte x \b.%d
->5 beshort <0x101 \b-0
->5 beshort >0x100
->>7 byte x \b-%d
-
-# Type: Advanced Data Format (ADF) database
-# URL: https://www.grc.nasa.gov/WWW/cgns/adf/
-# From: Nicolas Chauvat <nicolas.chauvat@logilab.fr>
-0 string @(#)ADF\ Database CGNS Advanced Data Format
-
-# Tokyo Cabinet magic data
-# http://tokyocabinet.sourceforge.net/index.html
-0 string ToKyO\ CaBiNeT\n Tokyo Cabinet
->14 string x \b (%s)
->32 byte 0 \b, Hash
-!:mime application/x-tokyocabinet-hash
->32 byte 1 \b, B+ tree
-!:mime application/x-tokyocabinet-btree
->32 byte 2 \b, Fixed-length
-!:mime application/x-tokyocabinet-fixed
->32 byte 3 \b, Table
-!:mime application/x-tokyocabinet-table
->33 byte &1 \b, [open]
->33 byte &2 \b, [fatal]
->34 byte x \b, apow=%d
->35 byte x \b, fpow=%d
->36 byte &0x01 \b, [large]
->36 byte &0x02 \b, [deflate]
->36 byte &0x04 \b, [bzip]
->36 byte &0x08 \b, [tcbs]
->36 byte &0x10 \b, [excodec]
->40 lequad x \b, bnum=%lld
->48 lequad x \b, rnum=%lld
->56 lequad x \b, fsiz=%lld
-
-# Type: QDBM Quick Database Manager
-# From: Benoit Sibaud <bsibaud@april.org>
-0 string \\[depot\\]\n\f Quick Database Manager, little endian
-0 string \\[DEPOT\\]\n\f Quick Database Manager, big endian
-
-# Type: TokyoCabinet database
-# URL: http://tokyocabinet.sourceforge.net/
-# From: Benoit Sibaud <bsibaud@april.org>
-0 string ToKyO\ CaBiNeT\n TokyoCabinet database
->14 string x (version %s)
-
-# From: Stephane Blondon https://www.yaal.fr
-# Database file for Zope (done by FileStorage)
-0 string FS21 Zope Object Database File Storage v3 (data)
-0 string FS30 Zope Object Database File Storage v4 (data)
-
-# Cache file for the database of Zope (done by ClientStorage)
-0 string ZEC3 Zope Object Database Client Cache File (data)
-
-# IDA (Interactive Disassembler) database
-0 string IDA1 IDA (Interactive Disassembler) database
-
-# Hopper (reverse engineering tool) https://www.hopperapp.com/
-0 string hopperdb Hopper database
-
-# URL: https://en.wikipedia.org/wiki/Panorama_(database_engine)
-# Reference: http://www.provue.com/Panorama/
-# From: Joerg Jenderek
-# NOTE: only tested with versions 4 and 6.0 under Windows
-# length of Panorama database name
-5 ubyte >0
-# look after database name for "some" null bits
->(5.B+7) ubelong&0xF3ffF000 0
-# look for first keyword
->>&1 search/2 DESIGN Panorama database
-#!:mime application/x-panorama-database
-!:apple KASXZEPD
-!:ext pan
-# database name
->>>5 pstring x \b, "%s"
-
-#
-#
-# askSam Database by Stefan A. Haubenthal <polluks@web.de>
-0 string askw40\0 askSam DB
-
-#
-#
-# MUIbase Database Tool by Stefan A. Haubenthal <polluks@web.de>
-0 string MBSTV\040 MUIbase DB
->6 string x version %s
-
-#
-# CDB database
-0 string NBCDB\012 NetBSD Constant Database
->7 byte x \b, version %d
->8 string x \b, for '%s'
->24 lelong x \b, datasize %d
->28 lelong x \b, entries %d
->32 lelong x \b, index %d
->36 lelong x \b, seed %#x
-
-#
-# Redis RDB - https://redis.io/topics/persistence
-0 string REDIS Redis RDB file,
->5 regex [0-9][0-9][0-9][0-9] version %s
-
-# Mork database.
-# Used by older versions of Mozilla Suite and Firefox,
-# and current versions of Thunderbird.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-0 string //\ <!--\ <mdb:mork:z\ v=" Mozilla Mork database
->23 string x \b, version %.3s
-
-# URL: https://en.wikipedia.org/wiki/Management_Information_Format
-# Reference: https://www.dmtf.org/sites/default/files/standards/documents/DSP0005.pdf
-# From: Joerg Jenderek
-# Note: only tested with monitor asset reports of Dell Display Manager
-# skip start like Language=fr|CA|iso8859-1
-0 search/27/C Start\040Component DMI Management Information Format
-#!:mime text/plain
-!:mime text/x-dmtf-mif
-!:ext mif
-
diff --git a/contrib/libs/libmagic/magic/Magdir/dataone b/contrib/libs/libmagic/magic/Magdir/dataone
deleted file mode 100644
index 566633eff2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dataone
+++ /dev/null
@@ -1,47 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dataone,v 1.3 2022/04/18 21:38:10 christos Exp $
-#
-# DataONE- files from Dave Vieglais <dave.vieglais@gmail.com> &
-# Pratik Shrivastava <pratikshrivastava23@gmail.com>
-#
-# file formats: https://cn.dataone.org/cn/v2/formats
-#------------------------------------------------------------------------------
-
-# EML (Ecological Metadata Language Format)
-0 string \<?xml\ version=
->&0 regex/1024 eml-[0-9]\\.[0-9]\\.[0-9]+ eml://ecoinformatics.org/%s
-
-# onedcx (DataONE Dublin Core Extended v1.0)
->&0 regex/1024 onedcx/v[0-9]\\.[0-9]+ https://ns.dataone.org/metadata/schema/onedcx/v1.0
-
-# FGDC-STD-001-1998 (Content Standard for Digital Geospatial Metadata,
-# version 001-1998)
->&0 search/1024 fgdc FGDC-STD-001-1998
-
-# Mercury (Oak Ridge National Lab Mercury Metadata version 1.0)
->&0 regex/1024 mercury/terms/v[0-9]\\.[0-9] https://purl.org/ornl/schema/mercury/terms/v1.0
-
-# ISOTC211 (Geographic MetaData (GMD) Extensible Markup Language)
->&0 search/1024 isotc211
->>&0 search/1024 eng;USA https://www.isotc211.org/2005/gmd
-
-# ISOTC211 (NOAA Variant Geographic MetaData (GMD) Extensible Markup Language)
->>&0 regex/1024 gov\\.noaa\\.nodc:[0-9]+ https://www.isotc211.org/2005/gmd-noaa
-
-# ISOTC211 PANGAEA Variant Geographic MetaData (GMD) Extensible Markup Language
->>&0 regex/1024 pangaea\\.dataset[0-9][0-9][0-9][0-9][0-9][0-9]+ https://www.isotc211.org/2005/gmd-pangaea
-!:mime text/xml
-
-
-# Object Reuse and Exchange Vocabulary
-0 string \<?xml\ version=
->&0 search/1024 rdf
->>&0 search/1024 openarchives https://www.openarchives.org/ore/terms
-!:mime application/rdf+xml
-
-
-# Dryad Metadata Application Profile Version 3.1
-0 string <DryadData
->&0 regex/1024 dryad-bibo/v[0-9]\\.[0-9] https://datadryad.org/profile/v3.1
-!:mime text/xml
diff --git a/contrib/libs/libmagic/magic/Magdir/dbpf b/contrib/libs/libmagic/magic/Magdir/dbpf
deleted file mode 100644
index df07ff809a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dbpf
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dbpf,v 1.3 2019/04/19 00:42:27 christos Exp $
-# dbpf: Maxis Database Packed Files, the stored data file format used by all
-# Maxis games after the Sims: http://wiki.niotso.org/DBPF
-# https://www.wiki.sc4devotion.com/index.php?title=DBPF
-# 13 Oct 2017, Kip Warner <kip at thevertigo dot com>
-0 string DBPF Maxis Database Packed File
->4 ulelong x \b, version: %u.
->>8 ulelong x \b%u
->>>36 ulelong x \b, files: %u
->>24 ledate !0 \b, created: %s
->>28 ledate !0 \b, modified: %s
-!:ext dbpf/package/dat/sc4
-!:mime application/x-maxis-dbpf
diff --git a/contrib/libs/libmagic/magic/Magdir/der b/contrib/libs/libmagic/magic/Magdir/der
deleted file mode 100644
index 3bc2e38aa9..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/der
+++ /dev/null
@@ -1,146 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: der,v 1.6 2023/01/11 23:59:49 christos Exp $
-# der: file(1) magic for DER encoded files
-#
-
-# Certificate information piece
-0 name certinfo
->0 der seq
->>&0 der set
->>>&0 der seq
->>>>&0 der obj_id3=550406
->>>>&0 der prt_str=x \b, countryName=%s
->>&0 der set
->>>&0 der seq
->>>>&0 der obj_id3=550408
->>>>&0 der utf8_str=x \b, stateOrProvinceName=%s
->>&0 der set
->>>&0 der seq
->>>>&0 der obj_id3=55040a
->>>>&0 der utf8_str=x \b, organizationName=%s
->>&0 der set
->>>&0 der seq
->>>>&0 der obj_id3=550403
->>>>&0 der utf8_str=x \b, commonName=%s
->>&0 der seq
-
-# Certificate requests
-0 der seq
->&0 der seq
->>&0 der int1=00 DER Encoded Certificate request
->>&0 use certinfo
-
-# Key Pairs
-0 der seq
->&0 der int1=00
->&0 der int65=x
->&0 der int3=010001 DER Encoded Key Pair, 512 bits
-
-0 der seq
->&0 der int1=00
->&0 der int129=x
->&0 der int3=010001 DER Encoded Key Pair, 1024 bits
-
-0 der seq
->&0 der int1=00
->&0 der int257=x
->&0 der int3=010001 DER Encoded Key Pair, 2048 bits
-
-0 der seq
->&0 der int1=00
->&0 der int513=x
->&0 der int3=010001 DER Encoded Key Pair, 4096 bits
-
-0 der seq
->&0 der int1=00
->&0 der int1025=x
->&0 der int3=010001 DER Encoded Key Pair, 8192 bits
-
-0 der seq
->&0 der int1=00
->&0 der int2049=x
->&0 der int3=010001 DER Encoded Key Pair, 16k bits
-
-0 der seq
->&0 der int1=00
->&0 der int4097=x
->&0 der int3=010001 DER Encoded Key Pair, 32k bits
-
-# Certificates
-0 der seq
->&0 der seq
->>&0 der int2=0dfa DER Encoded Certificate, 512 bits
->>&0 der int2=0dfb DER Encoded Certificate, 1024 bits
->>&0 der int2=0dfc DER Encoded Certificate, 2048 bits
->>&0 der int2=0dfd DER Encoded Certificate, 4096 bits
->>&0 der int2=0dfe DER Encoded Certificate, 8192 bits
->>&0 der int2=0dff DER Encoded Certificate, 16k bits
->>&0 der int2=0e04 DER Encoded Certificate, 32k bits
->>&0 der int2=x DER Encoded Certificate, ? bits (%s)
->>&0 der seq
->>>&0 der obj_id9=2a864886f70d010105 \b, sha1WithRSAEncryption
->>>&0 der obj_id9=x \b, ? Encryption (%s)
->>>&0 der null
->>&0 der seq
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550406
->>>>>&0 der prt_str=x \b, countryName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550408
->>>>>&0 der prt_str=x \b, stateOrProvinceName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550407
->>>>>&0 der prt_str=x \b, localityName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=55040a
->>>>>&0 der prt_str=x \b, organizationName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=55040b
->>>>>&0 der prt_str=x \b, organizationUnitName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550403
->>>>>&0 der prt_str=x \b, commonName=%s
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id9=2a864886f70d010901
->>>>>&0 der ia5_str=x \b, emailAddress=%s
-#>>&0 der seq
-#>>>&0 der utc_time=x \b, utcTime=%s
-#>>>&0 der utc_time=x \b, utcTime=%s
->>&0 use certinfo
-
-0 der seq
->&0 der seq
->>&0 der eoc
->>>&0 der int1=02 Certificate, Version=3
->>>&0 der int1=x Certificate, Version=%s
->>&0 der int9=x \b, Serial=%s
->>&0 der seq
->>>&0 der obj_id9=2a864886f70d01010b
->>>&0 der null
->>&0 der seq
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550403
->>>>>&0 der utf8_str=x \b, Issuer=%s
-#>>&0 der seq
-#>>>&0 der utc_time=x \b, not-valid-before=%s
-#>>>&0 der utc_time=x \b, not-valid-after=%s
->>&0 der seq
->>>&0 der set
->>>>&0 der seq
->>>>>&0 der obj_id3=550403
->>>>>&0 der utf8_str=x \b, Subject=%s
-
-# PKCS#7 Signed Data (e.g. JAR Signature Block File)
-# OID 1.2.840.113549.1.7.2 (2a864886f70d010702)
-# Reference: https://www.rfc-editor.org/rfc/rfc2315
-0 der seq
->&0 der obj_id9=2a864886f70d010702 DER Encoded PKCS#7 Signed Data
-!:ext RSA/DSA/EC
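The last entry above keys PKCS#7 Signed Data off an outer DER SEQUENCE followed immediately by the contentType OID 1.2.840.113549.1.7.2. The same check can be done by hand by skipping the SEQUENCE's length octets and comparing the encoded OID; a minimal sketch under that assumption, with a hypothetical helper name:

import sys

PKCS7_SIGNED_DATA = bytes.fromhex("06092a864886f70d010702")  # OID 1.2.840.113549.1.7.2

def looks_like_pkcs7_signed_data(path):
    with open(path, "rb") as f:
        data = f.read(32)
    if len(data) < 2 or data[0] != 0x30:                      # outer DER SEQUENCE tag
        return False
    # Skip the SEQUENCE length: short form is one octet, long form is 1 + n octets.
    i = 2 + ((data[1] & 0x7f) if data[1] & 0x80 else 0)
    return data[i:i + len(PKCS7_SIGNED_DATA)] == PKCS7_SIGNED_DATA

if __name__ == "__main__":
    print(looks_like_pkcs7_signed_data(sys.argv[1]))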
diff --git a/contrib/libs/libmagic/magic/Magdir/diamond b/contrib/libs/libmagic/magic/Magdir/diamond
deleted file mode 100644
index 39d1ed6258..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/diamond
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: diamond,v 1.7 2009/09/19 16:28:08 christos Exp $
-# diamond: file(1) magic for Diamond system
-#
-# ... diamond is a multi-media mail and electronic conferencing system....
-#
-# XXX - I think it was either renamed Slate, or replaced by Slate....
-#
-# The full deal is too long...
-#0 string <list>\n<protocol\ bbn-multimedia-format> Diamond Multimedia Document
-0 string =<list>\n<protocol\ bbn-m Diamond Multimedia Document
diff --git a/contrib/libs/libmagic/magic/Magdir/dif b/contrib/libs/libmagic/magic/Magdir/dif
deleted file mode 100644
index 9d7e5fd25b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dif
+++ /dev/null
@@ -1,33 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dif,v 1.1 2020/04/09 19:14:01 christos Exp $
-# dif: file(1) magic for DIF text files
-
-#------------------------------------------------------------------------------
-# From: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/Data_Interchange_Format
-# http://fileformats.archiveteam.org/wiki/Data_Interchange_Format
-# Note: called by TrID "Data Interchange Format",
-# by DROID x-fmt/368 "VisiCalc Database"
-0 string TABLE
-# skip text starting with TABLE by looking for numeric version on 2nd line
->6 search/2 0,
-# skip DROID x-fmt-41-signature-id-380.dif by looking for key word TUPLES at the beginning
->>27 search/128 TUPLES Data Interchange Format
-# https://www.pcmatic.com/company/libraries/fileextension/detail.asp?ext=dif.html
-#!:mime application/x-dif-spreadsheet Gnumeric
-# https://github.com/LibreOffice/online/blob/master/discovery.xml
-#!:mime application/x-dif-document LibreOffice
-# https://www.wikidata.org/wiki/Wikidata:WikiProject_Informatics/File_formats/Lists/File_formats
-!:mime application/x-dif
-# https://extension.nirsoft.net/dif
-#!:mime application/vnd.ms-excel
-#!:mime text/plain
-!:ext dif
-# look for double quote 0x22 on 3rd line
->>>10 search/3 "
-# skip if next character also double quote
->>>>&0 ubyte !0x22 \b, generator or table name
-# comment like EXCEL, pwm enclosed in double quotes
->>>>>&-2 string x %s
-
diff --git a/contrib/libs/libmagic/magic/Magdir/diff b/contrib/libs/libmagic/magic/Magdir/diff
deleted file mode 100644
index a6124e3f70..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/diff
+++ /dev/null
@@ -1,41 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: diff,v 1.17 2020/08/22 18:16:58 christos Exp $
-# diff: file(1) magic for diff(1) output
-#
-0 search/1 diff\040 diff output text
-!:mime text/x-diff
-0 search/1 ***\040
->&0 search/1024 \n---\040 context diff output text
-!:mime text/x-diff
-0 search/1 Only\040in\040 diff output text
-!:mime text/x-diff
-0 search/1 Common\040subdirectories:\040 diff output text
-!:mime text/x-diff
-
-0 search/1 Index: RCS/CVS diff output text
-!:mime text/x-diff
-
-# bsdiff: file(1) magic for bsdiff(1) output
-0 string/b BSDIFF40 bsdiff(1) patch file
-
-
-# unified diff
-0 search/4096 ---\040
->&0 search/1024 \n
->>&0 search/1 +++\040
->>>&0 search/1024 \n
->>>>&0 search/1 @@ unified diff output text
-!:mime text/x-diff
-!:strength + 90
-
-# librsync -- the library for network deltas
-#
-# Copyright (C) 2001 by Martin Pool. You may do whatever you want with
-# this file.
-#
-0 belong 0x72730236 rdiff network-delta data
-
-0 belong 0x72730136 rdiff network-delta signature data
->4 belong x (block length=%d,
->8 belong x signature strength=%d)
diff --git a/contrib/libs/libmagic/magic/Magdir/digital b/contrib/libs/libmagic/magic/Magdir/digital
deleted file mode 100644
index b2753b9898..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/digital
+++ /dev/null
@@ -1,59 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: digital,v 1.12 2021/07/03 14:01:46 christos Exp $
-# Digital UNIX - Info
-#
-0 string =!<arch>\n________64E Alpha archive
->22 string X -- out of date
-#
-
-0 leshort 0603
->24 leshort 0410 COFF format alpha pure
->24 leshort 0413 COFF format alpha demand paged
->>22 leshort&030000 !020000 executable
->>22 leshort&020000 !0 dynamically linked
->>16 lelong !0 not stripped
->>16 lelong 0 stripped
->>27 byte x - version %d
->>26 byte x \b.%d
->>28 byte x \b-%d
->24 leshort 0407 COFF format alpha object
->>22 leshort&030000 020000 shared library
->>27 byte x - version %d
->>26 byte x \b.%d
->>28 byte x \b-%d
-
-# Basic recognition of Digital UNIX core dumps - Mike Bremford <mike@opac.bl.uk>
-#
-# The actual magic number is just "Core", followed by a 2-byte version
-# number; however, treating any file that begins with "Core" as a Digital
-# UNIX core dump file may produce too many false hits, so we include one
-# byte of the version number as well; DU 5.0 appears only to be up to
-# version 2.
-#
-0 string Core\001 Alpha COFF format core dump (Digital UNIX)
->24 string >\0 \b, from '%s'
-0 string Core\002 Alpha COFF format core dump (Digital UNIX)
->24 string >\0 \b, from '%s'
-#
-# The next is incomplete, we could tell more about this format,
-# but its not worth it.
-0 leshort 0x188 Alpha compressed COFF
-0 leshort 0x18f Alpha u-code object
-#
-#
-# Some other interesting Digital formats,
-0 string \377\377\177 ddis/ddif
-0 string \377\377\174 ddis/dots archive
-0 string \377\377\176 ddis/dtif table data
-0 string \033c\033 LN03 output
-0 long 04553207 X image
-#
-0 string =!<PDF>!\n profiling data file
-#
-# Locale data tables (MIPS and Alpha).
-#
-# GRR: line below is too general as it matches also TTComp archive, ASCII, 2K handled by ./archive
-0 short 0x0501 locale data table
->6 short 0x24 for MIPS
->6 short 0x40 for Alpha
diff --git a/contrib/libs/libmagic/magic/Magdir/dolby b/contrib/libs/libmagic/magic/Magdir/dolby
deleted file mode 100644
index d73e7d35f9..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dolby
+++ /dev/null
@@ -1,69 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dolby,v 1.9 2019/04/19 00:42:27 christos Exp $
-# ATSC A/53 aka AC-3 aka Dolby Digital <ashitaka@gmx.at>
-# from https://www.atsc.org/standards/a_52a.pdf
-# corrections, additions, etc. are always welcome!
-#
-# syncword
-0 beshort 0x0b77 ATSC A/52 aka AC-3 aka Dolby Digital stream,
-# Proposed audio/ac3 RFC/4184
-!:mime audio/vnd.dolby.dd-raw
-# fscod
->4 byte&0xc0 = 0x00 48 kHz,
->4 byte&0xc0 = 0x40 44.1 kHz,
->4 byte&0xc0 = 0x80 32 kHz,
-# is this one used for 96 kHz?
->4 byte&0xc0 = 0xc0 reserved frequency,
-#
->5 byte&0x07 = 0x00 \b, complete main (CM)
->5 byte&0x07 = 0x01 \b, music and effects (ME)
->5 byte&0x07 = 0x02 \b, visually impaired (VI)
->5 byte&0x07 = 0x03 \b, hearing impaired (HI)
->5 byte&0x07 = 0x04 \b, dialogue (D)
->5 byte&0x07 = 0x05 \b, commentary (C)
->5 byte&0x07 = 0x06 \b, emergency (E)
->5 beshort&0x07e0 0x0720 \b, voiceover (VO)
->5 beshort&0x07e0 >0x0720 \b, karaoke
-# acmod
->6 byte&0xe0 = 0x00 1+1 front,
->>6 byte&0x10 = 0x10 LFE on,
->6 byte&0xe0 = 0x20 1 front/0 rear,
->>6 byte&0x10 = 0x10 LFE on,
->6 byte&0xe0 = 0x40 2 front/0 rear,
-# dsurmod (for stereo only)
->>6 byte&0x18 = 0x00 Dolby Surround not indicated
->>6 byte&0x18 = 0x08 not Dolby Surround encoded
->>6 byte&0x18 = 0x10 Dolby Surround encoded
->>6 byte&0x18 = 0x18 reserved Dolby Surround mode
->>6 byte&0x04 = 0x04 LFE on,
->6 byte&0xe0 = 0x60 3 front/0 rear,
->>6 byte&0x04 = 0x04 LFE on,
->6 byte&0xe0 = 0x80 2 front/1 rear,
->>6 byte&0x04 = 0x04 LFE on,
->6 byte&0xe0 = 0xa0 3 front/1 rear,
->>6 byte&0x01 = 0x01 LFE on,
->6 byte&0xe0 = 0xc0 2 front/2 rear,
->>6 byte&0x04 = 0x04 LFE on,
->6 byte&0xe0 = 0xe0 3 front/2 rear,
->>6 byte&0x01 = 0x01 LFE on,
-#
->4 byte&0x3e = 0x00 \b, 32 kbit/s
->4 byte&0x3e = 0x02 \b, 40 kbit/s
->4 byte&0x3e = 0x04 \b, 48 kbit/s
->4 byte&0x3e = 0x06 \b, 56 kbit/s
->4 byte&0x3e = 0x08 \b, 64 kbit/s
->4 byte&0x3e = 0x0a \b, 80 kbit/s
->4 byte&0x3e = 0x0c \b, 96 kbit/s
->4 byte&0x3e = 0x0e \b, 112 kbit/s
->4 byte&0x3e = 0x10 \b, 128 kbit/s
->4 byte&0x3e = 0x12 \b, 160 kbit/s
->4 byte&0x3e = 0x14 \b, 192 kbit/s
->4 byte&0x3e = 0x16 \b, 224 kbit/s
->4 byte&0x3e = 0x18 \b, 256 kbit/s
->4 byte&0x3e = 0x1a \b, 320 kbit/s
->4 byte&0x3e = 0x1c \b, 384 kbit/s
->4 byte&0x3e = 0x1e \b, 448 kbit/s
->4 byte&0x3e = 0x20 \b, 512 kbit/s
->4 byte&0x3e = 0x22 \b, 576 kbit/s
->4 byte&0x3e = 0x24 \b, 640 kbit/s
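The AC-3 entries above decode everything from the first seven bytes after the 0x0b77 syncword: sample rate from byte 4 & 0xc0, nominal bitrate from byte 4 & 0x3e, and the channel configuration (acmod) from the top bits of byte 6. A minimal Python sketch of the same table lookups; the bitrate table is copied from the pattern and the function name is hypothetical:

import sys

RATES = {0x00: 48000, 0x40: 44100, 0x80: 32000}      # fscod bits, byte 4 & 0xc0
BITRATES = dict(zip(range(0x00, 0x26, 2),
                    (32, 40, 48, 56, 64, 80, 96, 112, 128, 160, 192,
                     224, 256, 320, 384, 448, 512, 576, 640)))  # byte 4 & 0x3e, kbit/s

def describe_ac3(path):
    with open(path, "rb") as f:
        hdr = f.read(8)
    if len(hdr) < 8 or hdr[0:2] != b"\x0b\x77":       # syncword
        return None
    return {"sample_rate": RATES.get(hdr[4] & 0xc0, "reserved"),
            "kbit/s": BITRATES.get(hdr[4] & 0x3e),
            "acmod": (hdr[6] & 0xe0) >> 5}            # 3-bit channel configuration

if __name__ == "__main__":
    print(describe_ac3(sys.argv[1]))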
diff --git a/contrib/libs/libmagic/magic/Magdir/dump b/contrib/libs/libmagic/magic/Magdir/dump
deleted file mode 100644
index cc5644d3e1..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dump
+++ /dev/null
@@ -1,96 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dump,v 1.17 2018/06/26 01:07:17 christos Exp $
-# dump: file(1) magic for dump file format--for new and old dump filesystems
-#
-# We specify both byte orders in order to recognize byte-swapped dumps.
-#
-0 name new-dump-be
->4 bedate x This dump %s,
->8 bedate x Previous dump %s,
->12 belong >0 Volume %d,
->692 belong 0 Level zero, type:
->692 belong >0 Level %d, type:
->0 belong 1 tape header,
->0 belong 2 beginning of file record,
->0 belong 3 map of inodes on tape,
->0 belong 4 continuation of file record,
->0 belong 5 end of volume,
->0 belong 6 map of inodes deleted,
->0 belong 7 end of medium (for floppy),
->676 string >\0 Label %s,
->696 string >\0 Filesystem %s,
->760 string >\0 Device %s,
->824 string >\0 Host %s,
->888 belong >0 Flags %x
-
-0 name old-dump-be
-#>4 bedate x This dump %s,
-#>8 bedate x Previous dump %s,
->12 belong >0 Volume %d,
->692 belong 0 Level zero, type:
->692 belong >0 Level %d, type:
->0 belong 1 tape header,
->0 belong 2 beginning of file record,
->0 belong 3 map of inodes on tape,
->0 belong 4 continuation of file record,
->0 belong 5 end of volume,
->0 belong 6 map of inodes deleted,
->0 belong 7 end of medium (for floppy),
->676 string >\0 Label %s,
->696 string >\0 Filesystem %s,
->760 string >\0 Device %s,
->824 string >\0 Host %s,
->888 belong >0 Flags %x
-
-0 name ufs2-dump-be
->896 beqdate x This dump %s,
->904 beqdate x Previous dump %s,
->12 belong >0 Volume %d,
->692 belong 0 Level zero, type:
->692 belong >0 Level %d, type:
->0 belong 1 tape header,
->0 belong 2 beginning of file record,
->0 belong 3 map of inodes on tape,
->0 belong 4 continuation of file record,
->0 belong 5 end of volume,
->0 belong 6 map of inodes deleted,
->0 belong 7 end of medium (for floppy),
->676 string >\0 Label %s,
->696 string >\0 Filesystem %s,
->760 string >\0 Device %s,
->824 string >\0 Host %s,
->888 belong >0 Flags %x
-
-24 belong 60012 new-fs dump file (big endian),
->0 use new-dump-be
-
-24 belong 60011 old-fs dump file (big endian),
->0 use old-dump-be
-
-24 lelong 60012 new-fs dump file (little endian),
-# to correctly recognize '*.mo' GNU message catalog (little endian)
-!:strength - 15
->0 use \^new-dump-be
-
-24 lelong 60011 old-fs dump file (little endian),
->0 use \^old-dump-be
-
-
-24 belong 0x19540119 new-fs dump file (ufs2, big endian),
->0 use ufs2-dump-be
-
-24 lelong 0x19540119 new-fs dump file (ufs2, little endian),
->0 use \^ufs2-dump-be
-
-18 leshort 60011 old-fs dump file (16-bit, assuming PDP-11 endianness),
->2 medate x Previous dump %s,
->6 medate x This dump %s,
->10 leshort >0 Volume %d,
->0 leshort 1 tape header.
->0 leshort 2 beginning of file record.
->0 leshort 3 map of inodes on tape.
->0 leshort 4 continuation of file record.
->0 leshort 5 end of volume.
->0 leshort 6 map of inodes deleted.
->0 leshort 7 end of medium (for floppy).
diff --git a/contrib/libs/libmagic/magic/Magdir/dwarfs b/contrib/libs/libmagic/magic/Magdir/dwarfs
deleted file mode 100644
index 3700a33c5d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dwarfs
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dwarfs,v 1.2 2023/05/23 13:37:32 christos Exp $
-# dwarfs: file(1) magic for DwarFS File System Image files
-# URL: https://github.com/mhx/dwarfs for details about DwarFS
-# From: Marcus Holland-Moritz <github@mhxnet.de>
-
-#### DwarFS Version Macro
-0 name dwarfsversion
->&0 byte x \b, version %d
->&1 byte x \b.%d
-
-#### DwarFS Compression Macro
-0 name dwarfscompression
->&0 leshort =0 \b, uncompressed
->&0 leshort =1 \b, LZMA compression
->&0 leshort =2 \b, ZSTD compression
->&0 leshort =3 \b, LZ4 compression
->&0 leshort =4 \b, LZ4HC compression
->&0 leshort =5 \b, BROTLI compression
-
-#### DwarFS files without header
-## We first check against a DWARFS magic at the start of the file, then
-## validate by checking the block count / section type to be all zeros
-## for the first block. Finally, we check that the *next* block also
-## has the correct DWARFS magic.
-0 string DWARFS
->&0x2A string/b \0\0\0\0\0\0
->>&(&0x02.q+0x0A) string DWARFS DwarFS File System Image
->>>&0 use dwarfsversion
->>&0 use dwarfscompression
-
-#### DwarFS files with header
-## We search for a DWARFS magic in the first 64k of the file (images with
-## headers longer than 64k won't be recognized), then validate by checking
-## the block count / section type to be all zeros for the first block.
-## Finally, we check that the *next* block also has the correct DWARFS magic.
-## If we find a DWARFS magic that doesn't pass validation, we continue with
-## an indirect match recursively.
-1 search/65536/b DWARFS
->&0x2A string/b \0\0\0\0\0\0
->>&(&0x02.q+0x0A) string DWARFS DwarFS File System Image (with header)
->>>&0 use dwarfsversion
->>&0 use dwarfscompression
->&-1 indirect x
diff --git a/contrib/libs/libmagic/magic/Magdir/dyadic b/contrib/libs/libmagic/magic/Magdir/dyadic
deleted file mode 100644
index c57f81b7cb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/dyadic
+++ /dev/null
@@ -1,61 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: dyadic,v 1.9 2019/04/19 00:42:27 christos Exp $
-# Dyadic: file(1) magic for Dyalog APL.
-#
-# updated by Joerg Jenderek at Oct 2013
-# https://en.wikipedia.org/wiki/Dyalog_APL
-# https://www.dyalog.com/
-# .DXV Dyalog APL External Variable
-# .DIN Dyalog APL Input Table
-# .DOT Dyalog APL Output Table
-# .DFT Dyalog APL Format File
-0 ubeshort&0xFF60 0xaa00
-# skip biblio.dbt
->1 byte !4
-# real Dyalog APL have non zero version numbers like 7.3 or 13.4
->>2 ubeshort >0x0000 Dyalog APL
->>>1 byte 0x00 aplcore
-#>>>1 byte 0x00 incomplete workspace
-# *.DCF Dyalog APL Component File
->>>1 byte 0x01 component file 32-bit non-journaled non-checksummed
-#>>>1 byte 0x01 component file
->>>1 byte 0x02 external variable exclusive
-#>>>1 byte 0x02 external variable
-# *.DWS Dyalog APL Workspace
->>>1 byte 0x03 workspace
->>>>7 byte&0x28 0x00 32-bit
->>>>7 byte&0x28 0x20 64-bit
->>>>7 byte&0x0c 0x00 classic
->>>>7 byte&0x0c 0x04 unicode
->>>>7 byte&0x88 0x00 big-endian
->>>>7 byte&0x88 0x80 little-endian
->>>1 byte 0x06 external variable shared
-# *.DSE Dyalog APL Session , *.DLF Dyalog APL Session Log File
->>>1 byte 0x07 session
->>>1 byte 0x08 mapped file 32-bit
->>>1 byte 0x09 component file 64-bit non-journaled non-checksummed
->>>1 byte 0x0a mapped file 64-bit
->>>1 byte 0x0b component file 32-bit level 1 journaled non-checksummed
->>>1 byte 0x0c component file 64-bit level 1 journaled non-checksummed
->>>1 byte 0x0d component file 32-bit level 1 journaled checksummed
->>>1 byte 0x0e component file 64-bit level 1 journaled checksummed
->>>1 byte 0x0f component file 32-bit level 2 journaled checksummed
->>>1 byte 0x10 component file 64-bit level 2 journaled checksummed
->>>1 byte 0x11 component file 32-bit level 3 journaled checksummed
->>>1 byte 0x12 component file 64-bit level 3 journaled checksummed
->>>1 byte 0x13 component file 32-bit non-journaled checksummed
->>>1 byte 0x14 component file 64-bit non-journaled checksummed
->>>1 byte 0x15 component file under construction
->>>1 byte 0x16 DFS component file 64-bit level 1 journaled checksummed
->>>1 byte 0x17 DFS component file 64-bit level 2 journaled checksummed
->>>1 byte 0x18 DFS component file 64-bit level 3 journaled checksummed
->>>1 byte 0x19 external workspace
->>>1 byte 0x80 DDB
->>>2 byte x version %d
->>>3 byte x \b.%d
-#>>>2 byte x type %d
-#>>>3 byte x subtype %d
-
-# *.DXF Dyalog APL Transfer File
-0 short 0x6060 Dyalog APL transfer
diff --git a/contrib/libs/libmagic/magic/Magdir/ebml b/contrib/libs/libmagic/magic/Magdir/ebml
deleted file mode 100644
index d37b5c0b23..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ebml
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ebml,v 1.2 2019/04/19 00:42:27 christos Exp $
-# ebml: file(1) magic for various Extensible Binary Meta Language
-# https://www.matroska.org/technical/specs/index.html#track
-0 belong 0x1a45dfa3 EBML file
->4 search/b/100 \102\202
->>&1 string x \b, creator %.8s
diff --git a/contrib/libs/libmagic/magic/Magdir/edid b/contrib/libs/libmagic/magic/Magdir/edid
deleted file mode 100644
index a17b6c4ea7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/edid
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: edid,v 1.1 2019/03/28 12:36:01 christos Exp $
-# edid: file(1) magic for EDID dump files
-
-0 quad 0x00ffffffffffff00 Extended display identification data dump
-!:mime application/x-edid-dump
->18 byte 0x01 Version 1
->>19 byte <0x04 \b.%d
->18 byte 0x02 Version 2
->>19 byte 0x00 \b.0
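The EDID pattern above matches the fixed 8-byte header 00 FF FF FF FF FF FF 00 and then prints the version and revision bytes at offsets 18 and 19. A minimal Python sketch doing the same; the function name is hypothetical:

import sys

EDID_HEADER = bytes.fromhex("00ffffffffffff00")

def edid_version(path):
    with open(path, "rb") as f:
        data = f.read(20)
    if len(data) < 20 or not data.startswith(EDID_HEADER):
        return None
    return f"{data[18]}.{data[19]}"                   # version.revision

if __name__ == "__main__":
    print(edid_version(sys.argv[1]))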
diff --git a/contrib/libs/libmagic/magic/Magdir/editors b/contrib/libs/libmagic/magic/Magdir/editors
deleted file mode 100644
index 48eaa116e3..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/editors
+++ /dev/null
@@ -1,43 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: editors,v 1.12 2020/10/11 20:28:07 christos Exp $
-# T602 editor documents
-# by David Necas <yeti@physics.muni.cz>
-0 string @CT\ T602 document data,
->4 string 0 Kamenicky
->4 string 1 CP 852
->4 string 2 KOI8-CS
->4 string >2 unknown encoding
-
-# Vi IMproved Encrypted file
-# by David Necas <yeti@physics.muni.cz>
-# updated by Osman Surkatty
-0 string VimCrypt~ Vim encrypted file data
->9 string 01! with zip cryptmethod
->9 string 02! with blowfish cryptmethod
->9 string 03! with blowfish2 cryptmethod
-
-0 name vimnanoswap
->67 byte 0
->>107 byte 0
-#>>>2 string x %s swap file
->>>24 ulelong x \b, pid %d
->>>28 string >\0 \b, user %s
->>>68 string >\0 \b, host %s
->>>108 string >\0 \b, file %s
->>>1007 byte 0x55 \b, modified
-
-# Vi IMproved Swap file
-# by Sven Wegener <swegener@gentoo.org>
-0 string b0VIM\ Vim swap file
->&0 string >\0 \b, version %s
->0 use vimnanoswap
-
-
-# Lock/swap file for several editors, at least
-# Vi IMproved and nano
-0 string b0nano Nano swap file
->0 use vimnanoswap
-
-# kate (K Advanced Text Editor)
-0 string \x00\x00\x00\x12Kate\ Swap\ File\ 2.0\x00 Kate swap file
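The Vim encryption entry above keys off the "VimCrypt~" prefix and the two-digit method code that follows at offset 9. A minimal sketch mapping the same codes; the function name is hypothetical:

import sys

VIM_CRYPT_METHODS = {b"01!": "zip", b"02!": "blowfish", b"03!": "blowfish2"}

def vim_crypt_method(path):
    with open(path, "rb") as f:
        hdr = f.read(12)
    if not hdr.startswith(b"VimCrypt~"):
        return None
    return VIM_CRYPT_METHODS.get(hdr[9:12], "unknown")   # method code at offset 9

if __name__ == "__main__":
    print(vim_crypt_method(sys.argv[1]))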
diff --git a/contrib/libs/libmagic/magic/Magdir/efi b/contrib/libs/libmagic/magic/Magdir/efi
deleted file mode 100644
index 7760100b19..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/efi
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: efi,v 1.5 2014/04/30 21:41:02 christos Exp $
-# efi: file(1) magic for Universal EFI binaries
-
-0 lelong 0x0ef1fab9
->4 lelong 1 Universal EFI binary with 1 architecture
->>&0 lelong 7 \b, i386
->>&0 lelong 0x01000007 \b, x86_64
->4 lelong 2 Universal EFI binary with 2 architectures
->>&0 lelong 7 \b, i386
->>&0 lelong 0x01000007 \b, x86_64
->>&20 lelong 7 \b, i386
->>&20 lelong 0x01000007 \b, x86_64
->4 lelong >2 Universal EFI binary with %d architectures
diff --git a/contrib/libs/libmagic/magic/Magdir/elf b/contrib/libs/libmagic/magic/Magdir/elf
deleted file mode 100644
index d3ec0260af..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/elf
+++ /dev/null
@@ -1,379 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: elf,v 1.88 2023/01/08 17:09:18 christos Exp $
-# elf: file(1) magic for ELF executables
-#
-# We have to check the byte order flag to see what byte order all the
-# other stuff in the header is in.
-#
-# What're the correct byte orders for the nCUBE and the Fujitsu VPP500?
-#
-# https://www.sco.com/developers/gabi/latest/ch4.eheader.html
-#
-# Created by: unknown
-# Modified by (1): Daniel Quinlan <quinlan@yggdrasil.com>
-# Modified by (2): Peter Tobias <tobias@server.et-inf.fho-emden.de> (core support)
-# Modified by (3): Christian 'Dr. Disk' Hechelmann <drdisk@ds9.au.s.shuttle.de> (fix of core support)
-# Modified by (4): <gerardo.cacciari@gmail.com> (VMS Itanium)
-# Modified by (5): Matthias Urlichs <smurf@debian.org> (Listing of many architectures)
-
-0 name elf-mips
->0 lelong&0xf0000000 0x00000000 MIPS-I
->0 lelong&0xf0000000 0x10000000 MIPS-II
->0 lelong&0xf0000000 0x20000000 MIPS-III
->0 lelong&0xf0000000 0x30000000 MIPS-IV
->0 lelong&0xf0000000 0x40000000 MIPS-V
->0 lelong&0xf0000000 0x50000000 MIPS32
->0 lelong&0xf0000000 0x60000000 MIPS64
->0 lelong&0xf0000000 0x70000000 MIPS32 rel2
->0 lelong&0xf0000000 0x80000000 MIPS64 rel2
->0 lelong&0xf0000000 0x90000000 MIPS32 rel6
->0 lelong&0xf0000000 0xa0000000 MIPS64 rel6
-
-0 name elf-sparc
->0 lelong&0x00ffff00 0x00000100 V8+ Required,
->0 lelong&0x00ffff00 0x00000200 Sun UltraSPARC1 Extensions Required,
->0 lelong&0x00ffff00 0x00000400 HaL R1 Extensions Required,
->0 lelong&0x00ffff00 0x00000800 Sun UltraSPARC3 Extensions Required,
->0 lelong&0x3 0 total store ordering,
->0 lelong&0x3 1 partial store ordering,
->0 lelong&0x3 2 relaxed memory ordering,
-
-0 name elf-pa-risc
->2 leshort 0x020b 1.0
->2 leshort 0x0210 1.1
->2 leshort 0x0214 2.0
->0 leshort &0x0008 (LP64)
-
-0 name elf-riscv
->0 lelong&0x00000001 0x00000001 RVC,
->0 lelong&0x00000008 0x00000008 RVE,
->0 lelong&0x00000006 0x00000000 soft-float ABI,
->0 lelong&0x00000006 0x00000002 single-float ABI,
->0 lelong&0x00000006 0x00000004 double-float ABI,
->0 lelong&0x00000006 0x00000006 quad-float ABI,
-
-0 name elf-le
->16 leshort 0 no file type,
-!:mime application/octet-stream
->16 leshort 1 relocatable,
-!:mime application/x-object
->16 leshort 2 executable,
-!:mime application/x-executable
->16 leshort 3 ${x?pie executable:shared object},
-
-!:mime application/x-${x?pie-executable:sharedlib}
->16 leshort 4 core file,
-!:mime application/x-coredump
-# OS-specific
->7 byte 202
->>16 leshort 0xFE01 executable,
-!:mime application/x-executable
-# Core file detection is not reliable.
-#>>>(0x38+0xcc) string >\0 of '%s'
-#>>>(0x38+0x10) lelong >0 (signal %d),
->16 leshort &0xff00
->>18 leshort !8 processor-specific,
->>18 leshort 8
->>>16 leshort 0xFF80 PlayStation 2 IOP module,
-!:mime application/x-sharedlib
->>>16 leshort !0xFF80 processor-specific,
->18 clear x
->18 leshort 0 no machine,
->18 leshort 1 AT&T WE32100,
->18 leshort 2 SPARC,
->18 leshort 3 Intel 80386,
->18 leshort 4 Motorola m68k,
->>4 byte 1
->>>36 lelong &0x01000000 68000,
->>>36 lelong &0x00810000 CPU32,
->>>36 lelong 0 68020,
->18 leshort 5 Motorola m88k,
->18 leshort 6 Intel 80486,
->18 leshort 7 Intel 80860,
-# The official e_machine number for MIPS is now #8, regardless of endianness.
-# The second number (#10) will be deprecated later. For now, we still
-# say something if #10 is encountered, but only gory details for #8.
->18 leshort 8 MIPS,
->>4 byte 1
->>>36 lelong &0x20 N32
->18 leshort 10 MIPS,
->>4 byte 1
->>>36 lelong &0x20 N32
->18 leshort 8
-# only for 32-bit
->>4 byte 1
->>>36 use elf-mips
-# only for 64-bit
->>4 byte 2
->>>48 use elf-mips
->18 leshort 9 Amdahl,
->18 leshort 10 MIPS (deprecated),
->18 leshort 11 RS6000,
->18 leshort 15 PA-RISC,
-# only for 32-bit
->>4 byte 1
->>>36 use elf-pa-risc
-# only for 64-bit
->>4 byte 2
->>>48 use elf-pa-risc
->18 leshort 16 nCUBE,
->18 leshort 17 Fujitsu VPP500,
->18 leshort 18 SPARC32PLUS,
-# only for 32-bit
->>4 byte 1
->>>36 use elf-sparc
->18 leshort 19 Intel 80960,
->18 leshort 20 PowerPC or cisco 4500,
->18 leshort 21 64-bit PowerPC or cisco 7500,
->>48 lelong 0 Unspecified or Power ELF V1 ABI,
->>48 lelong 1 Power ELF V1 ABI,
->>48 lelong 2 OpenPOWER ELF V2 ABI,
->18 leshort 22 IBM S/390,
->18 leshort 23 Cell SPU,
->18 leshort 24 cisco SVIP,
->18 leshort 25 cisco 7200,
->18 leshort 36 NEC V800 or cisco 12000,
->18 leshort 37 Fujitsu FR20,
->18 leshort 38 TRW RH-32,
->18 leshort 39 Motorola RCE,
->18 leshort 40 ARM,
->>4 byte 1
->>>36 lelong&0xff000000 0x04000000 EABI4
->>>36 lelong&0xff000000 0x05000000 EABI5
->>>36 lelong &0x00800000 BE8
->>>36 lelong &0x00400000 LE8
->18 leshort 41 Alpha,
->18 leshort 42 Renesas SH,
->18 leshort 43 SPARC V9,
->>4 byte 2
->>>48 use elf-sparc
->18 leshort 44 Siemens Tricore Embedded Processor,
->18 leshort 45 Argonaut RISC Core, Argonaut Technologies Inc.,
->18 leshort 46 Renesas H8/300,
->18 leshort 47 Renesas H8/300H,
->18 leshort 48 Renesas H8S,
->18 leshort 49 Renesas H8/500,
->18 leshort 50 IA-64,
->18 leshort 51 Stanford MIPS-X,
->18 leshort 52 Motorola Coldfire,
->18 leshort 53 Motorola M68HC12,
->18 leshort 54 Fujitsu MMA,
->18 leshort 55 Siemens PCP,
->18 leshort 56 Sony nCPU,
->18 leshort 57 Denso NDR1,
->18 leshort 58 Start*Core,
->18 leshort 59 Toyota ME16,
->18 leshort 60 ST100,
->18 leshort 61 Tinyj emb.,
->18 leshort 62 x86-64,
->18 leshort 63 Sony DSP,
->18 leshort 64 DEC PDP-10,
->18 leshort 65 DEC PDP-11,
->18 leshort 66 FX66,
->18 leshort 67 ST9+ 8/16 bit,
->18 leshort 68 ST7 8 bit,
->18 leshort 69 MC68HC16,
->18 leshort 70 MC68HC11,
->18 leshort 71 MC68HC08,
->18 leshort 72 MC68HC05,
->18 leshort 73 SGI SVx or Cray NV1,
->18 leshort 74 ST19 8 bit,
->18 leshort 75 Digital VAX,
->18 leshort 76 Axis cris,
->18 leshort 77 Infineon 32-bit embedded,
->18 leshort 78 Element 14 64-bit DSP,
->18 leshort 79 LSI Logic 16-bit DSP,
->18 leshort 80 MMIX,
->18 leshort 81 Harvard machine-independent,
->18 leshort 82 SiTera Prism,
->18 leshort 83 Atmel AVR 8-bit,
->18 leshort 84 Fujitsu FR30,
->18 leshort 85 Mitsubishi D10V,
->18 leshort 86 Mitsubishi D30V,
->18 leshort 87 NEC v850,
->18 leshort 88 Renesas M32R,
->18 leshort 89 Matsushita MN10300,
->18 leshort 90 Matsushita MN10200,
->18 leshort 91 picoJava,
->18 leshort 92 OpenRISC,
->18 leshort 93 Synopsys ARCompact ARC700 cores,
->18 leshort 94 Tensilica Xtensa,
->18 leshort 95 Alphamosaic VideoCore,
->18 leshort 96 Thompson Multimedia,
->18 leshort 97 NatSemi 32k,
->18 leshort 98 Tenor Network TPC,
->18 leshort 99 Trebia SNP 1000,
->18 leshort 100 STMicroelectronics ST200,
->18 leshort 101 Ubicom IP2022,
->18 leshort 102 MAX Processor,
->18 leshort 103 NatSemi CompactRISC,
->18 leshort 104 Fujitsu F2MC16,
->18 leshort 105 TI msp430,
->18 leshort 106 Analog Devices Blackfin,
->18 leshort 107 S1C33 Family of Seiko Epson,
->18 leshort 108 Sharp embedded,
->18 leshort 109 Arca RISC,
->18 leshort 110 PKU-Unity Ltd.,
->18 leshort 111 eXcess: 16/32/64-bit,
->18 leshort 112 Icera Deep Execution Processor,
->18 leshort 113 Altera Nios II,
->18 leshort 114 NatSemi CRX,
->18 leshort 115 Motorola XGATE,
->18 leshort 116 Infineon C16x/XC16x,
->18 leshort 117 Renesas M16C series,
->18 leshort 118 Microchip dsPIC30F,
->18 leshort 119 Freescale RISC core,
->18 leshort 120 Renesas M32C series,
->18 leshort 131 Altium TSK3000 core,
->18 leshort 132 Freescale RS08,
->18 leshort 134 Cyan Technology eCOG2,
->18 leshort 135 Sunplus S+core7 RISC,
->18 leshort 136 New Japan Radio (NJR) 24-bit DSP,
->18 leshort 137 Broadcom VideoCore III,
->18 leshort 138 LatticeMico32,
->18 leshort 139 Seiko Epson C17 family,
->18 leshort 140 TI TMS320C6000 DSP family,
->18 leshort 141 TI TMS320C2000 DSP family,
->18 leshort 142 TI TMS320C55x DSP family,
->18 leshort 144 TI Programmable Realtime Unit
->18 leshort 160 STMicroelectronics 64bit VLIW DSP,
->18 leshort 161 Cypress M8C,
->18 leshort 162 Renesas R32C series,
->18 leshort 163 NXP TriMedia family,
->18 leshort 164 QUALCOMM DSP6,
->18 leshort 165 Intel 8051 and variants,
->18 leshort 166 STMicroelectronics STxP7x family,
->18 leshort 167 Andes embedded RISC,
->18 leshort 168 Cyan eCOG1X family,
->18 leshort 169 Dallas MAXQ30,
->18 leshort 170 New Japan Radio (NJR) 16-bit DSP,
->18 leshort 171 M2000 Reconfigurable RISC,
->18 leshort 172 Cray NV2 vector architecture,
->18 leshort 173 Renesas RX family,
->18 leshort 174 META,
->18 leshort 175 MCST Elbrus,
->18 leshort 176 Cyan Technology eCOG16 family,
->18 leshort 177 NatSemi CompactRISC,
->18 leshort 178 Freescale Extended Time Processing Unit,
->18 leshort 179 Infineon SLE9X,
->18 leshort 180 Intel L1OM,
->18 leshort 181 Intel K1OM,
->18 leshort 183 ARM aarch64,
->18 leshort 185 Atmel 32-bit family,
->18 leshort 186 STMicroeletronics STM8 8-bit,
->18 leshort 187 Tilera TILE64,
->18 leshort 188 Tilera TILEPro,
->18 leshort 189 Xilinx MicroBlaze 32-bit RISC,
->18 leshort 190 NVIDIA CUDA architecture,
->18 leshort 191 Tilera TILE-Gx,
->18 leshort 195 Synopsys ARCv2/HS3x/HS4x cores,
->18 leshort 197 Renesas RL78 family,
->18 leshort 199 Renesas 78K0R,
->18 leshort 200 Freescale 56800EX,
->18 leshort 201 Beyond BA1,
->18 leshort 202 Beyond BA2,
->18 leshort 203 XMOS xCORE,
->18 leshort 204 Microchip 8-bit PIC(r),
->18 leshort 210 KM211 KM32,
->18 leshort 211 KM211 KMX32,
->18 leshort 212 KM211 KMX16,
->18 leshort 213 KM211 KMX8,
->18 leshort 214 KM211 KVARC,
->18 leshort 215 Paneve CDP,
->18 leshort 216 Cognitive Smart Memory,
->18 leshort 217 iCelero CoolEngine,
->18 leshort 218 Nanoradio Optimized RISC,
->18 leshort 219 CSR Kalimba architecture family
->18 leshort 220 Zilog Z80
->18 leshort 221 Controls and Data Services VISIUMcore processor
->18 leshort 222 FTDI Chip FT32 high performance 32-bit RISC architecture
->18 leshort 223 Moxie processor family
->18 leshort 224 AMD GPU architecture
->18 leshort 243 UCB RISC-V,
-# only for 32-bit
->>4 byte 1
->>>36 use elf-riscv
-# only for 64-bit
->>4 byte 2
->>>48 use elf-riscv
->18 leshort 244 Lanai 32-bit processor,
->18 leshort 245 CEVA Processor Architecture Family,
->18 leshort 246 CEVA X2 Processor Family,
->18 leshort 247 eBPF,
->18 leshort 248 Graphcore Intelligent Processing Unit,
->18 leshort 249 Imagination Technologies,
->18 leshort 250 Netronome Flow Processor,
->18 leshort 251 NEC Vector Engine,
->18 leshort 252 C-SKY processor family,
->18 leshort 253 Synopsys ARCv3 64-bit ISA/HS6x cores,
->18 leshort 254 MOS Technology MCS 6502 processor,
->18 leshort 255 Synopsys ARCv3 32-bit,
->18 leshort 256 Kalray VLIW core of the MPPA family,
->18 leshort 257 WDC 65816/65C816,
->18 leshort 258 LoongArch,
->18 leshort 259 ChipON KungFu32,
->18 leshort 0x1057 AVR (unofficial),
->18 leshort 0x1059 MSP430 (unofficial),
->18 leshort 0x1223 Adapteva Epiphany (unofficial),
->18 leshort 0x2530 Morpho MT (unofficial),
->18 leshort 0x3330 FR30 (unofficial),
->18 leshort 0x3426 OpenRISC (obsolete),
->18 leshort 0x4688 Infineon C166 (unofficial),
->18 leshort 0x5441 Cygnus FRV (unofficial),
->18 leshort 0x5aa5 DLX (unofficial),
->18 leshort 0x7650 Cygnus D10V (unofficial),
->18 leshort 0x7676 Cygnus D30V (unofficial),
->18 leshort 0x8217 Ubicom IP2xxx (unofficial),
->18 leshort 0x8472 OpenRISC (obsolete),
->18 leshort 0x9025 Cygnus PowerPC (unofficial),
->18 leshort 0x9026 Alpha (unofficial),
->18 leshort 0x9041 Cygnus M32R (unofficial),
->18 leshort 0x9080 Cygnus V850 (unofficial),
->18 leshort 0xa390 IBM S/390 (obsolete),
->18 leshort 0xabc7 Old Xtensa (unofficial),
->18 leshort 0xad45 xstormy16 (unofficial),
->18 leshort 0xbaab Old MicroBlaze (unofficial),,
->18 leshort 0xbeef Cygnus MN10300 (unofficial),
->18 leshort 0xdead Cygnus MN10200 (unofficial),
->18 leshort 0xf00d Toshiba MeP (unofficial),
->18 leshort 0xfeb0 Renesas M32C (unofficial),
->18 leshort 0xfeba Vitesse IQ2000 (unofficial),
->18 leshort 0xfebb NIOS (unofficial),
->18 leshort 0xfeed Moxie (unofficial),
->18 default x
->>18 leshort x *unknown arch %#x*
->20 lelong 0 invalid version
->20 lelong 1 version 1
-
-0 string \177ELF ELF
-!:strength *2
->4 byte 0 invalid class
->4 byte 1 32-bit
->4 byte 2 64-bit
->5 byte 0 invalid byte order
->5 byte 1 LSB
->>0 use elf-le
->5 byte 2 MSB
->>0 use \^elf-le
->7 byte 0 (SYSV)
->7 byte 1 (HP-UX)
->7 byte 2 (NetBSD)
->7 byte 3 (GNU/Linux)
->7 byte 4 (GNU/Hurd)
->7 byte 5 (86Open)
->7 byte 6 (Solaris)
->7 byte 7 (Monterey)
->7 byte 8 (IRIX)
->7 byte 9 (FreeBSD)
->7 byte 10 (Tru64)
->7 byte 11 (Novell Modesto)
->7 byte 12 (OpenBSD)
->7 byte 13 (OpenVMS)
->7 byte 14 (HP NonStop Kernel)
->7 byte 15 (AROS Research Operating System)
->7 byte 16 (FenixOS)
->7 byte 17 (Nuxi CloudABI)
->7 byte 97 (ARM)
->7 byte 202 (Cafe OS)
->7 byte 255 (embedded)
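The ELF entries above branch entirely on the identification bytes: class at offset 4, byte order at offset 5, OS/ABI at offset 7, then e_type at offset 16 and e_machine at offset 18 read in the indicated byte order (the MSB case reuses elf-le with the order flipped). A minimal Python sketch of that dispatch, with a hypothetical function name and only a handful of e_type labels:

import struct, sys

def elf_summary(path):
    with open(path, "rb") as f:
        hdr = f.read(20)
    if len(hdr) < 20 or hdr[:4] != b"\x7fELF":
        return None
    bits = {1: "32-bit", 2: "64-bit"}.get(hdr[4], "invalid class")
    endian = "<" if hdr[5] == 1 else ">"              # byte 5: 1 = LSB, 2 = MSB
    e_type, e_machine = struct.unpack_from(endian + "HH", hdr, 16)
    kind = {1: "relocatable", 2: "executable", 3: "shared object or PIE",
            4: "core file"}.get(e_type, f"type {e_type}")
    return f"ELF {bits}, {kind}, machine {e_machine}"

if __name__ == "__main__":
    print(elf_summary(sys.argv[1]))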
diff --git a/contrib/libs/libmagic/magic/Magdir/encore b/contrib/libs/libmagic/magic/Magdir/encore
deleted file mode 100644
index 287b388dba..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/encore
+++ /dev/null
@@ -1,22 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: encore,v 1.7 2014/04/30 21:41:02 christos Exp $
-# encore: file(1) magic for Encore machines
-#
-# XXX - needs to have the byte order specified (NS32K was little-endian,
-# dunno whether they run the 88K in little-endian mode or not).
-#
-0 short 0x154 Encore
->20 short 0x107 executable
->20 short 0x108 pure executable
->20 short 0x10b demand-paged executable
->20 short 0x10f unsupported executable
->12 long >0 not stripped
->22 short >0 - version %d
->22 short 0 -
-#>4 date x stamp %s
-0 short 0x155 Encore unsupported executable
->12 long >0 not stripped
->22 short >0 - version %d
->22 short 0 -
-#>4 date x stamp %s
diff --git a/contrib/libs/libmagic/magic/Magdir/epoc b/contrib/libs/libmagic/magic/Magdir/epoc
deleted file mode 100644
index 6f4ab5fc38..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/epoc
+++ /dev/null
@@ -1,62 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: epoc,v 1.9 2013/12/21 14:28:15 christos Exp $
-# EPOC : file(1) magic for EPOC documents [Psion Series 5/Osaris/Geofox 1]
-# Stefan Praszalowicz <hpicollo@worldnet.fr> and Peter Breitenlohner <peb@mppmu.mpg.de>
-# Useful information for improving this file can be found at:
-# http://software.frodo.looijaard.name/psiconv/formats/Index.html
-#------------------------------------------------------------------------------
-0 lelong 0x10000037 Psion Series 5
->4 lelong 0x10000039 font file
->4 lelong 0x1000003A printer driver
->4 lelong 0x1000003B clipboard
->4 lelong 0x10000042 multi-bitmap image
-!:mime image/x-epoc-mbm
->4 lelong 0x1000006A application information file
->4 lelong 0x1000006D
->>8 lelong 0x1000007D Sketch image
-!:mime image/x-epoc-sketch
->>8 lelong 0x1000007E voice note
->>8 lelong 0x1000007F Word file
-!:mime application/x-epoc-word
->>8 lelong 0x10000085 OPL program (TextEd)
-!:mime application/x-epoc-opl
->>8 lelong 0x10000087 Comms settings
->>8 lelong 0x10000088 Sheet file
-!:mime application/x-epoc-sheet
->>8 lelong 0x100001C4 EasyFax initialisation file
->4 lelong 0x10000073 OPO module
-!:mime application/x-epoc-opo
->4 lelong 0x10000074 OPL application
-!:mime application/x-epoc-app
->4 lelong 0x1000008A exported multi-bitmap image
->4 lelong 0x1000016D
->>8 lelong 0x10000087 Comms names
-
-0 lelong 0x10000041 Psion Series 5 ROM multi-bitmap image
-
-0 lelong 0x10000050 Psion Series 5
->4 lelong 0x1000006D database
->>8 lelong 0x10000084 Agenda file
-!:mime application/x-epoc-agenda
->>8 lelong 0x10000086 Data file
-!:mime application/x-epoc-data
->>8 lelong 0x10000CEA Jotter file
-!:mime application/x-epoc-jotter
->4 lelong 0x100000E4 ini file
-
-0 lelong 0x10000079 Psion Series 5 binary:
->4 lelong 0x00000000 DLL
->4 lelong 0x10000049 comms hardware library
->4 lelong 0x1000004A comms protocol library
->4 lelong 0x1000005D OPX
->4 lelong 0x1000006C application
->4 lelong 0x1000008D DLL
->4 lelong 0x100000AC logical device driver
->4 lelong 0x100000AD physical device driver
->4 lelong 0x100000E5 file transfer protocol
->4 lelong 0x100000E5 file transfer protocol
->4 lelong 0x10000140 printer definition
->4 lelong 0x10000141 printer definition
-
-0 lelong 0x1000007A Psion Series 5 executable
diff --git a/contrib/libs/libmagic/magic/Magdir/erlang b/contrib/libs/libmagic/magic/Magdir/erlang
deleted file mode 100644
index df7aa2aac8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/erlang
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: erlang,v 1.7 2019/04/19 00:42:27 christos Exp $
-# erlang: file(1) magic for Erlang JAM and BEAM files
-# URL: https://www.erlang.org/faq/x779.html#AEN812
-
-# OTP R3-R4
-0 string \0177BEAM! Old Erlang BEAM file
->6 short >0 - version %d
-
-# OTP R5 and onwards
-0 string FOR1
->8 string BEAM Erlang BEAM file
-
-# 4.2 version may have a copyright notice!
-4 string Tue\ Jan\ 22\ 14:32:44\ MET\ 1991 Erlang JAM file - version 4.2
-79 string Tue\ Jan\ 22\ 14:32:44\ MET\ 1991 Erlang JAM file - version 4.2
-
-4 string 1.0\ Fri\ Feb\ 3\ 09:55:56\ MET\ 1995 Erlang JAM file - version 4.3
-
-0 bequad 0x0000000000ABCDEF Erlang DETS file
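The OTP R5+ BEAM entry above is an IFF-style container check: "FOR1" at offset 0 and "BEAM" at offset 8 (the intervening four bytes are assumed to be the big-endian form length, which the pattern does not inspect). A minimal sketch with a hypothetical function name:

import sys

def is_beam_file(path):
    with open(path, "rb") as f:
        hdr = f.read(12)
    # "FOR1" container tag, 4-byte length, then the "BEAM" form type.
    return len(hdr) == 12 and hdr[:4] == b"FOR1" and hdr[8:12] == b"BEAM"

if __name__ == "__main__":
    print(is_beam_file(sys.argv[1]))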
diff --git a/contrib/libs/libmagic/magic/Magdir/espressif b/contrib/libs/libmagic/magic/Magdir/espressif
deleted file mode 100644
index a97c09301f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/espressif
+++ /dev/null
@@ -1,57 +0,0 @@
-
-# $File: espressif,v 1.3 2021/04/26 15:56:00 christos Exp $
-# configuration dump of Tasmota firmware for ESP8266 based devices by Espressif
-# URL: https://github.com/arendst/Sonoff-Tasmota/
-# Reference: https://codeload.github.com/arendst/Sonoff-Tasmota/zip/release-6.2/
-# Sonoff-Tasmota-release-6.2.zip/Sonoff-Tasmota-release-6.2/sonoff/settings.h
-# From: Joerg Jenderek
-#
-# cfg_holder=4617=0x1209
-0 uleshort 4617
-# remaining settings normally 0x5A+offset XORed; free_1D5[20] empty since 5.12.0e
->0x1D5 ubequad 0x2f30313233343536 configuration of Tasmota firmware (ESP8266)
-!:mime application/x-tasmota-dmp
-!:ext dmp
-# version like 6.2.1.0 ~ 0x06020100 XORed to 0x63666262
->>11 ubyte^0x65 x \b, version %u
->>10 ubyte^0x64 x \b.%u
->>9 ubyte^0x63 x \b.%u
->>8 ubyte^0x62 x \b.%u
-#>8 ubelong x (%#x)
-# hostname[33] XORed
->>0x165 ubyte^0x1BF x \b, hostname %c
->>0x166 ubyte^0x1C0 >037 \b%c
->>0x167 ubyte^0x1C1 >037 \b%c
->>0x168 ubyte^0x1C2 >037 \b%c
->>0x169 ubyte^0x1C3 >037 \b%c
->>0x16A ubyte^0x1C4 >037 \b%c
->>0x16B ubyte^0x1C5 >037 \b%c
->>0x16C ubyte^0x1C6 >037 \b%c
->>0x16D ubyte^0x1C7 >037 \b%c
->>0x16E ubyte^0x1C8 >037 \b%c
->>0x16F ubyte^0x1C9 >037 \b%c
->>0x170 ubyte^0x1CA >037 \b%c
->>0x171 ubyte^0x1CB >037 \b%c
->>0x172 ubyte^0x1CC >037 \b%c
->>0x173 ubyte^0x1CD >037 \b%c
->>0x174 ubyte^0x1CE >037 \b%c
->>0x175 ubyte^0x1CF >037 \b%c
->>0x176 ubyte^0x1D0 >037 \b%c
->>0x177 ubyte^0x1D1 >037 \b%c
->>0x178 ubyte^0x1D2 >037 \b%c
->>0x179 ubyte^0x1D3 >037 \b%c
->>0x17A ubyte^0x1D4 >037 \b%c
->>0x17B ubyte^0x1D5 >037 \b%c
->>0x17C ubyte^0x1D6 >037 \b%c
->>0x17D ubyte^0x1D7 >037 \b%c
->>0x17E ubyte^0x1D8 >037 \b%c
->>0x17F ubyte^0x1D9 >037 \b%c
->>0x180 ubyte^0x1DA >037 \b%c
->>0x181 ubyte^0x1DB >037 \b%c
->>0x182 ubyte^0x1DC >037 \b%c
->>0x183 ubyte^0x1DD >037 \b%c
->>0x184 ubyte^0x1DE >037 \b%c
->>0x185 ubyte^0x1DF >037 \b%c
-#>>0x165 string x (%.33s)
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/esri b/contrib/libs/libmagic/magic/Magdir/esri
deleted file mode 100644
index e49a7ce407..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/esri
+++ /dev/null
@@ -1,28 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: esri,v 1.5 2019/04/19 00:42:27 christos Exp $
-# ESRI Shapefile format (.shp .shx .dbf=DBaseIII)
-# Based on info from
-# <URL:https://www.esri.com/library/whitepapers/pdfs/shapefile.pdf>
-0 belong 9994 ESRI Shapefile
->4 belong =0
->8 belong =0
->12 belong =0
->16 belong =0
->20 belong =0
->28 lelong x version %d
->24 belong x length %d
->32 lelong =0 type Null Shape
->32 lelong =1 type Point
->32 lelong =3 type PolyLine
->32 lelong =5 type Polygon
->32 lelong =8 type MultiPoint
->32 lelong =11 type PointZ
->32 lelong =13 type PolyLineZ
->32 lelong =15 type PolygonZ
->32 lelong =18 type MultiPointZ
->32 lelong =21 type PointM
->32 lelong =23 type PolyLineM
->32 lelong =25 type PolygonM
->32 lelong =28 type MultiPointM
->32 lelong =31 type MultiPatch
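The Shapefile pattern above checks the big-endian file code 9994 plus five zero words, then prints the big-endian length at offset 24 and the little-endian version and shape type at offsets 28 and 32. A minimal Python sketch of the same header read; the type table is copied from the pattern and the function name is hypothetical:

import struct, sys

SHAPE_TYPES = {0: "Null Shape", 1: "Point", 3: "PolyLine", 5: "Polygon",
               8: "MultiPoint", 11: "PointZ", 13: "PolyLineZ", 15: "PolygonZ",
               18: "MultiPointZ", 21: "PointM", 23: "PolyLineM", 25: "PolygonM",
               28: "MultiPointM", 31: "MultiPatch"}

def shp_summary(path):
    with open(path, "rb") as f:
        hdr = f.read(36)
    if len(hdr) < 36 or struct.unpack_from(">i", hdr, 0)[0] != 9994:
        return None
    length = struct.unpack_from(">i", hdr, 24)[0]        # big-endian, as in the pattern
    version, shape = struct.unpack_from("<ii", hdr, 28)  # little-endian fields
    return (f"ESRI Shapefile, version {version}, length {length}, "
            f"type {SHAPE_TYPES.get(shape, shape)}")

if __name__ == "__main__":
    print(shp_summary(sys.argv[1]))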
diff --git a/contrib/libs/libmagic/magic/Magdir/etf b/contrib/libs/libmagic/magic/Magdir/etf
deleted file mode 100644
index 707d23d3a4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/etf
+++ /dev/null
@@ -1,33 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: etf,v 1.1 2015/04/19 22:40:04 christos Exp $
-# etf: file(1) magic for Erlang External Term Format
-# http://erlang.org/doc/apps/erts/erl_ext_dist.html
-# This magic is too weak so it is not enabled by default
-0 byte 131
->1 byte 80
->>2 belong >0 Erlang External Term Format, compressed, original size = %d
->1 byte 70 Erlang External Term Format, starts with NEW_FLOAT_EXT
->1 byte 77 Erlang External Term Format, starts with BIT_BINARY_EXT
->1 byte 97 Erlang External Term Format, starts with SMALL_INTEGER_EXT
->1 byte 98 Erlang External Term Format, starts with INTEGER_EXT
->1 byte 99 Erlang External Term Format, starts with FLOAT_EXT
->1 byte 100 Erlang External Term Format, starts with ATOM_EXT
->1 byte 101 Erlang External Term Format, starts with REFERENCE_EXT
->1 byte 102 Erlang External Term Format, starts with PORT_EXT
->1 byte 103 Erlang External Term Format, starts with PID_EXT
->1 byte 104 Erlang External Term Format, starts with SMALL_TUPLE_EXT
->1 byte 105 Erlang External Term Format, starts with LARGE_TUPLE_EXT
->1 byte 106 Erlang External Term Format, starts with NIL_EXT
->1 byte 107 Erlang External Term Format, starts with STRING_EXT
->1 byte 108 Erlang External Term Format, starts with LIST_EXT
->1 byte 109 Erlang External Term Format, starts with BINARY_EXT
->1 byte 110 Erlang External Term Format, starts with SMALL_BIG_EXT
->1 byte 111 Erlang External Term Format, starts with LARGE_BIG_EXT
->1 byte 112 Erlang External Term Format, starts with NEW_FUN_EXT
->1 byte 113 Erlang External Term Format, starts with EXPORT_EXT
->1 byte 114 Erlang External Term Format, starts with NEW_REFERENCE_EXT
->1 byte 115 Erlang External Term Format, starts with SMALL_ATOM_EXT
->1 byte 116 Erlang External Term Format, starts with MAP_EXT
->1 byte 117 Erlang External Term Format, starts with FUN_EXT
->1 byte 118 Erlang External Term Format, starts with ATOM_UTF8_EXT
->1 byte 119 Erlang External Term Format, starts with SMALL_ATOM_UTF8_EXT
diff --git a/contrib/libs/libmagic/magic/Magdir/fcs b/contrib/libs/libmagic/magic/Magdir/fcs
deleted file mode 100644
index 613437f842..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/fcs
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: fcs,v 1.4 2009/09/19 16:28:09 christos Exp $
-# fcs: file(1) magic for FCS (Flow Cytometry Standard) data files
-# From Roger Leigh <roger@whinlatter.uklinux.net>
-0 string FCS1.0 Flow Cytometry Standard (FCS) data, version 1.0
-0 string FCS2.0 Flow Cytometry Standard (FCS) data, version 2.0
-0 string FCS3.0 Flow Cytometry Standard (FCS) data, version 3.0
-
diff --git a/contrib/libs/libmagic/magic/Magdir/filesystems b/contrib/libs/libmagic/magic/Magdir/filesystems
deleted file mode 100644
index cd72130516..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/filesystems
+++ /dev/null
@@ -1,2694 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: filesystems,v 1.158 2023/05/21 17:19:08 christos Exp $
-# filesystems: file(1) magic for different filesystems
-#
-0 name partid
->0 ubyte 0x00 Unused
->0 ubyte 0x01 12-bit FAT
->0 ubyte 0x02 XENIX /
->0 ubyte 0x03 XENIX /usr
->0 ubyte 0x04 16-bit FAT, less than 32M
->0 ubyte 0x05 extended partition
->0 ubyte 0x06 16-bit FAT, more than 32M
->0 ubyte 0x07 OS/2 HPFS, NTFS, QNX2, Adv. UNIX
->0 ubyte 0x08 AIX or os, or etc.
->0 ubyte 0x09 AIX boot partition or Coherent
->0 ubyte 0x0a O/2 boot manager or Coherent swap
->0 ubyte 0x0b 32-bit FAT
->0 ubyte 0x0c 32-bit FAT, LBA-mapped
->0 ubyte 0x0d 7XXX, LBA-mapped
->0 ubyte 0x0e 16-bit FAT, LBA-mapped
->0 ubyte 0x0f extended partition, LBA-mapped
->0 ubyte 0x10 OPUS
->0 ubyte 0x11 OS/2 DOS 12-bit FAT
->0 ubyte 0x12 Compaq diagnostics
->0 ubyte 0x14 OS/2 DOS 16-bit FAT <32M
->0 ubyte 0x16 OS/2 DOS 16-bit FAT >=32M
->0 ubyte 0x17 OS/2 hidden IFS
->0 ubyte 0x18 AST Windows swapfile
->0 ubyte 0x19 Willowtech Photon coS
->0 ubyte 0x1b hidden win95 fat 32
->0 ubyte 0x1c hidden win95 fat 32 lba
->0 ubyte 0x1d hidden win95 fat 16 lba
->0 ubyte 0x20 Willowsoft OFS1
->0 ubyte 0x21 reserved
->0 ubyte 0x23 reserved
->0 ubyte 0x24 NEC DOS
->0 ubyte 0x26 reserved
->0 ubyte 0x31 reserved
->0 ubyte 0x32 Alien Internet Services NOS
->0 ubyte 0x33 reserved
->0 ubyte 0x34 reserved
->0 ubyte 0x35 JFS on OS2
->0 ubyte 0x36 reserved
->0 ubyte 0x38 Theos
->0 ubyte 0x39 Plan 9, or Theos spanned
->0 ubyte 0x3a Theos ver 4 4gb partition
->0 ubyte 0x3b Theos ve 4 extended partition
->0 ubyte 0x3c PartitionMagic recovery
->0 ubyte 0x3d Hidden Netware
->0 ubyte 0x40 VENIX 286 or LynxOS
->0 ubyte 0x41 PReP
->0 ubyte 0x42 linux swap sharing DRDOS disk
->0 ubyte 0x43 linux sharing DRDOS disk
->0 ubyte 0x44 GoBack change utility
->0 ubyte 0x45 Boot US Boot manager
->0 ubyte 0x46 EUMEL/Elan or Ergos 3
->0 ubyte 0x47 EUMEL/Elan or Ergos 3
->0 ubyte 0x48 EUMEL/Elan or Ergos 3
->0 ubyte 0x4a ALFX/THIN filesystem for DOS
->0 ubyte 0x4c Oberon partition
->0 ubyte 0x4d QNX4.x
->0 ubyte 0x4e QNX4.x 2nd part
->0 ubyte 0x4f QNX4.x 3rd part
->0 ubyte 0x50 DM (disk manager)
->0 ubyte 0x51 DM6 Aux1 (or Novell)
->0 ubyte 0x52 CP/M or Microport SysV/AT
->0 ubyte 0x53 DM6 Aux3
->0 ubyte 0x54 DM6 DDO
->0 ubyte 0x55 EZ-Drive (disk manager)
->0 ubyte 0x56 Golden Bow (disk manager)
->0 ubyte 0x57 Drive PRO
->0 ubyte 0x5c Priam Edisk (disk manager)
->0 ubyte 0x61 SpeedStor
->0 ubyte 0x63 GNU HURD or Mach or Sys V/386
->0 ubyte 0x64 Novell Netware 2.xx or Speedstore
->0 ubyte 0x65 Novell Netware 3.xx
->0 ubyte 0x66 Novell 386 Netware
->0 ubyte 0x67 Novell
->0 ubyte 0x68 Novell
->0 ubyte 0x69 Novell
->0 ubyte 0x70 DiskSecure Multi-Boot
->0 ubyte 0x71 reserved
->0 ubyte 0x73 reserved
->0 ubyte 0x74 reserved
->0 ubyte 0x75 PC/IX
->0 ubyte 0x76 reserved
->0 ubyte 0x77 M2FS/M2CS partition
->0 ubyte 0x78 XOSL boot loader filesystem
->0 ubyte 0x80 MINIX until 1.4a
->0 ubyte 0x81 MINIX since 1.4b
->0 ubyte 0x82 Linux swap or Solaris
->0 ubyte 0x83 Linux native
->0 ubyte 0x84 OS/2 hidden C: drive
->0 ubyte 0x85 Linux extended partition
->0 ubyte 0x86 NT FAT volume set
->0 ubyte 0x87 NTFS volume set or HPFS mirrored
->0 ubyte 0x8a Linux Kernel AiR-BOOT partition
->0 ubyte 0x8b Legacy Fault tolerant FAT32
->0 ubyte 0x8c Legacy Fault tolerant FAT32 ext
->0 ubyte 0x8d Hidden free FDISK FAT12
->0 ubyte 0x8e Linux Logical Volume Manager
->0 ubyte 0x90 Hidden free FDISK FAT16
->0 ubyte 0x91 Hidden free FDISK DOS EXT
->0 ubyte 0x92 Hidden free FDISK FAT16 Big
->0 ubyte 0x93 Amoeba filesystem
->0 ubyte 0x94 Amoeba bad block table
->0 ubyte 0x95 MIT EXOPC native partitions
->0 ubyte 0x97 Hidden free FDISK FAT32
->0 ubyte 0x98 Datalight ROM-DOS Super-Boot
->0 ubyte 0x99 Mylex EISA SCSI
->0 ubyte 0x9a Hidden free FDISK FAT16 LBA
->0 ubyte 0x9b Hidden free FDISK EXT LBA
->0 ubyte 0x9f BSDI?
->0 ubyte 0xa0 IBM Thinkpad hibernation
->0 ubyte 0xa1 HP Volume expansion (SpeedStor)
->0 ubyte 0xa3 HP Volume expansion (SpeedStor)
->0 ubyte 0xa4 HP Volume expansion (SpeedStor)
->0 ubyte 0xa5 386BSD partition type
->0 ubyte 0xa6 OpenBSD partition type
->0 ubyte 0xa7 NeXTSTEP 486
->0 ubyte 0xa8 Apple UFS
->0 ubyte 0xa9 NetBSD partition type
->0 ubyte 0xaa Olivetty Fat12 1.44MB Service part
->0 ubyte 0xab Apple Boot
->0 ubyte 0xae SHAG OS filesystem
->0 ubyte 0xaf Apple HFS
->0 ubyte 0xb0 BootStar Dummy
->0 ubyte 0xb1 reserved
->0 ubyte 0xb3 reserved
->0 ubyte 0xb4 reserved
->0 ubyte 0xb6 reserved
->0 ubyte 0xb7 BSDI BSD/386 filesystem
->0 ubyte 0xb8 BSDI BSD/386 swap
->0 ubyte 0xbb Boot Wizard Hidden
->0 ubyte 0xbe Solaris 8 partition type
->0 ubyte 0xbf Solaris partition type
->0 ubyte 0xc0 CTOS
->0 ubyte 0xc1 DRDOS/sec (FAT-12)
->0 ubyte 0xc2 Hidden Linux
->0 ubyte 0xc3 Hidden Linux swap
->0 ubyte 0xc4 DRDOS/sec (FAT-16, < 32M)
->0 ubyte 0xc5 DRDOS/sec (EXT)
->0 ubyte 0xc6 DRDOS/sec (FAT-16, >= 32M)
->0 ubyte 0xc7 Syrinx (Cyrnix?) or HPFS disabled
->0 ubyte 0xc8 Reserved for DR-DOS 8.0+
->0 ubyte 0xc9 Reserved for DR-DOS 8.0+
->0 ubyte 0xca Reserved for DR-DOS 8.0+
->0 ubyte 0xcb DR-DOS 7.04+ Secured FAT32 CHS
->0 ubyte 0xcc DR-DOS 7.04+ Secured FAT32 LBA
->0 ubyte 0xcd CTOS Memdump
->0 ubyte 0xce DR-DOS 7.04+ FAT16X LBA
->0 ubyte 0xcf DR-DOS 7.04+ EXT LBA
->0 ubyte 0xd0 REAL/32 secure big partition
->0 ubyte 0xd1 Old Multiuser DOS FAT12
->0 ubyte 0xd4 Old Multiuser DOS FAT16 Small
->0 ubyte 0xd5 Old Multiuser DOS Extended
->0 ubyte 0xd6 Old Multiuser DOS FAT16 Big
->0 ubyte 0xd8 CP/M 86
->0 ubyte 0xdb CP/M or Concurrent CP/M
->0 ubyte 0xdd Hidden CTOS Memdump
->0 ubyte 0xde Dell PowerEdge Server utilities
->0 ubyte 0xdf DG/UX virtual disk manager
->0 ubyte 0xe0 STMicroelectronics ST AVFS
->0 ubyte 0xe1 DOS access or SpeedStor 12-bit
->0 ubyte 0xe3 DOS R/O or Storage Dimensions
->0 ubyte 0xe4 SpeedStor 16-bit FAT < 1024 cyl.
->0 ubyte 0xe5 reserved
->0 ubyte 0xe6 reserved
->0 ubyte 0xeb BeOS
->0 ubyte 0xee GPT Protective MBR
->0 ubyte 0xef EFI system partition
->0 ubyte 0xf0 Linux PA-RISC boot loader
->0 ubyte 0xf1 SpeedStor or Storage Dimensions
->0 ubyte 0xf2 DOS 3.3+ Secondary
->0 ubyte 0xf3 reserved
->0 ubyte 0xf4 SpeedStor large partition
->0 ubyte 0xf5 Prologue multi-volumen partition
->0 ubyte 0xf6 reserved
->0 ubyte 0xf9 pCache: ext2/ext3 persistent cache
->0 ubyte 0xfa Bochs x86 emulator
->0 ubyte 0xfb VMware File System
->0 ubyte 0xfc VMware Swap
->0 ubyte 0xfd Linux RAID partition persistent sb
->0 ubyte 0xfe LANstep or IBM PS/2 IML
->0 ubyte 0xff Xenix Bad Block Table
-
-0 string \366\366\366\366 PC formatted floppy with no filesystem
-# Sun disk labels
-# From /usr/include/sun/dklabel.h:
-0774 beshort 0xdabe
-# modified by Joerg Jenderek, because original test
-# succeeds for Cabinet archive dao360.dl_ with negative blocks
->0770 long >0 Sun disk label
->>0 string x '%s
->>>31 string >\0 \b%s
->>>>63 string >\0 \b%s
->>>>>95 string >\0 \b%s
->>0 string x \b'
->>0734 short >0 %d rpm,
->>0736 short >0 %d phys cys,
->>0740 short >0 %d alts/cyl,
->>0746 short >0 %d interleave,
->>0750 short >0 %d data cyls,
->>0752 short >0 %d alt cyls,
->>0754 short >0 %d heads/partition,
->>0756 short >0 %d sectors/track,
->>0764 long >0 start cyl %d,
->>0770 long x %d blocks
-# Is there a boot block written 1 sector in?
->512 belong&077777777 0600407 \b, boot block present
-
-# Joerg Jenderek: Smart Boot Manager backup file is 25 (MSDOS) or 41 (LINUX) byte header + first sectors of disk
-# (http://btmgr.sourceforge.net/docs/user-guide-3.html)
-0 string SBMBAKUP_ Smart Boot Manager backup file
->9 string x \b, version %-5.5s
->>14 string =_
->>>15 string x %-.1s
->>>>16 string =_ \b.
->>>>>17 string x \b%-.1s
->>>>>>18 string =_ \b.
->>>>>>>19 string x \b%-.1s
->>>22 ubyte 0
->>>>21 ubyte x \b, from drive %#x
->>>22 ubyte >0
->>>>21 string x \b, from drive %s
->>>535 search/17 \x55\xAA
->>>>&-512 indirect x \b; contains
-
-# updated by Joerg Jenderek at Nov 2012
-# DOS Emulator image is 128 byte, null right padded header + harddisc image
-0 string DOSEMU\0
->0x27E leshort 0xAA55
-#offset is 128
->>19 ubyte 128
->>>(19.b-1) ubyte 0x0 DOS Emulator image
->>>>7 ulelong >0 \b, %u heads
->>>>11 ulelong >0 \b, %d sectors/track
->>>>15 ulelong >0 \b, %d cylinders
->>>>128 indirect x \b; contains
-
-# added by Joerg Jenderek at Nov 2012
-# http://www.thenakedpc.com/articles/v04/08/0408-05.html
-# Symantec (Peter Norton) Image.dat file consists of variable header, bootrecord, part of FAT and root directory data
-0 string PNCIHISK\0 Norton Utilities disc image data
-# real x86 boot sector with jump instruction
->509 search/1026 \x55\xAA\xeb
->>&-1 indirect x \b; contains
-# http://file-extension.net/seeker/file_extension_dat
-0 string PNCIUNDO Norton Disk Doctor UnDo file
-#
-
-# DOS/MBR boot sector updated by Joerg Jenderek at Sep 2007,May 2011,2013
-# for any allowed sector sizes
-30 search/481 \x55\xAA
-# to display DOS/MBR boot sector (40) before old one (strength=50+21),Syslinux bootloader (71),SYSLINUX MBR (37+36),NetBSD mbr (110),AdvanceMAME mbr (111)
-# DOS BPB information (70) and after DOS floppy (120) like in previous file version
-!:strength +65
-# for sector sizes < 512 Bytes
->11 uleshort <512
->>(11.s-2) uleshort 0xAA55 DOS/MBR boot sector
-# for sector sizes with 512 or more Bytes
->0x1FE leshort 0xAA55 DOS/MBR boot sector
-
-# ExFAT
-3 string/w =EXFAT
->0x1FE leshort 0xAA55
->>0x6E ubyte 1
->>>0x6F ubyte 0x80
->>>0 ubyte 0xEB DOS/MBR boot sector,
->>>0x69 ubyte x ExFAT Filesystem version %d.
->>>0x68 ubyte x \b%d
->>>0x6d ubyte x \b, (1<<%d) sectors per cluster
->>>0x48 ulequad x \b, sectors %lld
->>>0x64 ulelong x \b, serial number %#x
-
-# keep old DOS/MBR boot sector as dummy for mbr and bootloader displaying
-# only for sector sizes with 512 or more Bytes
-0x1FE leshort 0xAA55 DOS/MBR boot sector
-#
-# to display information (50) before DOS BPB (strength=70) and after DOS floppy (120) like in old file version
-!:strength +65
->2 string OSBS OS/BS MBR
-# added by Joerg Jenderek at Feb 2013 according to https://thestarman.pcministry.com/asm/mbr/
-# and https://en.wikipedia.org/wiki/Master_Boot_Record
-# test for nearly all MS-DOS Master Boot Record initial program loader (IPL) is now done by
-# characteristic assembler instructions: xor ax,ax;mov ss,ax;mov sp,7c00
->0 search/2 \x33\xc0\x8e\xd0\xbc\x00\x7c MS-MBR
-# Microsoft Windows 95A and early ( https://thestarman.pcministry.com/asm/mbr/STDMBR.htm )
-# assembler instructions: mov si,sp;push ax;pop es;push ax;pop ds;sti;cld
->>8 ubequad 0x8bf45007501ffbfc
-# https://thestarman.pcministry.com/asm/mbr/200MBR.htm
->>>0x16 ubyte 0xF3 \b,DOS 2
->>>>219 regex Author\ -\ Author:
-# found "David Litton" , "A Pehrsson "
->>>>>&0 string x "%s"
->>>0x16 ubyte 0xF2
-# NEC MS-DOS 3.30 Rev. 3 . See https://thestarman.pcministry.com/asm/mbr/DOS33MBR.htm
-# assembler instructions: mov di,077c;cmp word ptr [di],a55a;jnz
->>>>0x22 ubequad 0xbf7c07813d5aa575 \b,NEC 3.3
-# version MS-DOS 3.30 to MS-Windows 95A (WinVer=4.00.1111)
->>>>0x22 default x \b,DOS version 3.3-7.0
-# error messages are printed by assembler instructions: mov si,06nn;...;int 10 (0xBEnn06;...)
-# where nn is a string offset varying for different languages (see the message-offset sketch after the MS-MBR entries below)
-# "Invalid partition table" nn=0x8b for english version
->>>>>(0x49.b) string Invalid\ partition\ table english
->>>>>(0x49.b) string Ung\201ltige\ Partitionstabelle german
->>>>>(0x49.b) string Table\ de\ partition\ invalide french
->>>>>(0x49.b) string Tabela\ de\ parti\207ao\ inv\240lida portuguese
->>>>>(0x49.b) string Tabla\ de\ partici\242n\ no\ v\240lida spanish
->>>>>(0x49.b) string Tavola\ delle\ partizioni\ non\ valida italian
->>>>>0x49 ubyte >0 at offset %#x
->>>>>>(0x49.b) string >\0 "%s"
-# "Error loading operating system" nn=0xa3 for english version
-# "Fehler beim Laden des Betriebssystems" nn=0xa7 for german version
-# "Erreur en chargeant syst\212me d'exploitation" nn=0xa7 for french version
-# "Erro na inicializa\207ao do sistema operacional" nn=0xa7 for portuguese Brazilian version
-# "Error al cargar sistema operativo" nn=0xa8 for spanish version
-# "Errore durante il caricamento del sistema operativo" nn=0xae for italian version
->>>>>0x74 ubyte >0 at offset %#x
->>>>>>(0x74.b) string >\0 "%s"
-# "Missing operating system" nn=0xc2 for english version
-# "Betriebssystem fehlt" nn=0xcd for german version
-# "Syst\212me d'exploitation absent" nn=0xd2 for french version
-# "Sistema operacional nao encontrado" nn=0xd4 for portuguese Brazilian version
-# "Falta sistema operativo" nn=0xca for spanish version
-# "Sistema operativo mancante" nn=0xe2 for italian version
->>>>>0x79 ubyte >0 at offset %#x
->>>>>>(0x79.b) string >\0 "%s"
-# Microsoft Windows 95B to XP (https://thestarman.pcministry.com/asm/mbr/95BMEMBR.htm)
-# assembler instructions: push ax;pop es;push ax;pop ds;cld;mov si,7c1b
->>8 ubequad 0x5007501ffcbe1b7c
-# assembler instructions: rep;movsb;retf;mov si,07be;mov cl,04
->>>24 ubequad 0xf3a4cbbebe07b104 9M
-# "Invalid partition table" nn=0x10F for english version
-# "Ung\201ltige Partitionstabelle" nn=0x10F for german version
-# "Table de partition erron\202e" nn=0x10F for french version
-# "\216\257\245\340\240\346\250\256\255\255\240\357 \341\250\341\342\245\254\240 \255\245 \255\240\251\244\245\255\240" nn=0x10F for russian version
->>>>(0x3C.b+0x0FF) string Invalid\ partition\ table english
->>>>(0x3C.b+0x0FF) string Ung\201ltige\ Partitionstabelle german
->>>>(0x3C.b+0x0FF) string Table\ de\ partition\ erron\202e french
->>>>(0x3C.b+0x0FF) string \215\245\257\340\240\242\250\253\354\255\240\357\ \342\240\241\253\250\346\240 russian
->>>>0x3C ubyte x at offset %#x+0xFF
->>>>(0x3C.b+0x0FF) string >\0 "%s"
-# "Error loading operating system" nn=0x127 for english version
-# "Fehler beim Laden des Betriebssystems" nn=0x12b for german version
-# "Erreur lors du chargement du syst\212me d'exploitation" nn=0x12a for french version
-# "\216\350\250\241\252\240 \257\340\250 \247\240\243\340\343\247\252\245 \256\257\245\340\240\346\250\256\255\255\256\251 \341\250\341\342\245\254\353" nn=0x12d for russian version
->>>>0xBD ubyte x at offset 0x1%x
->>>>(0xBD.b+0x100) string >\0 "%s"
-# "Missing operating system" nn=0x146 for english version
-# "Betriebssystem fehlt" nn=0x151 for german version
-# "Syst\212me d'exploitation manquant" nn=0x15e for french version
-# "\216\257\245\340\240\346\250\256\255\255\240\357 \341\250\341\342\245\254\240 \255\245 \255\240\251\244\245\255\240" nn=0x156 for russian version
->>>>0xA9 ubyte x at offset 0x1%x
->>>>(0xA9.b+0x100) string >\0 "%s"
-# https://thestarman.pcministry.com/asm/mbr/Win2kmbr.htm
-# assembler instructions: rep;movsb;retf;mov BP,07be;mov cl,04
->>>24 ubequad 0xf3a4cbbdbe07b104 XP
-# where xxyyzz are lower bits from offsets of error messages varying for different languages
->>>>0x1B4 ubelong&0x00FFFFFF 0x002c4463 english
->>>>0x1B4 ubelong&0x00FFFFFF 0x002c486e german
-# "Invalid partition table" xx=0x12C for english version
-# "Ung\201ltige Partitionstabelle" xx=0x12C for german version
->>>>0x1b5 ubyte >0 at offset 0x1%x
->>>>(0x1b5.b+0x100) string >\0 "%s"
-# "Error loading operating system" yy=0x144 for english version
-# "Fehler beim Laden des Betriebssystems" yy=0x148 for german version
->>>>0x1b6 ubyte >0 at offset 0x1%x
->>>>(0x1b6.b+0x100) string >\0 "%s"
-# "Missing operating system" zz=0x163 for english version
-# "Betriebssystem nicht vorhanden" zz=0x16e for german version
->>>>0x1b7 ubyte >0 at offset 0x1%x
->>>>(0x1b7.b+0x100) string >\0 "%s"
-# Microsoft Windows Vista or 7
-# assembler instructions: ..;mov ds,ax;mov si,7c00;mov di,..00
->>8 ubequad 0xc08ed8be007cbf00
-# Microsoft Windows Vista (https://thestarman.pcministry.com/asm/mbr/VistaMBR.htm)
-# assembler instructions: jnz 0729;cmp ebx,"TCPA"
->>>0xEC ubequad 0x753b6681fb544350 Vista
-# where xxyyzz are lower bits from offsets of error messages varying for different languages
->>>>0x1B4 ubelong&0x00FFFFFF 0x00627a99 english
-#>>>>0x1B4 ubelong&0x00FFFFFF ? german
-# "Invalid partition table" xx=0x162 for english version
-# "Ung\201ltige Partitionstabelle" xx=0x1?? for german version
->>>>0x1b5 ubyte >0 at offset 0x1%x
->>>>(0x1b5.b+0x100) string >\0 "%s"
-# "Error loading operating system" yy=0x17a for english version
-# "Fehler beim Laden des Betriebssystems" yy= 0x1?? for german version
->>>>0x1b6 ubyte >0 at offset 0x1%x
->>>>(0x1b6.b+0x100) string >\0 "%s"
-# "Missing operating system" zz=0x199 for english version
-# "Betriebssystem nicht vorhanden" zz=0x1?? for german version
->>>>0x1b7 ubyte >0 at offset 0x1%x
->>>>(0x1b7.b+0x100) string >\0 "%s"
-# Microsoft Windows 7 (https://thestarman.pcministry.com/asm/mbr/W7MBR.htm)
-# assembler instructions: cmp ebx,"TCPA";cmp
->>>0xEC ubequad 0x6681fb5443504175 Windows 7
-# where xxyyzz are lower bits from offsets of error messages varying for different languages
->>>>0x1B4 ubelong&0x00FFFFFF 0x00637b9a english
-#>>>>0x1B4 ubelong&0x00FFFFFF ? german
-# "Invalid partition table" xx=0x163 for english version
-# "Ung\201ltige Partitionstabelle" xx=0x1?? for german version
->>>>0x1b5 ubyte >0 at offset 0x1%x
->>>>(0x1b5.b+0x100) string >\0 "%s"
-# "Error loading operating system" yy=0x17b for english version
-# "Fehler beim Laden des Betriebssystems" yy=0x1?? for german version
->>>>0x1b6 ubyte >0 at offset 0x1%x
->>>>(0x1b6.b+0x100) string >\0 "%s"
-# "Missing operating system" zz=0x19a for english version
-# "Betriebssystem nicht vorhanden" zz=0x1?? for german version
->>>>0x1b7 ubyte >0 at offset 0x1%x
->>>>(0x1b7.b+0x100) string >\0 "%s"
-# https://thestarman.pcministry.com/asm/mbr/Win2kmbr.htm#DiskSigs
-# https://en.wikipedia.org/wiki/MBR_disk_signature#ID
->>0x1b8 ulelong >0 \b, disk signature %#-.4x
-# driveID/timestamp for Win 95B,98,98SE and ME. See https://thestarman.pcministry.com/asm/mbr/mystery.htm
->>0xDA uleshort 0
->>>0xDC ulelong >0 \b, created
-# physical drive number (0x80-0xFF) when Windows wrote that byte to the drive
->>>>0xDC ubyte x with driveID %#x
-# hours, minutes and seconds
->>>>0xDf ubyte x at %x
->>>>0xDe ubyte x \b:%x
->>>>0xDd ubyte x \b:%x
-# special case for Microsoft MS-DOS 3.21 spanish
-# assembler instructions: cli;mov $0x30,%ax;mov %ax,%ss;mov
->0 ubequad 0xfab830008ed0bc00
-# assembler instructions: $0x1f00,%sp;mov $0x80cb,%di;add %cl,(%bx,%si);in (%dx),%ax;mov
->>8 ubequad 0x1fbfcb800008ed8 MS-MBR,DOS version 3.21 spanish
-# Microsoft MBR IPL end
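
The MS-MBR entries above never match the error strings directly; they read a one-byte pointer inside the loader (0x49/0x74/0x79 for the DOS 3.3-7.0 code, 0x1B5/0x1B6/0x1B7 with a +0x100 base for the Win95B-XP and Vista/7 code) and then print whatever NUL-terminated, language-dependent text sits at that offset. A small sketch of that indirection, assuming a 512-byte MBR in `mbr` (the helper name and the CP437 decoding choice are mine):

def mbr_message(mbr: bytes, ptr_off: int, base: int = 0) -> str:
    # read the one-byte message offset stored at ptr_off, add the
    # loader-specific base (0 here, 0x100 for the Win9x/NT family),
    # and return the NUL-terminated text found there
    start = base + mbr[ptr_off]
    end = mbr.index(0, start)
    return mbr[start:end].decode("cp437", errors="replace")

# mbr_message(mbr, 0x49) should yield "Invalid partition table" on an English
# MS-DOS 3.3-7.0 MBR; mbr_message(mbr, 0x1B5, 0x100) the same string on a
# Windows 95B-XP MBR.
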
-
-# dr-dos with some upper-, lowercase variants
->0x9D string Invalid\ partition\ table$
->>181 string No\ Operating\ System$
->>>201 string Operating\ System\ load\ error$ \b, DR-DOS MBR, Version 7.01 to 7.03
->0x9D string Invalid\ partition\ table$
->>181 string No\ operating\ system$
->>>201 string Operating\ system\ load\ error$ \b, DR-DOS MBR, Version 7.01 to 7.03
->342 string Invalid\ partition\ table$
->>366 string No\ operating\ system$
->>>386 string Operating\ system\ load\ error$ \b, DR-DOS MBR, version 7.01 to 7.03
->295 string NEWLDR\0
->>302 string Bad\ PT\ $
->>>310 string No\ OS\ $
->>>>317 string OS\ load\ err$
->>>>>329 string Moved\ or\ missing\ IBMBIO.LDR\n\r
->>>>>>358 string Press\ any\ key\ to\ continue.\n\r$
->>>>>>>387 string Copyright\ (c)\ 1984,1998
->>>>>>>>411 string Caldera\ Inc.\0 \b, DR-DOS MBR (IBMBIO.LDR)
-#
-# tests for different MS-DOS Master Boot Records (MBR) moved and merged
-#
-#>0x145 string Default:\ F \b, FREE-DOS MBR
-#>0x14B string Default:\ F \b, FREE-DOS 1.0 MBR
->0x145 search/7 Default:\ F \b, FREE-DOS MBR
-#>>313 string F0\ .\ .\ .
-#>>>322 string disk\ 1
-#>>>>382 string FAT3
->64 string no\ active\ partition\ found
->>96 string read\ error\ while\ reading\ drive \b, FREE-DOS Beta 0.9 MBR
-# Ranish Partition Manager http://www.ranish.com/part/
->387 search/4 \0\ Error!\r
->>378 search/7 Virus!
->>>397 search/4 Booting\040
->>>>408 search/4 HD1/\0 \b, Ranish MBR (
->>>>>416 string Writing\ changes... \b2.37
->>>>>>438 ubyte x \b,%#x dots
->>>>>>440 ubyte >0 \b,virus check
->>>>>>441 ubyte >0 \b,partition %c
-#2.38,2.42,2.44
->>>>>416 string !Writing\ changes... \b
->>>>>>418 ubyte 1 \bvirus check,
->>>>>>419 ubyte x \b%#x seconds
->>>>>>420 ubyte&0x0F >0 \b,partition
->>>>>>>420 ubyte&0x0F <5 \b %x
->>>>>>>420 ubyte&0x0F 0Xf \b ask
->>>>>420 ubyte x \b)
-#
-# SYSLINUX MBR moved
-# https://www.acronis.de/
->362 string MBR\ Error\ \0\r
->>376 string ress\ any\ key\ to\040
->>>392 string boot\ from\ floppy...\0 \b, Acronis MBR
-# added by Joerg Jenderek
-# https://www.visopsys.org/
-# https://partitionlogic.org.uk/
->309 string No\ bootable\ partition\ found\r
->>339 string I/O\ Error\ reading\ boot\ sector\r \b, Visopsys MBR
->349 string No\ bootable\ partition\ found\r
->>379 string I/O\ Error\ reading\ boot\ sector\r \b, simple Visopsys MBR
-# bootloader, bootmanager
->0x40 string SBML
-# label with 11 characters of FAT 12 bit filesystem
->>43 string SMART\ BTMGR
->>>430 string SBMK\ Bad!\r \b, Smart Boot Manager
-# OEM-ID not always "SBM"
-#>>>>3 strings SBM
->>>>6 string >\0 \b, version %s
->382 string XOSLLOADXCF \b, eXtended Operating System Loader
->6 string LILO \b, LInux i386 boot LOader
->>120 string LILO \b, version 22.3.4 SuSe
->>172 string LILO \b, version 22.5.8 Debian
-# updated by Joerg Jenderek at Oct 2008
-# variables according to grub-0.97/stage1/stage1.S or
-# https://www.gnu.org/software/grub/manual/grub.html#Embedded-data
-# usual values are marked with comments to get only information of strange GRUB loaders
->342 search/60 \0Geom\0
-#>0 ulelong x %x=0x009048EB , 0x2a9048EB 0
->>0x41 ubyte <2
->>>0x3E ubyte >2 \b; GRand Unified Bootloader
-# 0x3 for 0.5.95,0.93,0.94,0.96 0x4 for 1.90
->>>>0x3E ubyte x \b, stage1 version %#x
-#If it is 0xFF, use a drive passed by BIOS
->>>>0x40 ubyte <0xFF \b, boot drive %#x
-# in most cases 0,1,0x2e for GRUB 0.5.95
->>>>0x41 ubyte >0 \b, LBA flag %#x
->>>>0x42 uleshort <0x8000 \b, stage2 address %#x
-#>>>>0x42 uleshort =0x8000 \b, stage2 address %#x (usual)
->>>>0x42 uleshort >0x8000 \b, stage2 address %#x
-#>>>>0x44 ulelong =1 \b, 1st sector stage2 %#x (default)
->>>>0x44 ulelong >1 \b, 1st sector stage2 %#x
->>>>0x48 uleshort <0x800 \b, stage2 segment %#x
-#>>>>0x48 uleshort =0x800 \b, stage2 segment %#x (usual)
->>>>0x48 uleshort >0x800 \b, stage2 segment %#x
->>>>402 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>>394 string stage1 \b, GRUB version 0.5.95
->>>>382 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>>376 string GRUB\ \0 \b, GRUB version 0.93 or 1.94
->>>>383 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>>377 string GRUB\ \0 \b, GRUB version 0.94
->>>>385 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>>379 string GRUB\ \0 \b, GRUB version 0.95 or 0.96
->>>>391 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>>385 string GRUB\ \0 \b, GRUB version 0.97
-# unknown version
->>>343 string Geom\0Read\0\ Error\0
->>>>321 string Loading\ stage1.5 \b, GRUB version x.y
->>>380 string Geom\0Hard\ Disk\0Read\0\ Error\0
->>>>374 string GRUB\ \0 \b, GRUB version n.m
-# SYSLINUX bootloader moved
->395 string chksum\0\ ERROR!\0 \b, Gujin bootloader
-# http://www.bcdwb.de/bcdw/index_e.htm
->3 string BCDL
->>498 string BCDL\ \ \ \ BIN \b, Bootable CD Loader (1.50Z)
-# mbr partition table entries updated by Joerg Jenderek at Sep 2013
-# skip Norton Utilities disc image data
->3 string !IHISK
-# skip Linux style boot sector starting with assembler instructions mov 0x7c0,ax;
->>0 belong !0xb8c0078e
-# not Linux kernel
->>>514 string !HdrS
-# not BeOS
->>>>422 string !Be\ Boot\ Loader
-# jump over BPB instruction implies DOS bootsector or AdvanceMAME mbr
->>>>>0 ubelong&0xFD000000 =0xE9000000
-# AdvanceMAME mbr
->>>>>>(1.b+2) ubequad 0xfa31c08ed88ec08e
->>>>>>>446 use partition-table
-# mbr, Norton Utilities disc image data, or 2nd,etc. sector of x86 bootloader
->>>>>0 ubelong&0xFD000000 !0xE9000000
-# skip FSInfosector
->>>>>>0 string !RRaA
-# skip 3rd sector of MS x86 bootloader with assembler instructions cli;MOVZX EAX,BYTE PTR [BP+10];MOV ECX,
-# https://thestarman.pcministry.com/asm/mbr/MSWIN41.htm
->>>>>>>0 ubequad !0xfa660fb64610668b
-# skip 13th sector of MS x86 bootloader
->>>>>>>>0 ubequad !0x660fb64610668b4e
-# skip sector starting with DOS new line
->>>>>>>>>0 string !\r\n
-# allowed active flag 0,80h-FFh
->>>>>>>>>>446 ubyte 0
->>>>>>>>>>>446 use partition-table
->>>>>>>>>>446 ubyte >0x7F
->>>>>>>>>>>446 use partition-table
-# TODO: test for extended bootrecord (ebr) moved and merged with mbr partition table entries
-# mbr partition table entries end
-# https://www.acronis.de/
-#FAT label=ACRONIS\ SZ
-#OEM-ID=BOOTWIZ0
->442 string Non-system\ disk,\040
->>459 string press\ any\ key...\x7\0 \b, Acronis Startup Recovery Loader
-# updated by Joerg Jenderek at Nov 2012, Sep 2013
-# DOS names like F11.SYS or BOOTWIZ.SYS are 8 right space padded bytes+3 bytes
-# display 1 space
->>>447 ubyte x \b
->>>477 use DOS-filename
-#
->185 string FDBOOT\ Version\040
->>204 string \rNo\ Systemdisk.\040
->>>220 string Booting\ from\ harddisk.\n\r
->>>245 string Cannot\ load\ from\ harddisk.\n\r
->>>>273 string Insert\ Systemdisk\040
->>>>>291 string and\ press\ any\ key.\n\r \b, FDBOOT harddisk Bootloader
->>>>>>200 string >\0 \b, version %-3s
->242 string Bootsector\ from\ C.H.\ Hochst\204
-# http://freecode.com/projects/dosfstools dosfstools-n.m/src/mkdosfs.c
-# updated by Joerg Jenderek at Nov 2012. Use search directive with offset instead of string
-# skip name "C.H. Hochstaetter" partly because it is sometimes written without umlaut
->242 search/127 Bootsector\ from\ C.H.\ Hochst
->>278 search/127 No\ Systemdisk.\ Booting\ from\ harddisk
-# followed by variants with point,CR-NL or NL-CR
->>>208 search/261 Cannot\ load\ from\ harddisk.
-# followed by variants CR-NL or NL-CR
->>>>236 search/235 Insert\ Systemdisk\ and\ press\ any\ key.
-# followed by variants with point,CR-NL or NL-CR
->>>>>180 search/96 Disk\ formatted\ with\ WinImage\ \b, WinImage harddisk Bootloader
-# followed by string like "6.50 (c) 1993-2004 Gilles Vollant"
->>>>>>&0 string x \b, version %-4.4s
->(1.b+2) ubyte 0xe
->>(1.b+3) ubyte 0x1f
->>>(1.b+4) ubyte 0xbe
-# message offset found at (1.b+5) is 0x77 for FAT32 or 0x5b for others
->>>>(1.b+5) ubyte&0xd3 0x53
->>>>>(1.b+6) ubyte 0x7c
-# assembler instructions: lodsb;and al,al;jz 0xb;push si;mov ah,
->>>>>>(1.b+7) ubyte 0xac
->>>>>>>(1.b+8) ubyte 0x22
->>>>>>>>(1.b+9) ubyte 0xc0
->>>>>>>>>(1.b+10) ubyte 0x74
->>>>>>>>>>(1.b+11) ubyte 0x0b
->>>>>>>>>>>(1.b+12) ubyte 0x56
->>>>>>>>>>>>(1.b+13) ubyte 0xb4 \b, mkdosfs boot message display
-# FAT1X version
->>>>>>>>>>>>>(1.b+5) ubyte 0x5b
->>>>>>>>>>>>>>0x5b string >\0 "%-s"
-# FAT32 version
->>>>>>>>>>>>>(1.b+5) ubyte 0x77
->>>>>>>>>>>>>>0x77 string >\0 "%-s"
->214 string Please\ try\ to\ install\ FreeDOS\ \b, DOS Emulator boot message display
-#>>244 string from\ dosemu-freedos-*-bin.tgz\r
-#>>>170 string Sorry,\ could\ not\ load\ an\040
-#>>>>195 string operating\ system.\r\n
-#
->103 string This\ is\ not\ a\ bootable\ disk.\040
->>132 string Please\ insert\ a\ bootable\040
->>>157 string floppy\ and\r\n
->>>>169 string press\ any\ key\ to\ try\ again...\r \b, FREE-DOS message display
-#
->66 string Solaris\ Boot\ Sector
->>99 string Incomplete\ MDBoot\ load.
->>>89 string Version \b, Sun Solaris Bootloader
->>>>97 byte x version %c
-#
->408 string OS/2\ !!\ SYS01475\r\0
->>429 string OS/2\ !!\ SYS02025\r\0
->>>450 string OS/2\ !!\ SYS02027\r\0
->>>469 string OS2BOOT\ \ \ \ \b, IBM OS/2 Warp bootloader
-#
->409 string OS/2\ !!\ SYS01475\r\0
->>430 string OS/2\ !!\ SYS02025\r\0
->>>451 string OS/2\ !!\ SYS02027\r\0
->>>470 string OS2BOOT\ \ \ \ \b, IBM OS/2 Warp Bootloader
->112 string This\ disk\ is\ not\ bootable\r
->>142 string If\ you\ wish\ to\ make\ it\ bootable
->>>176 string run\ the\ DOS\ program\ SYS\040
->>>200 string after\ the\r
->>>>216 string system\ has\ been\ loaded\r\n
->>>>>242 string Please\ insert\ a\ DOS\ diskette\040
->>>>>271 string into\r\n\ the\ drive\ and\040
->>>>>>292 string strike\ any\ key...\0 \b, IBM OS/2 Warp message display
-# XP
->430 string NTLDR\ is\ missing\xFF\r\n
->>449 string Disk\ error\xFF\r\n
->>>462 string Press\ any\ key\ to\ restart\r \b, Microsoft Windows XP Bootloader
-# DOS names like NTLDR,CMLDR,$LDR$ are 8 right space padded bytes+3 bytes
->>>>417 ubyte&0xDF >0
->>>>>417 string x %-.5s
->>>>>>422 ubyte&0xDF >0
->>>>>>>422 string x \b%-.3s
->>>>>425 ubyte&0xDF >0
->>>>>>425 string >\ \b.%-.3s
-#
->>>>371 ubyte >0x20
->>>>>368 ubyte&0xDF >0
->>>>>>368 string x %-.5s
->>>>>>>373 ubyte&0xDF >0
->>>>>>>>373 string x \b%-.3s
->>>>>>376 ubyte&0xDF >0
->>>>>>>376 string x \b.%-.3s
-#
->430 string NTLDR\ nicht\ gefunden\xFF\r\n
->>453 string Datentr\204gerfehler\xFF\r\n
->>>473 string Neustart\ mit\ beliebiger\ Taste\r \b, Microsoft Windows XP Bootloader (german)
->>>>417 ubyte&0xDF >0
->>>>>417 string x %-.5s
->>>>>>422 ubyte&0xDF >0
->>>>>>>422 string x \b%-.3s
->>>>>425 ubyte&0xDF >0
->>>>>>425 string >\ \b.%-.3s
-# offset variant
->>>>379 string \0
->>>>>368 ubyte&0xDF >0
->>>>>>368 string x %-.5s
->>>>>>>373 ubyte&0xDF >0
->>>>>>>>373 string x \b%-.3s
-#
->430 string NTLDR\ fehlt\xFF\r\n
->>444 string Datentr\204gerfehler\xFF\r\n
->>>464 string Neustart\ mit\ beliebiger\ Taste\r \b, Microsoft Windows XP Bootloader (2.german)
->>>>417 ubyte&0xDF >0
->>>>>417 string x %-.5s
->>>>>>422 ubyte&0xDF >0
->>>>>>>422 string x \b%-.3s
->>>>>425 ubyte&0xDF >0
->>>>>>425 string >\ \b.%-.3s
-# variant
->>>>371 ubyte >0x20
->>>>>368 ubyte&0xDF >0
->>>>>>368 string x %-.5s
->>>>>>>373 ubyte&0xDF >0
->>>>>>>>373 string x \b%-.3s
->>>>>>376 ubyte&0xDF >0
->>>>>>>376 string x \b.%-.3s
-#
->430 string NTLDR\ fehlt\xFF\r\n
->>444 string Medienfehler\xFF\r\n
->>>459 string Neustart:\ Taste\ dr\201cken\r \b, Microsoft Windows XP Bootloader (3.german)
->>>>371 ubyte >0x20
->>>>>368 ubyte&0xDF >0
->>>>>>368 string x %-.5s
->>>>>>>373 ubyte&0xDF >0
->>>>>>>>373 string x \b%-.3s
->>>>>>376 ubyte&0xDF >0
->>>>>>>376 string x \b.%-.3s
-# variant
->>>>417 ubyte&0xDF >0
->>>>>417 string x %-.5s
->>>>>>422 ubyte&0xDF >0
->>>>>>>422 string x \b%-.3s
->>>>>425 ubyte&0xDF >0
->>>>>>425 string >\ \b.%-.3s
-#
->430 string Datentr\204ger\ entfernen\xFF\r\n
->>454 string Medienfehler\xFF\r\n
->>>469 string Neustart:\ Taste\ dr\201cken\r \b, Microsoft Windows XP Bootloader (4.german)
->>>>379 string \0
->>>>>368 ubyte&0xDF >0
->>>>>>368 string x %-.5s
->>>>>>>373 ubyte&0xDF >0
->>>>>>>>373 string x \b%-.3s
->>>>>>376 ubyte&0xDF >0
->>>>>>>376 string x \b.%-.3s
-# variant
->>>>417 ubyte&0xDF >0
->>>>>417 string x %-.5s
->>>>>>422 ubyte&0xDF >0
->>>>>>>422 string x \b%-.3s
->>>>>425 ubyte&0xDF >0
->>>>>>425 string >\ \b.%-.3s
-#
-
-#>3 string NTFS\ \ \ \040
->389 string Fehler\ beim\ Lesen\040
->>407 string des\ Datentr\204gers
->>>426 string NTLDR\ fehlt
->>>>440 string NTLDR\ ist\ komprimiert
->>>>>464 string Neustart\ mit\ Strg+Alt+Entf\r \b, Microsoft Windows XP Bootloader NTFS (german)
-#>3 string NTFS\ \ \ \040
->313 string A\ disk\ read\ error\ occurred.\r
->>345 string A\ kernel\ file\ is\ missing\040
->>>370 string from\ the\ disk.\r
->>>>484 string NTLDR\ is\ compressed
->>>>>429 string Insert\ a\ system\ diskette\040
->>>>>>454 string and\ restart\r\nthe\ system.\r \b, Microsoft Windows XP Bootloader NTFS
-# DOS loader variants different languages,offsets
->472 ubyte&0xDF >0
->>389 string Invalid\ system\ disk\xFF\r\n
->>>411 string Disk\ I/O\ error
->>>>428 string Replace\ the\ disk,\ and\040
->>>>>455 string press\ any\ key \b, Microsoft Windows 98 Bootloader
-#IO.SYS
->>>>>>472 ubyte&0xDF >0
->>>>>>>472 string x \b %-.2s
->>>>>>>>474 ubyte&0xDF >0
->>>>>>>>>474 string x \b%-.5s
->>>>>>>>>>479 ubyte&0xDF >0
->>>>>>>>>>>479 string x \b%-.1s
->>>>>>>480 ubyte&0xDF >0
->>>>>>>>480 string x \b.%-.3s
-#MSDOS.SYS
->>>>>>>483 ubyte&0xDF >0 \b+
->>>>>>>>483 string x \b%-.5s
->>>>>>>>>488 ubyte&0xDF >0
->>>>>>>>>>488 string x \b%-.3s
->>>>>>>>491 ubyte&0xDF >0
->>>>>>>>>491 string x \b.%-.3s
-#
->>390 string Invalid\ system\ disk\xFF\r\n
->>>412 string Disk\ I/O\ error\xFF\r\n
->>>>429 string Replace\ the\ disk,\ and\040
->>>>>451 string then\ press\ any\ key\r \b, Microsoft Windows 98 Bootloader
->>388 string Ungueltiges\ System\ \xFF\r\n
->>>410 string E/A-Fehler\ \ \ \ \xFF\r\n
->>>>427 string Datentraeger\ wechseln\ und\040
->>>>>453 string Taste\ druecken\r \b, Microsoft Windows 95/98/ME Bootloader (german)
-#WINBOOT.SYS only not spaces (0xDF)
->>>>>>497 ubyte&0xDF >0
->>>>>>>497 string x %-.5s
->>>>>>>>502 ubyte&0xDF >0
->>>>>>>>>502 string x \b%-.1s
->>>>>>>>>>503 ubyte&0xDF >0
->>>>>>>>>>>503 string x \b%-.1s
->>>>>>>>>>>>504 ubyte&0xDF >0
->>>>>>>>>>>>>504 string x \b%-.1s
->>>>>>505 ubyte&0xDF >0
->>>>>>>505 string x \b.%-.3s
-#IO.SYS
->>>>>>472 ubyte&0xDF >0 or
->>>>>>>472 string x \b %-.2s
->>>>>>>>474 ubyte&0xDF >0
->>>>>>>>>474 string x \b%-.5s
->>>>>>>>>>479 ubyte&0xDF >0
->>>>>>>>>>>479 string x \b%-.1s
->>>>>>>480 ubyte&0xDF >0
->>>>>>>>480 string x \b.%-.3s
-#MSDOS.SYS
->>>>>>>483 ubyte&0xDF >0 \b+
->>>>>>>>483 string x \b%-.5s
->>>>>>>>>488 ubyte&0xDF >0
->>>>>>>>>>488 string x \b%-.3s
->>>>>>>>491 ubyte&0xDF >0
->>>>>>>>>491 string x \b.%-.3s
-#
->>390 string Ungueltiges\ System\ \xFF\r\n
->>>412 string E/A-Fehler\ \ \ \ \xFF\r\n
->>>>429 string Datentraeger\ wechseln\ und\040
->>>>>455 string Taste\ druecken\r \b, Microsoft Windows 95/98/ME Bootloader (German)
-#WINBOOT.SYS only not spaces (0xDF)
->>>>>>497 ubyte&0xDF >0
->>>>>>>497 string x %-.7s
->>>>>>>>504 ubyte&0xDF >0
->>>>>>>>>504 string x \b%-.1s
->>>>>>505 ubyte&0xDF >0
->>>>>>>505 string x \b.%-.3s
-#IO.SYS
->>>>>>472 ubyte&0xDF >0 or
->>>>>>>472 string x \b %-.2s
->>>>>>>>474 ubyte&0xDF >0
->>>>>>>>>474 string x \b%-.6s
->>>>>>>480 ubyte&0xDF >0
->>>>>>>>480 string x \b.%-.3s
-#MSDOS.SYS
->>>>>>>483 ubyte&0xDF >0 \b+
->>>>>>>>483 string x \b%-.5s
->>>>>>>>>488 ubyte&0xDF >0
->>>>>>>>>>488 string x \b%-.3s
->>>>>>>>491 ubyte&0xDF >0
->>>>>>>>>491 string x \b.%-.3s
-#
->>389 string Ungueltiges\ System\ \xFF\r\n
->>>411 string E/A-Fehler\ \ \ \ \xFF\r\n
->>>>428 string Datentraeger\ wechseln\ und\040
->>>>>454 string Taste\ druecken\r \b, Microsoft Windows 95/98/ME Bootloader (GERMAN)
-# DOS names like IO.SYS,WINBOOT.SYS,MSDOS.SYS,WINBOOT.INI are 8 right space padded bytes+3 bytes
->>>>>>472 string x %-.2s
->>>>>>>474 ubyte&0xDF >0
->>>>>>>>474 string x \b%-.5s
->>>>>>>>479 ubyte&0xDF >0
->>>>>>>>>479 string x \b%-.1s
->>>>>>480 ubyte&0xDF >0
->>>>>>>480 string x \b.%-.3s
->>>>>>483 ubyte&0xDF >0 \b+
->>>>>>>483 string x \b%-.5s
->>>>>>>488 ubyte&0xDF >0
->>>>>>>>488 string x \b%-.2s
->>>>>>>>490 ubyte&0xDF >0
->>>>>>>>>490 string x \b%-.1s
->>>>>>>491 ubyte&0xDF >0
->>>>>>>>491 string x \b.%-.3s
->479 ubyte&0xDF >0
->>416 string Kein\ System\ oder\040
->>>433 string Laufwerksfehler
->>>>450 string Wechseln\ und\ Taste\ dr\201cken \b, Microsoft DOS Bootloader (german)
-#IO.SYS
->>>>>479 string x \b %-.2s
->>>>>>481 ubyte&0xDF >0
->>>>>>>481 string x \b%-.6s
->>>>>487 ubyte&0xDF >0
->>>>>>487 string x \b.%-.3s
-#MSDOS.SYS
->>>>>>490 ubyte&0xDF >0 \b+
->>>>>>>490 string x \b%-.5s
->>>>>>>>495 ubyte&0xDF >0
->>>>>>>>>495 string x \b%-.3s
->>>>>>>498 ubyte&0xDF >0
->>>>>>>>498 string x \b.%-.3s
-#
->376 search/41 Non-System\ disk\ or\040
->>395 search/41 disk\ error\r
->>>407 search/41 Replace\ and\040
->>>>419 search/41 press\ \b,
->>>>419 search/41 strike\ \b, old
->>>>426 search/41 any\ key\ when\ ready\r MS or PC-DOS bootloader
-#449 Disk\ Boot\ failure\r MS 3.21
-#466 Boot\ Failure\r MS 3.30
->>>>>468 search/18 \0
-#IO.SYS,IBMBIO.COM
->>>>>>&0 string x \b %-.2s
->>>>>>>&-20 ubyte&0xDF >0
->>>>>>>>&-1 string x \b%-.4s
->>>>>>>>>&-16 ubyte&0xDF >0
->>>>>>>>>>&-1 string x \b%-.2s
->>>>>>&8 ubyte&0xDF >0 \b.
->>>>>>>&-1 string x \b%-.3s
-#MSDOS.SYS,IBMDOS.COM
->>>>>>&11 ubyte&0xDF >0 \b+
->>>>>>>&-1 string x \b%-.5s
->>>>>>>>&-6 ubyte&0xDF >0
->>>>>>>>>&-1 string x \b%-.1s
->>>>>>>>>>&-5 ubyte&0xDF >0
->>>>>>>>>>>&-1 string x \b%-.2s
->>>>>>>&7 ubyte&0xDF >0 \b.
->>>>>>>>&-1 string x \b%-.3s
->441 string Cannot\ load\ from\ harddisk.\n\r
->>469 string Insert\ Systemdisk\040
->>>487 string and\ press\ any\ key.\n\r \b, MS (2.11) DOS bootloader
-#>43 string \224R-LOADER\ \ SYS =label
->54 string SYS
->>324 string VASKK
->>>495 string NEWLDR\0 \b, DR-DOS Bootloader (LOADER.SYS)
-#
->98 string Press\ a\ key\ to\ retry\0\r
->>120 string Cannot\ find\ file\ \0\r
->>>139 string Disk\ read\ error\0\r
->>>>156 string Loading\ ...\0 \b, DR-DOS (3.41) Bootloader
-#DRBIOS.SYS
->>>>>44 ubyte&0xDF >0
->>>>>>44 string x \b %-.6s
->>>>>>>50 ubyte&0xDF >0
->>>>>>>>50 string x \b%-.2s
->>>>>>52 ubyte&0xDF >0
->>>>>>>52 string x \b.%-.3s
-#
->70 string IBMBIO\ \ COM
->>472 string Cannot\ load\ DOS!\040
->>>489 string Any\ key\ to\ retry \b, DR-DOS Bootloader
->>471 string Cannot\ load\ DOS\040
->>487 string press\ key\ to\ retry \b, Open-DOS Bootloader
-#??
->444 string KERNEL\ \ SYS
->>314 string BOOT\ error! \b, FREE-DOS Bootloader
->499 string KERNEL\ \ SYS
->>305 string BOOT\ err!\0 \b, Free-DOS Bootloader
->449 string KERNEL\ \ SYS
->>319 string BOOT\ error! \b, FREE-DOS 0.5 Bootloader
-#
->449 string Loading\ FreeDOS
->>0x1AF ulelong >0 \b, FREE-DOS 0.95,1.0 Bootloader
->>>497 ubyte&0xDF >0
->>>>497 string x \b %-.6s
->>>>>503 ubyte&0xDF >0
->>>>>>503 string x \b%-.1s
->>>>>>>504 ubyte&0xDF >0
->>>>>>>>504 string x \b%-.1s
->>>>505 ubyte&0xDF >0
->>>>>505 string x \b.%-.3s
-#
->331 string Error!.0 \b, FREE-DOS 1.0 bootloader
-#
->125 string Loading\ FreeDOS...\r
->>311 string BOOT\ error!\r \b, FREE-DOS bootloader
->>>441 ubyte&0xDF >0
->>>>441 string x \b %-.6s
->>>>>447 ubyte&0xDF >0
->>>>>>447 string x \b%-.1s
->>>>>>>448 ubyte&0xDF >0
->>>>>>>>448 string x \b%-.1s
->>>>449 ubyte&0xDF >0
->>>>>449 string x \b.%-.3s
->124 string FreeDOS\0
->>331 string \ err\0 \b, FREE-DOS BETa 0.9 Bootloader
-# DOS names like KERNEL.SYS,KERNEL16.SYS,KERNEL32.SYS,METAKERN.SYS are 8 right space padded bytes+3 bytes
->>>497 ubyte&0xDF >0
->>>>497 string x \b %-.6s
->>>>>503 ubyte&0xDF >0
->>>>>>503 string x \b%-.1s
->>>>>>>504 ubyte&0xDF >0
->>>>>>>>504 string x \b%-.1s
->>>>505 ubyte&0xDF >0
->>>>>505 string x \b.%-.3s
->>333 string \ err\0 \b, FREE-DOS BEta 0.9 Bootloader
->>>497 ubyte&0xDF >0
->>>>497 string x \b %-.6s
->>>>>503 ubyte&0xDF >0
->>>>>>503 string x \b%-.1s
->>>>>>>504 ubyte&0xDF >0
->>>>>>>>504 string x \b%-.1s
->>>>505 ubyte&0xDF >0
->>>>>505 string x \b.%-.3s
->>334 string \ err\0 \b, FREE-DOS Beta 0.9 Bootloader
->>>497 ubyte&0xDF >0
->>>>497 string x \b %-.6s
->>>>>503 ubyte&0xDF >0
->>>>>>503 string x \b%-.1s
->>>>>>>504 ubyte&0xDF >0
->>>>>>>>504 string x \b%-.1s
->>>>505 ubyte&0xDF >0
->>>>>505 string x \b.%-.3s
->336 string Error!\040
->>343 string Hit\ a\ key\ to\ reboot. \b, FREE-DOS Beta 0.9sr1 Bootloader
->>>497 ubyte&0xDF >0
->>>>497 string x \b %-.6s
->>>>>503 ubyte&0xDF >0
->>>>>>503 string x \b%-.1s
->>>>>>>504 ubyte&0xDF >0
->>>>>>>>504 string x \b%-.1s
->>>>505 ubyte&0xDF >0
->>>>>505 string x \b.%-.3s
-# added by Joerg Jenderek
-# https://www.visopsys.org/
-# https://partitionlogic.org.uk/
-# OEM-ID=Visopsys
->478 ulelong 0
->>(1.b+326) string I/O\ Error\ reading\040
->>>(1.b+344) string Visopsys\ loader\r
->>>>(1.b+361) string Press\ any\ key\ to\ continue.\r \b, Visopsys loader
-# http://alexfru.chat.ru/epm.html#bootprog
->494 ubyte >0x4D
->>495 string >E
->>>495 string <S
-#OEM-ID is not reliable
->>>>3 string BootProg
-# It just looks for a program file name in the root directory
-# and loads the corresponding file, then executes it.
-# DOS names like STARTUP.BIN,STARTUPC.COM,STARTUPE.EXE are 8 right space padded bytes+3 bytes
->>>>499 ubyte&0xDF >0 \b, COM/EXE Bootloader
->>>>>499 use DOS-filename
-#If the boot sector fails to read any other sector,
-#it prints a very short message ("RE") to the screen and hangs the computer.
-#If the boot sector fails to find needed program in the root directory,
-#it also hangs with another message ("NF").
->>>>>492 string RENF \b, FAT (12 bit)
->>>>>495 string RENF \b, FAT (16 bit)
-# x86 bootloader end
-
-# added by Joerg Jenderek at Feb 2013 according to https://thestarman.pcministry.com/asm/mbr/MSWIN41.htm#FSINFO
-# and https://en.wikipedia.org/wiki/File_Allocation_Table#FS_Information_Sector
->0 string RRaA
->>0x1E4 string rrAa \b, FSInfosector
-#>>0x1FC uleshort =0 SHOULD BE ZERO
->>>0x1E8 ulelong <0xffffffff \b, %u free clusters
->>>0x1EC ulelong <0xffffffff \b, last allocated cluster %u
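
The FS information sector checked above has a fixed layout: "RRaA" at 0, "rrAa" at 0x1E4, the free-cluster count at 0x1E8 and the most recently allocated cluster at 0x1EC, with 0xFFFFFFFF meaning "unknown". A minimal sketch of the same parse (function name and dictionary keys are illustrative):

import struct

def parse_fsinfo(fsinfo: bytes):
    # mirrors the FSInfosector tests above
    if fsinfo[0:4] != b"RRaA" or fsinfo[0x1E4:0x1E8] != b"rrAa":
        return None
    free, last = struct.unpack_from("<II", fsinfo, 0x1E8)
    return {"free_clusters": None if free == 0xFFFFFFFF else free,
            "last_allocated": None if last == 0xFFFFFFFF else last}
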
-
-# updated by Joerg Jenderek at Sep 2007
->3 ubyte 0
-#no active flag
->>446 ubyte 0
-# partition 1 not empty
->>>450 ubyte >0
-# partitions 3,4 empty
->>>>482 ubyte 0
->>>>>498 ubyte 0
-# partition 2 ID=0,5,15
->>>>>>466 ubyte <0x10
->>>>>>>466 ubyte 0x05 \b, extended partition table
->>>>>>>466 ubyte 0x0F \b, extended partition table (LBA)
->>>>>>>466 ubyte 0x0 \b, extended partition table (last)
-
-# DOS x86 sector separated and moved from "DOS/MBR boot sector" by Joerg Jenderek at May 2011
-
->0x200 lelong 0x82564557 \b, BSD disklabel
-
-# by Joerg Jenderek at Apr 2013
-# Print the DOS filenames from directory entry form with 8 right space padded bytes + 3 bytes for extension
-# like IO.SYS, MSDOS.SYS, KERNEL.SYS, DRBIO.SYS
-0 name DOS-filename
-# space=0x20 (00100000b) means empty
->0 ubyte&0xDF >0
->>0 ubyte x \b%c
->>>1 ubyte&0xDF >0
->>>>1 ubyte x \b%c
->>>>>2 ubyte&0xDF >0
->>>>>>2 ubyte x \b%c
->>>>>>>3 ubyte&0xDF >0
->>>>>>>>3 ubyte x \b%c
->>>>>>>>>4 ubyte&0xDF >0
->>>>>>>>>>4 ubyte x \b%c
->>>>>>>>>>>5 ubyte&0xDF >0
->>>>>>>>>>>>5 ubyte x \b%c
->>>>>>>>>>>>>6 ubyte&0xDF >0
->>>>>>>>>>>>>>6 ubyte x \b%c
->>>>>>>>>>>>>>>7 ubyte&0xDF >0
->>>>>>>>>>>>>>>>7 ubyte x \b%c
-# DOS filename extension
->>8 ubyte&0xDF >0 \b.
->>>8 ubyte x \b%c
->>>>9 ubyte&0xDF >0
->>>>>9 ubyte x \b%c
->>>>>>10 ubyte&0xDF >0
->>>>>>>10 ubyte x \b%c
-# Print 2 following DOS filenames from directory entry form
-# like IO.SYS+MSDOS.SYS or ibmbio.com+ibmdos.com
-0 name 2xDOS-filename
-# display 1 space
->0 ubyte x \b
->0 use DOS-filename
->11 ubyte x \b+
->11 use DOS-filename
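
DOS-filename above walks the 8+3 directory-entry name one byte at a time, stopping at the 0x20 space padding (that is what the &0xDF mask tests do). The same formatting as a short sketch over the raw 11-byte field (function name is illustrative):

def dos_83_name(raw: bytes) -> str:
    # raw is the 11-byte "NAME    EXT" field; trailing spaces are padding
    base = raw[:8].rstrip(b" ").decode("ascii", errors="replace")
    ext = raw[8:11].rstrip(b" ").decode("ascii", errors="replace")
    return f"{base}.{ext}" if ext else base

# dos_83_name(b"IO      SYS") -> "IO.SYS"
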
-
-# https://en.wikipedia.org/wiki/Master_boot_record#PTE
-# display standard partition table
-0 name partition-table
-#>0 ubyte x PARTITION-TABLE
-# test and display 1st til 4th partition table entry
->0 use partition-entry-test
->16 use partition-entry-test
->32 use partition-entry-test
->48 use partition-entry-test
-# test for entry of partition table
-0 name partition-entry-test
-# partition type ID > 0
->4 ubyte >0
-# active flag 0
->>0 ubyte 0
->>>0 use partition-entry
-# active flag 0x80, 0x81, ...
->>0 ubyte >0x7F
->>>0 use partition-entry
-# Print entry of partition table
-0 name partition-entry
-# partition type ID > 0
->4 ubyte >0 \b; partition
->>64 leshort 0xAA55 1
->>48 leshort 0xAA55 2
->>32 leshort 0xAA55 3
->>16 leshort 0xAA55 4
->>4 ubyte x : ID=%#x
->>0 ubyte&0x80 0x80 \b, active
->>0 ubyte >0x80 %#x
->>1 ubyte x \b, start-CHS (
->>1 use partition-chs
->>5 ubyte x \b), end-CHS (
->>5 use partition-chs
->>8 ulelong x \b), startsector %u
->>12 ulelong x \b, %u sectors
-# Print cylinder,head,sector (CHS) of partition entry
-0 name partition-chs
-# cylinder
->1 ubyte x \b0x
->1 ubyte&0xC0 0x40 \b1
->1 ubyte&0xC0 0x80 \b2
->1 ubyte&0xC0 0xC0 \b3
->2 ubyte x \b%x
-# head
->0 ubyte x \b,%u
-# sector
->1 ubyte&0x3F x \b,%u
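
partition-table, partition-entry and partition-chs above together decode one 16-byte MBR slot: status byte (0 or 0x80-0xFF), packed start/end CHS triplets (bits 6-7 of the sector byte are cylinder bits 8-9), type ID, LBA start and sector count. A sketch of the same layout, assuming `entry` holds one 16-byte slot (names are mine):

import struct

def parse_partition_entry(entry: bytes):
    status, type_id = entry[0], entry[4]
    if type_id == 0:
        return None                       # empty slot, skipped like above
    def chs(triplet):
        head, sec_cyl, cyl_lo = triplet   # head, packed sector/cyl-high, cyl-low
        return (((sec_cyl & 0xC0) << 2) | cyl_lo, head, sec_cyl & 0x3F)
    lba_start, sectors = struct.unpack_from("<II", entry, 8)
    return {"active": status >= 0x80, "type": type_id,
            "start_chs": chs(entry[1:4]), "end_chs": chs(entry[5:8]),
            "lba_start": lba_start, "sectors": sectors}

# the four slots live at offsets 446, 462, 478 and 494 of the boot sector
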
-
-# FATX
-0 string FATX FATX filesystem data
-
-# romfs filesystems - Juan Cespedes <cespedes@debian.org>
-0 string -rom1fs- romfs filesystem, version 1
->8 belong x %d bytes,
->16 string x named %s.
-
-# netboot image - Juan Cespedes <cespedes@debian.org>
-0 lelong 0x1b031336L Netboot image,
->4 lelong&0xFFFFFF00 0
->>4 lelong&0x100 0x000 mode 2
->>4 lelong&0x100 0x100 mode 3
->4 lelong&0xFFFFFF00 !0 unknown mode
-
-0x18b string OS/2 OS/2 Boot Manager
-
-# updated by Joerg Jenderek at Oct 2008 and Sep 2012
-# https://syslinux.zytor.com/iso.php
-# tested with versions 1.47,1.48,1.49,1.50,1.62,1.76,2.00,2.10;3.00,3.11,3.31,;3.70,3.71,3.73,3.75,3.80,3.82,3.84,3.86,4.01,4.03 and 4.05
-# assembler instructions: cli;jmp 0:7Cyy (yy=0x40,0x5e,0x6c,0x6e,0x77);nop;nop
-0 ulequad&0x909000007cc0eafa 0x909000007c40eafa
->631 search/689 ISOLINUX\ isolinux Loader
->>&0 string x (version %-4.4s)
-# https://syslinux.zytor.com/pxe.php
-# assembler instructions: jmp 7C05
-0 ulelong 0x007c05ea pxelinux loader (version 2.13 or older)
-# assembler instructions: pushfd;pushad
-0 ulelong 0x60669c66 pxelinux loader
-# assembler instructions: jmp 05
-0 ulelong 0xc00005ea pxelinux loader (version 3.70 or newer)
-# https://syslinux.zytor.com/wiki/index.php/SYSLINUX
-0 string LDLINUX\ SYS\ SYSLINUX loader
->12 string x (older version %-4.4s)
-0 string \r\nSYSLINUX\ SYSLINUX loader
->11 string x (version %-4.4s)
-# syslinux updated and separated from "DOS/MBR boot sector" by Joerg Jenderek at Sep 2012
-# assembler instructions: jmp yy (yy=0x3c,0x58);nop;"SYSLINUX"
-0 ulelong&0x80909bEB 0x009018EB
-# OEM-ID not always "SYSLINUX"
->434 search/47 Boot\ failed
-# followed by \r\n\0 or :\
->>482 search/132 \0LDLINUX\ SYS Syslinux bootloader (version 2.13 or older)
->>1 ubyte 0x58 Syslinux bootloader (version 3.0-3.9)
->459 search/30 Boot\ error\r\n\0
->>1 ubyte 0x58 Syslinux bootloader (version 3.10 or newer)
-# SYSLINUX MBR updated and separated from "DOS/MBR boot sector" by Joerg Jenderek at Sep 2012
-# assembler instructions: mov di,0600h;mov cx,0100h
-16 search/4 \xbf\x00\x06\xb9\x00\x01
-# to display SYSLINUX MBR (36) before old DOS/MBR boot sector one with partition table (strength=50+21)
-!:strength +36
->94 search/249 Missing\ operating\ system
-# followed by \r for versions older than 3.35, .\r for versions newer than 3.52, and a point for others
-# skip Ranish MBR
->>408 search/4 HD1/\0
->>408 default x
->>>250 search/118 \0Operating\ system\ load SYSLINUX MBR
-# followed by "ing " or space
->>>>292 search/98 error
->>>>>&0 string \r (version 3.35 or older)
->>>>>&0 string .\r (version 3.52 or newer)
->>>>>&0 default x (version 3.36-3.51 )
->368 search/106 \0Disk\ error\ on\ boot\r\n SYSLINUX GPT-MBR
->>156 search/10 \0Boot\ partition\ not\ found\r\n
->>>270 search/10 \0OS\ not\ bootable\r\n (version 3.86 or older)
->>174 search/10 \0Missing\ OS\r\n
->>>189 search/10 \0Multiple\ active\ partitions\r\n (version 4.00 or newer)
-# SYSLINUX END
-
-# NetBSD mbr variants (master-boot-code version 1.22) added by Joerg Jenderek at Nov 2012
-# assembler instructions: xor ax,ax;mov ax,ss;mov sp,0x7c00;mov ax,
-0 ubequad 0x31c08ed0bc007c8e
-# mbr_bootsel magic before partition table not reliable with small ipl fragments
-#>444 uleshort 0xb5e1
->0004 uleshort x
-# ERRorTeXT
->>181 search/166 Error\ \0\r\n NetBSD mbr
-# NT Drive Serial Number https://thestarman.pcministry.com/asm/mbr/Win2kmbr.htm#DS
->>>0x1B8 ubelong >0 \b,Serial %#-.8x
-# BOOTSEL definitions contain assembler instructions: int 0x13;pop dx;push dx;push dx
->>>0xbb search/71 \xcd\x13\x5a\x52\x52 \b,bootselector
-# BOOT_EXTENDED definitions contain assembler instructions:
-# xchg ecx,edx;addl ecx,edx;movw lba_info,si;movb 0x42,ah;pop dx;push dx;int 0x13
->>>0x96 search/1 \x66\x87\xca\x66\x01\xca\x66\x89\x16\x3a\x07\xbe\x32\x07\xb4\x42\x5a\x52\xcd\x13 \b,boot extended
-# COM_PORT_VAL definitions contain assembler instructions: outb al,dx;add 5,dl;inb %dx;test 0x40,al
->>>0x130 search/55 \xee\x80\xc2\x05\xec\xa8\x40 \b,serial IO
-# not TERSE_ERROR
->>>196 search/106 No\ active\ partition\0
->>>>&0 string Disk\ read\ error\0
->>>>>&0 string No\ operating\ system\0 \b,verbose
-# not NO_CHS definitions contain assembler instructions: pop dx;push dx;movb $8,ah;int 0x13
->>>0x7d search/7 \x5a\x52\xb4\x08\xcd\x13 \b,CHS
-# not NO_LBA_CHECK definitions contain assembler instructions: movw 0x55aa,bx;movb 0x41,ah;pop dx;push dx;int 0x13
->>>0xa4 search/84 \xbb\xaa\x55\xb4\x41\x5a\x52\xcd\x13 \b,LBA-check
-# assembler instructions: movw nametab,bx
->>>0x26 search/21 \xBB\x94\x07
-# not NO_BANNER definitions contain assembler instructions: mov banner,si;call message_crlf
->>>>&-9 ubequad&0xBE00f0E800febb94 0xBE0000E80000bb94
->>>>>181 search/166 Error\ \0
-# "a: disk" , "Fn: diskn" or "NetBSD MBR boot"
->>>>>>&3 string x \b,"%s"
->>>446 use partition-table
-# Andrea Mazzoleni AdvanceCD mbr loader of http://advancemame.sourceforge.net/boot-readme.html
-# added by Joerg Jenderek at Nov 2012 for versions 1.3 - 1.4
-# assembler instructions: jmp short 0x58;nop;ASCII
-0 ubequad&0xeb58908000000000 0xeb58900000000000
-# assembler instructions: cli;xor ax,ax;mov ds,ax;mov es,ax;mov ss,
->(1.b+2) ubequad 0xfa31c08ed88ec08e
-# Error messages at end of code
->>376 string No\ operating\ system\r\n\0
->>>398 string Disk\ error\r\n\0FDD\0HDD\0
->>>>419 string \ EBIOS\r\n\0 AdvanceMAME mbr
-
-# Neil Turton mbr loader variant of https://www.chiark.greenend.org.uk/~neilt/mbr/
-# added by Joerg Jenderek at Mar 2011 for versions 1.0.0 - 1.1.11
-# for 1st version assembler instructions: cld;xor ax,ax;mov DS,ax;MOV ES,AX;mov SI,
-# or cld;xor ax,ax;mov SS,ax;XOR SP,SP;mov DS,
-0 ulequad&0xcE1b40D48EC031FC 0x8E0000D08EC031FC
-# pointer to the data starting with Neil Turton signature string
->(0x1BC.s) string NDTmbr
->>&-14 string 1234F\0 Turton mbr (
-# parameters also viewed by install-mbr --list
->>>(0x1BC.s+7) ubyte x \b%u<=
->>>(0x1BC.s+9) ubyte x \bVersion<=%u
-#>>>(0x1BC.s+8) ubyte x asm_flag_%x
->>>(0x1BC.s+8) ubyte&1 1 \b,Y2K-Fix
-# variant used by testdisk of https://www.cgsecurity.org/wiki/Menu_MBRCode
->>>(0x1BC.s+8) ubyte&2 2 \b,TestDisk
-#0x1~1,..,0x8~4,0x10~F,0x80~A enabled
-#>>>(0x1BC.s+10) ubyte x \b,flags %#x
-#0x0~1,0x1~2,...,0x3~4,0x4~F,0x7~D default boot
-#>>>(0x1BC.s+11) ubyte x \b,cfg_def %#x
-# for older versions
->>>(0x1BC.s+9) ubyte <2
-#>>>>(0x1BC.s+12) ubyte 18 \b,%hhu/18 seconds
->>>>(0x1BC.s+12) ubyte !18 \b,%u/18 seconds
-# floppy A: or B:
->>>>(0x1BC.s+13) ubyte <2 \b,floppy %#x
->>>>(0x1BC.s+13) ubyte >1
-# 1st hard disc
-#>>>>>(0x1BC.s+13) ubyte 0x80 \b,drive %#x
-# not 1st hard disc
->>>>>(0x1BC.s+13) ubyte !0x80 \b,drive %#x
-# for version >= 2 maximal timeout can be 65534
->>>(0x1BC.s+9) ubyte >1
-#>>>>(0x1BC.s+12) uleshort 18 \b,%u/18 seconds
->>>>(0x1BC.s+12) uleshort !18 \b,%u/18 seconds
-# floppy A: or B:
->>>>(0x1BC.s+14) ubyte <2 \b,floppy %#x
->>>>(0x1BC.s+14) ubyte >1
-# 1st hard disc
-#>>>>>(0x1BC.s+14) ubyte 0x80 \b,drive %#x
-# not 1st hard disc
->>>>>(0x1BC.s+14) ubyte !0x80 \b,drive %#x
->>>0 ubyte x \b)
-
-# added by Joerg Jenderek
-# In the second sector (+0x200) are variables according to grub-0.97/stage2/asm.S or
-# grub-1.94/kern/i386/pc/startup.S
-# https://www.gnu.org/software/grub/manual/grub.html#Embedded-data
-# usual values are marked with comments to get only information of strange GRUB loaders
-0x200 uleshort 0x70EA
-# found only version 3.{1,2}
->0x206 ubeshort >0x0300
-# GRUB version (0.5.)95,0.93,0.94,0.96,0.97 > "00"
->>0x212 ubyte >0x29
->>>0x213 ubyte >0x29
-# not iso9660_stage1_5
-#>>>0 ulelong&0x00BE5652 0x00BE5652
->>>>0x213 ubyte >0x29 GRand Unified Bootloader
-# config_file for stage1_5 is 0xffffffff + default "/boot/grub/stage2"
->>>>0x217 ubyte 0xFF stage1_5
->>>>0x217 ubyte <0xFF stage2
->>>>0x206 ubyte x \b version %u
->>>>0x207 ubyte x \b.%u
-# module_size for 1.94
->>>>0x208 ulelong <0xffffff \b, installed partition %u
-#>>>>0x208 ulelong =0xffffff \b, %lu (default)
->>>>0x208 ulelong >0xffffff \b, installed partition %u
-# GRUB 0.5.95 unofficial
->>>>0x20C ulelong&0x2E300000 0x2E300000
-# 0=stage2 1=ffs 2=e2fs 3=fat 4=minix 5=reiserfs
->>>>>0x20C ubyte x \b, identifier %#x
-#>>>>>0x20D ubyte =0 \b, LBA flag %#x (default)
->>>>>0x20D ubyte >0 \b, LBA flag %#x
-# GRUB version as string
->>>>>0x20E string >\0 \b, GRUB version %-s
-# for stage1_5 is 0xffffffff + config_file "/boot/grub/stage2" default
->>>>>>0x215 ulong 0xffffffff
->>>>>>>0x219 string >\0 \b, configuration file %-s
->>>>>>0x215 ulong !0xffffffff
->>>>>>>0x215 string >\0 \b, configuration file %-s
-# newer GRUB versions
->>>>0x20C ulelong&0x2E300000 !0x2E300000
-##>>>>>0x20C ulelong =0 \b, saved entry %d (usual)
->>>>>0x20C ulelong >0 \b, saved entry %d
-# for 1.94 contains kernel image size
-# for 0.93,0.94,0.96,0.97
-# 0=stage2 1=ffs 2=e2fs 3=fat 4=minix 5=reiserfs 6=vstafs 7=jfs 8=xfs 9=iso9660 a=ufs2
->>>>>0x210 ubyte x \b, identifier %#x
-# The flag for LBA forcing is in most cases 0
-#>>>>>0x211 ubyte =0 \b, LBA flag %#x (default)
->>>>>0x211 ubyte >0 \b, LBA flag %#x
-# GRUB version as string
->>>>>0x212 string >\0 \b, GRUB version %-s
-# for stage1_5 is 0xffffffff + config_file "/boot/grub/stage2" default
->>>>>0x217 ulong 0xffffffff
->>>>>>0x21b string >\0 \b, configuration file %-s
->>>>>0x217 ulong !0xffffffff
->>>>>>0x217 string >\0 \b, configuration file %-s
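
A sketch reading the same embedded stage2 variables the entries above print: the second-sector signature at 0x200, the version bytes at 0x206/0x207, the installed partition at 0x208 and, for the newer layout, the version string at 0x212. The helper and its return keys are illustrative only, and it assumes the whole GRUB image is in `img`:

import struct

def grub_stage2_info(img: bytes):
    if struct.unpack_from("<H", img, 0x200)[0] != 0x70EA:
        return None
    major, minor = img[0x206], img[0x207]
    installed_part = struct.unpack_from("<I", img, 0x208)[0]
    version_str = img[0x212:img.index(0, 0x212)].decode("ascii", errors="replace")
    return {"version": f"{major}.{minor}",
            "installed_partition": installed_part,
            "version_string": version_str}
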
-
-# DOS x86 sector updated and separated from "DOS/MBR boot sector" by Joerg Jenderek at May 2011
-# JuMP short bootcodeoffset NOP assembler instructions will usually be EB xx 90
-# over BIOS parameter block (BPB)
-# https://thestarman.pcministry.com/asm/2bytejumps.htm#FWD
-# older drives may use Near JuMP instruction E9 xx xx
-# minimal short forward jump found 0x29 for bootloaders or 0x0
-# maximal short forward jump is 0x7f
-# OEM-ID is empty or contains readable bytes
-0 ulelong&0x804000E9 0x000000E9
-!:strength +60
-# mtools-3.9.8/msdos.h
-# usual values are marked with comments to get only information of strange FAT systems
-# valid sectorsize must be a power of 2 from 32 to 32768
->11 uleshort&0x001f 0
->>11 uleshort <32769
->>>11 uleshort >31
->>>>21 ubyte&0xf0 0xF0
->>>>>0 ubyte 0xEB DOS/MBR boot sector
->>>>>>1 ubyte x \b, code offset %#x+2
->>>>>0 ubyte 0xE9
->>>>>>1 uleshort x \b, code offset %#x+3
->>>>>3 string >\0 \b, OEM-ID "%-.8s"
-#http://mirror.href.com/thestarman/asm/debug/debug2.htm#IHC
->>>>>>8 string IHC \b cached by Windows 9M
->>>>>11 uleshort >512 \b, Bytes/sector %u
-#>>>>>11 uleshort =512 \b, Bytes/sector %u=512 (usual)
->>>>>11 uleshort <512 \b, Bytes/sector %u
->>>>>13 ubyte >1 \b, sectors/cluster %u
-#>>>>>13 ubyte =1 \b, sectors/cluster %u (usual on Floppies)
-# for lazy FAT32 implementation like Transcend digital photo frame PF830
->>>>>82 string/c fat32
->>>>>>14 uleshort !32 \b, reserved sectors %u
-#>>>>>>14 uleshort =32 \b, reserved sectors %u (usual Fat32)
->>>>>82 string/c !fat32
->>>>>>14 uleshort >1 \b, reserved sectors %u
-#>>>>>>14 uleshort =1 \b, reserved sectors %u (usual FAT12,FAT16)
-#>>>>>>14 uleshort 0 \b, reserved sectors %u (usual NTFS)
->>>>>16 ubyte >2 \b, FATs %u
-#>>>>>16 ubyte =2 \b, FATs %u (usual)
->>>>>16 ubyte =1 \b, FAT %u
->>>>>16 ubyte >0
->>>>>17 uleshort >0 \b, root entries %u
-#>>>>>17 uleshort =0 \b, root entries %hu=0 (usual Fat32)
->>>>>19 uleshort >0 \b, sectors %u (volumes <=32 MB)
-#>>>>>19 uleshort =0 \b, sectors %hu=0 (usual Fat32)
->>>>>21 ubyte >0xF0 \b, Media descriptor %#x
-#>>>>>21 ubyte =0xF0 \b, Media descriptor %#x (usual floppy)
->>>>>21 ubyte <0xF0 \b, Media descriptor %#x
->>>>>22 uleshort >0 \b, sectors/FAT %u
-#>>>>>22 uleshort =0 \b, sectors/FAT %hu=0 (usual Fat32)
->>>>>24 uleshort x \b, sectors/track %u
->>>>>26 ubyte >2 \b, heads %u
-#>>>>>26 ubyte =2 \b, heads %u (usual floppy)
->>>>>26 ubyte =1 \b, heads %u
-# valid only for sector sizes with more than 32 Bytes
->>>>>11 uleshort >32
-# https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system#Extended_BIOS_Parameter_Block
-# skip for values 2,2Ah,70h,73h,DFh
-# and continue for extended boot signature values 0,28h,29h,80h
->>>>>>38 ubyte&0x56 =0
->>>>>>>28 ulelong >0 \b, hidden sectors %u
-#>>>>>>>28 ulelong =0 \b, hidden sectors %u (usual floppy)
->>>>>>>32 ulelong >0 \b, sectors %u (volumes > 32 MB)
-#>>>>>>>32 ulelong =0 \b, sectors %u (volumes > 32 MB)
-# FAT<32 bit specific
->>>>>>>82 string/c !fat32
-#>>>>>>>>36 ubyte 0x80 \b, physical drive %#x=0x80 (usual harddisk)
-#>>>>>>>>36 ubyte 0 \b, physical drive %#x=0 (usual floppy)
->>>>>>>>36 ubyte !0x80
->>>>>>>>>36 ubyte !0 \b, physical drive %#x
-# VGA-copy CRC or
-# in Windows NT bit 0 is a dirty flag to request chkdsk at boot time. bit 1 requests surface scan too
->>>>>>>>37 ubyte >0 \b, reserved %#x
-#>>>>>>>>37 ubyte =0 \b, reserved %#x
-# extended boot signature value is 0x80 for NTFS, 0x28 or 0x29 for others
->>>>>>>>38 ubyte !0x29 \b, dos < 4.0 BootSector (%#x)
->>>>>>>>38 ubyte&0xFE =0x28
->>>>>>>>>39 ulelong x \b, serial number %#x
->>>>>>>>38 ubyte =0x29
->>>>>>>>>43 string <NO\ NAME \b, label: "%11.11s"
->>>>>>>>>43 string >NO\ NAME \b, label: "%11.11s"
->>>>>>>>>43 string =NO\ NAME \b, unlabeled
-# there exist some old floppies without the word FAT at offset 54
-# a word like "FATnm " is only a hint that the FAT is nm bits wide
-# Normally the number of clusters is calculated from the values of the BPB:
-# if it is small enough the FAT is 12 bit, if it is too big the FAT is 32 bit,
-# otherwise the FAT is 16 bit (see the cluster-count sketch after this boot-sector entry).
-# http://homepage.ntlworld.com/jonathan.deboynepollard/FGA/determining-fat-widths.html
->>>>>82 string/c !fat32
->>>>>>54 string FAT12 \b, FAT (12 bit)
->>>>>>54 string FAT16 \b, FAT (16 bit)
->>>>>>54 default x
-# determine FAT bit size by media descriptor
-# small floppies implies FAT12
->>>>>>>21 ubyte <0xF0 \b, FAT (12 bit by descriptor)
-# with media descriptor F0h floppy or maybe superfloppy with FAT16
->>>>>>>21 ubyte =0xF0
-# superfloppy (many sectors) implies FAT16
->>>>>>>>32 ulelong >0xFFFF \b, FAT (16 bit by descriptor+sectors)
-# no superfloppy with media descriptor F0h implies FAT12
->>>>>>>>32 default x \b, FAT (12 bit by descriptor+sectors)
-# with media descriptor F8h floppy or hard disc with FAT12 or FAT16
->>>>>>>21 ubyte =0xF8
-# 360 KiB with media descriptor F8h, 9 sectors per track, single sided floppy implies FAT12
->>>>>>>>19 ubequad 0xd002f80300090001 \b, FAT (12 bit by descriptor+geometry)
-# hard disc with FAT12 or FAT16
->>>>>>>>19 default x \b, FAT (1Y bit by descriptor)
-# with media descriptor FAh floppy, RAM disc with FAT12 or FAT16 or Tandy hard disc
->>>>>>>21 ubyte =0xFA
-# 320 KiB with media descriptor FAh, 8 sectors per track, single sided floppy implies FAT12
->>>>>>>>19 ubequad 0x8002fa0200080001 \b, FAT (12 bit by descriptor+geometry)
-# RAM disc with FAT12 or FAT16 or Tandy hard disc
->>>>>>>>19 default x \b, FAT (1Y bit by descriptor)
-# others are floppy
->>>>>>>21 default x \b, FAT (12 bit by descriptor)
-# FAT32 bit specific
->>>>>82 string/c fat32 \b, FAT (32 bit)
->>>>>>36 ulelong x \b, sectors/FAT %u
-# https://technet.microsoft.com/en-us/library/cc977221.aspx
->>>>>>40 uleshort >0 \b, extension flags %#x
-#>>>>>>40 uleshort =0 \b, extension flags %hu
->>>>>>42 uleshort >0 \b, fsVersion %u
-#>>>>>>42 uleshort =0 \b, fsVersion %u (usual)
->>>>>>44 ulelong >2 \b, rootdir cluster %u
-#>>>>>>44 ulelong =2 \b, rootdir cluster %u
-#>>>>>>44 ulelong =1 \b, rootdir cluster %u
->>>>>>48 uleshort >1 \b, infoSector %u
-#>>>>>>48 uleshort =1 \b, infoSector %u (usual)
->>>>>>48 uleshort <1 \b, infoSector %u
-# 0 or 0xFFFF instead of usual 6 means no backup sector
->>>>>>50 uleshort =0xFFFF \b, no Backup boot sector
->>>>>>50 uleshort =0 \b, no Backup boot sector
-#>>>>>>50 uleshort =6 \b, Backup boot sector %u (usual)
->>>>>>50 default x
->>>>>>>50 uleshort x \b, Backup boot sector %u
-# corrected by Joerg Jenderek at Feb 2011 according to https://thestarman.pcministry.com/asm/mbr/MSWIN41.htm#FSINFO
->>>>>>52 ulelong >0 \b, reserved1 %#x
->>>>>>56 ulelong >0 \b, reserved2 %#x
->>>>>>60 ulelong >0 \b, reserved3 %#x
-# same structure as FAT1X
-#>>>>>>64 ubyte =0x80 \b, physical drive %#x=80 (usual harddisk)
-#>>>>>>64 ubyte =0 \b, physical drive %#x=0 (usual floppy)
->>>>>>64 ubyte !0x80
->>>>>>>64 ubyte >0 \b, physical drive %#x
-# in Windows NT bit 0 is a dirty flag to request chkdsk at boot time. bit 1 requests surface scan too
->>>>>>65 ubyte >0 \b, reserved %#x
->>>>>>66 ubyte !0x29 \b, dos < 4.0 BootSector (%#x)
->>>>>>66 ubyte =0x29
->>>>>>>67 ulelong x \b, serial number %#x
->>>>>>>71 string <NO\ NAME \b, label: "%11.11s"
->>>>>>>71 string >NO\ NAME \b, label: "%11.11s"
->>>>>>>71 string =NO\ NAME \b, unlabeled
-# additional tests for floppy image added by Joerg Jenderek
-# no fixed disk
->>>>>21 ubyte !0xF8
-# floppy media with 12 bit FAT
->>>>>>54 string !FAT16
-# test for FAT after bootsector
->>>>>>>(11.s) ulelong&0x00ffffF0 0x00ffffF0 \b, followed by FAT
-# floppy image
-!:mime application/x-ima
-# NTFS specific added by Joerg Jenderek at Mar 2011 according to https://thestarman.pcministry.com/asm/mbr/NTFSBR.htm
-# and http://homepage.ntlworld.com/jonathan.deboynepollard/FGA/bios-parameter-block.html
-# 0 FATs
->>>>>16 ubyte =0
-# 0 root entries
->>>>>>17 uleshort =0
-# 0 DOS sectors
->>>>>>>19 uleshort =0
-# 0 sectors/FAT
-# dos < 4.0 BootSector value found is 0x80
-#38 ubyte =0x80 \b, dos < 4.0 BootSector (%#x)
->>>>>>>>22 uleshort =0 \b; NTFS
->>>>>>>>>24 uleshort >0 \b, sectors/track %u
->>>>>>>>>36 ulelong !0x800080 \b, physical drive %#x
->>>>>>>>>40 ulequad >0 \b, sectors %lld
->>>>>>>>>48 ulequad >0 \b, $MFT start cluster %lld
->>>>>>>>>56 ulequad >0 \b, $MFTMirror start cluster %lld
-# Values 0 to 127 represent MFT record sizes of 0 to 127 clusters.
-# Values 128 to 255 represent MFT record sizes of 2^(256-N) bytes (see the record-size sketch after the NTFS entries below).
->>>>>>>>>64 lelong <256
->>>>>>>>>>64 lelong <128 \b, clusters/RecordSegment %d
->>>>>>>>>>64 ubyte >127 \b, bytes/RecordSegment 2^(-1*%i)
-# Values 0 to 127 represent index block sizes of 0 to 127 clusters.
-# Values 128 to 255 represent index block sizes of 2^(256-N) bytes
->>>>>>>>>68 ulelong <256
->>>>>>>>>>68 ulelong <128 \b, clusters/index block %d
-#>>>>>>>>>>68 ulelong >127 \b, bytes/index block 2^(256-%d)
->>>>>>>>>>68 ubyte >127 \b, bytes/index block 2^(-1*%i)
->>>>>>>>>72 ulequad x \b, serial number 0%llx
->>>>>>>>>80 ulelong >0 \b, checksum %#x
-#>>>>>>>>>80 ulelong =0 \b, checksum %#x=0 (usual)
-# unicode loadername size jump
->>>>>>>>>(0x200.s*2) ubyte x
-# in next sector loadername terminated by unicode CTRL-D and $
->>>>>>>>>>&0x1FF ulequad&0x0000FFffFFffFF00 0x0000002400040000 \b; contains
-# if a 2nd NTFS sector is found then assume a whole filesystem
-#!:mime application/x-raw-disk-image
-!:ext img/bin/ntfs
->>>>>>>>>>>0x200 use ntfs-sector2
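
The cluster-count sketch promised next to the FAT12/FAT16/FAT32 comments above: the FATnn string at offset 54 (or 82) is only a hint, so the width is derived from the BPB fields instead, using the usual thresholds from Microsoft's FAT specification. The function name is mine; the offsets are the ones the entries above test:

import struct

def fat_width_from_bpb(bs: bytes) -> int:
    # < 4085 clusters -> FAT12, < 65525 -> FAT16, otherwise FAT32
    bps, spc = struct.unpack_from("<HB", bs, 11)          # bytes/sector, sectors/cluster
    reserved, nfats = struct.unpack_from("<HB", bs, 14)
    root_entries, total16 = struct.unpack_from("<HH", bs, 17)
    fat_size16 = struct.unpack_from("<H", bs, 22)[0]
    total32 = struct.unpack_from("<I", bs, 32)[0]
    fat_size32 = struct.unpack_from("<I", bs, 36)[0]      # only meaningful on FAT32
    if bps == 0 or spc == 0:
        raise ValueError("not a FAT BPB")
    fat_size = fat_size16 or fat_size32
    total = total16 or total32
    root_sectors = (root_entries * 32 + bps - 1) // bps   # 0 on FAT32
    data_sectors = total - (reserved + nfats * fat_size + root_sectors)
    clusters = data_sectors // spc
    return 12 if clusters < 4085 else (16 if clusters < 65525 else 32)
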
-
-# For 2nd NTFS sector added by Joerg Jenderek at Jan 2013, Mar 2019
-# https://thestarman.pcministry.com/asm/mbr/NTFSbrHexEd.htm
-# unused assembler instructions short JMP y2;NOP;NOP
-0x056 ulelong&0xFFFF0FFF 0x909002EB NTFS
-#!:mime application/octet-stream
-!:ext bin
->0 use ntfs-sector2
-# https://memory.dataram.com/products-and-services/software/ramdisk
-# assembler instructions JMP C000;NOP
-0x056 ulelong 0x9000c0e9 NTFS
-#!:mime application/octet-stream
-!:ext bin
->0 use ntfs-sector2
-# check for characteristics of second NTFS sector and then display loader name
-0 name ntfs-sector2
-# number of utf16 characters of loadername
->0 uleshort <8
-# unused assembler instructions JMP y2;NOP;NOP or JMP C000;NOP
->>0x056 ulelong&0xFF0000FD 0x900000E9
-# loadernames are NTLDR,CMLDR,PELDR,$LDR$ or BOOTMGR
->>>0x002 lestring16 x bootstrap %-5.5s
-# check for 7 character length of loader name like BOOTMGR
->>>0 uleshort 7
->>>>0x0c lestring16 x \b%-2.2s
-### DOS,NTFS boot sectors end
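
The record-size sketch referenced above: the clusters/RecordSegment byte at 0x40 and the clusters/index-block byte at 0x44 of an NTFS boot sector are signed; non-negative values count clusters, while a negative value N means 2**(-N) bytes, which is the 2^(256-N) rule the comments describe for the unsigned reading. The helper name is illustrative:

import struct

def ntfs_record_size(boot: bytes, offset: int = 0x40) -> int:
    # offset 0x40: clusters per MFT record, 0x44: clusters per index block
    bps = struct.unpack_from("<H", boot, 11)[0]
    spc = boot[13]
    raw = struct.unpack_from("<b", boot, offset)[0]       # signed byte
    return raw * spc * bps if raw >= 0 else 1 << (-raw)

# the common value 0xF6 (-10) at 0x40 gives 1 << 10 = 1024 bytes per MFT record
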
-
-# ntfsclone-image is a special save format for NTFS volumes,
-# created and restored by the ntfsclone program
-0 string \0ntfsclone-image ntfsclone image,
->0x10 byte x version %d.
->0x11 byte x \b%d,
->0x12 lelong x cluster size %d,
->0x16 lequad x device size %lld,
->0x1e lequad x %lld total clusters,
->0x26 lequad x %lld clusters in use
-
-
-0 name ffsv1
->8404 string x last mounted on %s,
-#>9504 ledate x last checked at %s,
->8224 ledate x last written at %s,
->8401 byte x clean flag %d,
->8228 lelong x number of blocks %d,
->8232 lelong x number of data blocks %d,
->8236 lelong x number of cylinder groups %d,
->8240 lelong x block size %d,
->8244 lelong x fragment size %d,
->8252 lelong x minimum percentage of free blocks %d,
->8256 lelong x rotational delay %dms,
->8260 lelong x disk rotational speed %drps,
->8320 lelong 0 TIME optimization
->8320 lelong 1 SPACE optimization
-
-9564 lelong 0x00011954 Unix Fast File system [v1] (little-endian),
->0 use ffsv1
-
-9564 belong 0x00011954 Unix Fast File system [v1] (big-endian),
->7168 belong 0x4c41424c Apple UFS Volume
->>7186 string x named %s,
->>7176 belong x volume label version %d,
->>7180 bedate x created on %s,
->0 use \^ffsv1
-
-0 name ffsv2
->212 string x last mounted on %s,
->680 string >\0 volume name %s,
->1072 leqldate x last written at %s,
->209 byte x clean flag %d,
->210 byte x readonly flag %d,
->1080 lequad x number of blocks %lld,
->1088 lequad x number of data blocks %lld,
->44 lelong x number of cylinder groups %d,
->48 lelong x block size %d,
->52 lelong x fragment size %d,
->1196 lelong x average file size %d,
->1200 lelong x average number of files in dir %d,
->1104 lequad x pending blocks to free %lld,
->1112 lelong x pending inodes to free %d,
->712 lequad x system-wide uuid %0llx,
->60 lelong x minimum percentage of free blocks %d,
->128 lelong 0 TIME optimization
->128 lelong 1 SPACE optimization
-
-42332 lelong 0x19012038 Unix Fast File system [v2ea] (little-endian)
->40960 use ffsv2
-
-42332 lelong 0x19540119 Unix Fast File system [v2] (little-endian)
->40960 use ffsv2
-
-42332 belong 0x19012038 Unix Fast File system [v2ea] (big-endian)
->40960 use \^ffsv2
-
-42332 belong 0x19540119 Unix Fast File system [v2] (big-endian)
->40960 use \^ffsv2
-
-66908 lelong 0x19012038 Unix Fast File system [v2ea] (little-endian)
->65536 use ffsv2
-
-66908 lelong 0x19540119 Unix Fast File system [v2] (little-endian)
->65536 use ffsv2
-
-66908 belong 0x19012038 Unix Fast File system [v2ea] (big-endian)
->65536 use \^ffsv2
-
-66908 belong 0x19540119 Unix Fast File system [v2] (big-endian)
->65536 use \^ffsv2
-
-0 ulequad 0xc8414d4dc5523031 HAMMER filesystem (little-endian),
->0x90 lelong+1 x volume %d
->0x94 lelong x (of %d),
->0x50 string x name %s,
->0x98 ulelong x version %u,
->0xa0 ulelong x flags %#x
-
-0 ulequad 0x48414d3205172011 HAMMER2 filesystem (little-endian),
->0x3b byte x volume %d,
->0x28 ulequad/1073741824 x size %lluGB,
->0x30 ulelong x version %u,
->0x34 ulelong x flags %#x
-
-# ext2/ext3 filesystems - Andreas Dilger <adilger@dilger.ca>
-# ext4 filesystem - Eric Sandeen <sandeen@sandeen.net>
-# volume label and UUID Russell Coker
-# https://etbe.coker.com.au/2008/07/08/label-vs-uuid-vs-device/
-0x438 leshort 0xEF53 Linux
->0x44c lelong x rev %d
->0x43e leshort x \b.%d
-# No journal? ext2
->0x45c lelong ^0x0000004 ext2 filesystem data
->>0x43a leshort ^0x0000001 (mounted or unclean)
-# Has a journal? ext3 or ext4
->0x45c lelong &0x0000004
-# and small INCOMPAT?
->>0x460 lelong <0x0000040
-# and small RO_COMPAT?
->>>0x464 lelong <0x0000008 ext3 filesystem data
-# else large RO_COMPAT?
->>>0x464 lelong >0x0000007 ext4 filesystem data
-# else large INCOMPAT?
->>0x460 lelong >0x000003f ext4 filesystem data
->0x468 ubelong x \b, UUID=%08x
->0x46c ubeshort x \b-%04x
->0x46e ubeshort x \b-%04x
->0x470 ubeshort x \b-%04x
->0x472 ubelong x \b-%08x
->0x476 ubeshort x \b%04x
->0x478 string >0 \b, volume name "%s"
-# General flags for any ext* fs
->0x460 lelong &0x0000004 (needs journal recovery)
->0x43a leshort &0x0000002 (errors)
-# INCOMPAT flags
->0x460 lelong &0x0000001 (compressed)
-#>0x460 lelong &0x0000002 (filetype)
-#>0x460 lelong &0x0000010 (meta bg)
->0x460 lelong &0x0000040 (extents)
->0x460 lelong &0x0000080 (64bit)
-#>0x460 lelong &0x0000100 (mmp)
-#>0x460 lelong &0x0000200 (flex bg)
-# RO_INCOMPAT flags
-#>0x464 lelong &0x0000001 (sparse super)
->0x464 lelong &0x0000002 (large files)
->0x464 lelong &0x0000008 (huge files)
-#>0x464 lelong &0x0000010 (gdt checksum)
-#>0x464 lelong &0x0000020 (many subdirs)
-#>0x463 lelong &0x0000040 (extra isize)
-
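The ext2/ext3/ext4 decision above hinges on three superblock feature words: COMPAT bit 0x4 (has a journal), the INCOMPAT word at 0x460 and the RO_COMPAT word at 0x464. Below is a minimal Python sketch of that same decision plus the UUID/label formatting, using only the offsets tested above; "disk.img" and the 0x500-byte read size are placeholders, and mount-state/recovery flags are ignored.

import struct
import uuid

EXT_MAGIC = 0xEF53            # leshort at 0x438
COMPAT_HAS_JOURNAL = 0x0004   # lelong at 0x45c
INCOMPAT_LARGE = 0x0040       # INCOMPAT at 0x460: 0x40 and above means ext4
RO_COMPAT_LARGE = 0x0008      # RO_COMPAT at 0x464: 0x8 and above means ext4

def classify_ext(img: bytes) -> str:
    if struct.unpack_from("<H", img, 0x438)[0] != EXT_MAGIC:
        return "not an ext2/3/4 superblock"
    compat = struct.unpack_from("<I", img, 0x45C)[0]
    incompat = struct.unpack_from("<I", img, 0x460)[0]
    ro_compat = struct.unpack_from("<I", img, 0x464)[0]
    if not compat & COMPAT_HAS_JOURNAL:
        kind = "ext2 filesystem data"       # no journal
    elif incompat >= INCOMPAT_LARGE or ro_compat >= RO_COMPAT_LARGE:
        kind = "ext4 filesystem data"       # large INCOMPAT or RO_COMPAT
    else:
        kind = "ext3 filesystem data"
    vol_uuid = uuid.UUID(bytes=img[0x468:0x478])
    label = img[0x478:0x488].split(b"\0", 1)[0].decode("ascii", "replace")
    return f"{kind}, UUID={vol_uuid}, volume name {label!r}"

with open("disk.img", "rb") as f:           # hypothetical image file
    print(classify_ext(f.read(0x500)))
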
-# f2fs filesystem - Tuomas Tynkkynen <tuomas.tynkkynen@iki.fi>
-0x400 lelong 0xF2F52010 F2FS filesystem
->0x46c ubelong x \b, UUID=%08x
->0x470 ubeshort x \b-%04x
->0x472 ubeshort x \b-%04x
->0x474 ubeshort x \b-%04x
->0x476 ubelong x \b-%08x
->0x47a ubeshort x \b%04x
->0x147c lestring16 x \b, volume name "%s"
-
-# Minix filesystems - Juan Cespedes <cespedes@debian.org>
-0x410 leshort 0x137f
-!:strength / 2
->0x402 beshort < 100
->0x402 beshort > -1 Minix filesystem, V1, 14 char names, %d zones
->0x1e string minix \b, bootable
-0x410 beshort 0x137f
-!:strength / 2
->0x402 beshort < 100
->0x402 beshort > -1 Minix filesystem, V1 (big endian), %d zones
->0x1e string minix \b, bootable
-0x410 leshort 0x138f
-!:strength / 2
->0x402 beshort < 100
->0x402 beshort > -1 Minix filesystem, V1, 30 char names, %d zones
->0x1e string minix \b, bootable
-0x410 beshort 0x138f
-!:strength / 2
->0x402 beshort < 100
->0x402 beshort > -1 Minix filesystem, V1, 30 char names (big endian), %d zones
->0x1e string minix \b, bootable
-# Weak Magic: this is $x
-#0x410 leshort 0x2468
-#>0x402 beshort < 100
-#>>0x402 beshort > -1 Minix filesystem, V2, 14 char names
-#>0x1e string minix \b, bootable
-#0x410 beshort 0x2468
-#>0x402 beshort < 100
-#>0x402 beshort > -1 Minix filesystem, V2 (big endian)
-#>0x1e string minix \b, bootable
-#0x410 leshort 0x2478
-#>0x402 beshort < 100
-#>0x402 beshort > -1 Minix filesystem, V2, 30 char names
-#>0x1e string minix \b, bootable
-#0x410 leshort 0x2478
-#>0x402 beshort < 100
-#>0x402 beshort > -1 Minix filesystem, V2, 30 char names
-#>0x1e string minix \b, bootable
-#0x410 beshort 0x2478
-#>0x402 beshort !0 Minix filesystem, V2, 30 char names (big endian)
-#>0x1e string minix \b, bootable
-# Weak Magic! this is MD
-#0x418 leshort 0x4d5a
-#>0x402 beshort <100
-#>>0x402 beshort > -1 Minix filesystem, V3, 60 char names
-
-# SGI disk labels - Nathan Scott <nathans@debian.org>
-0 belong 0x0BE5A941 SGI disk label (volume header)
-
-# SGI XFS filesystem - Nathan Scott <nathans@debian.org>
-0 belong 0x58465342 SGI XFS filesystem data
->0x4 belong x (blksz %d,
->0x68 beshort x inosz %d,
->0x64 beshort ^0x2004 v1 dirs)
->0x64 beshort &0x2004 v2 dirs)
-
-############################################################################
-# Minix-ST kernel floppy
-0x800 belong 0x46fc2700 Atari-ST Minix kernel image
-# https://en.wikipedia.org/wiki/BIOS_parameter_block
-# floppies with valid BPB and any instruction at beginning
->19 string \240\005\371\005\0\011\0\2\0 \b, 720k floppy
->19 string \320\002\370\005\0\011\0\1\0 \b, 360k floppy
-
-############################################################################
-# Hmmm, is this a better way of detecting _standard_ floppy images ?
-19 string \320\002\360\003\0\011\0\1\0 DOS floppy 360k
->0x1FE leshort 0xAA55 \b, DOS/MBR hard disk boot sector
-19 string \240\005\371\003\0\011\0\2\0 DOS floppy 720k
->0x1FE leshort 0xAA55 \b, DOS/MBR hard disk boot sector
-19 string \100\013\360\011\0\022\0\2\0 DOS floppy 1440k
->0x1FE leshort 0xAA55 \b, DOS/MBR hard disk boot sector
-
-19 string \240\005\371\005\0\011\0\2\0 DOS floppy 720k, IBM
->0x1FE leshort 0xAA55 \b, DOS/MBR hard disk boot sector
-19 string \100\013\371\005\0\011\0\2\0 DOS floppy 1440k, mkdosfs
->0x1FE leshort 0xAA55 \b, DOS/MBR hard disk boot sector
-
-19 string \320\002\370\005\0\011\0\1\0 Atari-ST floppy 360k
-19 string \240\005\371\005\0\011\0\2\0 Atari-ST floppy 720k
-# | | | | |
-# | | | | heads
-# | | | sectors/track
-# | | sectors/FAT
-# | media descriptor
-# BPB: sectors
-
-# Valid media descriptor bytes for MS-DOS:
-#
-# Byte Capacity Media Size and Type
-# -------------------------------------------------
-#
-# F0 2.88 MB 3.5-inch, 2-sided, 36-sector
-# F0 1.44 MB 3.5-inch, 2-sided, 18-sector
-# F9 720K 3.5-inch, 2-sided, 9-sector
-# F9 1.2 MB 5.25-inch, 2-sided, 15-sector
-# FD 360K 5.25-inch, 2-sided, 9-sector
-# FF 320K 5.25-inch, 2-sided, 8-sector
-# FC 180K 5.25-inch, 1-sided, 9-sector
-# FE 160K 5.25-inch, 1-sided, 8-sector
-# FE 250K 8-inch, 1-sided, single-density
-# FD 500K 8-inch, 2-sided, single-density
-# FE 1.2 MB 8-inch, 2-sided, double-density
-# F8 ----- Fixed disk
-#
-# FC xxxK Apricot 70x1x9 boot disk.
-#
-# Originally a bitmap:
-# xxxxxxx0 Not two sided
-# xxxxxxx1 Double sided
-# xxxxxx0x Not 8 SPT
-# xxxxxx1x 8 SPT
-# xxxxx0xx Not Removable drive
-# xxxxx1xx Removable drive
-# 11111xxx Must be one.
-#
-# But now it's rather random:
-# 111111xx Low density disk
-# 00 SS, Not 8 SPT
-# 01 DS, Not 8 SPT
-# 10 SS, 8 SPT
-# 11 DS, 8 SPT
-#
-# 11111001 Double density 3 1/2 floppy disk, high density 5 1/4
-# 11110000 High density 3 1/2 floppy disk
-# 11111000 Hard disk any format
-#
-
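For reference, the media-descriptor table above expressed as a lookup, e.g. for annotating the byte found at BPB offset 21 or at the start of the first FAT; duplicate byte values from the table are collapsed into one description each, so this is purely illustrative.

MEDIA_DESCRIPTORS = {
    0xF0: "3.5-inch, 2-sided, 18- or 36-sector (1.44 MB / 2.88 MB)",
    0xF8: "fixed disk",
    0xF9: "3.5-inch 720K or 5.25-inch 1.2 MB, 2-sided",
    0xFC: "5.25-inch 180K, 1-sided, 9-sector (or Apricot boot disk)",
    0xFD: "5.25-inch 360K, 2-sided, 9-sector (or 8-inch 500K)",
    0xFE: "5.25-inch 160K, 1-sided, 8-sector (or 8-inch 250K / 1.2 MB)",
    0xFF: "5.25-inch 320K, 2-sided, 8-sector",
}

def describe_media(descriptor: int) -> str:
    return MEDIA_DESCRIPTORS.get(descriptor, "unknown media descriptor")

print(describe_media(0xF9))
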
-# all FAT12 (strength=70) floppies with sectorsize 512 added by Joerg Jenderek at Jun 2013
-# https://en.wikipedia.org/wiki/File_Allocation_Table#Exceptions
-# Too Weak.
-#512 ubelong&0xE0ffff00 0xE0ffff00
-# without a valid Media descriptor in place of the BPB; cases with one are handled elsewhere
-#>21 ubyte <0xE5 floppy with old FAT filesystem
-# but a valid Media descriptor at the beginning of the FAT
-#>>512 ubyte =0xed 720k
-#>>512 ubyte =0xf0 1440k
-#>>512 ubyte =0xf8 720k
-#>>512 ubyte =0xf9 1220k
-#>>512 ubyte =0xfa 320k
-#>>512 ubyte =0xfb 640k
-#>>512 ubyte =0xfc 180k
-# look like an old DOS directory entry
-#>>>0xA0E ubequad 0
-#>>>>0xA00 ubequad !0
-#!:mime application/x-ima
-#>>512 ubyte =0xfd
-# look for 2nd FAT at different location to distinguish between 360k and 500k
-#>>>0x600 ubelong&0xE0ffff00 0xE0ffff00 360k
-#>>>0x500 ubelong&0xE0ffff00 0xE0ffff00 500k
-#>>>0xA0E ubequad 0
-#!:mime application/x-ima
-#>>512 ubyte =0xfe
-#>>>0x400 ubelong&0xE0ffff00 0xE0ffff00 160k
-#>>>>0x60E ubequad 0
-#>>>>>0x600 ubequad !0
-#!:mime application/x-ima
-#>>>0xC00 ubelong&0xE0ffff00 0xE0ffff00 1200k
-#>>512 ubyte =0xff 320k
-#>>>0x60E ubequad 0
-#>>>>0x600 ubequad !0
-#!:mime application/x-ima
-#>>512 ubyte x \b, Media descriptor %#x
-# without x86 jump instruction
-#>>0 ulelong&0x804000E9 !0x000000E9
-# assembler instructions: CLI;MOV SP,1E7;MOV AX;07c0;MOV
-#>>>0 ubequad 0xfabce701b8c0078e \b, MS-DOS 1.12 bootloader
-# IOSYS.COM+MSDOS.COM
-#>>>>0xc4 use 2xDOS-filename
-#>>0 ulelong&0x804000E9 =0x000000E9
-# only x86 short jump instruction found
-#>>>0 ubyte =0xEB
-#>>>>1 ubyte x \b, code offset %#x+2
-# https://thestarman.pcministry.com/DOS/ibm100/Boot.htm
-# assembler instructions: CLI;MOV AX,CS;MOV DS,AX;MOV DX,0
-#>>>>(1.b+2) ubequad 0xfa8cc88ed8ba0000 \b, PC-DOS 1.0 bootloader
-# ibmbio.com+ibmdos.com
-#>>>>>0x176 use DOS-filename
-#>>>>>0x181 ubyte x \b+
-#>>>>>0x182 use DOS-filename
-# https://thestarman.pcministry.com/DOS/ibm110/Boot.htm
-# assembler instructions: CLI;MOV AX,CS;MOV DS,AX;XOR DX,DX;MOV
-#>>>>(1.b+2) ubequad 0xfa8cc88ed833d28e \b, PC-DOS 1.1 bootloader
-# ibmbio.com+ibmdos.com
-#>>>>>0x18b use DOS-filename
-#>>>>>0x196 ubyte x \b+
-#>>>>>0x197 use DOS-filename
-# https://en.wikipedia.org/wiki/Zenith_Data_Systems
-# assembler instructions: MOV BX,07c0;MOV SS,BX;MOV SP,01c6
-#>>>>(1.b+2) ubequad 0xbbc0078ed3bcc601 \b, Zenith Data Systems MS-DOS 1.25 bootloader
-# IO.SYS+MSDOS.SYS
-#>>>>>0x20 use 2xDOS-filename
-# https://en.wikipedia.org/wiki/Corona_Data_Systems
-# assembler instructions: MOV AX,CS;MOV DS,AX;CLI;MOV SS,AX;
-#>>>>(1.b+2) ubequad 0x8cc88ed8fa8ed0bc \b, MS-DOS 1.25 bootloader
-# IO.SYS+MSDOS.SYS
-#>>>>>0x69 use 2xDOS-filename
-# assembler instructions: CLI;PUSH CS;POP SS;MOV SP,7c00;
-#>>>>(1.b+2) ubequad 0xfa0e17bc007cb860 \b, MS-DOS 2.11 bootloader
-# defective IO.SYS+MSDOS.SYS?
-#>>>>>0x162 use 2xDOS-filename
-
-0 name cdrom
->38913 string !NSR0 ISO 9660 CD-ROM filesystem data
-!:mime application/x-iso9660-image
-!:ext iso/iso9660
->38913 string NSR0 UDF filesystem data
-!:mime application/x-iso9660-image
-!:ext iso/udf
->>38917 string 1 (version 1.0)
->>38917 string 2 (version 1.5)
->>38917 string 3 (version 2.0)
->>38917 byte >0x33 (unknown version, ID %#X)
->>38917 byte <0x31 (unknown version, ID %#X)
-# The next line is not necessary because the MBR stuff is done when looking for the boot signature
->0x1FE leshort 0xAA55 (DOS/MBR boot sector)
-# "application id" which appears to be used as a volume label
->32808 string/T >\0 '%.32s'
->34816 string \000CD001\001EL\ TORITO\ SPECIFICATION (bootable)
-37633 string CD001 ISO 9660 CD-ROM filesystem data (raw 2352 byte sectors)
-!:mime application/x-iso9660-image
-32777 string CDROM High Sierra CD-ROM filesystem data
-# "application id" which appears to be used as a volume label
->32816 string/T >\0 '%.32s'
-
-
-# CDROM Filesystems
-# https://en.wikipedia.org/wiki/ISO_9660
-# Modified for UDF by gerardo.cacciari@gmail.com
-32769 string CD001
-# mime line at that position does not work
-# to display CD-ROM (70=81-11) after MBR (113=40+72+1), partition-table (71=50+21) and before Apple Driver Map (51)
-#!:strength -11
-# to display CD-ROM (114=81+33) before MBR (113=40+72+1), partition-table (71=50+21) and Apple Driver Map (51)
-!:strength +35
->0 use cdrom
-
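Below is a minimal Python sketch of the fixed-offset probing the cdrom entry performs for a plain image (not the Nero case, where the same checks are applied at a 300 KB offset); "image.iso" is a placeholder path and only the offsets listed above are consulted.

def probe_cdrom(path: str) -> str:
    with open(path, "rb") as f:
        data = f.read(40000)                       # covers every offset tested below
    if data[37633:37638] == b"CD001":
        return "ISO 9660 CD-ROM filesystem data (raw 2352 byte sectors)"
    if data[32777:32782] == b"CDROM":
        return "High Sierra CD-ROM filesystem data"
    if data[32769:32774] != b"CD001":
        return "no ISO 9660 descriptor at sector 16"
    if data[38913:38917] == b"NSR0":               # UDF descriptor sequence
        versions = {b"1": "1.0", b"2": "1.5", b"3": "2.0"}
        return "UDF filesystem data (version %s)" % versions.get(data[38917:38918], "unknown")
    desc = "ISO 9660 CD-ROM filesystem data"
    if data[34816:34846] == b"\x00CD001\x01EL TORITO SPECIFICATION":
        desc += " (bootable)"
    app_id = data[32808:32840].split(b"\0", 1)[0].decode("ascii", "replace").strip()
    return desc + (" '%s'" % app_id if app_id else "")

print(probe_cdrom("image.iso"))                    # hypothetical image file
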
-# URL: https://en.wikipedia.org/wiki/NRG_(file_format)
-# Reference: https://dl.opendesktop.org/api/files/download/id/1460731811/
-# 11577-mount-iso-0.9.5.tar.bz2/mount-iso-0.9.5/install.sh
-# From: Joerg Jenderek
-# Note: Only for Nero disc-at-once (DAO) type after 300 KB header
-339969 string CD001 Nero CD image at 0x4B000
-!:mime application/x-nrg
-!:ext nrg
->307200 use cdrom
-
-# .cso files
-# Reference: https://pismotec.com/ciso/ciso.h
-# NOTE: There are two other formats with the same magic but
-# completely incompatible specifications:
-# - GameCube/Wii CISO: https://github.com/dolphin-emu/dolphin/blob/master/Source/Core/DiscIO/CISOBlob.h
-# - PSP CISO: https://github.com/jamie/ciso/blob/master/ciso.h
-0 string CISO
-# Other fields are used to determine what type of CISO this is:
-# - 0x04 == 0x00200000: GameCube/Wii CISO (block_size)
-# - 0x10 == 0x00000800: PSP CISO (ISO-9660 sector size)
-# - 0x10 == 0x00004000: For >2GB files using maxcso...
-# https://github.com/unknownbrackets/maxcso/issues/26
-# - None of the above: Compact ISO.
->4 lelong !0
->>4 lelong !0x200000
->>>16 lelong !0x800
->>>>16 lelong !0x4000 Compressed ISO CD image
-
-# cramfs filesystem - russell@coker.com.au
-0 lelong 0x28cd3d45 Linux Compressed ROM File System data, little endian
->4 lelong x size %u
->8 lelong &1 version #2
->8 lelong &2 sorted_dirs
->8 lelong &4 hole_support
->32 lelong x CRC %#x,
->36 lelong x edition %u,
->40 lelong x %u blocks,
->44 lelong x %u files
-
-0 belong 0x28cd3d45 Linux Compressed ROM File System data, big endian
->4 belong x size %u
->8 belong &1 version #2
->8 belong &2 sorted_dirs
->8 belong &4 hole_support
->32 belong x CRC %#x,
->36 belong x edition %u,
->40 belong x %u blocks,
->44 belong x %u files
-
-# reiserfs - russell@coker.com.au
-0x10034 string ReIsErFs ReiserFS V3.5
-0x10034 string ReIsEr2Fs ReiserFS V3.6
-0x10034 string ReIsEr3Fs ReiserFS V3.6.19
->0x1002c leshort x block size %d
->0x10032 leshort &2 (mounted or unclean)
->0x10000 lelong x num blocks %d
->0x10040 lelong 1 tea hash
->0x10040 lelong 2 yura hash
->0x10040 lelong 3 r5 hash
-
-# EST flat binary format (which isn't, but anyway)
-# From: Mark Brown <broonie@sirena.org.uk>
-0 string ESTFBINR EST flat binary
-
-# Aculab VoIP firmware
-# From: Mark Brown <broonie@sirena.org.uk>
-0 string VoIP\ Startup\ and Aculab VoIP firmware
->35 string x format %s
-
-# From: Mark Brown <broonie@sirena.org.uk> [old]
-# From: Behan Webster <behanw@websterwood.com>
-0 belong 0x27051956 u-boot legacy uImage,
->32 string x %s,
->28 byte 0 Invalid os/
->28 byte 1 OpenBSD/
->28 byte 2 NetBSD/
->28 byte 3 FreeBSD/
->28 byte 4 4.4BSD/
->28 byte 5 Linux/
->28 byte 6 SVR4/
->28 byte 7 Esix/
->28 byte 8 Solaris/
->28 byte 9 Irix/
->28 byte 10 SCO/
->28 byte 11 Dell/
->28 byte 12 NCR/
->28 byte 13 LynxOS/
->28 byte 14 VxWorks/
->28 byte 15 pSOS/
->28 byte 16 QNX/
->28 byte 17 Firmware/
->28 byte 18 RTEMS/
->28 byte 19 ARTOS/
->28 byte 20 Unity OS/
->28 byte 21 INTEGRITY/
->29 byte 0 \bInvalid CPU,
->29 byte 1 \bAlpha,
->29 byte 2 \bARM,
->29 byte 3 \bIntel x86,
->29 byte 4 \bIA64,
->29 byte 5 \bMIPS,
->29 byte 6 \bMIPS 64-bit,
->29 byte 7 \bPowerPC,
->29 byte 8 \bIBM S390,
->29 byte 9 \bSuperH,
->29 byte 10 \bSparc,
->29 byte 11 \bSparc 64-bit,
->29 byte 12 \bM68K,
->29 byte 13 \bNios-32,
->29 byte 14 \bMicroBlaze,
->29 byte 15 \bNios-II,
->29 byte 16 \bBlackfin,
->29 byte 17 \bAVR32,
->29 byte 18 \bSTMicroelectronics ST200,
->29 byte 19 \bSandbox architecture,
->29 byte 20 \bANDES Technology NDS32,
->29 byte 21 \bOpenRISC 1000,
->29 byte 22 \bARM 64-bit,
->29 byte 23 \bDesignWare ARC,
->29 byte 24 \bx86_64,
->29 byte 25 \bXtensa,
->29 byte 26 \bRISC-V,
->30 byte 0 Invalid Image
->30 byte 1 Standalone Program
->30 byte 2 OS Kernel Image
->30 byte 3 RAMDisk Image
->30 byte 4 Multi-File Image
->30 byte 5 Firmware Image
->30 byte 6 Script File
->30 byte 7 Filesystem Image (any type)
->30 byte 8 Binary Flat Device Tree BLOB
->31 byte 0 (Not compressed),
->31 byte 1 (gzip),
->31 byte 2 (bzip2),
->31 byte 3 (lzma),
->12 belong x %d bytes,
->8 bedate x %s,
->16 belong x Load Address: %#08X,
->20 belong x Entry Point: %#08X,
->4 belong x Header CRC: %#08X,
->24 belong x Data CRC: %#08X
-
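The legacy uImage header above is a fixed 64-byte big-endian structure; here is a minimal Python sketch that unpacks the fields the entry prints. Only a few of the OS/architecture/type codes listed above are mapped, and "uImage" is a placeholder path.

import struct
import datetime

OS_NAMES = {1: "OpenBSD", 2: "NetBSD", 5: "Linux", 6: "SVR4", 17: "Firmware", 18: "RTEMS"}
ARCH_NAMES = {2: "ARM", 3: "Intel x86", 7: "PowerPC", 22: "ARM 64-bit", 24: "x86_64", 26: "RISC-V"}
TYPE_NAMES = {1: "Standalone Program", 2: "OS Kernel Image", 3: "RAMDisk Image",
              5: "Firmware Image", 6: "Script File"}
COMP_NAMES = {0: "Not compressed", 1: "gzip", 2: "bzip2", 3: "lzma"}

def parse_uimage(header: bytes) -> dict:
    (magic, hcrc, timestamp, size, load, entry,
     dcrc, os_id, arch, img_type, comp) = struct.unpack_from(">7I4B", header, 0)
    if magic != 0x27051956:
        raise ValueError("not a u-boot legacy uImage")
    name = header[32:64].split(b"\0", 1)[0].decode("ascii", "replace")
    return {
        "name": name,
        "os": OS_NAMES.get(os_id, "os %d" % os_id),
        "arch": ARCH_NAMES.get(arch, "arch %d" % arch),
        "type": TYPE_NAMES.get(img_type, "type %d" % img_type),
        "compression": COMP_NAMES.get(comp, "compression %d" % comp),
        "size": size,
        "created": datetime.datetime.fromtimestamp(timestamp, datetime.timezone.utc),
        "load_address": "0x%08X" % load,
        "entry_point": "0x%08X" % entry,
        "header_crc": "0x%08X" % hcrc,
        "data_crc": "0x%08X" % dcrc,
    }

with open("uImage", "rb") as f:             # hypothetical image file
    print(parse_uimage(f.read(64)))
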
-# JFFS2 file system
-0 leshort 0x1984 Linux old jffs2 filesystem data little endian
-0 beshort 0x1984 Linux old jffs2 filesystem data big endian
-0 leshort 0x1985 Linux jffs2 filesystem data little endian
-0 beshort 0x1985 Linux jffs2 filesystem data big endian
-
-# Squashfs
-0 name squashfs
->28 beshort x version %d.
->30 beshort x \b%d,
->20 beshort 0 uncompressed,
->20 beshort 1 zlib
->20 beshort 2 lzma
->20 beshort 3 lzo
->20 beshort 4 xz
->20 beshort 5 lz4
->20 beshort 6 zstd
->20 beshort >0 compressed,
->28 beshort <3
->>8 belong x %d bytes,
->28 beshort >2
->>28 beshort <4
->>>63 bequad x %lld bytes,
->>28 beshort >3
->>>40 bequad x %lld bytes,
-#>>67 belong x %d bytes,
->4 belong x %d inodes,
->28 beshort <2
->>32 beshort x blocksize: %d bytes,
->28 beshort >1
->>28 beshort <4
->>>51 belong x blocksize: %d bytes,
->>28 beshort >3
->>>12 belong x blocksize: %d bytes,
->28 beshort <4
->>39 bedate x created: %s
->28 beshort >3
->>8 bedate x created: %s
-
-0 string sqsh Squashfs filesystem, big endian,
->0 use squashfs
-
-0 string hsqs Squashfs filesystem, little endian,
->0 use \^squashfs
-
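A minimal Python sketch of the version-dependent superblock reads above, restricted to the common v4 layout (bytes used at 40, blocksize at 12, created at 8); older v1-v3 images use the alternative offsets listed in the entry and are not handled here. Endianness is taken from the hsqs/sqsh magic exactly as above, and "image.squashfs" is a placeholder path.

import struct

COMPRESSORS = {0: "uncompressed", 1: "zlib", 2: "lzma", 3: "lzo",
               4: "xz", 5: "lz4", 6: "zstd"}

def parse_squashfs_v4(sb: bytes) -> dict:
    if sb[:4] == b"hsqs":                    # little-endian image
        end = "<"
    elif sb[:4] == b"sqsh":                  # big-endian image
        end = ">"
    else:
        raise ValueError("not a squashfs superblock")
    major, minor = struct.unpack_from(end + "HH", sb, 28)
    if major < 4:
        raise ValueError("v1-v3 layout; see the alternative offsets in the entry above")
    return {
        "version": "%d.%d" % (major, minor),
        "compression": COMPRESSORS.get(struct.unpack_from(end + "H", sb, 20)[0], "unknown"),
        "bytes": struct.unpack_from(end + "Q", sb, 40)[0],
        "inodes": struct.unpack_from(end + "I", sb, 4)[0],
        "blocksize": struct.unpack_from(end + "I", sb, 12)[0],
        "created_unix_time": struct.unpack_from(end + "I", sb, 8)[0],
    }

with open("image.squashfs", "rb") as f:      # hypothetical image file
    print(parse_squashfs_v4(f.read(96)))
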
-# AFS Dump Magic
-# From: Ty Sarna <tsarna@sarna.org>
-0 string \x01\xb3\xa1\x13\x22 AFS Dump
->&0 belong x (v%d)
->>&0 byte 0x76
->>>&0 belong x Vol %d,
->>>>&0 byte 0x6e
->>>>>&0 string x %s
->>>>>>&1 byte 0x74
->>>>>>>&0 beshort 2
->>>>>>>>&4 bedate x on: %s
->>>>>>>>&0 bedate =0 full dump
->>>>>>>>&0 bedate !0 incremental since: %s
-
-#----------------------------------------------------------
-#delta ISO Daniel Novotny (dnovotny@redhat.com)
-0 string DISO Delta ISO data
-!:strength +50
->4 belong x version %d
-
-# VMS backup savesets - gerardo.cacciari@gmail.com
-#
-4 string \x01\x00\x01\x00\x01\x00
->(0.s+16) string \x01\x01
->>&(&0.b+8) byte 0x42 OpenVMS backup saveset data
->>>40 lelong x (block size %d,
->>>49 string >\0 original name '%s',
->>>2 short 1024 VAX generated)
->>>2 short 2048 AXP generated)
->>>2 short 4096 I64 generated)
-
-# Summary: Oracle Clustered Filesystem
-# Created by: Aaron Botsis <redhat@digitalmafia.org>
-8 string OracleCFS Oracle Clustered Filesystem,
->4 long x rev %d
->0 long x \b.%d,
->560 string x label: %.64s,
->136 string x mountpoint: %.128s
-
-# Summary: Oracle ASM tagged volume
-# Created by: Aaron Botsis <redhat@digitalmafia.org>
-32 string ORCLDISK Oracle ASM Volume,
->40 string x Disk Name: %0.12s
-32 string ORCLCLRD Oracle ASM Volume (cleared),
->40 string x Disk Name: %0.12s
-
-# Oracle Clustered Filesystem - Aaron Botsis <redhat@digitalmafia.org>
-8 string OracleCFS Oracle Clustered Filesystem,
->4 long x rev %d
->0 long x \b.%d,
->560 string x label: %.64s,
->136 string x mountpoint: %.128s
-
-# Oracle ASM tagged volume - Aaron Botsis <redhat@digitalmafia.org>
-32 string ORCLDISK Oracle ASM Volume,
->40 string x Disk Name: %0.12s
-32 string ORCLCLRD Oracle ASM Volume (cleared),
->40 string x Disk Name: %0.12s
-
-# Compaq/HP RILOE floppy image
-# From: Dirk Jagdmann <doj@cubic.org>
-0 string CPQRFBLO Compaq/HP RILOE floppy image
-
-#------------------------------------------------------------------------------
-# Files-11 On-Disk Structure (File system for various RSX-11 and VMS flavours).
-# These bits come from LBN 1 (home block) of ODS-1, ODS-2 and ODS-5 volumes,
-# which is mapped to VBN 2 of [000000]INDEXF.SYS;1 - gerardo.cacciari@gmail.com
-#
-1008 string DECFILE11 Files-11 On-Disk Structure
->525 byte x (ODS-%d);
->1017 string A RSX-11, VAX/VMS or OpenVMS VAX file system;
->1017 string B
->>525 byte 2 VAX/VMS or OpenVMS file system;
->>525 byte 5 OpenVMS Alpha or Itanium file system;
->984 string x volume label is '%-12.12s'
-
-# From: Thomas Klausner <wiz@NetBSD.org>
-# https://filext.com/file-extension/DAA
-# describes the daa file format. The magic would be:
-0 string DAA\x0\x0\x0\x0\x0 PowerISO Direct-Access-Archive
-
-# From Albert Cahalan <acahalan@gmail.com>
-# really le32 operation,destination,payloadsize (but quite predictable)
-# 01 00 00 00 00 00 00 c0 00 02 00 00
-0 string \1\0\0\0\0\0\0\300\0\2\0\0 Marvell Libertas firmware
-
-# From Eric Sandeen
-# GFS2
-0x10000 belong 0x01161970
->0x10018 belong 0x0000051d GFS1 Filesystem
->>0x10024 belong x (blocksize %d,
->>0x10060 string >\0 lockproto %s)
->0x10018 belong 0x00000709 GFS2 Filesystem
->>0x10024 belong x (blocksize %d,
->>0x10060 string >\0 lockproto %s)
-
-# Russell Coker <russell@coker.com.au>
-0x10040 string _BHRfS_M BTRFS Filesystem
->0x1012b string >\0 label "%s",
->0x10090 lelong x sectorsize %d,
->0x10094 lelong x nodesize %d,
->0x10098 lelong x leafsize %d,
->0x10020 ubelong x UUID=%08x-
->0x10024 ubeshort x \b%04x-
->0x10026 ubeshort x \b%04x-
->0x10028 ubeshort x \b%04x-
->0x1002a ubeshort x \b%04x
->0x1002c ubelong x \b%08x,
->0x10078 lequad x %lld/
->0x10070 lequad x \b%lld bytes used,
->0x10088 lequad x %lld devices
-
-0 string btrfs-stream BTRFS stream file
-
-# dvdisaster's .ecc
-# From: "Nelson A. de Oliveira" <naoliv@gmail.com>
-0 string *dvdisaster* dvdisaster error correction file
-
-# xfs metadump image
-# mb_magic XFSM at 0; superblock magic XFSB at 1 << mb_blocklog
-# but can we do the << ? For now it's always 512 (0x200) anyway.
-0 string XFSM
->0x200 string XFSB XFS filesystem metadump image
-
-# Type: CROM filesystem
-# From: Werner Fink <werner@suse.de>
-0 string CROMFS CROMFS
->6 string >\0 \b version %2.2s,
->8 ulequad >0 \b block data at %lld,
->16 ulequad >0 \b fblock table at %lld,
->24 ulequad >0 \b inode table at %lld,
->32 ulequad >0 \b root at %lld,
->40 ulelong >0 \b fblock size = %d,
->44 ulelong >0 \b block size = %d,
->48 ulequad >0 \b bytes = %lld
-
-# Type: xfs metadump image
-# From: Daniel Novotny <dnovotny@redhat.com>
-# mb_magic XFSM at 0; superblock magic XFSB at 1 << mb_blocklog
-# but can we do the << ? For now it's always 512 (0x200) anyway.
-0 string XFSM
->0x200 string XFSB XFS filesystem metadump image
-
-# Type: delta ISO
-# From: Daniel Novotny <dnovotny@redhat.com>
-0 string DISO Delta ISO data,
->4 belong x version %d
-
-# JFS2 (Journaling File System) image. (Old JFS1 has superblock at 0x1000.)
-# See linux/fs/jfs/jfs_superblock.h for layout; see jfs_filsys.h for flags.
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-0x8000 string JFS1
-# Because it's text-only magic, check a binary value (version) to be sure.
-# Should always be 2, but mkfs.jfs writes it as 1. Needs to be 2 or 1 to be
-# mountable.
->&0 lelong <3 JFS2 filesystem image
-# Label is followed by a UUID; we have to limit string length to avoid
-# appending the UUID in the case of a 16-byte label.
->>&144 regex [\x20-\x7E]{1,16} (label "%s")
->>&0 lequad x \b, %lld blocks
->>&8 lelong x \b, blocksize %d
->>&32 lelong&0x00000006 >0 (dirty)
->>&36 lelong >0 (compressed)
-
-# LFS
-0 lelong 0x070162 LFS filesystem image
->4 lelong 1 version 1,
->>8 lelong x \b blocks %u,
->>12 lelong x \b blocks per segment %u,
->4 lelong 2 version 2,
->>8 lelong x \b fragments %u,
->>12 lelong x \b bytes per segment %u,
->16 lelong x \b disk blocks %u,
->20 lelong x \b block size %u,
->24 lelong x \b fragment size %u,
->28 lelong x \b fragments per block %u,
->32 lelong x \b start for free list %u,
->36 lelong x \b number of free blocks %d,
->40 lelong x \b number of files %u,
->44 lelong x \b blocks available for writing %d,
->48 lelong x \b inodes in cache %d,
->52 lelong x \b inode file disk address %#x,
->56 lelong x \b inode file inode number %u,
->60 lelong x \b address of last segment written %#x,
->64 lelong x \b address of next segment to write %#x,
->68 lelong x \b address of current segment written %#x
-
-0 string td\000 floppy image data (TeleDisk, compressed)
-0 string TD\000 floppy image data (TeleDisk)
-
-0 string CQ\024 floppy image data (CopyQM,
->16 leshort x %d sectors,
->18 leshort x %d heads.)
-
-0 string ACT\020Apricot\020disk\020image\032\004 floppy image data (ApriDisk)
-
-# URL: http://fileformats.archiveteam.org/wiki/LoadDskF/SaveDskF
-# Update: Joerg Jenderek
-# Note: called "IBM SKF disk image" by TrID
-# verified by 7-Zip `7z l -tFAT -slt *.dsk` and
-# `deark -l -m loaddskf 06200D19.DSK`
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dsk-skf-old.trid.xml
-0 beshort 0xAA58
->0 use SaveDskF
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dsk-skf.trid.xml
-0 beshort 0xAA59
->0 use SaveDskF
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dsk-skf-comp.trid.xml
-0 beshort 0xAA5A
-# skip foo by additional check for unused upper byte of media type in SaveDskF header
-#>3 ubyte =0
-# skip bar by additional check for valid "low" number of heads in SaveDskF header
-#>>26 uleshort <3
-# skip foo by additional check for unused double word field in SaveDskF header
-#>>>30 long =0
-#>>>>0 use SaveDskF
->0 use SaveDskF
-# display information about IBM SaveDskF floppy disk images
-0 name SaveDskF
-# SaveDskF magic
->0 beshort x floppy image data (IBM SaveDskF
-#!:mime application/octet-stream
-!:mime application/x-ibm-dsk
-!:ext dsk
-# also suffix with digit (1dk .2dk ...); NO example FOUND!
-#!:ext dsk/1dk/2dk
->1 ubyte =0x58 \b, old)
->1 ubyte =0x59 \b)
->1 ubyte =0x5A \b, compressed)
-# media type; the first byte of the FAT like: 0xF0 (usual floppy) 0xF9 0xFE
-# https://en.wikipedia.org/wiki/Design_of_the_FAT_file_system
->2 ubyte !0xF0 \b, Media descriptor %#x
-# upper byte of media type is not used; so this seems to be nil
->3 ubyte !0 \b, upper byte of media type %#x
-# sector size in bytes as in the BIOS parameter block like: 512 ; SAVEDSKF.EXE with other sizes produces garbage images
->4 uleshort !512 \b, Bytes/sector %u
-# cluster mask; number of sectors per cluster, minus 1
->6 uleshort+1 >1 \b, sectors/cluster %u
-#>6 uleshort+1 x \b, sectors/cluster %u
-# cluster shift; log2(cluster size / sector size) like: 0~1=ClusterSize/SectorSize
->7 ubyte >0 \b, cluster shift %u
-#>7 ubyte x \b, cluster shift %u
-# reserved sectors; as in the BIOS parameter block like: 1 256 (2M256R-K.DSK)
->8 uleshort >1 \b, reserved sectors %u
-#>8 uleshort x \b, reserved sectors %u
-# FAT copies; as in the BIOS parameter block like: 2 (usual) 1 (2-NK.DSK)
->10 ubyte !2 \b, FAT
-# plural s
->>10 ubyte >1 \bs
->>10 ubyte x %u
-# root directory entries; as in the BIOS parameter block like: 224 (usual) 64 (H1-NK.DSK) 4096 (2-NK.DSK)
->11 uleshort !224 \b, root entries %u
-# sector number of first cluster (count sectors used by boot sector, FATs and root directory) like: 7 10 29 33 288
->13 uleshort !33 \b, 1st cluster at sector %u
-# number of clusters in image; empty clusters at the end are not saved and counted like: 2372 2848
->15 uleshort x \b, %u clusters
-# sectors/FAT; as in the BIOS parameter block like: 1 (H1-NK.DSK) 7 9
->17 ubyte !9 \b, sectors/FAT %u
-# sector number of root directory (ie, count of sectors used by boot sector and FATs) like: 3 (H1-NK.DSK) 9 10 15 19 274 (2M256R-K.DSK)
->18 uleshort !19 \b, root directory at sector %u
-# checksum; sum of all bytes in the file
->20 ulelong x \b, checksum %#8.8x
-# cylinders; number of cylinders like: 40 80
->24 uleshort !80 \b, %u cylinders
-#>24 uleshort x \b, %u cylinders
-# heads; number of heads as in the BIOS parameter block like: 1 (H1-NK.DSK) 2
->26 uleshort !2 \b, heads %u
-#>26 uleshort x \b, heads %u
-# sectors/track; number of sectors per track as in the BIOS parameter block like: 8 15 18 36
->28 uleshort !18 \b, sectors/track %u
-#>28 uleshort x \b, sectors/track %u
-# unused double word field seems to be always like: 0
->30 ulelong !0 \b, at 0x1E %#x
-# number of sectors in images like: 1017 2786 2880
->34 uleshort x \b, sectors %u
-# if string is "printable" it can be a real comment
->(36.s) ubyte !0x00
-# if 1st sector is far enough away (> 0x29) then there is space for comment part
->>38 uleshort >41
-# offset to comment string like: 28h=40
->>>36 uleshort x \b, at %#x
-# comment string terminated with \r\n\0
->>>(36.s) string x "%s"
-# offset to the first sector like: 0 (If this is 0, assume it is 0x200) 29h=41 (DISPLAY3.DSK) 31h 43h 45h 46h 48h 50h 200h=512
->38 uleshort !0 \b, 1st sector at %#x
-# FOR DEBUGGING!
-#>(38.s) ubelong x SECTOR CONTENT %x
-# a non-compressed floppy image implies a readable DOS boot sector inside the image
->>1 ubyte !0x5A
-# when not compressed it is readable as DOS boot sector via ./filesystems
-#>>>(38.s) indirect x \b; contains
->38 uleshort =0 \b, 1st sector at 0x200 (0)
-# maybe standard DOS boot sector; NO example FOUND HERE!
-#>>0x200 indirect x \b; contains
-
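A minimal Python sketch that unpacks the SaveDskF header fields annotated above (all little-endian); "image.dsk" is a placeholder path, the cp437 decoding of the comment is an assumption, and the payload of the compressed 0xAA5A variant is not expanded.

import struct

def _u16(b, off): return struct.unpack_from("<H", b, off)[0]
def _u32(b, off): return struct.unpack_from("<I", b, off)[0]

def parse_savedskf(hdr: bytes) -> dict:
    if hdr[0] != 0xAA or hdr[1] not in (0x58, 0x59, 0x5A):
        raise ValueError("not a SaveDskF image")
    first_sector = _u16(hdr, 38) or 0x200        # 0 means "assume 0x200"
    comment_off = _u16(hdr, 36)
    comment = b""
    if comment_off and first_sector > 41:        # room for a comment before the data
        comment = hdr[comment_off:first_sector].split(b"\0", 1)[0]
    return {
        "variant": {0x58: "old", 0x59: "plain", 0x5A: "compressed"}[hdr[1]],
        "media_descriptor": hex(hdr[2]),
        "bytes_per_sector": _u16(hdr, 4),
        "sectors_per_cluster": _u16(hdr, 6) + 1,  # stored as cluster mask (size - 1)
        "reserved_sectors": _u16(hdr, 8),
        "fat_copies": hdr[10],
        "root_entries": _u16(hdr, 11),
        "first_cluster_sector": _u16(hdr, 13),
        "clusters": _u16(hdr, 15),
        "sectors_per_fat": hdr[17],
        "root_dir_sector": _u16(hdr, 18),
        "checksum": hex(_u32(hdr, 20)),
        "cylinders": _u16(hdr, 24),
        "heads": _u16(hdr, 26),
        "sectors_per_track": _u16(hdr, 28),
        "total_sectors": _u16(hdr, 34),
        "comment": comment.rstrip(b"\r\n").decode("cp437", "replace"),
        "first_sector_offset": hex(first_sector),
    }

with open("image.dsk", "rb") as f:               # hypothetical image file
    print(parse_savedskf(f.read(512)))
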
-0 string \074CPM_Disk\076 disk image data (YAZE)
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Central_Point_Software#cite_note-6
-# Reference: https://www.robcraig.com/download/transcopy-5-x-file-format
-# https://www.robcraig.com/download/transcopy-file-format-by-gene-thompson
-# http://mark0.net/download/triddefs_xml.7z/defs/t/tc-transcopy.trid.xml
-# TransCopy signature
-0 beshort 0x5AA5
-# skip Intel serial flash ROM with invalid 0 disk sides handled by ./intel
->0x103 ubyte !0
-# skip Intel serial flash ROM with unlikely "high" start cylinder 100 handled by ./intel
-#>>0x101 ubyte <100 VALID_START_CYLINDER
-# skip Intel serial flash ROM with unlikely description handled by ./intel
-#>>>2 beshort !0xF00f VALID_DESCRIPTION
-# skip Intel serial flash ROM with invalid disk types 89h 88h handled by ./intel
-#>>>>0x100 byte !0x89 VALID_DISK_TYPE
->>0 use tc-floppy
-# display information of Central Point Software (CPS) Option Board TransCopy floppy image
-0 name tc-floppy
->0 beshort x TransCopy disk image
-#!:mime application/octet-stream
-!:mime application/x-floppy-image-tc
-# like: disk04.tc VOCALC2.TC WIZ5_A.tc WIZ2_720.IMG
-!:ext tc/img
-# 1st description (optional 0-terminated maximal 32) like:
-# "Project Workbench 2.20" "Visi On Calc" "Wizardry V Disk 1 of 3"
->2 string >\0 %.32s
-# 2nd desc. (optional 0-terminated maximal 32) like:
-# "(1988)." "Advanced - Utility" 'Program Disk 2"
->0x22 string >\0 "%.32s"
-# Looks like ascii (like MESSAGES) formatted with attribute bytes (190)?
-# not needed for disk copy
-#>>0x42 string x '%.190s'
-#>>0x88 lestring16 x "%.8s"
-# disktype: 2~MFM High Density 3~MFM Double Density 4~Apple II GCR 5~FM Single Density
-# 6~Commodore GCR 7~MFM Double Density 8~Commodore Amiga Ch~Atari FM FFh~Unknown
->0x100 ubyte !0xFF \b, disk type %u
-# StartingCylinder like: 0
->0x101 ubyte x \b, cylinder
->0x101 ubyte !0 start=%u
-# EndingCylinder like: 40 (often) 41 79
->0x102 ubyte x end=%u
-# NumberOfSides like: 2
->0x103 ubyte !2 \b, %u sides
-# TrackIncrement like: 1
->0x104 ubyte !1 \b, track increment %u
-# TrackPosTbl Track skew
-#>0x105 ubequad x \b, Track skew %#16.16llx
-# TrackOffsTbl
-#>0x305 ubequad x \b, TrackOffsTbl %#16.16llx
-# TrackLngthTbl
-#>0x505 ubequad x \b, TrackLngthTbl %#16.16llx
-# TrackTypeTable
-#>0x705 ubequad x \b, TrackTypeTable %#16.16llx
-# Address mark timing
-#>0x905 ubequad x \b, Address mark timing %#16.16llx
-# Track fragment
-#>0x2905 ubequad !0 \b, Track fragment %#16.16llx
-# Track data
-#>0x4000 ubequad !0 \b, Track data %#16.16llx
-
-# ReFS
-# Richard W.M. Jones <rjones@redhat.com>
-0 string \0\0\0ReFS\0 ReFS filesystem image
-
-# EWF EnCase image file format:
-# Gregoire Passault
-# http://www.forensicswiki.org/wiki/Encase_image_file_format
-0 string EVF\x09\x0d\x0a\xff\x00 EWF/Expert Witness/EnCase image file format
-
-# UBIfs
-# Linux kernel sources: fs/ubifs/ubifs-media.h
-0 lelong 0x06101831
->0x16 leshort 0 UBIfs image
->0x08 lequad x \b, sequence number %llu
->0x10 leshort x \b, length %u
->0x04 lelong x \b, CRC %#08x
-
-0 lelong 0x23494255
->0x04 leshort <2
->0x05 string \0\0\0
->0x1c string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->0x04 leshort x UBI image, version %u
-
-# NEC PC-88 2D disk image
-# From Fabio R. Schmidlin <sd-snatcher@users.sourceforge.net>
-0x20 ulelong&0xFFFFFEFF 0x2A0
->0x10 string \0\0\0\0\0\0\0\0\0\0
->>0x280 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->>>0x1A ubyte&0xEF 0
->>>>0x1B ubyte&0x8F 0
->>>>>0x1B ubyte&70 <0x40
->>>>>>0x1C ulelong >0x21
->>>>>>>0 regex [[:print:]]* NEC PC-88 disk image, name=%s
->>>>>>>>0x1B ubyte 0 \b, media=2D
->>>>>>>>0x1B ubyte 0x10 \b, media=2DD
->>>>>>>>0x1B ubyte 0x20 \b, media=2HD
->>>>>>>>0x1B ubyte 0x30 \b, media=1D
->>>>>>>>0x1B ubyte 0x40 \b, media=1DD
->>>>>>>>0x1A ubyte 0x10 \b, write-protected
-
-# HDD Raw Copy Tool disk image, file extension: .imgc
-# From Benjamin Vanheuverzwijn <bvanheu@gmail.com>
-0 pstring HDD\ Raw\ Copy\ Tool %s
->0x100 pstring x %s
->0x200 pstring x - HD model: %s
-#>0x300 pstring x unknown %s
->0x400 pstring x serial: %s
-#>0x500 pstring x unknown: %s
-!:ext imgc
-
-# http://martin.hinner.info/fs/bfs/bfs-structure.html
-0 lelong 0x1BADFACE SCO UnixWare BFS filesystem
-
-# https://arstechnica.com/information-technology/2018/07/the-beos-filesystem/
-32 lelong 0x42465331 BE/OS BFS1 filesystem
->36 lelong x \b, byte order %d
->40 lelong x \b, block size %d
->44 lelong x \b, block shift %d
->48 lequad x \b, total blocks %lld
->56 lequad x \b, used blocks %lld
-
-
-0 name next
->0 lelong x \b, size %d
->4 string x \b, label %s
-
-# https://opensource.apple.com/source/IOStorageFamily/IOStorageFamily-44.3\
-# /IONeXTPartitionScheme.h
-0 string NeXT NeXT version 1 disklabel
->12 use next
-0 string dlV1 NeXT version 2 disklabel
->12 use next
-0 string dlV2 NeXT version 3 disklabel
->12 use next
-
-# bcachefs
-# From: Thomas Weißschuh <thomas@t-8ch.de>
-
-0 name bcachefs-uuid
->0 ubelong x \b%08x
->4 ubeshort x \b-%04x
->6 ubeshort x \b-%04x
->8 ubeshort x \b-%04x
->10 ubelong x \b-%08x
->14 ubeshort x \b%04x
-
-0 name bcachefs bcachefs
->0x68 lequad 8 \b, UUID=
->>0x38 use bcachefs-uuid
->>0x48 string >0 \b, label "%.32s"
->>0x10 uleshort x \b, version %u
->>0x12 uleshort x \b, min version %u
->>0x7a byte x \b, device %d
-# assumes the first field is the members field
->>0x2f4 ulelong 0x01 \b/UUID=
->>>0x2f0 default x
->>>&(0x07a.b*56) use bcachefs-uuid
->>0x07b byte x \b, %d devices
->>0x090 byte ^0x02 \b (unclean)
-
-0x1018 string \xc6\x85\x73\xf6\x4e\x1a\x45\xca\x82\x65\xf5\x7f\x48\xba\x6d\x81
->0x1000 use bcachefs
-
-0x1018 string \xc6\x85\x73\xf6\x66\xce\x90\xa9\xd9\x6a\x60\xcf\x80\x3d\xf7\xef
->0x1000 use bcachefs
-
-# EROFS
-# https://kernel.googlesource.com/pub/scm/linux/kernel/git/xiang/erofs-utils/\
-# +/refs/heads/experimental/include/erofs_fs.h#12
-1024 lelong 0xE0F5E1E2 EROFS filesystem
-#>1028 lelong x \b, checksum=%#x
->1032 lelong >0 \b, compat:
->>1032 lelong &1 SB_CHKSUM
->>1032 lelong &2 MTIME
->1036 byte x \b, blocksize=%u
->1037 byte x \b, exslots=%u
-#>1038 leshort x \b, root_nid=%d
-#>1040 lequad x \b, inodes=%ld
-#>1048 leldate x \b, build_time=%s
-#>1056 lelong x \b.%d
-#>1060 lelong x \b, blocks=%d
-#>1064 lelong x \b, metadata@%#x
-#>1068 lelong x \b, xattr@%#x
->1072 guid x \b, uuid=%s
->1088 string >0 \b, name=%s
->1104 lelong >0 \b, incompat:
->>1104 lelong &1 LZ4_0PADDING
->>1104 lelong &2 BIG_PCLUSTER
->>1104 lelong &4 CHUNKED_FILE
->>1104 lelong &8 DEVICE_TABLE
->>1104 lelong &16 ZTAILPACKING
-
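A minimal Python sketch of the EROFS superblock fields the entry prints (little-endian, superblock at byte 1024). The 16-byte cap on the volume name is an assumption, and the byte at 1036 is reported exactly as the entry above prints it.

import struct
import uuid

COMPAT_BITS = {1: "SB_CHKSUM", 2: "MTIME"}
INCOMPAT_BITS = {1: "LZ4_0PADDING", 2: "BIG_PCLUSTER", 4: "CHUNKED_FILE",
                 8: "DEVICE_TABLE", 16: "ZTAILPACKING"}

def _flags(value, table):
    return [name for bit, name in table.items() if value & bit]

def parse_erofs(img: bytes) -> dict:
    if struct.unpack_from("<I", img, 1024)[0] != 0xE0F5E1E2:
        raise ValueError("not an EROFS image")
    return {
        "compat": _flags(struct.unpack_from("<I", img, 1032)[0], COMPAT_BITS),
        "blocksize": img[1036],                  # single byte, printed as-is above
        "exslots": img[1037],
        "uuid": str(uuid.UUID(bytes=img[1072:1088])),
        "name": img[1088:1104].split(b"\0", 1)[0].decode("utf-8", "replace"),
        "incompat": _flags(struct.unpack_from("<I", img, 1104)[0], INCOMPAT_BITS),
    }

with open("erofs.img", "rb") as f:               # hypothetical image file
    print(parse_erofs(f.read(2048)))
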
-# YAFFS
-# The layout itself is undocumented, determined by the memory layout of the
-# reference implementation. This signature is derived from the
-# reference implementation code and generated test cases
-# We recognize the start of an object header defined by yaffs_obj_hdr:
-# (Note that the values are encoded according to platform endianness)
-
-# u32 type /* enum yaffs_obj_type, valid 1-5 */
-# u32 parent_obj_id; /* 1 for root objects we recognize */
-# u16 sum_no_longer_used; /* checksum of name. Not used by YAFFS and memset to 0xFF */
-# YCHAR name[YAFFS_MAX_NAME_LENGTH + 1];
-
-# mkyaffsimage always writes a root directory with an empty name, then processes the target directory contents
-# mkyaffs2image directly proceeds to writing entries with the appropriate u32 YAFFS_OBJECT_TYPE (1-5 valid), each with parent id 1
-
-0 name yaffs
->0 ulelong 1 \b, type file
->0 ulelong 2 \b, type symlink
->0 ulelong 3 \b, type root or directory
->0 ulelong 4 \b, type hardlink
->0 ulelong 5 \b, type special
->0xA byte 0 \b, v1 root directory
->0xA byte !0 \b, object entry
->>0xA string x (name: "%s")
-
-# Little Endian: XX 00 00 00 01 00 00 00 FF FF YY
-# XX: 01 - 05 (object type)
-# YY: 00 for version 1 root directory, > 00 for version 2 (name data)
-0x1 string \x00\x00\x00\x01\x00\x00\x00\xFF\xFF
->0 ulelong 0
->0 ulelong >5
->0 default x YAFFS filesystem root entry (little endian)
->>0 use yaffs
-
-# Big Endian: 00 00 00 XX 00 00 00 01 FF FF YY
-# XX: 01 - 05 (object type)
-# YY: 00 for version 1 root directory, > 00 for version 2 (name data)
-0x4 string \x00\x00\x00\x01\xFF\xFF
->0 string \x00\x00\x00
->>0 ubelong 0
->>0 ubelong >5
->>0 default x YAFFS filesystem root entry (big endian)
->>>0 use \^yaffs
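A minimal Python sketch of the two YAFFS root-entry signatures just described: object type 1-5, parent_obj_id 1 and the 0xFFFF placeholder checksum, with the name (or a zero byte for a v1 root directory) following at offset 0x0A. The 64-byte cap on the name is an assumption.

import struct

TYPE_NAMES = {1: "file", 2: "symlink", 3: "root or directory",
              4: "hardlink", 5: "special"}

def probe_yaffs(chunk: bytes) -> str:
    if chunk[1:10] == b"\x00\x00\x00\x01\x00\x00\x00\xFF\xFF":
        endian, obj_type = "little endian", struct.unpack_from("<I", chunk, 0)[0]
    elif chunk[0:3] == b"\x00\x00\x00" and chunk[4:10] == b"\x00\x00\x00\x01\xFF\xFF":
        endian, obj_type = "big endian", struct.unpack_from(">I", chunk, 0)[0]
    else:
        return "not a YAFFS root entry"
    if not 1 <= obj_type <= 5:
        return "not a YAFFS root entry"
    name = chunk[0x0A:0x0A + 64].split(b"\0", 1)[0]
    desc = "YAFFS filesystem root entry (%s), type %s" % (endian, TYPE_NAMES[obj_type])
    if not name:
        return desc + ", v1 root directory"
    return desc + ", name %r" % name.decode("ascii", "replace")

# Example: a little-endian v1 root directory entry (type 3, parent id 1, empty name)
print(probe_yaffs(b"\x03\x00\x00\x00\x01\x00\x00\x00\xFF\xFF" + b"\x00" * 64))
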
diff --git a/contrib/libs/libmagic/magic/Magdir/finger b/contrib/libs/libmagic/magic/Magdir/finger
deleted file mode 100644
index ab43ac6f9d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/finger
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: finger,v 1.3 2019/04/19 00:42:27 christos Exp $
-# fingerprint: file(1) magic for fingerprint data
-#
-
-# https://cgit.freedesktop.org/libfprint/libfprint/tree/libfprint/data.c
-
-0 string FP1 libfprint fingerprint data V1
->3 beshort x \b, driver_id %x
->5 belong x \b, devtype %x
-
-0 string FP2 libfprint fingerprint data V2
->3 beshort x \b, driver_id %x
->5 belong x \b, devtype %x
diff --git a/contrib/libs/libmagic/magic/Magdir/firmware b/contrib/libs/libmagic/magic/Magdir/firmware
deleted file mode 100644
index 4835b12e8d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/firmware
+++ /dev/null
@@ -1,133 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: firmware,v 1.7 2023/03/11 18:52:03 christos Exp $
-# firmware: file(1) magic for firmware files
-#
-
-# https://github.com/MatrixEditor/frontier-smart-api/blob/main/docs/firmware-2.0.md#11-header-structure
-# examples: https://github.com/cweiske/frontier-silicon-firmwares
-0 lelong 0x00001176
->4 lelong 0x7c Frontier Silicon firmware download
->>8 lelong x \b, MeOS version %x
->>12 string/32/T x \b, version %s
->>40 string/64/T x \b, customization %s
-
-# HPE iLO firmware update image
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://www.sstic.org/2018/presentation/backdooring_your_server_through_its_bmc_the_hpe_ilo4_case/
-# iLO1 (ilo1*.bin) or iLO2 (ilo2_*.bin) images
-0 string \x20\x36\xc1\xce\x60\x37\x62\xf0\x3f\x06\xde\x00\x00\x03\x7f\x00
->16 ubeshort =0xCFDD HPE iLO2 firmware update image
->16 ubeshort =0x6444 HPE iLO1 firmware update image
-# iLO3 images (ilo3_*.bin) start directly with image name
-0 string iLO3\x20v\x20 HPE iLO3 firmware update image,
->7 string x version %s
-# iLO4 images (ilo4_*.bin) start with a signature and a certificate
-0 string --=</Begin\x20HP\x20Signed
->75 string label_HPBBatch
->>5828 string iLO\x204
->>>5732 string HPIMAGE\x00 HPE iLO4 firmware update image,
->>>6947 string x version %s
-# iLO5 images (ilo5_*.bin) start with a signature
->75 string label_HPE-HPB-BMC-ILO5-4096
->>880 string HPIMAGE\x00 HPE iLO5 firmware update image,
->>944 string x version %s
-
-# IBM POWER Secure Boot Container
-# from https://github.com/open-power/skiboot/blob/master/libstb/container.h
-0 belong 0x17082011 POWER Secure Boot Container,
->4 beshort x version %u
->6 bequad x container size %llu
-# These are always zero
-# >14 bequad x target HRMOR %llx
-# >22 bequad x stack pointer %llx
->4096 ustring \xFD7zXZ\x00 XZ compressed
-0 belong 0x1bad1bad POWER boot firmware
->256 belong 0x48002030 (PHYP entry point)
-
-# ARM Cortex-M vector table
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://developer.arm.com/documentation/100701/0200/Exception-properties
-# Match stack MSB
-3 byte 0x20
-# Function pointers must be in Thumb-mode and before 0x20000000 (4*5 bits match)
->4 ulelong&0xE0000001 1
->>8 ulelong&0xE0000001 1
->>>12 ulelong&0xE0000001 1
->>>>44 ulelong&0xE0000001 1
->>>>>56 ulelong&0xE0000001 1
-# Match Cortex-M reserved sections (0x00000000 or 0xFFFFFFFF)
->>>>>>28 ulelong+1 <2
->>>>>>>32 ulelong+1 <2
->>>>>>>>36 ulelong+1 <2
->>>>>>>>>40 ulelong+1 <2
->>>>>>>>>>52 ulelong+1 <2 ARM Cortex-M firmware
->>>>>>>>>>>0 ulelong >0 \b, initial SP at 0x%08x
->>>>>>>>>>>4 ulelong^1 x \b, reset at 0x%08x
->>>>>>>>>>>8 ulelong^1 x \b, NMI at 0x%08x
->>>>>>>>>>>12 ulelong^1 x \b, HardFault at 0x%08x
->>>>>>>>>>>44 ulelong^1 x \b, SVCall at 0x%08x
->>>>>>>>>>>56 ulelong^1 x \b, PendSV at 0x%08x
-
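A minimal Python sketch of the same vector-table heuristics: the initial stack pointer's top byte must be 0x20, the five handler slots tested above must hold odd (Thumb) addresses below 0x20000000, and the reserved slots must read 0 or 0xFFFFFFFF. "firmware.bin" is a placeholder path.

import struct

HANDLER_OFFSETS = (4, 8, 12, 44, 56)      # reset, NMI, HardFault, SVCall, PendSV
RESERVED_OFFSETS = (28, 32, 36, 40, 52)   # must read 0x00000000 or 0xFFFFFFFF

def looks_like_cortex_m(image: bytes) -> bool:
    if image[3] != 0x20:                  # initial SP must point into SRAM (0x20xxxxxx)
        return False
    for off in HANDLER_OFFSETS:           # Thumb bit set, address below 0x20000000
        value = struct.unpack_from("<I", image, off)[0]
        if (value & 0xE0000001) != 1:
            return False
    return all(struct.unpack_from("<I", image, off)[0] in (0, 0xFFFFFFFF)
               for off in RESERVED_OFFSETS)

with open("firmware.bin", "rb") as f:     # hypothetical firmware image
    image = f.read(64)
if looks_like_cortex_m(image):
    sp = struct.unpack_from("<I", image, 0)[0]
    reset = struct.unpack_from("<I", image, 4)[0] & ~1   # clear the Thumb bit
    print("ARM Cortex-M firmware, initial SP at 0x%08x, reset at 0x%08x" % (sp, reset))
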
-# ESP-IDF partition table entry
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/espressif/esp-idf/blob/v5.0/components/esp_partition/include/esp_partition.h
-0 string \xAA\x50
->2 ubyte <2 ESP-IDF partition table entry
->>12 string/16 x \b, label: "%s"
->>2 ubyte 0
->>>3 ubyte 0x00 \b, factory app
->>>3 ubyte 0x10 \b, OTA_0 app
->>>3 ubyte 0x11 \b, OTA_1 app
->>>3 ubyte 0x12 \b, OTA_2 app
->>>3 ubyte 0x13 \b, OTA_3 app
->>>3 ubyte 0x14 \b, OTA_4 app
->>>3 ubyte 0x15 \b, OTA_5 app
->>>3 ubyte 0x16 \b, OTA_6 app
->>>3 ubyte 0x17 \b, OTA_7 app
->>>3 ubyte 0x18 \b, OTA_8 app
->>>3 ubyte 0x19 \b, OTA_9 app
->>>3 ubyte 0x1A \b, OTA_10 app
->>>3 ubyte 0x1B \b, OTA_11 app
->>>3 ubyte 0x1C \b, OTA_12 app
->>>3 ubyte 0x1D \b, OTA_13 app
->>>3 ubyte 0x1E \b, OTA_14 app
->>>3 ubyte 0x1F \b, OTA_15 app
->>>3 ubyte 0x20 \b, test app
->>2 ubyte 1
->>>3 ubyte 0x00 \b, OTA selection data
->>>3 ubyte 0x01 \b, PHY init data
->>>3 ubyte 0x02 \b, NVS data
->>>3 ubyte 0x03 \b, coredump data
->>>3 ubyte 0x04 \b, NVS keys
->>>3 ubyte 0x05 \b, emulated eFuse data
->>>3 ubyte 0x06 \b, undefined data
->>>3 ubyte 0x80 \b, ESPHTTPD partition
->>>3 ubyte 0x81 \b, FAT partition
->>>3 ubyte 0x82 \b, SPIFFS partition
->>>3 ubyte 0xFF \b, any data
->>4 ulelong x \b, offset: 0x%X
->>8 ulelong x \b, size: 0x%X
->>28 ulelong&0x1 1 \b, encrypted
-
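A minimal Python sketch that decodes one 32-byte partition-table entry using the offsets tested above; only the subtype names listed in the entry are mapped, and the trailing example entry is synthetic.

import struct

APP_SUBTYPES = {0x00: "factory app", 0x20: "test app"}
APP_SUBTYPES.update({0x10 + i: "OTA_%d app" % i for i in range(16)})
DATA_SUBTYPES = {0x00: "OTA selection data", 0x01: "PHY init data", 0x02: "NVS data",
                 0x03: "coredump data", 0x04: "NVS keys", 0x05: "emulated eFuse data",
                 0x06: "undefined data", 0x80: "ESPHTTPD partition",
                 0x81: "FAT partition", 0x82: "SPIFFS partition", 0xFF: "any data"}

def parse_esp_partition_entry(entry: bytes) -> dict:
    if entry[:2] != b"\xAA\x50" or entry[2] > 1:
        raise ValueError("not an ESP-IDF partition table entry")
    offset, size = struct.unpack_from("<II", entry, 4)
    label = entry[12:28].split(b"\0", 1)[0].decode("ascii", "replace")
    flags = struct.unpack_from("<I", entry, 28)[0]
    names = APP_SUBTYPES if entry[2] == 0 else DATA_SUBTYPES
    return {
        "label": label,
        "kind": names.get(entry[3], "subtype %#x" % entry[3]),
        "offset": hex(offset),
        "size": hex(size),
        "encrypted": bool(flags & 1),
    }

# Synthetic example: a data/NVS entry named "nvs" at 0x9000, 0x6000 bytes, not encrypted
entry = (b"\xAA\x50\x01\x02" + struct.pack("<II", 0x9000, 0x6000)
         + b"nvs".ljust(16, b"\0") + struct.pack("<I", 0))
print(parse_esp_partition_entry(entry))
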
-# ESP-IDF application image
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/espressif/esp-idf/blob/v5.0/components/bootloader_support/include/esp_app_format.h
-# Note: Concatenation of esp_image_header_t, esp_image_segment_header_t and esp_app_desc_t
-# First segment contains esp_app_desc_t
-0 ubyte 0xE9
->32 ulelong 0xABCD5432 ESP-IDF application image
->>12 uleshort 0x0000 for ESP32
->>12 uleshort 0x0002 for ESP32-S2
->>12 uleshort 0x0005 for ESP32-C3
->>12 uleshort 0x0009 for ESP32-S3
->>12 uleshort 0x000A for ESP32-H2 Beta1
->>12 uleshort 0x000C for ESP32-C2
->>12 uleshort 0x000D for ESP32-C6
->>12 uleshort 0x000E for ESP32-H2 Beta2
->>12 uleshort 0x0010 for ESP32-H2
->>80 string/32 x \b, project name: "%s"
->>48 string/32 x \b, version %s
->>128 string/16 x \b, compiled on %s
->>>112 string/16 x %s
->>144 string/32 x \b, IDF version: %s
->>4 ulelong x \b, entry address: 0x%08X
diff --git a/contrib/libs/libmagic/magic/Magdir/flash b/contrib/libs/libmagic/magic/Magdir/flash
deleted file mode 100644
index 33b734499c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/flash
+++ /dev/null
@@ -1,62 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: flash,v 1.15 2019/04/19 00:42:27 christos Exp $
-# flash: file(1) magic for Macromedia Flash file format
-#
-# See
-#
-# https://www.macromedia.com/software/flash/open/
-# https://wwwimages.adobe.com/www.adobe.com/content/dam/Adobe/\
-# en/devnet/swf/pdf/swf-file-format-spec.pdf page 27
-#
-
-0 name swf-details
-
->0 string F
->>8 byte&0xfd 0x08 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
->>8 byte&0xfe 0x10 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
->>8 byte 0x18 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
->>8 beshort&0xff87 0x2000 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
->>8 beshort&0xffe0 0x3000 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
->>8 byte&0x7 0
->>>8 ubyte >0x2f
->>>>9 ubyte <0x20 Macromedia Flash data
-!:mime application/x-shockwave-flash
->>>>>3 byte x \b, version %d
-
->0 string C
->>8 byte 0x78 Macromedia Flash data (compressed)
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
-
->0 string Z
->>8 byte 0x5d Macromedia Flash data (lzma compressed)
-!:mime application/x-shockwave-flash
->>>3 byte x \b, version %d
-
-
-1 string WS
->4 ulelong >14
->>3 ubyte !0
->>>0 use swf-details
-
-# From: Cal Peake <cp@absolutedigital.net>
-0 string FLV\x01 Macromedia Flash Video
-!:mime video/x-flv
-
-#
-# Yosu Gomez
-0 string AGD2\xbe\xb8\xbb\xcd\x00 Macromedia Freehand 7 Document
-0 string AGD3\xbe\xb8\xbb\xcc\x00 Macromedia Freehand 8 Document
-# From Dave Wilson
-0 string AGD4\xbe\xb8\xbb\xcb\x00 Macromedia Freehand 9 Document
diff --git a/contrib/libs/libmagic/magic/Magdir/flif b/contrib/libs/libmagic/magic/Magdir/flif
deleted file mode 100644
index 9406208f47..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/flif
+++ /dev/null
@@ -1,36 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: flif,v 1.1 2015/11/23 22:04:36 christos Exp $
-# flif: Magic data for file(1) command.
-# FLIF (Free Lossless Image Format)
-
-0 string FLIF FLIF
->4 string <H image data
->>6 beshort x \b, %u
->>8 beshort x \bx%u
->>5 string 1 \b, 8-bit/color,
->>5 string 2 \b, 16-bit/color,
->>4 string 1 \b, grayscale, non-interlaced
->>4 string 3 \b, RGB, non-interlaced
->>4 string 4 \b, RGBA, non-interlaced
->>4 string A \b, grayscale
->>4 string C \b, RGB, interlaced
->>4 string D \b, RGBA, interlaced
->4 string >H \b, animation data
->>5 ubyte <255 \b, %i frames
->>>7 beshort x \b, %u
->>>9 beshort x \bx%u
->>>6 string =1 \b, 8-bit/color
->>>6 string =2 \b, 16-bit/color
->>5 ubyte 0xFF
->>>6 beshort x \b, %i frames,
->>>9 beshort x \b, %u
->>>11 beshort x \bx%u
->>>8 string =1 \b, 8-bit/color
->>>8 string =2 \b, 16-bit/color
->>4 string =Q \b, grayscale, non-interlaced
->>4 string =S \b, RGB, non-interlaced
->>4 string =T \b, RGBA, non-interlaced
->>4 string =a \b, grayscale
->>4 string =c \b, RGB, interlaced
->>4 string =d \b, RGBA, interlaced
diff --git a/contrib/libs/libmagic/magic/Magdir/fonts b/contrib/libs/libmagic/magic/Magdir/fonts
deleted file mode 100644
index 17373b5a58..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/fonts
+++ /dev/null
@@ -1,449 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: fonts,v 1.51 2022/08/16 11:16:39 christos Exp $
-# fonts: file(1) magic for font data
-#
-0 search/1 FONT ASCII vfont text
-0 short 0436 Berkeley vfont data
-0 short 017001 byte-swapped Berkeley vfont data
-
-# PostScript fonts (must precede "printer" entries), quinlan@yggdrasil.com
-# Modified by: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/PostScript_fonts
-# http://fileformats.archiveteam.org/wiki/Adobe_Type_1
-# Reference: http://mark0.net/download/triddefs_xml.7z
-# defs/p/pfb.trid.xml
-# Note: PFB stands for Printer Font Binary
-0 string %!PS-AdobeFont-1. PostScript Type 1 font text
-#!:mime font/x-postscript-pfb
-#!:ext pfb
->20 string >\0 (%s)
-# http://www.nationalarchives.gov.uk/pronom/fmt/525
-6 string %!PS-AdobeFont-1.
-# skip DROID fmt-525-signature-id-816.pfb by checking for content after header
->24 ubyte x PostScript Type 1 font program data
-#!:mime application/octet-stream
-!:mime font/x-postscript-pfb
-!:ext pfb
-# often followed by colon (3Ah) and space (20h) and font name like: DarkGardenMK LetterGothic
->>24 ubyte =0x3A
->>>26 string >\0 (%s)
-# sometimes instead of a colon there is %%CreationDate: and the "font name" comes later
->>24 ubyte !0x3A
-# font name directive followed by def like: c0633bt_.pfb
->>>25 search/1247 /FontName\040/
-# show font name in parentheses like: Frankfurt Lithos CharterBT-BoldItalic Courier10PitchBT-Bold
->>>>&0 regex [A-Za-z0-9-]+ (%s)
-# http://cd.textfiles.com/maxfonts/ATM/M/MIRROR__.PFB
-6 string %PS-AdobeFont-1. PostScript Type 1 font program data
-!:mime font/x-postscript-pfb
-!:ext pfb
-# font name like: Times-Mirror
->25 string >\0 (%s)
-0 string %!FontType1 PostScript Type 1 font program data
-#!:mime font/x-postscript-pfb
-#!:ext pfb
-6 string %!FontType1 PostScript Type 1 font program data
-#!:mime application/octet-stream
-!:mime font/x-postscript-pfb
-!:ext pfb
-# font name like: CaslonOpenFace FetteFraktur Kaufmann Linotext MesozoicGothic Old-Town
->23 string >\0 (%s)
-# http://cd.textfiles.com/maxfonts/ATM/P/PLAYBI.PFB
-230 string %!FontType1 PostScript Type 1 font program data
-!:mime font/x-postscript-pfb
-!:ext pfb
-# font name like: Playbill
->247 string >\0 (%s)
-0 string %!PS-Adobe-3.0\ Resource-Font PostScript Type 1 font text
-#!:mime font/x-postscript-pfb
-#!:ext pfb
-
-# Summary: PostScript Type 1 Printer Font Metrics
-# URL: https://en.wikipedia.org/wiki/PostScript_fonts
-# Reference: https://partners.adobe.com/public/developer/en/font/5178.PFM.pdf
-# Modified by: Joerg Jenderek
-# Note: moved from ./msdos magic
-# dfVersion 256=0100h
-0 uleshort 0x0100
-# GRR: line above is too general as it catches also TrueType font,
-# raw G3 data FAX, WhatsApp encrypted and Panorama database
-# dfType 129=0081h
->66 uleshort 0x0081
-# dfVertRes 300=012Ch not needed as additional test
-#>>70 uleshort 0x012c
-# dfHorizRes 300=012Ch
-#>>>72 uleshort 0x012c
-# dfDriverInfo points to postscript information section
->>(101.l) string/c Postscript Printer Font Metrics
-# above labeled "PFM data" by ./msdos (version 5.28) or "Adobe Printer Font Metrics" by TrID
-!:mime application/x-font-pfm
-# AppleShare Print Server
-#!:apple ASPS????
-!:ext pfm
-# dfCopyright 60 byte null padded Copyright string. uncomment it to get old looking
-#>>>6 string >\060 - %-.60s
-# dfDriverInfo
->>>139 ulelong >0
-# often abbreviated and same as filename
->>>>(139.l) string x %s
-# dfSize
->>>2 ulelong x \b, %d bytes
-# dfFace 210=D2h 9Eh
->>>105 ulelong >0
-# Windows font name
->>>>(105.l) string x \b, %s
-# dfItalic
->>>80 ubyte 1 italic
-# dfUnderline
->>>81 ubyte 1 underline
-# dfStrikeOut
->>>82 ubyte 1 strikeout
-# dfWeight 400=0x0190 300=0x012c 500=0x01f4 600=0x0258 700=0x02bc
->>>83 uleshort >699 bold
-# dfPitchAndFamily 16 17 48 49 64 65
->>>90 ubyte 16 serif
->>>90 ubyte 17 serif proportional
-#>>>90 ubyte 48 other
->>>90 ubyte 49 proportional
->>>90 ubyte 64 script
->>>90 ubyte 65 script proportional
-
-# X11 font files in SNF (Server Natural Format) format
-# updated by Joerg Jenderek at Feb 2013 and Nov 2021
-# http://computer-programming-forum.com/51-perl/8f22fb96d2e34bab.htm
-# URL: http://fileformats.archiveteam.org/wiki/SNF
-# Reference: https://cgit.freedesktop.org/xorg/lib/libXfont/tree/src/bitmap/snfstr.h
-0 belong 00000004
-# version2 same as version1 in struct _snfFontInfo
->104 belong 00000004 X11 SNF font data, MSB first
-# GRR: line above is too general as it catches also DEGAS low-res bitmap like:
-# http://cd.textfiles.com/geminiatari/FILES/GRAPHICS/ANIMAT/SPID_PAT/BIGSPID.PI1
-!:mime application/x-font-sfn
-!:ext snf
-# GRR: line below is too general as it catches also Xbase index file t3-CHAR.NDX
-0 lelong 00000004
->104 lelong 00000004 X11 SNF font data, LSB first
-!:mime application/x-font-sfn
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/snf-x11-lsb.trid.xml
-!:ext snf
-
-# X11 Bitmap Distribution Format, from Daniel Quinlan (quinlan@yggdrasil.com)
-0 search/1 STARTFONT\ X11 BDF font text
-
-# From: Joerg Jenderek
-# URL: https://grub.gibibit.com/New_font_format
-# Reference: util/grub-mkfont.c
-# include/grub/fontformat.h
-# FONT_FORMAT_SECTION_NAMES_FILE
-0 string FILE
-# FONT_FORMAT_PFF2_MAGIC
->8 string PFF2
-# leng 4 only at the moment
->>4 ubelong 4
-# FONT_FORMAT_SECTION_NAMES_FONT_NAME
->>>12 string NAME GRUB2 font
-!:mime application/x-font-pf2
-!:ext pf2
-# length of font_name
->>>>16 ubelong >0
-# font_name
->>>>>20 string >\0 "%-s"
-
-# X11 fonts, from Daniel Quinlan (quinlan@yggdrasil.com)
-# PCF must come before SGI additions ("MIPSEL MIPS-II COFF" collides)
-0 string \001fcp X11 Portable Compiled Font data,
->12 lelong ^0x08 bit: LSB,
->12 lelong &0x08 bit: MSB,
->12 lelong ^0x04 byte: LSB first
->12 lelong &0x04 byte: MSB first
-0 string D1.0\015 X11 Speedo font data
-
-#------------------------------------------------------------------------------
-# FIGlet fonts and controlfiles
-# From figmagic supplied with Figlet version 2.2
-# "David E. O'Brien" <obrien@FreeBSD.ORG>
-0 string flf FIGlet font
->3 string >2a version %-2.2s
-0 string flc FIGlet controlfile
->3 string >2a version %-2.2s
-
-# libGrx graphics lib fonts, from Albert Cahalan (acahalan@cs.uml.edu)
-# Used with djgpp (DOS Gnu C++), sometimes Linux or Turbo C++
-0 belong 0x14025919 libGrx font data,
->8 leshort x %dx
->10 leshort x \b%d
->40 string x %s
-# Misc. DOS VGA fonts, from Albert Cahalan (acahalan@cs.uml.edu)
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/CPI
-# Reference: http://www.delorie.com/djgpp/doc/rbinter/it/58/17.html
-0 belong 0xff464f4e DOS code page font data collection
-!:mime font/x-dos-cpi
-!:ext cpi
-0 string \x7fDRFONT DR-DOS code page font data collection
-!:mime font/x-drdos-cpi
-!:ext cpi
-7 belong 0x00454741 DOS code page font data
-7 belong 0x00564944 DOS code page font data (from Linux?)
-4098 string DOSFONT DOSFONT2 encrypted font data
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/GEM_bitmap_font
-# Reference: http://cd.textfiles.com/ataricompendium/BOOK/HTML/APPENDC.HTM#cnt
-#
-# usual case with lightening mask and skewing mask 5555h~UU
-#62 ulelong 0x55555555
-# skip cl8m8ocofedso.testfile by looking for face size lower/equal 72
-#>2 uleshort <73
-#>>0 use gdos-font
-# BOX18.GFT COWBOY30.GFT ROYALK30.GFT
-#62 ulelong 0
-# skip ISO 9660 CD-ROM ./filesystem by looking for low positive face size
-#>2 uleshort >2
-# skip DOS 2.0 backup id file ./msdos by looking for face size lower/equal 72
-#>>2 uleshort <73
-# skip MS oem.hlp, some Windows ICO ./msdos by looking for valid long name like WYE
-#>>>4 ulelong >0x001F1f1F
-# skip Microsoft WinWord 2.0 ./msdos by looking for positive offset to font data
-#>>>>76 ulelong >83
-#>>>>>0 use gdos-font
-0 name gdos-font
->0 uleshort x GEM GDOS font
-!:mime application/x-font-gdos
-# also .eps found like AA070GEP.EPS AI360GEP.EPS
-!:ext fnt/gtf
-# font name like Big&Tall, Celtic #s, Courier, University Bold, WYE
->4 string x %.32s
-# face size in points 3-72 SLSS03CG.FNT H1CELT72.FNT
->2 uleshort x %u
-# face ID (must be unique)
->0 uleshort x \b, ID %#4.4x
-# lowest character index in face (4 but usually 32 for disk-loaded fonts)
-#>36 uleshort !32 \b, unusual character index %u
-# width of the widest character like 0 8 10 12 16 24 32
-#>50 uleshort x \b, %u char width
-# width of the widest character cell like 8 11 12 14 15 16 33 67
-#>52 uleshort x \b, %u cell width
-# thickening size in pixel like 0 1 2 3 4 5 6 7 8
-#>58 uleshort x \b, %u thick
-# lightening mask to eliminate pixels, usually 5555h
->62 uleshort !0x5555 \b, lightening mask %#x
-# skewing mask to determine when to perform additional rotation when skewing, usually 5555h
->64 uleshort !0x5555 \b, skewing mask %#x
-# offset to optional horizontal offset table 0 58h~88 5eh 252h
-#>68 ulelong x \b, %#x horizontal table offset
-# offset of character offset table 54h for many *.GFT 55h 58h 5Eh 120h 1D4h 202h 220h
-#>72 ulelong x \b, %#x coffset
-# offset to font data like 116h 118h 158 20Ah 20Eh
->76 ulelong x \b, %#x foffset
-# form width in bytes like 58 67 156 190 227 317 345
-#>80 uleshort x \b, %u fwidth
-# form height in bytes like 4 8 11 17 26 56 70 90 120 146 150
-#>82 uleshort x \b, %u fheight
-# pointer to the next font like 0 10000h 20000h 30000h 40000h 60000h 80000h E0000h D0000h
-#>84 ulelong x \b, %#x noffset
-
-# downloadable fonts for browser (prints type) anthon@mnt.org
-# https://tools.ietf.org/html/rfc3073
-0 string PFR1 Portable Font Resource font data (new)
->102 string >0 \b: %s
-0 string PFR0 Portable Font Resource font data (old)
->4 beshort >0 version %d
-
-# True Type fonts
-# Modified by: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/TrueType
-# Reference: https://developer.apple.com/fonts/TrueType-Reference-Manual/
-#
-# sfnt version "typ1" used by some Apple, but no example found
-0 string typ1
->0 use sfnt-font
->0 use sfnt-names
-# sfnt version "true" used by some Apple
-0 string true
->0 use sfnt-font
->0 use sfnt-names
-# GRR: below test is too general
-# sfnt version often 0x00010000
-0 string \000\001\000\000
->0 use sfnt-font
->0 use sfnt-names
-# validate and display sfnt font data like number of tables
-0 name sfnt-font
-# file 5.30 version assumes 00FFh as maximal number of tables
-#>4 ubeshort <0x0100
-# maximal 27 tables found like in Skia.ttf
-# 46 different table names mentioned on Apple specification
-# skip 1st sequence of DOS 2 backup with path separator (\~92 or /~47) misinterpreted as table number
->4 ubeshort <47
-# skip bad examples with garbage table names like in a5.show HYPERC MAC
-# tag names consist of up to four characters padded with spaces at end like
-# BASE DSIG OS/2 Zapf acnt glyf cvt vmtx xref ...
->>12 regex/4l \^[A-Za-z][A-Za-z][A-Za-z/][A-Za-z2\ ]
-#>>>0 ubelong x \b, sfnt version %#x
->>>0 ubelong !0x4f54544f TrueType
-!:mime font/sfnt
-!:apple ????tfil
-# .ttf for TrueType font
-# EUDC.tte created by private character editor %WINDIR%\system32\eudcedit.exe
-!:ext ttf/tte
-# sfnt version 4F54544Fh~OTTO
->>>0 ubelong =0x4f54544f OpenType
-!:mime font/otf
-!:apple ????OTTO
-!:ext otf
->>>0 ubelong x Font data
-# DSIG=44454947h table name implies a digitally signed font
-# search range = number of tables * 16 =< maximal number of tables * 16 = 27 * 16 = 432
->>>12 search/432 DSIG \b, digitally signed
->>>4 ubeshort x \b, %d tables
-# minimal 9 tables found like in NISC18030.ttf
-#>>>4 ubeshort <10 TMIN
-#>>>4 ubeshort >24 TBIG
-# table directory entries
->>>12 string x \b, 1st "%4.4s"
-
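A minimal Python sketch of the sfnt directory walk implied above: read the 4-byte version, the big-endian table count at offset 4, then the 16-byte directory records from offset 12 and inspect their 4-character tags (for example DSIG for a digitally signed font). "font.ttf" is a placeholder path, and the 46-table cap mirrors the sanity check above.

import struct

def sfnt_tables(font: bytes):
    version = font[:4]
    if version not in (b"\x00\x01\x00\x00", b"true", b"typ1", b"OTTO"):
        raise ValueError("not an sfnt-housed font")
    kind = "OpenType" if version == b"OTTO" else "TrueType"
    num_tables = struct.unpack_from(">H", font, 4)[0]
    tags = [font[12 + 16 * i:12 + 16 * i + 4].decode("ascii", "replace")
            for i in range(min(num_tables, 46))]
    return kind, num_tables, tags

with open("font.ttf", "rb") as f:            # hypothetical font file
    kind, count, tags = sfnt_tables(f.read(12 + 16 * 46))
print("%s font data, %d tables, 1st \"%s\"" % (kind, count, tags[0] if tags else ""))
if "DSIG" in tags:
    print("digitally signed")
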
-# search and display 1st name in sfnt font which is often copyright text
-# does not work inside font collections
-0 name sfnt-names
-# search for naming table
->12 search/432/s name
-# biggest offset 0x0100bd28 like Windows10 Fonts\simsunb.ttf
-#>>>>&8 ubelong >0x0100bd27 BIGGEST OFFSET
->>&8 ubelong >0x00100000
-# offset of name table
->>>&-4 ubelong x \b, name offset %#x
-# GRR: pointer to name table only works if offset ~< FILE_BYTES_MAX = 100000h defined in src\file.h
->>&8 ubelong <0x00100000
->>>&-16 ubelong x
-# name table
->>>>(&8.L) ubequad x
-# invalid format selector
-#>>>>>&-8 ubeshort !0 \b, invalid selector %x
-# minimal 3 name records found like in c:\Program Files (x86)\Tesseract-OCR\tessdata\pdf.ttf
-# maximal 1227 name records found like in Apple Chancery.ttf
-#>>>>>&-6 ubeshort <0x4 mincount
-#>>>>>&-6 ubeshort >130 maxcount
->>>>>&-6 ubeshort x \b, %d names
-# offset to start of string storage from start of table
-#>>>>>&-4 ubeshort x \b, record offset %d
-# 1st name record
-# string offset from start of storage area
-#>>>>>&8 ubeshort x \b, string offset %d
-# string length
-#>>>>>&6 ubeshort x \b, string length %d
-# minimal name string 7 like in c:\Program Files (x86)\Kodi\addons\webinterface.default\lib\video-js\font\VideoJS.ttf
-# also found 0 like in SWZCONLN.TTF
-#>>>>>&6 ubeshort <8 MIN STRING
-# maximal name string 806 like in c:\Windows\Fonts\palabi.ttf
-#>>>>>&6 ubeshort >805 MAX STRING
-# platform identifier: 0~Apple Unicode, 1~Macintosh, 3~Microsoft
-#>>>>>&-2 ubeshort >3 BAD PLATFORM
->>>>>&-2 ubeshort 0 \b, Unicode
->>>>>&-2 ubeshort 1 \b, Macintosh
->>>>>&-2 ubeshort 3 \b, Microsoft
-# languageID (0~english Macintosh, 0409h~english Microsoft, ...)
->>>>>&2 ubeshort >0 \b, language %#x
-# name identifiers
-# often 0~copyright, 1~font, 2~font subfamily, 5~version, 13~license, 19~sample, ...
->>>>>&4 ubeshort >0 \b, type %d string
-# platform specific encoding:
-# 0~undefined character set, 1~UGL set with Unicode, 3~Unicode 2.0 BMP only, 4~Unicode 2.0
-#>>>>>&0 ubeshort x \b, %d encoding
->>>>>&0 ubeshort 0
-# handle only name string offset 0 because we do not know how to add 2 relative offsets
->>>>>>&6 ubeshort 0
->>>>>>>&(&-14.S-18) ubyte !0
-# GRR: instead of 806 only the first MAXstring = 96 characters are displayed as defined in src\file.h
-# often copyright string that starts like \251 2006 The Monotype Corporation
->>>>>>>>&-1 string x \b, %-11.96s
-# test for unicode string
->>>>>>>&(&-14.S-18) ubyte 0
->>>>>>>>&0 lestring16 x \b, %-11.96s
-# unicode encoding
->>>>>&0 ubeshort >0
->>>>>>&6 ubeshort 0
->>>>>>>&(&-14.S-17) lestring16 x \b, %-11.96s
-
-0 string \007\001\001\000Copyright\ (c)\ 199 Adobe Multiple Master font
-0 string \012\001\001\000Copyright\ (c)\ 199 Adobe Multiple Master font
-
-# TrueType/OpenType font collections (.ttc)
-# URL: https://en.wikipedia.org/wiki/OpenType
-# https://www.microsoft.com/typography/otspec/otff.htm
-# Modified by: Joerg Jenderek
-# Note: container for TrueType, OpenType font
-0 string ttcf
-# skip ASCII text
->4 ubyte 0
-# sfnt version often 0x00010000 of 1st table is TrueType
->>(12.L) ubelong !0x4f54544f TrueType
-!:mime font/ttf
-!:apple ????tfil
-!:ext ttc
-# sfnt version 4F54544Fh~OTTO of 1st table is OpenType font
->>(12.L) ubelong =0x4f54544f OpenType
-!:mime font/otf
-!:apple ????OTTO
-# no example found for otc
-!:ext ttc/otc
->>4 ubyte x font collection data
-#!:mime font/collection
-# TTC version
->>4 belong 0x00010000 \b, 1.0
->>4 belong 0x00020000 \b, 2.0
->>8 ubelong >0 \b, %d fonts
-# array offset size = fonts * offsetsize = fonts * 4
->>(8.L*4) ubequad x
-# 0x44454947 = 'DSIG'
->>>&4 belong 0x44534947 \b, digitally signed
-# offset to 1st font
->>12 ubelong x \b, at %#x
-# point to 1st font that starts with sfnt version
->>(12.L) use sfnt-font
-
-# Opentype font data from Avi Bercovich
-0 string OTTO OpenType font data
-!:mime application/vnd.ms-opentype
-
-# From: Alex Myczko <alex@aiei.ch>
-0 string SplineFontDB: Spline Font Database
-!:mime application/vnd.font-fontforge-sfd
->14 string x version %s
-
-# EOT
-0x40 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->0x22 string LP Embedded OpenType (EOT)
-# workaround until there's lepstring16
-# >>0x52 lepstring16/h >\0 \b, %s family
->>0x52 short !0
->>>0x54 lestring16 x \b, %s family
-!:mime application/vnd.ms-fontobject
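A small Python sketch of the lepstring16 workaround above: the 16-bit value at 0x52 is treated as the byte length of the UTF-16LE family name starting at 0x54 (an assumption based on the rule, not on the EOT specification):

def eot_family_name(buf):
    # length-prefixed UTF-16LE string: 16-bit size at 0x52, text at 0x54
    size = int.from_bytes(buf[0x52:0x54], "little")
    return buf[0x54:0x54 + size].decode("utf-16-le", "replace")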
-
-# Web Open Font Format (.woff)
-0 name woff
->4 belong 0x00010000 \b, TrueType
->4 belong 0x4F54544F \b, CFF
->4 belong 0x74727565 \b, TrueType
->4 default x
->>4 belong x \b, flavor %d
->8 belong x \b, length %d
-#>12 beshort x \b, numTables %d
-#>14 beshort x \b, reserved %d
-#>16 belong x \b, totalSfntSize %d
-
-# https://www.w3.org/TR/WOFF/
-0 string wOFF Web Open Font Format
-!:mime font/woff
->0 use woff
->20 beshort x \b, version %d
->22 beshort x \b.%d
-# https://www.w3.org/TR/WOFF2/
-0 string wOF2 Web Open Font Format (Version 2)
-!:mime font/woff2
-!:ext woff2
->0 use woff
-#>20 belong x \b, totalCompressedSize %d
->24 beshort x \b, version %d
->26 beshort x \b.%d
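The woff rules above read the flavor at offset 4, the total length at offset 8, and the major/minor version at 20/22 (24/26 for wOF2), all big-endian. A hedged Python sketch of the same header walk (the field handling is illustrative, not the W3C reference code):

import struct

def describe_woff(buf):
    sig = buf[:4]
    flavor, length = struct.unpack_from(">II", buf, 4)
    if sig == b"wOFF":
        major, minor = struct.unpack_from(">HH", buf, 20)
    elif sig == b"wOF2":
        major, minor = struct.unpack_from(">HH", buf, 24)
    else:
        return "not a WOFF/WOFF2 file"
    kinds = {0x00010000: "TrueType", 0x4F54544F: "CFF", 0x74727565: "TrueType"}
    kind = kinds.get(flavor, "flavor %#x" % flavor)
    return "Web Open Font Format, %s, length %d, version %d.%d" % (kind, length, major, minor)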
diff --git a/contrib/libs/libmagic/magic/Magdir/forth b/contrib/libs/libmagic/magic/Magdir/forth
deleted file mode 100644
index 34c918152a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/forth
+++ /dev/null
@@ -1,82 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: forth,v 1.4 2021/04/26 15:56:00 christos Exp $
-# forth: file(1) magic for various Forth environments
-# From: Lubomir Rintel <lkundrak@v3.sk>
-#
-
-# Has a FORTH stack diagram and something that looks very much like a FORTH
-# multi-line word definition. Probably a FORTH source.
-0 regex \[[:space:]]\\(([[:space:]].*)?\ --\ (.*[[:space:]])?\\)
->0 regex \^:\[[:space:]]
->>0 regex \^;$ FORTH program
-!:mime text/x-forth
-
-# Inline word definition complete with a stack diagram
-0 regex \^:[[:space:]].*[[:space:]]\\(([[:space:]].*)?\ --\ (.*[[:space:]])?\\)[[:space:]].*[[:space:]];$ FORTH program
-!:mime text/x-forth
-
-# Various dictionary images used by OpenFirware FORTH environment
-
-0 lelong 0xe1a00000
->8 lelong 0xe1a00000
-# skip raspberry pi kernel image kernel7.img by checking for positive text length
->>24 lelong >0 ARM OpenFirmware FORTH Dictionary,
->>>24 lelong x Text length: %d bytes,
->>>28 lelong x Data length: %d bytes,
->>>32 lelong x Text Relocation Table length: %d bytes,
->>>36 lelong x Data Relocation Table length: %d bytes,
->>>40 lelong x Entry Point: %#08X,
->>>44 lelong x BSS length: %d bytes
-
-0 string MP
->28 lelong 1 x86 OpenFirmware FORTH Dictionary,
->>4 leshort x %d blocks
->>2 leshort x + %d bytes,
->>6 leshort x %d relocations,
->>8 leshort x Header length: %d paragraphs,
->>10 leshort x Data Size: %d
->>12 leshort x - %d 4K pages,
->>14 lelong x Initial Stack Pointer: %#08X,
->>20 lelong x Entry Point: %#08X,
->>24 lelong x First Relocation Item: %d,
->>26 lelong x Overlay Number: %d,
->>18 leshort x Checksum: %#08X
-
-0 belong 0x48000020 PowerPC OpenFirmware FORTH Dictionary,
->4 belong x Text length: %d bytes,
->8 belong x Data length: %d bytes,
->12 belong x BSS length: %d bytes,
->16 belong x Symbol Table length: %d bytes,
->20 belong x Entry Point: %#08X,
->24 belong x Text Relocation Table length: %d bytes,
->28 belong x Data Relocation Table length: %d bytes
-
-0 lelong 0x10000007 MIPS OpenFirmware FORTH Dictionary,
->4 lelong x Text length: %d bytes,
->8 lelong x Data length: %d bytes,
->12 lelong x BSS length: %d bytes,
->16 lelong x Symbol Table length: %d bytes,
->20 lelong x Entry Point: %#08X,
->24 lelong x Text Relocation Table length: %d bytes,
->28 lelong x Data Relocation Table length: %d bytes
-
-# Dictionary images used by minimal C FORTH environments, any platform,
-# using native byte order.
-
-# Weak.
-#0 short 0x5820 cForth 16-bit Dictionary,
-#>2 short x Serial: %#08X,
-#>4 short x Dictionary Start: %#08X,
-#>6 short x Dictionary Size: %d bytes,
-#>8 short x User Area Start: %#08X,
-#>10 short x User Area Size: %d bytes,
-#>12 short x Entry Point: %#08X
-
-0 long 0x581120 cForth 32-bit Dictionary,
->4 long x Serial: %#08X,
->8 long x Dictionary Start: %#08X,
->12 long x Dictionary Size: %d bytes,
->16 long x User Area Start: %#08X,
->20 long x User Area Size: %d bytes,
->24 long x Entry Point: %#08X
diff --git a/contrib/libs/libmagic/magic/Magdir/fortran b/contrib/libs/libmagic/magic/Magdir/fortran
deleted file mode 100644
index 6abc2f70cb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/fortran
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: fortran,v 1.10 2015/11/05 18:47:16 christos Exp $
-# FORTRAN source
-# Check that the first 100 lines start with C or whitespace.
-0 regex/100l !\^[^Cc\ \t].*$
->0 regex/100l \^[Cc][\ \t] FORTRAN program text
-!:mime text/x-fortran
-!:strength - 5
diff --git a/contrib/libs/libmagic/magic/Magdir/frame b/contrib/libs/libmagic/magic/Magdir/frame
deleted file mode 100644
index c0fd840a46..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/frame
+++ /dev/null
@@ -1,62 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: frame,v 1.14 2019/11/25 00:31:30 christos Exp $
-# frame: file(1) magic for FrameMaker files
-#
-# This stuff came on a FrameMaker demo tape, most of which is
-# copyright, but this file is "published" as witness the following:
-#
-# Note that this is the Framemaker Maker Interchange Format, not the
-# Normal format which would be application/vnd.framemaker.
-#
-0 string \<MakerFile FrameMaker document
-!:mime application/x-mif
->11 string 5.5 (5.5
->11 string 5.0 (5.0
->11 string 4.0 (4.0
->11 string 3.0 (3.0
->11 string 2.0 (2.0
->11 string 1.0 (1.0
->14 byte x %c)
-# URL: http://fileformats.archiveteam.org/wiki/Maker_Interchange_Format
-# Reference: https://help.adobe.com/en_US/framemaker/mifreference/mifref.pdf
-# Update: Joerg Jenderek 2019 Nov
-0 string \<MIFFile FrameMaker MIF (ASCII) file
-# https://www.iana.org/assignments/media-types/application/vnd.mif
-!:mime application/vnd.mif
-# mostly mif, but bookTOC.framemif was also found
-!:ext mif/framemif
-# followed by space~20h
-#>8 ubyte 0x20 \b, space before version
-# 3 characters of version number of the MIF language like 1.0, 2.0 ... 2015 ...
->9 string x (%.3s
-# if not greater sign then display 4th character of version
->12 ubyte =0x3e \b)
->12 ubyte !0x3e \b%c)
-# comment starting with # shows the name+version number of generating program
->13 search/3 #
->>&0 string x "%s"
-0 search/1 \<MakerDictionary FrameMaker Dictionary text
-!:mime application/x-mif
->17 string 3.0 (3.0)
->17 string 2.0 (2.0)
->17 string 1.0 (1.x)
-0 string \<MakerScreenFont FrameMaker Font file
-!:mime application/x-mif
->17 string 1.01 (%s)
-0 string \<MML FrameMaker MML file
-!:mime application/x-mif
-0 string \<BookFile FrameMaker Book file
-!:mime application/x-mif
->10 string 3.0 (3.0
->10 string 2.0 (2.0
->10 string 1.0 (1.0
->13 byte x %c)
-# XXX - this book entry should be verified, if you find one, uncomment this
-#0 string \<Book\040 FrameMaker Book (ASCII) file
-#!:mime application/x-mif
-#>6 string 3.0 (3.0)
-#>6 string 2.0 (2.0)
-#>6 string 1.0 (1.0)
-0 string \<Maker\040Intermediate\040Print\040File FrameMaker IPL file
-!:mime application/x-mif
diff --git a/contrib/libs/libmagic/magic/Magdir/freebsd b/contrib/libs/libmagic/magic/Magdir/freebsd
deleted file mode 100644
index 66aff6caf2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/freebsd
+++ /dev/null
@@ -1,164 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: freebsd,v 1.9 2022/01/19 12:44:13 christos Exp $
-# freebsd: file(1) magic for FreeBSD objects
-#
-# All new-style FreeBSD magic numbers are in host byte order (i.e.,
-# little-endian on x86).
-#
-# XXX - this comes from the file "freebsd" in a recent FreeBSD version of
-# "file"; it, and the NetBSD stuff in "netbsd", appear to use different
-# schemes for distinguishing between executable images, shared libraries,
-# and object files.
-#
-# FreeBSD says:
-#
-# Regardless of whether it's pure, demand-paged, or none of the
-# above:
-#
-# if the entry point is < 4096, then it's a shared library if
-# the "has run-time loader information" bit is set, and is
-# position-independent if the "is position-independent" bit
-# is set;
-#
-# if the entry point is >= 4096 (or >4095, same thing), then it's
-# an executable, and is dynamically-linked if the "has run-time
-# loader information" bit is set.
-#
-# On x86, NetBSD says:
-#
-# If it's neither pure nor demand-paged:
-#
-# if it has the "has run-time loader information" bit set, it's
-# a dynamically-linked executable;
-#
-# if it doesn't have that bit set, then:
-#
-# if it has the "is position-independent" bit set, it's
-# position-independent;
-#
-# if the entry point is non-zero, it's an executable, otherwise
-# it's an object file.
-#
-# If it's pure:
-#
-# if it has the "has run-time loader information" bit set, it's
-# a dynamically-linked executable, otherwise it's just an
-# executable.
-#
-# If it's demand-paged:
-#
-# if it has the "has run-time loader information" bit set,
-# then:
-#
-# if the entry point is < 4096, it's a shared library;
-#
-# if the entry point is = 4096 or > 4096 (i.e., >= 4096),
-# it's a dynamically-linked executable);
-#
-# if it doesn't have the "has run-time loader information" bit
-# set, then it's just an executable.
-#
-# (On non-x86, NetBSD does much the same thing, except that it uses
-# 8192 on 68K - except for "68k4k", which is presumably "68K with 4K
-# pages - SPARC, and MIPS, presumably because Sun-3's and Sun-4's
-# had 8K pages; dunno about MIPS.)
-#
-# I suspect the two will differ only in perverse and uninteresting cases
-# ("shared" libraries that aren't demand-paged and whose pages probably
-# won't actually be shared, executables with entry points <4096).
-#
-# I leave it to those more familiar with FreeBSD and NetBSD to figure out
-# what the right answer is (although using ">4095", FreeBSD-style, is
-# probably better than separately checking for "=4096" and ">4096",
-# NetBSD-style). (The old "netbsd" file analyzed FreeBSD demand paged
-# executables using the NetBSD technique.)
-#
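The entries below implement the FreeBSD branch of the rules described above. A rough Python sketch of that decision, with offsets mirroring the magic lines (flags byte at 3, symbol-table size at 16, entry point at 20); this illustrates the heuristic only and is not an a.out loader:

import struct

def classify_freebsd_aout(buf):
    flags = buf[3]
    syms  = struct.unpack_from("<i", buf, 16)[0]
    entry = struct.unpack_from("<i", buf, 20)[0]
    if entry < 4096:
        if flags & 0x80:          # has run-time loader information
            kind = "shared library"
        elif flags & 0x40:        # is position-independent
            kind = "PIC object"
        else:
            kind = "object"
    else:
        kind = "dynamically linked executable" if flags & 0x80 else "executable"
    if syms > 0:
        kind += ", not stripped"
    return kind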
-0 lelong&0377777777 041400407 FreeBSD/i386
->20 lelong <4096
->>3 byte&0xC0 &0x80 shared library
->>3 byte&0xC0 0x40 PIC object
->>3 byte&0xC0 0x00 object
->20 lelong >4095
->>3 byte&0x80 0x80 dynamically linked executable
->>3 byte&0x80 0x00 executable
->16 lelong >0 not stripped
-
-0 lelong&0377777777 041400410 FreeBSD/i386 pure
->20 lelong <4096
->>3 byte&0xC0 &0x80 shared library
->>3 byte&0xC0 0x40 PIC object
->>3 byte&0xC0 0x00 object
->20 lelong >4095
->>3 byte&0x80 0x80 dynamically linked executable
->>3 byte&0x80 0x00 executable
->16 lelong >0 not stripped
-
-0 lelong&0377777777 041400413 FreeBSD/i386 demand paged
->20 lelong <4096
->>3 byte&0xC0 &0x80 shared library
->>3 byte&0xC0 0x40 PIC object
->>3 byte&0xC0 0x00 object
->20 lelong >4095
->>3 byte&0x80 0x80 dynamically linked executable
->>3 byte&0x80 0x00 executable
->16 lelong >0 not stripped
-
-0 lelong&0377777777 041400314 FreeBSD/i386 compact demand paged
->20 lelong <4096
->>3 byte&0xC0 &0x80 shared library
->>3 byte&0xC0 0x40 PIC object
->>3 byte&0xC0 0x00 object
->20 lelong >4095
->>3 byte&0x80 0x80 dynamically linked executable
->>3 byte&0x80 0x00 executable
->16 lelong >0 not stripped
-
-# XXX gross hack to identify core files
-# cores start with a struct tss; we take advantage of the following:
-# byte 7: highest byte of the kernel stack pointer, always 0xfe
-# 8/9: kernel (ring 0) ss value, always 0x0010
-# 10 - 27: ring 1 and 2 ss/esp, unused, thus always 0
-# 28: low order byte of the current PTD entry, always 0 since the
-# PTD is page-aligned
-#
-7 string \357\020\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 FreeBSD/i386 a.out core file
->1039 string >\0 from '%s'
-
-# /var/run/ld.so.hints
-# What are you laughing about?
-0 lelong 011421044151 ld.so hints file (Little Endian
->4 lelong >0 \b, version %d)
->4 belong <1 \b)
-0 belong 011421044151 ld.so hints file (Big Endian
->4 belong >0 \b, version %d)
->4 belong <1 \b)
-
-#
-# Files generated by FreeBSD scrshot(1)/vidcontrol(1) utilities
-#
-0 string SCRSHOT_ scrshot(1) screenshot,
->8 byte x version %d,
->9 byte 2 %d bytes in header,
->>10 byte x %d chars wide by
->>11 byte x %d chars high
-
-#
-# FreeBSD kernel minidumps
-#
-0 string minidump\040FreeBSD/ FreeBSD kernel minidump
-# powerpc uses 32-byte magic, followed by 32-byte mmu kind, then version
->17 string powerpc
->>17 string >\0 for %s,
->>>32 string >\0 %s,
->>>>64 byte 0 big endian,
->>>>>64 belong x version %d
->>>>64 default x little endian,
->>>>>64 lelong x version %d
-# all other architectures use 24-byte magic, followed by version
->17 default x
->>17 string >\0 for %s,
->>>24 byte 0 big endian,
->>>>24 belong x version %d
->>>24 default x little endian,
->>>>24 lelong x version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/fsav b/contrib/libs/libmagic/magic/Magdir/fsav
deleted file mode 100644
index 5c1d6e23dc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/fsav
+++ /dev/null
@@ -1,128 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: fsav,v 1.22 2021/04/26 15:56:00 christos Exp $
-# fsav: file(1) magic for datafellows fsav virus definition files
-# Anthon van der Neut (anthon@mnt.org)
-
-# ftp://ftp.f-prot.com/pub/{macrdef2.zip,nomacro.def}
-0 beshort 0x1575 fsav macro virus signatures
->8 leshort >0 (%d-
->11 byte >0 \b%02d-
->10 byte >0 \b%02d)
-# ftp://ftp.f-prot.com/pub/sign.zip
-#10 ubyte <12
-#>9 ubyte <32
-#>>8 ubyte 0x0a
-#>>>12 ubyte 0x07
-#>>>>11 uleshort >0 fsav DOS/Windows virus signatures (%d-
-#>>>>10 byte 0 \b01-
-#>>>>10 byte 1 \b02-
-#>>>>10 byte 2 \b03-
-#>>>>10 byte 3 \b04-
-#>>>>10 byte 4 \b05-
-#>>>>10 byte 5 \b06-
-#>>>>10 byte 6 \b07-
-#>>>>10 byte 7 \b08-
-#>>>>10 byte 8 \b09-
-#>>>>10 byte 9 \b10-
-#>>>>10 byte 10 \b11-
-#>>>>10 byte 11 \b12-
-#>>>>9 ubyte >0 \b%02d)
-# ftp://ftp.f-prot.com/pub/sign2.zip
-#0 ubyte 0x62
-#>1 ubyte 0xF5
-#>>2 ubyte 0x1
-#>>>3 ubyte 0x1
-#>>>>4 ubyte 0x0e
-#>>>>>13 ubyte >0 fsav virus signatures
-#>>>>>>11 ubyte x size %#02x
-#>>>>>>12 ubyte x \b%02x
-#>>>>>>13 ubyte x \b%02x bytes
-
-# Joerg Jenderek: joerg dot jenderek at web dot de
-# clamav-0.100.2\docs\html\node60.html
-# https://github.com/vrtadmin/clamav-faq/raw/master/manual/clamdoc.pdf
-# ClamAV virus database files start with a 512 bytes colon separated header
-# ClamAV-VDB:buildDate:version:signaturesNumbers:functionalityLevelRequired:MD5:Signature:builder:buildTime
-# + gzipped (optional) tarball files
-# output can often be verified by `sigtool --info=FILE`
-0 string ClamAV-VDB: Clam AntiVirus
-# padding spaces imply a database
->511 ubyte =0x20 database
-!:mime application/x-clamav-database
-# empty build time
->>10 string =:: (unsigned)
-# sigtool(1) man page
-!:ext cud
-# display some text to avoid error like:
-# Magdir/fsav, 78: Warning: Current entry does not yet have a description for adding a EXTENSION type
-# file: could not find any valid magic files! (No error)
->>10 default x (with buildtime)
-#>>10 default x
-# clamtmp is used for a temporary database, e.g. during the update process
-# for a pure tar database only the cld extension was found
-!:ext cld/cvd/clamtmp/cud
->511 default x file
-!:mime application/x-clamav
-!:ext info
->11 string >\0
-# buildDate empty or like "22 Mar 2017 12-57 -0400"; verified by `sigtool -i FILE`
->>11 regex \^[^:]{0,23} \b, %s
-# version like 25170
->>>&1 regex \^[^:]{1,6} \b, version %s
-# signaturesNumbers like 4566249
->>>>&1 regex \^[^:]{1,10} \b, %s signatures
-# functionalityLevelRequired like 60
->>>>>&1 regex \^[^:]{1,4} \b, level %s
-# X for nothing or MD5
-#>>>>>>&1 regex \^[^:]{1,32} \b, MD5 "%s"
->>>>>>&1 regex \^[^:]{1,32}
-# X for nothing or digital signature starting like AIzk/LYbX
-#>>>>>>>&1 regex \^[^:]{1,255} \b, signature "%s"
->>>>>>>&1 regex \^[^:]{1,255}
-# builder like neo
->>>>>>>>&1 regex \^[^:]{1,32} \b, builder %s
-# buildTime like 1506611558
-#>>>>>>>>>&1 regex \^[^:]{1,10} \b, %s
->>>>>>>>>&1 regex \^[^:]{1,10}
-# padding with spaces
-#>>>>>>>>>>&1 ubequad x \b, padding %#16.16llx
->510 ubyte =0x20
-# inspect real database content
-#>>512 ubeshort x \b, database MAGIC %#x
-# ./archive handle pure tar archives
->>1012 quad =0 \b, with
->>>512 use tar-file
-# not pure tar
->>1012 quad !0
-# one space at the end of the text, then gzipped archives are handled by ./compress
->>>512 string \037\213 \b, with
->>>>512 indirect x
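A minimal Python sketch that splits the 512-byte colon-separated header described above into named fields (the field order follows the comment; `sigtool --info` remains the authoritative way to inspect a database):

def read_clamav_vdb_header(path):
    with open(path, "rb") as f:
        header = f.read(512).rstrip(b" ").decode("ascii", "replace")
    names = ["magic", "buildDate", "version", "signaturesNumbers",
             "functionalityLevelRequired", "MD5", "Signature", "builder", "buildTime"]
    return dict(zip(names, header.split(":")))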
-
-# Type: Grisoft AVG AntiVirus
-# From: David Newgas <david@newgas.net>
-0 string AVG7_ANTIVIRUS_VAULT_FILE AVG 7 Antivirus vault file data
-
-0 string X5O!P%@AP[4\\PZX54(P^)7CC)7}$EICAR
->33 string -STANDARD-ANTIVIRUS-TEST-FILE!$H+H* EICAR virus test files
-
-# From: Joerg Jenderek
-# URL: https://www.avira.com/
-# Note: found in directory %ProgramData%\Avira\Antivirus\INFECTED (Windows)
-# tested with version 15.0.43.23 in November 2019
-0 string AntiVir\ Qua Avira AntiVir quarantined
-!:mime application/x-avira-qua
-#!:mime application/octet-stream
-!:ext qua
->156 string SUSPICIOUS_FILE
-# file path of suspicious file
->>220 lestring16 x %s
->156 string !SUSPICIOUS_FILE
-# file path of virus file
->>228 lestring16 x %s
-# quarantined date
->60 ldate x at %s
-# virus/danger name
->156 string !SUSPICIOUS_FILE
->>156 string x \b, category "%s"
-
diff --git a/contrib/libs/libmagic/magic/Magdir/fusecompress b/contrib/libs/libmagic/magic/Magdir/fusecompress
deleted file mode 100644
index 165cf3c772..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/fusecompress
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: fusecompress,v 1.2 2011/08/08 09:05:55 christos Exp $
-# fusecompress: file(1) magic for fusecompress
-0 string \037\135\211 FuseCompress(ed) data
->3 byte 0x00 (none format)
->3 byte 0x01 (bz2 format)
->3 byte 0x02 (gz format)
->3 byte 0x03 (lzo format)
->3 byte 0x04 (xor format)
->3 byte >0x04 (unknown format)
->4 long x uncompressed size: %d
diff --git a/contrib/libs/libmagic/magic/Magdir/games b/contrib/libs/libmagic/magic/Magdir/games
deleted file mode 100644
index 0ccb4acff5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/games
+++ /dev/null
@@ -1,696 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: games,v 1.31 2023/03/29 22:57:27 christos Exp $
-# games: file(1) for games
-
-# Fabio Bonelli <fabiobonelli@libero.it>
-# Quake II - III data files
-0 string IDP2 Quake II 3D Model file,
->20 long x %u skin(s),
->8 long x (%u x
->12 long x %u),
->40 long x %u frame(s),
->16 long x Frame size %u bytes,
->24 long x %u vertices/frame,
->28 long x %u texture coordinates,
->32 long x %u triangles/frame
-
-0 string IBSP Quake
->4 long 0x26 II Map file (BSP)
->4 long 0x2E III Map file (BSP)
-
-0 string IDS2 Quake II SP2 sprite file
-
-#---------------------------------------------------------------------------
-# Doom and Quake
-# submitted by Nicolas Patrois
-
-0 string \xcb\x1dBoom\xe6\xff\x03\x01 Boom or linuxdoom demo
-# some doom lmp files don't match, I've got one beginning with \x6d\x02\x01\x01
-
-24 string LxD\ 203 Linuxdoom save
->0 string x , name=%s
->44 string x , world=%s
-
-# Quake
-
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/PAK
-# reference: https://quakewiki.org/wiki/.pak
-# GRR: line below is too general as it matches also Acorn PackDir compressed Archive
-# and Git pack ./revision
-0 string PACK
-# real Quake examples like pak0.pak contain only a few hundred files (e.g. 150)
-# so test for a small file count
->8 ulelong <0x01000000
-# in file version 5.32 test for null terminator is only true for
-# offset ~< FILE_BYTES_MAX = 1 MB defined in ../../src/file.h
-# look for null terminator of 1st entry name
->>(4.l+55) ubyte 0 Quake I or II world or extension
-!:mime application/x-dzip
-!:ext pak
-#>>>8 ulelong x \b, table size %u
-# dividing this by entry size (64) gives number of files
->>>8 ulelong/64 x \b, %u files
-# offset to the beginning of the file table
->>>4 ulelong x \b, offset %#x
-# 1st file entry
->>>(4.l) use pak-entry
-# 2nd file entry
-#>>>4 ulelong+64 x \b, offset %#x
-#>>>(4.l+64) use pak-entry
-#
-# display file table entry of Quake PAK archive
-0 name pak-entry
-# normally entries start after the header, which implies offset 12 or higher
->56 ulelong >11
-# the offset from the beginning of pak to beginning of this entry file contents
->>56 ulelong x at %#x
-# the size of file for this entry
->>60 ulelong x %u bytes
-# 56 byte null-terminated entry name string includes path like maps/e1m1.bsp
->>0 string x '%-.56s'
-# inspect entry content by jumping to entry offset
->>(56) indirect x \b:
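Putting the PACK header and the pak-entry layout above together, a hedged Python sketch that lists the archive contents (4-byte magic, table offset at 4, table size at 8, 64-byte entries of a 56-byte name plus offset and size):

import struct

def list_pak(path):
    with open(path, "rb") as f:
        magic, table_off, table_size = struct.unpack("<4sII", f.read(12))
        if magic != b"PACK":
            raise ValueError("not a Quake PAK archive")
        f.seek(table_off)
        table = f.read(table_size)
    for i in range(table_size // 64):
        name, offset, size = struct.unpack_from("<56sII", table, i * 64)
        yield name.split(b"\0", 1)[0].decode("latin-1"), offset, size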
-
-#0 string -1\x0a Quake I demo
-#>30 string x version %.4s
-#>61 string x level %s
-
-#0 string 5\x0a Quake I save
-
-# The levels
-
-# Quake 1
-
-0 string 5\x0aIntroduction Quake I save: start Introduction
-0 string 5\x0athe_Slipgate_Complex Quake I save: e1m1 The slipgate complex
-0 string 5\x0aCastle_of_the_Damned Quake I save: e1m2 Castle of the damned
-0 string 5\x0athe_Necropolis Quake I save: e1m3 The necropolis
-0 string 5\x0athe_Grisly_Grotto Quake I save: e1m4 The grisly grotto
-0 string 5\x0aZiggurat_Vertigo Quake I save: e1m8 Ziggurat vertigo (secret)
-0 string 5\x0aGloom_Keep Quake I save: e1m5 Gloom keep
-0 string 5\x0aThe_Door_To_Chthon Quake I save: e1m6 The door to Chthon
-0 string 5\x0aThe_House_of_Chthon Quake I save: e1m7 The house of Chthon
-0 string 5\x0athe_Installation Quake I save: e2m1 The installation
-0 string 5\x0athe_Ogre_Citadel Quake I save: e2m2 The ogre citadel
-0 string 5\x0athe_Crypt_of_Decay Quake I save: e2m3 The crypt of decay (dopefish lives!)
-0 string 5\x0aUnderearth Quake I save: e2m7 Underearth (secret)
-0 string 5\x0athe_Ebon_Fortress Quake I save: e2m4 The ebon fortress
-0 string 5\x0athe_Wizard's_Manse Quake I save: e2m5 The wizard's manse
-0 string 5\x0athe_Dismal_Oubliette Quake I save: e2m6 The dismal oubliette
-0 string 5\x0aTermination_Central Quake I save: e3m1 Termination central
-0 string 5\x0aVaults_of_Zin Quake I save: e3m2 Vaults of Zin
-0 string 5\x0athe_Tomb_of_Terror Quake I save: e3m3 The tomb of terror
-0 string 5\x0aSatan's_Dark_Delight Quake I save: e3m4 Satan's dark delight
-0 string 5\x0athe_Haunted_Halls Quake I save: e3m7 The haunted halls (secret)
-0 string 5\x0aWind_Tunnels Quake I save: e3m5 Wind tunnels
-0 string 5\x0aChambers_of_Torment Quake I save: e3m6 Chambers of torment
-0 string 5\x0athe_Sewage_System Quake I save: e4m1 The sewage system
-0 string 5\x0aThe_Tower_of_Despair Quake I save: e4m2 The tower of despair
-0 string 5\x0aThe_Elder_God_Shrine Quake I save: e4m3 The elder god shrine
-0 string 5\x0athe_Palace_of_Hate Quake I save: e4m4 The palace of hate
-0 string 5\x0aHell's_Atrium Quake I save: e4m5 Hell's atrium
-0 string 5\x0athe_Nameless_City Quake I save: e4m8 The nameless city (secret)
-0 string 5\x0aThe_Pain_Maze Quake I save: e4m6 The pain maze
-0 string 5\x0aAzure_Agony Quake I save: e4m7 Azure agony
-0 string 5\x0aShub-Niggurath's_Pit Quake I save: end Shub-Niggurath's pit
-
-# Quake DeathMatch levels
-
-0 string 5\x0aPlace_of_Two_Deaths Quake I save: dm1 Place of two deaths
-0 string 5\x0aClaustrophobopolis Quake I save: dm2 Claustrophobopolis
-0 string 5\x0aThe_Abandoned_Base Quake I save: dm3 The abandoned base
-0 string 5\x0aThe_Bad_Place Quake I save: dm4 The bad place
-0 string 5\x0aThe_Cistern Quake I save: dm5 The cistern
-0 string 5\x0aThe_Dark_Zone Quake I save: dm6 The dark zone
-
-# Scourge of Armagon
-
-0 string 5\x0aCommand_HQ Quake I save: start Command HQ
-0 string 5\x0aThe_Pumping_Station Quake I save: hip1m1 The pumping station
-0 string 5\x0aStorage_Facility Quake I save: hip1m2 Storage facility
-0 string 5\x0aMilitary_Complex Quake I save: hip1m5 Military complex (secret)
-0 string 5\x0athe_Lost_Mine Quake I save: hip1m3 The lost mine
-0 string 5\x0aResearch_Facility Quake I save: hip1m4 Research facility
-0 string 5\x0aAncient_Realms Quake I save: hip2m1 Ancient realms
-0 string 5\x0aThe_Gremlin's_Domain Quake I save: hip2m6 The gremlin's domain (secret)
-0 string 5\x0aThe_Black_Cathedral Quake I save: hip2m2 The black cathedral
-0 string 5\x0aThe_Catacombs Quake I save: hip2m3 The catacombs
-0 string 5\x0athe_Crypt__ Quake I save: hip2m4 The crypt
-0 string 5\x0aMortum's_Keep Quake I save: hip2m5 Mortum's keep
-0 string 5\x0aTur_Torment Quake I save: hip3m1 Tur torment
-0 string 5\x0aPandemonium Quake I save: hip3m2 Pandemonium
-0 string 5\x0aLimbo Quake I save: hip3m3 Limbo
-0 string 5\x0athe_Edge_of_Oblivion Quake I save: hipdm1 The edge of oblivion (secret)
-0 string 5\x0aThe_Gauntlet Quake I save: hip3m4 The gauntlet
-0 string 5\x0aArmagon's_Lair Quake I save: hipend Armagon's lair
-
-# Malice
-
-0 string 5\x0aThe_Academy Quake I save: start The academy
-0 string 5\x0aThe_Lab Quake I save: d1 The lab
-0 string 5\x0aArea_33 Quake I save: d1b Area 33
-0 string 5\x0aSECRET_MISSIONS Quake I save: d3b Secret missions
-0 string 5\x0aThe_Hospital Quake I save: d10 The hospital (secret)
-0 string 5\x0aThe_Genetics_Lab Quake I save: d11 The genetics lab (secret)
-0 string 5\x0aBACK_2_MALICE Quake I save: d4b Back to Malice
-0 string 5\x0aArea44 Quake I save: d1c Area 44
-0 string 5\x0aTakahiro_Towers Quake I save: d2 Takahiro towers
-0 string 5\x0aA_Rat's_Life Quake I save: d3 A rat's life
-0 string 5\x0aInto_The_Flood Quake I save: d4 Into the flood
-0 string 5\x0aThe_Flood Quake I save: d5 The flood
-0 string 5\x0aNuclear_Plant Quake I save: d6 Nuclear plant
-0 string 5\x0aThe_Incinerator_Plant Quake I save: d7 The incinerator plant
-0 string 5\x0aThe_Foundry Quake I save: d7b The foundry
-0 string 5\x0aThe_Underwater_Base Quake I save: d8 The underwater base
-0 string 5\x0aTakahiro_Base Quake I save: d9 Takahiro base
-0 string 5\x0aTakahiro_Laboratories Quake I save: d12 Takahiro laboratories
-0 string 5\x0aStayin'_Alive Quake I save: d13 Stayin' alive
-0 string 5\x0aB.O.S.S._HQ Quake I save: d14 B.O.S.S. HQ
-0 string 5\x0aSHOWDOWN! Quake I save: d15 Showdown!
-
-# Malice DeathMatch levels
-
-0 string 5\x0aThe_Seventh_Precinct Quake I save: ddm1 The seventh precinct
-0 string 5\x0aSub_Station Quake I save: ddm2 Sub station
-0 string 5\x0aCrazy_Eights! Quake I save: ddm3 Crazy eights!
-0 string 5\x0aEast_Side_Invertationa Quake I save: ddm4 East side invertationa
-0 string 5\x0aSlaughterhouse Quake I save: ddm5 Slaughterhouse
-0 string 5\x0aDOMINO Quake I save: ddm6 Domino
-0 string 5\x0aSANDRA'S_LADDER Quake I save: ddm7 Sandra's ladder
-
-
-0 string MComprHD MAME CHD compressed hard disk image,
->12 belong x version %u
-
-# MAME input recordings
-
-0 string MAMEINP\0 MAME input recording
->8 leqdate x at %s,
->16 leshort x format version %d.
->18 leshort x \b%d,
->20 string x %s driver,
->32 string x %s
-
-# doom - submitted by Jon Dowland
-
-0 string =IWAD doom main IWAD data
->4 lelong x containing %d lumps
-0 string =PWAD doom patch PWAD data
->4 lelong x containing %d lumps
-
-# Build engine group files (Duke Nukem, Shadow Warrior, ...)
-# Extension: .grp
-# Created by: "Ganael Laplanche" <ganael.laplanche@martymac.org>
-0 string KenSilverman Build engine group file
->12 lelong x containing %d files
-
-# Summary: Warcraft 3 save
-# Extension: .w3g
-# Created by: "Nelson A. de Oliveira" <naoliv@gmail.com>
-0 string Warcraft\ III\ recorded\ game %s
-
-
-# Summary: Warcraft 3 map
-# Extension: .w3m
-# Created by: "Nelson A. de Oliveira" <naoliv@gmail.com>
-0 string HM3W Warcraft III map file
-
-
-# Summary: SGF Smart Game Format
-# Extension: .sgf
-# Reference: https://www.red-bean.com/sgf/
-# Created by: Eduardo Sabbatella <eduardo_sabbatella@yahoo.com.ar>
-# Modified by (1): Abel Cheung (regex, more game format)
-# FIXME: Some games don't have GM (game type)
-0 regex \\(;.*GM\\[[0-9]{1,2}\\] Smart Game Format
->2 search/0x200/b GM[
->>&0 string 1] (Go)
->>&0 string 2] (Othello)
->>&0 string 3] (chess)
->>&0 string 4] (Gomoku+Renju)
->>&0 string 5] (Nine Men's Morris)
->>&0 string 6] (Backgammon)
->>&0 string 7] (Chinese chess)
->>&0 string 8] (Shogi)
->>&0 string 9] (Lines of Action)
->>&0 string 10] (Ataxx)
->>&0 string 11] (Hex)
->>&0 string 12] (Jungle)
->>&0 string 13] (Neutron)
->>&0 string 14] (Philosopher's Football)
->>&0 string 15] (Quadrature)
->>&0 string 16] (Trax)
->>&0 string 17] (Tantrix)
->>&0 string 18] (Amazons)
->>&0 string 19] (Octi)
->>&0 string 20] (Gess)
->>&0 string 21] (Twixt)
->>&0 string 22] (Zertz)
->>&0 string 23] (Plateau)
->>&0 string 24] (Yinsh)
->>&0 string 25] (Punct)
->>&0 string 26] (Gobblet)
->>&0 string 27] (hive)
->>&0 string 28] (Exxit)
->>&0 string 29] (Hnefatal)
->>&0 string 30] (Kuba)
->>&0 string 31] (Tripples)
->>&0 string 32] (Chase)
->>&0 string 33] (Tumbling Down)
->>&0 string 34] (Sahara)
->>&0 string 35] (Byte)
->>&0 string 36] (Focus)
->>&0 string 37] (Dvonn)
->>&0 string 38] (Tamsk)
->>&0 string 39] (Gipf)
->>&0 string 40] (Kropki)
-
-##############################################
-# NetImmerse/Gamebryo game engine entries
-
-# Summary: Gamebryo game engine file
-# Extension: .nif, .kf
-# Created by: Abel Cheung <abelcheung@gmail.com>
-0 string Gamebryo\ File\ Format,\ Version\ Gamebryo game engine file
->&0 regex [0-9a-z.]+ \b, version %s
-
-# Summary: Gamebryo game engine file
-# Extension: .kfm
-# Created by: Abel Cheung <abelcheung@gmail.com>
-0 string ;Gamebryo\ KFM\ File\ Version\ Gamebryo game engine animation File
->&0 regex [0-9a-z.]+ \b, version %s
-
-# Summary: NetImmerse game engine file
-# Extension .nif
-# Created by: Abel Cheung <abelcheung@gmail.com>
-0 string NetImmerse\ File\ Format,\ Version
->&0 string n\ NetImmerse game engine file
->>&0 regex [0-9a-z.]+ \b, version %s
-
-# Type: SGF Smart Game Format
-# URL: https://www.red-bean.com/sgf/
-# From: Eduardo Sabbatella <eduardo_sabbatella@yahoo.com.ar>
-2 regex/c \\(;.*GM\\[[0-9]{1,2}\\] Smart Game Format
->2 regex/c GM\\[1\\] - Go Game
->2 regex/c GM\\[6\\] - BackGammon Game
->2 regex/c GM\\[11\\] - Hex Game
->2 regex/c GM\\[18\\] - Amazons Game
->2 regex/c GM\\[19\\] - Octi Game
->2 regex/c GM\\[20\\] - Gess Game
->2 regex/c GM\\[21\\] - twix Game
-
-# Epic Games/Unreal Engine Package
-# URL: https://docs.unrealengine.com/udk/Three/ContentCooking.html
-# https://eliotvu.com/page/unreal-package-file-format
-# Little-endian version (such as x86 PC)
-0 lelong 0x9E2A83C1 Unreal Engine package (little-endian)
-!:ext xxx/tfc/upk/me1/u
->4 uleshort !0 \b, version %u
->>6 uleshort !0 \b/%03u
->>0 use upk_header
-# Big-endian version (such as PS3)
-0 belong 0x9E2A83C1 Unreal Engine package (big-endian)
-!:ext xxx/tfc
->6 ubeshort !0 \b, version %u
->>4 ubeshort !0 \b/%03u
->>0 use \^upk_header
-
-0 name upk_header
-# Identify game from version and licensee
->4 ulelong 0x000002b2 (Alice Madness Returns)
->4 ulelong 0x002f0313 (Aliens: Colonial Marines)
->4 ulelong 0x005b021b (Alpha Protocol)
->4 ulelong 0x0000032c (AntiChamber)
->4 ulelong 0x00200223 (APB: All Points Bulletin)
->4 ulelong 0x004b02d7 (Bioshock Infinite)
->4 ulelong 0x00380340 (Borderlands 2)
->4 ulelong 0x001d02e6 (Bulletstorm)
->4 ulelong 0x00050240 (CrimeCraft)
->4 ulelong 0x00000356 (Deadlight)
->4 ulelong 0x001e0321 (Dishonored)
->4 ulelong 0x000202a6 (Dungeon Defenders)
->4 ulelong 0x000901ea (Gears of War)
->4 ulelong 0x0000023f (Gears of War 2)
->4 ulelong 0x0000033c (Gears of War 3)
->4 ulelong 0x0000034e (Gears of War: Judgement)
->4 ulelong 0x0004035c (Hawken)
->4 ulelong 0x0001034a (Infinity Blade 2)
->4 ulelong 0x00000350 (InMomentum)
->4 ulelong 0x0015037D (Life Is Strange)
->4 ulelong 0x000b01a5 (Medal of Honor: Airborne)
->4 ulelong 0x002b0218 (Mirrors Edge)
->4 ulelong 0x0000027e (Monday Night Combat)
->4 ulelong 0x0000024b (MoonBase Alpha)
->4 ulelong 0x002e01d8 (Mortal Kombat Komplete Edition 2605)
->4 ulelong 0x0000035c (Painkiller HD)
->4 ulelong 0x0000034d (Q.U.B.E)
->4 ulelong 0x80660340 (Quantum Conundrum)
->4 ulelong 0x0000035b (Ravaged)
->4 ulelong 0x00150340 (Remember Me)
->4 ulelong 0x00060171 (Roboblitz)
->4 ulelong 0x00000325 (Rock of Ages)
->4 ulelong 0x0000032a (Sanctum)
->4 ulelong 0x00030248 (Saw)
->4 ulelong 0x007e0248 (Singularity)
->4 ulelong 0x00090388 (Soldier Front 2)
->4 ulelong 0x000701e6 (Stargate Worlds)
->4 ulelong 0x00000334 (Super Monday Night Combat)
->4 ulelong 0x000002c2 (The Ball)
->4 ulelong 0x000e0262 (The Exiled Realm of Arborea or TERA)
->4 ulelong 0x0000035b (The Five Cores)
->4 ulelong 0x00000349 (The Haunted: Hells Reach)
->4 ulelong 0x00000354 (Unmechanical)
->4 ulelong 0x035c0298 (Unreal Development Kit)
->4 ulelong 0x00000200 (Unreal Tournament 3)
->4 ulelong 0x0000032d (Waves)
->4 ulelong 0x003b034d (XCOM: Enemy Unknown)
-# Newer versions insert more headers
->4 ulelong&0xFFFF <249
->>12 lelong !0 \b, names: %d
->>28 lelong !0 \b, imports: %d
->>20 lelong !0 \b, exports: %d
->4 ulelong&0xFFFF >248
->>12 belong&0xFF !0
->>>12 string x \b, folder "%s"
->>>>&5 lelong !0 \b, names: %d
->>>>&21 lelong !0 \b, imports: %d
->>>>&13 lelong !0 \b, exports: %d
->>12 belong&0xFF 0
->>>16 belong&0xFF !0
->>>>16 string x \b, folder "%s"
->>>>>&5 lelong !0 \b, names: %d
->>>>>&21 lelong !0 \b, imports: %d
->>>>>&13 lelong !0 \b, exports: %d
->>>16 belong&0xFF 0
->>>>20 string x \b, folder "%s"
->>>>>&5 lelong !0 \b, names: %d
->>>>>&21 lelong !0 \b, imports: %d
->>>>>&13 lelong !0 \b, exports: %d
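A hedged Python sketch of the little-endian package header decoded by upk_header above; it covers only the simpler pre-249 layout (names count at 12, exports at 20, imports at 28), and the dictionary keys are illustrative names, not official Unreal Engine terminology:

import struct

def upk_summary(buf):
    if struct.unpack_from("<I", buf, 0)[0] != 0x9E2A83C1:
        return None                      # not a little-endian Unreal package
    version, licensee = struct.unpack_from("<HH", buf, 4)
    info = {"version": version, "licensee": licensee}
    if version < 249:                    # older packages keep counts at fixed offsets
        info["names"]   = struct.unpack_from("<i", buf, 12)[0]
        info["exports"] = struct.unpack_from("<i", buf, 20)[0]
        info["imports"] = struct.unpack_from("<i", buf, 28)[0]
    return info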
-
-0 string ESVG
->4 lelong 0x00160000
->10 string TOC\020 Empire Deluxe for DOS saved game
-
-# Sid Meier's Civilization V/VI
-# From: Benjamin Lowry <ben@ben.gmbh>
-0 string CIV5
->4 byte 0x08 Sid Meier's Civilization V saved game,
->>12 regex [0-9a-z.]+ saved by game version %s
->4 byte 0x01 Sid Meier's Civilization V replay data,
->>12 regex [0-9a-z.]+ saved by game version %s
-
-0 string CIV6 Sid Meier's Civilization VI saved game
-
-# https://syzygy-tables.info/
-# From Michel Van den Bergh
-0 string \327f\f\245 Syzygy DTZ tablebase
-!:mime application/syzygy
-0 string q\350#] Syzygy WDL tablebase
-!:mime application/syzygy
-
-##############################################################################
-# Grand Theft Auto (GTA) file formats.
-#
-# Summary:
-# Includes GTA-specific formats used in all games from 1997 to present. Games
-# and formats were created by Rockstar North, formerly DMA Design. Magic tests
-# were written based on a combination of official and community documentation.
-#
-# Created by: Oliver Galvin <odg@riseup.net>
-#
-# References:
-# * Classic GTA documentation and research:
-# <https://gitlab.com/classic-gta/gta-data>
-# * Official RenderWare documentation available from EA:
-# <https://github.com/electronicarts/RenderWare3Docs>
-# * Lots of community research in the GTAMods wiki:
-# <https://gtamods.com/wiki>
-
-# GTA 2D-Era data - 'Classic' top down games (1/L/2)
-
-## GTA text
-
-0 string \xbf\xf8\xbd\x49\x62\xbe GTA1 in-game text (FXT),
-0 string GBL GTA2 in-game text (GXT),
->3 string E English,
->>4 uleshort x version %d
->3 string F French,
->>4 uleshort x version %d
->3 string G German,
->>4 uleshort x version %d
->3 string I Italian,
->>4 uleshort x version %d
->3 string S Spanish,
->>4 uleshort x version %d
->3 string J Japanese,
->>4 uleshort x version %d
-
-## GTA maps
-
-0 ulelong 331 GTA1 map layout (CMP),
->4 byte 1 Level 1
->4 byte 2 Level 2
->4 byte 3 Level 3
-0 string GBMP GTA2/GBH map layout (GMP),
->4 uleshort x version %d
-0 string/t [MapFiles] GTA2 multiplayer map metadata (MMP)
-0 string/t MainOrBonus\ =\ MAIN GTA2 single player map listing (test1.seq)
-
-## GTA 2D sprites and textures
-
-0 ulelong 290 GTA1 style data (GRX), 8 bit editor graphics
-0 ulelong 325 GTA1 style data (GRY), 8 bit in-game graphics
-0 ulelong 336 GTA1 style data (G24), 24 bit in-game graphics
-0 string GBST GTA2/GBH style data (STY), in-game graphics,
->4 uleshort x version %d
-
-## GTA audio index
-
-0 ulelong 0
->4 ulelong <0x40000
->>8 ulelong >4500
->>>8 ulelong <45000 GTA audio index data (SDT)
-
-## GTA scripts
-
-0 ulelong 0x00080000
->4 uleshort 0x0024 GTA2 binary main script (SCR)
-
-0 uleshort 0x063c GTA2 binary mission script (SCR), Residential area (ste)
-0 uleshort 0x055b GTA2 binary mission script (SCR), Downtown area (wil)
-0 uleshort 0x0469 GTA2 binary mission script (SCR), Industrial area (bil)
-
-0 string v9.6\0\0 GTA2 replay file (REP),
->8 regex/30c [a-z0-9:\ ]+\0\0 created on %s
-
-# GTA 3D-Era (III/VC/SA/LCS/VCS) - used by the RenderWare engine by Criterion Games
-
-## GTA 3D models and textures - RenderWare binary streams
-
-8 ulelong 0x00000310 RenderWare data, v3.1.0.0, used in GTA III on PS2,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x0401ffff RenderWare data, v3.1.0.1, used in GTA III on PC/PS2,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x0800ffff RenderWare data, v3.2.0.0, used in GTA III on PC,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x0c00ffff RenderWare data, v3.3.0.0,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x0c02ffff RenderWare data, v3.3.0.2, used in GTA III PC and GTA VC PS2,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x1000ffff RenderWare data, v3.4.0.0,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x1003ffff RenderWare data, v3.4.0.3, used in GTA VC PC,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x1005ffff RenderWare data, v3.4.0.5, used in GTA III/VC on Android,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x1400ffff RenderWare data, v3.5.0.0, used in GTA III/VC on Xbox,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-8 ulelong 0x1803ffff RenderWare data, v3.6.0.3, used in GTA SA,
->0 ulelong 0x00000016 texture archive (TXD)
->0 ulelong 0x00000010 3D models (DFF)
-
-0 string COL RenderWare collision data (COL),
->3 string L version 1, used in GTA III/VC/SA
->3 string 2 version 2, used in GTA SA
->3 string 3 version 3, used in GTA SA
->3 string 4 version 4, used in GTA SA
-
-## GTA items and animations
-
-0 string/c #\ ipl\ generated\ from\ max\ file GTA Item Placement data (IPL), used in GTA III/VC
-0 string/b bnry GTA Item Placement data (IPL), used in GTA SA/IV,
->4 ulelong x %d items
-
-0 string ANP GTA animation data (IFP),
->3 string K version 1, used in GTA III/VC
->3 string 3 version 2, used in GTA SA
-
-0 string GtaSA29 GTA Replay data (REP), used in GTA SA
-
-## GTA text
-
-0 string TKEY GTA in-game text (GXT), version 2, used in GTA III
-0 string TABL GTA in-game text (GXT), version 3, used in GTA VC/LS/VCS
-
-## GTA scripts
-
-0 string \x02\x00\x01 GTA script (SCM), used in GTA III/VC/SA
-
-## GTA archives
-
-0 string VER2 GTA archive (IMG), version 2, used in GTA SA,
->4 ulelong x %d items
-
-# GTA HD-Era (IV/V) - used by the Rockstar Advanced Game Engine (RAGE)
-
-## GTA models and textures - RAGE resources
-# Note: GTA IV formats not yet documented - WAD, WBD, WBN, WHM, WPL
-
-0 ulelong 0x00695254 GTA Drawable data (WDR), model and weapon data, used in GTA IV
-0 ulelong 0x00695238 GTA Windows Frag Type (WFT), vehicle models, used in GTA IV
-0 ulelong 0x006953A4 GTA Ped and LOD models (WDD), used in GTA IV
-0 ulelong 0x00695384 GTA Windows Texture Dictionary (WTD), used in GTA IV
-
-## GTA text
-
-4 string TABL GTA in-game text (GXT),
->0 uleshort x version %d, used in GTA SA/IV
-0 string 2GXT GTA in-game text (GXT2), used in GTA V
-
-## GTA scripts
-
-0 ulelong 0x0d524353 GTA script (SCO), unencrypted, used in GTA IV,
->4 ulelong x %d code bytes,
->>8 ulelong x %d static variables,
->>>12 ulelong x %d global variables
-0 ulelong 0x0e726373 GTA script (SCO), encrypted, used in GTA IV
->4 ulelong x %d code bytes,
->>8 ulelong x %d static variables,
->>>12 ulelong x %d global variables
-
-## GTA archives
-
-0 ulelong 0xa94e2a52 GTA archive (IMG),
->4 ulelong x version %d, used in GTA IV,
->>8 ulelong x %d items
-
-# RPF[0-8]
-0 ulelong&0xfffffff0 =0x52504630
->0 ulelong&0xf <9 RAGE Package Format (RPF), version %d, used in
->>0 ulelong&0xf =0 Rockstar Table Tennis,
->>0 ulelong&0xf =1 *unknown*
->>0 ulelong&0xf =2 GTA IV,
->>0 ulelong&0xf =3 GTA IV Audio & Midnight Club: LA,
->>0 ulelong&0xf =4 Max Payne 3,
->>0 ulelong&0xf =5 *unknown*
->>0 ulelong&0xf =6 RDR,
->>0 ulelong&0xf =7 GTA V,
->>0 ulelong&0xf =8 RDR 2,
->>4 ulelong x %d bytes,
->>>8 ulelong x %d entries
-
-# Blitz3D Model File Format
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/minetest/B3DExport/blob/master/B3DExport.py
-0 string BB3D
->4 lelong >0
->>8 lelong >0 Blitz3D Model
-!:ext b3d
->>>8 lelong x \b, version %d
-
-# Minetest Schematic File Format
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/minetest/minetest/blob/5.6.1/src/mapgen/mg_schematic.h
-0 string MTSM Minetest Schematic
-!:ext mts
->4 ubeshort x \b, version %d
->6 ubeshort x \b, size [%d
->8 ubeshort x \b, %d
->10 ubeshort x \b, %d]
-
-# MagicaVoxel File Format
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/ephtracy/voxel-model/blob/ee2216c28a78ebb68691dc6cfa9c4ba429117ea2/MagicaVoxel-file-format-vox.txt
-# Note: This format is used in Veloren voxel RPG.
-0 string VOX\x20
->4 lelong >0 MagicaVoxel model
-!:ext vox
->>4 lelong x \b, version %d
-
-# Wwise SoundBank
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://wiki.xentax.com/index.php/Wwise_SoundBank_(*.bnk)
-0 string BKHD
-# Little-endian version (such as x86 PC)
->4 ulelong <0x100 Wwise SoundBank (little-endian)
-!:ext bnk
->>0 use wwise_bkhd
-# Big-endian version (such as PS3)
->4 ubelong <0x100 Wwise SoundBank (big-endian)
-!:ext bnk
->>0 use \^wwise_bkhd
-
-0 name wwise_bkhd
->8 ulelong x \b, version %d
->12 ulelong x \b, id %08X
->16 ulelong =0x00 \b, SFX
->16 ulelong =0x01 \b, arabic
->16 ulelong =0x02 \b, bulgarian
->16 ulelong =0x03 \b, chinese (HK)
->16 ulelong =0x04 \b, chinese (PRC)
->16 ulelong =0x05 \b, chinese (Taiwan)
->16 ulelong =0x06 \b, czech
->16 ulelong =0x07 \b, danish
->16 ulelong =0x08 \b, dutch
->16 ulelong =0x09 \b, english (Australia)
->16 ulelong =0x0A \b, english (India)
->16 ulelong =0x0B \b, english (UK)
->16 ulelong =0x0C \b, english (US)
->16 ulelong =0x0D \b, finnish
->16 ulelong =0x0E \b, french (Canada)
->16 ulelong =0x0F \b, french (France)
->16 ulelong =0x10 \b, german
->16 ulelong =0x11 \b, greek
->16 ulelong =0x12 \b, hebrew
->16 ulelong =0x13 \b, hungarian
->16 ulelong =0x14 \b, indonesian
->16 ulelong =0x15 \b, italian
->16 ulelong =0x16 \b, japanese
->16 ulelong =0x17 \b, korean
->16 ulelong =0x18 \b, latin
->16 ulelong =0x19 \b, norwegian
->16 ulelong =0x1A \b, polish
->16 ulelong =0x1B \b, portuguese (Brazil)
->16 ulelong =0x1C \b, portuguese (Portugal)
->16 ulelong =0x1D \b, romanian
->16 ulelong =0x1E \b, russian
->16 ulelong =0x1F \b, slovenian
->16 ulelong =0x20 \b, spanish (Mexico)
->16 ulelong =0x21 \b, spanish (Spain)
->16 ulelong =0x22 \b, spanish (US)
->16 ulelong =0x23 \b, swedish
->16 ulelong =0x24 \b, turkish
->16 ulelong =0x25 \b, ukrainian
->16 ulelong =0x26 \b, vietnamese
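For illustration, a small Python sketch reading the three BKHD fields reported above (version at 8, bank id at 12, language code at 16); endianness is chosen by the caller, mirroring the two variants:

import struct

def wwise_bkhd_info(buf, big_endian=False):
    if buf[:4] != b"BKHD":
        return None
    fmt = ">III" if big_endian else "<III"
    version, bank_id, language = struct.unpack_from(fmt, buf, 8)
    return {"version": version, "id": bank_id, "language": language}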
-
-# Wwise Audio Package
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://wiki.xentax.com/index.php/Wwise_Audio_PCK
-0 string AKPK
-# Little-endian version (such as x86 PC)
->8 ulelong <0x100 Wwise Audio Package (little-endian)
-!:ext pck
-# Big-endian version (such as PS3)
->8 ubelong <0x100 Wwise Audio Package (big-endian)
-!:ext pck
diff --git a/contrib/libs/libmagic/magic/Magdir/gcc b/contrib/libs/libmagic/magic/Magdir/gcc
deleted file mode 100644
index ae98dc7dbc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gcc
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gcc,v 1.5 2016/07/01 23:31:13 christos Exp $
-# gcc: file(1) magic for GCC special files
-#
-0 string gpch GCC precompiled header
-
-# The version field is annoying. It's 3 characters, not zero-terminated.
->5 byte x (version %c
->6 byte x \b%c
->7 byte x \b%c)
-
-# 67 = 'C', 111 = 'o', 43 = '+', 79 = 'O'
->4 byte 67 for C
->4 byte 111 for Objective-C
->4 byte 43 for C++
->4 byte 79 for Objective-C++
diff --git a/contrib/libs/libmagic/magic/Magdir/gconv b/contrib/libs/libmagic/magic/Magdir/gconv
deleted file mode 100644
index eec5ddcd7a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gconv
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gconv
-# gconv: file(1) magic for iconv/gconv module configuration cache
-#
-# Magic number defined in glibc/iconv/iconvconfig.h as GCONVCACHE_MAGIC
-#
-# From: Marek Cermak <macermak@redhat.com>
-#
-0 lelong 0x20010324 gconv module configuration cache data
diff --git a/contrib/libs/libmagic/magic/Magdir/gentoo b/contrib/libs/libmagic/magic/Magdir/gentoo
deleted file mode 100644
index f988047ad4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gentoo
+++ /dev/null
@@ -1,85 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: gentoo,v 1.5 2022/12/26 17:16:55 christos Exp $
-# gentoo: file(1) magic for gentoo specific formats
-#
-# Summary: Gentoo ebuild Manifest files (GLEP 74)
-# Reference: https://www.gentoo.org/glep/glep-0074.html
-# Submitted by: Michal Gorny <mgorny@gentoo.org>
-# Start by doing a fast check for the most common tags.
-0 string AUX
->0 use gentoo-manifest
-0 string DATA
->0 use gentoo-manifest
-0 string DIST
->0 use gentoo-manifest
-0 string EBUILD
->0 use gentoo-manifest
-0 string MANIFEST
->0 use gentoo-manifest
-
-# Manifest can be PGP-signed.
-0 string -----BEGIN\040PGP\040SIGNED\040MESSAGE-----
->34 search/32 \n\n
->>&0 string AUX
->>>&0 use gentoo-manifest
->>&0 string DATA
->>>&0 use gentoo-manifest
->>&0 string DIST
->>>&0 use gentoo-manifest
->>&0 string EBUILD
->>>&0 use gentoo-manifest
->>&0 string MANIFEST
->>>&0 use gentoo-manifest
-
-# Use a more detailed regex to verify that we were correct.
-# <tag> <filename> <size> <hash-name> <hash-value>...
-# (<tag>'s already been matched prior to calling)
-0 name gentoo-manifest
->&0 regex [[:space:]]+[[:print:]]+[[:space:]]+[[:digit:]]+[[:space:]]+[[:alnum:]]+[[:space:]]+[[:xdigit:]]{32} Gentoo Manifest (GLEP 74)
-!:mime application/vnd.gentoo.manifest
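Roughly the same check in Python, for illustration; the pattern is a loose transcription of the regex above (tag, filename, size, then hash-name/hash-value pairs), not the canonical GLEP 74 grammar:

import re

MANIFEST_LINE = re.compile(
    r"^(AUX|DATA|DIST|EBUILD|MANIFEST)"      # tag
    r"\s+\S+"                                # filename
    r"\s+\d+"                                # size
    r"(\s+\w+\s+[0-9A-Fa-f]{32,})+\s*$")     # one or more hash-name/value pairs

def looks_like_gentoo_manifest_line(line):
    return bool(MANIFEST_LINE.match(line))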
-
-# Summary: Gentoo ebuild and eclass files
-# Reference: https://projects.gentoo.org/pms/8/pms.html
-# Submitted by: Michal Gorny <mgorny@gentoo.org>
-0 search/512 EAPI=
->0 regex .*\n[\040\t]*EAPI=["']? Gentoo ebuild
->>&0 regex [[:alnum:]+_.-]+ \b, EAPI %s
-!:mime application/vnd.gentoo.ebuild
-
-0 search/512 @ECLASS:\040 Gentoo eclass
->&0 string x %s
-!:mime application/vnd.gentoo.eclass
-
-# Summary: Gentoo supplementary package and category metadata files
-# Reference: https://www.gentoo.org/glep/glep-0068.html
-# Submitted by: Michal Gorny <mgorny@gentoo.org>
-0 string \<?xml
->0 search/512 \<catmetadata Gentoo category metadata file
-!:mime application/vnd.gentoo.catmetadata+xml
->0 search/512 \<pkgmetadata Gentoo package metadata file
-!:mime application/vnd.gentoo.pkgmetadata+xml
-
-# Summary: Gentoo GLEP 78 binary package
-# Reference: https://www.gentoo.org/glep/glep-0078.html
-# Note: assumes the strict format
-# Submitted by: Michal Gorny <mgorny@gentoo.org>
-
-# GPKG uses ustar (or ustar-compatible GNU format) that starts with
-# a <directory>/gpkg-1 file
-257 string ustar
->0 search/100 /gpkg-1\0
->>0 regex [^/]+ Gentoo GLEP 78 (GPKG) binary package for "%s"
-!:mime application/vnd.gentoo.gpkg
-!:ext tar
-# the logic below requires the gpkg-1 file to be empty
->>>124 string 00000000000\0
-# determine the compression used by looking at the second member name
->>>>512 search/100 .tar.
->>>>>&0 string gz\0 using gzip compression
->>>>>&0 string bz2\0 using bzip2 compression
->>>>>&0 string lz\0 using lzip compression
->>>>>&0 string lz4\0 using lz4 compression
->>>>>&0 string lzo\0 using lzo compression
->>>>>&0 string xz\0 using xz compression
->>>>>&0 string zst\0 using zstd compression
->>>>(636.o+1024) search/611 .sig\0 \b, signed
diff --git a/contrib/libs/libmagic/magic/Magdir/geo b/contrib/libs/libmagic/magic/Magdir/geo
deleted file mode 100644
index 1fde25e57b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/geo
+++ /dev/null
@@ -1,166 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: geo,v 1.10 2022/10/31 13:22:26 christos Exp $
-# Geo- files from Kurt Schwehr <schwehr@ccom.unh.edu>
-
-######################################################################
-#
-# Acoustic Doppler Current Profilers (ADCP)
-#
-######################################################################
-
-0 beshort 0x7f7f RDI Acoustic Doppler Current Profiler (ADCP)
-
-######################################################################
-#
-# Metadata
-#
-######################################################################
-
-0 string Identification_Information FGDC ASCII metadata
-
-######################################################################
-#
-# Seimsic / Subbottom
-#
-######################################################################
-
-# Knudsen subbottom chirp profiler - Binary File Format: B9
-# KEB D409-03167 V1.75 Huffman
-0 string KEB\ Knudsen seismic KEL binary (KEB) -
->4 regex [-A-Z0-9]+ Software: %s
->>&1 regex V[0-9]+\\.[0-9]+ version %s
-
-######################################################################
-#
-# LIDAR - Laser altimetry or bathy
-#
-######################################################################
-
-
-# Caris LIDAR format for LADS comes as two parts... ascii location file and binary waveform data
-0 string HCA LADS Caris Ascii Format (CAF) bathymetric lidar
->4 regex [0-9]+\\.[0-9]+ version %s
-
-0 string HCB LADS Caris Binary Format (CBF) bathymetric lidar waveform data
->3 byte x version %d .
->4 byte x %d
-
-
-######################################################################
-#
-# MULTIBEAM SONARS https://www.ldeo.columbia.edu/res/pi/MB-System/formatdoc/
-#
-######################################################################
-
-# GeoAcoustics - GeoSwath Plus
-# Update: Joerg Jenderek
-# URL: https://www.mbari.org/products/research-software/mb-system/
-# Reference: http://ccom.unh.edu/sites/default/files/news-and-events/conferences/auv-bootcamp/
-# GS%2B-6063-BB-GS%2B-Broadcast-Raw-Data-File-Format-Command-Specification.pdf
-# Note: All data is written using Intel 80x86 byte ordering (LSB to MSB)
-# raw_header_siz; file header size is 544 bytes
-4 beshort 0x2002
-# GRR: line above is too general as it matches also some Microsoft Event Trace Logs *.ETL
-# skip many (63/753) Microsoft Event Trace Logs (AMSITrace.etl lxcore_kernel.etl NotificationUxBroker.052.etl WindowsBackup.4.etl) with invalid "low" ping header size 0
->6 leshort >0 GeoSwath RDF
-# skip foo samples with invalid "high" spare bytes
-#>>536 ulequad =0 OK_THIS_IS_GeoSwath_RDF
-#!:mime application/octet-stream
-!:mime application/x-geoswath-rdf
-# http://ccom.unh.edu/sites/default/files/news-and-events/conferences/auv-bootcamp/060116342.rdf
-!:ext rdf
-# filename; original file name like: "C:\GS+\Projects\Default\Raw Data Files\060116342.rdf"
->>8 string x "%-.512s"
-# version[8]; recording software version number like: 3.16c
->>527 string x \b, version %-.8s
-# creation; unsigned int file creation time; WHAT time format is this?
->>0 ulelong x \b, creation time %#8.8x
-# raw_ping_header_size; size of ping header in bytes like: 64
->>6 leshort !64 \b, ping header size %d
-# frequency; system frequency in hertz like: 500000
->>520 lelong x \b, frequency %d
-# echo_type; Echosounder type index like: 1
->>524 leshort x \b, echo type %#x
-# file_mode; file mode mask (0x00 bathy & sidescan, 0x80 bathy, 0x40 sidescan, 0x20 seismic)
->>526 ubyte !0 \b, file mode %#2.2x
-# pps_mode; PPS synch mode like: 2
->>535 byte x \b, pps mode %#x
-# char spare[8]; apparently zeroed
->>536 ubequad !0 \b, spare %#16.16llx
-# Ping_number; 1st ping number like: 4944
->>544 lelong x \b, 1st ping number %d
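A rough Python sketch of the 544-byte GeoSwath RDF file header decoded above; the offsets come straight from the magic lines (little-endian unless noted) and the field names follow the comments, not the official GS+ specification:

import struct

def geoswath_rdf_header(buf):
    return {
        "creation":         struct.unpack_from("<I", buf, 0)[0],
        "magic":            struct.unpack_from(">H", buf, 4)[0],   # 0x2002, big-endian
        "ping_header_size": struct.unpack_from("<h", buf, 6)[0],
        "filename":         buf[8:520].split(b"\0", 1)[0].decode("latin-1"),
        "frequency":        struct.unpack_from("<i", buf, 520)[0],
        "echo_type":        struct.unpack_from("<h", buf, 524)[0],
        "file_mode":        buf[526],
        "version":          buf[527:535].split(b"\0", 1)[0].decode("latin-1"),
        "pps_mode":         buf[535],
    }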
-
-0 string Start:- GeoSwatch auf text file
-
-# Seabeam 2100
-# mbsystem code mb41
-0 string SB2100 SeaBeam 2100 multibeam sonar
-0 string SB2100DR SeaBeam 2100 DR multibeam sonar
-0 string SB2100PR SeaBeam 2100 PR multibeam sonar
-
-# This corresponds to MB-System format 94, L-3/ELAC/SeaBeam XSE vendor
-# format. It is the format of our upgraded SeaBeam 2112 on R/V KNORR.
-0 string $HSF XSE multibeam
-
-# mb121 https://www.saic.com/maritime/gsf/
-8 string GSF-v SAIC generic sensor format (GSF) sonar data,
->&0 regex [0-9]+\\.[0-9]+ version %s
-
-# MGD77 - https://www.ngdc.noaa.gov/mgg/dat/geodas/docs/mgd77.htm
-# mb161
-9 string MGD77 MGD77 Header, Marine Geophysical Data Exchange Format
-
-# MBSystem processing caches the mbinfo output
-1 string Swath\ Data\ File: mbsystem info cache
-
-# Caris John Hughes Clark format
-0 string HDCS Caris multibeam sonar related data
-1 string Start/Stop\ parameter\ header: Caris ASCII project summary
-
-######################################################################
-#
-# Visualization and 3D modeling
-#
-######################################################################
-
-# IVS - IVS3d.com Tagged Data Representation
-0 string %%\ TDR\ 2.0 IVS Fledermaus TDR file
-
-# http://www.ecma-international.org/publications/standards/Ecma-363.htm
-# 3D in PDFs
-0 string U3D ECMA-363, Universal 3D
-
-######################################################################
-#
-# Support files
-#
-######################################################################
-
-# https://midas.psi.ch/elog/
-0 string $@MID@$ elog journal entry
-
-# Geospatial Designs https://www.geospatialdesigns.com/surfer6_format.htm
-0 string DSBB Surfer 6 binary grid file
->4 leshort x \b, %d
->6 leshort x \bx%d
->8 ledouble x \b, minx=%g
->16 ledouble x \b, maxx=%g
->24 ledouble x \b, miny=%g
->32 ledouble x \b, maxy=%g
->40 ledouble x \b, minz=%g
->48 ledouble x \b, maxz=%g
-
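The DSBB header above is small enough to decode directly; a short Python sketch (illustrative, not part of the magic database) that unpacks the same fields:

    import struct

    def read_surfer6_grid_header(path):
        # Layout per the DSBB entry above: "DSBB", nx, ny, then six little-endian doubles.
        with open(path, "rb") as f:
            hdr = f.read(56)
        if len(hdr) < 56 or hdr[:4] != b"DSBB":
            return None
        nx, ny = struct.unpack_from("<hh", hdr, 4)
        minx, maxx, miny, maxy, minz, maxz = struct.unpack_from("<6d", hdr, 8)
        return {"nx": nx, "ny": ny, "x": (minx, maxx), "y": (miny, maxy), "z": (minz, maxz)}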
-# magic for LAS format files
-# alex myczko <alex@aiei.ch>
-# https://www.asprs.org/wp-content/uploads/2010/12/LAS_1_3_r11.pdf
-0 string LASF LIDAR point data records
->24 byte >0 \b, version %u
->25 byte >0 \b.%u
->26 string >\0 \b, SYSID %s
->58 string >\0 \b, Generating Software %s
-
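A comparable Python sketch for the LASF entry above, reading the public-header fields it prints (version bytes at 24/25, system identifier at 26, generating software at 58); the function name is hypothetical:

    def read_las_header(path):
        # Sketch mirroring the LASF entry above (LAS 1.x public header offsets).
        with open(path, "rb") as f:
            hdr = f.read(90)
        if len(hdr) < 90 or hdr[:4] != b"LASF":
            return None
        return {
            "version": (hdr[24], hdr[25]),
            "system_id": hdr[26:58].rstrip(b"\0").decode("ascii", "replace"),
            "generating_software": hdr[58:90].rstrip(b"\0").decode("ascii", "replace"),
        }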
-# magic for PCD format files
-# alex myczko <alex@aiei.ch>
-# http://pointclouds.org/documentation/tutorials/pcd_file_format.php
-0 string #\ .PCD Point Cloud Data
diff --git a/contrib/libs/libmagic/magic/Magdir/geos b/contrib/libs/libmagic/magic/Magdir/geos
deleted file mode 100644
index 66c2bd1a29..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/geos
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: geos,v 1.4 2009/09/19 16:28:09 christos Exp $
-# GEOS files (Vidar Madsen, vidar@gimp.org)
-# semi-commonly used in embedded and handheld systems.
-0 belong 0xc745c153 GEOS
->40 byte 1 executable
->40 byte 2 VMFile
->40 byte 3 binary
->40 byte 4 directory label
->40 byte <1 unknown
->40 byte >4 unknown
->4 string >\0 \b, name "%s"
-#>44 short x \b, version %d
-#>46 short x \b.%d
-#>48 short x \b, rev %d
-#>50 short x \b.%d
-#>52 short x \b, proto %d
-#>54 short x \br%d
-#>168 string >\0 \b, copyright "%s"
diff --git a/contrib/libs/libmagic/magic/Magdir/gimp b/contrib/libs/libmagic/magic/Magdir/gimp
deleted file mode 100644
index e763cbec83..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gimp
+++ /dev/null
@@ -1,77 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gimp,v 1.10 2019/10/15 18:19:40 christos Exp $
-# GIMP Gradient: file(1) magic for the GIMP's gradient data files (.ggr)
-# by Federico Mena <federico@nuclecu.unam.mx>
-
-0 string/t GIMP\ Gradient GIMP gradient data
-#!:mime text/plain
-!:mime text/x-gimp-ggr
-!:ext ggr
-
-# GIMP palette (.gpl)
-# From: Markus Heidelberg <markus.heidelberg@web.de>
-0 string/t GIMP\ Palette GIMP palette data
-# URL: https://docs.gimp.org/en/gimp-concepts-palettes.html
-# Reference: http://fileformats.archiveteam.org/wiki/GIMP_Palette
-#!:mime text/plain
-!:mime text/x-gimp-gpl
-!:ext gpl
-
-#------------------------------------------------------------------------------
-# XCF: file(1) magic for the XCF image format used in the GIMP (.xcf) developed
-# by Spencer Kimball and Peter Mattis
-# ('Bucky' LaDieu, nega@vt.edu)
-
-# URL: https://en.wikipedia.org/wiki/XCF_(file_format)
-# Reference: https://gitlab.gnome.org/GNOME/gimp/blob/master/devel-docs/xcf.txt
-0 string gimp\ xcf GIMP XCF image data,
-!:mime image/x-xcf
-!:ext xcf
->9 string file version 0,
->9 string v version
->>10 string >\0 %s,
->14 belong x %u x
->18 belong x %u,
->22 belong 0 RGB Color
->22 belong 1 Greyscale
->22 belong 2 Indexed Color
->22 belong >2 Unknown Image Type.
-
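The XCF entry above checks the fixed header documented in xcf.txt; a minimal Python sketch (illustrative only) that reads the same fields:

    import struct

    XCF_BASE_TYPES = {0: "RGB", 1: "Greyscale", 2: "Indexed"}

    def read_xcf_header(path):
        # "gimp xcf " + 4-byte version tag + NUL, then width, height, base type (big-endian),
        # exactly the offsets used in the entry above.
        with open(path, "rb") as f:
            hdr = f.read(26)
        if len(hdr) < 26 or not hdr.startswith(b"gimp xcf "):
            return None
        version = hdr[9:13].decode("ascii", "replace")   # "file" means version 0
        width, height, base_type = struct.unpack_from(">III", hdr, 14)
        return {"version": version, "width": width, "height": height,
                "base_type": XCF_BASE_TYPES.get(base_type, "unknown")}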
-#------------------------------------------------------------------------------
-# XCF: file(1) magic for the patterns used in the GIMP (.pat), developed
-# by Spencer Kimball and Peter Mattis
-# ('Bucky' LaDieu, nega@vt.edu)
-
-# Reference: http://fileformats.archiveteam.org/wiki/GIMP_Pattern
-20 string GPAT GIMP pattern data,
->24 string x %s
-!:mime image/x-gimp-pat
-!:ext pat
-
-#------------------------------------------------------------------------------
-# XCF: file(1) magic for the brushes used in the GIMP (.gbr), developed
-# by Spencer Kimball and Peter Mattis
-# ('Bucky' LaDieu, nega@vt.edu)
-
-20 string GIMP GIMP brush data
-# Reference: http://fileformats.archiveteam.org/wiki/GIMP_Brush
-!:mime image/x-gimp-gbr
-# some sources also list gpb
-!:ext gbr
-
-# From: Joerg Jenderek
-# URL: https://docs.gimp.org/en/gimp-using-animated-brushes.html
-# Reference: http://fileformats.archiveteam.org/wiki/GIMP_Animated_Brush
-# share\gimp\2.0\brushes\Legacy\confetti.gih
-0 search/21/b \040ncells: GIMP animated brush data
-!:mime image/x-gimp-gih
-!:ext gih
-
-# GIMP Curves File
-# From: "Nelson A. de Oliveira" <naoliv@gmail.com>
-0 string #\040GIMP\040Curves\040File GIMP curve file
-#!:mime text/plain
-!:mime text/x-gimp-curve
-!:ext /txt
-
diff --git a/contrib/libs/libmagic/magic/Magdir/git b/contrib/libs/libmagic/magic/Magdir/git
deleted file mode 100644
index 67eab32a66..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/git
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: git,v 1.2 2020/08/09 16:57:15 christos Exp $
-# git: file(1) magic for Git objects
-
-0 string blob\040
->5 regex [0-9a-f]+ Git blob %s
-
-0 string tree\040
->5 regex [0-9a-f]+ Git tree %s
-
-0 string commit\040
->7 regex [0-9a-f]+ Git commit %s
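The three entries above match plain text that begins with a git object-type keyword; loose objects under .git/objects/ are zlib-deflated on disk, so a checker normally inflates them before looking at the header. A small Python sketch (helper name made up):

    import zlib

    def loose_object_header(path):
        # Inflate the object, then split the "<type> <size>\0" header from the body.
        with open(path, "rb") as f:
            data = zlib.decompress(f.read())
        header, _, body = data.partition(b"\0")
        obj_type, size = header.split(b" ", 1)
        return obj_type.decode(), int(size), body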
diff --git a/contrib/libs/libmagic/magic/Magdir/glibc b/contrib/libs/libmagic/magic/Magdir/glibc
deleted file mode 100644
index 3b856f3836..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/glibc
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: glibc,v 1.1 2018/10/11 15:35:43 christos Exp $
-# glibc locale files
-#
-# https://sourceware.org/git/?p=glibc.git;f=locale/localeinfo.h;h=68822a63#l32
-
-0 belong 0x20070920 glibc locale file LC_CTYPE
-0 belong 0x14110320 glibc locale file LC_NUMERIC
-0 belong 0x17110320 glibc locale file LC_TIME
-0 belong 0x17100520 glibc locale file LC_COLLATE
-0 belong 0x11110320 glibc locale file LC_MONETARY
-0 belong 0x10110320 glibc locale file LC_MESSAGES
-0 belong 0x13110320 glibc locale file LC_ALL
-0 belong 0x12110320 glibc locale file LC_PAPER
-0 belong 0x1d110320 glibc locale file LC_NAME
-0 belong 0x1c110320 glibc locale file LC_ADDRESS
-0 belong 0x1f110320 glibc locale file LC_TELEPHONE
-0 belong 0x1e110320 glibc locale file LC_MEASUREMENT
-0 belong 0x19110320 glibc locale file LC_IDENTIFICATION
-
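Since every category is distinguished only by its first big-endian 32-bit word, the table above reduces to a lookup; a minimal Python sketch (names illustrative):

    import struct

    # Magic values from locale/localeinfo.h, as listed above.
    GLIBC_LOCALE_MAGIC = {
        0x20070920: "LC_CTYPE",     0x14110320: "LC_NUMERIC",
        0x17110320: "LC_TIME",      0x17100520: "LC_COLLATE",
        0x11110320: "LC_MONETARY",  0x10110320: "LC_MESSAGES",
        0x13110320: "LC_ALL",       0x12110320: "LC_PAPER",
        0x1d110320: "LC_NAME",      0x1c110320: "LC_ADDRESS",
        0x1f110320: "LC_TELEPHONE", 0x1e110320: "LC_MEASUREMENT",
        0x19110320: "LC_IDENTIFICATION",
    }

    def glibc_locale_category(path):
        with open(path, "rb") as f:
            word = f.read(4)
        if len(word) < 4:
            return None
        return GLIBC_LOCALE_MAGIC.get(struct.unpack(">I", word)[0])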
diff --git a/contrib/libs/libmagic/magic/Magdir/gnome b/contrib/libs/libmagic/magic/Magdir/gnome
deleted file mode 100644
index 7a45d1d586..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gnome
+++ /dev/null
@@ -1,59 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gnome,v 1.7 2020/06/23 16:17:08 christos Exp $
-# GNOME related files
-
-# Contributed by Josh Triplett
-# FIXME: Could be simplified if pstring supported two-byte counts
-0 string GnomeKeyring\n\r\0\n GNOME keyring
->&0 ubyte 0 \b, major version 0
->>&0 ubyte 0 \b, minor version 0
->>>&0 ubyte 0 \b, crypto type 0 (AES)
->>>&0 ubyte >0 \b, crypto type %u (unknown)
->>>&1 ubyte 0 \b, hash type 0 (MD5)
->>>&1 ubyte >0 \b, hash type %u (unknown)
->>>&2 ubelong 0xFFFFFFFF \b, name NULL
->>>&2 ubelong !0xFFFFFFFF
->>>>&-4 ubelong >255 \b, name too long for file's pstring type
->>>>&-4 ubelong <256
->>>>>&-1 pstring x \b, name "%s"
->>>>>>&0 ubeqdate x \b, last modified %s
->>>>>>&8 ubeqdate x \b, created %s
->>>>>>&16 ubelong &1
->>>>>>>&0 ubelong x \b, locked if idle for %u seconds
->>>>>>&16 ubelong ^1 \b, not locked if idle
->>>>>>&24 ubelong x \b, hash iterations %u
->>>>>>&28 ubequad x \b, salt %llu
->>>>>>&52 ubelong x \b, %u item(s)
-
-# From: Alex Beregszaszi <alex@fsn.hu>
-4 string gtktalog GNOME Catalogue (gtktalog)
->13 string >\0 version %s
-
-# Summary: GStreamer binary registry
-# Extension: .bin
-# Submitted by: Josh Triplett <josh@joshtriplett.org>
-0 belong 0xc0def00d GStreamer binary registry
->4 string x \b, version %s
-
-# GVariant Database file
-# By Elan Ruusamae <glen@delfi.ee>
-# https://github.com/GNOME/gvdb/blob/master/gvdb-format.h
-# It's always "GVariant", it's byte swapped on incompatible archs
-# See https://github.com/GNOME/gvdb/blob/master/gvdb-builder.c
-# file_builder_serialise()
-# https://developer.gnome.org/glib/2.34/glib-GVariant.html#GVariant
-0 string GVariant GVariant Database file,
-# version is never filled; probably a future extension
->8 lelong x version %d
-# not sure whether these are usable, so commented out
-#>>16 lelong x start %d,
-#>>>20 lelong x end %d
-
-# G-IR database made by gobject-introspect toolset,
-# https://live.gnome.org/GObjectIntrospection
-0 string GOBJ\nMETADATA\r\n\032 G-IR binary database
->16 byte x \b, v%d
->17 byte x \b.%d
->20 short x \b, %d entries
->22 short x \b/%d local
diff --git a/contrib/libs/libmagic/magic/Magdir/gnu b/contrib/libs/libmagic/magic/Magdir/gnu
deleted file mode 100644
index 761d657c4e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gnu
+++ /dev/null
@@ -1,173 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gnu,v 1.24 2021/04/26 15:56:00 christos Exp $
-# gnu: file(1) magic for various GNU tools
-#
-# GNU nlsutils message catalog file format
-#
-# GNU message catalog (.mo and .gmo files)
-
-# Update: Joerg Jenderek
-# URL: https://www.gnu.org/software/gettext/manual/html_node/MO-Files.html
-# Reference: ftp://ftp.gnu.org/pub/gnu/gettext/gettext-0.19.8.tar.gz/
-# gettext-0.19.8.1/gettext-runtime/intl/gmo.h
-# Note: maybe call it like "GNU translation gettext machine object"
-0 string \336\22\4\225 GNU message catalog (little endian),
-#0 ulelong 0x950412DE GNU-format message catalog data
-# TODO: write lines in such a way that code can also be called for big endian variant
-#>0 use gettext-object
-#0 name gettext-object
->4 ulelong x revision
-!:mime application/x-gettext-translation
-# mo extension is also used for Easeus Partition Master PE32 executable module
-# like ConvertFatToNTFS.mo
-!:ext gmo/mo
-# only found three revision combinations 0.0 0.1 1.1 as unsigned 32-bit
-# major revision
->4 ulelong/0xFFff x %u.
-# minor revision
->4 ulelong&0x0000FFff x \b%u
->>8 ulelong x \b, %u message
-# plural s
->>8 ulelong >1 \bs
-# size of hashing table
-#>20 ulelong x \b, %u hash
-#>20 ulelong >1 \bes
-#>24 ulelong x at %#x
-# for revision x.0 offset of table with originals is 1Ch if directly after header
->4 ulelong&0x0000FFff =0
->>12 ulelong !0x1C \b, at %#x string table
-# but for x.1 the table offset found is 30h, i.e. directly after the bigger header
->4 ulelong&0x0000FFff >0
->>12 ulelong !0x30 \b, at %#x string table
-# The following variables are only used in .mo files with minor revision >= 1
-# number of system dependent segments
-#>>28 ulelong x \b, %u segment
-#>>28 ulelong >1 \bs
-# offset of table describing system dependent segments
-#>>32 ulelong x at %#x
-# number of system dependent strings pairs
->>36 ulelong x \b, %u sysdep message
->>36 ulelong >1 \bs
-# offset of table with start offsets of original sysdep strings
-#>>40 ulelong x \b, at %#x sysdep strings
-# offset of table with start offsets of translated sysdep strings
-#>>44 ulelong x \b, at %#x sysdep translations
-# >>(44.l) ulelong x %#x chars
-# >>>&0 ulelong x at %#x
-# >>>>(&-4) string x "%s"
-# string table after big header
-#>>48 ubequad x \b, string table %#llx
-#
-# 0th string length seems to be always 0
-#>(12.l) ulelong x \b, %u chars
-#>>&0 ulelong x at %#x
-# if 1st string length positive inspect offset and string
-#>(12.l+8) ulelong >0 \b, %u chars
-#>>&0 ulelong x at %#x
-# if 2nd string length positive inspect offset and string
-# >(12.l+16) ulelong >0 \b, %u chars
-# >>&0 ulelong x at %#x
-# skip newline byte
-#>>>(&-4) ubyte =0x0A
-#>>>>&0 string x "%s"
-#>>>(&-4) ubyte !0x0A
-#>>>>&-1 string x '%s'
-# offset of table with translation strings
-#>16 ulelong x \b, at %#x translation table
-# check translation 0 length and offset
->(16.l) ulelong >0
->>&0 ulelong x
-# translation 0 seems to be often Project-Id with name and version
->>>(&-4) string x \b, %s
-# trans. 1 with bytes >= 1 unlike icoutils-0.31.0\po\en@boldquot.gmo with 1 NL
->(16.l+8) ulelong >1
->>&0 ulelong x
->>>(&-4) ubyte !0x0A
->>>>&-1 string x '%s'
-# 1 New Line like in tar-1.29\po\de.gmo
->>>(&-4) ubyte =0x0A
->>>>&0 ubyte !0x0A
->>>>>&-1 string x '%s'
-# 2nd New Line like in parted-3.1\po\de.gmo
->>>>&0 ubyte =0x0A
->>>>>&0 string x '%s'
-
-0 string \225\4\22\336 GNU message catalog (big endian),
-#0 ubelong 0x950412DE GNU-format message catalog data
-!:mime application/x-gettext-translation
-!:ext gmo/mo
-# TODO: for big endian use same code as for little endian
-#>0 use \^gettext-object
-# DEBUG code
-#>16 ubelong x \b, at %#x translation table
-#>(16.L) ubelong x %#x chars
-#>>&0 ubelong x at %#x
-# unexpected value HERE!
-#>>>(&-4) ubequad x %#llx
-#
->4 beshort x revision %d.
->6 beshort >0 \b%d,
->>8 belong x %d messages,
->>36 belong x %d sysdep messages
->6 beshort =0 \b%d,
->>8 belong x %d messages
-
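The two catalogue variants above differ only in byte order; the header fields the entries read (revision at 4, message count at 8, string-table offsets at 12/16) come from gettext's gmo.h. A hedged Python sketch that mirrors them and also pulls the metadata entry (translation 0, the empty msgid), where the Project-Id line usually lives:

    import struct

    def mo_summary(path):
        # Sketch only; field offsets mirror the entries above (see gettext's gmo.h).
        with open(path, "rb") as f:
            data = f.read()
        if data[:4] == b"\xde\x12\x04\x95":      # little endian
            u32 = lambda off: struct.unpack_from("<I", data, off)[0]
        elif data[:4] == b"\x95\x04\x12\xde":    # big endian
            u32 = lambda off: struct.unpack_from(">I", data, off)[0]
        else:
            return None
        revision = u32(4)
        count = u32(8)
        trans_table = u32(16)
        # Entry 0 (empty msgid) carries the catalogue metadata, e.g. Project-Id-Version.
        length, offset = u32(trans_table), u32(trans_table + 4)
        meta = data[offset:offset + length].decode("utf-8", "replace")
        return {"revision": (revision >> 16, revision & 0xFFFF),
                "messages": count, "metadata": meta}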
-
-# GnuPG
-# The format is very similar to pgp
-0 string \001gpg GPG key trust database
->4 byte x version %d
-# Note: magic.mime had 0x8501 for the next line instead of 0x8502
-0 beshort 0x8502 GPG encrypted data
-!:mime text/PGP # encoding: data
-
-# Update: Joerg Jenderek
-# Note: PGP and GPG use same data structure.
-# So recognition is now done by ./pgp with start test for byte 0x99
-# This magic is not particularly good, as the keyrings don't have true
-# magic. Nevertheless, it covers many keyrings.
-# 0 ubeshort-0x9901 <2
-# >3 byte 4
-# >>4 bedate x GPG key public ring, created %s
-# !:mime application/x-gnupg-keyring
-
-# Symmetric encryption
-0 leshort 0x0d8c
->4 leshort 0x0203
->>2 leshort 0x0204 GPG symmetrically encrypted data (3DES cipher)
->>2 leshort 0x0304 GPG symmetrically encrypted data (CAST5 cipher)
->>2 leshort 0x0404 GPG symmetrically encrypted data (BLOWFISH cipher)
->>2 leshort 0x0704 GPG symmetrically encrypted data (AES cipher)
->>2 leshort 0x0804 GPG symmetrically encrypted data (AES192 cipher)
->>2 leshort 0x0904 GPG symmetrically encrypted data (AES256 cipher)
->>2 leshort 0x0a04 GPG symmetrically encrypted data (TWOFISH cipher)
->>2 leshort 0x0b04 GPG symmetrically encrypted data (CAMELLIA128 cipher)
->>2 leshort 0x0c04 GPG symmetrically encrypted data (CAMELLIA192 cipher)
->>2 leshort 0x0d04 GPG symmetrically encrypted data (CAMELLIA256 cipher)
-
-
-# GnuPG Keybox file
-# <https://git.gnupg.org/cgi-bin/gitweb.cgi?p=gnupg.git;a=blob;f=kbx/keybox-blob.c;hb=HEAD>
-# From: Philipp Hahn <hahn@univention.de>
-0 belong 32
->4 byte 1
->>8 string KBXf GPG keybox database
->>>5 byte 1 version %d
->>>16 bedate x \b, created-at %s
->>>20 bedate x \b, last-maintained %s
-
-
-# From: James Youngman <jay@gnu.org>
-# gnu find magic
-0 string \0LOCATE GNU findutils locate database data
->7 string >\0 \b, format %s
->7 string 02 \b (frcode)
-
-# Files produced by GNU gettext
-
-# gettext message catalogue
-0 search/1024 \nmsgid
->&0 search/1024 \nmsgstr GNU gettext message catalogue text
-!:strength +100
-!:mime text/x-po
diff --git a/contrib/libs/libmagic/magic/Magdir/gnumeric b/contrib/libs/libmagic/magic/Magdir/gnumeric
deleted file mode 100644
index 928ad3eed1..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gnumeric
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gnumeric,v 1.4 2009/09/19 16:28:09 christos Exp $
-# gnumeric: file(1) magic for Gnumeric spreadsheet
-# This entry is only semi-helpful, as Gnumeric compresses its files, so
-# they will ordinarily be reported as "compressed", but at least -z helps
-39 string =<gmr:Workbook Gnumeric spreadsheet
-!:mime application/x-gnumeric
diff --git a/contrib/libs/libmagic/magic/Magdir/gpt b/contrib/libs/libmagic/magic/Magdir/gpt
deleted file mode 100644
index c2fd51c0dc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gpt
+++ /dev/null
@@ -1,240 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gpt,v 1.5 2020/12/12 20:01:47 christos Exp $
-#
-# GPT Partition table patterns.
-# Author: Rogier Goossens (goossens.rogier@gmail.com)
-# Note that a GPT-formatted disk must contain an MBR as well.
-#
-
-# The initial segment (up to >>>>>>>>422) was copied from the X86
-# partition table code (aka MBR).
-# This is kept separate, so that MBR partitions are not reported as well.
-# (use -k if you do want them as well)
-
-# First, detect the MBR partition table
-# If more than one GPT protective MBR partition exists, don't print anything
-# (the other MBR detection code will then just print the MBR partition table)
-0x1FE leshort 0xAA55
->3 string !MS
->>3 string !SYSLINUX
->>>3 string !MTOOL
->>>>3 string !NEWLDR
->>>>>5 string !DOS
-# not FAT (32 bit)
->>>>>>82 string !FAT32
-#not Linux kernel
->>>>>>>514 string !HdrS
-#not BeOS
->>>>>>>>422 string !Be\ Boot\ Loader
-# GPT with protective MBR entry in partition 1 (only)
->>>>>>>>>450 ubyte 0xee
->>>>>>>>>>466 ubyte !0xee
->>>>>>>>>>>482 ubyte !0xee
->>>>>>>>>>>>498 ubyte !0xee
-#>>>>>>>>>>>>>446 use gpt-mbr-partition
->>>>>>>>>>>>>(454.l*8192) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>0 ubyte x of 8192 bytes
->>>>>>>>>>>>>(454.l*8192) string !EFI\ PART
->>>>>>>>>>>>>>(454.l*4096) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>0 ubyte x of 4096 bytes
->>>>>>>>>>>>>>(454.l*4096) string !EFI\ PART
->>>>>>>>>>>>>>>(454.l*2048) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>0 ubyte x of 2048 bytes
->>>>>>>>>>>>>>>(454.l*2048) string !EFI\ PART
->>>>>>>>>>>>>>>>(454.l*1024) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>0 ubyte x of 1024 bytes
->>>>>>>>>>>>>>>>(454.l*1024) string !EFI\ PART
->>>>>>>>>>>>>>>>>(454.l*512) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>>0 ubyte x of 512 bytes
-# GPT with protective MBR entry in partition 2 (only)
->>>>>>>>>450 ubyte !0xee
->>>>>>>>>>466 ubyte 0xee
->>>>>>>>>>>482 ubyte !0xee
->>>>>>>>>>>>498 ubyte !0xee
-#>>>>>>>>>>>>>462 use gpt-mbr-partition
->>>>>>>>>>>>>(470.l*8192) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>0 ubyte x of 8192 bytes
->>>>>>>>>>>>>(470.l*8192) string !EFI\ PART
->>>>>>>>>>>>>>(470.l*4096) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>0 ubyte x of 4096 bytes
->>>>>>>>>>>>>>(470.l*4096) string !EFI\ PART
->>>>>>>>>>>>>>>(470.l*2048) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>0 ubyte x of 2048 bytes
->>>>>>>>>>>>>>>(470.l*2048) string !EFI\ PART
->>>>>>>>>>>>>>>>(470.l*1024) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>0 ubyte x of 1024 bytes
->>>>>>>>>>>>>>>>(470.l*1024) string !EFI\ PART
->>>>>>>>>>>>>>>>>(470.l*512) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>>0 ubyte x of 512 bytes
-# GPT with protective MBR entry in partition 3 (only)
->>>>>>>>>450 ubyte !0xee
->>>>>>>>>>466 ubyte !0xee
->>>>>>>>>>>482 ubyte 0xee
->>>>>>>>>>>>498 ubyte !0xee
-#>>>>>>>>>>>>>478 use gpt-mbr-partition
->>>>>>>>>>>>>(486.l*8192) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>0 ubyte x of 8192 bytes
->>>>>>>>>>>>>(486.l*8192) string !EFI\ PART
->>>>>>>>>>>>>>(486.l*4096) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>0 ubyte x of 4096 bytes
->>>>>>>>>>>>>>(486.l*4096) string !EFI\ PART
->>>>>>>>>>>>>>>(486.l*2048) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>0 ubyte x of 2048 bytes
->>>>>>>>>>>>>>>(486.l*2048) string !EFI\ PART
->>>>>>>>>>>>>>>>(486.l*1024) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>0 ubyte x of 1024 bytes
->>>>>>>>>>>>>>>>(486.l*1024) string !EFI\ PART
->>>>>>>>>>>>>>>>>(486.l*512) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>>0 ubyte x of 512 bytes
-# GPT with protective MBR entry in partition 4 (only)
->>>>>>>>>450 ubyte !0xee
->>>>>>>>>>466 ubyte !0xee
->>>>>>>>>>>482 ubyte !0xee
->>>>>>>>>>>>498 ubyte 0xee
-#>>>>>>>>>>>>>494 use gpt-mbr-partition
->>>>>>>>>>>>>(502.l*8192) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>0 ubyte x of 8192 bytes
->>>>>>>>>>>>>(502.l*8192) string !EFI\ PART
->>>>>>>>>>>>>>(502.l*4096) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>0 ubyte x of 4096 bytes
->>>>>>>>>>>>>>(502.l*4096) string !EFI\ PART
->>>>>>>>>>>>>>>(502.l*2048) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>0 ubyte x of 2048 bytes
->>>>>>>>>>>>>>>(502.l*2048) string !EFI\ PART
->>>>>>>>>>>>>>>>(502.l*1024) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>0 ubyte x of 1024 bytes
->>>>>>>>>>>>>>>>(502.l*1024) string !EFI\ PART
->>>>>>>>>>>>>>>>>(502.l*512) string EFI\ PART GPT partition table
->>>>>>>>>>>>>>>>>>0 use gpt-mbr-type
->>>>>>>>>>>>>>>>>>&-8 use gpt-table
->>>>>>>>>>>>>>>>>>0 ubyte x of 512 bytes
-
-# The following code does GPT detection and processing, including
-# sector size detection.
-# It has to be duplicated above because the top-level pattern
-# (i.e. not called using 'use') must print *something* for file
-# to count it as a match. Text only printed in named patterns is
-# not counted, and causes file to continue, and try and match
-# other patterns.
-#
-# Unfortunately, when assuming sector sizes >=16k, if the sector size
-# happens to be 512 instead, we may find confusing data after the GPT
-# table... If the GPT table has less than 128 entries, this may even
-# happen for assumed sector sizes as small as 4k
-# This could be solved by checking for the presence of the backup GPT
-# header as well, but that makes the logic extremely complex
-##0 name gpt-mbr-partition
-##>(8.l*8192) string EFI\ PART
-##>>(8.l*8192) use gpt-mbr-type
-##>>&-8 use gpt-table
-##>>0 ubyte x of 8192 bytes
-##>(8.l*8192) string !EFI\ PART
-##>>(8.l*4096) string EFI\ PART GPT partition table
-##>>>0 use gpt-mbr-type
-##>>>&-8 use gpt-table
-##>>>0 ubyte x of 4096 bytes
-##>>(8.l*4096) string !EFI\ PART
-##>>>(8.l*2048) string EFI\ PART GPT partition table
-##>>>>0 use gpt-mbr-type
-##>>>>&-8 use gpt-table
-##>>>>0 ubyte x of 2048 bytes
-##>>>(8.l*2048) string !EFI\ PART
-##>>>>(8.l*1024) string EFI\ PART GPT partition table
-##>>>>>0 use gpt-mbr-type
-##>>>>>&-8 use gpt-table
-##>>>>>0 ubyte x of 1024 bytes
-##>>>>(8.l*1024) string !EFI\ PART
-##>>>>>(8.l*512) string EFI\ PART GPT partition table
-##>>>>>>0 use gpt-mbr-type
-##>>>>>>&-8 use gpt-table
-##>>>>>>0 ubyte x of 512 bytes
-
-# Print details of MBR type for a GPT-disk
-# Calling code ensures that there is only one 0xee partition.
-0 name gpt-mbr-type
-# GPT with protective MBR entry in partition 1
->450 ubyte 0xee
->>454 ulelong 1
->>>462 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>454 ulelong !1 \b (nonstandard: not at LBA 1)
-# GPT with protective MBR entry in partition 2
->466 ubyte 0xee
->>470 ulelong 1
->>>478 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->>>>446 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>>478 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>470 ulelong !1 \b (nonstandard: not at LBA 1)
-# GPT with protective MBR entry in partition 3
->482 ubyte 0xee
->>486 ulelong 1
->>>494 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->>>>446 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>>494 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>486 ulelong !1 \b (nonstandard: not at LBA 1)
-# GPT with protective MBR entry in partition 4
->498 ubyte 0xee
->>502 ulelong 1
->>>446 string !\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0 \b (with hybrid MBR)
->>502 ulelong !1 \b (nonstandard: not at LBA 1)
-
-# Print the information from a GPT partition table structure
-0 name gpt-table
->10 uleshort x \b, version %u
->8 uleshort x \b.%u
->56 ulelong x \b, GUID: %08x
->60 uleshort x \b-%04x
->62 uleshort x \b-%04x
->64 ubeshort x \b-%04x
->66 ubeshort x \b-%04x
->68 ubelong x \b%08x
-#>80 uleshort x \b, %d partition entries
->32 ulequad+1 x \b, disk size: %lld sectors
-
-# In case a GPT data-structure is at LBA 0, report it as well
-# This covers systems which are not GPT-aware, and which show
-# and allow access to the protective partition. This code will
-# detect the contents of such a partition.
-0 string EFI\ PART GPT data structure (nonstandard: at LBA 0)
->0 use gpt-table
->0 ubyte x (sector size unknown)
-
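The duplicated blocks above exist only because the sector size is unknown in advance; the underlying check is simply "look for 'EFI PART' one sector into the device and parse the header there". A minimal Python sketch (function name made up) of that probe, including the mixed-endian disk GUID and the disk size derived from the alternate-header LBA, as the gpt-table pattern prints them:

    import struct
    import uuid

    SECTOR_SIZES = (512, 1024, 2048, 4096, 8192)   # same candidates as above

    def find_gpt_header(path):
        # Sketch: the primary GPT header sits at LBA 1, i.e. exactly one sector in.
        with open(path, "rb") as f:
            for ss in SECTOR_SIZES:
                f.seek(ss)
                hdr = f.read(92)
                if len(hdr) >= 92 and hdr[:8] == b"EFI PART":
                    minor, major = struct.unpack_from("<HH", hdr, 8)
                    alternate_lba = struct.unpack_from("<Q", hdr, 32)[0]
                    disk_guid = uuid.UUID(bytes_le=hdr[56:72])
                    return {"sector_size": ss, "revision": (major, minor),
                            "disk_guid": str(disk_guid),
                            "disk_size_sectors": alternate_lba + 1}
        return None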
-
diff --git a/contrib/libs/libmagic/magic/Magdir/gpu b/contrib/libs/libmagic/magic/Magdir/gpu
deleted file mode 100644
index 36d712443b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gpu
+++ /dev/null
@@ -1,28 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gpu,v 1.3 2021/04/26 15:56:00 christos Exp $
-# gpu: file(1) magic for GPU input files
-
-# Standard Portable Intermediate Representation (SPIR)
-# Documentation: https://www.khronos.org/spir
-# Typical file extension: .spv
-
-0 belong 0x07230203 Khronos SPIR-V binary, big-endian
->4 belong x \b, version %#08x
->8 belong x \b, generator %#08x
-
-0 lelong 0x07230203 Khronos SPIR-V binary, little-endian
->4 lelong x \b, version %#08x
->8 lelong x \b, generator %#08x
-
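Both SPIR-V entries test the same 32-bit magic word in either byte order; a tiny Python sketch of the same check (names illustrative):

    import struct

    SPIRV_MAGIC = 0x07230203

    def spirv_info(path):
        # Sketch of the magic-word check above; the word is written in the module's own endianness.
        with open(path, "rb") as f:
            words = f.read(12)
        if len(words) < 12:
            return None
        for endian, name in (("<", "little"), (">", "big")):
            magic, version, generator = struct.unpack(endian + "III", words)
            if magic == SPIRV_MAGIC:
                return {"endianness": name, "version": hex(version), "generator": hex(generator)}
        return None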
-# Vulkan Trace file
-# Documentation:
-# https://github.com/LunarG/VulkanTools/blob/master/vktrace/vktrace_common/\
-# vktrace_trace_packet_identifiers.h
-# Typical file extension: .vktrace
-
-8 lequad 0xABADD068ADEAFD0C Vulkan trace file, little-endian
->0 leshort x \b, version %d
-
-8 bequad 0xABADD068ADEAFD0C Vulkan trace file, big-endian
->0 beshort x \b, version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/grace b/contrib/libs/libmagic/magic/Magdir/grace
deleted file mode 100644
index 25bd759edc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/grace
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: grace,v 1.4 2009/09/19 16:28:09 christos Exp $
-# ACE/gr and Grace type files - PLEASE DO NOT REMOVE THIS LINE
-#
-# ACE/gr binary
-0 string \000\000\0001\000\000\0000\000\000\0000\000\000\0002\000\000\0000\000\000\0000\000\000\0003 old ACE/gr binary file
->39 byte >0 - version %c
-# ACE/gr ascii
-0 string #\ xvgr\ parameter\ file ACE/gr ascii file
-0 string #\ xmgr\ parameter\ file ACE/gr ascii file
-0 string #\ ACE/gr\ parameter\ file ACE/gr ascii file
-# Grace projects
-0 string #\ Grace\ project\ file Grace project file
->23 string @version\ (version
->>32 byte >0 %c
->>33 string >\0 \b.%.2s
->>35 string >\0 \b.%.2s)
-# ACE/gr fit description files
-0 string #\ ACE/gr\ fit\ description\ ACE/gr fit description file
-# end of ACE/gr and Grace type files - PLEASE DO NOT REMOVE THIS LINE
diff --git a/contrib/libs/libmagic/magic/Magdir/graphviz b/contrib/libs/libmagic/magic/Magdir/graphviz
deleted file mode 100644
index d8bf22db43..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/graphviz
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: graphviz,v 1.9 2019/04/30 04:01:40 christos Exp $
-# graphviz: file(1) magic for https://www.graphviz.org/
-
-# FIXME: These patterns match too generally. For example, the first
-# line matches a LaTeX file containing the word "graph" (with a {
-# following later) and the second line matches this file.
-#0 regex/100l [\r\n\t\ ]*graph[\r\n\t\ ]+.*\\{ graphviz graph text
-#!:mime text/vnd.graphviz
-#0 regex/100l [\r\n\t\ ]*digraph[\r\n\t\ ]+.*\\{ graphviz digraph text
-#!:mime text/vnd.graphviz
diff --git a/contrib/libs/libmagic/magic/Magdir/gringotts b/contrib/libs/libmagic/magic/Magdir/gringotts
deleted file mode 100644
index b67475406a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/gringotts
+++ /dev/null
@@ -1,48 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: gringotts,v 1.6 2017/03/17 21:35:28 christos Exp $
-# gringotts: file(1) magic for Gringotts
-# http://devel.pluto.linux.it/projects/Gringotts/
-# author: Germano Rizzo <mano@pluto.linux.it>
-#GRG3????Y
-0 string GRG Gringotts data file
-#file format 1
->3 string 1 v.1, MCRYPT S2K, SERPENT crypt, SHA-256 hash, ZLib lvl.9
-#file format 2
->3 string 2 v.2, MCRYPT S2K,
->>8 byte&0x70 0x00 RIJNDAEL-128 crypt,
->>8 byte&0x70 0x10 SERPENT crypt,
->>8 byte&0x70 0x20 TWOFISH crypt,
->>8 byte&0x70 0x30 CAST-256 crypt,
->>8 byte&0x70 0x40 SAFER+ crypt,
->>8 byte&0x70 0x50 LOKI97 crypt,
->>8 byte&0x70 0x60 3DES crypt,
->>8 byte&0x70 0x70 RIJNDAEL-256 crypt,
->>8 byte&0x08 0x00 SHA1 hash,
->>8 byte&0x08 0x08 RIPEMD-160 hash,
->>8 byte&0x04 0x00 ZLib
->>8 byte&0x04 0x04 BZip2
->>8 byte&0x03 0x00 lvl.0
->>8 byte&0x03 0x01 lvl.3
->>8 byte&0x03 0x02 lvl.6
->>8 byte&0x03 0x03 lvl.9
-#file format 3
->3 string 3 v.3, OpenPGP S2K,
->>8 byte&0x70 0x00 RIJNDAEL-128 crypt,
->>8 byte&0x70 0x10 SERPENT crypt,
->>8 byte&0x70 0x20 TWOFISH crypt,
->>8 byte&0x70 0x30 CAST-256 crypt,
->>8 byte&0x70 0x40 SAFER+ crypt,
->>8 byte&0x70 0x50 LOKI97 crypt,
->>8 byte&0x70 0x60 3DES crypt,
->>8 byte&0x70 0x70 RIJNDAEL-256 crypt,
->>8 byte&0x08 0x00 SHA1 hash,
->>8 byte&0x08 0x08 RIPEMD-160 hash,
->>8 byte&0x04 0x00 ZLib
->>8 byte&0x04 0x04 BZip2
->>8 byte&0x03 0x00 lvl.0
->>8 byte&0x03 0x01 lvl.3
->>8 byte&0x03 0x02 lvl.6
->>8 byte&0x03 0x03 lvl.9
-#file format >3
->3 string >3 v.%.1s (unknown details)
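For formats 2 and 3, everything the entries above print is packed into the single flag byte at offset 8 (bits 0x70 cipher, 0x08 hash, 0x04 compressor, 0x03 level); a short Python sketch (names illustrative) that decodes it the same way:

    GRG_CIPHERS = {0x00: "RIJNDAEL-128", 0x10: "SERPENT", 0x20: "TWOFISH",
                   0x30: "CAST-256", 0x40: "SAFER+", 0x50: "LOKI97",
                   0x60: "3DES", 0x70: "RIJNDAEL-256"}
    GRG_LEVELS = {0x00: 0, 0x01: 3, 0x02: 6, 0x03: 9}

    def gringotts_info(path):
        # Sketch decoding the flag byte exactly as the v.2/v.3 entries above do.
        with open(path, "rb") as f:
            hdr = f.read(9)
        if len(hdr) < 9 or not hdr.startswith(b"GRG"):
            return None
        version = chr(hdr[3])
        if version not in ("2", "3"):        # v.1 settings are fixed; see the entry above
            return {"format": version}
        flags = hdr[8]
        return {"format": version,
                "cipher": GRG_CIPHERS[flags & 0x70],
                "hash": "RIPEMD-160" if flags & 0x08 else "SHA1",
                "compressor": "BZip2" if flags & 0x04 else "ZLib",
                "level": GRG_LEVELS[flags & 0x03]}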
diff --git a/contrib/libs/libmagic/magic/Magdir/hardware b/contrib/libs/libmagic/magic/Magdir/hardware
deleted file mode 100644
index e92986c5a8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/hardware
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: hardware,v 1.1 2018/08/02 06:32:52 christos Exp $
-# hardware magic
-
-# EDID
-# https://en.wikipedia.org/wiki/Extended_Display_Identification_Data
-0 string \x00\xFF\xFF\xFF\xFF\xFF\xFF\x00
->19 byte x
->>18 byte x EDID data, version %u.
->>19 byte x \b%u
-#>>17 ubyte+1990 <255 \b, manufactured %u
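A one-screen Python sketch of the same EDID check, including the commented-out year-of-manufacture field (byte 17 plus 1990); names are illustrative:

    EDID_HEADER = b"\x00\xff\xff\xff\xff\xff\xff\x00"

    def edid_info(path):
        # Sketch mirroring the EDID entry above.
        with open(path, "rb") as f:
            blk = f.read(20)
        if len(blk) < 20 or blk[:8] != EDID_HEADER:
            return None
        return {"version": f"{blk[18]}.{blk[19]}", "year": 1990 + blk[17]}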
diff --git a/contrib/libs/libmagic/magic/Magdir/hitachi-sh b/contrib/libs/libmagic/magic/Magdir/hitachi-sh
deleted file mode 100644
index f64489f7fc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/hitachi-sh
+++ /dev/null
@@ -1,30 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: hitachi-sh,v 1.10 2020/12/12 20:01:47 christos Exp $
-# hitachi-sh: file(1) magic for Hitachi Super-H
-#
-# Super-H COFF
-#
-# updated by Joerg Jenderek at Oct 2015
-# https://en.wikipedia.org/wiki/COFF
-# https://de.wikipedia.org/wiki/Common_Object_File_Format
-# http://www.delorie.com/djgpp/doc/coff/filhdr.html
-# below test line conflicts with 2nd NTFS filesystem sector
-# 2nd NTFS filesystem sector often starts with 0x05004e00 for unicode string 5 NTLDR
-# and Portable Gaming Notation Compressed format (*.WID http://pgn.freeservers.com/)
-0 beshort 0x0500
-# test for unused flag bits (0x8000,0x0800,0x0400,0x0200,x0080) in f_flags
->18 ubeshort&0x8E80 0
-# use big endian variant of subroutine to display name+variables+flags
-# for common object formatted files
->>0 use \^display-coff
-!:strength -10
-
-0 leshort 0x0550
-# test for unused flag bits in f_flags
->18 uleshort&0x8E80 0
-# use little endian variant of subroutine to
-# display name+variables+flags for common object formatted files
->>0 use display-coff
-!:strength -10
-
diff --git a/contrib/libs/libmagic/magic/Magdir/hp b/contrib/libs/libmagic/magic/Magdir/hp
deleted file mode 100644
index d57169ee16..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/hp
+++ /dev/null
@@ -1,433 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: hp,v 1.25 2019/01/13 00:32:38 christos Exp $
-# hp: file(1) magic for Hewlett Packard machines (see also "printer")
-#
-# XXX - somebody should figure out whether any byte order needs to be
-# applied to the "TML" stuff; I'm assuming the Apollo stuff is
-# big-endian as it was mostly 68K-based.
-#
-# I think the 500 series was the old stack-based machines, running a
-# UNIX environment atop the "SUN kernel"; dunno whether it was
-# big-endian or little-endian.
-#
-# Daniel Quinlan (quinlan@yggdrasil.com): hp200 machines are 68010 based;
-# hp300 are 68020+68881 based; hp400 are also 68k. The following basic
-# HP magic is useful for reference, but using "long" magic is a better
-# practice in order to avoid collisions.
-#
-# Guy Harris (guy@netapp.com): some additions to this list came from
-# HP-UX 10.0's "/usr/include/sys/unistd.h" (68030, 68040, PA-RISC 1.1,
-# 1.2, and 2.0). The 1.2 and 2.0 stuff isn't in the HP-UX 10.0
-# "/etc/magic", though, except for the "archive file relocatable library"
-# stuff, and the 68030 and 68040 stuff isn't there at all - are they not
-# used in executables, or have they just not yet updated "/etc/magic"
-# completely?
-#
-# 0 beshort 200 hp200 (68010) BSD binary
-# 0 beshort 300 hp300 (68020+68881) BSD binary
-# 0 beshort 0x20c hp200/300 HP-UX binary
-# 0 beshort 0x20d hp400 (68030) HP-UX binary
-# 0 beshort 0x20e hp400 (68040?) HP-UX binary
-# 0 beshort 0x20b PA-RISC1.0 HP-UX binary
-# 0 beshort 0x210 PA-RISC1.1 HP-UX binary
-# 0 beshort 0x211 PA-RISC1.2 HP-UX binary
-# 0 beshort 0x214 PA-RISC2.0 HP-UX binary
-
-#
-# The "misc" stuff needs a byte order; the archives look suspiciously
-# like the old 177545 archives (0xff65 = 0177545).
-#
-#### Old Apollo stuff
-0 beshort 0627 Apollo m68k COFF executable
->18 beshort ^040000 not stripped
->22 beshort >0 - version %d
-0 beshort 0624 apollo a88k COFF executable
->18 beshort ^040000 not stripped
->22 beshort >0 - version %d
-0 long 01203604016 TML 0123 byte-order format
-0 long 01702407010 TML 1032 byte-order format
-0 long 01003405017 TML 2301 byte-order format
-0 long 01602007412 TML 3210 byte-order format
-#### PA-RISC 1.1
-0 belong 0x02100106 PA-RISC1.1 relocatable object
-0 belong 0x02100107 PA-RISC1.1 executable
->168 belong &0x00000004 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x02100108 PA-RISC1.1 shared executable
->168 belong&0x4 0x4 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x0210010b PA-RISC1.1 demand-load executable
->168 belong&0x4 0x4 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x0210010e PA-RISC1.1 shared library
->96 belong >0 - not stripped
-
-0 belong 0x0210010d PA-RISC1.1 dynamic load library
->96 belong >0 - not stripped
-
-#### PA-RISC 2.0
-0 belong 0x02140106 PA-RISC2.0 relocatable object
-
-0 belong 0x02140107 PA-RISC2.0 executable
->168 belong &0x00000004 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x02140108 PA-RISC2.0 shared executable
->168 belong &0x00000004 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x0214010b PA-RISC2.0 demand-load executable
->168 belong &0x00000004 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x0214010e PA-RISC2.0 shared library
->96 belong >0 - not stripped
-
-0 belong 0x0214010d PA-RISC2.0 dynamic load library
->96 belong >0 - not stripped
-
-#### 800
-0 belong 0x020b0106 PA-RISC1.0 relocatable object
-
-0 belong 0x020b0107 PA-RISC1.0 executable
->168 belong&0x4 0x4 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x020b0108 PA-RISC1.0 shared executable
->168 belong&0x4 0x4 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x020b010b PA-RISC1.0 demand-load executable
->168 belong&0x4 0x4 dynamically linked
->(144) belong 0x054ef630 dynamically linked
->96 belong >0 - not stripped
-
-0 belong 0x020b010e PA-RISC1.0 shared library
->96 belong >0 - not stripped
-
-0 belong 0x020b010d PA-RISC1.0 dynamic load library
->96 belong >0 - not stripped
-
-#### 500
-0 long 0x02080106 HP s500 relocatable executable
->16 long >0 - version %d
-
-0 long 0x02080107 HP s500 executable
->16 long >0 - version %d
-
-0 long 0x02080108 HP s500 pure executable
->16 long >0 - version %d
-
-#### 200
-0 belong 0x020c0108 HP s200 pure executable
->4 beshort >0 - version %d
->8 belong &0x80000000 save fp regs
->8 belong &0x40000000 dynamically linked
->8 belong &0x20000000 debuggable
->36 belong >0 not stripped
-
-0 belong 0x020c0107 HP s200 executable
->4 beshort >0 - version %d
->8 belong &0x80000000 save fp regs
->8 belong &0x40000000 dynamically linked
->8 belong &0x20000000 debuggable
->36 belong >0 not stripped
-
-0 belong 0x020c010b HP s200 demand-load executable
->4 beshort >0 - version %d
->8 belong &0x80000000 save fp regs
->8 belong &0x40000000 dynamically linked
->8 belong &0x20000000 debuggable
->36 belong >0 not stripped
-
-0 belong 0x020c0106 HP s200 relocatable executable
->4 beshort >0 - version %d
->6 beshort >0 - highwater %d
->8 belong &0x80000000 save fp regs
->8 belong &0x20000000 debuggable
->8 belong &0x10000000 PIC
-
-0 belong 0x020a0108 HP s200 (2.x release) pure executable
->4 beshort >0 - version %d
->36 belong >0 not stripped
-
-0 belong 0x020a0107 HP s200 (2.x release) executable
->4 beshort >0 - version %d
->36 belong >0 not stripped
-
-0 belong 0x020c010e HP s200 shared library
->4 beshort >0 - version %d
->6 beshort >0 - highwater %d
->36 belong >0 not stripped
-
-0 belong 0x020c010d HP s200 dynamic load library
->4 beshort >0 - version %d
->6 beshort >0 - highwater %d
->36 belong >0 not stripped
-
-#### MISC
-0 long 0x0000ff65 HP old archive
-0 long 0x020aff65 HP s200 old archive
-0 long 0x020cff65 HP s200 old archive
-0 long 0x0208ff65 HP s500 old archive
-
-0 long 0x015821a6 HP core file
-
-0 long 0x4da7eee8 HP-WINDOWS font
->8 byte >0 - version %d
-0 string Bitmapfile HP Bitmapfile
-
-0 string IMGfile CIS compimg HP Bitmapfile
-# XXX - see "lif"
-#0 short 0x8000 lif file
-0 long 0x020c010c compiled Lisp
-
-0 string msgcat01 HP NLS message catalog,
->8 long >0 %d messages
-
-# Summary: HP-48/49 calculator
-# Created by: phk@data.fls.dk
-# Modified by (1): AMAKAWA Shuhei <sa264@cam.ac.uk>
-# Modified by (2): Samuel Thibault <samuel.thibault@ens-lyon.org> (HP49 support)
-0 string HPHP HP
->4 string 48 48 binary
->4 string 49 49 binary
->7 byte >64 - Rev %c
->8 leshort 0x2911 (ADR)
->8 leshort 0x2933 (REAL)
->8 leshort 0x2955 (LREAL)
->8 leshort 0x2977 (COMPLX)
->8 leshort 0x299d (LCOMPLX)
->8 leshort 0x29bf (CHAR)
->8 leshort 0x29e8 (ARRAY)
->8 leshort 0x2a0a (LNKARRAY)
->8 leshort 0x2a2c (STRING)
->8 leshort 0x2a4e (HXS)
->8 leshort 0x2a74 (LIST)
->8 leshort 0x2a96 (DIR)
->8 leshort 0x2ab8 (ALG)
->8 leshort 0x2ada (UNIT)
->8 leshort 0x2afc (TAGGED)
->8 leshort 0x2b1e (GROB)
->8 leshort 0x2b40 (LIB)
->8 leshort 0x2b62 (BACKUP)
->8 leshort 0x2b88 (LIBDATA)
->8 leshort 0x2d9d (PROG)
->8 leshort 0x2dcc (CODE)
->8 leshort 0x2e48 (GNAME)
->8 leshort 0x2e6d (LNAME)
->8 leshort 0x2e92 (XLIB)
-
-0 string %%HP: HP text
->6 string T(0) - T(0)
->6 string T(1) - T(1)
->6 string T(2) - T(2)
->6 string T(3) - T(3)
->10 string A(D) A(D)
->10 string A(R) A(R)
->10 string A(G) A(G)
->14 string F(.) F(.);
->14 string F(,) F(,);
-
-
-# Summary: HP-38/39 calculator
-# Created by: Samuel Thibault <samuel.thibault@ens-lyon.org>
-0 string HP3
->3 string 8 HP 38
->3 string 9 HP 39
->4 string Bin binary
->4 string Asc ASCII
->7 string A (Directory List)
->7 string B (Zaplet)
->7 string C (Note)
->7 string D (Program)
->7 string E (Variable)
->7 string F (List)
->7 string G (Matrix)
->7 string H (Library)
->7 string I (Target List)
->7 string J (ASCII Vector specification)
->7 string K (wildcard)
-
-# Summary: HP-38/39 calculator
-# Created by: Samuel Thibault <samuel.thibault@ens-lyon.org>
-0 string HP3
->3 string 8 HP 38
->3 string 9 HP 39
->4 string Bin binary
->4 string Asc ASCII
->7 string A (Directory List)
->7 string B (Zaplet)
->7 string C (Note)
->7 string D (Program)
->7 string E (Variable)
->7 string F (List)
->7 string G (Matrix)
->7 string H (Library)
->7 string I (Target List)
->7 string J (ASCII Vector specification)
->7 string K (wildcard)
-
-# hpBSD magic numbers
-0 beshort 200 hp200 (68010) BSD
->2 beshort 0407 impure binary
->2 beshort 0410 read-only binary
->2 beshort 0413 demand paged binary
-0 beshort 300 hp300 (68020+68881) BSD
->2 beshort 0407 impure binary
->2 beshort 0410 read-only binary
->2 beshort 0413 demand paged binary
-#
-# From David Gero <dgero@nortelnetworks.com>
-# HP-UX 10.20 core file format from /usr/include/sys/core.h
-# Unfortunately, HP-UX uses corehead blocks without specifying the order
-# There are four we care about:
-# CORE_KERNEL, which starts with the string "HP-UX"
-# CORE_EXEC, which contains the name of the command
-# CORE_PROC, which contains the signal number that caused the core dump
-# CORE_FORMAT, which contains the version of the core file format (== 1)
-# The only observed order in real core files is KERNEL, EXEC, FORMAT, PROC
-# but we include all 6 variations of the order of the first 3, and
-# assume that PROC will always be last
-# Order 1: KERNEL, EXEC, FORMAT, PROC
-0x10 string HP-UX
->0 belong 2
->>0xC belong 0x3C
->>>0x4C belong 0x100
->>>>0x58 belong 0x44
->>>>>0xA0 belong 1
->>>>>>0xAC belong 4
->>>>>>>0xB0 belong 1
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0x90 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-# Order 2: KERNEL, FORMAT, EXEC, PROC
->>>0x4C belong 1
->>>>0x58 belong 4
->>>>>0x5C belong 1
->>>>>>0x60 belong 0x100
->>>>>>>0x6C belong 0x44
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0xA4 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-# Order 3: FORMAT, KERNEL, EXEC, PROC
-0x24 string HP-UX
->0 belong 1
->>0xC belong 4
->>>0x10 belong 1
->>>>0x14 belong 2
->>>>>0x20 belong 0x3C
->>>>>>0x60 belong 0x100
->>>>>>>0x6C belong 0x44
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0xA4 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-# Order 4: EXEC, KERNEL, FORMAT, PROC
-0x64 string HP-UX
->0 belong 0x100
->>0xC belong 0x44
->>>0x54 belong 2
->>>>0x60 belong 0x3C
->>>>>0xA0 belong 1
->>>>>>0xAC belong 4
->>>>>>>0xB0 belong 1
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0x44 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-# Order 5: FORMAT, EXEC, KERNEL, PROC
-0x78 string HP-UX
->0 belong 1
->>0xC belong 4
->>>0x10 belong 1
->>>>0x14 belong 0x100
->>>>>0x20 belong 0x44
->>>>>>0x68 belong 2
->>>>>>>0x74 belong 0x3C
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0x58 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-# Order 6: EXEC, FORMAT, KERNEL, PROC
->0 belong 0x100
->>0xC belong 0x44
->>>0x54 belong 1
->>>>0x60 belong 4
->>>>>0x64 belong 1
->>>>>>0x68 belong 2
->>>>>>>0x74 belong 0x2C
->>>>>>>>0xB4 belong 4 core file
->>>>>>>>>0x44 string >\0 from '%s'
->>>>>>>>>0xC4 belong 3 - received SIGQUIT
->>>>>>>>>0xC4 belong 4 - received SIGILL
->>>>>>>>>0xC4 belong 5 - received SIGTRAP
->>>>>>>>>0xC4 belong 6 - received SIGABRT
->>>>>>>>>0xC4 belong 7 - received SIGEMT
->>>>>>>>>0xC4 belong 8 - received SIGFPE
->>>>>>>>>0xC4 belong 10 - received SIGBUS
->>>>>>>>>0xC4 belong 11 - received SIGSEGV
->>>>>>>>>0xC4 belong 12 - received SIGSYS
->>>>>>>>>0xC4 belong 33 - received SIGXCPU
->>>>>>>>>0xC4 belong 34 - received SIGXFSZ
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/human68k b/contrib/libs/libmagic/magic/Magdir/human68k
deleted file mode 100644
index 707c74098b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/human68k
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: human68k,v 1.6 2021/04/26 15:56:00 christos Exp $
-# human68k: file(1) magic for Human68k (X680x0 DOS) binary formats
-# Magic too short!
-#0 string HU Human68k
-#>68 string LZX LZX compressed
-#>>72 string >\0 (version %s)
-#>(8.L+74) string LZX LZX compressed
-#>>(8.L+78) string >\0 (version %s)
-#>60 belong >0 binded
-#>(8.L+66) string #HUPAIR hupair
-#>0 string HU X executable
-#>(8.L+74) string #LIBCV1 - linked PD LIBC ver 1
-#>4 belong >0 - base address %#x
-#>28 belong >0 not stripped
-#>32 belong >0 with debug information
-#0 beshort 0x601a Human68k Z executable
-#0 beshort 0x6000 Human68k object file
-#0 belong 0xd1000000 Human68k ar binary archive
-#0 belong 0xd1010000 Human68k ar ascii archive
-#0 beshort 0x0068 Human68k lib archive
-#4 string LZX Human68k LZX compressed
-#>8 string >\0 (version %s)
-#>4 string LZX R executable
-#2 string #HUPAIR Human68k hupair R executable
diff --git a/contrib/libs/libmagic/magic/Magdir/ibm370 b/contrib/libs/libmagic/magic/Magdir/ibm370
deleted file mode 100644
index dc976f8705..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ibm370
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ibm370,v 1.11 2021/03/14 16:51:45 christos Exp $
-# ibm370: file(1) magic for IBM 370 and compatibles.
-#
-# "ibm370" said that 0x15d == 0535 was "ibm 370 pure executable".
-# What the heck *is* "USS/370"?
-# AIX 4.1's "/etc/magic" has
-#
-# 0 short 0535 370 sysV executable
-# >12 long >0 not stripped
-# >22 short >0 - version %d
-# >30 long >0 - 5.2 format
-# 0 short 0530 370 sysV pure executable
-# >12 long >0 not stripped
-# >22 short >0 - version %d
-# >30 long >0 - 5.2 format
-#
-# instead of the "USS/370" versions of the same magic numbers.
-#
-0 beshort 0537 370 XA sysV executable
->12 belong >0 not stripped
->22 beshort >0 - version %d
->30 belong >0 - 5.2 format
-0 beshort 0532 370 XA sysV pure executable
->12 belong >0 not stripped
->22 beshort >0 - version %d
->30 belong >0 - 5.2 format
-0 beshort 054001 370 sysV pure executable
->12 belong >0 not stripped
-0 beshort 055001 370 XA sysV pure executable
->12 belong >0 not stripped
-0 beshort 056401 370 sysV executable
->12 belong >0 not stripped
-0 beshort 057401 370 XA sysV executable
->12 belong >0 not stripped
-0 beshort 0531 SVR2 executable (Amdahl-UTS)
->12 belong >0 not stripped
->24 belong >0 - version %d
-0 beshort 0534 SVR2 pure executable (Amdahl-UTS)
->12 belong >0 not stripped
->24 belong >0 - version %d
-0 beshort 0530 SVR2 pure executable (USS/370)
->12 belong >0 not stripped
->24 belong >0 - version %d
-0 beshort 0535 SVR2 executable (USS/370)
->12 belong >0 not stripped
->24 belong >0 - version %d
-
-# NETDATA (https://en.wikipedia.org/wiki/NETDATA)
-# -\INMR01 In EBCDIC
-0 string \x60\xe0\xc9\xd5\xd4\xd9\xf0\xf1 IBM NETDATA file
diff --git a/contrib/libs/libmagic/magic/Magdir/ibm6000 b/contrib/libs/libmagic/magic/Magdir/ibm6000
deleted file mode 100644
index 724b64d3a5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ibm6000
+++ /dev/null
@@ -1,35 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ibm6000,v 1.15 2021/07/03 14:01:46 christos Exp $
-# ibm6000: file(1) magic for RS/6000 and the RT PC.
-#
-0 beshort 0x01df executable (RISC System/6000 V3.1) or obj module
->12 belong >0 not stripped
-# Breaks sun4 statically linked execs.
-#0 beshort 0x0103 executable (RT Version 2) or obj module
-#>2 byte 0x50 pure
-#>28 belong >0 not stripped
-#>6 beshort >0 - version %ld
-# GRR: line below is too general as it also matches TTComp archive, ASCII, 1K handled by ./archive
-0 beshort 0x0104 shared library
-# GRR: line below is too general as it also matches TTComp archive, ASCII, 2K handled by ./archive
-0 beshort 0x0105 ctab data
-0 beshort 0xfe04 structured file
-0 string 0xabcdef AIX message catalog
-0 belong 0x000001f9 AIX compiled message catalog
-0 string \<aiaff> archive
-0 string \<bigaf> archive (big format)
-0 belong 0x09006bea AIX backup/restore format file
-0 belong 0x09006fea AIX backup/restore format file
-
-0 beshort 0x01f7 64-bit XCOFF executable or object module
->20 belong 0 not stripped
-# GRR: this test is still too general as it also catches many FATs of DOS filesystems
-4 belong &0x0feeddb0
-# a real core dump cannot be both 32-bit and 64-bit
->7 byte&0x03 !3 AIX core file
->>1 byte &0x01 fulldump
->>7 byte &0x01 32-bit
->>>0x6e0 string >\0 \b, %s
->>7 byte &0x02 64-bit
->>>0x524 string >\0 \b, %s
diff --git a/contrib/libs/libmagic/magic/Magdir/icc b/contrib/libs/libmagic/magic/Magdir/icc
deleted file mode 100644
index 15fd76b8d5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/icc
+++ /dev/null
@@ -1,214 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: icc,v 1.7 2021/04/26 15:56:00 christos Exp $
-# icc: file(1) magic for International Color Consortium file formats
-
-#
-# Color profiles as per the ICC's "Image technology colour management -
-# Architecture, profile format, and data structure" specification.
-# See
-#
-# http://www.color.org/specification/ICC1v43_2010-12.pdf
-#
-# for Specification ICC.1:2010 (Profile version 4.3.0.0).
-# URL: http://fileformats.archiveteam.org/wiki/ICC_profile
-# Reference: http://www.color.org/iccmax/ICC.2-2016-7.pdf
-# Update: Joerg Jenderek
-#
-# Bytes 36 to 39 contain a generic profile file signature of "acsp";
-# bytes 40 to 43 "may be used to identify the primary platform/operating
-# system framework for which the profile was created".
-#
-# check and display ICC/ICM color profile
-0 name color-profile
->36 string acsp
-# skip ASCII text like Cognacspirit.txt by requiring month <= 12
->>26 ubeshort <13
-# platform/operating system. Only 5 mentioned
-
-#
-# This appears to be what's used for Apple ColorSync profiles.
-# Instead of adding that, Apple just changed the generic "acsp" entry
-# to be for "ColorSync ICC Color Profile" rather than "Kodak Color
-# Management System, ICC Profile".
-# Yes, it's "APPL", not "AAPL"; see the spec.
->>>40 string APPL ColorSync
-
-# Microsoft ICM color profile
->>>40 string MSFT Microsoft
-
-# Yes, that's a blank after "SGI".
->>>40 string SGI\ SGI
-
-# XXX - is this what's used for the Sun KCMS or not? The standard file
-# uses just "acsp" for that, but Apple's file uses it for "ColorSync",
-# and there *is* an identified "primary platform" value of SUNW.
->>>40 string SUNW Sun KCMS
-
-# 5th platform
->>>40 string TGNT Taligent
-
-# remaining "l" "e" of "color profile" printed later to avoid error
->>>40 string x color profi
-#>>>40 string x (%.4s)
-!:mime application/vnd.iccprofile
-# for "ICM" extension only versions 2.x and for Kodak "CC" 2.0 is found
->>>8 ubyte =2
-# do not use empty message text to avoid error like
-# icc, 82: Warning: Current entry does not yet have a description for adding a EXTENSION type
-# file.exe: could not find any valid magic files!
->>>>9 ubyte !0 \ble
-!:ext icc/icm
-# minor version
->>>>9 ubyte =0 \bl
-# Kodak colour management system
->>>>>4 string =KCMS \be
-!:ext icc/icm/cc
->>>>>4 string !KCMS \be
-!:ext icc/icm
->>>8 ubyte !2 \ble
-!:ext icc
-# Profile version major.4bit-minor.sub1.sub2 like 4.3.0.0 (04300000h)
->>>8 ubyte x %u
->>>9 ubyte/16 x \b.%u
-# reserved and shall be null but 205.205 in umx1220u.icm
->>>10 ubyte >0 \b.%u
->>>>11 ubyte >0 \b.%u
-# preferred colour management module like appl CCMS KCMS Lino UCCM "Win " "FF "
-# skip space like in brmsl08f.icm and null like in brmsl09f.icm, brmsl07f.icm
->>>4 string >\ \b, type %.2s
->>>>6 string >\ \b%.1s
->>>>>7 string >\ \b%.1s
-# colour space "XYZ " "Lab " "RGB " CMYK GRAY ...
->>>16 string x \b, %.3s
->>>19 string >\ \b%.1s
-# Profile Connection Space (PCS) field usually "XYZ " or "Lab " but sometimes
-# null or CMYK like in ISOcoated_v2_to_PSOcoated_v3_DeviceLink.icc
->>>20 string >\0 \b/%.3s
->>>>23 string >\ \b%.1s
-# eleven device classes
->>>12 string x \b-%.4s device
-# skip 00001964h in hpf69000.icc or 0h in XRDC50Q.ICM or " ROT" in brmsl05f.icm
->>>52 string >\040
-# skip "none" model like in "Trinitron Compatible 9300K G2.2.icm"
->>>>52 ubelong !0x6e6f6e65
-# device manufacturer field like "HP " "IBM " EPSO
->>>>>48 string x \b, %.2s
->>>>>50 string >\ \b%.1s
->>>>>51 string >\ \b%.1s
-# model like "ADI " "A265" and skip 20000404h in IS330.icm for RICOH RUSSIAN-SC
->>>>>52 string >\ \ \b/%.3s
->>>>>>55 string >\ \b%.1s
->>>>>52 string x model
-# creator (often same as manufacture) like HP SONY XROX or null like in A925A.icm
->>>80 string >\0 by %.2s
->>>>82 string >\ \b%.1s
->>>>>83 string >\ \b%.1s
-# profile size
->>>0 ubelong x \b, %u bytes
-# skip invalid date 0 like in linearSRGB.icc
->>>24 ubequad !0
-# datetime dd-mm-yyyy hh:mm:ss
->>>>28 ubeshort x \b, %u
-# month <= 12
->>>>26 ubeshort x \b-%u
-# year
->>>>24 ubeshort x \b-%u
-# do not display midnight time like in CNHP8308.ICC
->>>>30 ubequad&0xFFffFFffFFff0000 !0
-# hour <= 24
->>>>>30 ubeshort x %u
-# minutes <= 59
->>>>>32 ubeshort x \b:%.2u
-# seconds <= 59
->>>>>34 ubeshort x \b:%.2u
-# vendor specific flags like 2 in HPCLJ5.ICM
->>>44 ubeshort >0 \b, %#x vendor flags
-# profile flags bits 0-2 of least 16 used by ICC
-#>>>44 ubelong >0 \b, %#x flags
-# icEmbeddedProfileTrue
->>>44 ubelong &1 \b, embedded
-# icEmbeddedProfileFalse
-#>>>44 ubelong ^1 \b, not embedded
-# icUseWithEmbeddedDataOnly
->>>44 ubelong &2 \b, dependently
-# icUseAnywhere
-#>>>44 ubelong ^2 \b, independently
->>>44 ubelong &4 \b, MCS
-#>>>44 ubelong ^4 \b, no MCS
-# vendor specific device attributes 1~srgb.icc
-# E000D00h~CNB7QEDA.ICM C000A00h~CNB5FCAA.ICM 01040401h~CNB25PE3.ICM
->>>56 ubelong >0 \b, %#x vendor attribute
-# ICC device attributes bits 0-7 used
-#>>>60 ubelong x \b, %#x attribute
-# http://www.color.org/icc34.h
->>>60 ubelong &0x01 \b, transparent
-#>>>60 ubelong ^0x01 \b, reflective
->>>60 ubelong &0x02 \b, matte
-#>>>60 ubelong ^0x02 \b, glossy
->>>60 ubelong &0x04 \b, negative
-#>>>60 ubelong ^0x04 \b, positive
->>>60 ubelong &0x08 \b, black&white
-#>>>60 ubelong ^0x08 \b, colour
->>>60 ubelong &0x10 \b, non-paper
-#>>>60 ubelong ^0x10 \b, paper
->>>60 ubelong &0x20 \b, non-textured
-#>>>60 ubelong ^0x20 \b, textured
->>>60 ubelong &0x40 \b, non-isotropic
-#>>>60 ubelong ^0x40 \b, isotropic
->>>60 ubelong &0x80 \b, self-luminous
-#>>>60 ubelong ^0x80 \b, non-self-luminous
-# rendering intent 0-3 but 7AEA5027h in EE051__1.ICM 6CB1BCh in EE061__1.ICM
->>>64 ubelong >3 \b, %#x rendering intent
-#>>>64 ubelong =0 \b, perceptual
->>>64 ubelong =1 \b, relative colorimetric
->>>64 ubelong =2 \b, saturation
->>>64 ubelong =3 \b, absolute colorimetric
-# PCS illuminant (3*s15Fixed16Numbers) often 0000f6d6 00010000 0000d32d
->>>71 ubequad !0xd6000100000000d3 \b, PCS
-# usually X~0.9642*65536=63189.8112~63190=F6D5h ; but also found
-# often F6D6 in gt5000r.icm, F6B8 in kodakce.icm, F6CA in RSWOP.icm
->>>>68 ubelong !0x0000f6d5 X=%#x
-# usually Y=1.0~00010000h but Y=0 in brmsl07f.icm
->>>>72 ubelong !0x00010000 Y=%#x
-# usually Z~0.8249*65536=54060.6464~54061=D32Dh ; but also found
-# D2F7 in hp1200c.icm, often D32C in A925A.icm, D309 in RSWOP.icm , D2F8 in kodak_dc.icm
->>>>76 ubelong !0x0000d32d Z=%#x
-# Profile ID. MD5 fingerprinting method as defined in Internet RFC 1321.
->>>84 ubequad >0 \b, %#llx MD5
-# reserved in older versions should be zero but also found CDCDCDCDCDCDCDCD
-#>>100 ubequad x \b %#llx reserved
-# tag table
-# 6 <= tags count <= 43
-#>>>128 ubelong >43 \b, %u tags
->>>128 ubelong x
-# shall contain the profileDescriptionTag "desc" , copyrightTag "cprt"
-# search range = tags count * 12 - 8 <= maximal tag count * 12 - 8 = 43 * 12 - 8 = 508
->>>>132 search/508 cprt
-# but no copyright tag in linearSRGB.icc
-# beneath /System/Library/Frameworks/WebKit.framework/
-# Versions/A/Frameworks/WebCore.framework/Versions/A/Resources
->>>>132 default x \b, no copyright tag
-# 1st tag
-#>>>132 string x \b, 1st tag %.4s
-#>>>136 ubelong x %#x offset
-#>>>140 ubelong x %#x len
-# 2nd tag,...
-# look also for profileDescriptionTag "desc"
->>>132 search/508 desc
-# look further for TextDescriptionType "desc" signature
->>>>(&0.L) string =desc
->>>>>&4 pstring/l x "%s"
-# alternatively look for multiLocalizedUnicodeType "mluc" signature like in VideoPAL.icc
->>>>(&0.L) string =mluc
->>>>>&(&8.L) ubequad x
->>>>>>&4 bestring16 x '%s'
-
-# Any other profile.
-# XXX - should we use "acsp\0\0\0\0" for "no primary platform" profiles,
-# and use "acsp" for everything else and dump the "primary platform"
-# string in those cases?
-36 string acsp
->0 use color-profile
-
-
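The entries above walk the fixed 128-byte ICC profile header field by field (version at offset 8, device class at 12, colour space at 16, PCS at 20, creation date at 24, 'acsp' signature at 36, primary platform at 40, flags at 44, rendering intent at 64, PCS illuminant at 68, creator at 80, MD5 at 84, tag count at 128). As a rough illustration only, and not part of file(1), a minimal Python sketch that reads the same offsets could look like this (the helper name read_icc_header is made up):

    import struct

    def read_icc_header(path):
        # Read the 128-byte header plus the 4-byte tag count that follows it.
        with open(path, "rb") as f:
            hdr = f.read(132)
        if len(hdr) < 132 or hdr[36:40] != b"acsp":
            raise ValueError("no 'acsp' signature at offset 36")
        return {
            "size": struct.unpack_from(">I", hdr, 0)[0],        # profile size
            "cmm": hdr[4:8].decode("latin-1"),                  # preferred CMM
            "version": f"{hdr[8]}.{hdr[9] >> 4}",               # e.g. 4.3
            "device_class": hdr[12:16].decode("latin-1"),
            "color_space": hdr[16:20].decode("latin-1"),        # e.g. "RGB "
            "pcs": hdr[20:24].decode("latin-1"),                # "XYZ " or "Lab "
            "created": struct.unpack_from(">6H", hdr, 24),      # y, m, d, h, min, s
            "platform": hdr[40:44].decode("latin-1"),           # APPL MSFT SGI ...
            "rendering_intent": struct.unpack_from(">I", hdr, 64)[0],
            "tag_count": struct.unpack_from(">I", hdr, 128)[0],
        }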
diff --git a/contrib/libs/libmagic/magic/Magdir/iff b/contrib/libs/libmagic/magic/Magdir/iff
deleted file mode 100644
index 258d16a4e1..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/iff
+++ /dev/null
@@ -1,80 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: iff,v 1.18 2022/03/21 19:57:18 christos Exp $
-# iff: file(1) magic for Interchange File Format (see also "audio" & "images")
-#
-# Daniel Quinlan (quinlan@yggdrasil.com) -- IFF was designed by Electronic
-# Arts for file interchange. It has also been used by Apple, SGI, and
-# especially Commodore-Amiga.
-#
-# IFF files begin with an 8 byte FORM header, followed by a 4 character
-# FORM type, which is followed by the first chunk in the FORM.
-
-0 string FORM IFF data
-#>4 belong x \b, FORM is %d bytes long
-# audio formats
->8 string AIFF \b, AIFF audio
-!:mime audio/x-aiff
->8 string AIFC \b, AIFF-C compressed audio
-!:mime audio/x-aiff
->8 string 8SVX \b, 8SVX 8-bit sampled sound voice
-!:mime audio/x-aiff
->8 string 16SV \b, 16SV 16-bit sampled sound voice
->8 string SAMP \b, SAMP sampled audio
->8 string MAUD \b, MAUD MacroSystem audio
->8 string SMUS \b, SMUS simple music
->8 string CMUS \b, CMUS complex music
-# image formats
->8 string ILBMBMHD \b, ILBM interleaved image
->>20 beshort x \b, %d x
->>22 beshort x %d
->8 string RGBN \b, RGBN 12-bit RGB image
->8 string RGB8 \b, RGB8 24-bit RGB image
->8 string DEEP \b, DEEP TVPaint/XiPaint image
->8 string DR2D \b, DR2D 2-D object
->8 string TDDD \b, TDDD 3-D rendering
->8 string LWOB \b, LWOB 3-D object
->8 string LWO2 \b, LWO2 3-D object, v2
->8 string LWLO \b, LWLO 3-D layered object
->8 string REAL \b, REAL Real3D rendering
->8 string MC4D \b, MC4D MaxonCinema4D rendering
->8 string ANIM \b, ANIM animation
->8 string YAFA \b, YAFA animation
->8 string SSA\ \b, SSA super smooth animation
->8 string FANT \b, Fantavision animation
->8 string ACBM \b, ACBM continuous image
->8 string FAXX \b, FAXX fax image
->8 string STFX \b, ST-Fax image
->8 string IMAGIHDR \b, CD-i image
-# other formats
->8 string FTXT \b, FTXT formatted text
->8 string CTLG \b, CTLG message catalog
->8 string PREF \b, PREF preferences
->8 string DTYP \b, DTYP datatype description
->8 string PTCH \b, PTCH binary patch
->8 string AMFF \b, AMFF AmigaMetaFile format
->8 string WZRD \b, WZRD StormWIZARD resource
->8 string DOC\040 \b, DOC desktop publishing document
->8 string SWRT \b, SWRT Final Copy/Writer document
->8 string WORD \b, ProWrite document
->8 string WTXT \b, WTXT Wordworth document
->8 string WOWO \b, WOWO Wordworth document
->8 string WVQA \b, Westwood Studios VQA Multimedia,
->>24 leshort x %d video frames,
->>26 leshort x %d x
->>28 leshort x %d
->8 string MOVE \b, Wing Commander III Video
->>12 string _PC_ \b, PC version
->>12 string 3DO_ \b, 3DO version
-
-# These go at the end of the iff rules
-#
-# David Griffith <dave@661.org>
-# I don't see why these might collide with anything else.
-#
-# Interactive Fiction related formats
-#
->8 string IFRS \b, Blorb Interactive Fiction
->>24 string Exec with executable chunk
->8 string IFZS \b, Z-machine or Glulx saved game file (Quetzal)
-!:mime application/x-blorb
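The IFF test above only needs the first twelve bytes: the literal 'FORM', a 32-bit big-endian chunk length, and the four-character FORM type that selects one of the messages listed. A minimal sketch of that check, assuming exactly the layout described in the comment at the top of this file (the function name is illustrative):

    import struct

    def read_iff_form(path):
        with open(path, "rb") as f:
            head = f.read(12)
        if len(head) < 12 or head[:4] != b"FORM":
            raise ValueError("not an IFF FORM file")
        length = struct.unpack(">I", head[4:8])[0]   # bytes following the 8-byte header
        form_type = head[8:12].decode("latin-1")     # e.g. AIFF, ILBM, ANIM, IFZS
        return form_type, length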
diff --git a/contrib/libs/libmagic/magic/Magdir/images b/contrib/libs/libmagic/magic/Magdir/images
deleted file mode 100644
index 48e9f6dabf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/images
+++ /dev/null
@@ -1,4219 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: images,v 1.243 2023/07/17 16:49:09 christos Exp $
-# images: file(1) magic for image formats (see also "iff", and "c-lang" for
-# XPM bitmaps)
-#
-# originally from jef@helios.ee.lbl.gov (Jef Poskanzer),
-# additions by janl@ifi.uio.no as well as others. Jan also suggested
-# merging several one- and two-line files into here.
-#
-# little magic: PCX (first byte is 0x0a)
-
-# Targa - matches `povray', `ppmtotga' and `xv' outputs
-# by Philippe De Muyter <phdm@macqel.be>
-# URL: http://justsolve.archiveteam.org/wiki/TGA
-# Reference: http://www.dca.fee.unicamp.br/~martino/disciplinas/ea978/tgaffs.pdf
-# Update: Joerg Jenderek
-# at 2, byte ImgType must be 1, 2, 3, 9, 10 or 11
-# ,32 or 33 (both not observed)
-# at 1, byte CoMapType must be 1 if ImgType is 1 or 9, 0 otherwise
-# or theoretically 2-128 reserved for use by Truevision or 128-255 may be used for developer applications
-# at 3, leshort Index is 0 for povray, ppmtotga and xv outputs
-# `xv' recognizes only a subset of the following (RGB with pixelsize = 24)
-# `tgatoppm' recognizes a superset (Index may be anything)
-#
-# test of Color Map Type 0~no 1~color map
-# and Image Type 1 2 3 9 10 11 32 33
-# and Color Map Entry Size 0 15 16 24 32
-0 ubequad&0x00FeC400000000C0 0
-# Conflict with MPEG sequences.
-!:strength -40
-# Prevent conflicts with CRI ADX.
-#>(2.S-2) belong !0x28632943
-# above line does not work for rgb32_top_left_rle.tga
-# skip some MPEG sequence *.vob and some CRI ADX audio with improbable interleave bits
->17 ubyte&0xC0 !0xC0
-# skip more garbage like *.iso by looking for positive image type
->>2 ubyte >0
-# skip some compiled terminfo like xterm+tmux by looking for image type less than or equal to 33
->>>2 ubyte <34
-# skip some MPEG sequence *.vob HV001T01.EVO winnicki.mpg with unacceptable alpha channel depth 11
->>>>17 ubyte&0x0F !11
-# skip arches.3200 , Finder.Root , Slp.1 by looking for low pixel depth 1 8 15 16 24 32
->>>>>16 ubyte 1
->>>>>>0 use tga-image
->>>>>16 ubyte 8
->>>>>>0 use tga-image
->>>>>16 ubyte 15
->>>>>>0 use tga-image
->>>>>16 ubyte 16
->>>>>>0 use tga-image
->>>>>16 ubyte 24
->>>>>>0 use tga-image
->>>>>16 ubyte 32
->>>>>>0 use tga-image
-# display tga bitmap image information
-0 name tga-image
->2 ubyte <34 Targa image data
-!:mime image/x-tga
-!:apple ????TPIC
-# normal extension .tga but some Truevision products used others:
-# tpic (Apple), icb (Image Capture Board), vda (Video Display Adapter), vst (NuVista), win (UNSURE about that)
-!:ext tga/tpic/icb/vda/vst
-# image type 1 2 3 9 10 11 32 33
->2 ubyte&0xF7 1 - Map
->2 ubyte&0xF7 2 - RGB
-# alpha channel
->>17 ubyte&0x0F >0 \bA
->2 ubyte&0xF7 3 - Mono
-# type not found, but by http://www.fileformat.info/format/tga/corion.htm
-# Compressed color-mapped data, using Huffman, Delta, and runlength encoding
->2 ubyte 32 - Color
-# Compressed color-mapped data, using Huffman, Delta, and RLE. 4-pass quadtree- type process
->2 ubyte 33 - Color
-# Color Map Type 0~no 1~color map
->1 ubyte 1 (
-# first color map entry, 0 normal
->>3 uleshort >0 \b%d-
-# color map length 0 2 1dh 3bh d9h 100h
->>5 uleshort x \b%d)
-# 8~run length encoding bit
->2 ubyte&0x08 8 - RLE
-# gimp can create big pictures!
->12 uleshort >0 %d x
->12 uleshort =0 65536 x
-# image height. 0 interpreted as 65536
->14 uleshort >0 %d
->14 uleshort =0 65536
-# Image Pixel depth 1 8 15 16 24 32
->16 ubyte x x %d
-# X origin of image. 0 normal
->8 uleshort >0 +%d
-# Y origin of image. 0 normal; positive for top
->10 uleshort >0 +%d
-# Image descriptor: bits 3-0 give the alpha channel depth, bits 5-4 give direction
-# alpha depth like: 1 8
->17 ubyte&0x0F >0 - %d-bit alpha
-# bits 5-4 give direction. normal bottom left
->17 ubyte &0x20 - top
-#>17 ubyte ^0x20 - bottom
->17 ubyte &0x10 - right
-#>17 ubyte ^0x10 - left
-# some info say other bits 6-7 should be zero
-# but data storage interleave by http://www.fileformat.info/format/tga/corion.htm
-# 00 - no interleave; 01 - even/odd interleave; 10 - four way interleave; 11 - reserved
-#>17 ubyte&0xC0 0x00 - no interleave
->17 ubyte&0xC0 0x40 - interleave
->17 ubyte&0xC0 0x80 - four way interleave
->17 ubyte&0xC0 0xC0 - reserved
-# positive length implies identification field
->0 ubyte >0
->>18 string x "%s"
-# last 18 bytes of newer tga file footer signature
->18 search/4261301/s TRUEVISION-XFILE.\0
-# extension area offset if not 0
->>&-8 ulelong >0
-# length of the extension area. normal 495 for version 2.0
->>>(&-4.l) uleshort 0x01EF
-# AuthorName[41]
->>>>&0 string >\0 - author "%-.40s"
-# Comment[324]=4 * 80 null terminated
->>>>&41 string >\0 - comment "%-.80s"
-# date
->>>>&365 ubequad&0xffffFFFFffff0000 !0
-# Day
->>>>>&-6 uleshort x %d
-# Month
->>>>>&-8 uleshort x \b-%d
-# Year
->>>>>&-4 uleshort x \b-%d
-# time
->>>>&371 ubequad&0xffffFFFFffff0000 !0
-# hour
->>>>>&-8 uleshort x %d
-# minutes
->>>>>&-6 uleshort x \b:%.2d
-# second
->>>>>&-4 uleshort x \b:%.2d
-# JobName[41]
->>>>&377 string >\0 - job "%-.40s"
-# JobHour Jobminute Jobsecond
->>>>&418 ubequad&0xffffFFFFffff0000 !0
->>>>>&-8 uleshort x %d
->>>>>&-6 uleshort x \b:%.2d
->>>>>&-4 uleshort x \b:%.2d
-# SoftwareId[41]
->>>>&424 string >\0 - %-.40s
-# SoftwareVersionNumber
->>>>&424 ubyte >0
->>>>>&40 uleshort/100 x %d
->>>>>&40 uleshort%100 x \b.%d
-# VersionLetter
->>>>>&42 ubyte >0x20 \b%c
-# KeyColor
->>>>&468 ulelong >0 - keycolor %#8.8x
-# Denominator of Pixel ratio. 0~no pixel aspect
->>>>&474 uleshort >0
-# Numerator
->>>>>&-4 uleshort >0 - aspect %d
->>>>>&-2 uleshort x \b/%d
-# Denominator of Gamma ratio. 0~no Gamma value
->>>>&478 uleshort >0
-# Numerator
->>>>>&-4 uleshort >0 - gamma %d
->>>>>&-2 uleshort x \b/%d
-# ColorOffset
-#>>>>&480 ulelong x - col offset %#8.8x
-# StampOffset
-#>>>>&484 ulelong x - stamp offset %#8.8x
-# ScanOffset
-#>>>>&488 ulelong x - scan offset %#8.8x
-# AttributesType
-#>>>>&492 ubyte x - Attributes %#x
-## EndOfTGA
-
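The tga-image rules above decode the 18-byte Targa header: image type at offset 2, colour-map fields at 1 and 3-7, origin at 8-11, width and height at 12-15 (0 meaning 65536), pixel depth at 16 and the descriptor byte at 17. A minimal sketch reading the same header, assuming those offsets (this is only an illustration, not the file(1) logic; the helper name is made up):

    import struct

    def read_tga_header(path):
        with open(path, "rb") as f:
            hdr = f.read(18)
        id_len, cmap_type, img_type = hdr[0], hdr[1], hdr[2]
        x0, y0, width, height, depth, desc = struct.unpack_from("<4H2B", hdr, 8)
        return {
            "image_type": img_type,              # 1/2/3 raw, 9/10/11 RLE
            "rle": bool(img_type & 0x08),
            "color_mapped": cmap_type == 1,
            "size": (width or 65536, height or 65536),
            "pixel_depth": depth,
            "alpha_bits": desc & 0x0F,
            "top_origin": bool(desc & 0x20),
        }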
-# PBMPLUS images
-# URL: https://en.wikipedia.org/wiki/Netpbm
-# The next byte following the magic is always whitespace.
-# adding 65 to strength so that Netpbm images come before "x86 boot sector" or
-# "DOS/MBR boot sector" identified by ./filesystems
-0 name netpbm
->3 regex/s =\^[0-9]{1,50}[\040\t\f\r\n]+[0-9]{1,50} Netpbm image data
->>&0 regex =[0-9]{1,50} \b, size = %s x
->>>&0 regex =[0-9]{1,50} \b %s
-
-0 search/1 P1
-# test for whitespace after 2 byte magic
->2 regex/2 [\040\t\f\r\n]
-# skip DROID x-fmt-164-signature-id-583.pbm with ten 0 digits
->>3 string !000000000
->>>0 use netpbm
->>>0 string x \b, bitmap
-!:strength + 65
-!:mime image/x-portable-bitmap
-!:ext pbm
-# check for character # starting a comment line
->>>3 ubyte =0x23
->>>>4 string x %s
-
-0 search/1 P2
->0 regex/4 P2[\040\t\f\r\n]
->>0 use netpbm
->>0 string x \b, greymap
-!:strength + 65
-# american spelling gray
-!:mime image/x-portable-graymap
-!:ext pgm
-
-0 search/1 P3
->0 regex/4 P3[\040\t\f\r\n]
->>0 use netpbm
->>0 string x \b, pixmap
-!:strength + 65
-!:mime image/x-portable-pixmap
-!:ext ppm
-
-0 string P4
->0 regex/4 P4[\040\t\f\r\n]
->>0 use netpbm
->>0 string x \b, rawbits, bitmap
-!:strength + 65
-!:mime image/x-portable-bitmap
-!:ext pbm
-
-0 string P5
->0 regex/4 P5[\040\t\f\r\n]
->>0 use netpbm
->>0 string x \b, rawbits, greymap
-!:strength + 65
-!:mime image/x-portable-greymap
-!:ext pgm
-
-0 string P6
->0 regex/4 P6[\040\t\f\r\n]
->>0 use netpbm
->>0 string x \b, rawbits, pixmap
-!:strength + 65
-!:mime image/x-portable-pixmap
-!:ext ppm/pnm
-
-# URL: https://en.wikipedia.org/wiki/Netpbm#PAM_graphics_format
-# Reference: http://fileformats.archiveteam.org/wiki/Portable_Arbitrary_Map
-# Update: Joerg Jenderek
-0 string P7
-# skip DROID fmt-405-signature-id-589.pam by looking for character like New Line
->2 ubyte !0xAB
-#>2 ubyte =0x0A
->>3 search/256/b WIDTH Netpbm PAM image file, size =
-!:mime image/x-portable-arbitrarymap
-!:ext pam
-!:strength + 65
->>>&1 string x %s
->>>3 search/256/b HEIGHT x
->>>>&1 string x %s
-# at offset 2 a New Line character (0xA) should appear
->>>2 ubyte !0x0A \b, %#x at offset 2 instead new line
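The P1-P7 entries above all key on a two-byte magic followed by whitespace, with the decimal width and height picked out by the netpbm helper via regular expressions. A minimal sketch of the same idea for the classic P1-P6 headers (assumed layout only; '#' comment lines may appear between tokens, and the function name is illustrative):

    import re

    def read_pnm_size(path):
        with open(path, "rb") as f:
            data = f.read(512)
        if not re.match(rb"P[1-6][ \t\f\r\n]", data):
            raise ValueError("not a P1..P6 Netpbm image")
        body = re.sub(rb"#[^\n]*", b"", data[2:])        # strip comment lines
        width, height = map(int, re.findall(rb"\d+", body)[:2])
        return data[:2].decode(), width, height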
-
-# From: bryanh@giraffe-data.com (Bryan Henderson)
-0 string \117\072 Solitaire Image Recorder format
->4 string \013 MGI Type 11
->4 string \021 MGI Type 17
-0 string .MDA MicroDesign data
->21 ubyte 48 version 2
->21 ubyte 51 version 3
-0 string .MDP MicroDesign page data
->21 ubyte 48 version 2
->21 ubyte 51 version 3
-
-# NIFF (Navy Interchange File Format, a modification of TIFF) images
-# [GRR: this *must* go before TIFF]
-0 string IIN1 NIFF image data
-!:mime image/x-niff
-
-# Canon RAW version 1 (CRW) files are a type of Canon Image File Format
-# (CIFF) file. These are apparently all little-endian.
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# URL: https://www.sno.phy.queensu.ca/~phil/exiftool/canon_raw.html
-0 string II\x1a\0\0\0HEAPCCDR Canon CIFF raw image data
-!:mime image/x-canon-crw
->16 uleshort x \b, version %d.
->14 uleshort x \b%d
-
-# Canon RAW version 2 (CR2) files are a kind of TIFF with an extra magic
-# number. Put this above the TIFF test to make sure we detect them.
-# These are apparently all little-endian.
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# URL: https://libopenraw.freedesktop.org/wiki/Canon_CR2
-0 string II\x2a\0\x10\0\0\0CR Canon CR2 raw image data
-!:mime image/x-canon-cr2
-!:strength +80
->10 ubyte x \b, version %d.
->11 ubyte x \b%d
-
-# Fujifilm RAF RAW image files with embedded JPEG data and compressed
-# or uncompressed CFA RAW data. Byte order: Big Endian.
-# URL: https://libopenraw.freedesktop.org/formats/raf/
-# Useful info from http://fileformats.archiveteam.org/wiki/Fujifilm_RAF.
-# File extension: RAF
-# Works for both the FinePix S2 Pro and the X-T3. Anybody have some more Fuji
-# raw samples available?
-# -- David Dyer-Bennet <dd-b@dd-b.net> 9-Sep-2021
-0 string FUJIFILMCCD-RAW Fujifilm RAF raw image data
-!:mime image/x-fuji-raf
-!:ext raf
->0x10 string x \b, format version %4.4s
->0x1C string x \b, camera %s
-
-# Tag Image File Format, from Daniel Quinlan (quinlan@yggdrasil.com)
-# The second word of TIFF files is the TIFF version number, 42, which has
-# never changed. The TIFF specification recommends testing for it.
-0 string MM\x00\x2a TIFF image data, big-endian
-!:strength +70
-!:mime image/tiff
-!:ext tif/tiff
->(4.L) use \^tiff_ifd
-0 string II\x2a\x00 TIFF image data, little-endian
-!:mime image/tiff
-!:strength +70
-!:ext tif/tiff
->(4.l) use tiff_ifd
-
-0 name tiff_ifd
->0 uleshort x \b, direntries=%d
->2 use tiff_entry
-
-0 name tiff_entry
-# NewSubFileType
->0 uleshort 0xfe
->>12 use tiff_entry
->0 uleshort 0x100
->>4 ulelong 1
->>>12 use tiff_entry
->>>8 uleshort x \b, width=%d
->0 uleshort 0x101
->>4 ulelong 1
->>>8 uleshort x \b, height=%d
->>>12 use tiff_entry
->0 uleshort 0x102
->>8 uleshort x \b, bps=%d
->>12 use tiff_entry
->0 uleshort 0x103
->>4 ulelong 1 \b, compression=
->>>8 uleshort 1 \bnone
->>>8 uleshort 2 \bhuffman
->>>8 uleshort 3 \bbi-level group 3
->>>8 uleshort 4 \bbi-level group 4
->>>8 uleshort 5 \bLZW
->>>8 uleshort 6 \bJPEG (old)
->>>8 uleshort 7 \bJPEG
->>>8 uleshort 8 \bdeflate
->>>8 uleshort 9 \bJBIG, ITU-T T.85
->>>8 uleshort 0xa \bJBIG, ITU-T T.43
->>>8 uleshort 0x7ffe \bNeXT RLE 2-bit
->>>8 uleshort 0x8005 \bPackBits (Macintosh RLE)
->>>8 uleshort 0x8029 \bThunderscan RLE
->>>8 uleshort 0x807f \bRasterPadding (CT or MP)
->>>8 uleshort 0x8080 \bRLE (Line Work)
->>>8 uleshort 0x8081 \bRLE (High-Res Cont-Tone)
->>>8 uleshort 0x8082 \bRLE (Binary Line Work)
->>>8 uleshort 0x80b2 \bDeflate (PKZIP)
->>>8 uleshort 0x80b3 \bKodak DCS
->>>8 uleshort 0x8765 \bJBIG
->>>8 uleshort 0x8798 \bJPEG2000
->>>8 uleshort 0x8799 \bNikon NEF Compressed
->>>8 default x
->>>>8 uleshort x \b(unknown %#x)
->>>12 use tiff_entry
->0 uleshort 0x106 \b, PhotometricInterpretation=
->>8 clear x
->>8 uleshort 0 \bWhiteIsZero
->>8 uleshort 1 \bBlackIsZero
->>8 uleshort 2 \bRGB
->>8 uleshort 3 \bRGB Palette
->>8 uleshort 4 \bTransparency Mask
->>8 uleshort 5 \bCMYK
->>8 uleshort 6 \bYCbCr
->>8 uleshort 8 \bCIELab
->>8 default x
->>>8 uleshort x \b(unknown=%#x)
->>12 use tiff_entry
-# FillOrder
->0 uleshort 0x10a
->>4 ulelong 1
->>>12 use tiff_entry
-# DocumentName
->0 uleshort 0x10d
->>(8.l) string x \b, name=%s
->>>12 use tiff_entry
-# ImageDescription
->0 uleshort 0x10e
->>(8.l) string x \b, description=%s
->>>12 use tiff_entry
-# Make
->0 uleshort 0x10f
->>(8.l) string x \b, manufacturer=%s
->>>12 use tiff_entry
-# Model
->0 uleshort 0x110
->>(8.l) string x \b, model=%s
->>>12 use tiff_entry
-# StripOffsets
->0 uleshort 0x111
->>12 use tiff_entry
-# Orientation
->0 uleshort 0x112 \b, orientation=
->>8 uleshort 1 \bupper-left
->>8 uleshort 3 \blower-right
->>8 uleshort 6 \bupper-right
->>8 uleshort 8 \blower-left
->>8 uleshort 9 \bundefined
->>8 default x
->>>8 uleshort x \b[*%d*]
->>12 use tiff_entry
-# XResolution
->0 uleshort 0x11a
->>8 ulelong x \b, xresolution=%d
->>12 use tiff_entry
-# YResolution
->0 uleshort 0x11b
->>8 ulelong x \b, yresolution=%d
->>12 use tiff_entry
-# ResolutionUnit
->0 uleshort 0x128
->>8 uleshort x \b, resolutionunit=%d
->>12 use tiff_entry
-# Software
->0 uleshort 0x131
->>(8.l) string x \b, software=%s
->>12 use tiff_entry
-# Datetime
->0 uleshort 0x132
->>(8.l) string x \b, datetime=%s
->>12 use tiff_entry
-# HostComputer
->0 uleshort 0x13c
->>(8.l) string x \b, hostcomputer=%s
->>12 use tiff_entry
-# WhitePoint
->0 uleshort 0x13e
->>12 use tiff_entry
-# PrimaryChromaticities
->0 uleshort 0x13f
->>12 use tiff_entry
-# YCbCrCoefficients
->0 uleshort 0x211
->>12 use tiff_entry
-# YCbCrPositioning
->0 uleshort 0x213
->>12 use tiff_entry
-# ReferenceBlackWhite
->0 uleshort 0x214
->>12 use tiff_entry
-# Copyright
->0 uleshort 0x8298
->>(8.l) string x \b, copyright=%s
->>12 use tiff_entry
-# ExifOffset
->0 uleshort 0x8769
->>12 use tiff_entry
-# GPS IFD
->0 uleshort 0x8825 \b, GPS-Data
->>12 use tiff_entry
-
-#>0 uleshort x \b, unknown=%#x
-#>>12 use tiff_entry
-
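The tiff_ifd/tiff_entry helpers above follow the standard TIFF layout: a byte-order mark and the value 42 in the first four bytes, a 32-bit offset to the first IFD, then a 16-bit entry count and 12-byte entries holding tag, type, count and value/offset. A minimal sketch of that walk (illustration only; the value field is returned raw, whereas the rules above dereference string tags):

    import struct

    def list_tiff_tags(path):
        with open(path, "rb") as f:
            data = f.read()
        if data[:4] == b"II*\x00":
            end = "<"                      # little-endian
        elif data[:4] == b"MM\x00*":
            end = ">"                      # big-endian
        else:
            raise ValueError("not a classic TIFF file")
        ifd = struct.unpack_from(end + "I", data, 4)[0]
        count = struct.unpack_from(end + "H", data, ifd)[0]
        entries = []
        for i in range(count):
            tag, typ, n, value = struct.unpack_from(end + "HHII", data, ifd + 2 + 12 * i)
            entries.append((tag, typ, n, value))   # e.g. 0x100 width, 0x101 height
        return entries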
-0 string MM\x00\x2b Big TIFF image data, big-endian
-!:mime image/tiff
-0 string II\x2b\x00 Big TIFF image data, little-endian
-!:mime image/tiff
-
-# PNG [Portable Network Graphics, or "PNG's Not GIF"] images
-# (Greg Roelofs, newt@uchicago.edu)
-# (Albert Cahalan, acahalan@cs.uml.edu)
-#
-# 137 P N G \r \n ^Z \n [4-byte length] I H D R [HEAD data] [HEAD crc] ...
-#
-
-# IHDR parser
-0 name png-ihdr
->0 ubelong x \b, %d x
->4 ubelong x %d,
->8 ubyte x %d-bit
->9 ubyte 0 grayscale,
->9 ubyte 2 \b/color RGB,
->9 ubyte 3 colormap,
->9 ubyte 4 gray+alpha,
->9 ubyte 6 \b/color RGBA,
-#>10 ubyte 0 deflate/32K,
->12 ubyte 0 non-interlaced
->12 ubyte 1 interlaced
-
-# Standard PNG image.
-0 string \x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x0DIHDR PNG image data
-!:mime image/png
-!:ext png
-!:strength +10
->16 use png-ihdr
-
-# Apple CgBI PNG image.
-0 string \x89PNG\x0d\x0a\x1a\x0a\x00\x00\x00\x04CgBI
->24 string \x00\x00\x00\x0DIHDR PNG image data (CgBI)
-!:mime image/png
-!:ext png
-!:strength +10
->>32 use png-ihdr
-
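The png-ihdr helper above prints the fixed-position IHDR fields: 32-bit width and height, then bit depth, colour type and the interlace flag. A minimal sketch of the same decode, assuming the standard (non-CgBI) signature shown in the first PNG entry (function name illustrative):

    import struct

    def read_png_ihdr(path):
        with open(path, "rb") as f:
            head = f.read(29)
        if head[:16] != b"\x89PNG\r\n\x1a\n\x00\x00\x00\x0dIHDR":
            raise ValueError("not a standard PNG (IHDR not first)")
        width, height, depth, color, _comp, _filt, interlace = \
            struct.unpack(">IIBBBBB", head[16:29])
        kinds = {0: "grayscale", 2: "RGB", 3: "colormap", 4: "gray+alpha", 6: "RGBA"}
        return width, height, depth, kinds.get(color, "unknown"), bool(interlace)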
-# possible GIF replacements; none yet released!
-# (Greg Roelofs, newt@uchicago.edu)
-#
-# GRR 950115: this was mine ("Zip GIF"):
-0 string GIF94z ZIF image (GIF+deflate alpha)
-!:mime image/x-unknown
-#
-# GRR 950115: this is Jeremy Wohl's Free Graphics Format (better):
-#
-0 string FGF95a FGF image (GIF+deflate beta)
-!:mime image/x-unknown
-#
-# GRR 950115: this is Thomas Boutell's Portable Bitmap Format proposal
-# (best; not yet implemented):
-#
-0 string PBF PBF image (deflate compression)
-!:mime image/x-unknown
-
-# GIF
-# Strength set up to beat 0x55AA DOS/MBR signature word lookups (+65)
-0 string GIF8 GIF image data
-!:strength +80
-!:mime image/gif
-!:apple 8BIMGIFf
-!:ext gif
->4 string 7a \b, version 8%s,
->4 string 9a \b, version 8%s,
->6 uleshort >0 %d x
->8 uleshort >0 %d
-#>10 ubyte &0x80 color mapped,
-#>10 ubyte&0x07 =0x00 2 colors
-#>10 ubyte&0x07 =0x01 4 colors
-#>10 ubyte&0x07 =0x02 8 colors
-#>10 ubyte&0x07 =0x03 16 colors
-#>10 ubyte&0x07 =0x04 32 colors
-#>10 ubyte&0x07 =0x05 64 colors
-#>10 ubyte&0x07 =0x06 128 colors
-#>10 ubyte&0x07 =0x07 256 colors
-
-# ITC (CMU WM) raster files. It is essentially a byte-reversed Sun raster,
-# 1 plane, no encoding.
-0 string \361\0\100\273 CMU window manager raster image data
->4 ulelong >0 %d x
->8 ulelong >0 %d,
->12 ulelong >0 %d-bit
-
-# Magick Image File Format
-# URL: https://imagemagick.org/script/miff.php
-# Reference: http://fileformats.archiveteam.org/wiki/MIFF
-# Update: Joerg Jenderek
-# http://www.nationalarchives.gov.uk/pronom/fmt/930
-0 search/256/bc id=imagemagick
-# skip bad ASCII text by requiring that the following character is a new line~0x0A or space~0x20
-#>&0 ubyte x \b, next character %#x
-# called by TriD ImageMagick Machine independent File Format bitmap
->&0 ubyte&0xD5 0 MIFF image data
-# https://reposcope.com/mimetype/image/miff
-#!:mime image/miff
-!:mime image/x-miff
-!:ext miff/mif
-# examples with standard file(1) magic
-#>>0 string =id=ImageMagick with standard magic
-# examples with unusual file(1) magic like
->>0 string !id=ImageMagick starting with
-# start with comment (brace) like http://samples.fileformat.info/.../AQUARIUM.MIF
->>>0 ubyte =0x7b comment
-# skip second character which is often a newline and show comment
->>>>2 string x "%s"
-# does not start with comment, probably letters with other case like Id=ImageMagick
-# ImageMagick-7.0.9-2/Magick++/demo/smile_anim.miff
->>>0 ubyte !0x7b
->>>>0 string >\0 '%-.14s'
-# URL: https://imagemagick.org/
-# Reference: https://imagemagick.org/script/magick-vector-graphics.php
-# From: Joerg Jenderek
-# Note: all white-spaces between commands are ignored
-0 string push
-# skip some white spaces
->5 search/3 graphic-context ImageMagick Vector Graphic
-# TODO: look for dangerous commands like CVE-2016-3715
-#!:mime text/plain
-!:mime image/x-mvg
-!:ext mvg
-
-# Artisan
-0 long 1123028772 Artisan image data
->4 long 1 \b, rectangular 24-bit
->4 long 2 \b, rectangular 8-bit with colormap
->4 long 3 \b, rectangular 32-bit (24-bit with matte)
-
-# FIG (Facility for Interactive Generation of figures), an object-based format
-# URL: http://fileformats.archiveteam.org/wiki/Fig
-# https://en.wikipedia.org/wiki/Xfig
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/f/fig.trid.xml
-# https://web.archive.org/web/20070920204655/http://epb.lbl.gov/xfig/fig-format.html
-# Update: Joerg Jenderek
-# Note: called "FIG vector drawing" by TrID,
-# 4 byte magic is assumed to be always at offset 0 and
-# verified by `fig2mpdf -v bootloader.fig && file bootloader.pdf`
-#0 search/1/tb #FIG FIG image text
-# GRR: with --keep-going option the line above gives duplicate messages
-0 search/1/ts #FIG
->&0 use image-xfig
-# binary data variant with non ASCII text characters like Control-A or °C in thermostat.fig
-0 search/1/bs #FIG
->&0 use image-xfig
-# display XFIG image describing text, mime type, file name extension and version
-0 name image-xfig
->8 ubyte x FIG image text
-#!:mime text/plain
-# https://reposcope.com/mimetype/image/x-xfig
-!:mime image/x-xfig
-!:ext fig
-# version string like: 1.4 2.1 3.1 3.2
->5 string x \b, version %.3s
-# sometimes after the version there is text like: "Produced by xfig version 3.2.5-alpha5"
->8 ubyte >0x0D
->>8 string x "%s"
-# should be point character (2Eh) of version string according to TrID
-#>6 ubyte !0x2E \b, at 6 %#x
-# hash character '#' (23h) at the beginning in most or probably all examples
-#>0 ubyte !0x23 \b, starting with character %#x
-# URL: http://fileformats.archiveteam.org/wiki/DeskMate_Draw
-# http://en.wikipedia.org/wiki/Deskmate
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dm-fig.trid.xml
-# From: Joerg Jenderek
-# Note: called "DeskMate Draw drawing" by TrID
-0 string \x14FIG DeskMate Drawing
-#!:mime application/octet-stream
-!:mime image/x-deskmate-fig
-!:ext fig
-# TODO:
-# "Cabri 3D Figure" by TrID fig-cabri.trid.xml
-# "Playmation Figure" by TrID fig-playmation.trid.xml
-
-# PHIGS
-0 string ARF_BEGARF PHIGS clear text archive
-0 string @(#)SunPHIGS SunPHIGS
-# version number follows, in the form m.n
->40 string SunBin binary
->32 string archive archive
-
-# GKS (Graphics Kernel System)
-0 string GKSM GKS Metafile
->24 string SunGKS \b, SunGKS
-
-# CGM image files
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/CGM
-# https://en.wikipedia.org/wiki/Computer_Graphics_Metafile
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cgm-ct.trid.xml
-# http://standards.iso.org/ittf/PubliclyAvailableStandards/c032381_ISO_IEC_8632-4_1999(E).zip
-# Note: called "Computer Graphics Metafile (Clear Text)" by TrID and
-# "Computer Graphics Metafile ASCII" by DROID or CGM by XnView
-# verified by LibreOffice and partly by XnView `nconvert -info *.CGM`
-# According to TrID only the letters B and M are always upcased, and by DROID often only B is upcased, for the command BEGIN METAFILE
-0 string/c begmf
-# skip SOME DROID fmt-301-signature-id-359.cgm fmt-301-signature-id-361.cgm fmt-302-signature-id-364.cgm
-# fmt-302-signature-id-365.cgm x-fmt-142-signature-id-350.cgm x-fmt-142-signature-id-351.cgm
->5 short !0
-# skip other versions of DROID fmt-301-signature-id-359.cgm fmt-301-signature-id-361.cgm fmt-302-signature-id-364.cgm
-# fmt-302-signature-id-365.cgm x-fmt-142-signature-id-350.cgm x-fmt-142-signature-id-351.cgm
->>5 short !0xABab clear text Computer Graphics Metafile
-# https://reposcope.com/mimetype/image/cgm
-!:mime image/cgm
-!:ext cgm
-# SF:NAME like: 'metafile example';
->>>5 string x %s
-# look for command METAFILE VERSION (MFVERSION <SOFTSEP> <I:VERSION>)
->>>2 search/128/c mfversion
-#>>>>&0 ubyte x SOFTSEP=%#x
-# version like: 1 3 4
->>>>&1 ubyte >0x31 \b, version %c
-# Summary: Computer Graphics Metafile (binary)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cgm-bin.trid.xml
-# https://standards.iso.org/ittf/PubliclyAvailableStandards/c032380_ISO_IEC_8632-3_1999(E).zip
-# Note: called "Computer Graphics Metafile (binary)" by TrID and DROID or CGM by XnView
-# verified by LibreOffice and partly by XnView `nconvert -info *.CGM`
-# look for BEGIN METAFILE (element Class 0 and ID 1 and "random" Parameter) that is binary C C C C 0 0 0 0 0 0 1 P P P P P
-0 ubeshort&0xFFe0 0x0020
-# skip SOME DROID fmt-303-signature-id-368.cgm fmt-304-signature-id-369.cgm fmt-305-signature-id-370.cgm fmt-306-signature-id-371.cgm
-# with containing only 28 bytes
->28 ubyte x
-# look for METAFILE VERSION (element class 1 and id 1 and parameter P1 with length 2) that is binary 0 0 0 1 i i i i i i 1 P P P 1 P
-# with "low" version; 2nd worst case argentin.cgm with parameter length 56
-# worst MS.CGM
-#>>2 search/73/b \x10\x22\0 binary Computer Graphics Metafile
->>2 search/128/b \x10\x22\0 binary Computer Graphics Metafile
-!:mime image/cgm
-!:ext cgm
-# metafile 2 byte version number like: 1 (most) 2 3 4
->>>&-1 ubeshort >1 \b, version %u
-# length number of 1st parameter octets in range 0 to 30 implies short command
->>>0 ubeshort&0x001F <31 \b, parameter length %u
-# length of string like: 8 9 10 11 12 29
-#>>>>2 ubyte x \b, %u BYTES (SHORT)
-# string like: 'HiJaak 2' 'Example 1' 'sahara.cgm' 'MASTERCLIPS--Art Of Business '
->>>>2 pstring >\0 '%s'
-# after 1st short command with even parameter length comes 2nd command like: 1022h 0010h (EAF00010.CGM 'HiJaak 2' FLOPPY2.CGM TIGER.CGM 'B:\TIGER.CGM')
->>>>0 ubeshort&0x0001 =0
->>>>>(2.b+3) ubeshort !0x1022 \b, 2nd command %#4.4x (short even)
-# after 1st short command with odd parameter length comes nil padding byte followed 2nd command like: 1022h
->>>>0 ubeshort&0x0001 =1
-#>>>>>(2.b+3) ubyte !0 \b, PADDING %#x
->>>>>(2.b+4) ubeshort !0x1022 \b, 2nd command %#4.4x (short odd)
-# 11111 binary (decimal 31) in the parameter field indicates that the command is in long-form
->>>0 ubeshort&0x001F =0x1F
-# bit 15 is partition flag with 1 for 'not-last' partition and 0 for 'last' partition
->>>>2 ubeshort&0x8000 !0 \b, partition flag %#4.4x
-# bits 0 to 14 is parameter list length; the number of following parameter octets; range 0 to 32767
-# length of 1st long command parameter like: 53
->>>>2 ubeshort&0x7Fff x \b, parameter length %u (long)
-# The two header words are then followed by the length of the 1st string like: 52
-#>>>>4 ubyte x \b, %u BYTES
-# string like: 'K:\PROJECTS\GRAPHICS\DWKS3.5\CLIPART\FLAGS\Italy.cgm'
->>>>4 pstring/B x '%s'
-# odd long parameter length implies single null padding octet to start command on word boundary
->>>>2 ubeshort&0x0001 =1
-# after 1st long command with odd parameter length comes nil padding byte followed by 2nd command like: 1022h
-#>>>>>(4.b+5) ubyte !0 \b, PADDING %#x
->>>>>(4.b+6) ubeshort !0x1022 \b, 2nd command %#4.4x (long odd)
-# even long parameter length implies next command directly is following
->>>>2 ubeshort&0x0001 =0
-# after 1st long command with even parameter length comes 2nd command like: 1022h 0x1054 (MS.CGM)
->>>>>(4.b+5) ubeshort !0x1022 \b, 2nd command %#4.4x (long even)
-# look for END METAFILE (element class 0 and id 2 and 0 parameter) that is binary 0 0 0 0 i i i i i 1 i P P P P P
->>>-2 ubeshort !0x0040 \b, NOT_FOUND_END_METAFILE
-
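The binary-CGM entries above all hinge on the 16-bit big-endian command word layout spelled out in the comments: element class in bits 15-12, element id in bits 11-5, parameter length in bits 4-0 (31 selects the long form with a separate length word). A minimal sketch of that decoding (illustrative only; function names are made up):

    import struct

    def decode_cgm_word(word):
        element_class = (word >> 12) & 0x0F     # BEGIN METAFILE has class 0
        element_id = (word >> 5) & 0x7F         # ... and element id 1
        param_len = word & 0x1F                 # 31 (0x1F) means long form
        return element_class, element_id, param_len

    def first_cgm_command(path):
        with open(path, "rb") as f:
            word = struct.unpack(">H", f.read(2))[0]
        return decode_cgm_word(word)            # e.g. 0x1022 decodes to (1, 1, 2)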
-# MGR bitmaps (Michael Haardt, u31b3hs@pool.informatik.rwth-aachen.de)
-0 string yz MGR bitmap, modern format, 8-bit aligned
-0 string zz MGR bitmap, old format, 1-bit deep, 16-bit aligned
-0 string xz MGR bitmap, old format, 1-bit deep, 32-bit aligned
-0 string yx MGR bitmap, modern format, squeezed
-
-# Fuzzy Bitmap (FBM) images
-0 string %bitmap\0 FBM image data
->30 long 0x31 \b, mono
->30 long 0x33 \b, color
-
-# facsimile data
-1 string PC\ Research,\ Inc group 3 fax data
->29 ubyte 0 \b, normal resolution (204x98 DPI)
->29 ubyte 1 \b, fine resolution (204x196 DPI)
-# From: Herbert Rosmanith <herp@wildsau.idv.uni.linz.at>
-0 string Sfff structured fax file
-
-# From: Joerg Jenderek <joerg.jen.der.ek@gmx.net>
-# URL: http://fileformats.archiveteam.org/wiki/Award_BIOS_logo
-# Note: verified by XnView command `nconvert -fullinfo *.EPA`
-0 string \x11\x06 Award BIOS Logo, 136 x 84
-!:mime image/x-award-bioslogo
-!:ext epa
-0 string \x11\x09 Award BIOS Logo, 136 x 126
-!:mime image/x-award-bioslogo
-!:ext epa
-# https://telparia.com/fileFormatSamples/image/epa/IO.EPA
-# Note: by bitmap-awbm-v1x1009.trid.xml called "Award BIOS logo bitmap (128x126) (v1)"
-# verified by RECOIL `recoil2png -o tmp.png IO.EPA; file tmp.png`
-0 string \x10\x09 Award BIOS Logo, 128 x 126
-!:mime image/x-award-bioslogo
-!:ext epa
-#0 string \x07\x1f BIOS Logo corrupted?
-# http://www.blackfiveservices.co.uk/awbmtools.shtml
-# http://biosgfx.narod.ru/v3/
-# http://biosgfx.narod.ru/abr-2/
-0 string AWBM
-# Note: by bitmap-awbm.trid.xml called "Award BIOS logo bitmap (v2)"
->4 uleshort <1981 Award BIOS Logo, version 2
-#>4 uleshort <1981 Award BIOS bitmap
-!:mime image/x-award-bioslogo2
-#!:mime image/x-award-bmp
-!:ext epa/bmp
-# image width is a multiple of 4
->>4 uleshort&0x0003 0
->>>4 uleshort x \b, %d
->>>6 uleshort x x %d
->>4 uleshort&0x0003 >0 \b,
->>>4 uleshort&0x0003 =1
->>>>4 uleshort x %d+3
->>>4 uleshort&0x0003 =2
->>>>4 uleshort x %d+2
->>>4 uleshort&0x0003 =3
->>>>4 uleshort x %d+1
->>>6 uleshort x x %d
-# at offset 8 starts imagedata followed by "RGB " marker
-
-# PC bitmaps (OS/2, Windows BMP files) (Greg Roelofs, newt@uchicago.edu)
-# https://en.wikipedia.org/wiki/BMP_file_format#DIB_header_.\
-# 28bitmap_information_header.29
-# Note: variant starting direct with DIB header see
-# http://fileformats.archiveteam.org/wiki/BMP
-# verified by ImageMagick version 6.8.9-8 command `identify *.dib`
-0 uleshort 40
-# skip bad samples like GAME by looking for valid number of color planes
->12 uleshort 1 Device independent bitmap graphic
-!:mime image/x-ms-bmp
-!:apple ????BMPp
-!:ext dib
->>4 ulelong x \b, %d x
->>8 ulelong x %d x
->>14 uleshort x %d
-# number of color planes (must be 1)
-#>>12 uleshort >1 \b, %u color planes
-# compression method: 0~no 1~RLE 8-bit/pixel 3~Huffman 1D
-#>>16 ulelong 3 \b, Huffman 1D compression
->>16 ulelong >0 \b, %u compression
-# image size is the size of raw bitmap; a dummy 0 can be given for BI_RGB bitmaps
->>20 ulelong x \b, image size %u
-# horizontal and vertical resolution of the image (pixel per metre, signed integer)
->>24 ulelong >0 \b, resolution %d x
->>>28 ulelong x %d px/m
-# number of colors in palette, or 0 to default to 2**n
-#>>32 ulelong >0 \b, %u colors
-# number of important colors used, or 0 when every color is important
->>36 ulelong >0 \b, %u important colors
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/VBM_(VDC_BitMap)
-# Reference: http://csbruce.com/cbm/postings/csc19950906-1.txt
-# http://mark0.net/download/triddefs_xml.7z
-# defs/b/bitmap-vbm.trid.xml
-# defs/b/bitmap-vbm-v3.trid.xml
-# Note: called "VDC BitMap" by TrID
-# verified by RECOIL `recoil2png -o tmp.png coke_can.vbm; file tmp.png`
-# begin with a signature of 'B' 'M' 0xCB, followed by a version byte 2 or 3
-# Similar to the unrelated Windows BMP format
-# check for VDC bitmap and then display image dimension and version
-0 name bitmap-vbm
->2 ubyte 0xCB VDC bitmap
-!:mime image/x-commodore-vbm
-# http://recoil.sourceforge.net/formats.html
-!:ext bm/vbm
-# the VBM format version number: 2 or 3
->>3 ubyte x \b, version %u
-# width of the image in Hi/Lo format
->>4 ubeshort x \b, %u
-# height of the image
->>6 ubeshort x x %u
-# version 3 images have the following additional header information
->>3 ubyte =3
-# data-encoding type: 0~uncompressed 1~RLE-compressed
->>>8 ubyte 0 \b, uncompressed
->>>8 ubyte 1 \b, RLE-compressed
-# byte code for general RLE repetitions
-#>>>9 ubyte x \b, RLE repetition code 0x%x
-# reserved := 0
-#>>>14 short >0 \b, reserved 0x%x
-# length of comment text; 0~no comment text
-#>>>16 ubeshort >0 \b, comment length %u
->>>16 pstring/H >0 \b, comment "%s"
-#
-0 string BM
-# check for magic and version 2 of VDC bitmap or BMP with cbSize=715=CB02
->2 ubeshort 0xCB02
->>6 short =0
->>>0 use bitmap-bmp
-# VDC bitmap height or maybe a few OS/2 BMP with nonzero "hotspot coordinates"
->>6 short !0
->>>0 use bitmap-vbm
-# check for magic and version 3 of VDC bitmap or BMP with cbSize=971=CB03
->2 ubeshort 0xCB03
-# check for reserved value (=0) of VDC bitmap
->>14 short =0
->>>0 use bitmap-vbm
-# BMP with cbSize=????03CBh and dib header size != 0
->>14 short !0
->>>0 use bitmap-bmp
-# cbSize is size of header or file size of Windows BMP bitmap
->2 default x
->>0 use bitmap-bmp
-0 name bitmap-bmp
->14 ulelong 12 PC bitmap, OS/2 1.x format
-!:mime image/bmp
-!:ext bmp
->>18 uleshort x \b, %d x
->>20 uleshort x %d
-# number of color planes (must be 1)
-#>>22 uleshort !1 \b, %u color planes
-# number of bits per pixel (color depth); found 4 8
->>24 uleshort x x %d
-# x, y coordinates of the hotspot
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# cbSize; size of file or header like 1Ah 228C8h
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%8.8x
-# offBits; offset to bitmap data like:
->>10 ulelong x \b, bits offset %u
-# http://fileformats.archiveteam.org/wiki/BMP#OS.2F2_BMP_2.0 no examples found
->14 ulelong 48 PC bitmap, OS/2 2.x format (DIB header size=48)
->14 ulelong 24 PC bitmap, OS/2 2.x format (DIB header size=24)
-# http://entropymine.com/jason/bmpsuite/bmpsuite/q/pal8os2v2-16.bmp
-# Note: by bitmap-bmp-v2o.trid.xml called "Windows Bitmap (v2o)"
->14 ulelong 16 PC bitmap, OS/2 2.x format (DIB header size=16)
-!:mime image/bmp
-!:apple ????BMPp
-!:ext bmp
-# image width and height fields are unsigned integers for OS/2
->>18 ulelong x \b, %u x
->>22 ulelong x %u
-# number of bits per pixel (color depth); found 8
->>28 uleshort >1 x %u
-# x, y coordinates of the hotspot
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# number of color planes (must be 1)
-#>>26 uleshort >1 \b, %u color planes
-# cbSize; size of file like: 241E
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%x
-# offBits; offset to bitmap data like: 41E
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset 0x%x
->14 ulelong 64 PC bitmap, OS/2 2.x format
-!:mime image/bmp
-!:apple ????BMPp
-!:ext bmp
-# image width and height fields are unsigned integers for OS/2
->>18 ulelong x \b, %u x
->>22 ulelong x %u
-# number of bits per pixel (color depth); found 1 4 8
->>28 uleshort >1 x %u
-# x, y coordinates of the hotspot
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
->>26 uleshort >1 \b, %u color planes
-# cbSize; size of file or headers
->>2 ulelong x \b, cbSize %u
-# BMP with cbSize 000002CBh=715 or 000003CBh=971 maybe misinterpreted as VDC bitmap
-#>>2 ulelong x \b, cbSize %#x
-# offBits; offset to bitmap data like 56h 5Eh 8Eh 43Eh
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset %#x
-#>>(10.l) ubequad !0 \b, bits %#16.16llx
-# BITMAPV2INFOHEADER adds RGB bit masks
->14 ulelong 52 PC bitmap, Adobe Photoshop
-!:mime image/bmp
-!:apple ????BMPp
-!:ext bmp
->>18 ulelong x \b, %d x
->>22 ulelong x %d x
-# number of bits per pixel (color depth); found 16 32
->>28 uleshort x %d
-# x, y coordinates of the hotspot; should be zero for Windows variant
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# cbSize; size of file like: 14A 7F42
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%x
-# offBits; offset to bitmap data like: 42h
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset 0x%x
-# BITMAPV3INFOHEADER adds alpha channel bit mask
->14 ulelong 56 PC bitmap, Adobe Photoshop with alpha channel mask
-!:mime image/bmp
-!:apple ????BMPp
-!:ext bmp
->>18 ulelong x \b, %d x
->>22 ulelong x %d x
-# number of bits per pixel (color depth); found 16 32
->>28 uleshort x %d
-# x, y coordinates of the hotspot; should be zero for Windows variant
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# cbSize; size of file like: 4E 7F46 131DE 14046h
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%x
-# offBits; offset to bitmap data like: 46h
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset 0x%x
->14 ulelong 40
-# jump 4 bytes before end of file/header to skip fmt-116-signature-id-118.dib
-# broken for large bitmaps
-#>>(2.l-4) ulong x PC bitmap, Windows 3.x format
->>14 ulelong 40 PC bitmap, Windows 3.x format
-!:mime image/bmp
-!:apple ????BMPp
->>>18 ulelong x \b, %d x
->>>22 ulelong x %d
-# 320 x 400 https://en.wikipedia.org/wiki/LOGO.SYS
->>>18 ulequad =0x0000019000000140 x
-!:ext bmp/sys
->>>18 ulequad !0x0000019000000140
-# compression method 2~RLE 4-bit/pixel implies also extension rle
->>>>30 ulelong 2 x
-!:ext bmp/rle
-# not RLE compressed and not 320x400 dimension
->>>>30 default x
-# "small" dimensions like: 14x15 15x16 16x14 16x16 32x32
-# https://en.wikipedia.org/wiki/Favicon
->>>>>18 ulequad&0xffFFffC0ffFFffC0 =0 x
-# https://www.politi-kdigital.de/favicon.ico
-# http://forum.rpc1.org/favicon.ico
-!:ext bmp/ico
-# "big" dimensions > 63
->>>>>18 default x x
-!:ext bmp
-# number of bits per pixel (color depth); found 1 2 4 8 16 24 32
->>>28 uleshort x %d
-# x, y coordinates of the hotspot; there is no hotspot in bitmaps, so values 0
-#>>>6 uleshort >0 \b, hotspot %ux
-#>>>>8 uleshort x \b%u
-# number of color planes (must be 1), except badplanes.bmp for testing
-#>>>26 uleshort >1 \b, %u color planes
-# compression method: 0~no 1~RLE 8-bit/pixel 2~RLE 4-bit/pixel 3~Huffman 1D 6~RGBA bit field masks
-#>>>30 ulelong 3 \b, Huffman 1D compression
->>>30 ulelong >0 \b, %u compression
-# image size is the size of raw bitmap; a dummy 0 can be given for BI_RGB bitmaps
->>>34 ulelong >0 \b, image size %u
-# horizontal and vertical resolution of the image (pixel per metre, signed integer)
->>>38 ulelong >0 \b, resolution %d x
->>>>42 ulelong x %d px/m
-# number of colors in palette 16 256, or 0 to default to 2**n
-#>>>46 ulelong >0 \b, %u colors
-# number of important colors used, or 0 when every color is important
->>>50 ulelong >0 \b, %u important colors
-# cbSize; often size of file
->>>2 ulelong x \b, cbSize %u
-#>>>2 ulelong x \b, cbSize %#x
-# offBits; offset to bitmap data like 36h 76h BEh 236h 406h 436h 4E6h
->>>10 ulelong x \b, bits offset %u
-#>>>10 ulelong x \b, bits offset %#x
-#>>>(10.l) ubequad !0 \b, bits %#16.16llxd
->14 ulelong 124 PC bitmap, Windows 98/2000 and newer format
-!:mime image/bmp
-!:ext bmp
->>18 ulelong x \b, %d x
->>22 ulelong x %d x
-# color planes; must be 1
-#>>>26 uleshort >1 \b, %u color planes
-# number of bits per pixel (color depth); found 4 8 16 24 32 1 (fmt-119-signature-id-121.bmp) 0 (rgb24jpeg.bmp rgb24png.bmp)
->>28 uleshort x %d
-# x, y coordinates of the hotspot; should be zero for Windows variant
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# cbSize; size of file like: 8E AA 48A 999 247A 4F02 7F8A 3F88E B216E 1D4C8A 100008A
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%x
-# offBits; offset to bitmap data like: 8A 47A ABABABAB (fmt-119-signature-id-121.bmp)
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset 0x%x
->14 ulelong 108 PC bitmap, Windows 95/NT4 and newer format
-!:mime image/bmp
-!:ext bmp
->>18 ulelong x \b, %d x
->>22 ulelong x %d x
-# number of bits per pixel (color depth); found 8 24 32
->>28 uleshort x %d
-# x, y coordinates of the hotspot; should be zero for Windows variant
->>6 uleshort >0 \b, hotspot %ux
->>>8 uleshort x \b%u
-# cbSize; size of file like: 82 8A 9A 9F86 1E07A 3007A 88B7A C007A
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize 0x%x
-# offBits; offset to bitmap data like: 7A 7E 46A
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset 0x%x
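All of the bitmap-bmp branches above read the same BITMAPFILEHEADER fields (cbSize at offset 2, hotspot at 6, bits offset at 10) and then dispatch on the DIB header size at offset 14. A minimal sketch for the common Windows 3.x case (DIB size 40), using the offsets from the rules; the helper is illustrative, not file(1) code:

    import struct

    def read_bmp_header(path):
        with open(path, "rb") as f:
            hdr = f.read(54)
        if hdr[:2] != b"BM":
            raise ValueError("not a BMP file")
        cb_size, _reserved, bits_offset, dib_size = struct.unpack_from("<IIII", hdr, 2)
        info = {"cbSize": cb_size, "bits_offset": bits_offset, "dib_header": dib_size}
        if dib_size == 40:                          # BITMAPINFOHEADER, Windows 3.x
            width, height = struct.unpack_from("<ii", hdr, 18)
            _planes, bpp = struct.unpack_from("<HH", hdr, 26)
            compression = struct.unpack_from("<I", hdr, 30)[0]
            info.update(width=width, height=height, bpp=bpp, compression=compression)
        return info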
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/OS/2_Icon
-# Reference: http://www.fileformat.info
-# /format/os2bmp/spec/902d5c253f2a43ada39c2b81034f27fd/view.htm
-# Note: verified by command like `deark -l -d3 OS2MEMU.ICO`
-0 string IC
-# skip Lotus smart icon *.smi by looking for valid hotspot coordinates
->6 ulelong&0xFF00FF00 =0 OS/2 icon
-# jump 4 bytes before end of header/file and test for accessibility
-#>>(2.l-4) ubelong x End of header is OK!
-!:mime image/x-os2-ico
-!:ext ico
-# cbSize; size of header or file in bytes like 1ah 120h 420h
->>2 ulelong x \b, cbSize %u
-# xHotspot, yHotspot; coordinates of the hotspot for icons like 16 32
->>6 uleshort x \b, hotspot %ux
->>8 uleshort x \b%u
-# offBits; offset in bytes to the beginning of the bit-map pel data like 20h
->>10 ulelong x \b, bits offset %u
-#>>(10.l) ubequad x \b, bits %#16.16llx
-#0 string PI PC pointer image data
-#0 string CI PC color icon data
-0 string CI
-# test also for valid dib header sizes 12 or 64
->14 ulelong <65 OS/2
-# test also for valid hotspot coordinates
-#>>6 ulelong&0xFE00FE00 =0 OS/2
-!:mime image/x-os2-ico
-!:ext ico
->>14 ulelong 12 1.x color icon
-# image width and height fields are unsigned integers for OS/2
->>>18 uleshort x %u x
-# stored height = 2 * real height
->>>20 uleshort/2 x %u
-# number of bits per pixel (color depth). Typical 32 24 16 8 4 but only 1 found
->>>24 uleshort >1 x %u
-# color planes; must be 1
-#>>>22 uleshort >1 \b, %u color planes
->>14 ulelong 64 2.x color icon
-# image width and height
->>>18 ulelong x %u x
-# stored height = 2 * real height
->>>22 ulelong/2 x %u
-# number of bits per pixel (color depth). only 1 found
->>>28 uleshort >1 x %u
-#>>>26 uleshort >1 \b, %u color planes
-# compression method: 0~no 3~Huffman 1D
->>>30 ulelong 3 \b, Huffman 1D compression
-#>>>30 ulelong >0 \b, %u compression
-# xHotspot, yHotspot; coordinates of the hotspot like 0 1 16 20 32 33 63 64
->>6 uleshort x \b, hotspot %ux
->>8 uleshort x \b%u
-# cbSize; size of header or maybe file in bytes like 1Ah 4Eh 84Eh
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize %x
-# offBits; offset to bitmap data (pixel array) like E4h 3Ah 66h 6Ah 33Ah 4A4h
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset %#x
-#>>(10.l) ubequad !0 \b, bits %#16.16llx
-# dib header size: 12~Ch~OS/2 1.x 64~40h~OS/2 2.x
-#>>14 ulelong x \b, dib header size %u
-#0 string CP PC color pointer image data
-# URL: http://fileformats.archiveteam.org/wiki/OS/2_Pointer
-# Reference: http://www.fileformat.info/format/os2bmp/egff.htm
-0 string CP
-# skip many Corel Photo-Paint image "CPT9FILE" by checking for positive bits offset
->10 ulelong >0
-# skip CPU-Z Report by checking for valid dib header sizes 12 or 64
->>14 ulelong =12
->>>0 use os2-ptr
->>14 ulelong =64
->>>0 use os2-ptr
-# display information of OS/2 pointer bitmaps
-0 name os2-ptr
->14 ulelong x OS/2
-# http://extension.nirsoft.net/PTR
-!:mime image/x-ibm-pointer
-!:ext ptr
->>14 ulelong 12 1.x color pointer
-# image width and height fields are unsigned integers for OS/2
->>>18 uleshort x %u x
-# stored height = 2 * real height
->>>20 uleshort/2 x %u
-# number of bits per pixel (color depth). Typical 32 24 16 8 4 but only 1 found
->>>24 uleshort >1 x %u
-# color planes; must be 1
-#>>>22 uleshort >1 \b, %u color planes
->>14 ulelong 64 2.x color pointer
-# image width and height
->>>18 ulelong x %u x
-# stored height = 2 * real height
->>>22 ulelong/2 x %u
-# number of bits per pixel (color depth). only 1 found
->>>28 uleshort >1 x %u
-#>>>26 uleshort >1 \b, %u color planes
-# compression method: 0~no 3~Huffman 1D
->>>30 ulelong 3 \b, Huffman 1D compression
-#>>>30 ulelong >0 \b, %u compression
-# xHotspot, yHotspot; coordinates of the hotspot like 0 3 4 8 15 16 23 27 31
->>6 uleshort x \b, hotspot %ux
->>8 uleshort x \b%u
-# cbSize; size of header or maybe file in bytes like 1Ah 4Eh
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize %x
-# offBits; offset to bitmap data (pixel array) like 6Ah A4h E4h 4A4h
->>10 ulelong x \b, bits offset %u
-#>>10 ulelong x \b, bits offset %#x
-#>>(10.l) ubequad !0 \b, bits %#16.16llx
-# dib header size: 12~Ch~OS/2 1.x 64~40h~OS/2 2.x
-#>>14 ulelong x \b, dib header size %u
-# Conflicts with other entries [BABYL]
-# URL: http://fileformats.archiveteam.org/wiki/BMP#OS.2F2_Bitmap_Array
-# Note: container for OS/2 icon "IC", color icon "CI", color pointer "CP" or bitmap "BM"
-#0 string BA PC bitmap array data
-0 string BA
-# skip old Emacs RMAIL BABYL ./mail.news by checking for low header size
->2 ulelong <0x004c5942 OS/2 graphic array
-!:mime image/x-os2-graphics
-#!:apple ????BMPf
-# cbSize; size of header like 28h 5Ch
->>2 ulelong x \b, cbSize %u
-#>>2 ulelong x \b, cbSize %#x
-# offNext; offset to data like 0 48h F2h 4Eh 64h C6h D2h D6h DAh E6h EAh 348h
->>6 ulelong >0 \b, data offset %u
-#>>6 ulelong >0 \b, data offset %#x
-#>>(6.l) ubequad !0 \b, data %#16.16llx
-# dimensions of the intended device like 640 x 480 for VGA or 1024 x 768
->>10 uleshort >0 \b, display %u
->>>12 uleshort >0 x %u
-# usType of first array element
-#>>14 string x \b, usType %2.2s
-# 1 space char after "1st"
-# no *.bga examples found https://www.openwith.org/file-extensions/bga/1342
->>14 string BM \b; 1st
-!:ext bmp/bga
->>14 string CI \b; 1st
-!:ext ico
->>14 string CP \b; 1st
-!:ext ico
->>14 string IC \b; 1st
-!:ext ico
-# no white-black pointer found
-#>>14 string PT \b; 1st
-#!:ext
->>14 indirect x
-
-# XPM icons (Greg Roelofs, newt@uchicago.edu)
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/XPM
-# Reference: http://www.x.org/docs/XPM/xpm.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-xpm.trid.xml
-# Note: called "X PixMap bitmap" by TrID and "X-Windows Pixmap Image" by DROID via PUID x-fmt/208
-# starting with c comment like: logo.xpm
-0 string /*\040
-# 9 byte c-comment "/* XPM */" not at the beginning like: mozicon16.xpm mozicon50.xpm (thunderbird)
->0 search/0xCE /*\ XPM\ */
-# skip DROID x-fmt-208-signature-id-620.xpm by looking for char array without explicit length
-# and match mh-logo.xpm (emacs)
->>&0 search/1249 []
->>>0 use xpm-image
-# non standard because no 9 byte c-comment "/* XPM */" like: logo.xpm in qemu package
->0 default x
-# words are separated by a white space which can be composed of space and tabulation characters
->>0 search/0x52 static\040char\040
-# skip debug.c testmlc.c by looking for char array without explicit length
-# https://www.clamav.net/downloads/production/clamav-0.104.2.tar.gz
-# clamav-0.104.2\libclammspack\mspack\debug.c
->>>&0 search/64 []
->>>>0 use xpm-image
-# display X pixmap image information
-0 name xpm-image
->0 string x X pixmap image text
-#!:mime text/plain
-# https://reposcope.com/mimetype/image/x-xpixmap
-# alias
-#!:mime image/x-xpm
-!:mime image/x-xpixmap
-!:ext xpm
-# NO pm example found!
-#!:ext xpm/pm
-# look for start of character array at beginning of a line like: psetupl.xpm (OpenOffice 4.1.7)
->0 search/0x406 \n"
-# DEBUG VALUES string
-#>>&0 string x '%s'
-# width with optional white space before like: 16 24 32 48 1280
->>&0 regex/8 [0-9]{1,5} \b, %s
-# height with white space like: 15 16 17 24 32 48 1024
->>>&0 regex/8 [0-9]{1,5} x %s
-# number of colors with white space like: 1 2 3 4 5 8 11 14 162 255 but unrealistic 4294967295 by hardcopy tool
->>>>&0 regex/12 [0-9]{1,9} x %s
-# chars_per_pixel with white space like: 1 2
->>>>>&0 regex/14 [0-9]{1,2} \b, %s chars/pixel
-# non standard because not starting with 9 byte c-comment "/* XPM */"
->0 string !/*\ XPM\ */
->>0 string x \b, 1st line "%s"
-
-# Utah Raster Toolkit RLE images (janl@ifi.uio.no)
-0 uleshort 0xcc52 RLE image data,
->6 uleshort x %d x
->8 uleshort x %d
->2 uleshort >0 \b, lower left corner: %d
->4 uleshort >0 \b, lower right corner: %d
->10 ubyte&0x1 =0x1 \b, clear first
->10 ubyte&0x2 =0x2 \b, no background
->10 ubyte&0x4 =0x4 \b, alpha channel
->10 ubyte&0x8 =0x8 \b, comment
->11 ubyte >0 \b, %d color channels
->12 ubyte >0 \b, %d bits per pixel
->13 ubyte >0 \b, %d color map channels
-
-# image file format (Robert Potter, potter@cs.rochester.edu)
-0 string Imagefile\ version- iff image data
-# this adds the whole header (inc. version number), informative but longish
->10 string >\0 %s
-
-# Sun raster images, from Daniel Quinlan (quinlan@yggdrasil.com)
-0 ubelong 0x59a66a95 Sun raster image data
->4 ubelong >0 \b, %d x
->8 ubelong >0 %d,
->12 ubelong >0 %d-bit,
-#>16 ubelong >0 %d bytes long,
->20 ubelong 0 old format,
-#>20 ubelong 1 standard,
->20 ubelong 2 compressed,
->20 ubelong 3 RGB,
->20 ubelong 4 TIFF,
->20 ubelong 5 IFF,
->20 ubelong 0xffff reserved for testing,
->24 ubelong 0 no colormap
->24 ubelong 1 RGB colormap
->24 ubelong 2 raw colormap
-#>28 ubelong >0 colormap is %d bytes long
-
-# SGI image file format, from Daniel Quinlan (quinlan@yggdrasil.com)
-#
-# See
-# http://reality.sgi.com/grafica/sgiimage.html
-#
-0 ubeshort 474 SGI image data
-#>2 ubyte 0 \b, verbatim
->2 ubyte 1 \b, RLE
-#>3 ubyte 1 \b, normal precision
->3 ubyte 2 \b, high precision
->4 ubeshort x \b, %d-D
->6 ubeshort x \b, %d x
->8 ubeshort x %d
->10 ubeshort x \b, %d channel
->10 ubeshort !1 \bs
->80 string >0 \b, "%s"
-
-0 string IT01 FIT image data
->4 ubelong x \b, %d x
->8 ubelong x %d x
->12 ubelong x %d
-#
-0 string IT02 FIT image data
->4 ubelong x \b, %d x
->8 ubelong x %d x
->12 ubelong x %d
-#
-2048 string PCD_IPI Kodak Photo CD image pack file
->0xe02 ubyte&0x03 0x00 , landscape mode
->0xe02 ubyte&0x03 0x01 , portrait mode
->0xe02 ubyte&0x03 0x02 , landscape mode
->0xe02 ubyte&0x03 0x03 , portrait mode
-0 string PCD_OPA Kodak Photo CD overview pack file
-
-# FITS format. Jeff Uphoff <juphoff@tarsier.cv.nrao.edu>
-# FITS is the Flexible Image Transport System, the de facto standard for
-# data and image transfer, storage, etc., for the astronomical community.
-# (FITS floating point formats are big-endian.)
-0 string SIMPLE\ \ = FITS image data
-!:mime image/fits
-!:ext fits/fts
->109 string 8 \b, 8-bit, character or unsigned binary integer
->108 string 16 \b, 16-bit, two's complement binary integer
->107 string \ 32 \b, 32-bit, two's complement binary integer
->107 string -32 \b, 32-bit, floating point, single precision
->107 string -64 \b, 64-bit, floating point, double precision
-
-# other images
-0 string This\ is\ a\ BitMap\ file Lisp Machine bit-array-file
-
-# From SunOS 5.5.1 "/etc/magic" - appeared right before Sun raster image
-# stuff.
-#
-0 ubeshort 0x1010 PEX Binary Archive
-
-# DICOM medical imaging data
-# URL: https://en.wikipedia.org/wiki/DICOM#Data_format
-# Note: "dcm" is the official file name extension
-# XnView also mentions "dc3" and "acr" as file name extensions
-128 string DICM DICOM medical imaging data
-!:mime application/dicom
-!:ext dcm/dicom/dic
-
-# XWD - X Window Dump file.
-# URL: http://fileformats.archiveteam.org/wiki/XWD
-# Reference: https://wiki.multimedia.cx/index.php?title=XWD
-# http://mark0.net/download/triddefs_xml.7z/defs/x/xdm-x11.trid.xml
-# Note: called "X-Windows Screen Dump (X11)" by TrID and
-# "X-Windows Screen Dump" version X11 by DROID via PUID fmt/483
-# verified by XnView `nconvert -in xwd -info *`
-# and ImageMagick 6.9.11 `identify -verbose *` as XWD X Windows system window dump
-# and `xwud -in fig41.wxd -dumpheader`
-# As described in /usr/X11R6/include/X11/XWDFile.h
-# used by the xwd program.
-# Bradford Castalia, idaeim, 1/01
-# updated by Adam Buchbinder, 2/09 and Joerg Jenderek, May 2022
-# The following assumes version 7 of the format; the first long is the length
-# of the header, which is at least 25 4-byte longs, and the one at offset 8
-# is a constant which is always either 1 or 2. Offset 12 is the pixmap depth,
-# which is a maximum of 32.
-# Size of the entire file header (bytes) like: 100 104 105 106 107 109 110 113 114 115 118 172
-0 ubelong >99
-# pixmap_format; Pixmap format; 0~1-bit (XYBitmap) format 1~single-plane (XYPixmap) 2~bitmap with two or more planes (ZPixmap)
->8 ubelong <3
-# pixmap_depth; Pixmap depth; value 1 - 32
->>12 ubelong <33
-# file_version; XWD_FILE_VERSION=7
->>>4 ubelong 7
-# skip DROID fmt-401-signature-id-618.xwd by testing for an existing border field
->>>>96 ubelong x X-Window screen dump image data, version X11
-# ./images (version 1.205) labeled the above entry as "XWD X Window Dump image data"
-# https://reposcope.com/mimetype/image/x-xwindowdump
-!:mime image/x-xwindowdump
-#!:ext xwd
-!:ext xwd/dmp
-# https://www.xnview.com/en/image_formats/ NO example with x11 suffix FOUND!
-#!:ext xwd/dmp/x11
-# https://www.nationalarchives.gov.uk/PRONOM/fmt/401 NO example with xdm suffix FOUND!
-#!:ext xwd/dmp/x11/xmd
-# file comment if header > 100; so not in MARBLES.XWD and hardcopy-x-window-v11.xwd
->>>>>0 ubelong >100
-# comment or windows name
->>>>>>100 string >\0 \b, "%s"
-# pixmap_width; pixmap width like: 576 800 1014 1280 1419 NOT -1414812757=abABabABh
->>>>>16 ubelong x \b, %dx
-# pixmap_height; pixmap height like: 449 454 600 704 720 1001 1024 NOT -1414812757=abABabABh
->>>>>20 ubelong x \b%dx
-# pixmap_depth; pixmap depth
->>>>>12 ubelong x \b%d
-# XOffset; Bitmap X offset; pixel numbers to ignore at the beginning of each scan-line
-#>>>>>24 ubelong x \b, %u ignore
-# ByteOrder; byte order of image data: 0~least significant byte first 1~most significant byte first
->>>>>28 ubelong >0 \b, order %u
-# BitmapUnit; bitmap base data size unit in each scan line like: 8 16 32
-#>>>>>32 ubelong x \b, unit %u
-# BitmapBitOrder; bit-order of image data; apparently same as ByteOrder
-#>>>>>36 ubelong x \b, bit order %u
-# BitmapPad; number of padding bits added to each scan line like: 8 16 32
-#>>>>>40 ubelong x \b, pad %u
-# BitsPerPixel; Bits per pixel: 1~StaticGray and GrayScale 2-15~StaticColor and PseudoColor 16,24,32~TrueColor and DirectColor
-#>>>>>44 ubelong x \b, %u bits/pixel
-# BytesPerLine; size of each scan line in bytes
-#>>>>>48 ubelong x \b, %u bytes/line
-# VisualClass; class of the image: 0~StaticGray 1~GrayScale 2~StaticColor 3~PseudoColor 4~TrueColor 5~DirectColor
-#>>>>>52 ubelong x \b, %u Class
-# RedMask; red RGB mask values used by ZPixmaps like: 0 0xff0000
-#>>>>>56 ubelong !0 \b, %#x red
-# GreenMask; green mask like: 0
-#>>>>>60 ubelong !0 \b, %#x green
-# BlueMask; blue mask like: 0 0xff
-#>>>>>64 ubelong !0 \b, %#x blue
-# BitsPerRgb; Size of each color mask in bits like: 0 1 8 24
-#>>>>>68 ubelong x \b, %u bits/RGB
-# NumberOfColors; number of colors in image like: 256 4 2 0 (WHAT DOES THIS MEAN?)
->>>>>72 ubelong x \b, %u colors
-# ColorMapEntries; number of entries in color map like: 256 16 2 0~no color map
->>>>>76 ubelong x %u entries
-# WindowWidth; window width
-#>>>>>80 ubelong x \b, %u width
-# WindowHeight; window height
-#>>>>>84 ubelong x \b, %u height
-# WindowX; Window upper left X coordinate like: 0 24 32 80 237 290 422 466 568 (lenna.dmp)
->>>>>88 ubelong !0 \b, x=%d
-# WindowY; Window upper left Y coordinate like: 0 8 18 26 60 73 107 (fig41.xwd) 128
->>>>>92 ubelong !0 \b, y=%d
-# WindowBorderWidth; Window border width; apparently pixmap_width=WindowWidth+2*WindowBorderWidth
-# like: 1 (fig41.xwd) 2 (maze.dmp) 3 (lenna.dmp mandrill.dmp)
->>>>>96 ubelong >0 \b, %u border
-# From: Joerg Jenderek
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/x/xdm-x10.trid.xml
-# Note: called "X-Windows Screen Dump (X10)" by TrID and
-# "X-Windows Screen Dump" version X10 by DROID via PUID x-fmt/300
-# verified by XnView `nconvert -in xwd -info *`
-# HeaderSize is the size of the header in bytes; always 40 for X10 variant
-0 ubelong =0x000000028
-# FileVersion; always 6 for X10 variant
->4 ubelong =6
-# skip DROID x-fmt-300-signature-id-619.xdm by test existing border field
->>36 ubeshort x X-Window screen dump image data, version X10
-!:mime image/x-xwindowdump
-!:ext xwd
-# http://www.nationalarchives.gov.uk/pronom/fmt/401 NO example with xdm suffix FOUND!
-#!:ext xwd/xdm
-# PixmapWidth; pixmap width like: 127 1280
->>>20 ubelong x \b, %d
-# PixmapHeight; pixmap height like: 64 1024
->>>24 ubelong x \bx%d
-# DisplayPlanes; number of display planes like: 1 4 8
->>>12 ubelong x \bx%u
-# DisplayType; display type like: 1 3
-#>>>8 ubelong x \b, type %u
-# PixmapFormat; pixmap format like: 1~bitmap with two or more planes (ZPixmap) 0~single-plane bitmap (XYBitmap)
-#>>>16 ubelong x \b, %u format
-# WindowWidth; window width; probably PixmapWidth=WindowWidth+2*WindowBorderWidth
-#>>>28 ubeshort x \b, width %u
-# WindowHeight; window height; probably PixmapWidth=PixmapHeight+2*WindowBorderWidth
-#>>>30 ubeshort x \b, height %u
-# WindowX; window upper left X coordinate like: 0
->>>32 ubeshort !0 \b, x=%d
-# WindowY; window upper left Y coordinate like: 0
->>>34 ubeshort !0 \b, y=%d
-# WindowBorderWidth; window border width like: 0
->>>36 ubeshort !0 \b, %u border
-# WindowNumColors; Number of color entries in window like: 2 16 256
-#>>>38 ubeshort x \b, %u colors
-# if the image is a PseudoColor image, a color map immediately follows the header. X10COLORMAP[WindowNumColors];
-# EntryNumber; number of the color-map entry like: 0
-#>>>40 ubeshort x \b, colors #%u
-# Red; red-channel value
-#>>>42 ubeshort !0 \b, red %#x
-# Green; green-channel value
-#>>>44 ubeshort !0 \b, green %#x
-# Blue; blue-channel value
-#>>>46 ubeshort !0 \b, blue %#x
-# 2ND Entry like: 2
-#>>>48 ubeshort x \b, colors #%u
-
-# PDS - Planetary Data System
-# These files use Parameter Value Language in the header section.
-# Unfortunately, there is no certain magic, but the following
-# strings have been found to be most likely.
-0 string NJPL1I00 PDS (JPL) image data
-2 string NJPL1I PDS (JPL) image data
-0 string CCSD3ZF PDS (CCSD) image data
-2 string CCSD3Z PDS (CCSD) image data
-0 string PDS_ PDS image data
-0 string LBLSIZE= PDS (VICAR) image data
-
-# pM8x: ATARI STAD compressed bitmap format
-#
-# from Oskar Schirmer <schirmer@scara.com> Feb 2, 2001
-# p M 8 5/6 xx yy zz data...
-# An Atari ST STAD bitmap is always 640x400, byte-wise run-length compressed.
-# Bytes either run horizontally (pM85) or vertically (pM86). yy is the
-# most frequent byte, xx and zz are run-length escape codes, where xx is
-# used for runs of yy.
-#
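-# Minimal header-reading sketch (Python; the run-length decoding itself is
-# omitted here, and the sample name is made up):
-#   with open("PICTURE.PAC", "rb") as f:
-#       magic = f.read(4)          # b"pM85" horizontal, b"pM86" vertical
-#       xx, yy, zz = f.read(3)     # escape for yy-runs, most frequent byte, generic escape
-#       packed = f.read()          # compressed 640x400 monochrome bitmap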
-0 string pM85 Atari ST STAD bitmap image data (hor)
->5 ubyte 0x00 (white background)
->5 ubyte 0xFF (black background)
-0 string pM86 Atari ST STAD bitmap image data (vert)
->5 ubyte 0x00 (white background)
->5 ubyte 0xFF (black background)
-
-# From: Alex Myczko <alex@aiei.ch>
-# https://www.atarimax.com/jindroush.atari.org/afmtatr.html
-0 uleshort 0x0296 Atari ATR image
-
-# URL: http://fileformats.archiveteam.org/wiki/DEGAS_image
-# Reference: https://wiki.multimedia.cx/index.php?title=Degas
-# From: Joerg Jenderek
-# http://mark0.net/download/triddefs_xml.7z/defs/b
-# bitmap-pi2-degas.trid.xml bitmap-pi3-degas.trid.xml
-# bitmap-pc1-degas.trid.xml bitmap-pc2-degas.trid.xml bitmap-pc3-degas.trid.xml
-# Note: verified by NetPBM `pi3topbm sigirl1.pi3 | file`
-# `deark -m degas -l -d2 ataribak.pi1`
-# XnView `nconvert -fullinfo *.p??`
-# DEGAS low-res uncompressed bitmap *.pi1
-0 beshort 0x0000
-# skip some ISO 9660 CD-ROM filesystems like plpbt.iso by test for 4 non black colors in palette entries
->2 quad !0
-# skip g3test.g3 by test for unused bits of 2nd color entry
->>4 ubeshort&0xF000 0
-#>>>0 beshort x 1ST_VALUE=%x
-#>>>-0 offset x FILE_SIZE=%lld
-# standard DEGAS low-res uncompressed bitmap *.pi1 with file size 32034
->>>-0 offset =32034
-#>>>>0 beshort x 1st_VALUE=%x
-# like: 8ball.pi1 teddy.pi1 sonic01.pi1
->>>>0 use degas-bitmap
-# about 61 DEGAS Elite low-res uncompressed bitmap *.pi1 with file size 32066
->>>-0 offset =32066
-# like: spider.pi1 pinkgirl.pi1 frog3.pi1
->>>>0 use degas-bitmap
-# about 55 DEGAS Elite low-res uncompressed bitmap *.pi1 with file size 32128
->>>-0 offset =32128
-# like: mountain.pi1 bigspid.pi1 alf33.pi1
->>>>0 use degas-bitmap
-# 1 DEGAS Elite low-res uncompressed bitmap *.pi1 with file size 44834
->>>-0 offset =44834
-# like: kenshin.pi1
->>>>0 use degas-bitmap
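-# The 32034/32066 sizes tested above follow from the DEGAS layout shown by
-# degas-bitmap below (arithmetic sketch, Python):
-#   header = 2                      # resolution word
-#   palette = 16 * 2                # 16 hardware palette words
-#   pixels = 320 * 200 * 4 // 8     # 32000 bytes of bitplane data (same for all modes)
-#   footer = 32                     # DEGAS Elite colour-animation block
-#   assert header + palette + pixels == 32034
-#   assert header + palette + pixels + footer == 32066
-# the larger 32128/44834 files above presumably carry additional trailing data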
-# DEGAS mid-res uncompressed bitmap *.pi2 (strength=50) after GEM Images like:
-# BEETHVEN.IMG CHURCH.IMG GAMEOVR4.IMG TURKEY.IMG clinton.img
-0 beshort 0x0001
-#!:strength +0
-# skip many control files like gnucash-4.8.setup.exe.aria2 by test for non black in 4 palette entries
->2 quad !0
-# skip control file load-v0001.aria2 and many GEM Image data like
-# GAMEOVR4.IMG BEETHVEN.IMG CHURCH.IMG TURKEY.IMG clinton.img
-# by test for valid file sizes
-# standard DEGAS mid-res uncompressed bitmap *.pi2 with file size 32034
->>-0 offset =32034
-# (39/41) like: GEMINI03.PI2 ST_TOOLS.PI2 TBX_DEMO.PI2
->>>0 use degas-bitmap
-# few DEGAS Elite mid-res uncompressed bitmap *.pi2 with file size 32066
->>-0 offset =32066
-# (2/41) like: medres.pi2
->>>0 use degas-bitmap
-# DEGAS high-res uncompressed bitmap *.pi3
-0 beshort 0x0002
-# skip Intel ia64 COFF msvcrt.lib by test for unused bits of 1st atari color palette entry
->2 ubeshort&0xF000 0
-# skip few Adobe PhotoShop Brushes like Faux-Spitzen.abr by check
-# for invalid Adobe PhotoShop Brush UTF16-LE string length
->>19 ubyte =0
-# many like: 4th_ofj2.pi3 GEMINI03.PI3 PEOPLE18.PI3 POWERFIX.PI3 abydos.pi3 highres.pi3 sigirl1.pi3 vanna5.pi3
->>>0 use degas-bitmap
-# Adobe PhotoShop Brush UTF16-LE string length 15 "Gitter - klein " 8 "Kreis 1 "
->>19 ubyte !0
-#>>19 ubyte !0 \b, NOTE LENGTH %u
-#>>>21 lestring16 x \b, BRUSH NOTE "%s"
->>>(19.b*2) ubequad x
-# maybe last character of Adobe PhotoShop Brush UTF16-LE string and terminating nul char like
-# 006e0000 for n in "Faux-Spitzen.abr" 00310000 for 1 in "Verschiedene Spitzen.abr"
-# 00000000 "LEREDACT.PI3" 03730773 "TBX_DEMO.PI3"
-#>>>>&8 ubelong x \b, LAST CHAR+NIL %8.8x
->>>>&8 ubelong&0xff00ffFF !0
-# skip many Adobe Photoshop Color swatch (ANPA-Farben.aco TOYO-Farbsystem.aco) with invalid 3rd color entry (1319 2201 2206 21f5 2480 24db 25fd)
->>>>>6 ubeshort&0xF000 0
-# skip few Adobe Photoshop Color swatch (FOCOLTONE-Farben.aco "PANTONE process coated.aco") with invalid 4th color entry (ffff)
->>>>>>8 ubeshort&0xF000 0
-# many DEGAS bitmap like: ARABDEMO.PI3 ELMRSESN.PI3 GEMVIEW.PI3 LEREDACT.PI3 PICCOLO.PI3 REPRO_JR.PI3 ST_TOOLS.PI3 TBX_DEMO.PI3 evgem7.pi3
->>>>>>>0 use degas-bitmap
-# test for last character of Adobe PhotoShop Brush UTF16-LE string and terminating nul char
->>>>&8 ubelong&0xff00ffFF =0
-# select last DEGAS bitmaps by invalid last char of brush note like BASICNES.PI3 DB_HELP.PI3 DB_WRITR.PI3 LEREDACT.PI3
->>>>>&-4 ubelong&0x00FF0000 <0x00200000
->>>>>>0 use degas-bitmap
-# last character of Adobe PhotoShop Brush UTF16-LE note
-#>>>>>&-4 ubelong&0x00FF0000 >0x001F0000 \b, THAT IS ABR
-# DEGAS low-res compressed bitmap *.pc1 like: BATTLSHP.PC1 GNUCHESS.PC1 MEDUSABL.PC1 MOONLORD.PC1 WILDROSE.PC1
-0 beshort 0x8000
-# skip lif files handled via ./lif by test for unused bits of 1st palette entry
->2 ubeshort&0xF000 0
-# skip CRI ADX ADPCM audio (R04HT.adx R03T-15552.adx) with 44100 Hz misinterpreted as 5th color entry value AC44h
->>10 ubeshort&0xF000 0
-# skip few (fmt-840-signature-id-1195.adx fmt-840-signature-id-1199.adx) by test for 4 first non black colors in palette entries
->>>2 quad !0
->>>>0 use degas-bitmap
-# DEGAS mid-res compressed bitmap *.pc2 like: abydos.pc2 ARTIS3.PC2 SMTHDRAW.PC2 STAR_2K.PC2 TX2_DEMO.PC2
-0 beshort 0x8001
-# skip many (1274/1369) PostScript Type 1 font (DarkGardenMK.pfb coupbi.pfb MONOBOLD.PFB) with invalid 1st atari color palette entry 5506 5b06 6906 7906 7e06 fb15
->2 ubeshort&0xF000 0
-# skip some (95/1369) PostScript Type 1 font (fmt-525-signature-id-816.pfb LUXEMBRG.PFB) with invalid 3rd atari color palette entry 2521
->>6 ubeshort&0xF000 0
->>>0 use degas-bitmap
-# DEGAS high-res compressed bitmap *.pc3 like: abydos.pc3 COYOTE.PC3 ELEPHANT.PC3 TX2_DEMO.PC3 SMTHDRAW.PC3
-0 beshort 0x8002
-# skip some (36/212) Python Pickle (factor_cache.pickle environment.pickle) with invalid 1st atari color entry (2863 6363 7d71)
->2 ubeshort&0xF000 0
->>0 use degas-bitmap
-# display information of Atari DEGAS and DEGAS Elite bitmap images
-0 name degas-bitmap
->0 ubyte x Atari DEGAS
-#!:mime application/octet-stream
-!:mime image/x-atari-degas
-# compressed
->0 ubyte =0x80 Elite compressed
-# uncompressed
-#>0 ubyte =0x00 uncompressed
-#>0 ubyte =0x00 un.
->0 ubyte =0x00
-# check for existence of footer for DEGAS Elite images
->>32042 ubequad x Elite
->0 beshort 0x0000 bitmap
-!:ext pi1
->0 beshort 0x0001 bitmap
-!:ext pi2
->0 beshort 0x0002 bitmap
-# no example with SUH extension found
-#!:ext pi3/suh
-!:ext pi3
->0 beshort 0x8000 bitmap
-!:ext pc1
->0 beshort 0x8001 bitmap
-!:ext pc2
->0 beshort 0x8002 bitmap
-!:ext pc3
-# low resolution; 320x200, 16 colors
->1 ubyte =0 320 x 200 x 16
-# medium resolution; 640x200, 4 colors
->1 ubyte =1 640 x 200 x 4
-# high resolution; 640x400, 2 colors
->1 ubyte =2 640 x 400 x 2
-# http://fileformats.archiveteam.org/wiki/Atari_ST_color_palette
-# hardware_palette[16]; 9 bit ?????RRR?GGG?BBB; 12 bit ????RRRRGGGGBBBB for Atari STE
-# for Atari DEGAS apparently no Spectrum 512 Enhanced 15 bit palette RGB?RRRRGGGGBBBB
-# Red Green Blue unused bit ? often 0 but not bilboule.pi1; color_value (examples or numbers)
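-# Decoding sketch for one 9-bit ST palette word as laid out above (Python;
-# the STE extra bits are ignored here):
-#   def st_palette_to_rgb(word):
-#       r, g, b = (word >> 8) & 7, (word >> 4) & 7, word & 7
-#       return tuple(c * 255 // 7 for c in (r, g, b))
-#   assert st_palette_to_rgb(0x0777) == (255, 255, 255)   # white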
-# 1st color palette entry like: 0777 (61) 0fff (LEREDACT.PI3) 0fcf (devgem7.pi3) 0001 (9)
->2 ubeshort x \b, color palette %4.4x
-# 2nd palette entry like: 0000 (32) 0700 (38) 0f00 (LEREDACT.PI3 devgem7.pi3)
->4 ubeshort x %4.4x
-# 3rd palette entry
->6 ubeshort x %4.4x
-# 4th palette entry like: 0000 (72)
->8 ubeshort x %4.4x
-# 5th palette entry
->10 ubeshort x %4.4x
->2 ubeshort x ...
-# 6th palette entry
-#>12 ubeshort x %4.4x
-# 7th palette entry like: 0000 (16) 0001 (ELMRSESN.PI3 elmrsesn.pi3) 0070 (51) 00f0 (BASICNES.PI3 LEREDACT.PI3) 00f8 (devgem7.pi3)
-#>14 ubeshort x %4.4x
-# 8th palette entry
-#>16 ubeshort x %4.4x
-# 9th palette entry
-#>18 ubeshort x %4.4x
-# 10th palette entry
-#>20 ubeshort x %4.4x
-# 11th palette entry
-#>22 ubeshort x %4.4x
-# 12th palette entry
-#>24 ubeshort x %4.4x
-# 13th palette entry
-#>26 ubeshort x %4.4x
-# 14th palette entry
-#>28 ubeshort x %4.4x
-# 15th palette entry
-#>30 ubeshort x %4.4x
-# 16th palette entry
-#>32 ubeshort x %4.4x
-# data[16000] for uncompressed images; pixel data
-#>34 ubequad x \b, DATA %#16.16llx...
-# footer for Elite variant images
-# https://www.fileformat.info/format/atari/egff.htm
-# https://pulkomandy.tk/projects/GrafX2/wiki/Develop/FileFormats/Atari
-# left_color_animation[4]; like: 0000000000000000 0000000100020003 fffafff000000030 (bigspid.pi1)
-#>32034 ubequad !0 \b, color animations %16.16llx (left)
-# right_color_animation[4]; like: 0000000000000000 0000000100020003
-#>>32042 ubequad !0 %16.16llx (right)
-# channel_direction[4]; 0~left 1~none 2~right like: 0001000100010001 0002000000010001 (cycle2.pi1)
-# sometimes unexpected like: feaafc0000000000 (bigspid.pi1)
-#>32050 ubequad !0 \b, channel directions %16.16llx
-# channel_delay[4]; 128 - channel delay, timebase 1/60 s
-#>32058 ubequad !0 \b, channel delays %16.16llx
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/GED
-# https://recoil.sourceforge.net/formats.html#Atari-8-bit
-# Reference: https://sourceforge.net/projects/recoil/files/recoil/6.3.4/recoil-6.3.4.tar.gz
-# recoil-6.3.4/recoil.c
-# http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-ged.trid.xml
-# Note: called "Atari GED bitmap" by TrID; file size 11302
-# and verified by RECOIL graphic tool
-0 string \xFF\xFF0SO\x7F Atari GED bitmap, 160x200
-#!:mime application/octet-stream
-!:mime image/x-atari-ged
-!:ext ged
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/ImageLab/PrintTechnic
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-b_w.trid.xml
-# Note: called "ImageLab bitmap" by TrID
-# verified by XnView `nconvert -fullinfo "MAEDCHEN.B&W"`
-0 string B&W256 ImageLab bitmap
-!:mime image/x-ilab
-# https://www.xnview.com/de/image_formats/
-# GRR: add the char & inside parse_ext in ../../src/apprentice.c to avoid, in file version 5.40, an error like:
-# Magdir\images, 1090: Warning: EXTENSION type ` b_w/b&w' has bad char '&'
-!:ext b_w/b&w
-# Width
->6 ubeshort x \b, %u
-# Height
->8 ubeshort x x %u
-
-# XXX:
-# This is bad magic 0x5249 == 'RI' conflicts with RIFF and other
-# magic.
-# SGI RICE image file <mpruett@sgi.com>
-#0 ubeshort 0x5249 RICE image
-#>2 ubeshort x v%d
-#>4 ubeshort x (%d x
-#>6 ubeshort x %d)
-#>8 ubeshort 0 8 bit
-#>8 ubeshort 1 10 bit
-#>8 ubeshort 2 12 bit
-#>8 ubeshort 3 13 bit
-#>10 ubeshort 0 4:2:2
-#>10 ubeshort 1 4:2:2:4
-#>10 ubeshort 2 4:4:4
-#>10 ubeshort 3 4:4:4:4
-#>12 ubeshort 1 RGB
-#>12 ubeshort 2 CCIR601
-#>12 ubeshort 3 RP175
-#>12 ubeshort 4 YUV
-
-# PCX image files
-# From: Dan Fandrich <dan@coneharvesters.com>
-# updated by Joerg Jenderek at Feb 2013 by https://de.wikipedia.org/wiki/PCX
-# https://web.archive.org/web/20100206055706/http://www.qzx.com/pc-gpe/pcx.txt
-# GRR: the original test was still too general as it catches xbase examples T5.DBT, T6.DBT with 0xa000000
-# test for byte 0x0a, version byte (0,2,3,4,5), compression flag byte (0,1), bit depth (>0) of PCX or T5.DBT, T6.DBT
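-# The masked test below is roughly equivalent to (Python sketch; sample name made up):
-#   import struct
-#   with open("sample.pcx", "rb") as f:
-#       manufacturer, version, compression, bpp = struct.unpack("4B", f.read(4))
-#   looks_like_pcx = (manufacturer == 0x0A and version & 0xF8 == 0
-#                     and compression in (0, 1) and bpp > 0)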
-0 ubelong&0xffF8fe00 0x0a000000
-# for PCX bit depth > 0
->3 ubyte >0
-# test for valid versions
->>1 ubyte <6
->>>1 ubyte !1 PCX
-!:mime image/x-pcx
-#!:mime image/pcx
->>>>1 ubyte 0 ver. 2.5 image data
->>>>1 ubyte 2 ver. 2.8 image data, with palette
->>>>1 ubyte 3 ver. 2.8 image data, without palette
->>>>1 ubyte 4 for Windows image data
->>>>1 ubyte 5 ver. 3.0 image data
->>>>4 uleshort x bounding box [%d,
->>>>6 uleshort x %d] -
->>>>8 uleshort x [%d,
->>>>10 uleshort x %d],
->>>>65 ubyte >1 %d planes each of
->>>>3 ubyte x %d-bit
->>>>68 ubyte 1 colour,
->>>>68 ubyte 2 grayscale,
-# this should not happen
->>>>68 default x image,
->>>>12 uleshort >0 %d x
->>>>>14 uleshort x %d dpi,
->>>>2 ubyte 0 uncompressed
->>>>2 ubyte 1 RLE compressed
-
-# Adobe Photoshop
-# From: Asbjoern Sloth Toennesen <asbjorn@lila.io>
-# URL: http://fileformats.archiveteam.org/wiki/PSD
-# Reference: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
-# Note: verified by XnView `nconvert -fullinfo *.psd *.psb *.pdd`
-# and ImageMagick `identify -verbose *.pdd`
-0 string 8BPS
-# skip DROID x-fmt-92-signature-id-277.psd by checking valid width
->18 ubelong >0 Adobe Photoshop
-!:mime image/vnd.adobe.photoshop
-!:apple ????8BPS
-# version: always equal to 1, but 2 for PSB
->>4 beshort 1
-# URL: http://fileformats.archiveteam.org/wiki/PhotoDeluxe
-# EXTRAS/PHOTOS/DEMOPIX/ORIGINAL.PDD
->>>34 search/0xC0d7 PHUT Image (PhotoDeluxe)
-!:ext pdd
->>>34 default x Image
-!:ext psd
-# URL: http://fileformats.archiveteam.org/wiki/PSB
->>4 beshort 2 Image (PSB)
-!:ext psb
-# width in pixels: 1-30000 1-300000 for PSB
->>18 belong x \b, %d x
->>14 belong x %d,
-# The color mode; 0~Bitmap 1~Grayscale 2~Indexed 3~RGB 4~CMYK 7~Multichannel 8~Duotone 9~Lab
->>24 beshort 0 bitmap
->>24 beshort 1 grayscale
-# the number of channels; range is 1 to 56
->>>12 beshort 2 with alpha
->>24 beshort 2 indexed
->>24 beshort 3 RGB
->>>12 beshort 4 \bA
->>24 beshort 4 CMYK
->>>12 beshort 5 \bA
->>24 beshort 7 multichannel
->>24 beshort 8 duotone
->>24 beshort 9 lab
->>12 beshort > 1
->>>12 beshort x \b, %dx
->>12 beshort 1 \b,
->>22 beshort x %d-bit channel
->>12 beshort > 1 \bs
-# 6 reserved bytes; must be zero, but spaces inside ImageMagick input.psd
-# https://download.imagemagick.org/ImageMagick/download/ImageMagick-7.0.11-11.zip
-# ImageMagick-7.0.11-11\PerlMagick\t\input.psd
->>6 bequad&0xFFffFFffFFff0000 !0 \b, at offset 6
->>>6 belong x 0x%8.8x
->>>6 beshort x \b%4.4x
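-# Sketch of the fixed 26-byte PSD/PSB header read above (Python; file name made up):
-#   import struct
-#   with open("art.psd", "rb") as f:
-#       hdr = struct.unpack(">4sH6sHIIHH", f.read(26))
-#   sig, version, _, channels, height, width, depth, mode = hdr
-#   assert sig == b"8BPS" and version in (1, 2)   # version 2 is PSB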
-
-# From: Joerg Jenderek
-# URL: https://www.adobe.com/devnet-apps/photoshop/fileformatashtml/
-# http://fileformats.archiveteam.org/wiki/Photoshop
-# Reference: http://www.nomodes.com/aco.html
-# Note: registers as Photoshop.SwatchesFile for Photoshop.exe on Windows
-# check for valid versions like: 2 (newest) 1 (old) 0 (oldest no examples)
-0 ubeshort <3
-# skip few Atari DEGAS med-res bitmap (DIAGRAM1.PI2) and many ISO 9660 CD-ROM by check for invalid low color numbers (0)
->2 ubeshort >0
-# skip few Targa (bmpsuite-15col.tga rgb24_top_left_colormap.tga) by check for invalid high color space ID (F0 1D)
->>4 ubeshort <16
-# skip many (69/327) Targa image *.TGA by check of accessing near the ending of first color space section (size=nc*5*2)
->>>(2.S*10) ubelong x
-# RGB branch for Adobe Photoshop Color swatch
->>>>4 ubeshort =0
-# skip many (220/327) Targa by check of for invalid high RGB color z value (hexadecimal 2 3 2e03 4600 5e04 7502 8002 8b05 c700)
->>>>>12 ubeshort =0
-# RGB branch for Adobe Photoshop Color swatch for older versions
->>>>>>0 ubeshort <2
->>>>>>>0 use adobe-aco
-# RGB branch for Adobe Photoshop Color swatch for newer version 2
->>>>>>0 ubeshort =2
-# skip many (74/176) Atari DEGAS hi-res bitmap (*.PI3) by check for invalid low color name length (0)
->>>>>>>16 ubeshort >0
->>>>>>>>0 use adobe-aco
-# non RGB branch for Adobe Photoshop Color swatch
->>>>4 ubeshort !0
-# non RGB branch for Adobe Photoshop Color swatch for older versions
->>>>>0 ubeshort <2
-# skip many GEM Image (CHURCH.IMG TIGER.IMG) by check for invalid second high color space ID (55 114 143 157 256 288 450)
->>>>>>14 ubeshort <16
->>>>>>>0 use adobe-aco
-# non RGB branch for Adobe Photoshop Color swatch for newer version 2
->>>>>0 ubeshort =2
-# skip few Atari DEGAS hi-res bitmap (pal1wb-blue.pi3) and few ABR by check for invalid "high" nil bytes (7) before color name length
->>>>>>14 ubeshort =0
->>>>>>>0 use adobe-aco
-# display Adobe Photoshop Color swatch file information (version, number of colors, color spaces, coordinates, names)
-0 name adobe-aco
->0 ubeshort x Adobe Photoshop Color swatch, version %u
-#!:mime application/octet-stream
-!:mime application/x-adobe-aco
-!:apple ????8BCO
-!:ext aco
->0 ubeshort <2
->>(2.S*10) ubelong x
-# version 2 section after version 1 section
->>>&0 ubeshort 2 and 2
-# nc; number of colors like: 20 50 86 88 126 204 300 1050 1137 1280 2092 3010 4096
->2 ubeshort x \b, %u colors
-# maybe last 4 bytes of first section (probably y z color value) like: 0 0x66660000 0xfe700000 0xffff0000
-#>(2.S*10) ubelong x 1ST_SECTION_END=%#8.8x
->0 ubeshort <2 \b; 1st
-# first older Adobe Photoshop Color entry
->>4 use aco-color
->>>2 ubeshort >1 \b; 2nd
-# second older Adobe Photoshop Color entry
->>>>14 use aco-color
->0 ubeshort =2 \b; 1st
-# first new Adobe Photoshop Color entry
->>4 use aco-color-v2
->>>2 ubeshort >1 \b; 2nd
-# jump first color name length words
->>>>(16.S*2) ubequad x
-# second new Adobe Photoshop Color entry
->>>>>&10 use aco-color-v2
-# display Adobe Photoshop Color entry (color space, color coordinates)
-0 name aco-color
-# each color spec entry occupies five words
-# color space: 0~RGB 1~HSB 2~CMYK 3~Pantone 4~Focoltone 5~Trumatch 6~Toyo 7~Lab 8~Grayscale 9?~wideCMYK 10~HKS ...
-#>0 ubeshort x COLOR_ENTRY
->0 ubeshort 0 RGB
->0 ubeshort 1 HSB
->0 ubeshort 2 CMYK
->0 ubeshort 3 Pantone
->0 ubeshort 4 Focoltone
->0 ubeshort 5 Trumatch
->0 ubeshort 6 Toyo
->0 ubeshort 7 Lab
->0 ubeshort 8 Grayscale
->0 ubeshort 9 wide CMYK
->0 ubeshort 10 HKS
-# unofficial
-# >0 ubeshort 12 foo
-# >0 ubeshort 13 bar
-# >0 ubeshort 14 FOO
-# >0 ubeshort 15 BAR
->0 ubeshort x space (%u)
-# color coordinate w
->2 ubeshort x \b, w %#x
-# color coordinate x
->4 ubeshort x \b, x %#x
-# color coordinate y
->6 ubeshort x \b, y %#x
-# color coordinate z; zero for RGB space
->8 ubeshort x \b, z %#x
-# display Adobe Photoshop Color entry version 2 (color space, color coordinates names)
-0 name aco-color-v2
->0 use aco-color
-#>10 ubeshort x \b, NUL_BYTES %#x
-# color name length plus one (len+1) like: 7 8 9 13 14 15 16 17 22 26
-#>>12 ubeshort x \b, LENGTH %u
->>12 ubeshort-1 x \b, %u chars
-# len words; UTF-16 representation of the color name like: "DIC 1s" "PANTONE Process Yellow PC"
->>14 bestring16 x "%s"
-# followed by nil word
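-# Sketch of reading the version-1 colour list handled above (Python; every
-# entry is five big-endian words: space, w, x, y, z; file name made up):
-#   import struct
-#   with open("swatch.aco", "rb") as f:
-#       version, count = struct.unpack(">HH", f.read(4))
-#       colors = [struct.unpack(">5H", f.read(10)) for _ in range(count)]
-#   # e.g. space 0 is RGB with 16-bit w/x/y components and z always 0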
-
-# XV thumbnail indicator (ThMO)
-# URL: https://en.wikipedia.org/wiki/Xv_(software)
-# Reference: http://fileformats.archiveteam.org/wiki/XV_thumbnail
-# Update: Joerg Jenderek
-0 string P7\ 332 XV thumbnail image data
-#0 string P7\ 332 XV "thumbnail file" (icon) data
-!:mime image/x-xv-thumbnail
-# thumbnail .xvpic/foo.bar for graphic foo.bar
-!:ext p7/gif/tif/xpm/jpg
-
-# NITF is defined by United States MIL-STD-2500A
-0 string NITF National Imagery Transmission Format
->25 string >\0 dated %.14s
-
-# GEM Image: Version 1, Headerlen 8 (Wolfram Kleff)
-# Format variations from: Bernd Nuernberger <bernd.nuernberger@web.de>
-# Update: Joerg Jenderek
-# See http://fileformats.archiveteam.org/wiki/GEM_Raster
-# For variations, also see:
-# https://www.seasip.info/Gem/ff_img.html (Ventura)
-# http://www.atari-wiki.com/?title=IMG_file (XIMG, STTT)
-# http://www.fileformat.info/format/gemraster/spec/index.htm (XIMG, STTT)
-# http://sylvana.net/1stguide/1STGUIDE.ENG (TIMG)
-0 beshort 0x0001
-# header_size
->2 beshort 0x0008
->>0 use gem_info
->2 beshort 0x0009
->>0 use gem_info
-# no example for NOSIG
->2 beshort 24
->>0 use gem_info
-# no example for HYPERPAINT
->2 beshort 25
->>0 use gem_info
-16 string XIMG\0
->0 use gem_info
-# no example
-16 string STTT\0\x10
->0 use gem_info
-# no example or description
-16 string TIMG\0
->0 use gem_info
-
-0 name gem_info
-# version is 2 for some XIMG and 1 for all others
->0 ubeshort <0x0003 GEM
-# https://www.snowstone.org.uk/riscos/mimeman/mimemap.txt
-!:mime image/x-gem
-# header_size 24 25 27 59 779 words for colored bitmaps
->>2 ubeshort >9
->>>16 string STTT\0\x10 STTT
->>>16 string TIMG\0 TIMG
-# HYPERPAINT or NOSIG variant
->>>16 string \0\x80
->>>>2 ubeshort =24 NOSIG
->>>>2 ubeshort !24 HYPERPAINT
-# NOSIG or XIMG variant
->>>16 default x
->>>>16 string !XIMG\0 NOSIG
->>16 string =XIMG\0 XIMG Image data
-!:ext img/ximg
-# to avoid Warning: Current entry does not yet have a description for adding a EXTENSION type
->>16 string !XIMG\0 Image data
-!:ext img
-# header_size is 9 for Ventura files and 8 for other GEM Paint files
->>2 ubeshort 9 (Ventura)
-#>>2 ubeshort 8 (Paint)
->>12 ubeshort x %d x
->>14 ubeshort x %d,
-# 1 4 8
->>4 ubeshort x %d planes,
-# in tenths of a millimetre
->>8 ubeshort x %d x
->>10 ubeshort x %d pixelsize
-# pattern_size 1-8. 2 for GEM Paint
->>6 ubeshort !2 \b, pattern size %d
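-# Sketch of the GEM IMG header words used by gem_info above (Python; eight
-# big-endian 16-bit fields, file name made up):
-#   import struct
-#   with open("PICTURE.IMG", "rb") as f:
-#       hdr = struct.unpack(">8H", f.read(16))
-#   version, header_len, planes, pattern_len, pix_w, pix_h, width, height = hdr
-#   # header_len is in words; pix_w/pix_h give the pixel size (see unit note above)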
-
-# GEM Metafile (Wolfram Kleff)
-0 ulelong 0x0018FFFF GEM Metafile data
->4 uleshort x version %d
-
-#
-# SMJPEG. A custom Motion JPEG format used by Loki Entertainment
-# Software Torbjorn Andersson <d91tan@Update.UU.SE>.
-#
-0 string \0\nSMJPEG SMJPEG
->8 ubelong x %d.x data
-# According to the specification you could find any number of _TXT
-# headers here, but I can't think of any way of handling that. None of
-# the SMJPEG files I tried it on used this feature. Even if such a
-# file is encountered the output should still be reasonable.
->16 string _SND \b,
->>24 ubeshort >0 %d Hz
->>26 ubyte 8 8-bit
->>26 ubyte 16 16-bit
->>28 string NONE uncompressed
-# >>28 string APCM ADPCM compressed
->>27 ubyte 1 mono
->>27 ubyte 2 stereo
-# Help! Isn't there any way to avoid writing this part twice?
-# Yes, use a name/use
->>32 string _VID \b,
-# >>>48 string JFIF JPEG
->>>40 ubelong >0 %d frames
->>>44 ubeshort >0 (%d x
->>>46 ubeshort >0 %d)
->16 string _VID \b,
-# >>32 string JFIF JPEG
->>24 ubelong >0 %d frames
->>28 ubeshort >0 (%d x
->>30 ubeshort >0 %d)
-
-0 string Paint\ Shop\ Pro\ Image\ File Paint Shop Pro Image File
-
-# taken from fkiss: (<yav@mte.biglobe.ne.jp> ?)
-0 string KiSS KISS/GS
->4 ubyte 16 color
->>5 ubyte x %d bit
->>8 uleshort x %d colors
->>10 uleshort x %d groups
->4 ubyte 32 cell
->>5 ubyte x %d bit
->>8 uleshort x %d x
->>10 uleshort x %d
->>12 uleshort x +%d
->>14 uleshort x +%d
-
-# Webshots (www.webshots.com), by John Harrison
-0 string C\253\221g\230\0\0\0 Webshots Desktop .wbz file
-
-# Hercules DASD image files
-# From Jan Jaeger <jj@septa.nl> and Jay Maynard <jaymaynard@gmail.com>
-0 string CKD_P370 Hercules CKD DASD image file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
-
-0 string CKD_C370 Hercules compressed CKD DASD image file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
->552 lelong x \b, %d total cylinders
->>557 byte 0 \b, no compression
->>557 byte 1 \b, ZLIB compression
->>557 byte 2 \b, BZ2 compression
-
-0 string CKD_S370 Hercules CKD DASD shadow file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
-
-0 string CKD_P064 Hercules CKD64 DASD image file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
-
-0 string CKD_C064 Hercules compressed CKD64 DASD image file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
->524 lelong x \b, %d total cylinders
->>585 byte 0 \b, no compression
->>585 byte 1 \b, ZLIB compression
->>585 byte 2 \b, BZ2 compression
-
-0 string CKD_S064 Hercules CKD64 DASD shadow file
->8 lelong x \b, %d heads per cylinder
->12 lelong x \b, track size %d bytes
->16 byte x \b, device type 33%2.2X
-
-# Squeak images and programs - etoffi@softhome.net
-0 string \146\031\0\0 Squeak image data
-0 search/1 'From\040Squeak Squeak program text
-
-# partimage: file(1) magic for PartImage files (experimental, incomplete)
-# Author: Hans-Joachim Baader <hjb@pro-linux.de>
-0 string PaRtImAgE-VoLuMe PartImage
->0x0020 string 0.6.1 file version %s
->>0x0060 ulelong >-1 volume %d
-#>>0x0064 8 byte identifier
-#>>0x007c reserved
->>0x0200 string >\0 type %s
->>0x1400 string >\0 device %s,
->>0x1600 string >\0 original filename %s,
-# Some fields omitted
->>0x2744 ulelong 0 not compressed
->>0x2744 ulelong 1 gzip compressed
->>0x2744 ulelong 2 bzip2 compressed
->>0x2744 ulelong >2 compressed with unknown algorithm
->0x0020 string >0.6.1 file version %s
->0x0020 string <0.6.1 file version %s
-
-# DCX is multi-page PCX, using a simple header of up to 1024
-# offsets for the respective PCX components.
-# From: Joerg Wunsch <joerg_wunsch@uriah.heep.sax.de>
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/DCX
-0 ulelong 987654321 DCX multi-page
-# http://www.nationalarchives.gov.uk/pronom/x-fmt/348
-!:mime image/x-dcx
-!:ext dcx
-# The first file offset usually starts at file offset 0x1004
-# print 1 space after the 0x100? offset and then handle PCX images via ./images
->4 ulelong x \b, at %#x
->(4.l) indirect x
-# possible 2nd PCX image
-#>8 ulelong !0 \b, at %#x
-#>>(8.l) indirect x
-# possible 3rd PCX image
-#>12 ulelong !0 \b, at %#x
-#>>(12.l) indirect x
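-# Sketch of walking the DCX page table probed above (Python; little-endian,
-# zero-terminated list of at most 1024 slots, file name made up):
-#   import struct
-#   with open("pages.dcx", "rb") as f:
-#       assert struct.unpack("<I", f.read(4))[0] == 987654321
-#       offsets = []
-#       for _ in range(1024):
-#           (off,) = struct.unpack("<I", f.read(4))
-#           if off == 0:
-#               break
-#           offsets.append(off)     # each offset points at an embedded PCX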
-
-# Simon Walton <simonw@matteworld.com>
-# Kodak Cineon format for scanned negatives
-# http://www.kodak.com/US/en/motion/support/dlad/
-0 ulelong 0xd75f2a80 Cineon image data
->200 ubelong >0 \b, %d x
->204 ubelong >0 %d
-
-
-# Bio-Rad .PIC is an image format used by microscope control systems
-# and related image processing software used by biologists.
-# From: Vebjorn Ljosa <vebjorn@ljosa.com>
-# BOOL values are two-byte integers; use them to rule out false positives.
-# https://web.archive.org/web/20050317223257/www.cs.ubc.ca/spider/ladic/text/biorad.txt
-# Samples: https://www.loci.wisc.edu/software/sample-data
-14 uleshort <2
->62 uleshort <2
->>54 uleshort 12345 Bio-Rad .PIC Image File
->>>0 uleshort >0 %d x
->>>2 uleshort >0 %d,
->>>4 uleshort =1 1 image in file
->>>4 uleshort >1 %d images in file
-
-# From Jan "Yenya" Kasprzak <kas@fi.muni.cz>
-# The description of *.mrw format can be found at
-# http://www.dalibor.cz/minolta/raw_file_format.htm
-0 string \000MRM Minolta Dimage camera raw image data
-
-# Summary: DjVu image / document
-# Extension: .djvu
-# Reference: http://djvu.org/docs/DjVu3Spec.djvu
-# Submitted by: Stephane Loeuillet <stephane.loeuillet@tiscali.fr>
-# Modified by (1): Abel Cheung <abelcheung@gmail.com>
-0 string AT&TFORM
->12 string DJVM DjVu multiple page document
-!:mime image/vnd.djvu
->12 string DJVU DjVu image or single page document
-!:mime image/vnd.djvu
->12 string DJVI DjVu shared document
-!:mime image/vnd.djvu
->12 string THUM DjVu page thumbnails
-!:mime image/vnd.djvu
-
-# Originally by Marc Espie
-# Modified by Robert Minsk <robertminsk at yahoo.com>
-# https://www.openexr.com/openexrfilelayout.pdf
-0 ulelong 20000630 OpenEXR image data,
-!:mime image/x-exr
->4 ulelong&0x000000ff x version %d,
->4 ulelong ^0x00000200 storage: scanline
->4 ulelong &0x00000200 storage: tiled
->8 search/0x1000 compression\0 \b, compression:
->>&16 ubyte 0 none
->>&16 ubyte 1 rle
->>&16 ubyte 2 zips
->>&16 ubyte 3 zip
->>&16 ubyte 4 piz
->>&16 ubyte 5 pxr24
->>&16 ubyte 6 b44
->>&16 ubyte 7 b44a
->>&16 ubyte 8 dwaa
->>&16 ubyte 9 dwab
->>&16 ubyte >9 unknown
->8 search/0x1000 dataWindow\0 \b, dataWindow:
->>&10 ulelong x (%d
->>&14 ulelong x %d)-
->>&18 ulelong x \b(%d
->>&22 ulelong x %d)
->8 search/0x1000 displayWindow\0 \b, displayWindow:
->>&10 ulelong x (%d
->>&14 ulelong x %d)-
->>&18 ulelong x \b(%d
->>&22 ulelong x %d)
->8 search/0x1000 lineOrder\0 \b, lineOrder:
->>&14 ubyte 0 increasing y
->>&14 ubyte 1 decreasing y
->>&14 ubyte 2 random y
->>&14 ubyte >2 unknown
-
-# SMPTE Digital Picture Exchange Format, SMPTE DPX
-#
-# ANSI/SMPTE 268M-1994, SMPTE Standard for File Format for Digital
-# Moving-Picture Exchange (DPX), v1.0, 18 February 1994
-# Robert Minsk <robertminsk at yahoo.com>
-# Modified by Harry Mallon <hjmallon at gmail.com>
-0 string SDPX DPX image data, big-endian,
-!:mime image/x-dpx
->0 use dpx_info
-0 string XPDS DPX image data, little-endian,
-!:mime image/x-dpx
->0 use \^dpx_info
-
-0 name dpx_info
->768 ubeshort <4
->>772 ubelong x %dx
->>776 ubelong x \b%d,
->768 ubeshort >3
->>776 ubelong x %dx
->>772 ubelong x \b%d,
->768 ubeshort 0 left to right/top to bottom
->768 ubeshort 1 right to left/top to bottom
->768 ubeshort 2 left to right/bottom to top
->768 ubeshort 3 right to left/bottom to top
->768 ubeshort 4 top to bottom/left to right
->768 ubeshort 5 top to bottom/right to left
->768 ubeshort 6 bottom to top/left to right
->768 ubeshort 7 bottom to top/right to left
-
-# From: Tom Hilinski <tom.hilinski@comcast.net>
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/NetCDF
-# http://fileformats.archiveteam.org/wiki/NetCDF
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/n/netcdf.trid.xml
-# https://www.loc.gov/preservation/digital/formats/fdd/fdd000330.shtml
-# Note: called "NetCDF Network Common Data Form" by TrID and
-# "netCDF-3 Classic" by DROID via PUID fmt/282
-# https://www.unidata.ucar.edu/packages/netcdf/
-0 string CDF\001
-# skip DROID fmt-282-signature-id-298.nc by test for more content bytes
->3 uleshort >0 NetCDF Data Format data
-#!:mime application/netcdf
-# https://reposcope.com/mimetype/application/x-netcdf
-!:mime application/x-netcdf
-!:ext nc
-# https://fileinfo.com/extension/cdf
-# https://www.file-extensions.org/cdf-file-extension-unidata-network-common-data-form
-# in 1994 the file extension was changed from CDF to NC to avoid a clash with other file formats
-#!:ext nc/cdf
-# 64-bit offset netcdf Classic https://www.unidata.ucar.edu/software/netcdf/docs/file_format_specifications
-# Note: called "netCDF-3 64-bit" by DROID via PUID fmt/283
-0 string CDF\002
-# skip DROID fmt-283-signature-id-299.nc by test for more content bytes
->3 uleshort >0 NetCDF Data Format data (64-bit offset)
-#!:mime application/netcdf
-!:mime application/x-netcdf
-!:ext nc
-
-# From: Michael Liu
-# https://en.wikipedia.org/wiki/Common_Data_Format
-0 ubelong 0xCDF30001 Common Data Format (Version 3 or later) data
-!:mime application/x-cdf
-
-0 ubelong 0xCDF26002 Common Data Format (Version 2.6 or 2.7) data
-!:mime application/x-cdf
-
-0 ubelong 0x0000FFFF Common Data Format (Version 2.5 or earlier) data
-!:mime application/x-cdf
-
-# Hierarchical Data Format, used to facilitate scientific data exchange
-# specifications at http://hdf.ncsa.uiuc.edu/
-# URL: http://fileformats.archiveteam.org/wiki/HDF
-# https://en.wikipedia.org/wiki/Hierarchical_Data_Format
-# Reference: https://portal.hdfgroup.org/download/attachments/52627880/HDF5_File_Format_Specification_Version-3.0.pdf
-0 ubelong 0x0e031301 Hierarchical Data Format (version 4) data
-!:mime application/x-hdf
-!:ext hdf/hdf4/h4
-0 string \211HDF\r\n\032\n Hierarchical Data Format (version 5) data
-#!:mime application/x-hdf
-!:mime application/x-hdf5
-!:ext h5/hdf5/hdf/he5
-512 string \211HDF\r\n\032\n
-# skip Matlab v5 mat-file testhdf5_7.4_GLNX86.mat handled by ./mathematica
->0 string !MATLAB Hierarchical Data Format (version 5) with 512 bytes user block
-#!:mime application/x-hdf
-!:mime application/x-hdf5
-!:ext h5/hdf5/hdf/he5
-1024 string \211HDF\r\n\032\n Hierarchical Data Format (version 5) with 1k user block
-#!:mime application/x-hdf
-!:mime application/x-hdf5
-!:ext h5/hdf5/hdf/he5
-2048 string \211HDF\r\n\032\n Hierarchical Data Format (version 5) with 2k user block
-#!:mime application/x-hdf
-!:mime application/x-hdf5
-!:ext h5/hdf5/hdf/he5
-4096 string \211HDF\r\n\032\n Hierarchical Data Format (version 5) with 4k user block
-#!:mime application/x-hdf
-!:mime application/x-hdf5
-!:ext h5/hdf5/hdf/he5
-
-# From: Tobias Burnus <burnus@net-b.de>
-# Xara (for a while: Corel Xara) is a graphic package, see
-# http://www.xara.com/ for Windows and as GPL application for Linux
-0 string XARA\243\243 Xara graphics file
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Corel_Gallery
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bmf-corel.trid.xml
-# Note: called "Corel Binary Material Format" by TrID and
-# "Corel Flow" by XnView
-0 string @CorelBMF\n\rCorel\040Corporation Corel GALLERY Clipart
-!:mime image/x-corel-bmf
-!:ext bmf
-
-# https://www.cartesianinc.com/Tech/
-# Reference: http://fileformats.archiveteam.org/wiki/Cartesian_Perceptual_Compression
-0 string CPC\262 Cartesian Perceptual Compression image
-!:mime image/x-cpi
-!:ext cpi/cpc
-
-# From Albert Cahalan <acahalan@gmail.com>
-# puredigital used it for the CVS disposable camcorder
-#8 lelong 4 ZBM bitmap image data
-#>4 uleshort x %u x
-#>6 uleshort x %u
-
-# From Albert Cahalan <acahalan@gmail.com>
-# uncompressed 5:6:5 HighColor image for OLPC XO firmware icons
-0 string C565 OLPC firmware icon image data
->4 uleshort x %u x
->6 uleshort x %u
-
-# Applied Images - Image files from Cytovision
-# Gustavo Junior Alves <gjalves@gjalves.com.br>
-0 string \xce\xda\xde\xfa Cytovision Metaphases file
-0 string \xed\xad\xef\xac Cytovision Karyotype file
-0 string \x0b\x00\x03\x00 Cytovision FISH Probe file
-0 string \xed\xfe\xda\xbe Cytovision FLEX file
-0 string \xed\xab\xed\xfe Cytovision FLEX file
-0 string \xad\xfd\xea\xad Cytovision RATS file
-
-# Wavelet Scalar Quantization format used in gray-scale fingerprint images
-# From Tano M Fotang <mfotang@quanteq.com>
-0 string \xff\xa0\xff\xa8\x00 Wavelet Scalar Quantization image data
-
-# Type: PCO B16 image files
-# URL: http://www.pco.de/fileadmin/user_upload/db/download/MA_CWDCOPIE_0412b.pdf
-# From: Florian Philipp <florian.philipp@binarywings.net>
-# Extension: .b16
-# Description: Pixel image format produced by PCO Camware, typically used
-# together with PCO cameras.
-# Note: Different versions exist for e.g. 8 bit and 16 bit images.
-# Documentation is incomplete.
-0 string/b PCO- PCO B16 image data
->12 ulelong x \b, %dx
->16 ulelong x \b%d
->20 ulelong 0 \b, short header
->20 ulelong -1 \b, extended header
->>24 ulelong 0 \b, grayscale
->>>36 ulelong 0 linear LUT
->>>36 ulelong 1 logarithmic LUT
->>>28 ulelong x [%d
->>>32 ulelong x \b,%d]
->>24 ulelong 1 \b, color
->>>64 ulelong 0 linear LUT
->>>64 ulelong 1 logarithmic LUT
->>>40 ulelong x r[%d
->>>44 ulelong x \b,%d]
->>>48 ulelong x g[%d
->>>52 ulelong x \b,%d]
->>>56 ulelong x b[%d
->>>60 ulelong x \b,%d]
-
-# Polar Monitor Bitmap (.pmb) used as logo for Polar Electro watches
-# From: Markus Heidelberg <markus.heidelberg at web.de>
-0 string/t [BitmapInfo2] Polar Monitor Bitmap text
-!:mime image/x-polar-monitor-bitmap
-
-# From: Rick Richardson <rickrich@gmail.com>
-# updated by: Joerg Jenderek
-# URL: http://techmods.net/nuvi/
-0 string GARMIN\ BITMAP\ 01 Garmin Bitmap file
-# extension is also used for
-# Sony SRF raw image (image/x-sony-srf)
-# SRF map
-# Terragen Surface Map (https://www.planetside.co.uk/terragen)
-# FileLocator Pro search criteria file (https://www.mythicsoft.com/filelocatorpro)
-!:ext srf
-#!:mime image/x-garmin-srf
-# version 1.00,2.00,2.10,2.40,2.50
->0x2f string >0 \b, version %4.4s
-# width (2880,2881,3240)
->0x55 uleshort >0 \b, %dx
-# height (80,90)
->>0x53 uleshort x \b%d
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Imageiio/imaginfo_(Ulead)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pe3.trid.xml
-# Note: called "Ulead Imageiio/Imaginfo thumbnail" by TrID
-0 string IIO1$ Ulead Photo Explorer 3
-#!:mime application/octet-stream
-!:mime image/x-ulead-pe3
-# IMAGEIIO.PE3
-!:ext pe3
-# look for DOS/Windows drive letter
->5 search/192/s :\\
-# directory or full name of corresponding imaginfo.pe3 like: "T:\SAMPLES\TEXTURES\SKY_SNOW\IIOE371.TMP "S:\PI3\PIMPACT3\PROGRAMS\PATTERNS\imaginfo.pe3"
->>&-1 string x "%s"
-# look for DOS/Windows network path if no drive letter part
->5 default x
->>5 search/192/s \x5c\x5c
-# full name of corresponding imaginfo.pe3 like: "\\Lionking\upi\SAMPLES\IMAGES\ANIMALS\imaginfo.pe3"
->>>&0 string x "%s"
-# Type: Ulead Photo Explorer5 (.pe5)
-# URL: http://fileformats.archiveteam.org/wiki/Imageiio/imaginfo_(Ulead)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pe4.trid.xml
-# From: Simon Horman <horms@debian.org>
-# Update: Joerg Jenderek
-# Note: some are called "Ulead Imageiio/Imaginfo thumbnail" by TrID
-# and used in various Ulead applications
-0 string IIO2H Ulead Photo Explorer 4 or 5
-#!:mime application/octet-stream
-!:mime image/x-ulead-pe4
-# IMAGEIIO.PE4
-!:ext pe4/pe5
-# look in most samples for JPEG signature like: SAMPLES/IMAGES/SCENES/IMAGINFO.PE4
->0x4c2 search/0xE02/s JFIF with JPEG image data
->>&-6 use jpeg
-# near the end, a list of image names like: Img0001.pcd 1116012L.JPG NCARD4.TPL
-#
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pe3-imaginfo.trid.xml
-11 string \001\0\0\0\0
-# check for version 3 part
->19 string \0\001\0\003\0
->>0 use ulead-imaginfo
-# From: Joerg Jenderek
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pe4-imaginfo.trid.xml
-11 string \001\0\0\0\0
-# check for version 4 part
->19 string \0\0\0\004\0
->>0 use ulead-imaginfo
-# display information about Ulead Imaginfo thumbnail (version, directory, image extension)
-0 name ulead-imaginfo
->22 ubyte x Ulead Imaginfo thumbnail
-#!:mime application/octet-stream
-!:mime image/x-ulead-imaginfo
->22 ubyte =3 \b, version 3
-# IMAGINFO.PE3
-!:ext pe3
->22 ubyte =4 \b, version 4
-# IMAGINFO.PE4
-!:ext pe4
-# MAYBE ALSO VERSION 5 ?
-#>22 ubyte =5 \b, version 5
-#!:ext pe5
->22 ubyte x
-# look for DOS/Windows drive letter
->>4 search/192/s :\x5c
-# skip f:\Programme\iPhoto Plus 4\Template\Business Cards\IMAGINFO.PE4
-# by looking for a drive letter in range A-Z
->>>&-1 ubyte >0x40
-# directory path like: "E:\iPE\CDSample\Images\Scenes" "D:\XmasCard\Samples" "C:\TEMP\PLANTS"
->>>>&-5 pstring/l >0 \b, "%s"
-# look for DOS/Windows network path if no valid drive letter part
->>>&-1 default x
->>>>4 search/192/s \x5c\x5c
-# directory path like: "\\FSX\SYS\OPPS\IPE.ENG\TEMPLATE\BUSINESS" "\\Lionking\upi\SAMPLES\IMAGES\ANIMALS"
->>>>>&-4 pstring/l >0 \b, "%s"
-# look for DOS/Windows network path if no drive letter part
->>4 default x
->>>4 search/192/s \x5c\x5c
-# directory path like: "\\FSX\SYS\opps\ipe.eng\samples" "\\DANIEL\IPE_CD\IPE.ITA"
->>>>&-4 pstring/l >0 \b, "%s"
-# look for point character inside image names
->56 search/38/s .
-# image name extension like: bmp jpg pcd tpl
->>&1 string x with %-.3s images
-# Summary: Ulead Pattern image (Corel Corporation)
-# URL: https://en.wikipedia.org/wiki/Ulead_Systems
-# https://www.file-extensions.org/pst-file-extension-ulead-pattern-image-format
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pst-ulead.trid.xml
-# From: Joerg Jenderek
-# Note: used also by CorelDraw Essentials 3 version 13.0.0.800
-# there seem to exist other versions
-0 ubelong 0xFFFF0100
->8 search/21 PresetInfo Ulead pattern image
-#!:mime application/octet-stream
-!:mime image/x-ulead-pst
-!:ext pst
-# string length like: 16 18 19 21 24
-#>>4 uleshort x n=%u
-# like: BlendPresetInfo DropShadowPresetInfo FileNewPresetInfo VectorExtrudePresetInfo EnvelopePresetInfo ContourPresetInfo DistortionPresetInfo
->>4 pstring/h x "%s"
-
-# Type: X11 cursor
-# URL: http://webcvs.freedesktop.org/mime/shared-mime-info/freedesktop.org.xml.in?view=markup
-# From: Mathias Brodala <info@noctus.net>
-0 string Xcur X11 cursor
-
-# Type: Olympus ORF raw images.
-# URL: https://libopenraw.freedesktop.org/wiki/Olympus_ORF
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-0 string MMOR Olympus ORF raw image data, big-endian
-!:mime image/x-olympus-orf
-0 string IIRO Olympus ORF raw image data, little-endian
-!:mime image/x-olympus-orf
-0 string IIRS Olympus ORF raw image data, little-endian
-!:mime image/x-olympus-orf
-
-# Type: files used in modern AVCHD camcorders to store clip information
-# Extension: .cpi
-# From: Alexander Danilov <alexander.a.danilov@gmail.com>
-0 string HDMV0100 AVCHD Clip Information
-
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# URL: http://local.wasp.uwa.edu.au/~pbourke/dataformats/pic/
-# Radiance HDR; usually has .pic or .hdr extension.
-0 string #?RADIANCE\n Radiance HDR image data
-!:mime image/vnd.radiance
-
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# URL: https://www.mpi-inf.mpg.de/resources/pfstools/pfs_format_spec.pdf
-# Used by the pfstools packages. The regex matches for the image size could
-# probably use some work. The MIME type is made up; if there's one in
-# actual common use, it should replace the one below.
-0 string PFS1\x0a PFS HDR image data
-#!mime image/x-pfs
->1 regex [0-9]*\ \b, %s
->>1 regex \ [0-9]{4} \bx%s
-
-# Type: Foveon X3F
-# URL: https://www.photofo.com/downloads/x3f-raw-format.pdf
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# Note that the MIME type isn't defined anywhere that I can find; if
-# there's a canonical type for this format, it should replace this one.
-0 string FOVb Foveon X3F raw image data
-!:mime image/x-x3f
->6 uleshort x \b, version %d.
->4 uleshort x \b%d
->28 ulelong x \b, %dx
->32 ulelong x \b%d
-
-# Paint.NET file
-# From Adam Buchbinder <adam.buchbinder@gmail.com>
-0 string PDN3 Paint.NET image data
-!:mime image/x-paintnet
-
-# Not really an image.
-# From: "Tano M. Fotang" <mfotang@quanteq.com>
-0 string \x46\x4d\x52\x00 ISO/IEC 19794-2 Format Minutiae Record (FMR)
-
-# doc: https://www.shikino.co.jp/eng/products/images/FLOWER.jpg.zip
-# example: https://www.shikino.co.jp/eng/products/images/FLOWER.wdp.zip
-90 ubequad 0x574D50484F544F00 JPEG-XR Image
->98 ubyte&0x08 =0x08 \b, hard tiling
->99 ubyte&0x80 =0x80 \b, tiling present
->99 ubyte&0x40 =0x40 \b, codestream present
->99 ubyte&0x38 x \b, spatial xform=
->99 ubyte&0x38 0x00 \bTL
->99 ubyte&0x38 0x08 \bBL
->99 ubyte&0x38 0x10 \bTR
->99 ubyte&0x38 0x18 \bBR
->99 ubyte&0x38 0x20 \bBT
->99 ubyte&0x38 0x28 \bRB
->99 ubyte&0x38 0x30 \bLT
->99 ubyte&0x38 0x38 \bLB
->100 ubyte&0x80 =0x80 \b, short header
->>102 ubeshort+1 x \b, %d
->>104 ubeshort+1 x \bx%d
->100 ubyte&0x80 =0x00 \b, long header
->>102 ubelong+1 x \b, %x
->>106 ubelong+1 x \bx%x
->101 ubeshort&0xf x \b, bitdepth=
->>101 ubeshort&0xf 0x0 \b1-WHITE=1
->>101 ubeshort&0xf 0x1 \b8
->>101 ubeshort&0xf 0x2 \b16
->>101 ubeshort&0xf 0x3 \b16-SIGNED
->>101 ubeshort&0xf 0x4 \b16-FLOAT
->>101 ubeshort&0xf 0x5 \b(reserved 5)
->>101 ubeshort&0xf 0x6 \b32-SIGNED
->>101 ubeshort&0xf 0x7 \b32-FLOAT
->>101 ubeshort&0xf 0x8 \b5
->>101 ubeshort&0xf 0x9 \b10
->>101 ubeshort&0xf 0xa \b5-6-5
->>101 ubeshort&0xf 0xb \b(reserved %d)
->>101 ubeshort&0xf 0xc \b(reserved %d)
->>101 ubeshort&0xf 0xd \b(reserved %d)
->>101 ubeshort&0xf 0xe \b(reserved %d)
->>101 ubeshort&0xf 0xf \b1-BLACK=1
->101 ubeshort&0xf0 x \b, colorfmt=
->>101 ubeshort&0xf0 0x00 \bYONLY
->>101 ubeshort&0xf0 0x10 \bYUV420
->>101 ubeshort&0xf0 0x20 \bYUV422
->>101 ubeshort&0xf0 0x30 \bYUV444
->>101 ubeshort&0xf0 0x40 \bCMYK
->>101 ubeshort&0xf0 0x50 \bCMYKDIRECT
->>101 ubeshort&0xf0 0x60 \bNCOMPONENT
->>101 ubeshort&0xf0 0x70 \bRGB
->>101 ubeshort&0xf0 0x80 \bRGBE
->>101 ubeshort&0xf0 >0x80 \b(reserved %#x)
-
-# From: Johan van der Knijff <johan.vanderknijff@kb.nl>
-#
-# BPG (Better Portable Graphics) format
-# https://bellard.org/bpg/
-# http://fileformats.archiveteam.org/wiki/BPG
-#
-0 string \x42\x50\x47\xFB BPG (Better Portable Graphics)
-!:mime image/bpg
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Apple_Icon_Image_format
-0 string icns Mac OS X icon
-!:mime image/x-icns
-!:apple ????icns
-!:ext icns
->4 ubelong >0
-# file size
->>4 ubelong x \b, %d bytes
-# icon type
->>8 string x \b, "%4.4s" type
-
-# TIM images
-# URL: http://fileformats.archiveteam.org/wiki/TIM_(PlayStation_graphics)
-# Reference: https://mrclick.zophar.net/TilEd/download/timgfx.txt
-# Update: Joerg Jenderek
-# Note: called "PSX TIM *bpp bitmap" by bitmap-tim-*.trid.xml
-# verified as "TIM PSX" by XnView `nconvert -fullinfo *.tim` and
-# by RECOIL `recoil2png -o TMP.PNG input.tim; file TMP.PNG` and often
-# as "PSX TIM" by ImageMagick version 7.1.0-10 command `identify *.tim`
-# here signed integers are used but according to Kaitai unsigned
-0 ulelong 0x00000010
-# 32 Flag bits *cttt; c~CLUT flag t~type 000~4BPP 001~8BPP 010~16BPP 011~24BPP 100~Mixed
-#>4 ulelong x FLAGS=%#x
-# 12+Size of CLUT (2Ch for 4BPP; 20Ch 40Ch 60Ch 80Ch C0Ch for 8BPP) or
-# +image data size (800Ch 2000Ch 2580Ch for 16BPP) (02000003h for dBase memo test.dbt)
-#>8 ulelong x \b, 12+CLUT or data size=%#8.8x
-# CLUT or data size remainder is 12 (Ch), but 03 for dBase memo test.dbt
-#>8 ubyte&0x0F =0x0C \b, SIZE REMAINDER IS 12
-# skip dBase III memo test.dbt with invalid flags 22D10189h
->4 ulelong&0xffFFffF0 =0 Sony PlayStation PSX image,
-# file (version 5.40) labeled the above entry as "TIM image"
-!:mime image/x-sony-tim
-!:ext tim
-#>>4 ulelong&0x00000007 x \b, BPP~%u
-# 4BPP and 8BPP examples exist with CLUT or without CLUT
->>4 ulelong&0x07 0x0 4-Bit,
->>4 ulelong&0x07 0x1 8-Bit,
-# 16BPP and 24BPP examples have no CLUT
->>4 ulelong 0x2 15-Bit,
->>4 ulelong 0x3 24-Bit,
-# no example
->>4 ulelong&0x07 0x4 Mixed-Bit,
-# CLUT flag set
->>4 ulelong &8
-# 12 + size of CLUT like: 1000Ch 800Ch 400Ch 40Ch and 2FEh (KAGE.TIM)
-#>>>(8.l+8) ulelong x \b, 12+CLUT SIZE=%#8.8x
->>>(8.l+12) uleshort x Pixel at (%d,
->>>(8.l+14) uleshort x \b%d) Size=
-# image width (to get actual width multiply by 4 for 4BPP and by 2 for 8BPP)
->>>>4 ulelong 0x8
->>>>>(8.l+16) uleshort*4 x \b%d
->>>>4 ulelong 0x9
->>>>>(8.l+16) uleshort*2 x \b%d
-# image height like: 32 64 128 144 160 208 256
->>>(8.l+18) uleshort x \bx%d,
->>>4 ulelong 0x8 16 CLUT Entries at
->>>4 ulelong 0x9 256 CLUT Entries at
->>>12 uleshort x (%d,
->>>14 uleshort x \b%d)
-# no Color LookUp Table (CLUT)
->>4 ulelong ^8
-# image origin X Y
->>>12 uleshort x Pixel at (%d,
->>>14 uleshort x \b%d) Size=
-# real image width = multiply by 4 (4BPP) 2 (8BPP) 1 (16BPP) 2/3 (24BPP)
->>>>4 ulelong 0x0
->>>>>16 uleshort*4 x \b%d
->>>>4 ulelong 0x1
->>>>>16 uleshort*2 x \b%d
->>>>4 ulelong 0x2
->>>>>16 uleshort x \b%d
->>>>4 ulelong 0x3
-# GRR: NOT working
-#>>>>>16 uleshort*2/3 x \b%d
->>>>>16 uleshort x \b2/3*%d
-# mixed format width not explained!
->>>>4 ulelong 0x4
->>>>>16 uleshort x \b%d
-# image height like: 64 240 256
->>>18 uleshort x \bx%d
-# TIM image data
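-# Sketch of the stored-width scaling applied above (Python; mode is the low
-# three flag bits; mixed mode 4 is left unscaled, as above):
-#   def tim_actual_width(stored_width, mode):
-#       return {0: stored_width * 4,        # 4 bpp
-#               1: stored_width * 2,        # 8 bpp
-#               2: stored_width,            # 16 bpp
-#               3: stored_width * 2 // 3,   # 24 bpp
-#              }.get(mode, stored_width)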
-
-# MDEC streams
-0 ulelong 0x80010160 MDEC video stream,
->16 uleshort x %dx
->18 uleshort x \b%d
-#>8 ulelong x %d frames
-#>4 uleshort x secCount=%d;
-#>6 uleshort x nSectors=%d;
-#>12 ulelong x frameSize=%d;
-
-# BS encoded bitstreams
-2 uleshort 0x3800 BS image,
-# GRR: the above line is also true for binary Computer Graphics Metafile SAB00012.CGM with long parameter length 56 (=38h)
->6 uleshort x Version %d,
->4 uleshort x Quantization %d,
->0 uleshort x (Decompresses to %d words)
-
-# Type: farbfeld image.
-# Url: http://tools.suckless.org/farbfeld/
-# From: Ian D. Scott <ian@iandouglasscott.com>
-#
-0 string farbfeld farbfeld image data,
->8 ubelong x %dx
->12 ubelong x \b%d
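-# Sketch of the trivial farbfeld header read above (Python; pixel data follows
-# as big-endian 16-bit RGBA, file name made up):
-#   import struct
-#   with open("image.ff", "rb") as f:
-#       magic, width, height = struct.unpack(">8sII", f.read(16))
-#   assert magic == b"farbfeld"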
-
-# Type: Microsoft DirectDraw Surface (DXGI formats)
-# URL: https://msdn.microsoft.com/library/default.asp?url=/library/en-us/directx9_c/directx/graphics/reference/DDSFileReference/ddsfileformat.asp
-# From: Morten Hustveit <morten@debian.org>
-# Updated by: David Korth <gerbilsoft@gerbilsoft.com>
-0 name ms-directdraw-dx10
->0 ulelong x \b, DXGI format:
->0 ulelong 1 R32G32B32A32_TYPELESS
->0 ulelong 2 R32G32B32A32_FLOAT
->0 ulelong 3 R32G32B32A32_UINT
->0 ulelong 4 R32G32B32A32_SINT
->0 ulelong 5 R32G32B32_TYPELESS
->0 ulelong 6 R32G32B32_FLOAT
->0 ulelong 7 R32G32B32_UINT
->0 ulelong 8 R32G32B32_SINT
->0 ulelong 9 R16G16B16A16_TYPELESS
->0 ulelong 10 R16G16B16A16_FLOAT
->0 ulelong 11 R16G16B16A16_UNORM
->0 ulelong 12 R16G16B16A16_UINT
->0 ulelong 13 R16G16B16A16_SNORM
->0 ulelong 14 R16G16B16A16_SINT
->0 ulelong 15 R32G32_TYPELESS
->0 ulelong 16 R32G32_FLOAT
->0 ulelong 17 R32G32_UINT
->0 ulelong 18 R32G32_SINT
->0 ulelong 19 R32G8X24_TYPELESS
->0 ulelong 20 D32_FLOAT_S8X24_UINT
->0 ulelong 21 R32_FLOAT_X8X24_TYPELESS
->0 ulelong 22 X32_TYPELESS_G8X24_UINT
->0 ulelong 23 R10G10B10A2_TYPELESS
->0 ulelong 24 R10G10B10A2_UNORM
->0 ulelong 25 R10G10B10A2_UINT
->0 ulelong 26 R11G11B10_FLOAT
->0 ulelong 27 R8G8B8A8_TYPELESS
->0 ulelong 28 R8G8B8A8_UNORM
->0 ulelong 29 R8G8B8A8_UNORM_SRGB
->0 ulelong 30 R8G8B8A8_UINT
->0 ulelong 31 R8G8B8A8_SNORM
->0 ulelong 32 R8G8B8A8_SINT
->0 ulelong 33 R16G16_TYPELESS
->0 ulelong 34 R16G16_FLOAT
->0 ulelong 35 R16G16_UNORM
->0 ulelong 36 R16G16_UINT
->0 ulelong 37 R16G16_SNORM
->0 ulelong 38 R16G16_SINT
->0 ulelong 39 R32_TYPELESS
->0 ulelong 40 D32_FLOAT
->0 ulelong 41 R32_FLOAT
->0 ulelong 42 R32_UINT
->0 ulelong 43 R32_SINT
->0 ulelong 44 R24G8_TYPELESS
->0 ulelong 45 D24_UNORM_S8_UINT
->0 ulelong 46 R24_UNORM_X8_TYPELESS
->0 ulelong 47 X24_TYPELESS_G8_UINT
->0 ulelong 48 R8G8_TYPELESS
->0 ulelong 49 R8G8_UNORM
->0 ulelong 50 R8G8_UINT
->0 ulelong 51 R8G8_SNORM
->0 ulelong 52 R8G8_SINT
->0 ulelong 53 R16_TYPELESS
->0 ulelong 54 R16_FLOAT
->0 ulelong 55 D16_UNORM
->0 ulelong 56 R16_UNORM
->0 ulelong 57 R16_UINT
->0 ulelong 58 R16_SNORM
->0 ulelong 59 R16_SINT
->0 ulelong 60 R8_TYPELESS
->0 ulelong 61 R8_UNORM
->0 ulelong 62 R8_UINT
->0 ulelong 63 R8_SNORM
->0 ulelong 64 R8_SINT
->0 ulelong 65 A8_UNORM
->0 ulelong 66 R1_UNORM
->0 ulelong 67 R9G9B9E5_SHAREDEXP
->0 ulelong 68 R8G8_B8G8_UNORM
->0 ulelong 69 G8R8_G8B8_UNORM
->0 ulelong 70 BC1_TYPELESS
->0 ulelong 71 BC1_UNORM
->0 ulelong 72 BC1_UNORM_SRGB
->0 ulelong 73 BC2_TYPELESS
->0 ulelong 74 BC2_UNORM
->0 ulelong 75 BC2_UNORM_SRGB
->0 ulelong 76 BC3_TYPELESS
->0 ulelong 77 BC3_UNORM
->0 ulelong 78 BC3_UNORM_SRGB
->0 ulelong 79 BC4_TYPELESS
->0 ulelong 80 BC4_UNORM
->0 ulelong 81 BC4_SNORM
->0 ulelong 82 BC5_TYPELESS
->0 ulelong 83 BC5_UNORM
->0 ulelong 84 BC5_SNORM
->0 ulelong 85 B5G6R5_UNORM
->0 ulelong 86 B5G5R5A1_UNORM
->0 ulelong 87 B8G8R8A8_UNORM
->0 ulelong 88 B8G8R8X8_UNORM
->0 ulelong 89 R10G10B10_XR_BIAS_A2_UNORM
->0 ulelong 90 B8G8R8A8_TYPELESS
->0 ulelong 91 B8G8R8A8_UNORM_SRGB
->0 ulelong 92 B8G8R8X8_TYPELESS
->0 ulelong 93 B8G8R8X8_UNORM_SRGB
->0 ulelong 94 BC6H_TYPELESS
->0 ulelong 95 BC6H_UF16
->0 ulelong 96 BC6H_SF16
->0 ulelong 97 BC7_TYPELESS
->0 ulelong 98 BC7_UNORM
->0 ulelong 99 BC7_UNORM_SRGB
->0 ulelong 100 AYUV
->0 ulelong 101 Y410
->0 ulelong 102 Y416
->0 ulelong 103 NV12
->0 ulelong 104 P010
->0 ulelong 105 P016
->0 ulelong 106 420_OPAQUE
->0 ulelong 107 YUY2
->0 ulelong 108 Y210
->0 ulelong 109 Y216
->0 ulelong 110 NV11
->0 ulelong 111 AI44
->0 ulelong 112 IA44
->0 ulelong 113 P8
->0 ulelong 114 A8P8
->0 ulelong 115 B4G4R4A4_UNORM
-
->0 ulelong 116 XBOX_R10G10B10_7E3_A2_FLOAT
->0 ulelong 117 XBOX_R10G10B10_6E4_A2_FLOAT
->0 ulelong 118 XBOX_D16_UNORM_S8_UINT
->0 ulelong 119 XBOX_R16_UNORM_X8_TYPELESS
->0 ulelong 120 XBOX_X16_TYPELESS_G8_UINT
-
->0 ulelong 130 DXGI_FORMAT_P208
->0 ulelong 131 DXGI_FORMAT_V208
->0 ulelong 132 DXGI_FORMAT_V408
-
->0 ulelong 133 ASTC_4X4_TYPELESS
->0 ulelong 134 ASTC_4X4_UNORM
->0 ulelong 135 ASTC_4X4_UNORM_SRGB
->0 ulelong 137 ASTC_5X4_TYPELESS
->0 ulelong 138 ASTC_5X4_UNORM
->0 ulelong 139 ASTC_5X4_UNORM_SRGB
->0 ulelong 141 ASTC_5X5_TYPELESS
->0 ulelong 142 ASTC_5X5_UNORM
->0 ulelong 143 ASTC_5X5_UNORM_SRGB
->0 ulelong 145 ASTC_6X5_TYPELESS
->0 ulelong 146 ASTC_6X5_UNORM
->0 ulelong 147 ASTC_6X5_UNORM_SRGB
->0 ulelong 149 ASTC_6X6_TYPELESS
->0 ulelong 150 ASTC_6X6_UNORM
->0 ulelong 151 ASTC_6X6_UNORM_SRGB
->0 ulelong 153 ASTC_8X5_TYPELESS
->0 ulelong 154 ASTC_8X5_UNORM
->0 ulelong 155 ASTC_8X5_UNORM_SRGB
->0 ulelong 157 ASTC_8X6_TYPELESS
->0 ulelong 158 ASTC_8X6_UNORM
->0 ulelong 159 ASTC_8X6_UNORM_SRGB
->0 ulelong 161 ASTC_8X8_TYPELESS
->0 ulelong 162 ASTC_8X8_UNORM
->0 ulelong 163 ASTC_8X8_UNORM_SRGB
->0 ulelong 165 ASTC_10X5_TYPELESS
->0 ulelong 166 ASTC_10X5_UNORM
->0 ulelong 167 ASTC_10X5_UNORM_SRGB
->0 ulelong 169 ASTC_10X6_TYPELESS
->0 ulelong 170 ASTC_10X6_UNORM
->0 ulelong 171 ASTC_10X6_UNORM_SRGB
->0 ulelong 173 ASTC_10X8_TYPELESS
->0 ulelong 174 ASTC_10X8_UNORM
->0 ulelong 175 ASTC_10X8_UNORM_SRGB
->0 ulelong 177 ASTC_10X10_TYPELESS
->0 ulelong 178 ASTC_10X10_UNORM
->0 ulelong 179 ASTC_10X10_UNORM_SRGB
->0 ulelong 181 ASTC_12X10_TYPELESS
->0 ulelong 182 ASTC_12X10_UNORM
->0 ulelong 183 ASTC_12X10_UNORM_SRGB
->0 ulelong 185 ASTC_12X12_TYPELESS
->0 ulelong 186 ASTC_12X12_UNORM
->0 ulelong 187 ASTC_12X12_UNORM_SRGB
-
->0 ulelong 190 XBOX_R10G10B10_SNORM_A2_UNORM
->0 ulelong 189 XBOX_R4G4_UNORM
->0 ulelong 0xFFFFFFFF DXGI_FORMAT_FORCE_UINT
-
-# Type: Microsoft DirectDraw Surface (common data)
-# URL: https://msdn.microsoft.com/library/default.asp?url=/library/en-us/directx9_c/directx/graphics/reference/DDSFileReference/ddsfileformat.asp
-# From: Morten Hustveit <morten@debian.org>
-# Updated by: David Korth <gerbilsoft@gerbilsoft.com>
-0 name ms-directdraw-surface
->0x10 ulelong x %u x
->0x0C ulelong x %u
-# Color depth.
->0x58 ulelong >0 \b, %u-bit color
-# Determine the pixel format.
->0x50 ulelong&0x4 4
-# FIXME: Handle DX10 and XBOX formats.
->>0x54 string DX10
->>>0x80 use ms-directdraw-dx10
->>0x54 string !DX10 \b, compressed using %.4s
->0x50 ulelong&0x2 0x2 \b, alpha only
->0x50 ulelong&0x200 0x200 \b, YUV
->0x50 ulelong&0x20000 0x20000 \b, luminance
-# RGB pixel format
->0x50 ulelong&0x40 0x40
-
-# Determine the RGB format using the color masks.
-# ulequad order: 0xGGGGGGGGRRRRRRRR, 0xAAAAAAAABBBBBBBB
-
->>0x58 ulelong 16
-
-# NOTE: 15-bit color formats usually have 16-bit listed as the color depth.
->>>0x5C ulequad 0x000003E000007C00
->>>>0x64 ulequad 0x000000000000001F \b, RGB555
->>>0x5C ulequad 0x000003E000001F00
->>>>0x64 ulequad 0x000000000000007C \b, BGR555
-
->>>0x5C ulequad 0x000007E00000F800
->>>>0x64 ulequad 0x000000000000001F \b, RGB565
->>>0x5C ulequad 0x000007E000001F00
->>>>0x64 ulequad 0x00000000000000F8 \b, BGR565
-
->>>0x5C ulequad 0x000000F000000F00
->>>>0x64 ulequad 0x0000F0000000000F \b, ARGB4444
->>>0x5C ulequad 0x000000F00000000F
->>>>0x64 ulequad 0x0000F00000000F00 \b, ABGR4444
-
->>>0x5C ulequad 0x00000F000000F000
->>>>0x64 ulequad 0x0000000F000000F0 \b, RGBA4444
->>>0x5C ulequad 0x00000F00000000F0
->>>>0x64 ulequad 0x0000000F0000F000 \b, BGRA4444
-
->>>0x5C ulequad 0x000000F000000F00
->>>>0x64 ulequad 0x000000000000000F \b, xRGB4444
->>>0x5C ulequad 0x000000F00000000F
->>>>0x64 ulequad 0x0000000000000F00 \b, xBGR4444
-
->>>0x5C ulequad 0x00000F000000F000
->>>>0x64 ulequad 0x00000000000000F0 \b, RGBx4444
->>>0x5C ulequad 0x00000F00000000F0
->>>>0x64 ulequad 0x000000000000F000 \b, BGRx4444
-
->>>0x5C ulequad 0x000003E000007C00
->>>>0x64 ulequad 0x000080000000001F \b, ARGB1555
->>>0x5C ulequad 0x000003E000001F00
->>>>0x64 ulequad 0x000080000000007C \b, ABGR1555
->>>0x5C ulequad 0x000007C00000F800
->>>>0x64 ulequad 0x000000010000003E \b, RGBA5551
->>>0x5C ulequad 0x000007C00000003E
->>>>0x64 ulequad 0x000000010000F800 \b, BGRA5551
-
->>88 ulelong 24
->>>0x5C ulequad 0x0000FF0000FF0000
->>>>0x64 ulequad 0x00000000000000FF \b, RGB888
->>>0x5C ulequad 0x0000FF00000000FF
->>>>0x64 ulequad 0x0000000000FF0000 \b, BGR888
-
->>88 ulelong 32
->>>0x5C ulequad 0x0000FF0000FF0000
->>>>0x64 ulequad 0xFF000000000000FF \b, ARGB8888
->>>0x5C ulequad 0x0000FF00000000FF
->>>>0x64 ulequad 0xFF00000000FF0000 \b, ABGR8888
-
->>>0x5C ulequad 0x00FF0000FF000000
->>>>0x64 ulequad 0x000000FF0000FF00 \b, RGBA8888
->>>0x5C ulequad 0x00FF00000000FF00
->>>>0x64 ulequad 0x000000FFFF000000 \b, BGRA8888
-
->>>0x5C ulequad 0x0000FF0000FF0000
->>>>0x64 ulequad 0x00000000000000FF \b, xRGB8888
->>>0x5C ulequad 0x0000FF00000000FF
->>>>0x64 ulequad 0x0000000000FF0000 \b, xBGR8888
-
->>>0x5C ulequad 0x00FF0000FF000000
->>>>0x64 ulequad 0x000000000000FF00 \b, RGBx8888
->>>0x5C ulequad 0x00FF00000000FF00
->>>>0x64 ulequad 0x00000000FF000000 \b, BGRx8888
-
-# Less common 32-bit color formats.
->>>0x5C ulequad 0xFFFF00000000FFFF
->>>>0x64 ulequad 0x0000000000000000 \b, G16R16
->>>0x5C ulequad 0x0000FFFFFFFF0000
->>>>0x64 ulequad 0x0000000000000000 \b, R16G16
-
->>>0x5C ulequad 0x000FFC003FF00000
->>>>0x64 ulequad 0xC0000000000003FF \b, A2R10G10B10
->>>0x5C ulequad 0x000FFC00000003FF
->>>>0x64 ulequad 0xC00000003FF00000 \b, A2B10G10R10
-
-# Type: Microsoft DirectDraw Surface
-# URL: https://msdn.microsoft.com/library/default.asp?url=/library/en-us/directx9_c/directx/graphics/reference/DDSFileReference/ddsfileformat.asp
-# From: Morten Hustveit <morten@debian.org>
-# Updated by: David Korth <gerbilsoft@gerbilsoft.com>
-0 string/b DDS\040\174\000\000\000 Microsoft DirectDraw Surface (DDS):
->0 use ms-directdraw-surface
-
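As a reading aid for the mask tests in the ms-directdraw-surface rules above: the ulequad comparisons at 0x5C and 0x64 cover the four 32-bit bit masks of the DDS_PIXELFORMAT structure (R/G/B/A masks at 0x5C, 0x60, 0x64 and 0x68, with the bit count at 0x58). A minimal Python sketch under the standard DDS header layout; the function name is illustrative only, not part of any existing tool.

    import struct

    def dds_pixel_masks(path):
        # Read the fields probed by the magic rules above:
        # 0x58 = dwRGBBitCount, 0x5C..0x68 = R/G/B/A bit masks (little-endian).
        with open(path, "rb") as f:
            header = f.read(128)            # "DDS " magic + 124-byte DDS_HEADER
        if header[:4] != b"DDS ":
            raise ValueError("not a DDS file")
        bitcount, r, g, b, a = struct.unpack_from("<5I", header, 0x58)
        return bitcount, r, g, b, a

    # An ARGB8888 surface, for example, yields
    # (32, 0x00FF0000, 0x0000FF00, 0x000000FF, 0xFF000000).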
-# Type: Sega PVR image.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://fabiensanglard.net/Mykaruga/tools/segaPVRFormat.txt
-# - https://github.com/yazgoo/pvrx2png
-# - https://github.com/nickworonekin/puyotools
-
-# Sega PVR header.
-0 name sega-pvr-image-header
->0x0C uleshort x %u x
->0x0E uleshort x %u
-# Image format.
->0x08 ubyte 0 \b, ARGB1555
->0x08 ubyte 1 \b, RGB565
->0x08 ubyte 2 \b, ARGB4444
->0x08 ubyte 3 \b, YUV442
->0x08 ubyte 4 \b, Bump
->0x08 ubyte 5 \b, 4bpp
->0x08 ubyte 6 \b, 8bpp
-# Image data type.
->0x09 ubyte 0x01 \b, square twiddled
->0x09 ubyte 0x02 \b, square twiddled & mipmap
->0x09 ubyte 0x03 \b, VQ
->0x09 ubyte 0x04 \b, VQ & mipmap
->0x09 ubyte 0x05 \b, 8-bit CLUT twiddled
->0x09 ubyte 0x06 \b, 4-bit CLUT twiddled
->0x09 ubyte 0x07 \b, 8-bit direct twiddled
->0x09 ubyte 0x08 \b, 4-bit direct twiddled
->0x09 ubyte 0x09 \b, rectangle
->0x09 ubyte 0x0B \b, rectangular stride
->0x09 ubyte 0x0D \b, rectangular twiddled
->0x09 ubyte 0x10 \b, small VQ
->0x09 ubyte 0x11 \b, small VQ & mipmap
->0x09 ubyte 0x12 \b, square twiddled & mipmap
-
-# Sega PVR image.
-0 string PVRT
->0x10 string DDS\040\174\000\000\000 Sega PVR (Xbox) image:
->>0x20 use ms-directdraw-surface
->0x10 ubelong !0x44445320 Sega PVR image:
->>0 use sega-pvr-image-header
-
-# Sega PVR image with GBIX.
-0 string GBIX
->0x10 string PVRT
->>0x10 string DDS\040\174\000\000\000 Sega PVR (Xbox) image:
->>>0x20 use ms-directdraw-surface
->>0x10 ubelong !0x44445320 Sega PVR image:
->>>0x10 use sega-pvr-image-header
->>0x08 ulelong x \b, global index = %u
-
-# Sega GVR header.
-0 name sega-gvr-image-header
->0x0C ubeshort x %u x
->0x0E ubeshort x %u
-# Image data format.
->0x0B ubyte 0 \b, I4
->0x0B ubyte 1 \b, I8
->0x0B ubyte 2 \b, IA4
->0x0B ubyte 3 \b, IA8
->0x0B ubyte 4 \b, RGB565
->0x0B ubyte 5 \b, RGB5A3
->0x0B ubyte 6 \b, ARGB8888
->0x0B ubyte 8 \b, CI4
->0x0B ubyte 9 \b, CI8
->0x0B ubyte 14 \b, DXT1
-
-# Sega GVR image.
-0 string GVRT Sega GVR image:
->0x10 use sega-gvr-image-header
-
-# Sega GVR image with GBIX.
-0 string GBIX
->0x10 string GVRT Sega GVR image:
->>0x10 use sega-gvr-image-header
->>0x08 ubelong x \b, global index = %u
-
-# Sega GVR image with GCIX. (Wii)
-0 string GCIX
->0x10 string GVRT Sega GVR image:
->>0x10 use sega-gvr-image-header
->>0x08 ubelong x \b, global index = %u
-
-# Light Field Picture
-# Documentation: http://optics.miloush.net/lytro/TheFileFormat.aspx
-# Typical file extensions: .lfp .lfr .lfx
-
-0 ubelong 0x894C4650
->4 ubelong 0x0D0A1A0A
->12 ubelong 0x00000000 Lytro Light Field Picture
->8 ubelong x \b, version %d
-
-# Type: Vision Research Phantom CINE Format
-# URL: https://www.phantomhighspeed.com/
-# URL2: http://phantomhighspeed.force.com/vriknowledge/servlet/fileField?id=0BEU0000000Cfyk
-# From: Harry Mallon <hjmallon at gmail.com>
-#
-# This has only a short "CI" magic code, but the 44 is the size of the struct,
-# which is stable
-0 string CI
->2 uleshort 44 Vision Research CINE Video,
->>4 uleshort 0 Grayscale,
->>4 uleshort 1 JPEG Compressed,
->>4 uleshort 2 RAW,
->>6 uleshort x version %d,
->>20 ulelong x %d frames,
->>48 ulelong x %dx
->>52 ulelong x \b%d
-
-# Type: ARRI Raw Image
-# Info: SMPTE RDD30:2014
-# From: Harry Mallon <hjmallon at gmail.com>
-0 string ARRI ARRI ARI image data,
->4 ulelong 0x78563412 little-endian,
->4 ulelong 0x12345678 big-endian,
->12 ulelong x version %d,
->20 ulelong x %dx
->24 ulelong x \b%d
-
-# Type: Khronos KTX texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Reference: https://www.khronos.org/opengles/sdk/tools/KTX/file_format_spec/
-
-# glEnum decoding.
-# NOTE: Only the most common formats are listed here.
-0 name khronos-ktx-glEnum
->0 ulelong 0x1907 \b, RGB
->0 ulelong 0x1908 \b, RGBA
->0 ulelong 0x1909 \b, LUMINANCE
->0 ulelong 0x190A \b, LUMINANCE_ALPHA
->0 ulelong 0x80E1 \b, BGR
->0 ulelong 0x80E2 \b, BGRA
->0 ulelong 0x83A0 \b, RGB_S3TC
->0 ulelong 0x83A1 \b, RGB4_S3TC
->0 ulelong 0x83A2 \b, RGBA_S3TC
->0 ulelong 0x83A3 \b, RGBA4_S3TC
->0 ulelong 0x83A4 \b, RGBA_DXT5_S3TC
->0 ulelong 0x83A5 \b, RGBA4_DXT5_S3TC
->0 ulelong 0x83F0 \b, COMPRESSED_RGB_S3TC_DXT1_EXT
->0 ulelong 0x83F1 \b, COMPRESSED_RGBA_S3TC_DXT1_EXT
->0 ulelong 0x83F2 \b, COMPRESSED_RGBA_S3TC_DXT3_EXT
->0 ulelong 0x83F3 \b, COMPRESSED_RGBA_S3TC_DXT5_EXT
->0 ulelong 0x8D64 \b, ETC1_RGB8_OES
->0 ulelong 0x9270 \b, COMPRESSED_R11_EAC
->0 ulelong 0x9271 \b, COMPRESSED_SIGNED_R11_EAC
->0 ulelong 0x9272 \b, COMPRESSED_RG11_EAC
->0 ulelong 0x9273 \b, COMPRESSED_SIGNED_RG11_EAC
->0 ulelong 0x9274 \b, COMPRESSED_RGB8_ETC2
->0 ulelong 0x9275 \b, COMPRESSED_SRGB8_ETC2
->0 ulelong 0x9276 \b, COMPRESSED_RGB8_PUNCHTHROUGH_ALPHA1_ETC2
->0 ulelong 0x9277 \b, COMPRESSED_SRGB8_PUNCHTHROUGH_ALPHA1_ETC2
->0 ulelong 0x9278 \b, COMPRESSED_RGBA2_ETC2_EAC
->0 ulelong 0x9279 \b, COMPRESSED_SRGB8_ALPHA8_ETC2_EAC
->0 ulelong 0x93B0 \b, COMPRESSED_RGBA_ASTC_4x4_KHR
->0 ulelong 0x93B1 \b, COMPRESSED_RGBA_ASTC_5x4_KHR
->0 ulelong 0x93B2 \b, COMPRESSED_RGBA_ASTC_5x5_KHR
->0 ulelong 0x93B3 \b, COMPRESSED_RGBA_ASTC_6x5_KHR
->0 ulelong 0x93B4 \b, COMPRESSED_RGBA_ASTC_6x6_KHR
->0 ulelong 0x93B5 \b, COMPRESSED_RGBA_ASTC_8x5_KHR
->0 ulelong 0x93B6 \b, COMPRESSED_RGBA_ASTC_8x6_KHR
->0 ulelong 0x93B7 \b, COMPRESSED_RGBA_ASTC_8x8_KHR
->0 ulelong 0x93B8 \b, COMPRESSED_RGBA_ASTC_10x5_KHR
->0 ulelong 0x93B9 \b, COMPRESSED_RGBA_ASTC_10x6_KHR
->0 ulelong 0x93BA \b, COMPRESSED_RGBA_ASTC_10x8_KHR
->0 ulelong 0x93BB \b, COMPRESSED_RGBA_ASTC_10x10_KHR
->0 ulelong 0x93BC \b, COMPRESSED_RGBA_ASTC_12x10_KHR
->0 ulelong 0x93BD \b, COMPRESSED_RGBA_ASTC_12x12_KHR
->0 ulelong 0x93D0 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_4x4_KHR
->0 ulelong 0x93D1 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_5x4_KHR
->0 ulelong 0x93D2 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_5x5_KHR
->0 ulelong 0x93D3 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_6x5_KHR
->0 ulelong 0x93D4 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_6x6_KHR
->0 ulelong 0x93D5 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_8x5_KHR
->0 ulelong 0x93D6 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_8x6_KHR
->0 ulelong 0x93D7 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_8x8_KHR
->0 ulelong 0x93D8 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_10x5_KHR
->0 ulelong 0x93D9 \b, COMPRESSED_SRGB8_ALPHA8_ASTC_10x6_KHR
->0 ulelong 0x93DA \b, COMPRESSED_SRGB8_ALPHA8_ASTC_10x8_KHR
->0 ulelong 0x93DB \b, COMPRESSED_SRGB8_ALPHA8_ASTC_10x10_KHR
->0 ulelong 0x93DC \b, COMPRESSED_SRGB8_ALPHA8_ASTC_12x10_KHR
->0 ulelong 0x93DD \b, COMPRESSED_SRGB8_ALPHA8_ASTC_12x12_KHR
-
-# Endian-specific KTX header.
-# TODO: glType (all textures I've seen so far are GL_UNSIGNED_BYTE)
-0 name khronos-ktx-endian-header
->20 ulelong x \b, %u
->24 ulelong >1 x %u
->28 ulelong >1 x %u
->8 ulelong >0
->>8 use khronos-ktx-glEnum
->8 ulelong 0
->>12 use khronos-ktx-glEnum
-
-# Main KTX header.
-# Determine endianness, then check the rest of the header.
-0 string \xABKTX\ 11\xBB\r\n\x1A\n Khronos KTX texture
->12 ulelong 0x04030201 (little-endian)
->>16 use khronos-ktx-endian-header
->12 ubelong 0x04030201 (big-endian)
->>16 use \^khronos-ktx-endian-header
-
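The KTX rules above first read the endianness word at offset 12 and then interpret the rest of the header accordingly; glFormat (offset 24) is used when non-zero, otherwise the compressed glInternalFormat (offset 28). A small sketch of the same logic, assuming the published KTX 1.1 header layout; ktx_info is an illustrative name.

    import struct

    KTX11_ID = b"\xabKTX 11\xbb\r\n\x1a\n"

    def ktx_info(path):
        with open(path, "rb") as f:
            hdr = f.read(64)
        if hdr[:12] != KTX11_ID:
            raise ValueError("not a KTX 1.1 texture")
        # 0x04030201 stored little-endian means the writer was little-endian.
        endian = "<" if struct.unpack_from("<I", hdr, 12)[0] == 0x04030201 else ">"
        gl_format, gl_internal = struct.unpack_from(endian + "2I", hdr, 24)
        width, height, depth = struct.unpack_from(endian + "3I", hdr, 36)
        # Compressed textures set glFormat to 0; report glInternalFormat then.
        return width, height, depth, (gl_format or gl_internal)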
-# Type: Khronos KTX2 texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Based on draft19.
-# Reference: http://github.khronos.org/KTX-Specification/
-
-# Supercompression enum.
-0 name khronos-ktx2-supercompression
->0 ulelong 1 BasisLZ
->0 ulelong 2 Zstandard
->0 ulelong 3 ZLIB
-
-# Vulkan format identifier.
-# NOTE: Formats prohibited from KTX2 are commented out.
-0 name khronos-ktx2-vkFormat
->0 ulelong 0 UNDEFINED
->0 ulelong 1 R4G4_UNORM_PACK8
->0 ulelong 2 R4G4B4A4_UNORM_PACK16
->0 ulelong 3 B4G4R4A4_UNORM_PACK16
->0 ulelong 4 R5G6B5_UNORM_PACK16
->0 ulelong 5 B5G6R5_UNORM_PACK16
->0 ulelong 6 R5G5B5A1_UNORM_PACK16
->0 ulelong 7 B5G5R5A1_UNORM_PACK16
->0 ulelong 8 A1R5G5B5_UNORM_PACK16
->0 ulelong 9 R8_UNORM
->0 ulelong 10 R8_SNORM
-#>0 ulelong 11 R8_USCALED
-#>0 ulelong 12 R8_SSCALED
->0 ulelong 13 R8_UINT
->0 ulelong 14 R8_SINT
->0 ulelong 15 R8_SRGB
->0 ulelong 16 R8G8_UNORM
->0 ulelong 17 R8G8_SNORM
-#>0 ulelong 18 R8G8_USCALED
-#>0 ulelong 19 R8G8_SSCALED
->0 ulelong 20 R8G8_UINT
->0 ulelong 21 R8G8_SINT
->0 ulelong 22 R8G8_SRGB
->0 ulelong 23 R8G8B8_UNORM
->0 ulelong 24 R8G8B8_SNORM
-#>0 ulelong 25 R8G8B8_USCALED
-#>0 ulelong 26 R8G8B8_SSCALED
->0 ulelong 27 R8G8B8_UINT
->0 ulelong 28 R8G8B8_SINT
->0 ulelong 29 R8G8B8_SRGB
->0 ulelong 30 B8G8R8_UNORM
->0 ulelong 31 B8G8R8_SNORM
-#>0 ulelong 32 B8G8R8_USCALED
-#>0 ulelong 33 B8G8R8_SSCALED
->0 ulelong 34 B8G8R8_UINT
->0 ulelong 35 B8G8R8_SINT
->0 ulelong 36 B8G8R8_SRGB
->0 ulelong 37 R8G8B8A8_UNORM
->0 ulelong 38 R8G8B8A8_SNORM
-#>0 ulelong 39 R8G8B8A8_USCALED
-#>0 ulelong 40 R8G8B8A8_SSCALED
->0 ulelong 41 R8G8B8A8_UINT
->0 ulelong 42 R8G8B8A8_SINT
->0 ulelong 43 R8G8B8A8_SRGB
->0 ulelong 44 B8G8R8A8_UNORM
->0 ulelong 45 B8G8R8A8_SNORM
-#>0 ulelong 46 B8G8R8A8_USCALED
-#>0 ulelong 47 B8G8R8A8_SSCALED
->0 ulelong 48 B8G8R8A8_UINT
->0 ulelong 49 B8G8R8A8_SINT
->0 ulelong 50 B8G8R8A8_SRGB
-#>0 ulelong 51 A8B8G8R8_UNORM_PACK32
-#>0 ulelong 52 A8B8G8R8_SNORM_PACK32
-#>0 ulelong 53 A8B8G8R8_USCALED_PACK32
-#>0 ulelong 54 A8B8G8R8_SSCALED_PACK32
-#>0 ulelong 55 A8B8G8R8_UINT_PACK32
-#>0 ulelong 56 A8B8G8R8_SINT_PACK32
-#>0 ulelong 57 A8B8G8R8_SRGB_PACK32
->0 ulelong 58 A2R10G10B10_UNORM_PACK32
->0 ulelong 59 A2R10G10B10_SNORM_PACK32
-#>0 ulelong 60 A2R10G10B10_USCALED_PACK32
-#>0 ulelong 61 A2R10G10B10_SSCALED_PACK32
->0 ulelong 62 A2R10G10B10_UINT_PACK32
->0 ulelong 63 A2R10G10B10_SINT_PACK32
->0 ulelong 64 A2B10G10R10_UNORM_PACK32
->0 ulelong 65 A2B10G10R10_SNORM_PACK32
-#>0 ulelong 66 A2B10G10R10_USCALED_PACK32
-#>0 ulelong 67 A2B10G10R10_SSCALED_PACK32
->0 ulelong 68 A2B10G10R10_UINT_PACK32
->0 ulelong 69 A2B10G10R10_SINT_PACK32
->0 ulelong 70 R16_UNORM
->0 ulelong 71 R16_SNORM
-#>0 ulelong 72 R16_USCALED
-#>0 ulelong 73 R16_SSCALED
->0 ulelong 74 R16_UINT
->0 ulelong 75 R16_SINT
->0 ulelong 76 R16_SFLOAT
->0 ulelong 77 R16G16_UNORM
->0 ulelong 78 R16G16_SNORM
-#>0 ulelong 79 R16G16_USCALED
-#>0 ulelong 80 R16G16_SSCALED
->0 ulelong 81 R16G16_UINT
->0 ulelong 82 R16G16_SINT
->0 ulelong 83 R16G16_SFLOAT
->0 ulelong 84 R16G16B16_UNORM
->0 ulelong 85 R16G16B16_SNORM
-#>0 ulelong 86 R16G16B16_USCALED
-#>0 ulelong 87 R16G16B16_SSCALED
->0 ulelong 88 R16G16B16_UINT
->0 ulelong 89 R16G16B16_SINT
->0 ulelong 90 R16G16B16_SFLOAT
->0 ulelong 91 R16G16B16A16_UNORM
->0 ulelong 92 R16G16B16A16_SNORM
-#>0 ulelong 93 R16G16B16A16_USCALED
-#>0 ulelong 94 R16G16B16A16_SSCALED
->0 ulelong 95 R16G16B16A16_UINT
->0 ulelong 96 R16G16B16A16_SINT
->0 ulelong 97 R16G16B16A16_SFLOAT
->0 ulelong 98 R32_UINT
->0 ulelong 99 R32_SINT
->0 ulelong 100 R32_SFLOAT
->0 ulelong 101 R32G32_UINT
->0 ulelong 102 R32G32_SINT
->0 ulelong 103 R32G32_SFLOAT
->0 ulelong 104 R32G32B32_UINT
->0 ulelong 105 R32G32B32_SINT
->0 ulelong 106 R32G32B32_SFLOAT
->0 ulelong 107 R32G32B32A32_UINT
->0 ulelong 108 R32G32B32A32_SINT
->0 ulelong 109 R32G32B32A32_SFLOAT
->0 ulelong 110 R64_UINT
->0 ulelong 111 R64_SINT
->0 ulelong 112 R64_SFLOAT
->0 ulelong 113 R64G64_UINT
->0 ulelong 114 R64G64_SINT
->0 ulelong 115 R64G64_SFLOAT
->0 ulelong 116 R64G64B64_UINT
->0 ulelong 117 R64G64B64_SINT
->0 ulelong 118 R64G64B64_SFLOAT
->0 ulelong 119 R64G64B64A64_UINT
->0 ulelong 120 R64G64B64A64_SINT
->0 ulelong 121 R64G64B64A64_SFLOAT
->0 ulelong 122 B10G11R11_UFLOAT_PACK32
->0 ulelong 123 E5B9G9R9_UFLOAT_PACK32
->0 ulelong 124 D16_UNORM
->0 ulelong 125 X8_D24_UNORM_PACK32
->0 ulelong 126 D32_SFLOAT
->0 ulelong 127 S8_UINT
->0 ulelong 128 D16_UNORM_S8_UINT
->0 ulelong 129 D24_UNORM_S8_UINT
->0 ulelong 130 D32_SFLOAT_S8_UINT
-
->0 ulelong 131 BC1_RGB_UNORM_BLOCK
->0 ulelong 132 BC1_RGB_SRGB_BLOCK
->0 ulelong 133 BC1_RGBA_UNORM_BLOCK
->0 ulelong 134 BC1_RGBA_SRGB_BLOCK
->0 ulelong 135 BC2_UNORM_BLOCK
->0 ulelong 136 BC2_SRGB_BLOCK
->0 ulelong 137 BC3_UNORM_BLOCK
->0 ulelong 138 BC3_SRGB_BLOCK
->0 ulelong 139 BC4_UNORM_BLOCK
->0 ulelong 140 BC4_SNORM_BLOCK
->0 ulelong 141 BC5_UNORM_BLOCK
->0 ulelong 142 BC5_SNORM_BLOCK
->0 ulelong 143 BC6H_UFLOAT_BLOCK
->0 ulelong 144 BC6H_SFLOAT_BLOCK
->0 ulelong 145 BC7_UNORM_BLOCK
->0 ulelong 146 BC7_SRGB_BLOCK
-
->0 ulelong 147 ETC2_R8G8B8_UNORM_BLOCK
->0 ulelong 148 ETC2_R8G8B8_SRGB_BLOCK
->0 ulelong 149 ETC2_R8G8B8A1_UNORM_BLOCK
->0 ulelong 150 ETC2_R8G8B8A1_SRGB_BLOCK
->0 ulelong 151 ETC2_R8G8B8A8_UNORM_BLOCK
->0 ulelong 152 ETC2_R8G8B8A8_SRGB_BLOCK
-
->0 ulelong 153 EAC_R11_UNORM_BLOCK
->0 ulelong 154 EAC_R11_SNORM_BLOCK
->0 ulelong 155 EAC_R11G11_UNORM_BLOCK
->0 ulelong 156 EAC_R11G11_SNORM_BLOCK
-
->0 ulelong 157 ASTC_4x4_UNORM_BLOCK
->0 ulelong 158 ASTC_4x4_SRGB_BLOCK
->0 ulelong 159 ASTC_5x4_UNORM_BLOCK
->0 ulelong 160 ASTC_5x4_SRGB_BLOCK
->0 ulelong 161 ASTC_5x5_UNORM_BLOCK
->0 ulelong 162 ASTC_5x5_SRGB_BLOCK
->0 ulelong 163 ASTC_6x5_UNORM_BLOCK
->0 ulelong 164 ASTC_6x5_SRGB_BLOCK
->0 ulelong 165 ASTC_6x6_UNORM_BLOCK
->0 ulelong 166 ASTC_6x6_SRGB_BLOCK
->0 ulelong 167 ASTC_8x5_UNORM_BLOCK
->0 ulelong 168 ASTC_8x5_SRGB_BLOCK
->0 ulelong 169 ASTC_8x6_UNORM_BLOCK
->0 ulelong 170 ASTC_8x6_SRGB_BLOCK
->0 ulelong 171 ASTC_8x8_UNORM_BLOCK
->0 ulelong 172 ASTC_8x8_SRGB_BLOCK
->0 ulelong 173 ASTC_10x5_UNORM_BLOCK
->0 ulelong 174 ASTC_10x5_SRGB_BLOCK
->0 ulelong 175 ASTC_10x6_UNORM_BLOCK
->0 ulelong 176 ASTC_10x6_SRGB_BLOCK
->0 ulelong 177 ASTC_10x8_UNORM_BLOCK
->0 ulelong 178 ASTC_10x8_SRGB_BLOCK
->0 ulelong 179 ASTC_10x10_UNORM_BLOCK
->0 ulelong 180 ASTC_10x10_SRGB_BLOCK
->0 ulelong 181 ASTC_12x10_UNORM_BLOCK
->0 ulelong 182 ASTC_12x10_SRGB_BLOCK
->0 ulelong 183 ASTC_12x12_UNORM_BLOCK
->0 ulelong 184 ASTC_12x12_SRGB_BLOCK
-
->0 ulelong 1000156000 G8B8G8R8_422_UNORM
->0 ulelong 1000156001 B8G8R8G8_422_UNORM
->0 ulelong 1000156002 G8_B8_R8_3PLANE_420_UNORM
->0 ulelong 1000156003 G8_B8R8_2PLANE_420_UNORM
->0 ulelong 1000156004 G8_B8_R8_3PLANE_422_UNORM
->0 ulelong 1000156005 G8_B8R8_2PLANE_422_UNORM
->0 ulelong 1000156006 G8_B8_R8_3PLANE_444_UNORM
->0 ulelong 1000156007 R10X6_UNORM_PACK16
->0 ulelong 1000156008 R10X6G10X6_UNORM_2PACK16
->0 ulelong 1000156009 R10X6G10X6B10X6A10X6_UNORM_4PACK16
->0 ulelong 1000156010 G10X6B10X6G10X6R10X6_422_UNORM_4PACK16
->0 ulelong 1000156011 B10X6G10X6R10X6G10X6_422_UNORM_4PACK16
->0 ulelong 1000156012 G10X6_B10X6_R10X6_3PLANE_420_UNORM_3PACK16
->0 ulelong 1000156013 G10X6_B10X6R10X6_2PLANE_420_UNORM_3PACK16
->0 ulelong 1000156014 G10X6_B10X6_R10X6_3PLANE_422_UNORM_3PACK16
->0 ulelong 1000156015 G10X6_B10X6R10X6_2PLANE_422_UNORM_3PACK16
->0 ulelong 1000156016 G10X6_B10X6_R10X6_3PLANE_444_UNORM_3PACK16
->0 ulelong 1000156017 R12X4_UNORM_PACK16
->0 ulelong 1000156018 R12X4G12X4_UNORM_2PACK16
->0 ulelong 1000156019 R12X4G12X4B12X4A12X4_UNORM_4PACK16
->0 ulelong 1000156020 G12X4B12X4G12X4R12X4_422_UNORM_4PACK16
->0 ulelong 1000156021 B12X4G12X4R12X4G12X4_422_UNORM_4PACK16
->0 ulelong 1000156022 G12X4_B12X4_R12X4_3PLANE_420_UNORM_3PACK16
->0 ulelong 1000156023 G12X4_B12X4R12X4_2PLANE_420_UNORM_3PACK16
->0 ulelong 1000156024 G12X4_B12X4_R12X4_3PLANE_422_UNORM_3PACK16
->0 ulelong 1000156025 G12X4_B12X4R12X4_2PLANE_422_UNORM_3PACK16
->0 ulelong 1000156026 G12X4_B12X4_R12X4_3PLANE_444_UNORM_3PACK16
->0 ulelong 1000156027 G16B16G16R16_422_UNORM
->0 ulelong 1000156028 B16G16R16G16_422_UNORM
->0 ulelong 1000156029 G16_B16_R16_3PLANE_420_UNORM
->0 ulelong 1000156030 G16_B16R16_2PLANE_420_UNORM
->0 ulelong 1000156031 G16_B16_R16_3PLANE_422_UNORM
->0 ulelong 1000156032 G16_B16R16_2PLANE_422_UNORM
->0 ulelong 1000156033 G16_B16_R16_3PLANE_444_UNORM
-
->0 ulelong 1000054000 PVRTC1_2BPP_UNORM_BLOCK_IMG
->0 ulelong 1000054001 PVRTC1_4BPP_UNORM_BLOCK_IMG
->0 ulelong 1000054002 PVRTC2_2BPP_UNORM_BLOCK_IMG
->0 ulelong 1000054003 PVRTC2_4BPP_UNORM_BLOCK_IMG
->0 ulelong 1000054004 PVRTC1_2BPP_SRGB_BLOCK_IMG
->0 ulelong 1000054005 PVRTC1_4BPP_SRGB_BLOCK_IMG
->0 ulelong 1000054006 PVRTC2_2BPP_SRGB_BLOCK_IMG
->0 ulelong 1000054007 PVRTC2_4BPP_SRGB_BLOCK_IMG
-
->0 ulelong 1000066000 ASTC_4x4_SFLOAT_BLOCK_EXT
->0 ulelong 1000066001 ASTC_5x4_SFLOAT_BLOCK_EXT
->0 ulelong 1000066002 ASTC_5x5_SFLOAT_BLOCK_EXT
->0 ulelong 1000066003 ASTC_6x5_SFLOAT_BLOCK_EXT
->0 ulelong 1000066004 ASTC_6x6_SFLOAT_BLOCK_EXT
->0 ulelong 1000066005 ASTC_8x5_SFLOAT_BLOCK_EXT
->0 ulelong 1000066006 ASTC_8x6_SFLOAT_BLOCK_EXT
->0 ulelong 1000066007 ASTC_8x8_SFLOAT_BLOCK_EXT
->0 ulelong 1000066008 ASTC_10x5_SFLOAT_BLOCK_EXT
->0 ulelong 1000066009 ASTC_10x6_SFLOAT_BLOCK_EXT
->0 ulelong 1000066010 ASTC_10x8_SFLOAT_BLOCK_EXT
->0 ulelong 1000066011 ASTC_10x10_SFLOAT_BLOCK_EXT
->0 ulelong 1000066012 ASTC_12x10_SFLOAT_BLOCK_EXT
->0 ulelong 1000066013 ASTC_12x12_SFLOAT_BLOCK_EXT
-
-# Main KTX2 header.
-0 string \xABKTX\ 20\xBB\r\n\x1A\n Khronos KTX2 texture
->20 ulelong x \b, %u
->24 ulelong >1 x %u
->28 ulelong >1 x %u
->32 ulelong >1 \b, %u layers
->36 ulelong >1 \b, %u faces
->40 ulelong >1 \b, %u mipmaps
->44 ulelong >0 \b,
->>44 use khronos-ktx2-supercompression
->12 ulelong >0 \b,
->>12 use khronos-ktx2-vkFormat
-
-# Type: Valve VTF texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://developer.valvesoftware.com/wiki/Valve_Texture_Format
-
-# VTF image formats.
-0 name vtf-image-format
->0 ulelong 0 RGBA8888
->0 ulelong 1 ABGR8888
->0 ulelong 2 RGB888
->0 ulelong 3 BGR888
->0 ulelong 4 RGB565
->0 ulelong 5 I8
->0 ulelong 6 IA88
->0 ulelong 7 P8
->0 ulelong 8 A8
->0 ulelong 9 RGB888 (bluescreen)
->0 ulelong 10 BGR888 (bluescreen)
->0 ulelong 11 ARGB8888
->0 ulelong 12 BGRA8888
->0 ulelong 13 DXT1
->0 ulelong 14 DXT3
->0 ulelong 15 DXT5
->0 ulelong 16 BGRx8888
->0 ulelong 17 BGR565
->0 ulelong 18 BGRx5551
->0 ulelong 19 BGRA4444
->0 ulelong 20 DXT1+A1
->0 ulelong 21 BGRA5551
->0 ulelong 22 UV88
->0 ulelong 23 UVWQ8888
->0 ulelong 24 RGBA16161616F
->0 ulelong 25 RGBA16161616
->0 ulelong 26 UVLX8888
-
-# Main VTF header.
-0 string VTF\0 Valve Texture Format
->4 ulelong x v%u
->8 ulelong x \b.%u
->0x10 uleshort x \b, %u
->0x12 uleshort >1 x %u
->4 lequad 0x0000000700000002
->>0x3F uleshort >1 x %u
->0x18 uleshort >1 \b, %u frames
->0x38 ubyte x \b, mipmaps: %u
->0x34 ulelong >-1 \b,
->>0x34 use vtf-image-format
-
-# Type: Valve VTF3 (PS3) texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-0 string VTF3 Valve Texture Format (PS3)
->0x14 ubeshort x \b, %u
->0x16 ubeshort x \b x %u
->0x10 ubelong&0x2000 0 \b, DXT1
->0x10 ubelong&0x2000 0x2000 \b, DXT5
-
-# Type: ASTC texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://stackoverflow.com/questions/22600678/determine-internal-format-of-given-astc-compressed-image-through-its-header
-# - https://stackoverflow.com/a/22682244
-0 ulelong 0x5ca1ab13 ASTC
->4 ubyte x %u
->5 ubyte x \bx%u
->6 ubyte >1 \bx%u
-# X, Y, and Z dimensions are stored as 24-bit LE.
-# Pretend it's 32-bit and mask off the high byte.
->7 ulelong&0x00FFFFFF x texture, %u
->10 ulelong&0x00FFFFFF x x %u
->13 ulelong&0x00FFFFFF >1 x %u
-
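The comment in the ASTC entry above describes the trick used for the 24-bit little-endian dimensions: read a wider value and mask off the high byte. The same decoding as a short Python sketch (astc_dimensions is an illustrative name):

    def astc_dimensions(header: bytes):
        # .astc header: 4-byte magic 0x5CA1AB13, block footprint at 4..6,
        # then X/Y/Z extents as 24-bit little-endian values at 7, 10 and 13.
        if int.from_bytes(header[:4], "little") != 0x5CA1AB13:
            raise ValueError("not an ASTC texture")
        u24 = lambda off: int.from_bytes(header[off:off + 3], "little")
        return u24(7), u24(10), u24(13)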
-# Zebra Metafile graphic
-# http://www.fileformat.info/format/zbr/egff.htm
-0 ubeshort 0x9a02 Zebra Metafile graphic
->2 uleshort 1 (version 1.x)
->2 uleshort 2 (version 1.1x or 1.2x)
->2 uleshort 3 (version 1.49)
->2 uleshort 4 (version 1.50)
->4 string x (comment = %s)
-
-# Microsoft Paint graphic
-# http://www.fileformat.info/format/mspaint/egff.htm
-0 string DanM Microsoft Paint image data (version 1.x)
->4 uleshort x (%d
->>6 uleshort x x %d)
-0 string LinS Microsoft Paint image data (version 2.0)
->4 uleshort x (%d
->>6 uleshort x x %d)
-
-# reMarkable tablet internal file format (https://www.remarkable.com/)
-# https://github.com/ax3l/lines-are-beautiful
-# https://plasma.ninja/blog/devices/remarkable/binary/format/2017/12/26/\
-# reMarkable-lines-file-format.html#what-to-do-next
-# from Axel Huebl
-0 string reMarkable
->11 string lines
->>17 string with
->>>22 string selections
->>>>33 string and
->>>>>37 string layers
->>>>>>43 ulelong x reMarkable tablet notebook lines, 1404 x 1872, %x page(s)
-
-# newer per-page files for the reMarkable
-0 string reMarkable
->11 string .lines
->>18 string file,
->>>24 string version=
->>>>32 ubyte x reMarkable tablet page (v%c), 1404 x 1872,
->>>>>43 ulelong x %d layer(s)
-
-# Type: PVR3 texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - http://cdn.imgtec.com/sdk-documentation/PVR+File+Format.Specification.pdf
-
-# PVR3 pixel formats.
-0 name pvr3-pixel-format
->0 ulelong 0 PVRTC 2bpp RGB
->0 ulelong 1 PVRTC 2bpp RGBA
->0 ulelong 2 PVRTC 4bpp RGB
->0 ulelong 3 PVRTC 4bpp RGBA
->0 ulelong 4 PVRTC-II 2bpp
->0 ulelong 5 PVRTC-II 4bpp
->0 ulelong 6 ETC1
->0 ulelong 7 DXT1
->0 ulelong 8 DXT2
->0 ulelong 9 DXT3
->0 ulelong 10 DXT4
->0 ulelong 11 DXT5
->0 ulelong 12 BC4
->0 ulelong 13 BC5
->0 ulelong 14 BC6
->0 ulelong 15 BC7
->0 ulelong 16 UYVY
->0 ulelong 17 YUY2
->0 ulelong 18 BW1bpp
->0 ulelong 19 R9G9B9E5 Shared Exponent
->0 ulelong 20 RGBG8888
->0 ulelong 21 GRGB8888
->0 ulelong 22 ETC2 RGB
->0 ulelong 23 ETC2 RGBA
->0 ulelong 24 ETC2 RGB A1
->0 ulelong 25 EAC R11
->0 ulelong 26 EAC RG11
->0 ulelong 27 ASTC_4x4
->0 ulelong 28 ASTC_5x4
->0 ulelong 29 ASTC_5x5
->0 ulelong 30 ASTC_6x5
->0 ulelong 31 ASTC_6x6
->0 ulelong 32 ASTC_8x5
->0 ulelong 33 ASTC_8x6
->0 ulelong 34 ASTC_8x8
->0 ulelong 35 ASTC_10x5
->0 ulelong 36 ASTC_10x6
->0 ulelong 37 ASTC_10x8
->0 ulelong 38 ASTC_10x10
->0 ulelong 39 ASTC_12x10
->0 ulelong 40 ASTC_12x12
->0 ulelong 41 ASTC_3x3x3
->0 ulelong 42 ASTC_4x3x3
->0 ulelong 43 ASTC_4x4x3
->0 ulelong 44 ASTC_4x4x4
->0 ulelong 45 ASTC_5x4x4
->0 ulelong 46 ASTC_5x5x4
->0 ulelong 47 ASTC_5x5x5
->0 ulelong 48 ASTC_6x5x5
->0 ulelong 49 ASTC_6x6x5
->0 ulelong 50 ASTC_6x6x6
-
-0 string PVR\x03 PowerVR 3.0 texture:
->0x18 ulelong x %u x
->0x1C ulelong x %u
->0x20 ulelong >1 x %u
->0x08 ubyte x \b,
->0x0C ulelong 0
->>0x08 use pvr3-pixel-format
->0x0C ulelong !0
->>0x08 ubyte !0 %c
->>>0x0C ubyte !0 \b%u
->>0x09 ubyte !0 \b%c
->>>0x0D ubyte !0 \b%u
->>0x0A ubyte !0 \b%c
->>>0x0E ubyte !0 \b%u
->>0x0B ubyte !0 \b%c
->>>0x0F ubyte !0 \b%u
->0x10 ulelong 1 \b, sRGB
->0x04 ulelong&0x02 0x02 \b, premultiplied alpha
-
-0 string \x03RVP PowerVR 3.0 texture: BE,
->0x18 ubelong x %u x
->0x1C ubelong x %u
->0x20 ubelong >1 x %u
->0x08 ubyte x \b,
->0x0C ubelong 0
->>0x08 use pvr3-pixel-format
->0x0C ubelong !0
->>0x0B ubyte !0 %c
->>>0x0F ubyte !0 \b%u
->>0x0A ubyte !0 \b%c
->>>0x0E ubyte !0 \b%u
->>0x09 ubyte !0 \b%c
->>>0x0D ubyte !0 \b%u
->>0x08 ubyte !0 \b%c
->>>0x0C ubyte !0 \b%u
->0x10 ubelong 1 \b, sRGB
->0x04 ubelong&0x02 0x02 \b, premultiplied alpha
-
-# Type: Microsoft Xbox XPR0 texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/Cxbx-Reloaded/Cxbx-Reloaded/blob/develop/src/core/hle/D3D8/XbD3D8Types.h
-
-# XPR pixel formats.
-0 name xbox-xpr-pixel-format
->0 ubyte 0x00 L8
->0 ubyte 0x01 AL8
->0 ubyte 0x02 ARGB1555
->0 ubyte 0x03 RGB555
->0 ubyte 0x04 ARGB4444
->0 ubyte 0x05 RGB565
->0 ubyte 0x06 ARGB8888
->0 ubyte 0x07 xRGB8888
->0 ubyte 0x0B P8
->0 ubyte 0x0C DXT1
->0 ubyte 0x0E DXT2
->0 ubyte 0x0F DXT4
->0 ubyte 0x10 Linear ARGB1555
->0 ubyte 0x11 Linear RGB565
->0 ubyte 0x12 Linear ARGB8888
->0 ubyte 0x13 Linear L8
->0 ubyte 0x16 Linear R8B8
->0 ubyte 0x17 Linear G8B8
->0 ubyte 0x19 A8
->0 ubyte 0x1A A8L8
->0 ubyte 0x1B Linear AL8
->0 ubyte 0x1C Linear RGB555
->0 ubyte 0x1D Linear ARGB4444
->0 ubyte 0x1E Linear xRGB8888
->0 ubyte 0x1F Linear A8
->0 ubyte 0x20 Linear A8L8
->0 ubyte 0x24 YUY2
->0 ubyte 0x25 UYVY
->0 ubyte 0x27 L6V5U5
->0 ubyte 0x28 V8U8
->0 ubyte 0x29 R8B8
->0 ubyte 0x2A D24S8
->0 ubyte 0x2B F24S8
->0 ubyte 0x2C D16
->0 ubyte 0x2D F16
->0 ubyte 0x2E Linear D24S8
->0 ubyte 0x2F Linear F24S8
->0 ubyte 0x30 Linear D16
->0 ubyte 0x31 Linear F16
->0 ubyte 0x32 L16
->0 ubyte 0x33 V16U16
->0 ubyte 0x35 Linear L16
->0 ubyte 0x36 Linear V16U16
->0 ubyte 0x37 Linear L6V5U5
->0 ubyte 0x38 RGBA5551
->0 ubyte 0x39 RGBA4444
->0 ubyte 0x3A QWVU8888
->0 ubyte 0x3B BGRA8888
->0 ubyte 0x3C RGBA8888
->0 ubyte 0x3D Linear RGBA5551
->0 ubyte 0x3E Linear RGBA4444
->0 ubyte 0x3F Linear ABGR8888
->0 ubyte 0x40 Linear BGRA8888
->0 ubyte 0x41 Linear RGBA8888
->0 ubyte 0x64 Vertex Data
-
-0 string XPR0 Microsoft Xbox XPR0 texture
->0x19 ubyte x \b, format:
->>0x19 use xbox-xpr-pixel-format
-
-# ILDA Image Data Transfer Format
-# https://www.ilda.com/resources/StandardsDocs/ILDA_IDTF14_rev011.pdf
-#
-# Updated by Chuck Hein (laser@geekdude.com)
-#
-0 string ILDA ILDA Image Data Transfer Format
->7 ubyte 0x00 3D Coordinates with Indexed Color
->7 ubyte 0x01 2D Coordinates with Indexed Color
->7 ubyte 0x02 Color Palette
->7 ubyte 0x04 3D Coordinates with True Color
->7 ubyte 0x05 2D Coordinates with True Color
->8 string >0 \b, palette %s
->16 string >0 \b, company %s
->24 ubeshort >0 \b, number of records %d
->>26 ubeshort x \b, palette number %d
->>28 ubeshort >0 \b, number of frames %d
->>30 ubyte >0 \b, projector number %d
-
-# Dropbox "lepton" compressed jpeg format
-# https://github.com/dropbox/lepton
-0 ubelong&0xfffff0ff 0xcf84005a Lepton image file
->2 ubyte x (version %d)
-
-# Apple QuickTake camera raw images
-# https://en.wikipedia.org/wiki/Apple_QuickTake
-# dcraw can decode them
-0 name quicktake
->4 ubelong 8
->>544 ubeshort x \b, %dx
->>546 ubeshort x \b%d
->4 ubelong 4
->>546 ubeshort x \b, %dx
->>544 ubeshort x \b%d
-
-0 string qktk Apple QuickTake 100 Raw Image
->0 use quicktake
-
-0 string qktn
->4 ubyte 0 Apple QuickTake 150 Raw Image
->4 ubyte >0 Apple QuickTake 200 Raw Image
->0 use quicktake
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Corel_Photo-Paint_image
-# Reference: http://blog.argasinski.eu/wp-content/uploads/2011/08/cpt-specification-0.01.pdf
-0 string CPT
->4 string FILE Corel Photo-Paint image, version
-# version like 7, 9 or 8
->>3 ubyte x %c,
-!:mime image/x-corel-cpt
-!:ext cpt
-# if blocks_array_offset available jump blockNumber*8 bytes
->>0x34 ulelong >0
->>>(0x28.l*8) ubyte x
-# jump additional stored blocks_array_offset bytes forward to object block
->>>>&(0x34.l-1) ulelong x %u
-# object height in pixels
->>>>>&0 ulelong x x %u
-# if no blocks_array_offset available jump blockNumber*8 bytes
->>0x34 ulelong =0
->>>(0x28.l*8) ubyte x
-# jump additional 0x13C bytes forward to object block
->>>>&0x13B ulelong x %u
->>>>>&0 ulelong x x %u
-# image color model used
->>0x8 ulelong x
->>>0x8 ulelong 0x1 RGB 24 bits
->>>0x8 ulelong 0x3 CMYK 24 bits
->>>0x8 ulelong 0x5 greyscale 8 bits
->>>0x8 ulelong 0x6 black and white 1 bit
->>>0x8 ulelong 0xA RGB 8 bits
-# palette_length number of colors * 3 in case of 8-bit RGB paletted image
-# 0 otherwise. Allowed values: 0 or [1..256] * 3
-#>>0xC ulelong >0 \b, palette length %u
->>>>0xC ulelong/3 <256 \b, %u colors
->>>0x8 ulelong 0xB LAB
->>>0x8 ulelong 0xC RGB 48 bits
->>>0x8 ulelong 0xE greyscale 16 bits
-# this should not happen
->>>0x8 default x color model
->>>>0x8 ulelong x %#x
-# bit 1 in CPT file flags: UCS-2 file comment present
->>0x31 ubyte &0x02
-# look for comment marker
->>>0x100 search/0xc9d \4\2\0\0
-# UCS-2 file comment
->>>>&0 lestring16 x "%s"
-# if no UCS-2 is present show ANSI file comment[112] if available
->>0x31 ubyte&0x02 =0
->>>0x3C string >\0 "%-.112s"
-# reserved seems to be always 0
-#>>0x10 ulelong >0 \b, reserved1 %u
-# horizontal real dpi = dpi_h * 25.4 / 10**6
->>0x18 ulelong x \b, %u micro dots/mm
-# image vertical DPI in CPT DPI unit
-#>>0x1C ulelong x \b, %u micro dots/mm
-# reserved seems to be always 0
-#>>0x20 ulelong >0 \b, reserved2 %u
-#>>0x24 ulelong >0 \b, reserved3 %u
-# blocks_count; number of CPT_Block blocks. Allowed values: > 0
->>0x28 ulelong x \b, %u block
-# plural s
->>0x28 ulelong !1 \bs
-# CPT file flags
-# lower byte of CPT file flags: 0x94~CPT9FILE 0x01~often CPT7FILE 0x8C~CPT8FILE
-#>>0x30 ubyte x \b, lower flags %#x
-# upper byte of CPT file flags:
-#>>0x31 ubyte >0 \b, upper flags %#x
-# bit 2 in CPT file flags: unknown
-#>>0x31 ubyte &0x04 \b, with UNKNOWN
-# bits 3-7 in CPT file flags: unknown, seem to be often 0
-# show unusual flag combinations
->>0x31 ubyte&0xFC >0
->>>0x30 uleshort x \b, flags %#4.4x
-# reserved seems to be always 0
-#>>0x32 uleshort >0 \b, reserved4 %#x
-# blocks_array_offset is always 0 for CPT7 and CPT8 files created by PP7-PP8
-# typical values like: 13Ch 154h 43Ch 4F0h DA8h
->>0x34 ulelong x \b, array offset %#x
-# reserved seems to be often 0
->>0x38 ulelong >0 \b, reserved5 %#x
-# possible next master block
-#>>0x100 ubequad !0 \b, next block=%#llx...
-# bit 0: ICC profile block present
->>0x31 ubyte &0x01 \b, with ICC profile
-# check for characteristic string acsp of color profile for DEBUGGING
-#>>>0x178 string x icc=%.4s
-# display ICC/ICM color profile by ./icc
-#>>>0x154 use color-profile
-
-# URL: http://fileformats.archiveteam.org/wiki/CorelDRAW
-# https://en.wikipedia.org/wiki/CorelDRAW
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cdr-gen.trid.xml
-# Note: called "CorelDRAW drawing (generic)" by TrID
-# versions up to 2 are WL-based; versions 3 to 13 are handled by ./riff and version 14 onwards is zip based, handled by ./archive
-0 ubelong&0xFFffF7ff 0x574C6500 Corel Draw Picture
-#!:mime image/x-coreldraw
-!:mime application/vnd.corel-draw
-!:ext cdr
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cdr-corel-10.trid.xml
-# Note: called "CorelDRAW drawing (v1.0)" by TrID and
-# "CorelDraw Drawing" with version "1.0" by DROID via PUID fmt/467
-# only DROID fmt-467-signature-id-726.cdr example
->2 ubyte 0x65 \b, version 1.0
-#>>4 ubelong !0x45000000 \b, at 4 %#8.8x
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cdr-corel-20.trid.xml
-# Note: called "CorelDRAW drawing (v2.0)" by TrID and
-# "CorelDraw Drawing" with version "2.0" by DROID via PUID fmt/466
->2 ubyte 0x6D \b, version 2.0
-# According to DROID 0xed080000 or 0x25050000
-#>>4 ubelong !0xed080000
-#>>>4 ubelong !0x25050000 \b, at 4 %#8.8x
-
-# Type: Crunch compressed texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/BinomialLLC/crunch/blob/44c8402e24441c7524ca364941fd224ab3b971e9/inc/crn_decomp.h#L267
-0 ubelong 0x4878004A Crunch compressed texture:
->0x0C ubeshort x %u x
->0x0E ubeshort x %u
->0x12 ubyte 0 \b, DXT1
->0x12 ubyte 1 \b, DXT3
->0x12 ubyte 2 \b, DXT5
->0x12 ubyte 3 \b, DXT5 CCxY
->0x12 ubyte 4 \b, DXT5 xGxR
->0x12 ubyte 5 \b, DXT5 xGBR
->0x12 ubyte 6 \b, DXT5 AGBR
->0x12 ubyte 7 \b, DXn XY
->0x12 ubyte 8 \b, DXn YX
->0x12 ubyte 9 \b, DXT5 Alpha
->0x12 ubyte 10 \b, ETC1
->0x10 ubyte >1 \b, %u images
->0x11 ubyte >1 \b, %u faces
-# TODO: Flags at 0x13? (ubeshort)
-
-# Type: BasisLZ compressed texture.
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/BinomialLLC/basis_universal/blob/master/spec/basis_spec.txt
-0 uleshort 0x4273
->0x04 uleshort 0x4D BasisLZ
->>0x02 uleshort x v%x compressed texture:
->>0x14 ubyte 0 ETC1S
->>0x14 ubyte 1 UASTC 4x4
->>0x0E ulelong&0xFFFFFF >1 \b, %u slices
->>0x11 ulelong&0xFFFFFF >1 \b, %u images
->>0x15 uleshort&0x02 2 \b, Y-flipped
-
-# MIME registration: https://www.iana.org/assignments/media-types/model/e57
-# Sample files: http://www.libe57.org/data.html
-# Reference implementation: http://www.libe57.org/
-# https://www.ri.cmu.edu/pub_files/2011/1/2011-huber-e57-v3.pdf
-0 string ASTM-E57 ASTM E57 three-dimensional model
-!:mime model/e57
-!:ext e57
-
-# QOI [Quite OK Image Format] images
-# (Horia Mihai David, mihaidavid@posteo.net)
-#
-# QOI format by Dominic Szablewski <http://phoboslab.org/>
-# <https://qoiformat.org/>
-#
-# Based on spec v1.0 (2022.01.05) <https://qoiformat.org/qoi-specification.pdf>
-
-0 string qoif QOI image data
-!:ext qoi
-!:mime image/x-qoi
-# See <https://github.com/phoboslab/qoi/issues/167>
->4 ubelong x %ux
->8 ubelong x \b%u,
->>13 ubyte 0 s
->>>12 ubyte 3 \bRGB
->>>12 ubyte 4 \bRGBA
->>>12 default x
->>>>12 ubyte x \b*bad channels %u*
->>>13 ubyte 0 (linear alpha)
->>13 ubyte 1
->>>12 ubyte 3 RGB
->>>12 ubyte 4 RGBA
->>>13 ubyte 1 (all channels linear)
->>13 default x
->>>13 ubyte x *bad colorspace %u*
-
-
-# Type: Godot 3, 4 texture (pixel format)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-0 name godot-pixel-format
->0 ulelong&0xFFFFF 0 L8
->0 ulelong&0xFFFFF 1 LA8
->0 ulelong&0xFFFFF 2 R8
->0 ulelong&0xFFFFF 3 RG8
->0 ulelong&0xFFFFF 4 RGB8
->0 ulelong&0xFFFFF 5 RGBA8
->0 ulelong&0xFFFFF 6 RGBA4444
->0 ulelong&0xFFFFF 7 RGB565
->0 ulelong&0xFFFFF 8 RF
->0 ulelong&0xFFFFF 9 RGF
->0 ulelong&0xFFFFF 10 RGBF
->0 ulelong&0xFFFFF 11 RGBAF
->0 ulelong&0xFFFFF 12 RH
->0 ulelong&0xFFFFF 13 RGH
->0 ulelong&0xFFFFF 14 RGBH
->0 ulelong&0xFFFFF 15 RGBAH
->0 ulelong&0xFFFFF 16 RGBE9995
->0 ulelong&0xFFFFF 17 DXT1
->0 ulelong&0xFFFFF 18 DXT3
->0 ulelong&0xFFFFF 19 DXT5
->0 ulelong&0xFFFFF 20 RGTC_R
->0 ulelong&0xFFFFF 21 RGTC_RG
->0 ulelong&0xFFFFF 22 BPTC_RGBA
->0 ulelong&0xFFFFF 23 BPTC_RGBF
->0 ulelong&0xFFFFF 24 BPTC_RGBFU
->0 ulelong&0xFFFFF 25 PVRTC1_2
->0 ulelong&0xFFFFF 26 PVRTC1_2A
->0 ulelong&0xFFFFF 27 PVRTC1_4
->0 ulelong&0xFFFFF 28 PVRTC1_4A
->0 ulelong&0xFFFFF 29 ETC
->0 ulelong&0xFFFFF 30 ETC2_R11
->0 ulelong&0xFFFFF 31 ETC2_R11S
->0 ulelong&0xFFFFF 32 ETC2_RG11
->0 ulelong&0xFFFFF 33 ETC2_RG11S
->0 ulelong&0xFFFFF 34 ETC2_RGB8
->0 ulelong&0xFFFFF 35 ETC2_RGBA8
->0 ulelong&0xFFFFF 36 ETC2_RGB8A1
->0 ulelong&0xFFFFF 37 ASTC_8x8
-
-# Type: Godot 3, 4 texture (rescale display, width)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Shows rescale value if it's not a power of 2.
-0 name godot-rescale-display-w
->0 uleshort 0
->0 uleshort 1
->0 uleshort 2
->0 uleshort 4
->0 uleshort 8
->0 uleshort 16
->0 uleshort 32
->0 uleshort 64
->0 uleshort 128
->0 uleshort 256
->0 uleshort 512
->0 uleshort 1024
->0 uleshort 2048
->0 uleshort 4096
->0 uleshort 8192
->0 uleshort 16384
->0 uleshort 32768
->0 default x
->>0 uleshort x (rescale to %u x
-
-# Type: Godot 3, 4 texture (rescale display, height)
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Shows rescale value if it's not a power of 2.
-0 name godot-rescale-display-h
->0 clear x
->0 uleshort 0
->0 uleshort 1
->0 uleshort 2
->0 uleshort 4
->0 uleshort 8
->0 uleshort 16
->0 uleshort 32
->0 uleshort 64
->0 uleshort 128
->0 uleshort 256
->0 uleshort 512
->0 uleshort 1024
->0 uleshort 2048
->0 uleshort 4096
->0 uleshort 8192
->0 uleshort 16384
->0 uleshort 32768
->0 default x
->>0 uleshort x %u)
-
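The two named routines above enumerate every 16-bit power of two so that only non-power-of-two dimensions fall through to the default case and print the rescale note. In a general-purpose language the same test is a one-liner; a sketch (function name chosen for illustration):

    def shows_rescale(size: int) -> bool:
        # Mirrors the magic rules: 0 and the powers of two 1..32768 stay
        # silent, everything else triggers the "(rescale to ...)" note.
        return size != 0 and (size & (size - 1)) != 0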
-# Type: Godot 3 texture
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/godotengine/godot/blob/3.3/core/image.h
-# - https://github.com/godotengine/godot/blob/3.3/scene/resources/texture.cpp
-# - https://github.com/godotengine/godot/blob/3.3/scene/resources/texture.h
-# TODO: Don't show "rescale to" if it matches the image size.
-0 string GDST Godot 3 texture:
-!:ext stex
-!:mime image/x-godot-stex
->4 uleshort x %u x
->8 uleshort x %u
->6 uleshort 0 \b,
->6 uleshort !0
->>6 use godot-rescale-display-w
->>10 use godot-rescale-display-h
->>10 uleshort x \b,
->16 ulelong&0x800000 !0 has mipmaps,
->16 ulelong&0x100000 0x100000 lossless encoding
->16 ulelong&0x200000 0x200000 lossy encoding
->16 ulelong&0x300000 0
->>16 use godot-pixel-format
-
-# Type: Godot 4 texture
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# References:
-# - https://github.com/godotengine/godot/blob/master/core/io/image.h
-# - https://github.com/godotengine/godot/blob/master/scene/resources/texture.cpp
-# - https://github.com/godotengine/godot/blob/master/scene/resources/texture.h
-# TODO: Don't show "rescale to" if it matches the image size.
-0 string GST2 Godot 4 texture
-!:ext stex
-!:mime image/x-godot-stex
->4 ulelong x v%u:
->0x28 uleshort x %u x
->0x2A uleshort x %u
->8 use godot-rescale-display-w
->12 use godot-rescale-display-h
->12 uleshort x \b,
->0x2C ulelong >1 %u mipmaps,
->0x30 use godot-pixel-format
->0x24 ulelong 1 \b, embedded PNG image
->0x24 ulelong 2 \b, embedded WebP image
->0x24 ulelong 3 \b, Basis Universal
-
-# Summary: iCEDraw graphic *.IDF
-# URL: http://fileformats.archiveteam.org/wiki/ICEDraw
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/i/idf-icedraw.trid.xml
-# From: Joerg Jenderek
-# Note: called "iCEDraw graphic" by TrID, "iCEDraw text" by FFmpeg and "iCE Draw" by Ansilove
-# verified by FFmpeg command `ffprobe ICE-9605.IDF` and `ansilove -s SQ-FORCE.IDF`
-0 string \0041.4\0\0\0\0O\0 iCEDraw graphic
-#!:mime application/octet-stream
-!:mime image/x-idf
-!:ext idf
-
-# Type: ColoRIX VGA Paint Image File (.rix/.sci/.scX)
-# From: Eddy Jansson <github.com/eloj>
-# Reference: https://www.fileformat.info/format/rix/spec/
-#
-0 name rix-header
->0 uleshort x \b, %u x
->2 uleshort x %u
-# palette type:
-# .. if direct color, low bits encode bpp
->4 ubyte&128 0
->>4 ubyte&127 x \b %u bpp (direct color)
-# .. else palette
->4 ubyte&128 128
->>4 ubyte&7 0 \b x 2
->>4 ubyte&7 1 \b x 4
->>4 ubyte&7 2 \b x 8
->>4 ubyte&7 3 \b x 16
->>4 ubyte&7 4 \b x 32
->>4 ubyte&7 5 \b x 64
->>4 ubyte&7 6 \b x 128
->>4 ubyte&7 7 \b x 256
-# storage type
-#>5 ubyte&15 0 \b, Linear
->5 ubyte&15 1 \b, Planar (0213)
->5 ubyte&15 2 \b, Planar
->5 ubyte&15 3 \b, Text
->5 ubyte&15 4 \b, Planar lines
->5 ubyte&128 128 \b (compressed)
->5 ubyte&64 64 \b (extension)
->5 ubyte&32 32 \b (encrypted)
-
-0 string RIX3 ColoRIX Image
->4 use rix-header
-
-0 string RIX7 ColoRIX Slideshow
-
-# http://fileformats.archiveteam.org/wiki/PaperPort_(MAX)
-0 string ViG Visioneer PaperPort
->3 string Ae 2
->3 string Be 2
->3 string Cj 3-4
->3 string Em 5-7
->3 string Fk 8-12
->3 default x MAX
diff --git a/contrib/libs/libmagic/magic/Magdir/inform b/contrib/libs/libmagic/magic/Magdir/inform
deleted file mode 100644
index fe518ece91..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/inform
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: inform,v 1.5 2009/09/19 16:28:09 christos Exp $
-# inform: file(1) magic for Inform interactive fiction language
-
-# URL: http://www.inform-fiction.org/
-# From: Reuben Thomas <rrt@sc3d.org>
-
-0 search/100/cW constant\ story Inform source text
diff --git a/contrib/libs/libmagic/magic/Magdir/intel b/contrib/libs/libmagic/magic/Magdir/intel
deleted file mode 100644
index 5177fea457..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/intel
+++ /dev/null
@@ -1,310 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: intel,v 1.23 2022/10/31 13:22:26 christos Exp $
-# intel: file(1) magic for x86 Unix
-#
-# Various flavors of x86 UNIX executable/object (other than Xenix, which
-# is in "microsoft"). DOS is in "msdos"; the ambitious soul can do
-# Windows as well.
-#
-# Windows NT belongs elsewhere, as you need x86 and MIPS and Alpha and
-# whatever comes next (HP-PA Hummingbird?). OS/2 may also go elsewhere
-# as well, if, as, and when IBM makes it portable.
-#
-# The `versions' should be un-commented if they work for you.
-# (Was the problem just one of endianness?)
-#
-0 leshort 0502 basic-16 executable
->12 lelong >0 not stripped
-#>22 leshort >0 - version %d
-0 leshort 0503 basic-16 executable (TV)
->12 lelong >0 not stripped
-#>22 leshort >0 - version %d
-0 leshort 0510 x86 executable
->12 lelong >0 not stripped
-0 leshort 0511 x86 executable (TV)
->12 lelong >0 not stripped
-0 leshort =0512 iAPX 286 executable small model (COFF)
->12 lelong >0 not stripped
-#>22 leshort >0 - version %d
-0 leshort =0522 iAPX 286 executable large model (COFF)
->12 lelong >0 not stripped
-#>22 leshort >0 - version %d
-# updated by Joerg Jenderek at Oct 2015
-# https://de.wikipedia.org/wiki/Common_Object_File_Format
-# http://www.delorie.com/djgpp/doc/coff/filhdr.html
-# ./msdos (version 5.25) labeled the next entry as "MS Windows COFF Intel 80386 object file"
-# ./intel (version 5.25) labeled the next entry as "80386 COFF executable"
-# SGI labeled the next entry as "iAPX 386 executable" --Dan Quinlan
-0 leshort =0514
-# use subroutine to display name+flags+variables for common object formatted files
->0 use display-coff
-#>12 lelong >0 not stripped
-# no hint found, that at offset 22 is version
-#>22 leshort >0 - version %d
-0 leshort 0x0200
-# no F_EXEC flag bit implies Intel ia64 COFF object file without optional header
->18 leshort ^0x0002
-# skip some DEGAS high-res uncompressed bitmap *.pi3 handled by ./images like
-# GEMINI03.PI3 MODEM2.PI3 POWERFIX.PI3 sigirl1.pi3 vanna5.pi3
-# by test for valid starting character (often point 0x2E) of 1st section name
->>20 ubyte >0x1F
->>>0 use display-coff
-# F_EXEC flag bit implies Intel ia64 COFF executable
->18 leshort &0x0002
->>0 use display-coff
-0 leshort 0x8664
->0 use display-coff
-
-# rom: file(1) magic for BIOS ROM Extensions found in intel machines
-# mapped into memory between 0xC0000 and 0xFFFFF
-# From: Alex Myczko <alex@aiei.ch>
-# updated by Joerg Jenderek
-# https://en.wikipedia.org/wiki/Option_ROM
-# URL: http://fileformats.archiveteam.org/wiki/BIOS
-# Reference: http://www.lejabeach.com/sisubb/BIOS_Disassembly_Ninjutsu_Uncovered.pdf
-0 beshort 0x55AA
-# skip misidentified raspberry pi pieeprom-*.bin by check for
-# unlikely high ROM size (0xF0*512=240*512) and not observed start instruction 0x0F
->2 ubeshort !0xF00F
-# skip 2 byte sized eof.bin with start magic
->>0 use rom-x86
-0 name rom-x86
->0 beshort x BIOS (ia32) ROM Ext.
-#!:mime application/octet-stream
-!:mime application/x-ibm-rom
-!:ext rom/bin
-################################################################################
-# not Plug aNd Play ($PnP) like 00000000 (ide_xtp.bin kvmvapic.bin V7VGA.ROM) 000000fc (MCT-VGA.bin)
-# 55aaf00f (pieeprom-*.bin) 55aa40e9 (Trm3x5.bin) 24506f4f (sgabios-bin.rom)
-# 55aa4be9 (vgabios-stdvga.rom vgabios-cirrus-bin.rom vgabios-vmware-bin.rom)
->(26.s) ubelong !0x24506e50
-#>(26.s) ubelong !0x24506e50 NOT PNP=%8.8x
-# also not PCI (PCIR) implies "old" ISA cards or foo like: 8a168404 (MCT-VGA.bin)
-# 55aaf00f (pieeprom*.bin)
->>(24.s) ubelong !0x50434952
-#>>(24.s) ubelong !0x50434952 ISA CARD=%8.8x
-# "old" identification strings used in file version 5.41 and earlier
-# probably an USB controller
->>>5 string USB USB
-# probably https://en.wikipedia.org/wiki/Preboot_Execution_Environment
->>>7 string LDR UNDI image
-# probably another Adaptec SCSI controller
->>>26 string Adaptec Adaptec
-# http://minuszerodegrees.net/rom/bin/adaptec_aha1542cp_bios_908501-00.bin
-# already done by PNP variant
-#>>>28 string Adaptec Adaptec
-# probably Promise SCSI controller
->>>42 string PROMISE Promise
-# old test for IBM compatible Video cards; INTERNAL FACTS WHY IS THIS WORKING?
->30 string IBM IBM comp. Video
-# display exact text for IBM compatible Video cards with longer text
->>33 ubyte !0
->>>30 string x "%s"
-# http://minuszerodegrees.net/rom/bin/unknown/MCT-VGA-16%20-%20TDVGA%203588%20BIOS%20Version%20V1.04A.zip
-# "IBM COMPATIBLETDVGA 3588 BIOS Version V1.04A2+" "MCT-VGA-16 - TDVGA 3588 BIOS Version V1.04A.bin"
-# "IBM VGA Compatible\001" NVidia44.bin
-# "IBM EGA ROM Video Seven BIOS Code, Version 1.04" V7VGA.ROM
-# "IBM" vgabios-stdvga.rom
-# "IBM" vgabios-vmware-bin.rom:
-# "IBM" vgabios-cirrus-bin.rom
-# "IBM" vgabios-virtio-bin.rom
-################################################################################
-# ROM size in 512B blocks must be interpreted as unsigned for ROM of network cards
-# like: efi-eepro100.rom efi-rtl8139.rom pxe-e1000.rom
->2 ubyte x (%u*512)
-# file name file size calculated size remark
-# eof.bin 2 - with start magic nothing is shown here
-# orchid.bin 188 0 =0*512 on window 95 CD in Drivers\audio\orchid3d
-# multiboot.bin 1024 1024 =2*512 QEMU emulator
-# loader1.bin 512 2048 =4*512
-# ide_xtp.bin 8192 8192 =16*512
-# kvmvapic.bin 9216 9216 =18*512
-# V7VGA.ROM 18832 16384 =32*512
-# adaptec1542.bin 32768 16384 =32*512
-# MCT-VGA.bin 32768 24576 =48*512
-# 2975BIOS.BIN 32768 32256 =63*512
-# efi-e1000.rom 196608 64000 =125*512
-# efi-rtl8139.rom 176640 66048 =129*512
-# pieeprom*.bin 524288 122880 =240*512
-################################################################################
-# initialization vector with executable code; often near JuMP instruction E9 yy zz
->3 ubyte =0xE9 jmp
-# jmp offset like: 008fh 0093h 009fh 00afh 0143h 3ad7h 5417h 54ech 594dh 895fh
->>4 uleshort x %#4.4x
-# for initialization vector samples without 3 byte jump instruction
->3 ubyte !0xE9 instruction
-# eb4b3734h NVidia44.bin
-# 00003234h V7VGA.ROM
-# 060e0731h kvmvapic.bin
-# cb000000h linuxboot-bin.rom
-# e80d0fcbh PXE-Intel.rom
-# b8004875h orchid.bin
->>3 ubelong x %#8.8x
-# For misidentified raspberry pi pieeprom-*.bin like: 0xf00f
-#>2 ubeshort x \b, AT 2 %#4.4x
-################################################################################
-# new sections for BIOS (ia32) ROM Extension
-# 4 bytes ASCII Signature "$PnP" for Plug aNd Play expansion header
->(26.s) string =$PnP \b;
-#>(26.s) string =$PnP FOUND $PnP
-# at 1Ah possible offset to expansion header structure; new for Plug aNd Play
->>26 uleshort x at %#x PNP
-# Plug and Play vendor+device ID like: 0 0x000f1000 (2975BIOS.BIN) 0x31121095 (4243.bin) 0x04904215 (adaptec1542.bin)
-#>>(26.s+0x0A) ulelong !0 NOT-nullID=%8.8x
->>(26.s+0x0A) uleshort !0
-# show PnP Vendor identification in human readable text form instead of numeric
-# For adaptec_ava1515_bios_585201-00.bin the endianness is reversed! BUT IS THIS ALWAYS TRUE?
->>>(26.s+0x0C) use \^PCI-vendor
->>>(26.s+0x0A) ubeshort x device=%#4.4x
-# 3 byte Device type code; probably the same meaning as in PCI section?
-# OK for storage controller SCSI (2975BIOS.BIN adaptec1542.bin)
-# and network controller ethernet (efi-e1000.rom efi-rtl8139.rom)
->>(26.s+0x12) use PCI-class
-# structure revision like: 01h
->>(26.s+4) ubyte !1 \b, revision %u
-# PnP Header structure length in multiple of 16 bytes like: 2
->>(26.s+5) uleshort !2 \b, length %u*16
-# offset to next header; 0 if none
->>(26.s+7) uleshort !0 \b, at %#x next header
-# reserved byte; seems to be zero
->>(26.s+8) ubyte !0 \b, reserved %#x
-# 8-bit checksum for this header; calculated and patched by patch2pnprom
->>(26.s+9) ubyte !0 \b, CRC %#x
-# pointer to optional manufacturer string; like: 0 (4243.bin) 59h 5ch 60h c7h 14eh 27ch 296h 324h 3662h
->>(26.s+0x0E) uleshort >0 \b, at %#x
->>>(26.s+0x0C) uleshort x
-# manufacturer ASCII-Z string like "http://ipxe.org" "Plop - Elmar Hanlhofer www.plop.at" "QEMU"
->>>>(&0.s) string x "%s"
-# pointer to optional product string; like: 0 (2975BIOS.BIN) 6ch 70h 7ch d9h 160h 281h 29bh 329h
->>(26.s+0x10) uleshort >0 \b, at %#x
->>>(26.s+0x0E) uleshort x
-# often human readable product ASCII-Z string like "iPXE" "Plop Boot Manager"
-# "multiboot loader" "Intel UNDI, PXE-2.0 (build 082)"
->>>>(&0.s) string x "%s"
-# PnP Device indicators; contains bits that identify the device as being capable of bootable
-#>>(26.s+0x15) ubyte x \b, INDICATORS %#x
-# device is a display device
->>(26.s+0x15) ubyte &0x01 \b, display
-# device is an input device
->>(26.s+0x15) ubyte &0x02 \b, input
-# device is an IPL device
->>(26.s+0x15) ubyte &0x04 \b, IPL
-#>>(26.s+0x15) ubyte &0x08 reserved
-# ROM is only required if this device is selected as a boot device
->>(26.s+0x15) ubyte &0x10 \b, bootable
-# indicates ROM is read cacheable
->>(26.s+0x15) ubyte &0x20 \b, cacheable
-# ROM may be shadowed in RAM
->>(26.s+0x15) ubyte &0x40 \b, shadowable
-# ROM supports the device driver initialization model
->>(26.s+0x15) ubyte &0x80 \b, InitialModel
-# boot connection vector; an offset to a routine that hook into INT 9h, INT 10h, or INT 13h
-# 0 means disabled 0x0429 (4650_sr5.bin) 0x0072 (adaptec1542.bin)
->>(26.s+0x16) uleshort !0 \b, boot vector offset %#x
-# disconnect vector; offset to routine that do cleanup from an unsuccessful boot attempt
->>(26.s+0x18) uleshort !0 \b, disconnect offset %#x
-# bootstrap entry point/vector (BEV); offset to a routine (like RPL) that hook into INT 19h
-# 0 means disabled 0x3c (multiboot.bin) 0x358 (efi-rtl8139.rom) 0xae7 (PXE-Intel.rom)
->>(26.s+0x1A) uleshort !0 \b, bootstrap offset %#x
-# 2nd reserved area; seems to be zero
->>(26.s+0x1C) uleshort !0 \b, 2nd reserved %#x
-# static resource information vector; 0 means disabled
->>(26.s+0x1E) uleshort !0 \b, static offset %#4.4x
-################################################################################
-# 4 bytes ASCII Signature "PCIR" for PCI Data Structure
-#>(24.s) string =PCIR FOUND PCIR
->(24.s) string =PCIR \b;
-# pointer to PCI data structure like: 1Ch 38h 104h 8E44h
->>24 uleshort x at %#x PCI
-# Vendor identification (ID) https://pci-ids.ucw.cz/v2.2/pci.ids
-#>>(24.s+4) uleshort x ID=%4.4x
-# show Vendor identification in human readable text form instead of numeric
->>(24.s+4) use PCI-vendor
-# device identification (ID)
->>(24.s+6) uleshort x device=%#4.4x
-# Base+sub class code https://wiki.osdev.org/PCI
->>(24.s+0x0D) use PCI-class
-# pointer to vital product data (VPD); 0 indicates no VPD; WHAT EXACTLY IS VPD?
->>(24.s+8) uleshort !0 \b, at %#x VPD
-# PCI data structure length like: 24h 28h
->>(24.s+0xA) uleshort >0x28 \b, length %u
-# PCI data structure revision like: 0 3
->>(24.s+0xC) ubyte >0 \b, revision %u
-# image length (hexadecimal) in multiple of 512 bytes like: 54 56 68 6a 76 78 7c 7d 7e 7f 80 81 83
-# Apparently this gives the same information as given by byte at offset 2 but as 16-bit
-#>>(24.s+0x10) uleshort x \b, length %u*512
-# revision level of code/data like: 0 1 201h 502h
->>(24.s+0xC) ubyte >1 \b, code revision %#x
-# code type: 0~Intel x86/PC-AT compatible 1~Open firmware standard for PCI42 FF~Reserved
->>(24.s+0x14) ubyte >0 \b, code type %#x
-# last image indicator; bit 7 indicates "last image"; bits 0-6 are reserved
->>(24.s+0x15) ubyte >0
->>>(24.s+0x15) ubyte =0x80 \b, last ROM
-# THIS SHOULD NOT HAPPEN!
->>>(24.s+0x15) ubyte !0x80 \b, indicator %x
-# 3rd reserved area; seems to be zero in most cases but not for
-# efi-e1000.rom efi-rtl8139.rom
->>(24.s+0x16) ubeshort !0 \b, 3rd reserved %#x
-
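For orientation in the rom-x86 rules above: the indirect offsets (24.s) and (26.s) follow the 16-bit little-endian pointers at 0x18 and 0x1A of the option-ROM header to the "PCIR" and "$PnP" structures, and the byte at offset 2 gives the ROM size in 512-byte blocks. A minimal sketch under those assumptions (option_rom_info is an illustrative name):

    import struct

    def option_rom_info(rom: bytes):
        if rom[:2] != b"\x55\xaa":
            raise ValueError("no 0x55AA option-ROM signature")
        size = rom[2] * 512                      # size stored in 512-byte blocks
        pcir = struct.unpack_from("<H", rom, 0x18)[0]
        pnp = struct.unpack_from("<H", rom, 0x1A)[0]
        has_pcir = bool(pcir) and rom[pcir:pcir + 4] == b"PCIR"
        has_pnp = bool(pnp) and rom[pnp:pnp + 4] == b"$PnP"
        return size, has_pcir, has_pnp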
-# Flash descriptors for Intel SPI flash roms.
-# From Dr. Jesus <j@hug.gs>
-0 lelong 0x0ff0a55a Intel serial flash for ICH/PCH ROM <= 5 or 3400 series A-step
-16 lelong 0x0ff0a55a Intel serial flash for PCH ROM
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Advanced_Configuration_and_Power_Interface
-# Reference: https://uefi.org/sites/default/files/resources/ACPI_6_3_final_Jan30.pdf
-# Note: generated for example by `cat /sys/firmware/acpi/tables/DSDT MyDSDT.aml`
-0 string DSDT
->0 use acpi-table
-# not tested or other file format
-0 string APIC
->0 use acpi-table
-#0 string ASF!
-#>0 use acpi-table
-0 string FACP
->0 use acpi-table
-#0 string FACS
-#>0 use acpi-table
-0 string MCFG
->0 use acpi-table
-0 string SLIC
->0 use acpi-table
-0 string SSDT
->0 use acpi-table
-0 name acpi-table
-# skip ASCII text starting with DSDT by looking for valid "low" revision
->8 ubyte <17 ACPI Machine Language file
-# assume that ACPI tables size are lower than 16 MiB
-#>4 ulelong <0x01000000
-# DSDT for Differentiated System Description Table
->>0 string x '%.4s'
-#!:mime application/octet-stream
-!:mime application/x-intel-aml
-!:ext aml
-# the manufacture model ID like: VBOXBIOS BXDSDT
->>16 string >\0 %.8s
-# OEM revision of DSDT for supplied OEM Table ID like: 0 1 2 20090511
->>>24 ulelong x %x
-# OEM ID like: INTEL VBOX (VirtualBox) BXDSDT (qemu) MEDION or \030\001\0\0 for s3pt.aml
->>10 ubyte >040 by %c
->>>11 ubyte >040 \b%c
->>>>12 ubyte >040 \b%c
->>>>>13 ubyte >040 \b%c
->>>>>>14 ubyte >040 \b%c
->>>>>>>15 ubyte >040 \b%c
-# This field also sets the global integer width for the AML interpreter.
-# Values less than two will cause the interpreter to use 32-bit.
-# Values of two and greater will cause the interpreter to use full 64-bit.
-# 16 for asf!.aml, 67 for rsdp.aml
->>8 ubyte x \b, revision %u
-# length, in bytes, of the entire DSDT (including the header)
->>4 ulelong x \b, %u bytes
-# entire table must sum to zero
-#>>9 ubyte x \b, checksum %#x
-# vendor ID for the ASL Compiler like: INTL MSFT ...
->>28 string >\0 \b, created by %.4s
-# revision number of the ASL Compiler like: 20051117 20140724 20190703 20200110 ...
->>>32 ulelong x %x
-
diff --git a/contrib/libs/libmagic/magic/Magdir/interleaf b/contrib/libs/libmagic/magic/Magdir/interleaf
deleted file mode 100644
index 8e3aaf57da..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/interleaf
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: interleaf,v 1.10 2009/09/19 16:28:10 christos Exp $
-# interleaf: file(1) magic for InterLeaf TPS:
-#
-0 string =\210OPS Interleaf saved data
-0 string =<!OPS Interleaf document text
->5 string ,\ Version\ = \b, version
->>17 string >\0 %.3s
diff --git a/contrib/libs/libmagic/magic/Magdir/island b/contrib/libs/libmagic/magic/Magdir/island
deleted file mode 100644
index f40521a036..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/island
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: island,v 1.5 2009/09/19 16:28:10 christos Exp $
-# island: file(1) magic for IslandWrite/IslandDraw, from SunOS 5.5.1
-# "/etc/magic":
-# From: guy@netapp.com (Guy Harris)
-#
-4 string pgscriptver IslandWrite document
-13 string DrawFile IslandDraw document
-
diff --git a/contrib/libs/libmagic/magic/Magdir/ispell b/contrib/libs/libmagic/magic/Magdir/ispell
deleted file mode 100644
index 57a6e9e789..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ispell
+++ /dev/null
@@ -1,63 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ispell,v 1.8 2009/09/19 16:28:10 christos Exp $
-# ispell: file(1) magic for ispell
-#
-# Ispell 3.0 has a magic of 0x9601 and ispell 3.1 has 0x9602. This magic
-# will match 0x9600 through 0x9603 in *both* little endian and big endian.
-# (No other current magic entries collide.)
-#
-# Updated by Daniel Quinlan (quinlan@yggdrasil.com)
-#
-0 leshort&0xFFFC 0x9600 little endian ispell
->0 byte 0 hash file (?),
->0 byte 1 3.0 hash file,
->0 byte 2 3.1 hash file,
->0 byte 3 hash file (?),
->2 leshort 0x00 8-bit, no capitalization, 26 flags
->2 leshort 0x01 7-bit, no capitalization, 26 flags
->2 leshort 0x02 8-bit, capitalization, 26 flags
->2 leshort 0x03 7-bit, capitalization, 26 flags
->2 leshort 0x04 8-bit, no capitalization, 52 flags
->2 leshort 0x05 7-bit, no capitalization, 52 flags
->2 leshort 0x06 8-bit, capitalization, 52 flags
->2 leshort 0x07 7-bit, capitalization, 52 flags
->2 leshort 0x08 8-bit, no capitalization, 128 flags
->2 leshort 0x09 7-bit, no capitalization, 128 flags
->2 leshort 0x0A 8-bit, capitalization, 128 flags
->2 leshort 0x0B 7-bit, capitalization, 128 flags
->2 leshort 0x0C 8-bit, no capitalization, 256 flags
->2 leshort 0x0D 7-bit, no capitalization, 256 flags
->2 leshort 0x0E 8-bit, capitalization, 256 flags
->2 leshort 0x0F 7-bit, capitalization, 256 flags
->4 leshort >0 and %d string characters
-0 beshort&0xFFFC 0x9600 big endian ispell
->1 byte 0 hash file (?),
->1 byte 1 3.0 hash file,
->1 byte 2 3.1 hash file,
->1 byte 3 hash file (?),
->2 beshort 0x00 8-bit, no capitalization, 26 flags
->2 beshort 0x01 7-bit, no capitalization, 26 flags
->2 beshort 0x02 8-bit, capitalization, 26 flags
->2 beshort 0x03 7-bit, capitalization, 26 flags
->2 beshort 0x04 8-bit, no capitalization, 52 flags
->2 beshort 0x05 7-bit, no capitalization, 52 flags
->2 beshort 0x06 8-bit, capitalization, 52 flags
->2 beshort 0x07 7-bit, capitalization, 52 flags
->2 beshort 0x08 8-bit, no capitalization, 128 flags
->2 beshort 0x09 7-bit, no capitalization, 128 flags
->2 beshort 0x0A 8-bit, capitalization, 128 flags
->2 beshort 0x0B 7-bit, capitalization, 128 flags
->2 beshort 0x0C 8-bit, no capitalization, 256 flags
->2 beshort 0x0D 7-bit, no capitalization, 256 flags
->2 beshort 0x0E 8-bit, capitalization, 256 flags
->2 beshort 0x0F 7-bit, capitalization, 256 flags
->4 beshort >0 and %d string characters
-# ispell 4.0 hash files kromJx <kromJx@crosswinds.net>
-# Ispell 4.0
-0 string ISPL ispell
->4 long x hash file version %d,
->8 long x lexletters %d,
->12 long x lexsize %d,
->16 long x hashsize %d,
->20 long x stblsize %d
diff --git a/contrib/libs/libmagic/magic/Magdir/isz b/contrib/libs/libmagic/magic/Magdir/isz
deleted file mode 100644
index 4d9c030844..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/isz
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: isz,v 1.5 2019/04/19 00:42:27 christos Exp $
-# ISO Zipped file format
-# https://www.ezbsystems.com/isz/iszspec.txt
-0 string IsZ! ISO Zipped file
->4 byte x \b, header size %u
->5 byte x \b, version %u
->8 lelong x \b, serial %u
-#12 leshort x \b, sector size %u
-#>16 lelong x \b, total sectors %u
->17 byte >0 \b, password protected
-#>24 lequad x \b, segment size %llu
-#>32 lelong x \b, blocks %u
-#>36 lelong x \b, block size %u
diff --git a/contrib/libs/libmagic/magic/Magdir/java b/contrib/libs/libmagic/magic/Magdir/java
deleted file mode 100644
index d361275535..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/java
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------
-# $File: java,v 1.22 2023/01/11 23:59:49 christos Exp $
-# Java ByteCode and Mach-O binaries (e.g., Mac OS X) use the
-# same magic number, 0xcafebabe, so they are both handled
-# in the entry called "cafebabe".
-#------------------------------------------------------------
-# Java serialization
-# From Martin Pool (m.pool@pharos.com.au)
-0 beshort 0xaced Java serialization data
->2 beshort >0x0004 \b, version %d
-
-0 belong 0xfeedfeed Java KeyStore
-!:mime application/x-java-keystore
-0 belong 0xcececece Java JCE KeyStore
-!:mime application/x-java-jce-keystore
-
-# Java source
-0 regex \^import.*;$ Java source
-!:mime text/x-java
-
-# Java HPROF dumps
-# https://java.net/downloads/heap-snapshot/hprof-binary-format.html
-0 string JAVA\x20PROFILE\x201.0.
->0x12 byte 0
->>0x11 ubyte-0x31 <2 Java HPROF dump,
->>>0x17 beqdate/1000 x created %s
-
-# Java jmod module
-# See https://hg.openjdk.java.net/jdk9/jdk9/jdk/file/tip/src/java.base/share/classes/jdk/internal/jmod/JmodFile.java
-# Grr. 2 byte magic "JM", really? In 2019?
-0 belong 0x4a4d0100 Java jmod module version 1.0
-!:mime application/x-java-jmod
-
-# Java jlinked image
-# See https://hg.openjdk.java.net/jdk9/jdk9/jdk/file/tip/src/java.base/share/native/libjimage/imageFile.hpp
-0 belong 0xcafedada Java module image (big endian)
->4 beshort >0x00 \b, version %d
->6 beshort x \b.%d
-!:mime application/x-java-image
-
-0 lelong 0xcafedada Java module image (little endian)
->6 leshort >0x00 \b, version %d
->4 leshort x \b.%d
-!:mime application/x-java-image
-
-# JAR Manifest & Signature File
-# Reference: https://docs.oracle.com/javase/8/docs/technotes/guides/jar/jar.html
-0 string/t Manifest-Version:\x201.0 JAR Manifest
-!:ext MF
-0 string/t Signature-Version:\x201.0 JAR Signature File
-!:ext SF
diff --git a/contrib/libs/libmagic/magic/Magdir/javascript b/contrib/libs/libmagic/magic/Magdir/javascript
deleted file mode 100644
index 90a09cce46..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/javascript
+++ /dev/null
@@ -1,171 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: javascript,v 1.5 2023/01/12 00:02:16 christos Exp $
-# javascript: magic for javascript and node.js scripts.
-#
-0 string/tw #!/bin/node Node.js script executable
-!:mime application/javascript
-0 string/tw #!/usr/bin/node Node.js script executable
-!:mime application/javascript
-0 string/tw #!/bin/nodejs Node.js script executable
-!:mime application/javascript
-0 string/tw #!/usr/bin/nodejs Node.js script executable
-!:mime application/javascript
-0 string/t #!/usr/bin/env\ node Node.js script executable
-!:mime application/javascript
-0 string/t #!/usr/bin/env\ nodejs Node.js script executable
-!:mime application/javascript
-
-# JavaScript
-# The strength is increased to beat the C++ & HTML rules
-0 search "use\x20strict" JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 search 'use\x20strict' JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex module(\\.|\\[["'])exports.*= JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \^(const|var|let).*=.*require\\( JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \^export\x20(function|class|default|const|var|let|async)\x20 JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \\((async\x20)?function[(\x20] JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \^(import|export).*\x20from\x20 JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \^(import|export)\x20["']\\./ JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex \^require\\(["'] JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-0 regex typeof.*[!=]== JavaScript source
-!:strength +30
-!:mime application/javascript
-!:ext js
-
-# React Native minified JavaScript
-0 search/128 __BUNDLE_START_TIME__= React Native minified JavaScript
-!:strength +30
-!:mime application/javascript
-!:ext bundle/jsbundle
-
-# Hermes by Facebook https://hermesengine.dev/
-# https://github.com/facebook/hermes/blob/master/include/hermes/\
-# BCGen/HBC/BytecodeFileFormat.h#L24
-0 lequad 0x1F1903C103BC1FC6 Hermes JavaScript bytecode
->8 lelong x \b, version %d
-
-# v8 JavaScript engine bytecode
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://v8.dev/docs/ignition
-# Note: used in bytenode and NW.js protected source code
-# V8 bytecode extraction was added in NodeJS v5.7.0 (V8 4.6.85.31).
-# Version information is provided for some v8 versions found in NodeJS releases.
-2 uleshort =0xC0DE
->0 ulelong^0xC0DE0000 >0
-# Reservation table starts at 40
->>40 ulelong&0xFFFFFF00 =0x80000000
-# Stub keys present
->>>24 ulelong >0
->>>>0 ulelong^0xC0DE0000 x v8 bytecode, external reference table size: %u bytes,
->>>>4 ulelong =0xEE4BF478 version 5.1.281.111,
->>>>4 ulelong =0xC4A0100C version 5.5.372.43,
->>>>8 ulelong x source size: %u bytes,
->>>>12 ulelong x cpu features: %#08X,
->>>>16 ulelong x flag hash: %#08X,
->>>>20 ulelong x %u reservations,
->>>>28 ulelong x payload size: %u bytes,
->>>>32 ulelong x checksum1: %#08X,
->>>>36 ulelong x checksum2: %#08X
-# No stub keys
->>>24 ulelong =0
->>>>0 ulelong^0xC0DE0000 x v8 bytecode, external reference table size: %u bytes,
->>>>4 ulelong =0x54F0AD81 version 6.2.414.46,
->>>>4 ulelong =0X7D1BF182 version 6.2.414.54,
->>>>4 ulelong =0x35BA122E version 6.2.414.77,
->>>>4 ulelong =0X9319F9C2 version 6.2.414.78,
->>>>4 ulelong =0xB1240060 version 6.6.346.32,
->>>>4 ulelong =0x2B757060 version 6.7.288.46,
->>>>4 ulelong =0x09D147AA version 6.7.288.49,
->>>>4 ulelong =0xF4D4F48A version 6.8.275.32,
->>>>4 ulelong =0xD3961326 version 7.0.276.38,
->>>>8 ulelong x source size: %u bytes,
->>>>12 ulelong x cpu features: %#08X,
->>>>16 ulelong x flag hash: %#08X,
->>>>20 ulelong x %u reservations,
->>>>28 ulelong x payload size: %u bytes,
->>>>32 ulelong x checksum1: %#08X,
->>>>36 ulelong x checksum2: %#08X
-# Reservation table starts at 32
->>32 ulelong&0xFFFFFF00 =0x80000000
-# Second checksum present
->>>28 ulelong >0
->>>>0 ulelong^0xC0DE0000 x v8 bytecode, external reference table size: %u bytes,
->>>>4 ulelong =0x21DDF627 version 7.4.288.21,
->>>>4 ulelong =0x1FC9FE84 version 7.4.288.27,
->>>>4 ulelong =0x60A99E8B version 7.5.288.22,
->>>>4 ulelong =0x4F665E90 version 7.6.303.29,
->>>>4 ulelong =0xC7ACFCDE version 7.7.299.11,
->>>>4 ulelong =0x7F641D8F version 7.7.299.13,
->>>>4 ulelong =0xFD9A4F2E version 7.8.279.17,
->>>>4 ulelong =0x3A845324 version 7.8.279.23,
->>>>4 ulelong =0xFF52FEAF version 7.9.317.25,
->>>>8 ulelong x source size: %u bytes,
->>>>12 ulelong x flag hash: %#08X,
->>>>16 ulelong x %u reservations,
->>>>20 ulelong x payload size: %u bytes,
->>>>24 ulelong x checksum1: %#08X,
->>>>28 ulelong x checksum2: %#08X
-# No second checksum
->>>28 ulelong =0
->>>>0 ulelong^0xC0DE0000 x v8 bytecode, external reference table size: %u bytes,
->>>>4 ulelong =0x8725E0F8 version 8.1.307.30,
->>>>4 ulelong =0x09ED1289 version 8.1.307.31,
->>>>4 ulelong =0xA5728C87 version 8.3.110.9,
->>>>4 ulelong =0xB45C5D30 version 8.4.371.23,
->>>>4 ulelong =0xED9C278B version 8.4.371.19,
->>>>4 ulelong =0xD27BFF42 version 8.6.395.16,
->>>>8 ulelong x source size: %u bytes,
->>>>12 ulelong x flag hash: %#08X,
->>>>16 ulelong x %u reservations,
->>>>20 ulelong x payload size: %u bytes,
->>>>24 ulelong x payload checksum: %#08X
-# No reservation table and code starts at 24
->>32 ulelong =0
->>>0 ulelong^0xC0DE0000 x v8 bytecode, external reference table size: %u bytes,
->>>4 ulelong =0x9A6F0B0F version 9.0.257.17,
->>>4 ulelong =0x271D5D1E version 9.0.257.24,
->>>4 ulelong =0x4EEA75DF version 9.0.257.25,
->>>4 ulelong =0x80809479 version 9.1.269.36,
->>>4 ulelong =0x55C46F65 version 9.1.269.38,
->>>4 ulelong =0x8A9C758A version 9.2.230.21,
->>>4 ulelong =0x9712F0E1 version 9.3.345.16,
->>>4 ulelong =0x29593715 version 9.4.146.19,
->>>4 ulelong =0xCD991825 version 9.4.146.24,
->>>4 ulelong =0xACDD64EE version 9.4.146.26,
->>>4 ulelong =0xC96B4CD5 version 9.5.172.21,
->>>4 ulelong =0xBCCE4578 version 9.5.172.25,
->>>4 ulelong =0xA2EEA077 version 9.6.180.15,
->>>4 ulelong =0xFD350011 version 10.1.124.8,
->>>4 ulelong =0xBEF4028F version 10.2.154.13,
->>>4 ulelong =0xAF632352 version 10.2.154.4,
->>>8 ulelong x source size: %u bytes,
->>>12 ulelong x flag hash: %#08X,
->>>16 ulelong x payload size: %u bytes,
->>>20 ulelong x payload checksum: %#08X
diff --git a/contrib/libs/libmagic/magic/Magdir/jpeg b/contrib/libs/libmagic/magic/Magdir/jpeg
deleted file mode 100644
index 9cebadad70..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/jpeg
+++ /dev/null
@@ -1,252 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: jpeg,v 1.38 2022/12/02 17:42:04 christos Exp $
-# JPEG images
-# SunOS 5.5.1 had
-#
-# 0 string \377\330\377\340 JPEG file
-# 0 string \377\330\377\356 JPG file
-#
-# both of which turn into "JPEG image data" here.
-#
-0 belong 0xffd8fff7 JPEG-LS image data
-!:mime image/jls
-!:ext jls
->0 use jpeg
-
-0 belong&0xffffff00 0xffd8ff00 JPEG image data
-!:mime image/jpeg
-!:apple 8BIMJPEG
-!:strength *3
-!:ext jpeg/jpg/jpe/jfif
->0 use jpeg
-
-0 name jpeg
->6 string JFIF \b, JFIF standard
-# The following added by Erik Rossen <rossen@freesurf.ch> 1999-09-06
-# in a vain attempt to add image size reporting for JFIF. Note that these
-# tests are not fool-proof since some perfectly valid JPEGs are currently
-# impossible to specify in magic(4) format.
-# First, a little JFIF version info:
->>11 byte x \b %d.
->>12 byte x \b%02d
-# Next, the resolution or aspect ratio of the image:
->>13 byte 0 \b, aspect ratio
->>13 byte 1 \b, resolution (DPI)
->>13 byte 2 \b, resolution (DPCM)
->>14 beshort x \b, density %dx
->>16 beshort x \b%d
->>4 beshort x \b, segment length %d
-# Next, show thumbnail info, if it exists:
->>18 byte !0 \b, thumbnail %dx
->>>19 byte x \b%d
->6 string Exif \b, Exif standard: [
->>12 indirect/r x
->>12 string x \b]
-
-# Jump to the first segment
->(4.S+4) use jpeg_segment
-
-# This uses recursion...
-0 name jpeg_segment
->0 beshort 0xFFFE
-# Recursion handled by FFE0
-#>>(2.S+2) use jpeg_segment
->>2 pstring/HJ x \b, comment: "%s"
-
->0 beshort 0xFFC0
->>(2.S+2) use jpeg_segment
->>4 byte x \b, baseline, precision %d
->>7 beshort x \b, %dx
->>5 beshort x \b%d
->>9 byte x \b, components %d
-
->0 beshort 0xFFC1
->>(2.S+2) use jpeg_segment
->>4 byte x \b, extended sequential, precision %d
->>7 beshort x \b, %dx
->>5 beshort x \b%d
->>9 byte x \b, components %d
-
->0 beshort 0xFFC2
->>(2.S+2) use jpeg_segment
->>4 byte x \b, progressive, precision %d
->>7 beshort x \b, %dx
->>5 beshort x \b%d
->>9 byte x \b, components %d
-
-# Define Huffman Tables
->0 beshort 0xFFC4
->>(2.S+2) use jpeg_segment
-
->0 beshort 0xFFE1
-# Recursion handled by FFE0
-#>>(2.S+2) use jpeg_segment
->>4 string Exif \b, Exif Standard: [
->>>10 indirect/r x
->>>10 string x \b]
-
-# Application specific markers
->0 beshort&0xFFE0 =0xFFE0
->>(2.S+2) use jpeg_segment
-
-# DB: Define Quantization tables
-# DD: Define Restart interval [XXX: wrong here, it is 4 bytes]
-# D8: Start of image
-# D9: End of image
-# Dn: Restart
->0 beshort&0xFFD0 =0xFFD0
->>0 beshort&0xFFE0 !0xFFE0
->>>(2.S+2) use jpeg_segment
-
-#>0 beshort x unknown %#x
-#>>(2.S+2) use jpeg_segment
-
-# HSI is Handmade Software's proprietary JPEG encoding scheme
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/HSI_JPEG
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-hsi1.trid.xml
-# Note: called by TrID "HSI JPEG bitmap"
-0 string hsi1 JPEG image data, HSI proprietary
-#!:mime application/octet-stream
-!:mime image/x-hsi
-!:ext hsi/jpg
-
-# From: David Santinoli <david@santinoli.com>
-0 string \x00\x00\x00\x0C\x6A\x50\x20\x20\x0D\x0A\x87\x0A JPEG 2000
-# deleted from ./animation (version 1.87) with jP (=6A50h) magic at offset 4
-# From: Johan van der Knijff <johan.vanderknijff@kb.nl>
-# Added sub-entries for JP2, JPX, JPM and MJ2 formats; added mimetypes
-# https://github.com/bitsgalore/jp2kMagic
-#
-# Now read value of 'Brand' field, which yields a few possibilities:
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/JP2
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jpeg2k.trid.xml
-# Note: called by TrID "JPEG 2000 bitmap"
->20 string \x6a\x70\x32\x20 Part 1 (JP2)
-# aliases image/jpeg2000, image/jpeg2000-image, image/x-jpeg2000-image
-!:mime image/jp2
-!:ext jp2
-# URL: http://fileformats.archiveteam.org/wiki/JPX
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jpx.trid.xml
-# Note: called by TrID "JPEG 2000 eXtended bitmap"
->20 string \x6a\x70\x78\x20 Part 2 (JPX)
-!:mime image/jpx
-!:ext jpf/jpx
-# URL: http://fileformats.archiveteam.org/wiki/JPM
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jpm.trid.xml
-# Note: called by TrID "JPEG 2000 eXtended bitmap"
->20 string \x6a\x70\x6d\x20 Part 6 (JPM)
-!:mime image/jpm
-!:ext jpm
-# URL: http://fileformats.archiveteam.org/wiki/MJ2
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/v/video-mj2.trid.xml
-# Note: called by TrID "Motion JPEG 2000 video"
->20 string \x6d\x6a\x70\x32 Part 3 (MJ2)
-!:mime video/mj2
-!:ext mj2/mjp2
-
-# Type: JPEG 2000 codestream
-# From: Mathieu Malaterre <mathieu.malaterre@gmail.com>
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/JPEG_2000_codestream
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jpc.trid.xml
-# Note: called by TrID "JPEG-2000 Code Stream bitmap"
-0 belong 0xff4fff51 JPEG 2000 codestream
-# value like: 0701h FF50h
-#>45 ubeshort x \b, at 45 %#4.4x
-#!:mime application/octet-stream
-# https://reposcope.com/mimetype/image/x-jp2-codestream
-!:mime image/x-jp2-codestream
-!:ext jpc/j2c/j2k
-# MAYBE also JHC like in byte_causal.jhc ?
-# WHAT IS THAT? DEAD ENTRY?
-#45 beshort 0xff52
-
-# JPEG extended range
-# URL: http://fileformats.archiveteam.org/wiki/JPEG_XR
-# Reference: https://www.itu.int/rec/T-REC-T.832
-# http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-wmp.trid.xml
-# Note: called by TrID "JPEG XR bitmap"
-0 string \x49\x49\xbc
-# FILE_VERSION_ID; shall be equal to 1; other values are reserved for future use
->3 byte 1
-# FIRST_IFD_OFFSET; shall be an integer multiple of 2; so skip DROID fmt-590-signature-id-931.wdp
->>4 lelong%2 0 JPEG-XR
-#!:mime image/vnd.ms-photo
-!:mime image/jxr
-# NO example for HDP !
-!:ext jxr/wdp/hdp
-# MAYBE also WMP ?
-#!:ext jxr/wdp/hdp/wmp
-# moved from ./images (version 1.205 ), merged and
-# partly verified by XnView `nconvert -info abydos.jxr FLOWER.wdp`
-# example: https://web.archive.org/web/20160403012904/
-# http://shikino.co.jp/solution/upfile/FLOWER.wdp.zip
->90 bequad 0x574D50484F544F00
->>98 byte&0x08 =0x08 \b, hard tiling
->>99 byte&0x80 =0x80 \b, tiling present
->>99 byte&0x40 =0x40 \b, codestream present
->>99 byte&0x38 x \b, spatial xform=
->>99 byte&0x38 0x00 \bTL
->>99 byte&0x38 0x08 \bBL
->>99 byte&0x38 0x10 \bTR
->>99 byte&0x38 0x18 \bBR
->>99 byte&0x38 0x20 \bBT
->>99 byte&0x38 0x28 \bRB
->>99 byte&0x38 0x30 \bLT
->>99 byte&0x38 0x38 \bLB
->>100 byte&0x80 =0x80 \b, short header
->>>102 beshort+1 x \b, %d
->>>104 beshort+1 x \bx%d
->>100 byte&0x80 =0x00 \b, long header
->>>102 belong+1 x \b, %x
->>>106 belong+1 x \bx%x
->>101 beshort&0xf x \b, bitdepth=
->>>101 beshort&0xf 0x0 \b1-WHITE=1
->>>101 beshort&0xf 0x1 \b8
->>>101 beshort&0xf 0x2 \b16
->>>101 beshort&0xf 0x3 \b16-SIGNED
->>>101 beshort&0xf 0x4 \b16-FLOAT
->>>101 beshort&0xf 0x5 \b(reserved 5)
->>>101 beshort&0xf 0x6 \b32-SIGNED
->>>101 beshort&0xf 0x7 \b32-FLOAT
->>>101 beshort&0xf 0x8 \b5
->>>101 beshort&0xf 0x9 \b10
->>>101 beshort&0xf 0xa \b5-6-5
->>>101 beshort&0xf 0xb \b(reserved %d)
->>>101 beshort&0xf 0xc \b(reserved %d)
->>>101 beshort&0xf 0xd \b(reserved %d)
->>>101 beshort&0xf 0xe \b(reserved %d)
->>>101 beshort&0xf 0xf \b1-BLACK=1
->>101 beshort&0xf0 x \b, colorfmt=
->>>101 beshort&0xf0 0x00 \bYONLY
->>>101 beshort&0xf0 0x10 \bYUV420
->>>101 beshort&0xf0 0x20 \bYUV422
->>>101 beshort&0xf0 0x30 \bYUV444
->>>101 beshort&0xf0 0x40 \bCMYK
->>>101 beshort&0xf0 0x50 \bCMYKDIRECT
->>>101 beshort&0xf0 0x60 \bNCOMPONENT
->>>101 beshort&0xf0 0x70 \bRGB
->>>101 beshort&0xf0 0x80 \bRGBE
->>>101 beshort&0xf0 >0x80 \b(reserved %#x)
-
-# JPEG XL
-# From: Ian Tester
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/JPEG_XL
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jxl.trid.xml
-# Note: called by TrID "JPEG XL bitmap"
-0 string \xff\x0a JPEG XL codestream
-!:mime image/jxl
-!:ext jxl
-
-# JPEG XL (transcoded JPEG file)
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/JPEG_XL
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-jxl-iso.trid.xml
-# Note: called by TrID "JPEG XL bitmap (ISOBMFF)"
-0 string \x00\x00\x00\x0cJXL\x20\x0d\x0a\x87\x0a JPEG XL container
-!:mime image/jxl
-!:ext jxl
diff --git a/contrib/libs/libmagic/magic/Magdir/karma b/contrib/libs/libmagic/magic/Magdir/karma
deleted file mode 100644
index 938a51d5ed..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/karma
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: karma,v 1.8 2015/08/29 07:10:35 christos Exp $
-# karma: file(1) magic for Karma data files
-#
-# From <rgooch@atnf.csiro.au>
-
-0 string KarmaRHD\040Version Karma Data Structure Version
->16 belong x %u
diff --git a/contrib/libs/libmagic/magic/Magdir/kde b/contrib/libs/libmagic/magic/Magdir/kde
deleted file mode 100644
index dda5819a9b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/kde
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: kde,v 1.5 2010/11/25 15:00:12 christos Exp $
-# kde: file(1) magic for KDE
-
-0 string/t [KDE\ Desktop\ Entry] KDE desktop entry
-!:mime application/x-kdelnk
-0 string/t #\ KDE\ Config\ File KDE config file
-!:mime application/x-kdelnk
-0 string/t #\ xmcd xmcd database file for kscd
-!:mime text/x-xmcd
diff --git a/contrib/libs/libmagic/magic/Magdir/keepass b/contrib/libs/libmagic/magic/Magdir/keepass
deleted file mode 100644
index 3d26efa5c5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/keepass
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: keepass,v 1.2 2019/04/19 00:42:27 christos Exp $
-# keepass: file(1) magic for KeePass file
-#
-# Keepass Password Safe:
-# * original one: https://keepass.info/
-# * *nix port: https://www.keepassx.org/
-# * android port: https://code.google.com/p/keepassdroid/
-
-0 lelong 0x9AA2D903 Keepass password database
->4 lelong 0xB54BFB65 1.x KDB
->>48 lelong >0 \b, %d groups
->>52 lelong >0 \b, %d entries
->>8 lelong&0x0f 1 \b, SHA-256
->>8 lelong&0x0f 2 \b, AES
->>8 lelong&0x0f 4 \b, RC4
->>8 lelong&0x0f 8 \b, Twofish
->>120 lelong >0 \b, %d key transformation rounds
->4 lelong 0xB54BFB67 2.x KDBX
diff --git a/contrib/libs/libmagic/magic/Magdir/kerberos b/contrib/libs/libmagic/magic/Magdir/kerberos
deleted file mode 100644
index df6dc52364..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/kerberos
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: kerberos,v 1.3 2019/04/19 00:42:27 christos Exp $
-# kerberos: MIT kerberos file binary formats
-#
-
-# This magic entry is for demonstration purposes and could be improved
-# if the following features were implemented in file:
-#
-# Strings inside [[ .. ]] in the descriptions have special meanings and
-# are not printed.
-#
-# - Provide some form of iteration in number of components
-# [[${counter}=%d]] in the description
-# then append
-# [${counter}--] in the offset of the entries
-# - Provide a way to round the next offset
-# Add [R:4] after the offset?
-# - Provide a way to have optional entries
-# XXX: Syntax:
-# - Provide a way to "save" entries to print them later.
-# if the description is [[${name}=%s]], then nothing is
-# printed and a subsequent entry in the same magic file
-# can refer to ${name}
-# - Provide a way to format strings as hex values
-#
-# https://www.gnu.org/software/shishi/manual/html_node/\
-# The-Keytab-Binary-File-Format.html
-#
-
-0 name keytab_entry
-#>0 beshort x \b, size=%d
-#>2 beshort x \b, components=%d
->4 pstring/H x \b, realm=%s
->>&0 pstring/H x \b, principal=%s/
->>>&0 pstring/H x \b%s
->>>>&0 belong x \b, type=%d
->>>>>&0 bedate x \b, date=%s
->>>>>>&0 byte x \b, kvno=%u
-#>>>>>>>&0 pstring/H x
-#>>>>>>>>&0 belong x
-#>>>>>>>>>>&0 use keytab_entry
-
-0 belong 0x05020000 Kerberos Keytab file
->4 use keytab_entry
diff --git a/contrib/libs/libmagic/magic/Magdir/kicad b/contrib/libs/libmagic/magic/Magdir/kicad
deleted file mode 100644
index 212a550a49..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/kicad
+++ /dev/null
@@ -1,85 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: kicad,v 1.2 2020/05/06 14:03:28 christos Exp $
-# kicad: file(1) magic for KiCad files
-#
-# See
-#
-# http://kicad-pcb.org
-#
-
-# KiCad Schematic Document
-0 string (kicad_sch
->10 byte 0x20 KiCad Schematic Document
-!:ext kicad_sch/kicad_sch-bak
->>11 string (version
->>>19 byte 0x20
->>>>20 regex [0-9.]+ (Version %s)
-
-# KiCad Schematic Document (Legacy)
-0 string EESchema
->8 byte 0x20
->>9 string Schematic
->>>18 byte 0x20 KiCad Schematic Document (Legacy)
-!:ext sch/bak
->>>>24 string Version
->>>>>31 byte 0x20
->>>>>>32 string x (Version %s)
-
-# KiCad Symbol Library
-0 string (kicad_symbol_lib
->17 byte 0x20 KiCad Symbol Library
-!:ext kicad_sym
->>18 string (version
->>>26 byte 0x20
->>>>27 regex [0-9.]+ (Version %s)
-
-# KiCad Symbol Library (Legacy)
-0 string EESchema-LIBRARY
->16 byte 0x20 KiCad Symbol Library (Legacy)
-!:ext lib
->>17 string Version
->>>24 byte 0x20
->>>>25 string x (Version %s)
-
-# KiCad Symbol Library Documentation (Legacy)
-0 string EESchema-DOCLIB
->15 byte 0x20 KiCad Symbol Library Documentation (Legacy)
-!:ext dcm
->>17 string Version
->>>24 byte 0x20
->>>>25 string x (Version %s)
-
-# KiCad Board Layout
-0 string (kicad_pcb
->10 byte 0x20 KiCad Board Layout
-!:ext kicad_pcb/kicad_pcb-bak
->>11 string (version
->>>19 byte 0x20
->>>>20 regex [0-9.]+ (Version %s)
-
-# KiCad Footprint
-0 string (module
->7 byte 0x20 KiCad Footprint
-!:ext kicad_mod
-
-# KiCad Footprint (Legacy)
-0 string PCBNEW-LibModule-V1 KiCad Footprint (Legacy)
-!:ext mod
-
-# KiCad Netlist
-0 string (export
->7 byte 0x20 KiCad Netlist
-!:ext net
-
-# KiCad Symbol Library Table
-0 string (sym_lib_table
->14 byte 0xA KiCad Symbol Library Table
->14 byte 0xD KiCad Symbol Library Table
->14 byte 0x20 KiCad Symbol Library Table
-
-# KiCad Footprint Library Table
-0 string (fp_lib_table
->13 byte 0xA KiCad Footprint Library Table
->13 byte 0xD KiCad Footprint Library Table
->13 byte 0x20 KiCad Footprint Library Table
diff --git a/contrib/libs/libmagic/magic/Magdir/kml b/contrib/libs/libmagic/magic/Magdir/kml
deleted file mode 100644
index 904f3b5d5f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/kml
+++ /dev/null
@@ -1,34 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: kml,v 1.6 2019/05/21 04:50:10 christos Exp $
-# Type: Google KML, formerly Keyhole Markup Language
-# Future development of this format has been handed
-# over to the Open Geospatial Consortium.
-# https://www.opengeospatial.org/standards/kml/
-# From: Asbjoern Sloth Toennesen <asbjorn@lila.io>
-0 string/t \<?xml
->20 search/400 \ xmlns=
->>&0 regex ['"]http://earth.google.com/kml Google KML document
-!:mime application/vnd.google-earth.kml+xml
->>>&1 string 2.0' \b, version 2.0
->>>&1 string 2.1' \b, version 2.1
->>>&1 string 2.2' \b, version 2.2
-
-#------------------------------------------------------------------------------
-# Type: OpenGIS KML, formerly Keyhole Markup Language
-# This standard is maintained by the
-# Open Geospatial Consortium.
-# https://www.opengeospatial.org/standards/kml/
-# From: Asbjoern Sloth Toennesen <asbjorn@lila.io>
->>&0 regex ['"]http://www.opengis.net/kml OpenGIS KML document
-!:mime application/vnd.google-earth.kml+xml
->>>&1 string/t 2.2 \b, version 2.2
-
-#------------------------------------------------------------------------------
-# Type: Google KML Archive (ZIP based)
-# https://code.google.com/apis/kml/documentation/kml_tut.html
-# From: Asbjoern Sloth Toennesen <asbjorn@lila.io>
-0 string PK\003\004
->4 byte 0x14
->>30 string doc.kml Compressed Google KML Document, including resources.
-!:mime application/vnd.google-earth.kmz
diff --git a/contrib/libs/libmagic/magic/Magdir/lammps b/contrib/libs/libmagic/magic/Magdir/lammps
deleted file mode 100644
index 5424383db8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lammps
+++ /dev/null
@@ -1,64 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: lammps,v 1.1 2021/03/14 16:24:18 christos Exp $
-#
-
-# Magic file patterns for use with file(1) for the
-# LAMMPS molecular dynamics simulation software.
-# https://lammps.sandia.gov
-#
-# Updated: 2021-03-14 by akohlmey@gmail.com
-
-# Binary restart file for the LAMMPS MD code
-0 string LammpS\ RestartT LAMMPS binary restart file
->0x14 long x (rev %d),
->>0x20 string x Version %s,
->>>0x10 lelong 0x0001 Little Endian
->>>0x10 lelong 0x1000 Big Endian
-
-# Atom style binary dump file for the LAMMPS MD code
-# written on a little endian machine
-0 lequad -8
->0x08 string DUMPATOM LAMMPS atom style binary dump
->>0x14 long x (rev %d),
->>>0x10 lelong 0x0001 Little Endian,
->>>>0x18 lequad x First time step: %lld
-
-# written on a big endian machine
-0 bequad -8
->0x08 string DUMPATOM LAMMPS atom style binary dump
->>0x14 belong x (rev %d),
->>>0x10 lelong 0x1000 Big Endian,
->>>>0x18 bequad x First time step: %lld
-
-# Atom style binary dump file for the LAMMPS MD code
-# written on a little endian machine
-0 lequad -10
->0x08 string DUMPCUSTOM LAMMPS custom style binary dump
->>0x16 lelong x (rev %d),
->>>0x12 lelong 0x0001 Little Endian,
->>>>0x1a lequad x First time step: %lld
-
-# written on a big endian machine
-0 bequad -10
->0x08 string DUMPCUSTOM LAMMPS custom style binary dump
->>0x16 belong x (rev %d),
->>>0x12 lelong 0x1000 Big Endian,
->>>>0x1a bequad x First time step: %lld
-
-# LAMMPS log file
-0 string LAMMPS\ ( LAMMPS log file
->8 regex/16 [0-9]+\ [A-Za-z]+\ [0-9]+ written by version %s
-
-# Data file written either by LAMMPS, msi2lmp or VMD/TopoTools
-0 string LAMMPS\ data\ file LAMMPS data file
->0x12 string CGCMM\ style written by TopoTools
->0x12 string msi2lmp written by msi2lmp
->0x11 string via\ write_data written by LAMMPS
-
-# LAMMPS data file written by OVITO
-0 string #\ LAMMPS\ data\ file LAMMPS data file
->0x13 string written\ by\ OVITO written by OVITO
-
-# LAMMPS text mode dump file
-0 string ITEM:\ TIMESTEP LAMMPS text mode dump,
->15 regex/16 [0-9]+ First time step: %s
diff --git a/contrib/libs/libmagic/magic/Magdir/lecter b/contrib/libs/libmagic/magic/Magdir/lecter
deleted file mode 100644
index 6ae87c12c0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lecter
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: lecter,v 1.4 2009/09/19 16:28:10 christos Exp $
-# DEC SRC Virtual Paper: Lectern files
-# Karl M. Hegbloom <karlheg@inetarena.com>
-0 string lect DEC SRC Virtual Paper Lectern file
diff --git a/contrib/libs/libmagic/magic/Magdir/lex b/contrib/libs/libmagic/magic/Magdir/lex
deleted file mode 100644
index cc9fac5e1f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lex
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: lex,v 1.6 2009/09/19 16:28:10 christos Exp $
-# lex: file(1) magic for lex
-#
-# derived empirically, your offsets may vary!
-0 search/100 yyprevious C program text (from lex)
->3 search/1 >\0 for %s
-# C program text from GNU flex, from Daniel Quinlan <quinlan@yggdrasil.com>
-0 search/100 generated\ by\ flex C program text (from flex)
-# lex description file, from Daniel Quinlan <quinlan@yggdrasil.com>
-0 search/1 %{ lex description text
diff --git a/contrib/libs/libmagic/magic/Magdir/lif b/contrib/libs/libmagic/magic/Magdir/lif
deleted file mode 100644
index 3474a48d23..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lif
+++ /dev/null
@@ -1,50 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: lif,v 1.11 2022/10/19 20:15:16 christos Exp $
-# lif: file(1) magic for lif
-#
-# (Daniel Quinlan <quinlan@yggdrasil.com>)
-#
-# Modified by: Joerg Jenderek
-# URL: https://www.hp9845.net/9845/projects/hpdir/
-# https://github.com/bug400/lifutils
-# Reference: https://www.hp9845.net/9845/downloads/manuals/LIF_excerpt_64941-90906_flpRef_Jan84.pdf
-# Note: called by TrID "HP Logical Interchange Format disk image"
-0 beshort 0x8000
-# GRR: line above is too general as it also catches compressed DEGAS low-res bitmaps (*.pc1)
-# skip many compressed DEGAS low-res bitmap *.pc1 by test for unused bytes
->14 beshort =0
-# skip MUNCHIE.PC1 BOARD.PC1 ENEMIES.PC1 by test for low version number
->>20 ubeshort <0x0100
-# skip DROID fmt-840-signature-id-1195.adx fmt-840-signature-id-1199.adx by test for ASCII like volume name
->>>2 ubelong >0x2020201F
->>>>0 use lif-file
-0 name lif-file
-# LIF ID
->0 beshort x lif file
-!:mime application/x-lif-disk
-# lif used by Tony Duell's LIF utilities; the enhanced version by Joachim Siebold also uses dat; hpi used by hpdir
-!:ext lif/hpi/dat
-# volume label; A-Z 0-9 _ ; default is 6 spaces
->2 string x "%.6s"
-#>2 ubelong x LABEL=%8.8x
-# version number; 0 for systems without extensions or 1 for model 64000
->20 ubeshort x \b, version %u
-# LIF identifier; 010000 for system 3000
->12 beshort !0x1000 \b, LIF identifier %#x
-# directory start address in units like: 2
->8 ubelong x \b, directory
->8 ubelong !2 start address %u
-# length of directory like: 2 4 7 10 12 14 (for model 64000) 16 18 20 24 30 50 57 77 80
->16 ubelong x length %u
-# level 1 extensions
->20 beshort =0
->>24 ubequad !0 \b, for extensions %#llx...
->20 beshort >0
->>24 ubequad !0 \b, extensions %#llx...
-# word 21-126 reserved for extensions and future use; set to nil
->42 ubequad !0 \b, RESERVED %#llx
-# lif first file name for standard directory; 0xffff... means uninitialized
->8 ubelong 2
->>512 string <\xff\xff \b, 1st file %-.10s
-
diff --git a/contrib/libs/libmagic/magic/Magdir/linux b/contrib/libs/libmagic/magic/Magdir/linux
deleted file mode 100644
index ae181148df..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/linux
+++ /dev/null
@@ -1,627 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: linux,v 1.85 2023/07/17 14:40:09 christos Exp $
-# linux: file(1) magic for Linux files
-#
-# Values for Linux/i386 binaries, from Daniel Quinlan <quinlan@yggdrasil.com>
-# The following basic Linux magic is useful for reference, but using
-# "long" magic is a better practice in order to avoid collisions.
-#
-# 2 leshort 100 Linux/i386
-# >0 leshort 0407 impure executable (OMAGIC)
-# >0 leshort 0410 pure executable (NMAGIC)
-# >0 leshort 0413 demand-paged executable (ZMAGIC)
-# >0 leshort 0314 demand-paged executable (QMAGIC)
-#
-0 lelong 0x00640107 Linux/i386 impure executable (OMAGIC)
->16 lelong 0 \b, stripped
-0 lelong 0x00640108 Linux/i386 pure executable (NMAGIC)
->16 lelong 0 \b, stripped
-0 lelong 0x0064010b Linux/i386 demand-paged executable (ZMAGIC)
->16 lelong 0 \b, stripped
-0 lelong 0x006400cc Linux/i386 demand-paged executable (QMAGIC)
->16 lelong 0 \b, stripped
-#
-0 string \007\001\000 Linux/i386 object file
->20 lelong >0x1020 \b, DLL library
-# Linux-8086 stuff:
-0 string \01\03\020\04 Linux-8086 impure executable
->28 long !0 not stripped
-0 string \01\03\040\04 Linux-8086 executable
->28 long !0 not stripped
-#
-0 string \243\206\001\0 Linux-8086 object file
-#
-0 string \01\03\020\20 Minix-386 impure executable
->28 long !0 not stripped
-0 string \01\03\040\20 Minix-386 executable
->28 long !0 not stripped
-0 string \01\03\04\20 Minix-386 NSYM/GNU executable
->28 long !0 not stripped
-# core dump file, from Bill Reynolds <bill@goshawk.lanl.gov>
-216 lelong 0421 Linux/i386 core file
-!:strength / 2
->220 string >\0 of '%s'
->200 lelong >0 (signal %d)
-#
-# LILO boot/chain loaders, from Daniel Quinlan <quinlan@yggdrasil.com>
-# this can be overridden by the DOS executable (COM) entry
-2 string LILO Linux/i386 LILO boot/chain loader
-#
-# Linux make config build file, from Ole Aamot <oka@oka.no>
-# Updated by Ken Sharp
-28 string make\ config Linux make config build file (old)
-49 search/70 Kernel\ Configuration Linux make config build file
-
-#
-# PSF fonts, from H. Peter Anvin <hpa@yggdrasil.com>
-# Updated by Adam Buchbinder <adam.buchbinder@gmail.com>
-# See: https://www.win.tue.nl/~aeb/linux/kbd/font-formats-1.html
-0 leshort 0x0436 Linux/i386 PC Screen Font v1 data,
->2 byte&0x01 0 256 characters,
->2 byte&0x01 !0 512 characters,
->2 byte&0x02 0 no directory,
->2 byte&0x02 !0 Unicode directory,
->3 byte >0 8x%d
-0 string \x72\xb5\x4a\x86\x00\x00 Linux/i386 PC Screen Font v2 data,
->16 lelong x %d characters,
->12 lelong&0x01 0 no directory,
->12 lelong&0x01 !0 Unicode directory,
->28 lelong x %d
->24 lelong x \bx%d
-
-# Linux swap and hibernate files
-# Linux kernel: include/linux/swap.h
-# util-linux: libblkid/src/superblocks/swap.c
-
-# format v0, unsupported since 2002
-0xff6 string SWAP-SPACE Linux old swap file, 4k page size
-0x1ff6 string SWAP-SPACE Linux old swap file, 8k page size
-0x3ff6 string SWAP-SPACE Linux old swap file, 16k page size
-0x7ff6 string SWAP-SPACE Linux old swap file, 32k page size
-0xfff6 string SWAP-SPACE Linux old swap file, 64k page size
-
-# format v1, supported since 1998
-0 name linux-swap
->0x400 lelong 1 little endian, version %u,
->>0x404 lelong x size %u pages,
->>0x408 lelong x %u bad pages,
->0x400 belong 1 big endian, version %u,
->>0x404 belong x size %u pages,
->>0x408 belong x %u bad pages,
->0x41c string \0 no label,
->0x41c string >\0 LABEL=%s,
->0x40c ubelong x UUID=%08x
->0x410 ubeshort x \b-%04x
->0x412 ubeshort x \b-%04x
->0x414 ubeshort x \b-%04x
->0x416 ubelong x \b-%08x
->0x41a ubeshort x \b%04x
-
-0xff6 string SWAPSPACE2 Linux swap file, 4k page size,
->0 use linux-swap
-0x1ff6 string SWAPSPACE2 Linux swap file, 8k page size,
->0 use linux-swap
-0x3ff6 string SWAPSPACE2 Linux swap file, 16k page size,
->0 use linux-swap
-0x7ff6 string SWAPSPACE2 Linux swap file, 32k page size,
->0 use linux-swap
-0xfff6 string SWAPSPACE2 Linux swap file, 64k page size,
->0 use linux-swap
-
-0 name linux-hibernate
->0 string S1SUSPEND \b, with SWSUSP1 image
->0 string S2SUSPEND \b, with SWSUSP2 image
->0 string ULSUSPEND \b, with uswsusp image
->0 string LINHIB0001 \b, with compressed hibernate image
->0 string \xed\xc3\x02\xe9\x98\x56\xe5\x0c \b, with tuxonice image
->0 default x \b, with unknown hibernate image
-
-0xfec string SWAPSPACE2 Linux swap file, 4k page size,
->0 use linux-swap
->0xff6 use linux-hibernate
-0x1fec string SWAPSPACE2 Linux swap file, 8k page size,
->0 use linux-swap
->0x1ff6 use linux-hibernate
-0x3fec string SWAPSPACE2 Linux swap file, 16k page size,
->0 use linux-swap
->0x3ff6 use linux-hibernate
-0x7fec string SWAPSPACE2 Linux swap file, 32k page size,
->0 use linux-swap
->0x7ff6 use linux-hibernate
-0xffec string SWAPSPACE2 Linux swap file, 64k page size,
->0 use linux-swap
->0xfff6 use linux-hibernate
-
-#
-# Linux kernel boot images, from Albert Cahalan <acahalan@cs.uml.edu>
-# and others such as Axel Kohlmeyer <akohlmey@rincewind.chemie.uni-ulm.de>
-# and Nicolas Lichtmaier <nick@debian.org>
-# All known start with: b8 c0 07 8e d8 b8 00 90 8e c0 b9 00 01 29 f6 29
-# Linux kernel boot images (i386 arch) (Wolfram Kleff)
-# URL: https://www.kernel.org/doc/Documentation/x86/boot.txt
-514 string HdrS Linux kernel
-!:strength + 55
-# often no extension like in linux, vmlinuz, bzimage or memdisk but sometimes
-# Acronis Recovery kernel64.dat and Plop Boot Manager plpbtrom.bin
-# DamnSmallLinux 1.5 damnsmll.lnx
-!:ext /dat/bin/lnx
->510 leshort 0xAA55 x86 boot executable
->>518 leshort >0x1ff
->>>529 byte 0 zImage,
->>>529 byte 1 bzImage,
->>>526 lelong >0
->>>>(526.s+0x200) string >\0 version %s,
->>498 leshort 1 RO-rootFS,
->>498 leshort 0 RW-rootFS,
->>508 leshort >0 root_dev %#X,
->>502 leshort >0 swap_dev %#X,
->>504 leshort >0 RAMdisksize %u KB,
->>506 leshort 0xFFFF Normal VGA
->>506 leshort 0xFFFE Extended VGA
->>506 leshort 0xFFFD Prompt for Videomode
->>506 leshort >0 Video mode %d
-# This also matches new kernels, which were caught above by "HdrS".
-0 belong 0xb8c0078e Linux kernel
->0x1e3 string Loading version 1.3.79 or older
->0x1e9 string Loading from prehistoric times
-
-# System.map files - Nicolas Lichtmaier <nick@debian.org>
-8 search/1 \ A\ _text Linux kernel symbol map text
-
-# LSM entries - Nicolas Lichtmaier <nick@debian.org>
-0 search/1 Begin3 Linux Software Map entry text
-0 search/1 Begin4 Linux Software Map entry text (new format)
-
-# From Matt Zimmerman, enhanced for v3 by Matthew Palmer
-0 belong 0x4f4f4f4d User-mode Linux COW file
->4 belong <3 \b, version %d
->>8 string >\0 \b, backing file %s
->4 belong >2 \b, version %d
->>32 string >\0 \b, backing file %s
-
-############################################################################
-# Linux kernel versions
-
-0 string \xb8\xc0\x07\x8e\xd8\xb8\x00\x90 Linux
->497 leshort 0 x86 boot sector
->>514 belong 0x8e of a kernel from the dawn of time!
->>514 belong 0x908ed8b4 version 0.99-1.1.42
->>514 belong 0x908ed8b8 for memtest86
-
->497 leshort !0 x86 kernel
->>504 leshort >0 RAMdisksize=%u KB
->>502 leshort >0 swap=%#X
->>508 leshort >0 root=%#X
->>>498 leshort 1 \b-ro
->>>498 leshort 0 \b-rw
->>506 leshort 0xFFFF vga=normal
->>506 leshort 0xFFFE vga=extended
->>506 leshort 0xFFFD vga=ask
->>506 leshort >0 vga=%d
->>514 belong 0x908ed881 version 1.1.43-1.1.45
->>514 belong 0x15b281cd
->>>0xa8e belong 0x55AA5a5a version 1.1.46-1.2.13,1.3.0
->>>0xa99 belong 0x55AA5a5a version 1.3.1,2
->>>0xaa3 belong 0x55AA5a5a version 1.3.3-1.3.30
->>>0xaa6 belong 0x55AA5a5a version 1.3.31-1.3.41
->>>0xb2b belong 0x55AA5a5a version 1.3.42-1.3.45
->>>0xaf7 belong 0x55AA5a5a version 1.3.46-1.3.72
->>514 string HdrS
->>>518 leshort >0x1FF
->>>>529 byte 0 \b, zImage
->>>>529 byte 1 \b, bzImage
->>>>(526.s+0x200) string >\0 \b, version %s
-
-# Linux boot sector thefts.
-0 belong 0xb8c0078e Linux
->0x1e6 belong 0x454c4b53 ELKS Kernel
->0x1e6 belong !0x454c4b53 style boot sector
-
-############################################################################
-# Linux S390 kernel image
-# Created by: Jan Kaluza <jkaluza@redhat.com>
-8 string \x02\x00\x00\x18\x60\x00\x00\x50\x02\x00\x00\x68\x60\x00\x00\x50\x40\x40\x40\x40\x40\x40\x40\x40 Linux S390
->0x00010000 search/b/4096 \x00\x0a\x00\x00\x8b\xad\xcc\xcc
-# 64bit
->>&0 string \xc1\x00\xef\xe3\xf0\x68\x00\x00 Z10 64bit kernel
->>&0 string \xc1\x00\xef\xc3\x00\x00\x00\x00 Z9-109 64bit kernel
->>&0 string \xc0\x00\x20\x00\x00\x00\x00\x00 Z990 64bit kernel
->>&0 string \x00\x00\x00\x00\x00\x00\x00\x00 Z900 64bit kernel
-# 32bit
->>&0 string \x81\x00\xc8\x80\x00\x00\x00\x00 Z10 32bit kernel
->>&0 string \x81\x00\xc8\x80\x00\x00\x00\x00 Z9-109 32bit kernel
->>&0 string \x80\x00\x20\x00\x00\x00\x00\x00 Z990 32bit kernel
->>&0 string \x80\x00\x00\x00\x00\x00\x00\x00 Z900 32bit kernel
-
-############################################################################
-# Linux ARM compressed kernel image
-# From: Kevin Cernekee <cernekee@gmail.com>
-# Update: Joerg Jenderek
-0x24 lelong 0x016f2818 Linux kernel ARM boot executable zImage
-# There are three possible situations: LE, BE with LE bootloader and pure BE.
-# In order to aid telling these apart a new endian flag was added. In order
-# to support kernels before the flag and BE with LE bootloader was added we'll
-# do a negative check against the BE variant of the flag when we see a LE magic.
->0x30 belong !0x04030201 (little-endian)
-# raspian "kernel7.img", Vu+ Ultimo4K "kernel_auto.bin"
-!:ext img/bin
->0x30 belong 0x04030201 (big-endian)
-0x24 belong 0x016f2818 Linux kernel ARM boot executable zImage (big-endian)
-
-############################################################################
-# Linux AARCH64 kernel image
-0x38 lelong 0x644d5241 Linux kernel ARM64 boot executable Image
->0x18 lelong ^1 \b, little-endian
->0x18 lelong &1 \b, big-endian
->0x18 lelong &2 \b, 4K pages
->0x18 lelong &4 \b, 16K pages
->0x18 lelong &6 \b, 64K pages
-
-############################################################################
-# Linux 8086 executable
-0 lelong&0xFF0000FF 0xC30000E9 Linux-Dev86 executable, headerless
->5 string .
->>4 string >\0 \b, libc version %s
-
-0 lelong&0xFF00FFFF 0x4000301 Linux-8086 executable
->2 byte&0x01 !0 \b, unmapped zero page
->2 byte&0x20 0 \b, impure
->2 byte&0x20 !0
->>2 byte&0x10 !0 \b, A_EXEC
->2 byte&0x02 !0 \b, A_PAL
->2 byte&0x04 !0 \b, A_NSYM
->2 byte&0x08 !0 \b, A_STAND
->2 byte&0x40 !0 \b, A_PURE
->2 byte&0x80 !0 \b, A_TOVLY
->28 long !0 \b, not stripped
->37 string .
->>36 string >\0 \b, libc version %s
-
-# 0 lelong&0xFF00FFFF 0x10000301 ld86 I80386 executable
-# 0 lelong&0xFF00FFFF 0xB000301 ld86 M68K executable
-# 0 lelong&0xFF00FFFF 0xC000301 ld86 NS16K executable
-# 0 lelong&0xFF00FFFF 0x17000301 ld86 SPARC executable
-
-# SYSLINUX boot logo files (from 'ppmtolss16' sources)
-# https://www.syslinux.org/wiki/index.php/SYSLINUX#Display_graphic_from_filename:
-# file extension .lss .16
-0 lelong =0x1413f33d SYSLINUX' LSS16 image data
-# syslinux-4.05/mime/image/x-lss16.xml
-!:mime image/x-lss16
->4 leshort x \b, width %d
->6 leshort x \b, height %d
-
-0 string OOOM User-Mode-Linux's Copy-On-Write disk image
->4 belong x version %d
-
-# SE Linux policy database
-# From: Mike Frysinger <vapier@gentoo.org>
-0 lelong 0xf97cff8c SE Linux policy
->16 lelong x v%d
->20 lelong 1 MLS
->24 lelong x %d symbols
->28 lelong x %d ocons
-
-# Linux Logical Volume Manager (LVM)
-# Emmanuel VARAGNAT <emmanuel.varagnat@guzu.net>
-#
-# System ID, UUID and volume group name are 128 bytes long
-# but they should never be full and initialized with zeros...
-#
-# LVM1
-#
-0x0 string/b HM\001 LVM1 (Linux Logical Volume Manager), version 1
->0x12c string/b >\0 , System ID: %s
-
-0x0 string/b HM\002 LVM1 (Linux Logical Volume Manager), version 2
->0x12c string/b >\0 , System ID: %s
-
-# LVM2
-#
-# It seems that the label header can be in one of the first four sectors
-# of the disk... (from _find_labeller in lib/label/label.c of LVM2)
-#
-# 0x200 seems to be the common case
-0 name lvm2
-# display UUID in LVM format + display all 32 bytes (instead of max string length: 31)
->0x0 string >\x2f \b, UUID: %.6s
->0x6 string >\x2f \b-%.4s
->0xa string >\x2f \b-%.4s
->0xe string >\x2f \b-%.4s
->0x12 string >\x2f \b-%.4s
->0x16 string >\x2f \b-%.4s
->0x1a string >\x2f \b-%.6s
->0x20 lequad x \b, size: %lld
-
-
-# read the offset to add to the start of the header, and the header
-# start in 0x200
-0x218 string/b LVM2\ 001 LVM2 PV (Linux Logical Volume Manager)
->&(&-12.l-0x20) use lvm2
-
-0x018 string/b LVM2\ 001 LVM2 PV (Linux Logical Volume Manager)
->&(&-12.l-0x20) use lvm2
-
-0x418 string/b LVM2\ 001 LVM2 PV (Linux Logical Volume Manager)
->&(&-12.l-0x20) use lvm2
-
-0x618 string/b LVM2\ 001 LVM2 PV (Linux Logical Volume Manager)
->&(&-12.l-0x20) use lvm2
-
-# LVM snapshot
-# from Jason Farrel
-0 string SnAp LVM Snapshot (CopyOnWrite store)
->4 lelong !0 - valid,
->4 lelong 0 - invalid,
->8 lelong x version %d,
->12 lelong x chunk_size %d
-
-# SE Linux policy database
-0 lelong 0xf97cff8c SE Linux policy
->16 lelong x v%d
->20 lelong 1 MLS
->24 lelong x %d symbols
->28 lelong x %d ocons
-
-# Summary: Xen saved domain file
-# Created by: Radek Vokal <rvokal@redhat.com>
-0 string LinuxGuestRecord Xen saved domain
->20 search/256 (name
->>&1 string x (name %s)
-
-# Type: Xen, the virtual machine monitor
-# From: Radek Vokal <rvokal@redhat.com>
-0 string LinuxGuestRecord Xen saved domain
-#>2 regex \(name\ [^)]*\) %s
->20 search/256 (name (name
->>&1 string x %s...)
-
-# Systemd journald files
-# See https://www.freedesktop.org/wiki/Software/systemd/journal-files/.
-# From: Zbigniew Jedrzejewski-Szmek <zbyszek@in.waw.pl>
-# Update: Joerg Jenderek
-# URL: https://systemd.io/JOURNAL_FILE_FORMAT/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/j/journal-sysd.trid.xml
-# Note: called "systemd journal" by TrID
-# verified by `journalctl --file=user-1000.journal`
-# check magic signature[8]
-0 string LPKSHHRH
-# check that state is one of known values
-# STATE_OFFLINE~0 STATE_ONLINE~1 STATE_ARCHIVED~2
->16 ubyte&252 0
-# check that each half of three unique id128s is non-zero
-# file_id
->>24 ubequad >0
->>>32 ubequad >0
-# machine_id
->>>>40 ubequad >0
->>>>>48 ubequad >0
-# boot_id; last writer
->>>>>>56 ubequad >0
->>>>>>>64 ubequad >0 Journal file
-#!:mime application/octet-stream
-!:mime application/x-linux-journal
-# provide more info
-# head_entry_realtime; contains a POSIX timestamp stored in microseconds
->>>>>>>>184 leqdate/1000000 !0 \b, %s
->>>>>>>>184 leqdate 0 empty
-# If a file is closed after writing the state field should be set to STATE_OFFLINE
->>>>>>>>16 ubyte 0 \b,
-# for offline and empty only journal~ extension found
->>>>>>>>>184 leqdate 0 offline
-# https://man7.org/linux/man-pages/man8/systemd-journald.service.8.html
-# GRR: add char ~ inside parse_ext in ../../src/apprentice.c to avoid, in file version 5.44, an error like:
-# Magdir/linux, 463: Warning: EXTENSION type ` journal~' has bad char '~'
-!:ext journal~
-# for offline and non empty often *.journal~ but also user-1001.journal
->>>>>>>>>184 leqdate !0 offline
-!:ext journal/journal~
-# if a file is opened for writing the state field should be set to STATE_ONLINE
->>>>>>>>16 ubyte 1 \b,
-# for online and empty only journal~ extension found
->>>>>>>>>184 leqdate 0 online
-# system@0005febee06e2ff2-f7ea54d10e4346ff.journal~
-!:ext journal~
-# for online and non empty only journal extension found
->>>>>>>>>184 leqdate !0 online
-# system.journal user-1000.journal
-!:ext journal
-# after a file has been rotated it should be set to STATE_ARCHIVED
->>>>>>>>16 ubyte 2 \b, archived
-!:ext journal
-# no *.journal~ found
-#!:ext journal/journal~
-# compatible_flags
->>>>>>>>8 ulelong&1 1 \b, sealed
-# incompatible_flags; COMPRESSED_XZ~1 COMPRESSED_LZ4~2 KEYED_HASH~4 COMPRESSED_ZSTD~8 COMPACT~16
-#>>>>>>>>12 ulelong x FLAGS=%#x
->>>>>>>>12 ulelong&1 1 \b, compressed
->>>>>>>>12 ulelong&2 !0 \b, compressed lz4
->>>>>>>>12 ulelong&4 !0 \b, keyed hash siphash24
->>>>>>>>12 ulelong&8 !0 \b, compressed zstd
->>>>>>>>12 ulelong&16 !0 \b, compact
-# uint8_t reserved[7]; apparently nil
-#>>17 long !0 \b, reserved %#8.8x
-# seqnum_id; like: 0 e623691afec94b5aa968ae2d726c49cc f98b2af481924b29 8d6816ca3639edc6
-#>>>>>>>>72 ubequad x \b, seqnum_id %#16.16llx
-#>>>>>>>>80 ubequad x b%16.16llx
-# header_size like: 100h
->>>>>>>>88 ulequad !0x100 \b, header size %#llx
-# arena_size like: 0 7fff00h ffff00h 17fff00h
-#>>>>>>>>96 ulequad >0 \b, arena size %#llx
-# data_hash_table_offset like: 0 15f0h 15f0h
-#>>>>>>>>104 ulequad >0 \b, hash table offset %#llx
-# data_hash_table_size like: 0 38e380h
-#>>>>>>>>112 ulequad >0 \b, hash table size %#llx
-# field_hash_table_offset like: 0 110h
-#>>>>>>>>120 ulequad >0 \b, field hash table offset %#llx
-# field_hash_table_size like: 0 14d0h
-#>>>>>>>>128 ulequad >0 \b, field hash table size %#llx
-# tail_object_offset like: 0 43edd8h 511278h c68968h d487d0h efaa98h
-#>>>>>>>>136 ulequad >0 \b, tail object offset %#llx
-# n_objects like: 0 1032h 5a2eh 92bdh a8b5h aa75h 112adh 40c23h 4714eh
-#>>>>>>>>144 ulequad >0 \b, objects %#llx
-# n_entries like: 0 3aeh 235ah 2dc4h 3125h 16129h 187a1h
->>>>>>>>152 ulequad >0 \b, entries %#llx
-# tail_entry_seqnum like: 0 1988h 16249h 24c12h 24c12h 41e64h 9fefdh
-#>>>>>>>>160 ulequad >0 \b, tail entry seqnum %#llx
-# head_entry_seqnum like: 0 1h 15dbh 6552h 213bfh 213bfh 3e672h 9a28ah
-#>>>>>>>>168 ulequad >0 \b, head entry seqnum %#llx
-# entry_array_offset like: 0 390058h 3909d8h 3909e0h
-#>>>>>>>>176 ulequad >0 \b, entry array offset %#llx
-
-# BCache backing and cache devices
-# From: Gabriel de Perthuis <g2p.code@gmail.com>
-0x1008 lequad 8
->0x1018 string \xc6\x85\x73\xf6\x4e\x1a\x45\xca\x82\x65\xf5\x7f\x48\xba\x6d\x81 BCache
->>0x1010 ulequad 0 cache device
->>0x1010 ulequad 1 backing device
->>0x1010 ulequad 3 cache device
->>0x1010 ulequad 4 backing device
->>0x1048 string >0 \b, label "%.32s"
->>0x1028 ubelong x \b, uuid %08x
->>0x102c ubeshort x \b-%04x
->>0x102e ubeshort x \b-%04x
->>0x1030 ubeshort x \b-%04x
->>0x1032 ubelong x \b-%08x
->>0x1036 ubeshort x \b%04x
->>0x1038 ubelong x \b, set uuid %08x
->>0x103c ubeshort x \b-%04x
->>0x103e ubeshort x \b-%04x
->>0x1040 ubeshort x \b-%04x
->>0x1042 ubelong x \b-%08x
->>0x1046 ubeshort x \b%04x
-
-# Linux device tree:
-# File format description can be found in the Linux kernel sources at
-# Documentation/devicetree/booting-without-of.txt
-# From Christoph Biedl
-0 belong 0xd00dfeed
-# structure must be within blob, strings are omitted to handle devicetrees > 1M
->&(8.L) byte x
->>20 belong >1 Device Tree Blob version %d
->>>4 belong x \b, size=%d
->>>20 belong >1
->>>>28 belong x \b, boot CPU=%d
->>>20 belong >2
->>>>32 belong x \b, string block size=%d
->>>20 belong >16
->>>>36 belong x \b, DT structure block size=%d
-
-# glibc locale archive as defined in glibc locale/locarchive.h
-0 lelong 0xde020109 locale archive
->24 lelong x %d strings
-
-# Linux Software RAID (mdadm)
-# Russell Coker <russell@coker.com.au>
-0 name linuxraid
->16 belong x UUID=%8x:
->20 belong x \b%8x:
->24 belong x \b%8x:
->28 belong x \b%8x
->32 string x name=%s
->72 lelong x level=%d
->92 lelong x disks=%d
-
-4096 lelong 0xa92b4efc Linux Software RAID
->4100 lelong x version 1.2 (%d)
->4096 use linuxraid
-
-0 lelong 0xa92b4efc Linux Software RAID
->4 lelong x version 1.1 (%d)
->0 use linuxraid
-
-# Summary: Database file for mlocate
-# Description: A database file as used by mlocate, a fast implementation
-# of locate/updatedb. It uses merging to reuse the existing
-# database and avoid rereading most of the filesystem. It's
-# the default version of locate on Arch Linux (and others).
-# File path: /var/lib/mlocate/mlocate.db by default (but configurable)
-# Site: https://fedorahosted.org/mlocate/
-# Format docs: https://linux.die.net/man/5/mlocate.db
-# Type: mlocate database file
-# URL: https://fedorahosted.org/mlocate/
-# From: Wander Nauta <info@wandernauta.nl>
-0 string \0mlocate mlocate database
->12 byte x \b, version %d
->13 byte 1 \b, require visibility
->16 string x \b, root %s
-
-# Dump files for iproute2 tool. Generated by the "ip r|a save" command. URL:
-# https://www.linuxfoundation.org/collaborate/workgroups/networking/iproute2
-# From: Pavel Emelyanov <xemul@parallels.com>
-0 lelong 0x45311224 iproute2 routes dump
-0 lelong 0x47361222 iproute2 addresses dump
-
-# Image and service files for CRIU tool.
-# URL: https://criu.org
-# From: Pavel Emelyanov <xemul@parallels.com>
-0 lelong 0x54564319 CRIU image file v1.1
-0 lelong 0x55105940 CRIU service file
-0 lelong 0x58313116 CRIU inventory
-
-# Kdump compressed dump files
-# https://github.com/makedumpfile/makedumpfile/blob/master/IMPLEMENTATION
-
-0 string KDUMP\x20\x20\x20 Kdump compressed dump
->0 use kdump-compressed-dump
-
-0 name kdump-compressed-dump
->8 long x v%d
->12 string >\0 \b, system %s
->77 string >\0 \b, node %s
->142 string >\0 \b, release %s
->207 string >\0 \b, version %s
->272 string >\0 \b, machine %s
->337 string >\0 \b, domain %s
-
-# Flattened format
-0 string makedumpfile
->16 bequad 1
->>0x1010 string KDUMP\x20\x20\x20 Flattened kdump compressed dump
->>>0x1010 use kdump-compressed-dump
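A makedumpfile header starts with the 8-byte signature "KDUMP   " (KDUMP plus three spaces), a version word at offset 8 and then utsname-style 65-byte strings at offsets 12, 77, 142, 207, 272 and 337; a flattened image wraps the same header at 0x1010 behind a "makedumpfile" signature, as the rules above show. The sketch below reads the version as little-endian, which is an assumption — the rule uses a native-order long:

    import struct

    def parse_kdump_header(buf):
        if buf.startswith(b"makedumpfile") and struct.unpack_from(">Q", buf, 16)[0] == 1:
            buf = buf[0x1010:]                        # flattened format
        if not buf.startswith(b"KDUMP   "):
            return None
        def cstr(off, size=65):
            return buf[off:off + size].split(b"\0", 1)[0].decode("ascii", "replace")
        version = struct.unpack_from("<i", buf, 8)[0]     # assumed little-endian
        return {"version": version, "system": cstr(12), "node": cstr(77),
                "release": cstr(142), "version_str": cstr(207),
                "machine": cstr(272), "domain": cstr(337)}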
-
-# Device Tree files
-0 search/1024 /dts-v1/ Device Tree File (v1)
-# beat c code
-!:strength +14
-
-
-# e2fsck undo file
-# David Gilman <davidgilman1@gmail.com>
-0 string E2UNDO02 e2fsck undo file, version 2
->44 lelong x \b, undo file is
->>44 lelong&1 0 not finished
->>44 lelong&1 1 finished
->48 lelong x \b, undo file features:
->>48 lelong&1 0 lacks filesystem offset
->>48 lelong&1 1 has filesystem offset
->>>64 lequad x at %#llx
-
-# ansible vault (does not really belong here)
-0 string $ANSIBLE_VAULT; Ansible Vault
->&0 regex [0-9]+\\.[0-9]+ \b, version %s
->>&0 string ;
->>>&0 regex [A-Z0-9]+ \b, encryption %s
-
-# From: Joerg Jenderek
-# URL: https://www.gnu.org/software/grub
-# Reference: https://ftp.gnu.org/gnu/grub/grub-2.06.tar.gz
-# grub-2.06/include/grub/keyboard_layouts.h
-# grub-2.06/grub-core/commands/keylayouts.c
-# GRUB_KEYBOARD_LAYOUTS_FILEMAGIC
-0 string GRUBLAYO GRUB Keyboard
-!:mime application/x-grub-keyboard
-!:ext gkb
-# GRUB_KEYBOARD_LAYOUTS_VERSION like: 10
->8 ulelong !10 \b, version %u
-# 4 grub_uint32_t grub_keyboard_layout[160]
-# for normal french keyboard this is letter a
->92 ubyte !0x71
->>92 ubyte >0x40 \b, english q is %c
-#>732 ubyte x \b, english Q is %c
-# for normal german keyboard this is letter z
->124 ubyte !0x79
->>124 ubyte >0x40 \b, english y is %c
-#>764 ubyte x \b, english Y is %c
diff --git a/contrib/libs/libmagic/magic/Magdir/lisp b/contrib/libs/libmagic/magic/Magdir/lisp
deleted file mode 100644
index c854fb7c74..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lisp
+++ /dev/null
@@ -1,78 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: lisp,v 1.27 2020/08/14 19:23:39 christos Exp $
-# lisp: file(1) magic for lisp programs
-#
-# various lisp types, from Daniel Quinlan (quinlan@yggdrasil.com)
-
-# updated by Joerg Jenderek
-# GRR: This lot is too weak
-#0 string ;;
-# windows INF files often begin with semicolon and use CRLF as line end
-# lisp files are mainly created on unix systems with LF as line end
-#>2 search/4096 !\r Lisp/Scheme program text
-#>2 search/4096 \r Windows INF file
-
-0 search/4096 (setq\ Lisp/Scheme program text
-!:mime text/x-lisp
-0 search/4096 (defvar\ Lisp/Scheme program text
-!:mime text/x-lisp
-0 search/4096 (defparam\ Lisp/Scheme program text
-!:mime text/x-lisp
-0 search/4096 (defun\ Lisp/Scheme program text
-!:mime text/x-lisp
-0 search/4096 (autoload\ Lisp/Scheme program text
-!:mime text/x-lisp
-0 search/4096 (custom-set-variables\ Lisp/Scheme program text
-!:mime text/x-lisp
-
-# URL: https://en.wikipedia.org/wiki/Emacs_Lisp
-# Reference: https://ftp.gnu.org/old-gnu/emacs/elisp-manual-18-1.03.tar.gz
-# Update: Joerg Jenderek
-# Emacs 18 - this is always correct, but not very magical.
-0 string \012(
-# look for emacs lisp keywords
-# GRR: split regex because it is too long or get error like
-# lisp, 36: Warning: cannot get string from `^(defun|defvar|defconst|defmacro|setq|fset|put|provide|require|'
->&0 regex \^(defun|defvar|defconst|defmacro|setq|fset) Emacs v18 byte-compiled Lisp data
-!:mime application/x-elc
-# https://searchcode.com/codesearch/view/2173420/
-# not really pure text
-!:apple EMAxTEXT
-!:ext elc
-# remaining regex
->&0 regex \^(put|provide|require|random) Emacs v18 byte-compiled Lisp data
-!:mime application/x-elc
-!:apple EMAxTEXT
-!:ext elc
-# missed cl.elc dbx.elc simple.elc look like normal lisp starting with ;;;
-
-# Emacs 19+ - ver. recognition added by Ian Springer
-# Also applies to XEmacs 19+ .elc files; could tell them apart with regexs
-# - Chris Chittleborough <cchittleborough@yahoo.com.au>
-# Update: Joerg Jenderek
-0 string ;ELC
-# version\0\0\0
->4 byte >18 Emacs/XEmacs v%d byte-compiled Lisp data
-# why less than 32 ? does not make sense to me. GNU Emacs version is 24.5 at April 2015
-#>4 byte <32 Emacs/XEmacs v%d byte-compiled Lisp data
-!:mime application/x-elc
-!:apple EMAxTEXT
-!:ext elc
-
-# Files produced by GNU/Emacs pdumper
-0 string DUMPEDGNUEMACS GNU/Emacs pdumper image
-
-# Files produced by CLISP Common Lisp From: Bruno Haible <haible@ilog.fr>
-0 string (SYSTEM::VERSION\040' CLISP byte-compiled Lisp program (pre 2004-03-27)
-0 string (|SYSTEM|::|VERSION|\040' CLISP byte-compiled Lisp program text
-
-0 long 0x70768BD2 CLISP memory image data
-0 long 0xD28B7670 CLISP memory image data, other endian
-
-#.com and .bin for MIT scheme
-0 string \372\372\372\372 MIT scheme (library?)
-
-# From: David Allouche <david@allouche.net>
-0 search/1 \<TeXmacs| TeXmacs document text
-!:mime text/texmacs
diff --git a/contrib/libs/libmagic/magic/Magdir/llvm b/contrib/libs/libmagic/magic/Magdir/llvm
deleted file mode 100644
index 6befe7a8bf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/llvm
+++ /dev/null
@@ -1,22 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: llvm,v 1.10 2023/03/11 17:54:17 christos Exp $
-# llvm: file(1) magic for LLVM byte-codes
-# URL: https://llvm.org/docs/BitCodeFormat.html
-# From: Al Stone <ahs3@fc.hp.com>
-
-0 string llvm LLVM byte-codes, uncompressed
-0 string llvc0 LLVM byte-codes, null compression
-0 string llvc1 LLVM byte-codes, gzip compression
-0 string llvc2 LLVM byte-codes, bzip2 compression
-0 string CPCH LLVM Pre-compiled header file
-
-0 lelong 0x0b17c0de LLVM bitcode, wrapper
-# Are these Mach-O ABI values? They appear to be.
->16 lelong 0x01000007 x86_64
->16 lelong 0x00000007 i386
->16 lelong 0x00000012 ppc
->16 lelong 0x01000012 ppc64
->16 lelong 0x0000000c arm
-
-0 string BC\xc0\xde LLVM IR bitcode
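Raw LLVM IR bitcode starts with the bytes BC C0 DE, while the bitcode wrapper starts with the little-endian word 0x0b17c0de and stores a Mach-O style cputype at offset 16, which is what the table above decodes. The same test in Python:

    import struct

    WRAPPER_CPU = {0x01000007: "x86_64", 0x00000007: "i386",
                   0x00000012: "ppc", 0x01000012: "ppc64", 0x0000000C: "arm"}

    def llvm_bitcode_kind(buf):
        if buf.startswith(b"BC\xc0\xde"):
            return "LLVM IR bitcode"
        if len(buf) >= 20 and struct.unpack_from("<I", buf, 0)[0] == 0x0B17C0DE:
            cpu = struct.unpack_from("<I", buf, 16)[0]
            return "LLVM bitcode wrapper, " + WRAPPER_CPU.get(cpu, hex(cpu))
        return None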
diff --git a/contrib/libs/libmagic/magic/Magdir/locoscript b/contrib/libs/libmagic/magic/Magdir/locoscript
deleted file mode 100644
index 87771ccdf9..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/locoscript
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: locoscript,v 1.1 2021/01/03 20:56:25 christos Exp $
-# locoscript: file(1) magic for LocoScript documents and related files
-#
-# See http://fileformats.archiveteam.org/wiki/LocoScript
-0 string JOY\x01\x01 LocoScript 1 document
-0 string JOY\x01\x02 LocoScript 2 document
-0 string JOY\x01\x04 LocoScript 3 document
-0 string JOY\x01\x06 LocoScript 4 document
-0 string DOC\x01\x01 LocoScript PC document
-0 string DOC\x01\x03 LocoScript Professional document
diff --git a/contrib/libs/libmagic/magic/Magdir/lua b/contrib/libs/libmagic/magic/Magdir/lua
deleted file mode 100644
index ab17374534..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/lua
+++ /dev/null
@@ -1,31 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: lua,v 1.8 2020/10/08 23:23:56 christos Exp $
-# lua: file(1) magic for Lua scripting language
-# URL: https://www.lua.org/
-# From: Reuben Thomas <rrt@sc3d.org>, Seo Sanghyeon <tinuviel@sparcs.kaist.ac.kr>
-
-# Lua scripts
-0 search/1/w #!\ /usr/bin/lua Lua script text executable
-!:mime text/x-lua
-0 search/1/w #!\ /usr/local/bin/lua Lua script text executable
-!:mime text/x-lua
-0 search/1 #!/usr/bin/env\ lua Lua script text executable
-!:mime text/x-lua
-0 search/1 #!\ /usr/bin/env\ lua Lua script text executable
-!:mime text/x-lua
-
-# Lua bytecode
-0 string \033Lua Lua bytecode,
-# 2.4 uses 0x23 as its version byte because it shares the format
-# with 2.3 (which was never released publicly).
->4 byte 0x23 version 2.4
->4 byte 0x25 version 2.5/3.0
->4 byte 0x31 version 3.1
->4 byte 0x32 version 3.2
->4 byte 0x40 version 4.0
->4 byte 0x50 version 5.0
->4 byte 0x51 version 5.1
->4 byte 0x52 version 5.2
->4 byte 0x53 version 5.3
->4 byte 0x54 version 5.4
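The byte at offset 4 of a Lua chunk packs the version as two nibbles (0x51 is 5.1, 0x54 is 5.4); 2.4 reports 0x23 because it shares the on-disk format with the unreleased 2.3, and 0x25 covers both 2.5 and 3.0. A tiny decoder along those lines:

    def lua_bytecode_version(buf):
        if not buf.startswith(b"\x1bLua"):
            return None
        v = buf[4]
        if v == 0x23:                 # 2.4 shares the 2.3 format
            return "2.4"
        if v == 0x25:
            return "2.5/3.0"
        return "%d.%d" % (v >> 4, v & 0x0F)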
diff --git a/contrib/libs/libmagic/magic/Magdir/luks b/contrib/libs/libmagic/magic/Magdir/luks
deleted file mode 100644
index 16042517a3..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/luks
+++ /dev/null
@@ -1,126 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: luks,v 1.5 2022/09/07 11:23:44 christos Exp $
-# luks: file(1) magic for Linux Unified Key Setup
-# URL: https://en.wikipedia.org/wiki/Linux_Unified_Key_Setup
-# http://fileformats.archiveteam.org/wiki/LUKS
-# From: Anthon van der Neut <anthon@mnt.org>
-# Update: Joerg Jenderek
-# Note: verified by command like `cryptsetup luksDump /dev/sda3`
-
-0 string LUKS\xba\xbe LUKS encrypted file,
-# https://reposcope.com/mimetype/application/x-raw-disk-image
-!:mime application/x-raw-disk-image
-#!:mime application/x-luks-volume
-# img is the generic extension; no suffix for partitions; luksVolumeHeaderBackUp via zuluCrypt
-!:ext /luks/img/luksVolumeHeaderBackUp
-# version like: 1 2
->6 beshort x ver %d
-# test for version 1 variant
->6 beshort 1
->>0 use luks-v1
-# test for version 2 variant
->6 beshort >1
->>0 use luks-v2
-# Reference: https://mirrors.edge.kernel.org/pub/linux/utils/cryptsetup/LUKS_docs/on-disk-format.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/l/luks.trid.xml
-# display information about LUKS version 1
-0 name luks-v1
-# cipher-name like: aes twofish
->8 string x [%s,
-# cipher-mode like: xts-plain64 cbc-essiv
->40 string x %s,
-# hash specification like: sha256 sha1 ripemd160
->72 string x %s]
->168 string x UUID: %s
-# NEW PART!
-# payload-offset; start offset of the bulk data
->104 ubelong x \b, at %#x data
-# key-bytes; number of key bytes; key-bytes*8=MK-bits
->108 ubelong x \b, %u key bytes
-# mk-digest[20]; master key checksum from PBKDF2
->112 ubequad x \b, MK digest %#16.16llx
->>120 ubequad x \b%16.16llx
->>128 ubelong x \b%8.8x
-# mk-digest-salt[32]; salt parameter for master key PBKDF2
->132 ubequad x \b, MK salt %#16.16llx
->>140 ubequad x \b%16.16llx
->>148 ubequad x \b%16.16llx
->>156 ubequad x \b%16.16llx
-# mk-digest-iter; iterations parameter for master key PBKDF2
->164 ubelong x \b, %u MK iterations
-# key slot 1
->208 ubelong =0x00AC71F3 \b; slot #0
->>208 use luks-slot
-# key slot 2
->256 ubelong =0x00AC71F3 \b; slot #1
->>256 use luks-slot
-# key slot 3
->304 ubelong =0x00AC71F3 \b; slot #2
->>304 use luks-slot
-# key slot 4
->352 ubelong =0x00AC71F3 \b; slot #3
->>352 use luks-slot
-# key slot 5
->400 ubelong =0x00AC71F3 \b; slot #4
->>400 use luks-slot
-# key slot 6
->448 ubelong =0x00AC71F3 \b; slot #5
->>448 use luks-slot
-# key slot 7
->496 ubelong =0x00AC71F3 \b; slot #6
->>496 use luks-slot
-# key slot 8
->544 ubelong =0x00AC71F3 \b; slot #7
->>544 use luks-slot
-# Reference: https://gitlab.com/cryptsetup/LUKS2-docs/-/raw/master/luks2_doc_wip.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/l/luks2.trid.xml
-# display information about LUKS version 2
-0 name luks-v2
-# hdr_size; size including JSON area called Metadata area by cryptsetup with value like: 16384
->8 ubequad x \b, header size %llu
-# possible check for MAGIC_2ND after header
-#>(8.Q) string SKUL\xba\xbe \b, 2nd_HEADER_OK
-# seqid; sequence ID, increased on update; called Epoch by cryptsetup with value like: 3 4 8 10
->16 ubequad x \b, ID %llu
-# label[48]; optional ASCII label or empty; called Label by cryptsetup with value like: "LUKS2_EXT4_ROOT"
->24 string >\0 \b, label %s
-# csum_alg[32]; checksum algorithm like: sha256 sha1 sha512 whirlpool ripemd160
->72 string x \b, algo %s
-# salt[64]; salt , unique for every header
->104 ubequad x \b, salt %#llx...
-# uuid[40]; UID of device as string like: 242256c6-396e-4a35-af5f-5b70cb7af9a7
->168 string x \b, UUID: %-.40s
-# subsystem[48]; optional owner subsystem label or empty
->208 string >\0 \b, sub label %-.48s
-# hdr_offset; offset from device start [ bytes ] like: 0
->256 ubequad !0 \b, offset %llx
-# char _padding [184]; must be zeroed
-#>264 ubequad x \b, padding %#16.16llx
-#>440 ubequad x \b...%16.16llx
-# csum[64]; header checksum
->448 ubequad x \b, crc %#llx...
-# char _padding4096 [7*512]; Padding , must be zeroed
-#>512 ubequad x \b, more padding %#16.16llx
-#>4088 ubequad x \b...%16.16llx
-# JSON text data terminated by the zero character; unused remainder empty and filled with zeroes like:
-# {"keyslots":{"0":{"type":"luks2","key_size":64,"af":{"type":"luks1","stripes":4000,"hash":"sha256"},"area":{"type":"raw","offse"
->0x1000 string x \b, at 0x1000 %s
-#>0x1000 indirect x
-# display information (like active) about LUKS1 slot
-0 name luks-slot
-# state of keyslot; 0x00AC71F3~active 0x0000DEAD~inactive
-#>0 ubelong x \b, status %#8.8x
->0 ubelong =0x00AC71F3 active
->0 ubelong =0x0000DEAD inactive
-# iteration parameter for PBKDF2
-#>4 ubelong x \b, %u iterations
-# salt parameter for PBKDF2
-#>8 ubequad x \b, salt %#16.16llx
-#>>16 ubequad x \b%16.16llx
-#>>24 ubequad x \b%16.16llx
-#>>32 ubequad x \b%16.16llx
-# start sector of key material like: 8 0x200 0x3f8 0x5f0 0xdd0
->40 ubelong x \b, %#x material offset
-# number of anti-forensic stripes like: 4000
->44 ubelong !4000 \b, %u stripes
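Putting the LUKS1 offsets above together: the cipher name, cipher mode and hash spec are 32-byte strings at 8, 40 and 72, the payload offset and key size are big-endian words at 104 and 108, the UUID is a 40-byte string at 168, and the eight 48-byte key slots start at 208, each beginning with 0x00AC71F3 when active. A minimal parser under that layout (LUKS2 is only recognised here, since its metadata lives in the JSON area at 0x1000):

    import struct

    LUKS_MAGIC = b"LUKS\xba\xbe"
    LUKS_KEY_ACTIVE = 0x00AC71F3

    def parse_luks_header(buf):
        if not buf.startswith(LUKS_MAGIC):
            return None
        version = struct.unpack_from(">H", buf, 6)[0]
        if version != 1:
            return {"version": version}
        def text(off, size):
            return buf[off:off + size].split(b"\0", 1)[0].decode("ascii", "replace")
        payload_offset, key_bytes = struct.unpack_from(">II", buf, 104)
        active = [i for i in range(8)
                  if struct.unpack_from(">I", buf, 208 + 48 * i)[0] == LUKS_KEY_ACTIVE]
        return {"version": 1, "cipher": text(8, 32), "mode": text(40, 32),
                "hash": text(72, 32), "payload_offset": payload_offset,
                "key_bytes": key_bytes, "uuid": text(168, 40), "active_slots": active}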
diff --git a/contrib/libs/libmagic/magic/Magdir/m4 b/contrib/libs/libmagic/magic/Magdir/m4
deleted file mode 100644
index 587ebe80c6..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/m4
+++ /dev/null
@@ -1,11 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: m4,v 1.3 2019/02/27 16:46:23 christos Exp $
-# make: file(1) magic for M4 scripts
-#
-0 search/8192 dnl
->0 regex \^dnl\ M4 macro processor script text
-!:mime text/x-m4
-0 search/8192 AC_DEFUN
->0 regex \^AC_DEFUN\\(\\[ M4 macro processor script text
-!:strength + 15
-!:mime text/x-m4
diff --git a/contrib/libs/libmagic/magic/Magdir/mach b/contrib/libs/libmagic/magic/Magdir/mach
deleted file mode 100644
index 7eb98ff34e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mach
+++ /dev/null
@@ -1,303 +0,0 @@
-
-#------------------------------------------------------------
-# $File: mach,v 1.29 2021/04/26 15:56:00 christos Exp $
-# Mach has two magic numbers, 0xcafebabe and 0xfeedface.
-# Unfortunately the first, cafebabe, is shared with
-# Java ByteCode, so they are both handled in the file "cafebabe".
-# The "feedface" ones are handled herein.
-#------------------------------------------------------------
-# if set, it's for the 64-bit version of the architecture
-# yes, this is separate from the low-order magic number bit
-# it's also separate from the "64-bit libraries" bit in the
-# upper 8 bits of the CPU subtype
-
-# Reference: https://opensource.apple.com/source/cctools/cctools-949.0.1/
-# include/mach-o/loader.h
-# display CPU type as string like: i386 x86_64 ... armv7 armv7k ...
-0 name mach-o-cpu
->0 belong&0xff000000 0
-#
-# 32-bit ABIs.
-#
-# 1 vax
->>0 belong&0x00ffffff 1
->>>4 belong&0x00ffffff 0 vax
->>>4 belong&0x00ffffff 1 vax11/780
->>>4 belong&0x00ffffff 2 vax11/785
->>>4 belong&0x00ffffff 3 vax11/750
->>>4 belong&0x00ffffff 4 vax11/730
->>>4 belong&0x00ffffff 5 uvaxI
->>>4 belong&0x00ffffff 6 uvaxII
->>>4 belong&0x00ffffff 7 vax8200
->>>4 belong&0x00ffffff 8 vax8500
->>>4 belong&0x00ffffff 9 vax8600
->>>4 belong&0x00ffffff 10 vax8650
->>>4 belong&0x00ffffff 11 vax8800
->>>4 belong&0x00ffffff 12 uvaxIII
->>>4 belong&0x00ffffff >12 vax subarchitecture=%d
->>0 belong&0x00ffffff 2 romp
->>0 belong&0x00ffffff 3 architecture=3
->>0 belong&0x00ffffff 4 ns32032
->>0 belong&0x00ffffff 5 ns32332
->>0 belong&0x00ffffff 6 m68k
-# 7 x86
->>0 belong&0x00ffffff 7
->>>4 belong&0x0000000f 3 i386
->>>4 belong&0x0000000f 4 i486
->>>>4 belong&0x00fffff0 0
->>>>4 belong&0x00fffff0 0x80 \bsx
->>>4 belong&0x0000000f 5 i586
->>>4 belong&0x0000000f 6
->>>>4 belong&0x00fffff0 0 p6
->>>>4 belong&0x00fffff0 0x10 pentium_pro
->>>>4 belong&0x00fffff0 0x20 pentium_2_m0x20
->>>>4 belong&0x00fffff0 0x30 pentium_2_m3
->>>>4 belong&0x00fffff0 0x40 pentium_2_m0x40
->>>>4 belong&0x00fffff0 0x50 pentium_2_m5
->>>>4 belong&0x00fffff0 >0x50 pentium_2_m%#x
->>>4 belong&0x0000000f 7 celeron
->>>>4 belong&0x00fffff0 0x00 \b_m%#x
->>>>4 belong&0x00fffff0 0x10 \b_m%#x
->>>>4 belong&0x00fffff0 0x20 \b_m%#x
->>>>4 belong&0x00fffff0 0x30 \b_m%#x
->>>>4 belong&0x00fffff0 0x40 \b_m%#x
->>>>4 belong&0x00fffff0 0x50 \b_m%#x
->>>>4 belong&0x00fffff0 0x60
->>>>4 belong&0x00fffff0 0x70 \b_mobile
->>>>4 belong&0x00fffff0 >0x70 \b_m%#x
->>>4 belong&0x0000000f 8 pentium_3
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 0x10 \b_m
->>>>4 belong&0x00fffff0 0x20 \b_xeon
->>>>4 belong&0x00fffff0 >0x20 \b_m%#x
->>>4 belong&0x0000000f 9 pentiumM
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 >0x00 \b_m%#x
->>>4 belong&0x0000000f 10 pentium_4
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 0x10 \b_m
->>>>4 belong&0x00fffff0 >0x10 \b_m%#x
->>>4 belong&0x0000000f 11 itanium
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 0x10 \b_2
->>>>4 belong&0x00fffff0 >0x10 \b_m%#x
->>>4 belong&0x0000000f 12 xeon
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 0x10 \b_mp
->>>>4 belong&0x00fffff0 >0x10 \b_m%#x
->>>4 belong&0x0000000f >12 ia32 family=%d
->>>>4 belong&0x00fffff0 0x00
->>>>4 belong&0x00fffff0 >0x00 model=%x
->>0 belong&0x00ffffff 8 mips
->>>4 belong&0x00ffffff 1 R2300
->>>4 belong&0x00ffffff 2 R2600
->>>4 belong&0x00ffffff 3 R2800
->>>4 belong&0x00ffffff 4 R2000a
->>>4 belong&0x00ffffff 5 R2000
->>>4 belong&0x00ffffff 6 R3000a
->>>4 belong&0x00ffffff 7 R3000
->>>4 belong&0x00ffffff >7 subarchitecture=%d
->>0 belong&0x00ffffff 9 ns32532
->>0 belong&0x00ffffff 10 mc98000
->>0 belong&0x00ffffff 11 hppa
->>>4 belong&0x00ffffff 0 7100
->>>4 belong&0x00ffffff 1 7100LC
->>>4 belong&0x00ffffff >1 subarchitecture=%d
->>0 belong&0x00ffffff 12 arm
->>>4 belong&0x00ffffff 0
->>>4 belong&0x00ffffff 1 subarchitecture=%d
->>>4 belong&0x00ffffff 2 subarchitecture=%d
->>>4 belong&0x00ffffff 3 subarchitecture=%d
->>>4 belong&0x00ffffff 4 subarchitecture=%d
->>>4 belong&0x00ffffff 5 \bv4t
->>>4 belong&0x00ffffff 6 \bv6
->>>4 belong&0x00ffffff 7 \bv5tej
->>>4 belong&0x00ffffff 8 \bxscale
->>>4 belong&0x00ffffff 9 \bv7
->>>4 belong&0x00ffffff 10 \bv7f
->>>4 belong&0x00ffffff 11 \bv7s
->>>4 belong&0x00ffffff 12 \bv7k
->>>4 belong&0x00ffffff 13 \bv8
->>>4 belong&0x00ffffff 14 \bv6m
->>>4 belong&0x00ffffff 15 \bv7m
->>>4 belong&0x00ffffff 16 \bv7em
->>>4 belong&0x00ffffff >16 subarchitecture=%d
-# 13 m88k
->>0 belong&0x00ffffff 13
->>>4 belong&0x00ffffff 0 mc88000
->>>4 belong&0x00ffffff 1 mc88100
->>>4 belong&0x00ffffff 2 mc88110
->>>4 belong&0x00ffffff >2 mc88000 subarchitecture=%d
->>0 belong&0x00ffffff 14 SPARC
->>0 belong&0x00ffffff 15 i860g
->>0 belong&0x00ffffff 16 alpha
->>0 belong&0x00ffffff 17 rs6000
->>0 belong&0x00ffffff 18 ppc
->>>4 belong&0x00ffffff 0
->>>4 belong&0x00ffffff 1 \b_601
->>>4 belong&0x00ffffff 2 \b_602
->>>4 belong&0x00ffffff 3 \b_603
->>>4 belong&0x00ffffff 4 \b_603e
->>>4 belong&0x00ffffff 5 \b_603ev
->>>4 belong&0x00ffffff 6 \b_604
->>>4 belong&0x00ffffff 7 \b_604e
->>>4 belong&0x00ffffff 8 \b_620
->>>4 belong&0x00ffffff 9 \b_750
->>>4 belong&0x00ffffff 10 \b_7400
->>>4 belong&0x00ffffff 11 \b_7450
->>>4 belong&0x00ffffff 100 \b_970
->>>4 belong&0x00ffffff >100 subarchitecture=%d
->>0 belong&0x00ffffff >18 architecture=%d
->0 belong&0xff000000 0x01000000
-#
-# 64-bit ABIs.
-#
->>0 belong&0x00ffffff 0 64-bit architecture=%d
->>0 belong&0x00ffffff 1 64-bit architecture=%d
->>0 belong&0x00ffffff 2 64-bit architecture=%d
->>0 belong&0x00ffffff 3 64-bit architecture=%d
->>0 belong&0x00ffffff 4 64-bit architecture=%d
->>0 belong&0x00ffffff 5 64-bit architecture=%d
->>0 belong&0x00ffffff 6 64-bit architecture=%d
->>0 belong&0x00ffffff 7 x86_64
->>>4 belong&0x00ffffff 0 subarchitecture=%d
->>>4 belong&0x00ffffff 1 subarchitecture=%d
->>>4 belong&0x00ffffff 2 subarchitecture=%d
->>>4 belong&0x00ffffff 3
->>>4 belong&0x00ffffff 4 \b_arch1
->>>4 belong&0x00ffffff 8 \b_haswell
->>>4 belong&0x00ffffff >4 subarchitecture=%d
->>0 belong&0x00ffffff 8 64-bit architecture=%d
->>0 belong&0x00ffffff 9 64-bit architecture=%d
->>0 belong&0x00ffffff 10 64-bit architecture=%d
->>0 belong&0x00ffffff 11 64-bit architecture=%d
->>0 belong&0x00ffffff 12 arm64
->>>4 belong&0x00ffffff 0
->>>4 belong&0x00ffffff 1 \bv8
->>>4 belong&0x00ffffff 2 \be
->>>>7 ubyte&0xff >0 (caps:
->>>>7 ubyte&0xff <0x80 %#02x
->>>>7 ubyte&0xc0 0x80 PAC
->>>>>7 ubyte&0x3f x \b%02d
->>>>7 ubyte&0xc0 0xc0 PAK
->>>>>7 ubyte&0x3f x \b%02d
->>>>7 ubyte&0xff x \b)
->>>4 belong&0x00ffffff >2 subarchitecture=%d
->>0 belong&0x00ffffff 13 64-bit architecture=%d
->>0 belong&0x00ffffff 14 64-bit architecture=%d
->>0 belong&0x00ffffff 15 64-bit architecture=%d
->>0 belong&0x00ffffff 16 64-bit architecture=%d
->>0 belong&0x00ffffff 17 64-bit architecture=%d
->>0 belong&0x00ffffff 18 ppc64
->>>4 belong&0x00ffffff 0
->>>4 belong&0x00ffffff 1 \b_601
->>>4 belong&0x00ffffff 2 \b_602
->>>4 belong&0x00ffffff 3 \b_603
->>>4 belong&0x00ffffff 4 \b_603e
->>>4 belong&0x00ffffff 5 \b_603ev
->>>4 belong&0x00ffffff 6 \b_604
->>>4 belong&0x00ffffff 7 \b_604e
->>>4 belong&0x00ffffff 8 \b_620
->>>4 belong&0x00ffffff 9 \b_650
->>>4 belong&0x00ffffff 10 \b_7400
->>>4 belong&0x00ffffff 11 \b_7450
->>>4 belong&0x00ffffff 100 \b_970
->>>4 belong&0x00ffffff >100 subarchitecture=%d
->>0 belong&0x00ffffff >18 64-bit architecture=%d
->0 belong&0xff000000 0x02000000
-#
-# 64_32-bit ABIs.
-#
->>0 belong&0x00ffffff 0 64_32-bit architecture=%d
->>0 belong&0x00ffffff 1 64_32-bit architecture=%d
->>0 belong&0x00ffffff 2 64_32-bit architecture=%d
->>0 belong&0x00ffffff 3 64_32-bit architecture=%d
->>0 belong&0x00ffffff 4 64_32-bit architecture=%d
->>0 belong&0x00ffffff 5 64_32-bit architecture=%d
->>0 belong&0x00ffffff 6 64_32-bit architecture=%d
->>0 belong&0x00ffffff 7 64_32-bit architecture=%d
->>0 belong&0x00ffffff 8 64_32-bit architecture=%d
->>0 belong&0x00ffffff 9 64_32-bit architecture=%d
->>0 belong&0x00ffffff 10 64_32-bit architecture=%d
->>0 belong&0x00ffffff 11 64_32-bit architecture=%d
->>0 belong&0x00ffffff 12 64_32-bit arm
->>>4 belong&0x00ffffff 0
->>>4 belong&0x00ffffff 1 \bv8
->>>4 belong&0x00ffffff >1 subarchitecture=%d
->>0 belong&0x00ffffff 13 64_32-bit architecture=%d
->>0 belong&0x00ffffff 14 64_32-bit architecture=%d
->>0 belong&0x00ffffff 15 64_32-bit architecture=%d
->>0 belong&0x00ffffff 16 64_32-bit architecture=%d
->>0 belong&0x00ffffff 17 64_32-bit architecture=%d
->>0 belong&0x00ffffff 18 64_32-bit architecture=%d
->>0 belong&0x00ffffff >18 64_32-bit architecture=%d
-
-0 name mach-o-be
->0 byte 0xcf 64-bit
->4 use mach-o-cpu
->12 belong 1 object
-# GRR: Does not work for Mach-O with 2 architectures; instead display oo
-#!:ext o
-!:ext o/
->12 belong 2 executable
-# the executables normally have no file extension like perl,
-# but exceptions like perl5.18 perl5.16
-!:ext 16/18/
->12 belong 3 fixed virtual memory shared library
->12 belong 4 core
->12 belong 5 preload executable
->12 belong 6 dynamically linked shared library
-# GRR: Does not work for Mach-O with 2 architectures; instead display dylibdylib
-#!:ext dylib
-!:ext dylib/
->12 belong 7 dynamic linker
->12 belong 8 bundle
-# normally name extension bundle; but exceptions like: AMDil_r700.dylib
-!:ext bundle/dylib/
->12 belong 9 dynamically linked shared library stub
->12 belong 10 dSYM companion file
->12 belong 11 kext bundle
->12 belong >11
->>12 belong x filetype=%d
->24 belong >0 \b, flags:<
->>24 belong &0x00000001 \bNOUNDEFS
->>24 belong &0x00000002 \b|INCRLINK
->>24 belong &0x00000004 \b|DYLDLINK
->>24 belong &0x00000008 \b|BINDATLOAD
->>24 belong &0x00000010 \b|PREBOUND
->>24 belong &0x00000020 \b|SPLIT_SEGS
->>24 belong &0x00000040 \b|LAZY_INIT
->>24 belong &0x00000080 \b|TWOLEVEL
->>24 belong &0x00000100 \b|FORCE_FLAT
->>24 belong &0x00000200 \b|NOMULTIDEFS
->>24 belong &0x00000400 \b|NOFIXPREBINDING
->>24 belong &0x00000800 \b|PREBINDABLE
->>24 belong &0x00001000 \b|ALLMODSBOUND
->>24 belong &0x00002000 \b|SUBSECTIONS_VIA_SYMBOLS
->>24 belong &0x00004000 \b|CANONICAL
->>24 belong &0x00008000 \b|WEAK_DEFINES
->>24 belong &0x00010000 \b|BINDS_TO_WEAK
->>24 belong &0x00020000 \b|ALLOW_STACK_EXECUTION
->>24 belong &0x00040000 \b|ROOT_SAFE
->>24 belong &0x00080000 \b|SETUID_SAFE
->>24 belong &0x00100000 \b|NO_REEXPORTED_DYLIBS
->>24 belong &0x00200000 \b|PIE
->>24 belong &0x00400000 \b|DEAD_STRIPPABLE_DYLIB
->>24 belong &0x00800000 \b|HAS_TLV_DESCRIPTORS
->>24 belong &0x01000000 \b|NO_HEAP_EXECUTION
->>24 belong &0x02000000 \b|APP_EXTENSION_SAFE
->>24 belong &0x04000000 \b|NLIST_OUTOFSYNC_WITH_DYLDINFO
->>24 belong &0x08000000 \b|SIM_SUPPORT
->>24 belong &0x80000000 \b|DYLIB_IN_CACHE
->>24 belong x \b>
-
-#
-0 lelong&0xfffffffe 0xfeedface Mach-O
-!:strength +1
-!:mime application/x-mach-binary
->0 use \^mach-o-be
-
-0 belong&0xfffffffe 0xfeedface Mach-O
-!:strength +1
-!:mime application/x-mach-binary
->0 use mach-o-be
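Both entries mask the low bit of the magic, so 0xfeedface (32-bit) and 0xfeedfacf (64-bit) are accepted in either byte order; the cputype at offset 4 carries the 64-bit ABI flag in bit 0x01000000, the filetype sits at 12 and the flags word at 24. A compact header reader along the same lines:

    import struct

    FILETYPES = {1: "object", 2: "executable", 3: "fixed VM shared library",
                 4: "core", 5: "preload executable",
                 6: "dynamically linked shared library", 7: "dynamic linker",
                 8: "bundle", 9: "dynamically linked shared library stub",
                 10: "dSYM companion file", 11: "kext bundle"}

    def parse_macho_header(buf):
        if len(buf) < 28:
            return None
        if struct.unpack_from("<I", buf, 0)[0] & 0xFFFFFFFE == 0xFEEDFACE:
            endian = "<"
        elif struct.unpack_from(">I", buf, 0)[0] & 0xFFFFFFFE == 0xFEEDFACE:
            endian = ">"
        else:
            return None
        magic, cputype, _sub, filetype, _ncmds, _size, flags = \
            struct.unpack_from(endian + "7I", buf, 0)
        return {"bits": 64 if magic & 1 else 32,
                "cpu_is_64bit": bool(cputype & 0x01000000),
                "cputype": cputype & 0x00FFFFFF,
                "filetype": FILETYPES.get(filetype, "filetype=%d" % filetype),
                "flags": flags}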
diff --git a/contrib/libs/libmagic/magic/Magdir/macintosh b/contrib/libs/libmagic/magic/Magdir/macintosh
deleted file mode 100644
index a74aac487c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/macintosh
+++ /dev/null
@@ -1,505 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: macintosh,v 1.36 2022/12/06 18:45:20 christos Exp $
-# macintosh description
-#
-# BinHex is the Macintosh ASCII-encoded file format (see also "apple")
-# Daniel Quinlan, quinlan@yggdrasil.com
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/BinHex
-# Reference: http://fileformats.archiveteam.org/wiki/BinHex
-# Note: only tested with version 4.0 and hqx extension
-# Any text/binary before the characteristic comment sentence is to be ignored like in
-# http://ftp.vim.org/pub/ftp/ftp/infomac/disk/mac-update-40b7.hqx
-0 search/1602 (This\ file\
->&0 use binhex
-# http://ftp.vim.org/pub/ftp/ftp/infomac/_Disk_&_File/zap-res-forks-101.hqx
-0 search/2652/b (This\ file\
->&0 use binhex
-0 name binhex
-# keep split search string format similar to that in version 5.37
->0 string must\ be\ converted\ with\ BinHex\ BinHex binary text, version
-# http://www.macdisk.com/binhexen.php3
-!:apple BNHQTEXT
-# http://www.faqs.org/faqs/macintosh/comm-faq/part1/
->>&0 string 1.0 1.0
-!:mime application/mac-binhex
-!:ext hex
->>&0 string 2.0 2.0
-!:mime application/mac-binhex
-!:ext hcx
-# BinHex 3.0 never existed
->>&0 string 4.0 4.0
-!:mime application/mac-binhex40
-!:ext hqx
-# BinHex 5.0 also MacBinary I
->>&0 string 5.0 5.0
-!:mime application/mac-binhex40
-!:ext hqx
-# this should never happen
->>&0 default x
->>>&0 string x %.3s
-!:mime application/mac-binhex
-!:ext hqx
-
-# Stuffit archives are the de facto standard of compression for Macintosh
-# files obtained from most archives. (franklsm@tuns.ca)
-0 string SIT! StuffIt Archive (data)
-!:mime application/x-stuffit
-!:apple SIT!SIT!
->2 string x : %s
-0 string SITD StuffIt Deluxe (data)
->2 string x : %s
-0 string Seg StuffIt Deluxe Segment (data)
->2 string x : %s
-
-# Newer StuffIt archives (grant@netbsd.org)
-0 string StuffIt StuffIt Archive
-!:mime application/x-stuffit
-!:apple SIT!SIT!
-#>162 string >0 : %s
-
-# Macintosh Applications and Installation binaries (franklsm@tuns.ca)
-# GRR: Too weak
-#0 string APPL Macintosh Application (data)
-#>2 string x \b: %s
-
-# Macintosh System files (franklsm@tuns.ca)
-# GRR: Too weak
-#0 string zsys Macintosh System File (data)
-#0 string FNDR Macintosh Finder (data)
-#0 string libr Macintosh Library (data)
-#>2 string x : %s
-#0 string shlb Macintosh Shared Library (data)
-#>2 string x : %s
-#0 string cdev Macintosh Control Panel (data)
-#>2 string x : %s
-#0 string INIT Macintosh Extension (data)
-#>2 string x : %s
-#0 string FFIL Macintosh Truetype Font (data)
-#>2 string x : %s
-#0 string LWFN Macintosh Postscript Font (data)
-#>2 string x : %s
-
-# Additional Macintosh Files (franklsm@tuns.ca)
-# GRR: Too weak
-#0 string PACT Macintosh Compact Pro Archive (data)
-#>2 string x : %s
-#0 string ttro Macintosh TeachText File (data)
-#>2 string x : %s
-#0 string TEXT Macintosh TeachText File (data)
-#>2 string x : %s
-#0 string PDF Macintosh PDF File (data)
-#>2 string x : %s
-
-# MacBinary format (Eric Fischer, enf@pobox.com)
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/MacBinary
-# http://fileformats.archiveteam.org/wiki/MacBinary
-# Reference: https://files.stairways.com/other/macbinaryii-standard-info.txt
-# Note: verified by macutils `macunpack -i -v BBEdit4.0.sit.bin` and
-# `deark -l -d -m macbinary G3FirmwareUpdate1.1.smi.bin`
-#
-# Unfortunately MacBinary doesn't really have a magic number prior
-# to the MacBinary III format.
-#
-
-# old version number, must be kept at zero for compatibility
-0 byte 0
-# length of filename (must be in the range 1-63)
->1 ubyte >0
-# skip T.PIC.LZ INSTRUMENT.7T INVENTORY
->>1 ubyte <64
-# skip Docs.MWII ReadMe.MacWrite "Notes (MacWrite II)"
-# by looking for printable characters at beginning of file name
->>>2 ubelong >0x1F000000
-# zero fill, must be zero for compatibility
->>>>74 byte 0
-# zero fill, must be zero for compatibility
->>>>>82 byte 0
-# skip few DEGAS mid-res uncompressed bitmap (GEMINI03.PI2 CODE_RAM.PI2) with "too high" file names ffffff88 ffff4f00
->>>>>>2 ubelong <0xffff0000
-# MacBinary I test for valid version numbers
->>>>>>>122 ubeshort 0
-# additional check for undefined header fields in MacBinary I
-#>>>>>>>>101 ulong 0
->>>>>>>>0 use mac-bin
-# MacBinary II the newer versions begins at 129
->>>>>>>122 ubeshort 0x8181
->>>>>>>>0 use mac-bin
-# MacBinary III with MacBinary II to read
->>>>>>122 ubeshort 0x8281
->>>>>>>0 use mac-bin
-
-# display information of MacBinary file
-0 name mac-bin
->122 ubyte x MacBinary
-# versions for MacBinary II/III
->122 ubyte 129 II
->122 ubyte 130 III
-# only in MacBinary III
->>102 string !mBIN with surprising version
-!:mime application/x-macbinary
-!:apple PSPTBINA
-!:ext bin/macbin
-# THIS SHOULD NEVER HAPPEN! Maybe another file type is misidentified as MacBinary
-#>1 ubyte >63 \b, name length %u too BIG!
-#>122 ubeshort x \b, version %#x
-# Finder flags if not 0
-# >73 byte !0 \b, flags 0x
-# >73 byte =0
-# >>101 byte !0 \b, flags 0x
-# # original Finder flags (Bits 8-15)
-# >73 byte !0 \b%x
-# # finder flags, bits 0-7
-# >101 byte !0 \b%x
->73 byte &0x01 \b, inited
->73 byte &0x02 \b, changed
->73 byte &0x04 \b, busy
->73 byte &0x08 \b, bozo
->73 byte &0x10 \b, system
->73 byte &0x20 \b, bundle
->73 byte &0x40 \b, invisible
->73 byte &0x80 \b, locked
-
-# 75 beshort # vertical posn in window
-#>75 beshort !0 \b, v.pos %u
-# 77 beshort # horiz posn in window
-#>77 beshort !0 \b, h.pos %u
-# 79 beshort # window or folder ID
->79 ubeshort !0 \b, ID %#x
-# protected flag
->81 byte !0 \b, protected %#x
-# length of comment after resource
->99 ubeshort !0 \b, comment length %u
-# char. code of file name
->106 ubyte !0 \b, char. code %#x
-# still more Finder flags
->107 ubyte !0 \b, more flags %#x
-# length of total files when unpacked; only used when packing and unpacking on the fly
->116 ubelong !0 \b, total length %u
-# 120 beshort # length of add'l header
->120 ubeshort !0 \b, 2nd header length %u
-# 124 beshort # checksum
-#>124 ubeshort !0 \b, CRC %#x
-# creation date in seconds since MacOS epoch start. So 1 Jan 1970 ~ 7C25B080
-# few (31/1247) examples (hinkC4.0.sitx.bin InternetExplorer5.1.smi.bin G3FirmwareUpdate1.1.smi.bin Firewire2.3.3.smi.bin LR2image.bin) contain zeroed date fields
->91 long !0
->>91 beldate-0x7C25B080 x \b, %s
-# THIS SHOULD NEVER HAPPEN! Maybe another file type is misidentified or time overflow
->91 ubelong <0x7c25b080 INVALID date
-# reported date seconds by deark
-#>91 ubelong x deark-DATE=%u
-# last modified date
->95 long !0
->>95 beldate-0x7C25B080 x \b, modified %s
-# Apple creator+typ if not null
-# file creator (normally expressed as four characters)
->69 ulong !0 \b, creator
-# instead 4 character code display full creator name
->>69 use apple-creator
-# file type (normally expressed as four characters)
->65 ulong !0 \b, type
->>65 use apple-type
-# length of data segment
->83 ubelong !0 \b, %u bytes
-# filename (in the range 1-63)
-# like "BBEdit4.0.sit" "Archive.sitx" "MacPGP 2.2 (.sea)"
->1 pstring x "%s"
-# print 1 space and then at offset 128 inspect data fork content if it has one
->83 ubelong !0 \b
->>128 indirect x
-# Afterwards resource fork if length of resource segment not zero
->87 ubelong !0
-# calculate resource fork offset
->>83 ubelong+128 x \b, at %#x
-# length of resource segment
->>87 ubelong !0 %u bytes
->>(83.S+128) ubequad x resource
-# further resource fork content inspection
->>>&-8 indirect x
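Because MacBinary only gained a real signature with version III, the test above stacks sanity checks: byte 0 is zero, the name length at byte 1 is 1-63, bytes 74 and 82 are zero, and the version word at 122 is 0x0000 (I), 0x8181 (II) or 0x8281 (III). Type and creator codes sit at 65 and 69, the data and resource fork lengths at 83 and 87, and the timestamps at 91 and 95 count seconds from the 1904 Mac epoch, hence the -0x7C25B080 adjustment. A sketch of the same checks:

    import struct
    from datetime import datetime, timezone

    MAC_EPOCH_DELTA = 0x7C25B080          # seconds from 1904-01-01 to 1970-01-01

    def parse_macbinary(buf):
        if len(buf) < 128 or buf[0] != 0 or not 1 <= buf[1] <= 63:
            return None
        if buf[74] != 0 or buf[82] != 0:
            return None
        version = {0x0000: "I", 0x8181: "II", 0x8281: "III"}.get(
            struct.unpack_from(">H", buf, 122)[0])
        if version is None:
            return None
        to_unix = lambda t: (datetime.fromtimestamp(t - MAC_EPOCH_DELTA, timezone.utc)
                             if t else None)
        data_len, resource_len, created, modified = struct.unpack_from(">4I", buf, 83)
        return {"version": version,
                "name": buf[2:2 + buf[1]].decode("mac_roman", "replace"),
                "type": buf[65:69], "creator": buf[69:73],
                "data_fork": data_len, "resource_fork": resource_len,
                "created": to_unix(created), "modified": to_unix(modified)}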
-
-# Apple Type/Creator Database
-# URL: https://en.wikipedia.org/wiki/Type_code
-# Reference: https://www.lacikam.co.il/tcdb/
-# https://www.macdisk.com/macsigen.php
-# Note: classic Mac OS files have two 4 character codes for type and creator.
-# Thereby the Finder attach documents types to applications.
-
-#>65 string x \b, type "%4.4s"
-
-# display information about apple type
-0 name apple-type
->0 string 8BIM PhotoShop
->0 string ALB3 PageMaker 3
->0 string ALB4 PageMaker 4
->0 string ALT3 PageMaker 3
->0 string APPL application
->0 string AWWP AppleWorks word processor
->0 string CIRC simulated circuit
->0 string DRWG MacDraw
->0 string EPSF Encapsulated PostScript
->0 string FFIL font suitcase
->0 string FKEY function key
->0 string FNDR Macintosh Finder
->0 string GIFf GIF image
->0 string Gzip GNU gzip
->0 string INIT system extension
->0 string LIB\ library
->0 string LWFN PostScript font
->0 string MSBC Microsoft BASIC
->0 string PACT Compact Pro archive
->0 string PDF\ Portable Document Format
->0 string PICT picture
->0 string PNTG MacPaint picture
->0 string PREF preferences
->0 string PROJ Think C project
->0 string QPRJ Think Pascal project
->0 string SCFL Defender scores
->0 string SCRN startup screen
->0 string SITD StuffIt Deluxe
->0 string SPn3 SuperPaint
->0 string STAK HyperCard stack
->0 string Seg\ StuffIt segment
->0 string TARF Unix tar archive
->0 string TEXT ASCII
->0 string TIFF TIFF image
->0 string TOVF Eudora table of contents
->0 string WDBN Microsoft Word word processor
->0 string WORD MacWrite word processor
->0 string XLS\ Microsoft Excel
->0 string ZIVM compress (.Z)
->0 string ZSYS Pre-System 7 system file
->0 string acf3 Aldus FreeHand
->0 string cdev control panel
->0 string dfil Desk Accessory suitcase
->0 string libr library
->0 string nX^d WriteNow word processor
->0 string nX^w WriteNow dictionary
->0 string rsrc resource
->0 string scbk Scrapbook
->0 string shlb shared library
->0 string ttro SimpleText read-only
->0 string zsys system file
-
-# additional types added in Dec 2017
->0 string BINA binary file
->0 string BMPp BMP image
->0 string JPEG JPEG image
-#>0 string W4BN Microsoft Word x.y word processor?
-# if type name is not known display 4 character identifier
->0 default x
->>0 string x '%4.4s'
-
-#>69 string x \b, creator "%4.4s"
-
-# Now Apple has no repository of registered Creator IDs any more. These are
-# just the ones that I happened to have files from and was able to identify.
-
-# display information about apple creator
-0 name apple-creator
->0 string 8BIM Adobe Photoshop
->0 string ALD3 PageMaker 3
->0 string ALD4 PageMaker 4
->0 string ALFA Alpha editor
->0 string APLS Apple Scanner
->0 string APSC Apple Scanner
->0 string BRKL Brickles
->0 string BTFT BitFont
->0 string CCL2 Common Lisp 2
->0 string CCL\ Common Lisp
->0 string CDmo The Talking Moose
->0 string CPCT Compact Pro
->0 string CSOm Eudora
->0 string DMOV Font/DA Mover
->0 string DSIM DigSim
->0 string EDIT Macintosh Edit
->0 string ERIK Macintosh Finder
->0 string EXTR self-extracting archive
->0 string Gzip GNU gzip
->0 string KAHL Think C
->0 string LWFU LaserWriter Utility
->0 string LZIV compress
->0 string MACA MacWrite
->0 string MACS Macintosh operating system
->0 string MAcK MacKnowledge terminal emulator
->0 string MLND Defender
->0 string MPNT MacPaint
->0 string MSBB Microsoft BASIC (binary)
->0 string MSWD Microsoft Word
->0 string NCSA NCSA Telnet
->0 string PJMM Think Pascal
->0 string PSAL Hunt the Wumpus
-#>0 string PSI2 Apple File Exchange
->0 string R*ch BBEdit
->0 string RMKR Resource Maker
->0 string RSED Resource Editor
->0 string Rich BBEdit
->0 string SIT! StuffIt
->0 string SPNT SuperPaint
->0 string Unix NeXT Mac filesystem
->0 string VIM! Vim editor
->0 string WILD HyperCard
->0 string XCEL Microsoft Excel
->0 string aCa2 Fontographer
->0 string aca3 Aldus FreeHand
->0 string dosa Macintosh MS-DOS file system
->0 string movr Font/DA Mover
->0 string nX^n WriteNow
->0 string pdos Apple ProDOS file system
->0 string scbk Scrapbook
->0 string ttxt SimpleText
->0 string ufox Foreign File Access
-# additional creators added in Dec 2017
-# Claris/Apple Works
->0 string BOBO Apple Works
-# CU-SeeMe_0.87b3_(68K).bin
-#>0 string CUce bar
->0 string PSPT Apple File Exchange
-# Disk_Copy_4.2.sea.bin
-#>0 string NCse foo
-# probably StuffIt/Aladdin by Smith Micro Software, Inc.
->0 string STi0 stuffit
-# MacGzip-1.1.3.sea.bin
-#>0 string aust bar
-# D-Disk_Copy_6.3.3.smi.bin
->0 string oneb Disk Copy Self Mounting
-# if creator name is not known display 4 character identifier
->0 default x
->>0 string x '%4.4s'
-
-# sas magic from Bruce Foster (bef@nwu.edu)
-#
-#0 string SAS SAS
-#>8 string x %s
-0 string SAS SAS
->24 string DATA data file
->24 string CATALOG catalog
->24 string INDEX data file index
->24 string VIEW data view
-# sas 7+ magic from Reinhold Koch (reinhold.koch@roche.com)
-#
-0x54 string SAS SAS 7+
->0x9C string DATA data file
->0x9C string CATALOG catalog
->0x9C string INDEX data file index
->0x9C string VIEW data view
-
-# spss magic for SPSS system and portable files,
-# from Bruce Foster (bef@nwu.edu).
-
-0 long 0xc1e2c3c9 SPSS Portable File
->40 string x %s
-
-0 string $FL2 SPSS System File
->24 string x %s
-
-0 string $FL3 SPSS System File
->24 string x %s
-
-# Macintosh filesystem data
-# From "Tom N Harris" <telliamed@mac.com>
-# Fixed HFS+ and Partition map magic: Ethan Benson <erbenson@alaska.net>
-# The MacOS epoch begins on 1 Jan 1904 instead of 1 Jan 1970, so these
-# entries depend on the date arithmetic added after v.35
-# There's also some Pascal strings in here, ditto...
-
-# The boot block signature, according to IM:Files, is
-# "for HFS volumes, this field always contains the value 0x4C4B."
-# But if this is true for MFS or HFS+ volumes, I don't know.
-# Alternatively, the boot block is supposed to be zeroed if it's
-# unused, so a simply >0 should suffice.
-
-0x400 beshort 0xD2D7 Macintosh MFS data
->0 beshort 0x4C4B (bootable)
->0x40a beshort &0x8000 (locked)
->0x402 beldate-0x7C25B080 x created: %s,
->0x406 beldate-0x7C25B080 >0 last backup: %s,
->0x414 belong x block size: %d,
->0x412 beshort x number of blocks: %d,
->0x424 pstring x volume name: %s
-
-# *.hfs updated by Joerg Jenderek
-# https://en.wikipedia.org/wiki/Hierarchical_File_System
-# "BD" gives many false positives
-0x400 beshort 0x4244
-# ftp://ftp.mars.org/pub/hfs/hfsutils-3.2.6.tar.gz/hfsutils-3.2.6/libhfs/apple.h
-# first block of volume bit map (always 3)
->0x40e ubeshort 0x0003
-# maximal length of volume name is 27
->>0x424 ubyte <28 Macintosh HFS data
-!:mime application/x-apple-diskimage
-#!:apple hfsdINIT
-#!:apple MACSdisk
-# https://www.macdisk.com/macsigen.php
-#!:apple ddskdevi
-!:apple ????devi
-# https://en.wikipedia.org/wiki/Apple_Disk_Image
-!:ext hfs/dmg
->>>0 beshort 0x4C4B (bootable)
-#>>>0 beshort 0x0000 (not bootable)
->>>0x40a beshort &0x8000 (locked)
->>>0x40a beshort ^0x0100 (mounted)
->>>0x40a beshort &0x0200 (spared blocks)
->>>0x40a beshort &0x0800 (unclean)
->>>0x47C beshort 0x482B (Embedded HFS+ Volume)
-# https://www.epochconverter.com/
-# 0x7C245F00 seconds ~ 2082758400 ~ 01 Jan 2036 00:00:00 ~ 66 years to 1970
-# 0x7C25B080 seconds ~ 2082844800 ~ 02 Jan 2036 00:00:00
-# construct not working
-#>>>0x402 beldate-0x7C25B080 x created: %s,
-#>>>0x406 beldate-0x7C25B080 x last modified: %s,
-#>>>0x440 beldate-0x7C25B080 >0 last backup: %s,
-# found block sizes 200h,1200h,2800h
->>>0x414 belong x block size: %d,
->>>0x412 beshort x number of blocks: %d,
->>>0x424 pstring x volume name: %s
-
-0 name hfsplus
->&0 beshort x version %d data
->0 beshort 0x4C4B (bootable)
->0x404 belong ^0x00000100 (mounted)
->&2 belong &0x00000200 (spared blocks)
->&2 belong &0x00000800 (unclean)
->&2 belong &0x00008000 (locked)
->&6 string x last mounted by: '%.4s',
-# really, that should be treated as a belong and we print a string
-# based on the value. TN1150 only mentions '8.10' for "MacOS 8.1"
->&14 beldate-0x7C25B080 x created: %s,
-# only the creation date is local time, all other timestamps in HFS+ are UTC.
->&18 bedate-0x7C25B080 x last modified: %s,
->&22 bedate-0x7C25B080 >0 last backup: %s,
->&26 bedate-0x7C25B080 >0 last checked: %s,
->&38 belong x block size: %d,
->&42 belong x number of blocks: %d,
->&46 belong x free blocks: %d
-
-0x400 beshort 0x482B Apple HFS Plus
->&0 use hfsplus
-0x400 beshort 0x4858 Apple HFS Plus Extended
->&0 use hfsplus
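All of the classic Apple volume formats keep their signature at byte 0x400: 0xD2D7 for MFS, "BD" for HFS (backed up by the bitmap-start word 0x0003 at 0x40E, since "BD" alone gives many false positives), 0x482B ("H+") for HFS+ and 0x4858 ("HX") for the case-sensitive variant. A short classifier over a buffer holding at least the first 2 KiB of the volume:

    import struct

    def classify_apple_volume(buf):
        if len(buf) < 0x430:
            return None
        sig = buf[0x400:0x402]
        if sig == b"BD" and struct.unpack_from(">H", buf, 0x40E)[0] == 0x0003:
            return "Macintosh HFS data"
        if sig == b"H+":
            return "Apple HFS Plus"
        if sig == b"HX":
            return "Apple HFS Plus Extended"
        if struct.unpack_from(">H", buf, 0x400)[0] == 0xD2D7:
            return "Macintosh MFS data"
        return None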
-
-## AFAIK, only the signature is different
-# same as Apple Partition Map
-# GRR: This magic is too weak, it is just "TS"
-#0x200 beshort 0x5453 Apple Old Partition data
-#>0x2 beshort x block size: %d,
-#>0x230 string x first type: %s,
-#>0x210 string x name: %s,
-#>0x254 belong x number of blocks: %d,
-#>0x400 beshort 0x504D
-#>>0x430 string x second type: %s,
-#>>0x410 string x name: %s,
-#>>0x454 belong x number of blocks: %d,
-#>>0x800 beshort 0x504D
-#>>>0x830 string x third type: %s,
-#>>>0x810 string x name: %s,
-#>>>0x854 belong x number of blocks: %d,
-#>>>0xa00 beshort 0x504D
-#>>>>0xa30 string x fourth type: %s,
-#>>>>0xa10 string x name: %s,
-#>>>>0xa54 belong x number of blocks: %d
-
-# From: Remi Mommsen <mommsen@slac.stanford.edu>
-0 string BOMStore Mac OS X bill of materials (BOM) file
-
diff --git a/contrib/libs/libmagic/magic/Magdir/macos b/contrib/libs/libmagic/magic/Magdir/macos
deleted file mode 100644
index 0bacc13a48..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/macos
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: macos,v 1.1 2012/12/21 16:41:07 christos Exp $
-# MacOS files
-#
-
-0 string book\0\0\0\0mark\0\0\0\0 MacOS Alias file
diff --git a/contrib/libs/libmagic/magic/Magdir/magic b/contrib/libs/libmagic/magic/Magdir/magic
deleted file mode 100644
index c8aa054b72..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/magic
+++ /dev/null
@@ -1,71 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: magic,v 1.11 2023/06/27 13:42:49 christos Exp $
-# magic: file(1) magic for magic files
-#
-# Update: Joerg Jenderek
-# skip Magicsee_R1.cfg found on retropie starting with # Magicsee R1 one-handed controller
-0 string/t #\ Magic\ magic text file for file(1) cmd
-#!:mime text/plain
-!:mime text/x-file
-# no suffix in ../Header
-!:ext /
-#
-# some samples start with a comment line
-0 ubyte =0x23
-# many samples start with separator line
->4 string --------
->>0 use magic-fragment
-# few samples with 1st comment line and without separator comment line
->4 default x
-# few samples with 1st comment line and without separator comment line and regular expression like: sisu
->>1 search/112 regex\x09
->>>0 use magic-fragment
->>1 default x
-# few samples with 1st comment line and without separator comment line and string value like:
-# blcr bsi selinux ssh (file 3.34) digital gnu wordperfect
->>>1 search/471 string\x09
->>>>0 use magic-fragment
->>>1 default x
-# few samples with 1st comment line and without separator comment line and short value like:
-# (file 3.34) os9 osf1
->>>>1 search/1716 short\x09
->>>>>0 use magic-fragment
-# but many samples start with an empty first line
-0 ubyte =0x0A
-# many samples start with separator comment line
->4 string --------
->>0 use magic-fragment
-# few samples with 1st empty line and without separator comment line like: biosig espressif
->4 default x
->>1 search/581 \041:mime
->>>0 use magic-fragment
-# display information (lines) about magic text fragment
-0 name magic-fragment
->0 string x magic text fragment for file(1) cmd
-!:mime text/x-file
-# most without suffix but mail.news varied.out varied.script
-!:ext /news/out/script
-# next lines are mainly for control reasons
-# some (34/339) samples start comment line
->0 ubyte !0x0A
->>0 string x \b, 1st line "%s"
->>>&1 string x \b, 2nd line "%s"
-# but most (305/339) samples start with an empty first line
->0 ubyte =0x0A
->>1 string x \b, 2nd line "%s"
->>>&1 string x \b, 3rd line "%s"
-#
-# URL: http://en.wikipedia.org/wiki/File_(command)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/m/mgc.trid.xml
-# Note: called "magic compiled data (LE)" by TrID
-0 lelong 0xF11E041C magic binary file for file(1) cmd
-#!:mime application/octet-stream
-!:mime application/x-file
-!:ext mgc
->4 lelong x (version %d) (little endian)
-0 belong 0xF11E041C magic binary file for file(1) cmd
-#!:mime application/octet-stream
-!:mime application/x-file
-!:ext mgc
->4 belong x (version %d) (big endian)
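Compiled magic databases begin with the word 0xF11E041C in the producing host's byte order, followed by the format version; checking both orders reproduces the pair of entries above:

    import struct

    def mgc_info(buf):
        for endian, label in (("<", "little endian"), (">", "big endian")):
            if struct.unpack_from(endian + "I", buf, 0)[0] == 0xF11E041C:
                version = struct.unpack_from(endian + "I", buf, 4)[0]
                return "magic binary file for file(1) cmd (version %d) (%s)" % (version, label)
        return None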
diff --git a/contrib/libs/libmagic/magic/Magdir/mail.news b/contrib/libs/libmagic/magic/Magdir/mail.news
deleted file mode 100644
index 3ca3b405f6..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mail.news
+++ /dev/null
@@ -1,132 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: mail.news,v 1.30 2022/10/31 13:22:26 christos Exp $
-# mail.news: file(1) magic for mail and news
-#
-# Unfortunately, saved netnews also has From line added in some news software.
-#0 string From mail text
-0 string/t Relay-Version: old news text
-!:mime message/rfc822
-0 string/t #!\ rnews batched news text
-!:mime message/rfc822
-0 string/t N#!\ rnews mailed, batched news text
-!:mime message/rfc822
-0 string/t Forward\ to mail forwarding text
-!:mime message/rfc822
-0 string/t Pipe\ to mail piping text
-!:mime message/rfc822
-0 string/tc delivered-to: SMTP mail text
-!:mime message/rfc822
-0 string/tc return-path: SMTP mail text
-!:mime message/rfc822
-0 string/t Path: news text
-!:mime message/news
-0 string/t Xref: news text
-!:mime message/news
-0 string/t From: news or mail text
-!:mime message/rfc822
-0 string/t Date: news or mail text
-!:mime message/rfc822
-0 string/t Article saved news text
-!:mime message/news
-# Reference: http://quimby.gnus.org/notes/BABYL
-# Update: Joerg Jenderek
-# Note: used by Rmail in Emacs version 22 and before
-# is not text because of characters like Control-L Control-_
-0 string/b BABYL\ OPTIONS: Emacs RMAIL
-#0 string/t BABYL Emacs RMAIL text
-# https://reposcope.com/mimetype/message/x-gnu-rmail
-!:mime message/x-gnu-rmail
-# ~/RMAIL
-!:ext /
-0 string/t Received: RFC 822 mail text
-!:mime message/rfc822
-0 string/t MIME-Version: MIME entity text
-#0 string/t Content- MIME entity text
-
-# TNEF files...
-# URL: http://fileformats.archiveteam.org/wiki/Transport_Neutral_Encapsulation_Format
-# https://en.wikipedia.org/wiki/Transport_Neutral_Encapsulation_Format
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/t/tnef.trid.xml
-# https://interoperability.blob.core.windows.net/files/MS-OXTNEF/%5bMS-OXTNEF%5d-210817.pdf
-# Update: Joerg Jenderek
-# Note: moved and merged from ./msdos (version 1.154) there just called "TNEF"
-# partly verified by `tnef --list -v -f voice.tnef` and `ytnef -v triples.tnef`
-# TNEF magic From "Joomy" <joomy@se-ed.net>
-# TNEF_SIGNATURE
-0 lelong 0x223E9F78 Transport Neutral Encapsulation Format (TNEF)
-!:mime application/vnd.ms-tnef
-# winmail.dat or win.dat by Microsoft Outlook
-!:ext tnef/dat
-# https://docs.microsoft.com/en-us/openspecs/exchange_server_protocols/ms-oxtnef/7fdb64ee-7f63-4d95-9af1-c672e7475c3a
-# LegacyKey
-#>4 uleshort x \b, key %#4.4x
-# attrLevelMessage; Level where attribute applies like: 1~attrLevelMessage 2~attrLevelAttachment
->6 ubyte !1 \b, 1st level %#2.2x
-# other ID (like 02900000h) or TnefVersion ID (idTnefVersion=06900800h)
->7 ubelong !0x06900800 \b, 1st id %#8.8x
->7 ubelong =0x06900800
-# TnefVersion length like: 4
->>11 ulelong !4 \b, TnefVersion length %x
-# TNEFVersionData; TnefVersion data like: 00010000h
->>15 ulelong !0x00010000h \b, version %#8.8x
-# Checksum like: 1
->>19 uleshort !1 \b, checksum %#4.4x
-# attrLevelMessage; level of attOemCodepage like: 1
->>21 ubyte !1 \b, level %#2.2x
-# idOEMCodePage; OEMCodePage ID like: 07900600h
->>22 ubelong =0x07900600 \b, OEM codepage
-# OEMCodePage length like: 8
->>>26 ulelong =8
-# OEMCodePageData; PrimaryCodePage like: 1251 1252
->>>>30 ulelong x %u
-# OEMCodePageData; SecondaryCodePage; unused and SHOULD contain zero
->>>>34 ulelong !0 and %u
-# OEMCodePageData Checksum like: E7h E8h
->>>>38 uleshort x (checksum %#x)
-# attrLevelMessage of attMessageClass like: 1
->>40 ubyte !1 \b, level %u
-# idMessageClass; ID of attMessageClass like: 08800700h
->>41 ubelong =0x08800700 \b, MessageAttribute
-# attMessageClass length like: 16 24 25
-#>>>45 ulelong x (length %u)
-# attMessageClass data like: "IPM.Microsoft Mail.Note" "IPM.Note.Portada Newseum"
-# "IPM.Appointment" "IPM.Note.Microsoft.Voicemail.UM.CA"
->>>45 pstring/l x "%s"
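After the 0x223E9F78 signature and a two-byte legacy key, a TNEF stream is a run of attributes, each encoded as a level byte, a 32-bit attribute id, a 32-bit length, the payload and a 16-bit checksum; the fixed offsets above (6, 7, 11, 15, 19, 21, ...) simply walk the first two attributes by hand. The rules print the ids in raw big-endian form, while the generic walker sketched below reads them little-endian:

    import struct

    TNEF_SIGNATURE = 0x223E9F78

    def tnef_attributes(buf):
        """Yield (level, attribute_id, payload) for each attribute in a TNEF stream."""
        if len(buf) < 6 or struct.unpack_from("<I", buf, 0)[0] != TNEF_SIGNATURE:
            return
        pos = 6                                   # 4-byte signature + 2-byte legacy key
        while pos + 9 <= len(buf):
            level = buf[pos]
            attr_id, length = struct.unpack_from("<II", buf, pos + 1)
            yield level, attr_id, buf[pos + 9:pos + 9 + length]
            pos += 9 + length + 2                 # payload plus trailing checksum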
-
-# From: Kevin Sullivan <ksulliva@psc.edu>
-0 string *mbx* MBX mail folder
-
-# From: Simon Matter <simon.matter@invoca.ch>
-0 string \241\002\213\015skiplist\ file\0\0\0 Cyrus skiplist DB
-0 string \241\002\213\015twoskip\ file\0\0\0\0 Cyrus twoskip DB
-
-# JAM(mbp) Fidonet message area databases
-# JHR file
-0 string JAM\0 JAM message area header file
->12 leshort >0 (%d messages)
-
-# Squish Fidonet message area databases
-# SQD file (requires at least one message in the area)
-# XXX: Weak magic
-#256 leshort 0xAFAE4453 Squish message area data file
-#>4 leshort >0 (%d messages)
-
-#0 string \<!--\ MHonArc text/html; x-type=mhonarc
-
-# Cyrus: file(1) magic for compiled Cyrus sieve scripts
-# URL: https://www.cyrusimap.org/docs/cyrus-imapd/2.4.6/internal/bytecode.php
-# URL: http://git.cyrusimap.org/cyrus-imapd/tree/sieve/bytecode.h?h=master
-# From: Philipp Hahn <hahn@univention.de>
-
-# Compiled Cyrus sieve script
-0 string CyrSBytecode Cyrus sieve bytecode data,
->12 belong =1 version 1, big-endian
->12 lelong =1 version 1, little-endian
->12 belong x version %d, network-endian
-
-# Dovecot mail server, version 2.2 and later.
-# Dovecot mailing list: dovecot@dovecot.org
-# File format spec: https://wiki.dovecot.org/Design/Dcrypt/#File_format
-# From: Stephen Gildea
-0 string CRYPTED\003\007 Dovecot encrypted message
->9 byte x \b, dcrypt version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/make b/contrib/libs/libmagic/magic/Magdir/make
deleted file mode 100644
index 1abdf7a3ee..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/make
+++ /dev/null
@@ -1,21 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: make,v 1.5 2022/03/12 15:09:47 christos Exp $
-# make: file(1) magic for makefiles
-#
-# URL: https://en.wikipedia.org/wiki/Make_(software)
-0 regex/100l \^(CFLAGS|VPATH|LDFLAGS|all:|\\.PRECIOUS) makefile script text
-!:mime text/x-makefile
-!:strength -15
-# Update: Joerg Jenderek
-# Reference: https://www.freebsd.org/cgi/man.cgi?make(1)
-# exclude grub-core\lib\libgcrypt\mpi\Makefile.am with "#BEGIN_ASM_LIST"
-# by additional escaping point character
-# exclude MS Windows help file CoNtenT with ":include FOOBAR.CNT"
-# and NSIS script with "!include" by additional escaping point character
-0 regex/100l \^\\.(BEGIN|endif|include) BSD makefile script text
-!:mime text/x-makefile
-!:ext /mk
-!:strength -10
-0 regex/100l \^SUBDIRS[[:space:]]+= automake makefile script text
-!:mime text/x-makefile
-!:strength -15
diff --git a/contrib/libs/libmagic/magic/Magdir/map b/contrib/libs/libmagic/magic/Magdir/map
deleted file mode 100644
index 2d56df0156..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/map
+++ /dev/null
@@ -1,413 +0,0 @@
-
-
-#------------------------------------------------------------------------------
-# $File: map,v 1.10 2023/02/03 20:41:57 christos Exp $
-# map: file(1) magic for Map data
-#
-
-# Garmin .FIT files https://pub.ks-and-ks.ne.jp/cycling/edge500_fit.shtml
-8 string .FIT FIT Map data
->15 byte 0
->>35 belong x \b, unit id %d
->>39 lelong x \b, serial %u
-# https://pub.ks-and-ks.ne.jp/cycling/edge500_fit.shtml
-# 20 years after unix epoch
-# TZ=GMT date -d '1989-12-31 0:00' +%s
->>43 leldate+631065600 x \b, %s
-
->>47 leshort x \b, manufacturer %d
->>47 leshort 1 \b (garmin)
->>49 leshort x \b, product %d
->>53 byte x \b, type %d
->>53 byte 1 \b (Device)
->>53 byte 2 \b (Settings)
->>53 byte 3 \b (Sports/Cycling)
->>53 byte 4 \b (Activity)
->>53 byte 8 \b (Elevations)
->>53 byte 10 \b (Totals)
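The timestamp at offset 43 of these .FIT headers counts seconds from the Garmin epoch, 1989-12-31 00:00 UTC, which is exactly the 631065600 the rule adds before printing. A sketch of the same fields (the mixed big/little endianness follows the rules above):

    import struct
    from datetime import datetime, timezone

    GARMIN_EPOCH_OFFSET = 631065600       # TZ=GMT date -d '1989-12-31 0:00' +%s

    def parse_fit_map(buf):
        if len(buf) < 54 or buf[8:12] != b".FIT" or buf[15] != 0:
            return None
        unit_id = struct.unpack_from(">I", buf, 35)[0]
        serial, stamp = struct.unpack_from("<II", buf, 39)
        manufacturer, product = struct.unpack_from("<hh", buf, 47)
        return {"unit_id": unit_id, "serial": serial,
                "time": datetime.fromtimestamp(stamp + GARMIN_EPOCH_OFFSET, timezone.utc),
                "manufacturer": manufacturer, "product": product, "type": buf[53]}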
-
-# Summary: Garmin map
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Garmin_.img
-# Reference: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/IMG_File_Format
-# sourceforge.net/projects/garmin-img/files/IMG%20File%20Format/1.0/imgformat-1.0.pdf
-# GRR: similar to MBR boot sector handled by ./filesystems
-0x1FE leshort =0xAA55
-# look for valid map signature
->0x13 string =IMG\0
->>0 use garmin-map
-0 name garmin-map
->0 ubyte x Garmin
-!:mime application/x-garmin-map
-# If non-zero, every byte of the entire .img file is to be XORed with this value
->0 ubyte !0 \b, %#x XORed
-# goto block before FAT
->(0x40.b*512) ubyte x
-# 1st fat name "DLLINFO TXT" only found for vpm
->>&512 string =DLLINFO\ TXT map (Voice Processing)
-# there exist 2 other Garmin VPM formats; see ./audio
-!:ext vpm
-# Deutsch__Yannick_D4481-00_0210.vpm
-#>>>512 search/0x0116da60/s RIFF \b; with
-# determine type voice type by ./riff
-#>>>>&0 indirect x \b
->>&512 string !DLLINFO\ TXT map
-!:ext img
-# 9 zeros
->1 ubelong !0 \b, zeroes %#x
-# Map's version major
->8 ubyte x v%u
-# Map's version minor
->9 ubyte x \b.%.2u
-# Map description[20], 0x20 padded
->0x49 string x %.20s
-# Map name, continued (0x20 padded, \0 terminated)
->0x65 string >\ \b%.31s
-# Update year (+1900 for val >= 0x63, +2000 for val <= 0x62)
->0xB ubyte x \b, updated
->>0xB ubyte >0x62
->>>0xB ubyte-100 x 20%.2u
->>0xB ubyte <0x63
->>>0xB ubyte x 20%.2u
-# Update month (0-11)
->0xA ubyte x \b-%.2u
-# All zeroes
->0xc uleshort !0 \b, zeroes %#x
-# Mapsource flag, 1 - file created by Mapsource, 0 - Garmin map visible in Basecamp and Homeport
-#>0xE ubyte !0 \b, Mapsource flag %#x
->0xE ubyte 1 \b, Mapsource
-# Checksum, sum of all bytes modulo 256 should be 0
-#>0xF ubyte x \b, Checksum %#x
-# Signature: DSKIMG 0x00 or DSDIMG 0x00 for demo map
->0x10 string !DSKIMG \b, signature "%.7s"
->0x39 use garmin-date
-# Map file identifier like GARMIN\0
->0x41 string !GARMIN \b, id "%.7s"
-# Block size exponent, E1; appears to always be 0x09; minimum block size 512 bytes
->0x61 ubyte !0x09 \b, E1=%u
-# Block size exponent, E2 ; file blocksize=2**(E1+E2)
->>0x62 ubyte x \b, E2=%u
->0x61 ubyte =0x09 \b, blocksize
->>0x62 ubyte 0 512
->>0x62 ubyte 1 1024
->>0x62 ubyte 2 2048
->>0x62 ubyte 3 4096
->>0x62 ubyte 4 8192
->>0x62 ubyte 5 16384
->>0x62 default x
->>>0x62 ubyte x E2=%u
-# MBR signature
->0x1FE leshort !0xAA55 \b, invalid MBR
-# 512 zeros
->0x200 uquad !0 \b, zeroes %#llx
-# First sub-file offset (absolute); sometimes NO/UNKNOWN sub file!
->0x40C ulelong >0 \b, at %#x
-# sub-file Header length
-#>>(0x40C.l) uleshort x \b, header len %#x
->>(0x40C.l) uleshort x %u bytes
-# sub-file Type[10] like "GARMIN RGN" "GARMIN TRE", "GARMIN TYP", etc.
->>(0x40C.l+2) ubyte >0x1F
->>>(0x40C.l+2) ubyte <0xFF
->>>>(0x40C.l+2) string x "%.10s"
-# 0x00 for most maps, 0x80 for locked maps (City Nav, City Select, etc.)
->>>>(0x40C.l+13) ubyte >0 \b, locked %#x
-# Block sequence numbers like 0000 0100 0200 ... FFFF
-# >0x420 ubequad >0 \b, seq. %#16.16llx
-# >>0x428 ubequad >0 \b%16.16llx
-# >>>0x430 ubequad >0 \b%16.16llx
-# >>>>0x438 ubequad >0 \b%16.16llx
-# >>>>>0x440 ubequad >0 \b%16.16llx
-# >>>>>>0x448 ubequad >0 \b%16.16llx
-# >>>>>>>0x450 ubequad >0 \b%16.16llx
-# >>>>>>>>0x458 ubequad >0 \b%16.16llx
-# >>>>>>>>>0x460 ubequad >0 \b%16.16llx
-# >>>>>>>>>>0x468 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>0x470 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>0x478 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>0x480 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>>0x488 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>>>0x490 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>>>>0x498 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>>>>>0x4A0 ubequad >0 \b%16.16llx
-# >>>>>>>>>>>>>>>>>>0x4A8 ubequad >0 \b%16.16llx
-# look for end of FAT
-#>>0x420 search/512/s \xff\xff FAT END
-# Physical block number of FAT header
-#>0x40 ubyte x \b, FAT at phy. block %u
->0x40 ubyte x
->>(0x40.b*512) ubyte x
-# 1st FAT block
->>>&511 use garmin-fat
-# 2nd FAT block
->>>&1023 use garmin-fat
-# 3rd FAT block
->>>&1535 use garmin-fat
-# 4th FAT block
->>>&2047 use garmin-fat
-# ... xth FAT block
-#
-# 314 zeros, but not in vpm and also not in gmaptz.img
->0x84 uquad !0 \b, at 0x84 %#llx
-# display FileAllocationTable block entry in garmin map
-0 name garmin-fat
->0 ubyte x \b;
-# sub file part; 0x0003 seems to be garbage
->0x10 uleshort !0 next %#4.4x
->0x10 uleshort =0
-# FAT flag: 0~dummy block, 1~true sub file
->>0 ubyte !1 flag %u
->>0 ubyte =1
-# sub-file name like MAKEGMAP 12345678
->>>0x1 string x %.8s
-# sub-file type like RGN TRE MDR LBL
->>>0x9 string x \b.%.3s
-# size of sub file
->>>0xC ulelong x \b, %u bytes
-# 32-bit block sequence numbers
-#>>>0x20 ubequad x \b, seq. %#16.16llx
-
-# display date stored inside Garmin maps like yyyy-mm-dd h:mm:ss
-0 name garmin-date
-# year like 2018
->0 uleshort x \b, created %u
-# month (0-11)
->2 ubyte x \b-%.2u
-# day (1-31)
->3 ubyte x \b-%.2u
-# hour (0-23)
->4 ubyte x %u
-# minute (0-59)
->5 ubyte x \b:%.2u
-# second (0-59)
->6 ubyte x \b:%.2u
-
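Taken together, the garmin-map and garmin-date rules above describe a fixed-offset header: an optional XOR byte at 0, version bytes at 8/9, the DSKIMG signature at 0x10, a creation timestamp at 0x39, the GARMIN id at 0x41 and the block-size exponents E1/E2 at 0x61/0x62. A minimal Python sketch of the same checks (the function and field names are illustrative, not part of the magic syntax):

import struct

def parse_garmin_img_header(buf: bytes) -> dict:
    """Decode the fixed-offset fields probed by the garmin-map rules."""
    year, = struct.unpack_from("<H", buf, 0x39)           # garmin-date layout: year,
    month, day, hour, minute, second = buf[0x3B:0x40]     # month, day, hour, min, sec
    e1, e2 = buf[0x61], buf[0x62]
    return {
        "xor_byte": buf[0],                    # non-zero: every byte of the file is XORed with this
        "version": f"{buf[8]}.{buf[9]:02d}",   # major at 8, minor at 9
        "signature": buf[0x10:0x17],           # b"DSKIMG\x00" (b"DSDIMG\x00" for demo maps)
        "map_id": buf[0x41:0x48],              # usually b"GARMIN\x00"
        "created": (year, month, day, hour, minute, second),
        "blocksize": 1 << (e1 + e2),           # file block size = 2**(E1+E2), E1 normally 9
    }
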
-# Summary: Garmin Map subfiles
-# From: Joerg Jenderek
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/IMG_File_Format
-# Garmin Common Header
-2 string GARMIN\
-# skip ASCII text by checking for low header length
->0 uleshort <0x1000 Garmin map,
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/GMP_Subfile_Format
->>9 string GMP subtile
-!:mime application/x-garmin-gpm
-!:ext gmp
-# copyright message
->>>(0.s) string x %s
->>>0x0E use garmin-date
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/MDR_Subfile_Format
-# This contains the searchable address table used for finding routing destinations
->>9 string MDR address table
-!:mime application/x-garmin-mdr
-!:ext mdr
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/NOD_Subfile_Format
-# http://svn.parabola.me.uk/display/trunk/doc/nod.txt
-# This contains the routing information
->>9 string NOD routing
-!:mime application/x-garmin-nod
-!:ext nod
->>>0x0E use garmin-date
-#>>>0x15 ulelong x \b, at %#x
-#>>>0x19 ulelong x %#x bytes NOD1
-#>>>0x25 ulelong x \b, at %#x
-#>>>0x29 ulelong x %#x bytes NOD2
-#>>>0x31 ulelong x \b, at %#x
-#>>>0x35 ulelong x %#x bytes NOD3
-# URL: http://www.pinns.co.uk/osm/net.html
-# routable highways (length, direction, allowed speed, house address information)
->>9 string NET highways
-!:mime application/x-garmin-net
-!:ext net
-#>>>0x15 ulelong x \b, at %#x
-#>>>0x19 ulelong x %#x bytes NET1
-#>>>0x22 ulelong >0
-#>>>>0x1E ulelong x \b, at %#x
-#>>>>0x22 ulelong x %#x bytes NET2
-#>>>0x2B ulelong >0
-#>>>>0x27 ulelong x \b, at %#x
-#>>>>0x2B ulelong x %#x bytes NET3
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/LBL_Subfile_Format
->>9 string LBL labels
-!:mime application/x-garmin-lbl
-!:ext lbl
->>>(0.s) string x %s
-# Label coding type 6h 9h and ah
->>>0x1E ubyte x \b, coding type %#x
-#>>>0x15 ulelong x \b, at %#x
-#>>>0x19 ulelong x %#x bytes LBL1
-#>>>0x1F ulelong x \b, at %#x
-#>>>0x23 ulelong x %#x bytes LBL2
-#>>>0x2D ulelong x \b, at %#x
-#>>>0x31 ulelong x %#x bytes LBL3
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/SRT_Subfile_Format
-# A lookup table of the chars in the map's codepage, and their collating sequence
->>9 string SRT sort table
-!:mime application/x-garmin-srt
-!:ext srt
->>>0x0E use garmin-date
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/TRE_Subfile_Format
->>9 string TRE tree
-!:mime application/x-garmin-tre
-!:ext tre
-# title like City Nav Europe NTU 2019.2 Basemap
-# or OSM Street map
->>>(0.s) string x %s
-# 2nd title like Copyright 1995-2018 by GARMIN Corporation.
-# or http://www.openstreetmap.org/
->>>>&1 string x %s
->>>0x0E use garmin-date
-#>>>0x21 ulelong x \b, at %#x
-#>>>0x25 ulelong x %#x bytes TRE1
-#>>>0x29 ulelong x \b, at %#x
-#>>>0x2D ulelong x %#x bytes TRE2
-#>>>0x31 ulelong x \b, at %#x
-#>>>0x35 ulelong x %#x bytes TRE3
-# Copyright record size
-#>>>0x39 uleshort x \b, copyright record size %u
-# Map ID
->>>0x74 ulelong x \b, ID %#x
-# URL: https://www.gpspower.net/garmin-tutorials/353310-basecamp-installing-free-desktop-map.html
-# For road traffic information service (RDS/TMS/TMC). Commonly seen in City Navigator maps
->>9 string TRF traffic,
-!:mime application/x-garmin-trf
-!:ext trf
-# city/region like Preitenegg
->>>(0.s+1) string x 1st %s
-# highway part like L606/L148
->>>>&1 string x %s
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/Format
-# Reference: http://www.pinns.co.uk/osm/typformat.html
-# customizes the appearance of objects; gives better-looking maps on GPS units and in MapSource/QLandkarte
->>9 string TYP types
-!:mime application/x-garmin-typ
-!:ext typ
->>>0x0E use garmin-date
-# character set 1252 65001~UTF8
->>>0x15 uleshort x \b, code page %u
-# POIs
-#>>>0x17 ulelong x \b, at %#x
-#>>>0x1B ulelong x %#x bytes TYP1
-# extra pois
-#>>>0x5B ulelong x \b, at %#x
-#>>>0x5F ulelong x %#x bytes TYP8
-# URL: https://wiki.openstreetmap.org/wiki/OSM_Map_On_Garmin/RGN_Subfile_Format
-# http://www.pinns.co.uk/osm/RGN.html
-# region data used by the Garmin software
->>9 string RGN region
-!:mime application/x-garmin-rgn
-!:ext rgn
-# POIs,Indexed POIs,Polylines or Polygons or first map level
-#>>>0x15 ulelong x \b, at %#x
-#>>>0x19 ulelong x %#x bytes RGN1
-# polygons with extended types
-#>>>0x21 ulelong >0
-#>>>>0x1D ulelong x \b, at %#x
-#>>>>0x21 ulelong x %#x bytes RGN2
-# polylines with extended types
-#>>>0x3D ulelong >0
-#>>>>0x39 ulelong x \b, at %#x
-#>>>>0x3D ulelong x %#x bytes RGN3
-# extended POIs
-#>>>0x59 ulelong >0
-#>>>>0x55 ulelong x \b, at %#x
-#>>>>0x59 ulelong x %#x bytes RGN3
-#>>9 default x unknown map type
-# Header length; GMP:31h 35h 3Dh,MDR:11Eh 238h 2C4h 310h,NOD:3Fh 7Fh,NET:64h,
-# LBL:2A9h,SRT:1Dh 25h 27h,TRE:CFh 135h,TRF:5Ah,TYP:5Bh 6Eh 7Ch AEh,RGN:7Dh
->>0 uleshort x \b, header length %#x
-
-# URL: https://www.memotech.franken.de/FileFormats/
-# Reference: https://www.memotech.franken.de/FileFormats/Garmin_RGN_Format.pdf
-# From: Joerg Jenderek
-0 string KpGr Garmin update
-# format version like: 0064h~1.0
->0x4 uleshort !0x0064
->>4 uleshort/100 x \b, version %u
->>4 uleshort%100 x \b.%u
-# 1st Garmin entry
->6 use garmin-entry
-# 2nd Garmin entry
->(0x6.l+10) ubyte x
->>&0 use garmin-entry
-# 3rd entry
->(0x6.l+10) ubyte x
->>&(&0.l+4) ubyte x
->>>&0 use garmin-entry
-# look again at version to use default clause
->0x4 uleshort x
-# test for region content by looking for
-# Garmin *.srf by ./images with normal builder name "SQA" or longer "hales"
-# 1 space after equal sign
->>0x3a search/5/s GARMIN\ BITMAP \b=
-!:mime image/x-garmin-exe
-!:ext exe
->>>&0 indirect x
-# if not bitmap *.srf then region; 1 space after equal sign
->>0x3a default x \b=
-!:mime application/x-garmin-rgn
-!:ext rgn
-# recursively embedded
->>>0x3a search/5/s KpGrd
->>>>&0 indirect x
-# look for ZIP or JAR archive by ./archive and ./zip
->>>0x3a search/5/s PK\003\004
->>>>&0 indirect x
-# TODO: other garmin RGN record content like foo
-#>>0x3a search/5/s bar BAR
-# display information of Garmin RGN record
-0 name garmin-entry
-# record length: 2 for Data, for Application often 1Bh sometimes 1Dh, "big" for Region
-#>0 ulelong x \b, length %#x
-# data record (ID='D') with version content like 0064h~1.0
->4 ubyte =0x44
->>5 uleshort !0x0064 \b; Data
->>>5 uleshort/100 x \b, version %u
->>>5 uleshort%100 x \b.%u
-# Application Record (ID='A')
->4 ubyte =0x41 \b; App
-# version content like 00c8h~2.0
->>5 uleshort !0x00C8
->>>5 uleshort/100 x \b, version %u
->>>5 uleshort%100 x \b.%u
-# builder name like: SQA sqa build hales
->>7 string x \b, build by %s
-# build date like: Oct 25 1999, Oct 1 2008, Feb 23 2009, Dec 15 2009
->>>&1 string x %s
-# build time like: 11:26:12, 11:45:54, 14:16:13, 18:23:01
->>>>&1 string x %s
-# region record (ID='R')
->4 ubyte =0x52 \b; Region
-# region ID: 14~fw_all.bin; 78~ZIP, RGN or SRF bitmap; 148~ZIP or JAR; 249~display firmware; 251~WiFi or GCD firmware; 255~ZIP
->>5 uleshort x ID=%u
-# delay in ms: like 0, 500
->>7 ulelong !0 \b, %u ms
-# region size (is record length - 10)
-#>>11 ulelong x \b, length %#x
-# region content like:
-# "KpGr"~recursiv embedded,"GARMIN BITMAP"~Garmin Bitmap *.srf, "PK"~ZIP archive
-#>>15 string x \b, content "%s"
->>15 ubequad x \b, content %#llx...
-# This does NOT WORK!
-#>>15 indirect x \b; contains
->4 default x \b; other
-# garmin Record ID Identifies the record content like: D A R
->>4 ubyte x ID '%c'
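Read through the offset arithmetic, a KpGr record appears to be a 4-byte little-endian payload length, a one-byte ID ('D', 'A' or 'R') and then the payload, with the next record following immediately. A rough Python sketch under that reading (names are illustrative):

import struct

def parse_kpgr_entry(buf: bytes, off: int):
    """Decode one record of a Garmin KpGr update file per the garmin-entry rules."""
    length, = struct.unpack_from("<I", buf, off)     # payload bytes after the ID
    rec_id = chr(buf[off + 4])
    entry = {"id": rec_id, "payload_len": length}
    if rec_id == "D":                                # data record: version only
        ver, = struct.unpack_from("<H", buf, off + 5)
        entry["version"] = f"{ver // 100}.{ver % 100}"
    elif rec_id == "A":                              # application record
        ver, = struct.unpack_from("<H", buf, off + 5)
        entry["version"] = f"{ver // 100}.{ver % 100}"
        # builder name, build date and build time are NUL-separated strings at offset 7
        entry["build"] = buf[off + 7:off + 5 + length].split(b"\0")[:3]
    elif rec_id == "R":                              # region record
        entry["region_id"], entry["delay_ms"], size = struct.unpack_from("<HII", buf, off + 5)
        entry["content"] = buf[off + 15:off + 15 + min(size, 8)]   # first payload bytes
    return entry, off + 5 + length                   # offset of the following record
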
-
-# TomTom GPS watches ttbin files:
-# https://github.com/ryanbinns/ttwatch/tree/master/ttbin
-# From: Daniel Lenski
-0 byte 0x20
->1 leshort 0x0007
->>0x76 byte 0x20
->>>0x77 leshort 0x0075 TomTom activity file, v7
->>>>8 leldate x (%s,
->>>>3 byte x device firmware %d.
->>>>4 byte x \b%d.
->>>>5 byte x \b%d,
->>>>6 leshort x product ID %04d)
-
-# Garmin firmware:
-# https://www.memotech.franken.de/FileFormats/Garmin_GCD_Format.pdf
-# https://www.gpsrchive.com/GPSMAP/GPSMAP%2066sr/Firmware.html
-0 string GARMIN
->6 uleshort 100 GARMIN firmware (version 1.0)
diff --git a/contrib/libs/libmagic/magic/Magdir/maple b/contrib/libs/libmagic/magic/Magdir/maple
deleted file mode 100644
index 80cf9f29a1..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/maple
+++ /dev/null
@@ -1,109 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: maple,v 1.10 2021/08/30 13:31:25 christos Exp $
-# maple: file(1) magic for maple files
-# "H. Nanosecond" <aldomel@ix.netcom.com>
-# Maple V release 4, a multi-purpose math program
-#
-
-# maple library .lib
-# URL: https://en.wikipedia.org/wiki/Maple_(software)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lib-maple-v-r4.trid.xml
-# Update: Joerg Jenderek
-0 string \000MVR4\nI Maple Vr4 library
-#!:mime application/octet-stream
-!:mime application/x-maple-lib
-!:ext lib
-
-# URL: https://en.wikipedia.org/wiki/Maple_(software)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lib-maple-v-r5.trid.xml
-# From: Joerg Jenderek
-0 string \000MVR5\n Maple Vr5 library
-#!:mime application/octet-stream
-!:mime application/x-maple-lib
-!:ext lib
-
-# From: Joerg Jenderek
-0x400 string M7R0\nI Maple Vr7 library
-#!:mime application/octet-stream
-!:mime application/x-maple-lib
-!:ext lib
-# null terminated library name like: C:\Maple12/Cliffordlib\maple.lib ../Maplets/Tutors.lib
->5 string x %s
-# probably library name padded with NUL or dots (0x2E)
-#>0xF8 uquad x \b, PADDING 0x%16.16llx
-# null terminated strings like: Exterior Clifford FunctionArithmetics
-# like: 1 20 40
->0x115 ulelong x \b, %u string
-# plural s
->0x115 ulelong >1 \bs
->0x119 string x 1st '%s'
-# probably second name section padded with NUL or dots (0x2E)
-#>0x3F0 uquad x \b, 2nd PADDING 0x%16.16llx
-# line-feed-separated ASCII string with maximal length 79
-#>0x407 string x \b, section "%s"
->0x454 ubyte !0x0a \b, at 0x454 0x%x
-
-# .ind
-# no magic for these :-(
-# they are compiled indexes for maple files
-
-# .hdb
-# Update: Joerg Jenderek
-# URL: https://www.maplesoft.com/support/help/maple/view.aspx?path=Formats/HDB
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/h/hdb-maple.trid.xml
-# Note: This format was replaced in Maple 18 by the Maple Help format (*.help)
-0 string \000\004\000\000
-# skip xBASE Compound Index file *.CDX by looking for version
->1028 string version Maple help database
-# length of string version
-#>>1024 ulelong !7 \b, at 0x400 unexpected %u
-#!:mime application/octet-stream
-!:mime application/x-maple-hdb
-!:ext hdb
->1028 default x
-# skip more xBASE Compound Index file *.CDX by looking for keyword Maple
-# like hsum.hdb
->>4 search/0xCC41 Maple Maple help database
-!:mime application/x-maple-hdb
-!:ext hdb
-
-# .mhp
-# this has the form <PACKAGE=name>
-0 string \<PACKAGE= Maple help file
-0 string \<HELP\ NAME= Maple help file
-0 string \n\<HELP\ NAME= Maple help file with extra carriage return at start (yuck)
-#0 string #\ Newton Maple help file, old style
-0 string #\ daub Maple help file, old style
-#0 string #=========== Maple help file, old style
-
-# .mws
-0 string \000\000\001\044\000\221 Maple worksheet
-#this is anomalous
-0 string WriteNow\000\002\000\001\000\000\000\000\100\000\000\000\000\000 Maple worksheet, but weird
-# this has the form {VERSION 2 3 "IBM INTEL NT" "2.3" }\n
-# that is {VERSION major_version minor_version computer_type version_string}
-0 string {VERSION\ Maple worksheet
->9 string >\0 version %.1s.
->>11 string >\0 %.1s
-
-# .mps
-0 string \0\0\001$ Maple something
-# from byte 4 it is either 'nul E' or 'soh R'
-# I think 'nul E' means a file that was saved as a different name
-# a sort of revision marking
-# 'soh R' means new
->4 string \000\105 An old revision
->4 string \001\122 The latest save
-
-# .mpl
-# some of these are the same as .mps above
-#0000000 000 000 001 044 000 105 same as .mps
-#0000000 000 000 001 044 001 122 same as .mps
-
-0 string #\n##\ <SHAREFILE= Maple something
-0 string \n#\n##\ <SHAREFILE= Maple something
-0 string ##\ <SHAREFILE= Maple something
-0 string #\r##\ <SHAREFILE= Maple something
-0 string \r#\r##\ <SHAREFILE= Maple something
-0 string #\ \r##\ <DESCRIBE> Maple something anomalous.
diff --git a/contrib/libs/libmagic/magic/Magdir/marc21 b/contrib/libs/libmagic/magic/Magdir/marc21
deleted file mode 100644
index bb4998ec04..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/marc21
+++ /dev/null
@@ -1,30 +0,0 @@
-#--------------------------------------------
-# marc21: file(1) magic for MARC 21 Format
-#
-# Kevin Ford (kefo@loc.gov)
-#
-# MARC21 formats are for the representation and communication
-# of bibliographic and related information in machine-readable
-# form. For more info, see https://www.loc.gov/marc/
-
-
-# leader position 20-21 must be 45
-# and 22-23 also 00 so far, but we check that later.
-20 string 45
->0 search/2048 \x1e
-
-# leader starts with 5 digits, followed by codes specific to MARC format
->>0 regex/1l (^[0-9]{5})[acdnp][^bhlnqsu-z] MARC21 Bibliographic
-!:mime application/marc
->>0 regex/1l (^[0-9]{5})[acdnosx][z] MARC21 Authority
-!:mime application/marc
->>0 regex/1l (^[0-9]{5})[cdn][uvxy] MARC21 Holdings
-!:mime application/marc
->>0 regex/1l (^[0-9]{5})[acdn][w] MARC21 Classification
-!:mime application/marc
->>0 regex/1l (^[0-9]{5})[cdn][q] MARC21 Community
-!:mime application/marc
-
-# leader position 22-23, should be "00" but is it?
->>0 regex/1l (^.{21})([^0]{2}) (non-conforming)
-!:mime application/marc
diff --git a/contrib/libs/libmagic/magic/Magdir/mathcad b/contrib/libs/libmagic/magic/Magdir/mathcad
deleted file mode 100644
index b186641f7d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mathcad
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mathcad,v 1.5 2009/09/19 16:28:10 christos Exp $
-# mathcad: file(1) magic for Mathcad documents
-# URL: http://www.mathsoft.com/
-# From: Josh Triplett <josh@freedesktop.org>
-
-0 string .MCAD\t Mathcad document
diff --git a/contrib/libs/libmagic/magic/Magdir/mathematica b/contrib/libs/libmagic/magic/Magdir/mathematica
deleted file mode 100644
index dda71e884e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mathematica
+++ /dev/null
@@ -1,192 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mathematica,v 1.17 2023/06/16 19:33:58 christos Exp $
-# mathematica: file(1) magic for mathematica files
-# "H. Nanosecond" <aldomel@ix.netcom.com>
-# Mathematica a multi-purpose math program
-# versions 2.2 and 3.0
-
-0 name wolfram
->0 string x Mathematica notebook version 2.x
-!:ext mb
-!:mime application/vnd.wolfram.mathematica
-
-#mathematica .mb
-0 string \064\024\012\000\035\000\000\000
->0 use wolfram
-0 string \064\024\011\000\035\000\000\000
->0 use wolfram
-
-#
-0 search/1000 Content-type:\040application/mathematica Mathematica notebook version 2.x
-!:ext nb
-!:mime application/mathematica
-
-
-# .ma
-# multiple possibilities:
-
-0 string (*^\n\n::[\011frontEndVersion\ =
-#>41 string >\0 %s
->0 use wolfram
-
-#0 string (*^\n\n::[\011palette
-
-#0 string (*^\n\n::[\011Information
-#>675 string >\0 %s #doesn't work well
-
-# there may be 'cr' instead of 'nl' in some files; does this matter?
-
-# generic:
-0 string (*^\r\r::[\011
->0 use wolfram
-0 string (*^\r\n\r\n::[\011
->0 use wolfram
-0 string (*^\015
->0 use wolfram
-0 string (*^\n\r\n\r::[\011
->0 use wolfram
-0 string (*^\r::[\011
->0 use wolfram
-0 string (*^\r\n::[\011
->0 use wolfram
-0 string (*^\n\n::[\011
->0 use wolfram
-0 string (*^\n::[\011
->0 use wolfram
-
-
-# Mathematica .mx files
-
-#0 string (*This\ is\ a\ Mathematica\ binary\ dump\ file.\ It\ can\ be\ loaded\ with\ Get.*) Mathematica binary file
-0 string (*This\ is\ a\ Mathematica\ binary\ Mathematica binary file
-#>71 string \000\010\010\010\010\000\000\000\000\000\000\010\100\010\000\000\000
-# >71... is optional
->88 string >\0 from %s
-
-
-# Mathematica files PBF:
-# 115 115 101 120 102 106 000 001 000 000 000 203 000 001 000
-0 string MMAPBF\000\001\000\000\000\203\000\001\000 Mathematica PBF (fonts I think)
-
-# .ml files: these are menu resources, I think
-# these start with "[0-9][0-9][0-9]\ A~[0-9][0-9][0-9]\
-# how to put that into a magic rule?
-4 string \ A~ Mathematica .ml file
-
-# .nb files
-#too long 0 string (***********************************************************************\n\n\ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ \ Mathematica-Compatible Notebook Mathematica 3.0 notebook
-0 string (*********************** Mathematica 3.0 notebook
-
-# other (* matches it is a comment start in these langs
-# GRR: Too weak; also matches other languages e.g. ML
-#0 string (* Mathematica, or Pascal, Modula-2 or 3 code text
-
-#########################
-# MatLab v5
-# URL: http://fileformats.archiveteam.org/wiki/MAT
-# Reference: https://www.mathworks.com/help/pdf_doc/matlab/matfile_format.pdf
-# first 116 bytes of header contain text in human-readable form
-0 string MATLAB Matlab v
-#>11 string/T x \b, at 11 "%.105s"
-#!:mime application/octet-stream
-!:mime application/x-matlab-data
-!:ext mat
-# https://de.mathworks.com/help/matlab/import_export/mat-file-versions.html
-# level of the MAT-file like: 5.0 7.0 or maybe 7.3
-#>7 string x LEVEL "%.3s"
->7 ubyte =0x35 \b5 mat-file
->7 ubyte !0x35
->>7 string x \b%.3s mat-file
->126 short 0x494d (big endian)
->>124 beshort x version %#04x
->126 short 0x4d49 (little endian)
-# 0x0100 for level 5.0 and 0x0200 for level 7.0
->>124 leshort x version %#04x
-# test again so that default clause works
->126 short x
-# created-by MATLAB text includes Platform, sometimes without leading comma (0x2C) or missing
-# like: GLNX86 PCWIN PCWIN64 SOL2 Windows\0407 nt posix
->>20 search/2 Platform:\040 \b, platform
->>>&0 string x %-0.2s
->>>&2 ubyte !0x2C \b%c
->>>>&0 ubyte !0x2C \b%c
->>>>>&0 ubyte !0x2C \b%c
->>>>>>&0 ubyte !0x2C \b%c
->>>>>>>&0 ubyte !0x2C \b%c
->>>>>>>>&0 ubyte !0x2C \b%c
->>>>>>>>>&0 ubyte !0x2C \b%c
-# examples without Platform tag like one_by_zero_char.mat
->>20 default x
->>>11 string x "%s"
-# created-by MATLAB text includes a time like: Fri Feb 20 15:26:59 2009
->34 search/9/c created\040on:\040 \b, created
->>&0 string x %-.24s
-# MatLab v4
-# From: Joerg Jenderek
-# check for valid imaginary flag of Matlab matrix version 4
-13 ushort 0
-# check for valid ASCII matrix name
->20 ubyte >0x1F
-# skip PreviousEntries.dat with "invalid high" name \304P\344@\001
->>20 ubyte <0304
-# skip some Netwfw*.dat and $I3KREPH.dat by checking for a non-zero number of rows
->>>4 ulong !0
-# skip some CD-ROM filesystem like test-hfs.iso by looking for valid big endian type flag
->>>>0 ubelong&0xFFffFF00 0x00000300
->>>>>0 use matlab4
-# no example for 8-bit and 16-bit integers matrix
->>>>0 ubelong&0xFFffFF00 0x00000400
->>>>>0 use matlab4
-# branch for Little-Endian variant of Matlab MATrix version 4
-# skip big endian variant by looking for valid low little endian type flag
->>>>0 ulelong <53
-# skip tokens.dat and some Netwfw*.dat by checking for valid imaginary flag value of MAT version 4
->>>>>12 ulelong <2
-# no misidentified little endian MATrix example with "short" matrix name
->>>>>>16 ulelong <3
-# skip radeon firmware BONAIRE_sdma.bin HAWAII_sdma.bin KABINI_sdma.bin KAVERI_sdma.bin MULLINS_sdma.bin
-# by checking for non-zero matrix name length
->>>>>>>16 ubelong >0
->>>>>>>>0 use \^matlab4
-# little endian MATrix with "long" matrix name or some misidentified samples
->>>>>>16 ulelong >2
-# skip TileCacheLogo-*.dat with invalid 2nd character \001 of matrix name with length 96
->>>>>>>21 ubyte >0x1F
->>>>>>>>0 use \^matlab4
-# Note: called "MATLAB Mat File" with version "Level 4" by DROID via PUID fmt/1550
-# display information of Matlab v4 mat-file
-0 name matlab4 Matlab v4 mat-file
-#!:mime application/octet-stream
-!:mime application/x-matlab-data
-!:ext mat
-# 20-byte header with 5 long integers that contain information describing certain attributes of the matrix
-# type flag decimal MOPT; maximal 4052=FD4h; maximal 52=34h for little endian
-#>0 ubelong x \b, type flag %u
-#>0 ubelong x (%#x)
-# M: 0~little endian 1~Big Endian 2~VAX D-float 3~VAX G-float 4~Cray
-#>0 ubelong/1000 x \b, M=%u
->0 ubelong/1000 0 (little endian)
->0 ubelong/1000 1 (big endian)
->0 ubelong/1000 2 (VAX D-float)
->0 ubelong/1000 3 (VAX G-float)
->0 ubelong/1000 4 (Cray)
-# namlen; the length of the matrix name
-#>16 ubelong x \b, name length %u
-#>(16.L+19) ubyte x \b, TERMINATING NAME CHARACTER=%#x
-# nul terminated matrix name like: fit_params testmatrix testsparsecomplex teststringarray
-#>20 string x \b, MATRIX NAME="%s"
-#>21 ubyte x \b, MAYBE 2ND CHAR=%c
->16 pstring/L x %s
-# T indicates the matrix type: 0~numeric 1~text 2~sparse
-#>0 ubelong%10 x \b, T=%u
->0 ubelong%10 0 \b, numeric
->0 ubelong%10 1 \b, text
->0 ubelong%10 2 \b, sparse
-# mrows; number of rows in the matrix like: 1 3 8
->4 ubelong x \b, rows %u
-# ncols; number of columns in the matrix like: 1 3 4 5 9 43
->8 ubelong x \b, columns %u
-# imagf; imaginary flag; 1~matrix has an imaginary part 0~only real data
->12 ubelong !0 \b, imaginary (%u)
-# real; Real part of the matrix consists of mrows * ncols numbers
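The matlab4 rules boil down to a 20-byte header of five 32-bit integers (MOPT type flag, mrows, ncols, imagf, namlen) followed by the matrix name, stored in the file's native byte order. A hedged Python sketch of that decoding; the endianness guess is only a heuristic and the names are illustrative:

import struct

def parse_mat4_header(buf: bytes) -> dict:
    """Decode the Matlab v4 matrix header described by the matlab4 rules."""
    # Try both byte orders and keep the one whose type flag is plausible
    # (M digit <= 4, so a valid MOPT value stays below 5000).
    for endian in ("<", ">"):
        mopt, mrows, ncols, imagf, namlen = struct.unpack_from(endian + "5I", buf, 0)
        if mopt < 5000:
            break
    order = {0: "little endian", 1: "big endian", 2: "VAX D-float",
             3: "VAX G-float", 4: "Cray"}.get(mopt // 1000, "unknown")
    kind = {0: "numeric", 1: "text", 2: "sparse"}.get(mopt % 10, "unknown")
    name = buf[20:20 + namlen].rstrip(b"\0").decode("ascii", "replace")
    return {"order": order, "type": kind, "rows": mrows, "cols": ncols,
            "imaginary": bool(imagf), "name": name}
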
diff --git a/contrib/libs/libmagic/magic/Magdir/matroska b/contrib/libs/libmagic/magic/Magdir/matroska
deleted file mode 100644
index 271af556aa..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/matroska
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: matroska,v 1.9 2019/04/19 00:42:27 christos Exp $
-# matroska: file(1) magic for Matroska files
-#
-# See https://www.matroska.org/
-#
-
-# EBML id:
-0 belong 0x1a45dfa3
-# DocType id:
->4 search/4096 \x42\x82
-# DocType contents:
->>&1 string webm WebM
-!:mime video/webm
->>&1 string matroska Matroska data
-!:mime video/x-matroska
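The same detection, restated as a short Python sketch: the EBML magic at offset 0, then the DocType element id 0x42 0x82 somewhere in the first 4 KiB. Like the magic rule it assumes a one-byte element size, which is enough for the short doctype strings but not for EBML in general (names are illustrative):

def sniff_matroska(buf: bytes):
    """Return a MIME type for EBML/Matroska data, mirroring the rules above."""
    if buf[:4] != b"\x1a\x45\xdf\xa3":
        return None
    pos = buf.find(b"\x42\x82", 4, 4 + 4096)          # DocType element id
    if pos < 0:
        return None
    size = buf[pos + 2] & 0x7f                        # one-byte EBML size (top bit is the length marker)
    doctype = buf[pos + 3:pos + 3 + size]
    if doctype.startswith(b"webm"):
        return "video/webm"
    if doctype.startswith(b"matroska"):
        return "video/x-matroska"
    return "application/octet-stream"
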
diff --git a/contrib/libs/libmagic/magic/Magdir/mcrypt b/contrib/libs/libmagic/magic/Magdir/mcrypt
deleted file mode 100644
index f2edd08912..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mcrypt
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mcrypt,v 1.6 2022/02/08 18:51:45 christos Exp $
-# Mavroyanopoulos Nikos <nmav@hellug.gr>
-# mcrypt: file(1) magic for mcrypt 2.2.x;
-# URL: https://en.wikipedia.org/wiki/Mcrypt
-# http://fileformats.archiveteam.org/wiki/MCrypt
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/n/nc-mcrypt.trid.xml
-# Update: Joerg Jenderek
-# Note: called by TrID "mcrypt encrypted (v2.5)"
-0 string \0m\3 mcrypt 2.5 encrypted data,
-#!:mime application/octet-stream
-!:mime application/x-crypt-nc
-!:ext nc
->4 string >\0 algorithm: %s,
->>&1 leshort >0 keysize: %d bytes,
->>>&0 string >\0 mode: %s,
-
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/n/nc-mcrypt-22.trid.xml
-# Note: called by TrID "mcrypt encrypted (v2.2)"
-0 string \0m\2 mcrypt 2.2 encrypted data,
-#!:mime application/octet-stream
-!:mime application/x-crypt-nc
-# no example
-!:ext nc
->3 byte 0 algorithm: blowfish-448,
->3 byte 1 algorithm: DES,
->3 byte 2 algorithm: 3DES,
->3 byte 3 algorithm: 3-WAY,
->3 byte 4 algorithm: GOST,
->3 byte 6 algorithm: SAFER-SK64,
->3 byte 7 algorithm: SAFER-SK128,
->3 byte 8 algorithm: CAST-128,
->3 byte 9 algorithm: xTEA,
->3 byte 10 algorithm: TWOFISH-128,
->3 byte 11 algorithm: RC2,
->3 byte 12 algorithm: TWOFISH-192,
->3 byte 13 algorithm: TWOFISH-256,
->3 byte 14 algorithm: blowfish-128,
->3 byte 15 algorithm: blowfish-192,
->3 byte 16 algorithm: blowfish-256,
->3 byte 100 algorithm: RC6,
->3 byte 101 algorithm: IDEA,
->4 byte 0 mode: CBC,
->4 byte 1 mode: ECB,
->4 byte 2 mode: CFB,
->4 byte 3 mode: OFB,
->4 byte 4 mode: nOFB,
->5 byte 0 keymode: 8bit
->5 byte 1 keymode: 4bit
->5 byte 2 keymode: SHA-1 hash
->5 byte 3 keymode: MD5 hash
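For the mcrypt 2.5 entry at the top of this file, the relative offsets imply a NUL-terminated algorithm name at offset 4, a 16-bit little-endian key size and a NUL-terminated mode name. A rough Python sketch under that reading; it is an approximation of the rule chain, not a verified reimplementation of the mcrypt header:

def parse_mcrypt25_header(buf: bytes):
    """Best-effort reading of an mcrypt 2.5 header, following the rules above."""
    if buf[:3] != b"\x00m\x03":
        return None
    end_algo = buf.index(b"\x00", 4)                            # algorithm name is NUL-terminated
    algorithm = buf[4:end_algo].decode("ascii", "replace")
    keysize = int.from_bytes(buf[end_algo + 1:end_algo + 3], "little")
    end_mode = buf.index(b"\x00", end_algo + 3)                 # mode name follows the key size
    mode = buf[end_algo + 3:end_mode].decode("ascii", "replace")
    return {"algorithm": algorithm, "keysize": keysize, "mode": mode}
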
diff --git a/contrib/libs/libmagic/magic/Magdir/measure b/contrib/libs/libmagic/magic/Magdir/measure
deleted file mode 100644
index 42e7186484..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/measure
+++ /dev/null
@@ -1,44 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: measure,v 1.3 2021/03/25 17:30:10 christos Exp $
-# measure: file(1) magic for measurement data
-
-# DIY-Thermocam raw data
-0 name diy-thermocam-parser
->0 beshort x scale %d-
->2 beshort x \b%d,
->4 lefloat x spot sensor temperature %f,
->9 ubyte 0 unit celsius,
->9 ubyte 1 unit fahrenheit,
->8 ubyte x color scheme %d
->10 ubyte 1 \b, show spot sensor
->11 ubyte 1 \b, show scale bar
->12 ubyte &1 \b, minimum point enabled
->12 ubyte &2 \b, maximum point enabled
->13 lefloat x \b, calibration: offset %f,
->17 lefloat x slope %f
-
-0 name diy-thermocam-checker
->9 ubyte <2
->>10 ubyte <2
->>>11 ubyte <2
->>>>12 ubyte <4
->>>>>17 lefloat >0.0001 DIY-Thermocam raw data
-
-# V2 and Lepton 3.x:
-38408 ubyte <19
->38400 use diy-thermocam-checker
->>38400 default x (Lepton 3.x),
->>>38400 use diy-thermocam-parser
-
-# V1 or Lepton 2.x
-9608 ubyte <19
->9600 use diy-thermocam-checker
->>9600 default x (Lepton 2.x),
->>>9600 use diy-thermocam-parser
-
-# Becker & Hickl Photon Counting (PMS) data file
-# format documentation: https://www.becker-hickl.com/wp-content/uploads/2018/11/opm-pms400-v01.pdf (page 57)
-(0x02.l) string *IDENTIFICATION Becker & Hickl PMS Data File
->0x12 short x (%d data blocks)
-!:ext sdt
diff --git a/contrib/libs/libmagic/magic/Magdir/mercurial b/contrib/libs/libmagic/magic/Magdir/mercurial
deleted file mode 100644
index b8f3cddb36..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mercurial
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mercurial,v 1.5 2019/04/19 00:42:27 christos Exp $
-# mercurial: file(1) magic for Mercurial changeset bundles
-# https://www.selenic.com/mercurial/wiki/
-#
-# Jesse Glick (jesse.glick@sun.com)
-#
-
-0 string HG10 Mercurial changeset bundle
->4 string UN (uncompressed)
->4 string GZ (gzip compressed)
->4 string BZ (bzip2 compressed)
diff --git a/contrib/libs/libmagic/magic/Magdir/metastore b/contrib/libs/libmagic/magic/Magdir/metastore
deleted file mode 100644
index e64e70440b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/metastore
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: metastore,v 1.3 2019/04/19 00:42:27 christos Exp $
-# metastore: file(1) magic for metastore files
-# From: Thomas Wissen
-# see https://david.hardeman.nu/software.php#metastore
-0 string MeTaSt00r3 Metastore data file,
->10 bequad x version %0llx
diff --git a/contrib/libs/libmagic/magic/Magdir/meteorological b/contrib/libs/libmagic/magic/Magdir/meteorological
deleted file mode 100644
index 725982f8d9..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/meteorological
+++ /dev/null
@@ -1,53 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: meteorological,v 1.4 2022/12/09 18:02:09 christos Exp $
-# rinex: file(1) magic for RINEX files
-# http://igscb.jpl.nasa.gov/igscb/data/format/rinex210.txt
-# ftp://cddis.gsfc.nasa.gov/pub/reports/formats/rinex300.pdf
-# data for testing: ftp://cddis.gsfc.nasa.gov/pub/gps/data
-60 string RINEX
->80 search/256 XXRINEXB RINEX Data, GEO SBAS Broadcast
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/broadcast
->80 search/256 XXRINEXD RINEX Data, Observation (Hatanaka comp)
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/observation
->80 search/256 XXRINEXC RINEX Data, Clock
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/clock
->80 search/256 XXRINEXH RINEX Data, GEO SBAS Navigation
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/navigation
->80 search/256 XXRINEXG RINEX Data, GLONASS Navigation
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/navigation
->80 search/256 XXRINEXL RINEX Data, Galileo Navigation
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/navigation
->80 search/256 XXRINEXM RINEX Data, Meteorological
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/meteorological
->80 search/256 XXRINEXN RINEX Data, Navigation
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/navigation
->80 search/256 XXRINEXO RINEX Data, Observation
->>&32 string x \b, date %15.15s
->>5 string x \b, version %6.6s
-!:mime rinex/observation
-
-# https://en.wikipedia.org/wiki/GRIB
-0 string GRIB
->7 byte =1 Gridded binary (GRIB) version 1
-!:mime application/x-grib
-!:ext grb/grib
->7 byte =2 Gridded binary (GRIB) version 2
-!:mime application/x-grib2
-!:ext grb2/grib2
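The GRIB entry is simple enough to restate directly: the ASCII indicator "GRIB" at offset 0 and the edition number at offset 7. A minimal Python sketch (names are illustrative):

def sniff_grib(buf: bytes):
    """Classify a GRIB file by edition, mirroring the rules above."""
    if buf[:4] != b"GRIB":
        return None
    edition = buf[7]
    if edition == 1:
        return "application/x-grib", "grb"
    if edition == 2:
        return "application/x-grib2", "grb2"
    return "application/octet-stream", None
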
diff --git a/contrib/libs/libmagic/magic/Magdir/microfocus b/contrib/libs/libmagic/magic/Magdir/microfocus
deleted file mode 100644
index 93e39aa1bc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/microfocus
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: microfocus,v 1.3 2019/04/19 00:42:27 christos Exp $
-# Micro Focus COBOL data files.
-
-# https://documentation.microfocus.com/help/index.jsp?topic=\
-# %2FGUID-0E0191D8-C39A-44D1-BA4C-D67107BAF784%2FHRFLRHFILE05.html
-# http://www.cobolproducts.com/datafile/data-viewer.html
-# https://github.com/miracle2k/mfcobol-export
-
-0 string \x30\x00\x00\x7C
->36 string \x00\x3E Micro Focus File with Header (DAT)
-!:mime application/octet-stream
-
-0 string \x30\x7E\x00\x00
->36 string \x00\x3E Micro Focus File with Header (DAT)
-!:mime application/octet-stream
-
-39 string \x02
->136 string \x02\x02\x04\x04 Micro Focus Index File (IDX)
-!:mime application/octet-stream
diff --git a/contrib/libs/libmagic/magic/Magdir/mime b/contrib/libs/libmagic/magic/Magdir/mime
deleted file mode 100644
index 57b2dd557b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mime
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mime,v 1.8 2017/03/17 22:20:22 christos Exp $
-# mime: file(1) magic for MIME encoded files
-#
-0 string/t Content-Type:\040
->14 string >\0 %s
-0 string/t Content-Type:
->13 string >\0 %s
diff --git a/contrib/libs/libmagic/magic/Magdir/mips b/contrib/libs/libmagic/magic/Magdir/mips
deleted file mode 100644
index fe83614703..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mips
+++ /dev/null
@@ -1,120 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mips,v 1.10 2014/04/30 21:41:02 christos Exp $
-# mips: file(1) magic for MIPS ECOFF and Ucode, as used in SGI IRIX
-# and DEC Ultrix
-#
-0 beshort 0x0160 MIPSEB ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->22 byte x - version %d
->23 byte x \b.%d
-#
-0 beshort 0x0162 MIPSEL-BE ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-0 beshort 0x6001 MIPSEB-LE ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-0 beshort 0x6201 MIPSEL ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-# MIPS 2 additions
-#
-0 beshort 0x0163 MIPSEB MIPS-II ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->22 byte x - version %d
->23 byte x \b.%d
-#
-0 beshort 0x0166 MIPSEL-BE MIPS-II ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->22 byte x - version %d
->23 byte x \b.%d
-#
-0 beshort 0x6301 MIPSEB-LE MIPS-II ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-0 beshort 0x6601 MIPSEL MIPS-II ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-# MIPS 3 additions
-#
-0 beshort 0x0140 MIPSEB MIPS-III ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->22 byte x - version %d
->23 byte x \b.%d
-#
-0 beshort 0x0142 MIPSEL-BE MIPS-III ECOFF executable
->20 beshort 0407 (impure)
->20 beshort 0410 (swapped)
->20 beshort 0413 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->22 byte x - version %d
->23 byte x \b.%d
-#
-0 beshort 0x4001 MIPSEB-LE MIPS-III ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-0 beshort 0x4201 MIPSEL MIPS-III ECOFF executable
->20 beshort 03401 (impure)
->20 beshort 04001 (swapped)
->20 beshort 05401 (paged)
->8 belong >0 not stripped
->8 belong 0 stripped
->23 byte x - version %d
->22 byte x \b.%d
-#
-0 beshort 0x180 MIPSEB Ucode
-0 beshort 0x182 MIPSEL-BE Ucode
diff --git a/contrib/libs/libmagic/magic/Magdir/mirage b/contrib/libs/libmagic/magic/Magdir/mirage
deleted file mode 100644
index cdeb3fcbc2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mirage
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mirage,v 1.7 2009/09/19 16:28:10 christos Exp $
-# mirage: file(1) magic for Mirage executables
-#
-# XXX - byte order?
-#
-0 long 31415 Mirage Assembler m.out executable
diff --git a/contrib/libs/libmagic/magic/Magdir/misctools b/contrib/libs/libmagic/magic/Magdir/misctools
deleted file mode 100644
index dc1542adac..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/misctools
+++ /dev/null
@@ -1,140 +0,0 @@
-
-#-----------------------------------------------------------------------------
-# $File: misctools,v 1.21 2023/02/03 20:43:48 christos Exp $
-# misctools: file(1) magic for miscellaneous UNIX tools.
-#
-0 search/1 %%!! X-Post-It-Note text
-# URL: http://fileformats.archiveteam.org/wiki/ICalendar
-# https://en.wikipedia.org/wiki/ICalendar
-# Update: Joerg Jenderek
-# Reference: https://www.rfc-editor.org/rfc/rfc5545
-# http://mark0.net/download/triddefs_xml.7z/defs/v/vcs.trid.xml
-# Note: called "iCalendar - vCalendar" by TrID
-0 string/c BEGIN:vcalendar
-# skip DROID fmt-387-signature-id-572.vcs fmt-388-signature-id-573.ics
-# with invalid separator 0x0 or 0xAB instead of CarriageReturn (0x0D) or LineFeed (0x0A)
->15 ubyte&0xF8 =0x08
-# look for VERSION keyword often on second line but sometimes later as in holidays_NRW_2014.ics
->>0 search/188 VERSION
-# after VERSION keyword :1.0 or often :2.0 but sometimes also ;VALUE=TEXT:2.0 like in Jewish religious Juish.ics
-# http://www.webcal.guru/de-DE/kalender_herunterladen?calendar_instance_id=217
-# \n\040:2.0 like in import-real-world-2004-11-19.ics found at
-# https://ftp.gnu.org/gnu/emacs/emacs-28.1.tar.xz
-# emacs-28.1/test/lisp/calendar/icalendar-resources/import-real-world-2004-11-19.ics
-#>>>&0 string x AFTER_VERSION=%.15s
-# Note: called "Internet Calendar and Scheduling format" by DROID via PUID fmt/388
-# skip optional verparam=;other-param like ;VALUE=TEXT and look for version 2.0 that implies iCalendar variant
->>>&0 search/81 :2.0 iCalendar calendar
-# look for Free/Busy component
->>>>15 search/278 :VFREEBUSY file, with Free/Busy component
-!:mime text/calendar
-!:apple ????iFBf
-# no real examples found but only example on Wikipedia page
-!:ext ifb
-# iCalendar calendar without Free/Busy component
->>>>15 default x
-# look for ALARM component
->>>>>15 search/154 :VALARM file, with ALARM component
-!:mime text/calendar
-!:apple ????iCal
-# found on macOS beneath /Users/$USER/Library/Calendars/ as EventAllDayAlarms.icsalarm or EventTimedAlarms.icsalarm
-# no ics examples found
-!:ext icsalarm/ics
-# iCalendar calendar without Free/Busy component and ALARM component
->>>>>15 default x file
-!:mime text/calendar
-!:apple ????iCal
-# no examples found with .ical .icalendar suffix
-!:ext ics
-# if no VERSION 2.0 is found then assume it is VERSION 1.0, that is older vCalendar
-# URL: http://fileformats.archiveteam.org/wiki/VCalendar
-# Note: called "VCalendar format" by DROID via fmt/387
->>>&0 default x vCalendar calendar file
-# deprecated
-!:mime text/x-vcalendar
-!:ext vcs
-# GRR: without VERSION keyword violates specification but accepted by Thunderbird like
-# https://ftp.gnu.org/gnu/emacs/emacs-28.1.tar.xz
-# emacs-28.1/test/lisp/calendar/icalendar-resources/import-with-timezone.ics
->>0 default x vCalendar calendar file, without VERSION
-!:mime text/x-vcalendar
-#!:mime text/calendar
-# no vcs example found
-!:ext ics/vcs
-# GRR: According to newest specification CarriageReturn (0xD) and LineFeed (0xA) should be used as separator but others accepted by Thunderbird
-# like CRLF,LF in Sport Today.vcs created by calendar plugin of TV-Browser https://enwiki.tvbrowser.org/index.php/Calendar_Export
-# or LF like https://www.schulferien.org/media/ical/deutschland/ferien_nordrhein-westfalen_2023.ics?k=foo
->>15 ubeshort !0x0D0A \b, without CRLF
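The BEGIN:VCALENDAR cascade above distinguishes iCalendar (a VERSION of 2.0, possibly after a verparam such as ;VALUE=TEXT) from the older vCalendar 1.0, and then looks for Free/Busy or ALARM components. A loose Python re-reading of that logic, not a faithful copy of the search windows used by the rules (names are illustrative):

def classify_calendar(text: str):
    """Roughly classify BEGIN:VCALENDAR data the way the rules above do."""
    head = text[:2048].upper()
    if not head.startswith("BEGIN:VCALENDAR"):
        return None
    ver = head.find("VERSION")
    if ver >= 0 and ":2.0" in head[ver:ver + 96]:        # iCalendar
        if ":VFREEBUSY" in head:
            return "text/calendar (iCalendar, Free/Busy component)"
        if ":VALARM" in head:
            return "text/calendar (iCalendar, ALARM component)"
        return "text/calendar (iCalendar)"
    return "text/x-vcalendar (vCalendar 1.0)"
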
-
-# updated by Joerg Jenderek at Apr 2015, May 2021
-# https://en.wikipedia.org/wiki/VCard
-# URL: http://fileformats.archiveteam.org/wiki/VCard
-# https://datatracker.ietf.org/doc/html/rfc6350
-# the value is case-insensitive
-0 string/c begin:vcard
-# skip DROID fmt-395-signature-id-634.vcf
->13 string !VERSION:END vCard visiting card
-# deprecated
-#!:mime text/x-vcard
-!:mime text/vcard
-!:apple ????vCrd
-!:ext vcf/vcard
-# VERSION must come right after BEGIN for 3.0 or 4.0 except in 2.1 , where it can be anywhere
-# Joerg_Jenderek_67.vcf
->>12 search/0x113b4/c version:
-# VERSION 2.1 , 3.0 or 4.0
->>>&0 string x \b, version %-.3s
->>>&0 string !2.1
->>>>13 string !VERSION: \b, 2nd line does not start with VERSION:
-# downcase violates RFC 6350, but some "bad" software produces such vcards
->>0 string !BEGIN \b, not up case
-# http://ftp.mozilla.org/pub/thunderbird/candidates/
-# 78.10.1-candidates/build1/source/thunderbird-78.10.1.source.tar.xz
-# thunderbird-78.10.1/comm/mailnews/import/test/unit/resources/basic_vcard_addressbook.vcf
->>11 beshort !0x0D0A \b, lines not separated by CRLF
-
-# Summary: Libtool library file
-# Extension: .la
-# Submitted by: Tomasz Trojanowski <tomek@uninet.com.pl>
-0 search/80 .la\ -\ a\ libtool\ library\ file libtool library file
-
-# Summary: Libtool object file
-# Extension: .lo
-# Submitted by: Abel Cheung <abelcheung@gmail.com>
-0 search/80 .lo\ -\ a\ libtool\ object\ file libtool object file
-
-# From: Daniel Novotny <dnovotny@redhat.com>
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Core_dump#User-mode_memory_dumps
-# Reference: https://msdn.microsoft.com/en-us/library/ms680378%28VS.85%29.aspx
-#
-# "Windows Minidump" by TrID
-# ./misctools (version 5.25) labeled the entry as "MDMP crash report data"
-0 string MDMP Mini DuMP crash report
-# https://filext.com/file-extension/DMP
-!:mime application/x-dmp
-!:ext dmp/mdmp
-# The high-order word is an internal value that is implementation specific.
-# The low-order word is MINIDUMP_VERSION 0xA793
->4 ulelong&0x0000FFFF !0xA793 \b, version %#4.4x
-# NumberOfStreams 8,9,10,13
->8 ulelong x \b, %d streams
-# StreamDirectoryRva 0x20
->12 ulelong !0x20 \b, %#8.8x RVA
-# CheckSum 0
->16 ulelong !0 \b, CheckSum %#8.8x
-# Reserved or TimeDateStamp
->20 ledate x \b, %s
-# https://msdn.microsoft.com/en-us/library/windows/desktop/ms680519%28v=vs.85%29.aspx
-# Flags MINIDUMP_TYPE enumeration type 0 0x121 0x800
->24 ulelong x \b, %#x type
-# >24 ulelong >0 \b; include
-# >>24 ulelong &0x00000001 \b data sections,
-# >>24 ulelong &0x00000020 \b list of unloaded modules,
-# >>24 ulelong &0x00000100 \b process and thread information,
-# >>24 ulelong &0x00000800 \b memory information,
-
-# Summary: abook addressbook file
-# Submitted by: Mark Schreiber <mark7@alumni.cmu.edu>
-0 string #\x20abook\x20addressbook\x20file abook address book
-!:mime application/x-abook-addressbook
diff --git a/contrib/libs/libmagic/magic/Magdir/mkid b/contrib/libs/libmagic/magic/Magdir/mkid
deleted file mode 100644
index faad3966c0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mkid
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mkid,v 1.6 2009/09/19 16:28:10 christos Exp $
-# mkid: file(1) magic for mkid(1) databases
-#
-# ID is the binary tags database produced by mkid(1).
-#
-# XXX - byte order?
-#
-0 string \311\304 ID tags data
->2 short >0 version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/mlssa b/contrib/libs/libmagic/magic/Magdir/mlssa
deleted file mode 100644
index 3c8875eb3d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mlssa
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mlssa,v 1.4 2009/09/19 16:28:10 christos Exp $
-# mlssa: file(1) magic for MLSSA datafiles
-#
-0 lelong 0xffffabcd MLSSA datafile,
->4 leshort x algorithm %d,
->10 lelong x %d samples
diff --git a/contrib/libs/libmagic/magic/Magdir/mmdf b/contrib/libs/libmagic/magic/Magdir/mmdf
deleted file mode 100644
index 5576a66277..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mmdf
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mmdf,v 1.6 2009/09/19 16:28:10 christos Exp $
-# mmdf: file(1) magic for MMDF mail files
-#
-0 string \001\001\001\001 MMDF mailbox
diff --git a/contrib/libs/libmagic/magic/Magdir/modem b/contrib/libs/libmagic/magic/Magdir/modem
deleted file mode 100644
index 5d59401f6c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/modem
+++ /dev/null
@@ -1,92 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: modem,v 1.11 2022/10/19 20:15:16 christos Exp $
-# modem: file(1) magic for modem programs
-#
-# From: Florian La Roche <florian@knorke.saar.de>
-1 string PC\ Research,\ Inc Digifax-G3-File
->29 byte 1 \b, fine resolution
->29 byte 0 \b, normal resolution
-
-# Summary: CCITT Group 3 Facsimile in "raw" form (i.e. no header).
-# Modified by: Joerg Jenderek
-# URL: https://de.wikipedia.org/wiki/Fax
-# http://fileformats.archiveteam.org/wiki/CCITT_Group_3
-# Reference: https://web.archive.org/web/20020628195336/http://www.netnam.vn/unescocourse/computervision/104.htm
-# GRR: EOL of G3 is too general as it also catches TrueType fonts, Postscript PrinterFontMetric, others
-0 short 0x0100
-# 16 0-bits near beginning like True Type fonts *.ttf, Postscript PrinterFontMetric *.pfm, FTYPE.HYPERCARD, XFER
->2 search/9 \0\0
-# maximal 7 0-bits for pixel sequences or 11 0-bits for EOL in G3
->2 default x
-# skip IRCAM file (VAX big-endian) ./audio
->>0 belong !0x0001a364
-# skip GEM Image data ./images
->>>2 beshort !0x0008
-# look for first keyword of Panorama database *.pan
->>>>11 search/262 \x06DESIGN
-# skip Panorama database
->>>>11 default x
-# old Apple DreamWorld DreamGrafix *.3200 with keyword at end of g3 looking files
->>>>>27118 search/1864 DreamWorld
->>>>>27118 default x
-# skip MouseTrap/Mt.Defaults with file size 16 found on Golden Orchard Apple II CD Rom
->>>>>>8 ubequad !0x2e01010454010203
-# skip PICTUREH.SML found on Golden Orchard Apple II CD Rom
->>>>>>>8 ubequad !0x5dee74ad1aa56394
-# skip few (5/41) DEGAS mid-res bitmap (GEMINI01.PI2 GEMINI02.PI2 GEMINI03.PI2 CODE_RAM.PI2 TBX_DEMO.PI2)
-# with file size 32034
->>>>>>>>-0 offset !32034 raw G3 (Group 3) FAX, byte-padded
-# version 5.25 labeled the entry above "raw G3 data, byte-padded"
-!:mime image/g3fax
-#!:apple ????TIFF
-!:ext g3
-# unusual image starting with black pixel
-#0 short 0x1300 raw G3 (Group 3) FAX
-0 short 0x1400
-# 16 0-bits near beginning like PicturePuzzler found on Golden Orchard Apple CD Rom
->2 search/9 \0\0
-# maximal 7 0-bits for pixel sequences or 11 0-bits for EOL in G3
->2 default x
-# skip some (84/1246) MacBinary II/III (Cyberdog2.068k.smi.bin FileMakerPro4.img.bin Hypercard1.25.image.bin UsbStorage1.3.5.smi.bin) with "non random" numbers by versions values 81h/82h + 81h
->>122 ubeshort&0xFcFf !0x8081 raw G3 (Group 3) FAX
-# version 5.25 labeled the above entry as "raw G3 data"
-!:mime image/g3fax
-!:ext g3
-# unusual image with black pixel near beginning
-#0 short 0x1900 raw G3 (Group 3) FAX
-
-#
-# Magic data for vgetty voice formats
-# (Martin Seine & Marc Eberhard)
-
-#
-# raw modem data version 1
-#
-0 string RMD1 raw modem data
->4 string >\0 (%s /
->20 short >0 compression type %#04x)
-
-#
-# portable voice format 1
-#
-0 string PVF1\n portable voice format
->5 string >\0 (binary %s)
-
-#
-# portable voice format 2
-#
-0 string PVF2\n portable voice format
->5 string >\0 (ascii %s)
-
-# From: Bernd Nuernberger <bernd.nuernberger@web.de>
-# Brooktrout G3 fax data incl. 128 byte header
-# Common suffixes: 3??, BRK, BRT, BTR
-0 leshort 0x01bb
->2 leshort 0x0100 Brooktrout 301 fax image,
->>9 leshort x %d x
->>0x2d leshort x %d
->>6 leshort 200 \b, fine resolution
->>6 leshort 100 \b, normal resolution
->>11 byte 1 \b, G3 compression
->>11 byte 2 \b, G32D compression
diff --git a/contrib/libs/libmagic/magic/Magdir/modulefile b/contrib/libs/libmagic/magic/Magdir/modulefile
deleted file mode 100644
index 46c3baf2a0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/modulefile
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: modulefile,v 1.1 2019/10/15 18:04:40 christos Exp $
-# modulefile: file(1) magic for user's environment modulefile
-# URL: http://modules.sourceforge.net/
-# Reference: https://modules.readthedocs.io/en/stable/modulefile.html
-# From: Xavier Delaruelle <xavier.delaruelle@cea.fr>
-0 string #%Module modulefile
-!:mime text/x-modulefile
diff --git a/contrib/libs/libmagic/magic/Magdir/motorola b/contrib/libs/libmagic/magic/Magdir/motorola
deleted file mode 100644
index af93720f29..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/motorola
+++ /dev/null
@@ -1,71 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: motorola,v 1.12 2021/04/26 15:56:00 christos Exp $
-# motorola: file(1) magic for Motorola 68K and 88K binaries
-#
-# 68K
-#
-0 beshort 0520 mc68k COFF
->18 beshort ^00000020 object
->18 beshort &00000020 executable
->12 belong >0 not stripped
->168 string .lowmem Apple toolbox
->20 beshort 0407 (impure)
->20 beshort 0410 (pure)
->20 beshort 0413 (demand paged)
->20 beshort 0421 (standalone)
-0 beshort 0521 mc68k executable (shared)
->12 belong >0 not stripped
-0 beshort 0522 mc68k executable (shared demand paged)
->12 belong >0 not stripped
-#
-# Motorola/UniSoft 68K Binary Compatibility Standard (BCS)
-#
-0 beshort 0554 68K BCS executable
-#
-# 88K
-#
-# Motorola/88Open BCS
-#
-0 beshort 0555 88K BCS executable
-#
-# Motorola S-Records, from Gerd Truschinski <gt@freebsd.first.gmd.de>
-0 string S0 Motorola S-Record; binary data in text format
-
-# ATARI ST relocatable PRG
-#
-# from Oskar Schirmer <schirmer@scara.com> Feb 3, 2001
-# (according to Roland Waldi, Oct 21, 1987)
-# besides the magic 0x601a, the text segment size is checked to be
-# not larger than 1 MB (which is a lot on ST).
-# The additional 0x601b distinction I took from Doug Lee's magic.
-0 belong&0xFFFFFFF0 0x601A0000 Atari ST M68K contiguous executable
->2 belong x (txt=%d,
->6 belong x dat=%d,
->10 belong x bss=%d,
->14 belong x sym=%d)
-0 belong&0xFFFFFFF0 0x601B0000 Atari ST M68K non-contig executable
->2 belong x (txt=%d,
->6 belong x dat=%d,
->10 belong x bss=%d,
->14 belong x sym=%d)
-
-# Atari ST/TT... program format (sent by Wolfram Kleff <kleff@cs.uni-bonn.de>)
-0 beshort 0x601A Atari 68xxx executable,
->2 belong x text len %u,
->6 belong x data len %u,
->10 belong x BSS len %u,
->14 belong x symboltab len %u,
->18 belong 0
->22 belong &0x01 fastload flag,
->22 belong &0x02 may be loaded to alternate RAM,
->22 belong &0x04 malloc may be from alternate RAM,
->22 belong x flags: %#X,
->26 beshort 0 no relocation tab
->26 beshort !0 + relocation tab
->30 string SFX [Self-Extracting LZH SFX archive]
->38 string SFX [Self-Extracting LZH SFX archive]
->44 string ZIP! [Self-Extracting ZIP SFX archive]
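The 0x601A entries above all read the same 28-byte GEMDOS program header: big-endian text/data/BSS/symbol-table sizes starting at offset 2, a reserved long, the flag word at 22 and the relocation flag at 26. A minimal Python sketch of that header; names are illustrative and the relocation flag is left raw for the caller to interpret as the >26 rules do:

import struct

def parse_atari_prg_header(buf: bytes):
    """Decode the Atari ST/TT program header used by the 0x601A rules."""
    magic, text, data, bss, sym, reserved, flags, reloc = struct.unpack_from(">H6IH", buf, 0)
    if magic != 0x601A:
        return None
    return {
        "text": text, "data": data, "bss": bss, "symtab": sym,
        "fastload": bool(flags & 0x01),
        "alt_ram_load": bool(flags & 0x02),
        "alt_ram_malloc": bool(flags & 0x04),
        "reloc_flag": reloc,          # interpreted by the >26 beshort rules above
    }
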
-
-0 beshort 0x0064 Atari 68xxx CPX file
->8 beshort x (version %04x)
diff --git a/contrib/libs/libmagic/magic/Magdir/mozilla b/contrib/libs/libmagic/magic/Magdir/mozilla
deleted file mode 100644
index 32f3bb7e9c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mozilla
+++ /dev/null
@@ -1,37 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: mozilla,v 1.12 2021/04/26 15:56:00 christos Exp $
-# mozilla: file(1) magic for Mozilla XUL fastload files
-# (XUL.mfasl and XPC.mfasl)
-# URL: https://www.mozilla.org/
-# From: Josh Triplett <josh@freedesktop.org>
-
-0 string XPCOM\nMozFASL\r\n\x1A Mozilla XUL fastload data
-# Probably the next magic line contains misspelled "mozLz40\0"
-0 string mozLz4a Mozilla lz4 compressed bookmark data
-# From: Joerg Jenderek
-# URL: https://lz4.github.io/lz4/
-# Reference: https://github.com/avih/dejsonlz4/archive/master.zip/
-# dejsonlz4-master\src\dejsonlz4.c
-# Note: mostly JSON compressed with a non-standard LZ4 header
-# can be unpacked by dejsonlz4 but not lz4 program.
-0 string mozLz40\0 Mozilla lz4 compressed data
-!:mime application/x-lz4+json
-# mozlz4 extension seems to be used for search/store, while jsonlz4 for bookmarks
-!:ext jsonlz4/mozlz4
-# decomp_size
->8 ulelong x \b, originally %u bytes
-# lz4 data
-#>12 ubequad x \b, lz4 data %#16.16llx
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Firefox_4
-# Reference: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT
-# Note: Most ZIP utilities are able to extract such archives
-# maybe only partly or after some warnings. Example:
-# zip -FF omni.ja --out omni.zip
-4 string PK\001\002 Mozilla archive omni.ja
-!:mime application/x-zip
-!:ext ja
-# TODO:
-#>4 use zip-dir-entry
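Unpacking the mozLz4 container described above is a three-step job: check the 8-byte "mozLz40\0" magic, read the 4-byte little-endian decompressed size, then hand the remaining raw LZ4 block to a decompressor. A short Python sketch; it assumes the third-party python-lz4 package and is only an illustration, not the code Mozilla uses:

import lz4.block    # pip install lz4 (assumed third-party dependency)

def read_jsonlz4(path: str) -> bytes:
    """Decompress a Mozilla jsonlz4/mozlz4 file as described above."""
    with open(path, "rb") as fh:
        data = fh.read()
    if data[:8] != b"mozLz40\0":
        raise ValueError("not a mozLz4 file")
    size = int.from_bytes(data[8:12], "little")
    return lz4.block.decompress(data[12:], uncompressed_size=size)
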
diff --git a/contrib/libs/libmagic/magic/Magdir/msdos b/contrib/libs/libmagic/magic/Magdir/msdos
deleted file mode 100644
index aacf85946b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/msdos
+++ /dev/null
@@ -1,2304 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: msdos,v 1.169 2023/04/17 16:39:19 christos Exp $
-# msdos: file(1) magic for MS-DOS files
-#
-
-# .BAT files (Daniel Quinlan, quinlan@yggdrasil.com)
-# updated by Joerg Jenderek at Oct 2008,Apr 2011
-0 string/t @
->1 string/cW \ echo\ off DOS batch file text
-!:mime text/x-msdos-batch
-!:ext bat
->1 string/cW echo\ off DOS batch file text
-!:mime text/x-msdos-batch
-!:ext bat
->1 string/cW rem DOS batch file text
-!:mime text/x-msdos-batch
-!:ext bat
->1 string/cW set\ DOS batch file text
-!:mime text/x-msdos-batch
-!:ext bat
-
-
-# OS/2 batch files are REXX. The second regex is a bit generic, oh well
-# the matched commands seem to be common in REXX and uncommon elsewhere
-100 search/0xffff rxfuncadd
->100 regex/c =^[\ \t]{0,10}call[\ \t]{1,10}rxfunc OS/2 REXX batch file text
-100 search/0xffff say
->100 regex/c =^[\ \t]{0,10}say\ ['"] OS/2 REXX batch file text
-
-# updated by Joerg Jenderek at Oct 2015
-# https://de.wikipedia.org/wiki/Common_Object_File_Format
-# http://www.delorie.com/djgpp/doc/coff/filhdr.html
-# ./intel already labeled COFF type 0x14c=0514 as "80386 COFF executable"
-#0 leshort 0x14c MS Windows COFF Intel 80386 object file
-#>4 ledate x stamp %s
-0 leshort 0x166 MS Windows COFF MIPS R4000 object file
-#>4 ledate x stamp %s
-0 leshort 0x184 MS Windows COFF Alpha object file
-#>4 ledate x stamp %s
-0 leshort 0x268 MS Windows COFF Motorola 68000 object file
-#>4 ledate x stamp %s
-0 leshort 0x1f0 MS Windows COFF PowerPC object file
-#>4 ledate x stamp %s
-0 leshort 0x290 MS Windows COFF PA-RISC object file
-#>4 ledate x stamp %s
-
-# Tests for various EXE types.
-#
-# Many of the compressed formats were extracted from IDARC 1.23 source code.
-#
-# e_magic
-0 string/b MZ
-# TODO
-# FLT: Syntrillium CoolEdit Filter https://en.wikipedia.org/wiki/Adobe_Audition
-# FMX64:FileMaker Pro 64-bit plug-in https://en.wikipedia.org/wiki/FileMaker
-# FMX: FileMaker Pro 32-bit plug-in https://en.wikipedia.org/wiki/FileMaker
-# FOD: WIFE Font Driver
-# GAU: MS Flight Simulator Gauge
-# IFS: OS/2 Installable File System https://en.wikipedia.org/wiki/OS/2
-# MEXW32:MATLAB Windows 32bit compiled function https://en.wikipedia.org/wiki/MATLAB
-# MEXW64:MATLAB Windows 64bit compiled function https://en.wikipedia.org/wiki/MATLAB
-# MLL: Maya plug-in (generic) http://en.wikipedia.org/wiki/Autodesk_Maya
-# PFL: PhotoFilter plugin http://photofiltre.free.fr
-# 8*: PhotoShop plug-in (generic) http://www.adobe.com/products/photoshop/main.html
-# PLG: Aston Shell plugin http://www.astonshell.com/
-# QLB: Microsoft Basic Quick library https://en.wikipedia.org/wiki/QuickBASIC
-# SKL: WinLIFT skin http://www.zapsolution.com/winlift/index.htm
-# TBK: Asymetrix ToolBook application http://www.toolbook.com
-# TBP: The Bat! plugin http://www.ritlabs.com
-# UPC: Ultimate Paint Graphics Editor plugin http://ultimatepaint.j-t-l.com
-# XFM: Syntrillium Cool Edit Transform Effect bad http://www.cooledit.com
-# XPL: X-Plane plugin http://www.xsquawkbox.net/xpsdk/
-# ZAP: ZoneLabs Zone Alarm data http://www.zonelabs.com
-#
-# NEXT LINES FOR DEBUGGING!
-# e_cblp; bytes on last page of file
-# e_cp; pages in file
-#>4 uleshort x \b, e_cp 0x%x
-# e_lfanew; file address of new exe header
-#>0x3c ulelong x \b, e_lfanew 0x%x
-# e_lfarlc; address of relocation table
-#>0x18 uleshort x \b, e_lfarlc=0x%x
-# e_ovno; overlay number. If zero, this is the main executable
-#>0x1a uleshort !0 \b, e_ovno 0x%x
-#>0x1C ubequad !0 \b, e_res 0x%16.16llx
-# e_oemid; often 0
-#>0x24 uleshort !0 \b, e_oemid 0x%x
-# e_oeminfo; typically zeroes, but 13Dh (WORDSTAR.CNV WPFT5.CNV) 143h (WRITWIN.CNV)
-# 1A3h (DBASE.CNV LOTUS123.CNV RFTDCA.CNV WORDDOS.CNV WORDMAC.CNV WORDWIN1.CNVXLBIFF.CNV)
-#>0x26 uleshort !0 \b, e_oeminfo 0x%x
-# e_res2; typically zeroes, but 000006006F082D2Ah SCSICFG.EXE 00009A0300007C03h de.exe
-# 0000CA0000000002h country.exe dosxmgr.exe 421E0A00421EA823h QMC.EXE
-#>0x28 ubequad !0 \b, e_res2 0x%16.16llx
-# https://web.archive.org/web/20171116024937/http://www.ctyme.com/intr/rb-2939.htm#table1593
-# https://github.com/uxmal/reko/blob/master/src/ImageLoaders/MzExe/ExeImageLoader.cs
-# new exe header magic like: PE NE LE LX W3 W4
-# no examples found for ZM DL MP P2 P3
-#>(0x3c.l) string x \b, at [0x3c] %.2s
-#>(0x3c.l) ubelong x \b, at [0x3c] %#8.8x
-#>(0x3c.l+4) ubelong x \b, at [0x3c+4] %#8.8x
-#
-# Most non-DOS MZ-executable extensions have the relocation table more than 0x40 bytes into the file.
-# http://www.mitec.cz/Downloads/EXE.zip/EXE64.exe e_lfarlc=0x8ead
-# OS/2 ECS\INSTALL\DETECTEI\PCISCAN.EXE e_lfarlc=0x1c
-# some EFI apps Shell_Full.efi ext4_x64_signed.efi e_lfarlc=0
-# Icon library WORD60.ICL e_lfarlc=0
-# Microsoft compiled help format 2.0 WINWORD.DEV.HXS e_lfarlc=0
->0x18 uleshort <0x40
-# check magic of new second header
-# NE executable with low e_lfarlc like: WORD60.ICL
-# ICL: Icons Library 16-bit http://fileformats.archiveteam.org/wiki/Icon_library
->>(0x3c.l) string NE Windows Icons Library 16-bit
-!:mime image/x-ms-icl
-!:ext icl
-# handle LX executable with low e_lfarlc like: PCISCAN.EXE
->>(0x3c.l) string LX
->>>(0x3c.l) use lx-executable
-# skip Portable Executable (PE) with low e_lfarlc here, because handled later
-# like: ext4_x64_signed.efi Shell_Full.efi WINWORD.DEV.HXS
->>(0x3c.l) string PE
-# not New Executable (NE) and not PE with low e_lfarlc like:
-# MACCNV55.EXE WORK_RTF.EXE TELE200.EXE NDD.EXE iflash.exe
->>(0x3c.l) default x MS-DOS executable, MZ for MS-DOS
-!:mime application/x-dosexec
-# Windows and later versions of DOS will allow .EXEs to be named with a .COM
-# extension, mostly for compatibility's sake.
-# like: EDIT.COM 4DOS.COM CMD8086.COM CMD-FR.COM SYSLINUX.COM
-# URL: https://en.wikipedia.org/wiki/Personal_NetWare#VLM
-# Reference: https://mark0.net/download/triddefs_xml.7z/defs/e/exe-vlm-msg.trid.xml
-# also like: BGISRV.DRV
-!:ext exe/com/vlm/drv
-# These traditional tests usually work but not always. When test quality support is
-# implemented these can be turned on.
-#>>0x18 leshort 0x1c (Borland compiler)
-#>>0x18 leshort 0x1e (MS compiler)
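-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of how the MZ header fields tested above could be decoded from a buffer `buf`;
-# offsets follow the classic IMAGE_DOS_HEADER layout used by these rules.
-#   import struct
-#
-#   def mz_header(buf):
-#       e_cblp, e_cp = struct.unpack_from("<HH", buf, 2)      # bytes in last page, page count
-#       e_cparhdr = struct.unpack_from("<H", buf, 8)[0]       # header size in paragraphs
-#       e_lfarlc, e_ovno = struct.unpack_from("<HH", buf, 0x18)
-#       e_lfanew = struct.unpack_from("<I", buf, 0x3C)[0]     # offset of the new (PE/NE/LE/LX) header
-#       new_magic = buf[e_lfanew:e_lfanew + 2]
-#       return e_cblp, e_cp, e_cparhdr, e_lfarlc, e_ovno, e_lfanew, new_magic
-#
-# e_lfarlc < 0x40 usually means a plain MS-DOS MZ executable; otherwise new_magic
-# is checked for PE, NE, LE, LX, W3 or W4 as done in the rules that follow.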
-
-# Maybe it's a PE?
-# URL: http://fileformats.archiveteam.org/wiki/Portable_Executable
-# Reference: https://docs.microsoft.com/de-de/windows/win32/debug/pe-format
->(0x3c.l) string PE\0\0 PE
-!:mime application/vnd.microsoft.portable-executable
-# https://docs.microsoft.com/de-de/windows/win32/debug/pe-format#characteristics
-# DLL Characteristics
-#>>(0x3c.l+22) uleshort x \b, CHARACTERISTICS %#4.4x,
-# 0x0200~IMAGE_FILE_DEBUG_STRIPPED Debugging information is removed from the image file
-# 0x1000~IMAGE_FILE_SYSTEM The image file is a system file, not a user program.
-# 0x2000~IMAGE_FILE_DLL The image file is a dynamic-link library (DLL)
->>(0x3c.l+24) leshort 0x010b \b32 executable
-# https://learn.microsoft.com/en-us/windows/win32/debug/pe-format#windows-subsystem
-#>>>(0x3c.l+92) leshort x \b, SUBSYSTEM %u
->>(0x3c.l+24) leshort 0x020b \b32+ executable
-#>>>(0x3c.l+92) leshort x \b, SUBSYSTEM %u
->>(0x3c.l+24) leshort 0x0107 ROM image
->>(0x3c.l+24) default x Unknown PE signature
->>>&0 leshort x %#x
->>(0x3c.l+22) leshort&0x2000 >0 (DLL)
-# 0~IMAGE_SUBSYSTEM_UNKNOWN An unknown subsystem
->>(0x3c.l+92) leshort 0 (
-# Summary: Microsoft compiled help *.HXS format 2.0
-# URL: https://en.wikipedia.org/wiki/Microsoft_Help_2
-# Reference: http://www.russotto.net/chm/itolitlsformat.html
-# https://mark0.net/download/triddefs_xml.7z/defs/h/hxs.trid.xml
-# Note: 2 PE sections (.rsrc, .its) implies Microsoft compiled help format; the .its section contains the help content ITOLITLS
-# verified by command like `pelook.exe -d WINWORD.HXS & pelook.exe -h WINWORD.HXS`
->>>(0x3c.l+6) uleshort =2 \bMicrosoft compiled help format 2.0)
-!:ext hxs
-# 3 PE sections (.text, .reloc, .rsrc) implies some Control Panel Item like:
-# CPL: Control Panel item for WINE 1.7.28 https://www.winehq.org/
->>>(0x3c.l+6) uleshort !2 \bControl Panel Item)
-!:ext cpl
-# 1~IMAGE_SUBSYSTEM_NATIVE device drivers and native Windows processes
->>(0x3c.l+92) leshort 1
-# Native PEs include ntoskrnl.exe, hal.dll, smss.exe, autochk.exe, and all the
-# drivers in Windows/System32/drivers/*.sys.
->>>(0x3c.l+22) leshort&0x2000 >0 (native)
-!:ext dll/sys
->>>(0x3c.l+22) leshort&0x2000 0 (native)
-!:ext exe/sys
-# 2~IMAGE_SUBSYSTEM_WINDOWS_GUI The Windows graphical user interface (GUI) subsystem
->>(0x3c.l+92) leshort 2
->>>(0x3c.l+22) leshort&0x2000 >0 (GUI)
-# These could probably be at least partially distinguished from one another by
-# looking for specific exported functions.
-# CPL: Control Panel item
-# TLB: Type library
-# OCX: OLE/ActiveX control
-# ACM: Audio compression manager codec
-# AX: DirectShow source filter
-# IME: Input method editor
-!:ext dll/cpl/tlb/ocx/acm/ax/ime
->>>(0x3c.l+22) leshort&0x2000 0 (GUI)
-# Screen savers typically include code from the scrnsave.lib static library, but
-# that's not guaranteed.
-!:ext exe/scr
-# 3~IMAGE_SUBSYSTEM_WINDOWS_CUI The Windows character subsystem
->>(0x3c.l+92) leshort 3
->>>(0x3c.l+22) leshort&0x2000 >0 (console)
-!:ext dll/cpl/tlb/ocx/acm/ax/ime
->>>(0x3c.l+22) leshort&0x2000 0 (console)
-!:ext exe/com
-# NO Windows Subsystem number 4!
->>(0x3c.l+92) leshort 4 (Unknown subsystem 4)
-# 5~IMAGE_SUBSYSTEM_OS2_CUI The OS/2 character subsystem
->>(0x3c.l+92) leshort 5 (OS/2)
-# GRR: No examples found by Joerg Jenderek
-#!:ext foo-exe-os2
-# NO Windows Subsystem number 6!
->>(0x3c.l+92) leshort 6 (Unknown subsystem 6)
-# 7~IMAGE_SUBSYSTEM_POSIX_CUI The Posix character subsystem
->>(0x3c.l+92) leshort 7 (POSIX
->>>(0x3c.l+22) leshort&0x2000 >0 \b)
-# like: PSXDLL.DLL
-!:ext dll
->>>(0x3c.l+22) leshort&0x2000 0 \b)
-# like: PAX.EXE
-!:ext exe
-# 8~IMAGE_SUBSYSTEM_NATIVE_WINDOWS Native Win9x driver
->>(0x3c.l+92) leshort 8 (Win9x)
-# GRR: No examples found by Joerg Jenderek
-#!:ext foo-exe-win98
-# 9~IMAGE_SUBSYSTEM_WINDOWS_CE_GUI Windows CE
->>(0x3c.l+92) leshort 9 (Windows CE
->>>(0x3c.l+22) leshort&0x2000 >0 \b)
-# like: MCS9900Ce50.dll Mosiisr99x.dll TMCGPS.DLL
-!:ext dll
->>>(0x3c.l+22) leshort&0x2000 0 \b)
-# like: NNGStart.exe navigator.exe
-!:ext exe
-# 10~IMAGE_SUBSYSTEM_EFI_APPLICATION An Extensible Firmware Interface (EFI) application
->>(0x3c.l+92) leshort 10 (EFI application)
-# like: bootmgfw.efi grub.efi gdisk_x64.efi Shell_Full.efi shim.efi syslinux.efi
-!:ext efi
-# 11~IMAGE_SUBSYSTEM_EFI_BOOT_SERVICE_DRIVER An EFI driver with boot services
->>(0x3c.l+92) leshort 11 (EFI boot service driver)
-# like: ext2_x64_signed.efi Fat_x64.efi iso9660_x64_signed.efi
-!:ext efi
->>(0x3c.l+92) leshort 12 (EFI runtime driver)
-# no sample found
-!:ext efi
-# 13~IMAGE_SUBSYSTEM_EFI_ROM An EFI ROM image
->>(0x3c.l+92) leshort 13 (EFI ROM)
-# no sample found
-!:ext efi
-# 14~IMAGE_SUBSYSTEM_XBOX XBOX
->>(0x3c.l+92) leshort 14 (XBOX)
-#!:ext foo-xbox
-# NO Windows Subsystem number 15!
->>(0x3c.l+92) leshort 15 (Unknown subsystem 15)
-# 16~IMAGE_SUBSYSTEM_WINDOWS_BOOT_APPLICATION Windows boot application
->>(0x3c.l+92) leshort 16 (Windows boot application
->>>(0x3c.l+22) leshort&0x2000 >0 \b)
-# like: bootvhd.dll bootuwf.dll hvloader.dll tcbloader.dll bootspaces.dll
-!:ext dll
->>>(0x3c.l+22) leshort&0x2000 0 \b)
-# like: bootmgr.efi memtest.efi shellx64.efi memtest.exe winload.exe winresume.exe bootvhd.dll hvloader.dll
-!:ext efi/exe
-# GRR: the next 2 lines are not executed!
-#>>(0x3c.l+92) default x (Unknown subsystem
-#>>>&0 leshort x %#x)
->>(0x3c.l+92) leshort >16 (Unknown subsystem
->>>&0 leshort x %#x)
->>(0x3c.l+4) leshort 0x14c Intel 80386
->>(0x3c.l+4) leshort 0x166 MIPS R4000
->>(0x3c.l+4) leshort 0x168 MIPS R10000
->>(0x3c.l+4) leshort 0x184 Alpha
->>(0x3c.l+4) leshort 0x1a2 Hitachi SH3
->>(0x3c.l+4) leshort 0x1a3 Hitachi SH3 DSP
->>(0x3c.l+4) leshort 0x1a8 Hitachi SH5
->>(0x3c.l+4) leshort 0x169 MIPS WCE v2
->>(0x3c.l+4) leshort 0x1a6 Hitachi SH4
->>(0x3c.l+4) leshort 0x1c0 ARM
->>(0x3c.l+4) leshort 0x1c2 ARM Thumb
->>(0x3c.l+4) leshort 0x1c4 ARMv7 Thumb
->>(0x3c.l+4) leshort 0x1d3 Matsushita AM33
->>(0x3c.l+4) leshort 0x1f0 PowerPC
->>(0x3c.l+4) leshort 0x1f1 PowerPC with FPU
->>(0x3c.l+4) leshort 0x1f2 PowerPC (big-endian)
->>(0x3c.l+4) leshort 0x200 Intel Itanium
->>(0x3c.l+4) leshort 0x266 MIPS16
->>(0x3c.l+4) leshort 0x268 Motorola 68000
->>(0x3c.l+4) leshort 0x290 PA-RISC
->>(0x3c.l+4) leshort 0x366 MIPSIV
->>(0x3c.l+4) leshort 0x466 MIPS16 with FPU
->>(0x3c.l+4) leshort 0xebc EFI byte code
->>(0x3c.l+4) leshort 0x5032 RISC-V 32-bit
->>(0x3c.l+4) leshort 0x5064 RISC-V 64-bit
->>(0x3c.l+4) leshort 0x5128 RISC-V 128-bit
->>(0x3c.l+4) leshort 0x6232 LoongArch 32-bit
->>(0x3c.l+4) leshort 0x6264 LoongArch 64-bit
->>(0x3c.l+4) leshort 0x9041 Mitsubishi M32R
->>(0x3c.l+4) leshort 0x8664 x86-64
->>(0x3c.l+4) leshort 0xaa64 Aarch64
->>(0x3c.l+4) leshort 0xc0ee MSIL
-# GRR: the next 2 lines are not executed!
->>(0x3c.l+4) default x Unknown processor type
->>>&0 leshort x %#x
->>(0x3c.l+22) leshort&0x0200 >0 (stripped to external PDB)
->>(0x3c.l+22) leshort&0x1000 >0 system file
->>(0x3c.l+24) leshort 0x010b
->>>(0x3c.l+232) lelong >0 Mono/.Net assembly
->>(0x3c.l+24) leshort 0x020b
->>>(0x3c.l+248) lelong >0 Mono/.Net assembly
-
-# hooray, there's a DOS extender using the PE format, with a valid PE
-# executable inside (which just prints a message and exits if run in win)
->>(8.s*16) string 32STUB \b, 32rtm DOS extender
->>(8.s*16) string !32STUB \b, for MS Windows
->>(0x3c.l+0xf8) string UPX0 \b, UPX compressed
->>(0x3c.l+0xf8) search/0x140 PEC2 \b, PECompact2 compressed
->>(0x3c.l+0xf8) search/0x140 UPX2
->>>(&0x10.l+(-4)) string PK\3\4 \b, ZIP self-extracting archive (Info-Zip)
->>(0x3c.l+0xf8) search/0x140 .idata
->>>(&0xe.l+(-4)) string PK\3\4 \b, ZIP self-extracting archive (Info-Zip)
->>>(&0xe.l+(-4)) string ZZ0 \b, ZZip self-extracting archive
->>>(&0xe.l+(-4)) string ZZ1 \b, ZZip self-extracting archive
->>(0x3c.l+0xf8) search/0x140 .rsrc
->>>(&0x0f.l+(-4)) string a\\\4\5 \b, WinHKI self-extracting archive
->>>(&0x0f.l+(-4)) string Rar! \b, RAR self-extracting archive
->>>(&0x0f.l+(-4)) search/0x3000 MSCF \b, InstallShield self-extracting archive
->>>(&0x0f.l+(-4)) search/32 Nullsoft \b, Nullsoft Installer self-extracting archive
->>(0x3c.l+0xf8) search/0x140 .data
->>>(&0x0f.l) string WEXTRACT \b, MS CAB-Installer self-extracting archive
->>(0x3c.l+0xf8) search/0x140 .petite\0 \b, Petite compressed
->>>(0x3c.l+0xf7) byte x
->>>>(&0x104.l+(-4)) string =!sfx! \b, ACE self-extracting archive
->>(0x3c.l+0xf8) search/0x140 .WISE \b, WISE installer self-extracting archive
->>(0x3c.l+0xf8) search/0x140 .dz\0\0\0 \b, Dzip self-extracting archive
->>&(0x3c.l+0xf8) search/0x100 _winzip_ \b, ZIP self-extracting archive (WinZip)
->>&(0x3c.l+0xf8) search/0x100 SharedD \b, Microsoft Installer self-extracting archive
->>0x30 string Inno \b, InnoSetup self-extracting archive
-# NumberOfSections; normal dynamic-link libraries have a few sections for code, data, resources etc.
-# PE files used as containers have fewer sections
->>(0x3c.l+6) leshort >1 \b, %u sections
-# do not display the count for a single section, to match the output of version 5.43 and to keep output columns low
-#>>(0x3c.l+6) leshort =1 \b, %u section
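-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the PE fields tested by raw offset above, assuming the whole file is in `buf`;
-# offsets are relative to e_lfanew, matching the (0x3c.l+N) indirections.
-#   import struct
-#
-#   def pe_summary(buf):
-#       pe = struct.unpack_from("<I", buf, 0x3C)[0]                # e_lfanew
-#       if buf[pe:pe + 4] != b"PE\0\0":
-#           return None
-#       machine, nsections = struct.unpack_from("<HH", buf, pe + 4)
-#       characteristics = struct.unpack_from("<H", buf, pe + 22)[0]
-#       opt_magic = struct.unpack_from("<H", buf, pe + 24)[0]      # 0x10b=PE32, 0x20b=PE32+
-#       subsystem = struct.unpack_from("<H", buf, pe + 92)[0]      # 2=GUI, 3=console, 10-13=EFI
-#       return {"machine": machine, "sections": nsections,
-#               "dll": bool(characteristics & 0x2000),             # IMAGE_FILE_DLL
-#               "pe32plus": opt_magic == 0x20B, "subsystem": subsystem}
-#
-# The Mono/.Net tests above read the CLR runtime header data directory, which sits
-# at e_lfanew+232 for PE32 and e_lfanew+248 for PE32+.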
-
-# If the relocation table is 0x40 or more bytes into the file, it's definitely
-# not a DOS EXE.
->0x18 uleshort >0x3f
-
-# Hmm, not a PE but the relocation table is too high for a traditional DOS exe,
-# must be one of the unusual subformats.
->>(0x3c.l) string !PE\0\0 MS-DOS executable
-#!:mime application/x-dosexec
-
->>(0x3c.l) string NE \b, NE
-#!:mime application/x-dosexec
-!:mime application/x-ms-ne-executable
-# FOR DEBUGGING!
-# Reference: https://wiki.osdev.org/NE
-# ProgFlags; Program flags, bitmapped
-#>>>(0x3c.l+0x0C) ubyte x \b, ProgFlags 0x%2.2x
-# >>>(0x3c.l+0x0c) ubyte&0x03 =0 \b, none
-# >>>(0x3c.l+0x0c) ubyte&0x03 =1 \b, single shared
-# >>>(0x3c.l+0x0c) ubyte&0x03 =2 \b, multiple
-# >>>(0x3c.l+0x0c) ubyte&0x03 =3 \b, (null)
-# >>>(0x3c.l+0x0c) ubyte &0x04 \b, Global initialization
-# >>>(0x3c.l+0x0c) ubyte &0x08 \b, Protected mode only
-# >>>(0x3c.l+0x0c) ubyte &0x10 \b, 8086 instructions
-# >>>(0x3c.l+0x0c) ubyte &0x20 \b, 80286 instructions
-# >>>(0x3c.l+0x0c) ubyte &0x40 \b, 80386 instructions
-# >>>(0x3c.l+0x0c) ubyte &0x80 \b, 80x87 instructions
-# ApplFlags; Application flags, bitmapped
-# https://www.fileformat.info/format/exe/corion-ne.htm
-#>>>(0x3c.l+0x0D) ubyte x \b, ApplFlags 0x%2.2x
-# Application type (bits 0-2); 1~Full screen (not aware of Windows/P.M. API)
-# 2~Compatible with Windows/P.M. API 3~Uses Windows/P.M. API
-#>>>(0x3c.l+0x0D) ubyte&0x07 =1 \b, Full screen
-#>>>(0x3c.l+0x0D) ubyte&0x07 =2 \b, Compatible with Windows/P.M. API
-#>>>(0x3c.l+0x0D) ubyte&0x07 =3 \b, use Windows/P.M. API
-# bit 7; DLL or driver (SS:SP info invalid, CS:IP points at FAR init routine called with AX = handle)
-#>>>(0x3c.l+0x0D) ubyte &0x80 \b, DLL or driver
-# AutoDataSegIndex; automatic data segment index like: 0 2 3 22
-# zero if the SINGLEDATA and MULTIPLEDATA bits are cleared
-#>>>(0x3c.l+0x0e) uleshort x \b, AutoDataSegIndex %u
-# InitHeapSize; initial local heap size like: 0 400h 1400h
-# zero if there is no local allocation
-#>>>(0x3c.l+0x10) uleshort !0 \b, InitHeapSize 0x%x
-# InitStackSize; initial stack size like: 0 10h A00h 7D0h A8Ch FA0h 1000h 1388h
-# 1400h (CBT) 1800h 2000h 2800h 2EE0h 2F3Ch 3258h 3E80h 4000h 4E20h 5000h 6000h
-# 6D60h 8000h 40000h
-# zero if the SS register value does not equal the DS register value
-#>>>(0x3c.l+0x12) uleshort !0 \b, InitStackSize 0x%x
-# EntryPoint; segment offset value of CS:IP like: 0 10000h 18A84h 11C1Ah 307F1h
-#>>>(0x3c.l+0x14) ulelong !0 \b, EntryPoint 0x%x
-# InitStack; specifies the segment offset value of stack pointer SS:SP
-# like: 0 20000h 160000h
-#>>>(0x3c.l+0x18) ulelong !0 \b, InitStack 0x%x
-# SegCount; number of segments in segment table like: 0 1 2 3 16h
-#>>>(0x3c.l+0x1C) uleshort x \b, SegCount 0x%x
-# ModRefs; number of module references (DLLs) like: 0 1 3
-#>>>(0x3c.l+0x1E) uleshort !0 \b, ModRefs %u
-# NoResNamesTabSiz; size in bytes of non-resident names table
-# like: Bh 16h B4h B9h 2Ch 18Fh 16AAh
-#>>>(0x3c.l+0x20) uleshort x \b, NoResNamesTabSiz 0x%x
-# SegTableOffset; offset of Segment table like: 40h
-#>>>(0x3c.l+0x22) uleshort !0x40 \b, SegTableOffset 0x%x
-# ResTableOffset; offset of resources table like: 40h 50h 58h F0h
-# 40h for most fonts like dos737.fon FMFONT.FOT but 60h for L1WBASE.FON
-#>>>(0x3c.l+0x24) uleshort x \b, ResTableOffset 0x%x
-# ResidNamTable; offset of resident names table
-# like: 58h 5Ch 60h 68h 74h 98h 2E3h 2E7h 2F0h
-#>>>(0x3c.l+0x26) uleshort x \b, ResidNamTable 0x%x
-# ImportNameTable; offset of imported names table (array of counted strings, terminated with string of length 00h)
-# like: 77h 7Eh 80h C6h A7h ACh 2F8h 3FFh
-#>>>(0x3c.l+0x2a) uleshort x \b, ImportNameTable 0x%x
-# OffStartNonResTab; offset from start of file to non-resident names table
-# like: 110h 11Dh 19Bh 1A5h 3F5h 4C8h 4EEh D93h
-#>>>(0x3c.l+0x2c) ulelong x \b, OffStartNonResTab 0x%x
-# MovEntryCount; number of movable entry points like: 0 4 5 6 16 17 24 312 355 446
-#>>>(0x3c.l+0x30) uleshort !0 \b, MovEntryCount %u
-# FileAlnSzShftCnt; log2 of the segment sector size; 4~16 0~9~512 (default)
-#>>>(0x3c.l+0x32) uleshort !9 \b, FileAlnSzShftCnt %u
-# nResTabEntries; number of resource table entries like: 0 2
-#>>>(0x3c.l+0x34) uleshort !0 \b, nResTabEntries %u
-# targOS; Target OS; 0~unknown~OS/2 1.0 or MS Windows 1-2
-# OS/2 1.0 like: DTM.DLL SHELL11F.EXE HELPMSG.EXE CREATEDD.EXE
-# or Windows 1.03 - 2.1 like: MSDOSD.EXE KARTEI.EXE KALENDER.EXE
-#>>>(0x3c.l+0x36) byte x TARGOS %x
->>>(0x3c.l+0x36) byte 0 for OS/2 1.0 or MS Windows 1-2
->>>(0x3c.l+0x36) byte 1 for OS/2 1.x
->>>(0x3c.l+0x36) byte 2 for MS Windows 3.x
->>>(0x3c.l+0x36) byte 3 for MS-DOS
->>>(0x3c.l+0x36) byte 4 for Windows 386
->>>(0x3c.l+0x36) byte 5 for Borland Operating System Services
-# http://downloads.sourceforge.net/dfendreloaded/D-Fend-Reloaded-1.4.4.zip
-# D-Fend Reloaded/VirtualHD/FREEDOS/DPMILD32.EXE
-# GRR: WHAT OS is this?
-#>>>(0x3c.l+0x36) byte 6 for TARGET SIX
-# https://en.wikipedia.org/wiki/Phar_Lap_(company)
->>>(0x3c.l+0x36) byte 0x81 for MS-DOS, Phar Lap DOS extender, OS/2
-# like: CVP7.EXE
->>>(0x3c.l+0x36) byte 0x82 for MS-DOS, Phar Lap DOS extender, Windows
->>>(0x3c.l+0x36) default x
->>>>(0x3c.l+0x36) ubyte x (unknown OS %#x)
-# expctwinver; expected Windows version (minor first) like:
-# 0.0~DTM.DLL 203.4~Windows 1.03 GDI.EXE 2.1~TTY.DRV 3.0~dos737.fon FMFONT.FOT THREED.VBX 3.10~GDI.EXE 4.0~(ME) VGAFULL.3GR
->>>(0x3c.l+0x3F) ubyte x (%u
->>>(0x3c.l+0x3E) ubyte x \b.%u)
-# OS2EXEFlags; other EXE flags
-# 0~Long filename support 1~2.x protected mode 4~2.x proportional fonts 8~Executable has gangload area
-#>>>(0x3c.l+0x37) byte !0 \b, OS2EXEFlags 0x%x
-# retThunkOffset; offset to return thunks or start of gangload area like: 0 34h 58h 246h
-#>>>(0x3c.l+0x38) uleshort !0 \b, retThunkOffset 0x%x
-# segrefthunksoff; offset to segment reference thunks or size of gangload area
-# like: 0 33Eh 39Ah AEEh
-#>>>(0x3c.l+0x3A) uleshort !0 \b, segrefthunksoff 0x%x
-# mincodeswap; minimum code swap area size like 0 620Ch
-#>>>(0x3c.l+0x3C) uleshort !0 \b, mincodeswap 0x%x
->>>(0x3c.l+0x0c) leshort&0x8000 0x8000 (DLL or font)
-# DRV: Driver
-# 3GR: Grabber device driver
-# CPL: Control Panel Item
-# VBX: Visual Basic Extension https://en.wikipedia.org/wiki/Visual_Basic
-# FON: Bitmap font http://fileformats.archiveteam.org/wiki/FON
-# FOT: Font resource file
-# EXE: WINSPOOL.EXE USER.EXE krnl386.exe GDI.EXE
-# CNV: Microsoft Word text conversion https://www.file-extensions.org/cnv-file-extension-microsoft-word-text-conversion-data
-!:ext dll/drv/3gr/cpl/vbx/fon/fot
->>>(0x3c.l+0x0c) leshort&0x8000 0 (EXE)
-!:ext exe/scr
->>>&(&0x24.s-1) string ARJSFX \b, ARJ self-extracting archive
->>>(0x3c.l+0x70) search/0x80 WinZip(R)\ Self-Extractor \b, ZIP self-extracting archive (WinZip)
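-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the NE fields used above, read relative to the header offset stored at 0x3c.
-#   import struct
-#
-#   def ne_summary(buf):
-#       ne = struct.unpack_from("<I", buf, 0x3C)[0]
-#       if buf[ne:ne + 2] != b"NE":
-#           return None
-#       flags = struct.unpack_from("<H", buf, ne + 0x0C)[0]    # ProgFlags/ApplFlags pair
-#       targ_os = buf[ne + 0x36]                               # 1=OS/2, 2=Windows 3.x, 3=DOS ...
-#       win_minor, win_major = buf[ne + 0x3E], buf[ne + 0x3F]  # expected Windows version
-#       return {"dll_or_driver": bool(flags & 0x8000),
-#               "target_os": targ_os,
-#               "expected_windows": "%u.%u" % (win_major, win_minor)}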
-
->>(0x3c.l) string LX\0\0 \b, LX
-!:mime application/x-dosexec
->>>(0x3c.l+0x0a) leshort <1 (unknown OS)
->>>(0x3c.l+0x0a) leshort 1 for OS/2
->>>(0x3c.l+0x0a) leshort 2 for MS Windows
->>>(0x3c.l+0x0a) leshort 3 for DOS
->>>(0x3c.l+0x0a) leshort >3 (unknown OS)
->>>(0x3c.l+0x10) lelong&0x28000 =0x8000 (DLL)
->>>(0x3c.l+0x10) lelong&0x20000 >0 (device driver)
->>>(0x3c.l+0x10) lelong&0x300 0x300 (GUI)
->>>(0x3c.l+0x10) lelong&0x28300 <0x300 (console)
->>>(0x3c.l+0x08) leshort 1 i80286
->>>(0x3c.l+0x08) leshort 2 i80386
->>>(0x3c.l+0x08) leshort 3 i80486
->>>(8.s*16) string emx \b, emx
->>>>&1 string x %s
->>>&(&0x54.l-3) string arjsfx \b, ARJ self-extracting archive
-
-# MS Windows system file, supposedly a collection of LE executables
-# like vmm32.vxd WIN386.EXE
->>(0x3c.l) string W3 \b, W3 for MS Windows
-#!:mime application/x-dosexec
-!:mime application/x-ms-w3-executable
-!:ext vxd/exe
-# W4 executable
->>(0x3c.l) string W4 \b, W4 for MS Windows
-#!:mime application/x-dosexec
-!:mime application/x-ms-w4-executable
-# windows 98 VMM32.VXD
-!:ext vxd
-
->>(0x3c.l) string LE\0\0 \b, LE executable
-!:mime application/x-dosexec
->>>(0x3c.l+0x0a) leshort 1
-# some DOS extenders use LE files with OS/2 header
->>>>0x240 search/0x100 DOS/4G for MS-DOS, DOS4GW DOS extender
->>>>0x240 search/0x200 WATCOM\ C/C++ for MS-DOS, DOS4GW DOS extender
->>>>0x440 search/0x100 CauseWay\ DOS\ Extender for MS-DOS, CauseWay DOS extender
->>>>0x40 search/0x40 PMODE/W for MS-DOS, PMODE/W DOS extender
->>>>0x40 search/0x40 STUB/32A for MS-DOS, DOS/32A DOS extender (stub)
->>>>0x40 search/0x80 STUB/32C for MS-DOS, DOS/32A DOS extender (configurable stub)
->>>>0x40 search/0x80 DOS/32A for MS-DOS, DOS/32A DOS extender (embedded)
-# this is a wild guess; hopefully it is a specific signature
->>>>&0x24 lelong <0x50
->>>>>(&0x4c.l) string \xfc\xb8WATCOM
->>>>>>&0 search/8 3\xdbf\xb9 \b, 32Lite compressed
-# another wild guess: if real OS/2 LE executables exist, they probably have higher start EIP
-#>>>>(0x3c.l+0x1c) lelong >0x10000 for OS/2
-# fails with DOS-Extenders.
->>>(0x3c.l+0x0a) leshort 2 for MS Windows
->>>(0x3c.l+0x0a) leshort 3 for DOS
->>>(0x3c.l+0x0a) leshort 4 for MS Windows (VxD)
-# VXD: VxD for Windows 95/98/Me
-# 386: VxD for Windows 2.10, 3.0, 3.1x
-# PDR: Port driver
-# MPD: Miniport driver (?)
-!:ext vxd/386/pdr/mpd
->>>(&0x7c.l+0x26) string UPX \b, UPX compressed
->>>&(&0x54.l-3) string UNACE \b, ACE self-extracting archive
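-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the LE/LX fields tested above (cpu_type at +0x08, os_type at +0x0a, module flags at +0x10).
-#   import struct
-#
-#   def le_lx_summary(buf):
-#       hdr = struct.unpack_from("<I", buf, 0x3C)[0]
-#       sig = buf[hdr:hdr + 2]
-#       if sig not in (b"LE", b"LX"):
-#           return None
-#       cpu, os_type = struct.unpack_from("<HH", buf, hdr + 0x08)
-#       flags = struct.unpack_from("<I", buf, hdr + 0x10)[0]
-#       return {"format": sig.decode("ascii"),
-#               "cpu": {1: "i80286", 2: "i80386", 3: "i80486"}.get(cpu, cpu),
-#               "os": {1: "OS/2", 2: "MS Windows", 3: "DOS", 4: "MS Windows (VxD)"}.get(os_type, os_type),
-#               "dll": bool(flags & 0x8000), "device_driver": bool(flags & 0x20000),
-#               "gui": (flags & 0x300) == 0x300}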
-
-# looks like ASCII, probably some embedded copyright message.
-# and definitely not NE/LE/LX/PE
->>0x3c lelong >0x20000000
->>>(4.s*512) leshort !0x014c \b, MZ for MS-DOS
-!:mime application/x-dosexec
-!:ext exe/com
-# header data too small for extended executable
->2 long !0
->>0x18 uleshort <0x40
->>>(4.s*512) leshort !0x014c
-
->>>>&(2.s-514) string !LE
->>>>>&-2 string !BW
-#>>>>>>(0x3c.l) string x \b, 2ND MAGIC %.2s
-# but some LX executables also appear here, like: PCISCAN.EXE
->>>>>>(0x3c.l) string !LX
-# because Portable Executable (PE) is already handled above, skip many here like:
-# xcopy32.exe stinger64.exe WimUtil.exe
-# no such DOS examples found, and
-# DOS examples seem to be handled already by e_lfarlc <0x40 like: CMD8086.COM CMD-FR.COM
->>>>>>>(0x3c.l) string !PE \b, MZ for MS-DOS
-!:mime application/x-dosexec
->>>>&(2.s-514) string LE \b, LE
->>>>>0x240 search/0x100 DOS/4G for MS-DOS, DOS4GW DOS extender
-# educated guess since indirection is still not capable enough for complex offset
-# calculations (next embedded executable would be at &(&2*512+&0-2))
-# I suspect there are only LE executables in these multi-exe files
->>>>&(2.s-514) string BW
->>>>>0x240 search/0x100 DOS/4G \b, LE for MS-DOS, DOS4GW DOS extender (embedded)
->>>>>0x240 search/0x100 !DOS/4G \b, BW collection for MS-DOS
-
-# This sequence skips to the first COFF segment, usually .text
->(4.s*512) leshort 0x014c \b, COFF
-!:mime application/x-dosexec
->>(8.s*16) string go32stub for MS-DOS, DJGPP go32 DOS extender
->>(8.s*16) string emx
->>>&1 string x for DOS, Win or OS/2, emx %s
->>&(&0x42.l-3) byte x
->>>&0x26 string UPX \b, UPX compressed
-# yet another guess: a small .text followed by a large .data is unusual, could be 32lite
->>&0x2c search/0xa0 .text
->>>&0x0b lelong <0x2000
->>>>&0 lelong >0x6000 \b, 32lite compressed
-
->(8.s*16) string $WdX \b, WDos/X DOS extender
-
-# By now an executable type should have been printed out. The executable
-# may be a self-uncompressing archive, so look for evidence of that and
-# print it out.
-#
-# Some signatures below from Greg Roelofs, newt@uchicago.edu.
-#
->0x35 string \x8e\xc0\xb9\x08\x00\xf3\xa5\x4a\x75\xeb\x8e\xc3\x8e\xd8\x33\xff\xbe\x30\x00\x05 \b, aPack compressed
->0xe7 string LH/2\ Self-Extract \b, %s
->0x1c string UC2X \b, UCEXE compressed
->0x1c string WWP\ \b, WWPACK compressed
->0x1c string RJSX \b, ARJ self-extracting archive
->0x1c string diet \b, diet compressed
->0x1c string LZ09 \b, LZEXE v0.90 compressed
->0x1c string LZ91 \b, LZEXE v0.91 compressed
->0x1c string tz \b, TinyProg compressed
->0x1e string Copyright\ 1989-1990\ PKWARE\ Inc. Self-extracting PKZIP archive
-!:mime application/zip
-# Yes, this really is "Copr", not "Corp."
->0x1e string PKLITE\ Copr. Self-extracting PKZIP archive
-!:mime application/zip
-# winarj stores a message in the stub instead of the sig in the MZ header
->0x20 search/0xe0 aRJsfX \b, ARJ self-extracting archive
->0x20 string AIN
->>0x23 string 2 \b, AIN 2.x compressed
->>0x23 string <2 \b, AIN 1.x compressed
->>0x23 string >2 \b, AIN 1.x compressed
->0x24 string LHa's\ SFX \b, LHa self-extracting archive
-!:mime application/x-lha
->0x24 string LHA's\ SFX \b, LHa self-extracting archive
-!:mime application/x-lha
->0x24 string \ $ARX \b, ARX self-extracting archive
->0x24 string \ $LHarc \b, LHarc self-extracting archive
->0x20 string SFX\ by\ LARC \b, LARC self-extracting archive
->0x40 string aPKG \b, aPackage self-extracting archive
->0x64 string W\ Collis\0\0 \b, Compack compressed
->0x7a string Windows\ self-extracting\ ZIP \b, ZIP self-extracting archive
->>&0xf4 search/0x140 \x0\x40\x1\x0
->>>(&0.l+(4)) string MSCF \b, WinHKI CAB self-extracting archive
->1638 string -lh5- \b, LHa self-extracting archive v2.13S
->0x17888 string Rar! \b, RAR self-extracting archive
-
-# Skip to the end of the EXE. This will usually work fine in the PE case
-# because the MZ image is hardcoded into the toolchain and almost certainly
-# won't match any of these signatures.
->(4.s*512) long x
->>&(2.s-517) byte x
->>>&0 string PK\3\4 \b, ZIP self-extracting archive
->>>&0 string Rar! \b, RAR self-extracting archive
->>>&0 string =!\x11 \b, AIN 2.x self-extracting archive
->>>&0 string =!\x12 \b, AIN 2.x self-extracting archive
->>>&0 string =!\x17 \b, AIN 1.x self-extracting archive
->>>&0 string =!\x18 \b, AIN 1.x self-extracting archive
->>>&7 search/400 **ACE** \b, ACE self-extracting archive
->>>&0 search/0x480 UC2SFX\ Header \b, UC2 self-extracting archive
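-#
-# Illustrative Python sketch (not magic syntax; helper names invented for this comment):
-# the "(4.s*512)"/"(2.s-517)" arithmetic above approximates the end of the MZ load
-# module, after which appended SFX payloads start; the usual formula from e_cblp/e_cp is:
-#   import struct
-#
-#   def mz_overlay_offset(buf):
-#       e_cblp, e_cp = struct.unpack_from("<HH", buf, 2)
-#       # full 512-byte pages, minus the unused part of the last page
-#       return e_cp * 512 if e_cblp == 0 else (e_cp - 1) * 512 + e_cblp
-#
-#   def sfx_hint(buf):
-#       tail = buf[mz_overlay_offset(buf):][:8]
-#       if tail.startswith(b"PK\x03\x04"):
-#           return "ZIP self-extracting archive"
-#       if tail.startswith(b"Rar!"):
-#           return "RAR self-extracting archive"
-#       return None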
-
-# a few unknown ZIP sfxes, no idea if they are needed or if they are
-# already captured by the generic patterns above
->(8.s*16) search/0x20 PKSFX \b, ZIP self-extracting archive (PKZIP)
-# TODO: how to add this? >FileSize-34 string Windows\ Self-Installing\ Executable \b, ZIP self-extracting archive
-#
-
-# TELVOX Teleinformatica CODEC self-extractor for OS/2:
->49801 string \x79\xff\x80\xff\x76\xff \b, CODEC archive v3.21
->>49824 leshort =1 \b, 1 file
->>49824 leshort >1 \b, %u files
-
-# Summary: OS/2 LX Library and device driver (no DOS stub)
-# From: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/EXE
-# Reference: http://www.textfiles.com/programming/FORMATS/lxexe.txt
-# https://github.com/open-watcom/open-watcom-v2/blob/master/bld/watcom/h/exeflat.h
-# Note: by dll-os2-no-dos-stub.trid.xml called "OS/2 Dynamic Link Library (no DOS stub)"
-# TODO: unify with DOS stub variant (MZ magic)
-0 string/b LX
->2 ushort =0
->>0 use lx-executable
-# no examples found for big endian variant
->2 ushort =0x0101
->>0 use \^lx-executable
-0 name lx-executable
-# looks similar to the variant with MS-DOS stub (MZ magic): "MS-DOS executable, LX"
-#>0x00 uleshort x executable,
-# signature OSF_FLAT_LX_SIGNATURE~0x584C~LX OSF_FLAT_SIGNATURE~0x454C~LE
->0x00 uleshort =0x584c LX
->0x00 uleshort =0x454C LE
->0x00 uleshort x executable
-#!:mime application/x-msdownload
-!:mime application/x-lx-executable
-!:ext exe
-# byte order: 00h~little-endian non-zero=1~big-endian
-#>0x02 ubyte =0 (little-endian)
->0x02 ubyte !0 (big-endian)
-# FOR DEBUGGING!
-# word order: 00h~little-endian non-zero=1~big-endian
-#>0x03 ubyte =0 \b, little-endian word order
-#>0x03 ubyte !0 \b, big-endian word order
-# cpu_type; CPU type like: 1~286 2~386 3~486 4 20h~i860 21h~Intel N11 40h~MIPS R2000,R3000 41h~MIPS R6000 42h~MIPS R4000
-#>0x08 uleshort x \b, CPU %u
-# os_type; target operating system like: 0~unknown 1~OS/2 2~Windows 3~DOS 4.x 4~Windows 386
-#>0x0A leshort x \b, OS %u
-# flags; module type flags
-#>0x10 ulelong x \b, FLAGS %#8.8x
-# 00000002h ~Reserved for system use
-#>0x10 ulelong &0x00000002 \b, 2h reserved
-# OSF_INIT_INSTANCE=00000004h ~Per-Process Library Initialization; setting this bit for EXE file is invalid
-#>0x10 ulelong &0x00000004 \b, per-process library Initialization
-# OSF_INTERNAL_FIXUPS_DONE=00000010h ~Internal fixups for the module have been applied
-#>0x10 ulelong &0x00000010 \b, int. fixup
-# OSF_EXTERNAL_FIXUPS_DONE=00000020h ~External fixups for the module have been applied
-#>0x10 ulelong &0x00000020 \b, ext. fixup
-# OSF_NOT_PM_COMPATIBLE=00000100h ~Incompatible with PM windowing
-#>0x10 ulelong&0x00000100 =0x00000100 \b, incompatible with PM windowing
-# OSF_PM_COMPATIBLE=00000200h ~Compatible with PM windowing
-#>0x10 ulelong&0x00000200 =0x00000200 \b, compatible with PM windowing
-# bit 17; device driver
-#>0x10 ulelong&0x00020000 >0 \b, device driver
-# Per-process Library Termination; setting this bit for EXE file is invalid
-#>0x10 ulelong&0x40000000 =0x40000000 \b, per-process library termination
->0x0a leshort 1 for OS/2
-# no example found
->0x0a leshort 3 for DOS
-# http://www.ctyme.com/intr/rb-2939.htm#Table1610
-# library by module type mask 00038000h (bits 15-17);
-# 0h ~executable Program module
->0x10 ulelong&0x00038000 =0x00000000 (program)
-#!:ext exe
-# OSF_IS_DLL=8000h ~Library module (DLL)
->0x10 ulelong&0x00038000 >0x00000000
-# OSF_PHYS_DEVICE=00020000h ~device driver
->>0x10 ulelong&0x00020000 >0 (device driver)
-!:ext sys
-# if not device driver it is library (DLL)
->>0x10 ulelong&0x00020000 =0 (library)
-!:ext dll
-# bits 8-10; OSF_PM_APP=300h in flags ~Uses PM windowing API; either it is GUI or console
->0x10 ulelong&0x00000300 =0x00000300 (GUI)
->0x10 ulelong&0x00000300 !0x00000300 (console)
-# CPU type
->0x08 uleshort 1 i80286
-# all inspected examples
->0x08 uleshort 2 i80386
->0x08 uleshort 3 i80486
->0x08 uleshort 4 i80586
-# 21h Intel "N11" or compatible
-# 40h MIPS Mark I ( R2000, R3000) or compatible
-# 41h MIPS Mark II ( R6000 ) or compatible
-# 42h MIPS Mark III ( R4000 ) or compatible
-
-# added by Joerg Jenderek of https://www.freedos.org/software/?prog=kc
-# and https://www.freedos.org/software/?prog=kpdos
-# for FreeDOS files like KEYBOARD.SYS, KEYBRD2.SYS, KEYBRD3.SYS, *.KBD
-0 string/b KCF FreeDOS KEYBoard Layout collection
-# only version=0x100 found
->3 uleshort x \b, version %#x
-# length of string containing author,info and special characters
->6 ubyte >0
-#>>6 pstring x \b, name=%s
->>7 string >\0 \b, author=%-.14s
->>7 search/254 \xff \b, info=
-#>>>&0 string x \b%-s
->>>&0 string x \b%-.15s
-# for FreeDOS *.KL files
-0 string/b KLF FreeDOS KEYBoard Layout file
-# only version=0x100 or 0x101 found
->3 uleshort x \b, version %#x
-# stringlength
->5 ubyte >0
->>8 string x \b, name=%-.2s
-0 string \xffKEYB\ \ \ \0\0\0\0
->12 string \0\0\0\0`\004\360 MS-DOS KEYBoard Layout file
-
-# DOS device driver updated by Joerg Jenderek in May 2011, Mar 2017, Aug 2020, Mar 2023
-# URL: http://fileformats.archiveteam.org/wiki/DOS_device_driver
-# Reference: http://www.delorie.com/djgpp/doc/rbinter/it/46/16.html
-# http://www.o3one.org/hwdocs/bios_doc/dosref22.html
-0 ulequad&0x07a0ffffffff 0xffffffff
-# skip OS/2 INI ./os2
->4 ubelong !0x14000000
-#>>10 ubequad x MAYBE_DRIVER_NAME=%16.16llx
-# https://bugs.astron.com/view.php?id=434
-# skip OOXML document fragment 0000.dat where driver name is "empty" instead of "ASCII like"
->>10 ubequad !0
->>>0 use msdos-driver
-0 name msdos-driver DOS executable (
-#!:mime application/octet-stream
-!:mime application/x-dosdriver
-# also found FreeDOS print driver SPOOL.DEV and disc compression driver STACLOAD.BIN
-# and IBM Token-Ring adapter IBMTOK.DOS. Why and when is DOS used instead of SYS?
-# PROTMAN.DOS ELNKPL.DOS
-!:ext sys/dev/bin/dos
-# 1 space char after "UPX compressed" to get phrase like "UPX compressed character device"
->40 search/7 UPX! \bUPX compressed
-# DOS device driver attributes
->4 uleshort&0x8000 0x0000 \bblock device driver
-# character device
->4 uleshort&0x8000 0x8000 \b
-# 1 space char after "clock" to get phrase like "clock character device driver CLOCK$"
->>4 uleshort&0x0008 0x0008 \bclock
-# fast video output by int 29h
-# 1 space char after "fast" to get phrase like "fast standard input/output character device driver"
->>4 uleshort&0x0010 0x0010 \bfast
-# standard input/output device
-# 1 space char after "standard" to get phrase like "standard input/output character device driver"
->>4 uleshort&0x0003 >0 \bstandard
->>>4 uleshort&0x0001 0x0001 \binput
->>>4 uleshort&0x0003 0x0003 \b/
-# 1 space char after "output" to get phrase like "input/output character device driver"
->>>4 uleshort&0x0002 0x0002 \boutput
->>4 uleshort&0x8000 0x8000 \bcharacter device driver
->0 ubyte x
-# a UPX compressed device driver has garbage instead of a real name in the name field of the header
->>40 search/7 UPX!
->>40 default x
-# leading/trailing nulls, zeros or non ASCII characters in 8-byte name field at offset 10 are skipped
-# 1 space char before device driver name to get phrase like "device driver PROTMAN$" "device driver HP-150II" "device driver PC$MOUSE"
->>>12 ubyte >0x23 \b
->>>>10 ubyte >0x20
->>>>>10 ubyte !0x2E
->>>>>>10 ubyte !0x2A \b%c
->>>>11 ubyte >0x20
->>>>>11 ubyte !0x2E \b%c
->>>>12 ubyte >0x20
->>>>>12 ubyte !0x39
->>>>>>12 ubyte !0x2E \b%c
->>>13 ubyte >0x20
->>>>13 ubyte !0x2E \b%c
->>>>14 ubyte >0x20
->>>>>14 ubyte !0x2E \b%c
->>>>15 ubyte >0x20
->>>>>15 ubyte !0x2E \b%c
->>>>16 ubyte >0x20
->>>>>16 ubyte !0x2E
->>>>>>16 ubyte <0xCB \b%c
->>>>17 ubyte >0x20
->>>>>17 ubyte !0x2E
->>>>>>17 ubyte <0x90 \b%c
-# some character device drivers like ASPICD.SYS, btcdrom.sys and Cr_atapi.sys contain only spaces or dots in the name field
->>>12 ubyte <0x2F
-# they have their real name at offset 22
-# also block device drivers like DUMBDRV.SYS
->>>>22 string >\056 %-.6s
->4 uleshort&0x8000 0x0000
-# 32 bit sector addressing ( > 32 MB) for block devices
->>4 uleshort&0x0002 0x0002 \b,32-bit sector-
-# support by driver functions 13h, 17h, 18h
->4 uleshort&0x0040 0x0040 \b,IOCTL-
-# open, close, removable media support by driver functions 0Dh, 0Eh, 0Fh
->4 uleshort&0x0800 0x0800 \b,close media-
-# output until busy support by int 10h for character device driver
->4 uleshort&0x8000 0x8000
->>4 uleshort&0x2000 0x2000 \b,until busy-
-# direct read/write support by driver functions 03h,0Ch
->4 uleshort&0x4000 0x4000 \b,control strings-
->4 uleshort&0x8000 0x8000
->>4 uleshort&0x6840 >0 \bsupport
->4 uleshort&0x8000 0x0000
->>4 uleshort&0x4842 >0 \bsupport
->0 ubyte x \b)
->0 ulelong !0xffffffff with pointer %#x
-# DOS driver cmd640x.sys has 0x12 instead of 0xffffffff for pointer field to next device header
-0 ulequad 0x0513c00000000012
->0 use msdos-driver
-# DOS drivers DC2975.SYS, DUMBDRV.SYS, ECHO.SYS also have a value other than 0xffffffff in the pointer field
-0 ulequad 0x32f28000ffff0016
->0 use msdos-driver
-0 ulequad 0x007f00000000ffff
->0 use msdos-driver
-# https://www.uwe-sieber.de/files/cfg_echo.zip
-0 ulequad 0x001600000000ffff
->0 use msdos-driver
-# DOS drivers LS120.SYS, MKELS120.SYS use reserved bits of attribute field
-0 ulequad 0x0bf708c2ffffffff
->0 use msdos-driver
-0 ulequad 0x07bd08c2ffffffff
->0 use msdos-driver
-# 3Com EtherLink 3C501 CID\SERVER\IBMLS\IBM500D1\DLSNETDR.ZIP\ELNK.DOS
-0 ulequad 0x027ac0c0ffffffff
->0 use msdos-driver
-# IBM Streamer CID\SERVER\IBMLS\IBM500D1\DLSNETDR.ZIP\IBMMPC.DOS
-0 ulequad 0x00228880ffffffff
->0 use msdos-driver
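-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the DOS device driver header tested by msdos-driver: next-header pointer at 0,
-# attribute word at 4, 8-byte blank-padded name at 10 for character drivers.
-#   import struct
-#
-#   def dos_driver_summary(buf):
-#       next_ptr, attr = struct.unpack_from("<IH", buf, 0)
-#       is_char = bool(attr & 0x8000)
-#       info = {"next_header": next_ptr,                   # usually 0xffffffff
-#               "kind": "character" if is_char else "block",
-#               "ioctl": bool(attr & 0x0040),
-#               "open_close_removable": bool(attr & 0x0800)}
-#       if is_char:
-#           info["name"] = buf[10:18].decode("ascii", "replace").rstrip()
-#       return info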
-
-# updated by Joerg Jenderek
-# GRR: line below is too general as it also catches
-# rt.lib DYADISKS.PIC and many more
-# starts with assembler instruction MOV
-0 ubyte 0x8c
-# skip "AppleWorks word processor data" like ARTICLE.1 ./apple
->4 string !O====
-# skip some unknown basic binaries like RocketRnger.SHR
->>5 string !MAIN
-# skip "GPG symmetrically encrypted data" ./gnu
-# skip "PGP symmetric key encrypted data" ./pgp
-# openpgpdefs.h: fourth byte < 14 indicate cipher algorithm type
->>>4 ubyte >13
->>>>0 use msdos-com
-# the remaining files should be DOS *.COM executables
-# dosshell.COM 8cc0 2ea35f07 e85211 e88a11 b80058 cd
-# hmload.COM 8cc8 8ec0 bbc02b 89dc 83c30f c1eb04 b4
-# UNDELETE.COM 8cca 2e8916 6503 b430 cd21 8b 2e0200 8b
-# BOOTFIX.COM 8cca 2e8916 9603 b430 cd21 8b 2e0200 8b
-# RAWRITE3.COM 8cca 2e8916 d602 b430 cd21 8b 2e0200 8b
-# SHARE.COM 8cca 2e8916 d602 b430 cd21 8b 2e0200 8b
-# validchr.COM 8cca 2e8916 9603 b430 cd21 8b 2e028b1e
-# devload.COM 8cca 8916ad01 b430 cd21 8b2e0200 892e
-
-0 name msdos-com
-# URL: http://fileformats.archiveteam.org/wiki/DOS_executable_(.com)
->0 byte x DOS executable (
-# DOS executable with JuMP 16-bit instruction
->0 byte =0xE9
-# check for probable nil padding until offset 64 of the Lotus driver name
->>56 quad =0
-# check for "long" alphabetic Lotus driver name like:
-# Diablo "COMPAQ Text Display" "IBM Monochrome Display" "Plantronics ColorPlus"
->>>24 regex =^[A-Z][A-Za-z\040]{5,21} \bLotus driver) %s
-!:mime application/x-dosexec
-# like: CPQ0TD.DRV IBM0MONO.DRV (Lotus 123 10a) SDIAB4.DRV SPL0CPLS.DRV (Lotus Symphony 2)
-!:ext drv
-# COM with nils like MODE.COM IBMDOS.COM (pcdos 3.31 ru Compaq) RSSTUB.COM (PC-DOS 2000 de) ACCESS.COM (Lotus Symphony 1)
->>>24 default x \bCOM)
-!:mime application/x-dosexec
-!:ext com
-# DOS executable with JuMP 16-bit and without nil padding
->>56 quad !0
-# https://wiki.syslinux.org/wiki/index.php?title=Doc/comboot
-# TODO: HOWTO distinguish COMboot from pure DOS executables?
-# look for unreliable Syslinux specific api call INTerrupt 22h for 16-bit COMBOOT program
->>>1 search/0xc088 \xcd\x22 \bCOM or COMBOOT 16-bit)
-!:mime application/x-dosexec
-# like: sbm.cbt command.com (Windows XP) UNI2ASCI.COM (FreeDOS 1.2)
-!:ext com/cbt
->>>1 default x \bCOM)
-!:mime application/x-dosexec
-!:ext com
-# DOS executable without JuMP 16-bit instruction
->0 byte !0xE9
-# SCREATE.SYS https://en.wikipedia.org/wiki/Stac_Electronics
->>10 string =?STACVOL \bSCREATE.SYS)
-!:mime application/x-dosexec
-!:ext sys
-# COM executable without JuMP 16-bit instruction and not SCREATE.SYS
->>10 string !?STACVOL \bCOM)
-!:mime application/x-dosexec
-!:ext com
->6 string SFX\ of\ LHarc \b, %s
->0x1FE leshort 0xAA55 \b, boot code
->85 string UPX \b, UPX compressed
->4 string \ $ARX \b, ARX self-extracting archive
->4 string \ $LHarc \b, LHarc self-extracting archive
->0x20e string SFX\ by\ LARC \b, LARC self-extracting archive
-# like: E30ODI.COM MADGEODI.COM UNI2ASCI.COM RECOVER.COM (DOS 2) COMMAND.COM (DOS 2)
->1 search/0xc088 \xcd\x22 \b, maybe with interrupt 22h
->0 ubelong x \b, start instruction %#8.8x
-# show more instructions but not in samples like: rem.com (DJGPP)
->4 ubelong x %8.8x
-
-# JMP 8bit
-0 byte 0xeb
-# byte 0xeb conflicts with magic leshort 0xn2eb of "SYMMETRY i386" handled by ./sequent
-# allow forward jumps only
->1 byte >-1
-# that offset must be accessible
-# with hexadecimal values like: 0e 2e 50 8c 8d ba bc bd be e8 fb fc
->>(1.b+2) byte x
-# if it looks like a COM executable with an x86 boot signature, then this
-# implies a FAT volume with x86 real mode code, already handled by ./filesystems
-#
-# No x86 boot signature often implies a DOS executable
-# check for an unrealistically high number of FATs. Then it is an unusual disk image or, more often, a DOS executable
-# like: FIXBIOS.COM (50 bytes)
->>>16 ubyte >3
-# https://www.drivedroid.io/
-# skip MBR disk image drivedroid.img version 12 July 2013 by start message
->>>>2 string !DriveDroid
-# ftp://old-dos.ru/OSCollect/OS/MS-DOS/Final Releases/
-# skip unusual floppy image disk1.img of MS-DOS 1.25 (Corona Data Systems OEM)
-# by checking for characteristic message text near the beginning
->>>>>15 string !Non\040System\040disk
-# "ftp://old-dos.ru/OSCollect/OS/BeOS/BeOS 4.0.rar"
-# skip BeOS 4 bootfloppy.img done as "Linux kernel x86 boot executable" by ./linux
-# by checking for characteristic message text near the beginning
->>>>>>6 string !read\040error\015
-# https://github.com/ventoy/Ventoy/releases/download/v1.0.78/ventoy-1.0.78-windows.zip
-# skip ventoy 1.0.78 boot_hybrid.img
->>>>>>>24 string !\220\220\353I$\022\017
-# "ftp://old-dos.ru/OSCollect/OS/MS-DOS/Final Releases/PC-DOS 1.0 (5.25).rar"
-# skip unusual floppy image PCDOS100.IMG of DOS 1.0
-# by checking for characteristic message text near the beginning
->>>>>>>>9 string !7-May-81
-# "ftp://old-dos.ru/OSCollect/OS/BeOS/BeOS 5.0 Personal (BA).rar"
-# skip BeOS 5 floppy_1.44.00.ima done as "DOS/MBR boot sector" by ./filesystems
-# by checking for a characteristic message near the beginning
->>>>>>>>>3 string !\370sdfS\270
-# like: FIXBIOS.COM (50 bytes)
->>>>>>>>>>0 use msdos-com
-# check for an unrealistically low number of FATs. Then it is an unusual FAT disk image or often a DOS executable
-# like: DEVICE.COM INSTALL.COM (GAG 4.10) WORD.COM (Word 1.15)
->>>16 ubyte =0
-# if a low FAT count occurs with an x86 boot signature, it can be an unusual disk image like: boot.img (Ventoy 1.0.27) geodspms.img (Syslinux)
->>>>0x1FE leshort =0xAA55
->>>>0x1FE default x
-# https://thestarman.pcministry.com/tool/hxd/dimtut.htm
-# skip unusual floppy image TK-DOS11.img IBMDOS11.img of IBM DOS 1.10
-# by checking for characteristic bootloader names near the end of the boot sector
->>>>>395 string !ibmbio\040\040com
->>>>>>0 use msdos-com
-# 8-bit jump with a valid number of FATs implies a FAT volume, already handled by ./filesystems
-# like: balder.img
->>>16 default x
-# skip disk images with boot signature at end of 1st sector
-# like: TDSK-64b.img
->>>>(11.s-2) uleshort !0xAA55
-# skip unusual floppy image without boot signature like 360k-256.img (mtools 4.0.18)
-# by checking for the characteristic file system type text for FAT (12 bit or 16 bit)
->>>>>54 string !FAT
-# "ftp://old-dos.ru/OSCollect/OS/MS-DOS/Final Releases/Microsoft MS-DOS 3.31 (Compaq OEM) (3.5).rar"
-# skip unusual floppy image Disk4.img without boot signature and file system type text
-# by checking for the characteristic OEM-ID text
->>>>>>3 string !COMPAQ\040\040
-# no such DOS COM executables found
->>>>>>>0 use msdos-com
-# JMP 16bit
-0 byte 0xe9
-# 16-bit offset; for DEBUGGING!; can be negative like: USBDRIVE.COM
-#>1 leshort x \b, OFFSET %d
-# forward jumps
->1 leshort >-1
-# that offset must be accessible
-# with hexadecimal values like: 06 1e 0e 2e 60 8c 8d b4 ba be e8 fc
->>(1.s+3) byte x
-# check for an unrealistically high number of FATs. Then it is not a disk image and it is a DOS executable
-# like: CALLVER.COM CPUCACHE.COM K437_EUR.COM SHSUCDX.COM UMBFILL.COM (183 bytes)
->>>16 ubyte >3
->>>>0 use msdos-com
-# check for an unrealistically low number of FATs. Then it is not a disk image and it is a DOS executable
-# like: GAG.COM DRMOUSE.COM NDN.COM CPQ0TD.DRV
->>>16 ubyte =0
->>>>0 use msdos-com
-# maybe disc image with valid number of FATs or DOS executable
-# like: IPXODI.COM PERUSE.COM TASKID.COM
->>>16 default x
-# invalid low media descriptor. Then it is not a disk image and it is a DOS executable
->>>>21 ubyte <0xE5
->>>>>0 use msdos-com
-# valid media descriptor. Then it is maybe disk image or DOS executable
->>>>21 ubyte >0xE4
-# invalid sector size, not a power of 2 from 32-32768. Then it is not a disk image and it must be a DOS executable
-# like: LEARN.COM (Word 1.15)
->>>>>11 uleshort&0x001f !0
->>>>>>0 use msdos-com
-# negative offset, must not lead into PSP
-# like: BASICA.COM (PC dos 3.20) FORMAT.COM SMC8100.COM WORD.COM (word4)
-# HIDSUPT1.COM USBDRIVE.COM USBSUPT1.COM USBUHCI.COM (FreeDOS USBDOS)
->1 leshort <-259
-# that offset must be accessible
-# add 10000h to jump at end of 64 KiB segment, add 1 for jump instruction and 2 for 16-bit offset
->>(1,s+65539) byte x
-# after jump next instruction for DEBUGGING!
-#>>>&-1 ubelong x \b, NEXT instruction %#8.8x
->>>0 use msdos-com
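-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the FAT-count/BPB heuristic used above to tell jump-started COM executables
-# from FAT boot sectors; a rough, non-authoritative mirror of the rules.
-#   def classify_jump_start(buf):
-#       if not buf or buf[0] not in (0xE9, 0xEB):
-#           return "no leading jump"
-#       num_fats = buf[16]                                  # BPB number of FATs, if any
-#       sector_size = int.from_bytes(buf[11:13], "little")
-#       media = buf[21]
-#       if num_fats == 0 or num_fats > 3:
-#           return "likely DOS COM executable"
-#       if media < 0xE5 or (sector_size & 0x1F) != 0:       # implausible BPB fields
-#           return "likely DOS COM executable"
-#       return "possibly FAT volume / disk image"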
-
-# updated by Joerg Jenderek at Oct 2008,2015,2022
-# following line is too general
-0 ubyte 0xb8
-# skip 2 linux kernels like memtest.bin with "\xb8\xc0\x07\x8e" in ./linux
->0 string !\xb8\xc0\x07\x8e
-# modified by Joerg Jenderek
-# syslinux COM32 or COM32R executable
->>1 lelong&0xFFFFFFFe 0x21CD4CFe COM executable (32-bit COMBOOT
-# https://www.syslinux.org/wiki/index.php/Comboot_API
-# Since version 5.00 c32 modules switched from the COM32 object format to ELF
-!:mime application/x-c32-comboot-syslinux-exec
-!:ext c32
-# https://syslinux.zytor.com/comboot.php
-# older syslinux version ( <4 )
-# (32-bit COMBOOT) programs *.C32 contain 32-bit code and run in flat-memory 32-bit protected mode
-# start with assembler instructions mov eax,21cd4cffh
->>>1 lelong 0x21CD4CFf \b)
-# syslinux:doc/comboot.txt
-# A COM32R program must start with the byte sequence B8 FE 4C CD 21 (mov
-# eax,21cd4cfeh) as a magic number.
-# syslinux version (4.x)
-# "COM executable (COM32R)" or "Syslinux COM32 module" by TrID
->>>1 lelong 0x21CD4CFe \b, relocatable)
->>1 default x
-# look for interrupt instruction like in rem.com (DJGPP) LOADER.COM (DR-DOS 7.x)
->>>3 search/118 \xCD
-# FOR DEBUGGING; possible hexadecimal interrupt number like: 10~BANNER.COM 13~bcdw_cl.com 15~poweroff.com (Syslinux)
-# 1A~BERNDPCI.COM 20~SETENHKB.COM 21~mostly 22~gfxboot.com (Syslinux) 2F~SHUTDOWN.COM (GEMSYS)
-#>>>>&0 ubyte x \b, INTERRUPT %#x
-# few examples with interrupt 0x13 instruction
->>>>&0 ubyte =0x13
-# FOR DEBUGGING!
-#>>>>>3 ubequad x \b, 2nd INSTRUCTION %#16.16llx
-# skip Gpt.com Mbr.com (edk2-UDK2018 bootsector) described as "DOS/MBR boot sector" by ./filesystems
-# by checking for the assembler instructions: mov es,ax ; mov ax,07c0h ; mov ds,ax
->>>>>3 ubequad !0x8ec0b8c0078ed88d
-# few COM executables with interrupt 0x13 instruction like: Bootable CD Wizard executables bcdw_cl.com fdemuoff.com
-# http://bootcd.narod.ru/bcdw150z_en.zip
->>>>>>0 use msdos-com
-# few examples with interrupt 0x16 instruction like flashimg.img
->>>>&0 ubyte =0x16
-# skip Syslinux 3.71 flashimg.img done as "DOS/MBR boot sector" by ./filesystems
-# by checking for the assembler instructions: cmp ax 0xE4E4 (magic); jnz
->>>>>8 ubelong !0x3DE4E475
-# no DOS executable with interrupt 0x16 found
->>>>>>0 use msdos-com
-# most examples with an interrupt number other than 0x13 or 0x16
->>>>&0 default x
-#>>>>>&-1 ubyte x \b, INTERRUPT %#x
-# like: LOADER.COM SETENHKB.COM banner.com copybs.com gif2raw.com poweroff.com rem.com
->>>>>0 use msdos-com
-# few COM executables without interrupt instruction like RESTART.COM (DOS 7.10) REBOOT.COM
-# or some EUC-KR text files or one Ulead Imaginfo thumbnail
->>>3 default x
-# FOR DEBUGGING; 2nd instruction like 0x50 (RESTART.COM) 0x8e (REBOOT.COM)
-# or random like: 0x0 (IMAGINFO.PE3 sky_snow) 0xb1 (euckr_.txt)
-#>>>>3 ubyte x \b, 2nd INSTRUCTION %#x
-# skip 1 Ulead Imaginfo thumbnail (IMAGINFO.PE3 sky_snow)
-# inside SAMPLES/TEXTURES/SKY_SNOW
-# from https://archive.org/download/PI3CANON/PI3CANON.iso
->>>>3 ubyte !0x0
-# skip some EUC-KR text files like: euckr_falsepositive.txt
-# https://bugs.astron.com/view.php?id=186
->>>>>3 ubyte !0xb1
-# like: RESTART.COM (DOS 7.10) REBOOT.COM
->>>>>>0 use msdos-com
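-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment):
-# the COM32/COM32R magic is simply the first instruction "mov eax,21cd4cXXh"
-# (B8 XX 4C CD 21), with XX=FF for COM32 and XX=FE for COM32R.
-#   def comboot_kind(buf):
-#       if buf[:1] != b"\xB8" or buf[2:5] != b"\x4C\xCD\x21":
-#           return None
-#       return {0xFF: "COM32 (32-bit COMBOOT)",
-#               0xFE: "COM32R (relocatable)"}.get(buf[1])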
-
-# URL: https://en.wikipedia.org/wiki/UPX
-# Reference: https://github.com/upx/upx/archive/v3.96.zip/upx-3.96/
-# src/stub/src/i086-dos16.com.S
-# Update: Joerg Jenderek
-# assembler instructions: cmp sp, offset sp_limit
-0 string/b \x81\xfc
-#>2 uleshort x \b, sp_limit=%#x
-# assembler instructions: jump above +2; int 0x20; mov cx, offset bytes_to_copy
->4 string \x77\x02\xcd\x20\xb9
-#>9 uleshort x \b, [bytes_to_copy]=%#x
-# at different offsets assembler instructions: push di; jump decomp_start_n2b
->0x1e search/3 \x57\xe9
-#>>&0 uleshort x \b, decomp_start_n2b=%#x
-# src/stub/src/include/header.S; UPX_MAGIC_LE32
->>&2 string UPX! FREE-DOS executable (COM), UPX
-!:mime application/x-dosexec
-# UPX compressed *.CPI; See ./fonts
->>>&21 string =FONT compressed DOS code page font
-!:ext cpx
->>>&21 string !FONT compressed
-!:ext com
-# compressed size?
-#>>>&14 uleshort+152 x \b, %u bytes
-# uncompressed len
->>>&12 uleshort x \b, uncompressed %u bytes
-252 string Must\ have\ DOS\ version DR-DOS executable (COM)
-!:mime application/x-dosexec
-!:ext com
-# GRR search is not working
-#2 search/28 \xcd\x21 COM executable for MS-DOS
-#WHICHFAT.cOM
-2 string \xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#DELTREE.cOM DELTREE2.cOM
-4 string \xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#IFMEMDSK.cOM ASSIGN.cOM COMP.cOM
-5 string \xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#DELTMP.COm HASFAT32.cOM
-7 string \xcd\x21
->0 byte !0xb8 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#COMP.cOM MORE.COm
-10 string \xcd\x21
->5 string !\xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#comecho.com
-13 string \xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-#HELP.COm EDIT.coM
-18 string \xcd\x21
-# not printable before it?
->17 byte >32
->>17 byte <126
->>17 default x COM executable for MS-DOS
-!:mime application/x-dosexec
-!:ext com
-#NWRPLTRM.COm
-23 string \xcd\x21 COM executable for MS-DOS
-!:mime application/x-dosexec
-!:ext com
-#LOADFIX.cOm LOADFIX.cOm
-30 string \xcd\x21 COM executable for MS-DOS
-!:mime application/x-dosexec
-!:ext com
-#syslinux.com 3.11
-70 string \xcd\x21 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
-# many compressed/converted COMs start with a copy loop instead of a jump
-0x6 search/0xa \xfc\x57\xf3\xa5\xc3 COM executable for MS-DOS
-!:mime application/x-dosexec
-!:ext com
-0x6 search/0xa \xfc\x57\xf3\xa4\xc3 COM executable for DOS
-!:mime application/x-dosexec
-!:ext com
->0x18 search/0x10 \x50\xa4\xff\xd5\x73 \b, aPack compressed
-0x3c string W\ Collis\0\0 COM executable for MS-DOS, Compack compressed
-!:mime application/x-dosexec
-!:ext com
-# FIXME: missing diet .com compression
-
-# miscellaneous formats
-0 string/b LZ MS-DOS executable (built-in)
-#0 byte 0xf0 MS-DOS program library data
-#
-
-# AAF files:
-# <stuartc@rd.bbc.co.uk> Stuart Cunningham
-0 string/b \320\317\021\340\241\261\032\341AAFB\015\000OM\006\016\053\064\001\001\001\377 AAF legacy file using MS Structured Storage
->30 byte 9 (512B sectors)
->30 byte 12 (4kB sectors)
-0 string/b \320\317\021\340\241\261\032\341\001\002\001\015\000\002\000\000\006\016\053\064\003\002\001\001 AAF file using MS Structured Storage
->30 byte 9 (512B sectors)
->30 byte 12 (4kB sectors)
-
-# Popular applications
-#
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/DOC
-# Reference: https://web.archive.org/web/20170206041048/
-# http://www.msxnet.org/word2rtf/formats/ffh-dosword5
-# wIdent+dty
-0 belong 0x31be0000
-# skip droid skeleton like x-fmt-274-signature-id-488.doc
->128 ubyte >0 Microsoft
->>96 uleshort =0 Word
-!:mime application/msword
-!:apple MSWDWDBN
-# DCX is used in the Unix version.
-!:ext doc/dcx
->>>0x6E ulequad =0 1.0-4.0
->>>0x6E ulequad !0 5.0-6.0
->>>0x6E ulequad x (DOS) Document
-# https://web.archive.org/web/20130831064118/http://msxnet.org/word2rtf/formats/write.txt
->>96 uleshort !0 Write 3.0 (Windows) Document
-!:mime application/x-mswrite
-!:apple MSWDWDBN
-# sometimes also doc like in splitter.doc srchtest.doc
-!:ext wri/doc
-# wTool must be 0125400 octal
-#>>4 uleshort !0xAB00 \b, wTool %o
-# reserved; must be zero
-#>>6 ulelong !0 \b, reserved %u
-# block pointer to the block containing optional file manager information
-#>>0x1C uleshort x \b, at %#x info block
-# jump to File manager information block
->>(0x1C.s*128) uleshort x
-# test for valid information start; maybe also 0012h
->>>&-2 uleshort =0x0014
-# Document ASCIIZ name
->>>>&0x12 string x %s
-# author name
->>>>>&1 string x \b, author %s
-# reviser name
->>>>>>&1 string x \b, reviser %s
-# keywords
->>>>>>>&1 string x \b, keywords %s
-# comment
->>>>>>>>&1 string x \b, comment %s
-# version number
->>>>>>>>>&1 string x \b, version %s
-# date of last change MM/DD/YY
->>>>>>>>>>&1 string x \b, %-.8s
-# creation date MM/DD/YY
->>>>>>>>>>&9 string x created %-.8s
-# file name of print format like NORMAL.STY
->>0x1E string >0 \b, formatted by %-.66s
-# count of pages in the whole file for the Write variant; may sometimes be wrong
->>96 uleshort >0 \b, %u pages
-# name of the printer driver like HPLASMS
->>0x62 string >0 \b, %-.8s printer
-# number of blocks used in the file; seems to be 0 for Word 4.0 and Write 3.0
->>0x6A uleshort >0 \b, %u blocks
-# bit field for corrected text areas
-#>>0x6C uleshort x \b, %#x bit field
-# text of document; sometimes starts with 4 non-printable characters like CR LF
->>128 ubyte x \b,
->>>128 ubyte >0x1F
->>>>128 string x %s
->>>128 ubyte <0x20
->>>>129 ubyte >0x1F
->>>>>129 string x %s
->>>>129 ubyte <0x20
->>>>>130 ubyte >0x1F
->>>>>>130 string x %s
->>>>>130 ubyte <0x20
->>>>>>131 ubyte >0x1F
->>>>>>>131 string x %s
->>>>>>131 ubyte <0x20
->>>>>>>132 ubyte >0x1F
->>>>>>>>132 string x %s
->>>>>>>132 ubyte <0x20
->>>>>>>>133 ubyte >0x1F
->>>>>>>>>133 string x %s
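-#
-# Illustrative Python sketch (not magic syntax; helper name invented for this comment)
-# of the Word (DOS)/Write fields used above; the info-block pointer at 0x1C counts
-# 128-byte blocks.
-#   import struct
-#
-#   def word_dos_summary(buf):
-#       if struct.unpack_from(">I", buf, 0)[0] != 0x31BE0000:    # wIdent + dty
-#           return None
-#       pages = struct.unpack_from("<H", buf, 96)[0]
-#       marker = struct.unpack_from("<Q", buf, 0x6E)[0]
-#       info_block = struct.unpack_from("<H", buf, 0x1C)[0] * 128
-#       if pages == 0:
-#           return {"kind": "Word (DOS)",
-#                   "version": "1.0-4.0" if marker == 0 else "5.0-6.0",
-#                   "info_block_offset": info_block}
-#       return {"kind": "Write 3.0 (Windows)", "pages": pages,
-#               "info_block_offset": info_block}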
-#
-0 string/b PO^Q` Microsoft Word 6.0 Document
-!:mime application/msword
-#
-4 long 0
->0 belong 0xfe320000 Microsoft Word for Macintosh 1.0
-!:mime application/msword
-!:ext mcw
->0 belong 0xfe340000 Microsoft Word for Macintosh 3.0
-!:mime application/msword
-!:ext mcw
->0 belong 0xfe37001c Microsoft Word for Macintosh 4.0
-!:mime application/msword
-!:ext mcw
->0 belong 0xfe370023 Microsoft Word for Macintosh 5.0
-!:mime application/msword
-!:ext mcw
-
-0 string/b \333\245-\0\0\0 Microsoft Word 2.0 Document
-!:mime application/msword
-!:ext doc
-# Note: seems already recognized as "OLE 2 Compound Document" in ./ole2compounddocs
-#512 string/b \354\245\301 Microsoft Word Document
-#!:mime application/msword
-
-#
-0 string/b \xDB\xA5\x2D\x00 Microsoft WinWord 2.0 Document
-!:mime application/msword
-
-#
-0 string/b \x09\x04\x06\x00\x00\x00\x10\x00 Microsoft Excel Worksheet
-!:mime application/vnd.ms-excel
-# https://www.macdisk.com/macsigen.php
-!:apple XCELXLS4
-!:ext xls
-#
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Lotus_1-2-3
-# Reference: http://www.aboutvb.de/bas/formate/pdf/wk3.pdf
-# Note: newer Lotus versions >2 use longer BOF record
-# record type (BeginningOfFile=0000h) + length (001Ah)
-0 belong 0x00001a00
-# reserved should be 0h but 8c0dh for TUTMAC.WK3, 5h for SAMPADNS.WK3, 1h for a_readme.wk3, 1eh for K&G86.WK3
-#>18 uleshort&0x73E0 0
-# Lotus Multi Byte Character Set (LMBCS=1-31)
->20 ubyte >0
->>20 ubyte <32 Lotus 1-2-3
-#!:mime application/x-123
-!:mime application/vnd.lotus-1-2-3
-!:apple ????L123
-# (version 5.26) labeled the entry as "Lotus 1-2-3 wk3 document data"
->>>4 uleshort 0x1000 WorKsheet, version 3
-!:ext wk3
-# (version 5.26) labeled the entry as "Lotus 1-2-3 wk4 document data"
->>>4 uleshort 0x1002 WorKsheet, version 4
-# also worksheet template 4 (.wt4)
-!:ext wk4/wt4
-# no example or documentation for wk5
-#>>4 uleshort 0x???? WorKsheet, version 4
-#!:ext wk5
-# only MacrotoScript.123 example
->>>4 uleshort 0x1003 WorKsheet, version 97
-# also worksheet template Smartmaster (.12M)?
-!:ext 123
-# only Set_Y2K.123 example
->>>4 uleshort 0x1005 WorKsheet, version 9.8 Millennium
-!:ext 123
-# no example for this version
->>>4 uleshort 0x8001 FoRMatting data
-!:ext frm
-# (version 5.26) labeled the entry as "Lotus 1-2-3 fm3 or fmb document data"
-# TrID labeles the entry as "Formatting Data for Lotus 1-2-3 worksheet"
->>>4 uleshort 0x8007 ForMatting data, version 3
-!:ext fm3
->>>4 default x unknown
-# file revision sub code 0004h for worksheets
->>>>6 uleshort =0x0004 worksheet
-!:ext wXX
->>>>6 uleshort !0x0004 formatting data
-!:ext fXX
-# main revision number
->>>>4 uleshort x \b, revision %#x
->>>6 uleshort =0x0004 \b, cell range
-# active cell coordinate range (start row, page, column; end row, page, column)
-# start values normally 0~1st sheet A1
->>>>8 ulelong !0
->>>>>10 ubyte >0 \b%d*
->>>>>8 uleshort x \b%d,
->>>>>11 ubyte x \b%d-
-# end page mostly 0
->>>>14 ubyte >0 \b%d*
-# end row, column normally not 0
->>>>12 uleshort x \b%d,
->>>>15 ubyte x \b%d
-# Lotus Multi Byte Character Set (1~cp850,2~cp851,...,16~japan,...,31~??)
->>>>20 ubyte >1 \b, character set %#x
-# flags
->>>>21 ubyte x \b, flags %#x
->>>6 uleshort !0x0004
-# record type (FONTNAME=00AEh)
->>>>30 search/29 \0\xAE
-# variable length m (2) + entries (1) + ?? (1) + LMBCS string (n)
->>>>>&4 string >\0 \b, 1st font "%s"
-#
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Lotus_1-2-3
-# Reference: http://www.schnarff.com/file-formats/lotus-1-2-3/WSFF2.TXT
-# Note: Used by both old Lotus 1-2-3 and Lotus Symphony (DOS) until version 2.x
-# record type (BeginningOfFile=0000h) + length (0002h)
-0 belong 0x00000200
-# GRR: line above is too general as it also catches MS Windows CURsor
-# to display MS Windows cursor (strength=70) before Lotus 1-2-3 (strength=70-1)
-!:strength -1
-# skip Windows cursors with image height <256 and keep Lotus with low opcode 0001-0083h
->7 ubyte 0
-# skip Windows cursors with image width 256 and keep Lotus with positive opcode
->>6 ubyte >0 Lotus
-# !:mime application/x-123
-!:mime application/vnd.lotus-1-2-3
-!:apple ????L123
-# revision number (0404h = 123 1A, 0405h = Lotus Symphony , 0406h = 123 2.x wk1 , 8006h = fmt , ...)
-# undocumented; (version 5.26) labeled the configurations as "Lotus 1-2-3"
->>>4 uleshort 0x0007 1-2-3 CoNFiguration, version 2.x (PGRAPH.CNF)
-!:ext cnf
->>>4 uleshort 0x0C05 1-2-3 CoNFiguration, version 2.4J
-!:ext cnf
->>>4 uleshort 0x0801 1-2-3 CoNFiguration, version 1-2.1
-!:ext cnf
->>>4 uleshort 0x0802 Symphony CoNFiguration
-!:ext cnf
->>>4 uleshort 0x0804 1-2-3 CoNFiguration, version 2.2
-!:ext cnf
->>>4 uleshort 0x080A 1-2-3 CoNFiguration, version 2.3-2.4
-!:ext cnf
->>>4 uleshort 0x1402 1-2-3 CoNFiguration, version 3.x
-!:ext cnf
->>>4 uleshort 0x1450 1-2-3 CoNFiguration, version 4.x
-!:ext cnf
-# (version 5.26) labeled the entry as "Lotus 123"
-# TrID labeles the entry as "Lotus 123 Worksheet (generic)"
->>>4 uleshort 0x0404 1-2-3 WorKSheet, version 1
-# extension "wks" also for Microsoft Works document
-!:ext wks
-# (version 5.26) labeled the entry as "Lotus 123"
-# TrID labeles the entry as "Lotus 123 Worksheet (generic)"
->>>4 uleshort 0x0405 Symphony WoRksheet, version 1.0
-!:ext wrk/wr1
-# (version 5.26) labeled the entry as "Lotus 1-2-3 wk1 document data"
-# TrID labeles the entry as "Lotus 123 Worksheet (V2)"
->>>4 uleshort 0x0406 1-2-3/Symphony worksheet, version 2
-# Symphony (.wr1)
-!:ext wk1/wr1
-# no example for this japan version
->>>4 uleshort 0x0600 1-2-3 WorKsheet, version 1.xJ
-!:ext wj1
-# no example or documentation for wk2
-#>>>4 uleshort 0x???? 1-2-3 WorKsheet, version 2
-#!:ext wk2
-# undocumented japan version
->>>4 uleshort 0x0602 1-2-3 worksheet, version 2.4J
-!:ext wj3
-# (version 5.26) labeled the entry as "Lotus 1-2-3 fmt document data"
->>>4 uleshort 0x8006 1-2-3 ForMaTting data, version 2.x
-# Japanese version 2.4J (fj3)
-!:ext fmt/fj3
-# no example for this version
->>>4 uleshort 0x8007 1-2-3 FoRMatting data, version 2.0
-!:ext frm
-# (version 5.26) labeled the entry as "Lotus 1-2-3"
->>>4 default x unknown worksheet or configuration
-!:ext cnf
->>>>4 uleshort x \b, revision %#x
-# 2nd record for most worksheets describes cells range
->>>6 use lotus-cells
-# 3rd record for most japan worksheets describes cells range
->>>(8.s+10) use lotus-cells
-# check and then display Lotus worksheet cells range
-0 name lotus-cells
-# look for type (RANGE=0006h) + length (0008h) at record begin
->0 ubelong 0x06000800 \b, cell range
-# cell range (start column, row, end column, row) start values normally 0,0~A1 cell
->>4 ulong !0
->>>4 uleshort x \b%d,
->>>6 uleshort x \b%d-
-# end of cell range
->>8 uleshort x \b%d,
->>10 uleshort x \b%d
-# EndOfLotus123
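As a rough cross-check of the BOF record layout these Lotus rules test (type 0000h, length 0002h, then a revision word at offset 4), here is a minimal Python sketch; identify_lotus and the LOTUS_VERSIONS table are illustrative names and cover only a few of the revisions listed above:

    import struct

    LOTUS_VERSIONS = {
        0x0404: "1-2-3 worksheet, version 1 (.wks)",
        0x0405: "Symphony worksheet, version 1.0 (.wrk/.wr1)",
        0x0406: "1-2-3/Symphony worksheet, version 2 (.wk1/.wr1)",
        0x8006: "1-2-3 formatting data, version 2.x (.fmt)",
    }

    def identify_lotus(data: bytes) -> str:
        # BOF record: type 0000h, length 0002h, then a 16-bit revision word
        rec_type, rec_len, revision = struct.unpack_from("<HHH", data, 0)
        if (rec_type, rec_len) != (0x0000, 0x0002):
            raise ValueError("no Lotus 1-2-3 BOF record at offset 0")
        return LOTUS_VERSIONS.get(revision, "unknown revision %#x" % revision)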
-0 string/b WordPro\0 Lotus WordPro
-!:mime application/vnd.lotus-wordpro
-0 string/b WordPro\r\373 Lotus WordPro
-!:mime application/vnd.lotus-wordpro
-
-
-# Summary: Script used by InstallShield to uninstall applications
-# Extension: .isu
-# Submitted by: unknown
-# Modified by (1): Abel Cheung <abelcheung@gmail.com> (replace useless entry)
-0 string \x71\xa8\x00\x00\x01\x02
->12 string Stirling\ Technologies, InstallShield Uninstall Script
-
-# Winamp .avs
-#0 string Nullsoft\ AVS\ Preset\ \060\056\061\032 A plug in for Winamp ms-windows Freeware media player
-0 string/b Nullsoft\ AVS\ Preset\ Winamp plug in
-
-# Windows Metafile .WMF
-# URL: http://fileformats.archiveteam.org/wiki/Windows_Metafile
-# http://en.wikipedia.org/wiki/Windows_Metafile
-# Reference: https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-WMF/%5bMS-WMF%5d.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/w/wmf.trid.xml
-# Note: called "Windows Metafile" by TrID and
-# verified by ImageMagick `identify -verbose *.wmf` as WMF (Windows Meta File)
-# META_PLACEABLE Record (Aldus Placeable Metafile signature)
-0 string/b \327\315\306\232
-# Note: called "Windows Metafile Image with Placeable File Header" by DROID via PUID x-fmt/119
-# and verified by XnView `nconvert -info abydos.wmf SPA_FLAG.wmf hardcopy-windows-meta.wmf` as "Windows Placeable metafile"
-# skip failed libreoffice-7.3.2.2 ofz35149-1.wmf with invalid version 2020h and exttextout-2.wmf with invalid version 3a02h
-# and x-fmt-119-signature-id-609.wmf without version instead of 0100h=METAVERSION100 or 0300h=METAVERSION300
->26 uleshort&0xFDff =0x0100 Windows metafile
-# HWmf; resource handle to the metafile; When the metafile is on disk, this field MUST contain 0
-# seems to always be true, but not in failed samples 2020h ofz35149-1.wmf 56f8h exttextout-2.wmf
->>4 uleshort !0 \b, resource handle %#x
-# BoundingBox; the rectangle in the playback context measured in logical units for displaying
-# sometimes useful like: hardcopy-windows-meta.wmf (0,0 / 1280,1024)
-# but garbage in x-fmt-119-signature-id-609.wmf (-21589,-21589 / -21589,-21589)
-#>>6 ubequad x \b, bounding box %#16.16llx
-# Left; x-coordinate of the upper-left corner of the rectangle
->>6 leshort x \b, bounding box (%d
-# Top; y-coordinate upper-left corner
->>8 leshort x \b,%d
-# Right; x-coordinate lower-right corner
->>10 leshort x / %d
-# Bottom; y-coordinate lower-right corner
->>12 leshort x \b,%d)
-# Inch; number of logical units per inch like: 72 96 575 576 1000 1200 1439 1440 2540
->>14 uleshort x \b, dpi %u
-# Reserved; field is not used and MUST be set to 0; but ababababh in x-fmt-119-signature-id-609.wmf
->>16 ulelong !0 \b, reserved %#x
-# Checksum; checksum for the previous 10 words
->>20 uleshort x \b, checksum %#x
-# META_HEADER Record after META_PLACEABLE Record
->>22 use wmf-head
-# GRR: no example for type 2 (DISKMETAFILE) variant found among a few thousand WMF files
-0 string/b \002\000\011\000 Windows metafile
->0 use wmf-head
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/w/wmf-16.trid.xml
-# Note: called "Windows Metafile (old Win 3.x format)" by TrID and
-# "Windows Metafile Image without Placeable File Header" by DROID via PUID x-fmt/119
-# verified by XnView `nconvert -info *.wmf` as Windows metafile
-# variant with type=1=MEMORYMETAFILE and valid HeaderSize 9
-0 string/b \001\000\011\000
-# skip DROID x-fmt-119-signature-id-1228.wmf by looking for content after header (18 bytes=2*011)
->18 ulelong >0 Windows metafile
-# GRR: in version 5.44 the not-equal (!0) and non-endian (long) variants are not working!
-#>18 ulelong !0 THIS_SHOULD_NOT_HAPPEN
-#>18 long !0 THIS_SHOULD_NOT_HAPPEN
->>0 use wmf-head
-# display information of Windows metafile header (type, size, objects)
-0 name wmf-head
-# MetafileType: 0001h=MEMORYMETAFILE~Metafile is stored in memory 0002h=DISKMETAFILE~Metafile is stored on disk
->0 uleshort !0x0001 \b, type %#x
-# HeaderSize; the number of WORDs in header record; seems to always be 9 (18 bytes)
->2 uleshort*2 !18 \b, header size %u
-# MetafileVersion: 0100h=METAVERSION100~DIBs (device-independent bitmaps) not supported 0300h=METAVERSION300~DIBs are supported
-# but in failed samples 2020h ofz35149-1.wmf 3a02h exttextout-2.wmf
->4 uleshort =0x0100 \b, DIBs not supported
->4 uleshort =0x0300
-#>4 uleshort =0x0300 \b, DIBs supported
-# this should not happen!
->4 default x \b, version
->>4 uleshort x %#x
-# Size; the number of WORDs in the entire metafile
->6 ulelong x \b, size %u words
-#>6 ulelong*2 x \b, size %u bytes
-!:mime image/wmf
-!:ext wmf
-# NumberOfObjects: the number of graphics objects like: 0 hardcopy-windows-meta.wmf 1 2 3 4 5 6 7 8 9 12 13 14 16 17 20 27 110 PERSGRID.WMF
->10 uleshort x \b, %u objects
-# MaxRecord: the size of the largest record in the metafile in WORDs like: 78h b0h 1f4h 310h 63fh 1e0022h 3fcc21h
->12 ulelong x \b, largest record size %#x
-# NumberOfMembers: It SHOULD be 0x0000, but 5 TestBitBltStretchBlt.wmf 13 TestPalette.wmf and in failed samples 4254 bitcount-1.wmf 8224 ofz5942-1.wmf 56832 exttextout-2.wmf
->16 uleshort !0 \b, %u members
-
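For readers following the META_PLACEABLE fields decoded above (handle, bounding box, dpi, checksum), a small Python sketch of the same 22-byte header; parse_placeable_wmf is a hypothetical helper, not part of file(1):

    import struct

    def parse_placeable_wmf(data: bytes):
        # META_PLACEABLE record: 32-bit key 9AC6CDD7h, metafile handle, bounding
        # box (left, top, right, bottom), logical units per inch, reserved word,
        # checksum; the standard META_HEADER follows at offset 22.
        key, handle, left, top, right, bottom, inch, reserved, checksum = \
            struct.unpack_from("<IHhhhhHIH", data, 0)
        if key != 0x9AC6CDD7:
            raise ValueError("not an Aldus placeable metafile")
        calc = 0
        for (word,) in struct.iter_unpack("<H", data[:20]):
            calc ^= word  # checksum covers the previous 10 words
        return (left, top, right, bottom), inch, calc == checksum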
-# tz3 files, whatever that is (MS Works files)
-0 string/b \003\001\001\004\070\001\000\000 tz3 ms-works file
-0 string/b \003\002\001\004\070\001\000\000 tz3 ms-works file
-0 string/b \003\003\001\004\070\001\000\000 tz3 ms-works file
-
-# PGP sig files .sig
-#0 string \211\000\077\003\005\000\063\237\127 065 to \027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\077\003\005\000\063\237\127\065\027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\077\003\005\000\063\237\127\066\027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\077\003\005\000\063\237\127\067\027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\077\003\005\000\063\237\127\070\027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\077\003\005\000\063\237\127\071\027\266\151\064\005\045\101\233\021\002 PGP sig
-0 string \211\000\225\003\005\000\062\122\207\304\100\345\042 PGP sig
-
-# Windows zipped files .dmf
-0 string/b MDIF\032\000\010\000\000\000\372\046\100\175\001\000\001\036\001\000 MS Windows special zipped file
-
-# Windows icons
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/CUR_(file_format)
-# Note: similar to Windows CURsor; container for BMP (only DIB part) or PNG
-0 belong 0x00000100
->9 byte 0
->>0 byte x
->>0 use cur-ico-dir
->9 ubyte 0xff
->>0 byte x
->>0 use cur-ico-dir
-# displays number of icons and information for icon or cursor
-0 name cur-ico-dir
-# skip some Lotus 1-2-3 worksheets, CYCLE.PIC and keep Windows cursors with
-# 1st data offset = dir header size + n * dir entry size = 6 + n * 10h = ?6h
->18 ulelong &0x00000006
-# skip remaining worksheets, because valid only for DIB image (40) or PNG image (\x89PNG)
->>(18.l) ulelong x MS Windows
->>>0 ubelong 0x00000100 icon resource
-# https://www.iana.org/assignments/media-types/image/vnd.microsoft.icon
-!:mime image/vnd.microsoft.icon
-#!:mime image/x-icon
-!:ext ico
->>>>4 uleshort x - %d icon
-# plural s
->>>>4 uleshort >1 \bs
-# 1st icon
->>>>0x06 use ico-entry
-# 2nd icon
->>>>4 uleshort >1
->>>>>0x16 use ico-entry
->>>0 ubelong 0x00000200 cursor resource
-#!:mime image/x-cur
-!:mime image/x-win-bitmap
-!:ext cur
->>>>4 uleshort x - %d icon
->>>>4 uleshort >1 \bs
-# 1st cursor
->>>>0x06 use cur-entry
-#>>>>0x16 use cur-entry
-# display information of one cursor entry
-0 name cur-entry
->0 use cur-ico-entry
->4 uleshort x \b, hotspot @%dx
->6 uleshort x \b%d
-# display information of one icon entry
-0 name ico-entry
->0 use cur-ico-entry
-# normally 0 1 but also found 14
->4 uleshort >1 \b, %d planes
-# normally 0 1 but also found some 3, 4, some 6, 8, 24, many 32, two 256
->6 uleshort >1 \b, %d bits/pixel
-# display shared information of cursor or icon entry
-0 name cur-ico-entry
->0 byte =0 \b, 256x
->0 byte !0 \b, %dx
->1 byte =0 \b256
->1 byte !0 \b%d
-# number of colors in palette
->2 ubyte !0 \b, %d colors
-# reserved 0 FFh
-#>3 ubyte x \b, reserved %x
-#>8 ulelong x \b, image size %d
-# offset of PNG or DIB image
-#>12 ulelong x \b, offset %#x
-# PNG header (\x89PNG)
->(12.l) ubelong =0x89504e47
-# 1 space char after "with" to get phrase "with PNG image" by magic in ./images
->>&-4 indirect x \b with
-# DIB image
->(12.l) ubelong !0x89504e47
-#>>&-4 use dib-image
-
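A compact Python sketch of the ICONDIR/ICONDIRENTRY layout walked by cur-ico-dir, cur-entry and ico-entry above; parse_cur_ico_dir is an illustrative name, and the PNG-versus-DIB test mirrors the check on the 4 bytes at the image offset:

    import struct

    def parse_cur_ico_dir(data: bytes):
        # ICONDIR: reserved (0), type (1 = icon, 2 = cursor), entry count;
        # each 16-byte ICONDIRENTRY holds width, height, palette colors,
        # a reserved byte, planes/hotspot-x, bpp/hotspot-y, image size, offset.
        _reserved, res_type, count = struct.unpack_from("<HHH", data, 0)
        entries = []
        for i in range(count):
            w, h, colors, _rsvd, field1, field2, size, offset = \
                struct.unpack_from("<BBBBHHII", data, 6 + 16 * i)
            is_png = data[offset:offset + 4] == b"\x89PNG"  # else a DIB header
            entries.append((w or 256, h or 256, colors, field1, field2, is_png))
        return "icon" if res_type == 1 else "cursor", entries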
-# Windows non-animated cursors
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/CUR_(file_format)
-# Note: similar to Windows ICOn; container for BMP (only DIB part)
-# GRR: line below is too general as it also catches Lotus 1-2-3 files
-0 belong 0x00000200
->9 byte 0
->>0 use cur-ico-dir
->9 ubyte 0xff
->>0 use cur-ico-dir
-
-# .chr files
-0 string/b PK\010\010BGI Borland font
->4 string >\0 %s
-# then there is a copyright notice
-
-
-# .bgi files
-0 string/b pk\010\010BGI Borland device
->4 string >\0 %s
-# then there is a copyright notice
-
-
-# Windows Recycle Bin record file (named INFO2)
-# By Abel Cheung (abelcheung AT gmail dot com)
-# Version 4 always has 280 bytes (0x118) per record, version 5 has 800 bytes
-# Since Vista uses another structure, INFO2 structure probably won't change
-# anymore. Detailed analysis in:
-# http://www.cybersecurityinstitute.biz/downloads/INFO2.pdf
-0 lelong 0x00000004
->12 lelong 0x00000118 Windows Recycle Bin INFO2 file (Win98 or below)
-
-0 lelong 0x00000005
->12 lelong 0x00000320 Windows Recycle Bin INFO2 file (Win2k - WinXP)
-
-# From Doug Lee via a FreeBSD pr
-9 string GERBILDOC First Choice document
-9 string GERBILDB First Choice database
-9 string GERBILCLIP First Choice database
-0 string GERBIL First Choice device file
-9 string RABBITGRAPH RabbitGraph file
-0 string DCU1 Borland Delphi .DCU file
-0 string =!<spell> MKS Spell hash list (old format)
-0 string =!<spell2> MKS Spell hash list
-# Too simple - MPi
-#0 string AH Halo(TM) bitmapped font file
-0 lelong 0x08086b70 TurboC BGI file
-0 lelong 0x08084b50 TurboC Font file
-
-# Debian#712046: The magic below identifies "Delphi compiled form data".
-# An additional source of information is available at:
-# http://www.woodmann.com/fravia/dafix_t1.htm
-0 string TPF0
->4 pstring >\0 Delphi compiled form '%s'
-
-# tests for DBase files moved, updated and merged to database
-
-0 string PMCC Windows 3.x .GRP file
-1 string RDC-meg MegaDots
->8 byte >0x2F version %c
->9 byte >0x2F \b.%c file
-
-# .PIF files added by Joerg Jenderek from https://smsoft.ru/en/pifdoc.htm
-# only for Windows versions 3.0 or greater
-0x171 string MICROSOFT\ PIFEX\0 Windows Program Information File
-!:mime application/x-dosexec
-!:ext pif
-#>2 string >\0 \b, Title:%.30s
->0x24 string >\0 \b for %.63s
->0x65 string >\0 \b, directory=%.64s
->0xA5 string >\0 \b, parameters=%.64s
-#>0x181 leshort x \b, offset %x
-#>0x183 leshort x \b, offsetdata %x
-#>0x185 leshort x \b, section length %x
->0x187 search/0xB55 WINDOWS\ VMM\ 4.0\0
->>&0x5e ubyte >0
->>>&-1 string <PIFMGR.DLL \b, icon=%s
-#>>>&-1 string PIFMGR.DLL \b, icon=%s
->>>&-1 string >PIFMGR.DLL \b, icon=%s
->>&0xF0 ubyte >0
->>>&-1 string <Terminal \b, font=%.32s
-#>>>&-1 string =Terminal \b, font=%.32s
->>>&-1 string >Terminal \b, font=%.32s
->>&0x110 ubyte >0
->>>&-1 string <Lucida\ Console \b, TrueTypeFont=%.32s
-#>>>&-1 string =Lucida\ Console \b, TrueTypeFont=%.32s
->>>&-1 string >Lucida\ Console \b, TrueTypeFont=%.32s
-#>0x187 search/0xB55 WINDOWS\ 286\ 3.0\0 \b, Windows 3.X standard mode-style
-#>0x187 search/0xB55 WINDOWS\ 386\ 3.0\0 \b, Windows 3.X enhanced mode-style
->0x187 search/0xB55 WINDOWS\ NT\ \ 3.1\0 \b, Windows NT-style
-#>0x187 search/0xB55 WINDOWS\ NT\ \ 4.0\0 \b, Windows NT-style
->0x187 search/0xB55 CONFIG\ \ SYS\ 4.0\0 \b +CONFIG.SYS
-#>>&06 string x \b:%s
->0x187 search/0xB55 AUTOEXECBAT\ 4.0\0 \b +AUTOEXEC.BAT
-#>>&06 string x \b:%s
-
-# Norton Guide (.NG , .HLP) files added by Joerg Jenderek from source NG2HTML.C
-# of http://www.davep.org/norton-guides/ng2h-105.tgz
-# https://en.wikipedia.org/wiki/Norton_Guides
-0 string NG\0\001
-# only value 0x100 found at offset 2
->2 ulelong 0x00000100 Norton Guide
-!:mime application/x-norton-guide
-# often like NORTON.NG but sometimes like NC.HLP
-!:ext ng/hlp
-# Title[40]
->>8 string >\0 "%-.40s"
-#>>6 uleshort x \b, MenuCount=%u
-# szCredits[5][66]
->>48 string >\0 \b, %-.66s
->>114 string >\0 %-.66s
-
-# URL: https://en.wikipedia.org/wiki/Norton_Commander
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/m/msg-nc-eng.trid.xml
-# From: Joerg Jenderek
-# Note: Message file is used by the executable with the same base name.
-# Only tested with version 5.50 (english) and 2.01 (Windows)
-0 string Abort
-# \0 or i
-#>5 ubyte x %x
-# skip ASCII Abort text by looking for error message like in NCVIEW.MSG
->6 search/7089 Non-DOS\ disk Norton Commander module message
-!:mime application/x-norton-msg
-!:ext msg
-
-# URL: http://www.antonis.de/dos/dos-tuts/mpdostip/html/nwdostip.htm
-# Reference: https://mark0.net/download/triddefs_xml.7z/defs/m/msg-netware-dos.trid.xml
-# From: Joerg Jenderek
-0 string DOS\ Client\ Message\ File: Novell DOS client message
-#!:mime application/octet-stream
-#!:mime application/x-novell-msg
-!:ext msg
-# look for a second letter instead of a space character
->26 ubyte >0x20
-# digit 1 or often main or program name like: IPXODI.COM TASKID pnwtrap DOSRqstr
->>25 ubyte !0x20 %c
->>>26 ubyte !0x20 \b%c
->>>>27 ubyte !0x20 \b%c
->>>>>28 ubyte !0x20 \b%c
->>>>>>29 ubyte !0x20 \b%c
->>>>>>>30 ubyte !0x20 \b%c
->>>>>>>>31 ubyte !0x20 \b%c
->>>>>>>>>32 ubyte !0x20 \b%c
->>>>>>>>>>33 ubyte !0x20 \b%c
->>>>>>>>>>>34 ubyte !0x20 \b%c
->>>>>>>>>>>>35 ubyte !0x20 \b%c
->>>>>>>>>>>>>36 ubyte !0x20 \b%c
-# followed by string like: 0 v.10 V1.20
-#
-# followed by ,\040Tran
->28 search/14 ,\040Tran
-# probably translated version string like: 0 v1.00
->>&0 string x \b, tran version %s
-# followed by Ctrl-J Ctrl-Z
->>>&0 ubyte !0xa \b, terminated by %#2.2x
->>>>&0 ubyte x \b%2.2x
-# Ctrl-Z
->0x65 ubyte !0x1A \b, at 0x65 %#x
-# one
->0x66 ubyte !0x01 \b, at 0x66 %#x
-# URL: https://en.wikipedia.org/wiki/NetWare
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dat-novell-msg.trid.xml
-# ftp://ftp.iitb.ac.in/LDP/en/NLM-HOWTO/NLM-HOWTO-single.html
-# From: Joerg Jenderek
-0 string Novell\ Message\ Librarian\ Data\ File Novell message librarian data
-#>35 string Version\ 1.00
-#>49 string COPYRIGHT\ (c)\ 1985\ by\ Novell,\ Inc.
-#>83 string \ \ All\ Rights\ Reserved
-#!:mime application/octet-stream
-#!:mime application/x-novell-msg
-!:ext msg
-#!:ext msg/dat
-# 4DOS help (.HLP) files added by Joerg Jenderek from source TPHELP.PAS
-# of https://www.4dos.info/
-# pointer,HelpID[8]=4DHnnnmm
-0 ulelong 0x48443408 4DOS help file
->4 string x \b, version %-4.4s
-
-# old binary Microsoft (.HLP) files added by Joerg Jenderek from http://file-extension.net/seeker/file_extension_hlp
-0 ulequad 0x3a000000024e4c MS Advisor help file
-
-# HtmlHelp files (.chm)
-0 string/b ITSF\003\000\000\000\x60\000\000\000 MS Windows HtmlHelp Data
-!:mime application/vnd.ms-htmlhelp
-!:ext chm
-
-# GFA-BASIC (Wolfram Kleff)
-2 string/b GFA-BASIC3 GFA-BASIC 3 data
-
-#------------------------------------------------------------------------------
-# From Stuart Caie <kyzer@4u.net> (developer of cabextract)
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Cabinet_(file_format)
-# Reference: https://msdn.microsoft.com/en-us/library/bb267310.aspx
-# Note: verified by `7z l *.cab`
-# Microsoft Cabinet files
-0 string/b MSCF\0\0\0\0 Microsoft Cabinet archive data
-#
-# https://support.microsoft.com/en-us/help/973559/frequently-asked-questions-about-the-microsoft-support-diagnostic-tool
-# CAB with *.{diagcfg,diagpkg} is used by Microsoft Support Diagnostic Tool MSDT.EXE
-# because some archives do not have *.diag* as 1st or 2nd archive member, like
-# O15CTRRemove.diagcab or AzureStorageAnalyticsLogs_global.DiagCab
-# brute-force search after the header for filenames with diagcfg or diagpkg extension in the CFFILE section
->0x2c search/980/c .diag \b, Diagnostic
-!:mime application/vnd.ms-cab-compressed
-!:ext diagcab
-# http://fileformats.archiveteam.org/wiki/PUZ
-# Microsoft Publisher version about 2003 has a "Pack and Go" feature that
-# bundles a Publisher document *PNG.pub with all links into a CAB
->0x2c search/300/c png.pub\0 \b, Publisher Packed and Go
-!:mime application/vnd.ms-cab-compressed
-!:ext puz
-# ppz variant with Microsoft PowerPoint Viewer ppview32.exe to play PowerPoint presentation
->0x2c search/17/c ppview32.exe\0 \b, PowerPoint Viewer Packed and Go
-!:mime application/vnd.ms-powerpoint
-#!:mime application/mspowerpoint
-!:ext ppz
-# URL: https://en.wikipedia.org/wiki/Windows_Desktop_Gadgets
-# Reference: https://docs.microsoft.com/en-us/previous-versions/windows/desktop/sidebar/
-# http://win10gadgets.com/download/273/ All_CPU_Meter1.zip/All_CPU_Meter_V4.7.3.gadget
->0x2c search/968/c gadget.xml \b, Windows Desktop Gadget
-#!:mime application/vnd.ms-cab-compressed
-# http://extension.nirsoft.net/gadget
-!:mime application/x-windows-gadget
-!:ext gadget
-# http://www.incredimail.com/
-# IncrediMail CAB contains an initialisation file "content.ini" like in im2.ims
->0x2c search/3369/c content.ini\0 \b, IncrediMail
-!:mime application/x-incredimail
-# member Flavor.htm implies IncrediMail ecard like in tell_a_friend.imf
->>0x2c search/83/c Flavor.htm\0 ecard
-!:ext imf
-# member Macromedia Flash data *.swf implies IncrediMail skin like in im2.ims
->>0x2c search/211/c .swf\0 skin
-!:ext ims
-# member anim.im3 implies IncrediMail animation like in letter_fold.ima
->>0x2c search/92/c anim.im3\0 animation
-!:ext ima
-# other IncrediMail cab archive
->>0x2c default x
->>>0x2c search/116/c thumb ecard, image, notifier or skin
-!:ext imf/imi/imn/ims
-# http://file-extension.net/seeker/file_extension_ime
->>>0x2c default x emoticons or sound
-!:ext ime/imw
-# no Diagnostic, Packed and Go, Windows Desktop Gadget, IncrediMail
->0x2c default x
-# look for 1st member name
->>(16.l+16) ubyte x
-# From: Joerg Jenderek
-# URL: https://docs.microsoft.com/en-us/windows-hardware/drivers/install/building-device-metadata-packages
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/devicemetadata-ms.trid.xml
->>>&-1 string PackageInfo.xml \b, Device Metadata Package
-!:mime application/vnd.ms-cab-compressed
-!:ext devicemetadata-ms
-# https://en.wikipedia.org/wiki/SNP_file_format
->>>&-1 string/c _accrpt_.snp \b, Access report snapshot
-!:mime application/msaccess
-!:ext snp
-# https://en.wikipedia.org/wiki/Microsoft_InfoPath
->>>&-1 string manifest.xsf \b, InfoPath Form Template
-!:mime application/vnd.ms-cab-compressed
-#!:mime application/vnd.ms-infopath
-!:ext xsn
-# https://www.cabextract.org.uk/wince_cab_format/
-# a ".000" extension on the DOS 8+3 name of the 1st archive member implies a Windows CE installer
->>>&7 string =.000 \b, WinCE install
-!:mime application/vnd.ms-cab-compressed
-!:ext cab
-
-# https://support.microsoft.com/kb/934307/en-US
-# All inspected MSU contain a file with name WSUSSCAN.cab
-# that is called "Windows Update meta data" by Microsoft
->>>&-1 string/c wsusscan.cab \b, Microsoft Standalone Update
-!:mime application/vnd.ms-cab-compressed
-!:ext msu
->>>&-1 default x
-# look at point character of 1st archive member name for file name extension
-# GRR: search range may be too large and may match a point elsewhere, like in EN600x64.cab!
->>>>&-1 search/255 .
-# http://www.pptfaq.com/FAQ00164_What_is_a_PPZ_file-.htm
-# PPZ files were created using the Pack & Go feature of PowerPoint versions 97 - 2002,
-# which packs a PowerPoint presentation *.ppt, an optional PLAYLIST.LST and other optional files into a CAB
->>>>>&0 string/c ppt\0
->>>>>>28 uleshort >1 \b, PowerPoint Packed and Go
-!:mime application/vnd.ms-powerpoint
-#!:mime application/mspowerpoint
-!:ext ppz
-# or POWERPNT.PPT packed as POWERPNT.PP_ found on Windows 2000,XP setup CD in directory i386
->>>>>>28 uleshort =1 \b, one packed PowerPoint
-!:mime application/vnd.ms-cab-compressed
-!:ext pp_
-# https://msdn.microsoft.com/en-us/library/windows/desktop/bb773190(v=vs.85).aspx
-# first member *.theme implies Windows 7 Theme Pack like in CommunityShowcaseAqua3.themepack
-# or Windows 8 Desktop Theme Pack like in PanoramicGlaciers.deskthemepack
->>>>>&0 string/c theme \b, Windows
-!:mime application/x-windows-themepack
-# https://www.drewkeller.com/content/using-theme-both-windows-7-and-windows-8
-# 1st member Panoramic.theme or Panoramas.theme implies Windows 8-10 Theme Pack
-# with MTSM=RJSPBS in [MasterThemeSelector] inside *.theme
->>>>>>(16.l+16) string =Panoram 8
-!:ext deskthemepack
->>>>>>(16.l+16) string !Panoram 7 or 8
-!:ext themepack/deskthemepack
->>>>>>(16.l+16) ubyte x Theme Pack
-# URL: https://en.wikipedia.org/wiki/Microsoft_OneNote#File_format
-# http://fileformats.archiveteam.org/wiki/OneNote
-# Reference: https://mark0.net/download/triddefs_xml.7z/defs/o/onepkg.trid.xml
-# 1st member name like: "Class Notes.one" "test-onenote.one" "Open Notebook.onetoc2" "Editor Öffnen.onetoc2"
->>>>>&0 string/c one \b, OneNote Package
-!:mime application/msonenote
-!:ext onepkg
->>>>>&0 default x
-# look for null terminator of 1st member name
->>>>>>&0 search/255 \0
-# 2nd member name WSUSSCAN.cab like in Microsoft-Windows-MediaFeaturePack-OOB-Package.msu
->>>>>>>&16 string/c wsusscan.cab \b, Microsoft Standalone Update
-!:mime application/vnd.ms-cab-compressed
-!:ext msu
->>>>>>>&16 default x
-# archives with more than one file need some output in version 5.32 to avoid error messages like
-# Magdir/msdos, 1138: Warning: Current entry does not yet have a description for adding a MIME type
-# Magdir/msdos, 1139: Warning: Current entry does not yet have a description for adding a EXTENSION type
-# file: could not find any valid magic files!
->>>>>>>>28 uleshort >1 \b, many
-!:mime application/vnd.ms-cab-compressed
-!:ext cab
-# remaining archives with just one file
->>>>>>>>28 uleshort =1
-# neither extra bytes nor cab chain implies Windows 2000,XP setup files in directory i386
->>>>>>>>>30 uleshort =0x0000 \b, Windows 2000/XP setup
-# cut off the last char of the source extension and add an underscore to generate the extension
-# TERMCAP._ ... FXSCOUNT.H_ ... L3CODECA.AC_ ... NPDRMV2.ZI_
-!:mime application/vnd.ms-cab-compressed
-!:ext _/?_/??_
-# archives need some output like "single" in version 5.32 to avoid error messages
->>>>>>>>>30 uleshort !0x0000 \b, single
-!:mime application/vnd.ms-cab-compressed
-!:ext cab
-# first archive name without point character
->>>>&-1 default x
->>>>>28 uleshort =1 \b, single
-!:mime application/vnd.ms-cab-compressed
-# on XP_CD\I386\ like: NETWORKS._ PROTOCOL._ QUOTES._ SERVICES._
-!:ext _
->>>>>28 uleshort >1 \b, many
-!:mime application/vnd.ms-cab-compressed
-# like: HP Envy 6000 printer driver packages Full_x86.cab Full_x64.cab
-!:ext cab
-# TODO: additional extensions like
-# .xtp InfoPath Template Part
-# .lvf Logitech Video Effects Face Accessory
->8 ulelong x \b, %u bytes
->28 uleshort 1 \b, 1 file
->28 uleshort >1 \b, %u files
-# Reserved fields, set to zero
-#>4 belong !0 \b, reserved1 %x
-#>12 belong !0 \b, reserved2 %x
-# offset of the first CFFILE entry coffFiles: minimum 2Ch
->16 ulelong x \b, at %#x
->(16.l) use cab-file
-# also at least a 2nd member
->28 uleshort >1
->>(16.l+16) ubyte x
->>>&0 search/255 \0
-# second member info
->>>>&0 use cab-file
-#>20 belong !0 \b, reserved %x
-# Cabinet file format version. Currently, versionMajor = 1 and versionMinor = 3
->24 ubeshort !0x0301 \b version %#x
-# number of CFFOLDER entries
->26 uleshort >1 \b, %u cffolders
-# cabinet file option indicators 1~PREVIOUS, 2~NEXT, 4~reserved fields
-# only found for flags 0 1 2 3 4 not 7
->30 uleshort >0 \b, flags %#x
-# Cabinet files have a 16-bit cabinet setID field that is designed for application use.
-# default is zero, however, the -i option of cabarc can be used to set this field
->32 uleshort >0 \b, ID %u
-# iCabinet is number of this cabinet file in a set, where 0 for the first cabinet
-#>34 uleshort x \b, iCabinet %u
-# add one for display because humans start numbering at 1 and it also fits the disk name szDisk*
->34 uleshort+1 x \b, number %u
->30 uleshort &0x0004 \b, extra bytes
-# cbCFHeader optional size of per-cabinet reserved area 14h 1800h
->>36 uleshort >0 %u in head
-# cbCFFolder is optional size of per-folder reserved area
->>38 ubyte >0 %u in folder
-# cbCFData is optional size of per-datablock reserved area
->>39 ubyte >0 %u in data block
-# optional per-cabinet reserved area abReserve[cbCFHeader]
->>36 uleshort >0
-# 1st CFFOLDER after reserved area in header
->>>(36.s+40) use cab-folder
-# no reserved area in header
->30 uleshort ^0x0004
-# no previous and next cab archive
->>30 uleshort =0x0000
->>>36 use cab-folder
-# only previous cab archive
->>30 uleshort =0x0001 \b, previous
->>>36 use cab-anchor
-# only next cab archive
->>30 uleshort =0x0002 \b, next
->>>36 use cab-anchor
-# previous+next cab archive
-# cannot use subroutine cab-anchor to display previous and next cabinet together
-#>>>36 use cab-anchor
-#>>>>&0 use cab-anchor
->>30 uleshort =0x0003 \b, previous
->>>36 string x %s
-# optional name of previous disk szDisk*
->>>>&1 string x disk %s
->>>>>&1 string x \b, next %s
-# optional name of previous disk szDisk*
->>>>>>&1 string x disk %s
->>>>>>>&1 use cab-folder
-# display filename and disk name of previous or next cabinet
-0 name cab-anchor
-# optional name of previous/next cabinet file szCabinet*[255]
->&0 string x %s
-# optional name of previous/next disk szDisk*[255]
->>&1 string x disk %s
-# display folder structure CFFOLDER information like compression of cabinet
-0 name cab-folder
-# offset of the CFDATA block in this folder
-#>0 ulelong x \b, coffCabStart %#x
-# number of CFDATA blocks in folder
->4 uleshort x \b, %u datablock
-# plural s
->4 uleshort >1 \bs
-# compression typeCompress: 0~None 1~MSZIP 0x1503~LZX:21 0x1003~LZX:16 0x0f03~LZX:15
->6 uleshort x \b, %#x compression
-# optional per-folder reserved area
-#>8 ubequad x \b, abReserve %#llx
-# display member structure CFFILE information like member name of cabinet
-0 name cab-file
-# cbFile is uncompressed size of file in bytes
-#>0 ulelong x \b, cbFile %u
-# uoffFolderStart is uncompressed offset of file in folder
-#>4 ulelong >0 \b, uoffFolderStart %#x
-# iFolder is index into the CFFOLDER area. 0 indicates first folder in cabinet
-# define ifoldCONTINUED_FROM_PREV (0xFFFD)
-# define ifoldCONTINUED_TO_NEXT (0xFFFE)
-# define ifoldCONTINUED_PREV_AND_NEXT (0xFFFF)
->8 uleshort >0 \b, iFolder %#x
-# date stamp for file
->10 lemsdosdate x last modified %s
-# time stamp for file
->12 lemsdostime x %s
-# attribs is attribute flags for file
-# define _A_RDONLY (0x01) file is read-only
-# define _A_HIDDEN (0x02) file is hidden
-# define _A_SYSTEM (0x04) file is a system file
-# define _A_ARCH (0x20) file modified since last backup
-# example http://sebastien.kirche.free.fr/pebuilder_plugins/depends.cab
-# define _A_EXEC (0x40) run after extraction
-# define _A_NAME_IS_UTF (0x80) szName[] contains UTF
-# define UNKNOWN (0x0100) undocumented or accident
-#>14 uleshort x \b, attribs %#x
->14 uleshort >0 +
->>14 uleshort &0x0001 \bR
->>14 uleshort &0x0002 \bH
->>14 uleshort &0x0004 \bS
->>14 uleshort &0x0020 \bA
->>14 uleshort &0x0040 \bX
->>14 uleshort &0x0080 \bUtf
-# unknown 0x0100 flag found on one XP_CD:\I386\DRIVER.CAB
->>14 uleshort &0x0100 \b?
-# szName is name of archive member
->16 string x "%s"
-# next archive member name if more files
-#>>&17 string >\0 \b, NEXT NAME %-.50s
-
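To make the CFHEADER offsets used throughout the cabinet rules above easier to follow (size at 8, coffFiles at 16, version at 24, counts at 26/28, flags, setID, iCabinet), here is a minimal Python sketch; parse_cab_header is a hypothetical name and only the fixed 36-byte part of the header is read:

    import struct

    def parse_cab_header(data: bytes):
        # CFHEADER: "MSCF", reserved1, total size, reserved2, offset of first
        # CFFILE, reserved3, minor/major version, folder/file counts, flags,
        # set ID, cabinet index within the set
        (sig, _r1, cb, _r2, coff_files, _r3, ver_minor, ver_major,
         n_folders, n_files, flags, set_id, i_cabinet) = \
            struct.unpack_from("<4sIIIIIBBHHHHH", data, 0)
        if sig != b"MSCF":
            raise ValueError("not a Microsoft Cabinet")
        return dict(size=cb, first_file_at=coff_files,
                    version=(ver_major, ver_minor), folders=n_folders,
                    files=n_files, flags=flags, set_id=set_id,
                    cabinet_number=i_cabinet + 1)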
-# InstallShield Cabinet files
-0 string/b ISc( InstallShield Cabinet archive data
->5 byte&0xf0 =0x60 version 6,
->5 byte&0xf0 !0x60 version 4/5,
->(12.l+40) lelong x %u files
-
-# Windows CE package files
-0 string/b MSCE\0\0\0\0 Microsoft WinCE install header
->20 lelong 0 \b, architecture-independent
->20 lelong 103 \b, Hitachi SH3
->20 lelong 104 \b, Hitachi SH4
->20 lelong 0xA11 \b, StrongARM
->20 lelong 4000 \b, MIPS R4000
->20 lelong 10003 \b, Hitachi SH3
->20 lelong 10004 \b, Hitachi SH3E
->20 lelong 10005 \b, Hitachi SH4
->20 lelong 70001 \b, ARM 7TDMI
->52 leshort 1 \b, 1 file
->52 leshort >1 \b, %u files
->56 leshort 1 \b, 1 registry entry
->56 leshort >1 \b, %u registry entries
-
-
-# Windows Enhanced Metafile (EMF)
-# See msdn.microsoft.com/archive/en-us/dnargdi/html/msdn_enhmeta.asp
-# for further information.
-0 ulelong 1
->40 string \ EMF Windows Enhanced Metafile (EMF) image data
->>44 ulelong x version %#x
-
-
-0 string/b \224\246\056 Microsoft Word Document
-!:mime application/msword
-
-# From: "Nelson A. de Oliveira" <naoliv@gmail.com>
-# Magic type for Dell's BIOS .hdr files
-# Dell's .hdr
-0 string/b $RBU
->23 string Dell %s system BIOS
->5 byte 2
->>48 byte x version %d.
->>49 byte x \b%d.
->>50 byte x \b%d
->5 byte <2
->>48 string x version %.3s
-
-# Type: Microsoft Document Imaging Format (.mdi)
-# URL: https://en.wikipedia.org/wiki/Microsoft_Document_Imaging_Format
-# From: Daniele Sempione <scrows@oziosi.org>
-# Too weak (EP)
-#0 short 0x5045 Microsoft Document Imaging Format
-
-# MS eBook format (.lit)
-0 string/b ITOLITLS Microsoft Reader eBook Data
->8 lelong x \b, version %u
-!:mime application/x-ms-reader
-
-# Windows CE Binary Image Data Format
-# From: Dr. Jesus <j@hug.gs>
-0 string/b B000FF\n Windows Embedded CE binary image
-
-# The second byte of these signatures is a file version; I don't know what,
-# if anything, produced files with version numbers 0-2.
-# From: John Elliott <johne@seasip.demon.co.uk>
-0 string \xfc\x03\x00 Mallard BASIC program data (v1.11)
-0 string \xfc\x04\x00 Mallard BASIC program data (v1.29+)
-0 string \xfc\x03\x01 Mallard BASIC protected program data (v1.11)
-0 string \xfc\x04\x01 Mallard BASIC protected program data (v1.29+)
-
-0 string MIOPEN Mallard BASIC Jetsam data
-0 string Jetsam0 Mallard BASIC Jetsam index data
-
-# DOS backup 2.0 to 3.2
-# URL: http://fileformats.archiveteam.org/wiki/BACKUP_(MS-DOS)
-# Reference: http://www.ibiblio.org/pub/micro/pc-stuff/freedos/files/dos/restore/brtecdoc.htm
-# backupid.@@@
-
-# plausibility check for date
-0x3 ushort >1979
->0x5 ubyte-1 <31
->>0x6 ubyte-1 <12
-# actually 121 nul bytes
->>>0x7 string \0\0\0\0\0\0\0\0
->>>>0x1 ubyte x DOS 2.0 backup id file, sequence %d
-#!:mime application/octet-stream
-!:ext @@@
->>>>0x0 ubyte 0xff \b, last disk
-
-# backed up file
-
-# skip some files like AppleWorks word document Tomahawk.Awp or WIN98SE-DE.vhd
-# by looking for trailing nul of maximal file name string
-0x52 ubyte 0
-# test for flag byte: FFh~complete file, 00h~split file
-# FFh -127 = -1 -127 = -128
-# 00h -127 = 0 -127 = -127
->0 byte-127 <-126
-# plausibility check for file name length
->>0x53 ubyte-1 <78
-# looking for terminating nul of file name string
->>>(0x53.b+4) ubyte 0
-# checking whether the last char of the string is a valid DOS file name character
->>>>(0x53.b+3) ubyte >0x1F
-# actually 44 nul bytes
-# but sometimes garbage according to Ralf Quint, so it cannot be used as a test
-#>0x54 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
-# first char of full file name is DOS (5Ch) or UNIX (2Fh) path separator
-# only DOS variant found. UNIX variant according to V32SLASH.TXT in archive PD0315.EXE
->>>>>5 ubyte&0x8C 0x0C
-# ./msdos (version 5.30) labeled the entry as
-# "DOS 2.0 backed up file %s, split file, sequence %d" or
-# "DOS 2.0 backed up file %s, complete file"
->>>>>>0 ubyte x DOS 2.0-3.2 backed up
-#>>>>>>0 ubyte 0xff complete
->>>>>>0 ubyte 0
->>>>>>>1 uleshort x sequence %d of
-# full file name with path but without drive letter and colon stored from 0x05 til 0x52
->>>>>>0x5 string x file %s
-#!:mime application/octet-stream
-# backup name is original filename
-#!:ext doc/exe/rar/zip
-#!:ext *
-# magic/Magdir/msdos, 1169: Warning: EXTENSION type ` *' has bad char '*'
-# file: line 1169: Bad magic entry ' *'
-# after header original file content
->>>>>>128 indirect x \b;
-
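A small Python sketch of the DOS 2.0-3.2 backed-up member layout tested above (flag byte, sequence word, path string starting at offset 5, payload at 128); parse_dos_backup_member is an illustrative name and the cp437 decoding is an assumption:

    def parse_dos_backup_member(data: bytes):
        # flag byte: FFh = complete file, 00h = split part; then a 16-bit
        # sequence number, the ASCIIZ path (without drive letter) at 5-0x52,
        # and the original file content starting at offset 128
        complete = data[0] == 0xFF
        sequence = int.from_bytes(data[1:3], "little")
        name = data[5:0x53].split(b"\0", 1)[0].decode("cp437", "replace")
        return complete, sequence, name, data[128:]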
-
-# DOS backup 3.3 to 5.x
-
-# CONTROL.nnn files
-0 string \x8bBACKUP\x20
-# actually 128 nul bytes
->0xa string \0\0\0\0\0\0\0\0
->>0x9 ubyte x DOS 3.3 backup control file, sequence %d
->>0x8a ubyte 0xff \b, last disk
-
-# NB: The BACKUP.nnn files consist of the files backed up,
-# concatenated.
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/MS-DOS_date/time
-# Reference: https://docs.microsoft.com/en-us/windows/win32/api/winbase/nf-winbase-dosdatetimetofiletime
-# Note: DOS date+time format is different from formats such as Unix epoch
-# bit encoded; uses year values relative to 1980 and 2 second precision
-0 name dos-date
-# HHHHHMMMMMMSSSSS bit encoded Hour (0-23) Minute (0-59) SecondPart (*2)
-#>0 uleshort x RAW TIME [%#4.4x]
-# hour part
-#>0 uleshort/2048 x hour [%u]
-# YYYYYYYMMMMDDDDD bit encoded YearPart (+1980) Month (1-12) Day (1-31)
-#>2 uleshort x RAW DATE [%#4.4x]
-# day part
->2 uleshort&0x001F x %u
-#>2 uleshort/16 x MONTH PART [%#x]
-# GRR: not working
-#>2 uleshort/16 &0x000F MONTH [%u]
-#>2 uleshort&0x01E0 x MONTH PART [%#4.4x]
->2 uleshort&0x01E0 =0x0020 jan
->2 uleshort&0x01E0 =0x0040 feb
->2 uleshort&0x01E0 =0x0060 mar
->2 uleshort&0x01E0 =0x0080 apr
->2 uleshort&0x01E0 =0x00A0 may
->2 uleshort&0x01E0 =0x00C0 jun
->2 uleshort&0x01E0 =0x00E0 jul
->2 uleshort&0x01E0 =0x0100 aug
->2 uleshort&0x01E0 =0x0120 sep
->2 uleshort&0x01E0 =0x0140 oct
->2 uleshort&0x01E0 =0x0160 nov
->2 uleshort&0x01E0 =0x0180 dec
-# year part
->2 uleshort/512 x 1980+%u
-#
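The dos-date helper above decodes the two 16-bit words field by field; in ordinary code the same conversion is a few shifts and masks. A minimal Python sketch (decode_dos_datetime is an illustrative name):

    import datetime

    def decode_dos_datetime(raw_time: int, raw_date: int) -> datetime.datetime:
        # time word: HHHHHMMMMMMSSSSS -> hours (0-23), minutes (0-59), seconds/2
        second = (raw_time & 0x001F) * 2
        minute = (raw_time >> 5) & 0x3F
        hour = (raw_time >> 11) & 0x1F
        # date word: YYYYYYYMMMMDDDDD -> years since 1980, month (1-12), day (1-31)
        day = raw_date & 0x001F
        month = (raw_date >> 5) & 0x0F
        year = ((raw_date >> 9) & 0x7F) + 1980
        return datetime.datetime(year, month, day, hour, minute, second)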
diff --git a/contrib/libs/libmagic/magic/Magdir/msooxml b/contrib/libs/libmagic/magic/Magdir/msooxml
deleted file mode 100644
index 905017eb91..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/msooxml
+++ /dev/null
@@ -1,68 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: msooxml,v 1.19 2023/03/14 19:46:15 christos Exp $
-# msooxml: file(1) magic for Microsoft Office XML
-# From: Ralf Brown <ralf.brown@gmail.com>
-
-# .docx, .pptx, and .xlsx are XML plus other files inside a ZIP
-# archive. The first member file is normally "[Content_Types].xml",
-# but some LibreOffice-generated files put it later. Perhaps skip
-# the "[Content_Types].xml" test?
-# Since MSOOXML doesn't have anything like the uncompressed "mimetype"
-# file of ePub or OpenDocument, we'll have to scan for a filename
-# which can distinguish between the three types
-
-0 name msooxml
->0 string word/ Microsoft Word 2007+
-!:mime application/vnd.openxmlformats-officedocument.wordprocessingml.document
-!:ext docx
->0 string ppt/ Microsoft PowerPoint 2007+
-!:mime application/vnd.openxmlformats-officedocument.presentationml.presentation
-!:ext pptx
->0 string xl/ Microsoft Excel 2007+
-!:mime application/vnd.openxmlformats-officedocument.spreadsheetml.sheet
-!:ext xlsx
->0 string visio/ Microsoft Visio 2013+
-!:mime application/vnd.ms-visio.drawing.main+xml
->0 string AppManifest.xaml Microsoft Silverlight Application
-!:mime application/x-silverlight-app
-
-# start by checking for ZIP local file header signature
-0 string PK\003\004
-!:strength +10
-# make sure the first file is correct
->0x1E use msooxml
->0x1E default x
->>0x1E regex \\[Content_Types\\]\\.xml|_rels/\\.rels|docProps|customXml
-# skip to the second local file header
-# since some documents include a 520-byte extra field following the file
-# header, we need to scan for the next header
->>>(18.l+49) search/6000 PK\003\004
-# now skip to the *third* local file header; again, we need to scan due to a
-# 520-byte extra field following the file header
->>>>&26 search/6000 PK\003\004
-# and check the subdirectory name to determine which type of OOXML
-# file we have. Correct the mimetype with the registered ones:
-# https://technet.microsoft.com/en-us/library/cc179224.aspx
->>>>>&26 use msooxml
->>>>>&26 default x
-# OpenOffice/LibreOffice orders ZIP entries differently, so check the 4th file
->>>>>>&26 search/6000 PK\003\004
->>>>>>>&26 use msooxml
-# Some OOXML generators add an extra customXml directory. Check another file.
->>>>>>>&26 default x
->>>>>>>>&26 search/6000 PK\003\004
->>>>>>>>>&26 use msooxml
->>>>>>>>>&26 default x Microsoft OOXML
->>>>>>>&26 default x Microsoft OOXML
->>>>>&26 default x Microsoft OOXML
->>0x1E regex \\[trash\\]
->>>&26 search/6000 PK\003\004
->>>>&26 search/6000 PK\003\004
->>>>>&26 use msooxml
->>>>>&26 default x
->>>>>>&26 search/6000 PK\003\004
->>>>>>>&26 use msooxml
->>>>>>>&26 default x Microsoft OOXML
->>>>>>&26 default x Microsoft OOXML
->>>>>&26 default x Microsoft OOXML
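The rules above emulate, in magic syntax, a scan over the first few ZIP member names; the same idea expressed with Python's zipfile module might look like the sketch below (classify_ooxml and OOXML_TYPES are illustrative names, and the limit of 8 names is an arbitrary assumption):

    import zipfile

    OOXML_TYPES = {
        "word/": "Microsoft Word 2007+ (.docx)",
        "ppt/": "Microsoft PowerPoint 2007+ (.pptx)",
        "xl/": "Microsoft Excel 2007+ (.xlsx)",
    }

    def classify_ooxml(path: str) -> str:
        # An OOXML file is a ZIP whose member names start with a type-specific
        # directory; [Content_Types].xml may or may not come first, so scan a
        # handful of names instead of only the first one.
        with zipfile.ZipFile(path) as zf:
            for name in zf.namelist()[:8]:
                for prefix, label in OOXML_TYPES.items():
                    if name.startswith(prefix):
                        return label
        return "Microsoft OOXML"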
diff --git a/contrib/libs/libmagic/magic/Magdir/msvc b/contrib/libs/libmagic/magic/Magdir/msvc
deleted file mode 100644
index fbfa4f266f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/msvc
+++ /dev/null
@@ -1,222 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: msvc,v 1.11 2022/01/17 17:17:30 christos Exp $
-# msvc: file(1) magic for msvc
-# "H. Nanosecond" <aldomel@ix.netcom.com>
-# Microsoft visual C
-#
-# I have version 1.0
-
-# .aps
-0 string HWB\000\377\001\000\000\000 Microsoft Visual C .APS file
-
-# .ide
-#too long 0 string \102\157\162\154\141\156\144\040\103\053\053\040\120\162\157\152\145\143\164\040\106\151\154\145\012\000\032\000\002\000\262\000\272\276\372\316 MSVC .ide
-0 string \102\157\162\154\141\156\144\040\103\053\053\040\120\162\157 MSVC .ide
-
-# .res
-0 string \000\000\000\000\040\000\000\000\377 MSVC .res
-0 string \377\003\000\377\001\000\020\020\350 MSVC .res
-0 string \377\003\000\377\001\000\060\020\350 MSVC .res
-
-#.lib
-# URL: https://en.wikipedia.org/wiki/Microsoft_Visual_C%2B%2B
-# http://fileformats.archiveteam.org/wiki/Microsoft_Library
-# http://fileformats.archiveteam.org/wiki/OMF
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lib-msvc.trid.xml
-# https://pierrelib.pagesperso-orange.fr/exec_formats/OMF_v1.1.pdf
-# Update: Joerg Jenderek
-#0 string \360\015\000\000 Microsoft Visual C library
-#0 string \360\075\000\000 Microsoft Visual C library
-#0 string \360\175\000\000 Microsoft Visual C library
-# test for RecordType~LibraryHeaderRecord=0xF0 + RecordLength=???Dh + dictionary offset is multiple of 0x200
-0 ubelong&0xFF0f80ff =0xF00d0000
-# Microsoft Visual C library (strength=70) before MIDI SysEx messages (strength=50) handled by ./sysex
-#!:strength +0
-# test for valid 2nd RecordType~Translator Header Record=THEADR=80h or LHEADR=82h
->(1.s+3) ubyte&0xFD =0x80
->>0 use omf-lib
-# display information about Microsoft Visual C/OMF library
-0 name omf-lib
-# RecordType~LibraryHeaderRecord=0xF0
-#>0 byte 0xF0 Microsoft Visual C library
-# the above description was used in file version 5.41
->0 byte 0xF0 Microsoft Visual C/OMF library
-#>0	byte		0xF0		relocatable Object Module Format (OMF) library
-#!:mime application/octet-stream
-!:mime application/x-omf-lib
-!:ext lib
-# 1st record data length like 13=0Dh 29=1Dh 61=3Dh 125=7Dh 509=01FDh ... 32765=7FFDh
-#>1 uleshort x \b, 1st record data length %u
-#>1 uleshort x \b, 1st record data length %#x
-# 2**4=16 <= RecordLength+3 = PageSize = 2**n {16 32 512 no examples 64 128 256 1024 2048 ...32768} <= 2**15=32768
->1 uleshort+3 x \b, page size %u
-# dictionary offset like: 400h 600h a00h c00h 1200h 1800h 2400h 5600h 12800h 19200h 28a00h
->3 ulelong x \b, at %#x dictionary
-# dictionary blocks are 512 bytes; the first 37 bytes correspond to the 37 buckets
-#>(3.l) ubequad x (%#16.16llx...)
-# dictionary size; length in 512-byte blocks; a prime number? like:
-# 1 2 3 4 5 6 7 9 11 13 15 16 18 21 22 23 24 25 31 50 53 89 101 117 277
->7 uleshort x with %u block
-# plural s
->7 uleshort >1 \bs
-# If dictionary byte 38 (FFLAG) has the value 255, there is no space left
->(3.l+37) ubyte <0xFF (FFLAG=%#x)
->(3.l+37) ubyte =0xFF (FFLAG=full)
-# dictionary entry; length byte of following symbol, the following text bytes of symbol, two bytes specify the page number
-# like: dbfntx1! DBFNTX.LIB zlibCompileFlags_ ZLIB.LIB atoi! mwlibc.lib
->(3.l+38) pstring x 1st entry %s
-# like: 1 33 41 47 458 8783
->>&0 uleshort x in page %u
-# library flags; 0 or 1, but WHAT IS 0x4d in MOUSE.LIB ?
->9 ubyte >1 \b, flags %#x
->9 ubyte =1 case sensitive
-# In the library, after the header, comes the first object module with a Library Module Header Record (LHEADR=82h)
-# but in examples Translator Header Record (THEADR=80h) which is handled identically
->(1.s+3) ubyte x \b, 2nd record
->(1.s+3) ubyte !0x80 (type %#x)
-#>(1.s+4) uleshort x \b, 2nd record data length %u
-# Module name often source name like "dos\crt0.asm" in mlibce.lib or "QB4UTIL.ASM" in QB4UTIL.LIB
-# or "C:\Documents and Settings\Allan Campbell\My Documents\FDOSBoot\zlib\zutil.c" in ZLIB.LIB
-# or title like "87INIT" in FP87.LIB or "ACOSASIN" in MATHC.LIB or "Copyright" in calc-bcc.lib
->(1.s+6) pstring x "%s"
-# 2nd record checksum
-#>>&0 ubyte x checksum %#x
-# 3rd RecordType: 96h~LNAMES 88h~COMENT
->>&1 ubyte x \b, 3rd record
->>&1 ubyte !0x88
->>>&-1 ubyte !0x96
-# 3rd unusual record type
->>>>&-1 ubyte x (type %#x)
-# 3rd record is a List of Names Record (LNAMES=96h)
->>&1 ubyte =0x96 LNAMES
-# LNAMES record length like: 2 15 19
-#>>>&0 uleshort x \b, LNAMES record length %u
->>>&0 uleshort >2
-# 1st LNAME string length; null is valid; maximal 255
-#>>>>&0 ubyte x 1st LNAME length %u
->>>>&0 ubyte =0
-# 2nd LNAME length like: 4 7 8 17 31
-#>>>>>&0 ubyte x 2nd LNAME length %u
-# name used for segment, class, group, overlay, etc like:
-# CODE (mwlibc.lib) _TEXT32 (JMPPM32.LIB) _OVLCODE (WOVL.LIB)
->>>>>&0 pstring x %s
-# 3rd LNAME length like: 4 5
-#>>>>>>&0 ubyte x 3rd LNAME length %u
-# like: DATA (mwlibc.lib) CODE (JMPPM32.LIB) _TEXT (EMU87.LIB)
->>>>>>&0 pstring x %s
-# maybe 4th LNAME length like: 4 6
->>>>>>>&0 ubyte <44
-# like: DATA (DEBUG.LIB) DGROUP (mwlibc.lib MOUSE.LIB)
->>>>>>>>&-1 pstring x %s
-# 3rd record is a COMMENT (Including all comment class extensions)
->>&1 ubyte =0x88 COMMENT
-# comment record length like: 3 FLIB7M.LIB 1Bh 1Eh 23h 27h 2Bh 30h freetype-bcc.lib
-#>>>&0 uleshort x \b, record length %#x
-# real comment length = record length - 1 (comment type) - 1 (comment Class) - 1 (checksum) -1 (char count)
-# like: 2 LIBFL.LIB 4 "UUID" 5 "dscap" 6 "int386" 7 "qb4util" 8 "AMSGEXIT" 16 REXX.LIB 20 27 35 44 freetype-bcc.lib
-#>>>>&-2 uleshort-4 >0 \b, comment length %u
-# check that record contain at least comment type (1 byte), comment class (1 byte), checksum (1 byte)
-# probably always true
->>>&0 uleshort >2
-# comment type: 80h~NP~no purge bit 40h~NL~no list bit
-#>>>>&0 ubyte !0 Type %#x
->>>>&0 ubyte &0x80 Preserved
-# no example
->>>>&0 ubyte &0x40 NoList
-# comment class like: 0~Translator A0~OMF extensions A3~LIBMOD A1~New OMF extensions AA~UNKNOWN
->>>>&1 ubyte x class=%#x
-# check that comment record contains at least real content
->>>>&-2 uleshort >3
-# Translator comment record (0); it may name the source language or translator
->>>>>&1 ubyte =0 Translator
-#>>>>>>&0 ubyte x Translator length %u
-# like: "TC86 Borland Turbo C 2.01 " (GEMS.LIB) "TC86 Borland Turbo C++ 3.00" (CATDB.LIB)
->>>>>>&0 pstring x "%s"
-# OMF extensions comment record (A0); first byte of commentary string identifies subtype
->>>>>&1 ubyte =0xA0 OMF extensions
-# A0 subtype like: 1~IMPDEF
->>>>>>&0 ubyte !1 subtype %#x
-# Import Definition Record (Comment Class A0, Subtype 01~IMPDEF)
->>>>>>&0 ubyte 1 IMPDEF
-# ordinal flag; determines form of Entry Ident field. If nonzero (seems to be 1) Entry is ordinal
->>>>>>>&0 ubyte !0 ordinal
-# like: IMPORT.LIB DOSCALLS.LIB mlibw.lib mwinlibc.lib REXX.LIB
->>>>>>>>&-1 ubyte >1 %u
-# Internal Name in count, char string format; module name for the imported symbol
-# like: 7 "REXXSAA" 9 11 13 14 15 16 20 21 26 "_Z10_clip_linePdS_S_S_dddd"
-#>>>>>>>&1 ubyte x internal name length %u
-# internal module name like: _DllGetVersion DllGetVersion BezierTerminationTest Copyright
->>>>>>>&1 pstring x %s
-# module name in count, char string format; DLL name that supplies a matching export symbol
-# like: jpeg62.dll (jpeg-bcc.lib) unrar3.dll (unrar-bcc.lib) REXX (REXX.LIB)
->>>>>>>>&0 pstring x exported by %s
-# Entry Ident; 16-bit if ordinal flag != 0 or imported name in count, char string format if ordinal flag = 0
-# like: \0 (calc-bcc.lib) DllGetVersion (libtiff-bcc.lib) UTF8ToHtml (libxml2-bcc.lib) xslAddCall (libxslt-bcc.lib)
-#>>>>>>>>>&0 pstring >\0 entry ident %s
-# "New OMF" extensions comment (A1); indicate version of symbolic debug information
-# like: LIBFL.LIB
->>>>>&1 ubyte =0xA1 New OMF extensions
-# symbolic debug information version n
->>>>>>&0 ubyte x n=%u
-# symbolic debug information style like: HL~IBM PM Debugger style (LIBFL.LIB) DX~AIX style CV~Microsoft symbol and type style
->>>>>>>&0 string HL IBM style
->>>>>>>&0 string DX AIX style
->>>>>>>&0 string CV Microsoft style
-# LIBMOD comment record (A3) used only by the librarian
-# Microsoft extension added for LIB version 3.07 in macro assembler (MASM 5.0)
->>>>>&1 ubyte =0xA3 LIBMOD
-# The A3 LIBMOD record contains only the ASCII string of the module name in count char format
-#>>>>>>&0 ubyte x LIBMOD length %u
-# LIBMOD comment record module name without path and extension like:
-# qb4util (QB4UTIL.LIB) affaldiv (libh.lib) crt0 (slibc.lib) clipper (DDDRAWS.LIB) dinpdev (DINPUTS.LIB) UUID (UUID.LIB)
->>>>>>&0 pstring x %s
-# GRR: WHAT IS THAT? AA foo comment record
-#>>>>>&1 ubyte =0xAA AA-comment
-# like: OS220
-#>>>>>>&0 string x what=%-5.5s
-#
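A small Python sketch of the OMF library header record parsed by omf-lib above (record type F0h, page size = record length + 3, dictionary offset and block count, flags byte); parse_omf_lib_header is an illustrative name:

    import struct

    def parse_omf_lib_header(data: bytes):
        # Library header record: type F0h, record length (page size minus 3),
        # dictionary offset, dictionary size in 512-byte blocks, flags byte
        # (bit 0 = case sensitive)
        rec_type, rec_len, dict_offset, dict_blocks, flags = \
            struct.unpack_from("<BHIHB", data, 0)
        if rec_type != 0xF0:
            raise ValueError("not an OMF library header record")
        return dict(page_size=rec_len + 3, dictionary_at=dict_offset,
                    dictionary_blocks=dict_blocks,
                    case_sensitive=bool(flags & 1))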
-
-#.pch
-0 string DTJPCH0\000\022\103\006\200 Microsoft Visual C .pch
-
-# Summary: Symbol Table / Debug info used by Microsoft compilers
-# URL: https://en.wikipedia.org/wiki/Program_database
-# Reference: https://code.google.com/p/pdbparser/wiki/MSF_Format
-# Update: Joerg Jenderek
-# Note: tested only for Windows XP+SP3 x86, 8.1 x64 arm and 10.1 x86
-# info only partly applies to older files like msvbvm50.pdb from about year 2001
-0 string Microsoft\ C/C++\040
-# "Microsoft Program DataBase" by TrID
->24 search/14 \r\n\x1A MSVC program database
-!:mime application/x-ms-pdb
-!:ext pdb
-# "MSF 7.00" "program database 2.00" for msvbvm50.pdb
->>16 regex \([0-9.]+\) ver %s
-#>>>0x38 search/128123456 /LinkInfo \b with linkinfo
-# "MSF 7.00" variant
->>0x1e leshort 0
-# PageSize 400h 1000h
->>>0x20 lelong x \b, %d
-# Page Count
->>>0x28 lelong x \b*%d bytes
-# "program database 2.00" variant
->>0x1e leshort !0
-# PageSize 400h
->>>0x2c lelong x \b, %d
-# Page Count for msoo-dll.pdb 4379h
->>>0x32 leshort x \b*%d bytes
-
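As a sanity check matching the size calculation above (page size at 0x20 times page count at 0x28 for the MSF 7.00 variant), a minimal Python sketch; pdb_msf7_size is a hypothetical helper and only the 7.00 layout is handled:

    import struct

    def pdb_msf7_size(data: bytes):
        # MSF 7.00 superblock as matched above: ASCII banner starting with
        # "Microsoft C/C++ ", page size at 0x20 and page count at 0x28;
        # their product should equal the file size
        if not data.startswith(b"Microsoft C/C++ "):
            return None
        page_size = struct.unpack_from("<I", data, 0x20)[0]
        num_pages = struct.unpack_from("<I", data, 0x28)[0]
        return page_size * num_pages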
-# Reference: https://github.com/Microsoft/vstest/pull/856/commits/fdc7a9f074ca5a8dfeec83b1be9162bf0cf4000d
-0 string/c bsjb\001\000\001\000\000\000\000\000\f\000\000\000pdb\ v1.0 Microsoft Roslyn C# debugging symbols version 1.0
-
-#.sbr
-0 string \000\002\000\007\000 MSVC .sbr
->5 string >\0 %s
-
-#.bsc
-0 string \002\000\002\001 MSVC .bsc
-
-#.wsp
-0 string 1.00\ .0000.0000\000\003 MSVC .wsp version 1.0000.0000
-# these seem to start with the version and contain menus
diff --git a/contrib/libs/libmagic/magic/Magdir/msx b/contrib/libs/libmagic/magic/Magdir/msx
deleted file mode 100644
index 60e16569e2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/msx
+++ /dev/null
@@ -1,309 +0,0 @@
-
-#------------------------------------------------------------------------------
-# msx: file(1) magic for the MSX Home Computer
-# v1.3
-# Fabio R. Schmidlin <sd-snatcher@users.sourceforge.net>
-
-############## MSX Music file formats ##############
-
-# Gigamix MGSDRV music file
-0 string/b MGS MSX Gigamix MGSDRV3 music file,
->6 ubeshort 0x0D0A
->>3 byte x \bv%c
->>4 byte x \b.%c
->>5 byte x \b%c
->>8 string >\0 \b, title: %s
-
-1 string/b mgs2\ MSX Gigamix MGSDRV2 music file
->6 uleshort 0x80
->>0x2E uleshort 0
->>>0x30 string >\0 \b, title: %s
-
-# KSS music file
-0 string/b KSCC KSS music file v1.03
->0xE byte 0
->>0xF byte&0x02 0 \b, soundchips: AY-3-8910, SCC(+)
->>0xF byte&0x02 2 \b, soundchip(s): SN76489
->>>0xF byte&0x04 4 stereo
->>0xF byte&0x01 1 \b, YM2413
->>0xF byte&0x08 8 \b, Y8950
-
-0 string/b KSSX KSS music file v1.20
->0xE byte&0xEF 0
->>0xF byte&0x40 0x00 \b, 60Hz
->>0xF byte&0x40 0x40 \b, 50Hz
->>0xF byte&0x02 0 \b, soundchips: AY-3-8910, SCC(+)
->>0xF byte&0x02 0x02 \b, soundchips: SN76489
->>>0xF byte&0x04 0x04 stereo
->>0xF byte&0x01 0x01 \b,
->>>0xF byte&0x18 0x00 \bYM2413
->>>0xF byte&0x18 0x08 \bYM2413, Y8950
->>>0xF byte&0x18 0x18 \bYM2413+Y8950 pseudostereo
->>0xF byte&0x18 0x10 \b, Majyutsushi DAC
-
-# Moonblaster for Moonsound
-0 string/b MBMS
->4 byte 0x10 MSX Moonblaster for MoonSound music
-
-# Music Player K-kaz
-0 string/b MPK MSX Music Player K-kaz song
->6 ubeshort 0x0D0A
->>3 byte x v%c
->>4 byte x \b.%c
->>5 byte x \b%c
-
-# I don't know why these don't work
-#0 search/0xFFFF \r\n.FM9
-#>0 search/0xFFFF \r\n#FORMAT MSX Music Player K-kaz source MML file
-#0 search/0xFFFF \r\nFM1\ \=
-#>0 search/0xFFFF \r\nPSG1\=
-#>>0 search/0xFFFF \r\nSCC1\= MSX MuSiCa MML source file
-
-# OPX Music file
-0x35 beshort 0x0d0a
->0x7B beshort 0x0d0a
->>0x7D byte 0x1a
->>>0x87 uleshort 0 MSX OPX Music file
->>>>0x86 byte 0 v1.5
->>>>>0 string >\32 \b, title: %s
->>>>0x86 byte 1 v2.4
->>>>>0 string >\32 \b, title: %s
-
-# SCMD music file
-0x8B string/b SCMD
->0xCE uleshort 0 MSX SCMD Music file
-#>>-2 uleshort 0x6a71 ; The file must end with this value. How to code this here?
->>0x8F string >\0 \b, title: %s
-
-0 search/0xFFFF \r\n@title
->&0 search/0xFFFF \r\n@m=[ MSX SCMD source MML file
-
-
-############## MSX image file formats ##############
-
-# MSX raw VRAM dump
-0 ubyte 0xFE
->1 uleshort 0
->>5 uleshort 0
->>>3 uleshort 0x37FF MSX SC2/GRP raw image
->>>3 uleshort 0x6A00 MSX Graph Saurus SR5 raw image
->>>3 uleshort >0x769E
->>>>3 uleshort <0x8000 MSX GE5/GE6 raw image
->>>>>3 uleshort 0x7FFF \b, with sprite patterns
->>>3 uleshort 0xD3FF MSX screen 7-12 raw image
->>>3 uleshort 0xD400 MSX Graph Saurus SR7/SR8/SRS raw image
-
-# Graph Saurus compressed images
-0 ubyte 0xFD
->1 uleshort 0
->>5 uleshort 0
->>>3 uleshort >0x013D MSX Graph Saurus compressed image
-
-# MSX G9B image file
-0 string/b G9B
->1 uleshort 11
->>3 uleshort >10
->>>5 ubyte >0 MSX G9B image, depth=%d
->>>>8 uleshort x \b, %dx
->>>>10 uleshort x \b%d
->>>>5 ubyte <9
->>>>>6 ubyte 0
->>>>>>7 ubyte x \b, codec=%d RGB color palettes
->>>>>6 ubyte 64 \b, codec=RGB fixed color
->>>>>6 ubyte 128 \b, codec=YJK
->>>>>6 ubyte 192 \b, codec=YUV
->>>>5 ubyte >8 codec=RGB fixed color
->>>>12 ubyte 0 \b, raw
->>>>12 ubyte 1 \b, bitbuster compression
-
-############## Other MSX file formats ##############
-
-# MSX internal ROMs
-0 ubeshort 0xF3C3
->2 uleshort <0x4000
->>8 ubyte 0xC3
->>>9 uleshort <0x4000
->>>>0x0B ubeshort 0x00C3
->>>>>0x0D uleshort <0x4000
->>>>>>0x0F ubeshort 0x00C3
->>>>>>>0x11 uleshort <0x4000
->>>>>>>>0x13 ubeshort 0x00C3
->>>>>>>>>0x15 uleshort <0x4000
->>>>>>>>>>0x50 ubyte 0xC3
->>>>>>>>>>>0x51 uleshort <0x4000
->>>>>>>>>>>>(9.s) ubyte 0xC3
->>>>>>>>>>>>>&0 uleshort >0x4000
->>>>>>>>>>>>>>&0 ubyte 0xC3 MSX BIOS+BASIC
->>>>>>>>>>>>>>>0x002D ubyte+1 <3 \b. version=MSX%d
->>>>>>>>>>>>>>>0x002D ubyte 2 \b, version=MSX2+
->>>>>>>>>>>>>>>0x002D ubyte 3 \b, version=MSX Turbo-R
->>>>>>>>>>>>>>>0x002D ubyte >3 \b, version=Unknown MSX %d version
->>>>>>>>>>>>>>>0x0006 ubyte x \b, VDP.DR=%#2x
->>>>>>>>>>>>>>>0x0007 ubyte x \b, VDP.DW=%#2x
->>>>>>>>>>>>>>>0x002B ubyte&0xF 0 \b, charset=Japanese
->>>>>>>>>>>>>>>0x002B ubyte&0xF 1 \b, charset=International
->>>>>>>>>>>>>>>0x002B ubyte&0xF 2 \b, charset=Korean
->>>>>>>>>>>>>>>0x002B ubyte&0xF >2 \b, charset=Unknown id:%d
->>>>>>>>>>>>>>>0x002B ubyte&0x70 0x00 \b, date format=Y-M-D
->>>>>>>>>>>>>>>0x002B ubyte&0x70 0x10 \b, date format=M-D-Y
->>>>>>>>>>>>>>>0x002B ubyte&0x70 0x20 \b, date format=D-M-Y
->>>>>>>>>>>>>>>0x002B ubyte&0x80 0x00 \b, vfreq=60Hz
->>>>>>>>>>>>>>>0x002B ubyte&0x80 0x80 \b, vfreq=50Hz
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 0 \b, keyboard=Japanese
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 1 \b, keyboard=International
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 2 \b, keyboard=French
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 3 \b, keyboard=UK
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 4 \b, keyboard=German
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 5 \b, keyboard=Unknown id:%d
->>>>>>>>>>>>>>>0x002C ubyte&0x0F 6 \b, keyboard=Spanish
->>>>>>>>>>>>>>>0x002C ubyte&0x0F >6 \b, keyboard=Unknown id:%d
->>>>>>>>>>>>>>>0x002C ubyte&0xF0 0x00 \b, basic=Japanese
->>>>>>>>>>>>>>>0x002C ubyte&0xF0 0x10 \b, basic=International
->>>>>>>>>>>>>>>0x002C ubyte&0xF0 >0x10 \b, basic=Unknown id:%d
->>>>>>>>>>>>>>>0x002E ubyte&1 1 \b, built-in MIDI
-
-
-0 string/b CD
->2 uleshort >0x10
->>2 uleshort <0x4000
->>>4 uleshort <0x4000
->>>>6 uleshort <0x4000
->>>>>8 ubyte 0xC3
->>>>>>9 uleshort <0x4000
->>>>>>>0x10 ubyte 0xC3
->>>>>>>>0x11 uleshort <0x4000
->>>>>>>>>0x14 ubyte 0xC3
->>>>>>>>>>0x15 uleshort <0x4000 MSX2/2+/TR SubROM
-
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->0x5F0 ubequad 0x8282828244380000
->>0x150 ubyte 0x38
->>>0x170 string \20\20\20
->>>>0x1E32 string ())
->>>>>0x2130 ubequad 0xA5A5594924231807
->>>>>0x2138 ubequad 0x4A4A3424488830C0 MSX Kanji Font
-
-
-
-# MSX extension ROMs
-0 string/b AB
->2 uleshort 0x0010 MSX ROM
->>2 uleshort x \b, init=%#4x
->>4 uleshort >0 \b, stahdl=%#4x
->>6 uleshort >0 \b, devhdl=%#4x
->>8 uleshort >0 \b, bas=%#4x
->2 uleshort 0x4010 MSX ROM
->>2 uleshort x \b, init=%#04x
->>4 uleshort >0 \b, stahdl=%#04x
->>6 uleshort >0 \b, devhdl=%#04x
->>8 uleshort >0 \b, bas=%#04x
->2 uleshort 0x8010 MSX ROM
->>2 uleshort x \b, init=%#04x
->>4 uleshort >0 \b, stahdl=%#04x
->>6 uleshort >0 \b, devhdl=%#04x
->>8 uleshort >0 \b, bas=%#04x
-0 string/b AB\0\0
->6 uleshort 0
->>4 uleshort >0x400F MSX-BASIC extension ROM
->>>4 uleshort >0 \b, stahdl=%#04x
->>>6 uleshort >0 \b, devhdl=%#04x
->>>0x1C string OPLL \b, MSX-Music
->>>>0x18 string PAC2 \b (external)
->>>>0x18 string APRL \b (internal)
-
-0 string/b AB\0\0\0\0
->6 uleshort >0x400F MSX device BIOS
->>6 uleshort >0 \b, devhdl=%#04x
-
-
-0 string/b AB
-#>2 string 5JSuperLAYDOCK MSX Super Laydock ROM
-#>3 string @HYDLIDE3MSX MSX Hydlide-3 ROM
-#>3 string @3\x80IA862 Golvellius MSX1 ROM
->2 uleshort >15
->>2 uleshort <0xC000
->>>8 string \0\0\0\0\0\0\0\0
->>>>(2.s&0x3FFF) uleshort >0 MSX ROM
->>>>>0x10 string YZ\0\0\0\0 Konami Game Master 2 MSX ROM
->>>>>0x10 string CD \b, Konami RC-
->>>>>>0x12 ubyte x \b%d
->>>>>>0x13 ubyte/16 x \b%d
->>>>>>0x13 ubyte&0xF x \b%d
->>>>>0x10 string EF \b, Konami RC-
->>>>>>0x12 ubyte x \b%d
->>>>>>0x13 ubyte/16 x \b%d
->>>>>>0x13 ubyte&0xF x \b%d
->>>>>2 uleshort x \b, init=%#04x
->>>>>4 uleshort >0 \b, stahdl=%#04x
->>>>>6 uleshort >0 \b, devhdl=%#04x
->>>>>8 uleshort >0 \b, bas=%#04x
->>>2 uleshort 0
->>>>4 uleshort 0
->>>>>6 uleshort 0
->>>>>>8 uleshort >0 MSX BASIC program in ROM, bas=%#04x
-
-0x4000 string/b AB
->0x4002 uleshort >0x400F
->>0x400A string \0\0\0\0\0\0 MSX ROM with nonstandard page order
->>>0x4002 uleshort x \b, init=%#04x
->>>0x4004 uleshort >0 \b, stahdl=%#04x
->>>0x4006 uleshort >0 \b, devhdl=%#04x
->>>0x4008 uleshort >0 \b, bas=%#04x
-
-0x8000 string/b AB
->0x8002 uleshort >0x400F
->>0x800A string \0\0\0\0\0\0 MSX ROM with nonstandard page order
->>>0x8002 uleshort x \b, init=%#04x
->>>0x8004 uleshort >0 \b, stahdl=%#04x
->>>0x8006 uleshort >0 \b, devhdl=%#04x
->>>0x8008 uleshort >0 \b, bas=%#04x
-
-
-0x3C000 string/b AB
->0x3C008 string \0\0\0\0\0\0\0\0 MSX MegaROM with nonstandard page order
->>0x3C002 uleshort x \b, init=%#04x
->>0x3C004 uleshort >0 \b, stahdl=%#04x
->>0x3C006 uleshort >0 \b, devhdl=%#04x
->>0x3C008 uleshort >0 \b, bas=%#04x
-
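The various "AB" entries above all print the same four header vectors; a minimal Python sketch of that cartridge header (parse_msx_rom_header is an illustrative name, and base would be 0, 0x4000, 0x8000 or 0x3C000 for the page orders tested above):

    import struct

    def parse_msx_rom_header(data: bytes, base: int = 0):
        # MSX cartridge ROM header: "AB" signature, then four 16-bit
        # little-endian pointers: INIT, statement handler, device handler, BASIC
        if data[base:base + 2] != b"AB":
            raise ValueError("no MSX ROM header at this offset")
        init, stahdl, devhdl, bas = struct.unpack_from("<HHHH", data, base + 2)
        return dict(init=init, stahdl=stahdl, devhdl=devhdl, bas=bas)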
-# MSX BIN file
-#0 byte 0xFE
-#>1 uleshort >0x8000
-#>>3 uleshort >0x8004
-#>>>5 uleshort >0x8000 MSX BIN file
-
-# MSX-BASIC file
-0 byte 0xFF
->3 uleshort 0x000A
->>1 uleshort >0x8000 MSX-BASIC program
-
-# MSX .CAS file
-0 string/b \x1F\xA6\xDE\xBA\xCC\x13\x7D\x74 MSX cassette archive
-
-# Mega-Assembler file
-0 byte 0xFE
->1 uleshort 0x0001
->>5 uleshort 0xffff
->>>6 byte 0x0A MSX Mega-Assembler source
-
-# Execrom Patchfile
-0 string ExecROM\ patchfile\x1A MSX ExecROM patchfile
->0x12 ubyte/16 x v%d
->0x12 ubyte&0xF x \b.%d
->0x13 ubyte x \b, contains %d patches
-
-# Konami's King's Valley-2 custom stage (ELG file)
-4 uleshort 0x0900
->0xF byte 1
->>0x14 byte 0
->>>0x1E string \040\040\040
->>>>0x23 byte 1
->>>>>0x25 byte 0
->>>>>>0x15 string >\x30
->>>>>>>0x15 string <\x5A Konami King's Valley-2 custom stage, title: "%-8.8s"
->>>>>>>>0x1D byte <32 \b, theme: %d
-
-# Metal Gear 1 savegame
-#0x4F string \x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF
-#>>0x60 string \xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF
-#>>>0x7B string \0x00\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\xFF\x00 Metal Gear 1 savegame
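
The MSX cartridge rules above all hinge on the two-byte "AB" signature followed by four little-endian entry points (init, statement handler, device handler, BASIC pointer), and the Konami entries additionally split one byte into its high and low nibbles for the RC number. A minimal Python sketch of the same header decoding, assuming the ROM image starts at the cartridge header (function names are made up for illustration):

    import struct

    def describe_msx_rom(path):
        """Decode the MSX cartridge header the rules above test for:
        offsets 0-1 hold the "AB" signature, then init, stahdl, devhdl
        and bas follow as little-endian 16-bit words."""
        with open(path, "rb") as f:
            header = f.read(10)
        if header[:2] != b"AB":
            return None
        init, stahdl, devhdl, bas = struct.unpack_from("<4H", header, 2)
        fields = {"init": init, "stahdl": stahdl, "devhdl": devhdl, "bas": bas}
        # the rules print init unconditionally and skip zero-valued handlers
        return {name: hex(value) for name, value in fields.items()
                if value or name == "init"}

    # nibble split used for the Konami "RC-" numbers (ubyte/16 and ubyte&0xF):
    def split_nibbles(b):
        return b >> 4, b & 0x0F
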
diff --git a/contrib/libs/libmagic/magic/Magdir/mup b/contrib/libs/libmagic/magic/Magdir/mup
deleted file mode 100644
index 05b9471b07..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/mup
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# ------------------------------------------------------------------------
-# $File: mup,v 1.5 2017/03/17 21:35:28 christos Exp $
-# mup: file(1) magic for Mup (Music Publisher) input file.
-#
-# From: Abel Cheung <abel (@) oaka.org>
-#
-# NOTE: This header was mainly proposed on the Arkkra mailing list,
-# and is not mandatory, for compatibility with old mup input files.
-# Noteedit also uses the mup format, but likewise does not force the
-# user to include any header.
-#
-0 search/1 //!Mup Mup music publication program input text
->6 string -Arkkra (Arkkra)
->>13 string -
->>>16 string .
->>>>14 string x \b, need V%.4s
->>>15 string .
->>>>14 string x \b, need V%.3s
->6 string -
->>9 string .
->>>7 string x \b, need V%.4s
->>8 string .
->>>7 string x \b, need V%.3s
diff --git a/contrib/libs/libmagic/magic/Magdir/music b/contrib/libs/libmagic/magic/Magdir/music
deleted file mode 100644
index ad8da65938..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/music
+++ /dev/null
@@ -1,17 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: music,v 1.1 2011/11/25 03:28:17 christos Exp $
-# music: file (1) magic for music formats
-
-# BWW format used by Bagpipe Music Writer Gold by Robert MacNeil Musicworks
-# and Bagpipe Writer by Doug Wickstrom
-#
-0 string Bagpipe Bagpipe
->8 string Reader Reader
->>15 string >\0 (version %.3s)
->8 string Music\ Writer Music Writer
->>20 string :
->>>21 string >\0 (version %.3s)
->>21 string Gold Gold
->>>25 string :
->>>>26 string >\0 (version %.3s)
-
diff --git a/contrib/libs/libmagic/magic/Magdir/nasa b/contrib/libs/libmagic/magic/Magdir/nasa
deleted file mode 100644
index de3545f808..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/nasa
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# nasa: file(1) magic
-
-# From: Barry Carter <carter.barry@gmail.com>
-0 string DAF/SPK NASA SPICE file (binary format)
-0 string DAFETF\ NAIF\ DAF\ ENCODED NASA SPICE file (transfer format)
diff --git a/contrib/libs/libmagic/magic/Magdir/natinst b/contrib/libs/libmagic/magic/Magdir/natinst
deleted file mode 100644
index 7a55ddecdf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/natinst
+++ /dev/null
@@ -1,24 +0,0 @@
-
-#-----------------------------------------------------------------------------
-# $File: natinst,v 1.6 2014/06/03 19:17:27 christos Exp $
-# natinst: file(1) magic for National Instruments Code Files
-
-#
-# From <egamez@fcfm.buap.mx> Enrique Gamez-Flores
-# version 1
-# Many formats are still missing; for the moment we only handle LabVIEW.
-# We also guess the VXI file format. VISA, LabWindowsCVI, BridgeVIEW, etc. are missing.
-#
-0 string RSRC National Instruments,
-# Check if it's a LabVIEW File
->8 string LV LabVIEW File,
-# Check which kind of file it is
->>10 string SB Code Resource File, data
->>10 string IN Virtual Instrument Program, data
->>10 string AR VI Library, data
-# This is for Menu Libraries
->8 string LMNULBVW Portable File Names, data
-# This is for General Resources
->8 string rsc Resources File, data
-# This is for VXI Package
-0 string VMAP National Instruments, VXI File, data
diff --git a/contrib/libs/libmagic/magic/Magdir/ncr b/contrib/libs/libmagic/magic/Magdir/ncr
deleted file mode 100644
index 21b09aba5c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ncr
+++ /dev/null
@@ -1,49 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ncr,v 1.8 2014/04/30 21:41:02 christos Exp $
-# ncr: file(1) magic for NCR Tower objects
-#
-# contributed by
-# Michael R. Wayne *** TMC & Associates *** INTERNET: wayne@ford-vax.arpa
-# uucp: {philabs | pyramid} !fmsrl7!wayne OR wayne@fmsrl7.UUCP
-#
-0 beshort 000610 Tower/XP rel 2 object
->12 belong >0 not stripped
->20 beshort 0407 executable
->20 beshort 0410 pure executable
->22 beshort >0 - version %d
-0 beshort 000615 Tower/XP rel 2 object
->12 belong >0 not stripped
->20 beshort 0407 executable
->20 beshort 0410 pure executable
->22 beshort >0 - version %d
-0 beshort 000620 Tower/XP rel 3 object
->12 belong >0 not stripped
->20 beshort 0407 executable
->20 beshort 0410 pure executable
->22 beshort >0 - version %d
-0 beshort 000625 Tower/XP rel 3 object
->12 belong >0 not stripped
->20 beshort 0407 executable
->20 beshort 0410 pure executable
->22 beshort >0 - version %d
-0 beshort 000630 Tower32/600/400 68020 object
->12 belong >0 not stripped
->20 beshort 0407 executable
->20 beshort 0410 pure executable
->22 beshort >0 - version %d
-0 beshort 000640 Tower32/800 68020
->18 beshort &020000 w/68881 object
->18 beshort &040000 compatible object
->18 beshort &060000 object
->20 beshort 0407 executable
->20 beshort 0413 pure executable
->12 belong >0 not stripped
->22 beshort >0 - version %d
-0 beshort 000645 Tower32/800 68010
->18 beshort &040000 compatible object
->18 beshort &060000 object
->20 beshort 0407 executable
->20 beshort 0413 pure executable
->12 belong >0 not stripped
->22 beshort >0 - version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/netbsd b/contrib/libs/libmagic/magic/Magdir/netbsd
deleted file mode 100644
index 77e64f0b2e..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/netbsd
+++ /dev/null
@@ -1,251 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: netbsd,v 1.26 2019/01/01 03:11:23 christos Exp $
-# netbsd: file(1) magic for NetBSD objects
-#
-# All new-style magic numbers are in network byte order.
-# The old-style magic numbers are indistinguishable from the same magic
-# numbers used in other systems, and are handled, for all those systems,
-# in aout.
-#
-
-0 name netbsd-detail
->20 lelong x @%#x
->4 lelong >0 \b+T=%d
->8 lelong >0 \b+D=%d
->12 lelong >0 \b+B=%d
->16 lelong >0 \b+S=%d
->24 lelong >0 \b+TR=%d
->28 lelong >0 \b+TD=%d
-
-0 name netbsd-4096
->0 byte &0x80
->>20 lelong <4096 shared library
->>20 lelong =4096 dynamically linked executable
->>20 lelong >4096 dynamically linked executable
->0 byte ^0x80 executable
->16 lelong >0 not stripped
-
-0 name netbsd-8192
->0 byte &0x80
->>20 lelong <8192 shared library
->>20 lelong =8192 dynamically linked executable
->>20 lelong >8192 dynamically linked executable
->0 byte ^0x80 executable
->16 lelong >0 not stripped
->0 use netbsd-detail
-
-0 name netbsd-normal
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80
->>0 byte &0x40 position independent
->>20 lelong !0 executable
->>20 lelong =0 object file
->16 lelong >0 not stripped
->0 use netbsd-detail
-
-0 name netbsd-pure
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 lelong >0 not stripped
->0 use netbsd-detail
-
-0 name netbsd-core
->12 string >\0 from '%s'
->32 lelong !0 (signal %d)
-
-0 belong&0377777777 041400413 a.out NetBSD/i386 demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 041400410 a.out NetBSD/i386 pure
->0 use netbsd-pure
-
-0 belong&0377777777 041400407 a.out NetBSD/i386
->0 use netbsd-normal
-
-0 belong&0377777777 041400507 a.out NetBSD/i386 core
->0 use netbsd-core
-
-0 belong&0377777777 041600413 a.out NetBSD/m68k demand paged
->0 use \^netbsd-8192
-
-0 belong&0377777777 041600410 a.out NetBSD/m68k pure
->0 use \^netbsd-pure
-
-0 belong&0377777777 041600407 a.out NetBSD/m68k
->0 use \^netbsd-normal
-
-0 belong&0377777777 041600507 a.out NetBSD/m68k core
->0 use \^netbsd-core
-
-0 belong&0377777777 042000413 a.out NetBSD/m68k4k demand paged
->0 use \^netbsd-4096
-
-0 belong&0377777777 042000410 a.out NetBSD/m68k4k pure
->0 use \^netbsd-pure
-
-0 belong&0377777777 042000407 a.out NetBSD/m68k4k
->0 use \^netbsd-normal
-
-0 belong&0377777777 042000507 a.out NetBSD/m68k4k core
->0 use \^netbsd-core
-
-0 belong&0377777777 042200413 a.out NetBSD/ns32532 demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 042200410 a.out NetBSD/ns32532 pure
->0 use netbsd-pure
-
-0 belong&0377777777 042200407 a.out NetBSD/ns32532
->0 use netbsd-normal
-
-0 belong&0377777777 042200507 a.out NetBSD/ns32532 core
->0 use netbsd-core
-
-0 belong&0377777777 045200507 a.out NetBSD/powerpc core
->0 use netbsd-core
-
-0 belong&0377777777 042400413 a.out NetBSD/SPARC demand paged
->0 use \^netbsd-8192
-
-0 belong&0377777777 042400410 a.out NetBSD/SPARC pure
->0 use \^netbsd-pure
-
-0 belong&0377777777 042400407 a.out NetBSD/SPARC
->0 use \^netbsd-normal
-
-0 belong&0377777777 042400507 a.out NetBSD/SPARC core
->0 use \^netbsd-core
-
-0 belong&0377777777 042600413 a.out NetBSD/pmax demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 042600410 a.out NetBSD/pmax pure
->0 use \^netbsd-pure
-
-0 belong&0377777777 042600407 a.out NetBSD/pmax
->0 use netbsd-normal
-
-0 belong&0377777777 042600507 a.out NetBSD/pmax core
->0 use netbsd-core
-
-0 belong&0377777777 043000413 a.out NetBSD/vax 1k demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 043000410 a.out NetBSD/vax 1k pure
->0 use netbsd-pure
-
-0 belong&0377777777 043000407 a.out NetBSD/vax 1k
->0 use netbsd-normal
-
-0 belong&0377777777 043000507 a.out NetBSD/vax 1k core
->0 use netbsd-core
-
-0 belong&0377777777 045400413 a.out NetBSD/vax 4k demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 045400410 a.out NetBSD/vax 4k pure
->0 use netbsd-pure
-
-0 belong&0377777777 045400407 a.out NetBSD/vax 4k
->0 use netbsd-normal
-
-0 belong&0377777777 045400507 a.out NetBSD/vax 4k core
->0 use netbsd-core
-
-# NetBSD/alpha does not support (and has never supported) a.out objects,
-# so no rules are provided for them. NetBSD/alpha ELF objects are
-# dealt with in "elf".
-0 lelong 0x00070185 ECOFF NetBSD/alpha binary
->10 leshort 0x0001 not stripped
->10 leshort 0x0000 stripped
-0 belong&0377777777 043200507 a.out NetBSD/alpha core
->12 string >\0 from '%s'
->32 lelong !0 (signal %d)
-
-0 belong&0377777777 043400413 a.out NetBSD/mips demand paged
->0 use \^netbsd-8192
-
->16 belong >0 not stripped
-0 belong&0377777777 043400410 a.out NetBSD/mips pure
->0 use netbsd-pure
-
-0 belong&0377777777 043400407 a.out NetBSD/mips
->0 use netbsd-normal
-
-0 belong&0377777777 043400507 a.out NetBSD/mips core
->0 use netbsd-core
-
-0 belong&0377777777 043600413 a.out NetBSD/arm32 demand paged
->0 use netbsd-4096
-
-0 belong&0377777777 043600410 a.out NetBSD/arm32 pure
->0 use netbsd-pure
-
-0 belong&0377777777 043600407 a.out NetBSD/arm32
->0 use netbsd-normal
-
-# NetBSD/arm26 has always used ELF objects, but it shares a core file
-# format with NetBSD/arm32.
-0 belong&0377777777 043600507 a.out NetBSD/arm core
->0 use netbsd-core
-
-# Kernel core dump format
-0 belong&0x0000ffff 0x00008fca NetBSD kernel core file
->0 belong&0x03ff0000 0x00000000 \b, Unknown
->0 belong&0x03ff0000 0x00010000 \b, sun 68010/68020
->0 belong&0x03ff0000 0x00020000 \b, sun 68020
->0 belong&0x03ff0000 0x00640000 \b, 386 PC
->0 belong&0x03ff0000 0x00860000 \b, i386 BSD
->0 belong&0x03ff0000 0x00870000 \b, m68k BSD (8K pages)
->0 belong&0x03ff0000 0x00880000 \b, m68k BSD (4K pages)
->0 belong&0x03ff0000 0x00890000 \b, ns32532 BSD
->0 belong&0x03ff0000 0x008a0000 \b, SPARC/32 BSD
->0 belong&0x03ff0000 0x008b0000 \b, pmax BSD
->0 belong&0x03ff0000 0x008c0000 \b, vax BSD (1K pages)
->0 belong&0x03ff0000 0x008d0000 \b, alpha BSD
->0 belong&0x03ff0000 0x008e0000 \b, mips BSD (Big Endian)
->0 belong&0x03ff0000 0x008f0000 \b, arm6 BSD
->0 belong&0x03ff0000 0x00900000 \b, m68k BSD (2K pages)
->0 belong&0x03ff0000 0x00910000 \b, sh3 BSD
->0 belong&0x03ff0000 0x00950000 \b, ppc BSD (Big Endian)
->0 belong&0x03ff0000 0x00960000 \b, vax BSD (4K pages)
->0 belong&0x03ff0000 0x00970000 \b, mips1 BSD
->0 belong&0x03ff0000 0x00980000 \b, mips2 BSD
->0 belong&0x03ff0000 0x00990000 \b, m88k BSD
->0 belong&0x03ff0000 0x00920000 \b, parisc BSD
->0 belong&0x03ff0000 0x009b0000 \b, sh5/64 BSD
->0 belong&0x03ff0000 0x009c0000 \b, SPARC/64 BSD
->0 belong&0x03ff0000 0x009d0000 \b, amd64 BSD
->0 belong&0x03ff0000 0x009e0000 \b, sh5/32 BSD
->0 belong&0x03ff0000 0x009f0000 \b, ia64 BSD
->0 belong&0x03ff0000 0x00b70000 \b, aarch64 BSD
->0 belong&0x03ff0000 0x00b80000 \b, or1k BSD
->0 belong&0x03ff0000 0x00b90000 \b, RISC-V BSD
->0 belong&0x03ff0000 0x00c80000 \b, hp200 BSD
->0 belong&0x03ff0000 0x012c0000 \b, hp300 BSD
->0 belong&0x03ff0000 0x020b0000 \b, hp800 HP-UX
->0 belong&0x03ff0000 0x020c0000 \b, hp200/hp300 HP-UX
->0 belong&0xfc000000 0x04000000 \b, CPU
->0 belong&0xfc000000 0x08000000 \b, DATA
->0 belong&0xfc000000 0x10000000 \b, STACK
->4 leshort x \b, (headersize = %d
->6 leshort x \b, segmentsize = %d
->8 lelong x \b, segments = %d)
-
-# little endian only for now.
-0 name ktrace
->4 leshort 7
->>6 leshort <3 NetBSD ktrace file version %d
->>>12 string x from %s
->>>56 string x \b, emulation %s
->>>8 lelong <65536 \b, pid=%d
-
-56 string netbsd
->0 use ktrace
-56 string linux
->0 use ktrace
-56 string sunos
->0 use ktrace
-56 string hpux
->0 use ktrace
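
The a.out entries above all apply the same pattern: mask the first big-endian 32-bit word with 0377777777 (octal) and compare it against a machine/format constant, then hand off to one of the netbsd-* named subroutines for the shared details. A small sketch of that masked comparison in Python, with an abridged table taken from the rules above (the helper name is invented):

    import struct

    # abridged from the entries above; keys are the octal constants
    NETBSD_AOUT = {
        0o41400413: "a.out NetBSD/i386 demand paged",
        0o41400410: "a.out NetBSD/i386 pure",
        0o41400407: "a.out NetBSD/i386",
        0o42400413: "a.out NetBSD/SPARC demand paged",
        0o43400413: "a.out NetBSD/mips demand paged",
    }

    def classify_netbsd_aout(path):
        """Mask the leading big-endian long exactly as belong&0377777777 does."""
        with open(path, "rb") as f:
            (word,) = struct.unpack(">I", f.read(4))
        return NETBSD_AOUT.get(word & 0o377777777)
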
diff --git a/contrib/libs/libmagic/magic/Magdir/netscape b/contrib/libs/libmagic/magic/Magdir/netscape
deleted file mode 100644
index 0e1ca61334..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/netscape
+++ /dev/null
@@ -1,26 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: netscape,v 1.8 2017/03/17 21:35:28 christos Exp $
-# netscape: file(1) magic for Netscape files
-# "H. Nanosecond" <aldomel@ix.netcom.com>
-# version 3 and 4 I think
-#
-
-# Netscape Address book .nab
-0 string \000\017\102\104\000\000\000\000\000\000\001\000\000\000\000\002\000\000\000\002\000\000\004\000 Netscape Address book
-
-# Netscape Communicator address book
-0 string \000\017\102\111 Netscape Communicator address book
-
-# .snm Caches
-0 string #\ Netscape\ folder\ cache Netscape folder cache
-0 string \000\036\204\220\000 Netscape folder cache
-# .n2p
-# Net 2 Phone
-#0 string 123\130\071\066\061\071\071\071\060\070\061\060\061\063\060
-0 string SX961999 Net2phone
-
-#
-#This is files ending in .art, FIXME add more rules
-0 string JG\004\016\0\0\0\0 AOL ART image
-0 string JG\003\016\0\0\0\0 AOL ART image
diff --git a/contrib/libs/libmagic/magic/Magdir/netware b/contrib/libs/libmagic/magic/Magdir/netware
deleted file mode 100644
index 089a243644..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/netware
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: netware,v 1.5 2020/09/04 16:30:51 christos Exp $
-# netware: file(1) magic for NetWare Loadable Modules (NLMs)
-# From: Mads Martin Joergensen <mmj@suse.de>
-# URL: https://en.wikipedia.org/wiki/NetWare_Loadable_Module
-
-0 string NetWare\ Loadable\ Module NetWare Loadable Module
-#!:mime application/octet-stream
-!:ext nlm
-
diff --git a/contrib/libs/libmagic/magic/Magdir/news b/contrib/libs/libmagic/magic/Magdir/news
deleted file mode 100644
index eea8aed765..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/news
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: news,v 1.6 2009/09/19 16:28:11 christos Exp $
-# news: file(1) magic for SunOS NeWS fonts (not "news" as in "netnews")
-#
-0 string StartFontMetrics ASCII font metrics
-0 string StartFont ASCII font bits
-0 belong 0x137A2944 NeWS bitmap font
-0 belong 0x137A2947 NeWS font family
-0 belong 0x137A2950 scalable OpenFont binary
-0 belong 0x137A2951 encrypted scalable OpenFont binary
-8 belong 0x137A2B45 X11/NeWS bitmap font
-8 belong 0x137A2B48 X11/NeWS font family
diff --git a/contrib/libs/libmagic/magic/Magdir/nifty b/contrib/libs/libmagic/magic/Magdir/nifty
deleted file mode 100644
index 151d869872..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/nifty
+++ /dev/null
@@ -1,202 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: nifty,v 1.1 2022/02/14 16:51:15 christos Exp $
-# file(1) magic for the NIfTI file format
-
-# Type: NIfTI, Neuroimaging file format
-# URL: https://nifti.nimh.nih.gov/
-# From: Yann Leprince <yann.leprince@cea.fr>, 2022
-
-344 string n+1\0 NIfTI-1 neuroimaging data,
-!:mime image/x.nifti
-!:ext nii
->0 use nifti1
-344 string ni1\0 NIfTI-1 neuroimaging data header,
-!:mime image/x.nifti
-!:ext hdr
->0 use nifti1
-
-4 string n+2\0\r\n\032\n NIfTI-2 neuroimaging data,
-!:mime image/x.nifti
-!:ext nii
->0 use nifti2
-4 string ni2\0\r\n\032\n NIfTI-2 neuroimaging data header,
-!:mime image/x.nifti
-!:ext hdr
->0 use nifti2
-
-# Main subroutine for NIfTI-1
-0 name nifti1
->0 clear x
->0 lelong =348 little endian
->>70 use nifti-datatype-le
->>112 lefloat !0 with scaling
->>0 use nifti1-dim-le
->>252 leshort >0 \b, with qform
->>>252 use xform-code-nifti1-le
->>254 leshort >0 \b, with sform
->>>254 use xform-code-nifti1-le
->>136 string >\0 \b, description: %s
->0 belong =348 big endian
->>70 use \^nifti-datatype-le
->>112 befloat !0 with scaling
->>0 use \^nifti1-dim-le
->>252 beshort >0 \b, with qform
->>>252 use \^xform-code-nifti1-le
->>254 beshort >0 \b, with sform
->>>254 use \^xform-code-nifti1-le
->>136 string >\0 \b, description: %s
->0 default x
->>0 long x invalid sizeof_hdr=%d
-
-# Main subroutine for NIfTI-2
-0 name nifti2
->0 clear x
->0 lelong =540 little endian
->>12 use nifti-datatype-le
->>176 lefloat !0 with scaling
->>0 use nifti2-dim-le
->>344 lelong >0 \b, with qform
->>>344 use xform-code-nifti2-le
->>348 lelong >0 \b, with sform
->>>348 use xform-code-nifti2-le
->>240 string >\0 \b, description: %s
->0 belong =540 big endian
->>12 use \^nifti-datatype-le
->>176 befloat !0 with scaling
->>0 use \^nifti2-dim-le
->>344 lelong >0 \b, with qform
->>>344 use \^xform-code-nifti2-le
->>348 lelong >0 \b, with sform
->>>348 use \^xform-code-nifti2-le
->>240 string >\0 \b, description: %s
->0 default x
->>0 long x invalid sizeof_hdr=%d
-
-
-# Other subroutines for details of NIfTI files
-
-0 name nifti-datatype-le
->0 clear x
->0 leshort =1 \b, binary datatype
->0 leshort =2 \b, uint8 datatype
->0 leshort =4 \b, int16 datatype
->0 leshort =8 \b, int32 datatype
->0 leshort =16 \b, float32 datatype
->0 leshort =32 \b, complex64 datatype
->0 leshort =64 \b, float64 datatype
->0 leshort =128 \b, RGB24 datatype
->0 leshort =256 \b, int8 datatype
->0 leshort =512 \b, uint16 datatype
->0 leshort =768 \b, uint32 datatype
->0 leshort =1024 \b, int64 datatype
->0 leshort =1280 \b, uint64 datatype
->0 leshort =1536 \b, float128 datatype
->0 leshort =1792 \b, complex128 datatype
->0 leshort =2048 \b, complex256 datatype
->0 leshort =2304 \b, RGBA32 datatype
->0 default x
->>0 leshort x \b, unknown datatype 0x%x
->>2 leshort x (%d bits/pixel)
-
-0 name nifti1-dim-le
->0 clear x
->40 leshort <0 \b, INVALID dim[0]=%d
->40 leshort >7 \b, INVALID dim[0]=%d
->0 default x
->>40 leshort x \b, %d-dimensional (size
->>42 leshort x %d
->>40 leshort >1
->>>44 leshort x \bx%d
->>40 leshort >2
->>>46 leshort x \bx%d
->>40 leshort >3
->>>48 leshort x \bx%d
->>40 leshort >4
->>>50 leshort x \bx%d
->>40 leshort >5
->>>52 leshort x \bx%d
->>40 leshort >6
->>>54 leshort x \bx%d
->>80 lefloat x \b, voxel size %f
->>40 leshort >1
->>>84 lefloat x x %f
->>40 leshort >2
->>>88 lefloat x x %f
->>123 use nifti1-xyz-unit
->>40 leshort >3
->>>92 lefloat x x %f
->>>123 use nifti1-t-unit
->>40 leshort x \b)
-
-0 name nifti2-dim-le
->0 clear x
->16 lequad <0 \b, INVALID dim[0]=%lld
->16 lequad >7 \b, INVALID dim[0]=%lld
->0 default x
->>16 lequad x \b, %lld-dimensional (size
->>24 lequad x %lld
->>16 lequad >1
->>>32 lequad x \bx%lld
->>16 lequad >2
->>>40 lequad x \bx%lld
->>16 lequad >3
->>>48 lequad x \bx%lld
->>16 lequad >4
->>>56 lequad x \bx%lld
->>16 lequad >5
->>>64 lequad x \bx%lld
->>16 lequad >6
->>>72 lequad x \bx%lld
->>112 ledouble x \b, voxel size %f
->>16 lequad >1
->>>120 ledouble x x %f
->>16 lequad >2
->>>128 ledouble x x %f
->>500 use nifti2-xyz-unit
->>16 lequad >3
->>>136 ledouble x x %f
->>>500 use nifti2-t-unit
->>16 lequad x \b)
-
-0 name xform-code-nifti1-le
->0 leshort =1 to scanner-based coordinates
->0 leshort =2 to aligned coordinates
->0 leshort =3 to Talairach coordinates
->0 leshort =4 to MNI152 coordinates
->0 leshort =5 to template coordinates
-
-0 name xform-code-nifti2-le
->0 lelong =1 to scanner-based coordinates
->0 lelong =2 to aligned coordinates
->0 lelong =3 to Talairach coordinates
->0 lelong =4 to MNI152 coordinates
->0 lelong =5 to template coordinates
-
-0 name nifti1-xyz-unit
->0 byte &0x01
->>0 byte ^0x02 m
->>0 byte &0x02 micron
->0 byte ^0x01
->>0 byte &0x02 mm
-
-0 name nifti1-t-unit
->0 byte &0x08
->>0 byte ^0x10 s
->>0 byte &0x10 ms
->0 byte ^0x08
->>0 byte &0x10 microsecond
-
-0 name nifti2-xyz-unit
->0 lelong &0x01
->>0 lelong ^0x02 m
->>0 lelong &0x02 micron
->0 lelong ^0x01
->>0 lelong &0x02 mm
-
-0 name nifti2-t-unit
->0 lelong &0x08
->>0 lelong ^0x10 s
->>0 lelong &0x10 ms
->0 lelong ^0x08
->>0 lelong &0x10 microsecond
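
Both NIfTI branches above key on a short magic string plus the sizeof_hdr field (348 for NIfTI-1, 540 for NIfTI-2), whose byte order also tells the rules whether to read the rest of the header little- or big-endian. A simplified sketch of that first dispatch step; it only mirrors the top-level test, not the dim/datatype subroutines:

    import struct

    def sniff_nifti(path):
        """Replicate the top-level NIfTI test: magic string + sizeof_hdr."""
        with open(path, "rb") as f:
            head = f.read(352)
        if len(head) >= 348 and head[344:348] in (b"n+1\0", b"ni1\0"):
            order = "little" if struct.unpack("<i", head[:4])[0] == 348 else "big"
            return "NIfTI-1 (%s endian)" % order
        if head[4:12] in (b"n+2\0\r\n\x1a\n", b"ni2\0\r\n\x1a\n"):
            order = "little" if struct.unpack("<i", head[:4])[0] == 540 else "big"
            return "NIfTI-2 (%s endian)" % order
        return None
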
diff --git a/contrib/libs/libmagic/magic/Magdir/nim-lang b/contrib/libs/libmagic/magic/Magdir/nim-lang
deleted file mode 100644
index bc2cf987c7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/nim-lang
+++ /dev/null
@@ -1,29 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: nim-lang,v 1.3 2021/07/06 12:34:06 christos Exp $
-# nim-lang: file(1) magic for nim
-# URL: https://nim-lang.org/
-
-0 search/8192 import
->&0 search/64 os
->>&0 use nim1
->&0 default x
->>&0 search/64 osproc
->>>&0 use nim1
->>&0 default x
->>>&0 search/64 strutils
->>>>&0 use nim1
-
-0 name nim1
->&0 search/8192 proc
->>&0 use nim2
->&0 default x
->>&0 search/8192 template
->>>&0 use nim2
->>&0 default x
->>>&0 search/8192 let
->>>>&0 use nim2
-
-0 name nim2
->&0 search/8192 when Nim source code
-!:ext nim
diff --git a/contrib/libs/libmagic/magic/Magdir/nitpicker b/contrib/libs/libmagic/magic/Magdir/nitpicker
deleted file mode 100644
index bea96c3e74..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/nitpicker
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: nitpicker,v 1.8 2019/04/19 00:42:27 christos Exp $
-# nitpicker: file(1) magic for Flowfiles.
-# From: Christian Jachmann <C.Jachmann@gmx.net> https://www.nitpicker.de
-0 string NPFF Nitpicker Flow File
->4 byte x V%d.
->5 byte x %d
->6 bedate x started: %s
->10 bedate x stopped: %s
->14 belong x Bytes: %u
->18 belong x Bytes1: %u
->22 belong x Flows: %u
->26 belong x Pkts: %u
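
The NPFF rule is a plain fixed-layout header: two version bytes, two big-endian Unix timestamps (bedate) and four big-endian 32-bit counters. A small decoding sketch under that assumption (the function name is illustrative):

    import struct
    from datetime import datetime, timezone

    def parse_npff_header(path):
        """Decode the fields the NPFF rule prints: version, start/stop
        times and the Bytes/Bytes1/Flows/Pkts counters."""
        with open(path, "rb") as f:
            hdr = f.read(30)
        if hdr[:4] != b"NPFF":
            return None
        started, stopped, nbytes, bytes1, flows, pkts = struct.unpack(">6I", hdr[6:30])
        return {
            "version": "%d.%d" % (hdr[4], hdr[5]),
            "started": datetime.fromtimestamp(started, timezone.utc),
            "stopped": datetime.fromtimestamp(stopped, timezone.utc),
            "bytes": nbytes, "bytes1": bytes1, "flows": flows, "packets": pkts,
        }
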
diff --git a/contrib/libs/libmagic/magic/Magdir/numpy b/contrib/libs/libmagic/magic/Magdir/numpy
deleted file mode 100644
index c1520dd5df..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/numpy
+++ /dev/null
@@ -1,9 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: numpy,v 1.1 2019/05/09 16:24:36 christos Exp $
-# numpy: file(1) magic for NumPy array binary serialization format
-# Reference: https://docs.scipy.org/doc/numpy/reference/generated/numpy.lib.format.html
-0 string \x93NUMPY NumPy array,
->6 ubyte x version %d
->7 ubyte x \b.%d,
->8 uleshort x header length %d
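
The NumPy rule reads the \x93NUMPY signature, the two version bytes and a little-endian 16-bit header length. Note the rule always reads 16 bits, while format version 2.0 actually stores a 32-bit length; the sketch below follows the rule as written:

    import struct

    def read_npy_preamble(path):
        """Report the same fields as the magic rule: version and header length."""
        with open(path, "rb") as f:
            pre = f.read(10)
        if pre[:6] != b"\x93NUMPY":
            return None
        (hlen,) = struct.unpack("<H", pre[8:10])  # 16-bit, as the rule reads it
        return {"version": "%d.%d" % (pre[6], pre[7]), "header_length": hlen}
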
diff --git a/contrib/libs/libmagic/magic/Magdir/oasis b/contrib/libs/libmagic/magic/Magdir/oasis
deleted file mode 100644
index 45ad6d137d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/oasis
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: oasis,v 1.2 2014/06/03 19:17:27 christos Exp $
-# OASIS
-# Summary: OASIS stream file
-# Long description: Open Artwork System Interchange Standard
-# File extension: .oas
-# Full name: Ben Cowley (bcowley@broadcom.com)
-# Philip Dixon (pdixon@broadcom.com)
-# Reference: http://www.wrcad.com/oasis/oasis-3626-042303-draft.pdf
-# (see page 3)
-0 string %SEMI-OASIS\r\n OASIS Stream file
diff --git a/contrib/libs/libmagic/magic/Magdir/ocaml b/contrib/libs/libmagic/magic/Magdir/ocaml
deleted file mode 100644
index 3ec3100c6d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ocaml
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ocaml,v 1.5 2010/09/20 18:55:20 rrt Exp $
-# ocaml: file(1) magic for Objective Caml files.
-0 string Caml1999 OCaml
->8 string X exec file
->8 string I interface file (.cmi)
->8 string O object file (.cmo)
->8 string A library file (.cma)
->8 string Y native object file (.cmx)
->8 string Z native library file (.cmxa)
->8 string M abstract syntax tree implementation file
->8 string N abstract syntax tree interface file
->9 string >\0 (Version %3.3s)
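
The Caml1999 rule is a fixed signature, a single kind letter at offset 8 and a three-character version field at offset 9. A direct translation in Python, with an abridged kind table taken from the entries above:

    OCAML_KINDS = {"X": "exec file", "I": "interface file (.cmi)",
                   "O": "object file (.cmo)", "A": "library file (.cma)",
                   "Y": "native object file (.cmx)", "Z": "native library file (.cmxa)"}

    def sniff_ocaml(path):
        """Mirror the Caml1999 rule: signature, kind letter, 3-char version."""
        with open(path, "rb") as f:
            head = f.read(12)
        if head[:8] != b"Caml1999":
            return None
        kind = OCAML_KINDS.get(chr(head[8]), "unknown kind")
        return "OCaml %s (Version %s)" % (kind, head[9:12].decode("ascii", "replace"))
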
diff --git a/contrib/libs/libmagic/magic/Magdir/octave b/contrib/libs/libmagic/magic/Magdir/octave
deleted file mode 100644
index 49ea3e73e0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/octave
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: octave,v 1.4 2009/09/19 16:28:11 christos Exp $
-# octave binary data file(1) magic, from Dirk Eddelbuettel <edd@debian.org>
-0 string Octave-1-L Octave binary data (little endian)
-0 string Octave-1-B Octave binary data (big endian)
diff --git a/contrib/libs/libmagic/magic/Magdir/ole2compounddocs b/contrib/libs/libmagic/magic/Magdir/ole2compounddocs
deleted file mode 100644
index 2c451a9ab5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ole2compounddocs
+++ /dev/null
@@ -1,760 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ole2compounddocs,v 1.26 2023/05/15 16:46:12 christos Exp $
-# Microsoft OLE 2 Compound Documents : file(1) magic for Microsoft Structured
-# storage (https://en.wikipedia.org/wiki/Compound_File_Binary_Format)
-# Additional tests for OLE 2 Compound Documents should be under this recipe.
-# reference: https://www.openoffice.org/sc/compdocfileformat.pdf
-
-0 string \320\317\021\340\241\261\032\341
-# https://digital-preservation.github.io/droid/
-# skip DROID skeleton files like fmt-39-signature-id-128.doc by requiring a valid version
->0x1A ushort !0xABAB OLE 2 Compound Document
-#>0x1C uleshort x \b, endian %#4.4x
-# big endian not tested
->>0x1C ubeshort =0xfffe \b, big-endian
->>>546 string jbjb : Microsoft Word Document
-!:mime application/msword
-!:apple MSWDWDBN
-!:ext doc
-# Byte Order mark 0xFFFE means little-endian; found in real-world applications
-#>>0x1C uleshort =0xfffe \b, little-endian
->>0x1C uleshort =0xfffe
-# From: Joerg Jenderek
-# Major Version 3 or 4
->>>0x1A uleshort x \b, v%u
-# Minor Version 32h=50 3Bh=59 3Eh=62
->>>0x18 uleshort x \b.%u
-# SecID of first sector of the directory stream is often 1 but can be high, like 3144h
->>>48 ulelong x \b, SecID %#x
-# Sector Shift Exponent in short-stream container stream: 6~64 bytes
->>>32 uleshort !6 \b, exponent of short stream %u
-# total number of sectors used for the FAT
->>>44 ulelong >1 \b, %u FAT sectors
-# SecID of first sector of the short-sector allocation table (Mini FAT)
-# or -2 (End Of ChainSecID) if not extant
->>>60 ulelong !0xffFFffFE \b, Mini FAT start sector %#x
-# total number of sectors used for the short-sector allocation table
->>>64 ulelong !1 \b, %u Mini FAT sector
-# plural s
->>>>64 ulelong >1 \bs
-# SecID of first sector of the master sector allocation table (DIFAT)
-# or -2 (End Of Chain SecID) if no additional sectors used
->>>68 ulelong !0xffFFffFE \b, DIFAT start sector %#x
-# total number of sectors used for the master sector allocation table (DIFAT)
->>>72 ulelong >0 \b, %u DIFAT sectors
-# First part of the master sector allocation table (DIFAT) containing 109 SecIDs
-#>>>76 ubequad x \b, DIFAT=%#16.16llx
-#>>>84 ubequad x \b%16.16llx...
-# pointer to root entry only works with standard configuration for SecID ~< 800h
-# Red-Carpet-presentation-1.0-1.sdd sg10.sdv 2000_GA_Annual_Review_Data.xls
-# "ORLEN Factbook 2017.xls" XnView_metadata.doc
-# "Barham, Lisa - Die Shopping-Prinzessinnen.doc" then not recognized
->>>48 ulelong >0x800 too big for FILE_BYTES_MAX = 1 MiB
-# Sector Shift Exponent 9~512 for major version 3 or C~4096 for major version 4
->>>0x1E uleshort 0xc \b, blocksize 4096
-# jump to one block (4096 bytes per block) before root storage block
->>>>(48.l*4096) ubyte x
->>>>>&4095 use ole2-directory
-#>>>0x1E uleshort 9 \b, blocksize 512
->>>0x1E uleshort 9
-# jump to one block (512 bytes per block) before root storage block
-# in 5.37 only true for offset ~< FILE_BYTES_MAX=7 MiB defined in ../../src/file.h
->>>>(48.l*512) ubyte x
->>>>>&511 use ole2-directory
-# check directory entry structure and display types by GUID
-0 name ole2-directory
-# directory entry name like "Root Entry"
-#>0 lestring16 x \b, 1st %.10s
-# type of the entry; 5~Root storage
-#>66 ubyte x \b, type %x
-# node colour of the entry: 00H ~ Red 01H ~ Black
-#>67 ubyte x \b, color %x
-# the DirIDs of the child nodes. Should both be -1 in the root storage entry
-#>68 bequad !0xffffffffffffffff \b, DirIDs %llx
-# NEXT lines for DEBUGGING
-# second directory entry name like VisioDocument Control000
-#>128 lestring16 x \b, 2nd %.20s
-# third directory entry like WordDocument
-#>256 lestring16 x \b, 3rd %.20s
-# fourth
-#>384 lestring16 x \b, 4th %.10s
-# 5th
-#>512 lestring16 x \b, 5th %.10s
-# 6th
-#>640 lestring16 x \b, 6th %.10s
-# 7th
-#>768 lestring16 x \b, 7th %.10s
-# https://wikileaks.org/ciav7p1/cms/page_13762814.html
-# https://m.blog.naver.com/superman4u/40047693679
-# https://misc.daniel-marschall.de/projects/guid_analysis/guid.txt
-# https://toolslick.com/conversion/data/guid
-#>80 ubequad !0 \b, clsid %#16.16llx
-#>>88 ubequad x \b%16.16llx
-# test for "Root Entry" inside directory by type 5 value
->66 ubyte 5
-# look for CLSID GUID 0
->>88 ubequad 0x0
->>>80 ubequad 0x0
-# - Microstation V8 DGN files (www.bentley.com)
-# URL: https://en.wikipedia.org/wiki/MicroStation
-# Last update on 10/23/2006 by Lester Hightower
-# 07/24/2019 by Joerg Jenderek
-# Second directory entry name like Dgn~H Dgn~S
->>>>128 lestring16 Dgn~ : Microstation V8 CAD
-#!:mime application/x-ole-storage
-!:mime application/x-bentley-dgn
-# http://www.q-cad.com/files/samples_cad_files/1344468165.dgn
-!:ext dgn
-#
-# URL: http://fileformats.archiveteam.org/wiki/WordPerfect
-# Second directory entry name PerfectOffice_
->>>>128 lestring16 PerfectOffice_ : WordPerfect 7-X3 presentations Master, Document or Graphic
-!:mime application/vnd.wordperfect
-# https://www.macdisk.com/macsigen.php "WPC2" for Wordperfect 2 *.wpd
-!:apple ????WPC7
-!:ext mst/wpd/wpg
-#
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Word_Processor
-# Second directory entry name MatOST_
->>>>128 lestring16 MatOST : Microsoft Works 3.0 document
-!:mime application/vnd.ms-works
-!:apple ????AWWP
-!:ext wps
-#
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Spreadsheet
-# 3rd directory entry name WksSSWorkBook
->>>>256 lestring16 WksSSWorkBook : Microsoft Works 6-9 spreadsheet
-!:mime application/vnd.ms-works
-!:apple ????AWSS
-!:ext xlr
-#
-# URL: http://fileformats.archiveteam.org/wiki/XLS
-# what is the difference to {00020820-0000-0000-c000-000000000046} ?
-# Second directory entry name Workbook
->>>>128 lestring16 Workbook
->>>>>256 lestring16 !WksSSWorkBook : Microsoft Excel 97-2003 worksheet 0 clsid
-!:mime application/vnd.ms-excel
-# https://www.macdisk.com/macsigen.php XLS5 for Excel 5
-!:apple ????XLS9
-!:ext xls
-#
-# URL: http://fileformats.archiveteam.org/wiki/PPT
-# Second directory entry name Object1 Object12 Object35
->>>>128 lestring16 Object : Microsoft PowerPoint 4 presentation
-!:mime application/vnd.ms-powerpoint
-# https://www.macdisk.com/macsigen.php
-!:apple ????PPT3
-!:ext ppt
-#
-# URL: https://www.msoutlook.info/question/164
-# Second directory entry name __CollDataStm
->>>>128 lestring16 __CollDataStm : Microsoft Outlook Send Receive Settings
-#!:mime application/vnd.ms-outlook
-!:mime application/x-ms-srs
-# %APPDATA%\Microsoft\Outlook\Outlook.srs
-!:ext srs
-#
-# URL: https://www.file-extensions.org/cag-file-extension
-# Second directory entry name Category
->>>>128 lestring16 Category : Microsoft Clip Art Gallery
-#!:mime application/x-ole-storage
-!:mime application/x-ms-cag
-!:apple MScgCGdb
-!:ext cag/
-#
-# URL: https://www.filesuffix.com/de/extension/rra
-# 3rd directory entry name StrIndex_StringTable
->>>>256 lestring16 StrIndex_StringTable : Windows temporary installer
-#!:mime application/x-ole-storage
-!:mime application/x-ms-rra
-!:ext rra
-#
-# URL: https://www.forensicswiki.org/wiki/Jump_Lists
-# 3rd directory entry name DestList
->>>>256 lestring16 DestList : Windows jump list
-#!:mime application/x-ole-storage
-!:mime application/x-ms-jumplist
-# %APPDATA%\Microsoft\Windows\Recent\AutomaticDestinations\*.automaticDestinations-ms
-!:ext automaticDestinations-ms
-#
-# URL: https://en.wikipedia.org/wiki/Windows_thumbnail_cache
-# Second directory entry name 256_
->>>>128 lestring16 256_ : Windows thumbnail database 256
-#!:mime application/x-ole-storage
-!:mime application/x-ms-thumbnail
-# Thumbs.db
-!:ext db
->>>>128 lestring16 96_ : Windows thumbnail database 96
-!:mime application/x-ms-thumbnail
-!:ext db
-# 3rd directory entry name Catalog_
->>>>256 lestring16 Catalog : Windows thumbnail database
-!:mime application/x-ms-thumbnail
-!:ext db
-#
-# URL: https://support.microsoft.com/en-us/help/300887/how-to-use-system-information-msinfo32-command-line-tool-switches
-# Note: older Microsoft Systeminfo (MSInfo configuration file of msinfo32); newer versions use an XML-based format
-# Second directory entry name Control000
->>>>128 lestring16 Control000 : Microsoft old Systeminfo
-#!:mime application/x-ole-storage
-!:mime application/x-ms-info
-!:ext nfo
-#
-# From: Joerg Jenderek
-# URL: https://learn.microsoft.com/en-us/sysinternals/downloads/autoruns
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/arn-autoruns-v14.trid.xml
-# Note: older versions up to 13 (until about mid-2021) are handled by ./windows
-# called "Sysinternals Autoruns data (v14)" by TrID
-# second, third and fourth directory entry name like Header Items 0
->>>>128 lestring16 Header : Microsoft sysinternals AutoRuns data, version 14
-#!:mime application/x-ole-storage
-!:mime application/x-ms-arn
-# like: MyHOSTNAME.arn
-!:ext arn
-#
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Microsoft_Access
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/m/mdz.trid.xml
-# http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
-# Note: only version foo tested and called "Microsoft Access Wizard template" by TrID
-# Fourth directory entry name TemplateID
->>>>384 lestring16 TemplateID : Microsoft Access wizard template
-# Second directory entry name like \005SummaryInformation and 3rd name like \005DocumentSummaryInformation
-#!:mime application/x-ole-storage
-#!:mime application/vnd.ms-office
-#!:mime application/vnd.ms-access
-#!:mime application/msaccess
-!:mime application/x-ms-mdz
-# http://extension.nirsoft.net/mdz
-!:ext mdz
-#
-# URL: http://fileformats.archiveteam.org/wiki/Corel_Print_House
-# Second directory entry name Thumbnail
->>>>128 lestring16 Thumbnail : Corel PrintHouse image
-#!:mime application/x-ole-storage
-!:mime application/x-corel-cph
-!:ext cph
-# 3rd directory entry name Thumbnail
->>>>256 lestring16 Thumbnail : Corel PrintHouse image
-!:mime application/x-corel-cph
-!:ext cph
-# URL: http://fileformats.archiveteam.org/wiki/Corel_Gallery
-# Note: format since Gallery 2; sometimes called Corel Multimedia Manager Album
-# third directory entry name _INFO_
->>>>256 lestring16 _INFO_ : Corel Gallery
-# second directory entry name _ITEM_ or _DATA_
-# later directory entry names: _ALBUM_ _THUMBNAIL_
-#!:mime application/x-ole-storage
-!:mime application/x-corel-gal
-!:ext gal
-#
-# From: Joerg Jenderek
-# URL: https://archive.org/details/iPhoto-Plus-4
-# https://filext.com/file-extension/TPL
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/t/tpl-ulead.trid.xml
-# Note: found in the Template subdirectory of the iPhoto Plus version 4 program directory
-# second, third and fourth directory entry name like TplHeader TplMainImage TplPreview
->>>>128 lestring16 TplHeader : Ulead iPhoto Template
-#!:mime application/x-ole-storage
-!:mime image/x-ulead-tpl
-# https://www.file-extensions.org/tpl-file-extension-ulead-photo-express-template
-!:ext tpl
-#
-# URL: https://en.wikipedia.org/wiki/Hangul_(word_processor)
-# https://www.hancom.com/etc/hwpDownload.do
-# Note: "HWP Document File" signature found in FileHeader
-# Hangul Word Processor WORDIAN, 2002 and later use the HWP 5.0 format.
-# Second directory entry name FileHeader hint for Thinkfree Office document
->>>>128 lestring16 FileHeader : Hancom HWP (Hangul Word Processor) file, version 5.0
-#!:mime application/haansofthwp
-!:mime application/x-hwp
-# https://example-files.online-convert.com/document/hwp/example.hwp
-!:ext hwp
-#
-# URL: https://ask.libreoffice.org/en/question/26303/creating-new-themes-for-the-gallery-not-functioning/
-# Second directory entry name like dd2000 dd2001 dd2036 dd2060 dd2083
->>>>128 lestring16 dd2 : StarOffice Gallery view
-#!:mime application/x-ole-storage
-!:mime application/x-star-sdv
-!:ext sdv
-# URL: https://en.wikipedia.org/wiki/SoftMaker_Office
-# second directory entry name Current User
->>>>128 lestring16 Current\ User : SoftMaker
-# third directory entry name SMNativeObjData
->>>>>256 lestring16 SMNativeObjData
-# 5th directory entry name PowerPoint
->>>>>>512 lestring16 PowerPoint PowerPoint presentation or template
-!:mime application/vnd.ms-powerpoint
-!:ext ppt/pps/pot
-# 4th directory entry name PowerPoint
->>>>>384 lestring16 PowerPoint Presentations or template
-# http://extension.nirsoft.net/prv
-!:mime application/vnd.softmaker.presentations
-!:ext prd/prv
-# third directory entry name like Current User
->>>>256 lestring16 Current\ User : SoftMaker
-# 5th directory entry name PowerPoint
->>>>>512 lestring16 PowerPoint Presentations or template
-# http://extension.nirsoft.net/prd
-!:mime application/vnd.softmaker.presentations
-!:ext prd/prv
-# 2nd directory entry name Pictures
->>>>>>128 lestring16 Pictures with pictures
-#
-# URL: http://fileformats.archiveteam.org/wiki/PageMaker
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p
-# pagemaker-generic.trid.xml
-# pagemaker-pm6.trid.xml
-# pagemaker-pm65.trid.xml
-# pmd-pm7.trid.xml
-# From: Joerg Jenderek
-# Note: since version 6 the "old" format (handled by ./wordprocessors) is embedded as a stream named PageMaker
-# verified by Michal Mutl Structured Storage Viewer `SSView.exe brochus.pt6`
-# Second directory entry name PageMaker
->>>>128 lestring16 PageMaker :
-# look for magic of "old" PageMaker like in 02TEMPLT.T65
->>>>>0 search/0xa900/s \0\0\0\0\0\0\xff\x99
-# GRR: jumping to the PageMaker stream and inspecting it with the PageMaker subroutine of ./wordprocessors failed with a wrong version!
-#>>>>>>&0 use PageMaker
-# THIS WORKS PARTLY!
->>>>>>&0 indirect x
-# remaining null clsid
->>>>128 default x
->>>>>0 use ole2-unknown
-# look for CLSID where "second" part is 0
->>>80 ubequad !0x0
-#
-# Summary: Family Tree Maker
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Family_Tree_Maker
-# https://en.wikipedia.org/wiki/Family_Tree_Maker
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/f/ftw.trid.xml
-# Note: called "Family Tree Maker Family Tree" by TrID and
-# "FamilyTree Maker Database" with version "1-4" by DROID via PUID fmt/1352
-# tested only with version 2.0
-# verified by Michal Mutl Structured Storage Viewer `SSView.exe my.ftw`
-# newer versions are SQLite based and handled by ./sql
-# directory names like: IND.DB AUX.DB GENERAL.DB NAME.NDX BIRTH.NDX EXTRA.DB
->>>>80 ubequad 0x5702000000000000 : Family Tree Maker Windows database, version 1-4
-# look for "File Format (C) Copyright 1993 Banner Blue Software Inc. - All Rights Reserved" in GENERAL.DB
-#>>>>>0 search/0x5460c/s F\0i\0l\0e\0\040\0F\0o\0r\0m\0a\0t\0\040\0(\0C\0)\0 \b, VERSION
-# GRR: jump to version value like 2 does not work!
-#>>>>>>&-8 ubyte x %u
-#!:mime application/x-ole-storage
-!:mime application/x-fmt
-# FBK is used for backup of FTW
-!:ext ftw/fbk
-#
->>>>80 default x
->>>>>0 use ole2-unknown
-# look for known clsid GUID
-# - Visio documents
-# URL: http://fileformats.archiveteam.org/wiki/Visio
-# Last update on 10/23/2006 by Lester Hightower, 07/20/2019 by Joerg Jenderek
->>88 ubequad 0xc000000000000046
->>>80 ubequad 0x131a020000000000 : Microsoft Visio 2000-2002 Document, stencil or template
-!:mime application/vnd.visio
-# VSD~Drawing VSS~Stencil VST~Template
-!:ext vsd/vss/vst
->>>80 ubequad 0x141a020000000000 : Microsoft Visio 2003-2010 Document, stencil or template
-!:mime application/vnd.visio
-!:ext vsd/vss/vst
-#
-# URL: http://fileformats.archiveteam.org/wiki/Windows_Installer
-# https://en.wikipedia.org/wiki/Windows_Installer#ICE_validation
-# Update: Joerg Jenderek
-# Windows Installer Package *.MSI or validation module *.CUB
->>>80 ubequad 0x84100c0000000000 : Microsoft Windows Installer Package or validation module
-!:mime application/x-msi
-#!:mime application/x-ms-win-installer
-# https://learn.microsoft.com/en-us/windows/win32/msi/internal-consistency-evaluators-ices
-# cub is used for validation module like: Vstalogo.cub XPlogo.cub darice.cub logo.cub mergemod.cub
-#!:mime application/x-ms-cub
-!:ext msi/cub
-# From: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/Windows_Installer
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/m/mst.trid.xml
-# called "Windows SDK Setup Transform script" by TrID
->>>80 ubequad 0x82100c0000000000 : Microsoft Windows Installer transform script
-#!:mime application/x-ole-storage
-!:mime application/x-ms-mst
-!:ext mst
->>>80 ubequad 0x86100c0000000000 : Microsoft Windows Installer Patch
-# ??
-!:mime application/x-wine-extension-msp
-#!:mime application/x-ms-msp
-!:ext msp
-#
-# URL: http://fileformats.archiveteam.org/wiki/DOC
->>>80 ubequad 0x0009020000000000 : Microsoft Word 6-95 document or template
-!:mime application/msword
-# for template MSWDW8TN
-!:apple MSWDWDBN
-!:ext doc/dot
->>>80 ubequad 0x0609020000000000 : Microsoft Word 97-2003 document or template
-!:mime application/msword
-!:apple MSWDWDBN
-# dot for template; no extension on Macintosh
-!:ext doc/dot/
-#
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Word_Processor
->>>80 ubequad 0x0213020000000000 : Microsoft Works 3-4 document or template
-!:mime application/vnd.ms-works
-!:apple ????AWWP
-# ps for template https://filext.com/file-extension/PS bps for backup
-!:ext wps/ps/bps
-#
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Database
->>>80 ubequad 0x0313020000000000 : Microsoft Works 3-4 database or template
-!:mime application/vnd.ms-works-db
-# https://www.macdisk.com/macsigen.php
-!:apple ????AWDB
-# db for template www.file-extensions.org/db-file-extension-microsoft-works-data bdb for backup
-!:ext wdb/db/bdb
-#
-# URL: https://en.wikipedia.org/wiki/Microsoft_Excel
->>>80 ubequad 0x1008020000000000 : Microsoft Excel 5-95 worksheet, addin or template
-!:mime application/vnd.ms-excel
-# https://www.macdisk.com/macsigen.php
-!:apple ????XLS5
-# worksheet/addin/template/no extension on Macintosh
-!:ext xls/xla/xlt/
-#
->>>80 ubequad 0x2008020000000000 : Microsoft Excel 97-2003
-!:mime application/vnd.ms-excel
-# https://www.macdisk.com/macsigen.php XLS5 for Excel 5
-!:apple ????XLS9
-# 3rd directory entry name
->>>>256 lestring16 _VBA_PROJECT_CUR addin
-!:ext xla/
-# 4th directory entry name
->>>>384 lestring16 _VBA_PROJECT_CUR addin
-!:ext xla
-#!:ext xla/
->>>>256 default x worksheet or template
-!:ext xls/xlt
-#!:ext xls/xlt/
-#
-# URL: http://fileformats.archiveteam.org/wiki/OLE2
->>>80 ubequad 0x0b0d020000000000 : Microsoft Outlook 97-2003 item
-#>>>80 ubequad 0x0b0d020000000000 : Microsoft Outlook 97-2003 Message
-#!:mime application/vnd.ms-outlook
-!:mime application/x-ms-msg
-!:ext msg
-# URL: https://wiki.fileformat.com/email/oft/
->>>80 ubequad 0x46f0060000000000 : Microsoft Outlook 97-2003 item template
-#!:mime application/vnd.ms-outlook
-!:mime application/x-ms-oft
-!:ext oft
-#
-# URL: http://fileformats.archiveteam.org/wiki/PPT
->>>80 ubequad 0x5148040000000000 : Microsoft PowerPoint 4.0 presentation
-!:mime application/vnd.ms-powerpoint
-# https://www.macdisk.com/macsigen.php
-!:apple ????PPT3
-!:ext ppt
-# Summary: "newer" Greenstreet Art drawing
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/GST_ART
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/art-gst-docfile.trid.xml
-# Note: called like "Greenstreet Art drawing" by TrID
-# Note: CONTENT stream contains binary part of older versions with phrase GST:ART at offset 16
-# verified by Michal Mutl Structured Storage Viewer `SSView.exe BCARD2.ART`
->>>80 ubequad 0x602c020000000000 : Greenstreet Art drawing
-#!:mime application/x-ole-storage
-!:mime image/x-greenstreet-art
-!:ext art
->>>80 default x
->>>>0 use ole2-unknown
-#??
-# URL: http://www.checkfilename.com/view-details/Microsoft-Works/RespageIndex/0/sTab/2/
->>88 ubequad 0xa29a00aa004a1a72 : Microsoft
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Word_Processor
->>>80 ubequad 0xc2dbcd28e20ace11 Works 4 document
-!:mime application/vnd.ms-works
-!:apple ????AWWP
-!:ext wps
-#
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Database
->>>80 ubequad 0xc3dbcd28e20ace11 Works 4 database
-!:mime application/vnd.ms-works-db
-!:apple ????AWDB
-!:ext wdb/bdb
-#??
->>88 ubequad 0xa40700c04fb932ba : Microsoft
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Word_Processor
->>>80 ubequad 0xb25aa40e0a9ed111 Works 5-6 document
-!:mime application/vnd.ms-works
-!:apple ????AWWP
-!:ext wps
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Microsoft_Works
-# Reference: http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
-# Note: probably version 6 and 7
-# organize pictures like JPEG images in streams __cf1 with names like
-# 001.JPG, 002.JPG ... in streams __fname
->>88 ubequad 0xa1c800c04f612452 : Microsoft
->>>80 ubequad 0xc0c7266eb98cd311 Works portfolio
-# 2nd directory entry name PfOrder, 3rd __LastID and 4th __SizeUsed
-#!:mime application/x-ole-storage
-# https://www.iana.org/assignments/media-types/application/vnd.ms-works
-!:mime application/vnd.ms-works
-# https://extension.nirsoft.net/wsb
-# like: wsbsamp.wsb WORKS2003_CD:\MSWorks\Common\Sammlung.wsb
-!:ext wsb
-#??
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Publisher
->>88 ubequad 0x00c0000000000046 : Microsoft
->>>80 ubequad 0x0112020000000000 Publisher
-!:mime application/vnd.ms-publisher
-!:ext pub
-#
-# URL: http://fileformats.archiveteam.org/wiki/PPT
-#??
->>88 ubequad 0xa90300aa00510ea3 : Microsoft
->>>80 ubequad 0x70ae7bea3bfbcd11 PowerPoint 95 presentation
-!:mime application/vnd.ms-powerpoint
-# https://www.macdisk.com/macsigen.php
-!:apple ????PPT3
-!:ext ppt/pot
-#??
->>88 ubequad 0x86ea00aa00b929e8 : Microsoft
->>>80 ubequad 0x108d81649b4fcf11 PowerPoint 97-2003 presentation or template
-!:mime application/vnd.ms-powerpoint
-!:apple ????PPT3
-# /autostart/template
-!:ext ppt/pps/pot
-# From: Joerg Jenderek
-# URL: https://www.file-extensions.org/ppa-file-extension
-# https://en.wikipedia.org/wiki/Microsoft_PowerPoint#cite_note-231
-# Reference: http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
->>88 ubequad 0x871800aa0060263b : Microsoft
-# only version 8 (97) tested; PowerPoint 4.0 to 11.0 (2004) (Wikipedia); 97 to 2003 (file-extensions.org)
->>>80 ubequad 0xf04672810a72cf11 PowerPoint Addin or Wizard
-# second, third and fourth directory entry name like VBA PROJECT PROJECTwm
-# http://extension.nirsoft.net/pwz
-!:mime application/vnd.ms-powerpoint
-# like: BSHPPT97.PPA "AutoContent Wizard.pwz"
-!:ext ppa/pwz
-#
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/AWD_(At_Work_Document)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/awd-fax.trid.xml
-# Note: called "Microsoft At Work Fax document" by TrID
->>88 ubequad 0xb29400dd010f2bf9 : Microsoft
->>>80 ubequad 0x801cb0023de01a10 At Work fax Document
-#!:mime application/x-ole-storage
-!:mime image/x-ms-awd
-!:ext awd
-#
-# URL: https://en.wikipedia.org/wiki/Microsoft_Project
-#??
->>88 ubequad 0xbe1100c04fb6faf1 : Microsoft
->>>80 ubequad 0x3a8fb774c8c8d111 Project
-!:mime application/vnd.ms-project
-!:ext mpp
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Microsoft_Office_shared_tools#Binder
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/o/obd.trid.xml
-# http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
-# Note: only version 8 tested and called "Office Binder Document" by TrID and
-# "Microsoft Office Binder File for Windows" version 97-2000 by DROID fmt/240
->>88 ubequad 0xb21c00aa004ba90b : Microsoft
->>>80 ubequad 0x0004855964661b10 Office Binder Document, Template or wizard
-# second directory entry name like Binder
-# https://www.file-extensions.org/obd-file-extension
-#!:mime application/vnd.ms-binder
-!:mime application/x-msbinder
-# obt for template; obz for Microsoft Office Binder wizard
-!:ext obd/obt/obz
-#
-# URL: http://fileformats.archiveteam.org/wiki/WordPerfect
-# Reference: http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
-# https://github.com/OneWingedShark/WordPerfect/
-# blob/master/doc/SDK_Help/FileFormats/WPFF_DocumentStructure.htm
-# From: Joerg Jenderek
-# Note: internal version x.2 or 2.2 like in embedded ole6-PerfectOffice_MAIN.wpd
-# 3rd directory entry name PerfectOffice_OBJECT and 2nd PerfectOffice_MAIN,
-# which contains WordPerfect document \xffWPC signature handled by ./wordprocessors
->>88 ubequad 0x19370000929679cd : WordPerfect 7
->>>80 ubequad 0xff739851ad2d2002 Document
-!:mime application/vnd.wordperfect
-#!:apple ????WPC?
-# https://fossies.org/linux/wp2latex/test/ole6.wpd
-!:ext wpd
-#>>>>0 search/0xc01/s \xffWPC \b, WPC SIGNATURE
-# inspect embedded WordPerfect document by ./wordprocessors with 1 space at end
-#>>>>>&0 indirect x \b; contains
-# GRR: the above expression does not work correctly
-#
-# URL: http://fileformats.archiveteam.org/wiki/SHW_(Corel)
-#???
->>88 ubequad 0x99ae04021c007002 : WordPerfect
->>>80 ubequad 0x62fe2e4099191b10 7-X3 presentation
-!:mime application/x-corelpresentations
-#!:mime application/x-shw-viewer
-#!:mime image/x-presentations
-!:ext shw
-#
-# URL: http://www.checkfilename.com/view-details/WordPerfect-Office-X3/RespageIndex/0/sTab/2/
->>>80 ubequad 0x60fe2e4099191b10 9 Graphic
-#!:mime application/x-wpg
-#!:mime image/x-wordperfect-graphics
-!:mime image/x-wpg
-# https://www.macdisk.com/macsigen.php "WPC2" for Wordperfect 2 *.wpd
-!:apple ????WPC9
-!:ext wpg
-#
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/CorelCAD
-# https://en.wikipedia.org/wiki/CorelCAD
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/ccd-corelcad.trid.xml
-# Note: called "CorelCAD Drawing" by TrID and CorelCAD
-# directory entry names like Contents ViewInfo CustomViewDescriptions LayerInfo
->>88 ubequad 0xbe26db67235e2689 : Corel
->>>80 ubequad 0x20f414de1cacce11 \bCAD Drawing or Template
-#!:mime application/x-ole-storage
-!:mime application/x-corel-cad
-# CCT for CorelCAD Template
-!:ext ccd/cct
-#
-# URL: http://fileformats.archiveteam.org/wiki/StarOffice_binary_formats
->>88 ubequad 0x996104021c007002 : StarOffice
->>>80 ubequad 0x407e5cdc5cb31b10 StarWriter 3.0 document or template
-# https://www.openoffice.org/framework/documentation/mimetypes/mimetypes.html
-!:mime application/x-starwriter
-!:ext sdw/vor
-#
->>>80 ubequad 0xa03f543fa6b61b10 StarCalc 3.0 spreadsheet or template
-!:mime application/x-starcalc
-!:ext sdc/vor
-#
->>>80 ubequad 0xe0aa10af6db31b10 StarDraw 3.0 drawing or template
-!:mime application/x-starimpress
-#!:mime application/x-stardraw
-# sda ??
-!:ext sdd/sda/vor
-#??
->>88 ubequad 0x89cb008029e4b0b1 : StarOffice
->>>80 ubequad 0x41d461633542d011 StarCalc 4.0 spreadsheet or template
-!:mime application/x-starcalc
-!:ext sdc/vor
-#
->>>80 ubequad 0x61b8a5c6d685d111 StarCalc 5.0 spreadsheet or template
-!:mime application/vnd.stardivision.cal
-!:ext sdc/vor
-#
->>>80 ubequad 0xc03c2d011642d011 StarImpress 4.0 presentation or template
-!:mime application/x-starimpress
-!:ext sdd/vor
-#??
->>88 ubequad 0xb12a04021c007002 : StarOffice
->>>80 ubequad 0x600459d4fd351c10 StarMath 3.0
-!:mime application/x-starmath
-!:ext smf
-#??
->>88 ubequad 0x8e2c00001b4cc711 : StarOffice
->>>80 ubequad 0xe0999cfb6d2c1c10 StarChart 3.0
-!:mime application/x-starchart
-!:ext sds
-#??
->>88 ubequad 0xa45e00a0249d57b1 : StarOffice
->>>80 ubequad 0xb0e9048b0e42d011 StarWriter 4.0 document or template
-!:mime application/x-starwriter
-!:ext sdw/vor
-#??
->>88 ubequad 0x89ca008029e4b0b1 : StarOffice
->>>80 ubequad 0xe1b7b3022542d011 StarMath 4.0
-!:mime application/x-starmath
-!:ext smf
-#
->>>80 ubequad 0xe0b7b3022542d011 StarChart 4.0
-!:mime application/x-starchart
-!:ext sds
-#??
->>88 ubequad 0xa53f00a0249d57b1 : StarOffice
->>>80 ubequad 0x70c90a340de3d011 Master 4.0 document
-!:mime application/x-starwriter-global
-!:ext sgl
-#??
->>88 ubequad 0x89d0008029e4b0b1 : StarOffice
->>>80 ubequad 0x40e6b5ffde85d111 StarMath 5.0
-!:mime application/vnd.stardivision.math
-!:ext smf
-#
->>>80 ubequad 0xa005892ebd85d111 StarDraw 5.0 drawing or template
-!:mime application/vnd.stardivision.draw
-!:ext sda/vor
-#
->>>80 ubequad 0x21725c56bc85d111 StarImpress 5.0 presentation or template
-!:mime application/vnd.stardivision.impress
-# sda is used for what?
-!:ext sdd/vor/sda
-#
->>>80 ubequad 0x214388bfdd85d111 StarChart 5.0
-!:mime application/vnd.stardivision.chart
-!:ext sds
-# ??
->>88 ubequad 0xaab4006097da561a : StarOffice
->>>80 ubequad 0xd1f90cc2ae85d111 StarWriter 5.0 document or template
-!:mime application/vnd.stardivision.writer
-!:ext sdw/vor
-#
->>>80 ubequad 0xd3f90cc2ae85d111 Master 5.0 document
-!:mime application/vnd.stardivision.writer-global
-!:ext sgl
-#??
-# URL: http://fileformats.archiveteam.org/wiki/FlashPix
->>88 ubequad 0x855300aa00a1f95b : Kodak
->>>80 ubequad 0x0067615654c1ce11 FlashPIX Image
-!:mime image/vnd.fpx
-!:apple ????FPix
-!:ext fpx
-# URL: https://en.wikipedia.org/wiki/SoftMaker_Office
->>88 ubequad 0x95f600a0cc3cca14 : PlanMaker
->>>80 ubequad 0x9174088a6452d411 document or template
-!:mime application/vnd.softmaker.planmaker
-# pmv for template https://www.file-extensions.org/pmv-file-extension
-!:ext pmd/pmv
-# URL: http://fileformats.archiveteam.org/wiki/MAX_(3ds_Max)
-# https://en.wikipedia.org/wiki/Autodesk_3ds_Max
-# Reference: http://fileformats.archiveteam.org/wiki/Microsoft_Compound_File
-# Note: called "3D Studio Max Scene" by TrID and "3DS Max" by DROID and
-# "3DSMax thumbnail" by XnView and verfied by `nconvert -info A380.max`
-# applies only to "newer" versions (about 2008-2020)
->>88 ubequad 0x9fed04143144cc1e : Autodesk
->>>80 ubequad 0x7b8cdd1cc081a045 3ds Max
-#!:mime application/x-ole-storage
-!:mime model/x-autodesk-max
-# like: https://static.free3d.com/models/dropbox/dropbox/sq/A380.7z/A380.max
-!:ext max
-# also chr for character file according to DROID https://www.nationalarchives.gov.uk/PRONOM/fmt/978
-#!:ext max/chr
-# remaining non null clsid
->>88 default x
->>>0 use ole2-unknown
-# display information about directory for not detected CDF files
-0 name ole2-unknown
->80 ubequad x : UNKNOWN
-# https://reposcope.com/mimetype/application/x-ole-storage
-!:mime application/x-ole-storage
-# according to file version 5.41 with -e soft option
-#!:mime application/CDFV2
-#!:ext ???
->80 ubequad !0 \b, clsid %#16.16llx
->>88 ubequad x \b%16.16llx
-# hexadecimal format converted to standard GUID notation
->>80 guid x {%s}
-# second directory entry name like VisioDocument Control000
->128 lestring16 x with names %.20s
-# third directory entry like WordDocument Preview.dib
->256 lestring16 x %.20s
-# fourth like \005SummaryInformation
->384 lestring16 x %.25s
-# 5th
->512 lestring16 x %.10s
-# 6th
->640 lestring16 x %.10s
-# 7th
->768 lestring16 x %.10s
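The entries above print the remaining non-null CLSIDs both as two raw big-endian quads and in standard GUID notation. As a minimal illustration of that conversion (not part of the magic database), Python's standard uuid module handles the mixed-endian CLSID layout directly; the sample bytes are the StarWriter 5.0 CLSID matched earlier in this file, and the function name is illustrative only.

```python
import uuid

def clsid_to_guid(raw16: bytes) -> str:
    # A CDF/OLE2 CLSID stores its first three fields little-endian, which is
    # exactly the layout uuid.UUID(bytes_le=...) expects.
    return "{%s}" % uuid.UUID(bytes_le=raw16)

# Bytes 80..95 of the root directory entry of a StarWriter 5.0 document,
# i.e. the two ubequad values tested above, written out byte for byte.
sample = bytes.fromhex("d1f90cc2ae85d111aab4006097da561a")
print(clsid_to_guid(sample))  # {c20cf9d1-85ae-11d1-aab4-006097da561a}
```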
diff --git a/contrib/libs/libmagic/magic/Magdir/olf b/contrib/libs/libmagic/magic/Magdir/olf
deleted file mode 100644
index 6ae3fc04e5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/olf
+++ /dev/null
@@ -1,98 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: olf,v 1.4 2009/09/19 16:28:11 christos Exp $
-# olf: file(1) magic for OLF executables
-#
-# We have to check the byte order flag to see what byte order all the
-# other stuff in the header is in.
-#
-# MIPS R3000 may also be for MIPS R2000.
-# What're the correct byte orders for the nCUBE and the Fujitsu VPP500?
-#
-# Created by Erik Theisen <etheisen@openbsd.org>
-# Based on elf from Daniel Quinlan <quinlan@yggdrasil.com>
-0 string \177OLF OLF
->4 byte 0 invalid class
->4 byte 1 32-bit
->4 byte 2 64-bit
->7 byte 0 invalid os
->7 byte 1 OpenBSD
->7 byte 2 NetBSD
->7 byte 3 FreeBSD
->7 byte 4 4.4BSD
->7 byte 5 Linux
->7 byte 6 SVR4
->7 byte 7 esix
->7 byte 8 Solaris
->7 byte 9 Irix
->7 byte 10 SCO
->7 byte 11 Dell
->7 byte 12 NCR
->5 byte 0 invalid byte order
->5 byte 1 LSB
->>16 leshort 0 no file type,
->>16 leshort 1 relocatable,
->>16 leshort 2 executable,
->>16 leshort 3 shared object,
-# Core handling from Peter Tobias <tobias@server.et-inf.fho-emden.de>
-# corrections by Christian 'Dr. Disk' Hechelmann <drdisk@ds9.au.s.shuttle.de>
->>16 leshort 4 core file
->>>(0x38+0xcc) string >\0 of '%s'
->>>(0x38+0x10) lelong >0 (signal %d),
->>16 leshort &0xff00 processor-specific,
->>18 leshort 0 no machine,
->>18 leshort 1 AT&T WE32100 - invalid byte order,
->>18 leshort 2 SPARC - invalid byte order,
->>18 leshort 3 Intel 80386,
->>18 leshort 4 Motorola 68000 - invalid byte order,
->>18 leshort 5 Motorola 88000 - invalid byte order,
->>18 leshort 6 Intel 80486,
->>18 leshort 7 Intel 80860,
->>18 leshort 8 MIPS R3000_BE - invalid byte order,
->>18 leshort 9 Amdahl - invalid byte order,
->>18 leshort 10 MIPS R3000_LE,
->>18 leshort 11 RS6000 - invalid byte order,
->>18 leshort 15 PA-RISC - invalid byte order,
->>18 leshort 16 nCUBE,
->>18 leshort 17 VPP500,
->>18 leshort 18 SPARC32PLUS,
->>18 leshort 20 PowerPC,
->>18 leshort 0x9026 Alpha,
->>20 lelong 0 invalid version
->>20 lelong 1 version 1
->>36 lelong 1 MathCoPro/FPU/MAU Required
->8 string >\0 (%s)
->5 byte 2 MSB
->>16 beshort 0 no file type,
->>16 beshort 1 relocatable,
->>16 beshort 2 executable,
->>16 beshort 3 shared object,
->>16 beshort 4 core file,
->>>(0x38+0xcc) string >\0 of '%s'
->>>(0x38+0x10) belong >0 (signal %d),
->>16 beshort &0xff00 processor-specific,
->>18 beshort 0 no machine,
->>18 beshort 1 AT&T WE32100,
->>18 beshort 2 SPARC,
->>18 beshort 3 Intel 80386 - invalid byte order,
->>18 beshort 4 Motorola 68000,
->>18 beshort 5 Motorola 88000,
->>18 beshort 6 Intel 80486 - invalid byte order,
->>18 beshort 7 Intel 80860,
->>18 beshort 8 MIPS R3000_BE,
->>18 beshort 9 Amdahl,
->>18 beshort 10 MIPS R3000_LE - invalid byte order,
->>18 beshort 11 RS6000,
->>18 beshort 15 PA-RISC,
->>18 beshort 16 nCUBE,
->>18 beshort 17 VPP500,
->>18 beshort 18 SPARC32PLUS,
->>18 beshort 20 PowerPC or cisco 4500,
->>18 beshort 21 cisco 7500,
->>18 beshort 24 cisco SVIP,
->>18 beshort 25 cisco 7200,
->>18 beshort 36 cisco 12000,
->>18 beshort 0x9026 Alpha,
->>20 belong 0 invalid version
->>20 belong 1 version 1
->>36 belong 1 MathCoPro/FPU/MAU Required
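Because the OLF rules above have to branch on the byte-order flag before reading any multi-byte field, a short Python sketch of the same logic may help. It assumes OLF keeps the ELF-style field layout implied by the offsets above (class at 4, byte order at 5, OS at 7, type at 16, machine at 18); names and output format are illustrative, not a definitive parser.

```python
import struct

OS_NAMES = {1: "OpenBSD", 2: "NetBSD", 3: "FreeBSD", 4: "4.4BSD", 5: "Linux",
            6: "SVR4", 7: "esix", 8: "Solaris", 9: "Irix", 10: "SCO",
            11: "Dell", 12: "NCR"}
FILE_TYPES = {0: "no file type", 1: "relocatable", 2: "executable",
              3: "shared object", 4: "core file"}

def describe_olf(buf: bytes) -> str:
    if buf[:4] != b"\x7fOLF":
        raise ValueError("not an OLF image")
    word = {1: "32-bit", 2: "64-bit"}.get(buf[4], "invalid class")
    endian = {1: "<", 2: ">"}.get(buf[5])      # 1 = LSB, 2 = MSB, as above
    if endian is None:
        return f"OLF {word}, invalid byte order"
    os_name = OS_NAMES.get(buf[7], "invalid os")
    e_type, e_machine = struct.unpack_from(endian + "HH", buf, 16)
    kind = FILE_TYPES.get(e_type,
                          "processor-specific" if e_type & 0xff00 else "unknown")
    return f"OLF {word} {kind} ({os_name}), machine {e_machine}"
```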
diff --git a/contrib/libs/libmagic/magic/Magdir/openfst b/contrib/libs/libmagic/magic/Magdir/openfst
deleted file mode 100644
index 8df9b56b85..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/openfst
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: openfst,v 1.1 2019/09/30 15:58:24 christos Exp $
-# openfst: file(1) magic for OpenFST (weighted finite-state transducer library)
-
-0 long 0x7eb2fdd6 OpenFst binary FST data
->&0 pstring/l x \b, fst type: %s
->>&0 pstring/l x \b, arc type: %s
->>>&0 long x \b, version: %d
->>>>&20 quad x \b, num states: %lld
->>>>>&0 quad >0 \b, num arcs: %lld
-
-0 long 0x56515c OpenFst binary FAR data, far type: stlist
->4 long x \b, version: %d
-
-0 long 0x7eb2f35c OpenFst binary FAR data, far type: sttable
->4 long x \b, version: %d
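The three magic numbers above are declared with the native-order `long` type; the sketch below simply assumes a little-endian host and follows the `pstring/l` convention (a 4-byte length prefix before each type-name string). It is an assumption-based illustration, not the OpenFst reference reader.

```python
import struct

FST_MAGIC = 0x7EB2FDD6          # OpenFst binary FST data
FAR_STLIST_MAGIC = 0x0056515C   # OpenFst FAR data, far type: stlist
FAR_STTABLE_MAGIC = 0x7EB2F35C  # OpenFst FAR data, far type: sttable

def _read_type_string(buf, off):
    # Type names are serialized as an int32 length followed by the bytes,
    # which is what the pstring/l entries above match.
    (n,) = struct.unpack_from("<i", buf, off)
    return buf[off + 4:off + 4 + n].decode("ascii", "replace"), off + 4 + n

def describe_openfst(buf: bytes):
    (magic,) = struct.unpack_from("<I", buf, 0)
    if magic == FST_MAGIC:
        fst_type, off = _read_type_string(buf, 4)
        arc_type, off = _read_type_string(buf, off)
        (version,) = struct.unpack_from("<i", buf, off)
        return (f"OpenFst binary FST data, fst type: {fst_type}, "
                f"arc type: {arc_type}, version: {version}")
    if magic in (FAR_STLIST_MAGIC, FAR_STTABLE_MAGIC):
        far = "stlist" if magic == FAR_STLIST_MAGIC else "sttable"
        (version,) = struct.unpack_from("<i", buf, 4)
        return f"OpenFst binary FAR data, far type: {far}, version: {version}"
    return None
```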
diff --git a/contrib/libs/libmagic/magic/Magdir/opentimestamps b/contrib/libs/libmagic/magic/Magdir/opentimestamps
deleted file mode 100644
index f2f0e3ec11..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/opentimestamps
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------
-# $File: opentimestamps,v 1.1 2019/05/27 01:27:31 christos Exp $
-# OpenTimestamps related magic entries
-# https://opentimestamps.org/
-# https://en.wikipedia.org/wiki/OpenTimestamps
-# "Emanuele Cisbani" <emanuele.cisbani@gmail.com>
-#------------------------------------------------------------
-
-# OpenTimestamps Proof .ots format.
-# Magic is defined here:
-# https://github.com/opentimestamps/python-opentimestamps/\
-# blob/master/opentimestamps/core/timestamp.py#L273
-
-0 string \x00\x4f\x70\x65\x6e\x54\x69\x6d\x65\x73\x74\x61\x6d\x70\x73\x00 OpenTimestamps
->16 string \x00\x50\x72\x6f\x6f\x66\x00\xbf\x89\xe2\xe8\x84\xe8\x92\x94\x01 Proof
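The two string tests above are contiguous, so a detector can compare the first 32 bytes against the concatenated magic in one go; a minimal sketch (the helper name is illustrative):

```python
# 16-byte header magic followed immediately by the 16-byte "Proof" marker,
# exactly as tested at offsets 0 and 16 above.
OTS_PROOF_MAGIC = (b"\x00OpenTimestamps\x00"
                   b"\x00Proof\x00\xbf\x89\xe2\xe8\x84\xe8\x92\x94\x01")

def is_ots_proof(path: str) -> bool:
    with open(path, "rb") as fh:
        return fh.read(len(OTS_PROOF_MAGIC)) == OTS_PROOF_MAGIC
```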
diff --git a/contrib/libs/libmagic/magic/Magdir/oric b/contrib/libs/libmagic/magic/Magdir/oric
deleted file mode 100644
index 38c02c5751..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/oric
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: oric,v 1.2 2022/04/25 17:28:20 christos Exp $
-# Oric tape files
-# From: Stefan A. Haubenthal <polluks@sdf.lonestar.org>
-# References:
-# http://fileformats.archiveteam.org/wiki/TAP_(Oric)
-# http://fileformats.archiveteam.org/wiki/DSK_(Oric)
-0 string \x16\x16\x16\x24 Oric tape,
->6 byte =0x00 BASIC,
->6 byte =0x80 memory block,
->7 byte >0x00 autorun,
->13 string x "%.15s"
-
-0 string ORICDISK Oric Image
-0 string MFM_DISK Oric Image
diff --git a/contrib/libs/libmagic/magic/Magdir/os2 b/contrib/libs/libmagic/magic/Magdir/os2
deleted file mode 100644
index cb43e999f6..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/os2
+++ /dev/null
@@ -1,186 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: os2,v 1.14 2022/03/21 21:25:50 christos Exp $
-# os2: file(1) magic for OS/2 files
-#
-
-# Provided 1998/08/22 by
-# David Mediavilla <davidme.news@REMOVEIFNOTSPAMusa.net>
-1 search/100 InternetShortcut MS Windows 95 Internet shortcut text
-!:mime application/x-mswinurl
-!:ext url
->17 search/100 URL= (URL=<
->>&0 string x \b%s>)
-
-# OS/2 URL objects
-# Provided 1998/08/22 by
-# David Mediavilla <davidme.news@REMOVEIFNOTSPAMusa.net>
-#0 string http: OS/2 URL object text
-#>5 string >\ (WWW) <http:%s>
-#0 string mailto: OS/2 URL object text
-#>7 string >\ (email) <%s>
-#0 string news: OS/2 URL object text
-#>5 string >\ (Usenet) <%s>
-#0 string ftp: OS/2 URL object text
-#>4 string >\ (FTP) <ftp:%s>
-#0 string file: OS/2 URL object text
-#>5 string >\ (Local file) <%s>
-
-# >>>>> OS/2 INF/HLP <<<<< (source: Daniel Dissett ddissett@netcom.com)
-# URL: http://fileformats.archiveteam.org/wiki/INF/HLP_(OS/2)
-# Reference: http://www.edm2.com/0308/inf.html
-# Carl Hauser (chauser.parc@xerox.com) and
-# Marcus Groeber (marcusg@ph-cip.uni-koeln.de)
-# list the following header format in inf02a.doc:
-#
-# int16 ID; // ID magic word (5348h = "HS")
-# int8 unknown1; // unknown purpose, could be third letter of ID
-# int8 flags; // probably a flag word...
-# // bit 0: set if INF style file
-# // bit 4: set if HLP style file
-# // patching this byte allows reading HLP files
-# // using the VIEW command, while help files
-# // seem to work with INF settings here as well.
-# int16 hdrsize; // total size of header
-# int16 unknown2; // unknown purpose
-#
-0 string HSP\x01\x9b\x00 OS/2 INF
-!:mime application/x-os2-inf
-!:ext inf
->107 string >0 (%s)
-0 string HSP\x10\x9b\x00 OS/2 HLP
-!:mime application/x-os2-hlp
-!:ext hlp
->107 string >0 (%s)
-
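A minimal Python reading of the header quoted above; the 64-byte title cap and the cp850 decoding are assumptions of this sketch, since the magic merely prints whatever string starts at offset 107.

```python
import struct

def describe_os2_book(buf: bytes):
    # Layout from inf02a.doc as quoted above:
    # int16 ID ("HS"), int8 unknown1, int8 flags, int16 hdrsize, int16 unknown2
    if buf[:2] != b"HS":
        return None
    flags = buf[3]
    (hdrsize,) = struct.unpack_from("<H", buf, 4)
    if flags & 0x01:
        kind = "INF"                      # bit 0: INF style file
    elif flags & 0x10:
        kind = "HLP"                      # bit 4: HLP style file
    else:
        kind = "unknown"
    title = buf[107:107 + 64].split(b"\0", 1)[0].decode("cp850", "replace")
    return f"OS/2 {kind}, header size {hdrsize}" + (f" ({title})" if title else "")
```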
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/MSG_(OS/2)
-# Reference: https://github.com/OS2World/UTIL-SYSTEM-MKMSGF/blob/master/mkmsgf.h
-# Note: created by MKMSGF.EXE. Text source can be recreated by E_MSGF
-# example like OS001H.MSG
-0 string \xffMKMSGF\0 OS/2 help message
-!:mime application/x-os2-msg
-!:ext msg
-# identifier[3] like: DOS NET REX SYS ...
->8 string x '%.3s'
-# msgnumber: number of messages
->11 uleshort x \b, %u messages
-# firstmsgnumber; number of the first message like: sometimes 0, often 1 169 1000 3502
->13 uleshort >1 \b, 1st number %u
-# offset16bit; 1~Index table has 16-bit offsets (files<64k) 0~Index table has 32-bit offsets
->15 ubyte =0 \b, 32-bit
-#>15 ubyte =1 \b, 16-bit
-# version; file version: 2~new 0~old
->16 uleshort !2 \b, version %u
-# indextaboffset; offset of index table: 1F~after header 0~no index table for version 0?
->18 uleshort >0
->>18 uleshort !0x1f \b, at %#x index
-# 32-bit offset
->>15 ubyte =0
-# offset with message table
->>>(18.s) ulelong x \b, at %#x
-# 1st message
-# http://www.os2museum.com/files/docs/os210ptk/os2-1.0-ptk-tools-1988.pdf
-# message type: E~Error H~Help I~Information P~Prompt W~Warning ?
->>>>(&-4.l) ubyte x %c-type
->>>>>&0 string x %s
-# 16-bit offset
->>15 ubyte =1
-# msgnum; message number
->>>(18.s) uleshort x \b, number %u
-# msgindex; offset of message from begin of file
->>>(18.s+2) uleshort x at %#x
-# message type E H I P W ?
->>>>(&-2.s) ubyte x %c-type
-# skip newline carriage return
->>>>>&0 ubeshort =0x0D0a
->>>>>>&0 string x %s
->>>>>&0 ubeshort !0x0D0a
->>>>>>&-2 string x %s
-# for version 0 index table apparently at offset 1F
->16 uleshort 0
->>15 ubyte 1
-# 1st message 16-bit
->>>0x1F uleshort x \b, at %#x
-# message type: E~Error H~Help I~Information P~Prompt W~Warning ?
->>>>(0x1F.s) ubyte x %c-type
->>>>>&0 string x %s
-# 2nd message 16-bit
->>>0x21 uleshort x \b, at %#x
->>>>(0x21.s) ubyte x %c-type
->>>>>&0 string x %s
-# 3rd message 16-bit
->>>0x23 uleshort x \b, at %#x
->>>>(0x23.s) ubyte x %c-type
->>>>>&0 string x %s
-# version 0 32-bit
->>15 ubyte 0
-# 1st message 32-bit
->>>0x1f ulelong x \b, at %#x
->>>>(0x1F.l) ubyte x %c-type
->>>>>&0 string x %s
-# 2nd message 32-bit
->>>0x23 ulelong x \b, at %#x
->>>>(0x23.l) ubyte x %c-type
->>>>>&0 string x %s
-# 3rd message 32-bit
->>>0x27 ulelong x \b, at %#x
->>>>(0x27.l) ubyte x %c-type
->>>>>&0 string x %s
-# countryinfo; offset of country info block: 0 for version 0
->20 uleshort !0 \b, at %#x countryinfo
-# nextcoutryinfo
->>22 uleshort >0 \b, at %#x next
-# reserved[5]; Must be 0
->>25 ulelong !0 \b, RESERVED %#x
->>(20.s) use os2-msg-info
-# display country info block of MKMSGF message file
-0 name os2-msg-info
-# bytesperchar; bytes per char: 1~SBCS 2~DBCS
->0 ubyte >1 \b, %u bytes/char
-# reserved; Not known
->1 uleshort !0 \b, reserved %#x
-# langfamilyID; language family ID like: 0~? 1~Arabic ... 7~German ... 9~English ... 34~Slovene
->3 uleshort >0 \b, language %u
-# langversionID; like: 7_1~German 7_2~Swiss German 12_1~French 12_3~Canadian French
->>5 uleshort x \b_%u
-# langfamilyID too high. This should not happen
->3 uleshort >34 (invalid language)
-# codepagesnumber; number of codepages like: 1 2 ... 16
->7 uleshort x \b, %u code page
-# plural s
->7 uleshort >1 \bs
-# too many codepages. This should not happen
->7 uleshort >16 (Too many)
-# codepages[16]; codepages list like 437 850 ...
->7 uleshort <17
-# 1st code page
->>9 uleshort >0 %u
-# possible 2nd code page number
->>>7 uleshort >1
->>>>11 uleshort x %u
-# filename[260]; name of file like: dbaseos2.msg dde4c01e.msg os2ldr.mgr xdfh.msg ...
->41 string x \b, %s
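For reference, the fixed part of the MKMSGF header walked above can be unpacked in a few lines. This sketch only mirrors the field offsets given in the comments (identifier at 8, counts at 11 and 13, offset width at 15, version/index/country-info at 16/18/20) and skips the index-table chasing; names and wording are illustrative.

```python
import struct

def describe_mkmsgf(buf: bytes):
    if buf[:8] != b"\xffMKMSGF\x00":
        return None
    ident = buf[8:11].decode("ascii", "replace")       # e.g. DOS, NET, SYS
    msgs, first = struct.unpack_from("<HH", buf, 11)
    wide = buf[15] == 0                                # 0 = 32-bit index offsets
    version, index_off, country_off = struct.unpack_from("<HHH", buf, 16)
    return (f"OS/2 help message '{ident}', {msgs} messages, first number {first}, "
            f"{'32' if wide else '16'}-bit index at {index_off:#x}, "
            f"version {version}, country info at {country_off:#x}")
```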
-
-# OS/2 INI (this is a guess)
-0 string \xff\xff\xff\xff\x14\0\0\0 OS/2 INI
-!:mime application/x-os2-ini
-!:ext ini
-
-# From: Joerg Jenderek
-# URL: http://warpin.netlabs.org/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/ark-wpi.trid.xml
-# Note: called by TrID "WarpIN Installer"
-# probably magic at the beginning
-0 ubelong =0x770402BE WarpIN Installer
-#>4 ubelong =0x03000000
-#!:mime application/octet-stream
-!:mime application/x-os2-wpi
-!:ext wpi
-# creator program name like: "reserved" or "WIC x.y.z"
->0x106 string x \b, created by %s
-# name like: "reserved" or "OS/2 Netlabs"
->0x146 string x \b, '%s'
-# name like: "N/A" "http://warpin.netlabs.org"
->0x186 string x \b, URL %s
-
diff --git a/contrib/libs/libmagic/magic/Magdir/os400 b/contrib/libs/libmagic/magic/Magdir/os400
deleted file mode 100644
index 6a05f083eb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/os400
+++ /dev/null
@@ -1,39 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: os400,v 1.5 2009/09/19 16:28:11 christos Exp $
-# os400: file(1) magic for IBM OS/400 files
-#
-# IBM OS/400 (i5/OS) Save file (SAVF) - gerardo.cacciari@gmail.com
-# In spite of its quite variable format (due to internal memory page
-# length differences between CISC and RISC versions of the OS) the
-# SAVF structure has no suitable offsets to identify the catalog
-# header in the first descriptor, where there is some useful info,
-# so we must search in a somewhat large area for a particular string
-# that represents the EBCDIC encoding of 'QSRDSSPC' (save/restore
-# descriptor space) preceded by a two byte constant.
-#
-1090 search/7393 \x19\xDB\xD8\xE2\xD9\xC4\xE2\xE2\xD7\xC3 IBM OS/400 save file data
->&212 byte 0x01 \b, created with SAVOBJ
->&212 byte 0x02 \b, created with SAVLIB
->&212 byte 0x07 \b, created with SAVCFG
->&212 byte 0x08 \b, created with SAVSECDTA
->&212 byte 0x0A \b, created with SAVSECDTA
->&212 byte 0x0B \b, created with SAVDLO
->&212 byte 0x0D \b, created with SAVLICPGM
->&212 byte 0x11 \b, created with SAVCHGOBJ
->&213 byte 0x44 \b, at least V5R4 to open
->&213 byte 0x43 \b, at least V5R3 to open
->&213 byte 0x42 \b, at least V5R2 to open
->&213 byte 0x41 \b, at least V5R1 to open
->&213 byte 0x40 \b, at least V4R5 to open
->&213 byte 0x3F \b, at least V4R4 to open
->&213 byte 0x3E \b, at least V4R3 to open
->&213 byte 0x3C \b, at least V4R2 to open
->&213 byte 0x3D \b, at least V4R1M4 to open
->&213 byte 0x3B \b, at least V4R1 to open
->&213 byte 0x3A \b, at least V3R7 to open
->&213 byte 0x35 \b, at least V3R6 to open
->&213 byte 0x36 \b, at least V3R2 to open
->&213 byte 0x34 \b, at least V3R1 to open
->&213 byte 0x31 \b, at least V3R0M5 to open
->&213 byte 0x30 \b, at least V2R3 to open
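A sketch of the same detection in Python; the window bounds mirror the `search/7393` entry above, and the marker is the EBCDIC encoding of 'QSRDSSPC' preceded by the constant bytes 0x19 0xDB.

```python
SAVF_MARKER = bytes.fromhex("19DBD8E2D9C4E2E2D7C3")   # 0x19 0xDB + EBCDIC 'QSRDSSPC'

def is_os400_savf(buf: bytes) -> bool:
    # search a 7393-byte window starting at offset 1090, as in the entry above
    return buf.find(SAVF_MARKER, 1090, 1090 + 7393 + len(SAVF_MARKER)) != -1
```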
diff --git a/contrib/libs/libmagic/magic/Magdir/os9 b/contrib/libs/libmagic/magic/Magdir/os9
deleted file mode 100644
index 74b47f3585..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/os9
+++ /dev/null
@@ -1,80 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: os9,v 1.8 2017/03/17 21:35:28 christos Exp $
-#
-# Copyright (c) 1996 Ignatios Souvatzis. All rights reserved.
-#
-# Redistribution and use in source and binary forms, with or without
-# modification, are permitted provided that the following conditions
-# are met:
-# 1. Redistributions of source code must retain the above copyright
-# notice, this list of conditions and the following disclaimer.
-# 2. Redistributions in binary form must reproduce the above copyright
-# notice, this list of conditions and the following disclaimer in the
-# documentation and/or other materials provided with the distribution.
-#
-# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
-# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
-# OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
-# IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
-# OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
-# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
-# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
-# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-#
-#
-#
-# OS9/6809 module descriptions:
-#
-0 beshort 0x87CD OS9/6809 module:
-#
->6 byte&0x0f 0x00 non-executable
->6 byte&0x0f 0x01 machine language
->6 byte&0x0f 0x02 BASIC I-code
->6 byte&0x0f 0x03 Pascal P-code
->6 byte&0x0f 0x04 C I-code
->6 byte&0x0f 0x05 COBOL I-code
->6 byte&0x0f 0x06 Fortran I-code
-#
->6 byte&0xf0 0x10 program executable
->6 byte&0xf0 0x20 subroutine
->6 byte&0xf0 0x30 multi-module
->6 byte&0xf0 0x40 data module
-#
->6 byte&0xf0 0xC0 system module
->6 byte&0xf0 0xD0 file manager
->6 byte&0xf0 0xE0 device driver
->6 byte&0xf0 0xF0 device descriptor
-#
-# OS9/m68k stuff (to be continued)
-#
-0 beshort 0x4AFC OS9/68K module:
-#
-# attr
->0x14 byte&0x80 0x80 re-entrant
->0x14 byte&0x40 0x40 ghost
->0x14 byte&0x20 0x20 system-state
-#
-# lang:
-#
->0x13 byte 1 machine language
->0x13 byte 2 BASIC I-code
->0x13 byte 3 Pascal P-code
->0x13 byte 4 C I-code
->0x13 byte 5 COBOL I-code
->0x13 byte 6 Fortran I-code
-#
-#
-# type:
-#
->0x12 byte 1 program executable
->0x12 byte 2 subroutine
->0x12 byte 3 multi-module
->0x12 byte 4 data module
->0x12 byte 11 trap library
->0x12 byte 12 system module
->0x12 byte 13 file manager
->0x12 byte 14 device driver
->0x12 byte 15 device descriptor
diff --git a/contrib/libs/libmagic/magic/Magdir/osf1 b/contrib/libs/libmagic/magic/Magdir/osf1
deleted file mode 100644
index 4e9147196d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/osf1
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: osf1,v 1.7 2009/09/19 16:28:11 christos Exp $
-#
-# Mach magic number info
-#
-0 long 0xefbe OSF/Rose object
-# I386 magic number info
-#
-0 short 0565 i386 COFF object
diff --git a/contrib/libs/libmagic/magic/Magdir/palm b/contrib/libs/libmagic/magic/Magdir/palm
deleted file mode 100644
index 5d2b913c35..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/palm
+++ /dev/null
@@ -1,156 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: palm,v 1.15 2021/12/16 21:50:06 christos Exp $
-# palm: file(1) magic for PalmOS {.prc,.pdb}: applications, docfiles, and hacks
-#
-# Brian Lalor <blalor@hcirisc.cs.binghamton.edu>
-
-# These are weak, byte 59 is not guaranteed to be 0 and there are
-# 8-character identifiers at byte 60; one I found for appl is BIGb.
-# What are the possibilities and where is this documented?
-
-# The common header format for PalmOS .pdb/.prc files is
-# {
-# char name[ 32 ];
-# Word attributes;
-# Word version;
-# DWord creationDate;
-# DWord modificationDate;
-# DWord lastBackupDate;
-# DWord modificationNumber;
-# DWord appInfoID;
-# DWord sortInfoID;
-# char type[4];
-# char creator[4];
-# DWord uniqueIDSeed;
-# RecordListType recordList;
-# };
-#
-# Datestamps are unsigned seconds since the MacOS epoch (Jan 1, 1904),
-# or Unix/POSIX time + 2082844800.
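A Python rendering of that common header may be useful next to the struct; the field order follows the comment above, while the ASCII decoding and the guard against garbage timestamps are assumptions of this sketch.

```python
import struct
from datetime import datetime, timezone

MAC_EPOCH_OFFSET = 2082844800   # seconds between 1904-01-01 and the Unix epoch

def parse_pdb_header(buf: bytes) -> dict:
    (name, attributes, version, ctime, mtime, btime, modnum,
     appinfo, sortinfo, dbtype, creator, seed) = struct.unpack_from(
        ">32sHHIIIIII4s4sI", buf, 0)
    try:
        created = datetime.fromtimestamp(ctime - MAC_EPOCH_OFFSET, tz=timezone.utc)
    except (OverflowError, OSError, ValueError):
        created = None          # many files store garbage here, as noted above
    return {
        "name": name.split(b"\0", 1)[0].decode("ascii", "replace"),
        "type": dbtype.decode("ascii", "replace"),      # e.g. "BOOK", "TEXt"
        "creator": creator.decode("ascii", "replace"),  # e.g. "MOBI", "REAd"
        "created": created,
    }
```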
-
-0 name aportisdoc
-# date is supposed to be big-endian seconds since 1 Jan 1904, but many
-# files contain the timestamp in little-endian or a completely
-# nonsensical value...
-#>36 bedate-2082844800 >0 \b, created %s
-# compression: 1=uncomp, 2=orig, 0x4448=HuffDic
->(78.L) beshort =1 \b, uncompressed
-# compressed
->(78.L) beshort >1
->>(78.L+4) belong x \b, %d bytes uncompressed
-
-# appl
-#60 string appl PalmOS application
-#>0 string >\0 "%s"
-
-# HACK
-#60 string HACK HackMaster hack
-#>0 string >\0 "%s"
-
-# iSiloX e-book
-60 string SDocSilX iSiloX E-book
->0 string >\0 "%s"
-
-# Mobipocket (www.mobipocket.com), donated by Carl Witty
-# expanded by Ralf Brown
-60 string BOOKMOBI Mobipocket E-book
-!:mime application/x-mobipocket-ebook
-# MobiPocket stores a full title, pointed at by the belong at offset
-# 0x54 in its header at (78.L), with length given by the belong at
-# offset 0x58.
-# there's no guarantee that the title string is null-terminated, but
-# we currently can't specify a variable-length string where the length
-# field is not at the start of the string; in practice, the data
-# following the string always seems to start with a zero byte
->(78.L) belong x
->>&(&0x50.L-4) string >\0 "%s"
->0 use aportisdoc
->>(78.L+0x68) belong >0 \b, version %d
->>(78.L+0x1C) belong !0 \b, codepage %d
->>(78.L+0x0C) beshort >0 \b, encrypted (type %d)
-
-# AportisDoc/PalmDOC
-60 string TEXtREAd AportisDoc/PalmDOC E-book
->0 string >\0 "%s"
->0 use aportisdoc
-
-# Variety of PalmOS document types
-# Michael-John Turner <mj@debian.org>
-# Thanks to Hasan Umit Ezerce <humit@tr-net.net.tr> for his DocType
-60 string BVokBDIC BDicty PalmOS document
->0 string >\0 "%s"
-60 string DB99DBOS DB PalmOS document
->0 string >\0 "%s"
-60 string vIMGView FireViewer/ImageViewer PalmOS document
->0 string >\0 "%s"
-60 string PmDBPmDB HanDBase PalmOS document
->0 string >\0 "%s"
-60 string InfoINDB InfoView PalmOS document
->0 string >\0 "%s"
-60 string ToGoToGo iSilo PalmOS document
->0 string >\0 "%s"
-60 string JfDbJBas JFile PalmOS document
->0 string >\0 "%s"
-60 string JfDbJFil JFile Pro PalmOS document
->0 string >\0 "%s"
-60 string DATALSdb List PalmOS document
->0 string >\0 "%s"
-60 string Mdb1Mdb1 MobileDB PalmOS document
->0 string >\0 "%s"
-60 string PNRdPPrs PeanutPress PalmOS document
->0 string >\0 "%s"
-60 string DataPlkr Plucker PalmOS document
->0 string >\0 "%s"
-60 string DataSprd QuickSheet PalmOS document
->0 string >\0 "%s"
-60 string SM01SMem SuperMemo PalmOS document
->0 string >\0 "%s"
-60 string TEXtTlDc TealDoc PalmOS document
->0 string >\0 "%s"
-60 string InfoTlIf TealInfo PalmOS document
->0 string >\0 "%s"
-60 string DataTlMl TealMeal PalmOS document
->0 string >\0 "%s"
-60 string DataTlPt TealPaint PalmOS document
->0 string >\0 "%s"
-60 string dataTDBP ThinkDB PalmOS document
->0 string >\0 "%s"
-60 string TdatTide Tides PalmOS document
->0 string >\0 "%s"
-60 string ToRaTRPW TomeRaider PalmOS document
->0 string >\0 "%s"
-
-# A GutenPalm zTXT etext for use on Palm Pilots (http://gutenpalm.sf.net)
-# For version 1.xx zTXTs, outputs version and numbers of bookmarks and
-# annotations.
-# For other versions, just outputs version.
-#
-60 string zTXT A GutenPalm zTXT e-book
->0 string >\0 "%s"
->(0x4E.L) byte 0
->>(0x4E.L+1) byte x (v0.%02d)
->(0x4E.L) byte 1
->>(0x4E.L+1) byte x (v1.%02d)
->>>(0x4E.L+10) beshort >0
->>>>(0x4E.L+10) beshort <2 - 1 bookmark
->>>>(0x4E.L+10) beshort >1 - %d bookmarks
->>>(0x4E.L+14) beshort >0
->>>>(0x4E.L+14) beshort <2 - 1 annotation
->>>>(0x4E.L+14) beshort >1 - %d annotations
->(0x4E.L) byte >1 (v%d.
->>(0x4E.L+1) byte x %02d)
-
-# Palm OS .prc file types
-60 string libr
-# flags, only bit 0 or bit 6
-# https://en.wikipedia.org/wiki/PRC_%28Palm_OS%29
-# https://web.mit.edu/tytso/www/pilot/prc-format.html
->0x20 beshort&0xffbe 0
->>0 string >\0 Palm OS dynamic library data "%s"
-60 string ptch Palm OS operating system patch data
->0 string >\0 "%s"
-
-# Mobipocket (www.mobipocket.com), donated by Carl Witty
-60 string BOOKMOBI Mobipocket E-book
->0 string >\0 "%s"
diff --git a/contrib/libs/libmagic/magic/Magdir/parix b/contrib/libs/libmagic/magic/Magdir/parix
deleted file mode 100644
index ba5cbf5fe8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/parix
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: parix,v 1.5 2020/03/08 22:18:32 christos Exp $
-#
-# Parix COFF executables
-# From: Ignatios Souvatzis <ignatios@cs.uni-bonn.de>
-#
-0 beshort&0xefff 0x8ACE PARIX
->0 byte&0xf0 0x80 T800
->0 byte&0xf0 0x90 T9000
->19 byte&0x02 0x02 executable
->19 byte&0x02 0x00 object
->19 byte&0x0c 0x00 not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/parrot b/contrib/libs/libmagic/magic/Magdir/parrot
deleted file mode 100644
index b2a56c817a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/parrot
+++ /dev/null
@@ -1,22 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: parrot,v 1.2 2019/04/19 00:42:27 christos Exp $
-# parrot: file(1) magic for Parrot Virtual Machine
-# URL: http://www.parrot.org/
-# From: Lubomir Rintel <lkundrak@v3.sk>
-
-# Compiled Parrot byte code
-0 string \376PBC\r\n\032\n Parrot bytecode
->64 byte x %d.
->72 byte x \b%d,
->8 byte >0 %d byte words,
->16 byte 0 little-endian,
->16 byte 1 big-endian,
->32 byte 0 IEEE-754 8 byte double floats,
->32 byte 1 x86 12 byte long double floats,
->32 byte 2 IEEE-754 16 byte long double floats,
->32 byte 3 MIPS 16 byte long double floats,
->32 byte 4 AIX 16 byte long double floats,
->32 byte 5 4-byte floats,
->40 byte x Parrot %d.
->48 byte x \b%d.
->56 byte x \b%d
diff --git a/contrib/libs/libmagic/magic/Magdir/pascal b/contrib/libs/libmagic/magic/Magdir/pascal
deleted file mode 100644
index 6168802456..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pascal
+++ /dev/null
@@ -1,39 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: pascal,v 1.4 2022/07/30 16:53:06 christos Exp $
-# pascal: file(1) magic for Pascal source
-#
-0 search/8192 (input, Pascal source text
-!:mime text/x-pascal
-#0 regex \^program Pascal source text
-#!:mime text/x-pascal
-#0 regex \^record Pascal source text
-#!:mime text/x-pascal
-
-# Free Pascal
-0 string PPU Pascal unit
->3 string x \b, version %s
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Dan_Bricklin
-0 string/b Type
-# URL: https://dl.winworldpc.com/Dan%20Bricklins%20Demo%20II%20Version%202%20Manual.7z
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dbd-v2.trid.xml
->4 string D2 Dan Bricklin's Demo 2 demo
-#!:mime application/octet-stream
-!:ext dbd
-# URL: https://muhaz.org/turbo-pascal-download-details.html
-# From: Joerg Jenderek
-# Note: used by Turbo Pascal 5.5 TOUR.EXE
->4 string T2 Turbo Pascal TOUR data
-#!:mime application/octet-stream
-!:mime application/x-borland-cbt
-!:ext cbt
-# What is that?
-#>4 string \040P Dan Bricklin's Demo 2 foo
-#!:mime application/octet-stream
-# _PPRINT.SG2 _PASCII.SG2
-#!:ext sg2
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dbd-gen.trid.xml
->4 default x Dan Bricklin's Demo demo (generic)
-#!:mime application/octet-stream
-!:ext dbd
diff --git a/contrib/libs/libmagic/magic/Magdir/pbf b/contrib/libs/libmagic/magic/Magdir/pbf
deleted file mode 100644
index 0ab7a88101..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pbf
+++ /dev/null
@@ -1,11 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pbf,v 1.3 2019/04/19 00:42:27 christos Exp $
-# file(1) magic(5) data for OpenStreetMap
-
-# OpenStreetMap Protocolbuffer Binary Format (.osm.pbf)
-# https://wiki.openstreetmap.org/wiki/PBF_Format
-# From: Markus Heidelberg <markus.heidelberg@web.de>
-0 belong&0xfffffff0 0
->4 beshort 0x0A09
->>6 string OSMHeader OpenStreetMap Protocolbuffer Binary Format
diff --git a/contrib/libs/libmagic/magic/Magdir/pbm b/contrib/libs/libmagic/magic/Magdir/pbm
deleted file mode 100644
index 40ecf49114..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pbm
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pbm,v 1.6 2009/09/19 16:28:11 christos Exp $
-# pbm: file(1) magic for Portable Bitmap files
-#
-# XXX - byte order?
-#
-0 short 0x2a17 "compact bitmap" format (Poskanzer)
diff --git a/contrib/libs/libmagic/magic/Magdir/pc88 b/contrib/libs/libmagic/magic/Magdir/pc88
deleted file mode 100644
index 03822f5027..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pc88
+++ /dev/null
@@ -1,24 +0,0 @@
-#------------------------------------------------------------------------------
-# pc88: file(1) magic for the NEC PC-88 home computer
-# v1.0
-# Fabio R. Schmidlin <sd-snatcher@users.sourceforge.net>
-
-# PC88 2D disk image
-0x20 ulelong&0xFFFFFEFF 0x2A0
->0x10 string \0\0\0\0\0\0\0\0\0\0
->>0x280 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0
->>>0x1A ubyte&0xEF 0
->>>>0x1B ubyte&0x8F 0
->>>>>0x1B ubyte&70 <0x40
->>>>>>0x1C ulelong >0x21
->>>>>>>0 regex [[:print:]]* NEC PC-88 disk image, name=%s
->>>>>>>>0x1B ubyte 0 \b, media=2D
->>>>>>>>0x1B ubyte 0x10 \b, media=2DD
->>>>>>>>0x1B ubyte 0x20 \b, media=2HD
->>>>>>>>0x1B ubyte 0x30 \b, media=1D
->>>>>>>>0x1B ubyte 0x40 \b, media=1DD
->>>>>>>>0x1A ubyte 0x10 \b, write-protected
-
-
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/pc98 b/contrib/libs/libmagic/magic/Magdir/pc98
deleted file mode 100644
index e8f6b8a57a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pc98
+++ /dev/null
@@ -1,77 +0,0 @@
-#------------------------------------------------------------------------------
-# pc98: file(1) magic for the MSX Home Computer
-# v1.0
-# Fabio R. Schmidlin <sd-snatcher@users.sourceforge.net>
-
-# Maki-chan v1 Graphic format
-# The image resolution should be X=(44.L - 40.L) and Y=(46.L - 42.L), but I couldn't find a way to express that here
-# http://www.jisyo.com/viewer/faq/maki_tech.htm
-0 string/b MAKI01 Maki-chan v1.
->6 ubyte|0x20 x \b%c image
->8 ubelong >0x40404040 \b, system ID:
->>8 byte x %c
->>9 byte x \b%c
->>10 byte x \b%c
->>11 byte x \b%c
->44 ubeshort x \b, %dx
->46 ubeshort x \b%d
->38 ubeshort&2 0 \b, 16 paletted RGB colors
->38 ubeshort&2 2 \b, 8 fixed RGB colors
->38 ubeshort&1 1 \b, 2:1 dot aspect ratio
-
-# Maki-chan v2 Graphic format
-# http://www.jisyo.com/viewer/faq/mag_tech.htm
-# https://mooncore.eu/bunny/txt/makichan.htm
-# http://metanest.jp/mag/mag.xhtml
-0 string/b MAKI02\ \ Maki-chan v2 image,
->8 byte x system ID: %c
->9 byte x \b%c
->10 byte x \b%c
->11 byte x \b%c,
->13 search/0x200 \x1A
-#Maki-chan video modes are a bit messy and seem to have been expanded over the years without too much planning:
-#1) When offset1(ubeshort) !=0x0344:
-# 1.1) And offset3(ubyte).b7=0:
-# - b0=pixel aspect ratio: 1=2:1 (note: this ignores that the machine's 1:1 pixel aspect ratio isn't really 1:1)
-# - b1=number of colors: 0=16 colors, 1=8 colors
-# - b2=Palette or fixed colors flag (called "analog" and "digital" in the doc): 0=Paletted, 1=Fixed colors encoded directly in the pixel data
-# 1.2) And offset3(ubyte).B7=1:
-# - b0=256 paletted colors
-# - b1=256 fixed colors using the MSX SCR8 palette
-#2) When offset1(ubeshort) =0x0344:
-# - 256x212 image with 19268 YJK colors. The usual resolution and color information fields from the file must be ignored
->>&1 ubeshort 0x0344 256x212, 19268 fixed YJK colors
->>&1 ubeshort !0x0344
->>>&5 uleshort+1 x %dx
->>>&7 uleshort+1 x \b%d,
->>>&0 ubyte&0x86 0x00 16 paletted RGB colors
->>>&0 ubyte&0x86 0x02 8 paletted RGB colors
->>>&0 ubyte&0x86 0x04 16 fixed RGB colors
->>>&0 ubyte&0x86 0x06 8 fixed RGB colors
->>>&0 ubyte&0x81 0x80 256 paletted RGB colors
->>>&0 ubyte&0x81 0x81 256 fixed MSX-SCR8 colors
->>>&0 ubyte&0x01 1 \b, 2:1 dot aspect ratio
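Decoding the screen-mode byte follows directly from the notes above; this sketch applies the documented bit meanings (and the 0x0344 special case) rather than the exact mask/value pairs of the entries, so treat it as an illustration.

```python
def maki2_colors(flag: int, screen_mode: int) -> str:
    # flag is the byte the entries above test with &0x86/&0x81/&0x01;
    # screen_mode is the ubeshort that equals 0x0344 for the YJK case.
    if screen_mode == 0x0344:
        return "256x212, 19268 fixed YJK colors"
    if flag & 0x80:                                   # 256-colour modes
        return ("256 fixed MSX-SCR8 colors" if flag & 0x01
                else "256 paletted RGB colors")
    ncolors = 8 if flag & 0x02 else 16
    kind = "fixed" if flag & 0x04 else "paletted"
    desc = f"{ncolors} {kind} RGB colors"
    if flag & 0x01:
        desc += ", 2:1 dot aspect ratio"
    return desc
```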
-
-# XLD4 (Q4) picture
-11 string/b MAJYO XLD4(Q4) picture
-
-# Yanagisawa Pi picture
-#0 string Pi\x1A\0 Yanagisawa Pi picture
-#>3 search/0x200 \x04
-0 string Pi
->2 search/0x200 \x1A
->>&0 ubyte 0
->>>&3 ubyte 4 Yanagisawa Pi 16 color picture,
->>>&4 byte x system ID: %c
->>>&5 byte x \b%c
->>>&6 byte x \b%c
->>>&7 byte x \b%c,
->>>&10 ubeshort x %dx
->>>&12 ubeshort x \b%d
->>>&3 ubyte 8 Yanagisawa Pi 256 color picture
->>>&4 byte x system ID: %c
->>>&5 byte x \b%c
->>>&6 byte x \b%c
->>>&7 byte x \b%c,
->>>&10 ubeshort x %dx
->>>&12 ubeshort x \b%d
diff --git a/contrib/libs/libmagic/magic/Magdir/pci_ids b/contrib/libs/libmagic/magic/Magdir/pci_ids
deleted file mode 100644
index 34bc2e2f8a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pci_ids
+++ /dev/null
@@ -1,116 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pci_ids,v 1.1 2022/04/02 14:47:42 christos Exp $
-# pci.ids: file(1) magic for PCI-specific information
-#
-
-# Vendor identification (ID) https://pci-ids.ucw.cz/v2.2/pci.ids
-# show hexadecimal PCI vendor identification in human readable text form
-0 name PCI-vendor
-# ID vendor name
-#>0 uleshort =0x0f00 fOO
->0 uleshort =0x1000 Broadcom
->0 uleshort =0x1002 AMD/ATI
->0 uleshort =0x1013 Cirrus Logic
->0 uleshort =0x1014 IBM
->0 uleshort =0x1022 AMD
->0 uleshort =0x1050 Winbond
->0 uleshort =0x105a Promise
->0 uleshort =0x1095 Silicon
->0 uleshort =0x10EC Realtek
->0 uleshort =0x10de NVIDIA
->0 uleshort =0x1106 VIA
-# Woodward McCoach, Inc.
->0 uleshort =0x1231 Woodward
-#
->0 uleshort =0x1234 Bochs
->0 uleshort =0x15ad VMware
->0 uleshort =0x1af4 Virtio
->0 uleshort =0x1b36 QEMU
->0 uleshort =0x1de1 Tekram
-# maybe also Promise?
-#>0 uleshort =0x4289 Promise
-#>0 uleshort =0x66a1 FOO
->0 uleshort =0x8086 Intel
->0 uleshort =0x9004 Adaptec
-# also Adaptec; but no example
->0 uleshort =0x9005 Adaptec
-# for unknown/missing manufacturers
->0 default x UNKNOWN
->>0 uleshort x (%#4.4x)
-
-# https://blog.ladsai.com/pci-configuration-space-class-code.html
-# Base class code https://wiki.osdev.org/PCI
-# show hexadecimal PCI class+sub+ProgIF identification in human readable text form
-0 name PCI-class
-#>0 ubyte x CLASS=%x
->0 ubyte x
-# Device was built prior definition of the class code field
->>0 ubyte 0x00 PRIOR
-# Any device except for VGA-Compatible devices like: 2975BIOS.BIN Trm3x5.bin
-# BUT also NVidia44.bin vgabios-stdvga-bin.rom
-#>>>0 ubyte 0x00 NOT VGA
-# VGA-Compatible Device; NO EXAMPLE found here!!
-#>>>0 ubyte 0x01 VGA
-# like 4243.bin
-#>>>0 ubyte 0x04 SUB_CLASS_4
->>0 ubyte 0x01 storage controller
-# device sub-type and its definition is dependent upon the base-type code
->>>1 ubyte 0x00 SCSI
->>>1 ubyte 0x01 IDE
->>>1 ubyte 0x02 Floppy
->>>1 ubyte 0x03 IPI
->>>1 ubyte 0x04 RAID
->>>1 ubyte 0x05 ATA
->>>1 ubyte 0x06 SATA
->>>1 ubyte 0x07 SAS
->>>1 ubyte 0x08 NVM
-# 4650_sr5.bin "PROMISE" "FT TX4650 Ary X"
->>>1 ubyte 0x80 OTHER
->>0 ubyte 0x02 network controller
->>>1 ubyte 0x00 ethernet
->>>1 ubyte 0x01 token ring
->>>1 ubyte 0x02 FDDI
->>>1 ubyte 0x03 ATM
->>>1 ubyte 0x04 ISDN
->>>1 ubyte 0x05 WorldFip
-# PICMG 2.14 Multi Computing
->>>1 ubyte 0x06 PICMG
->>>1 ubyte 0x80 OTHER
->>0 ubyte 0x03 display controller
->>0 ubyte 0x04 multimedia controller
->>0 ubyte 0x05 memory controller
->>0 ubyte 0x06 bridge device
-# Simple Communication Controllers
->>0 ubyte 0x07 communication controller
-# Base System Peripherals
->>0 ubyte 0x08 base peripheral
-# Input Devices
->>0 ubyte 0x09 input device
-# Docking Stations
->>0 ubyte 0x0A docking station
->>0 ubyte 0x0B processor
->>0 ubyte 0x0C serial bus controller
->>0 ubyte 0x0D wireless controller
-# Intelligent I/O Controllers
->>0 ubyte 0x0E I/O controller
-# Satellite Communication Controllers
->>0 ubyte 0x0F satellite controller
-# Encryption/Decryption Controllers
->>0 ubyte 0x10 encryption controller
-# Data Acquisition and Signal Processing Controllers
->>0 ubyte 0x11 signal controller
-# Processing Accelerator
->>0 ubyte 0x12 processing accelerator
-# Non-Essential Instrumentation
->>0 ubyte 0x13 non-essential
-# reserved or unassigned
->>0 default x
-# device does not fit any defined class; Unassigned Class (Vendor specific)
->>>0 ubyte 0xFF UNASSIGNED
-# THIS SHOULD NOT HAPPEN! BUT CLASS=8f for Promise 4650_sr5.bin 8660_sr5.bin
->>>0 default x RESERVED
->>>>0 ubyte x (%#x)
-# Prog IF of PCI class code?
-# defines the specific device programming interface
->2 ubyte >0 \b, ProgIF=%u
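The same class/sub-class mapping reduced to dictionary lookups; only the storage sub-classes are spelled out here, mirroring the first branch above, and the strings are the ones used there.

```python
PCI_CLASSES = {
    0x00: "pre-class-code device",  0x01: "storage controller",
    0x02: "network controller",     0x03: "display controller",
    0x04: "multimedia controller",  0x05: "memory controller",
    0x06: "bridge device",          0x07: "communication controller",
    0x08: "base peripheral",        0x09: "input device",
    0x0A: "docking station",        0x0B: "processor",
    0x0C: "serial bus controller",  0x0D: "wireless controller",
    0x0E: "I/O controller",         0x0F: "satellite controller",
    0x10: "encryption controller",  0x11: "signal controller",
    0x12: "processing accelerator", 0x13: "non-essential",
}
STORAGE_SUBCLASSES = {0x00: "SCSI", 0x01: "IDE", 0x02: "Floppy", 0x03: "IPI",
                      0x04: "RAID", 0x05: "ATA", 0x06: "SATA", 0x07: "SAS",
                      0x08: "NVM", 0x80: "OTHER"}

def describe_pci_class(class_code: int, subclass: int, prog_if: int) -> str:
    if class_code in PCI_CLASSES:
        name = PCI_CLASSES[class_code]
    elif class_code == 0xFF:
        name = "UNASSIGNED"
    else:
        name = f"RESERVED ({class_code:#x})"
    if class_code == 0x01:
        name += " " + STORAGE_SUBCLASSES.get(subclass, f"({subclass:#x})")
    if prog_if:
        name += f", ProgIF={prog_if}"
    return name
```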
diff --git a/contrib/libs/libmagic/magic/Magdir/pcjr b/contrib/libs/libmagic/magic/Magdir/pcjr
deleted file mode 100644
index c3ab7a25fd..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pcjr
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pcjr,v 1.1 2021/01/09 15:09:58 christos Exp $
-# pcjr: file(1) magic for PCjr Cartridge image file format
-# From: Francis Laniel <laniel_francis@privacyrequired.com>
-0 string PCjr
->0x80 beshort 0x55aa PCjr Cartridge image
->0x200 beshort 0x55aa PCjr Cartridge image
diff --git a/contrib/libs/libmagic/magic/Magdir/pdf b/contrib/libs/libmagic/magic/Magdir/pdf
deleted file mode 100644
index 7a99d8d3cf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pdf
+++ /dev/null
@@ -1,51 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pdf,v 1.18 2023/07/17 15:57:18 christos Exp $
-# pdf: file(1) magic for Portable Document Format
-#
-
-0 name pdf
->8 search /Count
->>&0 regex [0-9]+ \b, %s page(s)
->8 search/512 /Filter/FlateDecode/ (zip deflate encoded)
-
-0 string %PDF- PDF document
-!:mime application/pdf
-!:strength +60
-!:ext pdf
->5 byte x \b, version %c
->7 byte x \b.%c
->0 use pdf
-
-0 string \012%PDF- PDF document
-!:mime application/pdf
-!:strength +60
-!:ext pdf
->6 byte x \b, version %c
->8 byte x \b.%c
->0 use pdf
-
-0 string \xef\xbb\xbf%PDF- PDF document (UTF-8)
-!:mime application/pdf
-!:strength +60
-!:ext pdf
->6 byte x \b, version %c
->8 byte x \b.%c
->0 use pdf
-
-# From: Nick Schmalenberger <nick@schmalenberger.us>
-# Forms Data Format
-0 string %FDF- FDF document
-!:mime application/vnd.fdf
-!:strength +60
-!:ext pdf
->5 byte x \b, version %c
->7 byte x \b.%c
-
-0 search/1024 %PDF- PDF document
-!:mime application/pdf
-!:strength +60
-!:ext pdf
->&0 byte x \b, version %c
->&2 byte x \b.%c
->0 use pdf
diff --git a/contrib/libs/libmagic/magic/Magdir/pdp b/contrib/libs/libmagic/magic/Magdir/pdp
deleted file mode 100644
index 2d18b62df5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pdp
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pdp,v 1.11 2017/03/17 21:35:28 christos Exp $
-# pdp: file(1) magic for PDP-11 executable/object and APL workspace
-#
-0 lelong 0101555 PDP-11 single precision APL workspace
-0 lelong 0101554 PDP-11 double precision APL workspace
-#
-# PDP-11 a.out
-#
-0 leshort 0407 PDP-11 executable
->8 leshort >0 not stripped
->15 byte >0 - version %d
-
-# Updated by Joerg Jenderek in Mar 2013
-# GRR: the line below is too general, as it also catches Windows precompiled setup information (*.PNF)
-0 leshort 0401
-# skip *.PNF with WinDirPathOffset 58h
->68 ulelong !0x00000058 PDP-11 UNIX/RT ldp
-# skip *.PNF with high byte of InfVersionDatumCount zero
-#>>15 byte !0 PDP-11 UNIX/RT ldp
-0 leshort 0405 PDP-11 old overlay
-
-0 leshort 0410 PDP-11 pure executable
->8 leshort >0 not stripped
->15 byte >0 - version %d
-
-0 leshort 0411 PDP-11 separate I&D executable
->8 leshort >0 not stripped
->15 byte >0 - version %d
-
-0 leshort 0437 PDP-11 kernel overlay
-
-# These last three are derived from 2.11BSD file(1)
-0 leshort 0413 PDP-11 demand-paged pure executable
->8 leshort >0 not stripped
-
-0 leshort 0430 PDP-11 overlaid pure executable
->8 leshort >0 not stripped
-
-0 leshort 0431 PDP-11 overlaid separate executable
->8 leshort >0 not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/perl b/contrib/libs/libmagic/magic/Magdir/perl
deleted file mode 100644
index 4a3756a483..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/perl
+++ /dev/null
@@ -1,100 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: perl,v 1.27 2023/07/17 16:01:36 christos Exp $
-# perl: file(1) magic for Larry Wall's perl language.
-#
-# The `eval' lines recognize an outrageously clever hack.
-# Keith Waclena <keith@cerberus.uchicago.edu>
-# Send additions to <perl5-porters@perl.org>
-0 search/1024 eval\ "exec\ perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ "exec\ /bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ "exec\ /usr/bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ "exec\ /usr/local/bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ 'exec\ perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ 'exec\ /bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ 'exec\ /usr/bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ 'exec\ /usr/local/bin/perl Perl script text
-!:mime text/x-perl
-0 search/1024 eval\ '(exit\ $?0)'\ &&\ eval\ 'exec Perl script text
-!:mime text/x-perl
-0 string #!/usr/bin/env\ perl Perl script text executable
-!:mime text/x-perl
-0 string #!\ /usr/bin/env\ perl Perl script text executable
-!:mime text/x-perl
-0 string #!
->0 regex \^#!.*/bin/perl([[:space:]].*)*$ Perl script text executable
-!:mime text/x-perl
-
-# by Dmitry V. Levin and Alexey Tourbin
-# check the first line
-0 search/8192 package
->0 regex \^package[[:space:]]+[0-9A-Za-z_:]+[[:space:]]*([[:space:]]v?[0-9][0-9.]*)?[[:space:]]*; Perl5 module source text
-!:strength + 40
-# not 'p', check other lines
-0 search/8192 !p
->0 regex \^package[[:space:]]+[0-9A-Za-z_:]+[[:space:]]*([[:space:]]v?[0-9][0-9.]*)?[[:space:]]*;
->>0 regex \^1[[:space:]]*;|\^(use|sub|my)[[:space:]].*[(;{=] Perl5 module source text
-!:strength + 75
-
-# Perl POD documents
-# From: Tom Hukins <tom@eborcom.com>
-0 search/1024/W \=pod\n Perl POD document text
-0 search/1024/W \n\=pod\n Perl POD document text
-0 search/1024/W \=head1\ Perl POD document text
-0 search/1024/W \n\=head1\ Perl POD document text
-0 search/1024/W \=head2\ Perl POD document text
-0 search/1024/W \n\=head2\ Perl POD document text
-0 search/1024/W \=encoding\ Perl POD document text
-0 search/1024/W \n\=encoding\ Perl POD document text
-
-
-# Perl Storable data files.
-0 string perl-store perl Storable (v0.6) data
->4 byte >0 (net-order %d)
->>4 byte &01 (network-ordered)
->>4 byte =3 (major 1)
->>4 byte =2 (major 1)
-
-0 string pst0 perl Storable (v0.7) data
->4 byte >0
->>4 byte &01 (network-ordered)
->>4 byte =5 (major 2)
->>4 byte =4 (major 2)
->>5 byte >0 (minor %d)
-
-# This is Debian #742949 by Zefram <zefram@fysh.org>:
-# -----------------------------------------------------------
-# The Perl module Hash::SharedMem
-# <https://metacpan.org/release/Hash-SharedMem> defines a file format
-# for a key/value store. Details of the file format are in the "DESIGN"
-# file in the module distribution. Magic:
-0 bequad =0xa58afd185cbf5af7 Hash::SharedMem master file, big-endian
->8 bequad <0x1000000
->>15 byte >2 \b, line size 2^%d byte
->>14 byte >2 \b, page size 2^%d byte
->>13 byte &1
->>>13 byte >1 \b, max fanout %d
-0 lequad =0xa58afd185cbf5af7 Hash::SharedMem master file, little-endian
->8 lequad <0x1000000
->>8 byte >2 \b, line size 2^%d byte
->>9 byte >2 \b, page size 2^%d byte
->>10 byte &1
->>>10 byte >1 \b, max fanout %d
-0 bequad =0xc693dac5ed5e47c2 Hash::SharedMem data file, big-endian
->8 bequad <0x1000000
->>15 byte >2 \b, line size 2^%d byte
->>14 byte >2 \b, page size 2^%d byte
->>13 byte &1
->>>13 byte >1 \b, max fanout %d
-0 lequad =0xc693dac5ed5e47c2 Hash::SharedMem data file, little-endian
->8 lequad <0x1000000
->>8 byte >2 \b, line size 2^%d byte
->>9 byte >2 \b, page size 2^%d byte
->>10 byte &1
->>>10 byte >1 \b, max fanout %d
diff --git a/contrib/libs/libmagic/magic/Magdir/pgf b/contrib/libs/libmagic/magic/Magdir/pgf
deleted file mode 100644
index 8318ce1338..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pgf
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pgf,v 1.3 2021/02/23 00:51:10 christos Exp $
-# pgf: file(1) magic for Progressive Graphics File (PGF)
-#
-# <http://www.libpgf.org/uploads/media/PGF_Details_01.pdf>
-# 2013 by Philipp Hahn <pmhahn debian org>
-0 string PGF Progressive Graphics image data,
-!:mime image/x-pgf
->3 string 2 version %s,
->3 string 4 version %s,
->3 string 5 version %s,
->3 string 6 version %s,
-# PGFPreHeader
-#>>4 lelong x header size %d,
-# PGFHeader
->>8 lelong x %d x
->>12 lelong x %d,
->>16 byte x %d levels,
->>17 byte x compression level %d,
->>18 byte x %d bpp,
->>19 byte x %d channels,
->>20 clear x
->>20 byte 0 bitmap,
->>20 byte 1 gray scale,
->>20 byte 2 indexed color,
->>20 byte 3 RGB color,
->>20 byte 4 CMYK color,
->>20 byte 5 HSL color,
->>20 byte 6 HSB color,
->>20 byte 7 multi-channel,
->>20 byte 8 duo tone,
->>20 byte 9 LAB color,
->>20 byte 10 gray scale 16,
->>20 byte 11 RGB color 48,
->>20 byte 12 LAB color 48,
->>20 byte 13 CMYK color 64,
->>20 byte 14 deep multi-channel,
->>20 byte 15 duo tone 16,
->>20 byte 17 RGBA color,
->>20 byte 18 gray scale 32,
->>20 byte 19 RGB color 12,
->>20 byte 20 RGB color 16,
->>20 byte 255 unknown format,
->>20 default x format
->>>20 byte x \b %d,
->>21 byte x %d bpc
-# PGFPostHeader
-# Level-Sizes
-#>>(4.l+4) lelong x level 0 size: %d
-#>>(4.l+8) lelong x level 1 size: %d
-#>>(4.l+12) lelong x level 2 size: %d
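The PGFHeader fields listed above sit at fixed offsets right after the 8-byte pre-header, so a single unpack covers them; a sketch with the mode names copied from the table above (function name illustrative):

```python
import struct

PGF_MODES = {0: "bitmap", 1: "gray scale", 2: "indexed color", 3: "RGB color",
             4: "CMYK color", 5: "HSL color", 6: "HSB color", 7: "multi-channel",
             8: "duo tone", 9: "LAB color", 10: "gray scale 16",
             11: "RGB color 48", 12: "LAB color 48", 13: "CMYK color 64",
             14: "deep multi-channel", 15: "duo tone 16", 17: "RGBA color",
             18: "gray scale 32", 19: "RGB color 12", 20: "RGB color 16"}

def describe_pgf(buf: bytes):
    if buf[:3] != b"PGF":
        return None
    version = chr(buf[3])
    width, height = struct.unpack_from("<ii", buf, 8)
    levels, quality, bpp, channels, mode, bpc = struct.unpack_from("<6B", buf, 16)
    mode_name = PGF_MODES.get(mode, f"format {mode}")
    return (f"Progressive Graphics image, version {version}, {width} x {height}, "
            f"{levels} levels, compression level {quality}, {bpp} bpp, "
            f"{channels} channels, {mode_name}, {bpc} bpc")
```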
diff --git a/contrib/libs/libmagic/magic/Magdir/pgp b/contrib/libs/libmagic/magic/Magdir/pgp
deleted file mode 100644
index d81883868b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pgp
+++ /dev/null
@@ -1,581 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pgp,v 1.25 2021/04/26 15:56:00 christos Exp $
-# pgp: file(1) magic for Pretty Good Privacy
-
-# Handling of binary PGP keys is in pgp-binary-keys.
-# see https://lists.gnupg.org/pipermail/gnupg-devel/1999-September/016052.html
-#
-0 beshort 0xa600 PGP encrypted data
-#!:mime application/pgp-encrypted
-#0 string -----BEGIN\040PGP text/PGP armored data
-!:mime text/PGP # encoding: armored data
-#>15 string PUBLIC\040KEY\040BLOCK- public key block
-#>15 string MESSAGE- message
-#>15 string SIGNED\040MESSAGE- signed message
-#>15 string PGP\040SIGNATURE- signature
-
-# Update: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/Pretty_Good_Privacy
-# Reference: https://reposcope.com/mimetype/application/pgp-keys
-2 string ---BEGIN\040PGP\040PRIVATE\040KEY\040BLOCK- PGP private key block
-#!:mime text/PGP
-!:mime application/pgp-keys
-!:ext asc
-2 string ---BEGIN\040PGP\040PUBLIC\040KEY\040BLOCK- PGP public key block
-!:mime application/pgp-keys
-!:ext asc
->10 search/100 \n\n
->>&0 use pgp
-0 string -----BEGIN\040PGP\040MESSAGE- PGP message
-# https://reposcope.com/mimetype/application/pgp-encrypted
-#!:mime application/pgp
-!:mime application/pgp-encrypted
-!:ext asc
-#!:ext asc/pgp/gpg
->10 search/100 \n\n
->>&0 use pgp
-# Reference: https://www.gnupg.org/gph/en/manual/x135.html
-0 string -----BEGIN\040PGP\040SIGNED\040MESSAGE- PGP signed message
-#!:mime text/plain
-!:mime text/PGP
-#!:mime application/pgp
-!:ext asc
-0 string -----BEGIN\040PGP\040SIGNATURE- PGP signature
-# https://reposcope.com/mimetype/application/pgp-signature
-!:mime application/pgp-signature
-!:ext asc
->10 search/100 \n\n
->>&0 use pgp
-
-# Decode the type of the packet based on its base64 encoding.
-# Idea from Mark Martinec
-# The specification is in RFC 4880, section 4.2 and 4.3:
-# https://tools.ietf.org/html/rfc4880#section-4.2
-
-0 name pgp
->0 byte 0x67 Reserved (old)
->0 byte 0x68 Public-Key Encrypted Session Key (old)
->0 byte 0x69 Signature (old)
->0 byte 0x6a Symmetric-Key Encrypted Session Key (old)
->0 byte 0x6b One-Pass Signature (old)
->0 byte 0x6c Secret-Key (old)
->0 byte 0x6d Public-Key (old)
->0 byte 0x6e Secret-Subkey (old)
->0 byte 0x6f Compressed Data (old)
->0 byte 0x70 Symmetrically Encrypted Data (old)
->0 byte 0x71 Marker (old)
->0 byte 0x72 Literal Data (old)
->0 byte 0x73 Trust (old)
->0 byte 0x74 User ID (old)
->0 byte 0x75 Public-Subkey (old)
->0 byte 0x76 Unused (old)
->0 byte 0x77
->>1 byte&0xc0 0x00 Reserved
->>1 byte&0xc0 0x40 Public-Key Encrypted Session Key
->>1 byte&0xc0 0x80 Signature
->>1 byte&0xc0 0xc0 Symmetric-Key Encrypted Session Key
->0 byte 0x78
->>1 byte&0xc0 0x00 One-Pass Signature
->>1 byte&0xc0 0x40 Secret-Key
->>1 byte&0xc0 0x80 Public-Key
->>1 byte&0xc0 0xc0 Secret-Subkey
->0 byte 0x79
->>1 byte&0xc0 0x00 Compressed Data
->>1 byte&0xc0 0x40 Symmetrically Encrypted Data
->>1 byte&0xc0 0x80 Marker
->>1 byte&0xc0 0xc0 Literal Data
->0 byte 0x7a
->>1 byte&0xc0 0x00 Trust
->>1 byte&0xc0 0x40 User ID
->>1 byte&0xc0 0x80 Public-Subkey
->>1 byte&0xc0 0xc0 Unused [z%x]
->0 byte 0x30
->>1 byte&0xc0 0x00 Unused [0%x]
->>1 byte&0xc0 0x40 User Attribute
->>1 byte&0xc0 0x80 Sym. Encrypted and Integrity Protected Data
->>1 byte&0xc0 0xc0 Modification Detection Code
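The table above recovers the packet type from the first one or two base64 characters of armored data; for raw binary packets the same tag can be read straight from the CTB, split as described in RFC 4880 section 4.2. A sketch of that decoding (tag names repeat the ones used above):

```python
PACKET_TAGS = {1: "Public-Key Encrypted Session Key", 2: "Signature",
               3: "Symmetric-Key Encrypted Session Key", 4: "One-Pass Signature",
               5: "Secret-Key", 6: "Public-Key", 7: "Secret-Subkey",
               8: "Compressed Data", 9: "Symmetrically Encrypted Data",
               10: "Marker", 11: "Literal Data", 12: "Trust", 13: "User ID",
               14: "Public-Subkey", 17: "User Attribute",
               18: "Sym. Encrypted and Integrity Protected Data",
               19: "Modification Detection Code"}

def decode_ctb(ctb: int):
    """Split a PGP CTB into (style, tag name) per RFC 4880 section 4.2."""
    if not ctb & 0x80:
        raise ValueError("bit 7 must be set in a CTB")
    if ctb & 0x40:                        # new-style: 6 bits of tag
        tag = ctb & 0x3F
    else:                                 # old-style: 4 bits of tag, 2 length bits
        tag = (ctb >> 2) & 0x0F
    return ("new" if ctb & 0x40 else "old"), PACKET_TAGS.get(tag, f"tag {tag}")

print(decode_ctb(0x9d))   # ('old', 'Secret-Subkey'), cf. the 0x9d entry below
print(decode_ctb(0xc2))   # ('new', 'Signature')
```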
-
-# magic signatures to detect PGP crypto material (from stef)
-# detects and extracts metadata from:
-# - symmetric encrypted packet header
-# - RSA (e=65537) secret (sub-)keys
-
-# 1024b RSA encrypted data
-
-0 string \x84\x8c\x03 PGP RSA encrypted session key -
->3 belong x keyid: %08X
->7 belong x %08X
->11 byte 0x01 RSA (Encrypt or Sign) 1024b
->11 byte 0x02 RSA Encrypt-Only 1024b
->12 string \x04\x00
->12 string \x03\xff
->12 string \x03\xfe
->12 string \x03\xfd
->12 string \x03\xfc
->12 string \x03\xfb
->12 string \x03\xfa
->12 string \x03\xf9
->142 byte 0xd2 .
-
-# 2048b RSA encrypted data
-
-0 string \x85\x01\x0c\x03 PGP RSA encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x01 RSA (Encrypt or Sign) 2048b
->12 byte 0x02 RSA Encrypt-Only 2048b
->13 string \x08\x00
->13 string \x07\xff
->13 string \x07\xfe
->13 string \x07\xfd
->13 string \x07\xfc
->13 string \x07\xfb
->13 string \x07\xfa
->13 string \x07\xf9
->271 byte 0xd2 .
-
-# 3072b RSA encrypted data
-
-0 string \x85\x01\x8c\x03 PGP RSA encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x01 RSA (Encrypt or Sign) 3072b
->12 byte 0x02 RSA Encrypt-Only 3072b
->13 string \x0c\x00
->13 string \x0b\xff
->13 string \x0b\xfe
->13 string \x0b\xfd
->13 string \x0b\xfc
->13 string \x0b\xfb
->13 string \x0b\xfa
->13 string \x0b\xf9
->399 byte 0xd2 .
-
-# 4096b RSA encrypted data
-
-0 string \x85\x02\x0c\x03 PGP RSA encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x01 RSA (Encrypt or Sign) 4096b
->12 byte 0x02 RSA Encrypt-Only 4096b
->13 string \x10\x00
->13 string \x0f\xff
->13 string \x0f\xfe
->13 string \x0f\xfd
->13 string \x0f\xfc
->13 string \x0f\xfb
->13 string \x0f\xfa
->13 string \x0f\xf9
->527 byte 0xd2 .
-
-# 8192b RSA encrypted data
-
-0 string \x85\x04\x0c\x03 PGP RSA encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x01 RSA (Encrypt or Sign) 8192b
->12 byte 0x02 RSA Encrypt-Only 8192b
->13 string \x20\x00
->13 string \x1f\xff
->13 string \x1f\xfe
->13 string \x1f\xfd
->13 string \x1f\xfc
->13 string \x1f\xfb
->13 string \x1f\xfa
->13 string \x1f\xf9
->1039 byte 0xd2 .
-
-# 1024b Elgamal encrypted data
-
-0 string \x85\x01\x0e\x03 PGP Elgamal encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x10 Elgamal Encrypt-Only 1024b.
->13 string \x04\x00
->13 string \x03\xff
->13 string \x03\xfe
->13 string \x03\xfd
->13 string \x03\xfc
->13 string \x03\xfb
->13 string \x03\xfa
->13 string \x03\xf9
-
-# 2048b Elgamal encrypted data
-
-0 string \x85\x02\x0e\x03 PGP Elgamal encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x10 Elgamal Encrypt-Only 2048b.
->13 string \x08\x00
->13 string \x07\xff
->13 string \x07\xfe
->13 string \x07\xfd
->13 string \x07\xfc
->13 string \x07\xfb
->13 string \x07\xfa
->13 string \x07\xf9
-
-# 3072b Elgamal encrypted data
-
-0 string \x85\x03\x0e\x03 PGP Elgamal encrypted session key -
->4 belong x keyid: %08X
->8 belong x %08X
->12 byte 0x10 Elgamal Encrypt-Only 3072b.
->13 string \x0c\x00
->13 string \x0b\xff
->13 string \x0b\xfe
->13 string \x0b\xfd
->13 string \x0b\xfc
->13 string \x0b\xfb
->13 string \x0b\xfa
->13 string \x0b\xf9
-
-# crypto algo mapper
-
-0 name crypto
->0 byte 0x00 Plaintext or unencrypted data
->0 byte 0x01 IDEA
->0 byte 0x02 TripleDES
->0 byte 0x03 CAST5 (128 bit key)
->0 byte 0x04 Blowfish (128 bit key, 16 rounds)
->0 byte 0x07 AES with 128-bit key
->0 byte 0x08 AES with 192-bit key
->0 byte 0x09 AES with 256-bit key
->0 byte 0x0a Twofish with 256-bit key
-
-# hash algo mapper
-
-0 name hash
->0 byte 0x01 MD5
->0 byte 0x02 SHA-1
->0 byte 0x03 RIPE-MD/160
->0 byte 0x08 SHA256
->0 byte 0x09 SHA384
->0 byte 0x0a SHA512
->0 byte 0x0b SHA224
-
-# display public key algorithms as human readable text
-0 name key_algo
->0 byte 0x01 RSA (Encrypt or Sign)
-# keep old look of version 5.28 without parentheses
->0 byte 0x02 RSA Encrypt-Only
->0 byte 0x03 RSA (Sign-Only)
->0 byte 16 ElGamal (Encrypt-Only)
->0 byte 17 DSA
->0 byte 18 Elliptic Curve
->0 byte 19 ECDSA
->0 byte 20 ElGamal (Encrypt or Sign)
->0 byte 21 Diffie-Hellman
->0 default x
->>0 ubyte <22 unknown (pub %d)
-# this should never happen
->>0 ubyte >21 invalid (%d)
-
-# pgp symmetric encrypted data
-
-0 byte 0x8c PGP symmetric key encrypted data -
->1 byte 0x0d
->1 byte 0x0c
->2 byte 0x04
->3 use crypto
->4 byte 0x01 salted -
->>5 use hash
->>14 byte 0xd2 .
->>14 byte 0xc9 .
->4 byte 0x03 salted & iterated -
->>5 use hash
->>15 byte 0xd2 .
->>15 byte 0xc9 .
-
-# encrypted keymaterial needs s2k & can be checksummed/hashed
-
-0 name chkcrypto
->0 use crypto
->1 byte 0x00 Simple S2K
->1 byte 0x01 Salted S2K
->1 byte 0x03 Salted&Iterated S2K
->2 use hash
-
-# all PGP keys start with this prolog
-# containing version, creation date, and purpose
-
-0 name keyprolog
->0 byte 0x04
->1 beldate x created on %s -
->5 byte 0x01 RSA (Encrypt or Sign)
->5 byte 0x02 RSA Encrypt-Only
-
-# end of secret keys known signature
-# contains e=65537 and the prolog to
-# the encrypted parameters
-
-0 name keyend
->0 string \x00\x11\x01\x00\x01 e=65537
->5 use crypto
->5 byte 0xff checksummed
->>6 use chkcrypto
->5 byte 0xfe hashed
->>6 use chkcrypto
-
-# PGP secret keys contain also the public parts
-# these vary by bitsize of the key
-
-0 name x1024
->0 use keyprolog
->6 string \x03\xfe
->6 string \x03\xff
->6 string \x04\x00
->136 use keyend
-
-0 name x2048
->0 use keyprolog
->6 string \x80\x00
->6 string \x07\xfe
->6 string \x07\xff
->264 use keyend
-
-0 name x3072
->0 use keyprolog
->6 string \x0b\xfe
->6 string \x0b\xff
->6 string \x0c\x00
->392 use keyend
-
-0 name x4096
->0 use keyprolog
->6 string \x10\x00
->6 string \x0f\xfe
->6 string \x0f\xff
->520 use keyend
-
-# \x00|\x1f[\xfe\xff]).{1024})'
-0 name x8192
->0 use keyprolog
->6 string \x20\x00
->6 string \x1f\xfe
->6 string \x1f\xff
->1032 use keyend
-
-# depending on the size of the pkt
-# we branch into the proper key size
-# signatures defined as x{keysize}
-
-0 name pgpkey
->0 string \x01\xd8 1024b
->>2 use x1024
->0 string \x01\xeb 1024b
->>2 use x1024
->0 string \x01\xfb 1024b
->>2 use x1024
->0 string \x01\xfd 1024b
->>2 use x1024
->0 string \x01\xf3 1024b
->>2 use x1024
->0 string \x01\xee 1024b
->>2 use x1024
->0 string \x01\xfe 1024b
->>2 use x1024
->0 string \x01\xf4 1024b
->>2 use x1024
->0 string \x02\x0d 1024b
->>2 use x1024
->0 string \x02\x03 1024b
->>2 use x1024
->0 string \x02\x05 1024b
->>2 use x1024
->0 string \x02\x15 1024b
->>2 use x1024
->0 string \x02\x00 1024b
->>2 use x1024
->0 string \x02\x10 1024b
->>2 use x1024
->0 string \x02\x04 1024b
->>2 use x1024
->0 string \x02\x06 1024b
->>2 use x1024
->0 string \x02\x16 1024b
->>2 use x1024
->0 string \x03\x98 2048b
->>2 use x2048
->0 string \x03\xab 2048b
->>2 use x2048
->0 string \x03\xbb 2048b
->>2 use x2048
->0 string \x03\xbd 2048b
->>2 use x2048
->0 string \x03\xcd 2048b
->>2 use x2048
->0 string \x03\xb3 2048b
->>2 use x2048
->0 string \x03\xc3 2048b
->>2 use x2048
->0 string \x03\xc5 2048b
->>2 use x2048
->0 string \x03\xd5 2048b
->>2 use x2048
->0 string \x03\xae 2048b
->>2 use x2048
->0 string \x03\xbe 2048b
->>2 use x2048
->0 string \x03\xc0 2048b
->>2 use x2048
->0 string \x03\xd0 2048b
->>2 use x2048
->0 string \x03\xb4 2048b
->>2 use x2048
->0 string \x03\xc4 2048b
->>2 use x2048
->0 string \x03\xc6 2048b
->>2 use x2048
->0 string \x03\xd6 2048b
->>2 use x2048
->0 string \x05X 3072b
->>2 use x3072
->0 string \x05k 3072b
->>2 use x3072
->0 string \x05{ 3072b
->>2 use x3072
->0 string \x05} 3072b
->>2 use x3072
->0 string \x05\x8d 3072b
->>2 use x3072
->0 string \x05s 3072b
->>2 use x3072
->0 string \x05\x83 3072b
->>2 use x3072
->0 string \x05\x85 3072b
->>2 use x3072
->0 string \x05\x95 3072b
->>2 use x3072
->0 string \x05n 3072b
->>2 use x3072
->0 string \x05\x7e 3072b
->>2 use x3072
->0 string \x05\x80 3072b
->>2 use x3072
->0 string \x05\x90 3072b
->>2 use x3072
->0 string \x05t 3072b
->>2 use x3072
->0 string \x05\x84 3072b
->>2 use x3072
->0 string \x05\x86 3072b
->>2 use x3072
->0 string \x05\x96 3072b
->>2 use x3072
->0 string \x07[ 4096b
->>2 use x4096
->0 string \x07\x18 4096b
->>2 use x4096
->0 string \x07+ 4096b
->>2 use x4096
->0 string \x07; 4096b
->>2 use x4096
->0 string \x07= 4096b
->>2 use x4096
->0 string \x07M 4096b
->>2 use x4096
->0 string \x073 4096b
->>2 use x4096
->0 string \x07C 4096b
->>2 use x4096
->0 string \x07E 4096b
->>2 use x4096
->0 string \x07U 4096b
->>2 use x4096
->0 string \x07. 4096b
->>2 use x4096
->0 string \x07> 4096b
->>2 use x4096
->0 string \x07@ 4096b
->>2 use x4096
->0 string \x07P 4096b
->>2 use x4096
->0 string \x074 4096b
->>2 use x4096
->0 string \x07D 4096b
->>2 use x4096
->0 string \x07F 4096b
->>2 use x4096
->0 string \x07V 4096b
->>2 use x4096
->0 string \x0e[ 8192b
->>2 use x8192
->0 string \x0e\x18 8192b
->>2 use x8192
->0 string \x0e+ 8192b
->>2 use x8192
->0 string \x0e; 8192b
->>2 use x8192
->0 string \x0e= 8192b
->>2 use x8192
->0 string \x0eM 8192b
->>2 use x8192
->0 string \x0e3 8192b
->>2 use x8192
->0 string \x0eC 8192b
->>2 use x8192
->0 string \x0eE 8192b
->>2 use x8192
->0 string \x0eU 8192b
->>2 use x8192
->0 string \x0e. 8192b
->>2 use x8192
->0 string \x0e> 8192b
->>2 use x8192
->0 string \x0e@ 8192b
->>2 use x8192
->0 string \x0eP 8192b
->>2 use x8192
->0 string \x0e4 8192b
->>2 use x8192
->0 string \x0eD 8192b
->>2 use x8192
->0 string \x0eF 8192b
->>2 use x8192
->0 string \x0eV 8192b
->>2 use x8192
-
-# PGP RSA (e=65537) secret (sub-)key header
-
-0 byte 0x97 PGP Secret Sub-key -
->1 use pgpkey
-0 byte 0x9d
-# Update: Joerg Jenderek
-# secret subkey packet (tag 7) with same structure as secret key packet (tag 5)
-# skip Fetus.Sys16 CALIBUS.MAIN OrbFix.Sys16.Ex by looking for positive len
->1 ubeshort >0
-#>1 ubeshort x \b, body length %#x
-# next packet type often 88h,89h~(tag 2)~Signature Packet
-#>>(1.S+3) ubyte x \b, next packet type %#x
-# skip Dragon.SHR DEMO.INIT by looking for positive version
->>3 ubyte >0
-# skip BUISSON.13 GUITAR1 by looking for low version number
->>>3 ubyte <5 PGP Secret Sub-key
-# sub-keys are normally part of a secret key, so they do not occur as standalone files
-#!:ext bin
-# version 2,3~old 4~new . Comment following line for version 5.28 look
->>>>3 ubyte x (v%d)
->>>>3 ubyte x -
-# old versions 2 or 3 but no real example found
->>>>3 ubyte <4
-# 2 byte for key bits in version 5.28 look
->>>>>11 ubeshort x %db
->>>>>4 beldate x created on %s -
-# old versions use 2 additional bytes after time stamp
-#>>>>>8 ubeshort x %#x
-# display key algorithm 1~RSA Encrypt|Sign - 21~Diffie-Hellman
->>>>>10 use key_algo
->>>>>(11.S/8) ubequad x
-# look after first key
->>>>>>&5 use keyend
-# new version
->>>>3 ubyte >3
->>>>>9 ubeshort x %db
->>>>>4 beldate x created on %s -
-# display key algorithm
->>>>>8 use key_algo
->>>>>(9.S/8) ubequad x
-# look after first key for something like s2k
->>>>>>&3 use keyend
diff --git a/contrib/libs/libmagic/magic/Magdir/pgp-binary-keys b/contrib/libs/libmagic/magic/Magdir/pgp-binary-keys
deleted file mode 100644
index 1ce76d907b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pgp-binary-keys
+++ /dev/null
@@ -1,388 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pgp-binary-keys,v 1.2 2021/04/26 15:56:00 christos Exp $
-# pgp-binary-keys: This file handles pgp binary keys.
-#
-# A PGP certificate or message doesn't have a fixed header. Instead,
-# they are sequences of packets:
-#
-# https://tools.ietf.org/html/rfc4880#section-4.3
-#
-# whose order conforms to a grammar:
-#
-# https://tools.ietf.org/html/rfc4880#section-11
-#
-# Happily most packets have a few fields that are constrained, which
-# allow us to fingerprint them with relatively high certainty.
-#
-# A PGP packet is described by a single byte: the so-called CTB. The
-# high-bit is always set. If bit 6 is set, then it is a so-called
-# new-style CTB; if bit 6 is clear, then it is a so-called old-style
-# CTB. Old-style CTBs have only four bits of type information; bits
-# 1-0 are used to describe the length. New-style CTBs have 6 bits of
-# type information.
-#
-# Following the CTB is the packet's length in bytes. If we blindly
-# advance the file cursor by this amount past the end of the length
-# information we come to the next packet.
-#
-# Data Structures
-# ===============
-#
-# New Style CTB
-# -------------
-#
-# https://tools.ietf.org/html/rfc4880#section-4.2.2
-#
-# 76543210
-# ||\----/
-# || tag
-# |always 1
-# always 1
-#
-# Tag bits 7 and 6 set
-# 0 0xC0 -- Reserved - a packet tag MUST NOT have this value
-# 1 0xC1 -- Public-Key Encrypted Session Key Packet
-# 2 0xC2 -- Signature Packet
-# 3 0xC3 -- Symmetric-Key Encrypted Session Key Packet
-# 4 0xC4 -- One-Pass Signature Packet
-# 5 0xC5 -- Secret-Key Packet
-# 6 0xC6 -- Public-Key Packet
-# 7 0xC7 -- Secret-Subkey Packet
-# 8 0xC8 -- Compressed Data Packet
-# 9 0xC9 -- Symmetrically Encrypted Data Packet
-# 10 0xCA -- Marker Packet
-# 11 0xCB -- Literal Data Packet
-# 12 0xCC -- Trust Packet
-# 13 0xCD -- User ID Packet
-# 14 0xCE -- Public-Subkey Packet
-# 17 0xD1 -- User Attribute Packet
-# 18 0xD2 -- Sym. Encrypted and Integrity Protected Data Packet
-# 19 0xD3 -- Modification Detection Code Packet
-# 60 to 63 -- Private or Experimental Values
-#
-# The CTB is followed by the length header, which is densely encoded:
-#
-# if length[0] is:
-# 0..191: one byte length (length[0])
-# 192..223: two byte length ((length[0] - 192) * 256 + length[1] + 192)
-# 224..254: partial body encoding
-# 255: five byte length (big endian interpretation of length[1..4])
-#
-# The partial body encoding is similar to HTTP's chunk encoding. It
-# is only allowed for container packets (SEIP, Compressed Data and
-# Literal).
-#
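As a reading aid, the new-style length decoding just described can be sketched in a few lines of Python (function name and return convention are illustrative, not part of this magic file):

    def decode_new_length(buf):
        # Return (body_length, length_header_size) per RFC 4880 section 4.2.2.
        first = buf[0]
        if first < 192:                                    # one-octet length
            return first, 1
        if first < 224:                                    # two-octet length
            return ((first - 192) << 8) + buf[1] + 192, 2
        if first == 255:                                   # five-octet length
            return int.from_bytes(buf[1:5], 'big'), 5
        return 1 << (first & 0x1F), 1                      # 224..254: partial body (containers only)
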
-# Old Style CTB
-# -------------
-#
-# https://tools.ietf.org/html/rfc4880#section-4.2.1
-#
-# CTB:
-#
-# 76543210
-# ||\--/\/
-# || | length encoding
-# || tag
-# |always 0
-# always 1
-#
-# Tag:
-#
-# Tag bit 7 set, bits 6, 1, 0 clear
-# 0 0x80 -- Reserved - a packet tag MUST NOT have this value
-# 1 0x84 -- Public-Key Encrypted Session Key Packet
-# 2 0x88 -- Signature Packet
-# 3 0x8C -- Symmetric-Key Encrypted Session Key Packet
-# 4 0x90 -- One-Pass Signature Packet
-# 5 0x94 -- Secret-Key Packet
-# 6 0x98 -- Public-Key Packet
-# 7 0x9C -- Secret-Subkey Packet
-# 8 0xA0 -- Compressed Data Packet
-# 9 0xA4 -- Symmetrically Encrypted Data Packet
-# 10 0xA8 -- Marker Packet
-# 11 0xAC -- Literal Data Packet
-# 12 0xB0 -- Trust Packet
-# 13 0xB4 -- User ID Packet
-# 14 0xB8 -- Public-Subkey Packet
-#
-# Length encoding:
-#
-# Value
-# 0 1 byte length (following byte is the length)
-# 1 2 byte length (following two bytes are the length)
-# 2 4 byte length (following four bytes are the length)
-# 3 indeterminate length: natural end of packet, e.g., EOF
-#
-# An indeterminate length is only allowed for container packets
-# (SEIP, Compressed Data and Literal).
-#
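The old-style CTB and its length field can be sketched the same way (again illustrative Python):

    def decode_old_ctb(buf):
        # Return (tag, body_length, size of CTB plus length field); body_length is
        # None for the indeterminate encoding, which runs to the end of the data.
        ctb = buf[0]
        tag = (ctb >> 2) & 0x0F            # tag lives in bits 5..2
        length_type = ctb & 0x03           # bits 1..0 select the length encoding
        if length_type == 0:
            return tag, buf[1], 2
        if length_type == 1:
            return tag, int.from_bytes(buf[1:3], 'big'), 3
        if length_type == 2:
            return tag, int.from_bytes(buf[1:5], 'big'), 5
        return tag, None, 1                # indeterminate length
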
-# Certificates
-# ------------
-#
-# We check the first three packets to determine if a sequence of
-# OpenPGP packets is likely to be a certificate. The grammar allows
-# the following prefixes:
-#
-# [Primary Key] [SIG] (EOF or another certificate)
-# [Primary Key] [SIG] [User ID] [SIG]...
-# [Primary Key] [SIG] [User Attribute] [SIG]...
-# [Primary Key] [SIG] [Subkey] [SIG]...
-# [Primary Key] [User ID] [SIG]...
-# [Primary Key] [User Attribute] [SIG]...
-# [Primary Key] [Subkey] [SIG]...
-#
-# Any number of marker packets are also allowed between each packet,
-# but they are not normally used and we don't currently check for
-# them.
-#
-# The keys and subkeys may be public or private.
-#
-
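Putting the two decoders above together, the three-packet check that the rules below implement looks roughly like this (a sketch only; the tag sets follow the prefixes listed above, and all names are illustrative):

    PRIMARY_TAGS = {5, 6}             # Secret-Key, Public-Key
    FOLLOW_TAGS = {2, 7, 13, 14, 17}  # Signature, Secret-Subkey, User ID, Public-Subkey, User Attribute

    def looks_like_certificate(data):
        # Check that the first three packets carry tags a certificate may start with.
        pos = 0
        for i in range(3):
            ctb = data[pos]
            if not (ctb & 0x80):                       # the high bit must always be set
                return False
            if ctb & 0x40:                             # new-style CTB
                tag = ctb & 0x3F
                body, hdr = decode_new_length(data[pos + 1:])
            else:                                      # old-style CTB
                tag, body, hdr = decode_old_ctb(data[pos:])
                hdr -= 1                               # decode_old_ctb already counts the CTB byte
            if tag not in (PRIMARY_TAGS if i == 0 else FOLLOW_TAGS):
                return False
            if body is None:                           # indeterminate length: cannot advance further
                break
            pos += 1 + hdr + body
        return True
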
-# Key packets and signature packets are versioned. There are two
-# packet versions that we need to worry about in practice: v3 and v4.
-# v4 packets were introduced in RFC 2440, which was published in 1998.
-# It also deprecated v3 packets. There are no actively used v3
-# certificates (GnuPG removed the code to support them in November
-# 2014). But there are v3 keys lying around and it is useful to
-# identify them. The next version of OpenPGP will introduce v5 keys.
-# The document has not yet been standardized so changes are still
-# possible. But, for our purposes, it appears that v5 data structures
-# will be identical to v4 data structures modulo the version number.
-#
-# https://tools.ietf.org/html/rfc2440
-# https://lists.gnupg.org/pipermail/gnupg-announce/2014q4/000358.html
-# https://www.ietf.org/id/draft-ietf-openpgp-rfc4880bis-09.html#name-key-material-packet
-
-
-
-
-# The first packet has to be a public key or a secret key.
-#
-# New-Style Public Key
-0 ubyte =0xC6 OpenPGP Public Key
->&0 use primary_key_length_new
-# New-Style Secret Key
-0 ubyte =0xC5 OpenPGP Secret Key
->&0 use primary_key_length_new
-# Old-Style Public Key
-0 ubyte&0xFC =0x98 OpenPGP Public Key
->&-1 use primary_key_length_old
-# Old-Style Secret Key
-0 ubyte&0xFC =0x94 OpenPGP Secret Key
->&-1 use primary_key_length_old
-
-# Parse the length, check the packet's body and finally advance to the
-# next packet.
-
-# There are 4 different new-style length encodings, but the partial
-# body encoding is only acceptable for the SEIP, Compressed Data, and
-# Literal packets, none of which is valid in a certificate, so we
-# ignore it.
-0 name primary_key_length_new
->&0 ubyte <192
-#>>&0 ubyte x (1 byte length encoding, %d bytes)
->>&0 use pgp_binary_key_pk_check
->>>&(&-1.B) use sig_or_component_1
->&0 ubyte >191
->>&-1 ubyte <225
-# offset = ((offset[0] - 192) << 8) + offset[1] + 192 (for the length header)
-# raw - (192 * 256 - 192)
-# = 48960 (see the short check after this named pattern)
-#>>>&0 ubeshort x (2 byte length encoding, %d bytes)
->>>&1 use pgp_binary_key_pk_check
->>>>&(&-2.S-48960) use sig_or_component_1
->&0 ubyte =255
-#>>&0 belong x (5 byte length encoding, %d bytes)
->>&4 use pgp_binary_key_pk_check
->>>&(&-4.L) use sig_or_component_1
-# Partial body encoding (only valid for container packets).
-# >&0 ubyte >224
-# >>&0 ubyte <255 partial body encoding
-
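The 48960 constant above can be sanity-checked in two lines of Python (purely illustrative; the 48959 constant used in the later copies differs only by the extra +1 for the CTB byte):

    b0, b1 = 0xC0, 0x05                               # an example two-octet length header
    assert ((b0 - 192) << 8) + b1 + 192 == ((b0 << 8) | b1) - 48960 == 197
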
-# There are 4 different old-style length encodings, but the
-# indeterminate length encoding is only acceptable for the SEIP,
-# Compressed Data, and Literal packets, none of which is valid in a
-# certificate.
-0 name primary_key_length_old
-#>&0 ubyte x (ctb: %x)
->&0 ubyte&0x3 =0
-#>>&0 ubyte x (1 byte length encoding, %d bytes)
->>&1 use pgp_binary_key_pk_check
->>>&(&-1.B) use sig_or_component_1
->&0 ubyte&0x3 =1
-#>>&0 ubeshort x (2 byte length encoding, %d bytes)
->>&2 use pgp_binary_key_pk_check
->>>&(&-2.S) use sig_or_component_1
->&0 ubyte&0x3 =2
-#>>&0 ubelong x (4 byte length encoding, %d bytes)
->>&4 use pgp_binary_key_pk_check
->>>&(&-4.L) use sig_or_component_1
-
-# Check the Key.
-#
-# https://tools.ietf.org/html/rfc4880#section-5.5.2
-0 name pgp_binary_key_pk_check
-# Valid versions are: 2, 3, 4. 5 is proposed in RFC 4880bis.
-# Anticipate a v6 / v7 format that, like v5, is compatible with the v4
-# key format in a decade or so :D.
->&0 ubyte >1
->>&-1 ubyte <8
->>>&-1 byte x Version %d
-# Check that keys were created after 1990.
-# (1990 - 1970) * 365.2524 * 24 * 60 * 60 = 631156147
->>>&0 bedate >631156147 \b, Created %s
->>>>&-5 ubyte >3
->>>>>&4 use pgp_binary_key_algo
->>>>&-5 ubyte <4
->>>>>&6 use pgp_binary_key_algo
-
-# Print out the key's algorithm and the number of bits, if this is
-# relevant (ECC keys are a fixed size).
-0 name pgp_binary_key_algo
->0 clear x
->&0 ubyte =1 \b, RSA (Encrypt or Sign,
->>&0 ubeshort x \b %d bits)
->&0 ubyte =2 \b, RSA (Encrypt,
->>&0 ubeshort x \b %d bits)
->&0 ubyte =3 \b, RSA (Sign,
->>&0 ubeshort x \b %d bits)
->&0 ubyte =16 \b, El Gamal (Encrypt,
->>&0 ubeshort x \b %d bits)
->&0 ubyte =17 \b, DSA
->>&0 ubeshort x \b (%d bits)
->&0 ubyte =18 \b, ECDH
->&0 ubyte =19 \b, ECDSA
->&0 ubyte =20 \b, El Gamal (Encrypt or Sign,
->>&0 ubeshort x \b %d bits)
->&0 ubyte =22 \b, EdDSA
->&0 default x
->>&0 ubyte x \b, Unknown Algorithm (%#x)
-
-# Match all possible second packets.
-0 name sig_or_component_1
-#>0 ubyte x (ctb: %x)
->&0 ubyte =0xC2
->>0 ubyte x \b; Signature
->>&0 use sig_or_component_1_length_new
->&0 ubyte =0xCD
->>0 ubyte x \b; User ID
->>&0 use sig_or_component_1_length_new
->&0 ubyte =0xCE
->>0 ubyte x \b; Public Subkey
->>&0 use sig_or_component_1_length_new
->&0 ubyte =0xC7
->>0 ubyte x \b; Secret Subkey
->>&0 use sig_or_component_1_length_new
->&0 ubyte =0xD1
->>0 ubyte x \b; User Attribute
->>&0 use sig_or_component_1_length_new
->&0 ubyte&0xFC =0x88
->>0 ubyte x \b; Signature
->>&-1 use sig_or_component_1_length_old
->&0 ubyte&0xFC =0xB4
->>0 ubyte x \b; User ID
->>&-1 use sig_or_component_1_length_old
->&0 ubyte&0xFC =0xB8
->>0 ubyte x \b; Public Subkey
->>&-1 use sig_or_component_1_length_old
->&0 ubyte&0xFC =0x9C
->>0 ubyte x \b; Secret Subkey
->>&-1 use sig_or_component_1_length_old
-
-# Copy of 'primary_key_length_new', but calls cert_packet_3.
-0 name sig_or_component_1_length_new
->&0 ubyte <192
-#>>&0 ubyte x (1 byte new length encoding, %d bytes)
->>&(&-1.B) use cert_packet_3
->&0 ubyte >191
->>&-1 ubyte <225
-# offset = ((offset[0] - 192) << 8) + offset[1] + 192 + 1 (for the length header)
-# raw - (192 * 256 - 192 - 1)
-# = 48959
-#>>>&-1 ubeshort x (2 byte new length encoding, %d bytes)
->>>&(&-1.S-48959) use cert_packet_3
->&0 ubyte =255
-#>>&0 belong x (5 byte new length encoding, %d bytes)
->>&(&-4.L) use cert_packet_3
-# Partial body encoding (only valid for container packets).
-# >&0 ubyte >224
-# >>&0 ubyte <255 partial body encoding
-
-0 name sig_or_component_1_length_old
-#>&0 ubyte x (ctb: %x)
->&0 ubyte&0x3 =0
-#>>&0 ubyte x (1 byte old length encoding, %d bytes)
->>&(&0.B+1) use cert_packet_3
->&0 ubyte&0x3 =1
-#>>&0 ubeshort x (2 byte old length encoding, %d bytes)
->>&(&0.S+2) use cert_packet_3
->&0 ubyte&0x3 =2
-#>>&0 ubelong x (4 byte old length encoding, %d bytes)
->>&(&0.L+4) use cert_packet_3
-
-# Copy of above.
-0 name cert_packet_3
-#>0 ubyte x (ctb: %x)
->&0 ubyte =0xC2
->>0 ubyte x \b; Signature
->>&0 use cert_packet_3_length_new
->&0 ubyte =0xCD
->>0 ubyte x \b; User ID
->>&0 use cert_packet_3_length_new
->&0 ubyte =0xCE
->>0 ubyte x \b; Public Subkey
->>&0 use cert_packet_3_length_new
->&0 ubyte =0xC7
->>0 ubyte x \b; Secret Subkey
->>&0 use cert_packet_3_length_new
->&0 ubyte =0xD1
->>0 ubyte x \b; User Attribute
->>&0 use cert_packet_3_length_new
->&0 ubyte&0xFC =0x88
->>0 ubyte x \b; Signature
->>&-1 use cert_packet_3_length_old
->&0 ubyte&0xFC =0xB4
->>0 ubyte x \b; User ID
->>&-1 use cert_packet_3_length_old
->&0 ubyte&0xFC =0xB8
->>0 ubyte x \b; Public Subkey
->>&-1 use cert_packet_3_length_old
->&0 ubyte&0xFC =0x9C
->>0 ubyte x \b; Secret Subkey
->>&-1 use cert_packet_3_length_old
-
-# Copy of above.
-0 name cert_packet_3_length_new
->&0 ubyte <192
-#>>&0 ubyte x (1 byte new length encoding, %d bytes)
->>&(&-1.B) use pgp_binary_keys_end
->&0 ubyte >191
->>&-1 ubyte <225
-# offset = ((offset[0] - 192) << 8) + offset[1] + 192 + 1 (for the length header)
-# raw - (192 * 256 - 192 - 1)
-# = 48959
-#>>>&-1 ubeshort x (2 byte new length encoding, %d bytes)
->>>&(&-1.S-48959) use pgp_binary_keys_end
->&0 ubyte =255
-#>>&0 belong x (5 byte new length encoding, %d bytes)
->>&(&-4.L) use pgp_binary_keys_end
-
-0 name cert_packet_3_length_old
-#>&0 ubyte x (ctb: %x)
->&0 ubyte&0x3 =0
-#>>&0 ubyte x (1 byte old length encoding, %d bytes)
->>&(&0.B+1) use pgp_binary_keys_end
->&0 ubyte&0x3 =1
-#>>&0 ubeshort x (2 byte old length encoding, %d bytes)
->>&(&0.S+2) use pgp_binary_keys_end
->&0 ubyte&0x3 =2
-#>>&0 ubelong x (4 byte old length encoding, %d bytes)
->>&(&0.L+4) use pgp_binary_keys_end
-
-# We managed to parse the first three packets of the certificate. Declare
-# victory.
-0 name pgp_binary_keys_end
->0 byte x \b; OpenPGP Certificate
-!:mime application/pgp-keys
-!:ext pgp/gpg/pkr/asd
diff --git a/contrib/libs/libmagic/magic/Magdir/pkgadd b/contrib/libs/libmagic/magic/Magdir/pkgadd
deleted file mode 100644
index 7dfb28691d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pkgadd
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pkgadd,v 1.6 2009/09/19 16:28:11 christos Exp $
-# pkgadd: file(1) magic for SysV R4 PKG Datastreams
-#
-0 string #\ PaCkAgE\ DaTaStReAm pkg Datastream (SVR4)
-!:mime application/x-svr4-package
diff --git a/contrib/libs/libmagic/magic/Magdir/plan9 b/contrib/libs/libmagic/magic/Magdir/plan9
deleted file mode 100644
index db068479c2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/plan9
+++ /dev/null
@@ -1,25 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: plan9,v 1.6 2021/07/30 12:25:13 christos Exp $
-# plan9: file(1) magic for AT&T Bell Labs' Plan 9 executables and object files
-# From: "Stefan A. Haubenthal" <polluks@web.de>
-#
-0 belong 0x00000107 Plan 9 executable, Motorola 68k
-0 belong 0x00000197 Plan 9 executable, AT&T Hobbit
-0 belong 0x000001EB Plan 9 executable, Intel 386
-0 belong 0x00000247 Plan 9 executable, Intel 960
-0 belong 0x000002AB Plan 9 executable, SPARC
-0 belong 0x00000407 Plan 9 executable, MIPS R3000
-0 belong 0x0000048B Plan 9 executable, AT&T DSP 3210
-0 belong 0x00000517 Plan 9 executable, MIPS R4000 BE
-0 belong 0x000005AB Plan 9 executable, AMD 29000
-0 belong 0x00000647 Plan 9 executable, ARM 7-something
-0 belong 0x000006EB Plan 9 executable, PowerPC
-0 belong 0x00000797 Plan 9 executable, MIPS R4000 LE
-0 belong 0x0000084B Plan 9 executable, DEC Alpha
-
-0 belong 0x3A11013C Plan 9 object file, MIPS R3000
-0 belong 0x430D013C Plan 9 object file, AT&T Hobbit
-0 belong 0x4D013201 Plan 9 object file, Motorola 68k
-0 belong 0x7410013C Plan 9 object file, SPARC
-0 belong 0x7E004501 Plan 9 object file, Intel 386
diff --git a/contrib/libs/libmagic/magic/Magdir/playdate b/contrib/libs/libmagic/magic/Magdir/playdate
deleted file mode 100644
index 77f8c68937..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/playdate
+++ /dev/null
@@ -1,57 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: playdate,v 1.1 2022/11/04 13:34:48 christos Exp $
-#
-# Various native file formats for the Playdate portable video game console.
-#
-# These are unofficially documented at
-# https://github.com/jaames/playdate-reverse-engineering
-#
-# The SDK is a source for many test files, and can be used to
-# create others. https://play.date/dev/
-
-
-# pdi: static image
-0 string Playdate\ IMG Playdate image data
->12 belong&0x80 0x80 (compressed)
->>20 lelong x %d x
->>24 lelong x %d
->12 belong&0x80 0x00 (uncompressed)
->>16 leshort x %d x
->>18 leshort x %d
-
-# pdt: multiple static images
-0 string Playdate\ IMT Playdate image data set
->12 belong&0x80 0x80 (compressed)
->>20 lelong x %d x
->>24 lelong x %d,
->>28 lelong x %d cells
->12 belong&0x80 0x00 (uncompressed)
->>20 lelong x tile grid %d x
->>24 lelong x %d
-
-# pds: string tables
-0 string Playdate\ STR Playdate localization strings
->12 belong&0x80 0x80 (compressed)
->12 belong&0x80 0x00 (uncompressed)
-
-# pda: audio
-0 string Playdate\ AUD Playdate audio file
->12 lelong&0xffffff x %d Hz,
->15 byte 0 unsigned, 8-bit PCM, 1 channel
->15 byte 1 unsigned, 8-bit PCM, 2 channel
->15 byte 2 signed, 16-bit little-endian PCM, 1 channel
->15 byte 3 signed, 16-bit little-endian PCM, 2 channel
->15 byte 4 4-bit ADPCM, 1 channel
->15 byte 5 4-bit ADPCM, 2 channel
-
-# pda: video
-0 string Playdate\ VID Playdate video file
->24 leshort x %d x
->26 leshort x %d,
->16 leshort x %d frames,
->20 lefloat x %.2f FPS
-
-# pdz: executable package
-# Not a lot we can do, as it's a stream of entries with no summary information.
-0 string Playdate\ PDZ Playdate executable package
diff --git a/contrib/libs/libmagic/magic/Magdir/plus5 b/contrib/libs/libmagic/magic/Magdir/plus5
deleted file mode 100644
index 795cca1f11..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/plus5
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: plus5,v 1.6 2009/09/19 16:28:11 christos Exp $
-# plus5: file(1) magic for Plus Five's UNIX MUMPS
-#
-# XXX - byte order? Paging Hokey....
-#
-0 short 0x259 mumps avl global
->2 byte >0 (V%d)
->6 byte >0 with %d byte name
->7 byte >0 and %d byte data cells
-0 short 0x25a mumps blt global
->2 byte >0 (V%d)
->8 short >0 - %d byte blocks
->15 byte 0x00 - P/D format
->15 byte 0x01 - P/K/D format
->15 byte 0x02 - K/D format
->15 byte >0x02 - Bad Flags
diff --git a/contrib/libs/libmagic/magic/Magdir/pmem b/contrib/libs/libmagic/magic/Magdir/pmem
deleted file mode 100644
index c0ead7316b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pmem
+++ /dev/null
@@ -1,46 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pmem,v 1.4 2021/04/26 15:56:00 christos Exp $
-# pmem: file(1) magic for Persistent Memory Development Kit pool files
-#
-0 string PMEM
->4 string POOLSET Persistent Memory Poolset file
->>11 search REPLICA with replica
->4 regex LOG|BLK|OBJ Persistent Memory Pool file, type: %s,
->>8 lelong >0 version: %#x,
->>12 lelong x compat: %#x,
->>16 lelong x incompat: %#x,
->>20 lelong x ro_compat: %#x,
-
-
->>120 leqldate x crtime: %s,
->>128 lequad x alignment_desc: %#016llx,
-
->>136 clear x
->>136 byte 2 machine_class: 64-bit,
->>136 default x machine_class: unknown
->>>136 byte x (%#d),
-
->>137 clear x
->>137 byte 1 data: little-endian,
->>137 byte 2 data: big-endian,
->>137 default x data: unknown
->>>137 byte x (%#d),
-
->>138 byte !0 reserved[0]: %d,
->>139 byte !0 reserved[1]: %d,
->>140 byte !0 reserved[2]: %d,
->>141 byte !0 reserved[3]: %d,
-
->>142 clear x
->>142 leshort 62 machine: x86_64
->>142 leshort 183 machine: aarch64
->>142 default x machine: unknown
->>>142 leshort x (%#d)
-
->4 string BLK
->>4096 lelong x \b, blk.bsize: %d
-
->4 string OBJ
->>4096 string >0 \b, obj.layout: '%s'
->>4096 string <0 \b, obj.layout: NULL
diff --git a/contrib/libs/libmagic/magic/Magdir/polyml b/contrib/libs/libmagic/magic/Magdir/polyml
deleted file mode 100644
index 1cc01093e4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/polyml
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: polyml,v 1.2 2019/04/19 00:42:27 christos Exp $
-# polyml: file(1) magic for PolyML
-#
-# PolyML
-# MPEG, FLI, DL originally from vax@ccwf.cc.utexas.edu (VaX#n8)
-# FLC, SGI, Apple originally from Daniel Quinlan (quinlan@yggdrasil.com)
-
-# [0]: https://www.polyml.org/
-# [1]: https://github.com/polyml/polyml/blob/master/\
-# libpolyml/savestate.cpp#L146-L147
-# [2]: https://github.com/polyml/polyml/blob/master/\
-# libpolyml/savestate.cpp#L1262-L1263
-
-# Type: Poly/ML saved data
-# From: Matthew Fernandez <matthew.fernandez@gmail.com>
-
-0 string POLYSAVE Poly/ML saved state
->8 long x version %u
-
-0 string POLYMODU Poly/ML saved module
->8 long x version %u
diff --git a/contrib/libs/libmagic/magic/Magdir/printer b/contrib/libs/libmagic/magic/Magdir/printer
deleted file mode 100644
index b45a2025ec..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/printer
+++ /dev/null
@@ -1,278 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: printer,v 1.34 2023/06/16 19:27:12 christos Exp $
-# printer: file(1) magic for printer-formatted files
-#
-
-# PostScript, updated by Daniel Quinlan (quinlan@yggdrasil.com)
-0 string %! PostScript document text
-!:mime application/postscript
-!:apple ASPSTEXT
->2 string PS-Adobe- conforming
->>11 string >\0 DSC level %.3s
->>>15 string EPS \b, type %s
->>>15 string Query \b, type %s
->>>15 string ExitServer \b, type %s
->>>15 search/1000 %%LanguageLevel:\040
->>>>&0 string >\0 \b, Level %s
-# Some PCs have the annoying habit of adding a ^D as a document separator
-0 string \004%! PostScript document text
-!:mime application/postscript
-!:apple ASPSTEXT
->3 string PS-Adobe- conforming
->>12 string >\0 DSC level %.3s
->>>16 string EPS \b, type %s
->>>16 string Query \b, type %s
->>>16 string ExitServer \b, type %s
->>>16 search/1000 %%LanguageLevel:\040
->>>>&0 string >\0 \b, Level %s
-0 string \033%-12345X%!PS PostScript document
-
-# DOS EPS Binary File Header
-# From: Ed Sznyter <ews@Black.Market.NET>
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Encapsulated_PostScript
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/eps-adobe.trid.xml
-# Note: called "Encapsulated PostScript binary" by TrID and
-# verified partly by ImageMagick `identify -verbose *` as EPT (Encapsulated PostScript with TIFF preview)
-0 belong 0xC5D0D3C6
-# skip DROID fmt-122-signature-id-174.eps fmt-123-signature-id-178.eps fmt-124-signature-id-180.eps
-# by looking for content after header
-# GRR: in version 5.44 the unequal and non-endian variants are not working!
->32 ulelong >0 DOS EPS Binary File
-!:mime image/x-eps
-# TODO: check that "long" is false on big endian machines
-# Postscript often (850/857) comes after header; so values like: 30 32 or 2788 10644 43350 71828
->>4 long >0 at byte %d
-# 1 space char after length value to get phrase like "length 263893 PostScript document text"
->>>8 long >0 length %d
-# PostScript document text handled by ./printer
->>>>(4.l) indirect x
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/e/eps-wmf.trid.xml
-# Note: called "Encapsulated PostScript binary (with WMF preview)" by TrID
-# verified partly by XnView `nconvert -info *.EP?` as TIFF epsp
->>>>12 long >0 at byte %d
-!:ext eps
-# GRR: in file version 5.44 calling indirect of ./msdos produces a phrase like "length 452\012- Windows metafile"
->>>>16 long >0 length %d
-# Windows metafile data handled by ./msdos
->>>>>(12.l) indirect x
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/e/eps-tiff.trid.xml
-# Note: called "Encapsulated PostScript binary (with TIFF preview)" by TrID
->>>>20 long >0 at byte %d
-# For the variant with the TIFF preview image sometimes the file extension ept is used
-!:ext eps/ept
-# GRR: in file version 5.44 calling indirect of ./images produces a phrase like "length 43320\012- TIFF image data,"
->>>>>24 long >0 length %d
-# TIFF image data handled by ./images
->>>>>>(20.l) indirect x
-
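The DOS EPS preview header walked by the rules above is a fixed 30-byte structure: the magic C5 D0 D3 C6 followed by little-endian (offset, length) pairs for the PostScript body, an optional WMF preview and an optional TIFF preview. A minimal sketch assuming that layout (names are illustrative, not part of the magic source):

    import struct

    def read_dos_eps_header(data):
        # Returns None unless the DOS EPS magic is present.
        if data[:4] != b'\xC5\xD0\xD3\xC6':
            return None
        ps_off, ps_len, wmf_off, wmf_len, tif_off, tif_len = struct.unpack_from('<6L', data, 4)
        return {'postscript': (ps_off, ps_len),
                'wmf': (wmf_off, wmf_len),      # (0, 0) when there is no WMF preview
                'tiff': (tif_off, tif_len)}     # (0, 0) when there is no TIFF preview
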
-# Summary: Adobe's PostScript Printer Description File
-# Extension: .ppd
-# Reference: https://partners.adobe.com/public/developer/en/ps/5003.PPD_Spec_v4.3.pdf, Section 3.8
-# Submitted by: Yves Arrouye <arrouye@marin.fdn.fr>
-#
-0 string *PPD-Adobe:\x20 PPD file
->&0 string x \b, version %s
-!:ext ppd
-!:mime application/vnd.cups-ppd
-
-# HP Printer Job Language
-0 string \033%-12345X@PJL HP Printer Job Language data
-# HP Printer Job Language
-# The header found on Win95 HP plot files is the "Silliest Thing possible"
-# (TM)
-# Every driver puts the language at some random position, with random case
-# (LANGUAGE and Language)
-# For example the LaserJet 5L driver puts the "PJL ENTER LANGUAGE" in line 10
-# From: Uwe Bonnes <bon@elektron.ikp.physik.th-darmstadt.de>
-#
-0 string \033%-12345X@PJL HP Printer Job Language data
->&0 string >\0 %s
->>&0 string >\0 %s
->>>&0 string >\0 %s
->>>>&0 string >\0 %s
-#>15 string \ ENTER\ LANGUAGE\ =
-#>31 string PostScript PostScript
-
-# From: Stefan Thurner <thurners@nicsys.de>
-0 string \033%-12345X@PJL
->&0 search/10000 %! PJL encapsulated PostScript document text
-
-# Rick Richardson <rickrich@gmail.com>
-
-# For Fuji-Xerox Printers - HBPL stands for Host Based Printer Language
-# For Oki Data Printers - HIPERC
-# For Konica Minolta Printers - LAVAFLOW
-# For Samsung Printers - QPDL
-# For HP Printers - ZJS stands for Zenographics ZJStream
-0 string \033%-12345X@PJL HP Printer Job Language data
->0 search/10000 @PJL\ ENTER\ LANGUAGE=HBPL - HBPL
->0 search/10000 @PJL\ ENTER\ LANGUAGE=HIPERC - Oki Data HIPERC
->0 search/10000 @PJL\ ENTER\ LANGUAGE=LAVAFLOW - Konica Minolta LAVAFLOW
->0 search/10000 @PJL\ ENTER\ LANGUAGE=QPDL - Samsung QPDL
->0 search/10000 @PJL\ ENTER\ LANGUAGE\ =\ QPDL - Samsung QPDL
->0 search/10000 @PJL\ ENTER\ LANGUAGE=ZJS - HP ZJS
-# Summary: Hewlett-Packard printer firmware update
-# From: Joerg Jenderek
-# URL: https://support.hp.com/us-en/drivers/selfservice/hp-envy-6000e-all-in-one-printer-series/2100187505/model/2100187513
-# Note: firmware update tested with ENVY 6000 All-in-One Printer
-0 string @PJL\ ENTER\ LANGUAGE=FWUPDATE2 HP Printer firmware update
-#!:mime application/octet-stream
-#!:mime application/x-hp-firmware
-# https://ftp.hp.com/pub/softlib/software13/printers/en6000/2214/EN6000_2214B.exe
-# vasari_base_dist_pp1_001.2214B_nonassert_appsigned_lbi_rootfs_secure_signed.ful2
-!:ext ful2
-
-# HP Printer Control Language, Daniel Quinlan (quinlan@yggdrasil.com)
-0 string \033E\033 HP PCL printer data
->3 string \&l0A - default page size
->3 string \&l1A - US executive page size
->3 string \&l2A - US letter page size
->3 string \&l3A - US legal page size
->3 string \&l26A - A4 page size
->3 string \&l80A - Monarch envelope size
->3 string \&l81A - No. 10 envelope size
->3 string \&l90A - Intl. DL envelope size
->3 string \&l91A - Intl. C5 envelope size
->3 string \&l100A - Intl. B5 envelope size
->3 string \&l-81A - No. 10 envelope size (landscape)
->3 string \&l-90A - Intl. DL envelope size (landscape)
-
-# IMAGEN printer-ready files:
-0 string @document( Imagen printer
-# this only works if "language xxx" is first item in Imagen header.
->10 string language\ impress (imPRESS data)
->10 string language\ daisy (daisywheel text)
->10 string language\ diablo (daisywheel text)
->10 string language\ printer (line printer emulation)
->10 string language\ tektronix (Tektronix 4014 emulation)
-# Add any other languages that your Imagen uses - remember
-# to keep the word `text' if the file is human-readable.
-# [GRR 950115: missing "postscript" or "ultrascript" (whatever it was called)]
-#
-# Now magic for IMAGEN font files...
-0 string Rast RST-format raster font data
->45 string >0 face %s
-# From Jukka Ukkonen
-0 string \033[K\002\0\0\017\033(a\001\0\001\033(g Canon Bubble Jet BJC formatted data
-
-# From <mike@flyn.org>
-# These are the /etc/magic entries to decode data sent to an Epson printer.
-0 string \x1B\x40\x1B\x28\x52\x08\x00\x00REMOTE1P Epson Stylus Color 460 data
-
-
-#------------------------------------------------------------------------------
-# zenographics: file(1) magic for Zenographics ZjStream printer data
-# Rick Richardson <rickrich@gmail.com>
-0 string JZJZ
->0x12 string ZZ Zenographics ZjStream printer data (big-endian)
-0 string ZJZJ
->0x12 string ZZ Zenographics ZjStream printer data (little-endian)
-
-
-#------------------------------------------------------------------------------
-# Oak Technologies printer stream
-# Rick Richardson <rickrich@gmail.com>
-0 string OAK
->0x07 byte 0
->0x0b byte 0 Oak Technologies printer stream
-
-# This would otherwise be recognized as PostScript - nick@debian.org
-0 string %!VMF SunClock's Vector Map Format data
-
-#------------------------------------------------------------------------------
-# HP LaserJet 1000 series downloadable firmware file
-0 string \xbe\xefABCDEFGH HP LaserJet 1000 series downloadable firmware
-
-# From: Paolo <oopla@users.sf.net>
-# Epson ESC/Page, ESC/PageColor
-0 string \x1b\x01@EJL Epson ESC/Page language printer data
-
-# Summary: Hewlett-Packard Graphics Language
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/HP-GL
-# https://en.wikipedia.org/wiki/HPGL
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/h/hpg.trid.xml
-# Note: called "Hewlett-Packard Graphics Language" by TrID and
-# "Hewlett Packard Graphics Language" by DROID via PUID x-fmt/293 and
-# HPGL by XnView command `nconvert -info *`
-# initialize, start a plotting job
-0 string IN;
->0 use hpgl
-# fill.plt
-0 string INPS
->0 use hpgl
-# http://ftp.funet.fi/index/graphics/packages/hpgl2ps/hpgl2ps.tar.Z/hpgl2ps/test1.hpgl
-0 string DF;
->0 use hpgl
-# http://ftp.funet.fi/index/graphics/packages/hpgl2ps/hpgl2ps.tar.Z/hpgl2ps/test3.hpgl
-# Select Pen n; If no pen number or 0, the controller performs an end of file command; n in range between -32767 and 32768 like: 6
-0 string SP
-# skip text Linux-syscall-note inside qemu sources starting with SPDX-Exception-Identifier: Linux-syscall-note
-# by checking for valid Pen number
->2 regex \^([0-9]{1,5})
-#>2 regex \^([0-9]{1,5}) PEN_NUMBER=%s
->>0 use hpgl
-# charsize.hp pages.hp set the scaling points (P1 and P2) to their default positions
-0 string IP0
->0 use hpgl
-# ci.hp
-0 string CO\040
->0 use hpgl
-# iw.hp 286x192.5_lh.hpg 286x192.5_lq.hpg
-0 string PS\040
->0 use hpgl
-# thick.hp
-0 string PS9
->0 use hpgl
-# ul.hp
-0 string PS4
->0 use hpgl
-# la.hp
-0 string BP
->0 use hpgl
-# miter.hp
-# Plot Absolute x,y{,x,y{...}}; x and y in range between -32767 and 32768 like: PA4000,3000;
-0 string PA
-# skip shell scripts like test_msa_run_32r5eb.sh with variable PATH_TO_QEMU
-# by checking for valid x coordinate
->2 regex \^([-]{0,1}[0-9]{1,5})
-#>2 regex \^([-]{0,1}[0-9]{1,5}) COORDINATE=%s
->>0 use hpgl
-# pw.hpg number of pens x
-0 string NP
->0 use hpgl
-# win_1.hp
-#0 string \003INCA WHAT_IS_THAT
-#>0 use hpgl
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/h/hpgl2.trid.xml
-# Note: called "Hewlett-Packard Graphics Language 2" by TrID
-0 string \033%-1B Hewlett-Packard Graphics Language 2
-!:mime application/vnd.hp-HPGL
-# like: dt.plt
-!:ext plt
-#!:ext plt/gl2/hpg2/spl
-# remaining part after escape sequence
->5 string x with "%-.10s"
-# display Hewlett-Packard Graphics Language vector graphic information
-0 name hpgl
->0 string x Hewlett-Packard Graphics Language
-#!:mime vector/x-hpgl
-# https://www.iana.org/assignments/media-types/application/vnd.hp-HPGL
-!:mime application/vnd.hp-HPGL
-# no example with HPL suffix found
-!:ext hpgl/hpg/hp/plt
-# like: "IN;" "DF;IN;LT;PU1000,1000;PD2000,10" "SP6;DI0,1;SR0.70,1.90;SC0,800,"
-# "CO Concentric circles drawn with different linewidths;"
->0 string x \b, starting with "%-.54s"
-# continue but not for 1 long line without CR or LF
->>&0 ubyte <0x0E
-#>>&0 ubyte <0x0E TERMINATOR=%x
-# second line after 1 terminator character
->>>&0 string >\r with "%-.10s"
-# next character again CR or LF
->>>&0 ubyte <0x0E
-#>>>&0 ubyte <0x0E 2ND_CHARACTER=%x
-# second line after 2 terminator characters
->>>>&0 string >\r with "%-.10s"
diff --git a/contrib/libs/libmagic/magic/Magdir/project b/contrib/libs/libmagic/magic/Magdir/project
deleted file mode 100644
index 9180b57d63..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/project
+++ /dev/null
@@ -1,10 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: project,v 1.5 2017/03/17 21:35:28 christos Exp $
-# project: file(1) magic for Project management
-#
-# Magic strings for ftnchek project files. Alexander Mai
-0 string FTNCHEK_\ P project file for ftnchek
->10 string 1 version 2.7
->10 string 2 version 2.8 to 2.10
->10 string 3 version 2.11 or later
diff --git a/contrib/libs/libmagic/magic/Magdir/psdbms b/contrib/libs/libmagic/magic/Magdir/psdbms
deleted file mode 100644
index 3eec965731..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/psdbms
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: psdbms,v 1.8 2017/03/17 21:35:28 christos Exp $
-# psdbms: file(1) magic for psdatabase
-#
-# Update: Joerg Jenderek
-# GRR: line below is too general, as it also catches some Panorama database *.pan and
-# AppleWorks word processor files
-0 belong&0xff00ffff 0x56000000
-# assume version starts with digit
->1 regex/s =^[0-9] ps database
->>1 string >\0 version %s
-# kernel name
->>4 string >\0 from kernel %s
diff --git a/contrib/libs/libmagic/magic/Magdir/psl b/contrib/libs/libmagic/magic/Magdir/psl
deleted file mode 100644
index 0296540ce5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/psl
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: psl,v 1.3 2019/04/19 00:42:27 christos Exp $
-# psl: file(1) magic for Public Suffix List representations
-# From: Daniel Kahn Gillmor <dkg@fifthhorseman.net>
-# URL: https://publicsuffix.org
-# see also: https://thread.gmane.org/gmane.network.dns.libpsl.bugs/162/focus=166
-
-0 search/512 \n\n//\ ===BEGIN\ ICANN\ DOMAINS===\n\n Public Suffix List data
-
-0 string .DAFSA@PSL_
->15 string \n Public Suffix List data (optimized)
->>11 byte >0x2f
->>>11 byte <0x3a (Version %c)
diff --git a/contrib/libs/libmagic/magic/Magdir/pulsar b/contrib/libs/libmagic/magic/Magdir/pulsar
deleted file mode 100644
index 7cb6f18bda..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pulsar
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pulsar,v 1.5 2009/09/19 16:28:12 christos Exp $
-# pulsar: file(1) magic for Pulsar POP3 daemon binary files
-#
-# http://pulsar.sourceforge.net
-# mailto:rok.papez@lugos.si
-#
-
-0 belong 0x1ee7f11e Pulsar POP3 daemon mailbox cache file.
->4 ubelong x Version: %d.
->8 ubelong x \b%d
-
diff --git a/contrib/libs/libmagic/magic/Magdir/puzzle b/contrib/libs/libmagic/magic/Magdir/puzzle
deleted file mode 100644
index ac983f32b8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/puzzle
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: puzzle,v 1.2 2021/10/07 15:40:40 christos Exp $
-# puzzle: Magic for various puzzles
-
-# PUZ crossword puzzles from Alan De Smet
-# Test files can be found at
-# https://theworld.com/~wij/puzzles/wij-themed.html or using the
-# "Universal" or "WS Journal" links on the right side of
-# https://www.cruciverb.com/ .
-
-2 string ACROSS&DOWN PUZ crossword puzzle
->0x2c byte x %d x
->0x2d byte x %d,
->0x2e leshort x %d clues,
->0x1e leshort 0x0000 plain text solution
->0x1e leshort !0x0000 scrambled solution
diff --git a/contrib/libs/libmagic/magic/Magdir/pwsafe b/contrib/libs/libmagic/magic/Magdir/pwsafe
deleted file mode 100644
index 549093f143..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pwsafe
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pwsafe,v 1.2 2019/04/19 00:42:27 christos Exp $
-# pwsafe: file(1) magic for passwordsafe file
-#
-# Password Safe
-# http://passwordsafe.sourceforge.net/
-# file format specs
-# https://passwordsafe.svn.sourceforge.net/viewvc/passwordsafe/trunk/pwsafe/pwsafe/docs/formatV3.txt
-# V2 https://passwordsafe.svn.sourceforge.net/viewvc/passwordsafe/trunk/pwsafe/pwsafe/docs/formatV2.txt
-# V1 https://passwordsafe.svn.sourceforge.net/viewvc/passwordsafe/trunk/pwsafe/pwsafe/docs/notes.txt
-# V2 and V1 have no easy identifier that I can find
-# .psafe3
-0 string PWS3 Password Safe V3 database
diff --git a/contrib/libs/libmagic/magic/Magdir/pyramid b/contrib/libs/libmagic/magic/Magdir/pyramid
deleted file mode 100644
index ee47c80767..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/pyramid
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: pyramid,v 1.7 2009/09/19 16:28:12 christos Exp $
-# pyramid: file(1) magic for Pyramids
-#
-# XXX - byte order?
-#
-0 long 0x50900107 Pyramid 90x family executable
-0 long 0x50900108 Pyramid 90x family pure executable
->16 long >0 not stripped
-0 long 0x5090010b Pyramid 90x family demand paged pure executable
->16 long >0 not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/python b/contrib/libs/libmagic/magic/Magdir/python
deleted file mode 100644
index 00d90d1238..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/python
+++ /dev/null
@@ -1,305 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: python,v 1.45 2022/07/24 23:59:37 christos Exp $
-# python: file(1) magic for python
-#
-# Outlook puts """ too for urgent messages
-# From: David Necas <yeti@physics.muni.cz>
-# often the module starts with a multiline string
-0 string/t """ Python script text executable
-# MAGIC as specified in Python/import.c (1.0 to 3.7)
-# and in Lib/importlib/_bootstrap_external.py (3.5+)
-# two bytes of magic followed by "\r\n" in little endian order
-0 belong 0x02099900 python 1.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x03099900 python 1.1/1.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x892e0d0a python 1.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x04170d0a python 1.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x994e0d0a python 1.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xfcc40d0a python 1.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xfdc40d0a python 1.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x87c60d0a python 2.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x88c60d0a python 2.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2aeb0d0a python 2.1 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2beb0d0a python 2.1 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2ded0d0a python 2.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2eed0d0a python 2.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x3bf20d0a python 2.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x3cf20d0a python 2.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x45f20d0a python 2.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x59f20d0a python 2.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x63f20d0a python 2.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x6df20d0a python 2.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x6ef20d0a python 2.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x77f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x81f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x8bf20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x8cf20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x95f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x9ff20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xa9f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xb3f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xb4f20d0a python 2.5 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xc7f20d0a python 2.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xd1f20d0a python 2.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xd2f20d0a python 2.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xdbf20d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xe5f20d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xeff20d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xf9f20d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x03f30d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x04f30d0a python 2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x0af30d0a PyPy2.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xb80b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xc20b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xcc0b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xd60b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xe00b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xea0b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xf40b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xf50b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xff0b0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x090c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x130c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x1d0c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x1f0c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x270c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x3b0c0d0a python 3.0 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x450c0d0a python 3.1 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x4f0c0d0a python 3.1 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x580c0d0a python 3.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x620c0d0a python 3.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x6c0c0d0a python 3.2 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x760c0d0a python 3.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x800c0d0a python 3.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x8a0c0d0a python 3.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x940c0d0a python 3.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x9e0c0d0a python 3.3 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xb20c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xbc0c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xc60c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xd00c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xda0c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xe40c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xee0c0d0a python 3.4 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0xf80c0d0a python 3.5.1- byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x020d0d0a python 3.5.1- byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x0c0d0d0a python 3.5.1- byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x160d0d0a python 3.5.1- byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x170d0d0a python 3.5.2+ byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x200d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x210d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2a0d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2b0d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2c0d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2d0d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x2f0d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x300d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x310d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x320d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x330d0d0a python 3.6 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x3e0d0d0a python 3.7 byte-compiled
-!:mime application/x-bytecode.python
-0 belong 0x3f0d0d0a python 3.7 byte-compiled
-!:mime application/x-bytecode.python
-
-# magic 3392+ implements PEP 552: Deterministic pycs
-0 name pyc-pep552
-# the flag field determines how .pyc validity is checked
->4 ulelong&1 0 timestamp-based,
->>8 uledate x .py timestamp: %s UTC,
->>12 ulelong x .py size: %d bytes
->4 ulelong&1 !0 hash-based, check-source flag
->>4 ulelong&2 0 unset,
->>4 ulelong&2 !0 set,
->>8 ulequad x hash: 0x%llx
-
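The PEP 552 layout that this named pattern decodes can be read back with a short Python sketch (assumes a CPython 3.7+ .pyc file; names are illustrative):

    import struct

    def read_pyc_header(path):
        # 16-byte header: 2-byte magic, \r\n, 4-byte flags, then either
        # (source mtime, source size) or an 8-byte source hash (PEP 552).
        with open(path, 'rb') as f:
            magic, flags = struct.unpack('<HxxL', f.read(8))
            if flags & 1:                                   # hash-based pyc
                return magic, 'hash', f.read(8), bool(flags & 2)
            mtime, size = struct.unpack('<LL', f.read(8))   # timestamp-based pyc
            return magic, 'timestamp', mtime, size
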
-# uleshort magic followed by \x0d\x0a
-2 string \x0d\x0a
-# extra check: only two bits of flag field are currently used
->4 ulelong <0x4
-# \x0d as part of magic should suffice till Python 3.14 (magic 3600)
->>1 ubyte 0x0d Byte-compiled Python module for
-!:mime application/x-bytecode.python
-# now look at the magic number to determine the version
->>>0 uleshort <3400 CPython 3.7,
->>>0 default x
->>>>0 uleshort <3420 CPython 3.8,
->>>>0 default x
->>>>>0 uleshort <3430 CPython 3.9,
->>>>>0 default x
->>>>>>0 uleshort <3450 CPython 3.10,
->>>>>>0 default x
->>>>>>>0 uleshort <3500 CPython 3.11,
->>>>>>>0 default x CPython 3.12 or newer,
->>>0 use pyc-pep552
->>0 uleshort 240 Byte-compiled Python module for PyPy3.7,
-!:mime application/x-bytecode.python
->>>0 use pyc-pep552
->>0 uleshort 256 Byte-compiled Python module for PyPy3.8,
-!:mime application/x-bytecode.python
->>>0 use pyc-pep552
->>0 uleshort 336 Byte-compiled Python module for PyPy3.9,
-!:mime application/x-bytecode.python
->>>0 use pyc-pep552
-
-0 search/1/w #!\040/usr/bin/python Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-0 search/1/w #!\040/usr/local/bin/python Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-0 search/10/w #!\040/usr/bin/env\040python Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-
-# from module.submodule import func1, func2
-0 search/8192 import
->0 regex \^from[\040\t]+([A-Za-z0-9_]|\\.)+[\040\t]+import.*$ Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-# def __init__ (self, ...):
-0 search/4096 def\ __init__
->&0 search/64 self Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-# if __name__ == "__main__":
-0 search/4096 if\ __name__
->&0 search/64 '__main__' Python script text executable
->&0 search/64 "__main__" Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-# import module [as abrev]
-0 search/8192 import
->0 regex \^import\ [_[:alpha:]]+\ as\ [[:alpha:]][[:space:]]*$ Python script text executable
-!:mime text/x-script.python
-
-# comments
-#0 search/4096 '''
-#>&0 regex .*'''$ Python script text executable
-#!:mime text/x-script.python
-
-#0 search/4096 """
-#>&0 regex .*"""$ Python script text executable
-#!:mime text/x-script.python
-
-# try:
-# except: or finally:
-# block
-0 search/4096 try:
->&0 regex \^[[:space:]]*except.*:$ Python script text executable
-!:strength + 15
-!:mime text/x-script.python
->&0 search/4096 finally: Python script text executable
-!:mime text/x-script.python
-
-# class name[(base classes,)]: [pass]
-0 search/8192 class
->0 regex \^class\ [_[:alpha:]]+(\\(.*\\))?(\ )*:([\ \t]+pass)?$ Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-# def name(*args, **kwargs):
-0 search/8192 def\
->0 regex \^[[:space:]]{0,50}def\ {1,50}[_a-zA-Z]{1,100}
->>&0 regex \\(([[:alpha:]*_,\ ]){0,255}\\):$ Python script text executable
-!:strength + 15
-!:mime text/x-script.python
-
-# https://numpy.org/devdocs/reference/generated/numpy.lib.format.html
-0 string \223NUMPY NumPy data file
-!:mime application/x-numpy-data
->6 byte x \b, version %d
->7 byte x \b.%d
-#>8 leshort x \b, header length=%d
->10 string x \b, description %s
diff --git a/contrib/libs/libmagic/magic/Magdir/qt b/contrib/libs/libmagic/magic/Magdir/qt
deleted file mode 100644
index 68085f2892..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/qt
+++ /dev/null
@@ -1,30 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: qt,v 1.4 2022/11/11 14:50:23 christos Exp $
-# qt: file(1) magic for Qt
-
-# https://doc.qt.io/qt-5/resources.html
-0 string \<!DOCTYPE\040RCC\> Qt Resource Collection file
-
-# https://qt.gitorious.org/qt/qtbase/source/\
-# 5367fa356233da4c0f28172a8f817791525f5457:\
-# src/tools/rcc/rcc.cpp#L840
-0 string qres\0\0 Qt Binary Resource file
-0 search/1024 The\040Resource\040Compiler\040for\040Qt Qt C-code resource file
-
-# https://qt.gitorious.org/qt/qtbase/source/\
-# 5367fa356233da4c0f28172a8f817791525f5457:\
-# src/corelib/kernel/qtranslator.cpp#L62
-0 string \x3c\xb8\x64\x18\xca\xef\x9c\x95
->8 string \xcd\x21\x1c\xbf\x60\xa1\xbd\xdd Qt Translation file
-
-
-# Qt V4 Javascript engine compiled unit
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/qt/qtdeclarative/blob/v6.4.0/src/qml/common/qv4compileddata_p.h
-0 string qv4cdata QV4 compiled unit
-!:ext qmlc
->8 ulelong x \b, version %d
->12 byte x \b, Qt %d
->13 byte x \b.%d
->14 byte x \b.%d
diff --git a/contrib/libs/libmagic/magic/Magdir/revision b/contrib/libs/libmagic/magic/Magdir/revision
deleted file mode 100644
index 824220a3d2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/revision
+++ /dev/null
@@ -1,66 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: revision,v 1.11 2019/04/19 00:42:27 christos Exp $
-# file(1) magic for revision control files
-# From Hendrik Scholz <hendrik@scholz.net>
-0 string/t /1\ :pserver: cvs password text file
-
-# Conary changesets
-# From: Jonathan Smith <smithj@rpath.com>
-0 belong 0xea3f81bb Conary changeset data
-
-# Type: Git bundles (git-bundle)
-# From: Josh Triplett <josh@freedesktop.org>
-0 string #\ v2\ git\ bundle\n Git bundle
-
-# Type: Git pack
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Git
-# reference: https://github.com/git/git/blob/master/Documentation/technical/pack-format.txt
-# The actual magic is 'PACK', but that clashes with Doom/Quake packs. However,
-# those have a little-endian offset immediately following the magic 'PACK',
-# the first byte of which is never 0, while the first byte of the Git pack
-# version, since it's a tiny number stored in big-endian format, is always 0.
-0 string PACK
-# GRR: line above is too general as it matches also PackDir archive ./acorn
-# test for major version. Git 2017 accepts version number 2 or 3
->4 ubelong <9
-# Acorn PackDir with method 0 compression has root like ADFS::HardDisc4.$.AsylumSrc
-# or SystemDevice::foobar
->>9 search/13 ::
-# but in git binary
->>9 default x Git pack
-!:mime application/x-git
-!:ext pack
-# 4 GB limit implies unsigned integer
->>>4 ubelong x \b, version %u
->>>8 ubelong x \b, %u objects
-
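The disambiguation described in the comment above amounts to a one-line check (illustrative Python, not the file(1) implementation):

    def looks_like_git_pack(data):
        # 'PACK' followed by a tiny big-endian version, so byte 4 is always 0;
        # Doom/Quake PACK files put a non-zero little-endian offset there instead.
        return data[:4] == b'PACK' and len(data) >= 12 and data[4] == 0
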
-# Type: Git pack index
-# From: Adam Buchbinder <adam.buchbinder@gmail.com>
-0 string \377tOc Git pack index
->4 belong =2 \b, version 2
-
-# Type: Git index file
-# From: Frederic Briare <fbriere@fbriere.net>
-0 string DIRC Git index
->4 belong >0 \b, version %d
->>8 belong >0 \b, %d entries
-
-# Type: Mercurial bundles
-# From: Seo Sanghyeon <tinuviel@sparcs.kaist.ac.kr>
-0 string HG10 Mercurial bundle,
->4 string UN uncompressed
->4 string BZ bzip2 compressed
-
-# Type: Subversion (SVN) dumps
-# From: Uwe Zeisberger <zeisberg@informatik.uni-freiburg.de>
-0 string SVN-fs-dump-format-version: Subversion dumpfile
->28 string >\0 (version: %s)
-
-# Type: Bazaar revision bundles and merge requests
-# URL: https://www.bazaar-vcs.org/
-# From: Jelmer Vernooij <jelmer@samba.org>
-0 string #\ Bazaar\ revision\ bundle\ v Bazaar Bundle
-0 string #\ Bazaar\ merge\ directive\ format Bazaar merge directive
diff --git a/contrib/libs/libmagic/magic/Magdir/riff b/contrib/libs/libmagic/magic/Magdir/riff
deleted file mode 100644
index 422a0d7941..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/riff
+++ /dev/null
@@ -1,841 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: riff,v 1.45 2022/07/24 23:47:49 christos Exp $
-# riff: file(1) magic for RIFF format
-# See
-#
-# https://www.seanet.com/users/matts/riffmci/riffmci.htm
-# http://www-mmsp.ece.mcgill.ca/Documents/AudioFormats/WAVE/Docs/riffmci.pdf
-# https://www.iana.org/assignments/wave-avi-codec-registry/wave-avi-codec-registry.xml
-#
-
-# audio format tag. Assume limits: max 1024 bit, 128 channels, 1 MHz
-0 name riff-wave
->0 leshort 0x01 \b, Microsoft PCM
->>14 leshort >0
->>>14 leshort <1024 \b, %d bit
->0 leshort 0x02 \b, Microsoft ADPCM
->0 leshort 0x03 \b, IEEE Float
->0 leshort 0x04 \b, Compaq VSELP
->0 leshort 0x05 \b, IBM CVSD
->0 leshort 0x06 \b, ITU G.711 A-law
->0 leshort 0x07 \b, ITU G.711 mu-law
->0 leshort 0x08 \b, Microsoft DTS
->0 leshort 0x10 \b, OKI ADPCM
->0 leshort 0x11 \b, IMA ADPCM
->0 leshort 0x12 \b, MediaSpace ADPCM
->0 leshort 0x13 \b, Sierra ADPCM
->0 leshort 0x14 \b, ITU G.723 ADPCM (Yamaha)
->0 leshort 0x15 \b, DSP Solutions DIGISTD
->0 leshort 0x16 \b, DSP Solutions DIGIFIX
->0 leshort 0x17 \b, Dialogic OKI ADPCM
->0 leshort 0x18 \b, MediaVision ADPCM
->0 leshort 0x19 \b, HP CU
->0 leshort 0x20 \b, Yamaha ADPCM
->0 leshort 0x21 \b, Speech Compression SONARC
->0 leshort 0x22 \b, DSP Group True Speech
->0 leshort 0x23 \b, Echo Speech EchoSC1
->0 leshort 0x24 \b, AudioFile AF36
->0 leshort 0x25 \b, APTX
->0 leshort 0x26 \b, AudioFile AF10
->0 leshort 0x27 \b, Prosody 1612
->0 leshort 0x28 \b, LRC
->0 leshort 0x30 \b, Dolby AC2
->0 leshort 0x31 \b, GSM 6.10
->0 leshort 0x32 \b, MSN Audio
->0 leshort 0x33 \b, Antex ADPCME
->0 leshort 0x34 \b, Control Res VQLPC
->0 leshort 0x35 \b, Digireal
->0 leshort 0x36 \b, DigiADPCM
->0 leshort 0x37 \b, Control Res CR10
->0 leshort 0x38 \b, NMS VBXADPCM
->0 leshort 0x39 \b, Roland RDAC
->0 leshort 0x3A \b, Echo Speech EchoSC3
->0 leshort 0x3B \b, Rockwell ADPCM
->0 leshort 0x3C \b, Rockwell Digitalk
->0 leshort 0x3D \b, Xebec
->0 leshort 0x40 \b, ITU G.721 ADPCM
->0 leshort 0x41 \b, ITU G.728 CELP
->0 leshort 0x42 \b, MSG723
->0 leshort 0x50 \b, MPEG
->0 leshort 0x52 \b, RT24
->0 leshort 0x53 \b, PAC
->0 leshort 0x55 \b, MPEG Layer 3
->0 leshort 0x59 \b, Lucent G.723
->0 leshort 0x60 \b, Cirrus
->0 leshort 0x61 \b, ESPCM
->0 leshort 0x62 \b, Voxware
->0 leshort 0x63 \b, Canopus Atrac
->0 leshort 0x64 \b, ITU G.726 ADPCM
->0 leshort 0x65 \b, ITU G.722 ADPCM
->0 leshort 0x66 \b, DSAT
->0 leshort 0x67 \b, DSAT Display
->0 leshort 0x69 \b, Voxware Byte Aligned
->0 leshort 0x70 \b, Voxware AC8
->0 leshort 0x71 \b, Voxware AC10
->0 leshort 0x72 \b, Voxware AC16
->0 leshort 0x73 \b, Voxware AC20
->0 leshort 0x74 \b, Voxware MetaVoice
->0 leshort 0x75 \b, Voxware MetaSound
->0 leshort 0x76 \b, Voxware RT29HW
->0 leshort 0x77 \b, Voxware VR12
->0 leshort 0x78 \b, Voxware VR18
->0 leshort 0x79 \b, Voxware TQ40
->0 leshort 0x80 \b, Softsound
->0 leshort 0x81 \b, Voxware TQ60
->0 leshort 0x82 \b, MSRT24
->0 leshort 0x83 \b, ITU G.729A
->0 leshort 0x84 \b, MVI MV12
->0 leshort 0x85 \b, DF G.726
->0 leshort 0x86 \b, DF GSM610
->0 leshort 0x88 \b, ISIAudio
->0 leshort 0x89 \b, Onlive
->0 leshort 0x91 \b, SBC24
->0 leshort 0x92 \b, Dolby AC3 S/PDIF
->0 leshort 0x97 \b, ZyXEL ADPCM
->0 leshort 0x98 \b, Philips LPCBB
->0 leshort 0x99 \b, Packed
->0 leshort 0x100 \b, Rhetorex ADPCM
->0 leshort 0x101 \b, BeCubed Software IRAT
->0 leshort 0x111 \b, Vivo G.723
->0 leshort 0x112 \b, Vivo Siren
->0 leshort 0x123 \b, Digital G.723
->0 leshort 0x200 \b, Creative ADPCM
->0 leshort 0x202 \b, Creative FastSpeech8
->0 leshort 0x203 \b, Creative FastSpeech10
->0 leshort 0x220 \b, Quarterdeck
->0 leshort 0x300 \b, FM Towns Snd
->0 leshort 0x400 \b, BTV Digital
->0 leshort 0x680 \b, VME VMPCM
->0 leshort 0x1000 \b, OLIGSM
->0 leshort 0x1001 \b, OLIADPCM
->0 leshort 0x1002 \b, OLICELP
->0 leshort 0x1003 \b, OLISBC
->0 leshort 0x1004 \b, OLIOPR
->0 leshort 0x1100 \b, LH Codec
->0 leshort 0x1400 \b, Norris
->0 leshort 0x1401 \b, ISIAudio
->0 leshort 0x1500 \b, Soundspace Music Compression
->0 leshort 0x2000 \b, AC3 DVM
->0 leshort 0x2001 \b, DTS
->2 leshort =1 \b, mono
->2 leshort =2 \b, stereo
->2 leshort >2
->>2 leshort <128 \b, %d channels
->4 lelong >0
->>4 lelong <1000000 %d Hz
-
-# try to find "fmt "
-0 name riff-walk
->0 string fmt\x20
->>4 lelong >15
->>>8 use riff-wave
->0 string LIST
->>&(4.l+4) use riff-walk
->0 string DISP
->>&(4.l+4) use riff-walk
->0 string bext
->>&(4.l+4) use riff-walk
->0 string Fake
->>&(4.l+4) use riff-walk
->0 string fact
->>&(4.l+4) use riff-walk
->0 string VP8
->>11 byte 0x9d
->>>12 byte 0x01
->>>>13 byte 0x2a \b, VP8 encoding
->>>>>14 leshort&0x3fff x \b, %d
->>>>>16 leshort&0x3fff x \bx%d, Scaling:
->>>>>14 leshort&0xc000 0x0000 \b [none]
->>>>>14 leshort&0xc000 0x1000 \b [5/4]
->>>>>14 leshort&0xc000 0x2000 \b [5/3]
->>>>>14 leshort&0xc000 0x3000 \b [2]
->>>>>14 leshort&0xc000 0x0000 \bx[none]
->>>>>14 leshort&0xc000 0x1000 \bx[5/4]
->>>>>14 leshort&0xc000 0x2000 \bx[5/3]
->>>>>14 leshort&0xc000 0x3000 \bx[2]
->>>>>15 byte&0x80 =0x00 \b, YUV color
->>>>>15 byte&0x80 =0x80 \b, bad color specification
->>>>>15 byte&0x40 =0x40 \b, no clamping required
->>>>>15 byte&0x40 =0x00 \b, decoders should clamp
-#>0 string x we got %s
-#>>&(4.l+4) use riff-walk
-
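The riff-walk helper above is a chunk loop written in magic syntax: read a four-character chunk id, take the little-endian 32-bit size at offset 4, and jump size+8 bytes to the next chunk (the &(4.l+4) indirection) until 'fmt ' is found, whose leading fields riff-wave then prints (format tag, channels, sample rate). A rough Python equivalent, for illustration only; unlike riff-walk it does not recurse into containers such as LIST, and error handling is minimal:

    import struct

    def wave_fmt_summary(path):
        # Walk RIFF sub-chunks of a WAVE file until 'fmt ' and report its key fields.
        with open(path, "rb") as f:
            riff, _size, form = struct.unpack("<4sI4s", f.read(12))
            if riff != b"RIFF" or form != b"WAVE":
                return None
            while True:
                header = f.read(8)
                if len(header) < 8:
                    return None
                cid, csize = struct.unpack("<4sI", header)
                if cid == b"fmt ":
                    tag, channels, rate = struct.unpack("<HHI", f.read(8))
                    return {"format_tag": tag, "channels": channels, "sample_rate": rate}
                f.seek(csize + (csize & 1), 1)   # skip the chunk body, word-aligned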
-# RecorderGear TR500 call recorder digits (BCD)
-0 name tr500-call-recorder-digits
->0 byte&0xF0 0x00 \b0
->0 byte&0xF0 0x10 \b1
->0 byte&0xF0 0x20 \b2
->0 byte&0xF0 0x30 \b3
->0 byte&0xF0 0x40 \b4
->0 byte&0xF0 0x50 \b5
->0 byte&0xF0 0x60 \b6
->0 byte&0xF0 0x70 \b7
->0 byte&0xF0 0x80 \b8
->0 byte&0xF0 0x90 \b9
->0 byte&0xF0 0xb0 \b*
->0 byte&0xF0 0xc0 \b#
->0 byte&0x0F 0 \b0
->0 byte&0x0F 1 \b1
->0 byte&0x0F 2 \b2
->0 byte&0x0F 3 \b3
->0 byte&0x0F 4 \b4
->0 byte&0x0F 5 \b5
->0 byte&0x0F 6 \b6
->0 byte&0x0F 7 \b7
->0 byte&0x0F 8 \b8
->0 byte&0x0F 9 \b9
->0 byte&0x0F 0xb \b*
->0 byte&0x0F 0xc \b#
-
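The digit table above simply prints both BCD nibbles of each byte, high nibble first, with 0xB and 0xC standing for '*' and '#'. The same decoding as a short Python sketch (function name is made up):

    def tr500_digits(data):
        # Decode packed BCD phone-number digits, two per byte; 0xB -> '*', 0xC -> '#'.
        symbols = {0xB: "*", 0xC: "#"}
        out = []
        for byte in data:
            for nibble in (byte >> 4, byte & 0x0F):
                if nibble <= 9:
                    out.append(str(nibble))
                elif nibble in symbols:
                    out.append(symbols[nibble])
        return "".join(out)

    print(tr500_digits(bytes([0x12, 0x34, 0x5B])))   # -> 12345*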
-# TR500 call recorder extended header
-# From: David Korth <gerbilsoft@gerbilsoft.com>
-# Contains dialed/incoming phone number and timestamp.
-# TODO: Verify byte 15.
-0 name tr500-call-recorder-header
->15 byte 2 (outgoing call:
->15 byte 4 (incoming call:
->1 byte 0xFF \bno number
->1 byte !0xFF
->>1 use tr500-call-recorder-digits
->>2 byte !0xFF
->>>2 use tr500-call-recorder-digits
->>3 byte !0xFF
->>>3 use tr500-call-recorder-digits
->>4 byte !0xFF
->>>4 use tr500-call-recorder-digits
->>5 byte !0xFF
->>>5 use tr500-call-recorder-digits
->>6 byte !0xFF
->>>6 use tr500-call-recorder-digits
->>7 byte !0xFF
->>>7 use tr500-call-recorder-digits
->>8 byte !0xFF
->>>8 use tr500-call-recorder-digits
->9 byte x \b, 20%02x
->10 byte x \b/%02x
->11 byte x \b/%02x
->12 byte x %02x
->13 byte x \b:%02x
->14 byte x \b:%02x)
-
-# AVI section extended by Patrik Radman <patrik+file-magic@iki.fi>
-#
-0 string RIFF RIFF (little-endian) data
-# RIFF Palette format
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Resource_Interchange_File_Format
-# Reference: https://worms2d.info/Palette_file
-# WAVE/AVI codec registry: https://www.iana.org/assignments/wave-avi-codec-registry/wave-avi-codec-registry.xml
->8 string PAL\ \b, palette
-!:mime application/x-riff
-# color palette by Microsoft Corporation
-!:ext pal
-# file size = chunk size + 8 in most cases
->>4 ulelong+8 x \b, %u bytes
-# Extended PAL Format
->>12 string plth \b, extended
-# Simple PAL Format
->>12 string data
-# data chunk size = color entries * 4 + 4 + sometimes extra (4) appended bytes
->>>16 ulelong x \b, data size %u
-# palVersion is always 0x0300
-#>>>20 leshort x \b, version %#4.4x
-# palNumEntries specifies the number of palette color entries
->>>22 uleshort x \b, %u entries
-# after the palPalEntry vector (number of color entries * 4 bytes)
->>>(22.s*4) ubequad x
-# a relative jump of 22 (8 + 16) bytes forward points past the end of the file or to
-# appended extra bytes, as in http://safecolours.rigdenage.com/set(ms).zip/Protan(MS).pal
->>>>&16 ubelong x \b, extra bytes
->>>>>&-4 ubelong >0 %#8.8x
-# RIFF Device Independent Bitmap format
-# URL: http://fileformats.archiveteam.org/wiki/RDIB
->8 string RDIB \b, device-independent bitmap
-!:ext rdi/dib
->>16 string BM
->>>30 leshort 12 \b, OS/2 1.x format
->>>>34 leshort x \b, %d x
->>>>36 leshort x %d
->>>30 leshort 64 \b, OS/2 2.x format
->>>>34 leshort x \b, %d x
->>>>36 leshort x %d
->>>30 leshort 40 \b, Windows 3.x format
->>>>34 lelong x \b, %d x
->>>>38 lelong x %d x
->>>>44 leshort x %d
-# RIFF MIDI format
-# URL: http://fileformats.archiveteam.org/wiki/RIFF_MIDI
->8 string RMID \b, MIDI
-# http://extension.nirsoft.net/rmi
-!:mime audio/mid
-#!:mime audio/x-rmid
-!:ext rmi
-# RIFF Multimedia Movie File format
-# URL: http://fileformats.archiveteam.org/wiki/RIFF_Multimedia_Movie
->8 string RMMP \b, multimedia movie
-!:mime video/x-mmm
-!:ext mmm
-# RIFF wrapper for MP3
->8 string RMP3 \b, MPEG Layer 3 audio
-#!:mime audio/x-rmp3
-# Microsoft WAVE format (*.wav)
-# URL: http://fileformats.archiveteam.org/wiki/WAV
->8 string WAVE \b, WAVE audio
-#!:mime audio/vnd.wave
-!:mime audio/x-wav
-# https://www.macdisk.com/macsigen.php
-#!:apple ????WAVE
-!:ext wav/wave
->>12 string >\0
->>>12 use riff-walk
-# TR500 call recorder extended header
->>16 ulelong 0x1E4
->>>20 leshort 0x11
->>>>256 byte 4
->>>>>256 use tr500-call-recorder-header
-# Update: Joerg Jenderek
-# lower case for Corel Draw version 8 Bidi
->8 string/c cdr
-# skip Corel CCX Clipart
->>8 string !CDRXcont
-# Corel Draw Picture
->>>0 use corel-draw
-# URL: http://fileformats.archiveteam.org/wiki/CCX_(Corel)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/ccx-corel.trid.xml
->>8 string =CDRXcont \b, Corel Clipart
-!:mime application/x-corel-ccx
-!:ext ccx
-# 3rd chunk data {Corel\040Binary\040Meta\040File}
-#>>>20 string x \b, 3rd '%-s'
->>>4 ulelong+8 x \b, %u bytes
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/CorelDRAW
-# Reference: http://fileformats.archiveteam.org/wiki/CorelDRAW
-# Picture templates created by newer software start with RIFF type CDT
->8 string CDT
->>0 use corel-draw
-# Picture templates with version 4.4
->8 string CDST
->>0 use corel-draw
-# patterns created by newer software start with RIFF type PAT
->8 string PAT
->>0 use corel-draw
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Corel_Designer
-# Reference: http://fileformats.archiveteam.org/wiki/Corel_Designer
->8 string DES
->>8 string !DESC
->>>0 use corel-des
-# Corel Draw templates with version 12.5 or Corel Designer illustration 12
->>8 string =DESC
-# MORE TESTS NEEDED HERE!
-#>>>0 use corel-des
-#>>>0 use corel-draw
->8 string NUNDROOT \b, Steinberg CuBase
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/MIDI_Instrument_Definition_File
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/i/idf.trid.xml
-# ftp://curscott.servebeer.com/Download/Apps/_Microsoft/
-# Visual%20Studio%206.0%20Professional%20MSDN/
-# SAMPLES/VC98/SDK/GRAPHICS/AUDIO/IDFEDIT/GLOBALS.H
-# Note: called "MIDI Instrument Definition File" by TrID
->8 string IDF\ LIST \b, MIDI Instrument Definition File
-!:mime audio/x-idf
-!:ext idf
-# 3rd chunk size like: 254 284 286 670
-#>>0x10 ulelong x \b, 3rd SIZE %u
-# for debugging purposes, display the next chunk like: MMAPhdr
-#>>0x14 string x \b, 4th "%-8.8s"
-#>>0x1C ulelong x \b, 4th SIZE 0x%x
-# probably MIDI instrument name like: "Universal-MIDI-Instrument" "instrument name" "General MIDI"
->>0x30 string x "%s"
-# look for inst TAG
->>0x31 search/256 inst by
-# probably the manufacturer name, like: "Unspecified Company" "NVidia Corporation"
->>>&0x24 string x "%s"
-# AVI == Audio Video Interleave
-# Reference: http://fileformats.archiveteam.org/wiki/AVI
->8 string AVI\040 \b, AVI
-# https://reposcope.com/mimetype/video/x-msvideo
-!:mime video/x-msvideo
-# https://www.iana.org/assignments/wave-avi-codec-registry/wave-avi-codec-registry.xml
-#!:mime video/vnd.avi
-!:ext avi/divx
->>12 string LIST
->>>20 string hdrlavih
->>>>&36 lelong x \b, %u x
->>>>&40 lelong x %u,
->>>>&4 lelong >1000000 <1 fps,
->>>>&4 lelong 1000000 1.00 fps,
->>>>&4 lelong 500000 2.00 fps,
->>>>&4 lelong 333333 3.00 fps,
->>>>&4 lelong 250000 4.00 fps,
->>>>&4 lelong 200000 5.00 fps,
->>>>&4 lelong 166667 6.00 fps,
->>>>&4 lelong 142857 7.00 fps,
->>>>&4 lelong 125000 8.00 fps,
->>>>&4 lelong 111111 9.00 fps,
->>>>&4 lelong 100000 10.00 fps,
-# ]9.9,10.1[
->>>>&4 lelong <101010
->>>>>&-4 lelong >99010
->>>>>>&-4 lelong !100000 ~10 fps,
->>>>&4 lelong 83333 12.00 fps,
-# ]11.9,12.1[
->>>>&4 lelong <84034
->>>>>&-4 lelong >82645
->>>>>>&-4 lelong !83333 ~12 fps,
->>>>&4 lelong 66667 15.00 fps,
-# ]14.9,15.1[
->>>>&4 lelong <67114
->>>>>&-4 lelong >66225
->>>>>>&-4 lelong !66667 ~15 fps,
->>>>&4 lelong 50000 20.00 fps,
->>>>&4 lelong 41708 23.98 fps,
->>>>&4 lelong 41667 24.00 fps,
-# ]23.9,24.1[
->>>>&4 lelong <41841
->>>>>&-4 lelong >41494
->>>>>>&-4 lelong !41708
->>>>>>>&-4 lelong !41667 ~24 fps,
->>>>&4 lelong 40000 25.00 fps,
-# ]24.9,25.1[
->>>>&4 lelong <40161
->>>>>&-4 lelong >39841
->>>>>>&-4 lelong !40000 ~25 fps,
->>>>&4 lelong 33367 29.97 fps,
->>>>&4 lelong 33333 30.00 fps,
-# ]29.9,30.1[
->>>>&4 lelong <33445
->>>>>&-4 lelong >33223
->>>>>>&-4 lelong !33367
->>>>>>>&-4 lelong !33333 ~30 fps,
->>>>&4 lelong <32224 >30 fps,
-##>>>>&4 lelong x (%lu)
-##>>>>&20 lelong x %lu frames,
-# Note: The tests below assume that the AVI has 1 or 2 streams,
-# "vids" optionally followed by "auds".
-# (Should cover 99.9% of all AVIs.)
-# assuming avih length = 56
->>>88 string LIST
->>>>96 string strlstrh
->>>>>108 string vids video:
->>>>>>&0 lelong 0 uncompressed
-# skip past vids strh
->>>>>>(104.l+108) string strf
->>>>>>>(104.l+132) lelong 1 RLE 8bpp
->>>>>>>(104.l+132) string/c anim Intel RDX
->>>>>>>(104.l+132) string/c aur2 AuraVision Aura 2
->>>>>>>(104.l+132) string/c aura AuraVision Aura
->>>>>>>(104.l+132) string/c bt20 Brooktree MediaStream
->>>>>>>(104.l+132) string/c btcv Brooktree Composite Video
->>>>>>>(104.l+132) string/c cc12 Intel YUV12
->>>>>>>(104.l+132) string/c cdvc Canopus DV
->>>>>>>(104.l+132) string/c cham Winnov Caviara Cham
->>>>>>>(104.l+132) string/c cljr Proprietary YUV 4 pixels
->>>>>>>(104.l+132) string/c cmyk Common Data Format in Printing
->>>>>>>(104.l+132) string/c cpla Weitek 4:2:0 YUV Planar
->>>>>>>(104.l+132) string/c cvid Cinepak
->>>>>>>(104.l+132) string/c cwlt Microsoft Color WLT DIB
->>>>>>>(104.l+132) string/c cyuv Creative Labs YUV
->>>>>>>(104.l+132) string/c d261 H.261
->>>>>>>(104.l+132) string/c d263 H.263
->>>>>>>(104.l+132) string/c duck TrueMotion 1.0
->>>>>>>(104.l+132) string/c dve2 DVE-2 Videoconferencing
->>>>>>>(104.l+132) string/c fljp Field Encoded Motion JPEG
->>>>>>>(104.l+132) string/c fvf1 Fractal Video Frame
->>>>>>>(104.l+132) string/c gwlt Microsoft Greyscale WLT DIB
->>>>>>>(104.l+132) string/c h260 H.260
->>>>>>>(104.l+132) string/c h261 H.261
->>>>>>>(104.l+132) string/c h262 H.262
->>>>>>>(104.l+132) string/c h263 H.263
->>>>>>>(104.l+132) string/c h264 H.264
->>>>>>>(104.l+132) string/c h265 H.265
->>>>>>>(104.l+132) string/c h266 H.266
->>>>>>>(104.l+132) string/c h267 H.267
->>>>>>>(104.l+132) string/c h268 H.268
->>>>>>>(104.l+132) string/c h269 H.269
->>>>>>>(104.l+132) string/c i263 Intel I.263
->>>>>>>(104.l+132) string/c i420 Intel Indeo 4
->>>>>>>(104.l+132) string/c ian Intel RDX
->>>>>>>(104.l+132) string/c iclb CellB Videoconferencing Codec
->>>>>>>(104.l+132) string/c ilvc Intel Layered Video
->>>>>>>(104.l+132) string/c ilvr ITU-T H.263+
->>>>>>>(104.l+132) string/c iraw Intel YUV Uncompressed
->>>>>>>(104.l+132) string/c iv30 Intel Indeo 3
->>>>>>>(104.l+132) string/c iv31 Intel Indeo 3.1
->>>>>>>(104.l+132) string/c iv32 Intel Indeo 3.2
->>>>>>>(104.l+132) string/c iv33 Intel Indeo 3.3
->>>>>>>(104.l+132) string/c iv34 Intel Indeo 3.4
->>>>>>>(104.l+132) string/c iv35 Intel Indeo 3.5
->>>>>>>(104.l+132) string/c iv36 Intel Indeo 3.6
->>>>>>>(104.l+132) string/c iv37 Intel Indeo 3.7
->>>>>>>(104.l+132) string/c iv38 Intel Indeo 3.8
->>>>>>>(104.l+132) string/c iv39 Intel Indeo 3.9
->>>>>>>(104.l+132) string/c iv40 Intel Indeo 4.0
->>>>>>>(104.l+132) string/c iv41 Intel Indeo 4.1
->>>>>>>(104.l+132) string/c iv42 Intel Indeo 4.2
->>>>>>>(104.l+132) string/c iv43 Intel Indeo 4.3
->>>>>>>(104.l+132) string/c iv44 Intel Indeo 4.4
->>>>>>>(104.l+132) string/c iv45 Intel Indeo 4.5
->>>>>>>(104.l+132) string/c iv46 Intel Indeo 4.6
->>>>>>>(104.l+132) string/c iv47 Intel Indeo 4.7
->>>>>>>(104.l+132) string/c iv48 Intel Indeo 4.8
->>>>>>>(104.l+132) string/c iv49 Intel Indeo 4.9
->>>>>>>(104.l+132) string/c iv50 Intel Indeo 5.0
->>>>>>>(104.l+132) string/c mpeg MPEG 1 Video Frame
->>>>>>>(104.l+132) string/c mjpg Motion JPEG
->>>>>>>(104.l+132) string/c mp42 Microsoft MPEG-4 v2
->>>>>>>(104.l+132) string/c mp43 Microsoft MPEG-4 v3
->>>>>>>(104.l+132) string/c mrca MR Codec
->>>>>>>(104.l+132) string/c mrle Run Length Encoding
->>>>>>>(104.l+132) string/c msvc Microsoft Video 1
->>>>>>>(104.l+132) string/c phmo Photomotion
->>>>>>>(104.l+132) string/c qpeq QPEG 1.1 Format Video
->>>>>>>(104.l+132) string/c rgbt RGBT
->>>>>>>(104.l+132) string/c rle4 Run Length Encoded 4
->>>>>>>(104.l+132) string/c rle8 Run Length Encoded 8
->>>>>>>(104.l+132) string/c rt21 Intel Indeo 2.1
->>>>>>>(104.l+132) string/c rvx Intel RDX
->>>>>>>(104.l+132) string/c sdcc Sun Digital Camera Codec
->>>>>>>(104.l+132) string/c sfmc Crystal Net SFM Codec
->>>>>>>(104.l+132) string/c smsc SMSC
->>>>>>>(104.l+132) string/c smsd SMSD
->>>>>>>(104.l+132) string/c splc Splash Studios ACM Audio Codec
->>>>>>>(104.l+132) string/c sqz2 Microsoft VXtreme Video Codec
->>>>>>>(104.l+132) string/c sv10 Sorenson Video R1
->>>>>>>(104.l+132) string/c tlms TeraLogic Motion Intraframe Codec A
->>>>>>>(104.l+132) string/c tlst TeraLogic Motion Intraframe Codec B
->>>>>>>(104.l+132) string/c tm20 TrueMotion 2.0
->>>>>>>(104.l+132) string/c tmic TeraLogic Motion Intraframe Codec 2
->>>>>>>(104.l+132) string/c tmot TrueMotion Video Compression
->>>>>>>(104.l+132) string/c tr20 TrueMotion RT 2.0
->>>>>>>(104.l+132) string/c ulti Ultimotion
->>>>>>>(104.l+132) string/c uyvy UYVY 4:2:2 byte ordering
->>>>>>>(104.l+132) string/c v422 24-bit YUV 4:2:2 format
->>>>>>>(104.l+132) string/c v655 16-bit YUV 4:2:2 format
->>>>>>>(104.l+132) string/c vcr1 ATI VCR 1.0
->>>>>>>(104.l+132) string/c vcr2 ATI VCR 2.0
->>>>>>>(104.l+132) string/c vcr3 ATI VCR 3.0
->>>>>>>(104.l+132) string/c vcr4 ATI VCR 4.0
->>>>>>>(104.l+132) string/c vcr5 ATI VCR 5.0
->>>>>>>(104.l+132) string/c vcr6 ATI VCR 6.0
->>>>>>>(104.l+132) string/c vcr7 ATI VCR 7.0
->>>>>>>(104.l+132) string/c vcr8 ATI VCR 8.0
->>>>>>>(104.l+132) string/c vcr9 ATI VCR 9.0
->>>>>>>(104.l+132) string/c vdct Video Maker Pro DIB
->>>>>>>(104.l+132) string/c vids YUV 4:2:2 CCIR 601 for V422
->>>>>>>(104.l+132) string/c vivo Vivo H.263
->>>>>>>(104.l+132) string/c vixl VIXL
->>>>>>>(104.l+132) string/c vlv1 VLCAP.DRV
->>>>>>>(104.l+132) string/c wbvc W9960
->>>>>>>(104.l+132) string/c x263 mmioFOURCC('X','2','6','3')
->>>>>>>(104.l+132) string/c xlv0 XL Video Decoder
->>>>>>>(104.l+132) string/c y211 YUV 2:1:1 Packed
->>>>>>>(104.l+132) string/c y411 YUV 4:1:1 Packed
->>>>>>>(104.l+132) string/c y41b YUV 4:1:1 Planar
->>>>>>>(104.l+132) string/c y41p PC1 4:1:1
->>>>>>>(104.l+132) string/c y41t PC1 4:1:1 with transparency
->>>>>>>(104.l+132) string/c y42b YUV 4:2:2 Planar
->>>>>>>(104.l+132) string/c y42t PC1 4:2:2 with transparency
->>>>>>>(104.l+132) string/c yc12 Intel YUV12 Codec
->>>>>>>(104.l+132) string/c yuv8 Winnov Caviar YUV8
->>>>>>>(104.l+132) string/c yuv9 YUV9
->>>>>>>(104.l+132) string/c yuy2 YUY2 4:2:2 byte ordering packed
->>>>>>>(104.l+132) string/c yuyv BI_YUYV, Canopus
->>>>>>>(104.l+132) string/c fmp4 FFMpeg MPEG-4
->>>>>>>(104.l+132) string/c div3 DivX 3
->>>>>>>>112 string/c div3 Low-Motion
->>>>>>>>112 string/c div4 Fast-Motion
->>>>>>>(104.l+132) string/c divx DivX 4
->>>>>>>(104.l+132) string/c dx50 DivX 5
->>>>>>>(104.l+132) string/c xvid XviD
->>>>>>>(104.l+132) string/c h264 H.264
->>>>>>>(104.l+132) string/c wmv3 Windows Media Video 9
->>>>>>>(104.l+132) string/c h264 X.264 or H.264
->>>>>>>(104.l+132) lelong 0
-##>>>>>>>(104.l+132) string x (%.4s)
-# skip past first (video) LIST
->>>>(92.l+96) string LIST
->>>>>(92.l+104) string strlstrh
->>>>>>(92.l+116) string auds \b, audio:
-# auds strh length = 56:
->>>>>>>(92.l+172) string strf
->>>>>>>>(92.l+180) leshort 0x0001 uncompressed PCM
->>>>>>>>(92.l+180) leshort 0x0002 ADPCM
->>>>>>>>(92.l+180) leshort 0x0006 aLaw
->>>>>>>>(92.l+180) leshort 0x0007 uLaw
->>>>>>>>(92.l+180) leshort 0x0050 MPEG-1 Layer 1 or 2
->>>>>>>>(92.l+180) leshort 0x0055 MPEG-1 Layer 3
->>>>>>>>(92.l+180) leshort 0x2000 Dolby AC3
->>>>>>>>(92.l+180) leshort 0x0161 DivX
-##>>>>>>>>(92.l+180) leshort x (%#.4x)
->>>>>>>>(92.l+182) leshort 1 (mono,
->>>>>>>>(92.l+182) leshort 2 (stereo,
->>>>>>>>(92.l+182) leshort >2 (%d channels,
->>>>>>>>(92.l+184) lelong x %d Hz)
-# auds strh length = 64:
->>>>>>>(92.l+180) string strf
->>>>>>>>(92.l+188) leshort 0x0001 uncompressed PCM
->>>>>>>>(92.l+188) leshort 0x0002 ADPCM
->>>>>>>>(92.l+188) leshort 0x0055 MPEG-1 Layer 3
->>>>>>>>(92.l+188) leshort 0x2000 Dolby AC3
->>>>>>>>(92.l+188) leshort 0x0161 DivX
-##>>>>>>>>(92.l+188) leshort x (%#.4x)
->>>>>>>>(92.l+190) leshort 1 (mono,
->>>>>>>>(92.l+190) leshort 2 (stereo,
->>>>>>>>(92.l+190) leshort >2 (%d channels,
->>>>>>>>(92.l+192) lelong x %d Hz)
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/VDR_(VirtualDub)
-# Reference: http://sourceforge.net/projects/virtualdub/files/virtualdub-win/
-# 1.10.4.35491/VirtualDub-1.10.4-src.7z/src/vdremote/Main.cpp
-# VirtualDub link handler
->8 string VDRM \b, VirtualDub link
-!:mime video/x-vdr
-!:ext vdr
->>12 string PATH \b, PATH
-# remote-path to video file
->>16 pstring/l x %s
-# Animated Cursor format
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Windows_Animated_Cursor
-# Reference: https://www.gdgsoft.com/anituner/help/aniformat.htm
->8 string ACON \b, animated cursor
-!:mime application/x-navi-animation
-# http://extension.nirsoft.net/ani
-#!:mime image/ani
-!:ext ani
-# INAM tag followed by length of title
->>24 string INAM
->>>28 pstring/l x "%s"
-# IART tag followed by length of author
->>>(28.l+32) ubelong 0x49415254
->>>>&0 pstring/l x %s
-# SoundFont 2 <mpruett@sgi.com>
-# URL: http://fileformats.archiveteam.org/wiki/SoundFont_2.0
->8 string sfbk \b, SoundFont/Bank
-!:mime audio/x-sfbk
-!:ext sf2
-# MPEG-1 wrapped in a RIFF, apparently
-# URL: http://file.fyicenter.com/17_Video_.DAT_File_Extension_for_VCD_Files.html
->8 string CDXA \b, wrapped MPEG-1 (CDXA)
-!:mime video/x-cdxa
-!:ext mpg/dat
-# URL: http://fileformats.archiveteam.org/wiki/4X_IMA_ADPCM
->8 string 4XMV \b, 4X Movie file
-!:mime video/x-4xmv
-!:ext 4xm/4xa
-# AMV-type AVI file: https://wiki.multimedia.cx/index.php?title=AMV
->8 string AMV\040 \b, AMV
-# http://fileformats.archiveteam.org/wiki/MTV_Video_(.AMV)
-!:mime video/x-amv
-!:ext amv
-#!:ext amv/mtv
-# URL: http://fileformats.archiveteam.org/wiki/WebP
->8 string WEBP \b, Web/P image
-!:mime image/webp
-!:ext webp
-!:strength + 50
->>12 use riff-walk
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/RIFF_MIDS
->8 string MIDS \b, MIDI Stream
-!:mime audio/x-mids
-!:ext mds
-# From: Joerg Jenderek
-# URL: http://mark0.net/soft-trid-e.html
-# Reference: http://fileformats.archiveteam.org/wiki/Trd_(TRID)
->8 string TRID \b, TrID defs package
-!:mime application/x-trid-trd
-!:ext trd
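For reference, the long run of avih tests earlier in this RIFF entry converts the first avih field (dwMicroSecPerFrame, microseconds per frame) into a frame rate: exact values get a fixed label and anything inside an open interval such as ]9.9,10.1[ fps is reported approximately. The same arithmetic as an illustrative Python sketch, with the microsecond bounds copied from the > and < tests above:

    def describe_fps(usec_per_frame):
        # Mirror the dwMicroSecPerFrame tests: exact labels first, then ~N fps ranges.
        exact = {1000000: 1, 500000: 2, 333333: 3, 250000: 4, 200000: 5, 166667: 6,
                 142857: 7, 125000: 8, 111111: 9, 100000: 10, 83333: 12, 66667: 15,
                 50000: 20, 41708: 23.98, 41667: 24, 40000: 25, 33367: 29.97, 33333: 30}
        if usec_per_frame in exact:
            return f"{exact[usec_per_frame]:.2f} fps"
        approx = ((10, 99010, 101010), (12, 82645, 84034), (15, 66225, 67114),
                  (24, 41494, 41841), (25, 39841, 40161), (30, 33223, 33445))
        for target, lo, hi in approx:
            if lo < usec_per_frame < hi:   # strict bounds, matching the magic's > and <
                return f"~{target} fps"
        if usec_per_frame < 32224:
            return ">30 fps"
        return f"{1_000_000 / usec_per_frame:.2f} fps"

    print(describe_fps(41708))   # -> 23.98 fps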
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/CorelDRAW
-# Reference: http://fileformats.archiveteam.org/wiki/CorelDRAW
-# Note: Since version 3 CorelDraw Pictures are RIFF based
-# but data chunks remain proprietary.
-# In versions 14 and 15 the picture is packed as "content/riffData.cdr" and
-# since version 16 as "content/root.dat" inside a ZIP container
-# TODO: distinguish templates with version 12.5 from Designer illustration 12
-# display information of RIFF based Corel Draw pictures, templates and patterns
-0 name corel-draw
-# display second chunk for debugging
-#>8 string x \b, [8]=%.8s
->0 string x \b, Corel Draw
-#!:mime image/x-coreldraw
-!:mime application/vnd.corel-draw
-# used by newer picture templates
->>8 string CDT
-# used by templates with newer versions since 16
->>>12 string =fver Picture template (root.dat)
-!:ext dat
-# used by templates from older versions that carry a vrsn tag
->>>12 string !fver
-# used by templates with older versions 14-15
->>>>11 string >E Picture template (riffData.cdr)
-!:ext cdr
-# used by templates with older versions 11-13
->>>>11 string <F Picture template
-!:ext cdt/cdrt
-# used by older templates with version 4.4
->>8 string CDST Picture template
-!:ext cdt
-# used by templates with version 12.5
->>8 string DESC Picture template
-!:ext cdt
-# used by newer patterns with version 22
->>8 string PAT Pattern
-!:ext dat
-# remaining older templates, patterns, drawings
->>8 default x
-# pattern with old version 4.y
->>>26 ulelong =0x0000206C Pattern
-!:ext pat
-# pattern with newer versions
->>>26 ulelong =0x00000D2C Pattern
-!:ext pat
-# remaining older templates or pictures
->>>26 default x
-# used by older versions 5 - 15
->>>>12 string =vrsn
-# a 4th chunk size not equal to 282Ch is only found for CDR
->>>>>26 ulelong !0x0000282c Picture
-!:ext cdr
->>>>>26 default x Picture or template
-!:ext cdr/cdt
-# used by newer versions since 16
->>>>12 string =fver Picture (root.dat)
-!:ext dat
-# version marked by 1 ASCII char: space~3, ... , F~15, ... , N~22, ... R~22 template
->11 string x \b, version
->11 string >\040 '%-.1s'
->0 use corel-version
->4 ulelong+8 x \b, %u bytes
-#
-# display numeric version of RIFF based Corel after 3rd RIFF tag
-0 name corel-version
-# for debugging purposes; vrsn for short content, fver for 16-byte size
-#>12 string x \b, TAG "%-4.4s"
-# 1st data chunk length 2 implies short content version
->16 ulelong 2
-# the vrsn chunk's short content is interpreted as MajorVersion * 100 + MinorVersion
->>20 uleshort/100 x %u
->>20 uleshort%100 >0 \b.%u
-# for debugging purposes, display the next chunk like: DISP LIST
-#>>22 string x \b, 4th "%-4.4s"
-#>>26 ulelong x \b, 4th SIZE %#x
-# for debugging purposes, display the 5th chunk like: LIST DISP ccmm osfp
-#>>(26.l+30) string x \b, 5th "%-4.4s"
-# 1st data chunk length 10h implies 16 byte content with version info
->16 ulelong 0x10
->>34 ubyte x %u
->>>33 ubyte >0 \b.%u
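In other words, the two-byte vrsn payload stores MajorVersion * 100 + MinorVersion, which the uleshort/100 and uleshort%100 lines above split apart. Purely as an illustration (the input values are made up):

    def corel_vrsn(word):
        # Split the little-endian vrsn value into major[.minor], as /100 and %100 do above.
        major, minor = divmod(word, 100)
        return f"{major}.{minor}" if minor else str(major)

    print(corel_vrsn(1200))   # -> 12
    print(corel_vrsn(1205))   # -> 12.5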
-# display information of RIFF based Corel Design formats
-0 name corel-des
-# display second chunk for debugging
-#>8 string x \b, [8]=%.8s
->12 string x \b, Corel DESIGNER
-!:mime image/x-corel-des
-#!:mime application/x-vnd.corel.designer.document
-# used by Corel Designer with newer versions since 16
->12 string =fver graphics (root.dat)
-!:ext dat
-# used by Corel Designer templates from older versions that carry a vrsn tag
->12 string !fver
-# used by Corel Designer with versions 14-15
->>11 string >D graphics (riffData.cdr)
-!:ext cdr
-# used by Corel Designer with versions 10-12
->>11 string <E graphics
-!:ext des
-# version indicated by last ASCII char of second chunk tag
->11 string x \b, version '%-.1s'
-# but the vrsn short content is not always a version indicator
-# exceptions: 'A'~11.4 'B'~12 'C'~12.5
->11 string >D
->>0 use corel-version
-# for debugging purposes, display the next chunk like: DISP LIST
-#>>22 string x \b, 4th "%-4.4s"
-#>>26 ulelong x \b, 4th SIZE %#x
-# for debugging purposes, display the 5th chunk like: LIST osfp
-#>>(26.l+30) string x \b, 5th "%-4.4s"
->4 ulelong+8 x \b, %u bytes
-
-#
-# XXX - some of the below may only appear in little-endian form.
-#
-# Also "MV93" appears to be for one form of Macromedia Director
-# files, and "GDMF" appears to be another multimedia format.
-#
-0 string RIFX RIFF (big-endian) data
-# RIFF Palette format
->8 string PAL \b, palette
->>16 beshort x \b, version %d
->>18 beshort x \b, %d entries
-# RIFF Device Independent Bitmap format
->8 string RDIB \b, device-independent bitmap
->>16 string BM
->>>30 beshort 12 \b, OS/2 1.x format
->>>>34 beshort x \b, %d x
->>>>36 beshort x %d
->>>30 beshort 64 \b, OS/2 2.x format
->>>>34 beshort x \b, %d x
->>>>36 beshort x %d
->>>30 beshort 40 \b, Windows 3.x format
->>>>34 belong x \b, %d x
->>>>38 belong x %d x
->>>>44 beshort x %d
-# RIFF MIDI format
->8 string RMID \b, MIDI
-# RIFF Multimedia Movie File format
->8 string RMMP \b, multimedia movie
-# Microsoft WAVE format (*.wav)
->8 string WAVE \b, WAVE audio
->>20 leshort 1 \b, Microsoft PCM
->>>34 leshort >0 \b, %d bit
->>22 beshort =1 \b, mono
->>22 beshort =2 \b, stereo
->>22 beshort >2 \b, %d channels
->>24 belong >0 %d Hz
-# big-endian Corel Draw Picture not tested against real examples
-#>8 string CDRA \b, Corel Draw Picture
-#>8 string CDR6 \b, Corel Draw Picture, version 6
->8 string CDR
->>0 use \^corel-draw
-
-# AVI == Audio Video Interleave
->8 string AVI\040 \b, AVI
-# Animated Cursor format
->8 string ACON \b, animated cursor
-# Notation Interchange File Format (big-endian only)
->8 string NIFF \b, Notation Interchange File Format
-# SoundFont 2 <mpruett@sgi.com>
->8 string sfbk SoundFont/Bank
-
-#------------------------------------------------------------------------------
-# Sony Wave64
-# see http://www.vcs.de/fileadmin/user_upload/MBS/PDF/Whitepaper/Informations_about_Sony_Wave64.pdf
-# 128 bit RIFF-GUID { 66666972-912E-11CF-A5D6-28DB04C10000 } in little-endian
-0 string riff\x2E\x91\xCF\x11\xA5\xD6\x28\xDB\x04\xC1\x00\x00 Sony Wave64 RIFF data
-# 128-bit GUID + total file size (64 bits), so 24 bytes
-# then WAVE-GUID { 65766177-ACF3-11D3-8CD1-00C04F8EDB8A }
->24 string wave\xF3\xAC\xD3\x11\x8C\xD1\x00\xC0\x4F\x8E\xDB\x8A \b, WAVE 64 audio
-!:mime audio/x-w64
-# FMT-GUID { 20746D66-ACF3-11D3-8CD1-00C04F8EDB8A }
->>40 search/256 fmt\x20\xF3\xAC\xD3\x11\x8C\xD1\x00\xC0\x4F\x8E\xDB\x8A \b
->>>&10 leshort =1 \b, mono
->>>&10 leshort =2 \b, stereo
->>>&10 leshort >2 \b, %d channels
->>>&12 lelong >0 %d Hz
-
-#------------------------------------------------------------------------------
-# MBWF/RF64
-# see EBU TECH 3306 https://tech.ebu.ch/docs/tech/tech3306-2009.pdf
-0 string RF64\xff\xff\xff\xffWAVEds64 MBWF/RF64 audio
-!:mime audio/x-wav
->40 search/256 fmt\x20 \b
->>&6 leshort =1 \b, mono
->>&6 leshort =2 \b, stereo
->>&6 leshort >2 \b, %d channels
->>&8 lelong >0 %d Hz
diff --git a/contrib/libs/libmagic/magic/Magdir/ringdove b/contrib/libs/libmagic/magic/Magdir/ringdove
deleted file mode 100644
index 38dd4bfe66..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ringdove
+++ /dev/null
@@ -1,45 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: ringdove,v 1.1 2022/08/16 12:04:30 christos Exp $
-# ringdove: file(1) magic for RingdoveEDA data files
-
-# librnd and global
-0 regex/128l ha:rnd-menu-v[0-9]+[\ \t\r\n]*[{] librnd menu system (lihata)
-0 regex/128l ha:rnd-menu-patch-v[0-9]+[\ \t\r\n]*[{] librnd menu patch (lihata)
-0 regex/128l ha:coraleda-project-v[0-9]+[\ \t\r\n]*[{] CoralEDA/Ringdove project file (lihata)
-0 regex/128l ha:ringdove-project-v[0-9]+[\ \t\r\n]*[{] Ringdove project file (lihata)
-
-# pcb-rnd
-0 regex/128l ha:pcb-rnd-board-v[0-9]+[\ \t\r\n]*[{] pcb-rnd board file (lihata)
-0 regex/128l li:pcb-rnd-subcircuit-v[0-9]+[\ \t\r\n]*[{] pcb-rnd subcircuit/footprint file (lihata)
-0 regex/128l ha:pcb-rnd-buffer-v[0-9]+[\ \t\r\n]*[{] pcb-rnd paste buffer content (lihata)
-0 regex/128l li:pcb-rnd-conf-v[0-9]+[\ \t\r\n]*[{] pcb-rnd configuration (lihata)
-0 regex/128l ha:pcb-rnd-drc-query-v[0-9]+[\ \t\r\n]*[{] pcb-rnd drc query string (lihata)
-0 regex/128l li:pcb-rnd-font-v[0-9]+[\ \t\r\n]*[{] pcb-rnd vector font (lihata)
-0 regex/128l ha:pcb-rnd-log-v[0-9]+[\ \t\r\n]*[{] pcb-rnd message log dump (lihata)
-0 regex/128l ha:pcb-rnd-padstack-v[0-9]+[\ \t\r\n]*[{] pcb-rnd padstack (lihata)
-0 regex/128l li:pcb-rnd-view-list-v[0-9]+[\ \t\r\n]*[{] pcb-rnd view list (lihata)
-0 regex/128l li:view-list-v[0-9]+[\ \t\r\n]*[{] pcb-rnd view list (lihata)
-0 search Netlist(Freeze) pcb-rnd or gEDA/PCB netlist forward annotation action script
-
-# sch-rnd (cschem data model)
-0 regex/128l li:cschem-buffer-v[0-9]+[\ \t\r\n]*[{] sch-rnd/cschem buffer content (lihata)
-0 regex/128l li:sch-rnd-conf-v[0-9]+[\ \t\r\n]*[{] sch-rnd configuration (lihata)
-0 regex/128l ha:std_devmap.v[0-9]+[\ \t\r\n]*[{] sch-rnd devmap (device mapping; lihata)
-0 regex/128l li:cschem-group-v[0-9]+[\ \t\r\n]*[{] sch-rnd/cschem group or symbol (lihata)
-0 regex/128l ha:cschem-sheet-v[0-9]+[\ \t\r\n]*[{] sch-rnd/cschem schematic sheet (lihata)
-
-# tEDAx (modular format)
-0 regex/1l tEDAx[\ \t\r\n]v tEDAx (Trivial EDA eXchange)
->0 regex begin\ symbol\ v with schematic symbol
->0 regex begin\ board\ v with Printed Circuit Board
->0 regex begin\ route_req\ v with PCB routing request
->0 regex begin\ route_res\ v with PCB routing result
->0 regex begin\ camv_layer\ v with camv-rnd exported layer
->0 regex begin\ netlist\ v with netlist
->0 regex begin\ backann\ v with Ringdove EDA back annotation
->0 regex begin\ footprint\ v with PCB footprint
->0 regex begin\ drc\ v with PCB DRC script
->0 regex begin\ drc_query_rule\ v with pcb-rnd drc_query rules
->0 regex begin\ drc_query_def\ v with pcb-rnd drc_query value/config definitions
->0 regex begin\ etest\ v with PCB electric test
-
diff --git a/contrib/libs/libmagic/magic/Magdir/rpi b/contrib/libs/libmagic/magic/Magdir/rpi
deleted file mode 100644
index 0d213b5357..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rpi
+++ /dev/null
@@ -1,52 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rpi,v 1.3 2022/04/02 14:39:34 christos Exp $
-# rpi: file(1) magic for Raspberry Pi images
--44 lelong 0
->4 lelong 0
->>8 lelong 1
->>12 lelong 4
->>>16 string 283x
->>>>20 lelong 1
->>>>>24 lelong 4
->>>>>>28 string DTOK
->>>>>>>32 lelong 44
->>>>>>>>36 lelong 4
->>>>>>>>>40 string RPTL Raspberry PI kernel image
-
--56 lelong 0
->4 lelong 0
->>8 lelong 1
->>12 lelong 4
->>>16 string 283x
->>>>20 lelong 1
->>>>>24 lelong 4
->>>>>>28 string DTOK
->>>>>>>32 lelong 1
->>>>>>>>36 lelong 4
->>>>>>>>>40 string DDTK8
->>>>>>>>>>48 lelong 4
->>>>>>>>>>>52 string RPTL Raspberry PI kernel image
-
-# From: Joerg Jenderek
-# URL: https://www.raspberrypi.com/documentation/computers/raspberry-pi.html
-# #raspberry-pi-4-boot-eeprom
-# Reference: https://github.com/raspberrypi/rpi-eeprom/blob/master/rpi-eeprom-config
-# Note: starts with the same magic as the BIOS (ia32) ROM Extension handled by ./intel,
-# masked with MAGIC_MASK and then compared against MAGIC
-0 belong&0xFFffF00F 0x55aaF00F Raspberry PI EEPROM
-#!:mime application/octet-stream
-!:mime application/x-raspberry-eeprom
-# like: pieeprom-2020-09-03.bin
-!:ext bin
-# a 32 bit offset to the next section like: 000184d4 000184c8 00018534 ... 0000bb84 0000bbd4 0000bbd4
->4 ubelong x \b, offset %8.8x
-#>(4.L) ubelong x NEXT=%8.8x
-# self.length
->8 ubelong !0 \b, length %x
-# self.filename
->12 string >0 \b, "%s"
-# length is zero
->8 ubelong =0
-# if the length is zero, the 2nd section magic here can be zero; this means section parsing is done
->>8 ubelong !0 \b, 2nd MAGIC=%8.8x
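The EEPROM tests above read one section header: a masked 32-bit magic, what the comments describe as an offset to the next section at +4, a length at +8 and a NUL-terminated filename at +12. A small Python sketch under exactly those assumptions (not the rpi-eeprom tooling itself; the 32-byte filename window is a guess):

    import struct

    def rpi_eeprom_first_section(data):
        # Report the fields the magic entry above prints for the first section (illustrative).
        if len(data) < 16:
            return None
        magic, next_offset, length = struct.unpack_from(">III", data, 0)
        if magic & 0xFFFFF00F != 0x55AAF00F:
            return None
        name = data[12:44].split(b"\0", 1)[0].decode("ascii", "replace")
        return {"next_offset": next_offset, "length": length, "filename": name}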
diff --git a/contrib/libs/libmagic/magic/Magdir/rpm b/contrib/libs/libmagic/magic/Magdir/rpm
deleted file mode 100644
index 9a795f841a..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rpm
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rpm,v 1.12 2013/01/11 16:45:23 christos Exp $
-#
-# RPM: file(1) magic for Red Hat Packages, from Erik Troan (ewt@redhat.com)
-#
-0 belong 0xedabeedb RPM
-!:mime application/x-rpm
->4 byte x v%d
->5 byte x \b.%d
->6 beshort 1 src
->6 beshort 0 bin
->>8 beshort 1 i386/x86_64
->>8 beshort 2 Alpha/Sparc64
->>8 beshort 3 Sparc
->>8 beshort 4 MIPS
->>8 beshort 5 PowerPC
->>8 beshort 6 68000
->>8 beshort 7 SGI
->>8 beshort 8 RS6000
->>8 beshort 9 IA64
->>8 beshort 10 Sparc64
->>8 beshort 11 MIPSel
->>8 beshort 12 ARM
->>8 beshort 13 MiNT
->>8 beshort 14 S/390
->>8 beshort 15 S/390x
->>8 beshort 16 PowerPC64
->>8 beshort 17 SuperH
->>8 beshort 18 Xtensa
->>8 beshort 255 noarch
-
-# delta RPM, from Daniel Novotny (dnovotny@redhat.com)
-0 string drpm Delta RPM
-!:mime application/x-rpm
->12 string x %s
->>8 beshort 11 MIPSel
->>8 beshort 12 ARM
->>8 beshort 13 MiNT
->>8 beshort 14 S/390
->>8 beshort 15 S/390x
->>8 beshort 16 PowerPC64
->>8 beshort 17 SuperH
->>8 beshort 18 Xtensa
->>10 string x %s
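The RPM lead decoded above is a fixed header: the 0xedabeedb magic, major/minor version bytes at offsets 4 and 5, a big-endian type at 6 (1 = source, 0 = binary) and an architecture number at 8. A compact Python restatement, with the architecture table limited to the values listed above (illustrative only; real rpm tooling reads far more than the lead):

    import struct

    RPM_ARCH = {1: "i386/x86_64", 2: "Alpha/Sparc64", 3: "Sparc", 4: "MIPS", 5: "PowerPC",
                6: "68000", 7: "SGI", 8: "RS6000", 9: "IA64", 10: "Sparc64", 11: "MIPSel",
                12: "ARM", 13: "MiNT", 14: "S/390", 15: "S/390x", 16: "PowerPC64",
                17: "SuperH", 18: "Xtensa", 255: "noarch"}

    def rpm_lead(data):
        # Summarize an RPM lead the way the magic entry above does.
        if len(data) < 10 or data[:4] != b"\xed\xab\xee\xdb":
            return None
        rpm_type, arch = struct.unpack(">HH", data[6:10])
        kind = "src" if rpm_type == 1 else "bin"
        return f"RPM v{data[4]}.{data[5]} {kind} {RPM_ARCH.get(arch, str(arch))}"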
diff --git a/contrib/libs/libmagic/magic/Magdir/rpmsg b/contrib/libs/libmagic/magic/Magdir/rpmsg
deleted file mode 100644
index cbbbb2bc4f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rpmsg
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rpmsg,v 1.1 2019/04/19 00:40:47 christos Exp $
-# rpmsg: file(1) magic for restricted-permission messages (or "rights-protected" messages)
-# see https://en.wikipedia.org/wiki/Rpmsg
-
-0 string \x76\xe8\x04\x60\xc4\x11\xe3\x86 rpmsg Restricted Permission Message
diff --git a/contrib/libs/libmagic/magic/Magdir/rst b/contrib/libs/libmagic/magic/Magdir/rst
deleted file mode 100644
index 0df15b8fa5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rst
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rst,v 1.4 2023/07/27 18:26:32 christos Exp $
-# rst: ReStructuredText http://docutils.sourceforge.net/rst.html
-0 search/256 \=\=
-!:strength + 30
->&0 regex/256 \^[\=]+$
->>&0 search/512 :Author: ReStructuredText file
->>&0 search/512 \012Authors: ReStructuredText file
->>&0 search/512 \012Author: ReStructuredText file
->>&0 default x
->>>&0 regex/512 \^\\.\\.[A-Za-z] ReStructuredText file
-!:ext rst
diff --git a/contrib/libs/libmagic/magic/Magdir/rtf b/contrib/libs/libmagic/magic/Magdir/rtf
deleted file mode 100644
index 48a1f28af4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rtf
+++ /dev/null
@@ -1,94 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rtf,v 1.9 2020/12/12 20:01:47 christos Exp $
-# rtf: file(1) magic for Rich Text Format (RTF)
-#
-# Duncan P. Simpson, D.P.Simpson@dcs.warwick.ac.uk
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Rich_Text_Format
-# Reference: http://www.snake.net/software/RTF/RTF-Spec-1.7.rtf
-# http://www.kleinlercher.at/tools/Windows_Protocols/Word2007RTFSpec9.pdf
-0 string {\\rtf
-# skip DROID fmt-355-signature-id-522.rtf by looking for valid version
->5 ubyte !0xAB
-# skip also \ in DROID fmt-50-signature-id-158.rtf by looking for valid version
->>5 ubyte !0x5C Rich Text Format data
-!:mime text/rtf
-!:apple ????RTF
-!:ext rtf
->>>0 use rtf-info
-# display information like version, language and code page of RTF
-0 name rtf-info
-# mostly 1; 2 for newer Pocket Word documents, a space for test files like fdo78502.rtf, { for some urtf
->5 ubyte !0x7b \b, version %c
-# The keyword for the character set must precede any text or most other control words
->6 string \\mac \b, Apple Macintosh
->6 string \\pc
-# control word \pca
->>9 ubyte =0x61 \b, IBM PS/2, code page 850
->>9 ubyte !0x61 \b, IBM PC, code page 437
-# unknown character set or ANSI later after control words like
-# \adeflang1025 \info \title \author \category \manager
-# "Burow, Steffanie - Im Tal des Schneeleoparden.rtf"
-#>6 search/105 \\ansi \b, ANSI
->6 search/502 \\ansi \b, ANSI
->6 default x \b, unknown character set
-# look for explicit codepage keyword
-# "Burow, Steffanie - Im Tal des Schneeleoparden.rtf"
-#>5 search/110 \\ansicpg
->5 search/500 \\ansicpg
-# skip unknown or buggy codepage string 0 like in fdo78502.rtf
->>&0 ubyte !0x30 \b, code page
-# codepage string: 437~United States IBM, ..., 1252~WesternEuropean, ..., 57011~Punjabi
->>>&-1 string x %-.3s
-# skip space or \ and display possible 4th digit of code page string
->>>&2 ubyte >0x2F
->>>>&-1 ubyte <0x3A \b%c
-# possible 5th digit of code page string
->>>>>&0 ubyte >0x2F
->>>>>>&-1 ubyte <0x3A \b%c
-# look again at version byte to use default clause
->5 ubyte x
-# Default language ID for South Asian/Middle Eastern text
-# language ID: 1025, ..., 1065~Persian, ..., 2057~English_UnitedKingdom, ..., 58380~French_NorthAfrica
-# Readme-0.72-Persian.rtf
-#>6 search/1 \\adeflang \b, default middle east language ID
->>6 search/497 \\adeflang \b, default middle east language ID
-# https://docs.microsoft.com/en-us/openspecs/office_standards/ms-oe376/6c085406-a698-4e12-9d4d-c3b0ee3dbc4a
->>>&0 string x %.4s
-# skip \ and NL and show possible 5th digit of language string
->>>&4 ubyte >0x2F
->>>>&-1 ubyte <0x3A \b%c
-# else look for default language to be used when the \plain control word is encountered
->>6 default x
-# "Burow, Steffanie - Im Tal des Schneeleoparden.rtf"
-#>>>6 search/127 \\deflang
->>>6 search/505 \\deflang
->>>>&0 string >0 \b, default language ID %-.4s
-# possible 5th digit of language string
->>>>&4 ubyte >0x2F
->>>>>&-1 ubyte <0x3A \b%c
-
-# Reference: http://latex2rtf.sourceforge.net/rtfspec_63.html
-# Note: no real world example found
-0 string {\\urtf Rich Text Format unicoded data
-!:mime text/rtf
-#!:apple ????RTF
-!:ext rtf
->1 use rtf-info
-
-# URL: https://en.wikipedia.org/wiki/Microsoft_Word
-# Reference: http://fileformats.archiveteam.org/wiki/Microsoft_Word
-# Note: called by TrID "Pocket Word document"
-# by PlanMaker "Pocket Word-Handheld PC" for pwd
-# by PlanMaker "Pocket Word-Pocket PC" for psw
-0 string {\\pwd Pocket Word document or template
-# by SoftMaker Office http://extension.nirsoft.net/pwd
-#!:mime application/msword
-# https://reposcope.com/mimetype/application/x-pocket-word
-!:mime application/x-pocket-word
-# PWD for Handheld PC variant and PSW for Pocket PC variant
-# PWT for template
-!:ext pwd/psw/pwt
->0 use rtf-info
-
diff --git a/contrib/libs/libmagic/magic/Magdir/ruby b/contrib/libs/libmagic/magic/Magdir/ruby
deleted file mode 100644
index 9e67a3e22d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ruby
+++ /dev/null
@@ -1,55 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ruby,v 1.10 2019/07/21 09:40:17 christos Exp $
-# ruby: file(1) magic for Ruby scripting language
-# URL: https://www.ruby-lang.org/
-# From: Reuben Thomas <rrt@sc3d.org>
-
-# Ruby scripts
-0 search/1/w #!\ /usr/bin/ruby Ruby script text executable
-!:strength + 15
-!:mime text/x-ruby
-0 search/1/w #!\ /usr/local/bin/ruby Ruby script text executable
-!:strength + 15
-!:mime text/x-ruby
-0 search/1 #!/usr/bin/env\ ruby Ruby script text executable
-!:strength + 15
-!:mime text/x-ruby
-0 search/1 #!\ /usr/bin/env\ ruby Ruby script text executable
-!:strength + 15
-!:mime text/x-ruby
-
-# What looks like ruby, but does not have a shebang
-# (modules and such)
-# From: Lubomir Rintel <lkundrak@v3.sk>
-0 search/8192 require
->0 regex \^[[:space:]]*require[[:space:]]'[A-Za-z_/.]+'
->>0 regex def\ [a-z]|\ do$
->>>&0 regex \^[[:space:]]*end([[:space:]]+[;#].*)?$ Ruby script text
-!:strength + 30
-!:mime text/x-ruby
-0 regex \^[[:space:]]*(class|module)[[:space:]][A-Z]
->0 regex (modul|includ)e\ [A-Z]|def\ [a-z]
->>&0 regex \^[[:space:]]*end([[:space:]]+[;#].*)?$ Ruby script text
-!:strength + 30
-!:mime text/x-ruby
-# Classes with no modules or defs, beats simple ASCII
-0 regex \^[[:space:]]*(class|module)[[:space:]][A-Z]
->&0 regex \^[[:space:]]*end([[:space:]]+[;#if].*)?$ Ruby script text
-!:strength + 10
-!:mime text/x-ruby
-# Looks for function definition to balance python magic
-# def name (args)
-# end
-0 search/8192 def\
->0 regex \^[[:space:]]*def\ [a-z]|def\ [[:alpha:]]+::[a-z]
->>&0 regex \^[[:space:]]*end([[:space:]]+[;#].*)?$ Ruby script text
-!:strength + 10
-!:mime text/x-ruby
-
-0 search/8192 require
->0 regex \^[[:space:]]*require[[:space:]]'[A-Za-z_/.]+' Ruby script text
-!:mime text/x-ruby
-0 search/8192 include
->0 regex \^[[:space:]]*include\ ([A-Z]+[a-z]*(::))+ Ruby script text
-!:mime text/x-ruby
diff --git a/contrib/libs/libmagic/magic/Magdir/rust b/contrib/libs/libmagic/magic/Magdir/rust
deleted file mode 100644
index b1bbd9d970..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/rust
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: rust,v 1.2 2022/11/18 15:58:15 christos Exp $
-# Magic for Rust and related languages programs
-#
-
-# Rust compiler metadata
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/rust-lang/rust/blob/1.64.0/compiler/rustc_metadata/src/rmeta/mod.rs
-0 string rust\x00\x00\x00
->12 string \014rustc\x20 Rust compiler metadata
-!:ext rmeta
->>7 byte x \b, version %d
-
-# Rust incremental compilation metadata
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/rust-lang/rust/blob/1.64.0/compiler/rustc_incremental/src/persist/file_format.rs
-0 string RSIC
->4 uleshort =0 Rust incremental compilation metadata
-!:ext bin
->>6 pstring x \b, rustc %s
diff --git a/contrib/libs/libmagic/magic/Magdir/sc b/contrib/libs/libmagic/magic/Magdir/sc
deleted file mode 100644
index dc6d6c83d7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sc
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sc,v 1.6 2009/09/19 16:28:12 christos Exp $
-# sc: file(1) magic for "sc" spreadsheet
-#
-38 string Spreadsheet sc spreadsheet file
-!:mime application/x-sc
diff --git a/contrib/libs/libmagic/magic/Magdir/sccs b/contrib/libs/libmagic/magic/Magdir/sccs
deleted file mode 100644
index 04e7929921..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sccs
+++ /dev/null
@@ -1,24 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sccs,v 1.8 2020/06/20 21:32:52 christos Exp $
-# sccs: file(1) magic for SCCS archives
-#
-# SCCS v4 archive structure:
-# \001h01207
-# \001s 00276/00000/00000
-# \001d D 1.1 87/09/23 08:09:20 ian 1 0
-# \001c date and time created 87/09/23 08:09:20 by ian
-# \001e
-# \001u
-# \001U
-# ... etc.
-# Now '\001h' happens to be the same as the 3B20's a.out magic number (0550).
-# *Sigh*. And these both came from various parts of the USG.
-# Maybe we should just switch everybody from SCCS to RCS!
-# Further, you can't just say '\001h0', because the five-digit number
-# is a checksum that could (presumably) have any leading digit.
-# Fortunately, we have regular expression matching:
-0 string \001h
->2 regex [0-9][0-9][0-9][0-9][0-9]$
->>8 string \001s\040 SCCS v4 archive data
->2 string V6,sum= SCCS v6 archive data
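Restated outside the magic syntax, the v4 test above is: a ^Ah record whose five-digit checksum ends the first line, immediately followed by a ^As record at offset 8; a ^Ah record carrying V6,sum= marks the v6 format instead. A short illustrative Python check:

    import re

    def sccs_kind(head):
        # Classify an SCCS archive header the way the two tests above do.
        if not head.startswith(b"\x01h"):
            return None
        if re.match(rb"[0-9]{5}\r?\n", head[2:]) and head[8:11] == b"\x01s ":
            return "SCCS v4 archive data"
        if head[2:9] == b"V6,sum=":
            return "SCCS v6 archive data"
        return None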
diff --git a/contrib/libs/libmagic/magic/Magdir/scientific b/contrib/libs/libmagic/magic/Magdir/scientific
deleted file mode 100644
index d52d6aeb01..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/scientific
+++ /dev/null
@@ -1,144 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: scientific,v 1.14 2023/04/29 17:28:09 christos Exp $
-# scientific: file(1) magic for scientific formats
-#
-# From: Joe Krahn <krahn@niehs.nih.gov>
-
-########################################################
-# CCP4 data and plot files:
-0 string MTZ\040 MTZ reflection file
-
-92 string PLOT%%84 Plot84 plotting file
->52 byte 1 , Little-endian
->55 byte 1 , Big-endian
-
-########################################################
-# Electron density MAP/MASK formats
-
-0 string EZD_MAP NEWEZD Electron Density Map
-109 string MAP\040( Old EZD Electron Density Map
-
-0 string/c :-)\040Origin BRIX Electron Density Map
->170 string >0 , Sigma:%.12s
-#>4 string >0 %.178s
-#>4 addr x %.178s
-
-7 string 18\040!NTITLE XPLOR ASCII Electron Density Map
-9 string \040!NTITLE\012\040REMARK CNS ASCII electron density map
-
-208 string MAP\040 CCP4 Electron Density Map
-# Assumes same stamp for float and double (normal case)
->212 byte 17 \b, Big-endian
->212 byte 34 \b, VAX format
->212 byte 68 \b, Little-endian
->212 byte 85 \b, Convex native
-
-############################################################
-# X-Ray Area Detector images
-0 string R-AXIS4\ \ \ R-Axis Area Detector Image:
->796 lelong <20 Little-endian, IP #%d,
->>768 lelong >0 Size=%dx
->>772 lelong >0 \b%d
->796 belong <20 Big-endian, IP #%d,
->>768 belong >0 Size=%dx
->>772 belong >0 \b%d
-
-0 string RAXIS\ \ \ \ \ R-Axis Area Detector Image, Win32:
->796 lelong <20 Little-endian, IP #%d,
->>768 lelong >0 Size=%dx
->>772 lelong >0 \b%d
->796 belong <20 Big-endian, IP #%d,
->>768 belong >0 Size=%dx
->>772 belong >0 \b%d
-
-
-1028 string MMX\000\000\000\000\000\000\000\000\000\000\000\000\000 MAR Area Detector Image,
->1072 ulong >1 Compressed(%d),
->1100 ulong >1 %d headers,
->1104 ulong >0 %d x
->1108 ulong >0 %d,
->1120 ulong >0 %d bits/pixel
-
-# Type: GEDCOM genealogical (family history) data
-# From: Giuseppe Bilotta
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/GEDCOM
-# https://en.wikipedia.org/wiki/GEDCOM
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/g/
-# ged.trid.xml ged-utf8.trid.xml ged-utf16.trid.xml
-# Note: called "GEDCOM Family History" by TrID and "Genealogical Data Communication (GEDCOM) Format" by DROID via PUID fmt/851
-0 search/1/c 0\ HEAD GEDCOM genealogy text
-#!:mime text/plain
-#!:mime application/x-gedcom
-# https://www.iana.org/assignments/media-types/text/vnd.familysearch.gedcom
-!:mime text/vnd.familysearch.gedcom
-!:ext ged
-# no gedcom sample found, and the ged suffix is also used for other formats
-#!:ext ged/gedcom
->&0 search 1\ GEDC
->>&0 search 2\ VERS version
-# 4 5.0 5.3 5.4 5.5 5.5.1 5.5.5 5.6 7.0 or no version
->>>&1 string >\0 %s
-# From: Phil Endecott <phil05@chezphil.org>
-# 0\040HEAD as UTF-16 big endian without BOM
-0 string \000\060\000\040\000\110\000\105\000\101\000\104 GEDCOM genealogy text
-!:mime text/vnd.familysearch.gedcom
-!:ext ged
-# look for VERS tag encoded as UTF-16 big endian
->12 search/0x65 V\0E\0R\0S version
-# version like: 5.5.1
->>&2 bestring16 x %s
->>0 string x \b, UTF-16 (without BOM) big-endian text
-# 0\040HEAD as UTF-16 little endian without BOM
-0 string \060\000\040\000\110\000\105\000\101\000\104\000 GEDCOM genealogy text
-!:mime text/vnd.familysearch.gedcom
-!:ext ged
-# look for VERS tag encoded as UTF-16 little endian
->12 search/0x65 V\0E\0R\0S version
-# version like: 5.5.1
->>&3 lestring16 x %s
->>2 string x \b, UTF-16 (without BOM) little-endian text
-# Note: UTF-16 with BOM variants are already matched above by the first test as "GEDCOM genealogy text"
-# 0\040HEAD as UTF-16 big endian with BOM
-#0 string \376\377\000\060\000\040\000\110\000\105\000\101\000\104 GEDCOM data
-# 0\040HEAD as UTF-16 little endian with BOM
-#0 string \377\376\060\000\040\000\110\000\105\000\101\000\104\000 GEDCOM data
-
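The three GEDCOM entries above all look for the opening '0 HEAD' record: once as plain ASCII/UTF-8 (case-insensitively) and twice as UTF-16 without a BOM, big- and little-endian, since BOM-prefixed files already match the first test. A quick Python sketch of the same probe (illustrative only; case-folding is applied uniformly here for brevity):

    def gedcom_encoding(head):
        # Return a label for a GEDCOM file whose first record is '0 HEAD', else None.
        probes = (
            ("ASCII/UTF-8", b"0 HEAD"),
            ("UTF-16 big-endian, no BOM", "0 HEAD".encode("utf-16-be")),
            ("UTF-16 little-endian, no BOM", "0 HEAD".encode("utf-16-le")),
        )
        for label, magic in probes:
            if head[:len(magic)].upper() == magic:
                return label
        return None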
-# PDB: Protein Data Bank files
-# Adam Buchbinder <adam.buchbinder@gmail.com>
-#
-# https://www.wwpdb.org/documentation/format32/sect2.html
-# https://www.ch.ic.ac.uk/chemime/
-#
-# The PDB file format is fixed-field, 80 columns. From the spec:
-#
-# COLS DATA
-# 1 - 6 "HEADER"
-# 11 - 50 String(40)
-# 51 - 59 Date
-# 63 - 66 IDcode
-#
-# Thus, positions 7-10, 60-62 and 67-80 are spaces. The Date must be in the
-# format DD-MMM-YY, e.g., 01-JAN-70, and the IDcode consists of numbers and
-# uppercase letters. However, examples have been seen without the date string,
-# e.g., the example on the chemime site.
-0 string HEADER\ \ \ \040
->&0 regex/1l \^.{40}
->>&0 regex/1l [0-9]{2}-[A-Z]{3}-[0-9]{2}\ {3}
->>>&0 regex/1ls [A-Z0-9]{4}.{14}$
->>>>&0 regex/1l [A-Z0-9]{4} Protein Data Bank data, ID Code %s
-!:mime chemical/x-pdb
->>>>0 regex/1l [0-9]{2}-[A-Z]{3}-[0-9]{2} \b, %s
-
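The chained regex tests above walk the fixed 80-column HEADER record spelled out in the comment: 'HEADER' in columns 1-6, a 40-character title, a DD-MMM-YY date in columns 51-59 and a four-character ID code in columns 63-66. The same extraction in plain Python (0-based slices; the sample record is made up):

    import re

    def pdb_header(line):
        # Extract deposition date and ID code from a PDB HEADER record.
        if not line.startswith("HEADER    "):
            return None
        date, idcode = line[50:59], line[62:66]
        if not re.fullmatch(r"[A-Z0-9]{4}", idcode):
            return None
        if not re.fullmatch(r"[0-9]{2}-[A-Z]{3}-[0-9]{2}", date):
            date = None                  # some files omit the date, as noted above
        return {"id_code": idcode, "deposition_date": date}

    print(pdb_header("HEADER    HYDROLASE" + " " * 31 + "01-JAN-70   1ABC" + " " * 14))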
-# Type: GDSII Stream file
-0 belong 0x00060002 GDSII Stream file
->4 byte 0x00
->>5 byte x version %d.0
->4 byte >0x00 version %d
->>5 byte x \b.%d
-
-# Type: LXT (interLaced eXtensible Trace)
-# chrysn <chrysn@fsfe.org>
-0 beshort 0x0138 interLaced eXtensible Trace (LXT) file
->2 beshort >0 (Version %u)
diff --git a/contrib/libs/libmagic/magic/Magdir/securitycerts b/contrib/libs/libmagic/magic/Magdir/securitycerts
deleted file mode 100644
index 8785dd883f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/securitycerts
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: securitycerts,v 1.4 2009/09/19 16:28:12 christos Exp $
-0 search/1 -----BEGIN\ CERTIFICATE------ RFC1421 Security Certificate text
-0 search/1 -----BEGIN\ NEW\ CERTIFICATE RFC1421 Security Certificate Signing Request text
-0 belong 0xedfeedfe Sun 'jks' Java Keystore File data
diff --git a/contrib/libs/libmagic/magic/Magdir/selinux b/contrib/libs/libmagic/magic/Magdir/selinux
deleted file mode 100644
index 89d5f53629..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/selinux
+++ /dev/null
@@ -1,24 +0,0 @@
-# Type: SE Linux policy modules *.pp reference policy
-# for Fedora 5 to 9, RHEL5, and Debian Etch and Lenny.
-# URL: https://doc.coker.com.au/computers/selinux-magic
-# From: Russell Coker <russell@coker.com.au>
-
-0 lelong 0xf97cff8f SE Linux modular policy
->4 lelong x version %d,
->8 lelong x %d sections,
->>(12.l) lelong 0xf97cff8d
->>>(12.l+27) lelong x mod version %d,
->>>(12.l+31) lelong 0 Not MLS,
->>>(12.l+31) lelong 1 MLS,
->>>(12.l+23) lelong 2
->>>>(12.l+47) string >\0 module name %s
->>>(12.l+23) lelong 1 base
-
-1 string policy_module( SE Linux policy module source
-2 string policy_module( SE Linux policy module source
-
-0 string ##\ <summary> SE Linux policy interface source
-
-#0 search gen_context( SE Linux policy file contexts
-
-#0 search gen_sens( SE Linux policy MLS constraints source
diff --git a/contrib/libs/libmagic/magic/Magdir/sendmail b/contrib/libs/libmagic/magic/Magdir/sendmail
deleted file mode 100644
index 6808dbfd33..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sendmail
+++ /dev/null
@@ -1,37 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sendmail,v 1.12 2022/10/31 13:22:26 christos Exp $
-# sendmail: file(1) magic for sendmail config files
-#
-# XXX - byte order?
-#
-# Update: Joerg Jenderek
-# GRR: this test is too general, as it also catches
-# READ.ME.FIRST.AWP Sendmail frozen configuration
-# - version ====|====|====|====|====|====|====|====|====|====|====|====|===
-# Email_23_f217153422.ts Sendmail frozen configuration
-# - version \330jK\354
-0 byte 046
-# https://www.sendmail.com/sm/open_source/docs/older_release_notes/
-# frozen configuration file (dbm format?) created from sendmail.cf with -bz
-# by older sendmail; in version 8.6 support for frozen configuration files was removed
-# valid version numbers look like "7.14.4" and should be similar to output of commands
-# "sendmail -d0 -bt < /dev/null |grep -i Version" or "egrep '^DZ' /etc/sendmail.cf"
->16 regex/s =^[0-78][0-9.]{4} Sendmail frozen configuration
-# normally only /etc/sendmail.fc or /var/adm/sendmail/sendmail.fc
-!:ext fc
->>16 string >\0 - version %s
-0 short 0x271c
-# look for valid version number
->16 regex/s =^[0-78][0-9.]{4} Sendmail frozen configuration
-!:ext fc
->>16 string >\0 - version %s
-
-#------------------------------------------------------------------------------
-# sendmail: file(1) magic for sendmail m4(1) files
-#
-# From Hendrik Scholz <hendrik@scholz.net>
-# i.e. files in /usr/share/sendmail/cf/
-#
-0 string divert(-1)\n sendmail m4 text file
-
diff --git a/contrib/libs/libmagic/magic/Magdir/sequent b/contrib/libs/libmagic/magic/Magdir/sequent
deleted file mode 100644
index da38de65af..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sequent
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sequent,v 1.14 2019/04/19 00:42:27 christos Exp $
-# sequent: file(1) magic for Sequent machines
-#
-# Sequent information updated by Don Dwiggins <atsun!dwiggins>.
-# For Sequent's multiprocessor systems (incomplete).
-0 lelong 0x00ea BALANCE NS32000 .o
->16 lelong >0 not stripped
->124 lelong >0 version %d
-0 lelong 0x10ea BALANCE NS32000 executable (0 @ 0)
->16 lelong >0 not stripped
->124 lelong >0 version %d
-0 lelong 0x20ea BALANCE NS32000 executable (invalid @ 0)
->16 lelong >0 not stripped
->124 lelong >0 version %d
-0 lelong 0x30ea BALANCE NS32000 standalone executable
->16 lelong >0 not stripped
->124 lelong >0 version %d
-#
-# Symmetry information added by Jason Merrill <jason@jarthur.claremont.edu>.
-# Symmetry magic nums will not be reached if DOS COM comes before them;
-# byte 0xeb is matched before these get a chance.
-0 leshort 0x12eb SYMMETRY i386 .o
->16 lelong >0 not stripped
->124 lelong >0 version %d
-0 leshort 0x22eb SYMMETRY i386 executable (0 @ 0)
->16 lelong >0 not stripped
->124 lelong >0 version %d
-0 leshort 0x32eb SYMMETRY i386 executable (invalid @ 0)
->16 lelong >0 not stripped
->124 lelong >0 version %d
-# https://en.wikipedia.org/wiki/Sequent_Computer_Systems
-# the test line below conflicts with MS-DOS 2.11 floppies and the Acronis loader
-#0 leshort 0x42eb SYMMETRY i386 standalone executable
-0 leshort 0x42eb
-# skip unlikely negative version values
->124 lelong >-1
-# assuming version 28867614 is highly improbable
->>124 lelong !28867614 SYMMETRY i386 standalone executable
->>>16 lelong >0 not stripped
->>>124 lelong >0 version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/sereal b/contrib/libs/libmagic/magic/Magdir/sereal
deleted file mode 100644
index ead78d5d35..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sereal
+++ /dev/null
@@ -1,35 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sereal,v 1.3 2015/02/05 19:14:45 christos Exp $
-# sereal: file(1) magic the Sereal binary serialization format
-#
-# From: Ævar Arnfjörð Bjarmason <avarab@gmail.com>
-#
-# See the specification of the format at
-# https://github.com/Sereal/Sereal/blob/master/sereal_spec.pod#document-header-format
-#
-# I'd have liked to do the byte&0xF0 matching against 0, 1, 2 ... by
-# doing (byte&0xF0)>>4 here, but unfortunately that's not
-# supported. So when we print out a message about an unknown format
-# we'll print out e.g. 0x30 instead of the more human-readable
-# 0x30>>4.
-#
-# See https://github.com/Sereal/Sereal/commit/35372ae01d in the
-# Sereal.git repository for test Sereal data.
-0 name sereal
->4 byte&0x0F x (version %d,
->4 byte&0xF0 0x00 uncompressed)
->4 byte&0xF0 0x10 compressed with non-incremental Snappy)
->4 byte&0xF0 0x20 compressed with incremental Snappy)
->4 byte&0xF0 >0x20 unknown subformat, flag: %d>>4)
-
-0 string/b \=srl Sereal data packet
-!:mime application/sereal
->&0 use sereal
-0 string/b \=\xF3rl Sereal data packet
-!:mime application/sereal
->&0 use sereal
-0 string/b \=\xC3\xB3rl Sereal data packet, UTF-8 encoded
-!:mime application/sereal
->&0 use sereal
-
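
Byte 4 of a Sereal packet carries the protocol version in its low nibble and the compression scheme in its high nibble, which is what the named rule above reports. A simplified Python sketch under that assumption; it only handles the two 4-byte magics, and the helper name is illustrative:

def describe_sereal(buf: bytes) -> str | None:
    """Simplified rendering of the rules above: after the "=srl" magic,
    the next byte holds version (low nibble) and compression (high nibble)."""
    if len(buf) < 5 or not buf.startswith((b"=srl", b"=\xf3rl")):
        return None
    flags = buf[4]
    compression = {0x00: "uncompressed",
                   0x10: "compressed with non-incremental Snappy",
                   0x20: "compressed with incremental Snappy"}.get(flags & 0xF0,
                                                                   "unknown subformat")
    return f"Sereal data packet (version {flags & 0x0F}, {compression})"
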
diff --git a/contrib/libs/libmagic/magic/Magdir/sgi b/contrib/libs/libmagic/magic/Magdir/sgi
deleted file mode 100644
index fe532e0010..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sgi
+++ /dev/null
@@ -1,144 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sgi,v 1.24 2021/09/13 13:23:53 christos Exp $
-# sgi: file(1) magic for Silicon Graphics operating systems and applications
-#
-# Executable images are handled either in aout (for old-style a.out
-# files for 68K; they are indistinguishable from other big-endian 32-bit
-# a.out files) or in mips (for MIPS ECOFF and Ucode files)
-#
-
-# kbd file definitions
-0 string kbd!map kbd map file
->8 byte >0 Ver %d:
->10 short >0 with %d table(s)
-
-0 beshort 0x8765 disk quotas file
-
-0 beshort 0x0506 IRIS Showcase file
->2 byte 0x49 -
->3 byte x - version %d
-0 beshort 0x0226 IRIS Showcase template
->2 byte 0x63 -
->3 byte x - version %d
-0 belong 0x5343464d IRIS Showcase file
->4 byte x - version %d
-0 belong 0x5443464d IRIS Showcase template
->4 byte x - version %d
-0 belong 0xdeadbabe IRIX Parallel Arena
->8 belong >0 - version %d
-
-# core files
-#
-# 32bit core file
-0 belong 0xdeadadb0 IRIX core dump
->4 belong 1 of
->16 string >\0 '%s'
-# 64bit core file
-0 belong 0xdeadad40 IRIX 64-bit core dump
->4 belong 1 of
->16 string >\0 '%s'
-# N32bit core file
-0 belong 0xbabec0bb IRIX N32 core dump
->4 belong 1 of
->16 string >\0 '%s'
-# New style crash dump file
-0 string \x43\x72\x73\x68\x44\x75\x6d\x70 IRIX vmcore dump of
->36 string >\0 '%s'
-
-# Trusted IRIX info
-0 string SGIAUDIT SGI Audit file
->8 byte x - version %d
->9 byte x \b.%d
-#
-0 string WNGZWZSC Wingz compiled script
-0 string WNGZWZSS Wingz spreadsheet
-0 string WNGZWZHP Wingz help file
-#
-0 string #Inventor\040V IRIS Inventor 1.0 file
-0 string #Inventor\040V2 Open Inventor 2.0 file
-# GLF is OpenGL stream encoding
-0 string glfHeadMagic(); GLF_TEXT
-4 belong 0x7d000000 GLF_BINARY_LSB_FIRST
-!:strength -30
-4 belong 0x0000007d GLF_BINARY_MSB_FIRST
-!:strength -30
-# GLS is OpenGL stream encoding; GLS is the successor of GLF
-0 string glsBeginGLS( GLS_TEXT
-4 belong 0x10000000 GLS_BINARY_LSB_FIRST
-!:strength -30
-4 belong 0x00000010 GLS_BINARY_MSB_FIRST
-!:strength -30
-
-# Performance Co-Pilot file types
-0 string PmNs PCP compiled namespace (V.0)
-0 string PmN PCP compiled namespace
->3 string >\0 (V.%1.1s)
-3 belong 0x84500526 PCP archive
->7 byte x (V.%d)
->20 belong -2 temporal index
->20 belong -1 metadata
->20 belong 0 log volume #0
->20 belong >0 log volume #%d
->24 string >\0 host: %s
-3 belong 0x28500526 PCP archive
->7 byte x (V.%d)
->24 belong -2 temporal index
->24 belong -1 metadata
->24 belong 0 log volume #0
->24 belong >0 log volume #%d
->36 string >\0 host: %s
-0 string PCPFolio PCP
->9 string Version: Archive Folio
->18 string >\0 (V.%s)
-0 string #pmchart PCP pmchart view
->9 string Version
->17 string >\0 (V%-3.3s)
-0 string #kmchart PCP pmchart view
->9 string Version
->17 string >\0 (V.%s)
-0 string pmview PCP pmview config
->7 string Version
->15 string >\0 (V%-3.3s)
-0 string #pmlogger PCP pmlogger config
->10 string Version
->18 string >\0 (V%1.1s)
-0 string #pmdahotproc PCP pmdahotproc config
->13 string Version
->21 string >\0 (V%-3.3s)
-0 string PcPh PCP Help
->4 string 1 Index
->4 string 2 Text
->5 string >\0 (V.%1.1s)
-0 string #pmieconf-rules PCP pmieconf rules
->16 string >\0 (V.%1.1s)
-3 string pmieconf-pmie PCP pmie config
->17 string >\0 (V.%1.1s)
-0 string #pmlogconf-setup PCP pmlogconf config
->17 string >\0 (V.%1.1s)
-1 string pmlogconf PCP pmlogger config
->11 string >\0 (V.%1.1s)
-0 string MMV PCP memory mapped values
->4 long x (V.%d)
-
-# SpeedShop data files
-0 lelong 0x13130303 SpeedShop data file
-
-# mdbm files
-0 lelong 0x01023962 mdbm file, version 0 (obsolete)
-0 string mdbm mdbm file,
->5 byte x version %d,
->6 byte x 2^%d pages,
->7 byte x pagesize 2^%d,
->17 byte x hash %d,
->11 byte x dataformat %d
-
-# Alias Maya files
-0 string/t //Maya\040ASCII Alias Maya Ascii File,
->13 string >\0 version %s
-8 string MAYAFOR4 Alias Maya Binary File,
->32 string >\0 version %s scene
-8 string MayaFOR4 Alias Maya Binary File,
->32 string >\0 version %s scene
-8 string CIMG Alias Maya Image File
-8 string DEEP Alias Maya Image File
diff --git a/contrib/libs/libmagic/magic/Magdir/sgml b/contrib/libs/libmagic/magic/Magdir/sgml
deleted file mode 100644
index 9cd38e09b7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sgml
+++ /dev/null
@@ -1,163 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sgml,v 1.48 2023/01/18 16:10:21 christos Exp $
-# Type: SVG Vectorial Graphics
-# From: Noel Torres <tecnico@ejerciciosresueltos.com>
-0 string \<?xml\ version=
->14 regex ['"\ \t]*[0-9.]+['"\ \t]*
->>19 search/4096 \<svg SVG Scalable Vector Graphics image
-!:strength + 50
-!:mime image/svg+xml
-!:ext svg
->>19 search/4096 \<gnc-v2 GnuCash file
-!:mime application/x-gnucash
-0 string \<svg SVG Scalable Vector Graphics image
-!:strength + 50
-!:mime image/svg+xml
-!:ext svg
-
-# Sitemap file
-0 string/t \<?xml\ version=
->14 regex ['"\ \t]*[0-9.]+['"\ \t]*
->>19 search/4096 \<urlset XML Sitemap document text
-!:mime application/xml-sitemap
-
-# OpenStreetMap XML (.osm)
-# https://wiki.openstreetmap.org/wiki/OSM_XML
-# From: Markus Heidelberg <markus.heidelberg@web.de>
-0 string \<?xml\ version=
->14 regex ['"\ \t]*[0-9.]+['"\ \t]*
->>19 search/4096 \<osm OpenStreetMap XML data
-
-# xhtml
-0 string/t \<?xml\ version="
->19 search/4096/cWbt \<!doctype\ html XHTML document text
->>15 string >\0 (version %.3s)
-!:mime text/html
-0 string/t \<?xml\ version='
->19 search/4096/cWbt \<!doctype\ html XHTML document text
->>15 string >\0 (version %.3s)
-!:mime text/html
-0 string/t \<?xml\ version="
->19 search/4096/cWbt \<html broken XHTML document text
->>15 string >\0 (version %.3s)
-!:mime text/html
-
-#------------------------------------------------------------------------------
-# sgml: file(1) magic for Standard Generalized Markup Language
-# HyperText Markup Language (HTML) is an SGML document type,
-# from Daniel Quinlan (quinlan@yggdrasil.com)
-# adapted to string extensions by Anthon van der Neut <anthon@mnt.org>
-0 search/4096/cWt \<!doctype\ html HTML document text
-!:mime text/html
-!:strength + 5
-
-# avoid misdetection as JavaScript
-0 string/cWt \<!doctype\ html HTML document text
-!:mime text/html
-0 string/ct \<html> HTML document text
-!:mime text/html
-0 string/ct \<!--
->&0 search/4096/cWt \<!doctype\ html HTML document text
-!:mime text/html
->&0 search/4096/ct \<html> HTML document text
-!:mime text/html
-
-# SVG document
-# https://www.w3.org/TR/SVG/single-page.html
-0 search/4096/cWbt \<!doctype\ svg SVG XML document
-!:mime image/svg+xml
-!:strength + 15
-
-0 search/4096/cwt \<head\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<head\ HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cwt \<title\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<title\ HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cwt \<html\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<html\ HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cwt \<script\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<script\ HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cwt \<style\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<style\ HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cwt \<table\> HTML document text
-!:mime text/html
-!:strength + 15
-0 search/4096/cWt \<table\ HTML document text
-!:mime text/html
-!:strength + 15
-
-0 search/4096/cwt \<a\ href= HTML document text
-!:mime text/html
-!:strength + 15
-
-# Extensible markup language (XML), a subset of SGML
-# from Marc Prud'hommeaux (marc@apocalypse.org)
-0 search/1/cwt \<?xml XML document text
-!:mime text/xml
-!:strength + 15
-0 string/t \<?xml\ version\ " XML
-!:mime text/xml
-!:strength + 15
-0 string/t \<?xml\ version=" XML
-!:mime text/xml
-!:strength + 15
->15 string/t >\0 %.3s document text
->>23 search/1 \<xsl:stylesheet (XSL stylesheet)
->>24 search/1 \<xsl:stylesheet (XSL stylesheet)
-0 string/t \<?xml\ version=' XML
-!:mime text/xml
-!:strength + 15
->15 string/t >\0 %.3s document text
->>23 search/1 \<xsl:stylesheet (XSL stylesheet)
->>24 search/1 \<xsl:stylesheet (XSL stylesheet)
-0 search/1/wt \<?XML broken XML document text
-!:mime text/xml
-!:strength - 10
-
-
-# SGML, mostly from rph@sq
-0 search/4096/cwt \<!doctype exported SGML document text
-0 search/4096/cwt \<!subdoc exported SGML subdocument text
-0 search/4096/cwt \<!-- exported SGML document text
-!:strength - 10
-
-# Web browser cookie files
-# (Mozilla, Galeon, Netscape 4, Konqueror..)
-# Ulf Harnhammar <ulfh@update.uu.se>
-0 search/1 #\ HTTP\ Cookie\ File Web browser cookie text
-0 search/1 #\ Netscape\ HTTP\ Cookie\ File Netscape cookie text
-0 search/1 #\ KDE\ Cookie\ File Konqueror cookie text
-
-# XML-based format representing braille pages in a digital format.
-#
-# Specification:
-# http://files.pef-format.org/specifications/pef-2008-1/pef-specification.html
-#
-# Simon Aittamaa <simon.aittamaa@gmail.com>
-0 string \<?xml\ version=
->14 regex ['"\ \t]*[0-9.]+['"\ \t]*
->>19 search/4096 \<pef Portable Embosser Format
-!:mime application/x-pef+xml
-
-# https://www.qgis.org/en/site/
-0 string \<!DOCTYPE\040qgis QGIS XML document
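
The XML entries above share one pattern: anchor on the XML declaration plus a version number, then search the first 4096 bytes for a distinguishing root element. A rough Python sketch of that layering; the helper and the marker table are illustrative, not part of file(1):

import re

XML_DECL = re.compile(rb"""<\?xml version=['" \t]*[0-9.]+""")

ROOTS = {  # root-element marker -> description, per the rules above
    b"<svg":    "SVG Scalable Vector Graphics image",
    b"<gnc-v2": "GnuCash file",
    b"<urlset": "XML Sitemap document text",
    b"<osm":    "OpenStreetMap XML data",
    b"<pef":    "Portable Embosser Format",
}

def sniff_xml(buf: bytes) -> str | None:
    """Check for an XML declaration with a version number, then look for
    a known root element within the first 4096 bytes."""
    if not XML_DECL.match(buf):
        return None
    window = buf[:4096]
    for marker, description in ROOTS.items():
        if marker in window:
            return description
    return "XML document text"
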
diff --git a/contrib/libs/libmagic/magic/Magdir/sharc b/contrib/libs/libmagic/magic/Magdir/sharc
deleted file mode 100644
index e54088bc8f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sharc
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#------------------------------------------------------------------------
-# $File: sharc,v 1.8 2017/03/17 21:35:28 christos Exp $
-# file(1) magic for sharc files
-#
-# SHARC DSP, MIDI SysEx and RiscOS filetype definitions added by
-# FutureGroove Music (dsp@futuregroove.de)
-
-#------------------------------------------------------------------------
-#0 string Draw RiscOS Drawfile
-#0 string PACK RiscOS PackdDir archive
-
-#------------------------------------------------------------------------
-# SHARC DSP stuff (based on the FGM SHARC DSP SDK)
-
-#0 string =! Assembler source
-#0 string Analog ADi asm listing file
-0 string .SYSTEM SHARC architecture file
-0 string .system SHARC architecture file
-
-0 leshort 0x521C SHARC COFF binary
->2 leshort >1 , %d sections
->>12 lelong >0 , not stripped
diff --git a/contrib/libs/libmagic/magic/Magdir/sinclair b/contrib/libs/libmagic/magic/Magdir/sinclair
deleted file mode 100644
index 608d779d7b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sinclair
+++ /dev/null
@@ -1,40 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sinclair,v 1.7 2021/04/27 20:35:51 christos Exp $
-# sinclair: file(1) sinclair QL
-
-# additions to /etc/magic by Thomas M. Ott (ThMO)
-
-# Sinclair QL floppy disk formats (ThMO)
-0 string =QL5 QL disk dump data,
->3 string =A 720 KB,
->3 string =B 1.44 MB,
->3 string =C 3.2 MB,
->4 string >\0 label:%.10s
-
-# Sinclair QL OS dump (ThMO)
-0 belong =0x30000
->49124 belong <47104
->>49128 belong <47104
->>>49132 belong <47104
->>>>49136 belong <47104 QL OS dump data,
->>>>>49148 string >\0 type %.3s,
->>>>>49142 string >\0 version %.4s
-
-# Sinclair QL firmware executables (ThMO)
-0 string NqNqNq`\004 QL firmware executable (BCPL)
-
-# Sinclair QL libraries (was ThMO)
-0 beshort 0xFB01 QDOS object
->2 pstring x '%s'
-
-# Sinclair QL executables (was ThMO)
-4 belong 0x4AFB QDOS executable
->9 pstring x '%s'
-6 beshort 0x4AFB QDOS executable
->9 pstring x '%s'
-
-# Sinclair QL ROM (ThMO)
-0 belong =0x4AFB0001 QL plugin-ROM data,
->9 pstring =\0 un-named
->9 pstring >\0 named: %s
diff --git a/contrib/libs/libmagic/magic/Magdir/sisu b/contrib/libs/libmagic/magic/Magdir/sisu
deleted file mode 100644
index ba7104fa16..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sisu
+++ /dev/null
@@ -1,18 +0,0 @@
-# Type: SiSU Markup Language
-# URL: http://www.sisudoc.org/
-# From: Ralph Amissah <ralph.amissah@gmail.com>
-
-0 regex \^%?[\ \t]*SiSU[\ \t]+insert SiSU text insert
->5 regex [0-9.]+ %s
-
-0 regex \^%[\ \t]+SiSU[\ \t]+master SiSU text master
->5 regex [0-9.]+ %s
-
-0 regex \^%?[\ \t]*SiSU[\ \t]+text SiSU text
->5 regex [0-9.]+ %s
-
-0 regex \^%?[\ \t]*SiSU[\ \t][0-9.]+ SiSU text
->5 regex [0-9.]+ %s
-
-0 regex \^%*[\ \t]*sisu-[0-9.]+ SiSU text
->5 regex [0-9.]+ %s
diff --git a/contrib/libs/libmagic/magic/Magdir/sketch b/contrib/libs/libmagic/magic/Magdir/sketch
deleted file mode 100644
index ee731ddd52..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sketch
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sketch,v 1.5 2017/03/17 21:35:28 christos Exp $
-# Sketch Drawings: http://sketch.sourceforge.net/
-# From: Edwin Mons <e@ik.nu>
-0 search/1 ##Sketch Sketch document text
diff --git a/contrib/libs/libmagic/magic/Magdir/smalltalk b/contrib/libs/libmagic/magic/Magdir/smalltalk
deleted file mode 100644
index 9ff2c6b0c0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/smalltalk
+++ /dev/null
@@ -1,25 +0,0 @@
-
-#-----------------------------------------------
-# $File: smalltalk,v 1.5 2009/09/19 16:28:12 christos Exp $
-# GNU Smalltalk image, starting at version 1.6.2
-# From: catull_us@yahoo.com
-#
-0 string GSTIm\0\0 GNU SmallTalk
-# little-endian
->7 byte&1 =0 LE image version
->>10 byte x %d.
->>9 byte x \b%d.
->>8 byte x \b%d
-#>>12 lelong x , data: %ld
-#>>16 lelong x , table: %ld
-#>>20 lelong x , memory: %ld
-# big-endian
->7 byte&1 =1 BE image version
->>8 byte x %d.
->>9 byte x \b%d.
->>10 byte x \b%d
-#>>12 belong x , data: %ld
-#>>16 belong x , table: %ld
-#>>20 belong x , memory: %ld
-
-
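
Bit 0 of byte 7 selects the byte order of a GNU Smalltalk image, and the three version bytes are then read in opposite orders, as the two branches above show. A small Python sketch under those assumptions; the helper name is illustrative:

def gst_image_version(buf: bytes) -> str | None:
    """Decode the GNU Smalltalk image header per the rules above:
    bit 0 of byte 7 picks the byte order of the version bytes."""
    if not buf.startswith(b"GSTIm\0\0") or len(buf) < 11:
        return None
    big_endian = bool(buf[7] & 1)
    major, minor, patch = (buf[8], buf[9], buf[10]) if big_endian \
        else (buf[10], buf[9], buf[8])
    order = "BE" if big_endian else "LE"
    return f"GNU SmallTalk {order} image version {major}.{minor}.{patch}"
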
diff --git a/contrib/libs/libmagic/magic/Magdir/smile b/contrib/libs/libmagic/magic/Magdir/smile
deleted file mode 100644
index d196de50d1..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/smile
+++ /dev/null
@@ -1,34 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: smile,v 1.1 2011/08/17 17:37:18 christos Exp $
-# smile: file(1) magic for Smile serialization
-#
-# The Smile serialization format uses a 4-byte header:
-#
-# Constant byte #0: 0x3A (ASCII ':')
-# Constant byte #1: 0x29 (ASCII ')')
-# Constant byte #2: 0x0A (ASCII linefeed, '\n')
-# Variable byte #3, consisting of bits:
-# Bits 4-7 (4 MSB): 4-bit version number
-# Bit 3: Reserved
-# Bit 2 (mask 0x04): Whether raw binary (unescaped 8-bit) values may be present in content
-# Bit 1 (mask 0x02): Whether shared String value checking was enabled during encoding, default false
-# Bit 0 (mask 0x01): Whether shared property name checking was enabled during encoding, default true
-#
-# Reference: http://wiki.fasterxml.com/SmileFormatSpec
-# Created by: Pierre-Alexandre Meyer <pierre@mouraf.org>
-
-# Detection
-0 string :)\n Smile binary data
-
-# Versioning
->3 byte&0xF0 x version %d:
-
-# Properties
->3 byte&0x04 0x04 binary raw,
->3 byte&0x04 0x00 binary encoded,
->3 byte&0x02 0x02 shared String values enabled,
->3 byte&0x02 0x00 shared String values disabled,
->3 byte&0x01 0x01 shared field names enabled
->3 byte&0x01 0x00 shared field names disabled
-
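
The header comment above spells out the full bit layout of byte 3. A short Python sketch that decodes it, shifting the 4-bit version the way the magic rules cannot; the helper name is illustrative:

def describe_smile(buf: bytes) -> str | None:
    """Decode the 4-byte Smile header described above."""
    if not buf.startswith(b":)\n") or len(buf) < 4:
        return None
    b = buf[3]
    return ("Smile binary data, version %d: %s, shared String values %s, "
            "shared field names %s" % (
                b >> 4,
                "binary raw" if b & 0x04 else "binary encoded",
                "enabled" if b & 0x02 else "disabled",
                "enabled" if b & 0x01 else "disabled"))
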
diff --git a/contrib/libs/libmagic/magic/Magdir/sniffer b/contrib/libs/libmagic/magic/Magdir/sniffer
deleted file mode 100644
index 751d197376..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sniffer
+++ /dev/null
@@ -1,482 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sniffer,v 1.34 2022/12/14 18:27:36 christos Exp $
-# sniffer: file(1) magic for packet capture files
-#
-# From: guy@alum.mit.edu (Guy Harris)
-#
-
-#
-# Microsoft Network Monitor 1.x capture files.
-#
-0 string RTSS NetMon capture file
->5 byte x - version %d
->4 byte x \b.%d
->6 leshort 0 (Unknown)
->6 leshort 1 (Ethernet)
->6 leshort 2 (Token Ring)
->6 leshort 3 (FDDI)
->6 leshort 4 (ATM)
->6 leshort >4 (type %d)
-
-#
-# Microsoft Network Monitor 2.x capture files.
-#
-0 string GMBU NetMon capture file
->5 byte x - version %d
->4 byte x \b.%d
->6 leshort 0 (Unknown)
->6 leshort 1 (Ethernet)
->6 leshort 2 (Token Ring)
->6 leshort 3 (FDDI)
->6 leshort 4 (ATM)
->6 leshort 5 (IP-over-IEEE 1394)
->6 leshort 6 (802.11)
->6 leshort 7 (Raw IP)
->6 leshort 8 (Raw IP)
->6 leshort 9 (Raw IP)
->6 leshort >9 (type %d)
-
-#
-# Network General Sniffer capture files.
-# Sorry, make that "Network Associates Sniffer capture files."
-# Sorry, make that "Network General old DOS Sniffer capture files."
-#
-0 string TRSNIFF\040data\040\040\040\040\032 Sniffer capture file
->33 byte 2 (compressed)
->23 leshort x - version %d
->25 leshort x \b.%d
->32 byte 0 (Token Ring)
->32 byte 1 (Ethernet)
->32 byte 2 (ARCNET)
->32 byte 3 (StarLAN)
->32 byte 4 (PC Network broadband)
->32 byte 5 (LocalTalk)
->32 byte 6 (Znet)
->32 byte 7 (Internetwork Analyzer)
->32 byte 9 (FDDI)
->32 byte 10 (ATM)
-
-#
-# Cinco Networks NetXRay capture files.
-# Sorry, make that "Network General Sniffer Basic capture files."
-# Sorry, make that "Network Associates Sniffer Basic capture files."
-# Sorry, make that "Network Associates Sniffer Basic, and Windows
-# Sniffer Pro", capture files."
-# Sorry, make that "Network General Sniffer capture files."
-# Sorry, make that "NetScout Sniffer capture files."
-#
-0 string XCP\0 NetXRay capture file
->4 string >\0 - version %s
->44 leshort 0 (Ethernet)
->44 leshort 1 (Token Ring)
->44 leshort 2 (FDDI)
->44 leshort 3 (WAN)
->44 leshort 8 (ATM)
->44 leshort 9 (802.11)
-
-#
-# "libpcap" capture files.
-# https://www.tcpdump.org/manpages/pcap-savefile.5.html
-# (We call them "tcpdump capture file(s)" for now, as "tcpdump" is
-# the main program that uses that format, but there are other programs
-# that use "libpcap", or that use the same capture file format.)
-#
-0 name pcap-be
->4 beshort x - version %d
->6 beshort x \b.%d
-# clear that continuation level match
->20 clear x
->20 belong&0x03FFFFFF 0 (No link-layer encapsulation
->20 belong&0x03FFFFFF 1 (Ethernet
->20 belong&0x03FFFFFF 2 (3Mb Ethernet
->20 belong&0x03FFFFFF 3 (AX.25
->20 belong&0x03FFFFFF 4 (ProNET
->20 belong&0x03FFFFFF 5 (CHAOS
->20 belong&0x03FFFFFF 6 (Token Ring
->20 belong&0x03FFFFFF 7 (BSD ARCNET
->20 belong&0x03FFFFFF 8 (SLIP
->20 belong&0x03FFFFFF 9 (PPP
->20 belong&0x03FFFFFF 10 (FDDI
->20 belong&0x03FFFFFF 11 (RFC 1483 ATM
->20 belong&0x03FFFFFF 12 (Raw IP
->20 belong&0x03FFFFFF 13 (BSD/OS SLIP
->20 belong&0x03FFFFFF 14 (BSD/OS PPP
->20 belong&0x03FFFFFF 19 (Linux ATM Classical IP
->20 belong&0x03FFFFFF 50 (PPP or Cisco HDLC
->20 belong&0x03FFFFFF 51 (PPP-over-Ethernet
->20 belong&0x03FFFFFF 99 (Symantec Enterprise Firewall
->20 belong&0x03FFFFFF 100 (RFC 1483 ATM
->20 belong&0x03FFFFFF 101 (Raw IP
->20 belong&0x03FFFFFF 102 (BSD/OS SLIP
->20 belong&0x03FFFFFF 103 (BSD/OS PPP
->20 belong&0x03FFFFFF 104 (BSD/OS Cisco HDLC
->20 belong&0x03FFFFFF 105 (802.11
->20 belong&0x03FFFFFF 106 (Linux Classical IP over ATM
->20 belong&0x03FFFFFF 107 (Frame Relay
->20 belong&0x03FFFFFF 108 (OpenBSD loopback
->20 belong&0x03FFFFFF 109 (OpenBSD IPsec encrypted
->20 belong&0x03FFFFFF 112 (Cisco HDLC
->20 belong&0x03FFFFFF 113 (Linux cooked v1
->20 belong&0x03FFFFFF 114 (LocalTalk
->20 belong&0x03FFFFFF 117 (OpenBSD PFLOG
->20 belong&0x03FFFFFF 119 (802.11 with Prism header
->20 belong&0x03FFFFFF 122 (RFC 2625 IP over Fibre Channel
->20 belong&0x03FFFFFF 123 (SunATM
->20 belong&0x03FFFFFF 127 (802.11 with radiotap header
->20 belong&0x03FFFFFF 129 (Linux ARCNET
->20 belong&0x03FFFFFF 130 (Juniper Multi-Link PPP
->20 belong&0x03FFFFFF 131 (Juniper Multi-Link Frame Relay
->20 belong&0x03FFFFFF 132 (Juniper Encryption Services PIC
->20 belong&0x03FFFFFF 133 (Juniper GGSN PIC
->20 belong&0x03FFFFFF 134 (Juniper FRF.16 Frame Relay
->20 belong&0x03FFFFFF 135 (Juniper ATM2 PIC
->20 belong&0x03FFFFFF 136 (Juniper Advanced Services PIC
->20 belong&0x03FFFFFF 137 (Juniper ATM1 PIC
->20 belong&0x03FFFFFF 138 (Apple IP over IEEE 1394
->20 belong&0x03FFFFFF 139 (SS7 MTP2 with pseudo-header
->20 belong&0x03FFFFFF 140 (SS7 MTP2
->20 belong&0x03FFFFFF 141 (SS7 MTP3
->20 belong&0x03FFFFFF 142 (SS7 SCCP
->20 belong&0x03FFFFFF 143 (DOCSIS
->20 belong&0x03FFFFFF 144 (Linux IrDA
->20 belong&0x03FFFFFF 147 (Private use 0
->20 belong&0x03FFFFFF 148 (Private use 1
->20 belong&0x03FFFFFF 149 (Private use 2
->20 belong&0x03FFFFFF 150 (Private use 3
->20 belong&0x03FFFFFF 151 (Private use 4
->20 belong&0x03FFFFFF 152 (Private use 5
->20 belong&0x03FFFFFF 153 (Private use 6
->20 belong&0x03FFFFFF 154 (Private use 7
->20 belong&0x03FFFFFF 155 (Private use 8
->20 belong&0x03FFFFFF 156 (Private use 9
->20 belong&0x03FFFFFF 157 (Private use 10
->20 belong&0x03FFFFFF 158 (Private use 11
->20 belong&0x03FFFFFF 159 (Private use 12
->20 belong&0x03FFFFFF 160 (Private use 13
->20 belong&0x03FFFFFF 161 (Private use 14
->20 belong&0x03FFFFFF 162 (Private use 15
->20 belong&0x03FFFFFF 163 (802.11 with AVS header
->20 belong&0x03FFFFFF 164 (Juniper Passive Monitor PIC
->20 belong&0x03FFFFFF 165 (BACnet MS/TP
->20 belong&0x03FFFFFF 166 (PPPD
->20 belong&0x03FFFFFF 167 (Juniper PPPoE
->20 belong&0x03FFFFFF 168 (Juniper PPPoE/ATM
->20 belong&0x03FFFFFF 169 (GPRS LLC
->20 belong&0x03FFFFFF 170 (GPF-T
->20 belong&0x03FFFFFF 171 (GPF-F
->20 belong&0x03FFFFFF 174 (Juniper PIC Peer
->20 belong&0x03FFFFFF 175 (Ethernet with Endace ERF header
->20 belong&0x03FFFFFF 176 (Packet-over-SONET with Endace ERF header
->20 belong&0x03FFFFFF 177 (Linux LAPD
->20 belong&0x03FFFFFF 178 (Juniper Ethernet
->20 belong&0x03FFFFFF 179 (Juniper PPP
->20 belong&0x03FFFFFF 180 (Juniper Frame Relay
->20 belong&0x03FFFFFF 181 (Juniper C-HDLC
->20 belong&0x03FFFFFF 182 (FRF.16 Frame Relay
->20 belong&0x03FFFFFF 183 (Juniper Voice PIC
->20 belong&0x03FFFFFF 184 (Arinc 429
->20 belong&0x03FFFFFF 185 (Arinc 653 Interpartition Communication
->20 belong&0x03FFFFFF 186 (USB with FreeBSD header
->20 belong&0x03FFFFFF 187 (Bluetooth HCI H4
->20 belong&0x03FFFFFF 188 (802.16 MAC Common Part Sublayer
->20 belong&0x03FFFFFF 189 (Linux USB
->20 belong&0x03FFFFFF 190 (Controller Area Network (CAN) v. 2.0B
->20 belong&0x03FFFFFF 191 (802.15.4 with Linux padding
->20 belong&0x03FFFFFF 192 (PPI
->20 belong&0x03FFFFFF 193 (802.16 MAC Common Part Sublayer plus radiotap header
->20 belong&0x03FFFFFF 194 (Juniper Integrated Service Module
->20 belong&0x03FFFFFF 195 (802.15.4 with FCS
->20 belong&0x03FFFFFF 196 (SITA
->20 belong&0x03FFFFFF 197 (Endace ERF
->20 belong&0x03FFFFFF 198 (Ethernet with u10 Networks pseudo-header
->20 belong&0x03FFFFFF 199 (IPMB
->20 belong&0x03FFFFFF 200 (Juniper Secure Tunnel
->20 belong&0x03FFFFFF 201 (Bluetooth HCI H4 with pseudo-header
->20 belong&0x03FFFFFF 202 (AX.25 with KISS header
->20 belong&0x03FFFFFF 203 (LAPD
->20 belong&0x03FFFFFF 204 (PPP with direction pseudo-header
->20 belong&0x03FFFFFF 205 (Cisco HDLC with direction pseudo-header
->20 belong&0x03FFFFFF 206 (Frame Relay with direction pseudo-header
->20 belong&0x03FFFFFF 209 (Linux IPMB
->20 belong&0x03FFFFFF 215 (802.15.4 with non-ASK PHY header
->20 belong&0x03FFFFFF 216 (Linux evdev events
->20 belong&0x03FFFFFF 219 (MPLS with label as link-layer header
->20 belong&0x03FFFFFF 220 (Memory-mapped Linux USB
->20 belong&0x03FFFFFF 221 (DECT
->20 belong&0x03FFFFFF 222 (AOS Space Data Link protocol
->20 belong&0x03FFFFFF 223 (Wireless HART
->20 belong&0x03FFFFFF 224 (Fibre Channel FC-2
->20 belong&0x03FFFFFF 225 (Fibre Channel FC-2 with frame delimiters
->20 belong&0x03FFFFFF 226 (Solaris IPNET
->20 belong&0x03FFFFFF 227 (SocketCAN
->20 belong&0x03FFFFFF 228 (Raw IPv4
->20 belong&0x03FFFFFF 229 (Raw IPv6
->20 belong&0x03FFFFFF 230 (802.15.4 without FCS
->20 belong&0x03FFFFFF 231 (D-Bus messages
->20 belong&0x03FFFFFF 232 (Juniper Virtual Server
->20 belong&0x03FFFFFF 233 (Juniper SRX E2E
->20 belong&0x03FFFFFF 234 (Juniper Fibre Channel
->20 belong&0x03FFFFFF 235 (DVB-CI
->20 belong&0x03FFFFFF 236 (MUX27010
->20 belong&0x03FFFFFF 237 (STANAG 5066 D_PDUs
->20 belong&0x03FFFFFF 238 (Juniper ATM CEMIC
->20 belong&0x03FFFFFF 239 (Linux netfilter log messages
->20 belong&0x03FFFFFF 240 (Hilscher netAnalyzer
->20 belong&0x03FFFFFF 241 (Hilscher netAnalyzer with delimiters
->20 belong&0x03FFFFFF 242 (IP-over-Infiniband
->20 belong&0x03FFFFFF 243 (MPEG-2 Transport Stream packets
->20 belong&0x03FFFFFF 244 (ng4t ng40
->20 belong&0x03FFFFFF 245 (NFC LLCP
->20 belong&0x03FFFFFF 246 (Packet filter state syncing
->20 belong&0x03FFFFFF 247 (InfiniBand
->20 belong&0x03FFFFFF 248 (SCTP
->20 belong&0x03FFFFFF 249 (USB with USBPcap header
->20 belong&0x03FFFFFF 250 (Schweitzer Engineering Laboratories RTAC packets
->20 belong&0x03FFFFFF 251 (Bluetooth Low Energy air interface
->20 belong&0x03FFFFFF 252 (Wireshark Upper PDU export
->20 belong&0x03FFFFFF 253 (Linux netlink
->20 belong&0x03FFFFFF 254 (Bluetooth Linux Monitor
->20 belong&0x03FFFFFF 255 (Bluetooth Basic Rate/Enhanced Data Rate baseband packets
->20 belong&0x03FFFFFF 256 (Bluetooth Low Energy air interface with pseudo-header
->20 belong&0x03FFFFFF 257 (PROFIBUS data link layer
->20 belong&0x03FFFFFF 258 (Apple DLT_PKTAP
->20 belong&0x03FFFFFF 259 (Ethernet with 802.3 Clause 65 EPON preamble
->20 belong&0x03FFFFFF 260 (IPMI trace packets
->20 belong&0x03FFFFFF 261 (Z-Wave RF profile R1 and R2 packets
->20 belong&0x03FFFFFF 262 (Z-Wave RF profile R3 packets
->20 belong&0x03FFFFFF 263 (WattStopper Digital Lighting Mngmt/Legrand Nitoo Open Proto
->20 belong&0x03FFFFFF 264 (ISO 14443 messages
->20 belong&0x03FFFFFF 265 (IEC 62106 Radio Data System groups
->20 belong&0x03FFFFFF 266 (USB with Darwin header
->20 belong&0x03FFFFFF 267 (OpenBSD DLT_OPENFLOW
->20 belong&0x03FFFFFF 268 (IBM SDLC frames
->20 belong&0x03FFFFFF 269 (TI LLN sniffer frames
->20 belong&0x03FFFFFF 271 (Linux vsock
->20 belong&0x03FFFFFF 272 (Nordic Semiconductor Bluetooth LE sniffer frames
->20 belong&0x03FFFFFF 273 (Excentis XRA-31 DOCSIS 3.1 RF sniffer frames
->20 belong&0x03FFFFFF 274 (802.3br mPackets
->20 belong&0x03FFFFFF 275 (DisplayPort AUX channel monitoring data
->20 belong&0x03FFFFFF 276 (Linux cooked v2
->20 belong&0x03FFFFFF 278 (OpenVizsla USB
->20 belong&0x03FFFFFF 279 (Elektrobit High Speed Capture and Replay (EBHSCR)
->20 belong&0x03FFFFFF 281 (Broadcom tag
->20 belong&0x03FFFFFF 282 (Broadcom tag (prepended)
->20 belong&0x03FFFFFF 283 (802.15.4 with TAP
->20 belong&0x03FFFFFF 284 (Marvell DSA
->20 belong&0x03FFFFFF 285 (Marvell EDSA
->20 belong&0x03FFFFFF 286 (ELEE lawful intercept
->20 belong&0x03FFFFFF 287 (Z-Wave serial
->20 belong&0x03FFFFFF 288 (USB 2.0
->20 belong&0x03FFFFFF 289 (ATSC ALP
->20 belong&0x03FFFFFF 290 (Event Tracing for Windows
->20 belong&0x03FFFFFF 291 (Hilscher netANALYZER NG pseudo-footer
->20 belong&0x03FFFFFF 292 (ZBOSS NCP protocol with pseudo-header
->20 belong&0x03FFFFFF 293 (Low-Speed USB 2.0/1.1/1.0
->20 belong&0x03FFFFFF 294 (Full-Speed USB 2.0/1.1/1.0
->20 belong&0x03FFFFFF 295 (High-Speed USB 2.0
-# print default match
->20 default x
->>20 belong x (linktype#%u
->16 belong x \b, capture length %u)
-
-# packets time stamps in seconds and microseconds.
-0 ubelong 0xa1b2c3d4 pcap capture file, microseconds ts (big-endian)
-!:mime application/vnd.tcpdump.pcap
->0 use pcap-be
-0 ulelong 0xa1b2c3d4 pcap capture file, microsecond ts (little-endian)
-!:mime application/vnd.tcpdump.pcap
->0 use \^pcap-be
-
-# packets time stamps in seconds and nanoseconds.
-0 ubelong 0xa1b23c4d pcap capture file, nanosecond ts (big-endian)
-!:mime application/vnd.tcpdump.pcap
->0 use pcap-be
-0 ulelong 0xa1b23c4d pcap capture file, nanosecond ts (little-endian)
-!:mime application/vnd.tcpdump.pcap
->0 use \^pcap-be
-
-#
-# "libpcap"-with-Alexey-Kuznetsov's-patches capture files.
-#
-0 ubelong 0xa1b2cd34 pcap capture file, microsecond ts, extensions (big-endian)
->0 use pcap-be
-0 ulelong 0xa1b2cd34 pcap capture file, microsecond ts, extensions (little-endian)
->0 use \^pcap-be
-
-#
-# "pcapng" capture files.
-# https://github.com/pcapng/pcapng
-# Pcapng files can contain multiple sections. Printing the endianness,
-# snaplen, or other information from the first SHB may be misleading.
-#
-0 ubelong 0x0a0d0d0a
->8 ubelong 0x1a2b3c4d pcapng capture file
->>12 beshort x - version %d
->>14 beshort x \b.%d
-0 ulelong 0x0a0d0d0a
->8 ulelong 0x1a2b3c4d pcapng capture file
->>12 leshort x - version %d
->>14 leshort x \b.%d
-
-#
-# AIX "iptrace" capture files.
-#
-0 string iptrace\0401.0 AIX iptrace capture file
-0 string iptrace\0402.0 AIX iptrace capture file
-
-#
-# Novell LANalyzer capture files.
-# URL: http://www.blacksheepnetworks.com/security/info/nw/lan/trace.txt
-# Reference: https://github.com/wireshark/wireshark/blob/master/wiretap/lanalyzer.c
-# Update: Joerg Jenderek
-#
-# regular trace header record (RT_HeaderRegular)
-0 leshort 0x1001
-# GRR: line above is too generic because it matches Commodore Plus/4 BASIC V3.5
-# and VIC-20 BASIC V2 program
-# skip many Commodore Basic programs (Microzodiac.prg Minefield.prg Vic-tac-toe.prg breakvic_joy.prg)
-# with invalid second record type 0 instead of "Trace receive channel name record"
->(2.s+4) leshort =0x1006h
->>0 use novell-lanalyzer
-# cyclic trace header record (RT_HeaderCyclic)
-0 leshort 0x1007
->0 use novell-lanalyzer
-0 name novell-lanalyzer
->0 leshort x Novell LANalyzer capture file
-# https://reposcope.com/mimetype/application/x-lanalyzer
-!:mime application/x-lanalyzer
-# maybe also TR2 .. TR9 TRA .. TRZ
-!:ext tr1
-# version like: 1.5
->4 ubyte x \b, version %u
-# minor version; one byte identifying the trace file minor version number
->5 ubyte x \b.%u
-# Trace header record type like: 1001~regular or 1007~cyclic
->0 leshort !0x1001 \b, record type %4.4x
-# record_length[2] is the length of the data part of the 1st record (without "type" and "length" fields) like: 4Ch
->2 leshort x \b, record length %#x
-# second record type like: 1006h~Trace receive channel name record
->(2.s+4) leshort !0x1006h \b, 2nd record type %#4.4x
->(2.s+6) leshort x \b, 2nd record length %#x
-# each channel name is a null-terminated, eight-byte ASCII string like: Channel1
->(2.s+8) string x \b, names %.9s
-# 2nd channel name like: Channel2
->(2.s+17) string x %.9s ...
-
-#
-# HP-UX "nettl" capture files.
-# URL: https://nixdoc.net/man-pages/HP-UX/man1m/nettl.1m.html
-# Reference: https://github.com/wireshark/wireshark/blob/master/wiretap/nettl.c
-# Update: Joerg Jenderek
-# Note: Wireshark fills "meta information header fields" with "dummy" values
-# nettl_magic_hpux9[12]; for HP-UX 9.x not tested
-0 string \x00\x00\x00\x01\x00\x00\x00\x00\x00\x07\xD0\x00 HP/UX 9.x nettl capture file
-!:mime application/x-nettl
-!:ext trc0/trc1
-# nettl_magic_hpux10[12]; for HP-UX 10.x and 11.x
-0 string \x54\x52\x00\x64\x00 HP/UX nettl capture file
-# https://reposcope.com/mimetype/application/x-nettl
-!:mime application/x-nettl
-# maybe also TRC000 TRC001 TRC002 ...
-!:ext trc0/trc1
-# file_name[56]; maybe also like /tmp/raw.tr.TRC000
->12 string !/tmp/wireshark.TRC000
->>12 string x "%-.56s"
-# tz[20]; like UTC
->68 string !UTC \b, tz
->>68 string x %-.20s
-# host_name[9];
->88 string >\0 \b, host %-.9s
-# os_vers[9]; like B.11.11
->97 string !B.11.11 \b, os
->>97 string x %-.9s
-# os_v; like 55h
->>106 ubyte x (%#x)
-# xxa[8]; like 0
->107 ubequad !0 \b, xxa=%#16.16llx
-# model[11] like: 9000/800
->115 string !9000/800 \b, model
->>115 string x %-.11s
-# unknown; probably just padding to 128 bytes like: 0406h
->126 ubeshort !0x0406h \b, at 126 %#4.4x
-
-#
-# RADCOM WAN/LAN Analyzer capture files.
-#
-0 string \x42\xd2\x00\x34\x12\x66\x22\x88 RADCOM WAN/LAN Analyzer capture file
-
-#
-# NetStumbler log files. Not really packets, per se, but about as
-# close as you can get. These are log files from NetStumbler, a
-# Windows program, that scans for 802.11b networks.
-#
-0 string NetS NetStumbler log file
->8 lelong x \b, %d stations found
-
-#
-# *Peek tagged capture files.
-#
-0 string \177ver EtherPeek/AiroPeek/OmniPeek capture file
-
-#
-# Visual Networks traffic capture files.
-#
-0 string \x05VNF Visual Networks traffic capture file
-
-#
-# Network Instruments Observer capture files.
-#
-0 string ObserverPktBuffe Network Instruments Observer capture file
-
-#
-# Files from Accellent Group's 5View products.
-#
-# URL: http://www.infovista.com
-# Reference: http://mark0.net/download/triddefs_xml.7z
-# defs/0/5vw.trid.xml
-# https://2.na.dl.wireshark.org/src/wireshark-3.6.2.tar.xz
-# wireshark-3.6.2/wiretap/5views.c
-# Update: Joerg Jenderek
-# Note: called "5View capture" by TrID and
-# "Wireshark capture file" on Windows or
-# "Packet Capture (Accellent/InfoVista 5view)" by shared MIME-info database
-# verified/falsified by `wireshark *.5vw`
-0 string \xaa\xaa\xaa\xaa
-# skip misidentified boot/x86_64/loader/kroete.dat on Suse LEAP DVD
-# by check for valid record version
->8 ulelong =0x00010000
->>0 use 5view-le
-0 name 5view-le
-# t_5VW_Info_Header.Signature = CST_5VW_INFO_HEADER_KEY = 0xAAAAAAAAU
->0 ulelong x 5View capture file
-# https://reposcope.com/mimetype/application/x-5view
-!:mime application/x-5view
-!:ext 5vw
-# size of header in bytes (included signature and reserved fields); probably always 20h
->4 ulelong !0x00000020 \b, header size %#x
-# version of header record; apparently always CST_5VW_INFO_RECORD_VERSION=0x00010000U
->8 ulelong !0x00010000 \b, record version %#x
-# DataSize; total size of data without header like: 18h
->12 ulelong x \b, record size %#x
-# filetype; type of the capture file like: 18001000h
->16 ulelong x \b, file type %#8.8x
-# Reserved[3]; reserved for future use; apparently zero
->20 quad !0 \b, Reserved %#llx
-# look for record header key CST_5VW_RECORDS_HEADER_KEY of structure t_5VW_TimeStamped_Header
->0x20 search/0xB8/b \xEE\xEE\x33\x33 \b; record
-# HeaderSize; actual size of this header in bytes like: 32 24h
->>&0 uleshort x size %#x
-# HeaderType; exact type of this header; probably always 0x4000
->>&2 uleshort !0x4000 \b, header type %#x
-# RecType; type of record like: 80000000h
->>&4 ulelong x \b, record type %#x
-# RecSubType; subtype of record like: 0
->>&8 ulelong !0 \b, subtype %#x
-# RecSize; Size of one record like: 5Ch
->>&12 ulelong x \b, RecSize %#x
-# RecNb; Number of records like: 1
->>&16 ulelong >1 \b, %#x records
-# Timestamp Utc
-#>>&20 ulelong x \b, RAW TIME %#8.8x
->>&20 date x \b, Time-stamp %s
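
The pcap and pcapng entries above reduce to a handful of magic numbers checked in both byte orders. A condensed Python sketch of that detection, reporting only the fields shown above; the helper name and output wording are illustrative:

import struct

PCAP_MAGICS = {0xA1B2C3D4: "microsecond ts", 0xA1B23C4D: "nanosecond ts",
               0xA1B2CD34: "microsecond ts, extensions"}

def sniff_capture(buf: bytes) -> str | None:
    """Distinguish pcap and pcapng captures by magic number, mirroring the
    rules above (pcap: version at 4/6, linktype at 20; pcapng: byte-order
    magic at 8, version at 12/14)."""
    if len(buf) < 24:
        return None
    for endian in ("<", ">"):
        magic, = struct.unpack_from(endian + "I", buf, 0)
        if magic in PCAP_MAGICS:
            major, minor = struct.unpack_from(endian + "HH", buf, 4)
            linktype, = struct.unpack_from(endian + "I", buf, 20)
            return (f"pcap capture file, {PCAP_MAGICS[magic]} "
                    f"- version {major}.{minor}, linktype {linktype & 0x03FFFFFF}")
        if magic == 0x0A0D0D0A:
            bom, = struct.unpack_from(endian + "I", buf, 8)
            if bom == 0x1A2B3C4D:
                major, minor = struct.unpack_from(endian + "HH", buf, 12)
                return f"pcapng capture file - version {major}.{minor}"
    return None
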
diff --git a/contrib/libs/libmagic/magic/Magdir/softquad b/contrib/libs/libmagic/magic/Magdir/softquad
deleted file mode 100644
index 28f03b9b78..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/softquad
+++ /dev/null
@@ -1,40 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: softquad,v 1.14 2022/10/28 17:19:54 christos Exp $
-# softquad: file(1) magic for SoftQuad Publishing Software
-# URL: https://en.wikipedia.org/wiki/SoftQuad_Software
-#
-# Author/Editor and RulesBuilder
-#
-# XXX - byte order?
-#
-0 string \<!SQ\ DTD> Compiled SGML rules file
->9 string >\0 Type %s
-0 string \<!SQ\ A/E> A/E SGML Document binary
->9 string >\0 Type %s
-0 string \<!SQ\ STS> A/E SGML binary styles file
->9 string >\0 Type %s
-0 short 0xc0de Compiled PSI (v1) data
-0 short 0xc0da Compiled PSI (v2) data
->3 string >\0 (%s)
-# Binary sqtroff font/desc files...
-# GRR: the line below is also true for 5View capture file handled by ./sniffer
-0 short 0125252
-# skip 5View capture file with "invalid" version AAAAh
->2 short >0 SoftQuad DESC or font file binary - version %d
-# Bitmaps...
-0 search/1 SQ\ BITMAP1 SoftQuad Raster Format text
-#0 string SQ\ BITMAP2 SoftQuad Raster Format data
-# sqtroff intermediate language (replacement for ditroff int. lang.)
-0 string X\ SoftQuad troff Context intermediate
->2 string 495 for AT&T 495 laser printer
->2 string hp for Hewlett-Packard LaserJet
->2 string impr for IMAGEN imPRESS
->2 string ps for PostScript
-
-# From: Michael Piefel <piefel@debian.org>
-# sqtroff intermediate language (replacement for ditroff int. lang.)
-0 string X\ 495 SoftQuad troff Context intermediate for AT&T 495 laser printer
-0 string X\ hp SoftQuad troff Context intermediate for HP LaserJet
-0 string X\ impr SoftQuad troff Context intermediate for IMAGEN imPRESS
-0 string X\ ps SoftQuad troff Context intermediate for PostScript
diff --git a/contrib/libs/libmagic/magic/Magdir/sosi b/contrib/libs/libmagic/magic/Magdir/sosi
deleted file mode 100644
index 88ecc512ba..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sosi
+++ /dev/null
@@ -1,40 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sosi,v 1.2 2021/02/23 00:51:10 christos Exp $
-# SOSI
-# Summary: Systematic Organization of Spatial Information
-# Long description: Norwegian text based map format
-# File extension: .sos
-# Full name: Petter Reinholdtsen (pere@hungry.com)
-# Reference: https://en.wikipedia.org/wiki/SOSI
-#
-# Example SOSI files available from
-# https://trac.osgeo.org/gdal/ticket/3638
-# https://nedlasting.geonorge.no/geonorge/Basisdata/N50Kartdata/SOSI/
-# https://nedlasting.geonorge.no/geonorge/Samferdsel/Elveg/SOSI/
-#
-# Files start with optional comments (from "!" to the next line end)
-# followed by ".HODE" and end with "\n.SLUTT" followed by an optional
-# separator (any number of " ", "\t", "\n" or "\r"); there may be a BOM
-# at the start. Following ".HODE" near the start there is "..OMRÅDE"
-# (in UTF-8, ISO-8859-1 or some 7-bit Norwegian charset based on
-# ASCII), "..TRANSPAR", "..TEGNSETT " followed by the charset and a
-# separator, as well as "..SOSI-VERSJON " followed by the format
-# version and a separator.
-#
-# FIXME figure out how to accept any of [space], [tab], [newline] and
-# [carriage return] as separators, not only line end.
-
-# Not searching for the full "..OMRÅDE" so that non-UTF-8 character
-# sets also match
-0 search ..OMR
->0 search ..TRANSPAR
->>0 search .HODE SOSI map data
->>>&0 search ..SOSI-VERSJON
->>>>&1 string x \b, version %s
-# FIXME could not figure out way to make a match for .SLUTT at the end required
-#>-7 string \n.SLUTT slutt
-#>-8 string \n.SLUTT\n slutt-nl
-#>-9 string \n.SLUTT\r\n slutt-crnl2
-!:mime text/vnd.sosi
-!:ext sos
diff --git a/contrib/libs/libmagic/magic/Magdir/spec b/contrib/libs/libmagic/magic/Magdir/spec
deleted file mode 100644
index c504b1fd19..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/spec
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: spec,v 1.4 2009/09/19 16:28:12 christos Exp $
-# spec: file(1) magic for SPEC raw results (*.raw, *.rsf)
-#
-# Cloyce D. Spradling <cloyce@headgear.org>
-
-0 string spec SPEC
->4 string .cpu CPU
->>8 string <: \b%.4s
->>12 string . raw result text
-
-17 string version=SPECjbb SPECjbb
->32 string <: \b%.4s
->>37 string <: v%.4s raw result text
-
-0 string BEGIN\040SPECWEB SPECweb
->13 string <: \b%.2s
->>15 string _SSL \b_SSL
->>>20 string <: v%.4s raw result text
->>16 string <: v%.4s raw result text
diff --git a/contrib/libs/libmagic/magic/Magdir/spectrum b/contrib/libs/libmagic/magic/Magdir/spectrum
deleted file mode 100644
index cf14551b4d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/spectrum
+++ /dev/null
@@ -1,184 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: spectrum,v 1.10 2023/05/08 01:33:36 christos Exp $
-# spectrum: file(1) magic for Spectrum emulator files.
-#
-# John Elliott <jce@seasip.demon.co.uk>
-
-#
-# Spectrum +3DOS header
-#
-0 string PLUS3DOS\032 Spectrum +3 data
->15 byte 0 - BASIC program
->15 byte 1 - number array
->15 byte 2 - character array
->15 byte 3 - memory block
->>16 belong 0x001B0040 (screen)
->15 byte 4 - Tasword document
->15 string TAPEFILE - ZXT tapefile
-#
-# Tape file. This assumes the .TAP starts with a Spectrum-format header,
-# which nearly all will.
-#
-# Update: Sanity-check string contents to be printable.
-# -Adam Buchbinder <adam.buchbinder@gmail.com>
-# Update: Joerg Jenderek 2023 May
-# URL: http://fileformats.archiveteam.org/wiki/TAP_(ZX_Spectrum)
-# Reference: http://web.archive.org/web/20110711141601/http://www.zxmodules.de/fileformats/tapformat.html
-# http://mark0.net/download/triddefs_xml.7z/defs/t/tap-zx.trid.xml
-# Note: called "ZX Spectrum Tape image" by TrID and "TAP (ZX Spectrum)" by DROID via PUID fmt/801
-# verified by fuse-emulator-utils `tzxlist EXAMPLES.TAP`
-#
-# headers length 19=023 and flag byte 0 indicating a standard ROM loading header
-0 string \023\000\000
->4 string >\0
-# skip {85CEE8D6-0F90-4492-B484-98E38862B28D}.2.ver0x0000000000000004.db {DDF571F2-BE98-426D-8288-1A9A39C3FDA2}.2.ver0x0000000000000001.db
-# inside c:\ProgramData\Microsoft\Windows\Caches according to TrID and DROID
->>23 ubyte =0xFF
-# skip DROID fmt-801-signature-id-1166.tap with invalid name \253\253\253\253\253\253\253\253\253\253
-# which looks like: "TF COPY II" "screen " "\023\001TF" " 1943 "
->>>4 string <\177 Spectrum .TAP data "%-10.10s"
-#!:mime application/octet-stream
-!:mime application/x-spectrum-tap
-!:ext tap
->>>>3 byte 0 - BASIC program
-# autostart line; 0..9999 are valid; 32768 means "no auto-loading"
->>>>>16 uleshort x \b, autostart line %u
-# program length; length of BASIC program
->>>>>18 uleshort x \b, program length %u
->>>>3 byte 1 - number array
->>>>3 byte 2 - character array
->>>>3 byte 3 - memory block
-# length of the following data 1B00h=6912 and start address 4000h=16384 in case of a SCREEN$ header
->>>>>14 belong 0x001B0040 (screen)
-# unused 32768=8000h
->>>>>18 uleshort !32768 \b, unused %u
-# zxlength; length of the following data after the header
->>>>14 uleshort x \b, data length %u
-#>>14 uleshort x \b, data length %#x
-# checksum byte; simply all bytes (including flag byte) XORed
-#>>>>20 ubyte x \b, checksum %#x
-
-# The following three blocks are from pak21-spectrum@srcf.ucam.org
-# TZX tape images
-# Update: Joerg Jenderek 2023 May
-# URL: http://fileformats.archiveteam.org/wiki/TZX
-# Reference: https://worldofspectrum.net/TZXformat.html
-# http://mark0.net/download/triddefs_xml.7z/defs/t/tzx.trid.xml
-# Note: called "ZX Spectrum Tape image" by TrID and "TZX Format" by DROID via PUID fmt/1000
-0 string ZXTape!\x1a Spectrum .TZX data
-#!:mime application/octet-stream
-!:mime application/x-spectrum-tzx
-# CDT is used for Amstrad tapes
-!:ext tzx/cdt
->8 byte x version %d
->9 byte x \b.%d
-# ID of first block
->10 ubyte x \b; ID %#x
-# turbo speed data block
->10 ubyte =0x11 (turbo)
-# length of PILOT tone (number of pulses)
->>21 uleshort x \b, %u pilot pulses
-# length of PILOT pulse
->>11 uleshort x with %u tstates
-# length of SYNC first pulse
->>13 uleshort x \b, %u and
-# length of SYNC second pulse
->>15 uleshort x %u sync tstates
-# length of ZERO bit pulse
->>17 uleshort x \b, %u zero tstates
-# length of ONE bit pulse
->>19 uleshort x \b, %u one tstates
-# used bits in the last byte
->>23 ubyte x \b, use %u bit
-# plural s
->>23 ubyte >1 \bs
-# pause after this block in milliseconds
->>24 uleshort x \b, %u ms pause
-# BYTE[3]; length of data that follow
->>26 ulelong&0x00FFffFF x \b, %u data bytes
->10 ubyte =0x20 (pause)
-# pause duration in milliseconds
->>11 uleshort x %u ms
-# text description
->10 ubyte =0x30 (text)
-# length of the text description
-#>>11 ubyte x L=%u
->>11 pstring x "%s"
-# archive text description in ASCII format
->10 ubyte =0x32 (archive info)
-# length of archive text
->>11 uleshort x \b, %#x bytes
-# number of text strings
->>13 ubyte x with %u (type) text parts
-# text type identification byte: 0~title 1~publisher 2~author 3~year 4~language 5~type 6~price 7~protection 8~origin ff~comment
->>14 byte <9 (%d)
->>>14 byte >-2
-# length of text string
-#>>>>15 ubyte x L=%u
->>>>15 pstring x %s
-# 2nd possible text description
->>>>>&0 byte <9 (%d)
->>>>>>&-1 byte >-2
->>>>>>>&0 pstring x %s
-# 3rd possible text description
->>>>>>>>&0 byte <9 (%d)
->>>>>>>>>&-1 byte >-2
->>>>>>>>>>&0 pstring x %s
-# 4th possible text description
->>>>>>>>>>>&0 byte <9 (%d)
->>>>>>>>>>>>&-1 byte >-2
->>>>>>>>>>>>>&0 pstring x %s
-# 5th possible text description
->>>>>>>>>>>>>>&0 byte <9 (%d)
->>>>>>>>>>>>>>>&-1 byte >-2
->>>>>>>>>>>>>>>>&0 pstring x %s
-# 6th possible text description
->>>>>>>>>>>>>>>>>&0 byte <9 (%d)
->>>>>>>>>>>>>>>>>>&-1 byte >-2
->>>>>>>>>>>>>>>>>>>&0 pstring x %s
-# 7th possible text description
->>>>>>>>>>>>>>>>>>>>&0 byte <9 (%d)
->>>>>>>>>>>>>>>>>>>>>&-1 byte >-2
->>>>>>>>>>>>>>>>>>>>>>&0 pstring x %s
-
-# RZX input recording files
-0 string RZX! Spectrum .RZX data
->4 byte x version %d
->5 byte x \b.%d
-
-# Floppy disk images
-0 string MV\ -\ CPCEMU\ Disk-Fil Amstrad/Spectrum .DSK data
-0 string MV\ -\ CPC\ format\ Dis Amstrad/Spectrum DU54 .DSK data
-0 string EXTENDED\ CPC\ DSK\ Fil Amstrad/Spectrum Extended .DSK data
-0 string SINCLAIR Spectrum .SCL Betadisk image
-
-# Hard disk images
-0 string RS-IDE\x1a Spectrum .HDF hard disk image
->7 byte x \b, version %#02x
-
-# SZX snapshots (fuse and spectaculator)
-# Martin M. S. Pedersen <martin@linux.com>
-# http://www.spectaculator.com/docs/zx-state/header.shtml
-#
-0 string ZXST zx-state snapshot
->4 byte x version %d
->5 byte x \b.%d
->>6 byte 0 16k ZX Spectrum
->>6 byte 1 48k ZX Spectrum/ZX Spectrum+
->>6 byte 2 ZX Spectrum 128
->>6 byte 3 ZX Spectrum +2
->>6 byte 4 ZX Spectrum +2A/+2B
->>6 byte 5 ZX Spectrum +3
->>6 byte 6 ZX Spectrum +3e
->>6 byte 7 Pentagon 128
->>6 byte 8 Timex Sinclair TC2048
->>6 byte 9 Timex Sinclair TC2068
->>6 byte 10 Scorpion ZS-256
->>6 byte 11 ZX Spectrum SE
->>6 byte 12 Timex Sinclair TS2068
->>6 byte 13 Pentagon 512
->>6 byte 14 Pentagon 1024
->>6 byte 15 48k ZX Spectrum (NTSC)
->>6 byte 16 ZX Spectrum 128Ke
->>>7 byte 1 (alternate timings)
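
The .TAP test above keys on the 19-byte standard ROM loading header (block length 0x0013, flag 0). A small Python sketch that decodes the same fields; the helper name is illustrative and the offsets follow the rules above:

import struct

TAP_TYPES = {0: "BASIC program", 1: "number array",
             2: "character array", 3: "memory block"}

def tap_first_header(buf: bytes) -> str | None:
    """Decode the standard header at the start of a .TAP image: type at 3,
    10-byte name at 4, data length at 14, parameters at 16/18, and the
    0xFF flag of the following data block at 23."""
    if not buf.startswith(b"\x13\x00\x00") or len(buf) < 24 or buf[23] != 0xFF:
        return None
    kind = buf[3]
    name = buf[4:14].decode("ascii", "replace").rstrip()
    length, param1, param2 = struct.unpack_from("<HHH", buf, 14)
    info = f'Spectrum .TAP data "{name}" - {TAP_TYPES.get(kind, "unknown")}, data length {length}'
    if kind == 0:
        info += f", autostart line {param1}, program length {param2}"
    return info
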
diff --git a/contrib/libs/libmagic/magic/Magdir/sql b/contrib/libs/libmagic/magic/Magdir/sql
deleted file mode 100644
index 00f36179f8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sql
+++ /dev/null
@@ -1,288 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sql,v 1.26 2023/04/29 17:26:58 christos Exp $
-# sql: file(1) magic for SQL files
-#
-# From: "Marty Leisner" <mleisner@eng.mc.xerox.com>
-# Recognize some MySQL files.
-# Elan Ruusamae <glen@delfi.ee>, added MariaDB signatures
-# from https://bazaar.launchpad.net/~maria-captains/maria/5.5/view/head:/support-files/magic
-#
-0 beshort 0xfe01 MySQL table definition file
->2 byte x Version %d
->3 byte 0 \b, type UNKNOWN
->3 byte 1 \b, type DIAM_ISAM
->3 byte 2 \b, type HASH
->3 byte 3 \b, type MISAM
->3 byte 4 \b, type PISAM
->3 byte 5 \b, type RMS_ISAM
->3 byte 6 \b, type HEAP
->3 byte 7 \b, type ISAM
->3 byte 8 \b, type MRG_ISAM
->3 byte 9 \b, type MYISAM
->3 byte 10 \b, type MRG_MYISAM
->3 byte 11 \b, type BERKELEY_DB
->3 byte 12 \b, type INNODB
->3 byte 13 \b, type GEMINI
->3 byte 14 \b, type NDBCLUSTER
->3 byte 15 \b, type EXAMPLE_DB
->3 byte 16 \b, type CSV_DB
->3 byte 17 \b, type FEDERATED_DB
->3 byte 18 \b, type BLACKHOLE_DB
->3 byte 19 \b, type PARTITION_DB
->3 byte 20 \b, type BINLOG
->3 byte 21 \b, type SOLID
->3 byte 22 \b, type PBXT
->3 byte 23 \b, type TABLE_FUNCTION
->3 byte 24 \b, type MEMCACHE
->3 byte 25 \b, type FALCON
->3 byte 26 \b, type MARIA
->3 byte 27 \b, type PERFORMANCE_SCHEMA
->3 byte 127 \b, type DEFAULT
->0x0033 ulong x \b, MySQL version %d
-0 belong&0xffffff00 0xfefe0500 MySQL ISAM index file
->3 byte x Version %d
-0 belong&0xffffff00 0xfefe0600 MySQL ISAM compressed data file
->3 byte x Version %d
-0 belong&0xffffff00 0xfefe0700 MySQL MyISAM index file
->3 byte x Version %d
->14 beshort x \b, %d key parts
->16 beshort x \b, %d unique key parts
->18 byte x \b, %d keys
->28 bequad x \b, %lld records
->36 bequad x \b, %lld deleted records
-0 belong&0xffffff00 0xfefe0800 MySQL MyISAM compressed data file
->3 byte x Version %d
-0 belong&0xffffff00 0xfefe0900 MySQL Maria index file
->3 byte x Version %d
-0 belong&0xffffff00 0xfefe0a00 MySQL Maria compressed data file
->3 byte x Version %d
-0 belong&0xffffff00 0xfefe0c00
->4 string MACF MySQL Maria control file
->>3 byte x Version %d
-0 string \376bin MySQL replication log,
->9 long x server id %d
->8 byte 1
->>13 long 69 \b, MySQL V3.2.3
->>>19 string x \b, server version %s
->>13 long 75 \b, MySQL V4.0.2-V4.1
->>>25 string x \b, server version %s
->8 byte 15 MySQL V5+,
->>25 string x server version %s
->4 string MARIALOG MySQL Maria transaction log file
->>3 byte x Version %d
-
-#------------------------------------------------------------------------------
-# iRiver H Series database file
-# From Ken Guest <ken@linux.ie>
-# As observed from iRivNavi.iDB and unencoded firmware
-#
-0 string iRivDB iRiver Database file
->11 string >\0 Version %s
->39 string iHP-100 [H Series]
-
-#------------------------------------------------------------------------------
-# SQLite database files
-# Ken Guest <ken@linux.ie>, Ty Sarna, Zack Weinberg
-#
-# Version 1 used GDBM internally; its files cannot be distinguished
-# from other GDBM files.
-#
-# Update: Joerg Jenderek
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/sqlite-2x.trid.xml
-# Note: called "SQLite 2.x database" by TrID and "SQLite Database File Format" version 2 by DROID via PUID fmt/1135
-# Version 2 used this format:
-0 string **\ This\ file\ contains\ an\ SQLite SQLite 2.x database
-!:mime application/x-sqlite2
-# FileAttributesStore.db test.sqlite2
-!:ext sqlite/sqlite2/db
-
-# URL: https://en.wikipedia.org/wiki/SQLite
-# Reference: https://www.sqlite.org/fileformat.html
-# Update: Joerg Jenderek
-# Version 3 of SQLite allows applications to embed their own "user version"
-# number in the database at offset 60. Later, SQLite added an "application id"
-# at offset 68 that is preferred over "user version" for indicating the
-# associated application.
-#
-0 string SQLite\ format\ 3
-# skip DROID fmt-729-signature-id-1053.sqlite by checking for valid page size
->16 ubeshort >0 SQLite 3.x
-# deprecated
-#!:mime application/x-sqlite3
-!:mime application/vnd.sqlite3
-# seldom found extension sqlite3 like in SyncData.sqlite3
-# db
-# db3 like: AddrBook.db3 cgipcrvp.db3
-# https://www.maplesoft.com/support/help/Maple/view.aspx?path=worksheet%2freference%2fhelpdatabase
-# help is used for newer Maple help database
-# SQLite database weewx.sdb used by weather software weewx
-# https://www.weewx.com/docs/usersguide.htm
-# Avira Antivir use extension "dbe" like in avevtdb.dbe, avguard_tchk.dbe
-# Unfortunately extension sqlite also used for other databases starting with string
-# "TTCONTAINER" like in tracks.sqlite contentconsumer.sqlite contentproducerrepository.sqlite
-# and with string "ZV-zlib" in like extra.sqlite
->>68 belong !0x5CDE09EF database
-!:ext sqlite/sqlite3/db/db3/dbe/sdb/help
->>68 belong =0x5CDE09EF database
-# maple is used for Maple Workbook
-!:ext maple
->>60 belong =0x5f4d544e (Monotone source repository)
-# if no known user version then check for Application IDs with default clause
->>60 belong !0x5f4d544e
-# The "Application ID" set by PRAGMA application_id
->>>68 belong =0x0f055112 (Fossil checkout)
->>>68 belong =0x0f055113 (Fossil global configuration)
->>>68 belong =0x0f055111 (Fossil repository)
->>>68 belong =0x42654462 (Bentley Systems BeSQLite Database)
->>>68 belong =0x42654c6e (Bentley Systems Localization File)
->>>68 belong =0x47504b47 (OGC GeoPackage file)
-# https://www.sqlite.org/src/artifact?ci=trunk&filename=magic.txt
->>>68 belong =0x47503130 (OGC GeoPackage version 1.0 file)
->>>68 belong =0x45737269 (Esri Spatially-Enabled Database)
->>>68 belong =0x4d504258 (MBTiles tileset)
-# https://www.maplesoft.com/support/help/errors/view.aspx?path=Formats/Maple
->>>68 belong =0x5CDE09EF (Maple Workbook)
-# unknown application ID
->>>68 default x
->>>>68 belong !0 \b, application id %u
-# The "user version" as read and set by the user_version pragma like:
-# 1 2 4 5 7 9 10 25 36 43 53 400 416 131073 131074 131075
->>60 belong !0 \b, user version %d
-# SQLITE_VERSION_NUMBER like: 0 3008011 3016002 3007014 3017000 3022000 3028000 3031001
->>96 belong x \b, last written using SQLite version %d
-# database page size in bytes; a power of two between 512 and 32768, or 1 for 65536
-# like: 512 1024 often 4096 32768
->>16 ubeshort !4096 \b, page size %u
-# File format write version. 1 for legacy; 2 for WAL; 0 for corruptDB.sqlite
->>18 ubyte !1 \b, writer version %u
-# File format read version. 1 for legacy; 2 for WAL; 4 for corruptDB.sqlite
->>19 ubyte !1 \b, read version %u
-# Bytes of unused "reserved" space at the end of each page. Usually 0
->>20 ubyte !0 \b, unused bytes %u
-# maximum embedded payload fraction. Must be 64; 1 for corruptDB.sqlite
->>21 ubyte !64 \b, maximum payload %u
-# Minimum embedded payload fraction. Must be 32; 1 for corruptDB.sqlite
->>22 ubyte !32 \b, minimum payload %u
-# Leaf payload fraction. Must be 32; 0 for corruptDB.sqlite
->>23 ubyte !32 \b, leaf payload %u
-# file change counter
->>24 ubelong x \b, file counter %u
-# Size of the database file in pages
->>28 ubelong x \b, database pages %u
-# page number of the first freelist trunk page like: 0 2 3 4 5 9
-# 10 13 14 15 16 17 18 19 23 36 39 46 50 136 190 217 307 505 516 561 883 1659
->>32 ubelong !0 \b, 1st free page %u
-# total number of freelist pages
->>36 ubelong !0 \b, free pages %u
-# The schema cookie like: 2 3 4 6 7 9 A D E F 13 14 1C 25 2A 2F 33 44 4B 53 5A 5F 62 86 87 8F 91 A8
->>40 ubelong x \b, cookie %#x
-# the schema format number. Supported formats are 1 2 3 and often 4
-# 3328 for corruptDB.sqlite and 0 for 512 byte storage.sqlite (TorBrowser Firefox Thunderbird)
->>44 ubelong x \b, schema %u
-# Suggested cache size like: 0 2000
->>48 ubelong !0 \b, cache page size %u
-# The page number of the largest root b-tree page when in auto-vacuum or incremental-vacuum modes, or zero otherwise.
->>52 ubelong !0 \b, largest root page %u
-# The database text encoding; a value of 1 means UTF-8; 2 means UTF-16le; 3 means UTF-16be
-#>>56 ubelong x \b, encoding %u
->>56 ubelong x
->>>56 ubelong =1 \b, UTF-8
->>>56 ubelong =2 \b, UTF-16 little endian
->>>56 ubelong =3 \b, UTF-16 big endian
-# 0 for corruptDB.sqlite and for storage.sqlite with database pages 1 (TorBrowser Firefox Thunderbird)
-# https://mozilla.github.io/firefox-browser-architecture/text/0010-firefox-data-stores.html
->>>56 default x
->>>>56 ubelong x \b, unknown %#x encoding
-# True (non-zero) for incremental-vacuum mode; false (zero) otherwise
->>64 ubelong !0 \b, vacuum mode %u
-# Reserved for expansion. Must be zero
->>72 uquad !0 \b, reserved %#llx
-# The version-valid-for number like:
-# 1 2 3 4 C F 68h 95h 266h A99h 3DCDh B7CEh
->>92 ubelong x \b, version-valid-for %u
-
-# SQLite Write-Ahead Log from SQLite version >= 3.7.0
-# https://www.sqlite.org/fileformat.html#walformat
-0 belong&0xfffffffe 0x377f0682 SQLite Write-Ahead Log,
-!:ext sqlite-wal/db-wal
->4 belong x version %d
-# Summary: SQLite Write-Ahead-Log index (shared memory)
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/SQLite
-# Reference: http://www.sqlite.org/draft/walformat.html#walidxfmt
-# iVersion; WAL-index format version number; always 3007000=2DE218h
-0 ulelong 0x002DE218
->0 use shm-le
-# big endian variant not tested
-0 ubelong 0x002DE218
->0 use \^shm-le
-# show information about SQLite Write-Ahead-Log shared memory
-0 name shm-le
->0 ulelong x SQLite Write-Ahead Log shared memory
-#!:mime application/octet-stream
-!:mime application/vnd.sqlite3
-# db3-shm Acronis BackupAndRecovery F4CEEE47-042C-4828-95A0-DE44EC267A28.db3-shm
-# dbx-shm probably Dropbox filecache.dbx-shm
-# aup3-shm Audacity project tada.aup3-shm
-# srd-shm Microsoft Windows StateRepository service StateRepository-Deployment.srd-shm StateRepository-Machine.srd-shm:
-!:ext sqlite-shm/db-shm/db3-shm/dbx-shm/aup3-shm/srd-shm
-# unused padding space; must be zero
->4 ulelong !0 \b, unused %x
-# iChange; unsigned integer counter, incremented with each transaction
->8 ulelong x \b, counter %u
-# isInit; the "isInit" flag; 1 when the shm file has been initialized
->12 ubyte !1 \b, not initialized %u
-# bigEndCksum; true if the WAL file uses big-ending checksums; 0 if the WAL uses little-endian checksums
->13 ubyte !0 \b, checksum type %u
-# szPage; database page size in bytes, or 1 if the page size is 65536
->14 uleshort !1 \b, page size %u
->14 uleshort =1 \b, page size 65536
-# mxFrame; number of valid and committed frames in the WAL file
->16 ulelong x \b, %u frames
-# nPage; size of the database file in pages
->20 ulelong x \b, %u pages
-# aFrameCksum; checksum of the last frame in the WAL file
->24 ulelong x \b, frame checksum %#x
-# aSalt; two salt values copied from the WAL file header in the byte-order of the WAL file; might be different from machine byte-order
->32 ulequad x \b, salt %#llx
-# aCksum; checksum over bytes 0 through 39 of this header
->40 ulelong x \b, header checksum %#x
-# a copy of bytes 0 through 47 of header
->48 ulelong !3007000 \b, iversion %u
-# nBackfill; number of WAL frames that have already been backfilled into the database by prior checkpoints
->96 ulelong !0 \b, %u backfilled
-# nBackfillAttempted; number of WAL frames for which backfill has been attempted
->>128 ulelong x (%u attempts)
-# read-mark[0..4]; five "read marks"; each read mark is a 32-bit unsigned integer
->100 ulelong !0 \b, read-mark[0] %#x
->104 ulelong x \b, read-mark[1] %#x
->108 ulelong !0xffffffff \b, read-mark[2] %#x
->112 ulelong !0xffffffff \b, read-mark[3] %#x
->116 ulelong !0xffffffff \b, read-mark[4] %#x
-# unused space set aside for 8 file locks
->120 ulequad !0 \b, space %#llx
-# unused space reserved for further expansion
->132 ulelong !0 \b, reserved %#x
-
-# SQLite Rollback Journal
-# https://www.sqlite.org/fileformat.html#rollbackjournal
-0 string \xd9\xd5\x05\xf9\x20\xa1\x63\xd7 SQLite Rollback Journal
-
-# Panasonic channel list database svl.bin or svl.db added by Joerg Jenderek
-# https://github.com/PredatH0r/ChanSort
-0 string PSDB\0 Panasonic channel list DataBase
-!:ext db/bin
-#!:mime application/x-db-svl-panasonic
->126 string SQLite\ format\ 3
-#!:mime application/x-panasonic-sqlite3
->>&-15 indirect x \b; contains
-
-# H2 Database from https://www.h2database.com/
-0 string --\ H2\ 0.5/B\ --\ \n H2 Database file
-
-# DuckDB database file from https://duckdb.org
-8 string DUCK DuckDB database file
->12 lequad x \b, version %lld
-#>20 lequad x \b, flags %#llx
-#>28 lequad x \b, flags %#llx
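The sqlite entries above read fixed, big-endian fields out of the 100-byte database header and the WAL/shm headers. As a rough illustration only (not part of the magic rules), the same database-header fields can be pulled out in Python; "example.db" is a placeholder path:

    import struct

    with open("example.db", "rb") as f:            # placeholder path
        header = f.read(100)

    if header[:16] == b"SQLite format 3\x00":      # magic string at offset 0
        page_size    = struct.unpack(">H", header[16:18])[0]  # 1 means 65536
        file_counter = struct.unpack(">I", header[24:28])[0]  # file change counter
        page_count   = struct.unpack(">I", header[28:32])[0]  # database size in pages
        encoding     = struct.unpack(">I", header[56:60])[0]  # 1=UTF-8, 2=UTF-16le, 3=UTF-16be
        print(page_size, file_counter, page_count, encoding)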
diff --git a/contrib/libs/libmagic/magic/Magdir/ssh b/contrib/libs/libmagic/magic/Magdir/ssh
deleted file mode 100644
index 56b28a8488..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ssh
+++ /dev/null
@@ -1,42 +0,0 @@
-# Type: OpenSSH key files
-# From: Nicolas Collignon <tsointsoin@gmail.com>
-
-0 string SSH\040PRIVATE\040KEY OpenSSH RSA1 private key,
->28 string >\0 version %s
-0 string -----BEGIN\040OPENSSH\040PRIVATE\040KEY----- OpenSSH private key
-# https://www.rfc-editor.org/rfc/rfc5958
-0 string -----BEGIN\040PRIVATE\040KEY----- OpenSSH private key (no password)
-0 string -----BEGIN\040ENCRYPTED\040PRIVATE\040KEY----- OpenSSH private key (with password)
-
-0 string ssh-dss\040 OpenSSH DSA public key
-0 string ssh-rsa\040 OpenSSH RSA public key
-0 string ecdsa-sha2-nistp256 OpenSSH ECDSA public key
-0 string ecdsa-sha2-nistp384 OpenSSH ECDSA public key
-0 string ecdsa-sha2-nistp521 OpenSSH ECDSA public key
-0 string ssh-ed25519 OpenSSH ED25519 public key
-
-0 string SSHKRL\n\0
->8 ubelong 1 OpenSSH key/certificate revocation list, format %u
->>12 ubequad x \b, version %llx
->>>20 beqdate x \b, generated %s
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/PuTTY
-# Reference: https://the.earth.li/~sgtatham/putty/latest/putty-0.73.tar.gz
-# /sshpubk.c
-0 string PuTTY-User-Key-File- PuTTY Private Key File
-#!:mime text/plain
-# https://github.com/github/putty/blob/master/windows/installer.wxs
-!:mime application/x-putty-private-key
-!:ext ppk
-# version 1 or 2
->20 string x \b, version %.1s
-# name of the algorithm like: ssh-dss ssh-rsa ecdsa-sha2-nistp256 ssh-ed25519
->23 string x \b, algorithm %s
-# next line says "Encryption: " plus an encryption type like aes256-cbc or none
->32 search/13 Encryption:\040 \b, Encryption
->>&0 string x %s
-# next line says "Comment: " plus the comment string
->>>&0 search/3 Comment:\040
->>>>&0 string x "%s"
-
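The ssh entries above are plain prefix tests on the first bytes of a key file. A minimal Python sketch of the same idea, covering only a few of the prefixes and using a placeholder path:

    PREFIXES = {
        b"-----BEGIN OPENSSH PRIVATE KEY-----": "OpenSSH private key",
        b"SSH PRIVATE KEY":                     "OpenSSH RSA1 private key",
        b"PuTTY-User-Key-File-":                "PuTTY Private Key File",
        b"ssh-rsa ":                            "OpenSSH RSA public key",
        b"ssh-ed25519":                         "OpenSSH ED25519 public key",
    }

    def classify(path="id_key"):                   # placeholder path
        with open(path, "rb") as f:
            head = f.read(64)
        for prefix, label in PREFIXES.items():
            if head.startswith(prefix):
                return label
        return "unknown"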
diff --git a/contrib/libs/libmagic/magic/Magdir/ssl b/contrib/libs/libmagic/magic/Magdir/ssl
deleted file mode 100644
index 2309392393..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ssl
+++ /dev/null
@@ -1,20 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ssl,v 1.5 2017/12/29 04:00:07 christos Exp $
-# ssl: file(1) magic for SSL file formats
-
-# Type: OpenSSL certificates/key files
-# From: Nicolas Collignon <tsointsoin@gmail.com>
-
-0 string -----BEGIN\040CERTIFICATE----- PEM certificate
-0 string -----BEGIN\040CERTIFICATE\040REQ PEM certificate request
-0 string -----BEGIN\040RSA\040PRIVATE PEM RSA private key
-0 string -----BEGIN\040DSA\040PRIVATE PEM DSA private key
-0 string -----BEGIN\040EC\040PRIVATE PEM EC private key
-0 string -----BEGIN\040ECDSA\040PRIVATE PEM ECDSA private key
-
-# From Luc Gommans
-# OpenSSL enc file (recognized by a magic string preceding the password's salt)
-0 string Salted__ openssl enc'd data with salted password
-# Using the -a or -base64 option, OpenSSL will base64-encode the data.
-0 string U2FsdGVkX1 openssl enc'd data with salted password, base64 encoded
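The "Salted__" test relies on the layout of OpenSSL enc output: the 8-byte magic is immediately followed by an 8-byte salt, then the ciphertext. A small sketch, assuming a placeholder file name:

    with open("secret.enc", "rb") as f:            # placeholder path
        head = f.read(16)
    if head.startswith(b"Salted__"):
        salt = head[8:16]                          # 8-byte salt right after the magic
        print("openssl enc'd data, salt =", salt.hex())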
diff --git a/contrib/libs/libmagic/magic/Magdir/statistics b/contrib/libs/libmagic/magic/Magdir/statistics
deleted file mode 100644
index ca9f8591b6..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/statistics
+++ /dev/null
@@ -1,45 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: statistics,v 1.3 2022/03/24 15:48:58 christos Exp $
-# statistics: file(1) magic for statistics related software
-#
-
-# From Remy Rampin
-
-# Stata is a statistical software tool that was created in 1985. While I
-# don't personally use it, data files in its native (proprietary) format
-# are common (.dta files).
-#
-# Because they are so common, especially in statistical and social
-# sciences, Stata files and SPSS files can be opened by a lot of modern
-# software, for example Python's pandas package provides built-in
-# support for them (read_stata() and read_spss()).
-#
-# I noticed that the magic database includes an entry for SPSS files but
-# not Stata files. Stata files for Stata 13 and newer (formats 117, 118,
-# and 119) always begin with the string "<stata_dta><header>" as per
-# https://www.stata.com/help.cgi?dta#definition
-#
-# The format version number always follows, for example:
-# <stata_dta><header><release>117</release>
-# <stata_dta><header><release>118</release>
-#
-# Therefore the following line would do the trick:
-# 0 string <stata_dta><header> Stata Data File
-#
-# (I'm sure the version number could be captured as well but I did not
-# manage this without a regex)
-#
-# Unfortunately the previous formats (created by Stata before 13, which
-# was released 2013) are harder to recognize. Format 115 starts with the
-# four bytes 0x73010100 or 0x73020100, format 114 with 0x72010100 or
-# 0x72020100, format 113 with 0x71010101 or 0x71020101.
-#
-# For additional reference, the Library of Congress website has an entry
-# for the Stata Data File Format 118:
-# https://www.loc.gov/preservation/digital/formats/fdd/fdd000471.shtml
-#
-# Example of those files can be found on Zenodo:
-# https://zenodo.org/search?page=1&size=20&q=&file_type=dta
-0 string \<stata_dta\>\<header\>\<release\> Stata Data File
->&0 regex [0-9]+ (Release %s)
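The comments above describe two detection paths: formats 117-119 start with an XML-like header that carries the release number, while formats 113-115 are recognized by their first byte. A rough Python sketch of that logic, with a placeholder file name:

    import re

    with open("data.dta", "rb") as f:              # placeholder path
        head = f.read(64)

    m = re.match(rb"<stata_dta><header><release>(\d+)</release>", head)
    if m:
        print("Stata Data File, release", int(m.group(1).decode()))
    elif head[:1] in (b"\x71", b"\x72", b"\x73"):  # formats 113, 114, 115
        print("Stata Data File, legacy format", {0x71: 113, 0x72: 114, 0x73: 115}[head[0]])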
diff --git a/contrib/libs/libmagic/magic/Magdir/subtitle b/contrib/libs/libmagic/magic/Magdir/subtitle
deleted file mode 100644
index cfbe293d59..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/subtitle
+++ /dev/null
@@ -1,38 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: subtitle,v 1.2 2022/09/07 11:29:09 christos Exp $
-# subtitle: file(1) magic for subtitles files
-
-# EBU-STL
-# https://tech.ebu.ch/docs/tech/tech3264.pdf
-3 string STL EBU-STL subtitles
->6 regex =^[0-9][0-9] \b, rate %s
->>8 string .01 \b, v1
-!:mime application/x-ebu-stl
->>>16 regex =^[^\ ]{0,32} \b, title "%s"
->>>>224 regex =^[0-9]{2} \b, created %-.2s
->>>>>&0 regex =^[0-9]{2} \b-%-.2s
->>>>>>&0 regex =^[0-9]{2} \b-%-.2s
-!:ext stl
-
-# SubRip (srt) subtitles
-0 regex/20 =^1[\r\n]+0[01]:[0-9]{2}:[0-9]{2},[0-9]{3}\040--> SubRip
-!:mime application/x-subrip
-!:ext srt
-
-# WebVTT subtitles
-# https://www.w3.org/TR/webvtt1/
-0 string/t WEBVTT
->&0 regex/255 =[0-9]{2}:[0-9]{2}\\.[0-9]{3}\040--> WebVTT subtitles
-!:mime text/vtt
-!:ext vtt
-
-# XML TTML subtitles
-# https://www.w3.org/TR/ttml2/
-0 string/t \<?xml
->20 search/400 \020xmlns=
->>&0 regex ['"]http://www.w3.org/ns/ttml TTML subtitles
-!:mime application/ttml+xml
-# Augment strength to beat plain XML
-!:strength * 3
-!:ext ttml
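The SubRip and WebVTT entries are essentially a prefix test plus a timestamp pattern on the first cue. A simplified Python equivalent of those two checks, with a placeholder path:

    import re

    def subtitle_kind(path="subs.srt"):            # placeholder path
        with open(path, "rb") as f:
            head = f.read(256)
        if head.startswith(b"WEBVTT"):
            return "WebVTT subtitles"
        if re.match(rb"1[\r\n]+0[01]:[0-9]{2}:[0-9]{2},[0-9]{3} -->", head):
            return "SubRip"
        return None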
diff --git a/contrib/libs/libmagic/magic/Magdir/sun b/contrib/libs/libmagic/magic/Magdir/sun
deleted file mode 100644
index df83834d2d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sun
+++ /dev/null
@@ -1,141 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sun,v 1.28 2019/04/19 00:42:27 christos Exp $
-# sun: file(1) magic for Sun machines
-#
-# Values for big-endian Sun (MC680x0, SPARC) binaries on pre-5.x
-# releases. (5.x uses ELF.) Entries for executables without an
-# architecture type, used before the 68020-based Sun-3's came out,
-# are in aout, as they're indistinguishable from other big-endian
-# 32-bit a.out files.
-#
-0 belong&077777777 0600413 a.out SunOS SPARC demand paged
->0 byte &0x80
->>20 belong <4096 shared library
->>20 belong =4096 dynamically linked executable
->>20 belong >4096 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0600410 a.out SunOS SPARC pure
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0600407 a.out SunOS SPARC
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0400413 a.out SunOS mc68020 demand paged
->0 byte &0x80
->>20 belong <4096 shared library
->>20 belong =4096 dynamically linked executable
->>20 belong >4096 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0400410 a.out SunOS mc68020 pure
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0400407 a.out SunOS mc68020
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0200413 a.out SunOS mc68010 demand paged
->0 byte &0x80
->>20 belong <4096 shared library
->>20 belong =4096 dynamically linked executable
->>20 belong >4096 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0200410 a.out SunOS mc68010 pure
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-0 belong&077777777 0200407 a.out SunOS mc68010
->0 byte &0x80 dynamically linked executable
->0 byte ^0x80 executable
->16 belong >0 not stripped
-
-#
-# Core files. "SPARC 4.x BCP" means "core file from a SunOS 4.x SPARC
-# binary executed in compatibility mode under SunOS 5.x".
-#
-0 belong 0x080456 SunOS core file
->4 belong 432 (SPARC)
->>132 string >\0 from '%s'
->>116 belong =3 (quit)
->>116 belong =4 (illegal instruction)
->>116 belong =5 (trace trap)
->>116 belong =6 (abort)
->>116 belong =7 (emulator trap)
->>116 belong =8 (arithmetic exception)
->>116 belong =9 (kill)
->>116 belong =10 (bus error)
->>116 belong =11 (segmentation violation)
->>116 belong =12 (bad argument to system call)
->>116 belong =29 (resource lost)
->>120 belong x (T=%dK,
->>124 belong x D=%dK,
->>128 belong x S=%dK)
->4 belong 826 (68K)
->>128 string >\0 from '%s'
->4 belong 456 (SPARC 4.x BCP)
->>152 string >\0 from '%s'
-# Sun SunPC
-0 long 0xfa33c08e SunPC 4.0 Hard Disk
-0 string #SUNPC_CONFIG SunPC 4.0 Properties Values
-# Sun snoop (see RFC 1761, which describes the capture file format,
-# RFC 3827, which describes some additional datalink types, and
-# https://www.iana.org/assignments/snoop-datalink-types/snoop-datalink-types.xml,
-# which is the IANA registry of Snoop datalink types)
-#
-0 string snoop Snoop capture file
->8 belong >0 - version %d
->12 belong 0 (IEEE 802.3)
->12 belong 1 (IEEE 802.4)
->12 belong 2 (IEEE 802.5)
->12 belong 3 (IEEE 802.6)
->12 belong 4 (Ethernet)
->12 belong 5 (HDLC)
->12 belong 6 (Character synchronous)
->12 belong 7 (IBM channel-to-channel adapter)
->12 belong 8 (FDDI)
->12 belong 9 (Other)
->12 belong 10 (type %d)
->12 belong 11 (type %d)
->12 belong 12 (type %d)
->12 belong 13 (type %d)
->12 belong 14 (type %d)
->12 belong 15 (type %d)
->12 belong 16 (Fibre Channel)
->12 belong 17 (ATM)
->12 belong 18 (ATM Classical IP)
->12 belong 19 (type %d)
->12 belong 20 (type %d)
->12 belong 21 (type %d)
->12 belong 22 (type %d)
->12 belong 23 (type %d)
->12 belong 24 (type %d)
->12 belong 25 (type %d)
->12 belong 26 (IP over Infiniband)
->12 belong >26 (type %d)
-
-#---------------------------------------------------------------------------
-# The following entries have been tested by Duncan Laurie <duncan@sun.com> (a
-# lead Sun/Cobalt developer) who agrees that they are good and worthy of
-# inclusion.
-
-# Boot ROM images for Sun/Cobalt Linux server appliances
-0 string Cobalt\ Networks\ Inc.\nFirmware\ v Paged COBALT boot rom
->38 string x V%.4s
-
-# New format for Sun/Cobalt boot ROMs is annoying, it stores the version code
-# at the very end where file(1) can't get it.
-0 string CRfs COBALT boot rom data (Flat boot rom or file system)
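The snoop entry prints the version and datalink type from the RFC 1761 capture header: an 8-byte magic followed by two 32-bit big-endian words. A minimal sketch with a placeholder capture name:

    import struct

    with open("capture.snoop", "rb") as f:         # placeholder path
        magic, version, linktype = struct.unpack(">8sII", f.read(16))
    if magic == b"snoop\x00\x00\x00":
        print("Snoop capture file, version", version, "datalink type", linktype)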
diff --git a/contrib/libs/libmagic/magic/Magdir/svf b/contrib/libs/libmagic/magic/Magdir/svf
deleted file mode 100644
index b0d5c980f9..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/svf
+++ /dev/null
@@ -1,5 +0,0 @@
-# $File: svf,v 1.2 2023/05/23 13:37:32 christos Exp $
-#
-# file(1) magic(5) data for SmartVersion files with the .svf extension.
-
-0 string DFS\ File\x0D\x0Ahttp://www.difstream.com\x0D\x0A SmartVersion binary patch file
diff --git a/contrib/libs/libmagic/magic/Magdir/sylk b/contrib/libs/libmagic/magic/Magdir/sylk
deleted file mode 100644
index f497c05bb2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sylk
+++ /dev/null
@@ -1,36 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: sylk,v 1.1 2020/04/05 22:18:34 christos Exp $
-# sylk: file(1) magic for SYLK text files
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/SYmbolic_LinK_%28SYLK%29
-# http://fileformats.archiveteam.org/wiki/SYLK
-# Note: called by TrID "SYLK - SYmbolic LinK data",
-# by DROID "Microsoft Symbolic Link (SYLK) File"
-# by FreeDesktop.org "spreadsheet interchange document"
-0 string ID;P
-# skip short DROID x-fmt-106-signature-id-603.slk
->7 ubyte >0 spreadsheet interchange document
-# https://reposcope.com/mimetype/text/spreadsheet
-#!:mime text/spreadsheet
-# https://reposcope.com/mimetype/application/x-sylk by Gnumeric
-!:mime application/x-sylk
-!:ext slk/sylk
->>4 ubyte >037 \b, created by
-# Gnumeric, pmw~PlanMaker, CALCOOO32~LibreOffice OpenOffice, SCALC3~StarOffice
-# MP~Multiplan, XL~Excel WXL~Excel Windows
->>>4 string Gnumeric Gnumeric
->>>4 string pmw PlanMaker
->>>4 string CALCOOO32 Libre/OpenOffice Calc
->>>4 string SCALC3 StarOffice Calc
->>>4 string XL Excel
-# Excel, version probably running on Windows
->>>4 string WXL Excel
-# not tested
->>>4 string MP Multiplan
-# unknown spreadsheet software
->>>4 default x
->>>>4 string x %s
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/symbos b/contrib/libs/libmagic/magic/Magdir/symbos
deleted file mode 100644
index c97a42e0c7..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/symbos
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#------------------------------------------------------------------------------
-# symbos: file(1) magic for the SymbOS operating system
-# http://www.symbos.de
-# Fabio R. Schmidlin <frs@pop.com.br>
-
-# SymbOS EXE file
-0x30 string SymExe SymbOS executable
->0x36 ubyte x v%c
->0x37 ubyte x \b.%c
->0xF string x \b, name: %s
-
-# SymbOS DOX document
-0 string INFOq\0 SymbOS DOX document
-
-# Symbos driver
-0 string SMD1 SymbOS driver
->19 byte x \b, name: %c
->20 byte x \b%c
->21 byte x \b%c
->22 byte x \b%c
->23 byte x \b%c
->24 byte x \b%c
->25 byte x \b%c
->26 byte x \b%c
->27 byte x \b%c
->28 byte x \b%c
->29 byte x \b%c
->30 byte x \b%c
->31 byte x \b%c
-
-# Symbos video
-0 string SymVid SymbOS video
->6 ubyte x v%c
->7 ubyte x \b.%c
-
-# Soundtrakker 128 ST2 music
-0 byte 0
->0xC string \x00\x00\x00\x00\x00\x00\x02\x00\x00\x00\x40\x00 Soundtrakker 128 ST2 music,
->>1 string x name: %s
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/sysex b/contrib/libs/libmagic/magic/Magdir/sysex
deleted file mode 100644
index d02389d9a4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/sysex
+++ /dev/null
@@ -1,429 +0,0 @@
-
-#------------------------------------------------------------------------
-# $File: sysex,v 1.12 2022/10/31 13:22:26 christos Exp $
-# sysex: file(1) magic for MIDI sysex files
-#
-# GRR: original 1 byte test at offset was too general as it catches also many FATs of DOS filesystems
-# where real SYStem EXclusive messages at offset 1 are limited to seven bits
-# https://en.wikipedia.org/wiki/MIDI
-# test for StartSysEx byte and upper unused bit of vendor ID
-0 ubeshort&0xFF80 0xF000
-# MIDI System Exclusive (SysEx) messages (strength=50) after Microsoft Visual C library (strength=70)
-#!:strength +0
-# skip Microsoft Visual C library with page size 16 misidentified as ADA and
-# page size 32 misidentified as Inventronics by looking for terminating End Of eXclusive byte (EOX)
->2 search/12 \xF7
->>0 use midi-sysex
-# display information about MIDI System Exclusive (SysEx) messages
-0 name midi-sysex
-# https://fileinfo.com/extension/syx
->1 ubyte x MIDI audio System Exclusive (SysEx) message -
-# Note: file (version 5.41) labeled the above entry as "SysEx File"
-#!:mime application/octet-stream
-!:mime audio/x-syx
-# https://onsongapp.com/docs/features/formats/sysex
-!:ext syx/sysex
-# https://www.midi.org/specifications-old/item/manufacturer-id-numbers
-# https://raw.githubusercontent.com/insolace/MIDI-Sysex-MFG-IDs/master/Sysex%20ID%20Tables/MIDI%20Sysex%20MFG%20IDs.csv
-# SysEx manufacturer ID; originally one byte, but now 0 is used as an escapement to reach the next two
-# North American Group
-#>1 byte 0x01 Sequential
->1 byte 0x01 Sequential Circuits
->1 byte 0x02 IDP
-#>1 byte 0x03 OctavePlateau
->1 byte 0x03 Voyetra Turtle Beach
->1 byte 0x04 Moog
-#>1 byte 0x05 Passport
->1 byte 0x05 Passport Designs
-#>1 byte 0x06 Lexicon
->1 byte 0x06 Lexicon Inc.
->1 byte 0x07 Kurzweil/Future Retro
->>3 byte 0x77 777
->>4 byte 0x00 Bank
->>4 byte 0x01 Song
->>5 byte 0x0f 16
->>5 byte 0x0e 15
->>5 byte 0x0d 14
->>5 byte 0x0c 13
->>5 byte 0x0b 12
->>5 byte 0x0a 11
->>5 byte 0x09 10
->>5 byte 0x08 9
->>5 byte 0x07 8
->>5 byte 0x06 7
->>5 byte 0x05 6
->>5 byte 0x04 5
->>5 byte 0x03 4
->>5 byte 0x02 3
->>5 byte 0x01 2
->>5 byte 0x00 1
->>5 byte 0x10 (ALL)
->>2 byte x \b, Channel %d
->1 byte 0x08 Fender
-#>1 byte 0x09 Gulbransen
->1 byte 0x09 MIDI9
-#>1 byte 0x0a AKG
->1 byte 0x0a AKG Acoustics
->1 byte 0x0b Voyce
->1 byte 0x0c Waveframe
-# not ADA programming language
-#>1 byte 0x0d ADA
->1 byte 0x0d ADA Signal Processors Inc.
-#>1 byte 0x0e Garfield
->1 byte 0x0e Garfield Electronics
->1 byte 0x0f Ensoniq
->1 byte 0x10 Oberheim
->>2 byte 0x06 Matrix 6 series
->>3 byte 0x0A Dump (All)
->>3 byte 0x01 Dump (Bank)
->>4 belong 0x0002040E Matrix 1000
->>>11 byte <2 User bank %d
->>>11 byte >1 Preset bank %d
->1 byte 0x11 Apple
->1 byte 0x12 GreyMatter
->1 byte 0x14 PalmTree
->1 byte 0x15 JLCooper
->1 byte 0x16 Lowrey
->1 byte 0x17 AdamsSmith
->1 byte 0x18 E-mu
-#>1 byte 0x19 Harmony
->1 byte 0x19 Harmony Systems
->1 byte 0x1a ART
->1 byte 0x1b Baldwin
->1 byte 0x1c Eventide
->1 byte 0x1d Inventronics
->1 byte 0x1f Clarity
-
-# European Group
-#>1 byte 0x21 SIEL
->1 byte 0x21 Proel Labs (SIEL)
->1 byte 0x22 Synthaxe
->1 byte 0x24 Hohner
->1 byte 0x25 Twister
-#>1 byte 0x26 Solton
->1 byte 0x26 Ketron s.r.l.
->1 byte 0x27 Jellinghaus
->1 byte 0x28 Southworth
->1 byte 0x29 PPG
->1 byte 0x2a JEN
-#>1 byte 0x2b SSL
->1 byte 0x2b Solid State Logic Organ Systems
-#>1 byte 0x2c AudioVertrieb
->1 byte 0x2c Audio Veritrieb-P. Struven
-
->1 byte 0x2f ELKA
->>3 byte 0x09 EK-44
-
->1 byte 0x30 Dynacord
-#>1 byte 0x31 Jomox
->1 byte 0x31 Viscount International Spa
->1 byte 0x33 Clavia
->1 byte 0x39 Soundcraft
-# Some Waldorf info from http://Stromeko.Synth.net/Downloads#WaldorfDocs
->1 byte 0x3e Waldorf
->>2 byte 0x00 microWave
->>2 byte 0x0E microwave2 / XT
->>2 byte 0x0F Q / Q+
->>3 byte =0 (default id)
->>3 byte >0 (
->>>3 byte <0x7F \bdevice %d)
->>>3 byte =0x7F \bbroadcast id)
->>3 byte 0x7f Microwave I
->>>4 byte 0x00 SNDR (Sound Request)
->>>4 byte 0x10 SNDD (Sound Dump)
->>>4 byte 0x20 SNDP (Sound Parameter Change)
->>>4 byte 0x30 SNDQ (Sound Parameter Inquiry)
->>>4 byte 0x70 BOOT (Sound Reserved)
->>>4 byte 0x01 MULR (Multi Request)
->>>4 byte 0x11 MULD (Multi Dump)
->>>4 byte 0x21 MULP (Multi Parameter Change)
->>>4 byte 0x31 MULQ (Multi Parameter Inquiry)
->>>4 byte 0x71 OS (Multi Reserved)
->>>4 byte 0x02 DRMR (Drum Map Request)
->>>4 byte 0x12 DRMD (Drum Map Dump)
->>>4 byte 0x22 DRMP (Drum Map Parameter Change)
->>>4 byte 0x32 DRMQ (Drum Map Parameter Inquiry)
->>>4 byte 0x72 BIN (Drum Map Reserved)
->>>4 byte 0x03 PATR (Sequencer Pattern Request)
->>>4 byte 0x13 PATD (Sequencer Pattern Dump)
->>>4 byte 0x23 PATP (Sequencer Pattern Parameter Change)
->>>4 byte 0x33 PATQ (Sequencer Pattern Parameter Inquiry)
->>>4 byte 0x73 AFM (Sequencer Pattern Reserved)
->>>4 byte 0x04 GLBR (Global Parameter Request)
->>>4 byte 0x14 GLBD (Global Parameter Dump)
->>>4 byte 0x24 GLBP (Global Parameter Parameter Change)
->>>4 byte 0x34 GLBQ (Global Parameter Parameter Inquiry)
->>>4 byte 0x07 MODR (Mode Parameter Request)
->>>4 byte 0x17 MODD (Mode Parameter Dump)
->>>4 byte 0x27 MODP (Mode Parameter Parameter Change)
->>>4 byte 0x37 MODQ (Mode Parameter Parameter Inquiry)
->>2 byte 0x10 microQ
->>>4 byte 0x00 SNDR (Sound Request)
->>>4 byte 0x10 SNDD (Sound Dump)
->>>4 byte 0x20 SNDP (Sound Parameter Change)
->>>4 byte 0x30 SNDQ (Sound Parameter Inquiry)
->>>4 byte 0x70 (Sound Reserved)
->>>4 byte 0x01 MULR (Multi Request)
->>>4 byte 0x11 MULD (Multi Dump)
->>>4 byte 0x21 MULP (Multi Parameter Change)
->>>4 byte 0x31 MULQ (Multi Parameter Inquiry)
->>>4 byte 0x71 OS (Multi Reserved)
->>>4 byte 0x02 DRMR (Drum Map Request)
->>>4 byte 0x12 DRMD (Drum Map Dump)
->>>4 byte 0x22 DRMP (Drum Map Parameter Change)
->>>4 byte 0x32 DRMQ (Drum Map Parameter Inquiry)
->>>4 byte 0x72 BIN (Drum Map Reserved)
->>>4 byte 0x04 GLBR (Global Parameter Request)
->>>4 byte 0x14 GLBD (Global Parameter Dump)
->>>4 byte 0x24 GLBP (Global Parameter Parameter Change)
->>>4 byte 0x34 GLBQ (Global Parameter Parameter Inquiry)
->>2 byte 0x11 rackAttack
->>>4 byte 0x00 SNDR (Sound Parameter Request)
->>>4 byte 0x10 SNDD (Sound Parameter Dump)
->>>4 byte 0x20 SNDP (Sound Parameter Parameter Change)
->>>4 byte 0x30 SNDQ (Sound Parameter Parameter Inquiry)
->>>4 byte 0x01 PRGR (Program Parameter Request)
->>>4 byte 0x11 PRGD (Program Parameter Dump)
->>>4 byte 0x21 PRGP (Program Parameter Parameter Change)
->>>4 byte 0x31 PRGQ (Program Parameter Parameter Inquiry)
->>>4 byte 0x71 OS (Program Parameter Reserved)
->>>4 byte 0x03 PATR (Pattern Parameter Request)
->>>4 byte 0x13 PATD (Pattern Parameter Dump)
->>>4 byte 0x23 PATP (Pattern Parameter Parameter Change)
->>>4 byte 0x33 PATQ (Pattern Parameter Parameter Inquiry)
->>>4 byte 0x04 GLBR (Global Parameter Request)
->>>4 byte 0x14 GLBD (Global Parameter Dump)
->>>4 byte 0x24 GLBP (Global Parameter Parameter Change)
->>>4 byte 0x34 GLBQ (Global Parameter Parameter Inquiry)
->>>4 byte 0x05 EFXR (FX Parameter Request)
->>>4 byte 0x15 EFXD (FX Parameter Dump)
->>>4 byte 0x25 EFXP (FX Parameter Parameter Change)
->>>4 byte 0x35 EFXQ (FX Parameter Parameter Inquiry)
->>>4 byte 0x07 MODR (Mode Command Request)
->>>4 byte 0x17 MODD (Mode Command Dump)
->>>4 byte 0x27 MODP (Mode Command Parameter Change)
->>>4 byte 0x37 MODQ (Mode Command Parameter Inquiry)
->>2 byte 0x03 Wave
->>>4 byte 0x00 SBPR (Soundprogram)
->>>4 byte 0x01 SAPR (Performance)
->>>4 byte 0x02 SWAVE (Wave)
->>>4 byte 0x03 SWTBL (Wave control table)
->>>4 byte 0x04 SVT (Velocity Curve)
->>>4 byte 0x05 STT (Tuning Table)
->>>4 byte 0x06 SGLB (Global Parameters)
->>>4 byte 0x07 SARRMAP (Performance Program Change Map)
->>>4 byte 0x08 SBPRMAP (Sound Program Change Map)
->>>4 byte 0x09 SBPRPAR (Sound Parameter)
->>>4 byte 0x0A SARRPAR (Performance Parameter)
->>>4 byte 0x0B SINSPAR (Instrument/External Parameter)
->>>4 byte 0x0F SBULK (Bulk Switch on/off)
-
-# Japanese Group
->1 byte 0x40 Kawai
->>3 byte 0x20 K1
->>3 byte 0x22 K4
-
->1 byte 0x41 Roland
->>3 byte 0x14 D-50
->>3 byte 0x2b U-220
->>3 byte 0x02 TR-707
-
->1 byte 0x42 Korg
->>3 byte 0x19 M1
-
->1 byte 0x43 Yamaha
->1 byte 0x44 Casio
->1 byte 0x46 Kamiya
->1 byte 0x47 Akai
-#>1 byte 0x48 Victor
->1 byte 0x48 Victor Company of Japan. Ltd.
->1 byte 0x49 Mesosha
->1 byte 0x4b Fujitsu
->1 byte 0x4c Sony
->1 byte 0x4e Teac
->1 byte 0x50 Matsushita
->1 byte 0x51 Fostex
-#>1 byte 0x52 Zoom
->1 byte 0x52 Zoom Corporation
->1 byte 0x54 Matsushita
->1 byte 0x57 Acoustic tech. lab.
-# https://www.midi.org/techspecs/manid.php
->1 belong&0xffffff00 0x00007400 Ta Horng
->1 belong&0xffffff00 0x00007500 e-Tek
->1 belong&0xffffff00 0x00007600 E-Voice
->1 belong&0xffffff00 0x00007700 Midisoft
->1 belong&0xffffff00 0x00007800 Q-Sound
->1 belong&0xffffff00 0x00007900 Westrex
->1 belong&0xffffff00 0x00007a00 Nvidia*
->1 belong&0xffffff00 0x00007b00 ESS
->1 belong&0xffffff00 0x00007c00 Mediatrix
->1 belong&0xffffff00 0x00007d00 Brooktree
->1 belong&0xffffff00 0x00007e00 Otari
->1 belong&0xffffff00 0x00007f00 Key Electronics
->1 belong&0xffffff00 0x00010000 Shure
->1 belong&0xffffff00 0x00010100 AuraSound
->1 belong&0xffffff00 0x00010200 Crystal
->1 belong&0xffffff00 0x00010300 Rockwell
->1 belong&0xffffff00 0x00010400 Silicon Graphics
->1 belong&0xffffff00 0x00010500 Midiman
->1 belong&0xffffff00 0x00010600 PreSonus
->1 belong&0xffffff00 0x00010800 Topaz
->1 belong&0xffffff00 0x00010900 Cast Lightning
->1 belong&0xffffff00 0x00010a00 Microsoft
->1 belong&0xffffff00 0x00010b00 Sonic Foundry
->1 belong&0xffffff00 0x00010c00 Line 6
->1 belong&0xffffff00 0x00010d00 Beatnik Inc.
->1 belong&0xffffff00 0x00010e00 Van Koerving
->1 belong&0xffffff00 0x00010f00 Altech Systems
->1 belong&0xffffff00 0x00011000 S & S Research
->1 belong&0xffffff00 0x00011100 VLSI Technology
->1 belong&0xffffff00 0x00011200 Chromatic
->1 belong&0xffffff00 0x00011300 Sapphire
->1 belong&0xffffff00 0x00011400 IDRC
->1 belong&0xffffff00 0x00011500 Justonic Tuning
->1 belong&0xffffff00 0x00011600 TorComp
->1 belong&0xffffff00 0x00011700 Newtek Inc.
->1 belong&0xffffff00 0x00011800 Sound Sculpture
->1 belong&0xffffff00 0x00011900 Walker Technical
->1 belong&0xffffff00 0x00011a00 Digital Harmony
->1 belong&0xffffff00 0x00011b00 InVision
->1 belong&0xffffff00 0x00011c00 T-Square
->1 belong&0xffffff00 0x00011d00 Nemesys
->1 belong&0xffffff00 0x00011e00 DBX
->1 belong&0xffffff00 0x00011f00 Syndyne
->1 belong&0xffffff00 0x00012000 Bitheadz
->1 belong&0xffffff00 0x00012100 Cakewalk
->1 belong&0xffffff00 0x00012200 Staccato
->1 belong&0xffffff00 0x00012300 National Semicon.
->1 belong&0xffffff00 0x00012400 Boom Theory
->1 belong&0xffffff00 0x00012500 Virtual DSP Corp
->1 belong&0xffffff00 0x00012600 Antares
->1 belong&0xffffff00 0x00012700 Angel Software
->1 belong&0xffffff00 0x00012800 St Louis Music
->1 belong&0xffffff00 0x00012900 Lyrrus dba G-VOX
->1 belong&0xffffff00 0x00012a00 Ashley Audio
->1 belong&0xffffff00 0x00012b00 Vari-Lite
->1 belong&0xffffff00 0x00012c00 Summit Audio
->1 belong&0xffffff00 0x00012d00 Aureal Semicon.
->1 belong&0xffffff00 0x00012e00 SeaSound
->1 belong&0xffffff00 0x00012f00 U.S. Robotics
->1 belong&0xffffff00 0x00013000 Aurisis
->1 belong&0xffffff00 0x00013100 Nearfield Multimedia
->1 belong&0xffffff00 0x00013200 FM7 Inc.
->1 belong&0xffffff00 0x00013300 Swivel Systems
->1 belong&0xffffff00 0x00013400 Hyperactive
->1 belong&0xffffff00 0x00013500 MidiLite
->1 belong&0xffffff00 0x00013600 Radical
->1 belong&0xffffff00 0x00013700 Roger Linn
->1 belong&0xffffff00 0x00013800 Helicon
->1 belong&0xffffff00 0x00013900 Event
->1 belong&0xffffff00 0x00013a00 Sonic Network
->1 belong&0xffffff00 0x00013b00 Realtime Music
->1 belong&0xffffff00 0x00013c00 Apogee Digital
-
->1 belong&0xffffff00 0x00202b00 Medeli Electronics
->1 belong&0xffffff00 0x00202c00 Charlie Lab
->1 belong&0xffffff00 0x00202d00 Blue Chip Music
->1 belong&0xffffff00 0x00202e00 BEE OH Corp
->1 belong&0xffffff00 0x00202f00 LG Semicon America
->1 belong&0xffffff00 0x00203000 TESI
->1 belong&0xffffff00 0x00203100 EMAGIC
->1 belong&0xffffff00 0x00203200 Behringer
->1 belong&0xffffff00 0x00203300 Access Music
->1 belong&0xffffff00 0x00203400 Synoptic
->1 belong&0xffffff00 0x00203500 Hanmesoft Corp
->1 belong&0xffffff00 0x00203600 Terratec
->1 belong&0xffffff00 0x00203700 Proel SpA
->1 belong&0xffffff00 0x00203800 IBK MIDI
->1 belong&0xffffff00 0x00203900 IRCAM
->1 belong&0xffffff00 0x00203a00 Propellerhead Software
->1 belong&0xffffff00 0x00203b00 Red Sound Systems
->1 belong&0xffffff00 0x00203c00 Electron ESI AB
->1 belong&0xffffff00 0x00203d00 Sintefex Audio
->1 belong&0xffffff00 0x00203e00 Music and More
->1 belong&0xffffff00 0x00203f00 Amsaro
->1 belong&0xffffff00 0x00204000 CDS Advanced Technology
->1 belong&0xffffff00 0x00204100 Touched by Sound
->1 belong&0xffffff00 0x00204200 DSP Arts
->1 belong&0xffffff00 0x00204300 Phil Rees Music
->1 belong&0xffffff00 0x00204400 Stamer Musikanlagen GmbH
->1 belong&0xffffff00 0x00204500 Soundart
->1 belong&0xffffff00 0x00204600 C-Mexx Software
->1 belong&0xffffff00 0x00204700 Klavis Tech.
->1 belong&0xffffff00 0x00204800 Noteheads AB
-
-# Update: Joerg Jenderek; January 2022
->1 byte 0x00 ID EXTENSIONS
->1 byte 0x13 Digidesign Inc.
->1 byte 0x1e Key Concepts
->1 byte 0x20 Passac
->1 byte 0x23 Stepp
->1 byte 0x2d Neve
->1 byte 0x2e Soundtracs Ltd.
->1 byte 0x32 Drawmer
->1 byte 0x34 Audio Architecture
->1 byte 0x35 Generalmusic Corp SpA
->1 byte 0x36 Cheetah Marketing
->1 byte 0x37 C.T.M.
->1 byte 0x38 Simmons UK
->1 byte 0x3a Steinberg
->1 byte 0x3b Wersi GmbH
->1 byte 0x3c AVAB Niethammer AB
->1 byte 0x3d Digigram
->1 byte 0x3f Quasimidi
-#
->1 byte 0x40 Kawai Musical Instruments MFG. CO. Ltd
-#>1 byte 0x45 foo
-#>1 byte 0x4a foo
-#>1 byte 0x4d foo
-#>1 byte 0x4f foo
-#>1 byte 0x53 foo
->1 byte 0x55 Suzuki Musical Instruments MFG. Co. Ltd.
->1 byte 0x56 Fuji Sound Corporation Ltd.
-#>1 byte 0x58 foo
->1 byte 0x59 Faith. Inc.
->1 byte 0x5a Internet Corporation
-#>1 byte 0x5b foo
->1 byte 0x5c Seekers Co. Ltd.
-#>1 byte 0x5d foo
-#>1 byte 0x5e foo
->1 byte 0x5f SD Card Association
-# Reserved for other uses for 60H to 7FH
-# URL: https://www.philscomputerlab.com/roland-midi-emulator-project-20.html
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/syx--midiemu.trid.xml
-# Note: called by TrID "MIDI Emulator Project SysEx preset command"
->1 byte 0x66 MIDI Emulator
-# https://electronicmusic.fandom.com/wiki/List_of_MIDI_Manufacturer_IDs
-# Educational, prototyping, test, private use and experimentation
->1 byte 0x7D PROTOTYPING
-# universal non-real-time (sample dump, tuning table, etc.)
->1 byte 0x7E UNIVERSAL
-# universal real time (MIDI time code, MIDI Machine control, etc.)
->1 byte 0x7F universal real time
-# display information about End Of eXclusive byte (EOX=F7)
-#>2 ubyte 0xF7 \b, at 2 EOX
-#>3 ubyte 0xF7 \b, at 3 EOX
-# https://tttapa.github.io/Control-Surface-doc/new-input/Doxygen/d2/d93/SysEx-Send-Receive_8ino-example.html
->4 ubyte 0xF7 \b, at 4 EOX
-# http://www.1manband.nl/tutorials2/sysex.htm
->5 ubyte 0xF7 \b, at 5 EOX
-# http://www.somascape.org/midi/tech/mfile.html#sysex
->6 ubyte 0xF7 \b, at 6 EOX
-#
->7 ubyte 0xF7 \b, at 7 EOX
-# https://webmidijs.org/forum/discussion/34/how-to-send-or-receive-system-exclusive-messages
->8 ubyte 0xF7 \b, at 8 EOX
-#
->9 ubyte 0xF7 \b, at 9 EOX
-# https://www.chd-el.cz/wp-content/uploads/845010_syxcom.pdf
->10 ubyte 0xF7 \b, at 10 EOX
-# https://stackoverflow.com/questions/52906076/handling-midi-the-input-of-multiple-system-exclusive-messages-in-vb
->11 ubyte 0xF7 \b, at 11 EOX
-# https://www.2writers.com/eddie/TutSysEx.htm
->12 ubyte 0xF7 \b, at 12 EOX
->13 ubyte 0xF7 \b, at 13 EOX
-# http://www.chromakinetics.com/handsonic/rolSysEx.htm
->14 ubyte 0xF7 \b, at 14 EOX
-#>15 ubyte 0xF7 \b, at 15 EOX
-
-0 string T707 Roland TR-707 Data
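All of the sysex entries key off the same framing: an F0 start byte, a manufacturer ID that is either a single byte or 00 followed by two more bytes, and a terminating F7 end-of-exclusive byte. A small illustrative parser (not part of the magic rules):

    def sysex_manufacturer(msg):
        # Return the manufacturer ID bytes of one SysEx message, or None.
        if len(msg) < 4 or msg[0] != 0xF0 or 0xF7 not in msg:
            return None
        if msg[1] == 0x00:                         # extended ID, e.g. 00 20 32 Behringer
            return (msg[1], msg[2], msg[3])
        return (msg[1],)                           # single-byte ID, e.g. 41 Roland, 43 Yamaha

    print(sysex_manufacturer(bytes([0xF0, 0x41, 0x10, 0x16, 0x12, 0xF7])))  # -> (65,)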
diff --git a/contrib/libs/libmagic/magic/Magdir/tcl b/contrib/libs/libmagic/magic/Magdir/tcl
deleted file mode 100644
index edc3ec42b4..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/tcl
+++ /dev/null
@@ -1,29 +0,0 @@
-#------------------------------------------------------------------------------
-# file: file(1) magic for Tcl scripting language
-# URL: https://www.tcl.tk/
-# From: gustaf neumann
-
-# Tcl scripts
-0 search/1/w #!\ /usr/bin/tcl Tcl script text executable
-!:mime text/x-tcl
-0 search/1/w #!\ /usr/local/bin/tcl Tcl script text executable
-!:mime text/x-tcl
-0 search/1 #!/usr/bin/env\ tcl Tcl script text executable
-!:mime text/x-tcl
-0 search/1 #!\ /usr/bin/env\ tcl Tcl script text executable
-!:mime text/x-tcl
-0 search/1/w #!\ /usr/bin/wish Tcl/Tk script text executable
-!:mime text/x-tcl
-0 search/1/w #!\ /usr/local/bin/wish Tcl/Tk script text executable
-!:mime text/x-tcl
-0 search/1 #!/usr/bin/env\ wish Tcl/Tk script text executable
-!:mime text/x-tcl
-0 search/1 #!\ /usr/bin/env\ wish Tcl/Tk script text executable
-!:mime text/x-tcl
-
-# check the first line
-0 search/1 package\ req
->0 regex \^package[\ \t]+req Tcl script
-# not 'p', check other lines
-0 search/1 !p
->0 regex \^package[\ \t]+req Tcl script
diff --git a/contrib/libs/libmagic/magic/Magdir/teapot b/contrib/libs/libmagic/magic/Magdir/teapot
deleted file mode 100644
index b6577b6f28..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/teapot
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: teapot,v 1.4 2009/09/19 16:28:12 christos Exp $
-# teapot: file(1) magic for "teapot" spreadsheet
-#
-0 string #!teapot\012xdr teapot work sheet (XDR format)
diff --git a/contrib/libs/libmagic/magic/Magdir/terminfo b/contrib/libs/libmagic/magic/Magdir/terminfo
deleted file mode 100644
index 41704eb559..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/terminfo
+++ /dev/null
@@ -1,63 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: terminfo,v 1.13 2022/11/21 22:25:37 christos Exp $
-# terminfo: file(1) magic for terminfo
-#
-# URL: https://invisible-island.net/ncurses/man/term.5.html
-# URL: https://invisible-island.net/ncurses/man/scr_dump.5.html
-#
-# Workaround for Targa image type by Joerg Jenderek
-# GRR: line below too general as it catches also
-# Targa image type 1 with a 26-byte-long identification field
-# and HELP.DSK
-0 string \032\001
-# 5th character of terminal name list, but not Targa image pixel size (15 16 24 32)
->16 ubyte >32
-# namelist, if more than 1 separated by "|" like "st|stterm| simpleterm 0.4.1"
->>12 regex \^[a-zA-Z0-9][a-zA-Z0-9.][^|]* Compiled terminfo entry "%-s"
-!:mime application/x-terminfo
-# no extension
-#!:ext
-#
-#------------------------------------------------------------------------------
-# The following was added for ncurses6 development:
-#------------------------------------------------------------------------------
-#
-0 string \036\002
-# imitate the legacy compiled-format, to get the entry-name printed
->16 ubyte >32
-# namelist, if more than 1 separated by "|" like "st|stterm| simpleterm 0.4.1"
->>12 regex \^[a-zA-Z0-9][a-zA-Z0-9.][^|]* Compiled 32-bit terminfo entry "%-s"
-!:mime application/x-terminfo2
-#
-# While the compiled terminfo uses little-endian format regardless of
-# platform, SystemV screen dumps do not. They came later, and that detail was
-# overlooked.
-#
-# AIX and HPUX use the SVr4 big-endian format
-# Solaris uses the SVr3 formats (sparc and x86 differ endian-ness)
-0 beshort 0433 SVr2 curses screen image, big-endian
-# GRR: line below too general as it catches Commodore C128 program (crc32.prg XLINK.PRG) with start address 1C01h handled by ./c64
-0 beshort 0434 SVr3 curses screen image, big-endian
-0 beshort 0435 SVr4 curses screen image, big-endian
-#
-0 leshort 0433 SVr2 curses screen image, little-endian
-0 leshort 0434 SVr3 curses screen image, little-endian
-0 leshort 0435 SVr4 curses screen image, little-endian
-#
-# Rather than SVr4, Solaris "xcurses" writes this header:
-0 regex \^MAX=[0-9]+,[0-9]+$
->1 regex \^BEG=[0-9]+,[0-9]+$
->2 regex \^SCROLL=[0-9]+,[0-9]+$
->3 regex \^VMIN=[0-9]+$
->4 regex \^VTIME=[0-9]+$
->5 regex \^FLAGS=0x[[:xdigit:]]+$
->6 regex \^FG=[0-9],[0-9]+$
->7 regex \^BG=[0-9]+,[0-9]+, Solaris xcurses screen image
-#
-# ncurses5 (and before) did not use a magic number, making screen dumps "data".
-# ncurses6 (2015) uses this format, ignoring byte-order
-0 string \210\210\210\210ncurses ncurses6 screen image
-#
-# PDCurses added this in 2005
-0 string PDC\001 PDCurses screen image
diff --git a/contrib/libs/libmagic/magic/Magdir/tex b/contrib/libs/libmagic/magic/Magdir/tex
deleted file mode 100644
index e66f8ffdce..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/tex
+++ /dev/null
@@ -1,141 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: tex,v 1.22 2022/12/21 16:50:04 christos Exp $
-# tex: file(1) magic for TeX files
-#
-# XXX - needs byte-endian stuff (big-endian and little-endian DVI?)
-#
-# From <conklin@talisman.kaleida.com>
-
-# Although we may know the offset of certain text fields in TeX DVI
-# and font files, we can't use them reliably because they are not
-# zero terminated. [but we do anyway, christos]
-0 string \367\002
->(14.b+15) string \213
->>14 pstring >\0 TeX DVI file (%s)
-!:mime application/x-dvi
-0 string \367\203 TeX generic font data
-0 string \367\131 TeX packed font data
->3 string >\0 (%s)
-0 string \367\312
->(2.b+11) string \363 TeX virtual font data
-0 search/1 This\ is\ TeX, TeX transcript text
-0 search/1 This\ is\ METAFONT, METAFONT transcript text
-
-# There is no way to detect TeX Font Metric (*.tfm) files without
-# breaking them apart and reading the data. The following patterns
-# match most *.tfm files generated by METAFONT or afm2tfm.
-2 string \000\021 TeX font metric data
-!:mime application/x-tex-tfm
->33 string >\0 (%s)
-2 string \000\022 TeX font metric data
-!:mime application/x-tex-tfm
->33 string >\0 (%s)
-
-# Texinfo and GNU Info, from Daniel Quinlan (quinlan@yggdrasil.com)
-0 search/1 \\input\ texinfo Texinfo source text
-!:mime text/x-texinfo
-0 search/1 This\ is\ Info\ file GNU Info text
-!:mime text/x-info
-
-# TeX documents, from Daniel Quinlan (quinlan@yggdrasil.com)
-0 search/4096 \\input TeX document text
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 \\begin LaTeX document text
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 \\section LaTeX document text
-!:mime text/x-tex
-!:strength + 18
-0 search/4096 \\setlength LaTeX document text
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 \\documentstyle LaTeX document text
-!:mime text/x-tex
-!:strength + 18
-0 search/4096 \\chapter LaTeX document text
-!:mime text/x-tex
-!:strength + 18
-0 search/4096 \\documentclass LaTeX 2e document text
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 \\relax LaTeX auxiliary file
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 \\contentsline LaTeX table of contents
-!:mime text/x-tex
-!:strength + 15
-0 search/4096 %\ -*-latex-*- LaTeX document text
-!:mime text/x-tex
-
-# Tex document, from Hendrik Scholz <hendrik@scholz.net>
-0 search/1 \\ifx TeX document text
-
-# Index and glossary files
-0 search/4096 \\indexentry LaTeX raw index file
-0 search/4096 \\begin{theindex} LaTeX sorted index
-0 search/4096 \\glossaryentry LaTeX raw glossary
-0 search/4096 \\begin{theglossary} LaTeX sorted glossary
-0 search/4096 This\ is\ makeindex Makeindex log file
-
-# End of TeX
-
-#------------------------------------------------------------------------------
-# file(1) magic for BibTex text files
-# From Hendrik Scholz <hendrik@scholz.net>
-
-0 search/1/c @article{ BibTeX text file
-0 search/1/c @book{ BibTeX text file
-0 search/1/c @inbook{ BibTeX text file
-0 search/1/c @incollection{ BibTeX text file
-0 search/1/c @inproceedings{ BibTeX text file
-0 search/1/c @manual{ BibTeX text file
-0 search/1/c @misc{ BibTeX text file
-0 search/1/c @preamble{ BibTeX text file
-0 search/1/c @phdthesis{ BibTeX text file
-0 search/1/c @techreport{ BibTeX text file
-0 search/1/c @unpublished{ BibTeX text file
-
-73 search/1 %%%\ \ BibTeX-file{ BibTex text file (with full header)
-
-73 search/1 %%%\ \ @BibTeX-style-file{ BibTeX style text file (with full header)
-
-0 search/1 %\ BibTeX\ standard\ bibliography\ BibTeX standard bibliography style text file
-
-0 search/1 %\ BibTeX\ ` BibTeX custom bibliography style text file
-
-0 search/1 @c\ @mapfile{ TeX font aliases text file
-
-0 string #LyX LyX document text
-
-# ConTeXt documents
-# https://wiki.contextgarden.net/
-0 search/4096 \\setupcolors[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\definecolor[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupinteraction[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\useURL[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setuppapersize[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setuplayout[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupfooter[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupfootertexts[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setuppagenumbering[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupbodyfont[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setuphead[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupitemize[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupwhitespace[ ConTeXt document text
-!:strength + 15
-0 search/4096 \\setupindenting[ ConTeXt document text
-!:strength + 15
diff --git a/contrib/libs/libmagic/magic/Magdir/tgif b/contrib/libs/libmagic/magic/Magdir/tgif
deleted file mode 100644
index e80b3a76cb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/tgif
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: tgif,v 1.7 2010/09/20 19:03:46 rrt Exp $
-# file(1) magic for tgif(1) files
-# From Hendrik Scholz <hendrik@scholz.net>
-0 string %TGIF\ Tgif file version
->6 string x %s
diff --git a/contrib/libs/libmagic/magic/Magdir/ti-8x b/contrib/libs/libmagic/magic/Magdir/ti-8x
deleted file mode 100644
index b05c5c9c3f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/ti-8x
+++ /dev/null
@@ -1,239 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: ti-8x,v 1.8 2020/02/12 22:13:01 christos Exp $
-# ti-8x: file(1) magic for the TI-8x and TI-9x Graphing Calculators.
-#
-# From: Ryan McGuire (rmcguire@freenet.columbus.oh.us).
-#
-# Update: Romain Lievin (roms@lpg.ticalc.org).
-#
-# NOTE: This list is not complete.
-# Files for the TI-80 and TI-81 are pretty rare. I'm not going to put the
-# program/group magic numbers in here because I cannot find any.
-0 string **TI80** TI-80 Graphing Calculator File.
-0 string **TI81** TI-81 Graphing Calculator File.
-#
-# Magic Numbers for the TI-73
-#
-0 string **TI73** TI-73 Graphing Calculator
->0x00003B byte 0x00 (real number)
->0x00003B byte 0x01 (list)
->0x00003B byte 0x02 (matrix)
->0x00003B byte 0x03 (equation)
->0x00003B byte 0x04 (string)
->0x00003B byte 0x05 (program)
->0x00003B byte 0x06 (assembly program)
->0x00003B byte 0x07 (picture)
->0x00003B byte 0x08 (gdb)
->0x00003B byte 0x0C (complex number)
->0x00003B byte 0x0F (window settings)
->0x00003B byte 0x10 (zoom)
->0x00003B byte 0x11 (table setup)
->0x00003B byte 0x13 (backup)
-
-# Magic Numbers for the TI-82
-#
-0 string **TI82** TI-82 Graphing Calculator
->0x00003B byte 0x00 (real)
->0x00003B byte 0x01 (list)
->0x00003B byte 0x02 (matrix)
->0x00003B byte 0x03 (Y-variable)
->0x00003B byte 0x05 (program)
->0x00003B byte 0x06 (protected prgm)
->0x00003B byte 0x07 (picture)
->0x00003B byte 0x08 (gdb)
->0x00003B byte 0x0B (window settings)
->0x00003B byte 0x0C (window settings)
->0x00003B byte 0x0D (table setup)
->0x00003B byte 0x0E (screenshot)
->0x00003B byte 0x0F (backup)
-#
-# Magic Numbers for the TI-83
-#
-0 string **TI83** TI-83 Graphing Calculator
->0x00003B byte 0x00 (real)
->0x00003B byte 0x01 (list)
->0x00003B byte 0x02 (matrix)
->0x00003B byte 0x03 (Y-variable)
->0x00003B byte 0x04 (string)
->0x00003B byte 0x05 (program)
->0x00003B byte 0x06 (protected prgm)
->0x00003B byte 0x07 (picture)
->0x00003B byte 0x08 (gdb)
->0x00003B byte 0x0B (window settings)
->0x00003B byte 0x0C (window settings)
->0x00003B byte 0x0D (table setup)
->0x00003B byte 0x0E (screenshot)
->0x00003B byte 0x13 (backup)
-#
-# Magic Numbers for the TI-83+
-#
-0 string **TI83F* TI-83+ Graphing Calculator
->0x00003B byte 0x00 (real number)
->0x00003B byte 0x01 (list)
->0x00003B byte 0x02 (matrix)
->0x00003B byte 0x03 (equation)
->0x00003B byte 0x04 (string)
->0x00003B byte 0x05 (program)
->0x00003B byte 0x06 (assembly program)
->0x00003B byte 0x07 (picture)
->0x00003B byte 0x08 (gdb)
->0x00003B byte 0x0C (complex number)
->0x00003B byte 0x0F (window settings)
->0x00003B byte 0x10 (zoom)
->0x00003B byte 0x11 (table setup)
->0x00003B byte 0x13 (backup)
->0x00003B byte 0x15 (application variable)
->0x00003B byte 0x17 (group of variable)
-
-#
-# Magic Numbers for the TI-85
-#
-0 string **TI85** TI-85 Graphing Calculator
->0x00003B byte 0x00 (real number)
->0x00003B byte 0x01 (complex number)
->0x00003B byte 0x02 (real vector)
->0x00003B byte 0x03 (complex vector)
->0x00003B byte 0x04 (real list)
->0x00003B byte 0x05 (complex list)
->0x00003B byte 0x06 (real matrix)
->0x00003B byte 0x07 (complex matrix)
->0x00003B byte 0x08 (real constant)
->0x00003B byte 0x09 (complex constant)
->0x00003B byte 0x0A (equation)
->0x00003B byte 0x0C (string)
->0x00003B byte 0x0D (function GDB)
->0x00003B byte 0x0E (polar GDB)
->0x00003B byte 0x0F (parametric GDB)
->0x00003B byte 0x10 (diffeq GDB)
->0x00003B byte 0x11 (picture)
->0x00003B byte 0x12 (program)
->0x00003B byte 0x13 (range)
->0x00003B byte 0x17 (window settings)
->0x00003B byte 0x18 (window settings)
->0x00003B byte 0x19 (window settings)
->0x00003B byte 0x1A (window settings)
->0x00003B byte 0x1B (zoom)
->0x00003B byte 0x1D (backup)
->0x00003B byte 0x1E (unknown)
->0x00003B byte 0x2A (equation)
->0x000032 string ZS4 - ZShell Version 4 File.
->0x000032 string ZS3 - ZShell Version 3 File.
-#
-# Magic Numbers for the TI-86
-#
-0 string **TI86** TI-86 Graphing Calculator
->0x00003B byte 0x00 (real number)
->0x00003B byte 0x01 (complex number)
->0x00003B byte 0x02 (real vector)
->0x00003B byte 0x03 (complex vector)
->0x00003B byte 0x04 (real list)
->0x00003B byte 0x05 (complex list)
->0x00003B byte 0x06 (real matrix)
->0x00003B byte 0x07 (complex matrix)
->0x00003B byte 0x08 (real constant)
->0x00003B byte 0x09 (complex constant)
->0x00003B byte 0x0A (equation)
->0x00003B byte 0x0C (string)
->0x00003B byte 0x0D (function GDB)
->0x00003B byte 0x0E (polar GDB)
->0x00003B byte 0x0F (parametric GDB)
->0x00003B byte 0x10 (diffeq GDB)
->0x00003B byte 0x11 (picture)
->0x00003B byte 0x12 (program)
->0x00003B byte 0x13 (range)
->0x00003B byte 0x17 (window settings)
->0x00003B byte 0x18 (window settings)
->0x00003B byte 0x19 (window settings)
->0x00003B byte 0x1A (window settings)
->0x00003B byte 0x1B (zoom)
->0x00003B byte 0x1D (backup)
->0x00003B byte 0x1E (unknown)
->0x00003B byte 0x2A (equation)
-#
-# Magic Numbers for the TI-89
-#
-0 string **TI89** TI-89 Graphing Calculator
->0x000048 byte 0x00 (expression)
->0x000048 byte 0x04 (list)
->0x000048 byte 0x06 (matrix)
->0x000048 byte 0x0A (data)
->0x000048 byte 0x0B (text)
->0x000048 byte 0x0C (string)
->0x000048 byte 0x0D (graphic data base)
->0x000048 byte 0x0E (figure)
->0x000048 byte 0x10 (picture)
->0x000048 byte 0x12 (program)
->0x000048 byte 0x13 (function)
->0x000048 byte 0x14 (macro)
->0x000048 byte 0x1C (zipped)
->0x000048 byte 0x21 (assembler)
-#
-# Magic Numbers for the TI-92
-#
-0 string **TI92** TI-92 Graphing Calculator
->0x000048 byte 0x00 (expression)
->0x000048 byte 0x04 (list)
->0x000048 byte 0x06 (matrix)
->0x000048 byte 0x0A (data)
->0x000048 byte 0x0B (text)
->0x000048 byte 0x0C (string)
->0x000048 byte 0x0D (graphic data base)
->0x000048 byte 0x0E (figure)
->0x000048 byte 0x10 (picture)
->0x000048 byte 0x12 (program)
->0x000048 byte 0x13 (function)
->0x000048 byte 0x14 (macro)
->0x000048 byte 0x1D (backup)
-#
-# Magic Numbers for the TI-92+/V200
-#
-0 string **TI92P* TI-92+/V200 Graphing Calculator
->0x000048 byte 0x00 (expression)
->0x000048 byte 0x04 (list)
->0x000048 byte 0x06 (matrix)
->0x000048 byte 0x0A (data)
->0x000048 byte 0x0B (text)
->0x000048 byte 0x0C (string)
->0x000048 byte 0x0D (graphic data base)
->0x000048 byte 0x0E (figure)
->0x000048 byte 0x10 (picture)
->0x000048 byte 0x12 (program)
->0x000048 byte 0x13 (function)
->0x000048 byte 0x14 (macro)
->0x000048 byte 0x1C (zipped)
->0x000048 byte 0x21 (assembler)
-#
-# Magic Numbers for the TI-73/83+/89/92+/V200 FLASH upgrades
-#
-#0x0000016 string Advanced TI-XX Graphing Calculator (FLASH)
-0 string **TIFL** TI-XX Graphing Calculator (FLASH)
->8 byte >0 - Revision %d
->>9 byte x \b.%d,
->12 byte >0 Revision date %02x
->>13 byte x \b/%02x
->>14 beshort x \b/%04x,
->17 string >\0 name: '%s',
->48 byte 0x74 device: TI-73,
->48 byte 0x73 device: TI-83+,
->48 byte 0x98 device: TI-89,
->48 byte 0x88 device: TI-92+,
->49 byte 0x23 type: OS upgrade,
->49 byte 0x24 type: application,
->49 byte 0x25 type: certificate,
->49 byte 0x3e type: license,
->74 lelong >0 size: %d bytes
-
-# VTi & TiEmu skins (TI Graphing Calculators).
-# From: Romain Lievin (roms@lpg.ticalc.org).
-# Magic Numbers for the VTi skins
-0 string VTI Virtual TI skin
->3 string v - Version
->>4 byte >0 \b %c
->>6 byte x \b.%c
-# Magic Numbers for the TiEmu skins
-0 string TiEmu TiEmu skin
->6 string v - Version
->>7 byte >0 \b %c
->>9 byte x \b.%c
->>10 byte x \b%c
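The TI entries all follow one pattern: an 8-byte signature selects the calculator family, and a variable-type byte sits at offset 0x3B for the Z80 models or 0x48 for the 68k models. A rough sketch of that lookup, with a placeholder file name:

    TYPE_OFFSET = {b"**TI73**": 0x3B, b"**TI82**": 0x3B, b"**TI83**": 0x3B,
                   b"**TI83F*": 0x3B, b"**TI85**": 0x3B, b"**TI86**": 0x3B,
                   b"**TI89**": 0x48, b"**TI92**": 0x48, b"**TI92P*": 0x48}

    def ti_type_byte(path="PRGM.8xp"):             # placeholder path
        with open(path, "rb") as f:
            data = f.read(0x50)
        off = TYPE_OFFSET.get(data[:8])
        return None if off is None else data[off]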
diff --git a/contrib/libs/libmagic/magic/Magdir/timezone b/contrib/libs/libmagic/magic/Magdir/timezone
deleted file mode 100644
index 84e9081667..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/timezone
+++ /dev/null
@@ -1,42 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: timezone,v 1.13 2021/07/21 17:57:20 christos Exp $
-# timezone: file(1) magic for timezone data
-#
-# from Daniel Quinlan (quinlan@yggdrasil.com)
-# this should work on Linux, SunOS, and maybe others
-# Added new official magic number for recent versions of the Olson code
-0 name timezone
->4 byte 0 \b, old version
->4 byte >0 \b, version %c
->20 belong 0 \b, no gmt time flags
->20 belong 1 \b, 1 gmt time flag
->20 belong >1 \b, %d gmt time flags
->24 belong 0 \b, no std time flags
->24 belong 1 \b, 1 std time flag
->24 belong >1 \b, %d std time flags
->28 belong 0 \b, no leap seconds
->28 belong 1 \b, 1 leap second
->28 belong >1 \b, %d leap seconds
->32 belong 0 \b, no transition times
->32 belong 1 \b, 1 transition time
->32 belong >1 \b, %d transition times
->36 belong 0 \b, no local time types
->36 belong 1 \b, 1 local time type
->36 belong >1 \b, %d local time types
->40 belong 0 \b, no abbreviation chars
->40 belong 1 \b, 1 abbreviation char
->40 belong >1 \b, %d abbreviation chars
-
-0 string TZif timezone data
->51 string TZif \b(slim)
->>51 use timezone
->51 default x \b(fat)
->>0 use timezone
-
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\1\0 old timezone data
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\2\0 old timezone data
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\3\0 old timezone data
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\4\0 old timezone data
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\5\0 old timezone data
-0 string \0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\0\6\0 old timezone data
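The TZif rules above report six big-endian counts stored at offsets 20 through 40 of the header. A minimal sketch that unpacks the same fields; /etc/localtime is only a convenient example path on Linux systems:

    import struct

    with open("/etc/localtime", "rb") as f:        # example path, commonly a TZif file
        head = f.read(44)
    if head[:4] == b"TZif":
        isutcnt, isstdcnt, leapcnt, timecnt, typecnt, charcnt = \
            struct.unpack(">6I", head[20:44])
        print("version", head[4:5], timecnt, "transition times,", typecnt, "local time types")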
diff --git a/contrib/libs/libmagic/magic/Magdir/tplink b/contrib/libs/libmagic/magic/Magdir/tplink
deleted file mode 100644
index 1b4ef0f336..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/tplink
+++ /dev/null
@@ -1,95 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: tplink,v 1.8 2023/05/15 16:41:02 christos Exp $
-# tplink: File magic for openwrt firmware files
-
-# URL: https://wiki.openwrt.org/doc/techref/header
-# Reference: https://git.openwrt.org/?p=openwrt.git;a=blob;f=tools/firmware-utils/src/mktplinkfw.c
-# http://mark0.net/download/triddefs_xml.7z/defs/b/bin-tplink-v1.trid.xml
-# Note: called "TP-Link router firmware (v1)" by TrID
-# From: Joerg Jenderek
-# check for valid header version 1 or 2
-0 ulelong <3
->0 ulelong !0
-# test for header padding with nulls
->>0x100 long 0
-# skip Norton Commander Cleanup Utility NCCLEAN.INI by looking for valid vendor name
->>>4 ubelong >0x1F000000
-# skip user.dbt by looking for positive hardware id
->>>>0x40 ubeshort >0
-# skip cversions.1.db cversions.2.db cversions.3.db inside
-# c:\ProgramData\Microsoft\Windows\Caches
-# with invalid vendor names \240\0\0\0 \140\0\0\0 \040\0\0\0
->>>>>5 short !0
->>>>>>0 use firmware-tplink
-
-0 name firmware-tplink
->0 ubyte x firmware
-!:mime application/x-tplink-bin
-# like: TL-WR1043ND-V1-FW0.0.3-stripped.bin gluon-ffrefugee-0.9.2-tp-link-archer-c5-v1-sysupgrade.bin
-!:ext bin
-# hardware id like 10430001 07410001 09410004 09410006
->0x40 ubeshort x %x
->0x42 ubeshort x v%x
-# hardware revision like 1
->0x44 ubelong !1 (revision %u)
-# vendor_name[24] like OpenWrt or TP-LINK Technologies
->4 string x %.24s
-# fw_version[36] like r49389 or ver. 1.0
->0x1c string x %.36s
-# header version 1 or 2
->0 ubyte !1 V%X
-# ver_hi.ver_mid.ver_lo
->0x98 long !0 \b, version
->>0x98 ubeshort x %u
->>0x9A ubeshort x \b.%u
->>0x9C ubeshort x \b.%u
-# region code 0~universal 1~US
->0x48 ubelong x
-#>>0x48 ubelong 0 (universal)
->>0x48 ubelong 1 (US)
->>0x48 ubelong >1 (region %u)
-# total length of the firmware. not always true
->0x7C ubelong x \b, %u bytes or less
-# unknown 1
->0x48 ubelong !0 \b, UNKNOWN1 %#x
-# md5sum1[16]
-#>0x4c ubequad x \b, MD5 %llx
-#>>0x54 ubequad x \b%llx
-# unknown 2
->0x5c ubelong !0 \b, UNKNOWN2 %#x
-# md5sum2[16]
-#>0x60 ubequad !0 \b, 2nd MD5 %llx
-#>>0x68 ubequad x \b%llx
-# unknown 3
->0x70 ubelong !0 \b, UNKNOWN3 %#x
-# kernel load address
-#>0x74 ubelong x \b, %#x load
-# kernel entry point
-#>0x78 ubelong x \b, %#x entry
-# kernel data offset. 200h means direct after header
->0x80 ubelong x \b, at %#x
-# kernel data length and 1 space
->0x84 ubelong x %u bytes
-# look for kernel type (gzip compressed vmlinux.bin by ./compress)
->(0x80.L) indirect x
-# root file system data offset
-# WRONG in 5.35 with above indirect expression
->0x88 ubelong x \b, at %#x
-# rootfs data length and 1 space
->0x8C ubelong x %u bytes
-# in 5.32 only true for offset ~< FILE_BYTES_MAX=9 MB defined in ../../src/file.h
->(0x88.L) indirect x
-# 'qshs' for wr940nv1_en_3_13_7_up(111228).bin
-#>(0x88.L) string x \b, file system '%.4s'
-#>(0x88.L) ubequad x \b, file system %#llx
-# bootloader data offset
->0x90 ubelong !0 \b, at %#x
-# bootloader data length only reasonable if bootloader offset not null
->>0x94 ubelong !0 %u bytes
-# pad[354] should be 354 null bytes.
-#>0x9E ubequad !0 \b, padding %#llx
-# But at 0x120 18 non null bytes in examples like
-# wr940nv4_eu_3_16_9_up_boot(160620).bin
-# wr940nv6_us_3_18_1_up_boot(171030).bin
-#>0x120 ubequad !0 \b, other padding %#llx
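The firmware-tplink entry decodes a fixed big-endian header: vendor and version strings near the start, a hardware id at 0x40, and kernel/rootfs offsets and lengths at 0x80-0x8F. A rough Python reader for those fields, with a placeholder file name:

    import struct

    with open("firmware.bin", "rb") as f:          # placeholder path
        hdr = f.read(0x200)

    vendor = hdr[4:4 + 24].split(b"\0", 1)[0]        # vendor_name[24]
    fw_ver = hdr[0x1C:0x1C + 36].split(b"\0", 1)[0]  # fw_version[36]
    hw_id, hw_ver = struct.unpack(">HH", hdr[0x40:0x44])
    kernel_off, kernel_len, rootfs_off, rootfs_len = struct.unpack(">4I", hdr[0x80:0x90])
    print(vendor, fw_ver, hex(hw_id), kernel_off, kernel_len, rootfs_off, rootfs_len)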
diff --git a/contrib/libs/libmagic/magic/Magdir/troff b/contrib/libs/libmagic/magic/Magdir/troff
deleted file mode 100644
index 301a40bc34..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/troff
+++ /dev/null
@@ -1,44 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: troff,v 1.14 2023/06/01 16:00:46 christos Exp $
-# troff: file(1) magic for *roff
-#
-# updated by Daniel Quinlan (quinlan@yggdrasil.com)
-
-# troff input
-0 search/1 .\\" troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-0 search/1 '\\" troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-0 search/1 '.\\" troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-0 search/1 \\" troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-#0 search/1 ''' troff or preprocessor input text
-#!:mime text/troff
-0 regex/20l \^\\.[A-Za-z][A-Za-z0-9][\ \t] troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-0 regex/20l \^\\.[A-Za-z][A-Za-z0-9]$ troff or preprocessor input text
-!:strength +12
-!:mime text/troff
-
-# ditroff intermediate output text
-0 search/1 x\ T ditroff output text
->4 search/1 cat for the C/A/T phototypesetter
->4 search/1 ps for PostScript
->4 search/1 dvi for DVI
->4 search/1 ascii for ASCII
->4 search/1 lj4 for LaserJet 4
->4 search/1 latin1 for ISO 8859-1 (Latin 1)
->4 search/1 X75 for xditview at 75dpi
->>7 search/1 -12 (12pt)
->4 search/1 X100 for xditview at 100dpi
->>8 search/1 -12 (12pt)
-
-# output data formats
-0 string \100\357 very old (C/A/T) troff output data
diff --git a/contrib/libs/libmagic/magic/Magdir/tuxedo b/contrib/libs/libmagic/magic/Magdir/tuxedo
deleted file mode 100644
index 191501decf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/tuxedo
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: tuxedo,v 1.4 2009/09/19 16:28:13 christos Exp $
-# tuxedo: file(1) magic for BEA TUXEDO data files
-#
-# from Ian Springer <ispringer@hotmail.com>
-#
-0 string \0\0\1\236\0\0\0\0\0\0\0\0\0\0\0\0 BEA TUXEDO DES mask data
diff --git a/contrib/libs/libmagic/magic/Magdir/typeset b/contrib/libs/libmagic/magic/Magdir/typeset
deleted file mode 100644
index e99fe3731b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/typeset
+++ /dev/null
@@ -1,8 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: typeset,v 1.8 2009/09/19 16:28:13 christos Exp $
-# typeset: file(1) magic for other typesetting
-#
-0 string Interpress/Xerox Xerox InterPress data
->16 string / (version
->>17 string >\0 %s)
diff --git a/contrib/libs/libmagic/magic/Magdir/uf2 b/contrib/libs/libmagic/magic/Magdir/uf2
deleted file mode 100644
index 49a86d7640..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/uf2
+++ /dev/null
@@ -1,72 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: uf2,v 1.3 2021/04/28 01:00:31 christos Exp $
-# uf2: file(1) magic for UF2 firmware image files
-#
-# https://github.com/microsoft/uf2
-#
-# Created by Blake Ramsdell <blaker@gmail.com>
-
-0 string UF2\n UF2 firmware image
-!:ext uf2
-# This is for checking the other magic numbers, do we want to do that?
-#>4 lelong 0x9E5D5157 howdy
-#>>508 lelong 0x0AB16F30 doody
->8 lelong &0x0001 \b, not main flash
->8 lelong &0x1000 \b, file container
->8 lelong &0x2000 \b, family
-
-# To update the UF2 family data, use this fine command
-#
-# families=`curl \
-# https://raw.githubusercontent.com/microsoft/uf2/master/utils/uf2families.json \
-# | jq -r '.[] | ">>28\tlelong\t\(.id)\t\(.description)"' | sort -n -k 3` && \
-# perl -0777 -i -pe \
-# "s/(### BEGIN UF2 FAMILIES\\n).*(\\n### END UF2 FAMILIES)/\$1$families\$2/s" \
-# uf2
-
-### BEGIN UF2 FAMILIES
->>28 lelong 0x00ff6919 ST STM32L4xx
->>28 lelong 0x04240bdf ST STM32L5xx
->>28 lelong 0x16573617 Microchip (Atmel) ATmega32
->>28 lelong 0x1851780a Microchip (Atmel) SAML21
->>28 lelong 0x1b57745f Nordic NRF52
->>28 lelong 0x1c5f21b0 ESP32
->>28 lelong 0x1e1f432d ST STM32L1xx
->>28 lelong 0x202e3a91 ST STM32L0xx
->>28 lelong 0x21460ff0 ST STM32WLxx
->>28 lelong 0x2abc77ec NXP LPC55xx
->>28 lelong 0x300f5633 ST STM32G0xx
->>28 lelong 0x31d228c6 GD32F350
->>28 lelong 0x4c71240a ST STM32G4xx
->>28 lelong 0x4fb2d5bd NXP i.MX RT10XX
->>28 lelong 0x53b80f00 ST STM32F7xx
->>28 lelong 0x55114460 Microchip (Atmel) SAMD51
->>28 lelong 0x57755a57 ST STM32F401
->>28 lelong 0x5a18069b Cypress FX2
->>28 lelong 0x5d1a0a2e ST STM32F2xx
->>28 lelong 0x5ee21072 ST STM32F103
->>28 lelong 0x647824b6 ST STM32F0xx
->>28 lelong 0x68ed2b88 Microchip (Atmel) SAMD21
->>28 lelong 0x6b846188 ST STM32F3xx
->>28 lelong 0x6d0922fa ST STM32F407
->>28 lelong 0x6db66082 ST STM32H7xx
->>28 lelong 0x70d16653 ST STM32WBxx
->>28 lelong 0x7eab61ed ESP8266
->>28 lelong 0x7f83e793 NXP KL32L2x
->>28 lelong 0x8fb060fe ST STM32F407VG
->>28 lelong 0xada52840 Nordic NRF52840
->>28 lelong 0xbfdd4eee ESP32-S2
->>28 lelong 0xc47e5767 ESP32-S3
->>28 lelong 0xd42ba06c ESP32-C3
->>28 lelong 0xe48bff56 Raspberry Pi RP2040
-### END UF2 FAMILIES
-
->>28 default x
->>>28 lelong x %#08x
->8 lelong&0x2000 0 \b, file size
->>28 lelong x %#08x
->8 lelong &0x4000 \b, MD5 checksum present
->8 lelong &0x8000 \b, extension tags present
->12 lelong x \b, address %#08x
->24 lelong x \b, %u total blocks
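
Note: the uf2 entry above reads everything from one 512-byte UF2 block (magic words at 0, 4 and 508, flags at 8, target address at 12, block count at 24, family id or file size at 28, per the linked https://github.com/microsoft/uf2 spec). A small Python sketch of the same check, with the family-name table omitted:

    import struct

    UF2_MAGIC0, UF2_MAGIC1, UF2_MAGIC_END = 0x0A324655, 0x9E5D5157, 0x0AB16F30

    def parse_uf2_block(block: bytes) -> dict:
        """Decode the header words of a single 512-byte UF2 block."""
        if len(block) != 512:
            raise ValueError("UF2 blocks are exactly 512 bytes")
        m0, m1, flags, addr, payload_size, block_no, num_blocks, extra = \
            struct.unpack_from("<8I", block, 0)
        (m_end,) = struct.unpack_from("<I", block, 508)
        if (m0, m1, m_end) != (UF2_MAGIC0, UF2_MAGIC1, UF2_MAGIC_END):
            raise ValueError("not a UF2 block")
        info = {
            "not_main_flash": bool(flags & 0x0001),
            "file_container": bool(flags & 0x1000),
            "address": addr,
            "payload_size": payload_size,
            "block_no": block_no,
            "total_blocks": num_blocks,
        }
        # flag 0x2000 selects whether word 28 holds a family id or a file size
        info["family_id" if flags & 0x2000 else "file_size"] = extra
        return info

    with open("firmware.uf2", "rb") as fh:       # hypothetical input file
        print(parse_uf2_block(fh.read(512)))
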
diff --git a/contrib/libs/libmagic/magic/Magdir/unicode b/contrib/libs/libmagic/magic/Magdir/unicode
deleted file mode 100644
index 7ca61bacbe..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/unicode
+++ /dev/null
@@ -1,15 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: unicode,v 1.7 2019/02/19 20:34:42 christos Exp $
-# Unicode: BOM prefixed text files - Adrian Havill <havill@turbolinux.co.jp>
-# These types are recognised in file_ascmagic so these encodings can be
-# treated by text patterns. Missing types are already dealt with internally.
-#
-0 string +/v8 Unicode text, UTF-7
-0 string +/v9 Unicode text, UTF-7
-0 string +/v+ Unicode text, UTF-7
-0 string +/v/ Unicode text, UTF-7
-0 string \335\163\146\163 Unicode text, UTF-8-EBCDIC
-0 string \000\000\376\377 Unicode text, UTF-32, big-endian
-0 string \377\376\000\000 Unicode text, UTF-32, little-endian
-0 string \016\376\377 Unicode text, SCSU (Standard Compression Scheme for Unicode)
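
Note: the signatures above are plain byte prefixes, so the whole file reduces to a prefix table; a tiny Python sketch of the same classification (longer prefixes listed first):

    SIGNATURES = [
        (b"\x00\x00\xfe\xff", "UTF-32, big-endian"),
        (b"\xff\xfe\x00\x00", "UTF-32, little-endian"),
        (b"\xdd\x73\x66\x73", "UTF-8-EBCDIC"),
        (b"\x0e\xfe\xff", "SCSU (Standard Compression Scheme for Unicode)"),
        (b"+/v8", "UTF-7"), (b"+/v9", "UTF-7"),
        (b"+/v+", "UTF-7"), (b"+/v/", "UTF-7"),
    ]

    def classify_bom(head: bytes) -> str | None:
        for prefix, name in SIGNATURES:
            if head.startswith(prefix):
                return f"Unicode text, {name}"
        return None

    print(classify_bom(b"\xff\xfe\x00\x00rest"))   # Unicode text, UTF-32, little-endian
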
diff --git a/contrib/libs/libmagic/magic/Magdir/unisig b/contrib/libs/libmagic/magic/Magdir/unisig
deleted file mode 100644
index 6212c3871f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/unisig
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: unisig,v 1.1 2020/04/09 19:05:44 christos Exp $
-# unisig: file(1) magic for files carrying a uniform signature (Unisig)
-# From: Lassi Kortela, John Cowan
-# URL: https://github.com/unisig
-#
-0 string \xDC\xDC\x0D\x0A\x1A\x0A\x00 Unisig:
->7 ubyte =0 UUID
->>8 guid x %s
->7 ubyte >0 URI
->>7 pstring x %s
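
Note: per the entry above, a Unisig header is the 7-byte magic followed by a length byte at offset 7: zero means a 16-byte UUID follows at offset 8, non-zero means that many URI bytes follow. A rough Python sketch; the UUID is printed in plain big-endian form here, which may differ from how file(1) formats its guid type:

    import uuid

    UNISIG_MAGIC = b"\xDC\xDC\x0D\x0A\x1A\x0A\x00"

    def parse_unisig(head: bytes) -> str:
        if not head.startswith(UNISIG_MAGIC):
            raise ValueError("not a Unisig header")
        length = head[7]
        if length == 0:
            return f"Unisig: UUID {uuid.UUID(bytes=head[8:24])}"
        return "Unisig: URI " + head[8:8 + length].decode("ascii", "replace")

    print(parse_unisig(UNISIG_MAGIC + b"\x0bexample.org"))   # Unisig: URI example.org
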
diff --git a/contrib/libs/libmagic/magic/Magdir/unknown b/contrib/libs/libmagic/magic/Magdir/unknown
deleted file mode 100644
index 578a8ea06d..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/unknown
+++ /dev/null
@@ -1,34 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: unknown,v 1.8 2013/01/09 22:37:24 christos Exp $
-# unknown: file(1) magic for unknown machines
-#
-# 0x107 is 0407, 0x108 is 0410, and 0x109 is 0411; those are all PDP-11
-# (executable, pure, and split I&D, respectively), but the PDP-11 version
-# doesn't have the "version %ld", which may be a bogus COFFism (I don't
-# think there was ever COFF for the PDP-11).
-#
-# 0x10B is 0413; that's VAX demand-paged, but this is a short, not a
-# long, as it would be on a VAX. In any case, that could collide with
-# VAX demand-paged files, as the magic number is little-endian on those
-# binaries, so the first 16 bits of the file would contain 0x10B.
-#
-# Therefore, those entries are commented out.
-#
-# 0x10C is 0414 and 0x10E is 0416; those *are* unknown.
-#
-#0 short 0x107 unknown machine executable
-#>8 short >0 not stripped
-#>15 byte >0 - version %ld
-#0 short 0x108 unknown pure executable
-#>8 short >0 not stripped
-#>15 byte >0 - version %ld
-#0 short 0x109 PDP-11 separate I&D
-#>8 short >0 not stripped
-#>15 byte >0 - version %ld
-#0 short 0x10b unknown pure executable
-#>8 short >0 not stripped
-#>15 byte >0 - version %ld
-0 long 0x10c unknown demand paged pure executable
->16 long >0 not stripped
-0 long 0x10e unknown readable demand paged pure executable
diff --git a/contrib/libs/libmagic/magic/Magdir/usd b/contrib/libs/libmagic/magic/Magdir/usd
deleted file mode 100644
index 356cdf7675..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/usd
+++ /dev/null
@@ -1,21 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: usd,v 1.2 2020/05/21 22:17:00 christos Exp $
-#
-# From Christian Schmidbauer
-#
-# https://github.com/PixarAnimationStudios/USD
-
-# USD crate file
-# https://github.com/PixarAnimationStudios/USD/blob/ebac0a8b6703f4fa1c27115f1f013bb9819662f4/pxr/usd/usd/crateFile.h#L441-L450
-0 string PXR-USDC USD crate
->8 byte x \b, version %x.
->9 byte x \b%x.
->10 byte x \b%x
-!:ext usd
-
-# USD ASCII file
-0 string #usda\040 USD ASCII
->6 string x \b, version %s
-!:mime text/plain
-!:ext usd
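
Note: both USD checks above are simple prefix tests; a short Python sketch of the same logic, with the crate version bytes shown in hex as in the entry:

    def identify_usd(head: bytes) -> str | None:
        if head.startswith(b"PXR-USDC"):
            return f"USD crate, version {head[8]:x}.{head[9]:x}.{head[10]:x}"
        if head.startswith(b"#usda "):
            version = head[6:].split(b"\n", 1)[0].decode("ascii", "replace").strip()
            return f"USD ASCII, version {version}"
        return None

    print(identify_usd(b"#usda 1.0\n"))   # USD ASCII, version 1.0
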
diff --git a/contrib/libs/libmagic/magic/Magdir/uterus b/contrib/libs/libmagic/magic/Magdir/uterus
deleted file mode 100644
index 4b9e768b64..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/uterus
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: uterus,v 1.4 2022/10/31 13:22:26 christos Exp $
-# file(1) magic for uterus files
-# http://freecode.com/projects/uterus
-#
-0 string UTE+ uterus file
->4 string v \b, version
->5 byte x %c
->6 string . \b.
->7 byte x \b%c
->8 string \<\> \b, big-endian
->>16 belong >0 \b, slut size %u
->8 string \>\< \b, little-endian
->>16 lelong >0 \b, slut size %u
->10 byte &8 \b, compressed
diff --git a/contrib/libs/libmagic/magic/Magdir/uuencode b/contrib/libs/libmagic/magic/Magdir/uuencode
deleted file mode 100644
index df70dc5319..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/uuencode
+++ /dev/null
@@ -1,28 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: uuencode,v 1.9 2021/11/13 17:48:10 christos Exp $
-# uuencode: file(1) magic for ASCII-encoded files
-#
-
-# The first line of xxencoded files is identical to that in uuencoded files,
-# but the first character in most subsequent lines is 'h' instead of 'M'.
-# (xxencoding uses lowercase letters in place of most of uuencode's
-# punctuation and survives BITNET gateways better.)
-0 regex/1024 \^begin\040[0-7]{3}\040
->&0 regex/256 [\012\015]+M[\040-\140]{60}[\012\015]+ uuencoded text
->&0 regex/256 [\012\015]+h[0-9A-Za-z\053\055]{60}[\012\015]+ xxencoded text
->&0 default x uuencoded or xxencoded text
->&0 string >\0 \b, file name "%s"
-
-# btoa(1) is an alternative to uuencode that requires less space.
-0 search/1 xbtoa\ Begin btoa'd text
-
-# ship(1) is another, much cooler alternative to uuencode.
-# Greg Roelofs, newt@uchicago.edu
-0 search/1 $\012ship ship'd binary text
-
-# bencode(8) is used to encode compressed news batches (Bnews/Cnews only?)
-# Greg Roelofs, newt@uchicago.edu
-0 search/1 Decode\ the\ following\ with\ bdeco bencoded News text
-
-# GRR: handle BASE64
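
Note: as the comment above explains, uuencoded and xxencoded files share the same "begin MODE NAME" first line and differ in the leading length character of their body lines ('M' for full uuencode lines, 'h' for xxencode). A rough Python version of that distinction; the sample body line below is a placeholder, not real encoded data:

    import re

    BEGIN = re.compile(rb"^begin [0-7]{3} (?P<name>.+)$")

    def classify_encoded(data: bytes) -> str | None:
        lines = data.splitlines()
        if not lines:
            return None
        m = BEGIN.match(lines[0])
        if not m:
            return None
        body = lines[1] if len(lines) > 1 else b""
        if body.startswith(b"M"):
            kind = "uuencoded"
        elif body.startswith(b"h"):
            kind = "xxencoded"
        else:
            kind = "uuencoded or xxencoded"
        return f'{kind} text, file name "{m.group("name").decode("ascii", "replace")}"'

    sample = b"begin 644 hello.txt\n" + b"M" + b"#" * 60 + b"\n`\nend\n"
    print(classify_encoded(sample))   # uuencoded text, file name "hello.txt"
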
diff --git a/contrib/libs/libmagic/magic/Magdir/vacuum-cleaner b/contrib/libs/libmagic/magic/Magdir/vacuum-cleaner
deleted file mode 100644
index eef78f21ef..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vacuum-cleaner
+++ /dev/null
@@ -1,54 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vacuum-cleaner,v 1.1 2015/11/14 13:38:35 christos Exp $
-# vacuum cleaner magic by Thomas M. Ott (ThMO)
-#
-# navigation map for LG robot vacuum cleaner models VR62xx, VR64xx, VR63xx
-# file: MAPDATAyyyymmddhhmmss_xxxxxx_cc.blk
-# -> yyyymmdd: year, month, day of cleaning
-# -> hhmmss: hour, minute, second of cleaning
-# -> xxxxxx: 6 digits
-# -> cc: cleaning runs counter
-# size: 136044 bytes
-#
-# struct maphdr {
-# int32_t map_cnt; /* 0: single map */
-# int32_t min_ceil; /* 4: 100 mm == 10 cm == min. ceil */
-# int32_t max_ceil; /* 8: 10000 mm == 100 m == max. ceil */
-# int32_t max_climb; /* 12: 50 mm = 5 cm == max. height to climb */
-# int32_t unknown; /* 16: 50000 ??? */
-# int32_t cell_bytes; /* 20: # of bytes for cells per block */
-# int32_t block_max; /* 24: 1000 == max. # of blocks */
-# int32_t route_max; /* 28: 1000 == max. # of routes */
-# int32_t used_blocks; /* 32: 5/45/33/... == # of block entries used! */
-# int32_t cell_dim; /* 36: 10 == cell dimension */
-# int32_t clock_tick; /* 40: 100 == clock ticks */
-# #if 0
-# struct { /* 44: 1000 blocks for 10x10 cells */
-# int32_t yoffset;
-# int32_t xoffset;
-# int32_t posxy;
-# int32_t timecode;
-# } blocks[ 1000];
-# char cells[ 1000* 100]; /* 16044: 1000 10x10 cells */
-# int16_t routes[ 1000* 10]; /* 116044: 1000 10-routes */
-# #endif
-# };
-
-0 lelong =1
->4 lelong =100
->>8 lelong =10000
->>>12 lelong =50
->>>>16 lelong =50000
->>>>>20 lelong =100
->>>>>>24 lelong =1000
->>>>>>>28 lelong =1000
->>>>>>>>36 lelong =10
->>>>>>>>>40 lelong =100
->>>>>>>>>>32 lelong x LG robot VR6[234]xx %dm^2 navigation
->>>>>>>>>>136040 lelong =-1 reuse map data
->>>>>>>>>>136040 lelong =0 map data
->>>>>>>>>>136040 lelong >0 spurious map data
->>>>>>>>>>136040 lelong <-1 spurious map data
-
-
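
Note: the struct in the comment above already fixes every header constant, so the whole test chain reduces to a table of expected little-endian int32 values plus the used-blocks and status words. A compact Python sketch of the same checks; the file name is just an example of the MAPDATA naming pattern described above:

    import struct

    EXPECTED = {0: 1, 4: 100, 8: 10000, 12: 50, 16: 50000,
                20: 100, 24: 1000, 28: 1000, 36: 10, 40: 100}

    def parse_lg_map(buf: bytes) -> str | None:
        if len(buf) < 136044:
            return None
        for offset, want in EXPECTED.items():
            if struct.unpack_from("<i", buf, offset)[0] != want:
                return None
        (used_blocks,) = struct.unpack_from("<i", buf, 32)
        (status,) = struct.unpack_from("<i", buf, 136040)
        kind = {-1: "reuse map data", 0: "map data"}.get(status, "spurious map data")
        return f"LG robot VR6[234]xx {used_blocks}m^2 navigation, {kind}"

    with open("MAPDATA20151114133835_000001_01.blk", "rb") as fh:
        print(parse_lg_map(fh.read()))
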
diff --git a/contrib/libs/libmagic/magic/Magdir/varied.out b/contrib/libs/libmagic/magic/Magdir/varied.out
deleted file mode 100644
index 01caf07faf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/varied.out
+++ /dev/null
@@ -1,46 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: varied.out,v 1.23 2014/04/30 21:41:02 christos Exp $
-# varied.out: file(1) magic for various USG systems
-#
-# Herewith many of the object file formats used by USG systems.
-# Most have been moved to files for a particular processor,
-# and deleted if they duplicate other entries.
-#
-0 short 0610 Perkin-Elmer executable
-# AMD 29K
-0 beshort 0572 amd 29k coff noprebar executable
-0 beshort 01572 amd 29k coff prebar executable
-0 beshort 0160007 amd 29k coff archive
-# Cray
-6 beshort 0407 unicos (cray) executable
-# Ultrix 4.3
-596 string \130\337\377\377 Ultrix core file
->600 string >\0 from '%s'
-# BeOS and MacOS PEF executables

-# From: hplus@zilker.net (Jon Watte)
-0 string Joy!peffpwpc header for PowerPC PEF executable
-#
-# ava assembler/linker Uros Platise <uros.platise@ijs.si>
-0 string avaobj AVR assembler object code
->7 string >\0 version '%s'
-# gnu gmon magic From: Eugen Dedu <dedu@ese-metz.fr>
-0 string gmon GNU prof performance data
->4 long x - version %d
-# From: Dave Pearson <davep@davep.org>
-# Harbour <URL:http://harbour-project.org/> HRB files.
-0 string \xc0HRB Harbour HRB file
->4 leshort x version %d
-# Harbour HBV files
-0 string \xc0HBV Harbour variable dump file
->4 leshort x version %d
-
-# From: Alex Beregszaszi <alex@fsn.hu>
-# 0 string exec BugOS executable
-# 0 string pack BugOS archive
-
-# From: Jason Spence <jspence@lightconsulting.com>
-# Generated by the "examples" in STM's ST40 devkit, and derived code.
-0 lelong 0x13a9f17e ST40 component image format
->4 string >\0 \b, name '%s'
-
diff --git a/contrib/libs/libmagic/magic/Magdir/varied.script b/contrib/libs/libmagic/magic/Magdir/varied.script
deleted file mode 100644
index 74b1b2276c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/varied.script
+++ /dev/null
@@ -1,21 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: varied.script,v 1.15 2022/10/18 13:01:30 christos Exp $
-# varied.script: file(1) magic for various interpreter scripts
-
-0 string/wt #!\ a
->&-1 string/T x %s script text executable
-!:strength / 3
-
-0 string/wb #!\ a
->&-1 string/T x %s script executable (binary data)
-!:strength / 3
-
-
-# using env
-0 string/wt #!\ /usr/bin/env a
->15 string/T >\0 %s script text executable
-!:strength / 6
-
-0 string/wb #!\ /usr/bin/env a
->15 string/T >\0 %s script executable (binary data)
-!:strength / 6
diff --git a/contrib/libs/libmagic/magic/Magdir/vax b/contrib/libs/libmagic/magic/Magdir/vax
deleted file mode 100644
index f3deffa59f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vax
+++ /dev/null
@@ -1,32 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vax,v 1.10 2019/10/04 18:07:46 christos Exp $
-# vax: file(1) magic for VAX executable/object and APL workspace
-#
-0 lelong 0101557 VAX single precision APL workspace
-0 lelong 0101556 VAX double precision APL workspace
-
-#
-# VAX a.out (BSD; others collide with 386 and other 32-bit little-endian
-# executables, and are handled in aout)
-#
-0 lelong 0420 a.out VAX demand paged (first page unmapped) pure executable
->16 lelong >0 not stripped
-
-#
-# VAX COFF
-#
-# The `versions' were commented out, but have been un-commented out.
-# (Was the problem just one of endianness?)
-#
-0 leshort 0570
->2 uleshort <100 VAX COFF executable, sections %d
->>4 ledate x \b, created %s
->>12 lelong >0 \b, not stripped
->>22 leshort >0 \b, version %d
-
-0 leshort 0575
->2 uleshort <100 VAX COFF pure executable, sections %d
->>4 ledate x \b, created %s
->>12 lelong >0 \b, not stripped
->>22 leshort >0 \b, version %d
diff --git a/contrib/libs/libmagic/magic/Magdir/vicar b/contrib/libs/libmagic/magic/Magdir/vicar
deleted file mode 100644
index 59d843d7ca..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vicar
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vicar,v 1.4 2009/09/19 16:28:13 christos Exp $
-# vicar: file(1) magic for VICAR files.
-#
-# From: Ossama Othman <othman@astrosun.tn.cornell.edu
-# VICAR is JPL's in-house spacecraft image processing program
-# VICAR image
-0 string LBLSIZE= VICAR image data
->32 string BYTE \b, 8 bits = VAX byte
->32 string HALF \b, 16 bits = VAX word = Fortran INTEGER*2
->32 string FULL \b, 32 bits = VAX longword = Fortran INTEGER*4
->32 string REAL \b, 32 bits = VAX longword = Fortran REAL*4
->32 string DOUB \b, 64 bits = VAX quadword = Fortran REAL*8
->32 string COMPLEX \b, 64 bits = VAX quadword = Fortran COMPLEX*8
-# VICAR label file
-43 string SFDU_LABEL VICAR label file
diff --git a/contrib/libs/libmagic/magic/Magdir/virtual b/contrib/libs/libmagic/magic/Magdir/virtual
deleted file mode 100644
index 3372020421..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/virtual
+++ /dev/null
@@ -1,307 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: virtual,v 1.17 2022/08/23 08:00:54 christos Exp $
-# From: James Nobis <quel@quelrod.net>
-# Microsoft hard disk images for:
-# Virtual Server
-# Virtual PC
-# VirtualBox
-# URL: http://fileformats.archiveteam.org/wiki/VHD_(Virtual_Hard_Disk)
-# Reference: https://download.microsoft.com/download/f/f/e/ffef50a5-07dd-4cf8-aaa3-442c0673a029/
-# Virtual%20Hard%20Disk%20Format%20Spec_10_18_06.doc
-0 string conectix Microsoft Disk Image, Virtual Server or Virtual PC
-# alternative shorter names
-#0 string conectix Microsoft Virtual Hard Disk image
-#0 string conectix Microsoft Virtual HD image
-!:mime application/x-virtualbox-vhd
-!:ext vhd
-# Features is a bit field used to indicate specific feature support
-#>8 ubelong !0x00000002 \b, Features %#x
-# Reserved. This bit must always be set to 1.
-#>8 ubelong &0x00000002 \b, Reserved %#x
-# File Format Version for the current specification 0x00010000
-#>12 ubelong !0x00010000 \b, Version %#8.8x
-# Data Offset only found 0x200
-#>16 ubequad !0x200 \b, Data Offset %#llx
-#>16 ubequad x \b, at %#llx
-# Dynamic Disk Header cookie like cxsparse
-#>(16.Q) string x "%-.8s"
-# This field contains a Unicode string (UTF-16) of the parent hard disk filename
-#>(16.Q+64) ubequad x \b, parent name %#llx
-# Creator Application
-# vpc~Microsoft Virtual PC, vs~Microsoft Virtual Server, vbox~VirtualBox, d2v~disk2vhd
->28 string x \b, Creator %-4.4s
-# Creator Version: 0x00010000~Virtual Server 2004, 0x00050000~Virtual PC 2004
-# holds the major/minor version of the application that created the image
->32 ubeshort x %x
->34 ubeshort x \b.%x
-#>32 ubelong x \b, Version %#8.8x
-# Creator Host OS: 0x5769326B~Windows (Wi2k), 0x4D616320~Macintosh (Mac)
->36 ubelong x (
->>36 ubelong 0x5769326B \bW2k
->>36 ubelong 0x4D616320 \bMac
->>36 default x \b0x
->>>36 ubelong x \b%8.8x
-# creation Time in seconds since 1 Jan 2000 UTC~946684800 sec. since Unix Epoch
->24 bedate+946684800 x \b) %s
-# Original Size
-#>40 ubequad x \b, o.-Size %#llx
-# Current Size is same as original size, but change when disk is expanded
-#>48 ubequad x \b, Size %#llx
->48 ubequad x \b, %llu bytes
-# Disk Geometry: cylinder, heads, and sectors/track for hard disk
-#>56 ubeshort x \b, Cylinder %#x
->56 ubeshort x \b, CHS %u
-# Heads
-#>58 ubyte x \b, Heads %#x
->58 ubyte x \b/%u
-# Sectors per track
-#>59 ubyte x \b, Sectors %#x
->59 ubyte x \b/%u
-# Disk Type: 3~Dynamic hard disk
->60 ubelong !0x3 \b, type %#x
-# Checksum
-#>64 ubelong x \b, cksum %#x
-# universally unique identifier (UUID) to associate a parent with its differencing image
-#>68 ubequad x \b, id %#16.16llx
-#>76 ubequad x \b-%16.16llx
-# Saved State: 1~Saved State
->84 ubyte !0 \b, State %#x
-# Reserved 427 bytes with nils
-#>85 ubequad !0 \b, Reserved %#16.16llx
-
-# From: Joerg Jenderek
-# URL: https://msdn.microsoft.com/en-us/library/mt740058.aspx
-# Reference: https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/
-# MS-VHDX/[MS-VHDX].pdf
-# Note: extends the VHD format with new capabilities, such as a 16TB maximum size
-# TODO: find and display values like virtual size, disk size, cluster_size, etc
-# display id in GUID format
-#
-# VHDX_FILE_IDENTIFIER signature 0x656C696678646876
-0 string vhdxfile
-# VHDX_HEADER signature. 1 header is stored at offset 64KB and the other at 128KB
->0x10000 string head Microsoft Disk Image eXtended
-#>0x20000 string head \b, 2nd header
-#!:mime application/x-virtualbox-vhdx
-!:ext vhdx
-# Creator[256] like "QEMU v3.0.0", "Microsoft Windows 6.3.9600.18512"
->>8 lestring16 x \b, by %.256s
-# The Checksum field is a CRC-32C hash over the entire 4 KB structure
-#>>0x10004 ulelong x \b, CRC %#x
-# SequenceNumber
->>0x10008 ulequad x \b, sequence %#llx
-# FileWriteGuid
-#>>0x10010 ubequad x \b, file id %#llx
-#>>>0x10018 ubequad x \b-%llx
-# DataWriteGuid
-#>>0x10020 ubequad x \b, data id %#llx
-#>>>0x10028 ubequad x \b-%llx
-# LogGuid. If this field is zero, then the log is empty or has no valid entries
->>0x10030 ubequad >0 \b, log id %#llx
->>>0x10038 ubequad x \b-%llx
-# LogVersion. If not 0 there is a log to replay
->>0x10040 uleshort >0 \b, LogVersion %#x
-# Version. This field must be set to 1
->>0x10042 uleshort !1 \b, Version %#x
-# LogLength must be multiples of 1 MB
->>0x10044 ulelong/1048576 >1 \b, LogLength %u MB
-# LogOffset (normally 0x100000 when log direct after header); multiples of 1 MB
->>0x10048 ulequad !0x100000 \b, LogOffset %#llx
-# Log Entry Signature must be 0x65676F6C~loge
->>(0x10048.q) ulelong !0x65676F6C \b, NO Log Signature
->>(0x10048.q) ulelong =0x65676F6C \b; LOG
-# Log Entry Checksum
-#>>>(0x10048.q+4) ulelong x \b, Log CRC %#x
-# Log Entry Length must be a multiple of 4 KB
->>>(0x10048.q+8) ulelong/1024 >4 \b, EntryLength %u KB
-# Log Entry Tail must be a multiple of 4 KB
-#>>>(0x10048.q+12) ulelong x \b, Tail %#x
-# Log Entry SequenceNumber
-#>>>(0x10048.q+16) ulequad x \b, # %#llx
-# Log Entry DescriptorCount may be zero; only 4 bytes in other docs instead of 8
-#>>>(0x10048.q+24) ulelong x \b, DescriptorCount %#llx
-# Log Entry Reserved must be set to 0
->>>(0x10048.q+28) ulelong !0 \b, Reserved %#x
-# Log Entry LogGuid
-#>>>(0x10048.q+32) ubequad x \b, Log id %#llx
-#>>>(0x10048.q+40) ubequad x \b-%llx
-# Log Entry FlushedFileOffset should be the VHDX size when the entry is written.
-#>>>(0x10048.q+48) ulequad x \b, FlushedFileOffset %llu
-# Log Entry LastFileOffset
-#>>>(0x10048.q+56) ulequad x \b, LastFileOffset %llu
-# filling
-#>>>(0x10048.q+64) ulequad >0 \b, filling %llx
-# Reserved[4016]
-#>>0x10050 ulequad >0 \b, Reserved %#llx
-# VHDX_REGION_TABLE_HEADER Signature 0x69676572~regi at offset 192 KB and 256 KB
->0x30000 ulelong !0x69676572 \b, 1st region INVALID
->0x30000 ulelong =0x69676572 \b; region
-# region Checksum. CRC-32C hash over the entire 64-KB table
-#>>0x30004 ulelong x \b, CRC %#x
-# The EntryCount specifies number of valid entries; Found 2; This must be =< 2047.
->>0x30008 ulelong x \b, %u entries
-# reserved must be zero
-#>>0x3000C ulelong !0 \b, RESERVED %#x
-# Region Table Entry starts with identifier for the object. often BAT id
->>0x30010 use vhdx-id
-# FileOffset
->>0x30020 ulequad x \b, at %#llx
-# Length. Specifies the length of the object within the file
-#>>0x30028 ulelong x \b, Length %#x
-# 1 means region entry is required. if region not recognized, then REFUSE to load VHDX
->>0x3002C ulelong x \b, Required %u
-# 2nd region entry often metadata id
->>0x30030 use vhdx-id
-# 2nd entry FileOffset
->>0x30040 ulequad x \b, at %#llx
-# 1 means region entry is required. if region not recognized, then REFUSE to load VHDX
->>0x3004C ulelong x \b, Required %u
-# 2nd region
->>0x40000 ulelong !0x69676572 \b, 2nd region INVALID
-# check in vhdx images for known id and show names instead hexadecimal
-0 name vhdx-id
-# https://www.windowstricks.in/online-windows-guid-converter
-# 2DC27766-F623-4200-9D64-115E9BFD4A08 BAT GUID
-# 6677C22D23F600429D64115E9BFD4A08 BAT ID
->0 ubequad =0x6677C22D23F60042
->>8 ubequad =0x9D64115E9BFD4A08 \b, id BAT
-# no BAT id
->>8 default x
->>>0 use vhdx-id-hex
-# 8B7CA206-4790-4B9A-B8FE-575F050F886E Metadata region GUID
-# 06A27C8B90479A4BB8FE575F050F886E Metadata region ID
->0 ubequad =0x06A27C8B90479A4B
->>8 ubequad =0xB8FE575F050F886E \b, id Metadata
-# no Metadata id
->>8 default x
->>>0 use vhdx-id-hex
-# 2FA54224-CD1B-4876-B211-5DBED83BF4B8 Virtual Disk Size GUID
-# 2442A52F1BCD7648B2115DBED83BF4B8 Virtual Disk Size ID
-# value "virtual size" can be verified by command `qemu-img info `
->0 ubequad =0x2442A52F1BCD7648
->>8 ubequad =0xB2115DBED83BF4B8 \b, id vsize
-# no Virtual Disk Size ID
->>8 default x
->>>0 use vhdx-id-hex
-# other ids
->0 default x
->>0 use vhdx-id-hex
-# in vhdx images show id as hexadecimal
-0 name vhdx-id-hex
->0 ubequad x \b, ID %#16.16llx
->8 ubequad x \b-%16.16llx
-#
-# libvirt
-# From: Philipp Hahn <hahn@univention.de>
-0 string LibvirtQemudSave Libvirt QEMU Suspend Image
->0x10 lelong x \b, version %u
->0x14 lelong x \b, XML length %u
->0x18 lelong 1 \b, running
->0x1c lelong 1 \b, compressed
-
-0 string LibvirtQemudPart Libvirt QEMU partial Suspend Image
-# From: Alex Beregszaszi <alex@fsn.hu>
-0 string/b COWD VMWare3
->4 byte 3 disk image
->>32 lelong x (%d/
->>36 lelong x \b%d/
->>40 lelong x \b%d)
->4 byte 2 undoable disk image
->>32 string >\0 (%s)
-
-0 string/b VMDK VMware4 disk image
-0 string/b KDMV VMware4 disk image
-
-#--------------------------------------------------------------------
-# Qemu Emulator Images
-# Lines written by Friedrich Schwittay (f.schwittay@yousable.de)
-# Updated by Adam Buchbinder (adam.buchbinder@gmail.com)
-# Made by reading sources, reading documentation, and doing trial and error
-# on existing QCOW files
-0 string/b QFI\xFB QEMU QCOW Image
-!:mime application/x-qemu-disk
-
-# Uncomment the following line to display Magic (only used for debugging
-# this magic number)
-#>0 string/b x , Magic: %s
-
-# There are currently 2 Versions: "1" and "2".
-# https://www.gnome.org/~markmc/qcow-image-format-version-1.html
->4 belong x (v%d)
-
-# Using the existence of the Backing File Offset to determine whether
-# to read Backing File Information
->>12 belong >0 \b, has backing file (
-# Note that this isn't a null-terminated string; the length is actually
-# (16.L). Assuming a null-terminated string happens to work usually, but it
-# may spew junk until it reaches a \0 in some cases.
->>>(12.L) string >\0 \bpath %s
-
-# Modification time of the Backing File
-# Really useful if you want to know if your backing
-# file is still usable together with this image
->>>>20 bedate >0 \b, mtime %s)
->>>>20 default x \b)
-
-# Size is stored in bytes in a big-endian u64.
->>24 bequad x \b, %lld bytes
-
-# 1 for AES encryption, 0 for none.
->>36 belong 1 \b, AES-encrypted
-
-# https://www.gnome.org/~markmc/qcow-image-format.html
->4 belong 2 (v2)
-# Using the existence of the Backing File Offset to determine whether
-# to read Backing File Information
->>8 bequad >0 \b, has backing file
-# Note that this isn't a null-terminated string; the length is actually
-# (16.L). Assuming a null-terminated string happens to work usually, but it
-# may spew junk until it reaches a \0 in some cases. Also, since there's no
-# .Q modifier, we just use the bottom four bytes as an offset. Note that if
-# the file is over 4G, and the backing file path is stored after the first 4G,
-# the wrong filename will be printed. (This should be (8.Q), when that syntax
-# is introduced.)
->>>(12.L) string >\0 (path %s)
->>24 bequad x \b, %lld bytes
->>32 belong 1 \b, AES-encrypted
-
->4 belong 3 (v3)
-# Using the existence of the Backing File Offset to determine whether
-# to read Backing File Information
->>8 bequad >0 \b, has backing file
-# Note that this isn't a null-terminated string; the length is actually
-# (16.L). Assuming a null-terminated string happens to work usually, but it
-# may spew junk until it reaches a \0 in some cases. Also, since there's no
-# .Q modifier, we just use the bottom four bytes as an offset. Note that if
-# the file is over 4G, and the backing file path is stored after the first 4G,
-# the wrong filename will be printed. (This should be (8.Q), when that syntax
-# is introduced.)
->>>(12.L) string >\0 (path %s)
->>24 bequad x \b, %lld bytes
->>32 belong 1 \b, AES-encrypted
-
->4 default x (unknown version)
-
-0 string/b QEVM QEMU suspend to disk image
-
-# QEMU QED Image
-# https://wiki.qemu.org/Features/QED/Specification
-0 string/b QED\0 QEMU QED Image
-
-# VDI Image
-# Sun xVM VirtualBox Disk Image
-# From: Richard W.M. Jones <rich@annexia.org>
-# VirtualBox Disk Image
-0x40 ulelong 0xbeda107f VirtualBox Disk Image
->0x44 uleshort >0 \b, major %u
->0x46 uleshort >0 \b, minor %u
->0 string >\0 (%s)
->368 lequad x \b, %lld bytes
-
-0 string/b Bochs\ Virtual\ HD\ Image Bochs disk image,
->32 string x type %s,
->48 string x subtype %s
-
-0 lelong 0x02468ace Bochs Sparse disk image
-
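
Note: the "conectix" fields the entry above prints sit at fixed offsets in the 512-byte VHD footer (dynamic images keep a copy of it at offset 0, which is what the magic matches). A hedged Python sketch reading the same fields, using only the offsets given in the comments:

    import struct
    from datetime import datetime, timedelta, timezone

    VHD_EPOCH = datetime(2000, 1, 1, tzinfo=timezone.utc)   # timestamps count from 2000-01-01 UTC

    def parse_vhd_footer(buf: bytes) -> dict:
        if buf[:8] != b"conectix":
            raise ValueError("missing conectix cookie")
        (timestamp,) = struct.unpack_from(">I", buf, 24)
        creator = buf[28:32].decode("ascii", "replace").strip("\0 ")
        ver_major, ver_minor = struct.unpack_from(">HH", buf, 32)
        (host_os,) = struct.unpack_from(">I", buf, 36)
        (current_size,) = struct.unpack_from(">Q", buf, 48)
        cylinders, heads, sectors = struct.unpack_from(">HBB", buf, 56)
        (disk_type,) = struct.unpack_from(">I", buf, 60)
        return {
            "creator": creator,                               # vpc, vs, vbox, d2v, ...
            "creator_version": f"{ver_major:x}.{ver_minor:x}",
            "host_os": {0x5769326B: "W2k", 0x4D616320: "Mac"}.get(host_os, hex(host_os)),
            "created": VHD_EPOCH + timedelta(seconds=timestamp),
            "size_bytes": current_size,
            "chs": (cylinders, heads, sectors),
            "disk_type": disk_type,                           # 3 = dynamic hard disk
        }

    with open("disk.vhd", "rb") as fh:                        # hypothetical image
        print(parse_vhd_footer(fh.read(512)))
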
diff --git a/contrib/libs/libmagic/magic/Magdir/virtutech b/contrib/libs/libmagic/magic/Magdir/virtutech
deleted file mode 100644
index 410ab9ee4b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/virtutech
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: virtutech,v 1.4 2009/09/19 16:28:13 christos Exp $
-# Virtutech Compressed Random Access File Format
-#
-# From <gustav@virtutech.com>
-0 string \211\277\036\203 Virtutech CRAFF
->4 belong x v%d
->20 belong 0 uncompressed
->20 belong 1 bzipp2ed
->20 belong 2 gzipped
->24 belong 0 not clean
diff --git a/contrib/libs/libmagic/magic/Magdir/visx b/contrib/libs/libmagic/magic/Magdir/visx
deleted file mode 100644
index fe5c827d94..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/visx
+++ /dev/null
@@ -1,32 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: visx,v 1.5 2009/09/19 16:28:13 christos Exp $
-# visx: file(1) magic for Visx format files
-#
-0 short 0x5555 VISX image file
->2 byte 0 (zero)
->2 byte 1 (unsigned char)
->2 byte 2 (short integer)
->2 byte 3 (float 32)
->2 byte 4 (float 64)
->2 byte 5 (signed char)
->2 byte 6 (bit-plane)
->2 byte 7 (classes)
->2 byte 8 (statistics)
->2 byte 10 (ascii text)
->2 byte 15 (image segments)
->2 byte 100 (image set)
->2 byte 101 (unsigned char vector)
->2 byte 102 (short integer vector)
->2 byte 103 (float 32 vector)
->2 byte 104 (float 64 vector)
->2 byte 105 (signed char vector)
->2 byte 106 (bit plane vector)
->2 byte 121 (feature vector)
->2 byte 122 (feature vector library)
->2 byte 124 (chain code)
->2 byte 126 (bit vector)
->2 byte 130 (graph)
->2 byte 131 (adjacency graph)
->2 byte 132 (adjacency graph library)
->2 string .VISIX (ascii text)
diff --git a/contrib/libs/libmagic/magic/Magdir/vms b/contrib/libs/libmagic/magic/Magdir/vms
deleted file mode 100644
index 56d57ae932..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vms
+++ /dev/null
@@ -1,30 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vms,v 1.10 2017/03/17 21:35:28 christos Exp $
-# vms: file(1) magic for VMS executables (experimental)
-#
-# VMS .exe formats, both VAX and AXP (Greg Roelofs, newt@uchicago.edu)
-
-# GRR 950122: I'm just guessing on these, based on inspection of the headers
-# of three executables each for Alpha and VAX architectures. The VAX files
-# all had headers similar to this:
-#
-# 00000 b0 00 30 00 44 00 60 00 00 00 00 00 30 32 30 35 ..0.D.`.....0205
-# 00010 01 01 00 00 ff ff ff ff ff ff ff ff 00 00 00 00 ................
-#
-0 string \xb0\0\x30\0 VMS VAX executable
->44032 string PK\003\004 \b, Info-ZIP SFX archive v5.12 w/decryption
-#
-# The AXP files all looked like this, except that the byte at offset 0x22
-# was 06 in some of them and 07 in others:
-#
-# 00000 03 00 00 00 00 00 00 00 ec 02 00 00 10 01 00 00 ................
-# 00010 68 00 00 00 98 00 00 00 b8 00 00 00 00 00 00 00 h...............
-# 00020 00 00 07 00 00 00 00 00 00 00 00 00 00 00 00 00 ................
-# 00030 00 00 00 00 01 00 00 00 00 00 00 00 00 00 00 00 ................
-# 00040 00 00 00 00 ff ff ff ff ff ff ff ff 02 00 00 00 ................
-#
-# GRR this test is still too general as it catches example adressen.dbt
-0 belong 0x03000000
->8 ubelong 0xec020000 VMS Alpha executable
->>75264 string PK\003\004 \b, Info-ZIP SFX archive v5.12 w/decryption
diff --git a/contrib/libs/libmagic/magic/Magdir/vmware b/contrib/libs/libmagic/magic/Magdir/vmware
deleted file mode 100644
index cd1a9d9576..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vmware
+++ /dev/null
@@ -1,6 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vmware,v 1.8 2017/03/17 21:35:28 christos Exp $
-# VMware specific files (deduced from version 1.1 and log file entries)
-# Anthon van der Neut (anthon@mnt.org)
-0 belong 0x4d52564e VMware nvram
diff --git a/contrib/libs/libmagic/magic/Magdir/vorbis b/contrib/libs/libmagic/magic/Magdir/vorbis
deleted file mode 100644
index 49e75cb2d2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vorbis
+++ /dev/null
@@ -1,155 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vorbis,v 1.26 2020/08/22 18:30:55 christos Exp $
-# vorbis: file(1) magic for Ogg/Vorbis files
-#
-# From Felix von Leitner <leitner@fefe.de>
-# Extended by Beni Cherniavsky <cben@crosswinds.net>
-# Further extended by Greg Wooledge <greg@wooledge.org>
-#
-# Most (everything but the number of channels and bitrate) is commented
-# out with `##' as it's not interesting to the average user. The most
-# probable things advanced users would want to uncomment are probably
-# the number of comments and the encoder version.
-#
-# FIXME: The first match has been made a search, so that it can skip
-# over prepended ID3 tags. This will work for MIME type detection, but
-# won't work for detecting other properties of the file (they all need
-# to be made relative to the search). In any case, if the file has ID3
-# tags, the ID3 information will be printed, not the Ogg information,
-# so until that's fixed, this doesn't matter.
-# FIXME[2]: Disable the above for now, since search assumes text mode.
-#
-# --- Ogg Framing ---
-#0 search/1000 OggS Ogg data
-0 string OggS Ogg data
->4 byte !0 UNKNOWN REVISION %u
-##>4 byte 0 revision 0
->4 byte 0
-##>>14 lelong x (Serial %lX)
-# non-Vorbis content: FLAC (Free Lossless Audio Codec, http://flac.sourceforge.net)
->>28 string \x7fFLAC \b, FLAC audio
-# non-Vorbis content: Theora
-!:mime audio/ogg
->>28 string \x80theora \b, Theora video
-!:mime video/ogg
-# non-Vorbis content: Kate
->>28 string \x80kate\0\0\0\0 \b, Kate (Karaoke and Text)
-!:mime application/ogg
->>>37 ubyte x v%u
->>>38 ubyte x \b.%u,
->>>40 byte 0 utf8 encoding,
->>>40 byte !0 unknown character encoding,
->>>60 string >\0 language %s,
->>>60 string \0 no language set,
->>>76 string >\0 category %s
->>>76 string \0 no category set
-# non-Vorbis content: Skeleton
->>28 string fishead\0 \b, Skeleton
-!:mime video/ogg
->>>36 leshort x v%u
->>>40 leshort x \b.%u
-# non-Vorbis content: Speex
->>28 string Speex\ \ \ \b, Speex audio
-!:mime audio/ogg
-# non-Vorbis content: OGM
->>28 string \x01video\0\0\0 \b, OGM video
-!:mime video/ogg
->>>37 string/c div3 (DivX 3)
->>>37 string/c divx (DivX 4)
->>>37 string/c dx50 (DivX 5)
->>>37 string/c xvid (XviD)
-# --- First vorbis packet - general header ---
->>28 string \x01vorbis \b, Vorbis audio,
-!:mime audio/ogg
->>>35 lelong !0 UNKNOWN VERSION %u,
-##>>>35 lelong 0 version 0,
->>>35 lelong 0
->>>>39 ubyte 1 mono,
->>>>39 ubyte 2 stereo,
->>>>39 ubyte >2 %u channels,
->>>>40 lelong x %u Hz
-# Minimal, nominal and maximal bitrates specified when encoding
->>>>48 string <\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff \b,
-# The above tests if at least one of these is specified:
->>>>>52 lelong !-1
-# Vorbis RC2 has a bug which puts -1000 in the min/max bitrate fields
-# instead of -1.
-# Vorbis 1.0 uses 0 instead of -1.
->>>>>>52 lelong !0
->>>>>>>52 lelong !-1000
->>>>>>>>52 lelong x <%u
->>>>>48 lelong !-1
->>>>>>48 lelong x ~%u
->>>>>44 lelong !-1
->>>>>>44 lelong !-1000
->>>>>>>44 lelong !0
->>>>>>>>44 lelong x >%u
->>>>>48 string <\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff\xff bps
-# -- Second vorbis header packet - the comments
-# A kludge to read the vendor string. It's a counted string, not a
-# zero-terminated one, so file(1) can't read it in a generic way.
-# libVorbis is the only one in existence currently, so I detect it
-# specifically. The interesting value is the CVS date (8 decimal digits).
-# Post-RC1 Ogg files have the second header packet (and thus the version)
-# in a different place, so we must use an indirect offset.
->>>(84.b+85) string \x03vorbis
->>>>(84.b+96) string/c Xiphophorus\ libVorbis\ I \b, created by: Xiphophorus libVorbis I
->>>>>(84.b+120) string >00000000
-# Map to beta version numbers:
->>>>>>(84.b+120) string <20000508 (<beta1, prepublic)
->>>>>>(84.b+120) string 20000508 (1.0 beta 1 or beta 2)
->>>>>>(84.b+120) string >20000508
->>>>>>>(84.b+120) string <20001031 (beta2-3)
->>>>>>(84.b+120) string 20001031 (1.0 beta 3)
->>>>>>(84.b+120) string >20001031
->>>>>>>(84.b+120) string <20010225 (beta3-4)
->>>>>>(84.b+120) string 20010225 (1.0 beta 4)
->>>>>>(84.b+120) string >20010225
->>>>>>>(84.b+120) string <20010615 (beta4-RC1)
->>>>>>(84.b+120) string 20010615 (1.0 RC1)
->>>>>>(84.b+120) string 20010813 (1.0 RC2)
->>>>>>(84.b+120) string 20010816 (RC2 - Garf tuned v1)
->>>>>>(84.b+120) string 20011014 (RC2 - Garf tuned v2)
->>>>>>(84.b+120) string 20011217 (1.0 RC3)
->>>>>>(84.b+120) string 20011231 (1.0 RC3)
-# Some pre-1.0 CVS snapshots still had "Xiphphorus"...
->>>>>>(84.b+120) string >20011231 (pre-1.0 CVS)
-# For the 1.0 release, Xiphophorus is replaced by Xiph.Org
->>>>(84.b+96) string/c Xiph.Org\ libVorbis\ I \b, created by: Xiph.Org libVorbis I
->>>>>(84.b+117) string >00000000
->>>>>>(84.b+117) string <20020717 (pre-1.0 CVS)
->>>>>>(84.b+117) string 20020717 (1.0)
->>>>>>(84.b+117) string 20030909 (1.0.1)
->>>>>>(84.b+117) string 20040629 (1.1.0 RC1)
->>>>>>(84.b+117) string 20050304 (1.1.2)
->>>>>>(84.b+117) string 20070622 (1.2.0)
->>>>>>(84.b+117) string 20090624 (1.2.2)
->>>>>>(84.b+117) string 20090709 (1.2.3)
->>>>>>(84.b+117) string 20100325 (1.3.1)
->>>>>>(84.b+117) string 20101101 (1.3.2)
->>>>>>(84.b+117) string 20120203 (1.3.3)
->>>>>>(84.b+117) string 20140122 (1.3.4)
->>>>>>(84.b+117) string 20150105 (1.3.5)
-
-# non-Vorbis content: Opus https://tools.ietf.org/html/rfc7845#section-5
->>28 string OpusHead \b, Opus audio,
-!:mime audio/ogg
->>>36 ubyte >0x0F UNKNOWN VERSION %u,
->>>36 ubyte&0x0F !0 version 0.%u,
->>>>46 ubyte >1
->>>>>46 ubyte !255 unknown channel mapping family %u,
->>>>>37 ubyte x %u channels
->>>>46 ubyte 0
->>>>>37 ubyte 1 mono
->>>>>37 ubyte 2 stereo
->>>>46 ubyte 1
->>>>>37 ubyte 1 mono
->>>>>37 ubyte 2 stereo
->>>>>37 ubyte 3 linear surround
->>>>>37 ubyte 4 quadraphonic
->>>>>37 ubyte 5 5.0 surround
->>>>>37 ubyte 6 5.1 surround
->>>>>37 ubyte 7 6.1 surround
->>>>>37 ubyte 8 7.1 surround
->>>>40 lelong !0 \b, %u Hz (Input Sample Rate)
\ No newline at end of file
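
Note: like the entry above, the sketch below assumes the "\x01vorbis" identification packet starts right at byte 28 of the first Ogg page (no prepended ID3 tags, single-segment first packet); it illustrates the fixed offsets only and is not a general Ogg demuxer. The bitrate field order follows the Vorbis I specification (maximum, nominal, minimum).

    import struct

    def parse_ogg_vorbis(buf: bytes) -> dict | None:
        if buf[:4] != b"OggS" or buf[28:35] != b"\x01vorbis":
            return None
        (version,) = struct.unpack_from("<I", buf, 35)
        channels = buf[39]
        rate, br_max, br_nominal, br_min = struct.unpack_from("<I3i", buf, 40)
        return {
            "vorbis_version": version,
            "channels": channels,
            "sample_rate_hz": rate,
            # -1 (or 0 / -1000 from some old encoders) means "not specified"
            "bitrates_max_nominal_min": (br_max, br_nominal, br_min),
        }

    with open("audio.ogg", "rb") as fh:     # hypothetical file
        print(parse_ogg_vorbis(fh.read(64)))
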
diff --git a/contrib/libs/libmagic/magic/Magdir/vxl b/contrib/libs/libmagic/magic/Magdir/vxl
deleted file mode 100644
index 0fdc68a7f0..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/vxl
+++ /dev/null
@@ -1,14 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: vxl,v 1.4 2009/09/19 16:28:13 christos Exp $
-# VXL: file(1) magic for VXL binary IO data files
-#
-# from Ian Scott <scottim@sf.net>
-#
-# VXL is a collection of C++ libraries for Computer Vision.
-# See the vsl chapter in the VXL Book for more info
-# http://www.isbe.man.ac.uk/public_vxl_doc/books/vxl/book.html
-# http:/vxl.sf.net
-
-2 lelong 0x472b2c4e VXL data file,
->0 leshort >0 schema version no %d
diff --git a/contrib/libs/libmagic/magic/Magdir/warc b/contrib/libs/libmagic/magic/Magdir/warc
deleted file mode 100644
index 5942867ddf..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/warc
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: warc,v 1.4 2019/04/19 00:42:27 christos Exp $
-# warc: file(1) magic for WARC files
-
-0 string WARC/ WARC Archive
->5 string x version %.4s
-!:mime application/warc
-
-#------------------------------------------------------------------------------
-# Arc File Format from Internet Archive
-# see https://www.archive.org/web/researcher/ArcFileFormat.php
-0 string filedesc:// Internet Archive File
-!:mime application/x-ia-arc
->11 search/256 \x0A \b
->>&0 ubyte >0 \b version %c
diff --git a/contrib/libs/libmagic/magic/Magdir/weak b/contrib/libs/libmagic/magic/Magdir/weak
deleted file mode 100644
index 6dc1793c92..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/weak
+++ /dev/null
@@ -1,16 +0,0 @@
-
-#------------------------------------------------------------------------------
-# weak: file(1) magic for very weak magic entries, disabled by default
-#
-# These entries are so weak that they might interfere with the identification
-# of other formats. Examples include:
-# - Only identify for 1 or 2 bytes
-# - Match against very wide range of values
-# - Match against generic word in some spoken languages (e.g. English)
-
-# Summary: Computer Graphics Metafile
-# Extension: .cgm
-#0 beshort&0xffe0 0x0020 binary Computer Graphics Metafile
-#0 beshort 0x3020 character Computer Graphics Metafile
-
-#0 string =!! Bennet Yee's "face" format
diff --git a/contrib/libs/libmagic/magic/Magdir/web b/contrib/libs/libmagic/magic/Magdir/web
deleted file mode 100644
index a0d26e67fb..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/web
+++ /dev/null
@@ -1,18 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: web,v 1.2 2022/10/29 16:02:37 christos Exp $
-
-# http://www.rdfhdt.org/
-# From Christoph Biedl
-# http://www.rdfhdt.org/hdt-internals/
-# https://github.com/rdfhdt/hdt-cpp
-
-0 string $HDT\x01 HDT file (binary compressed indexed RDF triples) type 1
-!:mime application/vnd.hdt
-!:ext hdt
-
-0 string [Adblock\040Plus Adblock Plus
->&1 regex [0-9.]+ %s
->1 string x rules file
->10 search/100 Version:
->>&1 regex [0-9]+ \b, version %s
diff --git a/contrib/libs/libmagic/magic/Magdir/webassembly b/contrib/libs/libmagic/magic/Magdir/webassembly
deleted file mode 100644
index 469b45e22b..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/webassembly
+++ /dev/null
@@ -1,17 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: webassembly,v 1.4 2022/08/16 11:16:39 christos Exp $
-# webassembly: file(1) magic for WebAssembly modules
-#
-# WebAssembly is a virtual architecture developed by a W3C Community
-# Group at https://webassembly.org/. The file extension is .wasm, and
-# the MIME type is application/wasm.
-#
-# https://webassembly.org/docs/binary-encoding/ is the main
-# document describing the binary format.
-# From: Pip Cet <pipcet@gmail.com> and Joel Martin
-
-0 string \0asm WebAssembly (wasm) binary module
->4 lelong =1 version %#x (MVP)
-!:mime application/wasm
-!:ext wasm
->4 lelong >1 version %#x
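
Note: the wasm check above is just the 4-byte "\0asm" magic followed by a little-endian 32-bit version word; a minimal Python equivalent:

    import struct

    def wasm_version(head: bytes) -> int | None:
        if len(head) < 8 or head[:4] != b"\0asm":
            return None
        return struct.unpack_from("<I", head, 4)[0]

    print(wasm_version(b"\0asm\x01\x00\x00\x00"))   # 1 (MVP)
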
diff --git a/contrib/libs/libmagic/magic/Magdir/windows b/contrib/libs/libmagic/magic/Magdir/windows
deleted file mode 100644
index f58ce3e5a5..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/windows
+++ /dev/null
@@ -1,1822 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: windows,v 1.63 2023/07/17 16:56:13 christos Exp $
-# windows: file(1) magic for Microsoft Windows
-#
-# This file is mainly reserved for files where programs
-# using them are run almost always on MS Windows 3.x or
-# above, or files only used exclusively in Windows OS,
-# where there is no better category to allocate for.
-# For example, even though WinZIP runs almost exclusively on
-# Windows, it is better to treat its files as "archive" instead.
-# For formats usable in DOS, such as the generic executable
-# format, please specify them under the "msdos" file.
-#
-
-
-# Summary: Outlook Express DBX file
-# Created by: Christophe Monniez
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Outlook_Express_Database
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dbx.trid.xml
-# https://sourceforge.net/projects/ol2mbox/files/LibDBX/
-# v1.0.4/libdbx_1.0.4.tar.gz/FILE-FORMAT
-# Note: called "Outlook Express Database" by TrID and DROID via PUID fmt/838 fmt/839
-# and partly verified by `undbx --verbosity 4 Posteingang.dbx`
-0 string \xCF\xAD\x12\xFE
-# skip DROID fmt-838-signature-id-1193.dbx fmt-839-signature-id-1194.dbx by check for valid file size
->0x7C ulelong >0 MS Outlook Express DBX file
-#!:mime application/octet-stream
-#!:mime application/vnd.ms-outlook
-!:mime application/x-ms-dbx
-!:ext dbx
->>4 byte =0xC5 \b, message database
->>4 byte =0xC6 \b, folder database
->>4 byte =0xC7 \b, account information
->>4 byte =0x30 \b, offline database
-# version like: 5.2 5.5 (typical)
->>20 ulequad !0x0000000500000005 \b, version
-# major version
->>>24 ulelong x %u
-# minor version
->>>20 ulelong x \b.%u
-# CLSID: 6F74FDC5-E366-11d1-9A4E-00C04FA309D4~Message 6F74FDC6-E366-11D1-9A4E-00C04FA309D4~Folder
-# 26FE9D30-1A8F-11D2-AABF-006097D474C4~offline
-#>>4 guid x \b, CLSID %s
-# file size; total size of file; sometimes real size a little bit higher
->>0x7C ulelong x \b, ~ %u bytes
-# highest Email ID; the next email will have a number one higher than this
->>0x5c ulelong x \b, highest ID %#x
-# item count; number of items stored in this DBX file
->>0xC4 ulelong x \b, %u item
-# plural s
->>0xC4 ulelong !1 \bs
-# index pointer; file offset pointing to a page of Data Indexes
->>0xE4 ulelong >0 \b, index pointer %#x
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Nickfile
-# https://www.nirsoft.net/utils/outlook_nk2_edit.html
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/n/nk2.trid.xml
-# https://github.com/libyal/libnk2/blob/main/documentation
-# Nickfile%20(NK2)%20format.asciidoc
-# Note: called "Outlook Nickfile" by TrID & TestDisk and
-# "Outlook Nickname File" by Microsoft Outlook and
-# "Outlook AutoComplete File" by Nirsoft NK2Edit
-# partly verified by NK2Edit Raw Text Edit Mode
-0 ubelong 0x0DF0ADBA MS Outlook Nickfile
-#!:mime application/octet-stream
-#!:mime application/vnd.ms-outlook
-!:mime application/x-ms-nickfile
-!:ext nk2/dat/bak
-# nick is used by "older" Outlook; dat is used by "newer" Outlook (probably 2010 - 2016); bak is used for backup
-#!:ext nick/nk2/dat/bak
-# Unknown; probably a version indicator like: 0000000Ah 0000000Ch
->4 ulelong x \b, probably version %u
-# Unknown2; probably a version indicator like: 1 0
->8 ulelong x \b.%u
-# number of rows (nickname or alias items) in file
->12 ulelong x \b, %u items
-# number of item entries/columns/properties value like: 17h
->16 ulelong x \b, %u entries
-# value type/property tag: 001Fh~4 bytes for data size of UTF-16 LE string
->20 uleshort x \b, value type %#4.4x
-# entry type/property identifier: 6001h~PR_DOTSTUFF_STATE/PR_NICK_NAME_W
->22 uleshort x \b, entry type %#4.4x
-# Reserved like: 0013FD90h
-#>24 ulelong x \b, reserved %#8.8x
-# value data array/Irrelevant Union like: 0000000004E31A80h
-#>28 ulequad x \b, data %#16.16llx
-# UTF-16
->20 uleshort =0x001F
-# unicode string bytes like: 2Ch
->>36 ulelong x \b, %u bytes
-# unicode string value PT_UNICODE like: janesmith@contoso.org
->>40 lestring16 x "%s"
-
-# Summary: Windows crash dump
-# Created by: Andreas Schuster (https://computer.forensikblog.de/)
-# https://web.archive.org/web/20101125060849/https://computer.forensikblog.de/en/2008/02/64bit_magic.html
-# Modified by (1): Abel Cheung (Avoid match with first 4 bytes only)
-# Modified by (2): Joerg Jenderek (additional fields, extension, URL)
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dmp.trid.xml
-# https://gitlab.com/qemu-project/qemu/-/blob/master/include/qemu/win_dump_defs.h
-# Note: called "Windows memory dump" by TrID
-# and verified by, e.g., Windows Kit `Dumpchk.exe 043022-18703-01.dmp`
-# and partly by NirSoft `BlueScreenView.exe 043022-18703-01.dmp`
-# char Signature[4]
-0 string PAGE
-# char ValidDump[4]
->4 string DUMP MS Windows 32bit crash dump
-#!:mime application/octet-stream
-!:mime application/x-ms-dmp
-# like: Mini111013-01.dmp
-!:ext dmp
-# major version like: 15
->>8 ulelong x \b, version %u
-# minor version like: 2600
->>12 ulelong x \b.%u
-# DirectoryTableBase like: 709000
-#>>16 ulelong x \b, DirectoryTableBase %#x
-# PfnDatabase like: 805620c8
-#>>20 ulelong x \b, PfnDatabase %#x
-# PsLoadedModuleList like: 8055d720
-#>>24 ulelong x \b, PsLoadedModuleList %#x
-# PsActiveProcessHead like:805638b8
-#>>28 ulelong x \b, PsActiveProcessHead %#x
-# MachineImageType like: 14c (intel x86)
->>32 ulelong !0x14c \b, MachineImageType %#x
-# NumberProcessors like: 2
->>36 ulelong x \b, %u processors
-# BugcheckCode like: e2
-#>>40 ulelong x \b, BugcheckCode %#x
-# BugcheckParameter1 like: 0
-#>>44 ulelong x \b, BugcheckParameter1 %#x
-# BugcheckParameter2 like: 0
-#>>48 ulelong x \b, BugcheckParameter2 %#x
-# BugcheckParameter3 like: 0
-#>>52 ulelong x \b, BugcheckParameter3 %#x
-# BugcheckParameter4 like: 0
-#>>56 ulelong x \b, BugcheckParameter4 %#x
-# VersionUser[32]; like "PAGEPAGEPAGEPAGEPAGEPAGEPAGEPAGE" ""
-#>>60 string x \b, VersionUser "%.32s"
-# uint32_t reserved0 like: 45474101
-#>>92 ulelong x \b, reserved0 %#x
->>0x05c byte 0 \b, no PAE
->>0x05c byte 1 \b, PAE
-# KdDebuggerDataBlock like: 8054d2e0
-#>>96 ulelong x \b, KdDebuggerDataBlock %#x
-# uint8_t PhysicalMemoryBlockBuffer[700]
-# WinDumpPhyMemDesc32 NumberOfRuns like: 45474150
-#>>100 ulelong x \b, NumberOfRuns %#x
-# WinDumpPhyMemDesc32 uint32_t NumberOfPages like: 1162297680
-#>>104 ulelong x \b, NumberOfPages %#x
-# WinDumpPhyMemRun32 Run[86]; 688 bytes
-#>>108 ulelong x \b, BasePage %#x
-#>>112 ulelong x \b, PageCount %#x
-# uint8_t reserved1[3200]
-#>>800 string x \b, reserved "%s"
-#>>4000 ulelong x \b, RequiredDumpSpace %#x
-# uint8_t reserved2[92];
-#>>4004 string x \b, reserved2 "%s"
->>0xf88 lelong 1 \b, full dump
->>0xf88 lelong 2 \b, kernel dump
->>0xf88 lelong 3 \b, small dump
-# like: 4
->>0xf88 lelong >3 \b, dump type (%#x)
-# WinDumpPhyMemDesc32 uint32_t NumberOfPages like: 1162297680
-# GRR: IS THIS TRUE? VALUE IS SOMETIMES VERY HIGH!
-#>>104 ulelong x \b, NumberOfPages %#x
->>0x068 lelong x \b, %d pages
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/d/dmp-64.trid.xml
-# Note: called "Windows 64bit Memory Dump" by TrID
-# char ValidDump[4]
->4 string DU64 MS Windows 64bit crash dump
-#!:mime application/octet-stream
-!:mime application/x-ms-dmp
-# like: c:\Windows\Minidump\020322-18890-01.dmp c:\Windows\MEMORY.DMP
-!:ext dmp
-# major version like: 15
->>8 ulelong x \b, version %u
-# minor version like: 9600 19041 22621
->>12 ulelong x \b.%u
-# DirectoryTableBase like: 001ab000
-#>>16 ulequad x \b, DirectoryTableBase %#llx
-# PfnDatabase like: fffffa8000000000
-#>>24 ulequad x \b, PfnDatabase %#llx
-# PsLoadedModuleList like: fffff800c553f650
-#>>32 ulequad x \b, PsLoadedModuleList %#llx
-# PsActiveProcessHead like: fffff800c5525400
-#>>40 ulequad x \b, PsActiveProcessHead %#llx
-# MachineImageType like: 00008664
->>48 ulelong !0x8664 \b, MachineImageType %#x
-# NumberProcessors like: 2 4
->>52 ulelong x \b, %u processors
-# BugcheckCode like: 1000007e
-#>>56 ulelong x \b, BugcheckCode %#x
-# unused0
-#>>60 ulelong x \b, unused0 %#x
-# BugcheckParameter1 like: ffffffffc0000005
-#>>64 ulequad x \b, BugcheckParameter1 %#llx
-# BugcheckParameter2 like: fffff801abb2158f
-#>>72 ulequad x \b, BugcheckParameter2 %#llx
-# BugcheckParameter3 like: ffffd000290d4288
-#>>80 ulequad x \b, BugcheckParameter3 %#llx
-# BugcheckParameter4 like: ffffd000290d3aa0
-#>>88 ulequad x \b, BugcheckParameter4 %#llx
-# VersionUser[32]; like "" "PAGEPAGEPAGEPAGEPAGEPAGEPAGEPAGE" ""
-#>>96 string x \b, VersionUser "%.32s"
-# KdDebuggerDataBlock like: fffff800c550c530
-#>>128 ulequad x \b, KdDebuggerDataBlock %#llx
-# uint8_t PhysicalMemoryBlockBuffer[704]
-# WinDumpPhyMemDesc64 NumberOfRuns like: 6 7 0x45474150
-#>>136 ulelong x \b, NumberOfRuns %#x
-# WinDumpPhyMemDesc64 unused like: 0 0x45474150
-#>>140 ulelong x \b, unused %#x
-# WinDumpPhyMemRun64 Run[43] BasePage like: 1
-#>>152 ulequad x \b, BasePage %#llx
-# WinDumpPhyMemRun64 Run[43] PageCount like: 57h
-#>>160 ulequad x \b, PageCount %#llx
-# uint8_t ContextBuffer[3000] like: "" "\001" "\0207J\266\001\340\377\377&8\007\312"
-#>>840 string x \b, ContextBuffer "%s"
-# WinDumpExceptionRecord ExceptionCode
-#>>3840 ulelong x \b, ExceptionCode %#x
-# WinDumpExceptionRecord ExceptionFlags
-#>>3844 ulelong x \b, ExceptionFlags %#x
-# WinDumpExceptionRecord ExceptionRecord
-#>>3848 ulequad x \b, ExceptionRecord %#llx
-# WinDumpExceptionRecord ExceptionAddress
-#>>3856 ulequad x \b, ExceptionAddress %#llx
-# WinDumpExceptionRecord NumberParameters
-#>>3864 ulelong x \b, NumberParameters %#x
-# WinDumpExceptionRecord unused
-#>>3868 ulelong x \b, unused %#x
-# WinDumpExceptionRecord ExceptionInformation[15]
-#>>3872 ulequad x \b, ExceptionInformation[0] %#llx
-# https://learn.microsoft.com/en-us/troubleshoot/windows-server/performance/memory-dump-file-options
-# but DumpType like: 4~small 5~full (MEMORY.DMP) 6~kernel (MEMORY.DMP)
->>0xf98 ulelong x \b,
->>>0xf98 lelong 5 full dump
->>>0xf98 lelong 6 kernel dump
->>>0xf98 lelong 4 small dump
-# This probably never occur
->>>0xf98 default x DumpType
->>>>0xf98 ulelong x (%#x)
-# WinDumpPhyMemDesc64 uint64_t NumberOfPages like: 3142425 8341923 8366500 1162297680 4992030524978970960
-# GRR: IS THIS TRUE? VALUE IS SOMETIMES VERY HIGH!
->>0x090 lequad x \b, %lld pages
-
-# Summary: Vista Event Log
-# Created by: Andreas Schuster (https://computer.forensikblog.de/)
-# Update: Joerg Jenderek
-# URL: https://github.com/libyal/libevtx/blob/main/documentation/Windows%20XML%20Event%20Log%20(EVTX).asciidoc
-# Reference (1): https://web.archive.org/web/20110803085000/
-# https://computer.forensikblog.de/en/2007/05/some_magic.html
-# http://mark0.net/download/triddefs_xml.7z/defs/e/evtx.trid.xml
-# Note: called "Vista Event Log" by TrID and "Event Log" by Windows
-# verified partly by `wevtutil.exe gli /lf:true dumpfile.evtx`
-0 string ElfFile\0 MS Windows
-#!:mime application/octet-stream
-!:mime application/x-ms-evtx
-!:ext evtx
-# Major+Minor format version: 3.1~Vista and later 3.2~Windows 10 (2004) and later
->0x24 ulelong =0x00030001 Vista-8.1 Event Log
->0x24 ulelong !0x00030001 10-11 Event Log, version
->>0x26 uleshort x %u
->>0x24 uleshort x \b.%u
->0x2a leshort x \b, %d chunks
->>0x10 lelong x \b (no. %d in use)
->0x18 lelong >1 \b, next record no. %d
->0x18 lelong =1 \b, empty
->0x78 lelong &1 \b, DIRTY
->0x78 lelong &2 \b, FULL
-
-# Summary: Windows Event Trace Log
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/ETL
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/e/etl.trid.xml
-# https://www.geoffchappell.com/studies/windows/km/ntoskrnl/api/etw/tracelog/trace_logfile_header.htm
-# Note: called "Window tracing/diagnostic binary log" by TrID
-# verified by `tracerpt.EXE Wifi.etl -of EVTX`
-# and by etl-parser `etl2xml --input AMSITrace.etl --output AMSITrace.xml`
-# Every ETL file begins with a WMI_BUFFER_HEADER, a SYSTEM_TRACE_HEADER and a TRACE_LOGFILE_HEADER
-0 ubyte 0
-# look for the corresponding UTF-16 encoded file name extension like in: boot_BASE+CSWITCH_1.etl
->0 search/0x699087/b .\0e\0t\0l\0\0\0
-# GRR: line above only works if FILE_BYTES_MAX in ../../src/file.h is raised above 699086h (6.59 MiB)
->>0 use trace-etl
-# display information of Windows Performance Analyzer Trace File (file name)
-0 name trace-etl
->0 ubyte x Windows Event Trace Log
-#!:mime application/x-ms-etl
-# http://extension.nirsoft.net/etl
-!:mime application/etl
-!:ext etl
-# look for DOS drive letter part of log file name like: PhotosAppTracing_startedInBGMode.etl
->0 search/0x2b4/sb :\0\x5c\0
-# like: "c:\Windows\Logs\NetSetup\service.0.etl" "C:\Windows\System32\LogFiles\WMI\Wifi.etl"
->>&-2 lestring16 x "%s"
-
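-# A rough Python sketch (not part of the original magic file; the helper name
-# etl_logfile_name is illustrative) mirroring the search above: find the
-# UTF-16LE drive-letter part ":\" of the stored log file name near the start
-# of an ETL file and decode it up to its UTF-16 NUL terminator.
-def etl_logfile_name(path, window=0x2b4):
-    with open(path, 'rb') as f:
-        data = f.read(window + 0x400)
-    pos = data.find(b':\x00\\\x00')          # UTF-16LE ":\"
-    if pos < 2 or pos > window:
-        return None
-    chars = []
-    for i in range(pos - 2, len(data) - 1, 2):  # step back to the drive letter
-        unit = data[i:i + 2]
-        if unit == b'\x00\x00':
-            break
-        chars.append(unit)
-    return b''.join(chars).decode('utf-16-le', errors='replace')
-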
-# Summary: Windows System Deployment Image
-# Created by: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/System_Deployment_Image
-# Reference: http://skolk.livejournal.com/1320.html
-0 string $SDI
->4 string 0001 System Deployment Image
-!:mime application/x-ms-sdi
-#!:mime application/octet-stream
-# \Boot\boot.sdi
-!:ext sdi
-# MDBtype: 0~Unspecified 1~RAM 2~ROM
->>8 ulequad !0 \b, MDBtype %#llx
-# BootCodeOffset
->>16 ulequad !0 \b, BootCodeOffset %#llx
-# BootCodeSize
->>24 ulequad !0 \b, BootCodeSize %#llx
-# VendorID
->>32 ulequad !0 \b, VendorID %#llx
-# DeviceID
->>40 ulequad !0 \b, DeviceID %#llx
-# DeviceModel
->>48 ulequad !0 \b, DeviceModel %#llx
->>>56 ulequad !0 \b%llx
-# DeviceRole
->>64 ulequad !0 \b, DeviceRole %#llx
-# Reserved1; reserved fields and gaps between BLOBs are padded with \0
-#>>72 ulequad !0 \b, Reserved1 %#llx
-# RuntimeGUID
->>80 ulequad !0 \b, RuntimeGUID %#llx
->>>88 ulequad !0 \b%llx
-# RuntimeOEMrev
->>96 ulequad !0 \b, RuntimeOEMrev %#llx
-# Reserved2
-#>>104 ulequad !0 \b, Reserved2 %#llx
-# BLOB alignment value in pages, as specified in sdimgr /pack: 1~4K 2~8k
->>112 ulequad !0 \b, PageAlignment %llu
-# Reserved3[48]
-#>>120 ulequad !0 \b, Reserved3 %#llx
-# SDI checksum 39h
->>0x1f8 ulequad x \b, checksum %#llx
-# BLOBtype[8] \0-padded: PART, WIM , BOOT, LOAD, DISK
->>0x400 string >\0 \b, type %-3.8s
-# 0~non-filesystem 7~NTFS 6~BIGFAT
->>>0x420 ulequad !0 (%#llx)
-# ATTRibutes
->>>0x408 ulequad !0 %#llx attributes
-# Offset
->>>0x410 ulequad x at %#llx
-# print 1 space after size and then handles NTFS boot sector by ./filesystems
->>>0x418 ulequad >0 %llu bytes
->>>>(0x410.l) indirect x
-# 2nd BLOB: WIM
->>0x440 string >\0 \b, type %-3.8s
->>>0x428 ulequad !0 (%#llx)
-# ATTRibutes
->>>0x448 ulequad !0 %#llx attributes
-# Offset
->>>0x450 ulequad x at %#llx
->>>0x458 ulequad >0 %llu bytes
->>>>(0x450.l) indirect x
-# 3rd BLOB
->>0x480 string >\0 \b, type %-3.8s
-
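-# A minimal Python sketch (not part of the original magic file; the helper name
-# sdi_blobs is illustrative) listing the BLOB directory of a System Deployment
-# Image with the layout used above: 0x40-byte entries starting at 0x400 with
-# type at +0x00, attributes at +0x08, offset at +0x10 and size at +0x18.
-import struct
-
-def sdi_blobs(path):
-    with open(path, 'rb') as f:
-        hdr = f.read(0x400 + 16 * 0x40)
-    if hdr[:8] != b'$SDI0001':
-        raise ValueError('not a System Deployment Image')
-    blobs = []
-    for off in range(0x400, len(hdr) - 0x3f, 0x40):
-        btype = hdr[off:off + 8].rstrip(b'\x00 ').decode('ascii', 'replace')
-        if not btype:
-            break
-        attributes, offset, size = struct.unpack_from('<8xQQQ', hdr, off)
-        blobs.append((btype, attributes, offset, size))
-    return blobs
-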
-# Summary: Windows boot status log BOOTSTAT.DAT
-# From: Joerg Jenderek
-# Reference: https://www.geoffchappell.com/notes/windows/boot/bsd.htm
-# Note: mainly refers to older Windows like Vista; sometimes
-# BOOTSTAT.DAT only contains nulls or invalid data
-# checking for valid version below 5
-0 ulelong <5
-# skip many ISO images by checking for valid 64 KiB file size
->8 ulelong =0x00010000
->>0 use bootstat-dat
-# display information of BOOTSTAT.DAT
-0 name bootstat-dat
->0 ulelong x Windows boot log
-#!:mime application/octet-stream
-!:mime application/x-ms-dat
-# BOOTSTAT.DAT in BOOT subdirectory
-!:ext dat
-# apparently a version number: 2 for older Windows like Vista; 3 or 4 for Windows 10
->0 ulelong >2 \b, version %u
-# apparently the size of the header: often 10h in older Windows, 14h, 18h
->4 ulelong !0x10 \b, header size %#x
-#>4 ulelong !0x10 \b, header size %u
-# apparently the size of the file: always 0x00010000~64KiB
-# the file is acceptable to BOOTMGR only if it is exactly 64 KiB
->8 ulelong !0x00010000 \b, file size %#x
-# size of valid data, in bytes: C8h 50h 172h 5D5Ch
->0xc ulelong x \b, %#x valid bytes
-# skip header and jump to first bootstat entry and display information
->(0x4.l-1) ubyte x
->>&0 use bootstat-entry
-# jump to first entry again because pointers are invalid after "use"
->(0x4.l-1) ubyte x
-# by 1st entry size jump to 2nd entry and display information
->>&(&0x18.l-1) ubyte x
->>>&0 use bootstat-entry
-# jump to possible 3rd boot entry and display information
-# >(0x4.l-1) ubyte x
-# >>&(&0x18.l-1) ubyte x
-# >>>&(&0x18.l-1) ubyte x
-# >>>>&0 use bootstat-entry
-# display BOOTSTAT.DAT entry
-0 name bootstat-entry
-#>0x00 ubequad x \b, ENTRY %16.16llx
-# size of entry, in bytes: 40h(init) 78h(launched) 9Ch
-#>0x18 ulelong x \b; entry size %u
->0x18 ulelong x \b; entry size %#x
-# time stamp, in seconds
->0x00 ulelong x \b, %#x seconds
-# always zero, significance unknown
->0x04 ulelong !0 \b, not null %u
-# GUID of event source; but empty if event source is BOOTMGR
->0x08 ubequad !0 \b, GUID %#16.16llx
->>0x10 ubequad x \b%16.16llx
-# severity code: 1~informational 3~errors
->0x1C ulelong !1 \b, severity %#x
-# apparently a version number: 2
->0x20 ulelong !2 \b, version %u
-# event identifier 1~log file initialised 11h~boot application launched
-#>0x24 ulelong x \b, event %#x
->0x24 ulelong !1
->>0x24 ulelong !0x11 \b, event %#x
-# entry data; size depends on event identifier
-#>0x28 ubequad x \b, data %#16.16llx
->0x24 ulelong =0x1 \b, Init
-# always 0, significance unknown
->>0x34 uleshort !0 \b, not null %u
-# always 7, significance unknown
->>0x36 uleshort !7 \b, not seven %u
-# year
->>0x28 uleshort x %u
-# month
->>0x2A uleshort x \b-%u
-# day
->>0x2C uleshort x \b-%u
-# hour
->>0x2E uleshort x %u
-# minute
->>0x30 uleshort x \b:%u
-# second
->>0x32 uleshort x \b:%u
-# boot application launched
->0x24 ulelong =0x11 \b, launched
-# type of start: 0 normally, 1 or 2 maybe in a recovery sequence
->>0x38 uleshort !0 \b, type %u
-# pathname of boot application, as null-terminated Unicode string; typically
-# \Windows\system32\winload.exe \Windows\system32\winload.efi
->>0x3C lestring16 x %s
-
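-# A minimal Python sketch (not part of the original magic file; the helper name
-# bootstat_events is illustrative) walking BOOTSTAT.DAT the same way as above:
-# header size at offset 4, valid data size at offset 0xc, and the size of each
-# entry at offset 0x18 inside the entry.
-import struct
-
-def bootstat_events(path):
-    with open(path, 'rb') as f:
-        data = f.read(0x10000)
-    version, header_size, file_size, valid = struct.unpack_from('<4L', data, 0)
-    events = []
-    pos = header_size
-    while pos + 0x28 <= min(valid, len(data)):
-        entry_size, severity, entry_version, event_id = \
-            struct.unpack_from('<4L', data, pos + 0x18)
-        if entry_size == 0:
-            break
-        events.append({'offset': pos, 'size': entry_size,
-                       'severity': severity, 'event': event_id})
-        pos += entry_size
-    return events
-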
-# Summary: Windows Error Report text files
-# URL: https://en.wikipedia.org/wiki/Windows_Error_Reporting
-# Reference: https://www.nirsoft.net/utils/app_crash_view.html
-# Created by: Joerg Jenderek
-# Note: in directories %ProgramData%\Microsoft\Windows\WER\{ReportArchive,ReportQueue}
-# %LOCALAPPDATA%\Microsoft\Windows\WER\{ReportArchive,ReportQueue}
-0 lestring16 Version=
->22 lestring16 EventType Windows Error Report
-!:mime text/plain
-# Report.wer
-!:ext wer
-
-# Summary: Windows 3.1 group files
-# Extension: .grp
-# Created by: unknown
-0 string \120\115\103\103 MS Windows 3.1 group files
-
-
-# Summary: Old format help files
-# URL: https://en.wikipedia.org/wiki/WinHelp
-# Reference: https://www.oocities.org/mwinterhoff/helpfile.htm
-# Update: Joerg Jenderek
-# Created by: Dirk Jagdmann <doj@cubic.org>
-#
-# check and then display version and date inside MS Windows HeLP file fragment
-0 name help-ver-date
-# look for Magic of SYSTEMHEADER
->0 leshort 0x036C
-# version Major 1 for right file fragment
->>4 leshort 1 Windows
-# print non empty string above to avoid error message
-# Warning: Current entry does not yet have a description for adding a MIME type
-!:mime application/winhelp
-!:ext hlp
-# version Minor of help file format is hint for windows version
->>>2 leshort 0x0F 3.x
->>>2 leshort 0x15 3.0
->>>2 leshort 0x21 3.1
->>>2 leshort 0x27 x.y
->>>2 leshort 0x33 95
->>>2 default x y.z
->>>>2 leshort x %#x
-# to complete message string like "MS Windows 3.x help file"
->>>2 leshort x help
-# GenDate often older than file creation date
->>>6 ldate x \b, %s
-#
-# Magic for HeLP files
-0 lelong 0x00035f3f
-# ./windows (version 5.25) labeled the entry as "MS Windows 3.x help file"
-# file header magic 0x293B at DirectoryStart+9
->(4.l+9) uleshort 0x293B MS
-# look for @VERSION bmf.. like IBMAVW.ANN
->>0xD4 string =\x62\x6D\x66\x01\x00 Windows help annotation
-!:mime application/x-winhelp
-!:ext ann
->>0xD4 string !\x62\x6D\x66\x01\x00
-# "GID Help index" by TrID
->>>(4.l+0x65) string =|Pete Windows help Global Index
-!:mime application/x-winhelp
-!:ext gid
-# HeLP Bookmark or
-# "Windows HELP File" by TrID
->>>(4.l+0x65) string !|Pete
-# maybe there exists a cleaner way to detect HeLP fragments
-# brute-force search for magic 0x036C with matching Major version, at most 7 iterations
-# discapp.hlp
->>>>16 search/0x49AF/s \x6c\x03
->>>>>&0 use help-ver-date
->>>>>&4 leshort !1
-# putty.hlp
->>>>>>&0 search/0x69AF/s \x6c\x03
->>>>>>>&0 use help-ver-date
->>>>>>>&4 leshort !1
->>>>>>>>&0 search/0x49AF/s \x6c\x03
->>>>>>>>>&0 use help-ver-date
->>>>>>>>>&4 leshort !1
->>>>>>>>>>&0 search/0x49AF/s \x6c\x03
->>>>>>>>>>>&0 use help-ver-date
->>>>>>>>>>>&4 leshort !1
->>>>>>>>>>>>&0 search/0x49AF/s \x6c\x03
->>>>>>>>>>>>>&0 use help-ver-date
->>>>>>>>>>>>>&4 leshort !1
->>>>>>>>>>>>>>&0 search/0x49AF/s \x6c\x03
->>>>>>>>>>>>>>>&0 use help-ver-date
->>>>>>>>>>>>>>>&4 leshort !1
->>>>>>>>>>>>>>>>&0 search/0x49AF/s \x6c\x03
-# GCC.HLP is detected after 7 iterations
->>>>>>>>>>>>>>>>>&0 use help-ver-date
-# this only happens if a bigger hlp file is detected after all search iterations are used up
->>>>>>>>>>>>>>>>>&4 leshort !1 Windows y.z help
-!:mime application/winhelp
-!:ext hlp
-# repeat the search again, otherwise the following default line does not work
->>>>16 search/0x49AF/s \x6c\x03
-# remaining files should be HeLP Bookmark WinHlp32.BMK (XP 32-bit) or WinHlp32 (Windows 8.1 64-bit)
->>>>16 default x Windows help Bookmark
-!:mime application/x-winhelp
-!:ext bmk
-## FirstFreeBlock normally FFFFFFFFh 10h for *ANN
-##>>8 lelong x \b, FirstFreeBlock %#8.8x
-# EntireFileSize
->>12 lelong x \b, %d bytes
-## ReservedSpace normally 042Fh AFh for *.ANN
-#>>(4.l) lelong x \b, ReservedSpace %#8.8x
-## UsedSpace normally 0426h A6h for *.ANN
-#>>(4.l+4) lelong x \b, UsedSpace %#8.8x
-## FileFlags normally 04...
-#>>(4.l+5) lelong x \b, FileFlags %#8.8x
-## file header magic 0x293B
-#>>(4.l+9) uleshort x \b, file header magic %#4.4x
-## file header Flags 0x0402
-#>>(4.l+11) uleshort x \b, file header Flags %#4.4x
-## file header PageSize 0400h 80h for *.ANN
-#>>(4.l+13) uleshort x \b, PageSize %#4.4x
-## Structure[16] z4
-#>>(4.l+15) string >\0 \b, Structure_"%-.16s"
-## MustBeZero 0
-#>>(4.l+31) uleshort x \b, MustBeZero %#4.4x
-## PageSplits
-#>>(4.l+33) uleshort x \b, PageSplits %#4.4x
-## RootPage
-#>>(4.l+35) uleshort x \b, RootPage %#4.4x
-## MustBeNegOne 0xffff
-#>>(4.l+37) uleshort x \b, MustBeNegOne %#4.4x
-## TotalPages 1
-#>>(4.l+39) uleshort x \b, TotalPages %#4.4x
-## NLevels 0x0001
-#>>(4.l+41) uleshort x \b, NLevels %#4.4x
-## TotalBtreeEntries
-#>>(4.l+43) ulelong x \b, TotalBtreeEntries %#8.8x
-## pages of the B+ tree
-#>>(4.l+47) ubequad x \b, PageStart %#16.16llx
-
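-# A minimal Python sketch (not part of the original magic file; the helper name
-# hlp_summary is illustrative) checking the WinHelp internal directory as done
-# above: DirectoryStart at offset 4, EntireFileSize at offset 12 and the B+tree
-# file header magic 0x293B at DirectoryStart+9.
-import struct
-
-def hlp_summary(path):
-    with open(path, 'rb') as f:
-        data = f.read()
-    magic, directory_start, first_free, file_size = struct.unpack_from('<4l', data, 0)
-    if magic != 0x00035f3f:
-        raise ValueError('not a WinHelp file')
-    btree_magic, = struct.unpack_from('<H', data, directory_start + 9)
-    return {'bytes': file_size, 'directory_at': directory_start,
-            'valid_directory': btree_magic == 0x293B}
-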
-# starts with colon or semicolon for a comment line like Back2Life.cnt
-0 regex \^(:|;)
-# look for first keyword Base
->0 search/45 :Base
->>&0 use cnt-name
-# the only solution is to search again from the beginning, because relative offsets change when use is called
->0 search/45 :Base
->0 default x
-# look for other keyword Title like in putty.cnt
->>0 search/45 :Title
->>>&0 use cnt-name
-#
-# display mime type and name of Windows help Content source
-0 name cnt-name
-# skip space at beginning
->0 string \040
-# name without extension and greater-than character, or name with hlp extension
->>1 regex/c \^([^\xd>]*|.*\\.hlp) MS Windows help file Content, based "%s"
-!:mime text/plain
-!:apple ????TEXT
-!:ext cnt
-#
-# Windows creates a full-text search index from a hlp file if the user clicks the "Find" tab and enables keyword indexing
-0 string tfMR MS Windows help Full Text Search index
-!:mime application/x-winhelp-fts
-!:ext fts
->16 string >\0 for "%s"
-
-# Summary: Hyper terminal
-# Created by: unknown
-# Update: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/HyperACCESS
-# https://www.hilgraeve.com/hyperterminal/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/h/ht.trid.xml
-# Note: called "HyperTerminal data file" by TrID and "HyperTerminal File" on English Windows
-0 string HyperTerminal\040
->14 string 1.0\ --\ HyperTerminal\ data\ file MS Windows HyperTerminal profile
-#!:mime application/octet-stream
-!:mime application/x-ms-ht
-!:ext ht
-
-# https://ithreats.files.wordpress.com/2009/05/\040
-# lnk_the_windows_shortcut_file_format.pdf
-# Summary: Windows shortcut
-# Created by: unknown
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Windows_Shortcut
-# https://learn.microsoft.com/en-us/openspecs/windows_protocols/ms-shllink/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lnk-shortcut.trid.xml
-# https://winprotocoldoc.blob.core.windows.net/productionwindowsarchives/MS-SHLLINK/%5bMS-SHLLINK%5d.pdf
-# Note: called "Windows Shortcut" by TrID, "Microsoft Windows Shortcut" by DROID via PUID x-fmt/428 and "Windows shortcut file" by ./msdos (v 1.158)
-# partly verified by command like `lnkinfo AOL.lnk`
-# 'L' + GUID
-# HeaderSize + LinkCLSID 00021401-0000-0000-C000-000000000046
-0 string \114\0\0\0\001\024\002\0\0\0\0\0\300\0\0\0\0\0\0\106 MS Windows shortcut
-!:mime application/x-ms-shortcut
-!:ext lnk
-# LinkFlags
-# HasLinkTargetIDList; if set a LinkTargetIDList structure MUST follow the ShellLinkHeader; if it is not set, the structure MUST NOT be present
->20 lelong&1 1 \b, Item id list present
-# HasLinkInfo; if set a LinkInfo structure MUST follow the ShellLinkHeader or LinkTargetIDList; if it is not set, the structure MUST NOT be present
->20 lelong&2 2 \b, Points to a file or directory
->20 lelong&4 4 \b, Has Description string
->20 lelong&8 8 \b, Has Relative path
->20 lelong&16 16 \b, Has Working directory
->20 lelong&32 32 \b, Has command line arguments
->20 lelong&64 64 \b, Icon
-# IconIndex
->>56 lelong x \b number=%d
-# IsUnicode; If set then StringData section contains Unicode-encoded strings
->20 lelong&128 128 \b, Unicoded
-# ForceNoLinkInfo; LinkInfo structure is ignored
->20 lelong&256 256 \b, NoLinkInfo
-# HasExpString; with an EnvironmentVariableDataBlock
->20 lelong&512 512 \b, HasEnvironment
-# look for BlockSize 314h and EnvironmentVariableDataBlock BlockSignature A0000001h
->>76 search/1972 \x14\x03\x00\x00\x01\x00\x00\xa0
-# TargetAnsi (260 bytes); NULL-terminated path to environment variable encoded with system default code page
-#>>>&0 string x '%s'
-# TargetUnicode (520 bytes): optional NULL-terminated path to same environment variable Unicode encoded
-# like: "%windir%\system32\calc.exe"
->>>&260 lestring16 x "%s"
-# RunInSeparateProcess; run in a separate virtual machine when launching a 16-bit application; no examples found
->20 lelong&1024 1024 \b, RunInSeparateProcess
-# Unused1; undefined and MUST be ignored
-#>20 lelong&2048 2048 \b, Unused1
-# HasDarwinID; with a DarwinDataBlock
->20 lelong&4096 4096 \b, HasDarwinID
-# look for BlockSize 314h and DarwinDataBlock BlockSignature A0000006h
->>76 search/1972 \x14\x03\x00\x00\x06\x00\x00\xa0
-# DarwinDataAnsi (260 bytes); NULL-terminated application identifier encoded with system default code page; SHOULD be ignored
-#>>>&0 string x '%s'
-# DarwinDataUnicode (520 bytes); NULL-terminated application identifier Unicode encoded
->>>&260 lestring16 x "%s"
-# RunAsUser; target application is run as a different user
->20 lelong&8192 8192 \b, RunAsUser
-# HasExpIcon; with an IconEnvironmentDataBlock
->20 lelong&16384 16384 \b, HasExpIcon
-# look for BlockSize 314h and IconEnvironmentDataBlock BlockSignature A0000007h
->>76 search/1972 \x14\x03\x00\x00\x07\x00\x00\xa0
-# TargetAnsi (260 bytes); NULL-terminated path to environment icon variable encoded with system default code page
-#>>>&0 string x '%s'
-# TargetUnicode (520 bytes); optional NULL-terminated path to same icon environment variable Unicode encoded
-# like: "%SystemDrive%\Program Files\YaCy\addon\YaCy.ico"
->>>&260 lestring16 x "%s"
-# NoPidlAlias; represented in the shell namespace; no examples found
->20 lelong&32768 32768 \b, NoPidlAlias
-# Unused2; undefined and MUST be ignored
-#>20 lelong&65536 65536 \b, Unused2
-# RunWithShimLayer; with a ShimDataBlock; no examples found
->20 lelong&131072 131072 \b, RunWithShimLayer
-# ForceNoLinkTrack; TrackerDataBlock is ignored; no examples found
->20 lelong&262144 262144 \b, ForceNoLinkTrack
->20 lelong&262144 0
-# look for BlockSize 60h, TrackerDataBlock BlockSignature A0000003h, its Length 58h and Version 0
->>76 search/1972 \x60\x00\x00\x00\x03\x00\x00\xa0\x58\x00\x00\x00\0\0\0\0
-# MachineID (16 bytes); a NULL-terminated NetBIOS name encoded with system default code page of the machine
->>>&0 string x \b, MachineID %0.16s
-# Droid (32 bytes)
-#
-# DroidBirth (32 bytes)
-#
-# EnableTargetMetadata; collect target properties and store in PropertyStoreDataBlock
->20 lelong&524288 524288 \b, EnableTargetMetadata
-# look for BlockSize >= Ch, PropertyStoreDataBlock BlockSignature A0000009h
-#>>76 search/1972 \x00\x00\x09\x00\x00\xa0
-# PropertyStore (variable)
-#
-# DisableLinkPathTracking; EnvironmentVariableDataBlock is ignored; no examples found
->20 lelong&1048576 1048576 \b, DisableLinkPathTracking
-# DisableKnownFolderTracking; SpecialFolderDataBlock and KnownFolderDataBlock are ignored and not saved
->20 lelong&2097152 2097152 \b, DisableKnownFolderTracking
->20 lelong&2097152 0
-# look for BlockSize 1Ch and KnownFolderDataBlock BlockSignature A000000Bh
->>76 search/1972 \x1c\x00\x00\x00\x0B\x00\x00\xa0
-# https://learn.microsoft.com/en-us/dotnet/desktop/winforms/controls/known-folder-guids-for-file-dialog-custom-places
-# KnownFolderID specifies the folder GUID ID
-# ProgramFiles 905E63B6-C1BF-494E-B29C-65B732D3D21A
-# ProgramFilesX86 7C5A40EF-A0FB-4BFC-874A-C0F2E0B9FA8E
->>>&0 guid x KnownFolderID %s
-# DisableKnownFolderAlias; unaliased form of the known folder IDList SHOULD be used; no examples found
->20 lelong&4194304 4194304 \b, DisableKnownFolderAlias
-# AllowLinkToLink; link that references another link is enabled; no examples found
->20 lelong&8388608 8388608 \b, AllowLinkToLink
-# UnaliasOnSave; unaliased form of that known folder or the target IDList SHOULD be used; no examples found
->20 lelong&16777216 16777216 \b, UnaliasOnSave
-# PreferEnvironmentPath; path specified in the EnvironmentVariableDataBlock SHOULD be used
->20 lelong&33554432 33554432 \b, PreferEnvironmentPath
-# KeepLocalIDListForUNCTarget; UNC name SHOULD be stored in local path IDList in PropertyStoreDataBlock; no examples found
->20 lelong&67108864 67108864 \b, KeepLocalIDListForUNCTarget
-# FileAttributes
->24 lelong&1 1 \b, Read-Only
->24 lelong&2 2 \b, Hidden
->24 lelong&4 4 \b, System
-# Reserved1; MUST be zero
->24 lelong&8 8 \b, Reserved1
->24 lelong&16 16 \b, Directory
->24 lelong&32 32 \b, Archive
-# Reserved2; MUST be zero
->24 lelong&64 64 \b, Reserved2
->24 lelong&128 128 \b, Normal
->24 lelong&256 256 \b, Temporary
-# no examples found
->24 lelong&512 512 \b, Sparse
-# no examples found
->24 lelong&1024 1024 \b, Reparse point
->24 lelong&2048 2048 \b, Compressed
->24 lelong&4096 4096 \b, Offline
-# FILE_ATTRIBUTE_NOT_CONTENT_INDEXED; contents need to be indexed
->24 lelong&8192 8192 \b, NeedIndexed
-# FILE_ATTRIBUTE_ENCRYPTED; file or directory is encrypted
->24 lelong&16384 16384 \b, Encrypted
-# value zero means there is no time set on the target
->28 leqwdate !0 \b, ctime=%s
-# Access time of target in UTC
->36 leqwdate !0 \b, atime=%s
-# write time of target in UTC
->44 leqwdate !0 \b, mtime=%s
-# FileSize; 32 bit size of target in bytes
->52 lelong x \b, length=%u, window=
-# ShowCommand; 1~SW_SHOWNORMAL 3~SW_SHOWMAXIMIZED HerzlichMEDION.lnk 7~SW_SHOWMINNOACTIVE YaCy.lnk Privoxy.lnk; All other values like 2 MUST be treated as SW_SHOWNORMAL
-#>60 lelong x ShowCommand=%#x
->60 lelong x
->>60 lelong 3 \bshowmaximized
->>60 lelong 7 \bshowminnoactive
->>60 default x \bnormal
-# Hotkey
->64 uleshort >0 \b, hot key
-# 41h~A 42h~B ...
->>64 ubyte x %c
-# modifier keys: 0x01~HOTKEYF_SHIFT 0x02~HOTKEYF_CONTROL 0x04~HOTKEYF_ALT
->>65 ubyte&1 1 \b+SHIFT
->>65 ubyte&2 2 \b+CONTROL
->>65 ubyte&4 4 \b+ALT
-# Reserved; MUST be zero
-#>66 uleshort !0 \b, reserved %#x
-# Reserved2; MUST be zero
-#>68 ulelong !0 \b, reserved2 %#x
-# Reserved3; MUST be zero
-#>72 ulelong !0 \b, reserved3 %#x
-# optional LINKTARGET_IDLIST if LinkFlags bit HasLinkTargetIDList is set
->20 lelong&1 1
-# IDListSize; size of IDList
->>76 uleshort x \b, IDListSize %#4.4x
-# 1st item
->>78 use lnk-item
-# 2nd possible item
->>(78.s+78) uleshort >0
->>>(78.s+78) use lnk-item
-# 3rd possible item
->>>&(&-2.s-2) uleshort >0
->>>>&-2 use lnk-item
-# 4th possible item
->>>>&(&-2.s-2) uleshort >0
->>>>>&-2 use lnk-item
-# Because HasLinkInfo is set, a LinkInfo structure follows
->20 lelong&2 2
-# if there is no LINKTARGET_IDLIST (no HasLinkTargetIDList) then directly after the header; no example found
->>20 lelong&1 =0
->>>76 use lnk-info
-# if there is a LINKTARGET_IDLIST (HasLinkTargetIDList) then after the LINKTARGET_IDLIST, additional IDListSize bytes further
->>20 lelong&1 =1
->>>76 uleshort >0
-#>>>>(76.s+78) use lnk-info
->>>>(76.s+78) ubelong x
-# move pointer to beginning of LinkInfo structure
->>>>>&-8 ubelong x
-#>>>>>>&16 ulelong x \b, LocalBasePathOffset=%#8.8x
->>>>>>&(&16.l) string x \b, LocalBasePath "%s"
-# check and then display link item (size,data)
-0 name lnk-item
-# size value 0x0000 means TerminalID; indicates the end of the item IDs list
->0 uleshort >0
-#>>0 uleshort x \b, ItemIDSize %#4.4x
-# item Data
-#>>2 ubequad x \b, Item data=%#16.16llx
-#>>2 ubyte x \b, Item type=%#x
->>2 ubyte =0x1f \b, Root folder
-# like: "26EE0668-A00A-44D7-9371-BEB064C98683" Control Panel
-# "20D04FE0-3AEA-1069-A2D8-08002B30309D" My Computer
-# "871C5380-42A0-1069-A2EA-08002B30309D" Internet Explorer
->>>4 guid x "%s"
->>2 ubyte =0x2f \b, Volume
-# like: "C:\" "D:\"
->>>3 string x "%s"
-# Control panel category
-#>>2 ubyte foo \b, Control panel category
-# display LinkInfo structure (size,flags,offsets)
-0 name lnk-info
-# LinkInfoSize; size of the LinkInfo structure
->0 ulelong x \b, LinkInfoSize %#x
-# LinkInfoHeaderSize; if 1C no optional fields; >=24 optional fields are specified
->4 ulelong x \b, LinkInfoHeaderSize %#x
-# LinkInfoFlags;
-#>8 ulelong x \b, LinkInfoFlags=%#x
->8 ulelong&1 1 \b, VolumeIDAndLocalBasePath
-# VolumeIDOffset; location of the VolumeID field (VolumeIDSize DriveType DriveSerialNumber VolumeLabelOffset ... ) inside LinkInfo structure
->>12 ulelong x \b, VolumeIDOffset %#x
-# LocalBasePathOffset; location of LocalBasePath field like "C:\test\a.txt" inside LinkInfo structure
->>16 ulelong x \b, LocalBasePathOffset %#x
-# LocalBasePathOffsetUnicode; location of the LocalBasePathUnicode field inside LinkInfo structure
->>4 ulelong >23
->>>28 ulelong x \b, LocalBasePathOffsetUnicode %#x
->8 ulelong&2 2 \b, CommonNetworkRelativeLinkAndPathSuffix
-# CommonNetworkRelativeLinkOffset; location of the CommonNetworkRelativeLink field inside LinkInfo structure
->>20 ulelong x \b, CommonNetworkRelativeLinkOffset %#x
-# CommonPathSuffixOffset; location of CommonPathSuffix field
->24 ulelong x \b, CommonPathSuffixOffset %#x
-# CommonPathSuffixOffsetUnicode; location of CommonPathSuffixUnicode field inside LinkInfo structure
->4 ulelong >23
->>32 ulelong x \b, CommonPathSuffixOffsetUnicode %#x
-
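-# A minimal Python sketch (not part of the original magic file; the helper name
-# lnk_header is illustrative) decoding the fixed ShellLinkHeader fields used
-# above: LinkFlags at 20, FileAttributes at 24, the three FILETIME stamps at
-# 28/36/44, FileSize at 52 and ShowCommand at 60.
-import datetime
-import struct
-
-_EPOCH_1601 = datetime.datetime(1601, 1, 1)
-
-def _filetime(value):
-    # FILETIME counts 100-nanosecond intervals since 1 January 1601
-    if value == 0:
-        return None                      # zero means no time set on the target
-    return _EPOCH_1601 + datetime.timedelta(microseconds=value // 10)
-
-def lnk_header(path):
-    with open(path, 'rb') as f:
-        hdr = f.read(76)
-    if len(hdr) < 76 or hdr[:4] != b'L\x00\x00\x00':
-        raise ValueError('not a Windows shortcut')
-    flags, attributes = struct.unpack_from('<LL', hdr, 20)
-    ctime, atime, mtime = struct.unpack_from('<QQQ', hdr, 28)
-    size, icon_index, show = struct.unpack_from('<LlL', hdr, 52)
-    return {'flags': flags, 'attributes': attributes, 'length': size,
-            'ctime': _filetime(ctime), 'atime': _filetime(atime),
-            'mtime': _filetime(mtime), 'show_command': show,
-            'has_target_id_list': bool(flags & 1), 'unicode': bool(flags & 128)}
-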
-# Summary: Outlook Personal Folders
-# Created by: unknown
-# Update: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/Personal_Folder_File
-# https://en.wikipedia.org/wiki/Personal_Storage_Table
-# Reference: https://interoperability.blob.core.windows.net/files/MS-PST/%5bMS-PST%5d.pdf
-# http://mark0.net/download/triddefs_xml.7z/defs/p/pab.trid.xml
-# dwMagic !BDN
-0 lelong 0x4E444221
-# skip DROID x-fmt-75-signature-id-472.pab x-fmt-248-signature-id-260.pst x-fmt-249-signature-id-261.pst
-# by checking for the existence of the bPlatformCreate value
->14 ubyte x Microsoft Outlook
-#!:mime application/octet-stream
-# NOT officially registered!
-!:mime application/vnd.ms-outlook
-# dwCRCPartial; 32-bit cyclic redundancy check (CRC) value of the following 471 bytes; zero for 64-bit
-#>>4 ulelong !0 \b, CRC %#x
-# wMagicClient; AB (4142h) is used for PAB files; SM (534Dh) is used for PST files; SO (534Fh) is used for OST files
-#>>8 leshort x \b, wMagicClient=%#x
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pab.trid.xml
-# Note: called "Microsoft Personal Address Book" by TrID and
-# "Microsoft Outlook Personal Address Book" by DROID via x-fmt/75
->>8 leshort 0x4142 Personal Address Book
-#!:mime application/x-ms-pab
-!:ext pab
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pst.trid.xml
-# http://mark0.net/download/triddefs_xml.7z/defs/p/pst-unicode.trid.xml
-# Note: called "Microsoft OutLook Personal Folder" by TrID and
-# by DROID via x-fmt/248 for ANSI and via x-fmt/249 for Unicode
-#>>8 leshort 0x4D53 \b, PST~
-# called "Microsoft Outlook email folder" in ./windows version 1.37 and older
->>8 leshort 0x4D53 Personal Storage
-#!:mime application/x-ms-pst
-!:ext pst
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/o/ost.trid.xml
-# Note: called "Outlook Exchange Offline Storage" by TrID
->>8 leshort 0x4F53 Offline Storage
-#!:mime application/x-ms-ost
-!:ext ost
-# wVer; file format version: 14 or 15 if the file is ANSI; greater than 21, typically 23 (=17h), if Unicode; 37 if written by Outlook with WIP
->>10 uleshort x (
-# probably NO intermediate versions exist
->>10 leshort <0x10 \b<=2002, ANSI,
->>10 leshort >0x14 \b>=2003, Unicode,
->>10 uleshort x version %u)
-# wVerClient; client file format version like: 19 22
-#>>12 uleshort x \b, wVerClient=%u
-# bPlatformCreate; This value MUST be set to 1 but also found 2
->>14 ubyte >1 \b, bPlatformCreate=%u
-# bPlatformAccess; This value MUST be set to 1 but also found 2
->>15 ubyte >1 \b, bPlatformAccess=%u
-# dwReserved1; SHOULD ignore and NOT modify this value; SHOULD initialize to zero
->>16 ulelong !0 \b, dwReserved1=%#x
-# dwReserved2; SHOULD ignore and NOT modify this value; SHOULD initialize to zero
->>20 ulelong !0 \b, dwReserved2=%#x
-# ANSI 32-bit variant Outlook 1997-2002
->>10 uleshort <16
-# bidNextB; next BlockID (ANSI 4 bytes)
-#>>>24 ulelong !0 \b, bidNextB=%#x
-# bidNextP; Next available back BlockID pointer
-#>>>28 ulelong !0 \b, bidNextP=%#x
-# dwUnique; value monotonically increased when modifying PST; so CRC is changing
->>>32 ulelong !0 \b, dwUnique=%#x
-# rgnid[128]; A fixed array of 32 NodeIDs, each corresponding to one of the 32 possible NID_TYPEs
-#>>>36 ubequad x \b, rgnid=%#llx...
-# dwReserved; Implementations SHOULD ignore this value and SHOULD NOT modify it; Initialized zero
->>>164 ulelong !0 \b, dwReserved=%#x
-# ibFileEof; the size of the PST file, in bytes (ANSI 4 bytes)
->>>168 ulelong x \b, %u bytes
-# ibAMapLast; offset to the last AMap page
-#>>>172 ulelong x \b, ibAMapLast=%#x
-# bSentinel; MUST be set to 0x80
->>>460 ubyte !0x80 \b, bSentinel=%#x
-# bCryptMethod: 0~No encryption 1~encryption with permutation 2~encryption with cyclic 16~encryption with Windows Information Protection (WIP)
->>>461 ubyte >0 \b, bCryptMethod=%u
-# UNICODE 64-bit variant Outlook 2003-2007
->>10 uleshort >20
-# bidUnused; Unused 8 bytes padding (Unicode only); sometimes like: 0x0000000100000004
->>>24 ulequad !0x0000000100000004 \b, bidUnused=%#16.16llx
-# dwUnique; value monotonically increased when modifying PST; so CRC is changing
->>>40 ulelong !0 \b, dwUnique=%#x
-# rgnid[] (128 bytes): A fixed array of 32 NIDs, each corresponding to one of the 32 possible
-#>>>44 ubequad x \b, rgnid=%#llx...
-# ibFileEof; the size of the PST file, in bytes (Unicode 8 bytes)
->>>184 ulequad x \b, %llu bytes
-# bSentinel; MUST be set to 0x80
->>>512 ubyte !0x80 \b, bSentinel=%#x
-# bCryptMethod; Encryption type like: 0 1 2 16
->>>513 ubyte >0 \b, bCryptMethod=%u
-# dwCRC; 32-bit CRC of the previous 516 bytes
->>>524 ulelong x \b, CRC32 %#x
-
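-# A minimal Python sketch (not part of the original magic file; the helper name
-# pst_summary is illustrative) reading the same header fields as above:
-# wMagicClient at 8, wVer at 10, and the file size and encryption method at
-# their ANSI (168/461) or Unicode (184/513) offsets.
-import struct
-
-def pst_summary(path):
-    with open(path, 'rb') as f:
-        hdr = f.read(0x220)
-    if hdr[:4] != b'!BDN':
-        raise ValueError('not an Outlook PST/OST/PAB file')
-    client, ver = struct.unpack_from('<HH', hdr, 8)
-    kind = {0x4142: 'PAB', 0x4D53: 'PST', 0x4F53: 'OST'}.get(client, 'unknown')
-    if ver < 16:                                   # ANSI, 32-bit layout
-        size, = struct.unpack_from('<L', hdr, 168)
-        crypt = hdr[461]
-    else:                                          # Unicode, 64-bit layout
-        size, = struct.unpack_from('<Q', hdr, 184)
-        crypt = hdr[513]
-    return {'kind': kind, 'version': ver, 'bytes': size, 'crypt_method': crypt}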
-
-# Summary: Windows help cache
-# Created by: unknown
-0 string \164\146\115\122\012\000\000\000\001\000\000\000 MS Windows help cache
-
-
-# Summary: IE cache file
-# Created by: Christophe Monniez
-0 string Client\ UrlCache\ MMF Internet Explorer cache file
->20 string >\0 version %s
-
-
-# Summary: Registry files
-# Created by: unknown
-# Modified by (1): Joerg Jenderek
-0 string regf MS Windows registry file, NT/2000 or above
-0 string CREG MS Windows 95/98/ME registry file
-0 string SHCC3 MS Windows 3.1 registry file
-
-
-# Summary: Windows Registry text
-# URL: https://en.wikipedia.org/wiki/Windows_Registry#.REG_files
-# Reference: http://fileformats.archiveteam.org/wiki/Windows_Registry
-# Submitted by: Abel Cheung <abelcheung@gmail.com>
-# Update: Joerg Jenderek
-# Windows 3-9X variant
-0 string REGEDIT
-# skip ASCII text like "REGEDITor.txt" but match
-# L1WMAP.REG with only 1 CRNL or org.gnome.gnumeric.reg with 2 NL
->7 search/3 \n Windows Registry text
-!:mime text/x-ms-regedit
-!:ext reg
-# Windows 9X variant
->>0 string REGEDIT4 (Win95 or above)
-# Windows 2K ANSI variant
-0 string Windows\ Registry\ Editor\
->&0 string Version\ 5.00\r\n\r\n Windows Registry text (Win2K or above)
-!:mime text/x-ms-regedit
-!:ext reg
-# Windows 2K UTF-16 variant
-2 lestring16 Windows\ Registry\ Editor\
->0x32 lestring16 Version\ 5.00\r\n\r\n Windows Registry little-endian text (Win2K or above)
-# relative offset not working
-#>&0 lestring16 Version\ 5.00\r\n\r\n Windows Registry little-endian text (Win2K or above)
-!:mime text/x-ms-regedit
-!:ext reg
-# WINE variant
-# URL: https://en.wikipedia.org/wiki/Wine_(software)
-# Reference: https://www.winehq.org/pipermail/wine-cvs/2005-October/018763.html
-# Note: WINE uses a text-based registry (system.reg, user.reg, userdef.reg)
-# instead of the binary hive structure used by Windows
-0 string WINE\ REGISTRY\ Version\ WINE registry text
-# version 2
->&0 string x \b, version %s
-!:mime text/x-wine-extension-reg
-!:ext reg
-
-# Windows *.INF *.INI files updated by Joerg Jenderek at Apr 2013, Feb 2018
-# empty, comment, section
-# PR/383: remove unicode BOM because it is not portable across regex impls
-#0 regex/s \\`(\\r\\n|;|[[])
-# empty line CRLF
-0 ubeshort 0x0D0A
->0 use ini-file
-# comment line starting with semicolon
-0 string ;
-# look for phrase of Windows policy ADMinistrative template (with starting remark)
-# like: WINDOW_95_CD/TOOLS/RESKIT/netadmin/poledit/conf.adm
->1 search/3548 END\040CATEGORY
-# ADM with remark (by adm-rem.trid.xml) already done by generic ASCII variant
-# if no Windows policy ADMinistrative template then Windows INItialization
->1 default x
->>0 use ini-file
-# section line starting with left bracket
-0 string [
->0 use ini-file
-# check and then display Windows INItialization configuration
-0 name ini-file
-# look for left bracket in section line
->0 search/8192 [
-# https://en.wikipedia.org/wiki/Autorun.inf
-# https://msdn.microsoft.com/en-us/library/windows/desktop/cc144200.aspx
-# space after right bracket
-# or AutoRun.Amd64 for 64 bit systems
-# or only NL separator
->>&0 regex/c \^autorun
-# but sometimes Total Commander directory tree file "treeinfo.wc" with lines like
-# [AUTORUN]
-# [boot]
->>>&0 string =]\r\n[ Total commander directory treeinfo.wc
-!:mime text/plain
-!:ext wc
-# From: Pal Tamas <folti@balabit.hu>
-# Autorun File
->>>&0 string !]\r\n[ Microsoft Windows Autorun file
-!:mime application/x-setupscript
-!:ext inf
-# https://msdn.microsoft.com/en-us/library/windows/hardware/ff549520(v=vs.85).aspx
-# version strings ASCII coded case-independent for Windows setup information script file
->>&0 regex/c \^(version|strings)] Windows setup INFormation
-!:mime application/x-setupscript
-#!:mime application/x-wine-extension-inf
-!:ext inf
-# NETCRC.INF OEMCPL.INF
->>&0 regex/c \^(WinsockCRCList|OEMCPL)] Windows setup INFormation
-!:mime application/x-setupscript
-!:ext inf
-# http://www.winfaq.de/faq_html/Content/tip2500/onlinefaq.php?h=tip2653.htm
-# https://msdn.microsoft.com/en-us/library/windows/desktop/cc144102.aspx
-# .ShellClassInfo DeleteOnCopy LocalizedFileNames ASCII coded case-independent
->>&0 regex/1024c \^(\\.ShellClassInfo|DeleteOnCopy|LocalizedFileNames)] Windows desktop.ini
-!:mime application/x-wine-extension-ini
-#!:mime text/plain
-# https://support.microsoft.com/kb/84709/
->>&0 regex/c \^don't\ load] Windows CONTROL.INI
-!:mime application/x-wine-extension-ini
-!:ext ini
->>&0 regex/c \^(ndishlp\\$|protman\\$|NETBEUI\\$)] Windows PROTOCOL.INI
-!:mime application/x-wine-extension-ini
-!:ext ini
-# https://technet.microsoft.com/en-us/library/cc722567.aspx
-# http://www.winfaq.de/faq_html/Content/tip0000/onlinefaq.php?h=tip0137.htm
->>&0 regex/c \^(windows|Compatibility|embedding)] Windows WIN.INI
-!:mime application/x-wine-extension-ini
-!:ext ini
-# https://en.wikipedia.org/wiki/SYSTEM.INI
->>&0 regex/c \^(boot|386enh|drivers)] Windows SYSTEM.INI
-!:mime application/x-wine-extension-ini
-!:ext ini
-# http://www.mdgx.com/newtip6.htm
->>&0 regex/c \^SafeList] Windows IOS.INI
-!:mime application/x-wine-extension-ini
-!:ext ini
-# https://en.wikipedia.org/wiki/NTLDR Windows Boot Loader information
->>&0 regex/c \^boot\x20loader] Windows boot.ini
-!:mime application/x-wine-extension-ini
-!:ext ini
-# https://en.wikipedia.org/wiki/CONFIG.SYS
->>&0 regex/c \^menu] MS-DOS CONFIG.SYS
-# @CONFIG.UI configuration file of previous DOS version saved by Caldera OPENDOS INSTALL.EXE
-# CONFIG.PSS saved version of file CONFIG.SYS created by %WINDIR%\SYSTEM\MSCONFIG.EXE
-# CONFIG.TSH renamed file CONFIG.SYS.BAT by %WINDIR%\SYSTEM\MSCONFIG.EXE
-# dos and w40 used in dual booting scene
-!:ext sys/dos/w40
-# https://support.microsoft.com/kb/118579/
->>&0 regex/c \^Paths]\r\n MS-DOS MSDOS.SYS
-!:ext sys/dos
-# http://chmspec.nongnu.org/latest/INI.html#HHP
->>&0 regex/c \^options]\r\n Microsoft HTML Help Project
-!:mime text/plain
-!:ext hhp
-# From: Joerg Jenderek
-# URL: https://documentation.basis.com/BASISHelp/WebHelp/b3odbc/ODBC_Driver/obdcdriv_character_translation.htm
-# Reference: https://www.garykessler.net/library/file_sigs.html
-# http://mark0.net/download/triddefs_xml.7z/defs/c/cpx.trid.xml
-# Note: stored in directory %WINDIR%\SysWOW64 or %WINDIR%\system
-# second word often Latin but sometimes Cyrillic like in 12510866.CPX
->>&0 regex/c \^Windows\ (Latin|Cyrillic) Windows codepage translator
-#!:mime text/plain
-!:mime text/x-ms-cpx
-# like: 12510866.CPX
-!:ext cpx
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/File_Explorer
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/scf-exp.trid.xml,scf-exp-old.trid.xml
-# Note: called "Windows Explorer Command Shell File" by TrID and "File Explorer Command" by Windows via SHCmdFile
->>&0 regex/c \^Shell]\r\n Windows Explorer Shell Command File
-#!:mime text/plain
-!:mime text/x-ms-scf
-# like: channels.scf desktop.scf explorer.scf "Desktop anzeigen.scf"
-!:ext scf
-# look for icon file directive maybe pointing to malicious file
->>>1 search/128 IconFile= \b, icon
->>>>&0 string x "%s"
-# From: Joerg Jenderek
-# URL: http://en.wikipedia.org/wiki/VIA_Technologies
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/scf-via.trid.xml
-# Note: called "VIA setup configuration file" by TrID
->>&0 regex/c \^SCF]\r\n VIA setup configuration
-#!:mime text/plain
-!:mime text/x-via-scf
-# like: SETUP.SCF
-!:ext scf
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/InstallShield
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/l/lid-is.trid.xml
-# Note: also contains 3 keywords like: count Default key0
->>&0 regex/c \^Languages] InstallShield Language Identifier
-#!:mime text/plain
-!:mime text/x-installshield-lid
-# like: SETUP.LID
-!:ext lid
-# From: Joerg Jenderek
-# URL: https://www.file-extensions.org/tag-file-extension
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/t/taginfo.trid.xml
-# Note: also contains keywords like: Application Category Company Misc Version
->>&0 regex/c \^TagInfo] TagInfo
-#!:mime text/plain
-#!:mime text/prs.lines.tag
-!:mime text/x-ms-tag
-# like: DATA.TAG
-!:ext tag
-# URL: https://en.wikipedia.org/wiki/Flatpak
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/f/flatpakref.trid.xml
-# Note: called "Flatpack Reference" by TrID
->>&0 string Flatpak\ Ref] Flatpak repository reference
-#!:mime text/plain
-# https://reposcope.com/mimetype/application/vnd.flatpak.ref
-!:mime application/vnd.flatpak.ref
-!:ext flatpakref
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/CloneCD
-# Reference: https://en.wikipedia.org/wiki/CloneCD_Control_File
-# http://mark0.net/download/triddefs_xml.7z/defs/c/cdimage-clonecd-cue.trid.xml
-# Note: called "CloneCD CDImage (description)" by TrID and "CloneCD Control File" by DROID via PUID fmt/1760
->>&0 string CloneCD] CloneCD CD-image Description
-#!:mime text/plain
-!:mime text/x-ccd
-!:ext ccd
-# unknown keyword after opening bracket
->>&0 default x
-#>>>&0 string/c x UNKNOWN [%s
-# look for left bracket of second section
->>>&0 search/8192 [
-# version Strings FileIdentification
->>>>&0 string/c version Windows setup INFormation
-!:mime application/x-setupscript
-!:ext inf
-# From: Joerg Jenderek
-# URL: https://cdrtfe.sourceforge.io/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/c/cfp-cdrtfe.trid.xml
->>>>&0 string FileExplorer] cdrtfe Project
-!:mime text/x-cfp
-!:ext cfp
-# https://en.wikipedia.org/wiki/Initialization_file Windows Initialization File or other
->>>>&0 default x
->>>>>&0 ubyte x
-# letters, digits, underscore, parentheses and white space followed by right bracket
-# termination by CR implies a section line and skips BOOTLOG.TXT DETLOG.TXT
->>>>>>&-1 regex/T \^([A-Za-z0-9_\(\)\ ]+)\]\r Generic INItialization configuration [%-.40s
-# NETDEF.INF multiarc.ini
-#!:mime application/x-setupscript
-!:mime application/x-wine-extension-ini
-#!:mime text/plain
-!:ext ini/inf
-# samples with only 1 and unknown section name
-# XXX: matches a file containing '[1] 2'
-#>>>&0 default x Generic INItialization configuration
-#>>>>0 string x \b, 1st line "%s"
-# UTF-16 BOM
-0 ubeshort =0xFFFE
-# look for phrase of Windows policy ADMinistrative template (UTF-16 by adm-uni.trid.xml)
-# like: wuau.adm
->2 search/0x384A E\0N\0D\0\040\0C\0A\0T\0E\0G\0O\0R\0Y\0
->>0 use windows-adm
-# if no Windows policy ADMinistrative template then Windows INFormation
->2 default x
-# UTF-16 BOM followed by CR~0D00 , comment~semicolon~3B00 , section~bracket~5B00
->>0 ubelong&0xFFff89FF =0xFFFE0900
-# look for left bracket in section line
->>>2 search/8192 [
-# keyword without its 1st letter, which may be upper- or lower-case
->>>>&3 lestring16 ersion] Windows setup INFormation
-!:mime application/x-setupscript
-# like: hdaudio.inf iscsi.inf spaceport.inf tpm.inf usbhub3.inf UVncVirtualDisplay.inf
-!:ext inf
->>>>&3 lestring16 trings] Windows setup INFormation
-!:mime application/x-setupscript
-# like: arduino_gemma.inf iis.inf MSM8960.inf
-!:ext inf
->>>>&3 lestring16 ourceDisksNames] Windows setup INFormation
-!:mime application/x-setupscript
-# like: atiixpag.inf mdmnokia.inf netefe32.inf rdpbus.inf
-!:ext inf
-# netnwcli.inf start with ;---[ NetNWCli.INX ]
->>>>&3 default x
-# look for NL followed by left bracket
->>>>>&0 search/8192 \x0A\x00\x5b
-# like: defltwk.inf netvwifibus.inf WSDPrint.inf
->>>>>>&3 lestring16 ersion] Windows setup INFormation
-!:mime application/x-setupscript
-!:ext inf
-
-# Summary: Windows Policy ADMinistrative template
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Administrative_Template
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/adm.trid.xml
-# Note: typically stored in directory like: %WINDIR%\system32\GroupPolicy\ADM
-# worst case ASCII variant starting with remark line like: inetset.adm
-0 search/0x4E CLASS\040
->&0 string MACHINE
->>0 use windows-adm
->&0 string USER
->>0 use windows-adm
-# display information about Windows policy ADMinistrative template
-0 name windows-adm Windows Policy Administrative Template
-!:mime text/x-ms-adm
-!:ext adm
-# UTF-16 BOM implies UTF-16 encoded ADM (by adm-uni.trid.xml)
->0 ubeshort =0xFFFE
->>2 lestring16 x \b, 1st line "%s"
-# look for UTF-16 encoded CarriageReturn LineFeed
->>>2 search/0x3A \r\0\n\0
->>>>&0 lestring16 x \b, 2nd line "%s"
-# no UTF-16 BOM implies "ASCII" encoded ADM (by adm.trid.xml)
->0 ubeshort !0xFFFE
->>0 string x \b, 1st line "%s"
-#>>>&0 ubequad x \b, 2ND %16.16llx
-# 2nd line empty
->>>&2 beshort =0x0D0A
->>>>&0 beshort !0x0D0A \b, 3rd line
->>>>>&-2 string x "%s"
-# 2nd line with content
->>>&2 beshort !0x0D0A \b, 2nd line
->>>>&-2 string x "%s"
-
-# Windows Precompiled INF files *.PNF added by Joerg Jenderek at Mar 2013 of _PNF_HEADER inf.h
-# http://read.pudn.com/downloads3/sourcecode/windows/248345/win2k/private/windows/setup/setupapi/inf.h__.htm
-# URL: http://fileformats.archiveteam.org/wiki/INF_(Windows)
-# Reference: http://en.verysource.com/code/10350344_1/inf.h.html
-# Note: stored in %Windir%\Inf %Windir%\System32\DriverStore\FileRepository
-# check for valid major and minor versions: 101h - 303h
-0 leshort&0xFcFc =0x0000
-# GRR: line above (strength 50) is too general as it catches also "PDP-11 UNIX/RT ldp" ./pdp
->0 leshort&0x0303 !0x0000
-# test for valid InfStyles: 1 2
->>2 uleshort >0
->>>2 uleshort <3
-# look for colon in WinDirPath after PNF header
-#>>>>0x59 search/18 :
-# skip a few Adobe Photoshop Color swatches ("Mac OS.aco" TRUMATCH-Farben.aco Windows.aco) and some
-# Targa images (money-256.tga XING_B_UCM8.tga x-fmt-367-signature-id-604.tga) with "invalid low section name" \0
->>>>(20.l) ubelong >0x40004000
->>>>>0 use PreCompiledInf
-0 name PreCompiledInf
->0 uleshort x Windows Precompiled iNF
-!:mime application/x-pnf
-!:ext pnf
-# major version 1 for older Windows like XP and 3 since about Windows Vista
-# 101h~95-XP; 301h~Windows Vista-7 ; 302h~Windows 10 14393; 303h~Windows 10 18362-Windows11
->1 ubyte x \b, version %u
->0 ubyte x \b.%u
->0 uleshort =0x0101 (Windows
->>4 ulelong&0x00000001 !0x00000001 95-98)
->>4 ulelong&0x00000001 =0x00000001 XP)
->0 uleshort =0x0301 (Windows Vista-8.1)
->0 uleshort =0x0302 (Windows 10 older)
->0 uleshort =0x0303 (Windows 10-11)
-# 1 ,2 (windows 98 SE)
->2 uleshort !2 \b, InfStyle %u
-# PNF_FLAG_IS_UNICODE 0x00000001
-# PNF_FLAG_HAS_STRINGS 0x00000002
-# PNF_FLAG_SRCPATH_IS_URL 0x00000004
-# PNF_FLAG_HAS_VOLATILE_DIRIDS 0x00000008
-# PNF_FLAG_INF_VERIFIED 0x00000010
-# PNF_FLAG_INF_DIGITALLY_SIGNED 0x00000020
-# UNKNOWN8 0x00000080
-# UNKNOWN 0x00000100
-# UNKNOWN1 0x01000000
-# UNKNOWN2 0x02000000
->4 ulelong&0x03000180 >0 \b, flags
->>4 ulelong x %#x
->4 ulelong&0x00000001 0x00000001 \b, unicoded
->4 ulelong&0x00000002 0x00000002 \b, has strings
->4 ulelong&0x00000004 0x00000004 \b, src URL
->4 ulelong&0x00000008 0x00000008 \b, volatile dir ids
->4 ulelong&0x00000010 0x00000010 \b, verified
->4 ulelong&0x00000020 0x00000020 \b, digitally signed
-# >4 ulelong&0x00000080 0x00000080 \b, UNKNOWN8
-# >4 ulelong&0x00000100 0x00000100 \b, UNKNOWN
-# >4 ulelong&0x01000000 0x01000000 \b, UNKNOWN1
-# >4 ulelong&0x02000000 0x02000000 \b, UNKNOWN2
-#>8 ulelong x \b, InfSubstValueListOffset %#x
-# many 0, 1 lmouusb.PNF, 2 linkfx10.PNF , f webfdr16.PNF
-# , 6 bth.PNF, 9 usbport.PNF, d netnwifi.PNF, 10h nettcpip.PNF
-#>12 uleshort x \b, InfSubstValueCount %#x
-# only < 9 found: 8 hcw85b64.PNF
-#>14 uleshort x \b, InfVersionDatumCount %#x
-# only found values lower 0x0000ffff ??
-#>16 ulelong x \b, InfVersionDataSize %#x
-# only found positive values lower 0x00ffFFff for InfVersionDataOffset
->20 ulelong x \b, at %#x
->4 ulelong&0x00000001 =0x00000001
-# case independent: CatalogFile Class DriverVer layoutfile LayoutFile SetupClass signature Signature
->>(20.l) lestring16 x "%s"
->4 ulelong&0x00000001 !0x00000001
->>(20.l) string x "%s"
-# FILETIME is number of 100-nanosecond intervals since 1 January 1601
-#>24 ulequad x \b, InfVersionLastWriteTime %16.16llx
->24 qwdate x \b, InfVersionLastWriteTime %s
-# for Windows 98, XP
->0 uleshort <0x0102
-# only found values lower 0x00ffFFff
-# often 70 but also 78h for corelist.PNF
-# >>32 ulelong x \b, StringTableBlockOffset %#x
-# >>36 ulelong x \b, StringTableBlockSize %#x
-# >>40 ulelong x \b, InfSectionCount %#x
-# >>44 ulelong x \b, InfSectionBlockOffset %#x
-# >>48 ulelong x \b, InfSectionBlockSize %#x
-# >>52 ulelong x \b, InfLineBlockOffset %#x
-# >>56 ulelong x \b, InfLineBlockSize %#x
-# >>60 ulelong x \b, InfValueBlockOffset %#x
-# >>64 ulelong x \b, InfValueBlockSize %#x
-# WinDirPathOffset
-# like 58h, which means directly after the PNF header
-#>>68 ulelong x \b, at %#x
->>68 ulelong x
->>>4 ulelong&0x00000001 =0x00000001
-#>>>>(68.l) ubequad =0x43003a005c005700
-# normally unicoded C:\Windows
-#>>>>>(68.l) lestring16 x \b, WinDirPath "%s"
->>>>(68.l) ubequad !0x43003a005c005700
->>>>>(68.l) lestring16 x \b, WinDirPath "%s"
->>>4 ulelong&0x00000001 !0x00000001
-# normally ASCII C:\WINDOWS
-#>>>>(68.l) string =C:\\WINDOWS \b, WinDirPath "%s"
->>>>(68.l) string !C:\\WINDOWS
->>>>>(68.l) string x \b, WinDirPath "%s"
-# found OsLoaderPathOffset values often 0 , once 70h corelist.PNF, once 68h ASCII machine.PNF
->>>72 ulelong >0 \b,
->>>>4 ulelong&0x00000001 =0x00000001
->>>>>(72.l) lestring16 x OsLoaderPath "%s"
->>>>4 ulelong&0x00000001 !0x00000001
-# seldom C:\ instead empty
->>>>>(72.l) string x OsLoaderPath "%s"
-# 1fdh
-#>>>76 uleshort x \b, StringTableHashBucketCount %#x
-# https://docs.microsoft.com/en-us/openspecs/office_standards/ms-oe376/6c085406-a698-4e12-9d4d-c3b0ee3dbc4a
-# only 407h found
->>>78 uleshort !0x409 \b, LanguageID %x
-#>>>78 uleshort =0x409 \b, LanguageID %x
-# InfSourcePathOffset often 0
->>>80 ulelong >0 \b, at %#x
->>>>4 ulelong&0x00000001 =0x00000001
->>>>>(80.l) lestring16 x SourcePath "%s"
->>>>4 ulelong&0x00000001 !0x00000001
->>>>>(80.l) string >\0 SourcePath "%s"
-# OriginalInfNameOffset often 0
->>>84 ulelong >0 \b, at %#x
->>>>4 ulelong&0x00000001 =0x00000001
->>>>>(84.l) lestring16 x InfName "%s"
->>>>4 ulelong&0x00000001 !0x00000001
->>>>>(84.l) string >\0 InfName "%s"
-
-# for newer Windows like Vista, 7 , 8.1 , 10
->0 uleshort >0x0101
->>80 ulelong x \b, at %#x WinDirPath
->>>4 ulelong&0x00000001 0x00000001
-# normally unicoded C:\Windows
-#>>>>(80.l) ubequad =0x43003a005c005700
-#>>>>>(80.l) lestring16 x "%s"
->>>>(80.l) ubequad !0x43003a005c005700
->>>>>(80.l) lestring16 x "%s"
-# language id: 0 407h~german 409h~English_US
->>90 uleshort !0x409 \b, LanguageID %x
-#>>90 uleshort =0x409 \b, LanguageID %x
->>92 ulelong >0 \b, at %#x
->>>4 ulelong&0x00000001 0x00000001
-# language string like: de-DE en-US
->>>>(92.l) lestring16 x language %s
-
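-# A minimal Python sketch (not part of the original magic file; the helper name
-# pnf_header is illustrative) of the _PNF_HEADER fields decoded above:
-# minor/major version at offsets 0/1, InfStyle at 2, flags at 4 (bit 0 means
-# Unicode strings) and InfVersionDataOffset at 20.
-import struct
-
-def pnf_header(path):
-    with open(path, 'rb') as f:
-        hdr = f.read(96)
-    minor, major, style, flags = struct.unpack_from('<BBHL', hdr, 0)
-    data_offset, = struct.unpack_from('<L', hdr, 20)
-    return {'version': '%u.%u' % (major, minor), 'inf_style': style,
-            'unicode': bool(flags & 1), 'signed': bool(flags & 0x20),
-            'version_data_at': data_offset}
-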
-# Summary: backup file created with a utility like NTBACKUP.EXE shipped with Windows NT/2K/XP/2003
-# Extension: .bkf
-# Created by: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/NTBackup
-# Reference: http://laytongraphics.com/mtf/MTF_100a.PDF
-# Descriptor BloCK name of Microsoft Tape Format
-0 string TAPE
-# Format Logical Address is zero
->20 ulequad 0
-# Reserved for MBC is zero
->>28 uleshort 0
-# Control Block ID is zero
->>>36 ulelong 0
-# BIT4-BIT15, BIT18-BIT31 of block attributes are unused
->>>>4 ulelong&0xFFfcFFe0 0 Windows NTbackup archive
-#!:mime application/x-ntbackup
-!:ext bkf
-# OS ID
->>>>>10 ubyte 1 \b NetWare
->>>>>10 ubyte 13 \b NetWare SMS
->>>>>10 ubyte 14 \b NT
->>>>>10 ubyte 24 \b 3
->>>>>10 ubyte 25 \b OS/2
->>>>>10 ubyte 26 \b 95
->>>>>10 ubyte 27 \b Macintosh
->>>>>10 ubyte 28 \b UNIX
-# OS Version (2)
-#>>>>>11 ubyte x OS V=%x
-# MTF_CONTINUATION Media Sequence Number > 1
-#>>>>>4 ulelong&0x00000001 !0 \b, continued
-# MTF_COMPRESSION
->>>>>4 ulelong&0x00000004 !0 \b, compressed
-# MTF_EOS_AT_EOM End Of Medium was hit during end of set processing
->>>>>4 ulelong&0x00000008 !0 \b, End Of Medium hit
->>>>>4 ulelong&0x00020000 0
-# MTF_SET_MAP_EXISTS A Media Based Catalog Set Map may exist on tape
->>>>>>4 ulelong&0x00010000 !0 \b, with catalog
-# MTF_FDD_ALLOWED However File/Directory Detail can only exist if a Set Map is also present
->>>>>4 ulelong&0x00020000 !0 \b, with file catalog
-# Offset To First Event 238h,240h,28Ch
-#>>>>>8 uleshort x \b, event offset %4.4x
-# Displayable Size (20e0230h 20e024ch 20e0224h)
-#>>>>>8 ulequad x dis. size %16.16llx
-# Media Family ID (455288C4h 4570BD1Ah 45708F2Fh 4570BBF5h)
-#>>>>>52 ulelong x family ID %8.8x
-# TAPE Attributes (3)
-#>>>>>56 ulelong x TAPE %8.8x
-# Media Sequence Number
->>>>>60 uleshort >1 \b, sequence %u
-# Password Encryption Algorithm (3)
->>>>>62 uleshort >0 \b, %#x encrypted
-# Soft Filemark Block Size * 512 (2)
-#>>>>>64 uleshort =2 \b, soft size %u*512
->>>>>64 uleshort !2 \b, soft size %u*512
-# Media Based Catalog Type (1,2)
-#>>>>>66 uleshort x \b, catalog type %4.4x
-# size of Media Name (66,68,6Eh)
->>>>>68 uleshort >0
-# offset of Media Name (5Eh)
->>>>>>70 uleshort >0
-# 0~, 1~ANSI, 2~UNICODE
->>>>>>>48 ubyte 1
-# size-terminated ANSI coded string normally followed by "MTF Media Label"
->>>>>>>>(70.s) string >\0 \b, name: %s
->>>>>>>48 ubyte 2
-# not null-terminated, but a size-terminated Unicode string
->>>>>>>>(70.s) lestring16 x \b, name: %s
-# size of Media Label (104h)
->>>>>72 uleshort >0
-# offset of Media Label (C4h,C6h,CCh)
->>>>>74 uleshort >0
->>>>>>48 ubyte 1
-#Tag|Version|Vendor|Vendor ID|Creation Time Stamp|Cartridge Label|Side|Media ID|Media Domain ID|Vendor Specific fields
->>>>>>>(74.s) string >\0 \b, label: %s
->>>>>>48 ubyte 2
->>>>>>>(74.s) lestring16 x \b, label: %s
-# size of password name (0,1Ch)
-#>>>>>76 uleshort >0 \b, password size %4.4x
-# Software Vendor ID (CBEh)
->>>>>86 uleshort x \b, software (%#x)
-# size of Software Name (6Eh)
->>>>>80 uleshort >0
-# offset of Software Name (1C8h,1CAh,1D0h)
->>>>>>82 uleshort >0
-# 1~ANSI, 2~UNICODE
->>>>>>>48 ubyte 1
->>>>>>>>(82.s) string >\0 \b: %s
->>>>>>>48 ubyte 2
-# size-terminated Unicode string normally followed by "SPAD"
->>>>>>>>(82.s) lestring16 x \b: %s
-# Format Logical Block Size (512,1024)
-#>>>>>84 uleshort =1024 \b, block size %u
->>>>>84 uleshort !1024 \b, block size %u
-# Media Date of MTF_DATE_TIME type with 5 bytes
-#>>>>>>88 ubequad x DATE %16.16llx
-# MTF Major Version (1)
-#>>>>>>93 ubyte x \b, MFT version %x
-#
-
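-# A minimal Python sketch (not part of the original magic file; the helper name
-# ntbackup_media_name is illustrative) extracting the media name from the MTF
-# TAPE descriptor block with the fields decoded above: string type at 48
-# (1=ANSI, 2=Unicode), media name size at 68 and media name offset at 70.
-import struct
-
-def ntbackup_media_name(path):
-    with open(path, 'rb') as f:
-        block = f.read(1024)
-    if block[:4] != b'TAPE':
-        raise ValueError('not an NTBackup/MTF archive')
-    string_type = block[48]
-    size, offset = struct.unpack_from('<HH', block, 68)
-    if size == 0 or offset == 0:
-        return None
-    raw = block[offset:offset + size]
-    return raw.decode('utf-16-le' if string_type == 2 else 'latin-1', 'replace')
-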
-# URL: https://en.wikipedia.org/wiki/PaintShop_Pro
-# Reference: https://www.cryer.co.uk/file-types/p/pal.htm
-# Created by: Joerg Jenderek
-# Note: there exist other color palette formats that also use the .pal extension
-0 string JASC-PAL\r\n PaintShop Pro color palette
-#!:mime text/plain
-# the PspPalette extension is used by newer PaintShopPro versions (probably 8 and later)
-!:ext pal/PspPalette
-# 2nd line contains palette file version. For example "0100"
->10 string !0100 \b, version %.4s
-# third line contains the number of colours: 16 256 ...
->16 string x \b, %.3s colors
-
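-# A minimal Python sketch (not part of the original magic file; the helper name
-# read_jasc_pal is illustrative) for the text layout described above: line 1
-# "JASC-PAL", line 2 the version (e.g. "0100"), line 3 the colour count, then
-# one "R G B" triple per line.
-def read_jasc_pal(path):
-    with open(path, 'r', encoding='ascii', errors='replace') as f:
-        lines = [line.strip() for line in f]
-    if not lines or lines[0] != 'JASC-PAL':
-        raise ValueError('not a PaintShop Pro palette')
-    version, count = lines[1], int(lines[2])
-    colors = [tuple(int(v) for v in line.split()) for line in lines[3:3 + count]]
-    return version, colors
-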
-# URL: https://en.wikipedia.org/wiki/Innosetup
-# Reference: https://github.com/jrsoftware/issrc/blob/master/Projects/Undo.pas
-# Created by: Joerg Jenderek
-# Note: created by installers detected as "InnoSetup self-extracting archive" inside ./msdos
-# TrID labels the entry as "Inno Setup Uninstall Log"
-# TUninstallLogID
-0 string Inno\ Setup\ Uninstall\ Log\ (b) InnoSetup Log
-!:mime application/x-innosetup
-# unins000.dat, unins001.dat, ...
-!:ext dat
-# " 64-bit" variant
->0x1c string >\0 \b%.7s
-# AppName[0x80] like "Minimal SYStem", ClamWin Free Antivirus , ...
->0xc0 string x %s
-# AppId[0x80] is similar to AppName or
-# GUID like {4BB0DCDC-BC24-49EC-8937-72956C33A470} start with left brace
->0x40 ubyte 0x7b
->>0x40 string x %-.38s
-# do not know how this log version correlates to program version
->0x140 ulelong x \b, version %#x
-# NumRecs
-#>0x144 ulelong x \b, %#4.4x records
-# EndOffset means files size
->0x148 ulelong x \b, %u bytes
-# Flags 5 25h 35h
-#>0x14c ulelong x \b, flags %8.8x
-# Reserved: array[0..26] of Longint
-# the non Unicode HighestSupportedVersion may never become greater than or equal to 1000
->0x140 ulelong <1000
-# hostname
->>0x1d6 pstring x \b, %s
-# user name
->>>&0 pstring x \b\%s
-# directory like C:\Program Files (x86)\GnuWin32
->>>>&0 pstring x \b, "%s"
-# version 1000 or higher implies unicode
->0x140 ulelong >999
-# hostname
->>0x1db lestring16 x \b, %-.9s
-# UTF string variant with preceding fe??ffFFff
->>0x1db search/43 \xFF\xFF\xFF
-# user name
->>>&0 lestring16 x \b\%-.9s
->>>&0 search/43 \xFF\xFF\xFF
-# directory like C:\Program Files\GIMP 2
->>>>&0 lestring16 x \b, %-.42s
-
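-# A minimal Python sketch (not part of the original magic file; the helper name
-# innosetup_log_header is illustrative) of the fixed header fields used above:
-# ID string at 0, AppId at 0x40, AppName at 0xc0, log version at 0x140 and the
-# file size (EndOffset) at 0x148.
-import struct
-
-def innosetup_log_header(path):
-    with open(path, 'rb') as f:
-        hdr = f.read(0x150)
-    def cstr(raw):
-        return raw.split(b'\x00', 1)[0].decode('latin-1')
-    ident = cstr(hdr[0x00:0x40])
-    app_id = cstr(hdr[0x40:0xc0])
-    app_name = cstr(hdr[0xc0:0x140])
-    version, num_recs, end_offset = struct.unpack_from('<3L', hdr, 0x140)
-    return {'id': ident, 'app_id': app_id, 'app_name': app_name,
-            'version': version, 'records': num_recs, 'bytes': end_offset}
-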
-# URL: https://jrsoftware.org/ishelp/index.php?topic=setup_signeduninstaller
-# Reference:https://github.com/jrsoftware/issrc/blob/main/Projects/Struct.pas
-# From: Joerg Jenderek
-0 string Inno\ Setup\ Messages\ (
-# null-padded up to the 0x40 boundary
->0x38 quad 0 InnoSetup messages
-!:mime application/x-innosetup-msg
-# unins000.msg, unins001.msg, ...
-!:ext msg
-# version like 5.1.1 5.1.11 5.5.0 5.5.3 6.0.0
->>0x15 string x \b, version %.5s
-# look for the 6th char of the version string or the terminating right parenthesis
->>>0x1a ubyte !0x29 \b%c
-# NumMessages
->>0x40 ulelong x \b, %u messages
-# TotalSize: Cardinal;
-#>>0x44 ulelong x \b, TotalSize %u
-# NotTotalSize: Cardinal;
-#>>0x48 ulelong x \b, NotTotalSize %u
-# CRCMessages: Longint;
-#>>0x4C ulelong x \b, CRC %#x
->>0x40 ulelong x
-# (u) after version means unicoded messages
->>>0x1c search/2 (u) (UTF-16),
->>>>0x50 lestring16 x %s
-# ASCII coded message
->>>0x1c default x (ASCII),
->>>>0x50 string x %s
-
-# Windows Imaging (WIM) Image
-# Update: Joerg Jenderek at Mar 2019, 2021
-# URL: https://en.wikipedia.org/wiki/Windows_Imaging_Format
-# http://fileformats.archiveteam.org/wiki/Windows_Imaging_Format
-# Reference: https://download.microsoft.com/download/f/e/f/
-# fefdc36e-392d-4678-9e4e-771ffa2692ab/Windows%20Imaging%20File%20Format.rtf
-# Note: verified by like `7z t boot.wim` `wiminfo install.esd --header`
-0 string MSWIM\000\000\000
->0 use wim-archive
-# https://wimlib.net/man1/wimoptimize.html
-0 string WLPWM\000\000\000
->0 use wim-archive
-0 name wim-archive
-# _WIMHEADER_V1_PACKED ImageTag[8]
->0 string x Windows imaging
-!:mime application/x-ms-wim
-# To avoid, with file version 5.36, errors like
-# Magdir/windows, 760: Warning: Current entry does not yet have a description
-# file: could not find any valid magic files! (No error)
-# split WIM
->16 ulelong &0x00000008 (SWM
-!:ext swm
-# usPartNumber; 1, unless the file was split into multiple parts
->>40 uleshort x \b %u
-# usTotalParts; The total number of WIM file parts in a spanned set
->>42 uleshort x \b of %u) image
-# non split WIM
->16 ulelong ^0x00000008
-# https://wimlib.net/man1/wimmount.html
-# solid WIMs; version 3584; usually LZMS-compressed and with the .esd extension
->>12 ulelong 3584 (ESD) image
-!:ext esd
->>12 ulelong !3584 (
-# look for archive member RunTime.xml like in Microsoft.Windows.Cosa.Desktop.Client.ppkg
->>>156 search/68233/s RunTime.xml \bWindows provisioning package)
-!:ext ppkg
-# if it is not a Windows provisioning package, then it is a WIM
->>>156 default x \bWIM) image
-# second disk image part created by Microsoft's RecoveryDrive.exe has name Reconstruct.WIM2
-!:ext wim/wim2
->0 string/b WLPWM\000\000\000 \b, wimlib pipable format
-# cbSize size of the WIM header in bytes like 208
-#>8 ulelong x \b, headersize %u
-# dwVersion version of the WIM file 00010d00h~1.13 00000e00h~0.14
->14 uleshort x v%u
->13 ubyte x \b.%u
-# dwImageCount; The number of images contained in the WIM file
->44 ulelong >1 \b, %u images
-# dwBootIndex
-# 1-based index of the bootable image of the WIM, or 0 if no image is bootable
->0x78 ulelong >0 \b, bootable no. %u
-# dwFlags
-#>16 ulelong x \b, flags %#8.8x
-#define FLAG_HEADER_COMPRESSION 0x00000002
-#define FLAG_HEADER_READONLY 0x00000004
-#define FLAG_HEADER_SPANNED 0x00000008
-#define FLAG_HEADER_RESOURCE_ONLY 0x00000010
-#define FLAG_HEADER_METADATA_ONLY 0x00000020
-#define FLAG_HEADER_WRITE_IN_PROGRESS 0x00000040
-#define FLAG_HEADER_RP_FIX 0x00000080 reparse point fixup
-#define FLAG_HEADER_COMPRESS_RESERVED 0x00010000
-#define FLAG_HEADER_COMPRESS_XPRESS 0x00020000
-#define FLAG_HEADER_COMPRESS_LZX 0x00040000
-#define FLAG_HEADER_COMPRESS_LZMS 0x00080000
-#define FLAG_HEADER_COMPRESS_XPRESS2 0x00100000 wimlib-1.13.0\include\wimlib\header.h
-# XPRESS, with small chunk size
->16 ulelong &0x00100000 \b, XPRESS2
->16 ulelong &0x00080000 \b, LZMS
->16 ulelong &0x00040000 \b, LZX
->16 ulelong &0x00020000 \b, XPRESS
->16 ulelong &0x00000002 compressed
->16 ulelong &0x00000004 \b, read only
->16 ulelong &0x00000010 \b, resource only
->16 ulelong &0x00000020 \b, metadata only
->16 ulelong &0x00000080 \b, reparse point fixup
-#>16 ulelong &0x00010000 \b, RESERVED
-# dwCompressionSize; Uncompressed chunk size for resources or 0 if uncompressed
-#>20 ulelong >0 \b, chunk size %u bytes
-# gWIMGuid
-#>24 ubequad x \b, GUID %#16.16llx
-#>>32 ubequad x \b%16.16llx
-# rhOffsetTable; the location of the resource lookup table
-# wim_reshdr_disk[24]= u8 size_in_wim[7] + u8 flags + le64 offset_in_wim + le64 uncompressed_size
-#>48 ubequad x \b, rhOffsetTable %#16.16llx
-# rhXmlData; the location of the XML data
-#>0x50 ulelong x \b, at %#8.8x
-# NOT WORKING \xff\xfe<\0W\0I\0M\0
-#>(0x50.l) ubequad x \b, xml=%16.16llx
-# rhBootMetadata; the location of the metadata resource
-#>0x60 ubequad x \b, rhBootMetadata %#16.16llx
-# rhIntegrity; the location of integrity table used to verify files
-#>0x7c ubequad x \b, rhIntegrity %#16.16llx
-# Unused[60]
-#>148 ubequad !0 \b,unused %#16.16llx
-#
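The offsets probed above map directly onto the packed WIM header. The following is a minimal C sketch, assuming the little-endian layout the comments describe (cbSize at 8, version bytes at 13/14, dwFlags at 16, part numbers at 40/42, dwImageCount at 44, dwBootIndex at 0x78); it is not the authoritative _WIMHEADER_V1_PACKED definition.

/* Dump a few WIM header fields at the offsets used by the magic above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint16_t rd16(const unsigned char *p) { return (uint16_t)(p[0] | p[1] << 8); }
static uint32_t rd32(const unsigned char *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(int argc, char **argv)
{
    unsigned char h[208];                     /* cbSize is typically 208 */
    FILE *f;

    if (argc != 2 || (f = fopen(argv[1], "rb")) == NULL)
        return 1;
    if (fread(h, 1, sizeof(h), f) != sizeof(h)) {
        fclose(f);
        return 1;
    }
    fclose(f);
    if (memcmp(h, "MSWIM\0\0\0", 8) != 0 && memcmp(h, "WLPWM\0\0\0", 8) != 0)
        return 1;                             /* neither plain nor pipable WIM */
    printf("WIM, header %u bytes, v%u.%u\n", rd32(h + 8), rd16(h + 14), h[13]);
    printf("flags %#x%s, %u image(s), part %u of %u\n",
        rd32(h + 16), (rd32(h + 16) & 0x08) ? " (split)" : "",
        rd32(h + 44), rd16(h + 40), rd16(h + 42));
    if (rd32(h + 0x78) > 0)
        printf("bootable image no. %u\n", rd32(h + 0x78));
    return 0;
}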
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Windows_Easy_Transfer
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/m/mig.trid.xml
-# Note: called "Windows Easy Transfer migration data" by TrID,
-# "Migration Store" or "EasyTransfer file" by Microsoft
-0 string 1giM Windows Easy Transfer migration data
-#!:mime application/octet-stream
-!:mime application/x-ms-mig
-!:ext mig
->0x18 string =MRTS without password
-# data offset with 1 space at end
->>0x1c ulelong+0x38 x \b, at %#x
-# look for zlib compressed data by ./compress
->>(0x1c.l+0x38) ubyte x
->>>&-1 indirect x
-# in password-protected examples, MRTS appears some bytes further in
->0x18 string !MRTS with password
-# look for first MRTS tag
->0x18 search/29/b MRTS
-# probably first file name length like 178, ...
-#>>&0 ulelong x \b, 1st length %u
-# URL like File\C:\Users\nutzer\AppData\Roaming\Microsoft\Internet Explorer\Quick Launch\desktop.ini
->>&20 lestring16 x \b, 1st %-s
-
-# Microsoft SYLK
-# https://en.wikipedia.org/wiki/SYmbolic_LinK_(SYLK)
-# https://outflank.nl/upload/sylksum.txt
-0 string ID;P Microsoft SYLK program
->4 string >0 \b, created by %s
-!:ext slk/sylk
-
-# Summary: Windows Performance Monitor Alert
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/Performance_Monitor
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p/pma.trid.xml
-# Note: called "Windows Performance Monitor Alert" by TrID
-0 ubelong =0xDC058340
->4 ubyte =0 Windows Performance Monitor Alert
-#!:mime application/octet-stream
-# https://www.thoughtco.com/mime-types-by-content-type-3469108
-# https://filext.com/file-extension/PAM
-!:mime application/x-perfmon
-#!:mime application/x-ms-pma
-!:ext pma
-# metric type like: "BrowserMetrics" "CrashpadMetrics" "SetupMetrics"
->>80 string x \b, "%s"
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/InstallShield
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/i/ins.trid.xml
-# Note: also contains keywords like: BATCH_INSTALL ISVERSION LOGHANDLE SRCDIR SRCDISK WINDIR WINSYSDISK
-0 ubelong 0xB8C90C00 InstallShield Script
-#!:mime application/octet-stream
-!:mime application/x-installshield-ins
-# like test.ins Setup.ins
-!:ext ins
-# UNKNOWN like: 160034121de07e00 1600341260befe00 16003412e0783700
-# 5000010021083f00 50000100b0335600 50000100cbfdf800 50000100dfbc4700
-#>4 ubequad x \b, at 4 %#16.16llx
-# copyright text like: "Stirling Technologies, Inc. (c) 1990-1994"
-# "InstallSHIELD Software Corporation (c) 1990-1997"
->13 pstring/h x "%s"
-# look for specific ASCII variable names
->1 search/0x121/s SRCDIR \b, variable names:
-# 1st like: SRCDIR
->>&-4 leshort x #%u
->>&-2 pstring/h x %s
-# 2nd like: SRCDISK
->>>&0 leshort x #%u
->>>&2 pstring/h x %s
-# 3rd like: TARGETDISK
->>>>&0 leshort x #%u
->>>>&2 pstring/h x %s
-# 4th like: TARGETDIR
-#>>>>>&0 leshort x #%u
-#>>>>>&2 pstring/h x %s
-# 5th like: WINDIR
-#>>>>>>&0 leshort x #%u
-#>>>>>>&2 pstring/h x %s
-# 6th like: WINDISK
-#>>>>>>>&0 leshort x #%u
-#>>>>>>>&2 pstring/h x %s
-# 7th like: WINSYSDIR
-#>>>>>>>>&0 leshort x #%u
-#>>>>>>>>&2 pstring/h x %s
-# ... LOGHANDLE
->0 ubelong x ...
-#
-
-# Summary: Microsoft Remote Desktop Protocol connection
-# From: Joerg Jenderek
-# URL: https://learn.microsoft.com/en-us/windows-server/remote/remote-desktop-services/clients/rdp-files
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/r/rdp.trid.xml
-# Note: called "Remote Desktop Connection Settings" by TrID
-0 string screen\040mode\040id:i: Remote Desktop Protocol connection
-#!:mime text/plain
-!:mime text/x-ms-rdp
-!:ext rdp
-# Screen mode: 1~session appears in a window, 2~session appears full screen
->17 string 1 \b, window mode
->17 string 2 \b, full screen mode
-
-0 guid 7B5C52E4-D88C-4DA7-AEB1-5378D02996D3 Microsoft OneNote
-!:ext one
-!:mime application/onenote
-0 guid 43FF2FA1-EFD9-4C76-9EE2-10EA5722765F Microsoft OneNote Revision Store File
-
-# Microsoft XAML Binary Format
-# From: Alexandre Iooss <erdnaxe@crans.org>
-# URL: https://github.com/WalkingCat/XbfDump/blob/8832d2ffcaa738434d803fefa2ba99d3af37ed29/xbf_data.h
-0 string XBF\0
->12 ulelong <0xFF
->>16 ulelong <0xFF Microsoft XAML Binary Format
-!:ext xbf
->>>12 ulelong x %d
->>>16 ulelong x \b.%d
->>>4 ulelong x \b, metadata size: %d bytes
->>>8 ulelong x \b, node size: %d bytes
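A small reader for the same XBF header fields (metadata size at 4, node size at 8, major/minor version at 12/16) could look like the sketch below. It assumes a little-endian header as in the XbfDump reference linked above and is not a verified implementation.

/* Print the XBF version and the two size fields from the fixed header. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t le32(const unsigned char *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(int argc, char **argv)
{
    unsigned char h[20];
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(h, 1, sizeof(h), f) != sizeof(h) ||
        memcmp(h, "XBF\0", 4) != 0) {
        if (f) fclose(f);
        return 1;
    }
    fclose(f);
    printf("Microsoft XAML Binary Format %u.%u, metadata %u bytes, nodes %u bytes\n",
        le32(h + 12), le32(h + 16), le32(h + 4), le32(h + 8));
    return 0;
}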
-
-# Metaswitch MetaView Service Assurance Server exports
-0 string MetaView\x20Service\x20Assurance\x20Export\x20File MetaView SAS export
->39 string Version\x20
->>47 byte x \b, version %c
-
-# Active Directory Group Policy Registry Policy File Format
-# From: Yuuta Liang <yuuta@yuuta.moe>
-# URL: https://learn.microsoft.com/en-us/previous-versions/windows/desktop/policy/registry-policy-file-format
-0 string PReg
->4 lelong x Group Policy Registry Policy, Version=%d
diff --git a/contrib/libs/libmagic/magic/Magdir/wireless b/contrib/libs/libmagic/magic/Magdir/wireless
deleted file mode 100644
index badb73bb85..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/wireless
+++ /dev/null
@@ -1,7 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: wireless,v 1.2 2009/09/19 16:28:13 christos Exp $
-# wireless-regdb: file(1) magic for CRDA wireless-regdb file format
-#
-0 string RGDB CRDA wireless regulatory database file
->4 belong 19 (Version 1)
diff --git a/contrib/libs/libmagic/magic/Magdir/wordprocessors b/contrib/libs/libmagic/magic/Magdir/wordprocessors
deleted file mode 100644
index 3a2e1ceaa8..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/wordprocessors
+++ /dev/null
@@ -1,630 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: wordprocessors,v 1.34 2023/01/24 20:13:40 christos Exp $
-# wordprocessors: file(1) magic for word processors.
-#
-####### PWP file format used on Smith Corona Personal Word Processors:
-2 string \040\040\040\040\040\040\040\040\040\040\040ML4D\040'92 Smith Corona PWP
->24 byte 2 \b, single spaced
->24 byte 3 \b, 1.5 spaced
->24 byte 4 \b, double spaced
->25 byte 0x42 \b, letter
->25 byte 0x54 \b, legal
->26 byte 0x46 \b, A4
-
-# URL: http://fileformats.archiveteam.org/wiki/Microsoft_Works_Word_Processor
-# reference: http://mark0.net/download/triddefs_xml.7z
-# /defs/w/wps-works-dos.trid.xml
-# From: Joerg Jenderek
-# Note: older versions not based on OLE 2 Compound documents
-0 ubeshort =0x01FE
->112 ubeshort =0x0100 Microsoft Works 1-3 (DOS) or 2 (Windows) document
-# title like THE GREAT KHAN GAME
->>0x100 string x %s
-!:mime application/vnd-ms-works
-#!:mime application/x-msworks
-# https://www.macdisk.com/macsigen.php
-!:apple ????AWWP
-!:ext wps
-
-# Corel/WordPerfect
-# URL: https://en.wikipedia.org/wiki/WordPerfect
-# Reference: https://github.com/OneWingedShark/WordPerfect/blob/master/doc/SDK_Help/FileFormats/WPFF_DocumentStructure.htm
-# http://mark0.net/download/triddefs_xml.7z/defs/w/wp-generic.trid.xml
-0 string \xffWPC
-# WordPerfect
->8 byte 1
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/w/wpm-macro.trid.xml
-# Note: there exist other macro variants
->>9 byte 1 WordPerfect macro
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-wpm
-# like: ALTD.WPM ENDFOOT.WPM FOOTEND.WPM LABELS.WPM REVEALTX.WPM
-!:ext wpm
-# Note: used in WordPerfect 5.1; there exist other FIL variants
->>9 byte 2 WordPerfect help file
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-help
-# like: WPHELP.FIL
-!:ext fil
-# pointer to document area like: 10h
->>>4 ulelong !0x10 \b, at %#x document area
->>9 byte 3 WordPerfect keyboard file
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-keyboard
-!:ext wpk
-# no document area, so the pointer points to the end of the file; this is the file size like: 23381 2978 32835 3355 3775 919
->>>4 ulelong x \b, %u bytes
->>9 byte 4 WordPerfect VAX keyboard definition
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-keyboard
-#!:ext foo
-# URL: http://fileformats.archiveteam.org/wiki/WordPerfect
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/w/wpd-doc-gen.trid.xml
->>9 byte 10 WordPerfect document
-# https://www.iana.org/assignments/media-types/application/vnd.wordperfect
-!:mime application/vnd.wordperfect
-#!:apple ????WPC2
-# TODO: distinguish different suffix
-!:ext wpd/wpt/wkb/icr/tut/sty/tst/crs
->>9 byte 11 WordPerfect dictionary
->>9 byte 12 WordPerfect thesaurus
->>9 byte 13 WordPerfect block
->>9 byte 14 WordPerfect rectangular block
->>9 byte 15 WordPerfect column block
->>9 byte 16 WordPerfect printer data
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-prs
-# like: STANDARD.PRS WORKBOOK.PRS
-!:ext prs
-# like: "Standard Printer" "Workbook Printer"
->>>0x64 pstring/B >A "%s"
-#>>9 byte 18 WordPerfect Prefix information file
-# printer resource .ALL
->>9 byte 19 WordPerfect printer data
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-all
-!:ext all
-# display Resource
->>9 byte 20 WordPerfect driver resource data
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-drs
-# like: WPSMALL.DRS
-!:ext drs
-# pointer to index area with string "smalldrs" like: 46h
->>>4 uleshort !0x46 \b, at %#x index area
->>9 byte 21 WordPerfect Overlay file
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-fil
-# like: WP.FIL
-!:ext fil
-# URL: http://fileformats.archiveteam.org/wiki/WordPerfect_Graphics
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/b/bitmap-wpg.trid.xml
-# Note: called "WordPerfect Graphics bitmap" by TrID and
-# "WordPerfect Graphics Metafile" by DROID via x-fmt/395 fmt/1042
-# "WPG (Word Perfect Graphics)" by ImageMagick `identify -verbose BUTTRFLY.WPG`
->>9 byte 22 WordPerfect graphic image
-# TODO: skip DROID x-fmt-395-signature-id-132.wpg by check for existing document area
-#>>>4 ulelong >15 WordPerfect_graphic_OK
-#!:mime application/octet-stream
-# http://extension.nirsoft.net/wpg
-!:mime image/x-wordperfect-graphics
-# https://reposcope.com/mimetype/application/x-wpg
-#!:mime application/x-wpg
-# like: BUTTRFLY.WPG STAR-5.WPG input.wpg WORDPFCT.WPG
-!:ext wpg
-# pointer to document area like: 10h 1Ah
->>>4 ulelong !0x1A \b, at %#x document area
->>9 byte 23 WordPerfect hyphenation code
->>9 byte 24 WordPerfect hyphenation data
->>9 byte 25 WordPerfect macro resource data
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-mrs
-# like: WP.MRS
-!:ext mrs
->>9 byte 27 WordPerfect hyphenation lex
->>9 byte 29 WordPerfect wordlist
->>9 byte 30 WordPerfect equation resource data
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-qrs
-# like: WQ.QRS wpDE.qrs wpen.qrs
-!:ext qrs
-# jump to document area with some marker and equation
->>>(4.l) ubyte x
-# equation like: "Fraction: x OVER y"
->>>>&1 string >A (...%-.19s...)
-# pointer to document area like: 17C4h
->>>4 ulelong x \b, at %#x document area
-#>>9 byte 31 reserved
-#>>9 byte 32 WordPerfect VAX .SET
->>9 byte 33 WordPerfect spell rules
->>9 byte 34 WordPerfect dictionary rules
-#>>9 byte 35 reserved
-# video resource device driver
-# Note: filetype 26 for VRS and filetype 36 for WPD are apparently wrong
->>9 byte 36 WordPerfect Video Resource
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-vrs
-# like: STANDARD.VRS
-!:ext vrs
-# like: "IBM CGA (& compatibles)"
->>>0x20 string >A "%.23s"
->>9 byte 39 WordPerfect spell rules (Microlytics)
-#>>9 byte 40 reserved
->>9 byte 41 WordPerfect Install options
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-ins
-# like: WP51.INS
-!:ext ins
-# probably default directory name like: "C:\WP51\"
->>>0x12 string >A "%.8s"
-# maybe mouse driver for WP5.1
->>9 byte 42 WordPerfect Resource
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-irs
-# like: STANDARD.IRS
-!:ext irs
-# like: "Mouse Driver (MOUSE.COM)"
->>>0x28 string >A "%.24s"
->>9 byte 43 WordPerfect settings file
-# maybe Macintosh WP2.0 document
->>9 byte 44 WordPerfect 3.5 document
-!:mime application/vnd.wordperfect
-!:apple ????WPD3
-# like: WP3.wpd
-!:ext wpd
->>9 byte 45 WordPerfect 4.2 document
-# External spell code module (WP5.1)
-#>>9 byte 46 WordPerfect external spell
-# external spell dictionary .LEX
-#>>9 byte 47 WordPerfect external spell dictionary
-# Macintosh SOFT graphics file (SOFT, Sequential Object Format)
-#>>9 byte 48 WordPerfect SOFT graphics
-#>>9 byte 49 reserved
-#>>9 byte 50 reserved
-# WPWin 5.1 Application Resource Library added for WPWin 5.1
-#>>9 byte 51 WordPerfect application resource library
->>9 byte 69 WordPerfect dialog file
-# From: Joerg Jenderek
-# Note: found in sub directory WritingTools inside WordPerfect 2021 program directory
->>9 byte 70 WordPerfect Writing Tools
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-cbt
-# like: Wt13cbede.cbt Wt13cbeit.cbt Wt13cbefr.cbt WT21cbede.cbt Wt13cbeEN.CBD WT21cbeEN.CBD
-!:ext cbd/cbt
->>9 byte 76 WordPerfect button bar
->>9 default x
->>>9 byte x Corel WordPerfect: Unknown filetype %d
-# Corel Shell
->8 byte 2
->>9 byte 1 Corel shell macro
->>9 byte 10 Corel shell definition
->>9 default x
->>>9 byte x Corel Shell: Unknown filetype %d
-# Corel Notebook
->8 byte 3
->>9 byte 1 Corel Notebook macro
->>9 byte 2 Corel Notebook help file
->>9 byte 3 Corel Notebook keyboard file
->>9 byte 10 Corel Notebook definition
->>9 default x
->>>9 byte x Corel Notebook: Unknown filetype %d
-# Corel Calculator
->8 byte 4
->>9 byte 2 Corel Calculator help file
->>9 default x
->>>9 byte x Corel Calculator: Unknown filetype %d
-# Corel File Manager
->8 byte 5
->>9 default x
->>>9 byte x Corel File Manager: Unknown filetype %d
-# Corel Calendar
->8 byte 6
->>9 byte 2 Corel Calendar help file
->>9 byte 10 Corel Calendar data file
->>9 default x
->>>9 byte x Corel Calendar: Unknown filetype %d
-# Corel Program Editor/Ed Editor
->8 byte 7
->>9 byte 1 Corel Editor macro
->>9 byte 2 Corel Editor help file
->>9 byte 3 Corel Editor keyboard file
->>9 byte 25 Corel Editor macro resource file
->>9 default x
->>>9 byte x Corel Program Editor/Ed Editor: Unknown filetype %d
-# Corel Macro Editor
->8 byte 8
->>9 byte 1 Corel Macro editor macro
->>9 byte 2 Corel Macro editor help file
->>9 byte 3 Corel Macro editor keyboard file
->>9 default x
->>>9 byte x Corel Macro Editor: Unknown filetype %d
-# Corel Plan Perfect
->8 byte 9
->>9 default x
->>>9 byte x Corel Plan Perfect: Unknown filetype %d
-# Corel DataPerfect
->8 byte 10
-# CHECK: Don't these belong to product 9?
->>9 byte 1 Corel PlanPerfect macro
->>9 byte 2 Corel PlanPerfect help file
->>9 byte 3 Corel PlanPerfect keyboard file
->>9 byte 10 Corel PlanPerfect worksheet
->>9 byte 15 Corel PlanPerfect printer definition
->>9 byte 18 Corel PlanPerfect graphic definition
->>9 byte 19 Corel PlanPerfect data
->>9 byte 20 Corel PlanPerfect temporary printer
->>9 byte 25 Corel PlanPerfect macro resource data
->>9 default x
->>>9 byte x Corel DataPerfect: Unknown filetype %d
-# Corel Mail
->8 byte 11
->>9 byte 2 Corel Mail help file
->>9 byte 5 Corel Mail distribution list
->>9 byte 10 Corel Mail out box
->>9 byte 11 Corel Mail in box
->>9 byte 20 Corel Mail users archived mailbox
->>9 byte 21 Corel Mail archived message database
->>9 byte 22 Corel Mail archived attachments
->>9 default x
->>>9 byte x Corel Mail: Unknown filetype %d
-# Corel Printer
->8 byte 12
->>9 byte 11 Corel Printer temporary file
->>9 default x
->>>9 byte x Corel Printer: Unknown filetype %d
-# Corel Scheduler
->8 byte 13
->>9 byte 2 Corel Scheduler help file
->>9 byte 10 Corel Scheduler in file
->>9 byte 11 Corel Scheduler out file
->>9 default x
->>>9 byte x Corel Scheduler: Unknown filetype %d
-# Corel WordPerfect Office
->8 byte 14
->>9 byte 10 Corel GroupWise settings file
->>9 byte 17 Corel GroupWise directory services
->>9 byte 43 Corel GroupWise settings file
->>9 default x
->>>9 byte x Corel WordPerfect Office: Unknown filetype %d
-# Corel DrawPerfect
-# URL: http://fileformats.archiveteam.org/wiki/Corel_Presentations
-# Update: Joerg Jenderek
->8 byte 15
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/shw-wp-2.trid.xml
-# Note: called "WordPerfect Presentations (v2)" by TrID and
-# "Corel Presentation" with version "7-8-9" by DROID via PUID fmt/877
->>9 byte 10 WordPerfect Presentation
-#!:mime application/octet-stream
-#!:mime application/vnd.wordperfect
-!:mime application/x-drawperfect-shw
-# like: BENEFITS.SHW chartbar.shw chartbul.shw chartgal.shw chartorg.shw fig-demo.shw figurgal.shw mastrgal.shw scuba.shw tutorial.shw
-!:ext shw
-# pointer to document area like: 10h
->>>4 ulelong !0x10 \b, at %#x document area
-# according to TrID this is nil
->>>12 ulelong !0 \b, at 0xC %#x
-# search for embedded WP file like in tutorial.shw
-#>>>16 search/638/sb \xffWPC WPC_MAGIC_FOUND
-# GRR: indirect call leads to recursion! WHY?
-#>>>>&0 indirect x \b; contains
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/s/shw-wp-3.trid.xml
-# Note: called "WordPerfect/Corel Presentations (v3)" by TrID and
-# "Corel Presentation" with version "3" by DROID via PUID fmt/878
->>9 byte 15 Corel Presentation
-#!:mime application/octet-stream
-#!:mime application/vnd.wordperfect
-!:mime application/x-drawperfect-shw
-# like: FIG_ANIM.SHW presenta.shw
-!:ext shw
-# pointer to document area like: 1ah
->>>4 ulelong !0x1a \b, at %#x document area
-# according to TrID this is nil
->>>12 ulelong !0 \b, at 0xC %#x
-# reserved like: 3
->>>16 ulelong !0x3 \b, at 0x10 %#x
-# file size, not including pad characters at EOF
->>>0x14 ulelong x \b, %u bytes
-# search for embedded WP file like in foo
-#>>>24 search/638/sb \xffWPC WPC_MAGIC_FOUND
-# GRR: indirect call leads to recursion! WHY?
-#>>>>&0 indirect x \b; contains
-# embedded inside Compound Document variant handled by ./ole2compounddocs
->>9 byte 16 Corel Presentation (embedded)
-#!:mime application/octet-stream
-#!:mime application/vnd.wordperfect
-!:mime application/x-corelpresentations
-# like: PerfectOffice_MAIN
-!:ext /
-# pointer to document area like: 1ah
->>>4 ulelong !0x1a \b, at %#x document area
->>>12 ulelong !0 \b, at 0xC %#x
-# reserved like: 3
->>>16 ulelong !0x3 \b, at 0x10 %#x
-# file size, not including pad characters at EOF
->>>0x14 ulelong x \b, %u bytes
-# search for embedded WP file
-#>>>24 search/638/sb \xffWPC WPC_MAGIC_FOUND
-# GRR: indirect call leads to recursion! WHY?
-#>>>>&0 indirect x \b; contains
->>9 default x
->>>9 byte x Corel DrawPerfect: Unknown filetype %d
-# Corel LetterPerfect
->8 byte 16
->>9 default x
->>>9 byte x Corel LetterPerfect: Unknown filetype %d
-# Corel Terminal
->8 byte 17
->>9 byte 10 Corel Terminal resource data
->>9 byte 11 Corel Terminal resource data
->>9 byte 43 Corel Terminal resource data
->>9 default x
->>>9 byte x Corel Terminal: Unknown filetype %d
-# Corel loadable file
->8 byte 18
->>9 byte 10 Corel loadable file
->>9 byte 11 Corel GUI loadable text
->>9 byte 12 Corel graphics resource data
->>9 byte 13 Corel printer settings file
->>9 byte 14 Corel port definition file
->>9 byte 15 Corel print queue parameters
->>9 byte 16 Corel compressed file
->>9 default x
->>>9 byte x Corel loadable file: Unknown filetype %d
->>15 byte 0 \b, optimized for Intel
->>15 byte 1 \b, optimized for Non-Intel
-# Network service
->8 byte 20
->>9 byte 10 Corel Network service msg file
->>9 byte 11 Corel Network service msg file
->>9 byte 12 Corel Async gateway login msg
->>9 byte 14 Corel GroupWise message file
->>9 default x
->>>9 byte x Corel Network service: Unknown filetype %d
-# GroupWise
->8 byte 31
->>9 byte 20 GroupWise admin domain database
->>9 byte 21 GroupWise admin host database
->>9 byte 23 GroupWise admin remote host database
->>9 byte 24 GroupWise admin ADS deferment data file
->>9 default x
->>>9 byte x GroupWise: Unknown filetype %d
-# Corel Writing Tools WT*.*
-# From: Joerg Jenderek
-# URL: https://support.corel.com/hc/en-us/articles/215876258-Writing-Tools-Spell-Check-Dictionary-does-not-work-in-WordPerfect-X5
-# http://wordperfect.helpmax.net/en/editing-and-formatting-documents/using-the-writing-tools/working-with-user-word-lists/
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/u/uwl-wp.trid.xml
->8 byte 32
->>9 byte 10 Corel Writing Tools User Word List
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-wordlist
-# personal user word list UWL under user directory like: WTDE.UWL WTUS.UWL WT21DE.UWL WT21US.UWL WT13DE.UWL ...
-# and "template" SAV/HWL variant under program directory like: wt13en.hwl Wt13de.sav Wt13it.sav wt13ru.sav WT21us.sav Wtcz.sav ...
-!:ext uwl/hwl/sav
-# jump to document area with some marker and word list
->>>(4.l) ubyte x
-# look for the beginning of the word list, which mostly starts with the letter a as UTF-16, like: Wt13es.sav
-# but not found in the Russian wt13ru.sav
->>>>&0 search/91/sb a\0
-# word list starting like: "acsesory\022accessory.\001\026acomodate\026accommodate4\001"
->>>>>&0 lestring16 x (...%-.33s...)
-# pointer to document area like: 200h
->>>4 ulelong !0x200 \b, at %#x document area
-# file size, not including pad characters at EOF
->>>0x14 uleshort x \b, %u bytes
-# IntelliTAG
->8 byte 33
->>9 byte 10 IntelliTAG (SGML) compiled DTD
->>9 default x
->>>9 byte x IntelliTAG: Unknown filetype %d
-# Summary: Corel WordPerfect WritingTools advise part
-# From: Joerg Jenderek
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/a/adv-wp.trid.xml
->8 byte 34
->>9 byte 11 Corel WordPerfect dictionary advise
-#!:mime application/octet-stream
-!:mime application/x-wordperfect-adv
-#!:mime application/vnd.wordperfect.adv
-# like: WT21de.adv Wt13de.adv Wt13es.adv Wt13fr.adv wt13us.adv
-!:ext adv
-# advise text part often start with tag like: 580A
-#>>>(16.s) ubequad x ADVISE PART %#llx
-# part of advise text like: "This is too informal for most writing."
->>>(16.s+16) string x (...%-.33s...)
-# everything else
->8 default x
->>8 byte x Unknown Corel/Wordperfect product %d,
->>>9 byte x file type %d
->10 byte 0 \b, v5.
-# version of WP file; 2.1~WP 8.0
-# major version of WP file like: 1 2
->10 byte !0 \b, v%d.
-# minor version of WP file like: 0 1
->11 byte x \b%d
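All of the branching above hangs off one small WPC prefix: the \xffWPC magic at 0, a document-area pointer at 4, a product byte at 8, a file-type byte at 9 and major/minor version bytes at 10/11. A minimal sketch under that assumption, with product and file-type values left numeric (see the tables above for their meanings):

/* Decode the generic WordPerfect/Corel WPC prefix header. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

static uint32_t le32(const unsigned char *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(int argc, char **argv)
{
    unsigned char h[16];
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(h, 1, sizeof(h), f) != sizeof(h) ||
        memcmp(h, "\377WPC", 4) != 0) {
        if (f) fclose(f);
        return 1;
    }
    fclose(f);
    printf("WPC prefix: product %u, file type %u, ", h[8], h[9]);
    if (h[10] == 0)
        printf("v5.%u, ", h[11]);             /* major byte 0 means a 5.x file */
    else
        printf("v%u.%u, ", h[10], h[11]);
    printf("document area at %#x\n", le32(h + 4));
    return 0;
}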
-
-# Hancom HWP (Hangul Word Processor)
-# Hangul Word Processor 3.0 through 97 used HWP 3.0 format.
-# URL: https://www.hancom.com/etc/hwpDownload.do
-0 string HWP\ Document\ File Hancom HWP (Hangul Word Processor) file, version 3.0
-!:ext hwp
-
-# CosmicBook, from Benoit Rouits
-0 string CSBK Ted Nelson's CosmicBook hypertext file
-
-2 string EYWR AmigaWriter file
-
-# chi: file(1) magic for ChiWriter files
-0 string \\1cw\ ChiWriter file
->5 string >\0 version %s
-0 string \\1cw ChiWriter file
-
-# Quark Express from https://www.garykessler.net/library/file_sigs.html
-2 string IIXPR3 Intel Quark Express Document (English)
-2 string IIXPRa Intel Quark Express Document (Korean)
-2 string MMXPR3 Motorola Quark Express Document (English)
-!:mime application/x-quark-xpress-3
-2 string MMXPRa Motorola Quark Express Document (Korean)
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/PageMaker
-# https://en.wikipedia.org/wiki/Adobe_PageMaker
-# Reference: http://mark0.net/download/triddefs_xml.7z/defs/p
-# pm4-pagemaker.trid.xml
-# pm5-pagemaker.trid.xml
-# Note: since version 6 in 1995 called Adobe PageMaker and
-# embedded in Compound Document handled by ./ole2compounddocs
-# mainly tested little endian variant
-4 ubelong =0x0000FF99
->0 use PageMaker
-# big endian variant
-4 ubelong =0x000099FF
->0 use \^PageMaker
-# display information of Aldus/Adobe PageMaker document/publication
-0 name PageMaker
->110 uleshort <0x0600 Aldus
->110 uleshort >0x05FF Adobe
->110 uleshort x PageMaker
-# "MP" marker for newer version 4 and above according to TrID
-#>108 string x \b, MARKER "%.2s"
-# http://www.nationalarchives.gov.uk/pronom/fmt/876
-!:mime application/vnd.pagemaker
-#!:mime application/x-pagemaker
-# different file name extensions are used depending on version
-# older version like 3
->110 uleshort/256 =0 document
-# https://www.macdisk.com/macsigen.php
-!:apple ALB3ALD3
-# PT3 for template and no example for PageMaker document/publication with PM3 extension
-!:ext pm3/pt3
->110 uleshort/256 =4 document
-!:apple ALD4ALB4
-# no example for PT4 template
-!:ext pm4/pt4
->110 uleshort/256 =5 document
-!:apple ALD5ALB5
-# no example for PT5 template
-!:ext pm5/pt5
->110 uleshort =0x0600 document
-!:apple ALD6ALB6
-# PT6 for template
-!:ext pm6/pt6
-# HOWTO distinguish version 7 from 6.5?
->110 uleshort =0x0632 document
-!:apple AD65AB65
-# no example for T65 template
-!:ext p65/t65/pmd/pmt
-# version 7 with PMT extension for template
-#!:ext pmd/pmt
-#!:apple ????PUBF
-# endian marker FF 99 for little endian
->6 ubyte =0xFF \b, little-endian
->6 ubyte =0x99 \b, big-endian
-# newer numeric version like: 4 5 6 6.50
-#>110 uleshort x \b, VERSION=%#x
->110 uleshort >0x03FF
->>110 uleshort/256 x \b, version %u
->>110 uleshort%256 >0 \b.%u
-# older version like 3
->110 uleshort <0x0400 \b, maybe version 3
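The test above boils down to the marker at offset 4 plus the 16-bit word at offset 110: byte 6 selects the byte order (0xFF little-endian, 0x99 big-endian), values below 0x0600 mean Aldus rather than Adobe, and the version is value/256 followed by value%256. A hedged sketch of that check:

/* Check the PageMaker endian marker and decode the version word at 110. */
#include <stdio.h>

int main(int argc, char **argv)
{
    unsigned char h[112];
    unsigned ver;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(h, 1, sizeof(h), f) != sizeof(h)) {
        if (f) fclose(f);
        return 1;
    }
    fclose(f);
    if (h[4] != 0 || h[5] != 0 ||
        !((h[6] == 0xFF && h[7] == 0x99) || (h[6] == 0x99 && h[7] == 0xFF)))
        return 1;                             /* marker at offset 4 not found */
    ver = h[6] == 0xFF ? (unsigned)(h[110] | h[111] << 8)   /* little-endian */
                       : (unsigned)(h[110] << 8 | h[111]);  /* big-endian */
    printf("%s PageMaker document (%s-endian), ",
        ver < 0x0600 ? "Aldus" : "Adobe", h[6] == 0xFF ? "little" : "big");
    if (ver < 0x0400)
        printf("maybe version 3\n");          /* old files keep a small value here */
    else
        printf("version %u.%u\n", ver / 256, ver % 256);
    return 0;
}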
-
-# adobe indesign (document, whatever...) from querkan
-0 belong 0x0606edf5 Adobe InDesign
->16 string DOCUMENT Document
-
-#------------------------------------------------------------------------------
-# ichitaro456: file(1) magic for Just System Word Processor Ichitaro
-#
-# Contributor kenzo-:
-# Reverse-engineered JS Ichitaro magic numbers
-#
-
-0 string DOC
->43 byte 0x14 Just System Word Processor Ichitaro v4
-!:mime application/x-ichitaro4
->144 string JDASH application/x-ichitaro4
-
-0 string DOC
->43 byte 0x15 Just System Word Processor Ichitaro v5
-!:mime application/x-ichitaro5
-
-0 string DOC
->43 byte 0x16 Just System Word Processor Ichitaro v6
-!:mime application/x-ichitaro6
-
-# Type: Freemind mindmap documents
-# From: Jamie Thompson <debian-bugs@jamie-thompson.co.uk>
-0 string/w \<map\ version Freemind document
-!:mime application/x-freemind
-
-# Type: Freeplane mindmap documents
-# From: Felix Natter <fnatter@gmx.net>
-0 string/w \<map\ version="freeplane Freeplane document
-!:mime application/x-freeplane
-
-# Type: Scribus
-# From: Werner Fink <werner@suse.de>
-0 string \<SCRIBUSUTF8\ Version Scribus Document
-0 string \<SCRIBUSUTF8NEW\ Version Scribus Document
-!:mime application/x-scribus
-
-# help files .hlp compiled from html and used by gfxboot added by Joerg Jenderek
-# markups page=0x04,label=0x12, followed by strings like "opt" or "main" and title=0x14
-0 ulelong&0x8080FFFF 0x00001204 gfxboot compiled html help file
-
-# From: Joerg Jenderek
-# URL: https://en.wikipedia.org/wiki/StarOffice
-# Reference: http://mark0.net/download/triddefs_xml.7z
-# /defs/t/thm-staroffice.trid.xml
-# Note: used in Star-, Open- and Libre-Office
-# named as soffice.StarConfigFile.6 or OpenOffice.org configuration by others
-0 ubeshort 0x0400
-# non nil gap
-#>(2.s+8) ubequad x \b, gap %#16.16llx
-# test for null value in gap after theme name maybe unreliable
-#>(2.s+9) ubyte 0 \b, 0-byte
-# look for keyword GALRESRV near the end
-# "C:\Program Files (x86)\StarOffice6.0\share\gallery\sg27.thm" Navigation, 238 objects
-#>0 search/8415 GALRESRV \b, GALRESRV found
-# "neues thema6.thm" MorePictures, 315 objects
-#>0 search/19299 GALRESRV \b, GALRESRV FOUND
-#>2 uleshort x \b, name length %u
-# skip file2147.chk by checking for a positive name length, like "3D" for sg16.thm
->2 uleshort >0
-# skip dBase printer form T6.PRF with misidentified gallery
-# name :\DBASE\IV\T6.txts by checking the 1st object name or the RESRV keyword
-# https://www.clicketyclick.dk/databases/xbase/xbase/dbase_ex.zip
-# template/t6/with_data/T6.PRF
-# by first char of object name or RESRV part of keyword GALRESRV
->>(2.s+13) ubyte >0x1F StarOffice Gallery theme
-!:mime application/x-stargallery-thm
-# thm is also used for JPEG thumbnail images
-!:ext thm
-# gallery name often 1 word like: 3D sounds Diagrams Flussdiagramme Fotos
-# or like private://gallery/hidden/imgppt "Cisco - WAN - LAN"
->>>2 pstring/h x %s
-# number of objects
->>>(2.s+4) ulelong x \b, %u object
-# plural s
->>>(2.s+4) ulelong !1 \bs
-# if available then display first object name
->>>(2.s+4) ulelong >0
-# partial file name, URL or internal name like "dd2*" of 1st object or RESRV
->>>>(2.s+11) pstring/h x \b, 1st %s
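The indirect offsets above follow a short chain: a 16-bit theme-name length at offset 2, the name itself right after it, and the 32-bit object count at (name length + 4). A sketch assuming exactly that little-endian layout and nothing more about the format:

/* Print the theme name and object count of a StarOffice Gallery .thm file. */
#include <stdio.h>
#include <stdint.h>

int main(int argc, char **argv)
{
    unsigned char buf[4096];
    size_t n, namelen;
    uint32_t objects;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL)
        return 1;
    n = fread(buf, 1, sizeof(buf), f);
    fclose(f);
    if (n < 8 || buf[0] != 0x04 || buf[1] != 0x00)
        return 1;                             /* magic 0x0400 missing */
    namelen = (size_t)(buf[2] | buf[3] << 8); /* pstring/h: 2-byte LE length */
    if (namelen == 0 || namelen + 8 > n)
        return 1;
    objects = (uint32_t)buf[namelen + 4] | (uint32_t)buf[namelen + 5] << 8 |
              (uint32_t)buf[namelen + 6] << 16 | (uint32_t)buf[namelen + 7] << 24;
    printf("StarOffice Gallery theme \"%.*s\", %u object%s\n",
        (int)namelen, (char *)(buf + 4), objects, objects == 1 ? "" : "s");
    return 0;
}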
-
-# From: Joerg Jenderek
-# URL: http://fileformats.archiveteam.org/wiki/StarOffice_Gallery
-# Note: used in Star-, Open- and Libre-Office and found in directories like
-# %APPDATA%\Roaming\LibreOffice\4\user\gallery
-# $HOME/.config/libreoffice/4/user/gallery
-0 string SGA3 StarOffice Gallery thumbnails
-# Unknown like 0x04000?0001000142
-#>4 ubequad x \b, UNKNOWN %#16.16llx
-#!:mime application/x-sdg
-!:mime application/x-stargallery-sdg
-!:ext sdg
-# display image magic for debugging purpose like 'BM'
-# looking like PC bitmap, Windows 3.x format with unknown compression
-#>11 string x \b, image magic '%-.2s'
-# inspect 1st GALLERY thumbnail magic by ./images with 1 space at end
-#>11 indirect x \b; contains
-
diff --git a/contrib/libs/libmagic/magic/Magdir/wsdl b/contrib/libs/libmagic/magic/Magdir/wsdl
deleted file mode 100644
index 1c9e60aaa2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/wsdl
+++ /dev/null
@@ -1,23 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: wsdl,v 1.6 2021/04/26 15:56:00 christos Exp $
-# wsdl: PHP WSDL Cache, https://www.php.net/manual/en/book.soap.php
-# Cache format extracted from source:
-# https://svn.php.net/viewvc/php/php-src/trunk/ext/soap/php_sdl.c?revision=HEAD&view=markup
-# Requires file >= 5.05
-# By Elan Ruusamae <glen@delfi.ee>, Patryk Zawadzki <patrys@pld-linux.org>, 2010-2011
-0 string wsdl PHP WSDL cache,
->4 byte x version %#02x
->6 ledate x \b, created %s
-
-# uri
->10 lelong <0x7fffffff
->>10 pstring/l x \b, uri: "%s"
-
-# source
->>>&0 lelong <0x7fffffff
->>>>&-4 pstring/l x \b, source: "%s"
-
-# target_ns
->>>>>&0 lelong <0x7fffffff
->>>>>>&-4 pstring/l x \b, target_ns: "%s"
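The chained relative offsets above amount to three back-to-back pstring/l values (a 32-bit little-endian length followed by that many bytes) after the 10-byte fixed header. A sketch of the same walk, assuming only that layout and nothing else about PHP's cache format:

/* Walk the uri/source/target_ns pstrings of a PHP WSDL cache file. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

static uint32_t le32(const unsigned char *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

int main(int argc, char **argv)
{
    static const char *label[] = { "uri", "source", "target_ns" };
    unsigned char buf[65536];
    size_t n, off = 10;                       /* fixed header: magic, version, date */
    time_t created;
    int i;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL)
        return 1;
    n = fread(buf, 1, sizeof(buf), f);
    fclose(f);
    if (n < 14 || memcmp(buf, "wsdl", 4) != 0)
        return 1;
    created = (time_t)le32(buf + 6);
    printf("PHP WSDL cache, version %#02x, created %s",
        (unsigned)buf[4], ctime(&created));
    for (i = 0; i < 3 && off + 4 <= n; i++) { /* uri, source, target_ns */
        uint32_t len = le32(buf + off);
        if (off + 4 + len > n)
            break;
        printf("%s: \"%.*s\"\n", label[i], (int)len, (char *)(buf + off + 4));
        off += 4 + len;
    }
    return 0;
}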
diff --git a/contrib/libs/libmagic/magic/Magdir/x68000 b/contrib/libs/libmagic/magic/Magdir/x68000
deleted file mode 100644
index 927b96dea2..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/x68000
+++ /dev/null
@@ -1,25 +0,0 @@
-#------------------------------------------------------------------------------
-# x68000: file(1) magic for the Sharp Home Computer
-# v1.0
-# Fabio R. Schmidlin <sd-snatcher@users.sourceforge.net>
-
-# Yanagisawa PIC picture
-0 string PIC
->3 search/0x200 \x1A
->>&0 search/0x200 \x0
->>>&0 ubyte 0 Yanagisawa PIC image file,
->>>>&0 ubyte&15 0 model: X68000,
->>>>&0 ubyte&15 1 model: PC-88VA,
->>>>&0 ubyte&15 2 model: FM-TOWNS,
->>>>&0 ubyte&15 3 model: MAC,
->>>>&0 ubyte&15 15 model: Generic,
->>>>&3 ubeshort x %dx
->>>>&5 ubeshort x \b%d,
->>>>&1 ubeshort 4 colors: 16
->>>>&1 ubeshort 8 colors: 256
->>>>&1 ubeshort 12 colors: 4096
->>>>&1 ubeshort 15 colors: 32768
->>>>&1 ubeshort 16 colors: 65536
->>>>&1 ubeshort >16 colors: %d-bit
-
-
diff --git a/contrib/libs/libmagic/magic/Magdir/xdelta b/contrib/libs/libmagic/magic/Magdir/xdelta
deleted file mode 100644
index fde1d26e13..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/xdelta
+++ /dev/null
@@ -1,13 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: xdelta,v 1.5 2011/08/08 09:01:05 christos Exp $
-# file(1) magic(5) data for xdelta Josh MacDonald <jmacd@CS.Berkeley.EDU>
-#
-0 string %XDELTA% XDelta binary patch file 0.14
-0 string %XDZ000% XDelta binary patch file 0.18
-0 string %XDZ001% XDelta binary patch file 0.20
-0 string %XDZ002% XDelta binary patch file 1.0
-0 string %XDZ003% XDelta binary patch file 1.0.4
-0 string %XDZ004% XDelta binary patch file 1.1
-
-0 string \xD6\xC3\xC4\x00 VCDIFF binary diff
diff --git a/contrib/libs/libmagic/magic/Magdir/xenix b/contrib/libs/libmagic/magic/Magdir/xenix
deleted file mode 100644
index fc8027b746..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/xenix
+++ /dev/null
@@ -1,106 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: xenix,v 1.15 2022/10/19 20:15:16 christos Exp $
-# xenix: file(1) magic for Microsoft Xenix
-#
-# "Middle model" stuff, and "Xenix 8086 relocatable or 80286 small
-# model" lifted from "magic.xenix", with comment "derived empirically;
-# treat as folklore until proven"
-#
-# "small model", "large model", "huge model" stuff lifted from XXX
-#
-# XXX - "x.out" collides with PDP-11 archives
-#
-0 string core core file (Xenix)
-# URL: http://www.polarhome.com/service/man/?qf=86rel&tf=2&of=Xenix
-# http://fileformats.archiveteam.org/wiki/OMF
-# Reference: http://www.azillionmonkeys.com/qed/Omfg.pdf
-# Update: Joerg Jenderek
-# recordtype~TranslatorHEADerRecord
-0 byte 0x80
-# GRR: line above is too general as it catches also Extensible storage engine DataBase,
-# all lif files like forth.lif hpcc88.lif lex90b.lif ( See ./lif)
-# and all compressed DEGAS low-res bitmaps like: MUNCHIE.PC1 PIDER1.PC1
-# skip examples like GENA.SND Switch.Snd by looking for record length maximal 1024-3
->1 uleshort <1022
-# skip examples like GAME.PICTURE Strange.Pic by looking for positive record length
->>1 uleshort >0
-# skip examples like Xtable.Data FRACTAL.GEN SHR.VIEW by looking for positive string length
->>>3 ubyte >0
-# skip examples like OMBRE.6 with "UUUUUU" name by looking for valid high second record type
->>>>(1.s+3) ubyte >0x6D
-# skip few Atari DEGAS bitmap TPDEMO.PC2 RECIPE.PC2 with invalid "high" second record type FEh FFh
->>>>>(1.s+3) ubyte <0xF2 8086 relocatable (Microsoft)
-#!:mime application/octet-stream
-!:mime application/x-object
-!:ext obj/o/a
-# T-module name often source name like "hello.c" or "jmppm32.asm" in JMPPM32.OBJ or
-# "kbhit" in KBHITS.OBJ or "CAUSEWAY_KERNAL" in CWAPI.OBJ
->>>>>>3 pstring x \b, "%s"
-# data length probably lower 256 according to TrID obj_omf.trid.xml
->>>>>>1 uleshort x \b, 1st record data length %u
-# checksum
-#>>>>>>(3.b+4) ubyte x \b, checksum %#2.2x
-# second recordtype: 96h~LNAMES 88h~COMENT 8CH~EXTDEF
-# highest F1h~Library End Record
->>>>>>(1.s+3) ubyte x \b, 2nd record type %#x
->>>>>>(1.s+4) uleshort x \b, 2nd record data length %u
-0 leshort 0xff65 x.out
->2 string __.SYMDEF randomized
->0 byte x archive
-0 leshort 0x206 Microsoft a.out
->8 leshort 1 Middle model
->0x1e leshort &0x10 overlay
->0x1e leshort &0x2 separate
->0x1e leshort &0x4 pure
->0x1e leshort &0x800 segmented
->0x1e leshort &0x400 standalone
->0x1e leshort &0x8 fixed-stack
->0x1c byte &0x80 byte-swapped
->0x1c byte &0x40 word-swapped
->0x10 lelong >0 not-stripped
->0x1e leshort ^0xc000 pre-SysV
->0x1e leshort &0x4000 V2.3
->0x1e leshort &0x8000 V3.0
->0x1c byte &0x4 86
->0x1c byte &0xb 186
->0x1c byte &0x9 286
->0x1c byte &0xa 386
->0x1f byte <0x040 small model
->0x1f byte =0x048 large model
->0x1f byte =0x049 huge model
->0x1e leshort &0x1 executable
->0x1e leshort ^0x1 object file
->0x1e leshort &0x40 Large Text
->0x1e leshort &0x20 Large Data
->0x1e leshort &0x120 Huge Objects Enabled
->0x10 lelong >0 not stripped
-
-0 leshort 0x140 old Microsoft 8086 x.out
->0x3 byte &0x4 separate
->0x3 byte &0x2 pure
->0 byte &0x1 executable
->0 byte ^0x1 relocatable
->0x14 lelong >0 not stripped
-
-0 lelong 0x206 b.out
->0x1e leshort &0x10 overlay
->0x1e leshort &0x2 separate
->0x1e leshort &0x4 pure
->0x1e leshort &0x800 segmented
->0x1e leshort &0x400 standalone
->0x1e leshort &0x1 executable
->0x1e leshort ^0x1 object file
->0x1e leshort &0x4000 V2.3
->0x1e leshort &0x8000 V3.0
->0x1c byte &0x4 86
->0x1c byte &0xb 186
->0x1c byte &0x9 286
->0x1c byte &0x29 286
->0x1c byte &0xa 386
->0x1e leshort &0x4 Large Text
->0x1e leshort &0x2 Large Data
->0x1e leshort &0x102 Huge Objects Enabled
-
-0 leshort 0x580 XENIX 8086 relocatable or 80286 small model
-# GRR: line above is too general as it also catches all 8086 relocatable (Microsoft) objects with 1st record data length 5, like C0M.OBJ C0T.OBJ C0S.OBJ
diff --git a/contrib/libs/libmagic/magic/Magdir/xilinx b/contrib/libs/libmagic/magic/Magdir/xilinx
deleted file mode 100644
index fd1467813c..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/xilinx
+++ /dev/null
@@ -1,58 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: xilinx,v 1.10 2022/12/18 14:59:32 christos Exp $
-# This is Aaron's attempt at a MAGIC file for Xilinx .bit files.
-# Xilinx-Magic@RevRagnarok.com
-# Got the info from FPGA-FAQ 0026
-#
-# Rewritten to use pstring/H instead of hardcoded lengths by O. Freyermuth,
-# fixes at least reading of bitfiles from Spartan 2, 3, 6.
-# http://www.fpga-faq.com/FAQ_Pages/0026_Tell_me_about_bit_files.htm
-#
-# First there is the sync header and its length
-0 beshort 0x0009
->2 belong =0x0ff00ff0
->>&0 belong =0x0ff00ff0
->>>&0 byte =0x00
->>>&1 beshort =0x0001
->>>&3 string a Xilinx BIT data
-# Next is a Pascal-style string with the NCD name. We want to capture that.
->>>>&0 pstring/H x - from %s
-# And then 'b'
->>>>>&1 string b
-# Then the model / part number:
->>>>>>&0 pstring/H x - for %s
-# Then 'c'
->>>>>>>&1 string c
-# Then the build-date
->>>>>>>>&0 pstring/H x - built %s
-# Then 'd'
->>>>>>>>>&1 string d
-# Then the build-time
->>>>>>>>>>&0 pstring/H x \b(%s)
-# Then 'e'
->>>>>>>>>>>&1 string e
-# And length of data
->>>>>>>>>>>>&0 belong x - data length %#x
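The continuation levels above walk a sequence of tagged fields: 'a' NCD name, 'b' part, 'c' date, 'd' time, each stored as a big-endian 16-bit length followed by a NUL-terminated string, then 'e' and a 32-bit big-endian data length. A sketch of the same walk, skipping validation of the 13-byte sync block:

/* Walk the tagged header fields of a Xilinx .bit file. */
#include <stdio.h>
#include <stdint.h>

static int pfield(FILE *f, int tag)
{
    int c, hi, lo;
    unsigned len, i;

    if (fgetc(f) != tag)
        return -1;
    hi = fgetc(f);
    lo = fgetc(f);
    if (hi == EOF || lo == EOF)
        return -1;
    len = (unsigned)hi << 8 | (unsigned)lo;   /* big-endian 16-bit length */
    for (i = 0; i < len; i++) {
        if ((c = fgetc(f)) == EOF)
            return -1;
        if (c != '\0')
            putchar(c);                       /* fields are NUL-terminated */
    }
    return 0;
}

int main(int argc, char **argv)
{
    static const char *label[] = { " - from ", " - for ", " - built ", " at " };
    unsigned char sync[13];
    uint32_t dlen = 0;
    int i, c;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(sync, 1, sizeof(sync), f) != sizeof(sync)) {
        if (f) fclose(f);
        return 1;                             /* sync header skipped, not checked */
    }
    printf("Xilinx BIT data");
    for (i = 0; i < 4; i++) {                 /* tagged fields 'a' .. 'd' */
        fputs(label[i], stdout);
        if (pfield(f, 'a' + i) != 0) {
            fclose(f);
            return 1;
        }
    }
    if (fgetc(f) == 'e') {                    /* 32-bit big-endian data length */
        for (i = 0; i < 4; i++) {
            if ((c = fgetc(f)) == EOF)
                break;
            dlen = dlen << 8 | (unsigned)c;
        }
        printf(" - data length %#x", dlen);
    }
    printf("\n");
    fclose(f);
    return 0;
}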
-
-# Raw bitstream files
-0 long 0xffffffff
->&0 belong 0xaa995566 Xilinx RAW bitstream (.BIN)
-
-# AXLF (xclbin) files used by AMD/Xilinx accelerators.
-# The file format is defined by XRT source tree:
-# https://github.com/Xilinx/XRT/blob/master/src/runtime_src/core/include/xclbin.h
-# Display file size, creation date, accelerator shell name, xclbin uuid and
-# number of sections.
-
-0 string xclbin2 AMD/Xilinx accelerator AXLF (xclbin) file
->0x130 lequad x \b, %lld bytes
->0x138 leqdate x \b, created %s
->0x160 string >0 \b, shell "%.64s"
->0x1a0 ubelong x \b, uuid %08x
->0x1a4 ubeshort x \b-%04x
->0x1a6 ubeshort x \b-%04x
->0x1a8 ubeshort x \b-%04x
->0x1aa ubelong x \b-%08x
->0x1ae ubeshort x \b%04x
->0x1c0 lelong x \b, %d sections
\ No newline at end of file
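The AXLF entries read fixed little-endian header fields: a 64-bit file size at 0x130, a 64-bit creation time at 0x138, the shell name at 0x160, the 16-byte uuid at 0x1a0 and a 32-bit section count at 0x1c0. The sketch below prints the same fields; the offsets are taken from the entries above rather than re-derived from xclbin.h.

/* Print the fixed AXLF (xclbin) header fields used by the magic above. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>
#include <time.h>

static uint32_t le32(const unsigned char *p)
{
    return (uint32_t)p[0] | (uint32_t)p[1] << 8 |
           (uint32_t)p[2] << 16 | (uint32_t)p[3] << 24;
}

static uint64_t le64(const unsigned char *p)
{
    return (uint64_t)le32(p) | (uint64_t)le32(p + 4) << 32;
}

int main(int argc, char **argv)
{
    unsigned char h[0x1c4];
    time_t created;
    int i;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(h, 1, sizeof(h), f) != sizeof(h) ||
        memcmp(h, "xclbin2", 7) != 0) {
        if (f) fclose(f);
        return 1;
    }
    fclose(f);
    created = (time_t)le64(h + 0x138);
    printf("AXLF (xclbin), %llu bytes, shell \"%.64s\", created %s",
        (unsigned long long)le64(h + 0x130), (char *)(h + 0x160), ctime(&created));
    printf("uuid ");
    for (i = 0; i < 16; i++) {                /* printed big-endian, as above */
        printf("%02x", h[0x1a0 + i]);
        if (i == 3 || i == 5 || i == 7 || i == 9)
            putchar('-');
    }
    printf(", %u sections\n", (unsigned)le32(h + 0x1c0));
    return 0;
}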
diff --git a/contrib/libs/libmagic/magic/Magdir/xo65 b/contrib/libs/libmagic/magic/Magdir/xo65
deleted file mode 100644
index f7b555f59f..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/xo65
+++ /dev/null
@@ -1,37 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: xo65,v 1.5 2022/07/17 15:36:20 christos Exp $
-# https://cc65.github.io/doc/sim65.html
-# xo65 object files
-# From: "Ullrich von Bassewitz" <uz@cc65.org>
-#
-0 string \x55\x7A\x6E\x61 xo65 object,
->4 leshort x version %d,
->6 leshort&0x0001 =0x0001 with debug info
->6 leshort&0x0001 =0x0000 no debug info
-
-# xo65 library files
-0 string \x6E\x61\x55\x7A xo65 library,
->4 leshort x version %d
-
-# o65 object files
-0 string \x01\x00\x6F\x36\x35 o65
->6 leshort&0x1000 =0x0000 executable,
->6 leshort&0x1000 =0x1000 object,
->5 byte x version %d,
->6 leshort&0x8000 =0x8000 65816,
->6 leshort&0x8000 =0x0000 6502,
->6 leshort&0x2000 =0x2000 32 bit,
->6 leshort&0x2000 =0x0000 16 bit,
->6 leshort&0x4000 =0x4000 page reloc,
->6 leshort&0x4000 =0x0000 byte reloc,
->6 leshort&0x0003 =0x0000 alignment 1
->6 leshort&0x0003 =0x0001 alignment 2
->6 leshort&0x0003 =0x0002 alignment 4
->6 leshort&0x0003 =0x0003 alignment 256
-
-# sim65 executable files
-0 string \x73\x69\x6d\x36\x35 sim65 executable,
->5 byte x version %d,
->6 leshort&0x0000 =0x0000 6502
->6 leshort&0x0001 =0x0001 65C02
diff --git a/contrib/libs/libmagic/magic/Magdir/xwindows b/contrib/libs/libmagic/magic/Magdir/xwindows
deleted file mode 100644
index d8c08c8702..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/xwindows
+++ /dev/null
@@ -1,43 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: xwindows,v 1.13 2022/03/24 15:48:58 christos Exp $
-# xwindows: file(1) magic for various X/Window system file formats.
-
-# Compiled X Keymap
-# XKM (compiled X keymap) files (including version and byte ordering)
-1 string mkx Compiled XKB Keymap: lsb,
->0 byte >0 version %d
->0 byte =0 obsolete
-0 string xkm Compiled XKB Keymap: msb,
->3 byte >0 version %d
->3 byte =0 obsolete
-
-# xfsdump archive
-0 string xFSdump0 xfsdump archive
->8 belong x (version %d)
-
-# Jaleo XFS files
-0 long 395726 Jaleo XFS file
->4 long x - version %d
->8 long x - [%d -
->20 long x \b%dx
->24 long x \b%dx
->28 long 1008 \bYUV422]
->28 long 1000 \bRGB24]
-
-# Xcursor data
-# X11 mouse cursor format defined in libXcursor, see
-# https://www.x.org/archive/X11R6.8.1/doc/Xcursor.3.html
-# https://cgit.freedesktop.org/xorg/lib/libXcursor/tree/include/X11/Xcursor/Xcursor.h
-0 string Xcur Xcursor data
-!:mime image/x-xcursor
->10 leshort x version %d
->>8 leshort x \b.%d
-
-# X bitmap https://en.wikipedia.org/wiki/X_BitMap
-0 search/2048 #define\040
->&0 regex [a-zA-Z0-9]+_width\040 xbm image
->>&0 regex [0-9]+ (%sx
->>>&0 string \n#define\040
->>>>&0 regex [a-zA-Z0-9]+_height\040
->>>>>&0 regex [0-9]+ \b%s)
diff --git a/contrib/libs/libmagic/magic/Magdir/yara b/contrib/libs/libmagic/magic/Magdir/yara
deleted file mode 100644
index 6156cc63bc..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/yara
+++ /dev/null
@@ -1,17 +0,0 @@
-
-
-#------------------------------------------------------------------------------
-# $File: yara,v 1.4 2021/04/26 15:56:00 christos Exp $
-# yara: file(1) magic for https://virustotal.github.io/yara/
-#
-
-0 string YARA
->4 lelong >2047
->8 byte <20 YARA 3.x compiled rule set
-# version
->>8 clear x
->>8 byte 6 created with version 3.3.0
->>8 byte 8 created with version 3.4.0
->>8 byte 11 created with version 3.5.0
->>8 default x
->>>8 byte x development version %#02x
diff --git a/contrib/libs/libmagic/magic/Magdir/zfs b/contrib/libs/libmagic/magic/Magdir/zfs
deleted file mode 100644
index 5cb0fdd180..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/zfs
+++ /dev/null
@@ -1,96 +0,0 @@
-#------------------------------------------------------------------------------
-# zfs: file(1) magic for ZFS dumps
-#
-# From <rea-fbsd@codelabs.ru>
-# ZFS dump header has the following structure (as per zfs_ioctl.h
-# in FreeBSD with drr_type is set to DRR_BEGIN)
-#
-# enum {
-# DRR_BEGIN, DRR_OBJECT, DRR_FREEOBJECTS,
-# DRR_WRITE, DRR_FREE, DRR_END,
-# } drr_type;
-# uint32_t drr_pad;
-# uint64_t drr_magic;
-# uint64_t drr_version;
-# uint64_t drr_creation_time;
-# dmu_objset_type_t drr_type;
-# uint32_t drr_pad;
-# uint64_t drr_toguid;
-# uint64_t drr_fromguid;
-# char drr_toname[MAXNAMELEN];
-#
-# Backup magic is 0x00000002f5bacbac (quad word)
-# The drr_type is defined as
-# typedef enum dmu_objset_type {
-# DMU_OST_NONE,
-# DMU_OST_META,
-# DMU_OST_ZFS,
-# DMU_OST_ZVOL,
-# DMU_OST_OTHER, /* For testing only! */
-# DMU_OST_ANY, /* Be careful! */
-# DMU_OST_NUMTYPES
-# } dmu_objset_type_t;
-#
-# Almost all uint64_t fields are printed as the 32-bit ones (with high
-# 32 bits zeroed), because there is no simple way to print them as the
-# full 64-bit values.
-
-# Big-endian values
-8 string \000\000\000\002\365\272\313\254 ZFS snapshot (big-endian machine),
->20 belong x version %u,
->32 belong 0 type: NONE,
->32 belong 1 type: META,
->32 belong 2 type: ZFS,
->32 belong 3 type: ZVOL,
->32 belong 4 type: OTHER,
->32 belong 5 type: ANY,
->32 belong >5 type: UNKNOWN (%u),
->40 byte x destination GUID: %02X
->41 byte x %02X
->42 byte x %02X
->43 byte x %02X
->44 byte x %02X
->45 byte x %02X
->46 byte x %02X
->47 byte x %02X,
->48 ulong >0
->>52 ulong >0
->>>48 byte x source GUID: %02X
->>>49 byte x %02X
->>>50 byte x %02X
->>>51 byte x %02X
->>>52 byte x %02X
->>>53 byte x %02X
->>>54 byte x %02X
->>>55 byte x %02X,
->56 string >\0 name: '%s'
-
-# Little-endian values
-8 string \254\313\272\365\002\000\000\000 ZFS snapshot (little-endian machine),
->16 lelong x version %u,
->32 lelong 0 type: NONE,
->32 lelong 1 type: META,
->32 lelong 2 type: ZFS,
->32 lelong 3 type: ZVOL,
->32 lelong 4 type: OTHER,
->32 lelong 5 type: ANY,
->32 lelong >5 type: UNKNOWN (%u),
->47 byte x destination GUID: %02X
->46 byte x %02X
->45 byte x %02X
->44 byte x %02X
->43 byte x %02X
->42 byte x %02X
->41 byte x %02X
->40 byte x %02X,
->48 ulong >0
->>52 ulong >0
->>>55 byte x source GUID: %02X
->>>54 byte x %02X
->>>53 byte x %02X
->>>52 byte x %02X
->>>51 byte x %02X
->>>50 byte x %02X
->>>49 byte x %02X
->>>48 byte x %02X,
->56 string >\0 name: '%s'
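Both variants above differ only in byte order, which the 64-bit magic at offset 8 already encodes. A sketch that picks the byte order from the magic and then reads the meaningful 32-bit half of the version and objset-type fields plus the name at offset 56 (MAXNAMELEN assumed to be 256, as in the structure quoted above):

/* Detect a ZFS dump stream and print version, type and snapshot name. */
#include <stdio.h>
#include <stdint.h>
#include <string.h>

int main(int argc, char **argv)
{
    static const unsigned char be[8] = { 0, 0, 0, 2, 0xf5, 0xba, 0xcb, 0xac };
    static const unsigned char le[8] = { 0xac, 0xcb, 0xba, 0xf5, 2, 0, 0, 0 };
    unsigned char h[312];                     /* 56-byte fixed part + 256-byte name */
    uint32_t version, type;
    FILE *f = argc == 2 ? fopen(argv[1], "rb") : NULL;

    if (f == NULL || fread(h, 1, sizeof(h), f) != sizeof(h)) {
        if (f) fclose(f);
        return 1;
    }
    fclose(f);
    if (memcmp(h + 8, be, 8) == 0) {          /* big-endian dump */
        version = (uint32_t)h[20] << 24 | h[21] << 16 | h[22] << 8 | h[23];
        type    = (uint32_t)h[32] << 24 | h[33] << 16 | h[34] << 8 | h[35];
    } else if (memcmp(h + 8, le, 8) == 0) {   /* little-endian dump */
        version = (uint32_t)h[19] << 24 | h[18] << 16 | h[17] << 8 | h[16];
        type    = (uint32_t)h[35] << 24 | h[34] << 16 | h[33] << 8 | h[32];
    } else {
        return 1;
    }
    printf("ZFS snapshot, version %u, type %u, name '%.256s'\n",
        version, type, (char *)(h + 56));
    return 0;
}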
diff --git a/contrib/libs/libmagic/magic/Magdir/zilog b/contrib/libs/libmagic/magic/Magdir/zilog
deleted file mode 100644
index 1c861fb283..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/zilog
+++ /dev/null
@@ -1,12 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: zilog,v 1.7 2009/09/19 16:28:13 christos Exp $
-# zilog: file(1) magic for Zilog Z8000.
-#
-# Was it big-endian or little-endian? My Product Specification doesn't
-# say.
-#
-0 long 0xe807 object file (z8000 a.out)
-0 long 0xe808 pure object file (z8000 a.out)
-0 long 0xe809 separate object file (z8000 a.out)
-0 long 0xe805 overlay object file (z8000 a.out)
diff --git a/contrib/libs/libmagic/magic/Magdir/zip b/contrib/libs/libmagic/magic/Magdir/zip
deleted file mode 100644
index abf5284776..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/zip
+++ /dev/null
@@ -1,126 +0,0 @@
-#------------------------------------------------------------------------------
-# $File: zip,v 1.8 2021/10/24 15:53:56 christos Exp $
-# zip: file(1) magic for zip files; this is not used.
-# Note: the version of this magic in ./archive is currently stronger; this is
-# just an example until negative offsets are supported better
-# Note: All fields unless otherwise noted are unsigned!
-
-# Zip Central Directory record
-0 name zipcd
->0 string PK\001\002 Zip archive data
-!:mime application/zip
-# no "made by" in local file header with PK\3\4 magic
->>4 leshort x \b, made by
->>4 use zipversion
->>4 use ziphost
-# inside ./archive 1.151 called "at least" zipversion "to extract"
->>6 leshort x \b, extract using at least
->>6 use zipversion
-# This is DOS date like: ledate 21:00:48 19 Dec 2001 != DOS 00:00 1 Jan 2010 ~ 0000213C
->>12 ulelong x \b, last modified
->>14 lemsdosdate x \b, last modified %s
->>12 lemsdostime x %s
-# uncompressed size of 1st entry; FFffFFff means real value stored in ZIP64 record
->>24 ulelong !0xFFffFFff \b, uncompressed size %u
-# inside ./archive 1.151 called "compression method="zipcompression
->>10 leshort x \b, method=
->>10 use zipcompression
-
-# URL: https://en.wikipedia.org/wiki/Zip_(file_format)
-# reference: https://pkware.cachefly.net/webdocs/casestudies/APPNOTE.TXT (Version: 6.3.9)
-# Zip known compressions
-0 name zipcompression
->0 leshort 0 \bstore
->0 leshort 1 \bShrinking
->0 leshort 6 \bImploding
->0 leshort 7 \bTokenizing
->0 leshort 8 \bdeflate
->0 leshort 9 \bdeflate64
->0 leshort 10 \bLibrary imploding
-#>0 leshort 11 \bReserved by PKWARE
->0 leshort 12 \bbzip2
-#>0 leshort 13 \bReserved by PKWARE
->0 leshort 14 \blzma
-#>0 leshort 15 \bReserved by PKWARE
->0 leshort 16 \bCMPSC (IBM z/OS)
-#>0 leshort 17 \bReserved by PKWARE
->0 leshort 18 \bIBM TERSE
->0 leshort 19 \bIBM LZ77 (z/Architecture)
->0 leshort 20 \bZstd (deprecated)
->0 leshort 93 \bZstd
->0 leshort 94 \bMP3
->0 leshort 95 \bxz
->0 leshort 96 \bJpeg
->0 leshort 97 \bWavPack
->0 leshort 98 \bPPMd
->0 leshort 99 \bAES Encrypted
->0 default x
->>0 leshort x \b[%#x]
-
-# Zip known versions
-0 name zipversion
-# The lower byte indicates the ZIP version of this file. The value/10 indicates
-# the major version number, and the value mod 10 is the minor version number.
->0 ubyte/10 x v%u
->0 ubyte%10 x \b.%u
-# >0 leshort 0x09 v0.9
-# >0 leshort 0x0a v1.0
-# >0 leshort 0x0b v1.1
-# >0 leshort 0x14 v2.0
-# >0 leshort 0x15 v2.1
-# >0 leshort 0x19 v2.5
-# >0 leshort 0x1b v2.7
-# >0 leshort 0x2d v4.5
-# >0 leshort 0x2e v4.6
-# >0 leshort 0x32 v5.0
-# >0 leshort 0x33 v5.1
-# >0 leshort 0x34 v5.2
-# >0 leshort 0x3d v6.1
-# >0 leshort 0x3e v6.2
-# >0 leshort 0x3f v6.3
-# >0 default x
-# >>0 leshort x v?[%#x]
-
-# display compatible host system name of ZIP archive
-0 name ziphost
-# The upper byte indicates the compatibility of the file attribute information.
-# If the file is compatible with MS-DOS (v 2.04g) then this value will be zero.
-#>1 ubyte 0 DOS
->1 ubyte 1 Amiga
->1 ubyte 2 OpenVMS
->1 ubyte 3 UNIX
->1 ubyte 4 VM/CMS
->1 ubyte 6 OS/2
->1 ubyte 7 Macintosh
->1 ubyte 11 MVS
->1 ubyte 13 Acorn Risc
->1 ubyte 16 BeOS
->1 ubyte 17 Tandem
-# 9 untested
->1 ubyte 5 Atari ST
->1 ubyte 8 Z-System
->1 ubyte 9 CP/M
->1 ubyte 10 Windows NTFS
->1 ubyte 12 VSE
->1 ubyte 14 VFAT
->1 ubyte 15 alternate MVS
->1 ubyte 18 OS/400
->1 ubyte 19 OS X
-# unused
-#>1 ubyte >19 unused %#x
-
-# Zip End Of Central Directory record
-# GRR: wrong for a ZIP archive with a comment
--22 string PK\005\006
-#>4 uleshort !0xFFff \b, %u disks
-#>6 uleshort !0xFFff \b, central directory disk %u
-#>8 uleshort !0xFFff \b, %u central directories on this disk
-#>10 uleshort !0xFFff \b, %u central directories
-#>12 ulelong !0xFFffFFff \b, %u central directory bytes
-# offset of central directory
-#>16 ulelong x \b, central directory offset %#x
->(16.l) use zipcd
-# archive comment length n
-#>>20 uleshort >0 \b, comment length %u
-# archive comment
->>20 pstring/l >0 \b, %s
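The zipversion and ziphost helpers above decode the two halves of the 16-bit "version made by" field: the low byte is major*10 + minor, the high byte is the host system. A tiny standalone sketch of that arithmetic, using an illustrative value rather than reading a real central directory record:

/* Decode a ZIP "version made by" value the same way zipversion/ziphost do. */
#include <stdio.h>
#include <stdint.h>

static const char *host_name(unsigned h)
{
    switch (h) {                              /* subset of the table above */
    case 0:  return "MS-DOS";
    case 3:  return "UNIX";
    case 7:  return "Macintosh";
    case 10: return "Windows NTFS";
    case 19: return "OS X";
    default: return "other";
    }
}

int main(void)
{
    uint16_t made_by = 0x031E;                /* illustrative: UNIX host, v3.0 */
    unsigned ver = made_by & 0xFF, host = made_by >> 8;

    printf("made by %s, v%u.%u\n", host_name(host), ver / 10, ver % 10);
    return 0;
}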
diff --git a/contrib/libs/libmagic/magic/Magdir/zyxel b/contrib/libs/libmagic/magic/Magdir/zyxel
deleted file mode 100644
index d3a43e4878..0000000000
--- a/contrib/libs/libmagic/magic/Magdir/zyxel
+++ /dev/null
@@ -1,17 +0,0 @@
-
-#------------------------------------------------------------------------------
-# $File: zyxel,v 1.6 2009/09/19 16:28:13 christos Exp $
-# zyxel: file(1) magic for ZyXEL modems
-#
-# From <rob@pe1chl.ampr.org>
-# These are the /etc/magic entries to decode datafiles as used for the
-# ZyXEL U-1496E DATA/FAX/VOICE modems. (This header conforms to a
-# ZyXEL-defined standard)
-
-0 string ZyXEL\002 ZyXEL voice data
->10 byte 0 - CELP encoding
->10 byte&0x0B 1 - ADPCM2 encoding
->10 byte&0x0B 2 - ADPCM3 encoding
->10 byte&0x0B 3 - ADPCM4 encoding
->10 byte&0x0B 8 - New ADPCM3 encoding
->10 byte&0x04 4 with resync
diff --git a/contrib/libs/libmagic/magic/ya.make b/contrib/libs/libmagic/magic/ya.make
deleted file mode 100644
index c8fe646fd4..0000000000
--- a/contrib/libs/libmagic/magic/ya.make
+++ /dev/null
@@ -1,44 +0,0 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-SRCDIR(contrib/libs/libmagic/magic/Magdir)
-
-RESOURCE(
- Magdir.mgc /magic/magic.mgc
-)
-
-RUN_PROGRAM(
- contrib/libs/libmagic/file/0 -C -m $CURDIR/Magdir
- CWD $BINDIR
- OUT Magdir.mgc
- IN acorn adi adventure aes algol68 allegro alliant amanda amigaos android animation aout apache apl apple
- application applix apt archive aria arm asf assembler asterix att3b audio avm basis beetle ber bflt
- bhl bioinformatics biosig blackberry blcr blender blit bm bout bsdi bsi btsnoop burp bytecode c-lang
- c64 cad cafebabe cbor ccf cddb chord cisco citrus clarion claris clipper clojure coff commands
- communications compress console convex coverage cracklib crypto ctags ctf cubemap cups dact database
- dataone dbpf der diamond dif diff digital dolby dump dwarfs dyadic ebml edid editors efi elf encore
- epoc erlang espressif esri etf fcs filesystems finger firmware flash flif fonts forth fortran frame
- freebsd fsav fusecompress games gcc gconv gentoo geo geos gimp git glibc gnome gnu gnumeric gpt gpu
- grace graphviz gringotts hardware hitachi-sh hp human68k ibm370 ibm6000 icc iff images inform intel
- interleaf island ispell isz java javascript jpeg karma kde keepass kerberos kicad kml lammps lecter
- lex lif linux lisp llvm locoscript lua luks m4 mach macintosh macos magic mail.news make map maple
- marc21 mathcad mathematica matroska mcrypt measure mercurial metastore meteorological microfocus mime
- mips mirage misctools mkid mlssa mmdf modem modulefile motorola mozilla msdos msooxml msvc msx mup
- music nasa natinst ncr netbsd netscape netware news nifty nim-lang nitpicker numpy oasis ocaml octave
- ole2compounddocs olf openfst opentimestamps oric os2 os400 os9 osf1 palm parix parrot pascal pbf pbm
- pc88 pc98 pci_ids pcjr pdf pdp perl pgf pgp pgp-binary-keys pkgadd plan9 playdate plus5 pmem polyml
- printer project psdbms psl pulsar puzzle pwsafe pyramid python qt revision riff ringdove rpi rpm rpmsg
- rst rtf ruby rust sc sccs scientific securitycerts selinux sendmail sequent sereal sgi sgml sharc
- sinclair sisu sketch smalltalk smile sniffer softquad sosi spec spectrum sql ssh ssl statistics
- subtitle sun svf sylk symbos sysex tcl teapot terminfo tex tgif ti-8x timezone tplink troff tuxedo
- typeset uf2 unicode unisig unknown usd uterus uuencode vacuum-cleaner varied.out varied.script vax
- vicar virtual virtutech visx vms vmware vorbis vxl warc weak web webassembly windows wireless
- wordprocessors wsdl x68000 xdelta xenix xilinx xo65 xwindows yara zfs zilog zip zyxel
-)
-
-END()
diff --git a/contrib/libs/libmagic/src/apprentice.c b/contrib/libs/libmagic/src/apprentice.c
deleted file mode 100644
index ca22186695..0000000000
--- a/contrib/libs/libmagic/src/apprentice.c
+++ /dev/null
@@ -1,3743 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * apprentice - make one pass through /etc/magic, learning its secrets.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: apprentice.c,v 1.342 2023/07/17 14:38:35 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <stddef.h>
-#include <string.h>
-#include <assert.h>
-#include <ctype.h>
-#include <fcntl.h>
-#ifdef QUICK
-#include <sys/mman.h>
-#endif
-#include <dirent.h>
-#include <limits.h>
-#ifdef HAVE_BYTESWAP_H
-#include <byteswap.h>
-#endif
-#ifdef HAVE_SYS_BSWAP_H
-#error #include <sys/bswap.h>
-#endif
-
-
-#define EATAB {while (isascii(CAST(unsigned char, *l)) && \
- isspace(CAST(unsigned char, *l))) ++l;}
-#define LOWCASE(l) (isupper(CAST(unsigned char, l)) ? \
- tolower(CAST(unsigned char, l)) : (l))
-/*
- * Work around a bug in headers on Digital Unix.
- * At least confirmed for: OSF1 V4.0 878
- */
-#if defined(__osf__) && defined(__DECC)
-#ifdef MAP_FAILED
-#undef MAP_FAILED
-#endif
-#endif
-
-#ifndef MAP_FAILED
-#define MAP_FAILED (void *) -1
-#endif
-
-#ifndef MAP_FILE
-#define MAP_FILE 0
-#endif
-
-#define ALLOC_CHUNK CAST(size_t, 10)
-#define ALLOC_INCR CAST(size_t, 200)
-
-#define MAP_TYPE_USER 0
-#define MAP_TYPE_MALLOC 1
-#define MAP_TYPE_MMAP 2
-
-struct magic_entry {
- struct magic *mp;
- uint32_t cont_count;
- uint32_t max_count;
-};
-
-struct magic_entry_set {
- struct magic_entry *me;
- uint32_t count;
- uint32_t max;
-};
-
-struct magic_map {
- void *p;
- size_t len;
- int type;
- struct magic *magic[MAGIC_SETS];
- uint32_t nmagic[MAGIC_SETS];
-};
-
-int file_formats[FILE_NAMES_SIZE];
-const size_t file_nformats = FILE_NAMES_SIZE;
-const char *file_names[FILE_NAMES_SIZE];
-const size_t file_nnames = FILE_NAMES_SIZE;
-
-file_private int getvalue(struct magic_set *ms, struct magic *, const char **, int);
-file_private int hextoint(int);
-file_private const char *getstr(struct magic_set *, struct magic *, const char *,
- int);
-file_private int parse(struct magic_set *, struct magic_entry *, const char *,
- size_t, int);
-file_private void eatsize(const char **);
-file_private int apprentice_1(struct magic_set *, const char *, int);
-file_private ssize_t apprentice_magic_strength_1(const struct magic *);
-file_private int apprentice_sort(const void *, const void *);
-file_private void apprentice_list(struct mlist *, int );
-file_private struct magic_map *apprentice_load(struct magic_set *,
- const char *, int);
-file_private struct mlist *mlist_alloc(void);
-file_private void mlist_free_all(struct magic_set *);
-file_private void mlist_free(struct mlist *);
-file_private void byteswap(struct magic *, uint32_t);
-file_private void bs1(struct magic *);
-
-#if defined(HAVE_BYTESWAP_H)
-#define swap2(x) bswap_16(x)
-#define swap4(x) bswap_32(x)
-#define swap8(x) bswap_64(x)
-#elif defined(HAVE_SYS_BSWAP_H)
-#define swap2(x) bswap16(x)
-#define swap4(x) bswap32(x)
-#define swap8(x) bswap64(x)
-#else
-file_private uint16_t swap2(uint16_t);
-file_private uint32_t swap4(uint32_t);
-file_private uint64_t swap8(uint64_t);
-#endif
-
-file_private char *mkdbname(struct magic_set *, const char *, int);
-file_private struct magic_map *apprentice_buf(struct magic_set *, struct magic *,
- size_t);
-file_private struct magic_map *apprentice_map(struct magic_set *, const char *);
-file_private int check_buffer(struct magic_set *, struct magic_map *, const char *);
-file_private void apprentice_unmap(struct magic_map *);
-file_private int apprentice_compile(struct magic_set *, struct magic_map *,
- const char *);
-file_private int check_format_type(const char *, int, const char **);
-file_private int check_format(struct magic_set *, struct magic *);
-file_private int get_op(char);
-file_private int parse_mime(struct magic_set *, struct magic_entry *, const char *,
- size_t);
-file_private int parse_strength(struct magic_set *, struct magic_entry *,
- const char *, size_t);
-file_private int parse_apple(struct magic_set *, struct magic_entry *, const char *,
- size_t);
-file_private int parse_ext(struct magic_set *, struct magic_entry *, const char *,
- size_t);
-
-
-file_private size_t magicsize = sizeof(struct magic);
-
-file_private const char usg_hdr[] = "cont\toffset\ttype\topcode\tmask\tvalue\tdesc";
-
-file_private struct {
- const char *name;
- size_t len;
- int (*fun)(struct magic_set *, struct magic_entry *, const char *,
- size_t);
-} bang[] = {
-#define DECLARE_FIELD(name) { # name, sizeof(# name) - 1, parse_ ## name }
- DECLARE_FIELD(mime),
- DECLARE_FIELD(apple),
- DECLARE_FIELD(ext),
- DECLARE_FIELD(strength),
-#undef DECLARE_FIELD
- { NULL, 0, NULL }
-};
-
-#ifdef COMPILE_ONLY
-
-int main(int, char *[]);
-
-int
-main(int argc, char *argv[])
-{
- int ret;
- struct magic_set *ms;
- char *progname;
-
- if ((progname = strrchr(argv[0], '/')) != NULL)
- progname++;
- else
- progname = argv[0];
-
- if (argc != 2) {
- (void)fprintf(stderr, "Usage: %s file\n", progname);
- return 1;
- }
-
- if ((ms = magic_open(MAGIC_CHECK)) == NULL) {
- (void)fprintf(stderr, "%s: %s\n", progname, strerror(errno));
- return 1;
- }
- ret = magic_compile(ms, argv[1]) == -1 ? 1 : 0;
- if (ret == 1)
- (void)fprintf(stderr, "%s: %s\n", progname, magic_error(ms));
- magic_close(ms);
- return ret;
-}
-#endif /* COMPILE_ONLY */
-
-struct type_tbl_s {
- const char name[16];
- const size_t len;
- const int type;
- const int format;
-};
-
-/*
- * XXX - the actual Single UNIX Specification says that "long" means "long",
- * as in the C data type, but we treat it as meaning "4-byte integer".
- * Given that the OS X version of file 5.04 did the same, I guess that passes
- * the actual test; having "long" be dependent on how big a "long" is on
- * the machine running "file" is silly.
- */
-static const struct type_tbl_s type_tbl[] = {
-# define XX(s) s, (sizeof(s) - 1)
-# define XX_NULL "", 0
- { XX("invalid"), FILE_INVALID, FILE_FMT_NONE },
- { XX("byte"), FILE_BYTE, FILE_FMT_NUM },
- { XX("short"), FILE_SHORT, FILE_FMT_NUM },
- { XX("default"), FILE_DEFAULT, FILE_FMT_NONE },
- { XX("long"), FILE_LONG, FILE_FMT_NUM },
- { XX("string"), FILE_STRING, FILE_FMT_STR },
- { XX("date"), FILE_DATE, FILE_FMT_STR },
- { XX("beshort"), FILE_BESHORT, FILE_FMT_NUM },
- { XX("belong"), FILE_BELONG, FILE_FMT_NUM },
- { XX("bedate"), FILE_BEDATE, FILE_FMT_STR },
- { XX("leshort"), FILE_LESHORT, FILE_FMT_NUM },
- { XX("lelong"), FILE_LELONG, FILE_FMT_NUM },
- { XX("ledate"), FILE_LEDATE, FILE_FMT_STR },
- { XX("pstring"), FILE_PSTRING, FILE_FMT_STR },
- { XX("ldate"), FILE_LDATE, FILE_FMT_STR },
- { XX("beldate"), FILE_BELDATE, FILE_FMT_STR },
- { XX("leldate"), FILE_LELDATE, FILE_FMT_STR },
- { XX("regex"), FILE_REGEX, FILE_FMT_STR },
- { XX("bestring16"), FILE_BESTRING16, FILE_FMT_STR },
- { XX("lestring16"), FILE_LESTRING16, FILE_FMT_STR },
- { XX("search"), FILE_SEARCH, FILE_FMT_STR },
- { XX("medate"), FILE_MEDATE, FILE_FMT_STR },
- { XX("meldate"), FILE_MELDATE, FILE_FMT_STR },
- { XX("melong"), FILE_MELONG, FILE_FMT_NUM },
- { XX("quad"), FILE_QUAD, FILE_FMT_QUAD },
- { XX("lequad"), FILE_LEQUAD, FILE_FMT_QUAD },
- { XX("bequad"), FILE_BEQUAD, FILE_FMT_QUAD },
- { XX("qdate"), FILE_QDATE, FILE_FMT_STR },
- { XX("leqdate"), FILE_LEQDATE, FILE_FMT_STR },
- { XX("beqdate"), FILE_BEQDATE, FILE_FMT_STR },
- { XX("qldate"), FILE_QLDATE, FILE_FMT_STR },
- { XX("leqldate"), FILE_LEQLDATE, FILE_FMT_STR },
- { XX("beqldate"), FILE_BEQLDATE, FILE_FMT_STR },
- { XX("float"), FILE_FLOAT, FILE_FMT_FLOAT },
- { XX("befloat"), FILE_BEFLOAT, FILE_FMT_FLOAT },
- { XX("lefloat"), FILE_LEFLOAT, FILE_FMT_FLOAT },
- { XX("double"), FILE_DOUBLE, FILE_FMT_DOUBLE },
- { XX("bedouble"), FILE_BEDOUBLE, FILE_FMT_DOUBLE },
- { XX("ledouble"), FILE_LEDOUBLE, FILE_FMT_DOUBLE },
- { XX("leid3"), FILE_LEID3, FILE_FMT_NUM },
- { XX("beid3"), FILE_BEID3, FILE_FMT_NUM },
- { XX("indirect"), FILE_INDIRECT, FILE_FMT_NUM },
- { XX("qwdate"), FILE_QWDATE, FILE_FMT_STR },
- { XX("leqwdate"), FILE_LEQWDATE, FILE_FMT_STR },
- { XX("beqwdate"), FILE_BEQWDATE, FILE_FMT_STR },
- { XX("name"), FILE_NAME, FILE_FMT_NONE },
- { XX("use"), FILE_USE, FILE_FMT_NONE },
- { XX("clear"), FILE_CLEAR, FILE_FMT_NONE },
- { XX("der"), FILE_DER, FILE_FMT_STR },
- { XX("guid"), FILE_GUID, FILE_FMT_STR },
- { XX("offset"), FILE_OFFSET, FILE_FMT_QUAD },
- { XX("bevarint"), FILE_BEVARINT, FILE_FMT_STR },
- { XX("levarint"), FILE_LEVARINT, FILE_FMT_STR },
- { XX("msdosdate"), FILE_MSDOSDATE, FILE_FMT_STR },
- { XX("lemsdosdate"), FILE_LEMSDOSDATE, FILE_FMT_STR },
- { XX("bemsdosdate"), FILE_BEMSDOSDATE, FILE_FMT_STR },
- { XX("msdostime"), FILE_MSDOSTIME, FILE_FMT_STR },
- { XX("lemsdostime"), FILE_LEMSDOSTIME, FILE_FMT_STR },
- { XX("bemsdostime"), FILE_BEMSDOSTIME, FILE_FMT_STR },
- { XX("octal"), FILE_OCTAL, FILE_FMT_STR },
- { XX_NULL, FILE_INVALID, FILE_FMT_NONE },
-};
-
-/*
- * These are not types, and cannot be preceded by "u" to make them
- * unsigned.
- */
-static const struct type_tbl_s special_tbl[] = {
- { XX("der"), FILE_DER, FILE_FMT_STR },
- { XX("name"), FILE_NAME, FILE_FMT_STR },
- { XX("use"), FILE_USE, FILE_FMT_STR },
- { XX("octal"), FILE_OCTAL, FILE_FMT_STR },
- { XX_NULL, FILE_INVALID, FILE_FMT_NONE },
-};
-# undef XX
-# undef XX_NULL
-
-file_private int
-get_type(const struct type_tbl_s *tbl, const char *l, const char **t)
-{
- const struct type_tbl_s *p;
-
- for (p = tbl; p->len; p++) {
- if (strncmp(l, p->name, p->len) == 0) {
- if (t)
- *t = l + p->len;
- break;
- }
- }
- return p->type;
-}
-
-file_private off_t
-maxoff_t(void) {
- if (/*CONSTCOND*/sizeof(off_t) == sizeof(int))
- return CAST(off_t, INT_MAX);
- if (/*CONSTCOND*/sizeof(off_t) == sizeof(long))
- return CAST(off_t, LONG_MAX);
- return 0x7fffffff;
-}
-
-file_private int
-get_standard_integer_type(const char *l, const char **t)
-{
- int type;
-
- if (isalpha(CAST(unsigned char, l[1]))) {
- switch (l[1]) {
- case 'C':
- /* "dC" and "uC" */
- type = FILE_BYTE;
- break;
- case 'S':
- /* "dS" and "uS" */
- type = FILE_SHORT;
- break;
- case 'I':
- case 'L':
- /*
- * "dI", "dL", "uI", and "uL".
- *
- * XXX - the actual Single UNIX Specification says
- * that "L" means "long", as in the C data type,
- * but we treat it as meaning "4-byte integer".
- * Given that the OS X version of file 5.04 did
- * the same, I guess that passes the actual SUS
- * validation suite; having "dL" be dependent on
- * how big a "long" is on the machine running
- * "file" is silly.
- */
- type = FILE_LONG;
- break;
- case 'Q':
- /* "dQ" and "uQ" */
- type = FILE_QUAD;
- break;
- default:
- /* "d{anything else}", "u{anything else}" */
- return FILE_INVALID;
- }
- l += 2;
- } else if (isdigit(CAST(unsigned char, l[1]))) {
- /*
- * "d{num}" and "u{num}"; we only support {num} values
- * of 1, 2, 4, and 8 - the Single UNIX Specification
- * doesn't say anything about whether arbitrary
- * values should be supported, but both the Solaris 10
- * and OS X Mountain Lion versions of file passed the
- * Single UNIX Specification validation suite, and
- * neither of them support values bigger than 8 or
- * non-power-of-2 values.
- */
- if (isdigit(CAST(unsigned char, l[2]))) {
- /* Multi-digit, so > 9 */
- return FILE_INVALID;
- }
- switch (l[1]) {
- case '1':
- type = FILE_BYTE;
- break;
- case '2':
- type = FILE_SHORT;
- break;
- case '4':
- type = FILE_LONG;
- break;
- case '8':
- type = FILE_QUAD;
- break;
- default:
- /* XXX - what about 3, 5, 6, or 7? */
- return FILE_INVALID;
- }
- l += 2;
- } else {
- /*
- * "d" or "u" by itself.
- */
- type = FILE_LONG;
- ++l;
- }
- if (t)
- *t = l;
- return type;
-}
-
-file_private void
-init_file_tables(void)
-{
- static int done = 0;
- const struct type_tbl_s *p;
-
- if (done)
- return;
- done++;
-
- for (p = type_tbl; p->len; p++) {
- assert(p->type < FILE_NAMES_SIZE);
- file_names[p->type] = p->name;
- file_formats[p->type] = p->format;
- }
- assert(p - type_tbl == FILE_NAMES_SIZE);
-}
-
-file_private int
-add_mlist(struct mlist *mlp, struct magic_map *map, size_t idx)
-{
- struct mlist *ml;
-
- mlp->map = NULL;
- if ((ml = CAST(struct mlist *, malloc(sizeof(*ml)))) == NULL)
- return -1;
-
- ml->map = idx == 0 ? map : NULL;
- ml->magic = map->magic[idx];
- ml->nmagic = map->nmagic[idx];
- if (ml->nmagic) {
- ml->magic_rxcomp = CAST(file_regex_t **,
- calloc(ml->nmagic, sizeof(*ml->magic_rxcomp)));
- if (ml->magic_rxcomp == NULL) {
- free(ml);
- return -1;
- }
- } else
- ml->magic_rxcomp = NULL;
- mlp->prev->next = ml;
- ml->prev = mlp->prev;
- ml->next = mlp;
- mlp->prev = ml;
- return 0;
-}
-
-/*
- * Handle one file or directory.
- */
-file_private int
-apprentice_1(struct magic_set *ms, const char *fn, int action)
-{
- struct magic_map *map;
-#ifndef COMPILE_ONLY
- size_t i;
-#endif
-
- if (magicsize != FILE_MAGICSIZE) {
- file_error(ms, 0, "magic element size %lu != %lu",
- CAST(unsigned long, sizeof(*map->magic[0])),
- CAST(unsigned long, FILE_MAGICSIZE));
- return -1;
- }
-
- if (action == FILE_COMPILE) {
- map = apprentice_load(ms, fn, action);
- if (map == NULL)
- return -1;
- return apprentice_compile(ms, map, fn);
- }
-
-#ifndef COMPILE_ONLY
- map = apprentice_map(ms, fn);
- if (map == NULL) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(NULL, "using regular magic file `%s'", fn);
- map = apprentice_load(ms, fn, action);
- if (map == NULL)
- return -1;
- }
-
- for (i = 0; i < MAGIC_SETS; i++) {
- if (add_mlist(ms->mlist[i], map, i) == -1) {
- /* failed to add to any list, free explicitly */
- if (i == 0)
- apprentice_unmap(map);
- else
- mlist_free_all(ms);
- file_oomem(ms, sizeof(*ms->mlist[0]));
- return -1;
- }
- }
-
- if (action == FILE_LIST) {
- for (i = 0; i < MAGIC_SETS; i++) {
- printf("Set %" SIZE_T_FORMAT "u:\nBinary patterns:\n",
- i);
- apprentice_list(ms->mlist[i], BINTEST);
- printf("Text patterns:\n");
- apprentice_list(ms->mlist[i], TEXTTEST);
- }
- }
- return 0;
-#else
- return 0;
-#endif /* COMPILE_ONLY */
-}
-
-file_protected void
-file_ms_free(struct magic_set *ms)
-{
- size_t i;
- if (ms == NULL)
- return;
- for (i = 0; i < MAGIC_SETS; i++)
- mlist_free(ms->mlist[i]);
- free(ms->o.pbuf);
- free(ms->o.buf);
- free(ms->c.li);
-#ifdef USE_C_LOCALE
- freelocale(ms->c_lc_ctype);
-#endif
- free(ms);
-}
-
-file_protected struct magic_set *
-file_ms_alloc(int flags)
-{
- struct magic_set *ms;
- size_t i, len;
-
- if ((ms = CAST(struct magic_set *, calloc(CAST(size_t, 1u),
- sizeof(*ms)))) == NULL)
- return NULL;
-
- if (magic_setflags(ms, flags) == -1) {
- errno = EINVAL;
- goto free;
- }
-
- ms->o.buf = ms->o.pbuf = NULL;
- ms->o.blen = 0;
- len = (ms->c.len = 10) * sizeof(*ms->c.li);
-
- if ((ms->c.li = CAST(struct level_info *, malloc(len))) == NULL)
- goto free;
-
- ms->event_flags = 0;
- ms->error = -1;
- for (i = 0; i < MAGIC_SETS; i++)
- ms->mlist[i] = NULL;
- ms->file = "unknown";
- ms->line = 0;
- ms->indir_max = FILE_INDIR_MAX;
- ms->name_max = FILE_NAME_MAX;
- ms->elf_shnum_max = FILE_ELF_SHNUM_MAX;
- ms->elf_shsize_max = FILE_ELF_SHSIZE_MAX;
- ms->elf_phnum_max = FILE_ELF_PHNUM_MAX;
- ms->elf_notes_max = FILE_ELF_NOTES_MAX;
- ms->regex_max = FILE_REGEX_MAX;
- ms->bytes_max = FILE_BYTES_MAX;
- ms->encoding_max = FILE_ENCODING_MAX;
-#ifdef USE_C_LOCALE
- ms->c_lc_ctype = newlocale(LC_CTYPE_MASK, "C", 0);
- assert(ms->c_lc_ctype != NULL);
-#endif
- return ms;
-free:
- free(ms);
- return NULL;
-}
-
-file_private void
-apprentice_unmap(struct magic_map *map)
-{
- size_t i;
- char *p;
- if (map == NULL)
- return;
-
- switch (map->type) {
- case MAP_TYPE_USER:
- break;
- case MAP_TYPE_MALLOC:
- p = CAST(char *, map->p);
- for (i = 0; i < MAGIC_SETS; i++) {
- char *b = RCAST(char *, map->magic[i]);
- if (p != NULL && b >= p && b <= p + map->len)
- continue;
- free(b);
- }
- free(p);
- break;
-#ifdef QUICK
- case MAP_TYPE_MMAP:
- if (map->p && map->p != MAP_FAILED)
- (void)munmap(map->p, map->len);
- break;
-#endif
- default:
- fprintf(stderr, "Bad map type %d", map->type);
- abort();
- }
- free(map);
-}
-
-file_private struct mlist *
-mlist_alloc(void)
-{
- struct mlist *mlist;
- if ((mlist = CAST(struct mlist *, calloc(1, sizeof(*mlist)))) == NULL) {
- return NULL;
- }
- mlist->next = mlist->prev = mlist;
- return mlist;
-}
-
-file_private void
-mlist_free_all(struct magic_set *ms)
-{
- size_t i;
-
- for (i = 0; i < MAGIC_SETS; i++) {
- mlist_free(ms->mlist[i]);
- ms->mlist[i] = NULL;
- }
-}
-
-file_private void
-mlist_free_one(struct mlist *ml)
-{
- size_t i;
-
- if (ml->map)
- apprentice_unmap(CAST(struct magic_map *, ml->map));
-
- for (i = 0; i < ml->nmagic; ++i) {
- if (ml->magic_rxcomp[i]) {
- file_regfree(ml->magic_rxcomp[i]);
- free(ml->magic_rxcomp[i]);
- ml->magic_rxcomp[i] = NULL;
- }
- }
- free(ml->magic_rxcomp);
- ml->magic_rxcomp = NULL;
- free(ml);
-}
-
-file_private void
-mlist_free(struct mlist *mlist)
-{
- struct mlist *ml, *next;
-
- if (mlist == NULL)
- return;
-
- for (ml = mlist->next; ml != mlist;) {
- next = ml->next;
- mlist_free_one(ml);
- ml = next;
- }
- mlist_free_one(mlist);
-}
-
-#ifndef COMPILE_ONLY
-/* void **bufs: an array of compiled magic files */
-file_protected int
-buffer_apprentice(struct magic_set *ms, struct magic **bufs,
- size_t *sizes, size_t nbufs)
-{
- size_t i, j;
- struct magic_map *map;
-
- if (nbufs == 0)
- return -1;
-
- (void)file_reset(ms, 0);
-
- init_file_tables();
-
- for (i = 0; i < MAGIC_SETS; i++) {
- mlist_free(ms->mlist[i]);
- if ((ms->mlist[i] = mlist_alloc()) == NULL) {
- file_oomem(ms, sizeof(*ms->mlist[0]));
- goto fail;
- }
- }
-
- for (i = 0; i < nbufs; i++) {
- map = apprentice_buf(ms, bufs[i], sizes[i]);
- if (map == NULL)
- goto fail;
-
- for (j = 0; j < MAGIC_SETS; j++) {
- if (add_mlist(ms->mlist[j], map, j) == -1) {
- file_oomem(ms, sizeof(*ms->mlist[0]));
- goto fail;
- }
- }
- }
-
- return 0;
-fail:
- mlist_free_all(ms);
- return -1;
-}
-#endif
-
-/* const char *fn: list of magic files and directories */
-file_protected int
-file_apprentice(struct magic_set *ms, const char *fn, int action)
-{
- char *p, *mfn;
- int fileerr, errs = -1;
- size_t i, j;
-
- (void)file_reset(ms, 0);
-
- if ((fn = magic_getpath(fn, action)) == NULL)
- return -1;
-
- init_file_tables();
-
- if ((mfn = strdup(fn)) == NULL) {
- file_oomem(ms, strlen(fn));
- return -1;
- }
-
- for (i = 0; i < MAGIC_SETS; i++) {
- mlist_free(ms->mlist[i]);
- if ((ms->mlist[i] = mlist_alloc()) == NULL) {
- file_oomem(ms, sizeof(*ms->mlist[0]));
- for (j = 0; j < i; j++) {
- mlist_free(ms->mlist[j]);
- ms->mlist[j] = NULL;
- }
- free(mfn);
- return -1;
- }
- }
- fn = mfn;
-
- while (fn) {
- p = CCAST(char *, strchr(fn, PATHSEP));
- if (p)
- *p++ = '\0';
- if (*fn == '\0')
- break;
- fileerr = apprentice_1(ms, fn, action);
- errs = MAX(errs, fileerr);
- fn = p;
- }
-
- free(mfn);
-
- if (errs == -1) {
- for (i = 0; i < MAGIC_SETS; i++) {
- mlist_free(ms->mlist[i]);
- ms->mlist[i] = NULL;
- }
- file_error(ms, 0, "could not find any valid magic files!");
- return -1;
- }
-
-#if 0
- /*
- * Always leave the database loaded
- */
- if (action == FILE_LOAD)
- return 0;
-
- for (i = 0; i < MAGIC_SETS; i++) {
- mlist_free(ms->mlist[i]);
- ms->mlist[i] = NULL;
- }
-#endif
-
- switch (action) {
- case FILE_LOAD:
- case FILE_COMPILE:
- case FILE_CHECK:
- case FILE_LIST:
- return 0;
- default:
- file_error(ms, 0, "Invalid action %d", action);
- return -1;
- }
-}
-
-/*
- * Compute the real length of a magic expression, for the purposes
- * of determining how "strong" a magic expression is (approximating
- * how specific its matches are):
- * - magic characters count 0 unless escaped.
- * - [] expressions count 1
- * - {} expressions count 0
- * - regular characters or escaped magic characters count 1
- * - 0 length expressions count as one
- */
-file_private size_t
-nonmagic(const char *str)
-{
- const char *p;
- size_t rv = 0;
-
- for (p = str; *p; p++)
- switch (*p) {
- case '\\': /* Escaped anything counts 1 */
- if (!*++p)
- p--;
- rv++;
- continue;
- case '?': /* Magic characters count 0 */
- case '*':
- case '.':
- case '+':
- case '^':
- case '$':
- continue;
-		case '[':	/* Bracketed expressions count 1; skip to the ']' */
- while (*p && *p != ']')
- p++;
- p--;
- continue;
- case '{': /* Braced expressions count 0 */
- while (*p && *p != '}')
- p++;
- if (!*p)
- p--;
- continue;
- default: /* Anything else counts 1 */
- rv++;
- continue;
- }
-
- return rv == 0 ? 1 : rv; /* Return at least 1 */
-}
-
-
-file_private size_t
-typesize(int type)
-{
- switch (type) {
- case FILE_BYTE:
- return 1;
-
- case FILE_SHORT:
- case FILE_LESHORT:
- case FILE_BESHORT:
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- return 2;
-
- case FILE_LONG:
- case FILE_LELONG:
- case FILE_BELONG:
- case FILE_MELONG:
- return 4;
-
- case FILE_DATE:
- case FILE_LEDATE:
- case FILE_BEDATE:
- case FILE_MEDATE:
- case FILE_LDATE:
- case FILE_LELDATE:
- case FILE_BELDATE:
- case FILE_MELDATE:
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- case FILE_BEID3:
- case FILE_LEID3:
- return 4;
-
- case FILE_QUAD:
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- case FILE_QDATE:
- case FILE_LEQDATE:
- case FILE_BEQDATE:
- case FILE_QLDATE:
- case FILE_LEQLDATE:
- case FILE_BEQLDATE:
- case FILE_QWDATE:
- case FILE_LEQWDATE:
- case FILE_BEQWDATE:
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- case FILE_OFFSET:
- case FILE_BEVARINT:
- case FILE_LEVARINT:
- return 8;
-
- case FILE_GUID:
- return 16;
-
- default:
- return FILE_BADSIZE;
- }
-}
-
-/*
- * Get weight of this magic entry, for sorting purposes.
- */
-file_private ssize_t
-apprentice_magic_strength_1(const struct magic *m)
-{
-#define MULT 10U
- size_t ts, v;
- ssize_t val = 2 * MULT; /* baseline strength */
-
- switch (m->type) {
- case FILE_DEFAULT: /* make sure this sorts last */
- if (m->factor_op != FILE_FACTOR_OP_NONE) {
-			file_magwarn(NULL, "Unsupported factor_op in default %d",
- m->factor_op);
- }
- return 0;
-
- case FILE_BYTE:
- case FILE_SHORT:
- case FILE_LESHORT:
- case FILE_BESHORT:
- case FILE_LONG:
- case FILE_LELONG:
- case FILE_BELONG:
- case FILE_MELONG:
- case FILE_DATE:
- case FILE_LEDATE:
- case FILE_BEDATE:
- case FILE_MEDATE:
- case FILE_LDATE:
- case FILE_LELDATE:
- case FILE_BELDATE:
- case FILE_MELDATE:
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- case FILE_QUAD:
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- case FILE_QDATE:
- case FILE_LEQDATE:
- case FILE_BEQDATE:
- case FILE_QLDATE:
- case FILE_LEQLDATE:
- case FILE_BEQLDATE:
- case FILE_QWDATE:
- case FILE_LEQWDATE:
- case FILE_BEQWDATE:
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- case FILE_BEVARINT:
- case FILE_LEVARINT:
- case FILE_GUID:
- case FILE_BEID3:
- case FILE_LEID3:
- case FILE_OFFSET:
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- ts = typesize(m->type);
- if (ts == FILE_BADSIZE) {
- (void)fprintf(stderr, "Bad size for type %d\n",
- m->type);
- abort();
- }
- val += ts * MULT;
- break;
-
- case FILE_PSTRING:
- case FILE_STRING:
- case FILE_OCTAL:
- val += m->vallen * MULT;
- break;
-
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- val += m->vallen * MULT / 2;
- break;
-
- case FILE_SEARCH:
- if (m->vallen == 0)
- break;
- val += m->vallen * MAX(MULT / m->vallen, 1);
- break;
-
- case FILE_REGEX:
- v = nonmagic(m->value.s);
- val += v * MAX(MULT / v, 1);
- break;
-
- case FILE_INDIRECT:
- case FILE_NAME:
- case FILE_USE:
- case FILE_CLEAR:
- break;
-
- case FILE_DER:
- val += MULT;
- break;
-
- default:
- (void)fprintf(stderr, "Bad type %d\n", m->type);
- abort();
- }
-
- switch (m->reln) {
- case 'x': /* matches anything penalize */
- case '!': /* matches almost anything penalize */
- val = 0;
- break;
-
- case '=': /* Exact match, prefer */
- val += MULT;
- break;
-
- case '>':
- case '<': /* comparison match reduce strength */
- val -= 2 * MULT;
- break;
-
- case '^':
- case '&': /* masking bits, we could count them too */
- val -= MULT;
- break;
-
- default:
- (void)fprintf(stderr, "Bad relation %c\n", m->reln);
- abort();
- }
-
- return val;
-}
-
-
-/*ARGSUSED*/
-file_protected size_t
-file_magic_strength(const struct magic *m,
- size_t nmagic __attribute__((__unused__)))
-{
- ssize_t val = apprentice_magic_strength_1(m);
-
-#ifdef notyet
- if (m->desc[0] == '\0') {
- size_t i;
- /*
- * Magic entries with no description get their continuations
- * added
- */
- for (i = 1; m[i].cont_level != 0 && i < MIN(nmagic, 3); i++) {
- ssize_t v = apprentice_magic_strength_1(&m[i]) >>
- (i + 1);
- val += v;
- if (m[i].desc[0] != '\0')
- break;
- }
- }
-#endif
-
- switch (m->factor_op) {
- case FILE_FACTOR_OP_NONE:
- break;
- case FILE_FACTOR_OP_PLUS:
- val += m->factor;
- break;
- case FILE_FACTOR_OP_MINUS:
- val -= m->factor;
- break;
- case FILE_FACTOR_OP_TIMES:
- val *= m->factor;
- break;
- case FILE_FACTOR_OP_DIV:
- val /= m->factor;
- break;
- default:
- (void)fprintf(stderr, "Bad factor_op %u\n", m->factor_op);
- abort();
- }
-
- if (val <= 0) /* ensure we only return 0 for FILE_DEFAULT */
- val = 1;
-
-#ifndef notyet
- /*
- * Magic entries with no description get a bonus because they depend
- * on subsequent magic entries to print something.
- */
- if (m->desc[0] == '\0')
- val++;
-#endif
-
- return val;
-}
-
-/*
- * Sort callback for sorting entries by "strength" (basically length)
- */
-file_private int
-apprentice_sort(const void *a, const void *b)
-{
- const struct magic_entry *ma = CAST(const struct magic_entry *, a);
- const struct magic_entry *mb = CAST(const struct magic_entry *, b);
- size_t sa = file_magic_strength(ma->mp, ma->cont_count);
- size_t sb = file_magic_strength(mb->mp, mb->cont_count);
- if (sa == sb)
- return 0;
- else if (sa > sb)
- return -1;
- else
- return 1;
-}
-
-/*
- * Shows sorted patterns list in the order which is used for the matching
- */
-file_private void
-apprentice_list(struct mlist *mlist, int mode)
-{
- uint32_t magindex, descindex, mimeindex, lineindex;
- struct mlist *ml;
- for (ml = mlist->next; ml != mlist; ml = ml->next) {
- for (magindex = 0; magindex < ml->nmagic; magindex++) {
- struct magic *m = &ml->magic[magindex];
- if ((m->flag & mode) != mode) {
- /* Skip sub-tests */
- while (magindex + 1 < ml->nmagic &&
- ml->magic[magindex + 1].cont_level != 0)
- ++magindex;
-				continue; /* Skip to next top-level test */
- }
-
- /*
- * Try to iterate over the tree until we find item with
- * description/mimetype.
- */
- lineindex = descindex = mimeindex = magindex;
- for (; magindex + 1 < ml->nmagic &&
- ml->magic[magindex + 1].cont_level != 0;
- magindex++) {
- uint32_t mi = magindex + 1;
- if (*ml->magic[descindex].desc == '\0'
- && *ml->magic[mi].desc)
- descindex = mi;
- if (*ml->magic[mimeindex].mimetype == '\0'
- && *ml->magic[mi].mimetype)
- mimeindex = mi;
- }
-
- printf("Strength = %3" SIZE_T_FORMAT "u@%u: %s [%s]\n",
- file_magic_strength(m, ml->nmagic - magindex),
- ml->magic[lineindex].lineno,
- ml->magic[descindex].desc,
- ml->magic[mimeindex].mimetype);
- }
- }
-}
-
-file_private void
-set_test_type(struct magic *mstart, struct magic *m)
-{
- switch (m->type) {
- case FILE_BYTE:
- case FILE_SHORT:
- case FILE_LONG:
- case FILE_DATE:
- case FILE_BESHORT:
- case FILE_BELONG:
- case FILE_BEDATE:
- case FILE_LESHORT:
- case FILE_LELONG:
- case FILE_LEDATE:
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MEDATE:
- case FILE_MELDATE:
- case FILE_MELONG:
- case FILE_QUAD:
- case FILE_LEQUAD:
- case FILE_BEQUAD:
- case FILE_QDATE:
- case FILE_LEQDATE:
- case FILE_BEQDATE:
- case FILE_QLDATE:
- case FILE_LEQLDATE:
- case FILE_BEQLDATE:
- case FILE_QWDATE:
- case FILE_LEQWDATE:
- case FILE_BEQWDATE:
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- case FILE_BEVARINT:
- case FILE_LEVARINT:
- case FILE_DER:
- case FILE_GUID:
- case FILE_OFFSET:
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- case FILE_OCTAL:
- mstart->flag |= BINTEST;
- break;
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- /* Allow text overrides */
- if (mstart->str_flags & STRING_TEXTTEST)
- mstart->flag |= TEXTTEST;
- else
- mstart->flag |= BINTEST;
- break;
- case FILE_REGEX:
- case FILE_SEARCH:
- /* Check for override */
- if (mstart->str_flags & STRING_BINTEST)
- mstart->flag |= BINTEST;
- if (mstart->str_flags & STRING_TEXTTEST)
- mstart->flag |= TEXTTEST;
-
- if (mstart->flag & (TEXTTEST|BINTEST))
- break;
-
- /* binary test if pattern is not text */
- if (file_looks_utf8(m->value.us, CAST(size_t, m->vallen), NULL,
- NULL) <= 0)
- mstart->flag |= BINTEST;
- else
- mstart->flag |= TEXTTEST;
- break;
- case FILE_DEFAULT:
- /* can't deduce anything; we shouldn't see this at the
- top level anyway */
- break;
- case FILE_INVALID:
- default:
- /* invalid search type, but no need to complain here */
- break;
- }
-}
-
-file_private int
-addentry(struct magic_set *ms, struct magic_entry *me,
- struct magic_entry_set *mset)
-{
- size_t i = me->mp->type == FILE_NAME ? 1 : 0;
- if (mset[i].me == NULL || mset[i].count == mset[i].max) {
- struct magic_entry *mp;
-
- size_t incr = mset[i].max + ALLOC_INCR;
- if ((mp = CAST(struct magic_entry *,
- realloc(mset[i].me, sizeof(*mp) * incr))) ==
- NULL) {
- file_oomem(ms, sizeof(*mp) * incr);
- return -1;
- }
- (void)memset(&mp[mset[i].count], 0, sizeof(*mp) *
- ALLOC_INCR);
- mset[i].me = mp;
- mset[i].max = CAST(uint32_t, incr);
- assert(mset[i].max == incr);
- }
- mset[i].me[mset[i].count++] = *me;
- memset(me, 0, sizeof(*me));
- return 0;
-}
-
-/*
- * Load and parse one file.
- */
-file_private void
-load_1(struct magic_set *ms, int action, const char *fn, int *errs,
- struct magic_entry_set *mset)
-{
- size_t lineno = 0, llen = 0;
- char *line = NULL;
- ssize_t len;
- struct magic_entry me;
-
- FILE *f = fopen(ms->file = fn, "r");
- if (f == NULL) {
- if (errno != ENOENT)
- file_error(ms, errno, "cannot read magic file `%s'",
- fn);
- (*errs)++;
- return;
- }
-
- memset(&me, 0, sizeof(me));
- /* read and parse this file */
- for (ms->line = 1; (len = getline(&line, &llen, f)) != -1;
- ms->line++) {
- if (len == 0) /* null line, garbage, etc */
- continue;
- if (line[len - 1] == '\n') {
- lineno++;
- line[len - 1] = '\0'; /* delete newline */
- }
- switch (line[0]) {
- case '\0': /* empty, do not parse */
- case '#': /* comment, do not parse */
- continue;
- case '!':
- if (line[1] == ':') {
- size_t i;
-
- for (i = 0; bang[i].name != NULL; i++) {
- if (CAST(size_t, len - 2) > bang[i].len &&
- memcmp(bang[i].name, line + 2,
- bang[i].len) == 0)
- break;
- }
- if (bang[i].name == NULL) {
- file_error(ms, 0,
- "Unknown !: entry `%s'", line);
- (*errs)++;
- continue;
- }
- if (me.mp == NULL) {
- file_error(ms, 0,
- "No current entry for :!%s type",
- bang[i].name);
- (*errs)++;
- continue;
- }
- if ((*bang[i].fun)(ms, &me,
- line + bang[i].len + 2,
- len - bang[i].len - 2) != 0) {
- (*errs)++;
- continue;
- }
- continue;
- }
- /*FALLTHROUGH*/
- default:
- again:
- switch (parse(ms, &me, line, lineno, action)) {
- case 0:
- continue;
- case 1:
- (void)addentry(ms, &me, mset);
- goto again;
- default:
- (*errs)++;
- break;
- }
- }
- }
- if (me.mp)
- (void)addentry(ms, &me, mset);
- free(line);
- (void)fclose(f);
-}
-
-/*
- * parse a file or directory of files
- * const char *fn: name of magic file or directory
- */
-file_private int
-cmpstrp(const void *p1, const void *p2)
-{
- return strcmp(*RCAST(char *const *, p1), *RCAST(char *const *, p2));
-}
-
-
-file_private uint32_t
-set_text_binary(struct magic_set *ms, struct magic_entry *me, uint32_t nme,
- uint32_t starttest)
-{
- static const char text[] = "text";
- static const char binary[] = "binary";
- static const size_t len = sizeof(text);
-
- uint32_t i = starttest;
-
- do {
- set_test_type(me[starttest].mp, me[i].mp);
- if ((ms->flags & MAGIC_DEBUG) == 0)
- continue;
- (void)fprintf(stderr, "%s%s%s: %s\n",
- me[i].mp->mimetype,
- me[i].mp->mimetype[0] == '\0' ? "" : "; ",
- me[i].mp->desc[0] ? me[i].mp->desc : "(no description)",
- me[i].mp->flag & BINTEST ? binary : text);
- if (me[i].mp->flag & BINTEST) {
- char *p = strstr(me[i].mp->desc, text);
- if (p && (p == me[i].mp->desc ||
- isspace(CAST(unsigned char, p[-1]))) &&
- (p + len - me[i].mp->desc == MAXstring
- || (p[len] == '\0' ||
- isspace(CAST(unsigned char, p[len])))))
- (void)fprintf(stderr, "*** Possible "
- "binary test for text type\n");
- }
- } while (++i < nme && me[i].mp->cont_level != 0);
- return i;
-}
-
-file_private void
-set_last_default(struct magic_set *ms, struct magic_entry *me, uint32_t nme)
-{
- uint32_t i;
- for (i = 0; i < nme; i++) {
- if (me[i].mp->cont_level == 0 &&
- me[i].mp->type == FILE_DEFAULT) {
- while (++i < nme)
- if (me[i].mp->cont_level == 0)
- break;
- if (i != nme) {
- /* XXX - Ugh! */
- ms->line = me[i].mp->lineno;
- file_magwarn(ms,
- "level 0 \"default\" did not sort last");
- }
- return;
- }
- }
-}
-
-file_private int
-coalesce_entries(struct magic_set *ms, struct magic_entry *me, uint32_t nme,
- struct magic **ma, uint32_t *nma)
-{
- uint32_t i, mentrycount = 0;
- size_t slen;
-
- for (i = 0; i < nme; i++)
- mentrycount += me[i].cont_count;
-
- if (mentrycount == 0) {
- *ma = NULL;
- *nma = 0;
- return 0;
- }
-
- slen = sizeof(**ma) * mentrycount;
- if ((*ma = CAST(struct magic *, malloc(slen))) == NULL) {
- file_oomem(ms, slen);
- return -1;
- }
-
- mentrycount = 0;
- for (i = 0; i < nme; i++) {
- (void)memcpy(*ma + mentrycount, me[i].mp,
- me[i].cont_count * sizeof(**ma));
- mentrycount += me[i].cont_count;
- }
- *nma = mentrycount;
- return 0;
-}
-
-file_private void
-magic_entry_free(struct magic_entry *me, uint32_t nme)
-{
- uint32_t i;
- if (me == NULL)
- return;
- for (i = 0; i < nme; i++)
- free(me[i].mp);
- free(me);
-}
-
-file_private struct magic_map *
-apprentice_load(struct magic_set *ms, const char *fn, int action)
-{
- int errs = 0;
- uint32_t i, j;
- size_t files = 0, maxfiles = 0;
- char **filearr = NULL, *mfn;
- struct stat st;
- struct magic_map *map;
- struct magic_entry_set mset[MAGIC_SETS];
- DIR *dir;
- struct dirent *d;
-
- memset(mset, 0, sizeof(mset));
- ms->flags |= MAGIC_CHECK; /* Enable checks for parsed files */
-
-
- if ((map = CAST(struct magic_map *, calloc(1, sizeof(*map)))) == NULL)
- {
- file_oomem(ms, sizeof(*map));
- return NULL;
- }
- map->type = MAP_TYPE_MALLOC;
-
- /* print silly verbose header for USG compat. */
- if (action == FILE_CHECK)
- (void)fprintf(stderr, "%s\n", usg_hdr);
-
- /* load directory or file */
- if (stat(fn, &st) == 0 && S_ISDIR(st.st_mode)) {
- dir = opendir(fn);
- if (!dir) {
- errs++;
- goto out;
- }
- while ((d = readdir(dir)) != NULL) {
- if (d->d_name[0] == '.')
- continue;
- if (asprintf(&mfn, "%s/%s", fn, d->d_name) < 0) {
- file_oomem(ms,
- strlen(fn) + strlen(d->d_name) + 2);
- errs++;
- closedir(dir);
- goto out;
- }
- if (stat(mfn, &st) == -1 || !S_ISREG(st.st_mode)) {
- free(mfn);
- continue;
- }
- if (files >= maxfiles) {
- size_t mlen;
- char **nfilearr;
- maxfiles = (maxfiles + 1) * 2;
- mlen = maxfiles * sizeof(*filearr);
- if ((nfilearr = CAST(char **,
- realloc(filearr, mlen))) == NULL) {
- file_oomem(ms, mlen);
- free(mfn);
- closedir(dir);
- errs++;
- goto out;
- }
- filearr = nfilearr;
- }
- filearr[files++] = mfn;
- }
- closedir(dir);
- if (filearr) {
- qsort(filearr, files, sizeof(*filearr), cmpstrp);
- for (i = 0; i < files; i++) {
- load_1(ms, action, filearr[i], &errs, mset);
- free(filearr[i]);
- }
- free(filearr);
- filearr = NULL;
- }
- } else
- load_1(ms, action, fn, &errs, mset);
- if (errs)
- goto out;
-
- for (j = 0; j < MAGIC_SETS; j++) {
- /* Set types of tests */
- for (i = 0; i < mset[j].count; ) {
- if (mset[j].me[i].mp->cont_level != 0) {
- i++;
- continue;
- }
- i = set_text_binary(ms, mset[j].me, mset[j].count, i);
- }
- if (mset[j].me)
- qsort(mset[j].me, mset[j].count, sizeof(*mset[0].me),
- apprentice_sort);
-
- /*
- * Make sure that any level 0 "default" line is last
- * (if one exists).
- */
- set_last_default(ms, mset[j].me, mset[j].count);
-
- /* coalesce per file arrays into a single one, if needed */
- if (mset[j].count == 0)
- continue;
-
- if (coalesce_entries(ms, mset[j].me, mset[j].count,
- &map->magic[j], &map->nmagic[j]) == -1) {
- errs++;
- goto out;
- }
- }
-
-out:
- free(filearr);
- for (j = 0; j < MAGIC_SETS; j++)
- magic_entry_free(mset[j].me, mset[j].count);
-
- if (errs) {
- apprentice_unmap(map);
- return NULL;
- }
- return map;
-}
-
-/*
- * extend the sign bit if the comparison is to be signed
- */
-file_protected uint64_t
-file_signextend(struct magic_set *ms, struct magic *m, uint64_t v)
-{
- if (!(m->flag & UNSIGNED)) {
- switch(m->type) {
- /*
- * Do not remove the casts below. They are
- * vital. When later compared with the data,
- * the sign extension must have happened.
- */
- case FILE_BYTE:
- v = CAST(signed char, v);
- break;
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- v = CAST(short, v);
- break;
- case FILE_DATE:
- case FILE_BEDATE:
- case FILE_LEDATE:
- case FILE_MEDATE:
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MELDATE:
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- v = CAST(int32_t, v);
- break;
- case FILE_QUAD:
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- case FILE_QDATE:
- case FILE_QLDATE:
- case FILE_QWDATE:
- case FILE_BEQDATE:
- case FILE_BEQLDATE:
- case FILE_BEQWDATE:
- case FILE_LEQDATE:
- case FILE_LEQLDATE:
- case FILE_LEQWDATE:
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- case FILE_OFFSET:
- case FILE_BEVARINT:
- case FILE_LEVARINT:
- v = CAST(int64_t, v);
- break;
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- case FILE_REGEX:
- case FILE_SEARCH:
- case FILE_DEFAULT:
- case FILE_INDIRECT:
- case FILE_NAME:
- case FILE_USE:
- case FILE_CLEAR:
- case FILE_DER:
- case FILE_GUID:
- case FILE_OCTAL:
- break;
- default:
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "cannot happen: m->type=%d\n",
- m->type);
- return FILE_BADSIZE;
- }
- }
- return v;
-}
-
-file_private int
-string_modifier_check(struct magic_set *ms, struct magic *m)
-{
- if ((ms->flags & MAGIC_CHECK) == 0)
- return 0;
-
- if ((m->type != FILE_REGEX || (m->str_flags & REGEX_LINE_COUNT) == 0) &&
- (m->type != FILE_PSTRING && (m->str_flags & PSTRING_LEN) != 0)) {
- file_magwarn(ms,
- "'/BHhLl' modifiers are only allowed for pascal strings\n");
- return -1;
- }
- switch (m->type) {
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- if (m->str_flags != 0) {
- file_magwarn(ms,
- "no modifiers allowed for 16-bit strings\n");
- return -1;
- }
- break;
- case FILE_STRING:
- case FILE_PSTRING:
- if ((m->str_flags & REGEX_OFFSET_START) != 0) {
- file_magwarn(ms,
- "'/%c' only allowed on regex and search\n",
- CHAR_REGEX_OFFSET_START);
- return -1;
- }
- break;
- case FILE_SEARCH:
- if (m->str_range == 0) {
- file_magwarn(ms,
- "missing range; defaulting to %d\n",
- STRING_DEFAULT_RANGE);
- m->str_range = STRING_DEFAULT_RANGE;
- return -1;
- }
- break;
- case FILE_REGEX:
- if ((m->str_flags & STRING_COMPACT_WHITESPACE) != 0) {
- file_magwarn(ms, "'/%c' not allowed on regex\n",
- CHAR_COMPACT_WHITESPACE);
- return -1;
- }
- if ((m->str_flags & STRING_COMPACT_OPTIONAL_WHITESPACE) != 0) {
- file_magwarn(ms, "'/%c' not allowed on regex\n",
- CHAR_COMPACT_OPTIONAL_WHITESPACE);
- return -1;
- }
- break;
- default:
- file_magwarn(ms, "coding error: m->type=%d\n",
- m->type);
- return -1;
- }
- return 0;
-}
-
-file_private int
-get_op(char c)
-{
- switch (c) {
- case '&':
- return FILE_OPAND;
- case '|':
- return FILE_OPOR;
- case '^':
- return FILE_OPXOR;
- case '+':
- return FILE_OPADD;
- case '-':
- return FILE_OPMINUS;
- case '*':
- return FILE_OPMULTIPLY;
- case '/':
- return FILE_OPDIVIDE;
- case '%':
- return FILE_OPMODULO;
- default:
- return -1;
- }
-}
-
-#ifdef ENABLE_CONDITIONALS
-file_private int
-get_cond(const char *l, const char **t)
-{
- static const struct cond_tbl_s {
- char name[8];
- size_t len;
- int cond;
- } cond_tbl[] = {
- { "if", 2, COND_IF },
- { "elif", 4, COND_ELIF },
- { "else", 4, COND_ELSE },
- { "", 0, COND_NONE },
- };
- const struct cond_tbl_s *p;
-
- for (p = cond_tbl; p->len; p++) {
- if (strncmp(l, p->name, p->len) == 0 &&
- isspace(CAST(unsigned char, l[p->len]))) {
- if (t)
- *t = l + p->len;
- break;
- }
- }
- return p->cond;
-}
-
-file_private int
-check_cond(struct magic_set *ms, int cond, uint32_t cont_level)
-{
- int last_cond;
- last_cond = ms->c.li[cont_level].last_cond;
-
- switch (cond) {
- case COND_IF:
- if (last_cond != COND_NONE && last_cond != COND_ELIF) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "syntax error: `if'");
- return -1;
- }
- last_cond = COND_IF;
- break;
-
- case COND_ELIF:
- if (last_cond != COND_IF && last_cond != COND_ELIF) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "syntax error: `elif'");
- return -1;
- }
- last_cond = COND_ELIF;
- break;
-
- case COND_ELSE:
- if (last_cond != COND_IF && last_cond != COND_ELIF) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "syntax error: `else'");
- return -1;
- }
- last_cond = COND_NONE;
- break;
-
- case COND_NONE:
- last_cond = COND_NONE;
- break;
- }
-
- ms->c.li[cont_level].last_cond = last_cond;
- return 0;
-}
-#endif /* ENABLE_CONDITIONALS */
-
-file_private int
-parse_indirect_modifier(struct magic_set *ms, struct magic *m, const char **lp)
-{
- const char *l = *lp;
-
- while (!isspace(CAST(unsigned char, *++l)))
- switch (*l) {
- case CHAR_INDIRECT_RELATIVE:
- m->str_flags |= INDIRECT_RELATIVE;
- break;
- default:
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "indirect modifier `%c' "
- "invalid", *l);
- *lp = l;
- return -1;
- }
- *lp = l;
- return 0;
-}
-
-file_private void
-parse_op_modifier(struct magic_set *ms, struct magic *m, const char **lp,
- int op)
-{
- const char *l = *lp;
- char *t;
- uint64_t val;
-
- ++l;
- m->mask_op |= op;
- val = CAST(uint64_t, strtoull(l, &t, 0));
- l = t;
- m->num_mask = file_signextend(ms, m, val);
- eatsize(&l);
- *lp = l;
-}
-
-file_private int
-parse_string_modifier(struct magic_set *ms, struct magic *m, const char **lp)
-{
- const char *l = *lp;
- char *t;
- int have_range = 0;
-
- while (!isspace(CAST(unsigned char, *++l))) {
- switch (*l) {
- case '0': case '1': case '2':
- case '3': case '4': case '5':
- case '6': case '7': case '8':
- case '9':
- if (have_range && (ms->flags & MAGIC_CHECK))
- file_magwarn(ms, "multiple ranges");
- have_range = 1;
- m->str_range = CAST(uint32_t, strtoul(l, &t, 0));
- if (m->str_range == 0)
- file_magwarn(ms, "zero range");
- l = t - 1;
- break;
- case CHAR_COMPACT_WHITESPACE:
- m->str_flags |= STRING_COMPACT_WHITESPACE;
- break;
- case CHAR_COMPACT_OPTIONAL_WHITESPACE:
- m->str_flags |= STRING_COMPACT_OPTIONAL_WHITESPACE;
- break;
- case CHAR_IGNORE_LOWERCASE:
- m->str_flags |= STRING_IGNORE_LOWERCASE;
- break;
- case CHAR_IGNORE_UPPERCASE:
- m->str_flags |= STRING_IGNORE_UPPERCASE;
- break;
- case CHAR_REGEX_OFFSET_START:
- m->str_flags |= REGEX_OFFSET_START;
- break;
- case CHAR_BINTEST:
- m->str_flags |= STRING_BINTEST;
- break;
- case CHAR_TEXTTEST:
- m->str_flags |= STRING_TEXTTEST;
- break;
- case CHAR_TRIM:
- m->str_flags |= STRING_TRIM;
- break;
- case CHAR_FULL_WORD:
- m->str_flags |= STRING_FULL_WORD;
- break;
- case CHAR_PSTRING_1_LE:
-#define SET_LENGTH(a) m->str_flags = (m->str_flags & ~PSTRING_LEN) | (a)
- if (m->type != FILE_PSTRING)
- goto bad;
- SET_LENGTH(PSTRING_1_LE);
- break;
- case CHAR_PSTRING_2_BE:
- if (m->type != FILE_PSTRING)
- goto bad;
- SET_LENGTH(PSTRING_2_BE);
- break;
- case CHAR_PSTRING_2_LE:
- if (m->type != FILE_PSTRING)
- goto bad;
- SET_LENGTH(PSTRING_2_LE);
- break;
- case CHAR_PSTRING_4_BE:
- if (m->type != FILE_PSTRING)
- goto bad;
- SET_LENGTH(PSTRING_4_BE);
- break;
- case CHAR_PSTRING_4_LE:
- switch (m->type) {
- case FILE_PSTRING:
- case FILE_REGEX:
- break;
- default:
- goto bad;
- }
- SET_LENGTH(PSTRING_4_LE);
- break;
- case CHAR_PSTRING_LENGTH_INCLUDES_ITSELF:
- if (m->type != FILE_PSTRING)
- goto bad;
- m->str_flags |= PSTRING_LENGTH_INCLUDES_ITSELF;
- break;
- default:
- bad:
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "string modifier `%c' "
- "invalid", *l);
- goto out;
- }
- /* allow multiple '/' for readability */
- if (l[1] == '/' && !isspace(CAST(unsigned char, l[2])))
- l++;
- }
- if (string_modifier_check(ms, m) == -1)
- goto out;
- *lp = l;
- return 0;
-out:
- *lp = l;
- return -1;
-}
-
-/*
- * parse one line from magic file, put into magic[index++] if valid
- */
-file_private int
-parse(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t lineno, int action)
-{
-#ifdef ENABLE_CONDITIONALS
- static uint32_t last_cont_level = 0;
-#endif
- size_t i;
- struct magic *m;
- const char *l = line;
- char *t;
- int op;
- uint32_t cont_level;
- int32_t diff;
-
- cont_level = 0;
-
- /*
- * Parse the offset.
- */
- while (*l == '>') {
- ++l; /* step over */
- cont_level++;
- }
-#ifdef ENABLE_CONDITIONALS
- if (cont_level == 0 || cont_level > last_cont_level)
- if (file_check_mem(ms, cont_level) == -1)
- return -1;
- last_cont_level = cont_level;
-#endif
- if (cont_level != 0) {
- if (me->mp == NULL) {
- file_magerror(ms, "No current entry for continuation");
- return -1;
- }
- if (me->cont_count == 0) {
- file_magerror(ms, "Continuations present with 0 count");
- return -1;
- }
- m = &me->mp[me->cont_count - 1];
- diff = CAST(int32_t, cont_level) - CAST(int32_t, m->cont_level);
- if (diff > 1)
- file_magwarn(ms, "New continuation level %u is more "
- "than one larger than current level %u", cont_level,
- m->cont_level);
- if (me->cont_count == me->max_count) {
- struct magic *nm;
- size_t cnt = me->max_count + ALLOC_CHUNK;
- if ((nm = CAST(struct magic *, realloc(me->mp,
- sizeof(*nm) * cnt))) == NULL) {
- file_oomem(ms, sizeof(*nm) * cnt);
- return -1;
- }
- me->mp = nm;
- me->max_count = CAST(uint32_t, cnt);
- }
- m = &me->mp[me->cont_count++];
- (void)memset(m, 0, sizeof(*m));
- m->cont_level = cont_level;
- } else {
- static const size_t len = sizeof(*m) * ALLOC_CHUNK;
- if (me->mp != NULL)
- return 1;
- if ((m = CAST(struct magic *, malloc(len))) == NULL) {
- file_oomem(ms, len);
- return -1;
- }
- me->mp = m;
- me->max_count = ALLOC_CHUNK;
- (void)memset(m, 0, sizeof(*m));
- m->factor_op = FILE_FACTOR_OP_NONE;
- m->cont_level = 0;
- me->cont_count = 1;
- }
- m->lineno = CAST(uint32_t, lineno);
-
- if (*l == '&') { /* m->cont_level == 0 checked below. */
- ++l; /* step over */
- m->flag |= OFFADD;
- }
- if (*l == '(') {
- ++l; /* step over */
- m->flag |= INDIR;
- if (m->flag & OFFADD)
- m->flag = (m->flag & ~OFFADD) | INDIROFFADD;
-
- if (*l == '&') { /* m->cont_level == 0 checked below */
- ++l; /* step over */
- m->flag |= OFFADD;
- }
- }
- /* Indirect offsets are not valid at level 0. */
- if (m->cont_level == 0 && (m->flag & (OFFADD | INDIROFFADD))) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "relative offset at level 0");
- return -1;
- }
-
- /* get offset, then skip over it */
- if (*l == '-') {
- ++l; /* step over */
- m->flag |= OFFNEGATIVE;
- }
- m->offset = CAST(int32_t, strtol(l, &t, 0));
- if (l == t) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "offset `%s' invalid", l);
- return -1;
- }
-
- l = t;
-
- if (m->flag & INDIR) {
- m->in_type = FILE_LONG;
- m->in_offset = 0;
- m->in_op = 0;
- /*
- * read [.,lbs][+-]nnnnn)
- */
- if (*l == '.' || *l == ',') {
- if (*l == ',')
- m->in_op |= FILE_OPSIGNED;
- l++;
- switch (*l) {
- case 'l':
- m->in_type = FILE_LELONG;
- break;
- case 'L':
- m->in_type = FILE_BELONG;
- break;
- case 'm':
- m->in_type = FILE_MELONG;
- break;
- case 'h':
- case 's':
- m->in_type = FILE_LESHORT;
- break;
- case 'H':
- case 'S':
- m->in_type = FILE_BESHORT;
- break;
- case 'c':
- case 'b':
- case 'C':
- case 'B':
- m->in_type = FILE_BYTE;
- break;
- case 'e':
- case 'f':
- case 'g':
- m->in_type = FILE_LEDOUBLE;
- break;
- case 'E':
- case 'F':
- case 'G':
- m->in_type = FILE_BEDOUBLE;
- break;
- case 'i':
- m->in_type = FILE_LEID3;
- break;
- case 'I':
- m->in_type = FILE_BEID3;
- break;
- case 'o':
- m->in_type = FILE_OCTAL;
- break;
- case 'q':
- m->in_type = FILE_LEQUAD;
- break;
- case 'Q':
- m->in_type = FILE_BEQUAD;
- break;
- default:
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms,
- "indirect offset type `%c' invalid",
- *l);
- return -1;
- }
- l++;
- }
-
- if (*l == '~') {
- m->in_op |= FILE_OPINVERSE;
- l++;
- }
- if ((op = get_op(*l)) != -1) {
- m->in_op |= op;
- l++;
- }
- if (*l == '(') {
- m->in_op |= FILE_OPINDIRECT;
- l++;
- }
- if (isdigit(CAST(unsigned char, *l)) || *l == '-') {
- m->in_offset = CAST(int32_t, strtol(l, &t, 0));
- if (l == t) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms,
- "in_offset `%s' invalid", l);
- return -1;
- }
- l = t;
- }
- if (*l++ != ')' ||
- ((m->in_op & FILE_OPINDIRECT) && *l++ != ')')) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms,
- "missing ')' in indirect offset");
- return -1;
- }
- }
- EATAB;
-
-#ifdef ENABLE_CONDITIONALS
- m->cond = get_cond(l, &l);
- if (check_cond(ms, m->cond, cont_level) == -1)
- return -1;
-
- EATAB;
-#endif
-
- /*
- * Parse the type.
- */
- if (*l == 'u') {
- /*
- * Try it as a keyword type prefixed by "u"; match what
- * follows the "u". If that fails, try it as an SUS
- * integer type.
- */
- m->type = get_type(type_tbl, l + 1, &l);
- if (m->type == FILE_INVALID) {
- /*
- * Not a keyword type; parse it as an SUS type,
- * 'u' possibly followed by a number or C/S/L.
- */
- m->type = get_standard_integer_type(l, &l);
- }
- /* It's unsigned. */
- if (m->type != FILE_INVALID)
- m->flag |= UNSIGNED;
- } else {
- /*
- * Try it as a keyword type. If that fails, try it as
- * an SUS integer type if it begins with "d" or as an
- * SUS string type if it begins with "s". In any case,
- * it's not unsigned.
- */
- m->type = get_type(type_tbl, l, &l);
- if (m->type == FILE_INVALID) {
- /*
- * Not a keyword type; parse it as an SUS type,
- * either 'd' possibly followed by a number or
- * C/S/L, or just 's'.
- */
- if (*l == 'd')
- m->type = get_standard_integer_type(l, &l);
- else if (*l == 's'
- && !isalpha(CAST(unsigned char, l[1]))) {
- m->type = FILE_STRING;
- ++l;
- }
- }
- }
-
- if (m->type == FILE_INVALID) {
- /* Not found - try it as a special keyword. */
- m->type = get_type(special_tbl, l, &l);
- }
-
- if (m->type == FILE_INVALID) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "type `%s' invalid", l);
- return -1;
- }
-
- if (m->type == FILE_NAME && cont_level != 0) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "`name%s' entries can only be "
- "declared at top level", l);
- return -1;
- }
-
- /* New-style anding: "0 byte&0x80 =0x80 dynamically linked" */
- /* New and improved: ~ & | ^ + - * / % -- exciting, isn't it? */
-
- m->mask_op = 0;
- if (*l == '~') {
- if (!IS_STRING(m->type))
- m->mask_op |= FILE_OPINVERSE;
- else if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "'~' invalid for string types");
- ++l;
- }
- m->str_range = 0;
- m->str_flags = m->type == FILE_PSTRING ? PSTRING_1_LE : 0;
- if ((op = get_op(*l)) != -1) {
- if (IS_STRING(m->type)) {
- int r;
-
- if (op != FILE_OPDIVIDE) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms,
- "invalid string/indirect op: "
- "`%c'", *t);
- return -1;
- }
-
- if (m->type == FILE_INDIRECT)
- r = parse_indirect_modifier(ms, m, &l);
- else
- r = parse_string_modifier(ms, m, &l);
- if (r == -1)
- return -1;
- } else
- parse_op_modifier(ms, m, &l, op);
- }
-
- /*
- * We used to set mask to all 1's here, instead let's just not do
- * anything if mask = 0 (unless you have a better idea)
- */
- EATAB;
-
- switch (*l) {
- case '>':
- case '<':
- m->reln = *l;
- ++l;
- if (*l == '=') {
- if (ms->flags & MAGIC_CHECK) {
- file_magwarn(ms, "%c= not supported",
- m->reln);
- return -1;
- }
- ++l;
- }
- break;
- /* Old-style anding: "0 byte &0x80 dynamically linked" */
- case '&':
- case '^':
- case '=':
- m->reln = *l;
- ++l;
- if (*l == '=') {
- /* HP compat: ignore &= etc. */
- ++l;
- }
- break;
- case '!':
- m->reln = *l;
- ++l;
- break;
- default:
- m->reln = '='; /* the default relation */
- if (*l == 'x' && ((isascii(CAST(unsigned char, l[1])) &&
- isspace(CAST(unsigned char, l[1]))) || !l[1])) {
- m->reln = *l;
- ++l;
- }
- break;
- }
- /*
- * Grab the value part, except for an 'x' reln.
- */
- if (m->reln != 'x' && getvalue(ms, m, &l, action))
- return -1;
-
- /*
- * TODO finish this macro and start using it!
- * #define offsetcheck {if (offset > ms->bytes_max -1)
- * magwarn("offset too big"); }
- */
-
- /*
- * Now get last part - the description
- */
- EATAB;
- if (l[0] == '\b') {
- ++l;
- m->flag |= NOSPACE;
- } else if ((l[0] == '\\') && (l[1] == 'b')) {
- ++l;
- ++l;
- m->flag |= NOSPACE;
- }
- for (i = 0; (m->desc[i++] = *l++) != '\0' && i < sizeof(m->desc); )
- continue;
- if (i == sizeof(m->desc)) {
- m->desc[sizeof(m->desc) - 1] = '\0';
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "description `%s' truncated", m->desc);
- }
-
- /*
- * We only do this check while compiling, or if any of the magic
- * files were not compiled.
- */
- if (ms->flags & MAGIC_CHECK) {
- if (check_format(ms, m) == -1)
- return -1;
- }
-#ifndef COMPILE_ONLY
- if (action == FILE_CHECK) {
- file_mdump(m);
- }
-#endif
- m->mimetype[0] = '\0'; /* initialise MIME type to none */
- return 0;
-}
-
-/*
- * parse a STRENGTH annotation line from magic file, put into magic[index - 1]
- * if valid
- */
-/*ARGSUSED*/
-file_private int
-parse_strength(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t len __attribute__((__unused__)))
-{
- const char *l = line;
- char *el;
- unsigned long factor;
- char sbuf[512];
- struct magic *m = &me->mp[0];
-
- if (m->factor_op != FILE_FACTOR_OP_NONE) {
- file_magwarn(ms,
- "Current entry already has a strength type: %c %d",
- m->factor_op, m->factor);
- return -1;
- }
- if (m->type == FILE_NAME) {
- file_magwarn(ms, "%s: Strength setting is not supported in "
- "\"name\" magic entries",
- file_printable(ms, sbuf, sizeof(sbuf), m->value.s,
- sizeof(m->value.s)));
- return -1;
- }
- EATAB;
- switch (*l) {
- case FILE_FACTOR_OP_NONE:
- break;
- case FILE_FACTOR_OP_PLUS:
- case FILE_FACTOR_OP_MINUS:
- case FILE_FACTOR_OP_TIMES:
- case FILE_FACTOR_OP_DIV:
- m->factor_op = *l++;
- break;
- default:
- file_magwarn(ms, "Unknown factor op `%c'", *l);
- return -1;
- }
- EATAB;
- factor = strtoul(l, &el, 0);
- if (factor > 255) {
- file_magwarn(ms, "Too large factor `%lu'", factor);
- goto out;
- }
- if (*el && !isspace(CAST(unsigned char, *el))) {
- file_magwarn(ms, "Bad factor `%s'", l);
- goto out;
- }
- m->factor = CAST(uint8_t, factor);
- if (m->factor == 0 && m->factor_op == FILE_FACTOR_OP_DIV) {
- file_magwarn(ms, "Cannot have factor op `%c' and factor %u",
- m->factor_op, m->factor);
- goto out;
- }
- return 0;
-out:
- m->factor_op = FILE_FACTOR_OP_NONE;
- m->factor = 0;
- return -1;
-}
-
-file_private int
-goodchar(unsigned char x, const char *extra)
-{
- return (isascii(x) && isalnum(x)) || strchr(extra, x);
-}
-
-file_private int
-parse_extra(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t llen, off_t off, size_t len, const char *name, const char *extra,
- int nt)
-{
- size_t i;
- const char *l = line;
- struct magic *m = &me->mp[me->cont_count == 0 ? 0 : me->cont_count - 1];
- char *buf = CAST(char *, CAST(void *, m)) + off;
-
- if (buf[0] != '\0') {
- len = nt ? strlen(buf) : len;
- file_magwarn(ms, "Current entry already has a %s type "
- "`%.*s', new type `%s'", name, CAST(int, len), buf, l);
- return -1;
- }
-
- if (*m->desc == '\0') {
- file_magwarn(ms, "Current entry does not yet have a "
- "description for adding a %s type", name);
- return -1;
- }
-
- EATAB;
- for (i = 0; *l && i < llen && i < len && goodchar(*l, extra);
- buf[i++] = *l++)
- continue;
-
- if (i == len && *l) {
- if (nt)
- buf[len - 1] = '\0';
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "%s type `%s' truncated %"
- SIZE_T_FORMAT "u", name, line, i);
- } else {
- if (!isspace(CAST(unsigned char, *l)) && !goodchar(*l, extra))
- file_magwarn(ms, "%s type `%s' has bad char '%c'",
- name, line, *l);
- if (nt)
- buf[i] = '\0';
- }
-
- if (i > 0)
- return 0;
-
- file_magerror(ms, "Bad magic entry '%s'", line);
- return -1;
-}
-
-/*
- * Parse an Apple CREATOR/TYPE annotation from magic file and put it into
- * magic[index - 1]
- */
-file_private int
-parse_apple(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t len)
-{
- return parse_extra(ms, me, line, len,
- CAST(off_t, offsetof(struct magic, apple)),
- sizeof(me->mp[0].apple), "APPLE", "!+-./?", 0);
-}
-
-/*
- * Parse a comma-separated list of extensions
- */
-file_private int
-parse_ext(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t len)
-{
- return parse_extra(ms, me, line, len,
- CAST(off_t, offsetof(struct magic, ext)),
- sizeof(me->mp[0].ext), "EXTENSION", ",!+-/@?_$&~", 0);
- /* & for b&w */
- /* ~ for journal~ */
-}
-
-/*
- * parse a MIME annotation line from magic file, put into magic[index - 1]
- * if valid
- */
-file_private int
-parse_mime(struct magic_set *ms, struct magic_entry *me, const char *line,
- size_t len)
-{
- return parse_extra(ms, me, line, len,
- CAST(off_t, offsetof(struct magic, mimetype)),
- sizeof(me->mp[0].mimetype), "MIME", "+-/.$?:{}", 1);
-}
-
-file_private int
-check_format_type(const char *ptr, int type, const char **estr)
-{
- int quad = 0, h;
- size_t len, cnt;
- if (*ptr == '\0') {
- /* Missing format string; bad */
- *estr = "missing format spec";
- return -1;
- }
-
- switch (file_formats[type]) {
- case FILE_FMT_QUAD:
- quad = 1;
- /*FALLTHROUGH*/
- case FILE_FMT_NUM:
- if (quad == 0) {
- switch (type) {
- case FILE_BYTE:
- h = 2;
- break;
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- h = 1;
- break;
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- case FILE_LEID3:
- case FILE_BEID3:
- case FILE_INDIRECT:
- h = 0;
- break;
- default:
- fprintf(stderr, "Bad number format %d", type);
- abort();
- }
- } else
- h = 0;
- while (*ptr && strchr("-.#", *ptr) != NULL)
- ptr++;
-#define CHECKLEN() do { \
- for (len = cnt = 0; isdigit(CAST(unsigned char, *ptr)); ptr++, cnt++) \
- len = len * 10 + (*ptr - '0'); \
- if (cnt > 5 || len > 1024) \
- goto toolong; \
-} while (/*CONSTCOND*/0)
-
- CHECKLEN();
- if (*ptr == '.')
- ptr++;
- CHECKLEN();
- if (quad) {
- if (*ptr++ != 'l')
- goto invalid;
- if (*ptr++ != 'l')
- goto invalid;
- }
-
- switch (*ptr++) {
-#ifdef STRICT_FORMAT /* "long" formats are int formats for us */
- /* so don't accept the 'l' modifier */
- case 'l':
- switch (*ptr++) {
- case 'i':
- case 'd':
- case 'u':
- case 'o':
- case 'x':
- case 'X':
- if (h == 0)
- return 0;
- /*FALLTHROUGH*/
- default:
- goto invalid;
- }
-
- /*
- * Don't accept h and hh modifiers. They make writing
- * magic entries more complicated, for very little benefit
- */
- case 'h':
- if (h-- <= 0)
- goto invalid;
- switch (*ptr++) {
- case 'h':
- if (h-- <= 0)
- goto invalid;
- switch (*ptr++) {
- case 'i':
- case 'd':
- case 'u':
- case 'o':
- case 'x':
- case 'X':
- return 0;
- default:
- goto invalid;
- }
- case 'i':
- case 'd':
- case 'u':
- case 'o':
- case 'x':
- case 'X':
- if (h == 0)
- return 0;
- /*FALLTHROUGH*/
- default:
- goto invalid;
- }
-#endif
- case 'c':
- if (h == 2)
- return 0;
- goto invalid;
- case 'i':
- case 'd':
- case 'u':
- case 'o':
- case 'x':
- case 'X':
-#ifdef STRICT_FORMAT
- if (h == 0)
- return 0;
- /*FALLTHROUGH*/
-#else
- return 0;
-#endif
- default:
- goto invalid;
- }
-
- case FILE_FMT_FLOAT:
- case FILE_FMT_DOUBLE:
- if (*ptr == '-')
- ptr++;
- if (*ptr == '.')
- ptr++;
- CHECKLEN();
- if (*ptr == '.')
- ptr++;
- CHECKLEN();
- switch (*ptr++) {
- case 'e':
- case 'E':
- case 'f':
- case 'F':
- case 'g':
- case 'G':
- return 0;
-
- default:
- goto invalid;
- }
-
-
- case FILE_FMT_STR:
- if (*ptr == '-')
- ptr++;
- while (isdigit(CAST(unsigned char, *ptr)))
- ptr++;
- if (*ptr == '.') {
- ptr++;
- while (isdigit(CAST(unsigned char , *ptr)))
- ptr++;
- }
-
- switch (*ptr++) {
- case 's':
- return 0;
- default:
- goto invalid;
- }
-
- default:
- /* internal error */
- fprintf(stderr, "Bad file format %d", type);
- abort();
- }
-invalid:
- *estr = "not valid";
- return -1;
-toolong:
- *estr = "too long";
- return -1;
-}
-
-/*
- * Check that the optional printf format in description matches
- * the type of the magic.
- */
-file_private int
-check_format(struct magic_set *ms, struct magic *m)
-{
- char *ptr;
- const char *estr;
-
- for (ptr = m->desc; *ptr; ptr++)
- if (*ptr == '%')
- break;
- if (*ptr == '\0') {
- /* No format string; ok */
- return 1;
- }
-
- assert(file_nformats == file_nnames);
-
- if (m->type >= file_nformats) {
- file_magwarn(ms, "Internal error inconsistency between "
- "m->type and format strings");
- return -1;
- }
- if (file_formats[m->type] == FILE_FMT_NONE) {
- file_magwarn(ms, "No format string for `%s' with description "
- "`%s'", m->desc, file_names[m->type]);
- return -1;
- }
-
- ptr++;
- if (check_format_type(ptr, m->type, &estr) == -1) {
- /*
- * TODO: this error message is unhelpful if the format
- * string is not one character long
- */
- file_magwarn(ms, "Printf format is %s for type "
- "`%s' in description `%s'", estr,
- file_names[m->type], m->desc);
- return -1;
- }
-
- for (; *ptr; ptr++) {
- if (*ptr == '%') {
- file_magwarn(ms,
- "Too many format strings (should have at most one) "
- "for `%s' with description `%s'",
- file_names[m->type], m->desc);
- return -1;
- }
- }
- return 0;
-}
-
-/*
- * Read a numeric value from a pointer, into the value union of a magic
- * pointer, according to the magic type. Update the string pointer to point
- * just after the number read. Return 0 for success, non-zero for failure.
- */
-file_private int
-getvalue(struct magic_set *ms, struct magic *m, const char **p, int action)
-{
- char *ep;
- uint64_t ull;
- int y;
-
- switch (m->type) {
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_REGEX:
- case FILE_SEARCH:
- case FILE_NAME:
- case FILE_USE:
- case FILE_DER:
- case FILE_OCTAL:
- *p = getstr(ms, m, *p, action == FILE_COMPILE);
- if (*p == NULL) {
- if (ms->flags & MAGIC_CHECK)
- file_magwarn(ms, "cannot get string from `%s'",
- m->value.s);
- return -1;
- }
- if (m->type == FILE_REGEX) {
- file_regex_t rx;
- int rc =
- file_regcomp(ms, &rx, m->value.s, REG_EXTENDED);
- if (rc == 0) {
- file_regfree(&rx);
- }
- return rc ? -1 : 0;
- }
- return 0;
- default:
- if (m->reln == 'x')
- return 0;
- break;
- }
-
- switch (m->type) {
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- errno = 0;
-#ifdef HAVE_STRTOF
- m->value.f = strtof(*p, &ep);
-#else
- m->value.f = (float)strtod(*p, &ep);
-#endif
- if (errno == 0)
- *p = ep;
- return 0;
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- errno = 0;
- m->value.d = strtod(*p, &ep);
- if (errno == 0)
- *p = ep;
- return 0;
- case FILE_GUID:
- if (file_parse_guid(*p, m->value.guid) == -1)
- return -1;
- *p += FILE_GUID_SIZE - 1;
- return 0;
- default:
- errno = 0;
- ull = CAST(uint64_t, strtoull(*p, &ep, 0));
- m->value.q = file_signextend(ms, m, ull);
- if (*p == ep) {
- file_magwarn(ms, "Unparsable number `%s'", *p);
- return -1;
- } else {
- size_t ts = typesize(m->type);
- uint64_t x;
- const char *q;
-
- if (ts == FILE_BADSIZE) {
- file_magwarn(ms,
- "Expected numeric type got `%s'",
- type_tbl[m->type].name);
- return -1;
- }
- for (q = *p; isspace(CAST(unsigned char, *q)); q++)
- continue;
- if (*q == '-' && ull != UINT64_MAX)
- ull = -CAST(int64_t, ull);
- switch (ts) {
- case 1:
- x = CAST(uint64_t, ull & ~0xffULL);
- y = (x & ~0xffULL) != ~0xffULL;
- break;
- case 2:
- x = CAST(uint64_t, ull & ~0xffffULL);
- y = (x & ~0xffffULL) != ~0xffffULL;
- break;
- case 4:
- x = CAST(uint64_t, ull & ~0xffffffffULL);
- y = (x & ~0xffffffffULL) != ~0xffffffffULL;
- break;
- case 8:
- x = 0;
- y = 0;
- break;
- default:
- fprintf(stderr, "Bad width %zu", ts);
- abort();
- }
- if (x && y) {
- file_magwarn(ms, "Overflow for numeric"
- " type `%s' value %#" PRIx64,
- type_tbl[m->type].name, ull);
- return -1;
- }
- }
- if (errno == 0) {
- *p = ep;
- eatsize(p);
- }
- return 0;
- }
-}
-
-/*
- * Convert a string containing C character escapes. Stop at an unescaped
- * space or tab.
- * Copy the converted version to "m->value.s", and the length in m->vallen.
- * Return updated scan pointer as function result. Warn if set.
- */
-file_private const char *
-getstr(struct magic_set *ms, struct magic *m, const char *s, int warn)
-{
- const char *origs = s;
- char *p = m->value.s;
- size_t plen = sizeof(m->value.s);
- char *origp = p;
- char *pmax = p + plen - 1;
- int c;
- int val;
- size_t bracket_nesting = 0;
-
- while ((c = *s++) != '\0') {
- if (isspace(CAST(unsigned char, c)))
- break;
- if (p >= pmax) {
- file_error(ms, 0, "string too long: `%s'", origs);
- return NULL;
- }
- if (c != '\\') {
- if (c == '[') {
- bracket_nesting++;
- }
- if (c == ']' && bracket_nesting > 0) {
- bracket_nesting--;
- }
- *p++ = CAST(char, c);
- continue;
- }
- switch(c = *s++) {
-
- case '\0':
- if (warn)
- file_magwarn(ms, "incomplete escape");
- s--;
- goto out;
- case '.':
- if (m->type == FILE_REGEX &&
- bracket_nesting == 0 && warn) {
- file_magwarn(ms, "escaped dot ('.') found, "
- "use \\\\. instead");
- }
- warn = 0; /* already did */
- /*FALLTHROUGH*/
- case '\t':
- if (warn) {
- file_magwarn(ms,
- "escaped tab found, use \\\\t instead");
- warn = 0; /* already did */
- }
- /*FALLTHROUGH*/
- default:
- if (warn) {
- if (isprint(CAST(unsigned char, c))) {
- /* Allow escaping of
- * ``relations'' */
- if (strchr("<>&^=!", c) == NULL
- && (m->type != FILE_REGEX ||
- strchr("[]().*?^$|{}", c)
- == NULL)) {
- file_magwarn(ms, "no "
- "need to escape "
- "`%c'", c);
- }
- } else {
- file_magwarn(ms,
- "unknown escape sequence: "
- "\\%03o", c);
- }
- }
- /*FALLTHROUGH*/
- /* space, perhaps force people to use \040? */
- case ' ':
-#if 0
- /*
- * Other things people escape, but shouldn't need to,
- * so we disallow them
- */
- case '\'':
- case '"':
- case '?':
-#endif
- /* Relations */
- case '>':
- case '<':
- case '&':
- case '^':
- case '=':
- case '!':
- /* and backslash itself */
- case '\\':
- *p++ = CAST(char, c);
- break;
-
- case 'a':
- *p++ = '\a';
- break;
-
- case 'b':
- *p++ = '\b';
- break;
-
- case 'f':
- *p++ = '\f';
- break;
-
- case 'n':
- *p++ = '\n';
- break;
-
- case 'r':
- *p++ = '\r';
- break;
-
- case 't':
- *p++ = '\t';
- break;
-
- case 'v':
- *p++ = '\v';
- break;
-
- /* \ and up to 3 octal digits */
- case '0':
- case '1':
- case '2':
- case '3':
- case '4':
- case '5':
- case '6':
- case '7':
- val = c - '0';
- c = *s++; /* try for 2 */
- if (c >= '0' && c <= '7') {
- val = (val << 3) | (c - '0');
- c = *s++; /* try for 3 */
- if (c >= '0' && c <= '7')
- val = (val << 3) | (c-'0');
- else
- --s;
- }
- else
- --s;
- *p++ = CAST(char, val);
- break;
-
- /* \x and up to 2 hex digits */
- case 'x':
- val = 'x'; /* Default if no digits */
- c = hextoint(*s++); /* Get next char */
- if (c >= 0) {
- val = c;
- c = hextoint(*s++);
- if (c >= 0)
- val = (val << 4) + c;
- else
- --s;
- } else
- --s;
- *p++ = CAST(char, val);
- break;
- }
- }
- --s;
-out:
- *p = '\0';
- m->vallen = CAST(unsigned char, (p - origp));
- if (m->type == FILE_PSTRING) {
- size_t l = file_pstring_length_size(ms, m);
- if (l == FILE_BADSIZE)
- return NULL;
- m->vallen += CAST(unsigned char, l);
- }
- return s;
-}
-
-
-/* Single hex char to int; -1 if not a hex char. */
-file_private int
-hextoint(int c)
-{
- if (!isascii(CAST(unsigned char, c)))
- return -1;
- if (isdigit(CAST(unsigned char, c)))
- return c - '0';
- if ((c >= 'a') && (c <= 'f'))
- return c + 10 - 'a';
- if (( c>= 'A') && (c <= 'F'))
- return c + 10 - 'A';
- return -1;
-}
-
-
-/*
- * Print a string containing C character escapes.
- */
-file_protected void
-file_showstr(FILE *fp, const char *s, size_t len)
-{
- char c;
-
- for (;;) {
- if (len == FILE_BADSIZE) {
- c = *s++;
- if (c == '\0')
- break;
- }
- else {
- if (len-- == 0)
- break;
- c = *s++;
- }
- if (c >= 040 && c <= 0176) /* TODO isprint && !iscntrl */
- (void) fputc(c, fp);
- else {
- (void) fputc('\\', fp);
- switch (c) {
- case '\a':
- (void) fputc('a', fp);
- break;
-
- case '\b':
- (void) fputc('b', fp);
- break;
-
- case '\f':
- (void) fputc('f', fp);
- break;
-
- case '\n':
- (void) fputc('n', fp);
- break;
-
- case '\r':
- (void) fputc('r', fp);
- break;
-
- case '\t':
- (void) fputc('t', fp);
- break;
-
- case '\v':
- (void) fputc('v', fp);
- break;
-
- default:
- (void) fprintf(fp, "%.3o", c & 0377);
- break;
- }
- }
- }
-}
-
-/*
- * eatsize(): Eat the size spec from a number [eg. 10UL]
- */
-file_private void
-eatsize(const char **p)
-{
- const char *l = *p;
-
- if (LOWCASE(*l) == 'u')
- l++;
-
- switch (LOWCASE(*l)) {
- case 'l': /* long */
- case 's': /* short */
- case 'h': /* short */
- case 'b': /* char/byte */
- case 'c': /* char/byte */
- l++;
- /*FALLTHROUGH*/
- default:
- break;
- }
-
- *p = l;
-}
-
-/*
- * handle a buffer containing a compiled file.
- */
-file_private struct magic_map *
-apprentice_buf(struct magic_set *ms, struct magic *buf, size_t len)
-{
- struct magic_map *map;
-
- if ((map = CAST(struct magic_map *, calloc(1, sizeof(*map)))) == NULL) {
- file_oomem(ms, sizeof(*map));
- return NULL;
- }
- map->len = len;
- map->p = buf;
- map->type = MAP_TYPE_USER;
- if (check_buffer(ms, map, "buffer") != 0) {
- apprentice_unmap(map);
- return NULL;
- }
- return map;
-}
-
-/*
- * handle a compiled file.
- */
-
-file_private struct magic_map *
-apprentice_map(struct magic_set *ms, const char *fn)
-{
- int fd;
- struct stat st;
- char *dbname = NULL;
- struct magic_map *map;
- struct magic_map *rv = NULL;
-
- fd = -1;
- if ((map = CAST(struct magic_map *, calloc(1, sizeof(*map)))) == NULL) {
- file_oomem(ms, sizeof(*map));
- goto error;
- }
- map->type = MAP_TYPE_USER; /* unspecified */
-
- if (strncmp(fn, "res@", 4) == 0) {
- map->type = MAP_TYPE_MALLOC;
- _magic_read_res(fn + 4, &map->p, &map->len);
- if (map->p == NULL) {
- file_error(ms, 0, "cannot read_res %s", fn);
- goto error;
- }
- if (check_buffer(ms, map, fn) != 0) {
- goto error;
- }
- return map;
- }
-
- dbname = mkdbname(ms, fn, 0);
- if (dbname == NULL)
- goto error;
-
- if ((fd = open(dbname, O_RDONLY|O_BINARY)) == -1)
- goto error;
-
- if (fstat(fd, &st) == -1) {
- file_error(ms, errno, "cannot stat `%s'", dbname);
- goto error;
- }
- if (st.st_size < 8 || st.st_size > maxoff_t()) {
- file_error(ms, 0, "file `%s' is too %s", dbname,
- st.st_size < 8 ? "small" : "large");
- goto error;
- }
-
- map->len = CAST(size_t, st.st_size);
-#ifdef QUICK
- map->type = MAP_TYPE_MMAP;
- if ((map->p = mmap(0, CAST(size_t, st.st_size), PROT_READ|PROT_WRITE,
- MAP_PRIVATE|MAP_FILE, fd, CAST(off_t, 0))) == MAP_FAILED) {
- file_error(ms, errno, "cannot map `%s'", dbname);
- goto error;
- }
-#else
- map->type = MAP_TYPE_MALLOC;
- if ((map->p = CAST(void *, malloc(map->len))) == NULL) {
- file_oomem(ms, map->len);
- goto error;
- }
- if (read(fd, map->p, map->len) != (ssize_t)map->len) {
- file_badread(ms);
- goto error;
- }
-#endif
- (void)close(fd);
- fd = -1;
-
- if (check_buffer(ms, map, dbname) != 0) {
- goto error;
- }
-#ifdef QUICK
- if (mprotect(map->p, CAST(size_t, st.st_size), PROT_READ) == -1) {
- file_error(ms, errno, "cannot mprotect `%s'", dbname);
- goto error;
- }
-#endif
-
- free(dbname);
- return map;
-
-error:
- if (fd != -1)
- (void)close(fd);
- apprentice_unmap(map);
- free(dbname);
- return rv;
-}
-
-file_private int
-check_buffer(struct magic_set *ms, struct magic_map *map, const char *dbname)
-{
- uint32_t *ptr;
- uint32_t entries, nentries;
- uint32_t version;
- int i, needsbyteswap;
-
- ptr = CAST(uint32_t *, map->p);
- if (*ptr != MAGICNO) {
- if (swap4(*ptr) != MAGICNO) {
- file_error(ms, 0, "bad magic in `%s'", dbname);
- return -1;
- }
- needsbyteswap = 1;
- } else
- needsbyteswap = 0;
- if (needsbyteswap)
- version = swap4(ptr[1]);
- else
- version = ptr[1];
- if (version != VERSIONNO) {
- file_error(ms, 0, "File %s supports only version %d magic "
- "files. `%s' is version %d", VERSION,
- VERSIONNO, dbname, version);
- return -1;
- }
- entries = CAST(uint32_t, map->len / sizeof(struct magic));
- if ((entries * sizeof(struct magic)) != map->len) {
- file_error(ms, 0, "Size of `%s' %" SIZE_T_FORMAT "u is not "
- "a multiple of %" SIZE_T_FORMAT "u",
- dbname, map->len, sizeof(struct magic));
- return -1;
- }
- map->magic[0] = CAST(struct magic *, map->p) + 1;
- nentries = 0;
- for (i = 0; i < MAGIC_SETS; i++) {
- if (needsbyteswap)
- map->nmagic[i] = swap4(ptr[i + 2]);
- else
- map->nmagic[i] = ptr[i + 2];
- if (i != MAGIC_SETS - 1)
- map->magic[i + 1] = map->magic[i] + map->nmagic[i];
- nentries += map->nmagic[i];
- }
- if (entries != nentries + 1) {
- file_error(ms, 0, "Inconsistent entries in `%s' %u != %u",
- dbname, entries, nentries + 1);
- return -1;
- }
- if (needsbyteswap)
- for (i = 0; i < MAGIC_SETS; i++)
- byteswap(map->magic[i], map->nmagic[i]);
- return 0;
-}
-
-/*
- * write a compiled file.
- */
-file_private int
-apprentice_compile(struct magic_set *ms, struct magic_map *map, const char *fn)
-{
- static const size_t nm = sizeof(*map->nmagic) * MAGIC_SETS;
- static const size_t m = sizeof(**map->magic);
- int fd = -1;
- size_t len;
- char *dbname;
- int rv = -1;
- uint32_t i;
- union {
- struct magic m;
- uint32_t h[2 + MAGIC_SETS];
- } hdr;
-
- dbname = mkdbname(ms, fn, 1);
-
- if (dbname == NULL)
- goto out;
-
- if ((fd = open(dbname, O_WRONLY|O_CREAT|O_TRUNC|O_BINARY, 0644)) == -1)
- {
- file_error(ms, errno, "cannot open `%s'", dbname);
- goto out;
- }
- memset(&hdr, 0, sizeof(hdr));
- hdr.h[0] = MAGICNO;
- hdr.h[1] = VERSIONNO;
- memcpy(hdr.h + 2, map->nmagic, nm);
-
- if (write(fd, &hdr, sizeof(hdr)) != CAST(ssize_t, sizeof(hdr))) {
- file_error(ms, errno, "error writing `%s'", dbname);
- goto out2;
- }
-
- for (i = 0; i < MAGIC_SETS; i++) {
- len = m * map->nmagic[i];
- if (write(fd, map->magic[i], len) != CAST(ssize_t, len)) {
- file_error(ms, errno, "error writing `%s'", dbname);
- goto out2;
- }
- }
-
- rv = 0;
-out2:
- if (fd != -1)
- (void)close(fd);
-out:
- apprentice_unmap(map);
- free(dbname);
- return rv;
-}
-
-file_private const char ext[] = ".mgc";
-/*
- * make a dbname
- */
-file_private char *
-mkdbname(struct magic_set *ms, const char *fn, int strip)
-{
- const char *p, *q;
- char *buf;
-
- if (strip) {
- if ((p = strrchr(fn, '/')) != NULL)
- fn = ++p;
- }
-
- for (q = fn; *q; q++)
- continue;
- /* Look for .mgc */
- for (p = ext + sizeof(ext) - 1; p >= ext && q >= fn; p--, q--)
- if (*p != *q)
- break;
-
- /* Did not find .mgc, restore q */
- if (p >= ext)
- while (*q)
- q++;
-
- q++;
- /* Compatibility with old code that looked in .mime */
- if (ms->flags & MAGIC_MIME) {
- if (asprintf(&buf, "%.*s.mime%s", CAST(int, q - fn), fn, ext)
- < 0)
- return NULL;
- if (access(buf, R_OK) != -1) {
- ms->flags &= MAGIC_MIME_TYPE;
- return buf;
- }
- free(buf);
- }
- if (asprintf(&buf, "%.*s%s", CAST(int, q - fn), fn, ext) < 0)
- return NULL;
-
- /* Compatibility with old code that looked in .mime */
- if (strstr(fn, ".mime") != NULL)
- ms->flags &= MAGIC_MIME_TYPE;
- return buf;
-}
-
-/*
- * Byteswap an mmap'ed file if needed
- */
-file_private void
-byteswap(struct magic *magic, uint32_t nmagic)
-{
- uint32_t i;
- for (i = 0; i < nmagic; i++)
- bs1(&magic[i]);
-}
-
-#if !defined(HAVE_BYTESWAP_H) && !defined(HAVE_SYS_BSWAP_H)
-/*
- * swap a short
- */
-file_private uint16_t
-swap2(uint16_t sv)
-{
- uint16_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
- d[0] = s[1];
- d[1] = s[0];
- return rv;
-}
-
-/*
- * swap an int
- */
-file_private uint32_t
-swap4(uint32_t sv)
-{
- uint32_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
- d[0] = s[3];
- d[1] = s[2];
- d[2] = s[1];
- d[3] = s[0];
- return rv;
-}
-
-/*
- * swap a quad
- */
-file_private uint64_t
-swap8(uint64_t sv)
-{
- uint64_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
-# if 0
- d[0] = s[3];
- d[1] = s[2];
- d[2] = s[1];
- d[3] = s[0];
- d[4] = s[7];
- d[5] = s[6];
- d[6] = s[5];
- d[7] = s[4];
-# else
- d[0] = s[7];
- d[1] = s[6];
- d[2] = s[5];
- d[3] = s[4];
- d[4] = s[3];
- d[5] = s[2];
- d[6] = s[1];
- d[7] = s[0];
-# endif
- return rv;
-}
-#endif
-
-file_protected uintmax_t
-file_varint2uintmax_t(const unsigned char *us, int t, size_t *l)
-{
- uintmax_t x = 0;
- const unsigned char *c;
- if (t == FILE_LEVARINT) {
- for (c = us; *c; c++) {
- if ((*c & 0x80) == 0)
- break;
- }
- if (l)
- *l = c - us + 1;
- for (; c >= us; c--) {
- x |= *c & 0x7f;
- x <<= 7;
- }
- } else {
- for (c = us; *c; c++) {
- x |= *c & 0x7f;
- if ((*c & 0x80) == 0)
- break;
- x <<= 7;
- }
- if (l)
- *l = c - us + 1;
- }
- return x;
-}
-
-
-/*
- * byteswap a single magic entry
- */
-file_private void
-bs1(struct magic *m)
-{
- m->cont_level = swap2(m->cont_level);
- m->offset = swap4(CAST(uint32_t, m->offset));
- m->in_offset = swap4(CAST(uint32_t, m->in_offset));
- m->lineno = swap4(CAST(uint32_t, m->lineno));
- if (IS_STRING(m->type)) {
- m->str_range = swap4(m->str_range);
- m->str_flags = swap4(m->str_flags);
- }
- else {
- m->value.q = swap8(m->value.q);
- m->num_mask = swap8(m->num_mask);
- }
-}
-
-file_protected size_t
-file_pstring_length_size(struct magic_set *ms, const struct magic *m)
-{
- switch (m->str_flags & PSTRING_LEN) {
- case PSTRING_1_LE:
- return 1;
- case PSTRING_2_LE:
- case PSTRING_2_BE:
- return 2;
- case PSTRING_4_LE:
- case PSTRING_4_BE:
- return 4;
- default:
- file_error(ms, 0, "corrupt magic file "
- "(bad pascal string length %d)",
- m->str_flags & PSTRING_LEN);
- return FILE_BADSIZE;
- }
-}
-file_protected size_t
-file_pstring_get_length(struct magic_set *ms, const struct magic *m,
- const char *ss)
-{
- size_t len = 0;
- const unsigned char *s = RCAST(const unsigned char *, ss);
- unsigned int s3, s2, s1, s0;
-
- switch (m->str_flags & PSTRING_LEN) {
- case PSTRING_1_LE:
- len = *s;
- break;
- case PSTRING_2_LE:
- s0 = s[0];
- s1 = s[1];
- len = (s1 << 8) | s0;
- break;
- case PSTRING_2_BE:
- s0 = s[0];
- s1 = s[1];
- len = (s0 << 8) | s1;
- break;
- case PSTRING_4_LE:
- s0 = s[0];
- s1 = s[1];
- s2 = s[2];
- s3 = s[3];
- len = (s3 << 24) | (s2 << 16) | (s1 << 8) | s0;
- break;
- case PSTRING_4_BE:
- s0 = s[0];
- s1 = s[1];
- s2 = s[2];
- s3 = s[3];
- len = (s0 << 24) | (s1 << 16) | (s2 << 8) | s3;
- break;
- default:
- file_error(ms, 0, "corrupt magic file "
- "(bad pascal string length %d)",
- m->str_flags & PSTRING_LEN);
- return FILE_BADSIZE;
- }
-
- if (m->str_flags & PSTRING_LENGTH_INCLUDES_ITSELF) {
- size_t l = file_pstring_length_size(ms, m);
- if (l == FILE_BADSIZE)
- return l;
- len -= l;
- }
-
- return len;
-}
-
-file_protected int
-file_magicfind(struct magic_set *ms, const char *name, struct mlist *v)
-{
- uint32_t i, j;
- struct mlist *mlist, *ml;
-
- mlist = ms->mlist[1];
-
- for (ml = mlist->next; ml != mlist; ml = ml->next) {
- struct magic *ma = ml->magic;
- for (i = 0; i < ml->nmagic; i++) {
- if (ma[i].type != FILE_NAME)
- continue;
- if (strcmp(ma[i].value.s, name) == 0) {
- v->magic = &ma[i];
- v->magic_rxcomp = &(ml->magic_rxcomp[i]);
- for (j = i + 1; j < ml->nmagic; j++)
- if (ma[j].cont_level == 0)
- break;
- v->nmagic = j - i;
- return 0;
- }
- }
- }
- return -1;
-}
diff --git a/contrib/libs/libmagic/src/apptype.c b/contrib/libs/libmagic/src/apptype.c
deleted file mode 100644
index 9473627567..0000000000
--- a/contrib/libs/libmagic/src/apptype.c
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * Adapted from: apptype.c, Written by Eberhard Mattes and put into the
- * file_public domain
- *
- * Notes: 1. Qualify the filename so that DosQueryAppType does not do extraneous
- * searches.
- *
- * 2. DosQueryAppType will return FAPPTYP_DOS on a file ending with ".com"
- * (other than an OS/2 exe or Win exe with this name). Eberhard Mattes
- * remarks Tue, 6 Apr 93: Moreover, it reports the type of the (new and very
- * bug ridden) Win Emacs as "OS/2 executable".
- *
- * 3. apptype() uses the filename if given, otherwise a tmp file is created with
- * the contents of buf. If buf is not the complete file, apptype can
- * incorrectly identify the exe type. The "-z" option of "file" is the reason
- * for this ugly code.
- */
-
-/*
- * amai: Darrel Hankerson did the changes described here.
- *
- * It remains to check the validity of comments (2.) since it's referred to an
- * "old" OS/2 version.
- *
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: apptype.c,v 1.17 2022/12/26 17:31:14 christos Exp $")
-#endif /* lint */
-
-#include <stdlib.h>
-#include <string.h>
-
-#ifdef __EMX__
-#include <io.h>
-#define INCL_DOSSESMGR
-#define INCL_DOSERRORS
-#define INCL_DOSFILEMGR
-#include <os2.h>
-typedef ULONG APPTYPE;
-
-file_protected int
-file_os2_apptype(struct magic_set *ms, const char *fn, const void *buf,
- size_t nb)
-{
- APPTYPE rc, type;
- char path[_MAX_PATH], drive[_MAX_DRIVE], dir[_MAX_DIR],
- fname[_MAX_FNAME], ext[_MAX_EXT];
- char *filename;
- FILE *fp;
-
- if (fn)
- filename = strdup(fn);
- else if ((filename = tempnam("./", "tmp")) == NULL) {
- file_error(ms, errno, "cannot create tempnam");
- return -1;
- }
- /* qualify the filename to prevent extraneous searches */
- _splitpath(filename, drive, dir, fname, ext);
- (void)sprintf(path, "%s%s%s%s", drive,
- (*dir == '\0') ? "./" : dir,
- fname,
- (*ext == '\0') ? "." : ext);
-
- if (fn == NULL) {
- if ((fp = fopen(path, "wb")) == NULL) {
- file_error(ms, errno, "cannot open tmp file `%s'", path);
- return -1;
- }
- if (fwrite(buf, 1, nb, fp) != nb) {
- file_error(ms, errno, "cannot write tmp file `%s'",
- path);
- (void)fclose(fp);
- return -1;
- }
- (void)fclose(fp);
- }
- rc = DosQueryAppType((unsigned char *)path, &type);
-
- if (fn == NULL) {
- unlink(path);
- free(filename);
- }
-#if 0
- if (rc == ERROR_INVALID_EXE_SIGNATURE)
- printf("%s: not an executable file\n", fname);
- else if (rc == ERROR_FILE_NOT_FOUND)
- printf("%s: not found\n", fname);
- else if (rc == ERROR_ACCESS_DENIED)
- printf("%s: access denied\n", fname);
- else if (rc != 0)
- printf("%s: error code = %lu\n", fname, rc);
- else
-#else
-
- /*
- * for our purpose here it's sufficient to just ignore the error and
- * return w/o success (=0)
- */
-
- if (rc)
- return (0);
-
-#endif
-
- if (type & FAPPTYP_32BIT)
- if (file_printf(ms, "32-bit ") == -1)
- return -1;
- if (type & FAPPTYP_PHYSDRV) {
- if (file_printf(ms, "physical device driver") == -1)
- return -1;
- } else if (type & FAPPTYP_VIRTDRV) {
- if (file_printf(ms, "virtual device driver") == -1)
- return -1;
- } else if (type & FAPPTYP_DLL) {
- if (type & FAPPTYP_PROTDLL)
- if (file_printf(ms, "file_protected ") == -1)
- return -1;
- if (file_printf(ms, "DLL") == -1)
- return -1;
- } else if (type & (FAPPTYP_WINDOWSREAL | FAPPTYP_WINDOWSPROT)) {
- if (file_printf(ms, "Windows executable") == -1)
- return -1;
- } else if (type & FAPPTYP_DOS) {
- /*
- * The API routine is partially broken on filenames ending
- * ".com".
- */
- if (stricmp(ext, ".com") == 0)
- if (strncmp((const char *)buf, "MZ", 2))
- return (0);
- if (file_printf(ms, "DOS executable") == -1)
- return -1;
- /* ---------------------------------------- */
- /* Might learn more from the magic(4) entry */
- if (file_printf(ms, ", magic(4)-> ") == -1)
- return -1;
- return (0);
- /* ---------------------------------------- */
- } else if (type & FAPPTYP_BOUND) {
- if (file_printf(ms, "bound executable") == -1)
- return -1;
- } else if ((type & 7) == FAPPTYP_WINDOWAPI) {
- if (file_printf(ms, "PM executable") == -1)
- return -1;
- } else if (file_printf(ms, "OS/2 executable") == -1)
- return -1;
-
- switch (type & (FAPPTYP_NOTWINDOWCOMPAT |
- FAPPTYP_WINDOWCOMPAT |
- FAPPTYP_WINDOWAPI)) {
- case FAPPTYP_NOTWINDOWCOMPAT:
- if (file_printf(ms, " [NOTWINDOWCOMPAT]") == -1)
- return -1;
- break;
- case FAPPTYP_WINDOWCOMPAT:
- if (file_printf(ms, " [WINDOWCOMPAT]") == -1)
- return -1;
- break;
- case FAPPTYP_WINDOWAPI:
- if (file_printf(ms, " [WINDOWAPI]") == -1)
- return -1;
- break;
- }
- return 1;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/ascmagic.c b/contrib/libs/libmagic/src/ascmagic.c
deleted file mode 100644
index 2d61267923..0000000000
--- a/contrib/libs/libmagic/src/ascmagic.c
+++ /dev/null
@@ -1,389 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * ASCII magic -- try to detect text encoding.
- *
- * Extensively modified by Eric Fischer <enf@pobox.com> in July, 2000,
- * to handle character codes other than ASCII on a unified basis.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: ascmagic.c,v 1.116 2023/05/21 16:08:50 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <string.h>
-#include <ctype.h>
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-
-#define MAXLINELEN 300 /* longest sane line length */
-#define ISSPC(x) ((x) == ' ' || (x) == '\t' || (x) == '\r' || (x) == '\n' \
- || (x) == 0x85 || (x) == '\f')
-
-file_private unsigned char *encode_utf8(unsigned char *, size_t, file_unichar_t *,
- size_t);
-file_private size_t trim_nuls(const unsigned char *, size_t);
-
-/*
- * Undo the NUL-termination kindly provided by process()
- * but leave at least one byte to look at
- */
-file_private size_t
-trim_nuls(const unsigned char *buf, size_t nbytes)
-{
- while (nbytes > 1 && buf[nbytes - 1] == '\0')
- nbytes--;
-
- return nbytes;
-}
-
-file_protected int
-file_ascmagic(struct magic_set *ms, const struct buffer *b, int text)
-{
- file_unichar_t *ubuf = NULL;
- size_t ulen = 0;
- int rv = 1;
- struct buffer bb;
-
- const char *code = NULL;
- const char *code_mime = NULL;
- const char *type = NULL;
-
- bb = *b;
- bb.flen = trim_nuls(CAST(const unsigned char *, b->fbuf), b->flen);
- /*
- * Avoid trimming at an odd byte if the original buffer was evenly
- * sized; this avoids losing the last character on UTF-16 LE text
- */
- if ((bb.flen & 1) && !(b->flen & 1))
- bb.flen++;
-
- /* If file doesn't look like any sort of text, give up. */
- if (file_encoding(ms, &bb, &ubuf, &ulen, &code, &code_mime,
- &type) == 0)
- rv = 0;
- else
- rv = file_ascmagic_with_encoding(ms, &bb,
- ubuf, ulen, code, type, text);
-
- free(ubuf);
-
- return rv;
-}
-
-file_protected int
-file_ascmagic_with_encoding(struct magic_set *ms, const struct buffer *b,
- file_unichar_t *ubuf, size_t ulen, const char *code, const char *type,
- int text)
-{
- struct buffer bb;
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- unsigned char *utf8_buf = NULL, *utf8_end;
- size_t mlen, i, len;
- int rv = -1;
- int mime = ms->flags & MAGIC_MIME;
- int need_separator = 0;
-
- const char *subtype = NULL;
-
- int has_escapes = 0;
- int has_backspace = 0;
- int seen_cr = 0;
-
- size_t n_crlf = 0;
- size_t n_lf = 0;
- size_t n_cr = 0;
- size_t n_nel = 0;
- int executable = 0;
-
- size_t last_line_end = CAST(size_t, -1);
- size_t has_long_lines = 0;
-
- nbytes = trim_nuls(buf, nbytes);
-
- /* If we have fewer than 2 bytes, give up. */
- if (nbytes <= 1) {
- rv = 0;
- goto done;
- }
-
- if (ulen > 0 && (ms->flags & MAGIC_NO_CHECK_SOFT) == 0) {
- /* Convert ubuf to UTF-8 and try text soft magic */
- /* malloc size is a conservative overestimate; could be
- improved, or at least realloced after conversion. */
- mlen = ulen * 6;
- if ((utf8_buf = CAST(unsigned char *, malloc(mlen))) == NULL) {
- file_oomem(ms, mlen);
- goto done;
- }
- if ((utf8_end = encode_utf8(utf8_buf, mlen, ubuf, ulen))
- == NULL) {
- rv = 0;
- goto done;
- }
- buffer_init(&bb, b->fd, &b->st, utf8_buf,
- CAST(size_t, utf8_end - utf8_buf));
-
- if ((rv = file_softmagic(ms, &bb, NULL, NULL,
- TEXTTEST, text)) == 0)
- rv = -1;
- else
- need_separator = 1;
- buffer_fini(&bb);
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION))) {
- rv = rv == -1 ? 0 : 1;
- goto done;
- }
- }
-
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION))) {
- rv = 0;
- goto done;
- }
-
- /* Now try to discover other details about the file. */
- for (i = 0; i < ulen; i++) {
- if (ubuf[i] == '\n') {
- if (seen_cr)
- n_crlf++;
- else
- n_lf++;
- last_line_end = i;
- } else if (seen_cr)
- n_cr++;
-
- seen_cr = (ubuf[i] == '\r');
- if (seen_cr)
- last_line_end = i;
-
- if (ubuf[i] == 0x85) { /* X3.64/ECMA-43 "next line" character */
- n_nel++;
- last_line_end = i;
- }
-
- /* If this line is _longer_ than MAXLINELEN, remember it. */
- if (i > last_line_end + MAXLINELEN) {
- size_t ll = i - last_line_end;
- if (ll > has_long_lines)
- has_long_lines = ll;
- }
-
- if (ubuf[i] == '\033')
- has_escapes = 1;
- if (ubuf[i] == '\b')
- has_backspace = 1;
- }
-
- if (strcmp(type, "binary") == 0) {
- rv = 0;
- goto done;
- }
- len = file_printedlen(ms);
- if (mime) {
- if ((mime & MAGIC_MIME_TYPE) != 0) {
- if (len) {
- /*
- * Softmagic printed something, we
- * are either done, or we need a separator
- */
- if ((ms->flags & MAGIC_CONTINUE) == 0) {
- rv = 1;
- goto done;
- }
- if (need_separator && file_separator(ms) == -1)
- goto done;
- }
- if (file_printf(ms, "text/plain") == -1)
- goto done;
- }
- } else {
- if (len) {
- switch (file_replace(ms, " text$", ", ")) {
- case 0:
- switch (file_replace(ms, " text executable$",
- ", ")) {
- case 0:
- if (file_printf(ms, ", ") == -1)
- goto done;
- break;
- case -1:
- goto done;
- default:
- executable = 1;
- break;
- }
- break;
- case -1:
- goto done;
- default:
- break;
- }
- }
-
- if (file_printf(ms, "%s", code) == -1)
- goto done;
-
- if (subtype) {
- if (file_printf(ms, " %s", subtype) == -1)
- goto done;
- }
-
- if (file_printf(ms, " %s", type) == -1)
- goto done;
-
- if (executable)
- if (file_printf(ms, " executable") == -1)
- goto done;
-
- if (has_long_lines)
- if (file_printf(ms, ", with very long lines (%"
- SIZE_T_FORMAT "u)", has_long_lines) == -1)
- goto done;
-
- /*
- * Only report line terminators if we find one other than LF,
- * or if we find none at all.
- */
- if ((n_crlf == 0 && n_cr == 0 && n_nel == 0 && n_lf == 0) ||
- (n_crlf != 0 || n_cr != 0 || n_nel != 0)) {
- if (file_printf(ms, ", with") == -1)
- goto done;
-
- if (n_crlf == 0 && n_cr == 0 &&
- n_nel == 0 && n_lf == 0) {
- if (file_printf(ms, " no") == -1)
- goto done;
- } else {
- if (n_crlf) {
- if (file_printf(ms, " CRLF") == -1)
- goto done;
- if (n_cr || n_lf || n_nel)
- if (file_printf(ms, ",") == -1)
- goto done;
- }
- if (n_cr) {
- if (file_printf(ms, " CR") == -1)
- goto done;
- if (n_lf || n_nel)
- if (file_printf(ms, ",") == -1)
- goto done;
- }
- if (n_lf) {
- if (file_printf(ms, " LF") == -1)
- goto done;
- if (n_nel)
- if (file_printf(ms, ",") == -1)
- goto done;
- }
- if (n_nel)
- if (file_printf(ms, " NEL") == -1)
- goto done;
- }
-
- if (file_printf(ms, " line terminators") == -1)
- goto done;
- }
-
- if (has_escapes)
- if (file_printf(ms, ", with escape sequences") == -1)
- goto done;
- if (has_backspace)
- if (file_printf(ms, ", with overstriking") == -1)
- goto done;
- }
- rv = 1;
-done:
- free(utf8_buf);
-
- return rv;
-}
-
-/*
- * Encode Unicode string as UTF-8, returning pointer to character
- * after end of string, or NULL if an invalid character is found.
- */
-file_private unsigned char *
-encode_utf8(unsigned char *buf, size_t len, file_unichar_t *ubuf, size_t ulen)
-{
- size_t i;
- unsigned char *end = buf + len;
-
- for (i = 0; i < ulen; i++) {
- if (ubuf[i] <= 0x7f) {
- if (end - buf < 1)
- return NULL;
- *buf++ = CAST(unsigned char, ubuf[i]);
- continue;
- }
- if (ubuf[i] <= 0x7ff) {
- if (end - buf < 2)
- return NULL;
- *buf++ = CAST(unsigned char, (ubuf[i] >> 6) + 0xc0);
- goto out1;
- }
- if (ubuf[i] <= 0xffff) {
- if (end - buf < 3)
- return NULL;
- *buf++ = CAST(unsigned char, (ubuf[i] >> 12) + 0xe0);
- goto out2;
- }
- if (ubuf[i] <= 0x1fffff) {
- if (end - buf < 4)
- return NULL;
- *buf++ = CAST(unsigned char, (ubuf[i] >> 18) + 0xf0);
- goto out3;
- }
- if (ubuf[i] <= 0x3ffffff) {
- if (end - buf < 5)
- return NULL;
- *buf++ = CAST(unsigned char, (ubuf[i] >> 24) + 0xf8);
- goto out4;
- }
- if (ubuf[i] <= 0x7fffffff) {
- if (end - buf < 6)
- return NULL;
- *buf++ = CAST(unsigned char, (ubuf[i] >> 30) + 0xfc);
- goto out5;
- }
- /* Invalid character */
- return NULL;
- out5: *buf++ = CAST(unsigned char, ((ubuf[i] >> 24) & 0x3f) + 0x80);
- out4: *buf++ = CAST(unsigned char, ((ubuf[i] >> 18) & 0x3f) + 0x80);
- out3: *buf++ = CAST(unsigned char, ((ubuf[i] >> 12) & 0x3f) + 0x80);
- out2: *buf++ = CAST(unsigned char, ((ubuf[i] >> 6) & 0x3f) + 0x80);
- out1: *buf++ = CAST(unsigned char, ((ubuf[i] >> 0) & 0x3f) + 0x80);
- }
-
- return buf;
-}
diff --git a/contrib/libs/libmagic/src/buffer.c b/contrib/libs/libmagic/src/buffer.c
deleted file mode 100644
index 598db1471d..0000000000
--- a/contrib/libs/libmagic/src/buffer.c
+++ /dev/null
@@ -1,95 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2017.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: buffer.c,v 1.13 2023/07/02 12:48:39 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <unistd.h>
-#include <string.h>
-#include <stdlib.h>
-#include <sys/stat.h>
-
-void
-buffer_init(struct buffer *b, int fd, const struct stat *st, const void *data,
- size_t len)
-{
- b->fd = fd;
- if (st)
- memcpy(&b->st, st, sizeof(b->st));
- else if (b->fd == -1 || fstat(b->fd, &b->st) == -1)
- memset(&b->st, 0, sizeof(b->st));
- b->fbuf = data;
- b->flen = len;
- b->eoff = 0;
- b->ebuf = NULL;
- b->elen = 0;
-}
-
-void
-buffer_fini(struct buffer *b)
-{
- free(b->ebuf);
- b->ebuf = NULL;
- b->elen = 0;
-}
-
-int
-buffer_fill(const struct buffer *bb)
-{
- struct buffer *b = CCAST(struct buffer *, bb);
-
- if (b->elen != 0)
- return b->elen == FILE_BADSIZE ? -1 : 0;
-
- if (!S_ISREG(b->st.st_mode))
- goto out;
-
- b->elen = CAST(size_t, b->st.st_size) < b->flen ?
- CAST(size_t, b->st.st_size) : b->flen;
- if (b->elen == 0) {
- free(b->ebuf);
- b->ebuf = NULL;
- return 0;
- }
- if ((b->ebuf = malloc(b->elen)) == NULL)
- goto out;
-
- b->eoff = b->st.st_size - b->elen;
- if (pread(b->fd, b->ebuf, b->elen, b->eoff) == -1) {
- free(b->ebuf);
- b->ebuf = NULL;
- goto out;
- }
-
- return 0;
-out:
- b->elen = FILE_BADSIZE;
- return -1;
-}
diff --git a/contrib/libs/libmagic/src/cdf.c b/contrib/libs/libmagic/src/cdf.c
deleted file mode 100644
index fb3cf7cb59..0000000000
--- a/contrib/libs/libmagic/src/cdf.c
+++ /dev/null
@@ -1,1676 +0,0 @@
-/*-
- * Copyright (c) 2008 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*
- * Parse Composite Document Files, the format used in Microsoft Office
- * document files before they switched to zipped XML.
- * Info from: http://sc.openoffice.org/compdocfileformat.pdf
- *
- * N.B. This is the "Composite Document File" format, and not the
- * "Compound Document Format", nor the "Channel Definition Format".
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: cdf.c,v 1.123 2022/09/24 20:30:13 christos Exp $")
-#endif
-
-#include <assert.h>
-#ifdef CDF_DEBUG
-#include <err.h>
-#endif
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <time.h>
-#include <ctype.h>
-#include <limits.h>
-#ifdef HAVE_BYTESWAP_H
-#include <byteswap.h>
-#endif
-#ifdef HAVE_SYS_BSWAP_H
-#error #include <sys/bswap.h>
-#endif
-
-#ifndef EFTYPE
-#define EFTYPE EINVAL
-#endif
-
-#ifndef SIZE_T_MAX
-#define SIZE_T_MAX CAST(size_t, ~0ULL)
-#endif
-
-#include "cdf.h"
-
-#ifdef CDF_DEBUG
-#define DPRINTF(a) printf a, fflush(stdout)
-#else
-#define DPRINTF(a)
-#endif
-
-static union {
- char s[4];
- uint32_t u;
-} cdf_bo;
-
-#define NEED_SWAP (cdf_bo.u == CAST(uint32_t, 0x01020304))
-
-#define CDF_TOLE8(x) \
- (CAST(uint64_t, NEED_SWAP ? _cdf_tole8(x) : CAST(uint64_t, x)))
-#define CDF_TOLE4(x) \
- (CAST(uint32_t, NEED_SWAP ? _cdf_tole4(x) : CAST(uint32_t, x)))
-#define CDF_TOLE2(x) \
- (CAST(uint16_t, NEED_SWAP ? _cdf_tole2(x) : CAST(uint16_t, x)))
-#define CDF_TOLE(x) (/*CONSTCOND*/sizeof(x) == 2 ? \
- CDF_TOLE2(CAST(uint16_t, x)) : \
- (/*CONSTCOND*/sizeof(x) == 4 ? \
- CDF_TOLE4(CAST(uint32_t, x)) : \
- CDF_TOLE8(CAST(uint64_t, x))))
-#define CDF_GETUINT32(x, y) cdf_getuint32(x, y)
-
-#define CDF_MALLOC(n) cdf_malloc(__FILE__, __LINE__, (n))
-#define CDF_REALLOC(p, n) cdf_realloc(__FILE__, __LINE__, (p), (n))
-#define CDF_CALLOC(n, u) cdf_calloc(__FILE__, __LINE__, (n), (u))
-
-
-/*ARGSUSED*/
-static void *
-cdf_malloc(const char *file __attribute__((__unused__)),
- size_t line __attribute__((__unused__)), size_t n)
-{
- DPRINTF(("%s,%" SIZE_T_FORMAT "u: %s %" SIZE_T_FORMAT "u\n",
- file, line, __func__, n));
- if (n == 0)
- n++;
- return malloc(n);
-}
-
-/*ARGSUSED*/
-static void *
-cdf_realloc(const char *file __attribute__((__unused__)),
- size_t line __attribute__((__unused__)), void *p, size_t n)
-{
- DPRINTF(("%s,%" SIZE_T_FORMAT "u: %s %" SIZE_T_FORMAT "u\n",
- file, line, __func__, n));
- return realloc(p, n);
-}
-
-/*ARGSUSED*/
-static void *
-cdf_calloc(const char *file __attribute__((__unused__)),
- size_t line __attribute__((__unused__)), size_t n, size_t u)
-{
- DPRINTF(("%s,%" SIZE_T_FORMAT "u: %s %" SIZE_T_FORMAT "u %"
- SIZE_T_FORMAT "u\n", file, line, __func__, n, u));
- if (n == 0)
- n++;
- return calloc(n, u);
-}
-
-#if defined(HAVE_BYTESWAP_H)
-# define _cdf_tole2(x) bswap_16(x)
-# define _cdf_tole4(x) bswap_32(x)
-# define _cdf_tole8(x) bswap_64(x)
-#elif defined(HAVE_SYS_BSWAP_H)
-# define _cdf_tole2(x) bswap16(x)
-# define _cdf_tole4(x) bswap32(x)
-# define _cdf_tole8(x) bswap64(x)
-#else
-/*
- * swap a short
- */
-static uint16_t
-_cdf_tole2(uint16_t sv)
-{
- uint16_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
- d[0] = s[1];
- d[1] = s[0];
- return rv;
-}
-
-/*
- * swap an int
- */
-static uint32_t
-_cdf_tole4(uint32_t sv)
-{
- uint32_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
- d[0] = s[3];
- d[1] = s[2];
- d[2] = s[1];
- d[3] = s[0];
- return rv;
-}
-
-/*
- * swap a quad
- */
-static uint64_t
-_cdf_tole8(uint64_t sv)
-{
- uint64_t rv;
- uint8_t *s = RCAST(uint8_t *, RCAST(void *, &sv));
- uint8_t *d = RCAST(uint8_t *, RCAST(void *, &rv));
- d[0] = s[7];
- d[1] = s[6];
- d[2] = s[5];
- d[3] = s[4];
- d[4] = s[3];
- d[5] = s[2];
- d[6] = s[1];
- d[7] = s[0];
- return rv;
-}
-#endif
-
-/*
- * grab a uint32_t from a possibly unaligned address, and return it in
- * the native host order.
- */
-static uint32_t
-cdf_getuint32(const uint8_t *p, size_t offs)
-{
- uint32_t rv;
- (void)memcpy(&rv, p + offs * sizeof(uint32_t), sizeof(rv));
- return CDF_TOLE4(rv);
-}
-
-#define CDF_UNPACK(a) \
- (void)memcpy(&(a), &buf[len], sizeof(a)), len += sizeof(a)
-#define CDF_UNPACKA(a) \
- (void)memcpy((a), &buf[len], sizeof(a)), len += sizeof(a)
-
-uint16_t
-cdf_tole2(uint16_t sv)
-{
- return CDF_TOLE2(sv);
-}
-
-uint32_t
-cdf_tole4(uint32_t sv)
-{
- return CDF_TOLE4(sv);
-}
-
-uint64_t
-cdf_tole8(uint64_t sv)
-{
- return CDF_TOLE8(sv);
-}
-
-void
-cdf_swap_header(cdf_header_t *h)
-{
- size_t i;
-
- h->h_magic = CDF_TOLE8(h->h_magic);
- h->h_uuid[0] = CDF_TOLE8(h->h_uuid[0]);
- h->h_uuid[1] = CDF_TOLE8(h->h_uuid[1]);
- h->h_revision = CDF_TOLE2(h->h_revision);
- h->h_version = CDF_TOLE2(h->h_version);
- h->h_byte_order = CDF_TOLE2(h->h_byte_order);
- h->h_sec_size_p2 = CDF_TOLE2(h->h_sec_size_p2);
- h->h_short_sec_size_p2 = CDF_TOLE2(h->h_short_sec_size_p2);
- h->h_num_sectors_in_sat = CDF_TOLE4(h->h_num_sectors_in_sat);
- h->h_secid_first_directory = CDF_TOLE4(h->h_secid_first_directory);
- h->h_min_size_standard_stream =
- CDF_TOLE4(h->h_min_size_standard_stream);
- h->h_secid_first_sector_in_short_sat =
- CDF_TOLE4(CAST(uint32_t, h->h_secid_first_sector_in_short_sat));
- h->h_num_sectors_in_short_sat =
- CDF_TOLE4(h->h_num_sectors_in_short_sat);
- h->h_secid_first_sector_in_master_sat =
- CDF_TOLE4(CAST(uint32_t, h->h_secid_first_sector_in_master_sat));
- h->h_num_sectors_in_master_sat =
- CDF_TOLE4(h->h_num_sectors_in_master_sat);
- for (i = 0; i < __arraycount(h->h_master_sat); i++) {
- h->h_master_sat[i] =
- CDF_TOLE4(CAST(uint32_t, h->h_master_sat[i]));
- }
-}
-
-void
-cdf_unpack_header(cdf_header_t *h, char *buf)
-{
- size_t i;
- size_t len = 0;
-
- CDF_UNPACK(h->h_magic);
- CDF_UNPACKA(h->h_uuid);
- CDF_UNPACK(h->h_revision);
- CDF_UNPACK(h->h_version);
- CDF_UNPACK(h->h_byte_order);
- CDF_UNPACK(h->h_sec_size_p2);
- CDF_UNPACK(h->h_short_sec_size_p2);
- CDF_UNPACKA(h->h_unused0);
- CDF_UNPACK(h->h_num_sectors_in_sat);
- CDF_UNPACK(h->h_secid_first_directory);
- CDF_UNPACKA(h->h_unused1);
- CDF_UNPACK(h->h_min_size_standard_stream);
- CDF_UNPACK(h->h_secid_first_sector_in_short_sat);
- CDF_UNPACK(h->h_num_sectors_in_short_sat);
- CDF_UNPACK(h->h_secid_first_sector_in_master_sat);
- CDF_UNPACK(h->h_num_sectors_in_master_sat);
- for (i = 0; i < __arraycount(h->h_master_sat); i++)
- CDF_UNPACK(h->h_master_sat[i]);
-}
-
-void
-cdf_swap_dir(cdf_directory_t *d)
-{
- d->d_namelen = CDF_TOLE2(d->d_namelen);
- d->d_left_child = CDF_TOLE4(CAST(uint32_t, d->d_left_child));
- d->d_right_child = CDF_TOLE4(CAST(uint32_t, d->d_right_child));
- d->d_storage = CDF_TOLE4(CAST(uint32_t, d->d_storage));
- d->d_storage_uuid[0] = CDF_TOLE8(d->d_storage_uuid[0]);
- d->d_storage_uuid[1] = CDF_TOLE8(d->d_storage_uuid[1]);
- d->d_flags = CDF_TOLE4(d->d_flags);
- d->d_created = CDF_TOLE8(CAST(uint64_t, d->d_created));
- d->d_modified = CDF_TOLE8(CAST(uint64_t, d->d_modified));
- d->d_stream_first_sector = CDF_TOLE4(
- CAST(uint32_t, d->d_stream_first_sector));
- d->d_size = CDF_TOLE4(d->d_size);
-}
-
-void
-cdf_swap_class(cdf_classid_t *d)
-{
- d->cl_dword = CDF_TOLE4(d->cl_dword);
- d->cl_word[0] = CDF_TOLE2(d->cl_word[0]);
- d->cl_word[1] = CDF_TOLE2(d->cl_word[1]);
-}
-
-void
-cdf_unpack_dir(cdf_directory_t *d, char *buf)
-{
- size_t len = 0;
-
- CDF_UNPACKA(d->d_name);
- CDF_UNPACK(d->d_namelen);
- CDF_UNPACK(d->d_type);
- CDF_UNPACK(d->d_color);
- CDF_UNPACK(d->d_left_child);
- CDF_UNPACK(d->d_right_child);
- CDF_UNPACK(d->d_storage);
- CDF_UNPACKA(d->d_storage_uuid);
- CDF_UNPACK(d->d_flags);
- CDF_UNPACK(d->d_created);
- CDF_UNPACK(d->d_modified);
- CDF_UNPACK(d->d_stream_first_sector);
- CDF_UNPACK(d->d_size);
- CDF_UNPACK(d->d_unused0);
-}
-
-int
-cdf_zero_stream(cdf_stream_t *scn)
-{
- scn->sst_len = 0;
- scn->sst_dirlen = 0;
- scn->sst_ss = 0;
- free(scn->sst_tab);
- scn->sst_tab = NULL;
- return -1;
-}
-
-static size_t
-cdf_check_stream(const cdf_stream_t *sst, const cdf_header_t *h)
-{
- size_t ss = sst->sst_dirlen < h->h_min_size_standard_stream ?
- CDF_SHORT_SEC_SIZE(h) : CDF_SEC_SIZE(h);
- assert(ss == sst->sst_ss);
- return sst->sst_ss;
-}
-
-static int
-cdf_check_stream_offset(const cdf_stream_t *sst, const cdf_header_t *h,
- const void *p, size_t tail, int line)
-{
- const char *b = RCAST(const char *, sst->sst_tab);
- const char *e = RCAST(const char *, p) + tail;
- size_t ss = cdf_check_stream(sst, h);
- /*LINTED*/(void)&line;
- if (e >= b && CAST(size_t, e - b) <= ss * sst->sst_len)
- return 0;
- DPRINTF(("%d: offset begin %p < end %p || %" SIZE_T_FORMAT "u"
- " > %" SIZE_T_FORMAT "u [%" SIZE_T_FORMAT "u %"
- SIZE_T_FORMAT "u]\n", line, b, e, (size_t)(e - b),
- ss * sst->sst_len, ss, sst->sst_len));
- errno = EFTYPE;
- return -1;
-}
-
-static ssize_t
-cdf_read(const cdf_info_t *info, off_t off, void *buf, size_t len)
-{
- size_t siz = CAST(size_t, off + len);
-
- if (CAST(off_t, off + len) != CAST(off_t, siz))
- goto out;
-
- if (info->i_buf != NULL && info->i_len >= siz) {
- (void)memcpy(buf, &info->i_buf[off], len);
- return CAST(ssize_t, len);
- }
-
- if (info->i_fd == -1)
- goto out;
-
- if (pread(info->i_fd, buf, len, off) != CAST(ssize_t, len))
- return -1;
-
- return CAST(ssize_t, len);
-out:
- errno = EINVAL;
- return -1;
-}
-
-int
-cdf_read_header(const cdf_info_t *info, cdf_header_t *h)
-{
- char buf[512];
-
- (void)memcpy(cdf_bo.s, "\01\02\03\04", 4);
- if (cdf_read(info, CAST(off_t, 0), buf, sizeof(buf)) == -1)
- return -1;
- cdf_unpack_header(h, buf);
- cdf_swap_header(h);
- if (h->h_magic != CDF_MAGIC) {
- DPRINTF(("Bad magic %#" INT64_T_FORMAT "x != %#"
- INT64_T_FORMAT "x\n",
- (unsigned long long)h->h_magic,
- (unsigned long long)CDF_MAGIC));
- goto out;
- }
- if (h->h_sec_size_p2 > 20) {
- DPRINTF(("Bad sector size %hu\n", h->h_sec_size_p2));
- goto out;
- }
- if (h->h_short_sec_size_p2 > 20) {
- DPRINTF(("Bad short sector size %hu\n",
- h->h_short_sec_size_p2));
- goto out;
- }
- return 0;
-out:
- errno = EFTYPE;
- return -1;
-}
-
-
-ssize_t
-cdf_read_sector(const cdf_info_t *info, void *buf, size_t offs, size_t len,
- const cdf_header_t *h, cdf_secid_t id)
-{
- size_t ss = CDF_SEC_SIZE(h);
- size_t pos;
-
- if (SIZE_T_MAX / ss < CAST(size_t, id))
- return -1;
-
- pos = CDF_SEC_POS(h, id);
- assert(ss == len);
- return cdf_read(info, CAST(off_t, pos), RCAST(char *, buf) + offs, len);
-}
-
-ssize_t
-cdf_read_short_sector(const cdf_stream_t *sst, void *buf, size_t offs,
- size_t len, const cdf_header_t *h, cdf_secid_t id)
-{
- size_t ss = CDF_SHORT_SEC_SIZE(h);
- size_t pos;
-
- if (SIZE_T_MAX / ss < CAST(size_t, id))
- return -1;
-
- pos = CDF_SHORT_SEC_POS(h, id);
- assert(ss == len);
- if (pos + len > CDF_SEC_SIZE(h) * sst->sst_len) {
- DPRINTF(("Out of bounds read %" SIZE_T_FORMAT "u > %"
- SIZE_T_FORMAT "u\n",
- pos + len, CDF_SEC_SIZE(h) * sst->sst_len));
- goto out;
- }
- (void)memcpy(RCAST(char *, buf) + offs,
- RCAST(const char *, sst->sst_tab) + pos, len);
- return len;
-out:
- errno = EFTYPE;
- return -1;
-}
-
-/*
- * Read the sector allocation table.
- */
-int
-cdf_read_sat(const cdf_info_t *info, cdf_header_t *h, cdf_sat_t *sat)
-{
- size_t i, j, k;
- size_t ss = CDF_SEC_SIZE(h);
- cdf_secid_t *msa, mid, sec;
- size_t nsatpersec = (ss / sizeof(mid)) - 1;
-
- for (i = 0; i < __arraycount(h->h_master_sat); i++)
- if (h->h_master_sat[i] == CDF_SECID_FREE)
- break;
-
-#define CDF_SEC_LIMIT (UINT32_MAX / (64 * ss))
- if ((nsatpersec > 0 &&
- h->h_num_sectors_in_master_sat > CDF_SEC_LIMIT / nsatpersec) ||
- i > CDF_SEC_LIMIT) {
- DPRINTF(("Number of sectors in master SAT too big %u %"
- SIZE_T_FORMAT "u\n", h->h_num_sectors_in_master_sat, i));
- errno = EFTYPE;
- return -1;
- }
-
- sat->sat_len = h->h_num_sectors_in_master_sat * nsatpersec + i;
- DPRINTF(("sat_len = %" SIZE_T_FORMAT "u ss = %" SIZE_T_FORMAT "u\n",
- sat->sat_len, ss));
- if ((sat->sat_tab = CAST(cdf_secid_t *, CDF_CALLOC(sat->sat_len, ss)))
- == NULL)
- return -1;
-
- for (i = 0; i < __arraycount(h->h_master_sat); i++) {
- if (h->h_master_sat[i] < 0)
- break;
- if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
- h->h_master_sat[i]) != CAST(ssize_t, ss)) {
- DPRINTF(("Reading sector %d", h->h_master_sat[i]));
- goto out1;
- }
- }
-
- if ((msa = CAST(cdf_secid_t *, CDF_CALLOC(1, ss))) == NULL)
- goto out1;
-
- mid = h->h_secid_first_sector_in_master_sat;
- for (j = 0; j < h->h_num_sectors_in_master_sat; j++) {
- if (mid < 0)
- goto out;
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Reading master sector loop limit"));
- goto out3;
- }
- if (cdf_read_sector(info, msa, 0, ss, h, mid) !=
- CAST(ssize_t, ss)) {
- DPRINTF(("Reading master sector %d", mid));
- goto out2;
- }
- for (k = 0; k < nsatpersec; k++, i++) {
- sec = CDF_TOLE4(CAST(uint32_t, msa[k]));
- if (sec < 0)
- goto out;
- if (i >= sat->sat_len) {
- DPRINTF(("Out of bounds reading MSA %"
- SIZE_T_FORMAT "u >= %" SIZE_T_FORMAT "u",
- i, sat->sat_len));
- goto out3;
- }
- if (cdf_read_sector(info, sat->sat_tab, ss * i, ss, h,
- sec) != CAST(ssize_t, ss)) {
- DPRINTF(("Reading sector %d",
- CDF_TOLE4(msa[k])));
- goto out2;
- }
- }
- mid = CDF_TOLE4(CAST(uint32_t, msa[nsatpersec]));
- }
-out:
- sat->sat_len = i;
- free(msa);
- return 0;
-out3:
- errno = EFTYPE;
-out2:
- free(msa);
-out1:
- free(sat->sat_tab);
- return -1;
-}
-
-size_t
-cdf_count_chain(const cdf_sat_t *sat, cdf_secid_t sid, size_t size)
-{
- size_t i, j;
- cdf_secid_t maxsector = CAST(cdf_secid_t, (sat->sat_len * size)
- / sizeof(maxsector));
-
- DPRINTF(("Chain:"));
- if (sid == CDF_SECID_END_OF_CHAIN) {
- /* 0-length chain. */
- DPRINTF((" empty\n"));
- return 0;
- }
-
- for (j = i = 0; sid >= 0; i++, j++) {
- DPRINTF((" %d", sid));
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Counting chain loop limit"));
- goto out;
- }
- if (sid >= maxsector) {
- DPRINTF(("Sector %d >= %d\n", sid, maxsector));
- goto out;
- }
- sid = CDF_TOLE4(CAST(uint32_t, sat->sat_tab[sid]));
- }
- if (i == 0) {
- DPRINTF((" none, sid: %d\n", sid));
- goto out;
-
- }
- DPRINTF(("\n"));
- return i;
-out:
- errno = EFTYPE;
- return CAST(size_t, -1);
-}
-
-int
-cdf_read_long_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, cdf_secid_t sid, size_t len, cdf_stream_t *scn)
-{
- size_t ss = CDF_SEC_SIZE(h), i, j;
- ssize_t nr;
- scn->sst_tab = NULL;
- scn->sst_len = cdf_count_chain(sat, sid, ss);
- scn->sst_dirlen = MAX(h->h_min_size_standard_stream, len);
- scn->sst_ss = ss;
-
- if (sid == CDF_SECID_END_OF_CHAIN || len == 0)
- return cdf_zero_stream(scn);
-
- if (scn->sst_len == CAST(size_t, -1))
- goto out;
-
- scn->sst_tab = CDF_CALLOC(scn->sst_len, ss);
- if (scn->sst_tab == NULL)
- return cdf_zero_stream(scn);
-
- for (j = i = 0; sid >= 0; i++, j++) {
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Read long sector chain loop limit"));
- goto out;
- }
- if (i >= scn->sst_len) {
- DPRINTF(("Out of bounds reading long sector chain "
- "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
- scn->sst_len));
- goto out;
- }
- if ((nr = cdf_read_sector(info, scn->sst_tab, i * ss, ss, h,
- sid)) != CAST(ssize_t, ss)) {
- if (i == scn->sst_len - 1 && nr > 0) {
- /* Last sector might be truncated */
- return 0;
- }
- DPRINTF(("Reading long sector chain %d", sid));
- goto out;
- }
- sid = CDF_TOLE4(CAST(uint32_t, sat->sat_tab[sid]));
- }
- return 0;
-out:
- errno = EFTYPE;
- return cdf_zero_stream(scn);
-}
-
-int
-cdf_read_short_sector_chain(const cdf_header_t *h,
- const cdf_sat_t *ssat, const cdf_stream_t *sst,
- cdf_secid_t sid, size_t len, cdf_stream_t *scn)
-{
- size_t ss = CDF_SHORT_SEC_SIZE(h), i, j;
- scn->sst_tab = NULL;
- scn->sst_len = cdf_count_chain(ssat, sid, CDF_SEC_SIZE(h));
- scn->sst_dirlen = len;
- scn->sst_ss = ss;
-
- if (scn->sst_len == CAST(size_t, -1))
- goto out;
-
- scn->sst_tab = CDF_CALLOC(scn->sst_len, ss);
- if (scn->sst_tab == NULL)
- return cdf_zero_stream(scn);
-
- for (j = i = 0; sid >= 0; i++, j++) {
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Read short sector chain loop limit"));
- goto out;
- }
- if (i >= scn->sst_len) {
- DPRINTF(("Out of bounds reading short sector chain "
- "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n",
- i, scn->sst_len));
- goto out;
- }
- if (cdf_read_short_sector(sst, scn->sst_tab, i * ss, ss, h,
- sid) != CAST(ssize_t, ss)) {
- DPRINTF(("Reading short sector chain %d", sid));
- goto out;
- }
- sid = CDF_TOLE4(CAST(uint32_t, ssat->sat_tab[sid]));
- }
- return 0;
-out:
- errno = EFTYPE;
- return cdf_zero_stream(scn);
-}
-
-int
-cdf_read_sector_chain(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
- cdf_secid_t sid, size_t len, cdf_stream_t *scn)
-{
-
- if (len < h->h_min_size_standard_stream && sst->sst_tab != NULL)
- return cdf_read_short_sector_chain(h, ssat, sst, sid, len,
- scn);
- else
- return cdf_read_long_sector_chain(info, h, sat, sid, len, scn);
-}
-
-int
-cdf_read_dir(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, cdf_dir_t *dir)
-{
- size_t i, j;
- size_t ss = CDF_SEC_SIZE(h), ns, nd;
- char *buf;
- cdf_secid_t sid = h->h_secid_first_directory;
-
- ns = cdf_count_chain(sat, sid, ss);
- if (ns == CAST(size_t, -1))
- return -1;
-
- nd = ss / CDF_DIRECTORY_SIZE;
-
- dir->dir_len = ns * nd;
- dir->dir_tab = CAST(cdf_directory_t *,
- CDF_CALLOC(dir->dir_len, sizeof(dir->dir_tab[0])));
- if (dir->dir_tab == NULL)
- return -1;
-
- if ((buf = CAST(char *, CDF_MALLOC(ss))) == NULL) {
- free(dir->dir_tab);
- return -1;
- }
-
- for (j = i = 0; i < ns; i++, j++) {
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Read dir loop limit"));
- goto out;
- }
- if (cdf_read_sector(info, buf, 0, ss, h, sid) !=
- CAST(ssize_t, ss)) {
- DPRINTF(("Reading directory sector %d", sid));
- goto out;
- }
- for (j = 0; j < nd; j++) {
- cdf_unpack_dir(&dir->dir_tab[i * nd + j],
- &buf[j * CDF_DIRECTORY_SIZE]);
- }
- sid = CDF_TOLE4(CAST(uint32_t, sat->sat_tab[sid]));
- }
- if (NEED_SWAP)
- for (i = 0; i < dir->dir_len; i++)
- cdf_swap_dir(&dir->dir_tab[i]);
- free(buf);
- return 0;
-out:
- free(dir->dir_tab);
- free(buf);
- errno = EFTYPE;
- return -1;
-}
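
A small arithmetic check on the sizing above, using assumed but typical numbers (CDF_DIRECTORY_SIZE is 128, per cdf.h below):

/* Assumed: ss = 512, CDF_DIRECTORY_SIZE = 128
 *   nd      = 512 / 128 = 4 packed directory records per sector
 *   dir_len = ns * nd   = 3 * 4 = 12 entries for a 3-sector chain,
 *   unpacked by the inner loop at offsets 0, 128, 256 and 384. */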
-
-
-int
-cdf_read_ssat(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, cdf_sat_t *ssat)
-{
- size_t i, j;
- size_t ss = CDF_SEC_SIZE(h);
- cdf_secid_t sid = h->h_secid_first_sector_in_short_sat;
-
- ssat->sat_tab = NULL;
- ssat->sat_len = cdf_count_chain(sat, sid, ss);
- if (ssat->sat_len == CAST(size_t, -1))
- goto out;
-
- ssat->sat_tab = CAST(cdf_secid_t *, CDF_CALLOC(ssat->sat_len, ss));
- if (ssat->sat_tab == NULL)
- goto out1;
-
- for (j = i = 0; sid >= 0; i++, j++) {
- if (j >= CDF_LOOP_LIMIT) {
- DPRINTF(("Read short sat sector loop limit"));
- goto out;
- }
- if (i >= ssat->sat_len) {
- DPRINTF(("Out of bounds reading short sector chain "
- "%" SIZE_T_FORMAT "u > %" SIZE_T_FORMAT "u\n", i,
- ssat->sat_len));
- goto out;
- }
- if (cdf_read_sector(info, ssat->sat_tab, i * ss, ss, h, sid) !=
- CAST(ssize_t, ss)) {
- DPRINTF(("Reading short sat sector %d", sid));
- goto out1;
- }
- sid = CDF_TOLE4(CAST(uint32_t, sat->sat_tab[sid]));
- }
- return 0;
-out:
- errno = EFTYPE;
-out1:
- free(ssat->sat_tab);
- return -1;
-}
-
-int
-cdf_read_short_stream(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_dir_t *dir, cdf_stream_t *scn,
- const cdf_directory_t **root)
-{
- size_t i;
- const cdf_directory_t *d;
-
- *root = NULL;
- for (i = 0; i < dir->dir_len; i++)
- if (dir->dir_tab[i].d_type == CDF_DIR_TYPE_ROOT_STORAGE)
- break;
-
-	/* If it is not there, just fake it; some docs don't have it */
- if (i == dir->dir_len) {
- DPRINTF(("Cannot find root storage dir\n"));
- goto out;
- }
- d = &dir->dir_tab[i];
- *root = d;
-
-	/* If it is not there, just fake it; some docs don't have it */
-	if (d->d_stream_first_sector < 0) {
-		DPRINTF(("No first sector in dir\n"));
- goto out;
- }
-
- return cdf_read_long_sector_chain(info, h, sat,
- d->d_stream_first_sector, d->d_size, scn);
-out:
- scn->sst_tab = NULL;
- (void)cdf_zero_stream(scn);
- return 0;
-}
-
-static int
-cdf_namecmp(const char *d, const uint16_t *s, size_t l)
-{
- for (; l--; d++, s++)
- if (*d != CDF_TOLE2(*s))
- return CAST(unsigned char, *d) - CDF_TOLE2(*s);
- return 0;
-}
-
-int
-cdf_read_doc_summary_info(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
- const cdf_dir_t *dir, cdf_stream_t *scn)
-{
- return cdf_read_user_stream(info, h, sat, ssat, sst, dir,
- "\05DocumentSummaryInformation", scn);
-}
-
-int
-cdf_read_summary_info(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
- const cdf_dir_t *dir, cdf_stream_t *scn)
-{
- return cdf_read_user_stream(info, h, sat, ssat, sst, dir,
- "\05SummaryInformation", scn);
-}
-
-int
-cdf_read_user_stream(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
- const cdf_dir_t *dir, const char *name, cdf_stream_t *scn)
-{
- const cdf_directory_t *d;
- int i = cdf_find_stream(dir, name, CDF_DIR_TYPE_USER_STREAM);
-
- if (i <= 0) {
- memset(scn, 0, sizeof(*scn));
- return -1;
- }
-
- d = &dir->dir_tab[i - 1];
- return cdf_read_sector_chain(info, h, sat, ssat, sst,
- d->d_stream_first_sector, d->d_size, scn);
-}
-
-int
-cdf_find_stream(const cdf_dir_t *dir, const char *name, int type)
-{
- size_t i, name_len = strlen(name) + 1;
-
- for (i = dir->dir_len; i > 0; i--)
- if (dir->dir_tab[i - 1].d_type == type &&
- cdf_namecmp(name, dir->dir_tab[i - 1].d_name, name_len)
- == 0)
- break;
- if (i > 0)
- return CAST(int, i);
-
- DPRINTF(("Cannot find type %d `%s'\n", type, name));
- errno = ESRCH;
- return 0;
-}
-
-#define CDF_SHLEN_LIMIT (UINT32_MAX / 64)
-#define CDF_PROP_LIMIT (UINT32_MAX / (64 * sizeof(cdf_property_info_t)))
-
-static const void *
-cdf_offset(const void *p, size_t l)
-{
- return CAST(const void *, CAST(const uint8_t *, p) + l);
-}
-
-static const uint8_t *
-cdf_get_property_info_pos(const cdf_stream_t *sst, const cdf_header_t *h,
- const uint8_t *p, const uint8_t *e, size_t i)
-{
- size_t tail = (i << 1) + 1;
- size_t ofs;
-
- if (p >= e) {
- DPRINTF(("Past end %p < %p\n", e, p));
- return NULL;
- }
-
- if (cdf_check_stream_offset(sst, h, p, (tail + 1) * sizeof(uint32_t),
- __LINE__) == -1)
- return NULL;
-
- ofs = CDF_GETUINT32(p, tail);
- if (ofs < 2 * sizeof(uint32_t)) {
- DPRINTF(("Offset too small %zu\n", ofs));
- return NULL;
- }
-
- ofs -= 2 * sizeof(uint32_t);
- if (ofs > CAST(size_t, e - p)) {
- DPRINTF(("Offset too big %zu %td\n", ofs, e - p));
- return NULL;
- }
-
- return CAST(const uint8_t *, cdf_offset(CAST(const void *, p), ofs));
-}
-
-static cdf_property_info_t *
-cdf_grow_info(cdf_property_info_t **info, size_t *maxcount, size_t incr)
-{
- cdf_property_info_t *inp;
- size_t newcount = *maxcount + incr;
-
- if (newcount > CDF_PROP_LIMIT) {
- DPRINTF(("exceeded property limit %" SIZE_T_FORMAT "u > %"
- SIZE_T_FORMAT "u\n", newcount, CDF_PROP_LIMIT));
- goto out;
- }
- inp = CAST(cdf_property_info_t *,
- CDF_REALLOC(*info, newcount * sizeof(*inp)));
- if (inp == NULL)
- goto out;
-
- *info = inp;
- *maxcount = newcount;
- return inp;
-out:
- free(*info);
- *maxcount = 0;
- *info = NULL;
- return NULL;
-}
-
-static int
-cdf_copy_info(cdf_property_info_t *inp, const void *p, const void *e,
- size_t len)
-{
- if (inp->pi_type & CDF_VECTOR)
- return 0;
-
- if (CAST(size_t, CAST(const char *, e) - CAST(const char *, p)) < len)
- return 0;
-
- (void)memcpy(&inp->pi_val, p, len);
-
- switch (len) {
- case 2:
- inp->pi_u16 = CDF_TOLE2(inp->pi_u16);
- break;
- case 4:
- inp->pi_u32 = CDF_TOLE4(inp->pi_u32);
- break;
- case 8:
- inp->pi_u64 = CDF_TOLE8(inp->pi_u64);
- break;
- default:
- abort();
- }
- return 1;
-}
-
-int
-cdf_read_property_info(const cdf_stream_t *sst, const cdf_header_t *h,
- uint32_t offs, cdf_property_info_t **info, size_t *count, size_t *maxcount)
-{
- const cdf_section_header_t *shp;
- cdf_section_header_t sh;
- const uint8_t *p, *q, *e;
- size_t i, o4, nelements, j, slen, left;
- cdf_property_info_t *inp;
-
- if (offs > UINT32_MAX / 4) {
- errno = EFTYPE;
- goto out;
- }
- shp = CAST(const cdf_section_header_t *,
- cdf_offset(sst->sst_tab, offs));
- if (cdf_check_stream_offset(sst, h, shp, sizeof(*shp), __LINE__) == -1)
- goto out;
- sh.sh_len = CDF_TOLE4(shp->sh_len);
- if (sh.sh_len > CDF_SHLEN_LIMIT) {
- errno = EFTYPE;
- goto out;
- }
-
- if (cdf_check_stream_offset(sst, h, shp, sh.sh_len, __LINE__) == -1)
- goto out;
-
- sh.sh_properties = CDF_TOLE4(shp->sh_properties);
- DPRINTF(("section len: %u properties %u\n", sh.sh_len,
- sh.sh_properties));
- if (sh.sh_properties > CDF_PROP_LIMIT)
- goto out;
- inp = cdf_grow_info(info, maxcount, sh.sh_properties);
- if (inp == NULL)
- goto out;
- inp += *count;
- *count += sh.sh_properties;
- p = CAST(const uint8_t *, cdf_offset(sst->sst_tab, offs + sizeof(sh)));
- e = CAST(const uint8_t *, cdf_offset(shp, sh.sh_len));
- if (p >= e || cdf_check_stream_offset(sst, h, e, 0, __LINE__) == -1)
- goto out;
-
- for (i = 0; i < sh.sh_properties; i++) {
- if ((q = cdf_get_property_info_pos(sst, h, p, e, i)) == NULL)
- goto out;
- inp[i].pi_id = CDF_GETUINT32(p, i << 1);
- left = CAST(size_t, e - q);
- if (left < sizeof(uint32_t)) {
-			DPRINTF(("short info (no type)\n"));
- goto out;
- }
- inp[i].pi_type = CDF_GETUINT32(q, 0);
- DPRINTF(("%" SIZE_T_FORMAT "u) id=%#x type=%#x offs=%#tx,%#x\n",
- i, inp[i].pi_id, inp[i].pi_type, q - p, offs));
- if (inp[i].pi_type & CDF_VECTOR) {
- if (left < sizeof(uint32_t) * 2) {
- DPRINTF(("missing CDF_VECTOR length\n"));
- goto out;
- }
- nelements = CDF_GETUINT32(q, 1);
- if (nelements > CDF_ELEMENT_LIMIT || nelements == 0) {
- DPRINTF(("CDF_VECTOR with nelements == %"
- SIZE_T_FORMAT "u\n", nelements));
- goto out;
- }
- slen = 2;
- } else {
- nelements = 1;
- slen = 1;
- }
- o4 = slen * sizeof(uint32_t);
- if (inp[i].pi_type & (CDF_ARRAY|CDF_BYREF|CDF_RESERVED))
- goto unknown;
- switch (inp[i].pi_type & CDF_TYPEMASK) {
- case CDF_NULL:
- case CDF_EMPTY:
- break;
- case CDF_SIGNED16:
- if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int16_t)))
- goto unknown;
- break;
- case CDF_SIGNED32:
- case CDF_BOOL:
- case CDF_UNSIGNED32:
- case CDF_FLOAT:
- if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int32_t)))
- goto unknown;
- break;
- case CDF_SIGNED64:
- case CDF_UNSIGNED64:
- case CDF_DOUBLE:
- case CDF_FILETIME:
- if (!cdf_copy_info(&inp[i], &q[o4], e, sizeof(int64_t)))
- goto unknown;
- break;
- case CDF_LENGTH32_STRING:
- case CDF_LENGTH32_WSTRING:
- if (nelements > 1) {
- size_t nelem = inp - *info;
- inp = cdf_grow_info(info, maxcount, nelements);
- if (inp == NULL)
- goto out;
- inp += nelem;
- }
- for (j = 0; j < nelements && i < sh.sh_properties;
- j++, i++)
- {
- uint32_t l;
-
- if (o4 + sizeof(uint32_t) > left)
- goto out;
-
- l = CDF_GETUINT32(q, slen);
- o4 += sizeof(uint32_t);
- if (o4 + l > left)
- goto out;
-
- inp[i].pi_str.s_len = l;
- inp[i].pi_str.s_buf = CAST(const char *,
- CAST(const void *, &q[o4]));
-
- DPRINTF(("o=%" SIZE_T_FORMAT "u l=%d(%"
- SIZE_T_FORMAT "u), t=%" SIZE_T_FORMAT
- "u s=%.*s\n", o4, l,
- CDF_ROUND(l, sizeof(l)),
- left, (int)l, inp[i].pi_str.s_buf));
-
- if (l & 1)
- l++;
-
- slen += l >> 1;
- o4 = slen * sizeof(uint32_t);
- }
- i--;
- break;
- case CDF_CLIPBOARD:
- if (inp[i].pi_type & CDF_VECTOR)
- goto unknown;
- break;
- default:
- unknown:
- memset(&inp[i].pi_val, 0, sizeof(inp[i].pi_val));
- DPRINTF(("Don't know how to deal with %#x\n",
- inp[i].pi_type));
- break;
- }
- }
- return 0;
-out:
- free(*info);
- *info = NULL;
- *count = 0;
- *maxcount = 0;
- errno = EFTYPE;
- return -1;
-}
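
For orientation, the layout the bounds checks above imply: right after the 8-byte section header comes a table of (id, offset) pairs, one per property, and each offset is counted from the start of the section header, which is why cdf_get_property_info_pos() subtracts 2 * sizeof(uint32_t) before adding the offset to p. A sketch in 32-bit words with assumed values, purely illustrative:

/* 32-bit words from the section start (values assumed):
 *   [0] sh_len           [1] sh_properties = 2
 *   [2] id of prop 0     [3] offset of prop 0 = 24 bytes --+
 *   [4] id of prop 1     [5] offset of prop 1              |
 *   [6] type of prop 0   [7...] value of prop 0  <---------+
 *   ...
 */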
-
-int
-cdf_unpack_summary_info(const cdf_stream_t *sst, const cdf_header_t *h,
- cdf_summary_info_header_t *ssi, cdf_property_info_t **info, size_t *count)
-{
- size_t maxcount;
- const cdf_summary_info_header_t *si =
- CAST(const cdf_summary_info_header_t *, sst->sst_tab);
- const cdf_section_declaration_t *sd =
- CAST(const cdf_section_declaration_t *, RCAST(const void *,
- RCAST(const char *, sst->sst_tab)
- + CDF_SECTION_DECLARATION_OFFSET));
-
- if (cdf_check_stream_offset(sst, h, si, sizeof(*si), __LINE__) == -1 ||
- cdf_check_stream_offset(sst, h, sd, sizeof(*sd), __LINE__) == -1)
- return -1;
- ssi->si_byte_order = CDF_TOLE2(si->si_byte_order);
- ssi->si_os_version = CDF_TOLE2(si->si_os_version);
- ssi->si_os = CDF_TOLE2(si->si_os);
- ssi->si_class = si->si_class;
- cdf_swap_class(&ssi->si_class);
- ssi->si_count = CDF_TOLE4(si->si_count);
- *count = 0;
- maxcount = 0;
- *info = NULL;
- if (cdf_read_property_info(sst, h, CDF_TOLE4(sd->sd_offset), info,
- count, &maxcount) == -1)
- return -1;
- return 0;
-}
-
-
-#define extract_catalog_field(t, f, l) \
- if (b + l + sizeof(cep->f) > eb) { \
- cep->ce_namlen = 0; \
- break; \
- } \
- memcpy(&cep->f, b + (l), sizeof(cep->f)); \
- ce[i].f = CAST(t, CDF_TOLE(cep->f))
-
-int
-cdf_unpack_catalog(const cdf_header_t *h, const cdf_stream_t *sst,
- cdf_catalog_t **cat)
-{
- size_t ss = cdf_check_stream(sst, h);
- const char *b = CAST(const char *, sst->sst_tab);
- const char *nb, *eb = b + ss * sst->sst_len;
- size_t nr, i, j, k;
- cdf_catalog_entry_t *ce;
- uint16_t reclen;
- const uint16_t *np;
-
- for (nr = 0;; nr++) {
- memcpy(&reclen, b, sizeof(reclen));
- reclen = CDF_TOLE2(reclen);
- if (reclen == 0)
- break;
- b += reclen;
- if (b > eb)
- break;
- }
- if (nr == 0)
- return -1;
- nr--;
- *cat = CAST(cdf_catalog_t *,
- CDF_MALLOC(sizeof(cdf_catalog_t) + nr * sizeof(*ce)));
- if (*cat == NULL)
- return -1;
- ce = (*cat)->cat_e;
- memset(ce, 0, nr * sizeof(*ce));
- b = CAST(const char *, sst->sst_tab);
- for (j = i = 0; i < nr; b += reclen) {
- cdf_catalog_entry_t *cep = &ce[j];
- uint16_t rlen;
-
- extract_catalog_field(uint16_t, ce_namlen, 0);
- extract_catalog_field(uint16_t, ce_num, 4);
- extract_catalog_field(uint64_t, ce_timestamp, 8);
- reclen = cep->ce_namlen;
-
- if (reclen < 14) {
- cep->ce_namlen = 0;
- continue;
- }
-
- cep->ce_namlen = __arraycount(cep->ce_name) - 1;
- rlen = reclen - 14;
- if (cep->ce_namlen > rlen)
- cep->ce_namlen = rlen;
-
- np = CAST(const uint16_t *, CAST(const void *, (b + 16)));
- nb = CAST(const char *, CAST(const void *,
- (np + cep->ce_namlen)));
- if (nb > eb) {
- cep->ce_namlen = 0;
- break;
- }
-
- for (k = 0; k < cep->ce_namlen; k++)
- cep->ce_name[k] = np[k]; /* XXX: CDF_TOLE2? */
- cep->ce_name[cep->ce_namlen] = 0;
- j = i;
- i++;
- }
- (*cat)->cat_num = j;
- return 0;
-}
-
-int
-cdf_print_classid(char *buf, size_t buflen, const cdf_classid_t *id)
-{
- return snprintf(buf, buflen, "%.8x-%.4x-%.4x-%.2x%.2x-"
- "%.2x%.2x%.2x%.2x%.2x%.2x", id->cl_dword, id->cl_word[0],
- id->cl_word[1], id->cl_two[0], id->cl_two[1], id->cl_six[0],
- id->cl_six[1], id->cl_six[2], id->cl_six[3], id->cl_six[4],
- id->cl_six[5]);
-}
-
-static const struct {
- uint32_t v;
- const char *n;
-} vn[] = {
- { CDF_PROPERTY_CODE_PAGE, "Code page" },
- { CDF_PROPERTY_TITLE, "Title" },
- { CDF_PROPERTY_SUBJECT, "Subject" },
- { CDF_PROPERTY_AUTHOR, "Author" },
- { CDF_PROPERTY_KEYWORDS, "Keywords" },
- { CDF_PROPERTY_COMMENTS, "Comments" },
- { CDF_PROPERTY_TEMPLATE, "Template" },
- { CDF_PROPERTY_LAST_SAVED_BY, "Last Saved By" },
- { CDF_PROPERTY_REVISION_NUMBER, "Revision Number" },
- { CDF_PROPERTY_TOTAL_EDITING_TIME, "Total Editing Time" },
- { CDF_PROPERTY_LAST_PRINTED, "Last Printed" },
- { CDF_PROPERTY_CREATE_TIME, "Create Time/Date" },
- { CDF_PROPERTY_LAST_SAVED_TIME, "Last Saved Time/Date" },
- { CDF_PROPERTY_NUMBER_OF_PAGES, "Number of Pages" },
- { CDF_PROPERTY_NUMBER_OF_WORDS, "Number of Words" },
- { CDF_PROPERTY_NUMBER_OF_CHARACTERS, "Number of Characters" },
- { CDF_PROPERTY_THUMBNAIL, "Thumbnail" },
- { CDF_PROPERTY_NAME_OF_APPLICATION, "Name of Creating Application" },
- { CDF_PROPERTY_SECURITY, "Security" },
- { CDF_PROPERTY_LOCALE_ID, "Locale ID" },
-};
-
-int
-cdf_print_property_name(char *buf, size_t bufsiz, uint32_t p)
-{
- size_t i;
-
- for (i = 0; i < __arraycount(vn); i++)
- if (vn[i].v == p)
- return snprintf(buf, bufsiz, "%s", vn[i].n);
- return snprintf(buf, bufsiz, "%#x", p);
-}
-
-int
-cdf_print_elapsed_time(char *buf, size_t bufsiz, cdf_timestamp_t ts)
-{
- int len = 0;
- int days, hours, mins, secs;
-
- ts /= CDF_TIME_PREC;
- secs = CAST(int, ts % 60);
- ts /= 60;
- mins = CAST(int, ts % 60);
- ts /= 60;
- hours = CAST(int, ts % 24);
- ts /= 24;
- days = CAST(int, ts);
-
- if (days) {
- len += snprintf(buf + len, bufsiz - len, "%dd+", days);
- if (CAST(size_t, len) >= bufsiz)
- return len;
- }
-
- if (days || hours) {
- len += snprintf(buf + len, bufsiz - len, "%.2d:", hours);
- if (CAST(size_t, len) >= bufsiz)
- return len;
- }
-
- len += snprintf(buf + len, bufsiz - len, "%.2d:", mins);
- if (CAST(size_t, len) >= bufsiz)
- return len;
-
- len += snprintf(buf + len, bufsiz - len, "%.2d", secs);
- return len;
-}
-
-char *
-cdf_u16tos8(char *buf, size_t len, const uint16_t *p)
-{
- size_t i;
- for (i = 0; i < len && p[i]; i++)
- buf[i] = CAST(char, p[i]);
- buf[i] = '\0';
- return buf;
-}
-
-#ifdef CDF_DEBUG
-void
-cdf_dump_header(const cdf_header_t *h)
-{
- size_t i;
-
-#define DUMP(a, b) (void)fprintf(stderr, "%40.40s = " a "\n", # b, h->h_ ## b)
-#define DUMP2(a, b) (void)fprintf(stderr, "%40.40s = " a " (" a ")\n", # b, \
- h->h_ ## b, 1 << h->h_ ## b)
- DUMP("%d", revision);
- DUMP("%d", version);
- DUMP("%#x", byte_order);
- DUMP2("%d", sec_size_p2);
- DUMP2("%d", short_sec_size_p2);
- DUMP("%d", num_sectors_in_sat);
- DUMP("%d", secid_first_directory);
- DUMP("%d", min_size_standard_stream);
- DUMP("%d", secid_first_sector_in_short_sat);
- DUMP("%d", num_sectors_in_short_sat);
- DUMP("%d", secid_first_sector_in_master_sat);
- DUMP("%d", num_sectors_in_master_sat);
- for (i = 0; i < __arraycount(h->h_master_sat); i++) {
- if (h->h_master_sat[i] == CDF_SECID_FREE)
- break;
- (void)fprintf(stderr, "%35.35s[%.3" SIZE_T_FORMAT "u] = %d\n",
- "master_sat", i, h->h_master_sat[i]);
- }
-}
-
-void
-cdf_dump_sat(const char *prefix, const cdf_sat_t *sat, size_t size)
-{
- size_t i, j, s = size / sizeof(cdf_secid_t);
-
- for (i = 0; i < sat->sat_len; i++) {
- (void)fprintf(stderr, "%s[%" SIZE_T_FORMAT "u]:\n%.6"
- SIZE_T_FORMAT "u: ", prefix, i, i * s);
- for (j = 0; j < s; j++) {
- (void)fprintf(stderr, "%5d, ",
- CDF_TOLE4(sat->sat_tab[s * i + j]));
- if ((j + 1) % 10 == 0)
- (void)fprintf(stderr, "\n%.6" SIZE_T_FORMAT
- "u: ", i * s + j + 1);
- }
- (void)fprintf(stderr, "\n");
- }
-}
-
-void
-cdf_dump(const void *v, size_t len)
-{
- size_t i, j;
- const unsigned char *p = v;
- char abuf[16];
-
- (void)fprintf(stderr, "%.4x: ", 0);
- for (i = 0, j = 0; i < len; i++, p++) {
- (void)fprintf(stderr, "%.2x ", *p);
- abuf[j++] = isprint(*p) ? *p : '.';
- if (j == 16) {
- j = 0;
- abuf[15] = '\0';
- (void)fprintf(stderr, "%s\n%.4" SIZE_T_FORMAT "x: ",
- abuf, i + 1);
- }
- }
- (void)fprintf(stderr, "\n");
-}
-
-void
-cdf_dump_stream(const cdf_stream_t *sst)
-{
- size_t ss = sst->sst_ss;
- cdf_dump(sst->sst_tab, ss * sst->sst_len);
-}
-
-void
-cdf_dump_dir(const cdf_info_t *info, const cdf_header_t *h,
- const cdf_sat_t *sat, const cdf_sat_t *ssat, const cdf_stream_t *sst,
- const cdf_dir_t *dir)
-{
- size_t i, j;
- cdf_directory_t *d;
- char name[__arraycount(d->d_name)];
- cdf_stream_t scn;
- struct timespec ts;
-
- static const char *types[] = { "empty", "user storage",
- "user stream", "lockbytes", "property", "root storage" };
-
- for (i = 0; i < dir->dir_len; i++) {
- char buf[26];
- d = &dir->dir_tab[i];
- for (j = 0; j < sizeof(name); j++)
- name[j] = (char)CDF_TOLE2(d->d_name[j]);
- (void)fprintf(stderr, "Directory %" SIZE_T_FORMAT "u: %s\n",
- i, name);
- if (d->d_type < __arraycount(types))
- (void)fprintf(stderr, "Type: %s\n", types[d->d_type]);
- else
- (void)fprintf(stderr, "Type: %d\n", d->d_type);
- (void)fprintf(stderr, "Color: %s\n",
- d->d_color ? "black" : "red");
- (void)fprintf(stderr, "Left child: %d\n", d->d_left_child);
- (void)fprintf(stderr, "Right child: %d\n", d->d_right_child);
- (void)fprintf(stderr, "Flags: %#x\n", d->d_flags);
- cdf_timestamp_to_timespec(&ts, d->d_created);
- (void)fprintf(stderr, "Created %s", cdf_ctime(&ts.tv_sec, buf));
- cdf_timestamp_to_timespec(&ts, d->d_modified);
- (void)fprintf(stderr, "Modified %s",
- cdf_ctime(&ts.tv_sec, buf));
- (void)fprintf(stderr, "Stream %d\n", d->d_stream_first_sector);
- (void)fprintf(stderr, "Size %d\n", d->d_size);
- switch (d->d_type) {
- case CDF_DIR_TYPE_USER_STORAGE:
- (void)fprintf(stderr, "Storage: %d\n", d->d_storage);
- break;
- case CDF_DIR_TYPE_USER_STREAM:
- if (sst == NULL)
- break;
- if (cdf_read_sector_chain(info, h, sat, ssat, sst,
- d->d_stream_first_sector, d->d_size, &scn) == -1) {
- warn("Can't read stream for %s at %d len %d",
- name, d->d_stream_first_sector, d->d_size);
- break;
- }
- cdf_dump_stream(&scn);
- free(scn.sst_tab);
- break;
- default:
- break;
- }
-
- }
-}
-
-void
-cdf_dump_property_info(const cdf_property_info_t *info, size_t count)
-{
- cdf_timestamp_t tp;
- struct timespec ts;
- char buf[64];
- size_t i, j;
-
- for (i = 0; i < count; i++) {
- cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
- (void)fprintf(stderr, "%" SIZE_T_FORMAT "u) %s: ", i, buf);
- switch (info[i].pi_type) {
- case CDF_NULL:
- break;
- case CDF_SIGNED16:
- (void)fprintf(stderr, "signed 16 [%hd]\n",
- info[i].pi_s16);
- break;
- case CDF_SIGNED32:
- (void)fprintf(stderr, "signed 32 [%d]\n",
- info[i].pi_s32);
- break;
- case CDF_UNSIGNED32:
- (void)fprintf(stderr, "unsigned 32 [%u]\n",
- info[i].pi_u32);
- break;
- case CDF_FLOAT:
- (void)fprintf(stderr, "float [%g]\n",
- info[i].pi_f);
- break;
- case CDF_DOUBLE:
- (void)fprintf(stderr, "double [%g]\n",
- info[i].pi_d);
- break;
- case CDF_LENGTH32_STRING:
- (void)fprintf(stderr, "string %u [%.*s]\n",
- info[i].pi_str.s_len,
- info[i].pi_str.s_len, info[i].pi_str.s_buf);
- break;
- case CDF_LENGTH32_WSTRING:
- (void)fprintf(stderr, "string %u [",
- info[i].pi_str.s_len);
- for (j = 0; j < info[i].pi_str.s_len - 1; j++)
- (void)fputc(info[i].pi_str.s_buf[j << 1], stderr);
- (void)fprintf(stderr, "]\n");
- break;
- case CDF_FILETIME:
- tp = info[i].pi_tp;
- if (tp < 1000000000000000LL) {
- cdf_print_elapsed_time(buf, sizeof(buf), tp);
- (void)fprintf(stderr, "timestamp %s\n", buf);
- } else {
- char tbuf[26];
- cdf_timestamp_to_timespec(&ts, tp);
- (void)fprintf(stderr, "timestamp %s",
- cdf_ctime(&ts.tv_sec, tbuf));
- }
- break;
- case CDF_CLIPBOARD:
- (void)fprintf(stderr, "CLIPBOARD %u\n", info[i].pi_u32);
- break;
- default:
- DPRINTF(("Don't know how to deal with %#x\n",
- info[i].pi_type));
- break;
- }
- }
-}
-
-
-void
-cdf_dump_summary_info(const cdf_header_t *h, const cdf_stream_t *sst)
-{
- char buf[128];
- cdf_summary_info_header_t ssi;
- cdf_property_info_t *info;
- size_t count;
-
- (void)&h;
- if (cdf_unpack_summary_info(sst, h, &ssi, &info, &count) == -1)
- return;
- (void)fprintf(stderr, "Endian: %#x\n", ssi.si_byte_order);
- (void)fprintf(stderr, "Os Version %d.%d\n", ssi.si_os_version & 0xff,
- ssi.si_os_version >> 8);
- (void)fprintf(stderr, "Os %d\n", ssi.si_os);
- cdf_print_classid(buf, sizeof(buf), &ssi.si_class);
- (void)fprintf(stderr, "Class %s\n", buf);
- (void)fprintf(stderr, "Count %d\n", ssi.si_count);
- cdf_dump_property_info(info, count);
- free(info);
-}
-
-
-void
-cdf_dump_catalog(const cdf_header_t *h, const cdf_stream_t *sst)
-{
-	cdf_catalog_t *cat;
-	if (cdf_unpack_catalog(h, sst, &cat) == -1)
-		return;
-	const cdf_catalog_entry_t *ce = cat->cat_e;
- struct timespec ts;
- char tbuf[64], sbuf[256];
- size_t i;
-
- printf("Catalog:\n");
- for (i = 0; i < cat->cat_num; i++) {
- cdf_timestamp_to_timespec(&ts, ce[i].ce_timestamp);
- printf("\t%d %s %s", ce[i].ce_num,
- cdf_u16tos8(sbuf, ce[i].ce_namlen, ce[i].ce_name),
- cdf_ctime(&ts.tv_sec, tbuf));
- }
- free(cat);
-}
-
-#endif
-
-#ifdef TEST
-int
-main(int argc, char *argv[])
-{
- int i;
- cdf_header_t h;
- cdf_sat_t sat, ssat;
- cdf_stream_t sst, scn;
- cdf_dir_t dir;
- cdf_info_t info;
- const cdf_directory_t *root;
-#ifdef __linux__
-#define getprogname() __progname
- extern char *__progname;
-#endif
- if (argc < 2) {
- (void)fprintf(stderr, "Usage: %s <filename>\n", getprogname());
- return -1;
- }
-
- info.i_buf = NULL;
- info.i_len = 0;
- for (i = 1; i < argc; i++) {
-		if ((info.i_fd = open(argv[i], O_RDONLY)) == -1)
-			err(EXIT_FAILURE, "Cannot open `%s'", argv[i]);
-
- if (cdf_read_header(&info, &h) == -1)
- err(EXIT_FAILURE, "Cannot read header");
-#ifdef CDF_DEBUG
- cdf_dump_header(&h);
-#endif
-
- if (cdf_read_sat(&info, &h, &sat) == -1)
- err(EXIT_FAILURE, "Cannot read sat");
-#ifdef CDF_DEBUG
- cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
-#endif
-
- if (cdf_read_ssat(&info, &h, &sat, &ssat) == -1)
- err(EXIT_FAILURE, "Cannot read ssat");
-#ifdef CDF_DEBUG
- cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
-#endif
-
- if (cdf_read_dir(&info, &h, &sat, &dir) == -1)
- err(EXIT_FAILURE, "Cannot read dir");
-
- if (cdf_read_short_stream(&info, &h, &sat, &dir, &sst, &root)
- == -1)
- err(EXIT_FAILURE, "Cannot read short stream");
-#ifdef CDF_DEBUG
- cdf_dump_stream(&sst);
-#endif
-
-#ifdef CDF_DEBUG
- cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
-#endif
-
-
- if (cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
- &scn) == -1)
- warn("Cannot read summary info");
-#ifdef CDF_DEBUG
- else
- cdf_dump_summary_info(&h, &scn);
-#endif
- if (cdf_read_user_stream(&info, &h, &sat, &ssat, &sst,
- &dir, "Catalog", &scn) == -1)
- warn("Cannot read catalog");
-#ifdef CDF_DEBUG
- else
- cdf_dump_catalog(&h, &scn);
-#endif
-
- (void)close(info.i_fd);
- }
-
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/cdf.h b/contrib/libs/libmagic/src/cdf.h
deleted file mode 100644
index 05056668fb..0000000000
--- a/contrib/libs/libmagic/src/cdf.h
+++ /dev/null
@@ -1,353 +0,0 @@
-/*-
- * Copyright (c) 2008 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*
- * Parse Composite Document Files, the format used in Microsoft Office
- * document files before they switched to zipped XML.
- * Info from: http://sc.openoffice.org/compdocfileformat.pdf
- *
- * N.B. This is the "Composite Document File" format, and not the
- * "Compound Document Format", nor the "Channel Definition Format".
- */
-
-#ifndef _H_CDF_
-#define _H_CDF_
-
-#ifdef WIN32
-#include <winsock2.h>
-#define timespec timeval
-#define tv_nsec tv_usec
-#endif
-#ifdef __DJGPP__
-#define timespec timeval
-#define tv_nsec tv_usec
-#endif
-
-typedef int32_t cdf_secid_t;
-
-#define CDF_LOOP_LIMIT 10000
-#define CDF_ELEMENT_LIMIT 100000
-
-#define CDF_SECID_NULL 0
-#define CDF_SECID_FREE -1
-#define CDF_SECID_END_OF_CHAIN -2
-#define CDF_SECID_SECTOR_ALLOCATION_TABLE -3
-#define CDF_SECID_MASTER_SECTOR_ALLOCATION_TABLE -4
-
-typedef struct {
- uint64_t h_magic;
-#define CDF_MAGIC 0xE11AB1A1E011CFD0LL
- uint64_t h_uuid[2];
- uint16_t h_revision;
- uint16_t h_version;
- uint16_t h_byte_order;
- uint16_t h_sec_size_p2;
- uint16_t h_short_sec_size_p2;
- uint8_t h_unused0[10];
- uint32_t h_num_sectors_in_sat;
- uint32_t h_secid_first_directory;
- uint8_t h_unused1[4];
- uint32_t h_min_size_standard_stream;
- cdf_secid_t h_secid_first_sector_in_short_sat;
- uint32_t h_num_sectors_in_short_sat;
- cdf_secid_t h_secid_first_sector_in_master_sat;
- uint32_t h_num_sectors_in_master_sat;
- cdf_secid_t h_master_sat[436/4];
-} cdf_header_t;
-
-#define CDF_SEC_SIZE(h) CAST(size_t, 1 << (h)->h_sec_size_p2)
-#define CDF_SEC_POS(h, secid) (CDF_SEC_SIZE(h) + (secid) * CDF_SEC_SIZE(h))
-#define CDF_SHORT_SEC_SIZE(h) CAST(size_t, 1 << (h)->h_short_sec_size_p2)
-#define CDF_SHORT_SEC_POS(h, secid) ((secid) * CDF_SHORT_SEC_SIZE(h))
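
A worked example of the position macros, assuming the common header values h_sec_size_p2 == 9 and h_short_sec_size_p2 == 6 (512- and 64-byte sectors); the numbers are illustrative only:

/* CDF_SEC_SIZE(h)         = 1 << 9 = 512
 * CDF_SEC_POS(h, 7)       = 512 + 7 * 512 = 4096   (with these values sector 0
 *                           begins at file offset 512, just past the header,
 *                           hence the leading CDF_SEC_SIZE(h) term)
 * CDF_SHORT_SEC_SIZE(h)   = 1 << 6 = 64
 * CDF_SHORT_SEC_POS(h, 7) = 7 * 64 = 448            (an offset inside the
 *                           short-sector container stream, not in the file)
 */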
-
-typedef int32_t cdf_dirid_t;
-#define CDF_DIRID_NULL -1
-
-typedef int64_t cdf_timestamp_t;
-#define CDF_BASE_YEAR 1601
-#define CDF_TIME_PREC 10000000
-
-typedef struct {
- uint16_t d_name[32];
- uint16_t d_namelen;
- uint8_t d_type;
-#define CDF_DIR_TYPE_EMPTY 0
-#define CDF_DIR_TYPE_USER_STORAGE 1
-#define CDF_DIR_TYPE_USER_STREAM 2
-#define CDF_DIR_TYPE_LOCKBYTES 3
-#define CDF_DIR_TYPE_PROPERTY 4
-#define CDF_DIR_TYPE_ROOT_STORAGE 5
- uint8_t d_color;
-#define CDF_DIR_COLOR_READ 0
-#define CDF_DIR_COLOR_BLACK 1
- cdf_dirid_t d_left_child;
- cdf_dirid_t d_right_child;
- cdf_dirid_t d_storage;
- uint64_t d_storage_uuid[2];
- uint32_t d_flags;
- cdf_timestamp_t d_created;
- cdf_timestamp_t d_modified;
- cdf_secid_t d_stream_first_sector;
- uint32_t d_size;
- uint32_t d_unused0;
-} cdf_directory_t;
-
-#define CDF_DIRECTORY_SIZE 128
-
-typedef struct {
- cdf_secid_t *sat_tab;
- size_t sat_len;
-} cdf_sat_t;
-
-typedef struct {
- cdf_directory_t *dir_tab;
- size_t dir_len;
-} cdf_dir_t;
-
-typedef struct {
- void *sst_tab;
- size_t sst_len; /* Number of sectors */
- size_t sst_dirlen; /* Directory sector size */
- size_t sst_ss; /* Sector size */
-} cdf_stream_t;
-
-typedef struct {
- uint32_t cl_dword;
- uint16_t cl_word[2];
- uint8_t cl_two[2];
- uint8_t cl_six[6];
-} cdf_classid_t;
-
-typedef struct {
- uint16_t si_byte_order;
- uint16_t si_zero;
- uint16_t si_os_version;
- uint16_t si_os;
- cdf_classid_t si_class;
- uint32_t si_count;
-} cdf_summary_info_header_t;
-
-#define CDF_SECTION_DECLARATION_OFFSET 0x1c
-
-typedef struct {
- cdf_classid_t sd_class;
- uint32_t sd_offset;
-} cdf_section_declaration_t;
-
-typedef struct {
- uint32_t sh_len;
- uint32_t sh_properties;
-} cdf_section_header_t;
-
-typedef struct {
- uint32_t pi_id;
- uint32_t pi_type;
- union {
- uint16_t _pi_u16;
- int16_t _pi_s16;
- uint32_t _pi_u32;
- int32_t _pi_s32;
- uint64_t _pi_u64;
- int64_t _pi_s64;
- cdf_timestamp_t _pi_tp;
- float _pi_f;
- double _pi_d;
- struct {
- uint32_t s_len;
- const char *s_buf;
- } _pi_str;
- } pi_val;
-#define pi_u64 pi_val._pi_u64
-#define pi_s64 pi_val._pi_s64
-#define pi_u32 pi_val._pi_u32
-#define pi_s32 pi_val._pi_s32
-#define pi_u16 pi_val._pi_u16
-#define pi_s16 pi_val._pi_s16
-#define pi_f pi_val._pi_f
-#define pi_d pi_val._pi_d
-#define pi_tp pi_val._pi_tp
-#define pi_str pi_val._pi_str
-} cdf_property_info_t;
-
-#define CDF_ROUND(val, by) (((val) + (by) - 1) & ~((by) - 1))
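
CDF_ROUND rounds val up to the next multiple of by, assuming by is a power of two; for example:

/* CDF_ROUND(13, 4) = (13 + 3) & ~3 = 16;  CDF_ROUND(16, 4) = 16 */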
-
-/* Variant type definitions */
-#define CDF_EMPTY 0x00000000
-#define CDF_NULL 0x00000001
-#define CDF_SIGNED16 0x00000002
-#define CDF_SIGNED32 0x00000003
-#define CDF_FLOAT 0x00000004
-#define CDF_DOUBLE 0x00000005
-#define CDF_CY 0x00000006
-#define CDF_DATE 0x00000007
-#define CDF_BSTR 0x00000008
-#define CDF_DISPATCH 0x00000009
-#define CDF_ERROR 0x0000000a
-#define CDF_BOOL 0x0000000b
-#define CDF_VARIANT 0x0000000c
-#define CDF_UNKNOWN 0x0000000d
-#define CDF_DECIMAL 0x0000000e
-#define CDF_SIGNED8 0x00000010
-#define CDF_UNSIGNED8 0x00000011
-#define CDF_UNSIGNED16 0x00000012
-#define CDF_UNSIGNED32 0x00000013
-#define CDF_SIGNED64 0x00000014
-#define CDF_UNSIGNED64 0x00000015
-#define CDF_INT 0x00000016
-#define CDF_UINT 0x00000017
-#define CDF_VOID 0x00000018
-#define CDF_HRESULT 0x00000019
-#define CDF_PTR 0x0000001a
-#define CDF_SAFEARRAY 0x0000001b
-#define CDF_CARRAY 0x0000001c
-#define CDF_USERDEFINED 0x0000001d
-#define CDF_LENGTH32_STRING 0x0000001e
-#define CDF_LENGTH32_WSTRING 0x0000001f
-#define CDF_FILETIME 0x00000040
-#define CDF_BLOB 0x00000041
-#define CDF_STREAM 0x00000042
-#define CDF_STORAGE 0x00000043
-#define CDF_STREAMED_OBJECT 0x00000044
-#define CDF_STORED_OBJECT 0x00000045
-#define CDF_BLOB_OBJECT 0x00000046
-#define CDF_CLIPBOARD 0x00000047
-#define CDF_CLSID 0x00000048
-#define CDF_VECTOR 0x00001000
-#define CDF_ARRAY 0x00002000
-#define CDF_BYREF 0x00004000
-#define CDF_RESERVED 0x00008000
-#define CDF_ILLEGAL 0x0000ffff
-#define CDF_ILLEGALMASKED 0x00000fff
-#define CDF_TYPEMASK 0x00000fff
-
-#define CDF_PROPERTY_CODE_PAGE 0x00000001
-#define CDF_PROPERTY_TITLE 0x00000002
-#define CDF_PROPERTY_SUBJECT 0x00000003
-#define CDF_PROPERTY_AUTHOR 0x00000004
-#define CDF_PROPERTY_KEYWORDS 0x00000005
-#define CDF_PROPERTY_COMMENTS 0x00000006
-#define CDF_PROPERTY_TEMPLATE 0x00000007
-#define CDF_PROPERTY_LAST_SAVED_BY 0x00000008
-#define CDF_PROPERTY_REVISION_NUMBER 0x00000009
-#define CDF_PROPERTY_TOTAL_EDITING_TIME 0x0000000a
-#define CDF_PROPERTY_LAST_PRINTED		0x0000000b
-#define CDF_PROPERTY_CREATE_TIME 0x0000000c
-#define CDF_PROPERTY_LAST_SAVED_TIME 0x0000000d
-#define CDF_PROPERTY_NUMBER_OF_PAGES 0x0000000e
-#define CDF_PROPERTY_NUMBER_OF_WORDS 0x0000000f
-#define CDF_PROPERTY_NUMBER_OF_CHARACTERS 0x00000010
-#define CDF_PROPERTY_THUMBNAIL 0x00000011
-#define CDF_PROPERTY_NAME_OF_APPLICATION 0x00000012
-#define CDF_PROPERTY_SECURITY 0x00000013
-#define CDF_PROPERTY_LOCALE_ID 0x80000000
-
-typedef struct {
- int i_fd;
- const unsigned char *i_buf;
- size_t i_len;
-} cdf_info_t;
-
-
-typedef struct {
- uint16_t ce_namlen;
- uint32_t ce_num;
- uint64_t ce_timestamp;
- uint16_t ce_name[256];
-} cdf_catalog_entry_t;
-
-typedef struct {
- size_t cat_num;
- cdf_catalog_entry_t cat_e[1];
-} cdf_catalog_t;
-
-struct timespec;
-int cdf_timestamp_to_timespec(struct timespec *, cdf_timestamp_t);
-int cdf_timespec_to_timestamp(cdf_timestamp_t *, const struct timespec *);
-int cdf_read_header(const cdf_info_t *, cdf_header_t *);
-void cdf_swap_header(cdf_header_t *);
-void cdf_unpack_header(cdf_header_t *, char *);
-void cdf_swap_dir(cdf_directory_t *);
-void cdf_unpack_dir(cdf_directory_t *, char *);
-void cdf_swap_class(cdf_classid_t *);
-ssize_t cdf_read_sector(const cdf_info_t *, void *, size_t, size_t,
- const cdf_header_t *, cdf_secid_t);
-ssize_t cdf_read_short_sector(const cdf_stream_t *, void *, size_t, size_t,
- const cdf_header_t *, cdf_secid_t);
-int cdf_read_sat(const cdf_info_t *, cdf_header_t *, cdf_sat_t *);
-size_t cdf_count_chain(const cdf_sat_t *, cdf_secid_t, size_t);
-int cdf_read_long_sector_chain(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, cdf_secid_t, size_t, cdf_stream_t *);
-int cdf_read_short_sector_chain(const cdf_header_t *, const cdf_sat_t *,
- const cdf_stream_t *, cdf_secid_t, size_t, cdf_stream_t *);
-int cdf_read_sector_chain(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, const cdf_sat_t *, const cdf_stream_t *, cdf_secid_t,
- size_t, cdf_stream_t *);
-int cdf_read_dir(const cdf_info_t *, const cdf_header_t *, const cdf_sat_t *,
- cdf_dir_t *);
-int cdf_read_ssat(const cdf_info_t *, const cdf_header_t *, const cdf_sat_t *,
- cdf_sat_t *);
-int cdf_read_short_stream(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, const cdf_dir_t *, cdf_stream_t *,
- const cdf_directory_t **);
-int cdf_read_property_info(const cdf_stream_t *, const cdf_header_t *, uint32_t,
- cdf_property_info_t **, size_t *, size_t *);
-int cdf_read_user_stream(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, const cdf_sat_t *, const cdf_stream_t *,
- const cdf_dir_t *, const char *, cdf_stream_t *);
-int cdf_find_stream(const cdf_dir_t *, const char *, int);
-int cdf_zero_stream(cdf_stream_t *);
-int cdf_read_doc_summary_info(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, const cdf_sat_t *, const cdf_stream_t *,
- const cdf_dir_t *, cdf_stream_t *);
-int cdf_read_summary_info(const cdf_info_t *, const cdf_header_t *,
- const cdf_sat_t *, const cdf_sat_t *, const cdf_stream_t *,
- const cdf_dir_t *, cdf_stream_t *);
-int cdf_unpack_summary_info(const cdf_stream_t *, const cdf_header_t *,
- cdf_summary_info_header_t *, cdf_property_info_t **, size_t *);
-int cdf_unpack_catalog(const cdf_header_t *, const cdf_stream_t *,
- cdf_catalog_t **);
-int cdf_print_classid(char *, size_t, const cdf_classid_t *);
-int cdf_print_property_name(char *, size_t, uint32_t);
-int cdf_print_elapsed_time(char *, size_t, cdf_timestamp_t);
-uint16_t cdf_tole2(uint16_t);
-uint32_t cdf_tole4(uint32_t);
-uint64_t cdf_tole8(uint64_t);
-char *cdf_ctime(const time_t *, char *);
-char *cdf_u16tos8(char *, size_t, const uint16_t *);
-
-#ifdef CDF_DEBUG
-void cdf_dump_header(const cdf_header_t *);
-void cdf_dump_sat(const char *, const cdf_sat_t *, size_t);
-void cdf_dump(const void *, size_t);
-void cdf_dump_stream(const cdf_stream_t *);
-void cdf_dump_dir(const cdf_info_t *, const cdf_header_t *, const cdf_sat_t *,
- const cdf_sat_t *, const cdf_stream_t *, const cdf_dir_t *);
-void cdf_dump_property_info(const cdf_property_info_t *, size_t);
-void cdf_dump_summary_info(const cdf_header_t *, const cdf_stream_t *);
-void cdf_dump_catalog(const cdf_header_t *, const cdf_stream_t *);
-#endif
-
-
-#endif /* _H_CDF_ */
diff --git a/contrib/libs/libmagic/src/cdf_time.c b/contrib/libs/libmagic/src/cdf_time.c
deleted file mode 100644
index 56eda5ecda..0000000000
--- a/contrib/libs/libmagic/src/cdf_time.c
+++ /dev/null
@@ -1,203 +0,0 @@
-/*-
- * Copyright (c) 2008 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: cdf_time.c,v 1.24 2023/07/17 15:54:44 christos Exp $")
-#endif
-
-#include <time.h>
-#ifdef TEST
-#include <err.h>
-#endif
-#include <string.h>
-
-#include "cdf.h"
-
-#define isleap(y) ((((y) % 4) == 0) && \
- ((((y) % 100) != 0) || (((y) % 400) == 0)))
-
-static const int mdays[] = {
- 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
-};
-
-/*
- * Return the number of days between jan 01 1601 and jan 01 of year.
- */
-static int
-cdf_getdays(int year)
-{
- int days = 0;
- int y;
-
- for (y = CDF_BASE_YEAR; y < year; y++)
- days += isleap(y) + 365;
-
- return days;
-}
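
Two hand-checkable values for the loop above (ordinary Gregorian leap-year counting, using the isleap() rule defined earlier):

/* cdf_getdays(1602) = 365                 (1601 is not a leap year)
 * cdf_getdays(1605) = 4 * 365 + 1 = 1461  (1604 is divisible by 4, hence leap)
 */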
-
-/*
- * Return the day within the month
- */
-static int
-cdf_getday(int year, int days)
-{
- size_t m;
-
- for (m = 0; m < __arraycount(mdays); m++) {
- int sub = mdays[m] + (m == 1 && isleap(year));
- if (days < sub)
- return days;
- days -= sub;
- }
- return days;
-}
-
-/*
- * Return the 0...11 month number.
- */
-static int
-cdf_getmonth(int year, int days)
-{
- size_t m;
-
- for (m = 0; m < __arraycount(mdays); m++) {
- days -= mdays[m];
- if (m == 1 && isleap(year))
- days--;
- if (days <= 0)
- return CAST(int, m);
- }
- return CAST(int, m);
-}
-
-int
-cdf_timestamp_to_timespec(struct timespec *ts, cdf_timestamp_t t)
-{
- struct tm tm;
-#ifdef HAVE_STRUCT_TM_TM_ZONE
- static char UTC[] = "UTC";
-#endif
- int rdays;
-
- /* Unit is 100's of nanoseconds */
- ts->tv_nsec = (t % CDF_TIME_PREC) * 100;
-
- t /= CDF_TIME_PREC;
- tm.tm_sec = CAST(int, t % 60);
- t /= 60;
-
- tm.tm_min = CAST(int, t % 60);
- t /= 60;
-
- tm.tm_hour = CAST(int, t % 24);
- t /= 24;
-
- /* XXX: Approx */
- tm.tm_year = CAST(int, CDF_BASE_YEAR + (t / 365));
-
- rdays = cdf_getdays(tm.tm_year);
- t -= rdays - 1;
- tm.tm_mday = cdf_getday(tm.tm_year, CAST(int, t));
- tm.tm_mon = cdf_getmonth(tm.tm_year, CAST(int, t));
- tm.tm_wday = 0;
- tm.tm_yday = 0;
- tm.tm_isdst = 0;
-#ifdef HAVE_STRUCT_TM_TM_GMTOFF
- tm.tm_gmtoff = 0;
-#endif
-#ifdef HAVE_STRUCT_TM_TM_ZONE
- tm.tm_zone = UTC;
-#endif
- tm.tm_year -= 1900;
- ts->tv_sec = mktime(&tm);
- if (ts->tv_sec == -1) {
- errno = EINVAL;
- return -1;
- }
- return 0;
-}
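
The unit conversion at the top is easiest to see with made-up numbers: a cdf_timestamp_t counts 100-nanosecond ticks, so CDF_TIME_PREC (10^7) ticks make one second.

/* Example (assumed value): t = 123 * 10000000 + 4567
 *   tv_nsec = (t % CDF_TIME_PREC) * 100 = 4567 * 100 = 456700 ns
 *   t / CDF_TIME_PREC = 123 whole seconds, which the function then splits
 *   into seconds, minutes, hours and days as shown above.
 */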
-
-int
-/*ARGSUSED*/
-cdf_timespec_to_timestamp(cdf_timestamp_t *t, const struct timespec *ts)
-{
-#ifndef __lint__
- (void)&t;
- (void)&ts;
-#endif
-#ifdef notyet
- struct tm tm;
- if (gmtime_r(&ts->ts_sec, &tm) == NULL) {
- errno = EINVAL;
- return -1;
- }
- *t = (ts->ts_nsec / 100) * CDF_TIME_PREC;
- *t += tm.tm_sec;
- *t += tm.tm_min * 60;
- *t += tm.tm_hour * 60 * 60;
- *t += tm.tm_mday * 60 * 60 * 24;
-#endif
- return 0;
-}
-
-char *
-cdf_ctime(const time_t *sec, char *buf)
-{
- char *ptr = *sec > MAX_CTIME ? NULL : ctime_r(sec, buf);
- if (ptr != NULL)
- return buf;
-#ifdef WIN32
- (void)snprintf(buf, 26, "*Bad* 0x%16.16I64x\n",
- CAST(long long, *sec));
-#else
- (void)snprintf(buf, 26, "*Bad* %#16.16" INT64_T_FORMAT "x\n",
- CAST(long long, *sec));
-#endif
- return buf;
-}
-
-
-#ifdef TEST_TIME
-int
-main(int argc, char *argv[])
-{
- struct timespec ts;
-	char buf[26];	/* cdf_ctime() fills up to 26 bytes */
- static const cdf_timestamp_t tst = 0x01A5E403C2D59C00ULL;
- static const char *ref = "Sat Apr 23 01:30:00 1977";
- char *p, *q;
-
- cdf_timestamp_to_timespec(&ts, tst);
- p = cdf_ctime(&ts.tv_sec, buf);
- if ((q = strchr(p, '\n')) != NULL)
- *q = '\0';
- if (strcmp(ref, p) != 0)
- errx(1, "Error date %s != %s\n", ref, p);
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/compress.c b/contrib/libs/libmagic/src/compress.c
deleted file mode 100644
index 36cf85f455..0000000000
--- a/contrib/libs/libmagic/src/compress.c
+++ /dev/null
@@ -1,1227 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * compress routines:
- * zmagic() - returns 0 if not recognized, uncompresses and prints
- * information if recognized
- * uncompress(method, old, n, newch) - uncompress old into new,
- * using method, return sizeof new
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: compress.c,v 1.157 2023/05/21 15:59:58 christos Exp $")
-#endif
-
-#include "magic.h"
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#ifdef HAVE_SPAWN_H
-#include <spawn.h>
-#endif
-#include <string.h>
-#include <errno.h>
-#include <ctype.h>
-#include <stdarg.h>
-#include <signal.h>
-#ifndef HAVE_SIG_T
-typedef void (*sig_t)(int);
-#endif /* HAVE_SIG_T */
-#ifdef HAVE_SYS_IOCTL_H
-#include <sys/ioctl.h>
-#endif
-#ifdef HAVE_SYS_WAIT_H
-#include <sys/wait.h>
-#endif
-#if defined(HAVE_SYS_TIME_H)
-#include <sys/time.h>
-#endif
-
-#if defined(HAVE_ZLIB_H) && defined(ZLIBSUPPORT)
-#define BUILTIN_DECOMPRESS
-#include <zlib.h>
-#endif
-
-#if defined(HAVE_BZLIB_H) && defined(BZLIBSUPPORT)
-#define BUILTIN_BZLIB
-#error #include <bzlib.h>
-#endif
-
-#if defined(HAVE_LZMA_H) && defined(XZLIBSUPPORT)
-#define BUILTIN_XZLIB
-#error #include <lzma.h>
-#endif
-
-#if defined(HAVE_ZSTD_H) && defined(ZSTDLIBSUPPORT)
-#define BUILTIN_ZSTDLIB
-#error #include <zstd.h>
-#error #include <zstd_errors.h>
-#endif
-
-#if defined(HAVE_LZLIB_H) && defined(LZLIBSUPPORT)
-#define BUILTIN_LZLIB
-#error #include <lzlib.h>
-#endif
-
-#ifdef DEBUG
-int tty = -1;
-#define DPRINTF(...) do { \
- if (tty == -1) \
- tty = open("/dev/tty", O_RDWR); \
- if (tty == -1) \
- abort(); \
- dprintf(tty, __VA_ARGS__); \
-} while (/*CONSTCOND*/0)
-#else
-#define DPRINTF(...)
-#endif
-
-#ifdef ZLIBSUPPORT
-/*
- * The following Python code is not really used, because ZLIBSUPPORT is only
- * defined when we have a built-in zlib, and the built-in zlib handles that
- * case.  That is not true on Android, where we have zlib.h but not -lz.
- */
-static const char zlibcode[] =
- "import sys, zlib; sys.stdout.write(zlib.decompress(sys.stdin.read()))";
-
-static const char *zlib_args[] = { "python", "-c", zlibcode, NULL };
-
-static int
-zlibcmp(const unsigned char *buf)
-{
- unsigned short x = 1;
- unsigned char *s = CAST(unsigned char *, CAST(void *, &x));
-
- if ((buf[0] & 0xf) != 8 || (buf[0] & 0x80) != 0)
- return 0;
- if (s[0] != 1) /* endianness test */
- x = buf[0] | (buf[1] << 8);
- else
- x = buf[1] | (buf[0] << 8);
- if (x % 31)
- return 0;
- return 1;
-}
-#endif
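
The modulo-31 test above comes from the zlib framing (RFC 1950): the first two bytes, read as the big-endian value CMF*256 + FLG, must be a multiple of 31, and the low nibble of CMF must be 8 (deflate). A worked example with the most common header bytes (assumed input, not taken from this repository):

/* 0x78 0x9c:  (0x78 & 0x0f) == 8,  0x789c = 30876 = 31 * 996  -> accepted
 * 0x78 0x9d:  30877 % 31 == 1                                  -> rejected
 */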
-
-static int
-lzmacmp(const unsigned char *buf)
-{
- if (buf[0] != 0x5d || buf[1] || buf[2])
- return 0;
- if (buf[12] && buf[12] != 0xff)
- return 0;
- return 1;
-}
-
-#define gzip_flags "-cd"
-#define lzip_flags gzip_flags
-
-static const char *gzip_args[] = {
- "gzip", gzip_flags, NULL
-};
-static const char *uncompress_args[] = {
- "uncompress", "-c", NULL
-};
-static const char *bzip2_args[] = {
- "bzip2", "-cd", NULL
-};
-static const char *lzip_args[] = {
- "lzip", lzip_flags, NULL
-};
-static const char *xz_args[] = {
- "xz", "-cd", NULL
-};
-static const char *lrzip_args[] = {
- "lrzip", "-qdf", "-", NULL
-};
-static const char *lz4_args[] = {
- "lz4", "-cd", NULL
-};
-static const char *zstd_args[] = {
- "zstd", "-cd", NULL
-};
-
-#define do_zlib NULL
-#define do_bzlib NULL
-
-file_private const struct {
- union {
- const char *magic;
- int (*func)(const unsigned char *);
- } u;
- int maglen;
- const char **argv;
- void *unused;
-} compr[] = {
-#define METH_FROZEN 2
-#define METH_BZIP 7
-#define METH_XZ 9
-#define METH_LZIP 8
-#define METH_ZSTD 12
-#define METH_LZMA 13
-#define METH_ZLIB 14
- { { .magic = "\037\235" }, 2, gzip_args, NULL }, /* 0, compressed */
- /* Uncompress can get stuck; so use gzip first if we have it
- * Idea from Damien Clark, thanks! */
- { { .magic = "\037\235" }, 2, uncompress_args, NULL },/* 1, compressed */
- { { .magic = "\037\213" }, 2, gzip_args, do_zlib },/* 2, gzipped */
- { { .magic = "\037\236" }, 2, gzip_args, NULL }, /* 3, frozen */
- { { .magic = "\037\240" }, 2, gzip_args, NULL }, /* 4, SCO LZH */
- /* the standard pack utilities do not accept standard input */
- { { .magic = "\037\036" }, 2, gzip_args, NULL }, /* 5, packed */
- { { .magic = "PK\3\4" }, 4, gzip_args, NULL }, /* 6, pkziped */
- /* ...only first file examined */
- { { .magic = "BZh" }, 3, bzip2_args, do_bzlib },/* 7, bzip2-ed */
- { { .magic = "LZIP" }, 4, lzip_args, NULL }, /* 8, lzip-ed */
- { { .magic = "\3757zXZ\0" },6, xz_args, NULL }, /* 9, XZ Util */
- { { .magic = "LRZI" }, 4, lrzip_args, NULL }, /* 10, LRZIP */
- { { .magic = "\004\"M\030" },4, lz4_args, NULL }, /* 11, LZ4 */
- { { .magic = "\x28\xB5\x2F\xFD" }, 4, zstd_args, NULL },/* 12, zstd */
- { { .func = lzmacmp }, -13, xz_args, NULL }, /* 13, lzma */
-#ifdef ZLIBSUPPORT
- { { .func = zlibcmp }, -2, zlib_args, NULL }, /* 14, zlib */
-#endif
-};
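
Entries with a positive maglen are literal magic prefixes matched with memcmp(); negative maglen entries delegate to the matcher function stored in the union, as file_zmagic() below does. A minimal standalone sketch of the prefix half of that dispatch, with a hypothetical two-entry table:

#include <stdio.h>
#include <string.h>

/* Hypothetical reduced table: just the gzip and zstd magic prefixes. */
static const struct { const char *magic; size_t maglen; const char *name; } toy[] = {
	{ "\037\213",		2, "gzip" },
	{ "\x28\xB5\x2F\xFD",	4, "zstd" },
};

int
main(void)
{
	const unsigned char buf[] = { 0x1f, 0x8b, 0x08, 0x00 };	/* start of a gzip stream */
	size_t i;

	for (i = 0; i < sizeof(toy) / sizeof(toy[0]); i++)
		if (sizeof(buf) >= toy[i].maglen &&
		    memcmp(buf, toy[i].magic, toy[i].maglen) == 0)
			printf("looks like %s\n", toy[i].name);
	return 0;
}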
-
-#define OKDATA 0
-#define NODATA 1
-#define ERRDATA 2
-
-file_private ssize_t swrite(int, const void *, size_t);
-#if HAVE_FORK
-file_private size_t ncompr = __arraycount(compr);
-file_private int uncompressbuf(int, size_t, size_t, int, const unsigned char *,
- unsigned char **, size_t *);
-#ifdef BUILTIN_DECOMPRESS
-file_private int uncompresszlib(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-file_private int uncompressgzipped(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-#endif
-#ifdef BUILTIN_BZLIB
-file_private int uncompressbzlib(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-#endif
-#ifdef BUILTIN_XZLIB
-file_private int uncompressxzlib(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-#endif
-#ifdef BUILTIN_ZSTDLIB
-file_private int uncompresszstd(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-#endif
-#ifdef BUILTIN_LZLIB
-file_private int uncompresslzlib(const unsigned char *, unsigned char **, size_t,
- size_t *, int);
-#endif
-
-static int makeerror(unsigned char **, size_t *, const char *, ...)
- __attribute__((__format__(__printf__, 3, 4)));
-file_private const char *methodname(size_t);
-
-file_private int
-format_decompression_error(struct magic_set *ms, size_t i, unsigned char *buf)
-{
- unsigned char *p;
- int mime = ms->flags & MAGIC_MIME;
-
- if (!mime)
- return file_printf(ms, "ERROR:[%s: %s]", methodname(i), buf);
-
- for (p = buf; *p; p++)
- if (!isalnum(*p))
- *p = '-';
-
- return file_printf(ms, "application/x-decompression-error-%s-%s",
- methodname(i), buf);
-}
-
-file_protected int
-file_zmagic(struct magic_set *ms, const struct buffer *b, const char *name)
-{
- unsigned char *newbuf = NULL;
- size_t i, nsz;
- char *rbuf;
- file_pushbuf_t *pb;
- int urv, prv, rv = 0;
- int mime = ms->flags & MAGIC_MIME;
- int fd = b->fd;
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- int sa_saved = 0;
- struct sigaction sig_act;
-
- if ((ms->flags & MAGIC_COMPRESS) == 0)
- return 0;
-
- for (i = 0; i < ncompr; i++) {
- int zm;
- if (nbytes < CAST(size_t, abs(compr[i].maglen)))
- continue;
- if (compr[i].maglen < 0) {
- zm = (*compr[i].u.func)(buf);
- } else {
- zm = memcmp(buf, compr[i].u.magic,
- CAST(size_t, compr[i].maglen)) == 0;
- }
-
- if (!zm)
- continue;
-
- /* Prevent SIGPIPE death if child dies unexpectedly */
- if (!sa_saved) {
-			/*
-			 * We could use sig_act for both the new and the old
-			 * action, but keeping a separate new_act leaves the
-			 * saved old handler intact for restoration below.
-			 */
- struct sigaction new_act;
- memset(&new_act, 0, sizeof(new_act));
- new_act.sa_handler = SIG_IGN;
- sa_saved = sigaction(SIGPIPE, &new_act, &sig_act) != -1;
- }
-
- nsz = nbytes;
- free(newbuf);
- urv = uncompressbuf(fd, ms->bytes_max, i,
- (ms->flags & MAGIC_NO_COMPRESS_FORK), buf, &newbuf, &nsz);
- DPRINTF("uncompressbuf = %d, %s, %" SIZE_T_FORMAT "u\n", urv,
- (char *)newbuf, nsz);
- switch (urv) {
- case OKDATA:
- case ERRDATA:
- ms->flags &= ~MAGIC_COMPRESS;
- if (urv == ERRDATA)
- prv = format_decompression_error(ms, i, newbuf);
- else
- prv = file_buffer(ms, -1, NULL, name, newbuf,
- nsz);
- if (prv == -1)
- goto error;
- rv = 1;
- if ((ms->flags & MAGIC_COMPRESS_TRANSP) != 0)
- goto out;
- if (mime != MAGIC_MIME && mime != 0)
- goto out;
- if ((file_printf(ms,
- mime ? " compressed-encoding=" : " (")) == -1)
- goto error;
- if ((pb = file_push_buffer(ms)) == NULL)
- goto error;
- /*
- * XXX: If file_buffer fails here, we overwrite
- * the compressed text. FIXME.
- */
- if (file_buffer(ms, -1, NULL, NULL, buf, nbytes) == -1)
- {
- if (file_pop_buffer(ms, pb) != NULL)
- abort();
- goto error;
- }
- if ((rbuf = file_pop_buffer(ms, pb)) != NULL) {
- if (file_printf(ms, "%s", rbuf) == -1) {
- free(rbuf);
- goto error;
- }
- free(rbuf);
- }
- if (!mime && file_printf(ms, ")") == -1)
- goto error;
- /*FALLTHROUGH*/
- case NODATA:
- break;
- default:
- abort();
- /*NOTREACHED*/
- error:
- rv = -1;
- break;
- }
- }
-out:
- DPRINTF("rv = %d\n", rv);
-
- if (sa_saved && sig_act.sa_handler != SIG_IGN)
- (void)sigaction(SIGPIPE, &sig_act, NULL);
-
- free(newbuf);
- ms->flags |= MAGIC_COMPRESS;
- DPRINTF("Zmagic returns %d\n", rv);
- return rv;
-}
-#endif
-/*
- * `safe' write for sockets and pipes.
- */
-file_private ssize_t
-swrite(int fd, const void *buf, size_t n)
-{
- ssize_t rv;
- size_t rn = n;
-
- do
- switch (rv = write(fd, buf, n)) {
- case -1:
- if (errno == EINTR)
- continue;
- return -1;
- default:
- n -= rv;
- buf = CAST(const char *, buf) + rv;
- break;
- }
- while (n > 0);
- return rn;
-}
-
-
-/*
- * `safe' read for sockets and pipes.
- */
-file_protected ssize_t
-sread(int fd, void *buf, size_t n, int canbepipe __attribute__((__unused__)))
-{
- ssize_t rv;
-#if defined(FIONREAD) && !defined(__MINGW32__)
- int t = 0;
-#endif
- size_t rn = n;
-
- if (fd == STDIN_FILENO)
- goto nocheck;
-
-#if defined(FIONREAD) && !defined(__MINGW32__)
- if (canbepipe && (ioctl(fd, FIONREAD, &t) == -1 || t == 0)) {
-#ifdef FD_ZERO
- ssize_t cnt;
- for (cnt = 0;; cnt++) {
- fd_set check;
- struct timeval tout = {0, 100 * 1000};
- int selrv;
-
- FD_ZERO(&check);
- FD_SET(fd, &check);
-
- /*
- * Avoid soft deadlock: do not read if there
- * is nothing to read from sockets and pipes.
- */
- selrv = select(fd + 1, &check, NULL, NULL, &tout);
- if (selrv == -1) {
- if (errno == EINTR || errno == EAGAIN)
- continue;
- } else if (selrv == 0 && cnt >= 5) {
- return 0;
- } else
- break;
- }
-#endif
- (void)ioctl(fd, FIONREAD, &t);
- }
-
- if (t > 0 && CAST(size_t, t) < n) {
- n = t;
- rn = n;
- }
-#endif
-
-nocheck:
- do
- switch ((rv = read(fd, buf, n))) {
- case -1:
- if (errno == EINTR)
- continue;
- return -1;
- case 0:
- return rn - n;
- default:
- n -= rv;
- buf = CAST(char *, CCAST(void *, buf)) + rv;
- break;
- }
- while (n > 0);
- return rn;
-}
-
-file_protected int
-file_pipe2file(struct magic_set *ms, int fd, const void *startbuf,
- size_t nbytes)
-{
- char buf[4096];
- ssize_t r;
- int tfd;
-
-#ifdef WIN32
- const char *t;
- buf[0] = '\0';
- if ((t = getenv("TEMP")) != NULL)
- (void)strlcpy(buf, t, sizeof(buf));
- else if ((t = getenv("TMP")) != NULL)
- (void)strlcpy(buf, t, sizeof(buf));
- else if ((t = getenv("TMPDIR")) != NULL)
- (void)strlcpy(buf, t, sizeof(buf));
- if (buf[0] != '\0')
- (void)strlcat(buf, "/", sizeof(buf));
- (void)strlcat(buf, "file.XXXXXX", sizeof(buf));
-#else
- (void)strlcpy(buf, "/tmp/file.XXXXXX", sizeof(buf));
-#endif
-#ifndef HAVE_MKSTEMP
- {
- char *ptr = mktemp(buf);
- tfd = open(ptr, O_RDWR|O_TRUNC|O_EXCL|O_CREAT, 0600);
- r = errno;
- (void)unlink(ptr);
- errno = r;
- }
-#else
- {
- int te;
- mode_t ou = umask(0);
- tfd = mkstemp(buf);
- (void)umask(ou);
- te = errno;
- (void)unlink(buf);
- errno = te;
- }
-#endif
- if (tfd == -1) {
- file_error(ms, errno,
- "cannot create temporary file for pipe copy");
- return -1;
- }
-
- if (swrite(tfd, startbuf, nbytes) != CAST(ssize_t, nbytes))
- r = 1;
- else {
- while ((r = sread(fd, buf, sizeof(buf), 1)) > 0)
- if (swrite(tfd, buf, CAST(size_t, r)) != r)
- break;
- }
-
- switch (r) {
- case -1:
- file_error(ms, errno, "error copying from pipe to temp file");
- return -1;
- case 0:
- break;
- default:
- file_error(ms, errno, "error while writing to temp file");
- return -1;
- }
-
- /*
- * We duplicate the file descriptor, because fclose on a
- * tmpfile will delete the file, but any open descriptors
- * can still access the phantom inode.
- */
- if ((fd = dup2(tfd, fd)) == -1) {
- file_error(ms, errno, "could not dup descriptor for temp file");
- return -1;
- }
- (void)close(tfd);
- if (lseek(fd, CAST(off_t, 0), SEEK_SET) == CAST(off_t, -1)) {
- file_badseek(ms);
- return -1;
- }
- return fd;
-}
-#if HAVE_FORK
-#ifdef BUILTIN_DECOMPRESS
-
-#define FHCRC (1 << 1)
-#define FEXTRA (1 << 2)
-#define FNAME (1 << 3)
-#define FCOMMENT (1 << 4)
-
-
-file_private int
-uncompressgzipped(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int extra __attribute__((__unused__)))
-{
- unsigned char flg;
- size_t data_start = 10;
-
- if (*n < 4) {
- goto err;
- }
-
- flg = old[3];
-
- if (flg & FEXTRA) {
- if (data_start + 1 >= *n)
- goto err;
- data_start += 2 + old[data_start] + old[data_start + 1] * 256;
- }
- if (flg & FNAME) {
- while(data_start < *n && old[data_start])
- data_start++;
- data_start++;
- }
- if (flg & FCOMMENT) {
- while(data_start < *n && old[data_start])
- data_start++;
- data_start++;
- }
- if (flg & FHCRC)
- data_start += 2;
-
- if (data_start >= *n)
- goto err;
-
- *n -= data_start;
- old += data_start;
- return uncompresszlib(old, newch, bytes_max, n, 0);
-err:
- return makeerror(newch, n, "File too short");
-}
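For reference, a gzip member begins with a 10-byte fixed header (magic 0x1f 0x8b, compression method, FLG, mtime, XFL, OS) followed by the optional FEXTRA, FNAME, FCOMMENT and FHCRC fields whose flag bits are tested above. A minimal standalone sketch of the same offset computation (illustrative helper, not taken from the deleted file):

#include <stddef.h>

/* Returns the offset of the DEFLATE payload inside a gzip member, or 0 if
 * the header is truncated.  Mirrors the flag handling in uncompressgzipped():
 * 10 fixed bytes, then the optional FEXTRA, FNAME, FCOMMENT and FHCRC fields. */
static size_t
gzip_payload_offset(const unsigned char *p, size_t len)
{
	size_t off = 10;	/* magic(2) method(1) flg(1) mtime(4) xfl(1) os(1) */
	unsigned char flg;

	if (len < 10 || p[0] != 0x1f || p[1] != 0x8b)
		return 0;
	flg = p[3];
	if (flg & 0x04) {	/* FEXTRA: 2-byte little-endian length + data */
		if (off + 2 > len)
			return 0;
		off += 2 + p[off] + p[off + 1] * 256u;
	}
	if (flg & 0x08)		/* FNAME: NUL-terminated original file name */
		while (off < len && p[off++] != '\0')
			continue;
	if (flg & 0x10)		/* FCOMMENT: NUL-terminated comment */
		while (off < len && p[off++] != '\0')
			continue;
	if (flg & 0x02)		/* FHCRC: 16-bit header CRC */
		off += 2;
	return off < len ? off : 0;
}

With FLG containing only FNAME and an original name of "a.txt", for example, the payload starts at offset 10 + 6 = 16.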
-
-file_private int
-uncompresszlib(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int zlib)
-{
- int rc;
- z_stream z;
-
- DPRINTF("builtin zlib decompression\n");
- z.next_in = CCAST(Bytef *, old);
- z.avail_in = CAST(uint32_t, *n);
- z.next_out = *newch;
- z.avail_out = CAST(unsigned int, bytes_max);
- z.zalloc = Z_NULL;
- z.zfree = Z_NULL;
- z.opaque = Z_NULL;
-
- /* LINTED bug in header macro */
- rc = zlib ? inflateInit(&z) : inflateInit2(&z, -15);
- if (rc != Z_OK)
- goto err;
-
- rc = inflate(&z, Z_SYNC_FLUSH);
- if (rc != Z_OK && rc != Z_STREAM_END) {
- inflateEnd(&z);
- goto err;
- }
-
- *n = CAST(size_t, z.total_out);
- rc = inflateEnd(&z);
- if (rc != Z_OK)
- goto err;
-
- /* let's keep the nul-terminate tradition */
- (*newch)[*n] = '\0';
-
- return OKDATA;
-err:
- return makeerror(newch, n, "%s", z.msg ? z.msg : zError(rc));
-}
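The -15 passed to inflateInit2() above selects raw DEFLATE (RFC 1951): a negative windowBits value tells zlib to expect no zlib header and no Adler-32 trailer, which is why uncompressgzipped() strips the gzip wrapper by hand before calling uncompresszlib(..., 0), while zlib != 0 uses plain inflateInit() for zlib-wrapped (RFC 1950) input. Newer zlib versions also accept a windowBits of 15 + 16 to have inflate() consume the gzip framing itself. A minimal sketch of the two initialization modes (illustrative helper name, not from the deleted source):

#include <string.h>
#include <zlib.h>

/* Illustrative only: how windowBits selects the framing inflate() expects. */
static int
init_inflate(z_stream *z, int wrapped)
{
	memset(z, 0, sizeof(*z));	/* zalloc/zfree/opaque = Z_NULL */
	return wrapped
	    ? inflateInit(z)		/* 2-byte zlib header + Adler-32 trailer */
	    : inflateInit2(z, -15);	/* raw DEFLATE, no header or checksum */
}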
-#endif
-
-#ifdef BUILTIN_BZLIB
-file_private int
-uncompressbzlib(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int extra __attribute__((__unused__)))
-{
- int rc;
- bz_stream bz;
-
- DPRINTF("builtin bzlib decompression\n");
- memset(&bz, 0, sizeof(bz));
- rc = BZ2_bzDecompressInit(&bz, 0, 0);
- if (rc != BZ_OK)
- goto err;
-
- bz.next_in = CCAST(char *, RCAST(const char *, old));
- bz.avail_in = CAST(uint32_t, *n);
- bz.next_out = RCAST(char *, *newch);
- bz.avail_out = CAST(unsigned int, bytes_max);
-
- rc = BZ2_bzDecompress(&bz);
- if (rc != BZ_OK && rc != BZ_STREAM_END) {
- BZ2_bzDecompressEnd(&bz);
- goto err;
- }
-
- /* Assume byte_max is within 32bit */
- /* assert(bz.total_out_hi32 == 0); */
- *n = CAST(size_t, bz.total_out_lo32);
- rc = BZ2_bzDecompressEnd(&bz);
- if (rc != BZ_OK)
- goto err;
-
- /* let's keep the nul-terminate tradition */
- (*newch)[*n] = '\0';
-
- return OKDATA;
-err:
- return makeerror(newch, n, "bunzip error %d", rc);
-}
-#endif
-
-#ifdef BUILTIN_XZLIB
-file_private int
-uncompressxzlib(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int extra __attribute__((__unused__)))
-{
- int rc;
- lzma_stream xz;
-
- DPRINTF("builtin xzlib decompression\n");
- memset(&xz, 0, sizeof(xz));
- rc = lzma_auto_decoder(&xz, UINT64_MAX, 0);
- if (rc != LZMA_OK)
- goto err;
-
- xz.next_in = CCAST(const uint8_t *, old);
- xz.avail_in = CAST(uint32_t, *n);
- xz.next_out = RCAST(uint8_t *, *newch);
- xz.avail_out = CAST(unsigned int, bytes_max);
-
- rc = lzma_code(&xz, LZMA_RUN);
- if (rc != LZMA_OK && rc != LZMA_STREAM_END) {
- lzma_end(&xz);
- goto err;
- }
-
- *n = CAST(size_t, xz.total_out);
-
- lzma_end(&xz);
-
- /* let's keep the nul-terminate tradition */
- (*newch)[*n] = '\0';
-
- return OKDATA;
-err:
- return makeerror(newch, n, "unxz error %d", rc);
-}
-#endif
-
-#ifdef BUILTIN_ZSTDLIB
-file_private int
-uncompresszstd(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int extra __attribute__((__unused__)))
-{
- size_t rc;
- ZSTD_DStream *zstd;
- ZSTD_inBuffer in;
- ZSTD_outBuffer out;
-
- DPRINTF("builtin zstd decompression\n");
- if ((zstd = ZSTD_createDStream()) == NULL) {
- return makeerror(newch, n, "No ZSTD decompression stream, %s",
- strerror(errno));
- }
-
- rc = ZSTD_DCtx_reset(zstd, ZSTD_reset_session_only);
- if (ZSTD_isError(rc))
- goto err;
-
- in.src = CCAST(const void *, old);
- in.size = *n;
- in.pos = 0;
- out.dst = RCAST(void *, *newch);
- out.size = bytes_max;
- out.pos = 0;
-
- rc = ZSTD_decompressStream(zstd, &out, &in);
- if (ZSTD_isError(rc))
- goto err;
-
- *n = out.pos;
-
- ZSTD_freeDStream(zstd);
-
- /* let's keep the nul-terminate tradition */
- (*newch)[*n] = '\0';
-
- return OKDATA;
-err:
- ZSTD_freeDStream(zstd);
- return makeerror(newch, n, "zstd error %d", ZSTD_getErrorCode(rc));
-}
-#endif
-
-#ifdef BUILTIN_LZLIB
-file_private int
-uncompresslzlib(const unsigned char *old, unsigned char **newch,
- size_t bytes_max, size_t *n, int extra __attribute__((__unused__)))
-{
- enum LZ_Errno err;
- size_t old_remaining = *n;
- size_t new_remaining = bytes_max;
- size_t total_read = 0;
- unsigned char *bufp;
- struct LZ_Decoder *dec;
-
- bufp = *newch;
-
- DPRINTF("builtin lzlib decompression\n");
- dec = LZ_decompress_open();
- if (!dec) {
- return makeerror(newch, n, "unable to allocate LZ_Decoder");
- }
- if (LZ_decompress_errno(dec) != LZ_ok)
- goto err;
-
- for (;;) {
- // LZ_decompress_read() stops at member boundaries, so we may
- // have more than one successful read after writing all data
- // we have.
- if (old_remaining > 0) {
- int wr = LZ_decompress_write(dec, old, old_remaining);
- if (wr < 0)
- goto err;
- old_remaining -= wr;
- old += wr;
- }
-
- int rd = LZ_decompress_read(dec, bufp, new_remaining);
- if (rd > 0) {
- new_remaining -= rd;
- bufp += rd;
- total_read += rd;
- }
-
- if (rd < 0 || LZ_decompress_errno(dec) != LZ_ok)
- goto err;
- if (new_remaining == 0)
- break;
- if (old_remaining == 0 && rd == 0)
- break;
- }
-
- LZ_decompress_close(dec);
- *n = total_read;
-
- /* let's keep the nul-terminate tradition */
- *bufp = '\0';
-
- return OKDATA;
-err:
- err = LZ_decompress_errno(dec);
- LZ_decompress_close(dec);
- return makeerror(newch, n, "lzlib error: %s", LZ_strerror(err));
-}
-#endif
-
-
-static int
-makeerror(unsigned char **buf, size_t *len, const char *fmt, ...)
-{
- char *msg;
- va_list ap;
- int rv;
-
- DPRINTF("Makeerror %s\n", fmt);
- free(*buf);
- va_start(ap, fmt);
- rv = vasprintf(&msg, fmt, ap);
- va_end(ap);
- if (rv < 0) {
- DPRINTF("Makeerror failed");
- *buf = NULL;
- *len = 0;
- return NODATA;
- }
- *buf = RCAST(unsigned char *, msg);
- *len = strlen(msg);
- return ERRDATA;
-}
-
-static void
-closefd(int *fd, size_t i)
-{
- if (fd[i] == -1)
- return;
- (void) close(fd[i]);
- fd[i] = -1;
-}
-
-static void
-closep(int *fd)
-{
- size_t i;
- for (i = 0; i < 2; i++)
- closefd(fd, i);
-}
-
-static void
-movedesc(void *v, int i, int fd)
-{
- if (fd == i)
- return; /* "no dup was necessary" */
-#ifdef HAVE_POSIX_SPAWNP
- posix_spawn_file_actions_t *fa = RCAST(posix_spawn_file_actions_t *, v);
- posix_spawn_file_actions_adddup2(fa, fd, i);
- posix_spawn_file_actions_addclose(fa, fd);
-#else
- if (dup2(fd, i) == -1) {
- DPRINTF("dup(%d, %d) failed (%s)\n", fd, i, strerror(errno));
- exit(EXIT_FAILURE);
- }
- close(v ? fd : fd);
-#endif
-}
-
-static void
-closedesc(void *v, int fd)
-{
-#ifdef HAVE_POSIX_SPAWNP
- posix_spawn_file_actions_t *fa = RCAST(posix_spawn_file_actions_t *, v);
- posix_spawn_file_actions_addclose(fa, fd);
-#else
- close(v ? fd : fd);
-#endif
-}
-
-static void
-handledesc(void *v, int fd, int fdp[3][2])
-{
- if (fd != -1) {
- (void) lseek(fd, CAST(off_t, 0), SEEK_SET);
- movedesc(v, STDIN_FILENO, fd);
- } else {
- movedesc(v, STDIN_FILENO, fdp[STDIN_FILENO][0]);
- if (fdp[STDIN_FILENO][1] > 2)
- closedesc(v, fdp[STDIN_FILENO][1]);
- }
-
- file_clear_closexec(STDIN_FILENO);
-
-///FIXME: if one of the fdp[i][j] is 0 or 1, this can bomb spectacularly
- movedesc(v, STDOUT_FILENO, fdp[STDOUT_FILENO][1]);
- if (fdp[STDOUT_FILENO][0] > 2)
- closedesc(v, fdp[STDOUT_FILENO][0]);
-
- file_clear_closexec(STDOUT_FILENO);
-
- movedesc(v, STDERR_FILENO, fdp[STDERR_FILENO][1]);
- if (fdp[STDERR_FILENO][0] > 2)
- closedesc(v, fdp[STDERR_FILENO][0]);
-
- file_clear_closexec(STDERR_FILENO);
-}
-
-static pid_t
-writechild(int fd, const void *old, size_t n)
-{
- pid_t pid;
-
- /*
- * fork again, to avoid blocking because both
- * pipes filled
- */
- pid = fork();
- if (pid == -1) {
- DPRINTF("Fork failed (%s)\n", strerror(errno));
- return -1;
- }
- if (pid == 0) {
- /* child */
- if (swrite(fd, old, n) != CAST(ssize_t, n)) {
- DPRINTF("Write failed (%s)\n", strerror(errno));
- exit(EXIT_FAILURE);
- }
- exit(EXIT_SUCCESS);
- }
- /* parent */
- return pid;
-}
-
-static ssize_t
-filter_error(unsigned char *ubuf, ssize_t n)
-{
- char *p;
- char *buf;
-
- ubuf[n] = '\0';
- buf = RCAST(char *, ubuf);
- while (isspace(CAST(unsigned char, *buf)))
- buf++;
- DPRINTF("Filter error[[[%s]]]\n", buf);
- if ((p = strchr(CAST(char *, buf), '\n')) != NULL)
- *p = '\0';
- if ((p = strchr(CAST(char *, buf), ';')) != NULL)
- *p = '\0';
- if ((p = strrchr(CAST(char *, buf), ':')) != NULL) {
- ++p;
- while (isspace(CAST(unsigned char, *p)))
- p++;
- n = strlen(p);
- memmove(ubuf, p, CAST(size_t, n + 1));
- }
- DPRINTF("Filter error after[[[%s]]]\n", (char *)ubuf);
- if (islower(*ubuf))
- *ubuf = toupper(*ubuf);
- return n;
-}
-
-file_private const char *
-methodname(size_t method)
-{
- switch (method) {
-#ifdef BUILTIN_DECOMPRESS
- case METH_FROZEN:
- case METH_ZLIB:
- return "zlib";
-#endif
-#ifdef BUILTIN_BZLIB
- case METH_BZIP:
- return "bzlib";
-#endif
-#ifdef BUILTIN_XZLIB
- case METH_XZ:
- case METH_LZMA:
- return "xzlib";
-#endif
-#ifdef BUILTIN_ZSTDLIB
- case METH_ZSTD:
- return "zstd";
-#endif
-#ifdef BUILTIN_LZLIB
- case METH_LZIP:
- return "lzlib";
-#endif
- default:
- return compr[method].argv[0];
- }
-}
-
-file_private int (*
-getdecompressor(size_t method))(const unsigned char *, unsigned char **, size_t,
- size_t *, int)
-{
- switch (method) {
-#ifdef BUILTIN_DECOMPRESS
- case METH_FROZEN:
- return uncompressgzipped;
- case METH_ZLIB:
- return uncompresszlib;
-#endif
-#ifdef BUILTIN_BZLIB
- case METH_BZIP:
- return uncompressbzlib;
-#endif
-#ifdef BUILTIN_XZLIB
- case METH_XZ:
- case METH_LZMA:
- return uncompressxzlib;
-#endif
-#ifdef BUILTIN_ZSTDLIB
- case METH_ZSTD:
- return uncompresszstd;
-#endif
-#ifdef BUILTIN_LZLIB
- case METH_LZIP:
- return uncompresslzlib;
-#endif
- default:
- return NULL;
- }
-}
-
-file_private int
-uncompressbuf(int fd, size_t bytes_max, size_t method, int nofork,
- const unsigned char *old, unsigned char **newch, size_t* n)
-{
- int fdp[3][2];
- int status, rv, w;
- pid_t pid;
- pid_t writepid = -1;
- size_t i;
- ssize_t r, re;
- char *const *args;
-#ifdef HAVE_POSIX_SPAWNP
- posix_spawn_file_actions_t fa;
-#endif
- int (*decompress)(const unsigned char *, unsigned char **,
- size_t, size_t *, int) = getdecompressor(method);
-
- *newch = CAST(unsigned char *, malloc(bytes_max + 1));
- if (*newch == NULL)
- return makeerror(newch, n, "No buffer, %s", strerror(errno));
-
- if (decompress) {
- if (nofork) {
- return makeerror(newch, n,
- "Fork is required to uncompress, but disabled");
- }
- return (*decompress)(old, newch, bytes_max, n, 1);
- }
-
- (void)fflush(stdout);
- (void)fflush(stderr);
-
- for (i = 0; i < __arraycount(fdp); i++)
- fdp[i][0] = fdp[i][1] = -1;
-
- /*
- * There are multithreaded users who run magic_file()
- * from dozens of threads. If two parallel magic_file() calls
- * analyze two large compressed files, both will spawn
- * an uncompressing child here, which writes out uncompressed data.
- * We read some portion, then close the pipe, then waitpid() the child.
- * If uncompressed data is larger, child should get EPIPE and exit.
- * However, with *parallel* calls OTHER child may unintentionally
- * inherit pipe fds, thus keeping pipe open and making writes in
- * our child block instead of failing with EPIPE!
- * (For the bug to occur, two threads must mutually inherit their pipes,
-	 * and both must have large outputs, so it does not happen that often).
- * To avoid this, be sure to create pipes with O_CLOEXEC.
- */
- if ((fd == -1 && file_pipe_closexec(fdp[STDIN_FILENO]) == -1) ||
- file_pipe_closexec(fdp[STDOUT_FILENO]) == -1 ||
- file_pipe_closexec(fdp[STDERR_FILENO]) == -1) {
- closep(fdp[STDIN_FILENO]);
- closep(fdp[STDOUT_FILENO]);
- return makeerror(newch, n, "Cannot create pipe, %s",
- strerror(errno));
- }
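file_pipe_closexec() is defined elsewhere in the library; purely to illustrate the close-on-exec requirement described in the comment above (a sketch, not the actual implementation, and HAVE_PIPE2 is an illustrative configure macro):

#include <fcntl.h>
#include <unistd.h>

/* Create a pipe whose ends are not inherited across exec, so a child
 * spawned concurrently by another thread cannot keep the write side open
 * and defeat the EPIPE-based shutdown described above. */
static int
pipe_closexec(int fds[2])
{
#ifdef HAVE_PIPE2
	return pipe2(fds, O_CLOEXEC);
#else
	if (pipe(fds) == -1)
		return -1;
	(void)fcntl(fds[0], F_SETFD, FD_CLOEXEC);
	(void)fcntl(fds[1], F_SETFD, FD_CLOEXEC);
	return 0;
#endif
}

The standard descriptors that must survive the exec are re-enabled for inheritance afterwards via file_clear_closexec() in handledesc() above.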
-
- args = RCAST(char *const *, RCAST(intptr_t, compr[method].argv));
-#ifdef HAVE_POSIX_SPAWNP
- posix_spawn_file_actions_init(&fa);
-
- handledesc(&fa, fd, fdp);
-
- DPRINTF("Executing %s\n", compr[method].argv[0]);
- status = posix_spawnp(&pid, compr[method].argv[0], &fa, NULL,
- args, NULL);
-
- posix_spawn_file_actions_destroy(&fa);
-
- if (status == -1) {
- return makeerror(newch, n, "Cannot posix_spawn `%s', %s",
- compr[method].argv[0], strerror(errno));
- }
-#else
- /* For processes with large mapped virtual sizes, vfork
- * may be _much_ faster (10-100 times) than fork.
- */
- pid = vfork();
- if (pid == -1) {
- return makeerror(newch, n, "Cannot vfork, %s",
- strerror(errno));
- }
- if (pid == 0) {
- /* child */
- /* Note: we are after vfork, do not modify memory
- * in a way which confuses parent. In particular,
- * do not modify fdp[i][j].
- */
- handledesc(NULL, fd, fdp);
- DPRINTF("Executing %s\n", compr[method].argv[0]);
-
- (void)execvp(compr[method].argv[0], args);
- dprintf(STDERR_FILENO, "exec `%s' failed, %s",
- compr[method].argv[0], strerror(errno));
- _exit(EXIT_FAILURE); /* _exit(), not exit(), because of vfork */
- }
-#endif
- /* parent */
- /* Close write sides of child stdout/err pipes */
- for (i = 1; i < __arraycount(fdp); i++)
- closefd(fdp[i], 1);
- /* Write the buffer data to child stdin, if we don't have fd */
- if (fd == -1) {
- closefd(fdp[STDIN_FILENO], 0);
- writepid = writechild(fdp[STDIN_FILENO][1], old, *n);
- if (writepid == (pid_t)-1) {
- rv = makeerror(newch, n, "Write to child failed, %s",
- strerror(errno));
- DPRINTF("Write to child failed\n");
- goto err;
- }
- closefd(fdp[STDIN_FILENO], 1);
- }
-
- rv = OKDATA;
- r = sread(fdp[STDOUT_FILENO][0], *newch, bytes_max, 0);
- DPRINTF("read got %zd\n", r);
- if (r < 0) {
- rv = ERRDATA;
- DPRINTF("Read stdout failed %d (%s)\n", fdp[STDOUT_FILENO][0],
- strerror(errno));
- goto err;
- }
- if (CAST(size_t, r) == bytes_max) {
- /*
- * close fd so that the child exits with sigpipe and ignore
- * errors, otherwise we risk the child blocking and never
- * exiting.
- */
- DPRINTF("Closing stdout for bytes_max\n");
- closefd(fdp[STDOUT_FILENO], 0);
- goto ok;
- }
- if ((re = sread(fdp[STDERR_FILENO][0], *newch, bytes_max, 0)) > 0) {
- DPRINTF("Got stuff from stderr %s\n", *newch);
- rv = ERRDATA;
- r = filter_error(*newch, r);
- goto ok;
- }
- if (re == 0)
- goto ok;
- rv = makeerror(newch, n, "Read stderr failed, %s",
- strerror(errno));
- goto err;
-ok:
- *n = r;
- /* NUL terminate, as every buffer is handled here. */
- (*newch)[*n] = '\0';
-err:
- closefd(fdp[STDIN_FILENO], 1);
- closefd(fdp[STDOUT_FILENO], 0);
- closefd(fdp[STDERR_FILENO], 0);
-
- w = waitpid(pid, &status, 0);
-wait_err:
- if (w == -1) {
- rv = makeerror(newch, n, "Wait failed, %s", strerror(errno));
- DPRINTF("Child wait return %#x\n", status);
- } else if (!WIFEXITED(status)) {
- DPRINTF("Child not exited (%#x)\n", status);
- } else if (WEXITSTATUS(status) != 0) {
- DPRINTF("Child exited (%#x)\n", WEXITSTATUS(status));
- }
- if (writepid > 0) {
- /* _After_ we know decompressor has exited, our input writer
- * definitely will exit now (at worst, writing fails in it,
-		 * since output fd is closed now on the reading side).
- */
- w = waitpid(writepid, &status, 0);
- writepid = -1;
- goto wait_err;
- }
-
- closefd(fdp[STDIN_FILENO], 0); //why? it is already closed here!
- DPRINTF("Returning %p n=%" SIZE_T_FORMAT "u rv=%d\n", *newch, *n, rv);
-
- return rv;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/der.c b/contrib/libs/libmagic/src/der.c
deleted file mode 100644
index 3a036517d0..0000000000
--- a/contrib/libs/libmagic/src/der.c
+++ /dev/null
@@ -1,458 +0,0 @@
-/*-
- * Copyright (c) 2016 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-/*
- * DER (Distinguished Encoding Rules) Parser
- *
- * Sources:
- * https://en.wikipedia.org/wiki/X.690
- * http://fm4dd.com/openssl/certexamples.htm
- * http://blog.engelke.com/2014/10/17/parsing-ber-and-der-encoded-asn-1-objects/
- */
-#ifndef TEST_DER
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: der.c,v 1.27 2022/09/24 20:30:13 christos Exp $")
-#endif
-#else
-#define SIZE_T_FORMAT "z"
-#define CAST(a, b) ((a)(b))
-#endif
-
-#include <sys/types.h>
-
-#include <stdio.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-
-#ifndef TEST_DER
-#include "magic.h"
-#include "der.h"
-#else
-#include <sys/mman.h>
-#include <sys/stat.h>
-#include <err.h>
-#endif
-
-#define DER_BAD CAST(uint32_t, -1)
-
-#define DER_CLASS_UNIVERSAL 0
-#define DER_CLASS_APPLICATION 1
-#define DER_CLASS_CONTEXT 2
-#define DER_CLASS_PRIVATE 3
-#if defined(DEBUG_DER) || defined(TEST_DER)
-static const char der_class[] = "UACP";
-#endif
-
-#define DER_TYPE_PRIMITIVE 0
-#define DER_TYPE_CONSTRUCTED 1
-#if defined(DEBUG_DER) || defined(TEST_DER)
-static const char der_type[] = "PC";
-#endif
-
-#define DER_TAG_EOC 0x00
-#define DER_TAG_BOOLEAN 0x01
-#define DER_TAG_INTEGER 0x02
-#define DER_TAG_BIT_STRING		0x03
-#define DER_TAG_OCTET_STRING 0x04
-#define DER_TAG_NULL 0x05
-#define DER_TAG_OBJECT_IDENTIFIER 0x06
-#define DER_TAG_OBJECT_DESCRIPTOR 0x07
-#define DER_TAG_EXTERNAL 0x08
-#define DER_TAG_REAL 0x09
-#define DER_TAG_ENUMERATED 0x0a
-#define DER_TAG_EMBEDDED_PDV 0x0b
-#define DER_TAG_UTF8_STRING 0x0c
-#define DER_TAG_RELATIVE_OID 0x0d
-#define DER_TAG_TIME 0x0e
-#define DER_TAG_RESERVED_2 0x0f
-#define DER_TAG_SEQUENCE 0x10
-#define DER_TAG_SET 0x11
-#define DER_TAG_NUMERIC_STRING 0x12
-#define DER_TAG_PRINTABLE_STRING 0x13
-#define DER_TAG_T61_STRING 0x14
-#define DER_TAG_VIDEOTEX_STRING 0x15
-#define DER_TAG_IA5_STRING 0x16
-#define DER_TAG_UTCTIME 0x17
-#define DER_TAG_GENERALIZED_TIME 0x18
-#define DER_TAG_GRAPHIC_STRING 0x19
-#define DER_TAG_VISIBLE_STRING 0x1a
-#define DER_TAG_GENERAL_STRING 0x1b
-#define DER_TAG_UNIVERSAL_STRING 0x1c
-#define DER_TAG_CHARACTER_STRING 0x1d
-#define DER_TAG_BMP_STRING 0x1e
-#define DER_TAG_DATE 0x1f
-#define DER_TAG_TIME_OF_DAY 0x20
-#define DER_TAG_DATE_TIME 0x21
-#define DER_TAG_DURATION 0x22
-#define DER_TAG_OID_IRI 0x23
-#define DER_TAG_RELATIVE_OID_IRI 0x24
-#define DER_TAG_LAST 0x25
-
-static const char *der__tag[] = {
- "eoc", "bool", "int", "bit_str", "octet_str",
- "null", "obj_id", "obj_desc", "ext", "real",
- "enum", "embed", "utf8_str", "rel_oid", "time",
- "res2", "seq", "set", "num_str", "prt_str",
- "t61_str", "vid_str", "ia5_str", "utc_time", "gen_time",
- "gr_str", "vis_str", "gen_str", "univ_str", "char_str",
- "bmp_str", "date", "tod", "datetime", "duration",
- "oid-iri", "rel-oid-iri",
-};
-
-#ifdef DEBUG_DER
-#define DPRINTF(a) printf a
-#else
-#define DPRINTF(a)
-#endif
-
-#ifdef TEST_DER
-static uint8_t
-getclass(uint8_t c)
-{
- return c >> 6;
-}
-
-static uint8_t
-gettype(uint8_t c)
-{
- return (c >> 5) & 1;
-}
-#endif
-
-static uint32_t
-gettag(const uint8_t *c, size_t *p, size_t l)
-{
- uint32_t tag;
-
- if (*p >= l)
- return DER_BAD;
-
- tag = c[(*p)++] & 0x1f;
-
- if (tag != 0x1f)
- return tag;
-
- if (*p >= l)
- return DER_BAD;
-
- while (c[*p] >= 0x80) {
- tag = tag * 128 + c[(*p)++] - 0x80;
- if (*p >= l)
- return DER_BAD;
- }
- return tag;
-}
-
-/*
- * Read the length of a DER tag from the input.
- *
- * `c` is the input, `p` is an output parameter that specifies how much of the
- * input we consumed, and `l` is the maximum input length.
- *
- * Returns the length, or DER_BAD if the end of the input is reached or the
- * length exceeds the remaining input.
- */
-static uint32_t
-getlength(const uint8_t *c, size_t *p, size_t l)
-{
- uint8_t digits, i;
- size_t len;
- int is_onebyte_result;
-
- if (*p >= l) {
- DPRINTF(("%s:[1] %zu >= %zu\n", __func__, *p, l));
- return DER_BAD;
- }
-
- /*
- * Digits can either be 0b0 followed by the result, or 0b1
- * followed by the number of digits of the result. In either case,
- * we verify that we can read so many bytes from the input.
- */
- is_onebyte_result = (c[*p] & 0x80) == 0;
- digits = c[(*p)++] & 0x7f;
- if (*p + digits >= l) {
- DPRINTF(("%s:[2] %zu + %u >= %zu\n", __func__, *p, digits, l));
- return DER_BAD;
- }
-
- if (is_onebyte_result)
- return digits;
-
- /*
- * Decode len. We've already verified that we're allowed to read
- * `digits` bytes.
- */
- len = 0;
- for (i = 0; i < digits; i++)
- len = (len << 8) | c[(*p)++];
-
- if (len > UINT32_MAX - *p || *p + len > l) {
- DPRINTF(("%s:[3] bad len %zu + %zu >= %zu\n",
- __func__, *p, len, l));
- return DER_BAD;
- }
- return CAST(uint32_t, len);
-}
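To make the two length forms concrete, a small standalone example (illustrative code, not part of the deleted file; the bounds checks performed by getlength() are omitted for brevity):

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Decode a DER length: short form if the high bit of the first byte is
 * clear, otherwise the low 7 bits give the number of big-endian length
 * bytes that follow. */
static uint32_t
der_length(const uint8_t *c, size_t *p)
{
	uint32_t len;
	uint8_t i, digits = c[(*p)++];

	if ((digits & 0x80) == 0)	/* short form: the byte is the length */
		return digits;
	digits &= 0x7f;			/* long form: count of length bytes */
	for (len = 0, i = 0; i < digits; i++)
		len = (len << 8) | c[(*p)++];
	return len;
}

int
main(void)
{
	const uint8_t shortform[] = { 0x0f };			/* length 15 */
	const uint8_t longform[] = { 0x82, 0x01, 0xf4 };	/* length 500 */
	size_t p = 0;

	printf("%u\n", (unsigned)der_length(shortform, &p));	/* prints 15 */
	p = 0;
	printf("%u\n", (unsigned)der_length(longform, &p));	/* prints 500 */
	return 0;
}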
-
-static const char *
-der_tag(char *buf, size_t len, uint32_t tag)
-{
- if (tag < DER_TAG_LAST)
- strlcpy(buf, der__tag[tag], len);
- else
- snprintf(buf, len, "%#x", tag);
- return buf;
-}
-
-#ifndef TEST_DER
-static int
-der_data(char *buf, size_t blen, uint32_t tag, const void *q, uint32_t len)
-{
- uint32_t i;
- const uint8_t *d = CAST(const uint8_t *, q);
- switch (tag) {
- case DER_TAG_PRINTABLE_STRING:
- case DER_TAG_UTF8_STRING:
- case DER_TAG_IA5_STRING:
- return snprintf(buf, blen, "%.*s", len, RCAST(const char *, q));
- case DER_TAG_UTCTIME:
- if (len < 12)
- break;
- return snprintf(buf, blen,
- "20%c%c-%c%c-%c%c %c%c:%c%c:%c%c GMT", d[0], d[1], d[2],
- d[3], d[4], d[5], d[6], d[7], d[8], d[9], d[10], d[11]);
- default:
- break;
- }
-
- for (i = 0; i < len; i++) {
- uint32_t z = i << 1;
- if (z < blen - 2)
- snprintf(buf + z, blen - z, "%.2x", d[i]);
- }
- return len * 2;
-}
-
-int32_t
-der_offs(struct magic_set *ms, struct magic *m, size_t nbytes)
-{
- const uint8_t *b = RCAST(const uint8_t *, ms->search.s);
- size_t offs = 0, len = ms->search.s_len ? ms->search.s_len : nbytes;
-
- if (gettag(b, &offs, len) == DER_BAD) {
- DPRINTF(("%s: bad tag 1\n", __func__));
- return -1;
- }
- DPRINTF(("%s1: %u %" SIZE_T_FORMAT "u %d\n", __func__, ms->offset,
- offs, m->offset));
-
- uint32_t tlen = getlength(b, &offs, len);
- if (tlen == DER_BAD) {
- DPRINTF(("%s: bad tag 2\n", __func__));
- return -1;
- }
- DPRINTF(("%s2: %u %" SIZE_T_FORMAT "u %u\n", __func__, ms->offset,
- offs, tlen));
-
- offs += ms->offset + m->offset;
- DPRINTF(("cont_level = %d\n", m->cont_level));
-#ifdef DEBUG_DER
- size_t i;
- for (i = 0; i < m->cont_level; i++)
- printf("cont_level[%" SIZE_T_FORMAT "u] = %d\n", i,
- ms->c.li[i].off);
-#endif
- if (m->cont_level != 0) {
- if (offs + tlen > nbytes)
- return -1;
- ms->c.li[m->cont_level - 1].off = CAST(int, offs + tlen);
- DPRINTF(("cont_level[%u] = %d\n", m->cont_level - 1,
- ms->c.li[m->cont_level - 1].off));
- }
- return CAST(int32_t, offs);
-}
-
-int
-der_cmp(struct magic_set *ms, struct magic *m)
-{
- const uint8_t *b = RCAST(const uint8_t *, ms->search.s);
- const char *s = m->value.s;
- size_t offs = 0, len = ms->search.s_len;
- uint32_t tag, tlen;
- char buf[128];
-
- DPRINTF(("%s: compare %zu bytes\n", __func__, len));
-
- tag = gettag(b, &offs, len);
- if (tag == DER_BAD) {
- DPRINTF(("%s: bad tag 1\n", __func__));
- return -1;
- }
-
- DPRINTF(("%s1: %d %" SIZE_T_FORMAT "u %d\n", __func__, ms->offset,
- offs, m->offset));
-
- tlen = getlength(b, &offs, len);
- if (tlen == DER_BAD) {
- DPRINTF(("%s: bad tag 2\n", __func__));
- return -1;
- }
-
- der_tag(buf, sizeof(buf), tag);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "%s: tag %p got=%s exp=%s\n", __func__, b,
- buf, s);
- size_t slen = strlen(buf);
-
- if (strncmp(buf, s, slen) != 0) {
- DPRINTF(("%s: no string match %s != %s\n", __func__, buf, s));
- return 0;
- }
-
- s += slen;
-
-again:
- switch (*s) {
- case '\0':
- DPRINTF(("%s: EOF match\n", __func__));
- return 1;
- case '=':
- s++;
- goto val;
- default:
- if (!isdigit(CAST(unsigned char, *s))) {
- DPRINTF(("%s: no digit %c\n", __func__, *s));
- return 0;
- }
-
- slen = 0;
- do
- slen = slen * 10 + *s - '0';
- while (isdigit(CAST(unsigned char, *++s)));
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "%s: len %" SIZE_T_FORMAT "u %u\n",
- __func__, slen, tlen);
- if (tlen != slen) {
- DPRINTF(("%s: len %u != %zu\n", __func__, tlen, slen));
- return 0;
- }
- goto again;
- }
-val:
- DPRINTF(("%s: before data %" SIZE_T_FORMAT "u %u\n", __func__, offs,
- tlen));
- der_data(buf, sizeof(buf), tag, b + offs, tlen);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "%s: data %s %s\n", __func__, buf, s);
- if (strcmp(buf, s) != 0 && strcmp("x", s) != 0) {
- DPRINTF(("%s: no string match %s != %s\n", __func__, buf, s));
- return 0;
- }
- strlcpy(ms->ms_value.s, buf, sizeof(ms->ms_value.s));
- DPRINTF(("%s: complete match\n", __func__));
- return 1;
-}
-#endif
-
-#ifdef TEST_DER
-static void
-printtag(uint32_t tag, const void *q, uint32_t len)
-{
- const uint8_t *d = q;
- switch (tag) {
- case DER_TAG_PRINTABLE_STRING:
- case DER_TAG_UTF8_STRING:
- case DER_TAG_IA5_STRING:
- case DER_TAG_UTCTIME:
- printf("%.*s\n", len, (const char *)q);
- return;
- default:
- break;
- }
-
- for (uint32_t i = 0; i < len; i++)
- printf("%.2x", d[i]);
- printf("\n");
-}
-
-static void
-printdata(size_t level, const void *v, size_t x, size_t l)
-{
- const uint8_t *p = v, *ep = p + l;
- size_t ox;
- char buf[128];
-
- while (p + x < ep) {
- const uint8_t *q;
- uint8_t c = getclass(p[x]);
- uint8_t t = gettype(p[x]);
- ox = x;
-// if (x != 0)
-// printf("%.2x %.2x %.2x\n", p[x - 1], p[x], p[x + 1]);
- uint32_t tag = gettag(p, &x, ep - p + x);
- if (p + x >= ep)
- break;
- uint32_t len = getlength(p, &x, ep - p + x);
-
- printf("%" SIZE_T_FORMAT "u %" SIZE_T_FORMAT "u-%"
- SIZE_T_FORMAT "u %c,%c,%s,%u:", level, ox, x,
- der_class[c], der_type[t],
- der_tag(buf, sizeof(buf), tag), len);
- q = p + x;
- if (p + len > ep)
- errx(EXIT_FAILURE, "corrupt der");
- printtag(tag, q, len);
- if (t != DER_TYPE_PRIMITIVE)
- printdata(level + 1, p, x, len + x);
- x += len;
- }
-}
-
-int
-main(int argc, char *argv[])
-{
- int fd;
- struct stat st;
- size_t l;
- void *p;
-
- if ((fd = open(argv[1], O_RDONLY)) == -1)
- err(EXIT_FAILURE, "open `%s'", argv[1]);
- if (fstat(fd, &st) == -1)
- err(EXIT_FAILURE, "stat `%s'", argv[1]);
- l = (size_t)st.st_size;
- if ((p = mmap(NULL, l, PROT_READ, MAP_FILE, fd, 0)) == MAP_FAILED)
- err(EXIT_FAILURE, "mmap `%s'", argv[1]);
-
- printdata(0, p, 0, l);
- munmap(p, l);
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/der.h b/contrib/libs/libmagic/src/der.h
deleted file mode 100644
index 3333239201..0000000000
--- a/contrib/libs/libmagic/src/der.h
+++ /dev/null
@@ -1,28 +0,0 @@
-/*-
- * Copyright (c) 2016 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-extern int der_offs(struct magic_set *, struct magic *, size_t);
-extern int der_cmp(struct magic_set *, struct magic *);
diff --git a/contrib/libs/libmagic/src/elfclass.h b/contrib/libs/libmagic/src/elfclass.h
deleted file mode 100644
index 936d8dc912..0000000000
--- a/contrib/libs/libmagic/src/elfclass.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2008.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
- if (nbytes <= sizeof(elfhdr))
- return 0;
-
- u.l = 1;
- (void)memcpy(&elfhdr, buf, sizeof elfhdr);
- swap = (u.c[sizeof(int32_t) - 1] + 1) != elfhdr.e_ident[EI_DATA];
-
- type = elf_getu16(swap, elfhdr.e_type);
- notecount = ms->elf_notes_max;
- switch (type) {
-#ifdef ELFCORE
- case ET_CORE:
- phnum = elf_getu16(swap, elfhdr.e_phnum);
- if (phnum > ms->elf_phnum_max)
- return toomany(ms, "program headers", phnum);
- flags |= FLAGS_IS_CORE;
- if (dophn_core(ms, clazz, swap, fd,
- CAST(off_t, elf_getu(swap, elfhdr.e_phoff)), phnum,
- CAST(size_t, elf_getu16(swap, elfhdr.e_phentsize)),
- fsize, &flags, &notecount) == -1)
- return -1;
- break;
-#endif
- case ET_EXEC:
- case ET_DYN:
- phnum = elf_getu16(swap, elfhdr.e_phnum);
- if (phnum > ms->elf_phnum_max)
- return toomany(ms, "program", phnum);
- shnum = elf_getu16(swap, elfhdr.e_shnum);
- if (shnum > ms->elf_shnum_max)
- return toomany(ms, "section", shnum);
- if (dophn_exec(ms, clazz, swap, fd,
- CAST(off_t, elf_getu(swap, elfhdr.e_phoff)), phnum,
- CAST(size_t, elf_getu16(swap, elfhdr.e_phentsize)),
- fsize, shnum, &flags, &notecount) == -1)
- return -1;
- /*FALLTHROUGH*/
- case ET_REL:
- shnum = elf_getu16(swap, elfhdr.e_shnum);
- if (shnum > ms->elf_shnum_max)
- return toomany(ms, "section headers", shnum);
- if (doshn(ms, clazz, swap, fd,
- CAST(off_t, elf_getu(swap, elfhdr.e_shoff)), shnum,
- CAST(size_t, elf_getu16(swap, elfhdr.e_shentsize)),
- fsize, elf_getu16(swap, elfhdr.e_machine),
- CAST(int, elf_getu16(swap, elfhdr.e_shstrndx)),
- &flags, &notecount) == -1)
- return -1;
- break;
-
- default:
- break;
- }
- if (notecount == 0)
- return toomany(ms, "notes", ms->elf_notes_max);
- return 1;
diff --git a/contrib/libs/libmagic/src/encoding.c b/contrib/libs/libmagic/src/encoding.c
deleted file mode 100644
index 9dbb9dd93e..0000000000
--- a/contrib/libs/libmagic/src/encoding.c
+++ /dev/null
@@ -1,658 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * Encoding -- determine the character encoding of a text file.
- *
- * Joerg Wunsch <joerg@freebsd.org> wrote the original support for 8-bit
- * international characters.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: encoding.c,v 1.42 2022/12/26 17:31:14 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <string.h>
-#include <stdlib.h>
-
-
-file_private int looks_ascii(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_utf8_with_BOM(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_utf7(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_ucs16(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_ucs32(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_latin1(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private int looks_extended(const unsigned char *, size_t, file_unichar_t *,
- size_t *);
-file_private void from_ebcdic(const unsigned char *, size_t, unsigned char *);
-
-#ifdef DEBUG_ENCODING
-#define DPRINTF(a) printf a
-#else
-#define DPRINTF(a)
-#endif
-
-/*
- * Try to determine whether text is in some character code we can
- * identify. Each of these tests, if it succeeds, will leave
- * the text converted into one-file_unichar_t-per-character Unicode in
- * ubuf, and the number of characters converted in ulen.
- */
-file_protected int
-file_encoding(struct magic_set *ms, const struct buffer *b,
- file_unichar_t **ubuf, size_t *ulen, const char **code,
- const char **code_mime, const char **type)
-{
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- size_t mlen;
- int rv = 1, ucs_type;
- file_unichar_t *udefbuf;
- size_t udeflen;
-
- if (ubuf == NULL)
- ubuf = &udefbuf;
- if (ulen == NULL)
- ulen = &udeflen;
-
- *type = "text";
- *ulen = 0;
- *code = "unknown";
- *code_mime = "binary";
-
- if (nbytes > ms->encoding_max)
- nbytes = ms->encoding_max;
-
- mlen = (nbytes + 1) * sizeof((*ubuf)[0]);
- *ubuf = CAST(file_unichar_t *, calloc(CAST(size_t, 1), mlen));
- if (*ubuf == NULL) {
- file_oomem(ms, mlen);
- goto done;
- }
- if (looks_ascii(buf, nbytes, *ubuf, ulen)) {
- if (looks_utf7(buf, nbytes, *ubuf, ulen) > 0) {
- DPRINTF(("utf-7 %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "Unicode text, UTF-7";
- *code_mime = "utf-7";
- } else {
- DPRINTF(("ascii %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "ASCII";
- *code_mime = "us-ascii";
- }
- } else if (looks_utf8_with_BOM(buf, nbytes, *ubuf, ulen) > 0) {
- DPRINTF(("utf8/bom %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "Unicode text, UTF-8 (with BOM)";
- *code_mime = "utf-8";
- } else if (file_looks_utf8(buf, nbytes, *ubuf, ulen) > 1) {
- DPRINTF(("utf8 %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "Unicode text, UTF-8";
- *code_mime = "utf-8";
- } else if ((ucs_type = looks_ucs32(buf, nbytes, *ubuf, ulen)) != 0) {
- if (ucs_type == 1) {
- *code = "Unicode text, UTF-32, little-endian";
- *code_mime = "utf-32le";
- } else {
- *code = "Unicode text, UTF-32, big-endian";
- *code_mime = "utf-32be";
- }
- DPRINTF(("ucs32 %" SIZE_T_FORMAT "u\n", *ulen));
- } else if ((ucs_type = looks_ucs16(buf, nbytes, *ubuf, ulen)) != 0) {
- if (ucs_type == 1) {
- *code = "Unicode text, UTF-16, little-endian";
- *code_mime = "utf-16le";
- } else {
- *code = "Unicode text, UTF-16, big-endian";
- *code_mime = "utf-16be";
- }
- DPRINTF(("ucs16 %" SIZE_T_FORMAT "u\n", *ulen));
- } else if (looks_latin1(buf, nbytes, *ubuf, ulen)) {
- DPRINTF(("latin1 %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "ISO-8859";
- *code_mime = "iso-8859-1";
- } else if (looks_extended(buf, nbytes, *ubuf, ulen)) {
- DPRINTF(("extended %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "Non-ISO extended-ASCII";
- *code_mime = "unknown-8bit";
- } else {
- unsigned char *nbuf;
-
- mlen = (nbytes + 1) * sizeof(nbuf[0]);
- if ((nbuf = CAST(unsigned char *, malloc(mlen))) == NULL) {
- file_oomem(ms, mlen);
- goto done;
- }
- from_ebcdic(buf, nbytes, nbuf);
-
- if (looks_ascii(nbuf, nbytes, *ubuf, ulen)) {
- DPRINTF(("ebcdic %" SIZE_T_FORMAT "u\n", *ulen));
- *code = "EBCDIC";
- *code_mime = "ebcdic";
- } else if (looks_latin1(nbuf, nbytes, *ubuf, ulen)) {
- DPRINTF(("ebcdic/international %" SIZE_T_FORMAT "u\n",
- *ulen));
- *code = "International EBCDIC";
- *code_mime = "ebcdic";
- } else { /* Doesn't look like text at all */
- DPRINTF(("binary\n"));
- rv = 0;
- *type = "binary";
- }
- free(nbuf);
- }
-
- done:
- if (ubuf == &udefbuf)
- free(udefbuf);
-
- return rv;
-}
-
-/*
- * This table reflects a particular philosophy about what constitutes
- * "text," and there is room for disagreement about it.
- *
- * Version 3.31 of the file command considered a file to be ASCII if
- * each of its characters was approved by either the isascii() or
- * isalpha() function. On most systems, this would mean that any
- * file consisting only of characters in the range 0x00 ... 0x7F
- * would be called ASCII text, but many systems might reasonably
- * consider some characters outside this range to be alphabetic,
- * so the file command would call such characters ASCII. It might
- * have been more accurate to call this "considered textual on the
- * local system" than "ASCII."
- *
- * It considered a file to be "International language text" if each
- * of its characters was either an ASCII printing character (according
- * to the real ASCII standard, not the above test), a character in
- * the range 0x80 ... 0xFF, or one of the following control characters:
- * backspace, tab, line feed, vertical tab, form feed, carriage return,
- * escape. No attempt was made to determine the language in which files
- * of this type were written.
- *
- *
- * The table below considers a file to be ASCII if all of its characters
- * are either ASCII printing characters (again, according to the X3.4
- * standard, not isascii()) or any of the following controls: bell,
- * backspace, tab, line feed, form feed, carriage return, esc, nextline.
- *
- * I include bell because some programs (particularly shell scripts)
- * use it literally, even though it is rare in normal text. I exclude
- * vertical tab because it never seems to be used in real text. I also
- * include, with hesitation, the X3.64/ECMA-43 control nextline (0x85),
- * because that's what the dd EBCDIC->ASCII table maps the EBCDIC newline
- * character to. It might be more appropriate to include it in the 8859
- * set instead of the ASCII set, but it's got to be included in *something*
- * we recognize or EBCDIC files aren't going to be considered textual.
- * Some old Unix source files use SO/SI (^N/^O) to shift between Greek
- * and Latin characters, so these should possibly be allowed. But they
- * make a real mess on VT100-style displays if they're not paired properly,
- * so we are probably better off not calling them text.
- *
- * A file is considered to be ISO-8859 text if its characters are all
- * either ASCII, according to the above definition, or printing characters
- * from the ISO-8859 8-bit extension, characters 0xA0 ... 0xFF.
- *
- * Finally, a file is considered to be international text from some other
- * character code if its characters are all either ISO-8859 (according to
- * the above definition) or characters in the range 0x80 ... 0x9F, which
- * ISO-8859 considers to be control characters but the IBM PC and Macintosh
- * consider to be printing characters.
- */
-
-#define F 0 /* character never appears in text */
-#define T 1 /* character appears in plain ASCII text */
-#define I 2 /* character appears in ISO-8859 text */
-#define X 3 /* character appears in non-ISO extended ASCII (Mac, IBM PC) */
-
-file_private char text_chars[256] = {
- /* BEL BS HT LF VT FF CR */
- F, F, F, F, F, F, F, T, T, T, T, T, T, T, F, F, /* 0x0X */
- /* ESC */
- F, F, F, F, F, F, F, F, F, F, F, T, F, F, F, F, /* 0x1X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x2X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x3X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x4X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x5X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, /* 0x6X */
- T, T, T, T, T, T, T, T, T, T, T, T, T, T, T, F, /* 0x7X */
- /* NEL */
- X, X, X, X, X, T, X, X, X, X, X, X, X, X, X, X, /* 0x8X */
- X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, X, /* 0x9X */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xaX */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xbX */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xcX */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xdX */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, /* 0xeX */
- I, I, I, I, I, I, I, I, I, I, I, I, I, I, I, I /* 0xfX */
-};
-
-#define LOOKS(NAME, COND) \
-file_private int \
-looks_ ## NAME(const unsigned char *buf, size_t nbytes, file_unichar_t *ubuf, \
- size_t *ulen) \
-{ \
- size_t i; \
-\
- *ulen = 0; \
-\
- for (i = 0; i < nbytes; i++) { \
- int t = text_chars[buf[i]]; \
-\
- if (COND) \
- return 0; \
-\
- ubuf[(*ulen)++] = buf[i]; \
- } \
- return 1; \
-}
-
-LOOKS(ascii, t != T)
-LOOKS(latin1, t != T && t != I)
-LOOKS(extended, t != T && t != I && t != X)
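For clarity, the first instantiation above, LOOKS(ascii, t != T), expands to the following function (expansion shown here only for readability):

file_private int
looks_ascii(const unsigned char *buf, size_t nbytes, file_unichar_t *ubuf,
    size_t *ulen)
{
	size_t i;

	*ulen = 0;
	for (i = 0; i < nbytes; i++) {
		int t = text_chars[buf[i]];

		if (t != T)
			return 0;
		ubuf[(*ulen)++] = buf[i];
	}
	return 1;
}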
-
-/*
- * Decide whether some text looks like UTF-8. Returns:
- *
- * -1: invalid UTF-8
- * 0: uses odd control characters, so doesn't look like text
- * 1: 7-bit text
- * 2: definitely UTF-8 text (valid high-bit set bytes)
- *
- * If ubuf is non-NULL on entry, text is decoded into ubuf, *ulen;
- * ubuf must be big enough!
- */
-
-// from: https://golang.org/src/unicode/utf8/utf8.go
-
-#define XX 0xF1 // invalid: size 1
-#define AS 0xF0 // ASCII: size 1
-#define S1 0x02 // accept 0, size 2
-#define S2 0x13 // accept 1, size 3
-#define S3 0x03 // accept 0, size 3
-#define S4 0x23 // accept 2, size 3
-#define S5 0x34 // accept 3, size 4
-#define S6 0x04 // accept 0, size 4
-#define S7 0x44 // accept 4, size 4
-
-#define LOCB 0x80
-#define HICB 0xBF
-
-// first is information about the first byte in a UTF-8 sequence.
-static const uint8_t first[] = {
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x00-0x0F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x10-0x1F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x20-0x2F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x30-0x3F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x40-0x4F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x50-0x5F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x60-0x6F
- AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, AS, // 0x70-0x7F
- // 1 2 3 4 5 6 7 8 9 A B C D E F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x80-0x8F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0x90-0x9F
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xA0-0xAF
- XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xB0-0xBF
- XX, XX, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xC0-0xCF
- S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, S1, // 0xD0-0xDF
- S2, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S3, S4, S3, S3, // 0xE0-0xEF
- S5, S6, S6, S6, S7, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, XX, // 0xF0-0xFF
-};
-
-// acceptRange gives the range of valid values for the second byte in a UTF-8
-// sequence.
-struct accept_range {
- uint8_t lo; // lowest value for second byte.
- uint8_t hi; // highest value for second byte.
-} accept_ranges[16] = {
-// acceptRanges has size 16 to avoid bounds checks in the code that uses it.
- { LOCB, HICB },
- { 0xA0, HICB },
- { LOCB, 0x9F },
- { 0x90, HICB },
- { LOCB, 0x8F },
-};
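A few worked examples of how the first[] and accept_ranges[] tables classify byte sequences (derived from the values above, for illustration only):

/*
 * 0xE0 -> first[0xE0] = S2 = 0x13: low nibble 3 => 3-byte sequence, high
 *         nibble 1 => the second byte must lie in accept_ranges[1] =
 *         {0xA0, 0xBF}, so E0 A0 80 (U+0800) is accepted while the
 *         overlong E0 80 80 is rejected.
 * 0xED -> first[0xED] = S4 = 0x23: the second byte must lie in
 *         accept_ranges[2] = {0x80, 0x9F}, which excludes the UTF-16
 *         surrogate range U+D800..U+DFFF (ED A0 80 .. ED BF BF).
 * 0xF0 -> first[0xF0] = S5 = 0x34: 4-byte sequence, second byte in
 *         accept_ranges[3] = {0x90, 0xBF}, rejecting overlong F0 8x xx xx.
 * 0xC0, 0xC1 -> XX: never valid lead bytes (any such 2-byte form would be
 *         overlong).
 */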
-
-file_protected int
-file_looks_utf8(const unsigned char *buf, size_t nbytes, file_unichar_t *ubuf,
- size_t *ulen)
-{
- size_t i;
- int n;
- file_unichar_t c;
- int gotone = 0, ctrl = 0;
-
- if (ubuf)
- *ulen = 0;
-
- for (i = 0; i < nbytes; i++) {
- if ((buf[i] & 0x80) == 0) { /* 0xxxxxxx is plain ASCII */
- /*
- * Even if the whole file is valid UTF-8 sequences,
- * still reject it if it uses weird control characters.
- */
-
- if (text_chars[buf[i]] != T)
- ctrl = 1;
-
- if (ubuf)
- ubuf[(*ulen)++] = buf[i];
- } else if ((buf[i] & 0x40) == 0) { /* 10xxxxxx never 1st byte */
- return -1;
- } else { /* 11xxxxxx begins UTF-8 */
- int following;
- uint8_t x = first[buf[i]];
- const struct accept_range *ar =
- &accept_ranges[(unsigned int)x >> 4];
- if (x == XX)
- return -1;
-
- if ((buf[i] & 0x20) == 0) { /* 110xxxxx */
- c = buf[i] & 0x1f;
- following = 1;
- } else if ((buf[i] & 0x10) == 0) { /* 1110xxxx */
- c = buf[i] & 0x0f;
- following = 2;
- } else if ((buf[i] & 0x08) == 0) { /* 11110xxx */
- c = buf[i] & 0x07;
- following = 3;
- } else if ((buf[i] & 0x04) == 0) { /* 111110xx */
- c = buf[i] & 0x03;
- following = 4;
- } else if ((buf[i] & 0x02) == 0) { /* 1111110x */
- c = buf[i] & 0x01;
- following = 5;
- } else
- return -1;
-
- for (n = 0; n < following; n++) {
- i++;
- if (i >= nbytes)
- goto done;
-
- if (n == 0 &&
- (buf[i] < ar->lo || buf[i] > ar->hi))
- return -1;
-
- if ((buf[i] & 0x80) == 0 || (buf[i] & 0x40))
- return -1;
-
- c = (c << 6) + (buf[i] & 0x3f);
- }
-
- if (ubuf)
- ubuf[(*ulen)++] = c;
- gotone = 1;
- }
- }
-done:
- return ctrl ? 0 : (gotone ? 2 : 1);
-}
-
-/*
- * Decide whether some text looks like UTF-8 with BOM. If there is no
- * BOM, return -1; otherwise return the result of looks_utf8 on the
- * rest of the text.
- */
-file_private int
-looks_utf8_with_BOM(const unsigned char *buf, size_t nbytes,
- file_unichar_t *ubuf, size_t *ulen)
-{
- if (nbytes > 3 && buf[0] == 0xef && buf[1] == 0xbb && buf[2] == 0xbf)
- return file_looks_utf8(buf + 3, nbytes - 3, ubuf, ulen);
- else
- return -1;
-}
-
-file_private int
-looks_utf7(const unsigned char *buf, size_t nbytes, file_unichar_t *ubuf,
- size_t *ulen)
-{
- if (nbytes > 4 && buf[0] == '+' && buf[1] == '/' && buf[2] == 'v')
- switch (buf[3]) {
- case '8':
- case '9':
- case '+':
- case '/':
- if (ubuf)
- *ulen = 0;
- return 1;
- default:
- return -1;
- }
- else
- return -1;
-}
-
-#define UCS16_NOCHAR(c) ((c) >= 0xfdd0 && (c) <= 0xfdef)
-#define UCS16_HISURR(c) ((c) >= 0xd800 && (c) <= 0xdbff)
-#define UCS16_LOSURR(c) ((c) >= 0xdc00 && (c) <= 0xdfff)
-
-file_private int
-looks_ucs16(const unsigned char *bf, size_t nbytes, file_unichar_t *ubf,
- size_t *ulen)
-{
- int bigend;
- uint32_t hi;
- size_t i;
-
- if (nbytes < 2)
- return 0;
-
- if (bf[0] == 0xff && bf[1] == 0xfe)
- bigend = 0;
- else if (bf[0] == 0xfe && bf[1] == 0xff)
- bigend = 1;
- else
- return 0;
-
- *ulen = 0;
- hi = 0;
-
- for (i = 2; i + 1 < nbytes; i += 2) {
- uint32_t uc;
-
- if (bigend)
- uc = CAST(uint32_t,
- bf[i + 1] | (CAST(file_unichar_t, bf[i]) << 8));
- else
- uc = CAST(uint32_t,
- bf[i] | (CAST(file_unichar_t, bf[i + 1]) << 8));
-
- uc &= 0xffff;
-
- switch (uc) {
- case 0xfffe:
- case 0xffff:
- return 0;
- default:
- if (UCS16_NOCHAR(uc))
- return 0;
- break;
- }
- if (hi) {
- if (!UCS16_LOSURR(uc))
- return 0;
- uc = 0x10000 + 0x400 * (hi - 1) + (uc - 0xdc00);
- hi = 0;
- }
- if (uc < 128 && text_chars[CAST(size_t, uc)] != T)
- return 0;
- ubf[(*ulen)++] = uc;
- if (UCS16_HISURR(uc))
- hi = uc - 0xd800 + 1;
- if (UCS16_LOSURR(uc))
- return 0;
- }
-
- return 1 + bigend;
-}
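A worked example of the surrogate handling above (illustrative):

#include <assert.h>

/* U+1F600 is encoded in UTF-16 as the pair 0xD83D 0xDE00; looks_ucs16()
 * records hi for the high surrogate and folds the low surrogate into a
 * single code point exactly as below. */
static void
surrogate_example(void)
{
	unsigned hi = 0xd83d - 0xd800 + 1;
	unsigned uc = 0x10000 + 0x400 * (hi - 1) + (0xde00 - 0xdc00);

	assert(uc == 0x1f600);
}

A lone low surrogate, or a high surrogate not followed by a low one, makes looks_ucs16() return 0 ("not UTF-16 text"); otherwise the return value distinguishes little-endian (1) from big-endian (2) input.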
-
-file_private int
-looks_ucs32(const unsigned char *bf, size_t nbytes, file_unichar_t *ubf,
- size_t *ulen)
-{
- int bigend;
- size_t i;
-
- if (nbytes < 4)
- return 0;
-
- if (bf[0] == 0xff && bf[1] == 0xfe && bf[2] == 0 && bf[3] == 0)
- bigend = 0;
- else if (bf[0] == 0 && bf[1] == 0 && bf[2] == 0xfe && bf[3] == 0xff)
- bigend = 1;
- else
- return 0;
-
- *ulen = 0;
-
- for (i = 4; i + 3 < nbytes; i += 4) {
- /* XXX fix to properly handle chars > 65536 */
-
- if (bigend)
- ubf[(*ulen)++] = CAST(file_unichar_t, bf[i + 3])
- | (CAST(file_unichar_t, bf[i + 2]) << 8)
- | (CAST(file_unichar_t, bf[i + 1]) << 16)
- | (CAST(file_unichar_t, bf[i]) << 24);
- else
- ubf[(*ulen)++] = CAST(file_unichar_t, bf[i + 0])
- | (CAST(file_unichar_t, bf[i + 1]) << 8)
- | (CAST(file_unichar_t, bf[i + 2]) << 16)
- | (CAST(file_unichar_t, bf[i + 3]) << 24);
-
- if (ubf[*ulen - 1] == 0xfffe)
- return 0;
- if (ubf[*ulen - 1] < 128 &&
- text_chars[CAST(size_t, ubf[*ulen - 1])] != T)
- return 0;
- }
-
- return 1 + bigend;
-}
-#undef F
-#undef T
-#undef I
-#undef X
-
-/*
- * This table maps each EBCDIC character to an (8-bit extended) ASCII
- * character, as specified in the rationale for the dd(1) command in
- * draft 11.2 (September, 1991) of the POSIX P1003.2 standard.
- *
- * Unfortunately it does not seem to correspond exactly to any of the
- * five variants of EBCDIC documented in IBM's _Enterprise Systems
- * Architecture/390: Principles of Operation_, SA22-7201-06, Seventh
- * Edition, July, 1999, pp. I-1 - I-4.
- *
- * Fortunately, though, all versions of EBCDIC, including this one, agree
- * on most of the printing characters that also appear in (7-bit) ASCII.
- * Of these, only '|', '!', '~', '^', '[', and ']' are in question at all.
- *
- * Fortunately too, there is general agreement that codes 0x00 through
- * 0x3F represent control characters, 0x41 a nonbreaking space, and the
- * remainder printing characters.
- *
- * This is sufficient to allow us to identify EBCDIC text and to distinguish
- * between old-style and internationalized examples of text.
- */
-
-file_private unsigned char ebcdic_to_ascii[] = {
- 0, 1, 2, 3, 156, 9, 134, 127, 151, 141, 142, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 157, 133, 8, 135, 24, 25, 146, 143, 28, 29, 30, 31,
-128, 129, 130, 131, 132, 10, 23, 27, 136, 137, 138, 139, 140, 5, 6, 7,
-144, 145, 22, 147, 148, 149, 150, 4, 152, 153, 154, 155, 20, 21, 158, 26,
-' ', 160, 161, 162, 163, 164, 165, 166, 167, 168, 213, '.', '<', '(', '+', '|',
-'&', 169, 170, 171, 172, 173, 174, 175, 176, 177, '!', '$', '*', ')', ';', '~',
-'-', '/', 178, 179, 180, 181, 182, 183, 184, 185, 203, ',', '%', '_', '>', '?',
-186, 187, 188, 189, 190, 191, 192, 193, 194, '`', ':', '#', '@', '\'','=', '"',
-195, 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 196, 197, 198, 199, 200, 201,
-202, 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', '^', 204, 205, 206, 207, 208,
-209, 229, 's', 't', 'u', 'v', 'w', 'x', 'y', 'z', 210, 211, 212, '[', 214, 215,
-216, 217, 218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, ']', 230, 231,
-'{', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 232, 233, 234, 235, 236, 237,
-'}', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 238, 239, 240, 241, 242, 243,
-'\\',159, 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z', 244, 245, 246, 247, 248, 249,
-'0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 250, 251, 252, 253, 254, 255
-};
-
-#ifdef notdef
-/*
- * The following EBCDIC-to-ASCII table may relate more closely to reality,
- * or at least to modern reality. It comes from
- *
- * http://ftp.s390.ibm.com/products/oe/bpxqp9.html
- *
- * and maps the characters of EBCDIC code page 1047 (the code used for
- * Unix-derived software on IBM's 390 systems) to the corresponding
- * characters from ISO 8859-1.
- *
- * If this table is used instead of the above one, some of the special
- * cases for the NEL character can be taken out of the code.
- */
-
-file_private unsigned char ebcdic_1047_to_8859[] = {
-0x00,0x01,0x02,0x03,0x9C,0x09,0x86,0x7F,0x97,0x8D,0x8E,0x0B,0x0C,0x0D,0x0E,0x0F,
-0x10,0x11,0x12,0x13,0x9D,0x0A,0x08,0x87,0x18,0x19,0x92,0x8F,0x1C,0x1D,0x1E,0x1F,
-0x80,0x81,0x82,0x83,0x84,0x85,0x17,0x1B,0x88,0x89,0x8A,0x8B,0x8C,0x05,0x06,0x07,
-0x90,0x91,0x16,0x93,0x94,0x95,0x96,0x04,0x98,0x99,0x9A,0x9B,0x14,0x15,0x9E,0x1A,
-0x20,0xA0,0xE2,0xE4,0xE0,0xE1,0xE3,0xE5,0xE7,0xF1,0xA2,0x2E,0x3C,0x28,0x2B,0x7C,
-0x26,0xE9,0xEA,0xEB,0xE8,0xED,0xEE,0xEF,0xEC,0xDF,0x21,0x24,0x2A,0x29,0x3B,0x5E,
-0x2D,0x2F,0xC2,0xC4,0xC0,0xC1,0xC3,0xC5,0xC7,0xD1,0xA6,0x2C,0x25,0x5F,0x3E,0x3F,
-0xF8,0xC9,0xCA,0xCB,0xC8,0xCD,0xCE,0xCF,0xCC,0x60,0x3A,0x23,0x40,0x27,0x3D,0x22,
-0xD8,0x61,0x62,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0xAB,0xBB,0xF0,0xFD,0xFE,0xB1,
-0xB0,0x6A,0x6B,0x6C,0x6D,0x6E,0x6F,0x70,0x71,0x72,0xAA,0xBA,0xE6,0xB8,0xC6,0xA4,
-0xB5,0x7E,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7A,0xA1,0xBF,0xD0,0x5B,0xDE,0xAE,
-0xAC,0xA3,0xA5,0xB7,0xA9,0xA7,0xB6,0xBC,0xBD,0xBE,0xDD,0xA8,0xAF,0x5D,0xB4,0xD7,
-0x7B,0x41,0x42,0x43,0x44,0x45,0x46,0x47,0x48,0x49,0xAD,0xF4,0xF6,0xF2,0xF3,0xF5,
-0x7D,0x4A,0x4B,0x4C,0x4D,0x4E,0x4F,0x50,0x51,0x52,0xB9,0xFB,0xFC,0xF9,0xFA,0xFF,
-0x5C,0xF7,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5A,0xB2,0xD4,0xD6,0xD2,0xD3,0xD5,
-0x30,0x31,0x32,0x33,0x34,0x35,0x36,0x37,0x38,0x39,0xB3,0xDB,0xDC,0xD9,0xDA,0x9F
-};
-#endif
-
-/*
- * Copy buf[0 ... nbytes-1] into out[], translating EBCDIC to ASCII.
- */
-file_private void
-from_ebcdic(const unsigned char *buf, size_t nbytes, unsigned char *out)
-{
- size_t i;
-
- for (i = 0; i < nbytes; i++) {
- out[i] = ebcdic_to_ascii[buf[i]];
- }
-}
diff --git a/contrib/libs/libmagic/src/file.c b/contrib/libs/libmagic/src/file.c
deleted file mode 100644
index c20391317a..0000000000
--- a/contrib/libs/libmagic/src/file.c
+++ /dev/null
@@ -1,859 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * file - find type of a file or files - main program.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: file.c,v 1.215 2023/05/21 17:08:34 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#ifdef RESTORE_TIME
-# if (__COHERENT__ >= 0x420)
-# include <sys/utime.h>
-# else
-# ifdef USE_UTIMES
-# include <sys/time.h>
-# else
-# include <utime.h>
-# endif
-# endif
-#endif
-#ifdef HAVE_UNISTD_H
-#include <unistd.h> /* for read() */
-#endif
-#ifdef HAVE_WCHAR_H
-#include <wchar.h>
-#endif
-#ifdef HAVE_WCTYPE_H
-#include <wctype.h>
-#endif
-#if defined(HAVE_WCHAR_H) && defined(HAVE_MBRTOWC) && defined(HAVE_WCWIDTH) && \
- defined(HAVE_WCTYPE_H)
-#define FILE_WIDE_SUPPORT
-#else
-#include <ctype.h>
-#endif
-
-#if defined(HAVE_GETOPT_H) && defined(HAVE_STRUCT_OPTION)
-# include <getopt.h>
-# ifndef HAVE_GETOPT_LONG
-int getopt_long(int, char * const *, const char *,
- const struct option *, int *);
-# endif
-# else
-# error #include "mygetopt.h"
-#endif
-
-#ifdef S_IFLNK
-# define IFLNK_h "h"
-# define IFLNK_L "L"
-#else
-# define IFLNK_h ""
-# define IFLNK_L ""
-#endif
-
-#define FILE_FLAGS "bcCdE" IFLNK_h "ik" IFLNK_L "lNnprsSvzZ0"
-#define OPTSTRING "bcCde:Ef:F:hiklLm:nNpP:rsSvzZ0"
-
-# define USAGE \
- "Usage: %s [-" FILE_FLAGS "] [--apple] [--extension] [--mime-encoding]\n" \
- " [--mime-type] [-e <testname>] [-F <separator>] " \
- " [-f <namefile>]\n" \
- " [-m <magicfiles>] [-P <parameter=value>] [--exclude-quiet]\n" \
- " <file> ...\n" \
- " %s -C [-m <magicfiles>]\n" \
- " %s [--help]\n"
-
-file_private int /* Global command-line options */
- bflag = 0, /* brief output format */
- nopad = 0, /* Don't pad output */
- nobuffer = 0, /* Do not buffer stdout */
- nulsep = 0; /* Append '\0' to the separator */
-
-file_private const char *separator = ":"; /* Default field separator */
-file_private const struct option long_options[] = {
-#define OPT_HELP 1
-#define OPT_APPLE 2
-#define OPT_EXTENSIONS 3
-#define OPT_MIME_TYPE 4
-#define OPT_MIME_ENCODING 5
-#define OPT_EXCLUDE_QUIET 6
-#define OPT(shortname, longname, opt, def, doc) \
- {longname, opt, NULL, shortname},
-#define OPT_LONGONLY(longname, opt, def, doc, id) \
- {longname, opt, NULL, id},
-#include "file_opts.h"
-#undef OPT
-#undef OPT_LONGONLY
- {0, 0, NULL, 0}
- };
-
-file_private const struct {
- const char *name;
- int value;
-} nv[] = {
- { "apptype", MAGIC_NO_CHECK_APPTYPE },
- { "ascii", MAGIC_NO_CHECK_ASCII },
- { "cdf", MAGIC_NO_CHECK_CDF },
- { "compress", MAGIC_NO_CHECK_COMPRESS },
- { "csv", MAGIC_NO_CHECK_CSV },
- { "elf", MAGIC_NO_CHECK_ELF },
- { "encoding", MAGIC_NO_CHECK_ENCODING },
- { "soft", MAGIC_NO_CHECK_SOFT },
- { "tar", MAGIC_NO_CHECK_TAR },
- { "json", MAGIC_NO_CHECK_JSON },
- { "simh", MAGIC_NO_CHECK_SIMH },
- { "text", MAGIC_NO_CHECK_TEXT }, /* synonym for ascii */
- { "tokens", MAGIC_NO_CHECK_TOKENS }, /* OBSOLETE: ignored for backwards compatibility */
-};
-
-file_private struct {
- const char *name;
- size_t value;
- size_t def;
- const char *desc;
- int tag;
- int set;
-} pm[] = {
- { "bytes", 0, FILE_BYTES_MAX, "max bytes to look inside file",
- MAGIC_PARAM_BYTES_MAX, 0 },
- { "elf_notes", 0, FILE_ELF_NOTES_MAX, "max ELF notes processed",
- MAGIC_PARAM_ELF_NOTES_MAX, 0 },
- { "elf_phnum", 0, FILE_ELF_PHNUM_MAX, "max ELF prog sections processed",
- MAGIC_PARAM_ELF_PHNUM_MAX, 0 },
- { "elf_shnum", 0, FILE_ELF_SHNUM_MAX, "max ELF sections processed",
- MAGIC_PARAM_ELF_SHNUM_MAX, 0 },
- { "elf_shsize", 0, FILE_ELF_SHSIZE_MAX, "max ELF section size",
- MAGIC_PARAM_ELF_SHSIZE_MAX, 0 },
- { "encoding", 0, FILE_ENCODING_MAX, "max bytes to scan for encoding",
- MAGIC_PARAM_ENCODING_MAX, 0 },
- { "indir", 0, FILE_INDIR_MAX, "recursion limit for indirection",
- MAGIC_PARAM_INDIR_MAX, 0 },
- { "name", 0, FILE_NAME_MAX, "use limit for name/use magic",
- MAGIC_PARAM_NAME_MAX, 0 },
- { "regex", 0, FILE_REGEX_MAX, "length limit for REGEX searches",
- MAGIC_PARAM_REGEX_MAX, 0 },
-};
-
-file_private int posixly;
-
-#ifdef __dead
-__dead
-#endif
-file_private void usage(void);
-file_private void docprint(const char *, int);
-#ifdef __dead
-__dead
-#endif
-file_private void help(void);
-
-file_private int unwrap(struct magic_set *, const char *);
-file_private int process(struct magic_set *ms, const char *, int);
-file_private struct magic_set *load(const char *, int);
-file_private void setparam(const char *);
-file_private void applyparam(magic_t);
-
-
-/*
- * main - parse arguments and handle options
- */
-int
-main(int argc, char *argv[])
-{
- int c;
- size_t i, j, wid, nw;
- int action = 0, didsomefiles = 0, errflg = 0;
- int flags = 0, e = 0;
-#ifdef HAVE_LIBSECCOMP
- int sandbox = 1;
-#endif
- struct magic_set *magic = NULL;
- int longindex;
- const char *magicfile = NULL; /* where the magic is */
- char *progname;
-
- /* makes islower etc work for other langs */
- (void)setlocale(LC_CTYPE, "");
-
-#ifdef __EMX__
- /* sh-like wildcard expansion! Shouldn't hurt at least ... */
- _wildcard(&argc, &argv);
-#endif
-
- if ((progname = strrchr(argv[0], '/')) != NULL)
- progname++;
- else
- progname = argv[0];
-
- file_setprogname(progname);
-
-
-#ifdef S_IFLNK
- posixly = getenv("POSIXLY_CORRECT") != NULL;
- flags |= posixly ? MAGIC_SYMLINK : 0;
-#endif
- while ((c = getopt_long(argc, argv, OPTSTRING, long_options,
- &longindex)) != -1)
- switch (c) {
- case OPT_HELP:
- help();
- break;
- case OPT_APPLE:
- flags |= MAGIC_APPLE;
- break;
- case OPT_EXTENSIONS:
- flags |= MAGIC_EXTENSION;
- break;
- case OPT_MIME_TYPE:
- flags |= MAGIC_MIME_TYPE;
- break;
- case OPT_MIME_ENCODING:
- flags |= MAGIC_MIME_ENCODING;
- break;
- case '0':
- nulsep++;
- break;
- case 'b':
- bflag++;
- break;
- case 'c':
- action = FILE_CHECK;
- break;
- case 'C':
- action = FILE_COMPILE;
- break;
- case 'd':
- flags |= MAGIC_DEBUG|MAGIC_CHECK;
- break;
- case 'E':
- flags |= MAGIC_ERROR;
- break;
- case 'e':
- case OPT_EXCLUDE_QUIET:
- for (i = 0; i < __arraycount(nv); i++)
- if (strcmp(nv[i].name, optarg) == 0)
- break;
-
- if (i == __arraycount(nv)) {
- if (c != OPT_EXCLUDE_QUIET)
- errflg++;
- } else
- flags |= nv[i].value;
- break;
-
- case 'f':
- if(action)
- usage();
- if (magic == NULL)
- if ((magic = load(magicfile, flags)) == NULL)
- return 1;
- applyparam(magic);
- e |= unwrap(magic, optarg);
- ++didsomefiles;
- break;
- case 'F':
- separator = optarg;
- break;
- case 'i':
- flags |= MAGIC_MIME;
- break;
- case 'k':
- flags |= MAGIC_CONTINUE;
- break;
- case 'l':
- action = FILE_LIST;
- break;
- case 'm':
- magicfile = optarg;
- break;
- case 'n':
- ++nobuffer;
- break;
- case 'N':
- ++nopad;
- break;
-#if defined(HAVE_UTIME) || defined(HAVE_UTIMES)
- case 'p':
- flags |= MAGIC_PRESERVE_ATIME;
- break;
-#endif
- case 'P':
- setparam(optarg);
- break;
- case 'r':
- flags |= MAGIC_RAW;
- break;
- case 's':
- flags |= MAGIC_DEVICES;
- break;
- case 'S':
-#ifdef HAVE_LIBSECCOMP
- sandbox = 0;
-#endif
- break;
- case 'v':
- if (magicfile == NULL)
- magicfile = magic_getpath(magicfile, action);
- (void)fprintf(stdout, "%s-%s\n", file_getprogname(),
- VERSION);
- (void)fprintf(stdout, "magic file from %s\n",
- magicfile);
-#ifdef HAVE_LIBSECCOMP
- (void)fprintf(stdout, "seccomp support included\n");
-#endif
- return 0;
- case 'z':
- flags |= MAGIC_COMPRESS;
- break;
-
- case 'Z':
- flags |= MAGIC_COMPRESS|MAGIC_COMPRESS_TRANSP;
- break;
-#ifdef S_IFLNK
- case 'L':
- flags |= MAGIC_SYMLINK;
- break;
- case 'h':
- flags &= ~MAGIC_SYMLINK;
- break;
-#endif
- case '?':
- default:
- errflg++;
- break;
- }
-
- if (errflg) {
- usage();
- }
- if (e)
- return e;
-
-#ifdef HAVE_LIBSECCOMP
-#if 0
- if (sandbox && enable_sandbox_basic() == -1)
-#else
- if (sandbox && enable_sandbox_full() == -1)
-#endif
- file_err(EXIT_FAILURE, "SECCOMP initialisation failed");
- if (sandbox)
- flags |= MAGIC_NO_COMPRESS_FORK;
-#endif /* HAVE_LIBSECCOMP */
-
- if (MAGIC_VERSION != magic_version())
- file_warnx("Compiled magic version [%d] "
- "does not match with shared library magic version [%d]\n",
- MAGIC_VERSION, magic_version());
-
- switch(action) {
- case FILE_CHECK:
- case FILE_COMPILE:
- case FILE_LIST:
- /*
- * Don't try to check/compile ~/.magic unless we explicitly
- * ask for it.
- */
- magic = magic_open(flags|MAGIC_CHECK);
- if (magic == NULL) {
- file_warn("Can't create magic");
- return 1;
- }
-
-
- switch(action) {
- case FILE_CHECK:
- c = magic_check(magic, magicfile);
- break;
- case FILE_COMPILE:
- c = magic_compile(magic, magicfile);
- break;
- case FILE_LIST:
- c = magic_list(magic, magicfile);
- break;
- default:
- abort();
- }
- if (c == -1) {
- file_warnx("%s", magic_error(magic));
- e = 1;
- goto out;
- }
- goto out;
- default:
- if (magic == NULL)
- if ((magic = load(magicfile, flags)) == NULL)
- return 1;
- applyparam(magic);
- }
-
- if (optind == argc) {
- if (!didsomefiles)
- usage();
- goto out;
- }
-
- for (wid = 0, j = CAST(size_t, optind); j < CAST(size_t, argc);
- j++) {
- nw = file_mbswidth(magic, argv[j]);
- if (nw > wid)
- wid = nw;
- }
-
- /*
- * If bflag is only set twice, set it depending on
- * number of files [this is undocumented, and subject to change]
- */
- if (bflag == 2) {
- bflag = optind >= argc - 1;
- }
- for (; optind < argc; optind++)
- e |= process(magic, argv[optind], wid);
-
-out:
- if (!nobuffer)
- e |= fflush(stdout) != 0;
-
- if (magic)
- magic_close(magic);
- return e;
-}
-
-file_private void
-applyparam(magic_t magic)
-{
- size_t i;
-
- for (i = 0; i < __arraycount(pm); i++) {
- if (!pm[i].set)
- continue;
- if (magic_setparam(magic, pm[i].tag, &pm[i].value) == -1)
- file_err(EXIT_FAILURE, "Can't set %s", pm[i].name);
- }
-}
-
-file_private void
-setparam(const char *p)
-{
- size_t i;
- char *s;
-
- if ((s = CCAST(char *, strchr(p, '='))) == NULL)
- goto badparm;
-
- for (i = 0; i < __arraycount(pm); i++) {
- if (strncmp(p, pm[i].name, s - p) != 0)
- continue;
- pm[i].value = atoi(s + 1);
- pm[i].set = 1;
- return;
- }
-badparm:
- file_errx(EXIT_FAILURE, "Unknown param %s", p);
-}
-
-file_private struct magic_set *
-/*ARGSUSED*/
-load(const char *magicfile, int flags)
-{
- struct magic_set *magic = magic_open(flags);
- const char *e;
-
- if (magic == NULL) {
- file_warn("Can't create magic");
- return NULL;
- }
- if (magic_load(magic, magicfile) == -1) {
- file_warn("%s", magic_error(magic));
- magic_close(magic);
- return NULL;
- }
- if ((e = magic_error(magic)) != NULL)
- file_warn("%s", e);
- return magic;
-}
-
-/*
- * unwrap -- read a file of filenames, do each one.
- */
-file_private int
-unwrap(struct magic_set *ms, const char *fn)
-{
- FILE *f;
- ssize_t len;
- char *line = NULL;
- size_t llen = 0;
- int wid = 0, cwid;
- int e = 0;
- size_t fi = 0, fimax = 0;
- char **flist = NULL;
-
- if (strcmp("-", fn) == 0)
- f = stdin;
- else {
- if ((f = fopen(fn, "r")) == NULL) {
- file_warn("Cannot open `%s'", fn);
- return 1;
- }
- }
-
- while ((len = getline(&line, &llen, f)) > 0) {
- if (line[len - 1] == '\n')
- line[len - 1] = '\0';
- cwid = file_mbswidth(ms, line);
- if (nobuffer) {
- e |= process(ms, line, cwid);
- free(line);
- line = NULL;
- llen = 0;
- continue;
- }
- if (cwid > wid)
- wid = cwid;
- if (fi >= fimax) {
- fimax += 100;
- char **nf = CAST(char **,
- realloc(flist, fimax * sizeof(*flist)));
- if (nf == NULL) {
- file_err(EXIT_FAILURE,
- "Cannot allocate memory for file list");
- }
- flist = nf;
- }
- flist[fi++] = line;
- line = NULL;
- llen = 0;
- }
-
- if (!nobuffer) {
- fimax = fi;
- for (fi = 0; fi < fimax; fi++) {
- e |= process(ms, flist[fi], wid);
- free(flist[fi]);
- }
- }
- free(flist);
-
- if (f != stdin)
- (void)fclose(f);
- return e;
-}
-
-file_private void
-file_octal(unsigned char c)
-{
- (void)putc('\\', stdout);
- (void)putc(((c >> 6) & 7) + '0', stdout);
- (void)putc(((c >> 3) & 7) + '0', stdout);
- (void)putc(((c >> 0) & 7) + '0', stdout);
-}
-
-file_private void
-fname_print(const char *inname)
-{
- size_t n = strlen(inname);
-#ifdef FILE_WIDE_SUPPORT
- mbstate_t state;
- wchar_t nextchar;
- size_t bytesconsumed;
-
-
- (void)memset(&state, 0, sizeof(state));
- while (n > 0) {
- bytesconsumed = mbrtowc(&nextchar, inname, n, &state);
- if (bytesconsumed == CAST(size_t, -1) ||
- bytesconsumed == CAST(size_t, -2)) {
- nextchar = *inname++;
- n--;
- (void)memset(&state, 0, sizeof(state));
- file_octal(CAST(unsigned char, nextchar));
- continue;
- }
- inname += bytesconsumed;
- n -= bytesconsumed;
- if (iswprint(nextchar)) {
- printf("%lc", (wint_t)nextchar);
- continue;
- }
- /* XXX: What if it is > 255? */
- file_octal(CAST(unsigned char, nextchar));
- }
-#else
- size_t i;
- for (i = 0; i < n; i++) {
- unsigned char c = CAST(unsigned char, inname[i]);
- if (isprint(c)) {
- (void)putc(c, stdout);
- continue;
- }
- file_octal(c);
- }
-#endif
-}
-
-/*
- * Called for each input file on the command line (or in a list of files)
- */
-file_private int
-process(struct magic_set *ms, const char *inname, int wid)
-{
- const char *type, c = nulsep > 1 ? '\0' : '\n';
- int std_in = strcmp(inname, "-") == 0;
- int haderror = 0;
-
- if (wid > 0 && !bflag) {
- const char *pname = std_in ? "/dev/stdin" : inname;
- if ((ms->flags & MAGIC_RAW) == 0)
- fname_print(pname);
- else
- (void)printf("%s", pname);
- if (nulsep)
- (void)putc('\0', stdout);
- if (nulsep < 2) {
- (void)printf("%s", separator);
- (void)printf("%*s ", CAST(int, nopad ? 0
- : (wid - file_mbswidth(ms, inname))), "");
- }
- }
-
- type = magic_file(ms, std_in ? NULL : inname);
-
- if (type == NULL) {
- haderror |= printf("ERROR: %s%c", magic_error(ms), c);
- } else {
- haderror |= printf("%s%c", type, c) < 0;
- }
- if (nobuffer)
- haderror |= fflush(stdout) != 0;
- return haderror || type == NULL;
-}
-
-file_protected size_t
-file_mbswidth(struct magic_set *ms, const char *s)
-{
- size_t width = 0;
-#ifdef FILE_WIDE_SUPPORT
- size_t bytesconsumed, n;
- mbstate_t state;
- wchar_t nextchar;
-
- (void)memset(&state, 0, sizeof(state));
- n = strlen(s);
-
- while (n > 0) {
- bytesconsumed = mbrtowc(&nextchar, s, n, &state);
- if (bytesconsumed == CAST(size_t, -1) ||
- bytesconsumed == CAST(size_t, -2)) {
- nextchar = *s;
- bytesconsumed = 1;
- (void)memset(&state, 0, sizeof(state));
- width += 4;
- } else {
- int w = wcwidth(nextchar);
- width += ((ms->flags & MAGIC_RAW) != 0
- || iswprint(nextchar)) ? (w > 0 ? w : 1) : 4;
- }
-
- s += bytesconsumed, n -= bytesconsumed;
- }
-#else
- for (; *s; s++) {
- width += (ms->flags & MAGIC_RAW) != 0
- || isprint(CAST(unsigned char, *s)) ? 1 : 4;
- }
-#endif
- return width;
-}
-
-file_private void
-usage(void)
-{
- const char *pn = file_getprogname();
- (void)fprintf(stderr, USAGE, pn, pn, pn);
- exit(EXIT_FAILURE);
-}
-
-file_private void
-defprint(int def)
-{
- if (!def)
- return;
- if (((def & 1) && posixly) || ((def & 2) && !posixly))
- (void)fprintf(stdout, " (default)");
- (void)putc('\n', stdout);
-}
-
-file_private void
-docprint(const char *opts, int def)
-{
- size_t i;
- int comma, pad;
- char *sp, *p;
-
- p = CCAST(char *, strchr(opts, '%'));
- if (p == NULL) {
- (void)fprintf(stdout, "%s", opts);
- defprint(def);
- return;
- }
-
- for (sp = p - 1; sp > opts && *sp == ' '; sp--)
- continue;
-
- (void)printf("%.*s", CAST(int, p - opts), opts);
- pad = (int)CAST(int, p - sp - 1);
-
- switch (*++p) {
- case 'e':
- comma = 0;
- for (i = 0; i < __arraycount(nv); i++) {
- (void)printf("%s%s", comma++ ? ", " : "", nv[i].name);
- if (i && i % 5 == 0 && i != __arraycount(nv) - 1) {
- (void)printf(",\n%*s", pad, "");
- comma = 0;
- }
- }
- break;
- case 'P':
- for (i = 0; i < __arraycount(pm); i++) {
- (void)printf("%9s %7zu %s", pm[i].name, pm[i].def,
- pm[i].desc);
- if (i != __arraycount(pm) - 1)
- (void)printf("\n%*s", pad, "");
- }
- break;
- default:
- file_errx(EXIT_FAILURE, "Unknown escape `%c' in long options",
- *p);
- break;
- }
- (void)printf("%s", opts + (p - opts) + 1);
-
-}
-
-file_private void
-help(void)
-{
- (void)fputs(
-"Usage: file [OPTION...] [FILE...]\n"
-"Determine type of FILEs.\n"
-"\n", stdout);
-#define OPT(shortname, longname, opt, def, doc) \
- (void)printf(" -%c, --" longname, shortname), \
- docprint(doc, def);
-#define OPT_LONGONLY(longname, opt, def, doc, id) \
- (void)printf(" --" longname), \
- docprint(doc, def);
-#include "file_opts.h"
-#undef OPT
-#undef OPT_LONGONLY
- (void)printf("\nReport bugs to https://bugs.astron.com/\n");
- exit(EXIT_SUCCESS);
-}
-
-file_private const char *file_progname;
-
-file_protected void
-file_setprogname(const char *progname)
-{
- file_progname = progname;
-}
-
-file_protected const char *
-file_getprogname(void)
-{
- return file_progname;
-}
-
-file_protected void
-file_err(int e, const char *fmt, ...)
-{
- va_list ap;
- int se = errno;
-
- va_start(ap, fmt);
- (void)fprintf(stderr, "%s: ", file_progname);
- (void)vfprintf(stderr, fmt, ap);
- va_end(ap);
- if (se)
- (void)fprintf(stderr, " (%s)\n", strerror(se));
- else
- fputc('\n', stderr);
- exit(e);
-}
-
-file_protected void
-file_errx(int e, const char *fmt, ...)
-{
- va_list ap;
-
- va_start(ap, fmt);
- (void)fprintf(stderr, "%s: ", file_progname);
- (void)vfprintf(stderr, fmt, ap);
- va_end(ap);
- (void)fprintf(stderr, "\n");
- exit(e);
-}
-
-file_protected void
-file_warn(const char *fmt, ...)
-{
- va_list ap;
- int se = errno;
-
- va_start(ap, fmt);
- (void)fprintf(stderr, "%s: ", file_progname);
- (void)vfprintf(stderr, fmt, ap);
- va_end(ap);
- if (se)
- (void)fprintf(stderr, " (%s)\n", strerror(se));
- else
- fputc('\n', stderr);
- errno = se;
-}
-
-file_protected void
-file_warnx(const char *fmt, ...)
-{
- va_list ap;
- int se = errno;
-
- va_start(ap, fmt);
- (void)fprintf(stderr, "%s: ", file_progname);
- (void)vfprintf(stderr, fmt, ap);
- va_end(ap);
- (void)fprintf(stderr, "\n");
- errno = se;
-}
diff --git a/contrib/libs/libmagic/src/file.h b/contrib/libs/libmagic/src/file.h
deleted file mode 100644
index 2e0494d2fd..0000000000
--- a/contrib/libs/libmagic/src/file.h
+++ /dev/null
@@ -1,729 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * file.h - definitions for file(1) program
- * @(#)$File: file.h,v 1.247 2023/07/27 19:40:22 christos Exp $
- */
-
-#ifndef __file_h__
-#define __file_h__
-
-#ifdef HAVE_CONFIG_H
-#include <config.h>
-#endif
-
-#ifdef HAVE_STDINT_H
-#include <stdint.h>
-#endif
-
-#ifdef HAVE_INTTYPES_H
-#include <inttypes.h>
-#endif
-
-#ifndef __STDC_LIMIT_MACROS
-#define __STDC_LIMIT_MACROS
-#endif
-#ifndef __STDC_FORMAT_MACROS
-#define __STDC_FORMAT_MACROS
-#endif
-
-#ifdef _WIN32
-# ifdef PRIu32
-# ifdef _WIN64
-# define SIZE_T_FORMAT PRIu64
-# else
-# define SIZE_T_FORMAT PRIu32
-# endif
-# define INT64_T_FORMAT PRIi64
-# define INTMAX_T_FORMAT PRIiMAX
-# else
-# ifdef _WIN64
-# define SIZE_T_FORMAT "I64"
-# else
-# define SIZE_T_FORMAT ""
-# endif
-# define INT64_T_FORMAT "I64"
-# define INTMAX_T_FORMAT "I64"
-# endif
-#else
-# define SIZE_T_FORMAT "z"
-# define INT64_T_FORMAT "ll"
-# define INTMAX_T_FORMAT "j"
-#endif
-
-#include <stdio.h> /* Include that here, to make sure __P gets defined */
-#include <errno.h>
-#include <fcntl.h> /* For open and flags */
-#include <regex.h>
-#include <time.h>
-#include <sys/types.h>
-#ifndef WIN32
-#include <sys/param.h>
-#endif
-/* Do this here and now, because struct stat gets re-defined on solaris */
-#include <sys/stat.h>
-#include <stdarg.h>
-#include <locale.h>
-#if defined(HAVE_XLOCALE_H)
-#include <xlocale.h>
-#endif
-
-#define ENABLE_CONDITIONALS
-
-#ifndef MAGIC
-#define MAGIC "/etc/magic"
-#endif
-
-#if defined(__EMX__) || defined (WIN32)
-#define PATHSEP ';'
-#else
-#define PATHSEP ':'
-#endif
-
-#define file_private static
-
-#if HAVE_VISIBILITY && !defined(WIN32)
-#define file_public __attribute__ ((__visibility__("default")))
-#ifndef file_protected
-#define file_protected __attribute__ ((__visibility__("hidden")))
-#endif
-#else
-#define file_public
-#ifndef file_protected
-#define file_protected
-#endif
-#endif
-
-#ifndef __arraycount
-#define __arraycount(a) (sizeof(a) / sizeof(a[0]))
-#endif
-
-#ifndef __GNUC_PREREQ__
-#ifdef __GNUC__
-#define __GNUC_PREREQ__(x, y) \
- ((__GNUC__ == (x) && __GNUC_MINOR__ >= (y)) || \
- (__GNUC__ > (x)))
-#else
-#define __GNUC_PREREQ__(x, y) 0
-#endif
-#endif
-
-#ifndef __GNUC__
-#ifndef __attribute__
-#define __attribute__(a)
-#endif
-#endif
-
-#ifndef MIN
-#define MIN(a,b) (((a) < (b)) ? (a) : (b))
-#endif
-
-#ifndef MAX
-#define MAX(a,b) (((a) > (b)) ? (a) : (b))
-#endif
-
-#ifndef O_CLOEXEC
-# define O_CLOEXEC 0
-#endif
-
-#ifndef FD_CLOEXEC
-# define FD_CLOEXEC 1
-#endif
-
-
-/*
- * Dec 31, 23:59:59 9999
- * we need to make sure that we don't exceed 9999 because some libc
- * implementations like muslc crash otherwise
- */
-#define MAX_CTIME CAST(time_t, 0x3afff487cfULL)
-
-#define FILE_BADSIZE CAST(size_t, ~0ul)
-#define MAXDESC 64 /* max len of text description/MIME type */
-#define MAXMIME 80 /* max len of text MIME type */
-#define MAXstring 128 /* max len of "string" types */
-
-#define MAGICNO 0xF11E041C
-#define VERSIONNO 18
-#define FILE_MAGICSIZE 376
-
-#define FILE_GUID_SIZE sizeof("XXXXXXXX-XXXX-XXXX-XXXX-XXXXXXXXXXXX")
-
-#define FILE_LOAD 0
-#define FILE_CHECK 1
-#define FILE_COMPILE 2
-#define FILE_LIST 3
-
-typedef regex_t file_regex_t;
-
-struct buffer {
- int fd;
- struct stat st;
- const void *fbuf;
- size_t flen;
- off_t eoff;
- void *ebuf;
- size_t elen;
-};
-
-union VALUETYPE {
- uint8_t b;
- uint16_t h;
- uint32_t l;
- uint64_t q;
- uint8_t hs[2]; /* 2 bytes of a fixed-endian "short" */
- uint8_t hl[4]; /* 4 bytes of a fixed-endian "long" */
- uint8_t hq[8]; /* 8 bytes of a fixed-endian "quad" */
- char s[MAXstring]; /* the search string or regex pattern */
- unsigned char us[MAXstring];
- uint64_t guid[2];
- float f;
- double d;
-};
-
-struct magic {
- /* Word 1 */
- uint16_t cont_level; /* level of ">" */
- uint8_t flag;
-#define INDIR 0x01 /* if '(...)' appears */
-#define OFFADD 0x02 /* if '>&' or '>...(&' appears */
-#define INDIROFFADD 0x04 /* if '>&(' appears */
-#define UNSIGNED 0x08 /* comparison is unsigned */
-#define NOSPACE 0x10 /* suppress space character before output */
-#define BINTEST 0x20 /* test is for a binary type (set only
- for top-level tests) */
-#define TEXTTEST 0x40 /* for passing to file_softmagic */
-#define OFFNEGATIVE 0x80 /* relative to the end of file */
-
- uint8_t factor;
-
- /* Word 2 */
- uint8_t reln; /* relation (0=eq, '>'=gt, etc) */
- uint8_t vallen; /* length of string value, if any */
- uint8_t type; /* comparison type (FILE_*) */
- uint8_t in_type; /* type of indirection */
-#define FILE_INVALID 0
-#define FILE_BYTE 1
-#define FILE_SHORT 2
-#define FILE_DEFAULT 3
-#define FILE_LONG 4
-#define FILE_STRING 5
-#define FILE_DATE 6
-#define FILE_BESHORT 7
-#define FILE_BELONG 8
-#define FILE_BEDATE 9
-#define FILE_LESHORT 10
-#define FILE_LELONG 11
-#define FILE_LEDATE 12
-#define FILE_PSTRING 13
-#define FILE_LDATE 14
-#define FILE_BELDATE 15
-#define FILE_LELDATE 16
-#define FILE_REGEX 17
-#define FILE_BESTRING16 18
-#define FILE_LESTRING16 19
-#define FILE_SEARCH 20
-#define FILE_MEDATE 21
-#define FILE_MELDATE 22
-#define FILE_MELONG 23
-#define FILE_QUAD 24
-#define FILE_LEQUAD 25
-#define FILE_BEQUAD 26
-#define FILE_QDATE 27
-#define FILE_LEQDATE 28
-#define FILE_BEQDATE 29
-#define FILE_QLDATE 30
-#define FILE_LEQLDATE 31
-#define FILE_BEQLDATE 32
-#define FILE_FLOAT 33
-#define FILE_BEFLOAT 34
-#define FILE_LEFLOAT 35
-#define FILE_DOUBLE 36
-#define FILE_BEDOUBLE 37
-#define FILE_LEDOUBLE 38
-#define FILE_BEID3 39
-#define FILE_LEID3 40
-#define FILE_INDIRECT 41
-#define FILE_QWDATE 42
-#define FILE_LEQWDATE 43
-#define FILE_BEQWDATE 44
-#define FILE_NAME 45
-#define FILE_USE 46
-#define FILE_CLEAR 47
-#define FILE_DER 48
-#define FILE_GUID 49
-#define FILE_OFFSET 50
-#define FILE_BEVARINT 51
-#define FILE_LEVARINT 52
-#define FILE_MSDOSDATE 53
-#define FILE_LEMSDOSDATE 54
-#define FILE_BEMSDOSDATE 55
-#define FILE_MSDOSTIME 56
-#define FILE_LEMSDOSTIME 57
-#define FILE_BEMSDOSTIME 58
-#define FILE_OCTAL 59
-#define FILE_NAMES_SIZE 60 /* size of array to contain all names */
-
-#define IS_STRING(t) \
- ((t) == FILE_STRING || \
- (t) == FILE_PSTRING || \
- (t) == FILE_BESTRING16 || \
- (t) == FILE_LESTRING16 || \
- (t) == FILE_REGEX || \
- (t) == FILE_SEARCH || \
- (t) == FILE_INDIRECT || \
- (t) == FILE_NAME || \
- (t) == FILE_USE || \
- (t) == FILE_OCTAL)
-
-#define FILE_FMT_NONE 0
-#define FILE_FMT_NUM 1 /* "cduxXi" */
-#define FILE_FMT_STR 2 /* "s" */
-#define FILE_FMT_QUAD 3 /* "ll" */
-#define FILE_FMT_FLOAT 4 /* "eEfFgG" */
-#define FILE_FMT_DOUBLE 5 /* "eEfFgG" */
-
- /* Word 3 */
- uint8_t in_op; /* operator for indirection */
- uint8_t mask_op; /* operator for mask */
-#ifdef ENABLE_CONDITIONALS
- uint8_t cond; /* conditional type */
-#else
- uint8_t dummy;
-#endif
- uint8_t factor_op;
-#define FILE_FACTOR_OP_PLUS '+'
-#define FILE_FACTOR_OP_MINUS '-'
-#define FILE_FACTOR_OP_TIMES '*'
-#define FILE_FACTOR_OP_DIV '/'
-#define FILE_FACTOR_OP_NONE '\0'
-
-#define FILE_OPS "&|^+-*/%"
-#define FILE_OPAND 0
-#define FILE_OPOR 1
-#define FILE_OPXOR 2
-#define FILE_OPADD 3
-#define FILE_OPMINUS 4
-#define FILE_OPMULTIPLY 5
-#define FILE_OPDIVIDE 6
-#define FILE_OPMODULO 7
-#define FILE_OPS_MASK 0x07 /* mask for above ops */
-#define FILE_UNUSED_1 0x08
-#define FILE_UNUSED_2 0x10
-#define FILE_OPSIGNED 0x20
-#define FILE_OPINVERSE 0x40
-#define FILE_OPINDIRECT 0x80
-
-#ifdef ENABLE_CONDITIONALS
-#define COND_NONE 0
-#define COND_IF 1
-#define COND_ELIF 2
-#define COND_ELSE 3
-#endif /* ENABLE_CONDITIONALS */
-
- /* Word 4 */
- int32_t offset; /* offset to magic number */
- /* Word 5 */
- int32_t in_offset; /* offset from indirection */
- /* Word 6 */
- uint32_t lineno; /* line number in magic file */
- /* Word 7,8 */
- union {
- uint64_t _mask; /* for use with numeric and date types */
- struct {
- uint32_t _count; /* repeat/line count */
- uint32_t _flags; /* modifier flags */
- } _s; /* for use with string types */
- } _u;
-#define num_mask _u._mask
-#define str_range _u._s._count
-#define str_flags _u._s._flags
- /* Words 9-24 */
- union VALUETYPE value; /* either number or string */
- /* Words 25-40 */
- char desc[MAXDESC]; /* description */
- /* Words 41-60 */
- char mimetype[MAXMIME]; /* MIME type */
- /* Words 61-62 */
- char apple[8]; /* APPLE CREATOR/TYPE */
- /* Words 63-78 */
- char ext[64]; /* Popular extensions */
-};
-
-#define BIT(A) (1 << (A))
-#define STRING_COMPACT_WHITESPACE BIT(0)
-#define STRING_COMPACT_OPTIONAL_WHITESPACE BIT(1)
-#define STRING_IGNORE_LOWERCASE BIT(2)
-#define STRING_IGNORE_UPPERCASE BIT(3)
-#define REGEX_OFFSET_START BIT(4)
-#define STRING_TEXTTEST BIT(5)
-#define STRING_BINTEST BIT(6)
-#define PSTRING_1_BE BIT(7)
-#define PSTRING_1_LE BIT(7)
-#define PSTRING_2_BE BIT(8)
-#define PSTRING_2_LE BIT(9)
-#define PSTRING_4_BE BIT(10)
-#define PSTRING_4_LE BIT(11)
-#define REGEX_LINE_COUNT BIT(11)
-#define PSTRING_LEN \
- (PSTRING_1_BE|PSTRING_2_LE|PSTRING_2_BE|PSTRING_4_LE|PSTRING_4_BE)
-#define PSTRING_LENGTH_INCLUDES_ITSELF BIT(12)
-#define STRING_TRIM BIT(13)
-#define STRING_FULL_WORD BIT(14)
-#define CHAR_COMPACT_WHITESPACE 'W'
-#define CHAR_COMPACT_OPTIONAL_WHITESPACE 'w'
-#define CHAR_IGNORE_LOWERCASE 'c'
-#define CHAR_IGNORE_UPPERCASE 'C'
-#define CHAR_REGEX_OFFSET_START 's'
-#define CHAR_TEXTTEST 't'
-#define CHAR_TRIM 'T'
-#define CHAR_FULL_WORD 'f'
-#define CHAR_BINTEST 'b'
-#define CHAR_PSTRING_1_BE 'B'
-#define CHAR_PSTRING_1_LE 'B'
-#define CHAR_PSTRING_2_BE 'H'
-#define CHAR_PSTRING_2_LE 'h'
-#define CHAR_PSTRING_4_BE 'L'
-#define CHAR_PSTRING_4_LE 'l'
-#define CHAR_PSTRING_LENGTH_INCLUDES_ITSELF 'J'
-#define STRING_IGNORE_CASE (STRING_IGNORE_LOWERCASE|STRING_IGNORE_UPPERCASE)
-#define STRING_DEFAULT_RANGE 100
-
-#define INDIRECT_RELATIVE BIT(0)
-#define CHAR_INDIRECT_RELATIVE 'r'
-
-/* list of magic entries */
-struct mlist {
- struct magic *magic; /* array of magic entries */
- file_regex_t **magic_rxcomp; /* array of compiled regexps */
- size_t nmagic; /* number of entries in array */
- void *map; /* internal resources used by entry */
- struct mlist *next, *prev;
-};
-
-#ifdef __cplusplus
-#define CAST(T, b) static_cast<T>(b)
-#define RCAST(T, b) reinterpret_cast<T>(b)
-#define CCAST(T, b) const_cast<T>(b)
-#else
-#define CAST(T, b) ((T)(b))
-#define RCAST(T, b) ((T)(uintptr_t)(b))
-#define CCAST(T, b) ((T)(uintptr_t)(b))
-#endif
-
-struct level_info {
- int32_t off;
- int got_match;
-#ifdef ENABLE_CONDITIONALS
- int last_match;
- int last_cond; /* used for error checking by parse() */
-#endif
-};
-
-struct cont {
- size_t len;
- struct level_info *li;
-};
-
-#define MAGIC_SETS 2
-
-struct magic_set {
- struct mlist *mlist[MAGIC_SETS]; /* list of regular entries */
- struct cont c;
- struct out {
- char *buf; /* Accumulation buffer */
- size_t blen; /* Length of buffer */
- char *pbuf; /* Printable buffer */
- } o;
- uint32_t offset; /* a copy of m->offset while we */
- /* are working on the magic entry */
- uint32_t eoffset; /* offset from end of file */
- int error;
- int flags; /* Control magic tests. */
- int event_flags; /* Note things that happened. */
-#define EVENT_HAD_ERR 0x01
- const char *file;
- size_t line; /* current magic line number */
- mode_t mode; /* copy of current stat mode */
-
- /* data for searches */
- struct {
- const char *s; /* start of search in original source */
- size_t s_len; /* length of search region */
- size_t offset; /* starting offset in source: XXX - should this be off_t? */
- size_t rm_len; /* match length */
- } search;
-
- /* FIXME: Make the string dynamically allocated so that e.g.
- strings matched in files can be longer than MAXstring */
- union VALUETYPE ms_value; /* either number or string */
- uint16_t indir_max;
- uint16_t name_max;
- uint16_t elf_shnum_max;
- uint16_t elf_phnum_max;
- uint16_t elf_notes_max;
- uint16_t regex_max;
- size_t bytes_max; /* number of bytes to read from file */
- size_t encoding_max; /* bytes to look for encoding */
- size_t elf_shsize_max;
-#ifndef FILE_BYTES_MAX
-# define FILE_BYTES_MAX (7 * 1024 * 1024)/* how much of the file to look at */
-#endif /* above 0x6ab0f4 map offset for HelveticaNeue.dfont */
-#define FILE_ELF_NOTES_MAX 256
-#define FILE_ELF_PHNUM_MAX 2048
-#define FILE_ELF_SHNUM_MAX 32768
-#define FILE_ELF_SHSIZE_MAX (128 * 1024 * 1024)
-#define FILE_INDIR_MAX 50
-#define FILE_NAME_MAX 50
-#define FILE_REGEX_MAX 8192
-#define FILE_ENCODING_MAX (64 * 1024)
-#if defined(HAVE_NEWLOCALE) && defined(HAVE_USELOCALE) && defined(HAVE_FREELOCALE)
-#define USE_C_LOCALE
- locale_t c_lc_ctype;
-#define file_locale_used
-#else
-#define file_locale_used __attribute__((__unused__))
-#endif
-};
-
-/* Type for Unicode characters */
-typedef unsigned long file_unichar_t;
-
-struct stat;
-#define FILE_T_LOCAL 1
-#define FILE_T_WINDOWS 2
-file_protected const char *file_fmtdatetime(char *, size_t, uint64_t, int);
-file_protected const char *file_fmtdate(char *, size_t, uint16_t);
-file_protected const char *file_fmttime(char *, size_t, uint16_t);
-file_protected const char *file_fmtvarint(char *, size_t, const unsigned char *,
- int);
-file_protected const char *file_fmtnum(char *, size_t, const char *, int);
-file_protected struct magic_set *file_ms_alloc(int);
-file_protected void file_ms_free(struct magic_set *);
-file_protected int file_default(struct magic_set *, size_t);
-file_protected int file_buffer(struct magic_set *, int, struct stat *,
- const char *, const void *, size_t);
-file_protected int file_fsmagic(struct magic_set *, const char *,
- struct stat *);
-file_protected int file_pipe2file(struct magic_set *, int, const void *,
- size_t);
-file_protected int file_vprintf(struct magic_set *, const char *, va_list)
- __attribute__((__format__(__printf__, 2, 0)));
-file_protected int file_separator(struct magic_set *);
-file_protected char *file_copystr(char *, size_t, size_t, const char *);
-file_protected int file_checkfmt(char *, size_t, const char *);
-file_protected size_t file_printedlen(const struct magic_set *);
-file_protected int file_print_guid(char *, size_t, const uint64_t *);
-file_protected int file_parse_guid(const char *, uint64_t *);
-file_protected int file_replace(struct magic_set *, const char *, const char *);
-file_protected int file_printf(struct magic_set *, const char *, ...)
- __attribute__((__format__(__printf__, 2, 3)));
-file_protected int file_reset(struct magic_set *, int);
-file_protected int file_tryelf(struct magic_set *, const struct buffer *);
-file_protected int file_trycdf(struct magic_set *, const struct buffer *);
-#if HAVE_FORK
-file_protected int file_zmagic(struct magic_set *, const struct buffer *,
- const char *);
-#endif
-file_protected int file_ascmagic(struct magic_set *, const struct buffer *,
- int);
-file_protected int file_ascmagic_with_encoding(struct magic_set *,
- const struct buffer *, file_unichar_t *, size_t, const char *, const char *, int);
-file_protected int file_encoding(struct magic_set *, const struct buffer *,
- file_unichar_t **, size_t *, const char **, const char **, const char **);
-file_protected int file_is_json(struct magic_set *, const struct buffer *);
-file_protected int file_is_csv(struct magic_set *, const struct buffer *, int,
- const char *);
-file_protected int file_is_simh(struct magic_set *, const struct buffer *);
-file_protected int file_is_tar(struct magic_set *, const struct buffer *);
-file_protected int file_softmagic(struct magic_set *, const struct buffer *,
- uint16_t *, uint16_t *, int, int);
-file_protected int file_apprentice(struct magic_set *, const char *, int);
-file_protected size_t file_magic_strength(const struct magic *, size_t);
-file_protected int buffer_apprentice(struct magic_set *, struct magic **,
- size_t *, size_t);
-file_protected int file_magicfind(struct magic_set *, const char *,
- struct mlist *);
-file_protected uint64_t file_signextend(struct magic_set *, struct magic *,
- uint64_t);
-file_protected uintmax_t file_varint2uintmax_t(const unsigned char *, int,
- size_t *);
-
-file_protected void file_badread(struct magic_set *);
-file_protected void file_badseek(struct magic_set *);
-file_protected void file_oomem(struct magic_set *, size_t);
-file_protected void file_error(struct magic_set *, int, const char *, ...)
- __attribute__((__format__(__printf__, 3, 4)));
-file_protected void file_magerror(struct magic_set *, const char *, ...)
- __attribute__((__format__(__printf__, 2, 3)));
-file_protected void file_magwarn(struct magic_set *, const char *, ...)
- __attribute__((__format__(__printf__, 2, 3)));
-file_protected void file_mdump(struct magic *);
-file_protected void file_showstr(FILE *, const char *, size_t);
-file_protected size_t file_mbswidth(struct magic_set *, const char *);
-file_protected const char *file_getbuffer(struct magic_set *);
-file_protected ssize_t sread(int, void *, size_t, int);
-file_protected int file_check_mem(struct magic_set *, unsigned int);
-file_protected int file_looks_utf8(const unsigned char *, size_t,
- file_unichar_t *, size_t *);
-file_protected size_t file_pstring_length_size(struct magic_set *,
- const struct magic *);
-file_protected size_t file_pstring_get_length(struct magic_set *,
- const struct magic *, const char *);
-file_protected char * file_printable(struct magic_set *, char *, size_t,
- const char *, size_t);
-#ifdef __EMX__
-file_protected int file_os2_apptype(struct magic_set *, const char *,
- const void *, size_t);
-#endif /* __EMX__ */
-file_protected int file_pipe_closexec(int *);
-file_protected int file_clear_closexec(int);
-file_protected char *file_strtrim(char *);
-
-file_protected void buffer_init(struct buffer *, int, const struct stat *,
- const void *, size_t);
-file_protected void buffer_fini(struct buffer *);
-file_protected int buffer_fill(const struct buffer *);
-
-
-
-file_protected int file_regcomp(struct magic_set *, file_regex_t *,
- const char *, int);
-file_protected int file_regexec(struct magic_set *, file_regex_t *,
- const char *, size_t, regmatch_t *, int);
-file_protected void file_regfree(file_regex_t *);
-
-typedef struct {
- char *buf;
- size_t blen;
- uint32_t offset;
-} file_pushbuf_t;
-
-file_protected file_pushbuf_t *file_push_buffer(struct magic_set *);
-file_protected char *file_pop_buffer(struct magic_set *, file_pushbuf_t *);
-
-#ifndef COMPILE_ONLY
-extern const char *file_names[];
-extern const size_t file_nnames;
-#endif
-
-#ifndef HAVE_PREAD
-ssize_t pread(int, void *, size_t, off_t);
-#endif
-#ifndef HAVE_VASPRINTF
-int vasprintf(char **, const char *, va_list);
-#endif
-#ifndef HAVE_ASPRINTF
-int asprintf(char **, const char *, ...);
-#endif
-#ifndef HAVE_DPRINTF
-int dprintf(int, const char *, ...);
-#endif
-
-#ifndef HAVE_STRLCPY
-size_t strlcpy(char *, const char *, size_t);
-#endif
-#ifndef HAVE_STRLCAT
-size_t strlcat(char *, const char *, size_t);
-#endif
-#ifndef HAVE_STRCASESTR
-char *strcasestr(const char *, const char *);
-#endif
-#ifndef HAVE_GETLINE
-ssize_t getline(char **, size_t *, FILE *);
-ssize_t getdelim(char **, size_t *, int, FILE *);
-#endif
-#ifndef HAVE_CTIME_R
-char *ctime_r(const time_t *, char *);
-#endif
-#ifndef HAVE_ASCTIME_R
-char *asctime_r(const struct tm *, char *);
-#endif
-#ifndef HAVE_GMTIME_R
-struct tm *gmtime_r(const time_t *, struct tm *);
-#endif
-#ifndef HAVE_LOCALTIME_R
-struct tm *localtime_r(const time_t *, struct tm *);
-#endif
-#ifndef HAVE_FMTCHECK
-const char *fmtcheck(const char *, const char *)
- __attribute__((__format_arg__(2)));
-#endif
-
-#ifdef HAVE_LIBSECCOMP
-// basic filter
-// this mode should not interfere with normal operations
-// only some dangerous syscalls are blacklisted
-int enable_sandbox_basic(void);
-
-// enhanced filter
-// this mode allows only the necessary syscalls used during normal operation
-// extensive testing required !!!
-int enable_sandbox_full(void);
-#endif
-
-file_protected const char *file_getprogname(void);
-file_protected void file_setprogname(const char *);
-file_protected void file_err(int, const char *, ...)
- __attribute__((__format__(__printf__, 2, 3), __noreturn__));
-file_protected void file_errx(int, const char *, ...)
- __attribute__((__format__(__printf__, 2, 3), __noreturn__));
-file_protected void file_warn(const char *, ...)
- __attribute__((__format__(__printf__, 1, 2)));
-file_protected void file_warnx(const char *, ...)
- __attribute__((__format__(__printf__, 1, 2)));
-
-#if defined(HAVE_MMAP) && defined(HAVE_SYS_MMAN_H) && !defined(QUICK)
-#define QUICK
-#endif
-
-#ifndef O_BINARY
-#define O_BINARY 0
-#endif
-#ifndef O_NONBLOCK
-#define O_NONBLOCK 0
-#endif
-
-#ifndef __cplusplus
-#if defined(__GNUC__) && (__GNUC__ >= 3)
-#define FILE_RCSID(id) \
-static const char rcsid[] __attribute__((__used__)) = id;
-#else
-#define FILE_RCSID(id) \
-static const char *rcsid(const char *p) { \
- return rcsid(p = id); \
-}
-#endif
-#else
-#define FILE_RCSID(id)
-#endif
-#ifndef __RCSID
-#define __RCSID(a)
-#endif
-
-#endif /* __file_h__ */
diff --git a/contrib/libs/libmagic/src/file/ya.make b/contrib/libs/libmagic/src/file/ya.make
deleted file mode 100644
index edd4ded276..0000000000
--- a/contrib/libs/libmagic/src/file/ya.make
+++ /dev/null
@@ -1,34 +0,0 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-WITHOUT_LICENSE_TEXTS()
-
-LICENSE(BSD-2-Clause)
-
-PEERDIR(
- contrib/libs/libmagic/src
-)
-
-ADDINCL(
- contrib/libs/libmagic
- contrib/libs/libmagic/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_RUNTIME()
-
-CFLAGS(
- -DHAVE_CONFIG_H
- -DMAGIC=\"res@/magic/magic.mgc\"
-)
-
-SRCDIR(contrib/libs/libmagic/src)
-
-SRCS(
- file.c
- seccomp.c
-)
-
-END()
diff --git a/contrib/libs/libmagic/src/file_opts.h b/contrib/libs/libmagic/src/file_opts.h
deleted file mode 100644
index 3aed7dea7e..0000000000
--- a/contrib/libs/libmagic/src/file_opts.h
+++ /dev/null
@@ -1,89 +0,0 @@
-/*
- * Table of command-line options
- *
- * The first column specifies the short name, if any, or 0 if none.
- * The second column specifies the long name.
- * The third column specifies whether it takes a parameter.
- * The fourth column specifies whether is marked as "default"
- * if POSIXLY_CORRECT is defined: 1,
- * if POSIXLY_CORRECT is not defined: 2.
- * The fifth column is the documentation.
- *
- * N.B. The long options' order must correspond to the code in file.c,
- * and OPTSTRING must be kept up-to-date with the short options.
- * Pay particular attention to the numbers of long-only options in the
- * switch statement!
- */
-
-OPT_LONGONLY("help", 0, 0,
- " display this help and exit\n", OPT_HELP)
-OPT('v', "version", 0, 0,
- " output version information and exit\n")
-OPT('m', "magic-file", 1, 0,
- " LIST use LIST as a colon-separated list of magic\n"
- " number files\n")
-OPT('z', "uncompress", 0, 0,
- " try to look inside compressed files\n")
-OPT('Z', "uncompress-noreport", 0, 0,
- " only print the contents of compressed files\n")
-OPT('b', "brief", 0, 0,
- " do not prepend filenames to output lines\n")
-OPT('c', "checking-printout", 0, 0,
- " print the parsed form of the magic file, use in\n"
- " conjunction with -m to debug a new magic file\n"
- " before installing it\n")
-OPT('e', "exclude", 1, 0,
- " TEST exclude TEST from the list of test to be\n"
- " performed for file. Valid tests are:\n"
- " %e\n")
-OPT_LONGONLY("exclude-quiet", 1, 0,
- " TEST like exclude, but ignore unknown tests\n", OPT_EXCLUDE_QUIET)
-OPT('f', "files-from", 1, 0,
- " FILE read the filenames to be examined from FILE\n")
-OPT('F', "separator", 1, 0,
- " STRING use string as separator instead of `:'\n")
-OPT('i', "mime", 0, 0,
- " output MIME type strings (--mime-type and\n"
- " --mime-encoding)\n")
-OPT_LONGONLY("apple", 0, 0,
- " output the Apple CREATOR/TYPE\n", OPT_APPLE)
-OPT_LONGONLY("extension", 0, 0,
- " output a slash-separated list of extensions\n", OPT_EXTENSIONS)
-OPT_LONGONLY("mime-type", 0, 0,
- " output the MIME type\n", OPT_MIME_TYPE)
-OPT_LONGONLY("mime-encoding", 0, 0,
- " output the MIME encoding\n", OPT_MIME_ENCODING)
-OPT('k', "keep-going", 0, 0,
- " don't stop at the first match\n")
-OPT('l', "list", 0, 0,
- " list magic strength\n")
-#ifdef S_IFLNK
-OPT('L', "dereference", 0, 1,
- " follow symlinks (default if POSIXLY_CORRECT is set)")
-OPT('h', "no-dereference", 0, 2,
- " don't follow symlinks (default if POSIXLY_CORRECT is not set)")
-#endif
-OPT('n', "no-buffer", 0, 0,
- " do not buffer output\n")
-OPT('N', "no-pad", 0, 0,
- " do not pad output\n")
-OPT('0', "print0", 0, 0,
- " terminate filenames with ASCII NUL\n")
-#if defined(HAVE_UTIME) || defined(HAVE_UTIMES)
-OPT('p', "preserve-date", 0, 0,
- " preserve access times on files\n")
-#endif
-OPT('P', "parameter", 1, 0,
- " set file engine parameter limits\n"
- " %P\n")
-OPT('r', "raw", 0, 0,
- " don't translate unprintable chars to \\ooo\n")
-OPT('s', "special-files", 0, 0,
- " treat special (block/char devices) files as\n"
- " ordinary ones\n")
-OPT('S', "no-sandbox", 0, 0,
- " disable system call sandboxing\n")
-OPT('C', "compile", 0, 0,
- " compile file specified by -m\n")
-OPT('d', "debug", 0, 0,
- " print debugging messages\n")
diff --git a/contrib/libs/libmagic/src/fmtcheck.c b/contrib/libs/libmagic/src/fmtcheck.c
deleted file mode 100644
index 868d06fd95..0000000000
--- a/contrib/libs/libmagic/src/fmtcheck.c
+++ /dev/null
@@ -1,254 +0,0 @@
-/* $NetBSD: fmtcheck.c,v 1.8 2008/04/28 20:22:59 martin Exp $ */
-
-/*-
- * Copyright (c) 2000 The NetBSD Foundation, Inc.
- * All rights reserved.
- *
- * This code was contributed to The NetBSD Foundation by Allen Briggs.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "file.h"
-#ifndef lint
-FILE_RCSID("@(#)$File: fmtcheck.c,v 1.6 2022/09/24 20:30:13 christos Exp $")
-#endif /* lint */
-
-#include <stdio.h>
-#include <string.h>
-#include <ctype.h>
-
-enum __e_fmtcheck_types {
- FMTCHECK_START,
- FMTCHECK_SHORT,
- FMTCHECK_INT,
- FMTCHECK_LONG,
- FMTCHECK_QUAD,
- FMTCHECK_SHORTPOINTER,
- FMTCHECK_INTPOINTER,
- FMTCHECK_LONGPOINTER,
- FMTCHECK_QUADPOINTER,
- FMTCHECK_DOUBLE,
- FMTCHECK_LONGDOUBLE,
- FMTCHECK_STRING,
- FMTCHECK_WIDTH,
- FMTCHECK_PRECISION,
- FMTCHECK_DONE,
- FMTCHECK_UNKNOWN
-};
-typedef enum __e_fmtcheck_types EFT;
-
-#define RETURN(pf,f,r) do { \
- *(pf) = (f); \
- return r; \
- } /*NOTREACHED*/ /*CONSTCOND*/ while (0)
-
-static EFT
-get_next_format_from_precision(const char **pf)
-{
- int sh, lg, quad, longdouble;
- const char *f;
-
- sh = lg = quad = longdouble = 0;
-
- f = *pf;
- switch (*f) {
- case 'h':
- f++;
- sh = 1;
- break;
- case 'l':
- f++;
- if (!*f) RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (*f == 'l') {
- f++;
- quad = 1;
- } else {
- lg = 1;
- }
- break;
- case 'q':
- f++;
- quad = 1;
- break;
- case 'L':
- f++;
- longdouble = 1;
- break;
-#ifdef WIN32
- case 'I':
- f++;
- if (!*f) RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (*f == '3' && f[1] == '2') {
- f += 2;
- } else if (*f == '6' && f[1] == '4') {
- f += 2;
- quad = 1;
- }
-#ifdef _WIN64
- else {
- quad = 1;
- }
-#endif
- break;
-#endif
- default:
- break;
- }
- if (!*f) RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (strchr("diouxX", *f)) {
- if (longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (lg)
- RETURN(pf,f,FMTCHECK_LONG);
- if (quad)
- RETURN(pf,f,FMTCHECK_QUAD);
- RETURN(pf,f,FMTCHECK_INT);
- }
- if (*f == 'n') {
- if (longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (sh)
- RETURN(pf,f,FMTCHECK_SHORTPOINTER);
- if (lg)
- RETURN(pf,f,FMTCHECK_LONGPOINTER);
- if (quad)
- RETURN(pf,f,FMTCHECK_QUADPOINTER);
- RETURN(pf,f,FMTCHECK_INTPOINTER);
- }
- if (strchr("DOU", *f)) {
- if (sh + lg + quad + longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- RETURN(pf,f,FMTCHECK_LONG);
- }
- if (strchr("eEfg", *f)) {
- if (longdouble)
- RETURN(pf,f,FMTCHECK_LONGDOUBLE);
- if (sh + lg + quad)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- RETURN(pf,f,FMTCHECK_DOUBLE);
- }
- if (*f == 'c') {
- if (sh + lg + quad + longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- RETURN(pf,f,FMTCHECK_INT);
- }
- if (*f == 's') {
- if (sh + lg + quad + longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- RETURN(pf,f,FMTCHECK_STRING);
- }
- if (*f == 'p') {
- if (sh + lg + quad + longdouble)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- RETURN(pf,f,FMTCHECK_LONG);
- }
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- /*NOTREACHED*/
-}
-
-static EFT
-get_next_format_from_width(const char **pf)
-{
- const char *f;
-
- f = *pf;
- if (*f == '.') {
- f++;
- if (*f == '*') {
- RETURN(pf,f,FMTCHECK_PRECISION);
- }
- /* eat any precision (empty is allowed) */
- while (isdigit((unsigned char)*f)) f++;
- if (!*f) RETURN(pf,f,FMTCHECK_UNKNOWN);
- }
- RETURN(pf,f,get_next_format_from_precision(pf));
- /*NOTREACHED*/
-}
-
-static EFT
-get_next_format(const char **pf, EFT eft)
-{
- int infmt;
- const char *f;
-
- if (eft == FMTCHECK_WIDTH) {
- (*pf)++;
- return get_next_format_from_width(pf);
- } else if (eft == FMTCHECK_PRECISION) {
- (*pf)++;
- return get_next_format_from_precision(pf);
- }
-
- f = *pf;
- infmt = 0;
- while (!infmt) {
- f = strchr(f, '%');
- if (f == NULL)
- RETURN(pf,f,FMTCHECK_DONE);
- f++;
- if (!*f)
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- if (*f != '%')
- infmt = 1;
- else
- f++;
- }
-
- /* Eat any of the flags */
- while (*f && (strchr("#0- +", *f)))
- f++;
-
- if (*f == '*') {
- RETURN(pf,f,FMTCHECK_WIDTH);
- }
- /* eat any width */
- while (isdigit((unsigned char)*f)) f++;
- if (!*f) {
- RETURN(pf,f,FMTCHECK_UNKNOWN);
- }
-
- RETURN(pf,f,get_next_format_from_width(pf));
- /*NOTREACHED*/
-}
-
-const char *
-fmtcheck(const char *f1, const char *f2)
-{
- const char *f1p, *f2p;
- EFT f1t, f2t;
-
- if (!f1) return f2;
-
- f1p = f1;
- f1t = FMTCHECK_START;
- f2p = f2;
- f2t = FMTCHECK_START;
- while ((f1t = get_next_format(&f1p, f1t)) != FMTCHECK_DONE) {
- if (f1t == FMTCHECK_UNKNOWN)
- return f2;
- f2t = get_next_format(&f2p, f2t);
- if (f1t != f2t)
- return f2;
- }
- return f1;
-}
diff --git a/contrib/libs/libmagic/src/fsmagic.c b/contrib/libs/libmagic/src/fsmagic.c
deleted file mode 100644
index 5a13dbda50..0000000000
--- a/contrib/libs/libmagic/src/fsmagic.c
+++ /dev/null
@@ -1,435 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * fsmagic - magic based on filesystem info - directory, special files, etc.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: fsmagic.c,v 1.85 2022/12/26 17:31:14 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <string.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <stdlib.h>
-/* Since major is a function on SVR4, we cannot use `ifndef major'. */
-#ifdef MAJOR_IN_MKDEV
-# include <sys/mkdev.h>
-# define HAVE_MAJOR
-#endif
-#ifdef HAVE_SYS_SYSMACROS_H
-# include <sys/sysmacros.h>
-#endif
-#ifdef MAJOR_IN_SYSMACROS
-# define HAVE_MAJOR
-#endif
-#if defined(major) && !defined(HAVE_MAJOR)
-/* Might be defined in sys/types.h. */
-# define HAVE_MAJOR
-#endif
-#ifdef WIN32
-# define WIN32_LEAN_AND_MEAN
-# include <windows.h>
-#endif
-
-#ifndef HAVE_MAJOR
-# define major(dev) (((dev) >> 8) & 0xff)
-# define minor(dev) ((dev) & 0xff)
-#endif
-#undef HAVE_MAJOR
-#ifdef S_IFLNK
-file_private int
-bad_link(struct magic_set *ms, int err, char *buf)
-{
- int mime = ms->flags & MAGIC_MIME;
- if ((mime & MAGIC_MIME_TYPE) &&
- file_printf(ms, "inode/symlink")
- == -1)
- return -1;
- else if (!mime) {
- if (ms->flags & MAGIC_ERROR) {
- file_error(ms, err,
- "broken symbolic link to %s", buf);
- return -1;
- }
- if (file_printf(ms, "broken symbolic link to %s", buf) == -1)
- return -1;
- }
- return 1;
-}
-#endif
-file_private int
-handle_mime(struct magic_set *ms, int mime, const char *str)
-{
- if ((mime & MAGIC_MIME_TYPE)) {
- if (file_printf(ms, "inode/%s", str) == -1)
- return -1;
- if ((mime & MAGIC_MIME_ENCODING) && file_printf(ms,
- "; charset=") == -1)
- return -1;
- }
- if ((mime & MAGIC_MIME_ENCODING) && file_printf(ms, "binary") == -1)
- return -1;
- return 0;
-}
-
-file_protected int
-file_fsmagic(struct magic_set *ms, const char *fn, struct stat *sb)
-{
- int ret, did = 0;
- int mime = ms->flags & MAGIC_MIME;
- int silent = ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION);
-#ifdef S_IFLNK
- char buf[BUFSIZ+4];
- ssize_t nch;
- struct stat tstatbuf;
-#endif
-
- if (fn == NULL)
- return 0;
-
-#define COMMA (did++ ? ", " : "")
- /*
- * Fstat is cheaper but fails for files you don't have read perms on.
- * On 4.2BSD and similar systems, use lstat() to identify symlinks.
- */
-#ifdef S_IFLNK
- if ((ms->flags & MAGIC_SYMLINK) == 0)
- ret = lstat(fn, sb);
- else
-#endif
- ret = stat(fn, sb); /* don't merge into if; see "ret =" above */
-
-#ifdef WIN32
- {
- HANDLE hFile = CreateFile((LPCSTR)fn, 0, FILE_SHARE_DELETE |
- FILE_SHARE_READ | FILE_SHARE_WRITE, NULL, OPEN_EXISTING, 0,
- NULL);
- if (hFile != INVALID_HANDLE_VALUE) {
- /*
- * Stat failed, but we can still open it - assume it's
- * a block device, if nothing else.
- */
- if (ret) {
- sb->st_mode = S_IFBLK;
- ret = 0;
- }
- switch (GetFileType(hFile)) {
- case FILE_TYPE_CHAR:
- sb->st_mode |= S_IFCHR;
- sb->st_mode &= ~S_IFREG;
- break;
- case FILE_TYPE_PIPE:
- sb->st_mode |= S_IFIFO;
- sb->st_mode &= ~S_IFREG;
- break;
- }
- CloseHandle(hFile);
- }
- }
-#endif
-
- if (ret) {
- if (ms->flags & MAGIC_ERROR) {
- file_error(ms, errno, "cannot stat `%s'", fn);
- return -1;
- }
- if (file_printf(ms, "cannot open `%s' (%s)",
- fn, strerror(errno)) == -1)
- return -1;
- return 0;
- }
-
- ret = 1;
- if (!mime && !silent) {
-#ifdef S_ISUID
- if (sb->st_mode & S_ISUID)
- if (file_printf(ms, "%ssetuid", COMMA) == -1)
- return -1;
-#endif
-#ifdef S_ISGID
- if (sb->st_mode & S_ISGID)
- if (file_printf(ms, "%ssetgid", COMMA) == -1)
- return -1;
-#endif
-#ifdef S_ISVTX
- if (sb->st_mode & S_ISVTX)
- if (file_printf(ms, "%ssticky", COMMA) == -1)
- return -1;
-#endif
- }
-
- switch (sb->st_mode & S_IFMT) {
- case S_IFDIR:
- if (mime) {
- if (handle_mime(ms, mime, "directory") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%sdirectory", COMMA) == -1)
- return -1;
- break;
-#ifdef S_IFCHR
- case S_IFCHR:
- /*
- * If -s has been specified, treat character special files
- * like ordinary files. Otherwise, just report that they
- * are block special files and go on to the next file.
- */
- if ((ms->flags & MAGIC_DEVICES) != 0) {
- ret = 0;
- break;
- }
- if (mime) {
- if (handle_mime(ms, mime, "chardevice") == -1)
- return -1;
- } else if (silent) {
- } else {
-#ifdef HAVE_STRUCT_STAT_ST_RDEV
-# ifdef dv_unit
- if (file_printf(ms, "%scharacter special (%d/%d/%d)",
- COMMA, major(sb->st_rdev), dv_unit(sb->st_rdev),
- dv_subunit(sb->st_rdev)) == -1)
- return -1;
-# else
- if (file_printf(ms, "%scharacter special (%ld/%ld)",
- COMMA, (long)major(sb->st_rdev),
- (long)minor(sb->st_rdev)) == -1)
- return -1;
-# endif
-#else
- if (file_printf(ms, "%scharacter special", COMMA) == -1)
- return -1;
-#endif
- }
- break;
-#endif
-#ifdef S_IFBLK
- case S_IFBLK:
- /*
- * If -s has been specified, treat block special files
- * like ordinary files. Otherwise, just report that they
- * are block special files and go on to the next file.
- */
- if ((ms->flags & MAGIC_DEVICES) != 0) {
- ret = 0;
- break;
- }
- if (mime) {
- if (handle_mime(ms, mime, "blockdevice") == -1)
- return -1;
- } else if (silent) {
- } else {
-#ifdef HAVE_STRUCT_STAT_ST_RDEV
-# ifdef dv_unit
- if (file_printf(ms, "%sblock special (%d/%d/%d)",
- COMMA, major(sb->st_rdev), dv_unit(sb->st_rdev),
- dv_subunit(sb->st_rdev)) == -1)
- return -1;
-# else
- if (file_printf(ms, "%sblock special (%ld/%ld)",
- COMMA, (long)major(sb->st_rdev),
- (long)minor(sb->st_rdev)) == -1)
- return -1;
-# endif
-#else
- if (file_printf(ms, "%sblock special", COMMA) == -1)
- return -1;
-#endif
- }
- break;
-#endif
- /* TODO add code to handle V7 MUX and Blit MUX files */
-#ifdef S_IFIFO
- case S_IFIFO:
- if((ms->flags & MAGIC_DEVICES) != 0)
- break;
- if (mime) {
- if (handle_mime(ms, mime, "fifo") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%sfifo (named pipe)", COMMA) == -1)
- return -1;
- break;
-#endif
-#ifdef S_IFDOOR
- case S_IFDOOR:
- if (mime) {
- if (handle_mime(ms, mime, "door") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%sdoor", COMMA) == -1)
- return -1;
- break;
-#endif
-#ifdef S_IFLNK
- case S_IFLNK:
- if ((nch = readlink(fn, buf, BUFSIZ-1)) <= 0) {
- if (ms->flags & MAGIC_ERROR) {
- file_error(ms, errno, "unreadable symlink `%s'",
- fn);
- return -1;
- }
- if (mime) {
- if (handle_mime(ms, mime, "symlink") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms,
- "%sunreadable symlink `%s' (%s)", COMMA, fn,
- strerror(errno)) == -1)
- return -1;
- break;
- }
- buf[nch] = '\0'; /* readlink(2) does not do this */
-
- /* If broken symlink, say so and quit early. */
-#ifdef __linux__
- /*
-		 * Linux procfs/devfs makes symlinks like pipe:[3515864880]
-		 * whose readlink output we can't stat, so stat the
-		 * original filename instead.
- */
- if (stat(fn, &tstatbuf) < 0)
- return bad_link(ms, errno, buf);
-#else
- if (*buf == '/') {
- if (stat(buf, &tstatbuf) < 0)
- return bad_link(ms, errno, buf);
- } else {
- char *tmp;
- char buf2[BUFSIZ+BUFSIZ+4];
-
- if ((tmp = CCAST(char *, strrchr(fn, '/'))) == NULL) {
- tmp = buf; /* in current directory anyway */
- } else {
- if (tmp - fn + 1 > BUFSIZ) {
- if (ms->flags & MAGIC_ERROR) {
- file_error(ms, 0,
- "path too long: `%s'", buf);
- return -1;
- }
- if (mime) {
- if (handle_mime(ms, mime,
- "x-path-too-long") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms,
- "%spath too long: `%s'", COMMA,
- fn) == -1)
- return -1;
- break;
- }
- /* take dir part */
- (void)strlcpy(buf2, fn, sizeof buf2);
- buf2[tmp - fn + 1] = '\0';
- /* plus (rel) link */
- (void)strlcat(buf2, buf, sizeof buf2);
- tmp = buf2;
- }
- if (stat(tmp, &tstatbuf) < 0)
- return bad_link(ms, errno, buf);
- }
-#endif
-
- /* Otherwise, handle it. */
- if ((ms->flags & MAGIC_SYMLINK) != 0) {
- const char *p;
- ms->flags &= MAGIC_SYMLINK;
- p = magic_file(ms, buf);
- ms->flags |= MAGIC_SYMLINK;
- if (p == NULL)
- return -1;
- } else { /* just print what it points to */
- if (mime) {
- if (handle_mime(ms, mime, "symlink") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%ssymbolic link to %s",
- COMMA, buf) == -1)
- return -1;
- }
- break;
-#endif
-#ifdef S_IFSOCK
-#ifndef __COHERENT__
- case S_IFSOCK:
- if (mime) {
- if (handle_mime(ms, mime, "socket") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%ssocket", COMMA) == -1)
- return -1;
- break;
-#endif
-#endif
- case S_IFREG:
- /*
- * regular file, check next possibility
- *
- * If stat() tells us the file has zero length, report here that
- * the file is empty, so we can skip all the work of opening and
- * reading the file.
- * But if the -s option has been given, we skip this
- * optimization, since on some systems, stat() reports zero
- * size for raw disk partitions. (If the block special device
- * really has zero length, the fact that it is empty will be
- * detected and reported correctly when we read the file.)
- */
- if ((ms->flags & MAGIC_DEVICES) == 0 && sb->st_size == 0) {
- if (mime) {
- if (handle_mime(ms, mime, "x-empty") == -1)
- return -1;
- } else if (silent) {
- } else if (file_printf(ms, "%sempty", COMMA) == -1)
- return -1;
- break;
- }
- ret = 0;
- break;
-
- default:
- file_error(ms, 0, "invalid mode 0%o", sb->st_mode);
- return -1;
- /*NOTREACHED*/
- }
-
- if (!silent && !mime && did && ret == 0) {
- if (file_printf(ms, " ") == -1)
- return -1;
- }
- /*
- * If we were looking for extensions or apple (silent) it is not our
- * job to print here, so don't count this as a match.
- */
- if (ret == 1 && silent)
- return 0;
- return ret;
-}
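
The S_IFLNK branch above resolves a relative link target by splicing the directory part of the original path onto the link text before calling stat(). A minimal standalone sketch of that splice, using snprintf() instead of strlcpy()/strlcat() and with length checks omitted (paths and names here are hypothetical):

#include <stdio.h>
#include <string.h>
#include <sys/stat.h>

/*
 * Sketch only: if the link text is not absolute, prepend the directory
 * part of the original path before stat(), as file_fsmagic() does above.
 */
static int
stat_link_target(const char *fn, const char *linktext, struct stat *st)
{
	char path[2 * BUFSIZ + 4];
	const char *slash;

	if (linktext[0] == '/')
		return stat(linktext, st);	/* absolute target */
	if ((slash = strrchr(fn, '/')) == NULL)
		return stat(linktext, st);	/* relative to the cwd */
	/* dir part of fn, then the (relative) link text */
	(void)snprintf(path, sizeof(path), "%.*s%s",
	    (int)(slash - fn + 1), fn, linktext);
	return stat(path, st);
}

As in the original, a stat() failure on the spliced path is what ends up reported as a broken symbolic link.
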
diff --git a/contrib/libs/libmagic/src/funcs.c b/contrib/libs/libmagic/src/funcs.c
deleted file mode 100644
index af15d69886..0000000000
--- a/contrib/libs/libmagic/src/funcs.c
+++ /dev/null
@@ -1,932 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2003.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: funcs.c,v 1.140 2023/05/21 17:08:34 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <assert.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#include <string.h>
-#include <ctype.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h> /* for pipe2() */
-#endif
-#if defined(HAVE_WCHAR_H)
-#include <wchar.h>
-#endif
-#if defined(HAVE_WCTYPE_H)
-#include <wctype.h>
-#endif
-#include <limits.h>
-
-#ifndef SIZE_MAX
-#define SIZE_MAX ((size_t)~0)
-#endif
-
-file_protected char *
-file_copystr(char *buf, size_t blen, size_t width, const char *str)
-{
- if (blen == 0)
- return buf;
- if (width >= blen)
- width = blen - 1;
- memcpy(buf, str, width);
- buf[width] = '\0';
- return buf;
-}
-
-file_private void
-file_clearbuf(struct magic_set *ms)
-{
- free(ms->o.buf);
- ms->o.buf = NULL;
- ms->o.blen = 0;
-}
-
-file_private int
-file_checkfield(char *msg, size_t mlen, const char *what, const char **pp)
-{
- const char *p = *pp;
- int fw = 0;
-
- while (*p && isdigit((unsigned char)*p))
- fw = fw * 10 + (*p++ - '0');
-
- *pp = p;
-
- if (fw < 1024)
- return 1;
- if (msg)
- snprintf(msg, mlen, "field %s too large: %d", what, fw);
-
- return 0;
-}
-
-file_protected int
-file_checkfmt(char *msg, size_t mlen, const char *fmt)
-{
- const char *p;
- for (p = fmt; *p; p++) {
- if (*p != '%')
- continue;
- if (*++p == '%')
- continue;
- // Skip uninteresting.
- while (strchr("#0.'+- ", *p) != NULL)
- p++;
- if (*p == '*') {
- if (msg)
- snprintf(msg, mlen, "* not allowed in format");
- return -1;
- }
-
- if (!file_checkfield(msg, mlen, "width", &p))
- return -1;
-
- if (*p == '.') {
- p++;
- if (!file_checkfield(msg, mlen, "precision", &p))
- return -1;
- }
-
- if (!isalpha((unsigned char)*p)) {
- if (msg)
- snprintf(msg, mlen, "bad format char: %c", *p);
- return -1;
- }
- }
- return 0;
-}
-
-/*
- * Like printf, only we append to a buffer.
- */
-file_protected int
-file_vprintf(struct magic_set *ms, const char *fmt, va_list ap)
-{
- int len;
- char *buf, *newstr;
- char tbuf[1024];
-
- if (ms->event_flags & EVENT_HAD_ERR)
- return 0;
-
- if (file_checkfmt(tbuf, sizeof(tbuf), fmt)) {
- file_clearbuf(ms);
- file_error(ms, 0, "Bad magic format `%s' (%s)", fmt, tbuf);
- return -1;
- }
-
- len = vasprintf(&buf, fmt, ap);
- if (len < 0 || (size_t)len > 1024 || len + ms->o.blen > 1024 * 1024) {
- size_t blen = ms->o.blen;
- free(buf);
- file_clearbuf(ms);
- file_error(ms, 0, "Output buffer space exceeded %d+%"
- SIZE_T_FORMAT "u", len, blen);
- return -1;
- }
-
- if (ms->o.buf != NULL) {
- len = asprintf(&newstr, "%s%s", ms->o.buf, buf);
- free(buf);
- if (len < 0)
- goto out;
- free(ms->o.buf);
- buf = newstr;
- }
- ms->o.buf = buf;
- ms->o.blen = len;
- return 0;
-out:
- file_clearbuf(ms);
- file_error(ms, errno, "vasprintf failed");
- return -1;
-}
-
-file_protected int
-file_printf(struct magic_set *ms, const char *fmt, ...)
-{
- int rv;
- va_list ap;
-
- va_start(ap, fmt);
- rv = file_vprintf(ms, fmt, ap);
- va_end(ap);
- return rv;
-}
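
The comment above file_vprintf() ("like printf, only we append to a buffer") boils down to format-then-concatenate. A hedged sketch of just that core, dropping the format validation and output-size caps enforced above; vasprintf()/asprintf() are the same GNU/BSD extensions the original relies on:

#define _GNU_SOURCE		/* for vasprintf()/asprintf() on glibc */
#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

struct outbuf { char *buf; size_t blen; };

/* Format one piece and append it to the buffer held so far; 0 or -1. */
static int
append_printf(struct outbuf *o, const char *fmt, ...)
{
	va_list ap;
	char *piece, *joined;
	int len;

	va_start(ap, fmt);
	len = vasprintf(&piece, fmt, ap);
	va_end(ap);
	if (len < 0)
		return -1;
	if (o->buf == NULL) {
		o->buf = piece;
		o->blen = (size_t)len;
		return 0;
	}
	len = asprintf(&joined, "%s%s", o->buf, piece);
	free(piece);
	if (len < 0)
		return -1;
	free(o->buf);
	o->buf = joined;
	o->blen = (size_t)len;
	return 0;
}
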
-
-/*
- * error - print best error message possible
- */
-/*VARARGS*/
-__attribute__((__format__(__printf__, 3, 0)))
-file_private void
-file_error_core(struct magic_set *ms, int error, const char *f, va_list va,
- size_t lineno)
-{
- /* Only the first error is ok */
- if (ms->event_flags & EVENT_HAD_ERR)
- return;
- if (lineno != 0) {
- file_clearbuf(ms);
- (void)file_printf(ms, "line %" SIZE_T_FORMAT "u:", lineno);
- }
- if (ms->o.buf && *ms->o.buf)
- (void)file_printf(ms, " ");
- (void)file_vprintf(ms, f, va);
- if (error > 0)
- (void)file_printf(ms, " (%s)", strerror(error));
- ms->event_flags |= EVENT_HAD_ERR;
- ms->error = error;
-}
-
-/*VARARGS*/
-file_protected void
-file_error(struct magic_set *ms, int error, const char *f, ...)
-{
- va_list va;
- va_start(va, f);
- file_error_core(ms, error, f, va, 0);
- va_end(va);
-}
-
-/*
- * Print an error with magic line number.
- */
-/*VARARGS*/
-file_protected void
-file_magerror(struct magic_set *ms, const char *f, ...)
-{
- va_list va;
- va_start(va, f);
- file_error_core(ms, 0, f, va, ms->line);
- va_end(va);
-}
-
-file_protected void
-file_oomem(struct magic_set *ms, size_t len)
-{
- file_error(ms, errno, "cannot allocate %" SIZE_T_FORMAT "u bytes",
- len);
-}
-
-file_protected void
-file_badseek(struct magic_set *ms)
-{
- file_error(ms, errno, "error seeking");
-}
-
-file_protected void
-file_badread(struct magic_set *ms)
-{
- file_error(ms, errno, "error reading");
-}
-
-#ifndef COMPILE_ONLY
-#define FILE_SEPARATOR "\n- "
-
-file_protected int
-file_separator(struct magic_set *ms)
-{
- return file_printf(ms, FILE_SEPARATOR);
-}
-
-static void
-trim_separator(struct magic_set *ms)
-{
- size_t l;
-
- if (ms->o.buf == NULL)
- return;
-
- l = strlen(ms->o.buf);
- if (l < sizeof(FILE_SEPARATOR))
- return;
-
- l -= sizeof(FILE_SEPARATOR) - 1;
- if (strcmp(ms->o.buf + l, FILE_SEPARATOR) != 0)
- return;
-
- ms->o.buf[l] = '\0';
-}
-
-static int
-checkdone(struct magic_set *ms, int *rv)
-{
- if ((ms->flags & MAGIC_CONTINUE) == 0)
- return 1;
- if (file_separator(ms) == -1)
- *rv = -1;
- return 0;
-}
-
-file_protected int
-file_default(struct magic_set *ms, size_t nb)
-{
- if (ms->flags & MAGIC_MIME) {
- if ((ms->flags & MAGIC_MIME_TYPE) &&
- file_printf(ms, "application/%s",
- nb ? "octet-stream" : "x-empty") == -1)
- return -1;
- return 1;
- }
- if (ms->flags & MAGIC_APPLE) {
- if (file_printf(ms, "UNKNUNKN") == -1)
- return -1;
- return 1;
- }
- if (ms->flags & MAGIC_EXTENSION) {
- if (file_printf(ms, "???") == -1)
- return -1;
- return 1;
- }
- return 0;
-}
-
-/*
- * The magic detection functions return:
- * 1: found
- * 0: not found
- * -1: error
- */
-/*ARGSUSED*/
-file_protected int
-file_buffer(struct magic_set *ms, int fd, struct stat *st,
- const char *inname __attribute__ ((__unused__)),
- const void *buf, size_t nb)
-{
- int m = 0, rv = 0, looks_text = 0;
- const char *code = NULL;
- const char *code_mime = "binary";
- const char *def = "data";
- const char *ftype = NULL;
- char *rbuf = NULL;
- struct buffer b;
-
- buffer_init(&b, fd, st, buf, nb);
- ms->mode = b.st.st_mode;
-
- if (nb == 0) {
- def = "empty";
- goto simple;
- } else if (nb == 1) {
- def = "very short file (no magic)";
- goto simple;
- }
-
- if ((ms->flags & MAGIC_NO_CHECK_ENCODING) == 0) {
- looks_text = file_encoding(ms, &b, NULL, 0,
- &code, &code_mime, &ftype);
- }
-
-#ifdef __EMX__
- if ((ms->flags & MAGIC_NO_CHECK_APPTYPE) == 0 && inname) {
- m = file_os2_apptype(ms, inname, &b);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try os2_apptype %d]\n", m);
- switch (m) {
- case -1:
- return -1;
- case 0:
- break;
- default:
- return 1;
- }
- }
-#endif
-#if HAVE_FORK
- /* try compression stuff */
- if ((ms->flags & MAGIC_NO_CHECK_COMPRESS) == 0) {
- m = file_zmagic(ms, &b, inname);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try zmagic %d]\n", m);
- if (m) {
- goto done_encoding;
- }
- }
-#endif
- /* Check if we have a tar file */
- if ((ms->flags & MAGIC_NO_CHECK_TAR) == 0) {
- m = file_is_tar(ms, &b);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try tar %d]\n", m);
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-
- /* Check if we have a JSON file */
- if ((ms->flags & MAGIC_NO_CHECK_JSON) == 0) {
- m = file_is_json(ms, &b);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try json %d]\n", m);
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-
- /* Check if we have a CSV file */
- if ((ms->flags & MAGIC_NO_CHECK_CSV) == 0) {
- m = file_is_csv(ms, &b, looks_text, code);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try csv %d]\n", m);
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-
- /* Check if we have a SIMH tape file */
- if ((ms->flags & MAGIC_NO_CHECK_SIMH) == 0) {
- m = file_is_simh(ms, &b);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try simh %d]\n", m);
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-
- /* Check if we have a CDF file */
- if ((ms->flags & MAGIC_NO_CHECK_CDF) == 0) {
- m = file_trycdf(ms, &b);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try cdf %d]\n", m);
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-#ifdef BUILTIN_ELF
- if ((ms->flags & MAGIC_NO_CHECK_ELF) == 0 && nb > 5 && fd != -1) {
- file_pushbuf_t *pb;
- /*
- * We matched something in the file, so this
- * *might* be an ELF file, and the file is at
- * least 5 bytes long, so if it's an ELF file
- * it has at least one byte past the ELF magic
- * number - try extracting information from the
- * ELF headers that cannot easily be extracted
-		 * with rules in the magic file.  We don't
- * print the information yet.
- */
- if ((pb = file_push_buffer(ms)) == NULL)
- return -1;
-
- rv = file_tryelf(ms, &b);
- rbuf = file_pop_buffer(ms, pb);
- if (rv == -1) {
- free(rbuf);
- rbuf = NULL;
- }
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try elf %d]\n", m);
- }
-#endif
-
- /* try soft magic tests */
- if ((ms->flags & MAGIC_NO_CHECK_SOFT) == 0) {
- m = file_softmagic(ms, &b, NULL, NULL, BINTEST, looks_text);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try softmagic %d]\n", m);
- if (m == 1 && rbuf) {
- if (file_printf(ms, "%s", rbuf) == -1)
- goto done;
- }
- if (m) {
- if (checkdone(ms, &rv))
- goto done;
- }
- }
-
- /* try text properties */
- if ((ms->flags & MAGIC_NO_CHECK_TEXT) == 0) {
-
- m = file_ascmagic(ms, &b, looks_text);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void)fprintf(stderr, "[try ascmagic %d]\n", m);
- if (m) {
- goto done;
- }
- }
-
-simple:
- /* give up */
- if (m == 0) {
- m = 1;
- rv = file_default(ms, nb);
- if (rv == 0)
- if (file_printf(ms, "%s", def) == -1)
- rv = -1;
- }
- done:
- trim_separator(ms);
- if ((ms->flags & MAGIC_MIME_ENCODING) != 0) {
- if (ms->flags & MAGIC_MIME_TYPE)
- if (file_printf(ms, "; charset=") == -1)
- rv = -1;
- if (file_printf(ms, "%s", code_mime) == -1)
- rv = -1;
- }
-#if HAVE_FORK
- done_encoding:
-#endif
- free(rbuf);
- buffer_fini(&b);
- if (rv)
- return rv;
-
- return m;
-}
-#endif
-
-file_protected int
-file_reset(struct magic_set *ms, int checkloaded)
-{
- if (checkloaded && ms->mlist[0] == NULL) {
- file_error(ms, 0, "no magic files loaded");
- return -1;
- }
- file_clearbuf(ms);
- if (ms->o.pbuf) {
- free(ms->o.pbuf);
- ms->o.pbuf = NULL;
- }
- ms->event_flags &= ~EVENT_HAD_ERR;
- ms->error = -1;
- return 0;
-}
-
-#define OCTALIFY(n, o) \
- /*LINTED*/ \
- (void)(*(n)++ = '\\', \
- *(n)++ = ((CAST(uint32_t, *(o)) >> 6) & 3) + '0', \
- *(n)++ = ((CAST(uint32_t, *(o)) >> 3) & 7) + '0', \
- *(n)++ = ((CAST(uint32_t, *(o)) >> 0) & 7) + '0', \
- (o)++)
-
-file_protected const char *
-file_getbuffer(struct magic_set *ms)
-{
- char *pbuf, *op, *np;
- size_t psize, len;
-
- if (ms->event_flags & EVENT_HAD_ERR)
- return NULL;
-
- if (ms->flags & MAGIC_RAW)
- return ms->o.buf;
-
- if (ms->o.buf == NULL)
- return NULL;
-
- /* * 4 is for octal representation, + 1 is for NUL */
- len = strlen(ms->o.buf);
- if (len > (SIZE_MAX - 1) / 4) {
- file_oomem(ms, len);
- return NULL;
- }
- psize = len * 4 + 1;
- if ((pbuf = CAST(char *, realloc(ms->o.pbuf, psize))) == NULL) {
- file_oomem(ms, psize);
- return NULL;
- }
- ms->o.pbuf = pbuf;
-
-#if defined(HAVE_WCHAR_H) && defined(HAVE_MBRTOWC) && defined(HAVE_WCWIDTH)
- {
- mbstate_t state;
- wchar_t nextchar;
- int mb_conv = 1;
- size_t bytesconsumed;
- char *eop;
- (void)memset(&state, 0, sizeof(mbstate_t));
-
- np = ms->o.pbuf;
- op = ms->o.buf;
- eop = op + len;
-
- while (op < eop) {
- bytesconsumed = mbrtowc(&nextchar, op,
- CAST(size_t, eop - op), &state);
- if (bytesconsumed == CAST(size_t, -1) ||
- bytesconsumed == CAST(size_t, -2)) {
- mb_conv = 0;
- break;
- }
-
- if (iswprint(nextchar)) {
- (void)memcpy(np, op, bytesconsumed);
- op += bytesconsumed;
- np += bytesconsumed;
- } else {
- while (bytesconsumed-- > 0)
- OCTALIFY(np, op);
- }
- }
- *np = '\0';
-
- /* Parsing succeeded as a multi-byte sequence */
- if (mb_conv != 0)
- return ms->o.pbuf;
- }
-#endif
-
- for (np = ms->o.pbuf, op = ms->o.buf; *op;) {
- if (isprint(CAST(unsigned char, *op))) {
- *np++ = *op++;
- } else {
- OCTALIFY(np, op);
- }
- }
- *np = '\0';
- return ms->o.pbuf;
-}
-
-file_protected int
-file_check_mem(struct magic_set *ms, unsigned int level)
-{
- size_t len;
-
- if (level >= ms->c.len) {
- len = (ms->c.len = 20 + level) * sizeof(*ms->c.li);
- ms->c.li = CAST(struct level_info *, (ms->c.li == NULL) ?
- malloc(len) :
- realloc(ms->c.li, len));
- if (ms->c.li == NULL) {
- file_oomem(ms, len);
- return -1;
- }
- }
- ms->c.li[level].got_match = 0;
-#ifdef ENABLE_CONDITIONALS
- ms->c.li[level].last_match = 0;
- ms->c.li[level].last_cond = COND_NONE;
-#endif /* ENABLE_CONDITIONALS */
- return 0;
-}
-
-file_protected size_t
-file_printedlen(const struct magic_set *ms)
-{
- return ms->o.blen;
-}
-
-file_protected int
-file_replace(struct magic_set *ms, const char *pat, const char *rep)
-{
- file_regex_t rx;
- int rc, rv = -1;
-
- rc = file_regcomp(ms, &rx, pat, REG_EXTENDED);
- if (rc == 0) {
- regmatch_t rm;
- int nm = 0;
- while (file_regexec(ms, &rx, ms->o.buf, 1, &rm, 0) == 0) {
- ms->o.buf[rm.rm_so] = '\0';
- if (file_printf(ms, "%s%s", rep,
- rm.rm_eo != 0 ? ms->o.buf + rm.rm_eo : "") == -1)
- goto out;
- nm++;
- }
- rv = nm;
- }
-out:
- file_regfree(&rx);
- return rv;
-}
-
-file_private int
-check_regex(struct magic_set *ms, const char *pat)
-{
- char sbuf[512];
- unsigned char oc = '\0';
- const char *p;
-
- for (p = pat; *p; p++) {
- unsigned char c = *p;
- // Avoid repetition
- if (c == oc && strchr("?*+{", c) != NULL) {
- size_t len = strlen(pat);
- file_magwarn(ms,
- "repetition-operator operand `%c' "
- "invalid in regex `%s'", c,
- file_printable(ms, sbuf, sizeof(sbuf), pat, len));
- return -1;
- }
- oc = c;
- if (isprint(c) || isspace(c) || c == '\b'
- || c == 0x8a) // XXX: apple magic fixme
- continue;
- size_t len = strlen(pat);
- file_magwarn(ms,
- "non-ascii characters in regex \\%#o `%s'",
- c, file_printable(ms, sbuf, sizeof(sbuf), pat, len));
- return -1;
- }
- return 0;
-}
-
-file_protected int
-file_regcomp(struct magic_set *ms file_locale_used, file_regex_t *rx,
- const char *pat, int flags)
-{
- if (check_regex(ms, pat) == -1)
- return -1;
-
-#ifdef USE_C_LOCALE
- locale_t old = uselocale(ms->c_lc_ctype);
- assert(old != NULL);
-#else
- char old[1024];
- strlcpy(old, setlocale(LC_CTYPE, NULL), sizeof(old));
- (void)setlocale(LC_CTYPE, "C");
-#endif
- int rc;
- rc = regcomp(rx, pat, flags);
-
-#ifdef USE_C_LOCALE
- uselocale(old);
-#else
- (void)setlocale(LC_CTYPE, old);
-#endif
- if (rc > 0 && (ms->flags & MAGIC_CHECK)) {
- char errmsg[512], buf[512];
-
- (void)regerror(rc, rx, errmsg, sizeof(errmsg));
- file_magerror(ms, "regex error %d for `%s', (%s)", rc,
- file_printable(ms, buf, sizeof(buf), pat, strlen(pat)),
- errmsg);
- }
- return rc;
-}
-
-/*ARGSUSED*/
-file_protected int
-file_regexec(struct magic_set *ms file_locale_used, file_regex_t *rx,
- const char *str, size_t nmatch, regmatch_t* pmatch, int eflags)
-{
-#ifdef USE_C_LOCALE
- locale_t old = uselocale(ms->c_lc_ctype);
- assert(old != NULL);
-#else
- char old[1024];
- strlcpy(old, setlocale(LC_CTYPE, NULL), sizeof(old));
- (void)setlocale(LC_CTYPE, "C");
-#endif
- int rc;
- /* XXX: force initialization because glibc does not always do this */
- if (nmatch != 0)
- memset(pmatch, 0, nmatch * sizeof(*pmatch));
- rc = regexec(rx, str, nmatch, pmatch, eflags);
-#ifdef USE_C_LOCALE
- uselocale(old);
-#else
- (void)setlocale(LC_CTYPE, old);
-#endif
- return rc;
-}
-
-file_protected void
-file_regfree(file_regex_t *rx)
-{
- regfree(rx);
-}
-
-file_protected file_pushbuf_t *
-file_push_buffer(struct magic_set *ms)
-{
- file_pushbuf_t *pb;
-
- if (ms->event_flags & EVENT_HAD_ERR)
- return NULL;
-
- if ((pb = (CAST(file_pushbuf_t *, malloc(sizeof(*pb))))) == NULL)
- return NULL;
-
- pb->buf = ms->o.buf;
- pb->blen = ms->o.blen;
- pb->offset = ms->offset;
-
- ms->o.buf = NULL;
- ms->o.blen = 0;
- ms->offset = 0;
-
- return pb;
-}
-
-file_protected char *
-file_pop_buffer(struct magic_set *ms, file_pushbuf_t *pb)
-{
- char *rbuf;
-
- if (ms->event_flags & EVENT_HAD_ERR) {
- free(pb->buf);
- free(pb);
- return NULL;
- }
-
- rbuf = ms->o.buf;
-
- ms->o.buf = pb->buf;
- ms->o.blen = pb->blen;
- ms->offset = pb->offset;
-
- free(pb);
- return rbuf;
-}
-
-/*
- * convert string to ascii printable format.
- */
-file_protected char *
-file_printable(struct magic_set *ms, char *buf, size_t bufsiz,
- const char *str, size_t slen)
-{
- char *ptr, *eptr = buf + bufsiz - 1;
- const unsigned char *s = RCAST(const unsigned char *, str);
- const unsigned char *es = s + slen;
-
- for (ptr = buf; ptr < eptr && s < es && *s; s++) {
- if ((ms->flags & MAGIC_RAW) != 0 || isprint(*s)) {
- *ptr++ = *s;
- continue;
- }
- if (ptr >= eptr - 3)
- break;
- *ptr++ = '\\';
- *ptr++ = ((CAST(unsigned int, *s) >> 6) & 7) + '0';
- *ptr++ = ((CAST(unsigned int, *s) >> 3) & 7) + '0';
- *ptr++ = ((CAST(unsigned int, *s) >> 0) & 7) + '0';
- }
- *ptr = '\0';
- return buf;
-}
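
file_printable() above passes printable bytes through and rewrites everything else as a three-digit octal escape, the same rule the OCTALIFY macro implements. Reduced to a standalone helper:

#include <ctype.h>
#include <stddef.h>
#include <stdio.h>

/* Print a byte string, escaping non-printable bytes as \ooo. */
static void
print_escaped(const unsigned char *s, size_t slen)
{
	size_t i;

	for (i = 0; i < slen; i++) {
		if (isprint(s[i]))
			putchar(s[i]);
		else
			printf("\\%03o", s[i]);
	}
	putchar('\n');
}
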
-
-struct guid {
- uint32_t data1;
- uint16_t data2;
- uint16_t data3;
- uint8_t data4[8];
-};
-
-file_protected int
-file_parse_guid(const char *s, uint64_t *guid)
-{
- struct guid *g = CAST(struct guid *, CAST(void *, guid));
-#ifndef WIN32
- return sscanf(s,
- "%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
- &g->data1, &g->data2, &g->data3, &g->data4[0], &g->data4[1],
- &g->data4[2], &g->data4[3], &g->data4[4], &g->data4[5],
- &g->data4[6], &g->data4[7]) == 11 ? 0 : -1;
-#else
- /* MS-Windows runtime doesn't support %hhx, except under
- non-default __USE_MINGW_ANSI_STDIO. */
- uint16_t data16[8];
- int rv = sscanf(s, "%8x-%4hx-%4hx-%2hx%2hx-%2hx%2hx%2hx%2hx%2hx%2hx",
- &g->data1, &g->data2, &g->data3, &data16[0], &data16[1],
- &data16[2], &data16[3], &data16[4], &data16[5],
- &data16[6], &data16[7]) == 11 ? 0 : -1;
- int i;
- for (i = 0; i < 8; i++)
- g->data4[i] = data16[i];
- return rv;
-#endif
-}
-
-file_protected int
-file_print_guid(char *str, size_t len, const uint64_t *guid)
-{
- const struct guid *g = CAST(const struct guid *,
- CAST(const void *, guid));
-
-#ifndef WIN32
- return snprintf(str, len, "%.8X-%.4hX-%.4hX-%.2hhX%.2hhX-"
- "%.2hhX%.2hhX%.2hhX%.2hhX%.2hhX%.2hhX",
- g->data1, g->data2, g->data3, g->data4[0], g->data4[1],
- g->data4[2], g->data4[3], g->data4[4], g->data4[5],
- g->data4[6], g->data4[7]);
-#else
- return snprintf(str, len, "%.8X-%.4hX-%.4hX-%.2hX%.2hX-"
- "%.2hX%.2hX%.2hX%.2hX%.2hX%.2hX",
- g->data1, g->data2, g->data3, g->data4[0], g->data4[1],
- g->data4[2], g->data4[3], g->data4[4], g->data4[5],
- g->data4[6], g->data4[7]);
-#endif
-}
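
file_parse_guid() and file_print_guid() above expect the usual 8-4-4-4-12 hexadecimal text form. A self-contained round-trip sketch using the same struct layout and (non-Windows) format strings; the GUID value is just an arbitrary example:

#include <stdio.h>
#include <stdint.h>

struct guid_example {		/* same layout as struct guid above */
	uint32_t data1;
	uint16_t data2;
	uint16_t data3;
	uint8_t  data4[8];
};

int
main(void)
{
	const char *s = "6ba7b810-9dad-11d1-80b4-00c04fd430c8";
	struct guid_example g;

	if (sscanf(s,
	    "%8x-%4hx-%4hx-%2hhx%2hhx-%2hhx%2hhx%2hhx%2hhx%2hhx%2hhx",
	    &g.data1, &g.data2, &g.data3, &g.data4[0], &g.data4[1],
	    &g.data4[2], &g.data4[3], &g.data4[4], &g.data4[5],
	    &g.data4[6], &g.data4[7]) != 11)
		return 1;
	printf("%.8X-%.4hX-%.4hX-%.2hhX%.2hhX-"
	    "%.2hhX%.2hhX%.2hhX%.2hhX%.2hhX%.2hhX\n",
	    g.data1, g.data2, g.data3, g.data4[0], g.data4[1],
	    g.data4[2], g.data4[3], g.data4[4], g.data4[5],
	    g.data4[6], g.data4[7]);
	return 0;
}
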
-
-file_protected int
-file_pipe_closexec(int *fds)
-{
-#ifdef __MINGW32__
- return 0;
-#elif defined(HAVE_PIPE2)
- return pipe2(fds, O_CLOEXEC);
-#else
- if (pipe(fds) == -1)
- return -1;
-# ifdef F_SETFD
- (void)fcntl(fds[0], F_SETFD, FD_CLOEXEC);
- (void)fcntl(fds[1], F_SETFD, FD_CLOEXEC);
-# endif
- return 0;
-#endif
-}
-
-file_protected int
-file_clear_closexec(int fd) {
-#ifdef F_SETFD
- return fcntl(fd, F_SETFD, 0);
-#else
- return 0;
-#endif
-}
-
-file_protected char *
-file_strtrim(char *str)
-{
- char *last;
-
- while (isspace(CAST(unsigned char, *str)))
- str++;
- last = str;
- while (*last)
- last++;
- --last;
- while (isspace(CAST(unsigned char, *last)))
- last--;
- *++last = '\0';
- return str;
-}
diff --git a/contrib/libs/libmagic/src/is_csv.c b/contrib/libs/libmagic/src/is_csv.c
deleted file mode 100644
index 7b95e3b851..0000000000
--- a/contrib/libs/libmagic/src/is_csv.c
+++ /dev/null
@@ -1,198 +0,0 @@
-/*-
- * Copyright (c) 2019 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Parse CSV object serialization format (RFC-4180, RFC-7111)
- */
-
-#ifndef TEST
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: is_csv.c,v 1.13 2023/07/17 16:08:17 christos Exp $")
-#endif
-
-#include <string.h>
-#include "magic.h"
-#else
-#include <sys/types.h>
-#endif
-
-
-#ifdef DEBUG
-#include <stdio.h>
-#define DPRINTF(fmt, ...) printf(fmt, __VA_ARGS__)
-#else
-#define DPRINTF(fmt, ...)
-#endif
-
-/*
- * if CSV_LINES == 0:
- * check all the lines in the buffer
- * otherwise:
- *	check only up to the number of lines specified
- *
- * the last line is never counted if it does not end in CRLF
- */
-#ifndef CSV_LINES
-#define CSV_LINES 10
-#endif
-
-static int csv_parse(const unsigned char *, const unsigned char *);
-
-static const unsigned char *
-eatquote(const unsigned char *uc, const unsigned char *ue)
-{
- int quote = 0;
-
- while (uc < ue) {
- unsigned char c = *uc++;
- if (c != '"') {
- // We already got one, done.
- if (quote) {
- return --uc;
- }
- continue;
- }
- if (quote) {
- // quote-quote escapes
- quote = 0;
- continue;
- }
- // first quote
- quote = 1;
- }
- return ue;
-}
-
-static int
-csv_parse(const unsigned char *uc, const unsigned char *ue)
-{
- size_t nf = 0, tf = 0, nl = 0;
-
- while (uc < ue) {
- switch (*uc++) {
- case '"':
- // Eat until the matching quote
- uc = eatquote(uc, ue);
- break;
- case ',':
- nf++;
- break;
- case '\n':
- DPRINTF("%zu %zu %zu\n", nl, nf, tf);
- nl++;
-#if CSV_LINES
- if (nl == CSV_LINES)
- return tf != 0 && tf == nf;
-#endif
- if (tf == 0) {
- // First time and no fields, give up
- if (nf == 0)
- return 0;
- // First time, set the number of fields
- tf = nf;
- } else if (tf != nf) {
- // Field number mismatch, we are done.
- return 0;
- }
- nf = 0;
- break;
- default:
- break;
- }
- }
- return tf && nl >= 2;
-}
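
csv_parse() above accepts a buffer as CSV when at least two newline-terminated lines carry the same non-zero number of comma-separated fields, and quoted fields may contain commas. Two hand-made inputs that illustrate the rule:

/* Accepted: three fields on both lines (the quoted comma does not count). */
static const char csv_ok[] =
    "name,age,city\n"
    "\"Doe, Jane\",42,Berlin\n";

/* Rejected: the field count changes between lines. */
static const char csv_bad[] =
    "name,age,city\n"
    "only,two\n";
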
-
-#ifndef TEST
-int
-file_is_csv(struct magic_set *ms, const struct buffer *b, int looks_text,
- const char *code)
-{
- const unsigned char *uc = CAST(const unsigned char *, b->fbuf);
- const unsigned char *ue = uc + b->flen;
- int mime = ms->flags & MAGIC_MIME;
-
- if (!looks_text)
- return 0;
-
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION)) != 0)
- return 0;
-
- if (!csv_parse(uc, ue))
- return 0;
-
- if (mime == MAGIC_MIME_ENCODING)
- return 1;
-
- if (mime) {
- if (file_printf(ms, "text/csv") == -1)
- return -1;
- return 1;
- }
-
- if (file_printf(ms, "CSV %s%stext", code ? code : "",
- code ? " " : "") == -1)
- return -1;
-
- return 1;
-}
-
-#else
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <err.h>
-
-int
-main(int argc, char *argv[])
-{
- int fd;
- struct stat st;
- unsigned char *p;
-
- if ((fd = open(argv[1], O_RDONLY)) == -1)
- err(EXIT_FAILURE, "Can't open `%s'", argv[1]);
-
- if (fstat(fd, &st) == -1)
- err(EXIT_FAILURE, "Can't stat `%s'", argv[1]);
-
- if ((p = CAST(char *, malloc(st.st_size))) == NULL)
- err(EXIT_FAILURE, "Can't allocate %jd bytes",
- (intmax_t)st.st_size);
- if (read(fd, p, st.st_size) != st.st_size)
- err(EXIT_FAILURE, "Can't read %jd bytes",
- (intmax_t)st.st_size);
- printf("is csv %d\n", csv_parse(p, p + st.st_size));
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/is_json.c b/contrib/libs/libmagic/src/is_json.c
deleted file mode 100644
index eca2a49ee7..0000000000
--- a/contrib/libs/libmagic/src/is_json.c
+++ /dev/null
@@ -1,500 +0,0 @@
-/*-
- * Copyright (c) 2018 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Parse JSON object serialization format (RFC-7159)
- */
-
-#ifndef TEST
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: is_json.c,v 1.30 2022/09/27 19:12:40 christos Exp $")
-#endif
-
-#include "magic.h"
-#else
-#include <stdio.h>
-#include <stddef.h>
-#endif
-#include <string.h>
-
-#ifdef DEBUG
-#include <stdio.h>
-#define DPRINTF(a, b, c) \
- printf("%*s%s [%.2x/%c] %.*s\n", (int)lvl, "", (a), *(b), *(b), \
- (int)(b - c), (const char *)(c))
-#define __file_debugused
-#else
-#define DPRINTF(a, b, c) do { } while (/*CONSTCOND*/0)
-#define __file_debugused __attribute__((__unused__))
-#endif
-
-#define JSON_ARRAY 0
-#define JSON_CONSTANT 1
-#define JSON_NUMBER 2
-#define JSON_OBJECT 3
-#define JSON_STRING 4
-#define JSON_ARRAYN 5
-#define JSON_MAX 6
-
-/*
- * if JSON_COUNT != 0:
- * count all the objects, require that we have the whole data file
- * otherwise:
- * stop if we find an object or an array
- */
-#ifndef JSON_COUNT
-#define JSON_COUNT 0
-#endif
-
-static int json_parse(const unsigned char **, const unsigned char *, size_t *,
- size_t);
-
-static int
-json_isspace(const unsigned char uc)
-{
- switch (uc) {
- case ' ':
- case '\n':
- case '\r':
- case '\t':
- return 1;
- default:
- return 0;
- }
-}
-
-static int
-json_isdigit(unsigned char uc)
-{
- switch (uc) {
- case '0': case '1': case '2': case '3': case '4':
- case '5': case '6': case '7': case '8': case '9':
- return 1;
- default:
- return 0;
- }
-}
-
-static int
-json_isxdigit(unsigned char uc)
-{
- if (json_isdigit(uc))
- return 1;
- switch (uc) {
- case 'a': case 'b': case 'c': case 'd': case 'e': case 'f':
- case 'A': case 'B': case 'C': case 'D': case 'E': case 'F':
- return 1;
- default:
- return 0;
- }
-}
-
-static const unsigned char *
-json_skip_space(const unsigned char *uc, const unsigned char *ue)
-{
- while (uc < ue && json_isspace(*uc))
- uc++;
- return uc;
-}
-
-/*ARGSUSED*/
-static int
-json_parse_string(const unsigned char **ucp, const unsigned char *ue,
- size_t lvl __file_debugused)
-{
- const unsigned char *uc = *ucp;
- size_t i;
-
- DPRINTF("Parse string: ", uc, *ucp);
- while (uc < ue) {
- switch (*uc++) {
- case '\0':
- goto out;
- case '\\':
- if (uc == ue)
- goto out;
- switch (*uc++) {
- case '\0':
- goto out;
- case '"':
- case '\\':
- case '/':
- case 'b':
- case 'f':
- case 'n':
- case 'r':
- case 't':
- continue;
- case 'u':
- if (ue - uc < 4) {
- uc = ue;
- goto out;
- }
- for (i = 0; i < 4; i++)
- if (!json_isxdigit(*uc++))
- goto out;
- continue;
- default:
- goto out;
- }
- case '"':
- DPRINTF("Good string: ", uc, *ucp);
- *ucp = uc;
- return 1;
- default:
- continue;
- }
- }
-out:
- DPRINTF("Bad string: ", uc, *ucp);
- *ucp = uc;
- return 0;
-}
-
-static int
-json_parse_array(const unsigned char **ucp, const unsigned char *ue,
- size_t *st, size_t lvl)
-{
- const unsigned char *uc = *ucp;
-
- DPRINTF("Parse array: ", uc, *ucp);
- while (uc < ue) {
- uc = json_skip_space(uc, ue);
- if (uc == ue)
- goto out;
- if (*uc == ']')
- goto done;
- if (!json_parse(&uc, ue, st, lvl + 1))
- goto out;
- if (uc == ue)
- goto out;
- switch (*uc) {
- case ',':
- uc++;
- continue;
- case ']':
- done:
- st[JSON_ARRAYN]++;
- DPRINTF("Good array: ", uc, *ucp);
- *ucp = uc + 1;
- return 1;
- default:
- goto out;
- }
- }
-out:
- DPRINTF("Bad array: ", uc, *ucp);
- *ucp = uc;
- return 0;
-}
-
-static int
-json_parse_object(const unsigned char **ucp, const unsigned char *ue,
- size_t *st, size_t lvl)
-{
- const unsigned char *uc = *ucp;
- DPRINTF("Parse object: ", uc, *ucp);
- while (uc < ue) {
- uc = json_skip_space(uc, ue);
- if (uc == ue)
- goto out;
- if (*uc == '}') {
- uc++;
- goto done;
- }
- if (*uc++ != '"') {
- DPRINTF("not string", uc, *ucp);
- goto out;
- }
- DPRINTF("next field", uc, *ucp);
- if (!json_parse_string(&uc, ue, lvl)) {
- DPRINTF("not string", uc, *ucp);
- goto out;
- }
- uc = json_skip_space(uc, ue);
- if (uc == ue)
- goto out;
- if (*uc++ != ':') {
- DPRINTF("not colon", uc, *ucp);
- goto out;
- }
- if (!json_parse(&uc, ue, st, lvl + 1)) {
- DPRINTF("not json", uc, *ucp);
- goto out;
- }
- if (uc == ue)
- goto out;
- switch (*uc++) {
- case ',':
- continue;
- case '}': /* { */
- done:
- DPRINTF("Good object: ", uc, *ucp);
- *ucp = uc;
- return 1;
- default:
- DPRINTF("not more", uc, *ucp);
- *ucp = uc - 1;
- goto out;
- }
- }
-out:
- DPRINTF("Bad object: ", uc, *ucp);
- *ucp = uc;
- return 0;
-}
-
-/*ARGSUSED*/
-static int
-json_parse_number(const unsigned char **ucp, const unsigned char *ue,
- size_t lvl __file_debugused)
-{
- const unsigned char *uc = *ucp;
- int got = 0;
-
- DPRINTF("Parse number: ", uc, *ucp);
- if (uc == ue)
- return 0;
- if (*uc == '-')
- uc++;
-
- for (; uc < ue; uc++) {
- if (!json_isdigit(*uc))
- break;
- got = 1;
- }
- if (uc == ue)
- goto out;
- if (*uc == '.')
- uc++;
- for (; uc < ue; uc++) {
- if (!json_isdigit(*uc))
- break;
- got = 1;
- }
- if (uc == ue)
- goto out;
- if (got && (*uc == 'e' || *uc == 'E')) {
- uc++;
- got = 0;
- if (uc == ue)
- goto out;
- if (*uc == '+' || *uc == '-')
- uc++;
- for (; uc < ue; uc++) {
- if (!json_isdigit(*uc))
- break;
- got = 1;
- }
- }
-out:
- if (!got)
- DPRINTF("Bad number: ", uc, *ucp);
- else
- DPRINTF("Good number: ", uc, *ucp);
- *ucp = uc;
- return got;
-}
-
-/*ARGSUSED*/
-static int
-json_parse_const(const unsigned char **ucp, const unsigned char *ue,
- const char *str, size_t len, size_t lvl __file_debugused)
-{
- const unsigned char *uc = *ucp;
-
- DPRINTF("Parse const: ", uc, *ucp);
- *ucp += --len - 1;
- if (*ucp > ue)
- *ucp = ue;
- for (; uc < ue && --len;) {
- if (*uc++ != *++str) {
- DPRINTF("Bad const: ", uc, *ucp);
- return 0;
- }
- }
- DPRINTF("Good const: ", uc, *ucp);
- return 1;
-}
-
-static int
-json_parse(const unsigned char **ucp, const unsigned char *ue,
- size_t *st, size_t lvl)
-{
- const unsigned char *uc, *ouc;
- int rv = 0;
- int t;
-
- ouc = uc = json_skip_space(*ucp, ue);
- if (uc == ue)
- goto out;
-
- // Avoid recursion
- if (lvl > 500) {
- DPRINTF("Too many levels", uc, *ucp);
- return 0;
- }
-#if JSON_COUNT
- /* bail quickly if not counting */
- if (lvl > 1 && (st[JSON_OBJECT] || st[JSON_ARRAYN]))
- return 1;
-#endif
-
- DPRINTF("Parse general: ", uc, *ucp);
- switch (*uc++) {
- case '"':
- rv = json_parse_string(&uc, ue, lvl + 1);
- t = JSON_STRING;
- break;
- case '[':
- rv = json_parse_array(&uc, ue, st, lvl + 1);
- t = JSON_ARRAY;
- break;
- case '{': /* '}' */
- rv = json_parse_object(&uc, ue, st, lvl + 1);
- t = JSON_OBJECT;
- break;
- case 't':
- rv = json_parse_const(&uc, ue, "true", sizeof("true"), lvl + 1);
- t = JSON_CONSTANT;
- break;
- case 'f':
- rv = json_parse_const(&uc, ue, "false", sizeof("false"),
- lvl + 1);
- t = JSON_CONSTANT;
- break;
- case 'n':
- rv = json_parse_const(&uc, ue, "null", sizeof("null"), lvl + 1);
- t = JSON_CONSTANT;
- break;
- default:
- --uc;
- rv = json_parse_number(&uc, ue, lvl + 1);
- t = JSON_NUMBER;
- break;
- }
- if (rv)
- st[t]++;
- uc = json_skip_space(uc, ue);
-out:
- DPRINTF("End general: ", uc, *ucp);
- *ucp = uc;
- if (lvl == 0) {
- if (!rv)
- return 0;
- if (uc == ue)
- return (st[JSON_ARRAYN] || st[JSON_OBJECT]) ? 1 : 0;
- if (*ouc == *uc && json_parse(&uc, ue, st, 1))
- return (st[JSON_ARRAYN] || st[JSON_OBJECT]) ? 2 : 0;
- else
- return 0;
- }
- return rv;
-}
-
-#ifndef TEST
-int
-file_is_json(struct magic_set *ms, const struct buffer *b)
-{
- const unsigned char *uc = CAST(const unsigned char *, b->fbuf);
- const unsigned char *ue = uc + b->flen;
- size_t st[JSON_MAX];
- int mime = ms->flags & MAGIC_MIME;
- int jt;
-
-
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION)) != 0)
- return 0;
-
- memset(st, 0, sizeof(st));
-
- if ((jt = json_parse(&uc, ue, st, 0)) == 0)
- return 0;
-
- if (mime == MAGIC_MIME_ENCODING)
- return 1;
- if (mime) {
- if (file_printf(ms, "application/%s",
- jt == 1 ? "json" : "x-ndjson") == -1)
- return -1;
- return 1;
- }
- if (file_printf(ms, "%sJSON text data",
- jt == 1 ? "" : "New Line Delimited ") == -1)
- return -1;
-#if JSON_COUNT
-#define P(n) st[n], st[n] > 1 ? "s" : ""
- if (file_printf(ms, " (%" SIZE_T_FORMAT "u object%s, %" SIZE_T_FORMAT
- "u array%s, %" SIZE_T_FORMAT "u string%s, %" SIZE_T_FORMAT
- "u constant%s, %" SIZE_T_FORMAT "u number%s, %" SIZE_T_FORMAT
- "u >1array%s)",
- P(JSON_OBJECT), P(JSON_ARRAY), P(JSON_STRING), P(JSON_CONSTANT),
- P(JSON_NUMBER), P(JSON_ARRAYN))
- == -1)
- return -1;
-#endif
- return 1;
-}
-
-#else
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <err.h>
-
-int
-main(int argc, char *argv[])
-{
- int fd;
- struct stat st;
- unsigned char *p;
- size_t stats[JSON_MAX];
-
- if ((fd = open(argv[1], O_RDONLY)) == -1)
- err(EXIT_FAILURE, "Can't open `%s'", argv[1]);
-
- if (fstat(fd, &st) == -1)
- err(EXIT_FAILURE, "Can't stat `%s'", argv[1]);
-
- if ((p = CAST(char *, malloc(st.st_size))) == NULL)
- err(EXIT_FAILURE, "Can't allocate %jd bytes",
- (intmax_t)st.st_size);
- if (read(fd, p, st.st_size) != st.st_size)
- err(EXIT_FAILURE, "Can't read %jd bytes",
- (intmax_t)st.st_size);
- memset(stats, 0, sizeof(stats));
- printf("is json %d\n", json_parse((const unsigned char **)&p,
- p + st.st_size, stats, 0));
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/is_simh.c b/contrib/libs/libmagic/src/is_simh.c
deleted file mode 100644
index 4e78173fc4..0000000000
--- a/contrib/libs/libmagic/src/is_simh.c
+++ /dev/null
@@ -1,209 +0,0 @@
-/*-
- * Copyright (c) 2023 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-
-/*
- * Parse SIM-H tape files
- * http://simh.trailing-edge.com/docs/simh_magtape.pdf
- */
-
-#ifndef TEST
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: is_simh.c,v 1.10 2023/07/27 19:39:55 christos Exp $")
-#endif
-
-#include <string.h>
-#include <stddef.h>
-#include "magic.h"
-#else
-#include <stdint.h>
-#include <sys/types.h>
-#include <string.h>
-#include <stddef.h>
-#define CAST(a, b) (a)(b)
-#endif
-
-
-#ifdef DEBUG
-#include <stdio.h>
-#define DPRINTF(fmt, ...) printf(fmt, __VA_ARGS__)
-#else
-#define DPRINTF(fmt, ...)
-#endif
-
-/*
- * if SIMH_TAPEMARKS == 0:
- * check all the records and tapemarks
- * otherwise:
- *	check only up to the number of tapemarks specified
- */
-#ifndef SIMH_TAPEMARKS
-#define SIMH_TAPEMARKS 10
-#endif
-
-typedef union {
- char s[4];
- uint32_t u;
-} myword;
-
-static myword simh_bo;
-
-#define NEED_SWAP (simh_bo.u == CAST(uint32_t, 0x01020304))
-
-/*
- * swap an int
- */
-static uint32_t
-swap4(uint32_t sv)
-{
- myword d, s;
- s.u = sv;
- d.s[0] = s.s[3];
- d.s[1] = s.s[2];
- d.s[2] = s.s[1];
- d.s[3] = s.s[0];
- return d.u;
-}
-
-
-static uint32_t
-getlen(const unsigned char **uc)
-{
- uint32_t n;
- memcpy(&n, *uc, sizeof(n));
- *uc += sizeof(n);
- if (NEED_SWAP)
- n = swap4(n);
- if (n == 0xffffffff) /* check for End of Medium */
- return n;
- n &= 0x00ffffff; /* keep only the record len */
- if (n & 1)
- n++;
- return n;
-}
-
-static int
-simh_parse(const unsigned char *uc, const unsigned char *ue)
-{
- uint32_t nbytes, cbytes;
- const unsigned char *orig_uc = uc;
- size_t nt = 0, nr = 0;
-
- (void)memcpy(simh_bo.s, "\01\02\03\04", 4);
-
- while (ue - uc >= CAST(ptrdiff_t, sizeof(nbytes))) {
- nbytes = getlen(&uc);
- if ((nt > 0 || nr > 0) && nbytes == 0xFFFFFFFF)
- /* EOM after at least one record or tapemark */
- break;
- if (nbytes == 0) {
- nt++; /* count tapemarks */
-#if SIMH_TAPEMARKS
- if (nt == SIMH_TAPEMARKS)
- break;
-#endif
- continue;
- }
- /* handle a data record */
- uc += nbytes;
- if (ue - uc < CAST(ptrdiff_t, sizeof(nbytes)))
- break;
- cbytes = getlen(&uc);
- if (nbytes != cbytes)
- return 0;
- nr++;
- }
- if (nt * sizeof(uint32_t) == CAST(size_t, uc - orig_uc))
- return 0; /* All examined data was tapemarks (0) */
- if (nr == 0) /* No records */
- return 0;
- return 1;
-}
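
As simh_parse() above reads it, each data record is framed by a 4-byte little-endian length, the data padded to an even byte count, and a trailing copy of the length; a zero length is a tapemark and 0xFFFFFFFF marks end of medium. A hand-made byte layout consistent with that framing (contents are arbitrary):

/* One record, one tapemark, then end of medium. */
static const unsigned char simh_example[] = {
	0x05, 0x00, 0x00, 0x00,		/* record length = 5 */
	'H', 'E', 'L', 'L', 'O', 0x00,	/* 5 data bytes + 1 pad byte */
	0x05, 0x00, 0x00, 0x00,		/* trailing copy of the length */
	0x00, 0x00, 0x00, 0x00,		/* tapemark */
	0xff, 0xff, 0xff, 0xff,		/* end of medium */
};
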
-
-#ifndef TEST
-int
-file_is_simh(struct magic_set *ms, const struct buffer *b)
-{
- const unsigned char *uc = CAST(const unsigned char *, b->fbuf);
- const unsigned char *ue = uc + b->flen;
- int mime = ms->flags & MAGIC_MIME;
-
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION)) != 0)
- return 0;
-
- if (!simh_parse(uc, ue))
- return 0;
-
- if (mime == MAGIC_MIME_ENCODING)
- return 1;
-
- if (mime) {
- if (file_printf(ms, "application/SIMH-tape-data") == -1)
- return -1;
- return 1;
- }
-
- if (file_printf(ms, "SIMH tape data") == -1)
- return -1;
-
- return 1;
-}
-
-#else
-
-#include <sys/types.h>
-#include <sys/stat.h>
-#include <stdio.h>
-#include <fcntl.h>
-#include <unistd.h>
-#include <stdlib.h>
-#include <stdint.h>
-#include <err.h>
-
-int
-main(int argc, char *argv[])
-{
- int fd;
- struct stat st;
- unsigned char *p;
-
- if ((fd = open(argv[1], O_RDONLY)) == -1)
- err(EXIT_FAILURE, "Can't open `%s'", argv[1]);
-
- if (fstat(fd, &st) == -1)
- err(EXIT_FAILURE, "Can't stat `%s'", argv[1]);
-
- if ((p = CAST(char *, malloc(st.st_size))) == NULL)
- err(EXIT_FAILURE, "Can't allocate %jd bytes",
- (intmax_t)st.st_size);
- if (read(fd, p, st.st_size) != st.st_size)
- err(EXIT_FAILURE, "Can't read %jd bytes",
- (intmax_t)st.st_size);
- printf("is simh %d\n", simh_parse(p, p + st.st_size));
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/is_tar.c b/contrib/libs/libmagic/src/is_tar.c
deleted file mode 100644
index fa83e1e241..0000000000
--- a/contrib/libs/libmagic/src/is_tar.c
+++ /dev/null
@@ -1,179 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * is_tar() -- figure out whether file is a tar archive.
- *
- * Stolen (by the author!) from the public domain tar program:
- * Public Domain version written 26 Aug 1985 John Gilmore (ihnp4!hoptoad!gnu).
- *
- * @(#)list.c 1.18 9/23/86 Public Domain - gnu
- *
- * Comments changed and some code/comments reformatted
- * for file command by Ian Darwin.
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: is_tar.c,v 1.50 2022/12/26 17:31:14 christos Exp $")
-#endif
-
-#include "magic.h"
-#include <string.h>
-#include <ctype.h>
-#include "tar.h"
-
-#define isodigit(c) ( ((c) >= '0') && ((c) <= '7') )
-
-file_private int is_tar(const unsigned char *, size_t);
-file_private int from_oct(const char *, size_t); /* Decode octal number */
-
-static const char tartype[][32] = { /* should be equal to messages */
- "tar archive", /* found in ../magic/Magdir/archive */
- "POSIX tar archive",
- "POSIX tar archive (GNU)", /* */
-};
-
-file_protected int
-file_is_tar(struct magic_set *ms, const struct buffer *b)
-{
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- /*
- * Do the tar test first, because if the first file in the tar
- * archive starts with a dot, we can confuse it with an nroff file.
- */
- int tar;
- int mime = ms->flags & MAGIC_MIME;
-
- if ((ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION)) != 0)
- return 0;
-
- tar = is_tar(buf, nbytes);
- if (tar < 1 || tar > 3)
- return 0;
-
- if (mime == MAGIC_MIME_ENCODING)
- return 1;
-
- if (file_printf(ms, "%s", mime ? "application/x-tar" :
- tartype[tar - 1]) == -1)
- return -1;
-
- return 1;
-}
-
-/*
- * Return
- * 0 if the checksum is bad (i.e., probably not a tar archive),
- * 1 for old UNIX tar file,
- * 2 for Unix Std (POSIX) tar file,
- * 3 for GNU tar file.
- */
-file_private int
-is_tar(const unsigned char *buf, size_t nbytes)
-{
- static const char gpkg_match[] = "/gpkg-1";
-
- const union record *header = RCAST(const union record *,
- RCAST(const void *, buf));
- size_t i;
- int sum, recsum;
- const unsigned char *p, *ep;
- const char *nulp;
-
- if (nbytes < sizeof(*header))
- return 0;
-
- /* If the file looks like Gentoo GLEP 78 binary package (GPKG),
- * don't waste time on further checks and fall back to magic rules.
- */
- nulp = CAST(const char *,
- memchr(header->header.name, 0, sizeof(header->header.name)));
- if (nulp != NULL && nulp >= header->header.name + sizeof(gpkg_match) &&
- memcmp(nulp - sizeof(gpkg_match) + 1, gpkg_match,
- sizeof(gpkg_match)) == 0)
- return 0;
-
- recsum = from_oct(header->header.chksum, sizeof(header->header.chksum));
-
- sum = 0;
- p = header->charptr;
- ep = header->charptr + sizeof(*header);
- while (p < ep)
- sum += *p++;
-
- /* Adjust checksum to count the "chksum" field as blanks. */
- for (i = 0; i < sizeof(header->header.chksum); i++)
- sum -= header->header.chksum[i];
- sum += ' ' * sizeof(header->header.chksum);
-
- if (sum != recsum)
- return 0; /* Not a tar archive */
-
- if (strncmp(header->header.magic, GNUTMAGIC,
- sizeof(header->header.magic)) == 0)
- return 3; /* GNU Unix Standard tar archive */
-
- if (strncmp(header->header.magic, TMAGIC,
- sizeof(header->header.magic)) == 0)
- return 2; /* Unix Standard tar archive */
-
- return 1; /* Old fashioned tar archive */
-}
-
-
-/*
- * Quick and dirty octal conversion.
- *
- * Result is -1 if the field is invalid (all blank, or non-octal).
- */
-file_private int
-from_oct(const char *where, size_t digs)
-{
- int value;
-
- if (digs == 0)
- return -1;
-
- while (isspace(CAST(unsigned char, *where))) { /* Skip spaces */
- where++;
- if (digs-- == 0)
- return -1; /* All blank field */
- }
- value = 0;
- while (digs > 0 && isodigit(*where)) { /* Scan til non-octal */
- value = (value << 3) | (*where++ - '0');
- digs--;
- }
-
- if (digs > 0 && *where && !isspace(CAST(unsigned char, *where)))
- return -1; /* Ended on non-(space/NUL) */
-
- return value;
-}
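
The checksum verified by is_tar() above is the byte sum of the 512-byte header with the 8-byte chksum field counted as ASCII spaces; in the classic ustar layout that field sits at offset 148. A standalone sketch of the same computation:

#include <stddef.h>

/* Sum all 512 header bytes, treating the stored chksum field as blanks. */
static int
tar_header_checksum(const unsigned char header[512])
{
	int sum = 0;
	size_t i;

	for (i = 0; i < 512; i++)
		sum += header[i];
	for (i = 148; i < 148 + 8; i++)
		sum -= header[i];	/* drop the stored chksum bytes */
	sum += ' ' * 8;			/* ...and count them as spaces */
	return sum;
}

A header is considered plausible when this sum equals the octal value stored in the chksum field itself, which is what the recsum comparison above performs.
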
diff --git a/contrib/libs/libmagic/src/magic.c b/contrib/libs/libmagic/src/magic.c
deleted file mode 100644
index e8fbd594db..0000000000
--- a/contrib/libs/libmagic/src/magic.c
+++ /dev/null
@@ -1,686 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2003.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-
-#ifdef WIN32
-#include <windows.h>
-#include <shlwapi.h>
-#endif
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: magic.c,v 1.121 2023/02/09 17:45:19 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#ifdef QUICK
-#include <sys/mman.h>
-#endif
-#include <limits.h> /* for PIPE_BUF */
-
-#if defined(HAVE_UTIMES)
-# include <sys/time.h>
-#elif defined(HAVE_UTIME)
-# if defined(HAVE_SYS_UTIME_H)
-# include <sys/utime.h>
-# elif defined(HAVE_UTIME_H)
-# include <utime.h>
-# endif
-#endif
-
-#ifdef HAVE_UNISTD_H
-#include <unistd.h> /* for read() */
-#endif
-
-#ifndef PIPE_BUF
-/* Get the PIPE_BUF from pathconf */
-#ifdef _PC_PIPE_BUF
-#define PIPE_BUF pathconf(".", _PC_PIPE_BUF)
-#else
-#define PIPE_BUF 512
-#endif
-#endif
-
-file_private void close_and_restore(const struct magic_set *, const char *, int,
- const struct stat *);
-file_private int unreadable_info(struct magic_set *, mode_t, const char *);
-file_private const char* get_default_magic(void);
-#ifndef COMPILE_ONLY
-file_private const char *file_or_fd(struct magic_set *, const char *, int);
-#endif
-
-#ifndef STDIN_FILENO
-#define STDIN_FILENO 0
-#endif
-
-#ifdef WIN32
-/* HINSTANCE of this shared library. Needed for get_default_magic() */
-static HINSTANCE _w32_dll_instance = NULL;
-
-static void
-_w32_append_path(char **hmagicpath, const char *fmt, ...)
-{
- char *tmppath;
- char *newpath;
- va_list ap;
-
- va_start(ap, fmt);
- if (vasprintf(&tmppath, fmt, ap) < 0) {
- va_end(ap);
- return;
- }
- va_end(ap);
-
- if (access(tmppath, R_OK) == -1)
- goto out;
-
- if (*hmagicpath == NULL) {
- *hmagicpath = tmppath;
- return;
- }
-
- if (asprintf(&newpath, "%s%c%s", *hmagicpath, PATHSEP, tmppath) < 0)
- goto out;
-
- free(*hmagicpath);
- free(tmppath);
- *hmagicpath = newpath;
- return;
-out:
- free(tmppath);
-}
-
-static void
-_w32_get_magic_relative_to(char **hmagicpath, HINSTANCE module)
-{
- static const char *trypaths[] = {
- "%s/share/misc/magic.mgc",
- "%s/magic.mgc",
- };
- LPSTR dllpath;
- size_t sp;
-
- dllpath = calloc(MAX_PATH + 1, sizeof(*dllpath));
-
- if (!GetModuleFileNameA(module, dllpath, MAX_PATH))
- goto out;
-
- PathRemoveFileSpecA(dllpath);
-
- if (module) {
- char exepath[MAX_PATH];
- GetModuleFileNameA(NULL, exepath, MAX_PATH);
- PathRemoveFileSpecA(exepath);
- if (stricmp(exepath, dllpath) == 0)
- goto out;
- }
-
- sp = strlen(dllpath);
- if (sp > 3 && stricmp(&dllpath[sp - 3], "bin") == 0) {
- _w32_append_path(hmagicpath,
- "%s/../share/misc/magic.mgc", dllpath);
- goto out;
- }
-
- for (sp = 0; sp < __arraycount(trypaths); sp++)
- _w32_append_path(hmagicpath, trypaths[sp], dllpath);
-out:
- free(dllpath);
-}
-
-#ifndef BUILD_AS_WINDOWS_STATIC_LIBARAY
-/* Placate GCC by offering a sacrificial previous prototype */
-BOOL WINAPI DllMain(HINSTANCE, DWORD, LPVOID);
-
-BOOL WINAPI
-DllMain(HINSTANCE hinstDLL, DWORD fdwReason,
- LPVOID lpvReserved __attribute__((__unused__)))
-{
- if (fdwReason == DLL_PROCESS_ATTACH)
- _w32_dll_instance = hinstDLL;
- return 1;
-}
-#endif
-#endif
-
-file_private const char *
-get_default_magic(void)
-{
- static const char hmagic[] = "/.magic/magic.mgc";
- static char *default_magic;
- char *home, *hmagicpath;
-
-#ifndef WIN32
- struct stat st;
-
- if (default_magic) {
- free(default_magic);
- default_magic = NULL;
- }
- if ((home = getenv("HOME")) == NULL)
- return MAGIC;
-
- if (asprintf(&hmagicpath, "%s/.magic.mgc", home) < 0)
- return MAGIC;
- if (stat(hmagicpath, &st) == -1) {
- free(hmagicpath);
- if (asprintf(&hmagicpath, "%s/.magic", home) < 0)
- return MAGIC;
- if (stat(hmagicpath, &st) == -1)
- goto out;
- if (S_ISDIR(st.st_mode)) {
- free(hmagicpath);
- if (asprintf(&hmagicpath, "%s/%s", home, hmagic) < 0)
- return MAGIC;
- if (access(hmagicpath, R_OK) == -1)
- goto out;
- }
- }
-
- if (asprintf(&default_magic, "%s:%s", hmagicpath, MAGIC) < 0)
- goto out;
- free(hmagicpath);
- return default_magic;
-out:
- default_magic = NULL;
- free(hmagicpath);
- return MAGIC;
-#else
- hmagicpath = NULL;
-
- if (default_magic) {
- free(default_magic);
- default_magic = NULL;
- }
-
- /* Before anything else, try to get a magic file from user HOME */
- if ((home = getenv("HOME")) != NULL)
- _w32_append_path(&hmagicpath, "%s%s", home, hmagic);
-
- /* First, try to get a magic file from user-application data */
- if ((home = getenv("LOCALAPPDATA")) != NULL)
- _w32_append_path(&hmagicpath, "%s%s", home, hmagic);
-
- /* Second, try to get a magic file from the user profile data */
- if ((home = getenv("USERPROFILE")) != NULL)
- _w32_append_path(&hmagicpath,
- "%s/Local Settings/Application Data%s", home, hmagic);
-
- /* Third, try to get a magic file from Common Files */
- if ((home = getenv("COMMONPROGRAMFILES")) != NULL)
- _w32_append_path(&hmagicpath, "%s%s", home, hmagic);
-
- /* Fourth, try to get magic file relative to exe location */
- _w32_get_magic_relative_to(&hmagicpath, NULL);
-
- /* Fifth, try to get magic file relative to dll location */
- _w32_get_magic_relative_to(&hmagicpath, _w32_dll_instance);
-
- /* Avoid MAGIC constant - it likely points to a file within MSys tree */
- default_magic = hmagicpath;
- return default_magic;
-#endif
-}
-
-file_public const char *
-magic_getpath(const char *magicfile, int action)
-{
- if (magicfile != NULL)
- return magicfile;
-
- magicfile = getenv("MAGIC");
- if (magicfile != NULL)
- return magicfile;
-
- return MAGIC;
-}
-
-file_public struct magic_set *
-magic_open(int flags)
-{
- return file_ms_alloc(flags);
-}
-
-file_private int
-unreadable_info(struct magic_set *ms, mode_t md, const char *file)
-{
- if (file) {
- /* We cannot open it, but we were able to stat it. */
- if (access(file, W_OK) == 0)
- if (file_printf(ms, "writable, ") == -1)
- return -1;
-#ifndef WIN32
- if (access(file, X_OK) == 0)
- if (file_printf(ms, "executable, ") == -1)
- return -1;
-#else
- /* X_OK doesn't work well on MS-Windows */
- {
- const char *p = strrchr(file, '.');
- if (p && (stricmp(p, ".exe")
- || stricmp(p, ".dll")
- || stricmp(p, ".bat")
- || stricmp(p, ".cmd")))
- if (file_printf(ms, "writable, ") == -1)
- return -1;
- }
-#endif
- }
- if (S_ISREG(md))
- if (file_printf(ms, "regular file, ") == -1)
- return -1;
- if (file_printf(ms, "no read permission") == -1)
- return -1;
- return 0;
-}
-
-file_public void
-magic_close(struct magic_set *ms)
-{
- if (ms == NULL)
- return;
- file_ms_free(ms);
-}
-
-/*
- * load a magic file
- */
-file_public int
-magic_load(struct magic_set *ms, const char *magicfile)
-{
- if (ms == NULL)
- return -1;
- return file_apprentice(ms, magicfile, FILE_LOAD);
-}
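
magic_open(), magic_load(), magic_file() and magic_close() form the usual calling sequence for this library; passing NULL to magic_load() falls back to the default database resolved via magic_getpath() and the MAGIC environment variable. A minimal caller, sketched here for illustration (error handling kept short):

    #include <stdio.h>
    #include <magic.h>

    int main(int argc, char **argv)
    {
        if (argc < 2)
            return 1;
        magic_t m = magic_open(MAGIC_MIME_TYPE);      /* report MIME types */
        if (m == NULL || magic_load(m, NULL) != 0) {  /* NULL: default database */
            fprintf(stderr, "magic: %s\n", m ? magic_error(m) : "cannot allocate");
            return 1;
        }
        const char *desc = magic_file(m, argv[1]);
        printf("%s: %s\n", argv[1], desc ? desc : magic_error(m));
        magic_close(m);
        return 0;
    }
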
-
-#ifndef COMPILE_ONLY
-/*
- * Install a set of compiled magic buffers.
- */
-file_public int
-magic_load_buffers(struct magic_set *ms, void **bufs, size_t *sizes,
- size_t nbufs)
-{
- if (ms == NULL)
- return -1;
- return buffer_apprentice(ms, RCAST(struct magic **, bufs),
- sizes, nbufs);
-}
-#endif
-
-file_public int
-magic_compile(struct magic_set *ms, const char *magicfile)
-{
- if (ms == NULL)
- return -1;
- return file_apprentice(ms, magicfile, FILE_COMPILE);
-}
-
-file_public int
-magic_check(struct magic_set *ms, const char *magicfile)
-{
- if (ms == NULL)
- return -1;
- return file_apprentice(ms, magicfile, FILE_CHECK);
-}
-
-file_public int
-magic_list(struct magic_set *ms, const char *magicfile)
-{
- if (ms == NULL)
- return -1;
- return file_apprentice(ms, magicfile, FILE_LIST);
-}
-
-file_private void
-close_and_restore(const struct magic_set *ms, const char *name, int fd,
- const struct stat *sb)
-{
- if (fd == STDIN_FILENO || name == NULL)
- return;
- (void) close(fd);
-
- if ((ms->flags & MAGIC_PRESERVE_ATIME) != 0) {
- /*
- * Try to restore the access and modification times if we read
- * the file. This is really *bad* because it updates the
- * status-change time (ctime) of the file... and of course that
- * will affect backup programs.
- */
-#ifdef HAVE_UTIMES
- struct timeval utsbuf[2];
- (void)memset(utsbuf, 0, sizeof(utsbuf));
- utsbuf[0].tv_sec = sb->st_atime;
- utsbuf[1].tv_sec = sb->st_mtime;
-
- (void) utimes(name, utsbuf); /* don't care if it fails */
-#elif defined(HAVE_UTIME_H) || defined(HAVE_SYS_UTIME_H)
- struct utimbuf utbuf;
-
- (void)memset(&utbuf, 0, sizeof(utbuf));
- utbuf.actime = sb->st_atime;
- utbuf.modtime = sb->st_mtime;
- (void) utime(name, &utbuf); /* don't care if it fails */
-#endif
- }
-}
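
The timestamp restore above only runs when the caller has enabled MAGIC_PRESERVE_ATIME. A small sketch of turning it on (the helper name is illustrative; note from the magic_setflags() code below that the flag is rejected with -1 on builds without utime()/utimes()):

    #include <magic.h>

    /* Ask libmagic to put the access time back after it reads a file. */
    static void enable_atime_preservation(magic_t m)
    {
        int flags = magic_getflags(m);
        if (flags == -1)
            return;                          /* invalid handle */
        if (magic_setflags(m, flags | MAGIC_PRESERVE_ATIME) == -1) {
            /* flag unsupported on this build; continue without it */
        }
    }
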
-
-#ifndef COMPILE_ONLY
-
-/*
- * find type of descriptor
- */
-file_public const char *
-magic_descriptor(struct magic_set *ms, int fd)
-{
- if (ms == NULL)
- return NULL;
- return file_or_fd(ms, NULL, fd);
-}
-
-/*
- * find type of named file
- */
-file_public const char *
-magic_file(struct magic_set *ms, const char *inname)
-{
- if (ms == NULL)
- return NULL;
- return file_or_fd(ms, inname, STDIN_FILENO);
-}
-
-file_private const char *
-file_or_fd(struct magic_set *ms, const char *inname, int fd)
-{
- int rv = -1;
- unsigned char *buf;
- struct stat sb;
- ssize_t nbytes = 0; /* number of bytes read from a datafile */
- int ispipe = 0;
- int okstat = 0;
- off_t pos = CAST(off_t, -1);
-
- if (file_reset(ms, 1) == -1)
- goto out;
-
- /*
- * one extra for terminating '\0', and
- * some overlapping space for matches near EOF
- */
-#define SLOP (1 + sizeof(union VALUETYPE))
- if ((buf = CAST(unsigned char *, malloc(ms->bytes_max + SLOP))) == NULL)
- return NULL;
-
- switch (file_fsmagic(ms, inname, &sb)) {
- case -1: /* error */
- goto done;
- case 0: /* nothing found */
- break;
- default: /* matched it and printed type */
- rv = 0;
- goto done;
- }
-
-#ifdef WIN32
- /* Place stdin in binary mode, so EOF (Ctrl+Z) doesn't stop early. */
- if (fd == STDIN_FILENO)
- _setmode(STDIN_FILENO, O_BINARY);
-#endif
- if (inname != NULL) {
- int flags = O_RDONLY|O_BINARY|O_NONBLOCK|O_CLOEXEC;
- errno = 0;
- if ((fd = open(inname, flags)) < 0) {
- okstat = stat(inname, &sb) == 0;
-#ifdef WIN32
- /*
- * Can't stat, can't open. It may have been opened in
- * fsmagic, so if the user doesn't have read permission,
- * allow it to say so; otherwise an error was probably
- * displayed in fsmagic.
- */
- if (!okstat && errno == EACCES) {
- sb.st_mode = S_IFBLK;
- okstat = 1;
- }
-#endif
- if (okstat &&
- unreadable_info(ms, sb.st_mode, inname) == -1)
- goto done;
- rv = 0;
- goto done;
- }
-#if O_CLOEXEC == 0 && defined(F_SETFD)
- (void)fcntl(fd, F_SETFD, FD_CLOEXEC);
-#endif
- }
-
- if (fd != -1) {
- okstat = fstat(fd, &sb) == 0;
- if (okstat && S_ISFIFO(sb.st_mode))
- ispipe = 1;
- if (inname == NULL)
- pos = lseek(fd, CAST(off_t, 0), SEEK_CUR);
- }
-
- /*
- * try looking at the first ms->bytes_max bytes
- */
- if (ispipe) {
- if (fd != -1) {
- ssize_t r = 0;
-
- while ((r = sread(fd, RCAST(void *, &buf[nbytes]),
- CAST(size_t, ms->bytes_max - nbytes), 1)) > 0) {
- nbytes += r;
- if (r < PIPE_BUF) break;
- }
- }
-
- if (nbytes == 0 && inname) {
- /* We cannot read it, but we were able to stat it. */
- if (unreadable_info(ms, sb.st_mode, inname) == -1)
- goto done;
- rv = 0;
- goto done;
- }
-
- } else if (fd != -1) {
- /* Windows refuses to read from a big console buffer. */
- size_t howmany =
-#ifdef WIN32
- _isatty(fd) ? 8 * 1024 :
-#endif
- ms->bytes_max;
- if ((nbytes = read(fd, RCAST(void *, buf), howmany)) == -1) {
- if (inname == NULL && fd != STDIN_FILENO)
- file_error(ms, errno, "cannot read fd %d", fd);
- else
- file_error(ms, errno, "cannot read `%s'",
- inname == NULL ? "/dev/stdin" : inname);
- goto done;
- }
- }
-
- (void)memset(buf + nbytes, 0, SLOP); /* NUL terminate */
- if (file_buffer(ms, fd, okstat ? &sb : NULL, inname, buf, CAST(size_t, nbytes)) == -1)
- goto done;
- rv = 0;
-done:
- free(buf);
- if (fd != -1) {
- if (pos != CAST(off_t, -1))
- (void)lseek(fd, pos, SEEK_SET);
- close_and_restore(ms, inname, fd, &sb);
- }
-out:
- return rv == 0 ? file_getbuffer(ms) : NULL;
-}
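
Because a pipe cannot be rewound, the loop above keeps reading until the bytes_max budget is full or a read comes back shorter than PIPE_BUF. The same drain-up-to-a-limit pattern in isolation looks roughly like this (sketch only; drain_pipe and limit are illustrative names, not library internals):

    #include <limits.h>
    #include <unistd.h>

    #ifndef PIPE_BUF
    #define PIPE_BUF 512
    #endif

    /* Read at most `limit` bytes from a non-seekable fd; stop on EOF,
     * error, or a short read, mirroring the pipe branch above. */
    static size_t drain_pipe(int fd, unsigned char *buf, size_t limit)
    {
        size_t total = 0;
        ssize_t r;
        while (total < limit &&
            (r = read(fd, buf + total, limit - total)) > 0) {
            total += (size_t)r;
            if ((size_t)r < PIPE_BUF)
                break;
        }
        return total;
    }
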
-
-
-file_public const char *
-magic_buffer(struct magic_set *ms, const void *buf, size_t nb)
-{
- if (ms == NULL)
- return NULL;
- if (file_reset(ms, 1) == -1)
- return NULL;
- /*
- * The main work is done here!
- * We have the file name and/or the data buffer to be identified.
- */
- if (file_buffer(ms, -1, NULL, NULL, buf, nb) == -1) {
- return NULL;
- }
- return file_getbuffer(ms);
-}
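
magic_buffer() is the entry point when the data to identify is already in memory rather than on disk; a minimal use, assuming an opened and loaded handle (the helper name is illustrative):

    #include <stdio.h>
    #include <magic.h>

    /* Describe a blob that is already in memory. */
    static void describe_blob(magic_t m, const void *data, size_t len)
    {
        const char *desc = magic_buffer(m, data, len);
        printf("%s\n", desc ? desc : magic_error(m));
    }
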
-#endif
-
-file_public const char *
-magic_error(struct magic_set *ms)
-{
- if (ms == NULL)
- return "Magic database is not open";
- return (ms->event_flags & EVENT_HAD_ERR) ? ms->o.buf : NULL;
-}
-
-file_public int
-magic_errno(struct magic_set *ms)
-{
- if (ms == NULL)
- return EINVAL;
- return (ms->event_flags & EVENT_HAD_ERR) ? ms->error : 0;
-}
-
-file_public int
-magic_getflags(struct magic_set *ms)
-{
- if (ms == NULL)
- return -1;
-
- return ms->flags;
-}
-
-file_public int
-magic_setflags(struct magic_set *ms, int flags)
-{
- if (ms == NULL)
- return -1;
-#if !defined(HAVE_UTIME) && !defined(HAVE_UTIMES)
- if (flags & MAGIC_PRESERVE_ATIME)
- return -1;
-#endif
- ms->flags = flags;
- return 0;
-}
-
-file_public int
-magic_version(void)
-{
- return MAGIC_VERSION;
-}
-
-file_public int
-magic_setparam(struct magic_set *ms, int param, const void *val)
-{
- if (ms == NULL)
- return -1;
- switch (param) {
- case MAGIC_PARAM_INDIR_MAX:
- ms->indir_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_NAME_MAX:
- ms->name_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_ELF_PHNUM_MAX:
- ms->elf_phnum_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_ELF_SHNUM_MAX:
- ms->elf_shnum_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_ELF_SHSIZE_MAX:
- ms->elf_shsize_max = *CAST(const size_t *, val);
- return 0;
- case MAGIC_PARAM_ELF_NOTES_MAX:
- ms->elf_notes_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_REGEX_MAX:
- ms->regex_max = CAST(uint16_t, *CAST(const size_t *, val));
- return 0;
- case MAGIC_PARAM_BYTES_MAX:
- ms->bytes_max = *CAST(const size_t *, val);
- return 0;
- case MAGIC_PARAM_ENCODING_MAX:
- ms->encoding_max = *CAST(const size_t *, val);
- return 0;
- default:
- errno = EINVAL;
- return -1;
- }
-}
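
Note that magic_setparam() and magic_getparam() always read and write the value through a size_t pointer, even for limits that are stored internally in 16-bit fields, so callers must pass the address of a size_t. For example, to widen the scan window (the function name and the 4 MiB figure are illustrative):

    #include <stddef.h>
    #include <magic.h>

    static int widen_scan_window(magic_t m)
    {
        size_t bytes = 4 * 1024 * 1024;     /* examine up to 4 MiB per file */
        if (magic_setparam(m, MAGIC_PARAM_BYTES_MAX, &bytes) == -1)
            return -1;
        return magic_getparam(m, MAGIC_PARAM_BYTES_MAX, &bytes);  /* read back */
    }
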
-
-file_public int
-magic_getparam(struct magic_set *ms, int param, void *val)
-{
- if (ms == NULL)
- return -1;
- switch (param) {
- case MAGIC_PARAM_INDIR_MAX:
- *CAST(size_t *, val) = ms->indir_max;
- return 0;
- case MAGIC_PARAM_NAME_MAX:
- *CAST(size_t *, val) = ms->name_max;
- return 0;
- case MAGIC_PARAM_ELF_PHNUM_MAX:
- *CAST(size_t *, val) = ms->elf_phnum_max;
- return 0;
- case MAGIC_PARAM_ELF_SHNUM_MAX:
- *CAST(size_t *, val) = ms->elf_shnum_max;
- return 0;
- case MAGIC_PARAM_ELF_SHSIZE_MAX:
- *CAST(size_t *, val) = ms->elf_shsize_max;
- return 0;
- case MAGIC_PARAM_ELF_NOTES_MAX:
- *CAST(size_t *, val) = ms->elf_notes_max;
- return 0;
- case MAGIC_PARAM_REGEX_MAX:
- *CAST(size_t *, val) = ms->regex_max;
- return 0;
- case MAGIC_PARAM_BYTES_MAX:
- *CAST(size_t *, val) = ms->bytes_max;
- return 0;
- case MAGIC_PARAM_ENCODING_MAX:
- *CAST(size_t *, val) = ms->encoding_max;
- return 0;
- default:
- errno = EINVAL;
- return -1;
- }
-}
diff --git a/contrib/libs/libmagic/src/magic.h b/contrib/libs/libmagic/src/magic.h
deleted file mode 100644
index 29961f3936..0000000000
--- a/contrib/libs/libmagic/src/magic.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2003.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#ifndef _MAGIC_H
-#define _MAGIC_H
-
-#include <sys/types.h>
-
-#define MAGIC_NONE 0x0000000 /* No flags */
-#define MAGIC_DEBUG 0x0000001 /* Turn on debugging */
-#define MAGIC_SYMLINK 0x0000002 /* Follow symlinks */
-#define MAGIC_COMPRESS 0x0000004 /* Check inside compressed files */
-#define MAGIC_DEVICES 0x0000008 /* Look at the contents of devices */
-#define MAGIC_MIME_TYPE 0x0000010 /* Return the MIME type */
-#define MAGIC_CONTINUE 0x0000020 /* Return all matches */
-#define MAGIC_CHECK 0x0000040 /* Print warnings to stderr */
-#define MAGIC_PRESERVE_ATIME 0x0000080 /* Restore access time on exit */
-#define MAGIC_RAW 0x0000100 /* Don't convert unprintable chars */
-#define MAGIC_ERROR 0x0000200 /* Handle ENOENT etc as real errors */
-#define MAGIC_MIME_ENCODING 0x0000400 /* Return the MIME encoding */
-#define MAGIC_MIME (MAGIC_MIME_TYPE|MAGIC_MIME_ENCODING)
-#define MAGIC_APPLE 0x0000800 /* Return the Apple creator/type */
-#define MAGIC_EXTENSION 0x1000000 /* Return a /-separated list of
- * extensions */
-#define MAGIC_COMPRESS_TRANSP 0x2000000 /* Check inside compressed files
- * but not report compression */
-#define MAGIC_NO_COMPRESS_FORK 0x4000000 /* Don't allow decompression that
- * needs to fork */
-#define MAGIC_NODESC (MAGIC_EXTENSION|MAGIC_MIME|MAGIC_APPLE)
-
-#define MAGIC_NO_CHECK_COMPRESS 0x0001000 /* Don't check for compressed files */
-#define MAGIC_NO_CHECK_TAR 0x0002000 /* Don't check for tar files */
-#define MAGIC_NO_CHECK_SOFT 0x0004000 /* Don't check magic entries */
-#define MAGIC_NO_CHECK_APPTYPE 0x0008000 /* Don't check application type */
-#define MAGIC_NO_CHECK_ELF 0x0010000 /* Don't check for elf details */
-#define MAGIC_NO_CHECK_TEXT 0x0020000 /* Don't check for text files */
-#define MAGIC_NO_CHECK_CDF 0x0040000 /* Don't check for cdf files */
-#define MAGIC_NO_CHECK_CSV 0x0080000 /* Don't check for CSV files */
-#define MAGIC_NO_CHECK_TOKENS 0x0100000 /* Don't check tokens */
-#define MAGIC_NO_CHECK_ENCODING 0x0200000 /* Don't check text encodings */
-#define MAGIC_NO_CHECK_JSON 0x0400000 /* Don't check for JSON files */
-#define MAGIC_NO_CHECK_SIMH 0x0800000 /* Don't check for SIMH tape files */
-
-/* No built-in tests; only consult the magic file */
-#define MAGIC_NO_CHECK_BUILTIN ( \
- MAGIC_NO_CHECK_COMPRESS | \
- MAGIC_NO_CHECK_TAR | \
-/* MAGIC_NO_CHECK_SOFT | */ \
- MAGIC_NO_CHECK_APPTYPE | \
- MAGIC_NO_CHECK_ELF | \
- MAGIC_NO_CHECK_TEXT | \
- MAGIC_NO_CHECK_CSV | \
- MAGIC_NO_CHECK_CDF | \
- MAGIC_NO_CHECK_TOKENS | \
- MAGIC_NO_CHECK_ENCODING | \
- MAGIC_NO_CHECK_JSON | \
- MAGIC_NO_CHECK_SIMH | \
- 0 \
-)
-
-#define MAGIC_SNPRINTB "\177\020\
-b\0debug\0\
-b\1symlink\0\
-b\2compress\0\
-b\3devices\0\
-b\4mime_type\0\
-b\5continue\0\
-b\6check\0\
-b\7preserve_atime\0\
-b\10raw\0\
-b\11error\0\
-b\12mime_encoding\0\
-b\13apple\0\
-b\14no_check_compress\0\
-b\15no_check_tar\0\
-b\16no_check_soft\0\
-b\17no_check_sapptype\0\
-b\20no_check_elf\0\
-b\21no_check_text\0\
-b\22no_check_cdf\0\
-b\23no_check_csv\0\
-b\24no_check_tokens\0\
-b\25no_check_encoding\0\
-b\26no_check_json\0\
-b\27no_check_simh\0\
-b\30extension\0\
-b\31transp_compression\0\
-"
-
-/* Defined for backwards compatibility (renamed) */
-#define MAGIC_NO_CHECK_ASCII MAGIC_NO_CHECK_TEXT
-
-/* Defined for backwards compatibility; do nothing */
-#define MAGIC_NO_CHECK_FORTRAN 0x000000 /* Don't check ascii/fortran */
-#define MAGIC_NO_CHECK_TROFF 0x000000 /* Don't check ascii/troff */
-
-#define MAGIC_VERSION 545 /* This implementation */
-
-
-#ifdef __cplusplus
-extern "C" {
-#endif
-
-typedef struct magic_set *magic_t;
-magic_t magic_open(int);
-void magic_close(magic_t);
-
-const char *magic_getpath(const char *, int);
-const char *magic_file(magic_t, const char *);
-const char *magic_descriptor(magic_t, int);
-const char *magic_buffer(magic_t, const void *, size_t);
-
-const char *magic_error(magic_t);
-int magic_getflags(magic_t);
-int magic_setflags(magic_t, int);
-
-int magic_version(void);
-int magic_load(magic_t, const char *);
-int magic_load_buffers(magic_t, void **, size_t *, size_t);
-
-int magic_compile(magic_t, const char *);
-int magic_check(magic_t, const char *);
-int magic_list(magic_t, const char *);
-int magic_errno(magic_t);
-
-#define MAGIC_PARAM_INDIR_MAX 0
-#define MAGIC_PARAM_NAME_MAX 1
-#define MAGIC_PARAM_ELF_PHNUM_MAX 2
-#define MAGIC_PARAM_ELF_SHNUM_MAX 3
-#define MAGIC_PARAM_ELF_NOTES_MAX 4
-#define MAGIC_PARAM_REGEX_MAX 5
-#define MAGIC_PARAM_BYTES_MAX 6
-#define MAGIC_PARAM_ENCODING_MAX 7
-#define MAGIC_PARAM_ELF_SHSIZE_MAX 8
-
-int magic_setparam(magic_t, int, const void *);
-int magic_getparam(magic_t, int, void *);
-
-#ifdef __cplusplus
-};
-#endif
-
-#endif /* _MAGIC_H */
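
MAGIC_VERSION is the version the header was compiled against, while magic_version() reports the version of the library actually linked in; comparing the two is the usual sanity check for a header/library mismatch (sketch, helper name illustrative):

    #include <stdio.h>
    #include <magic.h>

    static void check_magic_version(void)
    {
        if (magic_version() != MAGIC_VERSION)
            fprintf(stderr, "libmagic: built against %d, running %d\n",
                MAGIC_VERSION, magic_version());
    }
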
diff --git a/contrib/libs/libmagic/src/print.c b/contrib/libs/libmagic/src/print.c
deleted file mode 100644
index 9ab383aad0..0000000000
--- a/contrib/libs/libmagic/src/print.c
+++ /dev/null
@@ -1,368 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * print.c - debugging printout routines
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: print.c,v 1.99 2023/07/17 16:40:57 christos Exp $")
-#endif /* lint */
-
-#include <string.h>
-#include <stdarg.h>
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-#include <time.h>
-
-#include "cdf.h"
-
-#ifndef COMPILE_ONLY
-file_protected void
-file_mdump(struct magic *m)
-{
- static const char optyp[] = { FILE_OPS };
- char tbuf[256];
-
- (void) fprintf(stderr, "%u: %.*s %d", m->lineno,
- (m->cont_level & 7) + 1, ">>>>>>>>", m->offset);
-
- if (m->flag & INDIR) {
- (void) fprintf(stderr, "(%s,",
- /* Note: type is unsigned */
- (m->in_type < file_nnames) ? file_names[m->in_type] :
- "*bad in_type*");
- if (m->in_op & FILE_OPINVERSE)
- (void) fputc('~', stderr);
- (void) fprintf(stderr, "%c%d),",
- (CAST(size_t, m->in_op & FILE_OPS_MASK) <
- __arraycount(optyp)) ?
- optyp[m->in_op & FILE_OPS_MASK] : '?', m->in_offset);
- }
- (void) fprintf(stderr, " %s%s", (m->flag & UNSIGNED) ? "u" : "",
- /* Note: type is unsigned */
- (m->type < file_nnames) ? file_names[m->type] : "*bad type");
- if (m->mask_op & FILE_OPINVERSE)
- (void) fputc('~', stderr);
-
- if (IS_STRING(m->type)) {
- if (m->str_flags) {
- (void) fputc('/', stderr);
- if (m->str_flags & STRING_COMPACT_WHITESPACE)
- (void) fputc(CHAR_COMPACT_WHITESPACE, stderr);
- if (m->str_flags & STRING_COMPACT_OPTIONAL_WHITESPACE)
- (void) fputc(CHAR_COMPACT_OPTIONAL_WHITESPACE,
- stderr);
- if (m->str_flags & STRING_IGNORE_LOWERCASE)
- (void) fputc(CHAR_IGNORE_LOWERCASE, stderr);
- if (m->str_flags & STRING_IGNORE_UPPERCASE)
- (void) fputc(CHAR_IGNORE_UPPERCASE, stderr);
- if (m->str_flags & REGEX_OFFSET_START)
- (void) fputc(CHAR_REGEX_OFFSET_START, stderr);
- if (m->str_flags & STRING_TEXTTEST)
- (void) fputc(CHAR_TEXTTEST, stderr);
- if (m->str_flags & STRING_BINTEST)
- (void) fputc(CHAR_BINTEST, stderr);
- if (m->str_flags & PSTRING_1_BE)
- (void) fputc(CHAR_PSTRING_1_BE, stderr);
- if (m->str_flags & PSTRING_2_BE)
- (void) fputc(CHAR_PSTRING_2_BE, stderr);
- if (m->str_flags & PSTRING_2_LE)
- (void) fputc(CHAR_PSTRING_2_LE, stderr);
- if (m->str_flags & PSTRING_4_BE)
- (void) fputc(CHAR_PSTRING_4_BE, stderr);
- if (m->str_flags & PSTRING_4_LE)
- (void) fputc(CHAR_PSTRING_4_LE, stderr);
- if (m->str_flags & PSTRING_LENGTH_INCLUDES_ITSELF)
- (void) fputc(
- CHAR_PSTRING_LENGTH_INCLUDES_ITSELF,
- stderr);
- }
- if (m->str_range)
- (void) fprintf(stderr, "/%u", m->str_range);
- }
- else {
- if (CAST(size_t, m->mask_op & FILE_OPS_MASK) <
- __arraycount(optyp))
- (void) fputc(optyp[m->mask_op & FILE_OPS_MASK], stderr);
- else
- (void) fputc('?', stderr);
-
- if (m->num_mask) {
- (void) fprintf(stderr, "%.8llx",
- CAST(unsigned long long, m->num_mask));
- }
- }
- (void) fprintf(stderr, ",%c", m->reln);
-
- if (m->reln != 'x') {
- switch (m->type) {
- case FILE_BYTE:
- case FILE_SHORT:
- case FILE_LONG:
- case FILE_LESHORT:
- case FILE_LELONG:
- case FILE_MELONG:
- case FILE_BESHORT:
- case FILE_BELONG:
- case FILE_INDIRECT:
- (void) fprintf(stderr, "%d", CAST(int32_t, m->value.l));
- break;
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- case FILE_QUAD:
- case FILE_OFFSET:
- (void) fprintf(stderr, "%" INT64_T_FORMAT "d",
- CAST(long long, m->value.q));
- break;
- case FILE_PSTRING:
- case FILE_STRING:
- case FILE_REGEX:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- case FILE_SEARCH:
- file_showstr(stderr, m->value.s,
- CAST(size_t, m->vallen));
- break;
- case FILE_DATE:
- case FILE_LEDATE:
- case FILE_BEDATE:
- case FILE_MEDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdatetime(tbuf, sizeof(tbuf), m->value.l, 0));
- break;
- case FILE_LDATE:
- case FILE_LELDATE:
- case FILE_BELDATE:
- case FILE_MELDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdatetime(tbuf, sizeof(tbuf), m->value.l,
- FILE_T_LOCAL));
- break;
- case FILE_QDATE:
- case FILE_LEQDATE:
- case FILE_BEQDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdatetime(tbuf, sizeof(tbuf), m->value.q, 0));
- break;
- case FILE_QLDATE:
- case FILE_LEQLDATE:
- case FILE_BEQLDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdatetime(tbuf, sizeof(tbuf), m->value.q,
- FILE_T_LOCAL));
- break;
- case FILE_QWDATE:
- case FILE_LEQWDATE:
- case FILE_BEQWDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdatetime(tbuf, sizeof(tbuf), m->value.q,
- FILE_T_WINDOWS));
- break;
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- (void) fprintf(stderr, "%G", m->value.f);
- break;
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- (void) fprintf(stderr, "%G", m->value.d);
- break;
- case FILE_LEVARINT:
- case FILE_BEVARINT:
- (void)fprintf(stderr, "%s", file_fmtvarint(
- tbuf, sizeof(tbuf), m->value.us, m->type));
- break;
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- (void)fprintf(stderr, "%s,",
- file_fmtdate(tbuf, sizeof(tbuf), m->value.h));
- break;
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- (void)fprintf(stderr, "%s,",
- file_fmttime(tbuf, sizeof(tbuf), m->value.h));
- break;
- case FILE_OCTAL:
- (void)fprintf(stderr, "%s",
- file_fmtnum(tbuf, sizeof(tbuf), m->value.s, 8));
- break;
- case FILE_DEFAULT:
- /* XXX - do anything here? */
- break;
- case FILE_USE:
- case FILE_NAME:
- case FILE_DER:
- (void) fprintf(stderr, "'%s'", m->value.s);
- break;
- case FILE_GUID:
- (void) file_print_guid(tbuf, sizeof(tbuf),
- m->value.guid);
- (void) fprintf(stderr, "%s", tbuf);
- break;
-
- default:
- (void) fprintf(stderr, "*bad type %d*", m->type);
- break;
- }
- }
- (void) fprintf(stderr, ",\"%s\"]\n", m->desc);
-}
-#endif
-
-/*VARARGS*/
-file_protected void
-file_magwarn(struct magic_set *ms, const char *f, ...)
-{
- va_list va;
-
- /* we use stdout for most output, but warnings go to stderr, so flush stdout first */
- (void) fflush(stdout);
-
- if (ms && ms->file)
- (void) fprintf(stderr, "%s, %lu: ", ms->file,
- CAST(unsigned long, ms->line));
- (void) fprintf(stderr, "Warning: ");
- va_start(va, f);
- (void) vfprintf(stderr, f, va);
- va_end(va);
- (void) fputc('\n', stderr);
-}
-
-file_protected const char *
-file_fmtvarint(char *buf, size_t blen, const unsigned char *us, int t)
-{
- snprintf(buf, blen, "%jd", CAST(intmax_t,
- file_varint2uintmax_t(us, t, NULL)));
- return buf;
-}
-
-file_protected const char *
-file_fmtdatetime(char *buf, size_t bsize, uint64_t v, int flags)
-{
- char *pp;
- time_t t;
- struct tm *tm, tmz;
-
- if (flags & FILE_T_WINDOWS) {
- struct timespec ts;
- cdf_timestamp_to_timespec(&ts, CAST(cdf_timestamp_t, v));
- t = ts.tv_sec;
- } else {
- // XXX: perhaps detect and print something if overflow
- // on 32 bit time_t?
- t = CAST(time_t, v);
- }
-
- if (t > MAX_CTIME)
- goto out;
-
- if (flags & FILE_T_LOCAL) {
- tm = localtime_r(&t, &tmz);
- } else {
- tm = gmtime_r(&t, &tmz);
- }
- if (tm == NULL)
- goto out;
- pp = asctime_r(tm, buf);
-
- if (pp == NULL)
- goto out;
- pp[strcspn(pp, "\n")] = '\0';
- return pp;
-out:
- strlcpy(buf, "*Invalid datetime*", bsize);
- return buf;
-}
-
-/*
- * https://docs.microsoft.com/en-us/windows/win32/api/winbase/\
- * nf-winbase-dosdatetimetofiletime?redirectedfrom=MSDN
- */
-file_protected const char *
-file_fmtdate(char *buf, size_t bsize, uint16_t v)
-{
- struct tm tm;
-
- memset(&tm, 0, sizeof(tm));
- tm.tm_mday = v & 0x1f;
- tm.tm_mon = ((v >> 5) & 0xf) - 1;
- tm.tm_year = (v >> 9) + 80;
-
- if (strftime(buf, bsize, "%a, %b %d %Y", &tm) == 0)
- goto out;
-
- return buf;
-out:
- strlcpy(buf, "*Invalid date*", bsize);
- return buf;
-}
-
-file_protected const char *
-file_fmttime(char *buf, size_t bsize, uint16_t v)
-{
- struct tm tm;
-
- memset(&tm, 0, sizeof(tm));
- tm.tm_sec = (v & 0x1f) * 2;
- tm.tm_min = ((v >> 5) & 0x3f);
- tm.tm_hour = (v >> 11);
-
- if (strftime(buf, bsize, "%T", &tm) == 0)
- goto out;
-
- return buf;
-out:
- strlcpy(buf, "*Invalid time*", bsize);
- return buf;
-
-}
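
file_fmtdate() and file_fmttime() above unpack the classic FAT/MS-DOS encodings: date = (year-1980)<<9 | month<<5 | day and time = hour<<11 | minute<<5 | seconds/2. A worked decode of one sample pair of values (the constants are made up for illustration):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
        uint16_t d = 0x572F;   /* (43<<9)|(9<<5)|15   -> 2023-09-15 */
        uint16_t t = 0x7A34;   /* (15<<11)|(17<<5)|20 -> 15:17:40  */
        printf("%04d-%02d-%02d %02d:%02d:%02d\n",
            (d >> 9) + 1980, (d >> 5) & 0xf, d & 0x1f,
            t >> 11, (t >> 5) & 0x3f, (t & 0x1f) * 2);
        return 0;
    }
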
-
-file_protected const char *
-file_fmtnum(char *buf, size_t blen, const char *us, int base)
-{
- char *endptr;
- unsigned long long val;
-
- errno = 0;
- val = strtoull(us, &endptr, base);
- if (*endptr || errno) {
-bad: strlcpy(buf, "*Invalid number*", blen);
- return buf;
- }
-
- if (snprintf(buf, blen, "%llu", val) < 0)
- goto bad;
- return buf;
-}
diff --git a/contrib/libs/libmagic/src/readcdf.c b/contrib/libs/libmagic/src/readcdf.c
deleted file mode 100644
index 30c3d24667..0000000000
--- a/contrib/libs/libmagic/src/readcdf.c
+++ /dev/null
@@ -1,682 +0,0 @@
-/*-
- * Copyright (c) 2008, 2016 Christos Zoulas
- * All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
- * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
- * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
- * POSSIBILITY OF SUCH DAMAGE.
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: readcdf.c,v 1.80 2023/01/24 20:13:40 christos Exp $")
-#endif
-
-#include <assert.h>
-#include <stdlib.h>
-#include <unistd.h>
-#include <string.h>
-#include <time.h>
-#include <ctype.h>
-
-#include "cdf.h"
-#include "magic.h"
-
-#define NOTMIME(ms) (((ms)->flags & MAGIC_MIME) == 0)
-
-static const struct nv {
- const char *pattern;
- const char *mime;
-} app2mime[] = {
- { "Word", "msword", },
- { "Excel", "vnd.ms-excel", },
- { "Powerpoint", "vnd.ms-powerpoint", },
- { "Crystal Reports", "x-rpt", },
- { "Advanced Installer", "vnd.ms-msi", },
- { "InstallShield", "vnd.ms-msi", },
- { "Microsoft Patch Compiler", "vnd.ms-msi", },
- { "NAnt", "vnd.ms-msi", },
- { "Windows Installer", "vnd.ms-msi", },
- { NULL, NULL, },
-}, name2mime[] = {
- { "Book", "vnd.ms-excel", },
- { "Workbook", "vnd.ms-excel", },
- { "WordDocument", "msword", },
- { "PowerPoint", "vnd.ms-powerpoint", },
- { "DigitalSignature", "vnd.ms-msi", },
- { NULL, NULL, },
-}, name2desc[] = {
- { "Book", "Microsoft Excel", },
- { "Workbook", "Microsoft Excel", },
- { "WordDocument", "Microsoft Word", },
- { "PowerPoint", "Microsoft PowerPoint", },
- { "DigitalSignature", "Microsoft Installer", },
- { NULL, NULL, },
-};
-
-static const struct cv {
- uint64_t clsid[2];
- const char *mime;
-} clsid2mime[] = {
- {
- { 0x00000000000c1084ULL, 0x46000000000000c0ULL },
- "x-msi",
- },
- { { 0, 0 },
- NULL,
- },
-}, clsid2desc[] = {
- {
- { 0x00000000000c1084ULL, 0x46000000000000c0ULL },
- "MSI Installer",
- },
- { { 0, 0 },
- NULL,
- },
-};
-
-file_private const char *
-cdf_clsid_to_mime(const uint64_t clsid[2], const struct cv *cv)
-{
- size_t i;
- for (i = 0; cv[i].mime != NULL; i++) {
- if (clsid[0] == cv[i].clsid[0] && clsid[1] == cv[i].clsid[1])
- return cv[i].mime;
- }
-#ifdef CDF_DEBUG
- fprintf(stderr, "unknown mime %" PRIx64 ", %" PRIx64 "\n", clsid[0],
- clsid[1]);
-#endif
- return NULL;
-}
-
-file_private const char *
-cdf_app_to_mime(const char *vbuf, const struct nv *nv)
-{
- size_t i;
- const char *rv = NULL;
-#ifdef USE_C_LOCALE
- locale_t old_lc_ctype, c_lc_ctype;
-
- c_lc_ctype = newlocale(LC_CTYPE_MASK, "C", 0);
- assert(c_lc_ctype != NULL);
- old_lc_ctype = uselocale(c_lc_ctype);
- assert(old_lc_ctype != NULL);
-#else
- char *old_lc_ctype = setlocale(LC_CTYPE, NULL);
- assert(old_lc_ctype != NULL);
- old_lc_ctype = strdup(old_lc_ctype);
- assert(old_lc_ctype != NULL);
- (void)setlocale(LC_CTYPE, "C");
-#endif
- for (i = 0; nv[i].pattern != NULL; i++)
- if (strcasestr(vbuf, nv[i].pattern) != NULL) {
- rv = nv[i].mime;
- break;
- }
-#ifdef CDF_DEBUG
- fprintf(stderr, "unknown app %s\n", vbuf);
-#endif
-#ifdef USE_C_LOCALE
- (void)uselocale(old_lc_ctype);
- freelocale(c_lc_ctype);
-#else
- (void)setlocale(LC_CTYPE, old_lc_ctype);
- free(old_lc_ctype);
-#endif
- return rv;
-}
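
The locale switching above exists so that the case-insensitive substring match cannot be skewed by the caller's LC_CTYPE. With POSIX.1-2008 per-thread locales, the same pattern isolated into a sketch (the function name is illustrative; strcasestr() is the same non-standard extension the code above relies on and may need _GNU_SOURCE or <strings.h> depending on the platform):

    #include <locale.h>
    #include <string.h>

    /* Run strcasestr() under the "C" locale regardless of the caller's locale. */
    static const char *search_c_locale(const char *haystack, const char *needle)
    {
        locale_t c = newlocale(LC_CTYPE_MASK, "C", (locale_t)0);
        if (c == (locale_t)0)
            return strcasestr(haystack, needle);    /* best-effort fallback */
        locale_t old = uselocale(c);
        const char *hit = strcasestr(haystack, needle);
        (void)uselocale(old);
        freelocale(c);
        return hit;
    }
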
-
-file_private int
-cdf_file_property_info(struct magic_set *ms, const cdf_property_info_t *info,
- size_t count, const cdf_directory_t *root_storage)
-{
- size_t i;
- cdf_timestamp_t tp;
- struct timespec ts;
- char buf[64];
- const char *str = NULL;
- const char *s, *e;
- int len;
-
- if (!NOTMIME(ms) && root_storage)
- str = cdf_clsid_to_mime(root_storage->d_storage_uuid,
- clsid2mime);
-
- for (i = 0; i < count; i++) {
- cdf_print_property_name(buf, sizeof(buf), info[i].pi_id);
- switch (info[i].pi_type) {
- case CDF_NULL:
- break;
- case CDF_SIGNED16:
- if (NOTMIME(ms) && file_printf(ms, ", %s: %hd", buf,
- info[i].pi_s16) == -1)
- return -1;
- break;
- case CDF_SIGNED32:
- if (NOTMIME(ms) && file_printf(ms, ", %s: %d", buf,
- info[i].pi_s32) == -1)
- return -1;
- break;
- case CDF_UNSIGNED32:
- if (NOTMIME(ms) && file_printf(ms, ", %s: %u", buf,
- info[i].pi_u32) == -1)
- return -1;
- break;
- case CDF_FLOAT:
- if (NOTMIME(ms) && file_printf(ms, ", %s: %g", buf,
- info[i].pi_f) == -1)
- return -1;
- break;
- case CDF_DOUBLE:
- if (NOTMIME(ms) && file_printf(ms, ", %s: %g", buf,
- info[i].pi_d) == -1)
- return -1;
- break;
- case CDF_LENGTH32_STRING:
- case CDF_LENGTH32_WSTRING:
- len = info[i].pi_str.s_len;
- if (len > 1) {
- char vbuf[1024];
- size_t j, k = 1;
-
- if (info[i].pi_type == CDF_LENGTH32_WSTRING)
- k++;
- s = info[i].pi_str.s_buf;
- e = info[i].pi_str.s_buf + len;
- for (j = 0; s < e && j < sizeof(vbuf)
- && len--; s += k) {
- if (*s == '\0')
- break;
- if (isprint(CAST(unsigned char, *s)))
- vbuf[j++] = *s;
- }
- if (j == sizeof(vbuf))
- --j;
- vbuf[j] = '\0';
- if (NOTMIME(ms)) {
- if (vbuf[0]) {
- if (file_printf(ms, ", %s: %s",
- buf, vbuf) == -1)
- return -1;
- }
- } else if (str == NULL && info[i].pi_id ==
- CDF_PROPERTY_NAME_OF_APPLICATION) {
- str = cdf_app_to_mime(vbuf, app2mime);
- }
- }
- break;
- case CDF_FILETIME:
- tp = info[i].pi_tp;
- if (tp != 0) {
- char tbuf[64];
- if (tp < 1000000000000000LL) {
- cdf_print_elapsed_time(tbuf,
- sizeof(tbuf), tp);
- if (NOTMIME(ms) && file_printf(ms,
- ", %s: %s", buf, tbuf) == -1)
- return -1;
- } else {
- char *c, *ec;
- cdf_timestamp_to_timespec(&ts, tp);
- c = cdf_ctime(&ts.tv_sec, tbuf);
- if (c != NULL &&
- (ec = strchr(c, '\n')) != NULL)
- *ec = '\0';
-
- if (NOTMIME(ms) && file_printf(ms,
- ", %s: %s", buf, c) == -1)
- return -1;
- }
- }
- break;
- case CDF_CLIPBOARD:
- break;
- default:
- return -1;
- }
- }
- if (ms->flags & MAGIC_MIME_TYPE) {
- if (str == NULL)
- return 0;
- if (file_printf(ms, "application/%s", str) == -1)
- return -1;
- }
- return 1;
-}
-
-file_private int
-cdf_file_catalog(struct magic_set *ms, const cdf_header_t *h,
- const cdf_stream_t *sst)
-{
- cdf_catalog_t *cat;
- size_t i;
- char buf[256];
- cdf_catalog_entry_t *ce;
-
- if (NOTMIME(ms)) {
- if (file_printf(ms, "Microsoft Thumbs.db [") == -1)
- return -1;
- if (cdf_unpack_catalog(h, sst, &cat) == -1)
- return -1;
- ce = cat->cat_e;
- /* skip first entry since it has a , or paren */
- for (i = 1; i < cat->cat_num; i++)
- if (file_printf(ms, "%s%s",
- cdf_u16tos8(buf, ce[i].ce_namlen, ce[i].ce_name),
- i == cat->cat_num - 1 ? "]" : ", ") == -1) {
- free(cat);
- return -1;
- }
- free(cat);
- } else if (ms->flags & MAGIC_MIME_TYPE) {
- if (file_printf(ms, "application/CDFV2") == -1)
- return -1;
- }
- return 1;
-}
-
-file_private int
-cdf_file_summary_info(struct magic_set *ms, const cdf_header_t *h,
- const cdf_stream_t *sst, const cdf_directory_t *root_storage)
-{
- cdf_summary_info_header_t si;
- cdf_property_info_t *info;
- size_t count;
- int m;
-
- if (cdf_unpack_summary_info(sst, h, &si, &info, &count) == -1)
- return -1;
-
- if (NOTMIME(ms)) {
- const char *str;
-
- if (file_printf(ms, "Composite Document File V2 Document")
- == -1)
- return -1;
-
- if (file_printf(ms, ", %s Endian",
- si.si_byte_order == 0xfffe ? "Little" : "Big") == -1)
- return -2;
- switch (si.si_os) {
- case 2:
- if (file_printf(ms, ", Os: Windows, Version %d.%d",
- si.si_os_version & 0xff,
- CAST(uint32_t, si.si_os_version) >> 8) == -1)
- return -2;
- break;
- case 1:
- if (file_printf(ms, ", Os: MacOS, Version %d.%d",
- CAST(uint32_t, si.si_os_version) >> 8,
- si.si_os_version & 0xff) == -1)
- return -2;
- break;
- default:
- if (file_printf(ms, ", Os %d, Version: %d.%d", si.si_os,
- si.si_os_version & 0xff,
- CAST(uint32_t, si.si_os_version) >> 8) == -1)
- return -2;
- break;
- }
- if (root_storage) {
- str = cdf_clsid_to_mime(root_storage->d_storage_uuid,
- clsid2desc);
- if (str) {
- if (file_printf(ms, ", %s", str) == -1)
- return -2;
- }
- }
- }
-
- m = cdf_file_property_info(ms, info, count, root_storage);
- free(info);
-
- return m == -1 ? -2 : m;
-}
-
-#ifdef notdef
-file_private char *
-format_clsid(char *buf, size_t len, const uint64_t uuid[2]) {
- snprintf(buf, len, "%.8" PRIx64 "-%.4" PRIx64 "-%.4" PRIx64 "-%.4"
- PRIx64 "-%.12" PRIx64,
- (uuid[0] >> 32) & (uint64_t)0x000000000ffffffffULL,
- (uuid[0] >> 16) & (uint64_t)0x0000000000000ffffULL,
- (uuid[0] >> 0) & (uint64_t)0x0000000000000ffffULL,
- (uuid[1] >> 48) & (uint64_t)0x0000000000000ffffULL,
- (uuid[1] >> 0) & (uint64_t)0x0000fffffffffffffULL);
- return buf;
-}
-#endif
-
-file_private int
-cdf_file_catalog_info(struct magic_set *ms, const cdf_info_t *info,
- const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat,
- const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn)
-{
- int i;
-
- if ((i = cdf_read_user_stream(info, h, sat, ssat, sst,
- dir, "Catalog", scn)) == -1)
- return i;
-#ifdef CDF_DEBUG
- cdf_dump_catalog(h, scn);
-#endif
- if ((i = cdf_file_catalog(ms, h, scn)) == -1)
- return -1;
- return i;
-}
-
-file_private int
-cdf_check_summary_info(struct magic_set *ms, const cdf_info_t *info,
- const cdf_header_t *h, const cdf_sat_t *sat, const cdf_sat_t *ssat,
- const cdf_stream_t *sst, const cdf_dir_t *dir, cdf_stream_t *scn,
- const cdf_directory_t *root_storage, const char **expn)
-{
- int i;
- const char *str = NULL;
- cdf_directory_t *d;
- char name[__arraycount(d->d_name)];
- size_t j, k;
-
-#ifdef CDF_DEBUG
- cdf_dump_summary_info(h, scn);
-#endif
- if ((i = cdf_file_summary_info(ms, h, scn, root_storage)) < 0) {
- *expn = "Can't expand summary_info";
- return i;
- }
- if (i == 1)
- return i;
- for (j = 0; str == NULL && j < dir->dir_len; j++) {
- d = &dir->dir_tab[j];
- for (k = 0; k < sizeof(name); k++)
- name[k] = CAST(char, cdf_tole2(d->d_name[k]));
- str = cdf_app_to_mime(name,
- NOTMIME(ms) ? name2desc : name2mime);
- }
- if (NOTMIME(ms)) {
- if (str != NULL) {
- if (file_printf(ms, "%s", str) == -1)
- return -1;
- i = 1;
- }
- } else if (ms->flags & MAGIC_MIME_TYPE) {
- if (str == NULL)
- str = "vnd.ms-office";
- if (file_printf(ms, "application/%s", str) == -1)
- return -1;
- i = 1;
- }
- if (i <= 0) {
- i = cdf_file_catalog_info(ms, info, h, sat, ssat, sst,
- dir, scn);
- }
- return i;
-}
-
-file_private struct sinfo {
- const char *name;
- const char *mime;
- const char *sections[5];
- const int types[5];
-} sectioninfo[] = {
- { "Encrypted", "encrypted",
- {
- "EncryptedPackage", "EncryptedSummary",
- NULL, NULL, NULL,
- },
- {
- CDF_DIR_TYPE_USER_STREAM,
- CDF_DIR_TYPE_USER_STREAM,
- 0, 0, 0,
-
- },
- },
- { "QuickBooks", "quickbooks",
- {
-#if 0
- "TaxForms", "PDFTaxForms", "modulesInBackup",
-#endif
- "mfbu_header", NULL, NULL, NULL, NULL,
- },
- {
-#if 0
- CDF_DIR_TYPE_USER_STORAGE,
- CDF_DIR_TYPE_USER_STORAGE,
- CDF_DIR_TYPE_USER_STREAM,
-#endif
- CDF_DIR_TYPE_USER_STREAM,
- 0, 0, 0, 0
- },
- },
- { "Microsoft Excel", "vnd.ms-excel",
- {
- "Book", "Workbook", NULL, NULL, NULL,
- },
- {
- CDF_DIR_TYPE_USER_STREAM,
- CDF_DIR_TYPE_USER_STREAM,
- 0, 0, 0,
- },
- },
- { "Microsoft Word", "msword",
- {
- "WordDocument", NULL, NULL, NULL, NULL,
- },
- {
- CDF_DIR_TYPE_USER_STREAM,
- 0, 0, 0, 0,
- },
- },
- { "Microsoft PowerPoint", "vnd.ms-powerpoint",
- {
- "PowerPoint", NULL, NULL, NULL, NULL,
- },
- {
- CDF_DIR_TYPE_USER_STREAM,
- 0, 0, 0, 0,
- },
- },
- { "Microsoft Outlook Message", "vnd.ms-outlook",
- {
- "__properties_version1.0",
- "__recip_version1.0_#00000000",
- NULL, NULL, NULL,
- },
- {
- CDF_DIR_TYPE_USER_STREAM,
- CDF_DIR_TYPE_USER_STORAGE,
- 0, 0, 0,
- },
- },
-};
-
-file_private int
-cdf_file_dir_info(struct magic_set *ms, const cdf_dir_t *dir)
-{
- size_t sd, j;
-
- for (sd = 0; sd < __arraycount(sectioninfo); sd++) {
- const struct sinfo *si = &sectioninfo[sd];
- for (j = 0; si->sections[j]; j++) {
- if (cdf_find_stream(dir, si->sections[j], si->types[j])
- > 0)
- break;
-#ifdef CDF_DEBUG
- fprintf(stderr, "Can't read %s\n", si->sections[j]);
-#endif
- }
- if (si->sections[j] == NULL)
- continue;
- if (NOTMIME(ms)) {
- if (file_printf(ms, "CDFV2 %s", si->name) == -1)
- return -1;
- } else if (ms->flags & MAGIC_MIME_TYPE) {
- if (file_printf(ms, "application/%s", si->mime) == -1)
- return -1;
- }
- return 1;
- }
- return -1;
-}
-
-file_protected int
-file_trycdf(struct magic_set *ms, const struct buffer *b)
-{
- int fd = b->fd;
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- cdf_info_t info;
- cdf_header_t h;
- cdf_sat_t sat, ssat;
- cdf_stream_t sst, scn;
- cdf_dir_t dir;
- int i;
- const char *expn = "";
- const cdf_directory_t *root_storage;
-
- scn.sst_tab = NULL;
- info.i_fd = fd;
- info.i_buf = buf;
- info.i_len = nbytes;
- if (ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION))
- return 0;
- if (cdf_read_header(&info, &h) == -1)
- return 0;
-#ifdef CDF_DEBUG
- cdf_dump_header(&h);
-#endif
-
- if ((i = cdf_read_sat(&info, &h, &sat)) == -1) {
- expn = "Can't read SAT";
- goto out0;
- }
-#ifdef CDF_DEBUG
- cdf_dump_sat("SAT", &sat, CDF_SEC_SIZE(&h));
-#endif
-
- if ((i = cdf_read_ssat(&info, &h, &sat, &ssat)) == -1) {
- expn = "Can't read SSAT";
- goto out1;
- }
-#ifdef CDF_DEBUG
- cdf_dump_sat("SSAT", &ssat, CDF_SHORT_SEC_SIZE(&h));
-#endif
-
- if ((i = cdf_read_dir(&info, &h, &sat, &dir)) == -1) {
- expn = "Can't read directory";
- goto out2;
- }
-
- if ((i = cdf_read_short_stream(&info, &h, &sat, &dir, &sst,
- &root_storage)) == -1) {
- expn = "Cannot read short stream";
- goto out3;
- }
-#ifdef CDF_DEBUG
- cdf_dump_dir(&info, &h, &sat, &ssat, &sst, &dir);
-#endif
-#ifdef notdef
- if (root_storage) {
- if (NOTMIME(ms)) {
- char clsbuf[128];
- if (file_printf(ms, "CLSID %s, ",
- format_clsid(clsbuf, sizeof(clsbuf),
- root_storage->d_storage_uuid)) == -1)
- return -1;
- }
- }
-#endif
-
- if (cdf_read_user_stream(&info, &h, &sat, &ssat, &sst, &dir,
- "FileHeader", &scn) != -1) {
-#define HWP5_SIGNATURE "HWP Document File"
- if (scn.sst_len * scn.sst_ss >= sizeof(HWP5_SIGNATURE) - 1
- && memcmp(scn.sst_tab, HWP5_SIGNATURE,
- sizeof(HWP5_SIGNATURE) - 1) == 0) {
- if (NOTMIME(ms)) {
- if (file_printf(ms,
- "Hancom HWP (Hangul Word Processor) file, version 5.0") == -1)
- return -1;
- } else if (ms->flags & MAGIC_MIME_TYPE) {
- if (file_printf(ms, "application/x-hwp") == -1)
- return -1;
- }
- i = 1;
- goto out5;
- } else {
- cdf_zero_stream(&scn);
- }
- }
-
- if ((i = cdf_read_summary_info(&info, &h, &sat, &ssat, &sst, &dir,
- &scn)) == -1) {
- if (errno != ESRCH) {
- expn = "Cannot read summary info";
- }
- } else {
- i = cdf_check_summary_info(ms, &info, &h,
- &sat, &ssat, &sst, &dir, &scn, root_storage, &expn);
- cdf_zero_stream(&scn);
- }
- if (i <= 0) {
- if ((i = cdf_read_doc_summary_info(&info, &h, &sat, &ssat,
- &sst, &dir, &scn)) == -1) {
- if (errno != ESRCH) {
- expn = "Cannot read summary info";
- }
- } else {
- i = cdf_check_summary_info(ms, &info, &h, &sat, &ssat,
- &sst, &dir, &scn, root_storage, &expn);
- }
- }
- if (i <= 0) {
- i = cdf_file_dir_info(ms, &dir);
- if (i < 0)
- expn = "Cannot read section info";
- }
-out5:
- cdf_zero_stream(&scn);
- cdf_zero_stream(&sst);
-out3:
- free(dir.dir_tab);
-out2:
- free(ssat.sat_tab);
-out1:
- free(sat.sat_tab);
-out0:
- /* If we handled it already, return */
- if (i != -1)
- return i;
- /* Provide a default handler */
- if (NOTMIME(ms)) {
- if (file_printf(ms,
- "Composite Document File V2 Document") == -1)
- return -1;
- if (*expn)
- if (file_printf(ms, ", %s", expn) == -1)
- return -1;
- } else if (ms->flags & MAGIC_MIME_TYPE) {
- /* https://reposcope.com/mimetype/application/x-ole-storage */
- if (file_printf(ms, "application/x-ole-storage") == -1)
- return -1;
- }
- return 1;
-}
diff --git a/contrib/libs/libmagic/src/readelf.c b/contrib/libs/libmagic/src/readelf.c
deleted file mode 100644
index a2a66ddd72..0000000000
--- a/contrib/libs/libmagic/src/readelf.c
+++ /dev/null
@@ -1,1899 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2003.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: readelf.c,v 1.190 2023/07/27 19:39:06 christos Exp $")
-#endif
-
-#ifdef BUILTIN_ELF
-#include <string.h>
-#include <ctype.h>
-#include <stdlib.h>
-#ifdef HAVE_UNISTD_H
-#include <unistd.h>
-#endif
-
-#include "readelf.h"
-#include "magic.h"
-
-#ifdef ELFCORE
-file_private int dophn_core(struct magic_set *, int, int, int, off_t, int,
- size_t, off_t, int *, uint16_t *);
-#endif
-file_private int dophn_exec(struct magic_set *, int, int, int, off_t, int,
- size_t, off_t, int, int *, uint16_t *);
-file_private int doshn(struct magic_set *, int, int, int, off_t, int, size_t,
- off_t, int, int, int *, uint16_t *);
-file_private size_t donote(struct magic_set *, void *, size_t, size_t, int,
- int, size_t, int *, uint16_t *, int, off_t, int, off_t);
-
-#define ELF_ALIGN(a) ((((a) + align - 1) / align) * align)
-
-#define isquote(c) (strchr("'\"`", (c)) != NULL)
-
-file_private uint16_t getu16(int, uint16_t);
-file_private uint32_t getu32(int, uint32_t);
-file_private uint64_t getu64(int, uint64_t);
-
-#define SIZE_UNKNOWN CAST(off_t, -1)
-
-file_private int
-toomany(struct magic_set *ms, const char *name, uint16_t num)
-{
- if (ms->flags & MAGIC_MIME)
- return 1;
- if (file_printf(ms, ", too many %s (%u)", name, num) == -1)
- return -1;
- return 1;
-}
-
-file_private uint16_t
-getu16(int swap, uint16_t value)
-{
- union {
- uint16_t ui;
- char c[2];
- } retval, tmpval;
-
- if (swap) {
- tmpval.ui = value;
-
- retval.c[0] = tmpval.c[1];
- retval.c[1] = tmpval.c[0];
-
- return retval.ui;
- } else
- return value;
-}
-
-file_private uint32_t
-getu32(int swap, uint32_t value)
-{
- union {
- uint32_t ui;
- char c[4];
- } retval, tmpval;
-
- if (swap) {
- tmpval.ui = value;
-
- retval.c[0] = tmpval.c[3];
- retval.c[1] = tmpval.c[2];
- retval.c[2] = tmpval.c[1];
- retval.c[3] = tmpval.c[0];
-
- return retval.ui;
- } else
- return value;
-}
-
-file_private uint64_t
-getu64(int swap, uint64_t value)
-{
- union {
- uint64_t ui;
- char c[8];
- } retval, tmpval;
-
- if (swap) {
- tmpval.ui = value;
-
- retval.c[0] = tmpval.c[7];
- retval.c[1] = tmpval.c[6];
- retval.c[2] = tmpval.c[5];
- retval.c[3] = tmpval.c[4];
- retval.c[4] = tmpval.c[3];
- retval.c[5] = tmpval.c[2];
- retval.c[6] = tmpval.c[1];
- retval.c[7] = tmpval.c[0];
-
- return retval.ui;
- } else
- return value;
-}
-
-#define elf_getu16(swap, value) getu16(swap, value)
-#define elf_getu32(swap, value) getu32(swap, value)
-#define elf_getu64(swap, value) getu64(swap, value)
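
The getu16()/getu32()/getu64() helpers above swap byte order by copying through a union of the integer and a char array. For comparison, an equivalent shift-and-mask form of the 32-bit case (illustrative only, not part of the original):

    #include <stdint.h>

    /* Swap only when the file's byte order differs from the host's. */
    static uint32_t getu32_shift(int swap, uint32_t value)
    {
        if (!swap)
            return value;
        return ((value & 0x000000ffU) << 24) |
               ((value & 0x0000ff00U) <<  8) |
               ((value & 0x00ff0000U) >>  8) |
               ((value & 0xff000000U) >> 24);
    }
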
-
-#define xsh_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &sh32) \
- : CAST(void *, &sh64))
-#define xsh_sizeof (clazz == ELFCLASS32 \
- ? sizeof(sh32) \
- : sizeof(sh64))
-#define xsh_size CAST(size_t, (clazz == ELFCLASS32 \
- ? elf_getu32(swap, sh32.sh_size) \
- : elf_getu64(swap, sh64.sh_size)))
-#define xsh_offset CAST(off_t, (clazz == ELFCLASS32 \
- ? elf_getu32(swap, sh32.sh_offset) \
- : elf_getu64(swap, sh64.sh_offset)))
-#define xsh_type (clazz == ELFCLASS32 \
- ? elf_getu32(swap, sh32.sh_type) \
- : elf_getu32(swap, sh64.sh_type))
-#define xsh_name (clazz == ELFCLASS32 \
- ? elf_getu32(swap, sh32.sh_name) \
- : elf_getu32(swap, sh64.sh_name))
-
-#define xph_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &ph32) \
- : CAST(void *, &ph64))
-#define xph_sizeof (clazz == ELFCLASS32 \
- ? sizeof(ph32) \
- : sizeof(ph64))
-#define xph_type (clazz == ELFCLASS32 \
- ? elf_getu32(swap, ph32.p_type) \
- : elf_getu32(swap, ph64.p_type))
-#define xph_offset CAST(off_t, (clazz == ELFCLASS32 \
- ? elf_getu32(swap, ph32.p_offset) \
- : elf_getu64(swap, ph64.p_offset)))
-#define xph_align CAST(size_t, (clazz == ELFCLASS32 \
- ? CAST(off_t, (ph32.p_align ? \
- elf_getu32(swap, ph32.p_align) : 4))\
- : CAST(off_t, (ph64.p_align ? \
- elf_getu64(swap, ph64.p_align) : 4))))
-#define xph_vaddr CAST(size_t, (clazz == ELFCLASS32 \
- ? CAST(off_t, (ph32.p_vaddr ? \
- elf_getu32(swap, ph32.p_vaddr) : 4))\
- : CAST(off_t, (ph64.p_vaddr ? \
- elf_getu64(swap, ph64.p_vaddr) : 4))))
-#define xph_filesz CAST(size_t, (clazz == ELFCLASS32 \
- ? elf_getu32(swap, ph32.p_filesz) \
- : elf_getu64(swap, ph64.p_filesz)))
-#define xph_memsz CAST(size_t, ((clazz == ELFCLASS32 \
- ? elf_getu32(swap, ph32.p_memsz) \
- : elf_getu64(swap, ph64.p_memsz))))
-#define xnh_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &nh32) \
- : CAST(void *, &nh64))
-#define xnh_sizeof (clazz == ELFCLASS32 \
- ? sizeof(nh32) \
- : sizeof(nh64))
-#define xnh_type (clazz == ELFCLASS32 \
- ? elf_getu32(swap, nh32.n_type) \
- : elf_getu32(swap, nh64.n_type))
-#define xnh_namesz (clazz == ELFCLASS32 \
- ? elf_getu32(swap, nh32.n_namesz) \
- : elf_getu32(swap, nh64.n_namesz))
-#define xnh_descsz (clazz == ELFCLASS32 \
- ? elf_getu32(swap, nh32.n_descsz) \
- : elf_getu32(swap, nh64.n_descsz))
-
-#define xdh_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &dh32) \
- : CAST(void *, &dh64))
-#define xdh_sizeof (clazz == ELFCLASS32 \
- ? sizeof(dh32) \
- : sizeof(dh64))
-#define xdh_tag (clazz == ELFCLASS32 \
- ? elf_getu32(swap, dh32.d_tag) \
- : elf_getu64(swap, dh64.d_tag))
-#define xdh_val (clazz == ELFCLASS32 \
- ? elf_getu32(swap, dh32.d_un.d_val) \
- : elf_getu64(swap, dh64.d_un.d_val))
-
-#define xcap_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &cap32) \
- : CAST(void *, &cap64))
-#define xcap_sizeof (clazz == ELFCLASS32 \
- ? sizeof(cap32) \
- : sizeof(cap64))
-#define xcap_tag (clazz == ELFCLASS32 \
- ? elf_getu32(swap, cap32.c_tag) \
- : elf_getu64(swap, cap64.c_tag))
-#define xcap_val (clazz == ELFCLASS32 \
- ? elf_getu32(swap, cap32.c_un.c_val) \
- : elf_getu64(swap, cap64.c_un.c_val))
-
-#define xauxv_addr (clazz == ELFCLASS32 \
- ? CAST(void *, &auxv32) \
- : CAST(void *, &auxv64))
-#define xauxv_sizeof (clazz == ELFCLASS32 \
- ? sizeof(auxv32) \
- : sizeof(auxv64))
-#define xauxv_type (clazz == ELFCLASS32 \
- ? elf_getu32(swap, auxv32.a_type) \
- : elf_getu64(swap, auxv64.a_type))
-#define xauxv_val (clazz == ELFCLASS32 \
- ? elf_getu32(swap, auxv32.a_v) \
- : elf_getu64(swap, auxv64.a_v))
-
-#define prpsoffsets(i) (clazz == ELFCLASS32 \
- ? prpsoffsets32[i] \
- : prpsoffsets64[i])
-
-#ifdef ELFCORE
-/*
- * Try larger offsets first to avoid false matches
- * from earlier data that happen to look like strings.
- */
-static const size_t prpsoffsets32[] = {
-#ifdef USE_NT_PSINFO
- 104, /* SunOS 5.x (command line) */
- 88, /* SunOS 5.x (short name) */
-#endif /* USE_NT_PSINFO */
-
- 100, /* SunOS 5.x (command line) */
- 84, /* SunOS 5.x (short name) */
-
- 44, /* Linux (command line) */
- 28, /* Linux (short name) */
-
- 48, /* Linux PowerPC (command line) */
- 32, /* Linux PowerPC (short name) */
-
- 8, /* FreeBSD */
-};
-
-static const size_t prpsoffsets64[] = {
-#ifdef USE_NT_PSINFO
- 152, /* SunOS 5.x (command line) */
- 136, /* SunOS 5.x (short name) */
-#endif /* USE_NT_PSINFO */
-
- 136, /* SunOS 5.x, 64-bit (command line) */
- 120, /* SunOS 5.x, 64-bit (short name) */
-
- 56, /* Linux (command line) */
- 40, /* Linux (tested on core from 2.4.x, short name) */
-
- 16, /* FreeBSD, 64-bit */
-};
-
-#define NOFFSETS32 __arraycount(prpsoffsets32)
-#define NOFFSETS64 __arraycount(prpsoffsets64)
-
-#define NOFFSETS (clazz == ELFCLASS32 ? NOFFSETS32 : NOFFSETS64)
-
-/*
- * Look through the program headers of an executable image, searching
- * for a PT_NOTE section of type NT_PRPSINFO, with a name "CORE" or
- * "FreeBSD"; if one is found, try looking in various places in its
- * contents for a 16-character string containing only printable
- * characters - if found, that string should be the name of the program
- * that dropped core. Note: right after that 16-character string is,
- * at least in SunOS 5.x (and possibly other SVR4-flavored systems) and
- * Linux, a longer string (80 characters, in 5.x, probably other
- * SVR4-flavored systems, and Linux) containing the start of the
- * command line for that program.
- *
- * SunOS 5.x core files contain two PT_NOTE sections, with the types
- * NT_PRPSINFO (old) and NT_PSINFO (new). These structs contain the
- * same info about the command name and command line, so it probably
- * isn't worthwhile to look for NT_PSINFO, but the offsets are provided
- * above (see USE_NT_PSINFO), in case we ever decide to do so. The
- * NT_PRPSINFO and NT_PSINFO sections are always in order and adjacent;
- * the SunOS 5.x file command relies on this (and prefers the latter).
- *
- * The signal number probably appears in a section of type NT_PRSTATUS,
- * but that's also rather OS-dependent, in ways that are harder to
- * dissect with heuristics, so I'm not bothering with the signal number.
- * (I suppose the signal number could be of interest in situations where
- * you don't have the binary of the program that dropped core; if you
- * *do* have that binary, the debugger will probably tell you what
- * signal it was.)
- */
-
-#define OS_STYLE_SVR4 0
-#define OS_STYLE_FREEBSD 1
-#define OS_STYLE_NETBSD 2
-
-file_private const char os_style_names[][8] = {
- "SVR4",
- "FreeBSD",
- "NetBSD",
-};
-
-#define FLAGS_CORE_STYLE 0x0003
-
-#define FLAGS_DID_CORE 0x0004
-#define FLAGS_DID_OS_NOTE 0x0008
-#define FLAGS_DID_BUILD_ID 0x0010
-#define FLAGS_DID_CORE_STYLE 0x0020
-#define FLAGS_DID_NETBSD_PAX 0x0040
-#define FLAGS_DID_NETBSD_MARCH 0x0080
-#define FLAGS_DID_NETBSD_CMODEL 0x0100
-#define FLAGS_DID_NETBSD_EMULATION 0x0200
-#define FLAGS_DID_NETBSD_UNKNOWN 0x0400
-#define FLAGS_IS_CORE 0x0800
-#define FLAGS_DID_AUXV 0x1000
-
-file_private int
-dophn_core(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
- int num, size_t size, off_t fsize, int *flags, uint16_t *notecount)
-{
- Elf32_Phdr ph32;
- Elf64_Phdr ph64;
- size_t offset, len;
- unsigned char nbuf[BUFSIZ];
- ssize_t bufsize;
- off_t ph_off = off, offs;
- int ph_num = num;
-
- if (ms->flags & MAGIC_MIME)
- return 0;
-
- if (num == 0) {
- if (file_printf(ms, ", no program header") == -1)
- return -1;
- return 0;
- }
- if (size != xph_sizeof) {
- if (file_printf(ms, ", corrupted program header size") == -1)
- return -1;
- return 0;
- }
-
- /*
- * Loop through all the program headers.
- */
- for ( ; num; num--) {
- if (pread(fd, xph_addr, xph_sizeof, off) <
- CAST(ssize_t, xph_sizeof)) {
- if (file_printf(ms,
- ", can't read elf program headers at %jd",
- (intmax_t)off) == -1)
- return -1;
- return 0;
- }
- off += size;
-
- if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
- /* Perhaps warn here */
- continue;
- }
-
- if (xph_type != PT_NOTE)
- continue;
-
- /*
- * This is a PT_NOTE section; loop through all the notes
- * in the section.
- */
- len = xph_filesz < sizeof(nbuf) ? xph_filesz : sizeof(nbuf);
- offs = xph_offset;
- if ((bufsize = pread(fd, nbuf, len, offs)) == -1) {
- if (file_printf(ms, " can't read note section at %jd",
- (intmax_t)offs) == -1)
- return -1;
- return 0;
- }
- offset = 0;
- for (;;) {
- if (offset >= CAST(size_t, bufsize))
- break;
- offset = donote(ms, nbuf, offset, CAST(size_t, bufsize),
- clazz, swap, 4, flags, notecount, fd, ph_off,
- ph_num, fsize);
- if (offset == 0)
- break;
-
- }
- }
- return 0;
-}
-#endif
-
-static int
-do_note_netbsd_version(struct magic_set *ms, int swap, void *v)
-{
- uint32_t desc;
- memcpy(&desc, v, sizeof(desc));
- desc = elf_getu32(swap, desc);
-
- if (file_printf(ms, ", for NetBSD") == -1)
- return -1;
- /*
- * The version number used to be stuck as 199905, and was thus
- * basically content-free. Newer versions of NetBSD have fixed
- * this and now use the encoding of __NetBSD_Version__:
- *
- * MMmmrrpp00
- *
- * M = major version
- * m = minor version
- * r = release ["",A-Z,Z[A-Z] but numeric]
- * p = patchlevel
- */
- if (desc > 100000000U) {
- uint32_t ver_patch = (desc / 100) % 100;
- uint32_t ver_rel = (desc / 10000) % 100;
- uint32_t ver_min = (desc / 1000000) % 100;
- uint32_t ver_maj = desc / 100000000;
-
- if (file_printf(ms, " %u.%u", ver_maj, ver_min) == -1)
- return -1;
- if (ver_maj >= 9) {
- ver_patch += 100 * ver_rel;
- ver_rel = 0;
- }
- if (ver_rel == 0 && ver_patch != 0) {
- if (file_printf(ms, ".%u", ver_patch) == -1)
- return -1;
- } else if (ver_rel != 0) {
- while (ver_rel > 26) {
- if (file_printf(ms, "Z") == -1)
- return -1;
- ver_rel -= 26;
- }
- if (file_printf(ms, "%c", 'A' + ver_rel - 1) == -1)
- return -1;
- }
- }
- return 0;
-}
-
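The MMmmrrpp00 scheme documented above can be traced in isolation. Below is a minimal, self-contained sketch that mirrors the decoding done by do_note_netbsd_version(); the helper name, the main() driver, and the sample value 999002500 are illustrative assumptions, not part of libmagic.

#include <stdio.h>
#include <stdint.h>

/* Decode a __NetBSD_Version__ value of the form MMmmrrpp00,
 * following the same arithmetic as do_note_netbsd_version(). */
static void decode_netbsd_version(uint32_t desc)
{
	if (desc <= 100000000U) {
		/* Historical binaries were stuck at 199905; nothing to decode. */
		printf("(legacy value %u)\n", desc);
		return;
	}
	uint32_t ver_patch = (desc / 100) % 100;
	uint32_t ver_rel = (desc / 10000) % 100;
	uint32_t ver_min = (desc / 1000000) % 100;
	uint32_t ver_maj = desc / 100000000;

	printf("%u.%u", ver_maj, ver_min);
	if (ver_maj >= 9) {
		/* 9.x and later fold the release field into the patch level. */
		ver_patch += 100 * ver_rel;
		ver_rel = 0;
	}
	if (ver_rel == 0 && ver_patch != 0) {
		printf(".%u", ver_patch);
	} else if (ver_rel != 0) {
		/* Pre-9.x release letters: "", A-Z, ZA-ZZ. */
		while (ver_rel > 26) {
			putchar('Z');
			ver_rel -= 26;
		}
		printf("%c", 'A' + ver_rel - 1);
	}
	printf("\n");
}

int main(void)
{
	decode_netbsd_version(999002500);	/* assumed sample; prints "9.99.25" */
	return 0;
}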
-static int
-do_note_freebsd_version(struct magic_set *ms, int swap, void *v)
-{
- uint32_t desc;
-
- memcpy(&desc, v, sizeof(desc));
- desc = elf_getu32(swap, desc);
- if (file_printf(ms, ", for FreeBSD") == -1)
- return -1;
-
- /*
- * Contents is __FreeBSD_version, whose relation to OS
- * versions is defined by a huge table in the Porter's
- * Handbook. This is the general scheme:
- *
- * Releases:
- * Mmp000 (before 4.10)
- * Mmi0p0 (before 5.0)
- * Mmm0p0
- *
- * Development branches:
- * Mmpxxx (before 4.6)
- * Mmp1xx (before 4.10)
- * Mmi1xx (before 5.0)
- * M000xx (pre-M.0)
- * Mmm1xx
- *
- * M = major version
- * m = minor version
- * i = minor version increment (491000 -> 4.10)
- * p = patchlevel
- * x = revision
- *
- * The first release of FreeBSD to use ELF by default
- * was version 3.0.
- */
- if (desc == 460002) {
- if (file_printf(ms, " 4.6.2") == -1)
- return -1;
- } else if (desc < 460100) {
- if (file_printf(ms, " %d.%d", desc / 100000,
- desc / 10000 % 10) == -1)
- return -1;
- if (desc / 1000 % 10 > 0)
- if (file_printf(ms, ".%d", desc / 1000 % 10) == -1)
- return -1;
- if ((desc % 1000 > 0) || (desc % 100000 == 0))
- if (file_printf(ms, " (%d)", desc) == -1)
- return -1;
- } else if (desc < 500000) {
- if (file_printf(ms, " %d.%d", desc / 100000,
- desc / 10000 % 10 + desc / 1000 % 10) == -1)
- return -1;
- if (desc / 100 % 10 > 0) {
- if (file_printf(ms, " (%d)", desc) == -1)
- return -1;
- } else if (desc / 10 % 10 > 0) {
- if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
- return -1;
- }
- } else {
- if (file_printf(ms, " %d.%d", desc / 100000,
- desc / 1000 % 100) == -1)
- return -1;
- if ((desc / 100 % 10 > 0) ||
- (desc % 100000 / 100 == 0)) {
- if (file_printf(ms, " (%d)", desc) == -1)
- return -1;
- } else if (desc / 10 % 10 > 0) {
- if (file_printf(ms, ".%d", desc / 10 % 10) == -1)
- return -1;
- }
- }
- return 0;
-}
-
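As a quick sanity check of the newest branch above (desc >= 500000), one value can be traced through the same arithmetic. The standalone helper, main(), and the sample constant 1301000 are assumptions added for illustration.

#include <stdio.h>
#include <stdint.h>

/* Trace a modern __FreeBSD_version value (>= 500000) through the same
 * arithmetic used by the last branch of do_note_freebsd_version(). */
static void decode_freebsd_version(uint32_t desc)
{
	printf("%u.%u", desc / 100000, desc / 1000 % 100);
	if ((desc / 100 % 10 > 0) || (desc % 100000 / 100 == 0))
		printf(" (%u)", desc);		/* development branch or pre-M.0 */
	else if (desc / 10 % 10 > 0)
		printf(".%u", desc / 10 % 10);	/* patch level */
	printf("\n");
}

int main(void)
{
	decode_freebsd_version(1301000);	/* assumed sample; prints "13.1" */
	return 0;
}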
-file_private int
-/*ARGSUSED*/
-do_bid_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
- int swap __attribute__((__unused__)), uint32_t namesz, uint32_t descsz,
- size_t noff, size_t doff, int *flags)
-{
- if (namesz == 4 && strcmp(RCAST(char *, &nbuf[noff]), "GNU") == 0 &&
- type == NT_GNU_BUILD_ID && (descsz >= 4 && descsz <= 20)) {
- uint8_t desc[20];
- const char *btype;
- uint32_t i;
- *flags |= FLAGS_DID_BUILD_ID;
- switch (descsz) {
- case 8:
- btype = "xxHash";
- break;
- case 16:
- btype = "md5/uuid";
- break;
- case 20:
- btype = "sha1";
- break;
- default:
- btype = "unknown";
- break;
- }
- if (file_printf(ms, ", BuildID[%s]=", btype) == -1)
- return -1;
- memcpy(desc, &nbuf[doff], descsz);
- for (i = 0; i < descsz; i++)
- if (file_printf(ms, "%02x", desc[i]) == -1)
- return -1;
- return 1;
- }
- if (namesz == 4 && strcmp(RCAST(char *, &nbuf[noff]), "Go") == 0 &&
- type == NT_GO_BUILD_ID && descsz < 128) {
- char buf[256];
- if (file_printf(ms, ", Go BuildID=%s",
- file_copystr(buf, sizeof(buf), descsz,
- RCAST(const char *, &nbuf[doff]))) == -1)
- return -1;
- return 1;
- }
- return 0;
-}
-
-file_private int
-do_os_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
- int swap, uint32_t namesz, uint32_t descsz,
- size_t noff, size_t doff, int *flags)
-{
- const char *name = RCAST(const char *, &nbuf[noff]);
-
- if (namesz == 5 && strcmp(name, "SuSE") == 0 &&
- type == NT_GNU_VERSION && descsz == 2) {
- *flags |= FLAGS_DID_OS_NOTE;
- if (file_printf(ms, ", for SuSE %d.%d", nbuf[doff],
- nbuf[doff + 1]) == -1)
- return -1;
- return 1;
- }
-
- if (namesz == 4 && strcmp(name, "GNU") == 0 &&
- type == NT_GNU_VERSION && descsz == 16) {
- uint32_t desc[4];
- memcpy(desc, &nbuf[doff], sizeof(desc));
-
- *flags |= FLAGS_DID_OS_NOTE;
- if (file_printf(ms, ", for GNU/") == -1)
- return -1;
- switch (elf_getu32(swap, desc[0])) {
- case GNU_OS_LINUX:
- if (file_printf(ms, "Linux") == -1)
- return -1;
- break;
- case GNU_OS_HURD:
- if (file_printf(ms, "Hurd") == -1)
- return -1;
- break;
- case GNU_OS_SOLARIS:
- if (file_printf(ms, "Solaris") == -1)
- return -1;
- break;
- case GNU_OS_KFREEBSD:
- if (file_printf(ms, "kFreeBSD") == -1)
- return -1;
- break;
- case GNU_OS_KNETBSD:
- if (file_printf(ms, "kNetBSD") == -1)
- return -1;
- break;
- default:
- if (file_printf(ms, "<unknown>") == -1)
- return -1;
- }
- if (file_printf(ms, " %d.%d.%d", elf_getu32(swap, desc[1]),
- elf_getu32(swap, desc[2]), elf_getu32(swap, desc[3])) == -1)
- return -1;
- return 1;
- }
-
- if (namesz == 7 && strcmp(name, "NetBSD") == 0) {
- if (type == NT_NETBSD_VERSION && descsz == 4) {
- *flags |= FLAGS_DID_OS_NOTE;
- if (do_note_netbsd_version(ms, swap, &nbuf[doff]) == -1)
- return -1;
- return 1;
- }
- }
-
- if (namesz == 8 && strcmp(name, "FreeBSD") == 0) {
- if (type == NT_FREEBSD_VERSION && descsz == 4) {
- *flags |= FLAGS_DID_OS_NOTE;
- if (do_note_freebsd_version(ms, swap, &nbuf[doff])
- == -1)
- return -1;
- return 1;
- }
- }
-
- if (namesz == 8 && strcmp(name, "OpenBSD") == 0 &&
- type == NT_OPENBSD_VERSION && descsz == 4) {
- *flags |= FLAGS_DID_OS_NOTE;
- if (file_printf(ms, ", for OpenBSD") == -1)
- return -1;
- /* Content of note is always 0 */
- return 1;
- }
-
- if (namesz == 10 && strcmp(name, "DragonFly") == 0 &&
- type == NT_DRAGONFLY_VERSION && descsz == 4) {
- uint32_t desc;
- *flags |= FLAGS_DID_OS_NOTE;
- if (file_printf(ms, ", for DragonFly") == -1)
- return -1;
- memcpy(&desc, &nbuf[doff], sizeof(desc));
- desc = elf_getu32(swap, desc);
- if (file_printf(ms, " %d.%d.%d", desc / 100000,
- desc / 10000 % 10, desc % 10000) == -1)
- return -1;
- return 1;
- }
- return 0;
-}
-
-file_private int
-do_pax_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
- int swap, uint32_t namesz, uint32_t descsz,
- size_t noff, size_t doff, int *flags)
-{
- const char *name = RCAST(const char *, &nbuf[noff]);
-
- if (namesz == 4 && strcmp(name, "PaX") == 0 &&
- type == NT_NETBSD_PAX && descsz == 4) {
- static const char *pax[] = {
- "+mprotect",
- "-mprotect",
- "+segvguard",
- "-segvguard",
- "+ASLR",
- "-ASLR",
- };
- uint32_t desc;
- size_t i;
- int did = 0;
-
- *flags |= FLAGS_DID_NETBSD_PAX;
- memcpy(&desc, &nbuf[doff], sizeof(desc));
- desc = elf_getu32(swap, desc);
-
- if (desc && file_printf(ms, ", PaX: ") == -1)
- return -1;
-
- for (i = 0; i < __arraycount(pax); i++) {
- if (((1 << CAST(int, i)) & desc) == 0)
- continue;
- if (file_printf(ms, "%s%s", did++ ? "," : "",
- pax[i]) == -1)
- return -1;
- }
- return 1;
- }
- return 0;
-}
-
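The PaX note descriptor is just a bitmask, with each set bit selecting one of the labels in the table above. A small sketch of the same loop follows; the 0x11 sample mask and the main() wrapper are assumptions.

#include <stdio.h>

/* Decode a NetBSD PaX bitmask the way do_pax_note() does: each set bit
 * selects the label at the same index. */
static const char *const pax_labels[] = {
	"+mprotect", "-mprotect",
	"+segvguard", "-segvguard",
	"+ASLR", "-ASLR",
};

int main(void)
{
	unsigned desc = 0x11;	/* assumed sample: bits 0 and 4 set */
	int did = 0;
	size_t i;

	printf("PaX: ");
	for (i = 0; i < sizeof(pax_labels) / sizeof(pax_labels[0]); i++) {
		if ((desc & (1u << i)) == 0)
			continue;
		printf("%s%s", did++ ? "," : "", pax_labels[i]);
	}
	printf("\n");	/* prints "PaX: +mprotect,+ASLR" */
	return 0;
}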
-file_private int
-do_core_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
- int swap, uint32_t namesz, uint32_t descsz,
- size_t noff, size_t doff, int *flags, size_t size, int clazz)
-{
-#ifdef ELFCORE
- char buf[256];
- const char *name = RCAST(const char *, &nbuf[noff]);
-
- int os_style = -1;
- /*
- * Sigh. The 2.0.36 kernel in Debian 2.1, at
- * least, doesn't correctly implement name
- * sections, in core dumps, as specified by
- * the "Program Linking" section of "UNIX(R) System
- * V Release 4 Programmer's Guide: ANSI C and
- * Programming Support Tools", because my copy
- * clearly says "The first 'namesz' bytes in 'name'
- * contain a *null-terminated* [emphasis mine]
- * character representation of the entry's owner
- * or originator", but the 2.0.36 kernel code
- * doesn't include the terminating null in the
- * name....
- */
- if ((namesz == 4 && strncmp(name, "CORE", 4) == 0) ||
- (namesz == 5 && strcmp(name, "CORE") == 0)) {
- os_style = OS_STYLE_SVR4;
- }
-
- if ((namesz == 8 && strcmp(name, "FreeBSD") == 0)) {
- os_style = OS_STYLE_FREEBSD;
- }
-
- if ((namesz >= 11 && strncmp(name, "NetBSD-CORE", 11)
- == 0)) {
- os_style = OS_STYLE_NETBSD;
- }
-
- if (os_style != -1 && (*flags & FLAGS_DID_CORE_STYLE) == 0) {
- if (file_printf(ms, ", %s-style", os_style_names[os_style])
- == -1)
- return -1;
- *flags |= FLAGS_DID_CORE_STYLE;
- *flags |= os_style;
- }
-
- switch (os_style) {
- case OS_STYLE_NETBSD:
- if (type == NT_NETBSD_CORE_PROCINFO) {
- char sbuf[512];
- struct NetBSD_elfcore_procinfo pi;
- memset(&pi, 0, sizeof(pi));
- memcpy(&pi, nbuf + doff, MIN(descsz, sizeof(pi)));
-
- if (file_printf(ms, ", from '%.31s', pid=%u, uid=%u, "
- "gid=%u, nlwps=%u, lwp=%u (signal %u/code %u)",
- file_printable(ms, sbuf, sizeof(sbuf),
- RCAST(char *, pi.cpi_name), sizeof(pi.cpi_name)),
- elf_getu32(swap, CAST(uint32_t, pi.cpi_pid)),
- elf_getu32(swap, pi.cpi_euid),
- elf_getu32(swap, pi.cpi_egid),
- elf_getu32(swap, pi.cpi_nlwps),
- elf_getu32(swap, CAST(uint32_t, pi.cpi_siglwp)),
- elf_getu32(swap, pi.cpi_signo),
- elf_getu32(swap, pi.cpi_sigcode)) == -1)
- return -1;
-
- *flags |= FLAGS_DID_CORE;
- return 1;
- }
- break;
-
- case OS_STYLE_FREEBSD:
- if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
- size_t argoff, pidoff;
-
- if (clazz == ELFCLASS32)
- argoff = 4 + 4 + 17;
- else
- argoff = 4 + 4 + 8 + 17;
- if (file_printf(ms, ", from '%.80s'", nbuf + doff +
- argoff) == -1)
- return -1;
- pidoff = argoff + 81 + 2;
- if (doff + pidoff + 4 <= size) {
- if (file_printf(ms, ", pid=%u",
- elf_getu32(swap, *RCAST(uint32_t *, (nbuf +
- doff + pidoff)))) == -1)
- return -1;
- }
- *flags |= FLAGS_DID_CORE;
- }
- break;
-
- default:
- if (type == NT_PRPSINFO && *flags & FLAGS_IS_CORE) {
- size_t i, j;
- unsigned char c;
- /*
- * Extract the program name. We assume
- * it to be 16 characters (that's what it
- * is in SunOS 5.x and Linux).
- *
- * Unfortunately, it's at a different offset
- * in various OSes, so try multiple offsets.
- * If the characters aren't all printable,
- * reject it.
- */
- for (i = 0; i < NOFFSETS; i++) {
- unsigned char *cname, *cp;
- size_t reloffset = prpsoffsets(i);
- size_t noffset = doff + reloffset;
- size_t k;
- for (j = 0; j < 16; j++, noffset++,
- reloffset++) {
- /*
- * Make sure we're not past
- * the end of the buffer; if
- * we are, just give up.
- */
- if (noffset >= size)
- goto tryanother;
-
- /*
- * Make sure we're not past
- * the end of the contents;
- * if we are, this obviously
- * isn't the right offset.
- */
- if (reloffset >= descsz)
- goto tryanother;
-
- c = nbuf[noffset];
- if (c == '\0') {
- /*
- * A '\0' at the
- * beginning is
- * obviously wrong.
- * Any other '\0'
- * means we're done.
- */
- if (j == 0)
- goto tryanother;
- else
- break;
- } else {
- /*
- * A nonprintable
- * character is also
- * wrong.
- */
- if (!isprint(c) || isquote(c))
- goto tryanother;
- }
- }
- /*
- * Well, that worked.
- */
-
- /*
- * Try next offsets, in case this match is
- * in the middle of a string.
- */
- for (k = i + 1 ; k < NOFFSETS; k++) {
- size_t no;
- int adjust = 1;
- if (prpsoffsets(k) >= prpsoffsets(i))
- continue;
- /*
- * pr_fname == pr_psargs - 16 &&
- * non-nul-terminated fname (qemu)
- */
- if (prpsoffsets(k) ==
- prpsoffsets(i) - 16 && j == 16)
- continue;
- for (no = doff + prpsoffsets(k);
- no < doff + prpsoffsets(i); no++)
- adjust = adjust
- && isprint(nbuf[no]);
- if (adjust)
- i = k;
- }
-
- cname = CAST(unsigned char *,
- &nbuf[doff + prpsoffsets(i)]);
- for (cp = cname; cp < nbuf + size && *cp
- && isprint(*cp); cp++)
- continue;
- /*
- * Linux apparently appends a space at the end
- * of the command line: remove it.
- */
- while (cp > cname && isspace(cp[-1]))
- cp--;
- if (file_printf(ms, ", from '%s'",
- file_copystr(buf, sizeof(buf),
- CAST(size_t, cp - cname),
- RCAST(char *, cname))) == -1)
- return -1;
- *flags |= FLAGS_DID_CORE;
- return 1;
-
- tryanother:
- ;
- }
- }
- break;
- }
-#endif
- return 0;
-}
-
-file_private off_t
-get_offset_from_virtaddr(struct magic_set *ms, int swap, int clazz, int fd,
- off_t off, int num, off_t fsize, uint64_t virtaddr)
-{
- Elf32_Phdr ph32;
- Elf64_Phdr ph64;
-
- /*
- * Loop through all the program headers and find the header whose
- * virtual address range contains "virtaddr".
- */
- for ( ; num; num--) {
- if (pread(fd, xph_addr, xph_sizeof, off) <
- CAST(ssize_t, xph_sizeof)) {
- if (file_printf(ms,
- ", can't read elf program header at %jd",
- (intmax_t)off) == -1)
- return -1;
- return 0;
-
- }
- off += xph_sizeof;
-
- if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
- /* Perhaps warn here */
- continue;
- }
-
- if (virtaddr >= xph_vaddr && virtaddr < xph_vaddr + xph_filesz)
- return xph_offset + (virtaddr - xph_vaddr);
- }
- return 0;
-}
-
-file_private size_t
-get_string_on_virtaddr(struct magic_set *ms,
- int swap, int clazz, int fd, off_t ph_off, int ph_num,
- off_t fsize, uint64_t virtaddr, char *buf, ssize_t buflen)
-{
- char *bptr;
- off_t offset;
-
- if (buflen == 0)
- return 0;
-
- offset = get_offset_from_virtaddr(ms, swap, clazz, fd, ph_off, ph_num,
- fsize, virtaddr);
- if (offset < 0 ||
- (buflen = pread(fd, buf, CAST(size_t, buflen), offset)) <= 0) {
- (void)file_printf(ms, ", can't read elf string at %jd",
- (intmax_t)offset);
- return 0;
- }
-
- buf[buflen - 1] = '\0';
-
- /* We expect only printable characters, so return 0 if the buffer
- * contains a non-printable character before the '\0', or is just '\0'. */
- for (bptr = buf; *bptr && isprint(CAST(unsigned char, *bptr)); bptr++)
- continue;
- if (*bptr != '\0')
- return 0;
-
- return bptr - buf;
-}
-
-
-/*ARGSUSED*/
-file_private int
-do_auxv_note(struct magic_set *ms, unsigned char *nbuf, uint32_t type,
- int swap, uint32_t namesz __attribute__((__unused__)),
- uint32_t descsz __attribute__((__unused__)),
- size_t noff __attribute__((__unused__)), size_t doff,
- int *flags, size_t size __attribute__((__unused__)), int clazz,
- int fd, off_t ph_off, int ph_num, off_t fsize)
-{
-#ifdef ELFCORE
- Aux32Info auxv32;
- Aux64Info auxv64;
- size_t elsize = xauxv_sizeof;
- const char *tag;
- int is_string;
- size_t nval, off;
-
- if ((*flags & (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE)) !=
- (FLAGS_IS_CORE|FLAGS_DID_CORE_STYLE))
- return 0;
-
- switch (*flags & FLAGS_CORE_STYLE) {
- case OS_STYLE_SVR4:
- if (type != NT_AUXV)
- return 0;
- break;
-#ifdef notyet
- case OS_STYLE_NETBSD:
- if (type != NT_NETBSD_CORE_AUXV)
- return 0;
- break;
- case OS_STYLE_FREEBSD:
- if (type != NT_FREEBSD_PROCSTAT_AUXV)
- return 0;
- break;
-#endif
- default:
- return 0;
- }
-
- *flags |= FLAGS_DID_AUXV;
-
- nval = 0;
- for (off = 0; off + elsize <= descsz; off += elsize) {
- memcpy(xauxv_addr, &nbuf[doff + off], xauxv_sizeof);
- /* Limit processing to 50 vector entries to prevent DoS */
- if (nval++ >= 50) {
- file_error(ms, 0, "Too many ELF Auxv elements");
- return 1;
- }
-
- switch(xauxv_type) {
- case AT_LINUX_EXECFN:
- is_string = 1;
- tag = "execfn";
- break;
- case AT_LINUX_PLATFORM:
- is_string = 1;
- tag = "platform";
- break;
- case AT_LINUX_UID:
- is_string = 0;
- tag = "real uid";
- break;
- case AT_LINUX_GID:
- is_string = 0;
- tag = "real gid";
- break;
- case AT_LINUX_EUID:
- is_string = 0;
- tag = "effective uid";
- break;
- case AT_LINUX_EGID:
- is_string = 0;
- tag = "effective gid";
- break;
- default:
- is_string = 0;
- tag = NULL;
- break;
- }
-
- if (tag == NULL)
- continue;
-
- if (is_string) {
- char buf[256];
- ssize_t buflen;
- buflen = get_string_on_virtaddr(ms, swap, clazz, fd,
- ph_off, ph_num, fsize, xauxv_val, buf, sizeof(buf));
-
- if (buflen == 0)
- continue;
-
- if (file_printf(ms, ", %s: '%s'", tag, buf) == -1)
- return -1;
- } else {
- if (file_printf(ms, ", %s: %d", tag,
- CAST(int, xauxv_val)) == -1)
- return -1;
- }
- }
- return 1;
-#else
- return 0;
-#endif
-}
-
-file_private size_t
-dodynamic(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
- int clazz, int swap, int *pie, size_t *need)
-{
- Elf32_Dyn dh32;
- Elf64_Dyn dh64;
- unsigned char *dbuf = CAST(unsigned char *, vbuf);
-
- if (xdh_sizeof + offset > size) {
- /*
- * We're out of note headers.
- */
- return xdh_sizeof + offset;
- }
-
- memcpy(xdh_addr, &dbuf[offset], xdh_sizeof);
- offset += xdh_sizeof;
-
- switch (xdh_tag) {
- case DT_FLAGS_1:
- *pie = 1;
- if (xdh_val & DF_1_PIE)
- ms->mode |= 0111;
- else
- ms->mode &= ~0111;
- break;
- case DT_NEEDED:
- (*need)++;
- break;
- default:
- break;
- }
- return offset;
-}
-
-
-file_private size_t
-donote(struct magic_set *ms, void *vbuf, size_t offset, size_t size,
- int clazz, int swap, size_t align, int *flags, uint16_t *notecount,
- int fd, off_t ph_off, int ph_num, off_t fsize)
-{
- Elf32_Nhdr nh32;
- Elf64_Nhdr nh64;
- size_t noff, doff;
- uint32_t namesz, descsz;
- char buf[256];
- unsigned char *nbuf = CAST(unsigned char *, vbuf);
-
- if (*notecount == 0)
- return 0;
- --*notecount;
-
- if (xnh_sizeof + offset > size) {
- /*
- * We're out of note headers.
- */
- return xnh_sizeof + offset;
- }
- /*XXX: GCC */
- memset(&nh32, 0, sizeof(nh32));
- memset(&nh64, 0, sizeof(nh64));
-
- memcpy(xnh_addr, &nbuf[offset], xnh_sizeof);
- offset += xnh_sizeof;
-
- namesz = xnh_namesz;
- descsz = xnh_descsz;
-
- if ((namesz == 0) && (descsz == 0)) {
- /*
- * We're out of note headers.
- */
- return (offset >= size) ? offset : size;
- }
-
- if (namesz & 0x80000000) {
- (void)file_printf(ms, ", bad note name size %#lx",
- CAST(unsigned long, namesz));
- return 0;
- }
-
- if (descsz & 0x80000000) {
- (void)file_printf(ms, ", bad note description size %#lx",
- CAST(unsigned long, descsz));
- return 0;
- }
-
- noff = offset;
- doff = ELF_ALIGN(offset + namesz);
-
- if (offset + namesz > size) {
- /*
- * We're past the end of the buffer.
- */
- return doff;
- }
-
- offset = ELF_ALIGN(doff + descsz);
- if (doff + descsz > size) {
- /*
- * We're past the end of the buffer.
- */
- return (offset >= size) ? offset : size;
- }
-
-
- if ((*flags & FLAGS_DID_OS_NOTE) == 0) {
- if (do_os_note(ms, nbuf, xnh_type, swap,
- namesz, descsz, noff, doff, flags))
- return offset;
- }
-
- if ((*flags & FLAGS_DID_BUILD_ID) == 0) {
- if (do_bid_note(ms, nbuf, xnh_type, swap,
- namesz, descsz, noff, doff, flags))
- return offset;
- }
-
- if ((*flags & FLAGS_DID_NETBSD_PAX) == 0) {
- if (do_pax_note(ms, nbuf, xnh_type, swap,
- namesz, descsz, noff, doff, flags))
- return offset;
- }
-
- if ((*flags & FLAGS_DID_CORE) == 0) {
- if (do_core_note(ms, nbuf, xnh_type, swap,
- namesz, descsz, noff, doff, flags, size, clazz))
- return offset;
- }
-
- if ((*flags & FLAGS_DID_AUXV) == 0) {
- if (do_auxv_note(ms, nbuf, xnh_type, swap,
- namesz, descsz, noff, doff, flags, size, clazz,
- fd, ph_off, ph_num, fsize))
- return offset;
- }
-
- if (namesz == 7 && strcmp(RCAST(char *, &nbuf[noff]), "NetBSD") == 0) {
- int descw, flag;
- const char *str, *tag;
- if (descsz > 100)
- descsz = 100;
- switch (xnh_type) {
- case NT_NETBSD_VERSION:
- return offset;
- case NT_NETBSD_MARCH:
- flag = FLAGS_DID_NETBSD_MARCH;
- tag = "compiled for";
- break;
- case NT_NETBSD_CMODEL:
- flag = FLAGS_DID_NETBSD_CMODEL;
- tag = "compiler model";
- break;
- case NT_NETBSD_EMULATION:
- flag = FLAGS_DID_NETBSD_EMULATION;
- tag = "emulation:";
- break;
- default:
- if (*flags & FLAGS_DID_NETBSD_UNKNOWN)
- return offset;
- *flags |= FLAGS_DID_NETBSD_UNKNOWN;
- if (file_printf(ms, ", note=%u", xnh_type) == -1)
- return offset;
- return offset;
- }
-
- if (*flags & flag)
- return offset;
- str = RCAST(const char *, &nbuf[doff]);
- descw = CAST(int, descsz);
- *flags |= flag;
- file_printf(ms, ", %s: %s", tag,
- file_copystr(buf, sizeof(buf), descw, str));
- return offset;
- }
-
- return offset;
-}
-
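The offset arithmetic in donote() follows the standard ELF note layout: a fixed 12-byte header, then the name and the descriptor, each padded to the alignment passed in (4 here). Below is a minimal walk-through of those offsets for a hypothetical NT_GNU_BUILD_ID note; align4() stands in for the ELF_ALIGN macro and the sizes are assumptions.

#include <stdio.h>
#include <stddef.h>

/* 4-byte alignment, standing in for what ELF_ALIGN does when align == 4. */
static size_t align4(size_t off)
{
	return (off + 3) & ~(size_t)3;
}

int main(void)
{
	/* Hypothetical NT_GNU_BUILD_ID note: name "GNU\0", 20-byte SHA-1 desc. */
	size_t offset = 0;
	size_t hdrsz = 12;	/* sizeof(Elf32_Nhdr): namesz, descsz, type */
	size_t namesz = 4;
	size_t descsz = 20;

	offset += hdrsz;			/* header consumed */
	size_t noff = offset;			/* name starts right after the header */
	size_t doff = align4(offset + namesz);	/* descriptor starts on a 4-byte boundary */
	size_t next = align4(doff + descsz);	/* where the next note header begins */

	printf("noff=%zu doff=%zu next=%zu\n", noff, doff, next);
	/* prints "noff=12 doff=16 next=36" */
	return 0;
}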
-/* SunOS 5.x hardware capability descriptions */
-typedef struct cap_desc {
- uint64_t cd_mask;
- const char *cd_name;
-} cap_desc_t;
-
-static const cap_desc_t cap_desc_sparc[] = {
- { AV_SPARC_MUL32, "MUL32" },
- { AV_SPARC_DIV32, "DIV32" },
- { AV_SPARC_FSMULD, "FSMULD" },
- { AV_SPARC_V8PLUS, "V8PLUS" },
- { AV_SPARC_POPC, "POPC" },
- { AV_SPARC_VIS, "VIS" },
- { AV_SPARC_VIS2, "VIS2" },
- { AV_SPARC_ASI_BLK_INIT, "ASI_BLK_INIT" },
- { AV_SPARC_FMAF, "FMAF" },
- { AV_SPARC_FJFMAU, "FJFMAU" },
- { AV_SPARC_IMA, "IMA" },
- { 0, NULL }
-};
-
-static const cap_desc_t cap_desc_386[] = {
- { AV_386_FPU, "FPU" },
- { AV_386_TSC, "TSC" },
- { AV_386_CX8, "CX8" },
- { AV_386_SEP, "SEP" },
- { AV_386_AMD_SYSC, "AMD_SYSC" },
- { AV_386_CMOV, "CMOV" },
- { AV_386_MMX, "MMX" },
- { AV_386_AMD_MMX, "AMD_MMX" },
- { AV_386_AMD_3DNow, "AMD_3DNow" },
- { AV_386_AMD_3DNowx, "AMD_3DNowx" },
- { AV_386_FXSR, "FXSR" },
- { AV_386_SSE, "SSE" },
- { AV_386_SSE2, "SSE2" },
- { AV_386_PAUSE, "PAUSE" },
- { AV_386_SSE3, "SSE3" },
- { AV_386_MON, "MON" },
- { AV_386_CX16, "CX16" },
- { AV_386_AHF, "AHF" },
- { AV_386_TSCP, "TSCP" },
- { AV_386_AMD_SSE4A, "AMD_SSE4A" },
- { AV_386_POPCNT, "POPCNT" },
- { AV_386_AMD_LZCNT, "AMD_LZCNT" },
- { AV_386_SSSE3, "SSSE3" },
- { AV_386_SSE4_1, "SSE4.1" },
- { AV_386_SSE4_2, "SSE4.2" },
- { 0, NULL }
-};
-
-file_private int
-doshn(struct magic_set *ms, int clazz, int swap, int fd, off_t off, int num,
- size_t size, off_t fsize, int mach, int strtab, int *flags,
- uint16_t *notecount)
-{
- Elf32_Shdr sh32;
- Elf64_Shdr sh64;
- int stripped = 1, has_debug_info = 0;
- size_t nbadcap = 0;
- void *nbuf;
- off_t noff, coff, name_off, offs;
- uint64_t cap_hw1 = 0; /* SunOS 5.x hardware capabilities */
- uint64_t cap_sf1 = 0; /* SunOS 5.x software capabilities */
- char name[50];
- ssize_t namesize;
-
- if (ms->flags & MAGIC_MIME)
- return 0;
-
- if (num == 0) {
- if (file_printf(ms, ", no section header") == -1)
- return -1;
- return 0;
- }
- if (size != xsh_sizeof) {
- if (file_printf(ms, ", corrupted section header size") == -1)
- return -1;
- return 0;
- }
-
- /* Read offset of name section to be able to read section names later */
- offs = CAST(off_t, (off + size * strtab));
- if (pread(fd, xsh_addr, xsh_sizeof, offs) < CAST(ssize_t, xsh_sizeof)) {
- if (file_printf(ms, ", missing section headers at %jd",
- (intmax_t)offs) == -1)
- return -1;
- return 0;
- }
- name_off = xsh_offset;
-
- if (fsize != SIZE_UNKNOWN && fsize < name_off) {
- if (file_printf(ms, ", too large section header offset %jd",
- (intmax_t)name_off) == -1)
- return -1;
- return 0;
- }
-
- for ( ; num; num--) {
- /* Read the name of this section. */
- offs = name_off + xsh_name;
- if ((namesize = pread(fd, name, sizeof(name) - 1, offs))
- == -1) {
- if (file_printf(ms,
- ", can't read name of elf section at %jd",
- (intmax_t)offs) == -1)
- return -1;
- return 0;
- }
- name[namesize] = '\0';
- if (strcmp(name, ".debug_info") == 0) {
- has_debug_info = 1;
- stripped = 0;
- }
-
- if (pread(fd, xsh_addr, xsh_sizeof, off) <
- CAST(ssize_t, xsh_sizeof)) {
- if (file_printf(ms, ", can't read elf section at %jd",
- (intmax_t)off) == -1)
- return -1;
- return 0;
- }
- off += size;
-
- /* Things we can determine before we seek */
- switch (xsh_type) {
- case SHT_SYMTAB:
-#if 0
- case SHT_DYNSYM:
-#endif
- stripped = 0;
- break;
- default:
- if (fsize != SIZE_UNKNOWN && xsh_offset > fsize) {
- /* Perhaps warn here */
- continue;
- }
- break;
- }
-
-
- /* Things we can determine when we seek */
- switch (xsh_type) {
- case SHT_NOTE:
- if (CAST(uintmax_t, (xsh_size + xsh_offset)) >
- CAST(uintmax_t, fsize)) {
- if (file_printf(ms,
- ", note offset/size %#" INTMAX_T_FORMAT
- "x+%#" INTMAX_T_FORMAT "x exceeds"
- " file size %#" INTMAX_T_FORMAT "x",
- CAST(uintmax_t, xsh_offset),
- CAST(uintmax_t, xsh_size),
- CAST(uintmax_t, fsize)) == -1)
- return -1;
- return 0;
- }
- if (xsh_size > ms->elf_shsize_max) {
- file_error(ms, errno, "Note section size too "
- "big (%ju > %zu)", (uintmax_t)xsh_size,
- ms->elf_shsize_max);
- return -1;
- }
- if ((nbuf = malloc(xsh_size)) == NULL) {
- file_error(ms, errno, "Cannot allocate memory"
- " for note");
- return -1;
- }
- offs = xsh_offset;
- if (pread(fd, nbuf, xsh_size, offs) <
- CAST(ssize_t, xsh_size)) {
- free(nbuf);
- if (file_printf(ms,
- ", can't read elf note at %jd",
- (intmax_t)offs) == -1)
- return -1;
- return 0;
- }
-
- noff = 0;
- for (;;) {
- if (noff >= CAST(off_t, xsh_size))
- break;
- noff = donote(ms, nbuf, CAST(size_t, noff),
- xsh_size, clazz, swap, 4, flags, notecount,
- fd, 0, 0, 0);
- if (noff == 0)
- break;
- }
- free(nbuf);
- break;
- case SHT_SUNW_cap:
- switch (mach) {
- case EM_SPARC:
- case EM_SPARCV9:
- case EM_IA_64:
- case EM_386:
- case EM_AMD64:
- break;
- default:
- goto skip;
- }
-
- if (nbadcap > 5)
- break;
- if (lseek(fd, xsh_offset, SEEK_SET)
- == CAST(off_t, -1)) {
- file_badseek(ms);
- return -1;
- }
- coff = 0;
- for (;;) {
- Elf32_Cap cap32;
- Elf64_Cap cap64;
- char cbuf[/*CONSTCOND*/
- MAX(sizeof(cap32), sizeof(cap64))];
- if ((coff += xcap_sizeof) >
- CAST(off_t, xsh_size))
- break;
- if (read(fd, cbuf, CAST(size_t, xcap_sizeof)) !=
- CAST(ssize_t, xcap_sizeof)) {
- file_badread(ms);
- return -1;
- }
- if (cbuf[0] == 'A') {
-#ifdef notyet
- char *p = cbuf + 1;
- uint32_t len, tag;
- memcpy(&len, p, sizeof(len));
- p += 4;
- len = getu32(swap, len);
- if (memcmp("gnu", p, 3) != 0) {
- if (file_printf(ms,
- ", unknown capability %.3s", p)
- == -1)
- return -1;
- break;
- }
- p += strlen(p) + 1;
- tag = *p++;
- memcpy(&len, p, sizeof(len));
- p += 4;
- len = getu32(swap, len);
- if (tag != 1) {
- if (file_printf(ms, ", unknown gnu"
- " capability tag %d", tag)
- == -1)
- return -1;
- break;
- }
- // gnu attributes
-#endif
- break;
- }
- memcpy(xcap_addr, cbuf, xcap_sizeof);
- switch (xcap_tag) {
- case CA_SUNW_NULL:
- break;
- case CA_SUNW_HW_1:
- cap_hw1 |= xcap_val;
- break;
- case CA_SUNW_SF_1:
- cap_sf1 |= xcap_val;
- break;
- default:
- if (file_printf(ms,
- ", with unknown capability "
- "%#" INT64_T_FORMAT "x = %#"
- INT64_T_FORMAT "x",
- CAST(unsigned long long, xcap_tag),
- CAST(unsigned long long, xcap_val))
- == -1)
- return -1;
- if (nbadcap++ > 2)
- coff = xsh_size;
- break;
- }
- }
- /*FALLTHROUGH*/
- skip:
- default:
- break;
- }
- }
-
- if (has_debug_info) {
- if (file_printf(ms, ", with debug_info") == -1)
- return -1;
- }
- if (file_printf(ms, ", %sstripped", stripped ? "" : "not ") == -1)
- return -1;
- if (cap_hw1) {
- const cap_desc_t *cdp;
- switch (mach) {
- case EM_SPARC:
- case EM_SPARC32PLUS:
- case EM_SPARCV9:
- cdp = cap_desc_sparc;
- break;
- case EM_386:
- case EM_IA_64:
- case EM_AMD64:
- cdp = cap_desc_386;
- break;
- default:
- cdp = NULL;
- break;
- }
- if (file_printf(ms, ", uses") == -1)
- return -1;
- if (cdp) {
- while (cdp->cd_name) {
- if (cap_hw1 & cdp->cd_mask) {
- if (file_printf(ms,
- " %s", cdp->cd_name) == -1)
- return -1;
- cap_hw1 &= ~cdp->cd_mask;
- }
- ++cdp;
- }
- if (cap_hw1)
- if (file_printf(ms,
- " unknown hardware capability %#"
- INT64_T_FORMAT "x",
- CAST(unsigned long long, cap_hw1)) == -1)
- return -1;
- } else {
- if (file_printf(ms,
- " hardware capability %#" INT64_T_FORMAT "x",
- CAST(unsigned long long, cap_hw1)) == -1)
- return -1;
- }
- }
- if (cap_sf1) {
- if (cap_sf1 & SF1_SUNW_FPUSED) {
- if (file_printf(ms,
- (cap_sf1 & SF1_SUNW_FPKNWN)
- ? ", uses frame pointer"
- : ", not known to use frame pointer") == -1)
- return -1;
- }
- cap_sf1 &= ~SF1_SUNW_MASK;
- if (cap_sf1)
- if (file_printf(ms,
- ", with unknown software capability %#"
- INT64_T_FORMAT "x",
- CAST(unsigned long long, cap_sf1)) == -1)
- return -1;
- }
- return 0;
-}
-
-/*
- * Look through the program headers of an executable image to determine
- * whether it is statically or dynamically linked. If it has a dynamic
- * section, is PIE, and has neither an interpreter nor needed libraries,
- * we call it static PIE.
- */
-file_private int
-dophn_exec(struct magic_set *ms, int clazz, int swap, int fd, off_t off,
- int num, size_t size, off_t fsize, int sh_num, int *flags,
- uint16_t *notecount)
-{
- Elf32_Phdr ph32;
- Elf64_Phdr ph64;
- const char *linking_style;
- unsigned char nbuf[BUFSIZ];
- char ibuf[BUFSIZ];
- char interp[BUFSIZ];
- ssize_t bufsize;
- size_t offset, align, need = 0;
- int pie = 0, dynamic = 0;
-
- if (num == 0) {
- if (file_printf(ms, ", no program header") == -1)
- return -1;
- return 0;
- }
- if (size != xph_sizeof) {
- if (file_printf(ms, ", corrupted program header size") == -1)
- return -1;
- return 0;
- }
-
- interp[0] = '\0';
- for ( ; num; num--) {
- int doread;
- if (pread(fd, xph_addr, xph_sizeof, off) <
- CAST(ssize_t, xph_sizeof)) {
- if (file_printf(ms,
- ", can't read elf program headers at %jd",
- (intmax_t)off) == -1)
- return -1;
- return 0;
- }
-
- off += size;
- bufsize = 0;
- align = 4;
-
- /* Things we can determine before we seek */
- switch (xph_type) {
- case PT_DYNAMIC:
- doread = 1;
- break;
- case PT_NOTE:
- if (sh_num) /* Did this through section headers */
- continue;
- if (((align = xph_align) & 0x80000000UL) != 0 ||
- align < 4) {
- if (file_printf(ms,
- ", invalid note alignment %#lx",
- CAST(unsigned long, align)) == -1)
- return -1;
- align = 4;
- }
- /*FALLTHROUGH*/
- case PT_INTERP:
- doread = 1;
- break;
- default:
- doread = 0;
- if (fsize != SIZE_UNKNOWN && xph_offset > fsize) {
- /* Maybe warn here? */
- continue;
- }
- break;
- }
-
- if (doread) {
- size_t len = xph_filesz < sizeof(nbuf) ? xph_filesz
- : sizeof(nbuf);
- off_t offs = xph_offset;
- bufsize = pread(fd, nbuf, len, offs);
- if (bufsize == -1) {
- if (file_printf(ms,
- ", can't read section at %jd",
- (intmax_t)offs) == -1)
- return -1;
- return 0;
- }
- }
-
- /* Things we can determine when we seek */
- switch (xph_type) {
- case PT_DYNAMIC:
- dynamic = 1;
- offset = 0;
- // Let DF_1 determine if we are PIE or not.
- ms->mode &= ~0111;
- for (;;) {
- if (offset >= CAST(size_t, bufsize))
- break;
- offset = dodynamic(ms, nbuf, offset,
- CAST(size_t, bufsize), clazz, swap,
- &pie, &need);
- if (offset == 0)
- break;
- }
- if (ms->flags & MAGIC_MIME)
- continue;
- break;
-
- case PT_INTERP:
- need++;
- if (ms->flags & MAGIC_MIME)
- continue;
- if (bufsize && nbuf[0]) {
- nbuf[bufsize - 1] = '\0';
- memcpy(interp, nbuf, CAST(size_t, bufsize));
- } else
- strlcpy(interp, "*empty*", sizeof(interp));
- break;
- case PT_NOTE:
- if (ms->flags & MAGIC_MIME)
- return 0;
- /*
- * This is a PT_NOTE section; loop through all the notes
- * in the section.
- */
- offset = 0;
- for (;;) {
- if (offset >= CAST(size_t, bufsize))
- break;
- offset = donote(ms, nbuf, offset,
- CAST(size_t, bufsize), clazz, swap, align,
- flags, notecount, fd, 0, 0, 0);
- if (offset == 0)
- break;
- }
- break;
- default:
- if (ms->flags & MAGIC_MIME)
- continue;
- break;
- }
- }
- if (ms->flags & MAGIC_MIME)
- return 0;
- if (dynamic) {
- if (pie && need == 0)
- linking_style = "static-pie";
- else
- linking_style = "dynamically";
- } else {
- linking_style = "statically";
- }
- if (file_printf(ms, ", %s linked", linking_style) == -1)
- return -1;
- if (interp[0])
- if (file_printf(ms, ", interpreter %s", file_printable(ms,
- ibuf, sizeof(ibuf), interp, sizeof(interp))) == -1)
- return -1;
- return 0;
-}
-
-
-file_protected int
-file_tryelf(struct magic_set *ms, const struct buffer *b)
-{
- int fd = b->fd;
- const unsigned char *buf = CAST(const unsigned char *, b->fbuf);
- size_t nbytes = b->flen;
- union {
- int32_t l;
- char c[sizeof(int32_t)];
- } u;
- int clazz;
- int swap;
- struct stat st;
- const struct stat *stp;
- off_t fsize;
- int flags = 0;
- Elf32_Ehdr elf32hdr;
- Elf64_Ehdr elf64hdr;
- uint16_t type, phnum, shnum, notecount;
-
- if (ms->flags & (MAGIC_APPLE|MAGIC_EXTENSION))
- return 0;
- /*
- * ELF executables have multiple section headers in arbitrary
- * file locations and thus file(1) cannot easily determine this.
- * Instead we traverse through all section headers until a symbol
- * table is found, or else conclude that the binary is stripped.
- * Return immediately if it's not ELF (so we avoid pipe2file unless
- * needed).
- */
- if (buf[EI_MAG0] != ELFMAG0
- || (buf[EI_MAG1] != ELFMAG1 && buf[EI_MAG1] != OLFMAG1)
- || buf[EI_MAG2] != ELFMAG2 || buf[EI_MAG3] != ELFMAG3)
- return 0;
-
- /*
- * If we cannot seek, it must be a pipe, socket or fifo.
- */
- if((lseek(fd, CAST(off_t, 0), SEEK_SET) == CAST(off_t, -1))
- && (errno == ESPIPE))
- fd = file_pipe2file(ms, fd, buf, nbytes);
-
- if (fd == -1) {
- file_badread(ms);
- return -1;
- }
-
- stp = &b->st;
- /*
- * b->st.st_size != 0 if the previous fstat() succeeded, which is
- * likely, so we can avoid an extra stat() call.
- */
- if (b->st.st_size == 0) {
- stp = &st;
- if (fstat(fd, &st) == -1) {
- file_badread(ms);
- return -1;
- }
- }
- if (S_ISREG(stp->st_mode) || stp->st_size != 0)
- fsize = stp->st_size;
- else
- fsize = SIZE_UNKNOWN;
-
- clazz = buf[EI_CLASS];
-
- switch (clazz) {
- case ELFCLASS32:
-#undef elf_getu
-#define elf_getu(a, b) elf_getu32(a, b)
-#undef elfhdr
-#define elfhdr elf32hdr
-#include "elfclass.h"
- case ELFCLASS64:
-#undef elf_getu
-#define elf_getu(a, b) elf_getu64(a, b)
-#undef elfhdr
-#define elfhdr elf64hdr
-#include "elfclass.h"
- default:
- if (file_printf(ms, ", unknown class %d", clazz) == -1)
- return -1;
- break;
- }
- return 0;
-}
-#endif
diff --git a/contrib/libs/libmagic/src/readelf.h b/contrib/libs/libmagic/src/readelf.h
deleted file mode 100644
index 809d3f7573..0000000000
--- a/contrib/libs/libmagic/src/readelf.h
+++ /dev/null
@@ -1,545 +0,0 @@
-/*
- * Copyright (c) Christos Zoulas 2003.
- * All Rights Reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * @(#)Id: readelf.h,v 1.9 2002/05/16 18:45:56 christos Exp
- *
- * Provide elf data structures for non-elf machines, allowing file(1)
- * on non-elf hosts to determine whether an elf binary is stripped.
- * Note: cobbled from the linux header file, with modifications
- */
-#ifndef __fake_elf_h__
-#define __fake_elf_h__
-
-#if HAVE_STDINT_H
-#include <stdint.h>
-#endif
-
-typedef uint32_t Elf32_Addr;
-typedef uint32_t Elf32_Off;
-typedef uint16_t Elf32_Half;
-typedef uint32_t Elf32_Word;
-typedef uint8_t Elf32_Char;
-
-typedef uint64_t Elf64_Addr;
-typedef uint64_t Elf64_Off;
-typedef uint64_t Elf64_Xword;
-typedef uint16_t Elf64_Half;
-typedef uint32_t Elf64_Word;
-typedef uint8_t Elf64_Char;
-
-#define EI_NIDENT 16
-
-typedef struct {
- Elf32_Word a_type; /* 32-bit id */
- Elf32_Word a_v; /* 32-bit id */
-} Aux32Info;
-
-typedef struct {
- Elf64_Xword a_type; /* 64-bit id */
- Elf64_Xword a_v; /* 64-bit id */
-} Aux64Info;
-
-#define AT_NULL 0 /* end of vector */
-#define AT_IGNORE 1 /* entry should be ignored */
-#define AT_EXECFD 2 /* file descriptor of program */
-#define AT_PHDR 3 /* program headers for program */
-#define AT_PHENT 4 /* size of program header entry */
-#define AT_PHNUM 5 /* number of program headers */
-#define AT_PAGESZ 6 /* system page size */
-#define AT_BASE 7 /* base address of interpreter */
-#define AT_FLAGS 8 /* flags */
-#define AT_ENTRY 9 /* entry point of program */
-#define AT_LINUX_NOTELF 10 /* program is not ELF */
-#define AT_LINUX_UID 11 /* real uid */
-#define AT_LINUX_EUID 12 /* effective uid */
-#define AT_LINUX_GID 13 /* real gid */
-#define AT_LINUX_EGID 14 /* effective gid */
-#define AT_LINUX_PLATFORM 15 /* string identifying CPU for optimizations */
-#define AT_LINUX_HWCAP 16 /* arch dependent hints at CPU capabilities */
-#define AT_LINUX_CLKTCK 17 /* frequency at which times() increments */
-/* AT_* values 18 through 22 are reserved */
-#define AT_LINUX_SECURE 23 /* secure mode boolean */
-#define AT_LINUX_BASE_PLATFORM 24 /* string identifying real platform, may
- * differ from AT_PLATFORM. */
-#define AT_LINUX_RANDOM 25 /* address of 16 random bytes */
-#define AT_LINUX_HWCAP2 26 /* extension of AT_HWCAP */
-#define AT_LINUX_EXECFN 31 /* filename of program */
-
-typedef struct {
- Elf32_Char e_ident[EI_NIDENT];
- Elf32_Half e_type;
- Elf32_Half e_machine;
- Elf32_Word e_version;
- Elf32_Addr e_entry; /* Entry point */
- Elf32_Off e_phoff;
- Elf32_Off e_shoff;
- Elf32_Word e_flags;
- Elf32_Half e_ehsize;
- Elf32_Half e_phentsize;
- Elf32_Half e_phnum;
- Elf32_Half e_shentsize;
- Elf32_Half e_shnum;
- Elf32_Half e_shstrndx;
-} Elf32_Ehdr;
-
-typedef struct {
- Elf64_Char e_ident[EI_NIDENT];
- Elf64_Half e_type;
- Elf64_Half e_machine;
- Elf64_Word e_version;
- Elf64_Addr e_entry; /* Entry point */
- Elf64_Off e_phoff;
- Elf64_Off e_shoff;
- Elf64_Word e_flags;
- Elf64_Half e_ehsize;
- Elf64_Half e_phentsize;
- Elf64_Half e_phnum;
- Elf64_Half e_shentsize;
- Elf64_Half e_shnum;
- Elf64_Half e_shstrndx;
-} Elf64_Ehdr;
-
-/* e_type */
-#define ET_REL 1
-#define ET_EXEC 2
-#define ET_DYN 3
-#define ET_CORE 4
-
-/* e_machine (used only for SunOS 5.x hardware capabilities) */
-#define EM_SPARC 2
-#define EM_386 3
-#define EM_SPARC32PLUS 18
-#define EM_SPARCV9 43
-#define EM_IA_64 50
-#define EM_AMD64 62
-
-/* sh_type */
-#define SHT_SYMTAB 2
-#define SHT_NOTE 7
-#define SHT_DYNSYM 11
-#define SHT_SUNW_cap 0x6ffffff5 /* SunOS 5.x hw/sw capabilities */
-
-/* elf type */
-#define ELFDATANONE 0 /* e_ident[EI_DATA] */
-#define ELFDATA2LSB 1
-#define ELFDATA2MSB 2
-
-/* elf class */
-#define ELFCLASSNONE 0
-#define ELFCLASS32 1
-#define ELFCLASS64 2
-
-/* magic number */
-#define EI_MAG0 0 /* e_ident[] indexes */
-#define EI_MAG1 1
-#define EI_MAG2 2
-#define EI_MAG3 3
-#define EI_CLASS 4
-#define EI_DATA 5
-#define EI_VERSION 6
-#define EI_PAD 7
-
-#define ELFMAG0 0x7f /* EI_MAG */
-#define ELFMAG1 'E'
-#define ELFMAG2 'L'
-#define ELFMAG3 'F'
-#define ELFMAG "\177ELF"
-
-#define OLFMAG1 'O'
-#define OLFMAG "\177OLF"
-
-typedef struct {
- Elf32_Word p_type;
- Elf32_Off p_offset;
- Elf32_Addr p_vaddr;
- Elf32_Addr p_paddr;
- Elf32_Word p_filesz;
- Elf32_Word p_memsz;
- Elf32_Word p_flags;
- Elf32_Word p_align;
-} Elf32_Phdr;
-
-typedef struct {
- Elf64_Word p_type;
- Elf64_Word p_flags;
- Elf64_Off p_offset;
- Elf64_Addr p_vaddr;
- Elf64_Addr p_paddr;
- Elf64_Xword p_filesz;
- Elf64_Xword p_memsz;
- Elf64_Xword p_align;
-} Elf64_Phdr;
-
-#define PT_NULL 0 /* p_type */
-#define PT_LOAD 1
-#define PT_DYNAMIC 2
-#define PT_INTERP 3
-#define PT_NOTE 4
-#define PT_SHLIB 5
-#define PT_PHDR 6
-#define PT_NUM 7
-
-typedef struct {
- Elf32_Word sh_name;
- Elf32_Word sh_type;
- Elf32_Word sh_flags;
- Elf32_Addr sh_addr;
- Elf32_Off sh_offset;
- Elf32_Word sh_size;
- Elf32_Word sh_link;
- Elf32_Word sh_info;
- Elf32_Word sh_addralign;
- Elf32_Word sh_entsize;
-} Elf32_Shdr;
-
-typedef struct {
- Elf64_Word sh_name;
- Elf64_Word sh_type;
- Elf64_Off sh_flags;
- Elf64_Addr sh_addr;
- Elf64_Off sh_offset;
- Elf64_Off sh_size;
- Elf64_Word sh_link;
- Elf64_Word sh_info;
- Elf64_Off sh_addralign;
- Elf64_Off sh_entsize;
-} Elf64_Shdr;
-
-#define NT_NETBSD_CORE_PROCINFO 1
-#define NT_NETBSD_CORE_AUXV 2
-
-struct NetBSD_elfcore_procinfo {
- /* Version 1 fields start here. */
- uint32_t cpi_version; /* our version */
- uint32_t cpi_cpisize; /* sizeof(this struct) */
- uint32_t cpi_signo; /* killing signal */
- uint32_t cpi_sigcode; /* signal code */
- uint32_t cpi_sigpend[4]; /* pending signals */
- uint32_t cpi_sigmask[4]; /* blocked signals */
- uint32_t cpi_sigignore[4]; /* ignored signals */
- uint32_t cpi_sigcatch[4]; /* caught signals */
- int32_t cpi_pid; /* process ID */
- int32_t cpi_ppid; /* parent process ID */
- int32_t cpi_pgrp; /* process group ID */
- int32_t cpi_sid; /* session ID */
- uint32_t cpi_ruid; /* real user ID */
- uint32_t cpi_euid; /* effective user ID */
- uint32_t cpi_svuid; /* saved user ID */
- uint32_t cpi_rgid; /* real group ID */
- uint32_t cpi_egid; /* effective group ID */
- uint32_t cpi_svgid; /* saved group ID */
- uint32_t cpi_nlwps; /* number of LWPs */
- int8_t cpi_name[32]; /* copy of p->p_comm */
- /* Add version 2 fields below here. */
- int32_t cpi_siglwp; /* LWP target of killing signal */
-};
-
-/* Note header in a PT_NOTE section */
-typedef struct elf_note {
- Elf32_Word n_namesz; /* Name size */
- Elf32_Word n_descsz; /* Content size */
- Elf32_Word n_type; /* Content type */
-} Elf32_Nhdr;
-
-typedef struct {
- Elf64_Word n_namesz;
- Elf64_Word n_descsz;
- Elf64_Word n_type;
-} Elf64_Nhdr;
-
-/* Notes used in ET_CORE */
-#define NT_PRSTATUS 1
-#define NT_PRFPREG 2
-#define NT_PRPSINFO 3
-#define NT_PRXREG 4
-#define NT_TASKSTRUCT 4
-#define NT_PLATFORM 5
-#define NT_AUXV 6
-
-/* Note types used in executables */
-/* NetBSD executables (name = "NetBSD") */
-#define NT_NETBSD_VERSION 1
-#define NT_NETBSD_EMULATION 2
-#define NT_FREEBSD_VERSION 1
-#define NT_OPENBSD_VERSION 1
-#define NT_DRAGONFLY_VERSION 1
-/*
- * GNU executables (name = "GNU")
- * word[0]: GNU OS tags
- * word[1]: major version
- * word[2]: minor version
- * word[3]: tiny version
- */
-#define NT_GNU_VERSION 1
-
-/* GNU OS tags */
-#define GNU_OS_LINUX 0
-#define GNU_OS_HURD 1
-#define GNU_OS_SOLARIS 2
-#define GNU_OS_KFREEBSD 3
-#define GNU_OS_KNETBSD 4
-
-/*
- * GNU Hardware capability information
- * word[0]: Number of entries
- * word[1]: Bitmask of enabled entries
- * Followed by a byte id, and a NUL terminated string per entry
- */
-#define NT_GNU_HWCAP 2
-
-/*
- * GNU Build ID generated by ld
- * 160 bit SHA1 [default]
- * 128 bit md5 or uuid
- */
-#define NT_GNU_BUILD_ID 3
-
-/*
- * NetBSD-specific note type: PaX.
- * There should be 1 NOTE per executable.
- * name: PaX\0
- * namesz: 4
- * desc:
- * word[0]: capability bitmask
- * descsz: 4
- */
-#define NT_NETBSD_PAX 3
-#define NT_NETBSD_PAX_MPROTECT 0x01 /* Force enable Mprotect */
-#define NT_NETBSD_PAX_NOMPROTECT 0x02 /* Force disable Mprotect */
-#define NT_NETBSD_PAX_GUARD 0x04 /* Force enable Segvguard */
-#define NT_NETBSD_PAX_NOGUARD 0x08 /* Force disable Segvguard */
-#define NT_NETBSD_PAX_ASLR 0x10 /* Force enable ASLR */
-#define NT_NETBSD_PAX_NOASLR 0x20 /* Force disable ASLR */
-
-/*
- * NetBSD-specific note type: MACHINE_ARCH.
- * There should be 1 NOTE per executable.
- * name: NetBSD\0
- * namesz: 7
- * desc: string
- * descsz: variable
- */
-#define NT_NETBSD_MARCH 5
-
-/*
- * NetBSD-specific note type: COMPILER MODEL.
- * There should be 1 NOTE per executable.
- * name: NetBSD\0
- * namesz: 7
- * desc: string
- * descsz: variable
- */
-#define NT_NETBSD_CMODEL 6
-
-/*
- * Golang-specific note type
- * name: Go\0\0
- * namesz: 4
- * desc: base-64 build id.
- * descsz: < 128
- */
-#define NT_GO_BUILD_ID 4
-
-/*
- * FreeBSD specific notes
- */
-#define NT_FREEBSD_PROCSTAT_AUXV 16
-
-#if !defined(ELFSIZE) && defined(ARCH_ELFSIZE)
-#define ELFSIZE ARCH_ELFSIZE
-#endif
-/* SunOS 5.x hardware/software capabilities */
-typedef struct {
- Elf32_Word c_tag;
- union {
- Elf32_Word c_val;
- Elf32_Addr c_ptr;
- } c_un;
-} Elf32_Cap;
-
-typedef struct {
- Elf64_Xword c_tag;
- union {
- Elf64_Xword c_val;
- Elf64_Addr c_ptr;
- } c_un;
-} Elf64_Cap;
-
-/* SunOS 5.x hardware/software capability tags */
-#define CA_SUNW_NULL 0
-#define CA_SUNW_HW_1 1
-#define CA_SUNW_SF_1 2
-
-/* SunOS 5.x software capabilities */
-#define SF1_SUNW_FPKNWN 0x01
-#define SF1_SUNW_FPUSED 0x02
-#define SF1_SUNW_MASK 0x03
-
-/* SunOS 5.x hardware capabilities: sparc */
-#define AV_SPARC_MUL32 0x0001
-#define AV_SPARC_DIV32 0x0002
-#define AV_SPARC_FSMULD 0x0004
-#define AV_SPARC_V8PLUS 0x0008
-#define AV_SPARC_POPC 0x0010
-#define AV_SPARC_VIS 0x0020
-#define AV_SPARC_VIS2 0x0040
-#define AV_SPARC_ASI_BLK_INIT 0x0080
-#define AV_SPARC_FMAF 0x0100
-#define AV_SPARC_FJFMAU 0x4000
-#define AV_SPARC_IMA 0x8000
-
-/* SunOS 5.x hardware capabilities: 386 */
-#define AV_386_FPU 0x00000001
-#define AV_386_TSC 0x00000002
-#define AV_386_CX8 0x00000004
-#define AV_386_SEP 0x00000008
-#define AV_386_AMD_SYSC 0x00000010
-#define AV_386_CMOV 0x00000020
-#define AV_386_MMX 0x00000040
-#define AV_386_AMD_MMX 0x00000080
-#define AV_386_AMD_3DNow 0x00000100
-#define AV_386_AMD_3DNowx 0x00000200
-#define AV_386_FXSR 0x00000400
-#define AV_386_SSE 0x00000800
-#define AV_386_SSE2 0x00001000
-#define AV_386_PAUSE 0x00002000
-#define AV_386_SSE3 0x00004000
-#define AV_386_MON 0x00008000
-#define AV_386_CX16 0x00010000
-#define AV_386_AHF 0x00020000
-#define AV_386_TSCP 0x00040000
-#define AV_386_AMD_SSE4A 0x00080000
-#define AV_386_POPCNT 0x00100000
-#define AV_386_AMD_LZCNT 0x00200000
-#define AV_386_SSSE3 0x00400000
-#define AV_386_SSE4_1 0x00800000
-#define AV_386_SSE4_2 0x01000000
-
-/*
- * Dynamic Section structure array
- */
-typedef struct {
- Elf32_Word d_tag; /* entry tag value */
- union {
- Elf32_Addr d_ptr;
- Elf32_Word d_val;
- } d_un;
-} Elf32_Dyn;
-
-typedef struct {
- Elf64_Xword d_tag; /* entry tag value */
- union {
- Elf64_Addr d_ptr;
- Elf64_Xword d_val;
- } d_un;
-} Elf64_Dyn;
-
-/* d_tag */
-#define DT_NULL 0 /* Marks end of dynamic array */
-#define DT_NEEDED 1 /* Name of needed library (DT_STRTAB offset) */
-#define DT_PLTRELSZ 2 /* Size, in bytes, of relocations in PLT */
-#define DT_PLTGOT 3 /* Address of PLT and/or GOT */
-#define DT_HASH 4 /* Address of symbol hash table */
-#define DT_STRTAB 5 /* Address of string table */
-#define DT_SYMTAB 6 /* Address of symbol table */
-#define DT_RELA 7 /* Address of Rela relocation table */
-#define DT_RELASZ 8 /* Size, in bytes, of DT_RELA table */
-#define DT_RELAENT 9 /* Size, in bytes, of one DT_RELA entry */
-#define DT_STRSZ 10 /* Size, in bytes, of DT_STRTAB table */
-#define DT_SYMENT 11 /* Size, in bytes, of one DT_SYMTAB entry */
-#define DT_INIT 12 /* Address of initialization function */
-#define DT_FINI 13 /* Address of termination function */
-#define DT_SONAME 14 /* Shared object name (DT_STRTAB offset) */
-#define DT_RPATH 15 /* Library search path (DT_STRTAB offset) */
-#define DT_SYMBOLIC 16 /* Start symbol search within local object */
-#define DT_REL 17 /* Address of Rel relocation table */
-#define DT_RELSZ 18 /* Size, in bytes, of DT_REL table */
-#define DT_RELENT 19 /* Size, in bytes, of one DT_REL entry */
-#define DT_PLTREL 20 /* Type of PLT relocation entries */
-#define DT_DEBUG 21 /* Used for debugging; unspecified */
-#define DT_TEXTREL 22 /* Relocations might modify non-writable seg */
-#define DT_JMPREL 23 /* Address of relocations associated with PLT */
-#define DT_BIND_NOW 24 /* Process all relocations at load-time */
-#define DT_INIT_ARRAY 25 /* Address of initialization function array */
-#define DT_FINI_ARRAY 26 /* Address of termination function array */
-#define DT_INIT_ARRAYSZ 27 /* Size, in bytes, of DT_INIT_ARRAY array */
-#define DT_FINI_ARRAYSZ 28 /* Size, in bytes, of DT_FINI_ARRAY array */
-#define DT_RUNPATH 29 /* overrides DT_RPATH */
-#define DT_FLAGS 30 /* Encodes ORIGIN, SYMBOLIC, TEXTREL, BIND_NOW, STATIC_TLS */
-#define DT_ENCODING 31 /* ??? */
-#define DT_PREINIT_ARRAY 32 /* Address of pre-init function array */
-#define DT_PREINIT_ARRAYSZ 33 /* Size, in bytes, of DT_PREINIT_ARRAY array */
-#define DT_NUM 34
-
-#define DT_LOOS 0x60000000 /* Operating system specific range */
-#define DT_VERSYM 0x6ffffff0 /* Symbol versions */
-#define DT_FLAGS_1 0x6ffffffb /* ELF dynamic flags */
-#define DT_VERDEF 0x6ffffffc /* Versions defined by file */
-#define DT_VERDEFNUM 0x6ffffffd /* Number of versions defined by file */
-#define DT_VERNEED 0x6ffffffe /* Versions needed by file */
-#define DT_VERNEEDNUM 0x6fffffff /* Number of versions needed by file */
-#define DT_HIOS 0x6fffffff
-#define DT_LOPROC 0x70000000 /* Processor-specific range */
-#define DT_HIPROC 0x7fffffff
-
-/* Flag values for DT_FLAGS */
-#define DF_ORIGIN 0x00000001 /* uses $ORIGIN */
-#define DF_SYMBOLIC 0x00000002 /* */
-#define DF_TEXTREL 0x00000004 /* */
-#define DF_BIND_NOW 0x00000008 /* */
-#define DF_STATIC_TLS 0x00000010 /* */
-
-/* Flag values for DT_FLAGS_1 */
-#define DF_1_NOW 0x00000001 /* Same as DF_BIND_NOW */
-#define DF_1_GLOBAL 0x00000002 /* Unused */
-#define DF_1_GROUP 0x00000004 /* Is member of group */
-#define DF_1_NODELETE 0x00000008 /* Cannot be deleted from process */
-#define DF_1_LOADFLTR 0x00000010 /* Immediate loading of filters */
-#define DF_1_INITFIRST 0x00000020 /* init/fini takes priority */
-#define DF_1_NOOPEN 0x00000040 /* Do not allow loading on dlopen() */
-#define DF_1_ORIGIN 0x00000080 /* Require $ORIGIN processing */
-#define DF_1_DIRECT 0x00000100 /* Enable direct bindings */
-#define DF_1_INTERPOSE 0x00000400 /* Is an interposer */
-#define DF_1_NODEFLIB 0x00000800 /* Ignore default library search path */
-#define DF_1_NODUMP 0x00001000 /* Cannot be dumped with dldump(3C) */
-#define DF_1_CONFALT 0x00002000 /* Configuration alternative */
-#define DF_1_ENDFILTEE 0x00004000 /* Filtee ends filter's search */
-#define DF_1_DISPRELDNE 0x00008000 /* Did displacement relocation */
-#define DF_1_DISPRELPND 0x00010000 /* Pending displacement relocation */
-#define DF_1_NODIRECT 0x00020000 /* Has non-direct bindings */
-#define DF_1_IGNMULDEF 0x00040000 /* Used internally */
-#define DF_1_NOKSYMS 0x00080000 /* Used internally */
-#define DF_1_NOHDR 0x00100000 /* Used internally */
-#define DF_1_EDITED 0x00200000 /* Has been modified since build */
-#define DF_1_NORELOC 0x00400000 /* Used internally */
-#define DF_1_SYMINTPOSE 0x00800000 /* Has individual symbol interposers */
-#define DF_1_GLOBAUDIT 0x01000000 /* Require global auditing */
-#define DF_1_SINGLETON 0x02000000 /* Has singleton symbols */
-#define DF_1_STUB 0x04000000 /* Stub */
-#define DF_1_PIE 0x08000000 /* Position Independent Executable */
-
-#endif
diff --git a/contrib/libs/libmagic/src/res.cpp b/contrib/libs/libmagic/src/res.cpp
deleted file mode 100644
index 99a5af6ee5..0000000000
--- a/contrib/libs/libmagic/src/res.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include <library/cpp/resource/resource.h>
-
-extern "C" void _magic_read_res(const char* res, void** out, size_t* size) {
- TString s;
- if (NResource::FindExact(res, &s) && (*size = s.size()) && (*out = malloc(*size)))
- memcpy(*out, s.data(), *size);
-}
diff --git a/contrib/libs/libmagic/src/seccomp.c b/contrib/libs/libmagic/src/seccomp.c
deleted file mode 100644
index d2ce43c585..0000000000
--- a/contrib/libs/libmagic/src/seccomp.c
+++ /dev/null
@@ -1,290 +0,0 @@
-/*
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * libseccomp hooks.
- */
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: seccomp.c,v 1.25 2022/12/26 18:57:29 christos Exp $")
-#endif /* lint */
-
-#if HAVE_LIBSECCOMP
-#error #include <seccomp.h> /* libseccomp */
-#include <sys/prctl.h> /* prctl */
-#include <sys/ioctl.h>
-#include <sys/socket.h>
-#include <termios.h>
-#include <fcntl.h>
-#include <stdlib.h>
-#include <errno.h>
-
-#define DENY_RULE(call) \
- do \
- if (seccomp_rule_add (ctx, SCMP_ACT_KILL, SCMP_SYS(call), 0) == -1) \
- goto out; \
- while (/*CONSTCOND*/0)
-#define ALLOW_RULE(call) \
- do \
- if (seccomp_rule_add (ctx, SCMP_ACT_ALLOW, SCMP_SYS(call), 0) == -1) \
- goto out; \
- while (/*CONSTCOND*/0)
-
-#define ALLOW_IOCTL_RULE(param) \
- do \
- if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(ioctl), 1, \
- SCMP_CMP(1, SCMP_CMP_EQ, (scmp_datum_t)param, \
- (scmp_datum_t)0)) == -1) \
- goto out; \
- while (/*CONSTCOND*/0)
-
-static scmp_filter_ctx ctx;
-
-int
-enable_sandbox_basic(void)
-{
-
- if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1)
- return -1;
-
- if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) == -1)
- return -1;
-
- // initialize the filter
- ctx = seccomp_init(SCMP_ACT_ALLOW);
- if (ctx == NULL)
- return 1;
-
- DENY_RULE(_sysctl);
- DENY_RULE(acct);
- DENY_RULE(add_key);
- DENY_RULE(adjtimex);
- DENY_RULE(chroot);
- DENY_RULE(clock_adjtime);
- DENY_RULE(create_module);
- DENY_RULE(delete_module);
- DENY_RULE(fanotify_init);
- DENY_RULE(finit_module);
- DENY_RULE(get_kernel_syms);
- DENY_RULE(get_mempolicy);
- DENY_RULE(init_module);
- DENY_RULE(io_cancel);
- DENY_RULE(io_destroy);
- DENY_RULE(io_getevents);
- DENY_RULE(io_setup);
- DENY_RULE(io_submit);
- DENY_RULE(ioperm);
- DENY_RULE(iopl);
- DENY_RULE(ioprio_set);
- DENY_RULE(kcmp);
-#ifdef __NR_kexec_file_load
- DENY_RULE(kexec_file_load);
-#endif
- DENY_RULE(kexec_load);
- DENY_RULE(keyctl);
- DENY_RULE(lookup_dcookie);
- DENY_RULE(mbind);
- DENY_RULE(nfsservctl);
- DENY_RULE(migrate_pages);
- DENY_RULE(modify_ldt);
- DENY_RULE(mount);
- DENY_RULE(move_pages);
- DENY_RULE(name_to_handle_at);
- DENY_RULE(open_by_handle_at);
- DENY_RULE(perf_event_open);
- DENY_RULE(pivot_root);
- DENY_RULE(process_vm_readv);
- DENY_RULE(process_vm_writev);
- DENY_RULE(ptrace);
- DENY_RULE(reboot);
- DENY_RULE(remap_file_pages);
- DENY_RULE(request_key);
- DENY_RULE(set_mempolicy);
- DENY_RULE(swapoff);
- DENY_RULE(swapon);
- DENY_RULE(sysfs);
- DENY_RULE(syslog);
- DENY_RULE(tuxcall);
- DENY_RULE(umount2);
- DENY_RULE(uselib);
- DENY_RULE(vmsplice);
-
- // blocking dangerous syscalls that file should not need
- DENY_RULE (execve);
- DENY_RULE (socket);
- // ...
-
-
- // applying filter...
- if (seccomp_load (ctx) == -1)
- goto out;
- // free ctx after the filter has been loaded into the kernel
- seccomp_release(ctx);
- return 0;
-
-out:
- seccomp_release(ctx);
- return -1;
-}
-
-
-int
-enable_sandbox_full(void)
-{
-
- // prevent child processes from getting more priv e.g. via setuid,
- // capabilities, ...
- if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1)
- return -1;
-
- if (prctl(PR_SET_DUMPABLE, 0, 0, 0, 0) == -1)
- return -1;
-
- // initialize the filter
- ctx = seccomp_init(SCMP_ACT_KILL);
- if (ctx == NULL)
- return -1;
-
- ALLOW_RULE(access);
- ALLOW_RULE(brk);
- ALLOW_RULE(close);
- ALLOW_RULE(dup2);
- ALLOW_RULE(exit);
- ALLOW_RULE(exit_group);
-#ifdef __NR_faccessat
- ALLOW_RULE(faccessat);
-#endif
- ALLOW_RULE(fcntl);
- ALLOW_RULE(fcntl64);
-#ifdef __NR_fstat
- ALLOW_RULE(fstat);
-#endif
- ALLOW_RULE(fstat64);
-#ifdef __NR_fstatat64
- ALLOW_RULE(fstatat64);
-#endif
- ALLOW_RULE(futex);
- ALLOW_RULE(getdents);
-#ifdef __NR_getdents64
- ALLOW_RULE(getdents64);
-#endif
-#ifdef FIONREAD
- // called in src/compress.c under sread
- ALLOW_IOCTL_RULE(FIONREAD);
-#endif
-#ifdef TIOCGWINSZ
- // musl libc may call ioctl TIOCGWINSZ on stdout
- ALLOW_IOCTL_RULE(TIOCGWINSZ);
-#endif
-#ifdef TCGETS
- // glibc may call ioctl TCGETS on stdout on physical terminal
- ALLOW_IOCTL_RULE(TCGETS);
-#endif
- ALLOW_RULE(lseek);
- ALLOW_RULE(_llseek);
- ALLOW_RULE(lstat);
- ALLOW_RULE(lstat64);
- ALLOW_RULE(madvise);
- ALLOW_RULE(mmap);
- ALLOW_RULE(mmap2);
- ALLOW_RULE(mprotect);
- ALLOW_RULE(mremap);
- ALLOW_RULE(munmap);
-#ifdef __NR_newfstatat
- ALLOW_RULE(newfstatat);
-#endif
- ALLOW_RULE(open);
- ALLOW_RULE(openat);
- ALLOW_RULE(pread64);
- ALLOW_RULE(read);
- ALLOW_RULE(readlink);
-#ifdef __NR_readlinkat
- ALLOW_RULE(readlinkat);
-#endif
- ALLOW_RULE(rt_sigaction);
- ALLOW_RULE(rt_sigprocmask);
- ALLOW_RULE(rt_sigreturn);
- ALLOW_RULE(select);
- ALLOW_RULE(stat);
- ALLOW_RULE(statx);
- ALLOW_RULE(stat64);
- ALLOW_RULE(sysinfo);
- ALLOW_RULE(umask); // Used in file_pipe2file()
- ALLOW_RULE(getpid); // Used by glibc in file_pipe2file()
- ALLOW_RULE(unlink);
- ALLOW_RULE(utimes);
- ALLOW_RULE(write);
- ALLOW_RULE(writev);
-
-
-#if 0
- // needed by valgrind
- ALLOW_RULE(gettid);
- ALLOW_RULE(rt_sigtimedwait);
-#endif
-
-#if 0
- /* special restrictions for socket, only allow AF_UNIX/AF_LOCAL */
- if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(socket), 1,
- SCMP_CMP(0, SCMP_CMP_EQ, AF_UNIX)) == -1)
- goto out;
-
- if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(socket), 1,
- SCMP_CMP(0, SCMP_CMP_EQ, AF_LOCAL)) == -1)
- goto out;
-
-
- /* special restrictions for open, prevent opening files for writing */
- if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(open), 1,
- SCMP_CMP(1, SCMP_CMP_MASKED_EQ, O_WRONLY | O_RDWR, 0)) == -1)
- goto out;
-
- if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EACCES), SCMP_SYS(open), 1,
- SCMP_CMP(1, SCMP_CMP_MASKED_EQ, O_WRONLY, O_WRONLY)) == -1)
- goto out;
-
- if (seccomp_rule_add(ctx, SCMP_ACT_ERRNO(EACCES), SCMP_SYS(open), 1,
- SCMP_CMP(1, SCMP_CMP_MASKED_EQ, O_RDWR, O_RDWR)) == -1)
- goto out;
-
-
- /* allow stderr */
- if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 1,
- SCMP_CMP(0, SCMP_CMP_EQ, 2)) == -1)
- goto out;
-#endif
-
- // applying filter...
- if (seccomp_load(ctx) == -1)
- goto out;
- // free ctx after the filter has been loaded into the kernel
- seccomp_release(ctx);
- return 0;
-
-out:
- // something went wrong
- seccomp_release(ctx);
- return -1;
-}
-#endif
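The seccomp.c hunk above builds its sandbox from the DENY_RULE/ALLOW_RULE wrappers around seccomp_rule_add(): enable_sandbox_basic() starts from SCMP_ACT_ALLOW and denies a blocklist, while enable_sandbox_full() starts from SCMP_ACT_KILL and allows only the syscalls file(1) needs. Below is a minimal standalone sketch of the same default-kill pattern, assuming libseccomp is available; the function name sandbox_reader_example and the particular allowlist are illustrative only (not part of the removed file), and it checks for any negative return rather than the `== -1` style used above.

#include <seccomp.h>	/* libseccomp: seccomp_init, seccomp_rule_add, ... */
#include <sys/prctl.h>	/* prctl, PR_SET_NO_NEW_PRIVS */

static int
sandbox_reader_example(void)
{
	scmp_filter_ctx ctx;

	/* Forbid gaining privileges (setuid, capabilities, ...) from now on. */
	if (prctl(PR_SET_NO_NEW_PRIVS, 1, 0, 0, 0) == -1)
		return -1;

	/* Default action: kill the process on any syscall not allowed below. */
	ctx = seccomp_init(SCMP_ACT_KILL);
	if (ctx == NULL)
		return -1;

	/* Allowlist only what a read-only consumer needs (illustrative set). */
	if (seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(read), 0) < 0 ||
	    seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(write), 0) < 0 ||
	    seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(openat), 0) < 0 ||
	    seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(close), 0) < 0 ||
	    seccomp_rule_add(ctx, SCMP_ACT_ALLOW, SCMP_SYS(exit_group), 0) < 0)
		goto out;

	/* Load the filter into the kernel; the userspace context can then go. */
	if (seccomp_load(ctx) < 0)
		goto out;
	seccomp_release(ctx);
	return 0;
out:
	seccomp_release(ctx);
	return -1;
}

The default-allow, deny-list variant in enable_sandbox_basic() degrades more gracefully when new syscalls appear, at the cost of weaker guarantees than the default-kill allowlist sketched here.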
diff --git a/contrib/libs/libmagic/src/softmagic.c b/contrib/libs/libmagic/src/softmagic.c
deleted file mode 100644
index ea466ecd00..0000000000
--- a/contrib/libs/libmagic/src/softmagic.c
+++ /dev/null
@@ -1,2522 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * softmagic - interpret variable magic from MAGIC
- */
-
-#include "file.h"
-
-#ifndef lint
-FILE_RCSID("@(#)$File: softmagic.c,v 1.345 2023/07/02 12:48:39 christos Exp $")
-#endif /* lint */
-
-#include "magic.h"
-#include <assert.h>
-#include <math.h>
-#include <string.h>
-#include <ctype.h>
-#include <stdlib.h>
-#include <limits.h>
-#include <time.h>
-#include "der.h"
-
-file_private int match(struct magic_set *, struct magic *, file_regex_t **, size_t,
- const struct buffer *, size_t, int, int, int, uint16_t *,
- uint16_t *, int *, int *, int *, int *, int *);
-file_private int mget(struct magic_set *, struct magic *, const struct buffer *,
- const unsigned char *, size_t,
- size_t, unsigned int, int, int, int, uint16_t *,
- uint16_t *, int *, int *, int *, int *, int *);
-file_private int msetoffset(struct magic_set *, struct magic *, struct buffer *,
- const struct buffer *, size_t, unsigned int);
-file_private int magiccheck(struct magic_set *, struct magic *, file_regex_t **);
-file_private int mprint(struct magic_set *, struct magic *);
-file_private int moffset(struct magic_set *, struct magic *, const struct buffer *,
- int32_t *);
-file_private void mdebug(uint32_t, const char *, size_t);
-file_private int mcopy(struct magic_set *, union VALUETYPE *, int, int,
- const unsigned char *, uint32_t, size_t, struct magic *);
-file_private int mconvert(struct magic_set *, struct magic *, int);
-file_private int print_sep(struct magic_set *, int);
-file_private int handle_annotation(struct magic_set *, struct magic *, int);
-file_private int cvt_8(union VALUETYPE *, const struct magic *);
-file_private int cvt_16(union VALUETYPE *, const struct magic *);
-file_private int cvt_32(union VALUETYPE *, const struct magic *);
-file_private int cvt_64(union VALUETYPE *, const struct magic *);
-
-#define OFFSET_OOB(n, o, i) ((n) < CAST(uint32_t, (o)) || (i) > ((n) - (o)))
-#define BE64(p) ( \
- (CAST(uint64_t, (p)->hq[0])<<56)| \
- (CAST(uint64_t, (p)->hq[1])<<48)| \
- (CAST(uint64_t, (p)->hq[2])<<40)| \
- (CAST(uint64_t, (p)->hq[3])<<32)| \
- (CAST(uint64_t, (p)->hq[4])<<24)| \
- (CAST(uint64_t, (p)->hq[5])<<16)| \
- (CAST(uint64_t, (p)->hq[6])<<8)| \
- (CAST(uint64_t, (p)->hq[7])))
-#define LE64(p) ( \
- (CAST(uint64_t, (p)->hq[7])<<56)| \
- (CAST(uint64_t, (p)->hq[6])<<48)| \
- (CAST(uint64_t, (p)->hq[5])<<40)| \
- (CAST(uint64_t, (p)->hq[4])<<32)| \
- (CAST(uint64_t, (p)->hq[3])<<24)| \
- (CAST(uint64_t, (p)->hq[2])<<16)| \
- (CAST(uint64_t, (p)->hq[1])<<8)| \
- (CAST(uint64_t, (p)->hq[0])))
-#define LE32(p) ( \
- (CAST(uint32_t, (p)->hl[3])<<24)| \
- (CAST(uint32_t, (p)->hl[2])<<16)| \
- (CAST(uint32_t, (p)->hl[1])<<8)| \
- (CAST(uint32_t, (p)->hl[0])))
-#define BE32(p) ( \
- (CAST(uint32_t, (p)->hl[0])<<24)| \
- (CAST(uint32_t, (p)->hl[1])<<16)| \
- (CAST(uint32_t, (p)->hl[2])<<8)| \
- (CAST(uint32_t, (p)->hl[3])))
-#define ME32(p) ( \
- (CAST(uint32_t, (p)->hl[1])<<24)| \
- (CAST(uint32_t, (p)->hl[0])<<16)| \
- (CAST(uint32_t, (p)->hl[3])<<8)| \
- (CAST(uint32_t, (p)->hl[2])))
-
-#define BE16(p) ((CAST(uint16_t, (p)->hs[0])<<8)|(CAST(uint16_t, (p)->hs[1])))
-#define LE16(p) ((CAST(uint16_t, (p)->hs[1])<<8)|(CAST(uint16_t, (p)->hs[0])))
-#define SEXT(s,v,p) ((s) ? \
- CAST(intmax_t, CAST(int##v##_t, p)) : \
- CAST(intmax_t, CAST(uint##v##_t, p)))
-
-/*
- * softmagic - lookup one file in parsed, in-memory copy of database
- * Passed the name and FILE * of one file to be typed.
- */
-/*ARGSUSED1*/ /* nbytes passed for regularity, maybe need later */
-file_protected int
-file_softmagic(struct magic_set *ms, const struct buffer *b,
- uint16_t *indir_count, uint16_t *name_count, int mode, int text)
-{
- struct mlist *ml;
- int rv = 0, printed_something = 0, need_separator = 0, firstline = 1;
- uint16_t nc, ic;
-
- if (name_count == NULL) {
- nc = 0;
- name_count = &nc;
- }
- if (indir_count == NULL) {
- ic = 0;
- indir_count = &ic;
- }
-
- for (ml = ms->mlist[0]->next; ml != ms->mlist[0]; ml = ml->next) {
- int ret = match(ms, ml->magic, ml->magic_rxcomp, ml->nmagic, b,
- 0, mode, text, 0, indir_count, name_count,
- &printed_something, &need_separator, &firstline,
- NULL, NULL);
- switch (ret) {
- case -1:
- return ret;
- case 0:
- continue;
- default:
- if ((ms->flags & MAGIC_CONTINUE) == 0)
- return ret;
- rv = ret;
- break;
- }
- }
-
- return rv;
-}
-
-#define FILE_FMTDEBUG
-#ifdef FILE_FMTDEBUG
-#define F(a, b, c) file_fmtcheck((a), (b), (c), __FILE__, __LINE__)
-
-file_private const char * __attribute__((__format_arg__(3)))
-file_fmtcheck(struct magic_set *ms, const char *desc, const char *def,
- const char *file, size_t line)
-{
- const char *ptr;
-
- if (strchr(desc, '%') == NULL)
- return desc;
-
- ptr = fmtcheck(desc, def);
- if (ptr == def)
- file_magerror(ms,
- "%s, %" SIZE_T_FORMAT "u: format `%s' does not match"
- " with `%s'", file, line, desc, def);
- return ptr;
-}
-#else
-#define F(a, b, c) fmtcheck((b), (c))
-#endif
-
-/*
- * Go through the whole list, stopping if you find a match. Process all
- * the continuations of that match before returning.
- *
- * We support multi-level continuations:
- *
- * At any time when processing a successful top-level match, there is a
- * current continuation level; it represents the level of the last
- * successfully matched continuation.
- *
- * Continuations above that level are skipped as, if we see one, it
- * means that the continuation that controls them - i.e., the
- * lower-level continuation preceding them - failed to match.
- *
- * Continuations below that level are processed as, if we see one,
- * it means we've finished processing or skipping higher-level
- * continuations under the control of a successful or unsuccessful
- * lower-level continuation, and are now seeing the next lower-level
- * continuation and should process it. The current continuation
- * level reverts to the level of the one we're seeing.
- *
- * Continuations at the current level are processed as, if we see
- * one, there's no lower-level continuation that may have failed.
- *
- * If a continuation matches, we bump the current continuation level
- * so that higher-level continuations are processed.
- */
-file_private int
-match(struct magic_set *ms, struct magic *magic, file_regex_t **magic_rxcomp,
- size_t nmagic, const struct buffer *b, size_t offset, int mode, int text,
- int flip, uint16_t *indir_count, uint16_t *name_count,
- int *printed_something, int *need_separator, int *firstline,
- int *returnval, int *found_match)
-{
- uint32_t magindex = 0;
- unsigned int cont_level = 0;
- int found_matchv = 0; /* if a match is found it is set to 1*/
- int returnvalv = 0, e;
- struct buffer bb;
- int print = (ms->flags & MAGIC_NODESC) == 0;
-
- /*
- * returnval can be 0 if a match is found, but there was no
- * annotation to be printed.
- */
- if (returnval == NULL)
- returnval = &returnvalv;
- if (found_match == NULL)
- found_match = &found_matchv;
-
- if (file_check_mem(ms, cont_level) == -1)
- return -1;
-
- for (magindex = 0; magindex < nmagic; magindex++) {
- int flush = 0;
- struct magic *m = &magic[magindex];
- file_regex_t **m_rxcomp = &magic_rxcomp[magindex];
-
- if (m->type != FILE_NAME)
- if ((IS_STRING(m->type) &&
-#define FLT (STRING_BINTEST | STRING_TEXTTEST)
- ((text && (m->str_flags & FLT) == STRING_BINTEST) ||
- (!text && (m->str_flags & FLT) == STRING_TEXTTEST))) ||
- (m->flag & mode) != mode) {
-flush:
- /* Skip sub-tests */
- while (magindex < nmagic - 1 &&
- magic[magindex + 1].cont_level != 0)
- magindex++;
- cont_level = 0;
- continue; /* Skip to next top-level test*/
- }
-
- if (msetoffset(ms, m, &bb, b, offset, cont_level) == -1)
- goto flush;
- ms->line = m->lineno;
-
- /* if main entry matches, print it... */
- switch (mget(ms, m, b, CAST(const unsigned char *, bb.fbuf),
- bb.flen, offset, cont_level,
- mode, text, flip, indir_count, name_count,
- printed_something, need_separator, firstline, returnval,
- found_match))
- {
- case -1:
- return -1;
- case 0:
- flush = m->reln != '!';
- break;
- default:
- if (m->type == FILE_INDIRECT) {
- *found_match = 1;
- *returnval = 1;
- }
-
- switch (magiccheck(ms, m, m_rxcomp)) {
- case -1:
- return -1;
- case 0:
- flush++;
- break;
- default:
- flush = 0;
- break;
- }
- break;
- }
- if (flush) {
- /*
- * main entry didn't match,
- * flush its continuations
- */
- goto flush;
- }
-
- if ((e = handle_annotation(ms, m, *firstline)) != 0)
- {
- *found_match = 1;
- *need_separator = 1;
- *printed_something = 1;
- *returnval = 1;
- *firstline = 0;
- return e;
- }
-
- /*
- * If we are going to print something, we'll need to print
- * a blank before we print something else.
- */
- if (*m->desc) {
- *found_match = 1;
- if (print) {
- *returnval = 1;
- *need_separator = 1;
- *printed_something = 1;
- if (print_sep(ms, *firstline) == -1)
- return -1;
- if (mprint(ms, m) == -1)
- return -1;
- }
- }
-
- switch (moffset(ms, m, &bb, &ms->c.li[cont_level].off)) {
- case -1:
- case 0:
- goto flush;
- default:
- break;
- }
-
- /* and any continuations that match */
- if (file_check_mem(ms, ++cont_level) == -1)
- return -1;
-
- while (magindex + 1 < nmagic &&
- magic[magindex + 1].cont_level != 0) {
- m = &magic[++magindex];
- m_rxcomp = &magic_rxcomp[magindex];
- ms->line = m->lineno; /* for messages */
-
- if (cont_level < m->cont_level)
- continue;
- if (cont_level > m->cont_level) {
- /*
- * We're at the end of the level
- * "cont_level" continuations.
- */
- cont_level = m->cont_level;
- }
- if (msetoffset(ms, m, &bb, b, offset, cont_level) == -1)
- goto flush;
- if (m->flag & OFFADD) {
- if (cont_level == 0) {
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr,
- "direct *zero*"
- " cont_level\n");
- return 0;
- }
- ms->offset +=
- ms->c.li[cont_level - 1].off;
- }
-
-#ifdef ENABLE_CONDITIONALS
- if (m->cond == COND_ELSE ||
- m->cond == COND_ELIF) {
- if (ms->c.li[cont_level].last_match == 1)
- continue;
- }
-#endif
- switch (mget(ms, m, b, CAST(const unsigned char *,
- bb.fbuf), bb.flen, offset,
- cont_level, mode, text, flip, indir_count,
- name_count, printed_something, need_separator,
- firstline, returnval, found_match)) {
- case -1:
- return -1;
- case 0:
- if (m->reln != '!')
- continue;
- flush = 1;
- break;
- default:
- if (m->type == FILE_INDIRECT) {
- *found_match = 1;
- *returnval = 1;
- }
- flush = 0;
- break;
- }
-
- switch (flush ? 1 : magiccheck(ms, m, m_rxcomp)) {
- case -1:
- return -1;
- case 0:
-#ifdef ENABLE_CONDITIONALS
- ms->c.li[cont_level].last_match = 0;
-#endif
- break;
- default:
-#ifdef ENABLE_CONDITIONALS
- ms->c.li[cont_level].last_match = 1;
-#endif
- if (m->type == FILE_CLEAR)
- ms->c.li[cont_level].got_match = 0;
- else if (ms->c.li[cont_level].got_match) {
- if (m->type == FILE_DEFAULT)
- break;
- } else
- ms->c.li[cont_level].got_match = 1;
-
- if ((e = handle_annotation(ms, m, *firstline))
- != 0) {
- *found_match = 1;
- *need_separator = 1;
- *printed_something = 1;
- *returnval = 1;
- return e;
- }
- if (*m->desc) {
- *found_match = 1;
- }
- if (print && *m->desc) {
- *returnval = 1;
- /*
- * This continuation matched. Print
- * its message, with a blank before it
- * if the previous item printed and
- * this item isn't empty.
- */
- /*
- * If we are going to print something,
- * make sure that we have a separator
- * first.
- */
- if (!*printed_something) {
- *printed_something = 1;
- if (print_sep(ms, *firstline)
- == -1)
- return -1;
- }
- /* space if previous printed */
- if (*need_separator
- && (m->flag & NOSPACE) == 0) {
- if (file_printf(ms, " ") == -1)
- return -1;
- }
- if (mprint(ms, m) == -1)
- return -1;
- *need_separator = 1;
- }
-
- switch (moffset(ms, m, &bb,
- &ms->c.li[cont_level].off)) {
- case -1:
- case 0:
- cont_level--;
- break;
- default:
- break;
- }
-
- /*
- * If we see any continuations
- * at a higher level,
- * process them.
- */
- if (file_check_mem(ms, ++cont_level) == -1)
- return -1;
- break;
- }
- }
- if (*printed_something) {
- *firstline = 0;
- }
- if (*found_match) {
- if ((ms->flags & MAGIC_CONTINUE) == 0)
- return *returnval;
- // So that we print a separator
- *printed_something = 0;
- *firstline = 0;
- }
- cont_level = 0;
- }
- return *returnval;
-}
-
-file_private int
-check_fmt(struct magic_set *ms, const char *fmt)
-{
- file_regex_t rx;
- int rc, rv = -1;
- const char* pat = "%[-0-9\\.]*s";
-
- if (strchr(fmt, '%') == NULL)
- return 0;
-
- rc = file_regcomp(ms, &rx, pat, REG_EXTENDED|REG_NOSUB);
- if (rc == 0) {
- rc = file_regexec(ms, &rx, fmt, 0, 0, 0);
- rv = !rc;
- }
- file_regfree(&rx);
- return rv;
-}
-
-#if !defined(HAVE_STRNDUP) || defined(__aiws__) || defined(_AIX)
-# if defined(__aiws__) || defined(_AIX)
-# define strndup aix_strndup /* aix is broken */
-# endif
-char *strndup(const char *, size_t);
-
-char *
-strndup(const char *str, size_t n)
-{
- size_t len;
- char *copy;
-
- for (len = 0; len < n && str[len]; len++)
- continue;
- if ((copy = CAST(char *, malloc(len + 1))) == NULL)
- return NULL;
- (void)memcpy(copy, str, len);
- copy[len] = '\0';
- return copy;
-}
-#endif /* HAVE_STRNDUP */
-
-static int
-varexpand(struct magic_set *ms, char *buf, size_t len, const char *str)
-{
- const char *ptr, *sptr, *e, *t, *ee, *et;
- size_t l;
-
- for (sptr = str; (ptr = strstr(sptr, "${")) != NULL;) {
- l = CAST(size_t, ptr - sptr);
- if (l >= len)
- return -1;
- memcpy(buf, sptr, l);
- buf += l;
- len -= l;
- ptr += 2;
- if (!*ptr || ptr[1] != '?')
- return -1;
- for (et = t = ptr + 2; *et && *et != ':'; et++)
- continue;
- if (*et != ':')
- return -1;
- for (ee = e = et + 1; *ee && *ee != '}'; ee++)
- continue;
- if (*ee != '}')
- return -1;
- switch (*ptr) {
- case 'x':
- if (ms->mode & 0111) {
- ptr = t;
- l = et - t;
- } else {
- ptr = e;
- l = ee - e;
- }
- break;
- default:
- return -1;
- }
- if (l >= len)
- return -1;
- memcpy(buf, ptr, l);
- buf += l;
- len -= l;
- sptr = ee + 1;
- }
-
- l = strlen(sptr);
- if (l >= len)
- return -1;
-
- memcpy(buf, sptr, l);
- buf[l] = '\0';
- return 0;
-}
-
-
-file_private int
-mprint(struct magic_set *ms, struct magic *m)
-{
- uint64_t v;
- float vf;
- double vd;
- char buf[128], tbuf[26], sbuf[512], ebuf[512];
- const char *desc;
- union VALUETYPE *p = &ms->ms_value;
-
- if (varexpand(ms, ebuf, sizeof(ebuf), m->desc) == -1)
- desc = m->desc;
- else
- desc = ebuf;
-
-#define PRINTER(value, format, stype, utype) \
- v = file_signextend(ms, m, CAST(uint64_t, value)); \
- switch (check_fmt(ms, desc)) { \
- case -1: \
- return -1; \
- case 1: \
- if (m->flag & UNSIGNED) { \
- (void)snprintf(buf, sizeof(buf), "%" format "u", \
- CAST(utype, v)); \
- } else { \
- (void)snprintf(buf, sizeof(buf), "%" format "d", \
- CAST(stype, v)); \
- } \
- if (file_printf(ms, F(ms, desc, "%s"), buf) == -1) \
- return -1; \
- break; \
- default: \
- if (m->flag & UNSIGNED) { \
- if (file_printf(ms, F(ms, desc, "%" format "u"), \
- CAST(utype, v)) == -1) \
- return -1; \
- } else { \
- if (file_printf(ms, F(ms, desc, "%" format "d"), \
- CAST(stype, v)) == -1) \
- return -1; \
- } \
- break; \
- } \
- break
-
- switch (m->type) {
- case FILE_BYTE:
- PRINTER(p->b, "", int8_t, uint8_t);
-
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- PRINTER(p->h, "", int16_t, uint16_t);
-
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- PRINTER(p->l, "", int32_t, uint32_t);
-
- case FILE_QUAD:
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- case FILE_OFFSET:
- PRINTER(p->q, INT64_T_FORMAT, long long, unsigned long long);
-
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- if (m->reln == '=' || m->reln == '!') {
- if (file_printf(ms, F(ms, desc, "%s"),
- file_printable(ms, sbuf, sizeof(sbuf), m->value.s,
- sizeof(m->value.s))) == -1)
- return -1;
- }
- else {
- char *str = p->s;
-
- /* compute t before we mangle the string? */
-
- if (*m->value.s == '\0')
- str[strcspn(str, "\r\n")] = '\0';
-
- if (m->str_flags & STRING_TRIM)
- str = file_strtrim(str);
-
- if (file_printf(ms, F(ms, desc, "%s"),
- file_printable(ms, sbuf, sizeof(sbuf), str,
- sizeof(p->s) - (str - p->s))) == -1)
- return -1;
-
- if (m->type == FILE_PSTRING) {
- size_t l = file_pstring_length_size(ms, m);
- if (l == FILE_BADSIZE)
- return -1;
- }
- }
- break;
-
- case FILE_DATE:
- case FILE_BEDATE:
- case FILE_LEDATE:
- case FILE_MEDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdatetime(tbuf, sizeof(tbuf), p->l, 0)) == -1)
- return -1;
- break;
-
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MELDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdatetime(tbuf, sizeof(tbuf), p->l, FILE_T_LOCAL))
- == -1)
- return -1;
- break;
-
- case FILE_QDATE:
- case FILE_BEQDATE:
- case FILE_LEQDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdatetime(tbuf, sizeof(tbuf), p->q, 0)) == -1)
- return -1;
- break;
-
- case FILE_QLDATE:
- case FILE_BEQLDATE:
- case FILE_LEQLDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdatetime(tbuf, sizeof(tbuf), p->q, FILE_T_LOCAL)) == -1)
- return -1;
- break;
-
- case FILE_QWDATE:
- case FILE_BEQWDATE:
- case FILE_LEQWDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdatetime(tbuf, sizeof(tbuf), p->q, FILE_T_WINDOWS))
- == -1)
- return -1;
- break;
-
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- vf = p->f;
- switch (check_fmt(ms, desc)) {
- case -1:
- return -1;
- case 1:
- (void)snprintf(buf, sizeof(buf), "%g", vf);
- if (file_printf(ms, F(ms, desc, "%s"), buf) == -1)
- return -1;
- break;
- default:
- if (file_printf(ms, F(ms, desc, "%g"), vf) == -1)
- return -1;
- break;
- }
- break;
-
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- vd = p->d;
- switch (check_fmt(ms, desc)) {
- case -1:
- return -1;
- case 1:
- (void)snprintf(buf, sizeof(buf), "%g", vd);
- if (file_printf(ms, F(ms, desc, "%s"), buf) == -1)
- return -1;
- break;
- default:
- if (file_printf(ms, F(ms, desc, "%g"), vd) == -1)
- return -1;
- break;
- }
- break;
-
- case FILE_SEARCH:
- case FILE_REGEX: {
- char *cp, *scp;
- int rval;
-
- cp = strndup(RCAST(const char *, ms->search.s),
- ms->search.rm_len);
- if (cp == NULL) {
- file_oomem(ms, ms->search.rm_len);
- return -1;
- }
- scp = (m->str_flags & STRING_TRIM) ? file_strtrim(cp) : cp;
-
- rval = file_printf(ms, F(ms, desc, "%s"), file_printable(ms,
- sbuf, sizeof(sbuf), scp, ms->search.rm_len));
- free(cp);
-
- if (rval == -1)
- return -1;
- break;
- }
-
- case FILE_DEFAULT:
- case FILE_CLEAR:
- if (file_printf(ms, "%s", m->desc) == -1)
- return -1;
- break;
-
- case FILE_INDIRECT:
- case FILE_USE:
- case FILE_NAME:
- break;
- case FILE_DER:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_printable(ms, sbuf, sizeof(sbuf), ms->ms_value.s,
- sizeof(ms->ms_value.s))) == -1)
- return -1;
- break;
- case FILE_GUID:
- (void) file_print_guid(buf, sizeof(buf), ms->ms_value.guid);
- if (file_printf(ms, F(ms, desc, "%s"), buf) == -1)
- return -1;
- break;
- case FILE_MSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_LEMSDOSDATE:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmtdate(tbuf, sizeof(tbuf), p->h)) == -1)
- return -1;
- break;
- case FILE_MSDOSTIME:
- case FILE_BEMSDOSTIME:
- case FILE_LEMSDOSTIME:
- if (file_printf(ms, F(ms, desc, "%s"),
- file_fmttime(tbuf, sizeof(tbuf), p->h)) == -1)
- return -1;
- break;
- case FILE_OCTAL:
- file_fmtnum(buf, sizeof(buf), m->value.s, 8);
- if (file_printf(ms, F(ms, desc, "%s"), buf) == -1)
- return -1;
- break;
- default:
- file_magerror(ms, "invalid m->type (%d) in mprint()", m->type);
- return -1;
- }
- return 0;
-}
-
-file_private int
-moffset(struct magic_set *ms, struct magic *m, const struct buffer *b,
- int32_t *op)
-{
- size_t nbytes = b->flen;
- int32_t o;
-
- switch (m->type) {
- case FILE_BYTE:
- o = CAST(int32_t, (ms->offset + sizeof(char)));
- break;
-
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- case FILE_MSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_LEMSDOSTIME:
- case FILE_BEMSDOSTIME:
- o = CAST(int32_t, (ms->offset + sizeof(short)));
- break;
-
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- o = CAST(int32_t, (ms->offset + sizeof(int32_t)));
- break;
-
- case FILE_QUAD:
- case FILE_BEQUAD:
- case FILE_LEQUAD:
- o = CAST(int32_t, (ms->offset + sizeof(int64_t)));
- break;
-
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- case FILE_OCTAL:
- if (m->reln == '=' || m->reln == '!') {
- o = ms->offset + m->vallen;
- } else {
- union VALUETYPE *p = &ms->ms_value;
-
- if (*m->value.s == '\0')
- p->s[strcspn(p->s, "\r\n")] = '\0';
- o = CAST(uint32_t, (ms->offset + strlen(p->s)));
- if (m->type == FILE_PSTRING) {
- size_t l = file_pstring_length_size(ms, m);
- if (l == FILE_BADSIZE)
- return -1;
- o += CAST(uint32_t, l);
- }
- }
- break;
-
- case FILE_DATE:
- case FILE_BEDATE:
- case FILE_LEDATE:
- case FILE_MEDATE:
- o = CAST(int32_t, (ms->offset + sizeof(uint32_t)));
- break;
-
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MELDATE:
- o = CAST(int32_t, (ms->offset + sizeof(uint32_t)));
- break;
-
- case FILE_QDATE:
- case FILE_BEQDATE:
- case FILE_LEQDATE:
- o = CAST(int32_t, (ms->offset + sizeof(uint64_t)));
- break;
-
- case FILE_QLDATE:
- case FILE_BEQLDATE:
- case FILE_LEQLDATE:
- o = CAST(int32_t, (ms->offset + sizeof(uint64_t)));
- break;
-
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- o = CAST(int32_t, (ms->offset + sizeof(float)));
- break;
-
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- o = CAST(int32_t, (ms->offset + sizeof(double)));
- break;
-
- case FILE_REGEX:
- if ((m->str_flags & REGEX_OFFSET_START) != 0)
- o = CAST(int32_t, ms->search.offset);
- else
- o = CAST(int32_t,
- (ms->search.offset + ms->search.rm_len));
- break;
-
- case FILE_SEARCH:
- if ((m->str_flags & REGEX_OFFSET_START) != 0)
- o = CAST(int32_t, ms->search.offset);
- else
- o = CAST(int32_t, (ms->search.offset + m->vallen));
- break;
-
- case FILE_CLEAR:
- case FILE_DEFAULT:
- case FILE_INDIRECT:
- case FILE_OFFSET:
- case FILE_USE:
- o = ms->offset;
- break;
-
- case FILE_DER:
- o = der_offs(ms, m, nbytes);
- if (o == -1 || CAST(size_t, o) > nbytes) {
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- (void)fprintf(stderr,
- "Bad DER offset %d nbytes=%"
- SIZE_T_FORMAT "u", o, nbytes);
- }
- *op = 0;
- return 0;
- }
- break;
-
- case FILE_GUID:
- o = CAST(int32_t, (ms->offset + 2 * sizeof(uint64_t)));
- break;
-
- default:
- o = 0;
- break;
- }
-
- if (CAST(size_t, o) > nbytes) {
-#if 0
- file_error(ms, 0, "Offset out of range %" SIZE_T_FORMAT
- "u > %" SIZE_T_FORMAT "u", (size_t)o, nbytes);
-#endif
- return -1;
- }
- *op = o;
- return 1;
-}
-
-file_private uint32_t
-cvt_id3(struct magic_set *ms, uint32_t v)
-{
- v = ((((v >> 0) & 0x7f) << 0) |
- (((v >> 8) & 0x7f) << 7) |
- (((v >> 16) & 0x7f) << 14) |
- (((v >> 24) & 0x7f) << 21));
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "id3 offs=%u\n", v);
- return v;
-}
-
-file_private int
-cvt_flip(int type, int flip)
-{
- if (flip == 0)
- return type;
- switch (type) {
- case FILE_BESHORT:
- return FILE_LESHORT;
- case FILE_BELONG:
- return FILE_LELONG;
- case FILE_BEDATE:
- return FILE_LEDATE;
- case FILE_BELDATE:
- return FILE_LELDATE;
- case FILE_BEQUAD:
- return FILE_LEQUAD;
- case FILE_BEQDATE:
- return FILE_LEQDATE;
- case FILE_BEQLDATE:
- return FILE_LEQLDATE;
- case FILE_BEQWDATE:
- return FILE_LEQWDATE;
- case FILE_LESHORT:
- return FILE_BESHORT;
- case FILE_LELONG:
- return FILE_BELONG;
- case FILE_LEDATE:
- return FILE_BEDATE;
- case FILE_LELDATE:
- return FILE_BELDATE;
- case FILE_LEQUAD:
- return FILE_BEQUAD;
- case FILE_LEQDATE:
- return FILE_BEQDATE;
- case FILE_LEQLDATE:
- return FILE_BEQLDATE;
- case FILE_LEQWDATE:
- return FILE_BEQWDATE;
- case FILE_BEFLOAT:
- return FILE_LEFLOAT;
- case FILE_LEFLOAT:
- return FILE_BEFLOAT;
- case FILE_BEDOUBLE:
- return FILE_LEDOUBLE;
- case FILE_LEDOUBLE:
- return FILE_BEDOUBLE;
- default:
- return type;
- }
-}
-#define DO_CVT(fld, type) \
- if (m->num_mask) \
- switch (m->mask_op & FILE_OPS_MASK) { \
- case FILE_OPAND: \
- p->fld &= CAST(type, m->num_mask); \
- break; \
- case FILE_OPOR: \
- p->fld |= CAST(type, m->num_mask); \
- break; \
- case FILE_OPXOR: \
- p->fld ^= CAST(type, m->num_mask); \
- break; \
- case FILE_OPADD: \
- p->fld += CAST(type, m->num_mask); \
- break; \
- case FILE_OPMINUS: \
- p->fld -= CAST(type, m->num_mask); \
- break; \
- case FILE_OPMULTIPLY: \
- p->fld *= CAST(type, m->num_mask); \
- break; \
- case FILE_OPDIVIDE: \
- if (CAST(type, m->num_mask) == 0) \
- return -1; \
- p->fld /= CAST(type, m->num_mask); \
- break; \
- case FILE_OPMODULO: \
- if (CAST(type, m->num_mask) == 0) \
- return -1; \
- p->fld %= CAST(type, m->num_mask); \
- break; \
- } \
- if (m->mask_op & FILE_OPINVERSE) \
- p->fld = ~p->fld \
-
-file_private int
-cvt_8(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT(b, uint8_t);
- return 0;
-}
-
-file_private int
-cvt_16(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT(h, uint16_t);
- return 0;
-}
-
-file_private int
-cvt_32(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT(l, uint32_t);
- return 0;
-}
-
-file_private int
-cvt_64(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT(q, uint64_t);
- return 0;
-}
-
-#define DO_CVT2(fld, type) \
- if (m->num_mask) \
- switch (m->mask_op & FILE_OPS_MASK) { \
- case FILE_OPADD: \
- p->fld += CAST(type, m->num_mask); \
- break; \
- case FILE_OPMINUS: \
- p->fld -= CAST(type, m->num_mask); \
- break; \
- case FILE_OPMULTIPLY: \
- p->fld *= CAST(type, m->num_mask); \
- break; \
- case FILE_OPDIVIDE: \
- if (CAST(type, m->num_mask) == 0) \
- return -1; \
- p->fld /= CAST(type, m->num_mask); \
- break; \
- } \
-
-file_private int
-cvt_float(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT2(f, float);
- return 0;
-}
-
-file_private int
-cvt_double(union VALUETYPE *p, const struct magic *m)
-{
- DO_CVT2(d, double);
- return 0;
-}
-
-/*
- * Convert the byte order of the data we are looking at
- * While we're here, let's apply the mask operation
- * (unless you have a better idea)
- */
-file_private int
-mconvert(struct magic_set *ms, struct magic *m, int flip)
-{
- union VALUETYPE *p = &ms->ms_value;
-
- switch (cvt_flip(m->type, flip)) {
- case FILE_BYTE:
- if (cvt_8(p, m) == -1)
- goto out;
- return 1;
- case FILE_SHORT:
- case FILE_MSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_LEMSDOSTIME:
- case FILE_BEMSDOSTIME:
- if (cvt_16(p, m) == -1)
- goto out;
- return 1;
- case FILE_LONG:
- case FILE_DATE:
- case FILE_LDATE:
- if (cvt_32(p, m) == -1)
- goto out;
- return 1;
- case FILE_QUAD:
- case FILE_QDATE:
- case FILE_QLDATE:
- case FILE_QWDATE:
- case FILE_OFFSET:
- if (cvt_64(p, m) == -1)
- goto out;
- return 1;
- case FILE_STRING:
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- case FILE_OCTAL: {
- /* Null terminate and eat *trailing* return */
- p->s[sizeof(p->s) - 1] = '\0';
- return 1;
- }
- case FILE_PSTRING: {
- char *ptr1, *ptr2;
- size_t len, sz = file_pstring_length_size(ms, m);
- if (sz == FILE_BADSIZE)
- return 0;
- ptr1 = p->s;
- ptr2 = ptr1 + sz;
- len = file_pstring_get_length(ms, m, ptr1);
- if (len == FILE_BADSIZE)
- return 0;
- sz = sizeof(p->s) - sz; /* maximum length of string */
- if (len >= sz) {
- /*
-			 * The pascal string length field is 1, 2, or 4 bytes
-			 * wide; at this point sz holds sizeof(p->s) minus
-			 * that width, i.e. the maximum string length.  We
-			 * would normally need one more byte for the NUL
-			 * terminator, but the string is shifted left by the
-			 * width of the length field below, so one of the
-			 * freed bytes can hold the NUL and len can safely
-			 * be capped at sz.
- */
- len = sz;
- }
- while (len--)
- *ptr1++ = *ptr2++;
- *ptr1 = '\0';
- return 1;
- }
- case FILE_BESHORT:
- p->h = CAST(short, BE16(p));
- if (cvt_16(p, m) == -1)
- goto out;
- return 1;
- case FILE_BELONG:
- case FILE_BEDATE:
- case FILE_BELDATE:
- p->l = CAST(int32_t, BE32(p));
- if (cvt_32(p, m) == -1)
- goto out;
- return 1;
- case FILE_BEQUAD:
- case FILE_BEQDATE:
- case FILE_BEQLDATE:
- case FILE_BEQWDATE:
- p->q = CAST(uint64_t, BE64(p));
- if (cvt_64(p, m) == -1)
- goto out;
- return 1;
- case FILE_LESHORT:
- p->h = CAST(short, LE16(p));
- if (cvt_16(p, m) == -1)
- goto out;
- return 1;
- case FILE_LELONG:
- case FILE_LEDATE:
- case FILE_LELDATE:
- p->l = CAST(int32_t, LE32(p));
- if (cvt_32(p, m) == -1)
- goto out;
- return 1;
- case FILE_LEQUAD:
- case FILE_LEQDATE:
- case FILE_LEQLDATE:
- case FILE_LEQWDATE:
- p->q = CAST(uint64_t, LE64(p));
- if (cvt_64(p, m) == -1)
- goto out;
- return 1;
- case FILE_MELONG:
- case FILE_MEDATE:
- case FILE_MELDATE:
- p->l = CAST(int32_t, ME32(p));
- if (cvt_32(p, m) == -1)
- goto out;
- return 1;
- case FILE_FLOAT:
- if (cvt_float(p, m) == -1)
- goto out;
- return 1;
- case FILE_BEFLOAT:
- p->l = BE32(p);
- if (cvt_float(p, m) == -1)
- goto out;
- return 1;
- case FILE_LEFLOAT:
- p->l = LE32(p);
- if (cvt_float(p, m) == -1)
- goto out;
- return 1;
- case FILE_DOUBLE:
- if (cvt_double(p, m) == -1)
- goto out;
- return 1;
- case FILE_BEDOUBLE:
- p->q = BE64(p);
- if (cvt_double(p, m) == -1)
- goto out;
- return 1;
- case FILE_LEDOUBLE:
- p->q = LE64(p);
- if (cvt_double(p, m) == -1)
- goto out;
- return 1;
- case FILE_REGEX:
- case FILE_SEARCH:
- case FILE_DEFAULT:
- case FILE_CLEAR:
- case FILE_NAME:
- case FILE_USE:
- case FILE_DER:
- case FILE_GUID:
- return 1;
- default:
- file_magerror(ms, "invalid type %d in mconvert()", m->type);
- return 0;
- }
-out:
- file_magerror(ms, "zerodivide in mconvert()");
- return 0;
-}
-
-
-file_private void
-mdebug(uint32_t offset, const char *str, size_t len)
-{
- (void) fprintf(stderr, "mget/%" SIZE_T_FORMAT "u @%d: ", len, offset);
- file_showstr(stderr, str, len);
- (void) fputc('\n', stderr);
- (void) fputc('\n', stderr);
-}
-
-file_private int
-mcopy(struct magic_set *ms, union VALUETYPE *p, int type, int indir,
- const unsigned char *s, uint32_t offset, size_t nbytes, struct magic *m)
-{
- size_t size = sizeof(*p);
- /*
-	 * Note: FILE_SEARCH and FILE_REGEX do not actually copy
-	 * anything, but set up pointers into the source.
- */
- if (indir == 0) {
- switch (type) {
- case FILE_DER:
- case FILE_SEARCH:
- if (offset > nbytes)
- offset = CAST(uint32_t, nbytes);
- ms->search.s = RCAST(const char *, s) + offset;
- ms->search.s_len = nbytes - offset;
- ms->search.offset = offset;
- return 0;
-
- case FILE_REGEX: {
- const char *b;
- const char *c;
- const char *last; /* end of search region */
- const char *buf; /* start of search region */
- const char *end;
- size_t lines, linecnt, bytecnt;
-
- if (s == NULL || nbytes < offset) {
- ms->search.s_len = 0;
- ms->search.s = NULL;
- return 0;
- }
-
- if (m->str_flags & REGEX_LINE_COUNT) {
- linecnt = m->str_range;
- bytecnt = linecnt * 80;
- } else {
- linecnt = 0;
- bytecnt = m->str_range;
- }
-
- if (bytecnt == 0 || bytecnt > nbytes - offset)
- bytecnt = nbytes - offset;
- if (bytecnt > ms->regex_max)
- bytecnt = ms->regex_max;
-
- buf = RCAST(const char *, s) + offset;
- end = last = RCAST(const char *, s) + bytecnt + offset;
- /* mget() guarantees buf <= last */
- for (lines = linecnt, b = buf; lines && b < end &&
- ((b = CAST(const char *,
- memchr(c = b, '\n', CAST(size_t, (end - b)))))
- || (b = CAST(const char *,
- memchr(c, '\r', CAST(size_t, (end - c))))));
- lines--, b++) {
- if (b < end - 1 && b[0] == '\r' && b[1] == '\n')
- b++;
- if (b < end - 1 && b[0] == '\n')
- b++;
- last = b;
- }
- if (lines)
- last = end;
-
- ms->search.s = buf;
- ms->search.s_len = last - buf;
- ms->search.offset = offset;
- ms->search.rm_len = 0;
- return 0;
- }
- case FILE_BESTRING16:
- case FILE_LESTRING16: {
- const unsigned char *src = s + offset;
- const unsigned char *esrc = s + nbytes;
- char *dst = p->s;
- char *edst = &p->s[sizeof(p->s) - 1];
-
- if (type == FILE_BESTRING16)
- src++;
-
- /* check that offset is within range */
- if (offset >= nbytes)
- break;
- for (/*EMPTY*/; src < esrc; src += 2, dst++) {
- if (dst < edst)
- *dst = *src;
- else
- break;
- if (*dst == '\0') {
- if (type == FILE_BESTRING16 ?
- *(src - 1) != '\0' :
- ((src + 1 < esrc) &&
- *(src + 1) != '\0'))
- *dst = ' ';
- }
- }
- *edst = '\0';
- return 0;
- }
- case FILE_STRING: /* XXX - these two should not need */
- case FILE_PSTRING: /* to copy anything, but do anyway. */
- if (m->str_range != 0 && m->str_range < sizeof(*p))
- size = m->str_range;
- break;
- default:
- break;
- }
- }
-
- if (type == FILE_OFFSET) {
- (void)memset(p, '\0', sizeof(*p));
- p->q = offset;
- return 0;
- }
-
- if (offset >= nbytes) {
- (void)memset(p, '\0', sizeof(*p));
- return 0;
- }
- if (nbytes - offset < size)
- nbytes = nbytes - offset;
- else
- nbytes = size;
-
- (void)memcpy(p, s + offset, nbytes);
-
- /*
- * the usefulness of padding with zeroes eludes me, it
- * might even cause problems
- */
- if (nbytes < sizeof(*p))
- (void)memset(RCAST(char *, RCAST(void *, p)) + nbytes, '\0',
- sizeof(*p) - nbytes);
- return 0;
-}
-
-file_private int
-do_ops(struct magic_set *ms, struct magic *m, uint32_t *rv, intmax_t lhs,
- intmax_t off)
-{
- intmax_t offset;
- // On purpose not INTMAX_MAX
- if (lhs >= UINT_MAX || lhs <= INT_MIN ||
- off >= UINT_MAX || off <= INT_MIN) {
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "lhs/off overflow %jd %jd\n", lhs, off);
- return 1;
- }
-
- if (off) {
- switch (m->in_op & FILE_OPS_MASK) {
- case FILE_OPAND:
- offset = lhs & off;
- break;
- case FILE_OPOR:
- offset = lhs | off;
- break;
- case FILE_OPXOR:
- offset = lhs ^ off;
- break;
- case FILE_OPADD:
- offset = lhs + off;
- break;
- case FILE_OPMINUS:
- offset = lhs - off;
- break;
- case FILE_OPMULTIPLY:
- offset = lhs * off;
- break;
- case FILE_OPDIVIDE:
- offset = lhs / off;
- break;
- case FILE_OPMODULO:
- offset = lhs % off;
- break;
- }
- } else
- offset = lhs;
- if (m->in_op & FILE_OPINVERSE)
- offset = ~offset;
- if (offset >= UINT_MAX) {
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "offset overflow %jd\n", offset);
- return 1;
- }
- *rv = CAST(uint32_t, offset);
- return 0;
-}
-
-file_private int
-msetoffset(struct magic_set *ms, struct magic *m, struct buffer *bb,
- const struct buffer *b, size_t o, unsigned int cont_level)
-{
- int32_t offset;
- if (m->flag & OFFNEGATIVE) {
- offset = -m->offset;
- if (cont_level > 0) {
- if (m->flag & (OFFADD|INDIROFFADD))
- goto normal;
-#if 0
- file_error(ms, 0, "negative offset %d at continuation"
- "level %u", m->offset, cont_level);
- return -1;
-#endif
- }
- if (buffer_fill(b) == -1)
- return -1;
- if (o != 0) {
- // Not yet!
- file_magerror(ms, "non zero offset %" SIZE_T_FORMAT
- "u at level %u", o, cont_level);
- return -1;
- }
- if (CAST(size_t, m->offset) > b->elen)
- return -1;
- buffer_init(bb, -1, NULL, b->ebuf, b->elen);
- ms->eoffset = ms->offset = CAST(int32_t, b->elen - m->offset);
- } else {
- offset = m->offset;
- if (cont_level == 0) {
-normal:
- // XXX: Pass real fd, then who frees bb?
- buffer_init(bb, -1, NULL, b->fbuf, b->flen);
- ms->offset = offset;
- ms->eoffset = 0;
- } else {
- ms->offset = ms->eoffset + offset;
- }
- }
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- fprintf(stderr, "bb=[%p,%" SIZE_T_FORMAT "u,%"
- SIZE_T_FORMAT "u], %d [b=%p,%"
- SIZE_T_FORMAT "u,%" SIZE_T_FORMAT "u], [o=%#x, c=%d]\n",
- bb->fbuf, bb->flen, bb->elen, ms->offset, b->fbuf,
- b->flen, b->elen, offset, cont_level);
- }
- return 0;
-}
-
-file_private int
-save_cont(struct magic_set *ms, struct cont *c)
-{
- size_t len;
- *c = ms->c;
- len = c->len * sizeof(*c->li);
- ms->c.li = CAST(struct level_info *, malloc(len));
- if (ms->c.li == NULL) {
- ms->c = *c;
- return -1;
- }
- memcpy(ms->c.li, c->li, len);
- return 0;
-}
-
-file_private void
-restore_cont(struct magic_set *ms, struct cont *c)
-{
- free(ms->c.li);
- ms->c = *c;
-}
-
-file_private int
-mget(struct magic_set *ms, struct magic *m, const struct buffer *b,
- const unsigned char *s, size_t nbytes, size_t o, unsigned int cont_level,
- int mode, int text, int flip, uint16_t *indir_count, uint16_t *name_count,
- int *printed_something, int *need_separator, int *firstline, int *returnval,
- int *found_match)
-{
- uint32_t eoffset, offset = ms->offset;
- struct buffer bb;
- intmax_t lhs;
- file_pushbuf_t *pb;
- int rv, oneed_separator, in_type, nfound_match;
- char *rbuf;
- union VALUETYPE *p = &ms->ms_value;
- struct mlist ml, *mlp;
- struct cont c;
-
- if (*indir_count >= ms->indir_max) {
- file_error(ms, 0, "indirect count (%hu) exceeded",
- *indir_count);
- return -1;
- }
-
- if (*name_count >= ms->name_max) {
- file_error(ms, 0, "name use count (%hu) exceeded",
- *name_count);
- return -1;
- }
-
-
-
- if (mcopy(ms, p, m->type, m->flag & INDIR, s,
- CAST(uint32_t, offset + o), CAST(uint32_t, nbytes), m) == -1)
- return -1;
-
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- fprintf(stderr, "mget(type=%d, flag=%#x, offset=%u, o=%"
- SIZE_T_FORMAT "u, " "nbytes=%" SIZE_T_FORMAT
- "u, il=%hu, nc=%hu)\n",
- m->type, m->flag, offset, o, nbytes,
- *indir_count, *name_count);
- mdebug(offset, RCAST(char *, RCAST(void *, p)),
- sizeof(union VALUETYPE));
-#ifndef COMPILE_ONLY
- file_mdump(m);
-#endif
- }
-
- if (m->flag & INDIR) {
- intmax_t off = m->in_offset;
- const int sgn = m->in_op & FILE_OPSIGNED;
- if (m->in_op & FILE_OPINDIRECT) {
- const union VALUETYPE *q = CAST(const union VALUETYPE *,
- RCAST(const void *, s + offset + off));
- int op;
- switch (op = cvt_flip(m->in_type, flip)) {
- case FILE_BYTE:
- if (OFFSET_OOB(nbytes, offset + off, 1))
- return 0;
- off = SEXT(sgn,8,q->b);
- break;
- case FILE_SHORT:
- if (OFFSET_OOB(nbytes, offset + off, 2))
- return 0;
- off = SEXT(sgn,16,q->h);
- break;
- case FILE_BESHORT:
- if (OFFSET_OOB(nbytes, offset + off, 2))
- return 0;
- off = SEXT(sgn,16,BE16(q));
- break;
- case FILE_LESHORT:
- if (OFFSET_OOB(nbytes, offset + off, 2))
- return 0;
- off = SEXT(sgn,16,LE16(q));
- break;
- case FILE_LONG:
- if (OFFSET_OOB(nbytes, offset + off, 4))
- return 0;
- off = SEXT(sgn,32,q->l);
- break;
- case FILE_BELONG:
- case FILE_BEID3:
- if (OFFSET_OOB(nbytes, offset + off, 4))
- return 0;
- off = SEXT(sgn,32,BE32(q));
- break;
- case FILE_LEID3:
- case FILE_LELONG:
- if (OFFSET_OOB(nbytes, offset + off, 4))
- return 0;
- off = SEXT(sgn,32,LE32(q));
- break;
- case FILE_MELONG:
- if (OFFSET_OOB(nbytes, offset + off, 4))
- return 0;
- off = SEXT(sgn,32,ME32(q));
- break;
- case FILE_BEQUAD:
- if (OFFSET_OOB(nbytes, offset + off, 8))
- return 0;
- off = SEXT(sgn,64,BE64(q));
- break;
- case FILE_LEQUAD:
- if (OFFSET_OOB(nbytes, offset + off, 8))
- return 0;
- off = SEXT(sgn,64,LE64(q));
- break;
- case FILE_OCTAL:
- if (OFFSET_OOB(nbytes, offset, m->vallen))
- return 0;
- off = SEXT(sgn,64,strtoull(p->s, NULL, 8));
- break;
- default:
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "bad op=%d\n", op);
- return 0;
- }
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "indirect offs=%jd\n", off);
- }
- switch (in_type = cvt_flip(m->in_type, flip)) {
- case FILE_BYTE:
- if (OFFSET_OOB(nbytes, offset, 1))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,8,p->b), off))
- return 0;
- break;
- case FILE_BESHORT:
- if (OFFSET_OOB(nbytes, offset, 2))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,16,BE16(p)), off))
- return 0;
- break;
- case FILE_LESHORT:
- if (OFFSET_OOB(nbytes, offset, 2))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,16,LE16(p)), off))
- return 0;
- break;
- case FILE_SHORT:
- if (OFFSET_OOB(nbytes, offset, 2))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,16,p->h), off))
- return 0;
- break;
- case FILE_BELONG:
- case FILE_BEID3:
- if (OFFSET_OOB(nbytes, offset, 4))
- return 0;
- lhs = BE32(p);
- if (in_type == FILE_BEID3)
- lhs = cvt_id3(ms, CAST(uint32_t, lhs));
- if (do_ops(ms, m, &offset, SEXT(sgn,32,lhs), off))
- return 0;
- break;
- case FILE_LELONG:
- case FILE_LEID3:
- if (OFFSET_OOB(nbytes, offset, 4))
- return 0;
- lhs = LE32(p);
- if (in_type == FILE_LEID3)
- lhs = cvt_id3(ms, CAST(uint32_t, lhs));
- if (do_ops(ms, m, &offset, SEXT(sgn,32,lhs), off))
- return 0;
- break;
- case FILE_MELONG:
- if (OFFSET_OOB(nbytes, offset, 4))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,32,ME32(p)), off))
- return 0;
- break;
- case FILE_LONG:
- if (OFFSET_OOB(nbytes, offset, 4))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,32,p->l), off))
- return 0;
- break;
- case FILE_LEQUAD:
- if (OFFSET_OOB(nbytes, offset, 8))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,64,LE64(p)), off))
- return 0;
- break;
- case FILE_BEQUAD:
- if (OFFSET_OOB(nbytes, offset, 8))
- return 0;
- if (do_ops(ms, m, &offset, SEXT(sgn,64,BE64(p)), off))
- return 0;
- break;
- case FILE_OCTAL:
- if (OFFSET_OOB(nbytes, offset, m->vallen))
- return 0;
- if(do_ops(ms, m, &offset,
- SEXT(sgn,64,strtoull(p->s, NULL, 8)), off))
- return 0;
- break;
- default:
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "bad in_type=%d\n", in_type);
- return 0;
- }
-
- if (m->flag & INDIROFFADD) {
- if (cont_level == 0) {
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr,
- "indirect *zero* cont_level\n");
- return 0;
- }
- offset += ms->c.li[cont_level - 1].off;
- if (offset == 0) {
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr,
- "indirect *zero* offset\n");
- return 0;
- }
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "indirect +offs=%u\n", offset);
- }
- if (mcopy(ms, p, m->type, 0, s, offset, nbytes, m) == -1)
- return -1;
- ms->offset = offset;
-
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- mdebug(offset, RCAST(char *, RCAST(void *, p)),
- sizeof(union VALUETYPE));
-#ifndef COMPILE_ONLY
- file_mdump(m);
-#endif
- }
- }
-
- /* Verify we have enough data to match magic type */
- switch (m->type) {
- case FILE_BYTE:
- if (OFFSET_OOB(nbytes, offset, 1))
- return 0;
- break;
-
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- if (OFFSET_OOB(nbytes, offset, 2))
- return 0;
- break;
-
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- case FILE_DATE:
- case FILE_BEDATE:
- case FILE_LEDATE:
- case FILE_MEDATE:
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MELDATE:
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- if (OFFSET_OOB(nbytes, offset, 4))
- return 0;
- break;
-
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- if (OFFSET_OOB(nbytes, offset, 8))
- return 0;
- break;
-
- case FILE_GUID:
- if (OFFSET_OOB(nbytes, offset, 16))
- return 0;
- break;
-
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_SEARCH:
- case FILE_OCTAL:
- if (OFFSET_OOB(nbytes, offset, m->vallen))
- return 0;
- break;
-
- case FILE_REGEX:
- if (nbytes < offset)
- return 0;
- break;
-
- case FILE_INDIRECT:
- if (m->str_flags & INDIRECT_RELATIVE)
- offset += CAST(uint32_t, o);
- if (offset == 0)
- return 0;
-
- if (nbytes < offset)
- return 0;
-
- if ((pb = file_push_buffer(ms)) == NULL)
- return -1;
-
- (*indir_count)++;
- bb = *b;
- bb.fbuf = s + offset;
- bb.flen = nbytes - offset;
- bb.ebuf = NULL;
- bb.elen = 0;
- rv = -1;
- for (mlp = ms->mlist[0]->next; mlp != ms->mlist[0];
- mlp = mlp->next)
- {
- if ((rv = match(ms, mlp->magic, mlp->magic_rxcomp,
- mlp->nmagic, &bb, 0, BINTEST, text, 0, indir_count,
- name_count, printed_something, need_separator,
- firstline, NULL, NULL)) != 0)
- break;
- }
- buffer_fini(&bb);
-
- if ((ms->flags & MAGIC_DEBUG) != 0)
- fprintf(stderr, "indirect @offs=%u[%d]\n", offset, rv);
-
- rbuf = file_pop_buffer(ms, pb);
- if (rbuf == NULL && ms->event_flags & EVENT_HAD_ERR)
- return -1;
-
- if (rv == 1) {
- if ((ms->flags & MAGIC_NODESC) == 0 &&
- file_printf(ms, F(ms, m->desc, "%u"), offset) == -1)
- {
- free(rbuf);
- return -1;
- }
- if (file_printf(ms, "%s", rbuf) == -1) {
- free(rbuf);
- return -1;
- }
- }
- free(rbuf);
- return rv;
-
- case FILE_USE:
- if (nbytes < offset)
- return 0;
- rbuf = m->value.s;
- if (*rbuf == '^') {
- rbuf++;
- flip = !flip;
- }
- if (file_magicfind(ms, rbuf, &ml) == -1) {
- file_error(ms, 0, "cannot find entry `%s'", rbuf);
- return -1;
- }
- if (save_cont(ms, &c) == -1) {
- file_error(ms, errno, "can't allocate continuation");
- return -1;
- }
-
- oneed_separator = *need_separator;
- if (m->flag & NOSPACE)
- *need_separator = 0;
-
- nfound_match = 0;
- (*name_count)++;
- eoffset = ms->eoffset;
- rv = match(ms, ml.magic, ml.magic_rxcomp, ml.nmagic, b,
- offset + o, mode, text, flip, indir_count, name_count,
- printed_something, need_separator, firstline, returnval,
- &nfound_match);
- ms->ms_value.q = nfound_match;
- (*name_count)--;
- *found_match |= nfound_match;
-
- restore_cont(ms, &c);
-
- if (rv != 1)
- *need_separator = oneed_separator;
- ms->offset = offset;
- ms->eoffset = eoffset;
- return rv || *found_match;
-
- case FILE_NAME:
- if (ms->flags & MAGIC_NODESC)
- return 1;
- if (file_printf(ms, "%s", m->desc) == -1)
- return -1;
- return 1;
- case FILE_DER:
- case FILE_DEFAULT: /* nothing to check */
- case FILE_CLEAR:
- default:
- break;
- }
- if (!mconvert(ms, m, flip))
- return 0;
- return 1;
-}
-
-file_private uint64_t
-file_strncmp(const char *s1, const char *s2, size_t len, size_t maxlen,
- uint32_t flags)
-{
- /*
- * Convert the source args to unsigned here so that (1) the
- * compare will be unsigned as it is in strncmp() and (2) so
- * the ctype functions will work correctly without extra
- * casting.
- */
- const unsigned char *a = RCAST(const unsigned char *, s1);
- const unsigned char *b = RCAST(const unsigned char *, s2);
- uint32_t ws = flags & (STRING_COMPACT_WHITESPACE |
- STRING_COMPACT_OPTIONAL_WHITESPACE);
- const unsigned char *eb = b + (ws ? maxlen : len);
- uint64_t v;
-
- /*
- * What we want here is v = strncmp(s1, s2, len),
- * but ignoring any nulls.
- */
- v = 0;
- len++;
- if (0L == flags) { /* normal string: do it fast */
- while (--len > 0)
- if ((v = *b++ - *a++) != '\0')
- break;
- }
- else { /* combine the others */
- while (--len > 0) {
- if (b >= eb) {
- v = 1;
- break;
- }
- if ((flags & STRING_IGNORE_LOWERCASE) &&
- islower(*a)) {
- if ((v = tolower(*b++) - *a++) != '\0')
- break;
- }
- else if ((flags & STRING_IGNORE_UPPERCASE) &&
- isupper(*a)) {
- if ((v = toupper(*b++) - *a++) != '\0')
- break;
- }
- else if ((flags & STRING_COMPACT_WHITESPACE) &&
- isspace(*a)) {
- a++;
- if (isspace(*b)) {
- b++;
- if (!isspace(*a))
- while (b < eb && isspace(*b))
- b++;
- }
- else {
- v = 1;
- break;
- }
- }
- else if ((flags & STRING_COMPACT_OPTIONAL_WHITESPACE) &&
- isspace(*a)) {
- a++;
- while (b < eb && isspace(*b))
- b++;
- }
- else {
- if ((v = *b++ - *a++) != '\0')
- break;
- }
- }
- if (len == 0 && v == 0 && (flags & STRING_FULL_WORD)) {
- if (*b && !isspace(*b))
- v = 1;
- }
- }
- return v;
-}
-
-file_private uint64_t
-file_strncmp16(const char *a, const char *b, size_t len, size_t maxlen,
- uint32_t flags)
-{
- /*
- * XXX - The 16-bit string compare probably needs to be done
- * differently, especially if the flags are to be supported.
- * At the moment, I am unsure.
- */
- flags = 0;
- return file_strncmp(a, b, len, maxlen, flags);
-}
-
-file_private file_regex_t *
-alloc_regex(struct magic_set *ms, struct magic *m)
-{
- int rc;
- file_regex_t *rx = CAST(file_regex_t *, malloc(sizeof(*rx)));
-
- if (rx == NULL) {
- file_error(ms, errno, "can't allocate %" SIZE_T_FORMAT
- "u bytes", sizeof(*rx));
- return NULL;
- }
-
- rc = file_regcomp(ms, rx, m->value.s, REG_EXTENDED | REG_NEWLINE |
- ((m->str_flags & STRING_IGNORE_CASE) ? REG_ICASE : 0));
- if (rc == 0)
- return rx;
-
- free(rx);
- return NULL;
-}
-
-file_private int
-magiccheck(struct magic_set *ms, struct magic *m, file_regex_t **m_cache)
-{
- uint64_t l = m->value.q;
- uint64_t v;
- float fl, fv;
- double dl, dv;
- int matched;
- union VALUETYPE *p = &ms->ms_value;
-
- switch (m->type) {
- case FILE_BYTE:
- v = p->b;
- break;
-
- case FILE_SHORT:
- case FILE_BESHORT:
- case FILE_LESHORT:
- case FILE_MSDOSDATE:
- case FILE_LEMSDOSDATE:
- case FILE_BEMSDOSDATE:
- case FILE_MSDOSTIME:
- case FILE_LEMSDOSTIME:
- case FILE_BEMSDOSTIME:
- v = p->h;
- break;
-
- case FILE_LONG:
- case FILE_BELONG:
- case FILE_LELONG:
- case FILE_MELONG:
- case FILE_DATE:
- case FILE_BEDATE:
- case FILE_LEDATE:
- case FILE_MEDATE:
- case FILE_LDATE:
- case FILE_BELDATE:
- case FILE_LELDATE:
- case FILE_MELDATE:
- v = p->l;
- break;
-
- case FILE_QUAD:
- case FILE_LEQUAD:
- case FILE_BEQUAD:
- case FILE_QDATE:
- case FILE_BEQDATE:
- case FILE_LEQDATE:
- case FILE_QLDATE:
- case FILE_BEQLDATE:
- case FILE_LEQLDATE:
- case FILE_QWDATE:
- case FILE_BEQWDATE:
- case FILE_LEQWDATE:
- case FILE_OFFSET:
- v = p->q;
- break;
-
- case FILE_FLOAT:
- case FILE_BEFLOAT:
- case FILE_LEFLOAT:
- fl = m->value.f;
- fv = p->f;
- switch (m->reln) {
- case 'x':
- matched = 1;
- break;
-
- case '!':
- matched = isunordered(fl, fv) ? 1 : fv != fl;
- break;
-
- case '=':
- matched = isunordered(fl, fv) ? 0 : fv == fl;
- break;
-
- case '>':
- matched = isgreater(fv, fl);
- break;
-
- case '<':
- matched = isless(fv, fl);
- break;
-
- default:
- file_magerror(ms, "cannot happen with float: "
- "invalid relation `%c'", m->reln);
- return -1;
- }
- return matched;
-
- case FILE_DOUBLE:
- case FILE_BEDOUBLE:
- case FILE_LEDOUBLE:
- dl = m->value.d;
- dv = p->d;
- switch (m->reln) {
- case 'x':
- matched = 1;
- break;
-
- case '!':
- matched = isunordered(dv, dl) ? 1 : dv != dl;
- break;
-
- case '=':
- matched = isunordered(dv, dl) ? 0 : dv == dl;
- break;
-
- case '>':
- matched = isgreater(dv, dl);
- break;
-
- case '<':
- matched = isless(dv, dl);
- break;
-
- default:
- file_magerror(ms, "cannot happen with double: "
- "invalid relation `%c'", m->reln);
- return -1;
- }
- return matched;
-
- case FILE_DEFAULT:
- case FILE_CLEAR:
- l = 0;
- v = 0;
- break;
-
- case FILE_STRING:
- case FILE_PSTRING:
- case FILE_OCTAL:
- l = 0;
- v = file_strncmp(m->value.s, p->s, CAST(size_t, m->vallen),
- sizeof(p->s), m->str_flags);
- break;
-
- case FILE_BESTRING16:
- case FILE_LESTRING16:
- l = 0;
- v = file_strncmp16(m->value.s, p->s, CAST(size_t, m->vallen),
- sizeof(p->s), m->str_flags);
- break;
-
- case FILE_SEARCH: { /* search ms->search.s for the string m->value.s */
- size_t slen;
- size_t idx;
-
- if (ms->search.s == NULL)
- return 0;
-
- slen = MIN(m->vallen, sizeof(m->value.s));
- l = 0;
- v = 0;
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- size_t xlen = ms->search.s_len > 100 ? 100
- : ms->search.s_len;
-
- fprintf(stderr, "search: [");
- file_showstr(stderr, ms->search.s, xlen);
- fprintf(stderr, "%s] for [", ms->search.s_len == xlen
- ? "" : "...");
- file_showstr(stderr, m->value.s, slen);
- }
-#ifdef HAVE_MEMMEM
- if (slen > 0 && m->str_flags == 0) {
- const char *found;
- idx = m->str_range + slen;
- if (m->str_range == 0 || ms->search.s_len < idx)
- idx = ms->search.s_len;
- found = CAST(const char *, memmem(ms->search.s, idx,
- m->value.s, slen));
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- fprintf(stderr, "] %sfound\n",
- found ? "" : "not ");
- }
- if (!found) {
- v = 1;
- break;
- }
- idx = found - ms->search.s;
- ms->search.offset += idx;
- ms->search.rm_len = ms->search.s_len - idx;
- break;
- }
-#endif
-
- for (idx = 0; m->str_range == 0 || idx < m->str_range; idx++) {
- if (slen + idx > ms->search.s_len) {
- v = 1;
- break;
- }
-
- v = file_strncmp(m->value.s, ms->search.s + idx, slen,
- ms->search.s_len - idx, m->str_flags);
- if (v == 0) { /* found match */
- ms->search.offset += idx;
- ms->search.rm_len = ms->search.s_len - idx;
- break;
- }
- }
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- fprintf(stderr, "] %sfound\n", v == 0 ? "" : "not ");
- }
- break;
- }
- case FILE_REGEX: {
- int rc;
- file_regex_t *rx = *m_cache;
- const char *search;
- regmatch_t pmatch;
- size_t slen = ms->search.s_len;
- char *copy;
-
- if (ms->search.s == NULL)
- return 0;
-
- if (rx == NULL) {
- rx = *m_cache = alloc_regex(ms, m);
- if (rx == NULL)
- return -1;
- }
- l = 0;
- if (slen != 0) {
- copy = CAST(char *, malloc(slen));
- if (copy == NULL) {
- file_error(ms, errno,
- "can't allocate %" SIZE_T_FORMAT "u bytes",
- slen);
- return -1;
- }
- memcpy(copy, ms->search.s, slen);
- copy[--slen] = '\0';
- search = copy;
- } else {
- search = CCAST(char *, "");
- copy = NULL;
- }
- rc = file_regexec(ms, rx, RCAST(const char *, search),
- 1, &pmatch, 0);
- free(copy);
- switch (rc) {
- case 0:
- ms->search.s += CAST(int, pmatch.rm_so);
- ms->search.offset += CAST(size_t, pmatch.rm_so);
- ms->search.rm_len = CAST(size_t,
- pmatch.rm_eo - pmatch.rm_so);
- v = 0;
- break;
-
- case REG_NOMATCH:
- v = 1;
- break;
-
- default:
- return -1;
- }
- break;
- }
- case FILE_USE:
- return ms->ms_value.q != 0;
- case FILE_NAME:
- case FILE_INDIRECT:
- return 1;
- case FILE_DER:
- matched = der_cmp(ms, m);
- if (matched == -1) {
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- (void) fprintf(stderr,
- "EOF comparing DER entries\n");
- }
- return 0;
- }
- return matched;
- case FILE_GUID:
- l = 0;
- v = memcmp(m->value.guid, p->guid, sizeof(p->guid));
- break;
- default:
- file_magerror(ms, "invalid type %d in magiccheck()", m->type);
- return -1;
- }
-
- v = file_signextend(ms, m, v);
-
- switch (m->reln) {
- case 'x':
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT
- "u == *any* = 1", CAST(unsigned long long, v));
- matched = 1;
- break;
-
- case '!':
- matched = v != l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT "u != %"
- INT64_T_FORMAT "u = %d",
- CAST(unsigned long long, v),
- CAST(unsigned long long, l), matched);
- break;
-
- case '=':
- matched = v == l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT "u == %"
- INT64_T_FORMAT "u = %d",
- CAST(unsigned long long, v),
- CAST(unsigned long long, l), matched);
- break;
-
- case '>':
- if (m->flag & UNSIGNED) {
- matched = v > l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT
- "u > %" INT64_T_FORMAT "u = %d",
- CAST(unsigned long long, v),
- CAST(unsigned long long, l), matched);
- }
- else {
- matched = CAST(int64_t, v) > CAST(int64_t, l);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT
- "d > %" INT64_T_FORMAT "d = %d",
- CAST(long long, v),
- CAST(long long, l), matched);
- }
- break;
-
- case '<':
- if (m->flag & UNSIGNED) {
- matched = v < l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT
- "u < %" INT64_T_FORMAT "u = %d",
- CAST(unsigned long long, v),
- CAST(unsigned long long, l), matched);
- }
- else {
- matched = CAST(int64_t, v) < CAST(int64_t, l);
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "%" INT64_T_FORMAT
- "d < %" INT64_T_FORMAT "d = %d",
- CAST(long long, v),
- CAST(long long, l), matched);
- }
- break;
-
- case '&':
- matched = (v & l) == l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
- INT64_T_FORMAT "x) == %" INT64_T_FORMAT
- "x) = %d", CAST(unsigned long long, v),
- CAST(unsigned long long, l),
- CAST(unsigned long long, l),
- matched);
- break;
-
- case '^':
- matched = (v & l) != l;
- if ((ms->flags & MAGIC_DEBUG) != 0)
- (void) fprintf(stderr, "((%" INT64_T_FORMAT "x & %"
- INT64_T_FORMAT "x) != %" INT64_T_FORMAT
- "x) = %d", CAST(unsigned long long, v),
- CAST(unsigned long long, l),
- CAST(unsigned long long, l), matched);
- break;
-
- default:
- file_magerror(ms, "cannot happen: invalid relation `%c'",
- m->reln);
- return -1;
- }
- if ((ms->flags & MAGIC_DEBUG) != 0) {
- (void) fprintf(stderr, " strength=%zu\n",
- file_magic_strength(m, 1));
- }
-
- return matched;
-}
-
-file_private int
-handle_annotation(struct magic_set *ms, struct magic *m, int firstline)
-{
- if ((ms->flags & MAGIC_APPLE) && m->apple[0]) {
- if (print_sep(ms, firstline) == -1)
- return -1;
- if (file_printf(ms, "%.8s", m->apple) == -1)
- return -1;
- return 1;
- }
- if ((ms->flags & MAGIC_EXTENSION) && m->ext[0]) {
- if (print_sep(ms, firstline) == -1)
- return -1;
- if (file_printf(ms, "%s", m->ext) == -1)
- return -1;
- return 1;
- }
- if ((ms->flags & MAGIC_MIME_TYPE) && m->mimetype[0]) {
- char buf[1024];
- const char *p;
- if (print_sep(ms, firstline) == -1)
- return -1;
- if (varexpand(ms, buf, sizeof(buf), m->mimetype) == -1)
- p = m->mimetype;
- else
- p = buf;
- if (file_printf(ms, "%s", p) == -1)
- return -1;
- return 1;
- }
- return 0;
-}
-
-file_private int
-print_sep(struct magic_set *ms, int firstline)
-{
- if (firstline)
- return 0;
- /*
- * we found another match
- * put a newline and '-' to do some simple formatting
- */
- return file_separator(ms);
-}
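
For reference, the magiccheck() tail deleted above funnels every numeric test through the switch on m->reln ('x', '!', '=', '>', '<', '&', '^'), honoring the UNSIGNED flag for the ordered comparisons. A minimal Python sketch of that decision table, assuming 64-bit magic values (an illustration only, not libmagic code; the helper name is hypothetical):

# Hypothetical sketch of the relational switch in magiccheck(); not libmagic code.
def relation_matches(reln, v, l, unsigned=True, bits=64):
    """Mimic the `switch (m->reln)` above for fixed-width magic values."""
    def signed(x):
        # Reinterpret an unsigned value as signed, like CAST(int64_t, v) above.
        return x - (1 << bits) if x >= (1 << (bits - 1)) else x

    if reln == 'x':                 # 'x' matches anything
        return True
    if reln == '!':
        return v != l
    if reln == '=':
        return v == l
    if reln == '>':
        return v > l if unsigned else signed(v) > signed(l)
    if reln == '<':
        return v < l if unsigned else signed(v) < signed(l)
    if reln == '&':                 # all bits of l must be set in v
        return (v & l) == l
    if reln == '^':                 # at least one bit of l must be clear in v
        return (v & l) != l
    raise ValueError("invalid relation %r" % reln)

assert relation_matches('&', 0xFF, 0x0F)
assert not relation_matches('>', 0xFFFFFFFFFFFFFFFF, 0, unsigned=False)  # -1 > 0 is false

The float/double branches earlier in the function bypass this switch entirely and return their result directly, which is why they carry their own isunordered() handling.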
diff --git a/contrib/libs/libmagic/src/tar.h b/contrib/libs/libmagic/src/tar.h
deleted file mode 100644
index ced4f39398..0000000000
--- a/contrib/libs/libmagic/src/tar.h
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (c) Ian F. Darwin 1986-1995.
- * Software written by Ian F. Darwin and others;
- * maintained 1995-present by Christos Zoulas and others.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice immediately at the beginning of the file, without modification,
- * this list of conditions, and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
- * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- */
-/*
- * Header file for public domain tar (tape archive) program.
- *
- * @(#)tar.h 1.20 86/10/29 Public Domain.
- *
- * Created 25 August 1985 by John Gilmore, ihnp4!hoptoad!gnu.
- *
- * $File: tar.h,v 1.16 2022/12/26 17:31:14 christos Exp $ # checkin only
- */
-
-/*
- * Header block on tape.
- *
- * I'm going to use traditional DP naming conventions here.
- * A "block" is a big chunk of stuff that we do I/O on.
- * A "record" is a piece of info that we care about.
- * Typically many "record"s fit into a "block".
- */
-#define RECORDSIZE 512
-#define NAMSIZ 100
-#define TUNMLEN 32
-#define TGNMLEN 32
-
-union record {
- unsigned char charptr[RECORDSIZE];
- struct header {
- char name[NAMSIZ];
- char mode[8];
- char uid[8];
- char gid[8];
- char size[12];
- char mtime[12];
- char chksum[8];
- char linkflag;
- char linkname[NAMSIZ];
- char magic[8];
- char uname[TUNMLEN];
- char gname[TGNMLEN];
- char devmajor[8];
- char devminor[8];
- } header;
-};
-
-/* The magic field is filled with this if uname and gname are valid. */
-#define TMAGIC "ustar" /* 5 chars and a null */
-#define GNUTMAGIC "ustar " /* 7 chars and a null */
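
The union record above fixes the field widths of a tar header inside one 512-byte block (RECORDSIZE). A hedged Python sketch that unpacks those fields with struct, following the widths given by NAMSIZ, TUNMLEN and TGNMLEN (illustrative, not code from this tree):

# Illustrative parser for the 512-byte tar header described above; not project code.
import struct

RECORDSIZE = 512
# Field widths follow struct header: name, mode, uid, gid, size, mtime,
# chksum, linkflag, linkname, magic, uname, gname, devmajor, devminor.
HEADER_FMT = "100s8s8s8s12s12s8s1s100s8s32s32s8s8s"
FIELDS = ("name", "mode", "uid", "gid", "size", "mtime", "chksum",
          "linkflag", "linkname", "magic", "uname", "gname",
          "devmajor", "devminor")

def parse_header(block):
    """Split a 512-byte record into the fields of `struct header`."""
    assert len(block) == RECORDSIZE
    used = struct.calcsize(HEADER_FMT)          # 345 bytes; the rest is padding
    values = struct.unpack(HEADER_FMT, block[:used])
    fields = dict(zip(FIELDS, values))
    # TMAGIC ("ustar") vs GNUTMAGIC ("ustar  ") both start with the same prefix.
    fields["is_ustar"] = fields["magic"].startswith(b"ustar")
    return fields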
diff --git a/contrib/libs/libmagic/src/ya.make b/contrib/libs/libmagic/src/ya.make
deleted file mode 100644
index 56433e73c2..0000000000
--- a/contrib/libs/libmagic/src/ya.make
+++ /dev/null
@@ -1,60 +0,0 @@
-# Generated by devtools/yamaker.
-
-LIBRARY()
-
-LICENSE(
- BSD-2-Clause AND
- Bsd-Simplified-Darwin AND
- Public-Domain
-)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-PEERDIR(
- contrib/libs/libc_compat
- contrib/libs/zlib
- library/cpp/resource
-)
-
-ADDINCL(
- contrib/libs/libmagic
- contrib/libs/libmagic/src
-)
-
-NO_COMPILER_WARNINGS()
-
-CFLAGS(
- -DHAVE_CONFIG_H
- -DMAGIC=\"res@/magic/magic.mgc\"
-)
-
-SRCS(
- apprentice.c
- apptype.c
- ascmagic.c
- buffer.c
- cdf.c
- cdf_time.c
- compress.c
- der.c
- encoding.c
- fmtcheck.c
- fsmagic.c
- funcs.c
- is_csv.c
- is_json.c
- is_simh.c
- is_tar.c
- magic.c
- print.c
- readcdf.c
- readelf.c
- res.cpp
- softmagic.c
-)
-
-END()
-
-RECURSE(
- file
-)
diff --git a/contrib/libs/libmagic/ya.make b/contrib/libs/libmagic/ya.make
deleted file mode 100644
index fb83b2a780..0000000000
--- a/contrib/libs/libmagic/ya.make
+++ /dev/null
@@ -1,28 +0,0 @@
-# Generated by devtools/yamaker from nixpkgs 22.11.
-
-LIBRARY()
-
-LICENSE(Bsd-Simplified-Darwin)
-
-LICENSE_TEXTS(.yandex_meta/licenses.list.txt)
-
-VERSION(5.45)
-
-ORIGINAL_SOURCE(https://github.com/file/file/archive/FILE5_45.tar.gz)
-
-PEERDIR(
- contrib/libs/libmagic/magic
- contrib/libs/libmagic/src
-)
-
-ADDINCL(
- GLOBAL contrib/libs/libmagic/include
-)
-
-END()
-
-RECURSE(
- file
- magic
- src
-)
diff --git a/contrib/libs/python/Include/opcode.h b/contrib/libs/python/Include/opcode.h
deleted file mode 100644
index add566d1ce..0000000000
--- a/contrib/libs/python/Include/opcode.h
+++ /dev/null
@@ -1,7 +0,0 @@
-#pragma once
-
-#ifdef USE_PYTHON3
-#include <contrib/tools/python3/src/Include/opcode.h>
-#else
-#include <contrib/tools/python/src/Include/opcode.h>
-#endif
diff --git a/contrib/python/coverage/plugins/coveragerc.txt b/contrib/python/coverage/plugins/coveragerc.txt
deleted file mode 100644
index 83bfed8690..0000000000
--- a/contrib/python/coverage/plugins/coveragerc.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-[report]
-skip_empty = True
-
-exclude_lines =
- pragma\s*:\s*no\s*cover
- def __repr__
- raise AssertionError
- raise NotImplementedError
- if 0:
- if False:
- if __name__ == .__main__.:
- if self\.debug:
- if settings\.DEBUG
-
-[run]
-suppress_plugin_errors = False
-plugins =
- contrib.python.coverage.plugins.yarcadia.plugin
- contrib.tools.cython.Cython.Coverage
-
-[contrib.python.coverage.plugins.yarcadia.plugin]
-pylib_paths =
- # don't trace contrib
- contrib/python
- contrib/python3
- # don't trace python sources
- contrib/tools/python
- contrib/tools/python3
- contrib/libs/protobuf
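
The coveragerc above is plain INI, so the plugin list and exclusion regexes it carries can be inspected with the standard library. A small hedged sketch (the path "coveragerc.txt" is an assumption about where the file is available):

# Hedged sketch: read the multi-line options from the coveragerc shown above.
import configparser

cfg = configparser.ConfigParser()
cfg.read("coveragerc.txt")  # assumed path; adjust to where the file lives

# Multi-line values come back as newline-separated strings.
plugins = [p for p in cfg.get("run", "plugins").splitlines() if p]
excludes = [e for e in cfg.get("report", "exclude_lines").splitlines() if e]

print("plugins:", plugins)
print("exclude patterns:", excludes)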
diff --git a/contrib/python/coverage/plugins/ya.make b/contrib/python/coverage/plugins/ya.make
deleted file mode 100644
index 30be33f72a..0000000000
--- a/contrib/python/coverage/plugins/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- build/plugins/lib/test_const
- contrib/tools/cython/Cython
- library/python/testing/coverage_utils
-)
-
-PY_SRCS(
- yarcadia/plugin.py
-)
-
-RESOURCE(
- coveragerc.txt /coverage_plugins/coveragerc.txt
-)
-
-END()
diff --git a/contrib/python/coverage/plugins/yarcadia/plugin.py b/contrib/python/coverage/plugins/yarcadia/plugin.py
deleted file mode 100644
index 44d9b003ca..0000000000
--- a/contrib/python/coverage/plugins/yarcadia/plugin.py
+++ /dev/null
@@ -1,114 +0,0 @@
-# coding: utf-8
-
-import os
-
-import coverage.config
-import coverage.files
-import coverage.misc
-import coverage.parser
-import coverage.plugin
-import coverage.python
-
-from build.plugins.lib import test_const
-from library.python.testing import coverage_utils
-
-
-SKIP_FILENAME = '__SKIP_FILENAME__'
-
-
-class YarcadiaPlugin(
- coverage.plugin.CoveragePlugin,
- coverage.plugin.FileTracer
-):
-
- def __init__(self, options):
- self.config = coverage.config.CoverageConfig()
- self.config.from_args(**options)
-
- dirs = options.get("pylib_paths", "").split("\n")
- dirs = [d for d in dirs if d and not d.startswith("#")]
- self.pylib_paths = dirs
-
- self._filename = None
- self._exclude = None
-
- self._setup_file_filter()
-
- def _setup_file_filter(self):
- prefix_filter = os.environ.get('PYTHON_COVERAGE_PREFIX_FILTER', '')
- exclude_regexp = os.environ.get('PYTHON_COVERAGE_EXCLUDE_REGEXP', '')
- self.file_filter = coverage_utils.make_filter(prefix_filter, exclude_regexp)
-
- def configure(self, config):
- self._exclude = coverage.misc.join_regex(config.get_option('report:exclude_lines'))
-
- def get_pylib_paths(self):
- return self.pylib_paths
-
- def file_tracer(self, filename):
- if not filename.endswith(test_const.COVERAGE_PYTHON_EXTS):
- # Catch all generated modules (__file__ without proper extension)
- self._filename = SKIP_FILENAME
- return self
-
- if not self.file_filter(filename):
- # we need to catch all filtered out files (including cython) to pass them to get_source
- self._filename = SKIP_FILENAME
- return self
-
- if filename.endswith(".py"):
- self._filename = filename
- return self
-
-        # Let the cython plugin register its own file tracer for pyx/pxi files
- return None
-
- def has_dynamic_source_filename(self):
- return False
-
- def source_filename(self):
- return self._filename
-
- def file_reporter(self, morf):
- source_root = os.environ.get("PYTHON_COVERAGE_ARCADIA_SOURCE_ROOT")
- if source_root:
- return FileReporter(morf, source_root, self, self._exclude)
- # use default file reporter
- return "python"
-
-
-class FileReporter(coverage.python.PythonFileReporter):
-
- def __init__(self, morf, source_root, coverage=None, exclude=None):
- super(FileReporter, self).__init__(morf, coverage)
- self._source = get_source(morf, source_root)
- # use custom parser to provide proper way to get required source
- self._parser = Parser(morf, self._source, exclude)
- self._parser.parse_source()
-
-
-class Parser(coverage.parser.PythonParser):
-
- def __init__(self, morf, source_code, exclude):
- # provide source code to avoid default way to get it
- super(Parser, self).__init__(text=source_code, filename=morf, exclude=exclude)
-
-
-def get_source(filename, source_root):
- assert source_root
-
- if filename == SKIP_FILENAME:
- return ''
-
- abs_filename = os.path.join(source_root, filename)
- if not os.path.isfile(abs_filename):
- # it's fake generated package
- return u''
-
- return coverage.python.get_python_source(abs_filename, force_fs=True)
-
-
-def coverage_init(reg, options):
- plugin = YarcadiaPlugin(options)
- reg.add_configurer(plugin)
- reg.add_file_tracer(plugin)
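
The yarcadia plugin above follows coverage.py's plugin protocol: coverage_init() registers the object, file_tracer() either claims a file (returning self) or declines (returning None), and source_filename() tells coverage which file to attribute. A stripped-down hedged sketch of that same protocol, illustrative only and not the Yarcadia plugin itself:

# Minimal coverage.py plugin skeleton mirroring the protocol used above.
import coverage.plugin


class ExamplePlugin(coverage.plugin.CoveragePlugin, coverage.plugin.FileTracer):
    """Claims plain .py files and reports their filenames unchanged."""

    def __init__(self, options):
        self.options = options
        self._filename = None

    def file_tracer(self, filename):
        if filename.endswith(".py"):
            self._filename = filename
            return self          # we will trace this file ourselves
        return None              # let other plugins or the default tracer handle it

    def has_dynamic_source_filename(self):
        return False

    def source_filename(self):
        return self._filename


def coverage_init(reg, options):
    plugin = ExamplePlugin(options)
    reg.add_file_tracer(plugin)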
diff --git a/contrib/python/coverage/py2/.dist-info/METADATA b/contrib/python/coverage/py2/.dist-info/METADATA
deleted file mode 100644
index 25a6049c45..0000000000
--- a/contrib/python/coverage/py2/.dist-info/METADATA
+++ /dev/null
@@ -1,190 +0,0 @@
-Metadata-Version: 2.1
-Name: coverage
-Version: 5.5
-Summary: Code coverage measurement for Python
-Home-page: https://github.com/nedbat/coveragepy
-Author: Ned Batchelder and 142 others
-Author-email: ned@nedbatchelder.com
-License: Apache 2.0
-Project-URL: Documentation, https://coverage.readthedocs.io
-Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi
-Project-URL: Issues, https://github.com/nedbat/coveragepy/issues
-Keywords: code coverage testing
-Platform: UNKNOWN
-Classifier: Environment :: Console
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Software Development :: Quality Assurance
-Classifier: Topic :: Software Development :: Testing
-Classifier: Development Status :: 5 - Production/Stable
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4
-Description-Content-Type: text/x-rst
-Provides-Extra: toml
-Requires-Dist: toml ; extra == 'toml'
-
-.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-===========
-Coverage.py
-===========
-
-Code coverage testing for Python.
-
-| |license| |versions| |status|
-| |test-status| |quality-status| |docs| |codecov|
-| |kit| |format| |repos| |downloads|
-| |stars| |forks| |contributors|
-| |tidelift| |twitter-coveragepy| |twitter-nedbat|
-
-Coverage.py measures code coverage, typically during test execution. It uses
-the code analysis tools and tracing hooks provided in the Python standard
-library to determine which lines are executable, and which have been executed.
-
-Coverage.py runs on many versions of Python:
-
-* CPython 2.7.
-* CPython 3.5 through 3.10 alpha.
-* PyPy2 7.3.3 and PyPy3 7.3.3.
-
-Documentation is on `Read the Docs`_. Code repository and issue tracker are on
-`GitHub`_.
-
-.. _Read the Docs: https://coverage.readthedocs.io/
-.. _GitHub: https://github.com/nedbat/coveragepy
-
-
-**New in 5.x:** SQLite data storage, JSON report, contexts, relative filenames,
-dropped support for Python 2.6, 3.3 and 3.4.
-
-
-For Enterprise
---------------
-
-.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png
- :alt: Tidelift
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
-
-.. list-table::
- :widths: 10 100
-
- * - |tideliftlogo|
- - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
- Coverage and thousands of other packages are working with
- Tidelift to deliver one enterprise subscription that covers all of the open
- source you use. If you want the flexibility of open source and the confidence
- of commercial-grade software, this is for you.
- `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
-
-
-Getting Started
----------------
-
-See the `Quick Start section`_ of the docs.
-
-.. _Quick Start section: https://coverage.readthedocs.io/#quick-start
-
-
-Change history
---------------
-
-The complete history of changes is on the `change history page`_.
-
-.. _change history page: https://coverage.readthedocs.io/en/latest/changes.html
-
-
-Contributing
-------------
-
-See the `Contributing section`_ of the docs.
-
-.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
-
-
-Security
---------
-
-To report a security vulnerability, please use the `Tidelift security
-contact`_. Tidelift will coordinate the fix and disclosure.
-
-.. _Tidelift security contact: https://tidelift.com/security
-
-
-License
--------
-
-Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_.
-
-.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
-.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-
-.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml
- :alt: Test suite status
-.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml
- :alt: Quality check status
-.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
- :target: https://coverage.readthedocs.io/
- :alt: Documentation
-.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
- :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
- :alt: Requirements status
-.. |kit| image:: https://badge.fury.io/py/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: PyPI status
-.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Kit format
-.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Weekly PyPI downloads
-.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072
- :target: https://pypi.org/project/coverage/
- :alt: Python versions supported
-.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Package stability
-.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: License
-.. |codecov| image:: https://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
- :target: https://codecov.io/github/nedbat/coveragepy?branch=master
- :alt: Coverage!
-.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg
- :target: https://repology.org/metapackage/python:coverage/versions
- :alt: Packaging status
-.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
- :alt: Tidelift
-.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/stargazers
- :alt: Github stars
-.. |forks| image:: https://img.shields.io/github/forks/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/network/members
- :alt: Github forks
-.. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/graphs/contributors
- :alt: Contributors
-.. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/coveragepy
- :alt: coverage.py on Twitter
-.. |twitter-nedbat| image:: https://img.shields.io/twitter/follow/nedbat.svg?label=nedbat&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/nedbat
- :alt: nedbat on Twitter
-
-
diff --git a/contrib/python/coverage/py2/.dist-info/entry_points.txt b/contrib/python/coverage/py2/.dist-info/entry_points.txt
deleted file mode 100644
index cd083fc1ff..0000000000
--- a/contrib/python/coverage/py2/.dist-info/entry_points.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-[console_scripts]
-coverage = coverage.cmdline:main
-coverage-3.9 = coverage.cmdline:main
-coverage3 = coverage.cmdline:main
-
diff --git a/contrib/python/coverage/py2/.dist-info/top_level.txt b/contrib/python/coverage/py2/.dist-info/top_level.txt
deleted file mode 100644
index 4ebc8aea50..0000000000
--- a/contrib/python/coverage/py2/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-coverage
diff --git a/contrib/python/coverage/py2/LICENSE.txt b/contrib/python/coverage/py2/LICENSE.txt
deleted file mode 100644
index f433b1a53f..0000000000
--- a/contrib/python/coverage/py2/LICENSE.txt
+++ /dev/null
@@ -1,177 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/contrib/python/coverage/py2/NOTICE.txt b/contrib/python/coverage/py2/NOTICE.txt
deleted file mode 100644
index 37ded535bf..0000000000
--- a/contrib/python/coverage/py2/NOTICE.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Copyright 2001 Gareth Rees. All rights reserved.
-Copyright 2004-2021 Ned Batchelder. All rights reserved.
-
-Except where noted otherwise, this software is licensed under the Apache
-License, Version 2.0 (the "License"); you may not use this work except in
-compliance with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/contrib/python/coverage/py2/README.rst b/contrib/python/coverage/py2/README.rst
deleted file mode 100644
index 072f30ffeb..0000000000
--- a/contrib/python/coverage/py2/README.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-===========
-Coverage.py
-===========
-
-Code coverage testing for Python.
-
-| |license| |versions| |status|
-| |test-status| |quality-status| |docs| |codecov|
-| |kit| |format| |repos| |downloads|
-| |stars| |forks| |contributors|
-| |tidelift| |twitter-coveragepy| |twitter-nedbat|
-
-Coverage.py measures code coverage, typically during test execution. It uses
-the code analysis tools and tracing hooks provided in the Python standard
-library to determine which lines are executable, and which have been executed.
-
-Coverage.py runs on many versions of Python:
-
-* CPython 2.7.
-* CPython 3.5 through 3.10 alpha.
-* PyPy2 7.3.3 and PyPy3 7.3.3.
-
-Documentation is on `Read the Docs`_. Code repository and issue tracker are on
-`GitHub`_.
-
-.. _Read the Docs: https://coverage.readthedocs.io/
-.. _GitHub: https://github.com/nedbat/coveragepy
-
-
-**New in 5.x:** SQLite data storage, JSON report, contexts, relative filenames,
-dropped support for Python 2.6, 3.3 and 3.4.
-
-
-For Enterprise
---------------
-
-.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png
- :alt: Tidelift
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
-
-.. list-table::
- :widths: 10 100
-
- * - |tideliftlogo|
- - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
- Coverage and thousands of other packages are working with
- Tidelift to deliver one enterprise subscription that covers all of the open
- source you use. If you want the flexibility of open source and the confidence
- of commercial-grade software, this is for you.
- `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
-
-
-Getting Started
----------------
-
-See the `Quick Start section`_ of the docs.
-
-.. _Quick Start section: https://coverage.readthedocs.io/#quick-start
-
-
-Change history
---------------
-
-The complete history of changes is on the `change history page`_.
-
-.. _change history page: https://coverage.readthedocs.io/en/latest/changes.html
-
-
-Contributing
-------------
-
-See the `Contributing section`_ of the docs.
-
-.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
-
-
-Security
---------
-
-To report a security vulnerability, please use the `Tidelift security
-contact`_. Tidelift will coordinate the fix and disclosure.
-
-.. _Tidelift security contact: https://tidelift.com/security
-
-
-License
--------
-
-Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_.
-
-.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
-.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-
-.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml
- :alt: Test suite status
-.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml
- :alt: Quality check status
-.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
- :target: https://coverage.readthedocs.io/
- :alt: Documentation
-.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
- :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
- :alt: Requirements status
-.. |kit| image:: https://badge.fury.io/py/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: PyPI status
-.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Kit format
-.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Weekly PyPI downloads
-.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072
- :target: https://pypi.org/project/coverage/
- :alt: Python versions supported
-.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Package stability
-.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: License
-.. |codecov| image:: https://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
- :target: https://codecov.io/github/nedbat/coveragepy?branch=master
- :alt: Coverage!
-.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg
- :target: https://repology.org/metapackage/python:coverage/versions
- :alt: Packaging status
-.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
- :alt: Tidelift
-.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/stargazers
- :alt: Github stars
-.. |forks| image:: https://img.shields.io/github/forks/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/network/members
- :alt: Github forks
-.. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/graphs/contributors
- :alt: Contributors
-.. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/coveragepy
- :alt: coverage.py on Twitter
-.. |twitter-nedbat| image:: https://img.shields.io/twitter/follow/nedbat.svg?label=nedbat&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/nedbat
- :alt: nedbat on Twitter
diff --git a/contrib/python/coverage/py2/coverage/__init__.py b/contrib/python/coverage/py2/coverage/__init__.py
deleted file mode 100644
index 331b304b68..0000000000
--- a/contrib/python/coverage/py2/coverage/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Code coverage measurement for Python.
-
-Ned Batchelder
-https://nedbatchelder.com/code/coverage
-
-"""
-
-import sys
-
-from coverage.version import __version__, __url__, version_info
-
-from coverage.control import Coverage, process_startup
-from coverage.data import CoverageData
-from coverage.misc import CoverageException
-from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
-from coverage.pytracer import PyTracer
-
-# Backward compatibility.
-coverage = Coverage
-
-# On Windows, we encode and decode deep enough that something goes wrong and
-# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
-# Adding a reference here prevents it from being unloaded. Yuk.
-import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order
-
-# Because of the "from coverage.control import fooey" lines at the top of the
-# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
-# This makes some inspection tools (like pydoc) unable to find the class
-# coverage.coverage. So remove that entry.
-try:
- del sys.modules['coverage.coverage']
-except KeyError:
- pass
diff --git a/contrib/python/coverage/py2/coverage/__main__.py b/contrib/python/coverage/py2/coverage/__main__.py
deleted file mode 100644
index 79aa4e2b35..0000000000
--- a/contrib/python/coverage/py2/coverage/__main__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Coverage.py's main entry point."""
-
-import sys
-from coverage.cmdline import main
-sys.exit(main())
diff --git a/contrib/python/coverage/py2/coverage/annotate.py b/contrib/python/coverage/py2/coverage/annotate.py
deleted file mode 100644
index 999ab6e557..0000000000
--- a/contrib/python/coverage/py2/coverage/annotate.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Source file annotation for coverage.py."""
-
-import io
-import os
-import re
-
-from coverage.files import flat_rootname
-from coverage.misc import ensure_dir, isolate_module
-from coverage.report import get_analysis_to_report
-
-os = isolate_module(os)
-
-
-class AnnotateReporter(object):
- """Generate annotated source files showing line coverage.
-
- This reporter creates annotated copies of the measured source files. Each
- .py file is copied as a .py,cover file, with a left-hand margin annotating
- each line::
-
- > def h(x):
- - if 0: #pragma: no cover
- - pass
- > if x == 1:
- ! a = 1
- > else:
- > a = 2
-
- > h(2)
-
- Executed lines use '>', lines not executed use '!', lines excluded from
- consideration use '-'.
-
- """
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.directory = None
-
- blank_re = re.compile(r"\s*(#|$)")
- else_re = re.compile(r"\s*else\s*:\s*(#|$)")
-
- def report(self, morfs, directory=None):
- """Run the report.
-
- See `coverage.report()` for arguments.
-
- """
- self.directory = directory
- self.coverage.get_data()
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.annotate_file(fr, analysis)
-
- def annotate_file(self, fr, analysis):
- """Annotate a single file.
-
- `fr` is the FileReporter for the file to annotate.
-
- """
- statements = sorted(analysis.statements)
- missing = sorted(analysis.missing)
- excluded = sorted(analysis.excluded)
-
- if self.directory:
- ensure_dir(self.directory)
- dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
- if dest_file.endswith("_py"):
- dest_file = dest_file[:-3] + ".py"
- dest_file += ",cover"
- else:
- dest_file = fr.filename + ",cover"
-
- with io.open(dest_file, 'w', encoding='utf8') as dest:
- i = 0
- j = 0
- covered = True
- source = fr.source()
- for lineno, line in enumerate(source.splitlines(True), start=1):
- while i < len(statements) and statements[i] < lineno:
- i += 1
- while j < len(missing) and missing[j] < lineno:
- j += 1
- if i < len(statements) and statements[i] == lineno:
- covered = j >= len(missing) or missing[j] > lineno
- if self.blank_re.match(line):
- dest.write(u' ')
- elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
- if i >= len(statements) and j >= len(missing):
- dest.write(u'! ')
- elif i >= len(statements) or j >= len(missing):
- dest.write(u'> ')
- elif statements[i] == missing[j]:
- dest.write(u'! ')
- else:
- dest.write(u'> ')
- elif lineno in excluded:
- dest.write(u'- ')
- elif covered:
- dest.write(u'> ')
- else:
- dest.write(u'! ')
-
- dest.write(line)
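
AnnotateReporter above is normally driven through the public API, and the markers from its docstring end up in *.py,cover files beside the sources. A short hedged usage sketch (my_module is hypothetical):

# Hedged sketch: produce the ",cover" annotated files described in the docstring above.
import coverage

cov = coverage.Coverage()
cov.start()
import my_module          # hypothetical code under measurement
cov.stop()

# Writes my_module.py,cover (and friends) into the given directory,
# using the '>', '!' and '-' prefixes documented in AnnotateReporter.
cov.annotate(directory="annotated")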
diff --git a/contrib/python/coverage/py2/coverage/backward.py b/contrib/python/coverage/py2/coverage/backward.py
deleted file mode 100644
index ac781ab96a..0000000000
--- a/contrib/python/coverage/py2/coverage/backward.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Add things to old Pythons so I can pretend they are newer."""
-
-# This file's purpose is to provide modules to be imported from here.
-# pylint: disable=unused-import
-
-import os
-import sys
-
-from datetime import datetime
-
-from coverage import env
-
-
-# Pythons 2 and 3 differ on where to get StringIO.
-try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
-
-# In py3, ConfigParser was renamed to the more-standard configparser.
-# But there's a py3 backport that installs "configparser" in py2, and I don't
-# want it because it has annoying deprecation warnings. So try the real py2
-# import first.
-try:
- import ConfigParser as configparser
-except ImportError:
- import configparser
-
-# What's a string called?
-try:
- string_class = basestring
-except NameError:
- string_class = str
-
-# What's a Unicode string called?
-try:
- unicode_class = unicode
-except NameError:
- unicode_class = str
-
-# range or xrange?
-try:
- range = xrange # pylint: disable=redefined-builtin
-except NameError:
- range = range
-
-try:
- from itertools import zip_longest
-except ImportError:
- from itertools import izip_longest as zip_longest
-
-# Where do we get the thread id from?
-try:
- from thread import get_ident as get_thread_id
-except ImportError:
- from threading import get_ident as get_thread_id
-
-try:
- os.PathLike
-except AttributeError:
- # This is Python 2 and 3
- path_types = (bytes, string_class, unicode_class)
-else:
- # 3.6+
- path_types = (bytes, str, os.PathLike)
-
-# shlex.quote is new, but there's an undocumented implementation in "pipes",
-# who knew!?
-try:
- from shlex import quote as shlex_quote
-except ImportError:
- # Useful function, available under a different (undocumented) name
- # in Python versions earlier than 3.3.
- from pipes import quote as shlex_quote
-
-try:
- import reprlib
-except ImportError: # pragma: not covered
- # We need this on Python 2, but in testing environments, a backport is
- # installed, so this import isn't used.
- import repr as reprlib
-
-# A function to iterate listlessly over a dict's items, and one to get the
-# items as a list.
-try:
- {}.iteritems
-except AttributeError:
- # Python 3
- def iitems(d):
- """Produce the items from dict `d`."""
- return d.items()
-
- def litems(d):
- """Return a list of items from dict `d`."""
- return list(d.items())
-else:
- # Python 2
- def iitems(d):
- """Produce the items from dict `d`."""
- return d.iteritems()
-
- def litems(d):
- """Return a list of items from dict `d`."""
- return d.items()
-
-# Getting the `next` function from an iterator is different in 2 and 3.
-try:
- iter([]).next
-except AttributeError:
- def iternext(seq):
- """Get the `next` function for iterating over `seq`."""
- return iter(seq).__next__
-else:
- def iternext(seq):
- """Get the `next` function for iterating over `seq`."""
- return iter(seq).next
-
-# Python 3.x is picky about bytes and strings, so provide methods to
-# get them right, and make them no-ops in 2.x
-if env.PY3:
- def to_bytes(s):
- """Convert string `s` to bytes."""
- return s.encode('utf8')
-
- def to_string(b):
- """Convert bytes `b` to string."""
- return b.decode('utf8')
-
- def binary_bytes(byte_values):
- """Produce a byte string with the ints from `byte_values`."""
- return bytes(byte_values)
-
- def byte_to_int(byte):
- """Turn a byte indexed from a bytes object into an int."""
- return byte
-
- def bytes_to_ints(bytes_value):
- """Turn a bytes object into a sequence of ints."""
- # In Python 3, iterating bytes gives ints.
- return bytes_value
-
-else:
- def to_bytes(s):
- """Convert string `s` to bytes (no-op in 2.x)."""
- return s
-
- def to_string(b):
- """Convert bytes `b` to string."""
- return b
-
- def binary_bytes(byte_values):
- """Produce a byte string with the ints from `byte_values`."""
- return "".join(chr(b) for b in byte_values)
-
- def byte_to_int(byte):
- """Turn a byte indexed from a bytes object into an int."""
- return ord(byte)
-
- def bytes_to_ints(bytes_value):
- """Turn a bytes object into a sequence of ints."""
- for byte in bytes_value:
- yield ord(byte)
-
-
-try:
- # In Python 2.x, the builtins were in __builtin__
- BUILTINS = sys.modules['__builtin__']
-except KeyError:
- # In Python 3.x, they're in builtins
- BUILTINS = sys.modules['builtins']
-
-
-# imp was deprecated in Python 3.3
-try:
- import importlib
- import importlib.util
- imp = None
-except ImportError:
- importlib = None
-
-# We only want to use importlib if it has everything we need.
-try:
- importlib_util_find_spec = importlib.util.find_spec
-except Exception:
- import imp
- importlib_util_find_spec = None
-
-# What is the .pyc magic number for this version of Python?
-try:
- PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
-except AttributeError:
- PYC_MAGIC_NUMBER = imp.get_magic()
-
-
-def code_object(fn):
- """Get the code object from a function."""
- try:
- return fn.func_code
- except AttributeError:
- return fn.__code__
-
-
-try:
- from types import SimpleNamespace
-except ImportError:
- # The code from https://docs.python.org/3/library/types.html#types.SimpleNamespace
- class SimpleNamespace:
- """Python implementation of SimpleNamespace, for Python 2."""
- def __init__(self, **kwargs):
- self.__dict__.update(kwargs)
-
- def __repr__(self):
- keys = sorted(self.__dict__)
- items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
- return "{}({})".format(type(self).__name__, ", ".join(items))
-
-
-def format_local_datetime(dt):
- """Return a string with local timezone representing the date.
- If python version is lower than 3.6, the time zone is not included.
- """
- try:
- return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
- except (TypeError, ValueError):
- # Datetime.astimezone in Python 3.5 can not handle naive datetime
- return dt.strftime('%Y-%m-%d %H:%M')
-
-
-def invalidate_import_caches():
- """Invalidate any import caches that may or may not exist."""
- if importlib and hasattr(importlib, "invalidate_caches"):
- importlib.invalidate_caches()
-
-
-def import_local_file(modname, modfile=None):
- """Import a local file as a module.
-
- Opens a file in the current directory named `modname`.py, imports it
- as `modname`, and returns the module object. `modfile` is the file to
- import if it isn't in the current directory.
-
- """
- try:
- import importlib.util as importlib_util
- except ImportError:
- importlib_util = None
-
- if modfile is None:
- modfile = modname + '.py'
- if importlib_util:
- spec = importlib_util.spec_from_file_location(modname, modfile)
- mod = importlib_util.module_from_spec(spec)
- sys.modules[modname] = mod
- spec.loader.exec_module(mod)
- else:
- for suff in imp.get_suffixes(): # pragma: part covered
- if suff[0] == '.py':
- break
-
- with open(modfile, 'r') as f:
- # pylint: disable=undefined-loop-variable
- mod = imp.load_module(modname, f, modfile, suff)
-
- return mod
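
backward.py exists so the rest of coverage.py can use one spelling for APIs that moved between Python 2 and 3. A hedged example of calling a few of the shims defined above, assuming a coverage 5.x install where coverage.backward is still importable:

# Hedged example: the shims above give 2/3-agnostic spellings to the rest of coverage.py.
from coverage.backward import iitems, litems, to_bytes, string_class

d = {"a": 1, "b": 2}
for key, value in iitems(d):      # d.iteritems() on py2, d.items() on py3
    print(key, value)

pairs = litems(d)                 # always a real list, on both versions
raw = to_bytes("magic")           # bytes on py3, a plain str (no-op) on py2
assert isinstance("x", string_class)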
diff --git a/contrib/python/coverage/py2/coverage/bytecode.py b/contrib/python/coverage/py2/coverage/bytecode.py
deleted file mode 100644
index ceb18cf374..0000000000
--- a/contrib/python/coverage/py2/coverage/bytecode.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Bytecode manipulation for coverage.py"""
-
-import types
-
-
-def code_objects(code):
- """Iterate over all the code objects in `code`."""
- stack = [code]
- while stack:
- # We're going to return the code object on the stack, but first
- # push its children for later returning.
- code = stack.pop()
- for c in code.co_consts:
- if isinstance(c, types.CodeType):
- stack.append(c)
- yield code
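
code_objects() above walks a compiled module with an explicit stack, yielding every nested code object. A hedged usage sketch:

# Hedged sketch: enumerate nested code objects the way bytecode.py's helper does.
from coverage.bytecode import code_objects

source = """
def outer():
    def inner():
        return 1
    return inner
"""
top = compile(source, "<example>", "exec")
names = [co.co_name for co in code_objects(top)]
print(names)   # e.g. ['<module>', 'outer', 'inner'] -- order follows the stack walk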
diff --git a/contrib/python/coverage/py2/coverage/cmdline.py b/contrib/python/coverage/py2/coverage/cmdline.py
deleted file mode 100644
index 0be0cca19f..0000000000
--- a/contrib/python/coverage/py2/coverage/cmdline.py
+++ /dev/null
@@ -1,910 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Command-line support for coverage.py."""
-
-from __future__ import print_function
-
-import glob
-import optparse
-import os.path
-import shlex
-import sys
-import textwrap
-import traceback
-
-import coverage
-from coverage import Coverage
-from coverage import env
-from coverage.collector import CTracer
-from coverage.data import line_counts
-from coverage.debug import info_formatter, info_header, short_stack
-from coverage.execfile import PyRunner
-from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource, output_encoding
-from coverage.results import should_fail_under
-
-
-class Opts(object):
- """A namespace class for individual options we'll build parsers from."""
-
- append = optparse.make_option(
- '-a', '--append', action='store_true',
- help="Append coverage data to .coverage, otherwise it starts clean each time.",
- )
- keep = optparse.make_option(
- '', '--keep', action='store_true',
- help="Keep original coverage files, otherwise they are deleted.",
- )
- branch = optparse.make_option(
- '', '--branch', action='store_true',
- help="Measure branch coverage in addition to statement coverage.",
- )
- CONCURRENCY_CHOICES = [
- "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
- ]
- concurrency = optparse.make_option(
- '', '--concurrency', action='store', metavar="LIB",
- choices=CONCURRENCY_CHOICES,
- help=(
- "Properly measure code using a concurrency library. "
- "Valid values are: %s."
- ) % ", ".join(CONCURRENCY_CHOICES),
- )
- context = optparse.make_option(
- '', '--context', action='store', metavar="LABEL",
- help="The context label to record for this coverage run.",
- )
- debug = optparse.make_option(
- '', '--debug', action='store', metavar="OPTS",
- help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
- )
- directory = optparse.make_option(
- '-d', '--directory', action='store', metavar="DIR",
- help="Write the output files to DIR.",
- )
- fail_under = optparse.make_option(
- '', '--fail-under', action='store', metavar="MIN", type="float",
- help="Exit with a status of 2 if the total coverage is less than MIN.",
- )
- help = optparse.make_option(
- '-h', '--help', action='store_true',
- help="Get help on this command.",
- )
- ignore_errors = optparse.make_option(
- '-i', '--ignore-errors', action='store_true',
- help="Ignore errors while reading source files.",
- )
- include = optparse.make_option(
- '', '--include', action='store',
- metavar="PAT1,PAT2,...",
- help=(
- "Include only files whose paths match one of these patterns. "
- "Accepts shell-style wildcards, which must be quoted."
- ),
- )
- pylib = optparse.make_option(
- '-L', '--pylib', action='store_true',
- help=(
- "Measure coverage even inside the Python installed library, "
- "which isn't done by default."
- ),
- )
- sort = optparse.make_option(
- '--sort', action='store', metavar='COLUMN',
- help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. "
- "Default is name."
- )
- show_missing = optparse.make_option(
- '-m', '--show-missing', action='store_true',
- help="Show line numbers of statements in each module that weren't executed.",
- )
- skip_covered = optparse.make_option(
- '--skip-covered', action='store_true',
- help="Skip files with 100% coverage.",
- )
- no_skip_covered = optparse.make_option(
- '--no-skip-covered', action='store_false', dest='skip_covered',
- help="Disable --skip-covered.",
- )
- skip_empty = optparse.make_option(
- '--skip-empty', action='store_true',
- help="Skip files with no code.",
- )
- show_contexts = optparse.make_option(
- '--show-contexts', action='store_true',
- help="Show contexts for covered lines.",
- )
- omit = optparse.make_option(
- '', '--omit', action='store',
- metavar="PAT1,PAT2,...",
- help=(
- "Omit files whose paths match one of these patterns. "
- "Accepts shell-style wildcards, which must be quoted."
- ),
- )
- contexts = optparse.make_option(
- '', '--contexts', action='store',
- metavar="REGEX1,REGEX2,...",
- help=(
- "Only display data from lines covered in the given contexts. "
- "Accepts Python regexes, which must be quoted."
- ),
- )
- output_xml = optparse.make_option(
- '-o', '', action='store', dest="outfile",
- metavar="OUTFILE",
- help="Write the XML report to this file. Defaults to 'coverage.xml'",
- )
- output_json = optparse.make_option(
- '-o', '', action='store', dest="outfile",
- metavar="OUTFILE",
- help="Write the JSON report to this file. Defaults to 'coverage.json'",
- )
- json_pretty_print = optparse.make_option(
- '', '--pretty-print', action='store_true',
- help="Format the JSON for human readers.",
- )
- parallel_mode = optparse.make_option(
- '-p', '--parallel-mode', action='store_true',
- help=(
- "Append the machine name, process id and random number to the "
- ".coverage data file name to simplify collecting data from "
- "many processes."
- ),
- )
- module = optparse.make_option(
- '-m', '--module', action='store_true',
- help=(
- "<pyfile> is an importable Python module, not a script path, "
- "to be run as 'python -m' would run it."
- ),
- )
- precision = optparse.make_option(
- '', '--precision', action='store', metavar='N', type=int,
- help=(
- "Number of digits after the decimal point to display for "
- "reported coverage percentages."
- ),
- )
- rcfile = optparse.make_option(
- '', '--rcfile', action='store',
- help=(
- "Specify configuration file. "
- "By default '.coveragerc', 'setup.cfg', 'tox.ini', and "
- "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]"
- ),
- )
- source = optparse.make_option(
- '', '--source', action='store', metavar="SRC1,SRC2,...",
- help="A list of packages or directories of code to be measured.",
- )
- timid = optparse.make_option(
- '', '--timid', action='store_true',
- help=(
- "Use a simpler but slower trace method. Try this if you get "
- "seemingly impossible results!"
- ),
- )
- title = optparse.make_option(
- '', '--title', action='store', metavar="TITLE",
- help="A text string to use as the title on the HTML.",
- )
- version = optparse.make_option(
- '', '--version', action='store_true',
- help="Display version information and exit.",
- )
-
-
-class CoverageOptionParser(optparse.OptionParser, object):
- """Base OptionParser for coverage.py.
-
- Problems don't exit the program.
- Defaults are initialized for all options.
-
- """
-
- def __init__(self, *args, **kwargs):
- super(CoverageOptionParser, self).__init__(
- add_help_option=False, *args, **kwargs
- )
- self.set_defaults(
- action=None,
- append=None,
- branch=None,
- concurrency=None,
- context=None,
- debug=None,
- directory=None,
- fail_under=None,
- help=None,
- ignore_errors=None,
- include=None,
- keep=None,
- module=None,
- omit=None,
- contexts=None,
- parallel_mode=None,
- precision=None,
- pylib=None,
- rcfile=True,
- show_missing=None,
- skip_covered=None,
- skip_empty=None,
- show_contexts=None,
- sort=None,
- source=None,
- timid=None,
- title=None,
- version=None,
- )
-
- self.disable_interspersed_args()
-
- class OptionParserError(Exception):
- """Used to stop the optparse error handler ending the process."""
- pass
-
- def parse_args_ok(self, args=None, options=None):
- """Call optparse.parse_args, but return a triple:
-
- (ok, options, args)
-
- """
- try:
- options, args = super(CoverageOptionParser, self).parse_args(args, options)
- except self.OptionParserError:
- return False, None, None
- return True, options, args
-
- def error(self, msg):
- """Override optparse.error so sys.exit doesn't get called."""
- show_help(msg)
- raise self.OptionParserError
-
-
-class GlobalOptionParser(CoverageOptionParser):
- """Command-line parser for coverage.py global option arguments."""
-
- def __init__(self):
- super(GlobalOptionParser, self).__init__()
-
- self.add_options([
- Opts.help,
- Opts.version,
- ])
-
-
-class CmdOptionParser(CoverageOptionParser):
- """Parse one of the new-style commands for coverage.py."""
-
- def __init__(self, action, options, defaults=None, usage=None, description=None):
- """Create an OptionParser for a coverage.py command.
-
- `action` is the slug to put into `options.action`.
- `options` is a list of Option's for the command.
- `defaults` is a dict of default values for options.
- `usage` is the usage string to display in help.
- `description` is the description of the command, for the help text.
-
- """
- if usage:
- usage = "%prog " + usage
- super(CmdOptionParser, self).__init__(
- usage=usage,
- description=description,
- )
- self.set_defaults(action=action, **(defaults or {}))
- self.add_options(options)
- self.cmd = action
-
- def __eq__(self, other):
- # A convenience equality, so that I can put strings in unit test
- # results, and they will compare equal to objects.
- return (other == "<CmdOptionParser:%s>" % self.cmd)
-
- __hash__ = None # This object doesn't need to be hashed.
-
- def get_prog_name(self):
- """Override of an undocumented function in optparse.OptionParser."""
- program_name = super(CmdOptionParser, self).get_prog_name()
-
- # Include the sub-command for this parser as part of the command.
- return "{command} {subcommand}".format(command=program_name, subcommand=self.cmd)
-
-
-GLOBAL_ARGS = [
- Opts.debug,
- Opts.help,
- Opts.rcfile,
- ]
-
-CMDS = {
- 'annotate': CmdOptionParser(
- "annotate",
- [
- Opts.directory,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description=(
- "Make annotated copies of the given files, marking statements that are executed "
- "with > and statements that are missed with !."
- ),
- ),
-
- 'combine': CmdOptionParser(
- "combine",
- [
- Opts.append,
- Opts.keep,
- ] + GLOBAL_ARGS,
- usage="[options] <path1> <path2> ... <pathN>",
- description=(
- "Combine data from multiple coverage files collected "
- "with 'run -p'. The combined results are written to a single "
- "file representing the union of the data. The positional "
- "arguments are data files or directories containing data files. "
- "If no paths are provided, data files in the default data file's "
- "directory are combined."
- ),
- ),
-
- 'debug': CmdOptionParser(
- "debug", GLOBAL_ARGS,
- usage="<topic>",
- description=(
- "Display information about the internals of coverage.py, "
- "for diagnosing problems. "
- "Topics are: "
- "'data' to show a summary of the collected data; "
- "'sys' to show installation information; "
- "'config' to show the configuration; "
- "'premain' to show what is calling coverage."
- ),
- ),
-
- 'erase': CmdOptionParser(
- "erase", GLOBAL_ARGS,
- description="Erase previously collected coverage data.",
- ),
-
- 'help': CmdOptionParser(
- "help", GLOBAL_ARGS,
- usage="[command]",
- description="Describe how to use coverage.py",
- ),
-
- 'html': CmdOptionParser(
- "html",
- [
- Opts.contexts,
- Opts.directory,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.precision,
- Opts.show_contexts,
- Opts.skip_covered,
- Opts.no_skip_covered,
- Opts.skip_empty,
- Opts.title,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description=(
- "Create an HTML report of the coverage of the files. "
- "Each file gets its own page, with the source decorated to show "
- "executed, excluded, and missed lines."
- ),
- ),
-
- 'json': CmdOptionParser(
- "json",
- [
- Opts.contexts,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.output_json,
- Opts.json_pretty_print,
- Opts.show_contexts,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Generate a JSON report of coverage results."
- ),
-
- 'report': CmdOptionParser(
- "report",
- [
- Opts.contexts,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.precision,
- Opts.sort,
- Opts.show_missing,
- Opts.skip_covered,
- Opts.no_skip_covered,
- Opts.skip_empty,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Report coverage statistics on modules."
- ),
-
- 'run': CmdOptionParser(
- "run",
- [
- Opts.append,
- Opts.branch,
- Opts.concurrency,
- Opts.context,
- Opts.include,
- Opts.module,
- Opts.omit,
- Opts.pylib,
- Opts.parallel_mode,
- Opts.source,
- Opts.timid,
- ] + GLOBAL_ARGS,
- usage="[options] <pyfile> [program options]",
- description="Run a Python program, measuring code execution."
- ),
-
- 'xml': CmdOptionParser(
- "xml",
- [
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.output_xml,
- Opts.skip_empty,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Generate an XML report of coverage results."
- ),
-}
-
-
-def show_help(error=None, topic=None, parser=None):
- """Display an error message, or the named topic."""
- assert error or topic or parser
-
- program_path = sys.argv[0]
- if program_path.endswith(os.path.sep + '__main__.py'):
- # The path is the main module of a package; get that path instead.
- program_path = os.path.dirname(program_path)
- program_name = os.path.basename(program_path)
- if env.WINDOWS:
- # entry_points={'console_scripts':...} on Windows makes files
- # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
- # invoke coverage-script.py, coverage3-script.py, and
- # coverage-3.5-script.py. argv[0] is the .py file, but we want to
- # get back to the original form.
- auto_suffix = "-script.py"
- if program_name.endswith(auto_suffix):
- program_name = program_name[:-len(auto_suffix)]
-
- help_params = dict(coverage.__dict__)
- help_params['program_name'] = program_name
- if CTracer is not None:
- help_params['extension_modifier'] = 'with C extension'
- else:
- help_params['extension_modifier'] = 'without C extension'
-
- if error:
- print(error, file=sys.stderr)
- print("Use '%s help' for help." % (program_name,), file=sys.stderr)
- elif parser:
- print(parser.format_help().strip())
- print()
- else:
- help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
- if help_msg:
- print(help_msg.format(**help_params))
- else:
- print("Don't know topic %r" % topic)
- print("Full documentation is at {__url__}".format(**help_params))
-
-
-OK, ERR, FAIL_UNDER = 0, 1, 2
-
-
-class CoverageScript(object):
- """The command-line interface to coverage.py."""
-
- def __init__(self):
- self.global_option = False
- self.coverage = None
-
- def command_line(self, argv):
- """The bulk of the command line interface to coverage.py.
-
- `argv` is the argument list to process.
-
- Returns 0 if all is well, 1 if something went wrong.
-
- """
- # Collect the command-line options.
- if not argv:
- show_help(topic='minimum_help')
- return OK
-
- # The command syntax we parse depends on the first argument. Global
- # switch syntax always starts with an option.
- self.global_option = argv[0].startswith('-')
- if self.global_option:
- parser = GlobalOptionParser()
- else:
- parser = CMDS.get(argv[0])
- if not parser:
- show_help("Unknown command: '%s'" % argv[0])
- return ERR
- argv = argv[1:]
-
- ok, options, args = parser.parse_args_ok(argv)
- if not ok:
- return ERR
-
- # Handle help and version.
- if self.do_help(options, args, parser):
- return OK
-
- # Listify the list options.
- source = unshell_list(options.source)
- omit = unshell_list(options.omit)
- include = unshell_list(options.include)
- debug = unshell_list(options.debug)
- contexts = unshell_list(options.contexts)
-
- # Do something.
- self.coverage = Coverage(
- data_suffix=options.parallel_mode,
- cover_pylib=options.pylib,
- timid=options.timid,
- branch=options.branch,
- config_file=options.rcfile,
- source=source,
- omit=omit,
- include=include,
- debug=debug,
- concurrency=options.concurrency,
- check_preimported=True,
- context=options.context,
- )
-
- if options.action == "debug":
- return self.do_debug(args)
-
- elif options.action == "erase":
- self.coverage.erase()
- return OK
-
- elif options.action == "run":
- return self.do_run(options, args)
-
- elif options.action == "combine":
- if options.append:
- self.coverage.load()
- data_dirs = args or None
- self.coverage.combine(data_dirs, strict=True, keep=bool(options.keep))
- self.coverage.save()
- return OK
-
- # Remaining actions are reporting, with some common options.
- report_args = dict(
- morfs=unglob_args(args),
- ignore_errors=options.ignore_errors,
- omit=omit,
- include=include,
- contexts=contexts,
- )
-
- # We need to be able to import from the current directory, because
- # plugins may try, for example, to read Django settings.
- sys.path.insert(0, '')
-
- self.coverage.load()
-
- total = None
- if options.action == "report":
- total = self.coverage.report(
- show_missing=options.show_missing,
- skip_covered=options.skip_covered,
- skip_empty=options.skip_empty,
- precision=options.precision,
- sort=options.sort,
- **report_args
- )
- elif options.action == "annotate":
- self.coverage.annotate(directory=options.directory, **report_args)
- elif options.action == "html":
- total = self.coverage.html_report(
- directory=options.directory,
- title=options.title,
- skip_covered=options.skip_covered,
- skip_empty=options.skip_empty,
- show_contexts=options.show_contexts,
- precision=options.precision,
- **report_args
- )
- elif options.action == "xml":
- outfile = options.outfile
- total = self.coverage.xml_report(
- outfile=outfile, skip_empty=options.skip_empty,
- **report_args
- )
- elif options.action == "json":
- outfile = options.outfile
- total = self.coverage.json_report(
- outfile=outfile,
- pretty_print=options.pretty_print,
- show_contexts=options.show_contexts,
- **report_args
- )
-
- if total is not None:
- # Apply the command line fail-under options, and then use the config
- # value, so we can get fail_under from the config file.
- if options.fail_under is not None:
- self.coverage.set_option("report:fail_under", options.fail_under)
-
- fail_under = self.coverage.get_option("report:fail_under")
- precision = self.coverage.get_option("report:precision")
- if should_fail_under(total, fail_under, precision):
- msg = "total of {total:.{p}f} is less than fail-under={fail_under:.{p}f}".format(
- total=total, fail_under=fail_under, p=precision,
- )
- print("Coverage failure:", msg)
- return FAIL_UNDER
-
- return OK
-
- def do_help(self, options, args, parser):
- """Deal with help requests.
-
- Return True if it handled the request, False if not.
-
- """
- # Handle help.
- if options.help:
- if self.global_option:
- show_help(topic='help')
- else:
- show_help(parser=parser)
- return True
-
- if options.action == "help":
- if args:
- for a in args:
- parser = CMDS.get(a)
- if parser:
- show_help(parser=parser)
- else:
- show_help(topic=a)
- else:
- show_help(topic='help')
- return True
-
- # Handle version.
- if options.version:
- show_help(topic='version')
- return True
-
- return False
-
- def do_run(self, options, args):
- """Implementation of 'coverage run'."""
-
- if not args:
- if options.module:
- # Specified -m with nothing else.
- show_help("No module specified for -m")
- return ERR
- command_line = self.coverage.get_option("run:command_line")
- if command_line is not None:
- args = shlex.split(command_line)
- if args and args[0] == "-m":
- options.module = True
- args = args[1:]
- if not args:
- show_help("Nothing to do.")
- return ERR
-
- if options.append and self.coverage.get_option("run:parallel"):
- show_help("Can't append to data files in parallel mode.")
- return ERR
-
- if options.concurrency == "multiprocessing":
- # Can't set other run-affecting command line options with
- # multiprocessing.
- for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
- # As it happens, all of these options have no default, meaning
- # they will be None if they have not been specified.
- if getattr(options, opt_name) is not None:
- show_help(
- "Options affecting multiprocessing must only be specified "
- "in a configuration file.\n"
- "Remove --{} from the command line.".format(opt_name)
- )
- return ERR
-
- runner = PyRunner(args, as_module=bool(options.module))
- runner.prepare()
-
- if options.append:
- self.coverage.load()
-
- # Run the script.
- self.coverage.start()
- code_ran = True
- try:
- runner.run()
- except NoSource:
- code_ran = False
- raise
- finally:
- self.coverage.stop()
- if code_ran:
- self.coverage.save()
-
- return OK
-
- def do_debug(self, args):
- """Implementation of 'coverage debug'."""
-
- if not args:
- show_help("What information would you like: config, data, sys, premain?")
- return ERR
-
- for info in args:
- if info == 'sys':
- sys_info = self.coverage.sys_info()
- print(info_header("sys"))
- for line in info_formatter(sys_info):
- print(" %s" % line)
- elif info == 'data':
- self.coverage.load()
- data = self.coverage.get_data()
- print(info_header("data"))
- print("path: %s" % data.data_filename())
- if data:
- print("has_arcs: %r" % data.has_arcs())
- summary = line_counts(data, fullpath=True)
- filenames = sorted(summary.keys())
- print("\n%d files:" % len(filenames))
- for f in filenames:
- line = "%s: %d lines" % (f, summary[f])
- plugin = data.file_tracer(f)
- if plugin:
- line += " [%s]" % plugin
- print(line)
- else:
- print("No data collected")
- elif info == 'config':
- print(info_header("config"))
- config_info = self.coverage.config.__dict__.items()
- for line in info_formatter(config_info):
- print(" %s" % line)
- elif info == "premain":
- print(info_header("premain"))
- print(short_stack())
- else:
- show_help("Don't know what you mean by %r" % info)
- return ERR
-
- return OK
-
-
-def unshell_list(s):
- """Turn a command-line argument into a list."""
- if not s:
- return None
- if env.WINDOWS:
- # When running coverage.py as coverage.exe, some of the behavior
- # of the shell is emulated: wildcards are expanded into a list of
- # file names. So you have to single-quote patterns on the command
- # line, but (not) helpfully, the single quotes are included in the
- # argument, so we have to strip them off here.
- s = s.strip("'")
- return s.split(',')
-
-
-def unglob_args(args):
- """Interpret shell wildcards for platforms that need it."""
- if env.WINDOWS:
- globbed = []
- for arg in args:
- if '?' in arg or '*' in arg:
- globbed.extend(glob.glob(arg))
- else:
- globbed.append(arg)
- args = globbed
- return args
-
-
-HELP_TOPICS = {
- 'help': """\
- Coverage.py, version {__version__} {extension_modifier}
- Measure, collect, and report on code coverage in Python programs.
-
- usage: {program_name} <command> [options] [args]
-
- Commands:
- annotate Annotate source files with execution information.
- combine Combine a number of data files.
- debug Display information about the internals of coverage.py
- erase Erase previously collected coverage data.
- help Get help on using coverage.py.
- html Create an HTML report.
- json Create a JSON report of coverage results.
- report Report coverage stats on modules.
- run Run a Python program and measure code execution.
- xml Create an XML report of coverage results.
-
- Use "{program_name} help <command>" for detailed help on any command.
- """,
-
- 'minimum_help': """\
- Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help.
- """,
-
- 'version': """\
- Coverage.py, version {__version__} {extension_modifier}
- """,
-}
-
-
-def main(argv=None):
- """The main entry point to coverage.py.
-
- This is installed as the script entry point.
-
- """
- if argv is None:
- argv = sys.argv[1:]
- try:
- status = CoverageScript().command_line(argv)
- except ExceptionDuringRun as err:
- # An exception was caught while running the product code. The
- # sys.exc_info() return tuple is packed into an ExceptionDuringRun
- # exception.
- traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter
- status = ERR
- except BaseCoverageException as err:
- # A controlled error inside coverage.py: print the message to the user.
- msg = err.args[0]
- if env.PY2:
- msg = msg.encode(output_encoding())
- print(msg)
- status = ERR
- except SystemExit as err:
- # The user called `sys.exit()`. Exit with their argument, if any.
- if err.args:
- status = err.args[0]
- else:
- status = None
- return status
-
-# Profiling using ox_profile. Install it from GitHub:
-# pip install git+https://github.com/emin63/ox_profile.git
-#
-# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile.
-_profile = os.environ.get("COVERAGE_PROFILE", "")
-if _profile: # pragma: debugging
- from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error
- original_main = main
-
- def main(argv=None): # pylint: disable=function-redefined
- """A wrapper around main that profiles."""
- profiler = SimpleLauncher.launch()
- try:
- return original_main(argv)
- finally:
- data, _ = profiler.query(re_filter='coverage', max_records=100)
- print(profiler.show(query=data, limit=100, sep='', col=''))
- profiler.cancel()
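[editorial note, not part of the patch] The deleted cmdline.py routes everything through the CMDS parsers and main(), so a programmatic call behaves like the console script. A minimal sketch of such a call, assuming coverage.py remains importable from elsewhere; the flags and exit codes come from the parsers and the OK/ERR/FAIL_UNDER constants above:

import sys
from coverage.cmdline import main

# Equivalent to running `coverage report --fail-under=80 --show-missing`.
status = main(["report", "--fail-under=80", "--show-missing"])
sys.exit(status)  # 0 if fine, 1 on error, 2 if total coverage is below 80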
diff --git a/contrib/python/coverage/py2/coverage/collector.py b/contrib/python/coverage/py2/coverage/collector.py
deleted file mode 100644
index c42d29feec..0000000000
--- a/contrib/python/coverage/py2/coverage/collector.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Raw data collector for coverage.py."""
-
-import os
-import sys
-
-from coverage import env
-from coverage.backward import litems, range # pylint: disable=redefined-builtin
-from coverage.debug import short_stack
-from coverage.disposition import FileDisposition
-from coverage.misc import CoverageException, isolate_module
-from coverage.pytracer import PyTracer
-
-os = isolate_module(os)
-
-
-try:
- # Use the C extension code when we can, for speed.
- from coverage.tracer import CTracer, CFileDisposition
-except ImportError:
- # Couldn't import the C extension, maybe it isn't built.
- if os.getenv('COVERAGE_TEST_TRACER') == 'c':
- # During testing, we use the COVERAGE_TEST_TRACER environment variable
- # to indicate that we've fiddled with the environment to test this
- # fallback code. If we thought we had a C tracer, but couldn't import
- # it, then exit quickly and clearly instead of dribbling confusing
- # errors. I'm using sys.exit here instead of an exception because an
- # exception here causes all sorts of other noise in unittest.
- sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
- sys.exit(1)
- CTracer = None
-
-
-class Collector(object):
- """Collects trace data.
-
- Creates a Tracer object for each thread, since they track stack
- information. Each Tracer points to the same shared data, contributing
- traced data points.
-
- When the Collector is started, it creates a Tracer for the current thread,
- and installs a function to create Tracers for each new thread started.
- When the Collector is stopped, all active Tracers are stopped.
-
- Threads started while the Collector is stopped will never have Tracers
- associated with them.
-
- """
-
- # The stack of active Collectors. Collectors are added here when started,
- # and popped when stopped. Collectors on the stack are paused when not
- # the top, and resumed when they become the top again.
- _collectors = []
-
- # The concurrency settings we support here.
- SUPPORTED_CONCURRENCIES = {"greenlet", "eventlet", "gevent", "thread"}
-
- def __init__(
- self, should_trace, check_include, should_start_context, file_mapper,
- timid, branch, warn, concurrency,
- ):
- """Create a collector.
-
- `should_trace` is a function, taking a file name and a frame, and
- returning a `coverage.FileDisposition object`.
-
- `check_include` is a function taking a file name and a frame. It returns
- a boolean: True if the file should be traced, False if not.
-
- `should_start_context` is a function taking a frame, and returning a
- string. If the frame should be the start of a new context, the string
- is the new context. If the frame should not be the start of a new
- context, return None.
-
- `file_mapper` is a function taking a filename, and returning a Unicode
- filename. The result is the name that will be recorded in the data
- file.
-
- If `timid` is true, then a slower simpler trace function will be
- used. This is important for some environments where manipulation of
- tracing functions makes the faster, more sophisticated trace function not
- operate properly.
-
- If `branch` is true, then branches will be measured. This involves
- collecting data on which statements followed each other (arcs). Use
- `get_arc_data` to get the arc data.
-
- `warn` is a warning function, taking a single string message argument
- and an optional slug argument which will be a string or None, to be
- used if a warning needs to be issued.
-
- `concurrency` is a list of strings indicating the concurrency libraries
- in use. Valid values are "greenlet", "eventlet", "gevent", or "thread"
- (the default). Of these four values, only one can be supplied. Other
- values are ignored.
-
- """
- self.should_trace = should_trace
- self.check_include = check_include
- self.should_start_context = should_start_context
- self.file_mapper = file_mapper
- self.warn = warn
- self.branch = branch
- self.threading = None
- self.covdata = None
-
- self.static_context = None
-
- self.origin = short_stack()
-
- self.concur_id_func = None
- self.mapped_file_cache = {}
-
- # We can handle a few concurrency options here, but only one at a time.
- these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
- if len(these_concurrencies) > 1:
- raise CoverageException("Conflicting concurrency settings: %s" % concurrency)
- self.concurrency = these_concurrencies.pop() if these_concurrencies else ''
-
- try:
- if self.concurrency == "greenlet":
- import greenlet
- self.concur_id_func = greenlet.getcurrent
- elif self.concurrency == "eventlet":
- import eventlet.greenthread # pylint: disable=import-error,useless-suppression
- self.concur_id_func = eventlet.greenthread.getcurrent
- elif self.concurrency == "gevent":
- import gevent # pylint: disable=import-error,useless-suppression
- self.concur_id_func = gevent.getcurrent
- elif self.concurrency == "thread" or not self.concurrency:
- # It's important to import threading only if we need it. If
- # it's imported early, and the program being measured uses
- # gevent, then gevent's monkey-patching won't work properly.
- import threading
- self.threading = threading
- else:
- raise CoverageException("Don't understand concurrency=%s" % concurrency)
- except ImportError:
- raise CoverageException(
- "Couldn't trace with concurrency=%s, the module isn't installed." % (
- self.concurrency,
- )
- )
-
- self.reset()
-
- if timid:
- # Being timid: use the simple Python trace function.
- self._trace_class = PyTracer
- else:
- # Being fast: use the C Tracer if it is available, else the Python
- # trace function.
- self._trace_class = CTracer or PyTracer
-
- if self._trace_class is CTracer:
- self.file_disposition_class = CFileDisposition
- self.supports_plugins = True
- else:
- self.file_disposition_class = FileDisposition
- self.supports_plugins = False
-
- def __repr__(self):
- return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
-
- def use_data(self, covdata, context):
- """Use `covdata` for recording data."""
- self.covdata = covdata
- self.static_context = context
- self.covdata.set_context(self.static_context)
-
- def tracer_name(self):
- """Return the class name of the tracer we're using."""
- return self._trace_class.__name__
-
- def _clear_data(self):
- """Clear out existing data, but stay ready for more collection."""
- # We used to use self.data.clear(), but that would remove filename
- # keys and data values that were still in use higher up the stack
- # when we are called as part of switch_context.
- for d in self.data.values():
- d.clear()
-
- for tracer in self.tracers:
- tracer.reset_activity()
-
- def reset(self):
- """Clear collected data, and prepare to collect more."""
- # A dictionary mapping file names to dicts with line number keys (if not
- # branch coverage), or mapping file names to dicts with line number
- # pairs as keys (if branch coverage).
- self.data = {}
-
- # A dictionary mapping file names to file tracer plugin names that will
- # handle them.
- self.file_tracers = {}
-
- self.disabled_plugins = set()
-
- # The .should_trace_cache attribute is a cache from file names to
- # coverage.FileDisposition objects, or None. When a file is first
- # considered for tracing, a FileDisposition is obtained from
- # Coverage.should_trace. Its .trace attribute indicates whether the
- # file should be traced or not. If it should be, a plugin with dynamic
- # file names can decide not to trace it based on the dynamic file name
- # being excluded by the inclusion rules, in which case the
- # FileDisposition will be replaced by None in the cache.
- if env.PYPY:
- import __pypy__ # pylint: disable=import-error
- # Alex Gaynor said:
- # should_trace_cache is a strictly growing key: once a key is in
- # it, it never changes. Further, the keys used to access it are
- # generally constant, given sufficient context. That is to say, at
- # any given point _trace() is called, pypy is able to know the key.
- # This is because the key is determined by the physical source code
- # line, and that's invariant with the call site.
- #
- # This property of a dict with immutable keys, combined with
- # call-site-constant keys is a match for PyPy's module dict,
- # which is optimized for such workloads.
- #
- # This gives a 20% benefit on the workload described at
- # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
- self.should_trace_cache = __pypy__.newdict("module")
- else:
- self.should_trace_cache = {}
-
- # Our active Tracers.
- self.tracers = []
-
- self._clear_data()
-
- def _start_tracer(self):
- """Start a new Tracer object, and store it in self.tracers."""
- tracer = self._trace_class()
- tracer.data = self.data
- tracer.trace_arcs = self.branch
- tracer.should_trace = self.should_trace
- tracer.should_trace_cache = self.should_trace_cache
- tracer.warn = self.warn
-
- if hasattr(tracer, 'concur_id_func'):
- tracer.concur_id_func = self.concur_id_func
- elif self.concur_id_func:
- raise CoverageException(
- "Can't support concurrency=%s with %s, only threads are supported" % (
- self.concurrency, self.tracer_name(),
- )
- )
-
- if hasattr(tracer, 'file_tracers'):
- tracer.file_tracers = self.file_tracers
- if hasattr(tracer, 'threading'):
- tracer.threading = self.threading
- if hasattr(tracer, 'check_include'):
- tracer.check_include = self.check_include
- if hasattr(tracer, 'should_start_context'):
- tracer.should_start_context = self.should_start_context
- tracer.switch_context = self.switch_context
- if hasattr(tracer, 'disable_plugin'):
- tracer.disable_plugin = self.disable_plugin
-
- fn = tracer.start()
- self.tracers.append(tracer)
-
- return fn
-
- # The trace function has to be set individually on each thread before
- # execution begins. Ironically, the only support the threading module has
- # for running code before the thread main is the tracing function. So we
- # install this as a trace function, and the first time it's called, it does
- # the real trace installation.
-
- def _installation_trace(self, frame, event, arg):
- """Called on new threads, installs the real tracer."""
- # Remove ourselves as the trace function.
- sys.settrace(None)
- # Install the real tracer.
- fn = self._start_tracer()
- # Invoke the real trace function with the current event, to be sure
- # not to lose an event.
- if fn:
- fn = fn(frame, event, arg)
- # Return the new trace function to continue tracing in this scope.
- return fn
-
- def start(self):
- """Start collecting trace information."""
- if self._collectors:
- self._collectors[-1].pause()
-
- self.tracers = []
-
- # Check to see whether we had a fullcoverage tracer installed. If so,
- # get the stack frames it stashed away for us.
- traces0 = []
- fn0 = sys.gettrace()
- if fn0:
- tracer0 = getattr(fn0, '__self__', None)
- if tracer0:
- traces0 = getattr(tracer0, 'traces', [])
-
- try:
- # Install the tracer on this thread.
- fn = self._start_tracer()
- except:
- if self._collectors:
- self._collectors[-1].resume()
- raise
-
- # If _start_tracer succeeded, then we add ourselves to the global
- # stack of collectors.
- self._collectors.append(self)
-
- # Replay all the events from fullcoverage into the new trace function.
- for args in traces0:
- (frame, event, arg), lineno = args
- try:
- fn(frame, event, arg, lineno=lineno)
- except TypeError:
- raise Exception("fullcoverage must be run with the C trace function.")
-
- # Install our installation tracer in threading, to jump-start other
- # threads.
- if self.threading:
- self.threading.settrace(self._installation_trace)
-
- def stop(self):
- """Stop collecting trace information."""
- assert self._collectors
- if self._collectors[-1] is not self:
- print("self._collectors:")
- for c in self._collectors:
- print(" {!r}\n{}".format(c, c.origin))
- assert self._collectors[-1] is self, (
- "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
- )
-
- self.pause()
-
- # Remove this Collector from the stack, and resume the one underneath
- # (if any).
- self._collectors.pop()
- if self._collectors:
- self._collectors[-1].resume()
-
- def pause(self):
- """Pause tracing, but be prepared to `resume`."""
- for tracer in self.tracers:
- tracer.stop()
- stats = tracer.get_stats()
- if stats:
- print("\nCoverage.py tracer stats:")
- for k in sorted(stats.keys()):
- print("%20s: %s" % (k, stats[k]))
- if self.threading:
- self.threading.settrace(None)
-
- def resume(self):
- """Resume tracing after a `pause`."""
- for tracer in self.tracers:
- tracer.start()
- if self.threading:
- self.threading.settrace(self._installation_trace)
- else:
- self._start_tracer()
-
- def _activity(self):
- """Has any activity been traced?
-
- Returns a boolean, True if any trace function was invoked.
-
- """
- return any(tracer.activity() for tracer in self.tracers)
-
- def switch_context(self, new_context):
- """Switch to a new dynamic context."""
- self.flush_data()
- if self.static_context:
- context = self.static_context
- if new_context:
- context += "|" + new_context
- else:
- context = new_context
- self.covdata.set_context(context)
-
- def disable_plugin(self, disposition):
- """Disable the plugin mentioned in `disposition`."""
- file_tracer = disposition.file_tracer
- plugin = file_tracer._coverage_plugin
- plugin_name = plugin._coverage_plugin_name
- self.warn("Disabling plug-in {!r} due to previous exception".format(plugin_name))
- plugin._coverage_enabled = False
- disposition.trace = False
-
- def cached_mapped_file(self, filename):
- """A locally cached version of file names mapped through file_mapper."""
- key = (type(filename), filename)
- try:
- return self.mapped_file_cache[key]
- except KeyError:
- return self.mapped_file_cache.setdefault(key, self.file_mapper(filename))
-
- def mapped_file_dict(self, d):
- """Return a dict like d, but with keys modified by file_mapper."""
- # The call to litems() ensures that the GIL protects the dictionary
- # iterator against concurrent modifications by tracers running
- # in other threads. We try three times in case of concurrent
- # access, hoping to get a clean copy.
- runtime_err = None
- for _ in range(3):
- try:
- items = litems(d)
- except RuntimeError as ex:
- runtime_err = ex
- else:
- break
- else:
- raise runtime_err
-
- if getattr(sys, 'is_standalone_binary', False):
- # filenames should stay relative to the arcadia root, because files may not exist
- return dict((k, v) for k, v in items if v)
-
- return dict((self.cached_mapped_file(k), v) for k, v in items if v)
-
- def plugin_was_disabled(self, plugin):
- """Record that `plugin` was disabled during the run."""
- self.disabled_plugins.add(plugin._coverage_plugin_name)
-
- def flush_data(self):
- """Save the collected data to our associated `CoverageData`.
-
- Data may have also been saved along the way. This forces the
- last of the data to be saved.
-
- Returns True if there was data to save, False if not.
- """
- if not self._activity():
- return False
-
- if self.branch:
- self.covdata.add_arcs(self.mapped_file_dict(self.data))
- else:
- self.covdata.add_lines(self.mapped_file_dict(self.data))
-
- file_tracers = {
- k: v for k, v in self.file_tracers.items()
- if v not in self.disabled_plugins
- }
- self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers))
-
- self._clear_data()
- return True
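[editorial note, not part of the patch] The thread bootstrap trick described in Collector._installation_trace above can be shown in isolation. This is a simplified sketch, not coverage.py code: threading.settrace() installs a tiny installer trace function that replaces itself with the real tracer on the first event it sees in each newly started thread.

import sys
import threading

def real_tracer(frame, event, arg):
    print("tracing %s (%s)" % (frame.f_code.co_name, event))
    return real_tracer

def installation_trace(frame, event, arg):
    sys.settrace(None)         # remove ourselves as the trace function
    sys.settrace(real_tracer)  # install the real tracer for this thread
    return real_tracer(frame, event, arg)  # don't lose the current event

threading.settrace(installation_trace)  # applied to every thread started from now on

def worker():
    return sum(range(3))

t = threading.Thread(target=worker)
t.start()
t.join()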
diff --git a/contrib/python/coverage/py2/coverage/config.py b/contrib/python/coverage/py2/coverage/config.py
deleted file mode 100644
index ceb7201b65..0000000000
--- a/contrib/python/coverage/py2/coverage/config.py
+++ /dev/null
@@ -1,605 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Config file for coverage.py"""
-
-import collections
-import copy
-import os
-import os.path
-import re
-import sys
-
-from coverage import env
-from coverage.backward import configparser, iitems, string_class
-from coverage.misc import contract, CoverageException, isolate_module
-from coverage.misc import substitute_variables
-
-from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
-
-os = isolate_module(os)
-
-
-class HandyConfigParser(configparser.RawConfigParser):
- """Our specialization of ConfigParser."""
-
- def __init__(self, our_file):
- """Create the HandyConfigParser.
-
- `our_file` is True if this config file is specifically for coverage,
- False if we are examining another config file (tox.ini, setup.cfg)
- for possible settings.
- """
-
- configparser.RawConfigParser.__init__(self)
- self.section_prefixes = ["coverage:"]
- if our_file:
- self.section_prefixes.append("")
-
- def read(self, filenames, encoding=None):
- """Read a file name as UTF-8 configuration data."""
- kwargs = {}
- if env.PYVERSION >= (3, 2):
- kwargs['encoding'] = encoding or "utf-8"
- return configparser.RawConfigParser.read(self, filenames, **kwargs)
-
- def has_option(self, section, option):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- has = configparser.RawConfigParser.has_option(self, real_section, option)
- if has:
- return has
- return False
-
- def has_section(self, section):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- has = configparser.RawConfigParser.has_section(self, real_section)
- if has:
- return real_section
- return False
-
- def options(self, section):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- if configparser.RawConfigParser.has_section(self, real_section):
- return configparser.RawConfigParser.options(self, real_section)
- raise configparser.NoSectionError(section)
-
- def get_section(self, section):
- """Get the contents of a section, as a dictionary."""
- d = {}
- for opt in self.options(section):
- d[opt] = self.get(section, opt)
- return d
-
- def get(self, section, option, *args, **kwargs):
- """Get a value, replacing environment variables also.
-
- The arguments are the same as `RawConfigParser.get`, but in the found
- value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
- environment variable ``WORD``.
-
- Returns the finished value.
-
- """
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- if configparser.RawConfigParser.has_option(self, real_section, option):
- break
- else:
- raise configparser.NoOptionError(option, section)
-
- v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs)
- v = substitute_variables(v, os.environ)
- return v
-
- def getlist(self, section, option):
- """Read a list of strings.
-
- The value of `section` and `option` is treated as a comma- and newline-
- separated list of strings. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- value_list = self.get(section, option)
- values = []
- for value_line in value_list.split('\n'):
- for value in value_line.split(','):
- value = value.strip()
- if value:
- values.append(value)
- return values
-
- def getregexlist(self, section, option):
- """Read a list of full-line regexes.
-
- The value of `section` and `option` is treated as a newline-separated
- list of regexes. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- line_list = self.get(section, option)
- value_list = []
- for value in line_list.splitlines():
- value = value.strip()
- try:
- re.compile(value)
- except re.error as e:
- raise CoverageException(
- "Invalid [%s].%s value %r: %s" % (section, option, value, e)
- )
- if value:
- value_list.append(value)
- return value_list
-
-
-# The default line exclusion regexes.
-DEFAULT_EXCLUDE = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)',
-]
-
-# The default partial branch regexes, to be modified by the user.
-DEFAULT_PARTIAL = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)',
-]
-
-# The default partial branch regexes, based on Python semantics.
-# These are any Python branching constructs that can't actually execute all
-# their branches.
-DEFAULT_PARTIAL_ALWAYS = [
- 'while (True|1|False|0):',
- 'if (True|1|False|0):',
-]
-
-
-class CoverageConfig(object):
- """Coverage.py configuration.
-
- The attributes of this class are the various settings that control the
- operation of coverage.py.
-
- """
- # pylint: disable=too-many-instance-attributes
-
- def __init__(self):
- """Initialize the configuration attributes to their defaults."""
- # Metadata about the config.
- # We tried to read these config files.
- self.attempted_config_files = []
- # We did read these config files, but maybe didn't find any content for us.
- self.config_files_read = []
- # The file that gave us our configuration.
- self.config_file = None
- self._config_contents = None
-
- # Defaults for [run] and [report]
- self._include = None
- self._omit = None
-
- # Defaults for [run]
- self.branch = False
- self.command_line = None
- self.concurrency = None
- self.context = None
- self.cover_pylib = False
- self.data_file = ".coverage"
- self.debug = []
- self.disable_warnings = []
- self.dynamic_context = None
- self.note = None
- self.parallel = False
- self.plugins = []
- self.relative_files = False
- self.run_include = None
- self.run_omit = None
- self.source = None
- self.source_pkgs = []
- self.timid = False
- self._crash = None
-
- # Defaults for [report]
- self.exclude_list = DEFAULT_EXCLUDE[:]
- self.fail_under = 0.0
- self.ignore_errors = False
- self.report_include = None
- self.report_omit = None
- self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
- self.partial_list = DEFAULT_PARTIAL[:]
- self.precision = 0
- self.report_contexts = None
- self.show_missing = False
- self.skip_covered = False
- self.skip_empty = False
- self.sort = None
-
- # Defaults for [html]
- self.extra_css = None
- self.html_dir = "htmlcov"
- self.html_skip_covered = None
- self.html_skip_empty = None
- self.html_title = "Coverage report"
- self.show_contexts = False
-
- # Defaults for [xml]
- self.xml_output = "coverage.xml"
- self.xml_package_depth = 99
-
- # Defaults for [json]
- self.json_output = "coverage.json"
- self.json_pretty_print = False
- self.json_show_contexts = False
-
- # Defaults for [paths]
- self.paths = collections.OrderedDict()
-
- # Options for plugins
- self.plugin_options = {}
- self.suppress_plugin_errors = True
-
- MUST_BE_LIST = [
- "debug", "concurrency", "plugins",
- "report_omit", "report_include",
- "run_omit", "run_include",
- ]
-
- def from_args(self, **kwargs):
- """Read config values from `kwargs`."""
- for k, v in iitems(kwargs):
- if v is not None:
- if k in self.MUST_BE_LIST and isinstance(v, string_class):
- v = [v]
- setattr(self, k, v)
-
- def from_resource(self, resource_name):
- assert getattr(sys, 'is_standalone_binary', False), 'This method is only for standalone binaries, not scripts'
- cp, self._config_contents = _load_config_from_resource(resource_name)
- return self._parse_config(cp, resource_name, True)
-
- @contract(filename=str)
- def from_file(self, filename, our_file):
- """Read configuration from a .rc file.
-
- `filename` is a file name to read.
-
- `our_file` is True if this config file is specifically for coverage,
- False if we are examining another config file (tox.ini, setup.cfg)
- for possible settings.
-
- Returns True or False, whether the file could be read, and it had some
- coverage.py settings in it.
-
- """
- _, ext = os.path.splitext(filename)
- if ext == '.toml':
- cp = TomlConfigParser(our_file)
- else:
- cp = HandyConfigParser(our_file)
-
- self.attempted_config_files.append(filename)
-
- try:
- files_read = cp.read(filename)
- except (configparser.Error, TomlDecodeError) as err:
- raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
- if not files_read:
- return False
-
- self.config_files_read.extend(map(os.path.abspath, files_read))
-
- return self._parse_config(cp, filename, our_file)
-
- def _parse_config(self, cp, filename, our_file):
- any_set = False
- try:
- for option_spec in self.CONFIG_FILE_OPTIONS:
- was_set = self._set_attr_from_config_option(cp, *option_spec)
- if was_set:
- any_set = True
- except ValueError as err:
- raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
-
- # Check that there are no unrecognized options.
- all_options = collections.defaultdict(set)
- for option_spec in self.CONFIG_FILE_OPTIONS:
- section, option = option_spec[1].split(":")
- all_options[section].add(option)
-
- for section, options in iitems(all_options):
- real_section = cp.has_section(section)
- if real_section:
- for unknown in set(cp.options(section)) - options:
- raise CoverageException(
- "Unrecognized option '[%s] %s=' in config file %s" % (
- real_section, unknown, filename
- )
- )
-
- # [paths] is special
- if cp.has_section('paths'):
- for option in cp.options('paths'):
- self.paths[option] = cp.getlist('paths', option)
- any_set = True
-
- # plugins can have options
- for plugin in self.plugins:
- if cp.has_section(plugin):
- self.plugin_options[plugin] = cp.get_section(plugin)
- any_set = True
-
- # Was this file used as a config file? If it's specifically our file,
- # then it was used. If we're piggybacking on someone else's file,
- # then it was only used if we found some settings in it.
- if our_file:
- used = True
- else:
- used = any_set
-
- if used:
- self.config_file = os.path.abspath(filename)
- if not getattr(sys, 'is_standalone_binary', False):
- with open(filename, "rb") as f:
- self._config_contents = f.read()
-
- return used
-
- def copy(self):
- """Return a copy of the configuration."""
- return copy.deepcopy(self)
-
- CONFIG_FILE_OPTIONS = [
- # These are *args for _set_attr_from_config_option:
- # (attr, where, type_="")
- #
- # attr is the attribute to set on the CoverageConfig object.
- # where is the section:name to read from the configuration file.
- # type_ is the optional type to apply, by using .getTYPE to read the
- # configuration value from the file.
-
- # [run]
- ('branch', 'run:branch', 'boolean'),
- ('command_line', 'run:command_line'),
- ('concurrency', 'run:concurrency', 'list'),
- ('context', 'run:context'),
- ('cover_pylib', 'run:cover_pylib', 'boolean'),
- ('data_file', 'run:data_file'),
- ('debug', 'run:debug', 'list'),
- ('disable_warnings', 'run:disable_warnings', 'list'),
- ('dynamic_context', 'run:dynamic_context'),
- ('note', 'run:note'),
- ('parallel', 'run:parallel', 'boolean'),
- ('plugins', 'run:plugins', 'list'),
- ('relative_files', 'run:relative_files', 'boolean'),
- ('run_include', 'run:include', 'list'),
- ('run_omit', 'run:omit', 'list'),
- ('source', 'run:source', 'list'),
- ('source_pkgs', 'run:source_pkgs', 'list'),
- ('timid', 'run:timid', 'boolean'),
- ('_crash', 'run:_crash'),
- ('suppress_plugin_errors', 'run:suppress_plugin_errors', 'boolean'),
-
- # [report]
- ('exclude_list', 'report:exclude_lines', 'regexlist'),
- ('fail_under', 'report:fail_under', 'float'),
- ('ignore_errors', 'report:ignore_errors', 'boolean'),
- ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
- ('partial_list', 'report:partial_branches', 'regexlist'),
- ('precision', 'report:precision', 'int'),
- ('report_contexts', 'report:contexts', 'list'),
- ('report_include', 'report:include', 'list'),
- ('report_omit', 'report:omit', 'list'),
- ('show_missing', 'report:show_missing', 'boolean'),
- ('skip_covered', 'report:skip_covered', 'boolean'),
- ('skip_empty', 'report:skip_empty', 'boolean'),
- ('sort', 'report:sort'),
-
- # [html]
- ('extra_css', 'html:extra_css'),
- ('html_dir', 'html:directory'),
- ('html_skip_covered', 'html:skip_covered', 'boolean'),
- ('html_skip_empty', 'html:skip_empty', 'boolean'),
- ('html_title', 'html:title'),
- ('show_contexts', 'html:show_contexts', 'boolean'),
-
- # [xml]
- ('xml_output', 'xml:output'),
- ('xml_package_depth', 'xml:package_depth', 'int'),
-
- # [json]
- ('json_output', 'json:output'),
- ('json_pretty_print', 'json:pretty_print', 'boolean'),
- ('json_show_contexts', 'json:show_contexts', 'boolean'),
- ]
-
- def _set_attr_from_config_option(self, cp, attr, where, type_=''):
- """Set an attribute on self if it exists in the ConfigParser.
-
- Returns True if the attribute was set.
-
- """
- section, option = where.split(":")
- if cp.has_option(section, option):
- method = getattr(cp, 'get' + type_)
- setattr(self, attr, method(section, option))
- return True
- return False
-
- def get_plugin_options(self, plugin):
- """Get a dictionary of options for the plugin named `plugin`."""
- return self.plugin_options.get(plugin, {})
-
- def set_option(self, option_name, value):
- """Set an option in the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- `value` is the new value for the option.
-
- """
- # Special-cased options.
- if option_name == "paths":
- self.paths = value
- return
-
- # Check all the hard-coded options.
- for option_spec in self.CONFIG_FILE_OPTIONS:
- attr, where = option_spec[:2]
- if where == option_name:
- setattr(self, attr, value)
- return
-
- # See if it's a plugin option.
- plugin_name, _, key = option_name.partition(":")
- if key and plugin_name in self.plugins:
- self.plugin_options.setdefault(plugin_name, {})[key] = value
- return
-
- # If we get here, we didn't find the option.
- raise CoverageException("No such option: %r" % option_name)
-
- def get_option(self, option_name):
- """Get an option from the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- Returns the value of the option.
-
- """
- # Special-cased options.
- if option_name == "paths":
- return self.paths
-
- # Check all the hard-coded options.
- for option_spec in self.CONFIG_FILE_OPTIONS:
- attr, where = option_spec[:2]
- if where == option_name:
- return getattr(self, attr)
-
- # See if it's a plugin option.
- plugin_name, _, key = option_name.partition(":")
- if key and plugin_name in self.plugins:
- return self.plugin_options.get(plugin_name, {}).get(key)
-
- # If we get here, we didn't find the option.
- raise CoverageException("No such option: %r" % option_name)
-
- def post_process_file(self, path):
- """Make final adjustments to a file path to make it usable."""
- return os.path.expanduser(path)
-
- def post_process(self):
- """Make final adjustments to settings to make them usable."""
- self.data_file = self.post_process_file(self.data_file)
- self.html_dir = self.post_process_file(self.html_dir)
- self.xml_output = self.post_process_file(self.xml_output)
- self.paths = collections.OrderedDict(
- (k, [self.post_process_file(f) for f in v])
- for k, v in self.paths.items()
- )
-
-
-def config_files_to_try(config_file):
- """What config files should we try to read?
-
- Returns a list of tuples:
- (filename, is_our_file, was_file_specified)
- """
-
- # Some API users were specifying ".coveragerc" to mean the same as
- # True, so make it so.
- if config_file == ".coveragerc":
- config_file = True
- specified_file = (config_file is not True)
- if not specified_file:
- # No file was specified. Check COVERAGE_RCFILE.
- config_file = os.environ.get('COVERAGE_RCFILE')
- if config_file:
- specified_file = True
- if not specified_file:
- # Still no file specified. Default to .coveragerc
- config_file = ".coveragerc"
- files_to_try = [
- (config_file, True, specified_file),
- ("setup.cfg", False, False),
- ("tox.ini", False, False),
- ("pyproject.toml", False, False),
- ]
- return files_to_try
-
-
-def read_coverage_config(config_file, **kwargs):
- """Read the coverage.py configuration.
-
- Arguments:
- config_file: a boolean or string, see the `Coverage` class for the
- tricky details.
- all others: keyword arguments from the `Coverage` class, used for
- setting values in the configuration.
-
- Returns:
- config:
- config is a CoverageConfig object read from the appropriate
- configuration file.
-
- """
- # Build the configuration from a number of sources:
- # 1) defaults:
- config = CoverageConfig()
-
- # 1.1 built-in config
- if getattr(sys, 'is_standalone_binary', False):
- config.from_resource("/coverage_plugins/coveragerc.txt")
-
- # 2) from a file:
- if config_file:
- files_to_try = config_files_to_try(config_file)
-
- for fname, our_file, specified_file in files_to_try:
- if getattr(sys, 'is_standalone_binary', False) and fname == "/coverage_plugins/coveragerc.txt":
- continue
- config_read = config.from_file(fname, our_file=our_file)
- if config_read:
- break
- if specified_file:
- raise CoverageException("Couldn't read '%s' as a config file" % fname)
-
- # $set_env.py: COVERAGE_DEBUG - Options for --debug.
- # 3) from environment variables:
- env_data_file = os.environ.get('COVERAGE_FILE')
- if env_data_file:
- config.data_file = env_data_file
- debugs = os.environ.get('COVERAGE_DEBUG')
- if debugs:
- config.debug.extend(d.strip() for d in debugs.split(","))
-
- # 4) from constructor arguments:
- config.from_args(**kwargs)
-
- # Once all the config has been collected, there's a little post-processing
- # to do.
- config.post_process()
-
- return config
-
-
-def _load_config_from_resource(resource_name):
- from io import StringIO
- from library.python import resource
-
- config_data = resource.find(resource_name)
- if config_data is None:
- raise IOError("No such resource: " + resource_name)
-
- config_data = config_data.decode('utf-8')
- cp = HandyConfigParser(True)
- try:
- cp.readfp(StringIO(config_data))
- except configparser.Error as err:
- raise CoverageException("Couldn't read config %s: %s" % (resource_name, err))
- return cp, config_data
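[editorial note, not part of the patch] The deleted config.py resolves every setting through the "section:option" names listed in CONFIG_FILE_OPTIONS, whether the value comes from a file, an environment variable, or keyword arguments. A small sketch of that mapping, assuming the module is importable; the option names used are the ones defined above:

from coverage.config import CoverageConfig

config = CoverageConfig()
config.set_option("run:branch", True)         # [run] branch = True
config.set_option("report:fail_under", 85.0)  # [report] fail_under = 85.0

assert config.get_option("run:branch") is True
assert config.branch is True           # the attribute named in CONFIG_FILE_OPTIONS
assert config.fail_under == 85.0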
diff --git a/contrib/python/coverage/py2/coverage/context.py b/contrib/python/coverage/py2/coverage/context.py
deleted file mode 100644
index ea13da21ed..0000000000
--- a/contrib/python/coverage/py2/coverage/context.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determine contexts for coverage.py"""
-
-
-def combine_context_switchers(context_switchers):
- """Create a single context switcher from multiple switchers.
-
- `context_switchers` is a list of functions that take a frame as an
- argument and return a string to use as the new context label.
-
- Returns a function that composites `context_switchers` functions, or None
- if `context_switchers` is an empty list.
-
- When invoked, the combined switcher calls `context_switchers` one-by-one
- until a string is returned. The combined switcher returns None if all
- `context_switchers` return None.
- """
- if not context_switchers:
- return None
-
- if len(context_switchers) == 1:
- return context_switchers[0]
-
- def should_start_context(frame):
- """The combiner for multiple context switchers."""
- for switcher in context_switchers:
- new_context = switcher(frame)
- if new_context is not None:
- return new_context
- return None
-
- return should_start_context
-
-
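Note: a small usage sketch of combine_context_switchers defined above; the combined switcher asks each function in turn and keeps the first non-None label (the switcher functions here are illustrative)::

    import sys
    from coverage.context import combine_context_switchers

    def by_test_name(frame):
        name = frame.f_code.co_name
        return name if name.startswith("test") else None

    def constant_label(frame):
        return "other"

    switcher = combine_context_switchers([by_test_name, constant_label])
    print(switcher(sys._getframe()))  # "other" here; a test_* frame would match first
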
-def should_start_context_test_function(frame):
- """Is this frame calling a test_* function?"""
- co_name = frame.f_code.co_name
- if co_name.startswith("test") or co_name == "runTest":
- return qualname_from_frame(frame)
- return None
-
-
-def qualname_from_frame(frame):
- """Get a qualified name for the code running in `frame`."""
- co = frame.f_code
- fname = co.co_name
- method = None
- if co.co_argcount and co.co_varnames[0] == "self":
- self = frame.f_locals["self"]
- method = getattr(self, fname, None)
-
- if method is None:
- func = frame.f_globals.get(fname)
- if func is None:
- return None
- return func.__module__ + '.' + fname
-
- func = getattr(method, '__func__', None)
- if func is None:
- cls = self.__class__
- return cls.__module__ + '.' + cls.__name__ + "." + fname
-
- if hasattr(func, '__qualname__'):
- qname = func.__module__ + '.' + func.__qualname__
- else:
- for cls in getattr(self.__class__, '__mro__', ()):
- f = cls.__dict__.get(fname, None)
- if f is None:
- continue
- if f is func:
- qname = cls.__module__ + '.' + cls.__name__ + "." + fname
- break
- else:
- # Support for old-style classes.
- def mro(bases):
- for base in bases:
- f = base.__dict__.get(fname, None)
- if f is func:
- return base.__module__ + '.' + base.__name__ + "." + fname
- for base in bases:
- qname = mro(base.__bases__)
- if qname is not None:
- return qname
- return None
- qname = mro([self.__class__])
- if qname is None:
- qname = func.__module__ + '.' + fname
-
- return qname
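Note: qualname_from_frame above inspects the frame's `self` argument (when present) to build a dotted name. A quick sketch of what it returns for a method frame (the module part depends on how the snippet is run)::

    import sys
    from coverage.context import qualname_from_frame

    class Suite(object):
        def test_sum(self):
            # The frame has a "self" argument, so the helper resolves the
            # bound method and returns e.g. "__main__.Suite.test_sum".
            return qualname_from_frame(sys._getframe())

    print(Suite().test_sum())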
diff --git a/contrib/python/coverage/py2/coverage/control.py b/contrib/python/coverage/py2/coverage/control.py
deleted file mode 100644
index 605b50c26b..0000000000
--- a/contrib/python/coverage/py2/coverage/control.py
+++ /dev/null
@@ -1,1162 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Core control stuff for coverage.py."""
-
-import atexit
-import collections
-import contextlib
-import os
-import os.path
-import platform
-import sys
-import time
-import json
-
-from coverage import env
-from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class, iitems
-from coverage.collector import Collector, CTracer
-from coverage.config import read_coverage_config
-from coverage.context import should_start_context_test_function, combine_context_switchers
-from coverage.data import CoverageData, combine_parallel_data
-from coverage.debug import DebugControl, short_stack, write_formatted_info
-from coverage.disposition import disposition_debug_msg
-from coverage.files import PathAliases, abs_file, canonical_filename, relative_filename, set_relative_directory
-from coverage.html import HtmlReporter
-from coverage.inorout import InOrOut
-from coverage.jsonreport import JsonReporter
-from coverage.misc import CoverageException, bool_or_none, join_regex
-from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module
-from coverage.plugin import FileReporter
-from coverage.plugin_support import Plugins
-from coverage.python import PythonFileReporter
-from coverage.report import render_report
-from coverage.results import Analysis, Numbers
-from coverage.summary import SummaryReporter
-from coverage.xmlreport import XmlReporter
-
-try:
- from coverage.multiproc import patch_multiprocessing
-except ImportError: # pragma: only jython
- # Jython has no multiprocessing module.
- patch_multiprocessing = None
-
-os = isolate_module(os)
-
-@contextlib.contextmanager
-def override_config(cov, **kwargs):
- """Temporarily tweak the configuration of `cov`.
-
- The arguments are applied to `cov.config` with the `from_args` method.
- At the end of the with-statement, the old configuration is restored.
- """
- original_config = cov.config
- cov.config = cov.config.copy()
- try:
- cov.config.from_args(**kwargs)
- yield
- finally:
- cov.config = original_config
-
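Note: override_config above is what the reporting methods further down use to apply their keyword arguments temporarily; it can also be exercised directly, e.g.::

    import coverage
    from coverage.control import override_config

    cov = coverage.Coverage(config_file=False)
    with override_config(cov, show_missing=True, precision=2):
        assert cov.config.show_missing is True   # copied config with overrides
    assert cov.config.show_missing is False      # original config restored
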
-
-_DEFAULT_DATAFILE = DefaultValue("MISSING")
-
-class Coverage(object):
- """Programmatic access to coverage.py.
-
- To use::
-
- from coverage import Coverage
-
- cov = Coverage()
- cov.start()
- #.. call your code ..
- cov.stop()
- cov.html_report(directory='covhtml')
-
- Note: in keeping with Python custom, names starting with underscore are
- not part of the public API. They might stop working at any point. Please
- limit yourself to documented methods to avoid problems.
-
- """
-
- # The stack of started Coverage instances.
- _instances = []
-
- @classmethod
- def current(cls):
- """Get the latest started `Coverage` instance, if any.
-
- Returns: a `Coverage` instance, or None.
-
- .. versionadded:: 5.0
-
- """
- if cls._instances:
- return cls._instances[-1]
- else:
- return None
-
- def __init__(
- self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None,
- auto_data=False, timid=None, branch=None, config_file=True,
- source=None, source_pkgs=None, omit=None, include=None, debug=None,
- concurrency=None, check_preimported=False, context=None,
- ): # pylint: disable=too-many-arguments
- """
- Many of these arguments duplicate and override values that can be
- provided in a configuration file. Parameters that are missing here
- will use values from the config file.
-
- `data_file` is the base name of the data file to use. The config value
- defaults to ".coverage". None can be provided to prevent writing a data
- file. `data_suffix` is appended (with a dot) to `data_file` to create
- the final file name. If `data_suffix` is simply True, then a suffix is
- created with the machine and process identity included.
-
- `cover_pylib` is a boolean determining whether Python code installed
- with the Python interpreter is measured. This includes the Python
- standard library and any packages installed with the interpreter.
-
- If `auto_data` is true, then any existing data file will be read when
- coverage measurement starts, and data will be saved automatically when
- measurement stops.
-
- If `timid` is true, then a slower and simpler trace function will be
- used. This is important for some environments where manipulation of
- tracing functions breaks the faster trace function.
-
- If `branch` is true, then branch coverage will be measured in addition
- to the usual statement coverage.
-
- `config_file` determines what configuration file to read:
-
- * If it is ".coveragerc", it is interpreted as if it were True,
- for backward compatibility.
-
- * If it is a string, it is the name of the file to read. If the
- file can't be read, it is an error.
-
- * If it is True, then a few standard file names are tried
- (".coveragerc", "setup.cfg", "tox.ini", "pyproject.toml"). It is
- not an error for these files to not be found.
-
- * If it is False, then no configuration file is read.
-
- `source` is a list of file paths or package names. Only code located
- in the trees indicated by the file paths or package names will be
- measured.
-
- `source_pkgs` is a list of package names. It works the same as
- `source`, but can be used to name packages where the name can also be
- interpreted as a file path.
-
- `include` and `omit` are lists of file name patterns. Files that match
- `include` will be measured, files that match `omit` will not. Each
- will also accept a single string argument.
-
- `debug` is a list of strings indicating what debugging information is
- desired.
-
- `concurrency` is a string indicating the concurrency library being used
- in the measured code. Without this, coverage.py will get incorrect
- results if these libraries are in use. Valid strings are "greenlet",
- "eventlet", "gevent", "multiprocessing", or "thread" (the default).
- This can also be a list of these strings.
-
- If `check_preimported` is true, then when coverage is started, the
- already-imported files will be checked to see if they should be
- measured by coverage. Importing measured files before coverage is
- started can mean that code is missed.
-
- `context` is a string to use as the :ref:`static context
- <static_contexts>` label for collected data.
-
- .. versionadded:: 4.0
- The `concurrency` parameter.
-
- .. versionadded:: 4.2
- The `concurrency` parameter can now be a list of strings.
-
- .. versionadded:: 5.0
- The `check_preimported` and `context` parameters.
-
- .. versionadded:: 5.3
- The `source_pkgs` parameter.
-
- """
- # data_file=None means no disk file at all. data_file missing means
- # use the value from the config file.
- self._no_disk = data_file is None
- if data_file is _DEFAULT_DATAFILE:
- data_file = None
-
- # Build our configuration from a number of sources.
- self.config = read_coverage_config(
- config_file=config_file,
- data_file=data_file, cover_pylib=cover_pylib, timid=timid,
- branch=branch, parallel=bool_or_none(data_suffix),
- source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug,
- report_omit=omit, report_include=include,
- concurrency=concurrency, context=context,
- )
-
- # This is injectable by tests.
- self._debug_file = None
-
- self._auto_load = self._auto_save = auto_data
- self._data_suffix_specified = data_suffix
-
- # Is it ok for no data to be collected?
- self._warn_no_data = True
- self._warn_unimported_source = True
- self._warn_preimported_source = check_preimported
- self._no_warn_slugs = None
-
- # A record of all the warnings that have been issued.
- self._warnings = []
-
- # Other instance attributes, set later.
- self._data = self._collector = None
- self._plugins = None
- self._inorout = None
- self._data_suffix = self._run_suffix = None
- self._exclude_re = None
- self._debug = None
- self._file_mapper = None
-
- # State machine variables:
- # Have we initialized everything?
- self._inited = False
- self._inited_for_start = False
- # Have we started collecting and not stopped it?
- self._started = False
- # Should we write the debug output?
- self._should_write_debug = True
-
- # If we have sub-process measurement happening automatically, then we
- # want any explicit creation of a Coverage object to mean, this process
- # is already coverage-aware, so don't auto-measure it. By now, the
- # auto-creation of a Coverage object has already happened. But we can
- # find it and tell it not to save its data.
- if not env.METACOV:
- _prevent_sub_process_measurement()
-
- # Store constructor args to reproduce Coverage object in a subprocess created via multiprocessing.Process
- self._dumped_args = json.dumps(dict(
- data_file=data_file, data_suffix=data_suffix, cover_pylib=cover_pylib,
- auto_data=auto_data, timid=timid, branch=branch, config_file=config_file,
- source=source, omit=omit, include=include, debug=debug,
- concurrency=concurrency
- ))
-
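Note: a constructor sketch tying together a few of the parameters documented above (the patterns and values are illustrative)::

    import coverage

    def work():
        return sum(i * i for i in range(100))

    cov = coverage.Coverage(
        data_file=None,       # keep the data in memory, never write .coverage
        branch=True,          # measure branch arcs as well as statements
        config_file=False,    # skip the .coveragerc / setup.cfg / tox.ini search
        omit=["*/tests/*"],   # passed to both run_omit and report_omit above
    )
    cov.start()
    work()                    # only code called after start() is measured
    cov.stop()
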
- def _init(self):
- """Set all the initial state.
-
- This is called by the public methods to initialize state. This lets us
- construct a :class:`Coverage` object, then tweak its state before this
- function is called.
-
- """
- if self._inited:
- return
-
- self._inited = True
-
- # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
- # is an environment variable, the name of a file to append debug logs
- # to.
- self._debug = DebugControl(self.config.debug, self._debug_file)
-
- if "multiprocessing" in (self.config.concurrency or ()):
- # Multi-processing uses parallel for the subprocesses, so also use
- # it for the main process.
- self.config.parallel = True
-
- # _exclude_re is a dict that maps exclusion list names to compiled regexes.
- self._exclude_re = {}
-
- set_relative_directory()
-
- if getattr(sys, 'is_standalone_binary', False):
- self._file_mapper = canonical_filename
- else:
- self._file_mapper = relative_filename if self.config.relative_files else abs_file
-
- # Load plugins
- self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug)
-
- # Run configuring plugins.
- for plugin in self._plugins.configurers:
- # We need an object with set_option and get_option. Either self or
- # self.config will do. Choosing randomly stops people from doing
- # other things with those objects, against the public API. Yes,
- # this is a bit childish. :)
- plugin.configure([self, self.config][int(time.time()) % 2])
-
- def _post_init(self):
- """Stuff to do after everything is initialized."""
- if self._should_write_debug:
- self._should_write_debug = False
- self._write_startup_debug()
-
- # '[run] _crash' will raise an exception if the value is close by in
- # the call stack, for testing error handling.
- if self.config._crash and self.config._crash in short_stack(limit=4):
- raise Exception("Crashing because called by {}".format(self.config._crash))
-
- def _write_startup_debug(self):
- """Write out debug info at startup if needed."""
- wrote_any = False
- with self._debug.without_callers():
- if self._debug.should('config'):
- config_info = sorted(self.config.__dict__.items())
- config_info = [(k, v) for k, v in config_info if not k.startswith('_')]
- write_formatted_info(self._debug, "config", config_info)
- wrote_any = True
-
- if self._debug.should('sys'):
- write_formatted_info(self._debug, "sys", self.sys_info())
- for plugin in self._plugins:
- header = "sys: " + plugin._coverage_plugin_name
- info = plugin.sys_info()
- write_formatted_info(self._debug, header, info)
- wrote_any = True
-
- if wrote_any:
- write_formatted_info(self._debug, "end", ())
-
- def _should_trace(self, filename, frame):
- """Decide whether to trace execution in `filename`.
-
- Calls `_should_trace_internal`, and returns the FileDisposition.
-
- """
- disp = self._inorout.should_trace(filename, frame)
- if self._debug.should('trace'):
- self._debug.write(disposition_debug_msg(disp))
- return disp
-
- def _check_include_omit_etc(self, filename, frame):
- """Check a file name against the include/omit/etc. rules, verbosely.
-
- Returns a boolean: True if the file should be traced, False if not.
-
- """
- reason = self._inorout.check_include_omit_etc(filename, frame)
- if self._debug.should('trace'):
- if not reason:
- msg = "Including %r" % (filename,)
- else:
- msg = "Not including %r: %s" % (filename, reason)
- self._debug.write(msg)
-
- return not reason
-
- def _warn(self, msg, slug=None, once=False):
- """Use `msg` as a warning.
-
- For warning suppression, use `slug` as the shorthand.
-
- If `once` is true, only show this warning once (determined by the
- slug).
-
- """
- if self._no_warn_slugs is None:
- self._no_warn_slugs = list(self.config.disable_warnings)
-
- if slug in self._no_warn_slugs:
- # Don't issue the warning
- return
-
- self._warnings.append(msg)
- if slug:
- msg = "%s (%s)" % (msg, slug)
- if self._debug.should('pid'):
- msg = "[%d] %s" % (os.getpid(), msg)
- sys.stderr.write("Coverage.py warning: %s\n" % msg)
-
- if once:
- self._no_warn_slugs.append(slug)
-
- def get_option(self, option_name):
- """Get an option from the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- Returns the value of the option. The type depends on the option
- selected.
-
- As a special case, an `option_name` of ``"paths"`` will return an
- OrderedDict with the entire ``[paths]`` section value.
-
- .. versionadded:: 4.0
-
- """
- return self.config.get_option(option_name)
-
- def set_option(self, option_name, value):
- """Set an option in the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with ``"run:branch"``.
-
- `value` is the new value for the option. This should be an
- appropriate Python value. For example, use True for booleans, not the
- string ``"True"``.
-
- As an example, calling::
-
- cov.set_option("run:branch", True)
-
- has the same effect as this configuration file::
-
- [run]
- branch = True
-
- As a special case, an `option_name` of ``"paths"`` will replace the
- entire ``[paths]`` section. The value should be an OrderedDict.
-
- .. versionadded:: 4.0
-
- """
- self.config.set_option(option_name, value)
-
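Note: get_option and set_option address options as "section:name" strings; a short sketch::

    import coverage

    cov = coverage.Coverage(config_file=False)
    cov.set_option("run:branch", True)           # same as "[run] branch = True"
    print(cov.get_option("run:branch"))          # True
    print(cov.get_option("report:precision"))    # default value, 0
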
- def load(self):
- """Load previously-collected coverage data from the data file."""
- self._init()
- if self._collector:
- self._collector.reset()
- should_skip = self.config.parallel and not os.path.exists(self.config.data_file)
- if not should_skip:
- self._init_data(suffix=None)
- self._post_init()
- if not should_skip:
- self._data.read()
-
- def _init_for_start(self):
- """Initialization for start()"""
- # Construct the collector.
- concurrency = self.config.concurrency or ()
- if "multiprocessing" in concurrency:
- if not patch_multiprocessing:
- raise CoverageException( # pragma: only jython
- "multiprocessing is not supported on this Python"
- )
- patch_multiprocessing(rcfile=self.config.config_file, coverage_args=self._dumped_args)
-
- dycon = self.config.dynamic_context
- if not dycon or dycon == "none":
- context_switchers = []
- elif dycon == "test_function":
- context_switchers = [should_start_context_test_function]
- else:
- raise CoverageException(
- "Don't understand dynamic_context setting: {!r}".format(dycon)
- )
-
- context_switchers.extend(
- plugin.dynamic_context for plugin in self._plugins.context_switchers
- )
-
- should_start_context = combine_context_switchers(context_switchers)
-
- self._collector = Collector(
- should_trace=self._should_trace,
- check_include=self._check_include_omit_etc,
- should_start_context=should_start_context,
- file_mapper=self._file_mapper,
- timid=self.config.timid,
- branch=self.config.branch,
- warn=self._warn,
- concurrency=concurrency,
- )
-
- suffix = self._data_suffix_specified
- if suffix or self.config.parallel:
- if not isinstance(suffix, string_class):
- # if data_suffix=True, use .machinename.pid.random
- suffix = True
- else:
- suffix = None
-
- self._init_data(suffix)
-
- self._collector.use_data(self._data, self.config.context)
-
- # Early warning if we aren't going to be able to support plugins.
- if self._plugins.file_tracers and not self._collector.supports_plugins:
- self._warn(
- "Plugin file tracers (%s) aren't supported with %s" % (
- ", ".join(
- plugin._coverage_plugin_name
- for plugin in self._plugins.file_tracers
- ),
- self._collector.tracer_name(),
- )
- )
- for plugin in self._plugins.file_tracers:
- plugin._coverage_enabled = False
-
- # Create the file classifying substructure.
- self._inorout = InOrOut(
- warn=self._warn,
- debug=(self._debug if self._debug.should('trace') else None),
- )
- self._inorout.configure(self.config)
- self._inorout.plugins = self._plugins
- self._inorout.disp_class = self._collector.file_disposition_class
-
- # It's useful to write debug info after initing for start.
- self._should_write_debug = True
-
- atexit.register(self._atexit)
-
- def _init_data(self, suffix):
- """Create a data file if we don't have one yet."""
- if self._data is None:
- # Create the data file. We do this at construction time so that the
- # data file will be written into the directory where the process
- # started rather than wherever the process eventually chdir'd to.
- ensure_dir_for_file(self.config.data_file)
- self._data = CoverageData(
- basename=self.config.data_file,
- suffix=suffix,
- warn=self._warn,
- debug=self._debug,
- no_disk=self._no_disk,
- )
-
- def start(self):
- """Start measuring code coverage.
-
- Coverage measurement only occurs in functions called after
- :meth:`start` is invoked. Statements in the same scope as
- :meth:`start` won't be measured.
-
- Once you invoke :meth:`start`, you must also call :meth:`stop`
- eventually, or your process might not shut down cleanly.
-
- """
- self._init()
- if not self._inited_for_start:
- self._inited_for_start = True
- self._init_for_start()
- self._post_init()
-
- # Issue warnings for possible problems.
- self._inorout.warn_conflicting_settings()
-
- # See if we think some code that would eventually be measured has
- # already been imported.
- if self._warn_preimported_source:
- self._inorout.warn_already_imported_files()
-
- if self._auto_load:
- self.load()
-
- self._collector.start()
- self._started = True
- self._instances.append(self)
-
- def stop(self):
- """Stop measuring code coverage."""
- if self._instances:
- if self._instances[-1] is self:
- self._instances.pop()
- if self._started:
- self._collector.stop()
- self._started = False
-
- def _atexit(self):
- """Clean up on process shutdown."""
- if self._debug.should("process"):
- self._debug.write("atexit: pid: {}, instance: {!r}".format(os.getpid(), self))
- if self._started:
- self.stop()
- if self._auto_save:
- self.save()
-
- def erase(self):
- """Erase previously collected coverage data.
-
- This removes the in-memory data collected in this session as well as
- discarding the data file.
-
- """
- self._init()
- self._post_init()
- if self._collector:
- self._collector.reset()
- self._init_data(suffix=None)
- self._data.erase(parallel=self.config.parallel)
- self._data = None
- self._inited_for_start = False
-
- def switch_context(self, new_context):
- """Switch to a new dynamic context.
-
- `new_context` is a string to use as the :ref:`dynamic context
- <dynamic_contexts>` label for collected data. If a :ref:`static
- context <static_contexts>` is in use, the static and dynamic context
- labels will be joined together with a pipe character.
-
- Coverage collection must be started already.
-
- .. versionadded:: 5.0
-
- """
- if not self._started: # pragma: part started
- raise CoverageException(
- "Cannot switch context, coverage is not started"
- )
-
- if self._collector.should_start_context:
- self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True)
-
- self._collector.switch_context(new_context)
-
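Note: switch_context requires that collection has already been started; a sketch combining a static label with dynamic labels (the label strings are illustrative)::

    import coverage

    def phase_one():
        return sum(range(10))

    def phase_two():
        return max(range(10))

    cov = coverage.Coverage(data_file=None, context="build-42")
    cov.start()
    cov.switch_context("phase-1")   # joined with the static context by a pipe
    phase_one()
    cov.switch_context("phase-2")
    phase_two()
    cov.stop()
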
- def clear_exclude(self, which='exclude'):
- """Clear the exclude list."""
- self._init()
- setattr(self.config, which + "_list", [])
- self._exclude_regex_stale()
-
- def exclude(self, regex, which='exclude'):
- """Exclude source lines from execution consideration.
-
- A number of lists of regular expressions are maintained. Each list
- selects lines that are treated differently during reporting.
-
- `which` determines which list is modified. The "exclude" list selects
- lines that are not considered executable at all. The "partial" list
- indicates lines with branches that are not taken.
-
- `regex` is a regular expression. The regex is added to the specified
- list. If any of the regexes in the list is found in a line, the line
- is marked for special treatment during reporting.
-
- """
- self._init()
- excl_list = getattr(self.config, which + "_list")
- excl_list.append(regex)
- self._exclude_regex_stale()
-
- def _exclude_regex_stale(self):
- """Drop all the compiled exclusion regexes; a list was modified."""
- self._exclude_re.clear()
-
- def _exclude_regex(self, which):
- """Return a compiled regex for the given exclusion list."""
- if which not in self._exclude_re:
- excl_list = getattr(self.config, which + "_list")
- self._exclude_re[which] = join_regex(excl_list)
- return self._exclude_re[which]
-
- def get_exclude_list(self, which='exclude'):
- """Return a list of excluded regex patterns.
-
- `which` indicates which list is desired. See :meth:`exclude` for the
- lists that are available, and their meaning.
-
- """
- self._init()
- return getattr(self.config, which + "_list")
-
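Note: the exclusion lists are plain regex lists on the config object; for example::

    import coverage

    cov = coverage.Coverage(config_file=False)
    cov.exclude(r"if DEBUG:")                     # never counted as executable
    cov.exclude(r"while True:", which="partial")  # branch that need not be taken
    print(cov.get_exclude_list())                 # default "pragma: no cover" + ours
    print(cov.get_exclude_list("partial"))
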
- def save(self):
- """Save the collected coverage data to the data file."""
- data = self.get_data()
- data.write()
-
- def combine(self, data_paths=None, strict=False, keep=False):
- """Combine together a number of similarly-named coverage data files.
-
- All coverage data files whose name starts with `data_file` (from the
- coverage() constructor) will be read, and combined together into the
- current measurements.
-
- `data_paths` is a list of files or directories from which data should
- be combined. If no list is passed, then the data files from the
- directory indicated by the current data file (probably the current
- directory) will be combined.
-
- If `strict` is true, then it is an error to attempt to combine when
- there are no data files to combine.
-
- If `keep` is true, then original input data files won't be deleted.
-
- .. versionadded:: 4.0
- The `data_paths` parameter.
-
- .. versionadded:: 4.3
- The `strict` parameter.
-
- .. versionadded:: 5.5
- The `keep` parameter.
- """
- self._init()
- self._init_data(suffix=None)
- self._post_init()
- self.get_data()
-
- aliases = None
- if self.config.paths:
- aliases = PathAliases()
- for paths in self.config.paths.values():
- result = paths[0]
- for pattern in paths[1:]:
- aliases.add(pattern, result)
-
- combine_parallel_data(
- self._data,
- aliases=aliases,
- data_paths=data_paths,
- strict=strict,
- keep=keep,
- )
-
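Note: combine() merges sibling data files into the current data set; a sketch, assuming parallel runs already left ".coverage.*" files in a hypothetical build/ directory::

    import coverage

    cov = coverage.Coverage()          # data_file defaults to ".coverage"
    cov.combine(data_paths=["build/"], strict=False, keep=True)
    cov.save()                         # write the merged result to .coverage
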
- def get_data(self):
- """Get the collected data.
-
- Also warn about various problems collecting data.
-
- Returns a :class:`coverage.CoverageData`, the collected coverage data.
-
- .. versionadded:: 4.0
-
- """
- self._init()
- self._init_data(suffix=None)
- self._post_init()
-
- for plugin in self._plugins:
- if not plugin._coverage_enabled:
- self._collector.plugin_was_disabled(plugin)
-
- if self._collector and self._collector.flush_data():
- self._post_save_work()
-
- return self._data
-
- def _post_save_work(self):
- """After saving data, look for warnings, post-work, etc.
-
- Warn about things that should have happened but didn't.
- Look for unexecuted files.
-
- """
- # If there are still entries in the source_pkgs_unmatched list,
- # then we never encountered those packages.
- if self._warn_unimported_source:
- self._inorout.warn_unimported_source()
-
- # Find out if we got any data.
- if not self._data and self._warn_no_data:
- self._warn("No data was collected.", slug="no-data-collected")
-
- # Touch all the files that could have executed, so that we can
- # mark completely unexecuted files as 0% covered.
- if self._data is not None:
- file_paths = collections.defaultdict(list)
- for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files():
- file_path = self._file_mapper(file_path)
- file_paths[plugin_name].append(file_path)
- for plugin_name, paths in file_paths.items():
- self._data.touch_files(paths, plugin_name)
-
- if self.config.note:
- self._warn("The '[run] note' setting is no longer supported.")
-
- # Backward compatibility with version 1.
- def analysis(self, morf):
- """Like `analysis2` but doesn't return excluded line numbers."""
- f, s, _, m, mf = self.analysis2(morf)
- return f, s, m, mf
-
- def analysis2(self, morf):
- """Analyze a module.
-
- `morf` is a module or a file name. It will be analyzed to determine
- its coverage statistics. The return value is a 5-tuple:
-
- * The file name for the module.
- * A list of line numbers of executable statements.
- * A list of line numbers of excluded statements.
- * A list of line numbers of statements not run (missing from
- execution).
- * A readable formatted string of the missing line numbers.
-
- The analysis uses the source file itself and the current measured
- coverage data.
-
- """
- analysis = self._analyze(morf)
- return (
- analysis.filename,
- sorted(analysis.statements),
- sorted(analysis.excluded),
- sorted(analysis.missing),
- analysis.missing_formatted(),
- )
-
- def _analyze(self, it):
- """Analyze a single morf or code unit.
-
- Returns an `Analysis` object.
-
- """
- # All reporting comes through here, so do reporting initialization.
- self._init()
- Numbers.set_precision(self.config.precision)
- self._post_init()
-
- data = self.get_data()
- if not isinstance(it, FileReporter):
- it = self._get_file_reporter(it)
-
- return Analysis(data, it, self._file_mapper)
-
- def _get_file_reporter(self, morf):
- """Get a FileReporter for a module or file name."""
- plugin = None
- file_reporter = "python"
-
- if isinstance(morf, string_class):
- if getattr(sys, 'is_standalone_binary', False):
- # Leave morf in canonical format - relative to the arcadia root
- mapped_morf = morf
- else:
- mapped_morf = self._file_mapper(morf)
- plugin_name = self._data.file_tracer(mapped_morf)
- if plugin_name:
- plugin = self._plugins.get(plugin_name)
-
- if plugin:
- file_reporter = plugin.file_reporter(mapped_morf)
- if file_reporter is None:
- raise CoverageException(
- "Plugin %r did not provide a file reporter for %r." % (
- plugin._coverage_plugin_name, morf
- )
- )
-
- if file_reporter == "python":
- file_reporter = PythonFileReporter(morf, self)
-
- return file_reporter
-
- def _get_file_reporters(self, morfs=None):
- """Get a list of FileReporters for a list of modules or file names.
-
- For each module or file name in `morfs`, find a FileReporter. Return
- the list of FileReporters.
-
- If `morfs` is a single module or file name, this returns a list of one
- FileReporter. If `morfs` is empty or None, then the list of all files
- measured is used to find the FileReporters.
-
- """
- if not morfs:
- morfs = self._data.measured_files()
-
- # Be sure we have a collection.
- if not isinstance(morfs, (list, tuple, set)):
- morfs = [morfs]
-
- file_reporters = [self._get_file_reporter(morf) for morf in morfs]
- return file_reporters
-
- def report(
- self, morfs=None, show_missing=None, ignore_errors=None,
- file=None, omit=None, include=None, skip_covered=None,
- contexts=None, skip_empty=None, precision=None, sort=None
- ):
- """Write a textual summary report to `file`.
-
- Each module in `morfs` is listed, with counts of statements, executed
- statements, missing statements, and a list of lines missed.
-
- If `show_missing` is true, then details of which lines or branches are
- missing will be included in the report. If `ignore_errors` is true,
- then a failure while reporting a single file will not stop the entire
- report.
-
- `file` is a file-like object, suitable for writing.
-
- `include` is a list of file name patterns. Files that match will be
- included in the report. Files matching `omit` will not be included in
- the report.
-
- If `skip_covered` is true, don't report on files with 100% coverage.
-
- If `skip_empty` is true, don't report on empty files (those that have
- no statements).
-
- `contexts` is a list of regular expressions. Only data from
- :ref:`dynamic contexts <dynamic_contexts>` that match one of those
- expressions (using :func:`re.search <python:re.search>`) will be
- included in the report.
-
- `precision` is the number of digits to display after the decimal
- point for percentages.
-
- All of the arguments default to the settings read from the
- :ref:`configuration file <config>`.
-
- Returns a float, the total percentage covered.
-
- .. versionadded:: 4.0
- The `skip_covered` parameter.
-
- .. versionadded:: 5.0
- The `contexts` and `skip_empty` parameters.
-
- .. versionadded:: 5.2
- The `precision` parameter.
-
- """
- with override_config(
- self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- show_missing=show_missing, skip_covered=skip_covered,
- report_contexts=contexts, skip_empty=skip_empty, precision=precision,
- sort=sort
- ):
- reporter = SummaryReporter(self)
- return reporter.report(morfs, outfile=file)
-
- def annotate(
- self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None, contexts=None,
- ):
- """Annotate a list of modules.
-
- Each module in `morfs` is annotated. The source is written to a new
- file, named with a ",cover" suffix, with each line prefixed with a
- marker to indicate the coverage of the line. Covered lines have ">",
- excluded lines have "-", and missing lines have "!".
-
- See :meth:`report` for other arguments.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit,
- report_include=include, report_contexts=contexts,
- ):
- reporter = AnnotateReporter(self)
- reporter.report(morfs, directory=directory)
-
- def html_report(
- self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None, extra_css=None, title=None,
- skip_covered=None, show_contexts=None, contexts=None,
- skip_empty=None, precision=None,
- ):
- """Generate an HTML report.
-
- The HTML is written to `directory`. The file "index.html" is the
- overview starting point, with links to more detailed pages for
- individual modules.
-
- `extra_css` is a path to a file of other CSS to apply on the page.
- It will be copied into the HTML directory.
-
- `title` is a text string (not HTML) to use as the title of the HTML
- report.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- .. note::
- The HTML report files are generated incrementally based on the
- source files and coverage results. If you modify the report files,
- the changes will not be considered. You should be careful about
- changing the files in the report folder.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- html_dir=directory, extra_css=extra_css, html_title=title,
- html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts,
- html_skip_empty=skip_empty, precision=precision,
- ):
- reporter = HtmlReporter(self)
- return reporter.report(morfs)
-
- def xml_report(
- self, morfs=None, outfile=None, ignore_errors=None,
- omit=None, include=None, contexts=None, skip_empty=None,
- ):
- """Generate an XML report of coverage results.
-
- The report is compatible with Cobertura reports.
-
- Each module in `morfs` is included in the report. `outfile` is the
- path to write the file to, "-" will write to stdout.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty,
- ):
- return render_report(self.config.xml_output, XmlReporter(self), morfs)
-
- def json_report(
- self, morfs=None, outfile=None, ignore_errors=None,
- omit=None, include=None, contexts=None, pretty_print=None,
- show_contexts=None
- ):
- """Generate a JSON report of coverage results.
-
- Each module in `morfs` is included in the report. `outfile` is the
- path to write the file to, "-" will write to stdout.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- .. versionadded:: 5.0
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print,
- json_show_contexts=show_contexts
- ):
- return render_report(self.config.json_output, JsonReporter(self), morfs)
-
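Note: the four reporters above share the same override_config pattern; one sketch driving them end to end (output locations are illustrative)::

    import coverage

    def work():
        return [i % 3 for i in range(30) if i % 2 == 0]

    cov = coverage.Coverage(branch=True)
    cov.start()
    work()
    cov.stop()

    total = cov.report(show_missing=True, skip_covered=False)  # text summary
    cov.html_report(directory="covhtml", title="Demo run")
    cov.xml_report(outfile="coverage.xml")                     # Cobertura-compatible
    cov.json_report(outfile="coverage.json", pretty_print=True)
    print("total: %.1f%%" % total)
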
- def sys_info(self):
- """Return a list of (key, value) pairs showing internal information."""
-
- import coverage as covmod
-
- self._init()
- self._post_init()
-
- def plugin_info(plugins):
- """Make an entry for the sys_info from a list of plug-ins."""
- entries = []
- for plugin in plugins:
- entry = plugin._coverage_plugin_name
- if not plugin._coverage_enabled:
- entry += " (disabled)"
- entries.append(entry)
- return entries
-
- info = [
- ('version', covmod.__version__),
- ('coverage', covmod.__file__),
- ('tracer', self._collector.tracer_name() if self._collector else "-none-"),
- ('CTracer', 'available' if CTracer else "unavailable"),
- ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
- ('plugins.configurers', plugin_info(self._plugins.configurers)),
- ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)),
- ('configs_attempted', self.config.attempted_config_files),
- ('configs_read', self.config.config_files_read),
- ('config_file', self.config.config_file),
- ('config_contents',
- repr(self.config._config_contents)
- if self.config._config_contents
- else '-none-'
- ),
- ('data_file', self._data.data_filename() if self._data is not None else "-none-"),
- ('python', sys.version.replace('\n', '')),
- ('platform', platform.platform()),
- ('implementation', platform.python_implementation()),
- ('executable', sys.executable),
- ('def_encoding', sys.getdefaultencoding()),
- ('fs_encoding', sys.getfilesystemencoding()),
- ('pid', os.getpid()),
- ('cwd', os.getcwd()),
- ('path', sys.path),
- ('environment', sorted(
- ("%s = %s" % (k, v))
- for k, v in iitems(os.environ)
- if any(slug in k for slug in ("COV", "PY"))
- )),
- ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))),
- ]
-
- if self._inorout:
- info.extend(self._inorout.sys_info())
-
- info.extend(CoverageData.sys_info())
-
- return info
-
-
-# Mega debugging...
-# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage.
-if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging
- from coverage.debug import decorate_methods, show_calls
-
- Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage)
-
-
-def process_startup():
- """Call this at Python start-up to perhaps measure coverage.
-
- If the environment variable COVERAGE_PROCESS_START is defined, coverage
- measurement is started. The value of the variable is the config file
- to use.
-
- There are two ways to configure your Python installation to invoke this
- function when Python starts:
-
- #. Create or append to sitecustomize.py to add these lines::
-
- import coverage
- coverage.process_startup()
-
- #. Create a .pth file in your Python installation containing::
-
- import coverage; coverage.process_startup()
-
- Returns the :class:`Coverage` instance that was started, or None if it was
- not started by this call.
-
- """
- cps = os.environ.get("COVERAGE_PROCESS_START")
- if not cps:
- # No request for coverage, nothing to do.
- return None
-
- # This function can be called more than once in a process. This happens
- # because some virtualenv configurations make the same directory visible
- # twice in sys.path. This means that the .pth file will be found twice,
- # and executed twice, executing this function twice. We set a global
- # flag (an attribute on this function) to indicate that coverage.py has
- # already been started, so we can avoid doing it twice.
- #
- # https://github.com/nedbat/coveragepy/issues/340 has more details.
-
- if hasattr(process_startup, "coverage"):
- # We've annotated this function before, so we must have already
- # started coverage.py in this process. Nothing to do.
- return None
-
- cov = Coverage(config_file=cps)
- process_startup.coverage = cov
- cov._warn_no_data = False
- cov._warn_unimported_source = False
- cov._warn_preimported_source = False
- cov._auto_save = True
- cov.start()
-
- return cov
-
-
-def _prevent_sub_process_measurement():
- """Stop any subprocess auto-measurement from writing data."""
- auto_created_coverage = getattr(process_startup, "coverage", None)
- if auto_created_coverage is not None:
- auto_created_coverage._auto_save = False
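Note: process_startup above is driven entirely by COVERAGE_PROCESS_START; a sketch of the environment side (the import hook itself goes into sitecustomize.py or a .pth file as the docstring describes)::

    import os
    os.environ["COVERAGE_PROCESS_START"] = ".coveragerc"  # names the config file to use

    import coverage
    cov = coverage.process_startup()
    if cov is None:
        print("not started: env var unset, or already started in this process")
    else:
        print("auto-measurement running, config file:", cov.config.config_file)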
diff --git a/contrib/python/coverage/py2/coverage/ctracer/datastack.c b/contrib/python/coverage/py2/coverage/ctracer/datastack.c
deleted file mode 100644
index a9cfcc2cf2..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/datastack.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "datastack.h"
-
-#define STACK_DELTA 20
-
-int
-DataStack_init(Stats *pstats, DataStack *pdata_stack)
-{
- pdata_stack->depth = -1;
- pdata_stack->stack = NULL;
- pdata_stack->alloc = 0;
- return RET_OK;
-}
-
-void
-DataStack_dealloc(Stats *pstats, DataStack *pdata_stack)
-{
- int i;
-
- for (i = 0; i < pdata_stack->alloc; i++) {
- Py_XDECREF(pdata_stack->stack[i].file_data);
- }
- PyMem_Free(pdata_stack->stack);
-}
-
-int
-DataStack_grow(Stats *pstats, DataStack *pdata_stack)
-{
- pdata_stack->depth++;
- if (pdata_stack->depth >= pdata_stack->alloc) {
- /* We've outgrown our data_stack array: make it bigger. */
- int bigger = pdata_stack->alloc + STACK_DELTA;
- DataStackEntry * bigger_data_stack = PyMem_Realloc(pdata_stack->stack, bigger * sizeof(DataStackEntry));
- STATS( pstats->stack_reallocs++; )
- if (bigger_data_stack == NULL) {
- PyErr_NoMemory();
- pdata_stack->depth--;
- return RET_ERROR;
- }
- /* Zero the new entries. */
- memset(bigger_data_stack + pdata_stack->alloc, 0, STACK_DELTA * sizeof(DataStackEntry));
-
- pdata_stack->stack = bigger_data_stack;
- pdata_stack->alloc = bigger;
- }
- return RET_OK;
-}
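Note: the C DataStack above keeps one entry per live call frame and grows its array in fixed STACK_DELTA chunks. A rough Python analogue of that reallocation strategy, purely illustrative and not part of coverage.py::

    STACK_DELTA = 20

    class DataStack(object):
        """Grow-by-chunk stack of per-frame entries, mirroring datastack.c."""

        def __init__(self):
            self.depth = -1    # index of the last used entry, -1 when empty
            self.stack = []    # pre-allocated entries, like the realloc'd array

        def grow(self):
            self.depth += 1
            if self.depth >= len(self.stack):
                # Outgrown the array: add STACK_DELTA zeroed entries at once,
                # the same amortisation PyMem_Realloc gives the C version.
                self.stack.extend(
                    {"file_data": None, "last_line": -1}
                    for _ in range(STACK_DELTA)
                )
            return self.stack[self.depth]

    ds = DataStack()
    entry = ds.grow()          # called once per frame, like DataStack_grow
    entry["last_line"] = 27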
diff --git a/contrib/python/coverage/py2/coverage/ctracer/datastack.h b/contrib/python/coverage/py2/coverage/ctracer/datastack.h
deleted file mode 100644
index 3b3078ba27..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/datastack.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_DATASTACK_H
-#define _COVERAGE_DATASTACK_H
-
-#include "util.h"
-#include "stats.h"
-
-/* An entry on the data stack. For each call frame, we need to record all
- * the information needed for CTracer_handle_line to operate as quickly as
- * possible.
- */
-typedef struct DataStackEntry {
- /* The current file_data dictionary. Owned. */
- PyObject * file_data;
-
- /* The disposition object for this frame. A borrowed instance of CFileDisposition. */
- PyObject * disposition;
-
- /* The FileTracer handling this frame, or None if it's Python. Borrowed. */
- PyObject * file_tracer;
-
- /* The line number of the last line recorded, for tracing arcs.
- -1 means there was no previous line, as when entering a code object.
- */
- int last_line;
-
- BOOL started_context;
-} DataStackEntry;
-
-/* A data stack is a dynamically allocated vector of DataStackEntry's. */
-typedef struct DataStack {
- int depth; /* The index of the last-used entry in stack. */
- int alloc; /* number of entries allocated at stack. */
- /* The file data at each level, or NULL if not recording. */
- DataStackEntry * stack;
-} DataStack;
-
-
-int DataStack_init(Stats * pstats, DataStack *pdata_stack);
-void DataStack_dealloc(Stats * pstats, DataStack *pdata_stack);
-int DataStack_grow(Stats * pstats, DataStack *pdata_stack);
-
-#endif /* _COVERAGE_DATASTACK_H */
diff --git a/contrib/python/coverage/py2/coverage/ctracer/filedisp.c b/contrib/python/coverage/py2/coverage/ctracer/filedisp.c
deleted file mode 100644
index 47782ae090..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/filedisp.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "filedisp.h"
-
-void
-CFileDisposition_dealloc(CFileDisposition *self)
-{
- Py_XDECREF(self->original_filename);
- Py_XDECREF(self->canonical_filename);
- Py_XDECREF(self->source_filename);
- Py_XDECREF(self->trace);
- Py_XDECREF(self->reason);
- Py_XDECREF(self->file_tracer);
- Py_XDECREF(self->has_dynamic_filename);
-}
-
-static PyMemberDef
-CFileDisposition_members[] = {
- { "original_filename", T_OBJECT, offsetof(CFileDisposition, original_filename), 0,
- PyDoc_STR("") },
-
- { "canonical_filename", T_OBJECT, offsetof(CFileDisposition, canonical_filename), 0,
- PyDoc_STR("") },
-
- { "source_filename", T_OBJECT, offsetof(CFileDisposition, source_filename), 0,
- PyDoc_STR("") },
-
- { "trace", T_OBJECT, offsetof(CFileDisposition, trace), 0,
- PyDoc_STR("") },
-
- { "reason", T_OBJECT, offsetof(CFileDisposition, reason), 0,
- PyDoc_STR("") },
-
- { "file_tracer", T_OBJECT, offsetof(CFileDisposition, file_tracer), 0,
- PyDoc_STR("") },
-
- { "has_dynamic_filename", T_OBJECT, offsetof(CFileDisposition, has_dynamic_filename), 0,
- PyDoc_STR("") },
-
- { NULL }
-};
-
-PyTypeObject
-CFileDispositionType = {
- MyType_HEAD_INIT
- "coverage.CFileDispositionType", /*tp_name*/
- sizeof(CFileDisposition), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)CFileDisposition_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "CFileDisposition objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- CFileDisposition_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
diff --git a/contrib/python/coverage/py2/coverage/ctracer/filedisp.h b/contrib/python/coverage/py2/coverage/ctracer/filedisp.h
deleted file mode 100644
index 860f9a50b1..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/filedisp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_FILEDISP_H
-#define _COVERAGE_FILEDISP_H
-
-#include "util.h"
-#include "structmember.h"
-
-typedef struct CFileDisposition {
- PyObject_HEAD
-
- PyObject * original_filename;
- PyObject * canonical_filename;
- PyObject * source_filename;
- PyObject * trace;
- PyObject * reason;
- PyObject * file_tracer;
- PyObject * has_dynamic_filename;
-} CFileDisposition;
-
-void CFileDisposition_dealloc(CFileDisposition *self);
-
-extern PyTypeObject CFileDispositionType;
-
-#endif /* _COVERAGE_FILEDISP_H */
diff --git a/contrib/python/coverage/py2/coverage/ctracer/module.c b/contrib/python/coverage/py2/coverage/ctracer/module.c
deleted file mode 100644
index f308902b69..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/module.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "tracer.h"
-#include "filedisp.h"
-
-/* Module definition */
-
-#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")
-
-#if PY_MAJOR_VERSION >= 3
-
-static PyModuleDef
-moduledef = {
- PyModuleDef_HEAD_INIT,
- "coverage.tracer",
- MODULE_DOC,
- -1,
- NULL, /* methods */
- NULL,
- NULL, /* traverse */
- NULL, /* clear */
- NULL
-};
-
-
-PyObject *
-PyInit_tracer(void)
-{
- PyObject * mod = PyModule_Create(&moduledef);
- if (mod == NULL) {
- return NULL;
- }
-
- if (CTracer_intern_strings() < 0) {
- return NULL;
- }
-
- /* Initialize CTracer */
- CTracerType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CTracerType) < 0) {
- Py_DECREF(mod);
- return NULL;
- }
-
- Py_INCREF(&CTracerType);
- if (PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- return NULL;
- }
-
- /* Initialize CFileDisposition */
- CFileDispositionType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CFileDispositionType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- return NULL;
- }
-
- Py_INCREF(&CFileDispositionType);
- if (PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- Py_DECREF(&CFileDispositionType);
- return NULL;
- }
-
- return mod;
-}
-
-#else
-
-void
-inittracer(void)
-{
- PyObject * mod;
-
- mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
- if (mod == NULL) {
- return;
- }
-
- if (CTracer_intern_strings() < 0) {
- return;
- }
-
- /* Initialize CTracer */
- CTracerType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CTracerType) < 0) {
- return;
- }
-
- Py_INCREF(&CTracerType);
- PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
-
- /* Initialize CFileDisposition */
- CFileDispositionType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CFileDispositionType) < 0) {
- return;
- }
-
- Py_INCREF(&CFileDispositionType);
- PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType);
-}
-
-#endif /* Py3k */
diff --git a/contrib/python/coverage/py2/coverage/ctracer/stats.h b/contrib/python/coverage/py2/coverage/ctracer/stats.h
deleted file mode 100644
index 05173369f7..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/stats.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_STATS_H
-#define _COVERAGE_STATS_H
-
-#include "util.h"
-
-#if COLLECT_STATS
-#define STATS(x) x
-#else
-#define STATS(x)
-#endif
-
-typedef struct Stats {
- unsigned int calls; /* Need at least one member, but the rest only if needed. */
-#if COLLECT_STATS
- unsigned int lines;
- unsigned int returns;
- unsigned int exceptions;
- unsigned int others;
- unsigned int files;
- unsigned int missed_returns;
- unsigned int stack_reallocs;
- unsigned int errors;
- unsigned int pycalls;
- unsigned int start_context_calls;
-#endif
-} Stats;
-
-#endif /* _COVERAGE_STATS_H */
diff --git a/contrib/python/coverage/py2/coverage/ctracer/tracer.c b/contrib/python/coverage/py2/coverage/ctracer/tracer.c
deleted file mode 100644
index 00e4218d8e..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/tracer.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-/* C-based Tracer for coverage.py. */
-
-#include "util.h"
-#include "datastack.h"
-#include "filedisp.h"
-#include "tracer.h"
-
-/* Python C API helpers. */
-
-static int
-pyint_as_int(PyObject * pyint, int *pint)
-{
- int the_int = MyInt_AsInt(pyint);
- if (the_int == -1 && PyErr_Occurred()) {
- return RET_ERROR;
- }
-
- *pint = the_int;
- return RET_OK;
-}
-
-
-/* Interned strings to speed GetAttr etc. */
-
-static PyObject *str_trace;
-static PyObject *str_file_tracer;
-static PyObject *str__coverage_enabled;
-static PyObject *str__coverage_plugin;
-static PyObject *str__coverage_plugin_name;
-static PyObject *str_dynamic_source_filename;
-static PyObject *str_line_number_range;
-
-int
-CTracer_intern_strings(void)
-{
- int ret = RET_ERROR;
-
-#define INTERN_STRING(v, s) \
- v = MyText_InternFromString(s); \
- if (v == NULL) { \
- goto error; \
- }
-
- INTERN_STRING(str_trace, "trace")
- INTERN_STRING(str_file_tracer, "file_tracer")
- INTERN_STRING(str__coverage_enabled, "_coverage_enabled")
- INTERN_STRING(str__coverage_plugin, "_coverage_plugin")
- INTERN_STRING(str__coverage_plugin_name, "_coverage_plugin_name")
- INTERN_STRING(str_dynamic_source_filename, "dynamic_source_filename")
- INTERN_STRING(str_line_number_range, "line_number_range")
-
- ret = RET_OK;
-
-error:
- return ret;
-}
-
-static void CTracer_disable_plugin(CTracer *self, PyObject * disposition);
-
-static int
-CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
-{
- int ret = RET_ERROR;
-
- if (DataStack_init(&self->stats, &self->data_stack) < 0) {
- goto error;
- }
-
- self->pdata_stack = &self->data_stack;
-
- self->context = Py_None;
- Py_INCREF(self->context);
-
- ret = RET_OK;
- goto ok;
-
-error:
- STATS( self->stats.errors++; )
-
-ok:
- return ret;
-}
-
-static void
-CTracer_dealloc(CTracer *self)
-{
- int i;
-
- if (self->started) {
- PyEval_SetTrace(NULL, NULL);
- }
-
- Py_XDECREF(self->should_trace);
- Py_XDECREF(self->check_include);
- Py_XDECREF(self->warn);
- Py_XDECREF(self->concur_id_func);
- Py_XDECREF(self->data);
- Py_XDECREF(self->file_tracers);
- Py_XDECREF(self->should_trace_cache);
- Py_XDECREF(self->should_start_context);
- Py_XDECREF(self->switch_context);
- Py_XDECREF(self->context);
- Py_XDECREF(self->disable_plugin);
-
- DataStack_dealloc(&self->stats, &self->data_stack);
- if (self->data_stacks) {
- for (i = 0; i < self->data_stacks_used; i++) {
- DataStack_dealloc(&self->stats, self->data_stacks + i);
- }
- PyMem_Free(self->data_stacks);
- }
-
- Py_XDECREF(self->data_stack_index);
-
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-#if TRACE_LOG
-static const char *
-indent(int n)
-{
- static const char * spaces =
- " "
- " "
- " "
- " "
- ;
- return spaces + strlen(spaces) - n*2;
-}
-
-static BOOL logging = FALSE;
-/* Set these constants to be a file substring and line number to start logging. */
-static const char * start_file = "tests/views";
-static int start_line = 27;
-
-static void
-showlog(int depth, int lineno, PyObject * filename, const char * msg)
-{
- if (logging) {
- printf("%s%3d ", indent(depth), depth);
- if (lineno) {
- printf("%4d", lineno);
- }
- else {
- printf(" ");
- }
- if (filename) {
- PyObject *ascii = MyText_AS_BYTES(filename);
- printf(" %s", MyBytes_AS_STRING(ascii));
- Py_DECREF(ascii);
- }
- if (msg) {
- printf(" %s", msg);
- }
- printf("\n");
- }
-}
-
-#define SHOWLOG(a,b,c,d) showlog(a,b,c,d)
-#else
-#define SHOWLOG(a,b,c,d)
-#endif /* TRACE_LOG */
-
-#if WHAT_LOG
-static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
-#endif
-
-/* Record a pair of integers in self->pcur_entry->file_data. */
-static int
-CTracer_record_pair(CTracer *self, int l1, int l2)
-{
- int ret = RET_ERROR;
-
- PyObject * t = NULL;
-
- t = Py_BuildValue("(ii)", l1, l2);
- if (t == NULL) {
- goto error;
- }
-
- if (PyDict_SetItem(self->pcur_entry->file_data, t, Py_None) < 0) {
- goto error;
- }
-
- ret = RET_OK;
-
-error:
- Py_XDECREF(t);
-
- return ret;
-}
-
-/* Set self->pdata_stack to the proper data_stack to use. */
-static int
-CTracer_set_pdata_stack(CTracer *self)
-{
- int ret = RET_ERROR;
- PyObject * co_obj = NULL;
- PyObject * stack_index = NULL;
-
- if (self->concur_id_func != Py_None) {
- int the_index = 0;
-
- if (self->data_stack_index == NULL) {
- PyObject * weakref = NULL;
-
- weakref = PyImport_ImportModule("weakref");
- if (weakref == NULL) {
- goto error;
- }
- STATS( self->stats.pycalls++; )
- self->data_stack_index = PyObject_CallMethod(weakref, "WeakKeyDictionary", NULL);
- Py_XDECREF(weakref);
-
- if (self->data_stack_index == NULL) {
- goto error;
- }
- }
-
- STATS( self->stats.pycalls++; )
- co_obj = PyObject_CallObject(self->concur_id_func, NULL);
- if (co_obj == NULL) {
- goto error;
- }
- stack_index = PyObject_GetItem(self->data_stack_index, co_obj);
- if (stack_index == NULL) {
- /* PyObject_GetItem sets an exception if it didn't find the thing. */
- PyErr_Clear();
-
- /* A new concurrency object. Make a new data stack. */
- the_index = self->data_stacks_used;
- stack_index = MyInt_FromInt(the_index);
- if (stack_index == NULL) {
- goto error;
- }
- if (PyObject_SetItem(self->data_stack_index, co_obj, stack_index) < 0) {
- goto error;
- }
- self->data_stacks_used++;
- if (self->data_stacks_used >= self->data_stacks_alloc) {
- int bigger = self->data_stacks_alloc + 10;
- DataStack * bigger_stacks = PyMem_Realloc(self->data_stacks, bigger * sizeof(DataStack));
- if (bigger_stacks == NULL) {
- PyErr_NoMemory();
- goto error;
- }
- self->data_stacks = bigger_stacks;
- self->data_stacks_alloc = bigger;
- }
- DataStack_init(&self->stats, &self->data_stacks[the_index]);
- }
- else {
- if (pyint_as_int(stack_index, &the_index) < 0) {
- goto error;
- }
- }
-
- self->pdata_stack = &self->data_stacks[the_index];
- }
- else {
- self->pdata_stack = &self->data_stack;
- }
-
- ret = RET_OK;
-
-error:
-
- Py_XDECREF(co_obj);
- Py_XDECREF(stack_index);
-
- return ret;
-}
-
-/*
- * Parts of the trace function.
- */
-
-static int
-CTracer_check_missing_return(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
-
- if (self->last_exc_back) {
- if (frame == self->last_exc_back) {
- /* Looks like someone forgot to send a return event. We'll clear
- the exception state and do the RETURN code here. Notice that the
- frame we have in hand here is not the correct frame for the RETURN;
- that frame is gone. Our handling for RETURN doesn't need the
- actual frame, but we do log it, so that will look a little off if
- you're looking at the detailed log.
-
- If someday we need to examine the frame when doing RETURN, then
- we'll need to keep more of the missed frame's state.
- */
- STATS( self->stats.missed_returns++; )
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- if (self->pdata_stack->depth >= 0) {
- if (self->tracing_arcs && self->pcur_entry->file_data) {
- if (CTracer_record_pair(self, self->pcur_entry->last_line, -self->last_exc_firstlineno) < 0) {
- goto error;
- }
- }
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), frame->f_code->co_filename, "missedreturn");
- self->pdata_stack->depth--;
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
- }
- }
- self->last_exc_back = NULL;
- }
-
- ret = RET_OK;
-
-error:
-
- return ret;
-}
-
-static int
-CTracer_handle_call(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
- int ret2;
-
- /* Owned references that we clean up at the very end of the function. */
- PyObject * disposition = NULL;
- PyObject * plugin = NULL;
- PyObject * plugin_name = NULL;
- PyObject * next_tracename = NULL;
-
- /* Borrowed references. */
- PyObject * filename = NULL;
- PyObject * disp_trace = NULL;
- PyObject * tracename = NULL;
- PyObject * file_tracer = NULL;
- PyObject * has_dynamic_filename = NULL;
-
- CFileDisposition * pdisp = NULL;
-
- STATS( self->stats.calls++; )
-
- /* Grow the stack. */
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- if (DataStack_grow(&self->stats, self->pdata_stack) < 0) {
- goto error;
- }
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
-
- /* See if this frame begins a new context. */
- if (self->should_start_context != Py_None && self->context == Py_None) {
- PyObject * context;
- /* We're looking for our context, ask should_start_context if this is the start. */
- STATS( self->stats.start_context_calls++; )
- STATS( self->stats.pycalls++; )
- context = PyObject_CallFunctionObjArgs(self->should_start_context, frame, NULL);
- if (context == NULL) {
- goto error;
- }
- if (context != Py_None) {
- PyObject * val;
- Py_DECREF(self->context);
- self->context = context;
- self->pcur_entry->started_context = TRUE;
- STATS( self->stats.pycalls++; )
- val = PyObject_CallFunctionObjArgs(self->switch_context, context, NULL);
- if (val == NULL) {
- goto error;
- }
- Py_DECREF(val);
- }
- else {
- Py_DECREF(context);
- self->pcur_entry->started_context = FALSE;
- }
- }
- else {
- self->pcur_entry->started_context = FALSE;
- }
-
- /* Check if we should trace this line. */
- filename = frame->f_code->co_filename;
- disposition = PyDict_GetItem(self->should_trace_cache, filename);
- if (disposition == NULL) {
- if (PyErr_Occurred()) {
- goto error;
- }
- STATS( self->stats.files++; )
-
- /* We've never considered this file before. */
- /* Ask should_trace about it. */
- STATS( self->stats.pycalls++; )
- disposition = PyObject_CallFunctionObjArgs(self->should_trace, filename, frame, NULL);
- if (disposition == NULL) {
- /* An error occurred inside should_trace. */
- goto error;
- }
- if (PyDict_SetItem(self->should_trace_cache, filename, disposition) < 0) {
- goto error;
- }
- }
- else {
- Py_INCREF(disposition);
- }
-
- if (disposition == Py_None) {
- /* A later check_include returned false, so don't trace it. */
- disp_trace = Py_False;
- }
- else {
- /* The object we got is a CFileDisposition, use it efficiently. */
- pdisp = (CFileDisposition *) disposition;
- disp_trace = pdisp->trace;
- if (disp_trace == NULL) {
- goto error;
- }
- }
-
- if (disp_trace == Py_True) {
- /* If tracename is a string, then we're supposed to trace. */
- tracename = pdisp->source_filename;
- if (tracename == NULL) {
- goto error;
- }
- file_tracer = pdisp->file_tracer;
- if (file_tracer == NULL) {
- goto error;
- }
- if (file_tracer != Py_None) {
- plugin = PyObject_GetAttr(file_tracer, str__coverage_plugin);
- if (plugin == NULL) {
- goto error;
- }
- plugin_name = PyObject_GetAttr(plugin, str__coverage_plugin_name);
- if (plugin_name == NULL) {
- goto error;
- }
- }
- has_dynamic_filename = pdisp->has_dynamic_filename;
- if (has_dynamic_filename == NULL) {
- goto error;
- }
- if (has_dynamic_filename == Py_True) {
- STATS( self->stats.pycalls++; )
- next_tracename = PyObject_CallMethodObjArgs(
- file_tracer, str_dynamic_source_filename,
- tracename, frame, NULL
- );
- if (next_tracename == NULL) {
- /* An exception from the function. Alert the user with a
- * warning and a traceback.
- */
- CTracer_disable_plugin(self, disposition);
- /* Because we handled the error, goto ok. */
- goto ok;
- }
- tracename = next_tracename;
-
- if (tracename != Py_None) {
- /* Check the dynamic source filename against the include rules. */
- PyObject * included = NULL;
- int should_include;
- included = PyDict_GetItem(self->should_trace_cache, tracename);
- if (included == NULL) {
- PyObject * should_include_bool;
- if (PyErr_Occurred()) {
- goto error;
- }
- STATS( self->stats.files++; )
- STATS( self->stats.pycalls++; )
- should_include_bool = PyObject_CallFunctionObjArgs(self->check_include, tracename, frame, NULL);
- if (should_include_bool == NULL) {
- goto error;
- }
- should_include = (should_include_bool == Py_True);
- Py_DECREF(should_include_bool);
- if (PyDict_SetItem(self->should_trace_cache, tracename, should_include ? disposition : Py_None) < 0) {
- goto error;
- }
- }
- else {
- should_include = (included != Py_None);
- }
- if (!should_include) {
- tracename = Py_None;
- }
- }
- }
- }
- else {
- tracename = Py_None;
- }
-
- if (tracename != Py_None) {
- PyObject * file_data = PyDict_GetItem(self->data, tracename);
-
- if (file_data == NULL) {
- if (PyErr_Occurred()) {
- goto error;
- }
- file_data = PyDict_New();
- if (file_data == NULL) {
- goto error;
- }
- ret2 = PyDict_SetItem(self->data, tracename, file_data);
- if (ret2 < 0) {
- goto error;
- }
-
- /* If the disposition mentions a plugin, record that. */
- if (file_tracer != Py_None) {
- ret2 = PyDict_SetItem(self->file_tracers, tracename, plugin_name);
- if (ret2 < 0) {
- goto error;
- }
- }
- }
- else {
- /* PyDict_GetItem gives a borrowed reference. Own it. */
- Py_INCREF(file_data);
- }
-
- Py_XDECREF(self->pcur_entry->file_data);
- self->pcur_entry->file_data = file_data;
- self->pcur_entry->file_tracer = file_tracer;
-
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), filename, "traced");
- }
- else {
- Py_XDECREF(self->pcur_entry->file_data);
- self->pcur_entry->file_data = NULL;
- self->pcur_entry->file_tracer = Py_None;
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), filename, "skipped");
- }
-
- self->pcur_entry->disposition = disposition;
-
- /* Make the frame right in case settrace(gettrace()) happens. */
- Py_INCREF(self);
- My_XSETREF(frame->f_trace, (PyObject*)self);
-
- /* A call event is really a "start frame" event, and can happen for
- * re-entering a generator also. f_lasti is -1 for a true call, and a
- * real byte offset for a generator re-entry.
- */
- if (frame->f_lasti < 0) {
- self->pcur_entry->last_line = -frame->f_code->co_firstlineno;
- }
- else {
- self->pcur_entry->last_line = PyFrame_GetLineNumber(frame);
- }
-
-ok:
- ret = RET_OK;
-
-error:
- Py_XDECREF(next_tracename);
- Py_XDECREF(disposition);
- Py_XDECREF(plugin);
- Py_XDECREF(plugin_name);
-
- return ret;
-}
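The f_lasti test above leans on observable CPython behaviour: a frame reports -1 until its first bytecode has run, and a real instruction offset once a generator frame is suspended. A quick, illustrative check (plain CPython, no coverage.py required):

    def gen():
        yield 1
        yield 2

    g = gen()
    frame = g.gi_frame
    print(frame.f_lasti)   # -1: a true "call" -- the frame has not executed yet
    next(g)
    print(frame.f_lasti)   # >= 0: suspended at the first yield, so re-entry is not a true call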
-
-
-static void
-CTracer_disable_plugin(CTracer *self, PyObject * disposition)
-{
- PyObject * ret;
- PyErr_Print();
-
- STATS( self->stats.pycalls++; )
- ret = PyObject_CallFunctionObjArgs(self->disable_plugin, disposition, NULL);
- if (ret == NULL) {
- goto error;
- }
- Py_DECREF(ret);
-
- return;
-
-error:
- /* This function doesn't return a status, so if an error happens, print it,
- * but don't interrupt the flow. */
- /* PySys_WriteStderr is nicer, but is not in the public API. */
- fprintf(stderr, "Error occurred while disabling plug-in:\n");
- PyErr_Print();
-}
-
-
-static int
-CTracer_unpack_pair(CTracer *self, PyObject *pair, int *p_one, int *p_two)
-{
- int ret = RET_ERROR;
- int the_int;
- PyObject * pyint = NULL;
- int index;
-
- if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2) {
- PyErr_SetString(
- PyExc_TypeError,
- "line_number_range must return 2-tuple"
- );
- goto error;
- }
-
- for (index = 0; index < 2; index++) {
- pyint = PyTuple_GetItem(pair, index);
- if (pyint == NULL) {
- goto error;
- }
- if (pyint_as_int(pyint, &the_int) < 0) {
- goto error;
- }
- *(index == 0 ? p_one : p_two) = the_int;
- }
-
- ret = RET_OK;
-
-error:
- return ret;
-}
-
-static int
-CTracer_handle_line(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
- int ret2;
-
- STATS( self->stats.lines++; )
- if (self->pdata_stack->depth >= 0) {
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), frame->f_code->co_filename, "line");
- if (self->pcur_entry->file_data) {
- int lineno_from = -1;
- int lineno_to = -1;
-
- /* We're tracing in this frame: record something. */
- if (self->pcur_entry->file_tracer != Py_None) {
- PyObject * from_to = NULL;
- STATS( self->stats.pycalls++; )
- from_to = PyObject_CallMethodObjArgs(self->pcur_entry->file_tracer, str_line_number_range, frame, NULL);
- if (from_to == NULL) {
- CTracer_disable_plugin(self, self->pcur_entry->disposition);
- goto ok;
- }
- ret2 = CTracer_unpack_pair(self, from_to, &lineno_from, &lineno_to);
- Py_DECREF(from_to);
- if (ret2 < 0) {
- CTracer_disable_plugin(self, self->pcur_entry->disposition);
- goto ok;
- }
- }
- else {
- lineno_from = lineno_to = PyFrame_GetLineNumber(frame);
- }
-
- if (lineno_from != -1) {
- for (; lineno_from <= lineno_to; lineno_from++) {
- if (self->tracing_arcs) {
- /* Tracing arcs: key is (last_line,this_line). */
- if (CTracer_record_pair(self, self->pcur_entry->last_line, lineno_from) < 0) {
- goto error;
- }
- }
- else {
- /* Tracing lines: key is simply this_line. */
- PyObject * this_line = MyInt_FromInt(lineno_from);
- if (this_line == NULL) {
- goto error;
- }
-
- ret2 = PyDict_SetItem(self->pcur_entry->file_data, this_line, Py_None);
- Py_DECREF(this_line);
- if (ret2 < 0) {
- goto error;
- }
- }
-
- self->pcur_entry->last_line = lineno_from;
- }
- }
- }
- }
-
-ok:
- ret = RET_OK;
-
-error:
-
- return ret;
-}
-
-static int
-CTracer_handle_return(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
-
- STATS( self->stats.returns++; )
- /* A near-copy of this code is above in the missing-return handler. */
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
-
- if (self->pdata_stack->depth >= 0) {
- if (self->tracing_arcs && self->pcur_entry->file_data) {
- /* Need to distinguish between RETURN_VALUE and YIELD_VALUE. Read
- * the current bytecode to see what it is. In unusual circumstances
- * (Cython code), co_code can be the empty string, so range-check
- * f_lasti before reading the byte.
- */
- int bytecode = RETURN_VALUE;
- PyObject * pCode = frame->f_code->co_code;
- int lasti = frame->f_lasti;
-
- if (lasti < MyBytes_GET_SIZE(pCode)) {
- bytecode = MyBytes_AS_STRING(pCode)[lasti];
- }
- if (bytecode != YIELD_VALUE) {
- int first = frame->f_code->co_firstlineno;
- if (CTracer_record_pair(self, self->pcur_entry->last_line, -first) < 0) {
- goto error;
- }
- }
- }
-
- /* If this frame started a context, then returning from it ends the context. */
- if (self->pcur_entry->started_context) {
- PyObject * val;
- Py_DECREF(self->context);
- self->context = Py_None;
- Py_INCREF(self->context);
- STATS( self->stats.pycalls++; )
-
- val = PyObject_CallFunctionObjArgs(self->switch_context, self->context, NULL);
- if (val == NULL) {
- goto error;
- }
- Py_DECREF(val);
- }
-
- /* Pop the stack. */
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), frame->f_code->co_filename, "return");
- self->pdata_stack->depth--;
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
- }
-
- ret = RET_OK;
-
-error:
-
- return ret;
-}
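A hedged pure-Python sketch of the RETURN_VALUE/YIELD_VALUE test above, assuming an interpreter where f_lasti indexes the opcode byte in co_code (the interpreters this py2-era tracer targets); `is_real_return` is an illustrative name, not part of coverage.py:

    import dis

    def is_real_return(frame):
        # Look at the bytecode the frame is stopped on; anything other than
        # YIELD_VALUE means the frame is really returning.
        code = frame.f_code.co_code
        if frame.f_lasti < len(code):
            op = code[frame.f_lasti]
            if not isinstance(op, int):   # Python 2: indexing bytes gives a 1-char str
                op = ord(op)
            return op != dis.opmap["YIELD_VALUE"]
        return True                       # empty co_code (e.g. Cython): treat as a return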
-
-static int
-CTracer_handle_exception(CTracer *self, PyFrameObject *frame)
-{
- /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
- without a return event. To detect that, we'll keep a copy of the
- parent frame for an exception event. If the next event is in that
- frame, then we must have returned without a return event. We can
- synthesize the missing event then.
-
- Python itself fixed this problem in 2.4. Pyexpat still has the bug.
- I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
- If it gets fixed, this code should still work properly. Maybe some day
- the bug will be fixed everywhere coverage.py is supported, and we can
- remove this missing-return detection.
-
- More about this fix: https://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
- */
- STATS( self->stats.exceptions++; )
- self->last_exc_back = frame->f_back;
- self->last_exc_firstlineno = frame->f_code->co_firstlineno;
-
- return RET_OK;
-}
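The same remember-the-parent-frame bookkeeping can be written as a small pure-Python trace function. This is only a sketch; `on_return` is a hypothetical callback standing in for the tracer's own stack-popping code:

    def make_trace(on_return):
        state = {"last_exc_back": None}

        def trace(frame, event, arg):
            # If the previous event was an exception and we are now back in the
            # parent frame, a return event was swallowed: synthesize it.
            if state["last_exc_back"] is not None:
                if frame is state["last_exc_back"]:
                    on_return()
                state["last_exc_back"] = None
            if event == "exception":
                state["last_exc_back"] = frame.f_back
            elif event == "return":
                on_return()
            return trace

        return trace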
-
-/*
- * The Trace Function
- */
-static int
-CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
-{
- int ret = RET_ERROR;
-
- #if DO_NOTHING
- return RET_OK;
- #endif
-
- if (!self->started) {
- /* If CTracer.stop() has been called from another thread, the tracer
- is still active in the current thread. Let's deactivate ourselves
- now. */
- PyEval_SetTrace(NULL, NULL);
- return RET_OK;
- }
-
- #if WHAT_LOG || TRACE_LOG
- PyObject * ascii = NULL;
- #endif
-
- #if WHAT_LOG
- if (what < (int)(sizeof(what_sym)/sizeof(const char *))) {
- ascii = MyText_AS_BYTES(frame->f_code->co_filename);
- printf("trace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame));
- Py_DECREF(ascii);
- }
- #endif
-
- #if TRACE_LOG
- ascii = MyText_AS_BYTES(frame->f_code->co_filename);
- if (strstr(MyBytes_AS_STRING(ascii), start_file) && PyFrame_GetLineNumber(frame) == start_line) {
- logging = TRUE;
- }
- Py_DECREF(ascii);
- #endif
-
- /* See CTracer_handle_exception above for details on missing-return detection. */
- if (CTracer_check_missing_return(self, frame) < 0) {
- goto error;
- }
-
- self->activity = TRUE;
-
- switch (what) {
- case PyTrace_CALL:
- if (CTracer_handle_call(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_RETURN:
- if (CTracer_handle_return(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_LINE:
- if (CTracer_handle_line(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_EXCEPTION:
- if (CTracer_handle_exception(self, frame) < 0) {
- goto error;
- }
- break;
-
- default:
- STATS( self->stats.others++; )
- break;
- }
-
- ret = RET_OK;
- goto cleanup;
-
-error:
- STATS( self->stats.errors++; )
-
-cleanup:
- return ret;
-}
-
-
-/*
- * Python has two ways to set the trace function: sys.settrace(fn), which
- * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
- * a C function and a Python object. The way these work together is that
- * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
- * Python callable as the object in PyEval_SetTrace. sys.gettrace() simply
- * returns the Python object that was passed as the second argument to
- * PyEval_SetTrace, so it will return our self parameter, which means self
- * must be callable if it is to be handed back to sys.settrace().
- *
- * So we make ourselves callable, equivalent to invoking our trace function.
- *
- * To help with the process of replaying stored frames, this function has an
- * optional keyword argument:
- *
- * def CTracer_call(frame, event, arg, lineno=0)
- *
- * If provided, the lineno argument is used as the line number, and the
- * frame's f_lineno member is ignored.
- */
-static PyObject *
-CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
-{
- PyFrameObject *frame;
- PyObject *what_str;
- PyObject *arg;
- int lineno = 0;
- int what;
- int orig_lineno;
- PyObject *ret = NULL;
- PyObject * ascii = NULL;
-
- #if DO_NOTHING
- CRASH
- #endif
-
- static char *what_names[] = {
- "call", "exception", "line", "return",
- "c_call", "c_exception", "c_return",
- NULL
- };
-
- static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
- &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
- goto done;
- }
-
- /* In Python, the what argument is a string, we need to find an int
- for the C function. */
- for (what = 0; what_names[what]; what++) {
- int should_break;
- ascii = MyText_AS_BYTES(what_str);
- should_break = !strcmp(MyBytes_AS_STRING(ascii), what_names[what]);
- Py_DECREF(ascii);
- if (should_break) {
- break;
- }
- }
-
- #if WHAT_LOG
- ascii = MyText_AS_BYTES(frame->f_code->co_filename);
- printf("pytrace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame));
- Py_DECREF(ascii);
- #endif
-
- /* Save off the frame's lineno, and use the forced one, if provided. */
- orig_lineno = frame->f_lineno;
- if (lineno > 0) {
- frame->f_lineno = lineno;
- }
-
- /* Invoke the C function, and return ourselves. */
- if (CTracer_trace(self, frame, what, arg) == RET_OK) {
- Py_INCREF(self);
- ret = (PyObject *)self;
- }
-
- /* Clean up. */
- frame->f_lineno = orig_lineno;
-
- /* For better speed, install ourselves the C way so that future calls go
- directly to CTracer_trace, without this intermediate function.
-
- Only do this if this is a CALL event, since new trace functions only
- take effect then. If we don't condition it on CALL, then we'll clobber
- the new trace function before it has a chance to get called. To
- understand why, there are three internal values to track: frame.f_trace,
- c_tracefunc, and c_traceobj. They are explained here:
- https://nedbatchelder.com/text/trace-function.html
-
- Without the conditional on PyTrace_CALL, this is what happens:
-
- def func(): # f_trace c_tracefunc c_traceobj
- # -------------- -------------- --------------
- # CTracer CTracer.trace CTracer
- sys.settrace(my_func)
- # CTracer trampoline my_func
- # Now Python calls trampoline(CTracer), which calls this function
- # which calls PyEval_SetTrace below, setting us as the tracer again:
- # CTracer CTracer.trace CTracer
- # and it's as if the settrace never happened.
- */
- if (what == PyTrace_CALL) {
- PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
- }
-
-done:
- return ret;
-}
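The contract described in the comment above -- sys.gettrace() hands back the very object given to sys.settrace(), and that object must therefore be callable -- can be seen with a tiny pure-Python stand-in (illustrative names only, not part of coverage.py):

    import sys

    class MiniTracer(object):
        # Callable like CTracer, including the optional lineno keyword.
        def __call__(self, frame, event, arg, lineno=0):
            print("%-9s %s:%d" % (event, frame.f_code.co_name, lineno or frame.f_lineno))
            return self          # keep receiving events for this frame

    tracer = MiniTracer()
    sys.settrace(tracer)
    assert sys.gettrace() is tracer   # the same object comes back, just as with CTracer

    def demo():
        return 42

    demo()                            # prints call/line/return events
    sys.settrace(None)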
-
-static PyObject *
-CTracer_start(CTracer *self, PyObject *args_unused)
-{
- PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
- self->started = TRUE;
- self->tracing_arcs = self->trace_arcs && PyObject_IsTrue(self->trace_arcs);
-
- /* start() returns a trace function usable with sys.settrace() */
- Py_INCREF(self);
- return (PyObject *)self;
-}
-
-static PyObject *
-CTracer_stop(CTracer *self, PyObject *args_unused)
-{
- if (self->started) {
- /* Set the started flag only. The actual call to
- PyEval_SetTrace(NULL, NULL) is delegated to the callback
- itself to ensure that it is called from the right thread.
- */
- self->started = FALSE;
- }
-
- Py_RETURN_NONE;
-}
-
-static PyObject *
-CTracer_activity(CTracer *self, PyObject *args_unused)
-{
- if (self->activity) {
- Py_RETURN_TRUE;
- }
- else {
- Py_RETURN_FALSE;
- }
-}
-
-static PyObject *
-CTracer_reset_activity(CTracer *self, PyObject *args_unused)
-{
- self->activity = FALSE;
- Py_RETURN_NONE;
-}
-
-static PyObject *
-CTracer_get_stats(CTracer *self, PyObject *args_unused)
-{
-#if COLLECT_STATS
- return Py_BuildValue(
- "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI,sI,sI}",
- "calls", self->stats.calls,
- "lines", self->stats.lines,
- "returns", self->stats.returns,
- "exceptions", self->stats.exceptions,
- "others", self->stats.others,
- "files", self->stats.files,
- "missed_returns", self->stats.missed_returns,
- "stack_reallocs", self->stats.stack_reallocs,
- "stack_alloc", self->pdata_stack->alloc,
- "errors", self->stats.errors,
- "pycalls", self->stats.pycalls,
- "start_context_calls", self->stats.start_context_calls
- );
-#else
- Py_RETURN_NONE;
-#endif /* COLLECT_STATS */
-}
-
-static PyMemberDef
-CTracer_members[] = {
- { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0,
- PyDoc_STR("Function indicating whether to trace a file.") },
-
- { "check_include", T_OBJECT, offsetof(CTracer, check_include), 0,
- PyDoc_STR("Function indicating whether to include a file.") },
-
- { "warn", T_OBJECT, offsetof(CTracer, warn), 0,
- PyDoc_STR("Function for issuing warnings.") },
-
- { "concur_id_func", T_OBJECT, offsetof(CTracer, concur_id_func), 0,
- PyDoc_STR("Function for determining concurrency context") },
-
- { "data", T_OBJECT, offsetof(CTracer, data), 0,
- PyDoc_STR("The raw dictionary of trace data.") },
-
- { "file_tracers", T_OBJECT, offsetof(CTracer, file_tracers), 0,
- PyDoc_STR("Mapping from file name to plugin name.") },
-
- { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
- PyDoc_STR("Dictionary caching should_trace results.") },
-
- { "trace_arcs", T_OBJECT, offsetof(CTracer, trace_arcs), 0,
- PyDoc_STR("Should we trace arcs, or just lines?") },
-
- { "should_start_context", T_OBJECT, offsetof(CTracer, should_start_context), 0,
- PyDoc_STR("Function for starting contexts.") },
-
- { "switch_context", T_OBJECT, offsetof(CTracer, switch_context), 0,
- PyDoc_STR("Function for switching to a new context.") },
-
- { "disable_plugin", T_OBJECT, offsetof(CTracer, disable_plugin), 0,
- PyDoc_STR("Function for disabling a plugin.") },
-
- { NULL }
-};
-
-static PyMethodDef
-CTracer_methods[] = {
- { "start", (PyCFunction) CTracer_start, METH_VARARGS,
- PyDoc_STR("Start the tracer") },
-
- { "stop", (PyCFunction) CTracer_stop, METH_VARARGS,
- PyDoc_STR("Stop the tracer") },
-
- { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS,
- PyDoc_STR("Get statistics about the tracing") },
-
- { "activity", (PyCFunction) CTracer_activity, METH_VARARGS,
- PyDoc_STR("Has there been any activity?") },
-
- { "reset_activity", (PyCFunction) CTracer_reset_activity, METH_VARARGS,
- PyDoc_STR("Reset the activity flag") },
-
- { NULL }
-};
-
-PyTypeObject
-CTracerType = {
- MyType_HEAD_INIT
- "coverage.CTracer", /*tp_name*/
- sizeof(CTracer), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)CTracer_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- (ternaryfunc)CTracer_call, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "CTracer objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- CTracer_methods, /* tp_methods */
- CTracer_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)CTracer_init, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
diff --git a/contrib/python/coverage/py2/coverage/ctracer/tracer.h b/contrib/python/coverage/py2/coverage/ctracer/tracer.h
deleted file mode 100644
index 8994a9e3d6..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/tracer.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_TRACER_H
-#define _COVERAGE_TRACER_H
-
-#include "util.h"
-#include "structmember.h"
-#include "frameobject.h"
-#include "opcode.h"
-
-#include "datastack.h"
-
-/* The CTracer type. */
-
-typedef struct CTracer {
- PyObject_HEAD
-
- /* Python objects manipulated directly by the Collector class. */
- PyObject * should_trace;
- PyObject * check_include;
- PyObject * warn;
- PyObject * concur_id_func;
- PyObject * data;
- PyObject * file_tracers;
- PyObject * should_trace_cache;
- PyObject * trace_arcs;
- PyObject * should_start_context;
- PyObject * switch_context;
- PyObject * disable_plugin;
-
- /* Has the tracer been started? */
- BOOL started;
- /* Are we tracing arcs, or just lines? */
- BOOL tracing_arcs;
- /* Have we had any activity? */
- BOOL activity;
- /* The current dynamic context. */
- PyObject * context;
-
- /*
- The data stack is a stack of dictionaries. Each dictionary collects
- data for a single source file. The data stack parallels the call stack:
- each call pushes the new frame's file data onto the data stack, and each
- return pops file data off.
-
- The file data is a dictionary whose form depends on the tracing options.
- If tracing arcs, the keys are line number pairs. If not tracing arcs,
- the keys are line numbers. In both cases, the value is irrelevant
- (None).
- */
-
- DataStack data_stack; /* Used if we aren't doing concurrency. */
-
- PyObject * data_stack_index; /* Used if we are doing concurrency. */
- DataStack * data_stacks;
- int data_stacks_alloc;
- int data_stacks_used;
- DataStack * pdata_stack;
-
- /* The current file's data stack entry. */
- DataStackEntry * pcur_entry;
-
- /* The parent frame for the last exception event, to fix missing returns. */
- PyFrameObject * last_exc_back;
- int last_exc_firstlineno;
-
- Stats stats;
-} CTracer;
-
-int CTracer_intern_strings(void);
-
-extern PyTypeObject CTracerType;
-
-#endif /* _COVERAGE_TRACER_H */
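Concretely, the two file-data shapes described in the comment above look like this (line numbers invented for illustration; only the keys matter, the values are always None):

    # Tracing lines: keys are plain line numbers.
    line_file_data = {17: None, 18: None, 20: None}

    # Tracing arcs: keys are (last_line, this_line) pairs. The C code stores the
    # negated first line of the code object to mark entering and leaving it.
    arc_file_data = {(-16, 17): None, (17, 18): None, (18, 20): None, (20, -16): None}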
diff --git a/contrib/python/coverage/py2/coverage/ctracer/util.h b/contrib/python/coverage/py2/coverage/ctracer/util.h
deleted file mode 100644
index 5cba9b3096..0000000000
--- a/contrib/python/coverage/py2/coverage/ctracer/util.h
+++ /dev/null
@@ -1,67 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_UTIL_H
-#define _COVERAGE_UTIL_H
-
-#include <Python.h>
-
-/* Compile-time debugging helpers */
-#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */
-#undef TRACE_LOG /* Define to log our bookkeeping. */
-#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */
-#undef DO_NOTHING /* Define this to make the tracer do nothing. */
-
-/* Py 2.x and 3.x compatibility */
-
-#if PY_MAJOR_VERSION >= 3
-
-#define MyText_Type PyUnicode_Type
-#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
-#define MyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
-#define MyBytes_AS_STRING(o) PyBytes_AS_STRING(o)
-#define MyText_AsString(o) PyUnicode_AsUTF8(o)
-#define MyText_FromFormat PyUnicode_FromFormat
-#define MyInt_FromInt(i) PyLong_FromLong((long)i)
-#define MyInt_AsInt(o) (int)PyLong_AsLong(o)
-#define MyText_InternFromString(s) PyUnicode_InternFromString(s)
-
-#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
-
-#else
-
-#define MyText_Type PyString_Type
-#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
-#define MyBytes_GET_SIZE(o) PyString_GET_SIZE(o)
-#define MyBytes_AS_STRING(o) PyString_AS_STRING(o)
-#define MyText_AsString(o) PyString_AsString(o)
-#define MyText_FromFormat PyUnicode_FromFormat
-#define MyInt_FromInt(i) PyInt_FromLong((long)i)
-#define MyInt_AsInt(o) (int)PyInt_AsLong(o)
-#define MyText_InternFromString(s) PyString_InternFromString(s)
-
-#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,
-
-#endif /* Py3k */
-
-// Py_XSETREF is undocumented, and not present in all 2.7.x releases, so keep our own copy of it.
-#define My_XSETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_XDECREF(_py_tmp); \
- } while (0)
-
-/* The values returned to indicate ok or error. */
-#define RET_OK 0
-#define RET_ERROR -1
-
-/* Nicer booleans */
-typedef int BOOL;
-#define FALSE 0
-#define TRUE 1
-
-/* Only for extreme machete-mode debugging! */
-#define CRASH { printf("*** CRASH! ***\n"); *((int*)1) = 1; }
-
-#endif /* _COVERAGE_UTIL_H */
diff --git a/contrib/python/coverage/py2/coverage/data.py b/contrib/python/coverage/py2/coverage/data.py
deleted file mode 100644
index 5dd1dfe3f0..0000000000
--- a/contrib/python/coverage/py2/coverage/data.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Coverage data for coverage.py.
-
-This file had the 4.x JSON data support, which is now gone. This file still
-has storage-agnostic helpers, and is kept to avoid changing too many imports.
-CoverageData is now defined in sqldata.py, and imported here to keep the
-imports working.
-
-"""
-
-import glob
-import os.path
-
-from coverage.misc import CoverageException, file_be_gone
-from coverage.sqldata import CoverageData
-
-
-def line_counts(data, fullpath=False):
- """Return a dict summarizing the line coverage data.
-
- Keys are based on the file names, and values are the number of executed
- lines. If `fullpath` is true, then the keys are the full pathnames of
- the files, otherwise they are the basenames of the files.
-
- Returns a dict mapping file names to counts of lines.
-
- """
- summ = {}
- if fullpath:
- filename_fn = lambda f: f
- else:
- filename_fn = os.path.basename
- for filename in data.measured_files():
- summ[filename_fn(filename)] = len(data.lines(filename))
- return summ
-
-
-def add_data_to_hash(data, filename, hasher):
- """Contribute `filename`'s data to the `hasher`.
-
- `hasher` is a `coverage.misc.Hasher` instance to be updated with
- the file's data. It should only get the results data, not the run
- data.
-
- """
- if data.has_arcs():
- hasher.update(sorted(data.arcs(filename) or []))
- else:
- hasher.update(sorted(data.lines(filename) or []))
- hasher.update(data.file_tracer(filename))
-
-
-def combine_parallel_data(data, aliases=None, data_paths=None, strict=False, keep=False):
- """Combine a number of data files together.
-
- Treat `data.filename` as a file prefix, and combine the data from all
- of the data files starting with that prefix plus a dot.
-
- If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's.
-
- If `data_paths` is provided, it is a list of directories or files to
- combine. Directories are searched for files that start with
- `data.filename` plus dot as a prefix, and those files are combined.
-
- If `data_paths` is not provided, then the directory portion of
- `data.filename` is used as the directory to search for data files.
-
- Unless `keep` is True, every data file found and combined is then deleted
- from disk. If a file cannot be read, a warning will be issued, and the
- file will not be deleted.
-
- If `strict` is true, and no files are found to combine, an error is
- raised.
-
- """
- # Because of the os.path.abspath in the constructor, data_dir will
- # never be an empty string.
- data_dir, local = os.path.split(data.base_filename())
- localdot = local + '.*'
-
- data_paths = data_paths or [data_dir]
- files_to_combine = []
- for p in data_paths:
- if os.path.isfile(p):
- files_to_combine.append(os.path.abspath(p))
- elif os.path.isdir(p):
- pattern = os.path.join(os.path.abspath(p), localdot)
- files_to_combine.extend(glob.glob(pattern))
- else:
- raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
-
- if strict and not files_to_combine:
- raise CoverageException("No data to combine")
-
- files_combined = 0
- for f in files_to_combine:
- if f == data.data_filename():
- # Sometimes we are combining into a file which is one of the
- # parallel files. Skip that file.
- if data._debug.should('dataio'):
- data._debug.write("Skipping combining ourself: %r" % (f,))
- continue
- if data._debug.should('dataio'):
- data._debug.write("Combining data file %r" % (f,))
- try:
- new_data = CoverageData(f, debug=data._debug)
- new_data.read()
- except CoverageException as exc:
- if data._warn:
- # The CoverageException has the file name in it, so just
- # use the message as the warning.
- data._warn(str(exc))
- else:
- data.update(new_data, aliases=aliases)
- files_combined += 1
- if not keep:
- if data._debug.should('dataio'):
- data._debug.write("Deleting combined data file %r" % (f,))
- file_be_gone(f)
-
- if strict and not files_combined:
- raise CoverageException("No usable data files")
diff --git a/contrib/python/coverage/py2/coverage/debug.py b/contrib/python/coverage/py2/coverage/debug.py
deleted file mode 100644
index 194f16f50d..0000000000
--- a/contrib/python/coverage/py2/coverage/debug.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Control of and utilities for debugging."""
-
-import contextlib
-import functools
-import inspect
-import itertools
-import os
-import pprint
-import sys
-try:
- import _thread
-except ImportError:
- import thread as _thread
-
-from coverage.backward import reprlib, StringIO
-from coverage.misc import isolate_module
-
-os = isolate_module(os)
-
-
-# When debugging, it can be helpful to force some options, especially when
-# debugging the configuration mechanisms you usually use to control debugging!
-# This is a list of forced debugging options.
-FORCED_DEBUG = []
-FORCED_DEBUG_FILE = None
-
-
-class DebugControl(object):
- """Control and output for debugging."""
-
- show_repr_attr = False # For SimpleReprMixin
-
- def __init__(self, options, output):
- """Configure the options and output file for debugging."""
- self.options = list(options) + FORCED_DEBUG
- self.suppress_callers = False
-
- filters = []
- if self.should('pid'):
- filters.append(add_pid_and_tid)
- self.output = DebugOutputFile.get_one(
- output,
- show_process=self.should('process'),
- filters=filters,
- )
- self.raw_output = self.output.outfile
-
- def __repr__(self):
- return "<DebugControl options=%r raw_output=%r>" % (self.options, self.raw_output)
-
- def should(self, option):
- """Decide whether to output debug information in category `option`."""
- if option == "callers" and self.suppress_callers:
- return False
- return (option in self.options)
-
- @contextlib.contextmanager
- def without_callers(self):
- """A context manager to prevent call stacks from being logged."""
- old = self.suppress_callers
- self.suppress_callers = True
- try:
- yield
- finally:
- self.suppress_callers = old
-
- def write(self, msg):
- """Write a line of debug output.
-
- `msg` is the line to write. A newline will be appended.
-
- """
- self.output.write(msg+"\n")
- if self.should('self'):
- caller_self = inspect.stack()[1][0].f_locals.get('self')
- if caller_self is not None:
- self.output.write("self: {!r}\n".format(caller_self))
- if self.should('callers'):
- dump_stack_frames(out=self.output, skip=1)
- self.output.flush()
-
-
-class DebugControlString(DebugControl):
- """A `DebugControl` that writes to a StringIO, for testing."""
- def __init__(self, options):
- super(DebugControlString, self).__init__(options, StringIO())
-
- def get_output(self):
- """Get the output text from the `DebugControl`."""
- return self.raw_output.getvalue()
-
-
-class NoDebugging(object):
- """A replacement for DebugControl that will never try to do anything."""
- def should(self, option): # pylint: disable=unused-argument
- """Should we write debug messages? Never."""
- return False
-
-
-def info_header(label):
- """Make a nice header string."""
- return "--{:-<60s}".format(" "+label+" ")
-
-
-def info_formatter(info):
- """Produce a sequence of formatted lines from info.
-
- `info` is a sequence of pairs (label, data). The produced lines are
- nicely formatted, ready to print.
-
- """
- info = list(info)
- if not info:
- return
- label_len = 30
- assert all(len(l) < label_len for l, _ in info)
- for label, data in info:
- if data == []:
- data = "-none-"
- if isinstance(data, (list, set, tuple)):
- prefix = "%*s:" % (label_len, label)
- for e in data:
- yield "%*s %s" % (label_len+1, prefix, e)
- prefix = ""
- else:
- yield "%*s: %s" % (label_len, label, data)
-
-
-def write_formatted_info(writer, header, info):
- """Write a sequence of (label,data) pairs nicely."""
- writer.write(info_header(header))
- for line in info_formatter(info):
- writer.write(" %s" % line)
-
-
-def short_stack(limit=None, skip=0):
- """Return a string summarizing the call stack.
-
- The string is multi-line, with one line per stack frame. Each line shows
- the function name, the file name, and the line number:
-
- ...
- start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
- import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
- import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
- ...
-
- `limit` is the number of frames to include, defaulting to all of them.
-
- `skip` is the number of frames to skip, so that debugging functions can
- call this and not be included in the result.
-
- """
- stack = inspect.stack()[limit:skip:-1]
- return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack)
-
-
-def dump_stack_frames(limit=None, out=None, skip=0):
- """Print a summary of the stack to stdout, or someplace else."""
- out = out or sys.stdout
- out.write(short_stack(limit=limit, skip=skip+1))
- out.write("\n")
-
-
-def clipped_repr(text, numchars=50):
- """`repr(text)`, but limited to `numchars`."""
- r = reprlib.Repr()
- r.maxstring = numchars
- return r.repr(text)
-
-
-def short_id(id64):
- """Given a 64-bit id, make a shorter 16-bit one."""
- id16 = 0
- for offset in range(0, 64, 16):
- id16 ^= id64 >> offset
- return id16 & 0xFFFF
-
-
-def add_pid_and_tid(text):
- """A filter to add pid and tid to debug messages."""
- # Thread ids are useful, but too long. Make a shorter one.
- tid = "{:04x}".format(short_id(_thread.get_ident()))
- text = "{:5d}.{}: {}".format(os.getpid(), tid, text)
- return text
-
-
-class SimpleReprMixin(object):
- """A mixin implementing a simple __repr__."""
- simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id']
-
- def __repr__(self):
- show_attrs = (
- (k, v) for k, v in self.__dict__.items()
- if getattr(v, "show_repr_attr", True)
- and not callable(v)
- and k not in self.simple_repr_ignore
- )
- return "<{klass} @0x{id:x} {attrs}>".format(
- klass=self.__class__.__name__,
- id=id(self),
- attrs=" ".join("{}={!r}".format(k, v) for k, v in show_attrs),
- )
-
-
-def simplify(v): # pragma: debugging
- """Turn things which are nearly dict/list/etc into dict/list/etc."""
- if isinstance(v, dict):
- return {k:simplify(vv) for k, vv in v.items()}
- elif isinstance(v, (list, tuple)):
- return type(v)(simplify(vv) for vv in v)
- elif hasattr(v, "__dict__"):
- return simplify({'.'+k: v for k, v in v.__dict__.items()})
- else:
- return v
-
-
-def pp(v): # pragma: debugging
- """Debug helper to pretty-print data, including SimpleNamespace objects."""
- # Might not be needed in 3.9+
- pprint.pprint(simplify(v))
-
-
-def filter_text(text, filters):
- """Run `text` through a series of filters.
-
- `filters` is a list of functions. Each takes a string and returns a
- string. Each is run in turn.
-
- Returns: the final string that results after all of the filters have
- run.
-
- """
- clean_text = text.rstrip()
- ending = text[len(clean_text):]
- text = clean_text
- for fn in filters:
- lines = []
- for line in text.splitlines():
- lines.extend(fn(line).splitlines())
- text = "\n".join(lines)
- return text + ending
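For example, the pid/tid filter defined above can be run through filter_text like this (assuming coverage.py is importable; the prefixes depend on the current process):

    from coverage.debug import add_pid_and_tid, filter_text

    text = "first line\nsecond line\n"
    # Each line comes back prefixed with "<pid>.<tid>: "; the trailing newline is preserved.
    print(filter_text(text, [add_pid_and_tid]))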
-
-
-class CwdTracker(object): # pragma: debugging
- """A class to add cwd info to debug messages."""
- def __init__(self):
- self.cwd = None
-
- def filter(self, text):
- """Add a cwd message for each new cwd."""
- cwd = os.getcwd()
- if cwd != self.cwd:
- text = "cwd is now {!r}\n".format(cwd) + text
- self.cwd = cwd
- return text
-
-
-class DebugOutputFile(object): # pragma: debugging
- """A file-like object that includes pid and cwd information."""
- def __init__(self, outfile, show_process, filters):
- self.outfile = outfile
- self.show_process = show_process
- self.filters = list(filters)
-
- if self.show_process:
- self.filters.insert(0, CwdTracker().filter)
- self.write("New process: executable: %r\n" % (sys.executable,))
- self.write("New process: cmd: %r\n" % (getattr(sys, 'argv', None),))
- if hasattr(os, 'getppid'):
- self.write("New process: pid: %r, parent pid: %r\n" % (os.getpid(), os.getppid()))
-
- SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
-
- @classmethod
- def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
- """Get a DebugOutputFile.
-
- If `fileobj` is provided, then a new DebugOutputFile is made with it.
-
- If `fileobj` isn't provided, then a file is chosen
- (COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton
- DebugOutputFile is made.
-
- `show_process` controls whether the debug file adds process-level
- information.
-
- `filters` is a list of text filters to apply to the stream, to annotate
- lines with pids, tids, etc.
-
- If `interim` is true, then a future `get_one` can replace this one.
-
- """
- if fileobj is not None:
- # Make DebugOutputFile around the fileobj passed.
- return cls(fileobj, show_process, filters)
-
- # Because of the way igor.py deletes and re-imports modules,
- # this class can be defined more than once. But we really want
- # a process-wide singleton. So stash it in sys.modules instead of
- # on a class attribute. Yes, this is aggressively gross.
- the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True))
- if the_one is None or is_interim:
- if fileobj is None:
- debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
- if debug_file_name:
- fileobj = open(debug_file_name, "a")
- else:
- fileobj = sys.stderr
- the_one = cls(fileobj, show_process, filters)
- sys.modules[cls.SYS_MOD_NAME] = (the_one, interim)
- return the_one
-
- def write(self, text):
- """Just like file.write, but filter through all our filters."""
- self.outfile.write(filter_text(text, self.filters))
- self.outfile.flush()
-
- def flush(self):
- """Flush our file."""
- self.outfile.flush()
-
-
-def log(msg, stack=False): # pragma: debugging
- """Write a log message as forcefully as possible."""
- out = DebugOutputFile.get_one(interim=True)
- out.write(msg+"\n")
- if stack:
- dump_stack_frames(out=out, skip=1)
-
-
-def decorate_methods(decorator, butnot=(), private=False): # pragma: debugging
- """A class decorator to apply a decorator to methods."""
- def _decorator(cls):
- for name, meth in inspect.getmembers(cls, inspect.isroutine):
- if name not in cls.__dict__:
- continue
- if name != "__init__":
- if not private and name.startswith("_"):
- continue
- if name in butnot:
- continue
- setattr(cls, name, decorator(meth))
- return cls
- return _decorator
-
-
-def break_in_pudb(func): # pragma: debugging
- """A function decorator to stop in the debugger for each call."""
- @functools.wraps(func)
- def _wrapper(*args, **kwargs):
- import pudb
- sys.stdout = sys.__stdout__
- pudb.set_trace()
- return func(*args, **kwargs)
- return _wrapper
-
-
-OBJ_IDS = itertools.count()
-CALLS = itertools.count()
-OBJ_ID_ATTR = "$coverage.object_id"
-
-def show_calls(show_args=True, show_stack=False, show_return=False): # pragma: debugging
- """A method decorator to debug-log each call to the function."""
- def _decorator(func):
- @functools.wraps(func)
- def _wrapper(self, *args, **kwargs):
- oid = getattr(self, OBJ_ID_ATTR, None)
- if oid is None:
- oid = "{:08d} {:04d}".format(os.getpid(), next(OBJ_IDS))
- setattr(self, OBJ_ID_ATTR, oid)
- extra = ""
- if show_args:
- eargs = ", ".join(map(repr, args))
- ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
- extra += "("
- extra += eargs
- if eargs and ekwargs:
- extra += ", "
- extra += ekwargs
- extra += ")"
- if show_stack:
- extra += " @ "
- extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines())
- callid = next(CALLS)
- msg = "{} {:04d} {}{}\n".format(oid, callid, func.__name__, extra)
- DebugOutputFile.get_one(interim=True).write(msg)
- ret = func(self, *args, **kwargs)
- if show_return:
- msg = "{} {:04d} {} return {!r}\n".format(oid, callid, func.__name__, ret)
- DebugOutputFile.get_one(interim=True).write(msg)
- return ret
- return _wrapper
- return _decorator
-
-
-def _clean_stack_line(s): # pragma: debugging
- """Simplify some paths in a stack trace, for compactness."""
- s = s.strip()
- s = s.replace(os.path.dirname(__file__) + '/', '')
- s = s.replace(os.path.dirname(os.__file__) + '/', '')
- s = s.replace(sys.prefix + '/', '')
- return s
diff --git a/contrib/python/coverage/py2/coverage/disposition.py b/contrib/python/coverage/py2/coverage/disposition.py
deleted file mode 100644
index 9b9a997d8a..0000000000
--- a/contrib/python/coverage/py2/coverage/disposition.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Simple value objects for tracking what to do with files."""
-
-
-class FileDisposition(object):
- """A simple value type for recording what to do with a file."""
- pass
-
-
-# FileDisposition "methods": FileDisposition is a pure value object, so it can
-# be implemented in either C or Python. Acting on them is done with these
-# functions.
-
-def disposition_init(cls, original_filename):
- """Construct and initialize a new FileDisposition object."""
- disp = cls()
- disp.original_filename = original_filename
- disp.canonical_filename = original_filename
- disp.source_filename = None
- disp.trace = False
- disp.reason = ""
- disp.file_tracer = None
- disp.has_dynamic_filename = False
- return disp
-
-
-def disposition_debug_msg(disp):
- """Make a nice debug message of what the FileDisposition is doing."""
- if disp.trace:
- msg = "Tracing %r" % (disp.original_filename,)
- if disp.file_tracer:
- msg += ": will be traced by %r" % disp.file_tracer
- else:
- msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
- return msg
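A short sketch of how these value-object helpers combine (the file name is made up):

    from coverage.disposition import FileDisposition, disposition_init, disposition_debug_msg

    disp = disposition_init(FileDisposition, "example.py")
    disp.trace = True
    print(disposition_debug_msg(disp))   # Tracing 'example.py'

    disp.trace = False
    disp.reason = "not in the source roots"
    print(disposition_debug_msg(disp))   # Not tracing 'example.py': not in the source roots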
diff --git a/contrib/python/coverage/py2/coverage/env.py b/contrib/python/coverage/py2/coverage/env.py
deleted file mode 100644
index ea78a5be89..0000000000
--- a/contrib/python/coverage/py2/coverage/env.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determine facts about the environment."""
-
-import os
-import platform
-import sys
-
-# Operating systems.
-WINDOWS = sys.platform == "win32"
-LINUX = sys.platform.startswith("linux")
-
-# Python implementations.
-CPYTHON = (platform.python_implementation() == "CPython")
-PYPY = (platform.python_implementation() == "PyPy")
-JYTHON = (platform.python_implementation() == "Jython")
-IRONPYTHON = (platform.python_implementation() == "IronPython")
-
-# Python versions. We amend version_info with one more value, a zero if an
-# official version, or 1 if built from source beyond an official version.
-PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
-PY2 = PYVERSION < (3, 0)
-PY3 = PYVERSION >= (3, 0)
-
-if PYPY:
- PYPYVERSION = sys.pypy_version_info
-
-PYPY2 = PYPY and PY2
-PYPY3 = PYPY and PY3
-
-# Python behavior.
-class PYBEHAVIOR(object):
- """Flags indicating this Python's behavior."""
-
- pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4))
-
- # Is "if __debug__" optimized away?
- if PYPY3:
- optimize_if_debug = True
- elif PYPY2:
- optimize_if_debug = False
- else:
- optimize_if_debug = not pep626
-
- # Is "if not __debug__" optimized away?
- optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4))
- if pep626:
- optimize_if_not_debug = False
- if PYPY3:
- optimize_if_not_debug = True
-
- # Is "if not __debug__" optimized away even better?
- optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1))
- if pep626:
- optimize_if_not_debug2 = False
-
- # Do we have yield-from?
- yield_from = (PYVERSION >= (3, 3))
-
- # Do we have PEP 420 namespace packages?
- namespaces_pep420 = (PYVERSION >= (3, 3))
-
- # Do .pyc files have the source file size recorded in them?
- size_in_pyc = (PYVERSION >= (3, 3))
-
- # Do we have async and await syntax?
- async_syntax = (PYVERSION >= (3, 5))
-
- # PEP 448 defined additional unpacking generalizations
- unpackings_pep448 = (PYVERSION >= (3, 5))
-
- # Can co_lnotab have negative deltas?
- negative_lnotab = (PYVERSION >= (3, 6)) and not (PYPY and PYPYVERSION < (7, 2))
-
- # Do .pyc files conform to PEP 552? Hash-based pyc's.
- hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4))
-
- # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
- # used to be an empty string (meaning the current directory). It changed
- # to be the actual path to the current directory, so that os.chdir wouldn't
- # affect the outcome.
- actual_syspath0_dash_m = CPYTHON and (PYVERSION >= (3, 7, 0, 'beta', 3))
-
- # 3.7 changed how functions with only docstrings are numbered.
- docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10))
-
- # When a break/continue/return statement in a try block jumps to a finally
- # block, does the finally block do the break/continue/return (pre-3.8), or
- # does the finally jump back to the break/continue/return (3.8) to do the
- # work?
- finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10))
-
- # When a function is decorated, does the trace function get called for the
- # @-line and also the def-line (new behavior in 3.8)? Or just the @-line
- # (old behavior)?
- trace_decorated_def = (PYVERSION >= (3, 8))
-
- # Are while-true loops optimized into absolute jumps with no loop setup?
- nix_while_true = (PYVERSION >= (3, 8))
-
- # Python 3.9a1 made sys.argv[0] and other reported files absolute paths.
- report_absolute_files = (PYVERSION >= (3, 9))
-
- # Lines after break/continue/return/raise are no longer compiled into the
- # bytecode. They used to be marked as missing, now they aren't executable.
- omit_after_jump = pep626
-
- # PyPy has always omitted statements after return.
- omit_after_return = omit_after_jump or PYPY
-
- # Modules used to have firstlineno equal to the line number of the first
- # real line of code. Now they always start at 1.
- module_firstline_1 = pep626
-
- # Are "if 0:" lines (and similar) kept in the compiled code?
- keep_constant_test = pep626
-
-# Coverage.py specifics.
-
-# Are we using the C-implemented trace function?
-C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
-
-# Are we coverage-measuring ourselves?
-METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
-
-# Are we running our test suite?
-# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
-# test-specific behavior like contracts.
-TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
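Elsewhere in coverage.py these facts are consumed as plain attributes; a quick illustrative probe (exact values depend on the interpreter running it):

    from coverage import env

    print(env.PYVERSION)    # e.g. (3, 8, 10, 'final', 0, 0); the trailing 0 means an
                            # official build, 1 a source build past an official release
    print(env.PY3, env.CPYTHON)
    if env.PYBEHAVIOR.trace_decorated_def:
        print("trace events fire for the @-line and the def-line")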
diff --git a/contrib/python/coverage/py2/coverage/execfile.py b/contrib/python/coverage/py2/coverage/execfile.py
deleted file mode 100644
index 29409d517a..0000000000
--- a/contrib/python/coverage/py2/coverage/execfile.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Execute files of Python code."""
-
-import inspect
-import marshal
-import os
-import struct
-import sys
-import types
-
-from coverage import env
-from coverage.backward import BUILTINS
-from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
-from coverage.files import canonical_filename, python_reported_file
-from coverage.misc import CoverageException, ExceptionDuringRun, NoCode, NoSource, isolate_module
-from coverage.phystokens import compile_unicode
-from coverage.python import get_python_source
-
-os = isolate_module(os)
-
-
-class DummyLoader(object):
- """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
-
- Currently only implements the .fullname attribute.
- """
- def __init__(self, fullname, *_args):
- self.fullname = fullname
-
-
-if importlib_util_find_spec:
- def find_module(modulename):
- """Find the module named `modulename`.
-
- Returns the file path of the module, the name of the enclosing
- package, and the spec.
- """
- try:
- spec = importlib_util_find_spec(modulename)
- except ImportError as err:
- raise NoSource(str(err))
- if not spec:
- raise NoSource("No module named %r" % (modulename,))
- pathname = spec.origin
- packagename = spec.name
- if spec.submodule_search_locations:
- mod_main = modulename + ".__main__"
- spec = importlib_util_find_spec(mod_main)
- if not spec:
- raise NoSource(
- "No module named %s; "
- "%r is a package and cannot be directly executed"
- % (mod_main, modulename)
- )
- pathname = spec.origin
- packagename = spec.name
- packagename = packagename.rpartition(".")[0]
- return pathname, packagename, spec
-else:
- def find_module(modulename):
- """Find the module named `modulename`.
-
- Returns the file path of the module, the name of the enclosing
- package, and None (where a spec would have been).
- """
- openfile = None
- glo, loc = globals(), locals()
- try:
- # Search for the module - inside its parent package, if any - using
- # standard import mechanics.
- if '.' in modulename:
- packagename, name = modulename.rsplit('.', 1)
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- else:
- packagename, name = None, modulename
- searchpath = None # "top-level search" in imp.find_module()
- openfile, pathname, _ = imp.find_module(name, searchpath)
-
- # Complain if this is a magic non-file module.
- if openfile is None and pathname is None:
- raise NoSource(
- "module does not live in a file: %r" % modulename
- )
-
- # If `modulename` is actually a package, not a mere module, then we
- # pretend to be Python 2.7 and try running its __main__.py script.
- if openfile is None:
- packagename = modulename
- name = '__main__'
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- openfile, pathname, _ = imp.find_module(name, searchpath)
- except ImportError as err:
- raise NoSource(str(err))
- finally:
- if openfile:
- openfile.close()
-
- return pathname, packagename, None
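Both branches return the same (pathname, packagename, spec) triple; for instance, on Python 3 the importlib-based variant resolves a stdlib module like this (paths vary per installation):

    from coverage.execfile import find_module

    pathname, package, spec = find_module("http.server")
    print(pathname)   # e.g. '/usr/lib/python3.8/http/server.py'
    print(package)    # 'http'
    print(spec)       # a ModuleSpec here; the imp-based fallback returns None instead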
-
-
-class PyRunner(object):
- """Multi-stage execution of Python code.
-
- This is meant to emulate real Python execution as closely as possible.
-
- """
- def __init__(self, args, as_module=False):
- self.args = args
- self.as_module = as_module
-
- self.arg0 = args[0]
- self.package = self.modulename = self.pathname = self.loader = self.spec = None
-
- def prepare(self):
- """Set sys.path properly.
-
- This needs to happen before any importing, and without importing anything.
- """
- if self.as_module:
- if env.PYBEHAVIOR.actual_syspath0_dash_m:
- path0 = os.getcwd()
- else:
- path0 = ""
- elif os.path.isdir(self.arg0):
- # Running a directory means running the __main__.py file in that
- # directory.
- path0 = self.arg0
- else:
- path0 = os.path.abspath(os.path.dirname(self.arg0))
-
- if os.path.isdir(sys.path[0]):
- # sys.path fakery. If we are being run as a command, then sys.path[0]
- # is the directory of the "coverage" script. If this is so, replace
- # sys.path[0] with the directory of the file we're running, or the
- # current directory when running modules. If it isn't so, then we
- # don't know what's going on, and just leave it alone.
- top_file = inspect.stack()[-1][0].f_code.co_filename
- sys_path_0_abs = os.path.abspath(sys.path[0])
- top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
- sys_path_0_abs = canonical_filename(sys_path_0_abs)
- top_file_dir_abs = canonical_filename(top_file_dir_abs)
- if sys_path_0_abs != top_file_dir_abs:
- path0 = None
-
- else:
- # sys.path[0] is a file. Is the next entry the directory containing
- # that file?
- if sys.path[1] == os.path.dirname(sys.path[0]):
- # Can it be right to always remove that?
- del sys.path[1]
-
- if path0 is not None:
- sys.path[0] = python_reported_file(path0)
-
- def _prepare2(self):
- """Do more preparation to run Python code.
-
- Includes finding the module to run and adjusting sys.argv[0].
- This method is allowed to import code.
-
- """
- if self.as_module:
- self.modulename = self.arg0
- pathname, self.package, self.spec = find_module(self.modulename)
- if self.spec is not None:
- self.modulename = self.spec.name
- self.loader = DummyLoader(self.modulename)
- self.pathname = os.path.abspath(pathname)
- self.args[0] = self.arg0 = self.pathname
- elif os.path.isdir(self.arg0):
- # Running a directory means running the __main__.py file in that
- # directory.
- for ext in [".py", ".pyc", ".pyo"]:
- try_filename = os.path.join(self.arg0, "__main__" + ext)
- if os.path.exists(try_filename):
- self.arg0 = try_filename
- break
- else:
- raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
-
- if env.PY2:
- self.arg0 = os.path.abspath(self.arg0)
-
- # Make a spec. I don't know if this is the right way to do it.
- try:
- import importlib.machinery
- except ImportError:
- pass
- else:
- try_filename = python_reported_file(try_filename)
- self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
- self.spec.has_location = True
- self.package = ""
- self.loader = DummyLoader("__main__")
- else:
- if env.PY3:
- self.loader = DummyLoader("__main__")
-
- self.arg0 = python_reported_file(self.arg0)
-
- def run(self):
- """Run the Python code!"""
-
- self._prepare2()
-
- # Create a module to serve as __main__
- main_mod = types.ModuleType('__main__')
-
- from_pyc = self.arg0.endswith((".pyc", ".pyo"))
- main_mod.__file__ = self.arg0
- if from_pyc:
- main_mod.__file__ = main_mod.__file__[:-1]
- if self.package is not None:
- main_mod.__package__ = self.package
- main_mod.__loader__ = self.loader
- if self.spec is not None:
- main_mod.__spec__ = self.spec
-
- main_mod.__builtins__ = BUILTINS
-
- sys.modules['__main__'] = main_mod
-
- # Set sys.argv properly.
- sys.argv = self.args
-
- try:
- # Make a code object somehow.
- if from_pyc:
- code = make_code_from_pyc(self.arg0)
- else:
- code = make_code_from_py(self.arg0)
- except CoverageException:
- raise
- except Exception as exc:
- msg = "Couldn't run '{filename}' as Python code: {exc.__class__.__name__}: {exc}"
- raise CoverageException(msg.format(filename=self.arg0, exc=exc))
-
- # Execute the code object.
- # Return to the original directory in case the test code exits in
- # a non-existent directory.
- cwd = os.getcwd()
- try:
- exec(code, main_mod.__dict__)
- except SystemExit: # pylint: disable=try-except-raise
- # The user called sys.exit(). Just pass it along to the upper
- # layers, where it will be handled.
- raise
- except Exception:
- # Something went wrong while executing the user code.
- # Get the exc_info, and pack them into an exception that we can
- # throw up to the outer loop. We peel one layer off the traceback
- # so that the coverage.py code doesn't appear in the final printed
- # traceback.
- typ, err, tb = sys.exc_info()
-
- # PyPy3 weirdness. If I don't access __context__, then somehow it
- # is non-None when the exception is reported at the upper layer,
- # and a nested exception is shown to the user. This getattr fixes
- # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
- getattr(err, '__context__', None)
-
- # Call the excepthook.
- try:
- if hasattr(err, "__traceback__"):
- err.__traceback__ = err.__traceback__.tb_next
- sys.excepthook(typ, err, tb.tb_next)
- except SystemExit: # pylint: disable=try-except-raise
- raise
- except Exception:
- # Getting the output right in the case of excepthook
- # shenanigans is kind of involved.
- sys.stderr.write("Error in sys.excepthook:\n")
- typ2, err2, tb2 = sys.exc_info()
- err2.__suppress_context__ = True
- if hasattr(err2, "__traceback__"):
- err2.__traceback__ = err2.__traceback__.tb_next
- sys.__excepthook__(typ2, err2, tb2.tb_next)
- sys.stderr.write("\nOriginal exception was:\n")
- raise ExceptionDuringRun(typ, err, tb.tb_next)
- else:
- sys.exit(1)
- finally:
- os.chdir(cwd)
-
-
-def run_python_module(args):
- """Run a Python module, as though with ``python -m name args...``.
-
- `args` is the argument array to present as sys.argv, including the first
- element naming the module being executed.
-
- This is a helper for tests, to encapsulate how to use PyRunner.
-
- """
- runner = PyRunner(args, as_module=True)
- runner.prepare()
- runner.run()
-
-
-def run_python_file(args):
- """Run a Python file as if it were the main program on the command line.
-
- `args` is the argument array to present as sys.argv, including the first
-    element naming the file being executed.
-
- This is a helper for tests, to encapsulate how to use PyRunner.
-
- """
- runner = PyRunner(args, as_module=False)
- runner.prepare()
- runner.run()
-
-
-def make_code_from_py(filename):
- """Get source from `filename` and make a code object of it."""
- # Open the source file.
- try:
- source = get_python_source(filename)
- except (IOError, NoSource):
- raise NoSource("No file to run: '%s'" % filename)
-
- code = compile_unicode(source, filename, "exec")
- return code
-
-
-def make_code_from_pyc(filename):
- """Get a code object from a .pyc file."""
- try:
- fpyc = open(filename, "rb")
- except IOError:
- raise NoCode("No file to run: '%s'" % filename)
-
- with fpyc:
- # First four bytes are a version-specific magic number. It has to
- # match or we won't run the file.
- magic = fpyc.read(4)
- if magic != PYC_MAGIC_NUMBER:
- raise NoCode("Bad magic number in .pyc file: {} != {}".format(magic, PYC_MAGIC_NUMBER))
-
- date_based = True
- if env.PYBEHAVIOR.hashed_pyc_pep552:
- flags = struct.unpack('<L', fpyc.read(4))[0]
- hash_based = flags & 0x01
- if hash_based:
- fpyc.read(8) # Skip the hash.
- date_based = False
- if date_based:
- # Skip the junk in the header that we don't need.
- fpyc.read(4) # Skip the moddate.
- if env.PYBEHAVIOR.size_in_pyc:
- # 3.3 added another long to the header (size), skip it.
- fpyc.read(4)
-
- # The rest of the file is the code object we want.
- code = marshal.load(fpyc)
-
- return code
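The deleted execfile.py exposed two thin wrappers over PyRunner. A minimal usage sketch, assuming the module was importable as coverage.execfile (the import path is inferred from the file location above; the script and module names are illustrative):

    from coverage.execfile import run_python_file, run_python_module

    # Roughly equivalent to: python myscript.py --verbose
    run_python_file(["myscript.py", "--verbose"])

    # Roughly equivalent to: python -m mypkg.tool --verbose
    run_python_module(["mypkg.tool", "--verbose"])

Both helpers build a PyRunner, call prepare() to fix up sys.path before any imports happen, then run(), which installs a fresh __main__ module, sets sys.argv to the given argument list, and executes the compiled code.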
diff --git a/contrib/python/coverage/py2/coverage/files.py b/contrib/python/coverage/py2/coverage/files.py
deleted file mode 100644
index 5133ad07f3..0000000000
--- a/contrib/python/coverage/py2/coverage/files.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""File wrangling."""
-
-import hashlib
-import fnmatch
-import ntpath
-import os
-import os.path
-import posixpath
-import re
-import sys
-
-from coverage import env
-from coverage.backward import unicode_class
-from coverage.misc import contract, CoverageException, join_regex, isolate_module
-
-
-os = isolate_module(os)
-
-
-def set_relative_directory():
- """Set the directory that `relative_filename` will be relative to."""
- global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
-
- # The absolute path to our current directory.
- RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
-
- # Cache of results of calling the canonical_filename() method, to
- # avoid duplicating work.
- CANONICAL_FILENAME_CACHE = {}
-
-
-def relative_directory():
- """Return the directory that `relative_filename` is relative to."""
- return RELATIVE_DIR
-
-
-@contract(returns='unicode')
-def relative_filename(filename):
- """Return the relative form of `filename`.
-
-    The file name will be relative to the current directory at the time
-    `set_relative_directory` was called.
-
- """
- fnorm = os.path.normcase(filename)
- if fnorm.startswith(RELATIVE_DIR):
- filename = filename[len(RELATIVE_DIR):]
- return unicode_filename(filename)
-
-
-@contract(returns='unicode')
-def canonical_filename(filename):
- """Return a canonical file name for `filename`.
-
- An absolute path with no redundant components and normalized case.
-
- """
- if filename not in CANONICAL_FILENAME_CACHE:
- cf = filename
- if not os.path.isabs(filename):
- for path in [os.curdir] + sys.path:
- if path is None:
- continue
- f = os.path.join(path, filename)
- try:
- exists = os.path.exists(f)
- except UnicodeError:
- exists = False
- if exists:
- cf = f
- break
- cf = abs_file(cf)
- CANONICAL_FILENAME_CACHE[filename] = cf
- return CANONICAL_FILENAME_CACHE[filename]
-
-if getattr(sys, 'is_standalone_binary', False):
-    # The filename for py files in the binary is already canonical:
-    # it is relative to the arcadia root.
- def canonical_filename(filename):
-        # The next assert is not needed when coverage is loaded from a non-arcadia source inside an arcadia binary.
- # assert not filename.startswith("/"), filename
- return filename
-
-MAX_FLAT = 200
-
-@contract(filename='unicode', returns='unicode')
-def flat_rootname(filename):
- """A base for a flat file name to correspond to this file.
-
- Useful for writing files about the code where you want all the files in
- the same directory, but need to differentiate same-named files from
- different directories.
-
- For example, the file a/b/c.py will return 'a_b_c_py'
-
- """
- name = ntpath.splitdrive(filename)[1]
- name = re.sub(r"[\\/.:]", "_", name)
- if len(name) > MAX_FLAT:
- h = hashlib.sha1(name.encode('UTF-8')).hexdigest()
- name = name[-(MAX_FLAT-len(h)-1):] + '_' + h
- return name
-
-
-if env.WINDOWS:
-
- _ACTUAL_PATH_CACHE = {}
- _ACTUAL_PATH_LIST_CACHE = {}
-
- def actual_path(path):
- """Get the actual path of `path`, including the correct case."""
- if env.PY2 and isinstance(path, unicode_class):
- path = path.encode(sys.getfilesystemencoding())
- if path in _ACTUAL_PATH_CACHE:
- return _ACTUAL_PATH_CACHE[path]
-
- head, tail = os.path.split(path)
- if not tail:
- # This means head is the drive spec: normalize it.
- actpath = head.upper()
- elif not head:
- actpath = tail
- else:
- head = actual_path(head)
- if head in _ACTUAL_PATH_LIST_CACHE:
- files = _ACTUAL_PATH_LIST_CACHE[head]
- else:
- try:
- files = os.listdir(head)
- except Exception:
- # This will raise OSError, or this bizarre TypeError:
- # https://bugs.python.org/issue1776160
- files = []
- _ACTUAL_PATH_LIST_CACHE[head] = files
- normtail = os.path.normcase(tail)
- for f in files:
- if os.path.normcase(f) == normtail:
- tail = f
- break
- actpath = os.path.join(head, tail)
- _ACTUAL_PATH_CACHE[path] = actpath
- return actpath
-
-else:
- def actual_path(filename):
- """The actual path for non-Windows platforms."""
- return filename
-
-
-if env.PY2:
- @contract(returns='unicode')
- def unicode_filename(filename):
- """Return a Unicode version of `filename`."""
- if isinstance(filename, str):
- encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
- filename = filename.decode(encoding, "replace")
- return filename
-else:
- @contract(filename='unicode', returns='unicode')
- def unicode_filename(filename):
- """Return a Unicode version of `filename`."""
- return filename
-
-
-@contract(returns='unicode')
-def abs_file(path):
- """Return the absolute normalized form of `path`."""
- try:
- path = os.path.realpath(path)
- except UnicodeError:
- pass
- path = os.path.abspath(path)
- path = actual_path(path)
- path = unicode_filename(path)
- return path
-
-
-def python_reported_file(filename):
- """Return the string as Python would describe this file name."""
- if env.PYBEHAVIOR.report_absolute_files:
- filename = os.path.abspath(filename)
- return filename
-
-
-RELATIVE_DIR = None
-CANONICAL_FILENAME_CACHE = None
-set_relative_directory()
-
-
-def isabs_anywhere(filename):
- """Is `filename` an absolute path on any OS?"""
- return ntpath.isabs(filename) or posixpath.isabs(filename)
-
-
-def prep_patterns(patterns):
- """Prepare the file patterns for use in a `FnmatchMatcher`.
-
- If a pattern starts with a wildcard, it is used as a pattern
- as-is. If it does not start with a wildcard, then it is made
- absolute with the current directory.
-
- If `patterns` is None, an empty list is returned.
-
- """
- prepped = []
- for p in patterns or []:
- if p.startswith(("*", "?")):
- prepped.append(p)
- else:
- prepped.append(abs_file(p))
- return prepped
-
-
-class TreeMatcher(object):
- """A matcher for files in a tree.
-
- Construct with a list of paths, either files or directories. Paths match
- with the `match` method if they are one of the files, or if they are
- somewhere in a subtree rooted at one of the directories.
-
- """
- def __init__(self, paths):
- self.paths = list(paths)
-
- def __repr__(self):
- return "<TreeMatcher %r>" % self.paths
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.paths
-
- def match(self, fpath):
- """Does `fpath` indicate a file in one of our trees?"""
- for p in self.paths:
- if fpath.startswith(p):
- if fpath == p:
- # This is the same file!
- return True
- if fpath[len(p)] == os.sep:
- # This is a file in the directory
- return True
- return False
-
-
-class ModuleMatcher(object):
- """A matcher for modules in a tree."""
- def __init__(self, module_names):
- self.modules = list(module_names)
-
- def __repr__(self):
- return "<ModuleMatcher %r>" % (self.modules)
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.modules
-
- def match(self, module_name):
- """Does `module_name` indicate a module in one of our packages?"""
- if not module_name:
- return False
-
- for m in self.modules:
- if module_name.startswith(m):
- if module_name == m:
- return True
- if module_name[len(m)] == '.':
- # This is a module in the package
- return True
-
- return False
-
-
-class FnmatchMatcher(object):
- """A matcher for files by file name pattern."""
- def __init__(self, pats):
- self.pats = list(pats)
- self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS)
-
- def __repr__(self):
- return "<FnmatchMatcher %r>" % self.pats
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.pats
-
- def match(self, fpath):
- """Does `fpath` match one of our file name patterns?"""
- return self.re.match(fpath) is not None
-
-
-def sep(s):
- """Find the path separator used in this string, or os.sep if none."""
- sep_match = re.search(r"[\\/]", s)
- if sep_match:
- the_sep = sep_match.group(0)
- else:
- the_sep = os.sep
- return the_sep
-
-
-def fnmatches_to_regex(patterns, case_insensitive=False, partial=False):
- """Convert fnmatch patterns to a compiled regex that matches any of them.
-
- Slashes are always converted to match either slash or backslash, for
- Windows support, even when running elsewhere.
-
- If `partial` is true, then the pattern will match if the target string
- starts with the pattern. Otherwise, it must match the entire string.
-
- Returns: a compiled regex object. Use the .match method to compare target
- strings.
-
- """
- regexes = (fnmatch.translate(pattern) for pattern in patterns)
-    # Python 3.7 fnmatch translates "/" as "/". Before that, it translated it as "\/",
-    # so we have to allow for an optional backslash.
- regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes)
-
- if partial:
- # fnmatch always adds a \Z to match the whole string, which we don't
- # want, so we remove the \Z. While removing it, we only replace \Z if
- # followed by paren (introducing flags), or at end, to keep from
- # destroying a literal \Z in the pattern.
- regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes)
-
- flags = 0
- if case_insensitive:
- flags |= re.IGNORECASE
- compiled = re.compile(join_regex(regexes), flags=flags)
-
- return compiled
-
-
-class PathAliases(object):
- """A collection of aliases for paths.
-
- When combining data files from remote machines, often the paths to source
- code are different, for example, due to OS differences, or because of
- serialized checkouts on continuous integration machines.
-
- A `PathAliases` object tracks a list of pattern/result pairs, and can
- map a path through those aliases to produce a unified path.
-
- """
- def __init__(self):
- self.aliases = []
-
- def pprint(self): # pragma: debugging
- """Dump the important parts of the PathAliases, for debugging."""
- for regex, result in self.aliases:
- print("{!r} --> {!r}".format(regex.pattern, result))
-
- def add(self, pattern, result):
- """Add the `pattern`/`result` pair to the list of aliases.
-
- `pattern` is an `fnmatch`-style pattern. `result` is a simple
- string. When mapping paths, if a path starts with a match against
- `pattern`, then that match is replaced with `result`. This models
- isomorphic source trees being rooted at different places on two
- different machines.
-
- `pattern` can't end with a wildcard component, since that would
- match an entire tree, and not just its root.
-
- """
- pattern_sep = sep(pattern)
-
- if len(pattern) > 1:
- pattern = pattern.rstrip(r"\/")
-
- # The pattern can't end with a wildcard component.
- if pattern.endswith("*"):
- raise CoverageException("Pattern must not end with wildcards.")
-
- # The pattern is meant to match a filepath. Let's make it absolute
- # unless it already is, or is meant to match any prefix.
- if not pattern.startswith('*') and not isabs_anywhere(pattern +
- pattern_sep):
- pattern = abs_file(pattern)
- if not pattern.endswith(pattern_sep):
- pattern += pattern_sep
-
- # Make a regex from the pattern.
- regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True)
-
- # Normalize the result: it must end with a path separator.
- result_sep = sep(result)
- result = result.rstrip(r"\/") + result_sep
- self.aliases.append((regex, result))
-
- def map(self, path):
- """Map `path` through the aliases.
-
- `path` is checked against all of the patterns. The first pattern to
- match is used to replace the root of the path with the result root.
- Only one pattern is ever used. If no patterns match, `path` is
- returned unchanged.
-
- The separator style in the result is made to match that of the result
- in the alias.
-
- Returns the mapped path. If a mapping has happened, this is a
- canonical path. If no mapping has happened, it is the original value
- of `path` unchanged.
-
- """
- for regex, result in self.aliases:
- m = regex.match(path)
- if m:
- new = path.replace(m.group(0), result)
- new = new.replace(sep(path), sep(result))
- new = canonical_filename(new)
- return new
- return path
-
-
-def find_python_files(dirname):
- """Yield all of the importable Python files in `dirname`, recursively.
-
- To be importable, the files have to be in a directory with a __init__.py,
- except for `dirname` itself, which isn't required to have one. The
- assumption is that `dirname` was specified directly, so the user knows
- best, but sub-directories are checked for a __init__.py to be sure we only
- find the importable files.
-
- """
- for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
- if i > 0 and '__init__.py' not in filenames:
- # If a directory doesn't have __init__.py, then it isn't
- # importable and neither are its files
- del dirnames[:]
- continue
- for filename in filenames:
- # We're only interested in files that look like reasonable Python
- # files: Must end with .py or .pyw, and must not have certain funny
- # characters that probably mean they are editor junk.
- if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
- yield os.path.join(dirpath, filename)
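Most of the deleted files.py is path plumbing; the least obvious piece is PathAliases, documented above. A minimal sketch of the add()/map() flow, assuming the class was importable as coverage.files (the concrete paths are illustrative):

    from coverage.files import PathAliases

    aliases = PathAliases()
    # fnmatch-style pattern on the left, replacement root on the right.
    aliases.add("/ci/workspace/*/src", "./src")

    # The first matching pattern rewrites the root of the path; the result is
    # then passed through canonical_filename(), so it comes back canonical.
    mapped = aliases.map("/ci/workspace/build-42/src/pkg/mod.py")

As the map() docstring above says, a path that matches no pattern is returned unchanged.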
diff --git a/contrib/python/coverage/py2/coverage/fullcoverage/encodings.py b/contrib/python/coverage/py2/coverage/fullcoverage/encodings.py
deleted file mode 100644
index aeb416e406..0000000000
--- a/contrib/python/coverage/py2/coverage/fullcoverage/encodings.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Imposter encodings module that installs a coverage-style tracer.
-
-This is NOT the encodings module; it is an imposter that sets up tracing
-instrumentation and then replaces itself with the real encodings module.
-
-If the directory that holds this file is placed first in the PYTHONPATH when
-using "coverage" to run Python's tests, then this file will become the very
-first module imported by the internals of Python 3. It installs a
-coverage.py-compatible trace function that can watch Standard Library modules
-execute from the very earliest stages of Python's own boot process. This fixes
-a problem with coverage.py - that it starts too late to trace the coverage of
-many of the most fundamental modules in the Standard Library.
-
-"""
-
-import sys
-
-class FullCoverageTracer(object):
- def __init__(self):
- # `traces` is a list of trace events. Frames are tricky: the same
- # frame object is used for a whole scope, with new line numbers
- # written into it. So in one scope, all the frame objects are the
-        # same object, and will eventually all point to the last line
- # executed. So we keep the line numbers alongside the frames.
- # The list looks like:
- #
- # traces = [
- # ((frame, event, arg), lineno), ...
- # ]
- #
- self.traces = []
-
- def fullcoverage_trace(self, *args):
- frame, event, arg = args
- self.traces.append((args, frame.f_lineno))
- return self.fullcoverage_trace
-
-sys.settrace(FullCoverageTracer().fullcoverage_trace)
-
-# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
-# understand why, but that use of glob borks everything if fullcoverage is in
-# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
-# there. This means when using fullcoverage, Windows path names will not be
-# their actual case.
-
-#sys.fullcoverage = True
-
-# Finally, remove our own directory from sys.path; remove ourselves from
-# sys.modules; and re-import "encodings", which will be the real package
-# this time. Note that the delete from sys.modules dictionary has to
-# happen last, since all of the symbols in this module will become None
-# at that exact moment, including "sys".
-
-parentdir = max(filter(__file__.startswith, sys.path), key=len)
-sys.path.remove(parentdir)
-del sys.modules['encodings']
-import encodings
diff --git a/contrib/python/coverage/py2/coverage/html.py b/contrib/python/coverage/py2/coverage/html.py
deleted file mode 100644
index 9d8e342716..0000000000
--- a/contrib/python/coverage/py2/coverage/html.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""HTML reporting for coverage.py."""
-
-import datetime
-import json
-import os
-import re
-import shutil
-import sys
-
-import coverage
-from coverage import env
-from coverage.backward import iitems, SimpleNamespace, format_local_datetime
-from coverage.data import add_data_to_hash
-from coverage.files import flat_rootname
-from coverage.misc import CoverageException, ensure_dir, file_be_gone, Hasher, isolate_module
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-from coverage.templite import Templite
-
-os = isolate_module(os)
-
-
-# Static files are looked for in a list of places.
-STATIC_PATH = [
- # The place Debian puts system Javascript libraries.
- "/usr/share/javascript",
-
- # Our htmlfiles directory.
- os.path.join(os.path.dirname(__file__), "htmlfiles"),
-]
-
-
-def data_filename(fname, pkgdir=""):
- """Return the path to a data file of ours.
-
-    The file is searched for on `STATIC_PATH`, and the first place it's found
-    is returned.
-
- Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
- is provided, at that sub-directory.
-
- """
- tried = []
- for static_dir in STATIC_PATH:
- static_filename = os.path.join(static_dir, fname)
- if os.path.exists(static_filename):
- return static_filename
- else:
- tried.append(static_filename)
- if pkgdir:
- static_filename = os.path.join(static_dir, pkgdir, fname)
- if os.path.exists(static_filename):
- return static_filename
- else:
- tried.append(static_filename)
- raise CoverageException(
- "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
- )
-
-
-def get_htmlfiles_resource(name):
- import pkgutil
- return pkgutil.get_data(__package__, 'htmlfiles/' + name)
-
-
-def read_data(fname):
- """Return the contents of a data file of ours."""
- if getattr(sys, 'is_standalone_binary', False):
-        # pkgutil.get_data() may return None, so check before decoding.
-        res_buf = get_htmlfiles_resource(fname)
-        if res_buf is not None:
-            return res_buf.decode()
-
- with open(data_filename(fname)) as data_file:
- return data_file.read()
-
-
-def write_html(fname, html):
- """Write `html` to `fname`, properly encoded."""
- html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
- with open(fname, "wb") as fout:
- fout.write(html.encode('ascii', 'xmlcharrefreplace'))
-
-
-class HtmlDataGeneration(object):
- """Generate structured data to be turned into HTML reports."""
-
- EMPTY = "(empty)"
-
- def __init__(self, cov):
- self.coverage = cov
- self.config = self.coverage.config
- data = self.coverage.get_data()
- self.has_arcs = data.has_arcs()
- if self.config.show_contexts:
- if data.measured_contexts() == {""}:
- self.coverage._warn("No contexts were measured")
- data.set_query_contexts(self.config.report_contexts)
-
- def data_for_file(self, fr, analysis):
- """Produce the data needed for one file's report."""
- if self.has_arcs:
- missing_branch_arcs = analysis.missing_branch_arcs()
- arcs_executed = analysis.arcs_executed()
-
- if self.config.show_contexts:
- contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
-
- lines = []
-
- for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
- # Figure out how to mark this line.
- category = None
- short_annotations = []
- long_annotations = []
-
- if lineno in analysis.excluded:
- category = 'exc'
- elif lineno in analysis.missing:
- category = 'mis'
- elif self.has_arcs and lineno in missing_branch_arcs:
- category = 'par'
- for b in missing_branch_arcs[lineno]:
- if b < 0:
- short_annotations.append("exit")
- else:
- short_annotations.append(b)
- long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
- elif lineno in analysis.statements:
- category = 'run'
-
- contexts = contexts_label = None
- context_list = None
- if category and self.config.show_contexts:
- contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno])
- if contexts == [self.EMPTY]:
- contexts_label = self.EMPTY
- else:
- contexts_label = "{} ctx".format(len(contexts))
- context_list = contexts
-
- lines.append(SimpleNamespace(
- tokens=tokens,
- number=lineno,
- category=category,
- statement=(lineno in analysis.statements),
- contexts=contexts,
- contexts_label=contexts_label,
- context_list=context_list,
- short_annotations=short_annotations,
- long_annotations=long_annotations,
- ))
-
- file_data = SimpleNamespace(
- relative_filename=fr.relative_filename(),
- nums=analysis.numbers,
- lines=lines,
- )
-
- return file_data
-
-
-class HtmlReporter(object):
- """HTML reporting."""
-
- # These files will be copied from the htmlfiles directory to the output
- # directory.
- STATIC_FILES = [
- ("style.css", ""),
- ("jquery.min.js", "jquery"),
- ("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"),
- ("jquery.hotkeys.js", "jquery-hotkeys"),
- ("jquery.isonscreen.js", "jquery-isonscreen"),
- ("jquery.tablesorter.min.js", "jquery-tablesorter"),
- ("coverage_html.js", ""),
- ("keybd_closed.png", ""),
- ("keybd_open.png", ""),
- ("favicon_32.png", ""),
- ]
-
- def __init__(self, cov):
- self.coverage = cov
- self.config = self.coverage.config
- self.directory = self.config.html_dir
-
- self.skip_covered = self.config.html_skip_covered
- if self.skip_covered is None:
- self.skip_covered = self.config.skip_covered
- self.skip_empty = self.config.html_skip_empty
- if self.skip_empty is None:
-            self.skip_empty = self.config.skip_empty
-
- title = self.config.html_title
- if env.PY2:
- title = title.decode("utf8")
-
- if self.config.extra_css:
- self.extra_css = os.path.basename(self.config.extra_css)
- else:
- self.extra_css = None
-
- self.data = self.coverage.get_data()
- self.has_arcs = self.data.has_arcs()
-
- self.file_summaries = []
- self.all_files_nums = []
- self.incr = IncrementalChecker(self.directory)
- self.datagen = HtmlDataGeneration(self.coverage)
- self.totals = Numbers()
-
- self.template_globals = {
- # Functions available in the templates.
- 'escape': escape,
- 'pair': pair,
- 'len': len,
-
- # Constants for this report.
- '__url__': coverage.__url__,
- '__version__': coverage.__version__,
- 'title': title,
- 'time_stamp': format_local_datetime(datetime.datetime.now()),
- 'extra_css': self.extra_css,
- 'has_arcs': self.has_arcs,
- 'show_contexts': self.config.show_contexts,
-
- # Constants for all reports.
- # These css classes determine which lines are highlighted by default.
- 'category': {
- 'exc': 'exc show_exc',
- 'mis': 'mis show_mis',
- 'par': 'par run show_par',
- 'run': 'run',
- }
- }
- self.pyfile_html_source = read_data("pyfile.html")
- self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
-
- def report(self, morfs):
- """Generate an HTML report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- """
- # Read the status data and check that this run used the same
- # global data as the last run.
- self.incr.read()
- self.incr.check_global_data(self.config, self.pyfile_html_source)
-
- # Process all the files.
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.html_file(fr, analysis)
-
- if not self.all_files_nums:
- raise CoverageException("No data to report.")
-
- self.totals = sum(self.all_files_nums)
-
- # Write the index file.
- self.index_file()
-
- self.make_local_static_report_files()
- return self.totals.n_statements and self.totals.pc_covered
-
- def make_local_static_report_files(self):
- """Make local instances of static files for HTML report."""
- # The files we provide must always be copied.
- for static, pkgdir in self.STATIC_FILES:
- if getattr(sys, 'is_standalone_binary', False):
- data = get_htmlfiles_resource(static)
- if data is None:
- raise IOError("No such resource: " + static)
-
- with open(os.path.join(self.directory, static), "wb") as afile:
- afile.write(data)
- else:
- shutil.copyfile(
- data_filename(static, pkgdir),
- os.path.join(self.directory, static)
- )
-
- # The user may have extra CSS they want copied.
- if self.extra_css:
- shutil.copyfile(
- self.config.extra_css,
- os.path.join(self.directory, self.extra_css)
- )
-
- def html_file(self, fr, analysis):
- """Generate an HTML file for one source file."""
- rootname = flat_rootname(fr.relative_filename())
- html_filename = rootname + ".html"
- ensure_dir(self.directory)
- html_path = os.path.join(self.directory, html_filename)
-
- # Get the numbers for this file.
- nums = analysis.numbers
- self.all_files_nums.append(nums)
-
- if self.skip_covered:
- # Don't report on 100% files.
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if no_missing_lines and no_missing_branches:
- # If there's an existing file, remove it.
- file_be_gone(html_path)
- return
-
- if self.skip_empty:
- # Don't report on empty files.
- if nums.n_statements == 0:
- file_be_gone(html_path)
- return
-
- # Find out if the file on disk is already correct.
- if self.incr.can_skip_file(self.data, fr, rootname):
- self.file_summaries.append(self.incr.index_info(rootname))
- return
-
- # Write the HTML page for this file.
- file_data = self.datagen.data_for_file(fr, analysis)
- for ldata in file_data.lines:
- # Build the HTML for the line.
- html = []
- for tok_type, tok_text in ldata.tokens:
- if tok_type == "ws":
- html.append(escape(tok_text))
- else:
- tok_html = escape(tok_text) or '&nbsp;'
- html.append(
- u'<span class="{}">{}</span>'.format(tok_type, tok_html)
- )
- ldata.html = ''.join(html)
-
- if ldata.short_annotations:
- # 202F is NARROW NO-BREAK SPACE.
- # 219B is RIGHTWARDS ARROW WITH STROKE.
- ldata.annotate = u",&nbsp;&nbsp; ".join(
- u"{}&#x202F;&#x219B;&#x202F;{}".format(ldata.number, d)
- for d in ldata.short_annotations
- )
- else:
- ldata.annotate = None
-
- if ldata.long_annotations:
- longs = ldata.long_annotations
- if len(longs) == 1:
- ldata.annotate_long = longs[0]
- else:
- ldata.annotate_long = u"{:d} missed branches: {}".format(
- len(longs),
- u", ".join(
- u"{:d}) {}".format(num, ann_long)
- for num, ann_long in enumerate(longs, start=1)
- ),
- )
- else:
- ldata.annotate_long = None
-
- css_classes = []
- if ldata.category:
- css_classes.append(self.template_globals['category'][ldata.category])
- ldata.css_class = ' '.join(css_classes) or "pln"
-
- html = self.source_tmpl.render(file_data.__dict__)
- write_html(html_path, html)
-
- # Save this file's information for the index file.
- index_info = {
- 'nums': nums,
- 'html_filename': html_filename,
- 'relative_filename': fr.relative_filename(),
- }
- self.file_summaries.append(index_info)
- self.incr.set_index_info(rootname, index_info)
-
- def index_file(self):
- """Write the index.html file for this report."""
- index_tmpl = Templite(read_data("index.html"), self.template_globals)
-
- html = index_tmpl.render({
- 'files': self.file_summaries,
- 'totals': self.totals,
- })
-
- write_html(os.path.join(self.directory, "index.html"), html)
-
- # Write the latest hashes for next time.
- self.incr.write()
-
-
-class IncrementalChecker(object):
- """Logic and data to support incremental reporting."""
-
- STATUS_FILE = "status.json"
- STATUS_FORMAT = 2
-
- # pylint: disable=wrong-spelling-in-comment,useless-suppression
- # The data looks like:
- #
- # {
- # "format": 2,
- # "globals": "540ee119c15d52a68a53fe6f0897346d",
- # "version": "4.0a1",
- # "files": {
- # "cogapp___init__": {
- # "hash": "e45581a5b48f879f301c0f30bf77a50c",
- # "index": {
- # "html_filename": "cogapp___init__.html",
- # "relative_filename": "cogapp/__init__",
- # "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
- # }
- # },
- # ...
- # "cogapp_whiteutils": {
- # "hash": "8504bb427fc488c4176809ded0277d51",
- # "index": {
- # "html_filename": "cogapp_whiteutils.html",
- # "relative_filename": "cogapp/whiteutils",
- # "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
- # }
- # }
- # }
- # }
-
- def __init__(self, directory):
- self.directory = directory
- self.reset()
-
- def reset(self):
- """Initialize to empty. Causes all files to be reported."""
- self.globals = ''
- self.files = {}
-
- def read(self):
- """Read the information we stored last time."""
- usable = False
- try:
- status_file = os.path.join(self.directory, self.STATUS_FILE)
- with open(status_file) as fstatus:
- status = json.load(fstatus)
- except (IOError, ValueError):
- usable = False
- else:
- usable = True
- if status['format'] != self.STATUS_FORMAT:
- usable = False
- elif status['version'] != coverage.__version__:
- usable = False
-
- if usable:
- self.files = {}
- for filename, fileinfo in iitems(status['files']):
- fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
- self.files[filename] = fileinfo
- self.globals = status['globals']
- else:
- self.reset()
-
- def write(self):
- """Write the current status."""
- status_file = os.path.join(self.directory, self.STATUS_FILE)
- files = {}
- for filename, fileinfo in iitems(self.files):
- fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
- files[filename] = fileinfo
-
- status = {
- 'format': self.STATUS_FORMAT,
- 'version': coverage.__version__,
- 'globals': self.globals,
- 'files': files,
- }
- with open(status_file, "w") as fout:
- json.dump(status, fout, separators=(',', ':'))
-
- def check_global_data(self, *data):
- """Check the global data that can affect incremental reporting."""
- m = Hasher()
- for d in data:
- m.update(d)
- these_globals = m.hexdigest()
- if self.globals != these_globals:
- self.reset()
- self.globals = these_globals
-
- def can_skip_file(self, data, fr, rootname):
- """Can we skip reporting this file?
-
- `data` is a CoverageData object, `fr` is a `FileReporter`, and
- `rootname` is the name being used for the file.
- """
- m = Hasher()
- m.update(fr.source().encode('utf-8'))
- add_data_to_hash(data, fr.filename, m)
- this_hash = m.hexdigest()
-
- that_hash = self.file_hash(rootname)
-
- if this_hash == that_hash:
- # Nothing has changed to require the file to be reported again.
- return True
- else:
- self.set_file_hash(rootname, this_hash)
- return False
-
- def file_hash(self, fname):
- """Get the hash of `fname`'s contents."""
- return self.files.get(fname, {}).get('hash', '')
-
- def set_file_hash(self, fname, val):
- """Set the hash of `fname`'s contents."""
- self.files.setdefault(fname, {})['hash'] = val
-
- def index_info(self, fname):
- """Get the information for index.html for `fname`."""
- return self.files.get(fname, {}).get('index', {})
-
- def set_index_info(self, fname, info):
- """Set the information for index.html for `fname`."""
- self.files.setdefault(fname, {})['index'] = info
-
-
-# Helpers for templates and generating HTML
-
-def escape(t):
- """HTML-escape the text in `t`.
-
- This is only suitable for HTML text, not attributes.
-
- """
- # Convert HTML special chars into HTML entities.
- return t.replace("&", "&amp;").replace("<", "&lt;")
-
-
-def pair(ratio):
- """Format a pair of numbers so JavaScript can read them in an attribute."""
- return "%s %s" % ratio
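The two template helpers at the end of the deleted html.py are tiny but easy to misread, so a quick illustration of what they emit (input values are illustrative):

    escape("a < b & c")   # -> "a &lt; b &amp; c"  (HTML text only, not attributes)
    pair((12, 34))        # -> "12 34"

pair() produces the data-ratio attributes seen in index.html further down in this diff; the filter code in coverage_html.js (the next file below) splits that value on the space to recompute percentage totals for the visible rows.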
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/coverage_html.js b/contrib/python/coverage/py2/coverage/htmlfiles/coverage_html.js
deleted file mode 100644
index 27b49b36f9..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/coverage_html.js
+++ /dev/null
@@ -1,616 +0,0 @@
-// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-// Coverage.py HTML report browser code.
-/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
-/*global coverage: true, document, window, $ */
-
-coverage = {};
-
-// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
-coverage.assign_shortkeys = function () {
- $("*[class*='shortkey_']").each(function (i, e) {
- $.each($(e).attr("class").split(" "), function (i, c) {
- if (/^shortkey_/.test(c)) {
- $(document).bind('keydown', c.substr(9), function () {
- $(e).click();
- });
- }
- });
- });
-};
-
-// Create the events for the help panel.
-coverage.wire_up_help_panel = function () {
- $("#keyboard_icon").click(function () {
- // Show the help panel, and position it so the keyboard icon in the
- // panel is in the same place as the keyboard icon in the header.
- $(".help_panel").show();
- var koff = $("#keyboard_icon").offset();
- var poff = $("#panel_icon").position();
- $(".help_panel").offset({
- top: koff.top-poff.top,
- left: koff.left-poff.left
- });
- });
- $("#panel_icon").click(function () {
- $(".help_panel").hide();
- });
-};
-
-// Create the events for the filter box.
-coverage.wire_up_filter = function () {
- // Cache elements.
- var table = $("table.index");
- var table_rows = table.find("tbody tr");
- var table_row_names = table_rows.find("td.name a");
- var no_rows = $("#no_rows");
-
- // Create a duplicate table footer that we can modify with dynamic summed values.
- var table_footer = $("table.index tfoot tr");
- var table_dynamic_footer = table_footer.clone();
- table_dynamic_footer.attr('class', 'total_dynamic hidden');
- table_footer.after(table_dynamic_footer);
-
- // Observe filter keyevents.
- $("#filter").on("keyup change", $.debounce(150, function (event) {
- var filter_value = $(this).val();
-
- if (filter_value === "") {
- // Filter box is empty, remove all filtering.
- table_rows.removeClass("hidden");
-
- // Show standard footer, hide dynamic footer.
- table_footer.removeClass("hidden");
- table_dynamic_footer.addClass("hidden");
-
- // Hide placeholder, show table.
- if (no_rows.length > 0) {
- no_rows.hide();
- }
- table.show();
-
- }
- else {
- // Filter table items by value.
- var hidden = 0;
- var shown = 0;
-
- // Hide / show elements.
- $.each(table_row_names, function () {
- var element = $(this).parents("tr");
-
- if ($(this).text().indexOf(filter_value) === -1) {
- // hide
- element.addClass("hidden");
- hidden++;
- }
- else {
- // show
- element.removeClass("hidden");
- shown++;
- }
- });
-
- // Show placeholder if no rows will be displayed.
- if (no_rows.length > 0) {
- if (shown === 0) {
- // Show placeholder, hide table.
- no_rows.show();
- table.hide();
- }
- else {
- // Hide placeholder, show table.
- no_rows.hide();
- table.show();
- }
- }
-
-            // Manage the dynamic footer:
- if (hidden > 0) {
- // Calculate new dynamic sum values based on visible rows.
- for (var column = 2; column < 20; column++) {
- // Calculate summed value.
- var cells = table_rows.find('td:nth-child(' + column + ')');
- if (!cells.length) {
- // No more columns...!
- break;
- }
-
- var sum = 0, numer = 0, denom = 0;
- $.each(cells.filter(':visible'), function () {
- var ratio = $(this).data("ratio");
- if (ratio) {
- var splitted = ratio.split(" ");
- numer += parseInt(splitted[0], 10);
- denom += parseInt(splitted[1], 10);
- }
- else {
- sum += parseInt(this.innerHTML, 10);
- }
- });
-
- // Get footer cell element.
- var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')');
-
- // Set value into dynamic footer cell element.
- if (cells[0].innerHTML.indexOf('%') > -1) {
- // Percentage columns use the numerator and denominator,
- // and adapt to the number of decimal places.
- var match = /\.([0-9]+)/.exec(cells[0].innerHTML);
- var places = 0;
- if (match) {
- places = match[1].length;
- }
- var pct = numer * 100 / denom;
- footer_cell.text(pct.toFixed(places) + '%');
- }
- else {
- footer_cell.text(sum);
- }
- }
-
- // Hide standard footer, show dynamic footer.
- table_footer.addClass("hidden");
- table_dynamic_footer.removeClass("hidden");
- }
- else {
- // Show standard footer, hide dynamic footer.
- table_footer.removeClass("hidden");
- table_dynamic_footer.addClass("hidden");
- }
- }
- }));
-
- // Trigger change event on setup, to force filter on page refresh
- // (filter value may still be present).
- $("#filter").trigger("change");
-};
-
-// Loaded on index.html
-coverage.index_ready = function ($) {
- // Look for a localStorage item containing previous sort settings:
- var sort_list = [];
- var storage_name = "COVERAGE_INDEX_SORT";
- var stored_list = undefined;
- try {
- stored_list = localStorage.getItem(storage_name);
- } catch(err) {}
-
- if (stored_list) {
- sort_list = JSON.parse('[[' + stored_list + ']]');
- }
-
- // Create a new widget which exists only to save and restore
- // the sort order:
- $.tablesorter.addWidget({
- id: "persistentSort",
-
- // Format is called by the widget before displaying:
- format: function (table) {
- if (table.config.sortList.length === 0 && sort_list.length > 0) {
- // This table hasn't been sorted before - we'll use
- // our stored settings:
- $(table).trigger('sorton', [sort_list]);
- }
- else {
- // This is not the first load - something has
- // already defined sorting so we'll just update
- // our stored value to match:
- sort_list = table.config.sortList;
- }
- }
- });
-
- // Configure our tablesorter to handle the variable number of
- // columns produced depending on report options:
- var headers = [];
- var col_count = $("table.index > thead > tr > th").length;
-
- headers[0] = { sorter: 'text' };
-    for (var i = 1; i < col_count-1; i++) {
- headers[i] = { sorter: 'digit' };
- }
- headers[col_count-1] = { sorter: 'percent' };
-
- // Enable the table sorter:
- $("table.index").tablesorter({
- widgets: ['persistentSort'],
- headers: headers
- });
-
- coverage.assign_shortkeys();
- coverage.wire_up_help_panel();
- coverage.wire_up_filter();
-
- // Watch for page unload events so we can save the final sort settings:
- $(window).on("unload", function () {
- try {
- localStorage.setItem(storage_name, sort_list.toString())
- } catch(err) {}
- });
-};
-
-// -- pyfile stuff --
-
-coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
-
-coverage.pyfile_ready = function ($) {
- // If we're directed to a particular line number, highlight the line.
- var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
- $(frag).addClass('highlight');
- coverage.set_sel(parseInt(frag.substr(2), 10));
- }
- else {
- coverage.set_sel(0);
- }
-
- $(document)
- .bind('keydown', 'j', coverage.to_next_chunk_nicely)
- .bind('keydown', 'k', coverage.to_prev_chunk_nicely)
- .bind('keydown', '0', coverage.to_top)
- .bind('keydown', '1', coverage.to_first_chunk)
- ;
-
- $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");});
- $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");});
- $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");});
- $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");});
-
- coverage.filters = undefined;
- try {
- coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE);
- } catch(err) {}
-
- if (coverage.filters) {
- coverage.filters = JSON.parse(coverage.filters);
- }
- else {
- coverage.filters = {run: false, exc: true, mis: true, par: true};
- }
-
- for (cls in coverage.filters) {
- coverage.set_line_visibilty(cls, coverage.filters[cls]);
- }
-
- coverage.assign_shortkeys();
- coverage.wire_up_help_panel();
-
- coverage.init_scroll_markers();
-
- // Rebuild scroll markers when the window height changes.
- $(window).resize(coverage.build_scroll_markers);
-};
-
-coverage.toggle_lines = function (btn, cls) {
- var onoff = !$(btn).hasClass("show_" + cls);
- coverage.set_line_visibilty(cls, onoff);
- coverage.build_scroll_markers();
- coverage.filters[cls] = onoff;
- try {
- localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters));
- } catch(err) {}
-};
-
-coverage.set_line_visibilty = function (cls, onoff) {
- var show = "show_" + cls;
- var btn = $(".button_toggle_" + cls);
- if (onoff) {
- $("#source ." + cls).addClass(show);
- btn.addClass(show);
- }
- else {
- $("#source ." + cls).removeClass(show);
- btn.removeClass(show);
- }
-};
-
-// Return the nth line div.
-coverage.line_elt = function (n) {
- return $("#t" + n);
-};
-
-// Return the nth line number div.
-coverage.num_elt = function (n) {
- return $("#n" + n);
-};
-
-// Set the selection. b and e are line numbers.
-coverage.set_sel = function (b, e) {
- // The first line selected.
- coverage.sel_begin = b;
- // The next line not selected.
- coverage.sel_end = (e === undefined) ? b+1 : e;
-};
-
-coverage.to_top = function () {
- coverage.set_sel(0, 1);
- coverage.scroll_window(0);
-};
-
-coverage.to_first_chunk = function () {
- coverage.set_sel(0, 1);
- coverage.to_next_chunk();
-};
-
-// Return a string indicating what kind of chunk this line belongs to,
-// or null if not a chunk.
-coverage.chunk_indicator = function (line_elt) {
- var klass = line_elt.attr('class');
- if (klass) {
- var m = klass.match(/\bshow_\w+\b/);
- if (m) {
- return m[0];
- }
- }
- return null;
-};
-
-coverage.to_next_chunk = function () {
- var c = coverage;
-
- // Find the start of the next colored chunk.
- var probe = c.sel_end;
- var chunk_indicator, probe_line;
- while (true) {
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- chunk_indicator = c.chunk_indicator(probe_line);
- if (chunk_indicator) {
- break;
- }
- probe++;
- }
-
- // There's a next chunk, `probe` points to it.
- var begin = probe;
-
- // Find the end of this chunk.
- var next_indicator = chunk_indicator;
- while (next_indicator === chunk_indicator) {
- probe++;
- probe_line = c.line_elt(probe);
- next_indicator = c.chunk_indicator(probe_line);
- }
- c.set_sel(begin, probe);
- c.show_selection();
-};
-
-coverage.to_prev_chunk = function () {
- var c = coverage;
-
- // Find the end of the prev colored chunk.
- var probe = c.sel_begin-1;
- var probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- var chunk_indicator = c.chunk_indicator(probe_line);
- while (probe > 0 && !chunk_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- chunk_indicator = c.chunk_indicator(probe_line);
- }
-
- // There's a prev chunk, `probe` points to its last line.
- var end = probe+1;
-
- // Find the beginning of this chunk.
- var prev_indicator = chunk_indicator;
- while (prev_indicator === chunk_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- prev_indicator = c.chunk_indicator(probe_line);
- }
- c.set_sel(probe+1, end);
- c.show_selection();
-};
-
-// Return the line number of the line nearest pixel position pos
-coverage.line_at_pos = function (pos) {
- var l1 = coverage.line_elt(1),
- l2 = coverage.line_elt(2),
- result;
- if (l1.length && l2.length) {
- var l1_top = l1.offset().top,
- line_height = l2.offset().top - l1_top,
- nlines = (pos - l1_top) / line_height;
- if (nlines < 1) {
- result = 1;
- }
- else {
- result = Math.ceil(nlines);
- }
- }
- else {
- result = 1;
- }
- return result;
-};
-
-// Returns 0, 1, or 2: how many of the two ends of the selection are on
-// the screen right now?
-coverage.selection_ends_on_screen = function () {
- if (coverage.sel_begin === 0) {
- return 0;
- }
-
- var top = coverage.line_elt(coverage.sel_begin);
- var next = coverage.line_elt(coverage.sel_end-1);
-
- return (
- (top.isOnScreen() ? 1 : 0) +
- (next.isOnScreen() ? 1 : 0)
- );
-};
-
-coverage.to_next_chunk_nicely = function () {
- coverage.finish_scrolling();
- if (coverage.selection_ends_on_screen() === 0) {
- // The selection is entirely off the screen: select the top line on
- // the screen.
- var win = $(window);
- coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
- }
- coverage.to_next_chunk();
-};
-
-coverage.to_prev_chunk_nicely = function () {
- coverage.finish_scrolling();
- if (coverage.selection_ends_on_screen() === 0) {
- var win = $(window);
- coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
- }
- coverage.to_prev_chunk();
-};
-
-// Select line number lineno, or if it is in a colored chunk, select the
-// entire chunk
-coverage.select_line_or_chunk = function (lineno) {
- var c = coverage;
- var probe_line = c.line_elt(lineno);
- if (probe_line.length === 0) {
- return;
- }
- var the_indicator = c.chunk_indicator(probe_line);
- if (the_indicator) {
- // The line is in a highlighted chunk.
- // Search backward for the first line.
- var probe = lineno;
- var indicator = the_indicator;
- while (probe > 0 && indicator === the_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- break;
- }
- indicator = c.chunk_indicator(probe_line);
- }
- var begin = probe + 1;
-
- // Search forward for the last line.
- probe = lineno;
- indicator = the_indicator;
- while (indicator === the_indicator) {
- probe++;
- probe_line = c.line_elt(probe);
- indicator = c.chunk_indicator(probe_line);
- }
-
- coverage.set_sel(begin, probe);
- }
- else {
- coverage.set_sel(lineno);
- }
-};
-
-coverage.show_selection = function () {
- var c = coverage;
-
- // Highlight the lines in the chunk
- $(".linenos .highlight").removeClass("highlight");
- for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
- c.num_elt(probe).addClass("highlight");
- }
-
- c.scroll_to_selection();
-};
-
-coverage.scroll_to_selection = function () {
- // Scroll the page if the chunk isn't fully visible.
- if (coverage.selection_ends_on_screen() < 2) {
- // Need to move the page. The html,body trick makes it scroll in all
- // browsers, got it from http://stackoverflow.com/questions/3042651
- var top = coverage.line_elt(coverage.sel_begin);
- var top_pos = parseInt(top.offset().top, 10);
- coverage.scroll_window(top_pos - 30);
- }
-};
-
-coverage.scroll_window = function (to_pos) {
- $("html,body").animate({scrollTop: to_pos}, 200);
-};
-
-coverage.finish_scrolling = function () {
- $("html,body").stop(true, true);
-};
-
-coverage.init_scroll_markers = function () {
- var c = coverage;
- // Init some variables
- c.lines_len = $('#source p').length;
- c.body_h = $('body').height();
- c.header_h = $('div#header').height();
-
- // Build html
- c.build_scroll_markers();
-};
-
-coverage.build_scroll_markers = function () {
- var c = coverage,
- min_line_height = 3,
- max_line_height = 10,
- visible_window_h = $(window).height();
-
-    c.lines_to_mark = $('#source').find('p.show_run, p.show_mis, p.show_exc, p.show_par');
- $('#scroll_marker').remove();
- // Don't build markers if the window has no scroll bar.
- if (c.body_h <= visible_window_h) {
- return;
- }
-
- $("body").append("<div id='scroll_marker'>&nbsp;</div>");
- var scroll_marker = $('#scroll_marker'),
- marker_scale = scroll_marker.height() / c.body_h,
- line_height = scroll_marker.height() / c.lines_len;
-
- // Line height must be between the extremes.
- if (line_height > min_line_height) {
- if (line_height > max_line_height) {
- line_height = max_line_height;
- }
- }
- else {
- line_height = min_line_height;
- }
-
- var previous_line = -99,
- last_mark,
- last_top,
- offsets = {};
-
- // Calculate line offsets outside loop to prevent relayouts
- c.lines_to_mark.each(function() {
- offsets[this.id] = $(this).offset().top;
- });
- c.lines_to_mark.each(function () {
- var id_name = $(this).attr('id'),
- line_top = Math.round(offsets[id_name] * marker_scale),
- line_number = parseInt(id_name.substring(1, id_name.length));
-
- if (line_number === previous_line + 1) {
-            // If this line continues the previous marked block, just make the previous mark taller.
- last_mark.css({
- 'height': line_top + line_height - last_top
- });
- }
- else {
- // Add colored line in scroll_marker block.
- scroll_marker.append('<div id="m' + line_number + '" class="marker"></div>');
- last_mark = $('#m' + line_number);
- last_mark.css({
- 'height': line_height,
- 'top': line_top
- });
- last_top = line_top;
- }
-
- previous_line = line_number;
- });
-};
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/favicon_32.png b/contrib/python/coverage/py2/coverage/htmlfiles/favicon_32.png
deleted file mode 100644
index 8649f0475d..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/favicon_32.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/index.html b/contrib/python/coverage/py2/coverage/htmlfiles/index.html
deleted file mode 100644
index 983db06125..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/index.html
+++ /dev/null
@@ -1,119 +0,0 @@
-{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
-{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
-
-<!DOCTYPE html>
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- <title>{{ title|escape }}</title>
- <link rel="icon" sizes="32x32" href="favicon_32.png">
- <link rel="stylesheet" href="style.css" type="text/css">
- {% if extra_css %}
- <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
- {% endif %}
- <script type="text/javascript" src="jquery.min.js"></script>
- <script type="text/javascript" src="jquery.ba-throttle-debounce.min.js"></script>
- <script type="text/javascript" src="jquery.tablesorter.min.js"></script>
- <script type="text/javascript" src="jquery.hotkeys.js"></script>
- <script type="text/javascript" src="coverage_html.js"></script>
- <script type="text/javascript">
- jQuery(document).ready(coverage.index_ready);
- </script>
-</head>
-<body class="indexfile">
-
-<div id="header">
- <div class="content">
- <h1>{{ title|escape }}:
- <span class="pc_cov">{{totals.pc_covered_str}}%</span>
- </h1>
-
- <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
-
- <form id="filter_container">
- <input id="filter" type="text" value="" placeholder="filter..." />
- </form>
- </div>
-</div>
-
-<div class="help_panel">
- <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
- <p class="legend">Hot-keys on this page</p>
- <div>
- <p class="keyhelp">
- <span class="key">n</span>
- <span class="key">s</span>
- <span class="key">m</span>
- <span class="key">x</span>
- {% if has_arcs %}
- <span class="key">b</span>
- <span class="key">p</span>
- {% endif %}
- <span class="key">c</span> &nbsp; change column sorting
- </p>
- </div>
-</div>
-
-<div id="index">
- <table class="index">
- <thead>
-        {# The title="" attr doesn't work in Safari. #}
- <tr class="tablehead" title="Click to sort">
- <th class="name left headerSortDown shortkey_n">Module</th>
- <th class="shortkey_s">statements</th>
- <th class="shortkey_m">missing</th>
- <th class="shortkey_x">excluded</th>
- {% if has_arcs %}
- <th class="shortkey_b">branches</th>
- <th class="shortkey_p">partial</th>
- {% endif %}
- <th class="right shortkey_c">coverage</th>
- </tr>
- </thead>
- {# HTML syntax requires thead, tfoot, tbody #}
- <tfoot>
- <tr class="total">
- <td class="name left">Total</td>
- <td>{{totals.n_statements}}</td>
- <td>{{totals.n_missing}}</td>
- <td>{{totals.n_excluded}}</td>
- {% if has_arcs %}
- <td>{{totals.n_branches}}</td>
- <td>{{totals.n_partial_branches}}</td>
- {% endif %}
- <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
- </tr>
- </tfoot>
- <tbody>
- {% for file in files %}
- <tr class="file">
- <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
- <td>{{file.nums.n_statements}}</td>
- <td>{{file.nums.n_missing}}</td>
- <td>{{file.nums.n_excluded}}</td>
- {% if has_arcs %}
- <td>{{file.nums.n_branches}}</td>
- <td>{{file.nums.n_partial_branches}}</td>
- {% endif %}
- <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
- </tr>
- {% endfor %}
- </tbody>
- </table>
-
- <p id="no_rows">
- No items found using the specified filter.
- </p>
-</div>
-
-<div id="footer">
- <div class="content">
- <p>
- <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
- created at {{ time_stamp }}
- </p>
- </div>
-</div>
-
-</body>
-</html>
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js b/contrib/python/coverage/py2/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
deleted file mode 100644
index 648fe5d3c2..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * jQuery throttle / debounce - v1.1 - 3/7/2010
- * http://benalman.com/projects/jquery-throttle-debounce-plugin/
- *
- * Copyright (c) 2010 "Cowboy" Ben Alman
- * Dual licensed under the MIT and GPL licenses.
- * http://benalman.com/about/license/
- */
-(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this);
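The public surface of the plugin removed above is jQuery.throttle(delay, [no_trailing,] callback) and jQuery.debounce(delay, [at_begin,] callback), readable from the minified source. A hedged usage sketch follows; the handler names and the 250 ms delay are assumptions for illustration only.

// Usage sketch only; assumes jQuery and the throttle/debounce plugin above are loaded.
// Run a hypothetical resize handler at most once every 250 ms while resizing.
function on_resize() { console.log('resized at', Date.now()); }
$(window).on('resize', $.throttle(250, on_resize));

// Run a hypothetical filter handler only after typing has paused for 250 ms
// (the #filter input appears in the index.html template above).
function apply_filter() { console.log('filter text:', $('#filter').val()); }
$('#filter').on('keyup', $.debounce(250, apply_filter));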
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.hotkeys.js b/contrib/python/coverage/py2/coverage/htmlfiles/jquery.hotkeys.js
deleted file mode 100644
index 09b21e03c7..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.hotkeys.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * jQuery Hotkeys Plugin
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- *
- * Based upon the plugin by Tzury Bar Yochay:
- * http://github.com/tzuryby/hotkeys
- *
- * Original idea by:
- * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/
-*/
-
-(function(jQuery){
-
- jQuery.hotkeys = {
- version: "0.8",
-
- specialKeys: {
- 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause",
- 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home",
- 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del",
- 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7",
- 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/",
- 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8",
- 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta"
- },
-
- shiftNums: {
- "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&",
- "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<",
- ".": ">", "/": "?", "\\": "|"
- }
- };
-
- function keyHandler( handleObj ) {
- // Only care when a possible input has been specified
- if ( typeof handleObj.data !== "string" ) {
- return;
- }
-
- var origHandler = handleObj.handler,
- keys = handleObj.data.toLowerCase().split(" ");
-
- handleObj.handler = function( event ) {
- // Don't fire in text-accepting inputs that we didn't directly bind to
- if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) ||
- event.target.type === "text") ) {
- return;
- }
-
- // Keypress represents characters, not special keys
- var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ],
- character = String.fromCharCode( event.which ).toLowerCase(),
- key, modif = "", possible = {};
-
- // check combinations (alt|ctrl|shift+anything)
- if ( event.altKey && special !== "alt" ) {
- modif += "alt+";
- }
-
- if ( event.ctrlKey && special !== "ctrl" ) {
- modif += "ctrl+";
- }
-
- // TODO: Need to make sure this works consistently across platforms
- if ( event.metaKey && !event.ctrlKey && special !== "meta" ) {
- modif += "meta+";
- }
-
- if ( event.shiftKey && special !== "shift" ) {
- modif += "shift+";
- }
-
- if ( special ) {
- possible[ modif + special ] = true;
-
- } else {
- possible[ modif + character ] = true;
- possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true;
-
- // "$" can be triggered as "Shift+4" or "Shift+$" or just "$"
- if ( modif === "shift+" ) {
- possible[ jQuery.hotkeys.shiftNums[ character ] ] = true;
- }
- }
-
- for ( var i = 0, l = keys.length; i < l; i++ ) {
- if ( possible[ keys[i] ] ) {
- return origHandler.apply( this, arguments );
- }
- }
- };
- }
-
- jQuery.each([ "keydown", "keyup", "keypress" ], function() {
- jQuery.event.special[ this ] = { add: keyHandler };
- });
-
-})( jQuery );
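jquery.hotkeys, removed above, matches key combinations by reading the key spec out of the handler's event data string inside keyHandler. A minimal usage sketch, with a made-up combination and handler body:

// Usage sketch only; assumes jQuery and the hotkeys plugin above are loaded.
// The 'ctrl+shift+f' string is passed as event data and parsed by keyHandler().
jQuery(document).bind('keydown', 'ctrl+shift+f', function (event) {
    event.preventDefault();
    $('#filter').focus();  // hypothetical action: jump to the report's filter box
});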
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.isonscreen.js b/contrib/python/coverage/py2/coverage/htmlfiles/jquery.isonscreen.js
deleted file mode 100644
index 0182ebd213..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.isonscreen.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2010
- * @author Laurence Wheway
- * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
- * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
- *
- * @version 1.2.0
- */
-(function($) {
- jQuery.extend({
- isOnScreen: function(box, container) {
-			//ensure numbers come in as integers (not strings) and remove 'px' if it's there
- for(var i in box){box[i] = parseFloat(box[i])};
- for(var i in container){container[i] = parseFloat(container[i])};
-
- if(!container){
- container = {
- left: $(window).scrollLeft(),
- top: $(window).scrollTop(),
- width: $(window).width(),
- height: $(window).height()
- }
- }
-
- if( box.left+box.width-container.left > 0 &&
- box.left < container.width+container.left &&
- box.top+box.height-container.top > 0 &&
- box.top < container.height+container.top
- ) return true;
- return false;
- }
- })
-
-
- jQuery.fn.isOnScreen = function (container) {
- for(var i in container){container[i] = parseFloat(container[i])};
-
- if(!container){
- container = {
- left: $(window).scrollLeft(),
- top: $(window).scrollTop(),
- width: $(window).width(),
- height: $(window).height()
- }
- }
-
- if( $(this).offset().left+$(this).width()-container.left > 0 &&
- $(this).offset().left < container.width+container.left &&
- $(this).offset().top+$(this).height()-container.top > 0 &&
- $(this).offset().top < container.height+container.top
- ) return true;
- return false;
- }
-})(jQuery);
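The deleted jquery.isonscreen exposes two entry points that are visible above: a static jQuery.isOnScreen(box, container) taking plain {left, top, width, height} objects, and an element-level $(el).isOnScreen([container]) that defaults the container to the window. A brief hedged sketch of both:

// Usage sketch only; assumes jQuery and the isonscreen plugin above are loaded,
// and that a #header element exists (index.html above defines one).
if ($('#header').isOnScreen()) {
    console.log('header is at least partly inside the viewport');
}

// Static form: test an arbitrary box against an explicit container box.
var visible = jQuery.isOnScreen(
    {left: 0, top: 600, width: 100, height: 20},    // box under test
    {left: 0, top: 0, width: 1024, height: 768}     // container/viewport
);
console.log('box on screen:', visible);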
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.min.js b/contrib/python/coverage/py2/coverage/htmlfiles/jquery.min.js
deleted file mode 100644
index d1608e37ff..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.min.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
-!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="<select msallowclip=''><option selected=''></option></select>",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;
-if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 
0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"submitBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return 
m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fb=/ jQuery\d+="(?:null|\d+)"/g,gb=new RegExp("<(?:"+eb+")[\\s/>]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/<tbody/i,lb=/<|&#?\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\s*(?:[^=]|=\s*.checked.)/i,ob=/^$|\/(?:java|ecma)script/i,pb=/^true\/(.*)/,qb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,rb={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function 
Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?"<table>"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.style.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Mb=/alpha\([^)]*\)/i,Nb=/opacity\s*=\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp("^("+S+")(.*)$","i"),Qb=new RegExp("^([+-])=("+S+")","i"),Rb={position:"absolute",visibility:"hidden",display:"block"},Sb={letterSpacing:"0",fontWeight:"400"},Tb=["Webkit","O","Moz","ms"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fb(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));return g}function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Jb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),"normal"===f&&b in Sb&&(f=Sb[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Mb,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+" "+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Jb,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var 
d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")
-},cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cc=/queueHooks$/,dc=[ic],ec={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fb(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fb(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var 
d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc("show"),slideUp:gc("hide"),slideToggle:gc("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 
0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lc=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(lc,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var uc=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return 
this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(uc," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vc=m.now(),wc=/\?/,xc=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\/\//,Gc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hc={},Ic={},Jc="*/".concat("*");try{zc=location.href}catch(Kc){zc=y.createElement("a"),zc.href="",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 
0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:"GET",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+"").replace(Ac,"").replace(Fc,yc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yc[3]||("http:"===yc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?"&":"?")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,"$1_="+vc++):e+(wc.test(e)?"&":"?")+"_="+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("If-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jc+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\[\]$/,Sc=/\r?\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vc(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var 
c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vc(c,a[c],b,e);return d.join("&").replace(Qc,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,"\r\n")}}):{name:b.name,value:c.replace(Sc,"\r\n")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on("unload",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&"withCredentials"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+"");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,"string"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=""}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}m.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),m.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=y.head||m("head")[0]||y.documentElement;return{send:function(d,e){b=y.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\?(?=&|$)|\?\?/;m.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=_c.pop()||m.expando+"_"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&ad.test(b.data)&&"data");return 
h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,"$1"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||m.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),m.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if("string"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(f="POST"),g.length>0&&m.ajax({url:a,type:f,dataType:"html",data:b}).done(function(a){e=arguments,g.html(d?m("<div>").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof 
d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m}); \ No newline at end of file
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.tablesorter.min.js b/contrib/python/coverage/py2/coverage/htmlfiles/jquery.tablesorter.min.js
deleted file mode 100644
index 64c7007129..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/jquery.tablesorter.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-
-(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead 
th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery); \ No newline at end of file
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/keybd_closed.png b/contrib/python/coverage/py2/coverage/htmlfiles/keybd_closed.png
deleted file mode 100644
index db114023f0..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/keybd_closed.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/keybd_open.png b/contrib/python/coverage/py2/coverage/htmlfiles/keybd_open.png
deleted file mode 100644
index db114023f0..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/keybd_open.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/pyfile.html b/contrib/python/coverage/py2/coverage/htmlfiles/pyfile.html
deleted file mode 100644
index e15be066fb..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/pyfile.html
+++ /dev/null
@@ -1,113 +0,0 @@
-{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
-{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
-
-<!DOCTYPE html>
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
- {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
- <meta http-equiv="X-UA-Compatible" content="IE=emulateIE7" />
- <title>Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
- <link rel="icon" sizes="32x32" href="favicon_32.png">
- <link rel="stylesheet" href="style.css" type="text/css">
- {% if extra_css %}
- <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
- {% endif %}
- <script type="text/javascript" src="jquery.min.js"></script>
- <script type="text/javascript" src="jquery.hotkeys.js"></script>
- <script type="text/javascript" src="jquery.isonscreen.js"></script>
- <script type="text/javascript" src="coverage_html.js"></script>
- <script type="text/javascript">
- jQuery(document).ready(coverage.pyfile_ready);
- </script>
-</head>
-<body class="pyfile">
-
-<div id="header">
- <div class="content">
- <h1>Coverage for <b>{{relative_filename|escape}}</b> :
- <span class="pc_cov">{{nums.pc_covered_str}}%</span>
- </h1>
-
- <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
-
- <h2 class="stats">
- {{nums.n_statements}} statements &nbsp;
- <button type="button" class="{{category.run}} shortkey_r button_toggle_run" title="Toggle lines run">{{nums.n_executed}} run</button>
- <button type="button" class="{{category.mis}} shortkey_m button_toggle_mis" title="Toggle lines missing">{{nums.n_missing}} missing</button>
- <button type="button" class="{{category.exc}} shortkey_x button_toggle_exc" title="Toggle lines excluded">{{nums.n_excluded}} excluded</button>
-
- {% if has_arcs %}
- <button type="button" class="{{category.par}} shortkey_p button_toggle_par" title="Toggle lines partially run">{{nums.n_partial_branches}} partial</button>
- {% endif %}
- </h2>
- </div>
-</div>
-
-<div class="help_panel">
- <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
- <p class="legend">Hot-keys on this page</p>
- <div>
- <p class="keyhelp">
- <span class="key">r</span>
- <span class="key">m</span>
- <span class="key">x</span>
- <span class="key">p</span> &nbsp; toggle line displays
- </p>
- <p class="keyhelp">
- <span class="key">j</span>
- <span class="key">k</span> &nbsp; next/prev highlighted chunk
- </p>
- <p class="keyhelp">
- <span class="key">0</span> &nbsp; (zero) top of page
- </p>
- <p class="keyhelp">
- <span class="key">1</span> &nbsp; (one) first highlighted chunk
- </p>
- </div>
-</div>
-
-<div id="source">
- {% for line in lines -%}
- {% joined %}
- <p id="t{{line.number}}" class="{{line.css_class}}">
- <span class="n"><a href="#t{{line.number}}">{{line.number}}</a></span>
- <span class="t">{{line.html}}&nbsp;</span>
- {% if line.context_list %}
- <input type="checkbox" id="ctxs{{line.number}}" />
- {% endif %}
- {# Things that should float right in the line. #}
- <span class="r">
- {% if line.annotate %}
- <span class="annotate short">{{line.annotate}}</span>
- <span class="annotate long">{{line.annotate_long}}</span>
- {% endif %}
- {% if line.contexts %}
- <label for="ctxs{{line.number}}" class="ctx">{{ line.contexts_label }}</label>
- {% endif %}
- </span>
- {# Things that should appear below the line. #}
- {% if line.context_list %}
- <span class="ctxs">
- {% for context in line.context_list %}
- <span>{{context}}</span>
- {% endfor %}
- </span>
- {% endif %}
- </p>
- {% endjoined %}
- {% endfor %}
-</div>
-
-<div id="footer">
- <div class="content">
- <p>
- <a class="nav" href="index.html">&#xab; index</a> &nbsp; &nbsp; <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
- created at {{ time_stamp }}
- </p>
- </div>
-</div>
-
-</body>
-</html>
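For orientation, the deleted pyfile.html template above is driven by one object per source line; the field names below are taken from the template markup, while the dict form and the values are only an illustration of the data it expects.

    # Hypothetical per-line record consumed by the deleted pyfile.html template.
    # Field names come from the template above; the values are invented.
    line = {
        "number": 17,                    # {{line.number}}: 1-based line number
        "css_class": "run show_run",     # {{line.css_class}}: coverage category classes
        "html": "x = compute()",         # {{line.html}}: syntax-marked source text
        "annotate": None,                # short annotation, rendered only when set
        "annotate_long": None,           # long annotation shown in the hover popup
        "contexts": [],                  # non-empty enables the context toggle label
        "contexts_label": "",            # {{ line.contexts_label }}
        "context_list": [],              # contexts listed below the line when expanded
    }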
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/style.css b/contrib/python/coverage/py2/coverage/htmlfiles/style.css
deleted file mode 100644
index 36ee2a6e65..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/style.css
+++ /dev/null
@@ -1,291 +0,0 @@
-@charset "UTF-8";
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-/* Don't edit this .css file. Edit the .scss file instead! */
-html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; }
-
-body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; }
-
-@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { body { color: #eee; } }
-
-html > body { font-size: 16px; }
-
-a:active, a:focus { outline: 2px dashed #007acc; }
-
-p { font-size: .875em; line-height: 1.4em; }
-
-table { border-collapse: collapse; }
-
-td { vertical-align: top; }
-
-table tr.hidden { display: none !important; }
-
-p#no_rows { display: none; font-size: 1.2em; }
-
-a.nav { text-decoration: none; color: inherit; }
-
-a.nav:hover { text-decoration: underline; color: inherit; }
-
-#header { background: #f8f8f8; width: 100%; border-bottom: 1px solid #eee; }
-
-@media (prefers-color-scheme: dark) { #header { background: black; } }
-
-@media (prefers-color-scheme: dark) { #header { border-color: #333; } }
-
-.indexfile #footer { margin: 1rem 3.5rem; }
-
-.pyfile #footer { margin: 1rem 1rem; }
-
-#footer .content { padding: 0; color: #666; font-style: italic; }
-
-@media (prefers-color-scheme: dark) { #footer .content { color: #aaa; } }
-
-#index { margin: 1rem 0 0 3.5rem; }
-
-#header .content { padding: 1rem 3.5rem; }
-
-h1 { font-size: 1.25em; display: inline-block; }
-
-#filter_container { float: right; margin: 0 2em 0 0; }
-
-#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }
-
-@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } }
-
-@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } }
-
-#filter_container input:focus { border-color: #007acc; }
-
-h2.stats { margin-top: .5em; font-size: 1em; }
-
-.stats button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
-
-@media (prefers-color-scheme: dark) { .stats button { border-color: #444; } }
-
-.stats button:active, .stats button:focus { outline: 2px dashed #007acc; }
-
-.stats button:active, .stats button:focus { outline: 2px dashed #007acc; }
-
-.stats button.run { background: #eeffee; }
-
-@media (prefers-color-scheme: dark) { .stats button.run { background: #373d29; } }
-
-.stats button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.run.show_run { background: #373d29; } }
-
-.stats button.mis { background: #ffeeee; }
-
-@media (prefers-color-scheme: dark) { .stats button.mis { background: #4b1818; } }
-
-.stats button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.mis.show_mis { background: #4b1818; } }
-
-.stats button.exc { background: #f7f7f7; }
-
-@media (prefers-color-scheme: dark) { .stats button.exc { background: #333; } }
-
-.stats button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.exc.show_exc { background: #333; } }
-
-.stats button.par { background: #ffffd5; }
-
-@media (prefers-color-scheme: dark) { .stats button.par { background: #650; } }
-
-.stats button.par.show_par { background: #ffa; border: 2px solid #dddd00; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.par.show_par { background: #650; } }
-
-.help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; }
-
-#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; }
-
-#keyboard_icon { float: right; margin: 5px; cursor: pointer; }
-
-.help_panel { padding: .5em; border: 1px solid #883; }
-
-.help_panel .legend { font-style: italic; margin-bottom: 1em; }
-
-.indexfile .help_panel { width: 20em; min-height: 4em; }
-
-.pyfile .help_panel { width: 16em; min-height: 8em; }
-
-#panel_icon { float: right; cursor: pointer; }
-
-.keyhelp { margin: .75em; }
-
-.keyhelp .key { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; }
-
-#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; }
-
-#source p { position: relative; white-space: pre; }
-
-#source p * { box-sizing: border-box; }
-
-#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n { color: #777; } }
-
-#source p .n a { text-decoration: none; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
-
-#source p .n a:hover { text-decoration: underline; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } }
-
-#source p.highlight .n { background: #ffdd00; }
-
-#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; }
-
-@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } }
-
-#source p .t:hover { background: #f2f2f2; }
-
-@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } }
-
-#source p .t:hover ~ .r .annotate.long { display: block; }
-
-#source p .t .com { color: #008000; font-style: italic; line-height: 1px; }
-
-@media (prefers-color-scheme: dark) { #source p .t .com { color: #6A9955; } }
-
-#source p .t .key { font-weight: bold; line-height: 1px; }
-
-#source p .t .str { color: #0451A5; }
-
-@media (prefers-color-scheme: dark) { #source p .t .str { color: #9CDCFE; } }
-
-#source p.mis .t { border-left: 0.2em solid #ff0000; }
-
-#source p.mis.show_mis .t { background: #fdd; }
-
-@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } }
-
-#source p.mis.show_mis .t:hover { background: #f2d2d2; }
-
-@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } }
-
-#source p.run .t { border-left: 0.2em solid #00dd00; }
-
-#source p.run.show_run .t { background: #dfd; }
-
-@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } }
-
-#source p.run.show_run .t:hover { background: #d2f2d2; }
-
-@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } }
-
-#source p.exc .t { border-left: 0.2em solid #808080; }
-
-#source p.exc.show_exc .t { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } }
-
-#source p.exc.show_exc .t:hover { background: #e2e2e2; }
-
-@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } }
-
-#source p.par .t { border-left: 0.2em solid #dddd00; }
-
-#source p.par.show_par .t { background: #ffa; }
-
-@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } }
-
-#source p.par.show_par .t:hover { background: #f2f2a2; }
-
-@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } }
-
-#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }
-
-#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; }
-
-@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } }
-
-#source p .annotate.short:hover ~ .long { display: block; }
-
-#source p .annotate.long { width: 30em; right: 2.5em; }
-
-#source p input { display: none; }
-
-#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; }
-
-#source p input ~ .r label.ctx::before { content: "▶ "; }
-
-#source p input ~ .r label.ctx:hover { background: #d5f7ff; color: #666; }
-
-@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } }
-
-@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } }
-
-#source p input:checked ~ .r label.ctx { background: #aef; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; }
-
-@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } }
-
-@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } }
-
-#source p input:checked ~ .r label.ctx::before { content: "▼ "; }
-
-#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; }
-
-#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; }
-
-@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }
-
-#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #aef; border-radius: .25em; margin-right: 1.75em; }
-
-@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }
-
-#source p .ctxs span { display: block; text-align: right; }
-
-#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }
-
-#index table.index { margin-left: -.5em; }
-
-#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; }
-
-@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } }
-
-#index td.name, #index th.name { text-align: left; width: auto; }
-
-#index th { font-style: italic; color: #333; cursor: pointer; }
-
-@media (prefers-color-scheme: dark) { #index th { color: #ddd; } }
-
-#index th:hover { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } }
-
-#index th.headerSortDown, #index th.headerSortUp { white-space: nowrap; background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index th.headerSortDown, #index th.headerSortUp { background: #333; } }
-
-#index th.headerSortDown:after { content: " ↑"; }
-
-#index th.headerSortUp:after { content: " ↓"; }
-
-#index td.name a { text-decoration: none; color: inherit; }
-
-#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; }
-
-#index tr.file:hover { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } }
-
-#index tr.file:hover td.name { text-decoration: underline; color: inherit; }
-
-#scroll_marker { position: fixed; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; }
-
-@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } }
-
-#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; }
-
-@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } }
diff --git a/contrib/python/coverage/py2/coverage/htmlfiles/style.scss b/contrib/python/coverage/py2/coverage/htmlfiles/style.scss
deleted file mode 100644
index 158d1fb493..0000000000
--- a/contrib/python/coverage/py2/coverage/htmlfiles/style.scss
+++ /dev/null
@@ -1,660 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-// CSS styles for coverage.py HTML reports.
-
-// When you edit this file, you need to run "make css" to get the CSS file
-// generated, and then check in both the .scss and the .css files.
-
-// When working on the file, this command is useful:
-// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css
-//
-// OR you can process sass purely in python with `pip install pysass`, then:
-// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css
-
-// Ignore this comment, it's for the CSS output file:
-/* Don't edit this .css file. Edit the .scss file instead! */
-
-// Dimensions
-$left-gutter: 3.5rem;
-
-
-//
-// Declare colors and variables
-//
-
-$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
-$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace;
-
-$off-button-lighten: 50%;
-$hover-dark-amt: 95%;
-
-$focus-color: #007acc;
-
-$mis-color: #ff0000;
-$run-color: #00dd00;
-$exc-color: #808080;
-$par-color: #dddd00;
-
-$light-bg: #fff;
-$light-fg: #000;
-$light-gray1: #f8f8f8;
-$light-gray2: #eee;
-$light-gray3: #ccc;
-$light-gray4: #999;
-$light-gray5: #666;
-$light-gray6: #333;
-$light-pln-bg: $light-bg;
-$light-mis-bg: #fdd;
-$light-run-bg: #dfd;
-$light-exc-bg: $light-gray2;
-$light-par-bg: #ffa;
-$light-token-com: #008000;
-$light-token-str: #0451A5;
-$light-context-bg-color: #aef;
-
-$dark-bg: #1e1e1e;
-$dark-fg: #eee;
-$dark-gray1: #222;
-$dark-gray2: #333;
-$dark-gray3: #444;
-$dark-gray4: #777;
-$dark-gray5: #aaa;
-$dark-gray6: #ddd;
-$dark-pln-bg: $dark-bg;
-$dark-mis-bg: #4b1818;
-$dark-run-bg: #373d29;
-$dark-exc-bg: $dark-gray2;
-$dark-par-bg: #650;
-$dark-token-com: #6A9955;
-$dark-token-str: #9CDCFE;
-$dark-context-bg-color: #056;
-
-//
-// Mixins and utilities
-//
-@mixin background-dark($color) {
- @media (prefers-color-scheme: dark) {
- background: $color;
- }
-}
-@mixin color-dark($color) {
- @media (prefers-color-scheme: dark) {
- color: $color;
- }
-}
-@mixin border-color-dark($color) {
- @media (prefers-color-scheme: dark) {
- border-color: $color;
- }
-}
-
-// Add visual outline to navigable elements on focus improve accessibility.
-@mixin focus-border {
- &:active, &:focus {
- outline: 2px dashed $focus-color;
- }
-}
-
-// Page-wide styles
-html, body, h1, h2, h3, p, table, td, th {
- margin: 0;
- padding: 0;
- border: 0;
- font-weight: inherit;
- font-style: inherit;
- font-size: 100%;
- font-family: inherit;
- vertical-align: baseline;
-}
-
-// Set baseline grid to 16 pt.
-body {
- font-family: $font-normal;
- font-size: 1em;
- background: $light-bg;
- color: $light-fg;
- @include background-dark($dark-bg);
- @include color-dark($dark-fg);
-}
-
-html>body {
- font-size: 16px;
-}
-
-a {
- @include focus-border;
-}
-
-p {
- font-size: .875em;
- line-height: 1.4em;
-}
-
-table {
- border-collapse: collapse;
-}
-td {
- vertical-align: top;
-}
-table tr.hidden {
- display: none !important;
-}
-
-p#no_rows {
- display: none;
- font-size: 1.2em;
-}
-
-a.nav {
- text-decoration: none;
- color: inherit;
-
- &:hover {
- text-decoration: underline;
- color: inherit;
- }
-}
-
-// Page structure
-#header {
- background: $light-gray1;
- @include background-dark(black);
- width: 100%;
- border-bottom: 1px solid $light-gray2;
- @include border-color-dark($dark-gray2);
-}
-
-.indexfile #footer {
- margin: 1rem $left-gutter;
-}
-
-.pyfile #footer {
- margin: 1rem 1rem;
-}
-
-#footer .content {
- padding: 0;
- color: $light-gray5;
- @include color-dark($dark-gray5);
- font-style: italic;
-}
-
-#index {
- margin: 1rem 0 0 $left-gutter;
-}
-
-// Header styles
-#header .content {
- padding: 1rem $left-gutter;
-}
-
-h1 {
- font-size: 1.25em;
- display: inline-block;
-}
-
-#filter_container {
- float: right;
- margin: 0 2em 0 0;
-
- input {
- width: 10em;
- padding: 0.2em 0.5em;
- border: 2px solid $light-gray3;
- background: $light-bg;
- color: $light-fg;
- @include border-color-dark($dark-gray3);
- @include background-dark($dark-bg);
- @include color-dark($dark-fg);
- &:focus {
- border-color: $focus-color;
- }
- }
-}
-
-h2.stats {
- margin-top: .5em;
- font-size: 1em;
-}
-.stats button {
- font-family: inherit;
- font-size: inherit;
- border: 1px solid;
- border-radius: .2em;
- color: inherit;
- padding: .1em .5em;
- margin: 1px calc(.1em + 1px);
- cursor: pointer;
- border-color: $light-gray3;
- @include border-color-dark($dark-gray3);
- @include focus-border;
-
- @include focus-border;
-
- &.run {
- background: mix($light-run-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-run-bg);
- &.show_run {
- background: $light-run-bg;
- @include background-dark($dark-run-bg);
- border: 2px solid $run-color;
- margin: 0 .1em;
- }
- }
- &.mis {
- background: mix($light-mis-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-mis-bg);
- &.show_mis {
- background: $light-mis-bg;
- @include background-dark($dark-mis-bg);
- border: 2px solid $mis-color;
- margin: 0 .1em;
- }
- }
- &.exc {
- background: mix($light-exc-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-exc-bg);
- &.show_exc {
- background: $light-exc-bg;
- @include background-dark($dark-exc-bg);
- border: 2px solid $exc-color;
- margin: 0 .1em;
- }
- }
- &.par {
- background: mix($light-par-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-par-bg);
- &.show_par {
- background: $light-par-bg;
- @include background-dark($dark-par-bg);
- border: 2px solid $par-color;
- margin: 0 .1em;
- }
- }
-}
-
-// Yellow post-it things.
-%popup {
- display: none;
- position: absolute;
- z-index: 999;
- background: #ffffcc;
- border: 1px solid #888;
- border-radius: .2em;
- color: #333;
- padding: .25em .5em;
-}
-
-// Yellow post-it's in the text listings.
-%in-text-popup {
- @extend %popup;
- white-space: normal;
- float: right;
- top: 1.75em;
- right: 1em;
- height: auto;
-}
-
-// Help panel
-#keyboard_icon {
- float: right;
- margin: 5px;
- cursor: pointer;
-}
-
-.help_panel {
- @extend %popup;
- padding: .5em;
- border: 1px solid #883;
-
- .legend {
- font-style: italic;
- margin-bottom: 1em;
- }
-
- .indexfile & {
- width: 20em;
- min-height: 4em;
- }
-
- .pyfile & {
- width: 16em;
- min-height: 8em;
- }
-}
-
-#panel_icon {
- float: right;
- cursor: pointer;
-}
-
-.keyhelp {
- margin: .75em;
-
- .key {
- border: 1px solid black;
- border-color: #888 #333 #333 #888;
- padding: .1em .35em;
- font-family: $font-code;
- font-weight: bold;
- background: #eee;
- }
-}
-
-// Source file styles
-
-// The slim bar at the left edge of the source lines, colored by coverage.
-$border-indicator-width: .2em;
-
-#source {
- padding: 1em 0 1em $left-gutter;
- font-family: $font-code;
-
- p {
- // position relative makes position:absolute pop-ups appear in the right place.
- position: relative;
- white-space: pre;
-
- * {
- box-sizing: border-box;
- }
-
- .n {
- float: left;
- text-align: right;
- width: $left-gutter;
- box-sizing: border-box;
- margin-left: -$left-gutter;
- padding-right: 1em;
- color: $light-gray4;
- @include color-dark($dark-gray4);
-
- a {
- text-decoration: none;
- color: $light-gray4;
- @include color-dark($dark-gray4);
- &:hover {
- text-decoration: underline;
- color: $light-gray4;
- @include color-dark($dark-gray4);
- }
- }
- }
-
- &.highlight .n {
- background: #ffdd00;
- }
-
- .t {
- display: inline-block;
- width: 100%;
- box-sizing: border-box;
- margin-left: -.5em;
- padding-left: .5em - $border-indicator-width;
- border-left: $border-indicator-width solid $light-bg;
- @include border-color-dark($dark-bg);
-
- &:hover {
- background: mix($light-pln-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt));
-
- & ~ .r .annotate.long {
- display: block;
- }
- }
-
- // Syntax coloring
- .com {
- color: $light-token-com;
- @include color-dark($dark-token-com);
- font-style: italic;
- line-height: 1px;
- }
- .key {
- font-weight: bold;
- line-height: 1px;
- }
- .str {
- color: $light-token-str;
- @include color-dark($dark-token-str);
- }
- }
-
- &.mis {
- .t {
- border-left: $border-indicator-width solid $mis-color;
- }
-
- &.show_mis .t {
- background: $light-mis-bg;
- @include background-dark($dark-mis-bg);
-
- &:hover {
- background: mix($light-mis-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.run {
- .t {
- border-left: $border-indicator-width solid $run-color;
- }
-
- &.show_run .t {
- background: $light-run-bg;
- @include background-dark($dark-run-bg);
-
- &:hover {
- background: mix($light-run-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.exc {
- .t {
- border-left: $border-indicator-width solid $exc-color;
- }
-
- &.show_exc .t {
- background: $light-exc-bg;
- @include background-dark($dark-exc-bg);
-
- &:hover {
- background: mix($light-exc-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.par {
- .t {
- border-left: $border-indicator-width solid $par-color;
- }
-
- &.show_par .t {
- background: $light-par-bg;
- @include background-dark($dark-par-bg);
-
- &:hover {
- background: mix($light-par-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt));
- }
- }
-
- }
-
- .r {
- position: absolute;
- top: 0;
- right: 2.5em;
- font-family: $font-normal;
- }
-
- .annotate {
- font-family: $font-normal;
- color: $light-gray5;
- @include color-dark($dark-gray6);
- padding-right: .5em;
-
- &.short:hover ~ .long {
- display: block;
- }
-
- &.long {
- @extend %in-text-popup;
- width: 30em;
- right: 2.5em;
- }
- }
-
- input {
- display: none;
-
- & ~ .r label.ctx {
- cursor: pointer;
- border-radius: .25em;
- &::before {
- content: "▶ ";
- }
- &:hover {
- background: mix($light-context-bg-color, $light-bg, $off-button-lighten);
- @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten));
- color: $light-gray5;
- @include color-dark($dark-gray5);
- }
- }
-
- &:checked ~ .r label.ctx {
- background: $light-context-bg-color;
- @include background-dark($dark-context-bg-color);
- color: $light-gray5;
- @include color-dark($dark-gray5);
- border-radius: .75em .75em 0 0;
- padding: 0 .5em;
- margin: -.25em 0;
- &::before {
- content: "▼ ";
- }
- }
-
- &:checked ~ .ctxs {
- padding: .25em .5em;
- overflow-y: scroll;
- max-height: 10.5em;
- }
- }
-
- label.ctx {
- color: $light-gray4;
- @include color-dark($dark-gray4);
- display: inline-block;
- padding: 0 .5em;
- font-size: .8333em; // 10/12
- }
-
- .ctxs {
- display: block;
- max-height: 0;
- overflow-y: hidden;
- transition: all .2s;
- padding: 0 .5em;
- font-family: $font-normal;
- white-space: nowrap;
- background: $light-context-bg-color;
- @include background-dark($dark-context-bg-color);
- border-radius: .25em;
- margin-right: 1.75em;
- span {
- display: block;
- text-align: right;
- }
- }
- }
-}
-
-
-// index styles
-#index {
- font-family: $font-code;
- font-size: 0.875em;
-
- table.index {
- margin-left: -.5em;
- }
- td, th {
- text-align: right;
- width: 5em;
- padding: .25em .5em;
- border-bottom: 1px solid $light-gray2;
- @include border-color-dark($dark-gray2);
- &.name {
- text-align: left;
- width: auto;
- }
- }
- th {
- font-style: italic;
- color: $light-gray6;
- @include color-dark($dark-gray6);
- cursor: pointer;
- &:hover {
- background: $light-gray2;
- @include background-dark($dark-gray2);
- }
- &.headerSortDown, &.headerSortUp {
- white-space: nowrap;
- background: $light-gray2;
- @include background-dark($dark-gray2);
- }
- &.headerSortDown:after {
- content: " ↑";
- }
- &.headerSortUp:after {
- content: " ↓";
- }
- }
- td.name a {
- text-decoration: none;
- color: inherit;
- }
-
- tr.total td,
- tr.total_dynamic td {
- font-weight: bold;
- border-top: 1px solid #ccc;
- border-bottom: none;
- }
- tr.file:hover {
- background: $light-gray2;
- @include background-dark($dark-gray2);
- td.name {
- text-decoration: underline;
- color: inherit;
- }
- }
-}
-
-// scroll marker styles
-#scroll_marker {
- position: fixed;
- right: 0;
- top: 0;
- width: 16px;
- height: 100%;
- background: $light-bg;
- border-left: 1px solid $light-gray2;
- @include background-dark($dark-bg);
- @include border-color-dark($dark-gray2);
- will-change: transform; // for faster scrolling of fixed element in Chrome
-
- .marker {
- background: $light-gray3;
- @include background-dark($dark-gray3);
- position: absolute;
- min-height: 3px;
- width: 100%;
- }
-}
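The header comments in the deleted style.scss describe regenerating style.css with the sass CLI, or with pysassc after pip install pysass. A rough Python equivalent, assuming the libsass binding (pip install libsass) rather than the exact tools named in those comments, is a one-shot compile:

    # One-shot SCSS -> CSS compile with libsass-python (an assumed alternative
    # to the sass/pysassc commands quoted in the deleted file's comments).
    import sass

    css = sass.compile(
        filename="coverage/htmlfiles/style.scss",
        output_style="compact",   # matches the --style=compact flag shown above
    )
    with open("coverage/htmlfiles/style.css", "w") as out:
        out.write(css)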
diff --git a/contrib/python/coverage/py2/coverage/inorout.py b/contrib/python/coverage/py2/coverage/inorout.py
deleted file mode 100644
index cbc80e8fb5..0000000000
--- a/contrib/python/coverage/py2/coverage/inorout.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determining whether files are being measured/reported or not."""
-
-# For finding the stdlib
-import atexit
-import inspect
-import itertools
-import os
-import platform
-import re
-import sys
-import traceback
-
-from coverage import env
-from coverage.backward import code_object
-from coverage.disposition import FileDisposition, disposition_init
-from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
-from coverage.files import prep_patterns, find_python_files, canonical_filename
-from coverage.misc import CoverageException
-from coverage.python import source_for_file, source_for_morf
-
-
-# Pypy has some unusual stuff in the "stdlib". Consider those locations
-# when deciding where the stdlib is. These modules are not used for anything,
-# they are modules importable from the pypy lib directories, so that we can
-# find those directories.
-_structseq = _pypy_irc_topic = None
-if env.PYPY:
- try:
- import _structseq
- except ImportError:
- pass
-
- try:
- import _pypy_irc_topic
- except ImportError:
- pass
-
-
-def canonical_path(morf, directory=False):
- """Return the canonical path of the module or file `morf`.
-
- If the module is a package, then return its directory. If it is a
- module, then return its file, unless `directory` is True, in which
- case return its enclosing directory.
-
- """
- morf_path = canonical_filename(source_for_morf(morf))
- if morf_path.endswith("__init__.py") or directory:
- morf_path = os.path.split(morf_path)[0]
- return morf_path
-
-
-def name_for_module(filename, frame):
- """Get the name of the module for a filename and frame.
-
- For configurability's sake, we allow __main__ modules to be matched by
- their importable name.
-
- If loaded via runpy (aka -m), we can usually recover the "original"
- full dotted module name, otherwise, we resort to interpreting the
- file name to get the module's name. In the case that the module name
- can't be determined, None is returned.
-
- """
- module_globals = frame.f_globals if frame is not None else {}
- if module_globals is None: # pragma: only ironpython
- # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
- module_globals = {}
-
- dunder_name = module_globals.get('__name__', None)
-
- if isinstance(dunder_name, str) and dunder_name != '__main__':
- # This is the usual case: an imported module.
- return dunder_name
-
- loader = module_globals.get('__loader__', None)
- for attrname in ('fullname', 'name'): # attribute renamed in py3.2
- if hasattr(loader, attrname):
- fullname = getattr(loader, attrname)
- else:
- continue
-
- if isinstance(fullname, str) and fullname != '__main__':
- # Module loaded via: runpy -m
- return fullname
-
- # Script as first argument to Python command line.
- inspectedname = inspect.getmodulename(filename)
- if inspectedname is not None:
- return inspectedname
- else:
- return dunder_name
-
-
-def module_is_namespace(mod):
- """Is the module object `mod` a PEP420 namespace module?"""
- return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
-
-
-def module_has_file(mod):
- """Does the module object `mod` have an existing __file__ ?"""
- mod__file__ = getattr(mod, '__file__', None)
- if mod__file__ is None:
- return False
- return os.path.exists(mod__file__)
-
-
-class InOrOut(object):
- """Machinery for determining what files to measure."""
-
- def __init__(self, warn, debug):
- self.warn = warn
- self.debug = debug
-
- # The matchers for should_trace.
- self.source_match = None
- self.source_pkgs_match = None
- self.pylib_paths = self.cover_paths = None
- self.pylib_match = self.cover_match = None
- self.include_match = self.omit_match = None
- self.plugins = []
- self.disp_class = FileDisposition
-
- # The source argument can be directories or package names.
- self.source = []
- self.source_pkgs = []
- self.source_pkgs_unmatched = []
- self.omit = self.include = None
-
- def configure(self, config):
- """Apply the configuration to get ready for decision-time."""
- self.config = config
- self.source_pkgs.extend(config.source_pkgs)
- for src in config.source or []:
- if os.path.isdir(src):
- self.source.append(canonical_filename(src))
- else:
- self.source_pkgs.append(src)
- self.source_pkgs_unmatched = self.source_pkgs[:]
-
- self.omit = prep_patterns(config.run_omit)
- if getattr(sys, 'is_standalone_binary', False):
- # don't trace contrib
- self.omit.append('contrib/python/*')
- self.omit.append('contrib/libs/protobuf/*')
- self.omit.append('library/python/pytest/*')
- self.include = prep_patterns(config.run_include)
-
- # The directories for files considered "installed with the interpreter".
- self.pylib_paths = set()
- if getattr(sys, 'is_standalone_binary', False):
- self.pylib_paths.add('contrib/tools/python')
- self.pylib_paths.add('contrib/tools/python3')
- if not self.pylib_paths and not config.cover_pylib:
- # Look at where some standard modules are located. That's the
- # indication for "installed with the interpreter". In some
- # environments (virtualenv, for example), these modules may be
- # spread across a few locations. Look at all the candidate modules
- # we've imported, and take all the different ones.
- for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback):
- if m is not None and hasattr(m, "__file__"):
- self.pylib_paths.add(canonical_path(m, directory=True))
-
- if _structseq and not hasattr(_structseq, '__file__'):
- # PyPy 2.4 has no __file__ in the builtin modules, but the code
- # objects still have the file names. So dig into one to find
- # the path to exclude. The "filename" might be synthetic,
- # don't be fooled by those.
- structseq_file = code_object(_structseq.structseq_new).co_filename
- if not structseq_file.startswith("<"):
- self.pylib_paths.add(canonical_path(structseq_file))
-
- # To avoid tracing the coverage.py code itself, we skip anything
- # located where we are.
- if getattr(sys, 'is_standalone_binary', False):
- self.cover_paths = ["contrib/python/coverage"]
- else:
- self.cover_paths = [canonical_path(__file__, directory=True)]
- if env.TESTING:
- # Don't include our own test code.
- self.cover_paths.append(os.path.join(self.cover_paths[0], "tests"))
-
- # When testing, we use PyContracts, which should be considered
- # part of coverage.py, and it uses six. Exclude those directories
- # just as we exclude ourselves.
- import contracts
- import six
- for mod in [contracts, six]:
- self.cover_paths.append(canonical_path(mod))
-
- def debug(msg):
- if self.debug:
- self.debug.write(msg)
-
- # Create the matchers we need for should_trace
- if self.source or self.source_pkgs:
- against = []
- if self.source:
- self.source_match = TreeMatcher(self.source)
- against.append("trees {!r}".format(self.source_match))
- if self.source_pkgs:
- self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
- against.append("modules {!r}".format(self.source_pkgs_match))
- debug("Source matching against " + " and ".join(against))
- else:
- if self.cover_paths:
- self.cover_match = TreeMatcher(self.cover_paths)
- debug("Coverage code matching: {!r}".format(self.cover_match))
- if self.pylib_paths:
- self.pylib_match = TreeMatcher(self.pylib_paths)
- debug("Python stdlib matching: {!r}".format(self.pylib_match))
- if self.include:
- self.include_match = FnmatchMatcher(self.include)
- debug("Include matching: {!r}".format(self.include_match))
- if self.omit:
- self.omit_match = FnmatchMatcher(self.omit)
- debug("Omit matching: {!r}".format(self.omit_match))
-
- def should_trace(self, filename, frame=None):
- """Decide whether to trace execution in `filename`, with a reason.
-
- This function is called from the trace function. As each new file name
- is encountered, this function determines whether it is traced or not.
-
- Returns a FileDisposition object.
-
- """
- original_filename = filename
- disp = disposition_init(self.disp_class, filename)
-
- def nope(disp, reason):
- """Simple helper to make it easy to return NO."""
- disp.trace = False
- disp.reason = reason
- return disp
-
- if frame is not None:
- # Compiled Python files have two file names: frame.f_code.co_filename is
- # the file name at the time the .pyc was compiled. The second name is
- # __file__, which is where the .pyc was actually loaded from. Since
- # .pyc files can be moved after compilation (for example, by being
- # installed), we look for __file__ in the frame and prefer it to the
- # co_filename value.
- dunder_file = frame.f_globals and frame.f_globals.get('__file__')
- if dunder_file:
- filename = source_for_file(dunder_file)
- if original_filename and not original_filename.startswith('<'):
- orig = os.path.basename(original_filename)
- if orig != os.path.basename(filename):
- # Files shouldn't be renamed when moved. This happens when
- # exec'ing code. If it seems like something is wrong with
- # the frame's file name, then just use the original.
- filename = original_filename
-
- if not filename:
- # Empty string is pretty useless.
- return nope(disp, "empty string isn't a file name")
-
- if filename.startswith('memory:'):
- return nope(disp, "memory isn't traceable")
-
- if filename.startswith('<'):
- # Lots of non-file execution is represented with artificial
- # file names like "<string>", "<doctest readme.txt[0]>", or
- # "<exec_function>". Don't ever trace these executions, since we
- # can't do anything with the data later anyway.
- return nope(disp, "not a real file name")
-
- # pyexpat does a dumb thing, calling the trace function explicitly from
- # C code with a C file name.
- if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
- return nope(disp, "pyexpat lies about itself")
-
- # Jython reports the .class file to the tracer, use the source file.
- if filename.endswith("$py.class"):
- filename = filename[:-9] + ".py"
-
- # XXX maybe we need to support both at the same time?
- # Don't trace modules imported from environment in standalone mode
- if getattr(sys, 'is_standalone_binary', False) and filename.startswith("/"):
- return nope(disp, "skip modules from environment")
-
- canonical = canonical_filename(filename)
- disp.canonical_filename = canonical
-
- # Try the plugins, see if they have an opinion about the file.
- plugin = None
- for plugin in self.plugins.file_tracers:
- if not plugin._coverage_enabled:
- continue
-
- try:
- file_tracer = plugin.file_tracer(canonical)
- if file_tracer is not None:
- file_tracer._coverage_plugin = plugin
- disp.trace = True
- disp.file_tracer = file_tracer
- if file_tracer.has_dynamic_source_filename():
- disp.has_dynamic_filename = True
- else:
- disp.source_filename = canonical_filename(
- file_tracer.source_filename()
- )
- break
- except Exception:
- if not self.config.suppress_plugin_errors:
- raise
- self.warn(
- "Disabling plug-in %r due to an exception:" % (plugin._coverage_plugin_name)
- )
- traceback.print_exc()
- plugin._coverage_enabled = False
- continue
- else:
- # No plugin wanted it: it's Python.
- disp.trace = True
- disp.source_filename = canonical
-
- if not disp.has_dynamic_filename:
- if not disp.source_filename:
- raise CoverageException(
- "Plugin %r didn't set source_filename for %r" %
- (plugin, disp.original_filename)
- )
- reason = self.check_include_omit_etc(disp.source_filename, frame)
- if reason:
- nope(disp, reason)
-
- return disp
-
- def check_include_omit_etc(self, filename, frame):
- """Check a file name against the include, omit, etc, rules.
-
- Returns a string or None. String means, don't trace, and is the reason
- why. None means no reason found to not trace.
-
- """
- modulename = name_for_module(filename, frame)
-
- # If the user specified source or include, then that's authoritative
- # about the outer bound of what to measure and we don't have to apply
- # any canned exclusions. If they didn't, then we have to exclude the
- # stdlib and coverage.py directories.
- if self.source_match or self.source_pkgs_match:
- extra = ""
- ok = False
- if self.source_pkgs_match:
- if self.source_pkgs_match.match(modulename):
- ok = True
- if modulename in self.source_pkgs_unmatched:
- self.source_pkgs_unmatched.remove(modulename)
- else:
- extra = "module {!r} ".format(modulename)
- if not ok and self.source_match:
- if self.source_match.match(filename):
- ok = True
- if not ok:
- return extra + "falls outside the --source spec"
- elif self.include_match:
- if not self.include_match.match(filename):
- return "falls outside the --include trees"
- else:
- # If we aren't supposed to trace installed code, then check if this
- # is near the Python standard library and skip it if so.
- if self.pylib_match and self.pylib_match.match(filename):
- return "is in the stdlib"
-
- # We exclude the coverage.py code itself, since a little of it
- # will be measured otherwise.
- if self.cover_match and self.cover_match.match(filename):
- return "is part of coverage.py"
-
- # Check the file against the omit pattern.
- if self.omit_match and self.omit_match.match(filename):
- return "is inside an --omit pattern"
-
- # No point tracing a file we can't later write to SQLite.
- try:
- filename.encode("utf8")
- except UnicodeEncodeError:
- return "non-encodable filename"
-
- # No reason found to skip this file.
- return None
-
- def warn_conflicting_settings(self):
- """Warn if there are settings that conflict."""
- if self.include:
- if self.source or self.source_pkgs:
- self.warn("--include is ignored because --source is set", slug="include-ignored")
-
- def warn_already_imported_files(self):
- """Warn if files have already been imported that we will be measuring."""
- if self.include or self.source or self.source_pkgs:
- warned = set()
- for mod in list(sys.modules.values()):
- filename = getattr(mod, "__file__", None)
- if filename is None:
- continue
- if filename in warned:
- continue
-
- disp = self.should_trace(filename)
- if disp.trace:
- msg = "Already imported a file that will be measured: {}".format(filename)
- self.warn(msg, slug="already-imported")
- warned.add(filename)
-
- def warn_unimported_source(self):
- """Warn about source packages that were of interest, but never traced."""
- for pkg in self.source_pkgs_unmatched:
- self._warn_about_unmeasured_code(pkg)
-
- def _warn_about_unmeasured_code(self, pkg):
- """Warn about a package or module that we never traced.
-
- `pkg` is a string, the name of the package or module.
-
- """
- mod = sys.modules.get(pkg)
- if mod is None:
- self.warn("Module %s was never imported." % pkg, slug="module-not-imported")
- return
-
- if module_is_namespace(mod):
- # A namespace package. It's OK for this not to have been traced,
- # since there is no code directly in it.
- return
-
- if not module_has_file(mod):
- self.warn("Module %s has no Python source." % pkg, slug="module-not-python")
- return
-
- # The module was in sys.modules, and seems like a module with code, but
- # we never measured it. I guess that means it was imported before
- # coverage even started.
- self.warn(
- "Module %s was previously imported, but not measured" % pkg,
- slug="module-not-measured",
- )
-
- def find_possibly_unexecuted_files(self):
- """Find files in the areas of interest that might be untraced.
-
- Yields pairs: file path, and responsible plug-in name.
- """
- for pkg in self.source_pkgs:
- if (not pkg in sys.modules or
- not module_has_file(sys.modules[pkg])):
- continue
- pkg_file = source_for_file(sys.modules[pkg].__file__)
- for ret in self._find_executable_files(canonical_path(pkg_file)):
- yield ret
-
- for src in self.source:
- for ret in self._find_executable_files(src):
- yield ret
-
- def _find_plugin_files(self, src_dir):
- """Get executable files from the plugins."""
- for plugin in self.plugins.file_tracers:
- for x_file in plugin.find_executable_files(src_dir):
- yield x_file, plugin._coverage_plugin_name
-
- def _find_executable_files(self, src_dir):
- """Find executable files in `src_dir`.
-
- Search for files in `src_dir` that can be executed because they
- are probably importable. Don't include ones that have been omitted
- by the configuration.
-
- Yield the file path, and the plugin name that handles the file.
-
- """
- py_files = ((py_file, None) for py_file in find_python_files(src_dir))
- plugin_files = self._find_plugin_files(src_dir)
-
- for file_path, plugin_name in itertools.chain(py_files, plugin_files):
- file_path = canonical_filename(file_path)
- if self.omit_match and self.omit_match.match(file_path):
- # Turns out this file was omitted, so don't pull it back
- # in as unexecuted.
- continue
- yield file_path, plugin_name
-
- def sys_info(self):
- """Our information for Coverage.sys_info.
-
- Returns a list of (key, value) pairs.
- """
- info = [
- ('cover_paths', self.cover_paths),
- ('pylib_paths', self.pylib_paths),
- ]
-
- matcher_names = [
- 'source_match', 'source_pkgs_match',
- 'include_match', 'omit_match',
- 'cover_match', 'pylib_match',
- ]
-
- for matcher_name in matcher_names:
- matcher = getattr(self, matcher_name)
- if matcher:
- matcher_info = matcher.info()
- else:
- matcher_info = '-none-'
- info.append((matcher_name, matcher_info))
-
- return info
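The deleted inorout.py above hinges on InOrOut.check_include_omit_etc, which turns the --source, --include, and --omit settings into either a skip reason or a decision to trace. A condensed, self-contained restatement of that decision order follows; the matcher arguments are simple stand-ins for the TreeMatcher/ModuleMatcher/FnmatchMatcher objects built in configure(), not the real classes.

    # Condensed sketch of the decision order in InOrOut.check_include_omit_etc.
    # Anything with a .match(str) -> bool method can serve as a matcher here.
    class PrefixMatcher(object):
        def __init__(self, *prefixes):
            self.prefixes = prefixes
        def match(self, s):
            return any(s.startswith(p) for p in self.prefixes)

    def why_not_traced(filename, modulename,
                       source_match=None, source_pkgs_match=None,
                       include_match=None, omit_match=None,
                       pylib_match=None, cover_match=None):
        """Return the reason a file is skipped, or None if it should be traced."""
        if source_match or source_pkgs_match:
            ok = bool(source_pkgs_match and source_pkgs_match.match(modulename)) \
                 or bool(source_match and source_match.match(filename))
            if not ok:
                return "falls outside the --source spec"
        elif include_match:
            if not include_match.match(filename):
                return "falls outside the --include trees"
        else:
            if pylib_match and pylib_match.match(filename):
                return "is in the stdlib"
            if cover_match and cover_match.match(filename):
                return "is part of coverage.py"
        if omit_match and omit_match.match(filename):
            return "is inside an --omit pattern"
        return None  # no reason found, so the file is traced

    # Example: --source covers myproj/, but vendored code is omitted.
    print(why_not_traced("myproj/vendor/x.py", "myproj.vendor.x",
                         source_match=PrefixMatcher("myproj/"),
                         omit_match=PrefixMatcher("myproj/vendor/")))
    # -> "is inside an --omit pattern"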
diff --git a/contrib/python/coverage/py2/coverage/jsonreport.py b/contrib/python/coverage/py2/coverage/jsonreport.py
deleted file mode 100644
index 4287bc79a3..0000000000
--- a/contrib/python/coverage/py2/coverage/jsonreport.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# coding: utf-8
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Json reporting for coverage.py"""
-import datetime
-import json
-import sys
-
-from coverage import __version__
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-
-
-class JsonReporter(object):
- """A reporter for writing JSON coverage results."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.total = Numbers()
- self.report_data = {}
-
- def report(self, morfs, outfile=None):
- """Generate a json report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- `outfile` is a file object to write the json to
-
- """
- outfile = outfile or sys.stdout
- coverage_data = self.coverage.get_data()
- coverage_data.set_query_contexts(self.config.report_contexts)
- self.report_data["meta"] = {
- "version": __version__,
- "timestamp": datetime.datetime.now().isoformat(),
- "branch_coverage": coverage_data.has_arcs(),
- "show_contexts": self.config.json_show_contexts,
- }
-
- measured_files = {}
- for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
- measured_files[file_reporter.relative_filename()] = self.report_one_file(
- coverage_data,
- analysis
- )
-
- self.report_data["files"] = measured_files
-
- self.report_data["totals"] = {
- 'covered_lines': self.total.n_executed,
- 'num_statements': self.total.n_statements,
- 'percent_covered': self.total.pc_covered,
- 'missing_lines': self.total.n_missing,
- 'excluded_lines': self.total.n_excluded,
- }
-
- if coverage_data.has_arcs():
- self.report_data["totals"].update({
- 'num_branches': self.total.n_branches,
- 'num_partial_branches': self.total.n_partial_branches,
- 'covered_branches': self.total.n_executed_branches,
- 'missing_branches': self.total.n_missing_branches,
- })
-
- json.dump(
- self.report_data,
- outfile,
- indent=4 if self.config.json_pretty_print else None
- )
-
- return self.total.n_statements and self.total.pc_covered
-
- def report_one_file(self, coverage_data, analysis):
- """Extract the relevant report data for a single file"""
- nums = analysis.numbers
- self.total += nums
- summary = {
- 'covered_lines': nums.n_executed,
- 'num_statements': nums.n_statements,
- 'percent_covered': nums.pc_covered,
- 'missing_lines': nums.n_missing,
- 'excluded_lines': nums.n_excluded,
- }
- reported_file = {
- 'executed_lines': sorted(analysis.executed),
- 'summary': summary,
- 'missing_lines': sorted(analysis.missing),
- 'excluded_lines': sorted(analysis.excluded)
- }
- if self.config.json_show_contexts:
- reported_file['contexts'] = analysis.data.contexts_by_lineno(
- analysis.filename,
- )
- if coverage_data.has_arcs():
- reported_file['summary'].update({
- 'num_branches': nums.n_branches,
- 'num_partial_branches': nums.n_partial_branches,
- 'covered_branches': nums.n_executed_branches,
- 'missing_branches': nums.n_missing_branches,
- })
- return reported_file
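Pieced together from report() and report_one_file() in the deleted jsonreport.py, the JSON it writes has roughly the shape sketched below. The file name and numbers are invented; the branch-related keys appear only when branch (arc) data was collected, and a per-file "contexts" key is added when json_show_contexts is set.

    # Approximate shape of the JSON produced by the deleted JsonReporter
    # (key names taken from the code above; values are illustrative only).
    example_report = {
        "meta": {
            "version": "5.x",                    # coverage.__version__
            "timestamp": "2020-01-01T00:00:00",  # datetime.now().isoformat()
            "branch_coverage": False,
            "show_contexts": False,
        },
        "files": {
            "pkg/mod.py": {
                "executed_lines": [1, 2, 4],
                "missing_lines": [6],
                "excluded_lines": [],
                "summary": {
                    "covered_lines": 3,
                    "num_statements": 4,
                    "percent_covered": 75.0,
                    "missing_lines": 1,
                    "excluded_lines": 0,
                },
            },
        },
        "totals": {
            "covered_lines": 3,
            "num_statements": 4,
            "percent_covered": 75.0,
            "missing_lines": 1,
            "excluded_lines": 0,
        },
    }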
diff --git a/contrib/python/coverage/py2/coverage/misc.py b/contrib/python/coverage/py2/coverage/misc.py
deleted file mode 100644
index 034e288eb9..0000000000
--- a/contrib/python/coverage/py2/coverage/misc.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Miscellaneous stuff for coverage.py."""
-
-import errno
-import hashlib
-import inspect
-import locale
-import os
-import os.path
-import random
-import re
-import socket
-import sys
-import types
-
-from coverage import env
-from coverage.backward import to_bytes, unicode_class
-
-ISOLATED_MODULES = {}
-
-
-def isolate_module(mod):
- """Copy a module so that we are isolated from aggressive mocking.
-
- If a test suite mocks os.path.exists (for example), and then we need to use
- it during the test, everything will get tangled up if we use their mock.
- Making a copy of the module when we import it will isolate coverage.py from
- those complications.
- """
- if mod not in ISOLATED_MODULES:
- new_mod = types.ModuleType(mod.__name__)
- ISOLATED_MODULES[mod] = new_mod
- for name in dir(mod):
- value = getattr(mod, name)
- if isinstance(value, types.ModuleType):
- value = isolate_module(value)
- setattr(new_mod, name, value)
- return ISOLATED_MODULES[mod]
-
-os = isolate_module(os)
-
-
-def dummy_decorator_with_args(*args_unused, **kwargs_unused):
- """Dummy no-op implementation of a decorator with arguments."""
- def _decorator(func):
- return func
- return _decorator
-
-
-# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging
-# tests to remove noise from stack traces.
-# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces.
-USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
-
-# Use PyContracts for assertion testing on parameters and returns, but only if
-# we are running our own test suite.
-if USE_CONTRACTS:
- from contracts import contract # pylint: disable=unused-import
- from contracts import new_contract as raw_new_contract
-
- def new_contract(*args, **kwargs):
- """A proxy for contracts.new_contract that doesn't mind happening twice."""
- try:
- raw_new_contract(*args, **kwargs)
- except ValueError:
- # During meta-coverage, this module is imported twice, and
- # PyContracts doesn't like redefining contracts. It's OK.
- pass
-
- # Define contract words that PyContract doesn't have.
- new_contract('bytes', lambda v: isinstance(v, bytes))
- if env.PY3:
- new_contract('unicode', lambda v: isinstance(v, unicode_class))
-
- def one_of(argnames):
- """Ensure that only one of the argnames is non-None."""
- def _decorator(func):
- argnameset = {name.strip() for name in argnames.split(",")}
- def _wrapper(*args, **kwargs):
- vals = [kwargs.get(name) for name in argnameset]
- assert sum(val is not None for val in vals) == 1
- return func(*args, **kwargs)
- return _wrapper
- return _decorator
-else: # pragma: not testing
- # We aren't using real PyContracts, so just define our decorators as
- # stunt-double no-ops.
- contract = dummy_decorator_with_args
- one_of = dummy_decorator_with_args
-
- def new_contract(*args_unused, **kwargs_unused):
- """Dummy no-op implementation of `new_contract`."""
- pass
-
-
-def nice_pair(pair):
- """Make a nice string representation of a pair of numbers.
-
- If the numbers are equal, just return the number, otherwise return the pair
- with a dash between them, indicating the range.
-
- """
- start, end = pair
- if start == end:
- return "%d" % start
- else:
- return "%d-%d" % (start, end)
-
-
-def expensive(fn):
- """A decorator to indicate that a method shouldn't be called more than once.
-
- Normally, this does nothing. During testing, this raises an exception if
- called more than once.
-
- """
- if env.TESTING:
- attr = "_once_" + fn.__name__
-
- def _wrapper(self):
- if hasattr(self, attr):
- raise AssertionError("Shouldn't have called %s more than once" % fn.__name__)
- setattr(self, attr, True)
- return fn(self)
- return _wrapper
- else:
- return fn # pragma: not testing
-
-
-def bool_or_none(b):
- """Return bool(b), but preserve None."""
- if b is None:
- return None
- else:
- return bool(b)
-
-
-def join_regex(regexes):
- """Combine a list of regexes into one that matches any of them."""
- return "|".join("(?:%s)" % r for r in regexes)
-
-
-def file_be_gone(path):
- """Remove a file, and don't get annoyed if it doesn't exist."""
- try:
- os.remove(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
-
-def ensure_dir(directory):
- """Make sure the directory exists.
-
- If `directory` is None or empty, do nothing.
- """
- if directory and not os.path.isdir(directory):
- os.makedirs(directory)
-
-
-def ensure_dir_for_file(path):
- """Make sure the directory for the path exists."""
- ensure_dir(os.path.dirname(path))
-
-
-def output_encoding(outfile=None):
- """Determine the encoding to use for output written to `outfile` or stdout."""
- if outfile is None:
- outfile = sys.stdout
- encoding = (
- getattr(outfile, "encoding", None) or
- getattr(sys.__stdout__, "encoding", None) or
- locale.getpreferredencoding()
- )
- return encoding
-
-
-def filename_suffix(suffix):
- """Compute a filename suffix for a data file.
-
- If `suffix` is a string or None, simply return it. If `suffix` is True,
- then build a suffix incorporating the hostname, process id, and a random
- number.
-
- Returns a string or None.
-
- """
- if suffix is True:
- # If data_suffix was a simple true value, then make a suffix with
- # plenty of distinguishing information. We do this at the last
- # minute so that the pid will be correct even if the process
- # forks.
- dice = random.Random(os.urandom(8)).randint(0, 999999)
- suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
- return suffix
-
-
-class Hasher(object):
- """Hashes Python data into md5."""
- def __init__(self):
- self.md5 = hashlib.md5()
-
- def update(self, v):
- """Add `v` to the hash, recursively if needed."""
- self.md5.update(to_bytes(str(type(v))))
- if isinstance(v, unicode_class):
- self.md5.update(v.encode('utf8'))
- elif isinstance(v, bytes):
- self.md5.update(v)
- elif v is None:
- pass
- elif isinstance(v, (int, float)):
- self.md5.update(to_bytes(str(v)))
- elif isinstance(v, (tuple, list)):
- for e in v:
- self.update(e)
- elif isinstance(v, dict):
- keys = v.keys()
- for k in sorted(keys):
- self.update(k)
- self.update(v[k])
- else:
- for k in dir(v):
- if k.startswith('__'):
- continue
- a = getattr(v, k)
- if inspect.isroutine(a):
- continue
- self.update(k)
- self.update(a)
- self.md5.update(b'.')
-
- def hexdigest(self):
- """Retrieve the hex digest of the hash."""
- return self.md5.hexdigest()
-
-
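A minimal usage sketch for `Hasher` (the values hashed here are arbitrary; only the update/hexdigest protocol comes from the class above):

h = Hasher()
h.update(u"section-name")                    # text is hashed as UTF-8
h.update({"branch": True, "timid": None})    # dicts are hashed key by key, in sorted order
fingerprint = h.hexdigest()                  # a stable hex string for this combination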
-def _needs_to_implement(that, func_name):
- """Helper to raise NotImplementedError in interface stubs."""
- if hasattr(that, "_coverage_plugin_name"):
- thing = "Plugin"
- name = that._coverage_plugin_name
- else:
- thing = "Class"
- klass = that.__class__
- name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
-
- raise NotImplementedError(
- "{thing} {name!r} needs to implement {func_name}()".format(
- thing=thing, name=name, func_name=func_name
- )
- )
-
-
-class DefaultValue(object):
- """A sentinel object to use for unusual default-value needs.
-
- Construct with a string that will be used as the repr, for display in help
- and Sphinx output.
-
- """
- def __init__(self, display_as):
- self.display_as = display_as
-
- def __repr__(self):
- return self.display_as
-
-
-def substitute_variables(text, variables):
- """Substitute ``${VAR}`` variables in `text` with their values.
-
- Variables in the text can take a number of shell-inspired forms::
-
- $VAR
- ${VAR}
- ${VAR?} strict: an error if VAR isn't defined.
- ${VAR-missing} defaulted: "missing" if VAR isn't defined.
- $$ just a dollar sign.
-
- `variables` is a dictionary of variable values.
-
- Returns the resulting text with values substituted.
-
- """
- dollar_pattern = r"""(?x) # Use extended regex syntax
- \$ # A dollar sign,
- (?: # then
- (?P<dollar>\$) | # a dollar sign, or
- (?P<word1>\w+) | # a plain word, or
- { # a {-wrapped
- (?P<word2>\w+) # word,
- (?:
- (?P<strict>\?) | # with a strict marker
- -(?P<defval>[^}]*) # or a default value
- )? # maybe.
- }
- )
- """
-
- def dollar_replace(match):
- """Called for each $replacement."""
- # Only one of the groups will have matched, just get its text.
- word = next(g for g in match.group('dollar', 'word1', 'word2') if g)
- if word == "$":
- return "$"
- elif word in variables:
- return variables[word]
- elif match.group('strict'):
- msg = "Variable {} is undefined: {!r}".format(word, text)
- raise CoverageException(msg)
- else:
- return match.group('defval')
-
- text = re.sub(dollar_pattern, dollar_replace, text)
- return text
-
-
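A short sketch of the substitution forms listed above (the variable names are invented):

variables = {"NAME": "world"}
substitute_variables("Hello $NAME, ${GREETING-hi}, cost: $$5", variables)
# -> "Hello world, hi, cost: $5"
substitute_variables("${GREETING?}", variables)
# -> raises CoverageException: GREETING is undefined and '?' makes that an error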
-class BaseCoverageException(Exception):
- """The base of all Coverage exceptions."""
- pass
-
-
-class CoverageException(BaseCoverageException):
- """An exception raised by a coverage.py function."""
- pass
-
-
-class NoSource(CoverageException):
- """We couldn't find the source for a module."""
- pass
-
-
-class NoCode(NoSource):
- """We couldn't find any code at all."""
- pass
-
-
-class NotPython(CoverageException):
- """A source file turned out not to be parsable Python."""
- pass
-
-
-class ExceptionDuringRun(CoverageException):
- """An exception happened while running customer code.
-
- Construct it with three arguments, the values from `sys.exc_info`.
-
- """
- pass
-
-
-class StopEverything(BaseCoverageException):
- """An exception that means everything should stop.
-
- The CoverageTest class converts these to SkipTest, so that when running
- tests, raising this exception will automatically skip the test.
-
- """
- pass
diff --git a/contrib/python/coverage/py2/coverage/multiproc.py b/contrib/python/coverage/py2/coverage/multiproc.py
deleted file mode 100644
index 21ed2e2c95..0000000000
--- a/contrib/python/coverage/py2/coverage/multiproc.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Monkey-patching to add multiprocessing support for coverage.py"""
-
-import multiprocessing
-import multiprocessing.process
-import os
-import os.path
-import sys
-import traceback
-
-from coverage import env
-from coverage.misc import contract
-
-# An attribute that will be set on the module to indicate that it has been
-# monkey-patched.
-PATCHED_MARKER = "_coverage$patched"
-
-COVERAGE_CONFIGURATION_ENV = "_COVERAGE_CONFIGURATION_ENV"
-
-
-if env.PYVERSION >= (3, 4):
- OriginalProcess = multiprocessing.process.BaseProcess
-else:
- OriginalProcess = multiprocessing.Process
-
-original_bootstrap = OriginalProcess._bootstrap
-
-class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method
- """A replacement for multiprocess.Process that starts coverage."""
-
- def _bootstrap(self, *args, **kwargs):
- """Wrapper around _bootstrap to start coverage."""
- try:
- from coverage import Coverage # avoid circular import
- import json
- kwconf = json.loads(os.environ[COVERAGE_CONFIGURATION_ENV])
- cov = Coverage(**kwconf)
- cov._warn_preimported_source = False
- cov.start()
- debug = cov._debug
- if debug.should("multiproc"):
- debug.write("Calling multiprocessing bootstrap")
- except Exception:
- print("Exception during multiprocessing bootstrap init:")
- traceback.print_exc(file=sys.stdout)
- sys.stdout.flush()
- raise
- try:
- return original_bootstrap(self, *args, **kwargs)
- finally:
- if debug.should("multiproc"):
- debug.write("Finished multiprocessing bootstrap")
- cov.stop()
- cov.save()
- if debug.should("multiproc"):
- debug.write("Saved multiprocessing data")
-
-class Stowaway(object):
- """An object to pickle, so when it is unpickled, it can apply the monkey-patch."""
- def __init__(self, rcfile):
- self.rcfile = rcfile
-
- def __getstate__(self):
- return {'rcfile': self.rcfile}
-
- def __setstate__(self, state):
- patch_multiprocessing(state['rcfile'])
-
-
-@contract(rcfile=str)
-def patch_multiprocessing(rcfile, coverage_args):
- """Monkey-patch the multiprocessing module.
-
- This enables coverage measurement of processes started by multiprocessing.
- This involves aggressive monkey-patching.
-
- `rcfile` is the path to the rcfile being used.
-
- `coverage_args` is a JSON string of keyword arguments for the Coverage
- constructor, handed to child processes through the environment.
-
- """
-
- if hasattr(multiprocessing, PATCHED_MARKER):
- return
-
- if env.PYVERSION >= (3, 4):
- OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
- else:
- multiprocessing.Process = ProcessWithCoverage
-
- # Set the value in ProcessWithCoverage that will be pickled into the child
- # process.
- os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)
-
- os.environ[COVERAGE_CONFIGURATION_ENV] = coverage_args
-
- # When spawning processes rather than forking them, we have no state in the
- # new process. We sneak in there with a Stowaway: we stuff one of our own
- # objects into the data that gets pickled and sent to the sub-process. When
- the Stowaway is unpickled, its __setstate__ method is called, which
- # re-applies the monkey-patch.
- # Windows only spawns, so this is needed to keep Windows working.
- try:
- from multiprocessing import spawn
- original_get_preparation_data = spawn.get_preparation_data
- except (ImportError, AttributeError):
- pass
- else:
- def get_preparation_data_with_stowaway(name):
- """Get the original preparation data, and also insert our stowaway."""
- d = original_get_preparation_data(name)
- d['stowaway'] = Stowaway(rcfile)
- return d
-
- spawn.get_preparation_data = get_preparation_data_with_stowaway
-
- setattr(multiprocessing, PATCHED_MARKER, True)
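The Stowaway trick generalizes: any picklable object whose `__setstate__` re-applies setup code can smuggle that setup into a spawned child. A stripped-down sketch of the same idea (`apply_patch` and `MiniStowaway` are stand-ins, not coverage.py API):

import pickle

def apply_patch():
    # Stand-in for patch_multiprocessing(): whatever setup the child process needs.
    print("patch applied in this process")

class MiniStowaway(object):
    def __init__(self, tag):
        self.tag = tag
    def __getstate__(self):
        return {'tag': self.tag}     # must be non-empty, or __setstate__ is never called
    def __setstate__(self, state):
        apply_patch()                # runs in whichever process unpickles the object

payload = pickle.dumps(MiniStowaway("re-patch"))
pickle.loads(payload)                # prints "patch applied in this process"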
diff --git a/contrib/python/coverage/py2/coverage/numbits.py b/contrib/python/coverage/py2/coverage/numbits.py
deleted file mode 100644
index 6ca96fbcf7..0000000000
--- a/contrib/python/coverage/py2/coverage/numbits.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""
-Functions to manipulate packed binary representations of number sets.
-
-To save space, coverage stores sets of line numbers in SQLite using a packed
-binary representation called a numbits. A numbits is a set of positive
-integers.
-
-A numbits is stored as a blob in the database. The exact meaning of the bytes
-in the blobs should be considered an implementation detail that might change in
-the future. Use these functions to work with those binary blobs of data.
-
-"""
-import json
-
-from coverage import env
-from coverage.backward import byte_to_int, bytes_to_ints, binary_bytes, zip_longest
-from coverage.misc import contract, new_contract
-
-if env.PY3:
- def _to_blob(b):
- """Convert a bytestring into a type SQLite will accept for a blob."""
- return b
-
- new_contract('blob', lambda v: isinstance(v, bytes))
-else:
- def _to_blob(b):
- """Convert a bytestring into a type SQLite will accept for a blob."""
- return buffer(b) # pylint: disable=undefined-variable
-
- new_contract('blob', lambda v: isinstance(v, buffer)) # pylint: disable=undefined-variable
-
-
-@contract(nums='Iterable', returns='blob')
-def nums_to_numbits(nums):
- """Convert `nums` into a numbits.
-
- Arguments:
- nums: a reusable iterable of integers, the line numbers to store.
-
- Returns:
- A binary blob.
- """
- try:
- nbytes = max(nums) // 8 + 1
- except ValueError:
- # nums was empty.
- return _to_blob(b'')
- b = bytearray(nbytes)
- for num in nums:
- b[num//8] |= 1 << num % 8
- return _to_blob(bytes(b))
-
-
-@contract(numbits='blob', returns='list[int]')
-def numbits_to_nums(numbits):
- """Convert a numbits into a list of numbers.
-
- Arguments:
- numbits: a binary blob, the packed number set.
-
- Returns:
- A list of ints.
-
- When registered as a SQLite function by :func:`register_sqlite_functions`,
- this returns a string, a JSON-encoded list of ints.
-
- """
- nums = []
- for byte_i, byte in enumerate(bytes_to_ints(numbits)):
- for bit_i in range(8):
- if (byte & (1 << bit_i)):
- nums.append(byte_i * 8 + bit_i)
- return nums
-
-
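To make the packing concrete, a small worked example (numbers chosen arbitrarily): 1, 2 and 10 need two bytes, with bits 1 and 2 set in the first byte and bit 2 set in the second.

blob = nums_to_numbits([1, 2, 10])
# On Python 3 the blob is b'\x06\x04': 0b00000110, 0b00000100.
numbits_to_nums(blob)
# -> [1, 2, 10]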
-@contract(numbits1='blob', numbits2='blob', returns='blob')
-def numbits_union(numbits1, numbits2):
- """Compute the union of two numbits.
-
- Returns:
- A new numbits, the union of `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- return _to_blob(binary_bytes(b1 | b2 for b1, b2 in byte_pairs))
-
-
-@contract(numbits1='blob', numbits2='blob', returns='blob')
-def numbits_intersection(numbits1, numbits2):
- """Compute the intersection of two numbits.
-
- Returns:
- A new numbits, the intersection of `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- intersection_bytes = binary_bytes(b1 & b2 for b1, b2 in byte_pairs)
- return _to_blob(intersection_bytes.rstrip(b'\0'))
-
-
-@contract(numbits1='blob', numbits2='blob', returns='bool')
-def numbits_any_intersection(numbits1, numbits2):
- """Is there any number that appears in both numbits?
-
- Determine whether two number sets have a non-empty intersection. This is
- faster than computing the intersection.
-
- Returns:
- A bool, True if there is any number in both `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- return any(b1 & b2 for b1, b2 in byte_pairs)
-
-
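A quick sketch of how these operations compose (again with arbitrary numbers):

a = nums_to_numbits([1, 2])
b = nums_to_numbits([2, 3])
numbits_to_nums(numbits_union(a, b))          # -> [1, 2, 3]
numbits_to_nums(numbits_intersection(a, b))   # -> [2]
numbits_any_intersection(a, b)                # -> True
num_in_numbits(3, a)                          # -> False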
-@contract(num='int', numbits='blob', returns='bool')
-def num_in_numbits(num, numbits):
- """Does the integer `num` appear in `numbits`?
-
- Returns:
- A bool, True if `num` is a member of `numbits`.
- """
- nbyte, nbit = divmod(num, 8)
- if nbyte >= len(numbits):
- return False
- return bool(byte_to_int(numbits[nbyte]) & (1 << nbit))
-
-
-def register_sqlite_functions(connection):
- """
- Define numbits functions in a SQLite connection.
-
- This defines these functions for use in SQLite statements:
-
- * :func:`numbits_union`
- * :func:`numbits_intersection`
- * :func:`numbits_any_intersection`
- * :func:`num_in_numbits`
- * :func:`numbits_to_nums`
-
- `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
- object. After creating the connection, pass it to this function to
- register the numbits functions. Then you can use numbits functions in your
- queries::
-
- import sqlite3
- from coverage.numbits import register_sqlite_functions
-
- conn = sqlite3.connect('example.db')
- register_sqlite_functions(conn)
- c = conn.cursor()
- # Kind of a nonsense query: find all the files and contexts that
- # executed line 47 in any file:
- c.execute(
- "select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
- (47,)
- )
- """
- connection.create_function("numbits_union", 2, numbits_union)
- connection.create_function("numbits_intersection", 2, numbits_intersection)
- connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
- connection.create_function("num_in_numbits", 2, num_in_numbits)
- connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
diff --git a/contrib/python/coverage/py2/coverage/parser.py b/contrib/python/coverage/py2/coverage/parser.py
deleted file mode 100644
index 258f956039..0000000000
--- a/contrib/python/coverage/py2/coverage/parser.py
+++ /dev/null
@@ -1,1276 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Code parsing for coverage.py."""
-
-import ast
-import collections
-import os
-import re
-import token
-import tokenize
-
-from coverage import env
-from coverage.backward import range # pylint: disable=redefined-builtin
-from coverage.backward import bytes_to_ints, string_class
-from coverage.bytecode import code_objects
-from coverage.debug import short_stack
-from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
-from coverage.misc import NoSource, NotPython, StopEverything
-from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
-
-
-class PythonParser(object):
- """Parse code to find executable lines, excluded lines, etc.
-
- This information is all based on static analysis: no code execution is
- involved.
-
- """
- @contract(text='unicode|None')
- def __init__(self, text=None, filename=None, exclude=None):
- """
- Source can be provided as `text`, the text itself, or `filename`, from
- which the text will be read. Excluded lines are those that match
- `exclude`, a regex.
-
- """
- assert text or filename, "PythonParser needs either text or filename"
- self.filename = filename or "<code>"
- self.text = text
- if not self.text:
- from coverage.python import get_python_source
- try:
- self.text = get_python_source(self.filename)
- except IOError as err:
- raise NoSource(
- "No source for code: '%s': %s" % (self.filename, err)
- )
-
- self.exclude = exclude
-
- # The text lines of the parsed code.
- self.lines = self.text.split('\n')
-
- # The normalized line numbers of the statements in the code. Exclusions
- # are taken into account, and statements are adjusted to their first
- # lines.
- self.statements = set()
-
- # The normalized line numbers of the excluded lines in the code,
- # adjusted to their first lines.
- self.excluded = set()
-
- # The raw_* attributes are only used in this class, and in
- # lab/parser.py to show how this class is working.
-
- # The line numbers that start statements, as reported by the line
- # number table in the bytecode.
- self.raw_statements = set()
-
- # The raw line numbers of excluded lines of code, as marked by pragmas.
- self.raw_excluded = set()
-
- # The line numbers of class definitions.
- self.raw_classdefs = set()
-
- # Function definitions (start, end, name)
- self._raw_funcdefs = set()
-
- # The line numbers of docstring lines.
- self.raw_docstrings = set()
-
- # Internal detail, used by lab/parser.py.
- self.show_tokens = False
-
- # A dict mapping line numbers to lexical statement starts for
- # multi-line statements.
- self._multiline = {}
-
- # Lazily-created ByteParser, arc data, and missing arc descriptions.
- self._byte_parser = None
- self._all_arcs = None
- self._missing_arc_fragments = None
-
- @property
- def byte_parser(self):
- """Create a ByteParser on demand."""
- if not self._byte_parser:
- self._byte_parser = ByteParser(self.text, filename=self.filename)
- return self._byte_parser
-
- @property
- def raw_funcdefs(self):
- """Function definitions found by the AST analysis, as (start, end, name) tuples."""
- return self._raw_funcdefs
-
- def lines_matching(self, *regexes):
- """Find the lines matching one of a list of regexes.
-
- Returns a set of line numbers, the lines that contain a match for one
- of the regexes in `regexes`. The entire line needn't match, just a
- part of it.
-
- """
- combined = join_regex(regexes)
- if env.PY2:
- combined = combined.decode("utf8")
- regex_c = re.compile(combined)
- matches = set()
- for i, ltext in enumerate(self.lines, start=1):
- if regex_c.search(ltext):
- matches.add(i)
- return matches
-
- def _raw_parse(self):
- """Parse the source to find the interesting facts about its lines.
-
- A handful of attributes are updated.
-
- """
- # Find lines which match an exclusion pattern.
- if self.exclude:
- self.raw_excluded = self.lines_matching(self.exclude)
-
- # Tokenize, to find excluded suites, to find docstrings, and to find
- # multi-line statements.
- indent = 0
- exclude_indent = 0
- excluding = False
- excluding_decorators = False
- prev_toktype = token.INDENT
- first_line = None
- empty = True
- first_on_line = True
-
- tokgen = generate_tokens(self.text)
- for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
- if self.show_tokens: # pragma: debugging
- print("%10s %5s %-20r %r" % (
- tokenize.tok_name.get(toktype, toktype),
- nice_pair((slineno, elineno)), ttext, ltext
- ))
- if toktype == token.INDENT:
- indent += 1
- elif toktype == token.DEDENT:
- indent -= 1
- elif toktype == token.NAME:
- if ttext == 'class':
- # Class definitions look like branches in the bytecode, so
- # we need to exclude them. The simplest way is to note the
- # lines with the 'class' keyword.
- self.raw_classdefs.add(slineno)
- elif toktype == token.OP:
- if ttext == ':':
- should_exclude = (elineno in self.raw_excluded) or excluding_decorators
- if not excluding and should_exclude:
- # Start excluding a suite. We trigger off of the colon
- # token so that the #pragma comment will be recognized on
- # the same line as the colon.
- self.raw_excluded.add(elineno)
- exclude_indent = indent
- excluding = True
- excluding_decorators = False
- elif ttext == '@' and first_on_line:
- # A decorator.
- if elineno in self.raw_excluded:
- excluding_decorators = True
- if excluding_decorators:
- self.raw_excluded.add(elineno)
- elif toktype == token.STRING and prev_toktype == token.INDENT:
- # Strings that are first on an indented line are docstrings.
- # (a trick from trace.py in the stdlib.) This works for
- # 99.9999% of cases. For the rest (!) see:
- # http://stackoverflow.com/questions/1769332/x/1769794#1769794
- self.raw_docstrings.update(range(slineno, elineno+1))
- elif toktype == token.NEWLINE:
- if first_line is not None and elineno != first_line:
- # We're at the end of a line, and we've ended on a
- # different line than the first line of the statement,
- # so record a multi-line range.
- for l in range(first_line, elineno+1):
- self._multiline[l] = first_line
- first_line = None
- first_on_line = True
-
- if ttext.strip() and toktype != tokenize.COMMENT:
- # A non-whitespace token.
- empty = False
- if first_line is None:
- # The token is not whitespace, and is the first in a
- # statement.
- first_line = slineno
- # Check whether to end an excluded suite.
- if excluding and indent <= exclude_indent:
- excluding = False
- if excluding:
- self.raw_excluded.add(elineno)
- first_on_line = False
-
- prev_toktype = toktype
-
- # Find the starts of the executable statements.
- if not empty:
- self.raw_statements.update(self.byte_parser._find_statements())
-
- # The first line of modules can lie and say 1 always, even if the first
- # line of code is later. If so, map 1 to the actual first line of the
- # module.
- if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
- self._multiline[1] = min(self.raw_statements)
-
- def first_line(self, line):
- """Return the first line number of the statement including `line`."""
- if line < 0:
- line = -self._multiline.get(-line, -line)
- else:
- line = self._multiline.get(line, line)
- return line
-
- def first_lines(self, lines):
- """Map the line numbers in `lines` to the correct first line of the
- statement.
-
- Returns a set of the first lines.
-
- """
- return {self.first_line(l) for l in lines}
-
- def translate_lines(self, lines):
- """Implement `FileReporter.translate_lines`."""
- return self.first_lines(lines)
-
- def translate_arcs(self, arcs):
- """Implement `FileReporter.translate_arcs`."""
- return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
-
- def parse_source(self):
- """Parse source text to find executable lines, excluded lines, etc.
-
- Sets the .excluded and .statements attributes, normalized to the first
- line of multi-line statements.
-
- """
- try:
- self._raw_parse()
- except (tokenize.TokenError, IndentationError) as err:
- if hasattr(err, "lineno"):
- lineno = err.lineno # IndentationError
- else:
- lineno = err.args[1][0] # TokenError
- raise NotPython(
- u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- self.filename, err.args[0], lineno
- )
- )
-
- self.excluded = self.first_lines(self.raw_excluded)
-
- ignore = self.excluded | self.raw_docstrings
- starts = self.raw_statements - ignore
- self.statements = self.first_lines(starts) - ignore
-
- def arcs(self):
- """Get information about the arcs available in the code.
-
- Returns a set of line number pairs. Line numbers have been normalized
- to the first line of multi-line statements.
-
- """
- if self._all_arcs is None:
- self._analyze_ast()
- return self._all_arcs
-
- def _analyze_ast(self):
- """Run the AstArcAnalyzer and save its results.
-
- `_all_arcs` is the set of arcs in the code.
-
- """
- aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
- aaa.analyze()
-
- self._all_arcs = set()
- for l1, l2 in aaa.arcs:
- fl1 = self.first_line(l1)
- fl2 = self.first_line(l2)
- if fl1 != fl2:
- self._all_arcs.add((fl1, fl2))
-
- self._missing_arc_fragments = aaa.missing_arc_fragments
- self._raw_funcdefs = aaa.funcdefs
-
- def exit_counts(self):
- """Get a count of exits from each line.
-
- Excluded lines are excluded.
-
- """
- exit_counts = collections.defaultdict(int)
- for l1, l2 in self.arcs():
- if l1 < 0:
- # Don't ever report -1 as a line number
- continue
- if l1 in self.excluded:
- # Don't report excluded lines as line numbers.
- continue
- if l2 in self.excluded:
- # Arcs to excluded lines shouldn't count.
- continue
- exit_counts[l1] += 1
-
- # Class definitions have one extra exit, so remove one for each:
- for l in self.raw_classdefs:
- # Ensure key is there: class definitions can include excluded lines.
- if l in exit_counts:
- exit_counts[l] -= 1
-
- return exit_counts
-
- def missing_arc_description(self, start, end, executed_arcs=None):
- """Provide an English sentence describing a missing arc."""
- if self._missing_arc_fragments is None:
- self._analyze_ast()
-
- actual_start = start
-
- if (
- executed_arcs and
- end < 0 and end == -start and
- (end, start) not in executed_arcs and
- (end, start) in self._missing_arc_fragments
- ):
- # It's a one-line callable, and we never even started it,
- # and we have a message about not starting it.
- start, end = end, start
-
- fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
-
- msgs = []
- for smsg, emsg in fragment_pairs:
- if emsg is None:
- if end < 0:
- # Hmm, maybe we have a one-line callable, let's check.
- if (-end, end) in self._missing_arc_fragments:
- return self.missing_arc_description(-end, end)
- emsg = "didn't jump to the function exit"
- else:
- emsg = "didn't jump to line {lineno}"
- emsg = emsg.format(lineno=end)
-
- msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
- if smsg is not None:
- msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))
-
- msgs.append(msg)
-
- return " or ".join(msgs)
-
-
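A sketch of how PythonParser is typically driven (the source snippet and exclusion regex are invented for illustration):

source = u"a = 1\nif a:  # pragma: no cover\n    b = 2\n"
parser = PythonParser(text=source, exclude=r"pragma: no cover")
parser.parse_source()
sorted(parser.statements)    # -> [1]: only line 1 stays executable
sorted(parser.excluded)      # -> [2, 3]: the excluded `if` and its suite
parser.arcs()                # (from, to) line pairs, computed by the AST analysis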
-class ByteParser(object):
- """Parse bytecode to understand the structure of code."""
-
- @contract(text='unicode')
- def __init__(self, text, code=None, filename=None):
- self.text = text
- if code:
- self.code = code
- else:
- try:
- self.code = compile_unicode(text, filename, "exec")
- except SyntaxError as synerr:
- raise NotPython(
- u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- filename, synerr.msg, synerr.lineno
- )
- )
-
- # Alternative Python implementations don't always provide all the
- # attributes on code objects that we need to do the analysis.
- for attr in ['co_lnotab', 'co_firstlineno']:
- if not hasattr(self.code, attr):
- raise StopEverything( # pragma: only jython
- "This implementation of Python doesn't support code analysis.\n"
- "Run coverage.py under another Python for this command."
- )
-
- def child_parsers(self):
- """Iterate over all the code objects nested within this one.
-
- The iteration includes `self` as its first value.
-
- """
- return (ByteParser(self.text, code=c) for c in code_objects(self.code))
-
- def _line_numbers(self):
- """Yield the line numbers possible in this code object.
-
- Uses co_lnotab described in Python/compile.c to find the
- line numbers. Produces a sequence: l0, l1, ...
- """
- if hasattr(self.code, "co_lines"):
- for _, _, line in self.code.co_lines():
- if line is not None:
- yield line
- else:
- # Adapted from dis.py in the standard library.
- byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
- line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
-
- last_line_num = None
- line_num = self.code.co_firstlineno
- byte_num = 0
- for byte_incr, line_incr in zip(byte_increments, line_increments):
- if byte_incr:
- if line_num != last_line_num:
- yield line_num
- last_line_num = line_num
- byte_num += byte_incr
- if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
- line_incr -= 0x100
- line_num += line_incr
- if line_num != last_line_num:
- yield line_num
-
- def _find_statements(self):
- """Find the statements in `self.code`.
-
- Produce a sequence of line numbers that start statements. Recurses
- into all code objects reachable from `self.code`.
-
- """
- for bp in self.child_parsers():
- # Get all of the lineno information from this code.
- for l in bp._line_numbers():
- yield l
-
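For the older-interpreter branch of `_line_numbers`, a worked example may help. The sketch below mirrors that decoding loop (the lnotab bytes are invented, Python 3 byte semantics are assumed, and negative line deltas are handled unconditionally rather than via env.PYBEHAVIOR):

def decode_lnotab(firstlineno, lnotab):
    # lnotab is a stream of (byte_increment, line_increment) pairs,
    # relative to co_firstlineno and the start of the bytecode.
    last_line = None
    line = firstlineno
    for byte_incr, line_incr in zip(lnotab[0::2], lnotab[1::2]):
        if byte_incr and line != last_line:
            yield line
            last_line = line
        if line_incr >= 0x80:        # negative deltas are stored two's-complement
            line_incr -= 0x100
        line += line_incr
    if line != last_line:
        yield line

list(decode_lnotab(10, b'\x06\x01\x08\x02'))   # -> [10, 11, 13]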
-
-#
-# AST analysis
-#
-
-class LoopBlock(object):
- """A block on the block stack representing a `for` or `while` loop."""
- @contract(start=int)
- def __init__(self, start):
- # The line number where the loop starts.
- self.start = start
- # A set of ArcStarts, the arcs from break statements exiting this loop.
- self.break_exits = set()
-
-
-class FunctionBlock(object):
- """A block on the block stack representing a function definition."""
- @contract(start=int, name=str)
- def __init__(self, start, name):
- # The line number where the function starts.
- self.start = start
- # The name of the function.
- self.name = name
-
-
-class TryBlock(object):
- """A block on the block stack representing a `try` block."""
- @contract(handler_start='int|None', final_start='int|None')
- def __init__(self, handler_start, final_start):
- # The line number of the first "except" handler, if any.
- self.handler_start = handler_start
- # The line number of the "finally:" clause, if any.
- self.final_start = final_start
-
- # The ArcStarts for breaks/continues/returns/raises inside the "try:"
- # that need to route through the "finally:" clause.
- self.break_from = set()
- self.continue_from = set()
- self.return_from = set()
- self.raise_from = set()
-
-
-class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
- """The information needed to start an arc.
-
- `lineno` is the line number the arc starts from.
-
- `cause` is an English text fragment used as the `startmsg` for
- AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
- arc wasn't executed, so should fit well into a sentence of the form,
- "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
- to have `lineno` interpolated into it.
-
- """
- def __new__(cls, lineno, cause=None):
- return super(ArcStart, cls).__new__(cls, lineno, cause)
-
-
-# Define contract words that PyContracts doesn't have.
-# ArcStarts is for a list or set of ArcStart's.
-new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
-
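For example, a cause fragment is later interpolated with the starting line number (17 here is arbitrary):

start = ArcStart(17, cause="the condition on line {lineno} was never true")
start.cause.format(lineno=start.lineno)
# -> "the condition on line 17 was never true"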
-
-# Turn on AST dumps with an environment variable.
-# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
-AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
-
-class NodeList(object):
- """A synthetic fictitious node, containing a sequence of nodes.
-
- This is used when collapsing optimized if-statements, to represent the
- unconditional execution of one of the clauses.
-
- """
- def __init__(self, body):
- self.body = body
- self.lineno = body[0].lineno
-
-
-# TODO: some add_arcs methods here don't add arcs, they return them. Rename them.
-# TODO: the cause messages have too many commas.
-# TODO: Shouldn't the cause messages join with "and" instead of "or"?
-
-class AstArcAnalyzer(object):
- """Analyze source text with an AST to find executable code paths."""
-
- @contract(text='unicode', statements=set)
- def __init__(self, text, statements, multiline):
- self.root_node = ast.parse(neuter_encoding_declaration(text))
- # TODO: I think this is happening in too many places.
- self.statements = {multiline.get(l, l) for l in statements}
- self.multiline = multiline
-
- if AST_DUMP: # pragma: debugging
- # Dump the AST so that failing tests have helpful output.
- print("Statements: {}".format(self.statements))
- print("Multiline map: {}".format(self.multiline))
- ast_dump(self.root_node)
-
- self.arcs = set()
-
- # A map from arc pairs to a list of pairs of sentence fragments:
- # { (start, end): [(startmsg, endmsg), ...], }
- #
- # For an arc from line 17, they should be usable like:
- # "Line 17 {endmsg}, because {startmsg}"
- self.missing_arc_fragments = collections.defaultdict(list)
- self.block_stack = []
- self.funcdefs = set()
-
- # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
- self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
-
- def analyze(self):
- """Examine the AST tree from `root_node` to determine possible arcs.
-
- This sets the `arcs` attribute to be a set of (from, to) line number
- pairs.
-
- """
- for node in ast.walk(self.root_node):
- node_name = node.__class__.__name__
- code_object_handler = getattr(self, "_code_object__" + node_name, None)
- if code_object_handler is not None:
- code_object_handler(node)
-
- @contract(start=int, end=int)
- def add_arc(self, start, end, smsg=None, emsg=None):
- """Add an arc, including message fragments to use if it is missing."""
- if self.debug: # pragma: debugging
- print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
- print(short_stack(limit=6))
- self.arcs.add((start, end))
-
- if smsg is not None or emsg is not None:
- self.missing_arc_fragments[(start, end)].append((smsg, emsg))
-
- def nearest_blocks(self):
- """Yield the blocks in nearest-to-farthest order."""
- return reversed(self.block_stack)
-
- @contract(returns=int)
- def line_for_node(self, node):
- """What is the right line number to use for this node?
-
- This dispatches to _line__Node functions where needed.
-
- """
- node_name = node.__class__.__name__
- handler = getattr(self, "_line__" + node_name, None)
- if handler is not None:
- return handler(node)
- else:
- return node.lineno
-
- def _line_decorated(self, node):
- """Compute first line number for things that can be decorated (classes and functions)."""
- lineno = node.lineno
- if env.PYBEHAVIOR.trace_decorated_def:
- if node.decorator_list:
- lineno = node.decorator_list[0].lineno
- return lineno
-
- def _line__Assign(self, node):
- return self.line_for_node(node.value)
-
- _line__ClassDef = _line_decorated
-
- def _line__Dict(self, node):
- # Python 3.5 changed how dict literals are made.
- if env.PYVERSION >= (3, 5) and node.keys:
- if node.keys[0] is not None:
- return node.keys[0].lineno
- else:
- # Unpacked dict literals `{**{'a':1}}` have None as the key,
- # use the value in that case.
- return node.values[0].lineno
- else:
- return node.lineno
-
- _line__FunctionDef = _line_decorated
- _line__AsyncFunctionDef = _line_decorated
-
- def _line__List(self, node):
- if node.elts:
- return self.line_for_node(node.elts[0])
- else:
- return node.lineno
-
- def _line__Module(self, node):
- if env.PYBEHAVIOR.module_firstline_1:
- return 1
- elif node.body:
- return self.line_for_node(node.body[0])
- else:
- # Empty modules have no line number, they always start at 1.
- return 1
-
- # The node types that just flow to the next node with no complications.
- OK_TO_DEFAULT = {
- "Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
- "Import", "ImportFrom", "Nonlocal", "Pass", "Print",
- }
-
- @contract(returns='ArcStarts')
- def add_arcs(self, node):
- """Add the arcs for `node`.
-
- Return a set of ArcStarts, exits from this node to the next. Because a
- node represents an entire sub-tree (including its children), the exits
- from a node can be arbitrarily complex::
-
- if something(1):
- if other(2):
- doit(3)
- else:
- doit(5)
-
- There are two exits from line 1: they start at line 3 and line 5.
-
- """
- node_name = node.__class__.__name__
- handler = getattr(self, "_handle__" + node_name, None)
- if handler is not None:
- return handler(node)
- else:
- # No handler: either it's something that's ok to default (a simple
- # statement), or it's something we overlooked. Change this 0 to 1
- # to see if it's overlooked.
- if 0:
- if node_name not in self.OK_TO_DEFAULT:
- print("*** Unhandled: {}".format(node))
-
- # Default for simple statements: one exit from this node.
- return {ArcStart(self.line_for_node(node))}
-
- @one_of("from_start, prev_starts")
- @contract(returns='ArcStarts')
- def add_body_arcs(self, body, from_start=None, prev_starts=None):
- """Add arcs for the body of a compound statement.
-
- `body` is the body node. `from_start` is a single `ArcStart` that can
- be the previous line in flow before this body. `prev_starts` is a set
- of ArcStarts that can be the previous line. Only one of them should be
- given.
-
- Returns a set of ArcStarts, the exits from this body.
-
- """
- if prev_starts is None:
- prev_starts = {from_start}
- for body_node in body:
- lineno = self.line_for_node(body_node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line not in self.statements:
- body_node = self.find_non_missing_node(body_node)
- if body_node is None:
- continue
- lineno = self.line_for_node(body_node)
- for prev_start in prev_starts:
- self.add_arc(prev_start.lineno, lineno, prev_start.cause)
- prev_starts = self.add_arcs(body_node)
- return prev_starts
-
- def find_non_missing_node(self, node):
- """Search `node` looking for a child that has not been optimized away.
-
- This might return the node you started with, or it will work recursively
- to find a child node in self.statements.
-
- Returns a node, or None if none of the node remains.
-
- Returns a node, or None if nothing of the node remains.
- # This repeats work just done in add_body_arcs, but this duplication
- # means we can avoid a function call in the 99.9999% case of not
- # optimizing away statements.
- lineno = self.line_for_node(node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line in self.statements:
- return node
-
- missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
- if missing_fn:
- node = missing_fn(node)
- else:
- node = None
- return node
-
- # Missing nodes: _missing__*
- #
- # Entire statements can be optimized away by Python. They will appear in
- # the AST, but not the bytecode. These functions are called (by
- # find_non_missing_node) to find a node to use instead of the missing
- # node. They can return None if the node should truly be gone.
-
- def _missing__If(self, node):
- # If the if-node is missing, then one of its children might still be
- # here, but not both. So return the first of the two that isn't missing.
- # Use a NodeList to hold the clauses as a single node.
- non_missing = self.find_non_missing_node(NodeList(node.body))
- if non_missing:
- return non_missing
- if node.orelse:
- return self.find_non_missing_node(NodeList(node.orelse))
- return None
-
- def _missing__NodeList(self, node):
- # A NodeList might be a mixture of missing and present nodes. Find the
- # ones that are present.
- non_missing_children = []
- for child in node.body:
- child = self.find_non_missing_node(child)
- if child is not None:
- non_missing_children.append(child)
-
- # Return the simplest representation of the present children.
- if not non_missing_children:
- return None
- if len(non_missing_children) == 1:
- return non_missing_children[0]
- return NodeList(non_missing_children)
-
- def _missing__While(self, node):
- body_nodes = self.find_non_missing_node(NodeList(node.body))
- if not body_nodes:
- return None
- # Make a synthetic While-true node.
- new_while = ast.While()
- new_while.lineno = body_nodes.lineno
- new_while.test = ast.Name()
- new_while.test.lineno = body_nodes.lineno
- new_while.test.id = "True"
- new_while.body = body_nodes.body
- new_while.orelse = None
- return new_while
-
- def is_constant_expr(self, node):
- """Is this a compile-time constant?"""
- node_name = node.__class__.__name__
- if node_name in ["Constant", "NameConstant", "Num"]:
- return "Num"
- elif node_name == "Name":
- if node.id in ["True", "False", "None", "__debug__"]:
- return "Name"
- return None
-
- # In the fullness of time, these might be good tests to write:
- # while EXPR:
- # while False:
- # listcomps hidden deep in other expressions
- # listcomps hidden in lists: x = [[i for i in range(10)]]
- # nested function definitions
-
-
- # Exit processing: process_*_exits
- #
- # These functions process the four kinds of jump exits: break, continue,
- # raise, and return. To figure out where an exit goes, we have to look at
- # the block stack context. For example, a break will jump to the nearest
- # enclosing loop block, or the nearest enclosing finally block, whichever
- # is nearer.
-
- @contract(exits='ArcStarts')
- def process_break_exits(self, exits):
- """Add arcs due to jumps from `exits` being breaks."""
- for block in self.nearest_blocks():
- if isinstance(block, LoopBlock):
- block.break_exits.update(exits)
- break
- elif isinstance(block, TryBlock) and block.final_start is not None:
- block.break_from.update(exits)
- break
-
- @contract(exits='ArcStarts')
- def process_continue_exits(self, exits):
- """Add arcs due to jumps from `exits` being continues."""
- for block in self.nearest_blocks():
- if isinstance(block, LoopBlock):
- for xit in exits:
- self.add_arc(xit.lineno, block.start, xit.cause)
- break
- elif isinstance(block, TryBlock) and block.final_start is not None:
- block.continue_from.update(exits)
- break
-
- @contract(exits='ArcStarts')
- def process_raise_exits(self, exits):
- """Add arcs due to jumps from `exits` being raises."""
- for block in self.nearest_blocks():
- if isinstance(block, TryBlock):
- if block.handler_start is not None:
- for xit in exits:
- self.add_arc(xit.lineno, block.handler_start, xit.cause)
- break
- elif block.final_start is not None:
- block.raise_from.update(exits)
- break
- elif isinstance(block, FunctionBlock):
- for xit in exits:
- self.add_arc(
- xit.lineno, -block.start, xit.cause,
- "didn't except from function {!r}".format(block.name),
- )
- break
-
- @contract(exits='ArcStarts')
- def process_return_exits(self, exits):
- """Add arcs due to jumps from `exits` being returns."""
- for block in self.nearest_blocks():
- if isinstance(block, TryBlock) and block.final_start is not None:
- block.return_from.update(exits)
- break
- elif isinstance(block, FunctionBlock):
- for xit in exits:
- self.add_arc(
- xit.lineno, -block.start, xit.cause,
- "didn't return from function {!r}".format(block.name),
- )
- break
-
-
- # Handlers: _handle__*
- #
- # Each handler deals with a specific AST node type, dispatched from
- # add_arcs. Handlers return the set of exits from that node, and can
- # also call self.add_arc to record arcs they find. These functions mirror
- # the Python semantics of each syntactic construct. See the docstring
- # for add_arcs to understand the concept of exits from a node.
-
- @contract(returns='ArcStarts')
- def _handle__Break(self, node):
- here = self.line_for_node(node)
- break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
- self.process_break_exits([break_start])
- return set()
-
- @contract(returns='ArcStarts')
- def _handle_decorated(self, node):
- """Add arcs for things that can be decorated (classes and functions)."""
- main_line = last = node.lineno
- if node.decorator_list:
- if env.PYBEHAVIOR.trace_decorated_def:
- last = None
- for dec_node in node.decorator_list:
- dec_start = self.line_for_node(dec_node)
- if last is not None and dec_start != last:
- self.add_arc(last, dec_start)
- last = dec_start
- if env.PYBEHAVIOR.trace_decorated_def:
- self.add_arc(last, main_line)
- last = main_line
- # The definition line may have been missed, but we should have it
- # in `self.statements`. For some constructs, `line_for_node` is
- # not what we'd think of as the first line in the statement, so map
- # it to the first one.
- if node.body:
- body_start = self.line_for_node(node.body[0])
- body_start = self.multiline.get(body_start, body_start)
- for lineno in range(last+1, body_start):
- if lineno in self.statements:
- self.add_arc(last, lineno)
- last = lineno
- # The body is handled in collect_arcs.
- return {ArcStart(last)}
-
- _handle__ClassDef = _handle_decorated
-
- @contract(returns='ArcStarts')
- def _handle__Continue(self, node):
- here = self.line_for_node(node)
- continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
- self.process_continue_exits([continue_start])
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__For(self, node):
- start = self.line_for_node(node.iter)
- self.block_stack.append(LoopBlock(start=start))
- from_start = ArcStart(start, cause="the loop on line {lineno} never started")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- # Any exit from the body will go back to the top of the loop.
- for xit in exits:
- self.add_arc(xit.lineno, start, xit.cause)
- my_block = self.block_stack.pop()
- exits = my_block.break_exits
- from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
- if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
- exits |= else_exits
- else:
- # No else clause: exit from the for line.
- exits.add(from_start)
- return exits
-
- _handle__AsyncFor = _handle__For
-
- _handle__FunctionDef = _handle_decorated
- _handle__AsyncFunctionDef = _handle_decorated
-
- @contract(returns='ArcStarts')
- def _handle__If(self, node):
- start = self.line_for_node(node.test)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
- exits |= self.add_body_arcs(node.orelse, from_start=from_start)
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__NodeList(self, node):
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__Raise(self, node):
- here = self.line_for_node(node)
- raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
- self.process_raise_exits([raise_start])
- # `raise` statement jumps away, no exits from here.
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__Return(self, node):
- here = self.line_for_node(node)
- return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
- self.process_return_exits([return_start])
- # `return` statement jumps away, no exits from here.
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__Try(self, node):
- if node.handlers:
- handler_start = self.line_for_node(node.handlers[0])
- else:
- handler_start = None
-
- if node.finalbody:
- final_start = self.line_for_node(node.finalbody[0])
- else:
- final_start = None
-
- try_block = TryBlock(handler_start, final_start)
- self.block_stack.append(try_block)
-
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
-
- # We're done with the `try` body, so this block no longer handles
- # exceptions. We keep the block so the `finally` clause can pick up
- # flows from the handlers and `else` clause.
- if node.finalbody:
- try_block.handler_start = None
- if node.handlers:
- # If there are `except` clauses, then raises in the try body
- # will already jump to them. Start this set over for raises in
- # `except` and `else`.
- try_block.raise_from = set()
- else:
- self.block_stack.pop()
-
- handler_exits = set()
-
- if node.handlers:
- last_handler_start = None
- for handler_node in node.handlers:
- handler_start = self.line_for_node(handler_node)
- if last_handler_start is not None:
- self.add_arc(last_handler_start, handler_start)
- last_handler_start = handler_start
- from_cause = "the exception caught by line {lineno} didn't happen"
- from_start = ArcStart(handler_start, cause=from_cause)
- handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
-
- if node.orelse:
- exits = self.add_body_arcs(node.orelse, prev_starts=exits)
-
- exits |= handler_exits
-
- if node.finalbody:
- self.block_stack.pop()
- final_from = ( # You can get to the `finally` clause from:
- exits | # the exits of the body or `else` clause,
- try_block.break_from | # or a `break`,
- try_block.continue_from | # or a `continue`,
- try_block.raise_from | # or a `raise`,
- try_block.return_from # or a `return`.
- )
-
- final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
-
- if try_block.break_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for break_line in try_block.break_from:
- lineno = break_line.lineno
- cause = break_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- breaks = try_block.break_from
- else:
- breaks = self._combine_finally_starts(try_block.break_from, final_exits)
- self.process_break_exits(breaks)
-
- if try_block.continue_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for continue_line in try_block.continue_from:
- lineno = continue_line.lineno
- cause = continue_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- continues = try_block.continue_from
- else:
- continues = self._combine_finally_starts(try_block.continue_from, final_exits)
- self.process_continue_exits(continues)
-
- if try_block.raise_from:
- self.process_raise_exits(
- self._combine_finally_starts(try_block.raise_from, final_exits)
- )
-
- if try_block.return_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for return_line in try_block.return_from:
- lineno = return_line.lineno
- cause = return_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- returns = try_block.return_from
- else:
- returns = self._combine_finally_starts(try_block.return_from, final_exits)
- self.process_return_exits(returns)
-
- if exits:
- # The finally clause's exits are only exits for the try block
- # as a whole if the try block had some exits to begin with.
- exits = final_exits
-
- return exits
-
- @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
- def _combine_finally_starts(self, starts, exits):
- """Helper for building the cause of `finally` branches.
-
- "finally" clauses might not execute their exits, and the causes could
- be due to a failure to execute any of the exits in the try block. So
- we use the causes from `starts` as the causes for `exits`.
- """
- causes = []
- for start in sorted(starts):
- if start.cause is not None:
- causes.append(start.cause.format(lineno=start.lineno))
- cause = " or ".join(causes)
- exits = {ArcStart(xit.lineno, cause) for xit in exits}
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__TryExcept(self, node):
- # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
- # TryExcept, it means there was no finally clause, so fake one and
- # treat it as a general Try node.
- node.finalbody = []
- return self._handle__Try(node)
-
- @contract(returns='ArcStarts')
- def _handle__TryFinally(self, node):
- # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
- # TryFinally, see if there's a TryExcept nested inside. If so, merge
- # them. Otherwise, fake fields to complete a Try node.
- node.handlers = []
- node.orelse = []
-
- first = node.body[0]
- if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
- assert len(node.body) == 1
- node.body = first.body
- node.handlers = first.handlers
- node.orelse = first.orelse
-
- return self._handle__Try(node)
-
- @contract(returns='ArcStarts')
- def _handle__While(self, node):
- start = to_top = self.line_for_node(node.test)
- constant_test = self.is_constant_expr(node.test)
- top_is_body0 = False
- if constant_test and (env.PY3 or constant_test == "Num"):
- top_is_body0 = True
- if env.PYBEHAVIOR.keep_constant_test:
- top_is_body0 = False
- if top_is_body0:
- to_top = self.line_for_node(node.body[0])
- self.block_stack.append(LoopBlock(start=to_top))
- from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- for xit in exits:
- self.add_arc(xit.lineno, to_top, xit.cause)
- exits = set()
- my_block = self.block_stack.pop()
- exits.update(my_block.break_exits)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
- if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
- exits |= else_exits
- else:
- # No `else` clause: you can exit from the start.
- if not constant_test:
- exits.add(from_start)
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__With(self, node):
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- return exits
-
- _handle__AsyncWith = _handle__With
-
- def _code_object__Module(self, node):
- start = self.line_for_node(node)
- if node.body:
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- for xit in exits:
- self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
- else:
- # Empty module.
- self.add_arc(-start, start)
- self.add_arc(start, -start)
-
- def _process_function_def(self, start, node):
- self.funcdefs.add((start, node.body[-1].lineno, node.name))
-
- def _code_object__FunctionDef(self, node):
- start = self.line_for_node(node)
- self.block_stack.append(FunctionBlock(start=start, name=node.name))
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- self.process_return_exits(exits)
- self._process_function_def(start, node)
- self.block_stack.pop()
-
- _code_object__AsyncFunctionDef = _code_object__FunctionDef
-
- def _code_object__ClassDef(self, node):
- start = self.line_for_node(node)
- self.add_arc(-start, start)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- for xit in exits:
- self.add_arc(
- xit.lineno, -start, xit.cause,
- "didn't exit the body of class {!r}".format(node.name),
- )
-
- def _make_oneline_code_method(noun): # pylint: disable=no-self-argument
- """A function to make methods for one-line callable _code_object__ methods."""
- def _code_object__oneline_callable(self, node):
- start = self.line_for_node(node)
- self.add_arc(-start, start, None, "didn't run the {} on line {}".format(noun, start))
- self.add_arc(
- start, -start, None,
- "didn't finish the {} on line {}".format(noun, start),
- )
- return _code_object__oneline_callable
-
- _code_object__Lambda = _make_oneline_code_method("lambda")
- _code_object__GeneratorExp = _make_oneline_code_method("generator expression")
- _code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
- _code_object__SetComp = _make_oneline_code_method("set comprehension")
- if env.PY3:
- _code_object__ListComp = _make_oneline_code_method("list comprehension")
-
-
-if AST_DUMP: # pragma: debugging
- # Code only used when dumping the AST for debugging.
-
- SKIP_DUMP_FIELDS = ["ctx"]
-
- def _is_simple_value(value):
- """Is `value` simple enough to be displayed on a single line?"""
- return (
- value in [None, [], (), {}, set()] or
- isinstance(value, (string_class, int, float))
- )
-
- def ast_dump(node, depth=0):
- """Dump the AST for `node`.
-
- This recursively walks the AST, printing a readable version.
-
- """
- indent = " " * depth
- if not isinstance(node, ast.AST):
- print("{}<{} {!r}>".format(indent, node.__class__.__name__, node))
- return
-
- lineno = getattr(node, "lineno", None)
- if lineno is not None:
- linemark = " @ {}".format(node.lineno)
- else:
- linemark = ""
- head = "{}<{}{}".format(indent, node.__class__.__name__, linemark)
-
- named_fields = [
- (name, value)
- for name, value in ast.iter_fields(node)
- if name not in SKIP_DUMP_FIELDS
- ]
- if not named_fields:
- print("{}>".format(head))
- elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
- field_name, value = named_fields[0]
- print("{} {}: {!r}>".format(head, field_name, value))
- else:
- print(head)
- if 0:
- print("{}# mro: {}".format(
- indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
- ))
- next_indent = indent + " "
- for field_name, value in named_fields:
- prefix = "{}{}:".format(next_indent, field_name)
- if _is_simple_value(value):
- print("{} {!r}".format(prefix, value))
- elif isinstance(value, list):
- print("{} [".format(prefix))
- for n in value:
- ast_dump(n, depth + 8)
- print("{}]".format(next_indent))
- else:
- print(prefix)
- ast_dump(value, depth + 8)
-
- print("{}>".format(indent))
diff --git a/contrib/python/coverage/py2/coverage/phystokens.py b/contrib/python/coverage/py2/coverage/phystokens.py
deleted file mode 100644
index 54378b3bc8..0000000000
--- a/contrib/python/coverage/py2/coverage/phystokens.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Better tokenizing for coverage.py."""
-
-import codecs
-import keyword
-import re
-import sys
-import token
-import tokenize
-
-from coverage import env
-from coverage.backward import iternext, unicode_class
-from coverage.misc import contract
-
-
-def phys_tokens(toks):
- """Return all physical tokens, even line continuations.
-
- tokenize.generate_tokens() doesn't return a token for the backslash that
- continues lines. This wrapper provides those tokens so that we can
- re-create a faithful representation of the original source.
-
- Returns the same values as generate_tokens()
-
- """
- last_line = None
- last_lineno = -1
- last_ttext = None
- for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
- if last_lineno != elineno:
- if last_line and last_line.endswith("\\\n"):
- # We are at the beginning of a new line, and the last line
- # ended with a backslash. We probably have to inject a
- # backslash token into the stream. Unfortunately, there's more
- # to figure out. This code::
- #
- # usage = """\
- # HEY THERE
- # """
- #
- # triggers this condition, but the token text is::
- #
- # '"""\\\nHEY THERE\n"""'
- #
- # so we need to figure out if the backslash is already in the
- # string token or not.
- inject_backslash = True
- if last_ttext.endswith("\\"):
- inject_backslash = False
- elif ttype == token.STRING:
- if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
- # It's a multi-line string and the first line ends with
- # a backslash, so we don't need to inject another.
- inject_backslash = False
- if inject_backslash:
- # Figure out what column the backslash is in.
- ccol = len(last_line.split("\n")[-2]) - 1
- # Yield the token, with a fake token type.
- yield (
- 99999, "\\\n",
- (slineno, ccol), (slineno, ccol+2),
- last_line
- )
- last_line = ltext
- if ttype not in (tokenize.NEWLINE, tokenize.NL):
- last_ttext = ttext
- yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
- last_lineno = elineno
-
-
-@contract(source='unicode')
-def source_token_lines(source):
- """Generate a series of lines, one for each line in `source`.
-
- Each line is a list of pairs, each pair is a token::
-
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
-
- Each pair has a token class, and the token text.
-
- If you concatenate all the token texts, and then join them with newlines,
- you should have your original `source` back, with two differences:
- trailing whitespace is not preserved, and a final line with no newline
- is indistinguishable from a final line with a newline.
-
- """
-
- ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
- line = []
- col = 0
-
- source = source.expandtabs(8).replace('\r\n', '\n')
- tokgen = generate_tokens(source)
-
- for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
- mark_start = True
- for part in re.split('(\n)', ttext):
- if part == '\n':
- yield line
- line = []
- col = 0
- mark_end = False
- elif part == '':
- mark_end = False
- elif ttype in ws_tokens:
- mark_end = False
- else:
- if mark_start and scol > col:
- line.append(("ws", u" " * (scol - col)))
- mark_start = False
- tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
- if ttype == token.NAME and keyword.iskeyword(ttext):
- tok_class = "key"
- line.append((tok_class, part))
- mark_end = True
- scol = 0
- if mark_end:
- col = ecol
-
- if line:
- yield line
-
-
-class CachedTokenizer(object):
- """A one-element cache around tokenize.generate_tokens.
-
- When reporting, coverage.py tokenizes files twice, once to find the
- structure of the file, and once to syntax-color it. Tokenizing is
- expensive, and easily cached.
-
- This is a one-element cache so that our twice-in-a-row tokenizing doesn't
- actually tokenize twice.
-
- """
- def __init__(self):
- self.last_text = None
- self.last_tokens = None
-
- @contract(text='unicode')
- def generate_tokens(self, text):
- """A stand-in for `tokenize.generate_tokens`."""
- if text != self.last_text:
- self.last_text = text
- readline = iternext(text.splitlines(True))
- self.last_tokens = list(tokenize.generate_tokens(readline))
- return self.last_tokens
-
-# Create our generate_tokens cache as a callable replacement function.
-generate_tokens = CachedTokenizer().generate_tokens
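
A quick illustrative sketch (editorial, not part of the deleted file) of the
one-element cache in action, assuming this vendored module imports as
coverage.phystokens::

    from coverage.phystokens import generate_tokens

    text = u"x = 1\ny = 2\n"
    first = generate_tokens(text)
    second = generate_tokens(text)
    assert first is second                           # same list object: tokenized only once
    assert generate_tokens(u"z = 3\n") is not first  # new text, tokenized afresh
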
-
-
-COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
-
-@contract(source='bytes')
-def _source_encoding_py2(source):
- """Determine the encoding for `source`, according to PEP 263.
-
- `source` is a byte string, the text of the program.
-
- Returns a string, the name of the encoding.
-
- """
- assert isinstance(source, bytes)
-
- # Do this so the detect_encoding code we copied will work.
- readline = iternext(source.splitlines(True))
-
- # This is mostly code adapted from Py3.2's tokenize module.
-
- def _get_normal_name(orig_enc):
- """Imitates get_normal_name in tokenizer.c."""
- # Only care about the first 12 characters.
- enc = orig_enc[:12].lower().replace("_", "-")
- if re.match(r"^utf-8($|-)", enc):
- return "utf-8"
- if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
- return "iso-8859-1"
- return orig_enc
-
- # From detect_encoding():
- # It detects the encoding from the presence of a UTF-8 BOM or an encoding
- # cookie as specified in PEP-0263. If both a BOM and a cookie are present,
- # but disagree, a SyntaxError will be raised. If the encoding cookie is an
- # invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
- # 'utf-8-sig' is returned.
-
- # If no encoding is specified, then the default will be returned.
- default = 'ascii'
-
- bom_found = False
- encoding = None
-
- def read_or_stop():
- """Get the next source line, or ''."""
- try:
- return readline()
- except StopIteration:
- return ''
-
- def find_cookie(line):
- """Find an encoding cookie in `line`."""
- try:
- line_string = line.decode('ascii')
- except UnicodeDecodeError:
- return None
-
- matches = COOKIE_RE.findall(line_string)
- if not matches:
- return None
- encoding = _get_normal_name(matches[0])
- try:
- codec = codecs.lookup(encoding)
- except LookupError:
- # This behavior mimics the Python interpreter
- raise SyntaxError("unknown encoding: " + encoding)
-
- if bom_found:
- # codecs in 2.3 were raw tuples of functions, assume the best.
- codec_name = getattr(codec, 'name', encoding)
- if codec_name != 'utf-8':
- # This behavior mimics the Python interpreter
- raise SyntaxError('encoding problem: utf-8')
- encoding += '-sig'
- return encoding
-
- first = read_or_stop()
- if first.startswith(codecs.BOM_UTF8):
- bom_found = True
- first = first[3:]
- default = 'utf-8-sig'
- if not first:
- return default
-
- encoding = find_cookie(first)
- if encoding:
- return encoding
-
- second = read_or_stop()
- if not second:
- return default
-
- encoding = find_cookie(second)
- if encoding:
- return encoding
-
- return default
-
-
-@contract(source='bytes')
-def _source_encoding_py3(source):
- """Determine the encoding for `source`, according to PEP 263.
-
- `source` is a byte string: the text of the program.
-
- Returns a string, the name of the encoding.
-
- """
- readline = iternext(source.splitlines(True))
- return tokenize.detect_encoding(readline)[0]
-
-
-if env.PY3:
- source_encoding = _source_encoding_py3
-else:
- source_encoding = _source_encoding_py2
-
-
-@contract(source='unicode')
-def compile_unicode(source, filename, mode):
- """Just like the `compile` builtin, but works on any Unicode string.
-
- Python 2's compile() builtin has a stupid restriction: if the source string
- is Unicode, then it may not have an encoding declaration in it. Why not?
- Who knows! It also encodes the string to UTF-8, and then tries to interpret
- those UTF-8 bytes according to the encoding declaration. Why? Who knows!
-
- This function neuters the coding declaration in `source`, then compiles the
- result.
-
- """
- source = neuter_encoding_declaration(source)
- if env.PY2 and isinstance(filename, unicode_class):
- filename = filename.encode(sys.getfilesystemencoding(), "replace")
- code = compile(source, filename, mode)
- return code
-
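
A minimal sketch of compile_unicode at work; the source string and names below
are invented for illustration, and the import assumes this vendored module is
importable as coverage.phystokens::

    from coverage.phystokens import compile_unicode

    src = u"# -*- coding: utf-8 -*-\nGREETING = u'hi'\n"
    code = compile_unicode(src, "<demo>", "exec")  # plain compile() rejects this on Python 2
    namespace = {}
    exec(code, namespace)
    assert namespace["GREETING"] == u"hi"
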
-
-@contract(source='unicode', returns='unicode')
-def neuter_encoding_declaration(source):
- """Return `source`, with any encoding declaration neutered."""
- if COOKIE_RE.search(source):
- source_lines = source.splitlines(True)
- for lineno in range(min(2, len(source_lines))):
- source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
- source = "".join(source_lines)
- return source
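
To round out the picture, a small illustrative run of the public helpers above
(a sketch under the assumption that the vendored package imports as
coverage.phystokens; the sample source is made up)::

    from coverage.phystokens import source_token_lines, neuter_encoding_declaration

    sample = u"# -*- coding: utf-8 -*-\ndef hello():\n    return 'hi'\n"
    print(neuter_encoding_declaration(sample))  # the cookie is replaced with "# (deleted declaration)"
    for line in source_token_lines(sample):
        print(line)  # e.g. [('key', u'def'), ('ws', u' '), ('nam', u'hello'), ('op', u'('), ...]
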
diff --git a/contrib/python/coverage/py2/coverage/plugin.py b/contrib/python/coverage/py2/coverage/plugin.py
deleted file mode 100644
index 6997b489bb..0000000000
--- a/contrib/python/coverage/py2/coverage/plugin.py
+++ /dev/null
@@ -1,533 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""
-.. versionadded:: 4.0
-
-Plug-in interfaces for coverage.py.
-
-Coverage.py supports a few different kinds of plug-ins that change its
-behavior:
-
-* File tracers implement tracing of non-Python file types.
-
-* Configurers add custom configuration, using Python code to change the
- configuration.
-
-* Dynamic context switchers decide when the dynamic context has changed, for
- example, to record what test function produced the coverage.
-
-To write a coverage.py plug-in, create a module with a subclass of
-:class:`~coverage.CoveragePlugin`. You will override methods in your class to
-participate in various aspects of coverage.py's processing.
-Different types of plug-ins have to override different methods.
-
-Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info`
-to provide debugging information about its operation.
-
-Your module must also contain a ``coverage_init`` function that registers an
-instance of your plug-in class::
-
- import coverage
-
- class MyPlugin(coverage.CoveragePlugin):
- ...
-
- def coverage_init(reg, options):
- reg.add_file_tracer(MyPlugin())
-
-You use the `reg` parameter passed to your ``coverage_init`` function to
-register your plug-in object. The registration method you call depends on
-what kind of plug-in it is.
-
-If your plug-in takes options, the `options` parameter is a dictionary of your
-plug-in's options from the coverage.py configuration file. Use them however
-you want to configure your object before registering it.
-
-Coverage.py will store its own information on your plug-in object, using
-attributes whose names start with ``_coverage_``. Don't be startled.
-
-.. warning::
- Plug-ins are imported by coverage.py before it begins measuring code.
- If you write a plugin in your own project, it might import your product
- code before coverage.py can start measuring. This can result in your
- own code being reported as missing.
-
- One solution is to put your plugins in your project tree, but not in
- your importable Python package.
-
-
-.. _file_tracer_plugins:
-
-File Tracers
-============
-
-File tracers implement measurement support for non-Python files. File tracers
-implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim
-files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report
-on those files.
-
-In your ``coverage_init`` function, use the ``add_file_tracer`` method to
-register your file tracer.
-
-
-.. _configurer_plugins:
-
-Configurers
-===========
-
-.. versionadded:: 4.5
-
-Configurers modify the configuration of coverage.py during start-up.
-Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to
-change the configuration.
-
-In your ``coverage_init`` function, use the ``add_configurer`` method to
-register your configurer.
-
-
-.. _dynamic_context_plugins:
-
-Dynamic Context Switchers
-=========================
-
-.. versionadded:: 5.0
-
-Dynamic context switcher plugins implement the
-:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute
-the context label for each measured frame.
-
-Computed context labels are useful when you want to group measured data without
-modifying the source code.
-
-For example, you could write a plugin that checks `frame.f_code` to inspect
-the currently executed method, and set the context label to a fully qualified
-method name if it's an instance method of `unittest.TestCase` and the method
-name starts with 'test'. Such a plugin would provide basic coverage grouping
-by test and could be used with test runners that have no built-in coveragepy
-support.
-
-In your ``coverage_init`` function, use the ``add_dynamic_context`` method to
-register your dynamic context switcher.
-
-"""
-
-from coverage import files
-from coverage.misc import contract, _needs_to_implement
-
-
-class CoveragePlugin(object):
- """Base class for coverage.py plug-ins."""
-
- def file_tracer(self, filename): # pylint: disable=unused-argument
- """Get a :class:`FileTracer` object for a file.
-
- Plug-in type: file tracer.
-
- Every Python source file is offered to your plug-in to give it a chance
- to take responsibility for tracing the file. If your plug-in can
- handle the file, it should return a :class:`FileTracer` object.
- Otherwise return None.
-
- There is no way to register your plug-in for particular files.
- Instead, this method is invoked for all files as they are executed,
- and the plug-in decides whether it can trace the file or not.
- Be prepared for `filename` to refer to all kinds of files that have
- nothing to do with your plug-in.
-
- The file name will be a Python file being executed. There are two
- broad categories of behavior for a plug-in, depending on the kind of
- files your plug-in supports:
-
- * Static file names: each of your original source files has been
- converted into a distinct Python file. Your plug-in is invoked with
- the Python file name, and it maps it back to its original source
- file.
-
- * Dynamic file names: all of your source files are executed by the same
- Python file. In this case, your plug-in implements
- :meth:`FileTracer.dynamic_source_filename` to provide the actual
- source file for each execution frame.
-
- `filename` is a string, the path to the file being considered. This is
- the absolute real path to the file. If you are comparing to other
- paths, be sure to take this into account.
-
- Returns a :class:`FileTracer` object to use to trace `filename`, or
- None if this plug-in cannot trace this file.
-
- """
- return None
-
- def file_reporter(self, filename): # pylint: disable=unused-argument
- """Get the :class:`FileReporter` class to use for a file.
-
- Plug-in type: file tracer.
-
- This will only be invoked if :meth:`file_tracer` returned a non-None value
- for `filename`. It's an error to return None from this method.
-
- Returns a :class:`FileReporter` object to use to report on `filename`,
- or the string `"python"` to have coverage.py treat the file as Python.
-
- """
- _needs_to_implement(self, "file_reporter")
-
- def dynamic_context(self, frame): # pylint: disable=unused-argument
- """Get the dynamically computed context label for `frame`.
-
- Plug-in type: dynamic context.
-
- This method is invoked for each frame when outside of a dynamic
- context, to see if a new dynamic context should be started. If it
- returns a string, a new context label is set for this and deeper
- frames. The dynamic context ends when this frame returns.
-
- Returns a string to start a new dynamic context, or None if no new
- context should be started.
-
- """
- return None
-
- def find_executable_files(self, src_dir): # pylint: disable=unused-argument
- """Yield all of the executable files in `src_dir`, recursively.
-
- Plug-in type: file tracer.
-
- Executability is a plug-in-specific property, but generally means files
- which would have been considered for coverage analysis, had they been
- included automatically.
-
- Returns or yields a sequence of strings, the paths to files that could
- have been executed, including files that had been executed.
-
- """
- return []
-
- def configure(self, config):
- """Modify the configuration of coverage.py.
-
- Plug-in type: configurer.
-
- This method is called during coverage.py start-up, to give your plug-in
- a chance to change the configuration. The `config` parameter is an
- object with :meth:`~coverage.Coverage.get_option` and
- :meth:`~coverage.Coverage.set_option` methods. Do not call any other
- methods on the `config` object.
-
- """
- pass
-
- def sys_info(self):
- """Get a list of information useful for debugging.
-
- Plug-in type: any.
-
- This method will be invoked for ``--debug=sys``. Your
- plug-in can return any information it wants to be displayed.
-
- Returns a list of pairs: `[(name, value), ...]`.
-
- """
- return []
-
-
-class FileTracer(object):
- """Support needed for files during the execution phase.
-
- File tracer plug-ins implement subclasses of FileTracer to return from
- their :meth:`~CoveragePlugin.file_tracer` method.
-
- You may construct this object from :meth:`CoveragePlugin.file_tracer` any
- way you like. A natural choice would be to pass the file name given to
- `file_tracer`.
-
- `FileTracer` objects should only be created in the
- :meth:`CoveragePlugin.file_tracer` method.
-
- See :ref:`howitworks` for details of the different coverage.py phases.
-
- """
-
- def source_filename(self):
- """The source file name for this file.
-
- This may be any file name you like. A key responsibility of a plug-in
- is to own the mapping from Python execution back to whatever source
- file name was originally the source of the code.
-
- See :meth:`CoveragePlugin.file_tracer` for details about static and
- dynamic file names.
-
- Returns the file name to credit with this execution.
-
- """
- _needs_to_implement(self, "source_filename")
-
- def has_dynamic_source_filename(self):
- """Does this FileTracer have dynamic source file names?
-
- FileTracers can provide dynamically determined file names by
- implementing :meth:`dynamic_source_filename`. Invoking that method can be
- expensive, so coverage.py uses the result of this method to decide whether
- it needs to call :meth:`dynamic_source_filename` at all.
-
- See :meth:`CoveragePlugin.file_tracer` for details about static and
- dynamic file names.
-
- Returns True if :meth:`dynamic_source_filename` should be called to get
- dynamic source file names.
-
- """
- return False
-
- def dynamic_source_filename(self, filename, frame): # pylint: disable=unused-argument
- """Get a dynamically computed source file name.
-
- Some plug-ins need to compute the source file name dynamically for each
- frame.
-
- This function will not be invoked if
- :meth:`has_dynamic_source_filename` returns False.
-
- Returns the source file name for this frame, or None if this frame
- shouldn't be measured.
-
- """
- return None
-
- def line_number_range(self, frame):
- """Get the range of source line numbers for a given a call frame.
-
- The call frame is examined, and the source line number in the original
- file is returned. The return value is a pair of numbers, the starting
- line number and the ending line number, both inclusive. For example,
- returning (5, 7) means that lines 5, 6, and 7 should be considered
- executed.
-
- This function might decide that the frame doesn't indicate any lines
- from the source file were executed. Return (-1, -1) in this case to
- tell coverage.py that no lines should be recorded for this frame.
-
- """
- lineno = frame.f_lineno
- return lineno, lineno
-
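
A sketch of the "static file name" flavor of file tracer described in
:meth:`CoveragePlugin.file_tracer` above, for an imaginary template language
whose compiled files end in ".tmpl.py". Every name here is illustrative::

    from coverage.plugin import CoveragePlugin, FileTracer

    class TemplateTracer(FileTracer):
        def __init__(self, filename):
            self._filename = filename

        def source_filename(self):
            # Map the executed "foo.tmpl.py" back to the original "foo.tmpl".
            return self._filename[:-len(".py")]

    class TemplatePlugin(CoveragePlugin):
        def file_tracer(self, filename):
            if filename.endswith(".tmpl.py"):
                return TemplateTracer(filename)
            return None

        def file_reporter(self, filename):
            # A real plug-in would return its own FileReporter here; see the
            # sketch after the FileReporter class below.
            return "python"

    def coverage_init(reg, options):
        reg.add_file_tracer(TemplatePlugin())
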
-
-class FileReporter(object):
- """Support needed for files during the analysis and reporting phases.
-
- File tracer plug-ins implement a subclass of `FileReporter`, and return
- instances from their :meth:`CoveragePlugin.file_reporter` method.
-
- There are many methods here, but only :meth:`lines` is required, to provide
- the set of executable lines in the file.
-
- See :ref:`howitworks` for details of the different coverage.py phases.
-
- """
-
- def __init__(self, filename):
- """Simple initialization of a `FileReporter`.
-
- The `filename` argument is the path to the file being reported. This
- will be available as the `.filename` attribute on the object. Other
- method implementations on this base class rely on this attribute.
-
- """
- self.filename = filename
-
- def __repr__(self):
- return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
-
- def relative_filename(self):
- """Get the relative file name for this file.
-
- This file path will be displayed in reports. The default
- implementation will supply the actual project-relative file path. You
- only need to supply this method if you have an unusual syntax for file
- paths.
-
- """
- return files.relative_filename(self.filename)
-
- @contract(returns='unicode')
- def source(self):
- """Get the source for the file.
-
- Returns a Unicode string.
-
- The base implementation simply reads the `self.filename` file and
- decodes it as UTF8. Override this method if your file isn't readable
- as a text file, or if you need other encoding support.
-
- """
- with open(self.filename, "rb") as f:
- return f.read().decode("utf8")
-
- def lines(self):
- """Get the executable lines in this file.
-
- Your plug-in must determine which lines in the file were possibly
- executable. This method returns a set of those line numbers.
-
- Returns a set of line numbers.
-
- """
- _needs_to_implement(self, "lines")
-
- def excluded_lines(self):
- """Get the excluded executable lines in this file.
-
- Your plug-in can use any method it likes to allow the user to exclude
- executable lines from consideration.
-
- Returns a set of line numbers.
-
- The base implementation returns the empty set.
-
- """
- return set()
-
- def translate_lines(self, lines):
- """Translate recorded lines into reported lines.
-
- Some file formats will want to report lines slightly differently than
- they are recorded. For example, Python records the last line of a
- multi-line statement, but reports are nicer if they mention the first
- line.
-
- Your plug-in can optionally define this method to perform these kinds
- of adjustment.
-
- `lines` is a sequence of integers, the recorded line numbers.
-
- Returns a set of integers, the adjusted line numbers.
-
- The base implementation returns the numbers unchanged.
-
- """
- return set(lines)
-
- def arcs(self):
- """Get the executable arcs in this file.
-
- To support branch coverage, your plug-in needs to be able to indicate
- possible execution paths, as a set of line number pairs. Each pair is
- a `(prev, next)` pair indicating that execution can transition from the
- `prev` line number to the `next` line number.
-
- Returns a set of pairs of line numbers. The default implementation
- returns an empty set.
-
- """
- return set()
-
- def no_branch_lines(self):
- """Get the lines excused from branch coverage in this file.
-
- Your plug-in can use any method it likes to allow the user to exclude
- lines from consideration of branch coverage.
-
- Returns a set of line numbers.
-
- The base implementation returns the empty set.
-
- """
- return set()
-
- def translate_arcs(self, arcs):
- """Translate recorded arcs into reported arcs.
-
- Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
- line number pairs.
-
- Returns a set of line number pairs.
-
- The default implementation returns `arcs` unchanged.
-
- """
- return arcs
-
- def exit_counts(self):
- """Get a count of exits from that each line.
-
- To determine which lines are branches, coverage.py looks for lines that
- have more than one exit. This function creates a dict mapping each
- executable line number to a count of how many exits it has.
-
- To be honest, this feels wrong, and should be refactored. Let me know
- if you attempt to implement this method in your plug-in...
-
- """
- return {}
-
- def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument
- """Provide an English sentence describing a missing arc.
-
- The `start` and `end` arguments are the line numbers of the missing
- arc. Negative numbers indicate entering or exiting code objects.
-
- The `executed_arcs` argument is a set of line number pairs, the arcs
- that were executed in this file.
-
- By default, this simply returns the string "Line {start} didn't jump
- to line {end}".
-
- """
- return "Line {start} didn't jump to line {end}".format(start=start, end=end)
-
- def source_token_lines(self):
- """Generate a series of tokenized lines, one for each line in `source`.
-
- These tokens are used for syntax-colored reports.
-
- Each line is a list of pairs, each pair is a token::
-
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
-
- Each pair has a token class, and the token text. The token classes
- are:
-
- * ``'com'``: a comment
- * ``'key'``: a keyword
- * ``'nam'``: a name, or identifier
- * ``'num'``: a number
- * ``'op'``: an operator
- * ``'str'``: a string literal
- * ``'ws'``: some white space
- * ``'txt'``: some other kind of text
-
- If you concatenate all the token texts, and then join them with
- newlines, you should have your original source back.
-
- The default implementation simply returns each line tagged as
- ``'txt'``.
-
- """
- for line in self.source().splitlines():
- yield [('txt', line)]
-
- # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
- # of them defined.
-
- def __eq__(self, other):
- return isinstance(other, FileReporter) and self.filename == other.filename
-
- def __ne__(self, other):
- return not (self == other)
-
- def __lt__(self, other):
- return self.filename < other.filename
-
- def __le__(self, other):
- return self.filename <= other.filename
-
- def __gt__(self, other):
- return self.filename > other.filename
-
- def __ge__(self, other):
- return self.filename >= other.filename
-
- __hash__ = None # This object doesn't need to be hashed.
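
A minimal FileReporter sketch, leaning on the note above that only
:meth:`lines` is strictly required; the line-counting rule is invented purely
for illustration::

    from coverage.plugin import FileReporter

    class TemplateReporter(FileReporter):
        def lines(self):
            # Count every non-blank, non-comment line of the file as executable.
            executable = set()
            for num, text in enumerate(self.source().splitlines(), start=1):
                stripped = text.strip()
                if stripped and not stripped.startswith("#"):
                    executable.add(num)
            return executable
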
diff --git a/contrib/python/coverage/py2/coverage/plugin_support.py b/contrib/python/coverage/py2/coverage/plugin_support.py
deleted file mode 100644
index 89c1c7658f..0000000000
--- a/contrib/python/coverage/py2/coverage/plugin_support.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Support for plugins."""
-
-import os
-import os.path
-import sys
-
-from coverage.misc import CoverageException, isolate_module
-from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
-
-os = isolate_module(os)
-
-
-class Plugins(object):
- """The currently loaded collection of coverage.py plugins."""
-
- def __init__(self):
- self.order = []
- self.names = {}
- self.file_tracers = []
- self.configurers = []
- self.context_switchers = []
-
- self.current_module = None
- self.debug = None
-
- @classmethod
- def load_plugins(cls, modules, config, debug=None):
- """Load plugins from `modules`.
-
- Returns a Plugins object with the loaded and configured plugins.
-
- """
- plugins = cls()
- plugins.debug = debug
-
- for module in modules:
- plugins.current_module = module
- __import__(module)
- mod = sys.modules[module]
-
- coverage_init = getattr(mod, "coverage_init", None)
- if not coverage_init:
- raise CoverageException(
- "Plugin module %r didn't define a coverage_init function" % module
- )
-
- options = config.get_plugin_options(module)
- coverage_init(plugins, options)
-
- plugins.current_module = None
- return plugins
-
- def add_file_tracer(self, plugin):
- """Add a file tracer plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.file_tracer` method.
-
- """
- self._add_plugin(plugin, self.file_tracers)
-
- def add_configurer(self, plugin):
- """Add a configuring plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.configure` method.
-
- """
- self._add_plugin(plugin, self.configurers)
-
- def add_dynamic_context(self, plugin):
- """Add a dynamic context plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.dynamic_context` method.
-
- """
- self._add_plugin(plugin, self.context_switchers)
-
- def add_noop(self, plugin):
- """Add a plugin that does nothing.
-
- This is only useful for testing the plugin support.
-
- """
- self._add_plugin(plugin, None)
-
- def _add_plugin(self, plugin, specialized):
- """Add a plugin object.
-
- `plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
- is a list to append the plugin to.
-
- """
- plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__)
- if self.debug and self.debug.should('plugin'):
- self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin))
- labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug)
- plugin = DebugPluginWrapper(plugin, labelled)
-
- # pylint: disable=attribute-defined-outside-init
- plugin._coverage_plugin_name = plugin_name
- plugin._coverage_enabled = True
- self.order.append(plugin)
- self.names[plugin_name] = plugin
- if specialized is not None:
- specialized.append(plugin)
-
- def __nonzero__(self):
- return bool(self.order)
-
- __bool__ = __nonzero__
-
- def __iter__(self):
- return iter(self.order)
-
- def get(self, plugin_name):
- """Return a plugin by name."""
- return self.names[plugin_name]
-
-
-class LabelledDebug(object):
- """A Debug writer, but with labels for prepending to the messages."""
-
- def __init__(self, label, debug, prev_labels=()):
- self.labels = list(prev_labels) + [label]
- self.debug = debug
-
- def add_label(self, label):
- """Add a label to the writer, and return a new `LabelledDebug`."""
- return LabelledDebug(label, self.debug, self.labels)
-
- def message_prefix(self):
- """The prefix to use on messages, combining the labels."""
- prefixes = self.labels + ['']
- return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
-
- def write(self, message):
- """Write `message`, but with the labels prepended."""
- self.debug.write("%s%s" % (self.message_prefix(), message))
-
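
A worked example (assumed here, not taken from the original tests) of how the
labels stack up into a message prefix::

    from coverage.plugin_support import LabelledDebug

    class ListDebug(object):
        """Collect messages in a list instead of writing them anywhere."""
        def __init__(self):
            self.lines = []
        def write(self, message):
            self.lines.append(message)

    sink = ListDebug()
    dbg = LabelledDebug("plugin 'demo'", sink).add_label("file 'a.py'")
    dbg.write("lines() --> set([1, 2])")
    # sink.lines[0] now reads:
    #
    #   plugin 'demo':
    #    file 'a.py':
    #     lines() --> set([1, 2])
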
-
-class DebugPluginWrapper(CoveragePlugin):
- """Wrap a plugin, and use debug to report on what it's doing."""
-
- def __init__(self, plugin, debug):
- super(DebugPluginWrapper, self).__init__()
- self.plugin = plugin
- self.debug = debug
-
- def file_tracer(self, filename):
- tracer = self.plugin.file_tracer(filename)
- self.debug.write("file_tracer(%r) --> %r" % (filename, tracer))
- if tracer:
- debug = self.debug.add_label("file %r" % (filename,))
- tracer = DebugFileTracerWrapper(tracer, debug)
- return tracer
-
- def file_reporter(self, filename):
- reporter = self.plugin.file_reporter(filename)
- self.debug.write("file_reporter(%r) --> %r" % (filename, reporter))
- if reporter:
- debug = self.debug.add_label("file %r" % (filename,))
- reporter = DebugFileReporterWrapper(filename, reporter, debug)
- return reporter
-
- def dynamic_context(self, frame):
- context = self.plugin.dynamic_context(frame)
- self.debug.write("dynamic_context(%r) --> %r" % (frame, context))
- return context
-
- def find_executable_files(self, src_dir):
- executable_files = self.plugin.find_executable_files(src_dir)
- self.debug.write("find_executable_files(%r) --> %r" % (src_dir, executable_files))
- return executable_files
-
- def configure(self, config):
- self.debug.write("configure(%r)" % (config,))
- self.plugin.configure(config)
-
- def sys_info(self):
- return self.plugin.sys_info()
-
-
-class DebugFileTracerWrapper(FileTracer):
- """A debugging `FileTracer`."""
-
- def __init__(self, tracer, debug):
- self.tracer = tracer
- self.debug = debug
-
- def _show_frame(self, frame):
- """A short string identifying a frame, for debug messages."""
- return "%s@%d" % (
- os.path.basename(frame.f_code.co_filename),
- frame.f_lineno,
- )
-
- def source_filename(self):
- sfilename = self.tracer.source_filename()
- self.debug.write("source_filename() --> %r" % (sfilename,))
- return sfilename
-
- def has_dynamic_source_filename(self):
- has = self.tracer.has_dynamic_source_filename()
- self.debug.write("has_dynamic_source_filename() --> %r" % (has,))
- return has
-
- def dynamic_source_filename(self, filename, frame):
- dyn = self.tracer.dynamic_source_filename(filename, frame)
- self.debug.write("dynamic_source_filename(%r, %s) --> %r" % (
- filename, self._show_frame(frame), dyn,
- ))
- return dyn
-
- def line_number_range(self, frame):
- pair = self.tracer.line_number_range(frame)
- self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair))
- return pair
-
-
-class DebugFileReporterWrapper(FileReporter):
- """A debugging `FileReporter`."""
-
- def __init__(self, filename, reporter, debug):
- super(DebugFileReporterWrapper, self).__init__(filename)
- self.reporter = reporter
- self.debug = debug
-
- def relative_filename(self):
- ret = self.reporter.relative_filename()
- self.debug.write("relative_filename() --> %r" % (ret,))
- return ret
-
- def lines(self):
- ret = self.reporter.lines()
- self.debug.write("lines() --> %r" % (ret,))
- return ret
-
- def excluded_lines(self):
- ret = self.reporter.excluded_lines()
- self.debug.write("excluded_lines() --> %r" % (ret,))
- return ret
-
- def translate_lines(self, lines):
- ret = self.reporter.translate_lines(lines)
- self.debug.write("translate_lines(%r) --> %r" % (lines, ret))
- return ret
-
- def translate_arcs(self, arcs):
- ret = self.reporter.translate_arcs(arcs)
- self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret))
- return ret
-
- def no_branch_lines(self):
- ret = self.reporter.no_branch_lines()
- self.debug.write("no_branch_lines() --> %r" % (ret,))
- return ret
-
- def exit_counts(self):
- ret = self.reporter.exit_counts()
- self.debug.write("exit_counts() --> %r" % (ret,))
- return ret
-
- def arcs(self):
- ret = self.reporter.arcs()
- self.debug.write("arcs() --> %r" % (ret,))
- return ret
-
- def source(self):
- ret = self.reporter.source()
- self.debug.write("source() --> %d chars" % (len(ret),))
- return ret
-
- def source_token_lines(self):
- ret = list(self.reporter.source_token_lines())
- self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
- return ret
diff --git a/contrib/python/coverage/py2/coverage/python.py b/contrib/python/coverage/py2/coverage/python.py
deleted file mode 100644
index 6ff19c34fe..0000000000
--- a/contrib/python/coverage/py2/coverage/python.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Python source expertise for coverage.py"""
-
-import sys
-import os.path
-import types
-import zipimport
-
-from coverage import env, files
-from coverage.misc import contract, expensive, isolate_module, join_regex
-from coverage.misc import CoverageException, NoSource
-from coverage.parser import PythonParser
-from coverage.phystokens import source_token_lines, source_encoding
-from coverage.plugin import FileReporter
-
-os = isolate_module(os)
-
-
-@contract(returns='bytes')
-def read_python_source(filename):
- """Read the Python source text from `filename`.
-
- Returns bytes.
-
- """
- with open(filename, "rb") as f:
- source = f.read()
-
- if env.IRONPYTHON:
- # IronPython reads Unicode strings even for "rb" files.
- source = bytes(source)
-
- return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
-
-
-@contract(returns='unicode')
-def get_python_source(filename, force_fs=False):
- """Return the source code, as unicode."""
- if getattr(sys, 'is_standalone_binary', False) and not force_fs:
- import __res
-
- modname = __res.importer.file_source(filename)
- if modname:
- source = __res.find(modname)
- source = source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
- return source.decode('utf-8')
- else:
- # it's a fake generated package
- return u''
- base, ext = os.path.splitext(filename)
- if ext == ".py" and env.WINDOWS:
- exts = [".py", ".pyw"]
- else:
- exts = [ext]
-
- for ext in exts:
- try_filename = base + ext
- if os.path.exists(try_filename):
- # A regular text file: open it.
- source = read_python_source(try_filename)
- break
-
- # Maybe it's in a zip file?
- source = get_zip_bytes(try_filename)
- if source is not None:
- break
- else:
- # Couldn't find source.
- exc_msg = "No source for code: '%s'.\n" % (filename,)
- exc_msg += "Aborting report output, consider using -i."
- raise NoSource(exc_msg)
-
- # Replace \f because of http://bugs.python.org/issue19035
- source = source.replace(b'\f', b' ')
- source = source.decode(source_encoding(source), "replace")
-
- # Python code should always end with a line with a newline.
- if source and source[-1] != '\n':
- source += '\n'
-
- return source
-
-
-@contract(returns='bytes|None')
-def get_zip_bytes(filename):
- """Get data from `filename` if it is a zip file path.
-
- Returns the bytestring data read from the zip file, or None if no zip file
- could be found or `filename` isn't in it. The data returned will be
- an empty string if the file is empty.
-
- """
- markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
- for marker in markers:
- if marker in filename:
- parts = filename.split(marker)
- try:
- zi = zipimport.zipimporter(parts[0]+marker[:-1])
- except zipimport.ZipImportError:
- continue
- try:
- data = zi.get_data(parts[1])
- except IOError:
- continue
- return data
- return None
-
-
-def source_for_file(filename):
- """Return the source filename for `filename`.
-
- Given a file name being traced, return the best guess as to the source
- file to attribute it to.
-
- """
- if filename.endswith(".py"):
- # .py files are themselves source files.
- return filename
-
- elif filename.endswith((".pyc", ".pyo")):
- # Bytecode files probably have source files near them.
- py_filename = filename[:-1]
- if os.path.exists(py_filename):
- # Found a .py file, use that.
- return py_filename
- if env.WINDOWS:
- # On Windows, it could be a .pyw file.
- pyw_filename = py_filename + "w"
- if os.path.exists(pyw_filename):
- return pyw_filename
- # Didn't find source, but it's probably the .py file we want.
- return py_filename
-
- elif filename.endswith("$py.class"):
- # Jython is easy to guess.
- return filename[:-9] + ".py"
-
- # No idea, just use the file name as-is.
- return filename
-
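
A quick illustration of the mapping rules above; the paths are made up, and
only the branches that don't consult the filesystem are asserted::

    from coverage.python import source_for_file

    assert source_for_file("pkg/mod.py") == "pkg/mod.py"
    assert source_for_file("pkg/mod$py.class") == "pkg/mod.py"   # Jython bytecode
    # For "pkg/mod.pyc" the answer depends on what exists on disk: the
    # neighbouring .py (or .pyw on Windows) if present, else "pkg/mod.py" anyway.
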
-
-def source_for_morf(morf):
- """Get the source filename for the module-or-file `morf`."""
- if hasattr(morf, '__file__') and morf.__file__:
- filename = morf.__file__
- elif isinstance(morf, types.ModuleType):
- # A module should have had .__file__, otherwise we can't use it.
- # This could be a PEP-420 namespace package.
- raise CoverageException("Module {} has no file".format(morf))
- else:
- filename = morf
-
- filename = source_for_file(files.unicode_filename(filename))
- return filename
-
-
-class PythonFileReporter(FileReporter):
- """Report support for a Python file."""
-
- def __init__(self, morf, coverage=None):
- self.coverage = coverage
-
- filename = source_for_morf(morf)
-
- super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
-
- if hasattr(morf, '__name__'):
- name = morf.__name__.replace(".", os.sep)
- if os.path.basename(filename).startswith('__init__.'):
- name += os.sep + "__init__"
- name += ".py"
- name = files.unicode_filename(name)
- else:
- name = files.relative_filename(filename)
- self.relname = name
-
- self._source = None
- self._parser = None
- self._excluded = None
-
- def __repr__(self):
- return "<PythonFileReporter {!r}>".format(self.filename)
-
- @contract(returns='unicode')
- def relative_filename(self):
- return self.relname
-
- @property
- def parser(self):
- """Lazily create a :class:`PythonParser`."""
- if self._parser is None:
- self._parser = PythonParser(
- filename=self.filename,
- exclude=self.coverage._exclude_regex('exclude'),
- )
- self._parser.parse_source()
- return self._parser
-
- def lines(self):
- """Return the line numbers of statements in the file."""
- return self.parser.statements
-
- def excluded_lines(self):
- """Return the line numbers of statements in the file."""
- return self.parser.excluded
-
- def translate_lines(self, lines):
- return self.parser.translate_lines(lines)
-
- def translate_arcs(self, arcs):
- return self.parser.translate_arcs(arcs)
-
- @expensive
- def no_branch_lines(self):
- no_branch = self.parser.lines_matching(
- join_regex(self.coverage.config.partial_list),
- join_regex(self.coverage.config.partial_always_list)
- )
- return no_branch
-
- @expensive
- def arcs(self):
- return self.parser.arcs()
-
- @expensive
- def exit_counts(self):
- return self.parser.exit_counts()
-
- def missing_arc_description(self, start, end, executed_arcs=None):
- return self.parser.missing_arc_description(start, end, executed_arcs)
-
- @contract(returns='unicode')
- def source(self):
- if self._source is None:
- self._source = get_python_source(self.filename)
- return self._source
-
- def should_be_python(self):
- """Does it seem like this file should contain Python?
-
- This is used to decide if a file reported as part of the execution of
- a program was really likely to have contained Python in the first
- place.
-
- """
- # Get the file extension.
- _, ext = os.path.splitext(self.filename)
-
- # Anything named *.py* should be Python.
- if ext.startswith('.py'):
- return True
- # A file with no extension should be Python.
- if not ext:
- return True
- # Everything else is probably not Python.
- return False
-
- def source_token_lines(self):
- return source_token_lines(self.source())
diff --git a/contrib/python/coverage/py2/coverage/pytracer.py b/contrib/python/coverage/py2/coverage/pytracer.py
deleted file mode 100644
index 7ab4d3ef92..0000000000
--- a/contrib/python/coverage/py2/coverage/pytracer.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Raw data collector for coverage.py."""
-
-import atexit
-import dis
-import sys
-
-from coverage import env
-
-# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
-YIELD_VALUE = dis.opmap['YIELD_VALUE']
-if env.PY2:
- YIELD_VALUE = chr(YIELD_VALUE)
-
-# When running meta-coverage, this file can try to trace itself, which confuses
-# everything. Don't trace ourselves.
-
-THIS_FILE = __file__.rstrip("co")
-
-
-class PyTracer(object):
- """Python implementation of the raw data tracer."""
-
- # Because of poor implementations of trace-function-manipulating tools,
- # the Python trace function must be kept very simple. In particular, there
- # must be only one function ever set as the trace function, both through
- # sys.settrace, and as the return value from the trace function. Put
- # another way, the trace function must always return itself. It cannot
- # swap in other functions, or return None to avoid tracing a particular
- # frame.
- #
- # The trace manipulator that introduced this restriction is DecoratorTools,
- # which sets a trace function, and then later restores the pre-existing one
- # by calling sys.settrace with a function it found in the current frame.
- #
- # Systems that use DecoratorTools (or similar trace manipulations) must use
- # PyTracer to get accurate results. The command-line --timid argument is
- # used to force the use of this tracer.
-
- def __init__(self):
- # Attributes set from the collector:
- self.data = None
- self.trace_arcs = False
- self.should_trace = None
- self.should_trace_cache = None
- self.should_start_context = None
- self.warn = None
- # The threading module to use, if any.
- self.threading = None
-
- self.cur_file_dict = None
- self.last_line = 0 # int, but uninitialized.
- self.cur_file_name = None
- self.context = None
- self.started_context = False
-
- self.data_stack = []
- self.last_exc_back = None
- self.last_exc_firstlineno = 0
- self.thread = None
- self.stopped = False
- self._activity = False
-
- self.in_atexit = False
- # On exit, self.in_atexit = True
- atexit.register(setattr, self, 'in_atexit', True)
-
- def __repr__(self):
- return "<PyTracer at {}: {} lines in {} files>".format(
- id(self),
- sum(len(v) for v in self.data.values()),
- len(self.data),
- )
-
- def log(self, marker, *args):
- """For hard-core logging of what this tracer is doing."""
- with open("/tmp/debug_trace.txt", "a") as f:
- f.write("{} {}[{}]".format(
- marker,
- id(self),
- len(self.data_stack),
- ))
- if 0:
- f.write(".{:x}.{:x}".format(
- self.thread.ident,
- self.threading.currentThread().ident,
- ))
- f.write(" {}".format(" ".join(map(str, args))))
- if 0:
- f.write(" | ")
- stack = " / ".join(
- (fname or "???").rpartition("/")[-1]
- for _, fname, _, _ in self.data_stack
- )
- f.write(stack)
- f.write("\n")
-
- def _trace(self, frame, event, arg_unused):
- """The trace function passed to sys.settrace."""
-
- if THIS_FILE in frame.f_code.co_filename:
- return None
-
- #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event)
-
- if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable
- # The PyTracer.stop() method has been called, possibly by another
- # thread, so let's deactivate ourselves now.
- if 0:
- self.log("---\nX", frame.f_code.co_filename, frame.f_lineno)
- f = frame
- while f:
- self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
- f = f.f_back
- sys.settrace(None)
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- return None
-
- if self.last_exc_back:
- if frame == self.last_exc_back:
- # Someone forgot a return event.
- if self.trace_arcs and self.cur_file_dict:
- pair = (self.last_line, -self.last_exc_firstlineno)
- self.cur_file_dict[pair] = None
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- self.last_exc_back = None
-
- # if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
- # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
-
- if event == 'call':
- # Should we start a new context?
- if self.should_start_context and self.context is None:
- context_maybe = self.should_start_context(frame)
- if context_maybe is not None:
- self.context = context_maybe
- self.started_context = True
- self.switch_context(self.context)
- else:
- self.started_context = False
- else:
- self.started_context = False
-
- # Entering a new frame. Decide if we should trace
- # in this file.
- self._activity = True
- self.data_stack.append(
- (
- self.cur_file_dict,
- self.cur_file_name,
- self.last_line,
- self.started_context,
- )
- )
- filename = frame.f_code.co_filename
- self.cur_file_name = filename
- disp = self.should_trace_cache.get(filename)
- if disp is None:
- disp = self.should_trace(filename, frame)
- self.should_trace_cache[filename] = disp
-
- self.cur_file_dict = None
- if disp.trace:
- tracename = disp.source_filename
- if tracename not in self.data:
- self.data[tracename] = {}
- self.cur_file_dict = self.data[tracename]
- # The call event is really a "start frame" event, and happens for
- # function calls and re-entering generators. The f_lasti field is
- # -1 for calls, and a real offset for generators. Use <0 as the
- # line number for calls, and the real line number for generators.
- if getattr(frame, 'f_lasti', -1) < 0:
- self.last_line = -frame.f_code.co_firstlineno
- else:
- self.last_line = frame.f_lineno
- elif event == 'line':
- # Record an executed line.
- if self.cur_file_dict is not None:
- lineno = frame.f_lineno
-
- if self.trace_arcs:
- self.cur_file_dict[(self.last_line, lineno)] = None
- else:
- self.cur_file_dict[lineno] = None
- self.last_line = lineno
- elif event == 'return':
- if self.trace_arcs and self.cur_file_dict:
- # Record an arc leaving the function, but beware that a
- # "return" event might just mean yielding from a generator.
- # Jython seems to have an empty co_code, so just assume return.
- code = frame.f_code.co_code
- if (not code) or code[frame.f_lasti] != YIELD_VALUE:
- first = frame.f_code.co_firstlineno
- self.cur_file_dict[(self.last_line, -first)] = None
- # Leaving this function, pop the filename stack.
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- # Leaving a context?
- if self.started_context:
- self.context = None
- self.switch_context(None)
- elif event == 'exception':
- self.last_exc_back = frame.f_back
- self.last_exc_firstlineno = frame.f_code.co_firstlineno
- return self._trace
-
- def start(self):
- """Start this Tracer.
-
- Return a Python function suitable for use with sys.settrace().
-
- """
- self.stopped = False
- if self.threading:
- if self.thread is None:
- self.thread = self.threading.currentThread()
- else:
- if self.thread.ident != self.threading.currentThread().ident:
- # Re-starting from a different thread!? Don't set the trace
- # function, but we are marked as running again, so maybe it
- # will be ok?
- #self.log("~", "starting on different threads")
- return self._trace
-
- sys.settrace(self._trace)
- return self._trace
-
- def stop(self):
- """Stop this Tracer."""
- # Get the active tracer callback before setting the stop flag to be
- # able to detect if the tracer was changed prior to stopping it.
- tf = sys.gettrace()
-
- # Set the stop flag. The actual call to sys.settrace(None) will happen
- # in the self._trace callback itself to make sure to call it from the
- # right thread.
- self.stopped = True
-
- if self.threading and self.thread.ident != self.threading.currentThread().ident:
- # Called on a different thread than started us: we can't unhook
- # ourselves, but we've set the flag that we should stop, so we
- # won't do any more tracing.
- #self.log("~", "stopping on different threads")
- return
-
- if self.warn:
- # PyPy clears the trace function before running atexit functions,
- # so don't warn if we are in atexit on PyPy and the trace function
- # has changed to None.
- dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
- if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable
- self.warn(
- "Trace function changed, measurement is likely wrong: %r" % (tf,),
- slug="trace-changed",
- )
-
- def activity(self):
- """Has there been any activity?"""
- return self._activity
-
- def reset_activity(self):
- """Reset the activity() flag."""
- self._activity = False
-
- def get_stats(self):
- """Return a dictionary of statistics, or None."""
- return None
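
The "must always return itself" restriction described in the PyTracer class
comment can be seen with a toy trace function; this is an editorial sketch,
not coverage.py's own tracer::

    import sys

    def toy_trace(frame, event, arg):
        if event == "line":
            print("line %d in %s" % (frame.f_lineno, frame.f_code.co_name))
        return toy_trace   # never swap in another function, never return None

    def traced():
        x = 1
        return x + 1

    sys.settrace(toy_trace)
    try:
        traced()
    finally:
        sys.settrace(None)
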
diff --git a/contrib/python/coverage/py2/coverage/report.py b/contrib/python/coverage/py2/coverage/report.py
deleted file mode 100644
index 64678ff95d..0000000000
--- a/contrib/python/coverage/py2/coverage/report.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Reporter foundation for coverage.py."""
-import sys
-
-from coverage import env
-from coverage.files import prep_patterns, FnmatchMatcher
-from coverage.misc import CoverageException, NoSource, NotPython, ensure_dir_for_file, file_be_gone
-
-
-def render_report(output_path, reporter, morfs):
- """Run the provided reporter ensuring any required setup and cleanup is done
-
- At a high level this method ensures the output file is ready to be written to. Then writes the
- report to it. Then closes the file and deletes any garbage created if necessary.
- """
- file_to_close = None
- delete_file = False
- if output_path:
- if output_path == '-':
- outfile = sys.stdout
- else:
- # Ensure that the output directory is created; done here
- # because this report pre-opens the output file.
- # HTMLReport does this using the Report plumbing because
- # its task is more complex, being multiple files.
- ensure_dir_for_file(output_path)
- open_kwargs = {}
- if env.PY3:
- open_kwargs['encoding'] = 'utf8'
- outfile = open(output_path, "w", **open_kwargs)
- file_to_close = outfile
- try:
- return reporter.report(morfs, outfile=outfile)
- except CoverageException:
- delete_file = True
- raise
- finally:
- if file_to_close:
- file_to_close.close()
- if delete_file:
- file_be_gone(output_path)
-
-
-def get_analysis_to_report(coverage, morfs):
- """Get the files to report on.
-
- For each morf in `morfs`, if it should be reported on (based on the omit
- and include configuration options), yield a pair, the `FileReporter` and
- `Analysis` for the morf.
-
- """
- file_reporters = coverage._get_file_reporters(morfs)
- config = coverage.config
-
- if config.report_include:
- matcher = FnmatchMatcher(prep_patterns(config.report_include))
- file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
-
- if config.report_omit:
- matcher = FnmatchMatcher(prep_patterns(config.report_omit))
- file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
-
- if not file_reporters:
- raise CoverageException("No data to report.")
-
- for fr in sorted(file_reporters):
- try:
- analysis = coverage._analyze(fr)
- except NoSource:
- if not config.ignore_errors:
- raise
- except NotPython:
- # Only report errors for .py files, and only if we didn't
- # explicitly suppress those errors.
- # NotPython is only raised by PythonFileReporter, which has a
- # should_be_python() method.
- if fr.should_be_python():
- if config.ignore_errors:
- msg = "Couldn't parse Python file '{}'".format(fr.filename)
- coverage._warn(msg, slug="couldnt-parse")
- else:
- raise
- else:
- yield (fr, analysis)
diff --git a/contrib/python/coverage/py2/coverage/results.py b/contrib/python/coverage/py2/coverage/results.py
deleted file mode 100644
index 4916864df3..0000000000
--- a/contrib/python/coverage/py2/coverage/results.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Results of coverage measurement."""
-
-import collections
-
-from coverage.backward import iitems
-from coverage.debug import SimpleReprMixin
-from coverage.misc import contract, CoverageException, nice_pair
-
-
-class Analysis(object):
- """The results of analyzing a FileReporter."""
-
- def __init__(self, data, file_reporter, file_mapper):
- self.data = data
- self.file_reporter = file_reporter
- self.filename = file_mapper(self.file_reporter.filename)
- self.statements = self.file_reporter.lines()
- self.excluded = self.file_reporter.excluded_lines()
-
- # Identify missing statements.
- executed = self.data.lines(self.filename) or []
- executed = self.file_reporter.translate_lines(executed)
- self.executed = executed
- self.missing = self.statements - self.executed
-
- if self.data.has_arcs():
- self._arc_possibilities = sorted(self.file_reporter.arcs())
- self.exit_counts = self.file_reporter.exit_counts()
- self.no_branch = self.file_reporter.no_branch_lines()
- n_branches = self._total_branches()
- mba = self.missing_branch_arcs()
- n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing)
- n_missing_branches = sum(len(v) for k,v in iitems(mba))
- else:
- self._arc_possibilities = []
- self.exit_counts = {}
- self.no_branch = set()
- n_branches = n_partial_branches = n_missing_branches = 0
-
- self.numbers = Numbers(
- n_files=1,
- n_statements=len(self.statements),
- n_excluded=len(self.excluded),
- n_missing=len(self.missing),
- n_branches=n_branches,
- n_partial_branches=n_partial_branches,
- n_missing_branches=n_missing_branches,
- )
-
- def missing_formatted(self, branches=False):
- """The missing line numbers, formatted nicely.
-
- Returns a string like "1-2, 5-11, 13-14".
-
- If `branches` is true, includes the missing branch arcs also.
-
- """
- if branches and self.has_arcs():
- arcs = iitems(self.missing_branch_arcs())
- else:
- arcs = None
-
- return format_lines(self.statements, self.missing, arcs=arcs)
-
- def has_arcs(self):
- """Were arcs measured in this result?"""
- return self.data.has_arcs()
-
- @contract(returns='list(tuple(int, int))')
- def arc_possibilities(self):
- """Returns a sorted list of the arcs in the code."""
- return self._arc_possibilities
-
- @contract(returns='list(tuple(int, int))')
- def arcs_executed(self):
- """Returns a sorted list of the arcs actually executed in the code."""
- executed = self.data.arcs(self.filename) or []
- executed = self.file_reporter.translate_arcs(executed)
- return sorted(executed)
-
- @contract(returns='list(tuple(int, int))')
- def arcs_missing(self):
- """Returns a sorted list of the arcs in the code not executed."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
- missing = (
- p for p in possible
- if p not in executed
- and p[0] not in self.no_branch
- )
- return sorted(missing)
-
- @contract(returns='list(tuple(int, int))')
- def arcs_unpredicted(self):
- """Returns a sorted list of the executed arcs missing from the code."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
- # Exclude arcs here which connect a line to itself. They can occur
- # in executed data in some cases. This is where they can cause
- # trouble, and here is where it's the least burden to remove them.
- # Also, generators can somehow cause arcs from "enter" to "exit", so
- # make sure we have at least one positive value.
- unpredicted = (
- e for e in executed
- if e not in possible
- and e[0] != e[1]
- and (e[0] > 0 or e[1] > 0)
- )
- return sorted(unpredicted)
-
- def _branch_lines(self):
- """Returns a list of line numbers that have more than one exit."""
- return [l1 for l1,count in iitems(self.exit_counts) if count > 1]
-
- def _total_branches(self):
- """How many total branches are there?"""
- return sum(count for count in self.exit_counts.values() if count > 1)
-
- @contract(returns='dict(int: list(int))')
- def missing_branch_arcs(self):
- """Return arcs that weren't executed from branch lines.
-
- Returns {l1:[l2a,l2b,...], ...}
-
- """
- missing = self.arcs_missing()
- branch_lines = set(self._branch_lines())
- mba = collections.defaultdict(list)
- for l1, l2 in missing:
- if l1 in branch_lines:
- mba[l1].append(l2)
- return mba
-
- @contract(returns='dict(int: tuple(int, int))')
- def branch_stats(self):
- """Get stats about branches.
-
- Returns a dict mapping line numbers to a tuple:
- (total_exits, taken_exits).
- """
-
- missing_arcs = self.missing_branch_arcs()
- stats = {}
- for lnum in self._branch_lines():
- exits = self.exit_counts[lnum]
- missing = len(missing_arcs[lnum])
- stats[lnum] = (exits, exits - missing)
- return stats
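-
-    # Editor's note (illustrative, not part of the original source): for a
-    # hypothetical line 12 with two possible exits of which one was taken,
-    # branch_stats() would contain ``{12: (2, 1)}``.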
-
-
-class Numbers(SimpleReprMixin):
- """The numerical results of measuring coverage.
-
- This holds the basic statistics from `Analysis`, and is used to roll
- up statistics across files.
-
- """
- # A global to determine the precision on coverage percentages, the number
- # of decimal places.
- _precision = 0
- _near0 = 1.0 # These will change when _precision is changed.
- _near100 = 99.0
-
- def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
- n_branches=0, n_partial_branches=0, n_missing_branches=0
- ):
- self.n_files = n_files
- self.n_statements = n_statements
- self.n_excluded = n_excluded
- self.n_missing = n_missing
- self.n_branches = n_branches
- self.n_partial_branches = n_partial_branches
- self.n_missing_branches = n_missing_branches
-
- def init_args(self):
- """Return a list for __init__(*args) to recreate this object."""
- return [
- self.n_files, self.n_statements, self.n_excluded, self.n_missing,
- self.n_branches, self.n_partial_branches, self.n_missing_branches,
- ]
-
- @classmethod
- def set_precision(cls, precision):
- """Set the number of decimal places used to report percentages."""
- assert 0 <= precision < 10
- cls._precision = precision
- cls._near0 = 1.0 / 10**precision
- cls._near100 = 100.0 - cls._near0
-
- @property
- def n_executed(self):
- """Returns the number of executed statements."""
- return self.n_statements - self.n_missing
-
- @property
- def n_executed_branches(self):
- """Returns the number of executed branches."""
- return self.n_branches - self.n_missing_branches
-
- @property
- def pc_covered(self):
- """Returns a single percentage value for coverage."""
- if self.n_statements > 0:
- numerator, denominator = self.ratio_covered
- pc_cov = (100.0 * numerator) / denominator
- else:
- pc_cov = 100.0
- return pc_cov
-
- @property
- def pc_covered_str(self):
- """Returns the percent covered, as a string, without a percent sign.
-
- Note that "0" is only returned when the value is truly zero, and "100"
- is only returned when the value is truly 100. Rounding can never
- result in either "0" or "100".
-
- """
- pc = self.pc_covered
- if 0 < pc < self._near0:
- pc = self._near0
- elif self._near100 < pc < 100:
- pc = self._near100
- else:
- pc = round(pc, self._precision)
- return "%.*f" % (self._precision, pc)
-
- @classmethod
- def pc_str_width(cls):
- """How many characters wide can pc_covered_str be?"""
- width = 3 # "100"
- if cls._precision > 0:
- width += 1 + cls._precision
- return width
-
- @property
- def ratio_covered(self):
- """Return a numerator and denominator for the coverage ratio."""
- numerator = self.n_executed + self.n_executed_branches
- denominator = self.n_statements + self.n_branches
- return numerator, denominator
-
- def __add__(self, other):
- nums = Numbers()
- nums.n_files = self.n_files + other.n_files
- nums.n_statements = self.n_statements + other.n_statements
- nums.n_excluded = self.n_excluded + other.n_excluded
- nums.n_missing = self.n_missing + other.n_missing
- nums.n_branches = self.n_branches + other.n_branches
- nums.n_partial_branches = (
- self.n_partial_branches + other.n_partial_branches
- )
- nums.n_missing_branches = (
- self.n_missing_branches + other.n_missing_branches
- )
- return nums
-
- def __radd__(self, other):
- # Implementing 0+Numbers allows us to sum() a list of Numbers.
- if other == 0:
- return self
- return NotImplemented # pragma: not covered (we never call it this way)
-
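-# Editor's note (illustrative sketch, not part of the original module):
-# because __radd__ accepts 0, per-file results roll up with the builtin
-# sum(), for example::
-#
-#     total = sum(numbers_list)     # numbers_list: a list of Numbers objects
-#     print(total.pc_covered_str)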
-
-def _line_ranges(statements, lines):
- """Produce a list of ranges for `format_lines`."""
- statements = sorted(statements)
- lines = sorted(lines)
-
- pairs = []
- start = None
- lidx = 0
- for stmt in statements:
- if lidx >= len(lines):
- break
- if stmt == lines[lidx]:
- lidx += 1
- if not start:
- start = stmt
- end = stmt
- elif start:
- pairs.append((start, end))
- start = None
- if start:
- pairs.append((start, end))
- return pairs
-
-
-def format_lines(statements, lines, arcs=None):
- """Nicely format a list of line numbers.
-
- Format a list of line numbers for printing by coalescing groups of lines as
- long as the lines represent consecutive statements. This will coalesce
- even if there are gaps between statements.
-
- For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
- `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
-
- Both `lines` and `statements` can be any iterable. All of the elements of
- `lines` must be in `statements`, and all of the values must be positive
- integers.
-
- If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
- included in the output as long as start isn't in `lines`.
-
- """
- line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
- if arcs:
- line_exits = sorted(arcs)
- for line, exits in line_exits:
- for ex in sorted(exits):
- if line not in lines and ex not in lines:
- dest = (ex if ex > 0 else "exit")
- line_items.append((line, "%d->%s" % (line, dest)))
-
- ret = ', '.join(t[-1] for t in sorted(line_items))
- return ret
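-
-# Editor's note (illustrative, not part of the original module), restating the
-# docstring's example as a doctest-style snippet::
-#
-#     >>> format_lines([1,2,3,4,5,10,11,12,13,14], [1,2,5,10,11,13,14])
-#     '1-2, 5-11, 13-14'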
-
-
-@contract(total='number', fail_under='number', precision=int, returns=bool)
-def should_fail_under(total, fail_under, precision):
- """Determine if a total should fail due to fail-under.
-
- `total` is a float, the coverage measurement total. `fail_under` is the
- fail_under setting to compare with. `precision` is the number of digits
- to consider after the decimal point.
-
- Returns True if the total should fail.
-
- """
- # We can never achieve higher than 100% coverage, or less than zero.
- if not (0 <= fail_under <= 100.0):
- msg = "fail_under={} is invalid. Must be between 0 and 100.".format(fail_under)
- raise CoverageException(msg)
-
- # Special case for fail_under=100, it must really be 100.
- if fail_under == 100.0 and total != 100.0:
- return True
-
- return round(total, precision) < fail_under
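-
-# Editor's note (illustrative, not part of the original module)::
-#
-#     should_fail_under(99.999, 100, 2)  # True: fail_under=100 needs exactly 100.0
-#     should_fail_under(80.0, 80, 2)     # False: 80.0 is not under 80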
diff --git a/contrib/python/coverage/py2/coverage/sqldata.py b/contrib/python/coverage/py2/coverage/sqldata.py
deleted file mode 100644
index a150fdfd0f..0000000000
--- a/contrib/python/coverage/py2/coverage/sqldata.py
+++ /dev/null
@@ -1,1123 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Sqlite coverage data."""
-
-# TODO: factor out dataop debugging to a wrapper class?
-# TODO: make sure all dataop debugging is in place somehow
-
-import collections
-import datetime
-import glob
-import itertools
-import os
-import re
-import sqlite3
-import sys
-import zlib
-
-from coverage import env
-from coverage.backward import get_thread_id, iitems, to_bytes, to_string
-from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr
-from coverage.files import PathAliases
-from coverage.misc import CoverageException, contract, file_be_gone, filename_suffix, isolate_module
-from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
-from coverage.version import __version__
-
-os = isolate_module(os)
-
-# If you change the schema, increment the SCHEMA_VERSION, and update the
-# docs in docs/dbschema.rst also.
-
-SCHEMA_VERSION = 7
-
-# Schema versions:
-# 1: Released in 5.0a2
-# 2: Added contexts in 5.0a3.
-# 3: Replaced line table with line_map table.
-# 4: Changed line_map.bitmap to line_map.numbits.
-# 5: Added foreign key declarations.
-# 6: Key-value in meta.
-# 7: line_map -> line_bits
-
-SCHEMA = """\
-CREATE TABLE coverage_schema (
- -- One row, to record the version of the schema in this db.
- version integer
-);
-
-CREATE TABLE meta (
- -- Key-value pairs, to record metadata about the data
- key text,
- value text,
- unique (key)
- -- Keys:
- -- 'has_arcs' boolean -- Is this data recording branches?
- -- 'sys_argv' text -- The coverage command line that recorded the data.
- -- 'version' text -- The version of coverage.py that made the file.
- -- 'when' text -- Datetime when the file was created.
-);
-
-CREATE TABLE file (
- -- A row per file measured.
- id integer primary key,
- path text,
- unique (path)
-);
-
-CREATE TABLE context (
- -- A row per context measured.
- id integer primary key,
- context text,
- unique (context)
-);
-
-CREATE TABLE line_bits (
- -- If recording lines, a row per context per file executed.
- -- All of the line numbers for that file/context are in one numbits.
- file_id integer, -- foreign key to `file`.
- context_id integer, -- foreign key to `context`.
- numbits blob, -- see the numbits functions in coverage.numbits
- foreign key (file_id) references file (id),
- foreign key (context_id) references context (id),
- unique (file_id, context_id)
-);
-
-CREATE TABLE arc (
- -- If recording branches, a row per context per from/to line transition executed.
- file_id integer, -- foreign key to `file`.
- context_id integer, -- foreign key to `context`.
- fromno integer, -- line number jumped from.
- tono integer, -- line number jumped to.
- foreign key (file_id) references file (id),
- foreign key (context_id) references context (id),
- unique (file_id, context_id, fromno, tono)
-);
-
-CREATE TABLE tracer (
- -- A row per file indicating the tracer used for that file.
- file_id integer primary key,
- tracer text,
- foreign key (file_id) references file (id)
-);
-"""
-
-class CoverageData(SimpleReprMixin):
- """Manages collected coverage data, including file storage.
-
- This class is the public supported API to the data that coverage.py
- collects during program execution. It includes information about what code
- was executed. It does not include information from the analysis phase, to
- determine what lines could have been executed, or what lines were not
- executed.
-
- .. note::
-
- The data file is currently a SQLite database file, with a
- :ref:`documented schema <dbschema>`. The schema is subject to change
- though, so be careful about querying it directly. Use this API if you
- can to isolate yourself from changes.
-
- There are a number of kinds of data that can be collected:
-
- * **lines**: the line numbers of source lines that were executed.
- These are always available.
-
- * **arcs**: pairs of source and destination line numbers for transitions
- between source lines. These are only available if branch coverage was
- used.
-
- * **file tracer names**: the module names of the file tracer plugins that
- handled each file in the data.
-
- Lines, arcs, and file tracer names are stored for each source file. File
- names in this API are case-sensitive, even on platforms with
- case-insensitive file systems.
-
-    A data file stores either lines or arcs, but not both.
-
- A data file is associated with the data when the :class:`CoverageData`
- is created, using the parameters `basename`, `suffix`, and `no_disk`. The
- base name can be queried with :meth:`base_filename`, and the actual file
- name being used is available from :meth:`data_filename`.
-
- To read an existing coverage.py data file, use :meth:`read`. You can then
- access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
- or :meth:`file_tracer`.
-
- The :meth:`has_arcs` method indicates whether arc data is available. You
- can get a set of the files in the data with :meth:`measured_files`. As
- with most Python containers, you can determine if there is any data at all
- by using this object as a boolean value.
-
- The contexts for each line in a file can be read with
- :meth:`contexts_by_lineno`.
-
- To limit querying to certain contexts, use :meth:`set_query_context` or
- :meth:`set_query_contexts`. These will narrow the focus of subsequent
- :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
- of all measured context names can be retrieved with
- :meth:`measured_contexts`.
-
- Most data files will be created by coverage.py itself, but you can use
- methods here to create data files if you like. The :meth:`add_lines`,
- :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
- that are convenient for coverage.py.
-
- To record data for contexts, use :meth:`set_context` to set a context to
- be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls.
-
- To add a source file without any measured data, use :meth:`touch_file`,
- or :meth:`touch_files` for a list of such files.
-
- Write the data to its file with :meth:`write`.
-
- You can clear the data in memory with :meth:`erase`. Two data collections
- can be combined by using :meth:`update` on one :class:`CoverageData`,
- passing it the other.
-
- Data in a :class:`CoverageData` can be serialized and deserialized with
- :meth:`dumps` and :meth:`loads`.
-
- """
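-
-    # Editor's note (illustrative sketch, not part of the original source):
-    # reading an existing data file, per the class docstring above::
-    #
-    #     data = CoverageData()                # base name ".coverage"
-    #     data.read()
-    #     for fname in sorted(data.measured_files()):
-    #         print(fname, sorted(data.lines(fname) or []))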
-
- def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None):
- """Create a :class:`CoverageData` object to hold coverage-measured data.
-
- Arguments:
- basename (str): the base name of the data file, defaulting to
- ".coverage".
- suffix (str or bool): has the same meaning as the `data_suffix`
- argument to :class:`coverage.Coverage`.
- no_disk (bool): if True, keep all data in memory, and don't
- write any disk file.
- warn: a warning callback function, accepting a warning message
- argument.
- debug: a `DebugControl` object (optional)
-
- """
- self._no_disk = no_disk
- self._basename = os.path.abspath(basename or ".coverage")
- self._suffix = suffix
- self._warn = warn
- self._debug = debug or NoDebugging()
-
- self._choose_filename()
- self._file_map = {}
- # Maps thread ids to SqliteDb objects.
- self._dbs = {}
- self._pid = os.getpid()
-
- # Are we in sync with the data file?
- self._have_used = False
-
- self._has_lines = False
- self._has_arcs = False
-
- self._current_context = None
- self._current_context_id = None
- self._query_context_ids = None
-
- def _choose_filename(self):
- """Set self._filename based on inited attributes."""
- if self._no_disk:
- self._filename = ":memory:"
- else:
- self._filename = self._basename
- suffix = filename_suffix(self._suffix)
- if suffix:
- self._filename += "." + suffix
-
- def _reset(self):
- """Reset our attributes."""
- if self._dbs:
- for db in self._dbs.values():
- db.close()
- self._dbs = {}
- self._file_map = {}
- self._have_used = False
- self._current_context_id = None
-
- def _create_db(self):
- """Create a db file that doesn't exist yet.
-
- Initializes the schema and certain metadata.
- """
- if self._debug.should('dataio'):
- self._debug.write("Creating data file {!r}".format(self._filename))
- self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
- with db:
- db.executescript(SCHEMA)
- db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
- db.executemany(
- "insert into meta (key, value) values (?, ?)",
- [
- ('sys_argv', str(getattr(sys, 'argv', None))),
- ('version', __version__),
- ('when', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
- ]
- )
-
- def _open_db(self):
- """Open an existing db file, and read its metadata."""
- if self._debug.should('dataio'):
- self._debug.write("Opening data file {!r}".format(self._filename))
- self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug)
- self._read_db()
-
- def _read_db(self):
- """Read the metadata from a database so that we are ready to use it."""
- with self._dbs[get_thread_id()] as db:
- try:
- schema_version, = db.execute_one("select version from coverage_schema")
- except Exception as exc:
- raise CoverageException(
- "Data file {!r} doesn't seem to be a coverage data file: {}".format(
- self._filename, exc
- )
- )
- else:
- if schema_version != SCHEMA_VERSION:
- raise CoverageException(
- "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
- self._filename, schema_version, SCHEMA_VERSION
- )
- )
-
- for row in db.execute("select value from meta where key = 'has_arcs'"):
- self._has_arcs = bool(int(row[0]))
- self._has_lines = not self._has_arcs
-
- for path, file_id in db.execute("select path, id from file"):
- self._file_map[path] = file_id
-
- def _connect(self):
- """Get the SqliteDb object to use."""
- if get_thread_id() not in self._dbs:
- if os.path.exists(self._filename):
- self._open_db()
- else:
- self._create_db()
- return self._dbs[get_thread_id()]
-
- def __nonzero__(self):
- if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)):
- return False
- try:
- with self._connect() as con:
- rows = con.execute("select * from file limit 1")
- return bool(list(rows))
- except CoverageException:
- return False
-
- __bool__ = __nonzero__
-
- @contract(returns='bytes')
- def dumps(self):
- """Serialize the current data to a byte string.
-
- The format of the serialized data is not documented. It is only
- suitable for use with :meth:`loads` in the same version of
- coverage.py.
-
- Returns:
- A byte string of serialized data.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataio'):
- self._debug.write("Dumping data from data file {!r}".format(self._filename))
- with self._connect() as con:
- return b'z' + zlib.compress(to_bytes(con.dump()))
-
- @contract(data='bytes')
- def loads(self, data):
- """Deserialize data from :meth:`dumps`
-
- Use with a newly-created empty :class:`CoverageData` object. It's
- undefined what happens if the object already has data in it.
-
- Arguments:
- data: A byte string of serialized data produced by :meth:`dumps`.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataio'):
- self._debug.write("Loading data into data file {!r}".format(self._filename))
- if data[:1] != b'z':
- raise CoverageException(
- "Unrecognized serialization: {!r} (head of {} bytes)".format(data[:40], len(data))
- )
- script = to_string(zlib.decompress(data[1:]))
- self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
- with db:
- db.executescript(script)
- self._read_db()
- self._have_used = True
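-
-    # Editor's note (illustrative sketch, not part of the original source):
-    # a dumps()/loads() round trip, per the docstrings above::
-    #
-    #     blob = data.dumps()
-    #     data2 = CoverageData(no_disk=True)   # fresh, empty object
-    #     data2.loads(blob)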
-
- def _file_id(self, filename, add=False):
- """Get the file id for `filename`.
-
- If filename is not in the database yet, add it if `add` is True.
- If `add` is not True, return None.
- """
- if filename not in self._file_map:
- if add:
- with self._connect() as con:
- cur = con.execute("insert or replace into file (path) values (?)", (filename,))
- self._file_map[filename] = cur.lastrowid
- return self._file_map.get(filename)
-
- def _context_id(self, context):
- """Get the id for a context."""
- assert context is not None
- self._start_using()
- with self._connect() as con:
- row = con.execute_one("select id from context where context = ?", (context,))
- if row is not None:
- return row[0]
- else:
- return None
-
- def set_context(self, context):
- """Set the current context for future :meth:`add_lines` etc.
-
- `context` is a str, the name of the context to use for the next data
- additions. The context persists until the next :meth:`set_context`.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Setting context: %r" % (context,))
- self._current_context = context
- self._current_context_id = None
-
- def _set_context_id(self):
- """Use the _current_context to set _current_context_id."""
- context = self._current_context or ""
- context_id = self._context_id(context)
- if context_id is not None:
- self._current_context_id = context_id
- else:
- with self._connect() as con:
- cur = con.execute("insert into context (context) values (?)", (context,))
- self._current_context_id = cur.lastrowid
-
- def base_filename(self):
- """The base filename for storing data.
-
- .. versionadded:: 5.0
-
- """
- return self._basename
-
- def data_filename(self):
- """Where is the data stored?
-
- .. versionadded:: 5.0
-
- """
- return self._filename
-
- def add_lines(self, line_data):
- """Add measured line data.
-
- `line_data` is a dictionary mapping file names to dictionaries::
-
- { filename: { lineno: None, ... }, ...}
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding lines: %d files, %d lines total" % (
- len(line_data), sum(len(lines) for lines in line_data.values())
- ))
- self._start_using()
- self._choose_lines_or_arcs(lines=True)
- if not line_data:
- return
- with self._connect() as con:
- self._set_context_id()
- for filename, linenos in iitems(line_data):
- linemap = nums_to_numbits(linenos)
- file_id = self._file_id(filename, add=True)
- query = "select numbits from line_bits where file_id = ? and context_id = ?"
- existing = list(con.execute(query, (file_id, self._current_context_id)))
- if existing:
- linemap = numbits_union(linemap, existing[0][0])
-
- con.execute(
- "insert or replace into line_bits "
- " (file_id, context_id, numbits) values (?, ?, ?)",
- (file_id, self._current_context_id, linemap),
- )
-
- def add_arcs(self, arc_data):
- """Add measured arc data.
-
- `arc_data` is a dictionary mapping file names to dictionaries::
-
- { filename: { (l1,l2): None, ... }, ...}
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding arcs: %d files, %d arcs total" % (
- len(arc_data), sum(len(arcs) for arcs in arc_data.values())
- ))
- self._start_using()
- self._choose_lines_or_arcs(arcs=True)
- if not arc_data:
- return
- with self._connect() as con:
- self._set_context_id()
- for filename, arcs in iitems(arc_data):
- file_id = self._file_id(filename, add=True)
- data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
- con.executemany(
- "insert or ignore into arc "
- "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
- data,
- )
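-
-    # Editor's note (illustrative sketch, not part of the original source):
-    # recording line data, per the docstrings of set_context(), add_lines()
-    # and write(). "example.coverage" and "prog.py" are hypothetical names::
-    #
-    #     data = CoverageData(basename="example.coverage")
-    #     data.set_context("unit_tests")
-    #     data.add_lines({"prog.py": {1: None, 2: None, 5: None}})
-    #     data.write()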
-
- def _choose_lines_or_arcs(self, lines=False, arcs=False):
- """Force the data file to choose between lines and arcs."""
- assert lines or arcs
- assert not (lines and arcs)
- if lines and self._has_arcs:
- raise CoverageException("Can't add line measurements to existing branch data")
- if arcs and self._has_lines:
- raise CoverageException("Can't add branch measurements to existing line data")
- if not self._has_arcs and not self._has_lines:
- self._has_lines = lines
- self._has_arcs = arcs
- with self._connect() as con:
- con.execute(
- "insert into meta (key, value) values (?, ?)",
- ('has_arcs', str(int(arcs)))
- )
-
- def add_file_tracers(self, file_tracers):
- """Add per-file plugin information.
-
- `file_tracers` is { filename: plugin_name, ... }
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
- if not file_tracers:
- return
- self._start_using()
- with self._connect() as con:
- for filename, plugin_name in iitems(file_tracers):
- file_id = self._file_id(filename)
- if file_id is None:
- raise CoverageException(
- "Can't add file tracer data for unmeasured file '%s'" % (filename,)
- )
-
- existing_plugin = self.file_tracer(filename)
- if existing_plugin:
- if existing_plugin != plugin_name:
- raise CoverageException(
- "Conflicting file tracer name for '%s': %r vs %r" % (
- filename, existing_plugin, plugin_name,
- )
- )
- elif plugin_name:
- con.execute(
- "insert into tracer (file_id, tracer) values (?, ?)",
- (file_id, plugin_name)
- )
-
- def touch_file(self, filename, plugin_name=""):
- """Ensure that `filename` appears in the data, empty if needed.
-
- `plugin_name` is the name of the plugin responsible for this file. It is used
- to associate the right filereporter, etc.
- """
- self.touch_files([filename], plugin_name)
-
- def touch_files(self, filenames, plugin_name=""):
- """Ensure that `filenames` appear in the data, empty if needed.
-
- `plugin_name` is the name of the plugin responsible for these files. It is used
- to associate the right filereporter, etc.
- """
- if self._debug.should('dataop'):
- self._debug.write("Touching %r" % (filenames,))
- self._start_using()
- with self._connect(): # Use this to get one transaction.
- if not self._has_arcs and not self._has_lines:
- raise CoverageException("Can't touch files in an empty CoverageData")
-
- for filename in filenames:
- self._file_id(filename, add=True)
- if plugin_name:
- # Set the tracer for this file
- self.add_file_tracers({filename: plugin_name})
-
- def update(self, other_data, aliases=None):
-        """Update this data with data from another :class:`CoverageData` instance.
-
- If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's.
- """
- if self._debug.should('dataop'):
- self._debug.write("Updating with data from %r" % (
- getattr(other_data, '_filename', '???'),
- ))
- if self._has_lines and other_data._has_arcs:
- raise CoverageException("Can't combine arc data with line data")
- if self._has_arcs and other_data._has_lines:
- raise CoverageException("Can't combine line data with arc data")
-
- aliases = aliases or PathAliases()
-
- # Force the database we're writing to to exist before we start nesting
- # contexts.
- self._start_using()
-
- # Collector for all arcs, lines and tracers
- other_data.read()
- with other_data._connect() as conn:
- # Get files data.
- cur = conn.execute('select path from file')
- files = {path: aliases.map(path) for (path,) in cur}
- cur.close()
-
- # Get contexts data.
- cur = conn.execute('select context from context')
- contexts = [context for (context,) in cur]
- cur.close()
-
- # Get arc data.
- cur = conn.execute(
- 'select file.path, context.context, arc.fromno, arc.tono '
- 'from arc '
- 'inner join file on file.id = arc.file_id '
- 'inner join context on context.id = arc.context_id'
- )
- arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
- cur.close()
-
- # Get line data.
- cur = conn.execute(
- 'select file.path, context.context, line_bits.numbits '
- 'from line_bits '
- 'inner join file on file.id = line_bits.file_id '
- 'inner join context on context.id = line_bits.context_id'
- )
- lines = {
- (files[path], context): numbits
- for (path, context, numbits) in cur
- }
- cur.close()
-
- # Get tracer data.
- cur = conn.execute(
- 'select file.path, tracer '
- 'from tracer '
- 'inner join file on file.id = tracer.file_id'
- )
- tracers = {files[path]: tracer for (path, tracer) in cur}
- cur.close()
-
- with self._connect() as conn:
- conn.con.isolation_level = 'IMMEDIATE'
-
- # Get all tracers in the DB. Files not in the tracers are assumed
- # to have an empty string tracer. Since Sqlite does not support
- # full outer joins, we have to make two queries to fill the
- # dictionary.
- this_tracers = {path: '' for path, in conn.execute('select path from file')}
- this_tracers.update({
- aliases.map(path): tracer
- for path, tracer in conn.execute(
- 'select file.path, tracer from tracer '
- 'inner join file on file.id = tracer.file_id'
- )
- })
-
- # Create all file and context rows in the DB.
- conn.executemany(
- 'insert or ignore into file (path) values (?)',
- ((file,) for file in files.values())
- )
- file_ids = {
- path: id
- for id, path in conn.execute('select id, path from file')
- }
- conn.executemany(
- 'insert or ignore into context (context) values (?)',
- ((context,) for context in contexts)
- )
- context_ids = {
- context: id
- for id, context in conn.execute('select id, context from context')
- }
-
-            # Prepare tracers and fail if a conflict is found. tracer_map
-            # tracks the tracers to be inserted, keyed by the mapped file path.
- tracer_map = {}
- for path in files.values():
- this_tracer = this_tracers.get(path)
- other_tracer = tracers.get(path, '')
- # If there is no tracer, there is always the None tracer.
- if this_tracer is not None and this_tracer != other_tracer:
- raise CoverageException(
- "Conflicting file tracer name for '%s': %r vs %r" % (
- path, this_tracer, other_tracer
- )
- )
- tracer_map[path] = other_tracer
-
- # Prepare arc and line rows to be inserted by converting the file
- # and context strings with integer ids. Then use the efficient
- # `executemany()` to insert all rows at once.
- arc_rows = (
- (file_ids[file], context_ids[context], fromno, tono)
- for file, context, fromno, tono in arcs
- )
-
- # Get line data.
- cur = conn.execute(
- 'select file.path, context.context, line_bits.numbits '
- 'from line_bits '
- 'inner join file on file.id = line_bits.file_id '
- 'inner join context on context.id = line_bits.context_id'
- )
- for path, context, numbits in cur:
- key = (aliases.map(path), context)
- if key in lines:
- numbits = numbits_union(lines[key], numbits)
- lines[key] = numbits
- cur.close()
-
- if arcs:
- self._choose_lines_or_arcs(arcs=True)
-
- # Write the combined data.
- conn.executemany(
- 'insert or ignore into arc '
- '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
- arc_rows
- )
-
- if lines:
- self._choose_lines_or_arcs(lines=True)
- conn.execute("delete from line_bits")
- conn.executemany(
- "insert into line_bits "
- "(file_id, context_id, numbits) values (?, ?, ?)",
- [
- (file_ids[file], context_ids[context], numbits)
- for (file, context), numbits in lines.items()
- ]
- )
- conn.executemany(
- 'insert or ignore into tracer (file_id, tracer) values (?, ?)',
- ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
- )
-
- # Update all internal cache data.
- self._reset()
- self.read()
-
- def erase(self, parallel=False):
- """Erase the data in this object.
-
- If `parallel` is true, then also deletes data files created from the
- basename by parallel-mode.
-
- """
- self._reset()
- if self._no_disk:
- return
- if self._debug.should('dataio'):
- self._debug.write("Erasing data file {!r}".format(self._filename))
- file_be_gone(self._filename)
- if parallel:
- data_dir, local = os.path.split(self._filename)
- localdot = local + '.*'
- pattern = os.path.join(os.path.abspath(data_dir), localdot)
- for filename in glob.glob(pattern):
- if self._debug.should('dataio'):
- self._debug.write("Erasing parallel data file {!r}".format(filename))
- file_be_gone(filename)
-
- def read(self):
- """Start using an existing data file."""
- with self._connect(): # TODO: doesn't look right
- self._have_used = True
-
- def write(self):
- """Ensure the data is written to the data file."""
- pass
-
- def _start_using(self):
- """Call this before using the database at all."""
- if self._pid != os.getpid():
- # Looks like we forked! Have to start a new data file.
- self._reset()
- self._choose_filename()
- self._pid = os.getpid()
- if not self._have_used:
- self.erase()
- self._have_used = True
-
- def has_arcs(self):
- """Does the database have arcs (True) or lines (False)."""
- return bool(self._has_arcs)
-
- def measured_files(self):
- """A set of all files that had been measured."""
- return set(self._file_map)
-
- def measured_contexts(self):
- """A set of all contexts that have been measured.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- with self._connect() as con:
- contexts = {row[0] for row in con.execute("select distinct(context) from context")}
- return contexts
-
- def file_tracer(self, filename):
- """Get the plugin name of the file tracer for a file.
-
- Returns the name of the plugin that handles this file. If the file was
- measured, but didn't use a plugin, then "" is returned. If the file
- was not measured, then None is returned.
-
- """
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,))
- if row is not None:
- return row[0] or ""
- return "" # File was measured, but no tracer associated.
-
- def set_query_context(self, context):
- """Set a context for subsequent querying.
-
- The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
- calls will be limited to only one context. `context` is a string which
- must match a context exactly. If it does not, no exception is raised,
- but queries will return no data.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- with self._connect() as con:
- cur = con.execute("select id from context where context = ?", (context,))
- self._query_context_ids = [row[0] for row in cur.fetchall()]
-
- def set_query_contexts(self, contexts):
- """Set a number of contexts for subsequent querying.
-
- The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
- calls will be limited to the specified contexts. `contexts` is a list
- of Python regular expressions. Contexts will be matched using
- :func:`re.search <python:re.search>`. Data will be included in query
- results if they are part of any of the contexts matched.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- if contexts:
- with self._connect() as con:
- context_clause = ' or '.join(['context regexp ?'] * len(contexts))
- cur = con.execute("select id from context where " + context_clause, contexts)
- self._query_context_ids = [row[0] for row in cur.fetchall()]
- else:
- self._query_context_ids = None
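-
-    # Editor's note (illustrative sketch, not part of the original source):
-    # narrowing queries to matching contexts, per the docstring above; the
-    # pattern and file name are hypothetical::
-    #
-    #     data.read()
-    #     data.set_query_contexts([r"^test_"])
-    #     ctx_map = data.contexts_by_lineno("prog.py")   # {lineno: [contexts]}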
-
- def lines(self, filename):
- """Get the list of lines executed for a file.
-
- If the file was not measured, returns None. A file might be measured,
- and have no lines executed, in which case an empty list is returned.
-
- If the file was executed, returns a list of integers, the line numbers
- executed in the file. The list is in no particular order.
-
- """
- self._start_using()
- if self.has_arcs():
- arcs = self.arcs(filename)
- if arcs is not None:
- all_lines = itertools.chain.from_iterable(arcs)
- return list({l for l in all_lines if l > 0})
-
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- else:
- query = "select numbits from line_bits where file_id = ?"
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and context_id in (" + ids_array + ")"
- data += self._query_context_ids
- bitmaps = list(con.execute(query, data))
- nums = set()
- for row in bitmaps:
- nums.update(numbits_to_nums(row[0]))
- return list(nums)
-
- def arcs(self, filename):
- """Get the list of arcs executed for a file.
-
- If the file was not measured, returns None. A file might be measured,
- and have no arcs executed, in which case an empty list is returned.
-
- If the file was executed, returns a list of 2-tuples of integers. Each
- pair is a starting line number and an ending line number for a
- transition from one line to another. The list is in no particular
- order.
-
- Negative numbers have special meaning. If the starting line number is
- -N, it represents an entry to the code object that starts at line N.
-        If the ending line number is -N, it's an exit from the code object that
- starts at line N.
-
- """
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- else:
- query = "select distinct fromno, tono from arc where file_id = ?"
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and context_id in (" + ids_array + ")"
- data += self._query_context_ids
- arcs = con.execute(query, data)
- return list(arcs)
-
- def contexts_by_lineno(self, filename):
- """Get the contexts for each line in a file.
-
- Returns:
- A dict mapping line numbers to a list of context names.
-
- .. versionadded:: 5.0
-
- """
- lineno_contexts_map = collections.defaultdict(list)
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return lineno_contexts_map
- if self.has_arcs():
- query = (
- "select arc.fromno, arc.tono, context.context "
- "from arc, context "
- "where arc.file_id = ? and arc.context_id = context.id"
- )
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and arc.context_id in (" + ids_array + ")"
- data += self._query_context_ids
- for fromno, tono, context in con.execute(query, data):
- if context not in lineno_contexts_map[fromno]:
- lineno_contexts_map[fromno].append(context)
- if context not in lineno_contexts_map[tono]:
- lineno_contexts_map[tono].append(context)
- else:
- query = (
- "select l.numbits, c.context from line_bits l, context c "
- "where l.context_id = c.id "
- "and file_id = ?"
- )
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and l.context_id in (" + ids_array + ")"
- data += self._query_context_ids
- for numbits, context in con.execute(query, data):
- for lineno in numbits_to_nums(numbits):
- lineno_contexts_map[lineno].append(context)
- return lineno_contexts_map
-
- @classmethod
- def sys_info(cls):
- """Our information for `Coverage.sys_info`.
-
- Returns a list of (key, value) pairs.
-
- """
- with SqliteDb(":memory:", debug=NoDebugging()) as db:
- temp_store = [row[0] for row in db.execute("pragma temp_store")]
- compile_options = [row[0] for row in db.execute("pragma compile_options")]
-
- return [
- ('sqlite3_version', sqlite3.version),
- ('sqlite3_sqlite_version', sqlite3.sqlite_version),
- ('sqlite3_temp_store', temp_store),
- ('sqlite3_compile_options', compile_options),
- ]
-
-
-class SqliteDb(SimpleReprMixin):
- """A simple abstraction over a SQLite database.
-
- Use as a context manager, then you can use it like a
- :class:`python:sqlite3.Connection` object::
-
- with SqliteDb(filename, debug_control) as db:
- db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))
-
- """
- def __init__(self, filename, debug):
- self.debug = debug if debug.should('sql') else None
- self.filename = filename
- self.nest = 0
- self.con = None
-
- def _connect(self):
- """Connect to the db and do universal initialization."""
- if self.con is not None:
- return
-
- # SQLite on Windows on py2 won't open a file if the filename argument
- # has non-ascii characters in it. Opening a relative file name avoids
- # a problem if the current directory has non-ascii.
- filename = self.filename
- if env.WINDOWS and env.PY2:
- try:
- filename = os.path.relpath(self.filename)
- except ValueError:
- # ValueError can be raised under Windows when os.getcwd() returns a
- # folder from a different drive than the drive of self.filename in
- # which case we keep the original value of self.filename unchanged,
- # hoping that we won't face the non-ascii directory problem.
- pass
-
- # It can happen that Python switches threads while the tracer writes
- # data. The second thread will also try to write to the data,
- # effectively causing a nested context. However, given the idempotent
- # nature of the tracer operations, sharing a connection among threads
- # is not a problem.
- if self.debug:
- self.debug.write("Connecting to {!r}".format(self.filename))
- self.con = sqlite3.connect(filename, check_same_thread=False)
- self.con.create_function('REGEXP', 2, _regexp)
-
- # This pragma makes writing faster. It disables rollbacks, but we never need them.
- # PyPy needs the .close() calls here, or sqlite gets twisted up:
- # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
- self.execute("pragma journal_mode=off").close()
- # This pragma makes writing faster.
- self.execute("pragma synchronous=off").close()
-
- def close(self):
- """If needed, close the connection."""
- if self.con is not None and self.filename != ":memory:":
- self.con.close()
- self.con = None
-
- def __enter__(self):
- if self.nest == 0:
- self._connect()
- self.con.__enter__()
- self.nest += 1
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.nest -= 1
- if self.nest == 0:
- try:
- self.con.__exit__(exc_type, exc_value, traceback)
- self.close()
- except Exception as exc:
- if self.debug:
- self.debug.write("EXCEPTION from __exit__: {}".format(exc))
- raise
-
- def execute(self, sql, parameters=()):
- """Same as :meth:`python:sqlite3.Connection.execute`."""
- if self.debug:
- tail = " with {!r}".format(parameters) if parameters else ""
- self.debug.write("Executing {!r}{}".format(sql, tail))
- try:
- try:
- return self.con.execute(sql, parameters)
- except Exception:
- # In some cases, an error might happen that isn't really an
- # error. Try again immediately.
- # https://github.com/nedbat/coveragepy/issues/1010
- return self.con.execute(sql, parameters)
- except sqlite3.Error as exc:
- msg = str(exc)
- try:
- # `execute` is the first thing we do with the database, so try
- # hard to provide useful hints if something goes wrong now.
- with open(self.filename, "rb") as bad_file:
- cov4_sig = b"!coverage.py: This is a private format"
- if bad_file.read(len(cov4_sig)) == cov4_sig:
- msg = (
- "Looks like a coverage 4.x data file. "
- "Are you mixing versions of coverage?"
- )
- except Exception:
- pass
- if self.debug:
- self.debug.write("EXCEPTION from execute: {}".format(msg))
- raise CoverageException("Couldn't use data file {!r}: {}".format(self.filename, msg))
-
- def execute_one(self, sql, parameters=()):
- """Execute a statement and return the one row that results.
-
- This is like execute(sql, parameters).fetchone(), except it is
- correct in reading the entire result set. This will raise an
- exception if more than one row results.
-
- Returns a row, or None if there were no rows.
- """
- rows = list(self.execute(sql, parameters))
- if len(rows) == 0:
- return None
- elif len(rows) == 1:
- return rows[0]
- else:
- raise CoverageException("Sql {!r} shouldn't return {} rows".format(sql, len(rows)))
-
- def executemany(self, sql, data):
- """Same as :meth:`python:sqlite3.Connection.executemany`."""
- if self.debug:
- data = list(data)
- self.debug.write("Executing many {!r} with {} rows".format(sql, len(data)))
- return self.con.executemany(sql, data)
-
- def executescript(self, script):
- """Same as :meth:`python:sqlite3.Connection.executescript`."""
- if self.debug:
- self.debug.write("Executing script with {} chars: {}".format(
- len(script), clipped_repr(script, 100),
- ))
- self.con.executescript(script)
-
- def dump(self):
- """Return a multi-line string, the SQL dump of the database."""
- return "\n".join(self.con.iterdump())
-
-
-def _regexp(text, pattern):
- """A regexp function for SQLite."""
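-    # Editor's note: SQLite evaluates "X REGEXP Y" as regexp(Y, X), so the
-    # first argument received here is the pattern and the second is the
-    # string being searched, matching re.search()'s argument order.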
- return re.search(text, pattern) is not None
diff --git a/contrib/python/coverage/py2/coverage/summary.py b/contrib/python/coverage/py2/coverage/summary.py
deleted file mode 100644
index 65f8047006..0000000000
--- a/contrib/python/coverage/py2/coverage/summary.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Summary reporting"""
-
-import sys
-
-from coverage import env
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-from coverage.misc import CoverageException, output_encoding
-
-
-class SummaryReporter(object):
- """A reporter for writing the summary report."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.branches = coverage.get_data().has_arcs()
- self.outfile = None
- self.fr_analysis = []
- self.skipped_count = 0
- self.empty_count = 0
- self.total = Numbers()
- self.fmt_err = u"%s %s: %s"
-
- def writeout(self, line):
- """Write a line to the output, adding a newline."""
- if env.PY2:
- line = line.encode(output_encoding())
- self.outfile.write(line.rstrip())
- self.outfile.write("\n")
-
- def report(self, morfs, outfile=None):
- """Writes a report summarizing coverage statistics per module.
-
- `outfile` is a file object to write the summary to. It must be opened
- for native strings (bytes on Python 2, Unicode on Python 3).
-
- """
- self.outfile = outfile or sys.stdout
-
- self.coverage.get_data().set_query_contexts(self.config.report_contexts)
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.report_one_file(fr, analysis)
-
- # Prepare the formatting strings, header, and column sorting.
- max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
- fmt_name = u"%%- %ds " % max_name
- fmt_skip_covered = u"\n%s file%s skipped due to complete coverage."
- fmt_skip_empty = u"\n%s empty file%s skipped."
-
- header = (fmt_name % "Name") + u" Stmts Miss"
- fmt_coverage = fmt_name + u"%6d %6d"
- if self.branches:
- header += u" Branch BrPart"
- fmt_coverage += u" %6d %6d"
- width100 = Numbers.pc_str_width()
- header += u"%*s" % (width100+4, "Cover")
- fmt_coverage += u"%%%ds%%%%" % (width100+3,)
- if self.config.show_missing:
- header += u" Missing"
- fmt_coverage += u" %s"
- rule = u"-" * len(header)
-
- column_order = dict(name=0, stmts=1, miss=2, cover=-1)
- if self.branches:
- column_order.update(dict(branch=3, brpart=4))
-
- # Write the header
- self.writeout(header)
- self.writeout(rule)
-
- # `lines` is a list of pairs, (line text, line values). The line text
- # is a string that will be printed, and line values is a tuple of
- # sortable values.
- lines = []
-
- for (fr, analysis) in self.fr_analysis:
- nums = analysis.numbers
-
- args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
- if self.branches:
- args += (nums.n_branches, nums.n_partial_branches)
- args += (nums.pc_covered_str,)
- if self.config.show_missing:
- args += (analysis.missing_formatted(branches=True),)
- text = fmt_coverage % args
- # Add numeric percent coverage so that sorting makes sense.
- args += (nums.pc_covered,)
- lines.append((text, args))
-
- # Sort the lines and write them out.
- if getattr(self.config, 'sort', None):
- sort_option = self.config.sort.lower()
- reverse = False
- if sort_option[0] == '-':
- reverse = True
- sort_option = sort_option[1:]
- elif sort_option[0] == '+':
- sort_option = sort_option[1:]
-
- position = column_order.get(sort_option)
- if position is None:
- raise CoverageException("Invalid sorting option: {!r}".format(self.config.sort))
- lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)
-
- for line in lines:
- self.writeout(line[0])
-
- # Write a TOTAL line if we had at least one file.
- if self.total.n_files > 0:
- self.writeout(rule)
- args = ("TOTAL", self.total.n_statements, self.total.n_missing)
- if self.branches:
- args += (self.total.n_branches, self.total.n_partial_branches)
- args += (self.total.pc_covered_str,)
- if self.config.show_missing:
- args += ("",)
- self.writeout(fmt_coverage % args)
-
- # Write other final lines.
- if not self.total.n_files and not self.skipped_count:
- raise CoverageException("No data to report.")
-
- if self.config.skip_covered and self.skipped_count:
- self.writeout(
- fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
- )
- if self.config.skip_empty and self.empty_count:
- self.writeout(
- fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
- )
-
- return self.total.n_statements and self.total.pc_covered
-
- def report_one_file(self, fr, analysis):
- """Report on just one file, the callback from report()."""
- nums = analysis.numbers
- self.total += nums
-
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if self.config.skip_covered and no_missing_lines and no_missing_branches:
- # Don't report on 100% files.
- self.skipped_count += 1
- elif self.config.skip_empty and nums.n_statements == 0:
- # Don't report on empty files.
- self.empty_count += 1
- else:
- self.fr_analysis.append((fr, analysis))
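-
-# Editor's note (illustrative sketch, not part of the original source): the
-# reporter is normally driven from a Coverage object, roughly what
-# coverage.Coverage.report() does internally; run_my_code is hypothetical::
-#
-#     import sys
-#     import coverage
-#     cov = coverage.Coverage()
-#     cov.start(); run_my_code(); cov.stop()
-#     SummaryReporter(cov).report(None, outfile=sys.stdout)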
diff --git a/contrib/python/coverage/py2/coverage/templite.py b/contrib/python/coverage/py2/coverage/templite.py
deleted file mode 100644
index 7d4024e0af..0000000000
--- a/contrib/python/coverage/py2/coverage/templite.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""A simple Python template renderer, for a nano-subset of Django syntax.
-
-For a detailed discussion of this code, see this chapter from 500 Lines:
-http://aosabook.org/en/500L/a-template-engine.html
-
-"""
-
-# Coincidentally named the same as http://code.activestate.com/recipes/496702/
-
-import re
-
-from coverage import env
-
-
-class TempliteSyntaxError(ValueError):
- """Raised when a template has a syntax error."""
- pass
-
-
-class TempliteValueError(ValueError):
- """Raised when an expression won't evaluate in a template."""
- pass
-
-
-class CodeBuilder(object):
- """Build source code conveniently."""
-
- def __init__(self, indent=0):
- self.code = []
- self.indent_level = indent
-
- def __str__(self):
- return "".join(str(c) for c in self.code)
-
- def add_line(self, line):
- """Add a line of source to the code.
-
- Indentation and newline will be added for you, don't provide them.
-
- """
- self.code.extend([" " * self.indent_level, line, "\n"])
-
- def add_section(self):
- """Add a section, a sub-CodeBuilder."""
- section = CodeBuilder(self.indent_level)
- self.code.append(section)
- return section
-
- INDENT_STEP = 4 # PEP8 says so!
-
- def indent(self):
- """Increase the current indent for following lines."""
- self.indent_level += self.INDENT_STEP
-
- def dedent(self):
- """Decrease the current indent for following lines."""
- self.indent_level -= self.INDENT_STEP
-
- def get_globals(self):
- """Execute the code, and return a dict of globals it defines."""
- # A check that the caller really finished all the blocks they started.
- assert self.indent_level == 0
- # Get the Python source as a single string.
- python_source = str(self)
- # Execute the source, defining globals, and return them.
- global_namespace = {}
- exec(python_source, global_namespace)
- return global_namespace
-
-
-class Templite(object):
- """A simple template renderer, for a nano-subset of Django syntax.
-
- Supported constructs are extended variable access::
-
- {{var.modifier.modifier|filter|filter}}
-
- loops::
-
- {% for var in list %}...{% endfor %}
-
- and ifs::
-
- {% if var %}...{% endif %}
-
- Comments are within curly-hash markers::
-
- {# This will be ignored #}
-
- Lines between `{% joined %}` and `{% endjoined %}` will have lines stripped
- and joined. Be careful, this could join words together!
-
- Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
- which will collapse the whitespace following the tag.
-
- Construct a Templite with the template text, then use `render` against a
- dictionary context to create a finished string::
-
- templite = Templite('''
- <h1>Hello {{name|upper}}!</h1>
- {% for topic in topics %}
- <p>You are interested in {{topic}}.</p>
-            {% endfor %}
- ''',
- {'upper': str.upper},
- )
- text = templite.render({
- 'name': "Ned",
- 'topics': ['Python', 'Geometry', 'Juggling'],
- })
-
- """
- def __init__(self, text, *contexts):
- """Construct a Templite with the given `text`.
-
- `contexts` are dictionaries of values to use for future renderings.
- These are good for filters and global values.
-
- """
- self.context = {}
- for context in contexts:
- self.context.update(context)
-
- self.all_vars = set()
- self.loop_vars = set()
-
- # We construct a function in source form, then compile it and hold onto
- # it, and execute it to render the template.
- code = CodeBuilder()
-
- code.add_line("def render_function(context, do_dots):")
- code.indent()
- vars_code = code.add_section()
- code.add_line("result = []")
- code.add_line("append_result = result.append")
- code.add_line("extend_result = result.extend")
- if env.PY2:
- code.add_line("to_str = unicode")
- else:
- code.add_line("to_str = str")
-
- buffered = []
-
- def flush_output():
- """Force `buffered` to the code builder."""
- if len(buffered) == 1:
- code.add_line("append_result(%s)" % buffered[0])
- elif len(buffered) > 1:
- code.add_line("extend_result([%s])" % ", ".join(buffered))
- del buffered[:]
-
- ops_stack = []
-
- # Split the text to form a list of tokens.
- tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
-
- squash = in_joined = False
-
- for token in tokens:
- if token.startswith('{'):
- start, end = 2, -2
- squash = (token[-3] == '-')
- if squash:
- end = -3
-
- if token.startswith('{#'):
- # Comment: ignore it and move on.
- continue
- elif token.startswith('{{'):
- # An expression to evaluate.
- expr = self._expr_code(token[start:end].strip())
- buffered.append("to_str(%s)" % expr)
- else:
- # token.startswith('{%')
- # Action tag: split into words and parse further.
- flush_output()
-
- words = token[start:end].strip().split()
- if words[0] == 'if':
- # An if statement: evaluate the expression to determine if.
- if len(words) != 2:
- self._syntax_error("Don't understand if", token)
- ops_stack.append('if')
- code.add_line("if %s:" % self._expr_code(words[1]))
- code.indent()
- elif words[0] == 'for':
- # A loop: iterate over expression result.
- if len(words) != 4 or words[2] != 'in':
- self._syntax_error("Don't understand for", token)
- ops_stack.append('for')
- self._variable(words[1], self.loop_vars)
- code.add_line(
- "for c_%s in %s:" % (
- words[1],
- self._expr_code(words[3])
- )
- )
- code.indent()
- elif words[0] == 'joined':
- ops_stack.append('joined')
- in_joined = True
- elif words[0].startswith('end'):
- # Endsomething. Pop the ops stack.
- if len(words) != 1:
- self._syntax_error("Don't understand end", token)
- end_what = words[0][3:]
- if not ops_stack:
- self._syntax_error("Too many ends", token)
- start_what = ops_stack.pop()
- if start_what != end_what:
- self._syntax_error("Mismatched end tag", end_what)
- if end_what == 'joined':
- in_joined = False
- else:
- code.dedent()
- else:
- self._syntax_error("Don't understand tag", words[0])
- else:
- # Literal content. If it isn't empty, output it.
- if in_joined:
- token = re.sub(r"\s*\n\s*", "", token.strip())
- elif squash:
- token = token.lstrip()
- if token:
- buffered.append(repr(token))
-
- if ops_stack:
- self._syntax_error("Unmatched action tag", ops_stack[-1])
-
- flush_output()
-
- for var_name in self.all_vars - self.loop_vars:
- vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
-
- code.add_line('return "".join(result)')
- code.dedent()
- self._render_function = code.get_globals()['render_function']
-
- def _expr_code(self, expr):
- """Generate a Python expression for `expr`."""
- if "|" in expr:
- pipes = expr.split("|")
- code = self._expr_code(pipes[0])
- for func in pipes[1:]:
- self._variable(func, self.all_vars)
- code = "c_%s(%s)" % (func, code)
- elif "." in expr:
- dots = expr.split(".")
- code = self._expr_code(dots[0])
- args = ", ".join(repr(d) for d in dots[1:])
- code = "do_dots(%s, %s)" % (code, args)
- else:
- self._variable(expr, self.all_vars)
- code = "c_%s" % expr
- return code
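-
-    # Editor's note (illustrative, not part of the original source): for
-    # example, the expression ``name.first|upper`` compiles to::
-    #
-    #     c_upper(do_dots(c_name, 'first'))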
-
- def _syntax_error(self, msg, thing):
- """Raise a syntax error using `msg`, and showing `thing`."""
- raise TempliteSyntaxError("%s: %r" % (msg, thing))
-
- def _variable(self, name, vars_set):
- """Track that `name` is used as a variable.
-
- Adds the name to `vars_set`, a set of variable names.
-
-        Raises a syntax error if `name` is not a valid name.
-
- """
- if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
- self._syntax_error("Not a valid name", name)
- vars_set.add(name)
-
- def render(self, context=None):
- """Render this template by applying it to `context`.
-
- `context` is a dictionary of values to use in this rendering.
-
- """
- # Make the complete context we'll use.
- render_context = dict(self.context)
- if context:
- render_context.update(context)
- return self._render_function(render_context, self._do_dots)
-
- def _do_dots(self, value, *dots):
- """Evaluate dotted expressions at run-time."""
- for dot in dots:
- try:
- value = getattr(value, dot)
- except AttributeError:
- try:
- value = value[dot]
- except (TypeError, KeyError):
- raise TempliteValueError(
- "Couldn't evaluate %r.%s" % (value, dot)
- )
- if callable(value):
- value = value()
- return value
diff --git a/contrib/python/coverage/py2/coverage/tomlconfig.py b/contrib/python/coverage/py2/coverage/tomlconfig.py
deleted file mode 100644
index 3ad581571c..0000000000
--- a/contrib/python/coverage/py2/coverage/tomlconfig.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""TOML configuration support for coverage.py"""
-
-import io
-import os
-import re
-
-from coverage import env
-from coverage.backward import configparser, path_types
-from coverage.misc import CoverageException, substitute_variables
-
-# TOML support is an install-time extra option.
-try:
- import toml
-except ImportError: # pragma: not covered
- toml = None
-
-
-class TomlDecodeError(Exception):
- """An exception class that exists even when toml isn't installed."""
- pass
-
-
-class TomlConfigParser:
- """TOML file reading with the interface of HandyConfigParser."""
-
- # This class has the same interface as config.HandyConfigParser, no
- # need for docstrings.
- # pylint: disable=missing-function-docstring
-
- def __init__(self, our_file):
- self.our_file = our_file
- self.data = None
-
- def read(self, filenames):
- # RawConfigParser takes a filename or list of filenames, but we only
- # ever call this with a single filename.
- assert isinstance(filenames, path_types)
- filename = filenames
- if env.PYVERSION >= (3, 6):
- filename = os.fspath(filename)
-
- try:
- with io.open(filename, encoding='utf-8') as fp:
- toml_text = fp.read()
- except IOError:
- return []
- if toml:
- toml_text = substitute_variables(toml_text, os.environ)
- try:
- self.data = toml.loads(toml_text)
- except toml.TomlDecodeError as err:
- raise TomlDecodeError(*err.args)
- return [filename]
- else:
- has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
- if self.our_file or has_toml:
- # Looks like they meant to read TOML, but we can't read it.
- msg = "Can't read {!r} without TOML support. Install with [toml] extra"
- raise CoverageException(msg.format(filename))
- return []
-
- def _get_section(self, section):
- """Get a section from the data.
-
- Arguments:
- section (str): A section name, which can be dotted.
-
- Returns:
- name (str): the actual name of the section that was found, if any,
- or None.
- data (str): the dict of data in the section, or None if not found.
-
- """
- prefixes = ["tool.coverage."]
- if self.our_file:
- prefixes.append("")
- for prefix in prefixes:
- real_section = prefix + section
- parts = real_section.split(".")
- try:
- data = self.data[parts[0]]
- for part in parts[1:]:
- data = data[part]
- except KeyError:
- continue
- break
- else:
- return None, None
- return real_section, data
-
- def _get(self, section, option):
- """Like .get, but returns the real section name and the value."""
- name, data = self._get_section(section)
- if data is None:
- raise configparser.NoSectionError(section)
- try:
- return name, data[option]
- except KeyError:
- raise configparser.NoOptionError(option, name)
-
- def has_option(self, section, option):
- _, data = self._get_section(section)
- if data is None:
- return False
- return option in data
-
- def has_section(self, section):
- name, _ = self._get_section(section)
- return name
-
- def options(self, section):
- _, data = self._get_section(section)
- if data is None:
- raise configparser.NoSectionError(section)
- return list(data.keys())
-
- def get_section(self, section):
- _, data = self._get_section(section)
- return data
-
- def get(self, section, option):
- _, value = self._get(section, option)
- return value
-
- def _check_type(self, section, option, value, type_, type_desc):
- if not isinstance(value, type_):
- raise ValueError(
- 'Option {!r} in section {!r} is not {}: {!r}'
- .format(option, section, type_desc, value)
- )
-
- def getboolean(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, bool, "a boolean")
- return value
-
- def getlist(self, section, option):
- name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
- return values
-
- def getregexlist(self, section, option):
- name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
- for value in values:
- value = value.strip()
- try:
- re.compile(value)
- except re.error as e:
- raise CoverageException(
- "Invalid [%s].%s value %r: %s" % (name, option, value, e)
- )
- return values
-
- def getint(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, int, "an integer")
- return value
-
- def getfloat(self, section, option):
- name, value = self._get(section, option)
- if isinstance(value, int):
- value = float(value)
- self._check_type(name, option, value, float, "a float")
- return value
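
For context: tomlconfig.py, removed above, maps dotted section names such as "run" onto the [tool.coverage.*] tables of a TOML file. The sketch below re-implements that lookup standalone; it assumes the third-party toml package (the same optional dependency the deleted module imports) is installed, and get_section() is a local stand-in for TomlConfigParser._get_section.

    import toml

    data = toml.loads("""
    [tool.coverage.run]
    branch = true
    """)

    def get_section(data, section, our_file=False):
        """Return (real_name, table) the way TomlConfigParser._get_section does."""
        prefixes = ["tool.coverage."] + ([""] if our_file else [])
        for prefix in prefixes:
            node = data
            try:
                for part in (prefix + section).split("."):
                    node = node[part]
            except KeyError:
                continue
            return prefix + section, node
        return None, None

    print(get_section(data, "run"))   # ('tool.coverage.run', {'branch': True})
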
diff --git a/contrib/python/coverage/py2/coverage/version.py b/contrib/python/coverage/py2/coverage/version.py
deleted file mode 100644
index d141a11da3..0000000000
--- a/contrib/python/coverage/py2/coverage/version.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""The version and URL for coverage.py"""
-# This file is exec'ed in setup.py, don't import anything!
-
-# Same semantics as sys.version_info.
-version_info = (5, 5, 0, "final", 0)
-
-
-def _make_version(major, minor, micro, releaselevel, serial):
- """Create a readable version string from version_info tuple components."""
- assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
- version = "%d.%d" % (major, minor)
- if micro:
- version += ".%d" % (micro,)
- if releaselevel != 'final':
- short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
- version += "%s%d" % (short, serial)
- return version
-
-
-def _make_url(major, minor, micro, releaselevel, serial):
- """Make the URL people should start at for this version of coverage.py."""
- url = "https://coverage.readthedocs.io"
- if releaselevel != 'final':
- # For pre-releases, use a version-specific URL.
- url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
- return url
-
-
-__version__ = _make_version(*version_info)
-__url__ = _make_url(*version_info)
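
For context, a worked example of the version helpers removed above; the values follow directly from the code.

    from coverage.version import _make_version, _make_url

    _make_version(5, 5, 0, "final", 0)   # -> "5.5"
    _make_version(5, 5, 1, "beta", 2)    # -> "5.5.1b2"
    _make_url(5, 5, 1, "beta", 2)        # -> "https://coverage.readthedocs.io/en/coverage-5.5.1b2"
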
diff --git a/contrib/python/coverage/py2/coverage/xmlreport.py b/contrib/python/coverage/py2/coverage/xmlreport.py
deleted file mode 100644
index 6d012ee692..0000000000
--- a/contrib/python/coverage/py2/coverage/xmlreport.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# coding: utf-8
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""XML reporting for coverage.py"""
-
-import os
-import os.path
-import sys
-import time
-import xml.dom.minidom
-
-from coverage import env
-from coverage import __url__, __version__, files
-from coverage.backward import iitems
-from coverage.misc import isolate_module
-from coverage.report import get_analysis_to_report
-
-os = isolate_module(os)
-
-
-DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
-
-
-def rate(hit, num):
- """Return the fraction of `hit`/`num`, as a string."""
- if num == 0:
- return "1"
- else:
- return "%.4g" % (float(hit) / num)
-
-
-class XmlReporter(object):
- """A reporter for writing Cobertura-style XML coverage results."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
-
- self.source_paths = set()
- if self.config.source:
- for src in self.config.source:
- if os.path.exists(src):
- if not self.config.relative_files:
- src = files.canonical_filename(src)
- self.source_paths.add(src)
- self.packages = {}
- self.xml_out = None
-
- def report(self, morfs, outfile=None):
- """Generate a Cobertura-compatible XML report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- `outfile` is a file object to write the XML to.
-
- """
- # Initial setup.
- outfile = outfile or sys.stdout
- has_arcs = self.coverage.get_data().has_arcs()
-
- # Create the DOM that will store the data.
- impl = xml.dom.minidom.getDOMImplementation()
- self.xml_out = impl.createDocument(None, "coverage", None)
-
- # Write header stuff.
- xcoverage = self.xml_out.documentElement
- xcoverage.setAttribute("version", __version__)
- xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
- xcoverage.appendChild(self.xml_out.createComment(
- " Generated by coverage.py: %s " % __url__
- ))
- xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))
-
- # Call xml_file for each file in the data.
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.xml_file(fr, analysis, has_arcs)
-
- xsources = self.xml_out.createElement("sources")
- xcoverage.appendChild(xsources)
-
- # Populate the XML DOM with the source info.
- for path in sorted(self.source_paths):
- xsource = self.xml_out.createElement("source")
- xsources.appendChild(xsource)
- txt = self.xml_out.createTextNode(path)
- xsource.appendChild(txt)
-
- lnum_tot, lhits_tot = 0, 0
- bnum_tot, bhits_tot = 0, 0
-
- xpackages = self.xml_out.createElement("packages")
- xcoverage.appendChild(xpackages)
-
- # Populate the XML DOM with the package info.
- for pkg_name, pkg_data in sorted(iitems(self.packages)):
- class_elts, lhits, lnum, bhits, bnum = pkg_data
- xpackage = self.xml_out.createElement("package")
- xpackages.appendChild(xpackage)
- xclasses = self.xml_out.createElement("classes")
- xpackage.appendChild(xclasses)
- for _, class_elt in sorted(iitems(class_elts)):
- xclasses.appendChild(class_elt)
- xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
- xpackage.setAttribute("line-rate", rate(lhits, lnum))
- if has_arcs:
- branch_rate = rate(bhits, bnum)
- else:
- branch_rate = "0"
- xpackage.setAttribute("branch-rate", branch_rate)
- xpackage.setAttribute("complexity", "0")
-
- lnum_tot += lnum
- lhits_tot += lhits
- bnum_tot += bnum
- bhits_tot += bhits
-
- xcoverage.setAttribute("lines-valid", str(lnum_tot))
- xcoverage.setAttribute("lines-covered", str(lhits_tot))
- xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
- if has_arcs:
- xcoverage.setAttribute("branches-valid", str(bnum_tot))
- xcoverage.setAttribute("branches-covered", str(bhits_tot))
- xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
- else:
- xcoverage.setAttribute("branches-covered", "0")
- xcoverage.setAttribute("branches-valid", "0")
- xcoverage.setAttribute("branch-rate", "0")
- xcoverage.setAttribute("complexity", "0")
-
- # Write the output file.
- outfile.write(serialize_xml(self.xml_out))
-
- # Return the total percentage.
- denom = lnum_tot + bnum_tot
- if denom == 0:
- pct = 0.0
- else:
- pct = 100.0 * (lhits_tot + bhits_tot) / denom
- return pct
-
- def xml_file(self, fr, analysis, has_arcs):
- """Add to the XML report for a single file."""
-
- if self.config.skip_empty:
- if analysis.numbers.n_statements == 0:
- return
-
- # Create the 'lines' and 'package' XML elements, which
- # are populated later. Note that a package == a directory.
- filename = fr.filename.replace("\\", "/")
- for source_path in self.source_paths:
- source_path = files.canonical_filename(source_path)
- if filename.startswith(source_path.replace("\\", "/") + "/"):
- rel_name = filename[len(source_path)+1:]
- break
- else:
- rel_name = fr.relative_filename()
- self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
-
- dirname = os.path.dirname(rel_name) or u"."
- dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
- package_name = dirname.replace("/", ".")
-
- package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
-
- xclass = self.xml_out.createElement("class")
-
- xclass.appendChild(self.xml_out.createElement("methods"))
-
- xlines = self.xml_out.createElement("lines")
- xclass.appendChild(xlines)
-
- xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
- xclass.setAttribute("filename", rel_name.replace("\\", "/"))
- xclass.setAttribute("complexity", "0")
-
- branch_stats = analysis.branch_stats()
- missing_branch_arcs = analysis.missing_branch_arcs()
-
- # For each statement, create an XML 'line' element.
- for line in sorted(analysis.statements):
- xline = self.xml_out.createElement("line")
- xline.setAttribute("number", str(line))
-
- # Q: can we get info about the number of times a statement is
- # executed? If so, that should be recorded here.
- xline.setAttribute("hits", str(int(line not in analysis.missing)))
-
- if has_arcs:
- if line in branch_stats:
- total, taken = branch_stats[line]
- xline.setAttribute("branch", "true")
- xline.setAttribute(
- "condition-coverage",
- "%d%% (%d/%d)" % (100*taken//total, taken, total)
- )
- if line in missing_branch_arcs:
- annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
- xline.setAttribute("missing-branches", ",".join(annlines))
- xlines.appendChild(xline)
-
- class_lines = len(analysis.statements)
- class_hits = class_lines - len(analysis.missing)
-
- if has_arcs:
- class_branches = sum(t for t, k in branch_stats.values())
- missing_branches = sum(t - k for t, k in branch_stats.values())
- class_br_hits = class_branches - missing_branches
- else:
- class_branches = 0.0
- class_br_hits = 0.0
-
- # Finalize the statistics that are collected in the XML DOM.
- xclass.setAttribute("line-rate", rate(class_hits, class_lines))
- if has_arcs:
- branch_rate = rate(class_br_hits, class_branches)
- else:
- branch_rate = "0"
- xclass.setAttribute("branch-rate", branch_rate)
-
- package[0][rel_name] = xclass
- package[1] += class_hits
- package[2] += class_lines
- package[3] += class_br_hits
- package[4] += class_branches
-
-
-def serialize_xml(dom):
- """Serialize a minidom node to XML."""
- out = dom.toprettyxml()
- if env.PY2:
- out = out.encode("utf8")
- return out
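
For context: the rate() helper removed above formats hit ratios with "%.4g", and XmlReporter.report() returns the combined line-and-branch percentage. A small worked example with made-up totals:

    from coverage.xmlreport import rate

    rate(7, 10)   # -> '0.7'
    rate(0, 0)    # -> '1'   (zero statements counts as fully covered)

    # The value returned by XmlReporter.report():
    lhits_tot, lnum_tot, bhits_tot, bnum_tot = 7, 10, 3, 4
    pct = 100.0 * (lhits_tot + bhits_tot) / (lnum_tot + bnum_tot)   # ~71.43
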
diff --git a/contrib/python/coverage/py2/ya.make b/contrib/python/coverage/py2/ya.make
deleted file mode 100644
index 7bff35eea3..0000000000
--- a/contrib/python/coverage/py2/ya.make
+++ /dev/null
@@ -1,98 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY2_LIBRARY()
-
-VERSION(5.5)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/python/coverage/plugins
- library/python/resource
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_LINT()
-
-NO_CHECK_IMPORTS(
- coverage.fullcoverage.encodings
-)
-
-SRCS(
- coverage/ctracer/datastack.c
- coverage/ctracer/filedisp.c
- coverage/ctracer/module.c
- coverage/ctracer/tracer.c
-)
-
-PY_REGISTER(
- coverage.tracer
-)
-
-PY_SRCS(
- TOP_LEVEL
- coverage/__init__.py
- coverage/__main__.py
- coverage/annotate.py
- coverage/backward.py
- coverage/bytecode.py
- coverage/cmdline.py
- coverage/collector.py
- coverage/config.py
- coverage/context.py
- coverage/control.py
- coverage/data.py
- coverage/debug.py
- coverage/disposition.py
- coverage/env.py
- coverage/execfile.py
- coverage/files.py
- coverage/fullcoverage/encodings.py
- coverage/html.py
- coverage/inorout.py
- coverage/jsonreport.py
- coverage/misc.py
- coverage/multiproc.py
- coverage/numbits.py
- coverage/parser.py
- coverage/phystokens.py
- coverage/plugin.py
- coverage/plugin_support.py
- coverage/python.py
- coverage/pytracer.py
- coverage/report.py
- coverage/results.py
- coverage/sqldata.py
- coverage/summary.py
- coverage/templite.py
- coverage/tomlconfig.py
- coverage/version.py
- coverage/xmlreport.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/coverage/py2/
- .dist-info/METADATA
- .dist-info/entry_points.txt
- .dist-info/top_level.txt
- coverage/htmlfiles/coverage_html.js
- coverage/htmlfiles/favicon_32.png
- coverage/htmlfiles/index.html
- coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
- coverage/htmlfiles/jquery.hotkeys.js
- coverage/htmlfiles/jquery.isonscreen.js
- coverage/htmlfiles/jquery.min.js
- coverage/htmlfiles/jquery.tablesorter.min.js
- coverage/htmlfiles/keybd_closed.png
- coverage/htmlfiles/keybd_open.png
- coverage/htmlfiles/pyfile.html
- coverage/htmlfiles/style.css
- coverage/htmlfiles/style.scss
-)
-
-END()
-
-RECURSE(
- bin
-)
diff --git a/contrib/python/coverage/py3/.dist-info/METADATA b/contrib/python/coverage/py3/.dist-info/METADATA
deleted file mode 100644
index 25a6049c45..0000000000
--- a/contrib/python/coverage/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,190 +0,0 @@
-Metadata-Version: 2.1
-Name: coverage
-Version: 5.5
-Summary: Code coverage measurement for Python
-Home-page: https://github.com/nedbat/coveragepy
-Author: Ned Batchelder and 142 others
-Author-email: ned@nedbatchelder.com
-License: Apache 2.0
-Project-URL: Documentation, https://coverage.readthedocs.io
-Project-URL: Funding, https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=pypi
-Project-URL: Issues, https://github.com/nedbat/coveragepy/issues
-Keywords: code coverage testing
-Platform: UNKNOWN
-Classifier: Environment :: Console
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: 3.10
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Software Development :: Quality Assurance
-Classifier: Topic :: Software Development :: Testing
-Classifier: Development Status :: 5 - Production/Stable
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, <4
-Description-Content-Type: text/x-rst
-Provides-Extra: toml
-Requires-Dist: toml ; extra == 'toml'
-
-.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-===========
-Coverage.py
-===========
-
-Code coverage testing for Python.
-
-| |license| |versions| |status|
-| |test-status| |quality-status| |docs| |codecov|
-| |kit| |format| |repos| |downloads|
-| |stars| |forks| |contributors|
-| |tidelift| |twitter-coveragepy| |twitter-nedbat|
-
-Coverage.py measures code coverage, typically during test execution. It uses
-the code analysis tools and tracing hooks provided in the Python standard
-library to determine which lines are executable, and which have been executed.
-
-Coverage.py runs on many versions of Python:
-
-* CPython 2.7.
-* CPython 3.5 through 3.10 alpha.
-* PyPy2 7.3.3 and PyPy3 7.3.3.
-
-Documentation is on `Read the Docs`_. Code repository and issue tracker are on
-`GitHub`_.
-
-.. _Read the Docs: https://coverage.readthedocs.io/
-.. _GitHub: https://github.com/nedbat/coveragepy
-
-
-**New in 5.x:** SQLite data storage, JSON report, contexts, relative filenames,
-dropped support for Python 2.6, 3.3 and 3.4.
-
-
-For Enterprise
---------------
-
-.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png
- :alt: Tidelift
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
-
-.. list-table::
- :widths: 10 100
-
- * - |tideliftlogo|
- - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
- Coverage and thousands of other packages are working with
- Tidelift to deliver one enterprise subscription that covers all of the open
- source you use. If you want the flexibility of open source and the confidence
- of commercial-grade software, this is for you.
- `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
-
-
-Getting Started
----------------
-
-See the `Quick Start section`_ of the docs.
-
-.. _Quick Start section: https://coverage.readthedocs.io/#quick-start
-
-
-Change history
---------------
-
-The complete history of changes is on the `change history page`_.
-
-.. _change history page: https://coverage.readthedocs.io/en/latest/changes.html
-
-
-Contributing
-------------
-
-See the `Contributing section`_ of the docs.
-
-.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
-
-
-Security
---------
-
-To report a security vulnerability, please use the `Tidelift security
-contact`_. Tidelift will coordinate the fix and disclosure.
-
-.. _Tidelift security contact: https://tidelift.com/security
-
-
-License
--------
-
-Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_.
-
-.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
-.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-
-.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml
- :alt: Test suite status
-.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml
- :alt: Quality check status
-.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
- :target: https://coverage.readthedocs.io/
- :alt: Documentation
-.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
- :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
- :alt: Requirements status
-.. |kit| image:: https://badge.fury.io/py/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: PyPI status
-.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Kit format
-.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Weekly PyPI downloads
-.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072
- :target: https://pypi.org/project/coverage/
- :alt: Python versions supported
-.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Package stability
-.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: License
-.. |codecov| image:: https://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
- :target: https://codecov.io/github/nedbat/coveragepy?branch=master
- :alt: Coverage!
-.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg
- :target: https://repology.org/metapackage/python:coverage/versions
- :alt: Packaging status
-.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
- :alt: Tidelift
-.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/stargazers
- :alt: Github stars
-.. |forks| image:: https://img.shields.io/github/forks/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/network/members
- :alt: Github forks
-.. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/graphs/contributors
- :alt: Contributors
-.. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/coveragepy
- :alt: coverage.py on Twitter
-.. |twitter-nedbat| image:: https://img.shields.io/twitter/follow/nedbat.svg?label=nedbat&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/nedbat
- :alt: nedbat on Twitter
-
-
diff --git a/contrib/python/coverage/py3/.dist-info/entry_points.txt b/contrib/python/coverage/py3/.dist-info/entry_points.txt
deleted file mode 100644
index cd083fc1ff..0000000000
--- a/contrib/python/coverage/py3/.dist-info/entry_points.txt
+++ /dev/null
@@ -1,5 +0,0 @@
-[console_scripts]
-coverage = coverage.cmdline:main
-coverage-3.9 = coverage.cmdline:main
-coverage3 = coverage.cmdline:main
-
diff --git a/contrib/python/coverage/py3/.dist-info/top_level.txt b/contrib/python/coverage/py3/.dist-info/top_level.txt
deleted file mode 100644
index 4ebc8aea50..0000000000
--- a/contrib/python/coverage/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-coverage
diff --git a/contrib/python/coverage/py3/LICENSE.txt b/contrib/python/coverage/py3/LICENSE.txt
deleted file mode 100644
index f433b1a53f..0000000000
--- a/contrib/python/coverage/py3/LICENSE.txt
+++ /dev/null
@@ -1,177 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
diff --git a/contrib/python/coverage/py3/NOTICE.txt b/contrib/python/coverage/py3/NOTICE.txt
deleted file mode 100644
index 37ded535bf..0000000000
--- a/contrib/python/coverage/py3/NOTICE.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Copyright 2001 Gareth Rees. All rights reserved.
-Copyright 2004-2021 Ned Batchelder. All rights reserved.
-
-Except where noted otherwise, this software is licensed under the Apache
-License, Version 2.0 (the "License"); you may not use this work except in
-compliance with the License. You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
diff --git a/contrib/python/coverage/py3/README.rst b/contrib/python/coverage/py3/README.rst
deleted file mode 100644
index 072f30ffeb..0000000000
--- a/contrib/python/coverage/py3/README.rst
+++ /dev/null
@@ -1,151 +0,0 @@
-.. Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-.. For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-===========
-Coverage.py
-===========
-
-Code coverage testing for Python.
-
-| |license| |versions| |status|
-| |test-status| |quality-status| |docs| |codecov|
-| |kit| |format| |repos| |downloads|
-| |stars| |forks| |contributors|
-| |tidelift| |twitter-coveragepy| |twitter-nedbat|
-
-Coverage.py measures code coverage, typically during test execution. It uses
-the code analysis tools and tracing hooks provided in the Python standard
-library to determine which lines are executable, and which have been executed.
-
-Coverage.py runs on many versions of Python:
-
-* CPython 2.7.
-* CPython 3.5 through 3.10 alpha.
-* PyPy2 7.3.3 and PyPy3 7.3.3.
-
-Documentation is on `Read the Docs`_. Code repository and issue tracker are on
-`GitHub`_.
-
-.. _Read the Docs: https://coverage.readthedocs.io/
-.. _GitHub: https://github.com/nedbat/coveragepy
-
-
-**New in 5.x:** SQLite data storage, JSON report, contexts, relative filenames,
-dropped support for Python 2.6, 3.3 and 3.4.
-
-
-For Enterprise
---------------
-
-.. |tideliftlogo| image:: https://nedbatchelder.com/pix/Tidelift_Logo_small.png
- :alt: Tidelift
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
-
-.. list-table::
- :widths: 10 100
-
- * - |tideliftlogo|
- - `Available as part of the Tidelift Subscription. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
- Coverage and thousands of other packages are working with
- Tidelift to deliver one enterprise subscription that covers all of the open
- source you use. If you want the flexibility of open source and the confidence
- of commercial-grade software, this is for you.
- `Learn more. <https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme>`_
-
-
-Getting Started
----------------
-
-See the `Quick Start section`_ of the docs.
-
-.. _Quick Start section: https://coverage.readthedocs.io/#quick-start
-
-
-Change history
---------------
-
-The complete history of changes is on the `change history page`_.
-
-.. _change history page: https://coverage.readthedocs.io/en/latest/changes.html
-
-
-Contributing
-------------
-
-See the `Contributing section`_ of the docs.
-
-.. _Contributing section: https://coverage.readthedocs.io/en/latest/contributing.html
-
-
-Security
---------
-
-To report a security vulnerability, please use the `Tidelift security
-contact`_. Tidelift will coordinate the fix and disclosure.
-
-.. _Tidelift security contact: https://tidelift.com/security
-
-
-License
--------
-
-Licensed under the `Apache 2.0 License`_. For details, see `NOTICE.txt`_.
-
-.. _Apache 2.0 License: http://www.apache.org/licenses/LICENSE-2.0
-.. _NOTICE.txt: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-
-.. |test-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/testsuite.yml
- :alt: Test suite status
-.. |quality-status| image:: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml/badge.svg?branch=master&event=push
- :target: https://github.com/nedbat/coveragepy/actions/workflows/quality.yml
- :alt: Quality check status
-.. |docs| image:: https://readthedocs.org/projects/coverage/badge/?version=latest&style=flat
- :target: https://coverage.readthedocs.io/
- :alt: Documentation
-.. |reqs| image:: https://requires.io/github/nedbat/coveragepy/requirements.svg?branch=master
- :target: https://requires.io/github/nedbat/coveragepy/requirements/?branch=master
- :alt: Requirements status
-.. |kit| image:: https://badge.fury.io/py/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: PyPI status
-.. |format| image:: https://img.shields.io/pypi/format/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Kit format
-.. |downloads| image:: https://img.shields.io/pypi/dw/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Weekly PyPI downloads
-.. |versions| image:: https://img.shields.io/pypi/pyversions/coverage.svg?logo=python&logoColor=FBE072
- :target: https://pypi.org/project/coverage/
- :alt: Python versions supported
-.. |status| image:: https://img.shields.io/pypi/status/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: Package stability
-.. |license| image:: https://img.shields.io/pypi/l/coverage.svg
- :target: https://pypi.org/project/coverage/
- :alt: License
-.. |codecov| image:: https://codecov.io/github/nedbat/coveragepy/coverage.svg?branch=master&precision=2
- :target: https://codecov.io/github/nedbat/coveragepy?branch=master
- :alt: Coverage!
-.. |repos| image:: https://repology.org/badge/tiny-repos/python:coverage.svg
- :target: https://repology.org/metapackage/python:coverage/versions
- :alt: Packaging status
-.. |tidelift| image:: https://tidelift.com/badges/package/pypi/coverage
- :target: https://tidelift.com/subscription/pkg/pypi-coverage?utm_source=pypi-coverage&utm_medium=referral&utm_campaign=readme
- :alt: Tidelift
-.. |stars| image:: https://img.shields.io/github/stars/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/stargazers
- :alt: Github stars
-.. |forks| image:: https://img.shields.io/github/forks/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/network/members
- :alt: Github forks
-.. |contributors| image:: https://img.shields.io/github/contributors/nedbat/coveragepy.svg?logo=github
- :target: https://github.com/nedbat/coveragepy/graphs/contributors
- :alt: Contributors
-.. |twitter-coveragepy| image:: https://img.shields.io/twitter/follow/coveragepy.svg?label=coveragepy&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/coveragepy
- :alt: coverage.py on Twitter
-.. |twitter-nedbat| image:: https://img.shields.io/twitter/follow/nedbat.svg?label=nedbat&style=flat&logo=twitter&logoColor=4FADFF
- :target: https://twitter.com/nedbat
- :alt: nedbat on Twitter
diff --git a/contrib/python/coverage/py3/coverage/__init__.py b/contrib/python/coverage/py3/coverage/__init__.py
deleted file mode 100644
index 331b304b68..0000000000
--- a/contrib/python/coverage/py3/coverage/__init__.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Code coverage measurement for Python.
-
-Ned Batchelder
-https://nedbatchelder.com/code/coverage
-
-"""
-
-import sys
-
-from coverage.version import __version__, __url__, version_info
-
-from coverage.control import Coverage, process_startup
-from coverage.data import CoverageData
-from coverage.misc import CoverageException
-from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
-from coverage.pytracer import PyTracer
-
-# Backward compatibility.
-coverage = Coverage
-
-# On Windows, we encode and decode deep enough that something goes wrong and
-# the encodings.utf_8 module is loaded and then unloaded, I don't know why.
-# Adding a reference here prevents it from being unloaded. Yuk.
-import encodings.utf_8 # pylint: disable=wrong-import-position, wrong-import-order
-
-# Because of the "from coverage.control import fooey" lines at the top of the
-# file, there's an entry for coverage.coverage in sys.modules, mapped to None.
-# This makes some inspection tools (like pydoc) unable to find the class
-# coverage.coverage. So remove that entry.
-try:
- del sys.modules['coverage.coverage']
-except KeyError:
- pass
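
For context: the __init__.py removed above re-exports the public measurement API and keeps `coverage.coverage` as an alias of Coverage. A minimal sketch of the usual start/stop/report flow (those methods live in coverage.control, not in the lines above):

    import coverage

    cov = coverage.Coverage()               # `coverage.coverage` is the backward-compatible alias
    cov.start()
    total = sum(x * x for x in range(10))   # stand-in for the code under measurement
    cov.stop()
    cov.report()                            # terminal summary for the files that ran
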
diff --git a/contrib/python/coverage/py3/coverage/__main__.py b/contrib/python/coverage/py3/coverage/__main__.py
deleted file mode 100644
index 79aa4e2b35..0000000000
--- a/contrib/python/coverage/py3/coverage/__main__.py
+++ /dev/null
@@ -1,8 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Coverage.py's main entry point."""
-
-import sys
-from coverage.cmdline import main
-sys.exit(main())
diff --git a/contrib/python/coverage/py3/coverage/annotate.py b/contrib/python/coverage/py3/coverage/annotate.py
deleted file mode 100644
index 999ab6e557..0000000000
--- a/contrib/python/coverage/py3/coverage/annotate.py
+++ /dev/null
@@ -1,108 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Source file annotation for coverage.py."""
-
-import io
-import os
-import re
-
-from coverage.files import flat_rootname
-from coverage.misc import ensure_dir, isolate_module
-from coverage.report import get_analysis_to_report
-
-os = isolate_module(os)
-
-
-class AnnotateReporter(object):
- """Generate annotated source files showing line coverage.
-
- This reporter creates annotated copies of the measured source files. Each
- .py file is copied as a .py,cover file, with a left-hand margin annotating
- each line::
-
- > def h(x):
- - if 0: #pragma: no cover
- - pass
- > if x == 1:
- ! a = 1
- > else:
- > a = 2
-
- > h(2)
-
- Executed lines use '>', lines not executed use '!', lines excluded from
- consideration use '-'.
-
- """
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.directory = None
-
- blank_re = re.compile(r"\s*(#|$)")
- else_re = re.compile(r"\s*else\s*:\s*(#|$)")
-
- def report(self, morfs, directory=None):
- """Run the report.
-
- See `coverage.report()` for arguments.
-
- """
- self.directory = directory
- self.coverage.get_data()
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.annotate_file(fr, analysis)
-
- def annotate_file(self, fr, analysis):
- """Annotate a single file.
-
- `fr` is the FileReporter for the file to annotate.
-
- """
- statements = sorted(analysis.statements)
- missing = sorted(analysis.missing)
- excluded = sorted(analysis.excluded)
-
- if self.directory:
- ensure_dir(self.directory)
- dest_file = os.path.join(self.directory, flat_rootname(fr.relative_filename()))
- if dest_file.endswith("_py"):
- dest_file = dest_file[:-3] + ".py"
- dest_file += ",cover"
- else:
- dest_file = fr.filename + ",cover"
-
- with io.open(dest_file, 'w', encoding='utf8') as dest:
- i = 0
- j = 0
- covered = True
- source = fr.source()
- for lineno, line in enumerate(source.splitlines(True), start=1):
- while i < len(statements) and statements[i] < lineno:
- i += 1
- while j < len(missing) and missing[j] < lineno:
- j += 1
- if i < len(statements) and statements[i] == lineno:
- covered = j >= len(missing) or missing[j] > lineno
- if self.blank_re.match(line):
- dest.write(u' ')
- elif self.else_re.match(line):
- # Special logic for lines containing only 'else:'.
- if i >= len(statements) and j >= len(missing):
- dest.write(u'! ')
- elif i >= len(statements) or j >= len(missing):
- dest.write(u'> ')
- elif statements[i] == missing[j]:
- dest.write(u'! ')
- else:
- dest.write(u'> ')
- elif lineno in excluded:
- dest.write(u'- ')
- elif covered:
- dest.write(u'> ')
- else:
- dest.write(u'! ')
-
- dest.write(line)
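
For context: AnnotateReporter, removed above, is normally driven through the public Coverage.annotate() call rather than instantiated directly. A minimal sketch, where `my_module` is a hypothetical module under measurement:

    import coverage

    cov = coverage.Coverage()
    cov.start()
    import my_module                        # hypothetical module to measure
    cov.stop()
    cov.annotate(directory="annotated")     # writes my_module.py,cover with >/!/- margins
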
diff --git a/contrib/python/coverage/py3/coverage/backward.py b/contrib/python/coverage/py3/coverage/backward.py
deleted file mode 100644
index ac781ab96a..0000000000
--- a/contrib/python/coverage/py3/coverage/backward.py
+++ /dev/null
@@ -1,267 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Add things to old Pythons so I can pretend they are newer."""
-
-# This file's purpose is to provide modules to be imported from here.
-# pylint: disable=unused-import
-
-import os
-import sys
-
-from datetime import datetime
-
-from coverage import env
-
-
-# Pythons 2 and 3 differ on where to get StringIO.
-try:
- from cStringIO import StringIO
-except ImportError:
- from io import StringIO
-
-# In py3, ConfigParser was renamed to the more-standard configparser.
-# But there's a py3 backport that installs "configparser" in py2, and I don't
-# want it because it has annoying deprecation warnings. So try the real py2
-# import first.
-try:
- import ConfigParser as configparser
-except ImportError:
- import configparser
-
-# What's a string called?
-try:
- string_class = basestring
-except NameError:
- string_class = str
-
-# What's a Unicode string called?
-try:
- unicode_class = unicode
-except NameError:
- unicode_class = str
-
-# range or xrange?
-try:
- range = xrange # pylint: disable=redefined-builtin
-except NameError:
- range = range
-
-try:
- from itertools import zip_longest
-except ImportError:
- from itertools import izip_longest as zip_longest
-
-# Where do we get the thread id from?
-try:
- from thread import get_ident as get_thread_id
-except ImportError:
- from threading import get_ident as get_thread_id
-
-try:
- os.PathLike
-except AttributeError:
- # This is Python 2 and 3
- path_types = (bytes, string_class, unicode_class)
-else:
- # 3.6+
- path_types = (bytes, str, os.PathLike)
-
-# shlex.quote is new, but there's an undocumented implementation in "pipes",
-# who knew!?
-try:
- from shlex import quote as shlex_quote
-except ImportError:
- # Useful function, available under a different (undocumented) name
- # in Python versions earlier than 3.3.
- from pipes import quote as shlex_quote
-
-try:
- import reprlib
-except ImportError: # pragma: not covered
- # We need this on Python 2, but in testing environments, a backport is
- # installed, so this import isn't used.
- import repr as reprlib
-
-# A function to iterate listlessly over a dict's items, and one to get the
-# items as a list.
-try:
- {}.iteritems
-except AttributeError:
- # Python 3
- def iitems(d):
- """Produce the items from dict `d`."""
- return d.items()
-
- def litems(d):
- """Return a list of items from dict `d`."""
- return list(d.items())
-else:
- # Python 2
- def iitems(d):
- """Produce the items from dict `d`."""
- return d.iteritems()
-
- def litems(d):
- """Return a list of items from dict `d`."""
- return d.items()
-
-# Getting the `next` function from an iterator is different in 2 and 3.
-try:
- iter([]).next
-except AttributeError:
- def iternext(seq):
- """Get the `next` function for iterating over `seq`."""
- return iter(seq).__next__
-else:
- def iternext(seq):
- """Get the `next` function for iterating over `seq`."""
- return iter(seq).next
-
-# Python 3.x is picky about bytes and strings, so provide methods to
-# get them right, and make them no-ops in 2.x
-if env.PY3:
- def to_bytes(s):
- """Convert string `s` to bytes."""
- return s.encode('utf8')
-
- def to_string(b):
- """Convert bytes `b` to string."""
- return b.decode('utf8')
-
- def binary_bytes(byte_values):
- """Produce a byte string with the ints from `byte_values`."""
- return bytes(byte_values)
-
- def byte_to_int(byte):
- """Turn a byte indexed from a bytes object into an int."""
- return byte
-
- def bytes_to_ints(bytes_value):
- """Turn a bytes object into a sequence of ints."""
- # In Python 3, iterating bytes gives ints.
- return bytes_value
-
-else:
- def to_bytes(s):
- """Convert string `s` to bytes (no-op in 2.x)."""
- return s
-
- def to_string(b):
- """Convert bytes `b` to string."""
- return b
-
- def binary_bytes(byte_values):
- """Produce a byte string with the ints from `byte_values`."""
- return "".join(chr(b) for b in byte_values)
-
- def byte_to_int(byte):
- """Turn a byte indexed from a bytes object into an int."""
- return ord(byte)
-
- def bytes_to_ints(bytes_value):
- """Turn a bytes object into a sequence of ints."""
- for byte in bytes_value:
- yield ord(byte)
-
-
-try:
- # In Python 2.x, the builtins were in __builtin__
- BUILTINS = sys.modules['__builtin__']
-except KeyError:
- # In Python 3.x, they're in builtins
- BUILTINS = sys.modules['builtins']
-
-
-# imp was deprecated in Python 3.3
-try:
- import importlib
- import importlib.util
- imp = None
-except ImportError:
- importlib = None
-
-# We only want to use importlib if it has everything we need.
-try:
- importlib_util_find_spec = importlib.util.find_spec
-except Exception:
- import imp
- importlib_util_find_spec = None
-
-# What is the .pyc magic number for this version of Python?
-try:
- PYC_MAGIC_NUMBER = importlib.util.MAGIC_NUMBER
-except AttributeError:
- PYC_MAGIC_NUMBER = imp.get_magic()
-
-
-def code_object(fn):
- """Get the code object from a function."""
- try:
- return fn.func_code
- except AttributeError:
- return fn.__code__
-
-
-try:
- from types import SimpleNamespace
-except ImportError:
- # The code from https://docs.python.org/3/library/types.html#types.SimpleNamespace
- class SimpleNamespace:
- """Python implementation of SimpleNamespace, for Python 2."""
- def __init__(self, **kwargs):
- self.__dict__.update(kwargs)
-
- def __repr__(self):
- keys = sorted(self.__dict__)
- items = ("{}={!r}".format(k, self.__dict__[k]) for k in keys)
- return "{}({})".format(type(self).__name__, ", ".join(items))
-
-
-def format_local_datetime(dt):
- """Return a string with local timezone representing the date.
- If python version is lower than 3.6, the time zone is not included.
- """
- try:
- return dt.astimezone().strftime('%Y-%m-%d %H:%M %z')
- except (TypeError, ValueError):
- # Datetime.astimezone in Python 3.5 can not handle naive datetime
- return dt.strftime('%Y-%m-%d %H:%M')
-
-
-def invalidate_import_caches():
- """Invalidate any import caches that may or may not exist."""
- if importlib and hasattr(importlib, "invalidate_caches"):
- importlib.invalidate_caches()
-
-
-def import_local_file(modname, modfile=None):
- """Import a local file as a module.
-
- Opens a file in the current directory named `modname`.py, imports it
- as `modname`, and returns the module object. `modfile` is the file to
- import if it isn't in the current directory.
-
- """
- try:
- import importlib.util as importlib_util
- except ImportError:
- importlib_util = None
-
- if modfile is None:
- modfile = modname + '.py'
- if importlib_util:
- spec = importlib_util.spec_from_file_location(modname, modfile)
- mod = importlib_util.module_from_spec(spec)
- sys.modules[modname] = mod
- spec.loader.exec_module(mod)
- else:
- for suff in imp.get_suffixes(): # pragma: part covered
- if suff[0] == '.py':
- break
-
- with open(modfile, 'r') as f:
- # pylint: disable=undefined-loop-variable
- mod = imp.load_module(modname, f, modfile, suff)
-
- return mod
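
For context: backward.py, removed above, let the rest of coverage.py stay version-neutral between Python 2 and 3. A small sketch using its shims, all of which are defined in the deleted file:

    from coverage.backward import iitems, litems, to_bytes, string_class

    hits = {"a.py": 3, "b.py": 1}
    for filename, count in iitems(hits):    # lazy dict items on both 2.x and 3.x
        assert isinstance(filename, string_class)

    pairs = litems(hits)                    # an actual list of (key, value) pairs
    data = to_bytes("coverage")             # bytes on 3.x, unchanged str on 2.x
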
diff --git a/contrib/python/coverage/py3/coverage/bytecode.py b/contrib/python/coverage/py3/coverage/bytecode.py
deleted file mode 100644
index ceb18cf374..0000000000
--- a/contrib/python/coverage/py3/coverage/bytecode.py
+++ /dev/null
@@ -1,19 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Bytecode manipulation for coverage.py"""
-
-import types
-
-
-def code_objects(code):
- """Iterate over all the code objects in `code`."""
- stack = [code]
- while stack:
- # We're going to return the code object on the stack, but first
- # push its children for later returning.
- code = stack.pop()
- for c in code.co_consts:
- if isinstance(c, types.CodeType):
- stack.append(c)
- yield code
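
For context: code_objects(), removed above, walks a code object and every code object nested in its constants. A minimal sketch using the standard compile() builtin:

    from coverage.bytecode import code_objects

    src = "def f():\n    return [x * x for x in range(3)]\n"
    top = compile(src, "<example>", "exec")
    for co in code_objects(top):
        print(co.co_name)    # typically <module>, then f, then <listcomp>
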
diff --git a/contrib/python/coverage/py3/coverage/cmdline.py b/contrib/python/coverage/py3/coverage/cmdline.py
deleted file mode 100644
index 0be0cca19f..0000000000
--- a/contrib/python/coverage/py3/coverage/cmdline.py
+++ /dev/null
@@ -1,910 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Command-line support for coverage.py."""
-
-from __future__ import print_function
-
-import glob
-import optparse
-import os.path
-import shlex
-import sys
-import textwrap
-import traceback
-
-import coverage
-from coverage import Coverage
-from coverage import env
-from coverage.collector import CTracer
-from coverage.data import line_counts
-from coverage.debug import info_formatter, info_header, short_stack
-from coverage.execfile import PyRunner
-from coverage.misc import BaseCoverageException, ExceptionDuringRun, NoSource, output_encoding
-from coverage.results import should_fail_under
-
-
-class Opts(object):
- """A namespace class for individual options we'll build parsers from."""
-
- append = optparse.make_option(
- '-a', '--append', action='store_true',
- help="Append coverage data to .coverage, otherwise it starts clean each time.",
- )
- keep = optparse.make_option(
- '', '--keep', action='store_true',
- help="Keep original coverage files, otherwise they are deleted.",
- )
- branch = optparse.make_option(
- '', '--branch', action='store_true',
- help="Measure branch coverage in addition to statement coverage.",
- )
- CONCURRENCY_CHOICES = [
- "thread", "gevent", "greenlet", "eventlet", "multiprocessing",
- ]
- concurrency = optparse.make_option(
- '', '--concurrency', action='store', metavar="LIB",
- choices=CONCURRENCY_CHOICES,
- help=(
- "Properly measure code using a concurrency library. "
- "Valid values are: %s."
- ) % ", ".join(CONCURRENCY_CHOICES),
- )
- context = optparse.make_option(
- '', '--context', action='store', metavar="LABEL",
- help="The context label to record for this coverage run.",
- )
- debug = optparse.make_option(
- '', '--debug', action='store', metavar="OPTS",
- help="Debug options, separated by commas. [env: COVERAGE_DEBUG]",
- )
- directory = optparse.make_option(
- '-d', '--directory', action='store', metavar="DIR",
- help="Write the output files to DIR.",
- )
- fail_under = optparse.make_option(
- '', '--fail-under', action='store', metavar="MIN", type="float",
- help="Exit with a status of 2 if the total coverage is less than MIN.",
- )
- help = optparse.make_option(
- '-h', '--help', action='store_true',
- help="Get help on this command.",
- )
- ignore_errors = optparse.make_option(
- '-i', '--ignore-errors', action='store_true',
- help="Ignore errors while reading source files.",
- )
- include = optparse.make_option(
- '', '--include', action='store',
- metavar="PAT1,PAT2,...",
- help=(
- "Include only files whose paths match one of these patterns. "
- "Accepts shell-style wildcards, which must be quoted."
- ),
- )
- pylib = optparse.make_option(
- '-L', '--pylib', action='store_true',
- help=(
- "Measure coverage even inside the Python installed library, "
- "which isn't done by default."
- ),
- )
- sort = optparse.make_option(
- '--sort', action='store', metavar='COLUMN',
- help="Sort the report by the named column: name, stmts, miss, branch, brpart, or cover. "
- "Default is name."
- )
- show_missing = optparse.make_option(
- '-m', '--show-missing', action='store_true',
- help="Show line numbers of statements in each module that weren't executed.",
- )
- skip_covered = optparse.make_option(
- '--skip-covered', action='store_true',
- help="Skip files with 100% coverage.",
- )
- no_skip_covered = optparse.make_option(
- '--no-skip-covered', action='store_false', dest='skip_covered',
- help="Disable --skip-covered.",
- )
- skip_empty = optparse.make_option(
- '--skip-empty', action='store_true',
- help="Skip files with no code.",
- )
- show_contexts = optparse.make_option(
- '--show-contexts', action='store_true',
- help="Show contexts for covered lines.",
- )
- omit = optparse.make_option(
- '', '--omit', action='store',
- metavar="PAT1,PAT2,...",
- help=(
- "Omit files whose paths match one of these patterns. "
- "Accepts shell-style wildcards, which must be quoted."
- ),
- )
- contexts = optparse.make_option(
- '', '--contexts', action='store',
- metavar="REGEX1,REGEX2,...",
- help=(
- "Only display data from lines covered in the given contexts. "
- "Accepts Python regexes, which must be quoted."
- ),
- )
- output_xml = optparse.make_option(
- '-o', '', action='store', dest="outfile",
- metavar="OUTFILE",
- help="Write the XML report to this file. Defaults to 'coverage.xml'",
- )
- output_json = optparse.make_option(
- '-o', '', action='store', dest="outfile",
- metavar="OUTFILE",
- help="Write the JSON report to this file. Defaults to 'coverage.json'",
- )
- json_pretty_print = optparse.make_option(
- '', '--pretty-print', action='store_true',
- help="Format the JSON for human readers.",
- )
- parallel_mode = optparse.make_option(
- '-p', '--parallel-mode', action='store_true',
- help=(
- "Append the machine name, process id and random number to the "
- ".coverage data file name to simplify collecting data from "
- "many processes."
- ),
- )
- module = optparse.make_option(
- '-m', '--module', action='store_true',
- help=(
- "<pyfile> is an importable Python module, not a script path, "
- "to be run as 'python -m' would run it."
- ),
- )
- precision = optparse.make_option(
- '', '--precision', action='store', metavar='N', type=int,
- help=(
- "Number of digits after the decimal point to display for "
- "reported coverage percentages."
- ),
- )
- rcfile = optparse.make_option(
- '', '--rcfile', action='store',
- help=(
- "Specify configuration file. "
- "By default '.coveragerc', 'setup.cfg', 'tox.ini', and "
- "'pyproject.toml' are tried. [env: COVERAGE_RCFILE]"
- ),
- )
- source = optparse.make_option(
- '', '--source', action='store', metavar="SRC1,SRC2,...",
- help="A list of packages or directories of code to be measured.",
- )
- timid = optparse.make_option(
- '', '--timid', action='store_true',
- help=(
- "Use a simpler but slower trace method. Try this if you get "
- "seemingly impossible results!"
- ),
- )
- title = optparse.make_option(
- '', '--title', action='store', metavar="TITLE",
- help="A text string to use as the title on the HTML.",
- )
- version = optparse.make_option(
- '', '--version', action='store_true',
- help="Display version information and exit.",
- )
-
-
-class CoverageOptionParser(optparse.OptionParser, object):
- """Base OptionParser for coverage.py.
-
- Problems don't exit the program.
- Defaults are initialized for all options.
-
- """
-
- def __init__(self, *args, **kwargs):
- super(CoverageOptionParser, self).__init__(
- add_help_option=False, *args, **kwargs
- )
- self.set_defaults(
- action=None,
- append=None,
- branch=None,
- concurrency=None,
- context=None,
- debug=None,
- directory=None,
- fail_under=None,
- help=None,
- ignore_errors=None,
- include=None,
- keep=None,
- module=None,
- omit=None,
- contexts=None,
- parallel_mode=None,
- precision=None,
- pylib=None,
- rcfile=True,
- show_missing=None,
- skip_covered=None,
- skip_empty=None,
- show_contexts=None,
- sort=None,
- source=None,
- timid=None,
- title=None,
- version=None,
- )
-
- self.disable_interspersed_args()
-
- class OptionParserError(Exception):
- """Used to stop the optparse error handler ending the process."""
- pass
-
- def parse_args_ok(self, args=None, options=None):
- """Call optparse.parse_args, but return a triple:
-
- (ok, options, args)
-
- """
- try:
- options, args = super(CoverageOptionParser, self).parse_args(args, options)
- except self.OptionParserError:
- return False, None, None
- return True, options, args
-
- def error(self, msg):
- """Override optparse.error so sys.exit doesn't get called."""
- show_help(msg)
- raise self.OptionParserError
-
-
-class GlobalOptionParser(CoverageOptionParser):
- """Command-line parser for coverage.py global option arguments."""
-
- def __init__(self):
- super(GlobalOptionParser, self).__init__()
-
- self.add_options([
- Opts.help,
- Opts.version,
- ])
-
-
-class CmdOptionParser(CoverageOptionParser):
- """Parse one of the new-style commands for coverage.py."""
-
- def __init__(self, action, options, defaults=None, usage=None, description=None):
- """Create an OptionParser for a coverage.py command.
-
- `action` is the slug to put into `options.action`.
- `options` is a list of Option objects for the command.
- `defaults` is a dict of default values for options.
- `usage` is the usage string to display in help.
- `description` is the description of the command, for the help text.
-
- """
- if usage:
- usage = "%prog " + usage
- super(CmdOptionParser, self).__init__(
- usage=usage,
- description=description,
- )
- self.set_defaults(action=action, **(defaults or {}))
- self.add_options(options)
- self.cmd = action
-
- def __eq__(self, other):
- # A convenience equality, so that I can put strings in unit test
- # results, and they will compare equal to objects.
- return (other == "<CmdOptionParser:%s>" % self.cmd)
-
- __hash__ = None # This object doesn't need to be hashed.
-
- def get_prog_name(self):
- """Override of an undocumented function in optparse.OptionParser."""
- program_name = super(CmdOptionParser, self).get_prog_name()
-
- # Include the sub-command for this parser as part of the command.
- return "{command} {subcommand}".format(command=program_name, subcommand=self.cmd)
-
-
-GLOBAL_ARGS = [
- Opts.debug,
- Opts.help,
- Opts.rcfile,
- ]
-
-CMDS = {
- 'annotate': CmdOptionParser(
- "annotate",
- [
- Opts.directory,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description=(
- "Make annotated copies of the given files, marking statements that are executed "
- "with > and statements that are missed with !."
- ),
- ),
-
- 'combine': CmdOptionParser(
- "combine",
- [
- Opts.append,
- Opts.keep,
- ] + GLOBAL_ARGS,
- usage="[options] <path1> <path2> ... <pathN>",
- description=(
- "Combine data from multiple coverage files collected "
- "with 'run -p'. The combined results are written to a single "
- "file representing the union of the data. The positional "
- "arguments are data files or directories containing data files. "
- "If no paths are provided, data files in the default data file's "
- "directory are combined."
- ),
- ),
-
- 'debug': CmdOptionParser(
- "debug", GLOBAL_ARGS,
- usage="<topic>",
- description=(
- "Display information about the internals of coverage.py, "
- "for diagnosing problems. "
- "Topics are: "
- "'data' to show a summary of the collected data; "
- "'sys' to show installation information; "
- "'config' to show the configuration; "
- "'premain' to show what is calling coverage."
- ),
- ),
-
- 'erase': CmdOptionParser(
- "erase", GLOBAL_ARGS,
- description="Erase previously collected coverage data.",
- ),
-
- 'help': CmdOptionParser(
- "help", GLOBAL_ARGS,
- usage="[command]",
- description="Describe how to use coverage.py",
- ),
-
- 'html': CmdOptionParser(
- "html",
- [
- Opts.contexts,
- Opts.directory,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.precision,
- Opts.show_contexts,
- Opts.skip_covered,
- Opts.no_skip_covered,
- Opts.skip_empty,
- Opts.title,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description=(
- "Create an HTML report of the coverage of the files. "
- "Each file gets its own page, with the source decorated to show "
- "executed, excluded, and missed lines."
- ),
- ),
-
- 'json': CmdOptionParser(
- "json",
- [
- Opts.contexts,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.output_json,
- Opts.json_pretty_print,
- Opts.show_contexts,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Generate a JSON report of coverage results."
- ),
-
- 'report': CmdOptionParser(
- "report",
- [
- Opts.contexts,
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.precision,
- Opts.sort,
- Opts.show_missing,
- Opts.skip_covered,
- Opts.no_skip_covered,
- Opts.skip_empty,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Report coverage statistics on modules."
- ),
-
- 'run': CmdOptionParser(
- "run",
- [
- Opts.append,
- Opts.branch,
- Opts.concurrency,
- Opts.context,
- Opts.include,
- Opts.module,
- Opts.omit,
- Opts.pylib,
- Opts.parallel_mode,
- Opts.source,
- Opts.timid,
- ] + GLOBAL_ARGS,
- usage="[options] <pyfile> [program options]",
- description="Run a Python program, measuring code execution."
- ),
-
- 'xml': CmdOptionParser(
- "xml",
- [
- Opts.fail_under,
- Opts.ignore_errors,
- Opts.include,
- Opts.omit,
- Opts.output_xml,
- Opts.skip_empty,
- ] + GLOBAL_ARGS,
- usage="[options] [modules]",
- description="Generate an XML report of coverage results."
- ),
-}
-
-
-def show_help(error=None, topic=None, parser=None):
- """Display an error message, or the named topic."""
- assert error or topic or parser
-
- program_path = sys.argv[0]
- if program_path.endswith(os.path.sep + '__main__.py'):
- # The path is the main module of a package; get that path instead.
- program_path = os.path.dirname(program_path)
- program_name = os.path.basename(program_path)
- if env.WINDOWS:
- # entry_points={'console_scripts':...} on Windows makes files
- # called coverage.exe, coverage3.exe, and coverage-3.5.exe. These
- # invoke coverage-script.py, coverage3-script.py, and
- # coverage-3.5-script.py. argv[0] is the .py file, but we want to
- # get back to the original form.
- auto_suffix = "-script.py"
- if program_name.endswith(auto_suffix):
- program_name = program_name[:-len(auto_suffix)]
-
- help_params = dict(coverage.__dict__)
- help_params['program_name'] = program_name
- if CTracer is not None:
- help_params['extension_modifier'] = 'with C extension'
- else:
- help_params['extension_modifier'] = 'without C extension'
-
- if error:
- print(error, file=sys.stderr)
- print("Use '%s help' for help." % (program_name,), file=sys.stderr)
- elif parser:
- print(parser.format_help().strip())
- print()
- else:
- help_msg = textwrap.dedent(HELP_TOPICS.get(topic, '')).strip()
- if help_msg:
- print(help_msg.format(**help_params))
- else:
- print("Don't know topic %r" % topic)
- print("Full documentation is at {__url__}".format(**help_params))
-
-
-OK, ERR, FAIL_UNDER = 0, 1, 2
-
-
-class CoverageScript(object):
- """The command-line interface to coverage.py."""
-
- def __init__(self):
- self.global_option = False
- self.coverage = None
-
- def command_line(self, argv):
- """The bulk of the command line interface to coverage.py.
-
- `argv` is the argument list to process.
-
- Returns 0 if all is well, 1 if something went wrong, or 2 if the
- total coverage is below the fail-under threshold.
-
- """
- # Collect the command-line options.
- if not argv:
- show_help(topic='minimum_help')
- return OK
-
- # The command syntax we parse depends on the first argument. Global
- # switch syntax always starts with an option.
- self.global_option = argv[0].startswith('-')
- if self.global_option:
- parser = GlobalOptionParser()
- else:
- parser = CMDS.get(argv[0])
- if not parser:
- show_help("Unknown command: '%s'" % argv[0])
- return ERR
- argv = argv[1:]
-
- ok, options, args = parser.parse_args_ok(argv)
- if not ok:
- return ERR
-
- # Handle help and version.
- if self.do_help(options, args, parser):
- return OK
-
- # Listify the list options.
- source = unshell_list(options.source)
- omit = unshell_list(options.omit)
- include = unshell_list(options.include)
- debug = unshell_list(options.debug)
- contexts = unshell_list(options.contexts)
-
- # Do something.
- self.coverage = Coverage(
- data_suffix=options.parallel_mode,
- cover_pylib=options.pylib,
- timid=options.timid,
- branch=options.branch,
- config_file=options.rcfile,
- source=source,
- omit=omit,
- include=include,
- debug=debug,
- concurrency=options.concurrency,
- check_preimported=True,
- context=options.context,
- )
-
- if options.action == "debug":
- return self.do_debug(args)
-
- elif options.action == "erase":
- self.coverage.erase()
- return OK
-
- elif options.action == "run":
- return self.do_run(options, args)
-
- elif options.action == "combine":
- if options.append:
- self.coverage.load()
- data_dirs = args or None
- self.coverage.combine(data_dirs, strict=True, keep=bool(options.keep))
- self.coverage.save()
- return OK
-
- # Remaining actions are reporting, with some common options.
- report_args = dict(
- morfs=unglob_args(args),
- ignore_errors=options.ignore_errors,
- omit=omit,
- include=include,
- contexts=contexts,
- )
-
- # We need to be able to import from the current directory, because
- # plugins may try, for example, to read Django settings.
- sys.path.insert(0, '')
-
- self.coverage.load()
-
- total = None
- if options.action == "report":
- total = self.coverage.report(
- show_missing=options.show_missing,
- skip_covered=options.skip_covered,
- skip_empty=options.skip_empty,
- precision=options.precision,
- sort=options.sort,
- **report_args
- )
- elif options.action == "annotate":
- self.coverage.annotate(directory=options.directory, **report_args)
- elif options.action == "html":
- total = self.coverage.html_report(
- directory=options.directory,
- title=options.title,
- skip_covered=options.skip_covered,
- skip_empty=options.skip_empty,
- show_contexts=options.show_contexts,
- precision=options.precision,
- **report_args
- )
- elif options.action == "xml":
- outfile = options.outfile
- total = self.coverage.xml_report(
- outfile=outfile, skip_empty=options.skip_empty,
- **report_args
- )
- elif options.action == "json":
- outfile = options.outfile
- total = self.coverage.json_report(
- outfile=outfile,
- pretty_print=options.pretty_print,
- show_contexts=options.show_contexts,
- **report_args
- )
-
- if total is not None:
- # Apply the command line fail-under options, and then use the config
- # value, so we can get fail_under from the config file.
- if options.fail_under is not None:
- self.coverage.set_option("report:fail_under", options.fail_under)
-
- fail_under = self.coverage.get_option("report:fail_under")
- precision = self.coverage.get_option("report:precision")
- if should_fail_under(total, fail_under, precision):
- msg = "total of {total:.{p}f} is less than fail-under={fail_under:.{p}f}".format(
- total=total, fail_under=fail_under, p=precision,
- )
- print("Coverage failure:", msg)
- return FAIL_UNDER
-
- return OK
-
- def do_help(self, options, args, parser):
- """Deal with help requests.
-
- Return True if it handled the request, False if not.
-
- """
- # Handle help.
- if options.help:
- if self.global_option:
- show_help(topic='help')
- else:
- show_help(parser=parser)
- return True
-
- if options.action == "help":
- if args:
- for a in args:
- parser = CMDS.get(a)
- if parser:
- show_help(parser=parser)
- else:
- show_help(topic=a)
- else:
- show_help(topic='help')
- return True
-
- # Handle version.
- if options.version:
- show_help(topic='version')
- return True
-
- return False
-
- def do_run(self, options, args):
- """Implementation of 'coverage run'."""
-
- if not args:
- if options.module:
- # Specified -m with nothing else.
- show_help("No module specified for -m")
- return ERR
- command_line = self.coverage.get_option("run:command_line")
- if command_line is not None:
- args = shlex.split(command_line)
- if args and args[0] == "-m":
- options.module = True
- args = args[1:]
- if not args:
- show_help("Nothing to do.")
- return ERR
-
- if options.append and self.coverage.get_option("run:parallel"):
- show_help("Can't append to data files in parallel mode.")
- return ERR
-
- if options.concurrency == "multiprocessing":
- # Can't set other run-affecting command line options with
- # multiprocessing.
- for opt_name in ['branch', 'include', 'omit', 'pylib', 'source', 'timid']:
- # As it happens, all of these options have no default, meaning
- # they will be None if they have not been specified.
- if getattr(options, opt_name) is not None:
- show_help(
- "Options affecting multiprocessing must only be specified "
- "in a configuration file.\n"
- "Remove --{} from the command line.".format(opt_name)
- )
- return ERR
-
- runner = PyRunner(args, as_module=bool(options.module))
- runner.prepare()
-
- if options.append:
- self.coverage.load()
-
- # Run the script.
- self.coverage.start()
- code_ran = True
- try:
- runner.run()
- except NoSource:
- code_ran = False
- raise
- finally:
- self.coverage.stop()
- if code_ran:
- self.coverage.save()
-
- return OK
-
- def do_debug(self, args):
- """Implementation of 'coverage debug'."""
-
- if not args:
- show_help("What information would you like: config, data, sys, premain?")
- return ERR
-
- for info in args:
- if info == 'sys':
- sys_info = self.coverage.sys_info()
- print(info_header("sys"))
- for line in info_formatter(sys_info):
- print(" %s" % line)
- elif info == 'data':
- self.coverage.load()
- data = self.coverage.get_data()
- print(info_header("data"))
- print("path: %s" % data.data_filename())
- if data:
- print("has_arcs: %r" % data.has_arcs())
- summary = line_counts(data, fullpath=True)
- filenames = sorted(summary.keys())
- print("\n%d files:" % len(filenames))
- for f in filenames:
- line = "%s: %d lines" % (f, summary[f])
- plugin = data.file_tracer(f)
- if plugin:
- line += " [%s]" % plugin
- print(line)
- else:
- print("No data collected")
- elif info == 'config':
- print(info_header("config"))
- config_info = self.coverage.config.__dict__.items()
- for line in info_formatter(config_info):
- print(" %s" % line)
- elif info == "premain":
- print(info_header("premain"))
- print(short_stack())
- else:
- show_help("Don't know what you mean by %r" % info)
- return ERR
-
- return OK
-
-
-def unshell_list(s):
- """Turn a command-line argument into a list."""
- if not s:
- return None
- if env.WINDOWS:
- # When running coverage.py as coverage.exe, some of the behavior
- # of the shell is emulated: wildcards are expanded into a list of
- # file names. So you have to single-quote patterns on the command
- # line, but (not) helpfully, the single quotes are included in the
- # argument, so we have to strip them off here.
- s = s.strip("'")
- return s.split(',')
-
-
-def unglob_args(args):
- """Interpret shell wildcards for platforms that need it."""
- if env.WINDOWS:
- globbed = []
- for arg in args:
- if '?' in arg or '*' in arg:
- globbed.extend(glob.glob(arg))
- else:
- globbed.append(arg)
- args = globbed
- return args
-
-
-HELP_TOPICS = {
- 'help': """\
- Coverage.py, version {__version__} {extension_modifier}
- Measure, collect, and report on code coverage in Python programs.
-
- usage: {program_name} <command> [options] [args]
-
- Commands:
- annotate Annotate source files with execution information.
- combine Combine a number of data files.
- debug Display information about the internals of coverage.py
- erase Erase previously collected coverage data.
- help Get help on using coverage.py.
- html Create an HTML report.
- json Create a JSON report of coverage results.
- report Report coverage stats on modules.
- run Run a Python program and measure code execution.
- xml Create an XML report of coverage results.
-
- Use "{program_name} help <command>" for detailed help on any command.
- """,
-
- 'minimum_help': """\
- Code coverage for Python, version {__version__} {extension_modifier}. Use '{program_name} help' for help.
- """,
-
- 'version': """\
- Coverage.py, version {__version__} {extension_modifier}
- """,
-}
-
-
-def main(argv=None):
- """The main entry point to coverage.py.
-
- This is installed as the script entry point.
-
- """
- if argv is None:
- argv = sys.argv[1:]
- try:
- status = CoverageScript().command_line(argv)
- except ExceptionDuringRun as err:
- # An exception was caught while running the product code. The
- # sys.exc_info() return tuple is packed into an ExceptionDuringRun
- # exception.
- traceback.print_exception(*err.args) # pylint: disable=no-value-for-parameter
- status = ERR
- except BaseCoverageException as err:
- # A controlled error inside coverage.py: print the message to the user.
- msg = err.args[0]
- if env.PY2:
- msg = msg.encode(output_encoding())
- print(msg)
- status = ERR
- except SystemExit as err:
- # The user called `sys.exit()`. Exit with their argument, if any.
- if err.args:
- status = err.args[0]
- else:
- status = None
- return status
-
-# Profiling using ox_profile. Install it from GitHub:
-# pip install git+https://github.com/emin63/ox_profile.git
-#
-# $set_env.py: COVERAGE_PROFILE - Set to use ox_profile.
-_profile = os.environ.get("COVERAGE_PROFILE", "")
-if _profile: # pragma: debugging
- from ox_profile.core.launchers import SimpleLauncher # pylint: disable=import-error
- original_main = main
-
- def main(argv=None): # pylint: disable=function-redefined
- """A wrapper around main that profiles."""
- profiler = SimpleLauncher.launch()
- try:
- return original_main(argv)
- finally:
- data, _ = profiler.query(re_filter='coverage', max_records=100)
- print(profiler.show(query=data, limit=100, sep='', col=''))
- profiler.cancel()
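Because main() takes an argv list and returns the exit status, the command line can also be driven programmatically; a hedged example, assuming the coverage package is importable:

    import sys
    from coverage.cmdline import main

    # Equivalent to running `coverage report -m` in a shell; returns 0, 1, or 2.
    status = main(["report", "-m"])
    sys.exit(status)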
diff --git a/contrib/python/coverage/py3/coverage/collector.py b/contrib/python/coverage/py3/coverage/collector.py
deleted file mode 100644
index c42d29feec..0000000000
--- a/contrib/python/coverage/py3/coverage/collector.py
+++ /dev/null
@@ -1,455 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Raw data collector for coverage.py."""
-
-import os
-import sys
-
-from coverage import env
-from coverage.backward import litems, range # pylint: disable=redefined-builtin
-from coverage.debug import short_stack
-from coverage.disposition import FileDisposition
-from coverage.misc import CoverageException, isolate_module
-from coverage.pytracer import PyTracer
-
-os = isolate_module(os)
-
-
-try:
- # Use the C extension code when we can, for speed.
- from coverage.tracer import CTracer, CFileDisposition
-except ImportError:
- # Couldn't import the C extension, maybe it isn't built.
- if os.getenv('COVERAGE_TEST_TRACER') == 'c':
- # During testing, we use the COVERAGE_TEST_TRACER environment variable
- # to indicate that we've fiddled with the environment to test this
- # fallback code. If we thought we had a C tracer, but couldn't import
- # it, then exit quickly and clearly instead of dribbling confusing
- # errors. I'm using sys.exit here instead of an exception because an
- # exception here causes all sorts of other noise in unittest.
- sys.stderr.write("*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n")
- sys.exit(1)
- CTracer = None
-
-
-class Collector(object):
- """Collects trace data.
-
- Creates a Tracer object for each thread, since they track stack
- information. Each Tracer points to the same shared data, contributing
- traced data points.
-
- When the Collector is started, it creates a Tracer for the current thread,
- and installs a function to create Tracers for each new thread started.
- When the Collector is stopped, all active Tracers are stopped.
-
- Threads started while the Collector is stopped will never have Tracers
- associated with them.
-
- """
-
- # The stack of active Collectors. Collectors are added here when started,
- # and popped when stopped. Collectors on the stack are paused when not
- # the top, and resumed when they become the top again.
- _collectors = []
-
- # The concurrency settings we support here.
- SUPPORTED_CONCURRENCIES = {"greenlet", "eventlet", "gevent", "thread"}
-
- def __init__(
- self, should_trace, check_include, should_start_context, file_mapper,
- timid, branch, warn, concurrency,
- ):
- """Create a collector.
-
- `should_trace` is a function, taking a file name and a frame, and
- returning a `coverage.FileDisposition` object.
-
- `check_include` is a function taking a file name and a frame. It returns
- a boolean: True if the file should be traced, False if not.
-
- `should_start_context` is a function taking a frame, and returning a
- string. If the frame should be the start of a new context, the string
- is the new context. If the frame should not be the start of a new
- context, return None.
-
- `file_mapper` is a function taking a filename, and returning a Unicode
- filename. The result is the name that will be recorded in the data
- file.
-
- If `timid` is true, then a slower simpler trace function will be
- used. This is important for some environments where manipulation of
- tracing functions makes the faster, more sophisticated trace function not
- operate properly.
-
- If `branch` is true, then branches will be measured. This involves
- collecting data on which statements followed each other (arcs). Use
- `get_arc_data` to get the arc data.
-
- `warn` is a warning function, taking a single string message argument
- and an optional slug argument which will be a string or None, to be
- used if a warning needs to be issued.
-
- `concurrency` is a list of strings indicating the concurrency libraries
- in use. Valid values are "greenlet", "eventlet", "gevent", or "thread"
- (the default). Of these four values, only one can be supplied. Other
- values are ignored.
-
- """
- self.should_trace = should_trace
- self.check_include = check_include
- self.should_start_context = should_start_context
- self.file_mapper = file_mapper
- self.warn = warn
- self.branch = branch
- self.threading = None
- self.covdata = None
-
- self.static_context = None
-
- self.origin = short_stack()
-
- self.concur_id_func = None
- self.mapped_file_cache = {}
-
- # We can handle a few concurrency options here, but only one at a time.
- these_concurrencies = self.SUPPORTED_CONCURRENCIES.intersection(concurrency)
- if len(these_concurrencies) > 1:
- raise CoverageException("Conflicting concurrency settings: %s" % concurrency)
- self.concurrency = these_concurrencies.pop() if these_concurrencies else ''
-
- try:
- if self.concurrency == "greenlet":
- import greenlet
- self.concur_id_func = greenlet.getcurrent
- elif self.concurrency == "eventlet":
- import eventlet.greenthread # pylint: disable=import-error,useless-suppression
- self.concur_id_func = eventlet.greenthread.getcurrent
- elif self.concurrency == "gevent":
- import gevent # pylint: disable=import-error,useless-suppression
- self.concur_id_func = gevent.getcurrent
- elif self.concurrency == "thread" or not self.concurrency:
- # It's important to import threading only if we need it. If
- # it's imported early, and the program being measured uses
- # gevent, then gevent's monkey-patching won't work properly.
- import threading
- self.threading = threading
- else:
- raise CoverageException("Don't understand concurrency=%s" % concurrency)
- except ImportError:
- raise CoverageException(
- "Couldn't trace with concurrency=%s, the module isn't installed." % (
- self.concurrency,
- )
- )
-
- self.reset()
-
- if timid:
- # Being timid: use the simple Python trace function.
- self._trace_class = PyTracer
- else:
- # Being fast: use the C Tracer if it is available, else the Python
- # trace function.
- self._trace_class = CTracer or PyTracer
-
- if self._trace_class is CTracer:
- self.file_disposition_class = CFileDisposition
- self.supports_plugins = True
- else:
- self.file_disposition_class = FileDisposition
- self.supports_plugins = False
-
- def __repr__(self):
- return "<Collector at 0x%x: %s>" % (id(self), self.tracer_name())
-
- def use_data(self, covdata, context):
- """Use `covdata` for recording data."""
- self.covdata = covdata
- self.static_context = context
- self.covdata.set_context(self.static_context)
-
- def tracer_name(self):
- """Return the class name of the tracer we're using."""
- return self._trace_class.__name__
-
- def _clear_data(self):
- """Clear out existing data, but stay ready for more collection."""
- # We used to use self.data.clear(), but that would remove filename
- # keys and data values that were still in use higher up the stack
- # when we are called as part of switch_context.
- for d in self.data.values():
- d.clear()
-
- for tracer in self.tracers:
- tracer.reset_activity()
-
- def reset(self):
- """Clear collected data, and prepare to collect more."""
- # A dictionary mapping file names to dicts with line number keys (if not
- # branch coverage), or mapping file names to dicts with line number
- # pairs as keys (if branch coverage).
- self.data = {}
-
- # A dictionary mapping file names to file tracer plugin names that will
- # handle them.
- self.file_tracers = {}
-
- self.disabled_plugins = set()
-
- # The .should_trace_cache attribute is a cache from file names to
- # coverage.FileDisposition objects, or None. When a file is first
- # considered for tracing, a FileDisposition is obtained from
- # Coverage.should_trace. Its .trace attribute indicates whether the
- # file should be traced or not. If it should be, a plugin with dynamic
- # file names can decide not to trace it based on the dynamic file name
- # being excluded by the inclusion rules, in which case the
- # FileDisposition will be replaced by None in the cache.
- if env.PYPY:
- import __pypy__ # pylint: disable=import-error
- # Alex Gaynor said:
- # should_trace_cache is a strictly growing key: once a key is in
- # it, it never changes. Further, the keys used to access it are
- # generally constant, given sufficient context. That is to say, at
- # any given point _trace() is called, pypy is able to know the key.
- # This is because the key is determined by the physical source code
- # line, and that's invariant with the call site.
- #
- # This property of a dict with immutable keys, combined with
- # call-site-constant keys is a match for PyPy's module dict,
- # which is optimized for such workloads.
- #
- # This gives a 20% benefit on the workload described at
- # https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
- self.should_trace_cache = __pypy__.newdict("module")
- else:
- self.should_trace_cache = {}
-
- # Our active Tracers.
- self.tracers = []
-
- self._clear_data()
-
- def _start_tracer(self):
- """Start a new Tracer object, and store it in self.tracers."""
- tracer = self._trace_class()
- tracer.data = self.data
- tracer.trace_arcs = self.branch
- tracer.should_trace = self.should_trace
- tracer.should_trace_cache = self.should_trace_cache
- tracer.warn = self.warn
-
- if hasattr(tracer, 'concur_id_func'):
- tracer.concur_id_func = self.concur_id_func
- elif self.concur_id_func:
- raise CoverageException(
- "Can't support concurrency=%s with %s, only threads are supported" % (
- self.concurrency, self.tracer_name(),
- )
- )
-
- if hasattr(tracer, 'file_tracers'):
- tracer.file_tracers = self.file_tracers
- if hasattr(tracer, 'threading'):
- tracer.threading = self.threading
- if hasattr(tracer, 'check_include'):
- tracer.check_include = self.check_include
- if hasattr(tracer, 'should_start_context'):
- tracer.should_start_context = self.should_start_context
- tracer.switch_context = self.switch_context
- if hasattr(tracer, 'disable_plugin'):
- tracer.disable_plugin = self.disable_plugin
-
- fn = tracer.start()
- self.tracers.append(tracer)
-
- return fn
-
- # The trace function has to be set individually on each thread before
- # execution begins. Ironically, the only support the threading module has
- # for running code before the thread main is the tracing function. So we
- # install this as a trace function, and the first time it's called, it does
- # the real trace installation.
-
- def _installation_trace(self, frame, event, arg):
- """Called on new threads, installs the real tracer."""
- # Remove ourselves as the trace function.
- sys.settrace(None)
- # Install the real tracer.
- fn = self._start_tracer()
- # Invoke the real trace function with the current event, to be sure
- # not to lose an event.
- if fn:
- fn = fn(frame, event, arg)
- # Return the new trace function to continue tracing in this scope.
- return fn
-
- def start(self):
- """Start collecting trace information."""
- if self._collectors:
- self._collectors[-1].pause()
-
- self.tracers = []
-
- # Check to see whether we had a fullcoverage tracer installed. If so,
- # get the stack frames it stashed away for us.
- traces0 = []
- fn0 = sys.gettrace()
- if fn0:
- tracer0 = getattr(fn0, '__self__', None)
- if tracer0:
- traces0 = getattr(tracer0, 'traces', [])
-
- try:
- # Install the tracer on this thread.
- fn = self._start_tracer()
- except:
- if self._collectors:
- self._collectors[-1].resume()
- raise
-
- # If _start_tracer succeeded, then we add ourselves to the global
- # stack of collectors.
- self._collectors.append(self)
-
- # Replay all the events from fullcoverage into the new trace function.
- for args in traces0:
- (frame, event, arg), lineno = args
- try:
- fn(frame, event, arg, lineno=lineno)
- except TypeError:
- raise Exception("fullcoverage must be run with the C trace function.")
-
- # Install our installation tracer in threading, to jump-start other
- # threads.
- if self.threading:
- self.threading.settrace(self._installation_trace)
-
- def stop(self):
- """Stop collecting trace information."""
- assert self._collectors
- if self._collectors[-1] is not self:
- print("self._collectors:")
- for c in self._collectors:
- print(" {!r}\n{}".format(c, c.origin))
- assert self._collectors[-1] is self, (
- "Expected current collector to be %r, but it's %r" % (self, self._collectors[-1])
- )
-
- self.pause()
-
- # Remove this Collector from the stack, and resume the one underneath
- # (if any).
- self._collectors.pop()
- if self._collectors:
- self._collectors[-1].resume()
-
- def pause(self):
- """Pause tracing, but be prepared to `resume`."""
- for tracer in self.tracers:
- tracer.stop()
- stats = tracer.get_stats()
- if stats:
- print("\nCoverage.py tracer stats:")
- for k in sorted(stats.keys()):
- print("%20s: %s" % (k, stats[k]))
- if self.threading:
- self.threading.settrace(None)
-
- def resume(self):
- """Resume tracing after a `pause`."""
- for tracer in self.tracers:
- tracer.start()
- if self.threading:
- self.threading.settrace(self._installation_trace)
- else:
- self._start_tracer()
-
- def _activity(self):
- """Has any activity been traced?
-
- Returns a boolean, True if any trace function was invoked.
-
- """
- return any(tracer.activity() for tracer in self.tracers)
-
- def switch_context(self, new_context):
- """Switch to a new dynamic context."""
- self.flush_data()
- if self.static_context:
- context = self.static_context
- if new_context:
- context += "|" + new_context
- else:
- context = new_context
- self.covdata.set_context(context)
-
- def disable_plugin(self, disposition):
- """Disable the plugin mentioned in `disposition`."""
- file_tracer = disposition.file_tracer
- plugin = file_tracer._coverage_plugin
- plugin_name = plugin._coverage_plugin_name
- self.warn("Disabling plug-in {!r} due to previous exception".format(plugin_name))
- plugin._coverage_enabled = False
- disposition.trace = False
-
- def cached_mapped_file(self, filename):
- """A locally cached version of file names mapped through file_mapper."""
- key = (type(filename), filename)
- try:
- return self.mapped_file_cache[key]
- except KeyError:
- return self.mapped_file_cache.setdefault(key, self.file_mapper(filename))
-
- def mapped_file_dict(self, d):
- """Return a dict like d, but with keys modified by file_mapper."""
- # The call to litems() ensures that the GIL protects the dictionary
- # iterator against concurrent modifications by tracers running
- # in other threads. We try three times in case of concurrent
- # access, hoping to get a clean copy.
- runtime_err = None
- for _ in range(3):
- try:
- items = litems(d)
- except RuntimeError as ex:
- runtime_err = ex
- else:
- break
- else:
- raise runtime_err
-
- if getattr(sys, 'is_standalone_binary', False):
- # file names should stay relative to the arcadia root, because the files may not exist on disk
- return dict((k, v) for k, v in items if v)
-
- return dict((self.cached_mapped_file(k), v) for k, v in items if v)
-
- def plugin_was_disabled(self, plugin):
- """Record that `plugin` was disabled during the run."""
- self.disabled_plugins.add(plugin._coverage_plugin_name)
-
- def flush_data(self):
- """Save the collected data to our associated `CoverageData`.
-
- Data may have also been saved along the way. This forces the
- last of the data to be saved.
-
- Returns True if there was data to save, False if not.
- """
- if not self._activity():
- return False
-
- if self.branch:
- self.covdata.add_arcs(self.mapped_file_dict(self.data))
- else:
- self.covdata.add_lines(self.mapped_file_dict(self.data))
-
- file_tracers = {
- k: v for k, v in self.file_tracers.items()
- if v not in self.disabled_plugins
- }
- self.covdata.add_file_tracers(self.mapped_file_dict(file_tracers))
-
- self._clear_data()
- return True
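The Collector ultimately builds on sys.settrace; an illustrative-only sketch (far simpler than PyTracer or CTracer) of recording executed line numbers per file:

    import sys
    from collections import defaultdict

    executed = defaultdict(set)

    def tracer(frame, event, arg):
        # Record the line number of every 'line' event, keyed by file name.
        if event == "line":
            executed[frame.f_code.co_filename].add(frame.f_lineno)
        return tracer  # keep tracing inside this frame

    def demo():
        x = 1
        if x:
            x += 1
        return x

    sys.settrace(tracer)
    try:
        demo()
    finally:
        sys.settrace(None)

    print({f: sorted(lines) for f, lines in executed.items()})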
diff --git a/contrib/python/coverage/py3/coverage/config.py b/contrib/python/coverage/py3/coverage/config.py
deleted file mode 100644
index ceb7201b65..0000000000
--- a/contrib/python/coverage/py3/coverage/config.py
+++ /dev/null
@@ -1,605 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Config file for coverage.py"""
-
-import collections
-import copy
-import os
-import os.path
-import re
-import sys
-
-from coverage import env
-from coverage.backward import configparser, iitems, string_class
-from coverage.misc import contract, CoverageException, isolate_module
-from coverage.misc import substitute_variables
-
-from coverage.tomlconfig import TomlConfigParser, TomlDecodeError
-
-os = isolate_module(os)
-
-
-class HandyConfigParser(configparser.RawConfigParser):
- """Our specialization of ConfigParser."""
-
- def __init__(self, our_file):
- """Create the HandyConfigParser.
-
- `our_file` is True if this config file is specifically for coverage,
- False if we are examining another config file (tox.ini, setup.cfg)
- for possible settings.
- """
-
- configparser.RawConfigParser.__init__(self)
- self.section_prefixes = ["coverage:"]
- if our_file:
- self.section_prefixes.append("")
-
- def read(self, filenames, encoding=None):
- """Read a file name as UTF-8 configuration data."""
- kwargs = {}
- if env.PYVERSION >= (3, 2):
- kwargs['encoding'] = encoding or "utf-8"
- return configparser.RawConfigParser.read(self, filenames, **kwargs)
-
- def has_option(self, section, option):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- has = configparser.RawConfigParser.has_option(self, real_section, option)
- if has:
- return has
- return False
-
- def has_section(self, section):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- has = configparser.RawConfigParser.has_section(self, real_section)
- if has:
- return real_section
- return False
-
- def options(self, section):
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- if configparser.RawConfigParser.has_section(self, real_section):
- return configparser.RawConfigParser.options(self, real_section)
- raise configparser.NoSectionError(section)
-
- def get_section(self, section):
- """Get the contents of a section, as a dictionary."""
- d = {}
- for opt in self.options(section):
- d[opt] = self.get(section, opt)
- return d
-
- def get(self, section, option, *args, **kwargs):
- """Get a value, replacing environment variables also.
-
- The arguments are the same as `RawConfigParser.get`, but in the found
- value, ``$WORD`` or ``${WORD}`` are replaced by the value of the
- environment variable ``WORD``.
-
- Returns the finished value.
-
- """
- for section_prefix in self.section_prefixes:
- real_section = section_prefix + section
- if configparser.RawConfigParser.has_option(self, real_section, option):
- break
- else:
- raise configparser.NoOptionError(option, section)
-
- v = configparser.RawConfigParser.get(self, real_section, option, *args, **kwargs)
- v = substitute_variables(v, os.environ)
- return v
-
- def getlist(self, section, option):
- """Read a list of strings.
-
- The value of `section` and `option` is treated as a comma- and newline-
- separated list of strings. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- value_list = self.get(section, option)
- values = []
- for value_line in value_list.split('\n'):
- for value in value_line.split(','):
- value = value.strip()
- if value:
- values.append(value)
- return values
-
- def getregexlist(self, section, option):
- """Read a list of full-line regexes.
-
- The value of `section` and `option` is treated as a newline-separated
- list of regexes. Each value is stripped of whitespace.
-
- Returns the list of strings.
-
- """
- line_list = self.get(section, option)
- value_list = []
- for value in line_list.splitlines():
- value = value.strip()
- try:
- re.compile(value)
- except re.error as e:
- raise CoverageException(
- "Invalid [%s].%s value %r: %s" % (section, option, value, e)
- )
- if value:
- value_list.append(value)
- return value_list
-
-
-# The default line exclusion regexes.
-DEFAULT_EXCLUDE = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(cover|COVER)',
-]
-
-# The default partial branch regexes, to be modified by the user.
-DEFAULT_PARTIAL = [
- r'#\s*(pragma|PRAGMA)[:\s]?\s*(no|NO)\s*(branch|BRANCH)',
-]
-
-# The default partial branch regexes, based on Python semantics.
-# These are any Python branching constructs that can't actually execute all
-# their branches.
-DEFAULT_PARTIAL_ALWAYS = [
- 'while (True|1|False|0):',
- 'if (True|1|False|0):',
-]
-
-
-class CoverageConfig(object):
- """Coverage.py configuration.
-
- The attributes of this class are the various settings that control the
- operation of coverage.py.
-
- """
- # pylint: disable=too-many-instance-attributes
-
- def __init__(self):
- """Initialize the configuration attributes to their defaults."""
- # Metadata about the config.
- # We tried to read these config files.
- self.attempted_config_files = []
- # We did read these config files, but maybe didn't find any content for us.
- self.config_files_read = []
- # The file that gave us our configuration.
- self.config_file = None
- self._config_contents = None
-
- # Defaults for [run] and [report]
- self._include = None
- self._omit = None
-
- # Defaults for [run]
- self.branch = False
- self.command_line = None
- self.concurrency = None
- self.context = None
- self.cover_pylib = False
- self.data_file = ".coverage"
- self.debug = []
- self.disable_warnings = []
- self.dynamic_context = None
- self.note = None
- self.parallel = False
- self.plugins = []
- self.relative_files = False
- self.run_include = None
- self.run_omit = None
- self.source = None
- self.source_pkgs = []
- self.timid = False
- self._crash = None
-
- # Defaults for [report]
- self.exclude_list = DEFAULT_EXCLUDE[:]
- self.fail_under = 0.0
- self.ignore_errors = False
- self.report_include = None
- self.report_omit = None
- self.partial_always_list = DEFAULT_PARTIAL_ALWAYS[:]
- self.partial_list = DEFAULT_PARTIAL[:]
- self.precision = 0
- self.report_contexts = None
- self.show_missing = False
- self.skip_covered = False
- self.skip_empty = False
- self.sort = None
-
- # Defaults for [html]
- self.extra_css = None
- self.html_dir = "htmlcov"
- self.html_skip_covered = None
- self.html_skip_empty = None
- self.html_title = "Coverage report"
- self.show_contexts = False
-
- # Defaults for [xml]
- self.xml_output = "coverage.xml"
- self.xml_package_depth = 99
-
- # Defaults for [json]
- self.json_output = "coverage.json"
- self.json_pretty_print = False
- self.json_show_contexts = False
-
- # Defaults for [paths]
- self.paths = collections.OrderedDict()
-
- # Options for plugins
- self.plugin_options = {}
- self.suppress_plugin_errors = True
-
- MUST_BE_LIST = [
- "debug", "concurrency", "plugins",
- "report_omit", "report_include",
- "run_omit", "run_include",
- ]
-
- def from_args(self, **kwargs):
- """Read config values from `kwargs`."""
- for k, v in iitems(kwargs):
- if v is not None:
- if k in self.MUST_BE_LIST and isinstance(v, string_class):
- v = [v]
- setattr(self, k, v)
-
- def from_resource(self, resource_name):
- assert getattr(sys, 'is_standalone_binary', False), 'from_resource() is only for standalone binaries, not scripts'
- cp, self._config_contents = _load_config_from_resource(resource_name)
- return self._parse_config(cp, resource_name, True)
-
- @contract(filename=str)
- def from_file(self, filename, our_file):
- """Read configuration from a .rc file.
-
- `filename` is a file name to read.
-
- `our_file` is True if this config file is specifically for coverage,
- False if we are examining another config file (tox.ini, setup.cfg)
- for possible settings.
-
- Returns True or False, whether the file could be read, and it had some
- coverage.py settings in it.
-
- """
- _, ext = os.path.splitext(filename)
- if ext == '.toml':
- cp = TomlConfigParser(our_file)
- else:
- cp = HandyConfigParser(our_file)
-
- self.attempted_config_files.append(filename)
-
- try:
- files_read = cp.read(filename)
- except (configparser.Error, TomlDecodeError) as err:
- raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
- if not files_read:
- return False
-
- self.config_files_read.extend(map(os.path.abspath, files_read))
-
- return self._parse_config(cp, filename, our_file)
-
- def _parse_config(self, cp, filename, our_file):
- any_set = False
- try:
- for option_spec in self.CONFIG_FILE_OPTIONS:
- was_set = self._set_attr_from_config_option(cp, *option_spec)
- if was_set:
- any_set = True
- except ValueError as err:
- raise CoverageException("Couldn't read config file %s: %s" % (filename, err))
-
- # Check that there are no unrecognized options.
- all_options = collections.defaultdict(set)
- for option_spec in self.CONFIG_FILE_OPTIONS:
- section, option = option_spec[1].split(":")
- all_options[section].add(option)
-
- for section, options in iitems(all_options):
- real_section = cp.has_section(section)
- if real_section:
- for unknown in set(cp.options(section)) - options:
- raise CoverageException(
- "Unrecognized option '[%s] %s=' in config file %s" % (
- real_section, unknown, filename
- )
- )
-
- # [paths] is special
- if cp.has_section('paths'):
- for option in cp.options('paths'):
- self.paths[option] = cp.getlist('paths', option)
- any_set = True
-
- # plugins can have options
- for plugin in self.plugins:
- if cp.has_section(plugin):
- self.plugin_options[plugin] = cp.get_section(plugin)
- any_set = True
-
- # Was this file used as a config file? If it's specifically our file,
- # then it was used. If we're piggybacking on someone else's file,
- # then it was only used if we found some settings in it.
- if our_file:
- used = True
- else:
- used = any_set
-
- if used:
- self.config_file = os.path.abspath(filename)
- if not getattr(sys, 'is_standalone_binary', False):
- with open(filename, "rb") as f:
- self._config_contents = f.read()
-
- return used
-
- def copy(self):
- """Return a copy of the configuration."""
- return copy.deepcopy(self)
-
- CONFIG_FILE_OPTIONS = [
- # These are *args for _set_attr_from_config_option:
- # (attr, where, type_="")
- #
- # attr is the attribute to set on the CoverageConfig object.
- # where is the section:name to read from the configuration file.
- # type_ is the optional type to apply, by using .getTYPE to read the
- # configuration value from the file.
-
- # [run]
- ('branch', 'run:branch', 'boolean'),
- ('command_line', 'run:command_line'),
- ('concurrency', 'run:concurrency', 'list'),
- ('context', 'run:context'),
- ('cover_pylib', 'run:cover_pylib', 'boolean'),
- ('data_file', 'run:data_file'),
- ('debug', 'run:debug', 'list'),
- ('disable_warnings', 'run:disable_warnings', 'list'),
- ('dynamic_context', 'run:dynamic_context'),
- ('note', 'run:note'),
- ('parallel', 'run:parallel', 'boolean'),
- ('plugins', 'run:plugins', 'list'),
- ('relative_files', 'run:relative_files', 'boolean'),
- ('run_include', 'run:include', 'list'),
- ('run_omit', 'run:omit', 'list'),
- ('source', 'run:source', 'list'),
- ('source_pkgs', 'run:source_pkgs', 'list'),
- ('timid', 'run:timid', 'boolean'),
- ('_crash', 'run:_crash'),
- ('suppress_plugin_errors', 'run:suppress_plugin_errors', 'boolean'),
-
- # [report]
- ('exclude_list', 'report:exclude_lines', 'regexlist'),
- ('fail_under', 'report:fail_under', 'float'),
- ('ignore_errors', 'report:ignore_errors', 'boolean'),
- ('partial_always_list', 'report:partial_branches_always', 'regexlist'),
- ('partial_list', 'report:partial_branches', 'regexlist'),
- ('precision', 'report:precision', 'int'),
- ('report_contexts', 'report:contexts', 'list'),
- ('report_include', 'report:include', 'list'),
- ('report_omit', 'report:omit', 'list'),
- ('show_missing', 'report:show_missing', 'boolean'),
- ('skip_covered', 'report:skip_covered', 'boolean'),
- ('skip_empty', 'report:skip_empty', 'boolean'),
- ('sort', 'report:sort'),
-
- # [html]
- ('extra_css', 'html:extra_css'),
- ('html_dir', 'html:directory'),
- ('html_skip_covered', 'html:skip_covered', 'boolean'),
- ('html_skip_empty', 'html:skip_empty', 'boolean'),
- ('html_title', 'html:title'),
- ('show_contexts', 'html:show_contexts', 'boolean'),
-
- # [xml]
- ('xml_output', 'xml:output'),
- ('xml_package_depth', 'xml:package_depth', 'int'),
-
- # [json]
- ('json_output', 'json:output'),
- ('json_pretty_print', 'json:pretty_print', 'boolean'),
- ('json_show_contexts', 'json:show_contexts', 'boolean'),
- ]
-
- def _set_attr_from_config_option(self, cp, attr, where, type_=''):
- """Set an attribute on self if it exists in the ConfigParser.
-
- Returns True if the attribute was set.
-
- """
- section, option = where.split(":")
- if cp.has_option(section, option):
- method = getattr(cp, 'get' + type_)
- setattr(self, attr, method(section, option))
- return True
- return False
-
- def get_plugin_options(self, plugin):
- """Get a dictionary of options for the plugin named `plugin`."""
- return self.plugin_options.get(plugin, {})
-
- def set_option(self, option_name, value):
- """Set an option in the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- `value` is the new value for the option.
-
- """
- # Special-cased options.
- if option_name == "paths":
- self.paths = value
- return
-
- # Check all the hard-coded options.
- for option_spec in self.CONFIG_FILE_OPTIONS:
- attr, where = option_spec[:2]
- if where == option_name:
- setattr(self, attr, value)
- return
-
- # See if it's a plugin option.
- plugin_name, _, key = option_name.partition(":")
- if key and plugin_name in self.plugins:
- self.plugin_options.setdefault(plugin_name, {})[key] = value
- return
-
- # If we get here, we didn't find the option.
- raise CoverageException("No such option: %r" % option_name)
-
- def get_option(self, option_name):
- """Get an option from the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- Returns the value of the option.
-
- """
- # Special-cased options.
- if option_name == "paths":
- return self.paths
-
- # Check all the hard-coded options.
- for option_spec in self.CONFIG_FILE_OPTIONS:
- attr, where = option_spec[:2]
- if where == option_name:
- return getattr(self, attr)
-
- # See if it's a plugin option.
- plugin_name, _, key = option_name.partition(":")
- if key and plugin_name in self.plugins:
- return self.plugin_options.get(plugin_name, {}).get(key)
-
- # If we get here, we didn't find the option.
- raise CoverageException("No such option: %r" % option_name)
-
- def post_process_file(self, path):
- """Make final adjustments to a file path to make it usable."""
- return os.path.expanduser(path)
-
- def post_process(self):
- """Make final adjustments to settings to make them usable."""
- self.data_file = self.post_process_file(self.data_file)
- self.html_dir = self.post_process_file(self.html_dir)
- self.xml_output = self.post_process_file(self.xml_output)
- self.paths = collections.OrderedDict(
- (k, [self.post_process_file(f) for f in v])
- for k, v in self.paths.items()
- )
-
-
-def config_files_to_try(config_file):
- """What config files should we try to read?
-
- Returns a list of tuples:
- (filename, is_our_file, was_file_specified)
- """
-
- # Some API users were specifying ".coveragerc" to mean the same as
- # True, so make it so.
- if config_file == ".coveragerc":
- config_file = True
- specified_file = (config_file is not True)
- if not specified_file:
- # No file was specified. Check COVERAGE_RCFILE.
- config_file = os.environ.get('COVERAGE_RCFILE')
- if config_file:
- specified_file = True
- if not specified_file:
- # Still no file specified. Default to .coveragerc
- config_file = ".coveragerc"
- files_to_try = [
- (config_file, True, specified_file),
- ("setup.cfg", False, False),
- ("tox.ini", False, False),
- ("pyproject.toml", False, False),
- ]
- return files_to_try
-
-
-def read_coverage_config(config_file, **kwargs):
- """Read the coverage.py configuration.
-
- Arguments:
- config_file: a boolean or string, see the `Coverage` class for the
- tricky details.
- all others: keyword arguments from the `Coverage` class, used for
- setting values in the configuration.
-
- Returns:
- config:
- config is a CoverageConfig object read from the appropriate
- configuration file.
-
- """
- # Build the configuration from a number of sources:
- # 1) defaults:
- config = CoverageConfig()
-
- # 1.1 built-in config
- if getattr(sys, 'is_standalone_binary', False):
- config.from_resource("/coverage_plugins/coveragerc.txt")
-
- # 2) from a file:
- if config_file:
- files_to_try = config_files_to_try(config_file)
-
- for fname, our_file, specified_file in files_to_try:
- if getattr(sys, 'is_standalone_binary', False) and fname == "/coverage_plugins/coveragerc.txt":
- continue
- config_read = config.from_file(fname, our_file=our_file)
- if config_read:
- break
- if specified_file:
- raise CoverageException("Couldn't read '%s' as a config file" % fname)
-
- # $set_env.py: COVERAGE_DEBUG - Options for --debug.
- # 3) from environment variables:
- env_data_file = os.environ.get('COVERAGE_FILE')
- if env_data_file:
- config.data_file = env_data_file
- debugs = os.environ.get('COVERAGE_DEBUG')
- if debugs:
- config.debug.extend(d.strip() for d in debugs.split(","))
-
- # 4) from constructor arguments:
- config.from_args(**kwargs)
-
- # Once all the config has been collected, there's a little post-processing
- # to do.
- config.post_process()
-
- return config
-
-
-def _load_config_from_resource(resource_name):
- from io import StringIO
- from library.python import resource
-
- config_data = resource.find(resource_name)
- if config_data is None:
- raise IOError("No such resource: " + resource_name)
-
- config_data = config_data.decode('utf-8')
- cp = HandyConfigParser(True)
- try:
- cp.readfp(StringIO(config_data))
- except configparser.Error as err:
- raise CoverageException("Couldn't read config %s: %s" % (resource_name, err))
- return cp, config_data
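To make the layering performed by `read_coverage_config` above concrete, here is a minimal, non-authoritative sketch of how the sources stack up (built-in defaults, then a config file, then environment variables, then constructor arguments). The data-file name is purely illustrative:

    import os
    import coverage

    os.environ["COVERAGE_FILE"] = ".coverage.ci"   # environment layer (step 3)
    cov = coverage.Coverage(branch=True)           # constructor-argument layer (step 4)
    print(cov.get_option("run:data_file"))         # ".coverage.ci" from the environment
    print(cov.get_option("run:branch"))            # True from the constructor argument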
diff --git a/contrib/python/coverage/py3/coverage/context.py b/contrib/python/coverage/py3/coverage/context.py
deleted file mode 100644
index ea13da21ed..0000000000
--- a/contrib/python/coverage/py3/coverage/context.py
+++ /dev/null
@@ -1,91 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determine contexts for coverage.py"""
-
-
-def combine_context_switchers(context_switchers):
- """Create a single context switcher from multiple switchers.
-
- `context_switchers` is a list of functions that take a frame as an
- argument and return a string to use as the new context label.
-
-    Returns a function that composes the `context_switchers` functions, or
-    None if `context_switchers` is an empty list.
-
- When invoked, the combined switcher calls `context_switchers` one-by-one
- until a string is returned. The combined switcher returns None if all
- `context_switchers` return None.
- """
- if not context_switchers:
- return None
-
- if len(context_switchers) == 1:
- return context_switchers[0]
-
- def should_start_context(frame):
- """The combiner for multiple context switchers."""
- for switcher in context_switchers:
- new_context = switcher(frame)
- if new_context is not None:
- return new_context
- return None
-
- return should_start_context
-
-
-def should_start_context_test_function(frame):
- """Is this frame calling a test_* function?"""
- co_name = frame.f_code.co_name
- if co_name.startswith("test") or co_name == "runTest":
- return qualname_from_frame(frame)
- return None
-
-
-def qualname_from_frame(frame):
- """Get a qualified name for the code running in `frame`."""
- co = frame.f_code
- fname = co.co_name
- method = None
- if co.co_argcount and co.co_varnames[0] == "self":
- self = frame.f_locals["self"]
- method = getattr(self, fname, None)
-
- if method is None:
- func = frame.f_globals.get(fname)
- if func is None:
- return None
- return func.__module__ + '.' + fname
-
- func = getattr(method, '__func__', None)
- if func is None:
- cls = self.__class__
- return cls.__module__ + '.' + cls.__name__ + "." + fname
-
- if hasattr(func, '__qualname__'):
- qname = func.__module__ + '.' + func.__qualname__
- else:
- for cls in getattr(self.__class__, '__mro__', ()):
- f = cls.__dict__.get(fname, None)
- if f is None:
- continue
- if f is func:
- qname = cls.__module__ + '.' + cls.__name__ + "." + fname
- break
- else:
- # Support for old-style classes.
- def mro(bases):
- for base in bases:
- f = base.__dict__.get(fname, None)
- if f is func:
- return base.__module__ + '.' + base.__name__ + "." + fname
- for base in bases:
- qname = mro(base.__bases__)
- if qname is not None:
- return qname
- return None
- qname = mro([self.__class__])
- if qname is None:
- qname = func.__module__ + '.' + fname
-
- return qname
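A short sketch of the first-non-None behavior of `combine_context_switchers` above; the two switcher functions here are toy stand-ins, not hooks that coverage.py itself installs:

    import sys
    from coverage.context import combine_context_switchers

    def by_test_name(frame):
        # Return a context label only for test_* functions.
        name = frame.f_code.co_name
        return name if name.startswith("test") else None

    def always_other(frame):
        return "other"

    combined = combine_context_switchers([by_test_name, always_other])
    print(combined(sys._getframe()))   # "other" at module level, since co_name is "<module>"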
diff --git a/contrib/python/coverage/py3/coverage/control.py b/contrib/python/coverage/py3/coverage/control.py
deleted file mode 100644
index 605b50c26b..0000000000
--- a/contrib/python/coverage/py3/coverage/control.py
+++ /dev/null
@@ -1,1162 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Core control stuff for coverage.py."""
-
-import atexit
-import collections
-import contextlib
-import os
-import os.path
-import platform
-import sys
-import time
-import json
-
-from coverage import env
-from coverage.annotate import AnnotateReporter
-from coverage.backward import string_class, iitems
-from coverage.collector import Collector, CTracer
-from coverage.config import read_coverage_config
-from coverage.context import should_start_context_test_function, combine_context_switchers
-from coverage.data import CoverageData, combine_parallel_data
-from coverage.debug import DebugControl, short_stack, write_formatted_info
-from coverage.disposition import disposition_debug_msg
-from coverage.files import PathAliases, abs_file, canonical_filename, relative_filename, set_relative_directory
-from coverage.html import HtmlReporter
-from coverage.inorout import InOrOut
-from coverage.jsonreport import JsonReporter
-from coverage.misc import CoverageException, bool_or_none, join_regex
-from coverage.misc import DefaultValue, ensure_dir_for_file, isolate_module
-from coverage.plugin import FileReporter
-from coverage.plugin_support import Plugins
-from coverage.python import PythonFileReporter
-from coverage.report import render_report
-from coverage.results import Analysis, Numbers
-from coverage.summary import SummaryReporter
-from coverage.xmlreport import XmlReporter
-
-try:
- from coverage.multiproc import patch_multiprocessing
-except ImportError: # pragma: only jython
- # Jython has no multiprocessing module.
- patch_multiprocessing = None
-
-os = isolate_module(os)
-
-@contextlib.contextmanager
-def override_config(cov, **kwargs):
- """Temporarily tweak the configuration of `cov`.
-
- The arguments are applied to `cov.config` with the `from_args` method.
- At the end of the with-statement, the old configuration is restored.
- """
- original_config = cov.config
- cov.config = cov.config.copy()
- try:
- cov.config.from_args(**kwargs)
- yield
- finally:
- cov.config = original_config
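A minimal sketch of the restore-on-exit behavior of `override_config`, assuming it is imported from `coverage.control` as defined above; `show_missing` is just one configuration attribute that `from_args` accepts:

    import coverage
    from coverage.control import override_config

    cov = coverage.Coverage()
    print(cov.get_option("report:show_missing"))      # False by default
    with override_config(cov, show_missing=True):
        print(cov.get_option("report:show_missing"))  # True inside the block
    print(cov.get_option("report:show_missing"))      # restored on exit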
-
-
-_DEFAULT_DATAFILE = DefaultValue("MISSING")
-
-class Coverage(object):
- """Programmatic access to coverage.py.
-
- To use::
-
- from coverage import Coverage
-
- cov = Coverage()
- cov.start()
- #.. call your code ..
- cov.stop()
- cov.html_report(directory='covhtml')
-
- Note: in keeping with Python custom, names starting with underscore are
- not part of the public API. They might stop working at any point. Please
- limit yourself to documented methods to avoid problems.
-
- """
-
- # The stack of started Coverage instances.
- _instances = []
-
- @classmethod
- def current(cls):
- """Get the latest started `Coverage` instance, if any.
-
- Returns: a `Coverage` instance, or None.
-
- .. versionadded:: 5.0
-
- """
- if cls._instances:
- return cls._instances[-1]
- else:
- return None
-
- def __init__(
- self, data_file=_DEFAULT_DATAFILE, data_suffix=None, cover_pylib=None,
- auto_data=False, timid=None, branch=None, config_file=True,
- source=None, source_pkgs=None, omit=None, include=None, debug=None,
- concurrency=None, check_preimported=False, context=None,
- ): # pylint: disable=too-many-arguments
- """
- Many of these arguments duplicate and override values that can be
- provided in a configuration file. Parameters that are missing here
- will use values from the config file.
-
- `data_file` is the base name of the data file to use. The config value
- defaults to ".coverage". None can be provided to prevent writing a data
- file. `data_suffix` is appended (with a dot) to `data_file` to create
- the final file name. If `data_suffix` is simply True, then a suffix is
- created with the machine and process identity included.
-
- `cover_pylib` is a boolean determining whether Python code installed
- with the Python interpreter is measured. This includes the Python
- standard library and any packages installed with the interpreter.
-
- If `auto_data` is true, then any existing data file will be read when
- coverage measurement starts, and data will be saved automatically when
- measurement stops.
-
- If `timid` is true, then a slower and simpler trace function will be
- used. This is important for some environments where manipulation of
- tracing functions breaks the faster trace function.
-
- If `branch` is true, then branch coverage will be measured in addition
- to the usual statement coverage.
-
- `config_file` determines what configuration file to read:
-
- * If it is ".coveragerc", it is interpreted as if it were True,
- for backward compatibility.
-
- * If it is a string, it is the name of the file to read. If the
- file can't be read, it is an error.
-
-        * If it is True, then a few standard file names are tried
-          (".coveragerc", "setup.cfg", "tox.ini", "pyproject.toml"). It is not an error for
- these files to not be found.
-
- * If it is False, then no configuration file is read.
-
- `source` is a list of file paths or package names. Only code located
- in the trees indicated by the file paths or package names will be
- measured.
-
- `source_pkgs` is a list of package names. It works the same as
- `source`, but can be used to name packages where the name can also be
- interpreted as a file path.
-
- `include` and `omit` are lists of file name patterns. Files that match
- `include` will be measured, files that match `omit` will not. Each
- will also accept a single string argument.
-
- `debug` is a list of strings indicating what debugging information is
- desired.
-
- `concurrency` is a string indicating the concurrency library being used
- in the measured code. Without this, coverage.py will get incorrect
- results if these libraries are in use. Valid strings are "greenlet",
- "eventlet", "gevent", "multiprocessing", or "thread" (the default).
- This can also be a list of these strings.
-
- If `check_preimported` is true, then when coverage is started, the
- already-imported files will be checked to see if they should be
- measured by coverage. Importing measured files before coverage is
- started can mean that code is missed.
-
- `context` is a string to use as the :ref:`static context
- <static_contexts>` label for collected data.
-
- .. versionadded:: 4.0
- The `concurrency` parameter.
-
- .. versionadded:: 4.2
- The `concurrency` parameter can now be a list of strings.
-
- .. versionadded:: 5.0
- The `check_preimported` and `context` parameters.
-
- .. versionadded:: 5.3
- The `source_pkgs` parameter.
-
- """
- # data_file=None means no disk file at all. data_file missing means
- # use the value from the config file.
- self._no_disk = data_file is None
- if data_file is _DEFAULT_DATAFILE:
- data_file = None
-
- # Build our configuration from a number of sources.
- self.config = read_coverage_config(
- config_file=config_file,
- data_file=data_file, cover_pylib=cover_pylib, timid=timid,
- branch=branch, parallel=bool_or_none(data_suffix),
- source=source, source_pkgs=source_pkgs, run_omit=omit, run_include=include, debug=debug,
- report_omit=omit, report_include=include,
- concurrency=concurrency, context=context,
- )
-
- # This is injectable by tests.
- self._debug_file = None
-
- self._auto_load = self._auto_save = auto_data
- self._data_suffix_specified = data_suffix
-
- # Is it ok for no data to be collected?
- self._warn_no_data = True
- self._warn_unimported_source = True
- self._warn_preimported_source = check_preimported
- self._no_warn_slugs = None
-
- # A record of all the warnings that have been issued.
- self._warnings = []
-
- # Other instance attributes, set later.
- self._data = self._collector = None
- self._plugins = None
- self._inorout = None
- self._data_suffix = self._run_suffix = None
- self._exclude_re = None
- self._debug = None
- self._file_mapper = None
-
- # State machine variables:
- # Have we initialized everything?
- self._inited = False
- self._inited_for_start = False
- # Have we started collecting and not stopped it?
- self._started = False
- # Should we write the debug output?
- self._should_write_debug = True
-
- # If we have sub-process measurement happening automatically, then we
- # want any explicit creation of a Coverage object to mean, this process
- # is already coverage-aware, so don't auto-measure it. By now, the
- # auto-creation of a Coverage object has already happened. But we can
- # find it and tell it not to save its data.
- if not env.METACOV:
- _prevent_sub_process_measurement()
-
- # Store constructor args to reproduce Coverage object in a subprocess created via multiprocessing.Process
- self._dumped_args = json.dumps(dict(
- data_file=data_file, data_suffix=data_suffix, cover_pylib=cover_pylib,
- auto_data=auto_data, timid=timid, branch=branch, config_file=config_file,
- source=source, omit=omit, include=include, debug=debug,
- concurrency=concurrency
- ))
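A hypothetical constructor call exercising several of the keyword arguments documented above; the package name and omit pattern are placeholders, not defaults:

    import coverage

    cov = coverage.Coverage(
        data_file=".coverage",
        branch=True,                      # measure branch coverage as well
        source=["mypkg"],                 # only measure code under mypkg
        omit=["*/tests/*"],               # skip the test modules themselves
        concurrency="thread",             # the default concurrency model
    )
    cov.start()
    # ... import and exercise mypkg here ...
    cov.stop()
    cov.save()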
-
- def _init(self):
- """Set all the initial state.
-
- This is called by the public methods to initialize state. This lets us
- construct a :class:`Coverage` object, then tweak its state before this
- function is called.
-
- """
- if self._inited:
- return
-
- self._inited = True
-
- # Create and configure the debugging controller. COVERAGE_DEBUG_FILE
- # is an environment variable, the name of a file to append debug logs
- # to.
- self._debug = DebugControl(self.config.debug, self._debug_file)
-
- if "multiprocessing" in (self.config.concurrency or ()):
- # Multi-processing uses parallel for the subprocesses, so also use
- # it for the main process.
- self.config.parallel = True
-
- # _exclude_re is a dict that maps exclusion list names to compiled regexes.
- self._exclude_re = {}
-
- set_relative_directory()
-
- if getattr(sys, 'is_standalone_binary', False):
- self._file_mapper = canonical_filename
- else:
- self._file_mapper = relative_filename if self.config.relative_files else abs_file
-
- # Load plugins
- self._plugins = Plugins.load_plugins(self.config.plugins, self.config, self._debug)
-
- # Run configuring plugins.
- for plugin in self._plugins.configurers:
- # We need an object with set_option and get_option. Either self or
- # self.config will do. Choosing randomly stops people from doing
- # other things with those objects, against the public API. Yes,
- # this is a bit childish. :)
- plugin.configure([self, self.config][int(time.time()) % 2])
-
- def _post_init(self):
- """Stuff to do after everything is initialized."""
- if self._should_write_debug:
- self._should_write_debug = False
- self._write_startup_debug()
-
- # '[run] _crash' will raise an exception if the value is close by in
- # the call stack, for testing error handling.
- if self.config._crash and self.config._crash in short_stack(limit=4):
- raise Exception("Crashing because called by {}".format(self.config._crash))
-
- def _write_startup_debug(self):
- """Write out debug info at startup if needed."""
- wrote_any = False
- with self._debug.without_callers():
- if self._debug.should('config'):
- config_info = sorted(self.config.__dict__.items())
- config_info = [(k, v) for k, v in config_info if not k.startswith('_')]
- write_formatted_info(self._debug, "config", config_info)
- wrote_any = True
-
- if self._debug.should('sys'):
- write_formatted_info(self._debug, "sys", self.sys_info())
- for plugin in self._plugins:
- header = "sys: " + plugin._coverage_plugin_name
- info = plugin.sys_info()
- write_formatted_info(self._debug, header, info)
- wrote_any = True
-
- if wrote_any:
- write_formatted_info(self._debug, "end", ())
-
- def _should_trace(self, filename, frame):
- """Decide whether to trace execution in `filename`.
-
- Calls `_should_trace_internal`, and returns the FileDisposition.
-
- """
- disp = self._inorout.should_trace(filename, frame)
- if self._debug.should('trace'):
- self._debug.write(disposition_debug_msg(disp))
- return disp
-
- def _check_include_omit_etc(self, filename, frame):
- """Check a file name against the include/omit/etc, rules, verbosely.
-
- Returns a boolean: True if the file should be traced, False if not.
-
- """
- reason = self._inorout.check_include_omit_etc(filename, frame)
- if self._debug.should('trace'):
- if not reason:
- msg = "Including %r" % (filename,)
- else:
- msg = "Not including %r: %s" % (filename, reason)
- self._debug.write(msg)
-
- return not reason
-
- def _warn(self, msg, slug=None, once=False):
- """Use `msg` as a warning.
-
- For warning suppression, use `slug` as the shorthand.
-
- If `once` is true, only show this warning once (determined by the
-        slug).
-
- """
- if self._no_warn_slugs is None:
- self._no_warn_slugs = list(self.config.disable_warnings)
-
- if slug in self._no_warn_slugs:
- # Don't issue the warning
- return
-
- self._warnings.append(msg)
- if slug:
- msg = "%s (%s)" % (msg, slug)
- if self._debug.should('pid'):
- msg = "[%d] %s" % (os.getpid(), msg)
- sys.stderr.write("Coverage.py warning: %s\n" % msg)
-
- if once:
- self._no_warn_slugs.append(slug)
-
- def get_option(self, option_name):
- """Get an option from the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with `"run:branch"`.
-
- Returns the value of the option. The type depends on the option
- selected.
-
- As a special case, an `option_name` of ``"paths"`` will return an
- OrderedDict with the entire ``[paths]`` section value.
-
- .. versionadded:: 4.0
-
- """
- return self.config.get_option(option_name)
-
- def set_option(self, option_name, value):
- """Set an option in the configuration.
-
- `option_name` is a colon-separated string indicating the section and
- option name. For example, the ``branch`` option in the ``[run]``
- section of the config file would be indicated with ``"run:branch"``.
-
- `value` is the new value for the option. This should be an
- appropriate Python value. For example, use True for booleans, not the
- string ``"True"``.
-
- As an example, calling::
-
- cov.set_option("run:branch", True)
-
- has the same effect as this configuration file::
-
- [run]
- branch = True
-
- As a special case, an `option_name` of ``"paths"`` will replace the
- entire ``[paths]`` section. The value should be an OrderedDict.
-
- .. versionadded:: 4.0
-
- """
- self.config.set_option(option_name, value)
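A brief illustration of the colon-separated option names described above, including the special-cased ``"paths"`` lookup; the values shown are the documented defaults of a fresh configuration:

    import coverage

    cov = coverage.Coverage()
    cov.set_option("run:branch", True)
    print(cov.get_option("run:branch"))          # True
    print(cov.get_option("report:precision"))    # 0 by default
    paths = cov.get_option("paths")              # the entire [paths] mapping
    print(dict(paths))                           # {} unless configured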
-
- def load(self):
- """Load previously-collected coverage data from the data file."""
- self._init()
- if self._collector:
- self._collector.reset()
- should_skip = self.config.parallel and not os.path.exists(self.config.data_file)
- if not should_skip:
- self._init_data(suffix=None)
- self._post_init()
- if not should_skip:
- self._data.read()
-
- def _init_for_start(self):
- """Initialization for start()"""
- # Construct the collector.
- concurrency = self.config.concurrency or ()
- if "multiprocessing" in concurrency:
- if not patch_multiprocessing:
- raise CoverageException( # pragma: only jython
- "multiprocessing is not supported on this Python"
- )
- patch_multiprocessing(rcfile=self.config.config_file, coverage_args=self._dumped_args)
-
- dycon = self.config.dynamic_context
- if not dycon or dycon == "none":
- context_switchers = []
- elif dycon == "test_function":
- context_switchers = [should_start_context_test_function]
- else:
- raise CoverageException(
- "Don't understand dynamic_context setting: {!r}".format(dycon)
- )
-
- context_switchers.extend(
- plugin.dynamic_context for plugin in self._plugins.context_switchers
- )
-
- should_start_context = combine_context_switchers(context_switchers)
-
- self._collector = Collector(
- should_trace=self._should_trace,
- check_include=self._check_include_omit_etc,
- should_start_context=should_start_context,
- file_mapper=self._file_mapper,
- timid=self.config.timid,
- branch=self.config.branch,
- warn=self._warn,
- concurrency=concurrency,
- )
-
- suffix = self._data_suffix_specified
- if suffix or self.config.parallel:
- if not isinstance(suffix, string_class):
- # if data_suffix=True, use .machinename.pid.random
- suffix = True
- else:
- suffix = None
-
- self._init_data(suffix)
-
- self._collector.use_data(self._data, self.config.context)
-
- # Early warning if we aren't going to be able to support plugins.
- if self._plugins.file_tracers and not self._collector.supports_plugins:
- self._warn(
- "Plugin file tracers (%s) aren't supported with %s" % (
- ", ".join(
- plugin._coverage_plugin_name
- for plugin in self._plugins.file_tracers
- ),
- self._collector.tracer_name(),
- )
- )
- for plugin in self._plugins.file_tracers:
- plugin._coverage_enabled = False
-
- # Create the file classifying substructure.
- self._inorout = InOrOut(
- warn=self._warn,
- debug=(self._debug if self._debug.should('trace') else None),
- )
- self._inorout.configure(self.config)
- self._inorout.plugins = self._plugins
- self._inorout.disp_class = self._collector.file_disposition_class
-
- # It's useful to write debug info after initing for start.
- self._should_write_debug = True
-
- atexit.register(self._atexit)
-
- def _init_data(self, suffix):
- """Create a data file if we don't have one yet."""
- if self._data is None:
- # Create the data file. We do this at construction time so that the
- # data file will be written into the directory where the process
- # started rather than wherever the process eventually chdir'd to.
- ensure_dir_for_file(self.config.data_file)
- self._data = CoverageData(
- basename=self.config.data_file,
- suffix=suffix,
- warn=self._warn,
- debug=self._debug,
- no_disk=self._no_disk,
- )
-
- def start(self):
- """Start measuring code coverage.
-
- Coverage measurement only occurs in functions called after
- :meth:`start` is invoked. Statements in the same scope as
- :meth:`start` won't be measured.
-
- Once you invoke :meth:`start`, you must also call :meth:`stop`
- eventually, or your process might not shut down cleanly.
-
- """
- self._init()
- if not self._inited_for_start:
- self._inited_for_start = True
- self._init_for_start()
- self._post_init()
-
- # Issue warnings for possible problems.
- self._inorout.warn_conflicting_settings()
-
- # See if we think some code that would eventually be measured has
- # already been imported.
- if self._warn_preimported_source:
- self._inorout.warn_already_imported_files()
-
- if self._auto_load:
- self.load()
-
- self._collector.start()
- self._started = True
- self._instances.append(self)
-
- def stop(self):
- """Stop measuring code coverage."""
- if self._instances:
- if self._instances[-1] is self:
- self._instances.pop()
- if self._started:
- self._collector.stop()
- self._started = False
-
- def _atexit(self):
- """Clean up on process shutdown."""
- if self._debug.should("process"):
- self._debug.write("atexit: pid: {}, instance: {!r}".format(os.getpid(), self))
- if self._started:
- self.stop()
- if self._auto_save:
- self.save()
-
- def erase(self):
- """Erase previously collected coverage data.
-
- This removes the in-memory data collected in this session as well as
- discarding the data file.
-
- """
- self._init()
- self._post_init()
- if self._collector:
- self._collector.reset()
- self._init_data(suffix=None)
- self._data.erase(parallel=self.config.parallel)
- self._data = None
- self._inited_for_start = False
-
- def switch_context(self, new_context):
- """Switch to a new dynamic context.
-
- `new_context` is a string to use as the :ref:`dynamic context
- <dynamic_contexts>` label for collected data. If a :ref:`static
- context <static_contexts>` is in use, the static and dynamic context
- labels will be joined together with a pipe character.
-
- Coverage collection must be started already.
-
- .. versionadded:: 5.0
-
- """
- if not self._started: # pragma: part started
- raise CoverageException(
- "Cannot switch context, coverage is not started"
- )
-
- if self._collector.should_start_context:
- self._warn("Conflicting dynamic contexts", slug="dynamic-conflict", once=True)
-
- self._collector.switch_context(new_context)
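A hedged sketch of switching dynamic contexts while measurement is running; the context labels are arbitrary strings chosen for illustration:

    import coverage

    cov = coverage.Coverage()
    cov.start()
    cov.switch_context("setup")
    # ... code measured under the "setup" context ...
    cov.switch_context("request-handling")
    # ... code measured under the "request-handling" context ...
    cov.stop()
    cov.save()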
-
- def clear_exclude(self, which='exclude'):
- """Clear the exclude list."""
- self._init()
- setattr(self.config, which + "_list", [])
- self._exclude_regex_stale()
-
- def exclude(self, regex, which='exclude'):
- """Exclude source lines from execution consideration.
-
- A number of lists of regular expressions are maintained. Each list
- selects lines that are treated differently during reporting.
-
- `which` determines which list is modified. The "exclude" list selects
- lines that are not considered executable at all. The "partial" list
- indicates lines with branches that are not taken.
-
- `regex` is a regular expression. The regex is added to the specified
- list. If any of the regexes in the list is found in a line, the line
- is marked for special treatment during reporting.
-
- """
- self._init()
- excl_list = getattr(self.config, which + "_list")
- excl_list.append(regex)
- self._exclude_regex_stale()
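For illustration, adding entries to the exclusion lists described above; the patterns below are common conventions, not values that this method adds on its own:

    import coverage

    cov = coverage.Coverage()
    cov.exclude(r"if __name__ == .__main__.:")          # never count the main guard
    cov.exclude(r"pragma: no branch", which="partial")  # mark branches as partial
    print(cov.get_exclude_list())                       # default regexes plus the new one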
-
- def _exclude_regex_stale(self):
- """Drop all the compiled exclusion regexes, a list was modified."""
- self._exclude_re.clear()
-
- def _exclude_regex(self, which):
- """Return a compiled regex for the given exclusion list."""
- if which not in self._exclude_re:
- excl_list = getattr(self.config, which + "_list")
- self._exclude_re[which] = join_regex(excl_list)
- return self._exclude_re[which]
-
- def get_exclude_list(self, which='exclude'):
- """Return a list of excluded regex patterns.
-
- `which` indicates which list is desired. See :meth:`exclude` for the
- lists that are available, and their meaning.
-
- """
- self._init()
- return getattr(self.config, which + "_list")
-
- def save(self):
- """Save the collected coverage data to the data file."""
- data = self.get_data()
- data.write()
-
- def combine(self, data_paths=None, strict=False, keep=False):
- """Combine together a number of similarly-named coverage data files.
-
- All coverage data files whose name starts with `data_file` (from the
- coverage() constructor) will be read, and combined together into the
- current measurements.
-
- `data_paths` is a list of files or directories from which data should
- be combined. If no list is passed, then the data files from the
- directory indicated by the current data file (probably the current
- directory) will be combined.
-
- If `strict` is true, then it is an error to attempt to combine when
- there are no data files to combine.
-
- If `keep` is true, then original input data files won't be deleted.
-
- .. versionadded:: 4.0
- The `data_paths` parameter.
-
- .. versionadded:: 4.3
- The `strict` parameter.
-
-        .. versionadded:: 5.5
- The `keep` parameter.
- """
- self._init()
- self._init_data(suffix=None)
- self._post_init()
- self.get_data()
-
- aliases = None
- if self.config.paths:
- aliases = PathAliases()
- for paths in self.config.paths.values():
- result = paths[0]
- for pattern in paths[1:]:
- aliases.add(pattern, result)
-
- combine_parallel_data(
- self._data,
- aliases=aliases,
- data_paths=data_paths,
- strict=strict,
- keep=keep,
- )
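A sketch of combining parallel data files, assuming several ".coverage.*" files were produced by earlier parallel runs; the directory name in the comment is hypothetical:

    import coverage

    cov = coverage.Coverage()
    cov.combine()     # merge .coverage.* files next to the configured data file
    cov.save()        # write the combined ".coverage" file
    # cov.combine(["ci-artifacts/"], keep=True) would read another directory
    # and leave the original input files in place.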
-
- def get_data(self):
- """Get the collected data.
-
- Also warn about various problems collecting data.
-
- Returns a :class:`coverage.CoverageData`, the collected coverage data.
-
- .. versionadded:: 4.0
-
- """
- self._init()
- self._init_data(suffix=None)
- self._post_init()
-
- for plugin in self._plugins:
- if not plugin._coverage_enabled:
- self._collector.plugin_was_disabled(plugin)
-
- if self._collector and self._collector.flush_data():
- self._post_save_work()
-
- return self._data
-
- def _post_save_work(self):
- """After saving data, look for warnings, post-work, etc.
-
- Warn about things that should have happened but didn't.
- Look for unexecuted files.
-
- """
- # If there are still entries in the source_pkgs_unmatched list,
- # then we never encountered those packages.
- if self._warn_unimported_source:
- self._inorout.warn_unimported_source()
-
- # Find out if we got any data.
- if not self._data and self._warn_no_data:
- self._warn("No data was collected.", slug="no-data-collected")
-
- # Touch all the files that could have executed, so that we can
- # mark completely unexecuted files as 0% covered.
- if self._data is not None:
- file_paths = collections.defaultdict(list)
- for file_path, plugin_name in self._inorout.find_possibly_unexecuted_files():
- file_path = self._file_mapper(file_path)
- file_paths[plugin_name].append(file_path)
- for plugin_name, paths in file_paths.items():
- self._data.touch_files(paths, plugin_name)
-
- if self.config.note:
- self._warn("The '[run] note' setting is no longer supported.")
-
- # Backward compatibility with version 1.
- def analysis(self, morf):
- """Like `analysis2` but doesn't return excluded line numbers."""
- f, s, _, m, mf = self.analysis2(morf)
- return f, s, m, mf
-
- def analysis2(self, morf):
- """Analyze a module.
-
- `morf` is a module or a file name. It will be analyzed to determine
- its coverage statistics. The return value is a 5-tuple:
-
- * The file name for the module.
- * A list of line numbers of executable statements.
- * A list of line numbers of excluded statements.
- * A list of line numbers of statements not run (missing from
- execution).
- * A readable formatted string of the missing line numbers.
-
- The analysis uses the source file itself and the current measured
- coverage data.
-
- """
- analysis = self._analyze(morf)
- return (
- analysis.filename,
- sorted(analysis.statements),
- sorted(analysis.excluded),
- sorted(analysis.missing),
- analysis.missing_formatted(),
- )
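A sketch of unpacking the 5-tuple documented above for one measured module, assuming data was previously collected; "mypkg/mymod.py" is a placeholder path:

    import coverage

    cov = coverage.Coverage()
    cov.load()   # read previously collected data from the data file
    fname, statements, excluded, missing, missing_fmt = cov.analysis2("mypkg/mymod.py")
    print("%s: %d of %d statements missed" % (fname, len(missing), len(statements)))
    print("missing lines:", missing_fmt)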
-
- def _analyze(self, it):
- """Analyze a single morf or code unit.
-
- Returns an `Analysis` object.
-
- """
- # All reporting comes through here, so do reporting initialization.
- self._init()
- Numbers.set_precision(self.config.precision)
- self._post_init()
-
- data = self.get_data()
- if not isinstance(it, FileReporter):
- it = self._get_file_reporter(it)
-
- return Analysis(data, it, self._file_mapper)
-
- def _get_file_reporter(self, morf):
- """Get a FileReporter for a module or file name."""
- plugin = None
- file_reporter = "python"
-
- if isinstance(morf, string_class):
- if getattr(sys, 'is_standalone_binary', False):
- # Leave morf in canonical format - relative to the arcadia root
- mapped_morf = morf
- else:
- mapped_morf = self._file_mapper(morf)
- plugin_name = self._data.file_tracer(mapped_morf)
- if plugin_name:
- plugin = self._plugins.get(plugin_name)
-
- if plugin:
- file_reporter = plugin.file_reporter(mapped_morf)
- if file_reporter is None:
- raise CoverageException(
- "Plugin %r did not provide a file reporter for %r." % (
- plugin._coverage_plugin_name, morf
- )
- )
-
- if file_reporter == "python":
- file_reporter = PythonFileReporter(morf, self)
-
- return file_reporter
-
- def _get_file_reporters(self, morfs=None):
- """Get a list of FileReporters for a list of modules or file names.
-
- For each module or file name in `morfs`, find a FileReporter. Return
- the list of FileReporters.
-
- If `morfs` is a single module or file name, this returns a list of one
- FileReporter. If `morfs` is empty or None, then the list of all files
- measured is used to find the FileReporters.
-
- """
- if not morfs:
- morfs = self._data.measured_files()
-
- # Be sure we have a collection.
- if not isinstance(morfs, (list, tuple, set)):
- morfs = [morfs]
-
- file_reporters = [self._get_file_reporter(morf) for morf in morfs]
- return file_reporters
-
- def report(
- self, morfs=None, show_missing=None, ignore_errors=None,
- file=None, omit=None, include=None, skip_covered=None,
- contexts=None, skip_empty=None, precision=None, sort=None
- ):
- """Write a textual summary report to `file`.
-
- Each module in `morfs` is listed, with counts of statements, executed
- statements, missing statements, and a list of lines missed.
-
- If `show_missing` is true, then details of which lines or branches are
- missing will be included in the report. If `ignore_errors` is true,
- then a failure while reporting a single file will not stop the entire
- report.
-
- `file` is a file-like object, suitable for writing.
-
- `include` is a list of file name patterns. Files that match will be
- included in the report. Files matching `omit` will not be included in
- the report.
-
- If `skip_covered` is true, don't report on files with 100% coverage.
-
- If `skip_empty` is true, don't report on empty files (those that have
- no statements).
-
- `contexts` is a list of regular expressions. Only data from
- :ref:`dynamic contexts <dynamic_contexts>` that match one of those
- expressions (using :func:`re.search <python:re.search>`) will be
- included in the report.
-
- `precision` is the number of digits to display after the decimal
- point for percentages.
-
- All of the arguments default to the settings read from the
- :ref:`configuration file <config>`.
-
- Returns a float, the total percentage covered.
-
- .. versionadded:: 4.0
- The `skip_covered` parameter.
-
- .. versionadded:: 5.0
- The `contexts` and `skip_empty` parameters.
-
- .. versionadded:: 5.2
- The `precision` parameter.
-
- """
- with override_config(
- self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- show_missing=show_missing, skip_covered=skip_covered,
- report_contexts=contexts, skip_empty=skip_empty, precision=precision,
- sort=sort
- ):
- reporter = SummaryReporter(self)
- return reporter.report(morfs, outfile=file)
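A hedged example of writing the summary report to a file-like object, assuming a data file from an earlier run exists; the reported modules depend entirely on the collected data:

    import io
    import coverage

    cov = coverage.Coverage()
    cov.load()
    buf = io.StringIO()
    total = cov.report(file=buf, show_missing=True, skip_covered=True)
    print(buf.getvalue())
    print("total coverage: %.1f%%" % total)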
-
- def annotate(
- self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None, contexts=None,
- ):
- """Annotate a list of modules.
-
- Each module in `morfs` is annotated. The source is written to a new
- file, named with a ",cover" suffix, with each line prefixed with a
- marker to indicate the coverage of the line. Covered lines have ">",
- excluded lines have "-", and missing lines have "!".
-
- See :meth:`report` for other arguments.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit,
- report_include=include, report_contexts=contexts,
- ):
- reporter = AnnotateReporter(self)
- reporter.report(morfs, directory=directory)
-
- def html_report(
- self, morfs=None, directory=None, ignore_errors=None,
- omit=None, include=None, extra_css=None, title=None,
- skip_covered=None, show_contexts=None, contexts=None,
- skip_empty=None, precision=None,
- ):
- """Generate an HTML report.
-
- The HTML is written to `directory`. The file "index.html" is the
- overview starting point, with links to more detailed pages for
- individual modules.
-
- `extra_css` is a path to a file of other CSS to apply on the page.
- It will be copied into the HTML directory.
-
- `title` is a text string (not HTML) to use as the title of the HTML
- report.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- .. note::
- The HTML report files are generated incrementally based on the
- source files and coverage results. If you modify the report files,
- the changes will not be considered. You should be careful about
- changing the files in the report folder.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- html_dir=directory, extra_css=extra_css, html_title=title,
- html_skip_covered=skip_covered, show_contexts=show_contexts, report_contexts=contexts,
- html_skip_empty=skip_empty, precision=precision,
- ):
- reporter = HtmlReporter(self)
- return reporter.report(morfs)
-
- def xml_report(
- self, morfs=None, outfile=None, ignore_errors=None,
- omit=None, include=None, contexts=None, skip_empty=None,
- ):
- """Generate an XML report of coverage results.
-
- The report is compatible with Cobertura reports.
-
- Each module in `morfs` is included in the report. `outfile` is the
- path to write the file to, "-" will write to stdout.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- xml_output=outfile, report_contexts=contexts, skip_empty=skip_empty,
- ):
- return render_report(self.config.xml_output, XmlReporter(self), morfs)
-
- def json_report(
- self, morfs=None, outfile=None, ignore_errors=None,
- omit=None, include=None, contexts=None, pretty_print=None,
- show_contexts=None
- ):
- """Generate a JSON report of coverage results.
-
- Each module in `morfs` is included in the report. `outfile` is the
- path to write the file to, "-" will write to stdout.
-
- See :meth:`report` for other arguments.
-
- Returns a float, the total percentage covered.
-
- .. versionadded:: 5.0
-
- """
- with override_config(self,
- ignore_errors=ignore_errors, report_omit=omit, report_include=include,
- json_output=outfile, report_contexts=contexts, json_pretty_print=pretty_print,
- json_show_contexts=show_contexts
- ):
- return render_report(self.config.json_output, JsonReporter(self), morfs)
-
- def sys_info(self):
- """Return a list of (key, value) pairs showing internal information."""
-
- import coverage as covmod
-
- self._init()
- self._post_init()
-
- def plugin_info(plugins):
- """Make an entry for the sys_info from a list of plug-ins."""
- entries = []
- for plugin in plugins:
- entry = plugin._coverage_plugin_name
- if not plugin._coverage_enabled:
- entry += " (disabled)"
- entries.append(entry)
- return entries
-
- info = [
- ('version', covmod.__version__),
- ('coverage', covmod.__file__),
- ('tracer', self._collector.tracer_name() if self._collector else "-none-"),
- ('CTracer', 'available' if CTracer else "unavailable"),
- ('plugins.file_tracers', plugin_info(self._plugins.file_tracers)),
- ('plugins.configurers', plugin_info(self._plugins.configurers)),
- ('plugins.context_switchers', plugin_info(self._plugins.context_switchers)),
- ('configs_attempted', self.config.attempted_config_files),
- ('configs_read', self.config.config_files_read),
- ('config_file', self.config.config_file),
- ('config_contents',
- repr(self.config._config_contents)
- if self.config._config_contents
- else '-none-'
- ),
- ('data_file', self._data.data_filename() if self._data is not None else "-none-"),
- ('python', sys.version.replace('\n', '')),
- ('platform', platform.platform()),
- ('implementation', platform.python_implementation()),
- ('executable', sys.executable),
- ('def_encoding', sys.getdefaultencoding()),
- ('fs_encoding', sys.getfilesystemencoding()),
- ('pid', os.getpid()),
- ('cwd', os.getcwd()),
- ('path', sys.path),
- ('environment', sorted(
- ("%s = %s" % (k, v))
- for k, v in iitems(os.environ)
- if any(slug in k for slug in ("COV", "PY"))
- )),
- ('command_line', " ".join(getattr(sys, 'argv', ['-none-']))),
- ]
-
- if self._inorout:
- info.extend(self._inorout.sys_info())
-
- info.extend(CoverageData.sys_info())
-
- return info
-
-
-# Mega debugging...
-# $set_env.py: COVERAGE_DEBUG_CALLS - Lots and lots of output about calls to Coverage.
-if int(os.environ.get("COVERAGE_DEBUG_CALLS", 0)): # pragma: debugging
- from coverage.debug import decorate_methods, show_calls
-
- Coverage = decorate_methods(show_calls(show_args=True), butnot=['get_data'])(Coverage)
-
-
-def process_startup():
- """Call this at Python start-up to perhaps measure coverage.
-
- If the environment variable COVERAGE_PROCESS_START is defined, coverage
- measurement is started. The value of the variable is the config file
- to use.
-
- There are two ways to configure your Python installation to invoke this
- function when Python starts:
-
- #. Create or append to sitecustomize.py to add these lines::
-
- import coverage
- coverage.process_startup()
-
- #. Create a .pth file in your Python installation containing::
-
- import coverage; coverage.process_startup()
-
- Returns the :class:`Coverage` instance that was started, or None if it was
- not started by this call.
-
- """
- cps = os.environ.get("COVERAGE_PROCESS_START")
- if not cps:
- # No request for coverage, nothing to do.
- return None
-
- # This function can be called more than once in a process. This happens
- # because some virtualenv configurations make the same directory visible
- # twice in sys.path. This means that the .pth file will be found twice,
- # and executed twice, executing this function twice. We set a global
- # flag (an attribute on this function) to indicate that coverage.py has
- # already been started, so we can avoid doing it twice.
- #
- # https://github.com/nedbat/coveragepy/issues/340 has more details.
-
- if hasattr(process_startup, "coverage"):
- # We've annotated this function before, so we must have already
- # started coverage.py in this process. Nothing to do.
- return None
-
- cov = Coverage(config_file=cps)
- process_startup.coverage = cov
- cov._warn_no_data = False
- cov._warn_unimported_source = False
- cov._warn_preimported_source = False
- cov._auto_save = True
- cov.start()
-
- return cov
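A sketch of arranging subprocess measurement as described above: the parent process sets COVERAGE_PROCESS_START to a config file path before spawning, and `process_startup()` in the child picks it up, provided the .pth or sitecustomize hook is in place. "worker.py" is a placeholder script name:

    import os
    import subprocess

    env = dict(os.environ, COVERAGE_PROCESS_START=".coveragerc")
    subprocess.run(["python", "worker.py"], env=env, check=True)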
-
-
-def _prevent_sub_process_measurement():
- """Stop any subprocess auto-measurement from writing data."""
- auto_created_coverage = getattr(process_startup, "coverage", None)
- if auto_created_coverage is not None:
- auto_created_coverage._auto_save = False
diff --git a/contrib/python/coverage/py3/coverage/ctracer/datastack.c b/contrib/python/coverage/py3/coverage/ctracer/datastack.c
deleted file mode 100644
index a9cfcc2cf2..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/datastack.c
+++ /dev/null
@@ -1,50 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "datastack.h"
-
-#define STACK_DELTA 20
-
-int
-DataStack_init(Stats *pstats, DataStack *pdata_stack)
-{
- pdata_stack->depth = -1;
- pdata_stack->stack = NULL;
- pdata_stack->alloc = 0;
- return RET_OK;
-}
-
-void
-DataStack_dealloc(Stats *pstats, DataStack *pdata_stack)
-{
- int i;
-
- for (i = 0; i < pdata_stack->alloc; i++) {
- Py_XDECREF(pdata_stack->stack[i].file_data);
- }
- PyMem_Free(pdata_stack->stack);
-}
-
-int
-DataStack_grow(Stats *pstats, DataStack *pdata_stack)
-{
- pdata_stack->depth++;
- if (pdata_stack->depth >= pdata_stack->alloc) {
- /* We've outgrown our data_stack array: make it bigger. */
- int bigger = pdata_stack->alloc + STACK_DELTA;
- DataStackEntry * bigger_data_stack = PyMem_Realloc(pdata_stack->stack, bigger * sizeof(DataStackEntry));
- STATS( pstats->stack_reallocs++; )
- if (bigger_data_stack == NULL) {
- PyErr_NoMemory();
- pdata_stack->depth--;
- return RET_ERROR;
- }
- /* Zero the new entries. */
- memset(bigger_data_stack + pdata_stack->alloc, 0, STACK_DELTA * sizeof(DataStackEntry));
-
- pdata_stack->stack = bigger_data_stack;
- pdata_stack->alloc = bigger;
- }
- return RET_OK;
-}
diff --git a/contrib/python/coverage/py3/coverage/ctracer/datastack.h b/contrib/python/coverage/py3/coverage/ctracer/datastack.h
deleted file mode 100644
index 3b3078ba27..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/datastack.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_DATASTACK_H
-#define _COVERAGE_DATASTACK_H
-
-#include "util.h"
-#include "stats.h"
-
-/* An entry on the data stack. For each call frame, we need to record all
- * the information needed for CTracer_handle_line to operate as quickly as
- * possible.
- */
-typedef struct DataStackEntry {
- /* The current file_data dictionary. Owned. */
- PyObject * file_data;
-
- /* The disposition object for this frame. A borrowed instance of CFileDisposition. */
- PyObject * disposition;
-
- /* The FileTracer handling this frame, or None if it's Python. Borrowed. */
- PyObject * file_tracer;
-
- /* The line number of the last line recorded, for tracing arcs.
- -1 means there was no previous line, as when entering a code object.
- */
- int last_line;
-
- BOOL started_context;
-} DataStackEntry;
-
-/* A data stack is a dynamically allocated vector of DataStackEntry's. */
-typedef struct DataStack {
- int depth; /* The index of the last-used entry in stack. */
- int alloc; /* number of entries allocated at stack. */
- /* The file data at each level, or NULL if not recording. */
- DataStackEntry * stack;
-} DataStack;
-
-
-int DataStack_init(Stats * pstats, DataStack *pdata_stack);
-void DataStack_dealloc(Stats * pstats, DataStack *pdata_stack);
-int DataStack_grow(Stats * pstats, DataStack *pdata_stack);
-
-#endif /* _COVERAGE_DATASTACK_H */
diff --git a/contrib/python/coverage/py3/coverage/ctracer/filedisp.c b/contrib/python/coverage/py3/coverage/ctracer/filedisp.c
deleted file mode 100644
index 47782ae090..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/filedisp.c
+++ /dev/null
@@ -1,85 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "filedisp.h"
-
-void
-CFileDisposition_dealloc(CFileDisposition *self)
-{
- Py_XDECREF(self->original_filename);
- Py_XDECREF(self->canonical_filename);
- Py_XDECREF(self->source_filename);
- Py_XDECREF(self->trace);
- Py_XDECREF(self->reason);
- Py_XDECREF(self->file_tracer);
- Py_XDECREF(self->has_dynamic_filename);
-}
-
-static PyMemberDef
-CFileDisposition_members[] = {
- { "original_filename", T_OBJECT, offsetof(CFileDisposition, original_filename), 0,
- PyDoc_STR("") },
-
- { "canonical_filename", T_OBJECT, offsetof(CFileDisposition, canonical_filename), 0,
- PyDoc_STR("") },
-
- { "source_filename", T_OBJECT, offsetof(CFileDisposition, source_filename), 0,
- PyDoc_STR("") },
-
- { "trace", T_OBJECT, offsetof(CFileDisposition, trace), 0,
- PyDoc_STR("") },
-
- { "reason", T_OBJECT, offsetof(CFileDisposition, reason), 0,
- PyDoc_STR("") },
-
- { "file_tracer", T_OBJECT, offsetof(CFileDisposition, file_tracer), 0,
- PyDoc_STR("") },
-
- { "has_dynamic_filename", T_OBJECT, offsetof(CFileDisposition, has_dynamic_filename), 0,
- PyDoc_STR("") },
-
- { NULL }
-};
-
-PyTypeObject
-CFileDispositionType = {
- MyType_HEAD_INIT
- "coverage.CFileDispositionType", /*tp_name*/
- sizeof(CFileDisposition), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)CFileDisposition_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- 0, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "CFileDisposition objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- 0, /* tp_methods */
- CFileDisposition_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- 0, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
diff --git a/contrib/python/coverage/py3/coverage/ctracer/filedisp.h b/contrib/python/coverage/py3/coverage/ctracer/filedisp.h
deleted file mode 100644
index 860f9a50b1..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/filedisp.h
+++ /dev/null
@@ -1,26 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_FILEDISP_H
-#define _COVERAGE_FILEDISP_H
-
-#include "util.h"
-#include "structmember.h"
-
-typedef struct CFileDisposition {
- PyObject_HEAD
-
- PyObject * original_filename;
- PyObject * canonical_filename;
- PyObject * source_filename;
- PyObject * trace;
- PyObject * reason;
- PyObject * file_tracer;
- PyObject * has_dynamic_filename;
-} CFileDisposition;
-
-void CFileDisposition_dealloc(CFileDisposition *self);
-
-extern PyTypeObject CFileDispositionType;
-
-#endif /* _COVERAGE_FILEDISP_H */
diff --git a/contrib/python/coverage/py3/coverage/ctracer/module.c b/contrib/python/coverage/py3/coverage/ctracer/module.c
deleted file mode 100644
index f308902b69..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/module.c
+++ /dev/null
@@ -1,108 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#include "util.h"
-#include "tracer.h"
-#include "filedisp.h"
-
-/* Module definition */
-
-#define MODULE_DOC PyDoc_STR("Fast coverage tracer.")
-
-#if PY_MAJOR_VERSION >= 3
-
-static PyModuleDef
-moduledef = {
- PyModuleDef_HEAD_INIT,
- "coverage.tracer",
- MODULE_DOC,
- -1,
- NULL, /* methods */
- NULL,
- NULL, /* traverse */
- NULL, /* clear */
- NULL
-};
-
-
-PyObject *
-PyInit_tracer(void)
-{
- PyObject * mod = PyModule_Create(&moduledef);
- if (mod == NULL) {
- return NULL;
- }
-
- if (CTracer_intern_strings() < 0) {
- return NULL;
- }
-
- /* Initialize CTracer */
- CTracerType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CTracerType) < 0) {
- Py_DECREF(mod);
- return NULL;
- }
-
- Py_INCREF(&CTracerType);
- if (PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- return NULL;
- }
-
- /* Initialize CFileDisposition */
- CFileDispositionType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CFileDispositionType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- return NULL;
- }
-
- Py_INCREF(&CFileDispositionType);
- if (PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType) < 0) {
- Py_DECREF(mod);
- Py_DECREF(&CTracerType);
- Py_DECREF(&CFileDispositionType);
- return NULL;
- }
-
- return mod;
-}
-
-#else
-
-void
-inittracer(void)
-{
- PyObject * mod;
-
- mod = Py_InitModule3("coverage.tracer", NULL, MODULE_DOC);
- if (mod == NULL) {
- return;
- }
-
- if (CTracer_intern_strings() < 0) {
- return;
- }
-
- /* Initialize CTracer */
- CTracerType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CTracerType) < 0) {
- return;
- }
-
- Py_INCREF(&CTracerType);
- PyModule_AddObject(mod, "CTracer", (PyObject *)&CTracerType);
-
- /* Initialize CFileDisposition */
- CFileDispositionType.tp_new = PyType_GenericNew;
- if (PyType_Ready(&CFileDispositionType) < 0) {
- return;
- }
-
- Py_INCREF(&CFileDispositionType);
- PyModule_AddObject(mod, "CFileDisposition", (PyObject *)&CFileDispositionType);
-}
-
-#endif /* Py3k */
diff --git a/contrib/python/coverage/py3/coverage/ctracer/stats.h b/contrib/python/coverage/py3/coverage/ctracer/stats.h
deleted file mode 100644
index 05173369f7..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/stats.h
+++ /dev/null
@@ -1,31 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_STATS_H
-#define _COVERAGE_STATS_H
-
-#include "util.h"
-
-#if COLLECT_STATS
-#define STATS(x) x
-#else
-#define STATS(x)
-#endif
-
-typedef struct Stats {
- unsigned int calls; /* Need at least one member, but the rest only if needed. */
-#if COLLECT_STATS
- unsigned int lines;
- unsigned int returns;
- unsigned int exceptions;
- unsigned int others;
- unsigned int files;
- unsigned int missed_returns;
- unsigned int stack_reallocs;
- unsigned int errors;
- unsigned int pycalls;
- unsigned int start_context_calls;
-#endif
-} Stats;
-
-#endif /* _COVERAGE_STATS_H */
diff --git a/contrib/python/coverage/py3/coverage/ctracer/tracer.c b/contrib/python/coverage/py3/coverage/ctracer/tracer.c
deleted file mode 100644
index 3e776958cd..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/tracer.c
+++ /dev/null
@@ -1,1149 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-/* C-based Tracer for coverage.py. */
-
-#include "util.h"
-#include "datastack.h"
-#include "filedisp.h"
-#include "tracer.h"
-
-/* Python C API helpers. */
-
-static int
-pyint_as_int(PyObject * pyint, int *pint)
-{
- int the_int = MyInt_AsInt(pyint);
- if (the_int == -1 && PyErr_Occurred()) {
- return RET_ERROR;
- }
-
- *pint = the_int;
- return RET_OK;
-}
-
-
-/* Interned strings to speed GetAttr etc. */
-
-static PyObject *str_trace;
-static PyObject *str_file_tracer;
-static PyObject *str__coverage_enabled;
-static PyObject *str__coverage_plugin;
-static PyObject *str__coverage_plugin_name;
-static PyObject *str_dynamic_source_filename;
-static PyObject *str_line_number_range;
-
-int
-CTracer_intern_strings(void)
-{
- int ret = RET_ERROR;
-
-#define INTERN_STRING(v, s) \
- v = MyText_InternFromString(s); \
- if (v == NULL) { \
- goto error; \
- }
-
- INTERN_STRING(str_trace, "trace")
- INTERN_STRING(str_file_tracer, "file_tracer")
- INTERN_STRING(str__coverage_enabled, "_coverage_enabled")
- INTERN_STRING(str__coverage_plugin, "_coverage_plugin")
- INTERN_STRING(str__coverage_plugin_name, "_coverage_plugin_name")
- INTERN_STRING(str_dynamic_source_filename, "dynamic_source_filename")
- INTERN_STRING(str_line_number_range, "line_number_range")
-
- ret = RET_OK;
-
-error:
- return ret;
-}
-
-static void CTracer_disable_plugin(CTracer *self, PyObject * disposition);
-
-static int
-CTracer_init(CTracer *self, PyObject *args_unused, PyObject *kwds_unused)
-{
- int ret = RET_ERROR;
-
- if (DataStack_init(&self->stats, &self->data_stack) < 0) {
- goto error;
- }
-
- self->pdata_stack = &self->data_stack;
-
- self->context = Py_None;
- Py_INCREF(self->context);
-
- ret = RET_OK;
- goto ok;
-
-error:
- STATS( self->stats.errors++; )
-
-ok:
- return ret;
-}
-
-static void
-CTracer_dealloc(CTracer *self)
-{
- int i;
-
- if (self->started) {
- PyEval_SetTrace(NULL, NULL);
- }
-
- Py_XDECREF(self->should_trace);
- Py_XDECREF(self->check_include);
- Py_XDECREF(self->warn);
- Py_XDECREF(self->concur_id_func);
- Py_XDECREF(self->data);
- Py_XDECREF(self->file_tracers);
- Py_XDECREF(self->should_trace_cache);
- Py_XDECREF(self->should_start_context);
- Py_XDECREF(self->switch_context);
- Py_XDECREF(self->context);
- Py_XDECREF(self->disable_plugin);
-
- DataStack_dealloc(&self->stats, &self->data_stack);
- if (self->data_stacks) {
- for (i = 0; i < self->data_stacks_used; i++) {
- DataStack_dealloc(&self->stats, self->data_stacks + i);
- }
- PyMem_Free(self->data_stacks);
- }
-
- Py_XDECREF(self->data_stack_index);
-
- Py_TYPE(self)->tp_free((PyObject*)self);
-}
-
-#if TRACE_LOG
-static const char *
-indent(int n)
-{
- static const char * spaces =
- " "
- " "
- " "
- " "
- ;
- return spaces + strlen(spaces) - n*2;
-}
-
-static BOOL logging = FALSE;
-/* Set these constants to be a file substring and line number to start logging. */
-static const char * start_file = "tests/views";
-static int start_line = 27;
-
-static void
-showlog(int depth, int lineno, PyObject * filename, const char * msg)
-{
- if (logging) {
- printf("%s%3d ", indent(depth), depth);
- if (lineno) {
- printf("%4d", lineno);
- }
- else {
- printf(" ");
- }
- if (filename) {
- PyObject *ascii = MyText_AS_BYTES(filename);
- printf(" %s", MyBytes_AS_STRING(ascii));
- Py_DECREF(ascii);
- }
- if (msg) {
- printf(" %s", msg);
- }
- printf("\n");
- }
-}
-
-#define SHOWLOG(a,b,c,d) showlog(a,b,c,d)
-#else
-#define SHOWLOG(a,b,c,d)
-#endif /* TRACE_LOG */
-
-#if WHAT_LOG
-static const char * what_sym[] = {"CALL", "EXC ", "LINE", "RET "};
-#endif
-
-/* Record a pair of integers in self->pcur_entry->file_data. */
-static int
-CTracer_record_pair(CTracer *self, int l1, int l2)
-{
- int ret = RET_ERROR;
-
- PyObject * t = NULL;
-
- t = Py_BuildValue("(ii)", l1, l2);
- if (t == NULL) {
- goto error;
- }
-
- if (PyDict_SetItem(self->pcur_entry->file_data, t, Py_None) < 0) {
- goto error;
- }
-
- ret = RET_OK;
-
-error:
- Py_XDECREF(t);
-
- return ret;
-}
-
-/* Set self->pdata_stack to the proper data_stack to use. */
-static int
-CTracer_set_pdata_stack(CTracer *self)
-{
- int ret = RET_ERROR;
- PyObject * co_obj = NULL;
- PyObject * stack_index = NULL;
-
- if (self->concur_id_func != Py_None) {
- int the_index = 0;
-
- if (self->data_stack_index == NULL) {
- PyObject * weakref = NULL;
-
- weakref = PyImport_ImportModule("weakref");
- if (weakref == NULL) {
- goto error;
- }
- STATS( self->stats.pycalls++; )
- self->data_stack_index = PyObject_CallMethod(weakref, "WeakKeyDictionary", NULL);
- Py_XDECREF(weakref);
-
- if (self->data_stack_index == NULL) {
- goto error;
- }
- }
-
- STATS( self->stats.pycalls++; )
- co_obj = PyObject_CallObject(self->concur_id_func, NULL);
- if (co_obj == NULL) {
- goto error;
- }
- stack_index = PyObject_GetItem(self->data_stack_index, co_obj);
- if (stack_index == NULL) {
- /* PyObject_GetItem sets an exception if it didn't find the thing. */
- PyErr_Clear();
-
- /* A new concurrency object. Make a new data stack. */
- the_index = self->data_stacks_used;
- stack_index = MyInt_FromInt(the_index);
- if (stack_index == NULL) {
- goto error;
- }
- if (PyObject_SetItem(self->data_stack_index, co_obj, stack_index) < 0) {
- goto error;
- }
- self->data_stacks_used++;
- if (self->data_stacks_used >= self->data_stacks_alloc) {
- int bigger = self->data_stacks_alloc + 10;
- DataStack * bigger_stacks = PyMem_Realloc(self->data_stacks, bigger * sizeof(DataStack));
- if (bigger_stacks == NULL) {
- PyErr_NoMemory();
- goto error;
- }
- self->data_stacks = bigger_stacks;
- self->data_stacks_alloc = bigger;
- }
- DataStack_init(&self->stats, &self->data_stacks[the_index]);
- }
- else {
- if (pyint_as_int(stack_index, &the_index) < 0) {
- goto error;
- }
- }
-
- self->pdata_stack = &self->data_stacks[the_index];
- }
- else {
- self->pdata_stack = &self->data_stack;
- }
-
- ret = RET_OK;
-
-error:
-
- Py_XDECREF(co_obj);
- Py_XDECREF(stack_index);
-
- return ret;
-}
-
-/*
- * Parts of the trace function.
- */
-
-static int
-CTracer_check_missing_return(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
-
- if (self->last_exc_back) {
- if (frame == self->last_exc_back) {
- /* Looks like someone forgot to send a return event. We'll clear
- the exception state and do the RETURN code here. Notice that the
- frame we have in hand here is not the correct frame for the RETURN,
- that frame is gone. Our handling for RETURN doesn't need the
- actual frame, but we do log it, so that will look a little off if
- you're looking at the detailed log.
-
- If someday we need to examine the frame when doing RETURN, then
- we'll need to keep more of the missed frame's state.
- */
- STATS( self->stats.missed_returns++; )
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- if (self->pdata_stack->depth >= 0) {
- if (self->tracing_arcs && self->pcur_entry->file_data) {
- if (CTracer_record_pair(self, self->pcur_entry->last_line, -self->last_exc_firstlineno) < 0) {
- goto error;
- }
- }
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), MyFrame_GetCode(frame)->co_filename, "missedreturn");
- self->pdata_stack->depth--;
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
- }
- }
- self->last_exc_back = NULL;
- }
-
- ret = RET_OK;
-
-error:
-
- return ret;
-}
-
-static int
-CTracer_handle_call(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
- int ret2;
-
- /* Owned references that we clean up at the very end of the function. */
- PyObject * disposition = NULL;
- PyObject * plugin = NULL;
- PyObject * plugin_name = NULL;
- PyObject * next_tracename = NULL;
-
- /* Borrowed references. */
- PyObject * filename = NULL;
- PyObject * disp_trace = NULL;
- PyObject * tracename = NULL;
- PyObject * file_tracer = NULL;
- PyObject * has_dynamic_filename = NULL;
-
- CFileDisposition * pdisp = NULL;
-
- STATS( self->stats.calls++; )
-
- /* Grow the stack. */
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- if (DataStack_grow(&self->stats, self->pdata_stack) < 0) {
- goto error;
- }
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
-
- /* See if this frame begins a new context. */
- if (self->should_start_context != Py_None && self->context == Py_None) {
- PyObject * context;
- /* We're looking for our context, ask should_start_context if this is the start. */
- STATS( self->stats.start_context_calls++; )
- STATS( self->stats.pycalls++; )
- context = PyObject_CallFunctionObjArgs(self->should_start_context, frame, NULL);
- if (context == NULL) {
- goto error;
- }
- if (context != Py_None) {
- PyObject * val;
- Py_DECREF(self->context);
- self->context = context;
- self->pcur_entry->started_context = TRUE;
- STATS( self->stats.pycalls++; )
- val = PyObject_CallFunctionObjArgs(self->switch_context, context, NULL);
- if (val == NULL) {
- goto error;
- }
- Py_DECREF(val);
- }
- else {
- Py_DECREF(context);
- self->pcur_entry->started_context = FALSE;
- }
- }
- else {
- self->pcur_entry->started_context = FALSE;
- }
-
- /* Check if we should trace this line. */
- filename = MyFrame_GetCode(frame)->co_filename;
- disposition = PyDict_GetItem(self->should_trace_cache, filename);
- if (disposition == NULL) {
- if (PyErr_Occurred()) {
- goto error;
- }
- STATS( self->stats.files++; )
-
- /* We've never considered this file before. */
- /* Ask should_trace about it. */
- STATS( self->stats.pycalls++; )
- disposition = PyObject_CallFunctionObjArgs(self->should_trace, filename, frame, NULL);
- if (disposition == NULL) {
- /* An error occurred inside should_trace. */
- goto error;
- }
- if (PyDict_SetItem(self->should_trace_cache, filename, disposition) < 0) {
- goto error;
- }
- }
- else {
- Py_INCREF(disposition);
- }
-
- if (disposition == Py_None) {
- /* A later check_include returned false, so don't trace it. */
- disp_trace = Py_False;
- }
- else {
- /* The object we got is a CFileDisposition, use it efficiently. */
- pdisp = (CFileDisposition *) disposition;
- disp_trace = pdisp->trace;
- if (disp_trace == NULL) {
- goto error;
- }
- }
-
- if (disp_trace == Py_True) {
- /* If tracename is a string, then we're supposed to trace. */
- tracename = pdisp->source_filename;
- if (tracename == NULL) {
- goto error;
- }
- file_tracer = pdisp->file_tracer;
- if (file_tracer == NULL) {
- goto error;
- }
- if (file_tracer != Py_None) {
- plugin = PyObject_GetAttr(file_tracer, str__coverage_plugin);
- if (plugin == NULL) {
- goto error;
- }
- plugin_name = PyObject_GetAttr(plugin, str__coverage_plugin_name);
- if (plugin_name == NULL) {
- goto error;
- }
- }
- has_dynamic_filename = pdisp->has_dynamic_filename;
- if (has_dynamic_filename == NULL) {
- goto error;
- }
- if (has_dynamic_filename == Py_True) {
- STATS( self->stats.pycalls++; )
- next_tracename = PyObject_CallMethodObjArgs(
- file_tracer, str_dynamic_source_filename,
- tracename, frame, NULL
- );
- if (next_tracename == NULL) {
- /* An exception from the function. Alert the user with a
- * warning and a traceback.
- */
- CTracer_disable_plugin(self, disposition);
- /* Because we handled the error, goto ok. */
- goto ok;
- }
- tracename = next_tracename;
-
- if (tracename != Py_None) {
- /* Check the dynamic source filename against the include rules. */
- PyObject * included = NULL;
- int should_include;
- included = PyDict_GetItem(self->should_trace_cache, tracename);
- if (included == NULL) {
- PyObject * should_include_bool;
- if (PyErr_Occurred()) {
- goto error;
- }
- STATS( self->stats.files++; )
- STATS( self->stats.pycalls++; )
- should_include_bool = PyObject_CallFunctionObjArgs(self->check_include, tracename, frame, NULL);
- if (should_include_bool == NULL) {
- goto error;
- }
- should_include = (should_include_bool == Py_True);
- Py_DECREF(should_include_bool);
- if (PyDict_SetItem(self->should_trace_cache, tracename, should_include ? disposition : Py_None) < 0) {
- goto error;
- }
- }
- else {
- should_include = (included != Py_None);
- }
- if (!should_include) {
- tracename = Py_None;
- }
- }
- }
- }
- else {
- tracename = Py_None;
- }
-
- if (tracename != Py_None) {
- PyObject * file_data = PyDict_GetItem(self->data, tracename);
-
- if (file_data == NULL) {
- if (PyErr_Occurred()) {
- goto error;
- }
- file_data = PyDict_New();
- if (file_data == NULL) {
- goto error;
- }
- ret2 = PyDict_SetItem(self->data, tracename, file_data);
- if (ret2 < 0) {
- goto error;
- }
-
- /* If the disposition mentions a plugin, record that. */
- if (file_tracer != Py_None) {
- ret2 = PyDict_SetItem(self->file_tracers, tracename, plugin_name);
- if (ret2 < 0) {
- goto error;
- }
- }
- }
- else {
- /* PyDict_GetItem gives a borrowed reference. Own it. */
- Py_INCREF(file_data);
- }
-
- Py_XDECREF(self->pcur_entry->file_data);
- self->pcur_entry->file_data = file_data;
- self->pcur_entry->file_tracer = file_tracer;
-
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), filename, "traced");
- }
- else {
- Py_XDECREF(self->pcur_entry->file_data);
- self->pcur_entry->file_data = NULL;
- self->pcur_entry->file_tracer = Py_None;
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), filename, "skipped");
- }
-
- self->pcur_entry->disposition = disposition;
-
- /* Make the frame right in case settrace(gettrace()) happens. */
- Py_INCREF(self);
- My_XSETREF(frame->f_trace, (PyObject*)self);
-
-    /* A call event is really a "start frame" event, and can also happen
-     * when re-entering a generator.  f_lasti is -1 for a true call, and a
-     * real byte offset for a generator re-entry.
-     */
- if (MyFrame_GetLasti(frame) < 0) {
- self->pcur_entry->last_line = -MyFrame_GetCode(frame)->co_firstlineno;
- }
- else {
- self->pcur_entry->last_line = PyFrame_GetLineNumber(frame);
- }
-
-ok:
- ret = RET_OK;
-
-error:
- Py_XDECREF(next_tracename);
- Py_XDECREF(disposition);
- Py_XDECREF(plugin);
- Py_XDECREF(plugin_name);
-
- return ret;
-}
-
-
-static void
-CTracer_disable_plugin(CTracer *self, PyObject * disposition)
-{
- PyObject * ret;
- PyErr_Print();
-
- STATS( self->stats.pycalls++; )
- ret = PyObject_CallFunctionObjArgs(self->disable_plugin, disposition, NULL);
- if (ret == NULL) {
- goto error;
- }
- Py_DECREF(ret);
-
- return;
-
-error:
- /* This function doesn't return a status, so if an error happens, print it,
- * but don't interrupt the flow. */
- /* PySys_WriteStderr is nicer, but is not in the public API. */
- fprintf(stderr, "Error occurred while disabling plug-in:\n");
- PyErr_Print();
-}
-
-
-static int
-CTracer_unpack_pair(CTracer *self, PyObject *pair, int *p_one, int *p_two)
-{
- int ret = RET_ERROR;
- int the_int;
- PyObject * pyint = NULL;
- int index;
-
- if (!PyTuple_Check(pair) || PyTuple_Size(pair) != 2) {
- PyErr_SetString(
- PyExc_TypeError,
- "line_number_range must return 2-tuple"
- );
- goto error;
- }
-
- for (index = 0; index < 2; index++) {
- pyint = PyTuple_GetItem(pair, index);
- if (pyint == NULL) {
- goto error;
- }
- if (pyint_as_int(pyint, &the_int) < 0) {
- goto error;
- }
- *(index == 0 ? p_one : p_two) = the_int;
- }
-
- ret = RET_OK;
-
-error:
- return ret;
-}
-
-static int
-CTracer_handle_line(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
- int ret2;
-
- STATS( self->stats.lines++; )
- if (self->pdata_stack->depth >= 0) {
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), MyFrame_GetCode(frame)->co_filename, "line");
- if (self->pcur_entry->file_data) {
- int lineno_from = -1;
- int lineno_to = -1;
-
- /* We're tracing in this frame: record something. */
- if (self->pcur_entry->file_tracer != Py_None) {
- PyObject * from_to = NULL;
- STATS( self->stats.pycalls++; )
- from_to = PyObject_CallMethodObjArgs(self->pcur_entry->file_tracer, str_line_number_range, frame, NULL);
- if (from_to == NULL) {
- CTracer_disable_plugin(self, self->pcur_entry->disposition);
- goto ok;
- }
- ret2 = CTracer_unpack_pair(self, from_to, &lineno_from, &lineno_to);
- Py_DECREF(from_to);
- if (ret2 < 0) {
- CTracer_disable_plugin(self, self->pcur_entry->disposition);
- goto ok;
- }
- }
- else {
- lineno_from = lineno_to = PyFrame_GetLineNumber(frame);
- }
-
- if (lineno_from != -1) {
- for (; lineno_from <= lineno_to; lineno_from++) {
- if (self->tracing_arcs) {
- /* Tracing arcs: key is (last_line,this_line). */
- if (CTracer_record_pair(self, self->pcur_entry->last_line, lineno_from) < 0) {
- goto error;
- }
- }
- else {
- /* Tracing lines: key is simply this_line. */
- PyObject * this_line = MyInt_FromInt(lineno_from);
- if (this_line == NULL) {
- goto error;
- }
-
- ret2 = PyDict_SetItem(self->pcur_entry->file_data, this_line, Py_None);
- Py_DECREF(this_line);
- if (ret2 < 0) {
- goto error;
- }
- }
-
- self->pcur_entry->last_line = lineno_from;
- }
- }
- }
- }
-
-ok:
- ret = RET_OK;
-
-error:
-
- return ret;
-}
-
-static int
-CTracer_handle_return(CTracer *self, PyFrameObject *frame)
-{
- int ret = RET_ERROR;
-
- STATS( self->stats.returns++; )
- /* A near-copy of this code is above in the missing-return handler. */
- if (CTracer_set_pdata_stack(self) < 0) {
- goto error;
- }
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
-
- if (self->pdata_stack->depth >= 0) {
- if (self->tracing_arcs && self->pcur_entry->file_data) {
- /* Need to distinguish between RETURN_VALUE and YIELD_VALUE. Read
- * the current bytecode to see what it is. In unusual circumstances
- * (Cython code), co_code can be the empty string, so range-check
- * f_lasti before reading the byte.
- */
- int bytecode = RETURN_VALUE;
- PyObject * pCode = MyCode_GetCode(MyFrame_GetCode(frame));
- int lasti = MyFrame_GetLasti(frame);
-
- if (lasti < MyBytes_GET_SIZE(pCode)) {
- bytecode = MyBytes_AS_STRING(pCode)[lasti];
- }
- if (bytecode != YIELD_VALUE) {
- int first = MyFrame_GetCode(frame)->co_firstlineno;
- if (CTracer_record_pair(self, self->pcur_entry->last_line, -first) < 0) {
- goto error;
- }
- }
- }
-
- /* If this frame started a context, then returning from it ends the context. */
- if (self->pcur_entry->started_context) {
- PyObject * val;
- Py_DECREF(self->context);
- self->context = Py_None;
- Py_INCREF(self->context);
- STATS( self->stats.pycalls++; )
-
- val = PyObject_CallFunctionObjArgs(self->switch_context, self->context, NULL);
- if (val == NULL) {
- goto error;
- }
- Py_DECREF(val);
- }
-
- /* Pop the stack. */
- SHOWLOG(self->pdata_stack->depth, PyFrame_GetLineNumber(frame), MyFrame_GetCode(frame)->co_filename, "return");
- self->pdata_stack->depth--;
- self->pcur_entry = &self->pdata_stack->stack[self->pdata_stack->depth];
- }
-
- ret = RET_OK;
-
-error:
-
- return ret;
-}
-
-static int
-CTracer_handle_exception(CTracer *self, PyFrameObject *frame)
-{
- /* Some code (Python 2.3, and pyexpat anywhere) fires an exception event
- without a return event. To detect that, we'll keep a copy of the
- parent frame for an exception event. If the next event is in that
- frame, then we must have returned without a return event. We can
- synthesize the missing event then.
-
- Python itself fixed this problem in 2.4. Pyexpat still has the bug.
- I've reported the problem with pyexpat as http://bugs.python.org/issue6359 .
- If it gets fixed, this code should still work properly. Maybe some day
- the bug will be fixed everywhere coverage.py is supported, and we can
- remove this missing-return detection.
-
- More about this fix: https://nedbatchelder.com/blog/200907/a_nasty_little_bug.html
- */
- STATS( self->stats.exceptions++; )
- self->last_exc_back = frame->f_back;
- self->last_exc_firstlineno = MyFrame_GetCode(frame)->co_firstlineno;
-
- return RET_OK;
-}
-
-/*
- * The Trace Function
- */
-static int
-CTracer_trace(CTracer *self, PyFrameObject *frame, int what, PyObject *arg_unused)
-{
- int ret = RET_ERROR;
-
- #if DO_NOTHING
- return RET_OK;
- #endif
-
- if (!self->started) {
- /* If CTracer.stop() has been called from another thread, the tracer
- is still active in the current thread. Let's deactivate ourselves
- now. */
- PyEval_SetTrace(NULL, NULL);
- return RET_OK;
- }
-
- #if WHAT_LOG || TRACE_LOG
- PyObject * ascii = NULL;
- #endif
-
- #if WHAT_LOG
-    if (what < (int)(sizeof(what_sym)/sizeof(const char *))) {
- ascii = MyText_AS_BYTES(MyFrame_GetCode(frame)->co_filename);
- printf("trace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame));
- Py_DECREF(ascii);
- }
- #endif
-
- #if TRACE_LOG
- ascii = MyText_AS_BYTES(MyFrame_GetCode(frame)->co_filename);
- if (strstr(MyBytes_AS_STRING(ascii), start_file) && PyFrame_GetLineNumber(frame) == start_line) {
- logging = TRUE;
- }
- Py_DECREF(ascii);
- #endif
-
-    /* See CTracer_handle_exception for details on missing-return detection. */
- if (CTracer_check_missing_return(self, frame) < 0) {
- goto error;
- }
-
- self->activity = TRUE;
-
- switch (what) {
- case PyTrace_CALL:
- if (CTracer_handle_call(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_RETURN:
- if (CTracer_handle_return(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_LINE:
- if (CTracer_handle_line(self, frame) < 0) {
- goto error;
- }
- break;
-
- case PyTrace_EXCEPTION:
- if (CTracer_handle_exception(self, frame) < 0) {
- goto error;
- }
- break;
-
- default:
- STATS( self->stats.others++; )
- break;
- }
-
- ret = RET_OK;
- goto cleanup;
-
-error:
- STATS( self->stats.errors++; )
-
-cleanup:
- return ret;
-}
-
-
-/*
- * Python has two ways to set the trace function: sys.settrace(fn), which
- * takes a Python callable, and PyEval_SetTrace(func, obj), which takes
- * a C function and a Python object. The way these work together is that
- * sys.settrace(pyfn) calls PyEval_SetTrace(builtin_func, pyfn), using the
- * Python callable as the object in PyEval_SetTrace.  sys.gettrace() then
- * simply returns the Python object that was passed as the second argument
- * to PyEval_SetTrace, which in our case is this self parameter, so self
- * must be callable if it is ever to be handed back to sys.settrace().
- *
- * So we make ourselves callable, equivalent to invoking our trace function.
- *
- * To help with the process of replaying stored frames, this function has an
- * optional keyword argument:
- *
- * def CTracer_call(frame, event, arg, lineno=0)
- *
- * If provided, the lineno argument is used as the line number, and the
- * frame's f_lineno member is ignored.
- */
-static PyObject *
-CTracer_call(CTracer *self, PyObject *args, PyObject *kwds)
-{
- PyFrameObject *frame;
- PyObject *what_str;
- PyObject *arg;
- int lineno = 0;
- int what;
- int orig_lineno;
- PyObject *ret = NULL;
- PyObject * ascii = NULL;
-
- #if DO_NOTHING
- CRASH
- #endif
-
- static char *what_names[] = {
- "call", "exception", "line", "return",
- "c_call", "c_exception", "c_return",
- NULL
- };
-
- static char *kwlist[] = {"frame", "event", "arg", "lineno", NULL};
-
- if (!PyArg_ParseTupleAndKeywords(args, kwds, "O!O!O|i:Tracer_call", kwlist,
- &PyFrame_Type, &frame, &MyText_Type, &what_str, &arg, &lineno)) {
- goto done;
- }
-
- /* In Python, the what argument is a string, we need to find an int
- for the C function. */
- for (what = 0; what_names[what]; what++) {
- int should_break;
- ascii = MyText_AS_BYTES(what_str);
- should_break = !strcmp(MyBytes_AS_STRING(ascii), what_names[what]);
- Py_DECREF(ascii);
- if (should_break) {
- break;
- }
- }
-
-    #if WHAT_LOG
-    if (what < (int)(sizeof(what_sym)/sizeof(const char *))) {
-        ascii = MyText_AS_BYTES(MyFrame_GetCode(frame)->co_filename);
-        printf("pytrace: %s @ %s %d\n", what_sym[what], MyBytes_AS_STRING(ascii), PyFrame_GetLineNumber(frame));
-        Py_DECREF(ascii);
-    }
-    #endif
-
- /* Save off the frame's lineno, and use the forced one, if provided. */
- orig_lineno = frame->f_lineno;
- if (lineno > 0) {
- frame->f_lineno = lineno;
- }
-
- /* Invoke the C function, and return ourselves. */
- if (CTracer_trace(self, frame, what, arg) == RET_OK) {
- Py_INCREF(self);
- ret = (PyObject *)self;
- }
-
- /* Clean up. */
- frame->f_lineno = orig_lineno;
-
- /* For better speed, install ourselves the C way so that future calls go
- directly to CTracer_trace, without this intermediate function.
-
- Only do this if this is a CALL event, since new trace functions only
- take effect then. If we don't condition it on CALL, then we'll clobber
- the new trace function before it has a chance to get called. To
- understand why, there are three internal values to track: frame.f_trace,
- c_tracefunc, and c_traceobj. They are explained here:
- https://nedbatchelder.com/text/trace-function.html
-
- Without the conditional on PyTrace_CALL, this is what happens:
-
- def func(): # f_trace c_tracefunc c_traceobj
- # -------------- -------------- --------------
- # CTracer CTracer.trace CTracer
- sys.settrace(my_func)
- # CTracer trampoline my_func
- # Now Python calls trampoline(CTracer), which calls this function
- # which calls PyEval_SetTrace below, setting us as the tracer again:
- # CTracer CTracer.trace CTracer
- # and it's as if the settrace never happened.
- */
- if (what == PyTrace_CALL) {
- PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
- }
-
-done:
- return ret;
-}
-
-static PyObject *
-CTracer_start(CTracer *self, PyObject *args_unused)
-{
- PyEval_SetTrace((Py_tracefunc)CTracer_trace, (PyObject*)self);
- self->started = TRUE;
- self->tracing_arcs = self->trace_arcs && PyObject_IsTrue(self->trace_arcs);
-
- /* start() returns a trace function usable with sys.settrace() */
- Py_INCREF(self);
- return (PyObject *)self;
-}
-
-static PyObject *
-CTracer_stop(CTracer *self, PyObject *args_unused)
-{
- if (self->started) {
- /* Set the started flag only. The actual call to
- PyEval_SetTrace(NULL, NULL) is delegated to the callback
-           itself to ensure that it is called from the right thread.
- */
- self->started = FALSE;
- }
-
- Py_RETURN_NONE;
-}
-
-static PyObject *
-CTracer_activity(CTracer *self, PyObject *args_unused)
-{
- if (self->activity) {
- Py_RETURN_TRUE;
- }
- else {
- Py_RETURN_FALSE;
- }
-}
-
-static PyObject *
-CTracer_reset_activity(CTracer *self, PyObject *args_unused)
-{
- self->activity = FALSE;
- Py_RETURN_NONE;
-}
-
-static PyObject *
-CTracer_get_stats(CTracer *self, PyObject *args_unused)
-{
-#if COLLECT_STATS
- return Py_BuildValue(
- "{sI,sI,sI,sI,sI,sI,sI,sI,si,sI,sI,sI}",
- "calls", self->stats.calls,
- "lines", self->stats.lines,
- "returns", self->stats.returns,
- "exceptions", self->stats.exceptions,
- "others", self->stats.others,
- "files", self->stats.files,
- "missed_returns", self->stats.missed_returns,
- "stack_reallocs", self->stats.stack_reallocs,
- "stack_alloc", self->pdata_stack->alloc,
- "errors", self->stats.errors,
- "pycalls", self->stats.pycalls,
- "start_context_calls", self->stats.start_context_calls
- );
-#else
- Py_RETURN_NONE;
-#endif /* COLLECT_STATS */
-}
-
-static PyMemberDef
-CTracer_members[] = {
- { "should_trace", T_OBJECT, offsetof(CTracer, should_trace), 0,
- PyDoc_STR("Function indicating whether to trace a file.") },
-
- { "check_include", T_OBJECT, offsetof(CTracer, check_include), 0,
- PyDoc_STR("Function indicating whether to include a file.") },
-
- { "warn", T_OBJECT, offsetof(CTracer, warn), 0,
- PyDoc_STR("Function for issuing warnings.") },
-
- { "concur_id_func", T_OBJECT, offsetof(CTracer, concur_id_func), 0,
- PyDoc_STR("Function for determining concurrency context") },
-
- { "data", T_OBJECT, offsetof(CTracer, data), 0,
- PyDoc_STR("The raw dictionary of trace data.") },
-
- { "file_tracers", T_OBJECT, offsetof(CTracer, file_tracers), 0,
- PyDoc_STR("Mapping from file name to plugin name.") },
-
- { "should_trace_cache", T_OBJECT, offsetof(CTracer, should_trace_cache), 0,
- PyDoc_STR("Dictionary caching should_trace results.") },
-
- { "trace_arcs", T_OBJECT, offsetof(CTracer, trace_arcs), 0,
- PyDoc_STR("Should we trace arcs, or just lines?") },
-
- { "should_start_context", T_OBJECT, offsetof(CTracer, should_start_context), 0,
- PyDoc_STR("Function for starting contexts.") },
-
- { "switch_context", T_OBJECT, offsetof(CTracer, switch_context), 0,
- PyDoc_STR("Function for switching to a new context.") },
-
- { "disable_plugin", T_OBJECT, offsetof(CTracer, disable_plugin), 0,
- PyDoc_STR("Function for disabling a plugin.") },
-
- { NULL }
-};
-
-static PyMethodDef
-CTracer_methods[] = {
- { "start", (PyCFunction) CTracer_start, METH_VARARGS,
- PyDoc_STR("Start the tracer") },
-
- { "stop", (PyCFunction) CTracer_stop, METH_VARARGS,
- PyDoc_STR("Stop the tracer") },
-
- { "get_stats", (PyCFunction) CTracer_get_stats, METH_VARARGS,
- PyDoc_STR("Get statistics about the tracing") },
-
- { "activity", (PyCFunction) CTracer_activity, METH_VARARGS,
- PyDoc_STR("Has there been any activity?") },
-
- { "reset_activity", (PyCFunction) CTracer_reset_activity, METH_VARARGS,
- PyDoc_STR("Reset the activity flag") },
-
- { NULL }
-};
-
-PyTypeObject
-CTracerType = {
- MyType_HEAD_INIT
- "coverage.CTracer", /*tp_name*/
- sizeof(CTracer), /*tp_basicsize*/
- 0, /*tp_itemsize*/
- (destructor)CTracer_dealloc, /*tp_dealloc*/
- 0, /*tp_print*/
- 0, /*tp_getattr*/
- 0, /*tp_setattr*/
- 0, /*tp_compare*/
- 0, /*tp_repr*/
- 0, /*tp_as_number*/
- 0, /*tp_as_sequence*/
- 0, /*tp_as_mapping*/
- 0, /*tp_hash */
- (ternaryfunc)CTracer_call, /*tp_call*/
- 0, /*tp_str*/
- 0, /*tp_getattro*/
- 0, /*tp_setattro*/
- 0, /*tp_as_buffer*/
- Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, /*tp_flags*/
- "CTracer objects", /* tp_doc */
- 0, /* tp_traverse */
- 0, /* tp_clear */
- 0, /* tp_richcompare */
- 0, /* tp_weaklistoffset */
- 0, /* tp_iter */
- 0, /* tp_iternext */
- CTracer_methods, /* tp_methods */
- CTracer_members, /* tp_members */
- 0, /* tp_getset */
- 0, /* tp_base */
- 0, /* tp_dict */
- 0, /* tp_descr_get */
- 0, /* tp_descr_set */
- 0, /* tp_dictoffset */
- (initproc)CTracer_init, /* tp_init */
- 0, /* tp_alloc */
- 0, /* tp_new */
-};
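The block comment in CTracer_call above describes how sys.settrace(), sys.gettrace() and PyEval_SetTrace() interact, and why the tracer object must itself be callable with the (frame, event, arg) protocol. A minimal pure-Python sketch of that protocol, offered only as an illustration and not part of the deleted file, might look like this:

    import sys

    def tracer(frame, event, arg):
        # A "call" event is really a "start frame" event; returning a local
        # trace function keeps line tracing enabled inside that frame.
        if event == "call":
            print("call  ", frame.f_code.co_name)
        elif event == "line":
            print("line  ", frame.f_code.co_name, frame.f_lineno)
        elif event == "return":
            print("return", frame.f_code.co_name)
        return tracer

    def demo():
        x = 1
        y = x + 1
        return y

    sys.settrace(tracer)
    demo()
    sys.settrace(None)

    # sys.gettrace() hands back whatever object was passed to settrace(), so
    # settrace(gettrace()) only round-trips if that object is callable; that
    # is the situation CTracer_call exists to support.

Here print() merely stands in for the bookkeeping that CTracer_handle_call and the other handlers perform.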
diff --git a/contrib/python/coverage/py3/coverage/ctracer/tracer.h b/contrib/python/coverage/py3/coverage/ctracer/tracer.h
deleted file mode 100644
index 8994a9e3d6..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/tracer.h
+++ /dev/null
@@ -1,75 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_TRACER_H
-#define _COVERAGE_TRACER_H
-
-#include "util.h"
-#include "structmember.h"
-#include "frameobject.h"
-#include "opcode.h"
-
-#include "datastack.h"
-
-/* The CTracer type. */
-
-typedef struct CTracer {
- PyObject_HEAD
-
- /* Python objects manipulated directly by the Collector class. */
- PyObject * should_trace;
- PyObject * check_include;
- PyObject * warn;
- PyObject * concur_id_func;
- PyObject * data;
- PyObject * file_tracers;
- PyObject * should_trace_cache;
- PyObject * trace_arcs;
- PyObject * should_start_context;
- PyObject * switch_context;
- PyObject * disable_plugin;
-
- /* Has the tracer been started? */
- BOOL started;
- /* Are we tracing arcs, or just lines? */
- BOOL tracing_arcs;
- /* Have we had any activity? */
- BOOL activity;
- /* The current dynamic context. */
- PyObject * context;
-
- /*
- The data stack is a stack of dictionaries. Each dictionary collects
- data for a single source file. The data stack parallels the call stack:
- each call pushes the new frame's file data onto the data stack, and each
- return pops file data off.
-
- The file data is a dictionary whose form depends on the tracing options.
- If tracing arcs, the keys are line number pairs. If not tracing arcs,
- the keys are line numbers. In both cases, the value is irrelevant
- (None).
- */
-
- DataStack data_stack; /* Used if we aren't doing concurrency. */
-
- PyObject * data_stack_index; /* Used if we are doing concurrency. */
- DataStack * data_stacks;
- int data_stacks_alloc;
- int data_stacks_used;
- DataStack * pdata_stack;
-
- /* The current file's data stack entry. */
- DataStackEntry * pcur_entry;
-
- /* The parent frame for the last exception event, to fix missing returns. */
- PyFrameObject * last_exc_back;
- int last_exc_firstlineno;
-
- Stats stats;
-} CTracer;
-
-int CTracer_intern_strings(void);
-
-extern PyTypeObject CTracerType;
-
-#endif /* _COVERAGE_TRACER_H */
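The data-stack comment above says each per-file dictionary is keyed either by line numbers or by (last_line, this_line) pairs, always with None values. Purely as an illustration of those shapes (plain Python literals, not coverage.py API):

    # Line tracing: keys are executed line numbers.
    line_file_data = {1: None, 2: None, 5: None}

    # Arc tracing: keys are (last_line, this_line) pairs, as written by
    # CTracer_record_pair.  Negative values derived from co_firstlineno mark
    # function entry and exit, matching CTracer_handle_call and
    # CTracer_handle_return in tracer.c above.
    arc_file_data = {(-1, 2): None, (2, 3): None, (3, -1): None}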
diff --git a/contrib/python/coverage/py3/coverage/ctracer/util.h b/contrib/python/coverage/py3/coverage/ctracer/util.h
deleted file mode 100644
index 973672db02..0000000000
--- a/contrib/python/coverage/py3/coverage/ctracer/util.h
+++ /dev/null
@@ -1,103 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-#ifndef _COVERAGE_UTIL_H
-#define _COVERAGE_UTIL_H
-
-#include <Python.h>
-
-/* Compile-time debugging helpers */
-#undef WHAT_LOG /* Define to log the WHAT params in the trace function. */
-#undef TRACE_LOG /* Define to log our bookkeeping. */
-#undef COLLECT_STATS /* Collect counters: stats are printed when tracer is stopped. */
-#undef DO_NOTHING /* Define this to make the tracer do nothing. */
-
-#if PY_VERSION_HEX >= 0x030B00A0
-// 3.11 moved f_lasti into an internal structure. This is totally the wrong way
-// to make this work, but it's all I've got until https://bugs.python.org/issue40421
-// is resolved.
-#include <internal/pycore_frame.h>
-#if PY_VERSION_HEX >= 0x030B00A7
-#define MyFrame_GetLasti(f) (PyFrame_GetLasti(f))
-#else
-#define MyFrame_GetLasti(f) ((f)->f_frame->f_lasti * 2)
-#endif
-#elif PY_VERSION_HEX >= 0x030A00A7
-// The f_lasti field changed meaning in 3.10.0a7. It had been bytes, but
-// now is instructions, so we need to adjust it to use it as a byte index.
-#define MyFrame_GetLasti(f) ((f)->f_lasti * 2)
-#else
-#define MyFrame_GetLasti(f) ((f)->f_lasti)
-#endif
-
-// Access f_code should be done through a helper starting in 3.9.
-#if PY_VERSION_HEX >= 0x03090000
-#define MyFrame_GetCode(f) (PyFrame_GetCode(f))
-#else
-#define MyFrame_GetCode(f) ((f)->f_code)
-#endif
-
-#if PY_VERSION_HEX >= 0x030B00B1
-#define MyCode_GetCode(co) (PyCode_GetCode(co))
-#define MyCode_FreeCode(code) Py_XDECREF(code)
-#elif PY_VERSION_HEX >= 0x030B00A7
-#define MyCode_GetCode(co) (PyObject_GetAttrString((PyObject *)(co), "co_code"))
-#define MyCode_FreeCode(code) Py_XDECREF(code)
-#else
-#define MyCode_GetCode(co) ((co)->co_code)
-#define MyCode_FreeCode(code)
-#endif
-
-/* Py 2.x and 3.x compatibility */
-
-#if PY_MAJOR_VERSION >= 3
-
-#define MyText_Type PyUnicode_Type
-#define MyText_AS_BYTES(o) PyUnicode_AsASCIIString(o)
-#define MyBytes_GET_SIZE(o) PyBytes_GET_SIZE(o)
-#define MyBytes_AS_STRING(o) PyBytes_AS_STRING(o)
-#define MyText_AsString(o) PyUnicode_AsUTF8(o)
-#define MyText_FromFormat PyUnicode_FromFormat
-#define MyInt_FromInt(i) PyLong_FromLong((long)i)
-#define MyInt_AsInt(o) (int)PyLong_AsLong(o)
-#define MyText_InternFromString(s) PyUnicode_InternFromString(s)
-
-#define MyType_HEAD_INIT PyVarObject_HEAD_INIT(NULL, 0)
-
-#else
-
-#define MyText_Type PyString_Type
-#define MyText_AS_BYTES(o) (Py_INCREF(o), o)
-#define MyBytes_GET_SIZE(o) PyString_GET_SIZE(o)
-#define MyBytes_AS_STRING(o) PyString_AS_STRING(o)
-#define MyText_AsString(o) PyString_AsString(o)
-#define MyText_FromFormat PyUnicode_FromFormat
-#define MyInt_FromInt(i) PyInt_FromLong((long)i)
-#define MyInt_AsInt(o) (int)PyInt_AsLong(o)
-#define MyText_InternFromString(s) PyString_InternFromString(s)
-
-#define MyType_HEAD_INIT PyObject_HEAD_INIT(NULL) 0,
-
-#endif /* Py3k */
-
-// Undocumented, and not in all 2.7.x, so our own copy of it.
-#define My_XSETREF(op, op2) \
- do { \
- PyObject *_py_tmp = (PyObject *)(op); \
- (op) = (op2); \
- Py_XDECREF(_py_tmp); \
- } while (0)
-
-/* The values returned to indicate ok or error. */
-#define RET_OK 0
-#define RET_ERROR -1
-
-/* Nicer booleans */
-typedef int BOOL;
-#define FALSE 0
-#define TRUE 1
-
-/* Only for extreme machete-mode debugging! */
-#define CRASH { printf("*** CRASH! ***\n"); *((int*)1) = 1; }
-
-#endif /* _COVERAGE_UTIL_H */
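The MyFrame_GetLasti macros above exist because the unit of f_lasti changed across CPython versions. A small Python sketch of the same adjustment, working on a raw f_lasti value and a version tuple supplied by the caller (both inputs are illustrative):

    def f_lasti_to_byte_offset(raw_lasti, version):
        # Mirrors MyFrame_GetLasti: since CPython 3.10.0a7 the field counts
        # 2-byte instructions, so it is doubled to get a byte index into
        # co_code; before that it was already a byte offset.
        if version >= (3, 10):
            return raw_lasti * 2
        return raw_lasti

    print(f_lasti_to_byte_offset(12, (3, 10)))  # -> 24
    print(f_lasti_to_byte_offset(12, (3, 9)))   # -> 12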
diff --git a/contrib/python/coverage/py3/coverage/data.py b/contrib/python/coverage/py3/coverage/data.py
deleted file mode 100644
index 5dd1dfe3f0..0000000000
--- a/contrib/python/coverage/py3/coverage/data.py
+++ /dev/null
@@ -1,125 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Coverage data for coverage.py.
-
-This file had the 4.x JSON data support, which is now gone. This file still
-has storage-agnostic helpers, and is kept to avoid changing too many imports.
-CoverageData is now defined in sqldata.py, and imported here to keep the
-imports working.
-
-"""
-
-import glob
-import os.path
-
-from coverage.misc import CoverageException, file_be_gone
-from coverage.sqldata import CoverageData
-
-
-def line_counts(data, fullpath=False):
- """Return a dict summarizing the line coverage data.
-
- Keys are based on the file names, and values are the number of executed
- lines. If `fullpath` is true, then the keys are the full pathnames of
- the files, otherwise they are the basenames of the files.
-
- Returns a dict mapping file names to counts of lines.
-
- """
- summ = {}
- if fullpath:
- filename_fn = lambda f: f
- else:
- filename_fn = os.path.basename
- for filename in data.measured_files():
- summ[filename_fn(filename)] = len(data.lines(filename))
- return summ
-
-
-def add_data_to_hash(data, filename, hasher):
- """Contribute `filename`'s data to the `hasher`.
-
- `hasher` is a `coverage.misc.Hasher` instance to be updated with
- the file's data. It should only get the results data, not the run
- data.
-
- """
- if data.has_arcs():
- hasher.update(sorted(data.arcs(filename) or []))
- else:
- hasher.update(sorted(data.lines(filename) or []))
- hasher.update(data.file_tracer(filename))
-
-
-def combine_parallel_data(data, aliases=None, data_paths=None, strict=False, keep=False):
- """Combine a number of data files together.
-
- Treat `data.filename` as a file prefix, and combine the data from all
- of the data files starting with that prefix plus a dot.
-
- If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's.
-
- If `data_paths` is provided, it is a list of directories or files to
- combine. Directories are searched for files that start with
- `data.filename` plus dot as a prefix, and those files are combined.
-
- If `data_paths` is not provided, then the directory portion of
- `data.filename` is used as the directory to search for data files.
-
-    Unless `keep` is True, every data file that is found and combined is
-    then deleted from disk. If a file cannot be read, a warning will be
-    issued, and the file will not be deleted.
-
- If `strict` is true, and no files are found to combine, an error is
- raised.
-
- """
- # Because of the os.path.abspath in the constructor, data_dir will
- # never be an empty string.
- data_dir, local = os.path.split(data.base_filename())
- localdot = local + '.*'
-
- data_paths = data_paths or [data_dir]
- files_to_combine = []
- for p in data_paths:
- if os.path.isfile(p):
- files_to_combine.append(os.path.abspath(p))
- elif os.path.isdir(p):
- pattern = os.path.join(os.path.abspath(p), localdot)
- files_to_combine.extend(glob.glob(pattern))
- else:
- raise CoverageException("Couldn't combine from non-existent path '%s'" % (p,))
-
- if strict and not files_to_combine:
- raise CoverageException("No data to combine")
-
- files_combined = 0
- for f in files_to_combine:
- if f == data.data_filename():
- # Sometimes we are combining into a file which is one of the
- # parallel files. Skip that file.
- if data._debug.should('dataio'):
- data._debug.write("Skipping combining ourself: %r" % (f,))
- continue
- if data._debug.should('dataio'):
- data._debug.write("Combining data file %r" % (f,))
- try:
- new_data = CoverageData(f, debug=data._debug)
- new_data.read()
- except CoverageException as exc:
- if data._warn:
- # The CoverageException has the file name in it, so just
- # use the message as the warning.
- data._warn(str(exc))
- else:
- data.update(new_data, aliases=aliases)
- files_combined += 1
- if not keep:
- if data._debug.should('dataio'):
- data._debug.write("Deleting combined data file %r" % (f,))
- file_be_gone(f)
-
- if strict and not files_combined:
- raise CoverageException("No usable data files")
diff --git a/contrib/python/coverage/py3/coverage/debug.py b/contrib/python/coverage/py3/coverage/debug.py
deleted file mode 100644
index 194f16f50d..0000000000
--- a/contrib/python/coverage/py3/coverage/debug.py
+++ /dev/null
@@ -1,406 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Control of and utilities for debugging."""
-
-import contextlib
-import functools
-import inspect
-import itertools
-import os
-import pprint
-import sys
-try:
- import _thread
-except ImportError:
- import thread as _thread
-
-from coverage.backward import reprlib, StringIO
-from coverage.misc import isolate_module
-
-os = isolate_module(os)
-
-
-# When debugging, it can be helpful to force some options, especially when
-# debugging the configuration mechanisms you usually use to control debugging!
-# This is a list of forced debugging options.
-FORCED_DEBUG = []
-FORCED_DEBUG_FILE = None
-
-
-class DebugControl(object):
- """Control and output for debugging."""
-
- show_repr_attr = False # For SimpleReprMixin
-
- def __init__(self, options, output):
- """Configure the options and output file for debugging."""
- self.options = list(options) + FORCED_DEBUG
- self.suppress_callers = False
-
- filters = []
- if self.should('pid'):
- filters.append(add_pid_and_tid)
- self.output = DebugOutputFile.get_one(
- output,
- show_process=self.should('process'),
- filters=filters,
- )
- self.raw_output = self.output.outfile
-
- def __repr__(self):
- return "<DebugControl options=%r raw_output=%r>" % (self.options, self.raw_output)
-
- def should(self, option):
- """Decide whether to output debug information in category `option`."""
- if option == "callers" and self.suppress_callers:
- return False
- return (option in self.options)
-
- @contextlib.contextmanager
- def without_callers(self):
- """A context manager to prevent call stacks from being logged."""
- old = self.suppress_callers
- self.suppress_callers = True
- try:
- yield
- finally:
- self.suppress_callers = old
-
- def write(self, msg):
- """Write a line of debug output.
-
- `msg` is the line to write. A newline will be appended.
-
- """
- self.output.write(msg+"\n")
- if self.should('self'):
- caller_self = inspect.stack()[1][0].f_locals.get('self')
- if caller_self is not None:
- self.output.write("self: {!r}\n".format(caller_self))
- if self.should('callers'):
- dump_stack_frames(out=self.output, skip=1)
- self.output.flush()
-
-
-class DebugControlString(DebugControl):
- """A `DebugControl` that writes to a StringIO, for testing."""
- def __init__(self, options):
- super(DebugControlString, self).__init__(options, StringIO())
-
- def get_output(self):
- """Get the output text from the `DebugControl`."""
- return self.raw_output.getvalue()
-
-
-class NoDebugging(object):
- """A replacement for DebugControl that will never try to do anything."""
- def should(self, option): # pylint: disable=unused-argument
- """Should we write debug messages? Never."""
- return False
-
-
-def info_header(label):
- """Make a nice header string."""
- return "--{:-<60s}".format(" "+label+" ")
-
-
-def info_formatter(info):
- """Produce a sequence of formatted lines from info.
-
- `info` is a sequence of pairs (label, data). The produced lines are
- nicely formatted, ready to print.
-
- """
- info = list(info)
- if not info:
- return
- label_len = 30
- assert all(len(l) < label_len for l, _ in info)
- for label, data in info:
- if data == []:
- data = "-none-"
- if isinstance(data, (list, set, tuple)):
- prefix = "%*s:" % (label_len, label)
- for e in data:
- yield "%*s %s" % (label_len+1, prefix, e)
- prefix = ""
- else:
- yield "%*s: %s" % (label_len, label, data)
-
-
-def write_formatted_info(writer, header, info):
- """Write a sequence of (label,data) pairs nicely."""
- writer.write(info_header(header))
- for line in info_formatter(info):
- writer.write(" %s" % line)
-
-
-def short_stack(limit=None, skip=0):
- """Return a string summarizing the call stack.
-
- The string is multi-line, with one line per stack frame. Each line shows
- the function name, the file name, and the line number:
-
- ...
- start_import_stop : /Users/ned/coverage/trunk/tests/coveragetest.py @95
- import_local_file : /Users/ned/coverage/trunk/tests/coveragetest.py @81
- import_local_file : /Users/ned/coverage/trunk/coverage/backward.py @159
- ...
-
- `limit` is the number of frames to include, defaulting to all of them.
-
- `skip` is the number of frames to skip, so that debugging functions can
- call this and not be included in the result.
-
- """
- stack = inspect.stack()[limit:skip:-1]
- return "\n".join("%30s : %s:%d" % (t[3], t[1], t[2]) for t in stack)
-
-
-def dump_stack_frames(limit=None, out=None, skip=0):
- """Print a summary of the stack to stdout, or someplace else."""
- out = out or sys.stdout
- out.write(short_stack(limit=limit, skip=skip+1))
- out.write("\n")
-
-
-def clipped_repr(text, numchars=50):
- """`repr(text)`, but limited to `numchars`."""
- r = reprlib.Repr()
- r.maxstring = numchars
- return r.repr(text)
-
-
-def short_id(id64):
- """Given a 64-bit id, make a shorter 16-bit one."""
- id16 = 0
- for offset in range(0, 64, 16):
- id16 ^= id64 >> offset
- return id16 & 0xFFFF
-
-
-def add_pid_and_tid(text):
- """A filter to add pid and tid to debug messages."""
- # Thread ids are useful, but too long. Make a shorter one.
- tid = "{:04x}".format(short_id(_thread.get_ident()))
- text = "{:5d}.{}: {}".format(os.getpid(), tid, text)
- return text
-
-
-class SimpleReprMixin(object):
- """A mixin implementing a simple __repr__."""
- simple_repr_ignore = ['simple_repr_ignore', '$coverage.object_id']
-
- def __repr__(self):
- show_attrs = (
- (k, v) for k, v in self.__dict__.items()
- if getattr(v, "show_repr_attr", True)
- and not callable(v)
- and k not in self.simple_repr_ignore
- )
- return "<{klass} @0x{id:x} {attrs}>".format(
- klass=self.__class__.__name__,
- id=id(self),
- attrs=" ".join("{}={!r}".format(k, v) for k, v in show_attrs),
- )
-
-
-def simplify(v): # pragma: debugging
- """Turn things which are nearly dict/list/etc into dict/list/etc."""
- if isinstance(v, dict):
- return {k:simplify(vv) for k, vv in v.items()}
- elif isinstance(v, (list, tuple)):
- return type(v)(simplify(vv) for vv in v)
- elif hasattr(v, "__dict__"):
- return simplify({'.'+k: v for k, v in v.__dict__.items()})
- else:
- return v
-
-
-def pp(v): # pragma: debugging
- """Debug helper to pretty-print data, including SimpleNamespace objects."""
- # Might not be needed in 3.9+
- pprint.pprint(simplify(v))
-
-
-def filter_text(text, filters):
- """Run `text` through a series of filters.
-
- `filters` is a list of functions. Each takes a string and returns a
- string. Each is run in turn.
-
- Returns: the final string that results after all of the filters have
- run.
-
- """
- clean_text = text.rstrip()
- ending = text[len(clean_text):]
- text = clean_text
- for fn in filters:
- lines = []
- for line in text.splitlines():
- lines.extend(fn(line).splitlines())
- text = "\n".join(lines)
- return text + ending
-
-
-class CwdTracker(object): # pragma: debugging
- """A class to add cwd info to debug messages."""
- def __init__(self):
- self.cwd = None
-
- def filter(self, text):
- """Add a cwd message for each new cwd."""
- cwd = os.getcwd()
- if cwd != self.cwd:
- text = "cwd is now {!r}\n".format(cwd) + text
- self.cwd = cwd
- return text
-
-
-class DebugOutputFile(object): # pragma: debugging
- """A file-like object that includes pid and cwd information."""
- def __init__(self, outfile, show_process, filters):
- self.outfile = outfile
- self.show_process = show_process
- self.filters = list(filters)
-
- if self.show_process:
- self.filters.insert(0, CwdTracker().filter)
- self.write("New process: executable: %r\n" % (sys.executable,))
- self.write("New process: cmd: %r\n" % (getattr(sys, 'argv', None),))
- if hasattr(os, 'getppid'):
- self.write("New process: pid: %r, parent pid: %r\n" % (os.getpid(), os.getppid()))
-
- SYS_MOD_NAME = '$coverage.debug.DebugOutputFile.the_one'
-
- @classmethod
- def get_one(cls, fileobj=None, show_process=True, filters=(), interim=False):
- """Get a DebugOutputFile.
-
- If `fileobj` is provided, then a new DebugOutputFile is made with it.
-
- If `fileobj` isn't provided, then a file is chosen
- (COVERAGE_DEBUG_FILE, or stderr), and a process-wide singleton
- DebugOutputFile is made.
-
- `show_process` controls whether the debug file adds process-level
- information, and filters is a list of other message filters to apply.
-
- `filters` are the text filters to apply to the stream to annotate with
- pids, etc.
-
- If `interim` is true, then a future `get_one` can replace this one.
-
- """
- if fileobj is not None:
- # Make DebugOutputFile around the fileobj passed.
- return cls(fileobj, show_process, filters)
-
- # Because of the way igor.py deletes and re-imports modules,
- # this class can be defined more than once. But we really want
- # a process-wide singleton. So stash it in sys.modules instead of
- # on a class attribute. Yes, this is aggressively gross.
- the_one, is_interim = sys.modules.get(cls.SYS_MOD_NAME, (None, True))
- if the_one is None or is_interim:
- if fileobj is None:
- debug_file_name = os.environ.get("COVERAGE_DEBUG_FILE", FORCED_DEBUG_FILE)
- if debug_file_name:
- fileobj = open(debug_file_name, "a")
- else:
- fileobj = sys.stderr
- the_one = cls(fileobj, show_process, filters)
- sys.modules[cls.SYS_MOD_NAME] = (the_one, interim)
- return the_one
-
- def write(self, text):
- """Just like file.write, but filter through all our filters."""
- self.outfile.write(filter_text(text, self.filters))
- self.outfile.flush()
-
- def flush(self):
- """Flush our file."""
- self.outfile.flush()
-
-
-def log(msg, stack=False): # pragma: debugging
- """Write a log message as forcefully as possible."""
- out = DebugOutputFile.get_one(interim=True)
- out.write(msg+"\n")
- if stack:
- dump_stack_frames(out=out, skip=1)
-
-
-def decorate_methods(decorator, butnot=(), private=False): # pragma: debugging
- """A class decorator to apply a decorator to methods."""
- def _decorator(cls):
- for name, meth in inspect.getmembers(cls, inspect.isroutine):
- if name not in cls.__dict__:
- continue
- if name != "__init__":
- if not private and name.startswith("_"):
- continue
- if name in butnot:
- continue
- setattr(cls, name, decorator(meth))
- return cls
- return _decorator
-
-
-def break_in_pudb(func): # pragma: debugging
- """A function decorator to stop in the debugger for each call."""
- @functools.wraps(func)
- def _wrapper(*args, **kwargs):
- import pudb
- sys.stdout = sys.__stdout__
- pudb.set_trace()
- return func(*args, **kwargs)
- return _wrapper
-
-
-OBJ_IDS = itertools.count()
-CALLS = itertools.count()
-OBJ_ID_ATTR = "$coverage.object_id"
-
-def show_calls(show_args=True, show_stack=False, show_return=False): # pragma: debugging
- """A method decorator to debug-log each call to the function."""
- def _decorator(func):
- @functools.wraps(func)
- def _wrapper(self, *args, **kwargs):
- oid = getattr(self, OBJ_ID_ATTR, None)
- if oid is None:
- oid = "{:08d} {:04d}".format(os.getpid(), next(OBJ_IDS))
- setattr(self, OBJ_ID_ATTR, oid)
- extra = ""
- if show_args:
- eargs = ", ".join(map(repr, args))
- ekwargs = ", ".join("{}={!r}".format(*item) for item in kwargs.items())
- extra += "("
- extra += eargs
- if eargs and ekwargs:
- extra += ", "
- extra += ekwargs
- extra += ")"
- if show_stack:
- extra += " @ "
- extra += "; ".join(_clean_stack_line(l) for l in short_stack().splitlines())
- callid = next(CALLS)
- msg = "{} {:04d} {}{}\n".format(oid, callid, func.__name__, extra)
- DebugOutputFile.get_one(interim=True).write(msg)
- ret = func(self, *args, **kwargs)
- if show_return:
- msg = "{} {:04d} {} return {!r}\n".format(oid, callid, func.__name__, ret)
- DebugOutputFile.get_one(interim=True).write(msg)
- return ret
- return _wrapper
- return _decorator
-
-
-def _clean_stack_line(s): # pragma: debugging
- """Simplify some paths in a stack trace, for compactness."""
- s = s.strip()
- s = s.replace(os.path.dirname(__file__) + '/', '')
- s = s.replace(os.path.dirname(os.__file__) + '/', '')
- s = s.replace(sys.prefix + '/', '')
- return s
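A small illustration of the filter pipeline above, combining filter_text with the add_pid_and_tid filter defined in this module (the output described in the comment is approximate):

    from coverage.debug import add_pid_and_tid, filter_text

    text = "combining data files\nwriting report\n"
    print(filter_text(text, [add_pid_and_tid]))
    # Each line comes back prefixed with "<pid>.<short tid>: ", and the
    # trailing newline of the original text is preserved.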
diff --git a/contrib/python/coverage/py3/coverage/disposition.py b/contrib/python/coverage/py3/coverage/disposition.py
deleted file mode 100644
index 9b9a997d8a..0000000000
--- a/contrib/python/coverage/py3/coverage/disposition.py
+++ /dev/null
@@ -1,37 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Simple value objects for tracking what to do with files."""
-
-
-class FileDisposition(object):
- """A simple value type for recording what to do with a file."""
- pass
-
-
-# FileDisposition "methods": FileDisposition is a pure value object, so it can
-# be implemented in either C or Python. Acting on them is done with these
-# functions.
-
-def disposition_init(cls, original_filename):
- """Construct and initialize a new FileDisposition object."""
- disp = cls()
- disp.original_filename = original_filename
- disp.canonical_filename = original_filename
- disp.source_filename = None
- disp.trace = False
- disp.reason = ""
- disp.file_tracer = None
- disp.has_dynamic_filename = False
- return disp
-
-
-def disposition_debug_msg(disp):
- """Make a nice debug message of what the FileDisposition is doing."""
- if disp.trace:
- msg = "Tracing %r" % (disp.original_filename,)
- if disp.file_tracer:
- msg += ": will be traced by %r" % disp.file_tracer
- else:
- msg = "Not tracing %r: %s" % (disp.original_filename, disp.reason)
- return msg
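A short sketch of how these value-object helpers are meant to be driven; the file name is made up, and setting .trace by hand stands in for what should_trace normally decides:

    from coverage.disposition import (
        FileDisposition, disposition_init, disposition_debug_msg,
    )

    disp = disposition_init(FileDisposition, "example/module.py")
    disp.trace = True                      # pretend should_trace() said yes
    print(disposition_debug_msg(disp))     # -> Tracing 'example/module.py'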
diff --git a/contrib/python/coverage/py3/coverage/env.py b/contrib/python/coverage/py3/coverage/env.py
deleted file mode 100644
index ea78a5be89..0000000000
--- a/contrib/python/coverage/py3/coverage/env.py
+++ /dev/null
@@ -1,130 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determine facts about the environment."""
-
-import os
-import platform
-import sys
-
-# Operating systems.
-WINDOWS = sys.platform == "win32"
-LINUX = sys.platform.startswith("linux")
-
-# Python implementations.
-CPYTHON = (platform.python_implementation() == "CPython")
-PYPY = (platform.python_implementation() == "PyPy")
-JYTHON = (platform.python_implementation() == "Jython")
-IRONPYTHON = (platform.python_implementation() == "IronPython")
-
-# Python versions. We amend version_info with one more value, a zero if an
-# official version, or 1 if built from source beyond an official version.
-PYVERSION = sys.version_info + (int(platform.python_version()[-1] == "+"),)
-PY2 = PYVERSION < (3, 0)
-PY3 = PYVERSION >= (3, 0)
-
-if PYPY:
- PYPYVERSION = sys.pypy_version_info
-
-PYPY2 = PYPY and PY2
-PYPY3 = PYPY and PY3
-
-# Python behavior.
-class PYBEHAVIOR(object):
- """Flags indicating this Python's behavior."""
-
- pep626 = CPYTHON and (PYVERSION > (3, 10, 0, 'alpha', 4))
-
- # Is "if __debug__" optimized away?
- if PYPY3:
- optimize_if_debug = True
- elif PYPY2:
- optimize_if_debug = False
- else:
- optimize_if_debug = not pep626
-
- # Is "if not __debug__" optimized away?
- optimize_if_not_debug = (not PYPY) and (PYVERSION >= (3, 7, 0, 'alpha', 4))
- if pep626:
- optimize_if_not_debug = False
- if PYPY3:
- optimize_if_not_debug = True
-
- # Is "if not __debug__" optimized away even better?
- optimize_if_not_debug2 = (not PYPY) and (PYVERSION >= (3, 8, 0, 'beta', 1))
- if pep626:
- optimize_if_not_debug2 = False
-
- # Do we have yield-from?
- yield_from = (PYVERSION >= (3, 3))
-
- # Do we have PEP 420 namespace packages?
- namespaces_pep420 = (PYVERSION >= (3, 3))
-
- # Do .pyc files have the source file size recorded in them?
- size_in_pyc = (PYVERSION >= (3, 3))
-
- # Do we have async and await syntax?
- async_syntax = (PYVERSION >= (3, 5))
-
- # PEP 448 defined additional unpacking generalizations
- unpackings_pep448 = (PYVERSION >= (3, 5))
-
- # Can co_lnotab have negative deltas?
- negative_lnotab = (PYVERSION >= (3, 6)) and not (PYPY and PYPYVERSION < (7, 2))
-
- # Do .pyc files conform to PEP 552? Hash-based pyc's.
- hashed_pyc_pep552 = (PYVERSION >= (3, 7, 0, 'alpha', 4))
-
- # Python 3.7.0b3 changed the behavior of the sys.path[0] entry for -m. It
- # used to be an empty string (meaning the current directory). It changed
- # to be the actual path to the current directory, so that os.chdir wouldn't
- # affect the outcome.
- actual_syspath0_dash_m = CPYTHON and (PYVERSION >= (3, 7, 0, 'beta', 3))
-
- # 3.7 changed how functions with only docstrings are numbered.
- docstring_only_function = (not PYPY) and ((3, 7, 0, 'beta', 5) <= PYVERSION <= (3, 10))
-
- # When a break/continue/return statement in a try block jumps to a finally
- # block, does the finally block do the break/continue/return (pre-3.8), or
- # does the finally jump back to the break/continue/return (3.8) to do the
- # work?
- finally_jumps_back = ((3, 8) <= PYVERSION < (3, 10))
-
- # When a function is decorated, does the trace function get called for the
- # @-line and also the def-line (new behavior in 3.8)? Or just the @-line
- # (old behavior)?
- trace_decorated_def = (PYVERSION >= (3, 8))
-
- # Are while-true loops optimized into absolute jumps with no loop setup?
- nix_while_true = (PYVERSION >= (3, 8))
-
- # Python 3.9a1 made sys.argv[0] and other reported files absolute paths.
- report_absolute_files = (PYVERSION >= (3, 9))
-
- # Lines after break/continue/return/raise are no longer compiled into the
- # bytecode. They used to be marked as missing, now they aren't executable.
- omit_after_jump = pep626
-
- # PyPy has always omitted statements after return.
- omit_after_return = omit_after_jump or PYPY
-
- # Modules used to have firstlineno equal to the line number of the first
- # real line of code. Now they always start at 1.
- module_firstline_1 = pep626
-
- # Are "if 0:" lines (and similar) kept in the compiled code?
- keep_constant_test = pep626
-
-# Coverage.py specifics.
-
-# Are we using the C-implemented trace function?
-C_TRACER = os.getenv('COVERAGE_TEST_TRACER', 'c') == 'c'
-
-# Are we coverage-measuring ourselves?
-METACOV = os.getenv('COVERAGE_COVERAGE', '') != ''
-
-# Are we running our test suite?
-# Even when running tests, you can use COVERAGE_TESTING=0 to disable the
-# test-specific behavior like contracts.
-TESTING = os.getenv('COVERAGE_TESTING', '') == 'True'
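As a rough illustration of how these flags are consulted, the particular checks below are examples rather than specific call sites from coverage.py:

    from coverage import env

    if env.C_TRACER:
        print("measuring with the C tracer extension")
    if env.PYBEHAVIOR.trace_decorated_def:
        print("both the @-line and the def line of a decorated function get trace events")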
diff --git a/contrib/python/coverage/py3/coverage/execfile.py b/contrib/python/coverage/py3/coverage/execfile.py
deleted file mode 100644
index 29409d517a..0000000000
--- a/contrib/python/coverage/py3/coverage/execfile.py
+++ /dev/null
@@ -1,362 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Execute files of Python code."""
-
-import inspect
-import marshal
-import os
-import struct
-import sys
-import types
-
-from coverage import env
-from coverage.backward import BUILTINS
-from coverage.backward import PYC_MAGIC_NUMBER, imp, importlib_util_find_spec
-from coverage.files import canonical_filename, python_reported_file
-from coverage.misc import CoverageException, ExceptionDuringRun, NoCode, NoSource, isolate_module
-from coverage.phystokens import compile_unicode
-from coverage.python import get_python_source
-
-os = isolate_module(os)
-
-
-class DummyLoader(object):
- """A shim for the pep302 __loader__, emulating pkgutil.ImpLoader.
-
- Currently only implements the .fullname attribute
- """
- def __init__(self, fullname, *_args):
- self.fullname = fullname
-
-
-if importlib_util_find_spec:
- def find_module(modulename):
- """Find the module named `modulename`.
-
- Returns the file path of the module, the name of the enclosing
- package, and the spec.
- """
- try:
- spec = importlib_util_find_spec(modulename)
- except ImportError as err:
- raise NoSource(str(err))
- if not spec:
- raise NoSource("No module named %r" % (modulename,))
- pathname = spec.origin
- packagename = spec.name
- if spec.submodule_search_locations:
- mod_main = modulename + ".__main__"
- spec = importlib_util_find_spec(mod_main)
- if not spec:
- raise NoSource(
- "No module named %s; "
- "%r is a package and cannot be directly executed"
- % (mod_main, modulename)
- )
- pathname = spec.origin
- packagename = spec.name
- packagename = packagename.rpartition(".")[0]
- return pathname, packagename, spec
-else:
- def find_module(modulename):
- """Find the module named `modulename`.
-
- Returns the file path of the module, the name of the enclosing
- package, and None (where a spec would have been).
- """
- openfile = None
- glo, loc = globals(), locals()
- try:
- # Search for the module - inside its parent package, if any - using
- # standard import mechanics.
- if '.' in modulename:
- packagename, name = modulename.rsplit('.', 1)
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- else:
- packagename, name = None, modulename
- searchpath = None # "top-level search" in imp.find_module()
- openfile, pathname, _ = imp.find_module(name, searchpath)
-
- # Complain if this is a magic non-file module.
- if openfile is None and pathname is None:
- raise NoSource(
- "module does not live in a file: %r" % modulename
- )
-
- # If `modulename` is actually a package, not a mere module, then we
- # pretend to be Python 2.7 and try running its __main__.py script.
- if openfile is None:
- packagename = modulename
- name = '__main__'
- package = __import__(packagename, glo, loc, ['__path__'])
- searchpath = package.__path__
- openfile, pathname, _ = imp.find_module(name, searchpath)
- except ImportError as err:
- raise NoSource(str(err))
- finally:
- if openfile:
- openfile.close()
-
- return pathname, packagename, None
-
-
-class PyRunner(object):
- """Multi-stage execution of Python code.
-
- This is meant to emulate real Python execution as closely as possible.
-
- """
- def __init__(self, args, as_module=False):
- self.args = args
- self.as_module = as_module
-
- self.arg0 = args[0]
- self.package = self.modulename = self.pathname = self.loader = self.spec = None
-
- def prepare(self):
- """Set sys.path properly.
-
- This needs to happen before any importing, and without importing anything.
- """
- if self.as_module:
- if env.PYBEHAVIOR.actual_syspath0_dash_m:
- path0 = os.getcwd()
- else:
- path0 = ""
- elif os.path.isdir(self.arg0):
- # Running a directory means running the __main__.py file in that
- # directory.
- path0 = self.arg0
- else:
- path0 = os.path.abspath(os.path.dirname(self.arg0))
-
- if os.path.isdir(sys.path[0]):
- # sys.path fakery. If we are being run as a command, then sys.path[0]
- # is the directory of the "coverage" script. If this is so, replace
- # sys.path[0] with the directory of the file we're running, or the
- # current directory when running modules. If it isn't so, then we
- # don't know what's going on, and just leave it alone.
- top_file = inspect.stack()[-1][0].f_code.co_filename
- sys_path_0_abs = os.path.abspath(sys.path[0])
- top_file_dir_abs = os.path.abspath(os.path.dirname(top_file))
- sys_path_0_abs = canonical_filename(sys_path_0_abs)
- top_file_dir_abs = canonical_filename(top_file_dir_abs)
- if sys_path_0_abs != top_file_dir_abs:
- path0 = None
-
- else:
- # sys.path[0] is a file. Is the next entry the directory containing
- # that file?
- if sys.path[1] == os.path.dirname(sys.path[0]):
- # Can it be right to always remove that?
- del sys.path[1]
-
- if path0 is not None:
- sys.path[0] = python_reported_file(path0)
-
- def _prepare2(self):
- """Do more preparation to run Python code.
-
- Includes finding the module to run and adjusting sys.argv[0].
- This method is allowed to import code.
-
- """
- if self.as_module:
- self.modulename = self.arg0
- pathname, self.package, self.spec = find_module(self.modulename)
- if self.spec is not None:
- self.modulename = self.spec.name
- self.loader = DummyLoader(self.modulename)
- self.pathname = os.path.abspath(pathname)
- self.args[0] = self.arg0 = self.pathname
- elif os.path.isdir(self.arg0):
- # Running a directory means running the __main__.py file in that
- # directory.
- for ext in [".py", ".pyc", ".pyo"]:
- try_filename = os.path.join(self.arg0, "__main__" + ext)
- if os.path.exists(try_filename):
- self.arg0 = try_filename
- break
- else:
- raise NoSource("Can't find '__main__' module in '%s'" % self.arg0)
-
- if env.PY2:
- self.arg0 = os.path.abspath(self.arg0)
-
- # Make a spec. I don't know if this is the right way to do it.
- try:
- import importlib.machinery
- except ImportError:
- pass
- else:
- try_filename = python_reported_file(try_filename)
- self.spec = importlib.machinery.ModuleSpec("__main__", None, origin=try_filename)
- self.spec.has_location = True
- self.package = ""
- self.loader = DummyLoader("__main__")
- else:
- if env.PY3:
- self.loader = DummyLoader("__main__")
-
- self.arg0 = python_reported_file(self.arg0)
-
- def run(self):
- """Run the Python code!"""
-
- self._prepare2()
-
- # Create a module to serve as __main__
- main_mod = types.ModuleType('__main__')
-
- from_pyc = self.arg0.endswith((".pyc", ".pyo"))
- main_mod.__file__ = self.arg0
- if from_pyc:
- main_mod.__file__ = main_mod.__file__[:-1]
- if self.package is not None:
- main_mod.__package__ = self.package
- main_mod.__loader__ = self.loader
- if self.spec is not None:
- main_mod.__spec__ = self.spec
-
- main_mod.__builtins__ = BUILTINS
-
- sys.modules['__main__'] = main_mod
-
- # Set sys.argv properly.
- sys.argv = self.args
-
- try:
- # Make a code object somehow.
- if from_pyc:
- code = make_code_from_pyc(self.arg0)
- else:
- code = make_code_from_py(self.arg0)
- except CoverageException:
- raise
- except Exception as exc:
- msg = "Couldn't run '{filename}' as Python code: {exc.__class__.__name__}: {exc}"
- raise CoverageException(msg.format(filename=self.arg0, exc=exc))
-
- # Execute the code object.
- # Return to the original directory in case the test code exits in
- # a non-existent directory.
- cwd = os.getcwd()
- try:
- exec(code, main_mod.__dict__)
- except SystemExit: # pylint: disable=try-except-raise
- # The user called sys.exit(). Just pass it along to the upper
- # layers, where it will be handled.
- raise
- except Exception:
- # Something went wrong while executing the user code.
- # Get the exc_info, and pack them into an exception that we can
- # throw up to the outer loop. We peel one layer off the traceback
- # so that the coverage.py code doesn't appear in the final printed
- # traceback.
- typ, err, tb = sys.exc_info()
-
- # PyPy3 weirdness. If I don't access __context__, then somehow it
- # is non-None when the exception is reported at the upper layer,
- # and a nested exception is shown to the user. This getattr fixes
- # it somehow? https://bitbucket.org/pypy/pypy/issue/1903
- getattr(err, '__context__', None)
-
- # Call the excepthook.
- try:
- if hasattr(err, "__traceback__"):
- err.__traceback__ = err.__traceback__.tb_next
- sys.excepthook(typ, err, tb.tb_next)
- except SystemExit: # pylint: disable=try-except-raise
- raise
- except Exception:
- # Getting the output right in the case of excepthook
- # shenanigans is kind of involved.
- sys.stderr.write("Error in sys.excepthook:\n")
- typ2, err2, tb2 = sys.exc_info()
- err2.__suppress_context__ = True
- if hasattr(err2, "__traceback__"):
- err2.__traceback__ = err2.__traceback__.tb_next
- sys.__excepthook__(typ2, err2, tb2.tb_next)
- sys.stderr.write("\nOriginal exception was:\n")
- raise ExceptionDuringRun(typ, err, tb.tb_next)
- else:
- sys.exit(1)
- finally:
- os.chdir(cwd)
-
-
-def run_python_module(args):
- """Run a Python module, as though with ``python -m name args...``.
-
- `args` is the argument array to present as sys.argv, including the first
- element naming the module being executed.
-
- This is a helper for tests, to encapsulate how to use PyRunner.
-
- """
- runner = PyRunner(args, as_module=True)
- runner.prepare()
- runner.run()
-
-
-def run_python_file(args):
- """Run a Python file as if it were the main program on the command line.
-
- `args` is the argument array to present as sys.argv, including the first
- element naming the file being executed.
-
- This is a helper for tests, to encapsulate how to use PyRunner.
-
- """
- runner = PyRunner(args, as_module=False)
- runner.prepare()
- runner.run()
-
-
-def make_code_from_py(filename):
- """Get source from `filename` and make a code object of it."""
- # Open the source file.
- try:
- source = get_python_source(filename)
- except (IOError, NoSource):
- raise NoSource("No file to run: '%s'" % filename)
-
- code = compile_unicode(source, filename, "exec")
- return code
-
-
-def make_code_from_pyc(filename):
- """Get a code object from a .pyc file."""
- try:
- fpyc = open(filename, "rb")
- except IOError:
- raise NoCode("No file to run: '%s'" % filename)
-
- with fpyc:
- # First four bytes are a version-specific magic number. It has to
- # match or we won't run the file.
- magic = fpyc.read(4)
- if magic != PYC_MAGIC_NUMBER:
- raise NoCode("Bad magic number in .pyc file: {} != {}".format(magic, PYC_MAGIC_NUMBER))
-
- date_based = True
- if env.PYBEHAVIOR.hashed_pyc_pep552:
- flags = struct.unpack('<L', fpyc.read(4))[0]
- hash_based = flags & 0x01
- if hash_based:
- fpyc.read(8) # Skip the hash.
- date_based = False
- if date_based:
- # Skip the junk in the header that we don't need.
- fpyc.read(4) # Skip the moddate.
- if env.PYBEHAVIOR.size_in_pyc:
- # 3.3 added another long to the header (size), skip it.
- fpyc.read(4)
-
- # The rest of the file is the code object we want.
- code = marshal.load(fpyc)
-
- return code
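
For orientation, the two helpers above are driven roughly like this (a minimal sketch; the script and module names are invented for illustration):

    from coverage.execfile import run_python_file, run_python_module

    # Run a script as though the command line were "python my_script.py --flag".
    run_python_file(["my_script.py", "--flag"])

    # Run a module as though the command line were "python -m mypkg.tool --verbose".
    run_python_module(["mypkg.tool", "--verbose"])
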
diff --git a/contrib/python/coverage/py3/coverage/files.py b/contrib/python/coverage/py3/coverage/files.py
deleted file mode 100644
index 5133ad07f3..0000000000
--- a/contrib/python/coverage/py3/coverage/files.py
+++ /dev/null
@@ -1,441 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""File wrangling."""
-
-import hashlib
-import fnmatch
-import ntpath
-import os
-import os.path
-import posixpath
-import re
-import sys
-
-from coverage import env
-from coverage.backward import unicode_class
-from coverage.misc import contract, CoverageException, join_regex, isolate_module
-
-
-os = isolate_module(os)
-
-
-def set_relative_directory():
- """Set the directory that `relative_filename` will be relative to."""
- global RELATIVE_DIR, CANONICAL_FILENAME_CACHE
-
- # The absolute path to our current directory.
- RELATIVE_DIR = os.path.normcase(abs_file(os.curdir) + os.sep)
-
- # Cache of results of calling the canonical_filename() method, to
- # avoid duplicating work.
- CANONICAL_FILENAME_CACHE = {}
-
-
-def relative_directory():
- """Return the directory that `relative_filename` is relative to."""
- return RELATIVE_DIR
-
-
-@contract(returns='unicode')
-def relative_filename(filename):
- """Return the relative form of `filename`.
-
- The file name will be relative to the directory that was current when
- `set_relative_directory` was called.
-
- """
- fnorm = os.path.normcase(filename)
- if fnorm.startswith(RELATIVE_DIR):
- filename = filename[len(RELATIVE_DIR):]
- return unicode_filename(filename)
-
-
-@contract(returns='unicode')
-def canonical_filename(filename):
- """Return a canonical file name for `filename`.
-
- An absolute path with no redundant components and normalized case.
-
- """
- if filename not in CANONICAL_FILENAME_CACHE:
- cf = filename
- if not os.path.isabs(filename):
- for path in [os.curdir] + sys.path:
- if path is None:
- continue
- f = os.path.join(path, filename)
- try:
- exists = os.path.exists(f)
- except UnicodeError:
- exists = False
- if exists:
- cf = f
- break
- cf = abs_file(cf)
- CANONICAL_FILENAME_CACHE[filename] = cf
- return CANONICAL_FILENAME_CACHE[filename]
-
-if getattr(sys, 'is_standalone_binary', False):
- # File names for .py files in a standalone binary are already canonical:
- # they are relative to the arcadia root.
- def canonical_filename(filename):
- # The next assert is not needed when coverage is loaded from a non-arcadia source inside an arcadia binary:
- # assert not filename.startswith("/"), filename
- return filename
-
-MAX_FLAT = 200
-
-@contract(filename='unicode', returns='unicode')
-def flat_rootname(filename):
- """A base for a flat file name to correspond to this file.
-
- Useful for writing files about the code where you want all the files in
- the same directory, but need to differentiate same-named files from
- different directories.
-
- For example, the file a/b/c.py will return 'a_b_c_py'
-
- """
- name = ntpath.splitdrive(filename)[1]
- name = re.sub(r"[\\/.:]", "_", name)
- if len(name) > MAX_FLAT:
- h = hashlib.sha1(name.encode('UTF-8')).hexdigest()
- name = name[-(MAX_FLAT-len(h)-1):] + '_' + h
- return name
-
-
-if env.WINDOWS:
-
- _ACTUAL_PATH_CACHE = {}
- _ACTUAL_PATH_LIST_CACHE = {}
-
- def actual_path(path):
- """Get the actual path of `path`, including the correct case."""
- if env.PY2 and isinstance(path, unicode_class):
- path = path.encode(sys.getfilesystemencoding())
- if path in _ACTUAL_PATH_CACHE:
- return _ACTUAL_PATH_CACHE[path]
-
- head, tail = os.path.split(path)
- if not tail:
- # This means head is the drive spec: normalize it.
- actpath = head.upper()
- elif not head:
- actpath = tail
- else:
- head = actual_path(head)
- if head in _ACTUAL_PATH_LIST_CACHE:
- files = _ACTUAL_PATH_LIST_CACHE[head]
- else:
- try:
- files = os.listdir(head)
- except Exception:
- # This will raise OSError, or this bizarre TypeError:
- # https://bugs.python.org/issue1776160
- files = []
- _ACTUAL_PATH_LIST_CACHE[head] = files
- normtail = os.path.normcase(tail)
- for f in files:
- if os.path.normcase(f) == normtail:
- tail = f
- break
- actpath = os.path.join(head, tail)
- _ACTUAL_PATH_CACHE[path] = actpath
- return actpath
-
-else:
- def actual_path(filename):
- """The actual path for non-Windows platforms."""
- return filename
-
-
-if env.PY2:
- @contract(returns='unicode')
- def unicode_filename(filename):
- """Return a Unicode version of `filename`."""
- if isinstance(filename, str):
- encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
- filename = filename.decode(encoding, "replace")
- return filename
-else:
- @contract(filename='unicode', returns='unicode')
- def unicode_filename(filename):
- """Return a Unicode version of `filename`."""
- return filename
-
-
-@contract(returns='unicode')
-def abs_file(path):
- """Return the absolute normalized form of `path`."""
- try:
- path = os.path.realpath(path)
- except UnicodeError:
- pass
- path = os.path.abspath(path)
- path = actual_path(path)
- path = unicode_filename(path)
- return path
-
-
-def python_reported_file(filename):
- """Return the string as Python would describe this file name."""
- if env.PYBEHAVIOR.report_absolute_files:
- filename = os.path.abspath(filename)
- return filename
-
-
-RELATIVE_DIR = None
-CANONICAL_FILENAME_CACHE = None
-set_relative_directory()
-
-
-def isabs_anywhere(filename):
- """Is `filename` an absolute path on any OS?"""
- return ntpath.isabs(filename) or posixpath.isabs(filename)
-
-
-def prep_patterns(patterns):
- """Prepare the file patterns for use in a `FnmatchMatcher`.
-
- If a pattern starts with a wildcard, it is used as a pattern
- as-is. If it does not start with a wildcard, then it is made
- absolute with the current directory.
-
- If `patterns` is None, an empty list is returned.
-
- """
- prepped = []
- for p in patterns or []:
- if p.startswith(("*", "?")):
- prepped.append(p)
- else:
- prepped.append(abs_file(p))
- return prepped
-
-
-class TreeMatcher(object):
- """A matcher for files in a tree.
-
- Construct with a list of paths, either files or directories. Paths match
- with the `match` method if they are one of the files, or if they are
- somewhere in a subtree rooted at one of the directories.
-
- """
- def __init__(self, paths):
- self.paths = list(paths)
-
- def __repr__(self):
- return "<TreeMatcher %r>" % self.paths
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.paths
-
- def match(self, fpath):
- """Does `fpath` indicate a file in one of our trees?"""
- for p in self.paths:
- if fpath.startswith(p):
- if fpath == p:
- # This is the same file!
- return True
- if fpath[len(p)] == os.sep:
- # This is a file in the directory
- return True
- return False
-
-
-class ModuleMatcher(object):
- """A matcher for modules in a tree."""
- def __init__(self, module_names):
- self.modules = list(module_names)
-
- def __repr__(self):
- return "<ModuleMatcher %r>" % (self.modules)
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.modules
-
- def match(self, module_name):
- """Does `module_name` indicate a module in one of our packages?"""
- if not module_name:
- return False
-
- for m in self.modules:
- if module_name.startswith(m):
- if module_name == m:
- return True
- if module_name[len(m)] == '.':
- # This is a module in the package
- return True
-
- return False
-
-
-class FnmatchMatcher(object):
- """A matcher for files by file name pattern."""
- def __init__(self, pats):
- self.pats = list(pats)
- self.re = fnmatches_to_regex(self.pats, case_insensitive=env.WINDOWS)
-
- def __repr__(self):
- return "<FnmatchMatcher %r>" % self.pats
-
- def info(self):
- """A list of strings for displaying when dumping state."""
- return self.pats
-
- def match(self, fpath):
- """Does `fpath` match one of our file name patterns?"""
- return self.re.match(fpath) is not None
-
-
-def sep(s):
- """Find the path separator used in this string, or os.sep if none."""
- sep_match = re.search(r"[\\/]", s)
- if sep_match:
- the_sep = sep_match.group(0)
- else:
- the_sep = os.sep
- return the_sep
-
-
-def fnmatches_to_regex(patterns, case_insensitive=False, partial=False):
- """Convert fnmatch patterns to a compiled regex that matches any of them.
-
- Slashes are always converted to match either slash or backslash, for
- Windows support, even when running elsewhere.
-
- If `partial` is true, then the pattern will match if the target string
- starts with the pattern. Otherwise, it must match the entire string.
-
- Returns: a compiled regex object. Use the .match method to compare target
- strings.
-
- """
- regexes = (fnmatch.translate(pattern) for pattern in patterns)
- # Python 3.7's fnmatch translates "/" as "/"; earlier versions translate it as "\/",
- # so we have to allow for an optional backslash.
- regexes = (re.sub(r"\\?/", r"[\\\\/]", regex) for regex in regexes)
-
- if partial:
- # fnmatch always adds a \Z to match the whole string, which we don't
- # want, so we remove the \Z. While removing it, we only replace \Z if
- # followed by paren (introducing flags), or at end, to keep from
- # destroying a literal \Z in the pattern.
- regexes = (re.sub(r'\\Z(\(\?|$)', r'\1', regex) for regex in regexes)
-
- flags = 0
- if case_insensitive:
- flags |= re.IGNORECASE
- compiled = re.compile(join_regex(regexes), flags=flags)
-
- return compiled
-
-
-class PathAliases(object):
- """A collection of aliases for paths.
-
- When combining data files from remote machines, often the paths to source
- code are different, for example, due to OS differences, or because of
- serialized checkouts on continuous integration machines.
-
- A `PathAliases` object tracks a list of pattern/result pairs, and can
- map a path through those aliases to produce a unified path.
-
- """
- def __init__(self):
- self.aliases = []
-
- def pprint(self): # pragma: debugging
- """Dump the important parts of the PathAliases, for debugging."""
- for regex, result in self.aliases:
- print("{!r} --> {!r}".format(regex.pattern, result))
-
- def add(self, pattern, result):
- """Add the `pattern`/`result` pair to the list of aliases.
-
- `pattern` is an `fnmatch`-style pattern. `result` is a simple
- string. When mapping paths, if a path starts with a match against
- `pattern`, then that match is replaced with `result`. This models
- isomorphic source trees being rooted at different places on two
- different machines.
-
- `pattern` can't end with a wildcard component, since that would
- match an entire tree, and not just its root.
-
- """
- pattern_sep = sep(pattern)
-
- if len(pattern) > 1:
- pattern = pattern.rstrip(r"\/")
-
- # The pattern can't end with a wildcard component.
- if pattern.endswith("*"):
- raise CoverageException("Pattern must not end with wildcards.")
-
- # The pattern is meant to match a filepath. Let's make it absolute
- # unless it already is, or is meant to match any prefix.
- if not pattern.startswith('*') and not isabs_anywhere(pattern +
- pattern_sep):
- pattern = abs_file(pattern)
- if not pattern.endswith(pattern_sep):
- pattern += pattern_sep
-
- # Make a regex from the pattern.
- regex = fnmatches_to_regex([pattern], case_insensitive=True, partial=True)
-
- # Normalize the result: it must end with a path separator.
- result_sep = sep(result)
- result = result.rstrip(r"\/") + result_sep
- self.aliases.append((regex, result))
-
- def map(self, path):
- """Map `path` through the aliases.
-
- `path` is checked against all of the patterns. The first pattern to
- match is used to replace the root of the path with the result root.
- Only one pattern is ever used. If no patterns match, `path` is
- returned unchanged.
-
- The separator style in the result is made to match that of the result
- in the alias.
-
- Returns the mapped path. If a mapping has happened, this is a
- canonical path. If no mapping has happened, it is the original value
- of `path` unchanged.
-
- """
- for regex, result in self.aliases:
- m = regex.match(path)
- if m:
- new = path.replace(m.group(0), result)
- new = new.replace(sep(path), sep(result))
- new = canonical_filename(new)
- return new
- return path
-
-
-def find_python_files(dirname):
- """Yield all of the importable Python files in `dirname`, recursively.
-
- To be importable, the files have to be in a directory with a __init__.py,
- except for `dirname` itself, which isn't required to have one. The
- assumption is that `dirname` was specified directly, so the user knows
- best, but sub-directories are checked for a __init__.py to be sure we only
- find the importable files.
-
- """
- for i, (dirpath, dirnames, filenames) in enumerate(os.walk(dirname)):
- if i > 0 and '__init__.py' not in filenames:
- # If a directory doesn't have __init__.py, then it isn't
- # importable and neither are its files
- del dirnames[:]
- continue
- for filename in filenames:
- # We're only interested in files that look like reasonable Python
- # files: Must end with .py or .pyw, and must not have certain funny
- # characters that probably mean they are editor junk.
- if re.match(r"^[^.#~!$@%^&*()+=,]+\.pyw?$", filename):
- yield os.path.join(dirpath, filename)
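
A rough sketch of the matching helpers above, with invented patterns (behaviour as described in the docstrings):

    from coverage.files import flat_rootname, fnmatches_to_regex

    # Flatten a path into a single report file name.
    flat_rootname(u"a/b/c.py")                   # -> u"a_b_c_py"

    # fnmatch patterns become one compiled regex that accepts either slash style.
    rx = fnmatches_to_regex(["*/tests/*.py"], case_insensitive=True)
    bool(rx.match("proj/tests/test_x.py"))       # True
    bool(rx.match("proj\\tests\\test_x.py"))     # also True; "/" matches "\" as well
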
diff --git a/contrib/python/coverage/py3/coverage/fullcoverage/encodings.py b/contrib/python/coverage/py3/coverage/fullcoverage/encodings.py
deleted file mode 100644
index aeb416e406..0000000000
--- a/contrib/python/coverage/py3/coverage/fullcoverage/encodings.py
+++ /dev/null
@@ -1,60 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Imposter encodings module that installs a coverage-style tracer.
-
-This is NOT the encodings module; it is an imposter that sets up tracing
-instrumentation and then replaces itself with the real encodings module.
-
-If the directory that holds this file is placed first in the PYTHONPATH when
-using "coverage" to run Python's tests, then this file will become the very
-first module imported by the internals of Python 3. It installs a
-coverage.py-compatible trace function that can watch Standard Library modules
-execute from the very earliest stages of Python's own boot process. This fixes
-a problem with coverage.py - that it starts too late to trace the coverage of
-many of the most fundamental modules in the Standard Library.
-
-"""
-
-import sys
-
-class FullCoverageTracer(object):
- def __init__(self):
- # `traces` is a list of trace events. Frames are tricky: the same
- # frame object is used for a whole scope, with new line numbers
- # written into it. So in one scope, all the frame objects are the
- # same object, and will all eventually point to the last line
- # executed. So we keep the line numbers alongside the frames.
- # The list looks like:
- #
- # traces = [
- # ((frame, event, arg), lineno), ...
- # ]
- #
- self.traces = []
-
- def fullcoverage_trace(self, *args):
- frame, event, arg = args
- self.traces.append((args, frame.f_lineno))
- return self.fullcoverage_trace
-
-sys.settrace(FullCoverageTracer().fullcoverage_trace)
-
-# In coverage/files.py is actual_filename(), which uses glob.glob. I don't
-# understand why, but that use of glob borks everything if fullcoverage is in
-# effect. So here we make an ugly hail-mary pass to switch off glob.glob over
-# there. This means when using fullcoverage, Windows path names will not be
-# their actual case.
-
-#sys.fullcoverage = True
-
-# Finally, remove our own directory from sys.path; remove ourselves from
-# sys.modules; and re-import "encodings", which will be the real package
-# this time. Note that the delete from sys.modules dictionary has to
-# happen last, since all of the symbols in this module will become None
-# at that exact moment, including "sys".
-
-parentdir = max(filter(__file__.startswith, sys.path), key=len)
-sys.path.remove(parentdir)
-del sys.modules['encodings']
-import encodings
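
The settrace mechanism that FullCoverageTracer relies on can be sketched in isolation, independent of coverage.py:

    import sys

    hits = []

    def tracer(frame, event, arg):
        # Record file and line for every event; returning the function keeps
        # line-level tracing enabled inside the new frame.
        hits.append((frame.f_code.co_filename, frame.f_lineno))
        return tracer

    def work():
        return sum(range(3))

    sys.settrace(tracer)
    work()                  # entering work() yields a 'call' event, then 'line' events
    sys.settrace(None)
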
diff --git a/contrib/python/coverage/py3/coverage/html.py b/contrib/python/coverage/py3/coverage/html.py
deleted file mode 100644
index 9d8e342716..0000000000
--- a/contrib/python/coverage/py3/coverage/html.py
+++ /dev/null
@@ -1,539 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""HTML reporting for coverage.py."""
-
-import datetime
-import json
-import os
-import re
-import shutil
-import sys
-
-import coverage
-from coverage import env
-from coverage.backward import iitems, SimpleNamespace, format_local_datetime
-from coverage.data import add_data_to_hash
-from coverage.files import flat_rootname
-from coverage.misc import CoverageException, ensure_dir, file_be_gone, Hasher, isolate_module
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-from coverage.templite import Templite
-
-os = isolate_module(os)
-
-
-# Static files are looked for in a list of places.
-STATIC_PATH = [
- # The place Debian puts system Javascript libraries.
- "/usr/share/javascript",
-
- # Our htmlfiles directory.
- os.path.join(os.path.dirname(__file__), "htmlfiles"),
-]
-
-
-def data_filename(fname, pkgdir=""):
- """Return the path to a data file of ours.
-
- The file is searched for on `STATIC_PATH`, and the first place it's found
- is returned.
-
- Each directory in `STATIC_PATH` is searched as-is, and also, if `pkgdir`
- is provided, at that sub-directory.
-
- """
- tried = []
- for static_dir in STATIC_PATH:
- static_filename = os.path.join(static_dir, fname)
- if os.path.exists(static_filename):
- return static_filename
- else:
- tried.append(static_filename)
- if pkgdir:
- static_filename = os.path.join(static_dir, pkgdir, fname)
- if os.path.exists(static_filename):
- return static_filename
- else:
- tried.append(static_filename)
- raise CoverageException(
- "Couldn't find static file %r from %r, tried: %r" % (fname, os.getcwd(), tried)
- )
-
-
-def get_htmlfiles_resource(name):
- import pkgutil
- return pkgutil.get_data(__package__, 'htmlfiles/' + name)
-
-
-def read_data(fname):
- """Return the contents of a data file of ours."""
- if getattr(sys, 'is_standalone_binary', False):
- res_buf = get_htmlfiles_resource(fname).decode()
- if res_buf is not None:
- return res_buf
-
- with open(data_filename(fname)) as data_file:
- return data_file.read()
-
-
-def write_html(fname, html):
- """Write `html` to `fname`, properly encoded."""
- html = re.sub(r"(\A\s+)|(\s+$)", "", html, flags=re.MULTILINE) + "\n"
- with open(fname, "wb") as fout:
- fout.write(html.encode('ascii', 'xmlcharrefreplace'))
-
-
-class HtmlDataGeneration(object):
- """Generate structured data to be turned into HTML reports."""
-
- EMPTY = "(empty)"
-
- def __init__(self, cov):
- self.coverage = cov
- self.config = self.coverage.config
- data = self.coverage.get_data()
- self.has_arcs = data.has_arcs()
- if self.config.show_contexts:
- if data.measured_contexts() == {""}:
- self.coverage._warn("No contexts were measured")
- data.set_query_contexts(self.config.report_contexts)
-
- def data_for_file(self, fr, analysis):
- """Produce the data needed for one file's report."""
- if self.has_arcs:
- missing_branch_arcs = analysis.missing_branch_arcs()
- arcs_executed = analysis.arcs_executed()
-
- if self.config.show_contexts:
- contexts_by_lineno = analysis.data.contexts_by_lineno(analysis.filename)
-
- lines = []
-
- for lineno, tokens in enumerate(fr.source_token_lines(), start=1):
- # Figure out how to mark this line.
- category = None
- short_annotations = []
- long_annotations = []
-
- if lineno in analysis.excluded:
- category = 'exc'
- elif lineno in analysis.missing:
- category = 'mis'
- elif self.has_arcs and lineno in missing_branch_arcs:
- category = 'par'
- for b in missing_branch_arcs[lineno]:
- if b < 0:
- short_annotations.append("exit")
- else:
- short_annotations.append(b)
- long_annotations.append(fr.missing_arc_description(lineno, b, arcs_executed))
- elif lineno in analysis.statements:
- category = 'run'
-
- contexts = contexts_label = None
- context_list = None
- if category and self.config.show_contexts:
- contexts = sorted(c or self.EMPTY for c in contexts_by_lineno[lineno])
- if contexts == [self.EMPTY]:
- contexts_label = self.EMPTY
- else:
- contexts_label = "{} ctx".format(len(contexts))
- context_list = contexts
-
- lines.append(SimpleNamespace(
- tokens=tokens,
- number=lineno,
- category=category,
- statement=(lineno in analysis.statements),
- contexts=contexts,
- contexts_label=contexts_label,
- context_list=context_list,
- short_annotations=short_annotations,
- long_annotations=long_annotations,
- ))
-
- file_data = SimpleNamespace(
- relative_filename=fr.relative_filename(),
- nums=analysis.numbers,
- lines=lines,
- )
-
- return file_data
-
-
-class HtmlReporter(object):
- """HTML reporting."""
-
- # These files will be copied from the htmlfiles directory to the output
- # directory.
- STATIC_FILES = [
- ("style.css", ""),
- ("jquery.min.js", "jquery"),
- ("jquery.ba-throttle-debounce.min.js", "jquery-throttle-debounce"),
- ("jquery.hotkeys.js", "jquery-hotkeys"),
- ("jquery.isonscreen.js", "jquery-isonscreen"),
- ("jquery.tablesorter.min.js", "jquery-tablesorter"),
- ("coverage_html.js", ""),
- ("keybd_closed.png", ""),
- ("keybd_open.png", ""),
- ("favicon_32.png", ""),
- ]
-
- def __init__(self, cov):
- self.coverage = cov
- self.config = self.coverage.config
- self.directory = self.config.html_dir
-
- self.skip_covered = self.config.html_skip_covered
- if self.skip_covered is None:
- self.skip_covered = self.config.skip_covered
- self.skip_empty = self.config.html_skip_empty
- if self.skip_empty is None:
- self.skip_empty = self.config.skip_empty
-
- title = self.config.html_title
- if env.PY2:
- title = title.decode("utf8")
-
- if self.config.extra_css:
- self.extra_css = os.path.basename(self.config.extra_css)
- else:
- self.extra_css = None
-
- self.data = self.coverage.get_data()
- self.has_arcs = self.data.has_arcs()
-
- self.file_summaries = []
- self.all_files_nums = []
- self.incr = IncrementalChecker(self.directory)
- self.datagen = HtmlDataGeneration(self.coverage)
- self.totals = Numbers()
-
- self.template_globals = {
- # Functions available in the templates.
- 'escape': escape,
- 'pair': pair,
- 'len': len,
-
- # Constants for this report.
- '__url__': coverage.__url__,
- '__version__': coverage.__version__,
- 'title': title,
- 'time_stamp': format_local_datetime(datetime.datetime.now()),
- 'extra_css': self.extra_css,
- 'has_arcs': self.has_arcs,
- 'show_contexts': self.config.show_contexts,
-
- # Constants for all reports.
- # These css classes determine which lines are highlighted by default.
- 'category': {
- 'exc': 'exc show_exc',
- 'mis': 'mis show_mis',
- 'par': 'par run show_par',
- 'run': 'run',
- }
- }
- self.pyfile_html_source = read_data("pyfile.html")
- self.source_tmpl = Templite(self.pyfile_html_source, self.template_globals)
-
- def report(self, morfs):
- """Generate an HTML report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- """
- # Read the status data and check that this run used the same
- # global data as the last run.
- self.incr.read()
- self.incr.check_global_data(self.config, self.pyfile_html_source)
-
- # Process all the files.
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.html_file(fr, analysis)
-
- if not self.all_files_nums:
- raise CoverageException("No data to report.")
-
- self.totals = sum(self.all_files_nums)
-
- # Write the index file.
- self.index_file()
-
- self.make_local_static_report_files()
- return self.totals.n_statements and self.totals.pc_covered
-
- def make_local_static_report_files(self):
- """Make local instances of static files for HTML report."""
- # The files we provide must always be copied.
- for static, pkgdir in self.STATIC_FILES:
- if getattr(sys, 'is_standalone_binary', False):
- data = get_htmlfiles_resource(static)
- if data is None:
- raise IOError("No such resource: " + static)
-
- with open(os.path.join(self.directory, static), "wb") as afile:
- afile.write(data)
- else:
- shutil.copyfile(
- data_filename(static, pkgdir),
- os.path.join(self.directory, static)
- )
-
- # The user may have extra CSS they want copied.
- if self.extra_css:
- shutil.copyfile(
- self.config.extra_css,
- os.path.join(self.directory, self.extra_css)
- )
-
- def html_file(self, fr, analysis):
- """Generate an HTML file for one source file."""
- rootname = flat_rootname(fr.relative_filename())
- html_filename = rootname + ".html"
- ensure_dir(self.directory)
- html_path = os.path.join(self.directory, html_filename)
-
- # Get the numbers for this file.
- nums = analysis.numbers
- self.all_files_nums.append(nums)
-
- if self.skip_covered:
- # Don't report on 100% files.
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if no_missing_lines and no_missing_branches:
- # If there's an existing file, remove it.
- file_be_gone(html_path)
- return
-
- if self.skip_empty:
- # Don't report on empty files.
- if nums.n_statements == 0:
- file_be_gone(html_path)
- return
-
- # Find out if the file on disk is already correct.
- if self.incr.can_skip_file(self.data, fr, rootname):
- self.file_summaries.append(self.incr.index_info(rootname))
- return
-
- # Write the HTML page for this file.
- file_data = self.datagen.data_for_file(fr, analysis)
- for ldata in file_data.lines:
- # Build the HTML for the line.
- html = []
- for tok_type, tok_text in ldata.tokens:
- if tok_type == "ws":
- html.append(escape(tok_text))
- else:
- tok_html = escape(tok_text) or '&nbsp;'
- html.append(
- u'<span class="{}">{}</span>'.format(tok_type, tok_html)
- )
- ldata.html = ''.join(html)
-
- if ldata.short_annotations:
- # 202F is NARROW NO-BREAK SPACE.
- # 219B is RIGHTWARDS ARROW WITH STROKE.
- ldata.annotate = u",&nbsp;&nbsp; ".join(
- u"{}&#x202F;&#x219B;&#x202F;{}".format(ldata.number, d)
- for d in ldata.short_annotations
- )
- else:
- ldata.annotate = None
-
- if ldata.long_annotations:
- longs = ldata.long_annotations
- if len(longs) == 1:
- ldata.annotate_long = longs[0]
- else:
- ldata.annotate_long = u"{:d} missed branches: {}".format(
- len(longs),
- u", ".join(
- u"{:d}) {}".format(num, ann_long)
- for num, ann_long in enumerate(longs, start=1)
- ),
- )
- else:
- ldata.annotate_long = None
-
- css_classes = []
- if ldata.category:
- css_classes.append(self.template_globals['category'][ldata.category])
- ldata.css_class = ' '.join(css_classes) or "pln"
-
- html = self.source_tmpl.render(file_data.__dict__)
- write_html(html_path, html)
-
- # Save this file's information for the index file.
- index_info = {
- 'nums': nums,
- 'html_filename': html_filename,
- 'relative_filename': fr.relative_filename(),
- }
- self.file_summaries.append(index_info)
- self.incr.set_index_info(rootname, index_info)
-
- def index_file(self):
- """Write the index.html file for this report."""
- index_tmpl = Templite(read_data("index.html"), self.template_globals)
-
- html = index_tmpl.render({
- 'files': self.file_summaries,
- 'totals': self.totals,
- })
-
- write_html(os.path.join(self.directory, "index.html"), html)
-
- # Write the latest hashes for next time.
- self.incr.write()
-
-
-class IncrementalChecker(object):
- """Logic and data to support incremental reporting."""
-
- STATUS_FILE = "status.json"
- STATUS_FORMAT = 2
-
- # pylint: disable=wrong-spelling-in-comment,useless-suppression
- # The data looks like:
- #
- # {
- # "format": 2,
- # "globals": "540ee119c15d52a68a53fe6f0897346d",
- # "version": "4.0a1",
- # "files": {
- # "cogapp___init__": {
- # "hash": "e45581a5b48f879f301c0f30bf77a50c",
- # "index": {
- # "html_filename": "cogapp___init__.html",
- # "relative_filename": "cogapp/__init__",
- # "nums": [ 1, 14, 0, 0, 0, 0, 0 ]
- # }
- # },
- # ...
- # "cogapp_whiteutils": {
- # "hash": "8504bb427fc488c4176809ded0277d51",
- # "index": {
- # "html_filename": "cogapp_whiteutils.html",
- # "relative_filename": "cogapp/whiteutils",
- # "nums": [ 1, 59, 0, 1, 28, 2, 2 ]
- # }
- # }
- # }
- # }
-
- def __init__(self, directory):
- self.directory = directory
- self.reset()
-
- def reset(self):
- """Initialize to empty. Causes all files to be reported."""
- self.globals = ''
- self.files = {}
-
- def read(self):
- """Read the information we stored last time."""
- usable = False
- try:
- status_file = os.path.join(self.directory, self.STATUS_FILE)
- with open(status_file) as fstatus:
- status = json.load(fstatus)
- except (IOError, ValueError):
- usable = False
- else:
- usable = True
- if status['format'] != self.STATUS_FORMAT:
- usable = False
- elif status['version'] != coverage.__version__:
- usable = False
-
- if usable:
- self.files = {}
- for filename, fileinfo in iitems(status['files']):
- fileinfo['index']['nums'] = Numbers(*fileinfo['index']['nums'])
- self.files[filename] = fileinfo
- self.globals = status['globals']
- else:
- self.reset()
-
- def write(self):
- """Write the current status."""
- status_file = os.path.join(self.directory, self.STATUS_FILE)
- files = {}
- for filename, fileinfo in iitems(self.files):
- fileinfo['index']['nums'] = fileinfo['index']['nums'].init_args()
- files[filename] = fileinfo
-
- status = {
- 'format': self.STATUS_FORMAT,
- 'version': coverage.__version__,
- 'globals': self.globals,
- 'files': files,
- }
- with open(status_file, "w") as fout:
- json.dump(status, fout, separators=(',', ':'))
-
- def check_global_data(self, *data):
- """Check the global data that can affect incremental reporting."""
- m = Hasher()
- for d in data:
- m.update(d)
- these_globals = m.hexdigest()
- if self.globals != these_globals:
- self.reset()
- self.globals = these_globals
-
- def can_skip_file(self, data, fr, rootname):
- """Can we skip reporting this file?
-
- `data` is a CoverageData object, `fr` is a `FileReporter`, and
- `rootname` is the name being used for the file.
- """
- m = Hasher()
- m.update(fr.source().encode('utf-8'))
- add_data_to_hash(data, fr.filename, m)
- this_hash = m.hexdigest()
-
- that_hash = self.file_hash(rootname)
-
- if this_hash == that_hash:
- # Nothing has changed to require the file to be reported again.
- return True
- else:
- self.set_file_hash(rootname, this_hash)
- return False
-
- def file_hash(self, fname):
- """Get the hash of `fname`'s contents."""
- return self.files.get(fname, {}).get('hash', '')
-
- def set_file_hash(self, fname, val):
- """Set the hash of `fname`'s contents."""
- self.files.setdefault(fname, {})['hash'] = val
-
- def index_info(self, fname):
- """Get the information for index.html for `fname`."""
- return self.files.get(fname, {}).get('index', {})
-
- def set_index_info(self, fname, info):
- """Set the information for index.html for `fname`."""
- self.files.setdefault(fname, {})['index'] = info
-
-
-# Helpers for templates and generating HTML
-
-def escape(t):
- """HTML-escape the text in `t`.
-
- This is only suitable for HTML text, not attributes.
-
- """
- # Convert HTML special chars into HTML entities.
- return t.replace("&", "&amp;").replace("<", "&lt;")
-
-
-def pair(ratio):
- """Format a pair of numbers so JavaScript can read them in an attribute."""
- return "%s %s" % ratio
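
The two template helpers at the end of html.py are easiest to see by example (values invented for illustration):

    from coverage.html import escape, pair

    # escape() handles text nodes only, not attribute values, per its docstring.
    escape("a < b & c")     # -> "a &lt; b &amp; c"

    # pair() renders a (numerator, denominator) tuple into the data-ratio
    # attribute that the report JavaScript later splits on a space.
    pair((12, 34))          # -> "12 34"
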
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/coverage_html.js b/contrib/python/coverage/py3/coverage/htmlfiles/coverage_html.js
deleted file mode 100644
index 27b49b36f9..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/coverage_html.js
+++ /dev/null
@@ -1,616 +0,0 @@
-// Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-// For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-// Coverage.py HTML report browser code.
-/*jslint browser: true, sloppy: true, vars: true, plusplus: true, maxerr: 50, indent: 4 */
-/*global coverage: true, document, window, $ */
-
-coverage = {};
-
-// Find all the elements with shortkey_* class, and use them to assign a shortcut key.
-coverage.assign_shortkeys = function () {
- $("*[class*='shortkey_']").each(function (i, e) {
- $.each($(e).attr("class").split(" "), function (i, c) {
- if (/^shortkey_/.test(c)) {
- $(document).bind('keydown', c.substr(9), function () {
- $(e).click();
- });
- }
- });
- });
-};
-
-// Create the events for the help panel.
-coverage.wire_up_help_panel = function () {
- $("#keyboard_icon").click(function () {
- // Show the help panel, and position it so the keyboard icon in the
- // panel is in the same place as the keyboard icon in the header.
- $(".help_panel").show();
- var koff = $("#keyboard_icon").offset();
- var poff = $("#panel_icon").position();
- $(".help_panel").offset({
- top: koff.top-poff.top,
- left: koff.left-poff.left
- });
- });
- $("#panel_icon").click(function () {
- $(".help_panel").hide();
- });
-};
-
-// Create the events for the filter box.
-coverage.wire_up_filter = function () {
- // Cache elements.
- var table = $("table.index");
- var table_rows = table.find("tbody tr");
- var table_row_names = table_rows.find("td.name a");
- var no_rows = $("#no_rows");
-
- // Create a duplicate table footer that we can modify with dynamic summed values.
- var table_footer = $("table.index tfoot tr");
- var table_dynamic_footer = table_footer.clone();
- table_dynamic_footer.attr('class', 'total_dynamic hidden');
- table_footer.after(table_dynamic_footer);
-
- // Observe filter keyevents.
- $("#filter").on("keyup change", $.debounce(150, function (event) {
- var filter_value = $(this).val();
-
- if (filter_value === "") {
- // Filter box is empty, remove all filtering.
- table_rows.removeClass("hidden");
-
- // Show standard footer, hide dynamic footer.
- table_footer.removeClass("hidden");
- table_dynamic_footer.addClass("hidden");
-
- // Hide placeholder, show table.
- if (no_rows.length > 0) {
- no_rows.hide();
- }
- table.show();
-
- }
- else {
- // Filter table items by value.
- var hidden = 0;
- var shown = 0;
-
- // Hide / show elements.
- $.each(table_row_names, function () {
- var element = $(this).parents("tr");
-
- if ($(this).text().indexOf(filter_value) === -1) {
- // hide
- element.addClass("hidden");
- hidden++;
- }
- else {
- // show
- element.removeClass("hidden");
- shown++;
- }
- });
-
- // Show placeholder if no rows will be displayed.
- if (no_rows.length > 0) {
- if (shown === 0) {
- // Show placeholder, hide table.
- no_rows.show();
- table.hide();
- }
- else {
- // Hide placeholder, show table.
- no_rows.hide();
- table.show();
- }
- }
-
- // Manage dynamic header:
- if (hidden > 0) {
- // Calculate new dynamic sum values based on visible rows.
- for (var column = 2; column < 20; column++) {
- // Calculate summed value.
- var cells = table_rows.find('td:nth-child(' + column + ')');
- if (!cells.length) {
- // No more columns...!
- break;
- }
-
- var sum = 0, numer = 0, denom = 0;
- $.each(cells.filter(':visible'), function () {
- var ratio = $(this).data("ratio");
- if (ratio) {
- var splitted = ratio.split(" ");
- numer += parseInt(splitted[0], 10);
- denom += parseInt(splitted[1], 10);
- }
- else {
- sum += parseInt(this.innerHTML, 10);
- }
- });
-
- // Get footer cell element.
- var footer_cell = table_dynamic_footer.find('td:nth-child(' + column + ')');
-
- // Set value into dynamic footer cell element.
- if (cells[0].innerHTML.indexOf('%') > -1) {
- // Percentage columns use the numerator and denominator,
- // and adapt to the number of decimal places.
- var match = /\.([0-9]+)/.exec(cells[0].innerHTML);
- var places = 0;
- if (match) {
- places = match[1].length;
- }
- var pct = numer * 100 / denom;
- footer_cell.text(pct.toFixed(places) + '%');
- }
- else {
- footer_cell.text(sum);
- }
- }
-
- // Hide standard footer, show dynamic footer.
- table_footer.addClass("hidden");
- table_dynamic_footer.removeClass("hidden");
- }
- else {
- // Show standard footer, hide dynamic footer.
- table_footer.removeClass("hidden");
- table_dynamic_footer.addClass("hidden");
- }
- }
- }));
-
- // Trigger change event on setup, to force filter on page refresh
- // (filter value may still be present).
- $("#filter").trigger("change");
-};
-
-// Loaded on index.html
-coverage.index_ready = function ($) {
- // Look for a localStorage item containing previous sort settings:
- var sort_list = [];
- var storage_name = "COVERAGE_INDEX_SORT";
- var stored_list = undefined;
- try {
- stored_list = localStorage.getItem(storage_name);
- } catch(err) {}
-
- if (stored_list) {
- sort_list = JSON.parse('[[' + stored_list + ']]');
- }
-
- // Create a new widget which exists only to save and restore
- // the sort order:
- $.tablesorter.addWidget({
- id: "persistentSort",
-
- // Format is called by the widget before displaying:
- format: function (table) {
- if (table.config.sortList.length === 0 && sort_list.length > 0) {
- // This table hasn't been sorted before - we'll use
- // our stored settings:
- $(table).trigger('sorton', [sort_list]);
- }
- else {
- // This is not the first load - something has
- // already defined sorting so we'll just update
- // our stored value to match:
- sort_list = table.config.sortList;
- }
- }
- });
-
- // Configure our tablesorter to handle the variable number of
- // columns produced depending on report options:
- var headers = [];
- var col_count = $("table.index > thead > tr > th").length;
-
- headers[0] = { sorter: 'text' };
- for (var i = 1; i < col_count-1; i++) {
- headers[i] = { sorter: 'digit' };
- }
- headers[col_count-1] = { sorter: 'percent' };
-
- // Enable the table sorter:
- $("table.index").tablesorter({
- widgets: ['persistentSort'],
- headers: headers
- });
-
- coverage.assign_shortkeys();
- coverage.wire_up_help_panel();
- coverage.wire_up_filter();
-
- // Watch for page unload events so we can save the final sort settings:
- $(window).on("unload", function () {
- try {
- localStorage.setItem(storage_name, sort_list.toString())
- } catch(err) {}
- });
-};
-
-// -- pyfile stuff --
-
-coverage.LINE_FILTERS_STORAGE = "COVERAGE_LINE_FILTERS";
-
-coverage.pyfile_ready = function ($) {
- // If we're directed to a particular line number, highlight the line.
- var frag = location.hash;
- if (frag.length > 2 && frag[1] === 't') {
- $(frag).addClass('highlight');
- coverage.set_sel(parseInt(frag.substr(2), 10));
- }
- else {
- coverage.set_sel(0);
- }
-
- $(document)
- .bind('keydown', 'j', coverage.to_next_chunk_nicely)
- .bind('keydown', 'k', coverage.to_prev_chunk_nicely)
- .bind('keydown', '0', coverage.to_top)
- .bind('keydown', '1', coverage.to_first_chunk)
- ;
-
- $(".button_toggle_run").click(function (evt) {coverage.toggle_lines(evt.target, "run");});
- $(".button_toggle_exc").click(function (evt) {coverage.toggle_lines(evt.target, "exc");});
- $(".button_toggle_mis").click(function (evt) {coverage.toggle_lines(evt.target, "mis");});
- $(".button_toggle_par").click(function (evt) {coverage.toggle_lines(evt.target, "par");});
-
- coverage.filters = undefined;
- try {
- coverage.filters = localStorage.getItem(coverage.LINE_FILTERS_STORAGE);
- } catch(err) {}
-
- if (coverage.filters) {
- coverage.filters = JSON.parse(coverage.filters);
- }
- else {
- coverage.filters = {run: false, exc: true, mis: true, par: true};
- }
-
- for (var cls in coverage.filters) {
- coverage.set_line_visibilty(cls, coverage.filters[cls]);
- }
-
- coverage.assign_shortkeys();
- coverage.wire_up_help_panel();
-
- coverage.init_scroll_markers();
-
- // Rebuild scroll markers when the window height changes.
- $(window).resize(coverage.build_scroll_markers);
-};
-
-coverage.toggle_lines = function (btn, cls) {
- var onoff = !$(btn).hasClass("show_" + cls);
- coverage.set_line_visibilty(cls, onoff);
- coverage.build_scroll_markers();
- coverage.filters[cls] = onoff;
- try {
- localStorage.setItem(coverage.LINE_FILTERS_STORAGE, JSON.stringify(coverage.filters));
- } catch(err) {}
-};
-
-coverage.set_line_visibilty = function (cls, onoff) {
- var show = "show_" + cls;
- var btn = $(".button_toggle_" + cls);
- if (onoff) {
- $("#source ." + cls).addClass(show);
- btn.addClass(show);
- }
- else {
- $("#source ." + cls).removeClass(show);
- btn.removeClass(show);
- }
-};
-
-// Return the nth line div.
-coverage.line_elt = function (n) {
- return $("#t" + n);
-};
-
-// Return the nth line number div.
-coverage.num_elt = function (n) {
- return $("#n" + n);
-};
-
-// Set the selection. b and e are line numbers.
-coverage.set_sel = function (b, e) {
- // The first line selected.
- coverage.sel_begin = b;
- // The next line not selected.
- coverage.sel_end = (e === undefined) ? b+1 : e;
-};
-
-coverage.to_top = function () {
- coverage.set_sel(0, 1);
- coverage.scroll_window(0);
-};
-
-coverage.to_first_chunk = function () {
- coverage.set_sel(0, 1);
- coverage.to_next_chunk();
-};
-
-// Return a string indicating what kind of chunk this line belongs to,
-// or null if not a chunk.
-coverage.chunk_indicator = function (line_elt) {
- var klass = line_elt.attr('class');
- if (klass) {
- var m = klass.match(/\bshow_\w+\b/);
- if (m) {
- return m[0];
- }
- }
- return null;
-};
-
-coverage.to_next_chunk = function () {
- var c = coverage;
-
- // Find the start of the next colored chunk.
- var probe = c.sel_end;
- var chunk_indicator, probe_line;
- while (true) {
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- chunk_indicator = c.chunk_indicator(probe_line);
- if (chunk_indicator) {
- break;
- }
- probe++;
- }
-
- // There's a next chunk, `probe` points to it.
- var begin = probe;
-
- // Find the end of this chunk.
- var next_indicator = chunk_indicator;
- while (next_indicator === chunk_indicator) {
- probe++;
- probe_line = c.line_elt(probe);
- next_indicator = c.chunk_indicator(probe_line);
- }
- c.set_sel(begin, probe);
- c.show_selection();
-};
-
-coverage.to_prev_chunk = function () {
- var c = coverage;
-
- // Find the end of the prev colored chunk.
- var probe = c.sel_begin-1;
- var probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- var chunk_indicator = c.chunk_indicator(probe_line);
- while (probe > 0 && !chunk_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- return;
- }
- chunk_indicator = c.chunk_indicator(probe_line);
- }
-
- // There's a prev chunk, `probe` points to its last line.
- var end = probe+1;
-
- // Find the beginning of this chunk.
- var prev_indicator = chunk_indicator;
- while (prev_indicator === chunk_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- prev_indicator = c.chunk_indicator(probe_line);
- }
- c.set_sel(probe+1, end);
- c.show_selection();
-};
-
-// Return the line number of the line nearest pixel position pos
-coverage.line_at_pos = function (pos) {
- var l1 = coverage.line_elt(1),
- l2 = coverage.line_elt(2),
- result;
- if (l1.length && l2.length) {
- var l1_top = l1.offset().top,
- line_height = l2.offset().top - l1_top,
- nlines = (pos - l1_top) / line_height;
- if (nlines < 1) {
- result = 1;
- }
- else {
- result = Math.ceil(nlines);
- }
- }
- else {
- result = 1;
- }
- return result;
-};
-
-// Returns 0, 1, or 2: how many of the two ends of the selection are on
-// the screen right now?
-coverage.selection_ends_on_screen = function () {
- if (coverage.sel_begin === 0) {
- return 0;
- }
-
- var top = coverage.line_elt(coverage.sel_begin);
- var next = coverage.line_elt(coverage.sel_end-1);
-
- return (
- (top.isOnScreen() ? 1 : 0) +
- (next.isOnScreen() ? 1 : 0)
- );
-};
-
-coverage.to_next_chunk_nicely = function () {
- coverage.finish_scrolling();
- if (coverage.selection_ends_on_screen() === 0) {
- // The selection is entirely off the screen: select the top line on
- // the screen.
- var win = $(window);
- coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop()));
- }
- coverage.to_next_chunk();
-};
-
-coverage.to_prev_chunk_nicely = function () {
- coverage.finish_scrolling();
- if (coverage.selection_ends_on_screen() === 0) {
- var win = $(window);
- coverage.select_line_or_chunk(coverage.line_at_pos(win.scrollTop() + win.height()));
- }
- coverage.to_prev_chunk();
-};
-
-// Select line number lineno, or if it is in a colored chunk, select the
-// entire chunk
-coverage.select_line_or_chunk = function (lineno) {
- var c = coverage;
- var probe_line = c.line_elt(lineno);
- if (probe_line.length === 0) {
- return;
- }
- var the_indicator = c.chunk_indicator(probe_line);
- if (the_indicator) {
- // The line is in a highlighted chunk.
- // Search backward for the first line.
- var probe = lineno;
- var indicator = the_indicator;
- while (probe > 0 && indicator === the_indicator) {
- probe--;
- probe_line = c.line_elt(probe);
- if (probe_line.length === 0) {
- break;
- }
- indicator = c.chunk_indicator(probe_line);
- }
- var begin = probe + 1;
-
- // Search forward for the last line.
- probe = lineno;
- indicator = the_indicator;
- while (indicator === the_indicator) {
- probe++;
- probe_line = c.line_elt(probe);
- indicator = c.chunk_indicator(probe_line);
- }
-
- coverage.set_sel(begin, probe);
- }
- else {
- coverage.set_sel(lineno);
- }
-};
-
-coverage.show_selection = function () {
- var c = coverage;
-
- // Highlight the lines in the chunk
- $(".linenos .highlight").removeClass("highlight");
- for (var probe = c.sel_begin; probe > 0 && probe < c.sel_end; probe++) {
- c.num_elt(probe).addClass("highlight");
- }
-
- c.scroll_to_selection();
-};
-
-coverage.scroll_to_selection = function () {
- // Scroll the page if the chunk isn't fully visible.
- if (coverage.selection_ends_on_screen() < 2) {
- // Need to move the page. The html,body trick makes it scroll in all
- // browsers, got it from http://stackoverflow.com/questions/3042651
- var top = coverage.line_elt(coverage.sel_begin);
- var top_pos = parseInt(top.offset().top, 10);
- coverage.scroll_window(top_pos - 30);
- }
-};
-
-coverage.scroll_window = function (to_pos) {
- $("html,body").animate({scrollTop: to_pos}, 200);
-};
-
-coverage.finish_scrolling = function () {
- $("html,body").stop(true, true);
-};
-
-coverage.init_scroll_markers = function () {
- var c = coverage;
- // Init some variables
- c.lines_len = $('#source p').length;
- c.body_h = $('body').height();
- c.header_h = $('div#header').height();
-
- // Build html
- c.build_scroll_markers();
-};
-
-coverage.build_scroll_markers = function () {
- var c = coverage,
- min_line_height = 3,
- max_line_height = 10,
- visible_window_h = $(window).height();
-
- c.lines_to_mark = $('#source').find('p.show_run, p.show_mis, p.show_exc, p.show_par');
- $('#scroll_marker').remove();
- // Don't build markers if the window has no scroll bar.
- if (c.body_h <= visible_window_h) {
- return;
- }
-
- $("body").append("<div id='scroll_marker'>&nbsp;</div>");
- var scroll_marker = $('#scroll_marker'),
- marker_scale = scroll_marker.height() / c.body_h,
- line_height = scroll_marker.height() / c.lines_len;
-
- // Line height must be between the extremes.
- if (line_height > min_line_height) {
- if (line_height > max_line_height) {
- line_height = max_line_height;
- }
- }
- else {
- line_height = min_line_height;
- }
-
- var previous_line = -99,
- last_mark,
- last_top,
- offsets = {};
-
- // Calculate line offsets outside loop to prevent relayouts
- c.lines_to_mark.each(function() {
- offsets[this.id] = $(this).offset().top;
- });
- c.lines_to_mark.each(function () {
- var id_name = $(this).attr('id'),
- line_top = Math.round(offsets[id_name] * marker_scale),
- line_number = parseInt(id_name.substring(1, id_name.length));
-
- if (line_number === previous_line + 1) {
- // If this line directly follows the previous marked line, just make that mark taller.
- last_mark.css({
- 'height': line_top + line_height - last_top
- });
- }
- else {
- // Add colored line in scroll_marker block.
- scroll_marker.append('<div id="m' + line_number + '" class="marker"></div>');
- last_mark = $('#m' + line_number);
- last_mark.css({
- 'height': line_height,
- 'top': line_top
- });
- last_top = line_top;
- }
-
- previous_line = line_number;
- });
-};
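
The dynamic-footer recalculation above amounts to summing the visible rows' "numer denom" pairs and reformatting the percentage; in Python terms (invented numbers):

    # Each visible row carries a "numer denom" pair, as produced by pair() in html.py.
    visible_ratios = ["12 20", "7 10"]
    numer = sum(int(r.split()[0]) for r in visible_ratios)
    denom = sum(int(r.split()[1]) for r in visible_ratios)
    print("%.0f%%" % (numer * 100.0 / denom))    # -> 63%
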
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/favicon_32.png b/contrib/python/coverage/py3/coverage/htmlfiles/favicon_32.png
deleted file mode 100644
index 8649f0475d..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/favicon_32.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/index.html b/contrib/python/coverage/py3/coverage/htmlfiles/index.html
deleted file mode 100644
index 983db06125..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/index.html
+++ /dev/null
@@ -1,119 +0,0 @@
-{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
-{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
-
-<!DOCTYPE html>
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- <title>{{ title|escape }}</title>
- <link rel="icon" sizes="32x32" href="favicon_32.png">
- <link rel="stylesheet" href="style.css" type="text/css">
- {% if extra_css %}
- <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
- {% endif %}
- <script type="text/javascript" src="jquery.min.js"></script>
- <script type="text/javascript" src="jquery.ba-throttle-debounce.min.js"></script>
- <script type="text/javascript" src="jquery.tablesorter.min.js"></script>
- <script type="text/javascript" src="jquery.hotkeys.js"></script>
- <script type="text/javascript" src="coverage_html.js"></script>
- <script type="text/javascript">
- jQuery(document).ready(coverage.index_ready);
- </script>
-</head>
-<body class="indexfile">
-
-<div id="header">
- <div class="content">
- <h1>{{ title|escape }}:
- <span class="pc_cov">{{totals.pc_covered_str}}%</span>
- </h1>
-
- <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
-
- <form id="filter_container">
- <input id="filter" type="text" value="" placeholder="filter..." />
- </form>
- </div>
-</div>
-
-<div class="help_panel">
- <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
- <p class="legend">Hot-keys on this page</p>
- <div>
- <p class="keyhelp">
- <span class="key">n</span>
- <span class="key">s</span>
- <span class="key">m</span>
- <span class="key">x</span>
- {% if has_arcs %}
- <span class="key">b</span>
- <span class="key">p</span>
- {% endif %}
- <span class="key">c</span> &nbsp; change column sorting
- </p>
- </div>
-</div>
-
-<div id="index">
- <table class="index">
- <thead>
-        {# The title="" attr doesn't work in Safari. #}
- <tr class="tablehead" title="Click to sort">
- <th class="name left headerSortDown shortkey_n">Module</th>
- <th class="shortkey_s">statements</th>
- <th class="shortkey_m">missing</th>
- <th class="shortkey_x">excluded</th>
- {% if has_arcs %}
- <th class="shortkey_b">branches</th>
- <th class="shortkey_p">partial</th>
- {% endif %}
- <th class="right shortkey_c">coverage</th>
- </tr>
- </thead>
- {# HTML syntax requires thead, tfoot, tbody #}
- <tfoot>
- <tr class="total">
- <td class="name left">Total</td>
- <td>{{totals.n_statements}}</td>
- <td>{{totals.n_missing}}</td>
- <td>{{totals.n_excluded}}</td>
- {% if has_arcs %}
- <td>{{totals.n_branches}}</td>
- <td>{{totals.n_partial_branches}}</td>
- {% endif %}
- <td class="right" data-ratio="{{totals.ratio_covered|pair}}">{{totals.pc_covered_str}}%</td>
- </tr>
- </tfoot>
- <tbody>
- {% for file in files %}
- <tr class="file">
- <td class="name left"><a href="{{file.html_filename}}">{{file.relative_filename}}</a></td>
- <td>{{file.nums.n_statements}}</td>
- <td>{{file.nums.n_missing}}</td>
- <td>{{file.nums.n_excluded}}</td>
- {% if has_arcs %}
- <td>{{file.nums.n_branches}}</td>
- <td>{{file.nums.n_partial_branches}}</td>
- {% endif %}
- <td class="right" data-ratio="{{file.nums.ratio_covered|pair}}">{{file.nums.pc_covered_str}}%</td>
- </tr>
- {% endfor %}
- </tbody>
- </table>
-
- <p id="no_rows">
- No items found using the specified filter.
- </p>
-</div>
-
-<div id="footer">
- <div class="content">
- <p>
- <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
- created at {{ time_stamp }}
- </p>
- </div>
-</div>
-
-</body>
-</html>
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js b/contrib/python/coverage/py3/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
deleted file mode 100644
index 648fe5d3c2..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
+++ /dev/null
@@ -1,9 +0,0 @@
-/*
- * jQuery throttle / debounce - v1.1 - 3/7/2010
- * http://benalman.com/projects/jquery-throttle-debounce-plugin/
- *
- * Copyright (c) 2010 "Cowboy" Ben Alman
- * Dual licensed under the MIT and GPL licenses.
- * http://benalman.com/about/license/
- */
-(function(b,c){var $=b.jQuery||b.Cowboy||(b.Cowboy={}),a;$.throttle=a=function(e,f,j,i){var h,d=0;if(typeof f!=="boolean"){i=j;j=f;f=c}function g(){var o=this,m=+new Date()-d,n=arguments;function l(){d=+new Date();j.apply(o,n)}function k(){h=c}if(i&&!h){l()}h&&clearTimeout(h);if(i===c&&m>e){l()}else{if(f!==true){h=setTimeout(i?k:l,i===c?e-m:e)}}}if($.guid){g.guid=j.guid=j.guid||$.guid++}return g};$.debounce=function(d,e,f){return f===c?a(d,e,false):a(d,f,e!==false)}})(this);
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.hotkeys.js b/contrib/python/coverage/py3/coverage/htmlfiles/jquery.hotkeys.js
deleted file mode 100644
index 09b21e03c7..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.hotkeys.js
+++ /dev/null
@@ -1,99 +0,0 @@
-/*
- * jQuery Hotkeys Plugin
- * Copyright 2010, John Resig
- * Dual licensed under the MIT or GPL Version 2 licenses.
- *
- * Based upon the plugin by Tzury Bar Yochay:
- * http://github.com/tzuryby/hotkeys
- *
- * Original idea by:
- * Binny V A, http://www.openjs.com/scripts/events/keyboard_shortcuts/
-*/
-
-(function(jQuery){
-
- jQuery.hotkeys = {
- version: "0.8",
-
- specialKeys: {
- 8: "backspace", 9: "tab", 13: "return", 16: "shift", 17: "ctrl", 18: "alt", 19: "pause",
- 20: "capslock", 27: "esc", 32: "space", 33: "pageup", 34: "pagedown", 35: "end", 36: "home",
- 37: "left", 38: "up", 39: "right", 40: "down", 45: "insert", 46: "del",
- 96: "0", 97: "1", 98: "2", 99: "3", 100: "4", 101: "5", 102: "6", 103: "7",
- 104: "8", 105: "9", 106: "*", 107: "+", 109: "-", 110: ".", 111 : "/",
- 112: "f1", 113: "f2", 114: "f3", 115: "f4", 116: "f5", 117: "f6", 118: "f7", 119: "f8",
- 120: "f9", 121: "f10", 122: "f11", 123: "f12", 144: "numlock", 145: "scroll", 191: "/", 224: "meta"
- },
-
- shiftNums: {
- "`": "~", "1": "!", "2": "@", "3": "#", "4": "$", "5": "%", "6": "^", "7": "&",
- "8": "*", "9": "(", "0": ")", "-": "_", "=": "+", ";": ": ", "'": "\"", ",": "<",
- ".": ">", "/": "?", "\\": "|"
- }
- };
-
- function keyHandler( handleObj ) {
- // Only care when a possible input has been specified
- if ( typeof handleObj.data !== "string" ) {
- return;
- }
-
- var origHandler = handleObj.handler,
- keys = handleObj.data.toLowerCase().split(" ");
-
- handleObj.handler = function( event ) {
- // Don't fire in text-accepting inputs that we didn't directly bind to
- if ( this !== event.target && (/textarea|select/i.test( event.target.nodeName ) ||
- event.target.type === "text") ) {
- return;
- }
-
- // Keypress represents characters, not special keys
- var special = event.type !== "keypress" && jQuery.hotkeys.specialKeys[ event.which ],
- character = String.fromCharCode( event.which ).toLowerCase(),
- key, modif = "", possible = {};
-
- // check combinations (alt|ctrl|shift+anything)
- if ( event.altKey && special !== "alt" ) {
- modif += "alt+";
- }
-
- if ( event.ctrlKey && special !== "ctrl" ) {
- modif += "ctrl+";
- }
-
- // TODO: Need to make sure this works consistently across platforms
- if ( event.metaKey && !event.ctrlKey && special !== "meta" ) {
- modif += "meta+";
- }
-
- if ( event.shiftKey && special !== "shift" ) {
- modif += "shift+";
- }
-
- if ( special ) {
- possible[ modif + special ] = true;
-
- } else {
- possible[ modif + character ] = true;
- possible[ modif + jQuery.hotkeys.shiftNums[ character ] ] = true;
-
- // "$" can be triggered as "Shift+4" or "Shift+$" or just "$"
- if ( modif === "shift+" ) {
- possible[ jQuery.hotkeys.shiftNums[ character ] ] = true;
- }
- }
-
- for ( var i = 0, l = keys.length; i < l; i++ ) {
- if ( possible[ keys[i] ] ) {
- return origHandler.apply( this, arguments );
- }
- }
- };
- }
-
- jQuery.each([ "keydown", "keyup", "keypress" ], function() {
- jQuery.event.special[ this ] = { add: keyHandler };
- });
-
-})( jQuery );
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.isonscreen.js b/contrib/python/coverage/py3/coverage/htmlfiles/jquery.isonscreen.js
deleted file mode 100644
index 0182ebd213..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.isonscreen.js
+++ /dev/null
@@ -1,53 +0,0 @@
-/* Copyright (c) 2010
- * @author Laurence Wheway
- * Dual licensed under the MIT (http://www.opensource.org/licenses/mit-license.php)
- * and GPL (http://www.opensource.org/licenses/gpl-license.php) licenses.
- *
- * @version 1.2.0
- */
-(function($) {
- jQuery.extend({
- isOnScreen: function(box, container) {
-        // Ensure numbers come in as integers (not strings) and remove 'px' if it's there
- for(var i in box){box[i] = parseFloat(box[i])};
- for(var i in container){container[i] = parseFloat(container[i])};
-
- if(!container){
- container = {
- left: $(window).scrollLeft(),
- top: $(window).scrollTop(),
- width: $(window).width(),
- height: $(window).height()
- }
- }
-
- if( box.left+box.width-container.left > 0 &&
- box.left < container.width+container.left &&
- box.top+box.height-container.top > 0 &&
- box.top < container.height+container.top
- ) return true;
- return false;
- }
- })
-
-
- jQuery.fn.isOnScreen = function (container) {
- for(var i in container){container[i] = parseFloat(container[i])};
-
- if(!container){
- container = {
- left: $(window).scrollLeft(),
- top: $(window).scrollTop(),
- width: $(window).width(),
- height: $(window).height()
- }
- }
-
- if( $(this).offset().left+$(this).width()-container.left > 0 &&
- $(this).offset().left < container.width+container.left &&
- $(this).offset().top+$(this).height()-container.top > 0 &&
- $(this).offset().top < container.height+container.top
- ) return true;
- return false;
- }
-})(jQuery);
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.min.js b/contrib/python/coverage/py3/coverage/htmlfiles/jquery.min.js
deleted file mode 100644
index d1608e37ff..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.min.js
+++ /dev/null
@@ -1,4 +0,0 @@
-/*! jQuery v1.11.1 | (c) 2005, 2014 jQuery Foundation, Inc. | jquery.org/license */
-!function(a,b){"object"==typeof module&&"object"==typeof module.exports?module.exports=a.document?b(a,!0):function(a){if(!a.document)throw new Error("jQuery requires a window with a document");return b(a)}:b(a)}("undefined"!=typeof window?window:this,function(a,b){var c=[],d=c.slice,e=c.concat,f=c.push,g=c.indexOf,h={},i=h.toString,j=h.hasOwnProperty,k={},l="1.11.1",m=function(a,b){return new m.fn.init(a,b)},n=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,o=/^-ms-/,p=/-([\da-z])/gi,q=function(a,b){return b.toUpperCase()};m.fn=m.prototype={jquery:l,constructor:m,selector:"",length:0,toArray:function(){return d.call(this)},get:function(a){return null!=a?0>a?this[a+this.length]:this[a]:d.call(this)},pushStack:function(a){var b=m.merge(this.constructor(),a);return b.prevObject=this,b.context=this.context,b},each:function(a,b){return m.each(this,a,b)},map:function(a){return this.pushStack(m.map(this,function(b,c){return a.call(b,c,b)}))},slice:function(){return this.pushStack(d.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(a){var b=this.length,c=+a+(0>a?b:0);return this.pushStack(c>=0&&b>c?[this[c]]:[])},end:function(){return this.prevObject||this.constructor(null)},push:f,sort:c.sort,splice:c.splice},m.extend=m.fn.extend=function(){var a,b,c,d,e,f,g=arguments[0]||{},h=1,i=arguments.length,j=!1;for("boolean"==typeof g&&(j=g,g=arguments[h]||{},h++),"object"==typeof g||m.isFunction(g)||(g={}),h===i&&(g=this,h--);i>h;h++)if(null!=(e=arguments[h]))for(d in e)a=g[d],c=e[d],g!==c&&(j&&c&&(m.isPlainObject(c)||(b=m.isArray(c)))?(b?(b=!1,f=a&&m.isArray(a)?a:[]):f=a&&m.isPlainObject(a)?a:{},g[d]=m.extend(j,f,c)):void 0!==c&&(g[d]=c));return g},m.extend({expando:"jQuery"+(l+Math.random()).replace(/\D/g,""),isReady:!0,error:function(a){throw new Error(a)},noop:function(){},isFunction:function(a){return"function"===m.type(a)},isArray:Array.isArray||function(a){return"array"===m.type(a)},isWindow:function(a){return null!=a&&a==a.window},isNumeric:function(a){return!m.isArray(a)&&a-parseFloat(a)>=0},isEmptyObject:function(a){var b;for(b in a)return!1;return!0},isPlainObject:function(a){var b;if(!a||"object"!==m.type(a)||a.nodeType||m.isWindow(a))return!1;try{if(a.constructor&&!j.call(a,"constructor")&&!j.call(a.constructor.prototype,"isPrototypeOf"))return!1}catch(c){return!1}if(k.ownLast)for(b in a)return j.call(a,b);for(b in a);return void 0===b||j.call(a,b)},type:function(a){return null==a?a+"":"object"==typeof a||"function"==typeof a?h[i.call(a)]||"object":typeof a},globalEval:function(b){b&&m.trim(b)&&(a.execScript||function(b){a.eval.call(a,b)})(b)},camelCase:function(a){return a.replace(o,"ms-").replace(p,q)},nodeName:function(a,b){return a.nodeName&&a.nodeName.toLowerCase()===b.toLowerCase()},each:function(a,b,c){var d,e=0,f=a.length,g=r(a);if(c){if(g){for(;f>e;e++)if(d=b.apply(a[e],c),d===!1)break}else for(e in a)if(d=b.apply(a[e],c),d===!1)break}else if(g){for(;f>e;e++)if(d=b.call(a[e],e,a[e]),d===!1)break}else for(e in a)if(d=b.call(a[e],e,a[e]),d===!1)break;return a},trim:function(a){return null==a?"":(a+"").replace(n,"")},makeArray:function(a,b){var c=b||[];return null!=a&&(r(Object(a))?m.merge(c,"string"==typeof a?[a]:a):f.call(c,a)),c},inArray:function(a,b,c){var d;if(b){if(g)return g.call(b,a,c);for(d=b.length,c=c?0>c?Math.max(0,d+c):c:0;d>c;c++)if(c in b&&b[c]===a)return c}return-1},merge:function(a,b){var c=+b.length,d=0,e=a.length;while(c>d)a[e++]=b[d++];if(c!==c)while(void 0!==b[d])a[e++]=b[d++];return 
a.length=e,a},grep:function(a,b,c){for(var d,e=[],f=0,g=a.length,h=!c;g>f;f++)d=!b(a[f],f),d!==h&&e.push(a[f]);return e},map:function(a,b,c){var d,f=0,g=a.length,h=r(a),i=[];if(h)for(;g>f;f++)d=b(a[f],f,c),null!=d&&i.push(d);else for(f in a)d=b(a[f],f,c),null!=d&&i.push(d);return e.apply([],i)},guid:1,proxy:function(a,b){var c,e,f;return"string"==typeof b&&(f=a[b],b=a,a=f),m.isFunction(a)?(c=d.call(arguments,2),e=function(){return a.apply(b||this,c.concat(d.call(arguments)))},e.guid=a.guid=a.guid||m.guid++,e):void 0},now:function(){return+new Date},support:k}),m.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(a,b){h["[object "+b+"]"]=b.toLowerCase()});function r(a){var b=a.length,c=m.type(a);return"function"===c||m.isWindow(a)?!1:1===a.nodeType&&b?!0:"array"===c||0===b||"number"==typeof b&&b>0&&b-1 in a}var s=function(a){var b,c,d,e,f,g,h,i,j,k,l,m,n,o,p,q,r,s,t,u="sizzle"+-new Date,v=a.document,w=0,x=0,y=gb(),z=gb(),A=gb(),B=function(a,b){return a===b&&(l=!0),0},C="undefined",D=1<<31,E={}.hasOwnProperty,F=[],G=F.pop,H=F.push,I=F.push,J=F.slice,K=F.indexOf||function(a){for(var b=0,c=this.length;c>b;b++)if(this[b]===a)return b;return-1},L="checked|selected|async|autofocus|autoplay|controls|defer|disabled|hidden|ismap|loop|multiple|open|readonly|required|scoped",M="[\\x20\\t\\r\\n\\f]",N="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=N.replace("w","w#"),P="\\["+M+"*("+N+")(?:"+M+"*([*^$|!~]?=)"+M+"*(?:'((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\"|("+O+"))|)"+M+"*\\]",Q=":("+N+")(?:\\((('((?:\\\\.|[^\\\\'])*)'|\"((?:\\\\.|[^\\\\\"])*)\")|((?:\\\\.|[^\\\\()[\\]]|"+P+")*)|.*)\\)|)",R=new RegExp("^"+M+"+|((?:^|[^\\\\])(?:\\\\.)*)"+M+"+$","g"),S=new RegExp("^"+M+"*,"+M+"*"),T=new RegExp("^"+M+"*([>+~]|"+M+")"+M+"*"),U=new RegExp("="+M+"*([^\\]'\"]*?)"+M+"*\\]","g"),V=new RegExp(Q),W=new RegExp("^"+O+"$"),X={ID:new RegExp("^#("+N+")"),CLASS:new RegExp("^\\.("+N+")"),TAG:new RegExp("^("+N.replace("w","w*")+")"),ATTR:new RegExp("^"+P),PSEUDO:new RegExp("^"+Q),CHILD:new RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+M+"*(even|odd|(([+-]|)(\\d*)n|)"+M+"*(?:([+-]|)"+M+"*(\\d+)|))"+M+"*\\)|)","i"),bool:new RegExp("^(?:"+L+")$","i"),needsContext:new RegExp("^"+M+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+M+"*((?:-\\d)?\\d*)"+M+"*\\)|)(?=[^-]|$)","i")},Y=/^(?:input|select|textarea|button)$/i,Z=/^h\d$/i,$=/^[^{]+\{\s*\[native \w/,_=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,ab=/[+~]/,bb=/'|\\/g,cb=new RegExp("\\\\([\\da-f]{1,6}"+M+"?|("+M+")|.)","ig"),db=function(a,b,c){var d="0x"+b-65536;return d!==d||c?b:0>d?String.fromCharCode(d+65536):String.fromCharCode(d>>10|55296,1023&d|56320)};try{I.apply(F=J.call(v.childNodes),v.childNodes),F[v.childNodes.length].nodeType}catch(eb){I={apply:F.length?function(a,b){H.apply(a,J.call(b))}:function(a,b){var c=a.length,d=0;while(a[c++]=b[d++]);a.length=c-1}}}function fb(a,b,d,e){var f,h,j,k,l,o,r,s,w,x;if((b?b.ownerDocument||b:v)!==n&&m(b),b=b||n,d=d||[],!a||"string"!=typeof a)return d;if(1!==(k=b.nodeType)&&9!==k)return[];if(p&&!e){if(f=_.exec(a))if(j=f[1]){if(9===k){if(h=b.getElementById(j),!h||!h.parentNode)return d;if(h.id===j)return d.push(h),d}else if(b.ownerDocument&&(h=b.ownerDocument.getElementById(j))&&t(b,h)&&h.id===j)return d.push(h),d}else{if(f[2])return I.apply(d,b.getElementsByTagName(a)),d;if((j=f[3])&&c.getElementsByClassName&&b.getElementsByClassName)return 
I.apply(d,b.getElementsByClassName(j)),d}if(c.qsa&&(!q||!q.test(a))){if(s=r=u,w=b,x=9===k&&a,1===k&&"object"!==b.nodeName.toLowerCase()){o=g(a),(r=b.getAttribute("id"))?s=r.replace(bb,"\\$&"):b.setAttribute("id",s),s="[id='"+s+"'] ",l=o.length;while(l--)o[l]=s+qb(o[l]);w=ab.test(a)&&ob(b.parentNode)||b,x=o.join(",")}if(x)try{return I.apply(d,w.querySelectorAll(x)),d}catch(y){}finally{r||b.removeAttribute("id")}}}return i(a.replace(R,"$1"),b,d,e)}function gb(){var a=[];function b(c,e){return a.push(c+" ")>d.cacheLength&&delete b[a.shift()],b[c+" "]=e}return b}function hb(a){return a[u]=!0,a}function ib(a){var b=n.createElement("div");try{return!!a(b)}catch(c){return!1}finally{b.parentNode&&b.parentNode.removeChild(b),b=null}}function jb(a,b){var c=a.split("|"),e=a.length;while(e--)d.attrHandle[c[e]]=b}function kb(a,b){var c=b&&a,d=c&&1===a.nodeType&&1===b.nodeType&&(~b.sourceIndex||D)-(~a.sourceIndex||D);if(d)return d;if(c)while(c=c.nextSibling)if(c===b)return-1;return a?1:-1}function lb(a){return function(b){var c=b.nodeName.toLowerCase();return"input"===c&&b.type===a}}function mb(a){return function(b){var c=b.nodeName.toLowerCase();return("input"===c||"button"===c)&&b.type===a}}function nb(a){return hb(function(b){return b=+b,hb(function(c,d){var e,f=a([],c.length,b),g=f.length;while(g--)c[e=f[g]]&&(c[e]=!(d[e]=c[e]))})})}function ob(a){return a&&typeof a.getElementsByTagName!==C&&a}c=fb.support={},f=fb.isXML=function(a){var b=a&&(a.ownerDocument||a).documentElement;return b?"HTML"!==b.nodeName:!1},m=fb.setDocument=function(a){var b,e=a?a.ownerDocument||a:v,g=e.defaultView;return e!==n&&9===e.nodeType&&e.documentElement?(n=e,o=e.documentElement,p=!f(e),g&&g!==g.top&&(g.addEventListener?g.addEventListener("unload",function(){m()},!1):g.attachEvent&&g.attachEvent("onunload",function(){m()})),c.attributes=ib(function(a){return a.className="i",!a.getAttribute("className")}),c.getElementsByTagName=ib(function(a){return a.appendChild(e.createComment("")),!a.getElementsByTagName("*").length}),c.getElementsByClassName=$.test(e.getElementsByClassName)&&ib(function(a){return a.innerHTML="<div class='a'></div><div class='a i'></div>",a.firstChild.className="i",2===a.getElementsByClassName("i").length}),c.getById=ib(function(a){return o.appendChild(a).id=u,!e.getElementsByName||!e.getElementsByName(u).length}),c.getById?(d.find.ID=function(a,b){if(typeof b.getElementById!==C&&p){var c=b.getElementById(a);return c&&c.parentNode?[c]:[]}},d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){return a.getAttribute("id")===b}}):(delete d.find.ID,d.filter.ID=function(a){var b=a.replace(cb,db);return function(a){var c=typeof a.getAttributeNode!==C&&a.getAttributeNode("id");return c&&c.value===b}}),d.find.TAG=c.getElementsByTagName?function(a,b){return typeof b.getElementsByTagName!==C?b.getElementsByTagName(a):void 0}:function(a,b){var c,d=[],e=0,f=b.getElementsByTagName(a);if("*"===a){while(c=f[e++])1===c.nodeType&&d.push(c);return d}return f},d.find.CLASS=c.getElementsByClassName&&function(a,b){return typeof b.getElementsByClassName!==C&&p?b.getElementsByClassName(a):void 0},r=[],q=[],(c.qsa=$.test(e.querySelectorAll))&&(ib(function(a){a.innerHTML="<select msallowclip=''><option selected=''></option></select>",a.querySelectorAll("[msallowclip^='']").length&&q.push("[*^$]="+M+"*(?:''|\"\")"),a.querySelectorAll("[selected]").length||q.push("\\["+M+"*(?:value|"+L+")"),a.querySelectorAll(":checked").length||q.push(":checked")}),ib(function(a){var 
b=e.createElement("input");b.setAttribute("type","hidden"),a.appendChild(b).setAttribute("name","D"),a.querySelectorAll("[name=d]").length&&q.push("name"+M+"*[*^$|!~]?="),a.querySelectorAll(":enabled").length||q.push(":enabled",":disabled"),a.querySelectorAll("*,:x"),q.push(",.*:")})),(c.matchesSelector=$.test(s=o.matches||o.webkitMatchesSelector||o.mozMatchesSelector||o.oMatchesSelector||o.msMatchesSelector))&&ib(function(a){c.disconnectedMatch=s.call(a,"div"),s.call(a,"[s!='']:x"),r.push("!=",Q)}),q=q.length&&new RegExp(q.join("|")),r=r.length&&new RegExp(r.join("|")),b=$.test(o.compareDocumentPosition),t=b||$.test(o.contains)?function(a,b){var c=9===a.nodeType?a.documentElement:a,d=b&&b.parentNode;return a===d||!(!d||1!==d.nodeType||!(c.contains?c.contains(d):a.compareDocumentPosition&&16&a.compareDocumentPosition(d)))}:function(a,b){if(b)while(b=b.parentNode)if(b===a)return!0;return!1},B=b?function(a,b){if(a===b)return l=!0,0;var d=!a.compareDocumentPosition-!b.compareDocumentPosition;return d?d:(d=(a.ownerDocument||a)===(b.ownerDocument||b)?a.compareDocumentPosition(b):1,1&d||!c.sortDetached&&b.compareDocumentPosition(a)===d?a===e||a.ownerDocument===v&&t(v,a)?-1:b===e||b.ownerDocument===v&&t(v,b)?1:k?K.call(k,a)-K.call(k,b):0:4&d?-1:1)}:function(a,b){if(a===b)return l=!0,0;var c,d=0,f=a.parentNode,g=b.parentNode,h=[a],i=[b];if(!f||!g)return a===e?-1:b===e?1:f?-1:g?1:k?K.call(k,a)-K.call(k,b):0;if(f===g)return kb(a,b);c=a;while(c=c.parentNode)h.unshift(c);c=b;while(c=c.parentNode)i.unshift(c);while(h[d]===i[d])d++;return d?kb(h[d],i[d]):h[d]===v?-1:i[d]===v?1:0},e):n},fb.matches=function(a,b){return fb(a,null,null,b)},fb.matchesSelector=function(a,b){if((a.ownerDocument||a)!==n&&m(a),b=b.replace(U,"='$1']"),!(!c.matchesSelector||!p||r&&r.test(b)||q&&q.test(b)))try{var d=s.call(a,b);if(d||c.disconnectedMatch||a.document&&11!==a.document.nodeType)return d}catch(e){}return fb(b,n,null,[a]).length>0},fb.contains=function(a,b){return(a.ownerDocument||a)!==n&&m(a),t(a,b)},fb.attr=function(a,b){(a.ownerDocument||a)!==n&&m(a);var e=d.attrHandle[b.toLowerCase()],f=e&&E.call(d.attrHandle,b.toLowerCase())?e(a,b,!p):void 0;return void 0!==f?f:c.attributes||!p?a.getAttribute(b):(f=a.getAttributeNode(b))&&f.specified?f.value:null},fb.error=function(a){throw new Error("Syntax error, unrecognized expression: "+a)},fb.uniqueSort=function(a){var b,d=[],e=0,f=0;if(l=!c.detectDuplicates,k=!c.sortStable&&a.slice(0),a.sort(B),l){while(b=a[f++])b===a[f]&&(e=d.push(f));while(e--)a.splice(d[e],1)}return k=null,a},e=fb.getText=function(a){var b,c="",d=0,f=a.nodeType;if(f){if(1===f||9===f||11===f){if("string"==typeof a.textContent)return a.textContent;for(a=a.firstChild;a;a=a.nextSibling)c+=e(a)}else if(3===f||4===f)return a.nodeValue}else while(b=a[d++])c+=e(b);return c},d=fb.selectors={cacheLength:50,createPseudo:hb,match:X,attrHandle:{},find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(a){return a[1]=a[1].replace(cb,db),a[3]=(a[3]||a[4]||a[5]||"").replace(cb,db),"~="===a[2]&&(a[3]=" "+a[3]+" "),a.slice(0,4)},CHILD:function(a){return a[1]=a[1].toLowerCase(),"nth"===a[1].slice(0,3)?(a[3]||fb.error(a[0]),a[4]=+(a[4]?a[5]+(a[6]||1):2*("even"===a[3]||"odd"===a[3])),a[5]=+(a[7]+a[8]||"odd"===a[3])):a[3]&&fb.error(a[0]),a},PSEUDO:function(a){var b,c=!a[6]&&a[2];return 
X.CHILD.test(a[0])?null:(a[3]?a[2]=a[4]||a[5]||"":c&&V.test(c)&&(b=g(c,!0))&&(b=c.indexOf(")",c.length-b)-c.length)&&(a[0]=a[0].slice(0,b),a[2]=c.slice(0,b)),a.slice(0,3))}},filter:{TAG:function(a){var b=a.replace(cb,db).toLowerCase();return"*"===a?function(){return!0}:function(a){return a.nodeName&&a.nodeName.toLowerCase()===b}},CLASS:function(a){var b=y[a+" "];return b||(b=new RegExp("(^|"+M+")"+a+"("+M+"|$)"))&&y(a,function(a){return b.test("string"==typeof a.className&&a.className||typeof a.getAttribute!==C&&a.getAttribute("class")||"")})},ATTR:function(a,b,c){return function(d){var e=fb.attr(d,a);return null==e?"!="===b:b?(e+="","="===b?e===c:"!="===b?e!==c:"^="===b?c&&0===e.indexOf(c):"*="===b?c&&e.indexOf(c)>-1:"$="===b?c&&e.slice(-c.length)===c:"~="===b?(" "+e+" ").indexOf(c)>-1:"|="===b?e===c||e.slice(0,c.length+1)===c+"-":!1):!0}},CHILD:function(a,b,c,d,e){var f="nth"!==a.slice(0,3),g="last"!==a.slice(-4),h="of-type"===b;return 1===d&&0===e?function(a){return!!a.parentNode}:function(b,c,i){var j,k,l,m,n,o,p=f!==g?"nextSibling":"previousSibling",q=b.parentNode,r=h&&b.nodeName.toLowerCase(),s=!i&&!h;if(q){if(f){while(p){l=b;while(l=l[p])if(h?l.nodeName.toLowerCase()===r:1===l.nodeType)return!1;o=p="only"===a&&!o&&"nextSibling"}return!0}if(o=[g?q.firstChild:q.lastChild],g&&s){k=q[u]||(q[u]={}),j=k[a]||[],n=j[0]===w&&j[1],m=j[0]===w&&j[2],l=n&&q.childNodes[n];while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if(1===l.nodeType&&++m&&l===b){k[a]=[w,n,m];break}}else if(s&&(j=(b[u]||(b[u]={}))[a])&&j[0]===w)m=j[1];else while(l=++n&&l&&l[p]||(m=n=0)||o.pop())if((h?l.nodeName.toLowerCase()===r:1===l.nodeType)&&++m&&(s&&((l[u]||(l[u]={}))[a]=[w,m]),l===b))break;return m-=e,m===d||m%d===0&&m/d>=0}}},PSEUDO:function(a,b){var c,e=d.pseudos[a]||d.setFilters[a.toLowerCase()]||fb.error("unsupported pseudo: "+a);return e[u]?e(b):e.length>1?(c=[a,a,"",b],d.setFilters.hasOwnProperty(a.toLowerCase())?hb(function(a,c){var d,f=e(a,b),g=f.length;while(g--)d=K.call(a,f[g]),a[d]=!(c[d]=f[g])}):function(a){return e(a,0,c)}):e}},pseudos:{not:hb(function(a){var b=[],c=[],d=h(a.replace(R,"$1"));return d[u]?hb(function(a,b,c,e){var f,g=d(a,null,e,[]),h=a.length;while(h--)(f=g[h])&&(a[h]=!(b[h]=f))}):function(a,e,f){return b[0]=a,d(b,null,f,c),!c.pop()}}),has:hb(function(a){return function(b){return fb(a,b).length>0}}),contains:hb(function(a){return function(b){return(b.textContent||b.innerText||e(b)).indexOf(a)>-1}}),lang:hb(function(a){return W.test(a||"")||fb.error("unsupported lang: "+a),a=a.replace(cb,db).toLowerCase(),function(b){var c;do if(c=p?b.lang:b.getAttribute("xml:lang")||b.getAttribute("lang"))return c=c.toLowerCase(),c===a||0===c.indexOf(a+"-");while((b=b.parentNode)&&1===b.nodeType);return!1}}),target:function(b){var c=a.location&&a.location.hash;return c&&c.slice(1)===b.id},root:function(a){return a===o},focus:function(a){return a===n.activeElement&&(!n.hasFocus||n.hasFocus())&&!!(a.type||a.href||~a.tabIndex)},enabled:function(a){return a.disabled===!1},disabled:function(a){return a.disabled===!0},checked:function(a){var b=a.nodeName.toLowerCase();return"input"===b&&!!a.checked||"option"===b&&!!a.selected},selected:function(a){return a.parentNode&&a.parentNode.selectedIndex,a.selected===!0},empty:function(a){for(a=a.firstChild;a;a=a.nextSibling)if(a.nodeType<6)return!1;return!0},parent:function(a){return!d.pseudos.empty(a)},header:function(a){return Z.test(a.nodeName)},input:function(a){return Y.test(a.nodeName)},button:function(a){var 
b=a.nodeName.toLowerCase();return"input"===b&&"button"===a.type||"button"===b},text:function(a){var b;return"input"===a.nodeName.toLowerCase()&&"text"===a.type&&(null==(b=a.getAttribute("type"))||"text"===b.toLowerCase())},first:nb(function(){return[0]}),last:nb(function(a,b){return[b-1]}),eq:nb(function(a,b,c){return[0>c?c+b:c]}),even:nb(function(a,b){for(var c=0;b>c;c+=2)a.push(c);return a}),odd:nb(function(a,b){for(var c=1;b>c;c+=2)a.push(c);return a}),lt:nb(function(a,b,c){for(var d=0>c?c+b:c;--d>=0;)a.push(d);return a}),gt:nb(function(a,b,c){for(var d=0>c?c+b:c;++d<b;)a.push(d);return a})}},d.pseudos.nth=d.pseudos.eq;for(b in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})d.pseudos[b]=lb(b);for(b in{submit:!0,reset:!0})d.pseudos[b]=mb(b);function pb(){}pb.prototype=d.filters=d.pseudos,d.setFilters=new pb,g=fb.tokenize=function(a,b){var c,e,f,g,h,i,j,k=z[a+" "];if(k)return b?0:k.slice(0);h=a,i=[],j=d.preFilter;while(h){(!c||(e=S.exec(h)))&&(e&&(h=h.slice(e[0].length)||h),i.push(f=[])),c=!1,(e=T.exec(h))&&(c=e.shift(),f.push({value:c,type:e[0].replace(R," ")}),h=h.slice(c.length));for(g in d.filter)!(e=X[g].exec(h))||j[g]&&!(e=j[g](e))||(c=e.shift(),f.push({value:c,type:g,matches:e}),h=h.slice(c.length));if(!c)break}return b?h.length:h?fb.error(a):z(a,i).slice(0)};function qb(a){for(var b=0,c=a.length,d="";c>b;b++)d+=a[b].value;return d}function rb(a,b,c){var d=b.dir,e=c&&"parentNode"===d,f=x++;return b.first?function(b,c,f){while(b=b[d])if(1===b.nodeType||e)return a(b,c,f)}:function(b,c,g){var h,i,j=[w,f];if(g){while(b=b[d])if((1===b.nodeType||e)&&a(b,c,g))return!0}else while(b=b[d])if(1===b.nodeType||e){if(i=b[u]||(b[u]={}),(h=i[d])&&h[0]===w&&h[1]===f)return j[2]=h[2];if(i[d]=j,j[2]=a(b,c,g))return!0}}}function sb(a){return a.length>1?function(b,c,d){var e=a.length;while(e--)if(!a[e](b,c,d))return!1;return!0}:a[0]}function tb(a,b,c){for(var d=0,e=b.length;e>d;d++)fb(a,b[d],c);return c}function ub(a,b,c,d,e){for(var f,g=[],h=0,i=a.length,j=null!=b;i>h;h++)(f=a[h])&&(!c||c(f,d,e))&&(g.push(f),j&&b.push(h));return g}function vb(a,b,c,d,e,f){return d&&!d[u]&&(d=vb(d)),e&&!e[u]&&(e=vb(e,f)),hb(function(f,g,h,i){var j,k,l,m=[],n=[],o=g.length,p=f||tb(b||"*",h.nodeType?[h]:h,[]),q=!a||!f&&b?p:ub(p,m,a,h,i),r=c?e||(f?a:o||d)?[]:g:q;if(c&&c(q,r,h,i),d){j=ub(r,n),d(j,[],h,i),k=j.length;while(k--)(l=j[k])&&(r[n[k]]=!(q[n[k]]=l))}if(f){if(e||a){if(e){j=[],k=r.length;while(k--)(l=r[k])&&j.push(q[k]=l);e(null,r=[],j,i)}k=r.length;while(k--)(l=r[k])&&(j=e?K.call(f,l):m[k])>-1&&(f[j]=!(g[j]=l))}}else r=ub(r===g?r.splice(o,r.length):r),e?e(null,g,r,i):I.apply(g,r)})}function wb(a){for(var b,c,e,f=a.length,g=d.relative[a[0].type],h=g||d.relative[" "],i=g?1:0,k=rb(function(a){return a===b},h,!0),l=rb(function(a){return K.call(b,a)>-1},h,!0),m=[function(a,c,d){return!g&&(d||c!==j)||((b=c).nodeType?k(a,c,d):l(a,c,d))}];f>i;i++)if(c=d.relative[a[i].type])m=[rb(sb(m),c)];else{if(c=d.filter[a[i].type].apply(null,a[i].matches),c[u]){for(e=++i;f>e;e++)if(d.relative[a[e].type])break;return vb(i>1&&sb(m),i>1&&qb(a.slice(0,i-1).concat({value:" "===a[i-2].type?"*":""})).replace(R,"$1"),c,e>i&&wb(a.slice(i,e)),f>e&&wb(a=a.slice(e)),f>e&&qb(a))}m.push(c)}return sb(m)}function xb(a,b){var c=b.length>0,e=a.length>0,f=function(f,g,h,i,k){var 
l,m,o,p=0,q="0",r=f&&[],s=[],t=j,u=f||e&&d.find.TAG("*",k),v=w+=null==t?1:Math.random()||.1,x=u.length;for(k&&(j=g!==n&&g);q!==x&&null!=(l=u[q]);q++){if(e&&l){m=0;while(o=a[m++])if(o(l,g,h)){i.push(l);break}k&&(w=v)}c&&((l=!o&&l)&&p--,f&&r.push(l))}if(p+=q,c&&q!==p){m=0;while(o=b[m++])o(r,s,g,h);if(f){if(p>0)while(q--)r[q]||s[q]||(s[q]=G.call(i));s=ub(s)}I.apply(i,s),k&&!f&&s.length>0&&p+b.length>1&&fb.uniqueSort(i)}return k&&(w=v,j=t),r};return c?hb(f):f}return h=fb.compile=function(a,b){var c,d=[],e=[],f=A[a+" "];if(!f){b||(b=g(a)),c=b.length;while(c--)f=wb(b[c]),f[u]?d.push(f):e.push(f);f=A(a,xb(e,d)),f.selector=a}return f},i=fb.select=function(a,b,e,f){var i,j,k,l,m,n="function"==typeof a&&a,o=!f&&g(a=n.selector||a);if(e=e||[],1===o.length){if(j=o[0]=o[0].slice(0),j.length>2&&"ID"===(k=j[0]).type&&c.getById&&9===b.nodeType&&p&&d.relative[j[1].type]){if(b=(d.find.ID(k.matches[0].replace(cb,db),b)||[])[0],!b)return e;n&&(b=b.parentNode),a=a.slice(j.shift().value.length)}i=X.needsContext.test(a)?0:j.length;while(i--){if(k=j[i],d.relative[l=k.type])break;if((m=d.find[l])&&(f=m(k.matches[0].replace(cb,db),ab.test(j[0].type)&&ob(b.parentNode)||b))){if(j.splice(i,1),a=f.length&&qb(j),!a)return I.apply(e,f),e;break}}}return(n||h(a,o))(f,b,!p,e,ab.test(a)&&ob(b.parentNode)||b),e},c.sortStable=u.split("").sort(B).join("")===u,c.detectDuplicates=!!l,m(),c.sortDetached=ib(function(a){return 1&a.compareDocumentPosition(n.createElement("div"))}),ib(function(a){return a.innerHTML="<a href='#'></a>","#"===a.firstChild.getAttribute("href")})||jb("type|href|height|width",function(a,b,c){return c?void 0:a.getAttribute(b,"type"===b.toLowerCase()?1:2)}),c.attributes&&ib(function(a){return a.innerHTML="<input/>",a.firstChild.setAttribute("value",""),""===a.firstChild.getAttribute("value")})||jb("value",function(a,b,c){return c||"input"!==a.nodeName.toLowerCase()?void 0:a.defaultValue}),ib(function(a){return null==a.getAttribute("disabled")})||jb(L,function(a,b,c){var d;return c?void 0:a[b]===!0?b.toLowerCase():(d=a.getAttributeNode(b))&&d.specified?d.value:null}),fb}(a);m.find=s,m.expr=s.selectors,m.expr[":"]=m.expr.pseudos,m.unique=s.uniqueSort,m.text=s.getText,m.isXMLDoc=s.isXML,m.contains=s.contains;var t=m.expr.match.needsContext,u=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,v=/^.[^:#\[\.,]*$/;function w(a,b,c){if(m.isFunction(b))return m.grep(a,function(a,d){return!!b.call(a,d,a)!==c});if(b.nodeType)return m.grep(a,function(a){return a===b!==c});if("string"==typeof b){if(v.test(b))return m.filter(b,a,c);b=m.filter(b,a)}return m.grep(a,function(a){return m.inArray(a,b)>=0!==c})}m.filter=function(a,b,c){var d=b[0];return c&&(a=":not("+a+")"),1===b.length&&1===d.nodeType?m.find.matchesSelector(d,a)?[d]:[]:m.find.matches(a,m.grep(b,function(a){return 1===a.nodeType}))},m.fn.extend({find:function(a){var b,c=[],d=this,e=d.length;if("string"!=typeof a)return this.pushStack(m(a).filter(function(){for(b=0;e>b;b++)if(m.contains(d[b],this))return!0}));for(b=0;e>b;b++)m.find(a,d[b],c);return c=this.pushStack(e>1?m.unique(c):c),c.selector=this.selector?this.selector+" "+a:a,c},filter:function(a){return this.pushStack(w(this,a||[],!1))},not:function(a){return this.pushStack(w(this,a||[],!0))},is:function(a){return!!w(this,"string"==typeof a&&t.test(a)?m(a):a||[],!1).length}});var x,y=a.document,z=/^(?:\s*(<[\w\W]+>)[^>]*|#([\w-]*))$/,A=m.fn.init=function(a,b){var c,d;if(!a)return this;if("string"==typeof 
a){if(c="<"===a.charAt(0)&&">"===a.charAt(a.length-1)&&a.length>=3?[null,a,null]:z.exec(a),!c||!c[1]&&b)return!b||b.jquery?(b||x).find(a):this.constructor(b).find(a);if(c[1]){if(b=b instanceof m?b[0]:b,m.merge(this,m.parseHTML(c[1],b&&b.nodeType?b.ownerDocument||b:y,!0)),u.test(c[1])&&m.isPlainObject(b))for(c in b)m.isFunction(this[c])?this[c](b[c]):this.attr(c,b[c]);return this}if(d=y.getElementById(c[2]),d&&d.parentNode){if(d.id!==c[2])return x.find(a);this.length=1,this[0]=d}return this.context=y,this.selector=a,this}return a.nodeType?(this.context=this[0]=a,this.length=1,this):m.isFunction(a)?"undefined"!=typeof x.ready?x.ready(a):a(m):(void 0!==a.selector&&(this.selector=a.selector,this.context=a.context),m.makeArray(a,this))};A.prototype=m.fn,x=m(y);var B=/^(?:parents|prev(?:Until|All))/,C={children:!0,contents:!0,next:!0,prev:!0};m.extend({dir:function(a,b,c){var d=[],e=a[b];while(e&&9!==e.nodeType&&(void 0===c||1!==e.nodeType||!m(e).is(c)))1===e.nodeType&&d.push(e),e=e[b];return d},sibling:function(a,b){for(var c=[];a;a=a.nextSibling)1===a.nodeType&&a!==b&&c.push(a);return c}}),m.fn.extend({has:function(a){var b,c=m(a,this),d=c.length;return this.filter(function(){for(b=0;d>b;b++)if(m.contains(this,c[b]))return!0})},closest:function(a,b){for(var c,d=0,e=this.length,f=[],g=t.test(a)||"string"!=typeof a?m(a,b||this.context):0;e>d;d++)for(c=this[d];c&&c!==b;c=c.parentNode)if(c.nodeType<11&&(g?g.index(c)>-1:1===c.nodeType&&m.find.matchesSelector(c,a))){f.push(c);break}return this.pushStack(f.length>1?m.unique(f):f)},index:function(a){return a?"string"==typeof a?m.inArray(this[0],m(a)):m.inArray(a.jquery?a[0]:a,this):this[0]&&this[0].parentNode?this.first().prevAll().length:-1},add:function(a,b){return this.pushStack(m.unique(m.merge(this.get(),m(a,b))))},addBack:function(a){return this.add(null==a?this.prevObject:this.prevObject.filter(a))}});function D(a,b){do a=a[b];while(a&&1!==a.nodeType);return a}m.each({parent:function(a){var b=a.parentNode;return b&&11!==b.nodeType?b:null},parents:function(a){return m.dir(a,"parentNode")},parentsUntil:function(a,b,c){return m.dir(a,"parentNode",c)},next:function(a){return D(a,"nextSibling")},prev:function(a){return D(a,"previousSibling")},nextAll:function(a){return m.dir(a,"nextSibling")},prevAll:function(a){return m.dir(a,"previousSibling")},nextUntil:function(a,b,c){return m.dir(a,"nextSibling",c)},prevUntil:function(a,b,c){return m.dir(a,"previousSibling",c)},siblings:function(a){return m.sibling((a.parentNode||{}).firstChild,a)},children:function(a){return m.sibling(a.firstChild)},contents:function(a){return m.nodeName(a,"iframe")?a.contentDocument||a.contentWindow.document:m.merge([],a.childNodes)}},function(a,b){m.fn[a]=function(c,d){var e=m.map(this,b,c);return"Until"!==a.slice(-5)&&(d=c),d&&"string"==typeof d&&(e=m.filter(d,e)),this.length>1&&(C[a]||(e=m.unique(e)),B.test(a)&&(e=e.reverse())),this.pushStack(e)}});var E=/\S+/g,F={};function G(a){var b=F[a]={};return m.each(a.match(E)||[],function(a,c){b[c]=!0}),b}m.Callbacks=function(a){a="string"==typeof a?F[a]||G(a):m.extend({},a);var b,c,d,e,f,g,h=[],i=!a.once&&[],j=function(l){for(c=a.memory&&l,d=!0,f=g||0,g=0,e=h.length,b=!0;h&&e>f;f++)if(h[f].apply(l[0],l[1])===!1&&a.stopOnFalse){c=!1;break}b=!1,h&&(i?i.length&&j(i.shift()):c?h=[]:k.disable())},k={add:function(){if(h){var d=h.length;!function f(b){m.each(b,function(b,c){var d=m.type(c);"function"===d?a.unique&&k.has(c)||h.push(c):c&&c.length&&"string"!==d&&f(c)})}(arguments),b?e=h.length:c&&(g=d,j(c))}return 
this},remove:function(){return h&&m.each(arguments,function(a,c){var d;while((d=m.inArray(c,h,d))>-1)h.splice(d,1),b&&(e>=d&&e--,f>=d&&f--)}),this},has:function(a){return a?m.inArray(a,h)>-1:!(!h||!h.length)},empty:function(){return h=[],e=0,this},disable:function(){return h=i=c=void 0,this},disabled:function(){return!h},lock:function(){return i=void 0,c||k.disable(),this},locked:function(){return!i},fireWith:function(a,c){return!h||d&&!i||(c=c||[],c=[a,c.slice?c.slice():c],b?i.push(c):j(c)),this},fire:function(){return k.fireWith(this,arguments),this},fired:function(){return!!d}};return k},m.extend({Deferred:function(a){var b=[["resolve","done",m.Callbacks("once memory"),"resolved"],["reject","fail",m.Callbacks("once memory"),"rejected"],["notify","progress",m.Callbacks("memory")]],c="pending",d={state:function(){return c},always:function(){return e.done(arguments).fail(arguments),this},then:function(){var a=arguments;return m.Deferred(function(c){m.each(b,function(b,f){var g=m.isFunction(a[b])&&a[b];e[f[1]](function(){var a=g&&g.apply(this,arguments);a&&m.isFunction(a.promise)?a.promise().done(c.resolve).fail(c.reject).progress(c.notify):c[f[0]+"With"](this===d?c.promise():this,g?[a]:arguments)})}),a=null}).promise()},promise:function(a){return null!=a?m.extend(a,d):d}},e={};return d.pipe=d.then,m.each(b,function(a,f){var g=f[2],h=f[3];d[f[1]]=g.add,h&&g.add(function(){c=h},b[1^a][2].disable,b[2][2].lock),e[f[0]]=function(){return e[f[0]+"With"](this===e?d:this,arguments),this},e[f[0]+"With"]=g.fireWith}),d.promise(e),a&&a.call(e,e),e},when:function(a){var b=0,c=d.call(arguments),e=c.length,f=1!==e||a&&m.isFunction(a.promise)?e:0,g=1===f?a:m.Deferred(),h=function(a,b,c){return function(e){b[a]=this,c[a]=arguments.length>1?d.call(arguments):e,c===i?g.notifyWith(b,c):--f||g.resolveWith(b,c)}},i,j,k;if(e>1)for(i=new Array(e),j=new Array(e),k=new Array(e);e>b;b++)c[b]&&m.isFunction(c[b].promise)?c[b].promise().done(h(b,k,c)).fail(g.reject).progress(h(b,j,i)):--f;return f||g.resolveWith(k,c),g.promise()}});var H;m.fn.ready=function(a){return m.ready.promise().done(a),this},m.extend({isReady:!1,readyWait:1,holdReady:function(a){a?m.readyWait++:m.ready(!0)},ready:function(a){if(a===!0?!--m.readyWait:!m.isReady){if(!y.body)return setTimeout(m.ready);m.isReady=!0,a!==!0&&--m.readyWait>0||(H.resolveWith(y,[m]),m.fn.triggerHandler&&(m(y).triggerHandler("ready"),m(y).off("ready")))}}});function I(){y.addEventListener?(y.removeEventListener("DOMContentLoaded",J,!1),a.removeEventListener("load",J,!1)):(y.detachEvent("onreadystatechange",J),a.detachEvent("onload",J))}function J(){(y.addEventListener||"load"===event.type||"complete"===y.readyState)&&(I(),m.ready())}m.ready.promise=function(b){if(!H)if(H=m.Deferred(),"complete"===y.readyState)setTimeout(m.ready);else if(y.addEventListener)y.addEventListener("DOMContentLoaded",J,!1),a.addEventListener("load",J,!1);else{y.attachEvent("onreadystatechange",J),a.attachEvent("onload",J);var c=!1;try{c=null==a.frameElement&&y.documentElement}catch(d){}c&&c.doScroll&&!function e(){if(!m.isReady){try{c.doScroll("left")}catch(a){return setTimeout(e,50)}I(),m.ready()}}()}return H.promise(b)};var K="undefined",L;for(L in m(k))break;k.ownLast="0"!==L,k.inlineBlockNeedsLayout=!1,m(function(){var a,b,c,d;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof 
b.style.zoom!==K&&(b.style.cssText="display:inline;margin:0;border:0;padding:1px;width:1px;zoom:1",k.inlineBlockNeedsLayout=a=3===b.offsetWidth,a&&(c.style.zoom=1)),c.removeChild(d))}),function(){var a=y.createElement("div");if(null==k.deleteExpando){k.deleteExpando=!0;try{delete a.test}catch(b){k.deleteExpando=!1}}a=null}(),m.acceptData=function(a){var b=m.noData[(a.nodeName+" ").toLowerCase()],c=+a.nodeType||1;return 1!==c&&9!==c?!1:!b||b!==!0&&a.getAttribute("classid")===b};var M=/^(?:\{[\w\W]*\}|\[[\w\W]*\])$/,N=/([A-Z])/g;function O(a,b,c){if(void 0===c&&1===a.nodeType){var d="data-"+b.replace(N,"-$1").toLowerCase();if(c=a.getAttribute(d),"string"==typeof c){try{c="true"===c?!0:"false"===c?!1:"null"===c?null:+c+""===c?+c:M.test(c)?m.parseJSON(c):c}catch(e){}m.data(a,b,c)}else c=void 0}return c}function P(a){var b;for(b in a)if(("data"!==b||!m.isEmptyObject(a[b]))&&"toJSON"!==b)return!1;return!0}function Q(a,b,d,e){if(m.acceptData(a)){var f,g,h=m.expando,i=a.nodeType,j=i?m.cache:a,k=i?a[h]:a[h]&&h;
-if(k&&j[k]&&(e||j[k].data)||void 0!==d||"string"!=typeof b)return k||(k=i?a[h]=c.pop()||m.guid++:h),j[k]||(j[k]=i?{}:{toJSON:m.noop}),("object"==typeof b||"function"==typeof b)&&(e?j[k]=m.extend(j[k],b):j[k].data=m.extend(j[k].data,b)),g=j[k],e||(g.data||(g.data={}),g=g.data),void 0!==d&&(g[m.camelCase(b)]=d),"string"==typeof b?(f=g[b],null==f&&(f=g[m.camelCase(b)])):f=g,f}}function R(a,b,c){if(m.acceptData(a)){var d,e,f=a.nodeType,g=f?m.cache:a,h=f?a[m.expando]:m.expando;if(g[h]){if(b&&(d=c?g[h]:g[h].data)){m.isArray(b)?b=b.concat(m.map(b,m.camelCase)):b in d?b=[b]:(b=m.camelCase(b),b=b in d?[b]:b.split(" ")),e=b.length;while(e--)delete d[b[e]];if(c?!P(d):!m.isEmptyObject(d))return}(c||(delete g[h].data,P(g[h])))&&(f?m.cleanData([a],!0):k.deleteExpando||g!=g.window?delete g[h]:g[h]=null)}}}m.extend({cache:{},noData:{"applet ":!0,"embed ":!0,"object ":"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000"},hasData:function(a){return a=a.nodeType?m.cache[a[m.expando]]:a[m.expando],!!a&&!P(a)},data:function(a,b,c){return Q(a,b,c)},removeData:function(a,b){return R(a,b)},_data:function(a,b,c){return Q(a,b,c,!0)},_removeData:function(a,b){return R(a,b,!0)}}),m.fn.extend({data:function(a,b){var c,d,e,f=this[0],g=f&&f.attributes;if(void 0===a){if(this.length&&(e=m.data(f),1===f.nodeType&&!m._data(f,"parsedAttrs"))){c=g.length;while(c--)g[c]&&(d=g[c].name,0===d.indexOf("data-")&&(d=m.camelCase(d.slice(5)),O(f,d,e[d])));m._data(f,"parsedAttrs",!0)}return e}return"object"==typeof a?this.each(function(){m.data(this,a)}):arguments.length>1?this.each(function(){m.data(this,a,b)}):f?O(f,a,m.data(f,a)):void 0},removeData:function(a){return this.each(function(){m.removeData(this,a)})}}),m.extend({queue:function(a,b,c){var d;return a?(b=(b||"fx")+"queue",d=m._data(a,b),c&&(!d||m.isArray(c)?d=m._data(a,b,m.makeArray(c)):d.push(c)),d||[]):void 0},dequeue:function(a,b){b=b||"fx";var c=m.queue(a,b),d=c.length,e=c.shift(),f=m._queueHooks(a,b),g=function(){m.dequeue(a,b)};"inprogress"===e&&(e=c.shift(),d--),e&&("fx"===b&&c.unshift("inprogress"),delete f.stop,e.call(a,g,f)),!d&&f&&f.empty.fire()},_queueHooks:function(a,b){var c=b+"queueHooks";return m._data(a,c)||m._data(a,c,{empty:m.Callbacks("once memory").add(function(){m._removeData(a,b+"queue"),m._removeData(a,c)})})}}),m.fn.extend({queue:function(a,b){var c=2;return"string"!=typeof a&&(b=a,a="fx",c--),arguments.length<c?m.queue(this[0],a):void 0===b?this:this.each(function(){var c=m.queue(this,a,b);m._queueHooks(this,a),"fx"===a&&"inprogress"!==c[0]&&m.dequeue(this,a)})},dequeue:function(a){return this.each(function(){m.dequeue(this,a)})},clearQueue:function(a){return this.queue(a||"fx",[])},promise:function(a,b){var c,d=1,e=m.Deferred(),f=this,g=this.length,h=function(){--d||e.resolveWith(f,[f])};"string"!=typeof a&&(b=a,a=void 0),a=a||"fx";while(g--)c=m._data(f[g],a+"queueHooks"),c&&c.empty&&(d++,c.empty.add(h));return h(),e.promise(b)}});var S=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,T=["Top","Right","Bottom","Left"],U=function(a,b){return a=b||a,"none"===m.css(a,"display")||!m.contains(a.ownerDocument,a)},V=m.access=function(a,b,c,d,e,f,g){var h=0,i=a.length,j=null==c;if("object"===m.type(c)){e=!0;for(h in c)m.access(a,b,h,c[h],!0,f,g)}else if(void 0!==d&&(e=!0,m.isFunction(d)||(g=!0),j&&(g?(b.call(a,d),b=null):(j=b,b=function(a,b,c){return j.call(m(a),c)})),b))for(;i>h;h++)b(a[h],c,g?d:d.call(a[h],h,b(a[h],c)));return e?a:j?b.call(a):i?b(a[0],c):f},W=/^(?:checkbox|radio)$/i;!function(){var 
a=y.createElement("input"),b=y.createElement("div"),c=y.createDocumentFragment();if(b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",k.leadingWhitespace=3===b.firstChild.nodeType,k.tbody=!b.getElementsByTagName("tbody").length,k.htmlSerialize=!!b.getElementsByTagName("link").length,k.html5Clone="<:nav></:nav>"!==y.createElement("nav").cloneNode(!0).outerHTML,a.type="checkbox",a.checked=!0,c.appendChild(a),k.appendChecked=a.checked,b.innerHTML="<textarea>x</textarea>",k.noCloneChecked=!!b.cloneNode(!0).lastChild.defaultValue,c.appendChild(b),b.innerHTML="<input type='radio' checked='checked' name='t'/>",k.checkClone=b.cloneNode(!0).cloneNode(!0).lastChild.checked,k.noCloneEvent=!0,b.attachEvent&&(b.attachEvent("onclick",function(){k.noCloneEvent=!1}),b.cloneNode(!0).click()),null==k.deleteExpando){k.deleteExpando=!0;try{delete b.test}catch(d){k.deleteExpando=!1}}}(),function(){var b,c,d=y.createElement("div");for(b in{submit:!0,change:!0,focusin:!0})c="on"+b,(k[b+"Bubbles"]=c in a)||(d.setAttribute(c,"t"),k[b+"Bubbles"]=d.attributes[c].expando===!1);d=null}();var X=/^(?:input|select|textarea)$/i,Y=/^key/,Z=/^(?:mouse|pointer|contextmenu)|click/,$=/^(?:focusinfocus|focusoutblur)$/,_=/^([^.]*)(?:\.(.+)|)$/;function ab(){return!0}function bb(){return!1}function cb(){try{return y.activeElement}catch(a){}}m.event={global:{},add:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m._data(a);if(r){c.handler&&(i=c,c=i.handler,e=i.selector),c.guid||(c.guid=m.guid++),(g=r.events)||(g=r.events={}),(k=r.handle)||(k=r.handle=function(a){return typeof m===K||a&&m.event.triggered===a.type?void 0:m.event.dispatch.apply(k.elem,arguments)},k.elem=a),b=(b||"").match(E)||[""],h=b.length;while(h--)f=_.exec(b[h])||[],o=q=f[1],p=(f[2]||"").split(".").sort(),o&&(j=m.event.special[o]||{},o=(e?j.delegateType:j.bindType)||o,j=m.event.special[o]||{},l=m.extend({type:o,origType:q,data:d,handler:c,guid:c.guid,selector:e,needsContext:e&&m.expr.match.needsContext.test(e),namespace:p.join(".")},i),(n=g[o])||(n=g[o]=[],n.delegateCount=0,j.setup&&j.setup.call(a,d,p,k)!==!1||(a.addEventListener?a.addEventListener(o,k,!1):a.attachEvent&&a.attachEvent("on"+o,k))),j.add&&(j.add.call(a,l),l.handler.guid||(l.handler.guid=c.guid)),e?n.splice(n.delegateCount++,0,l):n.push(l),m.event.global[o]=!0);a=null}},remove:function(a,b,c,d,e){var f,g,h,i,j,k,l,n,o,p,q,r=m.hasData(a)&&m._data(a);if(r&&(k=r.events)){b=(b||"").match(E)||[""],j=b.length;while(j--)if(h=_.exec(b[j])||[],o=q=h[1],p=(h[2]||"").split(".").sort(),o){l=m.event.special[o]||{},o=(d?l.delegateType:l.bindType)||o,n=k[o]||[],h=h[2]&&new RegExp("(^|\\.)"+p.join("\\.(?:.*\\.|)")+"(\\.|$)"),i=f=n.length;while(f--)g=n[f],!e&&q!==g.origType||c&&c.guid!==g.guid||h&&!h.test(g.namespace)||d&&d!==g.selector&&("**"!==d||!g.selector)||(n.splice(f,1),g.selector&&n.delegateCount--,l.remove&&l.remove.call(a,g));i&&!n.length&&(l.teardown&&l.teardown.call(a,p,r.handle)!==!1||m.removeEvent(a,o,r.handle),delete k[o])}else for(o in k)m.event.remove(a,o+b[j],c,d,!0);m.isEmptyObject(k)&&(delete r.handle,m._removeData(a,"events"))}},trigger:function(b,c,d,e){var f,g,h,i,k,l,n,o=[d||y],p=j.call(b,"type")?b.type:b,q=j.call(b,"namespace")?b.namespace.split("."):[];if(h=l=d=d||y,3!==d.nodeType&&8!==d.nodeType&&!$.test(p+m.event.triggered)&&(p.indexOf(".")>=0&&(q=p.split("."),p=q.shift(),q.sort()),g=p.indexOf(":")<0&&"on"+p,b=b[m.expando]?b:new m.Event(p,"object"==typeof b&&b),b.isTrigger=e?2:3,b.namespace=q.join("."),b.namespace_re=b.namespace?new 
RegExp("(^|\\.)"+q.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,b.result=void 0,b.target||(b.target=d),c=null==c?[b]:m.makeArray(c,[b]),k=m.event.special[p]||{},e||!k.trigger||k.trigger.apply(d,c)!==!1)){if(!e&&!k.noBubble&&!m.isWindow(d)){for(i=k.delegateType||p,$.test(i+p)||(h=h.parentNode);h;h=h.parentNode)o.push(h),l=h;l===(d.ownerDocument||y)&&o.push(l.defaultView||l.parentWindow||a)}n=0;while((h=o[n++])&&!b.isPropagationStopped())b.type=n>1?i:k.bindType||p,f=(m._data(h,"events")||{})[b.type]&&m._data(h,"handle"),f&&f.apply(h,c),f=g&&h[g],f&&f.apply&&m.acceptData(h)&&(b.result=f.apply(h,c),b.result===!1&&b.preventDefault());if(b.type=p,!e&&!b.isDefaultPrevented()&&(!k._default||k._default.apply(o.pop(),c)===!1)&&m.acceptData(d)&&g&&d[p]&&!m.isWindow(d)){l=d[g],l&&(d[g]=null),m.event.triggered=p;try{d[p]()}catch(r){}m.event.triggered=void 0,l&&(d[g]=l)}return b.result}},dispatch:function(a){a=m.event.fix(a);var b,c,e,f,g,h=[],i=d.call(arguments),j=(m._data(this,"events")||{})[a.type]||[],k=m.event.special[a.type]||{};if(i[0]=a,a.delegateTarget=this,!k.preDispatch||k.preDispatch.call(this,a)!==!1){h=m.event.handlers.call(this,a,j),b=0;while((f=h[b++])&&!a.isPropagationStopped()){a.currentTarget=f.elem,g=0;while((e=f.handlers[g++])&&!a.isImmediatePropagationStopped())(!a.namespace_re||a.namespace_re.test(e.namespace))&&(a.handleObj=e,a.data=e.data,c=((m.event.special[e.origType]||{}).handle||e.handler).apply(f.elem,i),void 0!==c&&(a.result=c)===!1&&(a.preventDefault(),a.stopPropagation()))}return k.postDispatch&&k.postDispatch.call(this,a),a.result}},handlers:function(a,b){var c,d,e,f,g=[],h=b.delegateCount,i=a.target;if(h&&i.nodeType&&(!a.button||"click"!==a.type))for(;i!=this;i=i.parentNode||this)if(1===i.nodeType&&(i.disabled!==!0||"click"!==a.type)){for(e=[],f=0;h>f;f++)d=b[f],c=d.selector+" ",void 0===e[c]&&(e[c]=d.needsContext?m(c,this).index(i)>=0:m.find(c,this,null,[i]).length),e[c]&&e.push(d);e.length&&g.push({elem:i,handlers:e})}return h<b.length&&g.push({elem:this,handlers:b.slice(h)}),g},fix:function(a){if(a[m.expando])return a;var b,c,d,e=a.type,f=a,g=this.fixHooks[e];g||(this.fixHooks[e]=g=Z.test(e)?this.mouseHooks:Y.test(e)?this.keyHooks:{}),d=g.props?this.props.concat(g.props):this.props,a=new m.Event(f),b=d.length;while(b--)c=d[b],a[c]=f[c];return a.target||(a.target=f.srcElement||y),3===a.target.nodeType&&(a.target=a.target.parentNode),a.metaKey=!!a.metaKey,g.filter?g.filter(a,f):a},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(a,b){return null==a.which&&(a.which=null!=b.charCode?b.charCode:b.keyCode),a}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(a,b){var c,d,e,f=b.button,g=b.fromElement;return null==a.pageX&&null!=b.clientX&&(d=a.target.ownerDocument||y,e=d.documentElement,c=d.body,a.pageX=b.clientX+(e&&e.scrollLeft||c&&c.scrollLeft||0)-(e&&e.clientLeft||c&&c.clientLeft||0),a.pageY=b.clientY+(e&&e.scrollTop||c&&c.scrollTop||0)-(e&&e.clientTop||c&&c.clientTop||0)),!a.relatedTarget&&g&&(a.relatedTarget=g===a.target?b.toElement:g),a.which||void 0===f||(a.which=1&f?1:2&f?3:4&f?2:0),a}},special:{load:{noBubble:!0},focus:{trigger:function(){if(this!==cb()&&this.focus)try{return this.focus(),!1}catch(a){}},delegateType:"focusin"},blur:{trigger:function(){return this===cb()&&this.blur?(this.blur(),!1):void 
0},delegateType:"focusout"},click:{trigger:function(){return m.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):void 0},_default:function(a){return m.nodeName(a.target,"a")}},beforeunload:{postDispatch:function(a){void 0!==a.result&&a.originalEvent&&(a.originalEvent.returnValue=a.result)}}},simulate:function(a,b,c,d){var e=m.extend(new m.Event,c,{type:a,isSimulated:!0,originalEvent:{}});d?m.event.trigger(e,null,b):m.event.dispatch.call(b,e),e.isDefaultPrevented()&&c.preventDefault()}},m.removeEvent=y.removeEventListener?function(a,b,c){a.removeEventListener&&a.removeEventListener(b,c,!1)}:function(a,b,c){var d="on"+b;a.detachEvent&&(typeof a[d]===K&&(a[d]=null),a.detachEvent(d,c))},m.Event=function(a,b){return this instanceof m.Event?(a&&a.type?(this.originalEvent=a,this.type=a.type,this.isDefaultPrevented=a.defaultPrevented||void 0===a.defaultPrevented&&a.returnValue===!1?ab:bb):this.type=a,b&&m.extend(this,b),this.timeStamp=a&&a.timeStamp||m.now(),void(this[m.expando]=!0)):new m.Event(a,b)},m.Event.prototype={isDefaultPrevented:bb,isPropagationStopped:bb,isImmediatePropagationStopped:bb,preventDefault:function(){var a=this.originalEvent;this.isDefaultPrevented=ab,a&&(a.preventDefault?a.preventDefault():a.returnValue=!1)},stopPropagation:function(){var a=this.originalEvent;this.isPropagationStopped=ab,a&&(a.stopPropagation&&a.stopPropagation(),a.cancelBubble=!0)},stopImmediatePropagation:function(){var a=this.originalEvent;this.isImmediatePropagationStopped=ab,a&&a.stopImmediatePropagation&&a.stopImmediatePropagation(),this.stopPropagation()}},m.each({mouseenter:"mouseover",mouseleave:"mouseout",pointerenter:"pointerover",pointerleave:"pointerout"},function(a,b){m.event.special[a]={delegateType:b,bindType:b,handle:function(a){var c,d=this,e=a.relatedTarget,f=a.handleObj;return(!e||e!==d&&!m.contains(d,e))&&(a.type=f.origType,c=f.handler.apply(this,arguments),a.type=b),c}}}),k.submitBubbles||(m.event.special.submit={setup:function(){return m.nodeName(this,"form")?!1:void m.event.add(this,"click._submit keypress._submit",function(a){var b=a.target,c=m.nodeName(b,"input")||m.nodeName(b,"button")?b.form:void 0;c&&!m._data(c,"submitBubbles")&&(m.event.add(c,"submit._submit",function(a){a._submit_bubble=!0}),m._data(c,"submitBubbles",!0))})},postDispatch:function(a){a._submit_bubble&&(delete a._submit_bubble,this.parentNode&&!a.isTrigger&&m.event.simulate("submit",this.parentNode,a,!0))},teardown:function(){return m.nodeName(this,"form")?!1:void m.event.remove(this,"._submit")}}),k.changeBubbles||(m.event.special.change={setup:function(){return X.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(m.event.add(this,"propertychange._change",function(a){"checked"===a.originalEvent.propertyName&&(this._just_changed=!0)}),m.event.add(this,"click._change",function(a){this._just_changed&&!a.isTrigger&&(this._just_changed=!1),m.event.simulate("change",this,a,!0)})),!1):void m.event.add(this,"beforeactivate._change",function(a){var b=a.target;X.test(b.nodeName)&&!m._data(b,"changeBubbles")&&(m.event.add(b,"change._change",function(a){!this.parentNode||a.isSimulated||a.isTrigger||m.event.simulate("change",this.parentNode,a,!0)}),m._data(b,"changeBubbles",!0))})},handle:function(a){var b=a.target;return this!==b||a.isSimulated||a.isTrigger||"radio"!==b.type&&"checkbox"!==b.type?a.handleObj.handler.apply(this,arguments):void 0},teardown:function(){return 
m.event.remove(this,"._change"),!X.test(this.nodeName)}}),k.focusinBubbles||m.each({focus:"focusin",blur:"focusout"},function(a,b){var c=function(a){m.event.simulate(b,a.target,m.event.fix(a),!0)};m.event.special[b]={setup:function(){var d=this.ownerDocument||this,e=m._data(d,b);e||d.addEventListener(a,c,!0),m._data(d,b,(e||0)+1)},teardown:function(){var d=this.ownerDocument||this,e=m._data(d,b)-1;e?m._data(d,b,e):(d.removeEventListener(a,c,!0),m._removeData(d,b))}}}),m.fn.extend({on:function(a,b,c,d,e){var f,g;if("object"==typeof a){"string"!=typeof b&&(c=c||b,b=void 0);for(f in a)this.on(f,b,c,a[f],e);return this}if(null==c&&null==d?(d=b,c=b=void 0):null==d&&("string"==typeof b?(d=c,c=void 0):(d=c,c=b,b=void 0)),d===!1)d=bb;else if(!d)return this;return 1===e&&(g=d,d=function(a){return m().off(a),g.apply(this,arguments)},d.guid=g.guid||(g.guid=m.guid++)),this.each(function(){m.event.add(this,a,d,c,b)})},one:function(a,b,c,d){return this.on(a,b,c,d,1)},off:function(a,b,c){var d,e;if(a&&a.preventDefault&&a.handleObj)return d=a.handleObj,m(a.delegateTarget).off(d.namespace?d.origType+"."+d.namespace:d.origType,d.selector,d.handler),this;if("object"==typeof a){for(e in a)this.off(e,b,a[e]);return this}return(b===!1||"function"==typeof b)&&(c=b,b=void 0),c===!1&&(c=bb),this.each(function(){m.event.remove(this,a,c,b)})},trigger:function(a,b){return this.each(function(){m.event.trigger(a,b,this)})},triggerHandler:function(a,b){var c=this[0];return c?m.event.trigger(a,b,c,!0):void 0}});function db(a){var b=eb.split("|"),c=a.createDocumentFragment();if(c.createElement)while(b.length)c.createElement(b.pop());return c}var eb="abbr|article|aside|audio|bdi|canvas|data|datalist|details|figcaption|figure|footer|header|hgroup|mark|meter|nav|output|progress|section|summary|time|video",fb=/ jQuery\d+="(?:null|\d+)"/g,gb=new RegExp("<(?:"+eb+")[\\s/>]","i"),hb=/^\s+/,ib=/<(?!area|br|col|embed|hr|img|input|link|meta|param)(([\w:]+)[^>]*)\/>/gi,jb=/<([\w:]+)/,kb=/<tbody/i,lb=/<|&#?\w+;/,mb=/<(?:script|style|link)/i,nb=/checked\s*(?:[^=]|=\s*.checked.)/i,ob=/^$|\/(?:java|ecma)script/i,pb=/^true\/(.*)/,qb=/^\s*<!(?:\[CDATA\[|--)|(?:\]\]|--)>\s*$/g,rb={option:[1,"<select multiple='multiple'>","</select>"],legend:[1,"<fieldset>","</fieldset>"],area:[1,"<map>","</map>"],param:[1,"<object>","</object>"],thead:[1,"<table>","</table>"],tr:[2,"<table><tbody>","</tbody></table>"],col:[2,"<table><tbody></tbody><colgroup>","</colgroup></table>"],td:[3,"<table><tbody><tr>","</tr></tbody></table>"],_default:k.htmlSerialize?[0,"",""]:[1,"X<div>","</div>"]},sb=db(y),tb=sb.appendChild(y.createElement("div"));rb.optgroup=rb.option,rb.tbody=rb.tfoot=rb.colgroup=rb.caption=rb.thead,rb.th=rb.td;function ub(a,b){var c,d,e=0,f=typeof a.getElementsByTagName!==K?a.getElementsByTagName(b||"*"):typeof a.querySelectorAll!==K?a.querySelectorAll(b||"*"):void 0;if(!f)for(f=[],c=a.childNodes||a;null!=(d=c[e]);e++)!b||m.nodeName(d,b)?f.push(d):m.merge(f,ub(d,b));return void 0===b||b&&m.nodeName(a,b)?m.merge([a],f):f}function vb(a){W.test(a.type)&&(a.defaultChecked=a.checked)}function wb(a,b){return m.nodeName(a,"table")&&m.nodeName(11!==b.nodeType?b:b.firstChild,"tr")?a.getElementsByTagName("tbody")[0]||a.appendChild(a.ownerDocument.createElement("tbody")):a}function xb(a){return a.type=(null!==m.find.attr(a,"type"))+"/"+a.type,a}function yb(a){var b=pb.exec(a.type);return b?a.type=b[1]:a.removeAttribute("type"),a}function zb(a,b){for(var c,d=0;null!=(c=a[d]);d++)m._data(c,"globalEval",!b||m._data(b[d],"globalEval"))}function 
Ab(a,b){if(1===b.nodeType&&m.hasData(a)){var c,d,e,f=m._data(a),g=m._data(b,f),h=f.events;if(h){delete g.handle,g.events={};for(c in h)for(d=0,e=h[c].length;e>d;d++)m.event.add(b,c,h[c][d])}g.data&&(g.data=m.extend({},g.data))}}function Bb(a,b){var c,d,e;if(1===b.nodeType){if(c=b.nodeName.toLowerCase(),!k.noCloneEvent&&b[m.expando]){e=m._data(b);for(d in e.events)m.removeEvent(b,d,e.handle);b.removeAttribute(m.expando)}"script"===c&&b.text!==a.text?(xb(b).text=a.text,yb(b)):"object"===c?(b.parentNode&&(b.outerHTML=a.outerHTML),k.html5Clone&&a.innerHTML&&!m.trim(b.innerHTML)&&(b.innerHTML=a.innerHTML)):"input"===c&&W.test(a.type)?(b.defaultChecked=b.checked=a.checked,b.value!==a.value&&(b.value=a.value)):"option"===c?b.defaultSelected=b.selected=a.defaultSelected:("input"===c||"textarea"===c)&&(b.defaultValue=a.defaultValue)}}m.extend({clone:function(a,b,c){var d,e,f,g,h,i=m.contains(a.ownerDocument,a);if(k.html5Clone||m.isXMLDoc(a)||!gb.test("<"+a.nodeName+">")?f=a.cloneNode(!0):(tb.innerHTML=a.outerHTML,tb.removeChild(f=tb.firstChild)),!(k.noCloneEvent&&k.noCloneChecked||1!==a.nodeType&&11!==a.nodeType||m.isXMLDoc(a)))for(d=ub(f),h=ub(a),g=0;null!=(e=h[g]);++g)d[g]&&Bb(e,d[g]);if(b)if(c)for(h=h||ub(a),d=d||ub(f),g=0;null!=(e=h[g]);g++)Ab(e,d[g]);else Ab(a,f);return d=ub(f,"script"),d.length>0&&zb(d,!i&&ub(a,"script")),d=h=e=null,f},buildFragment:function(a,b,c,d){for(var e,f,g,h,i,j,l,n=a.length,o=db(b),p=[],q=0;n>q;q++)if(f=a[q],f||0===f)if("object"===m.type(f))m.merge(p,f.nodeType?[f]:f);else if(lb.test(f)){h=h||o.appendChild(b.createElement("div")),i=(jb.exec(f)||["",""])[1].toLowerCase(),l=rb[i]||rb._default,h.innerHTML=l[1]+f.replace(ib,"<$1></$2>")+l[2],e=l[0];while(e--)h=h.lastChild;if(!k.leadingWhitespace&&hb.test(f)&&p.push(b.createTextNode(hb.exec(f)[0])),!k.tbody){f="table"!==i||kb.test(f)?"<table>"!==l[1]||kb.test(f)?0:h:h.firstChild,e=f&&f.childNodes.length;while(e--)m.nodeName(j=f.childNodes[e],"tbody")&&!j.childNodes.length&&f.removeChild(j)}m.merge(p,h.childNodes),h.textContent="";while(h.firstChild)h.removeChild(h.firstChild);h=o.lastChild}else p.push(b.createTextNode(f));h&&o.removeChild(h),k.appendChecked||m.grep(ub(p,"input"),vb),q=0;while(f=p[q++])if((!d||-1===m.inArray(f,d))&&(g=m.contains(f.ownerDocument,f),h=ub(o.appendChild(f),"script"),g&&zb(h),c)){e=0;while(f=h[e++])ob.test(f.type||"")&&c.push(f)}return h=null,o},cleanData:function(a,b){for(var d,e,f,g,h=0,i=m.expando,j=m.cache,l=k.deleteExpando,n=m.event.special;null!=(d=a[h]);h++)if((b||m.acceptData(d))&&(f=d[i],g=f&&j[f])){if(g.events)for(e in g.events)n[e]?m.event.remove(d,e):m.removeEvent(d,e,g.handle);j[f]&&(delete j[f],l?delete d[i]:typeof d.removeAttribute!==K?d.removeAttribute(i):d[i]=null,c.push(f))}}}),m.fn.extend({text:function(a){return V(this,function(a){return void 0===a?m.text(this):this.empty().append((this[0]&&this[0].ownerDocument||y).createTextNode(a))},null,a,arguments.length)},append:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.appendChild(a)}})},prepend:function(){return this.domManip(arguments,function(a){if(1===this.nodeType||11===this.nodeType||9===this.nodeType){var b=wb(this,a);b.insertBefore(a,b.firstChild)}})},before:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this)})},after:function(){return this.domManip(arguments,function(a){this.parentNode&&this.parentNode.insertBefore(a,this.nextSibling)})},remove:function(a,b){for(var 
c,d=a?m.filter(a,this):this,e=0;null!=(c=d[e]);e++)b||1!==c.nodeType||m.cleanData(ub(c)),c.parentNode&&(b&&m.contains(c.ownerDocument,c)&&zb(ub(c,"script")),c.parentNode.removeChild(c));return this},empty:function(){for(var a,b=0;null!=(a=this[b]);b++){1===a.nodeType&&m.cleanData(ub(a,!1));while(a.firstChild)a.removeChild(a.firstChild);a.options&&m.nodeName(a,"select")&&(a.options.length=0)}return this},clone:function(a,b){return a=null==a?!1:a,b=null==b?a:b,this.map(function(){return m.clone(this,a,b)})},html:function(a){return V(this,function(a){var b=this[0]||{},c=0,d=this.length;if(void 0===a)return 1===b.nodeType?b.innerHTML.replace(fb,""):void 0;if(!("string"!=typeof a||mb.test(a)||!k.htmlSerialize&&gb.test(a)||!k.leadingWhitespace&&hb.test(a)||rb[(jb.exec(a)||["",""])[1].toLowerCase()])){a=a.replace(ib,"<$1></$2>");try{for(;d>c;c++)b=this[c]||{},1===b.nodeType&&(m.cleanData(ub(b,!1)),b.innerHTML=a);b=0}catch(e){}}b&&this.empty().append(a)},null,a,arguments.length)},replaceWith:function(){var a=arguments[0];return this.domManip(arguments,function(b){a=this.parentNode,m.cleanData(ub(this)),a&&a.replaceChild(b,this)}),a&&(a.length||a.nodeType)?this:this.remove()},detach:function(a){return this.remove(a,!0)},domManip:function(a,b){a=e.apply([],a);var c,d,f,g,h,i,j=0,l=this.length,n=this,o=l-1,p=a[0],q=m.isFunction(p);if(q||l>1&&"string"==typeof p&&!k.checkClone&&nb.test(p))return this.each(function(c){var d=n.eq(c);q&&(a[0]=p.call(this,c,d.html())),d.domManip(a,b)});if(l&&(i=m.buildFragment(a,this[0].ownerDocument,!1,this),c=i.firstChild,1===i.childNodes.length&&(i=c),c)){for(g=m.map(ub(i,"script"),xb),f=g.length;l>j;j++)d=i,j!==o&&(d=m.clone(d,!0,!0),f&&m.merge(g,ub(d,"script"))),b.call(this[j],d,j);if(f)for(h=g[g.length-1].ownerDocument,m.map(g,yb),j=0;f>j;j++)d=g[j],ob.test(d.type||"")&&!m._data(d,"globalEval")&&m.contains(h,d)&&(d.src?m._evalUrl&&m._evalUrl(d.src):m.globalEval((d.text||d.textContent||d.innerHTML||"").replace(qb,"")));i=c=null}return this}}),m.each({appendTo:"append",prependTo:"prepend",insertBefore:"before",insertAfter:"after",replaceAll:"replaceWith"},function(a,b){m.fn[a]=function(a){for(var c,d=0,e=[],g=m(a),h=g.length-1;h>=d;d++)c=d===h?this:this.clone(!0),m(g[d])[b](c),f.apply(e,c.get());return this.pushStack(e)}});var Cb,Db={};function Eb(b,c){var d,e=m(c.createElement(b)).appendTo(c.body),f=a.getDefaultComputedStyle&&(d=a.getDefaultComputedStyle(e[0]))?d.display:m.css(e[0],"display");return e.detach(),f}function Fb(a){var b=y,c=Db[a];return c||(c=Eb(a,b),"none"!==c&&c||(Cb=(Cb||m("<iframe frameborder='0' width='0' height='0'/>")).appendTo(b.documentElement),b=(Cb[0].contentWindow||Cb[0].contentDocument).document,b.write(),b.close(),c=Eb(a,b),Cb.detach()),Db[a]=c),c}!function(){var a;k.shrinkWrapBlocks=function(){if(null!=a)return a;a=!1;var b,c,d;return c=y.getElementsByTagName("body")[0],c&&c.style?(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),typeof b.style.zoom!==K&&(b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:1px;width:1px;zoom:1",b.appendChild(y.createElement("div")).style.width="5px",a=3!==b.offsetWidth),c.removeChild(d),a):void 0}}();var Gb=/^margin/,Hb=new RegExp("^("+S+")(?!px)[a-z%]+$","i"),Ib,Jb,Kb=/^(top|right|bottom|left)$/;a.getComputedStyle?(Ib=function(a){return 
a.ownerDocument.defaultView.getComputedStyle(a,null)},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c.getPropertyValue(b)||c[b]:void 0,c&&(""!==g||m.contains(a.ownerDocument,a)||(g=m.style(a,b)),Hb.test(g)&&Gb.test(b)&&(d=h.width,e=h.minWidth,f=h.maxWidth,h.minWidth=h.maxWidth=h.width=g,g=c.width,h.width=d,h.minWidth=e,h.maxWidth=f)),void 0===g?g:g+""}):y.documentElement.currentStyle&&(Ib=function(a){return a.currentStyle},Jb=function(a,b,c){var d,e,f,g,h=a.style;return c=c||Ib(a),g=c?c[b]:void 0,null==g&&h&&h[b]&&(g=h[b]),Hb.test(g)&&!Kb.test(b)&&(d=h.left,e=a.runtimeStyle,f=e&&e.left,f&&(e.left=a.currentStyle.left),h.left="fontSize"===b?"1em":g,g=h.pixelLeft+"px",h.left=d,f&&(e.left=f)),void 0===g?g:g+""||"auto"});function Lb(a,b){return{get:function(){var c=a();if(null!=c)return c?void delete this.get:(this.get=b).apply(this,arguments)}}}!function(){var b,c,d,e,f,g,h;if(b=y.createElement("div"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=d&&d.style){c.cssText="float:left;opacity:.5",k.opacity="0.5"===c.opacity,k.cssFloat=!!c.cssFloat,b.style.backgroundClip="content-box",b.cloneNode(!0).style.backgroundClip="",k.clearCloneStyle="content-box"===b.style.backgroundClip,k.boxSizing=""===c.boxSizing||""===c.MozBoxSizing||""===c.WebkitBoxSizing,m.extend(k,{reliableHiddenOffsets:function(){return null==g&&i(),g},boxSizingReliable:function(){return null==f&&i(),f},pixelPosition:function(){return null==e&&i(),e},reliableMarginRight:function(){return null==h&&i(),h}});function i(){var b,c,d,i;c=y.getElementsByTagName("body")[0],c&&c.style&&(b=y.createElement("div"),d=y.createElement("div"),d.style.cssText="position:absolute;border:0;width:0;height:0;top:0;left:-9999px",c.appendChild(d).appendChild(b),b.style.cssText="-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box;display:block;margin-top:1%;top:1%;border:1px;padding:1px;width:4px;position:absolute",e=f=!1,h=!0,a.getComputedStyle&&(e="1%"!==(a.getComputedStyle(b,null)||{}).top,f="4px"===(a.getComputedStyle(b,null)||{width:"4px"}).width,i=b.appendChild(y.createElement("div")),i.style.cssText=b.style.cssText="-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;display:block;margin:0;border:0;padding:0",i.style.marginRight=i.style.width="0",b.style.width="1px",h=!parseFloat((a.getComputedStyle(i,null)||{}).marginRight)),b.innerHTML="<table><tr><td></td><td>t</td></tr></table>",i=b.getElementsByTagName("td"),i[0].style.cssText="margin:0;border:0;padding:0;display:none",g=0===i[0].offsetHeight,g&&(i[0].style.display="",i[1].style.display="none",g=0===i[0].offsetHeight),c.removeChild(d))}}}(),m.swap=function(a,b,c,d){var e,f,g={};for(f in b)g[f]=a.style[f],a.style[f]=b[f];e=c.apply(a,d||[]);for(f in b)a.style[f]=g[f];return e};var Mb=/alpha\([^)]*\)/i,Nb=/opacity\s*=\s*([^)]*)/,Ob=/^(none|table(?!-c[ea]).+)/,Pb=new RegExp("^("+S+")(.*)$","i"),Qb=new RegExp("^([+-])=("+S+")","i"),Rb={position:"absolute",visibility:"hidden",display:"block"},Sb={letterSpacing:"0",fontWeight:"400"},Tb=["Webkit","O","Moz","ms"];function Ub(a,b){if(b in a)return b;var c=b.charAt(0).toUpperCase()+b.slice(1),d=b,e=Tb.length;while(e--)if(b=Tb[e]+c,b in a)return b;return d}function Vb(a,b){for(var 
c,d,e,f=[],g=0,h=a.length;h>g;g++)d=a[g],d.style&&(f[g]=m._data(d,"olddisplay"),c=d.style.display,b?(f[g]||"none"!==c||(d.style.display=""),""===d.style.display&&U(d)&&(f[g]=m._data(d,"olddisplay",Fb(d.nodeName)))):(e=U(d),(c&&"none"!==c||!e)&&m._data(d,"olddisplay",e?c:m.css(d,"display"))));for(g=0;h>g;g++)d=a[g],d.style&&(b&&"none"!==d.style.display&&""!==d.style.display||(d.style.display=b?f[g]||"":"none"));return a}function Wb(a,b,c){var d=Pb.exec(b);return d?Math.max(0,d[1]-(c||0))+(d[2]||"px"):b}function Xb(a,b,c,d,e){for(var f=c===(d?"border":"content")?4:"width"===b?1:0,g=0;4>f;f+=2)"margin"===c&&(g+=m.css(a,c+T[f],!0,e)),d?("content"===c&&(g-=m.css(a,"padding"+T[f],!0,e)),"margin"!==c&&(g-=m.css(a,"border"+T[f]+"Width",!0,e))):(g+=m.css(a,"padding"+T[f],!0,e),"padding"!==c&&(g+=m.css(a,"border"+T[f]+"Width",!0,e)));return g}function Yb(a,b,c){var d=!0,e="width"===b?a.offsetWidth:a.offsetHeight,f=Ib(a),g=k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,f);if(0>=e||null==e){if(e=Jb(a,b,f),(0>e||null==e)&&(e=a.style[b]),Hb.test(e))return e;d=g&&(k.boxSizingReliable()||e===a.style[b]),e=parseFloat(e)||0}return e+Xb(a,b,c||(g?"border":"content"),d,f)+"px"}m.extend({cssHooks:{opacity:{get:function(a,b){if(b){var c=Jb(a,"opacity");return""===c?"1":c}}}},cssNumber:{columnCount:!0,fillOpacity:!0,flexGrow:!0,flexShrink:!0,fontWeight:!0,lineHeight:!0,opacity:!0,order:!0,orphans:!0,widows:!0,zIndex:!0,zoom:!0},cssProps:{"float":k.cssFloat?"cssFloat":"styleFloat"},style:function(a,b,c,d){if(a&&3!==a.nodeType&&8!==a.nodeType&&a.style){var e,f,g,h=m.camelCase(b),i=a.style;if(b=m.cssProps[h]||(m.cssProps[h]=Ub(i,h)),g=m.cssHooks[b]||m.cssHooks[h],void 0===c)return g&&"get"in g&&void 0!==(e=g.get(a,!1,d))?e:i[b];if(f=typeof c,"string"===f&&(e=Qb.exec(c))&&(c=(e[1]+1)*e[2]+parseFloat(m.css(a,b)),f="number"),null!=c&&c===c&&("number"!==f||m.cssNumber[h]||(c+="px"),k.clearCloneStyle||""!==c||0!==b.indexOf("background")||(i[b]="inherit"),!(g&&"set"in g&&void 0===(c=g.set(a,c,d)))))try{i[b]=c}catch(j){}}},css:function(a,b,c,d){var e,f,g,h=m.camelCase(b);return b=m.cssProps[h]||(m.cssProps[h]=Ub(a.style,h)),g=m.cssHooks[b]||m.cssHooks[h],g&&"get"in g&&(f=g.get(a,!0,c)),void 0===f&&(f=Jb(a,b,d)),"normal"===f&&b in Sb&&(f=Sb[b]),""===c||c?(e=parseFloat(f),c===!0||m.isNumeric(e)?e||0:f):f}}),m.each(["height","width"],function(a,b){m.cssHooks[b]={get:function(a,c,d){return c?Ob.test(m.css(a,"display"))&&0===a.offsetWidth?m.swap(a,Rb,function(){return Yb(a,b,d)}):Yb(a,b,d):void 0},set:function(a,c,d){var e=d&&Ib(a);return Wb(a,c,d?Xb(a,b,d,k.boxSizing&&"border-box"===m.css(a,"boxSizing",!1,e),e):0)}}}),k.opacity||(m.cssHooks.opacity={get:function(a,b){return Nb.test((b&&a.currentStyle?a.currentStyle.filter:a.style.filter)||"")?.01*parseFloat(RegExp.$1)+"":b?"1":""},set:function(a,b){var c=a.style,d=a.currentStyle,e=m.isNumeric(b)?"alpha(opacity="+100*b+")":"",f=d&&d.filter||c.filter||"";c.zoom=1,(b>=1||""===b)&&""===m.trim(f.replace(Mb,""))&&c.removeAttribute&&(c.removeAttribute("filter"),""===b||d&&!d.filter)||(c.filter=Mb.test(f)?f.replace(Mb,e):f+" "+e)}}),m.cssHooks.marginRight=Lb(k.reliableMarginRight,function(a,b){return b?m.swap(a,{display:"inline-block"},Jb,[a,"marginRight"]):void 0}),m.each({margin:"",padding:"",border:"Width"},function(a,b){m.cssHooks[a+b]={expand:function(c){for(var d=0,e={},f="string"==typeof c?c.split(" "):[c];4>d;d++)e[a+T[d]+b]=f[d]||f[d-2]||f[0];return e}},Gb.test(a)||(m.cssHooks[a+b].set=Wb)}),m.fn.extend({css:function(a,b){return V(this,function(a,b,c){var 
d,e,f={},g=0;if(m.isArray(b)){for(d=Ib(a),e=b.length;e>g;g++)f[b[g]]=m.css(a,b[g],!1,d);return f}return void 0!==c?m.style(a,b,c):m.css(a,b)},a,b,arguments.length>1)},show:function(){return Vb(this,!0)},hide:function(){return Vb(this)},toggle:function(a){return"boolean"==typeof a?a?this.show():this.hide():this.each(function(){U(this)?m(this).show():m(this).hide()})}});function Zb(a,b,c,d,e){return new Zb.prototype.init(a,b,c,d,e)}m.Tween=Zb,Zb.prototype={constructor:Zb,init:function(a,b,c,d,e,f){this.elem=a,this.prop=c,this.easing=e||"swing",this.options=b,this.start=this.now=this.cur(),this.end=d,this.unit=f||(m.cssNumber[c]?"":"px")
-},cur:function(){var a=Zb.propHooks[this.prop];return a&&a.get?a.get(this):Zb.propHooks._default.get(this)},run:function(a){var b,c=Zb.propHooks[this.prop];return this.pos=b=this.options.duration?m.easing[this.easing](a,this.options.duration*a,0,1,this.options.duration):a,this.now=(this.end-this.start)*b+this.start,this.options.step&&this.options.step.call(this.elem,this.now,this),c&&c.set?c.set(this):Zb.propHooks._default.set(this),this}},Zb.prototype.init.prototype=Zb.prototype,Zb.propHooks={_default:{get:function(a){var b;return null==a.elem[a.prop]||a.elem.style&&null!=a.elem.style[a.prop]?(b=m.css(a.elem,a.prop,""),b&&"auto"!==b?b:0):a.elem[a.prop]},set:function(a){m.fx.step[a.prop]?m.fx.step[a.prop](a):a.elem.style&&(null!=a.elem.style[m.cssProps[a.prop]]||m.cssHooks[a.prop])?m.style(a.elem,a.prop,a.now+a.unit):a.elem[a.prop]=a.now}}},Zb.propHooks.scrollTop=Zb.propHooks.scrollLeft={set:function(a){a.elem.nodeType&&a.elem.parentNode&&(a.elem[a.prop]=a.now)}},m.easing={linear:function(a){return a},swing:function(a){return.5-Math.cos(a*Math.PI)/2}},m.fx=Zb.prototype.init,m.fx.step={};var $b,_b,ac=/^(?:toggle|show|hide)$/,bc=new RegExp("^(?:([+-])=|)("+S+")([a-z%]*)$","i"),cc=/queueHooks$/,dc=[ic],ec={"*":[function(a,b){var c=this.createTween(a,b),d=c.cur(),e=bc.exec(b),f=e&&e[3]||(m.cssNumber[a]?"":"px"),g=(m.cssNumber[a]||"px"!==f&&+d)&&bc.exec(m.css(c.elem,a)),h=1,i=20;if(g&&g[3]!==f){f=f||g[3],e=e||[],g=+d||1;do h=h||".5",g/=h,m.style(c.elem,a,g+f);while(h!==(h=c.cur()/d)&&1!==h&&--i)}return e&&(g=c.start=+g||+d||0,c.unit=f,c.end=e[1]?g+(e[1]+1)*e[2]:+e[2]),c}]};function fc(){return setTimeout(function(){$b=void 0}),$b=m.now()}function gc(a,b){var c,d={height:a},e=0;for(b=b?1:0;4>e;e+=2-b)c=T[e],d["margin"+c]=d["padding"+c]=a;return b&&(d.opacity=d.width=a),d}function hc(a,b,c){for(var d,e=(ec[b]||[]).concat(ec["*"]),f=0,g=e.length;g>f;f++)if(d=e[f].call(c,b,a))return d}function ic(a,b,c){var d,e,f,g,h,i,j,l,n=this,o={},p=a.style,q=a.nodeType&&U(a),r=m._data(a,"fxshow");c.queue||(h=m._queueHooks(a,"fx"),null==h.unqueued&&(h.unqueued=0,i=h.empty.fire,h.empty.fire=function(){h.unqueued||i()}),h.unqueued++,n.always(function(){n.always(function(){h.unqueued--,m.queue(a,"fx").length||h.empty.fire()})})),1===a.nodeType&&("height"in b||"width"in b)&&(c.overflow=[p.overflow,p.overflowX,p.overflowY],j=m.css(a,"display"),l="none"===j?m._data(a,"olddisplay")||Fb(a.nodeName):j,"inline"===l&&"none"===m.css(a,"float")&&(k.inlineBlockNeedsLayout&&"inline"!==Fb(a.nodeName)?p.zoom=1:p.display="inline-block")),c.overflow&&(p.overflow="hidden",k.shrinkWrapBlocks()||n.always(function(){p.overflow=c.overflow[0],p.overflowX=c.overflow[1],p.overflowY=c.overflow[2]}));for(d in b)if(e=b[d],ac.exec(e)){if(delete b[d],f=f||"toggle"===e,e===(q?"hide":"show")){if("show"!==e||!r||void 0===r[d])continue;q=!0}o[d]=r&&r[d]||m.style(a,d)}else j=void 0;if(m.isEmptyObject(o))"inline"===("none"===j?Fb(a.nodeName):j)&&(p.display=j);else{r?"hidden"in r&&(q=r.hidden):r=m._data(a,"fxshow",{}),f&&(r.hidden=!q),q?m(a).show():n.done(function(){m(a).hide()}),n.done(function(){var b;m._removeData(a,"fxshow");for(b in o)m.style(a,b,o[b])});for(d in o)g=hc(q?r[d]:0,d,n),d in r||(r[d]=g.start,q&&(g.end=g.start,g.start="width"===d||"height"===d?1:0))}}function jc(a,b){var c,d,e,f,g;for(c in a)if(d=m.camelCase(c),e=b[d],f=a[c],m.isArray(f)&&(e=f[1],f=a[c]=f[0]),c!==d&&(a[d]=f,delete a[c]),g=m.cssHooks[d],g&&"expand"in g){f=g.expand(f),delete a[d];for(c in f)c in a||(a[c]=f[c],b[c]=e)}else b[d]=e}function kc(a,b,c){var 
d,e,f=0,g=dc.length,h=m.Deferred().always(function(){delete i.elem}),i=function(){if(e)return!1;for(var b=$b||fc(),c=Math.max(0,j.startTime+j.duration-b),d=c/j.duration||0,f=1-d,g=0,i=j.tweens.length;i>g;g++)j.tweens[g].run(f);return h.notifyWith(a,[j,f,c]),1>f&&i?c:(h.resolveWith(a,[j]),!1)},j=h.promise({elem:a,props:m.extend({},b),opts:m.extend(!0,{specialEasing:{}},c),originalProperties:b,originalOptions:c,startTime:$b||fc(),duration:c.duration,tweens:[],createTween:function(b,c){var d=m.Tween(a,j.opts,b,c,j.opts.specialEasing[b]||j.opts.easing);return j.tweens.push(d),d},stop:function(b){var c=0,d=b?j.tweens.length:0;if(e)return this;for(e=!0;d>c;c++)j.tweens[c].run(1);return b?h.resolveWith(a,[j,b]):h.rejectWith(a,[j,b]),this}}),k=j.props;for(jc(k,j.opts.specialEasing);g>f;f++)if(d=dc[f].call(j,a,k,j.opts))return d;return m.map(k,hc,j),m.isFunction(j.opts.start)&&j.opts.start.call(a,j),m.fx.timer(m.extend(i,{elem:a,anim:j,queue:j.opts.queue})),j.progress(j.opts.progress).done(j.opts.done,j.opts.complete).fail(j.opts.fail).always(j.opts.always)}m.Animation=m.extend(kc,{tweener:function(a,b){m.isFunction(a)?(b=a,a=["*"]):a=a.split(" ");for(var c,d=0,e=a.length;e>d;d++)c=a[d],ec[c]=ec[c]||[],ec[c].unshift(b)},prefilter:function(a,b){b?dc.unshift(a):dc.push(a)}}),m.speed=function(a,b,c){var d=a&&"object"==typeof a?m.extend({},a):{complete:c||!c&&b||m.isFunction(a)&&a,duration:a,easing:c&&b||b&&!m.isFunction(b)&&b};return d.duration=m.fx.off?0:"number"==typeof d.duration?d.duration:d.duration in m.fx.speeds?m.fx.speeds[d.duration]:m.fx.speeds._default,(null==d.queue||d.queue===!0)&&(d.queue="fx"),d.old=d.complete,d.complete=function(){m.isFunction(d.old)&&d.old.call(this),d.queue&&m.dequeue(this,d.queue)},d},m.fn.extend({fadeTo:function(a,b,c,d){return this.filter(U).css("opacity",0).show().end().animate({opacity:b},a,c,d)},animate:function(a,b,c,d){var e=m.isEmptyObject(a),f=m.speed(b,c,d),g=function(){var b=kc(this,m.extend({},a),f);(e||m._data(this,"finish"))&&b.stop(!0)};return g.finish=g,e||f.queue===!1?this.each(g):this.queue(f.queue,g)},stop:function(a,b,c){var d=function(a){var b=a.stop;delete a.stop,b(c)};return"string"!=typeof a&&(c=b,b=a,a=void 0),b&&a!==!1&&this.queue(a||"fx",[]),this.each(function(){var b=!0,e=null!=a&&a+"queueHooks",f=m.timers,g=m._data(this);if(e)g[e]&&g[e].stop&&d(g[e]);else for(e in g)g[e]&&g[e].stop&&cc.test(e)&&d(g[e]);for(e=f.length;e--;)f[e].elem!==this||null!=a&&f[e].queue!==a||(f[e].anim.stop(c),b=!1,f.splice(e,1));(b||!c)&&m.dequeue(this,a)})},finish:function(a){return a!==!1&&(a=a||"fx"),this.each(function(){var b,c=m._data(this),d=c[a+"queue"],e=c[a+"queueHooks"],f=m.timers,g=d?d.length:0;for(c.finish=!0,m.queue(this,a,[]),e&&e.stop&&e.stop.call(this,!0),b=f.length;b--;)f[b].elem===this&&f[b].queue===a&&(f[b].anim.stop(!0),f.splice(b,1));for(b=0;g>b;b++)d[b]&&d[b].finish&&d[b].finish.call(this);delete c.finish})}}),m.each(["toggle","show","hide"],function(a,b){var c=m.fn[b];m.fn[b]=function(a,d,e){return null==a||"boolean"==typeof a?c.apply(this,arguments):this.animate(gc(b,!0),a,d,e)}}),m.each({slideDown:gc("show"),slideUp:gc("hide"),slideToggle:gc("toggle"),fadeIn:{opacity:"show"},fadeOut:{opacity:"hide"},fadeToggle:{opacity:"toggle"}},function(a,b){m.fn[a]=function(a,c,d){return this.animate(b,a,c,d)}}),m.timers=[],m.fx.tick=function(){var a,b=m.timers,c=0;for($b=m.now();c<b.length;c++)a=b[c],a()||b[c]!==a||b.splice(c--,1);b.length||m.fx.stop(),$b=void 
0},m.fx.timer=function(a){m.timers.push(a),a()?m.fx.start():m.timers.pop()},m.fx.interval=13,m.fx.start=function(){_b||(_b=setInterval(m.fx.tick,m.fx.interval))},m.fx.stop=function(){clearInterval(_b),_b=null},m.fx.speeds={slow:600,fast:200,_default:400},m.fn.delay=function(a,b){return a=m.fx?m.fx.speeds[a]||a:a,b=b||"fx",this.queue(b,function(b,c){var d=setTimeout(b,a);c.stop=function(){clearTimeout(d)}})},function(){var a,b,c,d,e;b=y.createElement("div"),b.setAttribute("className","t"),b.innerHTML=" <link/><table></table><a href='/a'>a</a><input type='checkbox'/>",d=b.getElementsByTagName("a")[0],c=y.createElement("select"),e=c.appendChild(y.createElement("option")),a=b.getElementsByTagName("input")[0],d.style.cssText="top:1px",k.getSetAttribute="t"!==b.className,k.style=/top/.test(d.getAttribute("style")),k.hrefNormalized="/a"===d.getAttribute("href"),k.checkOn=!!a.value,k.optSelected=e.selected,k.enctype=!!y.createElement("form").enctype,c.disabled=!0,k.optDisabled=!e.disabled,a=y.createElement("input"),a.setAttribute("value",""),k.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),k.radioValue="t"===a.value}();var lc=/\r/g;m.fn.extend({val:function(a){var b,c,d,e=this[0];{if(arguments.length)return d=m.isFunction(a),this.each(function(c){var e;1===this.nodeType&&(e=d?a.call(this,c,m(this).val()):a,null==e?e="":"number"==typeof e?e+="":m.isArray(e)&&(e=m.map(e,function(a){return null==a?"":a+""})),b=m.valHooks[this.type]||m.valHooks[this.nodeName.toLowerCase()],b&&"set"in b&&void 0!==b.set(this,e,"value")||(this.value=e))});if(e)return b=m.valHooks[e.type]||m.valHooks[e.nodeName.toLowerCase()],b&&"get"in b&&void 0!==(c=b.get(e,"value"))?c:(c=e.value,"string"==typeof c?c.replace(lc,""):null==c?"":c)}}}),m.extend({valHooks:{option:{get:function(a){var b=m.find.attr(a,"value");return null!=b?b:m.trim(m.text(a))}},select:{get:function(a){for(var b,c,d=a.options,e=a.selectedIndex,f="select-one"===a.type||0>e,g=f?null:[],h=f?e+1:d.length,i=0>e?h:f?e:0;h>i;i++)if(c=d[i],!(!c.selected&&i!==e||(k.optDisabled?c.disabled:null!==c.getAttribute("disabled"))||c.parentNode.disabled&&m.nodeName(c.parentNode,"optgroup"))){if(b=m(c).val(),f)return b;g.push(b)}return g},set:function(a,b){var c,d,e=a.options,f=m.makeArray(b),g=e.length;while(g--)if(d=e[g],m.inArray(m.valHooks.option.get(d),f)>=0)try{d.selected=c=!0}catch(h){d.scrollHeight}else d.selected=!1;return c||(a.selectedIndex=-1),e}}}}),m.each(["radio","checkbox"],function(){m.valHooks[this]={set:function(a,b){return m.isArray(b)?a.checked=m.inArray(m(a).val(),b)>=0:void 0}},k.checkOn||(m.valHooks[this].get=function(a){return null===a.getAttribute("value")?"on":a.value})});var mc,nc,oc=m.expr.attrHandle,pc=/^(?:checked|selected)$/i,qc=k.getSetAttribute,rc=k.input;m.fn.extend({attr:function(a,b){return V(this,m.attr,a,b,arguments.length>1)},removeAttr:function(a){return this.each(function(){m.removeAttr(this,a)})}}),m.extend({attr:function(a,b,c){var d,e,f=a.nodeType;if(a&&3!==f&&8!==f&&2!==f)return typeof a.getAttribute===K?m.prop(a,b,c):(1===f&&m.isXMLDoc(a)||(b=b.toLowerCase(),d=m.attrHooks[b]||(m.expr.match.bool.test(b)?nc:mc)),void 0===c?d&&"get"in d&&null!==(e=d.get(a,b))?e:(e=m.find.attr(a,b),null==e?void 0:e):null!==c?d&&"set"in d&&void 0!==(e=d.set(a,c,b))?e:(a.setAttribute(b,c+""),c):void m.removeAttr(a,b))},removeAttr:function(a,b){var 
c,d,e=0,f=b&&b.match(E);if(f&&1===a.nodeType)while(c=f[e++])d=m.propFix[c]||c,m.expr.match.bool.test(c)?rc&&qc||!pc.test(c)?a[d]=!1:a[m.camelCase("default-"+c)]=a[d]=!1:m.attr(a,c,""),a.removeAttribute(qc?c:d)},attrHooks:{type:{set:function(a,b){if(!k.radioValue&&"radio"===b&&m.nodeName(a,"input")){var c=a.value;return a.setAttribute("type",b),c&&(a.value=c),b}}}}}),nc={set:function(a,b,c){return b===!1?m.removeAttr(a,c):rc&&qc||!pc.test(c)?a.setAttribute(!qc&&m.propFix[c]||c,c):a[m.camelCase("default-"+c)]=a[c]=!0,c}},m.each(m.expr.match.bool.source.match(/\w+/g),function(a,b){var c=oc[b]||m.find.attr;oc[b]=rc&&qc||!pc.test(b)?function(a,b,d){var e,f;return d||(f=oc[b],oc[b]=e,e=null!=c(a,b,d)?b.toLowerCase():null,oc[b]=f),e}:function(a,b,c){return c?void 0:a[m.camelCase("default-"+b)]?b.toLowerCase():null}}),rc&&qc||(m.attrHooks.value={set:function(a,b,c){return m.nodeName(a,"input")?void(a.defaultValue=b):mc&&mc.set(a,b,c)}}),qc||(mc={set:function(a,b,c){var d=a.getAttributeNode(c);return d||a.setAttributeNode(d=a.ownerDocument.createAttribute(c)),d.value=b+="","value"===c||b===a.getAttribute(c)?b:void 0}},oc.id=oc.name=oc.coords=function(a,b,c){var d;return c?void 0:(d=a.getAttributeNode(b))&&""!==d.value?d.value:null},m.valHooks.button={get:function(a,b){var c=a.getAttributeNode(b);return c&&c.specified?c.value:void 0},set:mc.set},m.attrHooks.contenteditable={set:function(a,b,c){mc.set(a,""===b?!1:b,c)}},m.each(["width","height"],function(a,b){m.attrHooks[b]={set:function(a,c){return""===c?(a.setAttribute(b,"auto"),c):void 0}}})),k.style||(m.attrHooks.style={get:function(a){return a.style.cssText||void 0},set:function(a,b){return a.style.cssText=b+""}});var sc=/^(?:input|select|textarea|button|object)$/i,tc=/^(?:a|area)$/i;m.fn.extend({prop:function(a,b){return V(this,m.prop,a,b,arguments.length>1)},removeProp:function(a){return a=m.propFix[a]||a,this.each(function(){try{this[a]=void 0,delete this[a]}catch(b){}})}}),m.extend({propFix:{"for":"htmlFor","class":"className"},prop:function(a,b,c){var d,e,f,g=a.nodeType;if(a&&3!==g&&8!==g&&2!==g)return f=1!==g||!m.isXMLDoc(a),f&&(b=m.propFix[b]||b,e=m.propHooks[b]),void 0!==c?e&&"set"in e&&void 0!==(d=e.set(a,c,b))?d:a[b]=c:e&&"get"in e&&null!==(d=e.get(a,b))?d:a[b]},propHooks:{tabIndex:{get:function(a){var b=m.find.attr(a,"tabindex");return b?parseInt(b,10):sc.test(a.nodeName)||tc.test(a.nodeName)&&a.href?0:-1}}}}),k.hrefNormalized||m.each(["href","src"],function(a,b){m.propHooks[b]={get:function(a){return a.getAttribute(b,4)}}}),k.optSelected||(m.propHooks.selected={get:function(a){var b=a.parentNode;return b&&(b.selectedIndex,b.parentNode&&b.parentNode.selectedIndex),null}}),m.each(["tabIndex","readOnly","maxLength","cellSpacing","cellPadding","rowSpan","colSpan","useMap","frameBorder","contentEditable"],function(){m.propFix[this.toLowerCase()]=this}),k.enctype||(m.propFix.enctype="encoding");var uc=/[\t\r\n\f]/g;m.fn.extend({addClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j="string"==typeof a&&a;if(m.isFunction(a))return this.each(function(b){m(this).addClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):" ")){f=0;while(e=b[f++])d.indexOf(" "+e+" ")<0&&(d+=e+" ");g=m.trim(d),c.className!==g&&(c.className=g)}return this},removeClass:function(a){var b,c,d,e,f,g,h=0,i=this.length,j=0===arguments.length||"string"==typeof a&&a;if(m.isFunction(a))return 
this.each(function(b){m(this).removeClass(a.call(this,b,this.className))});if(j)for(b=(a||"").match(E)||[];i>h;h++)if(c=this[h],d=1===c.nodeType&&(c.className?(" "+c.className+" ").replace(uc," "):"")){f=0;while(e=b[f++])while(d.indexOf(" "+e+" ")>=0)d=d.replace(" "+e+" "," ");g=a?m.trim(d):"",c.className!==g&&(c.className=g)}return this},toggleClass:function(a,b){var c=typeof a;return"boolean"==typeof b&&"string"===c?b?this.addClass(a):this.removeClass(a):this.each(m.isFunction(a)?function(c){m(this).toggleClass(a.call(this,c,this.className,b),b)}:function(){if("string"===c){var b,d=0,e=m(this),f=a.match(E)||[];while(b=f[d++])e.hasClass(b)?e.removeClass(b):e.addClass(b)}else(c===K||"boolean"===c)&&(this.className&&m._data(this,"__className__",this.className),this.className=this.className||a===!1?"":m._data(this,"__className__")||"")})},hasClass:function(a){for(var b=" "+a+" ",c=0,d=this.length;d>c;c++)if(1===this[c].nodeType&&(" "+this[c].className+" ").replace(uc," ").indexOf(b)>=0)return!0;return!1}}),m.each("blur focus focusin focusout load resize scroll unload click dblclick mousedown mouseup mousemove mouseover mouseout mouseenter mouseleave change select submit keydown keypress keyup error contextmenu".split(" "),function(a,b){m.fn[b]=function(a,c){return arguments.length>0?this.on(b,null,a,c):this.trigger(b)}}),m.fn.extend({hover:function(a,b){return this.mouseenter(a).mouseleave(b||a)},bind:function(a,b,c){return this.on(a,null,b,c)},unbind:function(a,b){return this.off(a,null,b)},delegate:function(a,b,c,d){return this.on(b,a,c,d)},undelegate:function(a,b,c){return 1===arguments.length?this.off(a,"**"):this.off(b,a||"**",c)}});var vc=m.now(),wc=/\?/,xc=/(,)|(\[|{)|(}|])|"(?:[^"\\\r\n]|\\["\\\/bfnrt]|\\u[\da-fA-F]{4})*"\s*:?|true|false|null|-?(?!0\d)\d+(?:\.\d+|)(?:[eE][+-]?\d+|)/g;m.parseJSON=function(b){if(a.JSON&&a.JSON.parse)return a.JSON.parse(b+"");var c,d=null,e=m.trim(b+"");return e&&!m.trim(e.replace(xc,function(a,b,e,f){return c&&b&&(d=0),0===d?a:(c=e||b,d+=!f-!e,"")}))?Function("return "+e)():m.error("Invalid JSON: "+b)},m.parseXML=function(b){var c,d;if(!b||"string"!=typeof b)return null;try{a.DOMParser?(d=new DOMParser,c=d.parseFromString(b,"text/xml")):(c=new ActiveXObject("Microsoft.XMLDOM"),c.async="false",c.loadXML(b))}catch(e){c=void 0}return c&&c.documentElement&&!c.getElementsByTagName("parsererror").length||m.error("Invalid XML: "+b),c};var yc,zc,Ac=/#.*$/,Bc=/([?&])_=[^&]*/,Cc=/^(.*?):[ \t]*([^\r\n]*)\r?$/gm,Dc=/^(?:about|app|app-storage|.+-extension|file|res|widget):$/,Ec=/^(?:GET|HEAD)$/,Fc=/^\/\//,Gc=/^([\w.+-]+:)(?:\/\/(?:[^\/?#]*@|)([^\/?#:]*)(?::(\d+)|)|)/,Hc={},Ic={},Jc="*/".concat("*");try{zc=location.href}catch(Kc){zc=y.createElement("a"),zc.href="",zc=zc.href}yc=Gc.exec(zc.toLowerCase())||[];function Lc(a){return function(b,c){"string"!=typeof b&&(c=b,b="*");var d,e=0,f=b.toLowerCase().match(E)||[];if(m.isFunction(c))while(d=f[e++])"+"===d.charAt(0)?(d=d.slice(1)||"*",(a[d]=a[d]||[]).unshift(c)):(a[d]=a[d]||[]).push(c)}}function Mc(a,b,c,d){var e={},f=a===Ic;function g(h){var i;return e[h]=!0,m.each(a[h]||[],function(a,h){var j=h(b,c,d);return"string"!=typeof j||f||e[j]?f?!(i=j):void 0:(b.dataTypes.unshift(j),g(j),!1)}),i}return g(b.dataTypes[0])||!e["*"]&&g("*")}function Nc(a,b){var c,d,e=m.ajaxSettings.flatOptions||{};for(d in b)void 0!==b[d]&&((e[d]?a:c||(c={}))[d]=b[d]);return c&&m.extend(!0,a,c),a}function Oc(a,b,c){var d,e,f,g,h=a.contents,i=a.dataTypes;while("*"===i[0])i.shift(),void 
0===e&&(e=a.mimeType||b.getResponseHeader("Content-Type"));if(e)for(g in h)if(h[g]&&h[g].test(e)){i.unshift(g);break}if(i[0]in c)f=i[0];else{for(g in c){if(!i[0]||a.converters[g+" "+i[0]]){f=g;break}d||(d=g)}f=f||d}return f?(f!==i[0]&&i.unshift(f),c[f]):void 0}function Pc(a,b,c,d){var e,f,g,h,i,j={},k=a.dataTypes.slice();if(k[1])for(g in a.converters)j[g.toLowerCase()]=a.converters[g];f=k.shift();while(f)if(a.responseFields[f]&&(c[a.responseFields[f]]=b),!i&&d&&a.dataFilter&&(b=a.dataFilter(b,a.dataType)),i=f,f=k.shift())if("*"===f)f=i;else if("*"!==i&&i!==f){if(g=j[i+" "+f]||j["* "+f],!g)for(e in j)if(h=e.split(" "),h[1]===f&&(g=j[i+" "+h[0]]||j["* "+h[0]])){g===!0?g=j[e]:j[e]!==!0&&(f=h[0],k.unshift(h[1]));break}if(g!==!0)if(g&&a["throws"])b=g(b);else try{b=g(b)}catch(l){return{state:"parsererror",error:g?l:"No conversion from "+i+" to "+f}}}return{state:"success",data:b}}m.extend({active:0,lastModified:{},etag:{},ajaxSettings:{url:zc,type:"GET",isLocal:Dc.test(yc[1]),global:!0,processData:!0,async:!0,contentType:"application/x-www-form-urlencoded; charset=UTF-8",accepts:{"*":Jc,text:"text/plain",html:"text/html",xml:"application/xml, text/xml",json:"application/json, text/javascript"},contents:{xml:/xml/,html:/html/,json:/json/},responseFields:{xml:"responseXML",text:"responseText",json:"responseJSON"},converters:{"* text":String,"text html":!0,"text json":m.parseJSON,"text xml":m.parseXML},flatOptions:{url:!0,context:!0}},ajaxSetup:function(a,b){return b?Nc(Nc(a,m.ajaxSettings),b):Nc(m.ajaxSettings,a)},ajaxPrefilter:Lc(Hc),ajaxTransport:Lc(Ic),ajax:function(a,b){"object"==typeof a&&(b=a,a=void 0),b=b||{};var c,d,e,f,g,h,i,j,k=m.ajaxSetup({},b),l=k.context||k,n=k.context&&(l.nodeType||l.jquery)?m(l):m.event,o=m.Deferred(),p=m.Callbacks("once memory"),q=k.statusCode||{},r={},s={},t=0,u="canceled",v={readyState:0,getResponseHeader:function(a){var b;if(2===t){if(!j){j={};while(b=Cc.exec(f))j[b[1].toLowerCase()]=b[2]}b=j[a.toLowerCase()]}return null==b?null:b},getAllResponseHeaders:function(){return 2===t?f:null},setRequestHeader:function(a,b){var c=a.toLowerCase();return t||(a=s[c]=s[c]||a,r[a]=b),this},overrideMimeType:function(a){return t||(k.mimeType=a),this},statusCode:function(a){var b;if(a)if(2>t)for(b in a)q[b]=[q[b],a[b]];else v.always(a[v.status]);return this},abort:function(a){var b=a||u;return i&&i.abort(b),x(0,b),this}};if(o.promise(v).complete=p.add,v.success=v.done,v.error=v.fail,k.url=((a||k.url||zc)+"").replace(Ac,"").replace(Fc,yc[1]+"//"),k.type=b.method||b.type||k.method||k.type,k.dataTypes=m.trim(k.dataType||"*").toLowerCase().match(E)||[""],null==k.crossDomain&&(c=Gc.exec(k.url.toLowerCase()),k.crossDomain=!(!c||c[1]===yc[1]&&c[2]===yc[2]&&(c[3]||("http:"===c[1]?"80":"443"))===(yc[3]||("http:"===yc[1]?"80":"443")))),k.data&&k.processData&&"string"!=typeof k.data&&(k.data=m.param(k.data,k.traditional)),Mc(Hc,k,b,v),2===t)return v;h=k.global,h&&0===m.active++&&m.event.trigger("ajaxStart"),k.type=k.type.toUpperCase(),k.hasContent=!Ec.test(k.type),e=k.url,k.hasContent||(k.data&&(e=k.url+=(wc.test(e)?"&":"?")+k.data,delete 
k.data),k.cache===!1&&(k.url=Bc.test(e)?e.replace(Bc,"$1_="+vc++):e+(wc.test(e)?"&":"?")+"_="+vc++)),k.ifModified&&(m.lastModified[e]&&v.setRequestHeader("If-Modified-Since",m.lastModified[e]),m.etag[e]&&v.setRequestHeader("If-None-Match",m.etag[e])),(k.data&&k.hasContent&&k.contentType!==!1||b.contentType)&&v.setRequestHeader("Content-Type",k.contentType),v.setRequestHeader("Accept",k.dataTypes[0]&&k.accepts[k.dataTypes[0]]?k.accepts[k.dataTypes[0]]+("*"!==k.dataTypes[0]?", "+Jc+"; q=0.01":""):k.accepts["*"]);for(d in k.headers)v.setRequestHeader(d,k.headers[d]);if(k.beforeSend&&(k.beforeSend.call(l,v,k)===!1||2===t))return v.abort();u="abort";for(d in{success:1,error:1,complete:1})v[d](k[d]);if(i=Mc(Ic,k,b,v)){v.readyState=1,h&&n.trigger("ajaxSend",[v,k]),k.async&&k.timeout>0&&(g=setTimeout(function(){v.abort("timeout")},k.timeout));try{t=1,i.send(r,x)}catch(w){if(!(2>t))throw w;x(-1,w)}}else x(-1,"No Transport");function x(a,b,c,d){var j,r,s,u,w,x=b;2!==t&&(t=2,g&&clearTimeout(g),i=void 0,f=d||"",v.readyState=a>0?4:0,j=a>=200&&300>a||304===a,c&&(u=Oc(k,v,c)),u=Pc(k,u,v,j),j?(k.ifModified&&(w=v.getResponseHeader("Last-Modified"),w&&(m.lastModified[e]=w),w=v.getResponseHeader("etag"),w&&(m.etag[e]=w)),204===a||"HEAD"===k.type?x="nocontent":304===a?x="notmodified":(x=u.state,r=u.data,s=u.error,j=!s)):(s=x,(a||!x)&&(x="error",0>a&&(a=0))),v.status=a,v.statusText=(b||x)+"",j?o.resolveWith(l,[r,x,v]):o.rejectWith(l,[v,x,s]),v.statusCode(q),q=void 0,h&&n.trigger(j?"ajaxSuccess":"ajaxError",[v,k,j?r:s]),p.fireWith(l,[v,x]),h&&(n.trigger("ajaxComplete",[v,k]),--m.active||m.event.trigger("ajaxStop")))}return v},getJSON:function(a,b,c){return m.get(a,b,c,"json")},getScript:function(a,b){return m.get(a,void 0,b,"script")}}),m.each(["get","post"],function(a,b){m[b]=function(a,c,d,e){return m.isFunction(c)&&(e=e||d,d=c,c=void 0),m.ajax({url:a,type:b,dataType:e,data:c,success:d})}}),m.each(["ajaxStart","ajaxStop","ajaxComplete","ajaxError","ajaxSuccess","ajaxSend"],function(a,b){m.fn[b]=function(a){return this.on(b,a)}}),m._evalUrl=function(a){return m.ajax({url:a,type:"GET",dataType:"script",async:!1,global:!1,"throws":!0})},m.fn.extend({wrapAll:function(a){if(m.isFunction(a))return this.each(function(b){m(this).wrapAll(a.call(this,b))});if(this[0]){var b=m(a,this[0].ownerDocument).eq(0).clone(!0);this[0].parentNode&&b.insertBefore(this[0]),b.map(function(){var a=this;while(a.firstChild&&1===a.firstChild.nodeType)a=a.firstChild;return a}).append(this)}return this},wrapInner:function(a){return this.each(m.isFunction(a)?function(b){m(this).wrapInner(a.call(this,b))}:function(){var b=m(this),c=b.contents();c.length?c.wrapAll(a):b.append(a)})},wrap:function(a){var b=m.isFunction(a);return this.each(function(c){m(this).wrapAll(b?a.call(this,c):a)})},unwrap:function(){return this.parent().each(function(){m.nodeName(this,"body")||m(this).replaceWith(this.childNodes)}).end()}}),m.expr.filters.hidden=function(a){return a.offsetWidth<=0&&a.offsetHeight<=0||!k.reliableHiddenOffsets()&&"none"===(a.style&&a.style.display||m.css(a,"display"))},m.expr.filters.visible=function(a){return!m.expr.filters.hidden(a)};var Qc=/%20/g,Rc=/\[\]$/,Sc=/\r?\n/g,Tc=/^(?:submit|button|image|reset|file)$/i,Uc=/^(?:input|select|textarea|keygen)/i;function Vc(a,b,c,d){var e;if(m.isArray(b))m.each(b,function(b,e){c||Rc.test(a)?d(a,e):Vc(a+"["+("object"==typeof e?b:"")+"]",e,c,d)});else if(c||"object"!==m.type(b))d(a,b);else for(e in b)Vc(a+"["+e+"]",b[e],c,d)}m.param=function(a,b){var 
c,d=[],e=function(a,b){b=m.isFunction(b)?b():null==b?"":b,d[d.length]=encodeURIComponent(a)+"="+encodeURIComponent(b)};if(void 0===b&&(b=m.ajaxSettings&&m.ajaxSettings.traditional),m.isArray(a)||a.jquery&&!m.isPlainObject(a))m.each(a,function(){e(this.name,this.value)});else for(c in a)Vc(c,a[c],b,e);return d.join("&").replace(Qc,"+")},m.fn.extend({serialize:function(){return m.param(this.serializeArray())},serializeArray:function(){return this.map(function(){var a=m.prop(this,"elements");return a?m.makeArray(a):this}).filter(function(){var a=this.type;return this.name&&!m(this).is(":disabled")&&Uc.test(this.nodeName)&&!Tc.test(a)&&(this.checked||!W.test(a))}).map(function(a,b){var c=m(this).val();return null==c?null:m.isArray(c)?m.map(c,function(a){return{name:b.name,value:a.replace(Sc,"\r\n")}}):{name:b.name,value:c.replace(Sc,"\r\n")}}).get()}}),m.ajaxSettings.xhr=void 0!==a.ActiveXObject?function(){return!this.isLocal&&/^(get|post|head|put|delete|options)$/i.test(this.type)&&Zc()||$c()}:Zc;var Wc=0,Xc={},Yc=m.ajaxSettings.xhr();a.ActiveXObject&&m(a).on("unload",function(){for(var a in Xc)Xc[a](void 0,!0)}),k.cors=!!Yc&&"withCredentials"in Yc,Yc=k.ajax=!!Yc,Yc&&m.ajaxTransport(function(a){if(!a.crossDomain||k.cors){var b;return{send:function(c,d){var e,f=a.xhr(),g=++Wc;if(f.open(a.type,a.url,a.async,a.username,a.password),a.xhrFields)for(e in a.xhrFields)f[e]=a.xhrFields[e];a.mimeType&&f.overrideMimeType&&f.overrideMimeType(a.mimeType),a.crossDomain||c["X-Requested-With"]||(c["X-Requested-With"]="XMLHttpRequest");for(e in c)void 0!==c[e]&&f.setRequestHeader(e,c[e]+"");f.send(a.hasContent&&a.data||null),b=function(c,e){var h,i,j;if(b&&(e||4===f.readyState))if(delete Xc[g],b=void 0,f.onreadystatechange=m.noop,e)4!==f.readyState&&f.abort();else{j={},h=f.status,"string"==typeof f.responseText&&(j.text=f.responseText);try{i=f.statusText}catch(k){i=""}h||!a.isLocal||a.crossDomain?1223===h&&(h=204):h=j.text?200:404}j&&d(h,i,j,f.getAllResponseHeaders())},a.async?4===f.readyState?setTimeout(b):f.onreadystatechange=Xc[g]=b:b()},abort:function(){b&&b(void 0,!0)}}}});function Zc(){try{return new a.XMLHttpRequest}catch(b){}}function $c(){try{return new a.ActiveXObject("Microsoft.XMLHTTP")}catch(b){}}m.ajaxSetup({accepts:{script:"text/javascript, application/javascript, application/ecmascript, application/x-ecmascript"},contents:{script:/(?:java|ecma)script/},converters:{"text script":function(a){return m.globalEval(a),a}}}),m.ajaxPrefilter("script",function(a){void 0===a.cache&&(a.cache=!1),a.crossDomain&&(a.type="GET",a.global=!1)}),m.ajaxTransport("script",function(a){if(a.crossDomain){var b,c=y.head||m("head")[0]||y.documentElement;return{send:function(d,e){b=y.createElement("script"),b.async=!0,a.scriptCharset&&(b.charset=a.scriptCharset),b.src=a.url,b.onload=b.onreadystatechange=function(a,c){(c||!b.readyState||/loaded|complete/.test(b.readyState))&&(b.onload=b.onreadystatechange=null,b.parentNode&&b.parentNode.removeChild(b),b=null,c||e(200,"success"))},c.insertBefore(b,c.firstChild)},abort:function(){b&&b.onload(void 0,!0)}}}});var _c=[],ad=/(=)\?(?=&|$)|\?\?/;m.ajaxSetup({jsonp:"callback",jsonpCallback:function(){var a=_c.pop()||m.expando+"_"+vc++;return this[a]=!0,a}}),m.ajaxPrefilter("json jsonp",function(b,c,d){var e,f,g,h=b.jsonp!==!1&&(ad.test(b.url)?"url":"string"==typeof b.data&&!(b.contentType||"").indexOf("application/x-www-form-urlencoded")&&ad.test(b.data)&&"data");return 
h||"jsonp"===b.dataTypes[0]?(e=b.jsonpCallback=m.isFunction(b.jsonpCallback)?b.jsonpCallback():b.jsonpCallback,h?b[h]=b[h].replace(ad,"$1"+e):b.jsonp!==!1&&(b.url+=(wc.test(b.url)?"&":"?")+b.jsonp+"="+e),b.converters["script json"]=function(){return g||m.error(e+" was not called"),g[0]},b.dataTypes[0]="json",f=a[e],a[e]=function(){g=arguments},d.always(function(){a[e]=f,b[e]&&(b.jsonpCallback=c.jsonpCallback,_c.push(e)),g&&m.isFunction(f)&&f(g[0]),g=f=void 0}),"script"):void 0}),m.parseHTML=function(a,b,c){if(!a||"string"!=typeof a)return null;"boolean"==typeof b&&(c=b,b=!1),b=b||y;var d=u.exec(a),e=!c&&[];return d?[b.createElement(d[1])]:(d=m.buildFragment([a],b,e),e&&e.length&&m(e).remove(),m.merge([],d.childNodes))};var bd=m.fn.load;m.fn.load=function(a,b,c){if("string"!=typeof a&&bd)return bd.apply(this,arguments);var d,e,f,g=this,h=a.indexOf(" ");return h>=0&&(d=m.trim(a.slice(h,a.length)),a=a.slice(0,h)),m.isFunction(b)?(c=b,b=void 0):b&&"object"==typeof b&&(f="POST"),g.length>0&&m.ajax({url:a,type:f,dataType:"html",data:b}).done(function(a){e=arguments,g.html(d?m("<div>").append(m.parseHTML(a)).find(d):a)}).complete(c&&function(a,b){g.each(c,e||[a.responseText,b,a])}),this},m.expr.filters.animated=function(a){return m.grep(m.timers,function(b){return a===b.elem}).length};var cd=a.document.documentElement;function dd(a){return m.isWindow(a)?a:9===a.nodeType?a.defaultView||a.parentWindow:!1}m.offset={setOffset:function(a,b,c){var d,e,f,g,h,i,j,k=m.css(a,"position"),l=m(a),n={};"static"===k&&(a.style.position="relative"),h=l.offset(),f=m.css(a,"top"),i=m.css(a,"left"),j=("absolute"===k||"fixed"===k)&&m.inArray("auto",[f,i])>-1,j?(d=l.position(),g=d.top,e=d.left):(g=parseFloat(f)||0,e=parseFloat(i)||0),m.isFunction(b)&&(b=b.call(a,c,h)),null!=b.top&&(n.top=b.top-h.top+g),null!=b.left&&(n.left=b.left-h.left+e),"using"in b?b.using.call(a,n):l.css(n)}},m.fn.extend({offset:function(a){if(arguments.length)return void 0===a?this:this.each(function(b){m.offset.setOffset(this,a,b)});var b,c,d={top:0,left:0},e=this[0],f=e&&e.ownerDocument;if(f)return b=f.documentElement,m.contains(b,e)?(typeof e.getBoundingClientRect!==K&&(d=e.getBoundingClientRect()),c=dd(f),{top:d.top+(c.pageYOffset||b.scrollTop)-(b.clientTop||0),left:d.left+(c.pageXOffset||b.scrollLeft)-(b.clientLeft||0)}):d},position:function(){if(this[0]){var a,b,c={top:0,left:0},d=this[0];return"fixed"===m.css(d,"position")?b=d.getBoundingClientRect():(a=this.offsetParent(),b=this.offset(),m.nodeName(a[0],"html")||(c=a.offset()),c.top+=m.css(a[0],"borderTopWidth",!0),c.left+=m.css(a[0],"borderLeftWidth",!0)),{top:b.top-c.top-m.css(d,"marginTop",!0),left:b.left-c.left-m.css(d,"marginLeft",!0)}}},offsetParent:function(){return this.map(function(){var a=this.offsetParent||cd;while(a&&!m.nodeName(a,"html")&&"static"===m.css(a,"position"))a=a.offsetParent;return a||cd})}}),m.each({scrollLeft:"pageXOffset",scrollTop:"pageYOffset"},function(a,b){var c=/Y/.test(b);m.fn[a]=function(d){return V(this,function(a,d,e){var f=dd(a);return void 0===e?f?b in f?f[b]:f.document.documentElement[d]:a[d]:void(f?f.scrollTo(c?m(f).scrollLeft():e,c?e:m(f).scrollTop()):a[d]=e)},a,d,arguments.length,null)}}),m.each(["top","left"],function(a,b){m.cssHooks[b]=Lb(k.pixelPosition,function(a,c){return c?(c=Jb(a,b),Hb.test(c)?m(a).position()[b]+"px":c):void 0})}),m.each({Height:"height",Width:"width"},function(a,b){m.each({padding:"inner"+a,content:b,"":"outer"+a},function(c,d){m.fn[d]=function(d,e){var f=arguments.length&&(c||"boolean"!=typeof 
d),g=c||(d===!0||e===!0?"margin":"border");return V(this,function(b,c,d){var e;return m.isWindow(b)?b.document.documentElement["client"+a]:9===b.nodeType?(e=b.documentElement,Math.max(b.body["scroll"+a],e["scroll"+a],b.body["offset"+a],e["offset"+a],e["client"+a])):void 0===d?m.css(b,c,g):m.style(b,c,d,g)},b,f?d:void 0,f,null)}})}),m.fn.size=function(){return this.length},m.fn.andSelf=m.fn.addBack,"function"==typeof define&&define.amd&&define("jquery",[],function(){return m});var ed=a.jQuery,fd=a.$;return m.noConflict=function(b){return a.$===m&&(a.$=fd),b&&a.jQuery===m&&(a.jQuery=ed),m},typeof b===K&&(a.jQuery=a.$=m),m}); \ No newline at end of file
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.tablesorter.min.js b/contrib/python/coverage/py3/coverage/htmlfiles/jquery.tablesorter.min.js
deleted file mode 100644
index 64c7007129..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/jquery.tablesorter.min.js
+++ /dev/null
@@ -1,2 +0,0 @@
-
-(function($){$.extend({tablesorter:new function(){var parsers=[],widgets=[];this.defaults={cssHeader:"header",cssAsc:"headerSortUp",cssDesc:"headerSortDown",sortInitialOrder:"asc",sortMultiSortKey:"shiftKey",sortForce:null,sortAppend:null,textExtraction:"simple",parsers:{},widgets:[],widgetZebra:{css:["even","odd"]},headers:{},widthFixed:false,cancelSelection:true,sortList:[],headerList:[],dateFormat:"us",decimal:'.',debug:false};function benchmark(s,d){log(s+","+(new Date().getTime()-d.getTime())+"ms");}this.benchmark=benchmark;function log(s){if(typeof console!="undefined"&&typeof console.debug!="undefined"){console.log(s);}else{alert(s);}}function buildParserCache(table,$headers){if(table.config.debug){var parsersDebug="";}var rows=table.tBodies[0].rows;if(table.tBodies[0].rows[0]){var list=[],cells=rows[0].cells,l=cells.length;for(var i=0;i<l;i++){var p=false;if($.metadata&&($($headers[i]).metadata()&&$($headers[i]).metadata().sorter)){p=getParserById($($headers[i]).metadata().sorter);}else if((table.config.headers[i]&&table.config.headers[i].sorter)){p=getParserById(table.config.headers[i].sorter);}if(!p){p=detectParserForColumn(table,cells[i]);}if(table.config.debug){parsersDebug+="column:"+i+" parser:"+p.id+"\n";}list.push(p);}}if(table.config.debug){log(parsersDebug);}return list;};function detectParserForColumn(table,node){var l=parsers.length;for(var i=1;i<l;i++){if(parsers[i].is($.trim(getElementText(table.config,node)),table,node)){return parsers[i];}}return parsers[0];}function getParserById(name){var l=parsers.length;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==name.toLowerCase()){return parsers[i];}}return false;}function buildCache(table){if(table.config.debug){var cacheTime=new Date();}var totalRows=(table.tBodies[0]&&table.tBodies[0].rows.length)||0,totalCells=(table.tBodies[0].rows[0]&&table.tBodies[0].rows[0].cells.length)||0,parsers=table.config.parsers,cache={row:[],normalized:[]};for(var i=0;i<totalRows;++i){var c=table.tBodies[0].rows[i],cols=[];cache.row.push($(c));for(var j=0;j<totalCells;++j){cols.push(parsers[j].format(getElementText(table.config,c.cells[j]),table,c.cells[j]));}cols.push(i);cache.normalized.push(cols);cols=null;};if(table.config.debug){benchmark("Building cache for "+totalRows+" rows:",cacheTime);}return cache;};function getElementText(config,node){if(!node)return"";var t="";if(config.textExtraction=="simple"){if(node.childNodes[0]&&node.childNodes[0].hasChildNodes()){t=node.childNodes[0].innerHTML;}else{t=node.innerHTML;}}else{if(typeof(config.textExtraction)=="function"){t=config.textExtraction(node);}else{t=$(node).text();}}return t;}function appendToTable(table,cache){if(table.config.debug){var appendTime=new Date()}var c=cache,r=c.row,n=c.normalized,totalRows=n.length,checkCell=(n[0].length-1),tableBody=$(table.tBodies[0]),rows=[];for(var i=0;i<totalRows;i++){rows.push(r[n[i][checkCell]]);if(!table.config.appender){var o=r[n[i][checkCell]];var l=o.length;for(var j=0;j<l;j++){tableBody[0].appendChild(o[j]);}}}if(table.config.appender){table.config.appender(table,rows);}rows=null;if(table.config.debug){benchmark("Rebuilt table:",appendTime);}applyWidget(table);setTimeout(function(){$(table).trigger("sortEnd");},0);};function buildHeaders(table){if(table.config.debug){var time=new Date();}var meta=($.metadata)?true:false,tableHeadersRows=[];for(var i=0;i<table.tHead.rows.length;i++){tableHeadersRows[i]=0;};$tableHeaders=$("thead 
th",table);$tableHeaders.each(function(index){this.count=0;this.column=index;this.order=formatSortingOrder(table.config.sortInitialOrder);if(checkHeaderMetadata(this)||checkHeaderOptions(table,index))this.sortDisabled=true;if(!this.sortDisabled){$(this).addClass(table.config.cssHeader);}table.config.headerList[index]=this;});if(table.config.debug){benchmark("Built headers:",time);log($tableHeaders);}return $tableHeaders;};function checkCellColSpan(table,rows,row){var arr=[],r=table.tHead.rows,c=r[row].cells;for(var i=0;i<c.length;i++){var cell=c[i];if(cell.colSpan>1){arr=arr.concat(checkCellColSpan(table,headerArr,row++));}else{if(table.tHead.length==1||(cell.rowSpan>1||!r[row+1])){arr.push(cell);}}}return arr;};function checkHeaderMetadata(cell){if(($.metadata)&&($(cell).metadata().sorter===false)){return true;};return false;}function checkHeaderOptions(table,i){if((table.config.headers[i])&&(table.config.headers[i].sorter===false)){return true;};return false;}function applyWidget(table){var c=table.config.widgets;var l=c.length;for(var i=0;i<l;i++){getWidgetById(c[i]).format(table);}}function getWidgetById(name){var l=widgets.length;for(var i=0;i<l;i++){if(widgets[i].id.toLowerCase()==name.toLowerCase()){return widgets[i];}}};function formatSortingOrder(v){if(typeof(v)!="Number"){i=(v.toLowerCase()=="desc")?1:0;}else{i=(v==(0||1))?v:0;}return i;}function isValueInArray(v,a){var l=a.length;for(var i=0;i<l;i++){if(a[i][0]==v){return true;}}return false;}function setHeadersCss(table,$headers,list,css){$headers.removeClass(css[0]).removeClass(css[1]);var h=[];$headers.each(function(offset){if(!this.sortDisabled){h[this.column]=$(this);}});var l=list.length;for(var i=0;i<l;i++){h[list[i][0]].addClass(css[list[i][1]]);}}function fixColumnWidth(table,$headers){var c=table.config;if(c.widthFixed){var colgroup=$('<colgroup>');$("tr:first td",table.tBodies[0]).each(function(){colgroup.append($('<col>').css('width',$(this).width()));});$(table).prepend(colgroup);};}function updateHeaderSortCount(table,sortList){var c=table.config,l=sortList.length;for(var i=0;i<l;i++){var s=sortList[i],o=c.headerList[s[0]];o.count=s[1];o.count++;}}function multisort(table,sortList,cache){if(table.config.debug){var sortTime=new Date();}var dynamicExp="var sortWrapper = function(a,b) {",l=sortList.length;for(var i=0;i<l;i++){var c=sortList[i][0];var order=sortList[i][1];var s=(getCachedSortType(table.config.parsers,c)=="text")?((order==0)?"sortText":"sortTextDesc"):((order==0)?"sortNumeric":"sortNumericDesc");var e="e"+i;dynamicExp+="var "+e+" = "+s+"(a["+c+"],b["+c+"]); ";dynamicExp+="if("+e+") { return "+e+"; } ";dynamicExp+="else { ";}var orgOrderCol=cache.normalized[0].length-1;dynamicExp+="return a["+orgOrderCol+"]-b["+orgOrderCol+"];";for(var i=0;i<l;i++){dynamicExp+="}; ";}dynamicExp+="return 0; ";dynamicExp+="}; ";eval(dynamicExp);cache.normalized.sort(sortWrapper);if(table.config.debug){benchmark("Sorting on "+sortList.toString()+" and dir "+order+" time:",sortTime);}return cache;};function sortText(a,b){return((a<b)?-1:((a>b)?1:0));};function sortTextDesc(a,b){return((b<a)?-1:((b>a)?1:0));};function sortNumeric(a,b){return a-b;};function sortNumericDesc(a,b){return b-a;};function getCachedSortType(parsers,i){return parsers[i].type;};this.construct=function(settings){return this.each(function(){if(!this.tHead||!this.tBodies)return;var 
$this,$document,$headers,cache,config,shiftDown=0,sortOrder;this.config={};config=$.extend(this.config,$.tablesorter.defaults,settings);$this=$(this);$headers=buildHeaders(this);this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);var sortCSS=[config.cssDesc,config.cssAsc];fixColumnWidth(this);$headers.click(function(e){$this.trigger("sortStart");var totalRows=($this[0].tBodies[0]&&$this[0].tBodies[0].rows.length)||0;if(!this.sortDisabled&&totalRows>0){var $cell=$(this);var i=this.column;this.order=this.count++%2;if(!e[config.sortMultiSortKey]){config.sortList=[];if(config.sortForce!=null){var a=config.sortForce;for(var j=0;j<a.length;j++){if(a[j][0]!=i){config.sortList.push(a[j]);}}}config.sortList.push([i,this.order]);}else{if(isValueInArray(i,config.sortList)){for(var j=0;j<config.sortList.length;j++){var s=config.sortList[j],o=config.headerList[s[0]];if(s[0]==i){o.count=s[1];o.count++;s[1]=o.count%2;}}}else{config.sortList.push([i,this.order]);}};setTimeout(function(){setHeadersCss($this[0],$headers,config.sortList,sortCSS);appendToTable($this[0],multisort($this[0],config.sortList,cache));},1);return false;}}).mousedown(function(){if(config.cancelSelection){this.onselectstart=function(){return false};return false;}});$this.bind("update",function(){this.config.parsers=buildParserCache(this,$headers);cache=buildCache(this);}).bind("sorton",function(e,list){$(this).trigger("sortStart");config.sortList=list;var sortList=config.sortList;updateHeaderSortCount(this,sortList);setHeadersCss(this,$headers,sortList,sortCSS);appendToTable(this,multisort(this,sortList,cache));}).bind("appendCache",function(){appendToTable(this,cache);}).bind("applyWidgetId",function(e,id){getWidgetById(id).format(this);}).bind("applyWidgets",function(){applyWidget(this);});if($.metadata&&($(this).metadata()&&$(this).metadata().sortlist)){config.sortList=$(this).metadata().sortlist;}if(config.sortList.length>0){$this.trigger("sorton",[config.sortList]);}applyWidget(this);});};this.addParser=function(parser){var l=parsers.length,a=true;for(var i=0;i<l;i++){if(parsers[i].id.toLowerCase()==parser.id.toLowerCase()){a=false;}}if(a){parsers.push(parser);};};this.addWidget=function(widget){widgets.push(widget);};this.formatFloat=function(s){var i=parseFloat(s);return(isNaN(i))?0:i;};this.formatInt=function(s){var i=parseInt(s);return(isNaN(i))?0:i;};this.isDigit=function(s,config){var DECIMAL='\\'+config.decimal;var exp='/(^[+]?0('+DECIMAL+'0+)?$)|(^([-+]?[1-9][0-9]*)$)|(^([-+]?((0?|[1-9][0-9]*)'+DECIMAL+'(0*[1-9][0-9]*)))$)|(^[-+]?[1-9]+[0-9]*'+DECIMAL+'0+$)/';return RegExp(exp).test($.trim(s));};this.clearTableBody=function(table){if($.browser.msie){function empty(){while(this.firstChild)this.removeChild(this.firstChild);}empty.apply(table.tBodies[0]);}else{table.tBodies[0].innerHTML="";}};}});$.fn.extend({tablesorter:$.tablesorter.construct});var ts=$.tablesorter;ts.addParser({id:"text",is:function(s){return true;},format:function(s){return $.trim(s.toLowerCase());},type:"text"});ts.addParser({id:"digit",is:function(s,table){var c=table.config;return $.tablesorter.isDigit(s,c);},format:function(s){return $.tablesorter.formatFloat(s);},type:"numeric"});ts.addParser({id:"currency",is:function(s){return/^[£$€?.]/.test(s);},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/[^0-9.]/g),""));},type:"numeric"});ts.addParser({id:"ipAddress",is:function(s){return/^\d{2,3}[\.]\d{2,3}[\.]\d{2,3}[\.]\d{2,3}$/.test(s);},format:function(s){var a=s.split("."),r="",l=a.length;for(var 
i=0;i<l;i++){var item=a[i];if(item.length==2){r+="0"+item;}else{r+=item;}}return $.tablesorter.formatFloat(r);},type:"numeric"});ts.addParser({id:"url",is:function(s){return/^(https?|ftp|file):\/\/$/.test(s);},format:function(s){return jQuery.trim(s.replace(new RegExp(/(https?|ftp|file):\/\//),''));},type:"text"});ts.addParser({id:"isoDate",is:function(s){return/^\d{4}[\/-]\d{1,2}[\/-]\d{1,2}$/.test(s);},format:function(s){return $.tablesorter.formatFloat((s!="")?new Date(s.replace(new RegExp(/-/g),"/")).getTime():"0");},type:"numeric"});ts.addParser({id:"percent",is:function(s){return/\%$/.test($.trim(s));},format:function(s){return $.tablesorter.formatFloat(s.replace(new RegExp(/%/g),""));},type:"numeric"});ts.addParser({id:"usLongDate",is:function(s){return s.match(new RegExp(/^[A-Za-z]{3,10}\.? [0-9]{1,2}, ([0-9]{4}|'?[0-9]{2}) (([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(AM|PM)))$/));},format:function(s){return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"shortDate",is:function(s){return/\d{1,2}[\/\-]\d{1,2}[\/\-]\d{2,4}/.test(s);},format:function(s,table){var c=table.config;s=s.replace(/\-/g,"/");if(c.dateFormat=="us"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$1/$2");}else if(c.dateFormat=="uk"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{4})/,"$3/$2/$1");}else if(c.dateFormat=="dd/mm/yy"||c.dateFormat=="dd-mm-yy"){s=s.replace(/(\d{1,2})[\/\-](\d{1,2})[\/\-](\d{2})/,"$1/$2/$3");}return $.tablesorter.formatFloat(new Date(s).getTime());},type:"numeric"});ts.addParser({id:"time",is:function(s){return/^(([0-2]?[0-9]:[0-5][0-9])|([0-1]?[0-9]:[0-5][0-9]\s(am|pm)))$/.test(s);},format:function(s){return $.tablesorter.formatFloat(new Date("2000/01/01 "+s).getTime());},type:"numeric"});ts.addParser({id:"metadata",is:function(s){return false;},format:function(s,table,cell){var c=table.config,p=(!c.parserMetadataName)?'sortValue':c.parserMetadataName;return $(cell).metadata()[p];},type:"numeric"});ts.addWidget({id:"zebra",format:function(table){if(table.config.debug){var time=new Date();}$("tr:visible",table.tBodies[0]).filter(':even').removeClass(table.config.widgetZebra.css[1]).addClass(table.config.widgetZebra.css[0]).end().filter(':odd').removeClass(table.config.widgetZebra.css[0]).addClass(table.config.widgetZebra.css[1]);if(table.config.debug){$.tablesorter.benchmark("Applying Zebra widget",time);}}});})(jQuery); \ No newline at end of file
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/keybd_closed.png b/contrib/python/coverage/py3/coverage/htmlfiles/keybd_closed.png
deleted file mode 100644
index db114023f0..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/keybd_closed.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/keybd_open.png b/contrib/python/coverage/py3/coverage/htmlfiles/keybd_open.png
deleted file mode 100644
index db114023f0..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/keybd_open.png
+++ /dev/null
Binary files differ
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/pyfile.html b/contrib/python/coverage/py3/coverage/htmlfiles/pyfile.html
deleted file mode 100644
index e15be066fb..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/pyfile.html
+++ /dev/null
@@ -1,113 +0,0 @@
-{# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 #}
-{# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt #}
-
-<!DOCTYPE html>
-<html>
-<head>
- <meta http-equiv="Content-Type" content="text/html; charset=utf-8">
- {# IE8 rounds line-height incorrectly, and adding this emulateIE7 line makes it right! #}
- {# http://social.msdn.microsoft.com/Forums/en-US/iewebdevelopment/thread/7684445e-f080-4d8f-8529-132763348e21 #}
- <meta http-equiv="X-UA-Compatible" content="IE=emulateIE7" />
- <title>Coverage for {{relative_filename|escape}}: {{nums.pc_covered_str}}%</title>
- <link rel="icon" sizes="32x32" href="favicon_32.png">
- <link rel="stylesheet" href="style.css" type="text/css">
- {% if extra_css %}
- <link rel="stylesheet" href="{{ extra_css }}" type="text/css">
- {% endif %}
- <script type="text/javascript" src="jquery.min.js"></script>
- <script type="text/javascript" src="jquery.hotkeys.js"></script>
- <script type="text/javascript" src="jquery.isonscreen.js"></script>
- <script type="text/javascript" src="coverage_html.js"></script>
- <script type="text/javascript">
- jQuery(document).ready(coverage.pyfile_ready);
- </script>
-</head>
-<body class="pyfile">
-
-<div id="header">
- <div class="content">
- <h1>Coverage for <b>{{relative_filename|escape}}</b> :
- <span class="pc_cov">{{nums.pc_covered_str}}%</span>
- </h1>
-
- <img id="keyboard_icon" src="keybd_closed.png" alt="Show keyboard shortcuts" />
-
- <h2 class="stats">
- {{nums.n_statements}} statements &nbsp;
- <button type="button" class="{{category.run}} shortkey_r button_toggle_run" title="Toggle lines run">{{nums.n_executed}} run</button>
- <button type="button" class="{{category.mis}} shortkey_m button_toggle_mis" title="Toggle lines missing">{{nums.n_missing}} missing</button>
- <button type="button" class="{{category.exc}} shortkey_x button_toggle_exc" title="Toggle lines excluded">{{nums.n_excluded}} excluded</button>
-
- {% if has_arcs %}
- <button type="button" class="{{category.par}} shortkey_p button_toggle_par" title="Toggle lines partially run">{{nums.n_partial_branches}} partial</button>
- {% endif %}
- </h2>
- </div>
-</div>
-
-<div class="help_panel">
- <img id="panel_icon" src="keybd_open.png" alt="Hide keyboard shortcuts" />
- <p class="legend">Hot-keys on this page</p>
- <div>
- <p class="keyhelp">
- <span class="key">r</span>
- <span class="key">m</span>
- <span class="key">x</span>
- <span class="key">p</span> &nbsp; toggle line displays
- </p>
- <p class="keyhelp">
- <span class="key">j</span>
- <span class="key">k</span> &nbsp; next/prev highlighted chunk
- </p>
- <p class="keyhelp">
- <span class="key">0</span> &nbsp; (zero) top of page
- </p>
- <p class="keyhelp">
- <span class="key">1</span> &nbsp; (one) first highlighted chunk
- </p>
- </div>
-</div>
-
-<div id="source">
- {% for line in lines -%}
- {% joined %}
- <p id="t{{line.number}}" class="{{line.css_class}}">
- <span class="n"><a href="#t{{line.number}}">{{line.number}}</a></span>
- <span class="t">{{line.html}}&nbsp;</span>
- {% if line.context_list %}
- <input type="checkbox" id="ctxs{{line.number}}" />
- {% endif %}
- {# Things that should float right in the line. #}
- <span class="r">
- {% if line.annotate %}
- <span class="annotate short">{{line.annotate}}</span>
- <span class="annotate long">{{line.annotate_long}}</span>
- {% endif %}
- {% if line.contexts %}
- <label for="ctxs{{line.number}}" class="ctx">{{ line.contexts_label }}</label>
- {% endif %}
- </span>
- {# Things that should appear below the line. #}
- {% if line.context_list %}
- <span class="ctxs">
- {% for context in line.context_list %}
- <span>{{context}}</span>
- {% endfor %}
- </span>
- {% endif %}
- </p>
- {% endjoined %}
- {% endfor %}
-</div>
-
-<div id="footer">
- <div class="content">
- <p>
- <a class="nav" href="index.html">&#xab; index</a> &nbsp; &nbsp; <a class="nav" href="{{__url__}}">coverage.py v{{__version__}}</a>,
- created at {{ time_stamp }}
- </p>
- </div>
-</div>
-
-</body>
-</html>
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/style.css b/contrib/python/coverage/py3/coverage/htmlfiles/style.css
deleted file mode 100644
index 36ee2a6e65..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/style.css
+++ /dev/null
@@ -1,291 +0,0 @@
-@charset "UTF-8";
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-/* Don't edit this .css file. Edit the .scss file instead! */
-html, body, h1, h2, h3, p, table, td, th { margin: 0; padding: 0; border: 0; font-weight: inherit; font-style: inherit; font-size: 100%; font-family: inherit; vertical-align: baseline; }
-
-body { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; font-size: 1em; background: #fff; color: #000; }
-
-@media (prefers-color-scheme: dark) { body { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { body { color: #eee; } }
-
-html > body { font-size: 16px; }
-
-a:active, a:focus { outline: 2px dashed #007acc; }
-
-p { font-size: .875em; line-height: 1.4em; }
-
-table { border-collapse: collapse; }
-
-td { vertical-align: top; }
-
-table tr.hidden { display: none !important; }
-
-p#no_rows { display: none; font-size: 1.2em; }
-
-a.nav { text-decoration: none; color: inherit; }
-
-a.nav:hover { text-decoration: underline; color: inherit; }
-
-#header { background: #f8f8f8; width: 100%; border-bottom: 1px solid #eee; }
-
-@media (prefers-color-scheme: dark) { #header { background: black; } }
-
-@media (prefers-color-scheme: dark) { #header { border-color: #333; } }
-
-.indexfile #footer { margin: 1rem 3.5rem; }
-
-.pyfile #footer { margin: 1rem 1rem; }
-
-#footer .content { padding: 0; color: #666; font-style: italic; }
-
-@media (prefers-color-scheme: dark) { #footer .content { color: #aaa; } }
-
-#index { margin: 1rem 0 0 3.5rem; }
-
-#header .content { padding: 1rem 3.5rem; }
-
-h1 { font-size: 1.25em; display: inline-block; }
-
-#filter_container { float: right; margin: 0 2em 0 0; }
-
-#filter_container input { width: 10em; padding: 0.2em 0.5em; border: 2px solid #ccc; background: #fff; color: #000; }
-
-@media (prefers-color-scheme: dark) { #filter_container input { border-color: #444; } }
-
-@media (prefers-color-scheme: dark) { #filter_container input { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { #filter_container input { color: #eee; } }
-
-#filter_container input:focus { border-color: #007acc; }
-
-h2.stats { margin-top: .5em; font-size: 1em; }
-
-.stats button { font-family: inherit; font-size: inherit; border: 1px solid; border-radius: .2em; color: inherit; padding: .1em .5em; margin: 1px calc(.1em + 1px); cursor: pointer; border-color: #ccc; }
-
-@media (prefers-color-scheme: dark) { .stats button { border-color: #444; } }
-
-.stats button:active, .stats button:focus { outline: 2px dashed #007acc; }
-
-.stats button:active, .stats button:focus { outline: 2px dashed #007acc; }
-
-.stats button.run { background: #eeffee; }
-
-@media (prefers-color-scheme: dark) { .stats button.run { background: #373d29; } }
-
-.stats button.run.show_run { background: #dfd; border: 2px solid #00dd00; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.run.show_run { background: #373d29; } }
-
-.stats button.mis { background: #ffeeee; }
-
-@media (prefers-color-scheme: dark) { .stats button.mis { background: #4b1818; } }
-
-.stats button.mis.show_mis { background: #fdd; border: 2px solid #ff0000; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.mis.show_mis { background: #4b1818; } }
-
-.stats button.exc { background: #f7f7f7; }
-
-@media (prefers-color-scheme: dark) { .stats button.exc { background: #333; } }
-
-.stats button.exc.show_exc { background: #eee; border: 2px solid #808080; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.exc.show_exc { background: #333; } }
-
-.stats button.par { background: #ffffd5; }
-
-@media (prefers-color-scheme: dark) { .stats button.par { background: #650; } }
-
-.stats button.par.show_par { background: #ffa; border: 2px solid #dddd00; margin: 0 .1em; }
-
-@media (prefers-color-scheme: dark) { .stats button.par.show_par { background: #650; } }
-
-.help_panel, #source p .annotate.long { display: none; position: absolute; z-index: 999; background: #ffffcc; border: 1px solid #888; border-radius: .2em; color: #333; padding: .25em .5em; }
-
-#source p .annotate.long { white-space: normal; float: right; top: 1.75em; right: 1em; height: auto; }
-
-#keyboard_icon { float: right; margin: 5px; cursor: pointer; }
-
-.help_panel { padding: .5em; border: 1px solid #883; }
-
-.help_panel .legend { font-style: italic; margin-bottom: 1em; }
-
-.indexfile .help_panel { width: 20em; min-height: 4em; }
-
-.pyfile .help_panel { width: 16em; min-height: 8em; }
-
-#panel_icon { float: right; cursor: pointer; }
-
-.keyhelp { margin: .75em; }
-
-.keyhelp .key { border: 1px solid black; border-color: #888 #333 #333 #888; padding: .1em .35em; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-weight: bold; background: #eee; }
-
-#source { padding: 1em 0 1em 3.5rem; font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; }
-
-#source p { position: relative; white-space: pre; }
-
-#source p * { box-sizing: border-box; }
-
-#source p .n { float: left; text-align: right; width: 3.5rem; box-sizing: border-box; margin-left: -3.5rem; padding-right: 1em; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n { color: #777; } }
-
-#source p .n a { text-decoration: none; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n a { color: #777; } }
-
-#source p .n a:hover { text-decoration: underline; color: #999; }
-
-@media (prefers-color-scheme: dark) { #source p .n a:hover { color: #777; } }
-
-#source p.highlight .n { background: #ffdd00; }
-
-#source p .t { display: inline-block; width: 100%; box-sizing: border-box; margin-left: -.5em; padding-left: 0.3em; border-left: 0.2em solid #fff; }
-
-@media (prefers-color-scheme: dark) { #source p .t { border-color: #1e1e1e; } }
-
-#source p .t:hover { background: #f2f2f2; }
-
-@media (prefers-color-scheme: dark) { #source p .t:hover { background: #282828; } }
-
-#source p .t:hover ~ .r .annotate.long { display: block; }
-
-#source p .t .com { color: #008000; font-style: italic; line-height: 1px; }
-
-@media (prefers-color-scheme: dark) { #source p .t .com { color: #6A9955; } }
-
-#source p .t .key { font-weight: bold; line-height: 1px; }
-
-#source p .t .str { color: #0451A5; }
-
-@media (prefers-color-scheme: dark) { #source p .t .str { color: #9CDCFE; } }
-
-#source p.mis .t { border-left: 0.2em solid #ff0000; }
-
-#source p.mis.show_mis .t { background: #fdd; }
-
-@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t { background: #4b1818; } }
-
-#source p.mis.show_mis .t:hover { background: #f2d2d2; }
-
-@media (prefers-color-scheme: dark) { #source p.mis.show_mis .t:hover { background: #532323; } }
-
-#source p.run .t { border-left: 0.2em solid #00dd00; }
-
-#source p.run.show_run .t { background: #dfd; }
-
-@media (prefers-color-scheme: dark) { #source p.run.show_run .t { background: #373d29; } }
-
-#source p.run.show_run .t:hover { background: #d2f2d2; }
-
-@media (prefers-color-scheme: dark) { #source p.run.show_run .t:hover { background: #404633; } }
-
-#source p.exc .t { border-left: 0.2em solid #808080; }
-
-#source p.exc.show_exc .t { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t { background: #333; } }
-
-#source p.exc.show_exc .t:hover { background: #e2e2e2; }
-
-@media (prefers-color-scheme: dark) { #source p.exc.show_exc .t:hover { background: #3c3c3c; } }
-
-#source p.par .t { border-left: 0.2em solid #dddd00; }
-
-#source p.par.show_par .t { background: #ffa; }
-
-@media (prefers-color-scheme: dark) { #source p.par.show_par .t { background: #650; } }
-
-#source p.par.show_par .t:hover { background: #f2f2a2; }
-
-@media (prefers-color-scheme: dark) { #source p.par.show_par .t:hover { background: #6d5d0c; } }
-
-#source p .r { position: absolute; top: 0; right: 2.5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; }
-
-#source p .annotate { font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; color: #666; padding-right: .5em; }
-
-@media (prefers-color-scheme: dark) { #source p .annotate { color: #ddd; } }
-
-#source p .annotate.short:hover ~ .long { display: block; }
-
-#source p .annotate.long { width: 30em; right: 2.5em; }
-
-#source p input { display: none; }
-
-#source p input ~ .r label.ctx { cursor: pointer; border-radius: .25em; }
-
-#source p input ~ .r label.ctx::before { content: "▶ "; }
-
-#source p input ~ .r label.ctx:hover { background: #d5f7ff; color: #666; }
-
-@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { background: #0f3a42; } }
-
-@media (prefers-color-scheme: dark) { #source p input ~ .r label.ctx:hover { color: #aaa; } }
-
-#source p input:checked ~ .r label.ctx { background: #aef; color: #666; border-radius: .75em .75em 0 0; padding: 0 .5em; margin: -.25em 0; }
-
-@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { background: #056; } }
-
-@media (prefers-color-scheme: dark) { #source p input:checked ~ .r label.ctx { color: #aaa; } }
-
-#source p input:checked ~ .r label.ctx::before { content: "▼ "; }
-
-#source p input:checked ~ .ctxs { padding: .25em .5em; overflow-y: scroll; max-height: 10.5em; }
-
-#source p label.ctx { color: #999; display: inline-block; padding: 0 .5em; font-size: .8333em; }
-
-@media (prefers-color-scheme: dark) { #source p label.ctx { color: #777; } }
-
-#source p .ctxs { display: block; max-height: 0; overflow-y: hidden; transition: all .2s; padding: 0 .5em; font-family: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif; white-space: nowrap; background: #aef; border-radius: .25em; margin-right: 1.75em; }
-
-@media (prefers-color-scheme: dark) { #source p .ctxs { background: #056; } }
-
-#source p .ctxs span { display: block; text-align: right; }
-
-#index { font-family: SFMono-Regular, Menlo, Monaco, Consolas, monospace; font-size: 0.875em; }
-
-#index table.index { margin-left: -.5em; }
-
-#index td, #index th { text-align: right; width: 5em; padding: .25em .5em; border-bottom: 1px solid #eee; }
-
-@media (prefers-color-scheme: dark) { #index td, #index th { border-color: #333; } }
-
-#index td.name, #index th.name { text-align: left; width: auto; }
-
-#index th { font-style: italic; color: #333; cursor: pointer; }
-
-@media (prefers-color-scheme: dark) { #index th { color: #ddd; } }
-
-#index th:hover { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index th:hover { background: #333; } }
-
-#index th.headerSortDown, #index th.headerSortUp { white-space: nowrap; background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index th.headerSortDown, #index th.headerSortUp { background: #333; } }
-
-#index th.headerSortDown:after { content: " ↑"; }
-
-#index th.headerSortUp:after { content: " ↓"; }
-
-#index td.name a { text-decoration: none; color: inherit; }
-
-#index tr.total td, #index tr.total_dynamic td { font-weight: bold; border-top: 1px solid #ccc; border-bottom: none; }
-
-#index tr.file:hover { background: #eee; }
-
-@media (prefers-color-scheme: dark) { #index tr.file:hover { background: #333; } }
-
-#index tr.file:hover td.name { text-decoration: underline; color: inherit; }
-
-#scroll_marker { position: fixed; right: 0; top: 0; width: 16px; height: 100%; background: #fff; border-left: 1px solid #eee; will-change: transform; }
-
-@media (prefers-color-scheme: dark) { #scroll_marker { background: #1e1e1e; } }
-
-@media (prefers-color-scheme: dark) { #scroll_marker { border-color: #333; } }
-
-#scroll_marker .marker { background: #ccc; position: absolute; min-height: 3px; width: 100%; }
-
-@media (prefers-color-scheme: dark) { #scroll_marker .marker { background: #444; } }
diff --git a/contrib/python/coverage/py3/coverage/htmlfiles/style.scss b/contrib/python/coverage/py3/coverage/htmlfiles/style.scss
deleted file mode 100644
index 158d1fb493..0000000000
--- a/contrib/python/coverage/py3/coverage/htmlfiles/style.scss
+++ /dev/null
@@ -1,660 +0,0 @@
-/* Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0 */
-/* For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt */
-
-// CSS styles for coverage.py HTML reports.
-
-// When you edit this file, you need to run "make css" to get the CSS file
-// generated, and then check in both the .scss and the .css files.
-
-// When working on the file, this command is useful:
-// sass --watch --style=compact --sourcemap=none --no-cache coverage/htmlfiles/style.scss:htmlcov/style.css
-//
-// OR you can process sass purely in python with `pip install pysass`, then:
-// pysassc --style=compact coverage/htmlfiles/style.scss coverage/htmlfiles/style.css
-
-// Ignore this comment, it's for the CSS output file:
-/* Don't edit this .css file. Edit the .scss file instead! */
-
-// Dimensions
-$left-gutter: 3.5rem;
-
-
-//
-// Declare colors and variables
-//
-
-$font-normal: -apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, Ubuntu, Cantarell, "Helvetica Neue", sans-serif;
-$font-code: SFMono-Regular, Menlo, Monaco, Consolas, monospace;
-
-$off-button-lighten: 50%;
-$hover-dark-amt: 95%;
-
-$focus-color: #007acc;
-
-$mis-color: #ff0000;
-$run-color: #00dd00;
-$exc-color: #808080;
-$par-color: #dddd00;
-
-$light-bg: #fff;
-$light-fg: #000;
-$light-gray1: #f8f8f8;
-$light-gray2: #eee;
-$light-gray3: #ccc;
-$light-gray4: #999;
-$light-gray5: #666;
-$light-gray6: #333;
-$light-pln-bg: $light-bg;
-$light-mis-bg: #fdd;
-$light-run-bg: #dfd;
-$light-exc-bg: $light-gray2;
-$light-par-bg: #ffa;
-$light-token-com: #008000;
-$light-token-str: #0451A5;
-$light-context-bg-color: #aef;
-
-$dark-bg: #1e1e1e;
-$dark-fg: #eee;
-$dark-gray1: #222;
-$dark-gray2: #333;
-$dark-gray3: #444;
-$dark-gray4: #777;
-$dark-gray5: #aaa;
-$dark-gray6: #ddd;
-$dark-pln-bg: $dark-bg;
-$dark-mis-bg: #4b1818;
-$dark-run-bg: #373d29;
-$dark-exc-bg: $dark-gray2;
-$dark-par-bg: #650;
-$dark-token-com: #6A9955;
-$dark-token-str: #9CDCFE;
-$dark-context-bg-color: #056;
-
-//
-// Mixins and utilities
-//
-@mixin background-dark($color) {
- @media (prefers-color-scheme: dark) {
- background: $color;
- }
-}
-@mixin color-dark($color) {
- @media (prefers-color-scheme: dark) {
- color: $color;
- }
-}
-@mixin border-color-dark($color) {
- @media (prefers-color-scheme: dark) {
- border-color: $color;
- }
-}
-
-// Add visual outline to navigable elements on focus to improve accessibility.
-@mixin focus-border {
- &:active, &:focus {
- outline: 2px dashed $focus-color;
- }
-}
-
-// Page-wide styles
-html, body, h1, h2, h3, p, table, td, th {
- margin: 0;
- padding: 0;
- border: 0;
- font-weight: inherit;
- font-style: inherit;
- font-size: 100%;
- font-family: inherit;
- vertical-align: baseline;
-}
-
-// Set baseline grid to 16 pt.
-body {
- font-family: $font-normal;
- font-size: 1em;
- background: $light-bg;
- color: $light-fg;
- @include background-dark($dark-bg);
- @include color-dark($dark-fg);
-}
-
-html>body {
- font-size: 16px;
-}
-
-a {
- @include focus-border;
-}
-
-p {
- font-size: .875em;
- line-height: 1.4em;
-}
-
-table {
- border-collapse: collapse;
-}
-td {
- vertical-align: top;
-}
-table tr.hidden {
- display: none !important;
-}
-
-p#no_rows {
- display: none;
- font-size: 1.2em;
-}
-
-a.nav {
- text-decoration: none;
- color: inherit;
-
- &:hover {
- text-decoration: underline;
- color: inherit;
- }
-}
-
-// Page structure
-#header {
- background: $light-gray1;
- @include background-dark(black);
- width: 100%;
- border-bottom: 1px solid $light-gray2;
- @include border-color-dark($dark-gray2);
-}
-
-.indexfile #footer {
- margin: 1rem $left-gutter;
-}
-
-.pyfile #footer {
- margin: 1rem 1rem;
-}
-
-#footer .content {
- padding: 0;
- color: $light-gray5;
- @include color-dark($dark-gray5);
- font-style: italic;
-}
-
-#index {
- margin: 1rem 0 0 $left-gutter;
-}
-
-// Header styles
-#header .content {
- padding: 1rem $left-gutter;
-}
-
-h1 {
- font-size: 1.25em;
- display: inline-block;
-}
-
-#filter_container {
- float: right;
- margin: 0 2em 0 0;
-
- input {
- width: 10em;
- padding: 0.2em 0.5em;
- border: 2px solid $light-gray3;
- background: $light-bg;
- color: $light-fg;
- @include border-color-dark($dark-gray3);
- @include background-dark($dark-bg);
- @include color-dark($dark-fg);
- &:focus {
- border-color: $focus-color;
- }
- }
-}
-
-h2.stats {
- margin-top: .5em;
- font-size: 1em;
-}
-.stats button {
- font-family: inherit;
- font-size: inherit;
- border: 1px solid;
- border-radius: .2em;
- color: inherit;
- padding: .1em .5em;
- margin: 1px calc(.1em + 1px);
- cursor: pointer;
- border-color: $light-gray3;
- @include border-color-dark($dark-gray3);
- @include focus-border;
-
- @include focus-border;
-
- &.run {
- background: mix($light-run-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-run-bg);
- &.show_run {
- background: $light-run-bg;
- @include background-dark($dark-run-bg);
- border: 2px solid $run-color;
- margin: 0 .1em;
- }
- }
- &.mis {
- background: mix($light-mis-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-mis-bg);
- &.show_mis {
- background: $light-mis-bg;
- @include background-dark($dark-mis-bg);
- border: 2px solid $mis-color;
- margin: 0 .1em;
- }
- }
- &.exc {
- background: mix($light-exc-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-exc-bg);
- &.show_exc {
- background: $light-exc-bg;
- @include background-dark($dark-exc-bg);
- border: 2px solid $exc-color;
- margin: 0 .1em;
- }
- }
- &.par {
- background: mix($light-par-bg, $light-bg, $off-button-lighten);
- @include background-dark($dark-par-bg);
- &.show_par {
- background: $light-par-bg;
- @include background-dark($dark-par-bg);
- border: 2px solid $par-color;
- margin: 0 .1em;
- }
- }
-}
-
-// Yellow post-it things.
-%popup {
- display: none;
- position: absolute;
- z-index: 999;
- background: #ffffcc;
- border: 1px solid #888;
- border-radius: .2em;
- color: #333;
- padding: .25em .5em;
-}
-
-// Yellow post-it's in the text listings.
-%in-text-popup {
- @extend %popup;
- white-space: normal;
- float: right;
- top: 1.75em;
- right: 1em;
- height: auto;
-}
-
-// Help panel
-#keyboard_icon {
- float: right;
- margin: 5px;
- cursor: pointer;
-}
-
-.help_panel {
- @extend %popup;
- padding: .5em;
- border: 1px solid #883;
-
- .legend {
- font-style: italic;
- margin-bottom: 1em;
- }
-
- .indexfile & {
- width: 20em;
- min-height: 4em;
- }
-
- .pyfile & {
- width: 16em;
- min-height: 8em;
- }
-}
-
-#panel_icon {
- float: right;
- cursor: pointer;
-}
-
-.keyhelp {
- margin: .75em;
-
- .key {
- border: 1px solid black;
- border-color: #888 #333 #333 #888;
- padding: .1em .35em;
- font-family: $font-code;
- font-weight: bold;
- background: #eee;
- }
-}
-
-// Source file styles
-
-// The slim bar at the left edge of the source lines, colored by coverage.
-$border-indicator-width: .2em;
-
-#source {
- padding: 1em 0 1em $left-gutter;
- font-family: $font-code;
-
- p {
- // position relative makes position:absolute pop-ups appear in the right place.
- position: relative;
- white-space: pre;
-
- * {
- box-sizing: border-box;
- }
-
- .n {
- float: left;
- text-align: right;
- width: $left-gutter;
- box-sizing: border-box;
- margin-left: -$left-gutter;
- padding-right: 1em;
- color: $light-gray4;
- @include color-dark($dark-gray4);
-
- a {
- text-decoration: none;
- color: $light-gray4;
- @include color-dark($dark-gray4);
- &:hover {
- text-decoration: underline;
- color: $light-gray4;
- @include color-dark($dark-gray4);
- }
- }
- }
-
- &.highlight .n {
- background: #ffdd00;
- }
-
- .t {
- display: inline-block;
- width: 100%;
- box-sizing: border-box;
- margin-left: -.5em;
- padding-left: .5em - $border-indicator-width;
- border-left: $border-indicator-width solid $light-bg;
- @include border-color-dark($dark-bg);
-
- &:hover {
- background: mix($light-pln-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-pln-bg, $dark-fg, $hover-dark-amt));
-
- & ~ .r .annotate.long {
- display: block;
- }
- }
-
- // Syntax coloring
- .com {
- color: $light-token-com;
- @include color-dark($dark-token-com);
- font-style: italic;
- line-height: 1px;
- }
- .key {
- font-weight: bold;
- line-height: 1px;
- }
- .str {
- color: $light-token-str;
- @include color-dark($dark-token-str);
- }
- }
-
- &.mis {
- .t {
- border-left: $border-indicator-width solid $mis-color;
- }
-
- &.show_mis .t {
- background: $light-mis-bg;
- @include background-dark($dark-mis-bg);
-
- &:hover {
- background: mix($light-mis-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-mis-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.run {
- .t {
- border-left: $border-indicator-width solid $run-color;
- }
-
- &.show_run .t {
- background: $light-run-bg;
- @include background-dark($dark-run-bg);
-
- &:hover {
- background: mix($light-run-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-run-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.exc {
- .t {
- border-left: $border-indicator-width solid $exc-color;
- }
-
- &.show_exc .t {
- background: $light-exc-bg;
- @include background-dark($dark-exc-bg);
-
- &:hover {
- background: mix($light-exc-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-exc-bg, $dark-fg, $hover-dark-amt));
- }
- }
- }
-
- &.par {
- .t {
- border-left: $border-indicator-width solid $par-color;
- }
-
- &.show_par .t {
- background: $light-par-bg;
- @include background-dark($dark-par-bg);
-
- &:hover {
- background: mix($light-par-bg, $light-fg, $hover-dark-amt);
- @include background-dark(mix($dark-par-bg, $dark-fg, $hover-dark-amt));
- }
- }
-
- }
-
- .r {
- position: absolute;
- top: 0;
- right: 2.5em;
- font-family: $font-normal;
- }
-
- .annotate {
- font-family: $font-normal;
- color: $light-gray5;
- @include color-dark($dark-gray6);
- padding-right: .5em;
-
- &.short:hover ~ .long {
- display: block;
- }
-
- &.long {
- @extend %in-text-popup;
- width: 30em;
- right: 2.5em;
- }
- }
-
- input {
- display: none;
-
- & ~ .r label.ctx {
- cursor: pointer;
- border-radius: .25em;
- &::before {
- content: "â–¶ ";
- }
- &:hover {
- background: mix($light-context-bg-color, $light-bg, $off-button-lighten);
- @include background-dark(mix($dark-context-bg-color, $dark-bg, $off-button-lighten));
- color: $light-gray5;
- @include color-dark($dark-gray5);
- }
- }
-
- &:checked ~ .r label.ctx {
- background: $light-context-bg-color;
- @include background-dark($dark-context-bg-color);
- color: $light-gray5;
- @include color-dark($dark-gray5);
- border-radius: .75em .75em 0 0;
- padding: 0 .5em;
- margin: -.25em 0;
- &::before {
- content: "â–¼ ";
- }
- }
-
- &:checked ~ .ctxs {
- padding: .25em .5em;
- overflow-y: scroll;
- max-height: 10.5em;
- }
- }
-
- label.ctx {
- color: $light-gray4;
- @include color-dark($dark-gray4);
- display: inline-block;
- padding: 0 .5em;
- font-size: .8333em; // 10/12
- }
-
- .ctxs {
- display: block;
- max-height: 0;
- overflow-y: hidden;
- transition: all .2s;
- padding: 0 .5em;
- font-family: $font-normal;
- white-space: nowrap;
- background: $light-context-bg-color;
- @include background-dark($dark-context-bg-color);
- border-radius: .25em;
- margin-right: 1.75em;
- span {
- display: block;
- text-align: right;
- }
- }
- }
-}
-
-
-// index styles
-#index {
- font-family: $font-code;
- font-size: 0.875em;
-
- table.index {
- margin-left: -.5em;
- }
- td, th {
- text-align: right;
- width: 5em;
- padding: .25em .5em;
- border-bottom: 1px solid $light-gray2;
- @include border-color-dark($dark-gray2);
- &.name {
- text-align: left;
- width: auto;
- }
- }
- th {
- font-style: italic;
- color: $light-gray6;
- @include color-dark($dark-gray6);
- cursor: pointer;
- &:hover {
- background: $light-gray2;
- @include background-dark($dark-gray2);
- }
- &.headerSortDown, &.headerSortUp {
- white-space: nowrap;
- background: $light-gray2;
- @include background-dark($dark-gray2);
- }
- &.headerSortDown:after {
- content: " ↑";
- }
- &.headerSortUp:after {
- content: " ↓";
- }
- }
- td.name a {
- text-decoration: none;
- color: inherit;
- }
-
- tr.total td,
- tr.total_dynamic td {
- font-weight: bold;
- border-top: 1px solid #ccc;
- border-bottom: none;
- }
- tr.file:hover {
- background: $light-gray2;
- @include background-dark($dark-gray2);
- td.name {
- text-decoration: underline;
- color: inherit;
- }
- }
-}
-
-// scroll marker styles
-#scroll_marker {
- position: fixed;
- right: 0;
- top: 0;
- width: 16px;
- height: 100%;
- background: $light-bg;
- border-left: 1px solid $light-gray2;
- @include background-dark($dark-bg);
- @include border-color-dark($dark-gray2);
- will-change: transform; // for faster scrolling of fixed element in Chrome
-
- .marker {
- background: $light-gray3;
- @include background-dark($dark-gray3);
- position: absolute;
- min-height: 3px;
- width: 100%;
- }
-}
diff --git a/contrib/python/coverage/py3/coverage/inorout.py b/contrib/python/coverage/py3/coverage/inorout.py
deleted file mode 100644
index cbc80e8fb5..0000000000
--- a/contrib/python/coverage/py3/coverage/inorout.py
+++ /dev/null
@@ -1,513 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Determining whether files are being measured/reported or not."""
-
-# For finding the stdlib
-import atexit
-import inspect
-import itertools
-import os
-import platform
-import re
-import sys
-import traceback
-
-from coverage import env
-from coverage.backward import code_object
-from coverage.disposition import FileDisposition, disposition_init
-from coverage.files import TreeMatcher, FnmatchMatcher, ModuleMatcher
-from coverage.files import prep_patterns, find_python_files, canonical_filename
-from coverage.misc import CoverageException
-from coverage.python import source_for_file, source_for_morf
-
-
-# Pypy has some unusual stuff in the "stdlib". Consider those locations
-# when deciding where the stdlib is. These modules are not used for anything,
-# they are modules importable from the pypy lib directories, so that we can
-# find those directories.
-_structseq = _pypy_irc_topic = None
-if env.PYPY:
- try:
- import _structseq
- except ImportError:
- pass
-
- try:
- import _pypy_irc_topic
- except ImportError:
- pass
-
-
-def canonical_path(morf, directory=False):
- """Return the canonical path of the module or file `morf`.
-
- If the module is a package, then return its directory. If it is a
- module, then return its file, unless `directory` is True, in which
- case return its enclosing directory.
-
- """
- morf_path = canonical_filename(source_for_morf(morf))
- if morf_path.endswith("__init__.py") or directory:
- morf_path = os.path.split(morf_path)[0]
- return morf_path
-
-
-def name_for_module(filename, frame):
- """Get the name of the module for a filename and frame.
-
- For configurability's sake, we allow __main__ modules to be matched by
- their importable name.
-
- If loaded via runpy (aka -m), we can usually recover the "original"
- full dotted module name, otherwise, we resort to interpreting the
- file name to get the module's name. In the case that the module name
- can't be determined, None is returned.
-
- """
- module_globals = frame.f_globals if frame is not None else {}
- if module_globals is None: # pragma: only ironpython
- # IronPython doesn't provide globals: https://github.com/IronLanguages/main/issues/1296
- module_globals = {}
-
- dunder_name = module_globals.get('__name__', None)
-
- if isinstance(dunder_name, str) and dunder_name != '__main__':
- # This is the usual case: an imported module.
- return dunder_name
-
- loader = module_globals.get('__loader__', None)
- for attrname in ('fullname', 'name'): # attribute renamed in py3.2
- if hasattr(loader, attrname):
- fullname = getattr(loader, attrname)
- else:
- continue
-
- if isinstance(fullname, str) and fullname != '__main__':
- # Module loaded via: runpy -m
- return fullname
-
- # Script as first argument to Python command line.
- inspectedname = inspect.getmodulename(filename)
- if inspectedname is not None:
- return inspectedname
- else:
- return dunder_name
-
-
-def module_is_namespace(mod):
- """Is the module object `mod` a PEP420 namespace module?"""
- return hasattr(mod, '__path__') and getattr(mod, '__file__', None) is None
-
-
-def module_has_file(mod):
- """Does the module object `mod` have an existing __file__ ?"""
- mod__file__ = getattr(mod, '__file__', None)
- if mod__file__ is None:
- return False
- return os.path.exists(mod__file__)
-
-
-class InOrOut(object):
- """Machinery for determining what files to measure."""
-
- def __init__(self, warn, debug):
- self.warn = warn
- self.debug = debug
-
- # The matchers for should_trace.
- self.source_match = None
- self.source_pkgs_match = None
- self.pylib_paths = self.cover_paths = None
- self.pylib_match = self.cover_match = None
- self.include_match = self.omit_match = None
- self.plugins = []
- self.disp_class = FileDisposition
-
- # The source argument can be directories or package names.
- self.source = []
- self.source_pkgs = []
- self.source_pkgs_unmatched = []
- self.omit = self.include = None
-
- def configure(self, config):
- """Apply the configuration to get ready for decision-time."""
- self.config = config
- self.source_pkgs.extend(config.source_pkgs)
- for src in config.source or []:
- if os.path.isdir(src):
- self.source.append(canonical_filename(src))
- else:
- self.source_pkgs.append(src)
- self.source_pkgs_unmatched = self.source_pkgs[:]
-
- self.omit = prep_patterns(config.run_omit)
- if getattr(sys, 'is_standalone_binary', False):
- # don't trace contrib
- self.omit.append('contrib/python/*')
- self.omit.append('contrib/libs/protobuf/*')
- self.omit.append('library/python/pytest/*')
- self.include = prep_patterns(config.run_include)
-
- # The directories for files considered "installed with the interpreter".
- self.pylib_paths = set()
- if getattr(sys, 'is_standalone_binary', False):
- self.pylib_paths.add('contrib/tools/python')
- self.pylib_paths.add('contrib/tools/python3')
- if not self.pylib_paths and not config.cover_pylib:
- # Look at where some standard modules are located. That's the
- # indication for "installed with the interpreter". In some
- # environments (virtualenv, for example), these modules may be
- # spread across a few locations. Look at all the candidate modules
- # we've imported, and take all the different ones.
- for m in (atexit, inspect, os, platform, _pypy_irc_topic, re, _structseq, traceback):
- if m is not None and hasattr(m, "__file__"):
- self.pylib_paths.add(canonical_path(m, directory=True))
-
- if _structseq and not hasattr(_structseq, '__file__'):
- # PyPy 2.4 has no __file__ in the builtin modules, but the code
- # objects still have the file names. So dig into one to find
- # the path to exclude. The "filename" might be synthetic,
- # don't be fooled by those.
- structseq_file = code_object(_structseq.structseq_new).co_filename
- if not structseq_file.startswith("<"):
- self.pylib_paths.add(canonical_path(structseq_file))
-
- # To avoid tracing the coverage.py code itself, we skip anything
- # located where we are.
- if getattr(sys, 'is_standalone_binary', False):
- self.cover_paths = ["contrib/python/coverage"]
- else:
- self.cover_paths = [canonical_path(__file__, directory=True)]
- if env.TESTING:
- # Don't include our own test code.
- self.cover_paths.append(os.path.join(self.cover_paths[0], "tests"))
-
- # When testing, we use PyContracts, which should be considered
- # part of coverage.py, and it uses six. Exclude those directories
- # just as we exclude ourselves.
- import contracts
- import six
- for mod in [contracts, six]:
- self.cover_paths.append(canonical_path(mod))
-
- def debug(msg):
- if self.debug:
- self.debug.write(msg)
-
- # Create the matchers we need for should_trace
- if self.source or self.source_pkgs:
- against = []
- if self.source:
- self.source_match = TreeMatcher(self.source)
- against.append("trees {!r}".format(self.source_match))
- if self.source_pkgs:
- self.source_pkgs_match = ModuleMatcher(self.source_pkgs)
- against.append("modules {!r}".format(self.source_pkgs_match))
- debug("Source matching against " + " and ".join(against))
- else:
- if self.cover_paths:
- self.cover_match = TreeMatcher(self.cover_paths)
- debug("Coverage code matching: {!r}".format(self.cover_match))
- if self.pylib_paths:
- self.pylib_match = TreeMatcher(self.pylib_paths)
- debug("Python stdlib matching: {!r}".format(self.pylib_match))
- if self.include:
- self.include_match = FnmatchMatcher(self.include)
- debug("Include matching: {!r}".format(self.include_match))
- if self.omit:
- self.omit_match = FnmatchMatcher(self.omit)
- debug("Omit matching: {!r}".format(self.omit_match))
-
- def should_trace(self, filename, frame=None):
- """Decide whether to trace execution in `filename`, with a reason.
-
- This function is called from the trace function. As each new file name
- is encountered, this function determines whether it is traced or not.
-
- Returns a FileDisposition object.
-
- """
- original_filename = filename
- disp = disposition_init(self.disp_class, filename)
-
- def nope(disp, reason):
- """Simple helper to make it easy to return NO."""
- disp.trace = False
- disp.reason = reason
- return disp
-
- if frame is not None:
- # Compiled Python files have two file names: frame.f_code.co_filename is
- # the file name at the time the .pyc was compiled. The second name is
- # __file__, which is where the .pyc was actually loaded from. Since
- # .pyc files can be moved after compilation (for example, by being
- # installed), we look for __file__ in the frame and prefer it to the
- # co_filename value.
- dunder_file = frame.f_globals and frame.f_globals.get('__file__')
- if dunder_file:
- filename = source_for_file(dunder_file)
- if original_filename and not original_filename.startswith('<'):
- orig = os.path.basename(original_filename)
- if orig != os.path.basename(filename):
- # Files shouldn't be renamed when moved. This happens when
- # exec'ing code. If it seems like something is wrong with
- # the frame's file name, then just use the original.
- filename = original_filename
-
- if not filename:
- # Empty string is pretty useless.
- return nope(disp, "empty string isn't a file name")
-
- if filename.startswith('memory:'):
- return nope(disp, "memory isn't traceable")
-
- if filename.startswith('<'):
- # Lots of non-file execution is represented with artificial
- # file names like "<string>", "<doctest readme.txt[0]>", or
- # "<exec_function>". Don't ever trace these executions, since we
- # can't do anything with the data later anyway.
- return nope(disp, "not a real file name")
-
- # pyexpat does a dumb thing, calling the trace function explicitly from
- # C code with a C file name.
- if re.search(r"[/\\]Modules[/\\]pyexpat.c", filename):
- return nope(disp, "pyexpat lies about itself")
-
- # Jython reports the .class file to the tracer, use the source file.
- if filename.endswith("$py.class"):
- filename = filename[:-9] + ".py"
-
- # XXX maybe we need to support both at the same time?
- # Don't trace modules imported from environment in standalone mode
- if getattr(sys, 'is_standalone_binary', False) and filename.startswith("/"):
- return nope(disp, "skip modules from environment")
-
- canonical = canonical_filename(filename)
- disp.canonical_filename = canonical
-
- # Try the plugins, see if they have an opinion about the file.
- plugin = None
- for plugin in self.plugins.file_tracers:
- if not plugin._coverage_enabled:
- continue
-
- try:
- file_tracer = plugin.file_tracer(canonical)
- if file_tracer is not None:
- file_tracer._coverage_plugin = plugin
- disp.trace = True
- disp.file_tracer = file_tracer
- if file_tracer.has_dynamic_source_filename():
- disp.has_dynamic_filename = True
- else:
- disp.source_filename = canonical_filename(
- file_tracer.source_filename()
- )
- break
- except Exception:
- if not self.config.suppress_plugin_errors:
- raise
- self.warn(
- "Disabling plug-in %r due to an exception:" % (plugin._coverage_plugin_name)
- )
- traceback.print_exc()
- plugin._coverage_enabled = False
- continue
- else:
- # No plugin wanted it: it's Python.
- disp.trace = True
- disp.source_filename = canonical
-
- if not disp.has_dynamic_filename:
- if not disp.source_filename:
- raise CoverageException(
- "Plugin %r didn't set source_filename for %r" %
- (plugin, disp.original_filename)
- )
- reason = self.check_include_omit_etc(disp.source_filename, frame)
- if reason:
- nope(disp, reason)
-
- return disp
-
- def check_include_omit_etc(self, filename, frame):
- """Check a file name against the include, omit, etc, rules.
-
- Returns a string or None. String means, don't trace, and is the reason
- why. None means no reason found to not trace.
-
- """
- modulename = name_for_module(filename, frame)
-
- # If the user specified source or include, then that's authoritative
- # about the outer bound of what to measure and we don't have to apply
- # any canned exclusions. If they didn't, then we have to exclude the
- # stdlib and coverage.py directories.
- if self.source_match or self.source_pkgs_match:
- extra = ""
- ok = False
- if self.source_pkgs_match:
- if self.source_pkgs_match.match(modulename):
- ok = True
- if modulename in self.source_pkgs_unmatched:
- self.source_pkgs_unmatched.remove(modulename)
- else:
- extra = "module {!r} ".format(modulename)
- if not ok and self.source_match:
- if self.source_match.match(filename):
- ok = True
- if not ok:
- return extra + "falls outside the --source spec"
- elif self.include_match:
- if not self.include_match.match(filename):
- return "falls outside the --include trees"
- else:
- # If we aren't supposed to trace installed code, then check if this
- # is near the Python standard library and skip it if so.
- if self.pylib_match and self.pylib_match.match(filename):
- return "is in the stdlib"
-
- # We exclude the coverage.py code itself, since a little of it
- # will be measured otherwise.
- if self.cover_match and self.cover_match.match(filename):
- return "is part of coverage.py"
-
- # Check the file against the omit pattern.
- if self.omit_match and self.omit_match.match(filename):
- return "is inside an --omit pattern"
-
- # No point tracing a file we can't later write to SQLite.
- try:
- filename.encode("utf8")
- except UnicodeEncodeError:
- return "non-encodable filename"
-
- # No reason found to skip this file.
- return None
-
- def warn_conflicting_settings(self):
- """Warn if there are settings that conflict."""
- if self.include:
- if self.source or self.source_pkgs:
- self.warn("--include is ignored because --source is set", slug="include-ignored")
-
- def warn_already_imported_files(self):
- """Warn if files have already been imported that we will be measuring."""
- if self.include or self.source or self.source_pkgs:
- warned = set()
- for mod in list(sys.modules.values()):
- filename = getattr(mod, "__file__", None)
- if filename is None:
- continue
- if filename in warned:
- continue
-
- disp = self.should_trace(filename)
- if disp.trace:
- msg = "Already imported a file that will be measured: {}".format(filename)
- self.warn(msg, slug="already-imported")
- warned.add(filename)
-
- def warn_unimported_source(self):
- """Warn about source packages that were of interest, but never traced."""
- for pkg in self.source_pkgs_unmatched:
- self._warn_about_unmeasured_code(pkg)
-
- def _warn_about_unmeasured_code(self, pkg):
- """Warn about a package or module that we never traced.
-
- `pkg` is a string, the name of the package or module.
-
- """
- mod = sys.modules.get(pkg)
- if mod is None:
- self.warn("Module %s was never imported." % pkg, slug="module-not-imported")
- return
-
- if module_is_namespace(mod):
- # A namespace package. It's OK for this not to have been traced,
- # since there is no code directly in it.
- return
-
- if not module_has_file(mod):
- self.warn("Module %s has no Python source." % pkg, slug="module-not-python")
- return
-
- # The module was in sys.modules, and seems like a module with code, but
- # we never measured it. I guess that means it was imported before
- # coverage even started.
- self.warn(
- "Module %s was previously imported, but not measured" % pkg,
- slug="module-not-measured",
- )
-
- def find_possibly_unexecuted_files(self):
- """Find files in the areas of interest that might be untraced.
-
- Yields pairs: file path, and responsible plug-in name.
- """
- for pkg in self.source_pkgs:
- if (not pkg in sys.modules or
- not module_has_file(sys.modules[pkg])):
- continue
- pkg_file = source_for_file(sys.modules[pkg].__file__)
- for ret in self._find_executable_files(canonical_path(pkg_file)):
- yield ret
-
- for src in self.source:
- for ret in self._find_executable_files(src):
- yield ret
-
- def _find_plugin_files(self, src_dir):
- """Get executable files from the plugins."""
- for plugin in self.plugins.file_tracers:
- for x_file in plugin.find_executable_files(src_dir):
- yield x_file, plugin._coverage_plugin_name
-
- def _find_executable_files(self, src_dir):
- """Find executable files in `src_dir`.
-
- Search for files in `src_dir` that can be executed because they
- are probably importable. Don't include ones that have been omitted
- by the configuration.
-
- Yield the file path, and the plugin name that handles the file.
-
- """
- py_files = ((py_file, None) for py_file in find_python_files(src_dir))
- plugin_files = self._find_plugin_files(src_dir)
-
- for file_path, plugin_name in itertools.chain(py_files, plugin_files):
- file_path = canonical_filename(file_path)
- if self.omit_match and self.omit_match.match(file_path):
- # Turns out this file was omitted, so don't pull it back
- # in as unexecuted.
- continue
- yield file_path, plugin_name
-
- def sys_info(self):
- """Our information for Coverage.sys_info.
-
- Returns a list of (key, value) pairs.
- """
- info = [
- ('cover_paths', self.cover_paths),
- ('pylib_paths', self.pylib_paths),
- ]
-
- matcher_names = [
- 'source_match', 'source_pkgs_match',
- 'include_match', 'omit_match',
- 'cover_match', 'pylib_match',
- ]
-
- for matcher_name in matcher_names:
- matcher = getattr(self, matcher_name)
- if matcher:
- matcher_info = matcher.info()
- else:
- matcher_info = '-none-'
- info.append((matcher_name, matcher_info))
-
- return info
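The InOrOut.check_include_omit_etc method deleted above documents a fixed decision order: an explicit --source/--include spec is authoritative; otherwise the stdlib and coverage.py's own directories are excluded; --omit patterns are always checked last. The sketch below mirrors that order with plain prefix and fnmatch matching; it is an illustration only, not coverage.py's real matcher classes, and every path and pattern in it is hypothetical.

import fnmatch

def skip_reason(filename, source_dirs=(), include=(), omit=(),
                stdlib_dirs=(), cover_dirs=()):
    """Return a reason string for not tracing `filename`, or None to trace it."""
    if source_dirs:
        # --source is authoritative about the outer bound of measurement.
        if not any(filename.startswith(d) for d in source_dirs):
            return "falls outside the --source spec"
    elif include:
        if not any(fnmatch.fnmatch(filename, pat) for pat in include):
            return "falls outside the --include trees"
    else:
        # No source/include given: apply the canned stdlib and self exclusions.
        if any(filename.startswith(d) for d in stdlib_dirs):
            return "is in the stdlib"
        if any(filename.startswith(d) for d in cover_dirs):
            return "is part of coverage.py"
    if any(fnmatch.fnmatch(filename, pat) for pat in omit):
        return "is inside an --omit pattern"
    return None

print(skip_reason("/prj/app/main.py", source_dirs=("/prj/app",), omit=("*/tests/*",)))          # None, so trace it
print(skip_reason("/prj/app/tests/test_x.py", source_dirs=("/prj/app",), omit=("*/tests/*",)))  # omit reason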
diff --git a/contrib/python/coverage/py3/coverage/jsonreport.py b/contrib/python/coverage/py3/coverage/jsonreport.py
deleted file mode 100644
index 4287bc79a3..0000000000
--- a/contrib/python/coverage/py3/coverage/jsonreport.py
+++ /dev/null
@@ -1,103 +0,0 @@
-# coding: utf-8
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Json reporting for coverage.py"""
-import datetime
-import json
-import sys
-
-from coverage import __version__
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-
-
-class JsonReporter(object):
- """A reporter for writing JSON coverage results."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.total = Numbers()
- self.report_data = {}
-
- def report(self, morfs, outfile=None):
- """Generate a json report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- `outfile` is a file object to write the json to
-
- """
- outfile = outfile or sys.stdout
- coverage_data = self.coverage.get_data()
- coverage_data.set_query_contexts(self.config.report_contexts)
- self.report_data["meta"] = {
- "version": __version__,
- "timestamp": datetime.datetime.now().isoformat(),
- "branch_coverage": coverage_data.has_arcs(),
- "show_contexts": self.config.json_show_contexts,
- }
-
- measured_files = {}
- for file_reporter, analysis in get_analysis_to_report(self.coverage, morfs):
- measured_files[file_reporter.relative_filename()] = self.report_one_file(
- coverage_data,
- analysis
- )
-
- self.report_data["files"] = measured_files
-
- self.report_data["totals"] = {
- 'covered_lines': self.total.n_executed,
- 'num_statements': self.total.n_statements,
- 'percent_covered': self.total.pc_covered,
- 'missing_lines': self.total.n_missing,
- 'excluded_lines': self.total.n_excluded,
- }
-
- if coverage_data.has_arcs():
- self.report_data["totals"].update({
- 'num_branches': self.total.n_branches,
- 'num_partial_branches': self.total.n_partial_branches,
- 'covered_branches': self.total.n_executed_branches,
- 'missing_branches': self.total.n_missing_branches,
- })
-
- json.dump(
- self.report_data,
- outfile,
- indent=4 if self.config.json_pretty_print else None
- )
-
- return self.total.n_statements and self.total.pc_covered
-
- def report_one_file(self, coverage_data, analysis):
- """Extract the relevant report data for a single file"""
- nums = analysis.numbers
- self.total += nums
- summary = {
- 'covered_lines': nums.n_executed,
- 'num_statements': nums.n_statements,
- 'percent_covered': nums.pc_covered,
- 'missing_lines': nums.n_missing,
- 'excluded_lines': nums.n_excluded,
- }
- reported_file = {
- 'executed_lines': sorted(analysis.executed),
- 'summary': summary,
- 'missing_lines': sorted(analysis.missing),
- 'excluded_lines': sorted(analysis.excluded)
- }
- if self.config.json_show_contexts:
- reported_file['contexts'] = analysis.data.contexts_by_lineno(
- analysis.filename,
- )
- if coverage_data.has_arcs():
- reported_file['summary'].update({
- 'num_branches': nums.n_branches,
- 'num_partial_branches': nums.n_partial_branches,
- 'covered_branches': nums.n_executed_branches,
- 'missing_branches': nums.n_missing_branches,
- })
- return reported_file
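JsonReporter, deleted above, writes a report with "meta", "files" and "totals" sections, and each file entry carries a "summary" plus "executed_lines", "missing_lines" and "excluded_lines". A minimal consumer of such a report could look like the sketch below; the coverage.json path is hypothetical (for example, the output of `coverage json -o coverage.json`).

import json

with open("coverage.json") as f:
    report = json.load(f)

print("coverage.py", report["meta"]["version"], "at", report["meta"]["timestamp"])
print("total: %.1f%% covered" % report["totals"]["percent_covered"])
for path, entry in report["files"].items():
    summary = entry["summary"]
    print("%s: %d/%d statements run, missing lines %s"
          % (path, summary["covered_lines"], summary["num_statements"], entry["missing_lines"]))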
diff --git a/contrib/python/coverage/py3/coverage/misc.py b/contrib/python/coverage/py3/coverage/misc.py
deleted file mode 100644
index 034e288eb9..0000000000
--- a/contrib/python/coverage/py3/coverage/misc.py
+++ /dev/null
@@ -1,361 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Miscellaneous stuff for coverage.py."""
-
-import errno
-import hashlib
-import inspect
-import locale
-import os
-import os.path
-import random
-import re
-import socket
-import sys
-import types
-
-from coverage import env
-from coverage.backward import to_bytes, unicode_class
-
-ISOLATED_MODULES = {}
-
-
-def isolate_module(mod):
- """Copy a module so that we are isolated from aggressive mocking.
-
- If a test suite mocks os.path.exists (for example), and then we need to use
- it during the test, everything will get tangled up if we use their mock.
- Making a copy of the module when we import it will isolate coverage.py from
- those complications.
- """
- if mod not in ISOLATED_MODULES:
- new_mod = types.ModuleType(mod.__name__)
- ISOLATED_MODULES[mod] = new_mod
- for name in dir(mod):
- value = getattr(mod, name)
- if isinstance(value, types.ModuleType):
- value = isolate_module(value)
- setattr(new_mod, name, value)
- return ISOLATED_MODULES[mod]
-
-os = isolate_module(os)
-
-
-def dummy_decorator_with_args(*args_unused, **kwargs_unused):
- """Dummy no-op implementation of a decorator with arguments."""
- def _decorator(func):
- return func
- return _decorator
-
-
-# Environment COVERAGE_NO_CONTRACTS=1 can turn off contracts while debugging
-# tests to remove noise from stack traces.
-# $set_env.py: COVERAGE_NO_CONTRACTS - Disable PyContracts to simplify stack traces.
-USE_CONTRACTS = env.TESTING and not bool(int(os.environ.get("COVERAGE_NO_CONTRACTS", 0)))
-
-# Use PyContracts for assertion testing on parameters and returns, but only if
-# we are running our own test suite.
-if USE_CONTRACTS:
- from contracts import contract # pylint: disable=unused-import
- from contracts import new_contract as raw_new_contract
-
- def new_contract(*args, **kwargs):
- """A proxy for contracts.new_contract that doesn't mind happening twice."""
- try:
- raw_new_contract(*args, **kwargs)
- except ValueError:
- # During meta-coverage, this module is imported twice, and
- # PyContracts doesn't like redefining contracts. It's OK.
- pass
-
- # Define contract words that PyContract doesn't have.
- new_contract('bytes', lambda v: isinstance(v, bytes))
- if env.PY3:
- new_contract('unicode', lambda v: isinstance(v, unicode_class))
-
- def one_of(argnames):
- """Ensure that only one of the argnames is non-None."""
- def _decorator(func):
- argnameset = {name.strip() for name in argnames.split(",")}
- def _wrapper(*args, **kwargs):
- vals = [kwargs.get(name) for name in argnameset]
- assert sum(val is not None for val in vals) == 1
- return func(*args, **kwargs)
- return _wrapper
- return _decorator
-else: # pragma: not testing
- # We aren't using real PyContracts, so just define our decorators as
- # stunt-double no-ops.
- contract = dummy_decorator_with_args
- one_of = dummy_decorator_with_args
-
- def new_contract(*args_unused, **kwargs_unused):
- """Dummy no-op implementation of `new_contract`."""
- pass
-
-
-def nice_pair(pair):
- """Make a nice string representation of a pair of numbers.
-
- If the numbers are equal, just return the number, otherwise return the pair
- with a dash between them, indicating the range.
-
- """
- start, end = pair
- if start == end:
- return "%d" % start
- else:
- return "%d-%d" % (start, end)
-
-
-def expensive(fn):
- """A decorator to indicate that a method shouldn't be called more than once.
-
- Normally, this does nothing. During testing, this raises an exception if
- called more than once.
-
- """
- if env.TESTING:
- attr = "_once_" + fn.__name__
-
- def _wrapper(self):
- if hasattr(self, attr):
- raise AssertionError("Shouldn't have called %s more than once" % fn.__name__)
- setattr(self, attr, True)
- return fn(self)
- return _wrapper
- else:
- return fn # pragma: not testing
-
-
-def bool_or_none(b):
- """Return bool(b), but preserve None."""
- if b is None:
- return None
- else:
- return bool(b)
-
-
-def join_regex(regexes):
- """Combine a list of regexes into one that matches any of them."""
- return "|".join("(?:%s)" % r for r in regexes)
-
-
-def file_be_gone(path):
- """Remove a file, and don't get annoyed if it doesn't exist."""
- try:
- os.remove(path)
- except OSError as e:
- if e.errno != errno.ENOENT:
- raise
-
-
-def ensure_dir(directory):
- """Make sure the directory exists.
-
- If `directory` is None or empty, do nothing.
- """
- if directory and not os.path.isdir(directory):
- os.makedirs(directory)
-
-
-def ensure_dir_for_file(path):
- """Make sure the directory for the path exists."""
- ensure_dir(os.path.dirname(path))
-
-
-def output_encoding(outfile=None):
- """Determine the encoding to use for output written to `outfile` or stdout."""
- if outfile is None:
- outfile = sys.stdout
- encoding = (
- getattr(outfile, "encoding", None) or
- getattr(sys.__stdout__, "encoding", None) or
- locale.getpreferredencoding()
- )
- return encoding
-
-
-def filename_suffix(suffix):
- """Compute a filename suffix for a data file.
-
- If `suffix` is a string or None, simply return it. If `suffix` is True,
- then build a suffix incorporating the hostname, process id, and a random
- number.
-
- Returns a string or None.
-
- """
- if suffix is True:
- # If data_suffix was a simple true value, then make a suffix with
- # plenty of distinguishing information. We do this here in
- # `save()` at the last minute so that the pid will be correct even
- # if the process forks.
- dice = random.Random(os.urandom(8)).randint(0, 999999)
- suffix = "%s.%s.%06d" % (socket.gethostname(), os.getpid(), dice)
- return suffix
-
-
-class Hasher(object):
- """Hashes Python data into md5."""
- def __init__(self):
- self.md5 = hashlib.md5()
-
- def update(self, v):
- """Add `v` to the hash, recursively if needed."""
- self.md5.update(to_bytes(str(type(v))))
- if isinstance(v, unicode_class):
- self.md5.update(v.encode('utf8'))
- elif isinstance(v, bytes):
- self.md5.update(v)
- elif v is None:
- pass
- elif isinstance(v, (int, float)):
- self.md5.update(to_bytes(str(v)))
- elif isinstance(v, (tuple, list)):
- for e in v:
- self.update(e)
- elif isinstance(v, dict):
- keys = v.keys()
- for k in sorted(keys):
- self.update(k)
- self.update(v[k])
- else:
- for k in dir(v):
- if k.startswith('__'):
- continue
- a = getattr(v, k)
- if inspect.isroutine(a):
- continue
- self.update(k)
- self.update(a)
- self.md5.update(b'.')
-
- def hexdigest(self):
- """Retrieve the hex digest of the hash."""
- return self.md5.hexdigest()
-
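`update()` folds the type and contents of nested data into the digest, so structurally equal values produce equal digests. A minimal sketch, assuming `coverage.misc` is importable:

    from coverage.misc import Hasher

    h1 = Hasher()
    h1.update({"lines": [1, 2, 3], "name": "mod.py"})
    h2 = Hasher()
    h2.update({"lines": [1, 2, 3], "name": "mod.py"})
    assert h1.hexdigest() == h2.hexdigest()   # same data, same digest
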
-
-def _needs_to_implement(that, func_name):
- """Helper to raise NotImplementedError in interface stubs."""
- if hasattr(that, "_coverage_plugin_name"):
- thing = "Plugin"
- name = that._coverage_plugin_name
- else:
- thing = "Class"
- klass = that.__class__
- name = "{klass.__module__}.{klass.__name__}".format(klass=klass)
-
- raise NotImplementedError(
- "{thing} {name!r} needs to implement {func_name}()".format(
- thing=thing, name=name, func_name=func_name
- )
- )
-
-
-class DefaultValue(object):
- """A sentinel object to use for unusual default-value needs.
-
- Construct with a string that will be used as the repr, for display in help
- and Sphinx output.
-
- """
- def __init__(self, display_as):
- self.display_as = display_as
-
- def __repr__(self):
- return self.display_as
-
-
-def substitute_variables(text, variables):
- """Substitute ``${VAR}`` variables in `text` with their values.
-
- Variables in the text can take a number of shell-inspired forms::
-
- $VAR
- ${VAR}
- ${VAR?} strict: an error if VAR isn't defined.
- ${VAR-missing} defaulted: "missing" if VAR isn't defined.
- $$ just a dollar sign.
-
- `variables` is a dictionary of variable values.
-
- Returns the resulting text with values substituted.
-
- """
- dollar_pattern = r"""(?x) # Use extended regex syntax
- \$ # A dollar sign,
- (?: # then
- (?P<dollar>\$) | # a dollar sign, or
- (?P<word1>\w+) | # a plain word, or
- { # a {-wrapped
- (?P<word2>\w+) # word,
- (?:
- (?P<strict>\?) | # with a strict marker
- -(?P<defval>[^}]*) # or a default value
- )? # maybe.
- }
- )
- """
-
- def dollar_replace(match):
- """Called for each $replacement."""
- # Only one of the groups will have matched, just get its text.
- word = next(g for g in match.group('dollar', 'word1', 'word2') if g)
- if word == "$":
- return "$"
- elif word in variables:
- return variables[word]
- elif match.group('strict'):
- msg = "Variable {} is undefined: {!r}".format(word, text)
- raise CoverageException(msg)
- else:
- return match.group('defval')
-
- text = re.sub(dollar_pattern, dollar_replace, text)
- return text
-
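A short sketch of the supported forms, assuming `coverage.misc` is importable:

    from coverage.misc import substitute_variables

    text = "user=$USER home=${HOME-/tmp} cost=$$5"
    print(substitute_variables(text, {"USER": "ned"}))
    # user=ned home=/tmp cost=$5

    # The strict form raises CoverageException when the variable is missing:
    # substitute_variables("${MISSING?}", {})
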
-
-class BaseCoverageException(Exception):
- """The base of all Coverage exceptions."""
- pass
-
-
-class CoverageException(BaseCoverageException):
- """An exception raised by a coverage.py function."""
- pass
-
-
-class NoSource(CoverageException):
- """We couldn't find the source for a module."""
- pass
-
-
-class NoCode(NoSource):
- """We couldn't find any code at all."""
- pass
-
-
-class NotPython(CoverageException):
- """A source file turned out not to be parsable Python."""
- pass
-
-
-class ExceptionDuringRun(CoverageException):
- """An exception happened while running customer code.
-
- Construct it with three arguments, the values from `sys.exc_info`.
-
- """
- pass
-
-
-class StopEverything(BaseCoverageException):
- """An exception that means everything should stop.
-
- The CoverageTest class converts these to SkipTest, so that when running
- tests, raising this exception will automatically skip the test.
-
- """
- pass
diff --git a/contrib/python/coverage/py3/coverage/multiproc.py b/contrib/python/coverage/py3/coverage/multiproc.py
deleted file mode 100644
index 21ed2e2c95..0000000000
--- a/contrib/python/coverage/py3/coverage/multiproc.py
+++ /dev/null
@@ -1,117 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Monkey-patching to add multiprocessing support for coverage.py"""
-
-import multiprocessing
-import multiprocessing.process
-import os
-import os.path
-import sys
-import traceback
-
-from coverage import env
-from coverage.misc import contract
-
-# An attribute that will be set on the module to indicate that it has been
-# monkey-patched.
-PATCHED_MARKER = "_coverage$patched"
-
-COVERAGE_CONFIGURATION_ENV = "_COVERAGE_CONFIGURATION_ENV"
-
-
-if env.PYVERSION >= (3, 4):
- OriginalProcess = multiprocessing.process.BaseProcess
-else:
- OriginalProcess = multiprocessing.Process
-
-original_bootstrap = OriginalProcess._bootstrap
-
-class ProcessWithCoverage(OriginalProcess): # pylint: disable=abstract-method
- """A replacement for multiprocess.Process that starts coverage."""
-
- def _bootstrap(self, *args, **kwargs):
- """Wrapper around _bootstrap to start coverage."""
- try:
- from coverage import Coverage # avoid circular import
- import json
- kwconf = json.loads(os.environ[COVERAGE_CONFIGURATION_ENV])
- cov = Coverage(**kwconf)
- cov._warn_preimported_source = False
- cov.start()
- debug = cov._debug
- if debug.should("multiproc"):
- debug.write("Calling multiprocessing bootstrap")
- except Exception:
- print("Exception during multiprocessing bootstrap init:")
- traceback.print_exc(file=sys.stdout)
- sys.stdout.flush()
- raise
- try:
- return original_bootstrap(self, *args, **kwargs)
- finally:
- if debug.should("multiproc"):
- debug.write("Finished multiprocessing bootstrap")
- cov.stop()
- cov.save()
- if debug.should("multiproc"):
- debug.write("Saved multiprocessing data")
-
-class Stowaway(object):
- """An object to pickle, so when it is unpickled, it can apply the monkey-patch."""
- def __init__(self, rcfile):
- self.rcfile = rcfile
-
- def __getstate__(self):
- return {'rcfile': self.rcfile}
-
- def __setstate__(self, state):
- patch_multiprocessing(state['rcfile'])
-
-
-@contract(rcfile=str)
-def patch_multiprocessing(rcfile, coverage_args=None):
- """Monkey-patch the multiprocessing module.
-
- This enables coverage measurement of processes started by multiprocessing.
- This involves aggressive monkey-patching.
-
-    `rcfile` is the path to the rcfile being used.
-
-    `coverage_args` is an optional JSON-encoded dict of keyword arguments for
-    the Coverage constructor, passed to child processes through the
-    environment.
-
-    """
-
- if hasattr(multiprocessing, PATCHED_MARKER):
- return
-
- if env.PYVERSION >= (3, 4):
- OriginalProcess._bootstrap = ProcessWithCoverage._bootstrap
- else:
- multiprocessing.Process = ProcessWithCoverage
-
-    # Record the rcfile and configuration in the environment so they are
-    # available to child processes.
- os.environ["COVERAGE_RCFILE"] = os.path.abspath(rcfile)
-
-    if coverage_args is not None:
-        os.environ[COVERAGE_CONFIGURATION_ENV] = coverage_args
-
- # When spawning processes rather than forking them, we have no state in the
- # new process. We sneak in there with a Stowaway: we stuff one of our own
- # objects into the data that gets pickled and sent to the sub-process. When
-    # the Stowaway is unpickled, its __setstate__ method is called, which
- # re-applies the monkey-patch.
- # Windows only spawns, so this is needed to keep Windows working.
- try:
- from multiprocessing import spawn
- original_get_preparation_data = spawn.get_preparation_data
- except (ImportError, AttributeError):
- pass
- else:
- def get_preparation_data_with_stowaway(name):
- """Get the original preparation data, and also insert our stowaway."""
- d = original_get_preparation_data(name)
- d['stowaway'] = Stowaway(rcfile)
- return d
-
- spawn.get_preparation_data = get_preparation_data_with_stowaway
-
- setattr(multiprocessing, PATCHED_MARKER, True)
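A hypothetical sketch of how a parent process might apply this patch before starting workers; the rcfile path, configuration values, and worker function are placeholders, and the JSON payload mirrors what `_bootstrap` reads back from the environment:

    import json
    import multiprocessing

    from coverage.multiproc import patch_multiprocessing

    def work():
        # Placeholder worker; its lines are measured by the child's Coverage.
        return sum(range(10))

    if __name__ == "__main__":
        patch_multiprocessing(
            rcfile=".coveragerc",
            coverage_args=json.dumps({"data_suffix": True}),
        )
        worker = multiprocessing.Process(target=work)
        worker.start()
        worker.join()
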
diff --git a/contrib/python/coverage/py3/coverage/numbits.py b/contrib/python/coverage/py3/coverage/numbits.py
deleted file mode 100644
index 6ca96fbcf7..0000000000
--- a/contrib/python/coverage/py3/coverage/numbits.py
+++ /dev/null
@@ -1,163 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""
-Functions to manipulate packed binary representations of number sets.
-
-To save space, coverage stores sets of line numbers in SQLite using a packed
-binary representation called a numbits. A numbits is a set of positive
-integers.
-
-A numbits is stored as a blob in the database. The exact meaning of the bytes
-in the blobs should be considered an implementation detail that might change in
-the future. Use these functions to work with those binary blobs of data.
-
-"""
-import json
-
-from coverage import env
-from coverage.backward import byte_to_int, bytes_to_ints, binary_bytes, zip_longest
-from coverage.misc import contract, new_contract
-
-if env.PY3:
- def _to_blob(b):
- """Convert a bytestring into a type SQLite will accept for a blob."""
- return b
-
- new_contract('blob', lambda v: isinstance(v, bytes))
-else:
- def _to_blob(b):
- """Convert a bytestring into a type SQLite will accept for a blob."""
- return buffer(b) # pylint: disable=undefined-variable
-
- new_contract('blob', lambda v: isinstance(v, buffer)) # pylint: disable=undefined-variable
-
-
-@contract(nums='Iterable', returns='blob')
-def nums_to_numbits(nums):
- """Convert `nums` into a numbits.
-
- Arguments:
- nums: a reusable iterable of integers, the line numbers to store.
-
- Returns:
- A binary blob.
- """
- try:
- nbytes = max(nums) // 8 + 1
- except ValueError:
- # nums was empty.
- return _to_blob(b'')
- b = bytearray(nbytes)
- for num in nums:
- b[num//8] |= 1 << num % 8
- return _to_blob(bytes(b))
-
-
-@contract(numbits='blob', returns='list[int]')
-def numbits_to_nums(numbits):
- """Convert a numbits into a list of numbers.
-
- Arguments:
- numbits: a binary blob, the packed number set.
-
- Returns:
- A list of ints.
-
- When registered as a SQLite function by :func:`register_sqlite_functions`,
- this returns a string, a JSON-encoded list of ints.
-
- """
- nums = []
- for byte_i, byte in enumerate(bytes_to_ints(numbits)):
- for bit_i in range(8):
- if (byte & (1 << bit_i)):
- nums.append(byte_i * 8 + bit_i)
- return nums
-
-
-@contract(numbits1='blob', numbits2='blob', returns='blob')
-def numbits_union(numbits1, numbits2):
- """Compute the union of two numbits.
-
- Returns:
- A new numbits, the union of `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- return _to_blob(binary_bytes(b1 | b2 for b1, b2 in byte_pairs))
-
-
-@contract(numbits1='blob', numbits2='blob', returns='blob')
-def numbits_intersection(numbits1, numbits2):
- """Compute the intersection of two numbits.
-
- Returns:
-        A new numbits, the intersection of `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- intersection_bytes = binary_bytes(b1 & b2 for b1, b2 in byte_pairs)
- return _to_blob(intersection_bytes.rstrip(b'\0'))
-
-
-@contract(numbits1='blob', numbits2='blob', returns='bool')
-def numbits_any_intersection(numbits1, numbits2):
- """Is there any number that appears in both numbits?
-
- Determine whether two number sets have a non-empty intersection. This is
- faster than computing the intersection.
-
- Returns:
- A bool, True if there is any number in both `numbits1` and `numbits2`.
- """
- byte_pairs = zip_longest(bytes_to_ints(numbits1), bytes_to_ints(numbits2), fillvalue=0)
- return any(b1 & b2 for b1, b2 in byte_pairs)
-
-
-@contract(num='int', numbits='blob', returns='bool')
-def num_in_numbits(num, numbits):
- """Does the integer `num` appear in `numbits`?
-
- Returns:
- A bool, True if `num` is a member of `numbits`.
- """
- nbyte, nbit = divmod(num, 8)
- if nbyte >= len(numbits):
- return False
- return bool(byte_to_int(numbits[nbyte]) & (1 << nbit))
-
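Taken together, the conversions and set operations round-trip as expected. A minimal sketch, assuming `coverage.numbits` is importable:

    from coverage.numbits import (
        num_in_numbits, numbits_to_nums, numbits_union, nums_to_numbits,
    )

    a = nums_to_numbits([1, 2, 3])
    b = nums_to_numbits([3, 47])
    assert numbits_to_nums(a) == [1, 2, 3]
    assert numbits_to_nums(numbits_union(a, b)) == [1, 2, 3, 47]
    assert num_in_numbits(47, b)
    assert not num_in_numbits(4, a)
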
-
-def register_sqlite_functions(connection):
- """
- Define numbits functions in a SQLite connection.
-
- This defines these functions for use in SQLite statements:
-
- * :func:`numbits_union`
- * :func:`numbits_intersection`
- * :func:`numbits_any_intersection`
- * :func:`num_in_numbits`
- * :func:`numbits_to_nums`
-
- `connection` is a :class:`sqlite3.Connection <python:sqlite3.Connection>`
- object. After creating the connection, pass it to this function to
- register the numbits functions. Then you can use numbits functions in your
- queries::
-
- import sqlite3
- from coverage.numbits import register_sqlite_functions
-
- conn = sqlite3.connect('example.db')
- register_sqlite_functions(conn)
- c = conn.cursor()
- # Kind of a nonsense query: find all the files and contexts that
- # executed line 47 in any file:
- c.execute(
- "select file_id, context_id from line_bits where num_in_numbits(?, numbits)",
- (47,)
- )
- """
- connection.create_function("numbits_union", 2, numbits_union)
- connection.create_function("numbits_intersection", 2, numbits_intersection)
- connection.create_function("numbits_any_intersection", 2, numbits_any_intersection)
- connection.create_function("num_in_numbits", 2, num_in_numbits)
- connection.create_function("numbits_to_nums", 1, lambda b: json.dumps(numbits_to_nums(b)))
diff --git a/contrib/python/coverage/py3/coverage/parser.py b/contrib/python/coverage/py3/coverage/parser.py
deleted file mode 100644
index 258f956039..0000000000
--- a/contrib/python/coverage/py3/coverage/parser.py
+++ /dev/null
@@ -1,1276 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Code parsing for coverage.py."""
-
-import ast
-import collections
-import os
-import re
-import token
-import tokenize
-
-from coverage import env
-from coverage.backward import range # pylint: disable=redefined-builtin
-from coverage.backward import bytes_to_ints, string_class
-from coverage.bytecode import code_objects
-from coverage.debug import short_stack
-from coverage.misc import contract, join_regex, new_contract, nice_pair, one_of
-from coverage.misc import NoSource, NotPython, StopEverything
-from coverage.phystokens import compile_unicode, generate_tokens, neuter_encoding_declaration
-
-
-class PythonParser(object):
- """Parse code to find executable lines, excluded lines, etc.
-
- This information is all based on static analysis: no code execution is
- involved.
-
- """
- @contract(text='unicode|None')
- def __init__(self, text=None, filename=None, exclude=None):
- """
- Source can be provided as `text`, the text itself, or `filename`, from
- which the text will be read. Excluded lines are those that match
- `exclude`, a regex.
-
- """
- assert text or filename, "PythonParser needs either text or filename"
- self.filename = filename or "<code>"
- self.text = text
- if not self.text:
- from coverage.python import get_python_source
- try:
- self.text = get_python_source(self.filename)
- except IOError as err:
- raise NoSource(
- "No source for code: '%s': %s" % (self.filename, err)
- )
-
- self.exclude = exclude
-
- # The text lines of the parsed code.
- self.lines = self.text.split('\n')
-
- # The normalized line numbers of the statements in the code. Exclusions
- # are taken into account, and statements are adjusted to their first
- # lines.
- self.statements = set()
-
- # The normalized line numbers of the excluded lines in the code,
- # adjusted to their first lines.
- self.excluded = set()
-
- # The raw_* attributes are only used in this class, and in
- # lab/parser.py to show how this class is working.
-
- # The line numbers that start statements, as reported by the line
- # number table in the bytecode.
- self.raw_statements = set()
-
- # The raw line numbers of excluded lines of code, as marked by pragmas.
- self.raw_excluded = set()
-
- # The line numbers of class definitions.
- self.raw_classdefs = set()
-
- # Function definitions (start, end, name)
- self._raw_funcdefs = set()
-
- # The line numbers of docstring lines.
- self.raw_docstrings = set()
-
- # Internal detail, used by lab/parser.py.
- self.show_tokens = False
-
- # A dict mapping line numbers to lexical statement starts for
- # multi-line statements.
- self._multiline = {}
-
- # Lazily-created ByteParser, arc data, and missing arc descriptions.
- self._byte_parser = None
- self._all_arcs = None
- self._missing_arc_fragments = None
-
- @property
- def byte_parser(self):
- """Create a ByteParser on demand."""
- if not self._byte_parser:
- self._byte_parser = ByteParser(self.text, filename=self.filename)
- return self._byte_parser
-
-    @property
-    def raw_funcdefs(self):
-        """The (start, end, name) tuples of function definitions found during AST analysis."""
-        return self._raw_funcdefs
-
- def lines_matching(self, *regexes):
- """Find the lines matching one of a list of regexes.
-
- Returns a set of line numbers, the lines that contain a match for one
- of the regexes in `regexes`. The entire line needn't match, just a
- part of it.
-
- """
- combined = join_regex(regexes)
- if env.PY2:
- combined = combined.decode("utf8")
- regex_c = re.compile(combined)
- matches = set()
- for i, ltext in enumerate(self.lines, start=1):
- if regex_c.search(ltext):
- matches.add(i)
- return matches
-
- def _raw_parse(self):
- """Parse the source to find the interesting facts about its lines.
-
- A handful of attributes are updated.
-
- """
- # Find lines which match an exclusion pattern.
- if self.exclude:
- self.raw_excluded = self.lines_matching(self.exclude)
-
- # Tokenize, to find excluded suites, to find docstrings, and to find
- # multi-line statements.
- indent = 0
- exclude_indent = 0
- excluding = False
- excluding_decorators = False
- prev_toktype = token.INDENT
- first_line = None
- empty = True
- first_on_line = True
-
- tokgen = generate_tokens(self.text)
- for toktype, ttext, (slineno, _), (elineno, _), ltext in tokgen:
- if self.show_tokens: # pragma: debugging
- print("%10s %5s %-20r %r" % (
- tokenize.tok_name.get(toktype, toktype),
- nice_pair((slineno, elineno)), ttext, ltext
- ))
- if toktype == token.INDENT:
- indent += 1
- elif toktype == token.DEDENT:
- indent -= 1
- elif toktype == token.NAME:
- if ttext == 'class':
- # Class definitions look like branches in the bytecode, so
- # we need to exclude them. The simplest way is to note the
- # lines with the 'class' keyword.
- self.raw_classdefs.add(slineno)
- elif toktype == token.OP:
- if ttext == ':':
- should_exclude = (elineno in self.raw_excluded) or excluding_decorators
- if not excluding and should_exclude:
- # Start excluding a suite. We trigger off of the colon
- # token so that the #pragma comment will be recognized on
- # the same line as the colon.
- self.raw_excluded.add(elineno)
- exclude_indent = indent
- excluding = True
- excluding_decorators = False
- elif ttext == '@' and first_on_line:
- # A decorator.
- if elineno in self.raw_excluded:
- excluding_decorators = True
- if excluding_decorators:
- self.raw_excluded.add(elineno)
- elif toktype == token.STRING and prev_toktype == token.INDENT:
- # Strings that are first on an indented line are docstrings.
- # (a trick from trace.py in the stdlib.) This works for
- # 99.9999% of cases. For the rest (!) see:
- # http://stackoverflow.com/questions/1769332/x/1769794#1769794
- self.raw_docstrings.update(range(slineno, elineno+1))
- elif toktype == token.NEWLINE:
- if first_line is not None and elineno != first_line:
- # We're at the end of a line, and we've ended on a
- # different line than the first line of the statement,
- # so record a multi-line range.
- for l in range(first_line, elineno+1):
- self._multiline[l] = first_line
- first_line = None
- first_on_line = True
-
- if ttext.strip() and toktype != tokenize.COMMENT:
- # A non-whitespace token.
- empty = False
- if first_line is None:
- # The token is not whitespace, and is the first in a
- # statement.
- first_line = slineno
- # Check whether to end an excluded suite.
- if excluding and indent <= exclude_indent:
- excluding = False
- if excluding:
- self.raw_excluded.add(elineno)
- first_on_line = False
-
- prev_toktype = toktype
-
- # Find the starts of the executable statements.
- if not empty:
- self.raw_statements.update(self.byte_parser._find_statements())
-
- # The first line of modules can lie and say 1 always, even if the first
- # line of code is later. If so, map 1 to the actual first line of the
- # module.
- if env.PYBEHAVIOR.module_firstline_1 and self._multiline:
- self._multiline[1] = min(self.raw_statements)
-
- def first_line(self, line):
- """Return the first line number of the statement including `line`."""
- if line < 0:
- line = -self._multiline.get(-line, -line)
- else:
- line = self._multiline.get(line, line)
- return line
-
- def first_lines(self, lines):
- """Map the line numbers in `lines` to the correct first line of the
- statement.
-
- Returns a set of the first lines.
-
- """
- return {self.first_line(l) for l in lines}
-
- def translate_lines(self, lines):
- """Implement `FileReporter.translate_lines`."""
- return self.first_lines(lines)
-
- def translate_arcs(self, arcs):
- """Implement `FileReporter.translate_arcs`."""
- return [(self.first_line(a), self.first_line(b)) for (a, b) in arcs]
-
- def parse_source(self):
- """Parse source text to find executable lines, excluded lines, etc.
-
- Sets the .excluded and .statements attributes, normalized to the first
- line of multi-line statements.
-
- """
- try:
- self._raw_parse()
- except (tokenize.TokenError, IndentationError) as err:
- if hasattr(err, "lineno"):
- lineno = err.lineno # IndentationError
- else:
- lineno = err.args[1][0] # TokenError
- raise NotPython(
- u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- self.filename, err.args[0], lineno
- )
- )
-
- self.excluded = self.first_lines(self.raw_excluded)
-
- ignore = self.excluded | self.raw_docstrings
- starts = self.raw_statements - ignore
- self.statements = self.first_lines(starts) - ignore
-
- def arcs(self):
- """Get information about the arcs available in the code.
-
- Returns a set of line number pairs. Line numbers have been normalized
- to the first line of multi-line statements.
-
- """
- if self._all_arcs is None:
- self._analyze_ast()
- return self._all_arcs
-
- def _analyze_ast(self):
- """Run the AstArcAnalyzer and save its results.
-
- `_all_arcs` is the set of arcs in the code.
-
- """
- aaa = AstArcAnalyzer(self.text, self.raw_statements, self._multiline)
- aaa.analyze()
-
- self._all_arcs = set()
- for l1, l2 in aaa.arcs:
- fl1 = self.first_line(l1)
- fl2 = self.first_line(l2)
- if fl1 != fl2:
- self._all_arcs.add((fl1, fl2))
-
- self._missing_arc_fragments = aaa.missing_arc_fragments
- self._raw_funcdefs = aaa.funcdefs
-
- def exit_counts(self):
- """Get a count of exits from that each line.
-
- Excluded lines are excluded.
-
- """
- exit_counts = collections.defaultdict(int)
- for l1, l2 in self.arcs():
- if l1 < 0:
- # Don't ever report -1 as a line number
- continue
- if l1 in self.excluded:
- # Don't report excluded lines as line numbers.
- continue
- if l2 in self.excluded:
- # Arcs to excluded lines shouldn't count.
- continue
- exit_counts[l1] += 1
-
- # Class definitions have one extra exit, so remove one for each:
- for l in self.raw_classdefs:
- # Ensure key is there: class definitions can include excluded lines.
- if l in exit_counts:
- exit_counts[l] -= 1
-
- return exit_counts
-
- def missing_arc_description(self, start, end, executed_arcs=None):
- """Provide an English sentence describing a missing arc."""
- if self._missing_arc_fragments is None:
- self._analyze_ast()
-
- actual_start = start
-
- if (
- executed_arcs and
- end < 0 and end == -start and
- (end, start) not in executed_arcs and
- (end, start) in self._missing_arc_fragments
- ):
- # It's a one-line callable, and we never even started it,
- # and we have a message about not starting it.
- start, end = end, start
-
- fragment_pairs = self._missing_arc_fragments.get((start, end), [(None, None)])
-
- msgs = []
- for smsg, emsg in fragment_pairs:
- if emsg is None:
- if end < 0:
- # Hmm, maybe we have a one-line callable, let's check.
- if (-end, end) in self._missing_arc_fragments:
- return self.missing_arc_description(-end, end)
- emsg = "didn't jump to the function exit"
- else:
- emsg = "didn't jump to line {lineno}"
- emsg = emsg.format(lineno=end)
-
- msg = "line {start} {emsg}".format(start=actual_start, emsg=emsg)
- if smsg is not None:
- msg += ", because {smsg}".format(smsg=smsg.format(lineno=actual_start))
-
- msgs.append(msg)
-
- return " or ".join(msgs)
-
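A rough usage sketch, assuming `coverage.parser` is importable: the parser is built from source text, parsed, and then queried for statements and arcs.

    from coverage.parser import PythonParser

    source = (
        u"a = 1\n"
        u"if a:\n"
        u"    b = 2\n"
    )
    parser = PythonParser(text=source)
    parser.parse_source()
    print(sorted(parser.statements))   # e.g. [1, 2, 3]
    print(sorted(parser.arcs()))       # entry/exit arcs use negative line numbers
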
-
-class ByteParser(object):
- """Parse bytecode to understand the structure of code."""
-
- @contract(text='unicode')
- def __init__(self, text, code=None, filename=None):
- self.text = text
- if code:
- self.code = code
- else:
- try:
- self.code = compile_unicode(text, filename, "exec")
- except SyntaxError as synerr:
- raise NotPython(
- u"Couldn't parse '%s' as Python source: '%s' at line %d" % (
- filename, synerr.msg, synerr.lineno
- )
- )
-
- # Alternative Python implementations don't always provide all the
- # attributes on code objects that we need to do the analysis.
- for attr in ['co_lnotab', 'co_firstlineno']:
- if not hasattr(self.code, attr):
- raise StopEverything( # pragma: only jython
- "This implementation of Python doesn't support code analysis.\n"
- "Run coverage.py under another Python for this command."
- )
-
- def child_parsers(self):
- """Iterate over all the code objects nested within this one.
-
- The iteration includes `self` as its first value.
-
- """
- return (ByteParser(self.text, code=c) for c in code_objects(self.code))
-
- def _line_numbers(self):
- """Yield the line numbers possible in this code object.
-
-        Uses co_lines() where available, otherwise co_lnotab (described in
-        Python/compile.c), to find the line numbers. Produces a sequence:
-        l0, l1, ...
- """
- if hasattr(self.code, "co_lines"):
- for _, _, line in self.code.co_lines():
- if line is not None:
- yield line
- else:
- # Adapted from dis.py in the standard library.
- byte_increments = bytes_to_ints(self.code.co_lnotab[0::2])
- line_increments = bytes_to_ints(self.code.co_lnotab[1::2])
-
- last_line_num = None
- line_num = self.code.co_firstlineno
- byte_num = 0
- for byte_incr, line_incr in zip(byte_increments, line_increments):
- if byte_incr:
- if line_num != last_line_num:
- yield line_num
- last_line_num = line_num
- byte_num += byte_incr
- if env.PYBEHAVIOR.negative_lnotab and line_incr >= 0x80:
- line_incr -= 0x100
- line_num += line_incr
- if line_num != last_line_num:
- yield line_num
-
- def _find_statements(self):
- """Find the statements in `self.code`.
-
- Produce a sequence of line numbers that start statements. Recurses
- into all code objects reachable from `self.code`.
-
- """
- for bp in self.child_parsers():
- # Get all of the lineno information from this code.
- for l in bp._line_numbers():
- yield l
-
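For instance, a sketch assuming `coverage.parser` is importable (the reported lines are indicative):

    from coverage.parser import ByteParser

    source = (
        u"def f():\n"
        u"    return 1\n"
        u"\n"
        u"x = f()\n"
    )
    bp = ByteParser(source, filename="<sketch>")
    print(sorted(set(bp._find_statements())))   # e.g. [1, 2, 4]
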
-
-#
-# AST analysis
-#
-
-class LoopBlock(object):
- """A block on the block stack representing a `for` or `while` loop."""
- @contract(start=int)
- def __init__(self, start):
- # The line number where the loop starts.
- self.start = start
- # A set of ArcStarts, the arcs from break statements exiting this loop.
- self.break_exits = set()
-
-
-class FunctionBlock(object):
- """A block on the block stack representing a function definition."""
- @contract(start=int, name=str)
- def __init__(self, start, name):
- # The line number where the function starts.
- self.start = start
- # The name of the function.
- self.name = name
-
-
-class TryBlock(object):
- """A block on the block stack representing a `try` block."""
- @contract(handler_start='int|None', final_start='int|None')
- def __init__(self, handler_start, final_start):
- # The line number of the first "except" handler, if any.
- self.handler_start = handler_start
- # The line number of the "finally:" clause, if any.
- self.final_start = final_start
-
- # The ArcStarts for breaks/continues/returns/raises inside the "try:"
- # that need to route through the "finally:" clause.
- self.break_from = set()
- self.continue_from = set()
- self.return_from = set()
- self.raise_from = set()
-
-
-class ArcStart(collections.namedtuple("Arc", "lineno, cause")):
- """The information needed to start an arc.
-
- `lineno` is the line number the arc starts from.
-
- `cause` is an English text fragment used as the `startmsg` for
- AstArcAnalyzer.missing_arc_fragments. It will be used to describe why an
- arc wasn't executed, so should fit well into a sentence of the form,
- "Line 17 didn't run because {cause}." The fragment can include "{lineno}"
- to have `lineno` interpolated into it.
-
- """
- def __new__(cls, lineno, cause=None):
- return super(ArcStart, cls).__new__(cls, lineno, cause)
-
-
-# Define contract words that PyContract doesn't have.
-# ArcStarts is for a list or set of ArcStart's.
-new_contract('ArcStarts', lambda seq: all(isinstance(x, ArcStart) for x in seq))
-
-
-# Turn on AST dumps with an environment variable.
-# $set_env.py: COVERAGE_AST_DUMP - Dump the AST nodes when parsing code.
-AST_DUMP = bool(int(os.environ.get("COVERAGE_AST_DUMP", 0)))
-
-class NodeList(object):
- """A synthetic fictitious node, containing a sequence of nodes.
-
- This is used when collapsing optimized if-statements, to represent the
- unconditional execution of one of the clauses.
-
- """
- def __init__(self, body):
- self.body = body
- self.lineno = body[0].lineno
-
-
-# TODO: some add_arcs methods here don't add arcs, they return them. Rename them.
-# TODO: the cause messages have too many commas.
-# TODO: Shouldn't the cause messages join with "and" instead of "or"?
-
-class AstArcAnalyzer(object):
- """Analyze source text with an AST to find executable code paths."""
-
- @contract(text='unicode', statements=set)
- def __init__(self, text, statements, multiline):
- self.root_node = ast.parse(neuter_encoding_declaration(text))
- # TODO: I think this is happening in too many places.
- self.statements = {multiline.get(l, l) for l in statements}
- self.multiline = multiline
-
- if AST_DUMP: # pragma: debugging
- # Dump the AST so that failing tests have helpful output.
- print("Statements: {}".format(self.statements))
- print("Multiline map: {}".format(self.multiline))
- ast_dump(self.root_node)
-
- self.arcs = set()
-
- # A map from arc pairs to a list of pairs of sentence fragments:
- # { (start, end): [(startmsg, endmsg), ...], }
- #
- # For an arc from line 17, they should be usable like:
- # "Line 17 {endmsg}, because {startmsg}"
- self.missing_arc_fragments = collections.defaultdict(list)
- self.block_stack = []
- self.funcdefs = set()
-
- # $set_env.py: COVERAGE_TRACK_ARCS - Trace every arc added while parsing code.
- self.debug = bool(int(os.environ.get("COVERAGE_TRACK_ARCS", 0)))
-
- def analyze(self):
- """Examine the AST tree from `root_node` to determine possible arcs.
-
- This sets the `arcs` attribute to be a set of (from, to) line number
- pairs.
-
- """
- for node in ast.walk(self.root_node):
- node_name = node.__class__.__name__
- code_object_handler = getattr(self, "_code_object__" + node_name, None)
- if code_object_handler is not None:
- code_object_handler(node)
-
- @contract(start=int, end=int)
- def add_arc(self, start, end, smsg=None, emsg=None):
- """Add an arc, including message fragments to use if it is missing."""
- if self.debug: # pragma: debugging
- print("\nAdding arc: ({}, {}): {!r}, {!r}".format(start, end, smsg, emsg))
- print(short_stack(limit=6))
- self.arcs.add((start, end))
-
- if smsg is not None or emsg is not None:
- self.missing_arc_fragments[(start, end)].append((smsg, emsg))
-
- def nearest_blocks(self):
- """Yield the blocks in nearest-to-farthest order."""
- return reversed(self.block_stack)
-
- @contract(returns=int)
- def line_for_node(self, node):
- """What is the right line number to use for this node?
-
- This dispatches to _line__Node functions where needed.
-
- """
- node_name = node.__class__.__name__
- handler = getattr(self, "_line__" + node_name, None)
- if handler is not None:
- return handler(node)
- else:
- return node.lineno
-
- def _line_decorated(self, node):
- """Compute first line number for things that can be decorated (classes and functions)."""
- lineno = node.lineno
- if env.PYBEHAVIOR.trace_decorated_def:
- if node.decorator_list:
- lineno = node.decorator_list[0].lineno
- return lineno
-
- def _line__Assign(self, node):
- return self.line_for_node(node.value)
-
- _line__ClassDef = _line_decorated
-
- def _line__Dict(self, node):
- # Python 3.5 changed how dict literals are made.
- if env.PYVERSION >= (3, 5) and node.keys:
- if node.keys[0] is not None:
- return node.keys[0].lineno
- else:
- # Unpacked dict literals `{**{'a':1}}` have None as the key,
- # use the value in that case.
- return node.values[0].lineno
- else:
- return node.lineno
-
- _line__FunctionDef = _line_decorated
- _line__AsyncFunctionDef = _line_decorated
-
- def _line__List(self, node):
- if node.elts:
- return self.line_for_node(node.elts[0])
- else:
- return node.lineno
-
- def _line__Module(self, node):
- if env.PYBEHAVIOR.module_firstline_1:
- return 1
- elif node.body:
- return self.line_for_node(node.body[0])
- else:
- # Empty modules have no line number, they always start at 1.
- return 1
-
- # The node types that just flow to the next node with no complications.
- OK_TO_DEFAULT = {
- "Assign", "Assert", "AugAssign", "Delete", "Exec", "Expr", "Global",
- "Import", "ImportFrom", "Nonlocal", "Pass", "Print",
- }
-
- @contract(returns='ArcStarts')
- def add_arcs(self, node):
- """Add the arcs for `node`.
-
- Return a set of ArcStarts, exits from this node to the next. Because a
- node represents an entire sub-tree (including its children), the exits
- from a node can be arbitrarily complex::
-
- if something(1):
- if other(2):
- doit(3)
- else:
- doit(5)
-
- There are two exits from line 1: they start at line 3 and line 5.
-
- """
- node_name = node.__class__.__name__
- handler = getattr(self, "_handle__" + node_name, None)
- if handler is not None:
- return handler(node)
- else:
- # No handler: either it's something that's ok to default (a simple
- # statement), or it's something we overlooked. Change this 0 to 1
- # to see if it's overlooked.
- if 0:
- if node_name not in self.OK_TO_DEFAULT:
- print("*** Unhandled: {}".format(node))
-
- # Default for simple statements: one exit from this node.
- return {ArcStart(self.line_for_node(node))}
-
- @one_of("from_start, prev_starts")
- @contract(returns='ArcStarts')
- def add_body_arcs(self, body, from_start=None, prev_starts=None):
- """Add arcs for the body of a compound statement.
-
-        `body` is a list of the body's statement nodes. `from_start` is a
-        single `ArcStart` that can
- be the previous line in flow before this body. `prev_starts` is a set
- of ArcStarts that can be the previous line. Only one of them should be
- given.
-
- Returns a set of ArcStarts, the exits from this body.
-
- """
- if prev_starts is None:
- prev_starts = {from_start}
- for body_node in body:
- lineno = self.line_for_node(body_node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line not in self.statements:
- body_node = self.find_non_missing_node(body_node)
- if body_node is None:
- continue
- lineno = self.line_for_node(body_node)
- for prev_start in prev_starts:
- self.add_arc(prev_start.lineno, lineno, prev_start.cause)
- prev_starts = self.add_arcs(body_node)
- return prev_starts
-
- def find_non_missing_node(self, node):
- """Search `node` looking for a child that has not been optimized away.
-
- This might return the node you started with, or it will work recursively
- to find a child node in self.statements.
-
- Returns a node, or None if none of the node remains.
-
- """
- # This repeats work just done in add_body_arcs, but this duplication
- # means we can avoid a function call in the 99.9999% case of not
- # optimizing away statements.
- lineno = self.line_for_node(node)
- first_line = self.multiline.get(lineno, lineno)
- if first_line in self.statements:
- return node
-
- missing_fn = getattr(self, "_missing__" + node.__class__.__name__, None)
- if missing_fn:
- node = missing_fn(node)
- else:
- node = None
- return node
-
- # Missing nodes: _missing__*
- #
- # Entire statements can be optimized away by Python. They will appear in
- # the AST, but not the bytecode. These functions are called (by
- # find_non_missing_node) to find a node to use instead of the missing
- # node. They can return None if the node should truly be gone.
-
- def _missing__If(self, node):
- # If the if-node is missing, then one of its children might still be
- # here, but not both. So return the first of the two that isn't missing.
- # Use a NodeList to hold the clauses as a single node.
- non_missing = self.find_non_missing_node(NodeList(node.body))
- if non_missing:
- return non_missing
- if node.orelse:
- return self.find_non_missing_node(NodeList(node.orelse))
- return None
-
- def _missing__NodeList(self, node):
- # A NodeList might be a mixture of missing and present nodes. Find the
- # ones that are present.
- non_missing_children = []
- for child in node.body:
- child = self.find_non_missing_node(child)
- if child is not None:
- non_missing_children.append(child)
-
- # Return the simplest representation of the present children.
- if not non_missing_children:
- return None
- if len(non_missing_children) == 1:
- return non_missing_children[0]
- return NodeList(non_missing_children)
-
- def _missing__While(self, node):
- body_nodes = self.find_non_missing_node(NodeList(node.body))
- if not body_nodes:
- return None
- # Make a synthetic While-true node.
- new_while = ast.While()
- new_while.lineno = body_nodes.lineno
- new_while.test = ast.Name()
- new_while.test.lineno = body_nodes.lineno
- new_while.test.id = "True"
- new_while.body = body_nodes.body
- new_while.orelse = None
- return new_while
-
- def is_constant_expr(self, node):
- """Is this a compile-time constant?"""
- node_name = node.__class__.__name__
- if node_name in ["Constant", "NameConstant", "Num"]:
- return "Num"
- elif node_name == "Name":
- if node.id in ["True", "False", "None", "__debug__"]:
- return "Name"
- return None
-
- # In the fullness of time, these might be good tests to write:
- # while EXPR:
- # while False:
- # listcomps hidden deep in other expressions
- # listcomps hidden in lists: x = [[i for i in range(10)]]
- # nested function definitions
-
-
- # Exit processing: process_*_exits
- #
- # These functions process the four kinds of jump exits: break, continue,
- # raise, and return. To figure out where an exit goes, we have to look at
- # the block stack context. For example, a break will jump to the nearest
- # enclosing loop block, or the nearest enclosing finally block, whichever
- # is nearer.
-
- @contract(exits='ArcStarts')
- def process_break_exits(self, exits):
- """Add arcs due to jumps from `exits` being breaks."""
- for block in self.nearest_blocks():
- if isinstance(block, LoopBlock):
- block.break_exits.update(exits)
- break
- elif isinstance(block, TryBlock) and block.final_start is not None:
- block.break_from.update(exits)
- break
-
- @contract(exits='ArcStarts')
- def process_continue_exits(self, exits):
- """Add arcs due to jumps from `exits` being continues."""
- for block in self.nearest_blocks():
- if isinstance(block, LoopBlock):
- for xit in exits:
- self.add_arc(xit.lineno, block.start, xit.cause)
- break
- elif isinstance(block, TryBlock) and block.final_start is not None:
- block.continue_from.update(exits)
- break
-
- @contract(exits='ArcStarts')
- def process_raise_exits(self, exits):
- """Add arcs due to jumps from `exits` being raises."""
- for block in self.nearest_blocks():
- if isinstance(block, TryBlock):
- if block.handler_start is not None:
- for xit in exits:
- self.add_arc(xit.lineno, block.handler_start, xit.cause)
- break
- elif block.final_start is not None:
- block.raise_from.update(exits)
- break
- elif isinstance(block, FunctionBlock):
- for xit in exits:
- self.add_arc(
- xit.lineno, -block.start, xit.cause,
- "didn't except from function {!r}".format(block.name),
- )
- break
-
- @contract(exits='ArcStarts')
- def process_return_exits(self, exits):
- """Add arcs due to jumps from `exits` being returns."""
- for block in self.nearest_blocks():
- if isinstance(block, TryBlock) and block.final_start is not None:
- block.return_from.update(exits)
- break
- elif isinstance(block, FunctionBlock):
- for xit in exits:
- self.add_arc(
- xit.lineno, -block.start, xit.cause,
- "didn't return from function {!r}".format(block.name),
- )
- break
-
-
- # Handlers: _handle__*
- #
- # Each handler deals with a specific AST node type, dispatched from
- # add_arcs. Handlers return the set of exits from that node, and can
- # also call self.add_arc to record arcs they find. These functions mirror
- # the Python semantics of each syntactic construct. See the docstring
- # for add_arcs to understand the concept of exits from a node.
-
- @contract(returns='ArcStarts')
- def _handle__Break(self, node):
- here = self.line_for_node(node)
- break_start = ArcStart(here, cause="the break on line {lineno} wasn't executed")
- self.process_break_exits([break_start])
- return set()
-
- @contract(returns='ArcStarts')
- def _handle_decorated(self, node):
- """Add arcs for things that can be decorated (classes and functions)."""
- main_line = last = node.lineno
- if node.decorator_list:
- if env.PYBEHAVIOR.trace_decorated_def:
- last = None
- for dec_node in node.decorator_list:
- dec_start = self.line_for_node(dec_node)
- if last is not None and dec_start != last:
- self.add_arc(last, dec_start)
- last = dec_start
- if env.PYBEHAVIOR.trace_decorated_def:
- self.add_arc(last, main_line)
- last = main_line
- # The definition line may have been missed, but we should have it
- # in `self.statements`. For some constructs, `line_for_node` is
- # not what we'd think of as the first line in the statement, so map
- # it to the first one.
- if node.body:
- body_start = self.line_for_node(node.body[0])
- body_start = self.multiline.get(body_start, body_start)
- for lineno in range(last+1, body_start):
- if lineno in self.statements:
- self.add_arc(last, lineno)
- last = lineno
-            # The body is handled by the _code_object__ handlers called from analyze().
- return {ArcStart(last)}
-
- _handle__ClassDef = _handle_decorated
-
- @contract(returns='ArcStarts')
- def _handle__Continue(self, node):
- here = self.line_for_node(node)
- continue_start = ArcStart(here, cause="the continue on line {lineno} wasn't executed")
- self.process_continue_exits([continue_start])
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__For(self, node):
- start = self.line_for_node(node.iter)
- self.block_stack.append(LoopBlock(start=start))
- from_start = ArcStart(start, cause="the loop on line {lineno} never started")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- # Any exit from the body will go back to the top of the loop.
- for xit in exits:
- self.add_arc(xit.lineno, start, xit.cause)
- my_block = self.block_stack.pop()
- exits = my_block.break_exits
- from_start = ArcStart(start, cause="the loop on line {lineno} didn't complete")
- if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
- exits |= else_exits
- else:
- # No else clause: exit from the for line.
- exits.add(from_start)
- return exits
-
- _handle__AsyncFor = _handle__For
-
- _handle__FunctionDef = _handle_decorated
- _handle__AsyncFunctionDef = _handle_decorated
-
- @contract(returns='ArcStarts')
- def _handle__If(self, node):
- start = self.line_for_node(node.test)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
- exits |= self.add_body_arcs(node.orelse, from_start=from_start)
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__NodeList(self, node):
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__Raise(self, node):
- here = self.line_for_node(node)
- raise_start = ArcStart(here, cause="the raise on line {lineno} wasn't executed")
- self.process_raise_exits([raise_start])
- # `raise` statement jumps away, no exits from here.
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__Return(self, node):
- here = self.line_for_node(node)
- return_start = ArcStart(here, cause="the return on line {lineno} wasn't executed")
- self.process_return_exits([return_start])
- # `return` statement jumps away, no exits from here.
- return set()
-
- @contract(returns='ArcStarts')
- def _handle__Try(self, node):
- if node.handlers:
- handler_start = self.line_for_node(node.handlers[0])
- else:
- handler_start = None
-
- if node.finalbody:
- final_start = self.line_for_node(node.finalbody[0])
- else:
- final_start = None
-
- try_block = TryBlock(handler_start, final_start)
- self.block_stack.append(try_block)
-
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
-
- # We're done with the `try` body, so this block no longer handles
- # exceptions. We keep the block so the `finally` clause can pick up
- # flows from the handlers and `else` clause.
- if node.finalbody:
- try_block.handler_start = None
- if node.handlers:
- # If there are `except` clauses, then raises in the try body
- # will already jump to them. Start this set over for raises in
- # `except` and `else`.
- try_block.raise_from = set()
- else:
- self.block_stack.pop()
-
- handler_exits = set()
-
- if node.handlers:
- last_handler_start = None
- for handler_node in node.handlers:
- handler_start = self.line_for_node(handler_node)
- if last_handler_start is not None:
- self.add_arc(last_handler_start, handler_start)
- last_handler_start = handler_start
- from_cause = "the exception caught by line {lineno} didn't happen"
- from_start = ArcStart(handler_start, cause=from_cause)
- handler_exits |= self.add_body_arcs(handler_node.body, from_start=from_start)
-
- if node.orelse:
- exits = self.add_body_arcs(node.orelse, prev_starts=exits)
-
- exits |= handler_exits
-
- if node.finalbody:
- self.block_stack.pop()
- final_from = ( # You can get to the `finally` clause from:
- exits | # the exits of the body or `else` clause,
- try_block.break_from | # or a `break`,
- try_block.continue_from | # or a `continue`,
- try_block.raise_from | # or a `raise`,
- try_block.return_from # or a `return`.
- )
-
- final_exits = self.add_body_arcs(node.finalbody, prev_starts=final_from)
-
- if try_block.break_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for break_line in try_block.break_from:
- lineno = break_line.lineno
- cause = break_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- breaks = try_block.break_from
- else:
- breaks = self._combine_finally_starts(try_block.break_from, final_exits)
- self.process_break_exits(breaks)
-
- if try_block.continue_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for continue_line in try_block.continue_from:
- lineno = continue_line.lineno
- cause = continue_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- continues = try_block.continue_from
- else:
- continues = self._combine_finally_starts(try_block.continue_from, final_exits)
- self.process_continue_exits(continues)
-
- if try_block.raise_from:
- self.process_raise_exits(
- self._combine_finally_starts(try_block.raise_from, final_exits)
- )
-
- if try_block.return_from:
- if env.PYBEHAVIOR.finally_jumps_back:
- for return_line in try_block.return_from:
- lineno = return_line.lineno
- cause = return_line.cause.format(lineno=lineno)
- for final_exit in final_exits:
- self.add_arc(final_exit.lineno, lineno, cause)
- returns = try_block.return_from
- else:
- returns = self._combine_finally_starts(try_block.return_from, final_exits)
- self.process_return_exits(returns)
-
- if exits:
- # The finally clause's exits are only exits for the try block
- # as a whole if the try block had some exits to begin with.
- exits = final_exits
-
- return exits
-
- @contract(starts='ArcStarts', exits='ArcStarts', returns='ArcStarts')
- def _combine_finally_starts(self, starts, exits):
- """Helper for building the cause of `finally` branches.
-
- "finally" clauses might not execute their exits, and the causes could
- be due to a failure to execute any of the exits in the try block. So
- we use the causes from `starts` as the causes for `exits`.
- """
- causes = []
- for start in sorted(starts):
- if start.cause is not None:
- causes.append(start.cause.format(lineno=start.lineno))
- cause = " or ".join(causes)
- exits = {ArcStart(xit.lineno, cause) for xit in exits}
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__TryExcept(self, node):
- # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
- # TryExcept, it means there was no finally, so fake it, and treat as
- # a general Try node.
- node.finalbody = []
- return self._handle__Try(node)
-
- @contract(returns='ArcStarts')
- def _handle__TryFinally(self, node):
- # Python 2.7 uses separate TryExcept and TryFinally nodes. If we get
- # TryFinally, see if there's a TryExcept nested inside. If so, merge
- # them. Otherwise, fake fields to complete a Try node.
- node.handlers = []
- node.orelse = []
-
- first = node.body[0]
- if first.__class__.__name__ == "TryExcept" and node.lineno == first.lineno:
- assert len(node.body) == 1
- node.body = first.body
- node.handlers = first.handlers
- node.orelse = first.orelse
-
- return self._handle__Try(node)
-
- @contract(returns='ArcStarts')
- def _handle__While(self, node):
- start = to_top = self.line_for_node(node.test)
- constant_test = self.is_constant_expr(node.test)
- top_is_body0 = False
- if constant_test and (env.PY3 or constant_test == "Num"):
- top_is_body0 = True
- if env.PYBEHAVIOR.keep_constant_test:
- top_is_body0 = False
- if top_is_body0:
- to_top = self.line_for_node(node.body[0])
- self.block_stack.append(LoopBlock(start=to_top))
- from_start = ArcStart(start, cause="the condition on line {lineno} was never true")
- exits = self.add_body_arcs(node.body, from_start=from_start)
- for xit in exits:
- self.add_arc(xit.lineno, to_top, xit.cause)
- exits = set()
- my_block = self.block_stack.pop()
- exits.update(my_block.break_exits)
- from_start = ArcStart(start, cause="the condition on line {lineno} was never false")
- if node.orelse:
- else_exits = self.add_body_arcs(node.orelse, from_start=from_start)
- exits |= else_exits
- else:
- # No `else` clause: you can exit from the start.
- if not constant_test:
- exits.add(from_start)
- return exits
-
- @contract(returns='ArcStarts')
- def _handle__With(self, node):
- start = self.line_for_node(node)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- return exits
-
- _handle__AsyncWith = _handle__With
-
- def _code_object__Module(self, node):
- start = self.line_for_node(node)
- if node.body:
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- for xit in exits:
- self.add_arc(xit.lineno, -start, xit.cause, "didn't exit the module")
- else:
- # Empty module.
- self.add_arc(-start, start)
- self.add_arc(start, -start)
-
- def _process_function_def(self, start, node):
- self.funcdefs.add((start, node.body[-1].lineno, node.name))
-
- def _code_object__FunctionDef(self, node):
- start = self.line_for_node(node)
- self.block_stack.append(FunctionBlock(start=start, name=node.name))
- exits = self.add_body_arcs(node.body, from_start=ArcStart(-start))
- self.process_return_exits(exits)
- self._process_function_def(start, node)
- self.block_stack.pop()
-
- _code_object__AsyncFunctionDef = _code_object__FunctionDef
-
- def _code_object__ClassDef(self, node):
- start = self.line_for_node(node)
- self.add_arc(-start, start)
- exits = self.add_body_arcs(node.body, from_start=ArcStart(start))
- for xit in exits:
- self.add_arc(
- xit.lineno, -start, xit.cause,
- "didn't exit the body of class {!r}".format(node.name),
- )
-
- def _make_oneline_code_method(noun): # pylint: disable=no-self-argument
- """A function to make methods for online callable _code_object__ methods."""
- def _code_object__oneline_callable(self, node):
- start = self.line_for_node(node)
- self.add_arc(-start, start, None, "didn't run the {} on line {}".format(noun, start))
- self.add_arc(
- start, -start, None,
- "didn't finish the {} on line {}".format(noun, start),
- )
- return _code_object__oneline_callable
-
- _code_object__Lambda = _make_oneline_code_method("lambda")
- _code_object__GeneratorExp = _make_oneline_code_method("generator expression")
- _code_object__DictComp = _make_oneline_code_method("dictionary comprehension")
- _code_object__SetComp = _make_oneline_code_method("set comprehension")
- if env.PY3:
- _code_object__ListComp = _make_oneline_code_method("list comprehension")
-
-
-if AST_DUMP: # pragma: debugging
- # Code only used when dumping the AST for debugging.
-
- SKIP_DUMP_FIELDS = ["ctx"]
-
- def _is_simple_value(value):
- """Is `value` simple enough to be displayed on a single line?"""
- return (
- value in [None, [], (), {}, set()] or
- isinstance(value, (string_class, int, float))
- )
-
- def ast_dump(node, depth=0):
- """Dump the AST for `node`.
-
- This recursively walks the AST, printing a readable version.
-
- """
- indent = " " * depth
- if not isinstance(node, ast.AST):
- print("{}<{} {!r}>".format(indent, node.__class__.__name__, node))
- return
-
- lineno = getattr(node, "lineno", None)
- if lineno is not None:
- linemark = " @ {}".format(node.lineno)
- else:
- linemark = ""
- head = "{}<{}{}".format(indent, node.__class__.__name__, linemark)
-
- named_fields = [
- (name, value)
- for name, value in ast.iter_fields(node)
- if name not in SKIP_DUMP_FIELDS
- ]
- if not named_fields:
- print("{}>".format(head))
- elif len(named_fields) == 1 and _is_simple_value(named_fields[0][1]):
- field_name, value = named_fields[0]
- print("{} {}: {!r}>".format(head, field_name, value))
- else:
- print(head)
- if 0:
- print("{}# mro: {}".format(
- indent, ", ".join(c.__name__ for c in node.__class__.__mro__[1:]),
- ))
- next_indent = indent + " "
- for field_name, value in named_fields:
- prefix = "{}{}:".format(next_indent, field_name)
- if _is_simple_value(value):
- print("{} {!r}".format(prefix, value))
- elif isinstance(value, list):
- print("{} [".format(prefix))
- for n in value:
- ast_dump(n, depth + 8)
- print("{}]".format(next_indent))
- else:
- print(prefix)
- ast_dump(value, depth + 8)
-
- print("{}>".format(indent))
diff --git a/contrib/python/coverage/py3/coverage/phystokens.py b/contrib/python/coverage/py3/coverage/phystokens.py
deleted file mode 100644
index 54378b3bc8..0000000000
--- a/contrib/python/coverage/py3/coverage/phystokens.py
+++ /dev/null
@@ -1,297 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Better tokenizing for coverage.py."""
-
-import codecs
-import keyword
-import re
-import sys
-import token
-import tokenize
-
-from coverage import env
-from coverage.backward import iternext, unicode_class
-from coverage.misc import contract
-
-
-def phys_tokens(toks):
- """Return all physical tokens, even line continuations.
-
- tokenize.generate_tokens() doesn't return a token for the backslash that
- continues lines. This wrapper provides those tokens so that we can
- re-create a faithful representation of the original source.
-
- Returns the same values as generate_tokens()
-
- """
- last_line = None
- last_lineno = -1
- last_ttext = None
- for ttype, ttext, (slineno, scol), (elineno, ecol), ltext in toks:
- if last_lineno != elineno:
- if last_line and last_line.endswith("\\\n"):
- # We are at the beginning of a new line, and the last line
- # ended with a backslash. We probably have to inject a
- # backslash token into the stream. Unfortunately, there's more
- # to figure out. This code::
- #
- # usage = """\
- # HEY THERE
- # """
- #
- # triggers this condition, but the token text is::
- #
- # '"""\\\nHEY THERE\n"""'
- #
- # so we need to figure out if the backslash is already in the
- # string token or not.
- inject_backslash = True
- if last_ttext.endswith("\\"):
- inject_backslash = False
- elif ttype == token.STRING:
- if "\n" in ttext and ttext.split('\n', 1)[0][-1] == '\\':
- # It's a multi-line string and the first line ends with
- # a backslash, so we don't need to inject another.
- inject_backslash = False
- if inject_backslash:
- # Figure out what column the backslash is in.
- ccol = len(last_line.split("\n")[-2]) - 1
- # Yield the token, with a fake token type.
- yield (
- 99999, "\\\n",
- (slineno, ccol), (slineno, ccol+2),
- last_line
- )
- last_line = ltext
- if ttype not in (tokenize.NEWLINE, tokenize.NL):
- last_ttext = ttext
- yield ttype, ttext, (slineno, scol), (elineno, ecol), ltext
- last_lineno = elineno
-
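The docstring above is the whole point: the standard tokenizer never emits a token for the backslash that continues a line, which is the gap phys_tokens fills. A stdlib-only sketch, independent of coverage.py, makes that visible (the sample source string is illustrative)::

    import io
    import tokenize

    SRC = "x = 1 + \\\n    2\n"
    toks = tokenize.generate_tokens(io.StringIO(SRC).readline)
    texts = [ttext for _, ttext, _, _, _ in toks]
    print(texts)                 # 'x', '=', '1', '+', '2', ... but no '\\\n' token
    assert "\\\n" not in texts   # the line continuation simply isn't tokenized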
-
-@contract(source='unicode')
-def source_token_lines(source):
- """Generate a series of lines, one for each line in `source`.
-
- Each line is a list of pairs, each pair is a token::
-
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
-
- Each pair has a token class, and the token text.
-
- If you concatenate all the token texts, and then join them with newlines,
- you should have your original `source` back, with two differences:
- trailing whitespace is not preserved, and a final line with no newline
- is indistinguishable from a final line with a newline.
-
- """
-
- ws_tokens = {token.INDENT, token.DEDENT, token.NEWLINE, tokenize.NL}
- line = []
- col = 0
-
- source = source.expandtabs(8).replace('\r\n', '\n')
- tokgen = generate_tokens(source)
-
- for ttype, ttext, (_, scol), (_, ecol), _ in phys_tokens(tokgen):
- mark_start = True
- for part in re.split('(\n)', ttext):
- if part == '\n':
- yield line
- line = []
- col = 0
- mark_end = False
- elif part == '':
- mark_end = False
- elif ttype in ws_tokens:
- mark_end = False
- else:
- if mark_start and scol > col:
- line.append(("ws", u" " * (scol - col)))
- mark_start = False
- tok_class = tokenize.tok_name.get(ttype, 'xx').lower()[:3]
- if ttype == token.NAME and keyword.iskeyword(ttext):
- tok_class = "key"
- line.append((tok_class, part))
- mark_end = True
- scol = 0
- if mark_end:
- col = ecol
-
- if line:
- yield line
-
-
-class CachedTokenizer(object):
- """A one-element cache around tokenize.generate_tokens.
-
- When reporting, coverage.py tokenizes files twice, once to find the
- structure of the file, and once to syntax-color it. Tokenizing is
- expensive, and easily cached.
-
- This is a one-element cache so that our twice-in-a-row tokenizing doesn't
- actually tokenize twice.
-
- """
- def __init__(self):
- self.last_text = None
- self.last_tokens = None
-
- @contract(text='unicode')
- def generate_tokens(self, text):
- """A stand-in for `tokenize.generate_tokens`."""
- if text != self.last_text:
- self.last_text = text
- readline = iternext(text.splitlines(True))
- self.last_tokens = list(tokenize.generate_tokens(readline))
- return self.last_tokens
-
-# Create our generate_tokens cache as a callable replacement function.
-generate_tokens = CachedTokenizer().generate_tokens
-
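Because the cache above keys on the text of the most recent call, tokenizing the same source twice in a row hands back the very same list object. A small usage sketch, assuming coverage.py is importable (the sample source is illustrative)::

    from coverage.phystokens import generate_tokens

    src = u"def hello():\n    return 'hi'\n"
    first = generate_tokens(src)
    second = generate_tokens(src)   # same text: served from the one-element cache
    assert first is second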
-
-COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)
-
-@contract(source='bytes')
-def _source_encoding_py2(source):
- """Determine the encoding for `source`, according to PEP 263.
-
- `source` is a byte string, the text of the program.
-
- Returns a string, the name of the encoding.
-
- """
- assert isinstance(source, bytes)
-
- # Do this so the detect_encoding code we copied will work.
- readline = iternext(source.splitlines(True))
-
- # This is mostly code adapted from Py3.2's tokenize module.
-
- def _get_normal_name(orig_enc):
- """Imitates get_normal_name in tokenizer.c."""
- # Only care about the first 12 characters.
- enc = orig_enc[:12].lower().replace("_", "-")
- if re.match(r"^utf-8($|-)", enc):
- return "utf-8"
- if re.match(r"^(latin-1|iso-8859-1|iso-latin-1)($|-)", enc):
- return "iso-8859-1"
- return orig_enc
-
- # From detect_encoding():
- # It detects the encoding from the presence of a UTF-8 BOM or an encoding
- # cookie as specified in PEP-0263. If both a BOM and a cookie are present,
- # but disagree, a SyntaxError will be raised. If the encoding cookie is an
- # invalid charset, raise a SyntaxError. Note that if a UTF-8 BOM is found,
- # 'utf-8-sig' is returned.
-
- # If no encoding is specified, then the default will be returned.
- default = 'ascii'
-
- bom_found = False
- encoding = None
-
- def read_or_stop():
- """Get the next source line, or ''."""
- try:
- return readline()
- except StopIteration:
- return ''
-
- def find_cookie(line):
- """Find an encoding cookie in `line`."""
- try:
- line_string = line.decode('ascii')
- except UnicodeDecodeError:
- return None
-
- matches = COOKIE_RE.findall(line_string)
- if not matches:
- return None
- encoding = _get_normal_name(matches[0])
- try:
- codec = codecs.lookup(encoding)
- except LookupError:
- # This behavior mimics the Python interpreter
- raise SyntaxError("unknown encoding: " + encoding)
-
- if bom_found:
- # codecs in 2.3 were raw tuples of functions, assume the best.
- codec_name = getattr(codec, 'name', encoding)
- if codec_name != 'utf-8':
- # This behavior mimics the Python interpreter
- raise SyntaxError('encoding problem: utf-8')
- encoding += '-sig'
- return encoding
-
- first = read_or_stop()
- if first.startswith(codecs.BOM_UTF8):
- bom_found = True
- first = first[3:]
- default = 'utf-8-sig'
- if not first:
- return default
-
- encoding = find_cookie(first)
- if encoding:
- return encoding
-
- second = read_or_stop()
- if not second:
- return default
-
- encoding = find_cookie(second)
- if encoding:
- return encoding
-
- return default
-
-
-@contract(source='bytes')
-def _source_encoding_py3(source):
- """Determine the encoding for `source`, according to PEP 263.
-
- `source` is a byte string: the text of the program.
-
- Returns a string, the name of the encoding.
-
- """
- readline = iternext(source.splitlines(True))
- return tokenize.detect_encoding(readline)[0]
-
-
-if env.PY3:
- source_encoding = _source_encoding_py3
-else:
- source_encoding = _source_encoding_py2
-
-
-@contract(source='unicode')
-def compile_unicode(source, filename, mode):
- """Just like the `compile` builtin, but works on any Unicode string.
-
- Python 2's compile() builtin has a stupid restriction: if the source string
- is Unicode, then it may not have an encoding declaration in it. Why not?
- Who knows! It also decodes to utf8, and then tries to interpret those utf8
- bytes according to the encoding declaration. Why? Who knows!
-
- This function neuters the coding declaration, and compiles it.
-
- """
- source = neuter_encoding_declaration(source)
- if env.PY2 and isinstance(filename, unicode_class):
- filename = filename.encode(sys.getfilesystemencoding(), "replace")
- code = compile(source, filename, mode)
- return code
-
-
-@contract(source='unicode', returns='unicode')
-def neuter_encoding_declaration(source):
- """Return `source`, with any encoding declaration neutered."""
- if COOKIE_RE.search(source):
- source_lines = source.splitlines(True)
- for lineno in range(min(2, len(source_lines))):
- source_lines[lineno] = COOKIE_RE.sub("# (deleted declaration)", source_lines[lineno])
- source = "".join(source_lines)
- return source
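The encoding-cookie handling above hinges on COOKIE_RE and the neutering substitution. A standalone sketch of the same regex shows exactly what gets rewritten (the sample source is illustrative, the pattern is the one defined above)::

    import re

    COOKIE_RE = re.compile(r"^[ \t]*#.*coding[:=][ \t]*([-\w.]+)", flags=re.MULTILINE)

    src = "# -*- coding: iso-8859-1 -*-\nprint('hi')\n"
    print(COOKIE_RE.search(src).group(1))    # iso-8859-1
    neutered = COOKIE_RE.sub("# (deleted declaration)", src)
    print(neutered.splitlines()[0])          # "# (deleted declaration) -*-"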
diff --git a/contrib/python/coverage/py3/coverage/plugin.py b/contrib/python/coverage/py3/coverage/plugin.py
deleted file mode 100644
index 6997b489bb..0000000000
--- a/contrib/python/coverage/py3/coverage/plugin.py
+++ /dev/null
@@ -1,533 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""
-.. versionadded:: 4.0
-
-Plug-in interfaces for coverage.py.
-
-Coverage.py supports a few different kinds of plug-ins that change its
-behavior:
-
-* File tracers implement tracing of non-Python file types.
-
-* Configurers add custom configuration, using Python code to change the
- configuration.
-
-* Dynamic context switchers decide when the dynamic context has changed, for
- example, to record what test function produced the coverage.
-
-To write a coverage.py plug-in, create a module with a subclass of
-:class:`~coverage.CoveragePlugin`. You will override methods in your class to
-participate in various aspects of coverage.py's processing.
-Different types of plug-ins have to override different methods.
-
-Any plug-in can optionally implement :meth:`~coverage.CoveragePlugin.sys_info`
-to provide debugging information about their operation.
-
-Your module must also contain a ``coverage_init`` function that registers an
-instance of your plug-in class::
-
- import coverage
-
- class MyPlugin(coverage.CoveragePlugin):
- ...
-
- def coverage_init(reg, options):
- reg.add_file_tracer(MyPlugin())
-
-You use the `reg` parameter passed to your ``coverage_init`` function to
-register your plug-in object. The registration method you call depends on
-what kind of plug-in it is.
-
-If your plug-in takes options, the `options` parameter is a dictionary of your
-plug-in's options from the coverage.py configuration file. Use them however
-you want to configure your object before registering it.
-
-Coverage.py will store its own information on your plug-in object, using
-attributes whose names start with ``_coverage_``. Don't be startled.
-
-.. warning::
- Plug-ins are imported by coverage.py before it begins measuring code.
- If you write a plugin in your own project, it might import your product
- code before coverage.py can start measuring. This can result in your
- own code being reported as missing.
-
- One solution is to put your plugins in your project tree, but not in
- your importable Python package.
-
-
-.. _file_tracer_plugins:
-
-File Tracers
-============
-
-File tracers implement measurement support for non-Python files. File tracers
-implement the :meth:`~coverage.CoveragePlugin.file_tracer` method to claim
-files and the :meth:`~coverage.CoveragePlugin.file_reporter` method to report
-on those files.
-
-In your ``coverage_init`` function, use the ``add_file_tracer`` method to
-register your file tracer.
-
-
-.. _configurer_plugins:
-
-Configurers
-===========
-
-.. versionadded:: 4.5
-
-Configurers modify the configuration of coverage.py during start-up.
-Configurers implement the :meth:`~coverage.CoveragePlugin.configure` method to
-change the configuration.
-
-In your ``coverage_init`` function, use the ``add_configurer`` method to
-register your configurer.
-
-
-.. _dynamic_context_plugins:
-
-Dynamic Context Switchers
-=========================
-
-.. versionadded:: 5.0
-
-Dynamic context switcher plugins implement the
-:meth:`~coverage.CoveragePlugin.dynamic_context` method to dynamically compute
-the context label for each measured frame.
-
-Computed context labels are useful when you want to group measured data without
-modifying the source code.
-
-For example, you could write a plugin that checks `frame.f_code` to inspect
-the currently executed method, and set the context label to a fully qualified
-method name if it's an instance method of `unittest.TestCase` and the method
-name starts with 'test'. Such a plugin would provide basic coverage grouping
-by test and could be used with test runners that have no built-in coveragepy
-support.
-
-In your ``coverage_init`` function, use the ``add_dynamic_context`` method to
-register your dynamic context switcher.
-
-"""
-
-from coverage import files
-from coverage.misc import contract, _needs_to_implement
-
-
-class CoveragePlugin(object):
- """Base class for coverage.py plug-ins."""
-
- def file_tracer(self, filename): # pylint: disable=unused-argument
- """Get a :class:`FileTracer` object for a file.
-
- Plug-in type: file tracer.
-
- Every Python source file is offered to your plug-in to give it a chance
- to take responsibility for tracing the file. If your plug-in can
- handle the file, it should return a :class:`FileTracer` object.
- Otherwise return None.
-
- There is no way to register your plug-in for particular files.
- Instead, this method is invoked for all files as they are executed,
- and the plug-in decides whether it can trace the file or not.
- Be prepared for `filename` to refer to all kinds of files that have
- nothing to do with your plug-in.
-
- The file name will be that of a Python file being executed. There are two
- broad categories of behavior for a plug-in, depending on the kind of
- files your plug-in supports:
-
- * Static file names: each of your original source files has been
- converted into a distinct Python file. Your plug-in is invoked with
- the Python file name, and it maps it back to its original source
- file.
-
- * Dynamic file names: all of your source files are executed by the same
- Python file. In this case, your plug-in implements
- :meth:`FileTracer.dynamic_source_filename` to provide the actual
- source file for each execution frame.
-
- `filename` is a string, the path to the file being considered. This is
- the absolute real path to the file. If you are comparing to other
- paths, be sure to take this into account.
-
- Returns a :class:`FileTracer` object to use to trace `filename`, or
- None if this plug-in cannot trace this file.
-
- """
- return None
-
- def file_reporter(self, filename): # pylint: disable=unused-argument
- """Get the :class:`FileReporter` class to use for a file.
-
- Plug-in type: file tracer.
-
- This will only be invoked if `filename` returns non-None from
- :meth:`file_tracer`. It's an error to return None from this method.
-
- Returns a :class:`FileReporter` object to use to report on `filename`,
- or the string `"python"` to have coverage.py treat the file as Python.
-
- """
- _needs_to_implement(self, "file_reporter")
-
- def dynamic_context(self, frame): # pylint: disable=unused-argument
- """Get the dynamically computed context label for `frame`.
-
- Plug-in type: dynamic context.
-
- This method is invoked for each frame when outside of a dynamic
- context, to see if a new dynamic context should be started. If it
- returns a string, a new context label is set for this and deeper
- frames. The dynamic context ends when this frame returns.
-
- Returns a string to start a new dynamic context, or None if no new
- context should be started.
-
- """
- return None
-
- def find_executable_files(self, src_dir): # pylint: disable=unused-argument
- """Yield all of the executable files in `src_dir`, recursively.
-
- Plug-in type: file tracer.
-
- Executability is a plug-in-specific property, but generally means files
- which would have been considered for coverage analysis, had they been
- included automatically.
-
- Returns or yields a sequence of strings, the paths to files that could
- have been executed, including files that had been executed.
-
- """
- return []
-
- def configure(self, config):
- """Modify the configuration of coverage.py.
-
- Plug-in type: configurer.
-
- This method is called during coverage.py start-up, to give your plug-in
- a chance to change the configuration. The `config` parameter is an
- object with :meth:`~coverage.Coverage.get_option` and
- :meth:`~coverage.Coverage.set_option` methods. Do not call any other
- methods on the `config` object.
-
- """
- pass
-
- def sys_info(self):
- """Get a list of information useful for debugging.
-
- Plug-in type: any.
-
- This method will be invoked for ``--debug=sys``. Your
- plug-in can return any information it wants to be displayed.
-
- Returns a list of pairs: `[(name, value), ...]`.
-
- """
- return []
-
-
-class FileTracer(object):
- """Support needed for files during the execution phase.
-
- File tracer plug-ins implement subclasses of FileTracer to return from
- their :meth:`~CoveragePlugin.file_tracer` method.
-
- You may construct this object from :meth:`CoveragePlugin.file_tracer` any
- way you like. A natural choice would be to pass the file name given to
- `file_tracer`.
-
- `FileTracer` objects should only be created in the
- :meth:`CoveragePlugin.file_tracer` method.
-
- See :ref:`howitworks` for details of the different coverage.py phases.
-
- """
-
- def source_filename(self):
- """The source file name for this file.
-
- This may be any file name you like. A key responsibility of a plug-in
- is to own the mapping from Python execution back to whatever source
- file name was originally the source of the code.
-
- See :meth:`CoveragePlugin.file_tracer` for details about static and
- dynamic file names.
-
- Returns the file name to credit with this execution.
-
- """
- _needs_to_implement(self, "source_filename")
-
- def has_dynamic_source_filename(self):
- """Does this FileTracer have dynamic source file names?
-
- FileTracers can provide dynamically determined file names by
- implementing :meth:`dynamic_source_filename`. Invoking that function
- is expensive. To determine whether to invoke it, coverage.py uses the
- result of this function to know if it needs to bother invoking
- :meth:`dynamic_source_filename`.
-
- See :meth:`CoveragePlugin.file_tracer` for details about static and
- dynamic file names.
-
- Returns True if :meth:`dynamic_source_filename` should be called to get
- dynamic source file names.
-
- """
- return False
-
- def dynamic_source_filename(self, filename, frame): # pylint: disable=unused-argument
- """Get a dynamically computed source file name.
-
- Some plug-ins need to compute the source file name dynamically for each
- frame.
-
- This function will not be invoked if
- :meth:`has_dynamic_source_filename` returns False.
-
- Returns the source file name for this frame, or None if this frame
- shouldn't be measured.
-
- """
- return None
-
- def line_number_range(self, frame):
- """Get the range of source line numbers for a given a call frame.
-
- The call frame is examined, and the source line number in the original
- file is returned. The return value is a pair of numbers, the starting
- line number and the ending line number, both inclusive. For example,
- returning (5, 7) means that lines 5, 6, and 7 should be considered
- executed.
-
- This function might decide that the frame doesn't indicate any lines
- from the source file were executed. Return (-1, -1) in this case to
- tell coverage.py that no lines should be recorded for this frame.
-
- """
- lineno = frame.f_lineno
- return lineno, lineno
-
-
-class FileReporter(object):
- """Support needed for files during the analysis and reporting phases.
-
- File tracer plug-ins implement a subclass of `FileReporter`, and return
- instances from their :meth:`CoveragePlugin.file_reporter` method.
-
- There are many methods here, but only :meth:`lines` is required, to provide
- the set of executable lines in the file.
-
- See :ref:`howitworks` for details of the different coverage.py phases.
-
- """
-
- def __init__(self, filename):
- """Simple initialization of a `FileReporter`.
-
- The `filename` argument is the path to the file being reported. This
- will be available as the `.filename` attribute on the object. Other
- method implementations on this base class rely on this attribute.
-
- """
- self.filename = filename
-
- def __repr__(self):
- return "<{0.__class__.__name__} filename={0.filename!r}>".format(self)
-
- def relative_filename(self):
- """Get the relative file name for this file.
-
- This file path will be displayed in reports. The default
- implementation will supply the actual project-relative file path. You
- only need to supply this method if you have an unusual syntax for file
- paths.
-
- """
- return files.relative_filename(self.filename)
-
- @contract(returns='unicode')
- def source(self):
- """Get the source for the file.
-
- Returns a Unicode string.
-
- The base implementation simply reads the `self.filename` file and
- decodes it as UTF8. Override this method if your file isn't readable
- as a text file, or if you need other encoding support.
-
- """
- with open(self.filename, "rb") as f:
- return f.read().decode("utf8")
-
- def lines(self):
- """Get the executable lines in this file.
-
- Your plug-in must determine which lines in the file were possibly
- executable. This method returns a set of those line numbers.
-
- Returns a set of line numbers.
-
- """
- _needs_to_implement(self, "lines")
-
- def excluded_lines(self):
- """Get the excluded executable lines in this file.
-
- Your plug-in can use any method it likes to allow the user to exclude
- executable lines from consideration.
-
- Returns a set of line numbers.
-
- The base implementation returns the empty set.
-
- """
- return set()
-
- def translate_lines(self, lines):
- """Translate recorded lines into reported lines.
-
- Some file formats will want to report lines slightly differently than
- they are recorded. For example, Python records the last line of a
- multi-line statement, but reports are nicer if they mention the first
- line.
-
- Your plug-in can optionally define this method to perform these kinds
- of adjustment.
-
- `lines` is a sequence of integers, the recorded line numbers.
-
- Returns a set of integers, the adjusted line numbers.
-
- The base implementation returns the numbers unchanged.
-
- """
- return set(lines)
-
- def arcs(self):
- """Get the executable arcs in this file.
-
- To support branch coverage, your plug-in needs to be able to indicate
- possible execution paths, as a set of line number pairs. Each pair is
- a `(prev, next)` pair indicating that execution can transition from the
- `prev` line number to the `next` line number.
-
- Returns a set of pairs of line numbers. The default implementation
- returns an empty set.
-
- """
- return set()
-
- def no_branch_lines(self):
- """Get the lines excused from branch coverage in this file.
-
- Your plug-in can use any method it likes to allow the user to exclude
- lines from consideration of branch coverage.
-
- Returns a set of line numbers.
-
- The base implementation returns the empty set.
-
- """
- return set()
-
- def translate_arcs(self, arcs):
- """Translate recorded arcs into reported arcs.
-
- Similar to :meth:`translate_lines`, but for arcs. `arcs` is a set of
- line number pairs.
-
- Returns a set of line number pairs.
-
- The default implementation returns `arcs` unchanged.
-
- """
- return arcs
-
- def exit_counts(self):
- """Get a count of exits from that each line.
-
- To determine which lines are branches, coverage.py looks for lines that
- have more than one exit. This function creates a dict mapping each
- executable line number to a count of how many exits it has.
-
- To be honest, this feels wrong, and should be refactored. Let me know
- if you attempt to implement this method in your plug-in...
-
- """
- return {}
-
- def missing_arc_description(self, start, end, executed_arcs=None): # pylint: disable=unused-argument
- """Provide an English sentence describing a missing arc.
-
- The `start` and `end` arguments are the line numbers of the missing
- arc. Negative numbers indicate entering or exiting code objects.
-
- The `executed_arcs` argument is a set of line number pairs, the arcs
- that were executed in this file.
-
- By default, this simply returns the string "Line {start} didn't jump
- to line {end}".
-
- """
- return "Line {start} didn't jump to line {end}".format(start=start, end=end)
-
- def source_token_lines(self):
- """Generate a series of tokenized lines, one for each line in `source`.
-
- These tokens are used for syntax-colored reports.
-
- Each line is a list of pairs, each pair is a token::
-
- [('key', 'def'), ('ws', ' '), ('nam', 'hello'), ('op', '('), ... ]
-
- Each pair has a token class, and the token text. The token classes
- are:
-
- * ``'com'``: a comment
- * ``'key'``: a keyword
- * ``'nam'``: a name, or identifier
- * ``'num'``: a number
- * ``'op'``: an operator
- * ``'str'``: a string literal
- * ``'ws'``: some white space
- * ``'txt'``: some other kind of text
-
- If you concatenate all the token texts, and then join them with
- newlines, you should have your original source back.
-
- The default implementation simply returns each line tagged as
- ``'txt'``.
-
- """
- for line in self.source().splitlines():
- yield [('txt', line)]
-
- # Annoying comparison operators. Py3k wants __lt__ etc, and Py2k needs all
- # of them defined.
-
- def __eq__(self, other):
- return isinstance(other, FileReporter) and self.filename == other.filename
-
- def __ne__(self, other):
- return not (self == other)
-
- def __lt__(self, other):
- return self.filename < other.filename
-
- def __le__(self, other):
- return self.filename <= other.filename
-
- def __gt__(self, other):
- return self.filename > other.filename
-
- def __ge__(self, other):
- return self.filename >= other.filename
-
- __hash__ = None # This object doesn't need to be hashed.
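As the class docstring says, only lines() is strictly required in a FileReporter subclass; every other method has a usable default. A bare-bones sketch for a hypothetical line-oriented format (the class name and the "every non-blank line is executable" rule are illustrative, the base class comes from the module above)::

    from coverage.plugin import FileReporter


    class EveryLineReporter(FileReporter):
        """Treat every non-blank line of the file as executable."""

        def lines(self):
            return {
                lineno
                for lineno, text in enumerate(self.source().splitlines(), start=1)
                if text.strip()
            }

    # Usage: EveryLineReporter("templates/home.html").lines()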
diff --git a/contrib/python/coverage/py3/coverage/plugin_support.py b/contrib/python/coverage/py3/coverage/plugin_support.py
deleted file mode 100644
index 89c1c7658f..0000000000
--- a/contrib/python/coverage/py3/coverage/plugin_support.py
+++ /dev/null
@@ -1,281 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Support for plugins."""
-
-import os
-import os.path
-import sys
-
-from coverage.misc import CoverageException, isolate_module
-from coverage.plugin import CoveragePlugin, FileTracer, FileReporter
-
-os = isolate_module(os)
-
-
-class Plugins(object):
- """The currently loaded collection of coverage.py plugins."""
-
- def __init__(self):
- self.order = []
- self.names = {}
- self.file_tracers = []
- self.configurers = []
- self.context_switchers = []
-
- self.current_module = None
- self.debug = None
-
- @classmethod
- def load_plugins(cls, modules, config, debug=None):
- """Load plugins from `modules`.
-
- Returns a Plugins object with the loaded and configured plugins.
-
- """
- plugins = cls()
- plugins.debug = debug
-
- for module in modules:
- plugins.current_module = module
- __import__(module)
- mod = sys.modules[module]
-
- coverage_init = getattr(mod, "coverage_init", None)
- if not coverage_init:
- raise CoverageException(
- "Plugin module %r didn't define a coverage_init function" % module
- )
-
- options = config.get_plugin_options(module)
- coverage_init(plugins, options)
-
- plugins.current_module = None
- return plugins
-
- def add_file_tracer(self, plugin):
- """Add a file tracer plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.file_tracer` method.
-
- """
- self._add_plugin(plugin, self.file_tracers)
-
- def add_configurer(self, plugin):
- """Add a configuring plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.configure` method.
-
- """
- self._add_plugin(plugin, self.configurers)
-
- def add_dynamic_context(self, plugin):
- """Add a dynamic context plugin.
-
- `plugin` is an instance of a third-party plugin class. It must
- implement the :meth:`CoveragePlugin.dynamic_context` method.
-
- """
- self._add_plugin(plugin, self.context_switchers)
-
- def add_noop(self, plugin):
- """Add a plugin that does nothing.
-
- This is only useful for testing the plugin support.
-
- """
- self._add_plugin(plugin, None)
-
- def _add_plugin(self, plugin, specialized):
- """Add a plugin object.
-
- `plugin` is a :class:`CoveragePlugin` instance to add. `specialized`
- is a list to append the plugin to.
-
- """
- plugin_name = "%s.%s" % (self.current_module, plugin.__class__.__name__)
- if self.debug and self.debug.should('plugin'):
- self.debug.write("Loaded plugin %r: %r" % (self.current_module, plugin))
- labelled = LabelledDebug("plugin %r" % (self.current_module,), self.debug)
- plugin = DebugPluginWrapper(plugin, labelled)
-
- # pylint: disable=attribute-defined-outside-init
- plugin._coverage_plugin_name = plugin_name
- plugin._coverage_enabled = True
- self.order.append(plugin)
- self.names[plugin_name] = plugin
- if specialized is not None:
- specialized.append(plugin)
-
- def __nonzero__(self):
- return bool(self.order)
-
- __bool__ = __nonzero__
-
- def __iter__(self):
- return iter(self.order)
-
- def get(self, plugin_name):
- """Return a plugin by name."""
- return self.names[plugin_name]
-
-
-class LabelledDebug(object):
- """A Debug writer, but with labels for prepending to the messages."""
-
- def __init__(self, label, debug, prev_labels=()):
- self.labels = list(prev_labels) + [label]
- self.debug = debug
-
- def add_label(self, label):
- """Add a label to the writer, and return a new `LabelledDebug`."""
- return LabelledDebug(label, self.debug, self.labels)
-
- def message_prefix(self):
- """The prefix to use on messages, combining the labels."""
- prefixes = self.labels + ['']
- return ":\n".join(" "*i+label for i, label in enumerate(prefixes))
-
- def write(self, message):
- """Write `message`, but with the labels prepended."""
- self.debug.write("%s%s" % (self.message_prefix(), message))
-
-
-class DebugPluginWrapper(CoveragePlugin):
- """Wrap a plugin, and use debug to report on what it's doing."""
-
- def __init__(self, plugin, debug):
- super(DebugPluginWrapper, self).__init__()
- self.plugin = plugin
- self.debug = debug
-
- def file_tracer(self, filename):
- tracer = self.plugin.file_tracer(filename)
- self.debug.write("file_tracer(%r) --> %r" % (filename, tracer))
- if tracer:
- debug = self.debug.add_label("file %r" % (filename,))
- tracer = DebugFileTracerWrapper(tracer, debug)
- return tracer
-
- def file_reporter(self, filename):
- reporter = self.plugin.file_reporter(filename)
- self.debug.write("file_reporter(%r) --> %r" % (filename, reporter))
- if reporter:
- debug = self.debug.add_label("file %r" % (filename,))
- reporter = DebugFileReporterWrapper(filename, reporter, debug)
- return reporter
-
- def dynamic_context(self, frame):
- context = self.plugin.dynamic_context(frame)
- self.debug.write("dynamic_context(%r) --> %r" % (frame, context))
- return context
-
- def find_executable_files(self, src_dir):
- executable_files = self.plugin.find_executable_files(src_dir)
- self.debug.write("find_executable_files(%r) --> %r" % (src_dir, executable_files))
- return executable_files
-
- def configure(self, config):
- self.debug.write("configure(%r)" % (config,))
- self.plugin.configure(config)
-
- def sys_info(self):
- return self.plugin.sys_info()
-
-
-class DebugFileTracerWrapper(FileTracer):
- """A debugging `FileTracer`."""
-
- def __init__(self, tracer, debug):
- self.tracer = tracer
- self.debug = debug
-
- def _show_frame(self, frame):
- """A short string identifying a frame, for debug messages."""
- return "%s@%d" % (
- os.path.basename(frame.f_code.co_filename),
- frame.f_lineno,
- )
-
- def source_filename(self):
- sfilename = self.tracer.source_filename()
- self.debug.write("source_filename() --> %r" % (sfilename,))
- return sfilename
-
- def has_dynamic_source_filename(self):
- has = self.tracer.has_dynamic_source_filename()
- self.debug.write("has_dynamic_source_filename() --> %r" % (has,))
- return has
-
- def dynamic_source_filename(self, filename, frame):
- dyn = self.tracer.dynamic_source_filename(filename, frame)
- self.debug.write("dynamic_source_filename(%r, %s) --> %r" % (
- filename, self._show_frame(frame), dyn,
- ))
- return dyn
-
- def line_number_range(self, frame):
- pair = self.tracer.line_number_range(frame)
- self.debug.write("line_number_range(%s) --> %r" % (self._show_frame(frame), pair))
- return pair
-
-
-class DebugFileReporterWrapper(FileReporter):
- """A debugging `FileReporter`."""
-
- def __init__(self, filename, reporter, debug):
- super(DebugFileReporterWrapper, self).__init__(filename)
- self.reporter = reporter
- self.debug = debug
-
- def relative_filename(self):
- ret = self.reporter.relative_filename()
- self.debug.write("relative_filename() --> %r" % (ret,))
- return ret
-
- def lines(self):
- ret = self.reporter.lines()
- self.debug.write("lines() --> %r" % (ret,))
- return ret
-
- def excluded_lines(self):
- ret = self.reporter.excluded_lines()
- self.debug.write("excluded_lines() --> %r" % (ret,))
- return ret
-
- def translate_lines(self, lines):
- ret = self.reporter.translate_lines(lines)
- self.debug.write("translate_lines(%r) --> %r" % (lines, ret))
- return ret
-
- def translate_arcs(self, arcs):
- ret = self.reporter.translate_arcs(arcs)
- self.debug.write("translate_arcs(%r) --> %r" % (arcs, ret))
- return ret
-
- def no_branch_lines(self):
- ret = self.reporter.no_branch_lines()
- self.debug.write("no_branch_lines() --> %r" % (ret,))
- return ret
-
- def exit_counts(self):
- ret = self.reporter.exit_counts()
- self.debug.write("exit_counts() --> %r" % (ret,))
- return ret
-
- def arcs(self):
- ret = self.reporter.arcs()
- self.debug.write("arcs() --> %r" % (ret,))
- return ret
-
- def source(self):
- ret = self.reporter.source()
- self.debug.write("source() --> %d chars" % (len(ret),))
- return ret
-
- def source_token_lines(self):
- ret = list(self.reporter.source_token_lines())
- self.debug.write("source_token_lines() --> %d tokens" % (len(ret),))
- return ret
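LabelledDebug.message_prefix joins the accumulated labels into an indented, colon-terminated prefix. Rendering the same expression standalone shows the shape of the output (the label strings are illustrative)::

    labels = ["plugin 'mymod.MyPlugin'", "file 'templates/home.html'"]
    prefixes = labels + ['']
    print(":\n".join(" " * i + label for i, label in enumerate(prefixes)))
    # plugin 'mymod.MyPlugin':
    #  file 'templates/home.html':
    # (the debug message itself is appended after the final indented line)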
diff --git a/contrib/python/coverage/py3/coverage/python.py b/contrib/python/coverage/py3/coverage/python.py
deleted file mode 100644
index 6ff19c34fe..0000000000
--- a/contrib/python/coverage/py3/coverage/python.py
+++ /dev/null
@@ -1,261 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Python source expertise for coverage.py"""
-
-import sys
-import os.path
-import types
-import zipimport
-
-from coverage import env, files
-from coverage.misc import contract, expensive, isolate_module, join_regex
-from coverage.misc import CoverageException, NoSource
-from coverage.parser import PythonParser
-from coverage.phystokens import source_token_lines, source_encoding
-from coverage.plugin import FileReporter
-
-os = isolate_module(os)
-
-
-@contract(returns='bytes')
-def read_python_source(filename):
- """Read the Python source text from `filename`.
-
- Returns bytes.
-
- """
- with open(filename, "rb") as f:
- source = f.read()
-
- if env.IRONPYTHON:
- # IronPython reads Unicode strings even for "rb" files.
- source = bytes(source)
-
- return source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
-
-
-@contract(returns='unicode')
-def get_python_source(filename, force_fs=False):
- """Return the source code, as unicode."""
- if getattr(sys, 'is_standalone_binary', False) and not force_fs:
- import __res
-
- modname = __res.importer.file_source(filename)
- if modname:
- source = __res.find(modname)
- source = source.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
- return source.decode('utf-8')
- else:
- # it's a fake generated package
- return u''
- base, ext = os.path.splitext(filename)
- if ext == ".py" and env.WINDOWS:
- exts = [".py", ".pyw"]
- else:
- exts = [ext]
-
- for ext in exts:
- try_filename = base + ext
- if os.path.exists(try_filename):
- # A regular text file: open it.
- source = read_python_source(try_filename)
- break
-
- # Maybe it's in a zip file?
- source = get_zip_bytes(try_filename)
- if source is not None:
- break
- else:
- # Couldn't find source.
- exc_msg = "No source for code: '%s'.\n" % (filename,)
- exc_msg += "Aborting report output, consider using -i."
- raise NoSource(exc_msg)
-
- # Replace \f because of http://bugs.python.org/issue19035
- source = source.replace(b'\f', b' ')
- source = source.decode(source_encoding(source), "replace")
-
- # Python code should always end with a line with a newline.
- if source and source[-1] != '\n':
- source += '\n'
-
- return source
-
-
-@contract(returns='bytes|None')
-def get_zip_bytes(filename):
- """Get data from `filename` if it is a zip file path.
-
- Returns the bytestring data read from the zip file, or None if no zip file
- could be found or `filename` isn't in it. The data returned will be
- an empty string if the file is empty.
-
- """
- markers = ['.zip'+os.sep, '.egg'+os.sep, '.pex'+os.sep]
- for marker in markers:
- if marker in filename:
- parts = filename.split(marker)
- try:
- zi = zipimport.zipimporter(parts[0]+marker[:-1])
- except zipimport.ZipImportError:
- continue
- try:
- data = zi.get_data(parts[1])
- except IOError:
- continue
- return data
- return None
-
-
-def source_for_file(filename):
- """Return the source filename for `filename`.
-
- Given a file name being traced, return the best guess as to the source
- file to attribute it to.
-
- """
- if filename.endswith(".py"):
- # .py files are themselves source files.
- return filename
-
- elif filename.endswith((".pyc", ".pyo")):
- # Bytecode files probably have source files near them.
- py_filename = filename[:-1]
- if os.path.exists(py_filename):
- # Found a .py file, use that.
- return py_filename
- if env.WINDOWS:
- # On Windows, it could be a .pyw file.
- pyw_filename = py_filename + "w"
- if os.path.exists(pyw_filename):
- return pyw_filename
- # Didn't find source, but it's probably the .py file we want.
- return py_filename
-
- elif filename.endswith("$py.class"):
- # Jython is easy to guess.
- return filename[:-9] + ".py"
-
- # No idea, just use the file name as-is.
- return filename
-
-
-def source_for_morf(morf):
- """Get the source filename for the module-or-file `morf`."""
- if hasattr(morf, '__file__') and morf.__file__:
- filename = morf.__file__
- elif isinstance(morf, types.ModuleType):
- # A module should have had .__file__, otherwise we can't use it.
- # This could be a PEP-420 namespace package.
- raise CoverageException("Module {} has no file".format(morf))
- else:
- filename = morf
-
- filename = source_for_file(files.unicode_filename(filename))
- return filename
-
-
-class PythonFileReporter(FileReporter):
- """Report support for a Python file."""
-
- def __init__(self, morf, coverage=None):
- self.coverage = coverage
-
- filename = source_for_morf(morf)
-
- super(PythonFileReporter, self).__init__(files.canonical_filename(filename))
-
- if hasattr(morf, '__name__'):
- name = morf.__name__.replace(".", os.sep)
- if os.path.basename(filename).startswith('__init__.'):
- name += os.sep + "__init__"
- name += ".py"
- name = files.unicode_filename(name)
- else:
- name = files.relative_filename(filename)
- self.relname = name
-
- self._source = None
- self._parser = None
- self._excluded = None
-
- def __repr__(self):
- return "<PythonFileReporter {!r}>".format(self.filename)
-
- @contract(returns='unicode')
- def relative_filename(self):
- return self.relname
-
- @property
- def parser(self):
- """Lazily create a :class:`PythonParser`."""
- if self._parser is None:
- self._parser = PythonParser(
- filename=self.filename,
- exclude=self.coverage._exclude_regex('exclude'),
- )
- self._parser.parse_source()
- return self._parser
-
- def lines(self):
- """Return the line numbers of statements in the file."""
- return self.parser.statements
-
- def excluded_lines(self):
- """Return the line numbers of statements in the file."""
- return self.parser.excluded
-
- def translate_lines(self, lines):
- return self.parser.translate_lines(lines)
-
- def translate_arcs(self, arcs):
- return self.parser.translate_arcs(arcs)
-
- @expensive
- def no_branch_lines(self):
- no_branch = self.parser.lines_matching(
- join_regex(self.coverage.config.partial_list),
- join_regex(self.coverage.config.partial_always_list)
- )
- return no_branch
-
- @expensive
- def arcs(self):
- return self.parser.arcs()
-
- @expensive
- def exit_counts(self):
- return self.parser.exit_counts()
-
- def missing_arc_description(self, start, end, executed_arcs=None):
- return self.parser.missing_arc_description(start, end, executed_arcs)
-
- @contract(returns='unicode')
- def source(self):
- if self._source is None:
- self._source = get_python_source(self.filename)
- return self._source
-
- def should_be_python(self):
- """Does it seem like this file should contain Python?
-
- This is used to decide if a file reported as part of the execution of
- a program was really likely to have contained Python in the first
- place.
-
- """
- # Get the file extension.
- _, ext = os.path.splitext(self.filename)
-
- # Anything named *.py* should be Python.
- if ext.startswith('.py'):
- return True
- # A file with no extension should be Python.
- if not ext:
- return True
- # Everything else is probably not Python.
- return False
-
- def source_token_lines(self):
- return source_token_lines(self.source())
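source_for_file above only inspects the file name suffix, so its mapping rules are easy to exercise directly. A usage sketch, assuming coverage.py is importable; the paths are illustrative, and for .pyc files an existing neighbouring .py file would be preferred::

    from coverage.python import source_for_file

    print(source_for_file("pkg/mod.py"))        # pkg/mod.py  (already source)
    print(source_for_file("pkg/mod$py.class"))  # pkg/mod.py  (Jython bytecode)
    print(source_for_file("pkg/mod.pyc"))       # pkg/mod.py  (bytecode next to source)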
diff --git a/contrib/python/coverage/py3/coverage/pytracer.py b/contrib/python/coverage/py3/coverage/pytracer.py
deleted file mode 100644
index 7ab4d3ef92..0000000000
--- a/contrib/python/coverage/py3/coverage/pytracer.py
+++ /dev/null
@@ -1,274 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Raw data collector for coverage.py."""
-
-import atexit
-import dis
-import sys
-
-from coverage import env
-
-# We need the YIELD_VALUE opcode below, in a comparison-friendly form.
-YIELD_VALUE = dis.opmap['YIELD_VALUE']
-if env.PY2:
- YIELD_VALUE = chr(YIELD_VALUE)
-
-# When running meta-coverage, this file can try to trace itself, which confuses
-# everything. Don't trace ourselves.
-
-THIS_FILE = __file__.rstrip("co")
-
-
-class PyTracer(object):
- """Python implementation of the raw data tracer."""
-
- # Because of poor implementations of trace-function-manipulating tools,
- # the Python trace function must be kept very simple. In particular, there
- # must be only one function ever set as the trace function, both through
- # sys.settrace, and as the return value from the trace function. Put
- # another way, the trace function must always return itself. It cannot
- # swap in other functions, or return None to avoid tracing a particular
- # frame.
- #
- # The trace manipulator that introduced this restriction is DecoratorTools,
- # which sets a trace function, and then later restores the pre-existing one
- # by calling sys.settrace with a function it found in the current frame.
- #
- # Systems that use DecoratorTools (or similar trace manipulations) must use
- # PyTracer to get accurate results. The command-line --timid argument is
- # used to force the use of this tracer.
-
- def __init__(self):
- # Attributes set from the collector:
- self.data = None
- self.trace_arcs = False
- self.should_trace = None
- self.should_trace_cache = None
- self.should_start_context = None
- self.warn = None
- # The threading module to use, if any.
- self.threading = None
-
- self.cur_file_dict = None
- self.last_line = 0 # int, but uninitialized.
- self.cur_file_name = None
- self.context = None
- self.started_context = False
-
- self.data_stack = []
- self.last_exc_back = None
- self.last_exc_firstlineno = 0
- self.thread = None
- self.stopped = False
- self._activity = False
-
- self.in_atexit = False
- # On exit, self.in_atexit = True
- atexit.register(setattr, self, 'in_atexit', True)
-
- def __repr__(self):
- return "<PyTracer at {}: {} lines in {} files>".format(
- id(self),
- sum(len(v) for v in self.data.values()),
- len(self.data),
- )
-
- def log(self, marker, *args):
- """For hard-core logging of what this tracer is doing."""
- with open("/tmp/debug_trace.txt", "a") as f:
- f.write("{} {}[{}]".format(
- marker,
- id(self),
- len(self.data_stack),
- ))
- if 0:
- f.write(".{:x}.{:x}".format(
- self.thread.ident,
- self.threading.currentThread().ident,
- ))
- f.write(" {}".format(" ".join(map(str, args))))
- if 0:
- f.write(" | ")
- stack = " / ".join(
- (fname or "???").rpartition("/")[-1]
- for _, fname, _, _ in self.data_stack
- )
- f.write(stack)
- f.write("\n")
-
- def _trace(self, frame, event, arg_unused):
- """The trace function passed to sys.settrace."""
-
- if THIS_FILE in frame.f_code.co_filename:
- return None
-
- #self.log(":", frame.f_code.co_filename, frame.f_lineno, frame.f_code.co_name + "()", event)
-
- if (self.stopped and sys.gettrace() == self._trace): # pylint: disable=comparison-with-callable
- # The PyTrace.stop() method has been called, possibly by another
- # thread, let's deactivate ourselves now.
- if 0:
- self.log("---\nX", frame.f_code.co_filename, frame.f_lineno)
- f = frame
- while f:
- self.log(">", f.f_code.co_filename, f.f_lineno, f.f_code.co_name, f.f_trace)
- f = f.f_back
- sys.settrace(None)
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- return None
-
- if self.last_exc_back:
- if frame == self.last_exc_back:
- # Someone forgot a return event.
- if self.trace_arcs and self.cur_file_dict:
- pair = (self.last_line, -self.last_exc_firstlineno)
- self.cur_file_dict[pair] = None
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- self.last_exc_back = None
-
- # if event != 'call' and frame.f_code.co_filename != self.cur_file_name:
- # self.log("---\n*", frame.f_code.co_filename, self.cur_file_name, frame.f_lineno)
-
- if event == 'call':
- # Should we start a new context?
- if self.should_start_context and self.context is None:
- context_maybe = self.should_start_context(frame)
- if context_maybe is not None:
- self.context = context_maybe
- self.started_context = True
- self.switch_context(self.context)
- else:
- self.started_context = False
- else:
- self.started_context = False
-
- # Entering a new frame. Decide if we should trace
- # in this file.
- self._activity = True
- self.data_stack.append(
- (
- self.cur_file_dict,
- self.cur_file_name,
- self.last_line,
- self.started_context,
- )
- )
- filename = frame.f_code.co_filename
- self.cur_file_name = filename
- disp = self.should_trace_cache.get(filename)
- if disp is None:
- disp = self.should_trace(filename, frame)
- self.should_trace_cache[filename] = disp
-
- self.cur_file_dict = None
- if disp.trace:
- tracename = disp.source_filename
- if tracename not in self.data:
- self.data[tracename] = {}
- self.cur_file_dict = self.data[tracename]
- # The call event is really a "start frame" event, and happens for
- # function calls and re-entering generators. The f_lasti field is
- # -1 for calls, and a real offset for generators. Use <0 as the
- # line number for calls, and the real line number for generators.
- if getattr(frame, 'f_lasti', -1) < 0:
- self.last_line = -frame.f_code.co_firstlineno
- else:
- self.last_line = frame.f_lineno
- elif event == 'line':
- # Record an executed line.
- if self.cur_file_dict is not None:
- lineno = frame.f_lineno
-
- if self.trace_arcs:
- self.cur_file_dict[(self.last_line, lineno)] = None
- else:
- self.cur_file_dict[lineno] = None
- self.last_line = lineno
- elif event == 'return':
- if self.trace_arcs and self.cur_file_dict:
- # Record an arc leaving the function, but beware that a
- # "return" event might just mean yielding from a generator.
- # Jython seems to have an empty co_code, so just assume return.
- code = frame.f_code.co_code
- if (not code) or code[frame.f_lasti] != YIELD_VALUE:
- first = frame.f_code.co_firstlineno
- self.cur_file_dict[(self.last_line, -first)] = None
- # Leaving this function, pop the filename stack.
- self.cur_file_dict, self.cur_file_name, self.last_line, self.started_context = (
- self.data_stack.pop()
- )
- # Leaving a context?
- if self.started_context:
- self.context = None
- self.switch_context(None)
- elif event == 'exception':
- self.last_exc_back = frame.f_back
- self.last_exc_firstlineno = frame.f_code.co_firstlineno
- return self._trace
-
- def start(self):
- """Start this Tracer.
-
- Return a Python function suitable for use with sys.settrace().
-
- """
- self.stopped = False
- if self.threading:
- if self.thread is None:
- self.thread = self.threading.currentThread()
- else:
- if self.thread.ident != self.threading.currentThread().ident:
- # Re-starting from a different thread!? Don't set the trace
- # function, but we are marked as running again, so maybe it
- # will be ok?
- #self.log("~", "starting on different threads")
- return self._trace
-
- sys.settrace(self._trace)
- return self._trace
-
- def stop(self):
- """Stop this Tracer."""
- # Get the active tracer callback before setting the stop flag to be
- # able to detect if the tracer was changed prior to stopping it.
- tf = sys.gettrace()
-
- # Set the stop flag. The actual call to sys.settrace(None) will happen
- # in the self._trace callback itself to make sure to call it from the
- # right thread.
- self.stopped = True
-
- if self.threading and self.thread.ident != self.threading.currentThread().ident:
- # Called on a different thread than started us: we can't unhook
- # ourselves, but we've set the flag that we should stop, so we
- # won't do any more tracing.
- #self.log("~", "stopping on different threads")
- return
-
- if self.warn:
- # PyPy clears the trace function before running atexit functions,
- # so don't warn if we are in atexit on PyPy and the trace function
- # has changed to None.
- dont_warn = (env.PYPY and env.PYPYVERSION >= (5, 4) and self.in_atexit and tf is None)
- if (not dont_warn) and tf != self._trace: # pylint: disable=comparison-with-callable
- self.warn(
- "Trace function changed, measurement is likely wrong: %r" % (tf,),
- slug="trace-changed",
- )
-
- def activity(self):
- """Has there been any activity?"""
- return self._activity
-
- def reset_activity(self):
- """Reset the activity() flag."""
- self._activity = False
-
- def get_stats(self):
- """Return a dictionary of statistics, or None."""
- return None
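The comment block at the top of PyTracer is the key constraint: the trace function must always return itself. A toy tracer built the same way, using only the standard library and unrelated to coverage.py's own data structures, looks like this::

    import sys

    counts = {}

    def toy_trace(frame, event, arg):
        if event == "line":
            key = (frame.f_code.co_filename, frame.f_lineno)
            counts[key] = counts.get(key, 0) + 1
        return toy_trace        # always hand back the same function, never None

    def work():
        total = 0
        for i in range(3):
            total += i
        return total

    sys.settrace(toy_trace)
    work()
    sys.settrace(None)
    print(sum(counts.values()), "line events recorded")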
diff --git a/contrib/python/coverage/py3/coverage/report.py b/contrib/python/coverage/py3/coverage/report.py
deleted file mode 100644
index 64678ff95d..0000000000
--- a/contrib/python/coverage/py3/coverage/report.py
+++ /dev/null
@@ -1,86 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Reporter foundation for coverage.py."""
-import sys
-
-from coverage import env
-from coverage.files import prep_patterns, FnmatchMatcher
-from coverage.misc import CoverageException, NoSource, NotPython, ensure_dir_for_file, file_be_gone
-
-
-def render_report(output_path, reporter, morfs):
- """Run the provided reporter ensuring any required setup and cleanup is done
-
- At a high level this method ensures the output file is ready to be written to. Then writes the
- report to it. Then closes the file and deletes any garbage created if necessary.
- """
- file_to_close = None
- delete_file = False
- if output_path:
- if output_path == '-':
- outfile = sys.stdout
- else:
- # Ensure that the output directory is created; done here
- # because this report pre-opens the output file.
- # HTMLReport does this using the Report plumbing because
- # its task is more complex, being multiple files.
- ensure_dir_for_file(output_path)
- open_kwargs = {}
- if env.PY3:
- open_kwargs['encoding'] = 'utf8'
- outfile = open(output_path, "w", **open_kwargs)
- file_to_close = outfile
- try:
- return reporter.report(morfs, outfile=outfile)
- except CoverageException:
- delete_file = True
- raise
- finally:
- if file_to_close:
- file_to_close.close()
- if delete_file:
- file_be_gone(output_path)
-
-
-def get_analysis_to_report(coverage, morfs):
- """Get the files to report on.
-
- For each morf in `morfs`, if it should be reported on (based on the omit
- and include configuration options), yield a pair, the `FileReporter` and
- `Analysis` for the morf.
-
- """
- file_reporters = coverage._get_file_reporters(morfs)
- config = coverage.config
-
- if config.report_include:
- matcher = FnmatchMatcher(prep_patterns(config.report_include))
- file_reporters = [fr for fr in file_reporters if matcher.match(fr.filename)]
-
- if config.report_omit:
- matcher = FnmatchMatcher(prep_patterns(config.report_omit))
- file_reporters = [fr for fr in file_reporters if not matcher.match(fr.filename)]
-
- if not file_reporters:
- raise CoverageException("No data to report.")
-
- for fr in sorted(file_reporters):
- try:
- analysis = coverage._analyze(fr)
- except NoSource:
- if not config.ignore_errors:
- raise
- except NotPython:
- # Only report errors for .py files, and only if we didn't
- # explicitly suppress those errors.
- # NotPython is only raised by PythonFileReporter, which has a
- # should_be_python() method.
- if fr.should_be_python():
- if config.ignore_errors:
- msg = "Couldn't parse Python file '{}'".format(fr.filename)
- coverage._warn(msg, slug="couldnt-parse")
- else:
- raise
- else:
- yield (fr, analysis)
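The include/omit filtering above is fnmatch-style matching on file paths (FnmatchMatcher adds pattern preparation and path normalisation on top). A stdlib-only sketch of the same selection step, with illustrative file names and patterns::

    import fnmatch

    filenames = ["src/app/main.py", "src/app/util.py", "tests/test_main.py"]
    include = ["src/*"]          # analogous to a report include setting
    omit = ["*/util.py"]         # analogous to a report omit setting

    selected = [
        f for f in filenames
        if any(fnmatch.fnmatch(f, pat) for pat in include)
        and not any(fnmatch.fnmatch(f, pat) for pat in omit)
    ]
    print(selected)              # ['src/app/main.py']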
diff --git a/contrib/python/coverage/py3/coverage/results.py b/contrib/python/coverage/py3/coverage/results.py
deleted file mode 100644
index 4916864df3..0000000000
--- a/contrib/python/coverage/py3/coverage/results.py
+++ /dev/null
@@ -1,343 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Results of coverage measurement."""
-
-import collections
-
-from coverage.backward import iitems
-from coverage.debug import SimpleReprMixin
-from coverage.misc import contract, CoverageException, nice_pair
-
-
-class Analysis(object):
- """The results of analyzing a FileReporter."""
-
- def __init__(self, data, file_reporter, file_mapper):
- self.data = data
- self.file_reporter = file_reporter
- self.filename = file_mapper(self.file_reporter.filename)
- self.statements = self.file_reporter.lines()
- self.excluded = self.file_reporter.excluded_lines()
-
- # Identify missing statements.
- executed = self.data.lines(self.filename) or []
- executed = self.file_reporter.translate_lines(executed)
- self.executed = executed
- self.missing = self.statements - self.executed
-
- if self.data.has_arcs():
- self._arc_possibilities = sorted(self.file_reporter.arcs())
- self.exit_counts = self.file_reporter.exit_counts()
- self.no_branch = self.file_reporter.no_branch_lines()
- n_branches = self._total_branches()
- mba = self.missing_branch_arcs()
- n_partial_branches = sum(len(v) for k,v in iitems(mba) if k not in self.missing)
- n_missing_branches = sum(len(v) for k,v in iitems(mba))
- else:
- self._arc_possibilities = []
- self.exit_counts = {}
- self.no_branch = set()
- n_branches = n_partial_branches = n_missing_branches = 0
-
- self.numbers = Numbers(
- n_files=1,
- n_statements=len(self.statements),
- n_excluded=len(self.excluded),
- n_missing=len(self.missing),
- n_branches=n_branches,
- n_partial_branches=n_partial_branches,
- n_missing_branches=n_missing_branches,
- )
-
- def missing_formatted(self, branches=False):
- """The missing line numbers, formatted nicely.
-
- Returns a string like "1-2, 5-11, 13-14".
-
- If `branches` is true, includes the missing branch arcs also.
-
- """
- if branches and self.has_arcs():
- arcs = iitems(self.missing_branch_arcs())
- else:
- arcs = None
-
- return format_lines(self.statements, self.missing, arcs=arcs)
-
- def has_arcs(self):
- """Were arcs measured in this result?"""
- return self.data.has_arcs()
-
- @contract(returns='list(tuple(int, int))')
- def arc_possibilities(self):
- """Returns a sorted list of the arcs in the code."""
- return self._arc_possibilities
-
- @contract(returns='list(tuple(int, int))')
- def arcs_executed(self):
- """Returns a sorted list of the arcs actually executed in the code."""
- executed = self.data.arcs(self.filename) or []
- executed = self.file_reporter.translate_arcs(executed)
- return sorted(executed)
-
- @contract(returns='list(tuple(int, int))')
- def arcs_missing(self):
- """Returns a sorted list of the arcs in the code not executed."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
- missing = (
- p for p in possible
- if p not in executed
- and p[0] not in self.no_branch
- )
- return sorted(missing)
-
- @contract(returns='list(tuple(int, int))')
- def arcs_unpredicted(self):
- """Returns a sorted list of the executed arcs missing from the code."""
- possible = self.arc_possibilities()
- executed = self.arcs_executed()
- # Exclude arcs here which connect a line to itself. They can occur
- # in executed data in some cases. This is where they can cause
- # trouble, and here is where it's the least burden to remove them.
- # Also, generators can somehow cause arcs from "enter" to "exit", so
- # make sure we have at least one positive value.
- unpredicted = (
- e for e in executed
- if e not in possible
- and e[0] != e[1]
- and (e[0] > 0 or e[1] > 0)
- )
- return sorted(unpredicted)
-
- def _branch_lines(self):
- """Returns a list of line numbers that have more than one exit."""
- return [l1 for l1,count in iitems(self.exit_counts) if count > 1]
-
- def _total_branches(self):
- """How many total branches are there?"""
- return sum(count for count in self.exit_counts.values() if count > 1)
-
- @contract(returns='dict(int: list(int))')
- def missing_branch_arcs(self):
- """Return arcs that weren't executed from branch lines.
-
- Returns {l1:[l2a,l2b,...], ...}
-
- """
- missing = self.arcs_missing()
- branch_lines = set(self._branch_lines())
- mba = collections.defaultdict(list)
- for l1, l2 in missing:
- if l1 in branch_lines:
- mba[l1].append(l2)
- return mba
-
- @contract(returns='dict(int: tuple(int, int))')
- def branch_stats(self):
- """Get stats about branches.
-
- Returns a dict mapping line numbers to a tuple:
- (total_exits, taken_exits).
- """
-
- missing_arcs = self.missing_branch_arcs()
- stats = {}
- for lnum in self._branch_lines():
- exits = self.exit_counts[lnum]
- missing = len(missing_arcs[lnum])
- stats[lnum] = (exits, exits - missing)
- return stats
-
-
-class Numbers(SimpleReprMixin):
- """The numerical results of measuring coverage.
-
- This holds the basic statistics from `Analysis`, and is used to roll
- up statistics across files.
-
- """
- # A global to determine the precision on coverage percentages, the number
- # of decimal places.
- _precision = 0
- _near0 = 1.0 # These will change when _precision is changed.
- _near100 = 99.0
-
- def __init__(self, n_files=0, n_statements=0, n_excluded=0, n_missing=0,
- n_branches=0, n_partial_branches=0, n_missing_branches=0
- ):
- self.n_files = n_files
- self.n_statements = n_statements
- self.n_excluded = n_excluded
- self.n_missing = n_missing
- self.n_branches = n_branches
- self.n_partial_branches = n_partial_branches
- self.n_missing_branches = n_missing_branches
-
- def init_args(self):
- """Return a list for __init__(*args) to recreate this object."""
- return [
- self.n_files, self.n_statements, self.n_excluded, self.n_missing,
- self.n_branches, self.n_partial_branches, self.n_missing_branches,
- ]
-
- @classmethod
- def set_precision(cls, precision):
- """Set the number of decimal places used to report percentages."""
- assert 0 <= precision < 10
- cls._precision = precision
- cls._near0 = 1.0 / 10**precision
- cls._near100 = 100.0 - cls._near0
-
- @property
- def n_executed(self):
- """Returns the number of executed statements."""
- return self.n_statements - self.n_missing
-
- @property
- def n_executed_branches(self):
- """Returns the number of executed branches."""
- return self.n_branches - self.n_missing_branches
-
- @property
- def pc_covered(self):
- """Returns a single percentage value for coverage."""
- if self.n_statements > 0:
- numerator, denominator = self.ratio_covered
- pc_cov = (100.0 * numerator) / denominator
- else:
- pc_cov = 100.0
- return pc_cov
-
- @property
- def pc_covered_str(self):
- """Returns the percent covered, as a string, without a percent sign.
-
- Note that "0" is only returned when the value is truly zero, and "100"
- is only returned when the value is truly 100. Rounding can never
- result in either "0" or "100".
-
- """
- pc = self.pc_covered
- if 0 < pc < self._near0:
- pc = self._near0
- elif self._near100 < pc < 100:
- pc = self._near100
- else:
- pc = round(pc, self._precision)
- return "%.*f" % (self._precision, pc)
-
- @classmethod
- def pc_str_width(cls):
- """How many characters wide can pc_covered_str be?"""
- width = 3 # "100"
- if cls._precision > 0:
- width += 1 + cls._precision
- return width
-
- @property
- def ratio_covered(self):
- """Return a numerator and denominator for the coverage ratio."""
- numerator = self.n_executed + self.n_executed_branches
- denominator = self.n_statements + self.n_branches
- return numerator, denominator
-
- def __add__(self, other):
- nums = Numbers()
- nums.n_files = self.n_files + other.n_files
- nums.n_statements = self.n_statements + other.n_statements
- nums.n_excluded = self.n_excluded + other.n_excluded
- nums.n_missing = self.n_missing + other.n_missing
- nums.n_branches = self.n_branches + other.n_branches
- nums.n_partial_branches = (
- self.n_partial_branches + other.n_partial_branches
- )
- nums.n_missing_branches = (
- self.n_missing_branches + other.n_missing_branches
- )
- return nums
-
- def __radd__(self, other):
- # Implementing 0+Numbers allows us to sum() a list of Numbers.
- if other == 0:
- return self
- return NotImplemented # pragma: not covered (we never call it this way)
-
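A small illustrative sketch (not part of the deleted file) of the clamping that `pc_covered_str` documents above: a nearly complete run is reported as "99", never rounded up to "100". The statement counts are made up; `Numbers` is the class defined above, importable from coverage.results::

    from coverage.results import Numbers

    Numbers.set_precision(0)
    almost = Numbers(n_files=1, n_statements=1000, n_missing=1)
    print(almost.pc_covered)      # 99.9
    print(almost.pc_covered_str)  # "99" -- clamped to _near100, not rounded to 100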
-
-def _line_ranges(statements, lines):
- """Produce a list of ranges for `format_lines`."""
- statements = sorted(statements)
- lines = sorted(lines)
-
- pairs = []
- start = None
- lidx = 0
- for stmt in statements:
- if lidx >= len(lines):
- break
- if stmt == lines[lidx]:
- lidx += 1
- if not start:
- start = stmt
- end = stmt
- elif start:
- pairs.append((start, end))
- start = None
- if start:
- pairs.append((start, end))
- return pairs
-
-
-def format_lines(statements, lines, arcs=None):
- """Nicely format a list of line numbers.
-
- Format a list of line numbers for printing by coalescing groups of lines as
- long as the lines represent consecutive statements. This will coalesce
- even if there are gaps between statements.
-
- For example, if `statements` is [1,2,3,4,5,10,11,12,13,14] and
- `lines` is [1,2,5,10,11,13,14] then the result will be "1-2, 5-11, 13-14".
-
- Both `lines` and `statements` can be any iterable. All of the elements of
- `lines` must be in `statements`, and all of the values must be positive
- integers.
-
- If `arcs` is provided, they are (start,[end,end,end]) pairs that will be
- included in the output as long as start isn't in `lines`.
-
- """
- line_items = [(pair[0], nice_pair(pair)) for pair in _line_ranges(statements, lines)]
- if arcs:
- line_exits = sorted(arcs)
- for line, exits in line_exits:
- for ex in sorted(exits):
- if line not in lines and ex not in lines:
- dest = (ex if ex > 0 else "exit")
- line_items.append((line, "%d->%s" % (line, dest)))
-
- ret = ', '.join(t[-1] for t in sorted(line_items))
- return ret
-
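A sketch of the coalescing behaviour documented in `format_lines` above, reproduced with the same values as its docstring (`format_lines` is importable from coverage.results)::

    from coverage.results import format_lines

    stmts = [1, 2, 3, 4, 5, 10, 11, 12, 13, 14]
    miss = [1, 2, 5, 10, 11, 13, 14]
    print(format_lines(stmts, miss))   # "1-2, 5-11, 13-14"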
-
-@contract(total='number', fail_under='number', precision=int, returns=bool)
-def should_fail_under(total, fail_under, precision):
- """Determine if a total should fail due to fail-under.
-
- `total` is a float, the coverage measurement total. `fail_under` is the
- fail_under setting to compare with. `precision` is the number of digits
- to consider after the decimal point.
-
- Returns True if the total should fail.
-
- """
- # We can never achieve higher than 100% coverage, or less than zero.
- if not (0 <= fail_under <= 100.0):
- msg = "fail_under={} is invalid. Must be between 0 and 100.".format(fail_under)
- raise CoverageException(msg)
-
- # Special case for fail_under=100, it must really be 100.
- if fail_under == 100.0 and total != 100.0:
- return True
-
- return round(total, precision) < fail_under
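A sketch of how rounding interacts with the fail-under threshold in the function above; the totals below are made up::

    from coverage.results import should_fail_under

    print(should_fail_under(79.94, 80, 1))    # True  -- rounds to 79.9, below 80
    print(should_fail_under(79.96, 80, 1))    # False -- rounds to 80.0
    print(should_fail_under(99.999, 100, 2))  # True  -- only a true 100.0 passes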
diff --git a/contrib/python/coverage/py3/coverage/sqldata.py b/contrib/python/coverage/py3/coverage/sqldata.py
deleted file mode 100644
index a150fdfd0f..0000000000
--- a/contrib/python/coverage/py3/coverage/sqldata.py
+++ /dev/null
@@ -1,1123 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Sqlite coverage data."""
-
-# TODO: factor out dataop debugging to a wrapper class?
-# TODO: make sure all dataop debugging is in place somehow
-
-import collections
-import datetime
-import glob
-import itertools
-import os
-import re
-import sqlite3
-import sys
-import zlib
-
-from coverage import env
-from coverage.backward import get_thread_id, iitems, to_bytes, to_string
-from coverage.debug import NoDebugging, SimpleReprMixin, clipped_repr
-from coverage.files import PathAliases
-from coverage.misc import CoverageException, contract, file_be_gone, filename_suffix, isolate_module
-from coverage.numbits import numbits_to_nums, numbits_union, nums_to_numbits
-from coverage.version import __version__
-
-os = isolate_module(os)
-
-# If you change the schema, increment the SCHEMA_VERSION, and update the
-# docs in docs/dbschema.rst also.
-
-SCHEMA_VERSION = 7
-
-# Schema versions:
-# 1: Released in 5.0a2
-# 2: Added contexts in 5.0a3.
-# 3: Replaced line table with line_map table.
-# 4: Changed line_map.bitmap to line_map.numbits.
-# 5: Added foreign key declarations.
-# 6: Key-value in meta.
-# 7: line_map -> line_bits
-
-SCHEMA = """\
-CREATE TABLE coverage_schema (
- -- One row, to record the version of the schema in this db.
- version integer
-);
-
-CREATE TABLE meta (
- -- Key-value pairs, to record metadata about the data
- key text,
- value text,
- unique (key)
- -- Keys:
- -- 'has_arcs' boolean -- Is this data recording branches?
- -- 'sys_argv' text -- The coverage command line that recorded the data.
- -- 'version' text -- The version of coverage.py that made the file.
- -- 'when' text -- Datetime when the file was created.
-);
-
-CREATE TABLE file (
- -- A row per file measured.
- id integer primary key,
- path text,
- unique (path)
-);
-
-CREATE TABLE context (
- -- A row per context measured.
- id integer primary key,
- context text,
- unique (context)
-);
-
-CREATE TABLE line_bits (
- -- If recording lines, a row per context per file executed.
- -- All of the line numbers for that file/context are in one numbits.
- file_id integer, -- foreign key to `file`.
- context_id integer, -- foreign key to `context`.
- numbits blob, -- see the numbits functions in coverage.numbits
- foreign key (file_id) references file (id),
- foreign key (context_id) references context (id),
- unique (file_id, context_id)
-);
-
-CREATE TABLE arc (
- -- If recording branches, a row per context per from/to line transition executed.
- file_id integer, -- foreign key to `file`.
- context_id integer, -- foreign key to `context`.
- fromno integer, -- line number jumped from.
- tono integer, -- line number jumped to.
- foreign key (file_id) references file (id),
- foreign key (context_id) references context (id),
- unique (file_id, context_id, fromno, tono)
-);
-
-CREATE TABLE tracer (
- -- A row per file indicating the tracer used for that file.
- file_id integer primary key,
- tracer text,
- foreign key (file_id) references file (id)
-);
-"""
-
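The numbits blobs stored in the line_bits table are produced and merged with the helpers imported above from coverage.numbits. A small sketch of the round trip (the line numbers are made up)::

    from coverage.numbits import nums_to_numbits, numbits_to_nums, numbits_union

    first_run = nums_to_numbits([1, 2, 5])          # compact bytes blob, one bit per line
    second_run = nums_to_numbits([2, 9])
    merged = numbits_union(first_run, second_run)   # the same merge add_lines() performs
    print(sorted(numbits_to_nums(merged)))          # [1, 2, 5, 9]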
-class CoverageData(SimpleReprMixin):
- """Manages collected coverage data, including file storage.
-
- This class is the public supported API to the data that coverage.py
- collects during program execution. It includes information about what code
- was executed. It does not include information from the analysis phase, to
- determine what lines could have been executed, or what lines were not
- executed.
-
- .. note::
-
- The data file is currently a SQLite database file, with a
- :ref:`documented schema <dbschema>`. The schema is subject to change
- though, so be careful about querying it directly. Use this API if you
- can to isolate yourself from changes.
-
- There are a number of kinds of data that can be collected:
-
- * **lines**: the line numbers of source lines that were executed.
- These are always available.
-
- * **arcs**: pairs of source and destination line numbers for transitions
- between source lines. These are only available if branch coverage was
- used.
-
- * **file tracer names**: the module names of the file tracer plugins that
- handled each file in the data.
-
- Lines, arcs, and file tracer names are stored for each source file. File
- names in this API are case-sensitive, even on platforms with
- case-insensitive file systems.
-
- A data file either stores lines, or arcs, but not both.
-
- A data file is associated with the data when the :class:`CoverageData`
- is created, using the parameters `basename`, `suffix`, and `no_disk`. The
- base name can be queried with :meth:`base_filename`, and the actual file
- name being used is available from :meth:`data_filename`.
-
- To read an existing coverage.py data file, use :meth:`read`. You can then
- access the line, arc, or file tracer data with :meth:`lines`, :meth:`arcs`,
- or :meth:`file_tracer`.
-
- The :meth:`has_arcs` method indicates whether arc data is available. You
- can get a set of the files in the data with :meth:`measured_files`. As
- with most Python containers, you can determine if there is any data at all
- by using this object as a boolean value.
-
- The contexts for each line in a file can be read with
- :meth:`contexts_by_lineno`.
-
- To limit querying to certain contexts, use :meth:`set_query_context` or
- :meth:`set_query_contexts`. These will narrow the focus of subsequent
- :meth:`lines`, :meth:`arcs`, and :meth:`contexts_by_lineno` calls. The set
- of all measured context names can be retrieved with
- :meth:`measured_contexts`.
-
- Most data files will be created by coverage.py itself, but you can use
- methods here to create data files if you like. The :meth:`add_lines`,
- :meth:`add_arcs`, and :meth:`add_file_tracers` methods add data, in ways
- that are convenient for coverage.py.
-
- To record data for contexts, use :meth:`set_context` to set a context to
- be used for subsequent :meth:`add_lines` and :meth:`add_arcs` calls.
-
- To add a source file without any measured data, use :meth:`touch_file`,
- or :meth:`touch_files` for a list of such files.
-
- Write the data to its file with :meth:`write`.
-
- You can clear the data in memory with :meth:`erase`. Two data collections
- can be combined by using :meth:`update` on one :class:`CoverageData`,
- passing it the other.
-
- Data in a :class:`CoverageData` can be serialized and deserialized with
- :meth:`dumps` and :meth:`loads`.
-
- """
-
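A minimal sketch of the workflow the docstring above describes: record line data under a named context, then query it back. The file name and line numbers are made up; `no_disk=True` keeps everything in memory::

    data = CoverageData(no_disk=True)
    data.set_context("test_one")
    data.add_lines({"example.py": {1: None, 3: None, 4: None}})

    print(data.measured_files())                      # {'example.py'}
    print(sorted(data.lines("example.py")))           # [1, 3, 4]
    print(data.contexts_by_lineno("example.py")[3])   # ['test_one']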
- def __init__(self, basename=None, suffix=None, no_disk=False, warn=None, debug=None):
- """Create a :class:`CoverageData` object to hold coverage-measured data.
-
- Arguments:
- basename (str): the base name of the data file, defaulting to
- ".coverage".
- suffix (str or bool): has the same meaning as the `data_suffix`
- argument to :class:`coverage.Coverage`.
- no_disk (bool): if True, keep all data in memory, and don't
- write any disk file.
- warn: a warning callback function, accepting a warning message
- argument.
- debug: a `DebugControl` object (optional)
-
- """
- self._no_disk = no_disk
- self._basename = os.path.abspath(basename or ".coverage")
- self._suffix = suffix
- self._warn = warn
- self._debug = debug or NoDebugging()
-
- self._choose_filename()
- self._file_map = {}
- # Maps thread ids to SqliteDb objects.
- self._dbs = {}
- self._pid = os.getpid()
-
- # Are we in sync with the data file?
- self._have_used = False
-
- self._has_lines = False
- self._has_arcs = False
-
- self._current_context = None
- self._current_context_id = None
- self._query_context_ids = None
-
- def _choose_filename(self):
- """Set self._filename based on inited attributes."""
- if self._no_disk:
- self._filename = ":memory:"
- else:
- self._filename = self._basename
- suffix = filename_suffix(self._suffix)
- if suffix:
- self._filename += "." + suffix
-
- def _reset(self):
- """Reset our attributes."""
- if self._dbs:
- for db in self._dbs.values():
- db.close()
- self._dbs = {}
- self._file_map = {}
- self._have_used = False
- self._current_context_id = None
-
- def _create_db(self):
- """Create a db file that doesn't exist yet.
-
- Initializes the schema and certain metadata.
- """
- if self._debug.should('dataio'):
- self._debug.write("Creating data file {!r}".format(self._filename))
- self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
- with db:
- db.executescript(SCHEMA)
- db.execute("insert into coverage_schema (version) values (?)", (SCHEMA_VERSION,))
- db.executemany(
- "insert into meta (key, value) values (?, ?)",
- [
- ('sys_argv', str(getattr(sys, 'argv', None))),
- ('version', __version__),
- ('when', datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')),
- ]
- )
-
- def _open_db(self):
- """Open an existing db file, and read its metadata."""
- if self._debug.should('dataio'):
- self._debug.write("Opening data file {!r}".format(self._filename))
- self._dbs[get_thread_id()] = SqliteDb(self._filename, self._debug)
- self._read_db()
-
- def _read_db(self):
- """Read the metadata from a database so that we are ready to use it."""
- with self._dbs[get_thread_id()] as db:
- try:
- schema_version, = db.execute_one("select version from coverage_schema")
- except Exception as exc:
- raise CoverageException(
- "Data file {!r} doesn't seem to be a coverage data file: {}".format(
- self._filename, exc
- )
- )
- else:
- if schema_version != SCHEMA_VERSION:
- raise CoverageException(
- "Couldn't use data file {!r}: wrong schema: {} instead of {}".format(
- self._filename, schema_version, SCHEMA_VERSION
- )
- )
-
- for row in db.execute("select value from meta where key = 'has_arcs'"):
- self._has_arcs = bool(int(row[0]))
- self._has_lines = not self._has_arcs
-
- for path, file_id in db.execute("select path, id from file"):
- self._file_map[path] = file_id
-
- def _connect(self):
- """Get the SqliteDb object to use."""
- if get_thread_id() not in self._dbs:
- if os.path.exists(self._filename):
- self._open_db()
- else:
- self._create_db()
- return self._dbs[get_thread_id()]
-
- def __nonzero__(self):
- if (get_thread_id() not in self._dbs and not os.path.exists(self._filename)):
- return False
- try:
- with self._connect() as con:
- rows = con.execute("select * from file limit 1")
- return bool(list(rows))
- except CoverageException:
- return False
-
- __bool__ = __nonzero__
-
- @contract(returns='bytes')
- def dumps(self):
- """Serialize the current data to a byte string.
-
- The format of the serialized data is not documented. It is only
- suitable for use with :meth:`loads` in the same version of
- coverage.py.
-
- Returns:
- A byte string of serialized data.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataio'):
- self._debug.write("Dumping data from data file {!r}".format(self._filename))
- with self._connect() as con:
- return b'z' + zlib.compress(to_bytes(con.dump()))
-
- @contract(data='bytes')
- def loads(self, data):
- """Deserialize data from :meth:`dumps`
-
- Use with a newly-created empty :class:`CoverageData` object. It's
- undefined what happens if the object already has data in it.
-
- Arguments:
- data: A byte string of serialized data produced by :meth:`dumps`.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataio'):
- self._debug.write("Loading data into data file {!r}".format(self._filename))
- if data[:1] != b'z':
- raise CoverageException(
- "Unrecognized serialization: {!r} (head of {} bytes)".format(data[:40], len(data))
- )
- script = to_string(zlib.decompress(data[1:]))
- self._dbs[get_thread_id()] = db = SqliteDb(self._filename, self._debug)
- with db:
- db.executescript(script)
- self._read_db()
- self._have_used = True
-
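A sketch of the dumps/loads round trip documented above (an opaque, same-version-only format); the file name and lines are made up::

    src = CoverageData(no_disk=True)
    src.add_lines({"prog.py": {1: None, 2: None}})
    blob = src.dumps()                    # b'z' + a zlib-compressed SQL dump

    dst = CoverageData(no_disk=True)
    dst.loads(blob)
    print(sorted(dst.lines("prog.py")))   # [1, 2]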
- def _file_id(self, filename, add=False):
- """Get the file id for `filename`.
-
- If filename is not in the database yet, add it if `add` is True.
- If `add` is not True, return None.
- """
- if filename not in self._file_map:
- if add:
- with self._connect() as con:
- cur = con.execute("insert or replace into file (path) values (?)", (filename,))
- self._file_map[filename] = cur.lastrowid
- return self._file_map.get(filename)
-
- def _context_id(self, context):
- """Get the id for a context."""
- assert context is not None
- self._start_using()
- with self._connect() as con:
- row = con.execute_one("select id from context where context = ?", (context,))
- if row is not None:
- return row[0]
- else:
- return None
-
- def set_context(self, context):
- """Set the current context for future :meth:`add_lines` etc.
-
- `context` is a str, the name of the context to use for the next data
- additions. The context persists until the next :meth:`set_context`.
-
- .. versionadded:: 5.0
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Setting context: %r" % (context,))
- self._current_context = context
- self._current_context_id = None
-
- def _set_context_id(self):
- """Use the _current_context to set _current_context_id."""
- context = self._current_context or ""
- context_id = self._context_id(context)
- if context_id is not None:
- self._current_context_id = context_id
- else:
- with self._connect() as con:
- cur = con.execute("insert into context (context) values (?)", (context,))
- self._current_context_id = cur.lastrowid
-
- def base_filename(self):
- """The base filename for storing data.
-
- .. versionadded:: 5.0
-
- """
- return self._basename
-
- def data_filename(self):
- """Where is the data stored?
-
- .. versionadded:: 5.0
-
- """
- return self._filename
-
- def add_lines(self, line_data):
- """Add measured line data.
-
- `line_data` is a dictionary mapping file names to dictionaries::
-
- { filename: { lineno: None, ... }, ...}
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding lines: %d files, %d lines total" % (
- len(line_data), sum(len(lines) for lines in line_data.values())
- ))
- self._start_using()
- self._choose_lines_or_arcs(lines=True)
- if not line_data:
- return
- with self._connect() as con:
- self._set_context_id()
- for filename, linenos in iitems(line_data):
- linemap = nums_to_numbits(linenos)
- file_id = self._file_id(filename, add=True)
- query = "select numbits from line_bits where file_id = ? and context_id = ?"
- existing = list(con.execute(query, (file_id, self._current_context_id)))
- if existing:
- linemap = numbits_union(linemap, existing[0][0])
-
- con.execute(
- "insert or replace into line_bits "
- " (file_id, context_id, numbits) values (?, ?, ?)",
- (file_id, self._current_context_id, linemap),
- )
-
- def add_arcs(self, arc_data):
- """Add measured arc data.
-
- `arc_data` is a dictionary mapping file names to dictionaries::
-
- { filename: { (l1,l2): None, ... }, ...}
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding arcs: %d files, %d arcs total" % (
- len(arc_data), sum(len(arcs) for arcs in arc_data.values())
- ))
- self._start_using()
- self._choose_lines_or_arcs(arcs=True)
- if not arc_data:
- return
- with self._connect() as con:
- self._set_context_id()
- for filename, arcs in iitems(arc_data):
- file_id = self._file_id(filename, add=True)
- data = [(file_id, self._current_context_id, fromno, tono) for fromno, tono in arcs]
- con.executemany(
- "insert or ignore into arc "
- "(file_id, context_id, fromno, tono) values (?, ?, ?, ?)",
- data,
- )
-
- def _choose_lines_or_arcs(self, lines=False, arcs=False):
- """Force the data file to choose between lines and arcs."""
- assert lines or arcs
- assert not (lines and arcs)
- if lines and self._has_arcs:
- raise CoverageException("Can't add line measurements to existing branch data")
- if arcs and self._has_lines:
- raise CoverageException("Can't add branch measurements to existing line data")
- if not self._has_arcs and not self._has_lines:
- self._has_lines = lines
- self._has_arcs = arcs
- with self._connect() as con:
- con.execute(
- "insert into meta (key, value) values (?, ?)",
- ('has_arcs', str(int(arcs)))
- )
-
- def add_file_tracers(self, file_tracers):
- """Add per-file plugin information.
-
- `file_tracers` is { filename: plugin_name, ... }
-
- """
- if self._debug.should('dataop'):
- self._debug.write("Adding file tracers: %d files" % (len(file_tracers),))
- if not file_tracers:
- return
- self._start_using()
- with self._connect() as con:
- for filename, plugin_name in iitems(file_tracers):
- file_id = self._file_id(filename)
- if file_id is None:
- raise CoverageException(
- "Can't add file tracer data for unmeasured file '%s'" % (filename,)
- )
-
- existing_plugin = self.file_tracer(filename)
- if existing_plugin:
- if existing_plugin != plugin_name:
- raise CoverageException(
- "Conflicting file tracer name for '%s': %r vs %r" % (
- filename, existing_plugin, plugin_name,
- )
- )
- elif plugin_name:
- con.execute(
- "insert into tracer (file_id, tracer) values (?, ?)",
- (file_id, plugin_name)
- )
-
- def touch_file(self, filename, plugin_name=""):
- """Ensure that `filename` appears in the data, empty if needed.
-
- `plugin_name` is the name of the plugin responsible for this file. It is used
- to associate the right filereporter, etc.
- """
- self.touch_files([filename], plugin_name)
-
- def touch_files(self, filenames, plugin_name=""):
- """Ensure that `filenames` appear in the data, empty if needed.
-
- `plugin_name` is the name of the plugin responsible for these files. It is used
- to associate the right filereporter, etc.
- """
- if self._debug.should('dataop'):
- self._debug.write("Touching %r" % (filenames,))
- self._start_using()
- with self._connect(): # Use this to get one transaction.
- if not self._has_arcs and not self._has_lines:
- raise CoverageException("Can't touch files in an empty CoverageData")
-
- for filename in filenames:
- self._file_id(filename, add=True)
- if plugin_name:
- # Set the tracer for this file
- self.add_file_tracers({filename: plugin_name})
-
- def update(self, other_data, aliases=None):
- """Update this data with data from several other :class:`CoverageData` instances.
-
- If `aliases` is provided, it's a `PathAliases` object that is used to
- re-map paths to match the local machine's.
- """
- if self._debug.should('dataop'):
- self._debug.write("Updating with data from %r" % (
- getattr(other_data, '_filename', '???'),
- ))
- if self._has_lines and other_data._has_arcs:
- raise CoverageException("Can't combine arc data with line data")
- if self._has_arcs and other_data._has_lines:
- raise CoverageException("Can't combine line data with arc data")
-
- aliases = aliases or PathAliases()
-
-        # Make sure the database we're writing to exists before we start
-        # nesting contexts.
- self._start_using()
-
- # Collector for all arcs, lines and tracers
- other_data.read()
- with other_data._connect() as conn:
- # Get files data.
- cur = conn.execute('select path from file')
- files = {path: aliases.map(path) for (path,) in cur}
- cur.close()
-
- # Get contexts data.
- cur = conn.execute('select context from context')
- contexts = [context for (context,) in cur]
- cur.close()
-
- # Get arc data.
- cur = conn.execute(
- 'select file.path, context.context, arc.fromno, arc.tono '
- 'from arc '
- 'inner join file on file.id = arc.file_id '
- 'inner join context on context.id = arc.context_id'
- )
- arcs = [(files[path], context, fromno, tono) for (path, context, fromno, tono) in cur]
- cur.close()
-
- # Get line data.
- cur = conn.execute(
- 'select file.path, context.context, line_bits.numbits '
- 'from line_bits '
- 'inner join file on file.id = line_bits.file_id '
- 'inner join context on context.id = line_bits.context_id'
- )
- lines = {
- (files[path], context): numbits
- for (path, context, numbits) in cur
- }
- cur.close()
-
- # Get tracer data.
- cur = conn.execute(
- 'select file.path, tracer '
- 'from tracer '
- 'inner join file on file.id = tracer.file_id'
- )
- tracers = {files[path]: tracer for (path, tracer) in cur}
- cur.close()
-
- with self._connect() as conn:
- conn.con.isolation_level = 'IMMEDIATE'
-
- # Get all tracers in the DB. Files not in the tracers are assumed
- # to have an empty string tracer. Since Sqlite does not support
- # full outer joins, we have to make two queries to fill the
- # dictionary.
- this_tracers = {path: '' for path, in conn.execute('select path from file')}
- this_tracers.update({
- aliases.map(path): tracer
- for path, tracer in conn.execute(
- 'select file.path, tracer from tracer '
- 'inner join file on file.id = tracer.file_id'
- )
- })
-
- # Create all file and context rows in the DB.
- conn.executemany(
- 'insert or ignore into file (path) values (?)',
- ((file,) for file in files.values())
- )
- file_ids = {
- path: id
- for id, path in conn.execute('select id, path from file')
- }
- conn.executemany(
- 'insert or ignore into context (context) values (?)',
- ((context,) for context in contexts)
- )
- context_ids = {
- context: id
- for id, context in conn.execute('select id, context from context')
- }
-
-            # Prepare tracers and fail if a conflict is found. tracer_map
-            # collects the tracer to be inserted for each re-mapped path.
- tracer_map = {}
- for path in files.values():
- this_tracer = this_tracers.get(path)
- other_tracer = tracers.get(path, '')
- # If there is no tracer, there is always the None tracer.
- if this_tracer is not None and this_tracer != other_tracer:
- raise CoverageException(
- "Conflicting file tracer name for '%s': %r vs %r" % (
- path, this_tracer, other_tracer
- )
- )
- tracer_map[path] = other_tracer
-
- # Prepare arc and line rows to be inserted by converting the file
- # and context strings with integer ids. Then use the efficient
- # `executemany()` to insert all rows at once.
- arc_rows = (
- (file_ids[file], context_ids[context], fromno, tono)
- for file, context, fromno, tono in arcs
- )
-
- # Get line data.
- cur = conn.execute(
- 'select file.path, context.context, line_bits.numbits '
- 'from line_bits '
- 'inner join file on file.id = line_bits.file_id '
- 'inner join context on context.id = line_bits.context_id'
- )
- for path, context, numbits in cur:
- key = (aliases.map(path), context)
- if key in lines:
- numbits = numbits_union(lines[key], numbits)
- lines[key] = numbits
- cur.close()
-
- if arcs:
- self._choose_lines_or_arcs(arcs=True)
-
- # Write the combined data.
- conn.executemany(
- 'insert or ignore into arc '
- '(file_id, context_id, fromno, tono) values (?, ?, ?, ?)',
- arc_rows
- )
-
- if lines:
- self._choose_lines_or_arcs(lines=True)
- conn.execute("delete from line_bits")
- conn.executemany(
- "insert into line_bits "
- "(file_id, context_id, numbits) values (?, ?, ?)",
- [
- (file_ids[file], context_ids[context], numbits)
- for (file, context), numbits in lines.items()
- ]
- )
- conn.executemany(
- 'insert or ignore into tracer (file_id, tracer) values (?, ?)',
- ((file_ids[filename], tracer) for filename, tracer in tracer_map.items())
- )
-
- # Update all internal cache data.
- self._reset()
- self.read()
-
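A sketch of combining a data file recorded elsewhere into the local one, re-mapping its paths with the PathAliases imported above. The file names and the "/ci/checkout/project" prefix are assumptions for the example, and both data files are assumed to exist from earlier runs::

    local = CoverageData(basename=".coverage")
    local.read()                               # read first so update() doesn't start fresh

    remote = CoverageData(basename=".coverage.worker1")
    aliases = PathAliases()
    aliases.add("/ci/checkout/project", ".")   # re-root that tree at the local directory

    local.update(remote, aliases=aliases)
    local.write()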
- def erase(self, parallel=False):
- """Erase the data in this object.
-
- If `parallel` is true, then also deletes data files created from the
- basename by parallel-mode.
-
- """
- self._reset()
- if self._no_disk:
- return
- if self._debug.should('dataio'):
- self._debug.write("Erasing data file {!r}".format(self._filename))
- file_be_gone(self._filename)
- if parallel:
- data_dir, local = os.path.split(self._filename)
- localdot = local + '.*'
- pattern = os.path.join(os.path.abspath(data_dir), localdot)
- for filename in glob.glob(pattern):
- if self._debug.should('dataio'):
- self._debug.write("Erasing parallel data file {!r}".format(filename))
- file_be_gone(filename)
-
- def read(self):
- """Start using an existing data file."""
- with self._connect(): # TODO: doesn't look right
- self._have_used = True
-
- def write(self):
- """Ensure the data is written to the data file."""
- pass
-
- def _start_using(self):
- """Call this before using the database at all."""
- if self._pid != os.getpid():
- # Looks like we forked! Have to start a new data file.
- self._reset()
- self._choose_filename()
- self._pid = os.getpid()
- if not self._have_used:
- self.erase()
- self._have_used = True
-
- def has_arcs(self):
- """Does the database have arcs (True) or lines (False)."""
- return bool(self._has_arcs)
-
- def measured_files(self):
- """A set of all files that had been measured."""
- return set(self._file_map)
-
- def measured_contexts(self):
- """A set of all contexts that have been measured.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- with self._connect() as con:
- contexts = {row[0] for row in con.execute("select distinct(context) from context")}
- return contexts
-
- def file_tracer(self, filename):
- """Get the plugin name of the file tracer for a file.
-
- Returns the name of the plugin that handles this file. If the file was
- measured, but didn't use a plugin, then "" is returned. If the file
- was not measured, then None is returned.
-
- """
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- row = con.execute_one("select tracer from tracer where file_id = ?", (file_id,))
- if row is not None:
- return row[0] or ""
- return "" # File was measured, but no tracer associated.
-
- def set_query_context(self, context):
- """Set a context for subsequent querying.
-
- The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
- calls will be limited to only one context. `context` is a string which
- must match a context exactly. If it does not, no exception is raised,
- but queries will return no data.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- with self._connect() as con:
- cur = con.execute("select id from context where context = ?", (context,))
- self._query_context_ids = [row[0] for row in cur.fetchall()]
-
- def set_query_contexts(self, contexts):
- """Set a number of contexts for subsequent querying.
-
- The next :meth:`lines`, :meth:`arcs`, or :meth:`contexts_by_lineno`
- calls will be limited to the specified contexts. `contexts` is a list
- of Python regular expressions. Contexts will be matched using
- :func:`re.search <python:re.search>`. Data will be included in query
- results if they are part of any of the contexts matched.
-
- .. versionadded:: 5.0
-
- """
- self._start_using()
- if contexts:
- with self._connect() as con:
- context_clause = ' or '.join(['context regexp ?'] * len(contexts))
- cur = con.execute("select id from context where " + context_clause, contexts)
- self._query_context_ids = [row[0] for row in cur.fetchall()]
- else:
- self._query_context_ids = None
-
- def lines(self, filename):
- """Get the list of lines executed for a file.
-
- If the file was not measured, returns None. A file might be measured,
- and have no lines executed, in which case an empty list is returned.
-
- If the file was executed, returns a list of integers, the line numbers
- executed in the file. The list is in no particular order.
-
- """
- self._start_using()
- if self.has_arcs():
- arcs = self.arcs(filename)
- if arcs is not None:
- all_lines = itertools.chain.from_iterable(arcs)
- return list({l for l in all_lines if l > 0})
-
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- else:
- query = "select numbits from line_bits where file_id = ?"
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and context_id in (" + ids_array + ")"
- data += self._query_context_ids
- bitmaps = list(con.execute(query, data))
- nums = set()
- for row in bitmaps:
- nums.update(numbits_to_nums(row[0]))
- return list(nums)
-
- def arcs(self, filename):
- """Get the list of arcs executed for a file.
-
- If the file was not measured, returns None. A file might be measured,
- and have no arcs executed, in which case an empty list is returned.
-
- If the file was executed, returns a list of 2-tuples of integers. Each
- pair is a starting line number and an ending line number for a
- transition from one line to another. The list is in no particular
- order.
-
- Negative numbers have special meaning. If the starting line number is
- -N, it represents an entry to the code object that starts at line N.
-        If the ending line number is -N, it's an exit from the code object that
- starts at line N.
-
- """
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return None
- else:
- query = "select distinct fromno, tono from arc where file_id = ?"
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and context_id in (" + ids_array + ")"
- data += self._query_context_ids
- arcs = con.execute(query, data)
- return list(arcs)
-
- def contexts_by_lineno(self, filename):
- """Get the contexts for each line in a file.
-
- Returns:
- A dict mapping line numbers to a list of context names.
-
- .. versionadded:: 5.0
-
- """
- lineno_contexts_map = collections.defaultdict(list)
- self._start_using()
- with self._connect() as con:
- file_id = self._file_id(filename)
- if file_id is None:
- return lineno_contexts_map
- if self.has_arcs():
- query = (
- "select arc.fromno, arc.tono, context.context "
- "from arc, context "
- "where arc.file_id = ? and arc.context_id = context.id"
- )
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and arc.context_id in (" + ids_array + ")"
- data += self._query_context_ids
- for fromno, tono, context in con.execute(query, data):
- if context not in lineno_contexts_map[fromno]:
- lineno_contexts_map[fromno].append(context)
- if context not in lineno_contexts_map[tono]:
- lineno_contexts_map[tono].append(context)
- else:
- query = (
- "select l.numbits, c.context from line_bits l, context c "
- "where l.context_id = c.id "
- "and file_id = ?"
- )
- data = [file_id]
- if self._query_context_ids is not None:
- ids_array = ', '.join('?' * len(self._query_context_ids))
- query += " and l.context_id in (" + ids_array + ")"
- data += self._query_context_ids
- for numbits, context in con.execute(query, data):
- for lineno in numbits_to_nums(numbits):
- lineno_contexts_map[lineno].append(context)
- return lineno_contexts_map
-
- @classmethod
- def sys_info(cls):
- """Our information for `Coverage.sys_info`.
-
- Returns a list of (key, value) pairs.
-
- """
- with SqliteDb(":memory:", debug=NoDebugging()) as db:
- temp_store = [row[0] for row in db.execute("pragma temp_store")]
- compile_options = [row[0] for row in db.execute("pragma compile_options")]
-
- return [
- ('sqlite3_version', sqlite3.version),
- ('sqlite3_sqlite_version', sqlite3.sqlite_version),
- ('sqlite3_temp_store', temp_store),
- ('sqlite3_compile_options', compile_options),
- ]
-
-
-class SqliteDb(SimpleReprMixin):
- """A simple abstraction over a SQLite database.
-
- Use as a context manager, then you can use it like a
- :class:`python:sqlite3.Connection` object::
-
- with SqliteDb(filename, debug_control) as db:
- db.execute("insert into schema (version) values (?)", (SCHEMA_VERSION,))
-
- """
- def __init__(self, filename, debug):
- self.debug = debug if debug.should('sql') else None
- self.filename = filename
- self.nest = 0
- self.con = None
-
- def _connect(self):
- """Connect to the db and do universal initialization."""
- if self.con is not None:
- return
-
- # SQLite on Windows on py2 won't open a file if the filename argument
- # has non-ascii characters in it. Opening a relative file name avoids
- # a problem if the current directory has non-ascii.
- filename = self.filename
- if env.WINDOWS and env.PY2:
- try:
- filename = os.path.relpath(self.filename)
- except ValueError:
- # ValueError can be raised under Windows when os.getcwd() returns a
- # folder from a different drive than the drive of self.filename in
- # which case we keep the original value of self.filename unchanged,
- # hoping that we won't face the non-ascii directory problem.
- pass
-
- # It can happen that Python switches threads while the tracer writes
- # data. The second thread will also try to write to the data,
- # effectively causing a nested context. However, given the idempotent
- # nature of the tracer operations, sharing a connection among threads
- # is not a problem.
- if self.debug:
- self.debug.write("Connecting to {!r}".format(self.filename))
- self.con = sqlite3.connect(filename, check_same_thread=False)
- self.con.create_function('REGEXP', 2, _regexp)
-
- # This pragma makes writing faster. It disables rollbacks, but we never need them.
- # PyPy needs the .close() calls here, or sqlite gets twisted up:
- # https://bitbucket.org/pypy/pypy/issues/2872/default-isolation-mode-is-different-on
- self.execute("pragma journal_mode=off").close()
- # This pragma makes writing faster.
- self.execute("pragma synchronous=off").close()
-
- def close(self):
- """If needed, close the connection."""
- if self.con is not None and self.filename != ":memory:":
- self.con.close()
- self.con = None
-
- def __enter__(self):
- if self.nest == 0:
- self._connect()
- self.con.__enter__()
- self.nest += 1
- return self
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.nest -= 1
- if self.nest == 0:
- try:
- self.con.__exit__(exc_type, exc_value, traceback)
- self.close()
- except Exception as exc:
- if self.debug:
- self.debug.write("EXCEPTION from __exit__: {}".format(exc))
- raise
-
- def execute(self, sql, parameters=()):
- """Same as :meth:`python:sqlite3.Connection.execute`."""
- if self.debug:
- tail = " with {!r}".format(parameters) if parameters else ""
- self.debug.write("Executing {!r}{}".format(sql, tail))
- try:
- try:
- return self.con.execute(sql, parameters)
- except Exception:
- # In some cases, an error might happen that isn't really an
- # error. Try again immediately.
- # https://github.com/nedbat/coveragepy/issues/1010
- return self.con.execute(sql, parameters)
- except sqlite3.Error as exc:
- msg = str(exc)
- try:
- # `execute` is the first thing we do with the database, so try
- # hard to provide useful hints if something goes wrong now.
- with open(self.filename, "rb") as bad_file:
- cov4_sig = b"!coverage.py: This is a private format"
- if bad_file.read(len(cov4_sig)) == cov4_sig:
- msg = (
- "Looks like a coverage 4.x data file. "
- "Are you mixing versions of coverage?"
- )
- except Exception:
- pass
- if self.debug:
- self.debug.write("EXCEPTION from execute: {}".format(msg))
- raise CoverageException("Couldn't use data file {!r}: {}".format(self.filename, msg))
-
- def execute_one(self, sql, parameters=()):
- """Execute a statement and return the one row that results.
-
- This is like execute(sql, parameters).fetchone(), except it is
- correct in reading the entire result set. This will raise an
- exception if more than one row results.
-
- Returns a row, or None if there were no rows.
- """
- rows = list(self.execute(sql, parameters))
- if len(rows) == 0:
- return None
- elif len(rows) == 1:
- return rows[0]
- else:
- raise CoverageException("Sql {!r} shouldn't return {} rows".format(sql, len(rows)))
-
- def executemany(self, sql, data):
- """Same as :meth:`python:sqlite3.Connection.executemany`."""
- if self.debug:
- data = list(data)
- self.debug.write("Executing many {!r} with {} rows".format(sql, len(data)))
- return self.con.executemany(sql, data)
-
- def executescript(self, script):
- """Same as :meth:`python:sqlite3.Connection.executescript`."""
- if self.debug:
- self.debug.write("Executing script with {} chars: {}".format(
- len(script), clipped_repr(script, 100),
- ))
- self.con.executescript(script)
-
- def dump(self):
- """Return a multi-line string, the SQL dump of the database."""
- return "\n".join(self.con.iterdump())
-
-
-def _regexp(text, pattern):
-    """A regexp function for SQLite."""
-    # SQLite evaluates "X REGEXP Y" as regexp(Y, X), so the first argument
-    # received here is the pattern and the second is the text to search.
-    return re.search(text, pattern) is not None
diff --git a/contrib/python/coverage/py3/coverage/summary.py b/contrib/python/coverage/py3/coverage/summary.py
deleted file mode 100644
index 65f8047006..0000000000
--- a/contrib/python/coverage/py3/coverage/summary.py
+++ /dev/null
@@ -1,152 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""Summary reporting"""
-
-import sys
-
-from coverage import env
-from coverage.report import get_analysis_to_report
-from coverage.results import Numbers
-from coverage.misc import CoverageException, output_encoding
-
-
-class SummaryReporter(object):
- """A reporter for writing the summary report."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
- self.branches = coverage.get_data().has_arcs()
- self.outfile = None
- self.fr_analysis = []
- self.skipped_count = 0
- self.empty_count = 0
- self.total = Numbers()
- self.fmt_err = u"%s %s: %s"
-
- def writeout(self, line):
- """Write a line to the output, adding a newline."""
- if env.PY2:
- line = line.encode(output_encoding())
- self.outfile.write(line.rstrip())
- self.outfile.write("\n")
-
- def report(self, morfs, outfile=None):
- """Writes a report summarizing coverage statistics per module.
-
- `outfile` is a file object to write the summary to. It must be opened
- for native strings (bytes on Python 2, Unicode on Python 3).
-
- """
- self.outfile = outfile or sys.stdout
-
- self.coverage.get_data().set_query_contexts(self.config.report_contexts)
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.report_one_file(fr, analysis)
-
- # Prepare the formatting strings, header, and column sorting.
- max_name = max([len(fr.relative_filename()) for (fr, analysis) in self.fr_analysis] + [5])
- fmt_name = u"%%- %ds " % max_name
- fmt_skip_covered = u"\n%s file%s skipped due to complete coverage."
- fmt_skip_empty = u"\n%s empty file%s skipped."
-
- header = (fmt_name % "Name") + u" Stmts Miss"
- fmt_coverage = fmt_name + u"%6d %6d"
- if self.branches:
- header += u" Branch BrPart"
- fmt_coverage += u" %6d %6d"
- width100 = Numbers.pc_str_width()
- header += u"%*s" % (width100+4, "Cover")
- fmt_coverage += u"%%%ds%%%%" % (width100+3,)
- if self.config.show_missing:
- header += u" Missing"
- fmt_coverage += u" %s"
- rule = u"-" * len(header)
-
- column_order = dict(name=0, stmts=1, miss=2, cover=-1)
- if self.branches:
- column_order.update(dict(branch=3, brpart=4))
-
- # Write the header
- self.writeout(header)
- self.writeout(rule)
-
- # `lines` is a list of pairs, (line text, line values). The line text
- # is a string that will be printed, and line values is a tuple of
- # sortable values.
- lines = []
-
- for (fr, analysis) in self.fr_analysis:
- nums = analysis.numbers
-
- args = (fr.relative_filename(), nums.n_statements, nums.n_missing)
- if self.branches:
- args += (nums.n_branches, nums.n_partial_branches)
- args += (nums.pc_covered_str,)
- if self.config.show_missing:
- args += (analysis.missing_formatted(branches=True),)
- text = fmt_coverage % args
- # Add numeric percent coverage so that sorting makes sense.
- args += (nums.pc_covered,)
- lines.append((text, args))
-
- # Sort the lines and write them out.
- if getattr(self.config, 'sort', None):
- sort_option = self.config.sort.lower()
- reverse = False
- if sort_option[0] == '-':
- reverse = True
- sort_option = sort_option[1:]
- elif sort_option[0] == '+':
- sort_option = sort_option[1:]
-
- position = column_order.get(sort_option)
- if position is None:
- raise CoverageException("Invalid sorting option: {!r}".format(self.config.sort))
- lines.sort(key=lambda l: (l[1][position], l[0]), reverse=reverse)
-
- for line in lines:
- self.writeout(line[0])
-
- # Write a TOTAL line if we had at least one file.
- if self.total.n_files > 0:
- self.writeout(rule)
- args = ("TOTAL", self.total.n_statements, self.total.n_missing)
- if self.branches:
- args += (self.total.n_branches, self.total.n_partial_branches)
- args += (self.total.pc_covered_str,)
- if self.config.show_missing:
- args += ("",)
- self.writeout(fmt_coverage % args)
-
- # Write other final lines.
- if not self.total.n_files and not self.skipped_count:
- raise CoverageException("No data to report.")
-
- if self.config.skip_covered and self.skipped_count:
- self.writeout(
- fmt_skip_covered % (self.skipped_count, 's' if self.skipped_count > 1 else '')
- )
- if self.config.skip_empty and self.empty_count:
- self.writeout(
- fmt_skip_empty % (self.empty_count, 's' if self.empty_count > 1 else '')
- )
-
- return self.total.n_statements and self.total.pc_covered
-
- def report_one_file(self, fr, analysis):
- """Report on just one file, the callback from report()."""
- nums = analysis.numbers
- self.total += nums
-
- no_missing_lines = (nums.n_missing == 0)
- no_missing_branches = (nums.n_partial_branches == 0)
- if self.config.skip_covered and no_missing_lines and no_missing_branches:
- # Don't report on 100% files.
- self.skipped_count += 1
- elif self.config.skip_empty and nums.n_statements == 0:
- # Don't report on empty files.
- self.empty_count += 1
- else:
- self.fr_analysis.append((fr, analysis))
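SummaryReporter is normally driven by the public Coverage.report() call, but as a sketch it can be exercised directly against a previously written data file (a ".coverage" file from an earlier run is assumed to exist)::

    import sys
    import coverage

    cov = coverage.Coverage()
    cov.load()                        # read the existing .coverage data file
    reporter = SummaryReporter(cov)
    total = reporter.report(morfs=None, outfile=sys.stdout)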
diff --git a/contrib/python/coverage/py3/coverage/templite.py b/contrib/python/coverage/py3/coverage/templite.py
deleted file mode 100644
index 7d4024e0af..0000000000
--- a/contrib/python/coverage/py3/coverage/templite.py
+++ /dev/null
@@ -1,302 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""A simple Python template renderer, for a nano-subset of Django syntax.
-
-For a detailed discussion of this code, see this chapter from 500 Lines:
-http://aosabook.org/en/500L/a-template-engine.html
-
-"""
-
-# Coincidentally named the same as http://code.activestate.com/recipes/496702/
-
-import re
-
-from coverage import env
-
-
-class TempliteSyntaxError(ValueError):
- """Raised when a template has a syntax error."""
- pass
-
-
-class TempliteValueError(ValueError):
- """Raised when an expression won't evaluate in a template."""
- pass
-
-
-class CodeBuilder(object):
- """Build source code conveniently."""
-
- def __init__(self, indent=0):
- self.code = []
- self.indent_level = indent
-
- def __str__(self):
- return "".join(str(c) for c in self.code)
-
- def add_line(self, line):
- """Add a line of source to the code.
-
- Indentation and newline will be added for you, don't provide them.
-
- """
- self.code.extend([" " * self.indent_level, line, "\n"])
-
- def add_section(self):
- """Add a section, a sub-CodeBuilder."""
- section = CodeBuilder(self.indent_level)
- self.code.append(section)
- return section
-
- INDENT_STEP = 4 # PEP8 says so!
-
- def indent(self):
- """Increase the current indent for following lines."""
- self.indent_level += self.INDENT_STEP
-
- def dedent(self):
- """Decrease the current indent for following lines."""
- self.indent_level -= self.INDENT_STEP
-
- def get_globals(self):
- """Execute the code, and return a dict of globals it defines."""
- # A check that the caller really finished all the blocks they started.
- assert self.indent_level == 0
- # Get the Python source as a single string.
- python_source = str(self)
- # Execute the source, defining globals, and return them.
- global_namespace = {}
- exec(python_source, global_namespace)
- return global_namespace
-
-
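The pattern Templite relies on, in isolation: assemble Python source with CodeBuilder, exec it via get_globals(), and pull the compiled function out of the namespace. The function below is made up for the example::

    code = CodeBuilder()
    code.add_line("def greet(name):")
    code.indent()
    code.add_line("return 'Hello, ' + name")
    code.dedent()
    greet = code.get_globals()["greet"]
    print(greet("Ned"))   # Hello, Ned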
-class Templite(object):
- """A simple template renderer, for a nano-subset of Django syntax.
-
- Supported constructs are extended variable access::
-
- {{var.modifier.modifier|filter|filter}}
-
- loops::
-
- {% for var in list %}...{% endfor %}
-
- and ifs::
-
- {% if var %}...{% endif %}
-
- Comments are within curly-hash markers::
-
- {# This will be ignored #}
-
-    Lines between `{% joined %}` and `{% endjoined %}` will have their
-    whitespace stripped and be joined together. Be careful: this could join
-    words together!
-
- Any of these constructs can have a hyphen at the end (`-}}`, `-%}`, `-#}`),
- which will collapse the whitespace following the tag.
-
- Construct a Templite with the template text, then use `render` against a
- dictionary context to create a finished string::
-
- templite = Templite('''
- <h1>Hello {{name|upper}}!</h1>
- {% for topic in topics %}
- <p>You are interested in {{topic}}.</p>
-            {% endfor %}
- ''',
- {'upper': str.upper},
- )
- text = templite.render({
- 'name': "Ned",
- 'topics': ['Python', 'Geometry', 'Juggling'],
- })
-
- """
- def __init__(self, text, *contexts):
- """Construct a Templite with the given `text`.
-
- `contexts` are dictionaries of values to use for future renderings.
- These are good for filters and global values.
-
- """
- self.context = {}
- for context in contexts:
- self.context.update(context)
-
- self.all_vars = set()
- self.loop_vars = set()
-
- # We construct a function in source form, then compile it and hold onto
- # it, and execute it to render the template.
- code = CodeBuilder()
-
- code.add_line("def render_function(context, do_dots):")
- code.indent()
- vars_code = code.add_section()
- code.add_line("result = []")
- code.add_line("append_result = result.append")
- code.add_line("extend_result = result.extend")
- if env.PY2:
- code.add_line("to_str = unicode")
- else:
- code.add_line("to_str = str")
-
- buffered = []
-
- def flush_output():
- """Force `buffered` to the code builder."""
- if len(buffered) == 1:
- code.add_line("append_result(%s)" % buffered[0])
- elif len(buffered) > 1:
- code.add_line("extend_result([%s])" % ", ".join(buffered))
- del buffered[:]
-
- ops_stack = []
-
- # Split the text to form a list of tokens.
- tokens = re.split(r"(?s)({{.*?}}|{%.*?%}|{#.*?#})", text)
-
- squash = in_joined = False
-
- for token in tokens:
- if token.startswith('{'):
- start, end = 2, -2
- squash = (token[-3] == '-')
- if squash:
- end = -3
-
- if token.startswith('{#'):
- # Comment: ignore it and move on.
- continue
- elif token.startswith('{{'):
- # An expression to evaluate.
- expr = self._expr_code(token[start:end].strip())
- buffered.append("to_str(%s)" % expr)
- else:
- # token.startswith('{%')
- # Action tag: split into words and parse further.
- flush_output()
-
- words = token[start:end].strip().split()
- if words[0] == 'if':
- # An if statement: evaluate the expression to determine if.
- if len(words) != 2:
- self._syntax_error("Don't understand if", token)
- ops_stack.append('if')
- code.add_line("if %s:" % self._expr_code(words[1]))
- code.indent()
- elif words[0] == 'for':
- # A loop: iterate over expression result.
- if len(words) != 4 or words[2] != 'in':
- self._syntax_error("Don't understand for", token)
- ops_stack.append('for')
- self._variable(words[1], self.loop_vars)
- code.add_line(
- "for c_%s in %s:" % (
- words[1],
- self._expr_code(words[3])
- )
- )
- code.indent()
- elif words[0] == 'joined':
- ops_stack.append('joined')
- in_joined = True
- elif words[0].startswith('end'):
- # Endsomething. Pop the ops stack.
- if len(words) != 1:
- self._syntax_error("Don't understand end", token)
- end_what = words[0][3:]
- if not ops_stack:
- self._syntax_error("Too many ends", token)
- start_what = ops_stack.pop()
- if start_what != end_what:
- self._syntax_error("Mismatched end tag", end_what)
- if end_what == 'joined':
- in_joined = False
- else:
- code.dedent()
- else:
- self._syntax_error("Don't understand tag", words[0])
- else:
- # Literal content. If it isn't empty, output it.
- if in_joined:
- token = re.sub(r"\s*\n\s*", "", token.strip())
- elif squash:
- token = token.lstrip()
- if token:
- buffered.append(repr(token))
-
- if ops_stack:
- self._syntax_error("Unmatched action tag", ops_stack[-1])
-
- flush_output()
-
- for var_name in self.all_vars - self.loop_vars:
- vars_code.add_line("c_%s = context[%r]" % (var_name, var_name))
-
- code.add_line('return "".join(result)')
- code.dedent()
- self._render_function = code.get_globals()['render_function']
-
- def _expr_code(self, expr):
- """Generate a Python expression for `expr`."""
- if "|" in expr:
- pipes = expr.split("|")
- code = self._expr_code(pipes[0])
- for func in pipes[1:]:
- self._variable(func, self.all_vars)
- code = "c_%s(%s)" % (func, code)
- elif "." in expr:
- dots = expr.split(".")
- code = self._expr_code(dots[0])
- args = ", ".join(repr(d) for d in dots[1:])
- code = "do_dots(%s, %s)" % (code, args)
- else:
- self._variable(expr, self.all_vars)
- code = "c_%s" % expr
- return code
-
- def _syntax_error(self, msg, thing):
- """Raise a syntax error using `msg`, and showing `thing`."""
- raise TempliteSyntaxError("%s: %r" % (msg, thing))
-
- def _variable(self, name, vars_set):
- """Track that `name` is used as a variable.
-
- Adds the name to `vars_set`, a set of variable names.
-
-        Raises a syntax error if `name` is not a valid name.
-
- """
- if not re.match(r"[_a-zA-Z][_a-zA-Z0-9]*$", name):
- self._syntax_error("Not a valid name", name)
- vars_set.add(name)
-
- def render(self, context=None):
- """Render this template by applying it to `context`.
-
- `context` is a dictionary of values to use in this rendering.
-
- """
- # Make the complete context we'll use.
- render_context = dict(self.context)
- if context:
- render_context.update(context)
- return self._render_function(render_context, self._do_dots)
-
- def _do_dots(self, value, *dots):
- """Evaluate dotted expressions at run-time."""
- for dot in dots:
- try:
- value = getattr(value, dot)
- except AttributeError:
- try:
- value = value[dot]
- except (TypeError, KeyError):
- raise TempliteValueError(
- "Couldn't evaluate %r.%s" % (value, dot)
- )
- if callable(value):
- value = value()
- return value
diff --git a/contrib/python/coverage/py3/coverage/tomlconfig.py b/contrib/python/coverage/py3/coverage/tomlconfig.py
deleted file mode 100644
index 3ad581571c..0000000000
--- a/contrib/python/coverage/py3/coverage/tomlconfig.py
+++ /dev/null
@@ -1,168 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""TOML configuration support for coverage.py"""
-
-import io
-import os
-import re
-
-from coverage import env
-from coverage.backward import configparser, path_types
-from coverage.misc import CoverageException, substitute_variables
-
-# TOML support is an install-time extra option.
-try:
- import toml
-except ImportError: # pragma: not covered
- toml = None
-
-
-class TomlDecodeError(Exception):
- """An exception class that exists even when toml isn't installed."""
- pass
-
-
-class TomlConfigParser:
- """TOML file reading with the interface of HandyConfigParser."""
-
- # This class has the same interface as config.HandyConfigParser, no
- # need for docstrings.
- # pylint: disable=missing-function-docstring
-
- def __init__(self, our_file):
- self.our_file = our_file
- self.data = None
-
- def read(self, filenames):
- # RawConfigParser takes a filename or list of filenames, but we only
- # ever call this with a single filename.
- assert isinstance(filenames, path_types)
- filename = filenames
- if env.PYVERSION >= (3, 6):
- filename = os.fspath(filename)
-
- try:
- with io.open(filename, encoding='utf-8') as fp:
- toml_text = fp.read()
- except IOError:
- return []
- if toml:
- toml_text = substitute_variables(toml_text, os.environ)
- try:
- self.data = toml.loads(toml_text)
- except toml.TomlDecodeError as err:
- raise TomlDecodeError(*err.args)
- return [filename]
- else:
- has_toml = re.search(r"^\[tool\.coverage\.", toml_text, flags=re.MULTILINE)
- if self.our_file or has_toml:
- # Looks like they meant to read TOML, but we can't read it.
- msg = "Can't read {!r} without TOML support. Install with [toml] extra"
- raise CoverageException(msg.format(filename))
- return []
-
- def _get_section(self, section):
- """Get a section from the data.
-
- Arguments:
- section (str): A section name, which can be dotted.
-
- Returns:
- name (str): the actual name of the section that was found, if any,
- or None.
- data (dict): the dict of data in the section, or None if not found.
-
- """
- prefixes = ["tool.coverage."]
- if self.our_file:
- prefixes.append("")
- for prefix in prefixes:
- real_section = prefix + section
- parts = real_section.split(".")
- try:
- data = self.data[parts[0]]
- for part in parts[1:]:
- data = data[part]
- except KeyError:
- continue
- break
- else:
- return None, None
- return real_section, data
-
- def _get(self, section, option):
- """Like .get, but returns the real section name and the value."""
- name, data = self._get_section(section)
- if data is None:
- raise configparser.NoSectionError(section)
- try:
- return name, data[option]
- except KeyError:
- raise configparser.NoOptionError(option, name)
-
- def has_option(self, section, option):
- _, data = self._get_section(section)
- if data is None:
- return False
- return option in data
-
- def has_section(self, section):
- name, _ = self._get_section(section)
- return name
-
- def options(self, section):
- _, data = self._get_section(section)
- if data is None:
- raise configparser.NoSectionError(section)
- return list(data.keys())
-
- def get_section(self, section):
- _, data = self._get_section(section)
- return data
-
- def get(self, section, option):
- _, value = self._get(section, option)
- return value
-
- def _check_type(self, section, option, value, type_, type_desc):
- if not isinstance(value, type_):
- raise ValueError(
- 'Option {!r} in section {!r} is not {}: {!r}'
- .format(option, section, type_desc, value)
- )
-
- def getboolean(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, bool, "a boolean")
- return value
-
- def getlist(self, section, option):
- name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
- return values
-
- def getregexlist(self, section, option):
- name, values = self._get(section, option)
- self._check_type(name, option, values, list, "a list")
- for value in values:
- value = value.strip()
- try:
- re.compile(value)
- except re.error as e:
- raise CoverageException(
- "Invalid [%s].%s value %r: %s" % (name, option, value, e)
- )
- return values
-
- def getint(self, section, option):
- name, value = self._get(section, option)
- self._check_type(name, option, value, int, "an integer")
- return value
-
- def getfloat(self, section, option):
- name, value = self._get(section, option)
- if isinstance(value, int):
- value = float(value)
- self._check_type(name, option, value, float, "a float")
- return value
diff --git a/contrib/python/coverage/py3/coverage/version.py b/contrib/python/coverage/py3/coverage/version.py
deleted file mode 100644
index d141a11da3..0000000000
--- a/contrib/python/coverage/py3/coverage/version.py
+++ /dev/null
@@ -1,33 +0,0 @@
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""The version and URL for coverage.py"""
-# This file is exec'ed in setup.py, don't import anything!
-
-# Same semantics as sys.version_info.
-version_info = (5, 5, 0, "final", 0)
-
-
-def _make_version(major, minor, micro, releaselevel, serial):
- """Create a readable version string from version_info tuple components."""
- assert releaselevel in ['alpha', 'beta', 'candidate', 'final']
- version = "%d.%d" % (major, minor)
- if micro:
- version += ".%d" % (micro,)
- if releaselevel != 'final':
- short = {'alpha': 'a', 'beta': 'b', 'candidate': 'rc'}[releaselevel]
- version += "%s%d" % (short, serial)
- return version
-
-
-def _make_url(major, minor, micro, releaselevel, serial):
- """Make the URL people should start at for this version of coverage.py."""
- url = "https://coverage.readthedocs.io"
- if releaselevel != 'final':
- # For pre-releases, use a version-specific URL.
- url += "/en/coverage-" + _make_version(major, minor, micro, releaselevel, serial)
- return url
-
-
-__version__ = _make_version(*version_info)
-__url__ = _make_url(*version_info)
diff --git a/contrib/python/coverage/py3/coverage/xmlreport.py b/contrib/python/coverage/py3/coverage/xmlreport.py
deleted file mode 100644
index 6d012ee692..0000000000
--- a/contrib/python/coverage/py3/coverage/xmlreport.py
+++ /dev/null
@@ -1,234 +0,0 @@
-# coding: utf-8
-# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
-# For details: https://github.com/nedbat/coveragepy/blob/master/NOTICE.txt
-
-"""XML reporting for coverage.py"""
-
-import os
-import os.path
-import sys
-import time
-import xml.dom.minidom
-
-from coverage import env
-from coverage import __url__, __version__, files
-from coverage.backward import iitems
-from coverage.misc import isolate_module
-from coverage.report import get_analysis_to_report
-
-os = isolate_module(os)
-
-
-DTD_URL = 'https://raw.githubusercontent.com/cobertura/web/master/htdocs/xml/coverage-04.dtd'
-
-
-def rate(hit, num):
- """Return the fraction of `hit`/`num`, as a string."""
- if num == 0:
- return "1"
- else:
- return "%.4g" % (float(hit) / num)
-
-
-class XmlReporter(object):
- """A reporter for writing Cobertura-style XML coverage results."""
-
- def __init__(self, coverage):
- self.coverage = coverage
- self.config = self.coverage.config
-
- self.source_paths = set()
- if self.config.source:
- for src in self.config.source:
- if os.path.exists(src):
- if not self.config.relative_files:
- src = files.canonical_filename(src)
- self.source_paths.add(src)
- self.packages = {}
- self.xml_out = None
-
- def report(self, morfs, outfile=None):
- """Generate a Cobertura-compatible XML report for `morfs`.
-
- `morfs` is a list of modules or file names.
-
- `outfile` is a file object to write the XML to.
-
- """
- # Initial setup.
- outfile = outfile or sys.stdout
- has_arcs = self.coverage.get_data().has_arcs()
-
- # Create the DOM that will store the data.
- impl = xml.dom.minidom.getDOMImplementation()
- self.xml_out = impl.createDocument(None, "coverage", None)
-
- # Write header stuff.
- xcoverage = self.xml_out.documentElement
- xcoverage.setAttribute("version", __version__)
- xcoverage.setAttribute("timestamp", str(int(time.time()*1000)))
- xcoverage.appendChild(self.xml_out.createComment(
- " Generated by coverage.py: %s " % __url__
- ))
- xcoverage.appendChild(self.xml_out.createComment(" Based on %s " % DTD_URL))
-
- # Call xml_file for each file in the data.
- for fr, analysis in get_analysis_to_report(self.coverage, morfs):
- self.xml_file(fr, analysis, has_arcs)
-
- xsources = self.xml_out.createElement("sources")
- xcoverage.appendChild(xsources)
-
- # Populate the XML DOM with the source info.
- for path in sorted(self.source_paths):
- xsource = self.xml_out.createElement("source")
- xsources.appendChild(xsource)
- txt = self.xml_out.createTextNode(path)
- xsource.appendChild(txt)
-
- lnum_tot, lhits_tot = 0, 0
- bnum_tot, bhits_tot = 0, 0
-
- xpackages = self.xml_out.createElement("packages")
- xcoverage.appendChild(xpackages)
-
- # Populate the XML DOM with the package info.
- for pkg_name, pkg_data in sorted(iitems(self.packages)):
- class_elts, lhits, lnum, bhits, bnum = pkg_data
- xpackage = self.xml_out.createElement("package")
- xpackages.appendChild(xpackage)
- xclasses = self.xml_out.createElement("classes")
- xpackage.appendChild(xclasses)
- for _, class_elt in sorted(iitems(class_elts)):
- xclasses.appendChild(class_elt)
- xpackage.setAttribute("name", pkg_name.replace(os.sep, '.'))
- xpackage.setAttribute("line-rate", rate(lhits, lnum))
- if has_arcs:
- branch_rate = rate(bhits, bnum)
- else:
- branch_rate = "0"
- xpackage.setAttribute("branch-rate", branch_rate)
- xpackage.setAttribute("complexity", "0")
-
- lnum_tot += lnum
- lhits_tot += lhits
- bnum_tot += bnum
- bhits_tot += bhits
-
- xcoverage.setAttribute("lines-valid", str(lnum_tot))
- xcoverage.setAttribute("lines-covered", str(lhits_tot))
- xcoverage.setAttribute("line-rate", rate(lhits_tot, lnum_tot))
- if has_arcs:
- xcoverage.setAttribute("branches-valid", str(bnum_tot))
- xcoverage.setAttribute("branches-covered", str(bhits_tot))
- xcoverage.setAttribute("branch-rate", rate(bhits_tot, bnum_tot))
- else:
- xcoverage.setAttribute("branches-covered", "0")
- xcoverage.setAttribute("branches-valid", "0")
- xcoverage.setAttribute("branch-rate", "0")
- xcoverage.setAttribute("complexity", "0")
-
- # Write the output file.
- outfile.write(serialize_xml(self.xml_out))
-
- # Return the total percentage.
- denom = lnum_tot + bnum_tot
- if denom == 0:
- pct = 0.0
- else:
- pct = 100.0 * (lhits_tot + bhits_tot) / denom
- return pct
-
- def xml_file(self, fr, analysis, has_arcs):
- """Add to the XML report for a single file."""
-
- if self.config.skip_empty:
- if analysis.numbers.n_statements == 0:
- return
-
- # Create the 'lines' and 'package' XML elements, which
- # are populated later. Note that a package == a directory.
- filename = fr.filename.replace("\\", "/")
- for source_path in self.source_paths:
- source_path = files.canonical_filename(source_path)
- if filename.startswith(source_path.replace("\\", "/") + "/"):
- rel_name = filename[len(source_path)+1:]
- break
- else:
- rel_name = fr.relative_filename()
- self.source_paths.add(fr.filename[:-len(rel_name)].rstrip(r"\/"))
-
- dirname = os.path.dirname(rel_name) or u"."
- dirname = "/".join(dirname.split("/")[:self.config.xml_package_depth])
- package_name = dirname.replace("/", ".")
-
- package = self.packages.setdefault(package_name, [{}, 0, 0, 0, 0])
-
- xclass = self.xml_out.createElement("class")
-
- xclass.appendChild(self.xml_out.createElement("methods"))
-
- xlines = self.xml_out.createElement("lines")
- xclass.appendChild(xlines)
-
- xclass.setAttribute("name", os.path.relpath(rel_name, dirname))
- xclass.setAttribute("filename", rel_name.replace("\\", "/"))
- xclass.setAttribute("complexity", "0")
-
- branch_stats = analysis.branch_stats()
- missing_branch_arcs = analysis.missing_branch_arcs()
-
- # For each statement, create an XML 'line' element.
- for line in sorted(analysis.statements):
- xline = self.xml_out.createElement("line")
- xline.setAttribute("number", str(line))
-
- # Q: can we get info about the number of times a statement is
- # executed? If so, that should be recorded here.
- xline.setAttribute("hits", str(int(line not in analysis.missing)))
-
- if has_arcs:
- if line in branch_stats:
- total, taken = branch_stats[line]
- xline.setAttribute("branch", "true")
- xline.setAttribute(
- "condition-coverage",
- "%d%% (%d/%d)" % (100*taken//total, taken, total)
- )
- if line in missing_branch_arcs:
- annlines = ["exit" if b < 0 else str(b) for b in missing_branch_arcs[line]]
- xline.setAttribute("missing-branches", ",".join(annlines))
- xlines.appendChild(xline)
-
- class_lines = len(analysis.statements)
- class_hits = class_lines - len(analysis.missing)
-
- if has_arcs:
- class_branches = sum(t for t, k in branch_stats.values())
- missing_branches = sum(t - k for t, k in branch_stats.values())
- class_br_hits = class_branches - missing_branches
- else:
- class_branches = 0.0
- class_br_hits = 0.0
-
- # Finalize the statistics that are collected in the XML DOM.
- xclass.setAttribute("line-rate", rate(class_hits, class_lines))
- if has_arcs:
- branch_rate = rate(class_br_hits, class_branches)
- else:
- branch_rate = "0"
- xclass.setAttribute("branch-rate", branch_rate)
-
- package[0][rel_name] = xclass
- package[1] += class_hits
- package[2] += class_lines
- package[3] += class_br_hits
- package[4] += class_branches
-
-
-def serialize_xml(dom):
- """Serialize a minidom node to XML."""
- out = dom.toprettyxml()
- if env.PY2:
- out = out.encode("utf8")
- return out
diff --git a/contrib/python/coverage/py3/ya.make b/contrib/python/coverage/py3/ya.make
deleted file mode 100644
index 76625b48eb..0000000000
--- a/contrib/python/coverage/py3/ya.make
+++ /dev/null
@@ -1,98 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(5.5)
-
-LICENSE(Apache-2.0)
-
-PEERDIR(
- contrib/python/coverage/plugins
- library/python/resource
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_LINT()
-
-NO_CHECK_IMPORTS(
- coverage.fullcoverage.encodings
-)
-
-SRCS(
- coverage/ctracer/datastack.c
- coverage/ctracer/filedisp.c
- coverage/ctracer/module.c
- coverage/ctracer/tracer.c
-)
-
-PY_REGISTER(
- coverage.tracer
-)
-
-PY_SRCS(
- TOP_LEVEL
- coverage/__init__.py
- coverage/__main__.py
- coverage/annotate.py
- coverage/backward.py
- coverage/bytecode.py
- coverage/cmdline.py
- coverage/collector.py
- coverage/config.py
- coverage/context.py
- coverage/control.py
- coverage/data.py
- coverage/debug.py
- coverage/disposition.py
- coverage/env.py
- coverage/execfile.py
- coverage/files.py
- coverage/fullcoverage/encodings.py
- coverage/html.py
- coverage/inorout.py
- coverage/jsonreport.py
- coverage/misc.py
- coverage/multiproc.py
- coverage/numbits.py
- coverage/parser.py
- coverage/phystokens.py
- coverage/plugin.py
- coverage/plugin_support.py
- coverage/python.py
- coverage/pytracer.py
- coverage/report.py
- coverage/results.py
- coverage/sqldata.py
- coverage/summary.py
- coverage/templite.py
- coverage/tomlconfig.py
- coverage/version.py
- coverage/xmlreport.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/coverage/py3/
- .dist-info/METADATA
- .dist-info/entry_points.txt
- .dist-info/top_level.txt
- coverage/htmlfiles/coverage_html.js
- coverage/htmlfiles/favicon_32.png
- coverage/htmlfiles/index.html
- coverage/htmlfiles/jquery.ba-throttle-debounce.min.js
- coverage/htmlfiles/jquery.hotkeys.js
- coverage/htmlfiles/jquery.isonscreen.js
- coverage/htmlfiles/jquery.min.js
- coverage/htmlfiles/jquery.tablesorter.min.js
- coverage/htmlfiles/keybd_closed.png
- coverage/htmlfiles/keybd_open.png
- coverage/htmlfiles/pyfile.html
- coverage/htmlfiles/style.css
- coverage/htmlfiles/style.scss
-)
-
-END()
-
-RECURSE(
- bin
-)
diff --git a/contrib/python/coverage/ya.make b/contrib/python/coverage/ya.make
deleted file mode 100644
index f7202723cd..0000000000
--- a/contrib/python/coverage/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/coverage/py2)
-ELSE()
- PEERDIR(contrib/python/coverage/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- plugins
- py2
- py3
-)
diff --git a/contrib/python/diff-match-patch/py2/.dist-info/METADATA b/contrib/python/diff-match-patch/py2/.dist-info/METADATA
deleted file mode 100644
index ea1c571881..0000000000
--- a/contrib/python/diff-match-patch/py2/.dist-info/METADATA
+++ /dev/null
@@ -1,112 +0,0 @@
-Metadata-Version: 2.1
-Name: diff-match-patch
-Version: 20200713
-Summary: Repackaging of Google's Diff Match and Patch libraries. Offers robust algorithms to perform the operations required for synchronizing plain text.
-Home-page: https://github.com/diff-match-patch-python/diff-match-patch
-Author: Neil Fraser
-Author-email: fraser@google.com
-Maintainer: John Reese
-Maintainer-email: john@noswap.com
-License: Apache
-Platform: UNKNOWN
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.4
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Text Processing
-Requires-Python: >=2.7
-Description-Content-Type: text/markdown
-
-# diff-match-patch
-
-Google's [Diff Match and Patch][DMP] library, packaged for modern Python.
-
-[![build status](https://travis-ci.org/diff-match-patch-python/diff-match-patch.svg?branch=master)](https://travis-ci.org/diff-match-patch-python/diff-match-patch)
-[![version](https://img.shields.io/pypi/v/diff-match-patch.svg)](https://pypi.org/project/diff-match-patch)
-[![license](https://img.shields.io/pypi/l/diff-match-patch.svg)](https://github.com/diff-match-patch-python/diff-match-patch/blob/master/LICENSE)
-
-## Install
-
-diff-match-patch is supported on Python 2.7 or Python 3.4 or newer.
-You can install it from PyPI:
-
-```shell
-python -m pip install diff-match-patch
-```
-
-## Usage
-
-Generating a patchset (analogous to unified diff) between two texts:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_make(text1, text2)
-diff = dmp.patch_toText(patches)
-```
-
-Applying a patchset to a text can then be done with:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_fromText(diff)
-new_text, _ = dmp.patch_apply(patches, text)
-```
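-
-The fuzzy Match operation described below can be exercised in the same way; a minimal sketch, assuming the standard `match_main(text, pattern, loc)` entry point together with the `Match_Threshold`/`Match_Distance` settings defined on the class:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-dmp.Match_Threshold = 0.5   # 0.0 = exact match required, 1.0 = very loose
-dmp.Match_Distance = 1000   # how far from the expected location to search
-
-# Find the best fuzzy match for "quikc" near index 10; returns -1 if none.
-loc = dmp.match_main("the quick brown fox", "quikc", 10)
-```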
-
-## Original README
-The Diff Match and Patch libraries offer robust algorithms to perform the
-operations required for synchronizing plain text.
-
-1. Diff:
- * Compare two blocks of plain text and efficiently return a list of differences.
- * [Diff Demo](https://neil.fraser.name/software/diff_match_patch/demos/diff.html)
-2. Match:
- * Given a search string, find its best fuzzy match in a block of plain text. Weighted for both accuracy and location.
- * [Match Demo](https://neil.fraser.name/software/diff_match_patch/demos/match.html)
-3. Patch:
- * Apply a list of patches onto plain text. Use best-effort to apply patch even when the underlying text doesn't match.
- * [Patch Demo](https://neil.fraser.name/software/diff_match_patch/demos/patch.html)
-
-Originally built in 2006 to power Google Docs, this library is now available in C++, C#, Dart, Java, JavaScript, Lua, Objective C, and Python.
-
-### Reference
-
-* [API](https://github.com/google/diff-match-patch/wiki/API) - Common API across all languages.
-* [Line or Word Diffs](https://github.com/google/diff-match-patch/wiki/Line-or-Word-Diffs) - Less detailed diffs.
-* [Plain Text vs. Structured Content](https://github.com/google/diff-match-patch/wiki/Plain-Text-vs.-Structured-Content) - How to deal with data like XML.
-* [Unidiff](https://github.com/google/diff-match-patch/wiki/Unidiff) - The patch serialization format.
-* [Support](https://groups.google.com/forum/#!forum/diff-match-patch) - Newsgroup for developers.
-
-### Languages
-Although each language port of Diff Match Patch uses the same API, there are some language-specific notes.
-
-* [C++](https://github.com/google/diff-match-patch/wiki/Language:-Cpp)
-* [C#](https://github.com/google/diff-match-patch/wiki/Language:-C%23)
-* [Dart](https://github.com/google/diff-match-patch/wiki/Language:-Dart)
-* [Java](https://github.com/google/diff-match-patch/wiki/Language:-Java)
-* [JavaScript](https://github.com/google/diff-match-patch/wiki/Language:-JavaScript)
-* [Lua](https://github.com/google/diff-match-patch/wiki/Language:-Lua)
-* [Objective-C](https://github.com/google/diff-match-patch/wiki/Language:-Objective-C)
-* [Python](https://github.com/google/diff-match-patch/wiki/Language:-Python)
-
-A standardized speed test tracks the [relative performance of diffs](https://docs.google.com/spreadsheets/d/1zpZccuBpjMZTvL1nGDMKJc7rWL_m_drF4XKOJvB27Kc/edit#gid=0) in each language.
-
-### Algorithms
-This library implements [Myers's diff algorithm](https://neil.fraser.name/writing/diff/myers.pdf), which is generally considered to be the best general-purpose diff. A layer of [pre-diff speedups and post-diff cleanups](https://neil.fraser.name/writing/diff/) surrounds the diff algorithm, improving both performance and output quality.
-
-This library also implements a [Bitap matching algorithm](https://neil.fraser.name/writing/patch/bitap.ps) at the heart of a [flexible matching and patching strategy](https://neil.fraser.name/writing/patch/).
-
-[DMP]: https://github.com/google/diff-match-patch
-[API]: https://github.com/google/diff-match-patch/wiki/API
-
-
diff --git a/contrib/python/diff-match-patch/py2/.dist-info/top_level.txt b/contrib/python/diff-match-patch/py2/.dist-info/top_level.txt
deleted file mode 100644
index 63904d71d9..0000000000
--- a/contrib/python/diff-match-patch/py2/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-diff_match_patch
diff --git a/contrib/python/diff-match-patch/py2/AUTHORS b/contrib/python/diff-match-patch/py2/AUTHORS
deleted file mode 100644
index c82809e726..0000000000
--- a/contrib/python/diff-match-patch/py2/AUTHORS
+++ /dev/null
@@ -1,10 +0,0 @@
-# Below is a list of people and organizations that have contributed
-# to the Diff Match Patch project.
-
-Google Inc.
-
-Duncan Cross <duncan.cross@gmail.com> (Lua port)
-Jan Weiß <jan@geheimwerk.de> (Objective C port)
-Matthaeus G. Chajdas <anteru@developer.shelter13.net> (C# port)
-Mike Slemmer <mikeslemmer@gmail.com> (C++ port)
-
diff --git a/contrib/python/diff-match-patch/py2/LICENSE b/contrib/python/diff-match-patch/py2/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/contrib/python/diff-match-patch/py2/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/contrib/python/diff-match-patch/py2/README.md b/contrib/python/diff-match-patch/py2/README.md
deleted file mode 100644
index 54f17337a7..0000000000
--- a/contrib/python/diff-match-patch/py2/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# diff-match-patch
-
-Google's [Diff Match and Patch][DMP] library, packaged for modern Python.
-
-[![build status](https://travis-ci.org/diff-match-patch-python/diff-match-patch.svg?branch=master)](https://travis-ci.org/diff-match-patch-python/diff-match-patch)
-[![version](https://img.shields.io/pypi/v/diff-match-patch.svg)](https://pypi.org/project/diff-match-patch)
-[![license](https://img.shields.io/pypi/l/diff-match-patch.svg)](https://github.com/diff-match-patch-python/diff-match-patch/blob/master/LICENSE)
-
-## Install
-
-diff-match-patch is supported on Python 2.7 or Python 3.4 or newer.
-You can install it from PyPI:
-
-```shell
-python -m pip install diff-match-patch
-```
-
-## Usage
-
-Generating a patchset (analogous to unified diff) between two texts:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_make(text1, text2)
-diff = dmp.patch_toText(patches)
-```
-
-Applying a patchset to a text can then be done with:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_fromText(diff)
-new_text, _ = dmp.patch_apply(patches, text)
-```
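-
-Beyond patches, the raw Diff operation listed below returns a list of `(op, text)` tuples (`-1` delete, `1` insert, `0` equal), as defined by `diff_main` in the bundled source; a minimal sketch using the `diff_cleanupSemantic` helper shipped in the same module:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-diffs = dmp.diff_main("Hello world.", "Goodbye world.")
-# Merge semantically trivial equalities for a more human-readable diff.
-dmp.diff_cleanupSemantic(diffs)
-# e.g. [(-1, 'Hello'), (1, 'Goodbye'), (0, ' world.')]
-```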
-
-## Original README
-The Diff Match and Patch libraries offer robust algorithms to perform the
-operations required for synchronizing plain text.
-
-1. Diff:
- * Compare two blocks of plain text and efficiently return a list of differences.
- * [Diff Demo](https://neil.fraser.name/software/diff_match_patch/demos/diff.html)
-2. Match:
- * Given a search string, find its best fuzzy match in a block of plain text. Weighted for both accuracy and location.
- * [Match Demo](https://neil.fraser.name/software/diff_match_patch/demos/match.html)
-3. Patch:
- * Apply a list of patches onto plain text. Use best-effort to apply patch even when the underlying text doesn't match.
- * [Patch Demo](https://neil.fraser.name/software/diff_match_patch/demos/patch.html)
-
-Originally built in 2006 to power Google Docs, this library is now available in C++, C#, Dart, Java, JavaScript, Lua, Objective C, and Python.
-
-### Reference
-
-* [API](https://github.com/google/diff-match-patch/wiki/API) - Common API across all languages.
-* [Line or Word Diffs](https://github.com/google/diff-match-patch/wiki/Line-or-Word-Diffs) - Less detailed diffs.
-* [Plain Text vs. Structured Content](https://github.com/google/diff-match-patch/wiki/Plain-Text-vs.-Structured-Content) - How to deal with data like XML.
-* [Unidiff](https://github.com/google/diff-match-patch/wiki/Unidiff) - The patch serialization format.
-* [Support](https://groups.google.com/forum/#!forum/diff-match-patch) - Newsgroup for developers.
-
-### Languages
-Although each language port of Diff Match Patch uses the same API, there are some language-specific notes.
-
-* [C++](https://github.com/google/diff-match-patch/wiki/Language:-Cpp)
-* [C#](https://github.com/google/diff-match-patch/wiki/Language:-C%23)
-* [Dart](https://github.com/google/diff-match-patch/wiki/Language:-Dart)
-* [Java](https://github.com/google/diff-match-patch/wiki/Language:-Java)
-* [JavaScript](https://github.com/google/diff-match-patch/wiki/Language:-JavaScript)
-* [Lua](https://github.com/google/diff-match-patch/wiki/Language:-Lua)
-* [Objective-C](https://github.com/google/diff-match-patch/wiki/Language:-Objective-C)
-* [Python](https://github.com/google/diff-match-patch/wiki/Language:-Python)
-
-A standardized speed test tracks the [relative performance of diffs](https://docs.google.com/spreadsheets/d/1zpZccuBpjMZTvL1nGDMKJc7rWL_m_drF4XKOJvB27Kc/edit#gid=0) in each language.
-
-### Algorithms
-This library implements [Myers's diff algorithm](https://neil.fraser.name/writing/diff/myers.pdf), which is generally considered to be the best general-purpose diff. A layer of [pre-diff speedups and post-diff cleanups](https://neil.fraser.name/writing/diff/) surrounds the diff algorithm, improving both performance and output quality.
-
-This library also implements a [Bitap matching algorithm](https://neil.fraser.name/writing/patch/bitap.ps) at the heart of a [flexible matching and patching strategy](https://neil.fraser.name/writing/patch/).
-
-[DMP]: https://github.com/google/diff-match-patch
-[API]: https://github.com/google/diff-match-patch/wiki/API
diff --git a/contrib/python/diff-match-patch/py2/diff_match_patch/__init__.py b/contrib/python/diff-match-patch/py2/diff_match_patch/__init__.py
deleted file mode 100644
index 37e762a3b1..0000000000
--- a/contrib/python/diff-match-patch/py2/diff_match_patch/__init__.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import sys
-
-if sys.version_info >= (3, 0):
- from .diff_match_patch import __author__, __doc__, diff_match_patch, patch_obj
-else:
- from .diff_match_patch_py2 import __author__, __doc__, diff_match_patch, patch_obj
-
-__version__ = "20200713"
-__packager__ = "John Reese (john@noswap.com)"
diff --git a/contrib/python/diff-match-patch/py2/diff_match_patch/diff_match_patch_py2.py b/contrib/python/diff-match-patch/py2/diff_match_patch/diff_match_patch_py2.py
deleted file mode 100644
index 5bfe9688a2..0000000000
--- a/contrib/python/diff-match-patch/py2/diff_match_patch/diff_match_patch_py2.py
+++ /dev/null
@@ -1,2037 +0,0 @@
-#!/usr/bin/python2.4
-
-from __future__ import division
-
-"""Diff Match and Patch
-Copyright 2018 The diff-match-patch Authors.
-https://github.com/google/diff-match-patch
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-"""Functions for diff, match and patch.
-
-Computes the difference between two texts to create a patch.
-Applies the patch onto another text, allowing for errors.
-"""
-
-__author__ = "fraser@google.com (Neil Fraser)"
-
-import re
-import sys
-import time
-import urllib
-
-
-class diff_match_patch:
- """Class containing the diff, match and patch methods.
-
- Also contains the behaviour settings.
- """
-
- def __init__(self):
- """Inits a diff_match_patch object with default settings.
- Redefine these in your program to override the defaults.
- """
-
- # Number of seconds to map a diff before giving up (0 for infinity).
- self.Diff_Timeout = 1.0
- # Cost of an empty edit operation in terms of edit characters.
- self.Diff_EditCost = 4
- # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
- self.Match_Threshold = 0.5
- # How far to search for a match (0 = exact location, 1000+ = broad match).
- # A match this many characters away from the expected location will add
- # 1.0 to the score (0.0 is a perfect match).
- self.Match_Distance = 1000
- # When deleting a large block of text (over ~64 characters), how close do
- # the contents have to be to match the expected contents. (0.0 = perfection,
- # 1.0 = very loose). Note that Match_Threshold controls how closely the
- # end points of a delete need to match.
- self.Patch_DeleteThreshold = 0.5
- # Chunk size for context length.
- self.Patch_Margin = 4
-
- # The number of bits in an int.
- # Python has no maximum, thus to disable patch splitting set to 0.
- # However, to avoid long patches in certain pathological cases, use 32.
- # Multiple short patches (using native ints) are much faster than long ones.
- self.Match_MaxBits = 32
-
- # DIFF FUNCTIONS
-
- # The data structure representing a diff is an array of tuples:
- # [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
- # which means: delete "Hello", add "Goodbye" and keep " world."
- DIFF_DELETE = -1
- DIFF_INSERT = 1
- DIFF_EQUAL = 0
-
- def diff_main(self, text1, text2, checklines=True, deadline=None):
- """Find the differences between two texts. Simplifies the problem by
- stripping any common prefix or suffix off the texts before diffing.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- checklines: Optional speedup flag. If present and false, then don't run
- a line-level diff first to identify the changed areas.
- Defaults to true, which does a faster, slightly less optimal diff.
- deadline: Optional time when the diff should be complete by. Used
- internally for recursive calls. Users should set Diff_Timeout instead.
-
- Returns:
- Array of changes.
- """
- # Set a deadline by which time the diff must be complete.
- if deadline == None:
- # Unlike in most languages, Python counts time in seconds.
- if self.Diff_Timeout <= 0:
- deadline = sys.maxint
- else:
- deadline = time.time() + self.Diff_Timeout
-
- # Check for null inputs.
- if text1 == None or text2 == None:
- raise ValueError("Null inputs. (diff_main)")
-
- # Check for equality (speedup).
- if text1 == text2:
- if text1:
- return [(self.DIFF_EQUAL, text1)]
- return []
-
- # Trim off common prefix (speedup).
- commonlength = self.diff_commonPrefix(text1, text2)
- commonprefix = text1[:commonlength]
- text1 = text1[commonlength:]
- text2 = text2[commonlength:]
-
- # Trim off common suffix (speedup).
- commonlength = self.diff_commonSuffix(text1, text2)
- if commonlength == 0:
- commonsuffix = ""
- else:
- commonsuffix = text1[-commonlength:]
- text1 = text1[:-commonlength]
- text2 = text2[:-commonlength]
-
- # Compute the diff on the middle block.
- diffs = self.diff_compute(text1, text2, checklines, deadline)
-
- # Restore the prefix and suffix.
- if commonprefix:
- diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
- if commonsuffix:
- diffs.append((self.DIFF_EQUAL, commonsuffix))
- self.diff_cleanupMerge(diffs)
- return diffs
-
- def diff_compute(self, text1, text2, checklines, deadline):
- """Find the differences between two texts. Assumes that the texts do not
- have any common prefix or suffix.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- checklines: Speedup flag. If false, then don't run a line-level diff
- first to identify the changed areas.
- If true, then run a faster, slightly less optimal diff.
- deadline: Time when the diff should be complete by.
-
- Returns:
- Array of changes.
- """
- if not text1:
- # Just add some text (speedup).
- return [(self.DIFF_INSERT, text2)]
-
- if not text2:
- # Just delete some text (speedup).
- return [(self.DIFF_DELETE, text1)]
-
- if len(text1) > len(text2):
- (longtext, shorttext) = (text1, text2)
- else:
- (shorttext, longtext) = (text1, text2)
- i = longtext.find(shorttext)
- if i != -1:
- # Shorter text is inside the longer text (speedup).
- diffs = [
- (self.DIFF_INSERT, longtext[:i]),
- (self.DIFF_EQUAL, shorttext),
- (self.DIFF_INSERT, longtext[i + len(shorttext) :]),
- ]
- # Swap insertions for deletions if diff is reversed.
- if len(text1) > len(text2):
- diffs[0] = (self.DIFF_DELETE, diffs[0][1])
- diffs[2] = (self.DIFF_DELETE, diffs[2][1])
- return diffs
-
- if len(shorttext) == 1:
- # Single character string.
- # After the previous speedup, the character can't be an equality.
- return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
-
- # Check to see if the problem can be split in two.
- hm = self.diff_halfMatch(text1, text2)
- if hm:
- # A half-match was found, sort out the return data.
- (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
- # Send both pairs off for separate processing.
- diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
- diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
- # Merge the results.
- return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
-
- if checklines and len(text1) > 100 and len(text2) > 100:
- return self.diff_lineMode(text1, text2, deadline)
-
- return self.diff_bisect(text1, text2, deadline)
-
- def diff_lineMode(self, text1, text2, deadline):
- """Do a quick line-level diff on both strings, then rediff the parts for
- greater accuracy.
- This speedup can produce non-minimal diffs.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- deadline: Time when the diff should be complete by.
-
- Returns:
- Array of changes.
- """
-
- # Scan the text on a line-by-line basis first.
- (text1, text2, linearray) = self.diff_linesToChars(text1, text2)
-
- diffs = self.diff_main(text1, text2, False, deadline)
-
- # Convert the diff back to original text.
- self.diff_charsToLines(diffs, linearray)
- # Eliminate freak matches (e.g. blank lines)
- self.diff_cleanupSemantic(diffs)
-
- # Rediff any replacement blocks, this time character-by-character.
- # Add a dummy entry at the end.
- diffs.append((self.DIFF_EQUAL, ""))
- pointer = 0
- count_delete = 0
- count_insert = 0
- text_delete = ""
- text_insert = ""
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_INSERT:
- count_insert += 1
- text_insert += diffs[pointer][1]
- elif diffs[pointer][0] == self.DIFF_DELETE:
- count_delete += 1
- text_delete += diffs[pointer][1]
- elif diffs[pointer][0] == self.DIFF_EQUAL:
- # Upon reaching an equality, check for prior redundancies.
- if count_delete >= 1 and count_insert >= 1:
- # Delete the offending records and add the merged ones.
- subDiff = self.diff_main(text_delete, text_insert, False, deadline)
- diffs[pointer - count_delete - count_insert : pointer] = subDiff
- pointer = pointer - count_delete - count_insert + len(subDiff)
- count_insert = 0
- count_delete = 0
- text_delete = ""
- text_insert = ""
-
- pointer += 1
-
- diffs.pop() # Remove the dummy entry at the end.
-
- return diffs
-
- def diff_bisect(self, text1, text2, deadline):
- """Find the 'middle snake' of a diff, split the problem in two
- and return the recursively constructed diff.
- See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- deadline: Time at which to bail if not yet complete.
-
- Returns:
- Array of diff tuples.
- """
-
- # Cache the text lengths to prevent multiple calls.
- text1_length = len(text1)
- text2_length = len(text2)
- max_d = (text1_length + text2_length + 1) // 2
- v_offset = max_d
- v_length = 2 * max_d
- v1 = [-1] * v_length
- v1[v_offset + 1] = 0
- v2 = v1[:]
- delta = text1_length - text2_length
- # If the total number of characters is odd, then the front path will
- # collide with the reverse path.
- front = delta % 2 != 0
- # Offsets for start and end of k loop.
- # Prevents mapping of space beyond the grid.
- k1start = 0
- k1end = 0
- k2start = 0
- k2end = 0
- for d in xrange(max_d):
- # Bail out if deadline is reached.
- if time.time() > deadline:
- break
-
- # Walk the front path one step.
- for k1 in xrange(-d + k1start, d + 1 - k1end, 2):
- k1_offset = v_offset + k1
- if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]):
- x1 = v1[k1_offset + 1]
- else:
- x1 = v1[k1_offset - 1] + 1
- y1 = x1 - k1
- while (
- x1 < text1_length and y1 < text2_length and text1[x1] == text2[y1]
- ):
- x1 += 1
- y1 += 1
- v1[k1_offset] = x1
- if x1 > text1_length:
- # Ran off the right of the graph.
- k1end += 2
- elif y1 > text2_length:
- # Ran off the bottom of the graph.
- k1start += 2
- elif front:
- k2_offset = v_offset + delta - k1
- if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
- # Mirror x2 onto top-left coordinate system.
- x2 = text1_length - v2[k2_offset]
- if x1 >= x2:
- # Overlap detected.
- return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
-
- # Walk the reverse path one step.
- for k2 in xrange(-d + k2start, d + 1 - k2end, 2):
- k2_offset = v_offset + k2
- if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]):
- x2 = v2[k2_offset + 1]
- else:
- x2 = v2[k2_offset - 1] + 1
- y2 = x2 - k2
- while (
- x2 < text1_length
- and y2 < text2_length
- and text1[-x2 - 1] == text2[-y2 - 1]
- ):
- x2 += 1
- y2 += 1
- v2[k2_offset] = x2
- if x2 > text1_length:
- # Ran off the left of the graph.
- k2end += 2
- elif y2 > text2_length:
- # Ran off the top of the graph.
- k2start += 2
- elif not front:
- k1_offset = v_offset + delta - k2
- if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
- x1 = v1[k1_offset]
- y1 = v_offset + x1 - k1_offset
- # Mirror x2 onto top-left coordinate system.
- x2 = text1_length - x2
- if x1 >= x2:
- # Overlap detected.
- return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
-
- # Diff took too long and hit the deadline or
- # number of diffs equals number of characters, no commonality at all.
- return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
-
- def diff_bisectSplit(self, text1, text2, x, y, deadline):
- """Given the location of the 'middle snake', split the diff in two parts
- and recurse.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- x: Index of split point in text1.
- y: Index of split point in text2.
- deadline: Time at which to bail if not yet complete.
-
- Returns:
- Array of diff tuples.
- """
- text1a = text1[:x]
- text2a = text2[:y]
- text1b = text1[x:]
- text2b = text2[y:]
-
- # Compute both diffs serially.
- diffs = self.diff_main(text1a, text2a, False, deadline)
- diffsb = self.diff_main(text1b, text2b, False, deadline)
-
- return diffs + diffsb
-
- def diff_linesToChars(self, text1, text2):
- """Split two texts into an array of strings. Reduce the texts to a string
- of hashes where each Unicode character represents one line.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- Three element tuple, containing the encoded text1, the encoded text2 and
- the array of unique strings. The zeroth element of the array of unique
- strings is intentionally blank.
- """
- lineArray = [] # e.g. lineArray[4] == "Hello\n"
- lineHash = {} # e.g. lineHash["Hello\n"] == 4
-
- # "\x00" is a valid character, but various debuggers don't like it.
- # So we'll insert a junk entry to avoid generating a null character.
- lineArray.append("")
-
- def diff_linesToCharsMunge(text):
- """Split a text into an array of strings. Reduce the texts to a string
- of hashes where each Unicode character represents one line.
- Modifies lineArray and lineHash through being a closure.
-
- Args:
- text: String to encode.
-
- Returns:
- Encoded string.
- """
- chars = []
- # Walk the text, pulling out a substring for each line.
- # text.split('\n') would temporarily double our memory footprint.
- # Modifying text would create many large strings to garbage collect.
- lineStart = 0
- lineEnd = -1
- while lineEnd < len(text) - 1:
- lineEnd = text.find("\n", lineStart)
- if lineEnd == -1:
- lineEnd = len(text) - 1
- line = text[lineStart : lineEnd + 1]
-
- if line in lineHash:
- chars.append(unichr(lineHash[line]))
- else:
- if len(lineArray) == maxLines:
- # Bail out at 65535 because unichr(65536) throws.
- line = text[lineStart:]
- lineEnd = len(text)
- lineArray.append(line)
- lineHash[line] = len(lineArray) - 1
- chars.append(unichr(len(lineArray) - 1))
- lineStart = lineEnd + 1
- return "".join(chars)
-
- # Allocate 2/3rds of the space for text1, the rest for text2.
- maxLines = 40000
- chars1 = diff_linesToCharsMunge(text1)
- maxLines = 65535
- chars2 = diff_linesToCharsMunge(text2)
- return (chars1, chars2, lineArray)
-
- def diff_charsToLines(self, diffs, lineArray):
- """Rehydrate the text in a diff from a string of line hashes to real lines
- of text.
-
- Args:
- diffs: Array of diff tuples.
- lineArray: Array of unique strings.
- """
- for i in xrange(len(diffs)):
- text = []
- for char in diffs[i][1]:
- text.append(lineArray[ord(char)])
- diffs[i] = (diffs[i][0], "".join(text))
-
- def diff_commonPrefix(self, text1, text2):
- """Determine the common prefix of two strings.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- The number of characters common to the start of each string.
- """
- # Quick check for common null cases.
- if not text1 or not text2 or text1[0] != text2[0]:
- return 0
- # Binary search.
- # Performance analysis: https://neil.fraser.name/news/2007/10/09/
- pointermin = 0
- pointermax = min(len(text1), len(text2))
- pointermid = pointermax
- pointerstart = 0
- while pointermin < pointermid:
- if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
- pointermin = pointermid
- pointerstart = pointermin
- else:
- pointermax = pointermid
- pointermid = (pointermax - pointermin) // 2 + pointermin
- return pointermid
-
- def diff_commonSuffix(self, text1, text2):
- """Determine the common suffix of two strings.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- The number of characters common to the end of each string.
- """
- # Quick check for common null cases.
- if not text1 or not text2 or text1[-1] != text2[-1]:
- return 0
- # Binary search.
- # Performance analysis: https://neil.fraser.name/news/2007/10/09/
- pointermin = 0
- pointermax = min(len(text1), len(text2))
- pointermid = pointermax
- pointerend = 0
- while pointermin < pointermid:
- if (
- text1[-pointermid : len(text1) - pointerend]
- == text2[-pointermid : len(text2) - pointerend]
- ):
- pointermin = pointermid
- pointerend = pointermin
- else:
- pointermax = pointermid
- pointermid = (pointermax - pointermin) // 2 + pointermin
- return pointermid
-
- def diff_commonOverlap(self, text1, text2):
- """Determine if the suffix of one string is the prefix of another.
-
- Args:
- text1 First string.
- text2 Second string.
-
- Returns:
- The number of characters common to the end of the first
- string and the start of the second string.
- """
- # Cache the text lengths to prevent multiple calls.
- text1_length = len(text1)
- text2_length = len(text2)
- # Eliminate the null case.
- if text1_length == 0 or text2_length == 0:
- return 0
- # Truncate the longer string.
- if text1_length > text2_length:
- text1 = text1[-text2_length:]
- elif text1_length < text2_length:
- text2 = text2[:text1_length]
- text_length = min(text1_length, text2_length)
- # Quick check for the worst case.
- if text1 == text2:
- return text_length
-
- # Start by looking for a single character match
- # and increase length until no match is found.
- # Performance analysis: https://neil.fraser.name/news/2010/11/04/
- best = 0
- length = 1
- while True:
- pattern = text1[-length:]
- found = text2.find(pattern)
- if found == -1:
- return best
- length += found
- if found == 0 or text1[-length:] == text2[:length]:
- best = length
- length += 1
-
- def diff_halfMatch(self, text1, text2):
- """Do the two texts share a substring which is at least half the length of
- the longer text?
- This speedup can produce non-minimal diffs.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- Five element Array, containing the prefix of text1, the suffix of text1,
- the prefix of text2, the suffix of text2 and the common middle. Or None
- if there was no match.
- """
- if self.Diff_Timeout <= 0:
- # Don't risk returning a non-optimal diff if we have unlimited time.
- return None
- if len(text1) > len(text2):
- (longtext, shorttext) = (text1, text2)
- else:
- (shorttext, longtext) = (text1, text2)
- if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
- return None # Pointless.
-
- def diff_halfMatchI(longtext, shorttext, i):
- """Does a substring of shorttext exist within longtext such that the
- substring is at least half the length of longtext?
- Closure, but does not reference any external variables other than self.
-
- Args:
- longtext: Longer string.
- shorttext: Shorter string.
- i: Start index of quarter length substring within longtext.
-
- Returns:
- Five element Array, containing the prefix of longtext, the suffix of
- longtext, the prefix of shorttext, the suffix of shorttext and the
- common middle. Or None if there was no match.
- """
- seed = longtext[i : i + len(longtext) // 4]
- best_common = ""
- j = shorttext.find(seed)
- while j != -1:
- prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
- suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
- if len(best_common) < suffixLength + prefixLength:
- best_common = (
- shorttext[j - suffixLength : j]
- + shorttext[j : j + prefixLength]
- )
- best_longtext_a = longtext[: i - suffixLength]
- best_longtext_b = longtext[i + prefixLength :]
- best_shorttext_a = shorttext[: j - suffixLength]
- best_shorttext_b = shorttext[j + prefixLength :]
- j = shorttext.find(seed, j + 1)
-
- if len(best_common) * 2 >= len(longtext):
- return (
- best_longtext_a,
- best_longtext_b,
- best_shorttext_a,
- best_shorttext_b,
- best_common,
- )
- else:
- return None
-
- # First check if the second quarter is the seed for a half-match.
- hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
- # Check again based on the third quarter.
- hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
- if not hm1 and not hm2:
- return None
- elif not hm2:
- hm = hm1
- elif not hm1:
- hm = hm2
- else:
- # Both matched. Select the longest.
- if len(hm1[4]) > len(hm2[4]):
- hm = hm1
- else:
- hm = hm2
-
- # A half-match was found, sort out the return data.
- if len(text1) > len(text2):
- (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
- else:
- (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
- return (text1_a, text1_b, text2_a, text2_b, mid_common)
-
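- # Example for diff_halfMatch above (illustrative; with the default Diff_Timeout > 0,
- # otherwise None is always returned):
- #   dmp.diff_halfMatch("1234567890", "a345678z")
- #   # -> ("12", "90", "a", "z", "345678")
-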
- def diff_cleanupSemantic(self, diffs):
- """Reduce the number of edits by eliminating semantically trivial
- equalities.
-
- Args:
- diffs: Array of diff tuples.
- """
- changes = False
- equalities = [] # Stack of indices where equalities are found.
- lastEquality = None # Always equal to diffs[equalities[-1]][1]
- pointer = 0 # Index of current position.
- # Number of chars that changed prior to the equality.
- length_insertions1, length_deletions1 = 0, 0
- # Number of chars that changed after the equality.
- length_insertions2, length_deletions2 = 0, 0
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
- equalities.append(pointer)
- length_insertions1, length_insertions2 = length_insertions2, 0
- length_deletions1, length_deletions2 = length_deletions2, 0
- lastEquality = diffs[pointer][1]
- else: # An insertion or deletion.
- if diffs[pointer][0] == self.DIFF_INSERT:
- length_insertions2 += len(diffs[pointer][1])
- else:
- length_deletions2 += len(diffs[pointer][1])
- # Eliminate an equality that is smaller or equal to the edits on both
- # sides of it.
- if (
- lastEquality
- and (
- len(lastEquality) <= max(length_insertions1, length_deletions1)
- )
- and (
- len(lastEquality) <= max(length_insertions2, length_deletions2)
- )
- ):
- # Duplicate record.
- diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
- # Change second copy to insert.
- diffs[equalities[-1] + 1] = (
- self.DIFF_INSERT,
- diffs[equalities[-1] + 1][1],
- )
- # Throw away the equality we just deleted.
- equalities.pop()
- # Throw away the previous equality (it needs to be reevaluated).
- if len(equalities):
- equalities.pop()
- if len(equalities):
- pointer = equalities[-1]
- else:
- pointer = -1
- # Reset the counters.
- length_insertions1, length_deletions1 = 0, 0
- length_insertions2, length_deletions2 = 0, 0
- lastEquality = None
- changes = True
- pointer += 1
-
- # Normalize the diff.
- if changes:
- self.diff_cleanupMerge(diffs)
- self.diff_cleanupSemanticLossless(diffs)
-
- # Find any overlaps between deletions and insertions.
- # e.g: <del>abcxxx</del><ins>xxxdef</ins>
- # -> <del>abc</del>xxx<ins>def</ins>
- # e.g: <del>xxxabc</del><ins>defxxx</ins>
- # -> <ins>def</ins>xxx<del>abc</del>
- # Only extract an overlap if it is as big as the edit ahead or behind it.
- pointer = 1
- while pointer < len(diffs):
- if (
- diffs[pointer - 1][0] == self.DIFF_DELETE
- and diffs[pointer][0] == self.DIFF_INSERT
- ):
- deletion = diffs[pointer - 1][1]
- insertion = diffs[pointer][1]
- overlap_length1 = self.diff_commonOverlap(deletion, insertion)
- overlap_length2 = self.diff_commonOverlap(insertion, deletion)
- if overlap_length1 >= overlap_length2:
- if (
- overlap_length1 >= len(deletion) / 2.0
- or overlap_length1 >= len(insertion) / 2.0
- ):
- # Overlap found. Insert an equality and trim the surrounding edits.
- diffs.insert(
- pointer, (self.DIFF_EQUAL, insertion[:overlap_length1])
- )
- diffs[pointer - 1] = (
- self.DIFF_DELETE,
- deletion[: len(deletion) - overlap_length1],
- )
- diffs[pointer + 1] = (
- self.DIFF_INSERT,
- insertion[overlap_length1:],
- )
- pointer += 1
- else:
- if (
- overlap_length2 >= len(deletion) / 2.0
- or overlap_length2 >= len(insertion) / 2.0
- ):
- # Reverse overlap found.
- # Insert an equality and swap and trim the surrounding edits.
- diffs.insert(
- pointer, (self.DIFF_EQUAL, deletion[:overlap_length2])
- )
- diffs[pointer - 1] = (
- self.DIFF_INSERT,
- insertion[: len(insertion) - overlap_length2],
- )
- diffs[pointer + 1] = (
- self.DIFF_DELETE,
- deletion[overlap_length2:],
- )
- pointer += 1
- pointer += 1
- pointer += 1
-
- def diff_cleanupSemanticLossless(self, diffs):
- """Look for single edits surrounded on both sides by equalities
- which can be shifted sideways to align the edit to a word boundary.
- e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
-
- Args:
- diffs: Array of diff tuples.
- """
-
- def diff_cleanupSemanticScore(one, two):
- """Given two strings, compute a score representing whether the
- internal boundary falls on logical boundaries.
- Scores range from 6 (best) to 0 (worst).
- Closure, but does not reference any external variables.
-
- Args:
- one: First string.
- two: Second string.
-
- Returns:
- The score.
- """
- if not one or not two:
- # Edges are the best.
- return 6
-
- # Each port of this function behaves slightly differently due to
- # subtle differences in each language's definition of things like
- # 'whitespace'. Since this function's purpose is largely cosmetic,
- # the choice has been made to use each language's native features
- # rather than force total conformity.
- char1 = one[-1]
- char2 = two[0]
- nonAlphaNumeric1 = not char1.isalnum()
- nonAlphaNumeric2 = not char2.isalnum()
- whitespace1 = nonAlphaNumeric1 and char1.isspace()
- whitespace2 = nonAlphaNumeric2 and char2.isspace()
- lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
- lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
- blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
- blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
-
- if blankLine1 or blankLine2:
- # Five points for blank lines.
- return 5
- elif lineBreak1 or lineBreak2:
- # Four points for line breaks.
- return 4
- elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
- # Three points for end of sentences.
- return 3
- elif whitespace1 or whitespace2:
- # Two points for whitespace.
- return 2
- elif nonAlphaNumeric1 or nonAlphaNumeric2:
- # One point for non-alphanumeric.
- return 1
- return 0
-
- pointer = 1
- # Intentionally ignore the first and last element (don't need checking).
- while pointer < len(diffs) - 1:
- if (
- diffs[pointer - 1][0] == self.DIFF_EQUAL
- and diffs[pointer + 1][0] == self.DIFF_EQUAL
- ):
- # This is a single edit surrounded by equalities.
- equality1 = diffs[pointer - 1][1]
- edit = diffs[pointer][1]
- equality2 = diffs[pointer + 1][1]
-
- # First, shift the edit as far left as possible.
- commonOffset = self.diff_commonSuffix(equality1, edit)
- if commonOffset:
- commonString = edit[-commonOffset:]
- equality1 = equality1[:-commonOffset]
- edit = commonString + edit[:-commonOffset]
- equality2 = commonString + equality2
-
- # Second, step character by character right, looking for the best fit.
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
- bestScore = diff_cleanupSemanticScore(
- equality1, edit
- ) + diff_cleanupSemanticScore(edit, equality2)
- while edit and equality2 and edit[0] == equality2[0]:
- equality1 += edit[0]
- edit = edit[1:] + equality2[0]
- equality2 = equality2[1:]
- score = diff_cleanupSemanticScore(
- equality1, edit
- ) + diff_cleanupSemanticScore(edit, equality2)
- # The >= encourages trailing rather than leading whitespace on edits.
- if score >= bestScore:
- bestScore = score
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
-
- if diffs[pointer - 1][1] != bestEquality1:
- # We have an improvement, save it back to the diff.
- if bestEquality1:
- diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
- else:
- del diffs[pointer - 1]
- pointer -= 1
- diffs[pointer] = (diffs[pointer][0], bestEdit)
- if bestEquality2:
- diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
- else:
- del diffs[pointer + 1]
- pointer -= 1
- pointer += 1
-
- # Define some regex patterns for matching boundaries.
- BLANKLINEEND = re.compile(r"\n\r?\n$")
- BLANKLINESTART = re.compile(r"^\r?\n\r?\n")
-
- def diff_cleanupEfficiency(self, diffs):
- """Reduce the number of edits by eliminating operationally trivial
- equalities.
-
- Args:
- diffs: Array of diff tuples.
- """
- changes = False
- equalities = [] # Stack of indices where equalities are found.
- lastEquality = None # Always equal to diffs[equalities[-1]][1]
- pointer = 0 # Index of current position.
- pre_ins = False # Is there an insertion operation before the last equality.
- pre_del = False # Is there a deletion operation before the last equality.
- post_ins = False # Is there an insertion operation after the last equality.
- post_del = False # Is there a deletion operation after the last equality.
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
- if len(diffs[pointer][1]) < self.Diff_EditCost and (
- post_ins or post_del
- ):
- # Candidate found.
- equalities.append(pointer)
- pre_ins = post_ins
- pre_del = post_del
- lastEquality = diffs[pointer][1]
- else:
- # Not a candidate, and can never become one.
- equalities = []
- lastEquality = None
-
- post_ins = post_del = False
- else: # An insertion or deletion.
- if diffs[pointer][0] == self.DIFF_DELETE:
- post_del = True
- else:
- post_ins = True
-
- # Five types to be split:
- # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
- # <ins>A</ins>X<ins>C</ins><del>D</del>
- # <ins>A</ins><del>B</del>X<ins>C</ins>
- # <del>A</del>X<ins>C</ins><del>D</del>
- # <ins>A</ins><del>B</del>X<del>C</del>
-
- if lastEquality and (
- (pre_ins and pre_del and post_ins and post_del)
- or (
- (len(lastEquality) < self.Diff_EditCost / 2)
- and (pre_ins + pre_del + post_ins + post_del) == 3
- )
- ):
- # Duplicate record.
- diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
- # Change second copy to insert.
- diffs[equalities[-1] + 1] = (
- self.DIFF_INSERT,
- diffs[equalities[-1] + 1][1],
- )
- equalities.pop() # Throw away the equality we just deleted.
- lastEquality = None
- if pre_ins and pre_del:
- # No changes made which could affect previous entry, keep going.
- post_ins = post_del = True
- equalities = []
- else:
- if len(equalities):
- equalities.pop() # Throw away the previous equality.
- if len(equalities):
- pointer = equalities[-1]
- else:
- pointer = -1
- post_ins = post_del = False
- changes = True
- pointer += 1
-
- if changes:
- self.diff_cleanupMerge(diffs)
-
- def diff_cleanupMerge(self, diffs):
- """Reorder and merge like edit sections. Merge equalities.
- Any edit section can move as long as it doesn't cross an equality.
-
- Args:
- diffs: Array of diff tuples.
- """
- diffs.append((self.DIFF_EQUAL, "")) # Add a dummy entry at the end.
- pointer = 0
- count_delete = 0
- count_insert = 0
- text_delete = ""
- text_insert = ""
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_INSERT:
- count_insert += 1
- text_insert += diffs[pointer][1]
- pointer += 1
- elif diffs[pointer][0] == self.DIFF_DELETE:
- count_delete += 1
- text_delete += diffs[pointer][1]
- pointer += 1
- elif diffs[pointer][0] == self.DIFF_EQUAL:
- # Upon reaching an equality, check for prior redundancies.
- if count_delete + count_insert > 1:
- if count_delete != 0 and count_insert != 0:
- # Factor out any common prefixes.
- commonlength = self.diff_commonPrefix(text_insert, text_delete)
- if commonlength != 0:
- x = pointer - count_delete - count_insert - 1
- if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
- diffs[x] = (
- diffs[x][0],
- diffs[x][1] + text_insert[:commonlength],
- )
- else:
- diffs.insert(
- 0, (self.DIFF_EQUAL, text_insert[:commonlength])
- )
- pointer += 1
- text_insert = text_insert[commonlength:]
- text_delete = text_delete[commonlength:]
- # Factor out any common suffixes.
- commonlength = self.diff_commonSuffix(text_insert, text_delete)
- if commonlength != 0:
- diffs[pointer] = (
- diffs[pointer][0],
- text_insert[-commonlength:] + diffs[pointer][1],
- )
- text_insert = text_insert[:-commonlength]
- text_delete = text_delete[:-commonlength]
- # Delete the offending records and add the merged ones.
- new_ops = []
- if len(text_delete) != 0:
- new_ops.append((self.DIFF_DELETE, text_delete))
- if len(text_insert) != 0:
- new_ops.append((self.DIFF_INSERT, text_insert))
- pointer -= count_delete + count_insert
- diffs[pointer : pointer + count_delete + count_insert] = new_ops
- pointer += len(new_ops) + 1
- elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
- # Merge this equality with the previous one.
- diffs[pointer - 1] = (
- diffs[pointer - 1][0],
- diffs[pointer - 1][1] + diffs[pointer][1],
- )
- del diffs[pointer]
- else:
- pointer += 1
-
- count_insert = 0
- count_delete = 0
- text_delete = ""
- text_insert = ""
-
- if diffs[-1][1] == "":
- diffs.pop() # Remove the dummy entry at the end.
-
- # Second pass: look for single edits surrounded on both sides by equalities
- # which can be shifted sideways to eliminate an equality.
- # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
- changes = False
- pointer = 1
- # Intentionally ignore the first and last element (don't need checking).
- while pointer < len(diffs) - 1:
- if (
- diffs[pointer - 1][0] == self.DIFF_EQUAL
- and diffs[pointer + 1][0] == self.DIFF_EQUAL
- ):
- # This is a single edit surrounded by equalities.
- if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
- # Shift the edit over the previous equality.
- if diffs[pointer - 1][1] != "":
- diffs[pointer] = (
- diffs[pointer][0],
- diffs[pointer - 1][1]
- + diffs[pointer][1][: -len(diffs[pointer - 1][1])],
- )
- diffs[pointer + 1] = (
- diffs[pointer + 1][0],
- diffs[pointer - 1][1] + diffs[pointer + 1][1],
- )
- del diffs[pointer - 1]
- changes = True
- elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
- # Shift the edit over the next equality.
- diffs[pointer - 1] = (
- diffs[pointer - 1][0],
- diffs[pointer - 1][1] + diffs[pointer + 1][1],
- )
- diffs[pointer] = (
- diffs[pointer][0],
- diffs[pointer][1][len(diffs[pointer + 1][1]) :]
- + diffs[pointer + 1][1],
- )
- del diffs[pointer + 1]
- changes = True
- pointer += 1
-
- # If shifts were made, the diff needs reordering and another shift sweep.
- if changes:
- self.diff_cleanupMerge(diffs)
-
- def diff_xIndex(self, diffs, loc):
- """loc is a location in text1, compute and return the equivalent location
- in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
-
- Args:
- diffs: Array of diff tuples.
- loc: Location within text1.
-
- Returns:
- Location within text2.
- """
- chars1 = 0
- chars2 = 0
- last_chars1 = 0
- last_chars2 = 0
- for x in xrange(len(diffs)):
- (op, text) = diffs[x]
- if op != self.DIFF_INSERT: # Equality or deletion.
- chars1 += len(text)
- if op != self.DIFF_DELETE: # Equality or insertion.
- chars2 += len(text)
- if chars1 > loc: # Overshot the location.
- break
- last_chars1 = chars1
- last_chars2 = chars2
-
- if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
- # The location was deleted.
- return last_chars2
- # Add the remaining character length.
- return last_chars2 + (loc - last_chars1)
-
- def diff_prettyHtml(self, diffs):
- """Convert a diff array into a pretty HTML report.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- HTML representation.
- """
- html = []
- for (op, data) in diffs:
- text = (
- data.replace("&", "&amp;")
- .replace("<", "&lt;")
- .replace(">", "&gt;")
- .replace("\n", "&para;<br>")
- )
- if op == self.DIFF_INSERT:
- html.append('<ins style="background:#e6ffe6;">%s</ins>' % text)
- elif op == self.DIFF_DELETE:
- html.append('<del style="background:#ffe6e6;">%s</del>' % text)
- elif op == self.DIFF_EQUAL:
- html.append("<span>%s</span>" % text)
- return "".join(html)
-
- def diff_text1(self, diffs):
- """Compute and return the source text (all equalities and deletions).
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Source text.
- """
- text = []
- for (op, data) in diffs:
- if op != self.DIFF_INSERT:
- text.append(data)
- return "".join(text)
-
- def diff_text2(self, diffs):
- """Compute and return the destination text (all equalities and insertions).
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Destination text.
- """
- text = []
- for (op, data) in diffs:
- if op != self.DIFF_DELETE:
- text.append(data)
- return "".join(text)
-
- def diff_levenshtein(self, diffs):
- """Compute the Levenshtein distance; the number of inserted, deleted or
- substituted characters.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Number of changes.
- """
- levenshtein = 0
- insertions = 0
- deletions = 0
- for (op, data) in diffs:
- if op == self.DIFF_INSERT:
- insertions += len(data)
- elif op == self.DIFF_DELETE:
- deletions += len(data)
- elif op == self.DIFF_EQUAL:
- # A deletion and an insertion is one substitution.
- levenshtein += max(insertions, deletions)
- insertions = 0
- deletions = 0
- levenshtein += max(insertions, deletions)
- return levenshtein
-
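- # Example for diff_levenshtein above (illustrative):
- #   dmp.diff_levenshtein([(-1, "Hello"), (1, "Goodbye"), (0, " World.")])  # -> 7
- #   The paired 5-char deletion and 7-char insertion count as one substitution: max(5, 7).
-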
- def diff_toDelta(self, diffs):
- """Crush the diff into an encoded string which describes the operations
- required to transform text1 into text2.
- E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
- Operations are tab-separated. Inserted text is escaped using %xx notation.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Delta text.
- """
- text = []
- for (op, data) in diffs:
- if op == self.DIFF_INSERT:
- # High ascii will raise UnicodeDecodeError. Use Unicode instead.
- data = data.encode("utf-8")
- text.append("+" + urllib.quote(data, "!~*'();/?:@&=+$,# "))
- elif op == self.DIFF_DELETE:
- text.append("-%d" % len(data))
- elif op == self.DIFF_EQUAL:
- text.append("=%d" % len(data))
- return "\t".join(text)
-
- def diff_fromDelta(self, text1, delta):
- """Given the original text1, and an encoded string which describes the
- operations required to transform text1 into text2, compute the full diff.
-
- Args:
- text1: Source string for the diff.
- delta: Delta text.
-
- Returns:
- Array of diff tuples.
-
- Raises:
- ValueError: If invalid input.
- """
- if type(delta) == unicode:
- # Deltas should be composed of a subset of ascii chars, Unicode not
- # required. If this encode raises UnicodeEncodeError, delta is invalid.
- delta = delta.encode("ascii")
- diffs = []
- pointer = 0 # Cursor in text1
- tokens = delta.split("\t")
- for token in tokens:
- if token == "":
- # Blank tokens are ok (from a trailing \t).
- continue
- # Each token begins with a one character parameter which specifies the
- # operation of this token (delete, insert, equality).
- param = token[1:]
- if token[0] == "+":
- param = urllib.unquote(param).decode("utf-8")
- diffs.append((self.DIFF_INSERT, param))
- elif token[0] == "-" or token[0] == "=":
- try:
- n = int(param)
- except ValueError:
- raise ValueError("Invalid number in diff_fromDelta: " + param)
- if n < 0:
- raise ValueError("Negative number in diff_fromDelta: " + param)
- text = text1[pointer : pointer + n]
- pointer += n
- if token[0] == "=":
- diffs.append((self.DIFF_EQUAL, text))
- else:
- diffs.append((self.DIFF_DELETE, text))
- else:
- # Anything else is an error.
- raise ValueError(
- "Invalid diff operation in diff_fromDelta: " + token[0]
- )
- if pointer != len(text1):
- raise ValueError(
- "Delta length (%d) does not equal source text length (%d)."
- % (pointer, len(text1))
- )
- return diffs
-
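- # Delta round-trip sketch (illustrative; assumes dmp = diff_match_patch()):
- #   diffs = dmp.diff_main("The cat", "The big cat")  # [(0, "The "), (1, "big "), (0, "cat")]
- #   delta = dmp.diff_toDelta(diffs)                   # "=4\t+big \t=3"
- #   dmp.diff_fromDelta("The cat", delta) == diffs     # True
-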
- # MATCH FUNCTIONS
-
- def match_main(self, text, pattern, loc):
- """Locate the best instance of 'pattern' in 'text' near 'loc'.
-
- Args:
- text: The text to search.
- pattern: The pattern to search for.
- loc: The location to search around.
-
- Returns:
- Best match index or -1.
- """
- # Check for null inputs.
- if text is None or pattern is None:
- raise ValueError("Null inputs. (match_main)")
-
- loc = max(0, min(loc, len(text)))
- if text == pattern:
- # Shortcut (potentially not guaranteed by the algorithm)
- return 0
- elif not text:
- # Nothing to match.
- return -1
- elif text[loc : loc + len(pattern)] == pattern:
- # Perfect match at the perfect spot! (Includes case of null pattern)
- return loc
- else:
- # Do a fuzzy compare.
- match = self.match_bitap(text, pattern, loc)
- return match
-
- def match_bitap(self, text, pattern, loc):
- """Locate the best instance of 'pattern' in 'text' near 'loc' using the
- Bitap algorithm.
-
- Args:
- text: The text to search.
- pattern: The pattern to search for.
- loc: The location to search around.
-
- Returns:
- Best match index or -1.
- """
- # Python doesn't have a maxint limit, so ignore this check.
- # if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
- # raise ValueError("Pattern too long for this application.")
-
- # Initialise the alphabet.
- s = self.match_alphabet(pattern)
-
- def match_bitapScore(e, x):
- """Compute and return the score for a match with e errors and x location.
- Accesses loc and pattern through being a closure.
-
- Args:
- e: Number of errors in match.
- x: Location of match.
-
- Returns:
- Overall score for match (0.0 = good, 1.0 = bad).
- """
- accuracy = float(e) / len(pattern)
- proximity = abs(loc - x)
- if not self.Match_Distance:
- # Dodge divide by zero error.
- return 1.0 if proximity else accuracy
- return accuracy + (proximity / float(self.Match_Distance))
-
- # Highest score beyond which we give up.
- score_threshold = self.Match_Threshold
- # Is there a nearby exact match? (speedup)
- best_loc = text.find(pattern, loc)
- if best_loc != -1:
- score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
- # What about in the other direction? (speedup)
- best_loc = text.rfind(pattern, loc + len(pattern))
- if best_loc != -1:
- score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
-
- # Initialise the bit arrays.
- matchmask = 1 << (len(pattern) - 1)
- best_loc = -1
-
- bin_max = len(pattern) + len(text)
- # Empty initialization added to appease pychecker.
- last_rd = None
- for d in xrange(len(pattern)):
- # Scan for the best match each iteration allows for one more error.
- # Run a binary search to determine how far from 'loc' we can stray at
- # this error level.
- bin_min = 0
- bin_mid = bin_max
- while bin_min < bin_mid:
- if match_bitapScore(d, loc + bin_mid) <= score_threshold:
- bin_min = bin_mid
- else:
- bin_max = bin_mid
- bin_mid = (bin_max - bin_min) // 2 + bin_min
-
- # Use the result from this iteration as the maximum for the next.
- bin_max = bin_mid
- start = max(1, loc - bin_mid + 1)
- finish = min(loc + bin_mid, len(text)) + len(pattern)
-
- rd = [0] * (finish + 2)
- rd[finish + 1] = (1 << d) - 1
- for j in xrange(finish, start - 1, -1):
- if len(text) <= j - 1:
- # Out of range.
- charMatch = 0
- else:
- charMatch = s.get(text[j - 1], 0)
- if d == 0: # First pass: exact match.
- rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
- else: # Subsequent passes: fuzzy match.
- rd[j] = (
- (((rd[j + 1] << 1) | 1) & charMatch)
- | (((last_rd[j + 1] | last_rd[j]) << 1) | 1)
- | last_rd[j + 1]
- )
- if rd[j] & matchmask:
- score = match_bitapScore(d, j - 1)
- # This match will almost certainly be better than any existing match.
- # But check anyway.
- if score <= score_threshold:
- # Told you so.
- score_threshold = score
- best_loc = j - 1
- if best_loc > loc:
- # When passing loc, don't exceed our current distance from loc.
- start = max(1, 2 * loc - best_loc)
- else:
- # Already passed loc, downhill from here on in.
- break
- # No hope for a (better) match at greater error levels.
- if match_bitapScore(d + 1, loc) > score_threshold:
- break
- last_rd = rd
- return best_loc
-
- def match_alphabet(self, pattern):
- """Initialise the alphabet for the Bitap algorithm.
-
- Args:
- pattern: The text to encode.
-
- Returns:
- Hash of character locations.
- """
- s = {}
- for char in pattern:
- s[char] = 0
- for i in xrange(len(pattern)):
- s[pattern[i]] |= 1 << (len(pattern) - i - 1)
- return s
-
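- # Example for match_alphabet above (illustrative): match_alphabet("abc") returns
- # {'a': 4, 'b': 2, 'c': 1} -- the first pattern character owns the highest bit.
-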
- # PATCH FUNCTIONS
-
- def patch_addContext(self, patch, text):
- """Increase the context until it is unique,
- but don't let the pattern expand beyond Match_MaxBits.
-
- Args:
- patch: The patch to grow.
- text: Source text.
- """
- if len(text) == 0:
- return
- pattern = text[patch.start2 : patch.start2 + patch.length1]
- padding = 0
-
- # Look for the first and last matches of pattern in text. If two different
- # matches are found, increase the pattern length.
- while text.find(pattern) != text.rfind(pattern) and (
- self.Match_MaxBits == 0
- or len(pattern) < self.Match_MaxBits - self.Patch_Margin - self.Patch_Margin
- ):
- padding += self.Patch_Margin
- pattern = text[
- max(0, patch.start2 - padding) : patch.start2 + patch.length1 + padding
- ]
- # Add one chunk for good luck.
- padding += self.Patch_Margin
-
- # Add the prefix.
- prefix = text[max(0, patch.start2 - padding) : patch.start2]
- if prefix:
- patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
- # Add the suffix.
- suffix = text[
- patch.start2 + patch.length1 : patch.start2 + patch.length1 + padding
- ]
- if suffix:
- patch.diffs.append((self.DIFF_EQUAL, suffix))
-
- # Roll back the start points.
- patch.start1 -= len(prefix)
- patch.start2 -= len(prefix)
- # Extend lengths.
- patch.length1 += len(prefix) + len(suffix)
- patch.length2 += len(prefix) + len(suffix)
-
- def patch_make(self, a, b=None, c=None):
- """Compute a list of patches to turn text1 into text2.
- Use diffs if provided, otherwise compute it ourselves.
- There are four ways to call this function, depending on what data is
- available to the caller:
- Method 1:
- a = text1, b = text2
- Method 2:
- a = diffs
- Method 3 (optimal):
- a = text1, b = diffs
- Method 4 (deprecated, use method 3):
- a = text1, b = text2, c = diffs
-
- Args:
- a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
- text2 (method 2).
- b: text2 (methods 1,4) or Array of diff tuples for text1 to
- text2 (method 3) or undefined (method 2).
- c: Array of diff tuples for text1 to text2 (method 4) or
- undefined (methods 1,2,3).
-
- Returns:
- Array of Patch objects.
- """
- text1 = None
- diffs = None
- # Note that texts may arrive as 'str' or 'unicode'.
- if isinstance(a, basestring) and isinstance(b, basestring) and c is None:
- # Method 1: text1, text2
- # Compute diffs from text1 and text2.
- text1 = a
- diffs = self.diff_main(text1, b, True)
- if len(diffs) > 2:
- self.diff_cleanupSemantic(diffs)
- self.diff_cleanupEfficiency(diffs)
- elif isinstance(a, list) and b is None and c is None:
- # Method 2: diffs
- # Compute text1 from diffs.
- diffs = a
- text1 = self.diff_text1(diffs)
- elif isinstance(a, basestring) and isinstance(b, list) and c is None:
- # Method 3: text1, diffs
- text1 = a
- diffs = b
- elif (
- isinstance(a, basestring)
- and isinstance(b, basestring)
- and isinstance(c, list)
- ):
- # Method 4: text1, text2, diffs
- # text2 is not used.
- text1 = a
- diffs = c
- else:
- raise ValueError("Unknown call format to patch_make.")
-
- if not diffs:
- return [] # Get rid of the None case.
- patches = []
- patch = patch_obj()
- char_count1 = 0 # Number of characters into the text1 string.
- char_count2 = 0 # Number of characters into the text2 string.
- prepatch_text = text1 # Recreate the patches to determine context info.
- postpatch_text = text1
- for x in xrange(len(diffs)):
- (diff_type, diff_text) = diffs[x]
- if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
- # A new patch starts here.
- patch.start1 = char_count1
- patch.start2 = char_count2
- if diff_type == self.DIFF_INSERT:
- # Insertion
- patch.diffs.append(diffs[x])
- patch.length2 += len(diff_text)
- postpatch_text = (
- postpatch_text[:char_count2]
- + diff_text
- + postpatch_text[char_count2:]
- )
- elif diff_type == self.DIFF_DELETE:
- # Deletion.
- patch.length1 += len(diff_text)
- patch.diffs.append(diffs[x])
- postpatch_text = (
- postpatch_text[:char_count2]
- + postpatch_text[char_count2 + len(diff_text) :]
- )
- elif (
- diff_type == self.DIFF_EQUAL
- and len(diff_text) <= 2 * self.Patch_Margin
- and len(patch.diffs) != 0
- and len(diffs) != x + 1
- ):
- # Small equality inside a patch.
- patch.diffs.append(diffs[x])
- patch.length1 += len(diff_text)
- patch.length2 += len(diff_text)
-
- if diff_type == self.DIFF_EQUAL and len(diff_text) >= 2 * self.Patch_Margin:
- # Time for a new patch.
- if len(patch.diffs) != 0:
- self.patch_addContext(patch, prepatch_text)
- patches.append(patch)
- patch = patch_obj()
- # Unlike Unidiff, our patch lists have a rolling context.
- # https://github.com/google/diff-match-patch/wiki/Unidiff
- # Update prepatch text & pos to reflect the application of the
- # just completed patch.
- prepatch_text = postpatch_text
- char_count1 = char_count2
-
- # Update the current character count.
- if diff_type != self.DIFF_INSERT:
- char_count1 += len(diff_text)
- if diff_type != self.DIFF_DELETE:
- char_count2 += len(diff_text)
-
- # Pick up the leftover patch if not empty.
- if len(patch.diffs) != 0:
- self.patch_addContext(patch, prepatch_text)
- patches.append(patch)
- return patches
-
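- # Usage sketch for patch_make above (illustrative; text1, text2 and diffs stand for
- # values the caller already has, and dmp = diff_match_patch()):
- #   patches = dmp.patch_make(text1, text2)   # method 1: from two texts
- #   patches = dmp.patch_make(diffs)          # method 2: from precomputed diffs
- #   patches = dmp.patch_make(text1, diffs)   # method 3 (optimal)
- #   serialized = dmp.patch_toText(patches)
-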
- def patch_deepCopy(self, patches):
- """Given an array of patches, return another array that is identical.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- Array of Patch objects.
- """
- patchesCopy = []
- for patch in patches:
- patchCopy = patch_obj()
- # No need to deep copy the tuples since they are immutable.
- patchCopy.diffs = patch.diffs[:]
- patchCopy.start1 = patch.start1
- patchCopy.start2 = patch.start2
- patchCopy.length1 = patch.length1
- patchCopy.length2 = patch.length2
- patchesCopy.append(patchCopy)
- return patchesCopy
-
- def patch_apply(self, patches, text):
- """Merge a set of patches onto the text. Return a patched text, as well
- as a list of true/false values indicating which patches were applied.
-
- Args:
- patches: Array of Patch objects.
- text: Old text.
-
- Returns:
- Two element Array, containing the new text and an array of boolean values.
- """
- if not patches:
- return (text, [])
-
- # Deep copy the patches so that no changes are made to originals.
- patches = self.patch_deepCopy(patches)
-
- nullPadding = self.patch_addPadding(patches)
- text = nullPadding + text + nullPadding
- self.patch_splitMax(patches)
-
- # delta keeps track of the offset between the expected and actual location
- # of the previous patch. If there are patches expected at positions 10 and
- # 20, but the first patch was found at 12, delta is 2 and the second patch
- # has an effective expected position of 22.
- delta = 0
- results = []
- for patch in patches:
- expected_loc = patch.start2 + delta
- text1 = self.diff_text1(patch.diffs)
- end_loc = -1
- if len(text1) > self.Match_MaxBits:
- # patch_splitMax will only provide an oversized pattern in the case of
- # a monster delete.
- start_loc = self.match_main(
- text, text1[: self.Match_MaxBits], expected_loc
- )
- if start_loc != -1:
- end_loc = self.match_main(
- text,
- text1[-self.Match_MaxBits :],
- expected_loc + len(text1) - self.Match_MaxBits,
- )
- if end_loc == -1 or start_loc >= end_loc:
- # Can't find valid trailing context. Drop this patch.
- start_loc = -1
- else:
- start_loc = self.match_main(text, text1, expected_loc)
- if start_loc == -1:
- # No match found. :(
- results.append(False)
- # Subtract the delta for this failed patch from subsequent patches.
- delta -= patch.length2 - patch.length1
- else:
- # Found a match. :)
- results.append(True)
- delta = start_loc - expected_loc
- if end_loc == -1:
- text2 = text[start_loc : start_loc + len(text1)]
- else:
- text2 = text[start_loc : end_loc + self.Match_MaxBits]
- if text1 == text2:
- # Perfect match, just shove the replacement text in.
- text = (
- text[:start_loc]
- + self.diff_text2(patch.diffs)
- + text[start_loc + len(text1) :]
- )
- else:
- # Imperfect match.
- # Run a diff to get a framework of equivalent indices.
- diffs = self.diff_main(text1, text2, False)
- if (
- len(text1) > self.Match_MaxBits
- and self.diff_levenshtein(diffs) / float(len(text1))
- > self.Patch_DeleteThreshold
- ):
- # The end points match, but the content is unacceptably bad.
- results[-1] = False
- else:
- self.diff_cleanupSemanticLossless(diffs)
- index1 = 0
- for (op, data) in patch.diffs:
- if op != self.DIFF_EQUAL:
- index2 = self.diff_xIndex(diffs, index1)
- if op == self.DIFF_INSERT: # Insertion
- text = (
- text[: start_loc + index2]
- + data
- + text[start_loc + index2 :]
- )
- elif op == self.DIFF_DELETE: # Deletion
- text = (
- text[: start_loc + index2]
- + text[
- start_loc
- + self.diff_xIndex(diffs, index1 + len(data)) :
- ]
- )
- if op != self.DIFF_DELETE:
- index1 += len(data)
- # Strip the padding off.
- text = text[len(nullPadding) : -len(nullPadding)]
- return (text, results)
-
- def patch_addPadding(self, patches):
- """Add some padding on text start and end so that edges can match
- something. Intended to be called only from within patch_apply.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- The padding string added to each side.
- """
- paddingLength = self.Patch_Margin
- nullPadding = ""
- for x in xrange(1, paddingLength + 1):
- nullPadding += chr(x)
-
- # Bump all the patches forward.
- for patch in patches:
- patch.start1 += paddingLength
- patch.start2 += paddingLength
-
- # Add some padding on start of first diff.
- patch = patches[0]
- diffs = patch.diffs
- if not diffs or diffs[0][0] != self.DIFF_EQUAL:
- # Add nullPadding equality.
- diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
- patch.start1 -= paddingLength # Should be 0.
- patch.start2 -= paddingLength # Should be 0.
- patch.length1 += paddingLength
- patch.length2 += paddingLength
- elif paddingLength > len(diffs[0][1]):
- # Grow first equality.
- extraLength = paddingLength - len(diffs[0][1])
- newText = nullPadding[len(diffs[0][1]) :] + diffs[0][1]
- diffs[0] = (diffs[0][0], newText)
- patch.start1 -= extraLength
- patch.start2 -= extraLength
- patch.length1 += extraLength
- patch.length2 += extraLength
-
- # Add some padding on end of last diff.
- patch = patches[-1]
- diffs = patch.diffs
- if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
- # Add nullPadding equality.
- diffs.append((self.DIFF_EQUAL, nullPadding))
- patch.length1 += paddingLength
- patch.length2 += paddingLength
- elif paddingLength > len(diffs[-1][1]):
- # Grow last equality.
- extraLength = paddingLength - len(diffs[-1][1])
- newText = diffs[-1][1] + nullPadding[:extraLength]
- diffs[-1] = (diffs[-1][0], newText)
- patch.length1 += extraLength
- patch.length2 += extraLength
-
- return nullPadding
-
- def patch_splitMax(self, patches):
- """Look through the patches and break up any which are longer than the
- maximum limit of the match algorithm.
- Intended to be called only from within patch_apply.
-
- Args:
- patches: Array of Patch objects.
- """
- patch_size = self.Match_MaxBits
- if patch_size == 0:
- # Python has the option of not splitting strings due to its ability
- # to handle integers of arbitrary precision.
- return
- for x in xrange(len(patches)):
- if patches[x].length1 <= patch_size:
- continue
- bigpatch = patches[x]
- # Remove the big old patch.
- del patches[x]
- x -= 1
- start1 = bigpatch.start1
- start2 = bigpatch.start2
- precontext = ""
- while len(bigpatch.diffs) != 0:
- # Create one of several smaller patches.
- patch = patch_obj()
- empty = True
- patch.start1 = start1 - len(precontext)
- patch.start2 = start2 - len(precontext)
- if precontext:
- patch.length1 = patch.length2 = len(precontext)
- patch.diffs.append((self.DIFF_EQUAL, precontext))
-
- while (
- len(bigpatch.diffs) != 0
- and patch.length1 < patch_size - self.Patch_Margin
- ):
- (diff_type, diff_text) = bigpatch.diffs[0]
- if diff_type == self.DIFF_INSERT:
- # Insertions are harmless.
- patch.length2 += len(diff_text)
- start2 += len(diff_text)
- patch.diffs.append(bigpatch.diffs.pop(0))
- empty = False
- elif (
- diff_type == self.DIFF_DELETE
- and len(patch.diffs) == 1
- and patch.diffs[0][0] == self.DIFF_EQUAL
- and len(diff_text) > 2 * patch_size
- ):
- # This is a large deletion. Let it pass in one chunk.
- patch.length1 += len(diff_text)
- start1 += len(diff_text)
- empty = False
- patch.diffs.append((diff_type, diff_text))
- del bigpatch.diffs[0]
- else:
- # Deletion or equality. Only take as much as we can stomach.
- diff_text = diff_text[
- : patch_size - patch.length1 - self.Patch_Margin
- ]
- patch.length1 += len(diff_text)
- start1 += len(diff_text)
- if diff_type == self.DIFF_EQUAL:
- patch.length2 += len(diff_text)
- start2 += len(diff_text)
- else:
- empty = False
-
- patch.diffs.append((diff_type, diff_text))
- if diff_text == bigpatch.diffs[0][1]:
- del bigpatch.diffs[0]
- else:
- bigpatch.diffs[0] = (
- bigpatch.diffs[0][0],
- bigpatch.diffs[0][1][len(diff_text) :],
- )
-
- # Compute the head context for the next patch.
- precontext = self.diff_text2(patch.diffs)
- precontext = precontext[-self.Patch_Margin :]
- # Append the end context for this patch.
- postcontext = self.diff_text1(bigpatch.diffs)[: self.Patch_Margin]
- if postcontext:
- patch.length1 += len(postcontext)
- patch.length2 += len(postcontext)
- if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
- patch.diffs[-1] = (
- self.DIFF_EQUAL,
- patch.diffs[-1][1] + postcontext,
- )
- else:
- patch.diffs.append((self.DIFF_EQUAL, postcontext))
-
- if not empty:
- x += 1
- patches.insert(x, patch)
-
- def patch_toText(self, patches):
- """Take a list of patches and return a textual representation.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- Text representation of patches.
- """
- text = []
- for patch in patches:
- text.append(str(patch))
- return "".join(text)
-
- def patch_fromText(self, textline):
- """Parse a textual representation of patches and return a list of patch
- objects.
-
- Args:
- textline: Text representation of patches.
-
- Returns:
- Array of Patch objects.
-
- Raises:
- ValueError: If invalid input.
- """
- if type(textline) == unicode:
- # Patches should be composed of a subset of ascii chars, Unicode not
- # required. If this encode raises UnicodeEncodeError, patch is invalid.
- textline = textline.encode("ascii")
- patches = []
- if not textline:
- return patches
- text = textline.split("\n")
- while len(text) != 0:
- m = re.match(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
- if not m:
- raise ValueError("Invalid patch string: " + text[0])
- patch = patch_obj()
- patches.append(patch)
- patch.start1 = int(m.group(1))
- if m.group(2) == "":
- patch.start1 -= 1
- patch.length1 = 1
- elif m.group(2) == "0":
- patch.length1 = 0
- else:
- patch.start1 -= 1
- patch.length1 = int(m.group(2))
-
- patch.start2 = int(m.group(3))
- if m.group(4) == "":
- patch.start2 -= 1
- patch.length2 = 1
- elif m.group(4) == "0":
- patch.length2 = 0
- else:
- patch.start2 -= 1
- patch.length2 = int(m.group(4))
-
- del text[0]
-
- while len(text) != 0:
- if text[0]:
- sign = text[0][0]
- else:
- sign = ""
- line = urllib.unquote(text[0][1:])
- line = line.decode("utf-8")
- if sign == "+":
- # Insertion.
- patch.diffs.append((self.DIFF_INSERT, line))
- elif sign == "-":
- # Deletion.
- patch.diffs.append((self.DIFF_DELETE, line))
- elif sign == " ":
- # Minor equality.
- patch.diffs.append((self.DIFF_EQUAL, line))
- elif sign == "@":
- # Start of next patch.
- break
- elif sign == "":
- # Blank line? Whatever.
- pass
- else:
- # WTF?
- raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
- del text[0]
- return patches
-
-
-class patch_obj:
- """Class representing one patch operation.
- """
-
- def __init__(self):
- """Initializes with an empty list of diffs.
- """
- self.diffs = []
- self.start1 = None
- self.start2 = None
- self.length1 = 0
- self.length2 = 0
-
- def __str__(self):
- """Emulate GNU diff's format.
- Header: @@ -382,8 +481,9 @@
- Indices are printed as 1-based, not 0-based.
-
- Returns:
- The GNU diff string.
- """
- if self.length1 == 0:
- coords1 = str(self.start1) + ",0"
- elif self.length1 == 1:
- coords1 = str(self.start1 + 1)
- else:
- coords1 = str(self.start1 + 1) + "," + str(self.length1)
- if self.length2 == 0:
- coords2 = str(self.start2) + ",0"
- elif self.length2 == 1:
- coords2 = str(self.start2 + 1)
- else:
- coords2 = str(self.start2 + 1) + "," + str(self.length2)
- text = ["@@ -", coords1, " +", coords2, " @@\n"]
- # Escape the body of the patch with %xx notation.
- for (op, data) in self.diffs:
- if op == diff_match_patch.DIFF_INSERT:
- text.append("+")
- elif op == diff_match_patch.DIFF_DELETE:
- text.append("-")
- elif op == diff_match_patch.DIFF_EQUAL:
- text.append(" ")
- # High ascii will raise UnicodeDecodeError. Use Unicode instead.
- data = data.encode("utf-8")
- text.append(urllib.quote(data, "!~*'();/?:@&=+$,# ") + "\n")
- return "".join(text)
diff --git a/contrib/python/diff-match-patch/py2/ya.make b/contrib/python/diff-match-patch/py2/ya.make
deleted file mode 100644
index 7dfc13ebe4..0000000000
--- a/contrib/python/diff-match-patch/py2/ya.make
+++ /dev/null
@@ -1,27 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY2_LIBRARY()
-
-VERSION(20200713)
-
-LICENSE(Apache-2.0)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- diff_match_patch/__init__.py
- diff_match_patch/diff_match_patch_py2.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/diff-match-patch/py2/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- tests
-)
diff --git a/contrib/python/diff-match-patch/py3/.dist-info/METADATA b/contrib/python/diff-match-patch/py3/.dist-info/METADATA
deleted file mode 100644
index eecf8db23f..0000000000
--- a/contrib/python/diff-match-patch/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,108 +0,0 @@
-Metadata-Version: 2.1
-Name: diff-match-patch
-Version: 20230430
-Summary: Diff Match and Patch
-Author-email: Neil Fraser <fraser@google.com>
-Maintainer-email: Amethyst Reese <amethyst@n7.gg>
-Requires-Python: >=3.7
-Description-Content-Type: text/markdown
-Classifier: Development Status :: 6 - Mature
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Topic :: Software Development :: Libraries
-Classifier: Topic :: Text Processing
-Requires-Dist: attribution==1.6.2 ; extra == "dev"
-Requires-Dist: black==23.3.0 ; extra == "dev"
-Requires-Dist: flit==3.8.0 ; extra == "dev"
-Requires-Dist: mypy==1.2.0 ; extra == "dev"
-Requires-Dist: ufmt==2.1.0 ; extra == "dev"
-Requires-Dist: usort==1.0.6 ; extra == "dev"
-Project-URL: Changelog, https://github.com/diff-match-patch-python/diff-match-patch/blob/main/CHANGELOG.md
-Project-URL: Github, https://github.com/diff-match-patch-python/diff-match-patch
-Provides-Extra: dev
-
-# diff-match-patch
-
-Google's [Diff Match and Patch][DMP] library, packaged for modern Python.
-
-[![version](https://img.shields.io/pypi/v/diff-match-patch.svg)](https://pypi.org/project/diff-match-patch)
-[![changelog](https://img.shields.io/badge/change-log-blue)](https://github.com/diff-match-patch-python/diff-match-patch/blob/main/CHANGELOG.md)
-[![license](https://img.shields.io/pypi/l/diff-match-patch.svg)](https://github.com/diff-match-patch-python/diff-match-patch/blob/master/LICENSE)
-
-## Install
-
-diff-match-patch is supported on Python 3.7 or newer.
-You can install it from PyPI:
-
-```shell
-python -m pip install diff-match-patch
-```
-
-## Usage
-
-Generating a patchset (analogous to unified diff) between two texts:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_make(text1, text2)
-diff = dmp.patch_toText(patches)
-```
-
-Applying a patchset to a text can then be done with:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_fromText(diff)
-new_text, _ = dmp.patch_apply(patches, text)
-```
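-
-Computing a character-level diff directly, without going through patches, looks like this
-(a minimal sketch; the literal strings are only placeholders):
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-diffs = dmp.diff_main("Hello World.", "Goodbye World.")
-dmp.diff_cleanupSemantic(diffs)  # optional cleanup for human-readable output
-# diffs is now [(-1, "Hello"), (1, "Goodbye"), (0, " World.")]
-html = dmp.diff_prettyHtml(diffs)  # render the diff as simple HTML
-```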
-
-## Original README
-The Diff Match and Patch libraries offer robust algorithms to perform the
-operations required for synchronizing plain text.
-
-1. Diff:
- * Compare two blocks of plain text and efficiently return a list of differences.
- * [Diff Demo](https://neil.fraser.name/software/diff_match_patch/demos/diff.html)
-2. Match:
-   * Given a search string, find its best fuzzy match in a block of plain text. Weighted for both accuracy and location (see the sketch after this list).
- * [Match Demo](https://neil.fraser.name/software/diff_match_patch/demos/match.html)
-3. Patch:
- * Apply a list of patches onto plain text. Use best-effort to apply patch even when the underlying text doesn't match.
- * [Patch Demo](https://neil.fraser.name/software/diff_match_patch/demos/patch.html)
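-
-The Match capability can be exercised on its own; the sketch below (with made-up inputs)
-looks for a pattern near an expected location, falling back to fuzzy Bitap matching when
-the text has drifted:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-# Exact occurrence near the expected location: returns index 10.
-print(dmp.match_main("The quick brown fox jumps", "brown fox", 10))
-# With typos, the Bitap matcher returns the best nearby location,
-# or -1 if nothing scores under Match_Threshold.
-print(dmp.match_main("The quick brwon fox jumps", "brown fox", 0))
-```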
-
-Originally built in 2006 to power Google Docs, this library is now available in C++, C#, Dart, Java, JavaScript, Lua, Objective C, and Python.
-
-### Reference
-
-* [API](https://github.com/google/diff-match-patch/wiki/API) - Common API across all languages.
-* [Line or Word Diffs](https://github.com/google/diff-match-patch/wiki/Line-or-Word-Diffs) - Less detailed diffs.
-* [Plain Text vs. Structured Content](https://github.com/google/diff-match-patch/wiki/Plain-Text-vs.-Structured-Content) - How to deal with data like XML.
-* [Unidiff](https://github.com/google/diff-match-patch/wiki/Unidiff) - The patch serialization format.
-* [Support](https://groups.google.com/forum/#!forum/diff-match-patch) - Newsgroup for developers.
-
-### Languages
-Although each language port of Diff Match Patch uses the same API, there are some language-specific notes.
-
-* [C++](https://github.com/google/diff-match-patch/wiki/Language:-Cpp)
-* [C#](https://github.com/google/diff-match-patch/wiki/Language:-C%23)
-* [Dart](https://github.com/google/diff-match-patch/wiki/Language:-Dart)
-* [Java](https://github.com/google/diff-match-patch/wiki/Language:-Java)
-* [JavaScript](https://github.com/google/diff-match-patch/wiki/Language:-JavaScript)
-* [Lua](https://github.com/google/diff-match-patch/wiki/Language:-Lua)
-* [Objective-C](https://github.com/google/diff-match-patch/wiki/Language:-Objective-C)
-* [Python](https://github.com/google/diff-match-patch/wiki/Language:-Python)
-
-A standardized speed test tracks the [relative performance of diffs](https://docs.google.com/spreadsheets/d/1zpZccuBpjMZTvL1nGDMKJc7rWL_m_drF4XKOJvB27Kc/edit#gid=0) in each language.
-
-### Algorithms
-This library implements [Myers' diff algorithm](https://neil.fraser.name/writing/diff/myers.pdf), which is generally considered to be the best general-purpose diff. A layer of [pre-diff speedups and post-diff cleanups](https://neil.fraser.name/writing/diff/) surrounds the diff algorithm, improving both performance and output quality.
-
-This library also implements a [Bitap matching algorithm](https://neil.fraser.name/writing/patch/bitap.ps) at the heart of a [flexible matching and patching strategy](https://neil.fraser.name/writing/patch/).
-
-[DMP]: https://github.com/google/diff-match-patch
-[API]: https://github.com/google/diff-match-patch/wiki/API
-
diff --git a/contrib/python/diff-match-patch/py3/.dist-info/top_level.txt b/contrib/python/diff-match-patch/py3/.dist-info/top_level.txt
deleted file mode 100644
index 63904d71d9..0000000000
--- a/contrib/python/diff-match-patch/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-diff_match_patch
diff --git a/contrib/python/diff-match-patch/py3/AUTHORS b/contrib/python/diff-match-patch/py3/AUTHORS
deleted file mode 100644
index c82809e726..0000000000
--- a/contrib/python/diff-match-patch/py3/AUTHORS
+++ /dev/null
@@ -1,10 +0,0 @@
-# Below is a list of people and organizations that have contributed
-# to the Diff Match Patch project.
-
-Google Inc.
-
-Duncan Cross <duncan.cross@gmail.com> (Lua port)
-Jan Weiß <jan@geheimwerk.de> (Objective C port)
-Matthaeus G. Chajdas <anteru@developer.shelter13.net> (C# port)
-Mike Slemmer <mikeslemmer@gmail.com> (C++ port)
-
diff --git a/contrib/python/diff-match-patch/py3/LICENSE b/contrib/python/diff-match-patch/py3/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/contrib/python/diff-match-patch/py3/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/contrib/python/diff-match-patch/py3/README.md b/contrib/python/diff-match-patch/py3/README.md
deleted file mode 100644
index bdcd2a46e7..0000000000
--- a/contrib/python/diff-match-patch/py3/README.md
+++ /dev/null
@@ -1,84 +0,0 @@
-# diff-match-patch
-
-Google's [Diff Match and Patch][DMP] library, packaged for modern Python.
-
-[![version](https://img.shields.io/pypi/v/diff-match-patch.svg)](https://pypi.org/project/diff-match-patch)
-[![changelog](https://img.shields.io/badge/change-log-blue)](https://github.com/diff-match-patch-python/diff-match-patch/blob/main/CHANGELOG.md)
-[![license](https://img.shields.io/pypi/l/diff-match-patch.svg)](https://github.com/diff-match-patch-python/diff-match-patch/blob/master/LICENSE)
-
-## Install
-
-diff-match-patch is supported on Python 3.7 or newer.
-You can install it from PyPI:
-
-```shell
-python -m pip install diff-match-patch
-```
-
-## Usage
-
-Generating a patchset (analogous to unified diff) between two texts:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_make(text1, text2)
-diff = dmp.patch_toText(patches)
-```
-
-Applying a patchset to a text can then be done with:
-
-```python
-from diff_match_patch import diff_match_patch
-
-dmp = diff_match_patch()
-patches = dmp.patch_fromText(diff)
-new_text, _ = dmp.patch_apply(patches, text)
-```
-
-## Original README
-The Diff Match and Patch libraries offer robust algorithms to perform the
-operations required for synchronizing plain text.
-
-1. Diff:
- * Compare two blocks of plain text and efficiently return a list of differences.
- * [Diff Demo](https://neil.fraser.name/software/diff_match_patch/demos/diff.html)
-2. Match:
- * Given a search string, find its best fuzzy match in a block of plain text. Weighted for both accuracy and location.
- * [Match Demo](https://neil.fraser.name/software/diff_match_patch/demos/match.html)
-3. Patch:
-   * Apply a list of patches onto plain text. Patches are applied on a best-effort basis even when the underlying text doesn't match exactly.
- * [Patch Demo](https://neil.fraser.name/software/diff_match_patch/demos/patch.html)
-
-Originally built in 2006 to power Google Docs, this library is now available in C++, C#, Dart, Java, JavaScript, Lua, Objective C, and Python.
-
-### Reference
-
-* [API](https://github.com/google/diff-match-patch/wiki/API) - Common API across all languages.
-* [Line or Word Diffs](https://github.com/google/diff-match-patch/wiki/Line-or-Word-Diffs) - Less detailed diffs.
-* [Plain Text vs. Structured Content](https://github.com/google/diff-match-patch/wiki/Plain-Text-vs.-Structured-Content) - How to deal with data like XML.
-* [Unidiff](https://github.com/google/diff-match-patch/wiki/Unidiff) - The patch serialization format.
-* [Support](https://groups.google.com/forum/#!forum/diff-match-patch) - Newsgroup for developers.
-
-### Languages
-Although each language port of Diff Match Patch uses the same API, there are some language-specific notes.
-
-* [C++](https://github.com/google/diff-match-patch/wiki/Language:-Cpp)
-* [C#](https://github.com/google/diff-match-patch/wiki/Language:-C%23)
-* [Dart](https://github.com/google/diff-match-patch/wiki/Language:-Dart)
-* [Java](https://github.com/google/diff-match-patch/wiki/Language:-Java)
-* [JavaScript](https://github.com/google/diff-match-patch/wiki/Language:-JavaScript)
-* [Lua](https://github.com/google/diff-match-patch/wiki/Language:-Lua)
-* [Objective-C](https://github.com/google/diff-match-patch/wiki/Language:-Objective-C)
-* [Python](https://github.com/google/diff-match-patch/wiki/Language:-Python)
-
-A standardized speed test tracks the [relative performance of diffs](https://docs.google.com/spreadsheets/d/1zpZccuBpjMZTvL1nGDMKJc7rWL_m_drF4XKOJvB27Kc/edit#gid=0) in each language.
-
-### Algorithms
-This library implements [Myers' diff algorithm](https://neil.fraser.name/writing/diff/myers.pdf), which is generally considered to be the best general-purpose diff. A layer of [pre-diff speedups and post-diff cleanups](https://neil.fraser.name/writing/diff/) surrounds the diff algorithm, improving both performance and output quality.
-
-This library also implements a [Bitap matching algorithm](https://neil.fraser.name/writing/patch/bitap.ps) at the heart of a [flexible matching and patching strategy](https://neil.fraser.name/writing/patch/).
-
-[DMP]: https://github.com/google/diff-match-patch
-[API]: https://github.com/google/diff-match-patch/wiki/API
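The usage section above covers only the patch workflow; the same module also exposes the diff API directly. A minimal sketch with illustrative inputs (the exact tuples returned depend on the cleanup settings):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = dmp.diff_main("The quick brown fox", "The quick red fox")
dmp.diff_cleanupSemantic(diffs)  # merge semantically trivial equalities
# diffs is now roughly:
# [(0, "The quick "), (-1, "brown"), (1, "red"), (0, " fox")]
```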
diff --git a/contrib/python/diff-match-patch/py3/diff_match_patch/__init__.py b/contrib/python/diff-match-patch/py3/diff_match_patch/__init__.py
deleted file mode 100644
index 18ac58aadb..0000000000
--- a/contrib/python/diff-match-patch/py3/diff_match_patch/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-"""
-Repackaging of Google's Diff Match and Patch libraries.
-
-Offers robust algorithms to perform the operations required for synchronizing plain text.
-"""
-
-from .__version__ import __version__
-from .diff_match_patch import __author__, __doc__, diff_match_patch, patch_obj
-
-__packager__ = "Amethyst Reese (amy@noswap.com)"
diff --git a/contrib/python/diff-match-patch/py3/diff_match_patch/__version__.py b/contrib/python/diff-match-patch/py3/diff_match_patch/__version__.py
deleted file mode 100644
index 1e429654a2..0000000000
--- a/contrib/python/diff-match-patch/py3/diff_match_patch/__version__.py
+++ /dev/null
@@ -1,7 +0,0 @@
-"""
-This file is automatically generated by attribution.
-
-Do not edit manually. Get more info at https://attribution.omnilib.dev
-"""
-
-__version__ = "20230430"
diff --git a/contrib/python/diff-match-patch/py3/diff_match_patch/diff_match_patch.py b/contrib/python/diff-match-patch/py3/diff_match_patch/diff_match_patch.py
deleted file mode 100644
index 683f9487f2..0000000000
--- a/contrib/python/diff-match-patch/py3/diff_match_patch/diff_match_patch.py
+++ /dev/null
@@ -1,2019 +0,0 @@
-#!/usr/bin/python3
-
-"""Diff Match and Patch
-Copyright 2018 The diff-match-patch Authors.
-https://github.com/google/diff-match-patch
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-"""
-
-"""Functions for diff, match and patch.
-
-Computes the difference between two texts to create a patch.
-Applies the patch onto another text, allowing for errors.
-"""
-
-__author__ = "fraser@google.com (Neil Fraser)"
-
-import re
-import sys
-import time
-import urllib.parse
-
-
-class diff_match_patch:
- """Class containing the diff, match and patch methods.
-
- Also contains the behaviour settings.
- """
-
- def __init__(self):
- """Inits a diff_match_patch object with default settings.
- Redefine these in your program to override the defaults.
- """
-
- # Number of seconds to map a diff before giving up (0 for infinity).
- self.Diff_Timeout = 1.0
- # Cost of an empty edit operation in terms of edit characters.
- self.Diff_EditCost = 4
- # At what point is no match declared (0.0 = perfection, 1.0 = very loose).
- self.Match_Threshold = 0.5
- # How far to search for a match (0 = exact location, 1000+ = broad match).
- # A match this many characters away from the expected location will add
- # 1.0 to the score (0.0 is a perfect match).
- self.Match_Distance = 1000
- # When deleting a large block of text (over ~64 characters), how close do
- # the contents have to be to match the expected contents. (0.0 = perfection,
- # 1.0 = very loose). Note that Match_Threshold controls how closely the
- # end points of a delete need to match.
- self.Patch_DeleteThreshold = 0.5
- # Chunk size for context length.
- self.Patch_Margin = 4
-
- # The number of bits in an int.
- # Python has no maximum, thus to disable patch splitting set to 0.
- # However to avoid long patches in certain pathological cases, use 32.
- # Multiple short patches (using native ints) are much faster than long ones.
- self.Match_MaxBits = 32
-
- # DIFF FUNCTIONS
-
- # The data structure representing a diff is an array of tuples:
- # [(DIFF_DELETE, "Hello"), (DIFF_INSERT, "Goodbye"), (DIFF_EQUAL, " world.")]
- # which means: delete "Hello", add "Goodbye" and keep " world."
- DIFF_DELETE = -1
- DIFF_INSERT = 1
- DIFF_EQUAL = 0
-
- def diff_main(self, text1, text2, checklines=True, deadline=None):
- """Find the differences between two texts. Simplifies the problem by
- stripping any common prefix or suffix off the texts before diffing.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- checklines: Optional speedup flag. If present and false, then don't run
- a line-level diff first to identify the changed areas.
- Defaults to true, which does a faster, slightly less optimal diff.
- deadline: Optional time when the diff should be complete by. Used
-            internally for recursive calls. Users should set Diff_Timeout instead.
-
- Returns:
- Array of changes.
- """
- # Set a deadline by which time the diff must be complete.
- if deadline == None:
- # Unlike in most languages, Python counts time in seconds.
- if self.Diff_Timeout <= 0:
- deadline = sys.maxsize
- else:
- deadline = time.time() + self.Diff_Timeout
-
- # Check for null inputs.
- if text1 == None or text2 == None:
- raise ValueError("Null inputs. (diff_main)")
-
- # Check for equality (speedup).
- if text1 == text2:
- if text1:
- return [(self.DIFF_EQUAL, text1)]
- return []
-
- # Trim off common prefix (speedup).
- commonlength = self.diff_commonPrefix(text1, text2)
- commonprefix = text1[:commonlength]
- text1 = text1[commonlength:]
- text2 = text2[commonlength:]
-
- # Trim off common suffix (speedup).
- commonlength = self.diff_commonSuffix(text1, text2)
- if commonlength == 0:
- commonsuffix = ""
- else:
- commonsuffix = text1[-commonlength:]
- text1 = text1[:-commonlength]
- text2 = text2[:-commonlength]
-
- # Compute the diff on the middle block.
- diffs = self.diff_compute(text1, text2, checklines, deadline)
-
- # Restore the prefix and suffix.
- if commonprefix:
- diffs[:0] = [(self.DIFF_EQUAL, commonprefix)]
- if commonsuffix:
- diffs.append((self.DIFF_EQUAL, commonsuffix))
- self.diff_cleanupMerge(diffs)
- return diffs
-
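A minimal usage sketch for `diff_main` (illustrative inputs; the exact tuples can vary with the cleanup passes):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = dmp.diff_main("kitten", "sitting")
# Each entry is (op, text) with op one of DIFF_DELETE (-1), DIFF_INSERT (1),
# DIFF_EQUAL (0), e.g.:
# [(-1, "k"), (1, "s"), (0, "itt"), (-1, "e"), (1, "i"), (0, "n"), (1, "g")]
```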
- def diff_compute(self, text1, text2, checklines, deadline):
- """Find the differences between two texts. Assumes that the texts do not
- have any common prefix or suffix.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- checklines: Speedup flag. If false, then don't run a line-level diff
- first to identify the changed areas.
- If true, then run a faster, slightly less optimal diff.
- deadline: Time when the diff should be complete by.
-
- Returns:
- Array of changes.
- """
- if not text1:
- # Just add some text (speedup).
- return [(self.DIFF_INSERT, text2)]
-
- if not text2:
- # Just delete some text (speedup).
- return [(self.DIFF_DELETE, text1)]
-
- if len(text1) > len(text2):
- (longtext, shorttext) = (text1, text2)
- else:
- (shorttext, longtext) = (text1, text2)
- i = longtext.find(shorttext)
- if i != -1:
- # Shorter text is inside the longer text (speedup).
- diffs = [
- (self.DIFF_INSERT, longtext[:i]),
- (self.DIFF_EQUAL, shorttext),
- (self.DIFF_INSERT, longtext[i + len(shorttext) :]),
- ]
- # Swap insertions for deletions if diff is reversed.
- if len(text1) > len(text2):
- diffs[0] = (self.DIFF_DELETE, diffs[0][1])
- diffs[2] = (self.DIFF_DELETE, diffs[2][1])
- return diffs
-
- if len(shorttext) == 1:
- # Single character string.
- # After the previous speedup, the character can't be an equality.
- return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
-
- # Check to see if the problem can be split in two.
- hm = self.diff_halfMatch(text1, text2)
- if hm:
- # A half-match was found, sort out the return data.
- (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
- # Send both pairs off for separate processing.
- diffs_a = self.diff_main(text1_a, text2_a, checklines, deadline)
- diffs_b = self.diff_main(text1_b, text2_b, checklines, deadline)
- # Merge the results.
- return diffs_a + [(self.DIFF_EQUAL, mid_common)] + diffs_b
-
- if checklines and len(text1) > 100 and len(text2) > 100:
- return self.diff_lineMode(text1, text2, deadline)
-
- return self.diff_bisect(text1, text2, deadline)
-
- def diff_lineMode(self, text1, text2, deadline):
- """Do a quick line-level diff on both strings, then rediff the parts for
- greater accuracy.
- This speedup can produce non-minimal diffs.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- deadline: Time when the diff should be complete by.
-
- Returns:
- Array of changes.
- """
-
- # Scan the text on a line-by-line basis first.
- (text1, text2, linearray) = self.diff_linesToChars(text1, text2)
-
- diffs = self.diff_main(text1, text2, False, deadline)
-
- # Convert the diff back to original text.
- self.diff_charsToLines(diffs, linearray)
- # Eliminate freak matches (e.g. blank lines)
- self.diff_cleanupSemantic(diffs)
-
- # Rediff any replacement blocks, this time character-by-character.
- # Add a dummy entry at the end.
- diffs.append((self.DIFF_EQUAL, ""))
- pointer = 0
- count_delete = 0
- count_insert = 0
- text_delete = ""
- text_insert = ""
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_INSERT:
- count_insert += 1
- text_insert += diffs[pointer][1]
- elif diffs[pointer][0] == self.DIFF_DELETE:
- count_delete += 1
- text_delete += diffs[pointer][1]
- elif diffs[pointer][0] == self.DIFF_EQUAL:
- # Upon reaching an equality, check for prior redundancies.
- if count_delete >= 1 and count_insert >= 1:
- # Delete the offending records and add the merged ones.
- subDiff = self.diff_main(text_delete, text_insert, False, deadline)
- diffs[pointer - count_delete - count_insert : pointer] = subDiff
- pointer = pointer - count_delete - count_insert + len(subDiff)
- count_insert = 0
- count_delete = 0
- text_delete = ""
- text_insert = ""
-
- pointer += 1
-
- diffs.pop() # Remove the dummy entry at the end.
-
- return diffs
-
- def diff_bisect(self, text1, text2, deadline):
- """Find the 'middle snake' of a diff, split the problem in two
- and return the recursively constructed diff.
- See Myers 1986 paper: An O(ND) Difference Algorithm and Its Variations.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- deadline: Time at which to bail if not yet complete.
-
- Returns:
- Array of diff tuples.
- """
-
- # Cache the text lengths to prevent multiple calls.
- text1_length = len(text1)
- text2_length = len(text2)
- max_d = (text1_length + text2_length + 1) // 2
- v_offset = max_d
- v_length = 2 * max_d
- v1 = [-1] * v_length
- v1[v_offset + 1] = 0
- v2 = v1[:]
- delta = text1_length - text2_length
- # If the total number of characters is odd, then the front path will
- # collide with the reverse path.
- front = delta % 2 != 0
- # Offsets for start and end of k loop.
- # Prevents mapping of space beyond the grid.
- k1start = 0
- k1end = 0
- k2start = 0
- k2end = 0
- for d in range(max_d):
- # Bail out if deadline is reached.
- if time.time() > deadline:
- break
-
- # Walk the front path one step.
- for k1 in range(-d + k1start, d + 1 - k1end, 2):
- k1_offset = v_offset + k1
- if k1 == -d or (k1 != d and v1[k1_offset - 1] < v1[k1_offset + 1]):
- x1 = v1[k1_offset + 1]
- else:
- x1 = v1[k1_offset - 1] + 1
- y1 = x1 - k1
- while (
- x1 < text1_length and y1 < text2_length and text1[x1] == text2[y1]
- ):
- x1 += 1
- y1 += 1
- v1[k1_offset] = x1
- if x1 > text1_length:
- # Ran off the right of the graph.
- k1end += 2
- elif y1 > text2_length:
- # Ran off the bottom of the graph.
- k1start += 2
- elif front:
- k2_offset = v_offset + delta - k1
- if k2_offset >= 0 and k2_offset < v_length and v2[k2_offset] != -1:
- # Mirror x2 onto top-left coordinate system.
- x2 = text1_length - v2[k2_offset]
- if x1 >= x2:
- # Overlap detected.
- return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
-
- # Walk the reverse path one step.
- for k2 in range(-d + k2start, d + 1 - k2end, 2):
- k2_offset = v_offset + k2
- if k2 == -d or (k2 != d and v2[k2_offset - 1] < v2[k2_offset + 1]):
- x2 = v2[k2_offset + 1]
- else:
- x2 = v2[k2_offset - 1] + 1
- y2 = x2 - k2
- while (
- x2 < text1_length
- and y2 < text2_length
- and text1[-x2 - 1] == text2[-y2 - 1]
- ):
- x2 += 1
- y2 += 1
- v2[k2_offset] = x2
- if x2 > text1_length:
- # Ran off the left of the graph.
- k2end += 2
- elif y2 > text2_length:
- # Ran off the top of the graph.
- k2start += 2
- elif not front:
- k1_offset = v_offset + delta - k2
- if k1_offset >= 0 and k1_offset < v_length and v1[k1_offset] != -1:
- x1 = v1[k1_offset]
- y1 = v_offset + x1 - k1_offset
- # Mirror x2 onto top-left coordinate system.
- x2 = text1_length - x2
- if x1 >= x2:
- # Overlap detected.
- return self.diff_bisectSplit(text1, text2, x1, y1, deadline)
-
- # Diff took too long and hit the deadline or
- # number of diffs equals number of characters, no commonality at all.
- return [(self.DIFF_DELETE, text1), (self.DIFF_INSERT, text2)]
-
- def diff_bisectSplit(self, text1, text2, x, y, deadline):
- """Given the location of the 'middle snake', split the diff in two parts
- and recurse.
-
- Args:
- text1: Old string to be diffed.
- text2: New string to be diffed.
- x: Index of split point in text1.
- y: Index of split point in text2.
- deadline: Time at which to bail if not yet complete.
-
- Returns:
- Array of diff tuples.
- """
- text1a = text1[:x]
- text2a = text2[:y]
- text1b = text1[x:]
- text2b = text2[y:]
-
- # Compute both diffs serially.
- diffs = self.diff_main(text1a, text2a, False, deadline)
- diffsb = self.diff_main(text1b, text2b, False, deadline)
-
- return diffs + diffsb
-
- def diff_linesToChars(self, text1, text2):
- """Split two texts into an array of strings. Reduce the texts to a string
- of hashes where each Unicode character represents one line.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- Three element tuple, containing the encoded text1, the encoded text2 and
- the array of unique strings. The zeroth element of the array of unique
- strings is intentionally blank.
- """
- lineArray = [] # e.g. lineArray[4] == "Hello\n"
- lineHash = {} # e.g. lineHash["Hello\n"] == 4
-
- # "\x00" is a valid character, but various debuggers don't like it.
- # So we'll insert a junk entry to avoid generating a null character.
- lineArray.append("")
-
- def diff_linesToCharsMunge(text):
- """Split a text into an array of strings. Reduce the texts to a string
- of hashes where each Unicode character represents one line.
- Modifies linearray and linehash through being a closure.
-
- Args:
- text: String to encode.
-
- Returns:
- Encoded string.
- """
- chars = []
- # Walk the text, pulling out a substring for each line.
-            # text.split('\n') would temporarily double our memory footprint.
- # Modifying text would create many large strings to garbage collect.
- lineStart = 0
- lineEnd = -1
- while lineEnd < len(text) - 1:
- lineEnd = text.find("\n", lineStart)
- if lineEnd == -1:
- lineEnd = len(text) - 1
- line = text[lineStart : lineEnd + 1]
-
- if line in lineHash:
- chars.append(chr(lineHash[line]))
- else:
- if len(lineArray) == maxLines:
- # Bail out at 1114111 because chr(1114112) throws.
- line = text[lineStart:]
- lineEnd = len(text)
- lineArray.append(line)
- lineHash[line] = len(lineArray) - 1
- chars.append(chr(len(lineArray) - 1))
- lineStart = lineEnd + 1
- return "".join(chars)
-
- # Allocate 2/3rds of the space for text1, the rest for text2.
- maxLines = 666666
- chars1 = diff_linesToCharsMunge(text1)
- maxLines = 1114111
- chars2 = diff_linesToCharsMunge(text2)
- return (chars1, chars2, lineArray)
-
- def diff_charsToLines(self, diffs, lineArray):
- """Rehydrate the text in a diff from a string of line hashes to real lines
- of text.
-
- Args:
- diffs: Array of diff tuples.
- lineArray: Array of unique strings.
- """
- for i in range(len(diffs)):
- text = []
- for char in diffs[i][1]:
- text.append(lineArray[ord(char)])
- diffs[i] = (diffs[i][0], "".join(text))
-
- def diff_commonPrefix(self, text1, text2):
- """Determine the common prefix of two strings.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- The number of characters common to the start of each string.
- """
- # Quick check for common null cases.
- if not text1 or not text2 or text1[0] != text2[0]:
- return 0
- # Binary search.
- # Performance analysis: https://neil.fraser.name/news/2007/10/09/
- pointermin = 0
- pointermax = min(len(text1), len(text2))
- pointermid = pointermax
- pointerstart = 0
- while pointermin < pointermid:
- if text1[pointerstart:pointermid] == text2[pointerstart:pointermid]:
- pointermin = pointermid
- pointerstart = pointermin
- else:
- pointermax = pointermid
- pointermid = (pointermax - pointermin) // 2 + pointermin
- return pointermid
-
- def diff_commonSuffix(self, text1, text2):
- """Determine the common suffix of two strings.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- The number of characters common to the end of each string.
- """
- # Quick check for common null cases.
- if not text1 or not text2 or text1[-1] != text2[-1]:
- return 0
- # Binary search.
- # Performance analysis: https://neil.fraser.name/news/2007/10/09/
- pointermin = 0
- pointermax = min(len(text1), len(text2))
- pointermid = pointermax
- pointerend = 0
- while pointermin < pointermid:
- if (
- text1[-pointermid : len(text1) - pointerend]
- == text2[-pointermid : len(text2) - pointerend]
- ):
- pointermin = pointermid
- pointerend = pointermin
- else:
- pointermax = pointermid
- pointermid = (pointermax - pointermin) // 2 + pointermin
- return pointermid
-
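A quick illustrative check of `diff_commonPrefix` and `diff_commonSuffix` (values chosen for the sketch):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
assert dmp.diff_commonPrefix("abcdef", "abcxyz") == 3  # shared prefix "abc"
assert dmp.diff_commonSuffix("running", "jogging") == 3  # shared suffix "ing"
```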
- def diff_commonOverlap(self, text1, text2):
- """Determine if the suffix of one string is the prefix of another.
-
- Args:
- text1 First string.
- text2 Second string.
-
- Returns:
- The number of characters common to the end of the first
- string and the start of the second string.
- """
- # Cache the text lengths to prevent multiple calls.
- text1_length = len(text1)
- text2_length = len(text2)
- # Eliminate the null case.
- if text1_length == 0 or text2_length == 0:
- return 0
- # Truncate the longer string.
- if text1_length > text2_length:
- text1 = text1[-text2_length:]
- elif text1_length < text2_length:
- text2 = text2[:text1_length]
- text_length = min(text1_length, text2_length)
- # Quick check for the worst case.
- if text1 == text2:
- return text_length
-
- # Start by looking for a single character match
- # and increase length until no match is found.
- # Performance analysis: https://neil.fraser.name/news/2010/11/04/
- best = 0
- length = 1
- while True:
- pattern = text1[-length:]
- found = text2.find(pattern)
- if found == -1:
- return best
- length += found
- if found == 0 or text1[-length:] == text2[:length]:
- best = length
- length += 1
-
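For example, the suffix of the first string below overlaps the prefix of the second by four characters (an illustrative check):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
assert dmp.diff_commonOverlap("fish and ", "and chips") == 4  # overlap "and "
```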
- def diff_halfMatch(self, text1, text2):
- """Do the two texts share a substring which is at least half the length of
- the longer text?
- This speedup can produce non-minimal diffs.
-
- Args:
- text1: First string.
- text2: Second string.
-
- Returns:
- Five element Array, containing the prefix of text1, the suffix of text1,
- the prefix of text2, the suffix of text2 and the common middle. Or None
- if there was no match.
- """
- if self.Diff_Timeout <= 0:
- # Don't risk returning a non-optimal diff if we have unlimited time.
- return None
- if len(text1) > len(text2):
- (longtext, shorttext) = (text1, text2)
- else:
- (shorttext, longtext) = (text1, text2)
- if len(longtext) < 4 or len(shorttext) * 2 < len(longtext):
- return None # Pointless.
-
- def diff_halfMatchI(longtext, shorttext, i):
- """Does a substring of shorttext exist within longtext such that the
- substring is at least half the length of longtext?
- Closure, but does not reference any external variables.
-
- Args:
- longtext: Longer string.
- shorttext: Shorter string.
- i: Start index of quarter length substring within longtext.
-
- Returns:
- Five element Array, containing the prefix of longtext, the suffix of
- longtext, the prefix of shorttext, the suffix of shorttext and the
- common middle. Or None if there was no match.
- """
- seed = longtext[i : i + len(longtext) // 4]
- best_common = ""
- j = shorttext.find(seed)
- while j != -1:
- prefixLength = self.diff_commonPrefix(longtext[i:], shorttext[j:])
- suffixLength = self.diff_commonSuffix(longtext[:i], shorttext[:j])
- if len(best_common) < suffixLength + prefixLength:
- best_common = (
- shorttext[j - suffixLength : j]
- + shorttext[j : j + prefixLength]
- )
- best_longtext_a = longtext[: i - suffixLength]
- best_longtext_b = longtext[i + prefixLength :]
- best_shorttext_a = shorttext[: j - suffixLength]
- best_shorttext_b = shorttext[j + prefixLength :]
- j = shorttext.find(seed, j + 1)
-
- if len(best_common) * 2 >= len(longtext):
- return (
- best_longtext_a,
- best_longtext_b,
- best_shorttext_a,
- best_shorttext_b,
- best_common,
- )
- else:
- return None
-
- # First check if the second quarter is the seed for a half-match.
- hm1 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 3) // 4)
- # Check again based on the third quarter.
- hm2 = diff_halfMatchI(longtext, shorttext, (len(longtext) + 1) // 2)
- if not hm1 and not hm2:
- return None
- elif not hm2:
- hm = hm1
- elif not hm1:
- hm = hm2
- else:
- # Both matched. Select the longest.
- if len(hm1[4]) > len(hm2[4]):
- hm = hm1
- else:
- hm = hm2
-
- # A half-match was found, sort out the return data.
- if len(text1) > len(text2):
- (text1_a, text1_b, text2_a, text2_b, mid_common) = hm
- else:
- (text2_a, text2_b, text1_a, text1_b, mid_common) = hm
- return (text1_a, text1_b, text2_a, text2_b, mid_common)
-
- def diff_cleanupSemantic(self, diffs):
- """Reduce the number of edits by eliminating semantically trivial
- equalities.
-
- Args:
- diffs: Array of diff tuples.
- """
- changes = False
- equalities = [] # Stack of indices where equalities are found.
- lastEquality = None # Always equal to diffs[equalities[-1]][1]
- pointer = 0 # Index of current position.
- # Number of chars that changed prior to the equality.
- length_insertions1, length_deletions1 = 0, 0
- # Number of chars that changed after the equality.
- length_insertions2, length_deletions2 = 0, 0
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
- equalities.append(pointer)
- length_insertions1, length_insertions2 = length_insertions2, 0
- length_deletions1, length_deletions2 = length_deletions2, 0
- lastEquality = diffs[pointer][1]
- else: # An insertion or deletion.
- if diffs[pointer][0] == self.DIFF_INSERT:
- length_insertions2 += len(diffs[pointer][1])
- else:
- length_deletions2 += len(diffs[pointer][1])
- # Eliminate an equality that is smaller or equal to the edits on both
- # sides of it.
- if (
- lastEquality
- and (
- len(lastEquality) <= max(length_insertions1, length_deletions1)
- )
- and (
- len(lastEquality) <= max(length_insertions2, length_deletions2)
- )
- ):
- # Duplicate record.
- diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
- # Change second copy to insert.
- diffs[equalities[-1] + 1] = (
- self.DIFF_INSERT,
- diffs[equalities[-1] + 1][1],
- )
- # Throw away the equality we just deleted.
- equalities.pop()
- # Throw away the previous equality (it needs to be reevaluated).
- if len(equalities):
- equalities.pop()
- if len(equalities):
- pointer = equalities[-1]
- else:
- pointer = -1
- # Reset the counters.
- length_insertions1, length_deletions1 = 0, 0
- length_insertions2, length_deletions2 = 0, 0
- lastEquality = None
- changes = True
- pointer += 1
-
- # Normalize the diff.
- if changes:
- self.diff_cleanupMerge(diffs)
- self.diff_cleanupSemanticLossless(diffs)
-
- # Find any overlaps between deletions and insertions.
- # e.g: <del>abcxxx</del><ins>xxxdef</ins>
- # -> <del>abc</del>xxx<ins>def</ins>
- # e.g: <del>xxxabc</del><ins>defxxx</ins>
- # -> <ins>def</ins>xxx<del>abc</del>
- # Only extract an overlap if it is as big as the edit ahead or behind it.
- pointer = 1
- while pointer < len(diffs):
- if (
- diffs[pointer - 1][0] == self.DIFF_DELETE
- and diffs[pointer][0] == self.DIFF_INSERT
- ):
- deletion = diffs[pointer - 1][1]
- insertion = diffs[pointer][1]
- overlap_length1 = self.diff_commonOverlap(deletion, insertion)
- overlap_length2 = self.diff_commonOverlap(insertion, deletion)
- if overlap_length1 >= overlap_length2:
- if (
- overlap_length1 >= len(deletion) / 2.0
- or overlap_length1 >= len(insertion) / 2.0
- ):
- # Overlap found. Insert an equality and trim the surrounding edits.
- diffs.insert(
- pointer, (self.DIFF_EQUAL, insertion[:overlap_length1])
- )
- diffs[pointer - 1] = (
- self.DIFF_DELETE,
- deletion[: len(deletion) - overlap_length1],
- )
- diffs[pointer + 1] = (
- self.DIFF_INSERT,
- insertion[overlap_length1:],
- )
- pointer += 1
- else:
- if (
- overlap_length2 >= len(deletion) / 2.0
- or overlap_length2 >= len(insertion) / 2.0
- ):
- # Reverse overlap found.
- # Insert an equality and swap and trim the surrounding edits.
- diffs.insert(
- pointer, (self.DIFF_EQUAL, deletion[:overlap_length2])
- )
- diffs[pointer - 1] = (
- self.DIFF_INSERT,
- insertion[: len(insertion) - overlap_length2],
- )
- diffs[pointer + 1] = (
- self.DIFF_DELETE,
- deletion[overlap_length2:],
- )
- pointer += 1
- pointer += 1
- pointer += 1
-
- def diff_cleanupSemanticLossless(self, diffs):
- """Look for single edits surrounded on both sides by equalities
- which can be shifted sideways to align the edit to a word boundary.
- e.g: The c<ins>at c</ins>ame. -> The <ins>cat </ins>came.
-
- Args:
- diffs: Array of diff tuples.
- """
-
- def diff_cleanupSemanticScore(one, two):
- """Given two strings, compute a score representing whether the
- internal boundary falls on logical boundaries.
- Scores range from 6 (best) to 0 (worst).
- Closure, but does not reference any external variables.
-
- Args:
- one: First string.
- two: Second string.
-
- Returns:
- The score.
- """
- if not one or not two:
- # Edges are the best.
- return 6
-
- # Each port of this function behaves slightly differently due to
- # subtle differences in each language's definition of things like
- # 'whitespace'. Since this function's purpose is largely cosmetic,
- # the choice has been made to use each language's native features
- # rather than force total conformity.
- char1 = one[-1]
- char2 = two[0]
- nonAlphaNumeric1 = not char1.isalnum()
- nonAlphaNumeric2 = not char2.isalnum()
- whitespace1 = nonAlphaNumeric1 and char1.isspace()
- whitespace2 = nonAlphaNumeric2 and char2.isspace()
- lineBreak1 = whitespace1 and (char1 == "\r" or char1 == "\n")
- lineBreak2 = whitespace2 and (char2 == "\r" or char2 == "\n")
- blankLine1 = lineBreak1 and self.BLANKLINEEND.search(one)
- blankLine2 = lineBreak2 and self.BLANKLINESTART.match(two)
-
- if blankLine1 or blankLine2:
- # Five points for blank lines.
- return 5
- elif lineBreak1 or lineBreak2:
- # Four points for line breaks.
- return 4
- elif nonAlphaNumeric1 and not whitespace1 and whitespace2:
- # Three points for end of sentences.
- return 3
- elif whitespace1 or whitespace2:
- # Two points for whitespace.
- return 2
- elif nonAlphaNumeric1 or nonAlphaNumeric2:
- # One point for non-alphanumeric.
- return 1
- return 0
-
- pointer = 1
- # Intentionally ignore the first and last element (don't need checking).
- while pointer < len(diffs) - 1:
- if (
- diffs[pointer - 1][0] == self.DIFF_EQUAL
- and diffs[pointer + 1][0] == self.DIFF_EQUAL
- ):
- # This is a single edit surrounded by equalities.
- equality1 = diffs[pointer - 1][1]
- edit = diffs[pointer][1]
- equality2 = diffs[pointer + 1][1]
-
- # First, shift the edit as far left as possible.
- commonOffset = self.diff_commonSuffix(equality1, edit)
- if commonOffset:
- commonString = edit[-commonOffset:]
- equality1 = equality1[:-commonOffset]
- edit = commonString + edit[:-commonOffset]
- equality2 = commonString + equality2
-
- # Second, step character by character right, looking for the best fit.
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
- bestScore = diff_cleanupSemanticScore(
- equality1, edit
- ) + diff_cleanupSemanticScore(edit, equality2)
- while edit and equality2 and edit[0] == equality2[0]:
- equality1 += edit[0]
- edit = edit[1:] + equality2[0]
- equality2 = equality2[1:]
- score = diff_cleanupSemanticScore(
- equality1, edit
- ) + diff_cleanupSemanticScore(edit, equality2)
- # The >= encourages trailing rather than leading whitespace on edits.
- if score >= bestScore:
- bestScore = score
- bestEquality1 = equality1
- bestEdit = edit
- bestEquality2 = equality2
-
- if diffs[pointer - 1][1] != bestEquality1:
- # We have an improvement, save it back to the diff.
- if bestEquality1:
- diffs[pointer - 1] = (diffs[pointer - 1][0], bestEquality1)
- else:
- del diffs[pointer - 1]
- pointer -= 1
- diffs[pointer] = (diffs[pointer][0], bestEdit)
- if bestEquality2:
- diffs[pointer + 1] = (diffs[pointer + 1][0], bestEquality2)
- else:
- del diffs[pointer + 1]
- pointer -= 1
- pointer += 1
-
- # Define some regex patterns for matching boundaries.
- BLANKLINEEND = re.compile(r"\n\r?\n$")
- BLANKLINESTART = re.compile(r"^\r?\n\r?\n")
-
- def diff_cleanupEfficiency(self, diffs):
- """Reduce the number of edits by eliminating operationally trivial
- equalities.
-
- Args:
- diffs: Array of diff tuples.
- """
- changes = False
- equalities = [] # Stack of indices where equalities are found.
- lastEquality = None # Always equal to diffs[equalities[-1]][1]
- pointer = 0 # Index of current position.
- pre_ins = False # Is there an insertion operation before the last equality.
- pre_del = False # Is there a deletion operation before the last equality.
- post_ins = False # Is there an insertion operation after the last equality.
- post_del = False # Is there a deletion operation after the last equality.
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_EQUAL: # Equality found.
- if len(diffs[pointer][1]) < self.Diff_EditCost and (
- post_ins or post_del
- ):
- # Candidate found.
- equalities.append(pointer)
- pre_ins = post_ins
- pre_del = post_del
- lastEquality = diffs[pointer][1]
- else:
- # Not a candidate, and can never become one.
- equalities = []
- lastEquality = None
-
- post_ins = post_del = False
- else: # An insertion or deletion.
- if diffs[pointer][0] == self.DIFF_DELETE:
- post_del = True
- else:
- post_ins = True
-
- # Five types to be split:
- # <ins>A</ins><del>B</del>XY<ins>C</ins><del>D</del>
- # <ins>A</ins>X<ins>C</ins><del>D</del>
- # <ins>A</ins><del>B</del>X<ins>C</ins>
-                # <del>A</del>X<ins>C</ins><del>D</del>
- # <ins>A</ins><del>B</del>X<del>C</del>
-
- if lastEquality and (
- (pre_ins and pre_del and post_ins and post_del)
- or (
- (len(lastEquality) < self.Diff_EditCost / 2)
- and (pre_ins + pre_del + post_ins + post_del) == 3
- )
- ):
- # Duplicate record.
- diffs.insert(equalities[-1], (self.DIFF_DELETE, lastEquality))
- # Change second copy to insert.
- diffs[equalities[-1] + 1] = (
- self.DIFF_INSERT,
- diffs[equalities[-1] + 1][1],
- )
- equalities.pop() # Throw away the equality we just deleted.
- lastEquality = None
- if pre_ins and pre_del:
- # No changes made which could affect previous entry, keep going.
- post_ins = post_del = True
- equalities = []
- else:
- if len(equalities):
- equalities.pop() # Throw away the previous equality.
- if len(equalities):
- pointer = equalities[-1]
- else:
- pointer = -1
- post_ins = post_del = False
- changes = True
- pointer += 1
-
- if changes:
- self.diff_cleanupMerge(diffs)
-
- def diff_cleanupMerge(self, diffs):
- """Reorder and merge like edit sections. Merge equalities.
- Any edit section can move as long as it doesn't cross an equality.
-
- Args:
- diffs: Array of diff tuples.
- """
- diffs.append((self.DIFF_EQUAL, "")) # Add a dummy entry at the end.
- pointer = 0
- count_delete = 0
- count_insert = 0
- text_delete = ""
- text_insert = ""
- while pointer < len(diffs):
- if diffs[pointer][0] == self.DIFF_INSERT:
- count_insert += 1
- text_insert += diffs[pointer][1]
- pointer += 1
- elif diffs[pointer][0] == self.DIFF_DELETE:
- count_delete += 1
- text_delete += diffs[pointer][1]
- pointer += 1
- elif diffs[pointer][0] == self.DIFF_EQUAL:
- # Upon reaching an equality, check for prior redundancies.
- if count_delete + count_insert > 1:
- if count_delete != 0 and count_insert != 0:
-                        # Factor out any common prefixes.
- commonlength = self.diff_commonPrefix(text_insert, text_delete)
- if commonlength != 0:
- x = pointer - count_delete - count_insert - 1
- if x >= 0 and diffs[x][0] == self.DIFF_EQUAL:
- diffs[x] = (
- diffs[x][0],
- diffs[x][1] + text_insert[:commonlength],
- )
- else:
- diffs.insert(
- 0, (self.DIFF_EQUAL, text_insert[:commonlength])
- )
- pointer += 1
- text_insert = text_insert[commonlength:]
- text_delete = text_delete[commonlength:]
-                        # Factor out any common suffixes.
- commonlength = self.diff_commonSuffix(text_insert, text_delete)
- if commonlength != 0:
- diffs[pointer] = (
- diffs[pointer][0],
- text_insert[-commonlength:] + diffs[pointer][1],
- )
- text_insert = text_insert[:-commonlength]
- text_delete = text_delete[:-commonlength]
- # Delete the offending records and add the merged ones.
- new_ops = []
- if len(text_delete) != 0:
- new_ops.append((self.DIFF_DELETE, text_delete))
- if len(text_insert) != 0:
- new_ops.append((self.DIFF_INSERT, text_insert))
- pointer -= count_delete + count_insert
- diffs[pointer : pointer + count_delete + count_insert] = new_ops
- pointer += len(new_ops) + 1
- elif pointer != 0 and diffs[pointer - 1][0] == self.DIFF_EQUAL:
- # Merge this equality with the previous one.
- diffs[pointer - 1] = (
- diffs[pointer - 1][0],
- diffs[pointer - 1][1] + diffs[pointer][1],
- )
- del diffs[pointer]
- else:
- pointer += 1
-
- count_insert = 0
- count_delete = 0
- text_delete = ""
- text_insert = ""
-
- if diffs[-1][1] == "":
- diffs.pop() # Remove the dummy entry at the end.
-
- # Second pass: look for single edits surrounded on both sides by equalities
- # which can be shifted sideways to eliminate an equality.
- # e.g: A<ins>BA</ins>C -> <ins>AB</ins>AC
- changes = False
- pointer = 1
- # Intentionally ignore the first and last element (don't need checking).
- while pointer < len(diffs) - 1:
- if (
- diffs[pointer - 1][0] == self.DIFF_EQUAL
- and diffs[pointer + 1][0] == self.DIFF_EQUAL
- ):
- # This is a single edit surrounded by equalities.
- if diffs[pointer][1].endswith(diffs[pointer - 1][1]):
- # Shift the edit over the previous equality.
- if diffs[pointer - 1][1] != "":
- diffs[pointer] = (
- diffs[pointer][0],
- diffs[pointer - 1][1]
- + diffs[pointer][1][: -len(diffs[pointer - 1][1])],
- )
- diffs[pointer + 1] = (
- diffs[pointer + 1][0],
- diffs[pointer - 1][1] + diffs[pointer + 1][1],
- )
- del diffs[pointer - 1]
- changes = True
- elif diffs[pointer][1].startswith(diffs[pointer + 1][1]):
- # Shift the edit over the next equality.
- diffs[pointer - 1] = (
- diffs[pointer - 1][0],
- diffs[pointer - 1][1] + diffs[pointer + 1][1],
- )
- diffs[pointer] = (
- diffs[pointer][0],
- diffs[pointer][1][len(diffs[pointer + 1][1]) :]
- + diffs[pointer + 1][1],
- )
- del diffs[pointer + 1]
- changes = True
- pointer += 1
-
- # If shifts were made, the diff needs reordering and another shift sweep.
- if changes:
- self.diff_cleanupMerge(diffs)
-
- def diff_xIndex(self, diffs, loc):
- """loc is a location in text1, compute and return the equivalent location
- in text2. e.g. "The cat" vs "The big cat", 1->1, 5->8
-
- Args:
- diffs: Array of diff tuples.
- loc: Location within text1.
-
- Returns:
- Location within text2.
- """
- chars1 = 0
- chars2 = 0
- last_chars1 = 0
- last_chars2 = 0
- for x in range(len(diffs)):
- (op, text) = diffs[x]
- if op != self.DIFF_INSERT: # Equality or deletion.
- chars1 += len(text)
- if op != self.DIFF_DELETE: # Equality or insertion.
- chars2 += len(text)
- if chars1 > loc: # Overshot the location.
- break
- last_chars1 = chars1
- last_chars2 = chars2
-
- if len(diffs) != x and diffs[x][0] == self.DIFF_DELETE:
- # The location was deleted.
- return last_chars2
-        # Add the remaining character count.
- return last_chars2 + (loc - last_chars1)
-
- def diff_prettyHtml(self, diffs):
- """Convert a diff array into a pretty HTML report.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- HTML representation.
- """
- html = []
- for op, data in diffs:
- text = (
- data.replace("&", "&amp;")
- .replace("<", "&lt;")
- .replace(">", "&gt;")
- .replace("\n", "&para;<br>")
- )
- if op == self.DIFF_INSERT:
- html.append('<ins style="background:#e6ffe6;">%s</ins>' % text)
- elif op == self.DIFF_DELETE:
- html.append('<del style="background:#ffe6e6;">%s</del>' % text)
- elif op == self.DIFF_EQUAL:
- html.append("<span>%s</span>" % text)
- return "".join(html)
-
- def diff_text1(self, diffs):
- """Compute and return the source text (all equalities and deletions).
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Source text.
- """
- text = []
- for op, data in diffs:
- if op != self.DIFF_INSERT:
- text.append(data)
- return "".join(text)
-
- def diff_text2(self, diffs):
- """Compute and return the destination text (all equalities and insertions).
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Destination text.
- """
- text = []
- for op, data in diffs:
- if op != self.DIFF_DELETE:
- text.append(data)
- return "".join(text)
-
- def diff_levenshtein(self, diffs):
- """Compute the Levenshtein distance; the number of inserted, deleted or
- substituted characters.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Number of changes.
- """
- levenshtein = 0
- insertions = 0
- deletions = 0
- for op, data in diffs:
- if op == self.DIFF_INSERT:
- insertions += len(data)
- elif op == self.DIFF_DELETE:
- deletions += len(data)
- elif op == self.DIFF_EQUAL:
- # A deletion and an insertion is one substitution.
- levenshtein += max(insertions, deletions)
- insertions = 0
- deletions = 0
- levenshtein += max(insertions, deletions)
- return levenshtein
-
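For instance, the classic "kitten" -> "sitting" pair should yield an edit distance of 3 (an illustrative check, assuming the default diff settings):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
diffs = dmp.diff_main("kitten", "sitting")
assert dmp.diff_levenshtein(diffs) == 3  # substitute k->s and e->i, insert g
```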
- def diff_toDelta(self, diffs):
- """Crush the diff into an encoded string which describes the operations
- required to transform text1 into text2.
- E.g. =3\t-2\t+ing -> Keep 3 chars, delete 2 chars, insert 'ing'.
- Operations are tab-separated. Inserted text is escaped using %xx notation.
-
- Args:
- diffs: Array of diff tuples.
-
- Returns:
- Delta text.
- """
- text = []
- for op, data in diffs:
- if op == self.DIFF_INSERT:
- # High ascii will raise UnicodeDecodeError. Use Unicode instead.
- data = data.encode("utf-8")
- text.append("+" + urllib.parse.quote(data, "!~*'();/?:@&=+$,# "))
- elif op == self.DIFF_DELETE:
- text.append("-%d" % len(data))
- elif op == self.DIFF_EQUAL:
- text.append("=%d" % len(data))
- return "\t".join(text)
-
- def diff_fromDelta(self, text1, delta):
- """Given the original text1, and an encoded string which describes the
- operations required to transform text1 into text2, compute the full diff.
-
- Args:
- text1: Source string for the diff.
- delta: Delta text.
-
- Returns:
- Array of diff tuples.
-
- Raises:
- ValueError: If invalid input.
- """
- diffs = []
- pointer = 0 # Cursor in text1
- tokens = delta.split("\t")
- for token in tokens:
- if token == "":
- # Blank tokens are ok (from a trailing \t).
- continue
- # Each token begins with a one character parameter which specifies the
- # operation of this token (delete, insert, equality).
- param = token[1:]
- if token[0] == "+":
- param = urllib.parse.unquote(param)
- diffs.append((self.DIFF_INSERT, param))
- elif token[0] == "-" or token[0] == "=":
- try:
- n = int(param)
- except ValueError:
- raise ValueError("Invalid number in diff_fromDelta: " + param)
- if n < 0:
- raise ValueError("Negative number in diff_fromDelta: " + param)
- text = text1[pointer : pointer + n]
- pointer += n
- if token[0] == "=":
- diffs.append((self.DIFF_EQUAL, text))
- else:
- diffs.append((self.DIFF_DELETE, text))
- else:
- # Anything else is an error.
- raise ValueError(
- "Invalid diff operation in diff_fromDelta: " + token[0]
- )
- if pointer != len(text1):
- raise ValueError(
- "Delta length (%d) does not equal source text length (%d)."
- % (pointer, len(text1))
- )
- return diffs
-
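A round trip through the delta encoding handled by the two methods above (a sketch; the delta string shown follows the `=N`, `-N`, `+text` format described in `diff_toDelta`):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
text1 = "The quick fox"
text2 = "The quick brown fox"
diffs = dmp.diff_main(text1, text2)
delta = dmp.diff_toDelta(diffs)  # e.g. "=10\t+brown \t=3"
assert dmp.diff_fromDelta(text1, delta) == diffs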
- # MATCH FUNCTIONS
-
- def match_main(self, text, pattern, loc):
- """Locate the best instance of 'pattern' in 'text' near 'loc'.
-
- Args:
- text: The text to search.
- pattern: The pattern to search for.
- loc: The location to search around.
-
- Returns:
- Best match index or -1.
- """
- # Check for null inputs.
- if text == None or pattern == None:
- raise ValueError("Null inputs. (match_main)")
-
- loc = max(0, min(loc, len(text)))
- if text == pattern:
- # Shortcut (potentially not guaranteed by the algorithm)
- return 0
- elif not text:
- # Nothing to match.
- return -1
- elif text[loc : loc + len(pattern)] == pattern:
- # Perfect match at the perfect spot! (Includes case of null pattern)
- return loc
- else:
- # Do a fuzzy compare.
- match = self.match_bitap(text, pattern, loc)
- return match
-
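An illustrative fuzzy-match call (values chosen for the sketch; the index returned depends on `Match_Threshold` and `Match_Distance`):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
loc = dmp.match_main("The quick brown fox jumped", "jumps", 20)
# With the default settings this tolerates one substitution and typically
# returns 20 (the start of "jumped"); -1 means nothing cleared Match_Threshold.
```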
- def match_bitap(self, text, pattern, loc):
- """Locate the best instance of 'pattern' in 'text' near 'loc' using the
- Bitap algorithm.
-
- Args:
- text: The text to search.
- pattern: The pattern to search for.
- loc: The location to search around.
-
- Returns:
- Best match index or -1.
- """
- # Python doesn't have a maxint limit, so ignore this check.
- # if self.Match_MaxBits != 0 and len(pattern) > self.Match_MaxBits:
- # raise ValueError("Pattern too long for this application.")
-
- # Initialise the alphabet.
- s = self.match_alphabet(pattern)
-
- def match_bitapScore(e, x):
- """Compute and return the score for a match with e errors and x location.
- Accesses loc and pattern through being a closure.
-
- Args:
- e: Number of errors in match.
- x: Location of match.
-
- Returns:
- Overall score for match (0.0 = good, 1.0 = bad).
- """
- accuracy = float(e) / len(pattern)
- proximity = abs(loc - x)
- if not self.Match_Distance:
- # Dodge divide by zero error.
- return proximity and 1.0 or accuracy
- return accuracy + (proximity / float(self.Match_Distance))
-
- # Highest score beyond which we give up.
- score_threshold = self.Match_Threshold
- # Is there a nearby exact match? (speedup)
- best_loc = text.find(pattern, loc)
- if best_loc != -1:
- score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
- # What about in the other direction? (speedup)
- best_loc = text.rfind(pattern, loc + len(pattern))
- if best_loc != -1:
- score_threshold = min(match_bitapScore(0, best_loc), score_threshold)
-
- # Initialise the bit arrays.
- matchmask = 1 << (len(pattern) - 1)
- best_loc = -1
-
- bin_max = len(pattern) + len(text)
- # Empty initialization added to appease pychecker.
- last_rd = None
- for d in range(len(pattern)):
- # Scan for the best match each iteration allows for one more error.
- # Run a binary search to determine how far from 'loc' we can stray at
- # this error level.
- bin_min = 0
- bin_mid = bin_max
- while bin_min < bin_mid:
- if match_bitapScore(d, loc + bin_mid) <= score_threshold:
- bin_min = bin_mid
- else:
- bin_max = bin_mid
- bin_mid = (bin_max - bin_min) // 2 + bin_min
-
- # Use the result from this iteration as the maximum for the next.
- bin_max = bin_mid
- start = max(1, loc - bin_mid + 1)
- finish = min(loc + bin_mid, len(text)) + len(pattern)
-
- rd = [0] * (finish + 2)
- rd[finish + 1] = (1 << d) - 1
- for j in range(finish, start - 1, -1):
- if len(text) <= j - 1:
- # Out of range.
- charMatch = 0
- else:
- charMatch = s.get(text[j - 1], 0)
- if d == 0: # First pass: exact match.
- rd[j] = ((rd[j + 1] << 1) | 1) & charMatch
- else: # Subsequent passes: fuzzy match.
- rd[j] = (
- (((rd[j + 1] << 1) | 1) & charMatch)
- | (((last_rd[j + 1] | last_rd[j]) << 1) | 1)
- | last_rd[j + 1]
- )
- if rd[j] & matchmask:
- score = match_bitapScore(d, j - 1)
- # This match will almost certainly be better than any existing match.
- # But check anyway.
- if score <= score_threshold:
- # Told you so.
- score_threshold = score
- best_loc = j - 1
- if best_loc > loc:
- # When passing loc, don't exceed our current distance from loc.
- start = max(1, 2 * loc - best_loc)
- else:
- # Already passed loc, downhill from here on in.
- break
- # No hope for a (better) match at greater error levels.
- if match_bitapScore(d + 1, loc) > score_threshold:
- break
- last_rd = rd
- return best_loc
-
- def match_alphabet(self, pattern):
- """Initialise the alphabet for the Bitap algorithm.
-
- Args:
- pattern: The text to encode.
-
- Returns:
- Hash of character locations.
- """
- s = {}
- for char in pattern:
- s[char] = 0
- for i in range(len(pattern)):
- s[pattern[i]] |= 1 << (len(pattern) - i - 1)
- return s
-
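The alphabet built above is a per-character bitmask, with the most significant bit corresponding to the first position of the pattern (an illustrative check):

```python
from diff_match_patch import diff_match_patch

dmp = diff_match_patch()
assert dmp.match_alphabet("abc") == {"a": 0b100, "b": 0b010, "c": 0b001}
```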
- # PATCH FUNCTIONS
-
- def patch_addContext(self, patch, text):
- """Increase the context until it is unique,
- but don't let the pattern expand beyond Match_MaxBits.
-
- Args:
- patch: The patch to grow.
- text: Source text.
- """
- if len(text) == 0:
- return
- pattern = text[patch.start2 : patch.start2 + patch.length1]
- padding = 0
-
- # Look for the first and last matches of pattern in text. If two different
- # matches are found, increase the pattern length.
- while text.find(pattern) != text.rfind(pattern) and (
- self.Match_MaxBits == 0
- or len(pattern) < self.Match_MaxBits - self.Patch_Margin - self.Patch_Margin
- ):
- padding += self.Patch_Margin
- pattern = text[
- max(0, patch.start2 - padding) : patch.start2 + patch.length1 + padding
- ]
- # Add one chunk for good luck.
- padding += self.Patch_Margin
-
- # Add the prefix.
- prefix = text[max(0, patch.start2 - padding) : patch.start2]
- if prefix:
- patch.diffs[:0] = [(self.DIFF_EQUAL, prefix)]
- # Add the suffix.
- suffix = text[
- patch.start2 + patch.length1 : patch.start2 + patch.length1 + padding
- ]
- if suffix:
- patch.diffs.append((self.DIFF_EQUAL, suffix))
-
- # Roll back the start points.
- patch.start1 -= len(prefix)
- patch.start2 -= len(prefix)
- # Extend lengths.
- patch.length1 += len(prefix) + len(suffix)
- patch.length2 += len(prefix) + len(suffix)
-
- def patch_make(self, a, b=None, c=None):
- """Compute a list of patches to turn text1 into text2.
- Use diffs if provided, otherwise compute it ourselves.
- There are four ways to call this function, depending on what data is
- available to the caller:
- Method 1:
- a = text1, b = text2
- Method 2:
- a = diffs
- Method 3 (optimal):
- a = text1, b = diffs
- Method 4 (deprecated, use method 3):
- a = text1, b = text2, c = diffs
-
- Args:
- a: text1 (methods 1,3,4) or Array of diff tuples for text1 to
- text2 (method 2).
- b: text2 (methods 1,4) or Array of diff tuples for text1 to
- text2 (method 3) or undefined (method 2).
- c: Array of diff tuples for text1 to text2 (method 4) or
- undefined (methods 1,2,3).
-
- Returns:
- Array of Patch objects.
- """
- text1 = None
- diffs = None
- if isinstance(a, str) and isinstance(b, str) and c is None:
- # Method 1: text1, text2
- # Compute diffs from text1 and text2.
- text1 = a
- diffs = self.diff_main(text1, b, True)
- if len(diffs) > 2:
- self.diff_cleanupSemantic(diffs)
- self.diff_cleanupEfficiency(diffs)
- elif isinstance(a, list) and b is None and c is None:
- # Method 2: diffs
- # Compute text1 from diffs.
- diffs = a
- text1 = self.diff_text1(diffs)
- elif isinstance(a, str) and isinstance(b, list) and c is None:
- # Method 3: text1, diffs
- text1 = a
- diffs = b
- elif isinstance(a, str) and isinstance(b, str) and isinstance(c, list):
- # Method 4: text1, text2, diffs
- # text2 is not used.
- text1 = a
- diffs = c
- else:
- raise ValueError("Unknown call format to patch_make.")
-
- if not diffs:
- return [] # Get rid of the None case.
- patches = []
- patch = patch_obj()
- char_count1 = 0 # Number of characters into the text1 string.
- char_count2 = 0 # Number of characters into the text2 string.
- prepatch_text = text1 # Recreate the patches to determine context info.
- postpatch_text = text1
- for x in range(len(diffs)):
- (diff_type, diff_text) = diffs[x]
- if len(patch.diffs) == 0 and diff_type != self.DIFF_EQUAL:
- # A new patch starts here.
- patch.start1 = char_count1
- patch.start2 = char_count2
- if diff_type == self.DIFF_INSERT:
- # Insertion
- patch.diffs.append(diffs[x])
- patch.length2 += len(diff_text)
- postpatch_text = (
- postpatch_text[:char_count2]
- + diff_text
- + postpatch_text[char_count2:]
- )
- elif diff_type == self.DIFF_DELETE:
- # Deletion.
- patch.length1 += len(diff_text)
- patch.diffs.append(diffs[x])
- postpatch_text = (
- postpatch_text[:char_count2]
- + postpatch_text[char_count2 + len(diff_text) :]
- )
- elif (
- diff_type == self.DIFF_EQUAL
- and len(diff_text) <= 2 * self.Patch_Margin
- and len(patch.diffs) != 0
- and len(diffs) != x + 1
- ):
- # Small equality inside a patch.
- patch.diffs.append(diffs[x])
- patch.length1 += len(diff_text)
- patch.length2 += len(diff_text)
-
- if diff_type == self.DIFF_EQUAL and len(diff_text) >= 2 * self.Patch_Margin:
- # Time for a new patch.
- if len(patch.diffs) != 0:
- self.patch_addContext(patch, prepatch_text)
- patches.append(patch)
- patch = patch_obj()
- # Unlike Unidiff, our patch lists have a rolling context.
- # https://github.com/google/diff-match-patch/wiki/Unidiff
- # Update prepatch text & pos to reflect the application of the
- # just completed patch.
- prepatch_text = postpatch_text
- char_count1 = char_count2
-
- # Update the current character count.
- if diff_type != self.DIFF_INSERT:
- char_count1 += len(diff_text)
- if diff_type != self.DIFF_DELETE:
- char_count2 += len(diff_text)
-
- # Pick up the leftover patch if not empty.
- if len(patch.diffs) != 0:
- self.patch_addContext(patch, prepatch_text)
- patches.append(patch)
- return patches
-
- def patch_deepCopy(self, patches):
- """Given an array of patches, return another array that is identical.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- Array of Patch objects.
- """
- patchesCopy = []
- for patch in patches:
- patchCopy = patch_obj()
- # No need to deep copy the tuples since they are immutable.
- patchCopy.diffs = patch.diffs[:]
- patchCopy.start1 = patch.start1
- patchCopy.start2 = patch.start2
- patchCopy.length1 = patch.length1
- patchCopy.length2 = patch.length2
- patchesCopy.append(patchCopy)
- return patchesCopy
-
- def patch_apply(self, patches, text):
- """Merge a set of patches onto the text. Return a patched text, as well
- as a list of true/false values indicating which patches were applied.
-
- Args:
- patches: Array of Patch objects.
- text: Old text.
-
- Returns:
- Two element Array, containing the new text and an array of boolean values.
- """
- if not patches:
- return (text, [])
-
- # Deep copy the patches so that no changes are made to originals.
- patches = self.patch_deepCopy(patches)
-
- nullPadding = self.patch_addPadding(patches)
- text = nullPadding + text + nullPadding
- self.patch_splitMax(patches)
-
- # delta keeps track of the offset between the expected and actual location
- # of the previous patch. If there are patches expected at positions 10 and
- # 20, but the first patch was found at 12, delta is 2 and the second patch
- # has an effective expected position of 22.
- delta = 0
- results = []
- for patch in patches:
- expected_loc = patch.start2 + delta
- text1 = self.diff_text1(patch.diffs)
- end_loc = -1
- if len(text1) > self.Match_MaxBits:
- # patch_splitMax will only provide an oversized pattern in the case of
- # a monster delete.
- start_loc = self.match_main(
- text, text1[: self.Match_MaxBits], expected_loc
- )
- if start_loc != -1:
- end_loc = self.match_main(
- text,
- text1[-self.Match_MaxBits :],
- expected_loc + len(text1) - self.Match_MaxBits,
- )
- if end_loc == -1 or start_loc >= end_loc:
- # Can't find valid trailing context. Drop this patch.
- start_loc = -1
- else:
- start_loc = self.match_main(text, text1, expected_loc)
- if start_loc == -1:
- # No match found. :(
- results.append(False)
- # Subtract the delta for this failed patch from subsequent patches.
- delta -= patch.length2 - patch.length1
- else:
- # Found a match. :)
- results.append(True)
- delta = start_loc - expected_loc
- if end_loc == -1:
- text2 = text[start_loc : start_loc + len(text1)]
- else:
- text2 = text[start_loc : end_loc + self.Match_MaxBits]
- if text1 == text2:
- # Perfect match, just shove the replacement text in.
- text = (
- text[:start_loc]
- + self.diff_text2(patch.diffs)
- + text[start_loc + len(text1) :]
- )
- else:
- # Imperfect match.
- # Run a diff to get a framework of equivalent indices.
- diffs = self.diff_main(text1, text2, False)
- if (
- len(text1) > self.Match_MaxBits
- and self.diff_levenshtein(diffs) / float(len(text1))
- > self.Patch_DeleteThreshold
- ):
- # The end points match, but the content is unacceptably bad.
- results[-1] = False
- else:
- self.diff_cleanupSemanticLossless(diffs)
- index1 = 0
- for op, data in patch.diffs:
- if op != self.DIFF_EQUAL:
- index2 = self.diff_xIndex(diffs, index1)
- if op == self.DIFF_INSERT: # Insertion
- text = (
- text[: start_loc + index2]
- + data
- + text[start_loc + index2 :]
- )
- elif op == self.DIFF_DELETE: # Deletion
- text = (
- text[: start_loc + index2]
- + text[
- start_loc
- + self.diff_xIndex(diffs, index1 + len(data)) :
- ]
- )
- if op != self.DIFF_DELETE:
- index1 += len(data)
- # Strip the padding off.
- text = text[len(nullPadding) : -len(nullPadding)]
- return (text, results)
-
- def patch_addPadding(self, patches):
- """Add some padding on text start and end so that edges can match
- something. Intended to be called only from within patch_apply.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- The padding string added to each side.
- """
- paddingLength = self.Patch_Margin
- nullPadding = ""
- for x in range(1, paddingLength + 1):
- nullPadding += chr(x)
-
- # Bump all the patches forward.
- for patch in patches:
- patch.start1 += paddingLength
- patch.start2 += paddingLength
-
- # Add some padding on start of first diff.
- patch = patches[0]
- diffs = patch.diffs
- if not diffs or diffs[0][0] != self.DIFF_EQUAL:
- # Add nullPadding equality.
- diffs.insert(0, (self.DIFF_EQUAL, nullPadding))
- patch.start1 -= paddingLength # Should be 0.
- patch.start2 -= paddingLength # Should be 0.
- patch.length1 += paddingLength
- patch.length2 += paddingLength
- elif paddingLength > len(diffs[0][1]):
- # Grow first equality.
- extraLength = paddingLength - len(diffs[0][1])
- newText = nullPadding[len(diffs[0][1]) :] + diffs[0][1]
- diffs[0] = (diffs[0][0], newText)
- patch.start1 -= extraLength
- patch.start2 -= extraLength
- patch.length1 += extraLength
- patch.length2 += extraLength
-
- # Add some padding on end of last diff.
- patch = patches[-1]
- diffs = patch.diffs
- if not diffs or diffs[-1][0] != self.DIFF_EQUAL:
- # Add nullPadding equality.
- diffs.append((self.DIFF_EQUAL, nullPadding))
- patch.length1 += paddingLength
- patch.length2 += paddingLength
- elif paddingLength > len(diffs[-1][1]):
- # Grow last equality.
- extraLength = paddingLength - len(diffs[-1][1])
- newText = diffs[-1][1] + nullPadding[:extraLength]
- diffs[-1] = (diffs[-1][0], newText)
- patch.length1 += extraLength
- patch.length2 += extraLength
-
- return nullPadding
-
- def patch_splitMax(self, patches):
- """Look through the patches and break up any which are longer than the
- maximum limit of the match algorithm.
- Intended to be called only from within patch_apply.
-
- Args:
- patches: Array of Patch objects.
- """
- patch_size = self.Match_MaxBits
- if patch_size == 0:
- # Python has the option of not splitting strings due to its ability
- # to handle integers of arbitrary precision.
- return
- for x in range(len(patches)):
- if patches[x].length1 <= patch_size:
- continue
- bigpatch = patches[x]
- # Remove the big old patch.
- del patches[x]
- x -= 1
- start1 = bigpatch.start1
- start2 = bigpatch.start2
- precontext = ""
- while len(bigpatch.diffs) != 0:
- # Create one of several smaller patches.
- patch = patch_obj()
- empty = True
- patch.start1 = start1 - len(precontext)
- patch.start2 = start2 - len(precontext)
- if precontext:
- patch.length1 = patch.length2 = len(precontext)
- patch.diffs.append((self.DIFF_EQUAL, precontext))
-
- while (
- len(bigpatch.diffs) != 0
- and patch.length1 < patch_size - self.Patch_Margin
- ):
- (diff_type, diff_text) = bigpatch.diffs[0]
- if diff_type == self.DIFF_INSERT:
- # Insertions are harmless.
- patch.length2 += len(diff_text)
- start2 += len(diff_text)
- patch.diffs.append(bigpatch.diffs.pop(0))
- empty = False
- elif (
- diff_type == self.DIFF_DELETE
- and len(patch.diffs) == 1
- and patch.diffs[0][0] == self.DIFF_EQUAL
- and len(diff_text) > 2 * patch_size
- ):
- # This is a large deletion. Let it pass in one chunk.
- patch.length1 += len(diff_text)
- start1 += len(diff_text)
- empty = False
- patch.diffs.append((diff_type, diff_text))
- del bigpatch.diffs[0]
- else:
- # Deletion or equality. Only take as much as we can stomach.
- diff_text = diff_text[
- : patch_size - patch.length1 - self.Patch_Margin
- ]
- patch.length1 += len(diff_text)
- start1 += len(diff_text)
- if diff_type == self.DIFF_EQUAL:
- patch.length2 += len(diff_text)
- start2 += len(diff_text)
- else:
- empty = False
-
- patch.diffs.append((diff_type, diff_text))
- if diff_text == bigpatch.diffs[0][1]:
- del bigpatch.diffs[0]
- else:
- bigpatch.diffs[0] = (
- bigpatch.diffs[0][0],
- bigpatch.diffs[0][1][len(diff_text) :],
- )
-
- # Compute the head context for the next patch.
- precontext = self.diff_text2(patch.diffs)
- precontext = precontext[-self.Patch_Margin :]
- # Append the end context for this patch.
- postcontext = self.diff_text1(bigpatch.diffs)[: self.Patch_Margin]
- if postcontext:
- patch.length1 += len(postcontext)
- patch.length2 += len(postcontext)
- if len(patch.diffs) != 0 and patch.diffs[-1][0] == self.DIFF_EQUAL:
- patch.diffs[-1] = (
- self.DIFF_EQUAL,
- patch.diffs[-1][1] + postcontext,
- )
- else:
- patch.diffs.append((self.DIFF_EQUAL, postcontext))
-
- if not empty:
- x += 1
- patches.insert(x, patch)
-
- def patch_toText(self, patches):
- """Take a list of patches and return a textual representation.
-
- Args:
- patches: Array of Patch objects.
-
- Returns:
- Text representation of patches.
- """
- text = []
- for patch in patches:
- text.append(str(patch))
- return "".join(text)
-
- def patch_fromText(self, textline):
- """Parse a textual representation of patches and return a list of patch
- objects.
-
- Args:
- textline: Text representation of patches.
-
- Returns:
- Array of Patch objects.
-
- Raises:
- ValueError: If invalid input.
- """
- patches = []
- if not textline:
- return patches
- text = textline.split("\n")
- while len(text) != 0:
- m = re.match(r"^@@ -(\d+),?(\d*) \+(\d+),?(\d*) @@$", text[0])
- if not m:
- raise ValueError("Invalid patch string: " + text[0])
- patch = patch_obj()
- patches.append(patch)
- patch.start1 = int(m.group(1))
- if m.group(2) == "":
- patch.start1 -= 1
- patch.length1 = 1
- elif m.group(2) == "0":
- patch.length1 = 0
- else:
- patch.start1 -= 1
- patch.length1 = int(m.group(2))
-
- patch.start2 = int(m.group(3))
- if m.group(4) == "":
- patch.start2 -= 1
- patch.length2 = 1
- elif m.group(4) == "0":
- patch.length2 = 0
- else:
- patch.start2 -= 1
- patch.length2 = int(m.group(4))
-
- del text[0]
-
- while len(text) != 0:
- if text[0]:
- sign = text[0][0]
- else:
- sign = ""
- line = urllib.parse.unquote(text[0][1:])
- if sign == "+":
- # Insertion.
- patch.diffs.append((self.DIFF_INSERT, line))
- elif sign == "-":
- # Deletion.
- patch.diffs.append((self.DIFF_DELETE, line))
- elif sign == " ":
- # Minor equality.
- patch.diffs.append((self.DIFF_EQUAL, line))
- elif sign == "@":
- # Start of next patch.
- break
- elif sign == "":
- # Blank line? Whatever.
- pass
- else:
- # WTF?
- raise ValueError("Invalid patch mode: '%s'\n%s" % (sign, line))
- del text[0]
- return patches
-
-
-class patch_obj:
- """Class representing one patch operation."""
-
- def __init__(self):
- """Initializes with an empty list of diffs."""
- self.diffs = []
- self.start1 = None
- self.start2 = None
- self.length1 = 0
- self.length2 = 0
-
- def __str__(self):
- """Emulate GNU diff's format.
- Header: @@ -382,8 +481,9 @@
- Indices are printed as 1-based, not 0-based.
-
- Returns:
- The GNU diff string.
- """
- if self.length1 == 0:
- coords1 = str(self.start1) + ",0"
- elif self.length1 == 1:
- coords1 = str(self.start1 + 1)
- else:
- coords1 = str(self.start1 + 1) + "," + str(self.length1)
- if self.length2 == 0:
- coords2 = str(self.start2) + ",0"
- elif self.length2 == 1:
- coords2 = str(self.start2 + 1)
- else:
- coords2 = str(self.start2 + 1) + "," + str(self.length2)
- text = ["@@ -", coords1, " +", coords2, " @@\n"]
- # Escape the body of the patch with %xx notation.
- for op, data in self.diffs:
- if op == diff_match_patch.DIFF_INSERT:
- text.append("+")
- elif op == diff_match_patch.DIFF_DELETE:
- text.append("-")
- elif op == diff_match_patch.DIFF_EQUAL:
- text.append(" ")
- # High ascii will raise UnicodeDecodeError. Use Unicode instead.
- data = data.encode("utf-8")
- text.append(urllib.parse.quote(data, "!~*'();/?:@&=+$,# ") + "\n")
- return "".join(text)
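For orientation, the module deleted above is the stock Google diff-match-patch port, and its patch API composes as shown below. This is a minimal sketch with made-up texts, assuming the package's conventional `from diff_match_patch import diff_match_patch` import::

    from diff_match_patch import diff_match_patch

    dmp = diff_match_patch()
    old_text = "The quick brown fox"
    new_text = "The quick red fox jumps"

    # Method 1 of patch_make(): derive the diffs from the two texts.
    patches = dmp.patch_make(old_text, new_text)

    # Round-trip through the GNU-diff-like text form produced by patch_obj.__str__().
    patch_text = dmp.patch_toText(patches)
    patches = dmp.patch_fromText(patch_text)

    # patch_apply() returns the patched text plus one boolean per patch.
    result, applied = dmp.patch_apply(patches, old_text)
    assert result == new_text and all(applied)

Because the patches here are applied to the exact text they were made from, every patch takes the perfect-match path in patch_apply(); the fuzzy-matching branches only come into play when the target text has drifted.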
diff --git a/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest1.txt b/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest1.txt
deleted file mode 100644
index 54b438fd79..0000000000
--- a/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest1.txt
+++ /dev/null
@@ -1,230 +0,0 @@
-This is a '''list of newspapers published by [[Journal Register Company]]'''.
-
-The company owns daily and weekly newspapers, other print media properties and newspaper-affiliated local Websites in the [[U.S.]] states of [[Connecticut]], [[Michigan]], [[New York]], [[Ohio]] and [[Pennsylvania]], organized in six geographic "clusters":<ref>[http://www.journalregister.com/newspapers.html Journal Register Company: Our Newspapers], accessed February 10, 2008.</ref>
-
-== Capital-Saratoga ==
-Three dailies, associated weeklies and [[pennysaver]]s in greater [[Albany, New York]]; also [http://www.capitalcentral.com capitalcentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com].
-
-* ''The Oneida Daily Dispatch'' {{WS|oneidadispatch.com}} of [[Oneida, New York]]
-* ''[[The Record (Troy)|The Record]]'' {{WS|troyrecord.com}} of [[Troy, New York]]
-* ''[[The Saratogian]]'' {{WS|saratogian.com}} of [[Saratoga Springs, New York]]
-* Weeklies:
-** ''Community News'' {{WS|cnweekly.com}} weekly of [[Clifton Park, New York]]
-** ''Rome Observer'' of [[Rome, New York]]
-** ''Life & Times of Utica'' of [[Utica, New York]]
-
-== Connecticut ==
-Five dailies, associated weeklies and [[pennysaver]]s in the state of [[Connecticut]]; also [http://www.ctcentral.com CTcentral.com], [http://www.ctcarsandtrucks.com CTCarsAndTrucks.com] and [http://www.jobsinct.com JobsInCT.com].
-
-* ''The Middletown Press'' {{WS|middletownpress.com}} of [[Middletown, Connecticut|Middletown]]
-* ''[[New Haven Register]]'' {{WS|newhavenregister.com}} of [[New Haven, Connecticut|New Haven]]
-* ''The Register Citizen'' {{WS|registercitizen.com}} of [[Torrington, Connecticut|Torrington]]
-
-* [[New Haven Register#Competitors|Elm City Newspapers]] {{WS|ctcentral.com}}
-** ''The Advertiser'' of [[East Haven, Connecticut|East Haven]]
-** ''Hamden Chronicle'' of [[Hamden, Connecticut|Hamden]]
-** ''Milford Weekly'' of [[Milford, Connecticut|Milford]]
-** ''The Orange Bulletin'' of [[Orange, Connecticut|Orange]]
-** ''The Post'' of [[North Haven, Connecticut|North Haven]]
-** ''Shelton Weekly'' of [[Shelton, Connecticut|Shelton]]
-** ''The Stratford Bard'' of [[Stratford, Connecticut|Stratford]]
-** ''Wallingford Voice'' of [[Wallingford, Connecticut|Wallingford]]
-** ''West Haven News'' of [[West Haven, Connecticut|West Haven]]
-* Housatonic Publications
-** ''The New Milford Times'' {{WS|newmilfordtimes.com}} of [[New Milford, Connecticut|New Milford]]
-** ''The Brookfield Journal'' of [[Brookfield, Connecticut|Brookfield]]
-** ''The Kent Good Times Dispatch'' of [[Kent, Connecticut|Kent]]
-** ''The Bethel Beacon'' of [[Bethel, Connecticut|Bethel]]
-** ''The Litchfield Enquirer'' of [[Litchfield, Connecticut|Litchfield]]
-** ''Litchfield County Times'' of [[Litchfield, Connecticut|Litchfield]]
-* Imprint Newspapers {{WS|imprintnewspapers.com}}
-** ''West Hartford News'' of [[West Hartford, Connecticut|West Hartford]]
-** ''Windsor Journal'' of [[Windsor, Connecticut|Windsor]]
-** ''Windsor Locks Journal'' of [[Windsor Locks, Connecticut|Windsor Locks]]
-** ''Avon Post'' of [[Avon, Connecticut|Avon]]
-** ''Farmington Post'' of [[Farmington, Connecticut|Farmington]]
-** ''Simsbury Post'' of [[Simsbury, Connecticut|Simsbury]]
-** ''Tri-Town Post'' of [[Burlington, Connecticut|Burlington]], [[Canton, Connecticut|Canton]] and [[Harwinton, Connecticut|Harwinton]]
-* Minuteman Publications
-** ''[[Fairfield Minuteman]]'' of [[Fairfield, Connecticut|Fairfield]]
-** ''The Westport Minuteman'' {{WS|westportminuteman.com}} of [[Westport, Connecticut|Westport]]
-* Shoreline Newspapers weeklies:
-** ''Branford Review'' of [[Branford, Connecticut|Branford]]
-** ''Clinton Recorder'' of [[Clinton, Connecticut|Clinton]]
-** ''The Dolphin'' of [[Naval Submarine Base New London]] in [[New London, Connecticut|New London]]
-** ''Main Street News'' {{WS|ctmainstreetnews.com}} of [[Essex, Connecticut|Essex]]
-** ''Pictorial Gazette'' of [[Old Saybrook, Connecticut|Old Saybrook]]
-** ''Regional Express'' of [[Colchester, Connecticut|Colchester]]
-** ''Regional Standard'' of [[Colchester, Connecticut|Colchester]]
-** ''Shoreline Times'' {{WS|shorelinetimes.com}} of [[Guilford, Connecticut|Guilford]]
-** ''Shore View East'' of [[Madison, Connecticut|Madison]]
-** ''Shore View West'' of [[Guilford, Connecticut|Guilford]]
-* Other weeklies:
-** ''Registro'' {{WS|registroct.com}} of [[New Haven, Connecticut|New Haven]]
-** ''Thomaston Express'' {{WS|thomastownexpress.com}} of [[Thomaston, Connecticut|Thomaston]]
-** ''Foothills Traders'' {{WS|foothillstrader.com}} of Torrington, Bristol, Canton
-
-== Michigan ==
-Four dailies, associated weeklies and [[pennysaver]]s in the state of [[Michigan]]; also [http://www.micentralhomes.com MIcentralhomes.com] and [http://www.micentralautos.com MIcentralautos.com]
-* ''[[Oakland Press]]'' {{WS|theoaklandpress.com}} of [[Oakland, Michigan|Oakland]]
-* ''Daily Tribune'' {{WS|dailytribune.com}} of [[Royal Oak, Michigan|Royal Oak]]
-* ''Macomb Daily'' {{WS|macombdaily.com}} of [[Mt. Clemens, Michigan|Mt. Clemens]]
-* ''[[Morning Sun]]'' {{WS|themorningsun.com}} of [[Mount Pleasant, Michigan|Mount Pleasant]]
-* Heritage Newspapers {{WS|heritage.com}}
-** ''Belleville View''
-** ''Ile Camera''
-** ''Monroe Guardian''
-** ''Ypsilanti Courier''
-** ''News-Herald''
-** ''Press & Guide''
-** ''Chelsea Standard & Dexter Leader''
-** ''Manchester Enterprise''
-** ''Milan News-Leader''
-** ''Saline Reporter''
-* Independent Newspapers {{WS|sourcenewspapers.com}}
-** ''Advisor''
-** ''Source''
-* Morning Star {{WS|morningstarpublishing.com}}
-** ''Alma Reminder''
-** ''Alpena Star''
-** ''Antrim County News''
-** ''Carson City Reminder''
-** ''The Leader & Kalkaskian''
-** ''Ogemaw/Oscoda County Star''
-** ''Petoskey/Charlevoix Star''
-** ''Presque Isle Star''
-** ''Preview Community Weekly''
-** ''Roscommon County Star''
-** ''St. Johns Reminder''
-** ''Straits Area Star''
-** ''The (Edmore) Advertiser''
-* Voice Newspapers {{WS|voicenews.com}}
-** ''Armada Times''
-** ''Bay Voice''
-** ''Blue Water Voice''
-** ''Downriver Voice''
-** ''Macomb Township Voice''
-** ''North Macomb Voice''
-** ''Weekend Voice''
-** ''Suburban Lifestyles'' {{WS|suburbanlifestyles.com}}
-
-== Mid-Hudson ==
-One daily, associated magazines in the [[Hudson River Valley]] of [[New York]]; also [http://www.midhudsoncentral.com MidHudsonCentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com].
-
-* ''[[Daily Freeman]]'' {{WS|dailyfreeman.com}} of [[Kingston, New York]]
-
-== Ohio ==
-Two dailies, associated magazines and three shared Websites, all in the state of [[Ohio]]: [http://www.allaroundcleveland.com AllAroundCleveland.com], [http://www.allaroundclevelandcars.com AllAroundClevelandCars.com] and [http://www.allaroundclevelandjobs.com AllAroundClevelandJobs.com].
-
-* ''[[The News-Herald (Ohio)|The News-Herald]]'' {{WS|news-herald.com}} of [[Willoughby, Ohio|Willoughby]]
-* ''[[The Morning Journal]]'' {{WS|morningjournal.com}} of [[Lorain, Ohio|Lorain]]
-
-== Philadelphia area ==
-Seven dailies and associated weeklies and magazines in [[Pennsylvania]] and [[New Jersey]], and associated Websites: [http://www.allaroundphilly.com AllAroundPhilly.com], [http://www.jobsinnj.com JobsInNJ.com], [http://www.jobsinpa.com JobsInPA.com], and [http://www.phillycarsearch.com PhillyCarSearch.com].
-
-* ''The Daily Local'' {{WS|dailylocal.com}} of [[West Chester, Pennsylvania|West Chester]]
-* ''[[Delaware County Daily and Sunday Times]] {{WS|delcotimes.com}} of Primos
-* ''[[The Mercury (Pennsylvania)|The Mercury]]'' {{WS|pottstownmercury.com}} of [[Pottstown, Pennsylvania|Pottstown]]
-* ''The Phoenix'' {{WS|phoenixvillenews.com}} of [[Phoenixville, Pennsylvania|Phoenixville]]
-* ''[[The Reporter (Lansdale)|The Reporter]]'' {{WS|thereporteronline.com}} of [[Lansdale, Pennsylvania|Lansdale]]
-* ''The Times Herald'' {{WS|timesherald.com}} of [[Norristown, Pennsylvania|Norristown]]
-* ''[[The Trentonian]]'' {{WS|trentonian.com}} of [[Trenton, New Jersey]]
-
-* Weeklies
-** ''El Latino Expreso'' of [[Trenton, New Jersey]]
-** ''La Voz'' of [[Norristown, Pennsylvania]]
-** ''The Village News'' of [[Downingtown, Pennsylvania]]
-** ''The Times Record'' of [[Kennett Square, Pennsylvania]]
-** ''The Tri-County Record'' {{WS|tricountyrecord.com}} of [[Morgantown, Pennsylvania]]
-** ''News of Delaware County'' {{WS|newsofdelawarecounty.com}}of [[Havertown, Pennsylvania]]
-** ''Main Line Times'' {{WS|mainlinetimes.com}}of [[Ardmore, Pennsylvania]]
-** ''Penny Pincher'' of [[Pottstown, Pennsylvania]]
-** ''Town Talk'' {{WS|towntalknews.com}} of [[Ridley, Pennsylvania]]
-* Chesapeake Publishing {{WS|pa8newsgroup.com}}
-** ''Solanco Sun Ledger'' of [[Quarryville, Pennsylvania]]
-** ''Columbia Ledger'' of [[Columbia, Pennsylvania]]
-** ''Coatesville Ledger'' of [[Downingtown, Pennsylvania]]
-** ''Parkesburg Post Ledger'' of [[Quarryville, Pennsylvania]]
-** ''Downingtown Ledger'' of [[Downingtown, Pennsylvania]]
-** ''The Kennett Paper'' of [[Kennett Square, Pennsylvania]]
-** ''Avon Grove Sun'' of [[West Grove, Pennsylvania]]
-** ''Oxford Tribune'' of [[Oxford, Pennsylvania]]
-** ''Elizabethtown Chronicle'' of [[Elizabethtown, Pennsylvania]]
-** ''Donegal Ledger'' of [[Donegal, Pennsylvania]]
-** ''Chadds Ford Post'' of [[Chadds Ford, Pennsylvania]]
-** ''The Central Record'' of [[Medford, New Jersey]]
-** ''Maple Shade Progress'' of [[Maple Shade, New Jersey]]
-* Intercounty Newspapers {{WS|buckslocalnews.com}}
-** ''The Review'' of Roxborough, Pennsylvania
-** ''The Recorder'' of [[Conshohocken, Pennsylvania]]
-** ''The Leader'' of [[Mount Airy, Pennsylvania|Mount Airy]] and West Oak Lake, Pennsylvania
-** ''The Pennington Post'' of [[Pennington, New Jersey]]
-** ''The Bristol Pilot'' of [[Bristol, Pennsylvania]]
-** ''Yardley News'' of [[Yardley, Pennsylvania]]
-** ''New Hope Gazette'' of [[New Hope, Pennsylvania]]
-** ''Doylestown Patriot'' of [[Doylestown, Pennsylvania]]
-** ''Newtown Advance'' of [[Newtown, Pennsylvania]]
-** ''The Plain Dealer'' of [[Williamstown, New Jersey]]
-** ''News Report'' of [[Sewell, New Jersey]]
-** ''Record Breeze'' of [[Berlin, New Jersey]]
-** ''Newsweekly'' of [[Moorestown, New Jersey]]
-** ''Haddon Herald'' of [[Haddonfield, New Jersey]]
-** ''New Egypt Press'' of [[New Egypt, New Jersey]]
-** ''Community News'' of [[Pemberton, New Jersey]]
-** ''Plymouth Meeting Journal'' of [[Plymouth Meeting, Pennsylvania]]
-** ''Lafayette Hill Journal'' of [[Lafayette Hill, Pennsylvania]]
-* Montgomery Newspapers {{WS|montgomerynews.com}}
-** ''Ambler Gazette'' of [[Ambler, Pennsylvania]]
-** ''Central Bucks Life'' of [[Bucks County, Pennsylvania]]
-** ''The Colonial'' of [[Plymouth Meeting, Pennsylvania]]
-** ''Glenside News'' of [[Glenside, Pennsylvania]]
-** ''The Globe'' of [[Lower Moreland Township, Pennsylvania]]
-** ''Main Line Life'' of [[Ardmore, Pennsylvania]]
-** ''Montgomery Life'' of [[Fort Washington, Pennsylvania]]
-** ''North Penn Life'' of [[Lansdale, Pennsylvania]]
-** ''Perkasie News Herald'' of [[Perkasie, Pennsylvania]]
-** ''Public Spirit'' of [[Hatboro, Pennsylvania]]
-** ''Souderton Independent'' of [[Souderton, Pennsylvania]]
-** ''Springfield Sun'' of [[Springfield, Pennsylvania]]
-** ''Spring-Ford Reporter'' of [[Royersford, Pennsylvania]]
-** ''Times Chronicle'' of [[Jenkintown, Pennsylvania]]
-** ''Valley Item'' of [[Perkiomenville, Pennsylvania]]
-** ''Willow Grove Guide'' of [[Willow Grove, Pennsylvania]]
-* News Gleaner Publications (closed December 2008) {{WS|newsgleaner.com}}
-** ''Life Newspapers'' of [[Philadelphia, Pennsylvania]]
-* Suburban Publications
-** ''The Suburban & Wayne Times'' {{WS|waynesuburban.com}} of [[Wayne, Pennsylvania]]
-** ''The Suburban Advertiser'' of [[Exton, Pennsylvania]]
-** ''The King of Prussia Courier'' of [[King of Prussia, Pennsylvania]]
-* Press Newspapers {{WS|countypressonline.com}}
-** ''County Press'' of [[Newtown Square, Pennsylvania]]
-** ''Garnet Valley Press'' of [[Glen Mills, Pennsylvania]]
-** ''Haverford Press'' of [[Newtown Square, Pennsylvania]] (closed January 2009)
-** ''Hometown Press'' of [[Glen Mills, Pennsylvania]] (closed January 2009)
-** ''Media Press'' of [[Newtown Square, Pennsylvania]] (closed January 2009)
-** ''Springfield Press'' of [[Springfield, Pennsylvania]]
-* Berks-Mont Newspapers {{WS|berksmontnews.com}}
-** ''The Boyertown Area Times'' of [[Boyertown, Pennsylvania]]
-** ''The Kutztown Area Patriot'' of [[Kutztown, Pennsylvania]]
-** ''The Hamburg Area Item'' of [[Hamburg, Pennsylvania]]
-** ''The Southern Berks News'' of [[Exeter Township, Berks County, Pennsylvania]]
-** ''The Free Press'' of [[Quakertown, Pennsylvania]]
-** ''The Saucon News'' of [[Quakertown, Pennsylvania]]
-** ''Westside Weekly'' of [[Reading, Pennsylvania]]
-
-* Magazines
-** ''Bucks Co. Town & Country Living''
-** ''Chester Co. Town & Country Living''
-** ''Montgomery Co. Town & Country Living''
-** ''Garden State Town & Country Living''
-** ''Montgomery Homes''
-** ''Philadelphia Golfer''
-** ''Parents Express''
-** ''Art Matters''
-
-{{JRC}}
-
-==References==
-<references />
-
-[[Category:Journal Register publications|*]]
diff --git a/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest2.txt b/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest2.txt
deleted file mode 100644
index 8f25a80fff..0000000000
--- a/contrib/python/diff-match-patch/py3/diff_match_patch/tests/speedtest2.txt
+++ /dev/null
@@ -1,188 +0,0 @@
-This is a '''list of newspapers published by [[Journal Register Company]]'''.
-
-The company owns daily and weekly newspapers, other print media properties and newspaper-affiliated local Websites in the [[U.S.]] states of [[Connecticut]], [[Michigan]], [[New York]], [[Ohio]], [[Pennsylvania]] and [[New Jersey]], organized in six geographic "clusters":<ref>[http://www.journalregister.com/publications.html Journal Register Company: Our Publications], accessed April 21, 2010.</ref>
-
-== Capital-Saratoga ==
-Three dailies, associated weeklies and [[pennysaver]]s in greater [[Albany, New York]]; also [http://www.capitalcentral.com capitalcentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com].
-
-* ''The Oneida Daily Dispatch'' {{WS|oneidadispatch.com}} of [[Oneida, New York]]
-* ''[[The Record (Troy)|The Record]]'' {{WS|troyrecord.com}} of [[Troy, New York]]
-* ''[[The Saratogian]]'' {{WS|saratogian.com}} of [[Saratoga Springs, New York]]
-* Weeklies:
-** ''Community News'' {{WS|cnweekly.com}} weekly of [[Clifton Park, New York]]
-** ''Rome Observer'' {{WS|romeobserver.com}} of [[Rome, New York]]
-** ''WG Life '' {{WS|saratogian.com/wglife/}} of [[Wilton, New York]]
-** ''Ballston Spa Life '' {{WS|saratogian.com/bspalife}} of [[Ballston Spa, New York]]
-** ''Greenbush Life'' {{WS|troyrecord.com/greenbush}} of [[Troy, New York]]
-** ''Latham Life'' {{WS|troyrecord.com/latham}} of [[Latham, New York]]
-** ''River Life'' {{WS|troyrecord.com/river}} of [[Troy, New York]]
-
-== Connecticut ==
-Three dailies, associated weeklies and [[pennysaver]]s in the state of [[Connecticut]]; also [http://www.ctcentral.com CTcentral.com], [http://www.ctcarsandtrucks.com CTCarsAndTrucks.com] and [http://www.jobsinct.com JobsInCT.com].
-
-* ''The Middletown Press'' {{WS|middletownpress.com}} of [[Middletown, Connecticut|Middletown]]
-* ''[[New Haven Register]]'' {{WS|newhavenregister.com}} of [[New Haven, Connecticut|New Haven]]
-* ''The Register Citizen'' {{WS|registercitizen.com}} of [[Torrington, Connecticut|Torrington]]
-
-* Housatonic Publications
-** ''The Housatonic Times'' {{WS|housatonictimes.com}} of [[New Milford, Connecticut|New Milford]]
-** ''Litchfield County Times'' {{WS|countytimes.com}} of [[Litchfield, Connecticut|Litchfield]]
-
-* Minuteman Publications
-** ''[[Fairfield Minuteman]]'' {{WS|fairfieldminuteman.com}}of [[Fairfield, Connecticut|Fairfield]]
-** ''The Westport Minuteman'' {{WS|westportminuteman.com}} of [[Westport, Connecticut|Westport]]
-
-* Shoreline Newspapers
-** ''The Dolphin'' {{WS|dolphin-news.com}} of [[Naval Submarine Base New London]] in [[New London, Connecticut|New London]]
-** ''Shoreline Times'' {{WS|shorelinetimes.com}} of [[Guilford, Connecticut|Guilford]]
-
-* Foothills Media Group {{WS|foothillsmediagroup.com}}
-** ''Thomaston Express'' {{WS|thomastonexpress.com}} of [[Thomaston, Connecticut|Thomaston]]
-** ''Good News About Torrington'' {{WS|goodnewsabouttorrington.com}} of [[Torrington, Connecticut|Torrington]]
-** ''Granby News'' {{WS|foothillsmediagroup.com/granby}} of [[Granby, Connecticut|Granby]]
-** ''Canton News'' {{WS|foothillsmediagroup.com/canton}} of [[Canton, Connecticut|Canton]]
-** ''Avon News'' {{WS|foothillsmediagroup.com/avon}} of [[Avon, Connecticut|Avon]]
-** ''Simsbury News'' {{WS|foothillsmediagroup.com/simsbury}} of [[Simsbury, Connecticut|Simsbury]]
-** ''Litchfield News'' {{WS|foothillsmediagroup.com/litchfield}} of [[Litchfield, Connecticut|Litchfield]]
-** ''Foothills Trader'' {{WS|foothillstrader.com}} of Torrington, Bristol, Canton
-
-* Other weeklies
-** ''The Milford-Orange Bulletin'' {{WS|ctbulletin.com}} of [[Orange, Connecticut|Orange]]
-** ''The Post-Chronicle'' {{WS|ctpostchronicle.com}} of [[North Haven, Connecticut|North Haven]]
-** ''West Hartford News'' {{WS|westhartfordnews.com}} of [[West Hartford, Connecticut|West Hartford]]
-
-* Magazines
-** ''The Connecticut Bride'' {{WS|connecticutmag.com}}
-** ''Connecticut Magazine'' {{WS|theconnecticutbride.com}}
-** ''Passport Magazine'' {{WS|passport-mag.com}}
-
-== Michigan ==
-Four dailies, associated weeklies and [[pennysaver]]s in the state of [[Michigan]]; also [http://www.micentralhomes.com MIcentralhomes.com] and [http://www.micentralautos.com MIcentralautos.com]
-* ''[[Oakland Press]]'' {{WS|theoaklandpress.com}} of [[Oakland, Michigan|Oakland]]
-* ''Daily Tribune'' {{WS|dailytribune.com}} of [[Royal Oak, Michigan|Royal Oak]]
-* ''Macomb Daily'' {{WS|macombdaily.com}} of [[Mt. Clemens, Michigan|Mt. Clemens]]
-* ''[[Morning Sun]]'' {{WS|themorningsun.com}} of [[Mount Pleasant, Michigan|Mount Pleasant]]
-
-* Heritage Newspapers {{WS|heritage.com}}
-** ''Belleville View'' {{WS|bellevilleview.com}}
-** ''Ile Camera'' {{WS|thenewsherald.com/ile_camera}}
-** ''Monroe Guardian'' {{WS|monreguardian.com}}
-** ''Ypsilanti Courier'' {{WS|ypsilanticourier.com}}
-** ''News-Herald'' {{WS|thenewsherald.com}}
-** ''Press & Guide'' {{WS|pressandguide.com}}
-** ''Chelsea Standard & Dexter Leader'' {{WS|chelseastandard.com}}
-** ''Manchester Enterprise'' {{WS|manchesterguardian.com}}
-** ''Milan News-Leader'' {{WS|milannews.com}}
-** ''Saline Reporter'' {{WS|salinereporter.com}}
-* Independent Newspapers
-** ''Advisor'' {{WS|sourcenewspapers.com}}
-** ''Source'' {{WS|sourcenewspapers.com}}
-* Morning Star {{WS|morningstarpublishing.com}}
-** ''The Leader & Kalkaskian'' {{WS|leaderandkalkaskian.com}}
-** ''Grand Traverse Insider'' {{WS|grandtraverseinsider.com}}
-** ''Alma Reminder''
-** ''Alpena Star''
-** ''Ogemaw/Oscoda County Star''
-** ''Presque Isle Star''
-** ''St. Johns Reminder''
-
-* Voice Newspapers {{WS|voicenews.com}}
-** ''Armada Times''
-** ''Bay Voice''
-** ''Blue Water Voice''
-** ''Downriver Voice''
-** ''Macomb Township Voice''
-** ''North Macomb Voice''
-** ''Weekend Voice''
-
-== Mid-Hudson ==
-One daily, associated magazines in the [[Hudson River Valley]] of [[New York]]; also [http://www.midhudsoncentral.com MidHudsonCentral.com] and [http://www.jobsinnewyork.com JobsInNewYork.com].
-
-* ''[[Daily Freeman]]'' {{WS|dailyfreeman.com}} of [[Kingston, New York]]
-* ''Las Noticias'' {{WS|lasnoticiasny.com}} of [[Kingston, New York]]
-
-== Ohio ==
-Two dailies, associated magazines and three shared Websites, all in the state of [[Ohio]]: [http://www.allaroundcleveland.com AllAroundCleveland.com], [http://www.allaroundclevelandcars.com AllAroundClevelandCars.com] and [http://www.allaroundclevelandjobs.com AllAroundClevelandJobs.com].
-
-* ''[[The News-Herald (Ohio)|The News-Herald]]'' {{WS|news-herald.com}} of [[Willoughby, Ohio|Willoughby]]
-* ''[[The Morning Journal]]'' {{WS|morningjournal.com}} of [[Lorain, Ohio|Lorain]]
-* ''El Latino Expreso'' {{WS|lorainlatino.com}} of [[Lorain, Ohio|Lorain]]
-
-== Philadelphia area ==
-Seven dailies and associated weeklies and magazines in [[Pennsylvania]] and [[New Jersey]], and associated Websites: [http://www.allaroundphilly.com AllAroundPhilly.com], [http://www.jobsinnj.com JobsInNJ.com], [http://www.jobsinpa.com JobsInPA.com], and [http://www.phillycarsearch.com PhillyCarSearch.com].
-
-* ''[[The Daily Local News]]'' {{WS|dailylocal.com}} of [[West Chester, Pennsylvania|West Chester]]
-* ''[[Delaware County Daily and Sunday Times]] {{WS|delcotimes.com}} of Primos [[Upper Darby Township, Pennsylvania]]
-* ''[[The Mercury (Pennsylvania)|The Mercury]]'' {{WS|pottstownmercury.com}} of [[Pottstown, Pennsylvania|Pottstown]]
-* ''[[The Reporter (Lansdale)|The Reporter]]'' {{WS|thereporteronline.com}} of [[Lansdale, Pennsylvania|Lansdale]]
-* ''The Times Herald'' {{WS|timesherald.com}} of [[Norristown, Pennsylvania|Norristown]]
-* ''[[The Trentonian]]'' {{WS|trentonian.com}} of [[Trenton, New Jersey]]
-
-* Weeklies
-* ''The Phoenix'' {{WS|phoenixvillenews.com}} of [[Phoenixville, Pennsylvania]]
-** ''El Latino Expreso'' {{WS|njexpreso.com}} of [[Trenton, New Jersey]]
-** ''La Voz'' {{WS|lavozpa.com}} of [[Norristown, Pennsylvania]]
-** ''The Tri County Record'' {{WS|tricountyrecord.com}} of [[Morgantown, Pennsylvania]]
-** ''Penny Pincher'' {{WS|pennypincherpa.com}}of [[Pottstown, Pennsylvania]]
-
-* Chesapeake Publishing {{WS|southernchestercountyweeklies.com}}
-** ''The Kennett Paper'' {{WS|kennettpaper.com}} of [[Kennett Square, Pennsylvania]]
-** ''Avon Grove Sun'' {{WS|avongrovesun.com}} of [[West Grove, Pennsylvania]]
-** ''The Central Record'' {{WS|medfordcentralrecord.com}} of [[Medford, New Jersey]]
-** ''Maple Shade Progress'' {{WS|mapleshadeprogress.com}} of [[Maple Shade, New Jersey]]
-
-* Intercounty Newspapers {{WS|buckslocalnews.com}} {{WS|southjerseylocalnews.com}}
-** ''The Pennington Post'' {{WS|penningtonpost.com}} of [[Pennington, New Jersey]]
-** ''The Bristol Pilot'' {{WS|bristolpilot.com}} of [[Bristol, Pennsylvania]]
-** ''Yardley News'' {{WS|yardleynews.com}} of [[Yardley, Pennsylvania]]
-** ''Advance of Bucks County'' {{WS|advanceofbucks.com}} of [[Newtown, Pennsylvania]]
-** ''Record Breeze'' {{WS|recordbreeze.com}} of [[Berlin, New Jersey]]
-** ''Community News'' {{WS|sjcommunitynews.com}} of [[Pemberton, New Jersey]]
-
-* Montgomery Newspapers {{WS|montgomerynews.com}}
-** ''Ambler Gazette'' {{WS|amblergazette.com}} of [[Ambler, Pennsylvania]]
-** ''The Colonial'' {{WS|colonialnews.com}} of [[Plymouth Meeting, Pennsylvania]]
-** ''Glenside News'' {{WS|glensidenews.com}} of [[Glenside, Pennsylvania]]
-** ''The Globe'' {{WS|globenewspaper.com}} of [[Lower Moreland Township, Pennsylvania]]
-** ''Montgomery Life'' {{WS|montgomerylife.com}} of [[Fort Washington, Pennsylvania]]
-** ''North Penn Life'' {{WS|northpennlife.com}} of [[Lansdale, Pennsylvania]]
-** ''Perkasie News Herald'' {{WS|perkasienewsherald.com}} of [[Perkasie, Pennsylvania]]
-** ''Public Spirit'' {{WS|thepublicspirit.com}} of [[Hatboro, Pennsylvania]]
-** ''Souderton Independent'' {{WS|soudertonindependent.com}} of [[Souderton, Pennsylvania]]
-** ''Springfield Sun'' {{WS|springfieldsun.com}} of [[Springfield, Pennsylvania]]
-** ''Spring-Ford Reporter'' {{WS|springfordreporter.com}} of [[Royersford, Pennsylvania]]
-** ''Times Chronicle'' {{WS|thetimeschronicle.com}} of [[Jenkintown, Pennsylvania]]
-** ''Valley Item'' {{WS|valleyitem.com}} of [[Perkiomenville, Pennsylvania]]
-** ''Willow Grove Guide'' {{WS|willowgroveguide.com}} of [[Willow Grove, Pennsylvania]]
-** ''The Review'' {{WS|roxreview.com}} of [[Roxborough, Philadelphia, Pennsylvania]]
-
-* Main Line Media News {{WS|mainlinemedianews.com}}
-** ''Main Line Times'' {{WS|mainlinetimes.com}} of [[Ardmore, Pennsylvania]]
-** ''Main Line Life'' {{WS|mainlinelife.com}} of [[Ardmore, Pennsylvania]]
-** ''The King of Prussia Courier'' {{WS|kingofprussiacourier.com}} of [[King of Prussia, Pennsylvania]]
-
-* Delaware County News Network {{WS|delconewsnetwork.com}}
-** ''News of Delaware County'' {{WS|newsofdelawarecounty.com}} of [[Havertown, Pennsylvania]]
-** ''County Press'' {{WS|countypressonline.com}} of [[Newtown Square, Pennsylvania]]
-** ''Garnet Valley Press'' {{WS|countypressonline.com}} of [[Glen Mills, Pennsylvania]]
-** ''Springfield Press'' {{WS|countypressonline.com}} of [[Springfield, Pennsylvania]]
-** ''Town Talk'' {{WS|towntalknews.com}} of [[Ridley, Pennsylvania]]
-
-* Berks-Mont Newspapers {{WS|berksmontnews.com}}
-** ''The Boyertown Area Times'' {{WS|berksmontnews.com/boyertown_area_times}} of [[Boyertown, Pennsylvania]]
-** ''The Kutztown Area Patriot'' {{WS|berksmontnews.com/kutztown_area_patriot}} of [[Kutztown, Pennsylvania]]
-** ''The Hamburg Area Item'' {{WS|berksmontnews.com/hamburg_area_item}} of [[Hamburg, Pennsylvania]]
-** ''The Southern Berks News'' {{WS|berksmontnews.com/southern_berks_news}} of [[Exeter Township, Berks County, Pennsylvania]]
-** ''Community Connection'' {{WS|berksmontnews.com/community_connection}} of [[Boyertown, Pennsylvania]]
-
-* Magazines
-** ''Bucks Co. Town & Country Living'' {{WS|buckscountymagazine.com}}
-** ''Parents Express'' {{WS|parents-express.com}}
-** ''Real Men, Rednecks'' {{WS|realmenredneck.com}}
-
-{{JRC}}
-
-==References==
-<references />
-
-[[Category:Journal Register publications|*]]
diff --git a/contrib/python/diff-match-patch/py3/ya.make b/contrib/python/diff-match-patch/py3/ya.make
deleted file mode 100644
index 9a510f1683..0000000000
--- a/contrib/python/diff-match-patch/py3/ya.make
+++ /dev/null
@@ -1,28 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(20230430)
-
-LICENSE(Apache-2.0)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- diff_match_patch/__init__.py
- diff_match_patch/__version__.py
- diff_match_patch/diff_match_patch.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/diff-match-patch/py3/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- tests
-)
diff --git a/contrib/python/diff-match-patch/ya.make b/contrib/python/diff-match-patch/ya.make
deleted file mode 100644
index ca419d06b4..0000000000
--- a/contrib/python/diff-match-patch/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/diff-match-patch/py2)
-ELSE()
- PEERDIR(contrib/python/diff-match-patch/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
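The two build files removed above follow the repository's usual Python-proxy layout: dependents point at the version-agnostic directory, and the `IF (PYTHON2)`/`ELSE()` switch resolves it to `py2` or `py3`. A hypothetical dependent target (made up purely for illustration) would have pulled the library in roughly like this::

    PY3_LIBRARY()

    # Version-agnostic dependency; the proxy above resolves it to py2 or py3.
    PEERDIR(
        contrib/python/diff-match-patch
    )

    END()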
diff --git a/contrib/python/humanfriendly/py2/LICENSE.txt b/contrib/python/humanfriendly/py2/LICENSE.txt
deleted file mode 100644
index 96ece318ed..0000000000
--- a/contrib/python/humanfriendly/py2/LICENSE.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2021 Peter Odding
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/humanfriendly/py2/README.rst b/contrib/python/humanfriendly/py2/README.rst
deleted file mode 100644
index 80145d564c..0000000000
--- a/contrib/python/humanfriendly/py2/README.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-humanfriendly: Human friendly input/output in Python
-====================================================
-
-.. image:: https://github.com/xolox/python-humanfriendly/actions/workflows/test.yml/badge.svg?branch=master
- :target: https://github.com/xolox/python-humanfriendly/actions
-
-.. image:: https://codecov.io/gh/xolox/python-humanfriendly/branch/master/graph/badge.svg?token=jYaj4T74TU
- :target: https://codecov.io/gh/xolox/python-humanfriendly
-
-The functions and classes in the `humanfriendly` package can be used to make
-text interfaces more user friendly. Some example features:
-
-- Parsing and formatting numbers, file sizes, pathnames and timespans in
- simple, human friendly formats.
-
-- Easy to use timers for long running operations, with human friendly
- formatting of the resulting timespans.
-
-- Prompting the user to select a choice from a list of options by typing the
- option's number or a unique substring of the option.
-
-- Terminal interaction including text styling (`ANSI escape sequences`_), user
- friendly rendering of usage messages and querying the terminal for its
- size.
-
-The `humanfriendly` package is currently tested on Python 2.7, 3.5+ and PyPy
-(2.7) on Linux and macOS. While the intention is to support Windows as well,
-you may encounter some rough edges.
-
-.. contents::
- :local:
-
-Getting started
----------------
-
-It's very simple to start using the `humanfriendly` package::
-
- >>> from humanfriendly import format_size, parse_size
- >>> from humanfriendly.prompts import prompt_for_input
- >>> user_input = prompt_for_input("Enter a readable file size: ")
-
- Enter a readable file size: 16G
-
- >>> num_bytes = parse_size(user_input)
- >>> print(num_bytes)
- 16000000000
- >>> print("You entered:", format_size(num_bytes))
- You entered: 16 GB
- >>> print("You entered:", format_size(num_bytes, binary=True))
- You entered: 14.9 GiB
-
-To get a demonstration of supported terminal text styles (based on
-`ANSI escape sequences`_) you can run the following command::
-
- $ humanfriendly --demo
-
-Command line
-------------
-
-.. A DRY solution to avoid duplication of the `humanfriendly --help' text:
-..
-.. [[[cog
-.. from humanfriendly.usage import inject_usage
-.. inject_usage('humanfriendly.cli')
-.. ]]]
-
-**Usage:** `humanfriendly [OPTIONS]`
-
-Human friendly input/output (text formatting) on the command
-line based on the Python package with the same name.
-
-**Supported options:**
-
-.. csv-table::
- :header: Option, Description
- :widths: 30, 70
-
-
- "``-c``, ``--run-command``","Execute an external command (given as the positional arguments) and render
- a spinner and timer while the command is running. The exit status of the
- command is propagated."
- ``--format-table``,"Read tabular data from standard input (each line is a row and each
- whitespace separated field is a column), format the data as a table and
- print the resulting table to standard output. See also the ``--delimiter``
- option."
- "``-d``, ``--delimiter=VALUE``","Change the delimiter used by ``--format-table`` to ``VALUE`` (a string). By default
- all whitespace is treated as a delimiter."
- "``-l``, ``--format-length=LENGTH``","Convert a length count (given as the integer or float ``LENGTH``) into a human
- readable string and print that string to standard output."
- "``-n``, ``--format-number=VALUE``","Format a number (given as the integer or floating point number ``VALUE``) with
- thousands separators and two decimal places (if needed) and print the
- formatted number to standard output."
- "``-s``, ``--format-size=BYTES``","Convert a byte count (given as the integer ``BYTES``) into a human readable
- string and print that string to standard output."
- "``-b``, ``--binary``","Change the output of ``-s``, ``--format-size`` to use binary multiples of bytes
- (base-2) instead of the default decimal multiples of bytes (base-10)."
- "``-t``, ``--format-timespan=SECONDS``","Convert a number of seconds (given as the floating point number ``SECONDS``)
- into a human readable timespan and print that string to standard output."
- ``--parse-length=VALUE``,"Parse a human readable length (given as the string ``VALUE``) and print the
- number of metres to standard output."
- ``--parse-size=VALUE``,"Parse a human readable data size (given as the string ``VALUE``) and print the
- number of bytes to standard output."
- ``--demo``,"Demonstrate changing the style and color of the terminal font using ANSI
- escape sequences."
- "``-h``, ``--help``",Show this message and exit.
-
-.. [[[end]]]
-
-A note about size units
------------------------
-
-When I originally published the `humanfriendly` package I went with binary
-multiples of bytes (powers of two). It was pointed out several times that this
-was a poor choice (see issue `#4`_ and pull requests `#8`_ and `#9`_) and thus
-the new default became decimal multiples of bytes (powers of ten):
-
-+------+---------------+---------------+
-| Unit | Binary value | Decimal value |
-+------+---------------+---------------+
-| KB | 1024 | 1000 |
-+------+---------------+---------------+
-| MB | 1048576 | 1000000 |
-+------+---------------+---------------+
-| GB | 1073741824 | 1000000000 |
-+------+---------------+---------------+
-| TB | 1099511627776 | 1000000000000 |
-+------+---------------+---------------+
-| etc | | |
-+------+---------------+---------------+
-
-The option to use binary multiples of bytes remains by passing the keyword
-argument `binary=True` to the `format_size()`_ and `parse_size()`_ functions.
-
-Windows support
----------------
-
-Windows 10 gained native support for ANSI escape sequences which means commands
-like ``humanfriendly --demo`` should work out of the box (if your system is
-up-to-date enough). If this doesn't work then you can install the colorama_
-package, it will be used automatically once installed.
-
-Contact
--------
-
-The latest version of `humanfriendly` is available on PyPI_ and GitHub_. The
-documentation is hosted on `Read the Docs`_ and includes a changelog_. For bug
-reports please create an issue on GitHub_. If you have questions, suggestions,
-etc. feel free to send me an e-mail at `peter@peterodding.com`_.
-
-License
--------
-
-This software is licensed under the `MIT license`_.
-
-© 2021 Peter Odding.
-
-.. External references:
-.. _#4: https://github.com/xolox/python-humanfriendly/issues/4
-.. _#8: https://github.com/xolox/python-humanfriendly/pull/8
-.. _#9: https://github.com/xolox/python-humanfriendly/pull/9
-.. _ANSI escape sequences: https://en.wikipedia.org/wiki/ANSI_escape_code
-.. _changelog: https://humanfriendly.readthedocs.io/en/latest/changelog.html
-.. _colorama: https://pypi.org/project/colorama
-.. _format_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.format_size
-.. _GitHub: https://github.com/xolox/python-humanfriendly
-.. _MIT license: https://en.wikipedia.org/wiki/MIT_License
-.. _parse_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.parse_size
-.. _peter@peterodding.com: peter@peterodding.com
-.. _PyPI: https://pypi.org/project/humanfriendly
-.. _Read the Docs: https://humanfriendly.readthedocs.io
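The size-unit note in the README above is easiest to see in code. A small illustrative sketch against the documented `format_size()` and `parse_size()` API (the commented values follow the README's stated convention rather than captured output)::

    from humanfriendly import format_size, parse_size

    print(format_size(1000000))               # decimal multiples (default): "1 MB"
    print(format_size(1048576, binary=True))  # binary multiples: "1 MiB"

    # parse_size() accepts both notations.
    print(parse_size("1 MB"))   # 1000000
    print(parse_size("1 MiB"))  # 1048576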
diff --git a/contrib/python/humanfriendly/py3/.dist-info/METADATA b/contrib/python/humanfriendly/py3/.dist-info/METADATA
deleted file mode 100644
index c36fa4cafe..0000000000
--- a/contrib/python/humanfriendly/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,216 +0,0 @@
-Metadata-Version: 2.1
-Name: humanfriendly
-Version: 10.0
-Summary: Human friendly output for text interfaces using Python
-Home-page: https://humanfriendly.readthedocs.io
-Author: Peter Odding
-Author-email: peter@peterodding.com
-License: MIT
-Platform: UNKNOWN
-Classifier: Development Status :: 6 - Mature
-Classifier: Environment :: Console
-Classifier: Framework :: Sphinx :: Extension
-Classifier: Intended Audience :: Developers
-Classifier: Intended Audience :: System Administrators
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Natural Language :: English
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Classifier: Programming Language :: Python :: Implementation :: PyPy
-Classifier: Topic :: Communications
-Classifier: Topic :: Scientific/Engineering :: Human Machine Interfaces
-Classifier: Topic :: Software Development
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Software Development :: User Interfaces
-Classifier: Topic :: System :: Shells
-Classifier: Topic :: System :: System Shells
-Classifier: Topic :: System :: Systems Administration
-Classifier: Topic :: Terminals
-Classifier: Topic :: Text Processing :: General
-Classifier: Topic :: Text Processing :: Linguistic
-Classifier: Topic :: Utilities
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
-Requires-Dist: monotonic ; python_version == "2.7"
-Requires-Dist: pyreadline ; sys_platform == "win32" and python_version<"3.8"
-Requires-Dist: pyreadline3 ; sys_platform == "win32" and python_version>="3.8"
-
-humanfriendly: Human friendly input/output in Python
-====================================================
-
-.. image:: https://github.com/xolox/python-humanfriendly/actions/workflows/test.yml/badge.svg?branch=master
- :target: https://github.com/xolox/python-humanfriendly/actions
-
-.. image:: https://codecov.io/gh/xolox/python-humanfriendly/branch/master/graph/badge.svg?token=jYaj4T74TU
- :target: https://codecov.io/gh/xolox/python-humanfriendly
-
-The functions and classes in the `humanfriendly` package can be used to make
-text interfaces more user friendly. Some example features:
-
-- Parsing and formatting numbers, file sizes, pathnames and timespans in
- simple, human friendly formats.
-
-- Easy to use timers for long running operations, with human friendly
- formatting of the resulting timespans.
-
-- Prompting the user to select a choice from a list of options by typing the
- option's number or a unique substring of the option.
-
-- Terminal interaction including text styling (`ANSI escape sequences`_), user
- friendly rendering of usage messages and querying the terminal for its
- size.
-
-The `humanfriendly` package is currently tested on Python 2.7, 3.5+ and PyPy
-(2.7) on Linux and macOS. While the intention is to support Windows as well,
-you may encounter some rough edges.
-
-.. contents::
- :local:
-
-Getting started
----------------
-
-It's very simple to start using the `humanfriendly` package::
-
- >>> from humanfriendly import format_size, parse_size
- >>> from humanfriendly.prompts import prompt_for_input
- >>> user_input = prompt_for_input("Enter a readable file size: ")
-
- Enter a readable file size: 16G
-
- >>> num_bytes = parse_size(user_input)
- >>> print(num_bytes)
- 16000000000
- >>> print("You entered:", format_size(num_bytes))
- You entered: 16 GB
- >>> print("You entered:", format_size(num_bytes, binary=True))
- You entered: 14.9 GiB
-
-To get a demonstration of supported terminal text styles (based on
-`ANSI escape sequences`_) you can run the following command::
-
- $ humanfriendly --demo
-
-Command line
-------------
-
-.. A DRY solution to avoid duplication of the `humanfriendly --help' text:
-..
-.. [[[cog
-.. from humanfriendly.usage import inject_usage
-.. inject_usage('humanfriendly.cli')
-.. ]]]
-
-**Usage:** `humanfriendly [OPTIONS]`
-
-Human friendly input/output (text formatting) on the command
-line based on the Python package with the same name.
-
-**Supported options:**
-
-.. csv-table::
- :header: Option, Description
- :widths: 30, 70
-
-
- "``-c``, ``--run-command``","Execute an external command (given as the positional arguments) and render
- a spinner and timer while the command is running. The exit status of the
- command is propagated."
- ``--format-table``,"Read tabular data from standard input (each line is a row and each
- whitespace separated field is a column), format the data as a table and
- print the resulting table to standard output. See also the ``--delimiter``
- option."
- "``-d``, ``--delimiter=VALUE``","Change the delimiter used by ``--format-table`` to ``VALUE`` (a string). By default
- all whitespace is treated as a delimiter."
- "``-l``, ``--format-length=LENGTH``","Convert a length count (given as the integer or float ``LENGTH``) into a human
- readable string and print that string to standard output."
- "``-n``, ``--format-number=VALUE``","Format a number (given as the integer or floating point number ``VALUE``) with
- thousands separators and two decimal places (if needed) and print the
- formatted number to standard output."
- "``-s``, ``--format-size=BYTES``","Convert a byte count (given as the integer ``BYTES``) into a human readable
- string and print that string to standard output."
- "``-b``, ``--binary``","Change the output of ``-s``, ``--format-size`` to use binary multiples of bytes
- (base-2) instead of the default decimal multiples of bytes (base-10)."
- "``-t``, ``--format-timespan=SECONDS``","Convert a number of seconds (given as the floating point number ``SECONDS``)
- into a human readable timespan and print that string to standard output."
- ``--parse-length=VALUE``,"Parse a human readable length (given as the string ``VALUE``) and print the
- number of metres to standard output."
- ``--parse-size=VALUE``,"Parse a human readable data size (given as the string ``VALUE``) and print the
- number of bytes to standard output."
- ``--demo``,"Demonstrate changing the style and color of the terminal font using ANSI
- escape sequences."
- "``-h``, ``--help``",Show this message and exit.
-
-.. [[[end]]]
-
-A note about size units
------------------------
-
-When I originally published the `humanfriendly` package I went with binary
-multiples of bytes (powers of two). It was pointed out several times that this
-was a poor choice (see issue `#4`_ and pull requests `#8`_ and `#9`_) and thus
-the new default became decimal multiples of bytes (powers of ten):
-
-+------+---------------+---------------+
-| Unit | Binary value | Decimal value |
-+------+---------------+---------------+
-| KB | 1024 | 1000 |
-+------+---------------+---------------+
-| MB | 1048576 | 1000000 |
-+------+---------------+---------------+
-| GB | 1073741824 | 1000000000 |
-+------+---------------+---------------+
-| TB | 1099511627776 | 1000000000000 |
-+------+---------------+---------------+
-| etc | | |
-+------+---------------+---------------+
-
-The option to use binary multiples of bytes remains by passing the keyword
-argument `binary=True` to the `format_size()`_ and `parse_size()`_ functions.
-
-Windows support
----------------
-
-Windows 10 gained native support for ANSI escape sequences which means commands
-like ``humanfriendly --demo`` should work out of the box (if your system is
-up-to-date enough). If this doesn't work then you can install the colorama_
-package, it will be used automatically once installed.
-
-Contact
--------
-
-The latest version of `humanfriendly` is available on PyPI_ and GitHub_. The
-documentation is hosted on `Read the Docs`_ and includes a changelog_. For bug
-reports please create an issue on GitHub_. If you have questions, suggestions,
-etc. feel free to send me an e-mail at `peter@peterodding.com`_.
-
-License
--------
-
-This software is licensed under the `MIT license`_.
-
-© 2021 Peter Odding.
-
-.. External references:
-.. _#4: https://github.com/xolox/python-humanfriendly/issues/4
-.. _#8: https://github.com/xolox/python-humanfriendly/pull/8
-.. _#9: https://github.com/xolox/python-humanfriendly/pull/9
-.. _ANSI escape sequences: https://en.wikipedia.org/wiki/ANSI_escape_code
-.. _changelog: https://humanfriendly.readthedocs.io/en/latest/changelog.html
-.. _colorama: https://pypi.org/project/colorama
-.. _format_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.format_size
-.. _GitHub: https://github.com/xolox/python-humanfriendly
-.. _MIT license: https://en.wikipedia.org/wiki/MIT_License
-.. _parse_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.parse_size
-.. _peter@peterodding.com: peter@peterodding.com
-.. _PyPI: https://pypi.org/project/humanfriendly
-.. _Read the Docs: https://humanfriendly.readthedocs.io
-
-
diff --git a/contrib/python/humanfriendly/py3/.dist-info/entry_points.txt b/contrib/python/humanfriendly/py3/.dist-info/entry_points.txt
deleted file mode 100644
index 2ce8fb8353..0000000000
--- a/contrib/python/humanfriendly/py3/.dist-info/entry_points.txt
+++ /dev/null
@@ -1,3 +0,0 @@
-[console_scripts]
-humanfriendly = humanfriendly.cli:main
-
diff --git a/contrib/python/humanfriendly/py3/.dist-info/top_level.txt b/contrib/python/humanfriendly/py3/.dist-info/top_level.txt
deleted file mode 100644
index f5368c4974..0000000000
--- a/contrib/python/humanfriendly/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-humanfriendly
diff --git a/contrib/python/humanfriendly/py3/LICENSE.txt b/contrib/python/humanfriendly/py3/LICENSE.txt
deleted file mode 100644
index 96ece318ed..0000000000
--- a/contrib/python/humanfriendly/py3/LICENSE.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-Copyright (c) 2021 Peter Odding
-
-Permission is hereby granted, free of charge, to any person obtaining
-a copy of this software and associated documentation files (the
-"Software"), to deal in the Software without restriction, including
-without limitation the rights to use, copy, modify, merge, publish,
-distribute, sublicense, and/or sell copies of the Software, and to
-permit persons to whom the Software is furnished to do so, subject to
-the following conditions:
-
-The above copyright notice and this permission notice shall be
-included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
-EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
-MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
-NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
-LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
-OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
-WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/humanfriendly/py3/README.rst b/contrib/python/humanfriendly/py3/README.rst
deleted file mode 100644
index 80145d564c..0000000000
--- a/contrib/python/humanfriendly/py3/README.rst
+++ /dev/null
@@ -1,170 +0,0 @@
-humanfriendly: Human friendly input/output in Python
-====================================================
-
-.. image:: https://github.com/xolox/python-humanfriendly/actions/workflows/test.yml/badge.svg?branch=master
- :target: https://github.com/xolox/python-humanfriendly/actions
-
-.. image:: https://codecov.io/gh/xolox/python-humanfriendly/branch/master/graph/badge.svg?token=jYaj4T74TU
- :target: https://codecov.io/gh/xolox/python-humanfriendly
-
-The functions and classes in the `humanfriendly` package can be used to make
-text interfaces more user friendly. Some example features:
-
-- Parsing and formatting numbers, file sizes, pathnames and timespans in
- simple, human friendly formats.
-
-- Easy to use timers for long running operations, with human friendly
- formatting of the resulting timespans.
-
-- Prompting the user to select a choice from a list of options by typing the
- option's number or a unique substring of the option.
-
-- Terminal interaction including text styling (`ANSI escape sequences`_), user
- friendly rendering of usage messages and querying the terminal for its
- size.
-
-The `humanfriendly` package is currently tested on Python 2.7, 3.5+ and PyPy
-(2.7) on Linux and macOS. While the intention is to support Windows as well,
-you may encounter some rough edges.
-
-.. contents::
- :local:
-
-Getting started
----------------
-
-It's very simple to start using the `humanfriendly` package::
-
- >>> from humanfriendly import format_size, parse_size
- >>> from humanfriendly.prompts import prompt_for_input
- >>> user_input = prompt_for_input("Enter a readable file size: ")
-
- Enter a readable file size: 16G
-
- >>> num_bytes = parse_size(user_input)
- >>> print(num_bytes)
- 16000000000
- >>> print("You entered:", format_size(num_bytes))
- You entered: 16 GB
- >>> print("You entered:", format_size(num_bytes, binary=True))
- You entered: 14.9 GiB
-
-To get a demonstration of supported terminal text styles (based on
-`ANSI escape sequences`_) you can run the following command::
-
- $ humanfriendly --demo
-
-Command line
-------------
-
-.. A DRY solution to avoid duplication of the `humanfriendly --help' text:
-..
-.. [[[cog
-.. from humanfriendly.usage import inject_usage
-.. inject_usage('humanfriendly.cli')
-.. ]]]
-
-**Usage:** `humanfriendly [OPTIONS]`
-
-Human friendly input/output (text formatting) on the command
-line based on the Python package with the same name.
-
-**Supported options:**
-
-.. csv-table::
- :header: Option, Description
- :widths: 30, 70
-
-
- "``-c``, ``--run-command``","Execute an external command (given as the positional arguments) and render
- a spinner and timer while the command is running. The exit status of the
- command is propagated."
- ``--format-table``,"Read tabular data from standard input (each line is a row and each
- whitespace separated field is a column), format the data as a table and
- print the resulting table to standard output. See also the ``--delimiter``
- option."
- "``-d``, ``--delimiter=VALUE``","Change the delimiter used by ``--format-table`` to ``VALUE`` (a string). By default
- all whitespace is treated as a delimiter."
- "``-l``, ``--format-length=LENGTH``","Convert a length count (given as the integer or float ``LENGTH``) into a human
- readable string and print that string to standard output."
- "``-n``, ``--format-number=VALUE``","Format a number (given as the integer or floating point number ``VALUE``) with
- thousands separators and two decimal places (if needed) and print the
- formatted number to standard output."
- "``-s``, ``--format-size=BYTES``","Convert a byte count (given as the integer ``BYTES``) into a human readable
- string and print that string to standard output."
- "``-b``, ``--binary``","Change the output of ``-s``, ``--format-size`` to use binary multiples of bytes
- (base-2) instead of the default decimal multiples of bytes (base-10)."
- "``-t``, ``--format-timespan=SECONDS``","Convert a number of seconds (given as the floating point number ``SECONDS``)
- into a human readable timespan and print that string to standard output."
- ``--parse-length=VALUE``,"Parse a human readable length (given as the string ``VALUE``) and print the
- number of metres to standard output."
- ``--parse-size=VALUE``,"Parse a human readable data size (given as the string ``VALUE``) and print the
- number of bytes to standard output."
- ``--demo``,"Demonstrate changing the style and color of the terminal font using ANSI
- escape sequences."
- "``-h``, ``--help``",Show this message and exit.
-
-.. [[[end]]]
-
-A note about size units
------------------------
-
-When I originally published the `humanfriendly` package I went with binary
-multiples of bytes (powers of two). It was pointed out several times that this
-was a poor choice (see issue `#4`_ and pull requests `#8`_ and `#9`_) and thus
-the new default became decimal multiples of bytes (powers of ten):
-
-+------+---------------+---------------+
-| Unit | Binary value  | Decimal value |
-+------+---------------+---------------+
-| KB   | 1024          | 1000          |
-+------+---------------+---------------+
-| MB   | 1048576       | 1000000       |
-+------+---------------+---------------+
-| GB   | 1073741824    | 1000000000    |
-+------+---------------+---------------+
-| TB   | 1099511627776 | 1000000000000 |
-+------+---------------+---------------+
-| etc  |               |               |
-+------+---------------+---------------+
-
-The option to use binary multiples of bytes remains by passing the keyword
-argument `binary=True` to the `format_size()`_ and `parse_size()`_ functions.
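-
-A small sketch of both keyword arguments (the values match the doctests in
-the `humanfriendly` module)::
-
- >>> from humanfriendly import format_size, parse_size
- >>> format_size(1024, binary=True)
- '1 KiB'
- >>> parse_size('1 KB', binary=True)
- 1024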
-
-Windows support
----------------
-
-Windows 10 gained native support for ANSI escape sequences which means commands
-like ``humanfriendly --demo`` should work out of the box (if your system is
-up-to-date enough). If this doesn't work then you can install the colorama_
-package; it will be used automatically once installed.
-
-Contact
--------
-
-The latest version of `humanfriendly` is available on PyPI_ and GitHub_. The
-documentation is hosted on `Read the Docs`_ and includes a changelog_. For bug
-reports please create an issue on GitHub_. If you have questions, suggestions,
-etc. feel free to send me an e-mail at `peter@peterodding.com`_.
-
-License
--------
-
-This software is licensed under the `MIT license`_.
-
-© 2021 Peter Odding.
-
-.. External references:
-.. _#4: https://github.com/xolox/python-humanfriendly/issues/4
-.. _#8: https://github.com/xolox/python-humanfriendly/pull/8
-.. _#9: https://github.com/xolox/python-humanfriendly/pull/9
-.. _ANSI escape sequences: https://en.wikipedia.org/wiki/ANSI_escape_code
-.. _changelog: https://humanfriendly.readthedocs.io/en/latest/changelog.html
-.. _colorama: https://pypi.org/project/colorama
-.. _format_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.format_size
-.. _GitHub: https://github.com/xolox/python-humanfriendly
-.. _MIT license: https://en.wikipedia.org/wiki/MIT_License
-.. _parse_size(): https://humanfriendly.readthedocs.io/en/latest/#humanfriendly.parse_size
-.. _peter@peterodding.com: peter@peterodding.com
-.. _PyPI: https://pypi.org/project/humanfriendly
-.. _Read the Docs: https://humanfriendly.readthedocs.io
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/__init__.py b/contrib/python/humanfriendly/py3/humanfriendly/__init__.py
deleted file mode 100644
index 4c0a333861..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/__init__.py
+++ /dev/null
@@ -1,838 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: September 17, 2021
-# URL: https://humanfriendly.readthedocs.io
-
-"""The main module of the `humanfriendly` package."""
-
-# Standard library modules.
-import collections
-import datetime
-import decimal
-import numbers
-import os
-import os.path
-import re
-import time
-
-# Modules included in our package.
-from humanfriendly.compat import is_string, monotonic
-from humanfriendly.deprecation import define_aliases
-from humanfriendly.text import concatenate, format, pluralize, tokenize
-
-# Public identifiers that require documentation.
-__all__ = (
- 'CombinedUnit',
- 'InvalidDate',
- 'InvalidLength',
- 'InvalidSize',
- 'InvalidTimespan',
- 'SizeUnit',
- 'Timer',
- '__version__',
- 'coerce_boolean',
- 'coerce_pattern',
- 'coerce_seconds',
- 'disk_size_units',
- 'format_length',
- 'format_number',
- 'format_path',
- 'format_size',
- 'format_timespan',
- 'length_size_units',
- 'parse_date',
- 'parse_length',
- 'parse_path',
- 'parse_size',
- 'parse_timespan',
- 'round_number',
- 'time_units',
-)
-
-# Semi-standard module versioning.
-__version__ = '10.0'
-
-# Named tuples to define units of size.
-SizeUnit = collections.namedtuple('SizeUnit', 'divider, symbol, name')
-CombinedUnit = collections.namedtuple('CombinedUnit', 'decimal, binary')
-
-# Common disk size units in binary (base-2) and decimal (base-10) multiples.
-disk_size_units = (
- CombinedUnit(SizeUnit(1000**1, 'KB', 'kilobyte'), SizeUnit(1024**1, 'KiB', 'kibibyte')),
- CombinedUnit(SizeUnit(1000**2, 'MB', 'megabyte'), SizeUnit(1024**2, 'MiB', 'mebibyte')),
- CombinedUnit(SizeUnit(1000**3, 'GB', 'gigabyte'), SizeUnit(1024**3, 'GiB', 'gibibyte')),
- CombinedUnit(SizeUnit(1000**4, 'TB', 'terabyte'), SizeUnit(1024**4, 'TiB', 'tebibyte')),
- CombinedUnit(SizeUnit(1000**5, 'PB', 'petabyte'), SizeUnit(1024**5, 'PiB', 'pebibyte')),
- CombinedUnit(SizeUnit(1000**6, 'EB', 'exabyte'), SizeUnit(1024**6, 'EiB', 'exbibyte')),
- CombinedUnit(SizeUnit(1000**7, 'ZB', 'zettabyte'), SizeUnit(1024**7, 'ZiB', 'zebibyte')),
- CombinedUnit(SizeUnit(1000**8, 'YB', 'yottabyte'), SizeUnit(1024**8, 'YiB', 'yobibyte')),
-)
-
-# Common length size units, used for formatting and parsing.
-length_size_units = (dict(prefix='nm', divider=1e-09, singular='nm', plural='nm'),
- dict(prefix='mm', divider=1e-03, singular='mm', plural='mm'),
- dict(prefix='cm', divider=1e-02, singular='cm', plural='cm'),
- dict(prefix='m', divider=1, singular='metre', plural='metres'),
- dict(prefix='km', divider=1000, singular='km', plural='km'))
-
-# Common time units, used for formatting of time spans.
-time_units = (dict(divider=1e-9, singular='nanosecond', plural='nanoseconds', abbreviations=['ns']),
- dict(divider=1e-6, singular='microsecond', plural='microseconds', abbreviations=['us']),
- dict(divider=1e-3, singular='millisecond', plural='milliseconds', abbreviations=['ms']),
- dict(divider=1, singular='second', plural='seconds', abbreviations=['s', 'sec', 'secs']),
- dict(divider=60, singular='minute', plural='minutes', abbreviations=['m', 'min', 'mins']),
- dict(divider=60 * 60, singular='hour', plural='hours', abbreviations=['h']),
- dict(divider=60 * 60 * 24, singular='day', plural='days', abbreviations=['d']),
- dict(divider=60 * 60 * 24 * 7, singular='week', plural='weeks', abbreviations=['w']),
- dict(divider=60 * 60 * 24 * 7 * 52, singular='year', plural='years', abbreviations=['y']))
-
-
-def coerce_boolean(value):
- """
- Coerce any value to a boolean.
-
- :param value: Any Python value. If the value is a string:
-
- - The strings '1', 'yes', 'true' and 'on' are coerced to :data:`True`.
- - The strings '0', 'no', 'false' and 'off' are coerced to :data:`False`.
- - Other strings raise an exception.
-
- Other Python values are coerced using :class:`bool`.
- :returns: A proper boolean value.
- :raises: :exc:`exceptions.ValueError` when the value is a string but
- cannot be coerced with certainty.
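-
- A brief doctest sketch of these rules:
-
- >>> from humanfriendly import coerce_boolean
- >>> coerce_boolean('on')
- True
- >>> coerce_boolean(0)
- False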
- """
- if is_string(value):
- normalized = value.strip().lower()
- if normalized in ('1', 'yes', 'true', 'on'):
- return True
- elif normalized in ('0', 'no', 'false', 'off', ''):
- return False
- else:
- msg = "Failed to coerce string to boolean! (%r)"
- raise ValueError(format(msg, value))
- else:
- return bool(value)
-
-
-def coerce_pattern(value, flags=0):
- """
- Coerce strings to compiled regular expressions.
-
- :param value: A string containing a regular expression pattern
- or a compiled regular expression.
- :param flags: The flags used to compile the pattern (an integer).
- :returns: A compiled regular expression.
- :raises: :exc:`~exceptions.ValueError` when `value` isn't a string
- and also isn't a compiled regular expression.
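-
- A brief doctest sketch:
-
- >>> from humanfriendly import coerce_pattern
- >>> bool(coerce_pattern('[0-9]+').search('42 tokens'))
- True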
- """
- if is_string(value):
- value = re.compile(value, flags)
- else:
- empty_pattern = re.compile('')
- pattern_type = type(empty_pattern)
- if not isinstance(value, pattern_type):
- msg = "Failed to coerce value to compiled regular expression! (%r)"
- raise ValueError(format(msg, value))
- return value
-
-
-def coerce_seconds(value):
- """
- Coerce a value to the number of seconds.
-
- :param value: An :class:`int`, :class:`float` or
- :class:`datetime.timedelta` object.
- :returns: An :class:`int` or :class:`float` value.
-
- When `value` is a :class:`datetime.timedelta` object the
- :meth:`~datetime.timedelta.total_seconds()` method is called.
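-
- A brief doctest sketch:
-
- >>> from datetime import timedelta
- >>> from humanfriendly import coerce_seconds
- >>> coerce_seconds(timedelta(minutes=2))
- 120.0
- >>> coerce_seconds(42)
- 42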
- """
- if isinstance(value, datetime.timedelta):
- return value.total_seconds()
- if not isinstance(value, numbers.Number):
- msg = "Failed to coerce value to number of seconds! (%r)"
- raise ValueError(format(msg, value))
- return value
-
-
-def format_size(num_bytes, keep_width=False, binary=False):
- """
- Format a byte count as a human readable file size.
-
- :param num_bytes: The size to format in bytes (an integer).
- :param keep_width: :data:`True` if trailing zeros should not be stripped,
- :data:`False` if they can be stripped.
- :param binary: :data:`True` to use binary multiples of bytes (base-2),
- :data:`False` to use decimal multiples of bytes (base-10).
- :returns: The corresponding human readable file size (a string).
-
- This function knows how to format sizes in bytes, kilobytes, megabytes,
- gigabytes, terabytes and petabytes. Some examples:
-
- >>> from humanfriendly import format_size
- >>> format_size(0)
- '0 bytes'
- >>> format_size(1)
- '1 byte'
- >>> format_size(5)
- '5 bytes'
- >>> format_size(1000)
- '1 KB'
- >>> format_size(1024, binary=True)
- '1 KiB'
- >>> format_size(1000 ** 3 * 4)
- '4 GB'
- """
- for unit in reversed(disk_size_units):
- if num_bytes >= unit.binary.divider and binary:
- number = round_number(float(num_bytes) / unit.binary.divider, keep_width=keep_width)
- return pluralize(number, unit.binary.symbol, unit.binary.symbol)
- elif num_bytes >= unit.decimal.divider and not binary:
- number = round_number(float(num_bytes) / unit.decimal.divider, keep_width=keep_width)
- return pluralize(number, unit.decimal.symbol, unit.decimal.symbol)
- return pluralize(num_bytes, 'byte')
-
-
-def parse_size(size, binary=False):
- """
- Parse a human readable data size and return the number of bytes.
-
- :param size: The human readable file size to parse (a string).
- :param binary: :data:`True` to use binary multiples of bytes (base-2) for
- ambiguous unit symbols and names, :data:`False` to use
- decimal multiples of bytes (base-10).
- :returns: The corresponding size in bytes (an integer).
- :raises: :exc:`InvalidSize` when the input can't be parsed.
-
- This function knows how to parse sizes in bytes, kilobytes, megabytes,
- gigabytes, terabytes and petabytes. Some examples:
-
- >>> from humanfriendly import parse_size
- >>> parse_size('42')
- 42
- >>> parse_size('13b')
- 13
- >>> parse_size('5 bytes')
- 5
- >>> parse_size('1 KB')
- 1000
- >>> parse_size('1 kilobyte')
- 1000
- >>> parse_size('1 KiB')
- 1024
- >>> parse_size('1 KB', binary=True)
- 1024
- >>> parse_size('1.5 GB')
- 1500000000
- >>> parse_size('1.5 GB', binary=True)
- 1610612736
- """
- tokens = tokenize(size)
- if tokens and isinstance(tokens[0], numbers.Number):
- # Get the normalized unit (if any) from the tokenized input.
- normalized_unit = tokens[1].lower() if len(tokens) == 2 and is_string(tokens[1]) else ''
- # If the input contains only a number, it's assumed to be the number of
- # bytes. The second token can also explicitly reference the unit bytes.
- if len(tokens) == 1 or normalized_unit.startswith('b'):
- return int(tokens[0])
- # Otherwise we expect two tokens: A number and a unit.
- if normalized_unit:
- # Convert plural units to singular units, for details:
- # https://github.com/xolox/python-humanfriendly/issues/26
- normalized_unit = normalized_unit.rstrip('s')
- for unit in disk_size_units:
- # First we check for unambiguous symbols (KiB, MiB, GiB, etc)
- # and names (kibibyte, mebibyte, gibibyte, etc) because their
- # handling is always the same.
- if normalized_unit in (unit.binary.symbol.lower(), unit.binary.name.lower()):
- return int(tokens[0] * unit.binary.divider)
- # Now we will deal with ambiguous prefixes (K, M, G, etc),
- # symbols (KB, MB, GB, etc) and names (kilobyte, megabyte,
- # gigabyte, etc) according to the caller's preference.
- if (normalized_unit in (unit.decimal.symbol.lower(), unit.decimal.name.lower()) or
- normalized_unit.startswith(unit.decimal.symbol[0].lower())):
- return int(tokens[0] * (unit.binary.divider if binary else unit.decimal.divider))
- # We failed to parse the size specification.
- msg = "Failed to parse size! (input %r was tokenized as %r)"
- raise InvalidSize(format(msg, size, tokens))
-
-
-def format_length(num_metres, keep_width=False):
- """
- Format a metre count as a human readable length.
-
- :param num_metres: The length to format in metres (float / integer).
- :param keep_width: :data:`True` if trailing zeros should not be stripped,
- :data:`False` if they can be stripped.
- :returns: The corresponding human readable length (a string).
-
- This function supports ranges from nanometres to kilometres.
-
- Some examples:
-
- >>> from humanfriendly import format_length
- >>> format_length(0)
- '0 metres'
- >>> format_length(1)
- '1 metre'
- >>> format_length(5)
- '5 metres'
- >>> format_length(1000)
- '1 km'
- >>> format_length(0.004)
- '4 mm'
- """
- for unit in reversed(length_size_units):
- if num_metres >= unit['divider']:
- number = round_number(float(num_metres) / unit['divider'], keep_width=keep_width)
- return pluralize(number, unit['singular'], unit['plural'])
- return pluralize(num_metres, 'metre')
-
-
-def parse_length(length):
- """
- Parse a human readable length and return the number of metres.
-
- :param length: The human readable length to parse (a string).
- :returns: The corresponding length in metres (a float).
- :raises: :exc:`InvalidLength` when the input can't be parsed.
-
- Some examples:
-
- >>> from humanfriendly import parse_length
- >>> parse_length('42')
- 42
- >>> parse_length('1 km')
- 1000
- >>> parse_length('5mm')
- 0.005
- >>> parse_length('15.3cm')
- 0.153
- """
- tokens = tokenize(length)
- if tokens and isinstance(tokens[0], numbers.Number):
- # If the input contains only a number, it's assumed to be the number of metres.
- if len(tokens) == 1:
- return tokens[0]
- # Otherwise we expect to find two tokens: A number and a unit.
- if len(tokens) == 2 and is_string(tokens[1]):
- normalized_unit = tokens[1].lower()
- # Try to match the first letter of the unit.
- for unit in length_size_units:
- if normalized_unit.startswith(unit['prefix']):
- return tokens[0] * unit['divider']
- # We failed to parse the length specification.
- msg = "Failed to parse length! (input %r was tokenized as %r)"
- raise InvalidLength(format(msg, length, tokens))
-
-
-def format_number(number, num_decimals=2):
- """
- Format a number as a string including thousands separators.
-
- :param number: The number to format (a number like an :class:`int`,
- :class:`long` or :class:`float`).
- :param num_decimals: The number of decimals to render (2 by default). If no
- decimal places are required to represent the number
- they will be omitted regardless of this argument.
- :returns: The formatted number (a string).
-
- This function is intended to make it easier to recognize the order of size
- of the number being formatted.
-
- Here's an example:
-
- >>> from humanfriendly import format_number
- >>> print(format_number(6000000))
- 6,000,000
- >>> print(format_number(6000000000.42))
- 6,000,000,000.42
- >>> print(format_number(6000000000.42, num_decimals=0))
- 6,000,000,000
- """
- integer_part, _, decimal_part = str(float(number)).partition('.')
- negative_sign = integer_part.startswith('-')
- reversed_digits = ''.join(reversed(integer_part.lstrip('-')))
- parts = []
- while reversed_digits:
- parts.append(reversed_digits[:3])
- reversed_digits = reversed_digits[3:]
- formatted_number = ''.join(reversed(','.join(parts)))
- decimals_to_add = decimal_part[:num_decimals].rstrip('0')
- if decimals_to_add:
- formatted_number += '.' + decimals_to_add
- if negative_sign:
- formatted_number = '-' + formatted_number
- return formatted_number
-
-
-def round_number(count, keep_width=False):
- """
- Round a floating point number to two decimal places in a human friendly format.
-
- :param count: The number to format.
- :param keep_width: :data:`True` if trailing zeros should not be stripped,
- :data:`False` if they can be stripped.
- :returns: The formatted number as a string. If no decimal places are
- required to represent the number, they will be omitted.
-
- The main purpose of this function is to be used by functions like
- :func:`format_length()`, :func:`format_size()` and
- :func:`format_timespan()`.
-
- Here are some examples:
-
- >>> from humanfriendly import round_number
- >>> round_number(1)
- '1'
- >>> import math
- >>> round_number(math.pi)
- '3.14'
- >>> round_number(5.001)
- '5'
- """
- text = '%.2f' % float(count)
- if not keep_width:
- text = re.sub('0+$', '', text)
- text = re.sub(r'\.$', '', text)
- return text
-
-
-def format_timespan(num_seconds, detailed=False, max_units=3):
- """
- Format a timespan in seconds as a human readable string.
-
- :param num_seconds: Any value accepted by :func:`coerce_seconds()`.
- :param detailed: If :data:`True` milliseconds are represented separately
- instead of being represented as fractional seconds
- (defaults to :data:`False`).
- :param max_units: The maximum number of units to show in the formatted time
- span (an integer, defaults to three).
- :returns: The formatted timespan as a string.
- :raise: See :func:`coerce_seconds()`.
-
- Some examples:
-
- >>> from humanfriendly import format_timespan
- >>> format_timespan(0)
- '0 seconds'
- >>> format_timespan(1)
- '1 second'
- >>> import math
- >>> format_timespan(math.pi)
- '3.14 seconds'
- >>> hour = 60 * 60
- >>> day = hour * 24
- >>> week = day * 7
- >>> format_timespan(week * 52 + day * 2 + hour * 3)
- '1 year, 2 days and 3 hours'
- """
- num_seconds = coerce_seconds(num_seconds)
- if num_seconds < 60 and not detailed:
- # Fast path.
- return pluralize(round_number(num_seconds), 'second')
- else:
- # Slow path.
- result = []
- num_seconds = decimal.Decimal(str(num_seconds))
- relevant_units = list(reversed(time_units[0 if detailed else 3:]))
- for unit in relevant_units:
- # Extract the unit count from the remaining time.
- divider = decimal.Decimal(str(unit['divider']))
- count = num_seconds / divider
- num_seconds %= divider
- # Round the unit count appropriately.
- if unit != relevant_units[-1]:
- # Integer rounding for all but the smallest unit.
- count = int(count)
- else:
- # Floating point rounding for the smallest unit.
- count = round_number(count)
- # Only include relevant units in the result.
- if count not in (0, '0'):
- result.append(pluralize(count, unit['singular'], unit['plural']))
- if len(result) == 1:
- # A single count/unit combination.
- return result[0]
- else:
- if not detailed:
- # Remove `insignificant' data from the formatted timespan.
- result = result[:max_units]
- # Format the timespan in a readable way.
- return concatenate(result)
-
-
-def parse_timespan(timespan):
- """
- Parse a "human friendly" timespan into the number of seconds.
-
- :param timespan: A string like ``5h`` (5 hours), ``10m`` (10 minutes) or
- ``42s`` (42 seconds).
- :returns: The number of seconds as a floating point number.
- :raises: :exc:`InvalidTimespan` when the input can't be parsed.
-
- Note that the :func:`parse_timespan()` function is not meant to be the
- "mirror image" of the :func:`format_timespan()` function. Instead it's
- meant to allow humans to easily and succinctly specify a timespan with a
- minimal amount of typing. It's very useful to accept easy to write time
- spans as e.g. command line arguments to programs.
-
- The time units (and abbreviations) supported by this function are:
-
- - ms, millisecond, milliseconds
- - s, sec, secs, second, seconds
- - m, min, mins, minute, minutes
- - h, hour, hours
- - d, day, days
- - w, week, weeks
- - y, year, years
-
- Some examples:
-
- >>> from humanfriendly import parse_timespan
- >>> parse_timespan('42')
- 42.0
- >>> parse_timespan('42s')
- 42.0
- >>> parse_timespan('1m')
- 60.0
- >>> parse_timespan('1h')
- 3600.0
- >>> parse_timespan('1d')
- 86400.0
- """
- tokens = tokenize(timespan)
- if tokens and isinstance(tokens[0], numbers.Number):
- # If the input contains only a number, it's assumed to be the number of seconds.
- if len(tokens) == 1:
- return float(tokens[0])
- # Otherwise we expect to find two tokens: A number and a unit.
- if len(tokens) == 2 and is_string(tokens[1]):
- normalized_unit = tokens[1].lower()
- for unit in time_units:
- if (normalized_unit == unit['singular'] or
- normalized_unit == unit['plural'] or
- normalized_unit in unit['abbreviations']):
- return float(tokens[0]) * unit['divider']
- # We failed to parse the timespan specification.
- msg = "Failed to parse timespan! (input %r was tokenized as %r)"
- raise InvalidTimespan(format(msg, timespan, tokens))
-
-
-def parse_date(datestring):
- """
- Parse a date/time string into a tuple of integers.
-
- :param datestring: The date/time string to parse.
- :returns: A tuple with the numbers ``(year, month, day, hour, minute,
- second)`` (all numbers are integers).
- :raises: :exc:`InvalidDate` when the date cannot be parsed.
-
- Supported date/time formats:
-
- - ``YYYY-MM-DD``
- - ``YYYY-MM-DD HH:MM:SS``
-
- .. note:: If you want to parse date/time strings with a fixed, known
- format and :func:`parse_date()` isn't useful to you, consider
- :func:`time.strptime()` or :meth:`datetime.datetime.strptime()`,
- both of which are included in the Python standard library.
- Alternatively for more complex tasks consider using the date/time
- parsing module in the dateutil_ package.
-
- Examples:
-
- >>> from humanfriendly import parse_date
- >>> parse_date('2013-06-17')
- (2013, 6, 17, 0, 0, 0)
- >>> parse_date('2013-06-17 02:47:42')
- (2013, 6, 17, 2, 47, 42)
-
- Here's how you convert the result to a number (`Unix time`_):
-
- >>> from humanfriendly import parse_date
- >>> from time import mktime
- >>> mktime(parse_date('2013-06-17 02:47:42') + (-1, -1, -1))
- 1371430062.0
-
- And here's how you convert it to a :class:`datetime.datetime` object:
-
- >>> from humanfriendly import parse_date
- >>> from datetime import datetime
- >>> datetime(*parse_date('2013-06-17 02:47:42'))
- datetime.datetime(2013, 6, 17, 2, 47, 42)
-
- Here's an example that combines :func:`format_timespan()` and
- :func:`parse_date()` to calculate a human friendly timespan since a
- given date:
-
- >>> from humanfriendly import format_timespan, parse_date
- >>> from time import mktime, time
- >>> unix_time = mktime(parse_date('2013-06-17 02:47:42') + (-1, -1, -1))
- >>> seconds_since_then = time() - unix_time
- >>> print(format_timespan(seconds_since_then))
- 1 year, 43 weeks and 1 day
-
- .. _dateutil: https://dateutil.readthedocs.io/en/latest/parser.html
- .. _Unix time: http://en.wikipedia.org/wiki/Unix_time
- """
- try:
- tokens = [t.strip() for t in datestring.split()]
- if len(tokens) >= 2:
- date_parts = list(map(int, tokens[0].split('-'))) + [1, 1]
- time_parts = list(map(int, tokens[1].split(':'))) + [0, 0, 0]
- return tuple(date_parts[0:3] + time_parts[0:3])
- else:
- year, month, day = (list(map(int, datestring.split('-'))) + [1, 1])[0:3]
- return (year, month, day, 0, 0, 0)
- except Exception:
- msg = "Invalid date! (expected 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' but got: %r)"
- raise InvalidDate(format(msg, datestring))
-
-
-def format_path(pathname):
- """
- Shorten a pathname to make it more human friendly.
-
- :param pathname: An absolute pathname (a string).
- :returns: The pathname with the user's home directory abbreviated.
-
- Given an absolute pathname, this function abbreviates the user's home
- directory to ``~/`` in order to shorten the pathname without losing
- information. It is not an error if the pathname is not relative to the
- current user's home directory.
-
- Here's an example of its usage:
-
- >>> from os import environ
- >>> from os.path import join
- >>> vimrc = join(environ['HOME'], '.vimrc')
- >>> vimrc
- '/home/peter/.vimrc'
- >>> from humanfriendly import format_path
- >>> format_path(vimrc)
- '~/.vimrc'
- """
- pathname = os.path.abspath(pathname)
- home = os.environ.get('HOME')
- if home:
- home = os.path.abspath(home)
- if pathname.startswith(home):
- pathname = os.path.join('~', os.path.relpath(pathname, home))
- return pathname
-
-
-def parse_path(pathname):
- """
- Convert a human friendly pathname to an absolute pathname.
-
- Expands leading tildes using :func:`os.path.expanduser()` and
- environment variables using :func:`os.path.expandvars()` and makes the
- resulting pathname absolute using :func:`os.path.abspath()`.
-
- :param pathname: A human friendly pathname (a string).
- :returns: An absolute pathname (a string).
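-
- For example, assuming the same hypothetical home directory used in the
- :func:`format_path()` example (``/home/peter``):
-
- >>> from humanfriendly import parse_path
- >>> parse_path('~/.vimrc')
- '/home/peter/.vimrc'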
- """
- return os.path.abspath(os.path.expanduser(os.path.expandvars(pathname)))
-
-
-class Timer(object):
-
- """
- Easy to use timer to keep track of long running operations.
- """
-
- def __init__(self, start_time=None, resumable=False):
- """
- Remember the time when the :class:`Timer` was created.
-
- :param start_time: The start time (a float, defaults to the current time).
- :param resumable: Create a resumable timer (defaults to :data:`False`).
-
- When `start_time` is given :class:`Timer` uses :func:`time.time()` as a
- clock source, otherwise it uses :func:`humanfriendly.compat.monotonic()`.
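-
- A resumable timer only counts time spent inside its ``with`` blocks. A
- minimal sketch (``urls`` and ``download()`` are hypothetical)::
-
- timer = Timer(resumable=True)
- for url in urls:
-     with timer:
-         download(url)
- print(timer)  # reports only the accumulated download time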
- """
- if resumable:
- self.monotonic = True
- self.resumable = True
- self.start_time = 0.0
- self.total_time = 0.0
- elif start_time:
- self.monotonic = False
- self.resumable = False
- self.start_time = start_time
- else:
- self.monotonic = True
- self.resumable = False
- self.start_time = monotonic()
-
- def __enter__(self):
- """
- Start or resume counting elapsed time.
-
- :returns: The :class:`Timer` object.
- :raises: :exc:`~exceptions.ValueError` when the timer isn't resumable.
- """
- if not self.resumable:
- raise ValueError("Timer is not resumable!")
- self.start_time = monotonic()
- return self
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """
- Stop counting elapsed time.
-
- :raises: :exc:`~exceptions.ValueError` when the timer isn't resumable.
- """
- if not self.resumable:
- raise ValueError("Timer is not resumable!")
- if self.start_time:
- self.total_time += monotonic() - self.start_time
- self.start_time = 0.0
-
- def sleep(self, seconds):
- """
- Easy to use rate limiting of repeating actions.
-
- :param seconds: The number of seconds to sleep (an
- integer or floating point number).
-
- This method sleeps for the given number of seconds minus the
- :attr:`elapsed_time`. If the resulting duration is negative
- :func:`time.sleep()` will still be called, but the argument
- given to it will be the number 0 (negative numbers cause
- :func:`time.sleep()` to raise an exception).
-
- The use case for this is to initialize a :class:`Timer` inside
- the body of a :keyword:`for` or :keyword:`while` loop and call
- :func:`Timer.sleep()` at the end of the loop body to rate limit
- whatever it is that is being done inside the loop body.
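-
- A minimal sketch of that pattern (``work_items`` and ``process()`` are
- hypothetical)::
-
- for item in work_items:
-     timer = Timer()
-     process(item)
-     timer.sleep(1)  # make each iteration take at least one second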
-
- For posterity: Although the implementation of :func:`sleep()` only
- requires a single line of code I've added it to :mod:`humanfriendly`
- anyway because now that I've thought about how to tackle this once I
- never want to have to think about it again :-P (unless I find ways to
- improve this).
- """
- time.sleep(max(0, seconds - self.elapsed_time))
-
- @property
- def elapsed_time(self):
- """
- Get the number of seconds counted so far.
- """
- elapsed_time = 0
- if self.resumable:
- elapsed_time += self.total_time
- if self.start_time:
- current_time = monotonic() if self.monotonic else time.time()
- elapsed_time += current_time - self.start_time
- return elapsed_time
-
- @property
- def rounded(self):
- """Human readable timespan rounded to seconds (a string)."""
- return format_timespan(round(self.elapsed_time))
-
- def __str__(self):
- """Show the elapsed time since the :class:`Timer` was created."""
- return format_timespan(self.elapsed_time)
-
-
-class InvalidDate(Exception):
-
- """
- Raised when a string cannot be parsed into a date.
-
- For example:
-
- >>> from humanfriendly import parse_date
- >>> parse_date('2013-06-XY')
- Traceback (most recent call last):
- File "humanfriendly.py", line 206, in parse_date
- raise InvalidDate(format(msg, datestring))
- humanfriendly.InvalidDate: Invalid date! (expected 'YYYY-MM-DD' or 'YYYY-MM-DD HH:MM:SS' but got: '2013-06-XY')
- """
-
-
-class InvalidSize(Exception):
-
- """
- Raised when a string cannot be parsed into a file size.
-
- For example:
-
- >>> from humanfriendly import parse_size
- >>> parse_size('5 Z')
- Traceback (most recent call last):
- File "humanfriendly/__init__.py", line 267, in parse_size
- raise InvalidSize(format(msg, size, tokens))
- humanfriendly.InvalidSize: Failed to parse size! (input '5 Z' was tokenized as [5, 'Z'])
- """
-
-
-class InvalidLength(Exception):
-
- """
- Raised when a string cannot be parsed into a length.
-
- For example:
-
- >>> from humanfriendly import parse_length
- >>> parse_length('5 Z')
- Traceback (most recent call last):
- File "humanfriendly/__init__.py", line 267, in parse_length
- raise InvalidLength(format(msg, length, tokens))
- humanfriendly.InvalidLength: Failed to parse length! (input '5 Z' was tokenized as [5, 'Z'])
- """
-
-
-class InvalidTimespan(Exception):
-
- """
- Raised when a string cannot be parsed into a timespan.
-
- For example:
-
- >>> from humanfriendly import parse_timespan
- >>> parse_timespan('1 age')
- Traceback (most recent call last):
- File "humanfriendly/__init__.py", line 419, in parse_timespan
- raise InvalidTimespan(format(msg, timespan, tokens))
- humanfriendly.InvalidTimespan: Failed to parse timespan! (input '1 age' was tokenized as [1, 'age'])
- """
-
-
-# Define aliases for backwards compatibility.
-define_aliases(
- module_name=__name__,
- # In humanfriendly 1.23 the format_table() function was added to render a
- # table using characters like dashes and vertical bars to emulate borders.
- # Since then support for other tables has been added and the name of
- # format_table() has changed.
- format_table='humanfriendly.tables.format_pretty_table',
- # In humanfriendly 1.30 the following text manipulation functions were
- # moved out into a separate module to enable their usage in other modules
- # of the humanfriendly package (without causing circular imports).
- compact='humanfriendly.text.compact',
- concatenate='humanfriendly.text.concatenate',
- dedent='humanfriendly.text.dedent',
- format='humanfriendly.text.format',
- is_empty_line='humanfriendly.text.is_empty_line',
- pluralize='humanfriendly.text.pluralize',
- tokenize='humanfriendly.text.tokenize',
- trim_empty_lines='humanfriendly.text.trim_empty_lines',
- # In humanfriendly 1.38 the prompt_for_choice() function was moved out into a
- # separate module because several variants of interactive prompts were added.
- prompt_for_choice='humanfriendly.prompts.prompt_for_choice',
- # In humanfriendly 8.0 the Spinner class and minimum_spinner_interval
- # variable were extracted to a new module and the erase_line_code,
- # hide_cursor_code and show_cursor_code variables were moved.
- AutomaticSpinner='humanfriendly.terminal.spinners.AutomaticSpinner',
- Spinner='humanfriendly.terminal.spinners.Spinner',
- erase_line_code='humanfriendly.terminal.ANSI_ERASE_LINE',
- hide_cursor_code='humanfriendly.terminal.ANSI_HIDE_CURSOR',
- minimum_spinner_interval='humanfriendly.terminal.spinners.MINIMUM_INTERVAL',
- show_cursor_code='humanfriendly.terminal.ANSI_SHOW_CURSOR',
-)
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/case.py b/contrib/python/humanfriendly/py3/humanfriendly/case.py
deleted file mode 100644
index 4c71857e40..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/case.py
+++ /dev/null
@@ -1,157 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: April 19, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Simple case insensitive dictionaries.
-
-The :class:`CaseInsensitiveDict` class is a dictionary whose string keys
-are case insensitive. It works by automatically coercing string keys to
-:class:`CaseInsensitiveKey` objects. Keys that are not strings are
-supported as well, just without case insensitivity.
-
-At its core this module works by normalizing strings to lowercase before
-comparing or hashing them. It doesn't support proper case folding nor
-does it support Unicode normalization, hence the word "simple".
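-
-A brief doctest sketch:
-
->>> from humanfriendly.case import CaseInsensitiveDict
->>> d = CaseInsensitiveDict(foo=42)
->>> d['FOO']
-42
->>> 'Foo' in d
-True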
-"""
-
-# Standard library modules.
-import collections
-
-try:
- # Python >= 3.3.
- from collections.abc import Iterable, Mapping
-except ImportError:
- # Python 2.7.
- from collections import Iterable, Mapping
-
-# Modules included in our package.
-from humanfriendly.compat import basestring, unicode
-
-# Public identifiers that require documentation.
-__all__ = ("CaseInsensitiveDict", "CaseInsensitiveKey")
-
-
-class CaseInsensitiveDict(collections.OrderedDict):
-
- """
- Simple case insensitive dictionary implementation (that remembers insertion order).
-
- This class works by overriding methods that deal with dictionary keys to
- coerce string keys to :class:`CaseInsensitiveKey` objects before calling
- down to the regular dictionary handling methods. While intended to be
- complete this class has not been extensively tested yet.
- """
-
- def __init__(self, other=None, **kw):
- """Initialize a :class:`CaseInsensitiveDict` object."""
- # Initialize our superclass.
- super(CaseInsensitiveDict, self).__init__()
- # Handle the initializer arguments.
- self.update(other, **kw)
-
- def coerce_key(self, key):
- """
- Coerce string keys to :class:`CaseInsensitiveKey` objects.
-
- :param key: The value to coerce (any type).
- :returns: If `key` is a string then a :class:`CaseInsensitiveKey`
- object is returned, otherwise the value of `key` is
- returned unmodified.
- """
- if isinstance(key, basestring):
- key = CaseInsensitiveKey(key)
- return key
-
- @classmethod
- def fromkeys(cls, iterable, value=None):
- """Create a case insensitive dictionary with keys from `iterable` and values set to `value`."""
- return cls((k, value) for k in iterable)
-
- def get(self, key, default=None):
- """Get the value of an existing item."""
- return super(CaseInsensitiveDict, self).get(self.coerce_key(key), default)
-
- def pop(self, key, default=None):
- """Remove an item from a case insensitive dictionary."""
- return super(CaseInsensitiveDict, self).pop(self.coerce_key(key), default)
-
- def setdefault(self, key, default=None):
- """Get the value of an existing item or add a new item."""
- return super(CaseInsensitiveDict, self).setdefault(self.coerce_key(key), default)
-
- def update(self, other=None, **kw):
- """Update a case insensitive dictionary with new items."""
- if isinstance(other, Mapping):
- # Copy the items from the given mapping.
- for key, value in other.items():
- self[key] = value
- elif isinstance(other, Iterable):
- # Copy the items from the given iterable.
- for key, value in other:
- self[key] = value
- elif other is not None:
- # Complain about unsupported values.
- msg = "'%s' object is not iterable"
- type_name = type(other).__name__
- raise TypeError(msg % type_name)
- # Copy the keyword arguments (if any).
- for key, value in kw.items():
- self[key] = value
-
- def __contains__(self, key):
- """Check if a case insensitive dictionary contains the given key."""
- return super(CaseInsensitiveDict, self).__contains__(self.coerce_key(key))
-
- def __delitem__(self, key):
- """Delete an item in a case insensitive dictionary."""
- return super(CaseInsensitiveDict, self).__delitem__(self.coerce_key(key))
-
- def __getitem__(self, key):
- """Get the value of an item in a case insensitive dictionary."""
- return super(CaseInsensitiveDict, self).__getitem__(self.coerce_key(key))
-
- def __setitem__(self, key, value):
- """Set the value of an item in a case insensitive dictionary."""
- return super(CaseInsensitiveDict, self).__setitem__(self.coerce_key(key), value)
-
-
-class CaseInsensitiveKey(unicode):
-
- """
- Simple case insensitive dictionary key implementation.
-
- The :class:`CaseInsensitiveKey` class provides an intentionally simple
- implementation of case insensitive strings to be used as dictionary keys.
-
- If you need features like Unicode normalization or proper case folding
- please consider using a more advanced implementation like the :pypi:`istr`
- package instead.
- """
-
- def __new__(cls, value):
- """Create a :class:`CaseInsensitiveKey` object."""
- # Delegate string object creation to our superclass.
- obj = unicode.__new__(cls, value)
- # Store the lowercased string and its hash value.
- normalized = obj.lower()
- obj._normalized = normalized
- obj._hash_value = hash(normalized)
- return obj
-
- def __hash__(self):
- """Get the hash value of the lowercased string."""
- return self._hash_value
-
- def __eq__(self, other):
- """Compare two strings as lowercase."""
- if isinstance(other, CaseInsensitiveKey):
- # Fast path (and the most common case): Comparison with same type.
- return self._normalized == other._normalized
- elif isinstance(other, unicode):
- # Slow path: Comparison with strings that need lowercasing.
- return self._normalized == other.lower()
- else:
- return NotImplemented
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/cli.py b/contrib/python/humanfriendly/py3/humanfriendly/cli.py
deleted file mode 100644
index eb81db172b..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/cli.py
+++ /dev/null
@@ -1,291 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 1, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Usage: humanfriendly [OPTIONS]
-
-Human friendly input/output (text formatting) on the command
-line based on the Python package with the same name.
-
-Supported options:
-
- -c, --run-command
-
- Execute an external command (given as the positional arguments) and render
- a spinner and timer while the command is running. The exit status of the
- command is propagated.
-
- --format-table
-
- Read tabular data from standard input (each line is a row and each
- whitespace separated field is a column), format the data as a table and
- print the resulting table to standard output. See also the --delimiter
- option.
-
- -d, --delimiter=VALUE
-
- Change the delimiter used by --format-table to VALUE (a string). By default
- all whitespace is treated as a delimiter.
-
- -l, --format-length=LENGTH
-
- Convert a length count (given as the integer or float LENGTH) into a human
- readable string and print that string to standard output.
-
- -n, --format-number=VALUE
-
- Format a number (given as the integer or floating point number VALUE) with
- thousands separators and two decimal places (if needed) and print the
- formatted number to standard output.
-
- -s, --format-size=BYTES
-
- Convert a byte count (given as the integer BYTES) into a human readable
- string and print that string to standard output.
-
- -b, --binary
-
- Change the output of -s, --format-size to use binary multiples of bytes
- (base-2) instead of the default decimal multiples of bytes (base-10).
-
- -t, --format-timespan=SECONDS
-
- Convert a number of seconds (given as the floating point number SECONDS)
- into a human readable timespan and print that string to standard output.
-
- --parse-length=VALUE
-
- Parse a human readable length (given as the string VALUE) and print the
- number of metres to standard output.
-
- --parse-size=VALUE
-
- Parse a human readable data size (given as the string VALUE) and print the
- number of bytes to standard output.
-
- --demo
-
- Demonstrate changing the style and color of the terminal font using ANSI
- escape sequences.
-
- -h, --help
-
- Show this message and exit.
-"""
-
-# Standard library modules.
-import functools
-import getopt
-import pipes
-import subprocess
-import sys
-
-# Modules included in our package.
-from humanfriendly import (
- Timer,
- format_length,
- format_number,
- format_size,
- format_timespan,
- parse_length,
- parse_size,
-)
-from humanfriendly.tables import format_pretty_table, format_smart_table
-from humanfriendly.terminal import (
- ANSI_COLOR_CODES,
- ANSI_TEXT_STYLES,
- HIGHLIGHT_COLOR,
- ansi_strip,
- ansi_wrap,
- enable_ansi_support,
- find_terminal_size,
- output,
- usage,
- warning,
-)
-from humanfriendly.terminal.spinners import Spinner
-
-# Public identifiers that require documentation.
-__all__ = (
- 'demonstrate_256_colors',
- 'demonstrate_ansi_formatting',
- 'main',
- 'print_formatted_length',
- 'print_formatted_number',
- 'print_formatted_size',
- 'print_formatted_table',
- 'print_formatted_timespan',
- 'print_parsed_length',
- 'print_parsed_size',
- 'run_command',
-)
-
-
-def main():
- """Command line interface for the ``humanfriendly`` program."""
- enable_ansi_support()
- try:
- options, arguments = getopt.getopt(sys.argv[1:], 'cd:l:n:s:bt:h', [
- 'run-command', 'format-table', 'delimiter=', 'format-length=',
- 'format-number=', 'format-size=', 'binary', 'format-timespan=',
- 'parse-length=', 'parse-size=', 'demo', 'help',
- ])
- except Exception as e:
- warning("Error: %s", e)
- sys.exit(1)
- actions = []
- delimiter = None
- should_format_table = False
- binary = any(o in ('-b', '--binary') for o, v in options)
- for option, value in options:
- if option in ('-d', '--delimiter'):
- delimiter = value
- elif option == '--parse-size':
- actions.append(functools.partial(print_parsed_size, value))
- elif option == '--parse-length':
- actions.append(functools.partial(print_parsed_length, value))
- elif option in ('-c', '--run-command'):
- actions.append(functools.partial(run_command, arguments))
- elif option in ('-l', '--format-length'):
- actions.append(functools.partial(print_formatted_length, value))
- elif option in ('-n', '--format-number'):
- actions.append(functools.partial(print_formatted_number, value))
- elif option in ('-s', '--format-size'):
- actions.append(functools.partial(print_formatted_size, value, binary))
- elif option == '--format-table':
- should_format_table = True
- elif option in ('-t', '--format-timespan'):
- actions.append(functools.partial(print_formatted_timespan, value))
- elif option == '--demo':
- actions.append(demonstrate_ansi_formatting)
- elif option in ('-h', '--help'):
- usage(__doc__)
- return
- if should_format_table:
- actions.append(functools.partial(print_formatted_table, delimiter))
- if not actions:
- usage(__doc__)
- return
- for partial in actions:
- partial()
-
-
-def run_command(command_line):
- """Run an external command and show a spinner while the command is running."""
- timer = Timer()
- spinner_label = "Waiting for command: %s" % " ".join(map(pipes.quote, command_line))
- with Spinner(label=spinner_label, timer=timer) as spinner:
- process = subprocess.Popen(command_line)
- while True:
- spinner.step()
- spinner.sleep()
- if process.poll() is not None:
- break
- sys.exit(process.returncode)
-
-
-def print_formatted_length(value):
- """Print a human readable length."""
- if '.' in value:
- output(format_length(float(value)))
- else:
- output(format_length(int(value)))
-
-
-def print_formatted_number(value):
- """Print large numbers in a human readable format."""
- output(format_number(float(value)))
-
-
-def print_formatted_size(value, binary):
- """Print a human readable size."""
- output(format_size(int(value), binary=binary))
-
-
-def print_formatted_table(delimiter):
- """Read tabular data from standard input and print a table."""
- data = []
- for line in sys.stdin:
- line = line.rstrip()
- data.append(line.split(delimiter))
- output(format_pretty_table(data))
-
-
-def print_formatted_timespan(value):
- """Print a human readable timespan."""
- output(format_timespan(float(value)))
-
-
-def print_parsed_length(value):
- """Parse a human readable length and print the number of metres."""
- output(parse_length(value))
-
-
-def print_parsed_size(value):
- """Parse a human readable data size and print the number of bytes."""
- output(parse_size(value))
-
-
-def demonstrate_ansi_formatting():
- """Demonstrate the use of ANSI escape sequences."""
- # First we demonstrate the supported text styles.
- output('%s', ansi_wrap('Text styles:', bold=True))
- styles = ['normal', 'bright']
- styles.extend(ANSI_TEXT_STYLES.keys())
- for style_name in sorted(styles):
- options = dict(color=HIGHLIGHT_COLOR)
- if style_name != 'normal':
- options[style_name] = True
- style_label = style_name.replace('_', ' ').capitalize()
- output(' - %s', ansi_wrap(style_label, **options))
- # Now we demonstrate named foreground and background colors.
- for color_type, color_label in (('color', 'Foreground colors'),
- ('background', 'Background colors')):
- intensities = [
- ('normal', dict()),
- ('bright', dict(bright=True)),
- ]
- if color_type != 'background':
- intensities.insert(0, ('faint', dict(faint=True)))
- output('\n%s' % ansi_wrap('%s:' % color_label, bold=True))
- output(format_smart_table([
- [color_name] + [
- ansi_wrap(
- 'XXXXXX' if color_type != 'background' else (' ' * 6),
- **dict(list(kw.items()) + [(color_type, color_name)])
- ) for label, kw in intensities
- ] for color_name in sorted(ANSI_COLOR_CODES.keys())
- ], column_names=['Color'] + [
- label.capitalize() for label, kw in intensities
- ]))
- # Demonstrate support for 256 colors as well.
- demonstrate_256_colors(0, 7, 'standard colors')
- demonstrate_256_colors(8, 15, 'high-intensity colors')
- demonstrate_256_colors(16, 231, '216 colors')
- demonstrate_256_colors(232, 255, 'gray scale colors')
-
-
-def demonstrate_256_colors(i, j, group=None):
- """Demonstrate 256 color mode support."""
- # Generate the label.
- label = '256 color mode'
- if group:
- label += ' (%s)' % group
- output('\n' + ansi_wrap('%s:' % label, bold=True))
- # Generate a simple rendering of the colors in the requested range and
- # check if it will fit on a single line (given the terminal's width).
- single_line = ''.join(' ' + ansi_wrap(str(n), color=n) for n in range(i, j + 1))
- lines, columns = find_terminal_size()
- if columns >= len(ansi_strip(single_line)):
- output(single_line)
- else:
- # Generate a more complex rendering of the colors that will nicely wrap
- # over multiple lines without using too many lines.
- width = len(str(j)) + 1
- colors_per_line = int(columns / width)
- colors = [ansi_wrap(str(n).rjust(width), color=n) for n in range(i, j + 1)]
- blocks = [colors[n:n + colors_per_line] for n in range(0, len(colors), colors_per_line)]
- output('\n'.join(''.join(b) for b in blocks))
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/compat.py b/contrib/python/humanfriendly/py3/humanfriendly/compat.py
deleted file mode 100644
index 24c9d1833a..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/compat.py
+++ /dev/null
@@ -1,146 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: September 17, 2021
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Compatibility with Python 2 and 3.
-
-This module exposes aliases and functions that make it easier to write Python
-code that is compatible with Python 2 and Python 3.
-
-.. data:: basestring
-
- Alias for :func:`python2:basestring` (in Python 2) or :class:`python3:str`
- (in Python 3). See also :func:`is_string()`.
-
-.. data:: HTMLParser
-
- Alias for :class:`python2:HTMLParser.HTMLParser` (in Python 2) or
- :class:`python3:html.parser.HTMLParser` (in Python 3).
-
-.. data:: interactive_prompt
-
- Alias for :func:`python2:raw_input()` (in Python 2) or
- :func:`python3:input()` (in Python 3).
-
-.. data:: StringIO
-
- Alias for :class:`python2:StringIO.StringIO` (in Python 2) or
- :class:`python3:io.StringIO` (in Python 3).
-
-.. data:: unicode
-
- Alias for :func:`python2:unicode` (in Python 2) or :class:`python3:str` (in
- Python 3). See also :func:`coerce_string()`.
-
-.. data:: monotonic
-
- Alias for :func:`python3:time.monotonic()` (in Python 3.3 and higher) or
- `monotonic.monotonic()` (a `conditional dependency
- <https://pypi.org/project/monotonic/>`_ on older Python versions).
-"""
-
-__all__ = (
- 'HTMLParser',
- 'StringIO',
- 'basestring',
- 'coerce_string',
- 'interactive_prompt',
- 'is_string',
- 'is_unicode',
- 'monotonic',
- 'name2codepoint',
- 'on_macos',
- 'on_windows',
- 'unichr',
- 'unicode',
- 'which',
-)
-
-# Standard library modules.
-import sys
-
-# Differences between Python 2 and 3.
-try:
- # Python 2.
- unicode = unicode
- unichr = unichr
- basestring = basestring
- interactive_prompt = raw_input
- from distutils.spawn import find_executable as which
- from HTMLParser import HTMLParser
- from StringIO import StringIO
- from htmlentitydefs import name2codepoint
-except (ImportError, NameError):
- # Python 3.
- unicode = str
- unichr = chr
- basestring = str
- interactive_prompt = input
- from shutil import which
- from html.parser import HTMLParser
- from io import StringIO
- from html.entities import name2codepoint
-
-try:
- # Python 3.3 and higher.
- from time import monotonic
-except ImportError:
- # A replacement for older Python versions:
- # https://pypi.org/project/monotonic/
- try:
- from monotonic import monotonic
- except (ImportError, RuntimeError):
- # We fall back to the old behavior of using time.time() instead of
- # failing when {time,monotonic}.monotonic() are both missing.
- from time import time as monotonic
-
-
-def coerce_string(value):
- """
- Coerce any value to a Unicode string (:func:`python2:unicode` in Python 2 and :class:`python3:str` in Python 3).
-
- :param value: The value to coerce.
- :returns: The value coerced to a Unicode string.
- """
- return value if is_string(value) else unicode(value)
-
-
-def is_string(value):
- """
- Check if a value is a :func:`python2:basestring` (in Python 2) or :class:`python3:str` (in Python 3) object.
-
- :param value: The value to check.
- :returns: :data:`True` if the value is a string, :data:`False` otherwise.
- """
- return isinstance(value, basestring)
-
-
-def is_unicode(value):
- """
- Check if a value is a :func:`python2:unicode` (in Python 2) or :class:`python3:str` (in Python 3) object.
-
- :param value: The value to check.
- :returns: :data:`True` if the value is a Unicode string, :data:`False` otherwise.
- """
- return isinstance(value, unicode)
-
-
-def on_macos():
- """
- Check if we're running on Apple MacOS.
-
- :returns: :data:`True` if running MacOS, :data:`False` otherwise.
- """
- return sys.platform.startswith('darwin')
-
-
-def on_windows():
- """
- Check if we're running on the Microsoft Windows OS.
-
- :returns: :data:`True` if running Windows, :data:`False` otherwise.
- """
- return sys.platform.startswith('win')
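Illustrative aside (not part of the diff above): the compat helpers shown in the deleted module are typically used as in the following sketch; the interpreter session is assumed, not taken from the upstream documentation.

    >>> from humanfriendly.compat import coerce_string, is_string, monotonic
    >>> coerce_string(42)              # any value becomes a text string
    '42'
    >>> is_string(coerce_string(42))
    True
    >>> start = monotonic()            # monotonic clock, falls back to time.time()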
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/decorators.py b/contrib/python/humanfriendly/py3/humanfriendly/decorators.py
deleted file mode 100644
index c90a59ea28..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/decorators.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 2, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""Simple function decorators to make Python programming easier."""
-
-# Standard library modules.
-import functools
-
-# Public identifiers that require documentation.
-__all__ = ('RESULTS_ATTRIBUTE', 'cached')
-
-RESULTS_ATTRIBUTE = 'cached_results'
-"""The name of the property used to cache the return values of functions (a string)."""
-
-
-def cached(function):
- """
- Rudimentary caching decorator for functions.
-
- :param function: The function whose return value should be cached.
- :returns: The decorated function.
-
- The given function will only be called once, the first time the wrapper
- function is called. The return value is cached by the wrapper function as
- an attribute of the given function and returned on each subsequent call.
-
- .. note:: Currently no function arguments are supported because only a
- single return value can be cached. Accepting any function
- arguments at all would imply that the cache is parametrized on
- function arguments, which is not currently the case.
- """
- @functools.wraps(function)
- def wrapper():
- try:
- return getattr(wrapper, RESULTS_ATTRIBUTE)
- except AttributeError:
- result = function()
- setattr(wrapper, RESULTS_ATTRIBUTE, result)
- return result
- return wrapper
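Illustrative aside (not part of the diff above): a minimal sketch of the @cached decorator from the deleted module; answer() is a hypothetical example function.

    >>> from humanfriendly.decorators import cached
    >>> @cached
    ... def answer():                  # hypothetical function, computed once
    ...     print("computing ...")
    ...     return 42
    >>> answer()                       # the first call runs the function
    computing ...
    42
    >>> answer()                       # later calls reuse the cached result
    42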
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/deprecation.py b/contrib/python/humanfriendly/py3/humanfriendly/deprecation.py
deleted file mode 100644
index f2012bbd6f..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/deprecation.py
+++ /dev/null
@@ -1,251 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 2, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Support for deprecation warnings when importing names from old locations.
-
-When software evolves, things tend to move around. This is usually detrimental
-to backwards compatibility (in Python this primarily manifests itself as
-:exc:`~exceptions.ImportError` exceptions).
-
-While backwards compatibility is very important, it should not get in the way
-of progress. It would be great to have the agility to move things around
-without breaking backwards compatibility.
-
-This is where the :mod:`humanfriendly.deprecation` module comes in: It enables
-the definition of backwards compatible aliases that emit a deprecation warning
-when they are accessed.
-
-The way it works is that it wraps the original module in a :class:`DeprecationProxy`
-object that defines a :func:`~DeprecationProxy.__getattr__()` special method to
-override attribute access of the module.
-"""
-
-# Standard library modules.
-import collections
-import functools
-import importlib
-import inspect
-import sys
-import types
-import warnings
-
-# Modules included in our package.
-from humanfriendly.text import format
-
-# Registry of known aliases (used by humanfriendly.sphinx).
-REGISTRY = collections.defaultdict(dict)
-
-# Public identifiers that require documentation.
-__all__ = ("DeprecationProxy", "define_aliases", "deprecated_args", "get_aliases", "is_method")
-
-
-def define_aliases(module_name, **aliases):
- """
- Update a module with backwards compatible aliases.
-
- :param module_name: The ``__name__`` of the module (a string).
- :param aliases: Each keyword argument defines an alias. The values
- are expected to be "dotted paths" (strings).
-
- The behavior of this function depends on whether the Sphinx documentation
- generator is active, because the use of :class:`DeprecationProxy` to shadow the
- real module in :data:`sys.modules` has the unintended side effect of
- breaking autodoc support for ``:data:`` members (module variables).
-
- To avoid breaking Sphinx the proxy object is omitted and instead the
- aliased names are injected into the original module namespace, to make sure
- that imports can be satisfied when the documentation is being rendered.
-
- If you run into cyclic dependencies caused by :func:`define_aliases()` when
- running Sphinx, you can try moving the call to :func:`define_aliases()` to
- the bottom of the Python module you're working on.
- """
- module = sys.modules[module_name]
- proxy = DeprecationProxy(module, aliases)
- # Populate the registry of aliases.
- for name, target in aliases.items():
- REGISTRY[module.__name__][name] = target
- # Avoid confusing Sphinx.
- if "sphinx" in sys.modules:
- for name, target in aliases.items():
- setattr(module, name, proxy.resolve(target))
- else:
- # Install a proxy object to raise DeprecationWarning.
- sys.modules[module_name] = proxy
-
-
-def get_aliases(module_name):
- """
- Get the aliases defined by a module.
-
- :param module_name: The ``__name__`` of the module (a string).
- :returns: A dictionary with string keys and values:
-
- 1. Each key gives the name of an alias
- created for backwards compatibility.
-
- 2. Each value gives the dotted path of
- the proper location of the identifier.
-
- An empty dictionary is returned for modules that
- don't define any backwards compatible aliases.
- """
- return REGISTRY.get(module_name, {})
-
-
-def deprecated_args(*names):
- """
- Deprecate positional arguments without dropping backwards compatibility.
-
- :param names:
-
- The positional arguments to :func:`deprecated_args()` give the names of
- the positional arguments that the to-be-decorated function should warn
- about being deprecated and translate to keyword arguments.
-
- :returns: A decorator function specialized to `names`.
-
- The :func:`deprecated_args()` decorator function was created to make it
- easy to switch from positional arguments to keyword arguments [#]_ while
- preserving backwards compatibility [#]_ and informing call sites
- about the change.
-
- .. [#] Increased flexibility is the main reason why I find myself switching
- from positional arguments to (optional) keyword arguments as my code
- evolves to support more use cases.
-
- .. [#] In my experience positional argument order implicitly becomes part
- of API compatibility whether intended or not. While this makes sense
- for functions that over time adopt more and more optional arguments,
- at a certain point it becomes an inconvenience to code maintenance.
-
- Here's an example of how to use the decorator::
-
- @deprecated_args('text')
- def report_choice(**options):
- print(options['text'])
-
- When the decorated function is called with positional arguments
- a deprecation warning is given::
-
- >>> report_choice('this will give a deprecation warning')
- DeprecationWarning: report_choice has deprecated positional arguments, please switch to keyword arguments
- this will give a deprecation warning
-
- But when the function is called with keyword arguments no deprecation
- warning is emitted::
-
- >>> report_choice(text='this will not give a deprecation warning')
- this will not give a deprecation warning
- """
- def decorator(function):
- def translate(args, kw):
- # Raise TypeError when too many positional arguments are passed to the decorated function.
- if len(args) > len(names):
- raise TypeError(
- format(
- "{name} expected at most {limit} arguments, got {count}",
- name=function.__name__,
- limit=len(names),
- count=len(args),
- )
- )
- # Emit a deprecation warning when positional arguments are used.
- if args:
- warnings.warn(
- format(
- "{name} has deprecated positional arguments, please switch to keyword arguments",
- name=function.__name__,
- ),
- category=DeprecationWarning,
- stacklevel=3,
- )
- # Translate positional arguments to keyword arguments.
- for name, value in zip(names, args):
- kw[name] = value
- if is_method(function):
- @functools.wraps(function)
- def wrapper(*args, **kw):
- """Wrapper for instance methods."""
- args = list(args)
- self = args.pop(0)
- translate(args, kw)
- return function(self, **kw)
- else:
- @functools.wraps(function)
- def wrapper(*args, **kw):
- """Wrapper for module level functions."""
- translate(args, kw)
- return function(**kw)
- return wrapper
- return decorator
-
-
-def is_method(function):
- """Check if the expected usage of the given function is as an instance method."""
- try:
- # Python 3.3 and newer.
- signature = inspect.signature(function)
- return "self" in signature.parameters
- except AttributeError:
- # Python 3.2 and older.
- metadata = inspect.getargspec(function)
- return "self" in metadata.args
-
-
-class DeprecationProxy(types.ModuleType):
-
- """Emit deprecation warnings for imports that should be updated."""
-
- def __init__(self, module, aliases):
- """
- Initialize a :class:`DeprecationProxy` object.
-
- :param module: The original module object.
- :param aliases: A dictionary of aliases.
- """
- # Initialize our superclass.
- super(DeprecationProxy, self).__init__(name=module.__name__)
- # Store initializer arguments.
- self.module = module
- self.aliases = aliases
-
- def __getattr__(self, name):
- """
- Override module attribute lookup.
-
- :param name: The name to look up (a string).
- :returns: The attribute value.
- """
- # Check if the given name is an alias.
- target = self.aliases.get(name)
- if target is not None:
- # Emit the deprecation warning.
- warnings.warn(
- format("%s.%s was moved to %s, please update your imports", self.module.__name__, name, target),
- category=DeprecationWarning,
- stacklevel=2,
- )
- # Resolve the dotted path.
- return self.resolve(target)
- # Look up the name in the original module namespace.
- value = getattr(self.module, name, None)
- if value is not None:
- return value
- # Fall back to the default behavior.
- raise AttributeError(format("module '%s' has no attribute '%s'", self.module.__name__, name))
-
- def resolve(self, target):
- """
- Look up the target of an alias.
-
- :param target: The fully qualified dotted path (a string).
- :returns: The value of the given target.
- """
- module_name, _, member = target.rpartition(".")
- module = importlib.import_module(module_name)
- return getattr(module, member)
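Illustrative aside (not part of the diff above): a hedged sketch of how define_aliases() and deprecated_args() from the deleted module are meant to be used; the module layout and the report_choice() function are hypothetical (the latter mirrors the docstring example).

    # Hypothetical module that moved parse_size() into the humanfriendly package.
    from humanfriendly.deprecation import define_aliases, deprecated_args

    # Importing `parse_size' from this module keeps working but emits a DeprecationWarning.
    define_aliases(__name__, parse_size='humanfriendly.parse_size')

    @deprecated_args('text')
    def report_choice(**options):
        # Positional calls are translated to keyword arguments and warned about.
        print(options['text'])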
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/prompts.py b/contrib/python/humanfriendly/py3/humanfriendly/prompts.py
deleted file mode 100644
index 07166b6709..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/prompts.py
+++ /dev/null
@@ -1,376 +0,0 @@
-# vim: fileencoding=utf-8
-
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: February 9, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Interactive terminal prompts.
-
-The :mod:`~humanfriendly.prompts` module enables interaction with the user
-(operator) by asking for confirmation (:func:`prompt_for_confirmation()`) and
-asking to choose from a list of options (:func:`prompt_for_choice()`). It works
-by rendering interactive prompts on the terminal.
-"""
-
-# Standard library modules.
-import logging
-import sys
-
-# Modules included in our package.
-from humanfriendly.compat import interactive_prompt
-from humanfriendly.terminal import (
- HIGHLIGHT_COLOR,
- ansi_strip,
- ansi_wrap,
- connected_to_terminal,
- terminal_supports_colors,
- warning,
-)
-from humanfriendly.text import format, concatenate
-
-# Public identifiers that require documentation.
-__all__ = (
- 'MAX_ATTEMPTS',
- 'TooManyInvalidReplies',
- 'logger',
- 'prepare_friendly_prompts',
- 'prepare_prompt_text',
- 'prompt_for_choice',
- 'prompt_for_confirmation',
- 'prompt_for_input',
- 'retry_limit',
-)
-
-MAX_ATTEMPTS = 10
-"""The number of times an interactive prompt is shown on invalid input (an integer)."""
-
-# Initialize a logger for this module.
-logger = logging.getLogger(__name__)
-
-
-def prompt_for_confirmation(question, default=None, padding=True):
- """
- Prompt the user for confirmation.
-
- :param question: The text that explains what the user is confirming (a string).
- :param default: The default value (a boolean) or :data:`None`.
- :param padding: Refer to the documentation of :func:`prompt_for_input()`.
- :returns: - If the user enters 'yes' or 'y' then :data:`True` is returned.
- - If the user enters 'no' or 'n' then :data:`False` is returned.
- - If the user doesn't enter any text or standard input is not
- connected to a terminal (which makes it impossible to prompt
- the user) the value of the keyword argument ``default`` is
- returned (if that value is not :data:`None`).
- :raises: - Any exceptions raised by :func:`retry_limit()`.
- - Any exceptions raised by :func:`prompt_for_input()`.
-
- When `default` is :data:`None` and the user doesn't enter any text an
- error message is printed and the prompt is repeated:
-
- >>> prompt_for_confirmation("Are you sure?")
- <BLANKLINE>
- Are you sure? [y/n]
- <BLANKLINE>
- Error: Please enter 'yes' or 'no' (there's no default choice).
- <BLANKLINE>
- Are you sure? [y/n]
-
- The same thing happens when the user enters text that isn't recognized:
-
- >>> prompt_for_confirmation("Are you sure?")
- <BLANKLINE>
- Are you sure? [y/n] about what?
- <BLANKLINE>
- Error: Please enter 'yes' or 'no' (the text 'about what?' is not recognized).
- <BLANKLINE>
- Are you sure? [y/n]
- """
- # Generate the text for the prompt.
- prompt_text = prepare_prompt_text(question, bold=True)
- # Append the valid replies (and default reply) to the prompt text.
- hint = "[Y/n]" if default else "[y/N]" if default is not None else "[y/n]"
- prompt_text += " %s " % prepare_prompt_text(hint, color=HIGHLIGHT_COLOR)
- # Loop until a valid response is given.
- logger.debug("Requesting interactive confirmation from terminal: %r", ansi_strip(prompt_text).rstrip())
- for attempt in retry_limit():
- reply = prompt_for_input(prompt_text, '', padding=padding, strip=True)
- if reply.lower() in ('y', 'yes'):
- logger.debug("Confirmation granted by reply (%r).", reply)
- return True
- elif reply.lower() in ('n', 'no'):
- logger.debug("Confirmation denied by reply (%r).", reply)
- return False
- elif (not reply) and default is not None:
- logger.debug("Default choice selected by empty reply (%r).",
- "granted" if default else "denied")
- return default
- else:
- details = ("the text '%s' is not recognized" % reply
- if reply else "there's no default choice")
- logger.debug("Got %s reply (%s), retrying (%i/%i) ..",
- "invalid" if reply else "empty", details,
- attempt, MAX_ATTEMPTS)
- warning("{indent}Error: Please enter 'yes' or 'no' ({details}).",
- indent=' ' if padding else '', details=details)
-
-
-def prompt_for_choice(choices, default=None, padding=True):
- """
- Prompt the user to select a choice from a group of options.
-
- :param choices: A sequence of strings with available options.
- :param default: The default choice if the user simply presses Enter
- (expected to be a string, defaults to :data:`None`).
- :param padding: Refer to the documentation of
- :func:`~humanfriendly.prompts.prompt_for_input()`.
- :returns: The string corresponding to the user's choice.
- :raises: - :exc:`~exceptions.ValueError` if `choices` is an empty sequence.
- - Any exceptions raised by
- :func:`~humanfriendly.prompts.retry_limit()`.
- - Any exceptions raised by
- :func:`~humanfriendly.prompts.prompt_for_input()`.
-
- When no options are given an exception is raised:
-
- >>> prompt_for_choice([])
- Traceback (most recent call last):
- File "humanfriendly/prompts.py", line 148, in prompt_for_choice
- raise ValueError("Can't prompt for choice without any options!")
- ValueError: Can't prompt for choice without any options!
-
- If a single option is given the user isn't prompted:
-
- >>> prompt_for_choice(['only one choice'])
- 'only one choice'
-
- Here's what the actual prompt looks like by default:
-
- >>> prompt_for_choice(['first option', 'second option'])
- <BLANKLINE>
- 1. first option
- 2. second option
- <BLANKLINE>
- Enter your choice as a number or unique substring (Control-C aborts): second
- <BLANKLINE>
- 'second option'
-
- If you don't like the whitespace (empty lines and indentation):
-
- >>> prompt_for_choice(['first option', 'second option'], padding=False)
- 1. first option
- 2. second option
- Enter your choice as a number or unique substring (Control-C aborts): first
- 'first option'
- """
- indent = ' ' if padding else ''
- # Make sure we can use 'choices' more than once (i.e. not a generator).
- choices = list(choices)
- if len(choices) == 1:
- # If there's only one option there's no point in prompting the user.
- logger.debug("Skipping interactive prompt because there's only option (%r).", choices[0])
- return choices[0]
- elif not choices:
- # We can't render a choice prompt without any options.
- raise ValueError("Can't prompt for choice without any options!")
- # Generate the prompt text.
- prompt_text = ('\n\n' if padding else '\n').join([
- # Present the available choices in a user friendly way.
- "\n".join([
- (u" %i. %s" % (i, choice)) + (" (default choice)" if choice == default else "")
- for i, choice in enumerate(choices, start=1)
- ]),
- # Instructions for the user.
- "Enter your choice as a number or unique substring (Control-C aborts): ",
- ])
- prompt_text = prepare_prompt_text(prompt_text, bold=True)
- # Loop until a valid choice is made.
- logger.debug("Requesting interactive choice on terminal (options are %s) ..",
- concatenate(map(repr, choices)))
- for attempt in retry_limit():
- reply = prompt_for_input(prompt_text, '', padding=padding, strip=True)
- if not reply and default is not None:
- logger.debug("Default choice selected by empty reply (%r).", default)
- return default
- elif reply.isdigit():
- index = int(reply) - 1
- if 0 <= index < len(choices):
- logger.debug("Option (%r) selected by numeric reply (%s).", choices[index], reply)
- return choices[index]
- # Check for substring matches.
- matches = []
- for choice in choices:
- lower_reply = reply.lower()
- lower_choice = choice.lower()
- if lower_reply == lower_choice:
- # If we have an 'exact' match we return it immediately.
- logger.debug("Option (%r) selected by reply (exact match).", choice)
- return choice
- elif lower_reply in lower_choice and len(lower_reply) > 0:
- # Otherwise we gather substring matches.
- matches.append(choice)
- if len(matches) == 1:
- # If a single choice was matched we return it.
- logger.debug("Option (%r) selected by reply (substring match on %r).", matches[0], reply)
- return matches[0]
- else:
- # Give the user a hint about what went wrong.
- if matches:
- details = format("text '%s' matches more than one choice: %s", reply, concatenate(matches))
- elif reply.isdigit():
- details = format("number %i is not a valid choice", int(reply))
- elif reply and not reply.isspace():
- details = format("text '%s' doesn't match any choices", reply)
- else:
- details = "there's no default choice"
- logger.debug("Got %s reply (%s), retrying (%i/%i) ..",
- "invalid" if reply else "empty", details,
- attempt, MAX_ATTEMPTS)
- warning("%sError: Invalid input (%s).", indent, details)
-
-
-def prompt_for_input(question, default=None, padding=True, strip=True):
- """
- Prompt the user for input (free form text).
-
- :param question: An explanation of what is expected from the user (a string).
- :param default: The return value if the user doesn't enter any text or
- standard input is not connected to a terminal (which
- makes it impossible to prompt the user).
- :param padding: Render empty lines before and after the prompt to make it
- stand out from the surrounding text? (a boolean, defaults
- to :data:`True`)
- :param strip: Strip leading/trailing whitespace from the user's reply?
- :returns: The text entered by the user (a string) or the value of the
- `default` argument.
- :raises: - :exc:`~exceptions.KeyboardInterrupt` when the program is
- interrupted_ while the prompt is active, for example
- because the user presses Control-C_.
- - :exc:`~exceptions.EOFError` when reading from `standard input`_
- fails, for example because the user presses Control-D_ or
- because the standard input stream is redirected (only if
- `default` is :data:`None`).
-
- .. _Control-C: https://en.wikipedia.org/wiki/Control-C#In_command-line_environments
- .. _Control-D: https://en.wikipedia.org/wiki/End-of-transmission_character#Meaning_in_Unix
- .. _interrupted: https://en.wikipedia.org/wiki/Unix_signal#SIGINT
- .. _standard input: https://en.wikipedia.org/wiki/Standard_streams#Standard_input_.28stdin.29
- """
- prepare_friendly_prompts()
- reply = None
- try:
- # Prefix an empty line to the text and indent by one space?
- if padding:
- question = '\n' + question
- question = question.replace('\n', '\n ')
- # Render the prompt and wait for the user's reply.
- try:
- reply = interactive_prompt(question)
- finally:
- if reply is None:
- # If the user terminated the prompt using Control-C or
- # Control-D instead of pressing Enter no newline will be
- # rendered after the prompt's text. The result looks kind of
- # weird:
- #
- # $ python -c 'print(raw_input("Are you sure? "))'
- # Are you sure? ^CTraceback (most recent call last):
- # File "<string>", line 1, in <module>
- # KeyboardInterrupt
- #
- # We can avoid this by emitting a newline ourselves if an
- # exception was raised (signaled by `reply' being None).
- sys.stderr.write('\n')
- if padding:
- # If the caller requested (didn't opt out of) `padding' then we'll
- # emit a newline regardless of whether an exception is being
- # handled. This helps to make interactive prompts `stand out' from
- # a surrounding `wall of text' on the terminal.
- sys.stderr.write('\n')
- except BaseException as e:
- if isinstance(e, EOFError) and default is not None:
- # If standard input isn't connected to an interactive terminal
- # but the caller provided a default we'll return that.
- logger.debug("Got EOF from terminal, returning default value (%r) ..", default)
- return default
- else:
- # Otherwise we log that the prompt was interrupted but propagate
- # the exception to the caller.
- logger.warning("Interactive prompt was interrupted by exception!", exc_info=True)
- raise
- if default is not None and not reply:
- # If the reply is empty and `default' is None we don't want to return
- # None because it's nicer for callers to be able to assume that the
- # return value is always a string.
- return default
- else:
- return reply.strip()
-
-
-def prepare_prompt_text(prompt_text, **options):
- """
- Wrap a text to be rendered as an interactive prompt in ANSI escape sequences.
-
- :param prompt_text: The text to render on the prompt (a string).
- :param options: Any keyword arguments are passed on to :func:`.ansi_wrap()`.
- :returns: The resulting prompt text (a string).
-
- ANSI escape sequences are only used when the standard output stream is
- connected to a terminal. When the standard input stream is connected to a
- terminal any escape sequences are wrapped in "readline hints".
- """
- return (ansi_wrap(prompt_text, readline_hints=connected_to_terminal(sys.stdin), **options)
- if terminal_supports_colors(sys.stdout)
- else prompt_text)
-
-
-def prepare_friendly_prompts():
- u"""
- Make interactive prompts more user friendly.
-
- The prompts presented by :func:`python2:raw_input()` (in Python 2) and
- :func:`python3:input()` (in Python 3) are not very user friendly by
- default, for example the cursor keys (:kbd:`←`, :kbd:`↑`, :kbd:`→` and
- :kbd:`↓`) and the :kbd:`Home` and :kbd:`End` keys enter characters instead
- of performing the action you would expect them to. By simply importing the
- :mod:`readline` module these prompts become much friendlier (as mentioned
- in the Python standard library documentation).
-
- This function is called by the other functions in this module to enable
- user friendly prompts.
- """
- try:
- import readline # NOQA
- except ImportError:
- # might not be available on Windows if pyreadline isn't installed
- pass
-
-
-def retry_limit(limit=MAX_ATTEMPTS):
- """
- Allow the user to provide valid input up to `limit` times.
-
- :param limit: The maximum number of attempts (a number,
- defaults to :data:`MAX_ATTEMPTS`).
- :returns: A generator of numbers starting from one.
- :raises: :exc:`TooManyInvalidReplies` when an interactive prompt
- receives repeated invalid input (:data:`MAX_ATTEMPTS`).
-
- This function returns a generator for interactive prompts that want to
- repeat on invalid input without getting stuck in infinite loops.
- """
- for i in range(limit):
- yield i + 1
- msg = "Received too many invalid replies on interactive prompt, giving up! (tried %i times)"
- formatted_msg = msg % limit
- # Make sure the event is logged.
- logger.warning(formatted_msg)
- # Force the caller to decide what to do now.
- raise TooManyInvalidReplies(formatted_msg)
-
-
-class TooManyInvalidReplies(Exception):
-
- """Raised by interactive prompts when they've received too many invalid inputs."""
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/sphinx.py b/contrib/python/humanfriendly/py3/humanfriendly/sphinx.py
deleted file mode 100644
index cf5d1b3935..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/sphinx.py
+++ /dev/null
@@ -1,315 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: June 11, 2021
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Customizations for and integration with the Sphinx_ documentation generator.
-
-The :mod:`humanfriendly.sphinx` module uses the `Sphinx extension API`_ to
-customize the process of generating Sphinx based Python documentation. To
-explore the functionality this module offers, it's best to start reading
-from the :func:`setup()` function.
-
-.. _Sphinx: http://www.sphinx-doc.org/
-.. _Sphinx extension API: http://sphinx-doc.org/extdev/appapi.html
-"""
-
-# Standard library modules.
-import logging
-import types
-
-# External dependencies (if Sphinx is installed docutils will be installed).
-import docutils.nodes
-import docutils.utils
-
-# Modules included in our package.
-from humanfriendly.deprecation import get_aliases
-from humanfriendly.text import compact, dedent, format
-from humanfriendly.usage import USAGE_MARKER, render_usage
-
-# Public identifiers that require documentation.
-__all__ = (
- "deprecation_note_callback",
- "enable_deprecation_notes",
- "enable_man_role",
- "enable_pypi_role",
- "enable_special_methods",
- "enable_usage_formatting",
- "logger",
- "man_role",
- "pypi_role",
- "setup",
- "special_methods_callback",
- "usage_message_callback",
-)
-
-# Initialize a logger for this module.
-logger = logging.getLogger(__name__)
-
-
-def deprecation_note_callback(app, what, name, obj, options, lines):
- """
- Automatically document aliases defined using :func:`~humanfriendly.deprecation.define_aliases()`.
-
- Refer to :func:`enable_deprecation_notes()` to enable the use of this
- function (you probably don't want to call :func:`deprecation_note_callback()`
- directly).
-
- This function implements a callback for ``autodoc-process-docstring`` that
- reformats module docstrings to append an overview of aliases defined by the
- module.
-
- The parameters expected by this function are those defined for Sphinx event
- callback functions (i.e. I'm not going to document them here :-).
- """
- if isinstance(obj, types.ModuleType) and lines:
- aliases = get_aliases(obj.__name__)
- if aliases:
- # Convert the existing docstring to a string and remove leading
- # indentation from that string, otherwise our generated content
- # would have to match the existing indentation in order not to
- # break docstring parsing (because indentation is significant
- # in the reStructuredText format).
- blocks = [dedent("\n".join(lines))]
- # Use an admonition to group the deprecated aliases together and
- # to distinguish them from the autodoc entries that follow.
- blocks.append(".. note:: Deprecated names")
- indent = " " * 3
- if len(aliases) == 1:
- explanation = """
- The following alias exists to preserve backwards compatibility,
- however a :exc:`~exceptions.DeprecationWarning` is triggered
- when it is accessed, because this alias will be removed
- in a future release.
- """
- else:
- explanation = """
- The following aliases exist to preserve backwards compatibility,
- however a :exc:`~exceptions.DeprecationWarning` is triggered
- when they are accessed, because these aliases will be
- removed in a future release.
- """
- blocks.append(indent + compact(explanation))
- for name, target in aliases.items():
- blocks.append(format("%s.. data:: %s", indent, name))
- blocks.append(format("%sAlias for :obj:`%s`.", indent * 2, target))
- update_lines(lines, "\n\n".join(blocks))
-
-
-def enable_deprecation_notes(app):
- """
- Enable documenting backwards compatibility aliases using the autodoc_ extension.
-
- :param app: The Sphinx application object.
-
- This function connects the :func:`deprecation_note_callback()` function to
- ``autodoc-process-docstring`` events.
-
- .. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html
- """
- app.connect("autodoc-process-docstring", deprecation_note_callback)
-
-
-def enable_man_role(app):
- """
- Enable the ``:man:`` role for linking to Debian Linux manual pages.
-
- :param app: The Sphinx application object.
-
- This function registers the :func:`man_role()` function to handle the
- ``:man:`` role.
- """
- app.add_role("man", man_role)
-
-
-def enable_pypi_role(app):
- """
- Enable the ``:pypi:`` role for linking to the Python Package Index.
-
- :param app: The Sphinx application object.
-
- This function registers the :func:`pypi_role()` function to handle the
- ``:pypi:`` role.
- """
- app.add_role("pypi", pypi_role)
-
-
-def enable_special_methods(app):
- """
- Enable documenting "special methods" using the autodoc_ extension.
-
- :param app: The Sphinx application object.
-
- This function connects the :func:`special_methods_callback()` function to
- ``autodoc-skip-member`` events.
-
- .. _autodoc: http://www.sphinx-doc.org/en/stable/ext/autodoc.html
- """
- app.connect("autodoc-skip-member", special_methods_callback)
-
-
-def enable_usage_formatting(app):
- """
- Reformat human friendly usage messages to reStructuredText_.
-
- :param app: The Sphinx application object (as given to ``setup()``).
-
- This function connects the :func:`usage_message_callback()` function to
- ``autodoc-process-docstring`` events.
-
- .. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
- """
- app.connect("autodoc-process-docstring", usage_message_callback)
-
-
-def man_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
- """
- Convert a Linux manual topic to a hyperlink.
-
- Using the ``:man:`` role is very simple, here's an example:
-
- .. code-block:: rst
-
- See the :man:`python` documentation.
-
- This results in the following:
-
- See the :man:`python` documentation.
-
- As the example shows you can use the role inline, embedded in sentences of
- text. In the generated documentation the ``:man:`` text is omitted and a
- hyperlink pointing to the Debian Linux manual pages is emitted.
- """
- man_url = "https://manpages.debian.org/%s" % text
- reference = docutils.nodes.reference(rawtext, docutils.utils.unescape(text), refuri=man_url, **options)
- return [reference], []
-
-
-def pypi_role(role, rawtext, text, lineno, inliner, options={}, content=[]):
- """
- Generate hyperlinks to the Python Package Index.
-
- Using the ``:pypi:`` role is very simple, here's an example:
-
- .. code-block:: rst
-
- See the :pypi:`humanfriendly` package.
-
- This results in the following:
-
- See the :pypi:`humanfriendly` package.
-
- As the example shows you can use the role inline, embedded in sentences of
- text. In the generated documentation the ``:pypi:`` text is omitted and a
- hyperlink pointing to the Python Package Index is emitted.
- """
- pypi_url = "https://pypi.org/project/%s/" % text
- reference = docutils.nodes.reference(rawtext, docutils.utils.unescape(text), refuri=pypi_url, **options)
- return [reference], []
-
-
-def setup(app):
- """
- Enable all of the provided Sphinx_ customizations.
-
- :param app: The Sphinx application object.
-
- The :func:`setup()` function makes it easy to enable all of the Sphinx
- customizations provided by the :mod:`humanfriendly.sphinx` module with the
- least amount of code. All you need to do is to add the module name to the
- ``extensions`` variable in your ``conf.py`` file:
-
- .. code-block:: python
-
- # Sphinx extension module names.
- extensions = [
- 'sphinx.ext.autodoc',
- 'sphinx.ext.doctest',
- 'sphinx.ext.intersphinx',
- 'humanfriendly.sphinx',
- ]
-
- When Sphinx sees the :mod:`humanfriendly.sphinx` name it will import the
- module and call its :func:`setup()` function. This function will then call
- the following:
-
- - :func:`enable_deprecation_notes()`
- - :func:`enable_man_role()`
- - :func:`enable_pypi_role()`
- - :func:`enable_special_methods()`
- - :func:`enable_usage_formatting()`
-
- Of course more functionality may be added at a later stage. If you don't
- like that idea you may be better off calling the individual functions from
- your own ``setup()`` function.
- """
- from humanfriendly import __version__
-
- enable_deprecation_notes(app)
- enable_man_role(app)
- enable_pypi_role(app)
- enable_special_methods(app)
- enable_usage_formatting(app)
-
- return dict(parallel_read_safe=True, parallel_write_safe=True, version=__version__)
-
-
-def special_methods_callback(app, what, name, obj, skip, options):
- """
- Enable documenting "special methods" using the autodoc_ extension.
-
- Refer to :func:`enable_special_methods()` to enable the use of this
- function (you probably don't want to call
- :func:`special_methods_callback()` directly).
-
- This function implements a callback for ``autodoc-skip-member`` events to
- include documented "special methods" (method names with two leading and two
- trailing underscores) in your documentation. The result is similar to the
- use of the ``special-members`` flag with one big difference: Special
- methods are included but other types of members are ignored. This means
- that attributes like ``__weakref__`` will always be ignored (this was my
- main annoyance with the ``special-members`` flag).
-
- The parameters expected by this function are those defined for Sphinx event
- callback functions (i.e. I'm not going to document them here :-).
- """
- if getattr(obj, "__doc__", None) and isinstance(obj, (types.FunctionType, types.MethodType)):
- return False
- else:
- return skip
-
-
-def update_lines(lines, text):
- """Private helper for ``autodoc-process-docstring`` callbacks."""
- while lines:
- lines.pop()
- lines.extend(text.splitlines())
-
-
-def usage_message_callback(app, what, name, obj, options, lines):
- """
- Reformat human friendly usage messages to reStructuredText_.
-
- Refer to :func:`enable_usage_formatting()` to enable the use of this
- function (you probably don't want to call :func:`usage_message_callback()`
- directly).
-
- This function implements a callback for ``autodoc-process-docstring`` that
- reformats module docstrings using :func:`.render_usage()` so that Sphinx
- doesn't mangle usage messages that were written to be human readable
- instead of machine readable. Only module docstrings whose first line starts
- with :data:`.USAGE_MARKER` are reformatted.
-
- The parameters expected by this function are those defined for Sphinx event
- callback functions (i.e. I'm not going to document them here :-).
- """
- # Make sure we only modify the docstrings of modules.
- if isinstance(obj, types.ModuleType) and lines:
- # Make sure we only modify docstrings containing a usage message.
- if lines[0].startswith(USAGE_MARKER):
- # Convert the usage message to reStructuredText.
- text = render_usage("\n".join(lines))
- # Fill up the buffer with our modified docstring.
- update_lines(lines, text)
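Illustrative aside (not part of the diff above): the setup() docstring above suggests calling the individual enable_*() functions from your own setup() if you don't want everything enabled; a hedged conf.py sketch of that alternative follows (the feature selection is arbitrary).

    # Hypothetical Sphinx conf.py that cherry-picks features instead of enabling them all.
    def setup(app):
        from humanfriendly.sphinx import enable_man_role, enable_pypi_role
        enable_man_role(app)
        enable_pypi_role(app)
        return dict(parallel_read_safe=True, parallel_write_safe=True)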
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/tables.py b/contrib/python/humanfriendly/py3/humanfriendly/tables.py
deleted file mode 100644
index 3a32ba3b3f..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/tables.py
+++ /dev/null
@@ -1,341 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: February 16, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Functions that render ASCII tables.
-
-Some generic notes about the table formatting functions in this module:
-
-- These functions were not written with performance in mind (*at all*) because
- they're intended to format tabular data to be presented on a terminal. If
- someone were to run into a performance problem using these functions, they'd
- be printing so much tabular data to the terminal that a human wouldn't be
- able to digest the tabular data anyway, so the point is moot :-).
-
-- These functions ignore ANSI escape sequences (at least the ones generated by
- the :mod:`~humanfriendly.terminal` module) in the calculation of columns
- widths. One reason for this is that column names are highlighted in color when
- connected to a terminal. It also means that you can use ANSI escape sequences
- to highlight certain column's values if you feel like it (for example to
- highlight deviations from the norm in an overview of calculated values).
-"""
-
-# Standard library modules.
-import collections
-import re
-
-# Modules included in our package.
-from humanfriendly.compat import coerce_string
-from humanfriendly.terminal import (
- ansi_strip,
- ansi_width,
- ansi_wrap,
- terminal_supports_colors,
- find_terminal_size,
- HIGHLIGHT_COLOR,
-)
-
-# Public identifiers that require documentation.
-__all__ = (
- 'format_pretty_table',
- 'format_robust_table',
- 'format_rst_table',
- 'format_smart_table',
-)
-
-# Compiled regular expression pattern to recognize table columns containing
-# numeric data (integer and/or floating point numbers). Used to right-align the
-# contents of such columns.
-#
-# Pre-emptive snarky comment: This pattern doesn't match every possible
-# floating point number notation!?!1!1
-#
-# Response: I know, that's intentional. The use of this regular expression
-# pattern has a very high DWIM level and weird floating point notations do not
-# fall under the DWIM umbrella :-).
-NUMERIC_DATA_PATTERN = re.compile(r'^\d+(\.\d+)?$')
-
-
-def format_smart_table(data, column_names):
- """
- Render tabular data using the most appropriate representation.
-
- :param data: An iterable (e.g. a :func:`tuple` or :class:`list`)
- containing the rows of the table, where each row is an
- iterable containing the columns of the table (strings).
- :param column_names: An iterable of column names (strings).
- :returns: The rendered table (a string).
-
- If you want an easy way to render tabular data on a terminal in a human
- friendly format then this function is for you! It works as follows:
-
- - If the input data doesn't contain any line breaks the function
- :func:`format_pretty_table()` is used to render a pretty table. If the
- resulting table fits in the terminal without wrapping the rendered pretty
- table is returned.
-
- - If the input data does contain line breaks or if a pretty table would
- wrap (given the width of the terminal) then the function
- :func:`format_robust_table()` is used to render a more robust table that
- can deal with data containing line breaks and long text.
- """
- # Normalize the input in case we fall back from a pretty table to a robust
- # table (in which case we'll definitely iterate the input more than once).
- data = [normalize_columns(r) for r in data]
- column_names = normalize_columns(column_names)
- # Make sure the input data doesn't contain any line breaks (because pretty
- # tables break horribly when a column's text contains a line break :-).
- if not any(any('\n' in c for c in r) for r in data):
- # Render a pretty table.
- pretty_table = format_pretty_table(data, column_names)
- # Check if the pretty table fits in the terminal.
- table_width = max(map(ansi_width, pretty_table.splitlines()))
- num_rows, num_columns = find_terminal_size()
- if table_width <= num_columns:
- # The pretty table fits in the terminal without wrapping!
- return pretty_table
- # Fall back to a robust table when a pretty table won't work.
- return format_robust_table(data, column_names)
-
-
-def format_pretty_table(data, column_names=None, horizontal_bar='-', vertical_bar='|'):
- """
- Render a table using characters like dashes and vertical bars to emulate borders.
-
- :param data: An iterable (e.g. a :func:`tuple` or :class:`list`)
- containing the rows of the table, where each row is an
- iterable containing the columns of the table (strings).
- :param column_names: An iterable of column names (strings).
- :param horizontal_bar: The character used to represent a horizontal bar (a
- string).
- :param vertical_bar: The character used to represent a vertical bar (a
- string).
- :returns: The rendered table (a string).
-
- Here's an example:
-
- >>> from humanfriendly.tables import format_pretty_table
- >>> column_names = ['Version', 'Uploaded on', 'Downloads']
- >>> humanfriendly_releases = [
- ... ['1.23', '2015-05-25', '218'],
- ... ['1.23.1', '2015-05-26', '1354'],
- ... ['1.24', '2015-05-26', '223'],
- ... ['1.25', '2015-05-26', '4319'],
- ... ['1.25.1', '2015-06-02', '197'],
- ... ]
- >>> print(format_pretty_table(humanfriendly_releases, column_names))
- -------------------------------------
- | Version | Uploaded on | Downloads |
- -------------------------------------
- | 1.23 | 2015-05-25 | 218 |
- | 1.23.1 | 2015-05-26 | 1354 |
- | 1.24 | 2015-05-26 | 223 |
- | 1.25 | 2015-05-26 | 4319 |
- | 1.25.1 | 2015-06-02 | 197 |
- -------------------------------------
-
- Notes about the resulting table:
-
- - If a column contains numeric data (integer and/or floating point
- numbers) in all rows (ignoring column names of course) then the content
- of that column is right-aligned, as can be seen in the example above. The
- idea here is to make it easier to compare the numbers in different
- columns to each other.
-
- - The column names are highlighted in color so they stand out a bit more
- (see also :data:`.HIGHLIGHT_COLOR`). The following screen shot shows what
- that looks like (my terminals are always set to white text on a black
- background):
-
- .. image:: images/pretty-table.png
- """
- # Normalize the input because we'll have to iterate it more than once.
- data = [normalize_columns(r, expandtabs=True) for r in data]
- if column_names is not None:
- column_names = normalize_columns(column_names)
- if column_names:
- if terminal_supports_colors():
- column_names = [highlight_column_name(n) for n in column_names]
- data.insert(0, column_names)
- # Calculate the maximum width of each column.
- widths = collections.defaultdict(int)
- numeric_data = collections.defaultdict(list)
- for row_index, row in enumerate(data):
- for column_index, column in enumerate(row):
- widths[column_index] = max(widths[column_index], ansi_width(column))
- if not (column_names and row_index == 0):
- numeric_data[column_index].append(bool(NUMERIC_DATA_PATTERN.match(ansi_strip(column))))
- # Create a horizontal bar of dashes as a delimiter.
- line_delimiter = horizontal_bar * (sum(widths.values()) + len(widths) * 3 + 1)
- # Start the table with the horizontal delimiter line.
- lines = [line_delimiter]
- # Format the rows and columns.
- for row_index, row in enumerate(data):
- line = [vertical_bar]
- for column_index, column in enumerate(row):
- padding = ' ' * (widths[column_index] - ansi_width(column))
- if all(numeric_data[column_index]):
- line.append(' ' + padding + column + ' ')
- else:
- line.append(' ' + column + padding + ' ')
- line.append(vertical_bar)
- lines.append(u''.join(line))
- if column_names and row_index == 0:
- lines.append(line_delimiter)
- # End the table with the horizontal delimiter line.
- lines.append(line_delimiter)
- # Join the lines, returning a single string.
- return u'\n'.join(lines)
-
-
-def format_robust_table(data, column_names):
- """
- Render tabular data with one column per line (allowing columns with line breaks).
-
- :param data: An iterable (e.g. a :func:`tuple` or :class:`list`)
- containing the rows of the table, where each row is an
- iterable containing the columns of the table (strings).
- :param column_names: An iterable of column names (strings).
- :returns: The rendered table (a string).
-
- Here's an example:
-
- >>> from humanfriendly.tables import format_robust_table
- >>> column_names = ['Version', 'Uploaded on', 'Downloads']
- >>> humanfriendly_releases = [
- ... ['1.23', '2015-05-25', '218'],
- ... ['1.23.1', '2015-05-26', '1354'],
- ... ['1.24', '2015-05-26', '223'],
- ... ['1.25', '2015-05-26', '4319'],
- ... ['1.25.1', '2015-06-02', '197'],
- ... ]
- >>> print(format_robust_table(humanfriendly_releases, column_names))
- -----------------------
- Version: 1.23
- Uploaded on: 2015-05-25
- Downloads: 218
- -----------------------
- Version: 1.23.1
- Uploaded on: 2015-05-26
- Downloads: 1354
- -----------------------
- Version: 1.24
- Uploaded on: 2015-05-26
- Downloads: 223
- -----------------------
- Version: 1.25
- Uploaded on: 2015-05-26
- Downloads: 4319
- -----------------------
- Version: 1.25.1
- Uploaded on: 2015-06-02
- Downloads: 197
- -----------------------
-
- The column names are highlighted in bold font and color so they stand out a
- bit more (see :data:`.HIGHLIGHT_COLOR`).
- """
- blocks = []
- column_names = ["%s:" % n for n in normalize_columns(column_names)]
- if terminal_supports_colors():
- column_names = [highlight_column_name(n) for n in column_names]
- # Convert each row into one or more `name: value' lines (one per column)
- # and group each `row of lines' into a block (i.e. rows become blocks).
- for row in data:
- lines = []
- for column_index, column_text in enumerate(normalize_columns(row)):
- stripped_column = column_text.strip()
- if '\n' not in stripped_column:
- # Columns without line breaks are formatted inline.
- lines.append("%s %s" % (column_names[column_index], stripped_column))
- else:
- # Columns with line breaks could very well contain indented
- # lines, so we'll put the column name on a separate line. This
- # way any indentation remains intact, and it's easier to
- # copy/paste the text.
- lines.append(column_names[column_index])
- lines.extend(column_text.rstrip().splitlines())
- blocks.append(lines)
- # Calculate the width of the row delimiter.
- num_rows, num_columns = find_terminal_size()
- longest_line = max(max(map(ansi_width, lines)) for lines in blocks)
- delimiter = u"\n%s\n" % ('-' * min(longest_line, num_columns))
- # Force a delimiter at the start and end of the table.
- blocks.insert(0, "")
- blocks.append("")
- # Embed the row delimiter between every two blocks.
- return delimiter.join(u"\n".join(b) for b in blocks).strip()
-
-
-def format_rst_table(data, column_names=None):
- """
- Render a table in reStructuredText_ format.
-
- :param data: An iterable (e.g. a :func:`tuple` or :class:`list`)
- containing the rows of the table, where each row is an
- iterable containing the columns of the table (strings).
- :param column_names: An iterable of column names (strings).
- :returns: The rendered table (a string).
-
- Here's an example:
-
- >>> from humanfriendly.tables import format_rst_table
- >>> column_names = ['Version', 'Uploaded on', 'Downloads']
- >>> humanfriendly_releases = [
- ... ['1.23', '2015-05-25', '218'],
- ... ['1.23.1', '2015-05-26', '1354'],
- ... ['1.24', '2015-05-26', '223'],
- ... ['1.25', '2015-05-26', '4319'],
- ... ['1.25.1', '2015-06-02', '197'],
- ... ]
- >>> print(format_rst_table(humanfriendly_releases, column_names))
- ======= =========== =========
- Version Uploaded on Downloads
- ======= =========== =========
- 1.23 2015-05-25 218
- 1.23.1 2015-05-26 1354
- 1.24 2015-05-26 223
- 1.25 2015-05-26 4319
- 1.25.1 2015-06-02 197
- ======= =========== =========
-
- .. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
- """
- data = [normalize_columns(r) for r in data]
- if column_names:
- data.insert(0, normalize_columns(column_names))
- # Calculate the maximum width of each column.
- widths = collections.defaultdict(int)
- for row in data:
- for index, column in enumerate(row):
- widths[index] = max(widths[index], len(column))
- # Pad the columns using whitespace.
- for row in data:
- for index, column in enumerate(row):
- if index < (len(row) - 1):
- row[index] = column.ljust(widths[index])
- # Add table markers.
- delimiter = ['=' * w for i, w in sorted(widths.items())]
- if column_names:
- data.insert(1, delimiter)
- data.insert(0, delimiter)
- data.append(delimiter)
- # Join the lines and columns together.
- return '\n'.join(' '.join(r) for r in data)
-
-
-def normalize_columns(row, expandtabs=False):
- results = []
- for value in row:
- text = coerce_string(value)
- if expandtabs:
- text = text.expandtabs()
- results.append(text)
- return results
-
-
-def highlight_column_name(name):
- return ansi_wrap(name, bold=True, color=HIGHLIGHT_COLOR)
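Illustrative aside (not part of the diff above): format_smart_table() is the only table renderer above without a usage example, so here is a hedged sketch that reuses the release data from the other docstrings; the rendered output depends on the terminal width and is therefore not reproduced.

    from humanfriendly.tables import format_smart_table

    column_names = ['Version', 'Uploaded on', 'Downloads']
    humanfriendly_releases = [
        ['1.23', '2015-05-25', '218'],
        ['1.23.1', '2015-05-26', '1354'],
    ]
    # Falls back from format_pretty_table() to format_robust_table() when the
    # data contains line breaks or the pretty table would wrap.
    print(format_smart_table(humanfriendly_releases, column_names))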
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/terminal/__init__.py b/contrib/python/humanfriendly/py3/humanfriendly/terminal/__init__.py
deleted file mode 100644
index ba9739ccb2..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/terminal/__init__.py
+++ /dev/null
@@ -1,776 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 1, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Interaction with interactive text terminals.
-
-The :mod:`~humanfriendly.terminal` module makes it easy to interact with
-interactive text terminals and format text for rendering on such terminals. If
-the terms used in the documentation of this module don't make sense to you then
-please refer to the `Wikipedia article on ANSI escape sequences`_ for details
-about how ANSI escape sequences work.
-
-This module was originally developed for use on UNIX systems, but since then
-Windows 10 gained native support for ANSI escape sequences and this module was
-enhanced to recognize and support this. For details please refer to the
-:func:`enable_ansi_support()` function.
-
-.. _Wikipedia article on ANSI escape sequences: http://en.wikipedia.org/wiki/ANSI_escape_code#Sequence_elements
-"""
-
-# Standard library modules.
-import codecs
-import numbers
-import os
-import platform
-import re
-import subprocess
-import sys
-
-# The `fcntl' module is platform specific so importing it may give an error. We
-# hide this implementation detail from callers by handling the import error and
-# setting a flag instead.
-try:
- import fcntl
- import termios
- import struct
- HAVE_IOCTL = True
-except ImportError:
- HAVE_IOCTL = False
-
-# Modules included in our package.
-from humanfriendly.compat import coerce_string, is_unicode, on_windows, which
-from humanfriendly.decorators import cached
-from humanfriendly.deprecation import define_aliases
-from humanfriendly.text import concatenate, format
-from humanfriendly.usage import format_usage
-
-# Public identifiers that require documentation.
-__all__ = (
- 'ANSI_COLOR_CODES',
- 'ANSI_CSI',
- 'ANSI_ERASE_LINE',
- 'ANSI_HIDE_CURSOR',
- 'ANSI_RESET',
- 'ANSI_SGR',
- 'ANSI_SHOW_CURSOR',
- 'ANSI_TEXT_STYLES',
- 'CLEAN_OUTPUT_PATTERN',
- 'DEFAULT_COLUMNS',
- 'DEFAULT_ENCODING',
- 'DEFAULT_LINES',
- 'HIGHLIGHT_COLOR',
- 'ansi_strip',
- 'ansi_style',
- 'ansi_width',
- 'ansi_wrap',
- 'auto_encode',
- 'clean_terminal_output',
- 'connected_to_terminal',
- 'enable_ansi_support',
- 'find_terminal_size',
- 'find_terminal_size_using_ioctl',
- 'find_terminal_size_using_stty',
- 'get_pager_command',
- 'have_windows_native_ansi_support',
- 'message',
- 'output',
- 'readline_strip',
- 'readline_wrap',
- 'show_pager',
- 'terminal_supports_colors',
- 'usage',
- 'warning',
-)
-
-ANSI_CSI = '\x1b['
-"""The ANSI "Control Sequence Introducer" (a string)."""
-
-ANSI_SGR = 'm'
-"""The ANSI "Select Graphic Rendition" sequence (a string)."""
-
-ANSI_ERASE_LINE = '%sK' % ANSI_CSI
-"""The ANSI escape sequence to erase the current line (a string)."""
-
-ANSI_RESET = '%s0%s' % (ANSI_CSI, ANSI_SGR)
-"""The ANSI escape sequence to reset styling (a string)."""
-
-ANSI_HIDE_CURSOR = '%s?25l' % ANSI_CSI
-"""The ANSI escape sequence to hide the text cursor (a string)."""
-
-ANSI_SHOW_CURSOR = '%s?25h' % ANSI_CSI
-"""The ANSI escape sequence to show the text cursor (a string)."""
-
-ANSI_COLOR_CODES = dict(black=0, red=1, green=2, yellow=3, blue=4, magenta=5, cyan=6, white=7)
-"""
-A dictionary with (name, number) pairs of `portable color codes`_. Used by
-:func:`ansi_style()` to generate ANSI escape sequences that change font color.
-
-.. _portable color codes: http://en.wikipedia.org/wiki/ANSI_escape_code#Colors
-"""
-
-ANSI_TEXT_STYLES = dict(bold=1, faint=2, italic=3, underline=4, inverse=7, strike_through=9)
-"""
-A dictionary with (name, number) pairs of text styles (effects). Used by
-:func:`ansi_style()` to generate ANSI escape sequences that change text
-styles. Only widely supported text styles are included here.
-"""
-
-CLEAN_OUTPUT_PATTERN = re.compile(u'(\r|\n|\b|%s)' % re.escape(ANSI_ERASE_LINE))
-"""
-A compiled regular expression used to separate significant characters from other text.
-
-This pattern is used by :func:`clean_terminal_output()` to split terminal
-output into regular text versus backspace, carriage return and line feed
-characters and ANSI 'erase line' escape sequences.
-"""
-
-DEFAULT_LINES = 25
-"""The default number of lines in a terminal (an integer)."""
-
-DEFAULT_COLUMNS = 80
-"""The default number of columns in a terminal (an integer)."""
-
-DEFAULT_ENCODING = 'UTF-8'
-"""The output encoding for Unicode strings."""
-
-HIGHLIGHT_COLOR = os.environ.get('HUMANFRIENDLY_HIGHLIGHT_COLOR', 'green')
-"""
-The color used to highlight important tokens in formatted text (e.g. the usage
-message of the ``humanfriendly`` program). If the environment variable
-``$HUMANFRIENDLY_HIGHLIGHT_COLOR`` is set it determines the value of
-:data:`HIGHLIGHT_COLOR`.
-"""
-
-
-def ansi_strip(text, readline_hints=True):
- """
- Strip ANSI escape sequences from the given string.
-
- :param text: The text from which ANSI escape sequences should be removed (a
- string).
- :param readline_hints: If :data:`True` then :func:`readline_strip()` is
- used to remove `readline hints`_ from the string.
- :returns: The text without ANSI escape sequences (a string).
- """
- pattern = '%s.*?%s' % (re.escape(ANSI_CSI), re.escape(ANSI_SGR))
- text = re.sub(pattern, '', text)
- if readline_hints:
- text = readline_strip(text)
- return text
-
-
-def ansi_style(**kw):
- """
- Generate ANSI escape sequences for the given color and/or style(s).
-
- :param color: The foreground color. Three types of values are supported:
-
- - The name of a color (one of the strings 'black', 'red',
- 'green', 'yellow', 'blue', 'magenta', 'cyan' or 'white').
- - An integer that refers to the 256 color mode palette.
- - A tuple or list with three integers representing an RGB
- (red, green, blue) value.
-
- The value :data:`None` (the default) means no escape
- sequence to switch color will be emitted.
- :param background: The background color (see the description
- of the `color` argument).
- :param bright: Use high intensity colors instead of default colors
- (a boolean, defaults to :data:`False`).
- :param readline_hints: If :data:`True` then :func:`readline_wrap()` is
- applied to the generated ANSI escape sequences (the
- default is :data:`False`).
- :param kw: Any additional keyword arguments are expected to match a key
- in the :data:`ANSI_TEXT_STYLES` dictionary. If the argument's
- value evaluates to :data:`True` the respective style will be
- enabled.
- :returns: The ANSI escape sequences to enable the requested text styles or
- an empty string if no styles were requested.
- :raises: :exc:`~exceptions.ValueError` when an invalid color name is given.
-
- Even though only eight named colors are supported, the use of `bright=True`
- and `faint=True` increases the number of available colors to around 24 (it
- may be slightly lower, for example because faint black is just black).
-
- **Support for 8-bit colors**
-
- In `release 4.7`_ support for 256 color mode was added. While this
- significantly increases the available colors it's not very human friendly
- in usage because you need to look up color codes in the `256 color mode
- palette <https://en.wikipedia.org/wiki/ANSI_escape_code#8-bit>`_.
-
-    You can use the ``humanfriendly --demo`` command to get a demonstration of
-    the available colors; see also the screen shot below. Note that the small
-    font size in the screen shot was chosen so that the demonstration of 256
-    color mode support would fit into a single screen shot without scrolling
-    :-) (I wasn't feeling very creative).
-
- .. image:: images/ansi-demo.png
-
- **Support for 24-bit colors**
-
- In `release 4.14`_ support for 24-bit colors was added by accepting a tuple
- or list with three integers representing the RGB (red, green, blue) value
- of a color. This is not included in the demo because rendering millions of
-    colors was deemed impractical ;-).
-
- .. _release 4.7: http://humanfriendly.readthedocs.io/en/latest/changelog.html#release-4-7-2018-01-14
- .. _release 4.14: http://humanfriendly.readthedocs.io/en/latest/changelog.html#release-4-14-2018-07-13
- """
- # Start with sequences that change text styles.
- sequences = [ANSI_TEXT_STYLES[k] for k, v in kw.items() if k in ANSI_TEXT_STYLES and v]
- # Append the color code (if any).
- for color_type in 'color', 'background':
- color_value = kw.get(color_type)
- if isinstance(color_value, (tuple, list)):
- if len(color_value) != 3:
- msg = "Invalid color value %r! (expected tuple or list with three numbers)"
- raise ValueError(msg % color_value)
- sequences.append(48 if color_type == 'background' else 38)
- sequences.append(2)
- sequences.extend(map(int, color_value))
-        elif isinstance(color_value, numbers.Number):
-            # Numeric values are assumed to be 256 color mode palette indexes
-            # (SGR code 38 selects the foreground, 48 the background).
-            sequences.extend((
-                48 if color_type == 'background' else 38,
-                5, int(color_value)
-            ))
- elif color_value:
- # Other values are assumed to be strings containing one of the known color names.
- if color_value not in ANSI_COLOR_CODES:
- msg = "Invalid color value %r! (expected an integer or one of the strings %s)"
- raise ValueError(msg % (color_value, concatenate(map(repr, sorted(ANSI_COLOR_CODES)))))
- # Pick the right offset for foreground versus background
- # colors and regular intensity versus bright colors.
- offset = (
- (100 if kw.get('bright') else 40)
- if color_type == 'background'
- else (90 if kw.get('bright') else 30)
- )
- # Combine the offset and color code into a single integer.
- sequences.append(offset + ANSI_COLOR_CODES[color_value])
- if sequences:
- encoded = ANSI_CSI + ';'.join(map(str, sequences)) + ANSI_SGR
- return readline_wrap(encoded) if kw.get('readline_hints') else encoded
- else:
- return ''
-
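As a quick reference for the mapping implemented above, here is a minimal sketch (assuming the package is installed so that this module imports as ``humanfriendly.terminal``, with ``ANSI_CSI`` being the usual ``'\x1b['`` prefix and ``ANSI_SGR`` being ``'m'``):

.. code-block:: python

    from humanfriendly.terminal import ansi_style

    # 'bold' is text style 1 and 'green' is color code 2 on foreground offset 30;
    # the style codes come first and the color code last.
    assert ansi_style(color='green', bold=True) == '\x1b[1;32m'
    # bright=True switches the foreground offset from 30 to 90.
    assert ansi_style(color='green', bright=True) == '\x1b[92m'
    # An integer selects an entry from the 256 color mode palette.
    assert ansi_style(color=214) == '\x1b[38;5;214m'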
-
-def ansi_width(text):
- """
- Calculate the effective width of the given text (ignoring ANSI escape sequences).
-
- :param text: The text whose width should be calculated (a string).
- :returns: The width of the text without ANSI escape sequences (an
- integer).
-
- This function uses :func:`ansi_strip()` to strip ANSI escape sequences from
- the given string and returns the length of the resulting string.
- """
- return len(ansi_strip(text))
-
-
-def ansi_wrap(text, **kw):
- """
- Wrap text in ANSI escape sequences for the given color and/or style(s).
-
- :param text: The text to wrap (a string).
- :param kw: Any keyword arguments are passed to :func:`ansi_style()`.
- :returns: The result of this function depends on the keyword arguments:
-
- - If :func:`ansi_style()` generates an ANSI escape sequence based
- on the keyword arguments, the given text is prefixed with the
- generated ANSI escape sequence and suffixed with
- :data:`ANSI_RESET`.
-
- - If :func:`ansi_style()` returns an empty string then the text
- given by the caller is returned unchanged.
- """
- start_sequence = ansi_style(**kw)
- if start_sequence:
- end_sequence = ANSI_RESET
- if kw.get('readline_hints'):
- end_sequence = readline_wrap(end_sequence)
- return start_sequence + text + end_sequence
- else:
- return text
-
-
-def auto_encode(stream, text, *args, **kw):
- """
- Reliably write Unicode strings to the terminal.
-
- :param stream: The file-like object to write to (a value like
- :data:`sys.stdout` or :data:`sys.stderr`).
- :param text: The text to write to the stream (a string).
- :param args: Refer to :func:`~humanfriendly.text.format()`.
- :param kw: Refer to :func:`~humanfriendly.text.format()`.
-
- Renders the text using :func:`~humanfriendly.text.format()` and writes it
- to the given stream. If an :exc:`~exceptions.UnicodeEncodeError` is
- encountered in doing so, the text is encoded using :data:`DEFAULT_ENCODING`
- and the write is retried. The reasoning behind this rather blunt approach
- is that it's preferable to get output on the command line in the wrong
-    encoding than to have the Python program blow up with a
- :exc:`~exceptions.UnicodeEncodeError` exception.
- """
- text = format(text, *args, **kw)
- try:
- stream.write(text)
- except UnicodeEncodeError:
- stream.write(codecs.encode(text, DEFAULT_ENCODING))
-
-
-def clean_terminal_output(text):
- """
- Clean up the terminal output of a command.
-
- :param text: The raw text with special characters (a Unicode string).
- :returns: A list of Unicode strings (one for each line).
-
- This function emulates the effect of backspace (0x08), carriage return
- (0x0D) and line feed (0x0A) characters and the ANSI 'erase line' escape
- sequence on interactive terminals. It's intended to clean up command output
- that was originally meant to be rendered on an interactive terminal and
- that has been captured using e.g. the :man:`script` program [#]_ or the
- :mod:`pty` module [#]_.
-
- .. [#] My coloredlogs_ package supports the ``coloredlogs --to-html``
- command which uses :man:`script` to fool a subprocess into thinking
- that it's connected to an interactive terminal (in order to get it
- to emit ANSI escape sequences).
-
- .. [#] My capturer_ package uses the :mod:`pty` module to fool the current
- process and subprocesses into thinking they are connected to an
- interactive terminal (in order to get them to emit ANSI escape
- sequences).
-
- **Some caveats about the use of this function:**
-
- - Strictly speaking the effect of carriage returns cannot be emulated
- outside of an actual terminal due to the interaction between overlapping
- output, terminal widths and line wrapping. The goal of this function is
- to sanitize noise in terminal output while preserving useful output.
- Think of it as a useful and pragmatic but possibly lossy conversion.
-
- - The algorithm isn't smart enough to properly handle a pair of ANSI escape
- sequences that open before a carriage return and close after the last
- carriage return in a linefeed delimited string; the resulting string will
- contain only the closing end of the ANSI escape sequence pair. Tracking
- this kind of complexity requires a state machine and proper parsing.
-
- .. _capturer: https://pypi.org/project/capturer
- .. _coloredlogs: https://pypi.org/project/coloredlogs
- """
- cleaned_lines = []
- current_line = ''
- current_position = 0
- for token in CLEAN_OUTPUT_PATTERN.split(text):
- if token == '\r':
- # Seek back to the start of the current line.
- current_position = 0
- elif token == '\b':
- # Seek back one character in the current line.
- current_position = max(0, current_position - 1)
- else:
- if token == '\n':
- # Capture the current line.
- cleaned_lines.append(current_line)
- if token in ('\n', ANSI_ERASE_LINE):
- # Clear the current line.
- current_line = ''
- current_position = 0
- elif token:
- # Merge regular output into the current line.
- new_position = current_position + len(token)
- prefix = current_line[:current_position]
- suffix = current_line[new_position:]
- current_line = prefix + token + suffix
- current_position = new_position
- # Capture the last line (if any).
- cleaned_lines.append(current_line)
- # Remove any empty trailing lines.
- while cleaned_lines and not cleaned_lines[-1]:
- cleaned_lines.pop(-1)
- return cleaned_lines
-
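A small worked example of the emulation described above (illustrative only): the carriage return rewinds to the start of the line so the second progress message overwrites the first, and the trailing empty line is dropped.

.. code-block:: python

    from humanfriendly.terminal import clean_terminal_output

    assert clean_terminal_output('progress: 10%\rprogress: 100%\n') == ['progress: 100%']
    # The backspace moves the cursor back one position before '?' overwrites '!'.
    assert clean_terminal_output('hello!\b?\n\n') == ['hello?']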
-
-def connected_to_terminal(stream=None):
- """
- Check if a stream is connected to a terminal.
-
- :param stream: The stream to check (a file-like object,
- defaults to :data:`sys.stdout`).
- :returns: :data:`True` if the stream is connected to a terminal,
- :data:`False` otherwise.
-
- See also :func:`terminal_supports_colors()`.
- """
- stream = sys.stdout if stream is None else stream
- try:
- return stream.isatty()
- except Exception:
- return False
-
-
-@cached
-def enable_ansi_support():
- """
- Try to enable support for ANSI escape sequences (required on Windows).
-
- :returns: :data:`True` if ANSI is supported, :data:`False` otherwise.
-
-    This function checks for the following supported configurations, in the
- given order:
-
- 1. On Windows, if :func:`have_windows_native_ansi_support()` confirms
- native support for ANSI escape sequences :mod:`ctypes` will be used to
- enable this support.
-
- 2. On Windows, if the environment variable ``$ANSICON`` is set nothing is
- done because it is assumed that support for ANSI escape sequences has
- already been enabled via `ansicon <https://github.com/adoxa/ansicon>`_.
-
- 3. On Windows, an attempt is made to import and initialize the Python
- package :pypi:`colorama` instead (of course for this to work
- :pypi:`colorama` has to be installed).
-
- 4. On other platforms this function calls :func:`connected_to_terminal()`
- to determine whether ANSI escape sequences are supported (that is to
- say all platforms that are not Windows are assumed to support ANSI
- escape sequences natively, without weird contortions like above).
-
- This makes it possible to call :func:`enable_ansi_support()`
- unconditionally without checking the current platform.
-
- The :func:`~humanfriendly.decorators.cached` decorator is used to ensure
- that this function is only executed once, but its return value remains
- available on later calls.
- """
- if have_windows_native_ansi_support():
- import ctypes
- ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-11), 7)
- ctypes.windll.kernel32.SetConsoleMode(ctypes.windll.kernel32.GetStdHandle(-12), 7)
- return True
- elif on_windows():
- if 'ANSICON' in os.environ:
- return True
- try:
- import colorama
- colorama.init()
- return True
- except ImportError:
- return False
- else:
- return connected_to_terminal()
-
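A typical call site for the function above, in line with the docstring's suggestion to call it unconditionally (the message and color here are arbitrary examples):

.. code-block:: python

    from humanfriendly.terminal import ansi_wrap, enable_ansi_support, output

    # On Windows this flips the console mode (or falls back to colorama/ANSICON);
    # on other platforms it simply reports whether stdout is a terminal.
    if enable_ansi_support():
        output(ansi_wrap("Status: OK", color='green'))
    else:
        output("Status: OK")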
-
-def find_terminal_size():
- """
- Determine the number of lines and columns visible in the terminal.
-
- :returns: A tuple of two integers with the line and column count.
-
- The result of this function is based on the first of the following three
- methods that works:
-
- 1. First :func:`find_terminal_size_using_ioctl()` is tried,
- 2. then :func:`find_terminal_size_using_stty()` is tried,
- 3. finally :data:`DEFAULT_LINES` and :data:`DEFAULT_COLUMNS` are returned.
-
-    .. note:: The :func:`find_terminal_size()` function performs the steps
-              above every time it is called; the result is not cached. This is
- because the size of a virtual terminal can change at any time and
- the result of :func:`find_terminal_size()` should be correct.
-
- `Pre-emptive snarky comment`_: It's possible to cache the result
- of this function and use :mod:`signal.SIGWINCH <signal>` to
- refresh the cached values!
-
-              Response: As a library author I don't consider it the role of
-              the :mod:`humanfriendly.terminal` module to install a
-              process-wide signal handler ...
-
- .. _Pre-emptive snarky comment: http://blogs.msdn.com/b/oldnewthing/archive/2008/01/30/7315957.aspx
- """
- # The first method. Any of the standard streams may have been redirected
- # somewhere and there's no telling which, so we'll just try them all.
- for stream in sys.stdin, sys.stdout, sys.stderr:
- try:
- result = find_terminal_size_using_ioctl(stream)
- if min(result) >= 1:
- return result
- except Exception:
- pass
- # The second method.
- try:
- result = find_terminal_size_using_stty()
- if min(result) >= 1:
- return result
- except Exception:
- pass
- # Fall back to conservative defaults.
- return DEFAULT_LINES, DEFAULT_COLUMNS
-
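A usage sketch: because both probing methods are wrapped in exception handlers, the call is safe even without a controlling terminal (for example in a cron job or CI runner), where it falls back to the 25x80 defaults.

.. code-block:: python

    from humanfriendly.terminal import find_terminal_size

    lines, columns = find_terminal_size()
    # The exact numbers depend on the environment; the fallback guarantees
    # that both values are at least one.
    print("Terminal size: %i lines x %i columns" % (lines, columns))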
-
-def find_terminal_size_using_ioctl(stream):
- """
- Find the terminal size using :func:`fcntl.ioctl()`.
-
- :param stream: A stream connected to the terminal (a file object with a
- ``fileno`` attribute).
- :returns: A tuple of two integers with the line and column count.
-    :raises: This function can raise exceptions but I'm not going to document
-             them here; you should be using :func:`find_terminal_size()`.
-
- Based on an `implementation found on StackOverflow <http://stackoverflow.com/a/3010495/788200>`_.
- """
- if not HAVE_IOCTL:
- raise NotImplementedError("It looks like the `fcntl' module is not available!")
- h, w, hp, wp = struct.unpack('HHHH', fcntl.ioctl(stream, termios.TIOCGWINSZ, struct.pack('HHHH', 0, 0, 0, 0)))
- return h, w
-
-
-def find_terminal_size_using_stty():
- """
- Find the terminal size using the external command ``stty size``.
-
-    :returns: A tuple of two integers with the line and column count.
-    :raises: This function can raise exceptions but I'm not going to document
-             them here; you should be using :func:`find_terminal_size()`.
- """
- stty = subprocess.Popen(['stty', 'size'],
- stdout=subprocess.PIPE,
- stderr=subprocess.PIPE)
- stdout, stderr = stty.communicate()
- tokens = stdout.split()
- if len(tokens) != 2:
- raise Exception("Invalid output from `stty size'!")
- return tuple(map(int, tokens))
-
-
-def get_pager_command(text=None):
- """
- Get the command to show a text on the terminal using a pager.
-
- :param text: The text to print to the terminal (a string).
- :returns: A list of strings with the pager command and arguments.
-
- The use of a pager helps to avoid the wall of text effect where the user
- has to scroll up to see where the output began (not very user friendly).
-
- If the given text contains ANSI escape sequences the command ``less
- --RAW-CONTROL-CHARS`` is used, otherwise the environment variable
- ``$PAGER`` is used (if ``$PAGER`` isn't set :man:`less` is used).
-
- When the selected pager is :man:`less`, the following options are used to
- make the experience more user friendly:
-
- - ``--quit-if-one-screen`` causes :man:`less` to automatically exit if the
- entire text can be displayed on the first screen. This makes the use of a
- pager transparent for smaller texts (because the operator doesn't have to
- quit the pager).
-
- - ``--no-init`` prevents :man:`less` from clearing the screen when it
- exits. This ensures that the operator gets a chance to review the text
- (for example a usage message) after quitting the pager, while composing
- the next command.
- """
- # Compose the pager command.
- if text and ANSI_CSI in text:
- command_line = ['less', '--RAW-CONTROL-CHARS']
- else:
- command_line = [os.environ.get('PAGER', 'less')]
- # Pass some additional options to `less' (to make it more
- # user friendly) without breaking support for other pagers.
- if os.path.basename(command_line[0]) == 'less':
- command_line.append('--no-init')
- command_line.append('--quit-if-one-screen')
- return command_line
-
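Two illustrative invocations of the selection logic above; the sketch unsets ``$PAGER`` first so that the fallback to :man:`less` is predictable.

.. code-block:: python

    import os
    from humanfriendly.terminal import ANSI_RESET, get_pager_command

    os.environ.pop('PAGER', None)
    assert get_pager_command("plain text") == \
        ['less', '--no-init', '--quit-if-one-screen']
    # ANSI_RESET contains ANSI_CSI, which triggers --RAW-CONTROL-CHARS.
    assert get_pager_command("colored" + ANSI_RESET) == \
        ['less', '--RAW-CONTROL-CHARS', '--no-init', '--quit-if-one-screen']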
-
-@cached
-def have_windows_native_ansi_support():
- """
- Check if we're running on a Windows 10 release with native support for ANSI escape sequences.
-
- :returns: :data:`True` if so, :data:`False` otherwise.
-
- The :func:`~humanfriendly.decorators.cached` decorator is used as a minor
- performance optimization. Semantically this should have zero impact because
- the answer doesn't change in the lifetime of a computer process.
- """
- if on_windows():
- try:
- # I can't be 100% sure this will never break and I'm not in a
- # position to test it thoroughly either, so I decided that paying
- # the price of one additional try / except statement is worth the
- # additional peace of mind :-).
- components = tuple(int(c) for c in platform.version().split('.'))
- return components >= (10, 0, 14393)
- except Exception:
- pass
- return False
-
-
-def message(text, *args, **kw):
- """
- Print a formatted message to the standard error stream.
-
- For details about argument handling please refer to
- :func:`~humanfriendly.text.format()`.
-
- Renders the message using :func:`~humanfriendly.text.format()` and writes
- the resulting string (followed by a newline) to :data:`sys.stderr` using
- :func:`auto_encode()`.
- """
- auto_encode(sys.stderr, coerce_string(text) + '\n', *args, **kw)
-
-
-def output(text, *args, **kw):
- """
- Print a formatted message to the standard output stream.
-
- For details about argument handling please refer to
- :func:`~humanfriendly.text.format()`.
-
- Renders the message using :func:`~humanfriendly.text.format()` and writes
- the resulting string (followed by a newline) to :data:`sys.stdout` using
- :func:`auto_encode()`.
- """
- auto_encode(sys.stdout, coerce_string(text) + '\n', *args, **kw)
-
-
-def readline_strip(expr):
- """
- Remove `readline hints`_ from a string.
-
-    :param expr: The text to strip (a string).
- :returns: The stripped text.
- """
- return expr.replace('\001', '').replace('\002', '')
-
-
-def readline_wrap(expr):
- """
- Wrap an ANSI escape sequence in `readline hints`_.
-
-    :param expr: The text with the escape sequence to wrap (a string).
- :returns: The wrapped text.
-
- .. _readline hints: http://superuser.com/a/301355
- """
- return '\001' + expr + '\002'
-
-
-def show_pager(formatted_text, encoding=DEFAULT_ENCODING):
- """
- Print a large text to the terminal using a pager.
-
- :param formatted_text: The text to print to the terminal (a string).
- :param encoding: The name of the text encoding used to encode the formatted
- text if the formatted text is a Unicode string (a string,
- defaults to :data:`DEFAULT_ENCODING`).
-
- When :func:`connected_to_terminal()` returns :data:`True` a pager is used
- to show the text on the terminal, otherwise the text is printed directly
- without invoking a pager.
-
- The use of a pager helps to avoid the wall of text effect where the user
- has to scroll up to see where the output began (not very user friendly).
-
- Refer to :func:`get_pager_command()` for details about the command line
- that's used to invoke the pager.
- """
- if connected_to_terminal():
- # Make sure the selected pager command is available.
- command_line = get_pager_command(formatted_text)
- if which(command_line[0]):
- pager = subprocess.Popen(command_line, stdin=subprocess.PIPE)
- if is_unicode(formatted_text):
- formatted_text = formatted_text.encode(encoding)
- pager.communicate(input=formatted_text)
- return
- output(formatted_text)
-
-
-def terminal_supports_colors(stream=None):
- """
- Check if a stream is connected to a terminal that supports ANSI escape sequences.
-
- :param stream: The stream to check (a file-like object,
- defaults to :data:`sys.stdout`).
- :returns: :data:`True` if the terminal supports ANSI escape sequences,
- :data:`False` otherwise.
-
- This function was originally inspired by the implementation of
- `django.core.management.color.supports_color()
- <https://github.com/django/django/blob/master/django/core/management/color.py>`_
- but has since evolved significantly.
- """
- if on_windows():
- # On Windows support for ANSI escape sequences is not a given.
- have_ansicon = 'ANSICON' in os.environ
- have_colorama = 'colorama' in sys.modules
- have_native_support = have_windows_native_ansi_support()
- if not (have_ansicon or have_colorama or have_native_support):
- return False
- return connected_to_terminal(stream)
-
-
-def usage(usage_text):
- """
- Print a human friendly usage message to the terminal.
-
-    :param usage_text: The usage message to print (a string).
-
- This function does two things:
-
-    1. If :data:`sys.stdout` is connected to a terminal that supports ANSI
-       escape sequences (see :func:`terminal_supports_colors()`) then the
-       usage message is formatted using :func:`.format_usage()`.
- 2. The usage message is shown using a pager (see :func:`show_pager()`).
- """
- if terminal_supports_colors(sys.stdout):
- usage_text = format_usage(usage_text)
- show_pager(usage_text)
-
-
-def warning(text, *args, **kw):
- """
- Show a warning message on the terminal.
-
- For details about argument handling please refer to
- :func:`~humanfriendly.text.format()`.
-
- Renders the message using :func:`~humanfriendly.text.format()` and writes
- the resulting string (followed by a newline) to :data:`sys.stderr` using
- :func:`auto_encode()`.
-
- If :data:`sys.stderr` is connected to a terminal that supports colors,
- :func:`ansi_wrap()` is used to color the message in a red font (to make
- the warning stand out from surrounding text).
- """
- text = coerce_string(text)
- if terminal_supports_colors(sys.stderr):
- text = ansi_wrap(text, color='red')
- auto_encode(sys.stderr, text + '\n', *args, **kw)
-
-
-# Define aliases for backwards compatibility.
-define_aliases(
- module_name=__name__,
- # In humanfriendly 1.31 the find_meta_variables() and format_usage()
- # functions were extracted to the new module humanfriendly.usage.
- find_meta_variables='humanfriendly.usage.find_meta_variables',
- format_usage='humanfriendly.usage.format_usage',
- # In humanfriendly 8.0 the html_to_ansi() function and HTMLConverter
- # class were extracted to the new module humanfriendly.terminal.html.
- html_to_ansi='humanfriendly.terminal.html.html_to_ansi',
- HTMLConverter='humanfriendly.terminal.html.HTMLConverter',
-)
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/terminal/html.py b/contrib/python/humanfriendly/py3/humanfriendly/terminal/html.py
deleted file mode 100644
index 4214e09e70..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/terminal/html.py
+++ /dev/null
@@ -1,423 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: February 29, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""Convert HTML with simple text formatting to text with ANSI escape sequences."""
-
-# Standard library modules.
-import re
-
-# Modules included in our package.
-from humanfriendly.compat import HTMLParser, StringIO, name2codepoint, unichr
-from humanfriendly.text import compact_empty_lines
-from humanfriendly.terminal import ANSI_COLOR_CODES, ANSI_RESET, ansi_style
-
-# Public identifiers that require documentation.
-__all__ = ('HTMLConverter', 'html_to_ansi')
-
-
-def html_to_ansi(data, callback=None):
- """
- Convert HTML with simple text formatting to text with ANSI escape sequences.
-
- :param data: The HTML to convert (a string).
- :param callback: Optional callback to pass to :class:`HTMLConverter`.
- :returns: Text with ANSI escape sequences (a string).
-
- Please refer to the documentation of the :class:`HTMLConverter` class for
- details about the conversion process (like which tags are supported) and an
- example with a screenshot.
- """
- converter = HTMLConverter(callback=callback)
- return converter(data)
-
-
-class HTMLConverter(HTMLParser):
-
- """
- Convert HTML with simple text formatting to text with ANSI escape sequences.
-
- The following text styles are supported:
-
- - Bold: ``<b>``, ``<strong>`` and ``<span style="font-weight: bold;">``
- - Italic: ``<i>``, ``<em>`` and ``<span style="font-style: italic;">``
- - Strike-through: ``<del>``, ``<s>`` and ``<span style="text-decoration: line-through;">``
- - Underline: ``<ins>``, ``<u>`` and ``<span style="text-decoration: underline">``
-
- Colors can be specified as follows:
-
- - Foreground color: ``<span style="color: #RRGGBB;">``
- - Background color: ``<span style="background-color: #RRGGBB;">``
-
- Here's a small demonstration:
-
- .. code-block:: python
-
- from humanfriendly.text import dedent
- from humanfriendly.terminal import html_to_ansi
-
- print(html_to_ansi(dedent('''
- <b>Hello world!</b>
- <i>Is this thing on?</i>
- I guess I can <u>underline</u> or <s>strike-through</s> text?
- And what about <span style="color: red">color</span>?
- ''')))
-
- rainbow_colors = [
- '#FF0000', '#E2571E', '#FF7F00', '#FFFF00', '#00FF00',
- '#96BF33', '#0000FF', '#4B0082', '#8B00FF', '#FFFFFF',
- ]
- html_rainbow = "".join('<span style="color: %s">o</span>' % c for c in rainbow_colors)
- print(html_to_ansi("Let's try a rainbow: %s" % html_rainbow))
-
- Here's what the results look like:
-
- .. image:: images/html-to-ansi.png
-
- Some more details:
-
- - Nested tags are supported, within reasonable limits.
-
- - Text in ``<code>`` and ``<pre>`` tags will be highlighted in a
- different color from the main text (currently this is yellow).
-
- - ``<a href="URL">TEXT</a>`` is converted to the format "TEXT (URL)" where
- the uppercase symbols are highlighted in light blue with an underline.
-
- - ``<div>``, ``<p>`` and ``<pre>`` tags are considered block level tags
- and are wrapped in vertical whitespace to prevent their content from
- "running into" surrounding text. This may cause runs of multiple empty
- lines to be emitted. As a *workaround* the :func:`__call__()` method
- will automatically call :func:`.compact_empty_lines()` on the generated
- output before returning it to the caller. Of course this won't work
- when `output` is set to something like :data:`sys.stdout`.
-
- - ``<br>`` is converted to a single plain text line break.
-
- Implementation notes:
-
- - A list of dictionaries with style information is used as a stack where
- new styling can be pushed and a pop will restore the previous styling.
- When new styling is pushed, it is merged with (but overrides) the current
- styling.
-
- - If you're going to be converting a lot of HTML it might be useful from
- a performance standpoint to re-use an existing :class:`HTMLConverter`
-      object for unrelated HTML fragments; in this case take a look at the
- :func:`__call__()` method (it makes this use case very easy).
-
- .. versionadded:: 4.15
- :class:`humanfriendly.terminal.HTMLConverter` was added to the
- `humanfriendly` package during the initial development of my new
- `chat-archive <https://chat-archive.readthedocs.io/>`_ project, whose
- command line interface makes for a great demonstration of the
- flexibility that this feature provides (hint: check out how the search
- keyword highlighting combines with the regular highlighting).
- """
-
- BLOCK_TAGS = ('div', 'p', 'pre')
- """The names of tags that are padded with vertical whitespace."""
-
- def __init__(self, *args, **kw):
- """
- Initialize an :class:`HTMLConverter` object.
-
- :param callback: Optional keyword argument to specify a function that
- will be called to process text fragments before they
- are emitted on the output stream. Note that link text
- and preformatted text fragments are not processed by
- this callback.
- :param output: Optional keyword argument to redirect the output to the
- given file-like object. If this is not given a new
- :class:`~python3:io.StringIO` object is created.
- """
- # Hide our optional keyword arguments from the superclass.
- self.callback = kw.pop("callback", None)
- self.output = kw.pop("output", None)
- # Initialize the superclass.
- HTMLParser.__init__(self, *args, **kw)
-
- def __call__(self, data):
- """
- Reset the parser, convert some HTML and get the text with ANSI escape sequences.
-
- :param data: The HTML to convert to text (a string).
- :returns: The converted text (only in case `output` is
- a :class:`~python3:io.StringIO` object).
- """
- self.reset()
- self.feed(data)
- self.close()
- if isinstance(self.output, StringIO):
- return compact_empty_lines(self.output.getvalue())
-
- @property
- def current_style(self):
- """Get the current style from the top of the stack (a dictionary)."""
- return self.stack[-1] if self.stack else {}
-
- def close(self):
- """
- Close previously opened ANSI escape sequences.
-
- This method overrides the same method in the superclass to ensure that
- an :data:`.ANSI_RESET` code is emitted when parsing reaches the end of
- the input but a style is still active. This is intended to prevent
- malformed HTML from messing up terminal output.
- """
- if any(self.stack):
- self.output.write(ANSI_RESET)
- self.stack = []
- HTMLParser.close(self)
-
- def emit_style(self, style=None):
- """
- Emit an ANSI escape sequence for the given or current style to the output stream.
-
- :param style: A dictionary with arguments for :func:`.ansi_style()` or
- :data:`None`, in which case the style at the top of the
- stack is emitted.
- """
- # Clear the current text styles.
- self.output.write(ANSI_RESET)
- # Apply a new text style?
- style = self.current_style if style is None else style
- if style:
- self.output.write(ansi_style(**style))
-
- def handle_charref(self, value):
- """
- Process a decimal or hexadecimal numeric character reference.
-
- :param value: The decimal or hexadecimal value (a string).
- """
- self.output.write(unichr(int(value[1:], 16) if value.startswith('x') else int(value)))
-
- def handle_data(self, data):
- """
- Process textual data.
-
- :param data: The decoded text (a string).
- """
- if self.link_url:
- # Link text is captured literally so that we can reliably check
- # whether the text and the URL of the link are the same string.
- self.link_text = data
- elif self.callback and self.preformatted_text_level == 0:
- # Text that is not part of a link and not preformatted text is
- # passed to the user defined callback to allow for arbitrary
- # pre-processing.
- data = self.callback(data)
- # All text is emitted unmodified on the output stream.
- self.output.write(data)
-
- def handle_endtag(self, tag):
- """
- Process the end of an HTML tag.
-
- :param tag: The name of the tag (a string).
- """
- if tag in ('a', 'b', 'code', 'del', 'em', 'i', 'ins', 'pre', 's', 'strong', 'span', 'u'):
- old_style = self.current_style
- # The following conditional isn't necessary for well formed
- # HTML but prevents raising exceptions on malformed HTML.
- if self.stack:
- self.stack.pop(-1)
- new_style = self.current_style
- if tag == 'a':
- if self.urls_match(self.link_text, self.link_url):
- # Don't render the URL when it's part of the link text.
- self.emit_style(new_style)
- else:
- self.emit_style(new_style)
- self.output.write(' (')
- self.emit_style(old_style)
- self.output.write(self.render_url(self.link_url))
- self.emit_style(new_style)
- self.output.write(')')
- else:
- self.emit_style(new_style)
- if tag in ('code', 'pre'):
- self.preformatted_text_level -= 1
- if tag in self.BLOCK_TAGS:
- # Emit an empty line after block level tags.
- self.output.write('\n\n')
-
- def handle_entityref(self, name):
- """
- Process a named character reference.
-
- :param name: The name of the character reference (a string).
- """
- self.output.write(unichr(name2codepoint[name]))
-
- def handle_starttag(self, tag, attrs):
- """
- Process the start of an HTML tag.
-
- :param tag: The name of the tag (a string).
- :param attrs: A list of tuples with two strings each.
- """
- if tag in self.BLOCK_TAGS:
- # Emit an empty line before block level tags.
- self.output.write('\n\n')
- if tag == 'a':
- self.push_styles(color='blue', bright=True, underline=True)
- # Store the URL that the link points to for later use, so that we
- # can render the link text before the URL (with the reasoning that
- # this is the most intuitive way to present a link in a plain text
- # interface).
- self.link_url = next((v for n, v in attrs if n == 'href'), '')
- elif tag == 'b' or tag == 'strong':
- self.push_styles(bold=True)
- elif tag == 'br':
- self.output.write('\n')
- elif tag == 'code' or tag == 'pre':
- self.push_styles(color='yellow')
- self.preformatted_text_level += 1
- elif tag == 'del' or tag == 's':
- self.push_styles(strike_through=True)
- elif tag == 'em' or tag == 'i':
- self.push_styles(italic=True)
- elif tag == 'ins' or tag == 'u':
- self.push_styles(underline=True)
- elif tag == 'span':
- styles = {}
- css = next((v for n, v in attrs if n == 'style'), "")
- for rule in css.split(';'):
- name, _, value = rule.partition(':')
- name = name.strip()
- value = value.strip()
- if name == 'background-color':
- styles['background'] = self.parse_color(value)
- elif name == 'color':
- styles['color'] = self.parse_color(value)
- elif name == 'font-style' and value == 'italic':
- styles['italic'] = True
- elif name == 'font-weight' and value == 'bold':
- styles['bold'] = True
- elif name == 'text-decoration' and value == 'line-through':
- styles['strike_through'] = True
- elif name == 'text-decoration' and value == 'underline':
- styles['underline'] = True
- self.push_styles(**styles)
-
- def normalize_url(self, url):
- """
- Normalize a URL to enable string equality comparison.
-
- :param url: The URL to normalize (a string).
- :returns: The normalized URL (a string).
- """
- return re.sub('^mailto:', '', url)
-
- def parse_color(self, value):
- """
- Convert a CSS color to something that :func:`.ansi_style()` understands.
-
- :param value: A string like ``rgb(1,2,3)``, ``#AABBCC`` or ``yellow``.
- :returns: A color value supported by :func:`.ansi_style()` or :data:`None`.
- """
- # Parse an 'rgb(N,N,N)' expression.
- if value.startswith('rgb'):
- tokens = re.findall(r'\d+', value)
- if len(tokens) == 3:
- return tuple(map(int, tokens))
- # Parse an '#XXXXXX' expression.
- elif value.startswith('#'):
- value = value[1:]
- length = len(value)
- if length == 6:
- # Six hex digits (proper notation).
- return (
- int(value[:2], 16),
- int(value[2:4], 16),
- int(value[4:6], 16),
- )
- elif length == 3:
- # Three hex digits (shorthand).
- return (
- int(value[0], 16),
- int(value[1], 16),
- int(value[2], 16),
- )
- # Try to recognize a named color.
- value = value.lower()
- if value in ANSI_COLOR_CODES:
- return value
-
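The conversions performed by the method above, illustrated on a throwaway :class:`HTMLConverter` instance (the input values are arbitrary examples):

.. code-block:: python

    from humanfriendly.terminal.html import HTMLConverter

    converter = HTMLConverter()
    assert converter.parse_color('rgb(255, 0, 0)') == (255, 0, 0)
    assert converter.parse_color('#FF8000') == (255, 128, 0)
    # Shorthand hex digits are parsed individually, so 'F' becomes 15 (not 255).
    assert converter.parse_color('#F80') == (15, 8, 0)
    assert converter.parse_color('yellow') == 'yellow'
    assert converter.parse_color('bogus') is None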
- def push_styles(self, **changes):
- """
- Push new style information onto the stack.
-
- :param changes: Any keyword arguments are passed on to :func:`.ansi_style()`.
-
- This method is a helper for :func:`handle_starttag()`
- that does the following:
-
- 1. Make a copy of the current styles (from the top of the stack),
- 2. Apply the given `changes` to the copy of the current styles,
- 3. Add the new styles to the stack,
- 4. Emit the appropriate ANSI escape sequence to the output stream.
- """
- prototype = self.current_style
- if prototype:
- new_style = dict(prototype)
- new_style.update(changes)
- else:
- new_style = changes
- self.stack.append(new_style)
- self.emit_style(new_style)
-
- def render_url(self, url):
- """
- Prepare a URL for rendering on the terminal.
-
- :param url: The URL to simplify (a string).
- :returns: The simplified URL (a string).
-
- This method pre-processes a URL before rendering on the terminal. The
- following modifications are made:
-
- - The ``mailto:`` prefix is stripped.
- - Spaces are converted to ``%20``.
- - A trailing parenthesis is converted to ``%29``.
- """
- url = re.sub('^mailto:', '', url)
- url = re.sub(' ', '%20', url)
- url = re.sub(r'\)$', '%29', url)
- return url
-
- def reset(self):
- """
- Reset the state of the HTML parser and ANSI converter.
-
- When `output` is a :class:`~python3:io.StringIO` object a new
- instance will be created (and the old one garbage collected).
- """
- # Reset the state of the superclass.
- HTMLParser.reset(self)
- # Reset our instance variables.
- self.link_text = None
- self.link_url = None
- self.preformatted_text_level = 0
- if self.output is None or isinstance(self.output, StringIO):
- # If the caller specified something like output=sys.stdout then it
- # doesn't make much sense to negate that choice here in reset().
- self.output = StringIO()
- self.stack = []
-
- def urls_match(self, a, b):
- """
- Compare two URLs for equality using :func:`normalize_url()`.
-
- :param a: A string containing a URL.
- :param b: A string containing a URL.
- :returns: :data:`True` if the URLs are the same, :data:`False` otherwise.
-
- This method is used by :func:`handle_endtag()` to omit the URL of a
- hyperlink (``<a href="...">``) when the link text is that same URL.
- """
- return self.normalize_url(a) == self.normalize_url(b)
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/terminal/spinners.py b/contrib/python/humanfriendly/py3/humanfriendly/terminal/spinners.py
deleted file mode 100644
index e4dc55d302..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/terminal/spinners.py
+++ /dev/null
@@ -1,310 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 1, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Support for spinners that represent progress on interactive terminals.
-
-The :class:`Spinner` class shows a "spinner" on the terminal to let the user
-know that something is happening during long running operations that would
-otherwise be silent (leaving the user to wonder what they're waiting for).
-Below are some visual examples that should illustrate the point.
-
-**Simple spinners:**
-
- Here's a screen capture that shows the simplest form of spinner:
-
- .. image:: images/spinner-basic.gif
- :alt: Animated screen capture of a simple spinner.
-
- The following code was used to create the spinner above:
-
- .. code-block:: python
-
- import itertools
- import time
- from humanfriendly import Spinner
-
- with Spinner(label="Downloading") as spinner:
- for i in itertools.count():
- # Do something useful here.
- time.sleep(0.1)
- # Advance the spinner.
- spinner.step()
-
-**Spinners that show elapsed time:**
-
- Here's a spinner that shows the elapsed time since it started:
-
- .. image:: images/spinner-with-timer.gif
- :alt: Animated screen capture of a spinner showing elapsed time.
-
- The following code was used to create the spinner above:
-
- .. code-block:: python
-
- import itertools
- import time
- from humanfriendly import Spinner, Timer
-
- with Spinner(label="Downloading", timer=Timer()) as spinner:
- for i in itertools.count():
- # Do something useful here.
- time.sleep(0.1)
- # Advance the spinner.
- spinner.step()
-
-**Spinners that show progress:**
-
- Here's a spinner that shows a progress percentage:
-
- .. image:: images/spinner-with-progress.gif
- :alt: Animated screen capture of spinner showing progress.
-
- The following code was used to create the spinner above:
-
- .. code-block:: python
-
- import itertools
- import random
- import time
- from humanfriendly import Spinner, Timer
-
- with Spinner(label="Downloading", total=100) as spinner:
- progress = 0
- while progress < 100:
- # Do something useful here.
- time.sleep(0.1)
- # Advance the spinner.
- spinner.step(progress)
- # Determine the new progress value.
- progress += random.random() * 5
-
-If you want to provide user feedback during a long running operation but it's
-not practical to periodically call the :func:`~Spinner.step()` method consider
-using :class:`AutomaticSpinner` instead.
-
-As you may already have noticed in the examples above, :class:`Spinner` objects
-can be used as context managers to automatically call :func:`Spinner.clear()`
-when the spinner ends.
-"""
-
-# Standard library modules.
-import multiprocessing
-import sys
-import time
-
-# Modules included in our package.
-from humanfriendly import Timer
-from humanfriendly.deprecation import deprecated_args
-from humanfriendly.terminal import ANSI_ERASE_LINE
-
-# Public identifiers that require documentation.
-__all__ = ("AutomaticSpinner", "GLYPHS", "MINIMUM_INTERVAL", "Spinner")
-
-GLYPHS = ["-", "\\", "|", "/"]
-"""A list of strings with characters that together form a crude animation :-)."""
-
-MINIMUM_INTERVAL = 0.2
-"""Spinners are redrawn with a frequency no higher than this number (a floating point number of seconds)."""
-
-
-class Spinner(object):
-
- """Show a spinner on the terminal as a simple means of feedback to the user."""
-
- @deprecated_args('label', 'total', 'stream', 'interactive', 'timer')
- def __init__(self, **options):
- """
- Initialize a :class:`Spinner` object.
-
- :param label:
-
- The label for the spinner (a string or :data:`None`, defaults to
- :data:`None`).
-
- :param total:
-
- The expected number of steps (an integer or :data:`None`). If this is
- provided the spinner will show a progress percentage.
-
- :param stream:
-
- The output stream to show the spinner on (a file-like object,
- defaults to :data:`sys.stderr`).
-
- :param interactive:
-
- :data:`True` to enable rendering of the spinner, :data:`False` to
- disable (defaults to the result of ``stream.isatty()``).
-
- :param timer:
-
- A :class:`.Timer` object (optional). If this is given the spinner
- will show the elapsed time according to the timer.
-
- :param interval:
-
- The spinner will be updated at most once every this many seconds
- (a floating point number, defaults to :data:`MINIMUM_INTERVAL`).
-
- :param glyphs:
-
- A list of strings with single characters that are drawn in the same
- place in succession to implement a simple animated effect (defaults
- to :data:`GLYPHS`).
- """
- # Store initializer arguments.
- self.interactive = options.get('interactive')
- self.interval = options.get('interval', MINIMUM_INTERVAL)
- self.label = options.get('label')
- self.states = options.get('glyphs', GLYPHS)
- self.stream = options.get('stream', sys.stderr)
- self.timer = options.get('timer')
- self.total = options.get('total')
- # Define instance variables.
- self.counter = 0
- self.last_update = 0
- # Try to automatically discover whether the stream is connected to
- # a terminal, but don't fail if no isatty() method is available.
- if self.interactive is None:
- try:
- self.interactive = self.stream.isatty()
- except Exception:
- self.interactive = False
-
- def step(self, progress=0, label=None):
- """
- Advance the spinner by one step and redraw it.
-
- :param progress: The number of the current step, relative to the total
- given to the :class:`Spinner` constructor (an integer,
- optional). If not provided the spinner will not show
- progress.
- :param label: The label to use while redrawing (a string, optional). If
- not provided the label given to the :class:`Spinner`
- constructor is used instead.
-
- This method advances the spinner by one step without starting a new
- line, causing an animated effect which is very simple but much nicer
- than waiting for a prompt which is completely silent for a long time.
-
- .. note:: This method uses time based rate limiting to avoid redrawing
- the spinner too frequently. If you know you're dealing with
- code that will call :func:`step()` at a high frequency,
- consider using :func:`sleep()` to avoid creating the
- equivalent of a busy loop that's rate limiting the spinner
- 99% of the time.
- """
- if self.interactive:
- time_now = time.time()
- if time_now - self.last_update >= self.interval:
- self.last_update = time_now
- state = self.states[self.counter % len(self.states)]
- label = label or self.label
- if not label:
- raise Exception("No label set for spinner!")
- elif self.total and progress:
- label = "%s: %.2f%%" % (label, progress / (self.total / 100.0))
- elif self.timer and self.timer.elapsed_time > 2:
- label = "%s (%s)" % (label, self.timer.rounded)
- self.stream.write("%s %s %s ..\r" % (ANSI_ERASE_LINE, state, label))
- self.counter += 1
-
- def sleep(self):
- """
- Sleep for a short period before redrawing the spinner.
-
- This method is useful when you know you're dealing with code that will
- call :func:`step()` at a high frequency. It will sleep for the interval
- with which the spinner is redrawn (less than a second). This avoids
- creating the equivalent of a busy loop that's rate limiting the
- spinner 99% of the time.
-
- This method doesn't redraw the spinner, you still have to call
- :func:`step()` in order to do that.
- """
- time.sleep(MINIMUM_INTERVAL)
-
- def clear(self):
- """
- Clear the spinner.
-
- The next line which is shown on the standard output or error stream
- after calling this method will overwrite the line that used to show the
- spinner.
- """
- if self.interactive:
- self.stream.write(ANSI_ERASE_LINE)
-
- def __enter__(self):
- """
- Enable the use of spinners as context managers.
-
- :returns: The :class:`Spinner` object.
- """
- return self
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Clear the spinner when leaving the context."""
- self.clear()
-
-
-class AutomaticSpinner(object):
-
- """
- Show a spinner on the terminal that automatically starts animating.
-
- This class shows a spinner on the terminal (just like :class:`Spinner`
- does) that automatically starts animating. This class should be used as a
- context manager using the :keyword:`with` statement. The animation
- continues for as long as the context is active.
-
- :class:`AutomaticSpinner` provides an alternative to :class:`Spinner`
- for situations where it is not practical for the caller to periodically
- call :func:`~Spinner.step()` to advance the animation, e.g. because
- you're performing a blocking call and don't fancy implementing threading or
- subprocess handling just to provide some user feedback.
-
- This works using the :mod:`multiprocessing` module by spawning a
- subprocess to render the spinner while the main process is busy doing
- something more useful. By using the :keyword:`with` statement you're
- guaranteed that the subprocess is properly terminated at the appropriate
- time.
- """
-
- def __init__(self, label, show_time=True):
- """
- Initialize an automatic spinner.
-
- :param label: The label for the spinner (a string).
- :param show_time: If this is :data:`True` (the default) then the spinner
- shows elapsed time.
- """
- self.label = label
- self.show_time = show_time
- self.shutdown_event = multiprocessing.Event()
- self.subprocess = multiprocessing.Process(target=self._target)
-
- def __enter__(self):
- """Enable the use of automatic spinners as context managers."""
- self.subprocess.start()
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Enable the use of automatic spinners as context managers."""
- self.shutdown_event.set()
- self.subprocess.join()
-
- def _target(self):
- try:
- timer = Timer() if self.show_time else None
- with Spinner(label=self.label, timer=timer) as spinner:
- while not self.shutdown_event.is_set():
- spinner.step()
- spinner.sleep()
- except KeyboardInterrupt:
- # Swallow Control-C signals without producing a nasty traceback that
- # won't make any sense to the average user.
- pass
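A minimal usage sketch for :class:`AutomaticSpinner` (the label and the sleep are placeholders for a real blocking operation); the ``__main__`` guard keeps the example compatible with the ``spawn`` start method of :mod:`multiprocessing`.

.. code-block:: python

    import time
    from humanfriendly.terminal.spinners import AutomaticSpinner

    if __name__ == '__main__':
        # The animation runs in a subprocess while the main process blocks.
        with AutomaticSpinner(label="Crunching numbers"):
            time.sleep(2)  # stand-in for a blocking call without feedback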
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/testing.py b/contrib/python/humanfriendly/py3/humanfriendly/testing.py
deleted file mode 100644
index f6abddf074..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/testing.py
+++ /dev/null
@@ -1,669 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: March 6, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Utility classes and functions that make it easy to write :mod:`unittest` compatible test suites.
-
-Over the years I've developed the habit of writing test suites for Python
-projects using the :mod:`unittest` module. During those years I've come to know
-:pypi:`pytest` and in fact I use :pypi:`pytest` to run my test suites (due to
-its much better error reporting) but I've yet to publish a test suite that
-*requires* :pypi:`pytest`. I have several reasons for keeping it that way:
-
-- It's nice to keep my test suites as simple and accessible as possible and
- not requiring a specific test runner is part of that attitude.
-
-- Whereas :mod:`unittest` is quite explicit, :pypi:`pytest` contains a lot of
- magic, which kind of contradicts the Python mantra "explicit is better than
- implicit" (IMHO).
-"""
-
-# Standard library modules.
-import functools
-import logging
-import os
-import pipes
-import shutil
-import sys
-import tempfile
-import time
-import unittest
-
-# Modules included in our package.
-from humanfriendly.compat import StringIO
-from humanfriendly.text import random_string
-
-# Initialize a logger for this module.
-logger = logging.getLogger(__name__)
-
-# A unique object reference used to detect missing attributes.
-NOTHING = object()
-
-# Public identifiers that require documentation.
-__all__ = (
- 'CallableTimedOut',
- 'CaptureBuffer',
- 'CaptureOutput',
- 'ContextManager',
- 'CustomSearchPath',
- 'MockedProgram',
- 'PatchedAttribute',
- 'PatchedItem',
- 'TemporaryDirectory',
- 'TestCase',
- 'configure_logging',
- 'make_dirs',
- 'retry',
- 'run_cli',
- 'skip_on_raise',
- 'touch',
-)
-
-
-def configure_logging(log_level=logging.DEBUG):
- """configure_logging(log_level=logging.DEBUG)
- Automatically configure logging to the terminal.
-
- :param log_level: The log verbosity (a number, defaults
- to :mod:`logging.DEBUG <logging>`).
-
- When :mod:`coloredlogs` is installed :func:`coloredlogs.install()` will be
- used to configure logging to the terminal. When this fails with an
- :exc:`~exceptions.ImportError` then :func:`logging.basicConfig()` is used
- as a fall back.
- """
- try:
- import coloredlogs
- coloredlogs.install(level=log_level)
- except ImportError:
- logging.basicConfig(
- level=log_level,
- format='%(asctime)s %(name)s[%(process)d] %(levelname)s %(message)s',
- datefmt='%Y-%m-%d %H:%M:%S')
-
-
-def make_dirs(pathname):
- """
- Create missing directories.
-
- :param pathname: The pathname of a directory (a string).
- """
- if not os.path.isdir(pathname):
- os.makedirs(pathname)
-
-
-def retry(func, timeout=60, exc_type=AssertionError):
- """retry(func, timeout=60, exc_type=AssertionError)
- Retry a function until assertions no longer fail.
-
- :param func: A callable. When the callable returns
- :data:`False` it will also be retried.
- :param timeout: The number of seconds after which to abort (a number,
- defaults to 60).
- :param exc_type: The type of exceptions to retry (defaults
- to :exc:`~exceptions.AssertionError`).
- :returns: The value returned by `func`.
- :raises: Once the timeout has expired :func:`retry()` will raise the
- previously retried assertion error. When `func` keeps returning
- :data:`False` until `timeout` expires :exc:`CallableTimedOut`
- will be raised.
-
- This function sleeps between retries to avoid claiming CPU cycles we don't
- need. It starts by sleeping for 0.1 second but adjusts this to one second
- as the number of retries grows.
- """
- pause = 0.1
- timeout += time.time()
- while True:
- try:
- result = func()
- if result is not False:
- return result
- except exc_type:
- if time.time() > timeout:
- raise
- else:
- if time.time() > timeout:
- raise CallableTimedOut()
- time.sleep(pause)
- if pause < 1:
- pause *= 2
-
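A small sketch of the retry loop above: the assertion keeps failing (and is retried with growing pauses) until enough time has passed, after which the callable's return value is passed through.

.. code-block:: python

    import time
    from humanfriendly.testing import retry

    started = time.time()

    def check_elapsed():
        assert time.time() - started > 0.5
        return 'done'

    # Retries AssertionError (the default exc_type) until the check passes.
    assert retry(check_elapsed, timeout=10) == 'done'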
-
-def run_cli(entry_point, *arguments, **options):
- """
- Test a command line entry point.
-
- :param entry_point: The function that implements the command line interface
- (a callable).
- :param arguments: Any positional arguments (strings) become the command
- line arguments (:data:`sys.argv` items 1-N).
- :param options: The following keyword arguments are supported:
-
- **capture**
- Whether to use :class:`CaptureOutput`. Defaults
- to :data:`True` but can be disabled by passing
- :data:`False` instead.
- **input**
- Refer to :class:`CaptureOutput`.
- **merged**
- Refer to :class:`CaptureOutput`.
- **program_name**
- Used to set :data:`sys.argv` item 0.
- :returns: A tuple with two values:
-
- 1. The return code (an integer).
- 2. The captured output (a string).
- """
- # Add the `program_name' option to the arguments.
- arguments = list(arguments)
- arguments.insert(0, options.pop('program_name', sys.executable))
- # Log the command line arguments (and the fact that we're about to call the
- # command line entry point function).
- logger.debug("Calling command line entry point with arguments: %s", arguments)
- # Prepare to capture the return code and output even if the command line
- # interface raises an exception (whether the exception type is SystemExit
- # or something else).
- returncode = 0
- stdout = None
- stderr = None
- try:
- # Temporarily override sys.argv.
- with PatchedAttribute(sys, 'argv', arguments):
- # Manipulate the standard input/output/error streams?
- options['enabled'] = options.pop('capture', True)
- with CaptureOutput(**options) as capturer:
- try:
- # Call the command line interface.
- entry_point()
- finally:
- # Get the output even if an exception is raised.
- stdout = capturer.stdout.getvalue()
- stderr = capturer.stderr.getvalue()
- # Reconfigure logging to the terminal because it is very
- # likely that the entry point function has changed the
- # configured log level.
- configure_logging()
- except BaseException as e:
- if isinstance(e, SystemExit):
- logger.debug("Intercepting return code %s from SystemExit exception.", e.code)
- returncode = e.code
- else:
- logger.warning("Defaulting return code to 1 due to raised exception.", exc_info=True)
- returncode = 1
- else:
- logger.debug("Command line entry point returned successfully!")
- # Always log the output captured on stdout/stderr, to make it easier to
- # diagnose test failures (but avoid duplicate logging when merged=True).
- is_merged = options.get('merged', False)
- merged_streams = [('merged streams', stdout)]
- separate_streams = [('stdout', stdout), ('stderr', stderr)]
- streams = merged_streams if is_merged else separate_streams
- for name, value in streams:
- if value:
- logger.debug("Output on %s:\n%s", name, value)
- else:
- logger.debug("No output on %s.", name)
- return returncode, stdout
-
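An illustrative test built on the helper above; ``main()`` is a made-up stand-in for a real command line entry point.

.. code-block:: python

    import sys
    from humanfriendly.testing import run_cli

    def main():
        # Hypothetical entry point: echo the arguments and exit successfully.
        print("arguments: %s" % sys.argv[1:])
        sys.exit(0)

    returncode, output = run_cli(main, '--verbose', 'demo')
    assert returncode == 0
    assert "['--verbose', 'demo']" in output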
-
-def skip_on_raise(*exc_types):
- """
-    Decorate a test function to translate specific exception types to :exc:`unittest.SkipTest`.
-
- :param exc_types: One or more positional arguments give the exception
- types to be translated to :exc:`unittest.SkipTest`.
- :returns: A decorator function specialized to `exc_types`.
- """
- def decorator(function):
- @functools.wraps(function)
- def wrapper(*args, **kw):
- try:
- return function(*args, **kw)
- except exc_types as e:
- logger.debug("Translating exception to unittest.SkipTest ..", exc_info=True)
- raise unittest.SkipTest("skipping test because %s was raised" % type(e))
- return wrapper
- return decorator
-
-
-def touch(filename):
- """
- The equivalent of the UNIX :man:`touch` program in Python.
-
- :param filename: The pathname of the file to touch (a string).
-
- Note that missing directories are automatically created using
- :func:`make_dirs()`.
- """
- make_dirs(os.path.dirname(filename))
- with open(filename, 'a'):
- os.utime(filename, None)
-
-
-class CallableTimedOut(Exception):
-
- """Raised by :func:`retry()` when the timeout expires."""
-
-
-class ContextManager(object):
-
- """Base class to enable composition of context managers."""
-
- def __enter__(self):
- """Enable use as context managers."""
- return self
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Enable use as context managers."""
-
-
-class PatchedAttribute(ContextManager):
-
- """Context manager that temporary replaces an object attribute using :func:`setattr()`."""
-
- def __init__(self, obj, name, value):
- """
- Initialize a :class:`PatchedAttribute` object.
-
- :param obj: The object to patch.
- :param name: An attribute name.
- :param value: The value to set.
- """
- self.object_to_patch = obj
- self.attribute_to_patch = name
- self.patched_value = value
- self.original_value = NOTHING
-
- def __enter__(self):
- """
- Replace (patch) the attribute.
-
- :returns: The object whose attribute was patched.
- """
- # Enable composition of context managers.
- super(PatchedAttribute, self).__enter__()
- # Patch the object's attribute.
- self.original_value = getattr(self.object_to_patch, self.attribute_to_patch, NOTHING)
- setattr(self.object_to_patch, self.attribute_to_patch, self.patched_value)
- return self.object_to_patch
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Restore the attribute to its original value."""
- # Enable composition of context managers.
- super(PatchedAttribute, self).__exit__(exc_type, exc_value, traceback)
- # Restore the object's attribute.
- if self.original_value is NOTHING:
- delattr(self.object_to_patch, self.attribute_to_patch)
- else:
- setattr(self.object_to_patch, self.attribute_to_patch, self.original_value)
-
-
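-# A minimal usage sketch (attribute name and value chosen for illustration):
-# the original attribute is restored when the block exits.
-def _example_patched_attribute():
-    import sys
-    with PatchedAttribute(sys, 'platform', 'mocked-platform') as patched:
-        assert patched.platform == 'mocked-platform'
-    assert sys.platform != 'mocked-platform'
-
-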
-class PatchedItem(ContextManager):
-
- """Context manager that temporary replaces an object item using :meth:`~object.__setitem__()`."""
-
- def __init__(self, obj, item, value):
- """
- Initialize a :class:`PatchedItem` object.
-
- :param obj: The object to patch.
- :param item: The item to patch.
- :param value: The value to set.
- """
- self.object_to_patch = obj
- self.item_to_patch = item
- self.patched_value = value
- self.original_value = NOTHING
-
- def __enter__(self):
- """
- Replace (patch) the item.
-
- :returns: The object whose item was patched.
- """
- # Enable composition of context managers.
- super(PatchedItem, self).__enter__()
- # Patch the object's item.
- try:
- self.original_value = self.object_to_patch[self.item_to_patch]
- except KeyError:
- self.original_value = NOTHING
- self.object_to_patch[self.item_to_patch] = self.patched_value
- return self.object_to_patch
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Restore the item to its original value."""
- # Enable composition of context managers.
- super(PatchedItem, self).__exit__(exc_type, exc_value, traceback)
- # Restore the object's item.
- if self.original_value is NOTHING:
- del self.object_to_patch[self.item_to_patch]
- else:
- self.object_to_patch[self.item_to_patch] = self.original_value
-
-
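-# A minimal usage sketch (the variable name is made up and assumed not to be
-# set already): the original mapping is restored when the block exits.
-def _example_patched_item():
-    import os
-    with PatchedItem(os.environ, 'HYPOTHETICAL_VARIABLE', '42'):
-        assert os.environ['HYPOTHETICAL_VARIABLE'] == '42'
-    assert 'HYPOTHETICAL_VARIABLE' not in os.environ
-
-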
-class TemporaryDirectory(ContextManager):
-
- """
- Easy temporary directory creation & cleanup using the :keyword:`with` statement.
-
- Here's an example of how to use this:
-
- .. code-block:: python
-
- with TemporaryDirectory() as directory:
- # Do something useful here.
- assert os.path.isdir(directory)
- """
-
- def __init__(self, **options):
- """
- Initialize a :class:`TemporaryDirectory` object.
-
- :param options: Any keyword arguments are passed on to
- :func:`tempfile.mkdtemp()`.
- """
- self.mkdtemp_options = options
- self.temporary_directory = None
-
- def __enter__(self):
- """
- Create the temporary directory using :func:`tempfile.mkdtemp()`.
-
- :returns: The pathname of the directory (a string).
- """
- # Enable composition of context managers.
- super(TemporaryDirectory, self).__enter__()
- # Create the temporary directory.
- self.temporary_directory = tempfile.mkdtemp(**self.mkdtemp_options)
- return self.temporary_directory
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Cleanup the temporary directory using :func:`shutil.rmtree()`."""
- # Enable composition of context managers.
- super(TemporaryDirectory, self).__exit__(exc_type, exc_value, traceback)
- # Cleanup the temporary directory.
- if self.temporary_directory is not None:
- shutil.rmtree(self.temporary_directory)
- self.temporary_directory = None
-
-
-class MockedHomeDirectory(PatchedItem, TemporaryDirectory):
-
- """
- Context manager to temporarily change ``$HOME`` (the current user's profile directory).
-
- This class is a composition of the :class:`PatchedItem` and
- :class:`TemporaryDirectory` context managers.
- """
-
- def __init__(self):
- """Initialize a :class:`MockedHomeDirectory` object."""
- PatchedItem.__init__(self, os.environ, 'HOME', os.environ.get('HOME'))
- TemporaryDirectory.__init__(self)
-
- def __enter__(self):
- """
- Activate the custom ``$HOME``.
-
- :returns: The pathname of the temporary directory
- that has become ``$HOME`` (a string).
- """
- # Get the temporary directory.
- directory = TemporaryDirectory.__enter__(self)
- # Override the value to patch now that we have
- # the pathname of the temporary directory.
- self.patched_value = directory
- # Temporarily patch $HOME.
- PatchedItem.__enter__(self)
- # Pass the pathname of the temporary directory to the caller.
- return directory
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Deactivate the custom ``$HOME``."""
- super(MockedHomeDirectory, self).__exit__(exc_type, exc_value, traceback)
-
-
-class CustomSearchPath(PatchedItem, TemporaryDirectory):
-
- """
- Context manager to temporarily customize ``$PATH`` (the executable search path).
-
- This class is a composition of the :class:`PatchedItem` and
- :class:`TemporaryDirectory` context managers.
- """
-
- def __init__(self, isolated=False):
- """
- Initialize a :class:`CustomSearchPath` object.
-
- :param isolated: :data:`True` to clear the original search path,
- :data:`False` to add the temporary directory to the
- start of the search path.
- """
- # Initialize our own instance variables.
- self.isolated_search_path = isolated
- # Selectively initialize our superclasses.
- PatchedItem.__init__(self, os.environ, 'PATH', self.current_search_path)
- TemporaryDirectory.__init__(self)
-
- def __enter__(self):
- """
- Activate the custom ``$PATH``.
-
- :returns: The pathname of the directory that has
- been added to ``$PATH`` (a string).
- """
- # Get the temporary directory.
- directory = TemporaryDirectory.__enter__(self)
- # Override the value to patch now that we have
- # the pathname of the temporary directory.
- self.patched_value = (
- directory if self.isolated_search_path
- else os.pathsep.join([directory] + self.current_search_path.split(os.pathsep))
- )
- # Temporarily patch the $PATH.
- PatchedItem.__enter__(self)
- # Pass the pathname of the temporary directory to the caller
- # because they may want to `install' custom executables.
- return directory
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Deactivate the custom ``$PATH``."""
- super(CustomSearchPath, self).__exit__(exc_type, exc_value, traceback)
-
- @property
- def current_search_path(self):
- """The value of ``$PATH`` or :data:`os.defpath` (a string)."""
- return os.environ.get('PATH', os.defpath)
-
-
-class MockedProgram(CustomSearchPath):
-
- """
- Context manager to mock the existence of a program (executable).
-
- This class extends the functionality of :class:`CustomSearchPath`.
- """
-
- def __init__(self, name, returncode=0, script=None):
- """
- Initialize a :class:`MockedProgram` object.
-
- :param name: The name of the program (a string).
- :param returncode: The return code that the program should emit (a
- number, defaults to zero).
- :param script: Shell script code to include in the mocked program (a
- string or :data:`None`). This can be used to mock a
- program that is expected to generate specific output.
- """
- # Initialize our own instance variables.
- self.program_name = name
- self.program_returncode = returncode
- self.program_script = script
- self.program_signal_file = None
- # Initialize our superclasses.
- super(MockedProgram, self).__init__()
-
- def __enter__(self):
- """
- Create the mock program.
-
- :returns: The pathname of the directory that has
- been added to ``$PATH`` (a string).
- """
- directory = super(MockedProgram, self).__enter__()
- self.program_signal_file = os.path.join(directory, 'program-was-run-%s' % random_string(10))
- pathname = os.path.join(directory, self.program_name)
- with open(pathname, 'w') as handle:
- handle.write('#!/bin/sh\n')
- handle.write('echo > %s\n' % pipes.quote(self.program_signal_file))
- if self.program_script:
- handle.write('%s\n' % self.program_script.strip())
- handle.write('exit %i\n' % self.program_returncode)
- os.chmod(pathname, 0o755)
- return directory
-
- def __exit__(self, *args, **kw):
- """
- Ensure that the mock program was run.
-
- :raises: :exc:`~exceptions.AssertionError` when
- the mock program hasn't been run.
- """
- try:
- assert self.program_signal_file and os.path.isfile(self.program_signal_file), \
- ("It looks like %r was never run!" % self.program_name)
- finally:
- # Run the superclass cleanup, but don't `return` from inside the
- # `finally` clause: a `return` there would silently swallow the
- # AssertionError raised above.
- result = super(MockedProgram, self).__exit__(*args, **kw)
- return result
-
-
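-# A minimal usage sketch (program name and script are hypothetical; POSIX only
-# because the generated mock is a '/bin/sh' script): the fake executable is
-# available on $PATH inside the block and must be invoked before it exits.
-def _example_mocked_program():
-    import subprocess
-    with MockedProgram('frobnicate', returncode=0, script='echo mocked output'):
-        assert subprocess.call(['frobnicate']) == 0
-
-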
-class CaptureOutput(ContextManager):
-
- """
- Context manager that captures what's written to :data:`sys.stdout` and :data:`sys.stderr`.
-
- .. attribute:: stdin
-
- The :class:`~humanfriendly.compat.StringIO` object used to feed the standard input stream.
-
- .. attribute:: stdout
-
- The :class:`CaptureBuffer` object used to capture the standard output stream.
-
- .. attribute:: stderr
-
- The :class:`CaptureBuffer` object used to capture the standard error stream.
- """
-
- def __init__(self, merged=False, input='', enabled=True):
- """
- Initialize a :class:`CaptureOutput` object.
-
- :param merged: :data:`True` to merge the streams,
- :data:`False` to capture them separately.
- :param input: The data that reads from :data:`sys.stdin`
- should return (a string).
- :param enabled: :data:`True` to enable capturing (the default),
- :data:`False` otherwise. This makes it easy to
- unconditionally use :class:`CaptureOutput` in
- a :keyword:`with` block while preserving the
- choice to opt out of capturing output.
- """
- self.stdin = StringIO(input)
- self.stdout = CaptureBuffer()
- self.stderr = self.stdout if merged else CaptureBuffer()
- self.patched_attributes = []
- if enabled:
- self.patched_attributes.extend(
- PatchedAttribute(sys, name, getattr(self, name))
- for name in ('stdin', 'stdout', 'stderr')
- )
-
- def __enter__(self):
- """Start capturing what's written to :data:`sys.stdout` and :data:`sys.stderr`."""
- super(CaptureOutput, self).__enter__()
- for context in self.patched_attributes:
- context.__enter__()
- return self
-
- def __exit__(self, exc_type=None, exc_value=None, traceback=None):
- """Stop capturing what's written to :data:`sys.stdout` and :data:`sys.stderr`."""
- super(CaptureOutput, self).__exit__(exc_type, exc_value, traceback)
- for context in self.patched_attributes:
- context.__exit__(exc_type, exc_value, traceback)
-
- def get_lines(self):
- """Get the contents of :attr:`stdout` split into separate lines."""
- return self.get_text().splitlines()
-
- def get_text(self):
- """Get the contents of :attr:`stdout` as a Unicode string."""
- return self.stdout.get_text()
-
- def getvalue(self):
- """Get the text written to :data:`sys.stdout`."""
- return self.stdout.getvalue()
-
-
-class CaptureBuffer(StringIO):
-
- """
- Helper for :class:`CaptureOutput` to provide an easy to use API.
-
- The two methods defined by this subclass were specifically chosen to match
- the names of the methods provided by my :pypi:`capturer` package which
- serves a similar role as :class:`CaptureOutput` but knows how to simulate
- an interactive terminal (tty).
- """
-
- def get_lines(self):
- """Get the contents of the buffer split into separate lines."""
- return self.get_text().splitlines()
-
- def get_text(self):
- """Get the contents of the buffer as a Unicode string."""
- return self.getvalue()
-
-
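-# A minimal usage sketch of the two classes above (output text chosen for
-# illustration): print() writes to the CaptureBuffer patched over sys.stdout.
-def _example_capture_output():
-    with CaptureOutput() as capturer:
-        print("hello world")
-    assert capturer.get_text().strip() == "hello world"
-
-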
-class TestCase(unittest.TestCase):
-
- """Subclass of :class:`unittest.TestCase` with automatic logging and other miscellaneous features."""
-
- def __init__(self, *args, **kw):
- """
- Initialize a :class:`TestCase` object.
-
- Any positional and/or keyword arguments are passed on to the
- initializer of the superclass.
- """
- super(TestCase, self).__init__(*args, **kw)
-
- def setUp(self, log_level=logging.DEBUG):
- """setUp(log_level=logging.DEBUG)
- Automatically configure logging to the terminal.
-
- :param log_level: Refer to :func:`configure_logging()`.
-
- The :func:`setUp()` method is automatically called by
- :class:`unittest.TestCase` before each test method starts.
- It does two things:
-
- - Logging to the terminal is configured using
- :func:`configure_logging()`.
-
- - Before the test method starts, a newline is emitted to separate the
- name of the test method (which will be printed to the terminal by
- :mod:`unittest` or :pypi:`pytest`) from the first line of logging
- output that the test method is likely going to generate.
- """
- # Configure logging to the terminal.
- configure_logging(log_level)
- # Separate the name of the test method (printed by the superclass
- # and/or py.test without a newline at the end) from the first line of
- # logging output that the test method is likely going to generate.
- sys.stderr.write("\n")
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/text.py b/contrib/python/humanfriendly/py3/humanfriendly/text.py
deleted file mode 100644
index a257a6a189..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/text.py
+++ /dev/null
@@ -1,449 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: December 1, 2020
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Simple text manipulation functions.
-
-The :mod:`~humanfriendly.text` module contains simple functions to manipulate text:
-
-- The :func:`concatenate()` and :func:`pluralize()` functions make it easy to
- generate human friendly output.
-
-- The :func:`format()`, :func:`compact()` and :func:`dedent()` functions
- provide a clean and simple to use syntax for composing large text fragments
- with interpolated variables.
-
-- The :func:`tokenize()` function parses simple user input.
-"""
-
-# Standard library modules.
-import numbers
-import random
-import re
-import string
-import textwrap
-
-# Public identifiers that require documentation.
-__all__ = (
- 'compact',
- 'compact_empty_lines',
- 'concatenate',
- 'dedent',
- 'format',
- 'generate_slug',
- 'is_empty_line',
- 'join_lines',
- 'pluralize',
- 'pluralize_raw',
- 'random_string',
- 'split',
- 'split_paragraphs',
- 'tokenize',
- 'trim_empty_lines',
-)
-
-
-def compact(text, *args, **kw):
- '''
- Compact whitespace in a string.
-
- Trims leading and trailing whitespace, replaces runs of whitespace
- characters with a single space and interpolates any arguments using
- :func:`format()`.
-
- :param text: The text to compact (a string).
- :param args: Any positional arguments are interpolated using :func:`format()`.
- :param kw: Any keyword arguments are interpolated using :func:`format()`.
- :returns: The compacted text (a string).
-
- Here's an example of how I like to use the :func:`compact()` function, taken
- from a random unrelated project I'm working on at the moment::
-
- raise PortDiscoveryError(compact("""
- Failed to discover port(s) that Apache is listening on!
- Maybe I'm parsing the wrong configuration file? ({filename})
- """, filename=self.ports_config))
-
- The combination of :func:`compact()` and Python's multi line strings allows
- me to write long text fragments with interpolated variables that are easy
- to write, easy to read and work well with Python's whitespace
- sensitivity.
- '''
- non_whitespace_tokens = text.split()
- compacted_text = ' '.join(non_whitespace_tokens)
- return format(compacted_text, *args, **kw)
-
-
-def compact_empty_lines(text):
- """
- Replace repeating empty lines with a single empty line (similar to ``cat -s``).
-
- :param text: The text in which to compact empty lines (a string).
- :returns: The text with empty lines compacted (a string).
- """
- i = 0
- lines = text.splitlines(True)
- while i < len(lines):
- if i > 0 and is_empty_line(lines[i - 1]) and is_empty_line(lines[i]):
- lines.pop(i)
- else:
- i += 1
- return ''.join(lines)
-
-
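-# A minimal sketch of the function above (input chosen for illustration):
-# consecutive empty lines collapse into a single empty line, like cat -s.
-def _example_compact_empty_lines():
-    assert compact_empty_lines("a\n\n\n\nb\n") == "a\n\nb\n"
-
-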
-def concatenate(items, conjunction='and', serial_comma=False):
- """
- Concatenate a list of items in a human friendly way.
-
- :param items:
-
- A sequence of strings.
-
- :param conjunction:
-
- The word to use before the last item (a string, defaults to "and").
-
- :param serial_comma:
-
- :data:`True` to use a `serial comma`_, :data:`False` otherwise
- (defaults to :data:`False`).
-
- :returns:
-
- A single string.
-
- >>> from humanfriendly.text import concatenate
- >>> concatenate(["eggs", "milk", "bread"])
- 'eggs, milk and bread'
-
- .. _serial comma: https://en.wikipedia.org/wiki/Serial_comma
- """
- items = list(items)
- if len(items) > 1:
- final_item = items.pop()
- formatted = ', '.join(items)
- if serial_comma:
- formatted += ','
- return ' '.join([formatted, conjunction, final_item])
- elif items:
- return items[0]
- else:
- return ''
-
-
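-# A short follow-up sketch showing the serial_comma option (example values):
-def _example_concatenate_serial_comma():
-    assert concatenate(["eggs", "milk", "bread"], serial_comma=True) == 'eggs, milk, and bread'
-
-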
-def dedent(text, *args, **kw):
- """
- Dedent a string (remove common leading whitespace from all lines).
-
- Removes common leading whitespace from all lines in the string using
- :func:`textwrap.dedent()`, removes leading and trailing empty lines using
- :func:`trim_empty_lines()` and interpolates any arguments using
- :func:`format()`.
-
- :param text: The text to dedent (a string).
- :param args: Any positional arguments are interpolated using :func:`format()`.
- :param kw: Any keyword arguments are interpolated using :func:`format()`.
- :returns: The dedented text (a string).
-
- The :func:`compact()` function's documentation contains an example of how I
- like to use the :func:`compact()` and :func:`dedent()` functions. The main
- difference is that I use :func:`compact()` for text that will be presented
- to the user (where whitespace is not so significant) and :func:`dedent()`
- for data file and code generation tasks (where newlines and indentation are
- very significant).
- """
- dedented_text = textwrap.dedent(text)
- trimmed_text = trim_empty_lines(dedented_text)
- return format(trimmed_text, *args, **kw)
-
-
-def format(text, *args, **kw):
- """
- Format a string using the string formatting operator and/or :meth:`str.format()`.
-
- :param text: The text to format (a string).
- :param args: Any positional arguments are interpolated into the text using
- the string formatting operator (``%``). If no positional
- arguments are given no interpolation is done.
- :param kw: Any keyword arguments are interpolated into the text using the
- :meth:`str.format()` function. If no keyword arguments are given
- no interpolation is done.
- :returns: The text with any positional and/or keyword arguments
- interpolated (a string).
-
- The implementation of this function is so trivial that it seems silly to
- even bother writing and documenting it. Justifying this requires some
- context :-).
-
- **Why format() instead of the string formatting operator?**
-
- For really simple string interpolation Python's string formatting operator
- is ideal, but it does have some strange quirks:
-
- - When you switch from interpolating a single value to interpolating
- multiple values you have to wrap them in tuple syntax. Because
- :func:`format()` takes a `variable number of arguments`_ it always
- receives a tuple (which saves me a context switch :-). Here's an
- example:
-
- >>> from humanfriendly.text import format
- >>> # The string formatting operator.
- >>> print('the magic number is %s' % 42)
- the magic number is 42
- >>> print('the magic numbers are %s and %s' % (12, 42))
- the magic numbers are 12 and 42
- >>> # The format() function.
- >>> print(format('the magic number is %s', 42))
- the magic number is 42
- >>> print(format('the magic numbers are %s and %s', 12, 42))
- the magic numbers are 12 and 42
-
- - When you interpolate a single value and someone accidentally passes in a
- tuple your code raises a :exc:`~exceptions.TypeError`. Because
- :func:`format()` takes a `variable number of arguments`_ it always
- receives a tuple so this can never happen. Here's an example:
-
- >>> # How expecting to interpolate a single value can fail.
- >>> value = (12, 42)
- >>> print('the magic value is %s' % value)
- Traceback (most recent call last):
- File "<stdin>", line 1, in <module>
- TypeError: not all arguments converted during string formatting
- >>> # The following line works as intended, no surprises here!
- >>> print(format('the magic value is %s', value))
- the magic value is (12, 42)
-
- **Why format() instead of the str.format() method?**
-
- When you're doing complex string interpolation the :meth:`str.format()`
- function results in more readable code; however, I frequently find myself
- adding parentheses to force evaluation order. The :func:`format()` function
- avoids this because of the relative priority between the comma and dot
- operators. Here's an example:
-
- >>> "{adjective} example" + " " + "(can't think of anything less {adjective})".format(adjective='silly')
- "{adjective} example (can't think of anything less silly)"
- >>> ("{adjective} example" + " " + "(can't think of anything less {adjective})").format(adjective='silly')
- "silly example (can't think of anything less silly)"
- >>> format("{adjective} example" + " " + "(can't think of anything less {adjective})", adjective='silly')
- "silly example (can't think of anything less silly)"
-
- The :func:`compact()` and :func:`dedent()` functions are wrappers that
- combine :func:`format()` with whitespace manipulation to make it easy to
- write nice to read Python code.
-
- .. _variable number of arguments: https://docs.python.org/2/tutorial/controlflow.html#arbitrary-argument-lists
- """
- if args:
- text %= args
- if kw:
- text = text.format(**kw)
- return text
-
-
-def generate_slug(text, delimiter="-"):
- """
- Convert text to a normalized "slug" without whitespace.
-
- :param text: The original text, for example ``Some Random Text!``.
- :param delimiter: The delimiter used to separate words
- (defaults to the ``-`` character).
- :returns: The slug text, for example ``some-random-text``.
- :raises: :exc:`~exceptions.ValueError` when the provided
- text is nonempty but results in an empty slug.
- """
- slug = text.lower()
- escaped = delimiter.replace("\\", "\\\\")
- slug = re.sub("[^a-z0-9]+", escaped, slug)
- slug = slug.strip(delimiter)
- if text and not slug:
- msg = "The provided text %r results in an empty slug!"
- raise ValueError(format(msg, text))
- return slug
-
-
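-# A minimal sketch of slug generation (inputs chosen for illustration):
-def _example_generate_slug():
-    assert generate_slug("Some Random Text!") == 'some-random-text'
-    assert generate_slug("Hello World", delimiter="_") == 'hello_world'
-
-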
-def is_empty_line(text):
- """
- Check if a text is empty or contains only whitespace.
-
- :param text: The text to check for "emptiness" (a string).
- :returns: :data:`True` if the text is empty or contains only whitespace,
- :data:`False` otherwise.
- """
- return len(text) == 0 or text.isspace()
-
-
-def join_lines(text):
- """
- Remove "hard wrapping" from the paragraphs in a string.
-
- :param text: The text to reformat (a string).
- :returns: The text without hard wrapping (a string).
-
- This function works by removing line breaks when the last character before
- a line break and the first character after the line break are both
- non-whitespace characters. This means that common leading indentation will
- break :func:`join_lines()` (in that case you can use :func:`dedent()`
- before calling :func:`join_lines()`).
- """
- return re.sub(r'(\S)\n(\S)', r'\1 \2', text)
-
-
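-# A minimal sketch of join_lines() (text chosen for illustration): the hard
-# line break is replaced with a space while the paragraph break is preserved.
-def _example_join_lines():
-    assert join_lines("foo\nbar\n\nbaz") == "foo bar\n\nbaz"
-
-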
-def pluralize(count, singular, plural=None):
- """
- Combine a count with the singular or plural form of a word.
-
- :param count: The count (a number).
- :param singular: The singular form of the word (a string).
- :param plural: The plural form of the word (a string or :data:`None`).
- :returns: The count and singular or plural word concatenated (a string).
-
- See :func:`pluralize_raw()` for the logic underneath :func:`pluralize()`.
- """
- return '%s %s' % (count, pluralize_raw(count, singular, plural))
-
-
-def pluralize_raw(count, singular, plural=None):
- """
- Select the singular or plural form of a word based on a count.
-
- :param count: The count (a number).
- :param singular: The singular form of the word (a string).
- :param plural: The plural form of the word (a string or :data:`None`).
- :returns: The singular or plural form of the word (a string).
-
- When the given count is exactly 1.0 the singular form of the word is
- selected; in all other cases the plural form of the word is selected.
-
- If the plural form of the word is not provided it is obtained by
- concatenating the singular form of the word with the letter "s". Of course
- this will not always be correct, which is why you have the option to
- specify both forms.
- """
- if not plural:
- plural = singular + 's'
- return singular if float(count) == 1.0 else plural
-
-
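-# A minimal sketch of the two pluralization helpers above (example words):
-def _example_pluralize():
-    assert pluralize(1, 'message') == '1 message'
-    assert pluralize(3, 'message') == '3 messages'
-    assert pluralize_raw(2, 'box', 'boxes') == 'boxes'
-
-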
-def random_string(length=(25, 100), characters=string.ascii_letters):
- """random_string(length=(25, 100), characters=string.ascii_letters)
- Generate a random string.
-
- :param length: The length of the string to be generated (a number or a
- tuple with two numbers). If this is a tuple then a random
- number between the two numbers given in the tuple is used.
- :param characters: The characters to be used (a string, defaults
- to :data:`string.ascii_letters`).
- :returns: A random string.
-
- The :func:`random_string()` function is very useful in test suites; by the
- time I included it in :mod:`humanfriendly.text` I had already included
- variants of this function in seven different test suites :-).
- """
- if not isinstance(length, numbers.Number):
- length = random.randint(length[0], length[1])
- return ''.join(random.choice(characters) for _ in range(length))
-
-
-def split(text, delimiter=','):
- """
- Split a comma-separated list of strings.
-
- :param text: The text to split (a string).
- :param delimiter: The delimiter to split on (a string).
- :returns: A list of zero or more nonempty strings.
-
- Here's the default behavior of Python's built-in :meth:`str.split()`
- function:
-
- >>> 'foo,bar, baz,'.split(',')
- ['foo', 'bar', ' baz', '']
-
- In contrast here's the default behavior of the :func:`split()` function:
-
- >>> from humanfriendly.text import split
- >>> split('foo,bar, baz,')
- ['foo', 'bar', 'baz']
-
- Here is an example that parses a nested data structure (a mapping of
- logging level names to one or more styles per level) that's encoded in a
- string so it can be set as an environment variable:
-
- >>> from pprint import pprint
- >>> encoded_data = 'debug=green;warning=yellow;error=red;critical=red,bold'
- >>> parsed_data = dict((k, split(v, ',')) for k, v in (split(kv, '=') for kv in split(encoded_data, ';')))
- >>> pprint(parsed_data)
- {'critical': ['red', 'bold'],
- 'debug': ['green'],
- 'error': ['red'],
- 'warning': ['yellow']}
- """
- return [token.strip() for token in text.split(delimiter) if token and not token.isspace()]
-
-
-def split_paragraphs(text):
- """
- Split a string into paragraphs (one or more lines delimited by an empty line).
-
- :param text: The text to split into paragraphs (a string).
- :returns: A list of strings.
- """
- paragraphs = []
- for chunk in text.split('\n\n'):
- chunk = trim_empty_lines(chunk)
- if chunk and not chunk.isspace():
- paragraphs.append(chunk)
- return paragraphs
-
-
-def tokenize(text):
- """
- Tokenize a text into numbers and strings.
-
- :param text: The text to tokenize (a string).
- :returns: A list of strings and/or numbers.
-
- This function is used to implement robust tokenization of user input in
- functions like :func:`.parse_size()` and :func:`.parse_timespan()`. It
- automatically coerces integer and floating point numbers, ignores
- whitespace and knows how to separate numbers from strings even without
- whitespace. Some examples to make this more concrete:
-
- >>> from humanfriendly.text import tokenize
- >>> tokenize('42')
- [42]
- >>> tokenize('42MB')
- [42, 'MB']
- >>> tokenize('42.5MB')
- [42.5, 'MB']
- >>> tokenize('42.5 MB')
- [42.5, 'MB']
- """
- tokenized_input = []
- for token in re.split(r'(\d+(?:\.\d+)?)', text):
- token = token.strip()
- if re.match(r'\d+\.\d+', token):
- tokenized_input.append(float(token))
- elif token.isdigit():
- tokenized_input.append(int(token))
- elif token:
- tokenized_input.append(token)
- return tokenized_input
-
-
-def trim_empty_lines(text):
- """
- Trim leading and trailing empty lines from the given text.
-
- :param text: The text to trim (a string).
- :returns: The trimmed text (a string).
- """
- lines = text.splitlines(True)
- while lines and is_empty_line(lines[0]):
- lines.pop(0)
- while lines and is_empty_line(lines[-1]):
- lines.pop(-1)
- return ''.join(lines)
diff --git a/contrib/python/humanfriendly/py3/humanfriendly/usage.py b/contrib/python/humanfriendly/py3/humanfriendly/usage.py
deleted file mode 100644
index 81ba943ae0..0000000000
--- a/contrib/python/humanfriendly/py3/humanfriendly/usage.py
+++ /dev/null
@@ -1,351 +0,0 @@
-# Human friendly input/output in Python.
-#
-# Author: Peter Odding <peter@peterodding.com>
-# Last Change: June 11, 2021
-# URL: https://humanfriendly.readthedocs.io
-
-"""
-Parsing and reformatting of usage messages.
-
-The :mod:`~humanfriendly.usage` module parses and reformats usage messages:
-
-- The :func:`format_usage()` function takes a usage message and inserts ANSI
- escape sequences that highlight items of special significance like command
- line options, meta variables, etc. The resulting usage message is (intended
- to be) easier to read on a terminal.
-
-- The :func:`render_usage()` function takes a usage message and rewrites it to
- reStructuredText_ suitable for inclusion in the documentation of a Python
- package. This provides a DRY solution to keeping a single authoritative
- definition of the usage message while making it easily available in
- documentation. As a cherry on the cake it's not just a pre-formatted dump of
- the usage message but a nicely formatted reStructuredText_ fragment.
-
-- The remaining functions in this module support the two functions above.
-
-Usage messages are free format in general; however, the functions in this
-module assume a certain structure in usage messages in order to parse and
-reformat them successfully. Refer to :func:`parse_usage()` for details.
-
-.. _DRY: https://en.wikipedia.org/wiki/Don%27t_repeat_yourself
-.. _reStructuredText: https://en.wikipedia.org/wiki/ReStructuredText
-"""
-
-# Standard library modules.
-import csv
-import functools
-import logging
-import re
-
-# Standard library module or external dependency (see setup.py).
-from importlib import import_module
-
-# Modules included in our package.
-from humanfriendly.compat import StringIO
-from humanfriendly.text import dedent, split_paragraphs, trim_empty_lines
-
-# Public identifiers that require documentation.
-__all__ = (
- 'find_meta_variables',
- 'format_usage',
- 'import_module', # previously exported (backwards compatibility)
- 'inject_usage',
- 'parse_usage',
- 'render_usage',
- 'USAGE_MARKER',
-)
-
-USAGE_MARKER = "Usage:"
-"""The string that starts the first line of a usage message."""
-
-START_OF_OPTIONS_MARKER = "Supported options:"
-"""The string that marks the start of the documented command line options."""
-
-# Compiled regular expression used to tokenize usage messages.
-USAGE_PATTERN = re.compile(r'''
- # Make sure whatever we're matching isn't preceded by a non-whitespace
- # character.
- (?<!\S)
- (
- # A short command line option or a long command line option
- # (possibly including a meta variable for a value).
- (-\w|--\w+(-\w+)*(=\S+)?)
- # Or ...
- |
- # An environment variable.
- \$[A-Za-z_][A-Za-z0-9_]*
- # Or ...
- |
- # Might be a meta variable (usage() will figure it out).
- [A-Z][A-Z0-9_]+
- )
-''', re.VERBOSE)
-
-# Compiled regular expression used to recognize options.
-OPTION_PATTERN = re.compile(r'^(-\w|--\w+(-\w+)*(=\S+)?)$')
-
-# Initialize a logger for this module.
-logger = logging.getLogger(__name__)
-
-
-def format_usage(usage_text):
- """
- Highlight special items in a usage message.
-
- :param usage_text: The usage message to process (a string).
- :returns: The usage message with special items highlighted.
-
- This function highlights the following special items:
-
- - The initial line of the form "Usage: ..."
- - Short and long command line options
- - Environment variables
- - Meta variables (see :func:`find_meta_variables()`)
-
- All items are highlighted in the color defined by
- :data:`.HIGHLIGHT_COLOR`.
- """
- # Ugly workaround to avoid circular import errors due to interdependencies
- # between the humanfriendly.terminal and humanfriendly.usage modules.
- from humanfriendly.terminal import ansi_wrap, HIGHLIGHT_COLOR
- formatted_lines = []
- meta_variables = find_meta_variables(usage_text)
- for line in usage_text.strip().splitlines(True):
- if line.startswith(USAGE_MARKER):
- # Highlight the "Usage: ..." line in bold font and color.
- formatted_lines.append(ansi_wrap(line, color=HIGHLIGHT_COLOR))
- else:
- # Highlight options, meta variables and environment variables.
- formatted_lines.append(replace_special_tokens(
- line, meta_variables,
- lambda token: ansi_wrap(token, color=HIGHLIGHT_COLOR),
- ))
- return ''.join(formatted_lines)
-
-
-def find_meta_variables(usage_text):
- """
- Find the meta variables in the given usage message.
-
- :param usage_text: The usage message to parse (a string).
- :returns: A list of strings with any meta variables found in the usage
- message.
-
- When a command line option requires an argument, the convention is to
- format such options as ``--option=ARG``. The text ``ARG`` in this example
- is the meta variable.
- """
- meta_variables = set()
- for match in USAGE_PATTERN.finditer(usage_text):
- token = match.group(0)
- if token.startswith('-'):
- option, _, value = token.partition('=')
- if value:
- meta_variables.add(value)
- return list(meta_variables)
-
-
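-# A minimal sketch of meta variable extraction (the usage text is made up):
-def _example_find_meta_variables():
-    assert find_meta_variables("Usage: example --output=PATH --verbose") == ['PATH']
-
-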
-def parse_usage(text):
- """
- Parse a usage message by inferring its structure (and making some assumptions :-).
-
- :param text: The usage message to parse (a string).
- :returns: A tuple of two lists:
-
- 1. A list of strings with the paragraphs of the usage message's
- "introduction" (the paragraphs before the documentation of the
- supported command line options).
-
- 2. A list of strings with pairs of command line options and their
- descriptions: Item zero is a line listing a supported command
- line option, item one is the description of that command line
- option, item two is a line listing another supported command
- line option, etc.
-
- Usage messages are free format in general; however, :func:`parse_usage()`
- assumes a certain structure in usage messages in order to parse them
- successfully:
-
- - The usage message starts with a line ``Usage: ...`` that shows a symbolic
- representation of the way the program is to be invoked.
-
- - After some free-form text, a line ``Supported options:`` (surrounded by
- empty lines) precedes the documentation of the supported command line
- options.
-
- - The command line options are documented as follows::
-
- -v, --verbose
-
- Make more noise.
-
- So all of the variants of the command line option are shown together on a
- separate line, followed by one or more paragraphs describing the option.
-
- - There are several other minor assumptions, but to be honest I'm not sure if
- anyone other than me is ever going to use this functionality, so for now I
- won't list every intricate detail :-).
-
- If you're curious anyway, refer to the usage message of the `humanfriendly`
- package (defined in the :mod:`humanfriendly.cli` module) and compare it with
- the usage message you see when you run ``humanfriendly --help`` and the
- generated usage message embedded in the readme.
-
- Feel free to request more detailed documentation if you're interested in
- using the :mod:`humanfriendly.usage` module outside of the little ecosystem
- of Python packages that I have been building over the past years.
- """
- introduction = []
- documented_options = []
- # Split the raw usage message into paragraphs.
- paragraphs = split_paragraphs(text)
- # Get the paragraphs that are part of the introduction.
- while paragraphs:
- # Check whether we've found the end of the introduction.
- end_of_intro = (paragraphs[0] == START_OF_OPTIONS_MARKER)
- # Append the current paragraph to the introduction.
- introduction.append(paragraphs.pop(0))
- # Stop after we've processed the complete introduction.
- if end_of_intro:
- break
- logger.debug("Parsed introduction: %s", introduction)
- # Parse the paragraphs that document command line options.
- while paragraphs:
- documented_options.append(dedent(paragraphs.pop(0)))
- description = []
- while paragraphs:
- # Check if the next paragraph starts the documentation of another
- # command line option. We split on a comma followed by a space so
- # that our parsing doesn't trip up when the label used for an
- # option's value contains commas.
- tokens = [t.strip() for t in re.split(r',\s', paragraphs[0]) if t and not t.isspace()]
- if all(OPTION_PATTERN.match(t) for t in tokens):
- break
- else:
- description.append(paragraphs.pop(0))
- # Join the description's paragraphs back together so we can remove
- # common leading indentation.
- documented_options.append(dedent('\n\n'.join(description)))
- logger.debug("Parsed options: %s", documented_options)
- return introduction, documented_options
-
-
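-# A minimal sketch of parse_usage() on a tiny, made-up usage message:
-def _example_parse_usage():
-    introduction, options = parse_usage(
-        "Usage: example [OPTIONS]\n"
-        "\n"
-        "Frobnicate all the things.\n"
-        "\n"
-        "Supported options:\n"
-        "\n"
-        "  -v, --verbose\n"
-        "\n"
-        "    Make more noise.\n"
-    )
-    assert introduction[-1] == START_OF_OPTIONS_MARKER
-    assert options[0] == '-v, --verbose'
-    assert options[1].strip() == 'Make more noise.'
-
-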
-def render_usage(text):
- """
- Reformat a command line program's usage message to reStructuredText_.
-
- :param text: The plain text usage message (a string).
- :returns: The usage message rendered to reStructuredText_ (a string).
- """
- meta_variables = find_meta_variables(text)
- introduction, options = parse_usage(text)
- output = [render_paragraph(p, meta_variables) for p in introduction]
- if options:
- output.append('\n'.join([
- '.. csv-table::',
- ' :header: Option, Description',
- ' :widths: 30, 70',
- '',
- ]))
- csv_buffer = StringIO()
- csv_writer = csv.writer(csv_buffer)
- while options:
- variants = options.pop(0)
- description = options.pop(0)
- csv_writer.writerow([
- render_paragraph(variants, meta_variables),
- ('\n\n'.join(render_paragraph(p, meta_variables) for p in split_paragraphs(description))).rstrip(),
- ])
- csv_lines = csv_buffer.getvalue().splitlines()
- output.append('\n'.join(' %s' % line for line in csv_lines))
- logger.debug("Rendered output: %s", output)
- return '\n\n'.join(trim_empty_lines(o) for o in output)
-
-
-def inject_usage(module_name):
- """
- Use cog_ to inject a usage message into a reStructuredText_ file.
-
- :param module_name: The name of the module whose ``__doc__`` attribute is
- the source of the usage message (a string).
-
- This simple wrapper around :func:`render_usage()` makes it very easy to
- inject a reformatted usage message into your documentation using cog_. To
- use it you add a fragment like the following to your ``*.rst`` file::
-
- .. [[[cog
- .. from humanfriendly.usage import inject_usage
- .. inject_usage('humanfriendly.cli')
- .. ]]]
- .. [[[end]]]
-
- The lines in the fragment above are single line reStructuredText_ comments
- that are not copied to the output. Their purpose is to instruct cog_ where
- to inject the reformatted usage message. Once you've added these lines to
- your ``*.rst`` file, updating the rendered usage message becomes really
- simple thanks to cog_:
-
- .. code-block:: sh
-
- $ cog.py -r README.rst
-
- This will inject or replace the rendered usage message in your
- ``README.rst`` file with an up to date copy.
-
- .. _cog: http://nedbatchelder.com/code/cog/
- """
- import cog
- usage_text = import_module(module_name).__doc__
- cog.out("\n" + render_usage(usage_text) + "\n\n")
-
-
-def render_paragraph(paragraph, meta_variables):
- # Reformat the "Usage:" line to highlight "Usage:" in bold and show the
- # remainder of the line as pre-formatted text.
- if paragraph.startswith(USAGE_MARKER):
- tokens = paragraph.split()
- return "**%s** `%s`" % (tokens[0], ' '.join(tokens[1:]))
- # Reformat the "Supported options:" line to highlight it in bold.
- if paragraph == 'Supported options:':
- return "**%s**" % paragraph
- # Reformat shell transcripts into code blocks.
- if re.match(r'^\s*\$\s+\S', paragraph):
- # Split the paragraph into lines.
- lines = paragraph.splitlines()
- # Check if the paragraph is already indented.
- if not paragraph[0].isspace():
- # If the paragraph isn't already indented we'll indent it now.
- lines = [' %s' % line for line in lines]
- lines.insert(0, '.. code-block:: sh')
- lines.insert(1, '')
- return "\n".join(lines)
- # The following reformatting applies only to paragraphs which are not
- # indented. Yes this is a hack - for now we assume that indented paragraphs
- # are code blocks, even though this assumption can be wrong.
- if not paragraph[0].isspace():
- # Change UNIX style `quoting' so it doesn't trip up DocUtils.
- paragraph = re.sub("`(.+?)'", r'"\1"', paragraph)
- # Escape asterisks.
- paragraph = paragraph.replace('*', r'\*')
- # Reformat inline tokens.
- paragraph = replace_special_tokens(
- paragraph, meta_variables,
- lambda token: '``%s``' % token,
- )
- return paragraph
-
-
-def replace_special_tokens(text, meta_variables, replace_fn):
- return USAGE_PATTERN.sub(functools.partial(
- replace_tokens_callback,
- meta_variables=meta_variables,
- replace_fn=replace_fn
- ), text)
-
-
-def replace_tokens_callback(match, meta_variables, replace_fn):
- token = match.group(0)
- if not (re.match('^[A-Z][A-Z0-9_]+$', token) and token not in meta_variables):
- token = replace_fn(token)
- return token
diff --git a/contrib/python/humanfriendly/py3/ya.make b/contrib/python/humanfriendly/py3/ya.make
deleted file mode 100644
index e6814f8a46..0000000000
--- a/contrib/python/humanfriendly/py3/ya.make
+++ /dev/null
@@ -1,41 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(10.0)
-
-LICENSE(MIT)
-
-NO_LINT()
-
-NO_CHECK_IMPORTS(
- humanfriendly.sphinx
-)
-
-PY_SRCS(
- TOP_LEVEL
- humanfriendly/__init__.py
- humanfriendly/case.py
- humanfriendly/cli.py
- humanfriendly/compat.py
- humanfriendly/decorators.py
- humanfriendly/deprecation.py
- humanfriendly/prompts.py
- humanfriendly/sphinx.py
- humanfriendly/tables.py
- humanfriendly/terminal/__init__.py
- humanfriendly/terminal/html.py
- humanfriendly/terminal/spinners.py
- humanfriendly/testing.py
- humanfriendly/text.py
- humanfriendly/usage.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/humanfriendly/py3/
- .dist-info/METADATA
- .dist-info/entry_points.txt
- .dist-info/top_level.txt
-)
-
-END()
diff --git a/contrib/python/humanfriendly/ya.make b/contrib/python/humanfriendly/ya.make
deleted file mode 100644
index 24b7388aa5..0000000000
--- a/contrib/python/humanfriendly/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/humanfriendly/py2)
-ELSE()
- PEERDIR(contrib/python/humanfriendly/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
diff --git a/contrib/python/marisa-trie/agent.pxd b/contrib/python/marisa-trie/agent.pxd
deleted file mode 100644
index bf019673c2..0000000000
--- a/contrib/python/marisa-trie/agent.pxd
+++ /dev/null
@@ -1,22 +0,0 @@
-cimport query, key
-
-cdef extern from "<marisa/agent.h>" namespace "marisa" nogil:
- cdef cppclass Agent:
- Agent() except +
-
- query.Query &query()
- key.Key &key()
-
- void set_query(char *str)
- void set_query(char *ptr, int length)
- void set_query(int key_id)
-
- void set_key(char *str)
- void set_key(char *ptr, int length)
- void set_key(int id)
-
- void clear()
-
- void init_state()
-
- void swap(Agent &rhs)
diff --git a/contrib/python/marisa-trie/base.pxd b/contrib/python/marisa-trie/base.pxd
deleted file mode 100644
index c434e82122..0000000000
--- a/contrib/python/marisa-trie/base.pxd
+++ /dev/null
@@ -1,63 +0,0 @@
-cdef extern from "<marisa/base.h>":
-
- # A dictionary consists of 3 tries by default. Using more tries usually makes
- # a dictionary more space-efficient but slower.
- ctypedef enum marisa_num_tries:
- MARISA_MIN_NUM_TRIES
- MARISA_MAX_NUM_TRIES
- MARISA_DEFAULT_NUM_TRIES
-
-
- # This library uses a cache technique to accelerate search functions. The
- # following enumerated type marisa_cache_level gives a list of available cache
- # size options. A larger cache enables faster search but takes more space.
- ctypedef enum marisa_cache_level:
- MARISA_HUGE_CACHE
- MARISA_LARGE_CACHE
- MARISA_NORMAL_CACHE
- MARISA_SMALL_CACHE
- MARISA_TINY_CACHE
- MARISA_DEFAULT_CACHE
-
- # This library provides 2 kinds of TAIL implementations.
- ctypedef enum marisa_tail_mode:
- # MARISA_TEXT_TAIL merges last labels as zero-terminated strings. So, it is
- # available if and only if the last labels do not contain a NULL character.
- # If MARISA_TEXT_TAIL is specified and a NULL character exists in the last
- # labels, the setting is automatically switched to MARISA_BINARY_TAIL.
- MARISA_TEXT_TAIL
-
- # MARISA_BINARY_TAIL also merges last labels but as byte sequences. It uses
- # a bit vector to detect the end of a sequence, instead of NULL characters.
- # So, MARISA_BINARY_TAIL requires a larger space if the average length of
- # labels is greater than 8.
- MARISA_BINARY_TAIL
-
- MARISA_DEFAULT_TAIL
-
- # The arrangement of nodes affects the time cost of matching and the order of
- # predictive search.
- ctypedef enum marisa_node_order:
- # MARISA_LABEL_ORDER arranges nodes in ascending label order.
- # MARISA_LABEL_ORDER is useful if an application needs to predict keys in
- # label order.
- MARISA_LABEL_ORDER
-
- # MARISA_WEIGHT_ORDER arranges nodes in descending weight order.
- # MARISA_WEIGHT_ORDER is generally a better choice because it enables faster
- # matching.
- MARISA_WEIGHT_ORDER
- MARISA_DEFAULT_ORDER
-
- ctypedef enum marisa_config_mask:
- MARISA_NUM_TRIES_MASK
- MARISA_CACHE_LEVEL_MASK
- MARISA_TAIL_MODE_MASK
- MARISA_NODE_ORDER_MASK
- MARISA_CONFIG_MASK
-
-
-cdef extern from "<marisa/base.h>" namespace "marisa":
- ctypedef marisa_cache_level CacheLevel
- ctypedef marisa_tail_mode TailMode
- ctypedef marisa_node_order NodeOrder
diff --git a/contrib/python/marisa-trie/iostream.pxd b/contrib/python/marisa-trie/iostream.pxd
deleted file mode 100644
index 435ee85bb0..0000000000
--- a/contrib/python/marisa-trie/iostream.pxd
+++ /dev/null
@@ -1,7 +0,0 @@
-from std_iostream cimport istream, ostream
-from trie cimport Trie
-
-cdef extern from "<marisa/iostream.h>" namespace "marisa" nogil:
-
- istream &read(istream &stream, Trie *trie)
- ostream &write(ostream &stream, Trie &trie)
diff --git a/contrib/python/marisa-trie/key.pxd b/contrib/python/marisa-trie/key.pxd
deleted file mode 100644
index d99dee5e04..0000000000
--- a/contrib/python/marisa-trie/key.pxd
+++ /dev/null
@@ -1,22 +0,0 @@
-cdef extern from "<marisa/key.h>" namespace "marisa" nogil:
-
- cdef cppclass Key:
- Key()
- Key(Key &query)
-
- #Key &operator=(Key &query)
-
- char operator[](int i)
-
- void set_str(char *str)
- void set_str(char *ptr, int length)
- void set_id(int id)
- void set_weight(float weight)
-
- char *ptr()
- int length()
- int id()
- float weight()
-
- void clear()
- void swap(Key &rhs)
diff --git a/contrib/python/marisa-trie/keyset.pxd b/contrib/python/marisa-trie/keyset.pxd
deleted file mode 100644
index 1fb99a40c5..0000000000
--- a/contrib/python/marisa-trie/keyset.pxd
+++ /dev/null
@@ -1,30 +0,0 @@
-cimport key
-
-cdef extern from "<marisa/keyset.h>" namespace "marisa" nogil:
- cdef cppclass Keyset:
-
-# cdef enum constants:
-# BASE_BLOCK_SIZE = 4096
-# EXTRA_BLOCK_SIZE = 1024
-# KEY_BLOCK_SIZE = 256
-
- Keyset()
-
- void push_back(key.Key &key)
- void push_back(key.Key &key, char end_marker)
-
- void push_back(char *str)
- void push_back(char *ptr, int length)
- void push_back(char *ptr, int length, float weight)
-
- key.Key &operator[](int i)
-
- int num_keys()
- bint empty()
-
- int size()
- int total_length()
-
- void reset()
- void clear()
- void swap(Keyset &rhs)
diff --git a/contrib/python/marisa-trie/marisa/agent.cc b/contrib/python/marisa-trie/marisa/agent.cc
deleted file mode 100644
index 7f7f49f1bc..0000000000
--- a/contrib/python/marisa-trie/marisa/agent.cc
+++ /dev/null
@@ -1,51 +0,0 @@
-#include <new>
-
-#include "agent.h"
-#include "grimoire/trie.h"
-
-namespace marisa {
-
-Agent::Agent() : query_(), key_(), state_() {}
-
-Agent::~Agent() {}
-
-void Agent::set_query(const char *str) {
- MARISA_THROW_IF(str == NULL, MARISA_NULL_ERROR);
- if (state_.get() != NULL) {
- state_->reset();
- }
- query_.set_str(str);
-}
-
-void Agent::set_query(const char *ptr, std::size_t length) {
- MARISA_THROW_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- if (state_.get() != NULL) {
- state_->reset();
- }
- query_.set_str(ptr, length);
-}
-
-void Agent::set_query(std::size_t key_id) {
- if (state_.get() != NULL) {
- state_->reset();
- }
- query_.set_id(key_id);
-}
-
-void Agent::init_state() {
- MARISA_THROW_IF(state_.get() != NULL, MARISA_STATE_ERROR);
- state_.reset(new (std::nothrow) grimoire::State);
- MARISA_THROW_IF(state_.get() == NULL, MARISA_MEMORY_ERROR);
-}
-
-void Agent::clear() {
- Agent().swap(*this);
-}
-
-void Agent::swap(Agent &rhs) {
- query_.swap(rhs.query_);
- key_.swap(rhs.key_);
- state_.swap(rhs.state_);
-}
-
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/agent.h b/contrib/python/marisa-trie/marisa/agent.h
deleted file mode 100644
index 0f89f7df0f..0000000000
--- a/contrib/python/marisa-trie/marisa/agent.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#pragma once
-
-#ifndef MARISA_AGENT_H_
-#define MARISA_AGENT_H_
-
-#include "key.h"
-#include "query.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class State;
-
-} // namespace trie
-} // namespace grimoire
-
-class Agent {
- public:
- Agent();
- ~Agent();
-
- const Query &query() const {
- return query_;
- }
- const Key &key() const {
- return key_;
- }
-
- void set_query(const char *str);
- void set_query(const char *ptr, std::size_t length);
- void set_query(std::size_t key_id);
-
- const grimoire::trie::State &state() const {
- return *state_;
- }
- grimoire::trie::State &state() {
- return *state_;
- }
-
- void set_key(const char *str) {
- MARISA_DEBUG_IF(str == NULL, MARISA_NULL_ERROR);
- key_.set_str(str);
- }
- void set_key(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- key_.set_str(ptr, length);
- }
- void set_key(std::size_t id) {
- MARISA_DEBUG_IF(id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- key_.set_id(id);
- }
-
- bool has_state() const {
- return state_.get() != NULL;
- }
- void init_state();
-
- void clear();
- void swap(Agent &rhs);
-
- private:
- Query query_;
- Key key_;
- scoped_ptr<grimoire::trie::State> state_;
-
- // Disallows copy and assignment.
- Agent(const Agent &);
- Agent &operator=(const Agent &);
-};
-
-} // namespace marisa
-
-#endif // MARISA_AGENT_H_
diff --git a/contrib/python/marisa-trie/marisa/base.h b/contrib/python/marisa-trie/marisa/base.h
deleted file mode 100644
index 5c595dcd2b..0000000000
--- a/contrib/python/marisa-trie/marisa/base.h
+++ /dev/null
@@ -1,196 +0,0 @@
-#pragma once
-
-#ifndef MARISA_BASE_H_
-#define MARISA_BASE_H_
-
-// Old Visual C++ does not provide stdint.h.
-#ifndef _MSC_VER
- #include <stdint.h>
-#endif // _MSC_VER
-
-#ifdef __cplusplus
- #include <cstddef>
-#else // __cplusplus
- #include <stddef.h>
-#endif // __cplusplus
-
-#ifdef __cplusplus
-extern "C" {
-#endif // __cplusplus
-
-#ifdef _MSC_VER
-typedef unsigned __int8 marisa_uint8;
-typedef unsigned __int16 marisa_uint16;
-typedef unsigned __int32 marisa_uint32;
-typedef unsigned __int64 marisa_uint64;
-#else // _MSC_VER
-typedef uint8_t marisa_uint8;
-typedef uint16_t marisa_uint16;
-typedef uint32_t marisa_uint32;
-typedef uint64_t marisa_uint64;
-#endif // _MSC_VER
-
-#if defined(_WIN64) || defined(__amd64__) || defined(__x86_64__) || \
- defined(__ia64__) || defined(__ppc64__) || defined(__powerpc64__) || \
- defined(__sparc64__) || defined(__mips64__) || defined(__aarch64__) || \
- defined(__s390x__)
- #define MARISA_WORD_SIZE 64
-#else // defined(_WIN64), etc.
- #define MARISA_WORD_SIZE 32
-#endif // defined(_WIN64), etc.
-
-//#define MARISA_WORD_SIZE (sizeof(void *) * 8)
-
-#define MARISA_UINT8_MAX ((marisa_uint8)~(marisa_uint8)0)
-#define MARISA_UINT16_MAX ((marisa_uint16)~(marisa_uint16)0)
-#define MARISA_UINT32_MAX ((marisa_uint32)~(marisa_uint32)0)
-#define MARISA_UINT64_MAX ((marisa_uint64)~(marisa_uint64)0)
-#define MARISA_SIZE_MAX ((size_t)~(size_t)0)
-
-#define MARISA_INVALID_LINK_ID MARISA_UINT32_MAX
-#define MARISA_INVALID_KEY_ID MARISA_UINT32_MAX
-#define MARISA_INVALID_EXTRA (MARISA_UINT32_MAX >> 8)
-
-// Error codes are defined as members of marisa_error_code. This library throws
-// an exception with one of the error codes when an error occurs.
-typedef enum marisa_error_code_ {
- // MARISA_OK means that a requested operation has succeeded. In practice, an
- // exception never has MARISA_OK because it is not an error.
- MARISA_OK = 0,
-
- // MARISA_STATE_ERROR means that an object was not ready for a requested
- // operation. For example, an operation to modify a fixed vector throws an
- // exception with MARISA_STATE_ERROR.
- MARISA_STATE_ERROR = 1,
-
- // MARISA_NULL_ERROR means that an invalid NULL pointer has been given.
- MARISA_NULL_ERROR = 2,
-
- // MARISA_BOUND_ERROR means that an operation has tried to access an out of
- // range address.
- MARISA_BOUND_ERROR = 3,
-
- // MARISA_RANGE_ERROR means that an out of range value has appeared in
- // operation.
- MARISA_RANGE_ERROR = 4,
-
- // MARISA_CODE_ERROR means that an undefined code has appeared in operation.
- MARISA_CODE_ERROR = 5,
-
- // MARISA_RESET_ERROR means that a smart pointer has tried to reset itself.
- MARISA_RESET_ERROR = 6,
-
- // MARISA_SIZE_ERROR means that a size has exceeded a library limitation.
- MARISA_SIZE_ERROR = 7,
-
- // MARISA_MEMORY_ERROR means that a memory allocation has failed.
- MARISA_MEMORY_ERROR = 8,
-
- // MARISA_IO_ERROR means that an I/O operation has failed.
- MARISA_IO_ERROR = 9,
-
- // MARISA_FORMAT_ERROR means that input was in an invalid format.
- MARISA_FORMAT_ERROR = 10,
-} marisa_error_code;
-
-// Min/max values, flags and masks for dictionary settings are defined below.
-// Please note that unspecified settings will be replaced with the default
-// settings. For example, 0 is equivalent to (MARISA_DEFAULT_NUM_TRIES |
-// MARISA_DEFAULT_TRIE | MARISA_DEFAULT_TAIL | MARISA_DEFAULT_ORDER).
-
-// A dictionary consists of 3 tries by default. Using more tries usually makes
-// a dictionary more space-efficient but slower.
-typedef enum marisa_num_tries_ {
- MARISA_MIN_NUM_TRIES = 0x00001,
- MARISA_MAX_NUM_TRIES = 0x0007F,
- MARISA_DEFAULT_NUM_TRIES = 0x00003,
-} marisa_num_tries;
-
-// This library uses a cache technique to accelerate search functions. The
-// following enumerated type marisa_cache_level gives a list of available cache
-// size options. A larger cache enables faster search but takes more space.
-typedef enum marisa_cache_level_ {
- MARISA_HUGE_CACHE = 0x00080,
- MARISA_LARGE_CACHE = 0x00100,
- MARISA_NORMAL_CACHE = 0x00200,
- MARISA_SMALL_CACHE = 0x00400,
- MARISA_TINY_CACHE = 0x00800,
- MARISA_DEFAULT_CACHE = MARISA_NORMAL_CACHE
-} marisa_cache_level;
-
-// This library provides 2 kinds of TAIL implementations.
-typedef enum marisa_tail_mode_ {
- // MARISA_TEXT_TAIL merges last labels as zero-terminated strings. So, it is
- // available if and only if the last labels do not contain a NULL character.
- // If MARISA_TEXT_TAIL is specified and a NULL character exists in the last
- // labels, the setting is automatically switched to MARISA_BINARY_TAIL.
- MARISA_TEXT_TAIL = 0x01000,
-
- // MARISA_BINARY_TAIL also merges last labels but as byte sequences. It uses
- // a bit vector to detect the end of a sequence, instead of NULL characters.
- // So, MARISA_BINARY_TAIL requires a larger space if the average length of
- // labels is greater than 8.
- MARISA_BINARY_TAIL = 0x02000,
-
- MARISA_DEFAULT_TAIL = MARISA_TEXT_TAIL,
-} marisa_tail_mode;
-
-// The arrangement of nodes affects the time cost of matching and the order of
-// predictive search.
-typedef enum marisa_node_order_ {
- // MARISA_LABEL_ORDER arranges nodes in ascending label order.
- // MARISA_LABEL_ORDER is useful if an application needs to predict keys in
- // label order.
- MARISA_LABEL_ORDER = 0x10000,
-
- // MARISA_WEIGHT_ORDER arranges nodes in descending weight order.
- // MARISA_WEIGHT_ORDER is generally a better choice because it enables faster
- // matching.
- MARISA_WEIGHT_ORDER = 0x20000,
-
- MARISA_DEFAULT_ORDER = MARISA_WEIGHT_ORDER,
-} marisa_node_order;
-
-typedef enum marisa_config_mask_ {
- MARISA_NUM_TRIES_MASK = 0x0007F,
- MARISA_CACHE_LEVEL_MASK = 0x00F80,
- MARISA_TAIL_MODE_MASK = 0x0F000,
- MARISA_NODE_ORDER_MASK = 0xF0000,
- MARISA_CONFIG_MASK = 0xFFFFF
-} marisa_config_mask;
-
-#ifdef __cplusplus
-} // extern "C"
-#endif // __cplusplus
-
-#ifdef __cplusplus
-namespace marisa {
-
-typedef ::marisa_uint8 UInt8;
-typedef ::marisa_uint16 UInt16;
-typedef ::marisa_uint32 UInt32;
-typedef ::marisa_uint64 UInt64;
-
-typedef ::marisa_error_code ErrorCode;
-
-typedef ::marisa_cache_level CacheLevel;
-typedef ::marisa_tail_mode TailMode;
-typedef ::marisa_node_order NodeOrder;
-
-template <typename T>
-inline void swap(T &lhs, T &rhs) {
- T temp = lhs;
- lhs = rhs;
- rhs = temp;
-}
-
-} // namespace marisa
-#endif // __cplusplus
-
-#ifdef __cplusplus
- #include "exception.h"
- #include "scoped-ptr.h"
- #include "scoped-array.h"
-#endif // __cplusplus
-
-#endif // MARISA_BASE_H_
diff --git a/contrib/python/marisa-trie/marisa/exception.h b/contrib/python/marisa-trie/marisa/exception.h
deleted file mode 100644
index 630936b23b..0000000000
--- a/contrib/python/marisa-trie/marisa/exception.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#pragma once
-
-#ifndef MARISA_EXCEPTION_H_
-#define MARISA_EXCEPTION_H_
-
-#include <exception>
-
-#include "base.h"
-
-namespace marisa {
-
-// An exception object keeps a filename, a line number, an error code and an
-// error message. The message format is as follows:
-// "__FILE__:__LINE__: error_code: error_message"
-class Exception : public std::exception {
- public:
- Exception(const char *filename, int line,
- ErrorCode error_code, const char *error_message)
- : std::exception(), filename_(filename), line_(line),
- error_code_(error_code), error_message_(error_message) {}
- Exception(const Exception &ex)
- : std::exception(), filename_(ex.filename_), line_(ex.line_),
- error_code_(ex.error_code_), error_message_(ex.error_message_) {}
- virtual ~Exception() {}
-
- Exception &operator=(const Exception &rhs) {
- filename_ = rhs.filename_;
- line_ = rhs.line_;
- error_code_ = rhs.error_code_;
- error_message_ = rhs.error_message_;
- return *this;
- }
-
- const char *filename() const {
- return filename_;
- }
- int line() const {
- return line_;
- }
- ErrorCode error_code() const {
- return error_code_;
- }
- const char *error_message() const {
- return error_message_;
- }
-
- virtual const char *what() const noexcept {
- return error_message_;
- }
-
- private:
- const char *filename_;
- int line_;
- ErrorCode error_code_;
- const char *error_message_;
-};
-
-// These macros are used to convert a line number to a string constant.
-#define MARISA_INT_TO_STR(value) #value
-#define MARISA_LINE_TO_STR(line) MARISA_INT_TO_STR(line)
-#define MARISA_LINE_STR MARISA_LINE_TO_STR(__LINE__)
-
-// MARISA_THROW throws an exception with a filename, a line number, an error
-// code and an error message. The message format is as follows:
-// "__FILE__:__LINE__: error_code: error_message"
-#define MARISA_THROW(error_code, error_message) \
- (throw marisa::Exception(__FILE__, __LINE__, error_code, \
- __FILE__ ":" MARISA_LINE_STR ": " #error_code ": " error_message))
-
-// MARISA_THROW_IF throws an exception if `condition' is true.
-#define MARISA_THROW_IF(condition, error_code) \
- (void)((!(condition)) || (MARISA_THROW(error_code, #condition), 0))
-
-// MARISA_DEBUG_IF is ignored if _DEBUG is undefined. So, it is useful for
-// debugging time-critical code.
-#ifdef _DEBUG
- #define MARISA_DEBUG_IF(cond, error_code) MARISA_THROW_IF(cond, error_code)
-#else
- #define MARISA_DEBUG_IF(cond, error_code)
-#endif
-
-} // namespace marisa
-
-#endif // MARISA_EXCEPTION_H_
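
MARISA_THROW stringizes the error code and condition and prepends __FILE__ and __LINE__, so a catch site gets a self-describing message from what() alone. A small usage sketch, assuming the removed header above; check_positive is a hypothetical helper and the include path is illustrative:

    #include <iostream>
    #include "marisa/exception.h"  // illustrative path to the header removed above

    // Hypothetical helper: rejects non-positive sizes with MARISA_SIZE_ERROR.
    static void check_positive(int size) {
      MARISA_THROW_IF(size <= 0, MARISA_SIZE_ERROR);
    }

    int main() {
      try {
        check_positive(0);
      } catch (const marisa::Exception &ex) {
        // what() yields "__FILE__:__LINE__: MARISA_SIZE_ERROR: size <= 0".
        std::cerr << ex.what() << " (line " << ex.line() << ")\n";
      }
      return 0;
    }
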
diff --git a/contrib/python/marisa-trie/marisa/grimoire/algorithm.h b/contrib/python/marisa-trie/marisa/grimoire/algorithm.h
deleted file mode 100644
index 71baec34ac..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/algorithm.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_ALGORITHM_H_
-#define MARISA_GRIMOIRE_ALGORITHM_H_
-
-#include "algorithm/sort.h"
-
-namespace marisa {
-namespace grimoire {
-
-class Algorithm {
- public:
- Algorithm() {}
-
- template <typename Iterator>
- std::size_t sort(Iterator begin, Iterator end) const {
- return algorithm::sort(begin, end);
- }
-
- private:
- Algorithm(const Algorithm &);
- Algorithm &operator=(const Algorithm &);
-};
-
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_ALGORITHM_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/algorithm/sort.h b/contrib/python/marisa-trie/marisa/grimoire/algorithm/sort.h
deleted file mode 100644
index 9090336ce6..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/algorithm/sort.h
+++ /dev/null
@@ -1,197 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_ALGORITHM_SORT_H_
-#define MARISA_GRIMOIRE_ALGORITHM_SORT_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace algorithm {
-namespace details {
-
-enum {
- MARISA_INSERTION_SORT_THRESHOLD = 10
-};
-
-template <typename T>
-int get_label(const T &unit, std::size_t depth) {
- MARISA_DEBUG_IF(depth > unit.length(), MARISA_BOUND_ERROR);
-
- return (depth < unit.length()) ? (int)(UInt8)unit[depth] : -1;
-}
-
-template <typename T>
-int median(const T &a, const T &b, const T &c, std::size_t depth) {
- const int x = get_label(a, depth);
- const int y = get_label(b, depth);
- const int z = get_label(c, depth);
- if (x < y) {
- if (y < z) {
- return y;
- } else if (x < z) {
- return z;
- }
- return x;
- } else if (x < z) {
- return x;
- } else if (y < z) {
- return z;
- }
- return y;
-}
-
-template <typename T>
-int compare(const T &lhs, const T &rhs, std::size_t depth) {
- for (std::size_t i = depth; i < lhs.length(); ++i) {
- if (i == rhs.length()) {
- return 1;
- }
- if (lhs[i] != rhs[i]) {
- return (UInt8)lhs[i] - (UInt8)rhs[i];
- }
- }
- if (lhs.length() == rhs.length()) {
- return 0;
- }
- return (lhs.length() < rhs.length()) ? -1 : 1;
-}
-
-template <typename Iterator>
-std::size_t insertion_sort(Iterator l, Iterator r, std::size_t depth) {
- MARISA_DEBUG_IF(l > r, MARISA_BOUND_ERROR);
-
- std::size_t count = 1;
- for (Iterator i = l + 1; i < r; ++i) {
- int result = 0;
- for (Iterator j = i; j > l; --j) {
- result = compare(*(j - 1), *j, depth);
- if (result <= 0) {
- break;
- }
- marisa::swap(*(j - 1), *j);
- }
- if (result != 0) {
- ++count;
- }
- }
- return count;
-}
-
-template <typename Iterator>
-std::size_t sort(Iterator l, Iterator r, std::size_t depth) {
- MARISA_DEBUG_IF(l > r, MARISA_BOUND_ERROR);
-
- std::size_t count = 0;
- while ((r - l) > MARISA_INSERTION_SORT_THRESHOLD) {
- Iterator pl = l;
- Iterator pr = r;
- Iterator pivot_l = l;
- Iterator pivot_r = r;
-
- const int pivot = median(*l, *(l + (r - l) / 2), *(r - 1), depth);
- for ( ; ; ) {
- while (pl < pr) {
- const int label = get_label(*pl, depth);
- if (label > pivot) {
- break;
- } else if (label == pivot) {
- marisa::swap(*pl, *pivot_l);
- ++pivot_l;
- }
- ++pl;
- }
- while (pl < pr) {
- const int label = get_label(*--pr, depth);
- if (label < pivot) {
- break;
- } else if (label == pivot) {
- marisa::swap(*pr, *--pivot_r);
- }
- }
- if (pl >= pr) {
- break;
- }
- marisa::swap(*pl, *pr);
- ++pl;
- }
- while (pivot_l > l) {
- marisa::swap(*--pivot_l, *--pl);
- }
- while (pivot_r < r) {
- marisa::swap(*pivot_r, *pr);
- ++pivot_r;
- ++pr;
- }
-
- if (((pl - l) > (pr - pl)) || ((r - pr) > (pr - pl))) {
- if ((pr - pl) == 1) {
- ++count;
- } else if ((pr - pl) > 1) {
- if (pivot == -1) {
- ++count;
- } else {
- count += sort(pl, pr, depth + 1);
- }
- }
-
- if ((pl - l) < (r - pr)) {
- if ((pl - l) == 1) {
- ++count;
- } else if ((pl - l) > 1) {
- count += sort(l, pl, depth);
- }
- l = pr;
- } else {
- if ((r - pr) == 1) {
- ++count;
- } else if ((r - pr) > 1) {
- count += sort(pr, r, depth);
- }
- r = pl;
- }
- } else {
- if ((pl - l) == 1) {
- ++count;
- } else if ((pl - l) > 1) {
- count += sort(l, pl, depth);
- }
-
- if ((r - pr) == 1) {
- ++count;
- } else if ((r - pr) > 1) {
- count += sort(pr, r, depth);
- }
-
- l = pl, r = pr;
- if ((pr - pl) == 1) {
- ++count;
- } else if ((pr - pl) > 1) {
- if (pivot == -1) {
- l = r;
- ++count;
- } else {
- ++depth;
- }
- }
- }
- }
-
- if ((r - l) > 1) {
- count += insertion_sort(l, r, depth);
- }
- return count;
-}
-
-} // namespace details
-
-template <typename Iterator>
-std::size_t sort(Iterator begin, Iterator end) {
- MARISA_DEBUG_IF(begin > end, MARISA_BOUND_ERROR);
- return details::sort(begin, end, 0);
-}
-
-} // namespace algorithm
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_ALGORITHM_SORT_H_
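
details::sort() is a multi-key quicksort on byte labels that falls back to insertion sort for ranges of at most MARISA_INSERTION_SORT_THRESHOLD elements, and the public wrapper reports how many distinct keys it saw. A sketch of calling it through Algorithm with a type that provides the length()/operator[] interface the comparators expect; Str is a hypothetical stand-in for the library's own Key/Entry classes and the include path is illustrative:

    #include <cstdio>
    #include <cstring>
    #include <vector>
    #include "marisa/grimoire/algorithm.h"  // illustrative path to the removed header

    // Hypothetical key type exposing the interface sort() relies on.
    struct Str {
      const char *ptr;
      std::size_t len;
      std::size_t length() const { return len; }
      char operator[](std::size_t i) const { return ptr[i]; }
    };

    int main() {
      const char *words[] = {"trie", "tree", "trie", "marisa"};
      std::vector<Str> keys;
      for (std::size_t i = 0; i < 4; ++i) {
        Str key = {words[i], std::strlen(words[i])};
        keys.push_back(key);
      }
      // Sorts in place by byte value and returns the number of distinct keys.
      const std::size_t unique =
          marisa::grimoire::Algorithm().sort(keys.begin(), keys.end());
      std::printf("distinct keys: %u\n", (unsigned)unique);  // prints 3
      return 0;
    }
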
diff --git a/contrib/python/marisa-trie/marisa/grimoire/intrin.h b/contrib/python/marisa-trie/marisa/grimoire/intrin.h
deleted file mode 100644
index 16843b353c..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/intrin.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_INTRIN_H_
-#define MARISA_GRIMOIRE_INTRIN_H_
-
-#include "../base.h"
-
-#if defined(__x86_64__) || defined(_M_X64)
- #define MARISA_X64
-#elif defined(__i386__) || defined(_M_IX86)
- #define MARISA_X86
-#else // defined(__i386__) || defined(_M_IX86)
- #ifdef MARISA_USE_POPCNT
- #undef MARISA_USE_POPCNT
- #endif // MARISA_USE_POPCNT
- #ifdef MARISA_USE_SSE4A
- #undef MARISA_USE_SSE4A
- #endif // MARISA_USE_SSE4A
- #ifdef MARISA_USE_SSE4
- #undef MARISA_USE_SSE4
- #endif // MARISA_USE_SSE4
- #ifdef MARISA_USE_SSE4_2
- #undef MARISA_USE_SSE4_2
- #endif // MARISA_USE_SSE4_2
- #ifdef MARISA_USE_SSE4_1
- #undef MARISA_USE_SSE4_1
- #endif // MARISA_USE_SSE4_1
- #ifdef MARISA_USE_SSSE3
- #undef MARISA_USE_SSSE3
- #endif // MARISA_USE_SSSE3
- #ifdef MARISA_USE_SSE3
- #undef MARISA_USE_SSE3
- #endif // MARISA_USE_SSE3
- #ifdef MARISA_USE_SSE2
- #undef MARISA_USE_SSE2
- #endif // MARISA_USE_SSE2
-#endif // defined(__i386__) || defined(_M_IX86)
-
-#ifdef MARISA_USE_POPCNT
- #ifndef MARISA_USE_SSE3
- #define MARISA_USE_SSE3
- #endif // MARISA_USE_SSE3
- #ifdef _MSC_VER
- #include <intrin.h>
- #else // _MSC_VER
- #include <popcntintrin.h>
- #endif // _MSC_VER
-#endif // MARISA_USE_POPCNT
-
-#ifdef MARISA_USE_SSE4A
- #ifndef MARISA_USE_SSE3
- #define MARISA_USE_SSE3
- #endif // MARISA_USE_SSE3
- #ifndef MARISA_USE_POPCNT
- #define MARISA_USE_POPCNT
- #endif // MARISA_USE_POPCNT
-#endif // MARISA_USE_SSE4A
-
-#ifdef MARISA_USE_SSE4
- #ifndef MARISA_USE_SSE4_2
- #define MARISA_USE_SSE4_2
- #endif // MARISA_USE_SSE4_2
-#endif // MARISA_USE_SSE4
-
-#ifdef MARISA_USE_SSE4_2
- #ifndef MARISA_USE_SSE4_1
- #define MARISA_USE_SSE4_1
- #endif // MARISA_USE_SSE4_1
- #ifndef MARISA_USE_POPCNT
- #define MARISA_USE_POPCNT
- #endif // MARISA_USE_POPCNT
-#endif // MARISA_USE_SSE4_2
-
-#ifdef MARISA_USE_SSE4_1
- #ifndef MARISA_USE_SSSE3
- #define MARISA_USE_SSSE3
- #endif // MARISA_USE_SSSE3
-#endif // MARISA_USE_SSE4_1
-
-#ifdef MARISA_USE_SSSE3
- #ifndef MARISA_USE_SSE3
- #define MARISA_USE_SSE3
- #endif // MARISA_USE_SSE3
- #ifdef MARISA_X64
- #define MARISA_X64_SSSE3
- #else // MARISA_X64
- #define MARISA_X86_SSSE3
- #endif // MARISA_X64
- #include <tmmintrin.h>
-#endif // MARISA_USE_SSSE3
-
-#ifdef MARISA_USE_SSE3
- #ifndef MARISA_USE_SSE2
- #define MARISA_USE_SSE2
- #endif // MARISA_USE_SSE2
-#endif // MARISA_USE_SSE3
-
-#ifdef MARISA_USE_SSE2
- #ifdef MARISA_X64
- #define MARISA_X64_SSE2
- #else // MARISA_X64
- #define MARISA_X86_SSE2
- #endif // MARISA_X64
- #include <emmintrin.h>
-#endif // MARISA_USE_SSE2
-
-#ifdef _MSC_VER
- #if MARISA_WORD_SIZE == 64
- #include <intrin.h>
- #pragma intrinsic(_BitScanForward64)
- #else // MARISA_WORD_SIZE == 64
- #include <intrin.h>
- #pragma intrinsic(_BitScanForward)
- #endif // MARISA_WORD_SIZE == 64
-#endif // _MSC_VER
-
-#endif // MARISA_GRIMOIRE_INTRIN_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io.h b/contrib/python/marisa-trie/marisa/grimoire/io.h
deleted file mode 100644
index 4de0110dbb..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_IO_H_
-#define MARISA_GRIMOIRE_IO_H_
-
-#include "io/mapper.h"
-#include "io/reader.h"
-#include "io/writer.h"
-
-namespace marisa {
-namespace grimoire {
-
-using io::Mapper;
-using io::Reader;
-using io::Writer;
-
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_IO_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/mapper.cc b/contrib/python/marisa-trie/marisa/grimoire/io/mapper.cc
deleted file mode 100644
index 9ed6ffc755..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/mapper.cc
+++ /dev/null
@@ -1,163 +0,0 @@
-#if (defined _WIN32) || (defined _WIN64)
- #include <sys/types.h>
- #include <sys/stat.h>
- #include <windows.h>
-#else // (defined _WIN32) || (defined _WIN64)
- #include <sys/mman.h>
- #include <sys/stat.h>
- #include <sys/types.h>
- #include <fcntl.h>
- #include <unistd.h>
-#endif // (defined _WIN32) || (defined _WIN64)
-
-#include "mapper.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-#if (defined _WIN32) || (defined _WIN64)
-Mapper::Mapper()
- : ptr_(NULL), origin_(NULL), avail_(0), size_(0),
- file_(NULL), map_(NULL) {}
-#else // (defined _WIN32) || (defined _WIN64)
-Mapper::Mapper()
- : ptr_(NULL), origin_(MAP_FAILED), avail_(0), size_(0), fd_(-1) {}
-#endif // (defined _WIN32) || (defined _WIN64)
-
-#if (defined _WIN32) || (defined _WIN64)
-Mapper::~Mapper() {
- if (origin_ != NULL) {
- ::UnmapViewOfFile(origin_);
- }
-
- if (map_ != NULL) {
- ::CloseHandle(map_);
- }
-
- if (file_ != NULL) {
- ::CloseHandle(file_);
- }
-}
-#else // (defined _WIN32) || (defined _WIN64)
-Mapper::~Mapper() {
- if (origin_ != MAP_FAILED) {
- ::munmap(origin_, size_);
- }
-
- if (fd_ != -1) {
- ::close(fd_);
- }
-}
-#endif // (defined _WIN32) || (defined _WIN64)
-
-void Mapper::open(const char *filename) {
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- Mapper temp;
- temp.open_(filename);
- swap(temp);
-}
-
-void Mapper::open(const void *ptr, std::size_t size) {
- MARISA_THROW_IF((ptr == NULL) && (size != 0), MARISA_NULL_ERROR);
-
- Mapper temp;
- temp.open_(ptr, size);
- swap(temp);
-}
-
-void Mapper::seek(std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- MARISA_THROW_IF(size > avail_, MARISA_IO_ERROR);
-
- map_data(size);
-}
-
-bool Mapper::is_open() const {
- return ptr_ != NULL;
-}
-
-void Mapper::clear() {
- Mapper().swap(*this);
-}
-
-void Mapper::swap(Mapper &rhs) {
- marisa::swap(ptr_, rhs.ptr_);
- marisa::swap(avail_, rhs.avail_);
- marisa::swap(origin_, rhs.origin_);
- marisa::swap(size_, rhs.size_);
-#if (defined _WIN32) || (defined _WIN64)
- marisa::swap(file_, rhs.file_);
- marisa::swap(map_, rhs.map_);
-#else // (defined _WIN32) || (defined _WIN64)
- marisa::swap(fd_, rhs.fd_);
-#endif // (defined _WIN32) || (defined _WIN64)
-}
-
-const void *Mapper::map_data(std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- MARISA_THROW_IF(size > avail_, MARISA_IO_ERROR);
-
- const char * const data = static_cast<const char *>(ptr_);
- ptr_ = data + size;
- avail_ -= size;
- return data;
-}
-
-#if (defined _WIN32) || (defined _WIN64)
- #ifdef __MSVCRT_VERSION__
- #if __MSVCRT_VERSION__ >= 0x0601
- #define MARISA_HAS_STAT64
- #endif // __MSVCRT_VERSION__ >= 0x0601
- #endif // __MSVCRT_VERSION__
-void Mapper::open_(const char *filename) {
- #ifdef MARISA_HAS_STAT64
- struct __stat64 st;
- MARISA_THROW_IF(::_stat64(filename, &st) != 0, MARISA_IO_ERROR);
- #else // MARISA_HAS_STAT64
- struct _stat st;
- MARISA_THROW_IF(::_stat(filename, &st) != 0, MARISA_IO_ERROR);
- #endif // MARISA_HAS_STAT64
- MARISA_THROW_IF((UInt64)st.st_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- size_ = (std::size_t)st.st_size;
-
- file_ = ::CreateFileA(filename, GENERIC_READ, FILE_SHARE_READ,
- NULL, OPEN_EXISTING, FILE_ATTRIBUTE_NORMAL, NULL);
- MARISA_THROW_IF(file_ == INVALID_HANDLE_VALUE, MARISA_IO_ERROR);
-
- map_ = ::CreateFileMapping(file_, NULL, PAGE_READONLY, 0, 0, NULL);
- MARISA_THROW_IF(map_ == NULL, MARISA_IO_ERROR);
-
- origin_ = ::MapViewOfFile(map_, FILE_MAP_READ, 0, 0, 0);
- MARISA_THROW_IF(origin_ == NULL, MARISA_IO_ERROR);
-
- ptr_ = static_cast<const char *>(origin_);
- avail_ = size_;
-}
-#else // (defined _WIN32) || (defined _WIN64)
-void Mapper::open_(const char *filename) {
- struct stat st;
- MARISA_THROW_IF(::stat(filename, &st) != 0, MARISA_IO_ERROR);
- MARISA_THROW_IF((UInt64)st.st_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- size_ = (std::size_t)st.st_size;
-
- fd_ = ::open(filename, O_RDONLY);
- MARISA_THROW_IF(fd_ == -1, MARISA_IO_ERROR);
-
- origin_ = ::mmap(NULL, size_, PROT_READ, MAP_SHARED, fd_, 0);
- MARISA_THROW_IF(origin_ == MAP_FAILED, MARISA_IO_ERROR);
-
- ptr_ = static_cast<const char *>(origin_);
- avail_ = size_;
-}
-#endif // (defined _WIN32) || (defined _WIN64)
-
-void Mapper::open_(const void *ptr, std::size_t size) {
- ptr_ = ptr;
- avail_ = size;
-}
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/mapper.h b/contrib/python/marisa-trie/marisa/grimoire/io/mapper.h
deleted file mode 100644
index e06072501d..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/mapper.h
+++ /dev/null
@@ -1,68 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_IO_MAPPER_H_
-#define MARISA_GRIMOIRE_IO_MAPPER_H_
-
-#include <cstdio>
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-class Mapper {
- public:
- Mapper();
- ~Mapper();
-
- void open(const char *filename);
- void open(const void *ptr, std::size_t size);
-
- template <typename T>
- void map(T *obj) {
- MARISA_THROW_IF(obj == NULL, MARISA_NULL_ERROR);
- *obj = *static_cast<const T *>(map_data(sizeof(T)));
- }
-
- template <typename T>
- void map(const T **objs, std::size_t num_objs) {
- MARISA_THROW_IF((objs == NULL) && (num_objs != 0), MARISA_NULL_ERROR);
- MARISA_THROW_IF(num_objs > (MARISA_SIZE_MAX / sizeof(T)),
- MARISA_SIZE_ERROR);
- *objs = static_cast<const T *>(map_data(sizeof(T) * num_objs));
- }
-
- void seek(std::size_t size);
-
- bool is_open() const;
-
- void clear();
- void swap(Mapper &rhs);
-
- private:
- const void *ptr_;
- void *origin_;
- std::size_t avail_;
- std::size_t size_;
-#if (defined _WIN32) || (defined _WIN64)
- void *file_;
- void *map_;
-#else // (defined _WIN32) || (defined _WIN64)
- int fd_;
-#endif // (defined _WIN32) || (defined _WIN64)
-
- void open_(const char *filename);
- void open_(const void *ptr, std::size_t size);
-
- const void *map_data(std::size_t size);
-
- // Disallows copy and assignment.
- Mapper(const Mapper &);
- Mapper &operator=(const Mapper &);
-};
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_IO_MAPPER_H_
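
Mapper either memory-maps a whole file (mmap / CreateFileMapping in the matching .cc earlier in this diff) or wraps an in-memory region, and then hands out typed, zero-copy views of consecutive chunks via map<T>(). A sketch using the in-memory overload so it runs without a file; the include path is illustrative:

    #include <cstdio>
    #include <cstring>
    #include "marisa/grimoire/io/mapper.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::io::Mapper;

      // An in-memory "file": one 32-bit word followed by four bytes.
      marisa::UInt32 buf[2];
      buf[0] = 0xDEADBEEFu;
      std::memcpy(&buf[1], "abcd", 4);

      Mapper mapper;
      mapper.open(buf, sizeof(buf));  // open(filename) would mmap instead

      marisa::UInt32 word = 0;
      mapper.map(&word);              // typed view of the first 4 bytes

      const char *rest = NULL;
      mapper.map(&rest, 4);           // zero-copy view of the next 4 bytes

      std::printf("word = 0x%08X, rest[0] = %c\n", (unsigned)word, rest[0]);
      return 0;
    }
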
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/reader.cc b/contrib/python/marisa-trie/marisa/grimoire/io/reader.cc
deleted file mode 100644
index cb22fcbd4a..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/reader.cc
+++ /dev/null
@@ -1,147 +0,0 @@
-#include <stdio.h>
-
-#ifdef _WIN32
- #include <io.h>
-#else // _WIN32
- #include <unistd.h>
-#endif // _WIN32
-
-#include <limits>
-
-#include "reader.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-Reader::Reader()
- : file_(NULL), fd_(-1), stream_(NULL), needs_fclose_(false) {}
-
-Reader::~Reader() {
- if (needs_fclose_) {
- ::fclose(file_);
- }
-}
-
-void Reader::open(const char *filename) {
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- Reader temp;
- temp.open_(filename);
- swap(temp);
-}
-
-void Reader::open(std::FILE *file) {
- MARISA_THROW_IF(file == NULL, MARISA_NULL_ERROR);
-
- Reader temp;
- temp.open_(file);
- swap(temp);
-}
-
-void Reader::open(int fd) {
- MARISA_THROW_IF(fd == -1, MARISA_CODE_ERROR);
-
- Reader temp;
- temp.open_(fd);
- swap(temp);
-}
-
-void Reader::open(std::istream &stream) {
- Reader temp;
- temp.open_(stream);
- swap(temp);
-}
-
-void Reader::clear() {
- Reader().swap(*this);
-}
-
-void Reader::swap(Reader &rhs) {
- marisa::swap(file_, rhs.file_);
- marisa::swap(fd_, rhs.fd_);
- marisa::swap(stream_, rhs.stream_);
- marisa::swap(needs_fclose_, rhs.needs_fclose_);
-}
-
-void Reader::seek(std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- if (size == 0) {
- return;
- } else if (size <= 16) {
- char buf[16];
- read_data(buf, size);
- } else {
- char buf[1024];
- while (size != 0) {
- const std::size_t count = (size < sizeof(buf)) ? size : sizeof(buf);
- read_data(buf, count);
- size -= count;
- }
- }
-}
-
-bool Reader::is_open() const {
- return (file_ != NULL) || (fd_ != -1) || (stream_ != NULL);
-}
-
-void Reader::open_(const char *filename) {
- std::FILE *file = NULL;
-#ifdef _MSC_VER
- MARISA_THROW_IF(::fopen_s(&file, filename, "rb") != 0, MARISA_IO_ERROR);
-#else // _MSC_VER
- file = ::fopen(filename, "rb");
- MARISA_THROW_IF(file == NULL, MARISA_IO_ERROR);
-#endif // _MSC_VER
- file_ = file;
- needs_fclose_ = true;
-}
-
-void Reader::open_(std::FILE *file) {
- file_ = file;
-}
-
-void Reader::open_(int fd) {
- fd_ = fd;
-}
-
-void Reader::open_(std::istream &stream) {
- stream_ = &stream;
-}
-
-void Reader::read_data(void *buf, std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- if (size == 0) {
- return;
- } else if (fd_ != -1) {
- while (size != 0) {
-#ifdef _WIN32
- static const std::size_t CHUNK_SIZE =
- std::numeric_limits<int>::max();
- const unsigned int count = (size < CHUNK_SIZE) ? size : CHUNK_SIZE;
- const int size_read = ::_read(fd_, buf, count);
-#else // _WIN32
- static const std::size_t CHUNK_SIZE =
- std::numeric_limits< ::ssize_t>::max();
- const ::size_t count = (size < CHUNK_SIZE) ? size : CHUNK_SIZE;
- const ::ssize_t size_read = ::read(fd_, buf, count);
-#endif // _WIN32
- MARISA_THROW_IF(size_read <= 0, MARISA_IO_ERROR);
- buf = static_cast<char *>(buf) + size_read;
- size -= size_read;
- }
- } else if (file_ != NULL) {
- MARISA_THROW_IF(::fread(buf, 1, size, file_) != size, MARISA_IO_ERROR);
- } else if (stream_ != NULL) {
- try {
- MARISA_THROW_IF(!stream_->read(static_cast<char *>(buf), size),
- MARISA_IO_ERROR);
- } catch (const std::ios_base::failure &) {
- MARISA_THROW(MARISA_IO_ERROR, "std::ios_base::failure");
- }
- }
-}
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/reader.h b/contrib/python/marisa-trie/marisa/grimoire/io/reader.h
deleted file mode 100644
index fc1ba5eea7..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/reader.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_IO_READER_H_
-#define MARISA_GRIMOIRE_IO_READER_H_
-
-#include <cstdio>
-#include <iostream>
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-class Reader {
- public:
- Reader();
- ~Reader();
-
- void open(const char *filename);
- void open(std::FILE *file);
- void open(int fd);
- void open(std::istream &stream);
-
- template <typename T>
- void read(T *obj) {
- MARISA_THROW_IF(obj == NULL, MARISA_NULL_ERROR);
- read_data(obj, sizeof(T));
- }
-
- template <typename T>
- void read(T *objs, std::size_t num_objs) {
- MARISA_THROW_IF((objs == NULL) && (num_objs != 0), MARISA_NULL_ERROR);
- MARISA_THROW_IF(num_objs > (MARISA_SIZE_MAX / sizeof(T)),
- MARISA_SIZE_ERROR);
- read_data(objs, sizeof(T) * num_objs);
- }
-
- void seek(std::size_t size);
-
- bool is_open() const;
-
- void clear();
- void swap(Reader &rhs);
-
- private:
- std::FILE *file_;
- int fd_;
- std::istream *stream_;
- bool needs_fclose_;
-
- void open_(const char *filename);
- void open_(std::FILE *file);
- void open_(int fd);
- void open_(std::istream &stream);
-
- void read_data(void *buf, std::size_t size);
-
- // Disallows copy and assignment.
- Reader(const Reader &);
- Reader &operator=(const Reader &);
-};
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_IO_READER_H_
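
Reader hides whether the input is a filename, FILE*, file descriptor, or std::istream behind the same read<T>()/seek() interface; a short or failed read throws MARISA_IO_ERROR instead of returning a count. A self-contained sketch using the std::istream overload; the include path is illustrative:

    #include <cstdio>
    #include <sstream>
    #include "marisa/grimoire/io/reader.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::io::Reader;

      // Build an in-memory "file" holding three 32-bit words.
      const marisa::UInt32 words[3] = {10, 20, 30};
      std::stringstream stream;
      stream.write(reinterpret_cast<const char *>(words), sizeof(words));

      Reader reader;
      reader.open(stream);                  // any std::istream works

      marisa::UInt32 first = 0, third = 0;
      reader.read(&first);                  // read one object
      reader.seek(sizeof(marisa::UInt32));  // skip the second word
      reader.read(&third);

      std::printf("%u %u\n", (unsigned)first, (unsigned)third);  // "10 30"
      return 0;
    }
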
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/writer.cc b/contrib/python/marisa-trie/marisa/grimoire/io/writer.cc
deleted file mode 100644
index 1f079d8ce6..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/writer.cc
+++ /dev/null
@@ -1,148 +0,0 @@
-#include <stdio.h>
-
-#ifdef _WIN32
- #include <io.h>
-#else // _WIN32
- #include <unistd.h>
-#endif // _WIN32
-
-#include <limits>
-
-#include "writer.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-Writer::Writer()
- : file_(NULL), fd_(-1), stream_(NULL), needs_fclose_(false) {}
-
-Writer::~Writer() {
- if (needs_fclose_) {
- ::fclose(file_);
- }
-}
-
-void Writer::open(const char *filename) {
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- Writer temp;
- temp.open_(filename);
- swap(temp);
-}
-
-void Writer::open(std::FILE *file) {
- MARISA_THROW_IF(file == NULL, MARISA_NULL_ERROR);
-
- Writer temp;
- temp.open_(file);
- swap(temp);
-}
-
-void Writer::open(int fd) {
- MARISA_THROW_IF(fd == -1, MARISA_CODE_ERROR);
-
- Writer temp;
- temp.open_(fd);
- swap(temp);
-}
-
-void Writer::open(std::ostream &stream) {
- Writer temp;
- temp.open_(stream);
- swap(temp);
-}
-
-void Writer::clear() {
- Writer().swap(*this);
-}
-
-void Writer::swap(Writer &rhs) {
- marisa::swap(file_, rhs.file_);
- marisa::swap(fd_, rhs.fd_);
- marisa::swap(stream_, rhs.stream_);
- marisa::swap(needs_fclose_, rhs.needs_fclose_);
-}
-
-void Writer::seek(std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- if (size == 0) {
- return;
- } else if (size <= 16) {
- const char buf[16] = {};
- write_data(buf, size);
- } else {
- const char buf[1024] = {};
- do {
- const std::size_t count = (size < sizeof(buf)) ? size : sizeof(buf);
- write_data(buf, count);
- size -= count;
- } while (size != 0);
- }
-}
-
-bool Writer::is_open() const {
- return (file_ != NULL) || (fd_ != -1) || (stream_ != NULL);
-}
-
-void Writer::open_(const char *filename) {
- std::FILE *file = NULL;
-#ifdef _MSC_VER
- MARISA_THROW_IF(::fopen_s(&file, filename, "wb") != 0, MARISA_IO_ERROR);
-#else // _MSC_VER
- file = ::fopen(filename, "wb");
- MARISA_THROW_IF(file == NULL, MARISA_IO_ERROR);
-#endif // _MSC_VER
- file_ = file;
- needs_fclose_ = true;
-}
-
-void Writer::open_(std::FILE *file) {
- file_ = file;
-}
-
-void Writer::open_(int fd) {
- fd_ = fd;
-}
-
-void Writer::open_(std::ostream &stream) {
- stream_ = &stream;
-}
-
-void Writer::write_data(const void *data, std::size_t size) {
- MARISA_THROW_IF(!is_open(), MARISA_STATE_ERROR);
- if (size == 0) {
- return;
- } else if (fd_ != -1) {
- while (size != 0) {
-#ifdef _WIN32
- static const std::size_t CHUNK_SIZE =
- std::numeric_limits<int>::max();
- const unsigned int count = (size < CHUNK_SIZE) ? size : CHUNK_SIZE;
- const int size_written = ::_write(fd_, data, count);
-#else // _WIN32
- static const std::size_t CHUNK_SIZE =
- std::numeric_limits< ::ssize_t>::max();
- const ::size_t count = (size < CHUNK_SIZE) ? size : CHUNK_SIZE;
- const ::ssize_t size_written = ::write(fd_, data, count);
-#endif // _WIN32
- MARISA_THROW_IF(size_written <= 0, MARISA_IO_ERROR);
- data = static_cast<const char *>(data) + size_written;
- size -= size_written;
- }
- } else if (file_ != NULL) {
- MARISA_THROW_IF(::fwrite(data, 1, size, file_) != size, MARISA_IO_ERROR);
- MARISA_THROW_IF(::fflush(file_) != 0, MARISA_IO_ERROR);
- } else if (stream_ != NULL) {
- try {
- MARISA_THROW_IF(!stream_->write(static_cast<const char *>(data), size),
- MARISA_IO_ERROR);
- } catch (const std::ios_base::failure &) {
- MARISA_THROW(MARISA_IO_ERROR, "std::ios_base::failure");
- }
- }
-}
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/io/writer.h b/contrib/python/marisa-trie/marisa/grimoire/io/writer.h
deleted file mode 100644
index 1707b23de2..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/io/writer.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_IO_WRITER_H_
-#define MARISA_GRIMOIRE_IO_WRITER_H_
-
-#include <cstdio>
-#include <iostream>
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace io {
-
-class Writer {
- public:
- Writer();
- ~Writer();
-
- void open(const char *filename);
- void open(std::FILE *file);
- void open(int fd);
- void open(std::ostream &stream);
-
- template <typename T>
- void write(const T &obj) {
- write_data(&obj, sizeof(T));
- }
-
- template <typename T>
- void write(const T *objs, std::size_t num_objs) {
- MARISA_THROW_IF((objs == NULL) && (num_objs != 0), MARISA_NULL_ERROR);
- MARISA_THROW_IF(num_objs > (MARISA_SIZE_MAX / sizeof(T)),
- MARISA_SIZE_ERROR);
- write_data(objs, sizeof(T) * num_objs);
- }
-
- void seek(std::size_t size);
-
- bool is_open() const;
-
- void clear();
- void swap(Writer &rhs);
-
- private:
- std::FILE *file_;
- int fd_;
- std::ostream *stream_;
- bool needs_fclose_;
-
- void open_(const char *filename);
- void open_(std::FILE *file);
- void open_(int fd);
- void open_(std::ostream &stream);
-
- void write_data(const void *data, std::size_t size);
-
- // Disallows copy and assignment.
- Writer(const Writer &);
- Writer &operator=(const Writer &);
-};
-
-} // namespace io
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_IO_WRITER_H_
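
Writer mirrors Reader on the output side: the same interface writes to a filename, FILE*, descriptor, or std::ostream, and seek() emits zero padding rather than repositioning. A sketch against a std::ostringstream so the byte count is easy to check; the include path is illustrative:

    #include <cstdio>
    #include <sstream>
    #include "marisa/grimoire/io/writer.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::io::Writer;

      std::ostringstream stream;
      Writer writer;
      writer.open(stream);               // also accepts a filename, FILE* or fd

      const marisa::UInt32 magic = 0x1234u;
      writer.write(magic);               // one object: 4 bytes
      writer.seek(12);                   // 12 bytes of zero padding
      const char tag[4] = {'t', 'r', 'i', 'e'};
      writer.write(tag, 4);              // array of 4 objects: 4 bytes

      std::printf("wrote %u bytes\n", (unsigned)stream.str().size());  // 20
      return 0;
    }
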
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie.h b/contrib/python/marisa-trie/marisa/grimoire/trie.h
deleted file mode 100644
index d23852a4fd..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie.h
+++ /dev/null
@@ -1,17 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_H_
-#define MARISA_GRIMOIRE_TRIE_H_
-
-#include "trie/state.h"
-#include "trie/louds-trie.h"
-
-namespace marisa {
-namespace grimoire {
-
-using trie::State;
-using trie::LoudsTrie;
-
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/cache.h b/contrib/python/marisa-trie/marisa/grimoire/trie/cache.h
deleted file mode 100644
index f9da360869..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/cache.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_CACHE_H_
-#define MARISA_GRIMOIRE_TRIE_CACHE_H_
-
-#include <cfloat>
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Cache {
- public:
- Cache() : parent_(0), child_(0), union_() {
- union_.weight = FLT_MIN;
- }
- Cache(const Cache &cache)
- : parent_(cache.parent_), child_(cache.child_), union_(cache.union_) {}
-
- Cache &operator=(const Cache &cache) {
- parent_ = cache.parent_;
- child_ = cache.child_;
- union_ = cache.union_;
- return *this;
- }
-
- void set_parent(std::size_t parent) {
- MARISA_DEBUG_IF(parent > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- parent_ = (UInt32)parent;
- }
- void set_child(std::size_t child) {
- MARISA_DEBUG_IF(child > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- child_ = (UInt32)child;
- }
- void set_base(UInt8 base) {
- union_.link = (union_.link & ~0xFFU) | base;
- }
- void set_extra(std::size_t extra) {
- MARISA_DEBUG_IF(extra > (MARISA_UINT32_MAX >> 8), MARISA_SIZE_ERROR);
- union_.link = (UInt32)((union_.link & 0xFFU) | (extra << 8));
- }
- void set_weight(float weight) {
- union_.weight = weight;
- }
-
- std::size_t parent() const {
- return parent_;
- }
- std::size_t child() const {
- return child_;
- }
- UInt8 base() const {
- return (UInt8)(union_.link & 0xFFU);
- }
- std::size_t extra() const {
- return union_.link >> 8;
- }
- char label() const {
- return (char)base();
- }
- std::size_t link() const {
- return union_.link;
- }
- float weight() const {
- return union_.weight;
- }
-
- private:
- UInt32 parent_;
- UInt32 child_;
- union Union {
- UInt32 link;
- float weight;
- } union_;
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_CACHE_H_
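
Cache squeezes a one-byte base label plus a 24-bit extra into the same 32-bit link field, and that field in turn shares a union with the float weight used while building. A sketch of the bit layout implied by set_base()/set_extra()/link(); the values are arbitrary and the include path is illustrative:

    #include <cassert>
    #include "marisa/grimoire/trie/cache.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::trie::Cache;

      Cache cache;
      cache.set_parent(7);
      cache.set_child(42);
      cache.set_base('a');      // low 8 bits of link
      cache.set_extra(0x1234);  // remaining bits, shifted left by 8

      assert(cache.parent() == 7);
      assert(cache.child() == 42);
      assert(cache.label() == 'a');
      assert(cache.extra() == 0x1234);
      assert(cache.link() == ((0x1234u << 8) | (unsigned char)'a'));

      // Setting the weight reuses the same union storage, so link() is only
      // meaningful while the entry is used as a link, and vice versa.
      cache.set_weight(1.5f);
      assert(cache.weight() == 1.5f);
      return 0;
    }
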
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/config.h b/contrib/python/marisa-trie/marisa/grimoire/trie/config.h
deleted file mode 100644
index 9b307de3e1..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/config.h
+++ /dev/null
@@ -1,156 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_CONFIG_H_
-#define MARISA_GRIMOIRE_TRIE_CONFIG_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Config {
- public:
- Config()
- : num_tries_(MARISA_DEFAULT_NUM_TRIES),
- cache_level_(MARISA_DEFAULT_CACHE),
- tail_mode_(MARISA_DEFAULT_TAIL),
- node_order_(MARISA_DEFAULT_ORDER) {}
-
- void parse(int config_flags) {
- Config temp;
- temp.parse_(config_flags);
- swap(temp);
- }
-
- int flags() const {
- return (int)num_tries_ | tail_mode_ | node_order_;
- }
-
- std::size_t num_tries() const {
- return num_tries_;
- }
- CacheLevel cache_level() const {
- return cache_level_;
- }
- TailMode tail_mode() const {
- return tail_mode_;
- }
- NodeOrder node_order() const {
- return node_order_;
- }
-
- void clear() {
- Config().swap(*this);
- }
- void swap(Config &rhs) {
- marisa::swap(num_tries_, rhs.num_tries_);
- marisa::swap(cache_level_, rhs.cache_level_);
- marisa::swap(tail_mode_, rhs.tail_mode_);
- marisa::swap(node_order_, rhs.node_order_);
- }
-
- private:
- std::size_t num_tries_;
- CacheLevel cache_level_;
- TailMode tail_mode_;
- NodeOrder node_order_;
-
- void parse_(int config_flags) {
- MARISA_THROW_IF((config_flags & ~MARISA_CONFIG_MASK) != 0,
- MARISA_CODE_ERROR);
-
- parse_num_tries(config_flags);
- parse_cache_level(config_flags);
- parse_tail_mode(config_flags);
- parse_node_order(config_flags);
- }
-
- void parse_num_tries(int config_flags) {
- const int num_tries = config_flags & MARISA_NUM_TRIES_MASK;
- if (num_tries != 0) {
- num_tries_ = num_tries;
- }
- }
-
- void parse_cache_level(int config_flags) {
- switch (config_flags & MARISA_CACHE_LEVEL_MASK) {
- case 0: {
- cache_level_ = MARISA_DEFAULT_CACHE;
- break;
- }
- case MARISA_HUGE_CACHE: {
- cache_level_ = MARISA_HUGE_CACHE;
- break;
- }
- case MARISA_LARGE_CACHE: {
- cache_level_ = MARISA_LARGE_CACHE;
- break;
- }
- case MARISA_NORMAL_CACHE: {
- cache_level_ = MARISA_NORMAL_CACHE;
- break;
- }
- case MARISA_SMALL_CACHE: {
- cache_level_ = MARISA_SMALL_CACHE;
- break;
- }
- case MARISA_TINY_CACHE: {
- cache_level_ = MARISA_TINY_CACHE;
- break;
- }
- default: {
- MARISA_THROW(MARISA_CODE_ERROR, "undefined cache level");
- }
- }
- }
-
- void parse_tail_mode(int config_flags) {
- switch (config_flags & MARISA_TAIL_MODE_MASK) {
- case 0: {
- tail_mode_ = MARISA_DEFAULT_TAIL;
- break;
- }
- case MARISA_TEXT_TAIL: {
- tail_mode_ = MARISA_TEXT_TAIL;
- break;
- }
- case MARISA_BINARY_TAIL: {
- tail_mode_ = MARISA_BINARY_TAIL;
- break;
- }
- default: {
- MARISA_THROW(MARISA_CODE_ERROR, "undefined tail mode");
- }
- }
- }
-
- void parse_node_order(int config_flags) {
- switch (config_flags & MARISA_NODE_ORDER_MASK) {
- case 0: {
- node_order_ = MARISA_DEFAULT_ORDER;
- break;
- }
- case MARISA_LABEL_ORDER: {
- node_order_ = MARISA_LABEL_ORDER;
- break;
- }
- case MARISA_WEIGHT_ORDER: {
- node_order_ = MARISA_WEIGHT_ORDER;
- break;
- }
- default: {
- MARISA_THROW(MARISA_CODE_ERROR, "undefined node order");
- }
- }
- }
-
- // Disallows copy and assignment.
- Config(const Config &);
- Config &operator=(const Config &);
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_CONFIG_H_
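
Config::parse() is the decoder for the flag word described in base.h: each field is masked out, a zero field falls back to its default, and anything outside MARISA_CONFIG_MASK (or an unrecognized value inside a field) raises MARISA_CODE_ERROR. A sketch of that round trip; Config is an internal class, so this is illustrative only and the include path is a guess:

    #include <cstdio>
    #include "marisa/grimoire/trie/config.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::trie::Config;

      Config config;
      // Two tries and a binary TAIL; cache level and node order are left
      // unspecified and therefore fall back to their defaults.
      config.parse(2 | MARISA_BINARY_TAIL);

      std::printf("num tries   = %u\n", (unsigned)config.num_tries());    // 2
      std::printf("tail mode   = 0x%05X\n", (unsigned)config.tail_mode());
      std::printf("node order  = 0x%05X\n", (unsigned)config.node_order());
      std::printf("cache level = 0x%05X\n", (unsigned)config.cache_level());
      return 0;
    }
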
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/entry.h b/contrib/python/marisa-trie/marisa/grimoire/trie/entry.h
deleted file mode 100644
index 834ab95e1e..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/entry.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_ENTRY_H_
-#define MARISA_GRIMOIRE_TRIE_ENTRY_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Entry {
- public:
- Entry()
- : ptr_(reinterpret_cast<const char *>(-1)), length_(0), id_(0) {}
- Entry(const Entry &entry)
- : ptr_(entry.ptr_), length_(entry.length_), id_(entry.id_) {}
-
- Entry &operator=(const Entry &entry) {
- ptr_ = entry.ptr_;
- length_ = entry.length_;
- id_ = entry.id_;
- return *this;
- }
-
- char operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= length_, MARISA_BOUND_ERROR);
- return *(ptr_ - i);
- }
-
- void set_str(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- ptr_ = ptr + length - 1;
- length_ = (UInt32)length;
- }
- void set_id(std::size_t id) {
- MARISA_DEBUG_IF(id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- id_ = (UInt32)id;
- }
-
- const char *ptr() const {
- return ptr_ - length_ + 1;
- }
- std::size_t length() const {
- return length_;
- }
- std::size_t id() const {
- return id_;
- }
-
- class StringComparer {
- public:
- bool operator()(const Entry &lhs, const Entry &rhs) const {
- for (std::size_t i = 0; i < lhs.length(); ++i) {
- if (i == rhs.length()) {
- return true;
- }
- if (lhs[i] != rhs[i]) {
- return (UInt8)lhs[i] > (UInt8)rhs[i];
- }
- }
- return lhs.length() > rhs.length();
- }
- };
-
- class IDComparer {
- public:
- bool operator()(const Entry &lhs, const Entry &rhs) const {
- return lhs.id_ < rhs.id_;
- }
- };
-
- private:
- const char *ptr_;
- UInt32 length_;
- UInt32 id_;
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_ENTRY_H_
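
Entry keeps a pointer to the last byte of its string and operator[] walks backwards, so the comparers defined above effectively compare keys from their ends. A tiny sketch of that reversed indexing; the include path is illustrative:

    #include <cassert>
    #include <cstring>
    #include "marisa/grimoire/trie/entry.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::trie::Entry;

      const char *str = "abc";
      Entry entry;
      entry.set_str(str, std::strlen(str));

      // operator[] indexes from the end of the string toward the front.
      assert(entry[0] == 'c');
      assert(entry[1] == 'b');
      assert(entry[2] == 'a');
      // ptr() recovers the original forward pointer.
      assert(entry.ptr() == str);
      assert(entry.length() == 3);
      return 0;
    }
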
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/header.h b/contrib/python/marisa-trie/marisa/grimoire/trie/header.h
deleted file mode 100644
index 04839f67e1..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/header.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_HEADER_H_
-#define MARISA_GRIMOIRE_TRIE_HEADER_H_
-
-#include "../io.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Header {
- public:
- enum {
- HEADER_SIZE = 16
- };
-
- Header() {}
-
- void map(Mapper &mapper) {
- const char *ptr;
- mapper.map(&ptr, HEADER_SIZE);
- MARISA_THROW_IF(!test_header(ptr), MARISA_FORMAT_ERROR);
- }
- void read(Reader &reader) {
- char buf[HEADER_SIZE];
- reader.read(buf, HEADER_SIZE);
- MARISA_THROW_IF(!test_header(buf), MARISA_FORMAT_ERROR);
- }
- void write(Writer &writer) const {
- writer.write(get_header(), HEADER_SIZE);
- }
-
- std::size_t io_size() const {
- return HEADER_SIZE;
- }
-
- private:
-
- static const char *get_header() {
- static const char buf[HEADER_SIZE] = "We love Marisa.";
- return buf;
- }
-
- static bool test_header(const char *ptr) {
- for (std::size_t i = 0; i < HEADER_SIZE; ++i) {
- if (ptr[i] != get_header()[i]) {
- return false;
- }
- }
- return true;
- }
-
- // Disallows copy and assignment.
- Header(const Header &);
- Header &operator=(const Header &);
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_HEADER_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/history.h b/contrib/python/marisa-trie/marisa/grimoire/trie/history.h
deleted file mode 100644
index 9a3d272260..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/history.h
+++ /dev/null
@@ -1,66 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_STATE_HISTORY_H_
-#define MARISA_GRIMOIRE_TRIE_STATE_HISTORY_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class History {
- public:
- History()
- : node_id_(0), louds_pos_(0), key_pos_(0),
- link_id_(MARISA_INVALID_LINK_ID), key_id_(MARISA_INVALID_KEY_ID) {}
-
- void set_node_id(std::size_t node_id) {
- MARISA_DEBUG_IF(node_id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- node_id_ = (UInt32)node_id;
- }
- void set_louds_pos(std::size_t louds_pos) {
- MARISA_DEBUG_IF(louds_pos > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- louds_pos_ = (UInt32)louds_pos;
- }
- void set_key_pos(std::size_t key_pos) {
- MARISA_DEBUG_IF(key_pos > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- key_pos_ = (UInt32)key_pos;
- }
- void set_link_id(std::size_t link_id) {
- MARISA_DEBUG_IF(link_id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- link_id_ = (UInt32)link_id;
- }
- void set_key_id(std::size_t key_id) {
- MARISA_DEBUG_IF(key_id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- key_id_ = (UInt32)key_id;
- }
-
- std::size_t node_id() const {
- return node_id_;
- }
- std::size_t louds_pos() const {
- return louds_pos_;
- }
- std::size_t key_pos() const {
- return key_pos_;
- }
- std::size_t link_id() const {
- return link_id_;
- }
- std::size_t key_id() const {
- return key_id_;
- }
-
- private:
- UInt32 node_id_;
- UInt32 louds_pos_;
- UInt32 key_pos_;
- UInt32 link_id_;
- UInt32 key_id_;
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_STATE_HISTORY_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/key.h b/contrib/python/marisa-trie/marisa/grimoire/trie/key.h
deleted file mode 100644
index c09ea86cf8..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/key.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_KEY_H_
-#define MARISA_GRIMOIRE_TRIE_KEY_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Key {
- public:
- Key() : ptr_(NULL), length_(0), union_(), id_(0) {
- union_.terminal = 0;
- }
- Key(const Key &entry)
- : ptr_(entry.ptr_), length_(entry.length_),
- union_(entry.union_), id_(entry.id_) {}
-
- Key &operator=(const Key &entry) {
- ptr_ = entry.ptr_;
- length_ = entry.length_;
- union_ = entry.union_;
- id_ = entry.id_;
- return *this;
- }
-
- char operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= length_, MARISA_BOUND_ERROR);
- return ptr_[i];
- }
-
- void substr(std::size_t pos, std::size_t length) {
- MARISA_DEBUG_IF(pos > length_, MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(length > length_, MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(pos > (length_ - length), MARISA_BOUND_ERROR);
- ptr_ += pos;
- length_ = (UInt32)length;
- }
-
- void set_str(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- ptr_ = ptr;
- length_ = (UInt32)length;
- }
- void set_weight(float weight) {
- union_.weight = weight;
- }
- void set_terminal(std::size_t terminal) {
- MARISA_DEBUG_IF(terminal > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- union_.terminal = (UInt32)terminal;
- }
- void set_id(std::size_t id) {
- MARISA_DEBUG_IF(id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- id_ = (UInt32)id;
- }
-
- const char *ptr() const {
- return ptr_;
- }
- std::size_t length() const {
- return length_;
- }
- float weight() const {
- return union_.weight;
- }
- std::size_t terminal() const {
- return union_.terminal;
- }
- std::size_t id() const {
- return id_;
- }
-
- private:
- const char *ptr_;
- UInt32 length_;
- union Union {
- float weight;
- UInt32 terminal;
- } union_;
- UInt32 id_;
-};
-
-inline bool operator==(const Key &lhs, const Key &rhs) {
- if (lhs.length() != rhs.length()) {
- return false;
- }
- for (std::size_t i = 0; i < lhs.length(); ++i) {
- if (lhs[i] != rhs[i]) {
- return false;
- }
- }
- return true;
-}
-
-inline bool operator!=(const Key &lhs, const Key &rhs) {
- return !(lhs == rhs);
-}
-
-inline bool operator<(const Key &lhs, const Key &rhs) {
- for (std::size_t i = 0; i < lhs.length(); ++i) {
- if (i == rhs.length()) {
- return false;
- }
- if (lhs[i] != rhs[i]) {
- return (UInt8)lhs[i] < (UInt8)rhs[i];
- }
- }
- return lhs.length() < rhs.length();
-}
-
-inline bool operator>(const Key &lhs, const Key &rhs) {
- return rhs < lhs;
-}
-
-class ReverseKey {
- public:
- ReverseKey() : ptr_(NULL), length_(0), union_(), id_(0) {
- union_.terminal = 0;
- }
- ReverseKey(const ReverseKey &entry)
- : ptr_(entry.ptr_), length_(entry.length_),
- union_(entry.union_), id_(entry.id_) {}
-
- ReverseKey &operator=(const ReverseKey &entry) {
- ptr_ = entry.ptr_;
- length_ = entry.length_;
- union_ = entry.union_;
- id_ = entry.id_;
- return *this;
- }
-
- char operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= length_, MARISA_BOUND_ERROR);
- return *(ptr_ - i - 1);
- }
-
- void substr(std::size_t pos, std::size_t length) {
- MARISA_DEBUG_IF(pos > length_, MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(length > length_, MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(pos > (length_ - length), MARISA_BOUND_ERROR);
- ptr_ -= pos;
- length_ = (UInt32)length;
- }
-
- void set_str(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- ptr_ = ptr + length;
- length_ = (UInt32)length;
- }
- void set_weight(float weight) {
- union_.weight = weight;
- }
- void set_terminal(std::size_t terminal) {
- MARISA_DEBUG_IF(terminal > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- union_.terminal = (UInt32)terminal;
- }
- void set_id(std::size_t id) {
- MARISA_DEBUG_IF(id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- id_ = (UInt32)id;
- }
-
- const char *ptr() const {
- return ptr_ - length_;
- }
- std::size_t length() const {
- return length_;
- }
- float weight() const {
- return union_.weight;
- }
- std::size_t terminal() const {
- return union_.terminal;
- }
- std::size_t id() const {
- return id_;
- }
-
- private:
- const char *ptr_;
- UInt32 length_;
- union Union {
- float weight;
- UInt32 terminal;
- } union_;
- UInt32 id_;
-};
-
-inline bool operator==(const ReverseKey &lhs, const ReverseKey &rhs) {
- if (lhs.length() != rhs.length()) {
- return false;
- }
- for (std::size_t i = 0; i < lhs.length(); ++i) {
- if (lhs[i] != rhs[i]) {
- return false;
- }
- }
- return true;
-}
-
-inline bool operator!=(const ReverseKey &lhs, const ReverseKey &rhs) {
- return !(lhs == rhs);
-}
-
-inline bool operator<(const ReverseKey &lhs, const ReverseKey &rhs) {
- for (std::size_t i = 0; i < lhs.length(); ++i) {
- if (i == rhs.length()) {
- return false;
- }
- if (lhs[i] != rhs[i]) {
- return (UInt8)lhs[i] < (UInt8)rhs[i];
- }
- }
- return lhs.length() < rhs.length();
-}
-
-inline bool operator>(const ReverseKey &lhs, const ReverseKey &rhs) {
- return rhs < lhs;
-}
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_KEY_H_
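
Key and ReverseKey expose the same interface but index in opposite directions: Key reads forward from its pointer while ReverseKey stores a past-the-end pointer and reads backward, which lets the templated trie builder reuse one code path for normal and reversed keys. A tiny sketch of the two indexing schemes; the include path is illustrative:

    #include <cassert>
    #include <cstring>
    #include "marisa/grimoire/trie/key.h"  // illustrative path to the removed header

    int main() {
      using marisa::grimoire::trie::Key;
      using marisa::grimoire::trie::ReverseKey;

      const char *str = "abc";

      Key key;
      key.set_str(str, std::strlen(str));
      assert(key[0] == 'a' && key[2] == 'c');    // forward indexing

      ReverseKey rkey;
      rkey.set_str(str, std::strlen(str));
      assert(rkey[0] == 'c' && rkey[2] == 'a');  // backward indexing
      assert(rkey.ptr() == str);                 // ptr() undoes the offset

      // substr() narrows the view in each class's native direction.
      key.substr(1, 2);                          // now "bc"
      assert(key[0] == 'b');
      return 0;
    }
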
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.cc b/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.cc
deleted file mode 100644
index ed168539bc..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.cc
+++ /dev/null
@@ -1,877 +0,0 @@
-#include <algorithm>
-#include <functional>
-#include <queue>
-
-#include "../algorithm.h"
-#include "header.h"
-#include "range.h"
-#include "state.h"
-#include "louds-trie.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-LoudsTrie::LoudsTrie()
- : louds_(), terminal_flags_(), link_flags_(), bases_(), extras_(),
- tail_(), next_trie_(), cache_(), cache_mask_(0), num_l1_nodes_(0),
- config_(), mapper_() {}
-
-LoudsTrie::~LoudsTrie() {}
-
-void LoudsTrie::build(Keyset &keyset, int flags) {
- Config config;
- config.parse(flags);
-
- LoudsTrie temp;
- temp.build_(keyset, config);
- swap(temp);
-}
-
-void LoudsTrie::map(Mapper &mapper) {
- Header().map(mapper);
-
- LoudsTrie temp;
- temp.map_(mapper);
- temp.mapper_.swap(mapper);
- swap(temp);
-}
-
-void LoudsTrie::read(Reader &reader) {
- Header().read(reader);
-
- LoudsTrie temp;
- temp.read_(reader);
- swap(temp);
-}
-
-void LoudsTrie::write(Writer &writer) const {
- Header().write(writer);
-
- write_(writer);
-}
-
-bool LoudsTrie::lookup(Agent &agent) const {
- MARISA_DEBUG_IF(!agent.has_state(), MARISA_STATE_ERROR);
-
- State &state = agent.state();
- state.lookup_init();
- while (state.query_pos() < agent.query().length()) {
- if (!find_child(agent)) {
- return false;
- }
- }
- if (!terminal_flags_[state.node_id()]) {
- return false;
- }
- agent.set_key(agent.query().ptr(), agent.query().length());
- agent.set_key(terminal_flags_.rank1(state.node_id()));
- return true;
-}
-
-void LoudsTrie::reverse_lookup(Agent &agent) const {
- MARISA_DEBUG_IF(!agent.has_state(), MARISA_STATE_ERROR);
- MARISA_THROW_IF(agent.query().id() >= size(), MARISA_BOUND_ERROR);
-
- State &state = agent.state();
- state.reverse_lookup_init();
-
- state.set_node_id(terminal_flags_.select1(agent.query().id()));
- if (state.node_id() == 0) {
- agent.set_key(state.key_buf().begin(), state.key_buf().size());
- agent.set_key(agent.query().id());
- return;
- }
- for ( ; ; ) {
- if (link_flags_[state.node_id()]) {
- const std::size_t prev_key_pos = state.key_buf().size();
- restore(agent, get_link(state.node_id()));
- std::reverse(state.key_buf().begin() + prev_key_pos,
- state.key_buf().end());
- } else {
- state.key_buf().push_back((char)bases_[state.node_id()]);
- }
-
- if (state.node_id() <= num_l1_nodes_) {
- std::reverse(state.key_buf().begin(), state.key_buf().end());
- agent.set_key(state.key_buf().begin(), state.key_buf().size());
- agent.set_key(agent.query().id());
- return;
- }
- state.set_node_id(louds_.select1(state.node_id()) - state.node_id() - 1);
- }
-}
-
-bool LoudsTrie::common_prefix_search(Agent &agent) const {
- MARISA_DEBUG_IF(!agent.has_state(), MARISA_STATE_ERROR);
-
- State &state = agent.state();
- if (state.status_code() == MARISA_END_OF_COMMON_PREFIX_SEARCH) {
- return false;
- }
-
- if (state.status_code() != MARISA_READY_TO_COMMON_PREFIX_SEARCH) {
- state.common_prefix_search_init();
- if (terminal_flags_[state.node_id()]) {
- agent.set_key(agent.query().ptr(), state.query_pos());
- agent.set_key(terminal_flags_.rank1(state.node_id()));
- return true;
- }
- }
-
- while (state.query_pos() < agent.query().length()) {
- if (!find_child(agent)) {
- state.set_status_code(MARISA_END_OF_COMMON_PREFIX_SEARCH);
- return false;
- } else if (terminal_flags_[state.node_id()]) {
- agent.set_key(agent.query().ptr(), state.query_pos());
- agent.set_key(terminal_flags_.rank1(state.node_id()));
- return true;
- }
- }
- state.set_status_code(MARISA_END_OF_COMMON_PREFIX_SEARCH);
- return false;
-}
-
-bool LoudsTrie::predictive_search(Agent &agent) const {
- MARISA_DEBUG_IF(!agent.has_state(), MARISA_STATE_ERROR);
-
- State &state = agent.state();
- if (state.status_code() == MARISA_END_OF_PREDICTIVE_SEARCH) {
- return false;
- }
-
- if (state.status_code() != MARISA_READY_TO_PREDICTIVE_SEARCH) {
- state.predictive_search_init();
- while (state.query_pos() < agent.query().length()) {
- if (!predictive_find_child(agent)) {
- state.set_status_code(MARISA_END_OF_PREDICTIVE_SEARCH);
- return false;
- }
- }
-
- History history;
- history.set_node_id(state.node_id());
- history.set_key_pos(state.key_buf().size());
- state.history().push_back(history);
- state.set_history_pos(1);
-
- if (terminal_flags_[state.node_id()]) {
- agent.set_key(state.key_buf().begin(), state.key_buf().size());
- agent.set_key(terminal_flags_.rank1(state.node_id()));
- return true;
- }
- }
-
- for ( ; ; ) {
- if (state.history_pos() == state.history().size()) {
- const History &current = state.history().back();
- History next;
- next.set_louds_pos(louds_.select0(current.node_id()) + 1);
- next.set_node_id(next.louds_pos() - current.node_id() - 1);
- state.history().push_back(next);
- }
-
- History &next = state.history()[state.history_pos()];
- const bool link_flag = louds_[next.louds_pos()];
- next.set_louds_pos(next.louds_pos() + 1);
- if (link_flag) {
- state.set_history_pos(state.history_pos() + 1);
- if (link_flags_[next.node_id()]) {
- next.set_link_id(update_link_id(next.link_id(), next.node_id()));
- restore(agent, get_link(next.node_id(), next.link_id()));
- } else {
- state.key_buf().push_back((char)bases_[next.node_id()]);
- }
- next.set_key_pos(state.key_buf().size());
-
- if (terminal_flags_[next.node_id()]) {
- if (next.key_id() == MARISA_INVALID_KEY_ID) {
- next.set_key_id(terminal_flags_.rank1(next.node_id()));
- } else {
- next.set_key_id(next.key_id() + 1);
- }
- agent.set_key(state.key_buf().begin(), state.key_buf().size());
- agent.set_key(next.key_id());
- return true;
- }
- } else if (state.history_pos() != 1) {
- History &current = state.history()[state.history_pos() - 1];
- current.set_node_id(current.node_id() + 1);
- const History &prev =
- state.history()[state.history_pos() - 2];
- state.key_buf().resize(prev.key_pos());
- state.set_history_pos(state.history_pos() - 1);
- } else {
- state.set_status_code(MARISA_END_OF_PREDICTIVE_SEARCH);
- return false;
- }
- }
-}
-
-std::size_t LoudsTrie::total_size() const {
- return louds_.total_size() + terminal_flags_.total_size()
- + link_flags_.total_size() + bases_.total_size()
- + extras_.total_size() + tail_.total_size()
- + ((next_trie_.get() != NULL) ? next_trie_->total_size() : 0)
- + cache_.total_size();
-}
-
-std::size_t LoudsTrie::io_size() const {
- return Header().io_size() + louds_.io_size()
- + terminal_flags_.io_size() + link_flags_.io_size()
- + bases_.io_size() + extras_.io_size() + tail_.io_size()
- + ((next_trie_.get() != NULL) ?
- (next_trie_->io_size() - Header().io_size()) : 0)
- + cache_.io_size() + (sizeof(UInt32) * 2);
-}
-
-void LoudsTrie::clear() {
- LoudsTrie().swap(*this);
-}
-
-void LoudsTrie::swap(LoudsTrie &rhs) {
- louds_.swap(rhs.louds_);
- terminal_flags_.swap(rhs.terminal_flags_);
- link_flags_.swap(rhs.link_flags_);
- bases_.swap(rhs.bases_);
- extras_.swap(rhs.extras_);
- tail_.swap(rhs.tail_);
- next_trie_.swap(rhs.next_trie_);
- cache_.swap(rhs.cache_);
- marisa::swap(cache_mask_, rhs.cache_mask_);
- marisa::swap(num_l1_nodes_, rhs.num_l1_nodes_);
- config_.swap(rhs.config_);
- mapper_.swap(rhs.mapper_);
-}
-
-void LoudsTrie::build_(Keyset &keyset, const Config &config) {
- Vector<Key> keys;
- keys.resize(keyset.size());
- for (std::size_t i = 0; i < keyset.size(); ++i) {
- keys[i].set_str(keyset[i].ptr(), keyset[i].length());
- keys[i].set_weight(keyset[i].weight());
- }
-
- Vector<UInt32> terminals;
- build_trie(keys, &terminals, config, 1);
-
- typedef std::pair<UInt32, UInt32> TerminalIdPair;
-
- Vector<TerminalIdPair> pairs;
- pairs.resize(terminals.size());
- for (std::size_t i = 0; i < pairs.size(); ++i) {
- pairs[i].first = terminals[i];
- pairs[i].second = (UInt32)i;
- }
- terminals.clear();
- std::sort(pairs.begin(), pairs.end());
-
- std::size_t node_id = 0;
- for (std::size_t i = 0; i < pairs.size(); ++i) {
- while (node_id < pairs[i].first) {
- terminal_flags_.push_back(false);
- ++node_id;
- }
- if (node_id == pairs[i].first) {
- terminal_flags_.push_back(true);
- ++node_id;
- }
- }
- while (node_id < bases_.size()) {
- terminal_flags_.push_back(false);
- ++node_id;
- }
- terminal_flags_.push_back(false);
- terminal_flags_.build(false, true);
-
- for (std::size_t i = 0; i < keyset.size(); ++i) {
- keyset[pairs[i].second].set_id(terminal_flags_.rank1(pairs[i].first));
- }
-}
-
-template <typename T>
-void LoudsTrie::build_trie(Vector<T> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id) {
- build_current_trie(keys, terminals, config, trie_id);
-
- Vector<UInt32> next_terminals;
- if (!keys.empty()) {
- build_next_trie(keys, &next_terminals, config, trie_id);
- }
-
- if (next_trie_.get() != NULL) {
- config_.parse(static_cast<int>((next_trie_->num_tries() + 1)) |
- next_trie_->tail_mode() | next_trie_->node_order());
- } else {
- config_.parse(1 | tail_.mode() | config.node_order() |
- config.cache_level());
- }
-
- link_flags_.build(false, false);
- std::size_t node_id = 0;
- for (std::size_t i = 0; i < next_terminals.size(); ++i) {
- while (!link_flags_[node_id]) {
- ++node_id;
- }
- bases_[node_id] = (UInt8)(next_terminals[i] % 256);
- next_terminals[i] /= 256;
- ++node_id;
- }
- extras_.build(next_terminals);
- fill_cache();
-}
-
-template <typename T>
-void LoudsTrie::build_current_trie(Vector<T> &keys,
- Vector<UInt32> *terminals, const Config &config,
- std::size_t trie_id) try {
- for (std::size_t i = 0; i < keys.size(); ++i) {
- keys[i].set_id(i);
- }
- const std::size_t num_keys = Algorithm().sort(keys.begin(), keys.end());
- reserve_cache(config, trie_id, num_keys);
-
- louds_.push_back(true);
- louds_.push_back(false);
- bases_.push_back('\0');
- link_flags_.push_back(false);
-
- Vector<T> next_keys;
- std::queue<Range> queue;
- Vector<WeightedRange> w_ranges;
-
- queue.push(make_range(0, keys.size(), 0));
- while (!queue.empty()) {
- const std::size_t node_id = link_flags_.size() - queue.size();
-
- Range range = queue.front();
- queue.pop();
-
- while ((range.begin() < range.end()) &&
- (keys[range.begin()].length() == range.key_pos())) {
- keys[range.begin()].set_terminal(node_id);
- range.set_begin(range.begin() + 1);
- }
-
- if (range.begin() == range.end()) {
- louds_.push_back(false);
- continue;
- }
-
- w_ranges.clear();
- double weight = keys[range.begin()].weight();
- for (std::size_t i = range.begin() + 1; i < range.end(); ++i) {
- if (keys[i - 1][range.key_pos()] != keys[i][range.key_pos()]) {
- w_ranges.push_back(make_weighted_range(
- range.begin(), i, range.key_pos(), (float)weight));
- range.set_begin(i);
- weight = 0.0;
- }
- weight += keys[i].weight();
- }
- w_ranges.push_back(make_weighted_range(
- range.begin(), range.end(), range.key_pos(), (float)weight));
- if (config.node_order() == MARISA_WEIGHT_ORDER) {
- std::stable_sort(w_ranges.begin(), w_ranges.end(),
- std::greater<WeightedRange>());
- }
-
- if (node_id == 0) {
- num_l1_nodes_ = w_ranges.size();
- }
-
- for (std::size_t i = 0; i < w_ranges.size(); ++i) {
- WeightedRange &w_range = w_ranges[i];
- std::size_t key_pos = w_range.key_pos() + 1;
- while (key_pos < keys[w_range.begin()].length()) {
- std::size_t j;
- for (j = w_range.begin() + 1; j < w_range.end(); ++j) {
- if (keys[j - 1][key_pos] != keys[j][key_pos]) {
- break;
- }
- }
- if (j < w_range.end()) {
- break;
- }
- ++key_pos;
- }
- cache<T>(node_id, bases_.size(), w_range.weight(),
- keys[w_range.begin()][w_range.key_pos()]);
-
- if (key_pos == w_range.key_pos() + 1) {
- bases_.push_back(keys[w_range.begin()][w_range.key_pos()]);
- link_flags_.push_back(false);
- } else {
- bases_.push_back('\0');
- link_flags_.push_back(true);
- T next_key;
- next_key.set_str(keys[w_range.begin()].ptr(),
- keys[w_range.begin()].length());
- next_key.substr(w_range.key_pos(), key_pos - w_range.key_pos());
- next_key.set_weight(w_range.weight());
- next_keys.push_back(next_key);
- }
- w_range.set_key_pos(key_pos);
- queue.push(w_range.range());
- louds_.push_back(true);
- }
- louds_.push_back(false);
- }
-
- louds_.push_back(false);
- louds_.build(trie_id == 1, true);
- bases_.shrink();
-
- build_terminals(keys, terminals);
- keys.swap(next_keys);
-} catch (const std::bad_alloc &) {
- MARISA_THROW(MARISA_MEMORY_ERROR, "std::bad_alloc");
-}
-
-template <>
-void LoudsTrie::build_next_trie(Vector<Key> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id) {
- if (trie_id == config.num_tries()) {
- Vector<Entry> entries;
- entries.resize(keys.size());
- for (std::size_t i = 0; i < keys.size(); ++i) {
- entries[i].set_str(keys[i].ptr(), keys[i].length());
- }
- tail_.build(entries, terminals, config.tail_mode());
- return;
- }
- Vector<ReverseKey> reverse_keys;
- reverse_keys.resize(keys.size());
- for (std::size_t i = 0; i < keys.size(); ++i) {
- reverse_keys[i].set_str(keys[i].ptr(), keys[i].length());
- reverse_keys[i].set_weight(keys[i].weight());
- }
- keys.clear();
- next_trie_.reset(new (std::nothrow) LoudsTrie);
- MARISA_THROW_IF(next_trie_.get() == NULL, MARISA_MEMORY_ERROR);
- next_trie_->build_trie(reverse_keys, terminals, config, trie_id + 1);
-}
-
-template <>
-void LoudsTrie::build_next_trie(Vector<ReverseKey> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id) {
- if (trie_id == config.num_tries()) {
- Vector<Entry> entries;
- entries.resize(keys.size());
- for (std::size_t i = 0; i < keys.size(); ++i) {
- entries[i].set_str(keys[i].ptr(), keys[i].length());
- }
- tail_.build(entries, terminals, config.tail_mode());
- return;
- }
- next_trie_.reset(new (std::nothrow) LoudsTrie);
- MARISA_THROW_IF(next_trie_.get() == NULL, MARISA_MEMORY_ERROR);
- next_trie_->build_trie(keys, terminals, config, trie_id + 1);
-}
-
-template <typename T>
-void LoudsTrie::build_terminals(const Vector<T> &keys,
- Vector<UInt32> *terminals) const {
- Vector<UInt32> temp;
- temp.resize(keys.size());
- for (std::size_t i = 0; i < keys.size(); ++i) {
- temp[keys[i].id()] = (UInt32)keys[i].terminal();
- }
- terminals->swap(temp);
-}
-
-template <>
-void LoudsTrie::cache<Key>(std::size_t parent, std::size_t child,
- float weight, char label) {
- MARISA_DEBUG_IF(parent >= child, MARISA_RANGE_ERROR);
-
- const std::size_t cache_id = get_cache_id(parent, label);
- if (weight > cache_[cache_id].weight()) {
- cache_[cache_id].set_parent(parent);
- cache_[cache_id].set_child(child);
- cache_[cache_id].set_weight(weight);
- }
-}
-
-void LoudsTrie::reserve_cache(const Config &config, std::size_t trie_id,
- std::size_t num_keys) {
- std::size_t cache_size = (trie_id == 1) ? 256 : 1;
- while (cache_size < (num_keys / config.cache_level())) {
- cache_size *= 2;
- }
- cache_.resize(cache_size);
- cache_mask_ = cache_size - 1;
-}
-
-template <>
-void LoudsTrie::cache<ReverseKey>(std::size_t parent, std::size_t child,
- float weight, char) {
- MARISA_DEBUG_IF(parent >= child, MARISA_RANGE_ERROR);
-
- const std::size_t cache_id = get_cache_id(child);
- if (weight > cache_[cache_id].weight()) {
- cache_[cache_id].set_parent(parent);
- cache_[cache_id].set_child(child);
- cache_[cache_id].set_weight(weight);
- }
-}
-
-void LoudsTrie::fill_cache() {
- for (std::size_t i = 0; i < cache_.size(); ++i) {
- const std::size_t node_id = cache_[i].child();
- if (node_id != 0) {
- cache_[i].set_base(bases_[node_id]);
- cache_[i].set_extra(!link_flags_[node_id] ?
- MARISA_INVALID_EXTRA : extras_[link_flags_.rank1(node_id)]);
- } else {
- cache_[i].set_parent(MARISA_UINT32_MAX);
- cache_[i].set_child(MARISA_UINT32_MAX);
- }
- }
-}
-
-void LoudsTrie::map_(Mapper &mapper) {
- louds_.map(mapper);
- terminal_flags_.map(mapper);
- link_flags_.map(mapper);
- bases_.map(mapper);
- extras_.map(mapper);
- tail_.map(mapper);
- if ((link_flags_.num_1s() != 0) && tail_.empty()) {
- next_trie_.reset(new (std::nothrow) LoudsTrie);
- MARISA_THROW_IF(next_trie_.get() == NULL, MARISA_MEMORY_ERROR);
- next_trie_->map_(mapper);
- }
- cache_.map(mapper);
- cache_mask_ = cache_.size() - 1;
- {
- UInt32 temp_num_l1_nodes;
- mapper.map(&temp_num_l1_nodes);
- num_l1_nodes_ = temp_num_l1_nodes;
- }
- {
- UInt32 temp_config_flags;
- mapper.map(&temp_config_flags);
- config_.parse((int)temp_config_flags);
- }
-}
-
-void LoudsTrie::read_(Reader &reader) {
- louds_.read(reader);
- terminal_flags_.read(reader);
- link_flags_.read(reader);
- bases_.read(reader);
- extras_.read(reader);
- tail_.read(reader);
- if ((link_flags_.num_1s() != 0) && tail_.empty()) {
- next_trie_.reset(new (std::nothrow) LoudsTrie);
- MARISA_THROW_IF(next_trie_.get() == NULL, MARISA_MEMORY_ERROR);
- next_trie_->read_(reader);
- }
- cache_.read(reader);
- cache_mask_ = cache_.size() - 1;
- {
- UInt32 temp_num_l1_nodes;
- reader.read(&temp_num_l1_nodes);
- num_l1_nodes_ = temp_num_l1_nodes;
- }
- {
- UInt32 temp_config_flags;
- reader.read(&temp_config_flags);
- config_.parse((int)temp_config_flags);
- }
-}
-
-void LoudsTrie::write_(Writer &writer) const {
- louds_.write(writer);
- terminal_flags_.write(writer);
- link_flags_.write(writer);
- bases_.write(writer);
- extras_.write(writer);
- tail_.write(writer);
- if (next_trie_.get() != NULL) {
- next_trie_->write_(writer);
- }
- cache_.write(writer);
- writer.write((UInt32)num_l1_nodes_);
- writer.write((UInt32)config_.flags());
-}
-
-bool LoudsTrie::find_child(Agent &agent) const {
- MARISA_DEBUG_IF(agent.state().query_pos() >= agent.query().length(),
- MARISA_BOUND_ERROR);
-
- State &state = agent.state();
- const std::size_t cache_id = get_cache_id(state.node_id(),
- agent.query()[state.query_pos()]);
- if (state.node_id() == cache_[cache_id].parent()) {
- if (cache_[cache_id].extra() != MARISA_INVALID_EXTRA) {
- if (!match(agent, cache_[cache_id].link())) {
- return false;
- }
- } else {
- state.set_query_pos(state.query_pos() + 1);
- }
- state.set_node_id(cache_[cache_id].child());
- return true;
- }
-
- std::size_t louds_pos = louds_.select0(state.node_id()) + 1;
- if (!louds_[louds_pos]) {
- return false;
- }
- state.set_node_id(louds_pos - state.node_id() - 1);
- std::size_t link_id = MARISA_INVALID_LINK_ID;
- do {
- if (link_flags_[state.node_id()]) {
- link_id = update_link_id(link_id, state.node_id());
- const std::size_t prev_query_pos = state.query_pos();
- if (match(agent, get_link(state.node_id(), link_id))) {
- return true;
- } else if (state.query_pos() != prev_query_pos) {
- return false;
- }
- } else if (bases_[state.node_id()] ==
- (UInt8)agent.query()[state.query_pos()]) {
- state.set_query_pos(state.query_pos() + 1);
- return true;
- }
- state.set_node_id(state.node_id() + 1);
- ++louds_pos;
- } while (louds_[louds_pos]);
- return false;
-}
-
-bool LoudsTrie::predictive_find_child(Agent &agent) const {
- MARISA_DEBUG_IF(agent.state().query_pos() >= agent.query().length(),
- MARISA_BOUND_ERROR);
-
- State &state = agent.state();
- const std::size_t cache_id = get_cache_id(state.node_id(),
- agent.query()[state.query_pos()]);
- if (state.node_id() == cache_[cache_id].parent()) {
- if (cache_[cache_id].extra() != MARISA_INVALID_EXTRA) {
- if (!prefix_match(agent, cache_[cache_id].link())) {
- return false;
- }
- } else {
- state.key_buf().push_back(cache_[cache_id].label());
- state.set_query_pos(state.query_pos() + 1);
- }
- state.set_node_id(cache_[cache_id].child());
- return true;
- }
-
- std::size_t louds_pos = louds_.select0(state.node_id()) + 1;
- if (!louds_[louds_pos]) {
- return false;
- }
- state.set_node_id(louds_pos - state.node_id() - 1);
- std::size_t link_id = MARISA_INVALID_LINK_ID;
- do {
- if (link_flags_[state.node_id()]) {
- link_id = update_link_id(link_id, state.node_id());
- const std::size_t prev_query_pos = state.query_pos();
- if (prefix_match(agent, get_link(state.node_id(), link_id))) {
- return true;
- } else if (state.query_pos() != prev_query_pos) {
- return false;
- }
- } else if (bases_[state.node_id()] ==
- (UInt8)agent.query()[state.query_pos()]) {
- state.key_buf().push_back((char)bases_[state.node_id()]);
- state.set_query_pos(state.query_pos() + 1);
- return true;
- }
- state.set_node_id(state.node_id() + 1);
- ++louds_pos;
- } while (louds_[louds_pos]);
- return false;
-}
-
-void LoudsTrie::restore(Agent &agent, std::size_t link) const {
- if (next_trie_.get() != NULL) {
- next_trie_->restore_(agent, link);
- } else {
- tail_.restore(agent, link);
- }
-}
-
-bool LoudsTrie::match(Agent &agent, std::size_t link) const {
- if (next_trie_.get() != NULL) {
- return next_trie_->match_(agent, link);
- } else {
- return tail_.match(agent, link);
- }
-}
-
-bool LoudsTrie::prefix_match(Agent &agent, std::size_t link) const {
- if (next_trie_.get() != NULL) {
- return next_trie_->prefix_match_(agent, link);
- } else {
- return tail_.prefix_match(agent, link);
- }
-}
-
-void LoudsTrie::restore_(Agent &agent, std::size_t node_id) const {
- MARISA_DEBUG_IF(node_id == 0, MARISA_RANGE_ERROR);
-
- State &state = agent.state();
- for ( ; ; ) {
- const std::size_t cache_id = get_cache_id(node_id);
- if (node_id == cache_[cache_id].child()) {
- if (cache_[cache_id].extra() != MARISA_INVALID_EXTRA) {
- restore(agent, cache_[cache_id].link());
- } else {
- state.key_buf().push_back(cache_[cache_id].label());
- }
-
- node_id = cache_[cache_id].parent();
- if (node_id == 0) {
- return;
- }
- continue;
- }
-
- if (link_flags_[node_id]) {
- restore(agent, get_link(node_id));
- } else {
- state.key_buf().push_back((char)bases_[node_id]);
- }
-
- if (node_id <= num_l1_nodes_) {
- return;
- }
- node_id = louds_.select1(node_id) - node_id - 1;
- }
-}
-
-bool LoudsTrie::match_(Agent &agent, std::size_t node_id) const {
- MARISA_DEBUG_IF(agent.state().query_pos() >= agent.query().length(),
- MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(node_id == 0, MARISA_RANGE_ERROR);
-
- State &state = agent.state();
- for ( ; ; ) {
- const std::size_t cache_id = get_cache_id(node_id);
- if (node_id == cache_[cache_id].child()) {
- if (cache_[cache_id].extra() != MARISA_INVALID_EXTRA) {
- if (!match(agent, cache_[cache_id].link())) {
- return false;
- }
- } else if (cache_[cache_id].label() ==
- agent.query()[state.query_pos()]) {
- state.set_query_pos(state.query_pos() + 1);
- } else {
- return false;
- }
-
- node_id = cache_[cache_id].parent();
- if (node_id == 0) {
- return true;
- } else if (state.query_pos() >= agent.query().length()) {
- return false;
- }
- continue;
- }
-
- if (link_flags_[node_id]) {
- if (next_trie_.get() != NULL) {
- if (!match(agent, get_link(node_id))) {
- return false;
- }
- } else if (!tail_.match(agent, get_link(node_id))) {
- return false;
- }
- } else if (bases_[node_id] == (UInt8)agent.query()[state.query_pos()]) {
- state.set_query_pos(state.query_pos() + 1);
- } else {
- return false;
- }
-
- if (node_id <= num_l1_nodes_) {
- return true;
- } else if (state.query_pos() >= agent.query().length()) {
- return false;
- }
- node_id = louds_.select1(node_id) - node_id - 1;
- }
-}
-
-bool LoudsTrie::prefix_match_(Agent &agent, std::size_t node_id) const {
- MARISA_DEBUG_IF(agent.state().query_pos() >= agent.query().length(),
- MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(node_id == 0, MARISA_RANGE_ERROR);
-
- State &state = agent.state();
- for ( ; ; ) {
- const std::size_t cache_id = get_cache_id(node_id);
- if (node_id == cache_[cache_id].child()) {
- if (cache_[cache_id].extra() != MARISA_INVALID_EXTRA) {
- if (!prefix_match(agent, cache_[cache_id].link())) {
- return false;
- }
- } else if (cache_[cache_id].label() ==
- agent.query()[state.query_pos()]) {
- state.key_buf().push_back(cache_[cache_id].label());
- state.set_query_pos(state.query_pos() + 1);
- } else {
- return false;
- }
-
- node_id = cache_[cache_id].parent();
- if (node_id == 0) {
- return true;
- }
- } else {
- if (link_flags_[node_id]) {
- if (!prefix_match(agent, get_link(node_id))) {
- return false;
- }
- } else if (bases_[node_id] == (UInt8)agent.query()[state.query_pos()]) {
- state.key_buf().push_back((char)bases_[node_id]);
- state.set_query_pos(state.query_pos() + 1);
- } else {
- return false;
- }
-
- if (node_id <= num_l1_nodes_) {
- return true;
- }
- node_id = louds_.select1(node_id) - node_id - 1;
- }
-
- if (state.query_pos() >= agent.query().length()) {
- restore_(agent, node_id);
- return true;
- }
- }
-}
-
-std::size_t LoudsTrie::get_cache_id(std::size_t node_id, char label) const {
- return (node_id ^ (node_id << 5) ^ (UInt8)label) & cache_mask_;
-}
-
-std::size_t LoudsTrie::get_cache_id(std::size_t node_id) const {
- return node_id & cache_mask_;
-}
-
-std::size_t LoudsTrie::get_link(std::size_t node_id) const {
- return bases_[node_id] | (extras_[link_flags_.rank1(node_id)] * 256);
-}
-
-std::size_t LoudsTrie::get_link(std::size_t node_id,
- std::size_t link_id) const {
- return bases_[node_id] | (extras_[link_id] * 256);
-}
-
-std::size_t LoudsTrie::update_link_id(std::size_t link_id,
- std::size_t node_id) const {
- return (link_id == MARISA_INVALID_LINK_ID) ?
- link_flags_.rank1(node_id) : (link_id + 1);
-}
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.h b/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.h
deleted file mode 100644
index 5a757ac8fc..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/louds-trie.h
+++ /dev/null
@@ -1,135 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_LOUDS_TRIE_H_
-#define MARISA_GRIMOIRE_TRIE_LOUDS_TRIE_H_
-
-#include "../../keyset.h"
-#include "../../agent.h"
-#include "../vector.h"
-#include "config.h"
-#include "key.h"
-#include "tail.h"
-#include "cache.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class LoudsTrie {
- public:
- LoudsTrie();
- ~LoudsTrie();
-
- void build(Keyset &keyset, int flags);
-
- void map(Mapper &mapper);
- void read(Reader &reader);
- void write(Writer &writer) const;
-
- bool lookup(Agent &agent) const;
- void reverse_lookup(Agent &agent) const;
- bool common_prefix_search(Agent &agent) const;
- bool predictive_search(Agent &agent) const;
-
- std::size_t num_tries() const {
- return config_.num_tries();
- }
- std::size_t num_keys() const {
- return size();
- }
- std::size_t num_nodes() const {
- return (louds_.size() / 2) - 1;
- }
-
- CacheLevel cache_level() const {
- return config_.cache_level();
- }
- TailMode tail_mode() const {
- return config_.tail_mode();
- }
- NodeOrder node_order() const {
- return config_.node_order();
- }
-
- bool empty() const {
- return size() == 0;
- }
- std::size_t size() const {
- return terminal_flags_.num_1s();
- }
- std::size_t total_size() const;
- std::size_t io_size() const;
-
- void clear();
- void swap(LoudsTrie &rhs);
-
- private:
- BitVector louds_;
- BitVector terminal_flags_;
- BitVector link_flags_;
- Vector<UInt8> bases_;
- FlatVector extras_;
- Tail tail_;
- scoped_ptr<LoudsTrie> next_trie_;
- Vector<Cache> cache_;
- std::size_t cache_mask_;
- std::size_t num_l1_nodes_;
- Config config_;
- Mapper mapper_;
-
- void build_(Keyset &keyset, const Config &config);
-
- template <typename T>
- void build_trie(Vector<T> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id);
- template <typename T>
- void build_current_trie(Vector<T> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id);
- template <typename T>
- void build_next_trie(Vector<T> &keys,
- Vector<UInt32> *terminals, const Config &config, std::size_t trie_id);
- template <typename T>
- void build_terminals(const Vector<T> &keys,
- Vector<UInt32> *terminals) const;
-
- void reserve_cache(const Config &config, std::size_t trie_id,
- std::size_t num_keys);
- template <typename T>
- void cache(std::size_t parent, std::size_t child,
- float weight, char label);
- void fill_cache();
-
- void map_(Mapper &mapper);
- void read_(Reader &reader);
- void write_(Writer &writer) const;
-
- inline bool find_child(Agent &agent) const;
- inline bool predictive_find_child(Agent &agent) const;
-
- inline void restore(Agent &agent, std::size_t node_id) const;
- inline bool match(Agent &agent, std::size_t node_id) const;
- inline bool prefix_match(Agent &agent, std::size_t node_id) const;
-
- void restore_(Agent &agent, std::size_t node_id) const;
- bool match_(Agent &agent, std::size_t node_id) const;
- bool prefix_match_(Agent &agent, std::size_t node_id) const;
-
- inline std::size_t get_cache_id(std::size_t node_id, char label) const;
- inline std::size_t get_cache_id(std::size_t node_id) const;
-
- inline std::size_t get_link(std::size_t node_id) const;
- inline std::size_t get_link(std::size_t node_id,
- std::size_t link_id) const;
-
- inline std::size_t update_link_id(std::size_t link_id,
- std::size_t node_id) const;
-
- // Disallows copy and assignment.
- LoudsTrie(const LoudsTrie &);
- LoudsTrie &operator=(const LoudsTrie &);
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_LOUDS_TRIE_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/range.h b/contrib/python/marisa-trie/marisa/grimoire/trie/range.h
deleted file mode 100644
index 4ca39a9c37..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/range.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_RANGE_H_
-#define MARISA_GRIMOIRE_TRIE_RANGE_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Range {
- public:
- Range() : begin_(0), end_(0), key_pos_(0) {}
-
- void set_begin(std::size_t begin) {
- MARISA_DEBUG_IF(begin > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- begin_ = static_cast<UInt32>(begin);
- }
- void set_end(std::size_t end) {
- MARISA_DEBUG_IF(end > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- end_ = static_cast<UInt32>(end);
- }
- void set_key_pos(std::size_t key_pos) {
- MARISA_DEBUG_IF(key_pos > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- key_pos_ = static_cast<UInt32>(key_pos);
- }
-
- std::size_t begin() const {
- return begin_;
- }
- std::size_t end() const {
- return end_;
- }
- std::size_t key_pos() const {
- return key_pos_;
- }
-
- private:
- UInt32 begin_;
- UInt32 end_;
- UInt32 key_pos_;
-};
-
-inline Range make_range(std::size_t begin, std::size_t end,
- std::size_t key_pos) {
- Range range;
- range.set_begin(begin);
- range.set_end(end);
- range.set_key_pos(key_pos);
- return range;
-}
-
-class WeightedRange {
- public:
- WeightedRange() : range_(), weight_(0.0F) {}
-
- void set_range(const Range &range) {
- range_ = range;
- }
- void set_begin(std::size_t begin) {
- range_.set_begin(begin);
- }
- void set_end(std::size_t end) {
- range_.set_end(end);
- }
- void set_key_pos(std::size_t key_pos) {
- range_.set_key_pos(key_pos);
- }
- void set_weight(float weight) {
- weight_ = weight;
- }
-
- const Range &range() const {
- return range_;
- }
- std::size_t begin() const {
- return range_.begin();
- }
- std::size_t end() const {
- return range_.end();
- }
- std::size_t key_pos() const {
- return range_.key_pos();
- }
- float weight() const {
- return weight_;
- }
-
- private:
- Range range_;
- float weight_;
-};
-
-inline bool operator<(const WeightedRange &lhs, const WeightedRange &rhs) {
- return lhs.weight() < rhs.weight();
-}
-
-inline bool operator>(const WeightedRange &lhs, const WeightedRange &rhs) {
- return lhs.weight() > rhs.weight();
-}
-
-inline WeightedRange make_weighted_range(std::size_t begin, std::size_t end,
- std::size_t key_pos, float weight) {
- WeightedRange range;
- range.set_begin(begin);
- range.set_end(end);
- range.set_key_pos(key_pos);
- range.set_weight(weight);
- return range;
-}
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_RANGE_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/state.h b/contrib/python/marisa-trie/marisa/grimoire/trie/state.h
deleted file mode 100644
index 219bf9e03a..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/state.h
+++ /dev/null
@@ -1,118 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_STATE_H_
-#define MARISA_GRIMOIRE_TRIE_STATE_H_
-
-#include "../vector.h"
-#include "history.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-// A search agent has its internal state and the status codes are defined
-// below.
-typedef enum StatusCode {
- MARISA_READY_TO_ALL,
- MARISA_READY_TO_COMMON_PREFIX_SEARCH,
- MARISA_READY_TO_PREDICTIVE_SEARCH,
- MARISA_END_OF_COMMON_PREFIX_SEARCH,
- MARISA_END_OF_PREDICTIVE_SEARCH,
-} StatusCode;
-
-class State {
- public:
- State()
- : key_buf_(), history_(), node_id_(0), query_pos_(0),
- history_pos_(0), status_code_(MARISA_READY_TO_ALL) {}
-
- void set_node_id(std::size_t node_id) {
- MARISA_DEBUG_IF(node_id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- node_id_ = (UInt32)node_id;
- }
- void set_query_pos(std::size_t query_pos) {
- MARISA_DEBUG_IF(query_pos > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- query_pos_ = (UInt32)query_pos;
- }
- void set_history_pos(std::size_t history_pos) {
- MARISA_DEBUG_IF(history_pos > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- history_pos_ = (UInt32)history_pos;
- }
- void set_status_code(StatusCode status_code) {
- status_code_ = status_code;
- }
-
- std::size_t node_id() const {
- return node_id_;
- }
- std::size_t query_pos() const {
- return query_pos_;
- }
- std::size_t history_pos() const {
- return history_pos_;
- }
- StatusCode status_code() const {
- return status_code_;
- }
-
- const Vector<char> &key_buf() const {
- return key_buf_;
- }
- const Vector<History> &history() const {
- return history_;
- }
-
- Vector<char> &key_buf() {
- return key_buf_;
- }
- Vector<History> &history() {
- return history_;
- }
-
- void reset() {
- status_code_ = MARISA_READY_TO_ALL;
- }
-
- void lookup_init() {
- node_id_ = 0;
- query_pos_ = 0;
- status_code_ = MARISA_READY_TO_ALL;
- }
- void reverse_lookup_init() {
- key_buf_.resize(0);
- key_buf_.reserve(32);
- status_code_ = MARISA_READY_TO_ALL;
- }
- void common_prefix_search_init() {
- node_id_ = 0;
- query_pos_ = 0;
- status_code_ = MARISA_READY_TO_COMMON_PREFIX_SEARCH;
- }
- void predictive_search_init() {
- key_buf_.resize(0);
- key_buf_.reserve(64);
- history_.resize(0);
- history_.reserve(4);
- node_id_ = 0;
- query_pos_ = 0;
- history_pos_ = 0;
- status_code_ = MARISA_READY_TO_PREDICTIVE_SEARCH;
- }
-
- private:
- Vector<char> key_buf_;
- Vector<History> history_;
- UInt32 node_id_;
- UInt32 query_pos_;
- UInt32 history_pos_;
- StatusCode status_code_;
-
- // Disallows copy and assignment.
- State(const State &);
- State &operator=(const State &);
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_STATE_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/tail.cc b/contrib/python/marisa-trie/marisa/grimoire/trie/tail.cc
deleted file mode 100644
index 6ec3652e1c..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/tail.cc
+++ /dev/null
@@ -1,218 +0,0 @@
-#include "../algorithm.h"
-#include "state.h"
-#include "tail.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-Tail::Tail() : buf_(), end_flags_() {}
-
-void Tail::build(Vector<Entry> &entries, Vector<UInt32> *offsets,
- TailMode mode) {
- MARISA_THROW_IF(offsets == NULL, MARISA_NULL_ERROR);
-
- switch (mode) {
- case MARISA_TEXT_TAIL: {
- for (std::size_t i = 0; i < entries.size(); ++i) {
- const char * const ptr = entries[i].ptr();
- const std::size_t length = entries[i].length();
- for (std::size_t j = 0; j < length; ++j) {
- if (ptr[j] == '\0') {
- mode = MARISA_BINARY_TAIL;
- break;
- }
- }
- if (mode == MARISA_BINARY_TAIL) {
- break;
- }
- }
- break;
- }
- case MARISA_BINARY_TAIL: {
- break;
- }
- default: {
- MARISA_THROW(MARISA_CODE_ERROR, "undefined tail mode");
- }
- }
-
- Tail temp;
- temp.build_(entries, offsets, mode);
- swap(temp);
-}
-
-void Tail::map(Mapper &mapper) {
- Tail temp;
- temp.map_(mapper);
- swap(temp);
-}
-
-void Tail::read(Reader &reader) {
- Tail temp;
- temp.read_(reader);
- swap(temp);
-}
-
-void Tail::write(Writer &writer) const {
- write_(writer);
-}
-
-void Tail::restore(Agent &agent, std::size_t offset) const {
- MARISA_DEBUG_IF(buf_.empty(), MARISA_STATE_ERROR);
-
- State &state = agent.state();
- if (end_flags_.empty()) {
- for (const char *ptr = &buf_[offset]; *ptr != '\0'; ++ptr) {
- state.key_buf().push_back(*ptr);
- }
- } else {
- do {
- state.key_buf().push_back(buf_[offset]);
- } while (!end_flags_[offset++]);
- }
-}
-
-bool Tail::match(Agent &agent, std::size_t offset) const {
- MARISA_DEBUG_IF(buf_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(agent.state().query_pos() >= agent.query().length(),
- MARISA_BOUND_ERROR);
-
- State &state = agent.state();
- if (end_flags_.empty()) {
- const char * const ptr = &buf_[offset] - state.query_pos();
- do {
- if (ptr[state.query_pos()] != agent.query()[state.query_pos()]) {
- return false;
- }
- state.set_query_pos(state.query_pos() + 1);
- if (ptr[state.query_pos()] == '\0') {
- return true;
- }
- } while (state.query_pos() < agent.query().length());
- return false;
- } else {
- do {
- if (buf_[offset] != agent.query()[state.query_pos()]) {
- return false;
- }
- state.set_query_pos(state.query_pos() + 1);
- if (end_flags_[offset++]) {
- return true;
- }
- } while (state.query_pos() < agent.query().length());
- return false;
- }
-}
-
-bool Tail::prefix_match(Agent &agent, std::size_t offset) const {
- MARISA_DEBUG_IF(buf_.empty(), MARISA_STATE_ERROR);
-
- State &state = agent.state();
- if (end_flags_.empty()) {
- const char *ptr = &buf_[offset] - state.query_pos();
- do {
- if (ptr[state.query_pos()] != agent.query()[state.query_pos()]) {
- return false;
- }
- state.key_buf().push_back(ptr[state.query_pos()]);
- state.set_query_pos(state.query_pos() + 1);
- if (ptr[state.query_pos()] == '\0') {
- return true;
- }
- } while (state.query_pos() < agent.query().length());
- ptr += state.query_pos();
- do {
- state.key_buf().push_back(*ptr);
- } while (*++ptr != '\0');
- return true;
- } else {
- do {
- if (buf_[offset] != agent.query()[state.query_pos()]) {
- return false;
- }
- state.key_buf().push_back(buf_[offset]);
- state.set_query_pos(state.query_pos() + 1);
- if (end_flags_[offset++]) {
- return true;
- }
- } while (state.query_pos() < agent.query().length());
- do {
- state.key_buf().push_back(buf_[offset]);
- } while (!end_flags_[offset++]);
- return true;
- }
-}
-
-void Tail::clear() {
- Tail().swap(*this);
-}
-
-void Tail::swap(Tail &rhs) {
- buf_.swap(rhs.buf_);
- end_flags_.swap(rhs.end_flags_);
-}
-
-void Tail::build_(Vector<Entry> &entries, Vector<UInt32> *offsets,
- TailMode mode) {
- for (std::size_t i = 0; i < entries.size(); ++i) {
- entries[i].set_id(i);
- }
- Algorithm().sort(entries.begin(), entries.end());
-
- Vector<UInt32> temp_offsets;
- temp_offsets.resize(entries.size(), 0);
-
- const Entry dummy;
- const Entry *last = &dummy;
- for (std::size_t i = entries.size(); i > 0; --i) {
- const Entry &current = entries[i - 1];
- MARISA_THROW_IF(current.length() == 0, MARISA_RANGE_ERROR);
- std::size_t match = 0;
- while ((match < current.length()) && (match < last->length()) &&
- ((*last)[match] == current[match])) {
- ++match;
- }
- if ((match == current.length()) && (last->length() != 0)) {
- temp_offsets[current.id()] = (UInt32)(
- temp_offsets[last->id()] + (last->length() - match));
- } else {
- temp_offsets[current.id()] = (UInt32)buf_.size();
- for (std::size_t j = 1; j <= current.length(); ++j) {
- buf_.push_back(current[current.length() - j]);
- }
- if (mode == MARISA_TEXT_TAIL) {
- buf_.push_back('\0');
- } else {
- for (std::size_t j = 1; j < current.length(); ++j) {
- end_flags_.push_back(false);
- }
- end_flags_.push_back(true);
- }
- MARISA_THROW_IF(buf_.size() > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- }
- last = &current;
- }
- buf_.shrink();
-
- offsets->swap(temp_offsets);
-}
-
-void Tail::map_(Mapper &mapper) {
- buf_.map(mapper);
- end_flags_.map(mapper);
-}
-
-void Tail::read_(Reader &reader) {
- buf_.read(reader);
- end_flags_.read(reader);
-}
-
-void Tail::write_(Writer &writer) const {
- buf_.write(writer);
- end_flags_.write(writer);
-}
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/trie/tail.h b/contrib/python/marisa-trie/marisa/grimoire/trie/tail.h
deleted file mode 100644
index 7e5ca1d3e7..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/trie/tail.h
+++ /dev/null
@@ -1,73 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_TRIE_TAIL_H_
-#define MARISA_GRIMOIRE_TRIE_TAIL_H_
-
-#include "../../agent.h"
-#include "../vector.h"
-#include "entry.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class Tail {
- public:
- Tail();
-
- void build(Vector<Entry> &entries, Vector<UInt32> *offsets,
- TailMode mode);
-
- void map(Mapper &mapper);
- void read(Reader &reader);
- void write(Writer &writer) const;
-
- void restore(Agent &agent, std::size_t offset) const;
- bool match(Agent &agent, std::size_t offset) const;
- bool prefix_match(Agent &agent, std::size_t offset) const;
-
- const char &operator[](std::size_t offset) const {
- MARISA_DEBUG_IF(offset >= buf_.size(), MARISA_BOUND_ERROR);
- return buf_[offset];
- }
-
- TailMode mode() const {
- return end_flags_.empty() ? MARISA_TEXT_TAIL : MARISA_BINARY_TAIL;
- }
-
- bool empty() const {
- return buf_.empty();
- }
- std::size_t size() const {
- return buf_.size();
- }
- std::size_t total_size() const {
- return buf_.total_size() + end_flags_.total_size();
- }
- std::size_t io_size() const {
- return buf_.io_size() + end_flags_.io_size();
- }
-
- void clear();
- void swap(Tail &rhs);
-
- private:
- Vector<char> buf_;
- BitVector end_flags_;
-
- void build_(Vector<Entry> &entries, Vector<UInt32> *offsets,
- TailMode mode);
-
- void map_(Mapper &mapper);
- void read_(Reader &reader);
- void write_(Writer &writer) const;
-
- // Disallows copy and assignment.
- Tail(const Tail &);
- Tail &operator=(const Tail &);
-};
-
-} // namespace trie
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_TRIE_TAIL_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector.h b/contrib/python/marisa-trie/marisa/grimoire/vector.h
deleted file mode 100644
index d942a7f279..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_H_
-#define MARISA_GRIMOIRE_VECTOR_H_
-
-#include "vector/vector.h"
-#include "vector/flat-vector.h"
-#include "vector/bit-vector.h"
-
-namespace marisa {
-namespace grimoire {
-
-using vector::Vector;
-typedef vector::FlatVector FlatVector;
-typedef vector::BitVector BitVector;
-
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_H_
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.cc b/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.cc
deleted file mode 100644
index a5abc69319..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.cc
+++ /dev/null
@@ -1,825 +0,0 @@
-#include "pop-count.h"
-#include "bit-vector.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-namespace {
-
-const UInt8 SELECT_TABLE[8][256] = {
- {
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
- 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0
- },
- {
- 7, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1,
- 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
- 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
- 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
- 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 7, 7, 7, 1, 7, 2, 2, 1, 7, 3, 3, 1, 3, 2, 2, 1,
- 7, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 7, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
- 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 7, 6, 6, 1, 6, 2, 2, 1, 6, 3, 3, 1, 3, 2, 2, 1,
- 6, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1,
- 6, 5, 5, 1, 5, 2, 2, 1, 5, 3, 3, 1, 3, 2, 2, 1,
- 5, 4, 4, 1, 4, 2, 2, 1, 4, 3, 3, 1, 3, 2, 2, 1
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 2, 7, 7, 7, 3, 7, 3, 3, 2,
- 7, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2,
- 7, 7, 7, 5, 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2,
- 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
- 7, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2,
- 7, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2,
- 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
- 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
- 7, 7, 7, 7, 7, 7, 7, 2, 7, 7, 7, 3, 7, 3, 3, 2,
- 7, 7, 7, 4, 7, 4, 4, 2, 7, 4, 4, 3, 4, 3, 3, 2,
- 7, 7, 7, 5, 7, 5, 5, 2, 7, 5, 5, 3, 5, 3, 3, 2,
- 7, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2,
- 7, 7, 7, 6, 7, 6, 6, 2, 7, 6, 6, 3, 6, 3, 3, 2,
- 7, 6, 6, 4, 6, 4, 4, 2, 6, 4, 4, 3, 4, 3, 3, 2,
- 7, 6, 6, 5, 6, 5, 5, 2, 6, 5, 5, 3, 5, 3, 3, 2,
- 6, 5, 5, 4, 5, 4, 4, 2, 5, 4, 4, 3, 4, 3, 3, 2
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 3,
- 7, 7, 7, 7, 7, 7, 7, 4, 7, 7, 7, 4, 7, 4, 4, 3,
- 7, 7, 7, 7, 7, 7, 7, 5, 7, 7, 7, 5, 7, 5, 5, 3,
- 7, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 3,
- 7, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, 6, 4, 4, 3,
- 7, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3,
- 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 3,
- 7, 7, 7, 7, 7, 7, 7, 4, 7, 7, 7, 4, 7, 4, 4, 3,
- 7, 7, 7, 7, 7, 7, 7, 5, 7, 7, 7, 5, 7, 5, 5, 3,
- 7, 7, 7, 5, 7, 5, 5, 4, 7, 5, 5, 4, 5, 4, 4, 3,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 3,
- 7, 7, 7, 6, 7, 6, 6, 4, 7, 6, 6, 4, 6, 4, 4, 3,
- 7, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 3,
- 7, 6, 6, 5, 6, 5, 5, 4, 6, 5, 5, 4, 5, 4, 4, 3
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 4,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 5,
- 7, 7, 7, 7, 7, 7, 7, 5, 7, 7, 7, 5, 7, 5, 5, 4,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 4,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 5,
- 7, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 4,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 5,
- 7, 7, 7, 7, 7, 7, 7, 5, 7, 7, 7, 5, 7, 5, 5, 4,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 4,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 5,
- 7, 7, 7, 6, 7, 6, 6, 5, 7, 6, 6, 5, 6, 5, 5, 4
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 5,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 5,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 5,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 6, 7, 7, 7, 6, 7, 6, 6, 5
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 6
- },
- {
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
- 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
- }
-};
-
-#if MARISA_WORD_SIZE == 64
-const UInt64 MASK_55 = 0x5555555555555555ULL;
-const UInt64 MASK_33 = 0x3333333333333333ULL;
-const UInt64 MASK_0F = 0x0F0F0F0F0F0F0F0FULL;
-const UInt64 MASK_01 = 0x0101010101010101ULL;
-const UInt64 MASK_80 = 0x8080808080808080ULL;
-
-std::size_t select_bit(std::size_t i, std::size_t bit_id, UInt64 unit) {
- UInt64 counts;
- {
- #if defined(MARISA_X64) && defined(MARISA_USE_SSSE3)
- __m128i lower_nibbles = _mm_cvtsi64_si128(unit & 0x0F0F0F0F0F0F0F0FULL);
- __m128i upper_nibbles = _mm_cvtsi64_si128(unit & 0xF0F0F0F0F0F0F0F0ULL);
- upper_nibbles = _mm_srli_epi32(upper_nibbles, 4);
-
- __m128i lower_counts =
- _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
- lower_counts = _mm_shuffle_epi8(lower_counts, lower_nibbles);
- __m128i upper_counts =
- _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
- upper_counts = _mm_shuffle_epi8(upper_counts, upper_nibbles);
-
- counts = _mm_cvtsi128_si64(_mm_add_epi8(lower_counts, upper_counts));
- #else // defined(MARISA_X64) && defined(MARISA_USE_SSSE3)
- counts = unit - ((unit >> 1) & MASK_55);
- counts = (counts & MASK_33) + ((counts >> 2) & MASK_33);
- counts = (counts + (counts >> 4)) & MASK_0F;
- #endif // defined(MARISA_X64) && defined(MARISA_USE_SSSE3)
- counts *= MASK_01;
- }
-
- #if defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
- UInt8 skip;
- {
- __m128i x = _mm_cvtsi64_si128((i + 1) * MASK_01);
- __m128i y = _mm_cvtsi64_si128(counts);
- x = _mm_cmpgt_epi8(x, y);
- skip = (UInt8)PopCount::count(_mm_cvtsi128_si64(x));
- }
- #else // defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
- const UInt64 x = (counts | MASK_80) - ((i + 1) * MASK_01);
- #ifdef _MSC_VER
- unsigned long skip;
- ::_BitScanForward64(&skip, (x & MASK_80) >> 7);
- #else // _MSC_VER
- const int skip = ::__builtin_ctzll((x & MASK_80) >> 7);
- #endif // _MSC_VER
- #endif // defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
-
- bit_id += skip;
- unit >>= skip;
- i -= ((counts << 8) >> skip) & 0xFF;
-
- return bit_id + SELECT_TABLE[i][unit & 0xFF];
-}
-#else // MARISA_WORD_SIZE == 64
- #ifdef MARISA_USE_SSE2
-const UInt8 POPCNT_TABLE[256] = {
- 0, 8, 8, 16, 8, 16, 16, 24, 8, 16, 16, 24, 16, 24, 24, 32,
- 8, 16, 16, 24, 16, 24, 24, 32, 16, 24, 24, 32, 24, 32, 32, 40,
- 8, 16, 16, 24, 16, 24, 24, 32, 16, 24, 24, 32, 24, 32, 32, 40,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 8, 16, 16, 24, 16, 24, 24, 32, 16, 24, 24, 32, 24, 32, 32, 40,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 24, 32, 32, 40, 32, 40, 40, 48, 32, 40, 40, 48, 40, 48, 48, 56,
- 8, 16, 16, 24, 16, 24, 24, 32, 16, 24, 24, 32, 24, 32, 32, 40,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 24, 32, 32, 40, 32, 40, 40, 48, 32, 40, 40, 48, 40, 48, 48, 56,
- 16, 24, 24, 32, 24, 32, 32, 40, 24, 32, 32, 40, 32, 40, 40, 48,
- 24, 32, 32, 40, 32, 40, 40, 48, 32, 40, 40, 48, 40, 48, 48, 56,
- 24, 32, 32, 40, 32, 40, 40, 48, 32, 40, 40, 48, 40, 48, 48, 56,
- 32, 40, 40, 48, 40, 48, 48, 56, 40, 48, 48, 56, 48, 56, 56, 64
-};
-
-std::size_t select_bit(std::size_t i, std::size_t bit_id,
- UInt32 unit_lo, UInt32 unit_hi) {
- __m128i unit;
- {
- __m128i lower_dword = _mm_cvtsi32_si128(unit_lo);
- __m128i upper_dword = _mm_cvtsi32_si128(unit_hi);
- upper_dword = _mm_slli_si128(upper_dword, 4);
- unit = _mm_or_si128(lower_dword, upper_dword);
- }
-
- __m128i counts;
- {
- #ifdef MARISA_USE_SSSE3
- __m128i lower_nibbles = _mm_set1_epi8(0x0F);
- lower_nibbles = _mm_and_si128(lower_nibbles, unit);
- __m128i upper_nibbles = _mm_set1_epi8((UInt8)0xF0);
- upper_nibbles = _mm_and_si128(upper_nibbles, unit);
- upper_nibbles = _mm_srli_epi32(upper_nibbles, 4);
-
- __m128i lower_counts =
- _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
- lower_counts = _mm_shuffle_epi8(lower_counts, lower_nibbles);
- __m128i upper_counts =
- _mm_set_epi8(4, 3, 3, 2, 3, 2, 2, 1, 3, 2, 2, 1, 2, 1, 1, 0);
- upper_counts = _mm_shuffle_epi8(upper_counts, upper_nibbles);
-
- counts = _mm_add_epi8(lower_counts, upper_counts);
- #else // MARISA_USE_SSSE3
- __m128i x = _mm_srli_epi32(unit, 1);
- x = _mm_and_si128(x, _mm_set1_epi8(0x55));
- x = _mm_sub_epi8(unit, x);
-
- __m128i y = _mm_srli_epi32(x, 2);
- y = _mm_and_si128(y, _mm_set1_epi8(0x33));
- x = _mm_and_si128(x, _mm_set1_epi8(0x33));
- x = _mm_add_epi8(x, y);
-
- y = _mm_srli_epi32(x, 4);
- x = _mm_add_epi8(x, y);
- counts = _mm_and_si128(x, _mm_set1_epi8(0x0F));
- #endif // MARISA_USE_SSSE3
- }
-
- __m128i accumulated_counts;
- {
- __m128i x = counts;
- x = _mm_slli_si128(x, 1);
- __m128i y = counts;
- y = _mm_add_epi32(y, x);
-
- x = y;
- y = _mm_slli_si128(y, 2);
- x = _mm_add_epi32(x, y);
-
- y = x;
- x = _mm_slli_si128(x, 4);
- y = _mm_add_epi32(y, x);
-
- accumulated_counts = _mm_set_epi32(0x7F7F7F7FU, 0x7F7F7F7FU, 0, 0);
- accumulated_counts = _mm_or_si128(accumulated_counts, y);
- }
-
- UInt8 skip;
- {
- __m128i x = _mm_set1_epi8((UInt8)(i + 1));
- x = _mm_cmpgt_epi8(x, accumulated_counts);
- skip = POPCNT_TABLE[_mm_movemask_epi8(x)];
- }
-
- UInt8 byte;
- {
- #ifdef _MSC_VER
- __declspec(align(16)) UInt8 unit_bytes[16];
- __declspec(align(16)) UInt8 accumulated_counts_bytes[16];
- #else // _MSC_VER
- UInt8 unit_bytes[16] __attribute__ ((aligned (16)));
- UInt8 accumulated_counts_bytes[16] __attribute__ ((aligned (16)));
- #endif // _MSC_VER
- accumulated_counts = _mm_slli_si128(accumulated_counts, 1);
- _mm_store_si128(reinterpret_cast<__m128i *>(unit_bytes), unit);
- _mm_store_si128(reinterpret_cast<__m128i *>(accumulated_counts_bytes),
- accumulated_counts);
-
- bit_id += skip;
- byte = unit_bytes[skip / 8];
- i -= accumulated_counts_bytes[skip / 8];
- }
-
- return bit_id + SELECT_TABLE[i][byte];
-}
- #endif // MARISA_USE_SSE2
-#endif // MARISA_WORD_SIZE == 64
-
-} // namespace
-
-#if MARISA_WORD_SIZE == 64
-
-std::size_t BitVector::rank1(std::size_t i) const {
- MARISA_DEBUG_IF(ranks_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i > size_, MARISA_BOUND_ERROR);
-
- const RankIndex &rank = ranks_[i / 512];
- std::size_t offset = rank.abs();
- switch ((i / 64) % 8) {
- case 1: {
- offset += rank.rel1();
- break;
- }
- case 2: {
- offset += rank.rel2();
- break;
- }
- case 3: {
- offset += rank.rel3();
- break;
- }
- case 4: {
- offset += rank.rel4();
- break;
- }
- case 5: {
- offset += rank.rel5();
- break;
- }
- case 6: {
- offset += rank.rel6();
- break;
- }
- case 7: {
- offset += rank.rel7();
- break;
- }
- }
- offset += PopCount::count(units_[i / 64] & ((1ULL << (i % 64)) - 1));
- return offset;
-}
-
-std::size_t BitVector::select0(std::size_t i) const {
- MARISA_DEBUG_IF(select0s_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i >= num_0s(), MARISA_BOUND_ERROR);
-
- const std::size_t select_id = i / 512;
- MARISA_DEBUG_IF((select_id + 1) >= select0s_.size(), MARISA_BOUND_ERROR);
- if ((i % 512) == 0) {
- return select0s_[select_id];
- }
- std::size_t begin = select0s_[select_id] / 512;
- std::size_t end = (select0s_[select_id + 1] + 511) / 512;
- if (begin + 10 >= end) {
- while (i >= ((begin + 1) * 512) - ranks_[begin + 1].abs()) {
- ++begin;
- }
- } else {
- while (begin + 1 < end) {
- const std::size_t middle = (begin + end) / 2;
- if (i < (middle * 512) - ranks_[middle].abs()) {
- end = middle;
- } else {
- begin = middle;
- }
- }
- }
- const std::size_t rank_id = begin;
- i -= (rank_id * 512) - ranks_[rank_id].abs();
-
- const RankIndex &rank = ranks_[rank_id];
- std::size_t unit_id = rank_id * 8;
- if (i < (256U - rank.rel4())) {
- if (i < (128U - rank.rel2())) {
- if (i >= (64U - rank.rel1())) {
- unit_id += 1;
- i -= 64 - rank.rel1();
- }
- } else if (i < (192U - rank.rel3())) {
- unit_id += 2;
- i -= 128 - rank.rel2();
- } else {
- unit_id += 3;
- i -= 192 - rank.rel3();
- }
- } else if (i < (384U - rank.rel6())) {
- if (i < (320U - rank.rel5())) {
- unit_id += 4;
- i -= 256 - rank.rel4();
- } else {
- unit_id += 5;
- i -= 320 - rank.rel5();
- }
- } else if (i < (448U - rank.rel7())) {
- unit_id += 6;
- i -= 384 - rank.rel6();
- } else {
- unit_id += 7;
- i -= 448 - rank.rel7();
- }
-
- return select_bit(i, unit_id * 64, ~units_[unit_id]);
-}
-
-std::size_t BitVector::select1(std::size_t i) const {
- MARISA_DEBUG_IF(select1s_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i >= num_1s(), MARISA_BOUND_ERROR);
-
- const std::size_t select_id = i / 512;
- MARISA_DEBUG_IF((select_id + 1) >= select1s_.size(), MARISA_BOUND_ERROR);
- if ((i % 512) == 0) {
- return select1s_[select_id];
- }
- std::size_t begin = select1s_[select_id] / 512;
- std::size_t end = (select1s_[select_id + 1] + 511) / 512;
- if (begin + 10 >= end) {
- while (i >= ranks_[begin + 1].abs()) {
- ++begin;
- }
- } else {
- while (begin + 1 < end) {
- const std::size_t middle = (begin + end) / 2;
- if (i < ranks_[middle].abs()) {
- end = middle;
- } else {
- begin = middle;
- }
- }
- }
- const std::size_t rank_id = begin;
- i -= ranks_[rank_id].abs();
-
- const RankIndex &rank = ranks_[rank_id];
- std::size_t unit_id = rank_id * 8;
- if (i < rank.rel4()) {
- if (i < rank.rel2()) {
- if (i >= rank.rel1()) {
- unit_id += 1;
- i -= rank.rel1();
- }
- } else if (i < rank.rel3()) {
- unit_id += 2;
- i -= rank.rel2();
- } else {
- unit_id += 3;
- i -= rank.rel3();
- }
- } else if (i < rank.rel6()) {
- if (i < rank.rel5()) {
- unit_id += 4;
- i -= rank.rel4();
- } else {
- unit_id += 5;
- i -= rank.rel5();
- }
- } else if (i < rank.rel7()) {
- unit_id += 6;
- i -= rank.rel6();
- } else {
- unit_id += 7;
- i -= rank.rel7();
- }
-
- return select_bit(i, unit_id * 64, units_[unit_id]);
-}
-
-#else // MARISA_WORD_SIZE == 64
-
-std::size_t BitVector::rank1(std::size_t i) const {
- MARISA_DEBUG_IF(ranks_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i > size_, MARISA_BOUND_ERROR);
-
- const RankIndex &rank = ranks_[i / 512];
- std::size_t offset = rank.abs();
- switch ((i / 64) % 8) {
- case 1: {
- offset += rank.rel1();
- break;
- }
- case 2: {
- offset += rank.rel2();
- break;
- }
- case 3: {
- offset += rank.rel3();
- break;
- }
- case 4: {
- offset += rank.rel4();
- break;
- }
- case 5: {
- offset += rank.rel5();
- break;
- }
- case 6: {
- offset += rank.rel6();
- break;
- }
- case 7: {
- offset += rank.rel7();
- break;
- }
- }
- if (((i / 32) & 1) == 1) {
- offset += PopCount::count(units_[(i / 32) - 1]);
- }
- offset += PopCount::count(units_[i / 32] & ((1U << (i % 32)) - 1));
- return offset;
-}
-
-std::size_t BitVector::select0(std::size_t i) const {
- MARISA_DEBUG_IF(select0s_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i >= num_0s(), MARISA_BOUND_ERROR);
-
- const std::size_t select_id = i / 512;
- MARISA_DEBUG_IF((select_id + 1) >= select0s_.size(), MARISA_BOUND_ERROR);
- if ((i % 512) == 0) {
- return select0s_[select_id];
- }
- std::size_t begin = select0s_[select_id] / 512;
- std::size_t end = (select0s_[select_id + 1] + 511) / 512;
- if (begin + 10 >= end) {
- while (i >= ((begin + 1) * 512) - ranks_[begin + 1].abs()) {
- ++begin;
- }
- } else {
- while (begin + 1 < end) {
- const std::size_t middle = (begin + end) / 2;
- if (i < (middle * 512) - ranks_[middle].abs()) {
- end = middle;
- } else {
- begin = middle;
- }
- }
- }
- const std::size_t rank_id = begin;
- i -= (rank_id * 512) - ranks_[rank_id].abs();
-
- const RankIndex &rank = ranks_[rank_id];
- std::size_t unit_id = rank_id * 16;
- if (i < (256U - rank.rel4())) {
- if (i < (128U - rank.rel2())) {
- if (i >= (64U - rank.rel1())) {
- unit_id += 2;
- i -= 64 - rank.rel1();
- }
- } else if (i < (192U - rank.rel3())) {
- unit_id += 4;
- i -= 128 - rank.rel2();
- } else {
- unit_id += 6;
- i -= 192 - rank.rel3();
- }
- } else if (i < (384U - rank.rel6())) {
- if (i < (320U - rank.rel5())) {
- unit_id += 8;
- i -= 256 - rank.rel4();
- } else {
- unit_id += 10;
- i -= 320 - rank.rel5();
- }
- } else if (i < (448U - rank.rel7())) {
- unit_id += 12;
- i -= 384 - rank.rel6();
- } else {
- unit_id += 14;
- i -= 448 - rank.rel7();
- }
-
-#ifdef MARISA_USE_SSE2
- return select_bit(i, unit_id * 32, ~units_[unit_id], ~units_[unit_id + 1]);
-#else // MARISA_USE_SSE2
- UInt32 unit = ~units_[unit_id];
- PopCount count(unit);
- if (i >= count.lo32()) {
- ++unit_id;
- i -= count.lo32();
- unit = ~units_[unit_id];
- count = PopCount(unit);
- }
-
- std::size_t bit_id = unit_id * 32;
- if (i < count.lo16()) {
- if (i >= count.lo8()) {
- bit_id += 8;
- unit >>= 8;
- i -= count.lo8();
- }
- } else if (i < count.lo24()) {
- bit_id += 16;
- unit >>= 16;
- i -= count.lo16();
- } else {
- bit_id += 24;
- unit >>= 24;
- i -= count.lo24();
- }
- return bit_id + SELECT_TABLE[i][unit & 0xFF];
-#endif // MARISA_USE_SSE2
-}
-
-std::size_t BitVector::select1(std::size_t i) const {
- MARISA_DEBUG_IF(select1s_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i >= num_1s(), MARISA_BOUND_ERROR);
-
- const std::size_t select_id = i / 512;
- MARISA_DEBUG_IF((select_id + 1) >= select1s_.size(), MARISA_BOUND_ERROR);
- if ((i % 512) == 0) {
- return select1s_[select_id];
- }
- std::size_t begin = select1s_[select_id] / 512;
- std::size_t end = (select1s_[select_id + 1] + 511) / 512;
- if (begin + 10 >= end) {
- while (i >= ranks_[begin + 1].abs()) {
- ++begin;
- }
- } else {
- while (begin + 1 < end) {
- const std::size_t middle = (begin + end) / 2;
- if (i < ranks_[middle].abs()) {
- end = middle;
- } else {
- begin = middle;
- }
- }
- }
- const std::size_t rank_id = begin;
- i -= ranks_[rank_id].abs();
-
- const RankIndex &rank = ranks_[rank_id];
- std::size_t unit_id = rank_id * 16;
- if (i < rank.rel4()) {
- if (i < rank.rel2()) {
- if (i >= rank.rel1()) {
- unit_id += 2;
- i -= rank.rel1();
- }
- } else if (i < rank.rel3()) {
- unit_id += 4;
- i -= rank.rel2();
- } else {
- unit_id += 6;
- i -= rank.rel3();
- }
- } else if (i < rank.rel6()) {
- if (i < rank.rel5()) {
- unit_id += 8;
- i -= rank.rel4();
- } else {
- unit_id += 10;
- i -= rank.rel5();
- }
- } else if (i < rank.rel7()) {
- unit_id += 12;
- i -= rank.rel6();
- } else {
- unit_id += 14;
- i -= rank.rel7();
- }
-
-#ifdef MARISA_USE_SSE2
- return select_bit(i, unit_id * 32, units_[unit_id], units_[unit_id + 1]);
-#else // MARISA_USE_SSE2
- UInt32 unit = units_[unit_id];
- PopCount count(unit);
- if (i >= count.lo32()) {
- ++unit_id;
- i -= count.lo32();
- unit = units_[unit_id];
- count = PopCount(unit);
- }
-
- std::size_t bit_id = unit_id * 32;
- if (i < count.lo16()) {
- if (i >= count.lo8()) {
- bit_id += 8;
- unit >>= 8;
- i -= count.lo8();
- }
- } else if (i < count.lo24()) {
- bit_id += 16;
- unit >>= 16;
- i -= count.lo16();
- } else {
- bit_id += 24;
- unit >>= 24;
- i -= count.lo24();
- }
- return bit_id + SELECT_TABLE[i][unit & 0xFF];
-#endif // MARISA_USE_SSE2
-}
-
-#endif // MARISA_WORD_SIZE == 64
-
-void BitVector::build_index(const BitVector &bv,
- bool enables_select0, bool enables_select1) {
- ranks_.resize((bv.size() / 512) + (((bv.size() % 512) != 0) ? 1 : 0) + 1);
-
- std::size_t num_0s = 0;
- std::size_t num_1s = 0;
-
- for (std::size_t i = 0; i < bv.size(); ++i) {
- if ((i % 64) == 0) {
- const std::size_t rank_id = i / 512;
- switch ((i / 64) % 8) {
- case 0: {
- ranks_[rank_id].set_abs(num_1s);
- break;
- }
- case 1: {
- ranks_[rank_id].set_rel1(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 2: {
- ranks_[rank_id].set_rel2(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 3: {
- ranks_[rank_id].set_rel3(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 4: {
- ranks_[rank_id].set_rel4(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 5: {
- ranks_[rank_id].set_rel5(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 6: {
- ranks_[rank_id].set_rel6(num_1s - ranks_[rank_id].abs());
- break;
- }
- case 7: {
- ranks_[rank_id].set_rel7(num_1s - ranks_[rank_id].abs());
- break;
- }
- }
- }
-
- if (bv[i]) {
- if (enables_select1 && ((num_1s % 512) == 0)) {
- select1s_.push_back(static_cast<UInt32>(i));
- }
- ++num_1s;
- } else {
- if (enables_select0 && ((num_0s % 512) == 0)) {
- select0s_.push_back(static_cast<UInt32>(i));
- }
- ++num_0s;
- }
- }
-
- if ((bv.size() % 512) != 0) {
- const std::size_t rank_id = (bv.size() - 1) / 512;
- switch (((bv.size() - 1) / 64) % 8) {
- case 0: {
- ranks_[rank_id].set_rel1(num_1s - ranks_[rank_id].abs());
- }
- case 1: {
- ranks_[rank_id].set_rel2(num_1s - ranks_[rank_id].abs());
- }
- case 2: {
- ranks_[rank_id].set_rel3(num_1s - ranks_[rank_id].abs());
- }
- case 3: {
- ranks_[rank_id].set_rel4(num_1s - ranks_[rank_id].abs());
- }
- case 4: {
- ranks_[rank_id].set_rel5(num_1s - ranks_[rank_id].abs());
- }
- case 5: {
- ranks_[rank_id].set_rel6(num_1s - ranks_[rank_id].abs());
- }
- case 6: {
- ranks_[rank_id].set_rel7(num_1s - ranks_[rank_id].abs());
- break;
- }
- }
- }
-
- size_ = bv.size();
- num_1s_ = bv.num_1s();
-
- ranks_.back().set_abs(num_1s);
- if (enables_select0) {
- select0s_.push_back(static_cast<UInt32>(bv.size()));
- select0s_.shrink();
- }
- if (enables_select1) {
- select1s_.push_back(static_cast<UInt32>(bv.size()));
- select1s_.shrink();
- }
-}
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.h b/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.h
deleted file mode 100644
index 56f99ed699..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/bit-vector.h
+++ /dev/null
@@ -1,180 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_BIT_VECTOR_H_
-#define MARISA_GRIMOIRE_VECTOR_BIT_VECTOR_H_
-
-#include "rank-index.h"
-#include "vector.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-
-class BitVector {
- public:
-#if MARISA_WORD_SIZE == 64
- typedef UInt64 Unit;
-#else // MARISA_WORD_SIZE == 64
- typedef UInt32 Unit;
-#endif // MARISA_WORD_SIZE == 64
-
- BitVector()
- : units_(), size_(0), num_1s_(0), ranks_(), select0s_(), select1s_() {}
-
- void build(bool enables_select0, bool enables_select1) {
- BitVector temp;
- temp.build_index(*this, enables_select0, enables_select1);
- units_.shrink();
- temp.units_.swap(units_);
- swap(temp);
- }
-
- void map(Mapper &mapper) {
- BitVector temp;
- temp.map_(mapper);
- swap(temp);
- }
- void read(Reader &reader) {
- BitVector temp;
- temp.read_(reader);
- swap(temp);
- }
- void write(Writer &writer) const {
- write_(writer);
- }
-
- void disable_select0() {
- select0s_.clear();
- }
- void disable_select1() {
- select1s_.clear();
- }
-
- void push_back(bool bit) {
- MARISA_THROW_IF(size_ == MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- if (size_ == (MARISA_WORD_SIZE * units_.size())) {
- units_.resize(units_.size() + (64 / MARISA_WORD_SIZE), 0);
- }
- if (bit) {
- units_[size_ / MARISA_WORD_SIZE] |=
- (Unit)1 << (size_ % MARISA_WORD_SIZE);
- ++num_1s_;
- }
- ++size_;
- }
-
- bool operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- return (units_[i / MARISA_WORD_SIZE]
- & ((Unit)1 << (i % MARISA_WORD_SIZE))) != 0;
- }
-
- std::size_t rank0(std::size_t i) const {
- MARISA_DEBUG_IF(ranks_.empty(), MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i > size_, MARISA_BOUND_ERROR);
- return i - rank1(i);
- }
- std::size_t rank1(std::size_t i) const;
-
- std::size_t select0(std::size_t i) const;
- std::size_t select1(std::size_t i) const;
-
- std::size_t num_0s() const {
- return size_ - num_1s_;
- }
- std::size_t num_1s() const {
- return num_1s_;
- }
-
- bool empty() const {
- return size_ == 0;
- }
- std::size_t size() const {
- return size_;
- }
- std::size_t total_size() const {
- return units_.total_size() + ranks_.total_size()
- + select0s_.total_size() + select1s_.total_size();
- }
- std::size_t io_size() const {
- return units_.io_size() + (sizeof(UInt32) * 2) + ranks_.io_size()
- + select0s_.io_size() + select1s_.io_size();
- }
-
- void clear() {
- BitVector().swap(*this);
- }
- void swap(BitVector &rhs) {
- units_.swap(rhs.units_);
- marisa::swap(size_, rhs.size_);
- marisa::swap(num_1s_, rhs.num_1s_);
- ranks_.swap(rhs.ranks_);
- select0s_.swap(rhs.select0s_);
- select1s_.swap(rhs.select1s_);
- }
-
- private:
- Vector<Unit> units_;
- std::size_t size_;
- std::size_t num_1s_;
- Vector<RankIndex> ranks_;
- Vector<UInt32> select0s_;
- Vector<UInt32> select1s_;
-
- void build_index(const BitVector &bv,
- bool enables_select0, bool enables_select1);
-
- void map_(Mapper &mapper) {
- units_.map(mapper);
- {
- UInt32 temp_size;
- mapper.map(&temp_size);
- size_ = temp_size;
- }
- {
- UInt32 temp_num_1s;
- mapper.map(&temp_num_1s);
- MARISA_THROW_IF(temp_num_1s > size_, MARISA_FORMAT_ERROR);
- num_1s_ = temp_num_1s;
- }
- ranks_.map(mapper);
- select0s_.map(mapper);
- select1s_.map(mapper);
- }
-
- void read_(Reader &reader) {
- units_.read(reader);
- {
- UInt32 temp_size;
- reader.read(&temp_size);
- size_ = temp_size;
- }
- {
- UInt32 temp_num_1s;
- reader.read(&temp_num_1s);
- MARISA_THROW_IF(temp_num_1s > size_, MARISA_FORMAT_ERROR);
- num_1s_ = temp_num_1s;
- }
- ranks_.read(reader);
- select0s_.read(reader);
- select1s_.read(reader);
- }
-
- void write_(Writer &writer) const {
- units_.write(writer);
- writer.write((UInt32)size_);
- writer.write((UInt32)num_1s_);
- ranks_.write(writer);
- select0s_.write(writer);
- select1s_.write(writer);
- }
-
- // Disallows copy and assignment.
- BitVector(const BitVector &);
- BitVector &operator=(const BitVector &);
-};
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_BIT_VECTOR_H_
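
A minimal usage sketch of the BitVector interface declared above, using only members shown in the header (push_back(), build(), rank1(), select1(), num_1s(), size()). The include path is illustrative and assumes the bundled sources are available.

// Usage sketch for the BitVector declared above (include path illustrative).
#include <cstddef>
#include <iostream>
#include "marisa/grimoire/vector/bit-vector.h"

int main() {
  marisa::grimoire::vector::BitVector bv;
  for (std::size_t i = 0; i < 1000; ++i) {
    bv.push_back((i % 3) == 0);                  // set every third bit
  }
  bv.build(/*enables_select0=*/true, /*enables_select1=*/true);

  std::cout << bv.rank1(300) << "\n";            // 1s among bits [0, 300): 100
  std::cout << bv.select1(10) << "\n";           // locate a set bit by rank (inverse of rank1)
  std::cout << bv.num_1s() << " / " << bv.size() << "\n";  // 334 / 1000
}
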
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/flat-vector.h b/contrib/python/marisa-trie/marisa/grimoire/vector/flat-vector.h
deleted file mode 100644
index 14b25d7d82..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/flat-vector.h
+++ /dev/null
@@ -1,206 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_FLAT_VECTOR_H_
-#define MARISA_GRIMOIRE_VECTOR_FLAT_VECTOR_H_
-
-#include "vector.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-
-class FlatVector {
- public:
-#if MARISA_WORD_SIZE == 64
- typedef UInt64 Unit;
-#else // MARISA_WORD_SIZE == 64
- typedef UInt32 Unit;
-#endif // MARISA_WORD_SIZE == 64
-
- FlatVector() : units_(), value_size_(0), mask_(0), size_(0) {}
-
- void build(const Vector<UInt32> &values) {
- FlatVector temp;
- temp.build_(values);
- swap(temp);
- }
-
- void map(Mapper &mapper) {
- FlatVector temp;
- temp.map_(mapper);
- swap(temp);
- }
- void read(Reader &reader) {
- FlatVector temp;
- temp.read_(reader);
- swap(temp);
- }
- void write(Writer &writer) const {
- write_(writer);
- }
-
- UInt32 operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
-
- const std::size_t pos = i * value_size_;
- const std::size_t unit_id = pos / MARISA_WORD_SIZE;
- const std::size_t unit_offset = pos % MARISA_WORD_SIZE;
-
- if ((unit_offset + value_size_) <= MARISA_WORD_SIZE) {
- return (UInt32)(units_[unit_id] >> unit_offset) & mask_;
- } else {
- return (UInt32)((units_[unit_id] >> unit_offset)
- | (units_[unit_id + 1] << (MARISA_WORD_SIZE - unit_offset))) & mask_;
- }
- }
-
- std::size_t value_size() const {
- return value_size_;
- }
- UInt32 mask() const {
- return mask_;
- }
-
- bool empty() const {
- return size_ == 0;
- }
- std::size_t size() const {
- return size_;
- }
- std::size_t total_size() const {
- return units_.total_size();
- }
- std::size_t io_size() const {
- return units_.io_size() + (sizeof(UInt32) * 2) + sizeof(UInt64);
- }
-
- void clear() {
- FlatVector().swap(*this);
- }
- void swap(FlatVector &rhs) {
- units_.swap(rhs.units_);
- marisa::swap(value_size_, rhs.value_size_);
- marisa::swap(mask_, rhs.mask_);
- marisa::swap(size_, rhs.size_);
- }
-
- private:
- Vector<Unit> units_;
- std::size_t value_size_;
- UInt32 mask_;
- std::size_t size_;
-
- void build_(const Vector<UInt32> &values) {
- UInt32 max_value = 0;
- for (std::size_t i = 0; i < values.size(); ++i) {
- if (values[i] > max_value) {
- max_value = values[i];
- }
- }
-
- std::size_t value_size = 0;
- while (max_value != 0) {
- ++value_size;
- max_value >>= 1;
- }
-
- std::size_t num_units = values.empty() ? 0 : (64 / MARISA_WORD_SIZE);
- if (value_size != 0) {
- num_units = (std::size_t)(
- (((UInt64)value_size * values.size()) + (MARISA_WORD_SIZE - 1))
- / MARISA_WORD_SIZE);
- num_units += num_units % (64 / MARISA_WORD_SIZE);
- }
-
- units_.resize(num_units);
- if (num_units > 0) {
- units_.back() = 0;
- }
-
- value_size_ = value_size;
- if (value_size != 0) {
- mask_ = MARISA_UINT32_MAX >> (32 - value_size);
- }
- size_ = values.size();
-
- for (std::size_t i = 0; i < values.size(); ++i) {
- set(i, values[i]);
- }
- }
-
- void map_(Mapper &mapper) {
- units_.map(mapper);
- {
- UInt32 temp_value_size;
- mapper.map(&temp_value_size);
- MARISA_THROW_IF(temp_value_size > 32, MARISA_FORMAT_ERROR);
- value_size_ = temp_value_size;
- }
- {
- UInt32 temp_mask;
- mapper.map(&temp_mask);
- mask_ = temp_mask;
- }
- {
- UInt64 temp_size;
- mapper.map(&temp_size);
- MARISA_THROW_IF(temp_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- size_ = (std::size_t)temp_size;
- }
- }
-
- void read_(Reader &reader) {
- units_.read(reader);
- {
- UInt32 temp_value_size;
- reader.read(&temp_value_size);
- MARISA_THROW_IF(temp_value_size > 32, MARISA_FORMAT_ERROR);
- value_size_ = temp_value_size;
- }
- {
- UInt32 temp_mask;
- reader.read(&temp_mask);
- mask_ = temp_mask;
- }
- {
- UInt64 temp_size;
- reader.read(&temp_size);
- MARISA_THROW_IF(temp_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- size_ = (std::size_t)temp_size;
- }
- }
-
- void write_(Writer &writer) const {
- units_.write(writer);
- writer.write((UInt32)value_size_);
- writer.write((UInt32)mask_);
- writer.write((UInt64)size_);
- }
-
- void set(std::size_t i, UInt32 value) {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- MARISA_DEBUG_IF(value > mask_, MARISA_RANGE_ERROR);
-
- const std::size_t pos = i * value_size_;
- const std::size_t unit_id = pos / MARISA_WORD_SIZE;
- const std::size_t unit_offset = pos % MARISA_WORD_SIZE;
-
- units_[unit_id] &= ~((Unit)mask_ << unit_offset);
- units_[unit_id] |= (Unit)(value & mask_) << unit_offset;
- if ((unit_offset + value_size_) > MARISA_WORD_SIZE) {
- units_[unit_id + 1] &=
- ~((Unit)mask_ >> (MARISA_WORD_SIZE - unit_offset));
- units_[unit_id + 1] |=
- (Unit)(value & mask_) >> (MARISA_WORD_SIZE - unit_offset);
- }
- }
-
- // Disallows copy and assignment.
- FlatVector(const FlatVector &);
- FlatVector &operator=(const FlatVector &);
-};
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_FLAT_VECTOR_H_
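
FlatVector stores each value in exactly value_size bits, where value_size is the bit length of the largest value passed to build(); operator[] and set() handle values that straddle a machine-word boundary with a second shift from the neighbouring unit. The standalone sketch below (PackedVector is a hypothetical stand-in, not the library class) reproduces the same packing arithmetic on 64-bit words.

// Standalone sketch of the bit-packing used by FlatVector above:
// each value occupies value_size bits; reads and writes may straddle a word.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

class PackedVector {
 public:
  PackedVector(std::size_t size, std::size_t value_size)
      : units_((size * value_size + 63) / 64 + 1, 0),
        value_size_(value_size),
        mask_((value_size == 64) ? ~uint64_t{0}
                                 : ((uint64_t{1} << value_size) - 1)) {}

  void set(std::size_t i, uint64_t value) {
    const std::size_t pos = i * value_size_;
    const std::size_t id = pos / 64, off = pos % 64;
    units_[id] &= ~(mask_ << off);
    units_[id] |= (value & mask_) << off;
    if (off + value_size_ > 64) {                // spills into the next word
      units_[id + 1] &= ~(mask_ >> (64 - off));
      units_[id + 1] |= (value & mask_) >> (64 - off);
    }
  }

  uint64_t get(std::size_t i) const {
    const std::size_t pos = i * value_size_;
    const std::size_t id = pos / 64, off = pos % 64;
    if (off + value_size_ <= 64) {
      return (units_[id] >> off) & mask_;
    }
    return ((units_[id] >> off) | (units_[id + 1] << (64 - off))) & mask_;
  }

 private:
  std::vector<uint64_t> units_;
  std::size_t value_size_;
  uint64_t mask_;
};

int main() {
  PackedVector pv(100, 13);                      // 100 values, 13 bits each
  for (std::size_t i = 0; i < 100; ++i) pv.set(i, (i * 37) & 0x1FFF);
  std::cout << pv.get(5) << " " << pv.get(99) << "\n";  // 185 3663
}
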
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/pop-count.h b/contrib/python/marisa-trie/marisa/grimoire/vector/pop-count.h
deleted file mode 100644
index 6d04cf831d..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/pop-count.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_POP_COUNT_H_
-#define MARISA_GRIMOIRE_VECTOR_POP_COUNT_H_
-
-#include "../intrin.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-
-#if MARISA_WORD_SIZE == 64
-
-class PopCount {
- public:
- explicit PopCount(UInt64 x) : value_() {
- x = (x & 0x5555555555555555ULL) + ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1);
- x = (x & 0x3333333333333333ULL) + ((x & 0xCCCCCCCCCCCCCCCCULL) >> 2);
- x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x & 0xF0F0F0F0F0F0F0F0ULL) >> 4);
- x *= 0x0101010101010101ULL;
- value_ = x;
- }
-
- std::size_t lo8() const {
- return (std::size_t)(value_ & 0xFFU);
- }
- std::size_t lo16() const {
- return (std::size_t)((value_ >> 8) & 0xFFU);
- }
- std::size_t lo24() const {
- return (std::size_t)((value_ >> 16) & 0xFFU);
- }
- std::size_t lo32() const {
- return (std::size_t)((value_ >> 24) & 0xFFU);
- }
- std::size_t lo40() const {
- return (std::size_t)((value_ >> 32) & 0xFFU);
- }
- std::size_t lo48() const {
- return (std::size_t)((value_ >> 40) & 0xFFU);
- }
- std::size_t lo56() const {
- return (std::size_t)((value_ >> 48) & 0xFFU);
- }
- std::size_t lo64() const {
- return (std::size_t)((value_ >> 56) & 0xFFU);
- }
-
- static std::size_t count(UInt64 x) {
-#if defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
- #ifdef _MSC_VER
- return __popcnt64(x);
- #else // _MSC_VER
- return _mm_popcnt_u64(x);
- #endif // _MSC_VER
-#else // defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
- return PopCount(x).lo64();
-#endif // defined(MARISA_X64) && defined(MARISA_USE_POPCNT)
- }
-
- private:
- UInt64 value_;
-};
-
-#else // MARISA_WORD_SIZE == 64
-
-class PopCount {
- public:
- explicit PopCount(UInt32 x) : value_() {
- x = (x & 0x55555555U) + ((x & 0xAAAAAAAAU) >> 1);
- x = (x & 0x33333333U) + ((x & 0xCCCCCCCCU) >> 2);
- x = (x & 0x0F0F0F0FU) + ((x & 0xF0F0F0F0U) >> 4);
- x *= 0x01010101U;
- value_ = x;
- }
-
- std::size_t lo8() const {
- return value_ & 0xFFU;
- }
- std::size_t lo16() const {
- return (value_ >> 8) & 0xFFU;
- }
- std::size_t lo24() const {
- return (value_ >> 16) & 0xFFU;
- }
- std::size_t lo32() const {
- return (value_ >> 24) & 0xFFU;
- }
-
- static std::size_t count(UInt32 x) {
-#ifdef MARISA_USE_POPCNT
- #ifdef _MSC_VER
- return __popcnt(x);
- #else // _MSC_VER
- return _mm_popcnt_u32(x);
- #endif // _MSC_VER
-#else // MARISA_USE_POPCNT
- return PopCount(x).lo32();
-#endif // MARISA_USE_POPCNT
- }
-
- private:
- UInt32 value_;
-};
-
-#endif // MARISA_WORD_SIZE == 64
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_POP_COUNT_H_
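
The portable branch of PopCount above is a SWAR popcount with a twist: after the three masking steps each byte of x holds the popcount of its own 8 bits, and the multiplication by 0x0101010101010101 turns byte k of the product into the sum of bytes 0 through k. That is why lo8() through lo64() can return the popcount of successively longer low-order prefixes from a single precomputed value. A standalone check of that property (byte_prefix_counts is a hypothetical helper; std::popcount is only used to cross-check and needs C++20):

// Standalone check of the SWAR trick used by PopCount above: byte k of the
// product is the popcount of the low 8*(k+1) bits of x.
#include <bit>        // std::popcount (C++20), used only for cross-checking
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

uint64_t byte_prefix_counts(uint64_t x) {
  x = (x & 0x5555555555555555ULL) + ((x & 0xAAAAAAAAAAAAAAAAULL) >> 1);
  x = (x & 0x3333333333333333ULL) + ((x & 0xCCCCCCCCCCCCCCCCULL) >> 2);
  x = (x & 0x0F0F0F0F0F0F0F0FULL) + ((x & 0xF0F0F0F0F0F0F0F0ULL) >> 4);
  return x * 0x0101010101010101ULL;              // prefix sums, one per byte
}

int main() {
  const uint64_t x = 0xF0F0123456789ABCULL;
  const uint64_t v = byte_prefix_counts(x);
  for (int k = 0; k < 8; ++k) {
    const std::size_t prefix = (v >> (8 * k)) & 0xFF;   // like lo8() .. lo64()
    const uint64_t low_bits =
        (k == 7) ? x : (x & ((uint64_t{1} << (8 * (k + 1))) - 1));
    assert(prefix == static_cast<std::size_t>(std::popcount(low_bits)));
  }
  std::cout << ((v >> 56) & 0xFF) << "\n";       // total popcount of x
}
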
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/rank-index.h b/contrib/python/marisa-trie/marisa/grimoire/vector/rank-index.h
deleted file mode 100644
index 2403709954..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/rank-index.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_RANK_INDEX_H_
-#define MARISA_GRIMOIRE_VECTOR_RANK_INDEX_H_
-
-#include "../../base.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-
-class RankIndex {
- public:
- RankIndex() : abs_(0), rel_lo_(0), rel_hi_(0) {}
-
- void set_abs(std::size_t value) {
- MARISA_DEBUG_IF(value > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- abs_ = (UInt32)value;
- }
- void set_rel1(std::size_t value) {
- MARISA_DEBUG_IF(value > 64, MARISA_RANGE_ERROR);
- rel_lo_ = (UInt32)((rel_lo_ & ~0x7FU) | (value & 0x7FU));
- }
- void set_rel2(std::size_t value) {
- MARISA_DEBUG_IF(value > 128, MARISA_RANGE_ERROR);
- rel_lo_ = (UInt32)((rel_lo_ & ~(0xFFU << 7)) | ((value & 0xFFU) << 7));
- }
- void set_rel3(std::size_t value) {
- MARISA_DEBUG_IF(value > 192, MARISA_RANGE_ERROR);
- rel_lo_ = (UInt32)((rel_lo_ & ~(0xFFU << 15)) | ((value & 0xFFU) << 15));
- }
- void set_rel4(std::size_t value) {
- MARISA_DEBUG_IF(value > 256, MARISA_RANGE_ERROR);
- rel_lo_ = (UInt32)((rel_lo_ & ~(0x1FFU << 23)) | ((value & 0x1FFU) << 23));
- }
- void set_rel5(std::size_t value) {
- MARISA_DEBUG_IF(value > 320, MARISA_RANGE_ERROR);
- rel_hi_ = (UInt32)((rel_hi_ & ~0x1FFU) | (value & 0x1FFU));
- }
- void set_rel6(std::size_t value) {
- MARISA_DEBUG_IF(value > 384, MARISA_RANGE_ERROR);
- rel_hi_ = (UInt32)((rel_hi_ & ~(0x1FFU << 9)) | ((value & 0x1FFU) << 9));
- }
- void set_rel7(std::size_t value) {
- MARISA_DEBUG_IF(value > 448, MARISA_RANGE_ERROR);
- rel_hi_ = (UInt32)((rel_hi_ & ~(0x1FFU << 18)) | ((value & 0x1FFU) << 18));
- }
-
- std::size_t abs() const {
- return abs_;
- }
- std::size_t rel1() const {
- return rel_lo_ & 0x7FU;
- }
- std::size_t rel2() const {
- return (rel_lo_ >> 7) & 0xFFU;
- }
- std::size_t rel3() const {
- return (rel_lo_ >> 15) & 0xFFU;
- }
- std::size_t rel4() const {
- return (rel_lo_ >> 23) & 0x1FFU;
- }
- std::size_t rel5() const {
- return rel_hi_ & 0x1FFU;
- }
- std::size_t rel6() const {
- return (rel_hi_ >> 9) & 0x1FFU;
- }
- std::size_t rel7() const {
- return (rel_hi_ >> 18) & 0x1FFU;
- }
-
- private:
- UInt32 abs_;
- UInt32 rel_lo_;
- UInt32 rel_hi_;
-};
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_RANK_INDEX_H_
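
RankIndex packs an absolute count plus seven relative counts into 12 bytes: rel1 needs 7 bits (at most 64), rel2 and rel3 need 8 bits (at most 128 and 192), and rel4 through rel7 need 9 bits (at most 256 to 448). rel_lo_ therefore holds rel1..rel4 (7+8+8+9 = 32 bits) and rel_hi_ holds rel5..rel7 (27 of 32 bits). The standalone sketch below (PackedRank and kFields are hypothetical, not the library's types) packs and unpacks the same layout.

// Standalone sketch of the RankIndex bit layout above:
// rel1:7 | rel2:8 | rel3:8 | rel4:9 in rel_lo, rel5..rel7: 9 bits each in rel_hi.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iostream>

struct PackedRank {
  uint32_t abs = 0;
  uint32_t rel_lo = 0;
  uint32_t rel_hi = 0;
};

// Field descriptors for rel1..rel7: which word, bit offset, bit mask.
struct Field { int word; int shift; uint32_t mask; };
constexpr Field kFields[7] = {
  {0, 0, 0x7F}, {0, 7, 0xFF}, {0, 15, 0xFF}, {0, 23, 0x1FF},
  {1, 0, 0x1FF}, {1, 9, 0x1FF}, {1, 18, 0x1FF},
};

void set_rel(PackedRank& r, int k, uint32_t value) {         // k in [1, 7]
  const Field& f = kFields[k - 1];
  uint32_t& w = (f.word == 0) ? r.rel_lo : r.rel_hi;
  w = (w & ~(f.mask << f.shift)) | ((value & f.mask) << f.shift);
}

uint32_t rel(const PackedRank& r, int k) {
  const Field& f = kFields[k - 1];
  const uint32_t w = (f.word == 0) ? r.rel_lo : r.rel_hi;
  return (w >> f.shift) & f.mask;
}

int main() {
  PackedRank r;
  r.abs = 123456;
  for (int k = 1; k <= 7; ++k) set_rel(r, k, 64u * k);       // worst-case values
  for (int k = 1; k <= 7; ++k) assert(rel(r, k) == 64u * k);
  std::cout << sizeof(PackedRank) << " bytes per 512-bit block\n";  // 12
}
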
diff --git a/contrib/python/marisa-trie/marisa/grimoire/vector/vector.h b/contrib/python/marisa-trie/marisa/grimoire/vector/vector.h
deleted file mode 100644
index 148cc8b491..0000000000
--- a/contrib/python/marisa-trie/marisa/grimoire/vector/vector.h
+++ /dev/null
@@ -1,257 +0,0 @@
-#pragma once
-#ifndef MARISA_GRIMOIRE_VECTOR_VECTOR_H_
-#define MARISA_GRIMOIRE_VECTOR_VECTOR_H_
-
-#include <new>
-
-#include "../io.h"
-
-namespace marisa {
-namespace grimoire {
-namespace vector {
-
-template <typename T>
-class Vector {
- public:
- Vector()
- : buf_(), objs_(NULL), const_objs_(NULL),
- size_(0), capacity_(0), fixed_(false) {}
- ~Vector() {
- if (objs_ != NULL) {
- for (std::size_t i = 0; i < size_; ++i) {
- objs_[i].~T();
- }
- }
- }
-
- void map(Mapper &mapper) {
- Vector temp;
- temp.map_(mapper);
- swap(temp);
- }
-
- void read(Reader &reader) {
- Vector temp;
- temp.read_(reader);
- swap(temp);
- }
-
- void write(Writer &writer) const {
- write_(writer);
- }
-
- void push_back(const T &x) {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(size_ == max_size(), MARISA_SIZE_ERROR);
- reserve(size_ + 1);
- new (&objs_[size_]) T(x);
- ++size_;
- }
-
- void pop_back() {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(size_ == 0, MARISA_STATE_ERROR);
- objs_[--size_].~T();
- }
-
- // resize() assumes that T's placement new does not throw an exception.
- void resize(std::size_t size) {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- reserve(size);
- for (std::size_t i = size_; i < size; ++i) {
- new (&objs_[i]) T;
- }
- for (std::size_t i = size; i < size_; ++i) {
- objs_[i].~T();
- }
- size_ = size;
- }
-
- // resize() assumes that T's placement new does not throw an exception.
- void resize(std::size_t size, const T &x) {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- reserve(size);
- for (std::size_t i = size_; i < size; ++i) {
- new (&objs_[i]) T(x);
- }
- for (std::size_t i = size; i < size_; ++i) {
- objs_[i].~T();
- }
- size_ = size;
- }
-
- void reserve(std::size_t capacity) {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- if (capacity <= capacity_) {
- return;
- }
- MARISA_DEBUG_IF(capacity > max_size(), MARISA_SIZE_ERROR);
- std::size_t new_capacity = capacity;
- if (capacity_ > (capacity / 2)) {
- if (capacity_ > (max_size() / 2)) {
- new_capacity = max_size();
- } else {
- new_capacity = capacity_ * 2;
- }
- }
- realloc(new_capacity);
- }
-
- void shrink() {
- MARISA_THROW_IF(fixed_, MARISA_STATE_ERROR);
- if (size_ != capacity_) {
- realloc(size_);
- }
- }
-
- void fix() {
- MARISA_THROW_IF(fixed_, MARISA_STATE_ERROR);
- fixed_ = true;
- }
-
- const T *begin() const {
- return const_objs_;
- }
- const T *end() const {
- return const_objs_ + size_;
- }
- const T &operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- return const_objs_[i];
- }
- const T &front() const {
- MARISA_DEBUG_IF(size_ == 0, MARISA_STATE_ERROR);
- return const_objs_[0];
- }
- const T &back() const {
- MARISA_DEBUG_IF(size_ == 0, MARISA_STATE_ERROR);
- return const_objs_[size_ - 1];
- }
-
- T *begin() {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- return objs_;
- }
- T *end() {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- return objs_ + size_;
- }
- T &operator[](std::size_t i) {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- return objs_[i];
- }
- T &front() {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(size_ == 0, MARISA_STATE_ERROR);
- return objs_[0];
- }
- T &back() {
- MARISA_DEBUG_IF(fixed_, MARISA_STATE_ERROR);
- MARISA_DEBUG_IF(size_ == 0, MARISA_STATE_ERROR);
- return objs_[size_ - 1];
- }
-
- std::size_t size() const {
- return size_;
- }
- std::size_t capacity() const {
- return capacity_;
- }
- bool fixed() const {
- return fixed_;
- }
-
- bool empty() const {
- return size_ == 0;
- }
- std::size_t total_size() const {
- return sizeof(T) * size_;
- }
- std::size_t io_size() const {
- return sizeof(UInt64) + ((total_size() + 7) & ~(std::size_t)0x07);
- }
-
- void clear() {
- Vector().swap(*this);
- }
- void swap(Vector &rhs) {
- buf_.swap(rhs.buf_);
- marisa::swap(objs_, rhs.objs_);
- marisa::swap(const_objs_, rhs.const_objs_);
- marisa::swap(size_, rhs.size_);
- marisa::swap(capacity_, rhs.capacity_);
- marisa::swap(fixed_, rhs.fixed_);
- }
-
- static std::size_t max_size() {
- return MARISA_SIZE_MAX / sizeof(T);
- }
-
- private:
- scoped_array<char> buf_;
- T *objs_;
- const T *const_objs_;
- std::size_t size_;
- std::size_t capacity_;
- bool fixed_;
-
- void map_(Mapper &mapper) {
- UInt64 total_size;
- mapper.map(&total_size);
- MARISA_THROW_IF(total_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- MARISA_THROW_IF((total_size % sizeof(T)) != 0, MARISA_FORMAT_ERROR);
- const std::size_t size = (std::size_t)(total_size / sizeof(T));
- mapper.map(&const_objs_, size);
- mapper.seek((std::size_t)((8 - (total_size % 8)) % 8));
- size_ = size;
- fix();
- }
- void read_(Reader &reader) {
- UInt64 total_size;
- reader.read(&total_size);
- MARISA_THROW_IF(total_size > MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- MARISA_THROW_IF((total_size % sizeof(T)) != 0, MARISA_FORMAT_ERROR);
- const std::size_t size = (std::size_t)(total_size / sizeof(T));
- resize(size);
- reader.read(objs_, size);
- reader.seek((std::size_t)((8 - (total_size % 8)) % 8));
- }
- void write_(Writer &writer) const {
- writer.write((UInt64)total_size());
- writer.write(const_objs_, size_);
- writer.seek((8 - (total_size() % 8)) % 8);
- }
-
- // realloc() assumes that T's placement new does not throw an exception.
- void realloc(std::size_t new_capacity) {
- MARISA_DEBUG_IF(new_capacity > max_size(), MARISA_SIZE_ERROR);
-
- scoped_array<char> new_buf(
- new (std::nothrow) char[sizeof(T) * new_capacity]);
- MARISA_DEBUG_IF(new_buf.get() == NULL, MARISA_MEMORY_ERROR);
- T *new_objs = reinterpret_cast<T *>(new_buf.get());
-
- for (std::size_t i = 0; i < size_; ++i) {
- new (&new_objs[i]) T(objs_[i]);
- }
- for (std::size_t i = 0; i < size_; ++i) {
- objs_[i].~T();
- }
-
- buf_.swap(new_buf);
- objs_ = new_objs;
- const_objs_ = new_objs;
- capacity_ = new_capacity;
- }
-
- // Disallows copy and assignment.
- Vector(const Vector &);
- Vector &operator=(const Vector &);
-};
-
-} // namespace vector
-} // namespace grimoire
-} // namespace marisa
-
-#endif // MARISA_GRIMOIRE_VECTOR_VECTOR_H_
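
Two details of Vector<T> above are easy to miss: io_size() is the 8-byte length header plus the payload rounded up to an 8-byte boundary (which is why map_/read_/write_ seek over the padding), and reserve() grows to at least double the current capacity so that repeated push_back() stays amortized constant time. A standalone sketch of both calculations (io_size and next_capacity are hypothetical helpers mirroring the logic above):

// Standalone sketch of two pieces of arithmetic used by Vector<T> above.
#include <cstddef>
#include <cstdint>
#include <iostream>

// io_size: 8-byte length header plus payload rounded up to 8 bytes.
std::size_t io_size(std::size_t total_size) {
  return sizeof(uint64_t) + ((total_size + 7) & ~static_cast<std::size_t>(0x07));
}

// reserve: grow to the request, but at least double the current capacity
// (clamped to max_size), so repeated push_back stays amortized O(1).
std::size_t next_capacity(std::size_t current, std::size_t requested,
                          std::size_t max_size) {
  if (requested <= current) return current;
  std::size_t new_capacity = requested;
  if (current > requested / 2) {
    new_capacity = (current > max_size / 2) ? max_size : current * 2;
  }
  return new_capacity;
}

int main() {
  std::cout << io_size(13) << "\n";                    // 8 + 16 = 24
  std::cout << next_capacity(8, 9, 1 << 20) << "\n";   // 16: doubling wins
  std::cout << next_capacity(8, 100, 1 << 20) << "\n"; // 100: the request wins
}
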
diff --git a/contrib/python/marisa-trie/marisa/iostream.h b/contrib/python/marisa-trie/marisa/iostream.h
deleted file mode 100644
index da5ec77a6c..0000000000
--- a/contrib/python/marisa-trie/marisa/iostream.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-#ifndef MARISA_IOSTREAM_H_
-#define MARISA_IOSTREAM_H_
-
-#include <iosfwd>
-
-namespace marisa {
-
-class Trie;
-
-std::istream &read(std::istream &stream, Trie *trie);
-std::ostream &write(std::ostream &stream, const Trie &trie);
-
-std::istream &operator>>(std::istream &stream, Trie &trie);
-std::ostream &operator<<(std::ostream &stream, const Trie &trie);
-
-} // namespace marisa
-
-#endif // MARISA_IOSTREAM_H_
diff --git a/contrib/python/marisa-trie/marisa/key.h b/contrib/python/marisa-trie/marisa/key.h
deleted file mode 100644
index 48e03226c4..0000000000
--- a/contrib/python/marisa-trie/marisa/key.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#pragma once
-#ifndef MARISA_KEY_H_
-#define MARISA_KEY_H_
-
-#include "base.h"
-
-namespace marisa {
-
-class Key {
- public:
- Key() : ptr_(NULL), length_(0), union_() {
- union_.id = 0;
- }
- Key(const Key &key)
- : ptr_(key.ptr_), length_(key.length_), union_(key.union_) {}
-
- Key &operator=(const Key &key) {
- ptr_ = key.ptr_;
- length_ = key.length_;
- union_ = key.union_;
- return *this;
- }
-
- char operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= length_, MARISA_BOUND_ERROR);
- return ptr_[i];
- }
-
- void set_str(const char *str) {
- MARISA_DEBUG_IF(str == NULL, MARISA_NULL_ERROR);
- std::size_t length = 0;
- while (str[length] != '\0') {
- ++length;
- }
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- ptr_ = str;
- length_ = (UInt32)length;
- }
- void set_str(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_DEBUG_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- ptr_ = ptr;
- length_ = (UInt32)length;
- }
- void set_id(std::size_t id) {
- MARISA_DEBUG_IF(id > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
- union_.id = (UInt32)id;
- }
- void set_weight(float weight) {
- union_.weight = weight;
- }
-
- const char *ptr() const {
- return ptr_;
- }
- std::size_t length() const {
- return length_;
- }
- std::size_t id() const {
- return union_.id;
- }
- float weight() const {
- return union_.weight;
- }
-
- void clear() {
- Key().swap(*this);
- }
- void swap(Key &rhs) {
- marisa::swap(ptr_, rhs.ptr_);
- marisa::swap(length_, rhs.length_);
- marisa::swap(union_.id, rhs.union_.id);
- }
-
- private:
- const char *ptr_;
- UInt32 length_;
- union Union {
- UInt32 id;
- float weight;
- } union_;
-};
-
-} // namespace marisa
-
-#endif // MARISA_KEY_H_
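
Key overlays the id and the weight in a single 32-bit union: set_weight() matters while keys are being collected (weights feed weight-ordered node arrangement), and set_id() reuses the same four bytes for the identifier the key ends up with, so the record stays compact. A standalone sketch of that overlay (KeyRecord is a hypothetical stand-in; the weight-before-build, id-after-build reading is inferred from the setters above and the weights parameter in the Cython wrapper further down, so treat it as an assumption):

// Standalone sketch of the id/weight overlay used by Key above.
#include <cstdint>
#include <iostream>

struct KeyRecord {
  const char* ptr = nullptr;
  uint32_t length = 0;
  union {
    uint32_t id;
    float weight;
  } u;
};

int main() {
  KeyRecord k;
  k.ptr = "apple";
  k.length = 5;
  k.u.weight = 2.5f;                 // while collecting keys: a frequency hint
  std::cout << k.u.weight << "\n";   // 2.5
  k.u.id = 42;                       // afterwards: the same bytes hold an id
  std::cout << k.u.id << "\n";       // 42
}
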
diff --git a/contrib/python/marisa-trie/marisa/keyset.cc b/contrib/python/marisa-trie/marisa/keyset.cc
deleted file mode 100644
index adb82b31fe..0000000000
--- a/contrib/python/marisa-trie/marisa/keyset.cc
+++ /dev/null
@@ -1,181 +0,0 @@
-#include <new>
-
-#include "keyset.h"
-
-namespace marisa {
-
-Keyset::Keyset()
- : base_blocks_(), base_blocks_size_(0), base_blocks_capacity_(0),
- extra_blocks_(), extra_blocks_size_(0), extra_blocks_capacity_(0),
- key_blocks_(), key_blocks_size_(0), key_blocks_capacity_(0),
- ptr_(NULL), avail_(0), size_(0), total_length_(0) {}
-
-void Keyset::push_back(const Key &key) {
- MARISA_DEBUG_IF(size_ == MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
-
- char * const key_ptr = reserve(key.length());
- for (std::size_t i = 0; i < key.length(); ++i) {
- key_ptr[i] = key[i];
- }
-
- Key &new_key = key_blocks_[size_ / KEY_BLOCK_SIZE][size_ % KEY_BLOCK_SIZE];
- new_key.set_str(key_ptr, key.length());
- new_key.set_id(key.id());
- ++size_;
- total_length_ += new_key.length();
-}
-
-void Keyset::push_back(const Key &key, char end_marker) {
- MARISA_DEBUG_IF(size_ == MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
-
- if ((size_ / KEY_BLOCK_SIZE) == key_blocks_size_) {
- append_key_block();
- }
-
- char * const key_ptr = reserve(key.length() + 1);
- for (std::size_t i = 0; i < key.length(); ++i) {
- key_ptr[i] = key[i];
- }
- key_ptr[key.length()] = end_marker;
-
- Key &new_key = key_blocks_[size_ / KEY_BLOCK_SIZE][size_ % KEY_BLOCK_SIZE];
- new_key.set_str(key_ptr, key.length());
- new_key.set_id(key.id());
- ++size_;
- total_length_ += new_key.length();
-}
-
-void Keyset::push_back(const char *str) {
- MARISA_DEBUG_IF(size_ == MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- MARISA_THROW_IF(str == NULL, MARISA_NULL_ERROR);
-
- std::size_t length = 0;
- while (str[length] != '\0') {
- ++length;
- }
- push_back(str, length);
-}
-
-void Keyset::push_back(const char *ptr, std::size_t length, float weight) {
- MARISA_DEBUG_IF(size_ == MARISA_SIZE_MAX, MARISA_SIZE_ERROR);
- MARISA_THROW_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- MARISA_THROW_IF(length > MARISA_UINT32_MAX, MARISA_SIZE_ERROR);
-
- char * const key_ptr = reserve(length);
- for (std::size_t i = 0; i < length; ++i) {
- key_ptr[i] = ptr[i];
- }
-
- Key &key = key_blocks_[size_ / KEY_BLOCK_SIZE][size_ % KEY_BLOCK_SIZE];
- key.set_str(key_ptr, length);
- key.set_weight(weight);
- ++size_;
- total_length_ += length;
-}
-
-void Keyset::reset() {
- base_blocks_size_ = 0;
- extra_blocks_size_ = 0;
- ptr_ = NULL;
- avail_ = 0;
- size_ = 0;
- total_length_ = 0;
-}
-
-void Keyset::clear() {
- Keyset().swap(*this);
-}
-
-void Keyset::swap(Keyset &rhs) {
- base_blocks_.swap(rhs.base_blocks_);
- marisa::swap(base_blocks_size_, rhs.base_blocks_size_);
- marisa::swap(base_blocks_capacity_, rhs.base_blocks_capacity_);
- extra_blocks_.swap(rhs.extra_blocks_);
- marisa::swap(extra_blocks_size_, rhs.extra_blocks_size_);
- marisa::swap(extra_blocks_capacity_, rhs.extra_blocks_capacity_);
- key_blocks_.swap(rhs.key_blocks_);
- marisa::swap(key_blocks_size_, rhs.key_blocks_size_);
- marisa::swap(key_blocks_capacity_, rhs.key_blocks_capacity_);
- marisa::swap(ptr_, rhs.ptr_);
- marisa::swap(avail_, rhs.avail_);
- marisa::swap(size_, rhs.size_);
- marisa::swap(total_length_, rhs.total_length_);
-}
-
-char *Keyset::reserve(std::size_t size) {
- if ((size_ / KEY_BLOCK_SIZE) == key_blocks_size_) {
- append_key_block();
- }
-
- if (size > EXTRA_BLOCK_SIZE) {
- append_extra_block(size);
- return extra_blocks_[extra_blocks_size_ - 1].get();
- } else {
- if (size > avail_) {
- append_base_block();
- }
- ptr_ += size;
- avail_ -= size;
- return ptr_ - size;
- }
-}
-
-void Keyset::append_base_block() {
- if (base_blocks_size_ == base_blocks_capacity_) {
- const std::size_t new_capacity =
- (base_blocks_size_ != 0) ? (base_blocks_size_ * 2) : 1;
- scoped_array<scoped_array<char> > new_blocks(
- new (std::nothrow) scoped_array<char>[new_capacity]);
- MARISA_THROW_IF(new_blocks.get() == NULL, MARISA_MEMORY_ERROR);
- for (std::size_t i = 0; i < base_blocks_size_; ++i) {
- base_blocks_[i].swap(new_blocks[i]);
- }
- base_blocks_.swap(new_blocks);
- base_blocks_capacity_ = new_capacity;
- }
- if (base_blocks_[base_blocks_size_].get() == NULL) {
- scoped_array<char> new_block(new (std::nothrow) char[BASE_BLOCK_SIZE]);
- MARISA_THROW_IF(new_block.get() == NULL, MARISA_MEMORY_ERROR);
- base_blocks_[base_blocks_size_].swap(new_block);
- }
- ptr_ = base_blocks_[base_blocks_size_++].get();
- avail_ = BASE_BLOCK_SIZE;
-}
-
-void Keyset::append_extra_block(std::size_t size) {
- if (extra_blocks_size_ == extra_blocks_capacity_) {
- const std::size_t new_capacity =
- (extra_blocks_size_ != 0) ? (extra_blocks_size_ * 2) : 1;
- scoped_array<scoped_array<char> > new_blocks(
- new (std::nothrow) scoped_array<char>[new_capacity]);
- MARISA_THROW_IF(new_blocks.get() == NULL, MARISA_MEMORY_ERROR);
- for (std::size_t i = 0; i < extra_blocks_size_; ++i) {
- extra_blocks_[i].swap(new_blocks[i]);
- }
- extra_blocks_.swap(new_blocks);
- extra_blocks_capacity_ = new_capacity;
- }
- scoped_array<char> new_block(new (std::nothrow) char[size]);
- MARISA_THROW_IF(new_block.get() == NULL, MARISA_MEMORY_ERROR);
- extra_blocks_[extra_blocks_size_++].swap(new_block);
-}
-
-void Keyset::append_key_block() {
- if (key_blocks_size_ == key_blocks_capacity_) {
- const std::size_t new_capacity =
- (key_blocks_size_ != 0) ? (key_blocks_size_ * 2) : 1;
- scoped_array<scoped_array<Key> > new_blocks(
- new (std::nothrow) scoped_array<Key>[new_capacity]);
- MARISA_THROW_IF(new_blocks.get() == NULL, MARISA_MEMORY_ERROR);
- for (std::size_t i = 0; i < key_blocks_size_; ++i) {
- key_blocks_[i].swap(new_blocks[i]);
- }
- key_blocks_.swap(new_blocks);
- key_blocks_capacity_ = new_capacity;
- }
- scoped_array<Key> new_block(new (std::nothrow) Key[KEY_BLOCK_SIZE]);
- MARISA_THROW_IF(new_block.get() == NULL, MARISA_MEMORY_ERROR);
- key_blocks_[key_blocks_size_++].swap(new_block);
-}
-
-} // namespace marisa
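
Keyset::reserve() above implements a small arena: keys no longer than EXTRA_BLOCK_SIZE are carved sequentially out of shared 4096-byte base blocks, anything larger gets a dedicated extra block, and the arrays of block pointers double when full (append_base_block, append_extra_block, append_key_block). The standalone sketch below (KeyArena is hypothetical and, unlike the original, does not reuse base blocks after reset()) shows the same placement decision.

// Standalone sketch of the arena policy in Keyset::reserve() above.
#include <cstddef>
#include <iostream>
#include <memory>
#include <vector>

class KeyArena {
 public:
  static constexpr std::size_t kBaseBlockSize = 4096;
  static constexpr std::size_t kExtraBlockSize = 1024;

  char* reserve(std::size_t size) {
    if (size > kExtraBlockSize) {                // oversized key: its own block
      extra_blocks_.push_back(std::make_unique<char[]>(size));
      return extra_blocks_.back().get();
    }
    if (size > avail_) {                         // current base block is full
      base_blocks_.push_back(std::make_unique<char[]>(kBaseBlockSize));
      ptr_ = base_blocks_.back().get();
      avail_ = kBaseBlockSize;
    }
    char* result = ptr_;
    ptr_ += size;
    avail_ -= size;
    return result;
  }

  std::size_t num_base_blocks() const { return base_blocks_.size(); }
  std::size_t num_extra_blocks() const { return extra_blocks_.size(); }

 private:
  std::vector<std::unique_ptr<char[]>> base_blocks_;
  std::vector<std::unique_ptr<char[]>> extra_blocks_;
  char* ptr_ = nullptr;
  std::size_t avail_ = 0;
};

int main() {
  KeyArena arena;
  for (int i = 0; i < 1000; ++i) arena.reserve(10);   // 10000 bytes -> 3 base blocks
  arena.reserve(5000);                                // one extra block
  std::cout << arena.num_base_blocks() << " base, "
            << arena.num_extra_blocks() << " extra\n";  // prints "3 base, 1 extra"
}
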
diff --git a/contrib/python/marisa-trie/marisa/keyset.h b/contrib/python/marisa-trie/marisa/keyset.h
deleted file mode 100644
index 86762dba47..0000000000
--- a/contrib/python/marisa-trie/marisa/keyset.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#pragma once
-#ifndef MARISA_KEYSET_H_
-#define MARISA_KEYSET_H_
-
-#include "key.h"
-
-namespace marisa {
-
-class Keyset {
- public:
- enum {
- BASE_BLOCK_SIZE = 4096,
- EXTRA_BLOCK_SIZE = 1024,
- KEY_BLOCK_SIZE = 256
- };
-
- Keyset();
-
- void push_back(const Key &key);
- void push_back(const Key &key, char end_marker);
-
- void push_back(const char *str);
- void push_back(const char *ptr, std::size_t length, float weight = 1.0);
-
- const Key &operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- return key_blocks_[i / KEY_BLOCK_SIZE][i % KEY_BLOCK_SIZE];
- }
- Key &operator[](std::size_t i) {
- MARISA_DEBUG_IF(i >= size_, MARISA_BOUND_ERROR);
- return key_blocks_[i / KEY_BLOCK_SIZE][i % KEY_BLOCK_SIZE];
- }
-
- std::size_t num_keys() const {
- return size_;
- }
-
- bool empty() const {
- return size_ == 0;
- }
- std::size_t size() const {
- return size_;
- }
- std::size_t total_length() const {
- return total_length_;
- }
-
- void reset();
-
- void clear();
- void swap(Keyset &rhs);
-
- private:
- scoped_array<scoped_array<char> > base_blocks_;
- std::size_t base_blocks_size_;
- std::size_t base_blocks_capacity_;
- scoped_array<scoped_array<char> > extra_blocks_;
- std::size_t extra_blocks_size_;
- std::size_t extra_blocks_capacity_;
- scoped_array<scoped_array<Key> > key_blocks_;
- std::size_t key_blocks_size_;
- std::size_t key_blocks_capacity_;
- char *ptr_;
- std::size_t avail_;
- std::size_t size_;
- std::size_t total_length_;
-
- char *reserve(std::size_t size);
-
- void append_base_block();
- void append_extra_block(std::size_t size);
- void append_key_block();
-
- // Disallows copy and assignment.
- Keyset(const Keyset &);
- Keyset &operator=(const Keyset &);
-};
-
-} // namespace marisa
-
-#endif // MARISA_KEYSET_H_
diff --git a/contrib/python/marisa-trie/marisa/query.h b/contrib/python/marisa-trie/marisa/query.h
deleted file mode 100644
index e08f8f72dc..0000000000
--- a/contrib/python/marisa-trie/marisa/query.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#pragma once
-#ifndef MARISA_QUERY_H_
-#define MARISA_QUERY_H_
-
-#include "base.h"
-
-namespace marisa {
-
-class Query {
- public:
- Query() : ptr_(NULL), length_(0), id_(0) {}
- Query(const Query &query)
- : ptr_(query.ptr_), length_(query.length_), id_(query.id_) {}
-
- Query &operator=(const Query &query) {
- ptr_ = query.ptr_;
- length_ = query.length_;
- id_ = query.id_;
- return *this;
- }
-
- char operator[](std::size_t i) const {
- MARISA_DEBUG_IF(i >= length_, MARISA_BOUND_ERROR);
- return ptr_[i];
- }
-
- void set_str(const char *str) {
- MARISA_DEBUG_IF(str == NULL, MARISA_NULL_ERROR);
- std::size_t length = 0;
- while (str[length] != '\0') {
- ++length;
- }
- ptr_ = str;
- length_ = length;
- }
- void set_str(const char *ptr, std::size_t length) {
- MARISA_DEBUG_IF((ptr == NULL) && (length != 0), MARISA_NULL_ERROR);
- ptr_ = ptr;
- length_ = length;
- }
- void set_id(std::size_t id) {
- id_ = id;
- }
-
- const char *ptr() const {
- return ptr_;
- }
- std::size_t length() const {
- return length_;
- }
- std::size_t id() const {
- return id_;
- }
-
- void clear() {
- Query().swap(*this);
- }
- void swap(Query &rhs) {
- marisa::swap(ptr_, rhs.ptr_);
- marisa::swap(length_, rhs.length_);
- marisa::swap(id_, rhs.id_);
- }
-
- private:
- const char *ptr_;
- std::size_t length_;
- std::size_t id_;
-};
-
-} // namespace marisa
-
-#endif // MARISA_QUERY_H_
diff --git a/contrib/python/marisa-trie/marisa/scoped-array.h b/contrib/python/marisa-trie/marisa/scoped-array.h
deleted file mode 100644
index 210cb908a7..0000000000
--- a/contrib/python/marisa-trie/marisa/scoped-array.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#pragma once
-#ifndef MARISA_SCOPED_ARRAY_H_
-#define MARISA_SCOPED_ARRAY_H_
-
-#include "base.h"
-
-namespace marisa {
-
-template <typename T>
-class scoped_array {
- public:
- scoped_array() : array_(NULL) {}
- explicit scoped_array(T *array) : array_(array) {}
-
- ~scoped_array() {
- delete [] array_;
- }
-
- void reset(T *array = NULL) {
- MARISA_THROW_IF((array != NULL) && (array == array_), MARISA_RESET_ERROR);
- scoped_array(array).swap(*this);
- }
-
- T &operator[](std::size_t i) const {
- MARISA_DEBUG_IF(array_ == NULL, MARISA_STATE_ERROR);
- return array_[i];
- }
- T *get() const {
- return array_;
- }
-
- void clear() {
- scoped_array().swap(*this);
- }
- void swap(scoped_array &rhs) {
- marisa::swap(array_, rhs.array_);
- }
-
- private:
- T *array_;
-
- // Disallows copy and assignment.
- scoped_array(const scoped_array &);
- scoped_array &operator=(const scoped_array &);
-};
-
-} // namespace marisa
-
-#endif // MARISA_SCOPED_ARRAY_H_
diff --git a/contrib/python/marisa-trie/marisa/scoped-ptr.h b/contrib/python/marisa-trie/marisa/scoped-ptr.h
deleted file mode 100644
index 9a9c447353..0000000000
--- a/contrib/python/marisa-trie/marisa/scoped-ptr.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#pragma once
-#ifndef MARISA_SCOPED_PTR_H_
-#define MARISA_SCOPED_PTR_H_
-
-#include "base.h"
-
-namespace marisa {
-
-template <typename T>
-class scoped_ptr {
- public:
- scoped_ptr() : ptr_(NULL) {}
- explicit scoped_ptr(T *ptr) : ptr_(ptr) {}
-
- ~scoped_ptr() {
- delete ptr_;
- }
-
- void reset(T *ptr = NULL) {
- MARISA_THROW_IF((ptr != NULL) && (ptr == ptr_), MARISA_RESET_ERROR);
- scoped_ptr(ptr).swap(*this);
- }
-
- T &operator*() const {
- MARISA_DEBUG_IF(ptr_ == NULL, MARISA_STATE_ERROR);
- return *ptr_;
- }
- T *operator->() const {
- MARISA_DEBUG_IF(ptr_ == NULL, MARISA_STATE_ERROR);
- return ptr_;
- }
- T *get() const {
- return ptr_;
- }
-
- void clear() {
- scoped_ptr().swap(*this);
- }
- void swap(scoped_ptr &rhs) {
- marisa::swap(ptr_, rhs.ptr_);
- }
-
- private:
- T *ptr_;
-
- // Disallows copy and assignment.
- scoped_ptr(const scoped_ptr &);
- scoped_ptr &operator=(const scoped_ptr &);
-};
-
-} // namespace marisa
-
-#endif // MARISA_SCOPED_PTR_H_
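
scoped_ptr and scoped_array exist mainly to support the pattern used throughout these classes: build into a temporary, then commit with a no-throw swap (BitVector::build(), the Trie load/map/read methods, and every clear() that swaps with a default-constructed object). A failed build therefore never leaves the target half-modified. A standalone sketch of the idiom (Index is a hypothetical class, not part of the library):

// Standalone sketch of the build-into-a-temporary-then-swap idiom used above.
#include <cstddef>
#include <iostream>
#include <stdexcept>
#include <vector>

class Index {
 public:
  void build(const std::vector<int>& input) {
    Index temp;
    temp.build_(input);        // may throw; *this is still intact if it does
    swap(temp);                // no-throw commit
  }
  void clear() { Index().swap(*this); }
  void swap(Index& rhs) { data_.swap(rhs.data_); }
  std::size_t size() const { return data_.size(); }

 private:
  void build_(const std::vector<int>& input) {
    if (input.empty()) throw std::runtime_error("empty input");
    data_ = input;             // placeholder for real index construction
  }
  std::vector<int> data_;
};

int main() {
  Index index;
  index.build({1, 2, 3});
  try {
    index.build({});           // fails...
  } catch (const std::exception&) {
    std::cout << index.size() << "\n";  // ...but the old index survives: 3
  }
}
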
diff --git a/contrib/python/marisa-trie/marisa/stdio.h b/contrib/python/marisa-trie/marisa/stdio.h
deleted file mode 100644
index 334ce56816..0000000000
--- a/contrib/python/marisa-trie/marisa/stdio.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#pragma once
-#ifndef MARISA_MYSTDIO_H_
-#define MARISA_MYSTDIO_H_
-
-#include <cstdio>
-
-namespace marisa {
-
-class Trie;
-
-void fread(std::FILE *file, Trie *trie);
-void fwrite(std::FILE *file, const Trie &trie);
-
-} // namespace marisa
-
-#endif // MARISA_MYSTDIO_H_
diff --git a/contrib/python/marisa-trie/marisa/trie.cc b/contrib/python/marisa-trie/marisa/trie.cc
deleted file mode 100644
index 5baaf9b288..0000000000
--- a/contrib/python/marisa-trie/marisa/trie.cc
+++ /dev/null
@@ -1,249 +0,0 @@
-#include "stdio.h"
-#include "iostream.h"
-#include "trie.h"
-#include "grimoire/trie.h"
-
-namespace marisa {
-
-Trie::Trie() : trie_() {}
-
-Trie::~Trie() {}
-
-void Trie::build(Keyset &keyset, int config_flags) {
- scoped_ptr<grimoire::LoudsTrie> temp(new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- temp->build(keyset, config_flags);
- trie_.swap(temp);
-}
-
-void Trie::mmap(const char *filename) {
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Mapper mapper;
- mapper.open(filename);
- temp->map(mapper);
- trie_.swap(temp);
-}
-
-void Trie::map(const void *ptr, std::size_t size) {
- MARISA_THROW_IF((ptr == NULL) && (size != 0), MARISA_NULL_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Mapper mapper;
- mapper.open(ptr, size);
- temp->map(mapper);
- trie_.swap(temp);
-}
-
-void Trie::load(const char *filename) {
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Reader reader;
- reader.open(filename);
- temp->read(reader);
- trie_.swap(temp);
-}
-
-void Trie::read(int fd) {
- MARISA_THROW_IF(fd == -1, MARISA_CODE_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Reader reader;
- reader.open(fd);
- temp->read(reader);
- trie_.swap(temp);
-}
-
-void Trie::save(const char *filename) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- MARISA_THROW_IF(filename == NULL, MARISA_NULL_ERROR);
-
- grimoire::Writer writer;
- writer.open(filename);
- trie_->write(writer);
-}
-
-void Trie::write(int fd) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- MARISA_THROW_IF(fd == -1, MARISA_CODE_ERROR);
-
- grimoire::Writer writer;
- writer.open(fd);
- trie_->write(writer);
-}
-
-bool Trie::lookup(Agent &agent) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- if (!agent.has_state()) {
- agent.init_state();
- }
- return trie_->lookup(agent);
-}
-
-void Trie::reverse_lookup(Agent &agent) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- if (!agent.has_state()) {
- agent.init_state();
- }
- trie_->reverse_lookup(agent);
-}
-
-bool Trie::common_prefix_search(Agent &agent) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- if (!agent.has_state()) {
- agent.init_state();
- }
- return trie_->common_prefix_search(agent);
-}
-
-bool Trie::predictive_search(Agent &agent) const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- if (!agent.has_state()) {
- agent.init_state();
- }
- return trie_->predictive_search(agent);
-}
-
-std::size_t Trie::num_tries() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->num_tries();
-}
-
-std::size_t Trie::num_keys() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->num_keys();
-}
-
-std::size_t Trie::num_nodes() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->num_nodes();
-}
-
-TailMode Trie::tail_mode() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->tail_mode();
-}
-
-NodeOrder Trie::node_order() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->node_order();
-}
-
-bool Trie::empty() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->empty();
-}
-
-std::size_t Trie::size() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->size();
-}
-
-std::size_t Trie::total_size() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->total_size();
-}
-
-std::size_t Trie::io_size() const {
- MARISA_THROW_IF(trie_.get() == NULL, MARISA_STATE_ERROR);
- return trie_->io_size();
-}
-
-void Trie::clear() {
- Trie().swap(*this);
-}
-
-void Trie::swap(Trie &rhs) {
- trie_.swap(rhs.trie_);
-}
-
-} // namespace marisa
-
-#include <iostream>
-
-namespace marisa {
-
-class TrieIO {
- public:
- static void fread(std::FILE *file, Trie *trie) {
- MARISA_THROW_IF(trie == NULL, MARISA_NULL_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(
- new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Reader reader;
- reader.open(file);
- temp->read(reader);
- trie->trie_.swap(temp);
- }
- static void fwrite(std::FILE *file, const Trie &trie) {
- MARISA_THROW_IF(file == NULL, MARISA_NULL_ERROR);
- MARISA_THROW_IF(trie.trie_.get() == NULL, MARISA_STATE_ERROR);
- grimoire::Writer writer;
- writer.open(file);
- trie.trie_->write(writer);
- }
-
- static std::istream &read(std::istream &stream, Trie *trie) {
- MARISA_THROW_IF(trie == NULL, MARISA_NULL_ERROR);
-
- scoped_ptr<grimoire::LoudsTrie> temp(
- new (std::nothrow) grimoire::LoudsTrie);
- MARISA_THROW_IF(temp.get() == NULL, MARISA_MEMORY_ERROR);
-
- grimoire::Reader reader;
- reader.open(stream);
- temp->read(reader);
- trie->trie_.swap(temp);
- return stream;
- }
- static std::ostream &write(std::ostream &stream, const Trie &trie) {
- MARISA_THROW_IF(trie.trie_.get() == NULL, MARISA_STATE_ERROR);
- grimoire::Writer writer;
- writer.open(stream);
- trie.trie_->write(writer);
- return stream;
- }
-};
-
-void fread(std::FILE *file, Trie *trie) {
- MARISA_THROW_IF(file == NULL, MARISA_NULL_ERROR);
- MARISA_THROW_IF(trie == NULL, MARISA_NULL_ERROR);
- TrieIO::fread(file, trie);
-}
-
-void fwrite(std::FILE *file, const Trie &trie) {
- MARISA_THROW_IF(file == NULL, MARISA_NULL_ERROR);
- TrieIO::fwrite(file, trie);
-}
-
-std::istream &read(std::istream &stream, Trie *trie) {
- MARISA_THROW_IF(trie == NULL, MARISA_NULL_ERROR);
- return TrieIO::read(stream, trie);
-}
-
-std::ostream &write(std::ostream &stream, const Trie &trie) {
- return TrieIO::write(stream, trie);
-}
-
-std::istream &operator>>(std::istream &stream, Trie &trie) {
- return read(stream, &trie);
-}
-
-std::ostream &operator<<(std::ostream &stream, const Trie &trie) {
- return write(stream, trie);
-}
-
-} // namespace marisa
diff --git a/contrib/python/marisa-trie/marisa/trie.h b/contrib/python/marisa-trie/marisa/trie.h
deleted file mode 100644
index df85bd86ba..0000000000
--- a/contrib/python/marisa-trie/marisa/trie.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#pragma once
-#ifndef MARISA_TRIE_H_
-#define MARISA_TRIE_H_
-
-#include "keyset.h"
-#include "agent.h"
-
-namespace marisa {
-namespace grimoire {
-namespace trie {
-
-class LoudsTrie;
-
-} // namespace trie
-} // namespace grimoire
-
-class Trie {
- friend class TrieIO;
-
- public:
- Trie();
- ~Trie();
-
- void build(Keyset &keyset, int config_flags = 0);
-
- void mmap(const char *filename);
- void map(const void *ptr, std::size_t size);
-
- void load(const char *filename);
- void read(int fd);
-
- void save(const char *filename) const;
- void write(int fd) const;
-
- bool lookup(Agent &agent) const;
- void reverse_lookup(Agent &agent) const;
- bool common_prefix_search(Agent &agent) const;
- bool predictive_search(Agent &agent) const;
-
- std::size_t num_tries() const;
- std::size_t num_keys() const;
- std::size_t num_nodes() const;
-
- TailMode tail_mode() const;
- NodeOrder node_order() const;
-
- bool empty() const;
- std::size_t size() const;
- std::size_t total_size() const;
- std::size_t io_size() const;
-
- void clear();
- void swap(Trie &rhs);
-
- private:
- scoped_ptr<grimoire::trie::LoudsTrie> trie_;
-
- // Disallows copy and assignment.
- Trie(const Trie &);
- Trie &operator=(const Trie &);
-};
-
-} // namespace marisa
-
-#endif // MARISA_TRIE_H_
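
A hedged end-to-end usage sketch of the C++ API declared above: fill a Keyset, build a Trie, and query it through an Agent. The umbrella header name and the Agent accessors (set_query(), key().id(), key().ptr(), key().length()) are taken from how the Cython wrapper further down uses them, so treat them as assumptions rather than a verified listing.

// Usage sketch for the marisa::Trie API declared above.
#include <iostream>
#include <marisa.h>

int main() {
  marisa::Keyset keyset;
  keyset.push_back("apple");
  keyset.push_back("application");
  keyset.push_back("banana");

  marisa::Trie trie;
  trie.build(keyset);                            // default config flags

  marisa::Agent lookup_agent;
  lookup_agent.set_query("application");
  if (trie.lookup(lookup_agent)) {
    std::cout << "id = " << lookup_agent.key().id() << "\n";
  }

  marisa::Agent prefix_agent;
  prefix_agent.set_query("applications");
  while (trie.common_prefix_search(prefix_agent)) {
    // Prints each stored key that is a prefix of "applications".
    std::cout.write(prefix_agent.key().ptr(), prefix_agent.key().length());
    std::cout << "\n";
  }
}
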
diff --git a/contrib/python/marisa-trie/marisa_trie.pyx b/contrib/python/marisa-trie/marisa_trie.pyx
deleted file mode 100644
index f9fe6f331b..0000000000
--- a/contrib/python/marisa-trie/marisa_trie.pyx
+++ /dev/null
@@ -1,763 +0,0 @@
-# cython: profile=False, embedsignature=True
-
-from __future__ import unicode_literals
-
-from std_iostream cimport stringstream, istream, ostream
-from libc.string cimport strncmp
-cimport keyset
-cimport key
-cimport agent
-cimport trie
-cimport iostream
-cimport base
-
-import itertools
-import struct
-import warnings
-
-try:
- from itertools import izip
-except ImportError:
- izip = zip
-
-
-DEFAULT_CACHE = base.MARISA_DEFAULT_CACHE
-HUGE_CACHE = base.MARISA_HUGE_CACHE
-LARGE_CACHE = base.MARISA_LARGE_CACHE
-NORMAL_CACHE = base.MARISA_NORMAL_CACHE
-SMALL_CACHE = base.MARISA_SMALL_CACHE
-TINY_CACHE = base.MARISA_TINY_CACHE
-
-MIN_NUM_TRIES = base.MARISA_MIN_NUM_TRIES
-MAX_NUM_TRIES = base.MARISA_MAX_NUM_TRIES
-DEFAULT_NUM_TRIES = base.MARISA_DEFAULT_NUM_TRIES
-
-# MARISA_TEXT_TAIL merges last labels as zero-terminated strings. So, it is
-# available if and only if the last labels do not contain a NULL character.
-# If MARISA_TEXT_TAIL is specified and a NULL character exists in the last
-# labels, the setting is automatically switched to MARISA_BINARY_TAIL.
-TEXT_TAIL = base.MARISA_TEXT_TAIL
-
-# MARISA_BINARY_TAIL also merges last labels but as byte sequences. It uses
-# a bit vector to detect the end of a sequence, instead of NULL characters.
-# So, MARISA_BINARY_TAIL requires a larger space if the average length of
-# labels is greater than 8.
-BINARY_TAIL = base.MARISA_BINARY_TAIL
-DEFAULT_TAIL = base.MARISA_DEFAULT_TAIL
-
-
-# MARISA_LABEL_ORDER arranges nodes in ascending label order.
-# MARISA_LABEL_ORDER is useful if an application needs to predict keys in
-# label order.
-LABEL_ORDER = base.MARISA_LABEL_ORDER
-
-# MARISA_WEIGHT_ORDER arranges nodes in descending weight order.
-# MARISA_WEIGHT_ORDER is generally a better choice because it enables faster
-# matching.
-WEIGHT_ORDER = base.MARISA_WEIGHT_ORDER
-DEFAULT_ORDER = base.MARISA_DEFAULT_ORDER
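
These module constants are combined into a single integer by the _config_flags() helper further down in this file (num_tries | binary_flag | cache_size | order) and handed to Trie::build(). A minimal C++ sketch of the same composition, using the enumerator names the base module exposes (build_flags is a hypothetical helper; the exact numeric values of the enumerators are not assumed):

// Hedged sketch: the flag composition performed by _config_flags() further
// down, expressed with the C++ enumerators those module constants map onto.
#include <marisa.h>

int build_flags(int num_tries, bool binary) {
  const int tail  = binary ? MARISA_BINARY_TAIL : MARISA_TEXT_TAIL;
  const int cache = MARISA_DEFAULT_CACHE;
  const int order = MARISA_DEFAULT_ORDER;
  return num_tries | tail | cache | order;       // same shape as _config_flags()
}

int main() {
  marisa::Keyset keyset;
  keyset.push_back("key");
  marisa::Trie trie;
  trie.build(keyset, build_flags(3, /*binary=*/false));
  return 0;
}
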
-
-
-cdef class _Trie:
- cdef trie.Trie* _trie
-
- cdef bytes _encode_key(self, key):
- return key
-
- cdef _get_key(self, agent.Agent& ag):
- return ag.key().ptr()[:ag.key().length()]
-
- def __init__(self, arg=None, num_tries=DEFAULT_NUM_TRIES, binary=False,
- cache_size=DEFAULT_CACHE, order=DEFAULT_ORDER, weights=None):
- """
- ``arg`` can be one of the following:
-
- * an iterable with bytes keys;
- * None (if you're going to load a trie later).
-
- Pass a ``weights`` iterable with expected lookup frequencies
- to optimize lookup and prefix search speed.
- """
-
- if self._trie:
- return
- self._trie = new trie.Trie()
-
- byte_keys = (self._encode_key(key) for key in (arg or []))
-
- self._build(
- byte_keys,
- weights,
- num_tries=num_tries,
- binary=binary,
- cache_size=cache_size,
- order=order
- )
-
- def __dealloc__(self):
- if self._trie:
- del self._trie
-
- def _config_flags(self, num_tries=DEFAULT_NUM_TRIES, binary=False,
- cache_size=DEFAULT_CACHE, order=DEFAULT_ORDER):
- if not MIN_NUM_TRIES <= num_tries <= MAX_NUM_TRIES:
- raise ValueError(
-                "num_tries (which is %d) must be between %d and %d" %
- (num_tries, MIN_NUM_TRIES, MAX_NUM_TRIES))
-
- binary_flag = BINARY_TAIL if binary else TEXT_TAIL
- return num_tries | binary_flag | cache_size | order
-
- def _build(self, byte_keys, weights=None, **options):
- if weights is None:
- weights = itertools.repeat(1.0)
-
- cdef char* data
- cdef float weight
- cdef keyset.Keyset *ks = new keyset.Keyset()
-
- try:
- for key, weight in izip(byte_keys, weights):
- ks.push_back(<char *>key, len(key), weight)
- self._trie.build(ks[0], self._config_flags(**options))
- finally:
- del ks
-
- def __richcmp__(self, other, int op):
- if op == 2: # ==
- if other is self:
- return True
- elif not isinstance(other, _Trie):
- return False
-
- return (<_Trie>self)._equals(other)
- elif op == 3: # !=
- return not (self == other)
-
- raise TypeError("unorderable types: {0} and {1}".format(
- self.__class__, other.__class__))
-
- cdef bint _equals(self, _Trie other) nogil:
- cdef int num_keys = self._trie.num_keys()
- cdef base.NodeOrder node_order = self._trie.node_order()
- if (other._trie.num_keys() != num_keys or
- other._trie.node_order() != node_order):
- return False
-
- cdef agent.Agent ag1, ag2
- ag1.set_query(b"")
- ag2.set_query(b"")
- cdef int i
- cdef key.Key key1, key2
- for i in range(num_keys):
- self._trie.predictive_search(ag1)
- other._trie.predictive_search(ag2)
- key1 = ag1.key()
- key2 = ag2.key()
- if (key1.length() != key2.length() or
- strncmp(key1.ptr(), key2.ptr(), key1.length()) != 0):
- return False
- return True
-
- def __iter__(self):
- return self.iterkeys()
-
- def __len__(self):
- return self._trie.num_keys()
-
- def __contains__(self, key):
- cdef bytes _key = self._encode_key(key)
- return self._contains(_key)
-
- cdef bint _contains(self, bytes key):
- cdef agent.Agent ag
- ag.set_query(key, len(key))
- return self._trie.lookup(ag)
-
- def read(self, f):
- """Read a trie from an open file.
-
- :param file f: a "real" on-disk file object. Passing a *file-like*
- object would result in an error.
-
- .. deprecated:: 0.7.3
-
- The method will be removed in version 0.8.0. Please use
- :meth:`load` instead.
- """
-        warnings.warn("Trie.read is deprecated and will "
- "be removed in marisa_trie 0.8.0. Please use "
- "Trie.load instead.", DeprecationWarning)
- self._trie.read(f.fileno())
- return self
-
- def write(self, f):
- """Write a trie to an open file.
-
- :param file f: a "real" on-disk file object. Passing a *file-like*
- object would result in an error.
-
- .. deprecated:: 0.7.3
-
- The method will be removed in version 0.8.0. Please use
- :meth:`save` instead.
- """
- warnings.warn("Trie.write is deprecated and will "
- "be removed in marisa_trie 0.8.0. Please use "
- "Trie.save instead.", DeprecationWarning)
- self._trie.write(f.fileno())
-
- def save(self, path):
- """Save a trie to a specified path."""
- with open(path, 'w') as f:
- self._trie.write(f.fileno())
-
- def load(self, path):
- """Load a trie from a specified path."""
- with open(path, 'r') as f:
- self._trie.read(f.fileno())
- return self
-
- cpdef bytes tobytes(self) except +:
- """Return raw trie content as bytes."""
- cdef stringstream stream
- iostream.write((<ostream *> &stream)[0], self._trie[0])
- cdef bytes res = stream.str()
- return res
-
- cpdef frombytes(self, bytes data) except +:
- """Load a trie from raw bytes generated by :meth:`tobytes`."""
- cdef stringstream* stream = new stringstream(data)
- try:
- iostream.read((<istream *> stream)[0], self._trie)
- finally:
- del stream
- return self
-
- def __reduce__(self):
- return self.__class__, (), self.tobytes()
-
- __setstate__ = frombytes
-
- def mmap(self, path):
- """Memory map the content of a trie stored in a file.
-
-        This allows querying the trie without loading it fully into memory.
- """
- import sys
- str_path = path.encode(sys.getfilesystemencoding())
- cdef char* c_path = str_path
- self._trie.mmap(c_path)
- return self
-
- def iterkeys(self, prefix=None):
- """
- Return an iterator over trie keys starting with a given ``prefix``.
- """
- cdef agent.Agent ag
- cdef bytes b_prefix = b''
- if prefix is not None:
- b_prefix = self._encode_key(prefix)
- ag.set_query(b_prefix, len(b_prefix))
-
- while self._trie.predictive_search(ag):
- yield self._get_key(ag)
-
- cpdef list keys(self, prefix=None):
- """Return a list of trie keys starting with a given ``prefix``."""
- # non-generator inlined version of iterkeys()
- cdef list res = []
- cdef bytes b_prefix = b''
- if prefix is not None:
- b_prefix = self._encode_key(prefix)
- cdef agent.Agent ag
- ag.set_query(b_prefix, len(b_prefix))
-
- while self._trie.predictive_search(ag):
- res.append(self._get_key(ag))
-
- return res
-
- def has_keys_with_prefix(self, prefix=""):
- """
- Return ``True`` if any key in the trie begins with ``prefix``.
-
- .. deprecated:: 0.7.3
-
- The method will be removed in version 0.8.0. Please use
- :meth:`iterkeys` instead.
- """
- warnings.warn("Trie.has_keys_with_prefix is deprecated and will "
- "be removed in marisa_trie 0.8.0. Please use "
- "Trie.iterkeys instead.", DeprecationWarning)
-
- cdef agent.Agent ag
- cdef bytes b_prefix = self._encode_key(prefix)
- ag.set_query(b_prefix, len(b_prefix))
- return self._trie.predictive_search(ag)
-
-
-cdef class BinaryTrie(_Trie):
- """A trie mapping bytes keys to auto-generated unique IDs."""
-
- # key_id method is not in _Trie because it won't work for BytesTrie
- cpdef int key_id(self, bytes key) except -1:
- """Return an ID generated for a given ``key``.
-
- :raises KeyError: if key is not present in this trie.
- """
- cdef int res = self._key_id(key, len(key))
- if res == -1:
- raise KeyError(key)
- return res
-
- cdef int _key_id(self, char* key, int len):
- cdef bint res
- cdef agent.Agent ag
- ag.set_query(key, len)
- res = self._trie.lookup(ag)
- if not res:
- return -1
- return ag.key().id()
-
- cpdef restore_key(self, int index):
- """Return a key corresponding to a given ID."""
- cdef agent.Agent ag
- ag.set_query(index)
- try:
- self._trie.reverse_lookup(ag)
- except KeyError:
- raise KeyError(index)
- return self._get_key(ag)
-
- def __getitem__(self, bytes key):
- return self.key_id(key)
-
- def get(self, bytes key, default=None):
- """
- Return an ID for a given ``key`` or ``default`` if ``key`` is
- not present in this trie.
- """
- cdef int res
-
- res = self._key_id(key, len(key))
- if res == -1:
- return default
- return res
-
- def iter_prefixes(self, bytes key):
- """
- Return an iterator of all prefixes of a given key.
- """
- cdef agent.Agent ag
- ag.set_query(key, len(key))
-
- while self._trie.common_prefix_search(ag):
- yield self._get_key(ag)
-
- def prefixes(self, bytes key):
- """
- Return a list with all prefixes of a given key.
- """
-        # this is an inlined version of ``list(self.iter_prefixes(key))``
-
- cdef list res = []
- cdef agent.Agent ag
- ag.set_query(key, len(key))
-
- while self._trie.common_prefix_search(ag):
- res.append(self._get_key(ag))
- return res
-
- def items(self, bytes prefix=b""):
- # inlined for speed
- cdef list res = []
- cdef agent.Agent ag
- ag.set_query(prefix, len(prefix))
-
- while self._trie.predictive_search(ag):
- res.append((self._get_key(ag), ag.key().id()))
-
- return res
-
- def iteritems(self, bytes prefix=b""):
- """
- Return an iterator over items that have a prefix ``prefix``.
- """
- cdef agent.Agent ag
- ag.set_query(prefix, len(prefix))
-
- while self._trie.predictive_search(ag):
- yield self._get_key(ag), ag.key().id()
-
-
-cdef class _UnicodeKeyedTrie(_Trie):
- """
- MARISA-trie wrapper for unicode keys.
- """
- cdef bytes _encode_key(self, key):
- return key.encode('utf8')
-
- cdef _get_key(self, agent.Agent& ag):
- return <unicode>_Trie._get_key(self, ag).decode('utf8')
-
-
-cdef class Trie(_UnicodeKeyedTrie):
- """A trie mapping unicode keys to auto-generated unique IDs."""
-
- # key_id method is not in _Trie because it won't work for BytesTrie
- cpdef int key_id(self, unicode key) except -1:
- """Return an ID generated for a given ``key``.
-
- :raises KeyError: if key is not present in this trie.
- """
- cdef bytes _key = <bytes>key.encode('utf8')
- cdef int res = self._key_id(_key)
- if res == -1:
- raise KeyError(key)
- return res
-
- def __getitem__(self, unicode key):
- return self.key_id(key)
-
- def get(self, key, default=None):
- """
- Return an ID for a given ``key`` or ``default`` if ``key`` is
- not present in this trie.
- """
- cdef bytes b_key
- cdef int res
-
- if isinstance(key, unicode):
- b_key = <bytes>(<unicode>key).encode('utf8')
- else:
- b_key = key
-
- res = self._key_id(b_key)
- if res == -1:
- return default
- return res
-
- cpdef restore_key(self, int index):
- """Return a key corresponding to a given ID."""
- cdef agent.Agent ag
- ag.set_query(index)
- try:
- self._trie.reverse_lookup(ag)
- except KeyError:
- raise KeyError(index)
- return self._get_key(ag)
-
- cdef int _key_id(self, char* key):
- cdef bint res
- cdef agent.Agent ag
- ag.set_query(key)
- res = self._trie.lookup(ag)
- if not res:
- return -1
- return ag.key().id()
-
- def iter_prefixes(self, unicode key):
- """
- Return an iterator of all prefixes of a given key.
- """
- cdef bytes b_key = <bytes>key.encode('utf8')
- cdef agent.Agent ag
- ag.set_query(b_key)
-
- while self._trie.common_prefix_search(ag):
- yield self._get_key(ag)
-
- def prefixes(self, unicode key):
- """
- Return a list with all prefixes of a given key.
- """
-        # this is an inlined version of ``list(self.iter_prefixes(key))``
-
- cdef list res = []
- cdef bytes b_key = <bytes>key.encode('utf8')
- cdef agent.Agent ag
- ag.set_query(b_key)
-
- while self._trie.common_prefix_search(ag):
- res.append(self._get_key(ag))
- return res
-
- def iteritems(self, unicode prefix=""):
- """
- Return an iterator over items that have a prefix ``prefix``.
- """
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- yield self._get_key(ag), ag.key().id()
-
- def items(self, unicode prefix=""):
- # inlined for speed
- cdef list res = []
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- res.append((self._get_key(ag), ag.key().id()))
-
- return res
-
-
-# This symbol is not allowed in utf8 so it is safe to use
-# as a separator between utf8-encoded string and binary payload.
-# XXX: b'\xff' value changes sort order for BytesTrie and RecordTrie.
-# See https://github.com/kmike/DAWG docs for a description of a similar issue.
-cdef bytes _VALUE_SEPARATOR = b'\xff'
-
-
-cdef class BytesTrie(_UnicodeKeyedTrie):
- """A trie mapping unicode keys to lists of bytes objects.
-
- The mapping is implemented by appending binary values to UTF8-encoded keys
- and storing the result in MARISA-trie.
- """
- cdef bytes _b_value_separator
- cdef unsigned char _c_value_separator
-
- def __init__(self, arg=None, bytes value_separator=_VALUE_SEPARATOR,
- **options):
- """
- ``arg`` must be an iterable of tuples (unicode_key, bytes_payload).
- """
- super(BytesTrie, self).__init__()
-
- self._b_value_separator = value_separator
- self._c_value_separator = <unsigned char>ord(value_separator)
-
- byte_keys = (self._raw_key(d[0], d[1]) for d in (arg or []))
- self._build(byte_keys, **options)
-
- cpdef bytes _raw_key(self, unicode key, bytes payload):
- return key.encode('utf8') + self._b_value_separator + payload
-
- cdef bint _contains(self, bytes key):
- cdef agent.Agent ag
- cdef bytes _key = key + self._b_value_separator
- ag.set_query(_key)
- return self._trie.predictive_search(ag)
-
- cpdef list prefixes(self, unicode key):
- """
- Return a list with all prefixes of a given key.
- """
-
- # XXX: is there a char-walking API in libmarisa?
- # This implementation is suboptimal.
-
- cdef agent.Agent ag
- cdef list res = []
- cdef int key_len = len(key)
- cdef unicode prefix
- cdef bytes b_prefix
- cdef int ind = 1
-
- while ind <= key_len:
- prefix = key[:ind]
- b_prefix = <bytes>(prefix.encode('utf8') + self._b_value_separator)
- ag.set_query(b_prefix)
- if self._trie.predictive_search(ag):
- res.append(prefix)
-
- ind += 1
-
- return res
-
- def __getitem__(self, key):
- cdef list res = self.get(key)
- if res is None:
- raise KeyError(key)
- return res
-
- cpdef get(self, key, default=None):
- """
- Return a list of payloads (as byte objects) for a given key
- or ``default`` if the key is not found.
- """
- cdef list res
-
- if isinstance(key, unicode):
- res = self.get_value(<unicode>key)
- else:
- res = self.b_get_value(key)
-
- if not res:
- return default
- return res
-
- cpdef list get_value(self, unicode key):
- """
- Return a list of payloads (as byte objects) for a given unicode key.
- """
- cdef bytes b_key = <bytes>key.encode('utf8')
- return self.b_get_value(b_key)
-
- cpdef list b_get_value(self, bytes key):
- """
- Return a list of payloads (as byte objects) for a given utf8-encoded key.
- """
- cdef list res = []
- cdef bytes value
- cdef bytes b_prefix = key + self._b_value_separator
- cdef int prefix_len = len(b_prefix)
-
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- value = ag.key().ptr()[prefix_len:ag.key().length()]
- res.append(value)
-
- return res
-
- cpdef list items(self, unicode prefix=""):
- # copied from iteritems for speed
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef bytes value
- cdef unicode key
- cdef unsigned char* raw_key
- cdef list res = []
- cdef int i, value_len
-
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- raw_key = <unsigned char*>ag.key().ptr()
-
- for i in range(0, ag.key().length()):
- if raw_key[i] == self._c_value_separator:
- break
-
- key = raw_key[:i].decode('utf8')
- value = raw_key[i+1:ag.key().length()]
-
- res.append(
- (key, value)
- )
- return res
-
- def iteritems(self, unicode prefix=""):
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef bytes value
- cdef unicode key
- cdef unsigned char* raw_key
- cdef int i, value_len
-
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- raw_key = <unsigned char*>ag.key().ptr()
-
- for i in range(0, ag.key().length()):
- if raw_key[i] == self._c_value_separator:
- break
-
- key = raw_key[:i].decode('utf8')
- value = raw_key[i+1:ag.key().length()]
-
- yield key, value
-
- cpdef list keys(self, prefix=""):
- # copied from iterkeys for speed
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef unicode key
- cdef unsigned char* raw_key
- cdef list res = []
- cdef int i
-
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- raw_key = <unsigned char*>ag.key().ptr()
-
- for i in range(0, ag.key().length()):
- if raw_key[i] == self._c_value_separator:
- key = raw_key[:i].decode('utf8')
- res.append(key)
- break
- return res
-
- def iterkeys(self, unicode prefix=""):
- cdef bytes b_prefix = <bytes>prefix.encode('utf8')
- cdef unicode key
- cdef unsigned char* raw_key
- cdef int i
-
- cdef agent.Agent ag
- ag.set_query(b_prefix)
-
- while self._trie.predictive_search(ag):
- raw_key = <unsigned char*>ag.key().ptr()
-
- for i in range(0, ag.key().length()):
- if raw_key[i] == self._c_value_separator:
- yield raw_key[:i].decode('utf8')
- break
-
-
-cdef class _UnpackTrie(BytesTrie):
-
- def __init__(self, arg=None, **options):
- keys = ((d[0], self._pack(d[1])) for d in (arg or []))
- super(_UnpackTrie, self).__init__(keys, **options)
-
- cdef _unpack(self, bytes value):
- return value
-
- cdef bytes _pack(self, value):
- return value
-
- cpdef list b_get_value(self, bytes key):
- cdef list values = BytesTrie.b_get_value(self, key)
- return [self._unpack(val) for val in values]
-
- cpdef list items(self, unicode prefix=""):
- cdef list items = BytesTrie.items(self, prefix)
- return [(key, self._unpack(val)) for (key, val) in items]
-
- def iteritems(self, unicode prefix=""):
- return ((key, self._unpack(val)) for key, val in BytesTrie.iteritems(self, prefix))
-
-
-cdef class RecordTrie(_UnpackTrie):
- """A trie mapping unicode keys to lists of data tuples.
-
- The data is packed using :mod:`struct` module, therefore all
- tuples must be of the same format. See :mod:`struct` documentation
- for available format strings.
-
- The mapping is implemented by appending binary values to UTF8-encoded keys
- and storing the result in MARISA-trie.
- """
- cdef _struct
- cdef _fmt
-
- def __init__(self, fmt, arg=None, **options):
- """
- ``arg`` must be an iterable of tuples (unicode_key, data_tuple).
- Data tuples will be converted to bytes with
- ``struct.pack(fmt, *data_tuple)``.
- """
- self._fmt = fmt
- self._struct = struct.Struct(str(fmt))
- super(RecordTrie, self).__init__(arg, **options)
-
- cdef _unpack(self, bytes value):
- return self._struct.unpack(value)
-
- cdef bytes _pack(self, value):
- return self._struct.pack(*value)
-
- def __reduce__(self):
- return self.__class__, (self._fmt, ), self.tobytes()
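
The ``Trie``, ``BytesTrie`` and ``RecordTrie`` wrappers deleted above mirror the public marisa-trie Python API. A minimal usage sketch, assuming the ``marisa_trie`` extension built from these sources is importable; ``sorted()`` is used because payloads come back in trie order, not insertion order:

.. code-block:: python

    import marisa_trie

    # Trie: unicode keys mapped to auto-generated integer IDs.
    trie = marisa_trie.Trie([u'apple', u'apricot', u'banana'])
    apple_id = trie[u'apple']                     # key_id / __getitem__
    assert trie.restore_key(apple_id) == u'apple'
    assert trie.prefixes(u'apricots') == [u'apricot']

    # BytesTrie: unicode keys mapped to lists of bytes payloads, stored as
    # <utf8 key> + b'\xff' + <payload> in the underlying MARISA trie.
    btrie = marisa_trie.BytesTrie([(u'key', b'value1'), (u'key', b'value2')])
    assert sorted(btrie[u'key']) == [b'value1', b'value2']

    # RecordTrie: payloads packed and unpacked with struct using one fixed format.
    rtrie = marisa_trie.RecordTrie('<HH', [(u'key', (1, 2)), (u'key', (3, 4))])
    assert sorted(rtrie[u'key']) == [(1, 2), (3, 4)]
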
diff --git a/contrib/python/marisa-trie/query.pxd b/contrib/python/marisa-trie/query.pxd
deleted file mode 100644
index a650bb8965..0000000000
--- a/contrib/python/marisa-trie/query.pxd
+++ /dev/null
@@ -1,20 +0,0 @@
-cdef extern from "<marisa/query.h>" namespace "marisa" nogil:
-
- cdef cppclass Query:
- Query()
- Query(Query &query)
-
- #Query &operator=(Query &query)
-
- char operator[](int i)
-
- void set_str(char *str)
- void set_str(char *ptr, int length)
- void set_id(int id)
-
- char *ptr()
- int length()
- int id()
-
- void clear()
- void swap(Query &rhs)
diff --git a/contrib/python/marisa-trie/std_iostream.pxd b/contrib/python/marisa-trie/std_iostream.pxd
deleted file mode 100644
index bf7d0e89aa..0000000000
--- a/contrib/python/marisa-trie/std_iostream.pxd
+++ /dev/null
@@ -1,18 +0,0 @@
-from libcpp.string cimport string
-
-cdef extern from "<istream>" namespace "std" nogil:
- cdef cppclass istream:
- istream() except +
- istream& read (char* s, int n) except +
-
- cdef cppclass ostream:
- ostream() except +
- ostream& write (char* s, int n) except +
-
-cdef extern from "<sstream>" namespace "std" nogil:
-
- cdef cppclass stringstream:
- stringstream()
- stringstream(string s)
- string str ()
-
diff --git a/contrib/python/marisa-trie/trie.pxd b/contrib/python/marisa-trie/trie.pxd
deleted file mode 100644
index f525caf8ad..0000000000
--- a/contrib/python/marisa-trie/trie.pxd
+++ /dev/null
@@ -1,41 +0,0 @@
-cimport agent
-cimport base
-cimport keyset
-
-
-cdef extern from "<marisa/trie.h>" namespace "marisa" nogil:
-
- cdef cppclass Trie:
- Trie()
-
- void build(keyset.Keyset &keyset, int config_flags) except +
- void build(keyset.Keyset &keyset) except +
-
- void mmap(char *filename) except +
- void map(void *ptr, int size) except +
-
- void load(char *filename) except +
- void read(int fd) except +
-
- void save(char *filename) except +
- void write(int fd) except +
-
- bint lookup(agent.Agent &agent) except +
- void reverse_lookup(agent.Agent &agent) except +KeyError
- bint common_prefix_search(agent.Agent &agent) except +
- bint predictive_search(agent.Agent &agent) except +
-
- int num_tries() except +
- int num_keys() except +
- int num_nodes() except +
-
- base.TailMode tail_mode()
- base.NodeOrder node_order()
-
- bint empty() except +
- int size() except +
- int total_size() except +
- int io_size() except +
-
- void clear() except +
- void swap(Trie &rhs) except +
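
``trie.pxd`` above declares libmarisa's persistence entry points (``save``, ``load``, ``mmap``) for Cython. A short sketch of how they surface through the removed Python wrapper, assuming the ``marisa_trie`` extension is available; the file name is arbitrary:

.. code-block:: python

    import marisa_trie

    trie = marisa_trie.Trie([u'foo', u'foobar', u'bar'])
    trie.save('keys.marisa')       # Trie::save through the wrapper

    loaded = marisa_trie.Trie()
    loaded.load('keys.marisa')     # read the whole structure into memory

    mapped = marisa_trie.Trie()
    mapped.mmap('keys.marisa')     # memory-map the file instead of loading it

    assert set(loaded.keys()) == set(mapped.keys()) == {u'foo', u'foobar', u'bar'}
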
diff --git a/contrib/python/marisa-trie/ya.make b/contrib/python/marisa-trie/ya.make
deleted file mode 100644
index 490eef9afa..0000000000
--- a/contrib/python/marisa-trie/ya.make
+++ /dev/null
@@ -1,33 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(MIT)
-
-VERSION(0.7.5)
-
-NO_COMPILER_WARNINGS()
-
-ADDINCL(
- contrib/python/marisa-trie
-)
-
-SRCS(
- marisa/agent.cc
- marisa/keyset.cc
- marisa/trie.cc
-
- marisa/grimoire/io/mapper.cc
- marisa/grimoire/io/reader.cc
- marisa/grimoire/io/writer.cc
- marisa/grimoire/trie/louds-trie.cc
- marisa/grimoire/trie/tail.cc
- marisa/grimoire/vector/bit-vector.cc
-)
-
-PY_SRCS(
- TOP_LEVEL
- marisa_trie.pyx
-)
-
-NO_LINT()
-
-END()
diff --git a/contrib/python/path.py/py2/LICENSE b/contrib/python/path.py/py2/LICENSE
deleted file mode 100644
index 5e795a61f3..0000000000
--- a/contrib/python/path.py/py2/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright Jason R. Coombs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/path.py/py2/README.rst b/contrib/python/path.py/py2/README.rst
deleted file mode 100644
index 424d3e77c8..0000000000
--- a/contrib/python/path.py/py2/README.rst
+++ /dev/null
@@ -1,134 +0,0 @@
-.. image:: https://img.shields.io/pypi/v/path.py.svg
- :target: https://pypi.org/project/path.py
-
-.. image:: https://img.shields.io/pypi/pyversions/path.py.svg
-
-.. image:: https://img.shields.io/travis/jaraco/path.py/master.svg
- :target: https://travis-ci.org/jaraco/path.py
-
-.. image:: https://img.shields.io/appveyor/ci/jaraco/path-py/master.svg
- :target: https://ci.appveyor.com/project/jaraco/path-py/branch/master
-
-.. image:: https://readthedocs.org/projects/pathpy/badge/?version=latest
- :target: https://pathpy.readthedocs.io/en/latest/?badge=latest
-
-``path.py`` implements path objects as first-class entities, allowing
-common operations on files to be invoked on those path objects directly. For
-example:
-
-.. code-block:: python
-
- from path import Path
- d = Path('/home/guido/bin')
- for f in d.files('*.py'):
- f.chmod(0o755)
-
- # Globbing
- for f in d.files('*.py'):
- f.chmod(0o755)
-
- # Changing the working directory:
- with Path("somewhere"):
- # cwd is now `somewhere`
- ...
-
- # Concatenate paths with /
- foo_txt = Path("bar") / "foo.txt"
-
-``path.py`` is `hosted at Github <https://github.com/jaraco/path.py>`_.
-
-Find `the documentation here <https://pathpy.readthedocs.io>`_.
-
-Guides and Testimonials
-=======================
-
-Yasoob wrote the Python 101 `Writing a Cleanup Script
-<http://freepythontips.wordpress.com/2014/01/23/python-101-writing-a-cleanup-script/>`_
-based on ``path.py``.
-
-Installing
-==========
-
-Path.py may be installed using ``setuptools``, ``distribute``, or ``pip``::
-
- pip install path.py
-
-The latest release is always updated to the `Python Package Index
-<http://pypi.python.org/pypi/path.py>`_.
-
-You may also always download the source distribution (zip/tarball), extract
-it, and run ``python setup.py install`` to install it.
-
-Advantages
-==========
-
-Python 3.4 introduced
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_,
-which shares many characteristics with ``path.py``. In particular,
-it provides an object encapsulation for representing filesystem paths.
-One may have imagined ``pathlib`` would supersede ``path.py``.
-
-But the implementation and the usage quickly diverge, and ``path.py``
-has several advantages over ``pathlib``:
-
-- ``path.py`` implements ``Path`` objects as a subclass of
- ``str`` (unicode on Python 2), and as a result these ``Path``
- objects may be passed directly to other APIs that expect simple
- text representations of paths, whereas with ``pathlib``, one
- must first cast values to strings before passing them to
- APIs unaware of ``pathlib``. This shortcoming was `addressed
- by PEP 519 <https://www.python.org/dev/peps/pep-0519/>`_,
- in Python 3.6.
-- ``path.py`` goes beyond exposing basic functionality of a path
- and exposes commonly-used behaviors on a path, providing
- methods like ``rmtree`` (from shutil) and ``remove_p`` (remove
- a file if it exists).
-- As a PyPI-hosted package, ``path.py`` is free to iterate
- faster than a stdlib package. Contributions are welcome
- and encouraged.
-- ``path.py`` provides a uniform abstraction over its Path object,
- freeing the implementer to subclass it readily. One cannot
- subclass a ``pathlib.Path`` to add functionality, but must
- subclass ``Path``, ``PosixPath``, and ``WindowsPath``, even
- if one only wishes to add a ``__dict__`` to the subclass
- instances. ``path.py`` instead allows the ``Path.module``
- object to be overridden by subclasses, defaulting to the
- ``os.path``. Even advanced uses of ``path.Path`` that
- subclass the model do not need to be concerned with
- OS-specific nuances.
-
-Alternatives
-============
-
-In addition to
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_, the
-`pylib project <https://pypi.org/project/py/>`_ implements a
-`LocalPath <https://github.com/pytest-dev/py/blob/72601dc8bbb5e11298bf9775bb23b0a395deb09b/py/_path/local.py#L106>`_
-class, which shares some behaviors and interfaces with ``path.py``.
-
-Development
-===========
-
-To install a development version, use the Github links to clone or
-download a snapshot of the latest code. Alternatively, if you have git
-installed, you may be able to use ``pip`` to install directly from
-the repository::
-
- pip install git+https://github.com/jaraco/path.py.git
-
-Testing
-=======
-
-Tests are continuously run by Travis-CI: |BuildStatus|_
-
-.. |BuildStatus| image:: https://secure.travis-ci.org/jaraco/path.py.png
-.. _BuildStatus: http://travis-ci.org/jaraco/path.py
-
-To run the tests, refer to the ``.travis.yml`` file for the steps run on the
-Travis-CI hosts.
-
-Releasing
-=========
-
-Tagged releases are automatically published to PyPI by Travis-CI, assuming
-the tests pass.
diff --git a/contrib/python/path.py/py3/.dist-info/METADATA b/contrib/python/path.py/py3/.dist-info/METADATA
deleted file mode 100644
index 68873c1391..0000000000
--- a/contrib/python/path.py/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,36 +0,0 @@
-Metadata-Version: 2.1
-Name: path.py
-Version: 12.5.0
-Summary: A module wrapper for os.path
-Home-page: https://github.com/jaraco/path
-Author: Jason Orendorff
-Author-email: jason.orendorff@gmail.com
-Maintainer: Jason R. Coombs
-Maintainer-email: jaraco@jaraco.com
-License: UNKNOWN
-Platform: UNKNOWN
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Operating System :: OS Independent
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.5
-Requires-Dist: path
-Provides-Extra: docs
-Requires-Dist: sphinx ; extra == 'docs'
-Requires-Dist: jaraco.packaging (>=3.2) ; extra == 'docs'
-Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
-Provides-Extra: testing
-Requires-Dist: pytest (!=3.7.3,>=3.5) ; extra == 'testing'
-Requires-Dist: pytest-checkdocs (>=1.2.3) ; extra == 'testing'
-Requires-Dist: pytest-flake8 ; extra == 'testing'
-Requires-Dist: pytest-black-multipy ; extra == 'testing'
-Requires-Dist: pytest-cov ; extra == 'testing'
-Requires-Dist: appdirs ; extra == 'testing'
-Requires-Dist: packaging ; extra == 'testing'
-Requires-Dist: pygments ; extra == 'testing'
-
-``path.py`` has been renamed to `path <https://pypi.org/project/path>`_.
-
-
diff --git a/contrib/python/path.py/py3/.dist-info/top_level.txt b/contrib/python/path.py/py3/.dist-info/top_level.txt
deleted file mode 100644
index e7a8fd4d0a..0000000000
--- a/contrib/python/path.py/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-path
diff --git a/contrib/python/path.py/py3/LICENSE b/contrib/python/path.py/py3/LICENSE
deleted file mode 100644
index 5e795a61f3..0000000000
--- a/contrib/python/path.py/py3/LICENSE
+++ /dev/null
@@ -1,7 +0,0 @@
-Copyright Jason R. Coombs
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/contrib/python/path.py/py3/README.rst b/contrib/python/path.py/py3/README.rst
deleted file mode 100644
index 4bc121481c..0000000000
--- a/contrib/python/path.py/py3/README.rst
+++ /dev/null
@@ -1 +0,0 @@
-``path.py`` has been renamed to `path <https://pypi.org/project/path>`_.
diff --git a/contrib/python/path.py/py3/ya.make b/contrib/python/path.py/py3/ya.make
deleted file mode 100644
index 241d32971e..0000000000
--- a/contrib/python/path.py/py3/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(12.5.0)
-
-LICENSE(MIT)
-
-PEERDIR(
- contrib/python/path
-)
-
-NO_LINT()
-
-RESOURCE_FILES(
- PREFIX contrib/python/path.py/py3/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
diff --git a/contrib/python/path.py/ya.make b/contrib/python/path.py/ya.make
deleted file mode 100644
index d0c1aca518..0000000000
--- a/contrib/python/path.py/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/path.py/py2)
-ELSE()
- PEERDIR(contrib/python/path.py/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
diff --git a/contrib/python/path/.dist-info/METADATA b/contrib/python/path/.dist-info/METADATA
deleted file mode 100644
index 7cddb38723..0000000000
--- a/contrib/python/path/.dist-info/METADATA
+++ /dev/null
@@ -1,201 +0,0 @@
-Metadata-Version: 2.1
-Name: path
-Version: 16.7.1
-Summary: A module wrapper for os.path
-Home-page: https://github.com/jaraco/path
-Author: Jason Orendorff
-Author-email: jason.orendorff@gmail.com
-Maintainer: Jason R. Coombs
-Maintainer-email: jaraco@jaraco.com
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3 :: Only
-Classifier: Operating System :: OS Independent
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.8
-License-File: LICENSE
-Provides-Extra: docs
-Requires-Dist: sphinx (>=3.5) ; extra == 'docs'
-Requires-Dist: jaraco.packaging (>=9.3) ; extra == 'docs'
-Requires-Dist: rst.linker (>=1.9) ; extra == 'docs'
-Requires-Dist: furo ; extra == 'docs'
-Requires-Dist: sphinx-lint ; extra == 'docs'
-Requires-Dist: jaraco.tidelift (>=1.4) ; extra == 'docs'
-Provides-Extra: testing
-Requires-Dist: pytest (>=6) ; extra == 'testing'
-Requires-Dist: pytest-checkdocs (>=2.4) ; extra == 'testing'
-Requires-Dist: pytest-cov ; extra == 'testing'
-Requires-Dist: pytest-enabler (>=2.2) ; extra == 'testing'
-Requires-Dist: pytest-ruff ; extra == 'testing'
-Requires-Dist: appdirs ; extra == 'testing'
-Requires-Dist: packaging ; extra == 'testing'
-Requires-Dist: pygments ; extra == 'testing'
-Requires-Dist: pytest-black (>=0.3.7) ; (platform_python_implementation != "PyPy") and extra == 'testing'
-Requires-Dist: pytest-mypy (>=0.9.1) ; (platform_python_implementation != "PyPy") and extra == 'testing'
-Requires-Dist: pywin32 ; (platform_system == "Windows" and python_version < "3.12") and extra == 'testing'
-
-.. image:: https://img.shields.io/pypi/v/path.svg
- :target: https://pypi.org/project/path
-
-.. image:: https://img.shields.io/pypi/pyversions/path.svg
-
-.. image:: https://github.com/jaraco/path/workflows/tests/badge.svg
- :target: https://github.com/jaraco/path/actions?query=workflow%3A%22tests%22
- :alt: tests
-
-.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
- :target: https://github.com/astral-sh/ruff
- :alt: Ruff
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
-.. image:: https://readthedocs.org/projects/path/badge/?version=latest
- :target: https://path.readthedocs.io/en/latest/?badge=latest
-
-.. image:: https://img.shields.io/badge/skeleton-2023-informational
- :target: https://blog.jaraco.com/skeleton
-
-.. image:: https://tidelift.com/badges/package/pypi/path
- :target: https://tidelift.com/subscription/pkg/pypi-path?utm_source=pypi-path&utm_medium=readme
-
-
-``path`` (aka path pie, formerly ``path.py``) implements path
-objects as first-class entities, allowing common operations on
-files to be invoked on those path objects directly. For example:
-
-.. code-block:: python
-
- from path import Path
-
- d = Path("/home/guido/bin")
- for f in d.files("*.py"):
- f.chmod(0o755)
-
- # Globbing
- for f in d.files("*.py"):
- f.chmod("u+rwx")
-
- # Changing the working directory:
- with Path("somewhere"):
- # cwd is now `somewhere`
- ...
-
- # Concatenate paths with /
- foo_txt = Path("bar") / "foo.txt"
-
-Path pie is `hosted at Github <https://github.com/jaraco/path>`_.
-
-Find `the documentation here <https://path.readthedocs.io>`_.
-
-Guides and Testimonials
-=======================
-
-Yasoob wrote the Python 101 `Writing a Cleanup Script
-<http://freepythontips.wordpress.com/2014/01/23/python-101-writing-a-cleanup-script/>`_
-based on ``path``.
-
-Advantages
-==========
-
-Python 3.4 introduced
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_,
-which shares many characteristics with ``path``. In particular,
-it provides an object encapsulation for representing filesystem paths.
-One may have imagined ``pathlib`` would supersede ``path``.
-
-But the implementation and the usage quickly diverge, and ``path``
-has several advantages over ``pathlib``:
-
-- ``path`` implements ``Path`` objects as a subclass of
- ``str``, and as a result these ``Path``
- objects may be passed directly to other APIs that expect simple
- text representations of paths, whereas with ``pathlib``, one
- must first cast values to strings before passing them to
- APIs unaware of ``pathlib``. This shortcoming was `addressed
- by PEP 519 <https://www.python.org/dev/peps/pep-0519/>`_,
- in Python 3.6.
-- ``path`` goes beyond exposing basic functionality of a path
- and exposes commonly-used behaviors on a path, providing
- methods like ``rmtree`` (from shutil) and ``remove_p`` (remove
- a file if it exists).
-- As a PyPI-hosted package, ``path`` is free to iterate
- faster than a stdlib package. Contributions are welcome
- and encouraged.
-- ``path`` provides a uniform abstraction over its Path object,
- freeing the implementer to subclass it readily. One cannot
- subclass a ``pathlib.Path`` to add functionality, but must
- subclass ``Path``, ``PosixPath``, and ``WindowsPath``, even
- if one only wishes to add a ``__dict__`` to the subclass
- instances. ``path`` instead allows the ``Path.module``
- object to be overridden by subclasses, defaulting to the
- ``os.path``. Even advanced uses of ``path.Path`` that
- subclass the model do not need to be concerned with
- OS-specific nuances.
-
-This path project has the explicit aim to provide compatibility
-with ``pathlib`` objects where possible, such that a ``path.Path``
-object is a drop-in replacement for ``pathlib.Path*`` objects.
-This project welcomes contributions to improve that compatibility
-where it's lacking.
-
-Alternatives
-============
-
-In addition to
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_, the
-`pylib project <https://pypi.org/project/py/>`_ implements a
-`LocalPath <https://github.com/pytest-dev/py/blob/72601dc8bbb5e11298bf9775bb23b0a395deb09b/py/_path/local.py#L106>`_
-class, which shares some behaviors and interfaces with ``path``.
-
-Development
-===========
-
-To install a development version, use the Github links to clone or
-download a snapshot of the latest code. Alternatively, if you have git
-installed, you may be able to use ``pip`` to install directly from
-the repository::
-
- pip install git+https://github.com/jaraco/path.git
-
-Testing
-=======
-
-Tests are invoked with `tox <https://pypi.org/project/tox>`_. After
-having installed tox, simply invoke ``tox`` in a checkout of the repo
-to invoke the tests.
-
-Tests are also run in continuous integration. See the badges above
-for links to the CI runs.
-
-Releasing
-=========
-
-Tagged releases are automatically published to PyPI by Azure
-Pipelines, assuming the tests pass.
-
-Origins
-=======
-
-The ``path.py`` project was initially released in 2003 by Jason Orendorff
-and has been continuously developed and supported by several maintainers
-over the years.
-
-For Enterprise
-==============
-
-Available as part of the Tidelift Subscription.
-
-This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
-
-`Learn more <https://tidelift.com/subscription/pkg/pypi-path?utm_source=pypi-path&utm_medium=referral&utm_campaign=github>`_.
-
-Security Contact
-================
-
-To report a security vulnerability, please use the
-`Tidelift security contact <https://tidelift.com/security>`_.
-Tidelift will coordinate the fix and disclosure.
diff --git a/contrib/python/path/.dist-info/top_level.txt b/contrib/python/path/.dist-info/top_level.txt
deleted file mode 100644
index e7a8fd4d0a..0000000000
--- a/contrib/python/path/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-path
diff --git a/contrib/python/path/LICENSE b/contrib/python/path/LICENSE
deleted file mode 100644
index 1bb5a44356..0000000000
--- a/contrib/python/path/LICENSE
+++ /dev/null
@@ -1,17 +0,0 @@
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to
-deal in the Software without restriction, including without limitation the
-rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-sell copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in
-all copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-IN THE SOFTWARE.
diff --git a/contrib/python/path/README.rst b/contrib/python/path/README.rst
deleted file mode 100644
index 69aa8737d6..0000000000
--- a/contrib/python/path/README.rst
+++ /dev/null
@@ -1,163 +0,0 @@
-.. image:: https://img.shields.io/pypi/v/path.svg
- :target: https://pypi.org/project/path
-
-.. image:: https://img.shields.io/pypi/pyversions/path.svg
-
-.. image:: https://github.com/jaraco/path/workflows/tests/badge.svg
- :target: https://github.com/jaraco/path/actions?query=workflow%3A%22tests%22
- :alt: tests
-
-.. image:: https://img.shields.io/endpoint?url=https://raw.githubusercontent.com/charliermarsh/ruff/main/assets/badge/v2.json
- :target: https://github.com/astral-sh/ruff
- :alt: Ruff
-
-.. image:: https://img.shields.io/badge/code%20style-black-000000.svg
- :target: https://github.com/psf/black
- :alt: Code style: Black
-
-.. image:: https://readthedocs.org/projects/path/badge/?version=latest
- :target: https://path.readthedocs.io/en/latest/?badge=latest
-
-.. image:: https://img.shields.io/badge/skeleton-2023-informational
- :target: https://blog.jaraco.com/skeleton
-
-.. image:: https://tidelift.com/badges/package/pypi/path
- :target: https://tidelift.com/subscription/pkg/pypi-path?utm_source=pypi-path&utm_medium=readme
-
-
-``path`` (aka path pie, formerly ``path.py``) implements path
-objects as first-class entities, allowing common operations on
-files to be invoked on those path objects directly. For example:
-
-.. code-block:: python
-
- from path import Path
-
- d = Path("/home/guido/bin")
- for f in d.files("*.py"):
- f.chmod(0o755)
-
- # Globbing
- for f in d.files("*.py"):
- f.chmod("u+rwx")
-
- # Changing the working directory:
- with Path("somewhere"):
- # cwd is now `somewhere`
- ...
-
- # Concatenate paths with /
- foo_txt = Path("bar") / "foo.txt"
-
-Path pie is `hosted at Github <https://github.com/jaraco/path>`_.
-
-Find `the documentation here <https://path.readthedocs.io>`_.
-
-Guides and Testimonials
-=======================
-
-Yasoob wrote the Python 101 `Writing a Cleanup Script
-<http://freepythontips.wordpress.com/2014/01/23/python-101-writing-a-cleanup-script/>`_
-based on ``path``.
-
-Advantages
-==========
-
-Python 3.4 introduced
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_,
-which shares many characteristics with ``path``. In particular,
-it provides an object encapsulation for representing filesystem paths.
-One may have imagined ``pathlib`` would supersede ``path``.
-
-But the implementation and the usage quickly diverge, and ``path``
-has several advantages over ``pathlib``:
-
-- ``path`` implements ``Path`` objects as a subclass of
- ``str``, and as a result these ``Path``
- objects may be passed directly to other APIs that expect simple
- text representations of paths, whereas with ``pathlib``, one
- must first cast values to strings before passing them to
- APIs unaware of ``pathlib``. This shortcoming was `addressed
- by PEP 519 <https://www.python.org/dev/peps/pep-0519/>`_,
- in Python 3.6.
-- ``path`` goes beyond exposing basic functionality of a path
- and exposes commonly-used behaviors on a path, providing
- methods like ``rmtree`` (from shutil) and ``remove_p`` (remove
- a file if it exists).
-- As a PyPI-hosted package, ``path`` is free to iterate
- faster than a stdlib package. Contributions are welcome
- and encouraged.
-- ``path`` provides a uniform abstraction over its Path object,
- freeing the implementer to subclass it readily. One cannot
- subclass a ``pathlib.Path`` to add functionality, but must
- subclass ``Path``, ``PosixPath``, and ``WindowsPath``, even
- if one only wishes to add a ``__dict__`` to the subclass
- instances. ``path`` instead allows the ``Path.module``
- object to be overridden by subclasses, defaulting to the
- ``os.path``. Even advanced uses of ``path.Path`` that
- subclass the model do not need to be concerned with
- OS-specific nuances.
-
-This path project has the explicit aim to provide compatibility
-with ``pathlib`` objects where possible, such that a ``path.Path``
-object is a drop-in replacement for ``pathlib.Path*`` objects.
-This project welcomes contributions to improve that compatibility
-where it's lacking.
-
-Alternatives
-============
-
-In addition to
-`pathlib <https://docs.python.org/3/library/pathlib.html>`_, the
-`pylib project <https://pypi.org/project/py/>`_ implements a
-`LocalPath <https://github.com/pytest-dev/py/blob/72601dc8bbb5e11298bf9775bb23b0a395deb09b/py/_path/local.py#L106>`_
-class, which shares some behaviors and interfaces with ``path``.
-
-Development
-===========
-
-To install a development version, use the Github links to clone or
-download a snapshot of the latest code. Alternatively, if you have git
-installed, you may be able to use ``pip`` to install directly from
-the repository::
-
- pip install git+https://github.com/jaraco/path.git
-
-Testing
-=======
-
-Tests are invoked with `tox <https://pypi.org/project/tox>`_. After
-having installed tox, simply invoke ``tox`` in a checkout of the repo
-to invoke the tests.
-
-Tests are also run in continuous integration. See the badges above
-for links to the CI runs.
-
-Releasing
-=========
-
-Tagged releases are automatically published to PyPI by Azure
-Pipelines, assuming the tests pass.
-
-Origins
-=======
-
-The ``path.py`` project was initially released in 2003 by Jason Orendorff
-and has been continuously developed and supported by several maintainers
-over the years.
-
-For Enterprise
-==============
-
-Available as part of the Tidelift Subscription.
-
-This project and the maintainers of thousands of other packages are working with Tidelift to deliver one enterprise subscription that covers all of the open source you use.
-
-`Learn more <https://tidelift.com/subscription/pkg/pypi-path?utm_source=pypi-path&utm_medium=referral&utm_campaign=github>`_.
-
-Security Contact
-================
-
-To report a security vulnerability, please use the
-`Tidelift security contact <https://tidelift.com/security>`_.
-Tidelift will coordinate the fix and disclosure.
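
The README above claims two concrete API properties: ``Path`` subclasses ``str``, so it can be handed to any API that expects a plain string, and the ``module`` attribute backing path operations can be overridden by subclasses. A small sketch under those assumptions, assuming the ``path`` package is installed (``WindowsFlavourPath`` is a hypothetical subclass used only for illustration):

.. code-block:: python

    import os.path
    import ntpath
    from path import Path

    p = Path('/home/guido/bin') / 'script.py'

    # Path subclasses str, so stdlib functions accept it without casting.
    assert isinstance(p, str)
    assert os.path.basename(p) == 'script.py'

    # The path module used for joins etc. is pluggable via `module`.
    class WindowsFlavourPath(Path):          # hypothetical, for illustration
        module = ntpath

    w = WindowsFlavourPath(r'C:\Users\guido') / 'bin'
    assert w == r'C:\Users\guido\bin'
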
diff --git a/contrib/python/path/path/__init__.py b/contrib/python/path/path/__init__.py
deleted file mode 100644
index eebdc3a0b8..0000000000
--- a/contrib/python/path/path/__init__.py
+++ /dev/null
@@ -1,1665 +0,0 @@
-"""
-Path Pie
-
-Implements ``path.Path`` - An object representing a
-path to a file or directory.
-
-Example::
-
- from path import Path
- d = Path('/home/guido/bin')
-
- # Globbing
- for f in d.files('*.py'):
- f.chmod(0o755)
-
- # Changing the working directory:
- with Path("somewhere"):
- # cwd is now `somewhere`
- ...
-
- # Concatenate paths with /
- foo_txt = Path("bar") / "foo.txt"
-"""
-
-import sys
-import warnings
-import os
-import fnmatch
-import glob
-import shutil
-import hashlib
-import errno
-import tempfile
-import functools
-import re
-import contextlib
-import importlib
-import itertools
-import datetime
-from numbers import Number
-from typing import Union
-
-with contextlib.suppress(ImportError):
- import win32security
-
-with contextlib.suppress(ImportError):
- import pwd
-
-with contextlib.suppress(ImportError):
- import grp
-
-from . import matchers
-from . import masks
-from . import classes
-
-
-__all__ = ['Path', 'TempDir']
-
-
-LINESEPS = ['\r\n', '\r', '\n']
-U_LINESEPS = LINESEPS + ['\u0085', '\u2028', '\u2029']
-B_NEWLINE = re.compile('|'.join(LINESEPS).encode())
-U_NEWLINE = re.compile('|'.join(U_LINESEPS))
-B_NL_END = re.compile(B_NEWLINE.pattern + b'$')
-U_NL_END = re.compile(U_NEWLINE.pattern + '$')
-
-_default_linesep = object()
-
-
-def _make_timestamp_ns(value: Union[Number, datetime.datetime]) -> Number:
- timestamp_s = value if isinstance(value, Number) else value.timestamp()
- return int(timestamp_s * 10**9)
-
-
-class TreeWalkWarning(Warning):
- pass
-
-
-class Traversal:
- """
- Wrap a walk result to customize the traversal.
-
- `follow` is a function that takes an item and returns
- True if that item should be followed and False otherwise.
-
- For example, to avoid traversing into directories that
- begin with `.`:
-
- >>> traverse = Traversal(lambda dir: not dir.startswith('.'))
- >>> items = list(traverse(Path('.').walk()))
-
- Directories beginning with `.` will appear in the results, but
- their children will not.
-
- >>> dot_dir = next(item for item in items if item.isdir() and item.startswith('.'))
- >>> any(item.parent == dot_dir for item in items)
- False
- """
-
- def __init__(self, follow):
- self.follow = follow
-
- def __call__(self, walker):
- traverse = None
- while True:
- try:
- item = walker.send(traverse)
- except StopIteration:
- return
- yield item
-
- traverse = functools.partial(self.follow, item)
-
-
-def _strip_newlines(lines):
- r"""
- >>> list(_strip_newlines(['Hello World\r\n', 'foo']))
- ['Hello World', 'foo']
- """
- return (U_NL_END.sub('', line) for line in lines)
-
-
-class Path(str):
- """
- Represents a filesystem path.
-
- For documentation on individual methods, consult their
- counterparts in :mod:`os.path`.
-
- Some methods are additionally included from :mod:`shutil`.
- The functions are linked directly into the class namespace
- such that they will be bound to the Path instance. For example,
- ``Path(src).copy(target)`` is equivalent to
- ``shutil.copy(src, target)``. Therefore, when referencing
- the docs for these methods, assume `src` references `self`,
- the Path instance.
- """
-
- module = os.path
- """ The path module to use for path operations.
-
- .. seealso:: :mod:`os.path`
- """
-
- def __init__(self, other=''):
- if other is None:
- raise TypeError("Invalid initial value for path: None")
- with contextlib.suppress(AttributeError):
- self._validate()
-
- @classmethod
- @functools.lru_cache
- def using_module(cls, module):
- subclass_name = cls.__name__ + '_' + module.__name__
- bases = (cls,)
- ns = {'module': module}
- return type(subclass_name, bases, ns)
-
- @classes.ClassProperty
- @classmethod
- def _next_class(cls):
- """
- What class should be used to construct new instances from this class
- """
- return cls
-
- # --- Special Python methods.
-
- def __repr__(self):
- return '{}({})'.format(type(self).__name__, super().__repr__())
-
- # Adding a Path and a string yields a Path.
- def __add__(self, more):
- return self._next_class(super().__add__(more))
-
- def __radd__(self, other):
- return self._next_class(other.__add__(self))
-
- # The / operator joins Paths.
- def __div__(self, rel):
- """fp.__div__(rel) == fp / rel == fp.joinpath(rel)
-
- Join two path components, adding a separator character if
- needed.
-
- .. seealso:: :func:`os.path.join`
- """
- return self._next_class(self.module.join(self, rel))
-
- # Make the / operator work even when true division is enabled.
- __truediv__ = __div__
-
- # The / operator joins Paths the other way around
- def __rdiv__(self, rel):
- """fp.__rdiv__(rel) == rel / fp
-
- Join two path components, adding a separator character if
- needed.
-
- .. seealso:: :func:`os.path.join`
- """
- return self._next_class(self.module.join(rel, self))
-
- # Make the / operator work even when true division is enabled.
- __rtruediv__ = __rdiv__
-
- def __enter__(self):
- self._old_dir = self.getcwd()
- os.chdir(self)
- return self
-
- def __exit__(self, *_):
- os.chdir(self._old_dir)
-
- @classmethod
- def getcwd(cls):
- """Return the current working directory as a path object.
-
- .. seealso:: :func:`os.getcwd`
- """
- return cls(os.getcwd())
-
- #
- # --- Operations on Path strings.
-
- def abspath(self):
- """.. seealso:: :func:`os.path.abspath`"""
- return self._next_class(self.module.abspath(self))
-
- def normcase(self):
- """.. seealso:: :func:`os.path.normcase`"""
- return self._next_class(self.module.normcase(self))
-
- def normpath(self):
- """.. seealso:: :func:`os.path.normpath`"""
- return self._next_class(self.module.normpath(self))
-
- def realpath(self):
- """.. seealso:: :func:`os.path.realpath`"""
- return self._next_class(self.module.realpath(self))
-
- def expanduser(self):
- """.. seealso:: :func:`os.path.expanduser`"""
- return self._next_class(self.module.expanduser(self))
-
- def expandvars(self):
- """.. seealso:: :func:`os.path.expandvars`"""
- return self._next_class(self.module.expandvars(self))
-
- def dirname(self):
- """.. seealso:: :attr:`parent`, :func:`os.path.dirname`"""
- return self._next_class(self.module.dirname(self))
-
- def basename(self):
- """.. seealso:: :attr:`name`, :func:`os.path.basename`"""
- return self._next_class(self.module.basename(self))
-
- def expand(self):
- """Clean up a filename by calling :meth:`expandvars()`,
- :meth:`expanduser()`, and :meth:`normpath()` on it.
-
- This is commonly everything needed to clean up a filename
- read from a configuration file, for example.
- """
- return self.expandvars().expanduser().normpath()
-
- @property
- def stem(self):
- """The same as :meth:`name`, but with one file extension stripped off.
-
- >>> Path('/home/guido/python.tar.gz').stem
- 'python.tar'
- """
- base, ext = self.module.splitext(self.name)
- return base
-
- @property
- def ext(self):
- """The file extension, for example ``'.py'``."""
- f, ext = self.module.splitext(self)
- return ext
-
- def with_suffix(self, suffix):
- """Return a new path with the file suffix changed (or added, if none)
-
- >>> Path('/home/guido/python.tar.gz').with_suffix(".foo")
- Path('/home/guido/python.tar.foo')
-
- >>> Path('python').with_suffix('.zip')
- Path('python.zip')
-
- >>> Path('filename.ext').with_suffix('zip')
- Traceback (most recent call last):
- ...
- ValueError: Invalid suffix 'zip'
- """
- if not suffix.startswith('.'):
- raise ValueError(f"Invalid suffix {suffix!r}")
-
- return self.stripext() + suffix
-
- @property
- def drive(self):
- """The drive specifier, for example ``'C:'``.
-
- This is always empty on systems that don't use drive specifiers.
- """
- drive, r = self.module.splitdrive(self)
- return self._next_class(drive)
-
- parent = property(
- dirname,
- None,
- None,
- """ This path's parent directory, as a new Path object.
-
- For example,
- ``Path('/usr/local/lib/libpython.so').parent ==
- Path('/usr/local/lib')``
-
- .. seealso:: :meth:`dirname`, :func:`os.path.dirname`
- """,
- )
-
- name = property(
- basename,
- None,
- None,
- """ The name of this file or directory without the full path.
-
- For example,
- ``Path('/usr/local/lib/libpython.so').name == 'libpython.so'``
-
- .. seealso:: :meth:`basename`, :func:`os.path.basename`
- """,
- )
-
- def splitpath(self):
- """Return two-tuple of ``.parent``, ``.name``.
-
- .. seealso:: :attr:`parent`, :attr:`name`, :func:`os.path.split`
- """
- parent, child = self.module.split(self)
- return self._next_class(parent), child
-
- def splitdrive(self):
- """Return two-tuple of ``.drive`` and rest without drive.
-
- Split the drive specifier from this path. If there is
- no drive specifier, :samp:`{p.drive}` is empty, so the return value
- is simply ``(Path(''), p)``. This is always the case on Unix.
-
- .. seealso:: :func:`os.path.splitdrive`
- """
- drive, rel = self.module.splitdrive(self)
- return self._next_class(drive), self._next_class(rel)
-
- def splitext(self):
- """Return two-tuple of ``.stripext()`` and ``.ext``.
-
- Split the filename extension from this path and return
- the two parts. Either part may be empty.
-
- The extension is everything from ``'.'`` to the end of the
- last path segment. This has the property that if
- ``(a, b) == p.splitext()``, then ``a + b == p``.
-
- .. seealso:: :func:`os.path.splitext`
- """
- filename, ext = self.module.splitext(self)
- return self._next_class(filename), ext
-
- def stripext(self):
- """Remove one file extension from the path.
-
- For example, ``Path('/home/guido/python.tar.gz').stripext()``
- returns ``Path('/home/guido/python.tar')``.
- """
- return self.splitext()[0]
-
- @classes.multimethod
- def joinpath(cls, first, *others):
- """
- Join first to zero or more :class:`Path` components,
- adding a separator character (:samp:`{first}.module.sep`)
- if needed. Returns a new instance of
- :samp:`{first}._next_class`.
-
- .. seealso:: :func:`os.path.join`
- """
- return cls._next_class(cls.module.join(first, *others))
-
- def splitall(self):
- r"""Return a list of the path components in this path.
-
- The first item in the list will be a Path. Its value will be
- either :data:`os.curdir`, :data:`os.pardir`, empty, or the root
- directory of this path (for example, ``'/'`` or ``'C:\\'``). The
- other items in the list will be strings.
-
- ``Path.joinpath(*result)`` will yield the original path.
-
- >>> Path('/foo/bar/baz').splitall()
- [Path('/'), 'foo', 'bar', 'baz']
- """
- return list(self._parts())
-
- def parts(self):
- """
- >>> Path('/foo/bar/baz').parts()
- (Path('/'), 'foo', 'bar', 'baz')
- """
- return tuple(self._parts())
-
- def _parts(self):
- return reversed(tuple(self._parts_iter()))
-
- def _parts_iter(self):
- loc = self
- while loc != os.curdir and loc != os.pardir:
- prev = loc
- loc, child = prev.splitpath()
- if loc == prev:
- break
- yield child
- yield loc
-
- def relpath(self, start='.'):
- """Return this path as a relative path,
- based from `start`, which defaults to the current working directory.
- """
- cwd = self._next_class(start)
- return cwd.relpathto(self)
-
- def relpathto(self, dest):
- """Return a relative path from `self` to `dest`.
-
- If there is no relative path from `self` to `dest`, for example if
- they reside on different drives in Windows, then this returns
- ``dest.abspath()``.
- """
- origin = self.abspath()
- dest = self._next_class(dest).abspath()
-
- orig_list = origin.normcase().splitall()
- # Don't normcase dest! We want to preserve the case.
- dest_list = dest.splitall()
-
- if orig_list[0] != self.module.normcase(dest_list[0]):
- # Can't get here from there.
- return dest
-
- # Find the location where the two paths start to differ.
- i = 0
- for start_seg, dest_seg in zip(orig_list, dest_list):
- if start_seg != self.module.normcase(dest_seg):
- break
- i += 1
-
- # Now i is the point where the two paths diverge.
- # Need a certain number of "os.pardir"s to work up
- # from the origin to the point of divergence.
- segments = [os.pardir] * (len(orig_list) - i)
- # Need to add the diverging part of dest_list.
- segments += dest_list[i:]
- if len(segments) == 0:
- # If they happen to be identical, use os.curdir.
- relpath = os.curdir
- else:
- relpath = self.module.join(*segments)
- return self._next_class(relpath)
-
- # --- Listing, searching, walking, and matching
-
- def listdir(self, match=None):
- """List of items in this directory.
-
- Use :meth:`files` or :meth:`dirs` instead if you want a listing
- of just files or just subdirectories.
-
- The elements of the list are Path objects.
-
- With the optional `match` argument, a callable,
- only return items whose names match the given pattern.
-
- .. seealso:: :meth:`files`, :meth:`dirs`
- """
- match = matchers.load(match)
- return list(filter(match, (self / child for child in os.listdir(self))))
-
- def dirs(self, *args, **kwargs):
- """List of this directory's subdirectories.
-
- The elements of the list are Path objects.
- This does not walk recursively into subdirectories
- (but see :meth:`walkdirs`).
-
- Accepts parameters to :meth:`listdir`.
- """
- return [p for p in self.listdir(*args, **kwargs) if p.isdir()]
-
- def files(self, *args, **kwargs):
- """List of the files in self.
-
- The elements of the list are Path objects.
- This does not walk into subdirectories (see :meth:`walkfiles`).
-
- Accepts parameters to :meth:`listdir`.
- """
-
- return [p for p in self.listdir(*args, **kwargs) if p.isfile()]
-
- def walk(self, match=None, errors='strict'):
- """Iterator over files and subdirs, recursively.
-
- The iterator yields Path objects naming each child item of
- this directory and its descendants. This requires that
- ``D.isdir()``.
-
- This performs a depth-first traversal of the directory tree.
- Each directory is returned just before all its children.
-
- The `errors=` keyword argument controls behavior when an
- error occurs. The default is ``'strict'``, which causes an
- exception. Other allowed values are ``'warn'`` (which
- reports the error via :func:`warnings.warn()`), and ``'ignore'``.
- `errors` may also be an arbitrary callable taking a msg parameter.
- """
-
- errors = Handlers._resolve(errors)
- match = matchers.load(match)
-
- try:
- childList = self.listdir()
- except Exception as exc:
- errors(f"Unable to list directory '{self}': {exc}")
- return
-
- for child in childList:
- traverse = None
- if match(child):
- traverse = yield child
- traverse = traverse or child.isdir
- try:
- do_traverse = traverse()
- except Exception as exc:
- errors(f"Unable to access '{child}': {exc}")
- continue
-
- if do_traverse:
- yield from child.walk(errors=errors, match=match)
-
- def walkdirs(self, *args, **kwargs):
- """Iterator over subdirs, recursively."""
- return (item for item in self.walk(*args, **kwargs) if item.isdir())
-
- def walkfiles(self, *args, **kwargs):
- """Iterator over files, recursively."""
- return (item for item in self.walk(*args, **kwargs) if item.isfile())
-
- def fnmatch(self, pattern, normcase=None):
- """Return ``True`` if `self.name` matches the given `pattern`.
-
- `pattern` - A filename pattern with wildcards,
- for example ``'*.py'``. If the pattern contains a `normcase`
- attribute, it is applied to the name and path prior to comparison.
-
- `normcase` - (optional) A function used to normalize the pattern and
- filename before matching. Defaults to normcase from
- ``self.module``, :func:`os.path.normcase`.
-
- .. seealso:: :func:`fnmatch.fnmatch`
- """
- default_normcase = getattr(pattern, 'normcase', self.module.normcase)
- normcase = normcase or default_normcase
- name = normcase(self.name)
- pattern = normcase(pattern)
- return fnmatch.fnmatchcase(name, pattern)
-
- def glob(self, pattern):
- """Return a list of Path objects that match the pattern.
-
- `pattern` - a path relative to this directory, with wildcards.
-
- For example, ``Path('/users').glob('*/bin/*')`` returns a list
- of all the files users have in their :file:`bin` directories.
-
- .. seealso:: :func:`glob.glob`
-
- .. note:: Glob is **not** recursive, even when using ``**``.
- To do recursive globbing see :func:`walk`,
- :func:`walkdirs` or :func:`walkfiles`.
- """
- cls = self._next_class
- return [cls(s) for s in glob.glob(self / pattern)]
-
- def iglob(self, pattern):
- """Return an iterator of Path objects that match the pattern.
-
- `pattern` - a path relative to this directory, with wildcards.
-
- For example, ``Path('/users').iglob('*/bin/*')`` returns an
- iterator of all the files users have in their :file:`bin`
- directories.
-
- .. seealso:: :func:`glob.iglob`
-
- .. note:: Glob is **not** recursive, even when using ``**``.
- To do recursive globbing see :func:`walk`,
- :func:`walkdirs` or :func:`walkfiles`.
- """
- cls = self._next_class
- return (cls(s) for s in glob.iglob(self / pattern))
-
- #
- # --- Reading or writing an entire file at once.
-
- def open(self, *args, **kwargs):
- """Open this file and return a corresponding file object.
-
- Keyword arguments work as in :func:`io.open`. If the file cannot be
- opened, an :class:`OSError` is raised.
- """
- return open(self, *args, **kwargs)
-
- def bytes(self):
- """Open this file, read all bytes, return them as a string."""
- with self.open('rb') as f:
- return f.read()
-
- def chunks(self, size, *args, **kwargs):
- """Returns a generator yielding chunks of the file, so it can
- be read piece by piece with a simple for loop.
-
- Any argument you pass after `size` will be passed to :meth:`open`.
-
- :example:
-
- >>> hash = hashlib.md5()
- >>> for chunk in Path("NEWS.rst").chunks(8192, mode='rb'):
- ... hash.update(chunk)
-
- This will read the file by chunks of 8192 bytes.
- """
- with self.open(*args, **kwargs) as f:
- yield from iter(lambda: f.read(size) or None, None)
-
- def write_bytes(self, bytes, append=False):
- """Open this file and write the given bytes to it.
-
- Default behavior is to overwrite any existing file.
- Call ``p.write_bytes(bytes, append=True)`` to append instead.
- """
- with self.open('ab' if append else 'wb') as f:
- f.write(bytes)
-
- def read_text(self, encoding=None, errors=None):
- r"""Open this file, read it in, return the content as a string.
-
- Optional parameters are passed to :meth:`open`.
-
- .. seealso:: :meth:`lines`
- """
- with self.open(encoding=encoding, errors=errors) as f:
- return f.read()
-
- def read_bytes(self):
- r"""Return the contents of this file as bytes."""
- with self.open(mode='rb') as f:
- return f.read()
-
- def text(self, encoding=None, errors='strict'):
- r"""Legacy function to read text.
-
- Converts all newline sequences to ``\n``.
- """
- warnings.warn(
- ".text is deprecated; use read_text",
- DeprecationWarning,
- stacklevel=2,
- )
- return U_NEWLINE.sub('\n', self.read_text(encoding, errors))
-
- def write_text(
- self, text, encoding=None, errors='strict', linesep=os.linesep, append=False
- ):
- r"""Write the given text to this file.
-
- The default behavior is to overwrite any existing file;
- to append instead, use the `append=True` keyword argument.
-
- There are two differences between :meth:`write_text` and
- :meth:`write_bytes`: newline handling and Unicode handling.
- See below.
-
- Parameters:
-
- `text` - str/bytes - The text to be written.
-
- `encoding` - str - The text encoding used.
-
- `errors` - str - How to handle Unicode encoding errors.
- Default is ``'strict'``. See ``help(str.encode)`` for the
- options. Ignored if `text` isn't a Unicode string.
-
- `linesep` - keyword argument - str/unicode - The sequence of
- characters to be used to mark end-of-line. The default is
- :data:`os.linesep`. Specify ``None`` to
- use newlines unmodified.
-
- `append` - keyword argument - bool - Specifies what to do if
- the file already exists (``True``: append to the end of it;
- ``False``: overwrite it). The default is ``False``.
-
-
- --- Newline handling.
-
- ``write_text()`` converts all standard end-of-line sequences
- (``'\n'``, ``'\r'``, and ``'\r\n'``) to your platform's default
- end-of-line sequence (see :data:`os.linesep`; on Windows, for example,
- the end-of-line marker is ``'\r\n'``).
-
- To override the platform's default, pass the `linesep=`
- keyword argument. To preserve the newlines as-is, pass
- ``linesep=None``.
-
- This handling applies to Unicode text and bytes, except
- with Unicode, additional non-ASCII newlines are recognized:
- ``\x85``, ``\r\x85``, and ``\u2028``.
-
- --- Unicode
-
- If `text` isn't Unicode, then apart from newline handling, the
- bytes are written verbatim to the file. The `encoding` and
- `errors` arguments are not used and must be omitted.
-
- If `text` is Unicode, it is first converted to :func:`bytes` using the
- specified `encoding` (or the default encoding if `encoding`
- isn't specified). The `errors` argument applies only to this
- conversion.
- """
- if isinstance(text, str):
- if linesep is not None:
- text = U_NEWLINE.sub(linesep, text)
- bytes = text.encode(encoding or sys.getdefaultencoding(), errors)
- else:
- warnings.warn(
- "Writing bytes in write_text is deprecated",
- DeprecationWarning,
- stacklevel=1,
- )
- assert encoding is None
- if linesep is not None:
- text = B_NEWLINE.sub(linesep.encode(), text)
- bytes = text
- self.write_bytes(bytes, append=append)
-
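For readers skimming this removed module, a minimal sketch of the newline and append behaviour described in the ``write_text`` docstring above (assuming the deleted ``path`` package is still installed and importable) could look like::

    import tempfile
    from path import Path

    f = Path(tempfile.mkdtemp()) / 'example.txt'
    # '\r' and '\n' are both normalised to os.linesep on write.
    f.write_text('one\rtwo\n', encoding='utf-8')
    # append=True adds to the end instead of overwriting.
    f.write_text('three\n', encoding='utf-8', append=True)
    # Universal-newline reading yields the three logical lines back.
    assert f.read_text(encoding='utf-8').splitlines() == ['one', 'two', 'three']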
- def lines(self, encoding=None, errors=None, retain=True):
- r"""Open this file, read all lines, return them in a list.
-
- Optional arguments:
- `encoding` - The Unicode encoding (or character set) of
- the file. The default is ``None``, meaning use
- ``locale.getpreferredencoding()``.
- `errors` - How to handle Unicode errors; see
- `open <https://docs.python.org/3/library/functions.html#open>`_
- for the options. Default is ``None`` meaning "strict".
- `retain` - If ``True`` (default), retain newline characters,
- but translate all newline
- characters to ``\n``. If ``False``, newline characters are
- omitted.
-
- .. seealso:: :meth:`text`
- """
- text = U_NEWLINE.sub('\n', self.read_text(encoding, errors))
- return text.splitlines(retain)
-
- def write_lines(
- self,
- lines,
- encoding=None,
- errors='strict',
- linesep=_default_linesep,
- append=False,
- ):
- r"""Write the given lines of text to this file.
-
- By default this overwrites any existing file at this path.
-
- This puts a platform-specific newline sequence on every line.
- See `linesep` below.
-
- `lines` - A list of strings.
-
- `encoding` - A Unicode encoding to use. This applies only if
- `lines` contains any Unicode strings.
-
- `errors` - How to handle errors in Unicode encoding. This
- also applies only to Unicode strings.
-
- linesep - (deprecated) The desired line-ending. This line-ending is
- applied to every line. If a line already has any
- standard line ending (``'\r'``, ``'\n'``, ``'\r\n'``,
- ``u'\x85'``, ``u'\r\x85'``, ``u'\u2028'``), that will
- be stripped off and this will be used instead. The
- default is os.linesep, which is platform-dependent
- (``'\r\n'`` on Windows, ``'\n'`` on Unix, etc.).
- Specify ``None`` to write the lines as-is, like
- ``.writelines`` on a file object.
-
- Use the keyword argument ``append=True`` to append lines to the
- file. The default is to overwrite the file.
- """
- mode = 'a' if append else 'w'
- with self.open(mode, encoding=encoding, errors=errors, newline='') as f:
- f.writelines(self._replace_linesep(lines, linesep))
-
- @staticmethod
- def _replace_linesep(lines, linesep):
- if linesep != _default_linesep:
- warnings.warn("linesep is deprecated", DeprecationWarning, stacklevel=3)
- else:
- linesep = os.linesep
- if linesep is None:
- return lines
-
- return (line + linesep for line in _strip_newlines(lines))
-
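Similarly, the interplay of ``write_lines`` and ``lines`` documented above can be sketched as follows (again assuming the package is importable; the file name is illustrative)::

    import tempfile
    from path import Path

    f = Path(tempfile.mkdtemp()) / 'lines.txt'
    f.write_lines(['alpha', 'beta'])        # each line gets the platform newline
    f.write_lines(['gamma'], append=True)   # append instead of overwrite
    assert f.lines(retain=False) == ['alpha', 'beta', 'gamma']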
- def read_md5(self):
- """Calculate the md5 hash for this file.
-
- This reads through the entire file.
-
- .. seealso:: :meth:`read_hash`
- """
- return self.read_hash('md5')
-
- def _hash(self, hash_name):
- """Returns a hash object for the file at the current path.
-
- `hash_name` should be a hash algo name (such as ``'md5'``
- or ``'sha1'``) that's available in the :mod:`hashlib` module.
- """
- m = hashlib.new(hash_name)
- for chunk in self.chunks(8192, mode="rb"):
- m.update(chunk)
- return m
-
- def read_hash(self, hash_name):
- """Calculate given hash for this file.
-
- List of supported hashes can be obtained from :mod:`hashlib` package.
- This reads the entire file.
-
- .. seealso:: :meth:`hashlib.hash.digest`
- """
- return self._hash(hash_name).digest()
-
- def read_hexhash(self, hash_name):
- """Calculate given hash for this file, returning hexdigest.
-
- List of supported hashes can be obtained from :mod:`hashlib` package.
- This reads the entire file.
-
- .. seealso:: :meth:`hashlib.hash.hexdigest`
- """
- return self._hash(hash_name).hexdigest()
-
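The hashing helpers above stream the file through ``chunks(8192, mode="rb")``, so they stay memory-friendly for large files; a hedged usage sketch::

    import tempfile
    from path import Path

    f = Path(tempfile.mkdtemp()) / 'data.bin'
    f.write_bytes(b'hello world')
    assert len(f.read_hexhash('sha256')) == 64   # hex digest string
    assert len(f.read_hash('md5')) == 16         # raw digest bytes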
- # --- Methods for querying the filesystem.
- # N.B. On some platforms, the os.path functions may be implemented in C
- # (e.g. isdir on Windows, Python 3.2.2), and compiled functions don't get
- # bound. Playing it safe and wrapping them all in method calls.
-
- def isabs(self):
- """
- >>> Path('.').isabs()
- False
-
- .. seealso:: :func:`os.path.isabs`
- """
- return self.module.isabs(self)
-
- def exists(self):
- """.. seealso:: :func:`os.path.exists`"""
- return self.module.exists(self)
-
- def isdir(self):
- """.. seealso:: :func:`os.path.isdir`"""
- return self.module.isdir(self)
-
- def isfile(self):
- """.. seealso:: :func:`os.path.isfile`"""
- return self.module.isfile(self)
-
- def islink(self):
- """.. seealso:: :func:`os.path.islink`"""
- return self.module.islink(self)
-
- def ismount(self):
- """
- >>> Path('.').ismount()
- False
-
- .. seealso:: :func:`os.path.ismount`
- """
- return self.module.ismount(self)
-
- def samefile(self, other):
- """.. seealso:: :func:`os.path.samefile`"""
- return self.module.samefile(self, other)
-
- def getatime(self):
- """.. seealso:: :attr:`atime`, :func:`os.path.getatime`"""
- return self.module.getatime(self)
-
- def set_atime(self, value):
- mtime_ns = self.stat().st_atime_ns
- self.utime(ns=(_make_timestamp_ns(value), mtime_ns))
-
- atime = property(
- getatime,
- set_atime,
- None,
- """
- Last access time of the file.
-
- >>> Path('.').atime > 0
- True
-
- Allows setting:
-
- >>> some_file = Path(getfixture('tmp_path')).joinpath('file.txt').touch()
- >>> MST = datetime.timezone(datetime.timedelta(hours=-7))
- >>> some_file.atime = datetime.datetime(1976, 5, 7, 10, tzinfo=MST)
- >>> some_file.atime
- 200336400.0
-
- .. seealso:: :meth:`getatime`, :func:`os.path.getatime`
- """,
- )
-
- def getmtime(self):
- """.. seealso:: :attr:`mtime`, :func:`os.path.getmtime`"""
- return self.module.getmtime(self)
-
- def set_mtime(self, value):
- atime_ns = self.stat().st_atime_ns
- self.utime(ns=(atime_ns, _make_timestamp_ns(value)))
-
- mtime = property(
- getmtime,
- set_mtime,
- None,
- """
- Last modified time of the file.
-
- Allows setting:
-
- >>> some_file = Path(getfixture('tmp_path')).joinpath('file.txt').touch()
- >>> MST = datetime.timezone(datetime.timedelta(hours=-7))
- >>> some_file.mtime = datetime.datetime(1976, 5, 7, 10, tzinfo=MST)
- >>> some_file.mtime
- 200336400.0
-
- .. seealso:: :meth:`getmtime`, :func:`os.path.getmtime`
- """,
- )
-
- def getctime(self):
- """.. seealso:: :attr:`ctime`, :func:`os.path.getctime`"""
- return self.module.getctime(self)
-
- ctime = property(
- getctime,
- None,
- None,
- """ Creation time of the file.
-
- .. seealso:: :meth:`getctime`, :func:`os.path.getctime`
- """,
- )
-
- def getsize(self):
- """.. seealso:: :attr:`size`, :func:`os.path.getsize`"""
- return self.module.getsize(self)
-
- size = property(
- getsize,
- None,
- None,
- """ Size of the file, in bytes.
-
- .. seealso:: :meth:`getsize`, :func:`os.path.getsize`
- """,
- )
-
- @property
- def permissions(self) -> masks.Permissions:
- """
- Permissions.
-
- >>> perms = Path('.').permissions
- >>> isinstance(perms, int)
- True
- >>> set(perms.symbolic) <= set('rwx-')
- True
- >>> perms.symbolic
- 'r...'
- """
- return masks.Permissions(self.stat().st_mode)
-
- def access(self, *args, **kwargs):
- """
- Return whether the real user has access to this path.
-
- >>> Path('.').access(os.F_OK)
- True
-
- .. seealso:: :func:`os.access`
- """
- return os.access(self, *args, **kwargs)
-
- def stat(self):
- """
- Perform a ``stat()`` system call on this path.
-
- >>> Path('.').stat()
- os.stat_result(...)
-
- .. seealso:: :meth:`lstat`, :func:`os.stat`
- """
- return os.stat(self)
-
- def lstat(self):
- """
- Like :meth:`stat`, but do not follow symbolic links.
-
- >>> Path('.').lstat() == Path('.').stat()
- True
-
- .. seealso:: :meth:`stat`, :func:`os.lstat`
- """
- return os.lstat(self)
-
- def __get_owner_windows(self): # pragma: nocover
- r"""
- Return the name of the owner of this file or directory. Follow
- symbolic links.
-
- Return a name of the form ``DOMAIN\User Name``; may be a group.
-
- .. seealso:: :attr:`owner`
- """
- desc = win32security.GetFileSecurity(
- self, win32security.OWNER_SECURITY_INFORMATION
- )
- sid = desc.GetSecurityDescriptorOwner()
- account, domain, typecode = win32security.LookupAccountSid(None, sid)
- return domain + '\\' + account
-
- def __get_owner_unix(self): # pragma: nocover
- """
- Return the name of the owner of this file or directory. Follow
- symbolic links.
-
- .. seealso:: :attr:`owner`
- """
- st = self.stat()
- return pwd.getpwuid(st.st_uid).pw_name
-
- def __get_owner_not_implemented(self): # pragma: nocover
- raise NotImplementedError("Ownership not available on this platform.")
-
- get_owner = (
- __get_owner_windows
- if 'win32security' in globals()
- else __get_owner_unix
- if 'pwd' in globals()
- else __get_owner_not_implemented
- )
-
- owner = property(
- get_owner,
- None,
- None,
- """ Name of the owner of this file or directory.
-
- .. seealso:: :meth:`get_owner`""",
- )
-
- if hasattr(os, 'statvfs'):
-
- def statvfs(self):
- """Perform a ``statvfs()`` system call on this path.
-
- .. seealso:: :func:`os.statvfs`
- """
- return os.statvfs(self)
-
- if hasattr(os, 'pathconf'):
-
- def pathconf(self, name):
- """.. seealso:: :func:`os.pathconf`"""
- return os.pathconf(self, name)
-
- #
- # --- Modifying operations on files and directories
-
- def utime(self, *args, **kwargs):
- """Set the access and modified times of this file.
-
- .. seealso:: :func:`os.utime`
- """
- os.utime(self, *args, **kwargs)
- return self
-
- def chmod(self, mode):
- """
- Set the mode. May be the new mode (os.chmod behavior) or a `symbolic
- mode <http://en.wikipedia.org/wiki/Chmod#Symbolic_modes>`_.
-
- >>> a_file = Path(getfixture('tmp_path')).joinpath('afile.txt').touch()
- >>> a_file.chmod(0o700)
- Path(...
- >>> a_file.chmod('u+x')
- Path(...
-
- .. seealso:: :func:`os.chmod`
- """
- if isinstance(mode, str):
- mask = masks.compound(mode)
- mode = mask(self.stat().st_mode)
- os.chmod(self, mode)
- return self
-
- if hasattr(os, 'chown'):
-
- def chown(self, uid=-1, gid=-1):
- """
- Change the owner and group by names or numbers.
-
- .. seealso:: :func:`os.chown`
- """
-
- def resolve_uid(uid):
- return uid if isinstance(uid, int) else pwd.getpwnam(uid).pw_uid
-
- def resolve_gid(gid):
- return gid if isinstance(gid, int) else grp.getgrnam(gid).gr_gid
-
- os.chown(self, resolve_uid(uid), resolve_gid(gid))
- return self
-
- def rename(self, new):
- """.. seealso:: :func:`os.rename`"""
- os.rename(self, new)
- return self._next_class(new)
-
- def renames(self, new):
- """.. seealso:: :func:`os.renames`"""
- os.renames(self, new)
- return self._next_class(new)
-
- #
- # --- Create/delete operations on directories
-
- def mkdir(self, mode=0o777):
- """.. seealso:: :func:`os.mkdir`"""
- os.mkdir(self, mode)
- return self
-
- def mkdir_p(self, mode=0o777):
- """Like :meth:`mkdir`, but does not raise an exception if the
- directory already exists."""
- with contextlib.suppress(FileExistsError):
- self.mkdir(mode)
- return self
-
- def makedirs(self, mode=0o777):
- """.. seealso:: :func:`os.makedirs`"""
- os.makedirs(self, mode)
- return self
-
- def makedirs_p(self, mode=0o777):
- """Like :meth:`makedirs`, but does not raise an exception if the
- directory already exists."""
- with contextlib.suppress(FileExistsError):
- self.makedirs(mode)
- return self
-
- def rmdir(self):
- """.. seealso:: :func:`os.rmdir`"""
- os.rmdir(self)
- return self
-
- def rmdir_p(self):
- """Like :meth:`rmdir`, but does not raise an exception if the
- directory is not empty or does not exist."""
- suppressed = FileNotFoundError, FileExistsError, DirectoryNotEmpty
- with contextlib.suppress(suppressed):
- with DirectoryNotEmpty.translate():
- self.rmdir()
- return self
-
- def removedirs(self):
- """.. seealso:: :func:`os.removedirs`"""
- os.removedirs(self)
- return self
-
- def removedirs_p(self):
- """Like :meth:`removedirs`, but does not raise an exception if the
- directory is not empty or does not exist."""
- with contextlib.suppress(FileExistsError, DirectoryNotEmpty):
- with DirectoryNotEmpty.translate():
- self.removedirs()
- return self
-
- # --- Modifying operations on files
-
- def touch(self):
- """Set the access/modified times of this file to the current time.
- Create the file if it does not exist.
- """
- os.close(os.open(self, os.O_WRONLY | os.O_CREAT, 0o666))
- os.utime(self, None)
- return self
-
- def remove(self):
- """.. seealso:: :func:`os.remove`"""
- os.remove(self)
- return self
-
- def remove_p(self):
- """Like :meth:`remove`, but does not raise an exception if the
- file does not exist."""
- with contextlib.suppress(FileNotFoundError):
- self.unlink()
- return self
-
- unlink = remove
- unlink_p = remove_p
-
- # --- Links
-
- def link(self, newpath):
- """Create a hard link at `newpath`, pointing to this file.
-
- .. seealso:: :func:`os.link`
- """
- os.link(self, newpath)
- return self._next_class(newpath)
-
- def symlink(self, newlink=None):
- """Create a symbolic link at `newlink`, pointing here.
-
- If newlink is not supplied, the symbolic link will assume
- the name self.basename(), creating the link in the cwd.
-
- .. seealso:: :func:`os.symlink`
- """
- if newlink is None:
- newlink = self.basename()
- os.symlink(self, newlink)
- return self._next_class(newlink)
-
- def readlink(self):
- """Return the path to which this symbolic link points.
-
- The result may be an absolute or a relative path.
-
- .. seealso:: :meth:`readlinkabs`, :func:`os.readlink`
- """
- return self._next_class(os.readlink(self))
-
- def readlinkabs(self):
- """Return the path to which this symbolic link points.
-
- The result is always an absolute path.
-
- .. seealso:: :meth:`readlink`, :func:`os.readlink`
- """
- p = self.readlink()
- return p if p.isabs() else (self.parent / p).abspath()
-
- # High-level functions from shutil
- # These functions will be bound to the instance such that
- # Path(name).copy(target) will invoke shutil.copy(name, target)
-
- copyfile = shutil.copyfile
- copymode = shutil.copymode
- copystat = shutil.copystat
- copy = shutil.copy
- copy2 = shutil.copy2
- copytree = shutil.copytree
- if hasattr(shutil, 'move'):
- move = shutil.move
- rmtree = shutil.rmtree
-
- def rmtree_p(self):
- """Like :meth:`rmtree`, but does not raise an exception if the
- directory does not exist."""
- with contextlib.suppress(FileNotFoundError):
- self.rmtree()
- return self
-
- def chdir(self):
- """.. seealso:: :func:`os.chdir`"""
- os.chdir(self)
-
- cd = chdir
-
- def merge_tree(
- self,
- dst,
- symlinks=False,
- *,
- copy_function=shutil.copy2,
- ignore=lambda dir, contents: [],
- ):
- """
- Copy entire contents of self to dst, overwriting existing
- contents in dst with those in self.
-
- Pass ``symlinks=True`` to copy symbolic links as links.
-
- Accepts a ``copy_function``, similar to copytree.
-
- To avoid overwriting newer files, supply a copy function
- wrapped in ``only_newer``. For example::
-
- src.merge_tree(dst, copy_function=only_newer(shutil.copy2))
- """
- dst = self._next_class(dst)
- dst.makedirs_p()
-
- sources = self.listdir()
- _ignored = ignore(self, [item.name for item in sources])
-
- def ignored(item):
- return item.name in _ignored
-
- for source in itertools.filterfalse(ignored, sources):
- dest = dst / source.name
- if symlinks and source.islink():
- target = source.readlink()
- target.symlink(dest)
- elif source.isdir():
- source.merge_tree(
- dest,
- symlinks=symlinks,
- copy_function=copy_function,
- ignore=ignore,
- )
- else:
- copy_function(source, dest)
-
- self.copystat(dst)
-
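Because ``ignore`` receives the same ``(directory, names)`` arguments as the callable accepted by :func:`shutil.copytree`, :func:`shutil.ignore_patterns` can be reused here, and ``only_newer`` (defined later in this module) can wrap the copy function; a sketch with throw-away temporary directories::

    import shutil
    import tempfile
    from path import Path, only_newer

    src, dst = Path(tempfile.mkdtemp()), Path(tempfile.mkdtemp())
    (src / 'keep.txt').write_text('hello')
    (src / 'skip.pyc').write_text('ignored')
    src.merge_tree(
        dst,
        copy_function=only_newer(shutil.copy2),   # leave newer files in dst untouched
        ignore=shutil.ignore_patterns('*.pyc'),   # same callable shape as shutil.copytree
    )
    assert (dst / 'keep.txt').isfile() and not (dst / 'skip.pyc').exists()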
- #
- # --- Special stuff from os
-
- if hasattr(os, 'chroot'):
-
- def chroot(self): # pragma: nocover
- """.. seealso:: :func:`os.chroot`"""
- os.chroot(self)
-
- if hasattr(os, 'startfile'):
-
- def startfile(self, *args, **kwargs): # pragma: nocover
- """.. seealso:: :func:`os.startfile`"""
- os.startfile(self, *args, **kwargs)
- return self
-
- # in-place re-writing, courtesy of Martijn Pieters
- # http://www.zopatista.com/python/2013/11/26/inplace-file-rewriting/
- @contextlib.contextmanager
- def in_place(
- self,
- mode='r',
- buffering=-1,
- encoding=None,
- errors=None,
- newline=None,
- backup_extension=None,
- ):
- """
- A context in which a file may be re-written in-place with
- new content.
-
- Yields a tuple of :samp:`({readable}, {writable})` file
- objects, where `writable` replaces `readable`.
-
- If an exception occurs, the old file is restored, removing the
- written data.
-
- Mode *must not* use ``'w'``, ``'a'``, or ``'+'``; only
- read-only-modes are allowed. A :exc:`ValueError` is raised
- on invalid modes.
-
- For example, to add line numbers to a file::
-
- p = Path(filename)
- assert p.isfile()
- with p.in_place() as (reader, writer):
- for number, line in enumerate(reader, 1):
- writer.write('{0:3}: '.format(number))
- writer.write(line)
-
- Thereafter, the file at `filename` will have line numbers in it.
- """
- if set(mode).intersection('wa+'):
- raise ValueError('Only read-only file modes can be used')
-
- # move existing file to backup, create new file with same permissions
- # borrowed extensively from the fileinput module
- backup_fn = self + (backup_extension or os.extsep + 'bak')
- backup_fn.remove_p()
- self.rename(backup_fn)
- readable = open(
- backup_fn,
- mode,
- buffering=buffering,
- encoding=encoding,
- errors=errors,
- newline=newline,
- )
- try:
- perm = os.fstat(readable.fileno()).st_mode
- except OSError:
- writable = self.open(
- 'w' + mode.replace('r', ''),
- buffering=buffering,
- encoding=encoding,
- errors=errors,
- newline=newline,
- )
- else:
- os_mode = os.O_CREAT | os.O_WRONLY | os.O_TRUNC
- os_mode |= getattr(os, 'O_BINARY', 0)
- fd = os.open(self, os_mode, perm)
- writable = open(
- fd,
- "w" + mode.replace('r', ''),
- buffering=buffering,
- encoding=encoding,
- errors=errors,
- newline=newline,
- )
- with contextlib.suppress(OSError, AttributeError):
- self.chmod(perm)
- try:
- yield readable, writable
- except Exception:
- # move backup back
- readable.close()
- writable.close()
- self.remove_p()
- backup_fn.rename(self)
- raise
- else:
- readable.close()
- writable.close()
- finally:
- backup_fn.remove_p()
-
- @classes.ClassProperty
- @classmethod
- def special(cls):
- """
- Return a SpecialResolver object for referencing a suitable
- directory on the relevant platform for the given
- type of content.
-
- For example, to get a user config directory, invoke:
-
- dir = Path.special().user.config
-
- Uses the `appdirs
- <https://pypi.python.org/pypi/appdirs/1.4.0>`_ package to resolve
- the paths in a platform-friendly way.
-
- To create a config directory for 'My App', consider:
-
- dir = Path.special("My App").user.config.makedirs_p()
-
- If the ``appdirs`` module is not installed, invocation
- of special will raise an ImportError.
- """
- return functools.partial(SpecialResolver, cls)
-
-
-class DirectoryNotEmpty(OSError):
- @staticmethod
- @contextlib.contextmanager
- def translate():
- try:
- yield
- except OSError as exc:
- if exc.errno == errno.ENOTEMPTY:
- raise DirectoryNotEmpty(*exc.args) from exc
- raise
-
-
-def only_newer(copy_func):
- """
- Wrap a copy function (like shutil.copy2) to return
- the dst if it's newer than the source.
- """
-
- @functools.wraps(copy_func)
- def wrapper(src, dst, *args, **kwargs):
- is_newer_dst = dst.exists() and dst.getmtime() >= src.getmtime()
- if is_newer_dst:
- return dst
- return copy_func(src, dst, *args, **kwargs)
-
- return wrapper
-
-
-class ExtantPath(Path):
- """
- >>> ExtantPath('.')
- ExtantPath('.')
- >>> ExtantPath('does-not-exist')
- Traceback (most recent call last):
- OSError: does-not-exist does not exist.
- """
-
- def _validate(self):
- if not self.exists():
- raise OSError(f"{self} does not exist.")
-
-
-class ExtantFile(Path):
- """
- >>> ExtantFile('.')
- Traceback (most recent call last):
- FileNotFoundError: . does not exist as a file.
- >>> ExtantFile('does-not-exist')
- Traceback (most recent call last):
- FileNotFoundError: does-not-exist does not exist as a file.
- """
-
- def _validate(self):
- if not self.isfile():
- raise FileNotFoundError(f"{self} does not exist as a file.")
-
-
-class SpecialResolver:
- class ResolverScope:
- def __init__(self, paths, scope):
- self.paths = paths
- self.scope = scope
-
- def __getattr__(self, class_):
- return self.paths.get_dir(self.scope, class_)
-
- def __init__(self, path_class, *args, **kwargs):
- appdirs = importlib.import_module('appdirs')
-
- vars(self).update(
- path_class=path_class, wrapper=appdirs.AppDirs(*args, **kwargs)
- )
-
- def __getattr__(self, scope):
- return self.ResolverScope(self, scope)
-
- def get_dir(self, scope, class_):
- """
- Return the callable function from appdirs, but with the
- result wrapped in self.path_class
- """
- prop_name = f'{scope}_{class_}_dir'
- value = getattr(self.wrapper, prop_name)
- MultiPath = Multi.for_class(self.path_class)
- return MultiPath.detect(value)
-
-
-class Multi:
- """
- A mix-in for a Path which may contain multiple Paths separated by pathsep.
- """
-
- @classmethod
- def for_class(cls, path_cls):
- name = 'Multi' + path_cls.__name__
- return type(name, (cls, path_cls), {})
-
- @classmethod
- def detect(cls, input):
- if os.pathsep not in input:
- cls = cls._next_class
- return cls(input)
-
- def __iter__(self):
- return iter(map(self._next_class, self.split(os.pathsep)))
-
- @classes.ClassProperty
- @classmethod
- def _next_class(cls):
- """
- Multi-subclasses should use the parent class
- """
- return next(class_ for class_ in cls.__mro__ if not issubclass(class_, Multi))
-
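A brief sketch of how this mix-in is meant to be used on a PATH-like value (assuming ``Multi`` and ``Path`` are importable from this module)::

    import os
    from path import Path, Multi

    MultiPath = Multi.for_class(Path)
    value = os.pathsep.join(['/usr/bin', '/usr/local/bin'])
    many = MultiPath.detect(value)            # separator present: a MultiPath
    assert [str(p) for p in many] == ['/usr/bin', '/usr/local/bin']
    one = MultiPath.detect('/usr/bin')        # no separator: falls back to plain Path
    assert not isinstance(one, Multi)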
-
-class TempDir(Path):
- """
- A temporary directory created via :func:`tempfile.mkdtemp` (accepting
- the same parameters) and usable as a context manager.
-
- For example:
-
- >>> with TempDir() as d:
- ... d.isdir() and isinstance(d, Path)
- True
-
- The directory is deleted automatically.
-
- >>> d.isdir()
- False
-
- .. seealso:: :func:`tempfile.mkdtemp`
- """
-
- @classes.ClassProperty
- @classmethod
- def _next_class(cls):
- return Path
-
- def __new__(cls, *args, **kwargs):
- dirname = tempfile.mkdtemp(*args, **kwargs)
- return super().__new__(cls, dirname)
-
- def __init__(self, *args, **kwargs):
- pass
-
- def __enter__(self):
- # TempDir should return a Path version of itself and not itself
- # so that a second context manager does not create a second
- # temporary directory, but rather changes CWD to the location
- # of the temporary directory.
- return self._next_class(self)
-
- def __exit__(self, exc_type, exc_value, traceback):
- self.rmtree()
-
-
-class Handlers:
- def strict(msg):
- raise
-
- def warn(msg):
- warnings.warn(msg, TreeWalkWarning)
-
- def ignore(msg):
- pass
-
- @classmethod
- def _resolve(cls, param):
- if not callable(param) and param not in vars(Handlers):
- raise ValueError("invalid errors parameter")
- return vars(cls).get(param, param)
diff --git a/contrib/python/path/path/classes.py b/contrib/python/path/path/classes.py
deleted file mode 100644
index b6101d0a7e..0000000000
--- a/contrib/python/path/path/classes.py
+++ /dev/null
@@ -1,27 +0,0 @@
-import functools
-
-
-class ClassProperty(property):
- def __get__(self, cls, owner):
- return self.fget.__get__(None, owner)()
-
-
-class multimethod:
- """
- Acts like a classmethod when invoked from the class and like an
- instancemethod when invoked from the instance.
- """
-
- def __init__(self, func):
- self.func = func
-
- def __get__(self, instance, owner):
- """
- If called on an instance, pass the instance as the first
- argument.
- """
- return (
- functools.partial(self.func, owner)
- if instance is None
- else functools.partial(self.func, owner, instance)
- )
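The ``multimethod`` descriptor above binds the owning class as the first argument in both cases and additionally binds the instance when invoked from one; a minimal sketch (``Example`` is a hypothetical class used only for illustration, and ``multimethod`` is assumed importable from this ``classes`` module)::

    class Example:
        @multimethod
        def describe(cls, instance=None):
            # cls is always the class; instance is filled in only for instance calls
            return (cls.__name__, instance)

    assert Example.describe() == ('Example', None)   # class call
    obj = Example()
    assert obj.describe() == ('Example', obj)        # instance call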
diff --git a/contrib/python/path/path/masks.py b/contrib/python/path/path/masks.py
deleted file mode 100644
index e7037e9603..0000000000
--- a/contrib/python/path/path/masks.py
+++ /dev/null
@@ -1,159 +0,0 @@
-import re
-import functools
-import operator
-import itertools
-
-
-# from jaraco.functools
-def compose(*funcs):
- compose_two = lambda f1, f2: lambda *args, **kwargs: f1(f2(*args, **kwargs)) # noqa
- return functools.reduce(compose_two, funcs)
-
-
-# from jaraco.structures.binary
-def gen_bit_values(number):
- """
- Return a zero or one for each bit of a numeric value up to the most
- significant 1 bit, beginning with the least significant bit.
-
- >>> list(gen_bit_values(16))
- [0, 0, 0, 0, 1]
- """
- digits = bin(number)[2:]
- return map(int, reversed(digits))
-
-
-# from more_itertools
-def padded(iterable, fillvalue=None, n=None, next_multiple=False):
- """Yield the elements from *iterable*, followed by *fillvalue*, such that
- at least *n* items are emitted.
-
- >>> list(padded([1, 2, 3], '?', 5))
- [1, 2, 3, '?', '?']
-
- If *next_multiple* is ``True``, *fillvalue* will be emitted until the
- number of items emitted is a multiple of *n*::
-
- >>> list(padded([1, 2, 3, 4], n=3, next_multiple=True))
- [1, 2, 3, 4, None, None]
-
- If *n* is ``None``, *fillvalue* will be emitted indefinitely.
-
- """
- it = iter(iterable)
- if n is None:
- yield from itertools.chain(it, itertools.repeat(fillvalue))
- elif n < 1:
- raise ValueError('n must be at least 1')
- else:
- item_count = 0
- for item in it:
- yield item
- item_count += 1
-
- remaining = (n - item_count) % n if next_multiple else n - item_count
- for _ in range(remaining):
- yield fillvalue
-
-
-def compound(mode):
- """
- Support multiple, comma-separated Unix chmod symbolic modes.
-
- >>> oct(compound('a=r,u+w')(0))
- '0o644'
- """
- return compose(*map(simple, reversed(mode.split(','))))
-
-
-def simple(mode):
- """
- Convert a Unix chmod symbolic mode like ``'ugo+rwx'`` to a function
- suitable for applying to a mask to affect that change.
-
- >>> mask = simple('ugo+rwx')
- >>> mask(0o554) == 0o777
- True
-
- >>> simple('go-x')(0o777) == 0o766
- True
-
- >>> simple('o-x')(0o445) == 0o444
- True
-
- >>> simple('a+x')(0) == 0o111
- True
-
- >>> simple('a=rw')(0o057) == 0o666
- True
-
- >>> simple('u=x')(0o666) == 0o166
- True
-
- >>> simple('g=')(0o157) == 0o107
- True
-
- >>> simple('gobbledeegook')
- Traceback (most recent call last):
- ValueError: ('Unrecognized symbolic mode', 'gobbledeegook')
- """
- # parse the symbolic mode
- parsed = re.match('(?P<who>[ugoa]+)(?P<op>[-+=])(?P<what>[rwx]*)$', mode)
- if not parsed:
- raise ValueError("Unrecognized symbolic mode", mode)
-
- # generate a mask representing the specified permission
- spec_map = dict(r=4, w=2, x=1)
- specs = (spec_map[perm] for perm in parsed.group('what'))
- spec = functools.reduce(operator.or_, specs, 0)
-
- # now apply spec to each subject in who
- shift_map = dict(u=6, g=3, o=0)
- who = parsed.group('who').replace('a', 'ugo')
- masks = (spec << shift_map[subj] for subj in who)
- mask = functools.reduce(operator.or_, masks)
-
- op = parsed.group('op')
-
- # if op is -, invert the mask
- if op == '-':
- mask ^= 0o777
-
- # if op is =, retain extant values for unreferenced subjects
- if op == '=':
- masks = (0o7 << shift_map[subj] for subj in who)
- retain = functools.reduce(operator.or_, masks) ^ 0o777
-
- op_map = {
- '+': operator.or_,
- '-': operator.and_,
- '=': lambda mask, target: target & retain ^ mask,
- }
- return functools.partial(op_map[op], mask)
-
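To make the mask arithmetic in ``simple`` concrete, a small worked sketch (assuming ``simple`` and ``compound`` are imported from this ``masks`` module)::

    # 'g+w': what='w' gives spec=2, who='g' shifts it by 3, so mask=0o020; '+' ORs it in.
    assert simple('g+w')(0o644) == 0o664
    # 'u=r': bits outside 'u' are retained, then r<<6 (0o400) replaces the user bits.
    assert simple('u=r')(0o777) == 0o477
    # compound applies the comma-separated clauses right to left.
    assert compound('a=r,u+w')(0) == 0o644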
-
-class Permissions(int):
- """
- >>> perms = Permissions(0o764)
- >>> oct(perms)
- '0o764'
- >>> perms.symbolic
- 'rwxrw-r--'
- >>> str(perms)
- 'rwxrw-r--'
- >>> str(Permissions(0o222))
- '-w--w--w-'
- """
-
- @property
- def symbolic(self):
- return ''.join(
- ['-', val][bit] for val, bit in zip(itertools.cycle('rwx'), self.bits)
- )
-
- @property
- def bits(self):
- return reversed(tuple(padded(gen_bit_values(self), 0, n=9)))
-
- def __str__(self):
- return self.symbolic
diff --git a/contrib/python/path/path/matchers.py b/contrib/python/path/path/matchers.py
deleted file mode 100644
index 63ca218a80..0000000000
--- a/contrib/python/path/path/matchers.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import ntpath
-import fnmatch
-
-
-def load(param):
- """
- If the supplied parameter is a string, assume it's a simple
- pattern.
- """
- return (
- Pattern(param)
- if isinstance(param, str)
- else param
- if param is not None
- else Null()
- )
-
-
-class Base:
- pass
-
-
-class Null(Base):
- def __call__(self, path):
- return True
-
-
-class Pattern(Base):
- def __init__(self, pattern):
- self.pattern = pattern
-
- def get_pattern(self, normcase):
- try:
- return self._pattern
- except AttributeError:
- pass
- self._pattern = normcase(self.pattern)
- return self._pattern
-
- def __call__(self, path):
- normcase = getattr(self, 'normcase', path.module.normcase)
- pattern = self.get_pattern(normcase)
- return fnmatch.fnmatchcase(normcase(path.name), pattern)
-
-
-class CaseInsensitive(Pattern):
- """
- A Pattern with a ``'normcase'`` property, suitable for passing to
- :meth:`listdir`, :meth:`dirs`, :meth:`files`, :meth:`walk`,
- :meth:`walkdirs`, or :meth:`walkfiles` to match case-insensitively.
-
- For example, to get all files ending in .py, .Py, .pY, or .PY in the
- current directory::
-
- from path import Path, matchers
- Path('.').files(matchers.CaseInsensitive('*.py'))
- """
-
- normcase = staticmethod(ntpath.normcase)
diff --git a/contrib/python/path/path/py.typed b/contrib/python/path/path/py.typed
deleted file mode 100644
index e69de29bb2..0000000000
--- a/contrib/python/path/path/py.typed
+++ /dev/null
diff --git a/contrib/python/path/ya.make b/contrib/python/path/ya.make
deleted file mode 100644
index aa9cb07250..0000000000
--- a/contrib/python/path/ya.make
+++ /dev/null
@@ -1,30 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(16.7.1)
-
-LICENSE(MIT)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- path/__init__.py
- path/__init__.pyi
- path/classes.py
- path/classes.pyi
- path/masks.py
- path/masks.pyi
- path/matchers.py
- path/matchers.pyi
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/path/
- .dist-info/METADATA
- .dist-info/top_level.txt
- path/py.typed
-)
-
-END()
diff --git a/contrib/python/pygtrie/py2/LICENSE b/contrib/python/pygtrie/py2/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/contrib/python/pygtrie/py2/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/contrib/python/pygtrie/py2/README.rst b/contrib/python/pygtrie/py2/README.rst
deleted file mode 100644
index 41ca83db94..0000000000
--- a/contrib/python/pygtrie/py2/README.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-pygtrie
-=======
-
-.. image:: https://readthedocs.org/projects/pygtrie/badge/?version=latest
- :target: http://pygtrie.readthedocs.io/en/latest/
- :alt: Documentation build status (latest)
-
-.. image:: https://readthedocs.org/projects/pygtrie/badge/?version=stable
- :target: http://pygtrie.readthedocs.io/en/stable/
- :alt: Documentation build status (stable)
-
-.. image:: https://api.travis-ci.com/mina86/pygtrie.svg
- :target: https://travis-ci.com/mina86/pygtrie
- :alt: Continuous integration status
-
-pygtrie is a pure Python implementation of a trie data structure
-compatible with Python 2.x and Python 3.x.
-
-`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known
-as radix or prefix tree, is a tree associating keys to values where
-all the descendants of a node have a common prefix (associated with
-that node).
-
-The trie module contains ``Trie``, ``CharTrie`` and ``StringTrie``
-classes each implementing a mutable mapping interface, i.e. ``dict``
-interface. As such, in most circumstances, ``Trie`` could be used as
-a drop-in replacement for a ``dict``, but the prefix nature of the
-data structure is trie's real strength.
-
-The module also contains ``PrefixSet`` class which uses a trie to
-store a set of prefixes such that a key is contained in the set if it
-or its prefix is stored in the set.
-
-Features
---------
-
-- A full mutable mapping implementation.
-
-- Supports iterating over as well as deleting a subtrie.
-
-- Supports prefix checking as well as shortest and longest prefix
- look-up.
-
-- Extensible for any kind of user-defined keys.
-
-- A PrefixSet supports "all keys starting with given prefix" logic.
-
-- Can store any value including None.
-
-Installation
-------------
-
-To install pygtrie, simply run::
-
- pip install pygtrie
-
-or by adding a line such as::
-
- pygtrie == 2.*
-
-to the project's `requirements file
-<https://pip.pypa.io/en/latest/user_guide/#requirements-files>`_.
-Alternatively, if installation from source is desired, it can be
-achieved by executing::
-
- python setup.py install
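To complement the README removed above, a hedged usage sketch of the interfaces it describes (exact return wrappers may differ slightly between pygtrie versions)::

    import pygtrie

    t = pygtrie.CharTrie()                 # string keys, handled character by character
    t['foo'] = 1
    t['foo/bar'] = 2
    assert t.has_subtrie('fo')             # something is stored under this prefix
    assert t.longest_prefix('foo/bar/baz').value == 2

    st = pygtrie.StringTrie(separator='/') # splits keys on '/', handy for path-like keys
    st['foo/bar'] = 42
    assert list(st.keys(prefix='foo')) == ['foo/bar']

    ps = pygtrie.PrefixSet(['foo'])        # membership extends to anything with a stored prefix
    assert 'foobar' in ps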
diff --git a/contrib/python/pygtrie/py3/.dist-info/METADATA b/contrib/python/pygtrie/py3/.dist-info/METADATA
deleted file mode 100644
index bb99559336..0000000000
--- a/contrib/python/pygtrie/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,220 +0,0 @@
-Metadata-Version: 2.1
-Name: pygtrie
-Version: 2.5.0
-Summary: A pure Python trie data structure implementation.
-Home-page: https://github.com/mina86/pygtrie
-Author: Michal Nazarewicz
-Author-email: mina86@mina86.com
-License: Apache-2.0
-Download-URL: https://github.com/mina86/pygtrie/tarball/v2.5.0
-Keywords: trie,prefix tree,data structure
-Platform: Platform Independent
-Classifier: Development Status :: 5 - Production/Stable
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: Apache Software License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-License-File: LICENSE
-
-pygtrie
-=======
-
-pygtrie is a pure Python implementation of a trie data structure
-compatible with Python 2.x and Python 3.x.
-
-`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known
-as radix or prefix tree, is a tree associating keys to values where
-all the descendants of a node have a common prefix (associated with
-that node).
-
-The trie module contains ``Trie``, ``CharTrie`` and ``StringTrie``
-classes each implementing a mutable mapping interface, i.e. ``dict``
-interface. As such, in most circumstances, ``Trie`` could be used as
-a drop-in replacement for a ``dict``, but the prefix nature of the
-data structure is trie's real strength.
-
-The module also contains ``PrefixSet`` class which uses a trie to
-store a set of prefixes such that a key is contained in the set if it
-or its prefix is stored in the set.
-
-Features
---------
-
-- A full mutable mapping implementation.
-
-- Supports iterating over as well as deleting a subtrie.
-
-- Supports prefix checking as well as shortest and longest prefix
- look-up.
-
-- Extensible for any kind of user-defined keys.
-
-- A PrefixSet supports "all keys starting with given prefix" logic.
-
-- Can store any value including None.
-
-Installation
-------------
-
-To install pygtrie, simply run::
-
- pip install pygtrie
-
-or by adding a line such as::
-
- pygtrie == 2.*
-
-to the project's `requirements file
-<https://pip.pypa.io/en/latest/user_guide/#requirements-files>`_.
-Alternatively, if installation from source is desired, it can be
-achieved by executing::
-
- python setup.py install
-
-Version History
----------------
-
-2.5: 2022/07/16
-
-- Add ``pygtrie.Trie.merge`` method which merges structures of two
- tries.
-
-- Add ``pygtrie.Trie.strictly_equals`` method which compares two
-  tries with stricter rules than the regular equality operator: it is not
-  sufficient that keys and values are the same; the structure of
-  the tries must be the same as well.  For example:
-
- >>> t0 = StringTrie({'foo/bar.baz': 42}, separator='/')
- >>> t1 = StringTrie({'foo/bar.baz': 42}, separator='.')
- >>> t0 == t1
- True
- >>> t0.strictly_equals(t1)
- False
-
-- Fix ``pygtrie.Trie.__eq__`` implementation such that key values
- are taken into consideration rather than just looking at trie
-  structure.  To see what this means it's best to look at a few
- examples. Firstly:
-
- >>> t0 = StringTrie({'foo/bar': 42}, separator='/')
- >>> t1 = StringTrie({'foo.bar': 42}, separator='.')
- >>> t0 == t1
- False
-
- This used to be true since the two tries have the same node
-  structure.  However, as far as the Mapping interface is concerned, they
-  use different keys, i.e. ``set(t0) != set(t1)``.  Secondly:
-
- >>> t0 = StringTrie({'foo/bar.baz': 42}, separator='/')
- >>> t1 = StringTrie({'foo/bar.baz': 42}, separator='.')
- >>> t0 == t1
- True
-
- This used to be false since the two tries have different node
- structures (the first one splits key into ``('foo', 'bar.baz')``
- while the second into ``('foo/bar', 'baz')``). However, their keys
-  are the same, i.e. ``set(t0) == set(t1)``.  And lastly:
-
- >>> t0 = Trie({'foo': 42})
- >>> t1 = CharTrie({'foo': 42})
- >>> t0 == t1
- False
-
- This used to be true since the two tries have the same node
- structure. However, the two classes return key as different values.
- ``pygtrie.Trie`` returns keys as tuples while
- ``pygtrie.CharTrie`` returns them as strings.
-
-2.4.2: 2021/01/03
-
-- Remove use of 'super' in ``setup.py`` to fix compatibility with
- Python 2.7. This changes build code only; no changes to the library
- itself.
-
-2.4.1: 2020/11/20
-
-- Remove dependency on ``packaging`` module from ``setup.py`` to fix
- installation on systems without that package. This changes build
- code only; no changes to the library itself. [Thanks to Eric
- McLachlan for reporting]
-
-2.4.0: 2020/11/19 [pulled back from PyPi]
-
-- Change ``children`` argument of the ``node_factory`` passed to
- ``pygtrie.Trie.traverse`` from a generator to an iterator with
- a custom bool conversion. This allows checking whether node has
- children without having to iterate over them (``bool(children)``)
-
- To test whether this feature is available, one can check whether
- `Trie.traverse.uses_bool_convertible_children` property is true,
- e.g.: ``getattr(pygtrie.Trie.traverse,
- 'uses_bool_convertible_children', False)``.
-
- [Thanks to Pallab Pain for suggesting the feature]
-
-2.3.3: 2020/04/04
-
-- Fix to '``AttributeError``: ``_NoChildren`` object has no
-  attribute ``sorted_items``' failure when iterating over a trie with
- sorting enabled. [Thanks to Pallab Pain for reporting]
-
-- Add ``value`` property setter to step objects returned by
- ``pygtrie.Trie.walk_towards`` et al. This deprecates the
- ``set`` method.
-
-- The module now exports `pygtrie.__version__` making it possible to
-  determine the version of the library at run-time.
-
-2.3.2: 2019/07/18
-
-- Trivial metadata fix
-
-2.3.1: 2019/07/18 [pulled back from PyPi]
-
-- Fix to ``pygtrie.PrefixSet`` initialisation incorrectly storing
- elements even if their prefixes are also added to the set.
-
- For example, ``PrefixSet(('foo', 'foobar'))`` incorrectly resulted
- in a two-element set even though the interface dictates that only
-  ``foo`` is kept (recall that if ``foo`` is a member of the set,
- ``foobar`` is as well). [Thanks to Tal Maimon for reporting]
-
-- Fix to ``pygtrie.Trie.copy`` method not preserving
- enable-sorting flag and, in case of ``pygtrie.StringTrie``,
- ``separator`` property.
-
-- Add support for the ``copy`` module so ``copy.copy`` can now be
- used with trie objects.
-
-- Leaves and nodes with just one child use a more memory-optimised
-  representation, which reduces the overall memory usage of a trie
-  structure.
-
-- Minor performance improvement for adding new elements to
- a ``pygtrie.PrefixSet``.
-
-- Improvements to string representation of objects which now includes
- type and, for ``pygtrie.StringTrie`` object, value of separator
- property.
-
-2.3: 2018/08/10
-
-- New ``pygtrie.Trie.walk_towards`` method allows walking a path
-  towards a node with a given key, accessing each step of the path.
-  Compared to the `pygtrie.Trie.walk_prefixes` method, steps for nodes
-  without assigned values are returned as well.
-
-- Fix to ``pygtrie.PrefixSet.copy`` not preserving type of backing
- trie.
-
-- ``pygtrie.StringTrie`` now checks and explicitly rejects empty
- separators. Previously empty separator would be accepted but lead
- to confusing errors later on. [Thanks to Waren Long]
-
-- Various documentation improvements, Python 2/3 compatibility and
- test coverage (python-coverage reports 100%).
-
diff --git a/contrib/python/pygtrie/py3/.dist-info/top_level.txt b/contrib/python/pygtrie/py3/.dist-info/top_level.txt
deleted file mode 100644
index 5b98eaa2e7..0000000000
--- a/contrib/python/pygtrie/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-pygtrie
diff --git a/contrib/python/pygtrie/py3/LICENSE b/contrib/python/pygtrie/py3/LICENSE
deleted file mode 100644
index d645695673..0000000000
--- a/contrib/python/pygtrie/py3/LICENSE
+++ /dev/null
@@ -1,202 +0,0 @@
-
- Apache License
- Version 2.0, January 2004
- http://www.apache.org/licenses/
-
- TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
- 1. Definitions.
-
- "License" shall mean the terms and conditions for use, reproduction,
- and distribution as defined by Sections 1 through 9 of this document.
-
- "Licensor" shall mean the copyright owner or entity authorized by
- the copyright owner that is granting the License.
-
- "Legal Entity" shall mean the union of the acting entity and all
- other entities that control, are controlled by, or are under common
- control with that entity. For the purposes of this definition,
- "control" means (i) the power, direct or indirect, to cause the
- direction or management of such entity, whether by contract or
- otherwise, or (ii) ownership of fifty percent (50%) or more of the
- outstanding shares, or (iii) beneficial ownership of such entity.
-
- "You" (or "Your") shall mean an individual or Legal Entity
- exercising permissions granted by this License.
-
- "Source" form shall mean the preferred form for making modifications,
- including but not limited to software source code, documentation
- source, and configuration files.
-
- "Object" form shall mean any form resulting from mechanical
- transformation or translation of a Source form, including but
- not limited to compiled object code, generated documentation,
- and conversions to other media types.
-
- "Work" shall mean the work of authorship, whether in Source or
- Object form, made available under the License, as indicated by a
- copyright notice that is included in or attached to the work
- (an example is provided in the Appendix below).
-
- "Derivative Works" shall mean any work, whether in Source or Object
- form, that is based on (or derived from) the Work and for which the
- editorial revisions, annotations, elaborations, or other modifications
- represent, as a whole, an original work of authorship. For the purposes
- of this License, Derivative Works shall not include works that remain
- separable from, or merely link (or bind by name) to the interfaces of,
- the Work and Derivative Works thereof.
-
- "Contribution" shall mean any work of authorship, including
- the original version of the Work and any modifications or additions
- to that Work or Derivative Works thereof, that is intentionally
- submitted to Licensor for inclusion in the Work by the copyright owner
- or by an individual or Legal Entity authorized to submit on behalf of
- the copyright owner. For the purposes of this definition, "submitted"
- means any form of electronic, verbal, or written communication sent
- to the Licensor or its representatives, including but not limited to
- communication on electronic mailing lists, source code control systems,
- and issue tracking systems that are managed by, or on behalf of, the
- Licensor for the purpose of discussing and improving the Work, but
- excluding communication that is conspicuously marked or otherwise
- designated in writing by the copyright owner as "Not a Contribution."
-
- "Contributor" shall mean Licensor and any individual or Legal Entity
- on behalf of whom a Contribution has been received by Licensor and
- subsequently incorporated within the Work.
-
- 2. Grant of Copyright License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- copyright license to reproduce, prepare Derivative Works of,
- publicly display, publicly perform, sublicense, and distribute the
- Work and such Derivative Works in Source or Object form.
-
- 3. Grant of Patent License. Subject to the terms and conditions of
- this License, each Contributor hereby grants to You a perpetual,
- worldwide, non-exclusive, no-charge, royalty-free, irrevocable
- (except as stated in this section) patent license to make, have made,
- use, offer to sell, sell, import, and otherwise transfer the Work,
- where such license applies only to those patent claims licensable
- by such Contributor that are necessarily infringed by their
- Contribution(s) alone or by combination of their Contribution(s)
- with the Work to which such Contribution(s) was submitted. If You
- institute patent litigation against any entity (including a
- cross-claim or counterclaim in a lawsuit) alleging that the Work
- or a Contribution incorporated within the Work constitutes direct
- or contributory patent infringement, then any patent licenses
- granted to You under this License for that Work shall terminate
- as of the date such litigation is filed.
-
- 4. Redistribution. You may reproduce and distribute copies of the
- Work or Derivative Works thereof in any medium, with or without
- modifications, and in Source or Object form, provided that You
- meet the following conditions:
-
- (a) You must give any other recipients of the Work or
- Derivative Works a copy of this License; and
-
- (b) You must cause any modified files to carry prominent notices
- stating that You changed the files; and
-
- (c) You must retain, in the Source form of any Derivative Works
- that You distribute, all copyright, patent, trademark, and
- attribution notices from the Source form of the Work,
- excluding those notices that do not pertain to any part of
- the Derivative Works; and
-
- (d) If the Work includes a "NOTICE" text file as part of its
- distribution, then any Derivative Works that You distribute must
- include a readable copy of the attribution notices contained
- within such NOTICE file, excluding those notices that do not
- pertain to any part of the Derivative Works, in at least one
- of the following places: within a NOTICE text file distributed
- as part of the Derivative Works; within the Source form or
- documentation, if provided along with the Derivative Works; or,
- within a display generated by the Derivative Works, if and
- wherever such third-party notices normally appear. The contents
- of the NOTICE file are for informational purposes only and
- do not modify the License. You may add Your own attribution
- notices within Derivative Works that You distribute, alongside
- or as an addendum to the NOTICE text from the Work, provided
- that such additional attribution notices cannot be construed
- as modifying the License.
-
- You may add Your own copyright statement to Your modifications and
- may provide additional or different license terms and conditions
- for use, reproduction, or distribution of Your modifications, or
- for any such Derivative Works as a whole, provided Your use,
- reproduction, and distribution of the Work otherwise complies with
- the conditions stated in this License.
-
- 5. Submission of Contributions. Unless You explicitly state otherwise,
- any Contribution intentionally submitted for inclusion in the Work
- by You to the Licensor shall be under the terms and conditions of
- this License, without any additional terms or conditions.
- Notwithstanding the above, nothing herein shall supersede or modify
- the terms of any separate license agreement you may have executed
- with Licensor regarding such Contributions.
-
- 6. Trademarks. This License does not grant permission to use the trade
- names, trademarks, service marks, or product names of the Licensor,
- except as required for reasonable and customary use in describing the
- origin of the Work and reproducing the content of the NOTICE file.
-
- 7. Disclaimer of Warranty. Unless required by applicable law or
- agreed to in writing, Licensor provides the Work (and each
- Contributor provides its Contributions) on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
- implied, including, without limitation, any warranties or conditions
- of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
- PARTICULAR PURPOSE. You are solely responsible for determining the
- appropriateness of using or redistributing the Work and assume any
- risks associated with Your exercise of permissions under this License.
-
- 8. Limitation of Liability. In no event and under no legal theory,
- whether in tort (including negligence), contract, or otherwise,
- unless required by applicable law (such as deliberate and grossly
- negligent acts) or agreed to in writing, shall any Contributor be
- liable to You for damages, including any direct, indirect, special,
- incidental, or consequential damages of any character arising as a
- result of this License or out of the use or inability to use the
- Work (including but not limited to damages for loss of goodwill,
- work stoppage, computer failure or malfunction, or any and all
- other commercial damages or losses), even if such Contributor
- has been advised of the possibility of such damages.
-
- 9. Accepting Warranty or Additional Liability. While redistributing
- the Work or Derivative Works thereof, You may choose to offer,
- and charge a fee for, acceptance of support, warranty, indemnity,
- or other liability obligations and/or rights consistent with this
- License. However, in accepting such obligations, You may act only
- on Your own behalf and on Your sole responsibility, not on behalf
- of any other Contributor, and only if You agree to indemnify,
- defend, and hold each Contributor harmless for any liability
- incurred by, or claims asserted against, such Contributor by reason
- of your accepting any such warranty or additional liability.
-
- END OF TERMS AND CONDITIONS
-
- APPENDIX: How to apply the Apache License to your work.
-
- To apply the Apache License to your work, attach the following
- boilerplate notice, with the fields enclosed by brackets "[]"
- replaced with your own identifying information. (Don't include
- the brackets!) The text should be enclosed in the appropriate
- comment syntax for the file format. We also recommend that a
- file or class name and description of purpose be included on the
- same "printed page" as the copyright notice for easier
- identification within third-party archives.
-
- Copyright [yyyy] [name of copyright owner]
-
- Licensed under the Apache License, Version 2.0 (the "License");
- you may not use this file except in compliance with the License.
- You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
- Unless required by applicable law or agreed to in writing, software
- distributed under the License is distributed on an "AS IS" BASIS,
- WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- See the License for the specific language governing permissions and
- limitations under the License.
diff --git a/contrib/python/pygtrie/py3/README.rst b/contrib/python/pygtrie/py3/README.rst
deleted file mode 100644
index 41ca83db94..0000000000
--- a/contrib/python/pygtrie/py3/README.rst
+++ /dev/null
@@ -1,66 +0,0 @@
-pygtrie
-=======
-
-.. image:: https://readthedocs.org/projects/pygtrie/badge/?version=latest
- :target: http://pygtrie.readthedocs.io/en/latest/
- :alt: Documentation build status (latest)
-
-.. image:: https://readthedocs.org/projects/pygtrie/badge/?version=stable
- :target: http://pygtrie.readthedocs.io/en/stable/
- :alt: Documentation build status (stable)
-
-.. image:: https://api.travis-ci.com/mina86/pygtrie.svg
- :target: https://travis-ci.com/mina86/pygtrie
- :alt: Continuous integration status
-
-pygtrie is a pure Python implementation of a trie data structure
-compatible with Python 2.x and Python 3.x.
-
-`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known
-as radix or prefix tree, is a tree associating keys to values where
-all the descendants of a node have a common prefix (associated with
-that node).
-
-The trie module contains ``Trie``, ``CharTrie`` and ``StringTrie``
-classes each implementing a mutable mapping interface, i.e. ``dict``
-interface. As such, in most circumstances, ``Trie`` could be used as
-a drop-in replacement for a ``dict``, but the prefix nature of the
-data structure is trie’s real strength.
-
-The module also contains ``PrefixSet`` class which uses a trie to
-store a set of prefixes such that a key is contained in the set if it
-or its prefix is stored in the set.
-
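-For a quick feel for the interface, here is a small illustrative sketch (it
-uses only the classes and methods described above)::
-
-    import pygtrie
-
-    t = pygtrie.CharTrie()
-    t['cat'] = 1
-    t['car'] = 2
-    assert t.has_subtrie('ca')            # 'ca' is a prefix of stored keys
-    assert sorted(t.keys(prefix='ca')) == ['car', 'cat']
-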
-Features
---------
-
-- A full mutable mapping implementation.
-
-- Supports iterating over as well as deleting a subtrie.
-
-- Supports prefix checking as well as shortest and longest prefix
- look-up.
-
-- Extensible for any kind of user-defined keys.
-
-- A PrefixSet supports “all keys starting with given prefix” logic.
-
-- Can store any value including None.
-
-Installation
-------------
-
-To install pygtrie, simply run::
-
- pip install pygtrie
-
-or by adding a line such as::
-
- pygtrie == 2.*
-
-to the project’s `requirements file
-<https://pip.pypa.io/en/latest/user_guide/#requirements-files>`_.
-Alternatively, if installation from source is desired, it can be
-achieved by executing::
-
- python setup.py install
diff --git a/contrib/python/pygtrie/py3/pygtrie.py b/contrib/python/pygtrie/py3/pygtrie.py
deleted file mode 100644
index 5e91ef770c..0000000000
--- a/contrib/python/pygtrie/py3/pygtrie.py
+++ /dev/null
@@ -1,1939 +0,0 @@
-# -*- coding: utf-8 -*-
-"""Pure Python implementation of a trie data structure compatible with Python
-2.x and Python 3.x.
-
-`Trie data structure <http://en.wikipedia.org/wiki/Trie>`_, also known as radix
-or prefix tree, is a tree associating keys to values where all the descendants
-of a node have a common prefix (associated with that node).
-
-The trie module contains :class:`pygtrie.Trie`, :class:`pygtrie.CharTrie` and
-:class:`pygtrie.StringTrie` classes each implementing a mutable mapping
-interface, i.e. :class:`dict` interface. As such, in most circumstances,
-:class:`pygtrie.Trie` could be used as a drop-in replacement for
-a :class:`dict`, but the prefix nature of the data structure is trie’s real
-strength.
-
-The module also contains :class:`pygtrie.PrefixSet` class which uses a trie to
-store a set of prefixes such that a key is contained in the set if it or its
-prefix is stored in the set.
-
-Features
---------
-
-- A full mutable mapping implementation.
-
-- Supports iterating over as well as deleting a branch of a trie
- (i.e. a subtrie).
-
-- Supports prefix checking as well as shortest and longest prefix
- look-up.
-
-- Extensible for any kind of user-defined keys.
-
-- A PrefixSet supports “all keys starting with given prefix” logic.
-
-- Can store any value including None.
-
-For a few simple examples see ``example.py`` file.
-"""
-
-from __future__ import absolute_import, division, print_function
-
-__author__ = 'Michal Nazarewicz <mina86@mina86.com>'
-__copyright__ = ('Copyright 2014-2017 Google LLC',
- 'Copyright 2018-2020 Michal Nazarewicz <mina86@mina86.com>')
-__version__ = '2.5.0'
-
-
-import copy as _copy
-try:
- import collections.abc as _abc
-except ImportError: # Python 2 compatibility
- import collections as _abc
-
-
-class ShortKeyError(KeyError):
- """Raised when given key is a prefix of an existing longer key
- but does not have a value associated with itself."""
-
-
-class _NoChildren(object):
- """Collection representing lack of any children.
-
- Also acts as an empty iterable and an empty iterator. This isn’t the
- cleanest design but it makes various things more concise and avoids object
- allocations in a few places.
-
- Don’t create objects of this type directly; instead use _EMPTY singleton.
- """
- __slots__ = ()
-
- def __bool__(self):
- return False
- __nonzero__ = __bool__
- def __len__(self):
- return 0
- def __iter__(self):
- return self
- iteritems = sorted_items = __iter__
- def __next__(self):
- raise StopIteration()
- next = __next__
-
- def get(self, _step):
- return None
-
- def add(self, parent, step):
- node = _Node()
- parent.children = _OneChild(step, node)
- return node
-
- require = add
-
- def copy(self, _make_copy, _queue):
- return self
-
- def __deepcopy__(self, memo):
- return self
-
- # delete is not implemented on purpose since it should never be called on
- # a node with no children.
-
-
-_EMPTY = _NoChildren()
-
-
-class _OneChild(object):
- """Children collection representing a single child."""
-
- __slots__ = ('step', 'node')
-
- def __init__(self, step, node):
- self.step = step
- self.node = node
-
- def __bool__(self):
- return True
- __nonzero__ = __bool__
- def __len__(self):
- return 1
-
- def sorted_items(self):
- return [(self.step, self.node)]
-
- def iteritems(self):
- return iter(((self.step, self.node),))
-
- def get(self, step):
- return self.node if step == self.step else None
-
- def add(self, parent, step):
- node = _Node()
- parent.children = _Children((self.step, self.node), (step, node))
- return node
-
- def require(self, parent, step):
- return self.node if self.step == step else self.add(parent, step)
-
- def merge(self, other, queue):
- """Moves children from other into this object."""
- if type(other) == _OneChild and other.step == self.step:
- queue.append((self.node, other.node))
- return self
- else:
- children = _Children((self.step, self.node))
- children.merge(other, queue)
- return children
-
- def delete(self, parent, _step):
- parent.children = _EMPTY
-
- def copy(self, make_copy, queue):
- cpy = _OneChild(make_copy(self.step), self.node.shallow_copy(make_copy))
- queue.append((cpy.node,))
- return cpy
-
-
-class _Children(dict):
- """Children collection representing more than one child."""
-
- __slots__ = ()
-
- def __init__(self, *items):
- super(_Children, self).__init__(items)
-
- if hasattr(dict, 'iteritems'): # Python 2 compatibility
- def sorted_items(self):
- items = self.items()
- items.sort()
- return items
- else:
- def sorted_items(self):
- return sorted(self.items())
-
- def iteritems(self):
- return iter(self.items())
-
- def add(self, _parent, step):
- self[step] = node = _Node()
- return node
-
- def require(self, _parent, step):
- return self.setdefault(step, _Node())
-
- def merge(self, other, queue):
- """Moves children from other into this object."""
- for step, other_node in other.iteritems():
- node = self.setdefault(step, other_node)
- if node is not other_node:
- queue.append((node, other_node))
- return self
-
- def delete(self, parent, step):
- del self[step]
- if len(self) == 1:
- parent.children = _OneChild(*self.popitem())
-
- def copy(self, make_copy, queue):
- cpy = _Children()
- cpy.update((make_copy(step), node.shallow_copy(make_copy))
- for step, node in self.items())
- queue.append(cpy.values())
- return cpy
-
-
-class _Node(object):
- """A single node of a trie.
-
- Stores value associated with the node and dictionary of children.
- """
- __slots__ = ('children', 'value')
-
- def __init__(self):
- self.children = _EMPTY
- self.value = _EMPTY
-
- def merge(self, other, overwrite):
- """Move children from other node into this one.
-
- Args:
- other: Other node to move children and value from.
- overwrite: Whether to overwrite existing node values.
- """
- queue = [(self, other)]
- while queue:
- lhs, rhs = queue.pop()
- if lhs.value is _EMPTY or (overwrite and rhs.value is not _EMPTY):
- lhs.value = rhs.value
- if lhs.children is _EMPTY:
- lhs.children = rhs.children
- elif rhs.children is not _EMPTY:
- lhs.children = lhs.children.merge(rhs.children, queue)
- rhs.children = _EMPTY
-
- def iterate(self, path, shallow, iteritems):
- """Yields all the nodes with values associated to them in the trie.
-
- Args:
- path: Path leading to this node. Used to construct the key when
- returning value of this node and as a prefix for children.
- shallow: Perform a shallow traversal, i.e. do not yield nodes if
- their prefix has been yielded.
- iteritems: A callable taking ``node.children`` as sole argument and
- returning an iterable of children as ``(step, node)`` pair. The
- callable would typically call ``iteritems`` or ``sorted_items``
- method on the argument depending on whether sorted output is
- desired.
-
- Yields:
- ``(path, value)`` tuples.
- """
- # Use iterative function with stack on the heap so we don't hit Python's
- # recursion depth limits.
- node = self
- stack = []
- while True:
- if node.value is not _EMPTY:
- yield path, node.value
-
- if (not shallow or node.value is _EMPTY) and node.children:
- stack.append(iter(iteritems(node.children)))
- path.append(None)
-
- while True:
- try:
- step, node = next(stack[-1])
- path[-1] = step
- break
- except StopIteration:
- stack.pop()
- path.pop()
- except IndexError:
- return
-
- def traverse(self, node_factory, path_conv, path, iteritems):
- """Traverses the node and returns another type of node from factory.
-
- Args:
- node_factory: Callable to construct return value.
- path_conv: Callable to convert node path to a key.
- path: Current path for this node.
- iteritems: A callable taking ``node.children`` as sole argument and
- returning an iterable of children as ``(step, node)`` pair. The
- callable would typically call ``iteritems`` or ``sorted_items``
- method on the argument depending on whether sorted output is
- desired.
-
- Returns:
- An object constructed by calling node_factory(path_conv, path,
- children, value=...), where children are constructed by node_factory
- from the children of this node. There doesn't need to be 1:1
- correspondence between original nodes in the trie and constructed
- nodes (see make_test_node_and_compress in test.py).
- """
- children = self.children and (
- node.traverse(node_factory, path_conv, path + [step], iteritems)
- for step, node in iteritems(self.children))
-
- value_maybe = ()
- if self.value is not _EMPTY:
- value_maybe = (self.value,)
-
- return node_factory(path_conv, tuple(path), children, *value_maybe)
-
- def equals(self, other):
- """Returns whether this and other node are recursively equal."""
- # Like iterate, we don't recurse so this works on deep tries.
- a, b = self, other
- stack = []
- while True:
- if a.value != b.value or len(a.children) != len(b.children):
- return False
- if len(a.children) == 1:
- # We know a.children and b.children are both _OneChild objects
- # but pylint doesn’t recognise that: pylint: disable=no-member
- if a.children.step != b.children.step:
- return False
- a = a.children.node
- b = b.children.node
- continue
- if a.children:
- stack.append((a.children.iteritems(), b.children))
-
- while True:
- try:
- key, a = next(stack[-1][0])
- b = stack[-1][1][key]
- break
- except StopIteration:
- stack.pop()
- except IndexError:
- return True
- except KeyError:
- return False
-
- __bool__ = __nonzero__ = __hash__ = None
-
- def shallow_copy(self, make_copy):
- """Returns a copy of the node which shares the children property."""
- cpy = _Node()
- cpy.children = self.children
- cpy.value = make_copy(self.value)
- return cpy
-
- def copy(self, make_copy):
- """Returns a copy of the node structure."""
- cpy = self.shallow_copy(make_copy)
- queue = [(cpy,)]
- while queue:
- for node in queue.pop():
- node.children = node.children.copy(make_copy, queue)
- return cpy
-
- def __getstate__(self):
- """Get state used for pickling.
-
- The state is encoded as a list of simple commands which consist of an
- integer and some command-dependent number of arguments. The commands
- modify what the current node is by navigating the trie up and down and
- setting node values. Possible commands are:
-
- * [n, step0, step1, ..., stepn-1, value], for n >= 0, specifies steps
- needed to reach the next current node as well as its new value. There
- is no way to create a child node without setting its (or its
- descendant's) value.
-
- * [-n], for n > 0, specifies to go up n steps in the trie.
-
- When encoded as a state, the commands are flattened into a single list.
-
- For example::
-
- [ 0, 'Root',
- 2, 'Foo', 'Bar', 'Root/Foo/Bar Node',
- -1,
- 1, 'Baz', 'Root/Foo/Baz Node',
- -2,
- 1, 'Qux', 'Root/Qux Node' ]
-
- Creates the following hierarchy::
-
- -* value: Root
- +-- Foo --* no value
- | +-- Bar -- * value: Root/Foo/Bar Node
- | +-- Baz -- * value: Root/Foo/Baz Node
- +-- Qux -- * value: Root/Qux Node
-
- Returns:
- A picklable state which can be passed to :func:`_Node.__setstate__`
- to reconstruct the node and its full hierarchy.
- """
- # Like iterate, we don't recurse so pickling works on deep tries.
- state = [] if self.value is _EMPTY else [0]
- last_cmd = 0
- node = self
- stack = []
- while True:
- if node.value is not _EMPTY:
- last_cmd = 0
- state.append(node.value)
- stack.append(node.children.iteritems())
-
- while True:
- step, node = next(stack[-1], (None, None))
- if node is not None:
- break
-
- if last_cmd < 0:
- state[-1] -= 1
- else:
- last_cmd = -1
- state.append(-1)
- stack.pop()
- if not stack:
- state.pop() # Final -n command is not necessary
- return state
-
- if last_cmd > 0:
- last_cmd += 1
- state[-last_cmd] += 1
- else:
- last_cmd = 1
- state.append(1)
- state.append(step)
-
- def __setstate__(self, state):
- """Unpickles node. See :func:`_Node.__getstate__`."""
- self.__init__()
- state = iter(state)
- stack = [self]
- for cmd in state:
- if cmd < 0:
- del stack[cmd:]
- else:
- while cmd > 0:
- parent = stack[-1]
- stack.append(parent.children.add(parent, next(state)))
- cmd -= 1
- stack[-1].value = next(state)
-
-
-class Trie(_abc.MutableMapping):
- """A trie implementation with dict interface plus some extensions.
-
- Keys used with the :class:`pygtrie.Trie` class must be iterable with each
- component being a hashable object. In other words, for a given key,
- ``dict.fromkeys(key)`` must be a valid expression.
-
- In particular, strings work well as trie keys, however when getting them
- back (for example via :func:`Trie.iterkeys` method), instead of strings,
- tuples of characters are produced. For that reason,
- :class:`pygtrie.CharTrie` or :class:`pygtrie.StringTrie` classes may be
- preferred when using string keys.
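-
- For instance (an illustrative doctest), a string key stored in a plain
- :class:`pygtrie.Trie` comes back as a tuple of characters:
-
- >>> import pygtrie
- >>> t = pygtrie.Trie()
- >>> t['cat'] = True
- >>> t.keys()
- [('c', 'a', 't')]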
- """
-
- def __init__(self, *args, **kwargs):
- """Initialises the trie.
-
- Arguments are interpreted the same way :func:`Trie.update` interprets
- them.
- """
- self._root = _Node()
- self._iteritems = self._ITERITEMS_CALLBACKS[0]
- self.update(*args, **kwargs)
-
- _ITERITEMS_CALLBACKS = (lambda x: x.iteritems(), lambda x: x.sorted_items())
-
- def enable_sorting(self, enable=True):
- """Enables sorting of child nodes when iterating and traversing.
-
- Normally, child nodes are not sorted when iterating or traversing over
- the trie (just like dict elements are not sorted). This method allows
- sorting to be enabled (which was the behaviour prior to pygtrie 2.0
- release).
-
- For Trie class, enabling sorting of children is identical to simply
- sorting the list of items since Trie returns keys as tuples. However,
- for other implementations such as StringTrie the two may behave subtly
- differently. For example, sorting items might produce::
-
- root/foo-bar
- root/foo/baz
-
- even though foo comes before foo-bar.
-
- Args:
- enable: Whether to enable sorting of child nodes.
- """
- self._iteritems = self._ITERITEMS_CALLBACKS[bool(enable)]
-
- def __getstate__(self):
- # encode self._iteritems as self._sorted when pickling
- state = self.__dict__.copy()
- callback = state.pop('_iteritems', None)
- state['_sorted'] = callback is self._ITERITEMS_CALLBACKS[1]
- return state
-
- def __setstate__(self, state):
- # translate self._sorted back to _iteritems when unpickling
- self.__dict__ = state
- self.enable_sorting(state.pop('_sorted'))
-
- def clear(self):
- """Removes all the values from the trie."""
- self._root = _Node()
-
- def update(self, *args, **kwargs): # pylint: disable=arguments-differ
- """Updates stored values. Works like :meth:`dict.update`."""
- if len(args) > 1:
- raise ValueError('update() takes at most one positional argument, '
- '%d given.' % len(args))
- # We have this here instead of just letting MutableMapping.update()
- # handle things because it will iterate over keys and for each key
- # retrieve the value. With Trie, this may be expensive since the path
- # to the node would have to be walked twice. Instead, we have our own
- # implementation where iteritems() is used avoiding the unnecessary
- # value look-up.
- if args and isinstance(args[0], Trie):
- for key, value in args[0].items():
- self[key] = value
- args = ()
- super(Trie, self).update(*args, **kwargs)
-
- def merge(self, other, overwrite=False):
- """Moves nodes from other trie into this one.
-
- The merging happens at trie structure level and as such is different
- than iterating over items of one trie and setting them in the other
- trie.
-
- The merging may happen between different types of tries resulting in
- different (key, value) pairs in the destination trie compared to the
- source. For example, merging two :class:`pygtrie.StringTrie` objects
- each using different separators will work as if the other trie had
- separator of this trie. Similarly, a :class:`pygtrie.CharTrie` may be
- merged into a :class:`pygtrie.StringTrie` but when keys are read those
- will be joined by the separator. For example:
-
- >>> import pygtrie
- >>> st = pygtrie.StringTrie(separator='.')
- >>> st.merge(pygtrie.StringTrie({'foo/bar': 42}))
- >>> list(st.items())
- [('foo.bar', 42)]
- >>> st.merge(pygtrie.CharTrie({'baz': 24}))
- >>> sorted(st.items())
- [('b.a.z', 24), ('foo.bar', 42)]
-
- Not all tries can be merged into other tries. For example,
- a :class:`pygtrie.StringTrie` may not be merged into
- a :class:`pygtrie.CharTrie` because the latter imposes a requirement for
- each component in the key to be exactly one character while in the
- former components may be arbitrary length.
-
- Note that the other trie is cleared and any references or iterators over
- it are invalidated. To preserve the other trie’s values, it needs to be
- copied first.
-
- Args:
- other: Other trie to move nodes from.
- overwrite: Whether to overwrite existing values in this trie.
- """
- if isinstance(self, type(other)):
- self._merge_impl(self, other, overwrite=overwrite)
- else:
- other._merge_impl(self, other, overwrite=overwrite) # pylint: disable=protected-access
- other.clear()
-
- @classmethod
- def _merge_impl(cls, dst, src, overwrite):
- # pylint: disable=protected-access
- dst._root.merge(src._root, overwrite=overwrite)
-
- def copy(self, __make_copy=lambda x: x):
- """Returns a shallow copy of the object."""
- # pylint: disable=protected-access
- cpy = self.__class__()
- cpy.__dict__ = self.__dict__.copy()
- cpy._root = self._root.copy(__make_copy)
- return cpy
-
- def __copy__(self):
- return self.copy()
-
- def __deepcopy__(self, memo):
- return self.copy(lambda x: _copy.deepcopy(x, memo))
-
- @classmethod
- def fromkeys(cls, keys, value=None):
- """Creates a new trie with given keys set.
-
- This is roughly equivalent to calling the constructor with a ``(key,
- value) for key in keys`` generator.
-
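- For example (illustrated with :class:`pygtrie.CharTrie` so that keys read
- back as strings):
-
- >>> import pygtrie
- >>> t = pygtrie.CharTrie.fromkeys(['cat', 'car'], 0)
- >>> sorted(t.items())
- [('car', 0), ('cat', 0)]
-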
- Args:
- keys: An iterable of keys that should be set in the new trie.
- value: Value to associate with given keys.
-
- Returns:
- A new trie where each key from ``keys`` has been set to the given
- value.
- """
- trie = cls()
- for key in keys:
- trie[key] = value
- return trie
-
- def _get_node(self, key):
- """Returns node for given key. Creates it if requested.
-
- Args:
- key: A key to look for.
-
- Returns:
- ``(node, trace)`` tuple where ``node`` is the node for given key and
- ``trace`` is a list specifying path to reach the node including all
- the encountered nodes. Each element of trace is a ``(step, node)``
- tuple where ``step`` is a step from parent node to given node and
- ``node`` is node on the path. The first element of the path is
- always ``(None, self._root)``.
-
- Raises:
- KeyError: If there is no node for the key.
- """
- node = self._root
- trace = [(None, node)]
- for step in self.__path_from_key(key):
- # pylint thinks node.children is always _NoChildren and thus that
- # we’re assigning None here; pylint: disable=assignment-from-none
- node = node.children.get(step)
- if node is None:
- raise KeyError(key)
- trace.append((step, node))
- return node, trace
-
- def _set_node(self, key, value, only_if_missing=False):
- """Sets value for a given key.
-
- Args:
- key: Key to set value of.
- value: Value to set to.
- only_if_missing: If true, value won't be changed if the key is
- already associated with a value.
-
- Returns:
- The node.
- """
- node = self._root
- for step in self.__path_from_key(key):
- node = node.children.require(node, step)
- if node.value is _EMPTY or not only_if_missing:
- node.value = value
- return node
-
- def _set_node_if_no_prefix(self, key):
- """Sets given key to True but only if none of its prefixes are present.
-
- If value is set, removes all descendants of the node.
-
- This is a method for exclusive use by PrefixSet.
-
- Args:
- key: Key to set value of.
- """
- steps = iter(self.__path_from_key(key))
- node = self._root
- try:
- while node.value is _EMPTY:
- node = node.children.require(node, next(steps))
- except StopIteration:
- node.value = True
- node.children = _EMPTY
-
- def __iter__(self):
- return self.iterkeys()
-
- # pylint: disable=arguments-differ
-
- def iteritems(self, prefix=_EMPTY, shallow=False):
- """Yields all nodes with associated values with given prefix.
-
- Only nodes with values are output. For example::
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo'] = 'Foo'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> t['qux'] = 'Qux'
- >>> sorted(t.items())
- [('foo', 'Foo'), ('foo/bar/baz', 'Baz'), ('qux', 'Qux')]
-
- Items are generated in topological order (i.e. parents before child
- nodes) but the order of siblings is unspecified. At the expense of
- efficiency, the :func:`Trie.enable_sorting` method can turn on
- deterministic ordering of siblings.
-
- With ``prefix`` argument, only items with specified prefix are generated
- (i.e. only given subtrie is traversed) as demonstrated by::
-
- >>> t.items(prefix='foo')
- [('foo', 'Foo'), ('foo/bar/baz', 'Baz')]
-
- With the ``shallow`` argument, if a node has a value associated with it, its
- children are not traversed even if they exist, as can be seen in::
-
- >>> sorted(t.items(shallow=True))
- [('foo', 'Foo'), ('qux', 'Qux')]
-
- Args:
- prefix: Prefix to limit iteration to.
- shallow: Perform a shallow traversal, i.e. do not yield items if
- their prefix has been yielded.
-
- Yields:
- ``(key, value)`` tuples.
-
- Raises:
- KeyError: If ``prefix`` does not match any node.
- """
- node, _ = self._get_node(prefix)
- for path, value in node.iterate(list(self.__path_from_key(prefix)),
- shallow, self._iteritems):
- yield (self._key_from_path(path), value)
-
- def iterkeys(self, prefix=_EMPTY, shallow=False):
- """Yields all keys having associated values with given prefix.
-
- This is equivalent to taking the first element of tuples generated by
- :func:`Trie.iteritems`; see that method for more detailed documentation.
-
- Args:
- prefix: Prefix to limit iteration to.
- shallow: Perform a shallow traversal, i.e. do not yield keys if
- their prefix has been yielded.
-
- Yields:
- All the keys (with given prefix) with associated values in the trie.
-
- Raises:
- KeyError: If ``prefix`` does not match any node.
- """
- for key, _ in self.iteritems(prefix=prefix, shallow=shallow):
- yield key
-
- def itervalues(self, prefix=_EMPTY, shallow=False):
- """Yields all values associated with keys with given prefix.
-
- This is equivalent to taking the second element of tuples generated by
- :func:`Trie.iteritems`; see that method for more detailed documentation.
-
- Args:
- prefix: Prefix to limit iteration to.
- shallow: Perform a shallow traversal, i.e. do not yield values if
- their prefix has been yielded.
-
- Yields:
- All the values associated with keys (with given prefix) in the trie.
-
- Raises:
- KeyError: If ``prefix`` does not match any node.
- """
- node, _ = self._get_node(prefix)
- for _, value in node.iterate(list(self.__path_from_key(prefix)),
- shallow, self._iteritems):
- yield value
-
- def items(self, prefix=_EMPTY, shallow=False):
- """Returns a list of ``(key, value)`` pairs in given subtrie.
-
- This is equivalent to constructing a list from the generator returned by
- :func:`Trie.iteritems`; see that method for more detailed documentation.
- """
- return list(self.iteritems(prefix=prefix, shallow=shallow))
-
- def keys(self, prefix=_EMPTY, shallow=False):
- """Returns a list of all the keys, with given prefix, in the trie.
-
- This is equivalent to constructing a list from the generator returned by
- :func:`Trie.iterkeys`; see that method for more detailed documentation.
- """
- return list(self.iterkeys(prefix=prefix, shallow=shallow))
-
- def values(self, prefix=_EMPTY, shallow=False):
- """Returns a list of values in given subtrie.
-
- This is equivalent to constructing a list from the generator returned by
- :func:`Trie.itervalues`; see that method for more detailed documentation.
- """
- return list(self.itervalues(prefix=prefix, shallow=shallow))
-
- def __len__(self):
- """Returns number of values in a trie.
-
- Note that this method is expensive as it iterates over the whole trie.
- """
- return sum(1 for _ in self.itervalues())
-
- def __bool__(self):
- return self._root.value is not _EMPTY or bool(self._root.children)
-
- __nonzero__ = __bool__
- __hash__ = None
-
- HAS_VALUE = 1
- HAS_SUBTRIE = 2
-
- def has_node(self, key):
- """Returns whether given node is in the trie.
-
- Return value is a bitwise or of ``HAS_VALUE`` and ``HAS_SUBTRIE``
- constants indicating whether the node has a value associated with it and
- whether it is a prefix of another existing key respectively. Both of those are
- independent of each other and all of the four combinations are possible.
- For example::
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo/bar'] = 'Bar'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> t.has_node('qux') == 0
- True
- >>> t.has_node('foo/bar/baz') == pygtrie.Trie.HAS_VALUE
- True
- >>> t.has_node('foo') == pygtrie.Trie.HAS_SUBTRIE
- True
- >>> t.has_node('foo/bar') == (pygtrie.Trie.HAS_VALUE |
- ... pygtrie.Trie.HAS_SUBTRIE)
- True
-
- There are two higher level methods built on top of this one which give
- easier interface for the information. :func:`Trie.has_key` returns
- whether node has a value associated with it and :func:`Trie.has_subtrie`
- checks whether node is a prefix. Continuing previous example::
-
- >>> t.has_key('qux'), t.has_subtrie('qux')
- (False, False)
- >>> t.has_key('foo/bar/baz'), t.has_subtrie('foo/bar/baz')
- (True, False)
- >>> t.has_key('foo'), t.has_subtrie('foo')
- (False, True)
- >>> t.has_key('foo/bar'), t.has_subtrie('foo/bar')
- (True, True)
-
- Args:
- key: A key to look for.
-
- Returns:
- Non-zero if node exists and if it does a bit-field denoting whether
- it has a value associated with it and whether it has a subtrie.
- """
- try:
- node, _ = self._get_node(key)
- except KeyError:
- return 0
- return ((self.HAS_VALUE * (node.value is not _EMPTY)) |
- (self.HAS_SUBTRIE * bool(node.children)))
-
- def has_key(self, key):
- """Indicates whether given key has value associated with it.
-
- See :func:`Trie.has_node` for more detailed documentation.
- """
- return bool(self.has_node(key) & self.HAS_VALUE)
-
- def has_subtrie(self, key):
- """Returns whether given key is a prefix of another key in the trie.
-
- See :func:`Trie.has_node` for more detailed documentation.
- """
- return bool(self.has_node(key) & self.HAS_SUBTRIE)
-
- @staticmethod
- def _slice_maybe(key_or_slice):
- """Checks whether argument is a slice or a plain key.
-
- Args:
- key_or_slice: A key or a slice to test.
-
- Returns:
- ``(key, is_slice)`` tuple. ``is_slice`` indicates whether
- ``key_or_slice`` is a slice and ``key`` is either ``key_or_slice``
- itself (if it's not a slice) or slice's start position.
-
- Raises:
- TypeError: If ``key_or_slice`` is a slice whose stop or step are not
- ``None``. In other words, only ``[key:]`` slices are valid.
- """
- if isinstance(key_or_slice, slice):
- if key_or_slice.stop is not None or key_or_slice.step is not None:
- raise TypeError(key_or_slice)
- return key_or_slice.start, True
- return key_or_slice, False
-
- def __getitem__(self, key_or_slice):
- """Returns value associated with given key or raises KeyError.
-
- When argument is a single key, value for that key is returned (or
- :class:`KeyError` exception is thrown if the node does not exist or has
- no value associated with it).
-
- When argument is a slice, it must be one with only `start` set in which
- case the access is identical to :func:`Trie.itervalues` invocation with
- prefix argument.
-
- Example:
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo/bar'] = 'Bar'
- >>> t['foo/baz'] = 'Baz'
- >>> t['qux'] = 'Qux'
- >>> t['foo/bar']
- 'Bar'
- >>> sorted(t['foo':])
- ['Bar', 'Baz']
- >>> t['foo'] # doctest: +IGNORE_EXCEPTION_DETAIL
- Traceback (most recent call last):
- ...
- ShortKeyError: 'foo'
-
- Args:
- key_or_slice: A key or a slice to look for.
-
- Returns:
- If a single key is passed, a value associated with given key. If
- a slice is passed, a generator of values in specified subtrie.
-
- Raises:
- ShortKeyError: If the key has no value associated with it but is
- a prefix of some key with a value. Note that
- :class:`ShortKeyError` is subclass of :class:`KeyError`.
- KeyError: If key has no value associated with it nor is a prefix of
- an existing key.
- TypeError: If ``key_or_slice`` is a slice but its stop or step are
- not ``None``.
- """
- if self._slice_maybe(key_or_slice)[1]:
- return self.itervalues(key_or_slice.start)
- node, _ = self._get_node(key_or_slice)
- if node.value is _EMPTY:
- raise ShortKeyError(key_or_slice)
- return node.value
-
- def __setitem__(self, key_or_slice, value):
- """Sets value associated with given key.
-
- If `key_or_slice` is a key, simply associate it with given value. If it
- is a slice (which must have `start` set only), it in addition clears any
- subtrie that might have been attached to particular key. For example::
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo/bar'] = 'Bar'
- >>> t['foo/baz'] = 'Baz'
- >>> sorted(t.keys())
- ['foo/bar', 'foo/baz']
- >>> t['foo':] = 'Foo'
- >>> t.keys()
- ['foo']
-
- Args:
- key_or_slice: A key to look for or a slice. If it is a slice, the
- whole subtrie (if present) will be replaced by a single node
- with given value set.
- value: Value to set.
-
- Raises:
- TypeError: If key is a slice whose stop or step are not None.
- """
- key, is_slice = self._slice_maybe(key_or_slice)
- node = self._set_node(key, value)
- if is_slice:
- node.children = _EMPTY
-
- def setdefault(self, key, default=None):
- """Sets value of a given node if not set already. Also returns it.
-
- In contrast to :func:`Trie.__setitem__`, this method does not accept
- slice as a key.
- """
- return self._set_node(key, default, only_if_missing=True).value
-
- @staticmethod
- def _pop_value(trace):
- """Removes value from given node and removes any empty nodes.
-
- Args:
- trace: Trace to the node to cleanup as returned by
- :func:`Trie._get_node`. The last element of the trace denotes
- the node to get value of.
-
- Returns:
- Value which was held in the node at the end of specified trace.
- This may be _EMPTY if the node didn’t have a value in the first
- place.
- """
- i = len(trace) - 1 # len(path) >= 1 since root is always there
- step, node = trace[i]
- value, node.value = node.value, _EMPTY
- while i and node.value is _EMPTY and not node.children:
- i -= 1
- parent_step, parent = trace[i]
- parent.children.delete(parent, step)
- step, node = parent_step, parent
- return value
-
- def pop(self, key, default=_EMPTY):
- """Deletes value associated with given key and returns it.
-
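- Example (an illustrative doctest):
-
- >>> import pygtrie
- >>> t = pygtrie.CharTrie(foo=42)
- >>> t.pop('foo')
- 42
- >>> t.pop('foo', default=0)
- 0
-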
- Args:
- key: A key to look for.
- default: If specified, value that will be returned if given key has
- no value associated with it. If not specified, method will
- throw KeyError in such cases.
-
- Returns:
- Removed value, if key had value associated with it, or ``default``
- (if given).
-
- Raises:
- ShortKeyError: If ``default`` has not been specified and the key has
- no value associated with it but is a prefix of some key with
- a value. Note that :class:`ShortKeyError` is subclass of
- :class:`KeyError`.
- KeyError: If default has not been specified and key has no value
- associated with it nor is a prefix of an existing key.
- """
- try:
- _, trace = self._get_node(key)
- except KeyError:
- if default is not _EMPTY:
- return default
- raise
- value = self._pop_value(trace)
- if value is not _EMPTY:
- return value
- if default is not _EMPTY:
- return default
- raise ShortKeyError()
-
- def popitem(self):
- """Deletes an arbitrary value from the trie and returns it.
-
- There is no guarantee as to which item is deleted and returned, neither
- with respect to its lexicographical nor its topological order.
-
- Returns:
- ``(key, value)`` tuple indicating deleted key.
-
- Raises:
- KeyError: If the trie is empty.
- """
- if not self:
- raise KeyError()
- node = self._root
- trace = [(None, node)]
- while node.value is _EMPTY:
- step, node = next(node.children.iteritems())
- trace.append((step, node))
- key = self._key_from_path((step for step, _ in trace[1:]))
- return key, self._pop_value(trace)
-
- def __delitem__(self, key_or_slice):
- """Deletes value associated with given key or raises KeyError.
-
- If argument is a key, value associated with it is deleted. If the key
- is also a prefix, its descendants are not affected. On the other hand,
- if the argument is a slice (in which case it must have only start set),
- the whole subtrie is removed. For example::
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo'] = 'Foo'
- >>> t['foo/bar'] = 'Bar'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> del t['foo/bar']
- >>> t.keys()
- ['foo', 'foo/bar/baz']
- >>> del t['foo':]
- >>> t.keys()
- []
-
- Args:
- key_or_slice: A key to look for or a slice. If key is a slice, the
- whole subtrie will be removed.
-
- Raises:
- ShortKeyError: If the key has no value associated with it but is
- a prefix of some key with a value. This is not thrown if
- key_or_slice is a slice -- in such cases, the whole subtrie is
- removed. Note that :class:`ShortKeyError` is subclass of
- :class:`KeyError`.
- KeyError: If key has no value associated with it nor is a prefix of
- an existing key.
- TypeError: If key is a slice whose stop or step are not ``None``.
- """
- key, is_slice = self._slice_maybe(key_or_slice)
- node, trace = self._get_node(key)
- if is_slice:
- node.children = _EMPTY
- elif node.value is _EMPTY:
- raise ShortKeyError(key)
- self._pop_value(trace)
-
- class _NoneStep(object):
- """Representation of a non-existent step towards non-existent node."""
-
- __slots__ = ()
-
- def __bool__(self):
- return False
- __nonzero__ = __bool__
-
- def get(self, default=None):
- return default
-
- is_set = has_subtrie = property(__bool__)
- key = value = property(lambda self: None)
-
- def __getitem__(self, index):
- """Makes object appear like a (key, value) tuple.
-
- This is deprecated and for backwards-compatibility only. Prefer
- using ``key`` and ``value`` properties directly.
-
- Args:
- index: Element index to return. Zero for key, one for value.
-
- Returns:
- ``self.key`` if index is ``0``, ``self.value`` if it's ``1``.
- Otherwise raises an IndexError exception.
-
- Raises:
- IndexError: if index is not 0 or 1.
- KeyError: if index is 1 but node has no value assigned.
- """
- if index == 0:
- return self.key
- if index == 1:
- return self.value
- raise IndexError('index out of range')
-
- def __repr__(self):
- return '(None Step)'
-
- class _Step(_NoneStep):
- """Representation of a single step on a path towards particular node."""
-
- __slots__ = ('_trie', '_path', '_pos', '_node', '__key')
-
- def __init__(self, trie, path, pos, node):
- self._trie = trie
- self._path = path
- self._pos = pos
- self._node = node
-
- def __bool__(self):
- return True
- __nonzero__ = __bool__
-
- @property
- def is_set(self):
- """Returns whether the node has value assigned to it."""
- return self._node.value is not _EMPTY
-
- @property
- def has_subtrie(self):
- """Returns whether the node has any children."""
- return bool(self._node.children)
-
- def get(self, default=None):
- """Returns node's value or the default if value is not assigned."""
- v = self._node.value
- return default if v is _EMPTY else v
-
- def set(self, value):
- """Deprecated. Use ``step.value = value`` instead."""
- self._node.value = value
-
- def setdefault(self, value):
- """Assigns value to the node if one is not set then returns it."""
- if self._node.value is _EMPTY:
- self._node.value = value
- return self._node.value
-
- def __repr__(self):
- return '(%r: %r)' % (self.key, self.value)
-
- @property
- def key(self):
- """Returns key of the node."""
- if not hasattr(self, '_Step__key'):
- # pylint:disable=protected-access,attribute-defined-outside-init
- self.__key = self._trie._key_from_path(self._path[:self._pos])
- return self.__key
-
- @property
- def value(self):
- """Returns node's value or raises KeyError."""
- v = self._node.value
- if v is _EMPTY:
- raise ShortKeyError(self.key)
- return v
-
- @value.setter
- def value(self, value):
- self._node.value = value
-
- _NONE_STEP = _NoneStep()
-
- def walk_towards(self, key):
- """Yields nodes on the path to given node.
-
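- Example (an illustrative doctest; with :class:`pygtrie.StringTrie` the
- root step's key reads back as an empty string):
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo/bar'] = 'Bar'
- >>> [step.key for step in t.walk_towards('foo/bar')]
- ['', 'foo', 'foo/bar']
-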
- Args:
- key: Key of the node to look for.
-
- Yields:
- :class:`pygtrie.Trie._Step` objects which can be used to extract or
- set node's value as well as get node's key.
-
- When representing nodes with assigned values, the objects can be
- treated as ``(k, value)`` pairs denoting keys with associated values
- encountered on the way towards the specified key. This is
- deprecated, prefer using ``key`` and ``value`` properties or ``get``
- method of the object.
-
- Raises:
- KeyError: If node with given key does not exist. It's all right if
- a value is not assigned to the node provided it has a child
- node. Because the method is a generator, the exception is
- raised only once a missing node is encountered.
- """
- node = self._root
- path = self.__path_from_key(key)
- pos = 0
- while True:
- yield self._Step(self, path, pos, node)
- if pos == len(path):
- break
- # pylint thinks node.children is always _NoChildren and thus that
- # we’re assigning None here; pylint: disable=assignment-from-none
- node = node.children.get(path[pos])
- if node is None:
- raise KeyError(key)
- pos += 1
-
- def prefixes(self, key):
- """Walks towards the node specified by key and yields all found items.
-
- Example:
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo'] = 'Foo'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> list(t.prefixes('foo/bar/baz/qux'))
- [('foo': 'Foo'), ('foo/bar/baz': 'Baz')]
- >>> list(t.prefixes('does/not/exist'))
- []
-
- Args:
- key: Key to look for.
-
- Yields:
- :class:`pygtrie.Trie._Step` objects which can be used to extract or
- set node's value as well as get node's key.
-
- The objects can be treated as ``(k, value)`` pairs denoting keys
- with associated values encountered on the way towards the specified
- key. This is deprecated, prefer using ``key`` and ``value``
- properties of the object.
- """
- try:
- for step in self.walk_towards(key):
- if step.is_set:
- yield step
- except KeyError:
- pass
-
- def shortest_prefix(self, key):
- """Finds the shortest prefix of a key with a value.
-
- This is roughly equivalent to taking the first object yielded by
- :func:`Trie.prefixes` with additional handling for situations when no
- prefixes are found.
-
- Example:
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo'] = 'Foo'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> t.shortest_prefix('foo/bar/baz/qux')
- ('foo': 'Foo')
- >>> t.shortest_prefix('foo/bar/baz/qux').key
- 'foo'
- >>> t.shortest_prefix('foo/bar/baz/qux').value
- 'Foo'
- >>> t.shortest_prefix('does/not/exist')
- (None Step)
- >>> bool(t.shortest_prefix('does/not/exist'))
- False
-
- Args:
- key: Key to look for.
-
- Returns:
- :class:`pygtrie.Trie._Step` object (which can be used to extract or
- set node's value as well as get node's key), or
- a :class:`pygtrie.Trie._NoneStep` object (which is falsy value
- simulating a _Step with ``None`` key and value) if no prefix is
- found.
-
- The object can be treated as ``(key, value)`` pair denoting key with
- associated value of the prefix. This is deprecated, prefer using
- ``key`` and ``value`` properties of the object.
- """
- return next(self.prefixes(key), self._NONE_STEP)
-
- def longest_prefix(self, key):
- """Finds the longest prefix of a key with a value.
-
- This is roughly equivalent to taking the last object yielded by
- :func:`Trie.prefixes` with additional handling for situations when no
- prefixes are found.
-
- Example:
-
- >>> import pygtrie
- >>> t = pygtrie.StringTrie()
- >>> t['foo'] = 'Foo'
- >>> t['foo/bar/baz'] = 'Baz'
- >>> t.longest_prefix('foo/bar/baz/qux')
- ('foo/bar/baz': 'Baz')
- >>> t.longest_prefix('foo/bar/baz/qux').key
- 'foo/bar/baz'
- >>> t.longest_prefix('foo/bar/baz/qux').value
- 'Baz'
- >>> t.longest_prefix('does/not/exist')
- (None Step)
- >>> bool(t.longest_prefix('does/not/exist'))
- False
-
- Args:
- key: Key to look for.
-
- Returns:
- :class:`pygtrie.Trie._Step` object (which can be used to extract or
- set node's value as well as get node's key), or
- a :class:`pygtrie.Trie._NoneStep` object (which is falsy value
- simulating a _Step with ``None`` key and value) if no prefix is
- found.
-
- The object can be treated as ``(key, value)`` pair denoting key with
- associated value of the prefix. This is deprecated, prefer using
- ``key`` and ``value`` properties of the object.
- """
- ret = self._NONE_STEP
- for ret in self.prefixes(key):
- pass
- return ret
-
- def strictly_equals(self, other):
- """Checks whether tries are equal with the same structure.
-
- This is stricter comparison than the one performed by equality operator.
- It not only requires for keys and values to be equal but also for the
- two tries to be of the same type and have the same structure.
-
- For example, for two :class:`pygtrie.StringTrie` objects to be equal,
- they need to have the same structure as well as the same separator as
- seen below:
-
- >>> import pygtrie
- >>> t0 = pygtrie.StringTrie({'foo/bar': 42}, separator='/')
- >>> t1 = pygtrie.StringTrie({'foo.bar': 42}, separator='.')
- >>> t0.strictly_equals(t1)
- False
-
- >>> t0 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='/')
- >>> t1 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='.')
- >>> t0 == t1
- True
- >>> t0.strictly_equals(t1)
- False
-
- Args:
- other: Other trie to compare to.
-
- Returns:
- Whether the two tries are the same type and have the same structure.
- """
- if self is other:
- return True
- if type(self) != type(other):
- return False
- result = self._eq_impl(other)
- if result is NotImplemented:
- return False
- else:
- return result
-
- def __eq__(self, other):
- """Compares this trie’s mapping with another mapping.
-
- Note that this method doesn’t take trie’s structure into consideration.
- What matters is whether keys and values in both mappings are the same.
- This may lead to unexpected results, for example:
-
- >>> import pygtrie
- >>> t0 = pygtrie.StringTrie({'foo/bar': 42}, separator='/')
- >>> t1 = pygtrie.StringTrie({'foo.bar': 42}, separator='.')
- >>> t0 == t1
- False
-
- >>> t0 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='/')
- >>> t1 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='.')
- >>> t0 == t1
- True
-
- >>> t0 = pygtrie.Trie({'foo': 42})
- >>> t1 = pygtrie.CharTrie({'foo': 42})
- >>> t0 == t1
- False
-
- This behaviour is required to maintain consistency with Mapping
- interface and its __eq__ method. For example, this implementation
- maintains transitivity of the comparison:
-
- >>> t0 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='/')
- >>> d = {'foo/bar.baz': 42}
- >>> t1 = pygtrie.StringTrie({'foo/bar.baz': 42}, separator='.')
- >>> t0 == d
- True
- >>> d == t1
- True
- >>> t0 == t1
- True
-
- >>> t0 = pygtrie.Trie({'foo': 42})
- >>> d = {'foo': 42}
- >>> t1 = pygtrie.CharTrie({'foo': 42})
- >>> t0 == d
- False
- >>> d == t1
- True
- >>> t0 == t1
- False
-
- Args:
- other: Other object to compare to.
-
- Returns:
- ``NotImplemented`` if this method does not know how to perform the
- comparison or a ``bool`` denoting whether the two objects are equal
- or not.
- """
- if self is other:
- return True
- if type(other) == type(self):
- result = self._eq_impl(other)
- if result is not NotImplemented:
- return result
- return super(Trie, self).__eq__(other)
-
- def _eq_impl(self, other):
- return self._root.equals(other._root) # pylint: disable=protected-access
-
- def __ne__(self, other):
- return not self == other
-
- def _str_items(self, fmt='%s: %s'):
- return ', '.join(fmt % item for item in self.iteritems())
-
- def __str__(self):
- return '%s(%s)' % (type(self).__name__, self._str_items())
-
- def __repr__(self):
- return '%s([%s])' % (type(self).__name__, self._str_items('(%r, %r)'))
-
- def __path_from_key(self, key):
- """Converts a user visible key object to internal path representation.
-
- Args:
- key: User supplied key or ``_EMPTY``.
-
- Returns:
- An empty tuple if ``key`` was ``_EMPTY``, otherwise whatever
- :func:`Trie._path_from_key` returns.
-
- Raises:
- TypeError: If ``key`` is of invalid type.
- """
- return () if key is _EMPTY else self._path_from_key(key)
-
- def _path_from_key(self, key):
- """Converts a user visible key object to internal path representation.
-
- The default implementation simply returns key.
-
- Args:
- key: User supplied key.
-
- Returns:
- A path, which is an iterable of steps. Each step must be hashable.
-
- Raises:
- TypeError: If key is of invalid type.
- """
- return key
-
- def _key_from_path(self, path):
- """Converts an internal path into a user visible key object.
-
- The default implementation creates a tuple from the path.
-
- Args:
- path: Internal path representation.
- Returns:
- A user visible key object.
- """
- return tuple(path)
-
- def traverse(self, node_factory, prefix=_EMPTY):
- """Traverses the tree using node_factory object.
-
- node_factory is a callable which accepts (path_conv, path, children,
- value=...) arguments, where path_conv is a lambda converting path
- representation to key, path is the path to this node, children is an
- iterable of children nodes constructed by node_factory, optional value
- is the value associated with the path.
-
- node_factory's children argument is an iterator which has a few
- consequences:
-
- * To traverse into node's children, the object must be iterated over.
- This can be accomplished by a simple ``children = list(children)``
- statement.
- * Ignoring the argument allows node_factory to stop the traversal from
- going into the children of the node. In other words, whole subtries
- can be removed from traversal if node_factory chooses so.
- * If children is stored as is (i.e. as an iterator) when it is iterated
- over later on it may see an inconsistent state of the trie if it has
- changed between invocation of this method and the iteration.
-
- However, to allow constant-time determination whether the node has
- children or not, the iterator implements bool conversion such that
- ``has_children = bool(children)`` will tell whether node has children
- without iterating over them. (Note that ``bool(children)`` will
- continue returning ``True`` even if the iterator has been iterated
- over).
-
- :func:`Trie.traverse` has two advantages over :func:`Trie.iteritems` and
- similar methods:
-
- 1. it allows subtries to be skipped completely when going through the
- list of nodes based on the property of the parent node; and
-
- 2. it represents structure of the trie directly making it easy to
- convert structure into a different representation.
-
- For example, the below snippet prints all files in current directory
- counting how many HTML files were found but ignores hidden files and
- directories (i.e. those whose names start with a dot)::
-
- import os
- import pygtrie
-
- t = pygtrie.StringTrie(separator=os.sep)
-
- # Construct a trie with all files in current directory and all
- # of its sub-directories. Files get set a True value.
- # Directories are represented implicitly by being prefixes of
- # files.
- for root, _, files in os.walk('.'):
- for name in files: t[os.path.join(root, name)] = True
-
- def traverse_callback(path_conv, path, children, is_file=False):
- if path and path[-1] != '.' and path[-1][0] == '.':
- # Ignore hidden directory (but accept root node and '.')
- return 0
- elif is_file:
- print(path_conv(path))
- return int(path[-1].endswith('.html'))
- else:
- # Otherwise, it's a directory. Traverse into children.
- return sum(children)
-
- print(t.traverse(traverse_callback))
-
- As documented, ignoring the children argument causes subtrie to be
- omitted and not walked into.
-
- In the next example, the trie is converted to a tree representation
- where child nodes include a pointer to their parent. As before, hidden
- files and directories are ignored::
-
- import os
- import pygtrie
-
- t = pygtrie.StringTrie(separator=os.sep)
- for root, _, files in os.walk('.'):
- for name in files: t[os.path.join(root, name)] = True
-
- class File(object):
- def __init__(self, name):
- self.name = name
- self.parent = None
-
- class Directory(File):
- def __init__(self, name, children):
- super(Directory, self).__init__(name)
- self._children = children
- for child in children:
- child.parent = self
-
- def traverse_callback(path_conv, path, children, is_file=False):
- if not path or path[-1] == '.' or path[-1][0] != '.':
- if is_file:
- return File(path[-1])
- children = filter(None, children)
- return Directory(path[-1] if path else '', children)
-
- root = t.traverse(traverse_callback)
-
- Note: Unlike iterators, when used on a deep trie, the traverse method is
- prone to raising a RuntimeError exception when Python's maximum recursion
- depth is reached. This can be addressed by not iterating over children
- inside of the node_factory. For example, the below code converts a trie
- into an undirected graph using adjacency list representation::
-
- def undirected_graph_from_trie(t):
- '''Converts trie into a graph and returns its nodes.'''
-
- Node = collections.namedtuple('Node', 'path neighbours')
-
- class Builder(object):
- def __init__(self, path_conv, path, children, _=None):
- self.node = Node(path_conv(path), [])
- self.children = children
- self.parent = None
-
- def build(self, queue):
- for builder in self.children:
- builder.parent = self.node
- queue.append(builder)
- if self.parent:
- self.parent.neighbours.append(self.node)
- self.node.neighbours.append(self.parent)
- return self.node
-
- nodes = [t.traverse(Builder)]
- i = 0
- while i < len(nodes):
- nodes[i] = nodes[i].build(nodes)
- i += 1
- return nodes
-
- Args:
- node_factory: Makes opaque objects from the keys and values of the
- trie.
- prefix: Prefix for node to start traversal, by default starts at
- root.
-
- Returns:
- Node object constructed by node_factory corresponding to the root
- node.
- """
- node, _ = self._get_node(prefix)
- return node.traverse(node_factory, self._key_from_path,
- list(self.__path_from_key(prefix)),
- self._iteritems)
-
- traverse.uses_bool_convertible_children = True
-
-class CharTrie(Trie):
- """A variant of a :class:`pygtrie.Trie` which accepts strings as keys.
-
- The only difference between :class:`pygtrie.CharTrie` and
- :class:`pygtrie.Trie` is that when :class:`pygtrie.CharTrie` returns keys
- back to the client (for instance when :func:`Trie.keys` method is called),
- those keys are returned as strings.
-
-    A common example where this class can be used is a dictionary of words in
-    a natural language. For example::
-
- >>> import pygtrie
- >>> t = pygtrie.CharTrie()
- >>> t['wombat'] = True
- >>> t['woman'] = True
- >>> t['man'] = True
- >>> t['manhole'] = True
- >>> t.has_subtrie('wo')
- True
- >>> t.has_key('man')
- True
- >>> t.has_subtrie('man')
- True
- >>> t.has_subtrie('manhole')
- False
- """
-
- def _key_from_path(self, path):
- return ''.join(path)
-
-
-class StringTrie(Trie):
- """:class:`pygtrie.Trie` variant accepting strings with a separator as keys.
-
- The trie accepts strings as keys which are split into components using
- a separator specified during initialisation (forward slash, i.e. ``/``, by
- default).
-
-    A common example where this class can be used is when keys are paths. For
-    example, it could map from a path to a request handler::
-
- import pygtrie
-
- def handle_root(): pass
- def handle_admin(): pass
- def handle_admin_images(): pass
-
- handlers = pygtrie.StringTrie()
- handlers[''] = handle_root
- handlers['/admin'] = handle_admin
- handlers['/admin/images'] = handle_admin_images
-
- request_path = '/admin/images/foo'
-
- handler = handlers.longest_prefix(request_path)
- """
-
- def __init__(self, *args, **kwargs): # pylint: disable=differing-param-doc
- """Initialises the trie.
-
- Except for a ``separator`` named argument, all other arguments are
- interpreted the same way :func:`Trie.update` interprets them.
-
- Args:
- *args: Passed to super class initialiser.
- **kwargs: Passed to super class initialiser.
- separator: A separator to use when splitting keys into paths used by
- the trie. "/" is used if this argument is not specified. This
- named argument is not specified on the function's prototype
- because of Python's limitations.
-
- Raises:
- TypeError: If ``separator`` is not a string.
- ValueError: If ``separator`` is empty.
- """
- separator = kwargs.pop('separator', '/')
- if not isinstance(separator, getattr(__builtins__, 'basestring', str)):
- raise TypeError('separator must be a string')
- if not separator:
- raise ValueError('separator can not be empty')
- self._separator = separator
- super(StringTrie, self).__init__(*args, **kwargs)
-
- @classmethod
- def fromkeys(cls, keys, value=None, separator='/'): # pylint: disable=arguments-differ
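-        """Creates a trie from ``keys``, each mapped to ``value``.
-
-        The ``separator`` argument is forwarded to the class initialiser.
-        """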
- trie = cls(separator=separator)
- for key in keys:
- trie[key] = value
- return trie
-
- @classmethod
- def _merge_impl(cls, dst, src, overwrite):
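-        """Merges ``src`` into ``dst``, which must itself be a StringTrie."""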
- if not isinstance(dst, StringTrie):
- raise TypeError('%s cannot be merged into a %s' % (
- type(src).__name__, type(dst).__name__))
- super(StringTrie, cls)._merge_impl(dst, src, overwrite=overwrite)
-
- def __str__(self):
- if not self:
- return '%s(separator=%s)' % (type(self).__name__, self._separator)
- return '%s(%s, separator=%s)' % (
- type(self).__name__, self._str_items(), self._separator)
-
- def __repr__(self):
- return '%s([%s], separator=%r)' % (
- type(self).__name__, self._str_items('(%r, %r)'), self._separator)
-
- def _eq_impl(self, other):
- # If separators differ, fall back to slow generic comparison. This is
-        # because we want StringTrie(foo/bar.baz: 42, separator=/) to compare
-        # equal to StringTrie(foo/bar.baz: 42, separator=.) even though they
-        # have different trie structures.
- if self._separator != other._separator: # pylint: disable=protected-access
- return NotImplemented
- return super(StringTrie, self)._eq_impl(other)
-
- def _path_from_key(self, key):
- return key.split(self._separator)
-
- def _key_from_path(self, path):
- return self._separator.join(path)
-
-
-class PrefixSet(_abc.MutableSet):
- """A set of prefixes.
-
-    :class:`pygtrie.PrefixSet` works similarly to a normal set except that it
-    is said to contain a key if the key or its prefix is stored in the set.
-    For instance, if "foo" is added to the set, the set contains "foo" as well
-    as "foobar".
-
- The set supports addition of elements but does *not* support removal of
- elements. This is because there's no obvious consistent and intuitive
- behaviour for element deletion.
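-
-    A minimal sketch of these semantics (the exact outputs assume the default
-    :class:`pygtrie.Trie` factory)::
-
-        >>> import pygtrie
-        >>> ps = pygtrie.PrefixSet()
-        >>> ps.add('foo')
-        >>> 'foo' in ps
-        True
-        >>> 'foobar' in ps
-        True
-        >>> 'fo' in ps
-        False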
- """
-
- def __init__(self, iterable=(), factory=Trie, **kwargs):
- """Initialises the prefix set.
-
- Args:
- iterable: A sequence of keys to add to the set.
- factory: A function used to create a trie used by the
- :class:`pygtrie.PrefixSet`.
- kwargs: Additional keyword arguments passed to the factory function.
- """
- super(PrefixSet, self).__init__()
- self._trie = factory(**kwargs)
- for key in iterable:
- self.add(key)
-
- def copy(self):
- """Returns a shallow copy of the object."""
- return self.__copy__()
-
- def __copy__(self):
- # pylint: disable=protected-access
- cpy = self.__class__()
- cpy.__dict__ = self.__dict__.copy()
- cpy._trie = self._trie.__copy__()
- return cpy
-
- def __deepcopy__(self, memo):
- # pylint: disable=protected-access
- cpy = self.__class__()
- cpy.__dict__ = self.__dict__.copy()
- cpy._trie = self._trie.__deepcopy__(memo)
- return cpy
-
- def clear(self):
- """Removes all keys from the set."""
- self._trie.clear()
-
- def __contains__(self, key):
- """Checks whether set contains key or its prefix."""
- return bool(self._trie.shortest_prefix(key)[1])
-
- def __iter__(self):
-        """Return an iterator over all prefixes in the set.
-
-        See the :func:`PrefixSet.iter` method for more info.
- """
- return self._trie.iterkeys()
-
- def iter(self, prefix=_EMPTY):
- """Iterates over all keys in the set optionally starting with a prefix.
-
- Since a key does not have to be explicitly added to the set to be an
- element of the set, this method does not iterate over all possible keys
- that the set contains, but only over the shortest set of prefixes of all
- the keys the set contains.
-
-        For example, if "foo" has been added to the set, the set also contains
-        "foobar", but this method will *not* iterate over "foobar".
-
-        If the ``prefix`` argument is given, the method iterates only over keys
-        with the given prefix. The keys yielded when a prefix is given do not
-        have to be a subset (in the mathematical sense) of the keys yielded
-        when there is no prefix. This happens if the set contains a prefix of
-        the given prefix.
-
-        For example, if only "foo" has been added to the set, the iter method
-        called with no arguments will yield "foo" only. However, when called
-        with a "foobar" argument, it will yield "foobar" only.
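-
-        A short sketch of both cases (using a :class:`pygtrie.CharTrie` factory
-        so that keys come back as strings)::
-
-            >>> import pygtrie
-            >>> ps = pygtrie.PrefixSet(['foo'], factory=pygtrie.CharTrie)
-            >>> list(ps.iter())
-            ['foo']
-            >>> list(ps.iter('foobar'))
-            ['foobar']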
- """
- if prefix is _EMPTY:
- return iter(self)
- if self._trie.has_node(prefix):
- return self._trie.iterkeys(prefix=prefix)
- if prefix in self:
- # Make sure the type of returned keys is consistent.
- # pylint: disable=protected-access
- return (
- self._trie._key_from_path(self._trie._path_from_key(prefix)),)
- return ()
-
- def __len__(self):
-        """Returns the number of keys stored in the set.
-
- Since a key does not have to be explicitly added to the set to be an
-        element of the set, this method does not count all possible keys that
-        the set contains (since that would be infinite), but only the shortest
-        set of prefixes of all the keys the set contains.
-
-        For example, if "foo" has been added to the set, the set also contains
-        "foobar", but this method will *not* count "foobar".
-
- """
- return len(self._trie)
-
- def add(self, value):
- """Adds given value to the set.
-
-        If the set already contains a prefix of the value being added, this
- operation has no effect. If the value being added is a prefix of some
- existing values in the set, those values are deleted and replaced by
- a single entry for the value being added.
-
-        For example, if the set contains the value "foo", adding a value
-        "foobar" does not change anything. On the other hand, if the set
-        contains the values "foobar" and "foobaz", adding a value "foo" will
-        replace those two values with a single value "foo".
-
-        This makes a difference when iterating over the values or counting the
-        number of values. Counter-intuitively, adding a value can *decrease*
-        the size of the set.
-
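-        A minimal illustration of the behaviour described above (lengths assume
-        the default :class:`pygtrie.Trie` factory)::
-
-            >>> import pygtrie
-            >>> ps = pygtrie.PrefixSet(['foobar', 'foobaz'])
-            >>> len(ps)
-            2
-            >>> ps.add('foo')
-            >>> len(ps)
-            1
-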
- Args:
- value: Value to add.
- """
- # We're friends with Trie; pylint: disable=protected-access
- self._trie._set_node_if_no_prefix(value)
-
- def discard(self, value):
- """Raises NotImplementedError."""
- raise NotImplementedError(
- 'Removing values from PrefixSet is not implemented.')
-
- def remove(self, value):
- """Raises NotImplementedError."""
- raise NotImplementedError(
- 'Removing values from PrefixSet is not implemented.')
-
- def pop(self):
- """Raises NotImplementedError."""
- raise NotImplementedError(
- 'Removing values from PrefixSet is not implemented.')
diff --git a/contrib/python/pygtrie/py3/ya.make b/contrib/python/pygtrie/py3/ya.make
deleted file mode 100644
index ecb0cffd62..0000000000
--- a/contrib/python/pygtrie/py3/ya.make
+++ /dev/null
@@ -1,26 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(2.5.0)
-
-LICENSE(Apache-2.0)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- pygtrie.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/pygtrie/py3/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- tests
-)
diff --git a/contrib/python/pygtrie/ya.make b/contrib/python/pygtrie/ya.make
deleted file mode 100644
index 03fb697fac..0000000000
--- a/contrib/python/pygtrie/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/pygtrie/py2)
-ELSE()
- PEERDIR(contrib/python/pygtrie/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
diff --git a/contrib/python/pyre2/py2/AUTHORS b/contrib/python/pyre2/py2/AUTHORS
deleted file mode 100644
index 0f1a37f2b4..0000000000
--- a/contrib/python/pyre2/py2/AUTHORS
+++ /dev/null
@@ -1,12 +0,0 @@
-All contributors own the copyright to their own contributions, but agree
-to release each of their contributions under the BSD license included
-in this software.
-
-Michael Axiak <mike@axiak.net>
-
-Contributors
-============
-
-Alec Berryman <alec@thened.net>
-Israel Tsadok <itsadok@gmail.com>
-Alex Willmer <alex@moreati.org.uk>
diff --git a/contrib/python/pyre2/py2/LICENSE b/contrib/python/pyre2/py2/LICENSE
deleted file mode 100644
index 803fbbcd9f..0000000000
--- a/contrib/python/pyre2/py2/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) 2010, Michael Axiak <mike@axiak.net>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyre2/py2/README.rst b/contrib/python/pyre2/py2/README.rst
deleted file mode 100644
index 3f46ff6eaf..0000000000
--- a/contrib/python/pyre2/py2/README.rst
+++ /dev/null
@@ -1,250 +0,0 @@
-===============================================================
- pyre2: Python RE2 wrapper for linear-time regular expressions
-===============================================================
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Build/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Build
- :alt: Build CI Status
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Release/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Release
- :alt: Release CI Status
-
-.. image:: https://img.shields.io/github/v/tag/andreasvc/pyre2?color=green&include_prereleases&label=latest%20release
- :target: https://github.com/andreasvc/pyre2/releases
- :alt: GitHub tag (latest SemVer, including pre-release)
-
-.. image:: https://badge.fury.io/py/pyre2.svg
- :target: https://badge.fury.io/py/pyre2
- :alt: Pypi version
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Conda/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Conda
- :alt: Conda CI Status
-
-.. image:: https://img.shields.io/github/license/andreasvc/pyre2
- :target: https://github.com/andreasvc/pyre2/blob/master/LICENSE
- :alt: License
-
-.. image:: https://img.shields.io/badge/python-3.6+-blue.svg
- :target: https://www.python.org/downloads/
- :alt: Python version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/version.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/platforms.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: platforms
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/downloads.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: downloads
-
-
-.. contents:: Table of Contents
- :depth: 2
- :backlinks: top
-
-
-Summary
-=======
-
-pyre2 is a Python extension that wraps
-`Google's RE2 regular expression library <https://github.com/google/re2>`_.
-The RE2 engine compiles (strictly) regular expressions to
-deterministic finite automata, which guarantees linear-time behavior.
-
-Intended as a drop-in replacement for ``re``. Unicode is supported by encoding
-to UTF-8, and bytes strings are treated as UTF-8 when the UNICODE flag is given.
-For best performance, work with UTF-8 encoded bytes strings.
-
-Installation
-============
-
-Normal usage for Linux/Mac/Windows::
-
- $ pip install pyre2
-
-Compiling from source
----------------------
-
-Requirements for building the C++ extension from the repo source:
-
-* A build environment with ``gcc`` or ``clang`` (e.g. ``sudo apt-get install build-essential``)
-* Build tools and libraries: RE2, pybind11, and cmake installed in the build
- environment.
-
- + On Ubuntu/Debian: ``sudo apt-get install build-essential cmake ninja-build python3-dev cython3 pybind11-dev libre2-dev``
- + On Gentoo, install dev-util/cmake, dev-python/pybind11, and dev-libs/re2
- + For a venv you can install the pybind11, cmake, and cython packages from PyPI
-
-On MacOS, use the ``brew`` package manager::
-
- $ brew install -s re2 pybind11
-
-On Windows use the ``vcpkg`` package manager::
-
- $ vcpkg install re2:x64-windows pybind11:x64-windows
-
-You can pass some cmake environment variables to alter the build type or
-pass a toolchain file (the latter is required on Windows) or specify the
-cmake generator. For example::
-
- $ CMAKE_GENERATOR="Unix Makefiles" CMAKE_TOOLCHAIN_FILE=clang_toolchain.cmake tox -e deploy
-
-For development, get the source::
-
- $ git clone git://github.com/andreasvc/pyre2.git
- $ cd pyre2
- $ make install
-
-
-Platform-agnostic building with conda
--------------------------------------
-
-An alternative to the above is provided via the `conda`_ recipe (use the
-`miniconda installer`_ if you don't have ``conda`` installed already).
-
-
-.. _conda: https://anaconda.org/conda-forge/pyre2
-.. _miniconda installer: https://docs.conda.io/en/latest/miniconda.html
-
-
-Backwards Compatibility
-=======================
-
-The stated goal of this module is to be a drop-in replacement for ``re``, i.e.::
-
- try:
- import re2 as re
- except ImportError:
- import re
-
-That being said, there are features of the ``re`` module that this module may
-never have; these will be handled through fallback to the original ``re`` module:
-
-* lookahead assertions ``(?!...)``
-* backreferences (``\\n`` in search pattern)
-* ``\W`` and ``\S`` are not supported inside character classes
-
-On the other hand, unicode character classes are supported (e.g., ``\p{Greek}``).
-Syntax reference: https://github.com/google/re2/wiki/Syntax
-
-However, there are times when you may want to be notified of a failover. The
-function ``set_fallback_notification`` determines the behavior in these cases::
-
- try:
- import re2 as re
- except ImportError:
- import re
- else:
- re.set_fallback_notification(re.FALLBACK_WARNING)
-
-``set_fallback_notification`` takes three values:
-``re.FALLBACK_QUIETLY`` (default), ``re.FALLBACK_WARNING`` (raise a warning),
-and ``re.FALLBACK_EXCEPTION`` (raise an exception).
-
-Documentation
-=============
-
-Consult the docstrings in the source code or interactively
-through ipython or ``pydoc re2`` etc.
-
-Unicode Support
-===============
-
-Python ``bytes`` and ``unicode`` strings are fully supported, but note that
-``RE2`` works with UTF-8 encoded strings under the hood, which means that
-``unicode`` strings need to be encoded and decoded back and forth.
-There are two important factors:
-
-* whether a ``unicode`` pattern and search string is used (will be encoded to UTF-8 internally)
-* the ``UNICODE`` flag: whether operators such as ``\w`` recognize Unicode characters.
-
-To avoid the overhead of encoding and decoding to UTF-8, it is possible to pass
-UTF-8 encoded bytes strings directly but still treat them as ``unicode``::
-
- In [18]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- Out[18]: ['M', '\xc3\xb6', 't', 'l', 'e', 'y', 'C', 'r', '\xc3\xbc', 'e']
- In [19]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'))
- Out[19]: ['M', 't', 'l', 'e', 'y', 'C', 'r', 'e']
-
-However, note that the indices in ``Match`` objects will refer to the bytes string.
-The indices of the match in the ``unicode`` string could be computed by
-decoding/encoding, but this is done automatically and more efficiently if you
-pass the ``unicode`` string::
-
- >>> re2.search(u'ü'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- <re2.Match object; span=(10, 12), match='\xc3\xbc'>
- >>> re2.search(u'ü', u'Mötley Crüe', flags=re2.UNICODE)
- <re2.Match object; span=(9, 10), match=u'\xfc'>
-
-Finally, if you want to match bytes without regard for Unicode characters,
-pass bytes strings and leave out the ``UNICODE`` flag (this will cause Latin 1
-encoding to be used with ``RE2`` under the hood)::
-
- >>> re2.findall(br'.', b'\x80\x81\x82')
- ['\x80', '\x81', '\x82']
-
-Performance
-===========
-
-Performance is of course the point of this module, so it better perform well.
-Regular expressions vary widely in complexity, and the salient feature of ``RE2`` is
-that it behaves well asymptotically. This being said, for very simple substitutions,
-I've found that occasionally python's regular ``re`` module is actually slightly faster.
-However, when the ``re`` module gets slow, it gets *really* slow, while this module
-buzzes along.
-
-In the below example, I'm running the data against 8MB of text from the colossal Wikipedia
-XML file. I'm running them multiple times, being careful to use the ``timeit`` module.
-To see more details, please see the `performance script <http://github.com/andreasvc/pyre2/tree/master/tests/performance.py>`_.
-
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Test |Description |# total runs|``re`` time(s)|``re2`` time(s)|% ``re`` time|``regex`` time(s)|% ``regex`` time|
-+=================+===========================================================================+============+==============+===============+=============+=================+================+
-|Findall URI|Email|Find list of '([a-zA-Z][a-zA-Z0-9]*)://([^ /]+)(/[^ ]*)?|([^ @]+)@([^ @]+)'|2 |6.262 |0.131 |2.08% |5.119 |2.55% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Replace WikiLinks|This test replaces links of the form [[Obama|Barack_Obama]] to Obama. |100 |4.374 |0.815 |18.63% |1.176 |69.33% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Remove WikiLinks |This test splits the data by the <page> tag. |100 |4.153 |0.225 |5.43% |0.537 |42.01% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-
-Feel free to add more speed tests to the bottom of the script and send a pull request my way!
-
-Current Status
-==============
-
-The tests show the following differences with Python's ``re`` module:
-
-* The ``$`` operator in Python's ``re`` matches twice if the string ends
- with ``\n``. This can be simulated using ``\n?$``, except when doing
- substitutions.
-* The ``pyre2`` module and Python's ``re`` may behave differently with nested groups.
- See ``tests/test_emptygroups.txt`` for the examples.
-
-Please report any further issues with ``pyre2``.
-
-Tests
-=====
-
-If you would like to help, one thing that would be very useful
-is writing comprehensive tests for this. It's actually really easy:
-
-* Come up with regular expression problems using the regular python 're' module.
-* Write a session in Python traceback format (see this `Example <http://github.com/andreasvc/pyre2/blob/master/tests/test_search.txt>`_).
-* Replace your ``import re`` with ``import re2 as re``.
-* Save it as ``test_<name>.txt`` in the tests directory. You can comment on it however you like and indent the code with 4 spaces. A minimal sketch follows this list.
-
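-For instance, a minimal test file (the name ``test_digits.txt`` is just an
-illustration) could contain nothing more than::
-
-    >>> import re2 as re
-    >>> re.search(r'\d+', 'abc123').group()
-    '123'
-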
-
-Credits
-=======
-This code builds on the following projects (in chronological order):
-
-- Google's RE2 regular expression library: https://github.com/google/re2
-- Facebook's pyre2 github repository: http://github.com/facebook/pyre2/
-- Mike Axiak's Cython version of this: http://github.com/axiak/pyre2/ (seems not actively maintained)
-- This fork adds Python 3 support and other improvements.
-
diff --git a/contrib/python/pyre2/py2/tests/test_charliterals.txt b/contrib/python/pyre2/py2/tests/test_charliterals.txt
deleted file mode 100644
index 2eaea128a3..0000000000
--- a/contrib/python/pyre2/py2/tests/test_charliterals.txt
+++ /dev/null
@@ -1,47 +0,0 @@
- >>> import re2 as re
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
-character literals:
-
- >>> i = 126
- >>> re.compile(r"\%03o" % i)
- re2.compile('\\176')
- >>> re.compile(r"\%03o" % i)._dump_pattern()
- '\\176'
- >>> re.match(r"\%03o" % i, chr(i)) is None
- False
- >>> re.match(r"\%03o0" % i, chr(i) + "0") is None
- False
- >>> re.match(r"\%03o8" % i, chr(i) + "8") is None
- False
- >>> re.match(r"\x%02x" % i, chr(i)) is None
- False
- >>> re.match(r"\x%02x0" % i, chr(i) + "0") is None
- False
- >>> re.match(r"\x%02xz" % i, chr(i) + "z") is None
- False
- >>> re.match("\911", "") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- re.error: invalid escape sequence: \9
-
-character class literals:
-
- >>> re.match(r"[\%03o]" % i, chr(i)) is None
- False
- >>> re.match(r"[\%03o0]" % i, chr(i) + "0") is None
- False
- >>> re.match(r"[\%03o8]" % i, chr(i) + "8") is None
- False
- >>> re.match(r"[\x%02x]" % i, chr(i)) is None
- False
- >>> re.match(r"[\x%02x0]" % i, chr(i) + "0") is None
- False
- >>> re.match(r"[\x%02xz]" % i, chr(i) + "z") is None
- False
- >>> re.match("[\911]", "") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- re.error: invalid escape sequence: \9
-
diff --git a/contrib/python/pyre2/py2/tests/test_count.txt b/contrib/python/pyre2/py2/tests/test_count.txt
deleted file mode 100644
index ce3525adc5..0000000000
--- a/contrib/python/pyre2/py2/tests/test_count.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-count tests
-===========
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-This one is from http://docs.python.org/library/re.html?#finding-all-adverbs:
-
- >>> re2.count(r"\w+ly", "He was carefully disguised but captured quickly by police.")
- 2
-
-Groups should not affect count():
-
- >>> re2.count(r"(\w+)=(\d+)", "foo=1,foo=2")
- 2
- >>> re2.count(r"(\w)\w", "fx")
- 1
-
-Zero matches:
-
- >>> re2.count("(f)", "gggg")
- 0
-
-A pattern matching an empty string:
-
- >>> re2.count(".*", "foo")
- 2
-
- >>> re2.count("", "foo")
- 4
-
-contains tests
-==============
-
- >>> re2.contains('a', 'bbabb')
- True
- >>> re2.contains('a', 'bbbbb')
- False
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_emptygroups.txt b/contrib/python/pyre2/py2/tests/test_emptygroups.txt
deleted file mode 100644
index 424c8ba25e..0000000000
--- a/contrib/python/pyre2/py2/tests/test_emptygroups.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Empty/unused groups
-===================
-
- >>> import re
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-Unused vs. empty group:
-
- >>> re.search( '(foo)?((.*).)(bar)?', 'a').groups()
- (None, 'a', '', None)
- >>> re2.search('(foo)?((.*).)(bar)?', 'a').groups()
- (None, 'a', '', None)
-
- >>> re.search(r'((.*)?.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)?.)', 'a').groups()
- ('a', '')
- >>> re.search(r'((.*)+.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)+.)', 'a').groups()
- ('a', '')
-
-The following show different behavior for re and re2:
-
- >>> re.search(r'((.*)*.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)*.)', 'a').groups()
- ('a', None)
-
- >>> re.search(r'((.*)*.)', 'Hello').groups()
- ('Hello', '')
- >>> re2.search(r'((.*)*.)', 'Hello').groups()
- ('Hello', 'Hell')
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_findall.txt b/contrib/python/pyre2/py2/tests/test_findall.txt
deleted file mode 100644
index c753b936df..0000000000
--- a/contrib/python/pyre2/py2/tests/test_findall.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-findall tests
-=============
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-This one is from http://docs.python.org/library/re.html?#finding-all-adverbs:
-
- >>> re2.findall(r"\w+ly", "He was carefully disguised but captured quickly by police.")
- ['carefully', 'quickly']
-
-This one makes sure all groups are found:
-
- >>> re2.findall(r"(\w+)=(\d+)", "foo=1,foo=2")
- [('foo', '1'), ('foo', '2')]
-
-When there's only one matched group, it should not be returned in a tuple:
-
- >>> re2.findall(r"(\w)\w", "fx")
- ['f']
-
-Zero matches is an empty list:
-
- >>> re2.findall("(f)", "gggg")
- []
-
-If pattern matches an empty string, do it only once at the end:
-
- >>> re2.findall(".*", "foo")
- ['foo', '']
-
- >>> re2.findall("", "foo")
- ['', '', '', '']
-
-
- >>> import re
- >>> re.findall(r'\b', 'The quick brown fox jumped over the lazy dog')
- ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
- >>> re2.findall(r'\b', 'The quick brown fox jumped over the lazy dog')
- ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_finditer.txt b/contrib/python/pyre2/py2/tests/test_finditer.txt
deleted file mode 100644
index 3d60d199c7..0000000000
--- a/contrib/python/pyre2/py2/tests/test_finditer.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Simple tests for the ``finditer`` function.
-===========================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> with open('tests/cnn_homepage.dat') as tmp:
- ... data = tmp.read()
- >>> len(list(re2.finditer(r'\w+', data)))
- 14230
-
- >>> [m.group(1) for m in re2.finditer(r'\n#hdr-editions(.*?)\n', data)]
- [' a { text-decoration:none; }', ' li { padding:0 10px; }', ' ul li.no-pad-left span { font-size:12px; }']
-
- >>> [m.group(1) for m in re2.finditer(r'^#hdr-editions(.*?)$',
- ... data, re2.M)]
- [' a { text-decoration:none; }', ' li { padding:0 10px; }', ' ul li.no-pad-left span { font-size:12px; }']
-
- >>> for a in re2.finditer(r'\b', 'foo bar zed'): print(a)
- <re2.Match object; span=(0, 0), match=''>
- <re2.Match object; span=(3, 3), match=''>
- <re2.Match object; span=(4, 4), match=''>
- <re2.Match object; span=(7, 7), match=''>
- <re2.Match object; span=(8, 8), match=''>
- <re2.Match object; span=(11, 11), match=''>
-
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_match_expand.txt b/contrib/python/pyre2/py2/tests/test_match_expand.txt
deleted file mode 100644
index b3d5652c76..0000000000
--- a/contrib/python/pyre2/py2/tests/test_match_expand.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Match Expand Tests
-==================
-
-Match objects have an .expand() method which allows them to
-expand templates as if the .sub() method was called on the pattern.
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> m = re2.match("(\\w+) (\\w+)\\W+(?P<title>\\w+)", "Isaac Newton, physicist")
- >>> m.expand("\\2, \\1")
- 'Newton, Isaac'
- >>> m.expand("\\1 \\g<title>")
- 'Isaac physicist'
- >>> m.expand("\\2, \\1 \\2")
- 'Newton, Isaac Newton'
- >>> m.expand("\\3")
- 'physicist'
- >>> m.expand("\\1 \\g<foo>") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- IndexError: no such group 'foo'; available groups: ['title']
- >>> m.expand("\\0")
- '\x00'
- >>> m.expand("\01")
- '\x01'
- >>> m.expand('\t\n\x0b\r\x0c\x07\x08\\B\\Z\x07\\A\\w\\W\\s\\S\\d\\D')
- '\t\n\x0b\r\x0c\x07\x08\\B\\Z\x07\\A\\w\\W\\s\\S\\d\\D'
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_mmap.txt b/contrib/python/pyre2/py2/tests/test_mmap.txt
deleted file mode 100644
index 12ffa97498..0000000000
--- a/contrib/python/pyre2/py2/tests/test_mmap.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-
-Testing re2 on buffer object
-============================
-
- >>> import re2
- >>> import mmap
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> tmp = open("tests/cnn_homepage.dat", "rb+")
- >>> data = mmap.mmap(tmp.fileno(), 0)
-
- >>> len(list(re2.finditer(b'\\w+', data)))
- 14230
-
- >>> data.close()
- >>> tmp.close()
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_namedgroups.txt b/contrib/python/pyre2/py2/tests/test_namedgroups.txt
deleted file mode 100644
index 70f561a39f..0000000000
--- a/contrib/python/pyre2/py2/tests/test_namedgroups.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Testing some aspects of named groups
-=================================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> m = re2.match(r"(?P<first_name>\w+) (?P<last_name>\w+)", "Malcolm Reynolds")
- >>> m.start("first_name")
- 0
- >>> m.start("last_name")
- 8
-
- >>> m.span("last_name")
- (8, 16)
- >>> m.regs
- ((0, 16), (0, 7), (8, 16))
-
- >>> m = re2.match(u"(?P<first_name>\\w+) (?P<last_name>\\w+)", u"Malcolm Reynolds")
- >>> m.start(u"first_name")
- 0
- >>> m.start(u"last_name")
- 8
-
- >>> m.span(u"last_name")
- (8, 16)
- >>> m.regs
- ((0, 16), (0, 7), (8, 16))
-
-Compare patterns with and without unicode
-
- >>> pattern = re2.compile(br"(?P<first_name>\w+) (?P<last_name>\w+)")
- >>> print(pattern._dump_pattern().decode('utf8'))
- (?P<first_name>\w+) (?P<last_name>\w+)
- >>> pattern = re2.compile(u"(?P<first_name>\\w+) (?P<last_name>\\w+)",
- ... re2.UNICODE)
- >>> print(pattern._dump_pattern())
- (?P<first_name>[_\p{L}\p{Nd}]+) (?P<last_name>[_\p{L}\p{Nd}]+)
-
-Make sure positions are converted properly for unicode
-
- >>> m = pattern.match(
- ... u'\u05d9\u05e9\u05e8\u05d0\u05dc \u05e6\u05d3\u05d5\u05e7')
- >>> m.start(u"first_name")
- 0
- >>> m.start(u"last_name")
- 6
- >>> m.end(u"last_name")
- 10
- >>> m.regs
- ((0, 10), (0, 5), (6, 10))
- >>> m.span(2)
- (6, 10)
- >>> m.span(u"last_name")
- (6, 10)
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_pattern.txt b/contrib/python/pyre2/py2/tests/test_pattern.txt
deleted file mode 100644
index aab47359a2..0000000000
--- a/contrib/python/pyre2/py2/tests/test_pattern.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-pattern tests
-=============
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-We should be able to get back what we put in.
-
- >>> re2.compile("(foo|b[a]r?)").pattern
- '(foo|b[a]r?)'
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_search.txt b/contrib/python/pyre2/py2/tests/test_search.txt
deleted file mode 100644
index 9c1e18f08c..0000000000
--- a/contrib/python/pyre2/py2/tests/test_search.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-These are simple tests of the ``search`` function
-=================================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
- >>> re2.search("((?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])", "hello 28.224.2.1 test").group()
- '28.224.2.1'
-
- >>> re2.search("(\d{3})\D?(\d{3})\D?(\d{4})", "800-555-1212").groups()
- ('800', '555', '1212')
-
- >>> input = 'a' * 999
- >>> len(re2.search('(?:a{1000})?a{999}', input).group())
- 999
-
- >>> with open('tests/cnn_homepage.dat') as tmp:
- ... data = tmp.read()
- >>> re2.search(r'\n#hdr-editions(.*?)\n', data).groups()
- (' a { text-decoration:none; }',)
-
-Verify some sanity checks
-
- >>> re2.compile(r'x').search('x', 2000)
- >>> re2.compile(r'x').search('x', 1, -300)
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_split.txt b/contrib/python/pyre2/py2/tests/test_split.txt
deleted file mode 100644
index a3e44bc605..0000000000
--- a/contrib/python/pyre2/py2/tests/test_split.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Split tests
-===========
-
-This one tests to make sure that unicode / utf8 data is parsed correctly.
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> a = u'\u6211\u5f88\u597d, \u4f60\u5462?'
-
- >>> re2.split(u' ', a) == [u'\u6211\u5f88\u597d,', u'\u4f60\u5462?']
- True
- >>> re2.split(b' ', a.encode('utf8')) == [
- ... b'\xe6\x88\x91\xe5\xbe\x88\xe5\xa5\xbd,',
- ... b'\xe4\xbd\xa0\xe5\x91\xa2?']
- True
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py2/tests/test_sub.txt b/contrib/python/pyre2/py2/tests/test_sub.txt
deleted file mode 100644
index b41dd30d28..0000000000
--- a/contrib/python/pyre2/py2/tests/test_sub.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Tests of substitution
-=====================
-
-This first test is just looking to replace things between parentheses
-with an empty string.
-
-
- >>> import hashlib
- >>> import gzip
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
- >>> with gzip.open('tests/wikipages.xml.gz', 'rb') as tmp:
- ... data = tmp.read()
- >>> print(hashlib.md5(re2.sub(b'\(.*?\)', b'', data)).hexdigest())
- b7a469f55ab76cd5887c81dbb0cfe6d3
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
-
-Issue #26 re2.sub replacements with a match of "(.*)" hangs forever
-
- >>> re2.sub('(.*)', r'\1;replacement', 'original')
- 'original;replacement;replacement'
-
- >>> re2.sub('(.*)', lambda x: x.group() + ';replacement', 'original')
- 'original;replacement;replacement'
-
- >>> re2.subn("b*", lambda x: "X", "xyz", 4)
- ('XxXyXzX', 4)
diff --git a/contrib/python/pyre2/py2/tests/test_unicode.txt b/contrib/python/pyre2/py2/tests/test_unicode.txt
deleted file mode 100644
index 71d497b80d..0000000000
--- a/contrib/python/pyre2/py2/tests/test_unicode.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Here are some tests to make sure that utf-8 works
-=================================================
-
- >>> import sys
- >>> import re2 as re
- >>> re.set_fallback_notification(re.FALLBACK_EXCEPTION)
- >>> a = u'\u6211\u5f88\u597d'
- >>> c = re.compile(a[0])
- >>> c.search(a).group() == u'\u6211'
- True
-
-Test unicode stickiness
-
- >>> re.sub(u'x', u'y', u'x') == u'y'
- True
- >>> re.sub(r'x', 'y', 'x') == 'y'
- True
- >>> re.findall('.', 'x') == ['x']
- True
- >>> re.findall(u'.', u'x') == [u'x']
- True
- >>> re.split(',', '1,2,3') == ['1', '2', '3']
- True
- >>> re.split(u',', u'1,2,3') == [u'1', u'2', u'3']
- True
- >>> re.search('(\\d)', '1').group(1) == '1'
- True
- >>> re.search(u'(\\d)', u'1').group(1) == u'1'
- True
-
-Test unicode character groups
-
- >>> re.search(u'\\d', u'\u0661', re.UNICODE).group(0) == u'\u0661'
- True
- >>> int(re.search(u'\\d', u'\u0661', re.UNICODE).group(0)) == 1
- True
- >>> (re.search(u'\\w', u'\u0401') is None) == (sys.version_info[0] == 2)
- True
- >>> re.search(u'\\w', u'\u0401', re.UNICODE).group(0) == u'\u0401'
- True
- >>> re.search(u'\\s', u'\u1680', re.UNICODE).group(0) == u'\u1680'
- True
- >>> re.findall(r'[\s\d\w]', 'hey 123', re.UNICODE) == ['h', 'e', 'y', ' ', '1', '2', '3']
- True
- >>> re.search(u'\\D', u'\u0661x', re.UNICODE).group(0) == u'x'
- True
- >>> re.search(u'\\W', u'\u0401!', re.UNICODE).group(0) == u'!'
- True
- >>> re.search(u'\\S', u'\u1680x', re.UNICODE).group(0) == u'x'
- True
- >>> re.set_fallback_notification(re.FALLBACK_QUIETLY)
- >>> re.search(u'[\\W]', u'\u0401!', re.UNICODE).group(0) == u'!'
- True
- >>> re.search(u'[\\S]', u'\u1680x', re.UNICODE).group(0) == u'x'
- True
- >>> re.set_fallback_notification(re.FALLBACK_EXCEPTION)
-
-
-Positions are translated transparently between unicode and UTF-8
-
- >>> re.search(u' (.)', u'\U0001d200xxx\u1234 x').span(1)
- (6, 7)
- >>> re.search(b' (.)', u'\U0001d200xxx\u1234 x'.encode('utf-8')).span(1)
- (11, 12)
- >>> re.compile(u'x').findall(u'\u1234x', 1, 2) == [u'x']
- True
- >>> data = u'\U0001d200xxx\u1234 x'
- >>> re.search(u' (.)', data).string == data
- True
-
- >>> re.set_fallback_notification(re.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/.dist-info/METADATA b/contrib/python/pyre2/py3/.dist-info/METADATA
deleted file mode 100644
index 6f4f966e33..0000000000
--- a/contrib/python/pyre2/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,275 +0,0 @@
-Metadata-Version: 2.1
-Name: pyre2
-Version: 0.3.6
-Summary: Python wrapper for Google\'s RE2 using Cython
-Home-page: https://github.com/andreasvc/pyre2
-Author: Andreas van Cranenburgh
-Author-email: andreas@unstable.nl
-Maintainer: Steve Arnold
-Maintainer-email: nerdboy@gentoo.org
-License: BSD
-Platform: UNKNOWN
-Classifier: License :: OSI Approved :: BSD License
-Classifier: Programming Language :: Cython
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Intended Audience :: Developers
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Requires-Python: >=3.6
-Description-Content-Type: text/x-rst; charset=UTF-8
-Provides-Extra: perf
-Requires-Dist: regex ; extra == 'perf'
-Provides-Extra: test
-Requires-Dist: pytest ; extra == 'test'
-
-===============================================================
- pyre2: Python RE2 wrapper for linear-time regular expressions
-===============================================================
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Build/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Build
- :alt: Build CI Status
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Release/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Release
- :alt: Release CI Status
-
-.. image:: https://img.shields.io/github/v/tag/andreasvc/pyre2?color=green&include_prereleases&label=latest%20release
- :target: https://github.com/andreasvc/pyre2/releases
- :alt: GitHub tag (latest SemVer, including pre-release)
-
-.. image:: https://badge.fury.io/py/pyre2.svg
- :target: https://badge.fury.io/py/pyre2
- :alt: Pypi version
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Conda/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Conda
- :alt: Conda CI Status
-
-.. image:: https://img.shields.io/github/license/andreasvc/pyre2
- :target: https://github.com/andreasvc/pyre2/blob/master/LICENSE
- :alt: License
-
-.. image:: https://img.shields.io/badge/python-3.6+-blue.svg
- :target: https://www.python.org/downloads/
- :alt: Python version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/version.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/platforms.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: platforms
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/downloads.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: downloads
-
-
-.. contents:: Table of Contents
- :depth: 2
- :backlinks: top
-
-
-Summary
-=======
-
-pyre2 is a Python extension that wraps
-`Google's RE2 regular expression library <https://github.com/google/re2>`_.
-The RE2 engine compiles (strictly) regular expressions to
-deterministic finite automata, which guarantees linear-time behavior.
-
-Intended as a drop-in replacement for ``re``. Unicode is supported by encoding
-to UTF-8, and bytes strings are treated as UTF-8 when the UNICODE flag is given.
-For best performance, work with UTF-8 encoded bytes strings.
-
-Installation
-============
-
-Normal usage for Linux/Mac/Windows::
-
- $ pip install pyre2
-
-Compiling from source
----------------------
-
-Requirements for building the C++ extension from the repo source:
-
-* A build environment with ``gcc`` or ``clang`` (e.g. ``sudo apt-get install build-essential``)
-* Build tools and libraries: RE2, pybind11, and cmake installed in the build
- environment.
-
- + On Ubuntu/Debian: ``sudo apt-get install build-essential cmake ninja-build python3-dev cython3 pybind11-dev libre2-dev``
- + On Gentoo, install dev-util/cmake, dev-python/pybind11, and dev-libs/re2
- + For a venv you can install the pybind11, cmake, and cython packages from PyPI
-
-On MacOS, use the ``brew`` package manager::
-
- $ brew install -s re2 pybind11
-
-On Windows use the ``vcpkg`` package manager::
-
- $ vcpkg install re2:x64-windows pybind11:x64-windows
-
-You can pass some cmake environment variables to alter the build type or
-pass a toolchain file (the latter is required on Windows) or specify the
-cmake generator. For example::
-
- $ CMAKE_GENERATOR="Unix Makefiles" CMAKE_TOOLCHAIN_FILE=clang_toolchain.cmake tox -e deploy
-
-For development, get the source::
-
- $ git clone git://github.com/andreasvc/pyre2.git
- $ cd pyre2
- $ make install
-
-
-Platform-agnostic building with conda
--------------------------------------
-
-An alternative to the above is provided via the `conda`_ recipe (use the
-`miniconda installer`_ if you don't have ``conda`` installed already).
-
-
-.. _conda: https://anaconda.org/conda-forge/pyre2
-.. _miniconda installer: https://docs.conda.io/en/latest/miniconda.html
-
-
-Backwards Compatibility
-=======================
-
-The stated goal of this module is to be a drop-in replacement for ``re``, i.e.::
-
- try:
- import re2 as re
- except ImportError:
- import re
-
-That being said, there are features of the ``re`` module that this module may
-never have; these will be handled through fallback to the original ``re`` module:
-
-* lookahead assertions ``(?!...)``
-* backreferences (``\\n`` in search pattern)
-* ``\W`` and ``\S`` are not supported inside character classes
-
-On the other hand, unicode character classes are supported (e.g., ``\p{Greek}``).
-Syntax reference: https://github.com/google/re2/wiki/Syntax
-
-However, there are times when you may want to be notified of a failover. The
-function ``set_fallback_notification`` determines the behavior in these cases::
-
- try:
- import re2 as re
- except ImportError:
- import re
- else:
- re.set_fallback_notification(re.FALLBACK_WARNING)
-
-``set_fallback_notification`` takes three values:
-``re.FALLBACK_QUIETLY`` (default), ``re.FALLBACK_WARNING`` (raise a warning),
-and ``re.FALLBACK_EXCEPTION`` (raise an exception).
-
-Documentation
-=============
-
-Consult the docstrings in the source code or interactively
-through ipython or ``pydoc re2`` etc.
-
-Unicode Support
-===============
-
-Python ``bytes`` and ``unicode`` strings are fully supported, but note that
-``RE2`` works with UTF-8 encoded strings under the hood, which means that
-``unicode`` strings need to be encoded and decoded back and forth.
-There are two important factors:
-
-* whether a ``unicode`` pattern and search string is used (will be encoded to UTF-8 internally)
-* the ``UNICODE`` flag: whether operators such as ``\w`` recognize Unicode characters.
-
-To avoid the overhead of encoding and decoding to UTF-8, it is possible to pass
-UTF-8 encoded bytes strings directly but still treat them as ``unicode``::
-
- In [18]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- Out[18]: ['M', '\xc3\xb6', 't', 'l', 'e', 'y', 'C', 'r', '\xc3\xbc', 'e']
- In [19]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'))
- Out[19]: ['M', 't', 'l', 'e', 'y', 'C', 'r', 'e']
-
-However, note that the indices in ``Match`` objects will refer to the bytes string.
-The indices of the match in the ``unicode`` string could be computed by
-decoding/encoding, but this is done automatically and more efficiently if you
-pass the ``unicode`` string::
-
- >>> re2.search(u'ü'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- <re2.Match object; span=(10, 12), match='\xc3\xbc'>
- >>> re2.search(u'ü', u'Mötley Crüe', flags=re2.UNICODE)
- <re2.Match object; span=(9, 10), match=u'\xfc'>
-
-Finally, if you want to match bytes without regard for Unicode characters,
-pass bytes strings and leave out the ``UNICODE`` flag (this will cause Latin 1
-encoding to be used with ``RE2`` under the hood)::
-
- >>> re2.findall(br'.', b'\x80\x81\x82')
- ['\x80', '\x81', '\x82']
-
-Performance
-===========
-
-Performance is of course the point of this module, so it better perform well.
-Regular expressions vary widely in complexity, and the salient feature of ``RE2`` is
-that it behaves well asymptotically. This being said, for very simple substitutions,
-I've found that occasionally python's regular ``re`` module is actually slightly faster.
-However, when the ``re`` module gets slow, it gets *really* slow, while this module
-buzzes along.
-
-In the below example, I'm running the data against 8MB of text from the colossal Wikipedia
-XML file. I'm running them multiple times, being careful to use the ``timeit`` module.
-To see more details, please see the `performance script <http://github.com/andreasvc/pyre2/tree/master/tests/performance.py>`_.
-
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Test |Description |# total runs|``re`` time(s)|``re2`` time(s)|% ``re`` time|``regex`` time(s)|% ``regex`` time|
-+=================+===========================================================================+============+==============+===============+=============+=================+================+
-|Findall URI|Email|Find list of '([a-zA-Z][a-zA-Z0-9]*)://([^ /]+)(/[^ ]*)?|([^ @]+)@([^ @]+)'|2 |6.262 |0.131 |2.08% |5.119 |2.55% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Replace WikiLinks|This test replaces links of the form [[Obama|Barack_Obama]] to Obama. |100 |4.374 |0.815 |18.63% |1.176 |69.33% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Remove WikiLinks |This test splits the data by the <page> tag. |100 |4.153 |0.225 |5.43% |0.537 |42.01% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-
-Feel free to add more speed tests to the bottom of the script and send a pull request my way!
-
-Current Status
-==============
-
-The tests show the following differences with Python's ``re`` module:
-
-* The ``$`` operator in Python's ``re`` matches twice if the string ends
- with ``\n``. This can be simulated using ``\n?$``, except when doing
- substitutions.
-* The ``pyre2`` module and Python's ``re`` may behave differently with nested groups.
- See ``tests/test_emptygroups.txt`` for the examples.
-
-Please report any further issues with ``pyre2``.
-
-Tests
-=====
-
-If you would like to help, one thing that would be very useful
-is writing comprehensive tests for this. It's actually really easy:
-
-* Come up with regular expression problems using the regular python 're' module.
-* Write a session in Python traceback format (see this `Example <http://github.com/andreasvc/pyre2/blob/master/tests/test_search.txt>`_).
-* Replace your ``import re`` with ``import re2 as re``.
-* Save it as ``test_<name>.txt`` in the tests directory. You can comment on it however you like and indent the code with 4 spaces.
-
-
-Credits
-=======
-This code builds on the following projects (in chronological order):
-
-- Google's RE2 regular expression library: https://github.com/google/re2
-- Facebook's pyre2 github repository: http://github.com/facebook/pyre2/
-- Mike Axiak's Cython version of this: http://github.com/axiak/pyre2/ (seems not actively maintained)
-- This fork adds Python 3 support and other improvements.
-
-
-
diff --git a/contrib/python/pyre2/py3/.dist-info/top_level.txt b/contrib/python/pyre2/py3/.dist-info/top_level.txt
deleted file mode 100644
index 94e9d8fad0..0000000000
--- a/contrib/python/pyre2/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-re2
diff --git a/contrib/python/pyre2/py3/AUTHORS b/contrib/python/pyre2/py3/AUTHORS
deleted file mode 100644
index 0f1a37f2b4..0000000000
--- a/contrib/python/pyre2/py3/AUTHORS
+++ /dev/null
@@ -1,12 +0,0 @@
-All contributors own the copyright to their own contributions, but agree
-to release each of their contributions under the BSD license included
-in this software.
-
-Michael Axiak <mike@axiak.net>
-
-Contributors
-============
-
-Alec Berryman <alec@thened.net>
-Israel Tsadok <itsadok@gmail.com>
-Alex Willmer <alex@moreati.org.uk>
diff --git a/contrib/python/pyre2/py3/LICENSE b/contrib/python/pyre2/py3/LICENSE
deleted file mode 100644
index 803fbbcd9f..0000000000
--- a/contrib/python/pyre2/py3/LICENSE
+++ /dev/null
@@ -1,9 +0,0 @@
-Copyright (c) 2010, Michael Axiak <mike@axiak.net>
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-Neither the name of the <ORGANIZATION> nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/contrib/python/pyre2/py3/README.rst b/contrib/python/pyre2/py3/README.rst
deleted file mode 100644
index 3f46ff6eaf..0000000000
--- a/contrib/python/pyre2/py3/README.rst
+++ /dev/null
@@ -1,250 +0,0 @@
-===============================================================
- pyre2: Python RE2 wrapper for linear-time regular expressions
-===============================================================
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Build/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Build
- :alt: Build CI Status
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Release/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Release
- :alt: Release CI Status
-
-.. image:: https://img.shields.io/github/v/tag/andreasvc/pyre2?color=green&include_prereleases&label=latest%20release
- :target: https://github.com/andreasvc/pyre2/releases
- :alt: GitHub tag (latest SemVer, including pre-release)
-
-.. image:: https://badge.fury.io/py/pyre2.svg
- :target: https://badge.fury.io/py/pyre2
- :alt: Pypi version
-
-.. image:: https://github.com/andreasvc/pyre2/workflows/Conda/badge.svg
- :target: https://github.com/andreasvc/pyre2/actions?query=workflow:Conda
- :alt: Conda CI Status
-
-.. image:: https://img.shields.io/github/license/andreasvc/pyre2
- :target: https://github.com/andreasvc/pyre2/blob/master/LICENSE
- :alt: License
-
-.. image:: https://img.shields.io/badge/python-3.6+-blue.svg
- :target: https://www.python.org/downloads/
- :alt: Python version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/version.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: version
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/platforms.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: platforms
-
-.. image:: https://anaconda.org/conda-forge/pyre2/badges/downloads.svg
- :target: https://anaconda.org/conda-forge/pyre2
- :alt: downloads
-
-
-.. contents:: Table of Contents
- :depth: 2
- :backlinks: top
-
-
-Summary
-=======
-
-pyre2 is a Python extension that wraps
-`Google's RE2 regular expression library <https://github.com/google/re2>`_.
-The RE2 engine compiles (strictly) regular expressions to
-deterministic finite automata, which guarantees linear-time behavior.
-
-Intended as a drop-in replacement for ``re``. Unicode is supported by encoding
-to UTF-8, and bytes strings are treated as UTF-8 when the UNICODE flag is given.
-For best performance, work with UTF-8 encoded bytes strings.
-
-Installation
-============
-
-Normal usage for Linux/Mac/Windows::
-
- $ pip install pyre2
-
-Compiling from source
----------------------
-
-Requirements for building the C++ extension from the repo source:
-
-* A build environment with ``gcc`` or ``clang`` (e.g. ``sudo apt-get install build-essential``)
-* Build tools and libraries: RE2, pybind11, and cmake installed in the build
- environment.
-
- + On Ubuntu/Debian: ``sudo apt-get install build-essential cmake ninja-build python3-dev cython3 pybind11-dev libre2-dev``
- + On Gentoo, install dev-util/cmake, dev-python/pybind11, and dev-libs/re2
- + For a venv you can install the pybind11, cmake, and cython packages from PyPI
-
-On MacOS, use the ``brew`` package manager::
-
- $ brew install -s re2 pybind11
-
-On Windows, use the ``vcpkg`` package manager::
-
- $ vcpkg install re2:x64-windows pybind11:x64-windows
-
-You can pass cmake environment variables to alter the build type,
-pass a toolchain file (required on Windows), or specify the
-cmake generator. For example::
-
- $ CMAKE_GENERATOR="Unix Makefiles" CMAKE_TOOLCHAIN_FILE=clang_toolchain.cmake tox -e deploy
-
-For development, get the source::
-
- $ git clone git://github.com/andreasvc/pyre2.git
- $ cd pyre2
- $ make install
-
-
-Platform-agnostic building with conda
--------------------------------------
-
-An alternative to the above is provided via the `conda`_ recipe (use the
-`miniconda installer`_ if you don't have ``conda`` installed already).
-
-
-.. _conda: https://anaconda.org/conda-forge/pyre2
-.. _miniconda installer: https://docs.conda.io/en/latest/miniconda.html
-
-
-Backwards Compatibility
-=======================
-
-The stated goal of this module is to be a drop-in replacement for ``re``, i.e.::
-
-    try:
-        import re2 as re
-    except ImportError:
-        import re
-
-That being said, there are features of the ``re`` module that this module may
-never have; these will be handled through fallback to the original ``re`` module:
-
-* lookahead assertions ``(?!...)``
-* backreferences (``\\n`` in search pattern)
-* ``\W`` and ``\S`` inside character classes
-
-On the other hand, unicode character classes are supported (e.g., ``\p{Greek}``).
-Syntax reference: https://github.com/google/re2/wiki/Syntax
-
-However, there are times when you may want to be notified of a failover. The
-function ``set_fallback_notification`` determines the behavior in these cases::
-
-    try:
-        import re2 as re
-    except ImportError:
-        import re
-    else:
-        re.set_fallback_notification(re.FALLBACK_WARNING)
-
-``set_fallback_notification`` takes three values:
-``re.FALLBACK_QUIETLY`` (default), ``re.FALLBACK_WARNING`` (issue a warning),
-and ``re.FALLBACK_EXCEPTION`` (raise an exception).
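-
-For example, with ``FALLBACK_WARNING`` enabled, compiling a pattern that needs
-backreferences falls back to ``re`` and issues a warning. A minimal sketch
-(the warning text shown is the one produced by the fallback path in ``compile``)::
-
-    >>> import warnings
-    >>> import re2
-    >>> re2.set_fallback_notification(re2.FALLBACK_WARNING)
-    >>> with warnings.catch_warnings(record=True) as caught:
-    ...     warnings.simplefilter('always')
-    ...     p = re2.compile(r'(a)\1')   # backreferences are not supported by RE2
-    >>> print(caught[0].message)
-    WARNING: Using re module. Reason: Backreferences not supported
-    >>> p.match('aa') is None           # the fallback re pattern still works
-    False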
-
-Documentation
-=============
-
-Consult the docstrings in the source code or interactively
-through ipython or ``pydoc re2`` etc.
-
-Unicode Support
-===============
-
-Python ``bytes`` and ``unicode`` strings are fully supported, but note that
-``RE2`` works with UTF-8 encoded strings under the hood, which means that
-``unicode`` strings need to be encoded and decoded back and forth.
-There are two important factors:
-
-* whether a ``unicode`` pattern and search string are used (they will be encoded to UTF-8 internally)
-* the ``UNICODE`` flag: whether operators such as ``\w`` recognize Unicode characters.
-
-To avoid the overhead of encoding and decoding to UTF-8, it is possible to pass
-UTF-8 encoded bytes strings directly but still treat them as ``unicode``::
-
- In [18]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- Out[18]: ['M', '\xc3\xb6', 't', 'l', 'e', 'y', 'C', 'r', '\xc3\xbc', 'e']
- In [19]: re2.findall(u'\w'.encode('utf8'), u'Mötley Crüe'.encode('utf8'))
- Out[19]: ['M', 't', 'l', 'e', 'y', 'C', 'r', 'e']
-
-However, note that the indices in ``Match`` objects will refer to the bytes string.
-The indices of the match in the ``unicode`` string could be computed by
-decoding/encoding, but this is done automatically and more efficiently if you
-pass the ``unicode`` string::
-
- >>> re2.search(u'ü'.encode('utf8'), u'Mötley Crüe'.encode('utf8'), flags=re2.UNICODE)
- <re2.Match object; span=(10, 12), match='\xc3\xbc'>
- >>> re2.search(u'ü', u'Mötley Crüe', flags=re2.UNICODE)
- <re2.Match object; span=(9, 10), match=u'\xfc'>
-
-Finally, if you want to match bytes without regard for Unicode characters,
-pass bytes strings and leave out the ``UNICODE`` flag (this will cause Latin 1
-encoding to be used with ``RE2`` under the hood)::
-
- >>> re2.findall(br'.', b'\x80\x81\x82')
- ['\x80', '\x81', '\x82']
-
-Performance
-===========
-
-Performance is, of course, the point of this module, so it had better perform well.
-Regular expressions vary widely in complexity, and the salient feature of ``RE2`` is
-that it behaves well asymptotically. That said, for very simple substitutions,
-I've found that Python's regular ``re`` module is occasionally slightly faster.
-However, when the ``re`` module gets slow, it gets *really* slow, while this module
-buzzes along.
-
-In the benchmarks below, I run each test against 8MB of text from the colossal Wikipedia
-XML file, multiple times, being careful to use the ``timeit`` module.
-For more details, see the `performance script <http://github.com/andreasvc/pyre2/tree/master/tests/performance.py>`_.
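-
-A sketch of the kind of timing harness used (the sample file name is
-hypothetical; the pattern is the URI/email expression from the first row)::
-
-    import re
-    import timeit
-
-    import re2
-
-    # Hypothetical 8MB sample taken from the Wikipedia XML dump.
-    with open('wikipedia-sample.xml', 'rb') as f:
-        text = f.read()
-
-    pattern = rb'([a-zA-Z][a-zA-Z0-9]*)://([^ /]+)(/[^ ]*)?|([^ @]+)@([^ @]+)'
-
-    for mod in (re, re2):
-        seconds = timeit.timeit(lambda: mod.findall(pattern, text), number=2)
-        print(mod.__name__, round(seconds, 3))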
-
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Test |Description |# total runs|``re`` time(s)|``re2`` time(s)|% ``re`` time|``regex`` time(s)|% ``regex`` time|
-+=================+===========================================================================+============+==============+===============+=============+=================+================+
-|Findall URI|Email|Find list of '([a-zA-Z][a-zA-Z0-9]*)://([^ /]+)(/[^ ]*)?|([^ @]+)@([^ @]+)'|2 |6.262 |0.131 |2.08% |5.119 |2.55% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Replace WikiLinks|This test replaces links of the form [[Obama|Barack_Obama]] to Obama. |100 |4.374 |0.815 |18.63% |1.176 |69.33% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-|Remove WikiLinks |This test splits the data by the <page> tag. |100 |4.153 |0.225 |5.43% |0.537 |42.01% |
-+-----------------+---------------------------------------------------------------------------+------------+--------------+---------------+-------------+-----------------+----------------+
-
-Feel free to add more speed tests to the bottom of the script and send a pull request my way!
-
-Current Status
-==============
-
-The tests show the following differences with Python's ``re`` module:
-
-* The ``$`` operator in Python's ``re`` matches twice if the string ends
-  with ``\n``. This can be simulated using ``\n?$``, except when doing
-  substitutions (see the example after this list).
-* The ``pyre2`` module and Python's ``re`` may behave differently with nested
-  groups; see ``tests/test_emptygroups.txt`` for examples.
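-
-A minimal doctest-style sketch of the first point (the counts follow from the
-behavior described above)::
-
-    >>> import re
-    >>> import re2
-    >>> len(re.findall(r'$', 'foo\n'))      # re: before and after the trailing newline
-    2
-    >>> len(re2.findall(r'$', 'foo\n'))     # re2: only at the very end of the string
-    1
-    >>> len(re2.findall(r'\n?$', 'foo\n'))  # simulates re's behavior
-    2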
-
-Please report any further issues with ``pyre2``.
-
-Tests
-=====
-
-If you would like to help, one thing that would be very useful
-is writing comprehensive tests for this. It's actually really easy:
-
-* Come up with regular expression problems using the regular Python ``re`` module.
-* Write a session in Python traceback format; see this `example <http://github.com/andreasvc/pyre2/blob/master/tests/test_search.txt>`_.
-* Replace your ``import re`` with ``import re2 as re``.
-* Save it as ``test_<name>.txt`` in the tests directory. You can comment on it however you like and indent the code with 4 spaces (a minimal sketch follows this list).
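-
-A minimal sketch of what such a file could look like (a hypothetical
-``tests/test_example.txt``)::
-
-    basic searching:
-
-        >>> import re2 as re
-        >>> re.search('ab+c', 'xabbbcy') is None
-        False
-        >>> re.findall(r'\d+', 'a1b22c333')
-        ['1', '22', '333']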
-
-
-Credits
-=======
-This code builds on the following projects (in chronological order):
-
-- Google's RE2 regular expression library: https://github.com/google/re2
-- Facebook's pyre2 github repository: http://github.com/facebook/pyre2/
-- Mike Axiak's Cython version of this: http://github.com/axiak/pyre2/ (seems not actively maintained)
-- This fork adds Python 3 support and other improvements.
-
diff --git a/contrib/python/pyre2/py3/src/_re2macros.h b/contrib/python/pyre2/py3/src/_re2macros.h
deleted file mode 100644
index b9ac82af6b..0000000000
--- a/contrib/python/pyre2/py3/src/_re2macros.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#ifndef __RE2MACROS_H
-#define __RE2MACROS_H
-
-#include <stdio.h>
-#include "re2/stringpiece.h"
-
-static inline re2::StringPiece * new_StringPiece_array(int n)
-{
- re2::StringPiece * sp = new re2::StringPiece[n];
- return sp;
-}
-
-#endif
diff --git a/contrib/python/pyre2/py3/src/compile.pxi b/contrib/python/pyre2/py3/src/compile.pxi
deleted file mode 100644
index 887a2778cd..0000000000
--- a/contrib/python/pyre2/py3/src/compile.pxi
+++ /dev/null
@@ -1,234 +0,0 @@
-
-def compile(pattern, int flags=0, int max_mem=8388608):
- cachekey = (type(pattern), pattern, flags)
- if cachekey in _cache:
- return _cache[cachekey]
- p = _compile(pattern, flags, max_mem)
-
- if len(_cache) >= _MAXCACHE:
- _cache.popitem()
- _cache[cachekey] = p
- return p
-
-
-def _compile(object pattern, int flags=0, int max_mem=8388608):
- """Compile a regular expression pattern, returning a pattern object."""
- def fallback(pattern, flags, error_msg):
- """Raise error, warn, or simply return fallback from re module."""
- if current_notification == FALLBACK_EXCEPTION:
- raise RegexError(error_msg)
- elif current_notification == FALLBACK_WARNING:
- warnings.warn("WARNING: Using re module. Reason: %s" % error_msg)
- try:
- result = PythonRePattern(pattern, flags)
- except re.error as err:
- raise RegexError(*err.args)
- return result
-
- cdef StringPiece * s
- cdef Options opts
- cdef int error_code
- cdef int encoded = 0
- cdef object original_pattern
-
- if isinstance(pattern, (Pattern, SREPattern)):
- if flags:
- raise ValueError(
- 'Cannot process flags argument with a compiled pattern')
- return pattern
-
- original_pattern = pattern
- if flags & _L:
- return fallback(original_pattern, flags, "re.LOCALE not supported")
- pattern = unicode_to_bytes(pattern, &encoded, -1)
- newflags = flags
- if not PY2:
- if not encoded and flags & _U: # re.UNICODE
- pass # can use UNICODE with bytes pattern, but assumes valid UTF-8
- # raise ValueError("can't use UNICODE flag with a bytes pattern")
- elif encoded and not (flags & ASCII): # re.ASCII (not in Python 2)
- newflags = flags | _U # re.UNICODE
- elif encoded and flags & ASCII:
- newflags = flags & ~_U # re.UNICODE
- try:
- pattern = _prepare_pattern(pattern, newflags)
- except BackreferencesException:
- return fallback(original_pattern, flags, "Backreferences not supported")
- except CharClassProblemException:
- return fallback(original_pattern, flags,
- "\W and \S not supported inside character classes")
-
- # Set the options given the flags above.
- if flags & _I:
- opts.set_case_sensitive(0);
-
- opts.set_max_mem(max_mem)
- opts.set_log_errors(0)
- if flags & _U or encoded:
- opts.set_encoding(EncodingUTF8)
- else: # re.UNICODE flag not passed, and pattern is bytes,
- # so allow matching of arbitrary byte sequences.
- opts.set_encoding(EncodingLatin1)
-
- s = new StringPiece(<char *><bytes>pattern, len(pattern))
-
- cdef RE2 *re_pattern
- with nogil:
- re_pattern = new RE2(s[0], opts)
-
- if not re_pattern.ok():
- # Something went wrong with the compilation.
- del s
- error_msg = cpp_to_unicode(re_pattern.error())
- error_code = re_pattern.error_code()
- del re_pattern
- if current_notification == FALLBACK_EXCEPTION:
- # Raise an exception regardless of the type of error.
- raise RegexError(error_msg)
- elif error_code not in (ErrorBadPerlOp, ErrorRepeatSize,
- # ErrorBadEscape,
- ErrorPatternTooLarge):
- # Raise an error because these will not be fixed by using the
- # ``re`` module.
- raise RegexError(error_msg)
- elif current_notification == FALLBACK_WARNING:
- warnings.warn("WARNING: Using re module. Reason: %s" % error_msg)
- return PythonRePattern(original_pattern, flags)
-
- cdef Pattern pypattern = Pattern()
- cdef map[cpp_string, int] named_groups = re_pattern.NamedCapturingGroups()
- pypattern.pattern = original_pattern
- pypattern.re_pattern = re_pattern
- pypattern.groups = re_pattern.NumberOfCapturingGroups()
- pypattern.encoded = encoded
- pypattern.flags = flags
- pypattern.groupindex = {}
- for it in named_groups:
- pypattern.groupindex[cpp_to_unicode(it.first)] = it.second
-
- if flags & DEBUG:
- print(repr(pypattern._dump_pattern()))
- del s
- return pypattern
-
-
-def _prepare_pattern(bytes pattern, int flags):
- """Translate pattern to RE2 syntax."""
- cdef bytearray result = bytearray()
- cdef unsigned char * cstring = pattern
- cdef unsigned char this, that
- cdef int size = len(pattern)
- cdef int n = 0
-
- if flags & (_S | _M):
- result.extend(b'(?')
- if flags & _S:
- result.extend(b's')
- if flags & _M:
- result.extend(b'm')
- result.extend(b')')
- while n < size:
- this = cstring[n]
- if flags & _X:
- if this in b' \t\n\r\f\v':
- n += 1
- continue
- elif this == b'#':
- while True:
- n += 1
- if n >= size:
- break
- this = cstring[n]
- if this == b'\n':
- break
- n += 1
- continue
-
- if this != b'[' and this != b'\\':
- result.append(this)
- n += 1
- continue
- elif this == b'[':
- result.append(this)
- while True:
- n += 1
- if n >= size:
- raise RegexError("unexpected end of regular expression")
- this = cstring[n]
- if this == b']':
- result.append(this)
- break
- elif this == b'\\':
- n += 1
- that = cstring[n]
- if that == b'b':
- result.extend(br'\010')
- elif flags & _U:
- if that == b'd':
- result.extend(br'\p{Nd}')
- elif that == b'w':
- result.extend(br'_\p{L}\p{Nd}')
- elif that == b's':
- result.extend(br'\s\p{Z}')
- elif that == b'D':
- result.extend(br'\P{Nd}')
- elif that == b'W':
- # Since \w and \s are made out of several character
- # groups, I don't see a way to convert their
- # complements into a group without rewriting the
- # whole expression, which seems too complicated.
- raise CharClassProblemException()
- elif that == b'S':
- raise CharClassProblemException()
- else:
- result.append(this)
- result.append(that)
- else:
- result.append(this)
- result.append(that)
- else:
- result.append(this)
- elif this == b'\\':
- n += 1
- that = cstring[n]
- if b'8' <= that <= b'9':
- raise BackreferencesException()
- elif isoct(that):
- if (n + 2 < size and isoct(cstring[n + 1])
- and isoct(cstring[n + 2])):
- # all clear, this is an octal escape
- result.extend(cstring[n - 1:n + 3])
- n += 2
- else:
- raise BackreferencesException()
- elif that == b'x':
- if (n + 2 < size and ishex(cstring[n + 1])
- and ishex(cstring[n + 2])):
- # hex escape
- result.extend(cstring[n - 1:n + 3])
- n += 2
- else:
- raise BackreferencesException()
- elif that == b'Z':
- result.extend(b'\\z')
- elif flags & _U:
- if that == b'd':
- result.extend(br'\p{Nd}')
- elif that == b'w':
- result.extend(br'[_\p{L}\p{Nd}]')
- elif that == b's':
- result.extend(br'[\s\p{Z}]')
- elif that == b'D':
- result.extend(br'[^\p{Nd}]')
- elif that == b'W':
- result.extend(br'[^_\p{L}\p{Nd}]')
- elif that == b'S':
- result.extend(br'[^\s\p{Z}]')
- else:
- result.append(this)
- result.append(that)
- else:
- result.append(this)
- result.append(that)
- n += 1
- return bytes(result)
diff --git a/contrib/python/pyre2/py3/src/includes.pxi b/contrib/python/pyre2/py3/src/includes.pxi
deleted file mode 100644
index 8c35b6d4b2..0000000000
--- a/contrib/python/pyre2/py3/src/includes.pxi
+++ /dev/null
@@ -1,109 +0,0 @@
-cimport cpython.unicode
-from libcpp.map cimport map
-from libcpp.string cimport string as cpp_string
-from cython.operator cimport postincrement, dereference
-from cpython.buffer cimport Py_buffer, PyBUF_SIMPLE, PyObject_CheckBuffer, \
- PyObject_GetBuffer, PyBuffer_Release
-from cpython.version cimport PY_MAJOR_VERSION
-
-
-cdef extern from *:
- cdef void emit_if_narrow_unicode "#if !defined(Py_UNICODE_WIDE) && PY_VERSION_HEX < 0x03030000 //" ()
- cdef void emit_endif "#endif //" ()
-
-
-cdef extern from "Python.h":
- int PyObject_CheckReadBuffer(object)
- int PyObject_AsReadBuffer(object, const void **, Py_ssize_t *)
-
-
-cdef extern from "re2/stringpiece.h" namespace "re2":
- cdef cppclass StringPiece:
- StringPiece()
- StringPiece(const char *)
- StringPiece(const char *, int)
- const char * data()
- int copy(char * buf, size_t n, size_t pos)
- int length()
-
-
-cdef extern from "re2/re2.h" namespace "re2":
- cdef enum Anchor:
- UNANCHORED "RE2::UNANCHORED"
- ANCHOR_START "RE2::ANCHOR_START"
- ANCHOR_BOTH "RE2::ANCHOR_BOTH"
-
- ctypedef Anchor re2_Anchor "RE2::Anchor"
-
- cdef enum ErrorCode:
- NoError "RE2::NoError"
- ErrorInternal "RE2::ErrorInternal"
- # Parse errors
- ErrorBadEscape "RE2::ErrorBadEscape" # bad escape sequence
- ErrorBadCharClass "RE2::ErrorBadCharClass" # bad character class
- ErrorBadCharRange "RE2::ErrorBadCharRange" # bad character class range
- ErrorMissingBracket "RE2::ErrorMissingBracket" # missing closing ]
- ErrorMissingParen "RE2::ErrorMissingParen" # missing closing )
- ErrorTrailingBackslash "RE2::ErrorTrailingBackslash" # trailing \ at end of regexp
- ErrorRepeatArgument "RE2::ErrorRepeatArgument" # repeat argument missing, e.g. "*"
- ErrorRepeatSize "RE2::ErrorRepeatSize" # bad repetition argument
- ErrorRepeatOp "RE2::ErrorRepeatOp" # bad repetition operator
- ErrorBadPerlOp "RE2::ErrorBadPerlOp" # bad perl operator
- ErrorBadUTF8 "RE2::ErrorBadUTF8" # invalid UTF-8 in regexp
- ErrorBadNamedCapture "RE2::ErrorBadNamedCapture" # bad named capture group
- ErrorPatternTooLarge "RE2::ErrorPatternTooLarge" # pattern too large (compile failed)
-
- cdef enum Encoding:
- EncodingUTF8 "RE2::Options::EncodingUTF8"
- EncodingLatin1 "RE2::Options::EncodingLatin1"
-
- ctypedef Encoding re2_Encoding "RE2::Options::Encoding"
-
- cdef cppclass Options "RE2::Options":
- Options()
- void set_posix_syntax(int b)
- void set_longest_match(int b)
- void set_log_errors(int b)
- void set_max_mem(int m)
- void set_literal(int b)
- void set_never_nl(int b)
- void set_case_sensitive(int b)
- void set_perl_classes(int b)
- void set_word_boundary(int b)
- void set_one_line(int b)
- int case_sensitive()
- void set_encoding(re2_Encoding encoding)
-
- cdef cppclass RE2:
- RE2(const StringPiece pattern, Options option) nogil
- RE2(const StringPiece pattern) nogil
- int Match(const StringPiece text, int startpos, int endpos,
- Anchor anchor, StringPiece * match, int nmatch) nogil
- int Replace(cpp_string *str, const RE2 pattern,
- const StringPiece rewrite) nogil
- int GlobalReplace(cpp_string *str, const RE2 pattern,
- const StringPiece rewrite) nogil
- int NumberOfCapturingGroups()
- int ok()
- const cpp_string pattern()
- cpp_string error()
- ErrorCode error_code()
- const map[cpp_string, int]& NamedCapturingGroups()
-
- # hack for static methods
- cdef int Replace "RE2::Replace"(
- cpp_string *str, const RE2 pattern,
- const StringPiece rewrite) nogil
- cdef int GlobalReplace "RE2::GlobalReplace"(
- cpp_string *str,
- const RE2 pattern,
- const StringPiece rewrite) nogil
-
-
-cdef extern from "_re2macros.h":
- StringPiece * new_StringPiece_array(int) nogil
-
-
-cdef extern from *:
- # StringPiece * new_StringPiece_array "new re2::StringPiece[n]" (int) nogil
- void delete_StringPiece_array "delete[]" (StringPiece *) nogil
diff --git a/contrib/python/pyre2/py3/src/match.pxi b/contrib/python/pyre2/py3/src/match.pxi
deleted file mode 100644
index 3eaae74b47..0000000000
--- a/contrib/python/pyre2/py3/src/match.pxi
+++ /dev/null
@@ -1,280 +0,0 @@
-cdef class Match:
- cdef readonly Pattern re
- cdef readonly object string
- cdef readonly int pos
- cdef readonly int endpos
- cdef readonly tuple regs
-
- cdef StringPiece * matches
- cdef int encoded
- cdef int nmatches
- cdef int _lastindex
- cdef tuple _groups
- cdef dict _named_groups
-
- property lastindex:
- def __get__(self):
- return None if self._lastindex < 1 else self._lastindex
-
- property lastgroup:
- def __get__(self):
- if self._lastindex < 1:
- return None
- for name, n in self.re.groupindex.items():
- if n == self._lastindex:
- return name
- return None
-
- def __init__(self, Pattern pattern_object, int num_groups):
- self._lastindex = -1
- self._groups = None
- self.pos = 0
- self.endpos = -1
- self.matches = new_StringPiece_array(num_groups + 1)
- self.nmatches = num_groups
- self.re = pattern_object
-
- cdef _init_groups(self):
- cdef list groups = []
- cdef int i
- cdef const char * last_end = NULL
- cdef const char * cur_end = NULL
-
- for i in range(self.nmatches):
- if self.matches[i].data() == NULL:
- groups.append(None)
- else:
- if i > 0:
- cur_end = self.matches[i].data() + self.matches[i].length()
-
- if last_end == NULL:
- last_end = cur_end
- self._lastindex = i
- else:
- # The rules for last group are a bit complicated:
- # if two groups end at the same point, the earlier one
- # is considered last, so we don't switch our selection
- # unless the end point has moved.
- if cur_end > last_end:
- last_end = cur_end
- self._lastindex = i
- groups.append(
- self.matches[i].data()[:self.matches[i].length()])
- self._groups = tuple(groups)
-
- cdef bytes _group(self, object groupnum):
- cdef int idx
- if isinstance(groupnum, int):
- idx = groupnum
- if idx > self.nmatches - 1:
- raise IndexError("no such group %d; available groups: %r"
- % (idx, list(range(self.nmatches))))
- return self._groups[idx]
- groupdict = self._groupdict()
- if groupnum not in groupdict:
- raise IndexError("no such group %r; available groups: %r"
- % (groupnum, list(groupdict)))
- return groupdict[groupnum]
-
- cdef dict _groupdict(self):
- if self._named_groups is None:
- self._named_groups = {name: self._groups[n]
- for name, n in self.re.groupindex.items()}
- return self._named_groups
-
- def groups(self, default=None):
- if self.encoded:
- return tuple([default if g is None else g.decode('utf8')
- for g in self._groups[1:]])
- return tuple([default if g is None else g
- for g in self._groups[1:]])
-
- def group(self, *args):
- if len(args) == 0:
- groupnum = 0
- elif len(args) == 1:
- groupnum = args[0]
- else: # len(args) > 1:
- return tuple([self.group(i) for i in args])
- if self.encoded:
- result = self._group(groupnum)
- return None if result is None else result.decode('utf8')
- return self._group(groupnum)
-
- def groupdict(self):
- result = self._groupdict()
- if self.encoded:
- return {a: None if b is None else b.decode('utf8')
- for a, b in result.items()}
- return result
-
- def expand(self, object template):
- """Expand a template with groups."""
- cdef bytearray result = bytearray()
- if isinstance(template, unicode):
- if not PY2 and not self.encoded:
- raise ValueError(
- 'cannot expand unicode template on bytes pattern')
- templ = template.encode('utf8')
- else:
- if not PY2 and self.encoded:
- raise ValueError(
- 'cannot expand bytes template on unicode pattern')
- templ = bytes(template)
- self._expand(templ, result)
- return result.decode('utf8') if self.encoded else bytes(result)
-
- cdef _expand(self, bytes templ, bytearray result):
- """Expand template by appending to an existing bytearray.
- Everything remains UTF-8 encoded."""
- cdef char * cstring
- cdef int n = 0, prev = 0, size
-
- # NB: cstring is used to get single characters, to avoid difference in
- # Python 2/3 behavior of bytes objects.
- cstring = templ
- size = len(templ)
- while True:
- prev = n
- n = templ.find(b'\\', prev)
- if n == -1:
- result.extend(templ[prev:])
- break
- result.extend(templ[prev:n])
- n += 1
- if (n + 2 < size and cstring[n] == b'x'
- and ishex(cstring[n + 1]) and ishex(cstring[n + 2])):
- # hex char reference \x1f
- result.append(int(templ[n + 1:n + 3], base=16) & 255)
- n += 3
- elif (n + 2 < size and isoct(cstring[n]) and isoct(cstring[n + 1])
- and isoct(cstring[n + 2])):
- # octal char reference \123
- result.append(int(templ[n:n + 3], base=8) & 255)
- n += 3
- elif cstring[n] == b'0':
- if n + 1 < size and isoct(cstring[n + 1]):
- # 2 character octal: \01
- result.append(int(templ[n:n + 2], base=8))
- n += 2
- else: # nul-terminator literal \0
- result.append(b'\0')
- n += 1
- elif b'0' <= cstring[n] <= b'9': # numeric group reference
- if n + 1 < size and isdigit(cstring[n + 1]):
- # 2 digit group ref \12
- groupno = int(templ[n:n + 2])
- n += 2
- else:
- # 1 digit group ref \1
- groupno = int(templ[n:n + 1])
- n += 1
- if groupno <= self.re.groups:
- groupval = self._group(groupno)
- if groupval is not None:
- result.extend(groupval)
- else:
- raise RegexError('invalid group reference.')
- elif cstring[n] == b'g': # named group reference
- n += 1
- if n >= size or cstring[n] != b'<':
- raise RegexError('missing group name')
- n += 1
- start = n
- while cstring[n] != b'>':
- if not isident(cstring[n]):
- raise RegexError('bad character in group name')
- n += 1
- if n >= size:
- raise RegexError('unterminated group name')
- if templ[start:n].isdigit():
- name = int(templ[start:n])
- elif isdigit(cstring[start]):
- raise RegexError('bad character in group name')
- else:
- name = templ[start:n]
- if self.encoded:
- name = name.decode('utf8')
- groupval = self._group(name)
- if groupval is not None:
- result.extend(groupval)
- n += 1
- else:
- if cstring[n] == b'n':
- result.append(b'\n')
- elif cstring[n] == b'r':
- result.append(b'\r')
- elif cstring[n] == b't':
- result.append(b'\t')
- elif cstring[n] == b'v':
- result.append(b'\v')
- elif cstring[n] == b'f':
- result.append(b'\f')
- elif cstring[n] == b'a':
- result.append(b'\a')
- elif cstring[n] == b'b':
- result.append(b'\b')
- elif cstring[n] == b'\\':
- result.append(b'\\')
- else: # copy verbatim
- result.append(b'\\')
- result.append(cstring[n])
- n += 1
- return bytes(result)
-
- def start(self, group=0):
- return self.span(group)[0]
-
- def end(self, group=0):
- return self.span(group)[1]
-
- def span(self, group=0):
- if isinstance(group, int):
- if group > len(self.regs):
- raise IndexError("no such group %d; available groups: %r"
- % (group, list(range(len(self.regs)))))
- return self.regs[group]
- else:
- self._groupdict()
- if group not in self.re.groupindex:
- raise IndexError("no such group %r; available groups: %r"
- % (group, list(self.re.groupindex)))
- return self.regs[self.re.groupindex[group]]
-
- cdef _make_spans(self, char * cstring, int size, int * cpos, int * upos):
- cdef int start, end
- cdef StringPiece * piece
-
- spans = []
- for i in range(self.nmatches):
- if self.matches[i].data() == NULL:
- spans.append((-1, -1))
- else:
- piece = &self.matches[i]
- if piece.data() == NULL:
- return (-1, -1)
- start = piece.data() - cstring
- end = start + piece.length()
- spans.append((start, end))
-
- if self.encoded == 2:
- spans = self._convert_spans(spans, cstring, size, cpos, upos)
-
- self.regs = tuple(spans)
-
- cdef list _convert_spans(self, spans,
- char * cstring, int size, int * cpos, int * upos):
- cdef map[int, int] positions
- cdef int x, y
- for x, y in spans:
- positions[x] = x
- positions[y] = y
- unicodeindices(positions, cstring, size, cpos, upos)
- return [(positions[x], positions[y]) for x, y in spans]
-
- def __dealloc__(self):
- delete_StringPiece_array(self.matches)
-
- def __repr__(self):
- return '<re2.Match object; span=%r, match=%r>' % (
- self.span(), self.group())
diff --git a/contrib/python/pyre2/py3/src/pattern.pxi b/contrib/python/pyre2/py3/src/pattern.pxi
deleted file mode 100644
index b8439d2007..0000000000
--- a/contrib/python/pyre2/py3/src/pattern.pxi
+++ /dev/null
@@ -1,650 +0,0 @@
-cdef class Pattern:
- cdef readonly object pattern # original pattern in Python format
- cdef readonly int flags
- cdef readonly int groups # number of groups
- cdef readonly dict groupindex # name => group number
- cdef object __weakref__
-
- cdef bint encoded # True if this was originally a Unicode pattern
- cdef RE2 * re_pattern
-
- def search(self, object string, int pos=0, int endpos=-1):
- """Scan through string looking for a match, and return a corresponding
- Match instance. Return None if no position in the string matches."""
- return self._search(string, pos, endpos, UNANCHORED)
-
- def match(self, object string, int pos=0, int endpos=-1):
- """Matches zero or more characters at the beginning of the string."""
- return self._search(string, pos, endpos, ANCHOR_START)
-
- def fullmatch(self, object string, int pos=0, int endpos=-1):
- """"fullmatch(string[, pos[, endpos]]) --> Match object or None."
-
- Matches the entire string."""
- return self._search(string, pos, endpos, ANCHOR_BOTH)
-
- cdef _search(self, object string, int pos, int endpos,
- re2_Anchor anchoring):
- """Scan through string looking for a match, and return a corresponding
- Match instance. Return None if no position in the string matches."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef int encoded = 0
- cdef StringPiece * sp
- cdef Match m = Match(self, self.groups + 1)
- cdef int cpos = 0, upos = pos
-
- if 0 <= endpos <= pos:
- return None
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- try:
- if encoded == 2 and (pos or endpos != -1):
- utf8indices(cstring, size, &pos, &endpos)
- cpos = pos
- if pos > size:
- return None
- if 0 <= endpos < size:
- size = endpos
-
- sp = new StringPiece(cstring, size)
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- anchoring,
- m.matches,
- self.groups + 1)
- del sp
- if retval == 0:
- return None
-
- m.encoded = encoded
- m.nmatches = self.groups + 1
- m.string = string
- m.pos = pos
- if endpos == -1:
- m.endpos = size
- else:
- m.endpos = endpos
- m._make_spans(cstring, size, &cpos, &upos)
- m._init_groups()
- finally:
- release_cstring(&buf)
- return m
-
- def contains(self, object string, int pos=0, int endpos=-1):
- """"contains(string[, pos[, endpos]]) --> bool."
-
- Scan through string looking for a match, and return True or False."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef int encoded = 0
- cdef StringPiece * sp
-
- if 0 <= endpos <= pos:
- return False
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- try:
- if encoded == 2 and (pos or endpos != -1):
- utf8indices(cstring, size, &pos, &endpos)
- if pos > size:
- return False
- if 0 <= endpos < size:
- size = endpos
-
- sp = new StringPiece(cstring, size)
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- NULL,
- 0)
- del sp
- finally:
- release_cstring(&buf)
- return retval != 0
-
- def count(self, object string, int pos=0, int endpos=-1):
- """Return number of non-overlapping matches of pattern in string."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef int encoded = 0
- cdef int result = 0
- cdef StringPiece * sp = NULL
- cdef StringPiece * matches = NULL
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- try:
- if encoded == 2 and (pos or endpos != -1):
- utf8indices(cstring, size, &pos, &endpos)
- if pos > size:
- return 0
- if 0 <= endpos < size:
- size = endpos
-
- sp = new StringPiece(cstring, size)
- matches = new_StringPiece_array(1)
- try:
- while True:
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- matches,
- 1)
- if retval == 0:
- break
- result += 1
- if pos == size:
- break
- # offset the pos to move to the next point
- pos = matches[0].data() - cstring + (
- matches[0].length() or 1)
- finally:
- del sp
- delete_StringPiece_array(matches)
- finally:
- release_cstring(&buf)
- return result
-
- def findall(self, object string, int pos=0, int endpos=-1):
- """Return all non-overlapping matches of pattern in string as a list
- of strings."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int encoded = 0
- cdef int retval
- cdef list resultlist = []
- cdef StringPiece * sp = NULL
- cdef StringPiece * matches = NULL
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- try:
- if encoded == 2 and (pos or endpos != -1):
- utf8indices(cstring, size, &pos, &endpos)
- if pos > size:
- return []
- if 0 <= endpos < size:
- size = endpos
-
- sp = new StringPiece(cstring, size)
- matches = new_StringPiece_array(self.groups + 1)
-
- while True:
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- matches,
- self.groups + 1)
- if retval == 0:
- break
- if self.groups > 1:
- if encoded:
- resultlist.append(tuple([
- '' if matches[i].data() is NULL else
- matches[i].data()[:matches[i].length()
- ].decode('utf8')
- for i in range(1, self.groups + 1)]))
- else:
- resultlist.append(tuple([
- b'' if matches[i].data() is NULL
- else matches[i].data()[:matches[i].length()]
- for i in range(1, self.groups + 1)]))
- else: # 0 or 1 group; return list of strings
- if encoded:
- resultlist.append(matches[self.groups].data()[
- :matches[self.groups].length()].decode('utf8'))
- else:
- resultlist.append(matches[self.groups].data()[
- :matches[self.groups].length()])
- if pos == size:
- break
- # offset the pos to move to the next point
- pos = matches[0].data() - cstring + (matches[0].length() or 1)
- finally:
- del sp
- delete_StringPiece_array(matches)
- release_cstring(&buf)
- return resultlist
-
- def finditer(self, object string, int pos=0, int endpos=-1):
- """Yield all non-overlapping matches of pattern in string as Match
- objects."""
- result = iter(self._finditer(string, pos, endpos))
- next(result) # dummy value to raise error before start of generator
- return result
-
- def _finditer(self, object string, int pos=0, int endpos=-1):
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef StringPiece * sp = NULL
- cdef Match m
- cdef int encoded = 0
- cdef int cpos = 0, upos = pos
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- try:
- if encoded == 2 and (pos or endpos != -1):
- utf8indices(cstring, size, &pos, &endpos)
- cpos = pos
- if pos > size:
- return
- if 0 <= endpos < size:
- size = endpos
-
- sp = new StringPiece(cstring, size)
-
- yield
- while True:
- m = Match(self, self.groups + 1)
- m.string = string
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- m.matches,
- self.groups + 1)
- if retval == 0:
- break
- m.encoded = encoded
- m.nmatches = self.groups + 1
- m.pos = pos
- if endpos == -1:
- m.endpos = size
- else:
- m.endpos = endpos
- m._make_spans(cstring, size, &cpos, &upos)
- m._init_groups()
- yield m
- if pos == size:
- break
- # offset the pos to move to the next point
- pos = m.matches[0].data() - cstring + (
- m.matches[0].length() or 1)
- finally:
- del sp
- release_cstring(&buf)
-
- def split(self, string, int maxsplit=0):
- """split(string[, maxsplit = 0]) --> list
-
- Split a string by the occurrences of the pattern."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef int retval
- cdef int pos = 0
- cdef int lookahead = 0
- cdef int num_split = 0
- cdef StringPiece * sp
- cdef StringPiece * matches
- cdef list resultlist = []
- cdef int encoded = 0
- cdef Py_buffer buf
-
- if maxsplit < 0:
- maxsplit = 0
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- matches = new_StringPiece_array(self.groups + 1)
- sp = new StringPiece(cstring, size)
- try:
-
- while True:
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos + lookahead,
- size,
- UNANCHORED,
- matches,
- self.groups + 1)
- if retval == 0:
- break
-
- match_start = matches[0].data() - cstring
- match_end = match_start + matches[0].length()
-
- # If an empty match, just look ahead until you find something
- if match_start == match_end:
- if pos + lookahead == size:
- break
- lookahead += 1
- continue
-
- if encoded:
- resultlist.append(
- char_to_unicode(&sp.data()[pos], match_start - pos))
- else:
- resultlist.append(sp.data()[pos:match_start])
- if self.groups > 0:
- for group in range(self.groups):
- if matches[group + 1].data() == NULL:
- resultlist.append(None)
- else:
- if encoded:
- resultlist.append(char_to_unicode(
- matches[group + 1].data(),
- matches[group + 1].length()))
- else:
- resultlist.append(matches[group + 1].data()[:
- matches[group + 1].length()])
-
- # offset the pos to move to the next point
- pos = match_end
- lookahead = 0
-
- num_split += 1
- if maxsplit and num_split >= maxsplit:
- break
-
- if encoded:
- resultlist.append(
- char_to_unicode(&sp.data()[pos], sp.length() - pos))
- else:
- resultlist.append(sp.data()[pos:])
- finally:
- del sp
- delete_StringPiece_array(matches)
- release_cstring(&buf)
- return resultlist
-
- def sub(self, repl, string, int count=0):
- """sub(repl, string[, count = 0]) --> newstring
-
- Return the string obtained by replacing the leftmost non-overlapping
- occurrences of pattern in string by the replacement repl."""
- cdef int num_repl = 0
- return self._subn(repl, string, count, &num_repl)
-
- def subn(self, repl, string, int count=0):
- """subn(repl, string[, count = 0]) --> (newstring, number of subs)
-
- Return the tuple (new_string, number_of_subs_made) found by replacing
- the leftmost non-overlapping occurrences of pattern with the
- replacement repl."""
- cdef int num_repl = 0
- result = self._subn(repl, string, count, &num_repl)
- return result, num_repl
-
- cdef _subn(self, repl, string, int count, int *num_repl):
- cdef bytes repl_b
- cdef char * cstring
- cdef object result
- cdef Py_ssize_t size
- cdef StringPiece * sp = NULL
- cdef cpp_string * input_str = NULL
- cdef int string_encoded = 0
- cdef int repl_encoded = 0
-
- if callable(repl):
- # This is a callback, so use the custom function
- return self._subn_callback(repl, string, count, num_repl)
-
- repl_b = unicode_to_bytes(repl, &repl_encoded, self.encoded)
- if not repl_encoded and not isinstance(repl, bytes):
- repl_b = bytes(repl) # coerce buffer to bytes object
-
- if count > 1 or (b'\\' if PY2 else <char>b'\\') in repl_b:
- # Limit on number of substitutions or replacement string contains
- # escape sequences; handle with Match.expand() implementation.
- # RE2 does support simple numeric group references \1, \2,
- # but the number of differences with Python behavior is
- # non-trivial.
- return self._subn_expand(repl_b, string, count, num_repl)
- try:
- cstring = repl_b
- size = len(repl_b)
- sp = new StringPiece(cstring, size)
-
- bytestr = unicode_to_bytes(string, &string_encoded, self.encoded)
- if not string_encoded and not isinstance(bytestr, bytes):
- bytestr = bytes(bytestr) # coerce buffer to bytes object
- input_str = new cpp_string(<char *>bytestr, len(bytestr))
- # NB: RE2 treats unmatched groups in repl as empty string;
- # Python raises an error.
- with nogil:
- if count == 0:
- num_repl[0] = GlobalReplace(
- input_str, self.re_pattern[0], sp[0])
- elif count == 1:
- num_repl[0] = Replace(
- input_str, self.re_pattern[0], sp[0])
-
- if string_encoded or (repl_encoded and num_repl[0] > 0):
- result = cpp_to_unicode(input_str[0])
- else:
- result = cpp_to_bytes(input_str[0])
- finally:
- del input_str, sp
- return result
-
- cdef _subn_callback(self, callback, string, int count, int * num_repl):
- # This function is probably the hardest to implement correctly.
- # This is my first attempt, but if anybody has a better solution,
- # please help out.
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef int prevendpos = -1
- cdef int endpos = 0
- cdef int pos = 0
- cdef int encoded = 0
- cdef StringPiece * sp
- cdef Match m
- cdef bytearray result = bytearray()
- cdef int cpos = 0, upos = 0
-
- if count < 0:
- count = 0
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- sp = new StringPiece(cstring, size)
- try:
- while True:
- m = Match(self, self.groups + 1)
- m.string = string
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- m.matches,
- self.groups + 1)
- if retval == 0:
- break
-
- endpos = m.matches[0].data() - cstring
- if endpos == prevendpos:
- endpos += 1
- if endpos > size:
- break
- prevendpos = endpos
- result.extend(sp.data()[pos:endpos])
- pos = endpos + m.matches[0].length()
-
- m.encoded = encoded
- m.nmatches = self.groups + 1
- m._make_spans(cstring, size, &cpos, &upos)
- m._init_groups()
- tmp = callback(m)
- if tmp:
- result.extend(tmp.encode('utf8') if encoded else tmp)
- else:
- result.extend(b'')
-
- num_repl[0] += 1
- if count and num_repl[0] >= count:
- break
- result.extend(sp.data()[pos:])
- finally:
- del sp
- release_cstring(&buf)
- return result.decode('utf8') if encoded else bytes(result)
-
- cdef _subn_expand(self, bytes repl, string, int count, int * num_repl):
- """Perform ``count`` substitutions with replacement string and
- Match.expand."""
- cdef char * cstring
- cdef Py_ssize_t size
- cdef Py_buffer buf
- cdef int retval
- cdef int prevendpos = -1
- cdef int endpos = 0
- cdef int pos = 0
- cdef int encoded = 0
- cdef StringPiece * sp
- cdef Match m
- cdef bytearray result = bytearray()
-
- if count < 0:
- count = 0
-
- bytestr = unicode_to_bytes(string, &encoded, self.encoded)
- if pystring_to_cstring(bytestr, &cstring, &size, &buf) == -1:
- raise TypeError('expected string or buffer')
- sp = new StringPiece(cstring, size)
- try:
- while True:
- m = Match(self, self.groups + 1)
- m.string = string
- with nogil:
- retval = self.re_pattern.Match(
- sp[0],
- pos,
- size,
- UNANCHORED,
- m.matches,
- self.groups + 1)
- if retval == 0:
- break
-
- endpos = m.matches[0].data() - cstring
- if endpos == prevendpos:
- endpos += 1
- if endpos > size:
- break
- prevendpos = endpos
- result.extend(sp.data()[pos:endpos])
- pos = endpos + m.matches[0].length()
-
- m.encoded = encoded
- m.nmatches = self.groups + 1
- m._init_groups()
- m._expand(repl, result)
-
- num_repl[0] += 1
- if count and num_repl[0] >= count:
- break
- result.extend(sp.data()[pos:])
- finally:
- del sp
- release_cstring(&buf)
- return result.decode('utf8') if encoded else bytes(result)
-
- def scanner(self, arg):
- return re.compile(self.pattern).scanner(arg)
- # raise NotImplementedError
-
- def _dump_pattern(self):
- cdef cpp_string s = self.re_pattern.pattern()
- if self.encoded:
- return cpp_to_bytes(s).decode('utf8')
- return cpp_to_bytes(s)
-
- def __repr__(self):
- if self.flags == 0:
- return 're2.compile(%r)' % self.pattern
- return 're2.compile(%r, %r)' % (self.pattern, self.flags)
-
- def __reduce__(self):
- return (compile, (self.pattern, self.flags))
-
- def __dealloc__(self):
- del self.re_pattern
-
-
-class PythonRePattern:
- """A wrapper for re.Pattern to support the extra methods defined by re2
- (contains, count)."""
- def __init__(self, pattern, flags=None):
- self._pattern = re.compile(pattern, flags)
- self.pattern = pattern
- self.flags = flags
- self.groupindex = self._pattern.groupindex
- self.groups = self._pattern.groups
-
- def contains(self, string):
- return bool(self._pattern.search(string))
-
- def count(self, string, pos=0, endpos=9223372036854775807):
- return len(self._pattern.findall(string, pos, endpos))
-
- def findall(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.findall(string, pos, endpos)
-
- def finditer(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.finditer(string, pos, endpos)
-
- def fullmatch(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.fullmatch(string, pos, endpos)
-
- def match(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.match(string, pos, endpos)
-
- def scanner(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.scanner(string, pos, endpos)
-
- def search(self, string, pos=0, endpos=9223372036854775807):
- return self._pattern.search(string, pos, endpos)
-
- def split(self, string, maxsplit=0):
- return self._pattern.split(string, maxsplit)
-
- def sub(self, repl, string, count=0):
- return self._pattern.sub(repl, string, count)
-
- def subn(self, repl, string, count=0):
- return self._pattern.subn(repl, string, count)
-
- def __repr__(self):
- return repr(self._pattern)
-
- def __reduce__(self):
- return (self, (self.pattern, self.flags))
diff --git a/contrib/python/pyre2/py3/src/re2.pyx b/contrib/python/pyre2/py3/src/re2.pyx
deleted file mode 100644
index c48101426f..0000000000
--- a/contrib/python/pyre2/py3/src/re2.pyx
+++ /dev/null
@@ -1,458 +0,0 @@
-# cython: infer_types(False)
-r"""Regular expressions using Google's RE2 engine.
-
-Compared to Python's ``re``, the RE2 engine compiles regular expressions to
-deterministic finite automata, which guarantees linear-time behavior.
-
-Intended as a drop-in replacement for ``re``. Unicode is supported by encoding
-to UTF-8, and bytes strings are treated as UTF-8 when the UNICODE flag is given.
-For best performance, work with UTF-8 encoded bytes strings.
-
-Regular expressions that are not compatible with RE2 are processed with
-fallback to ``re``. Examples of features not supported by RE2:
-
- - lookahead assertions ``(?!...)``
- - backreferences (``\\n`` in search pattern)
- - \W and \S not supported inside character classes
-
-On the other hand, unicode character classes are supported (e.g., ``\p{Greek}``).
-Syntax reference: https://github.com/google/re2/wiki/Syntax
-
-What follows is a reference for the regular expression syntax supported by this
-module (i.e., without requiring fallback to `re`).
-
-Regular expressions can contain both special and ordinary characters.
-Most ordinary characters, like "A", "a", or "0", are the simplest
-regular expressions; they simply match themselves.
-
-The special characters are::
-
- "." Matches any character except a newline.
- "^" Matches the start of the string.
- "$" Matches the end of the string or just before the newline at
- the end of the string.
- "*" Matches 0 or more (greedy) repetitions of the preceding RE.
- Greedy means that it will match as many repetitions as possible.
- "+" Matches 1 or more (greedy) repetitions of the preceding RE.
- "?" Matches 0 or 1 (greedy) of the preceding RE.
- *?,+?,?? Non-greedy versions of the previous three special characters.
- {m,n} Matches from m to n repetitions of the preceding RE.
- {m,n}? Non-greedy version of the above.
- "\\" Either escapes special characters or signals a special sequence.
- [] Indicates a set of characters.
- A "^" as the first character indicates a complementing set.
- "|" A|B, creates an RE that will match either A or B.
- (...) Matches the RE inside the parentheses.
- The contents can be retrieved or matched later in the string.
- (?:...) Non-grouping version of regular parentheses.
- (?imsux) Set the I, M, S, U, or X flag for the RE (see below).
-
-The special sequences consist of "\\" and a character from the list
-below. If the ordinary character is not on the list, then the
-resulting RE will match the second character::
-
- \A Matches only at the start of the string.
- \Z Matches only at the end of the string.
- \b Matches the empty string, but only at the start or end of a word.
- \B Matches the empty string, but not at the start or end of a word.
- \d Matches any decimal digit.
- \D Matches any non-digit character.
- \s Matches any whitespace character.
- \S Matches any non-whitespace character.
- \w Matches any alphanumeric character.
- \W Matches the complement of \w.
- \\ Matches a literal backslash.
- \pN Unicode character class (one-letter name)
- \p{Greek} Unicode character class
- \PN negated Unicode character class (one-letter name)
- \P{Greek} negated Unicode character class
-
-This module exports the following functions (a brief usage sketch follows)::
-
- count Count all occurrences of a pattern in a string.
- match Match a regular expression pattern to the beginning of a string.
- fullmatch Match a regular expression pattern to all of a string.
- search Search a string for a pattern and return Match object.
- contains Same as search, but only return bool.
- sub Substitute occurrences of a pattern found in a string.
- subn Same as sub, but also return the number of substitutions made.
- split Split a string by the occurrences of a pattern.
- findall Find all occurrences of a pattern in a string.
- finditer Return an iterator yielding a match object for each match.
- compile Compile a pattern into a RegexObject.
- purge Clear the regular expression cache.
- escape Backslash all non-alphanumerics in a string.
-
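-A minimal usage sketch (the literal outputs are what the functions above are
-expected to return for these inputs)::
-
-    >>> import re2
-    >>> re2.findall(r'\d+', 'a1b22c333')
-    ['1', '22', '333']
-    >>> re2.sub(r'\s+', ' ', 'linear  time   matching')
-    'linear time matching'
-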
-Some of the functions in this module take flags as optional parameters::
-
- A ASCII Make \w, \W, \b, \B, \d, \D match the corresponding ASCII
- character categories (rather than the whole Unicode
- categories, which is the default).
- I IGNORECASE Perform case-insensitive matching.
- M MULTILINE "^" matches the beginning of lines (after a newline)
- as well as the string.
- "$" matches the end of lines (before a newline) as well
- as the end of the string.
- S DOTALL "." matches any character at all, including the newline.
- X VERBOSE Ignore whitespace and comments for nicer looking RE's.
- U UNICODE Enable Unicode character classes and make \w, \W, \b, \B,
- Unicode-aware (default for unicode patterns).
-
-This module also defines an exception 'RegexError' (also available under the
-alias 'error').
-
-"""
-
-include "includes.pxi"
-
-import re
-import sys
-import warnings
-from re import error as RegexError
-
-error = re.error
-
-# Import re flags to be compatible.
-I, M, S, U, X, L = re.I, re.M, re.S, re.U, re.X, re.L
-IGNORECASE = re.IGNORECASE
-MULTILINE = re.MULTILINE
-DOTALL = re.DOTALL
-UNICODE = re.UNICODE
-VERBOSE = re.VERBOSE
-LOCALE = re.LOCALE
-DEBUG = re.DEBUG
-ASCII = 256 # Python 3
-
-FALLBACK_QUIETLY = 0
-FALLBACK_WARNING = 1
-FALLBACK_EXCEPTION = 2
-
-VERSION = (0, 2, 23)
-VERSION_HEX = 0x000217
-
-cdef int _I = I, _M = M, _S = S, _U = U, _X = X, _L = L
-cdef int current_notification = FALLBACK_QUIETLY
-cdef bint PY2 = PY_MAJOR_VERSION == 2
-
-# Type of compiled re object from Python stdlib
-SREPattern = type(re.compile(''))
-
-_cache = {}
-_cache_repl = {}
-
-_MAXCACHE = 100
-
-
-include "compile.pxi"
-include "pattern.pxi"
-include "match.pxi"
-
-
-def purge():
- """Clear the regular expression caches."""
- _cache.clear()
- _cache_repl.clear()
-
-
-def search(pattern, string, int flags=0):
- """Scan through string looking for a match to the pattern, returning
- a ``Match`` object, or ``None`` if no match was found."""
- return compile(pattern, flags).search(string)
-
-
-def match(pattern, string, int flags=0):
- """Try to apply the pattern at the start of the string, returning
- a ``Match`` object, or ``None`` if no match was found."""
- return compile(pattern, flags).match(string)
-
-
-def fullmatch(pattern, string, int flags=0):
- """Try to apply the pattern to the entire string, returning
- a ``Match`` object, or ``None`` if no match was found."""
- return compile(pattern, flags).fullmatch(string)
-
-
-def contains(pattern, string, int flags=0):
- """Scan through string looking for a match to the pattern, returning
- True or False."""
- return compile(pattern, flags).contains(string)
-
-
-def finditer(pattern, string, int flags=0):
- """Yield all non-overlapping matches in the string.
-
- For each match, the iterator returns a ``Match`` object.
- Empty matches are included in the result."""
- return compile(pattern, flags).finditer(string)
-
-
-def findall(pattern, string, int flags=0):
- """Return a list of all non-overlapping matches in the string.
-
- Each match is represented as a string or a tuple (when there are two or
- more groups). Empty matches are included in the result."""
- return compile(pattern, flags).findall(string)
-
-
-def count(pattern, string, int flags=0):
- """Return number of non-overlapping matches in the string.
-
- Empty matches are included in the count."""
- return compile(pattern, flags).count(string)
-
-
-def split(pattern, string, int maxsplit=0, int flags=0):
- """Split the source string by the occurrences of the pattern,
- returning a list containing the resulting substrings."""
- return compile(pattern, flags).split(string, maxsplit)
-
-
-def sub(pattern, repl, string, int count=0, int flags=0):
- """Return the string obtained by replacing the leftmost
- non-overlapping occurrences of the pattern in string by the
- replacement ``repl``. ``repl`` can be either a string or a callable;
- if a string, backslash escapes in it are processed. If it is
- a callable, it's passed the ``Match`` object and must return
- a replacement string to be used."""
- return compile(pattern, flags).sub(repl, string, count)
-
-
-def subn(pattern, repl, string, int count=0, int flags=0):
- """Return a 2-tuple containing ``(new_string, number)``.
- new_string is the string obtained by replacing the leftmost
- non-overlapping occurrences of the pattern in the source
- string by the replacement ``repl``. ``number`` is the number of
- substitutions that were made. ``repl`` can be either a string or a
- callable; if a string, backslash escapes in it are processed.
- If it is a callable, it's passed the ``Match`` object and must
- return a replacement string to be used."""
- return compile(pattern, flags).subn(repl, string, count)
-
-
-def escape(pattern):
- """Escape all non-alphanumeric characters in pattern."""
- cdef bint uni = isinstance(pattern, unicode)
- cdef list s
- if PY2 or uni:
- s = list(pattern)
- else:
- s = [bytes([c]) for c in pattern]
- for i in range(len(pattern)):
- # c = pattern[i]
- c = s[i]
- if ord(c) < 0x80 and not c.isalnum():
- if uni:
- if c == u'\000':
- s[i] = u'\\000'
- else:
- s[i] = u"\\" + c
- else:
- if c == b'\000':
- s[i] = b'\\000'
- else:
- s[i] = b'\\' + c
- return u''.join(s) if uni else b''.join(s)
-
-
-class BackreferencesException(Exception):
- """Search pattern contains backreferences."""
- pass
-
-
-class CharClassProblemException(Exception):
- """Search pattern contains unsupported character class."""
- pass
-
-
-def set_fallback_notification(level):
- """Set the fallback notification to a level; one of:
- FALLBACK_QUIETLY
- FALLBACK_WARNING
- FALLBACK_EXCEPTION
- """
- global current_notification
- level = int(level)
- if level < 0 or level > 2:
- raise ValueError("This function expects a valid notification level.")
- current_notification = level
-
-
-cdef bint ishex(unsigned char c):
- """Test whether ``c`` is in ``[0-9a-fA-F]``"""
- return (b'0' <= c <= b'9' or b'a' <= c <= b'f' or b'A' <= c <= b'F')
-
-
-cdef bint isoct(unsigned char c):
- """Test whether ``c`` is in ``[0-7]``"""
- return b'0' <= c <= b'7'
-
-
-cdef bint isdigit(unsigned char c):
- """Test whether ``c`` is in ``[0-9]``"""
- return b'0' <= c <= b'9'
-
-
-cdef bint isident(unsigned char c):
- """Test whether ``c`` is in ``[a-zA-Z0-9_]``"""
- return (b'a' <= c <= b'z' or b'A' <= c <= b'Z'
- or b'0' <= c <= b'9' or c == b'_')
-
-
-cdef inline bytes cpp_to_bytes(cpp_string input):
- """Convert from a std::string object to a python string."""
- # By taking the slice we go to the right size,
- # despite spurious or missing null characters.
- return input.data()[:input.length()]
-
-
-cdef inline unicode cpp_to_unicode(cpp_string input):
- """Convert a std::string object to a unicode string."""
- return cpython.unicode.PyUnicode_DecodeUTF8(
- input.data(), input.length(), 'strict')
-
-
-cdef inline unicode char_to_unicode(const char * input, int length):
- """Convert a C string to a unicode string."""
- return cpython.unicode.PyUnicode_DecodeUTF8(input, length, 'strict')
-
-
-cdef inline unicode_to_bytes(object pystring, int * encoded,
- int checkotherencoding):
- """Convert a unicode string to a utf8 bytes object, if necessary.
-
- If pystring is a bytes string or a buffer, return unchanged.
- If checkotherencoding is 0 or 1 and using Python 3, raise an error
- if its truth value is not equal to that of encoded.
- encoded is set to 1 if encoded string can be treated as ASCII,
- and 2 if it contains multibyte unicode characters."""
- if cpython.unicode.PyUnicode_Check(pystring):
- origlen = len(pystring)
- pystring = pystring.encode('utf8')
- encoded[0] = 1 if origlen == len(pystring) else 2
- else:
- encoded[0] = 0
- if not PY2 and checkotherencoding > 0 and not encoded[0]:
- raise TypeError("can't use a string pattern on a bytes-like object")
- elif not PY2 and checkotherencoding == 0 and encoded[0]:
- raise TypeError("can't use a bytes pattern on a string-like object")
- return pystring
-
-
-cdef inline int pystring_to_cstring(
- object pystring, char ** cstring, Py_ssize_t * size,
- Py_buffer * buf):
- """Get a pointer from bytes/buffer object ``pystring``.
-
- On success, return 0, and set ``cstring``, ``size``, and ``buf``."""
- cdef int result = -1
- cstring[0] = NULL
- size[0] = 0
- if PyObject_CheckBuffer(pystring) == 1: # new-style Buffer interface
- result = PyObject_GetBuffer(pystring, buf, PyBUF_SIMPLE)
- if result == 0:
- cstring[0] = <char *>buf.buf
- size[0] = buf.len
- return result
-
-
-cdef inline void release_cstring(Py_buffer *buf):
- """Release buffer if necessary."""
- if not PY2:
- PyBuffer_Release(buf)
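These two helpers are the standard Py_buffer acquire/release pair; the user-visible effect is that any buffer-protocol object can be searched, as test_mmap.txt below does with an mmap. A small hedged sketch using a memoryview:

```python
import re2

# finditer accepts any buffer-protocol object directly; matches come back as bytes.
data = memoryview(b"foo bar baz")
print([m.group() for m in re2.finditer(b"\\w+", data)])  # [b'foo', b'bar', b'baz']
```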
-
-
-cdef utf8indices(char * cstring, int size, int *pos, int *endpos):
- """Convert unicode indices ``pos`` and ``endpos`` to UTF-8 indices.
-
- If the indices are out of range, leave them unchanged."""
- cdef unsigned char * data = <unsigned char *>cstring
- cdef int newpos = pos[0], newendpos = -1
- cdef int cpos = 0, upos = 0
- while cpos < size:
- if data[cpos] < 0x80:
- cpos += 1
- upos += 1
- elif data[cpos] < 0xe0:
- cpos += 2
- upos += 1
- elif data[cpos] < 0xf0:
- cpos += 3
- upos += 1
- else:
- cpos += 4
- upos += 1
- # wide unicode chars get 2 unichars when Python <3.3 is compiled
- # with --enable-unicode=ucs2
- emit_if_narrow_unicode()
- upos += 1
- emit_endif()
-
- if upos == pos[0]:
- newpos = cpos
- if endpos[0] == -1:
- break
- elif upos == endpos[0]:
- newendpos = cpos
- break
- pos[0] = newpos
- endpos[0] = newendpos
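The 0x80/0xe0/0xf0 thresholds above follow the usual UTF-8 leading-byte rule (1 to 4 bytes per code point). The same forward mapping can be written directly in Python 3; a short sketch, checked against the span shift shown in test_unicode.txt later in this diff:

```python
def utf8_index(text: str, pos: int) -> int:
    """Byte offset in text.encode('utf8') of the code point at index pos."""
    return len(text[:pos].encode("utf8"))

s = "\U0001d200xxx\u1234 x"
# Code-point index 6 (the final 'x') sits at byte offset 11, matching the
# (6, 7) -> (11, 12) span translation shown in test_unicode.txt.
assert utf8_index(s, 6) == 11
```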
-
-
-cdef void unicodeindices(map[int, int] &positions,
- char * cstring, int size, int * cpos, int * upos):
- """Convert UTF-8 byte indices to unicode indices."""
- cdef unsigned char * s = <unsigned char *>cstring
- cdef map[int, int].iterator it = positions.begin()
-
- if dereference(it).first == -1:
- dereference(it).second = -1
- postincrement(it)
- if it == positions.end():
- return
- if dereference(it).first == cpos[0]:
- dereference(it).second = upos[0]
- postincrement(it)
- if it == positions.end():
- return
-
- while cpos[0] < size:
- if s[cpos[0]] < 0x80:
- cpos[0] += 1
- upos[0] += 1
- elif s[cpos[0]] < 0xe0:
- cpos[0] += 2
- upos[0] += 1
- elif s[cpos[0]] < 0xf0:
- cpos[0] += 3
- upos[0] += 1
- else:
- cpos[0] += 4
- upos[0] += 1
- # wide unicode chars get 2 unichars when Python <3.3 is compiled
- # with --enable-unicode=ucs2
- emit_if_narrow_unicode()
- upos[0] += 1
- emit_endif()
-
- if dereference(it).first == cpos[0]:
- dereference(it).second = upos[0]
- postincrement(it)
- if it == positions.end():
- break
-
-
-__all__ = [
- # exceptions
- 'BackreferencesException', 'CharClassProblemException',
- 'RegexError', 'error',
- # constants
- 'FALLBACK_EXCEPTION', 'FALLBACK_QUIETLY', 'FALLBACK_WARNING', 'DEBUG',
- 'S', 'DOTALL', 'I', 'IGNORECASE', 'L', 'LOCALE', 'M', 'MULTILINE',
- 'U', 'UNICODE', 'X', 'VERBOSE', 'VERSION', 'VERSION_HEX',
- # classes
- 'Match', 'Pattern', 'SREPattern',
- # functions
- 'compile', 'count', 'escape', 'findall', 'finditer', 'fullmatch',
- 'match', 'purge', 'search', 'split', 'sub', 'subn',
- 'set_fallback_notification',
- ]
diff --git a/contrib/python/pyre2/py3/tests/test_charliterals.txt b/contrib/python/pyre2/py3/tests/test_charliterals.txt
deleted file mode 100644
index 2eaea128a3..0000000000
--- a/contrib/python/pyre2/py3/tests/test_charliterals.txt
+++ /dev/null
@@ -1,47 +0,0 @@
- >>> import re2 as re
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
-character literals:
-
- >>> i = 126
- >>> re.compile(r"\%03o" % i)
- re2.compile('\\176')
- >>> re.compile(r"\%03o" % i)._dump_pattern()
- '\\176'
- >>> re.match(r"\%03o" % i, chr(i)) is None
- False
- >>> re.match(r"\%03o0" % i, chr(i) + "0") is None
- False
- >>> re.match(r"\%03o8" % i, chr(i) + "8") is None
- False
- >>> re.match(r"\x%02x" % i, chr(i)) is None
- False
- >>> re.match(r"\x%02x0" % i, chr(i) + "0") is None
- False
- >>> re.match(r"\x%02xz" % i, chr(i) + "z") is None
- False
- >>> re.match("\911", "") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- re.error: invalid escape sequence: \9
-
-character class literals:
-
- >>> re.match(r"[\%03o]" % i, chr(i)) is None
- False
- >>> re.match(r"[\%03o0]" % i, chr(i) + "0") is None
- False
- >>> re.match(r"[\%03o8]" % i, chr(i) + "8") is None
- False
- >>> re.match(r"[\x%02x]" % i, chr(i)) is None
- False
- >>> re.match(r"[\x%02x0]" % i, chr(i) + "0") is None
- False
- >>> re.match(r"[\x%02xz]" % i, chr(i) + "z") is None
- False
- >>> re.match("[\911]", "") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- re.error: invalid escape sequence: \9
-
diff --git a/contrib/python/pyre2/py3/tests/test_count.txt b/contrib/python/pyre2/py3/tests/test_count.txt
deleted file mode 100644
index ce3525adc5..0000000000
--- a/contrib/python/pyre2/py3/tests/test_count.txt
+++ /dev/null
@@ -1,40 +0,0 @@
-count tests
-===========
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-This one is from http://docs.python.org/library/re.html?#finding-all-adverbs:
-
- >>> re2.count(r"\w+ly", "He was carefully disguised but captured quickly by police.")
- 2
-
-Groups should not affect count():
-
- >>> re2.count(r"(\w+)=(\d+)", "foo=1,foo=2")
- 2
- >>> re2.count(r"(\w)\w", "fx")
- 1
-
-Zero matches:
-
- >>> re2.count("(f)", "gggg")
- 0
-
-A pattern matching an empty string:
-
- >>> re2.count(".*", "foo")
- 2
-
- >>> re2.count("", "foo")
- 4
-
-contains tests
-==============
-
- >>> re2.contains('a', 'bbabb')
- True
- >>> re2.contains('a', 'bbbbb')
- False
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_emptygroups.txt b/contrib/python/pyre2/py3/tests/test_emptygroups.txt
deleted file mode 100644
index 424c8ba25e..0000000000
--- a/contrib/python/pyre2/py3/tests/test_emptygroups.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-Empty/unused groups
-===================
-
- >>> import re
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-Unused vs. empty group:
-
- >>> re.search( '(foo)?((.*).)(bar)?', 'a').groups()
- (None, 'a', '', None)
- >>> re2.search('(foo)?((.*).)(bar)?', 'a').groups()
- (None, 'a', '', None)
-
- >>> re.search(r'((.*)?.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)?.)', 'a').groups()
- ('a', '')
- >>> re.search(r'((.*)+.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)+.)', 'a').groups()
- ('a', '')
-
-The following show different behavior for re and re2:
-
- >>> re.search(r'((.*)*.)', 'a').groups()
- ('a', '')
- >>> re2.search(r'((.*)*.)', 'a').groups()
- ('a', None)
-
- >>> re.search(r'((.*)*.)', 'Hello').groups()
- ('Hello', '')
- >>> re2.search(r'((.*)*.)', 'Hello').groups()
- ('Hello', 'Hell')
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_findall.txt b/contrib/python/pyre2/py3/tests/test_findall.txt
deleted file mode 100644
index c753b936df..0000000000
--- a/contrib/python/pyre2/py3/tests/test_findall.txt
+++ /dev/null
@@ -1,42 +0,0 @@
-findall tests
-=============
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-This one is from http://docs.python.org/library/re.html?#finding-all-adverbs:
-
- >>> re2.findall(r"\w+ly", "He was carefully disguised but captured quickly by police.")
- ['carefully', 'quickly']
-
-This one makes sure all groups are found:
-
- >>> re2.findall(r"(\w+)=(\d+)", "foo=1,foo=2")
- [('foo', '1'), ('foo', '2')]
-
-When there's only one matched group, it should not be returned in a tuple:
-
- >>> re2.findall(r"(\w)\w", "fx")
- ['f']
-
-Zero matches is an empty list:
-
- >>> re2.findall("(f)", "gggg")
- []
-
-If the pattern matches an empty string, match it only once at the end:
-
- >>> re2.findall(".*", "foo")
- ['foo', '']
-
- >>> re2.findall("", "foo")
- ['', '', '', '']
-
-
- >>> import re
- >>> re.findall(r'\b', 'The quick brown fox jumped over the lazy dog')
- ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
- >>> re2.findall(r'\b', 'The quick brown fox jumped over the lazy dog')
- ['', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '']
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_finditer.txt b/contrib/python/pyre2/py3/tests/test_finditer.txt
deleted file mode 100644
index 3d60d199c7..0000000000
--- a/contrib/python/pyre2/py3/tests/test_finditer.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-Simple tests for the ``finditer`` function.
-===========================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> with open('tests/cnn_homepage.dat') as tmp:
- ... data = tmp.read()
- >>> len(list(re2.finditer(r'\w+', data)))
- 14230
-
- >>> [m.group(1) for m in re2.finditer(r'\n#hdr-editions(.*?)\n', data)]
- [' a { text-decoration:none; }', ' li { padding:0 10px; }', ' ul li.no-pad-left span { font-size:12px; }']
-
- >>> [m.group(1) for m in re2.finditer(r'^#hdr-editions(.*?)$',
- ... data, re2.M)]
- [' a { text-decoration:none; }', ' li { padding:0 10px; }', ' ul li.no-pad-left span { font-size:12px; }']
-
- >>> for a in re2.finditer(r'\b', 'foo bar zed'): print(a)
- <re2.Match object; span=(0, 0), match=''>
- <re2.Match object; span=(3, 3), match=''>
- <re2.Match object; span=(4, 4), match=''>
- <re2.Match object; span=(7, 7), match=''>
- <re2.Match object; span=(8, 8), match=''>
- <re2.Match object; span=(11, 11), match=''>
-
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_match_expand.txt b/contrib/python/pyre2/py3/tests/test_match_expand.txt
deleted file mode 100644
index b3d5652c76..0000000000
--- a/contrib/python/pyre2/py3/tests/test_match_expand.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-Match Expand Tests
-==================
-
-Match objects have an .expand() method which allows them to
-expand templates as if the .sub() method was called on the pattern.
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> m = re2.match("(\\w+) (\\w+)\\W+(?P<title>\\w+)", "Isaac Newton, physicist")
- >>> m.expand("\\2, \\1")
- 'Newton, Isaac'
- >>> m.expand("\\1 \\g<title>")
- 'Isaac physicist'
- >>> m.expand("\\2, \\1 \\2")
- 'Newton, Isaac Newton'
- >>> m.expand("\\3")
- 'physicist'
- >>> m.expand("\\1 \\g<foo>") # doctest: +IGNORE_EXCEPTION_DETAIL +ELLIPSIS
- Traceback (most recent call last):
- ...
- IndexError: no such group 'foo'; available groups: ['title']
- >>> m.expand("\\0")
- '\x00'
- >>> m.expand("\01")
- '\x01'
- >>> m.expand('\t\n\x0b\r\x0c\x07\x08\\B\\Z\x07\\A\\w\\W\\s\\S\\d\\D')
- '\t\n\x0b\r\x0c\x07\x08\\B\\Z\x07\\A\\w\\W\\s\\S\\d\\D'
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_mmap.txt b/contrib/python/pyre2/py3/tests/test_mmap.txt
deleted file mode 100644
index 12ffa97498..0000000000
--- a/contrib/python/pyre2/py3/tests/test_mmap.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-
-Testing re2 on buffer object
-============================
-
- >>> import re2
- >>> import mmap
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> tmp = open("tests/cnn_homepage.dat", "rb+")
- >>> data = mmap.mmap(tmp.fileno(), 0)
-
- >>> len(list(re2.finditer(b'\\w+', data)))
- 14230
-
- >>> data.close()
- >>> tmp.close()
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_namedgroups.txt b/contrib/python/pyre2/py3/tests/test_namedgroups.txt
deleted file mode 100644
index 70f561a39f..0000000000
--- a/contrib/python/pyre2/py3/tests/test_namedgroups.txt
+++ /dev/null
@@ -1,56 +0,0 @@
-Testing some aspects of named groups
-====================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
- >>> m = re2.match(r"(?P<first_name>\w+) (?P<last_name>\w+)", "Malcolm Reynolds")
- >>> m.start("first_name")
- 0
- >>> m.start("last_name")
- 8
-
- >>> m.span("last_name")
- (8, 16)
- >>> m.regs
- ((0, 16), (0, 7), (8, 16))
-
- >>> m = re2.match(u"(?P<first_name>\\w+) (?P<last_name>\\w+)", u"Malcolm Reynolds")
- >>> m.start(u"first_name")
- 0
- >>> m.start(u"last_name")
- 8
-
- >>> m.span(u"last_name")
- (8, 16)
- >>> m.regs
- ((0, 16), (0, 7), (8, 16))
-
-Compare patterns with and without unicode
-
- >>> pattern = re2.compile(br"(?P<first_name>\w+) (?P<last_name>\w+)")
- >>> print(pattern._dump_pattern().decode('utf8'))
- (?P<first_name>\w+) (?P<last_name>\w+)
- >>> pattern = re2.compile(u"(?P<first_name>\\w+) (?P<last_name>\\w+)",
- ... re2.UNICODE)
- >>> print(pattern._dump_pattern())
- (?P<first_name>[_\p{L}\p{Nd}]+) (?P<last_name>[_\p{L}\p{Nd}]+)
-
-Make sure positions are converted properly for unicode
-
- >>> m = pattern.match(
- ... u'\u05d9\u05e9\u05e8\u05d0\u05dc \u05e6\u05d3\u05d5\u05e7')
- >>> m.start(u"first_name")
- 0
- >>> m.start(u"last_name")
- 6
- >>> m.end(u"last_name")
- 10
- >>> m.regs
- ((0, 10), (0, 5), (6, 10))
- >>> m.span(2)
- (6, 10)
- >>> m.span(u"last_name")
- (6, 10)
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_pattern.txt b/contrib/python/pyre2/py3/tests/test_pattern.txt
deleted file mode 100644
index aab47359a2..0000000000
--- a/contrib/python/pyre2/py3/tests/test_pattern.txt
+++ /dev/null
@@ -1,12 +0,0 @@
-pattern tests
-=============
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
-
-We should be able to get back what we put in.
-
- >>> re2.compile("(foo|b[a]r?)").pattern
- '(foo|b[a]r?)'
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_search.txt b/contrib/python/pyre2/py3/tests/test_search.txt
deleted file mode 100644
index 9c1e18f08c..0000000000
--- a/contrib/python/pyre2/py3/tests/test_search.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-These are simple tests of the ``search`` function
-=================================================
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
- >>> re2.search("((?:[01]?\d{1,2}|2[0-4]\d|25[0-5])\.){3}(?:[01]?\d{1,2}|2[0-4]\d|25[0-5])", "hello 28.224.2.1 test").group()
- '28.224.2.1'
-
- >>> re2.search("(\d{3})\D?(\d{3})\D?(\d{4})", "800-555-1212").groups()
- ('800', '555', '1212')
-
- >>> input = 'a' * 999
- >>> len(re2.search('(?:a{1000})?a{999}', input).group())
- 999
-
- >>> with open('tests/cnn_homepage.dat') as tmp:
- ... data = tmp.read()
- >>> re2.search(r'\n#hdr-editions(.*?)\n', data).groups()
- (' a { text-decoration:none; }',)
-
-Some sanity checks with out-of-range positions
-
- >>> re2.compile(r'x').search('x', 2000)
- >>> re2.compile(r'x').search('x', 1, -300)
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_split.txt b/contrib/python/pyre2/py3/tests/test_split.txt
deleted file mode 100644
index a3e44bc605..0000000000
--- a/contrib/python/pyre2/py3/tests/test_split.txt
+++ /dev/null
@@ -1,17 +0,0 @@
-Split tests
-===========
-
-This one tests to make sure that unicode / utf8 data is parsed correctly.
-
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> a = u'\u6211\u5f88\u597d, \u4f60\u5462?'
-
- >>> re2.split(u' ', a) == [u'\u6211\u5f88\u597d,', u'\u4f60\u5462?']
- True
- >>> re2.split(b' ', a.encode('utf8')) == [
- ... b'\xe6\x88\x91\xe5\xbe\x88\xe5\xa5\xbd,',
- ... b'\xe4\xbd\xa0\xe5\x91\xa2?']
- True
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/tests/test_sub.txt b/contrib/python/pyre2/py3/tests/test_sub.txt
deleted file mode 100644
index b41dd30d28..0000000000
--- a/contrib/python/pyre2/py3/tests/test_sub.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-Tests of substitution
-=====================
-
-This first test is just looking to replace things between parentheses
-with an empty string.
-
-
- >>> import hashlib
- >>> import gzip
- >>> import re2
- >>> re2.set_fallback_notification(re2.FALLBACK_EXCEPTION)
- >>> import warnings
- >>> warnings.filterwarnings('ignore', category=DeprecationWarning)
-
- >>> with gzip.open('tests/wikipages.xml.gz', 'rb') as tmp:
- ... data = tmp.read()
- >>> print(hashlib.md5(re2.sub(b'\(.*?\)', b'', data)).hexdigest())
- b7a469f55ab76cd5887c81dbb0cfe6d3
-
- >>> re2.set_fallback_notification(re2.FALLBACK_QUIETLY)
-
-Issue #26 re2.sub replacements with a match of "(.*)" hangs forever
-
- >>> re2.sub('(.*)', r'\1;replacement', 'original')
- 'original;replacement;replacement'
-
- >>> re2.sub('(.*)', lambda x: x.group() + ';replacement', 'original')
- 'original;replacement;replacement'
-
- >>> re2.subn("b*", lambda x: "X", "xyz", 4)
- ('XxXyXzX', 4)
diff --git a/contrib/python/pyre2/py3/tests/test_unicode.txt b/contrib/python/pyre2/py3/tests/test_unicode.txt
deleted file mode 100644
index 71d497b80d..0000000000
--- a/contrib/python/pyre2/py3/tests/test_unicode.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Here are some tests to make sure that utf-8 works
-=================================================
-
- >>> import sys
- >>> import re2 as re
- >>> re.set_fallback_notification(re.FALLBACK_EXCEPTION)
- >>> a = u'\u6211\u5f88\u597d'
- >>> c = re.compile(a[0])
- >>> c.search(a).group() == u'\u6211'
- True
-
-Test unicode stickiness
-
- >>> re.sub(u'x', u'y', u'x') == u'y'
- True
- >>> re.sub(r'x', 'y', 'x') == 'y'
- True
- >>> re.findall('.', 'x') == ['x']
- True
- >>> re.findall(u'.', u'x') == [u'x']
- True
- >>> re.split(',', '1,2,3') == ['1', '2', '3']
- True
- >>> re.split(u',', u'1,2,3') == [u'1', u'2', u'3']
- True
- >>> re.search('(\\d)', '1').group(1) == '1'
- True
- >>> re.search(u'(\\d)', u'1').group(1) == u'1'
- True
-
-Test unicode character groups
-
- >>> re.search(u'\\d', u'\u0661', re.UNICODE).group(0) == u'\u0661'
- True
- >>> int(re.search(u'\\d', u'\u0661', re.UNICODE).group(0)) == 1
- True
- >>> (re.search(u'\\w', u'\u0401') is None) == (sys.version_info[0] == 2)
- True
- >>> re.search(u'\\w', u'\u0401', re.UNICODE).group(0) == u'\u0401'
- True
- >>> re.search(u'\\s', u'\u1680', re.UNICODE).group(0) == u'\u1680'
- True
- >>> re.findall(r'[\s\d\w]', 'hey 123', re.UNICODE) == ['h', 'e', 'y', ' ', '1', '2', '3']
- True
- >>> re.search(u'\\D', u'\u0661x', re.UNICODE).group(0) == u'x'
- True
- >>> re.search(u'\\W', u'\u0401!', re.UNICODE).group(0) == u'!'
- True
- >>> re.search(u'\\S', u'\u1680x', re.UNICODE).group(0) == u'x'
- True
- >>> re.set_fallback_notification(re.FALLBACK_QUIETLY)
- >>> re.search(u'[\\W]', u'\u0401!', re.UNICODE).group(0) == u'!'
- True
- >>> re.search(u'[\\S]', u'\u1680x', re.UNICODE).group(0) == u'x'
- True
- >>> re.set_fallback_notification(re.FALLBACK_EXCEPTION)
-
-
-Positions are translated transparently between unicode and UTF-8
-
- >>> re.search(u' (.)', u'\U0001d200xxx\u1234 x').span(1)
- (6, 7)
- >>> re.search(b' (.)', u'\U0001d200xxx\u1234 x'.encode('utf-8')).span(1)
- (11, 12)
- >>> re.compile(u'x').findall(u'\u1234x', 1, 2) == [u'x']
- True
- >>> data = u'\U0001d200xxx\u1234 x'
- >>> re.search(u' (.)', data).string == data
- True
-
- >>> re.set_fallback_notification(re.FALLBACK_QUIETLY)
diff --git a/contrib/python/pyre2/py3/ya.make b/contrib/python/pyre2/py3/ya.make
deleted file mode 100644
index 920808ff57..0000000000
--- a/contrib/python/pyre2/py3/ya.make
+++ /dev/null
@@ -1,39 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(0.3.6)
-
-LICENSE(BSD-3-Clause)
-
-PEERDIR(
- contrib/libs/re2
-)
-
-ADDINCL(
- contrib/python/pyre2/py3/src
-)
-
-NO_COMPILER_WARNINGS()
-
-NO_LINT()
-
-SRCDIR(contrib/python/pyre2/py3/src)
-
-PY_SRCS(
- TOP_LEVEL
- CYTHON_CPP
- re2.pyx
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/pyre2/py3/
- .dist-info/METADATA
- .dist-info/top_level.txt
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- tests
-)
diff --git a/contrib/python/pyre2/ya.make b/contrib/python/pyre2/ya.make
deleted file mode 100644
index 52c15dd0c8..0000000000
--- a/contrib/python/pyre2/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/pyre2/py2)
-ELSE()
- PEERDIR(contrib/python/pyre2/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
diff --git a/contrib/python/python-magic/py2/LICENSE b/contrib/python/python-magic/py2/LICENSE
deleted file mode 100644
index b8ca4b96dd..0000000000
--- a/contrib/python/python-magic/py2/LICENSE
+++ /dev/null
@@ -1,58 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2001-2014 Adam Hupp
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-====
-
-Portions of this package (magic/compat.py and test/libmagic_test.py)
-are distributed under the following copyright notice:
-
-
-$File: LEGAL.NOTICE,v 1.15 2006/05/03 18:48:33 christos Exp $
-Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
-Software written by Ian F. Darwin and others;
-maintained 1994- Christos Zoulas.
-
-This software is not subject to any export provision of the United States
-Department of Commerce, and may be exported to any country or planet.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
- notice immediately at the beginning of the file, without modification,
- this list of conditions, and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/contrib/python/python-magic/py2/README.md b/contrib/python/python-magic/py2/README.md
deleted file mode 100644
index 9eb70e8a30..0000000000
--- a/contrib/python/python-magic/py2/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# python-magic
-[![PyPI version](https://badge.fury.io/py/python-magic.svg)](https://badge.fury.io/py/python-magic)
-[![Build Status](https://travis-ci.org/ahupp/python-magic.svg?branch=master)](https://travis-ci.org/ahupp/python-magic) [![Join the chat at https://gitter.im/ahupp/python-magic](https://badges.gitter.im/ahupp/python-magic.svg)](https://gitter.im/ahupp/python-magic?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-python-magic is a Python interface to the libmagic file type
-identification library. libmagic identifies file types by checking
-their headers according to a predefined list of file types. This
-functionality is exposed to the command line by the Unix command
-`file`.
-
-## Usage
-
-```python
->>> import magic
->>> magic.from_file("testdata/test.pdf")
-'PDF document, version 1.2'
-# recommend using at least the first 2048 bytes, as less can produce incorrect identification
->>> magic.from_buffer(open("testdata/test.pdf", "rb").read(2048))
-'PDF document, version 1.2'
->>> magic.from_file("testdata/test.pdf", mime=True)
-'application/pdf'
-```
-
-There is also a `Magic` class that provides more direct control,
-including overriding the magic database file and turning on character
-encoding detection. This is not recommended for general use. In
-particular, it's not safe for sharing across multiple threads and
-will throw if this is attempted.
-
-```python
->>> f = magic.Magic(uncompress=True)
->>> f.from_file('testdata/test.gz')
-'ASCII text (gzip compressed data, was "test", last modified: Sat Jun 28
-21:32:52 2008, from Unix)'
-```
-
-You can also combine the flag options:
-
-```python
->>> f = magic.Magic(mime=True, uncompress=True)
->>> f.from_file('testdata/test.gz')
-'text/plain'
-```
-
-## Installation
-
-The current stable version of python-magic is available on PyPI and
-can be installed by running `pip install python-magic`.
-
-Other sources:
-
-- PyPI: http://pypi.python.org/pypi/python-magic/
-- GitHub: https://github.com/ahupp/python-magic
-
-This module is a simple wrapper around the libmagic C library, and
-that must be installed as well:
-
-### Debian/Ubuntu
-
-```
-sudo apt-get install libmagic1
-```
-
-### Windows
-
-You'll need DLLs for libmagic. @julian-r maintains a pypi package with the DLLs, you can fetch it with:
-
-```
-pip install python-magic-bin
-```
-
-### OSX
-
-- When using Homebrew: `brew install libmagic`
-- When using macports: `port install file`
-
-### Troubleshooting
-
-- 'MagicException: could not find any magic files!': some
- installations of libmagic do not correctly point to their magic
- database file. Try specifying the path to the file explicitly in the
- constructor: `magic.Magic(magic_file="path_to_magic_file")`.
-
-- 'WindowsError: [Error 193] %1 is not a valid Win32 application':
- Attempting to run the 32-bit libmagic DLL in a 64-bit build of
- python will fail with this error. Here are 64-bit builds of libmagic for windows: https://github.com/pidydx/libmagicwin64.
-  Newer versions can be found here: https://github.com/nscaife/file-windows.
-
-- 'WindowsError: exception: access violation writing 0x00000000 ' This may indicate you are mixing
- Windows Python and Cygwin Python. Make sure your libmagic and python builds are consistent.
-
-
-## Bug Reports
-
-python-magic is a thin layer over the libmagic C library.
-Historically, most bugs that have been reported against python-magic
-are actually bugs in libmagic; libmagic bugs can be reported on their
-tracker here: https://bugs.astron.com/my_view_page.php. If you're not
-sure where the bug lies feel free to file an issue on GitHub and I can
-triage it.
-
-## Running the tests
-
-To run the tests across a variety of linux distributions (depends on Docker):
-
-```
-./test_docker.sh
-```
-
-To run tests locally across all available python versions:
-
-```
-./test/run.py
-```
-
-To run against a specific python version:
-
-```
-LC_ALL=en_US.UTF-8 python3 test/test.py
-```
-
-## libmagic python API compatibility
-
-The python bindings shipped with libmagic use a module name that conflicts with this package. To work around this, python-magic includes a compatibility layer for the libmagic API. See [COMPAT.md](COMPAT.md) for a guide to libmagic / python-magic compatibility.
-
-## Versioning
-
-Minor version bumps should be backwards compatible. Major bumps are not.
-
-## Author
-
-Written by Adam Hupp in 2001 for a project that never got off the
-ground. It originally used SWIG for the C library bindings, but
-switched to ctypes once that was part of the python standard library.
-
-You can contact me via my [website](http://hupp.org/adam) or
-[GitHub](http://github.com/ahupp).
-
-## License
-
-python-magic is distributed under the MIT license. See the included
-LICENSE file for details.
-
-I am providing code in the repository to you under an open source license. Because this is my personal repository, the license you receive to my code is from me and not my employer (Facebook).
diff --git a/contrib/python/python-magic/py3/.dist-info/METADATA b/contrib/python/python-magic/py3/.dist-info/METADATA
deleted file mode 100644
index 65b11c5555..0000000000
--- a/contrib/python/python-magic/py3/.dist-info/METADATA
+++ /dev/null
@@ -1,171 +0,0 @@
-Metadata-Version: 2.1
-Name: python-magic
-Version: 0.4.27
-Summary: File type identification using libmagic
-Home-page: http://github.com/ahupp/python-magic
-Author: Adam Hupp
-Author-email: adam@hupp.org
-License: MIT
-Keywords: mime magic file
-Platform: UNKNOWN
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Programming Language :: Python
-Classifier: Programming Language :: Python :: 2.7
-Classifier: Programming Language :: Python :: 3
-Classifier: Programming Language :: Python :: 3.5
-Classifier: Programming Language :: Python :: 3.6
-Classifier: Programming Language :: Python :: 3.7
-Classifier: Programming Language :: Python :: 3.8
-Classifier: Programming Language :: Python :: 3.9
-Classifier: Programming Language :: Python :: Implementation :: CPython
-Requires-Python: >=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*
-Description-Content-Type: text/markdown
-License-File: LICENSE
-
-# python-magic
-[![PyPI version](https://badge.fury.io/py/python-magic.svg)](https://badge.fury.io/py/python-magic)
-[![Build Status](https://travis-ci.org/ahupp/python-magic.svg?branch=master)](https://travis-ci.org/ahupp/python-magic) [![Join the chat at https://gitter.im/ahupp/python-magic](https://badges.gitter.im/ahupp/python-magic.svg)](https://gitter.im/ahupp/python-magic?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-python-magic is a Python interface to the libmagic file type
-identification library. libmagic identifies file types by checking
-their headers according to a predefined list of file types. This
-functionality is exposed to the command line by the Unix command
-`file`.
-
-## Usage
-
-```python
->>> import magic
->>> magic.from_file("testdata/test.pdf")
-'PDF document, version 1.2'
-# recommend using at least the first 2048 bytes, as less can produce incorrect identification
->>> magic.from_buffer(open("testdata/test.pdf", "rb").read(2048))
-'PDF document, version 1.2'
->>> magic.from_file("testdata/test.pdf", mime=True)
-'application/pdf'
-```
-
-There is also a `Magic` class that provides more direct control,
-including overriding the magic database file and turning on character
-encoding detection. This is not recommended for general use. In
-particular, it's not safe for sharing across multiple threads and
-will throw if this is attempted.
-
-```python
->>> f = magic.Magic(uncompress=True)
->>> f.from_file('testdata/test.gz')
-'ASCII text (gzip compressed data, was "test", last modified: Sat Jun 28
-21:32:52 2008, from Unix)'
-```
-
-You can also combine the flag options:
-
-```python
->>> f = magic.Magic(mime=True, uncompress=True)
->>> f.from_file('testdata/test.gz')
-'text/plain'
-```
-
-## Installation
-
-The current stable version of python-magic is available on PyPI and
-can be installed by running `pip install python-magic`.
-
-Other sources:
-
-- PyPI: http://pypi.python.org/pypi/python-magic/
-- GitHub: https://github.com/ahupp/python-magic
-
-This module is a simple wrapper around the libmagic C library, and
-that must be installed as well:
-
-### Debian/Ubuntu
-
-```
-sudo apt-get install libmagic1
-```
-
-### Windows
-
-You'll need DLLs for libmagic. @julian-r maintains a pypi package with the DLLs, you can fetch it with:
-
-```
-pip install python-magic-bin
-```
-
-### OSX
-
-- When using Homebrew: `brew install libmagic`
-- When using macports: `port install file`
-
-### Troubleshooting
-
-- 'MagicException: could not find any magic files!': some
- installations of libmagic do not correctly point to their magic
- database file. Try specifying the path to the file explicitly in the
- constructor: `magic.Magic(magic_file="path_to_magic_file")`.
-
-- 'WindowsError: [Error 193] %1 is not a valid Win32 application':
- Attempting to run the 32-bit libmagic DLL in a 64-bit build of
- python will fail with this error. Here are 64-bit builds of libmagic for windows: https://github.com/pidydx/libmagicwin64.
-  Newer versions can be found here: https://github.com/nscaife/file-windows.
-
-- 'WindowsError: exception: access violation writing 0x00000000 ' This may indicate you are mixing
- Windows Python and Cygwin Python. Make sure your libmagic and python builds are consistent.
-
-
-## Bug Reports
-
-python-magic is a thin layer over the libmagic C library.
-Historically, most bugs that have been reported against python-magic
-are actually bugs in libmagic; libmagic bugs can be reported on their
-tracker here: https://bugs.astron.com/my_view_page.php. If you're not
-sure where the bug lies feel free to file an issue on GitHub and I can
-triage it.
-
-## Running the tests
-
-To run the tests across a variety of linux distributions (depends on Docker):
-
-```
-./test_docker.sh
-```
-
-To run tests locally across all available python versions:
-
-```
-./test/run.py
-```
-
-To run against a specific python version:
-
-```
-LC_ALL=en_US.UTF-8 python3 test/test.py
-```
-
-## libmagic python API compatibility
-
-The python bindings shipped with libmagic use a module name that conflicts with this package. To work around this, python-magic includes a compatibility layer for the libmagic API. See [COMPAT.md](COMPAT.md) for a guide to libmagic / python-magic compatibility.
-
-## Versioning
-
-Minor version bumps should be backwards compatible. Major bumps are not.
-
-## Author
-
-Written by Adam Hupp in 2001 for a project that never got off the
-ground. It originally used SWIG for the C library bindings, but
-switched to ctypes once that was part of the python standard library.
-
-You can contact me via my [website](http://hupp.org/adam) or
-[GitHub](http://github.com/ahupp).
-
-## License
-
-python-magic is distributed under the MIT license. See the included
-LICENSE file for details.
-
-I am providing code in the repository to you under an open source license. Because this is my personal repository, the license you receive to my code is from me and not my employer (Facebook).
-
-
diff --git a/contrib/python/python-magic/py3/.dist-info/top_level.txt b/contrib/python/python-magic/py3/.dist-info/top_level.txt
deleted file mode 100644
index 71c947b02f..0000000000
--- a/contrib/python/python-magic/py3/.dist-info/top_level.txt
+++ /dev/null
@@ -1 +0,0 @@
-magic
diff --git a/contrib/python/python-magic/py3/LICENSE b/contrib/python/python-magic/py3/LICENSE
deleted file mode 100644
index b8ca4b96dd..0000000000
--- a/contrib/python/python-magic/py3/LICENSE
+++ /dev/null
@@ -1,58 +0,0 @@
-The MIT License (MIT)
-
-Copyright (c) 2001-2014 Adam Hupp
-
-Permission is hereby granted, free of charge, to any person obtaining a copy
-of this software and associated documentation files (the "Software"), to deal
-in the Software without restriction, including without limitation the rights
-to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
-copies of the Software, and to permit persons to whom the Software is
-furnished to do so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
-
-
-====
-
-Portions of this package (magic/compat.py and test/libmagic_test.py)
-are distributed under the following copyright notice:
-
-
-$File: LEGAL.NOTICE,v 1.15 2006/05/03 18:48:33 christos Exp $
-Copyright (c) Ian F. Darwin 1986, 1987, 1989, 1990, 1991, 1992, 1994, 1995.
-Software written by Ian F. Darwin and others;
-maintained 1994- Christos Zoulas.
-
-This software is not subject to any export provision of the United States
-Department of Commerce, and may be exported to any country or planet.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions
-are met:
-1. Redistributions of source code must retain the above copyright
- notice immediately at the beginning of the file, without modification,
- this list of conditions, and the following disclaimer.
-2. Redistributions in binary form must reproduce the above copyright
- notice, this list of conditions and the following disclaimer in the
- documentation and/or other materials provided with the distribution.
-
-THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
-ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
-ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
-ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
-OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
-HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
-LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
-OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
-SUCH DAMAGE.
diff --git a/contrib/python/python-magic/py3/README.md b/contrib/python/python-magic/py3/README.md
deleted file mode 100644
index 9eb70e8a30..0000000000
--- a/contrib/python/python-magic/py3/README.md
+++ /dev/null
@@ -1,144 +0,0 @@
-# python-magic
-[![PyPI version](https://badge.fury.io/py/python-magic.svg)](https://badge.fury.io/py/python-magic)
-[![Build Status](https://travis-ci.org/ahupp/python-magic.svg?branch=master)](https://travis-ci.org/ahupp/python-magic) [![Join the chat at https://gitter.im/ahupp/python-magic](https://badges.gitter.im/ahupp/python-magic.svg)](https://gitter.im/ahupp/python-magic?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge)
-
-python-magic is a Python interface to the libmagic file type
-identification library. libmagic identifies file types by checking
-their headers according to a predefined list of file types. This
-functionality is exposed to the command line by the Unix command
-`file`.
-
-## Usage
-
-```python
->>> import magic
->>> magic.from_file("testdata/test.pdf")
-'PDF document, version 1.2'
-# recommend using at least the first 2048 bytes, as less can produce incorrect identification
->>> magic.from_buffer(open("testdata/test.pdf", "rb").read(2048))
-'PDF document, version 1.2'
->>> magic.from_file("testdata/test.pdf", mime=True)
-'application/pdf'
-```
-
-There is also a `Magic` class that provides more direct control,
-including overriding the magic database file and turning on character
-encoding detection. This is not recommended for general use. In
-particular, it's not safe for sharing across multiple threads and
-will throw if this is attempted.
-
-```python
->>> f = magic.Magic(uncompress=True)
->>> f.from_file('testdata/test.gz')
-'ASCII text (gzip compressed data, was "test", last modified: Sat Jun 28
-21:32:52 2008, from Unix)'
-```
-
-You can also combine the flag options:
-
-```python
->>> f = magic.Magic(mime=True, uncompress=True)
->>> f.from_file('testdata/test.gz')
-'text/plain'
-```
-
-## Installation
-
-The current stable version of python-magic is available on PyPI and
-can be installed by running `pip install python-magic`.
-
-Other sources:
-
-- PyPI: http://pypi.python.org/pypi/python-magic/
-- GitHub: https://github.com/ahupp/python-magic
-
-This module is a simple wrapper around the libmagic C library, and
-that must be installed as well:
-
-### Debian/Ubuntu
-
-```
-sudo apt-get install libmagic1
-```
-
-### Windows
-
-You'll need DLLs for libmagic. @julian-r maintains a pypi package with the DLLs, you can fetch it with:
-
-```
-pip install python-magic-bin
-```
-
-### OSX
-
-- When using Homebrew: `brew install libmagic`
-- When using macports: `port install file`
-
-### Troubleshooting
-
-- 'MagicException: could not find any magic files!': some
- installations of libmagic do not correctly point to their magic
- database file. Try specifying the path to the file explicitly in the
- constructor: `magic.Magic(magic_file="path_to_magic_file")`.
-
-- 'WindowsError: [Error 193] %1 is not a valid Win32 application':
- Attempting to run the 32-bit libmagic DLL in a 64-bit build of
- python will fail with this error. Here are 64-bit builds of libmagic for windows: https://github.com/pidydx/libmagicwin64.
-  Newer versions can be found here: https://github.com/nscaife/file-windows.
-
-- 'WindowsError: exception: access violation writing 0x00000000 ' This may indicate you are mixing
- Windows Python and Cygwin Python. Make sure your libmagic and python builds are consistent.
-
-
-## Bug Reports
-
-python-magic is a thin layer over the libmagic C library.
-Historically, most bugs that have been reported against python-magic
-are actually bugs in libmagic; libmagic bugs can be reported on their
-tracker here: https://bugs.astron.com/my_view_page.php. If you're not
-sure where the bug lies feel free to file an issue on GitHub and I can
-triage it.
-
-## Running the tests
-
-To run the tests across a variety of linux distributions (depends on Docker):
-
-```
-./test_docker.sh
-```
-
-To run tests locally across all available python versions:
-
-```
-./test/run.py
-```
-
-To run against a specific python version:
-
-```
-LC_ALL=en_US.UTF-8 python3 test/test.py
-```
-
-## libmagic python API compatibility
-
-The python bindings shipped with libmagic use a module name that conflicts with this package. To work around this, python-magic includes a compatibility layer for the libmagic API. See [COMPAT.md](COMPAT.md) for a guide to libmagic / python-magic compatibility.
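A brief, hypothetical illustration of that compatibility layer; the function name and the `FileMagic` result shape are taken from `magic/compat.py` and `_add_compat` elsewhere in this diff, so treat this as a sketch rather than documented API:

```python
import warnings
import magic  # python-magic; _add_compat re-exports the libmagic-style helpers

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    info = magic.detect_from_filename("testdata/test.pdf")

print(info.mime_type)  # FileMagic namedtuple: (mime_type, encoding, name)
# Each compat call is wrapped in a PendingDeprecationWarning by _add_compat.
print(any(issubclass(w.category, PendingDeprecationWarning) for w in caught))
```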
-
-## Versioning
-
-Minor version bumps should be backwards compatible. Major bumps are not.
-
-## Author
-
-Written by Adam Hupp in 2001 for a project that never got off the
-ground. It originally used SWIG for the C library bindings, but
-switched to ctypes once that was part of the python standard library.
-
-You can contact me via my [website](http://hupp.org/adam) or
-[GitHub](http://github.com/ahupp).
-
-## License
-
-python-magic is distributed under the MIT license. See the included
-LICENSE file for details.
-
-I am providing code in the repository to you under an open source license. Because this is my personal repository, the license you receive to my code is from me and not my employer (Facebook).
diff --git a/contrib/python/python-magic/py3/magic/__init__.py b/contrib/python/python-magic/py3/magic/__init__.py
deleted file mode 100644
index bab7c7b122..0000000000
--- a/contrib/python/python-magic/py3/magic/__init__.py
+++ /dev/null
@@ -1,469 +0,0 @@
-"""
-magic is a wrapper around the libmagic file identification library.
-
-See README for more information.
-
-Usage:
-
->>> import magic
->>> magic.from_file("testdata/test.pdf")
-'PDF document, version 1.2'
->>> magic.from_file("testdata/test.pdf", mime=True)
-'application/pdf'
->>> magic.from_buffer(open("testdata/test.pdf").read(1024))
-'PDF document, version 1.2'
->>>
-
-"""
-
-import sys
-import glob
-import ctypes
-import ctypes.util
-import threading
-import logging
-
-from ctypes import c_char_p, c_int, c_size_t, c_void_p, byref, POINTER
-
-# avoid shadowing the real open with the version from compat.py
-_real_open = open
-
-
-class MagicException(Exception):
- def __init__(self, message):
- super(Exception, self).__init__(message)
- self.message = message
-
-
-class Magic:
- """
- Magic is a wrapper around the libmagic C library.
- """
-
- def __init__(self, mime=False, magic_file=None, mime_encoding=False,
- keep_going=False, uncompress=False, raw=False, extension=False):
- """
- Create a new libmagic wrapper.
-
- mime - if True, mimetypes are returned instead of textual descriptions
- mime_encoding - if True, codec is returned
- magic_file - use a mime database other than the system default
- keep_going - don't stop at the first match, keep going
- uncompress - Try to look inside compressed files.
- raw - Do not try to decode "non-printable" chars.
- extension - Print a slash-separated list of valid extensions for the file type found.
- """
- self.flags = MAGIC_NONE
- if mime:
- self.flags |= MAGIC_MIME_TYPE
- if mime_encoding:
- self.flags |= MAGIC_MIME_ENCODING
- if keep_going:
- self.flags |= MAGIC_CONTINUE
- if uncompress:
- self.flags |= MAGIC_COMPRESS
- if raw:
- self.flags |= MAGIC_RAW
- if extension:
- self.flags |= MAGIC_EXTENSION
-
- self.cookie = magic_open(self.flags)
- self.lock = threading.Lock()
-
- magic_load(self.cookie, magic_file)
-
- # MAGIC_EXTENSION was added in 523 or 524, so bail if
- # it doesn't appear to be available
- if extension and (not _has_version or version() < 524):
- raise NotImplementedError('MAGIC_EXTENSION is not supported in this version of libmagic')
-
- # For https://github.com/ahupp/python-magic/issues/190
- # libmagic has fixed internal limits that some files exceed, causing
- # an error. We can avoid this (at least for the sample file given)
- # by bumping the limit up. It's not clear if this is a general solution
- # or whether other internal limits should be increased, but given
- # the lack of other reports I'll assume this is rare.
- if _has_param:
- try:
- self.setparam(MAGIC_PARAM_NAME_MAX, 64)
- except MagicException as e:
- # some versions of libmagic fail this call,
- # so rather than fail hard just use default behavior
- pass
-
- def from_buffer(self, buf):
- """
- Identify the contents of `buf`
- """
- with self.lock:
- try:
- # if we're on python3, convert buf to bytes
- # otherwise this string is passed as wchar*
- # which is not what libmagic expects
- # NEXTBREAK: only take bytes
- if type(buf) == str and str != bytes:
- buf = buf.encode('utf-8', errors='replace')
- return maybe_decode(magic_buffer(self.cookie, buf))
- except MagicException as e:
- return self._handle509Bug(e)
-
- def from_file(self, filename):
- # raise FileNotFoundException or IOError if the file does not exist
- with _real_open(filename):
- pass
-
- with self.lock:
- try:
- return maybe_decode(magic_file(self.cookie, filename))
- except MagicException as e:
- return self._handle509Bug(e)
-
- def from_descriptor(self, fd):
- with self.lock:
- try:
- return maybe_decode(magic_descriptor(self.cookie, fd))
- except MagicException as e:
- return self._handle509Bug(e)
-
- def _handle509Bug(self, e):
- # libmagic 5.09 has a bug where it might fail to identify the
- # mimetype of a file and returns null from magic_file (and
- # likely _buffer), but also does not return an error message.
- if e.message is None and (self.flags & MAGIC_MIME_TYPE):
- return "application/octet-stream"
- else:
- raise e
-
- def setparam(self, param, val):
- return magic_setparam(self.cookie, param, val)
-
- def getparam(self, param):
- return magic_getparam(self.cookie, param)
-
- def __del__(self):
- # no _thread_check here because there can be no other
- # references to this object at this point.
-
- # during shutdown magic_close may have been cleared already so
- # make sure it exists before using it.
-
- # the self.cookie check should be unnecessary and was an
- # incorrect fix for a threading problem, however I'm leaving
- # it in because it's harmless and I'm slightly afraid to
- # remove it.
- if hasattr(self, 'cookie') and self.cookie and magic_close:
- magic_close(self.cookie)
- self.cookie = None
-
-
-_instances = {}
-
-
-def _get_magic_type(mime):
- i = _instances.get(mime)
- if i is None:
- i = _instances[mime] = Magic(mime=mime)
- return i
-
-
-def from_file(filename, mime=False):
-    """
- Accepts a filename and returns the detected filetype. Return
- value is the mimetype if mime=True, otherwise a human readable
- name.
-
- >>> magic.from_file("testdata/test.pdf", mime=True)
- 'application/pdf'
- """
- m = _get_magic_type(mime)
- return m.from_file(filename)
-
-
-def from_buffer(buffer, mime=False):
- """
- Accepts a binary string and returns the detected filetype. Return
- value is the mimetype if mime=True, otherwise a human readable
- name.
-
- >>> magic.from_buffer(open("testdata/test.pdf").read(1024))
- 'PDF document, version 1.2'
- """
- m = _get_magic_type(mime)
- return m.from_buffer(buffer)
-
-
-def from_descriptor(fd, mime=False):
- """
- Accepts a file descriptor and returns the detected filetype. Return
- value is the mimetype if mime=True, otherwise a human readable
- name.
-
- >>> f = open("testdata/test.pdf")
- >>> magic.from_descriptor(f.fileno())
- 'PDF document, version 1.2'
- """
- m = _get_magic_type(mime)
- return m.from_descriptor(fd)
-
-from . import loader
-libmagic = loader.load_lib()
-
-magic_t = ctypes.c_void_p
-
-
-def errorcheck_null(result, func, args):
- if result is None:
- err = magic_error(args[0])
- raise MagicException(err)
- else:
- return result
-
-
-def errorcheck_negative_one(result, func, args):
- if result == -1:
- err = magic_error(args[0])
- raise MagicException(err)
- else:
- return result
-
-
-# return str on python3. Don't want to unconditionally
-# decode because that results in unicode on python2
-def maybe_decode(s):
- # NEXTBREAK: remove
- if str == bytes:
- return s
- else:
- # backslashreplace here because sometimes libmagic will return metadata in the charset
-        # of the file, which is unknown to us (e.g. the title of a Word doc)
- return s.decode('utf-8', 'backslashreplace')
-
-
-try:
- from os import PathLike
- def unpath(filename):
- if isinstance(filename, PathLike):
- return filename.__fspath__()
- else:
- return filename
-except ImportError:
- def unpath(filename):
- return filename
-
-def coerce_filename(filename):
- if filename is None:
- return None
-
- filename = unpath(filename)
-
- # ctypes will implicitly convert unicode strings to bytes with
- # .encode('ascii'). If you use the filesystem encoding
- # then you'll get inconsistent behavior (crashes) depending on the user's
- # LANG environment variable
- # NEXTBREAK: remove
- is_unicode = (sys.version_info[0] <= 2 and
- isinstance(filename, unicode)) or \
- (sys.version_info[0] >= 3 and
- isinstance(filename, str))
- if is_unicode:
- return filename.encode('utf-8', 'surrogateescape')
- else:
- return filename
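Because `unpath` above unwraps `os.PathLike`, `pathlib.Path` arguments flow through `coerce_filename` transparently; a tiny sketch (the test file path is borrowed from the README examples and is only illustrative):

```python
import pathlib
import magic

# Path objects are accepted anywhere a filename string is.
print(magic.from_file(pathlib.Path("testdata/test.pdf"), mime=True))  # 'application/pdf'
```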
-
-
-magic_open = libmagic.magic_open
-magic_open.restype = magic_t
-magic_open.argtypes = [c_int]
-
-magic_close = libmagic.magic_close
-magic_close.restype = None
-magic_close.argtypes = [magic_t]
-
-magic_error = libmagic.magic_error
-magic_error.restype = c_char_p
-magic_error.argtypes = [magic_t]
-
-magic_errno = libmagic.magic_errno
-magic_errno.restype = c_int
-magic_errno.argtypes = [magic_t]
-
-_magic_file = libmagic.magic_file
-_magic_file.restype = c_char_p
-_magic_file.argtypes = [magic_t, c_char_p]
-_magic_file.errcheck = errorcheck_null
-
-
-def magic_file(cookie, filename):
- return _magic_file(cookie, coerce_filename(filename))
-
-
-_magic_buffer = libmagic.magic_buffer
-_magic_buffer.restype = c_char_p
-_magic_buffer.argtypes = [magic_t, c_void_p, c_size_t]
-_magic_buffer.errcheck = errorcheck_null
-
-
-def magic_buffer(cookie, buf):
- return _magic_buffer(cookie, buf, len(buf))
-
-
-magic_descriptor = libmagic.magic_descriptor
-magic_descriptor.restype = c_char_p
-magic_descriptor.argtypes = [magic_t, c_int]
-magic_descriptor.errcheck = errorcheck_null
-
-_magic_descriptor = libmagic.magic_descriptor
-_magic_descriptor.restype = c_char_p
-_magic_descriptor.argtypes = [magic_t, c_int]
-_magic_descriptor.errcheck = errorcheck_null
-
-
-def magic_descriptor(cookie, fd):
- return _magic_descriptor(cookie, fd)
-
-
-_magic_load = libmagic.magic_load
-_magic_load.restype = c_int
-_magic_load.argtypes = [magic_t, c_char_p]
-_magic_load.errcheck = errorcheck_negative_one
-
-
-def magic_load(cookie, filename):
- return _magic_load(cookie, coerce_filename(filename))
-
-
-magic_setflags = libmagic.magic_setflags
-magic_setflags.restype = c_int
-magic_setflags.argtypes = [magic_t, c_int]
-
-magic_check = libmagic.magic_check
-magic_check.restype = c_int
-magic_check.argtypes = [magic_t, c_char_p]
-
-magic_compile = libmagic.magic_compile
-magic_compile.restype = c_int
-magic_compile.argtypes = [magic_t, c_char_p]
-
-_has_param = False
-if hasattr(libmagic, 'magic_setparam') and hasattr(libmagic, 'magic_getparam'):
- _has_param = True
- _magic_setparam = libmagic.magic_setparam
- _magic_setparam.restype = c_int
- _magic_setparam.argtypes = [magic_t, c_int, POINTER(c_size_t)]
- _magic_setparam.errcheck = errorcheck_negative_one
-
- _magic_getparam = libmagic.magic_getparam
- _magic_getparam.restype = c_int
- _magic_getparam.argtypes = [magic_t, c_int, POINTER(c_size_t)]
- _magic_getparam.errcheck = errorcheck_negative_one
-
-
-def magic_setparam(cookie, param, val):
- if not _has_param:
- raise NotImplementedError("magic_setparam not implemented")
- v = c_size_t(val)
- return _magic_setparam(cookie, param, byref(v))
-
-
-def magic_getparam(cookie, param):
- if not _has_param:
- raise NotImplementedError("magic_getparam not implemented")
- val = c_size_t()
- _magic_getparam(cookie, param, byref(val))
- return val.value
-
-
-_has_version = False
-if hasattr(libmagic, "magic_version"):
- _has_version = True
- magic_version = libmagic.magic_version
- magic_version.restype = c_int
- magic_version.argtypes = []
-
-
-def version():
- if not _has_version:
- raise NotImplementedError("magic_version not implemented")
- return magic_version()
-
-
-MAGIC_NONE = 0x000000 # No flags
-MAGIC_DEBUG = 0x000001 # Turn on debugging
-MAGIC_SYMLINK = 0x000002 # Follow symlinks
-MAGIC_COMPRESS = 0x000004 # Check inside compressed files
-MAGIC_DEVICES = 0x000008 # Look at the contents of devices
-MAGIC_MIME_TYPE = 0x000010 # Return a mime string
-MAGIC_MIME_ENCODING = 0x000400 # Return the MIME encoding
-# TODO: should be
-# MAGIC_MIME = MAGIC_MIME_TYPE | MAGIC_MIME_ENCODING
-MAGIC_MIME = 0x000010 # Return a mime string
-MAGIC_EXTENSION = 0x1000000 # Return a /-separated list of extensions
-
-MAGIC_CONTINUE = 0x000020 # Return all matches
-MAGIC_CHECK = 0x000040 # Print warnings to stderr
-MAGIC_PRESERVE_ATIME = 0x000080 # Restore access time on exit
-MAGIC_RAW = 0x000100 # Don't translate unprintable chars
-MAGIC_ERROR = 0x000200 # Handle ENOENT etc as real errors
-
-MAGIC_NO_CHECK_COMPRESS = 0x001000 # Don't check for compressed files
-MAGIC_NO_CHECK_TAR = 0x002000 # Don't check for tar files
-MAGIC_NO_CHECK_SOFT = 0x004000 # Don't check magic entries
-MAGIC_NO_CHECK_APPTYPE = 0x008000 # Don't check application type
-MAGIC_NO_CHECK_ELF = 0x010000 # Don't check for elf details
-MAGIC_NO_CHECK_ASCII = 0x020000 # Don't check for ascii files
-MAGIC_NO_CHECK_TROFF = 0x040000 # Don't check ascii/troff
-MAGIC_NO_CHECK_FORTRAN = 0x080000 # Don't check ascii/fortran
-MAGIC_NO_CHECK_TOKENS = 0x100000 # Don't check ascii/tokens
-
-MAGIC_PARAM_INDIR_MAX = 0 # Recursion limit for indirect magic
-MAGIC_PARAM_NAME_MAX = 1 # Use count limit for name/use magic
-MAGIC_PARAM_ELF_PHNUM_MAX = 2  # Max ELF program sections processed
-MAGIC_PARAM_ELF_SHNUM_MAX = 3  # Max ELF sections processed
-MAGIC_PARAM_ELF_NOTES_MAX = 4  # Max ELF notes processed
-MAGIC_PARAM_REGEX_MAX = 5 # Length limit for regex searches
-MAGIC_PARAM_BYTES_MAX = 6 # Max number of bytes to read from file
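These parameter constants can be driven through the same `setparam`/`getparam` pair that `Magic.__init__` uses above; a hedged sketch (availability depends on the libmagic build, hence the guard):

```python
import magic

m = magic.Magic(mime=True)
try:
    # The same bump that Magic.__init__ applies for issue #190.
    m.setparam(magic.MAGIC_PARAM_NAME_MAX, 64)
    print(m.getparam(magic.MAGIC_PARAM_NAME_MAX))  # 64
except (NotImplementedError, magic.MagicException):
    # Older libmagic builds lack magic_setparam/magic_getparam or reject the value.
    pass
```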
-
-
-# This package name conflicts with the one provided by upstream
-# libmagic. This is a common source of confusion for users. To
-# resolve this, we ship a copy of that module and expose its functions
-# wrapped in deprecation warnings.
-def _add_compat(to_module):
- import warnings, re
- from magic import compat
-
- def deprecation_wrapper(fn):
- def _(*args, **kwargs):
- warnings.warn(
- "Using compatibility mode with libmagic's python binding. "
- "See https://github.com/ahupp/python-magic/blob/master/COMPAT.md for details.",
- PendingDeprecationWarning)
-
- return fn(*args, **kwargs)
-
- return _
-
- fn = ['detect_from_filename',
- 'detect_from_content',
- 'detect_from_fobj',
- 'open']
- for fname in fn:
- to_module[fname] = deprecation_wrapper(compat.__dict__[fname])
-
- # copy constants over, ensuring there's no conflicts
- is_const_re = re.compile("^[A-Z_]+$")
- allowed_inconsistent = set(['MAGIC_MIME'])
- for name, value in compat.__dict__.items():
- if is_const_re.match(name):
- if name in to_module:
- if name in allowed_inconsistent:
- continue
- if to_module[name] != value:
- raise Exception("inconsistent value for " + name)
- else:
- continue
- else:
- to_module[name] = value
-
-
-_add_compat(globals())
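A minimal stand-alone sketch of the wrapping pattern that _add_compat() above applies to the compat functions; the name legacy_detect and the warning text are illustrative only, not part of the removed module:

    import warnings

    def deprecation_wrapper(fn, note):
        # Emit a PendingDeprecationWarning on every call, then delegate.
        def wrapped(*args, **kwargs):
            warnings.warn(note, PendingDeprecationWarning)
            return fn(*args, **kwargs)
        return wrapped

    def legacy_detect(path):  # hypothetical legacy API
        return "placeholder result for %s" % path

    legacy_detect = deprecation_wrapper(
        legacy_detect, "legacy_detect is kept only for compatibility")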
diff --git a/contrib/python/python-magic/py3/magic/compat.py b/contrib/python/python-magic/py3/magic/compat.py
deleted file mode 100644
index 07fad45a8b..0000000000
--- a/contrib/python/python-magic/py3/magic/compat.py
+++ /dev/null
@@ -1,287 +0,0 @@
-# coding: utf-8
-
-'''
-Python bindings for libmagic
-'''
-
-import ctypes
-
-from collections import namedtuple
-
-from ctypes import *
-from ctypes.util import find_library
-
-
-from . import loader
-
-_libraries = {}
-_libraries['magic'] = loader.load_lib()
-
-# Flag constants for open and setflags
-MAGIC_NONE = NONE = 0
-MAGIC_DEBUG = DEBUG = 1
-MAGIC_SYMLINK = SYMLINK = 2
-MAGIC_COMPRESS = COMPRESS = 4
-MAGIC_DEVICES = DEVICES = 8
-MAGIC_MIME_TYPE = MIME_TYPE = 16
-MAGIC_CONTINUE = CONTINUE = 32
-MAGIC_CHECK = CHECK = 64
-MAGIC_PRESERVE_ATIME = PRESERVE_ATIME = 128
-MAGIC_RAW = RAW = 256
-MAGIC_ERROR = ERROR = 512
-MAGIC_MIME_ENCODING = MIME_ENCODING = 1024
-MAGIC_MIME = MIME = 1040 # MIME_TYPE + MIME_ENCODING
-MAGIC_APPLE = APPLE = 2048
-
-MAGIC_NO_CHECK_COMPRESS = NO_CHECK_COMPRESS = 4096
-MAGIC_NO_CHECK_TAR = NO_CHECK_TAR = 8192
-MAGIC_NO_CHECK_SOFT = NO_CHECK_SOFT = 16384
-MAGIC_NO_CHECK_APPTYPE = NO_CHECK_APPTYPE = 32768
-MAGIC_NO_CHECK_ELF = NO_CHECK_ELF = 65536
-MAGIC_NO_CHECK_TEXT = NO_CHECK_TEXT = 131072
-MAGIC_NO_CHECK_CDF = NO_CHECK_CDF = 262144
-MAGIC_NO_CHECK_TOKENS = NO_CHECK_TOKENS = 1048576
-MAGIC_NO_CHECK_ENCODING = NO_CHECK_ENCODING = 2097152
-
-MAGIC_NO_CHECK_BUILTIN = NO_CHECK_BUILTIN = 4173824
-
-FileMagic = namedtuple('FileMagic', ('mime_type', 'encoding', 'name'))
-
-
-class magic_set(Structure):
- pass
-
-
-magic_set._fields_ = []
-magic_t = POINTER(magic_set)
-
-_open = _libraries['magic'].magic_open
-_open.restype = magic_t
-_open.argtypes = [c_int]
-
-_close = _libraries['magic'].magic_close
-_close.restype = None
-_close.argtypes = [magic_t]
-
-_file = _libraries['magic'].magic_file
-_file.restype = c_char_p
-_file.argtypes = [magic_t, c_char_p]
-
-_descriptor = _libraries['magic'].magic_descriptor
-_descriptor.restype = c_char_p
-_descriptor.argtypes = [magic_t, c_int]
-
-_buffer = _libraries['magic'].magic_buffer
-_buffer.restype = c_char_p
-_buffer.argtypes = [magic_t, c_void_p, c_size_t]
-
-_error = _libraries['magic'].magic_error
-_error.restype = c_char_p
-_error.argtypes = [magic_t]
-
-_setflags = _libraries['magic'].magic_setflags
-_setflags.restype = c_int
-_setflags.argtypes = [magic_t, c_int]
-
-_load = _libraries['magic'].magic_load
-_load.restype = c_int
-_load.argtypes = [magic_t, c_char_p]
-
-_compile = _libraries['magic'].magic_compile
-_compile.restype = c_int
-_compile.argtypes = [magic_t, c_char_p]
-
-_check = _libraries['magic'].magic_check
-_check.restype = c_int
-_check.argtypes = [magic_t, c_char_p]
-
-_list = _libraries['magic'].magic_list
-_list.restype = c_int
-_list.argtypes = [magic_t, c_char_p]
-
-_errno = _libraries['magic'].magic_errno
-_errno.restype = c_int
-_errno.argtypes = [magic_t]
-
-
-class Magic(object):
- def __init__(self, ms):
- self._magic_t = ms
-
- def close(self):
- """
- Closes the magic database and deallocates any resources used.
- """
- _close(self._magic_t)
-
- @staticmethod
- def __tostr(s):
- if s is None:
- return None
- if isinstance(s, str):
- return s
- try: # keep Python 2 compatibility
- return str(s, 'utf-8')
- except TypeError:
- return str(s)
-
- @staticmethod
- def __tobytes(b):
- if b is None:
- return None
- if isinstance(b, bytes):
- return b
- try: # keep Python 2 compatibility
- return bytes(b, 'utf-8')
- except TypeError:
- return bytes(b)
-
- def file(self, filename):
- """
- Returns a textual description of the contents of the argument passed
- as a filename or None if an error occurred and the MAGIC_ERROR flag
- is set. A call to errno() will return the numeric error code.
- """
- return Magic.__tostr(_file(self._magic_t, Magic.__tobytes(filename)))
-
- def descriptor(self, fd):
- """
- Returns a textual description of the contents of the argument passed
- as a file descriptor or None if an error occurred and the MAGIC_ERROR
- flag is set. A call to errno() will return the numeric error code.
- """
- return Magic.__tostr(_descriptor(self._magic_t, fd))
-
- def buffer(self, buf):
- """
- Returns a textual description of the contents of the argument passed
- as a buffer or None if an error occurred and the MAGIC_ERROR flag
- is set. A call to errno() will return the numeric error code.
- """
- return Magic.__tostr(_buffer(self._magic_t, buf, len(buf)))
-
- def error(self):
- """
- Returns a textual explanation of the last error or None
- if there was no error.
- """
- return Magic.__tostr(_error(self._magic_t))
-
- def setflags(self, flags):
- """
- Set flags on the magic object which determine how magic checking
- behaves; a bitwise OR of the flags described in libmagic(3), but
- without the MAGIC_ prefix.
-
- Returns -1 on systems that don't support utime(2) or utimes(2)
- when PRESERVE_ATIME is set.
- """
- return _setflags(self._magic_t, flags)
-
- def load(self, filename=None):
- """
- Loads entries from the colon separated list of database files passed
- as argument, or from the default database file if no argument is given.
- Must be called before any magic queries can be performed.
-
- Returns 0 on success and -1 on failure.
- """
- return _load(self._magic_t, Magic.__tobytes(filename))
-
- def compile(self, dbs):
- """
- Compile entries in the colon separated list of database files
- passed as argument or the default database file if no argument.
- The compiled files created are named from the basename(1) of each file
- argument with ".mgc" appended to it.
-
- Returns 0 on success and -1 on failure.
- """
- return _compile(self._magic_t, Magic.__tobytes(dbs))
-
- def check(self, dbs):
- """
- Check the validity of entries in the colon separated list of
- database files passed as argument or the default database file
- if no argument.
-
- Returns 0 on success and -1 on failure.
- """
- return _check(self._magic_t, Magic.__tobytes(dbs))
-
- def list(self, dbs):
- """
- Dumps all magic entries in the colon separated list of database
- files passed as argument, or in the default database file if no
- argument, in a human readable format.
-
- Returns 0 on success and -1 on failure.
- """
- return _list(self._magic_t, Magic.__tobytes(dbs))
-
- def errno(self):
- """
- Returns a numeric error code. If the return value is 0, an internal
- magic error occurred. If the return value is non-zero, the value is
- an OS error code; the errno module or os.strerror() can be used
- to provide detailed error information.
- """
- return _errno(self._magic_t)
-
-
-def open(flags):
- """
- Returns a magic object on success and None on failure.
- Flags argument as for setflags.
- """
- return Magic(_open(flags))
-
-
-# Objects used by `detect_from_` functions
-mime_magic = Magic(_open(MAGIC_MIME))
-mime_magic.load()
-none_magic = Magic(_open(MAGIC_NONE))
-none_magic.load()
-
-
-def _create_filemagic(mime_detected, type_detected):
- splat = mime_detected.split('; ')
- mime_type = splat[0]
- if len(splat) == 2:
- mime_encoding = splat[1]
- else:
- mime_encoding = ''
-
- return FileMagic(name=type_detected, mime_type=mime_type,
- encoding=mime_encoding.replace('charset=', ''))
-
-
-def detect_from_filename(filename):
- '''Detect mime type, encoding and file type from a filename
-
- Returns a `FileMagic` namedtuple.
- '''
-
- return _create_filemagic(mime_magic.file(filename),
- none_magic.file(filename))
-
-
-def detect_from_fobj(fobj):
- '''Detect mime type, encoding and file type from file-like object
-
- Returns a `FileMagic` namedtuple.
- '''
-
- file_descriptor = fobj.fileno()
- return _create_filemagic(mime_magic.descriptor(file_descriptor),
- none_magic.descriptor(file_descriptor))
-
-
-def detect_from_content(byte_content):
- '''Detect mime type, encoding and file type from bytes
-
- Returns a `FileMagic` namedtuple.
- '''
-
- return _create_filemagic(mime_magic.buffer(byte_content),
- none_magic.buffer(byte_content))
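For reference, a short usage sketch of the compat API deleted above; "example.txt" is a placeholder path, not a file shipped with the package:

    from magic import compat

    info = compat.detect_from_filename("example.txt")
    print(info.mime_type, info.encoding, info.name)

    with open("example.txt", "rb") as f:
        # detect_from_content() accepts raw bytes
        print(compat.detect_from_content(f.read(2048)).mime_type)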
diff --git a/contrib/python/python-magic/py3/magic/loader.py b/contrib/python/python-magic/py3/magic/loader.py
deleted file mode 100644
index 931f16193e..0000000000
--- a/contrib/python/python-magic/py3/magic/loader.py
+++ /dev/null
@@ -1,50 +0,0 @@
-from ctypes.util import find_library
-import ctypes
-import sys
-import glob
-import os.path
-
-def _lib_candidates():
-
- yield find_library('magic')
-
- if sys.platform == 'darwin':
-
- paths = [
- '/opt/local/lib',
- '/usr/local/lib',
- '/opt/homebrew/lib',
- ] + glob.glob('/usr/local/Cellar/libmagic/*/lib')
-
- for i in paths:
- yield os.path.join(i, 'libmagic.dylib')
-
- elif sys.platform in ('win32', 'cygwin'):
-
- prefixes = ['libmagic', 'magic1', 'cygmagic-1', 'libmagic-1', 'msys-magic-1']
-
- for i in prefixes:
- # find_library searches in %PATH% but not the current directory,
- # so look for both
- yield './%s.dll' % (i,)
- yield find_library(i)
-
- elif sys.platform == 'linux':
- # Fallback for Alpine Linux, where find_library() does not locate libmagic
- yield 'libmagic.so.1'
-
-
-def load_lib():
-
- for lib in _lib_candidates():
- # find_library returns None when lib not found
- if lib is None:
- continue
- try:
- return ctypes.CDLL(lib)
- except OSError:
- pass
- else:
- # It is better to raise an ImportError since we are importing the magic module
- raise ImportError('failed to find libmagic. Check your installation')
-
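load_lib() above probes a sequence of candidate names and keeps the first one that ctypes can open. A minimal sketch of the same probing pattern, independent of libmagic:

    import ctypes
    from ctypes.util import find_library

    def load_first(candidates):
        for name in candidates:
            if name is None:  # find_library() yields None on a miss
                continue
            try:
                return ctypes.CDLL(name)
            except OSError:
                continue
        raise ImportError("no loadable library among the candidates")

    # e.g. load_first([find_library("magic"), "libmagic.so.1"])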
diff --git a/contrib/python/python-magic/py3/magic/py.typed b/contrib/python/python-magic/py3/magic/py.typed
deleted file mode 100644
index e69de29bb2..0000000000
--- a/contrib/python/python-magic/py3/magic/py.typed
+++ /dev/null
diff --git a/contrib/python/python-magic/py3/ya.make b/contrib/python/python-magic/py3/ya.make
deleted file mode 100644
index f1aba820d5..0000000000
--- a/contrib/python/python-magic/py3/ya.make
+++ /dev/null
@@ -1,31 +0,0 @@
-# Generated by devtools/yamaker (pypi).
-
-PY3_LIBRARY()
-
-VERSION(0.4.27)
-
-LICENSE(MIT)
-
-PEERDIR(
- contrib/libs/libmagic
- library/python/symbols/libmagic
-)
-
-NO_LINT()
-
-PY_SRCS(
- TOP_LEVEL
- magic/__init__.py
- magic/__init__.pyi
- magic/compat.py
- magic/loader.py
-)
-
-RESOURCE_FILES(
- PREFIX contrib/python/python-magic/py3/
- .dist-info/METADATA
- .dist-info/top_level.txt
- magic/py.typed
-)
-
-END()
diff --git a/contrib/python/python-magic/ya.make b/contrib/python/python-magic/ya.make
deleted file mode 100644
index 6ed9425039..0000000000
--- a/contrib/python/python-magic/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-PY23_LIBRARY()
-
-LICENSE(Service-Py23-Proxy)
-
-IF (PYTHON2)
- PEERDIR(contrib/python/python-magic/py2)
-ELSE()
- PEERDIR(contrib/python/python-magic/py3)
-ENDIF()
-
-NO_LINT()
-
-END()
-
-RECURSE(
- py2
- py3
-)
diff --git a/contrib/tools/ragel5/common/buffer.h b/contrib/tools/ragel5/common/buffer.h
deleted file mode 100644
index 99c4e82d49..0000000000
--- a/contrib/tools/ragel5/common/buffer.h
+++ /dev/null
@@ -1,55 +0,0 @@
-/*
- * Copyright 2003 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _BUFFER_H
-#define _BUFFER_H
-
-#define BUFFER_INITIAL_SIZE 4096
-
-/* An automatically grown buffer for collecting tokens. Always reuses space;
- * never down resizes. */
-struct Buffer
-{
- Buffer()
- {
- data = (char*) malloc( BUFFER_INITIAL_SIZE );
- allocated = BUFFER_INITIAL_SIZE;
- length = 0;
- }
- ~Buffer() { free(data); }
-
- void append( char p )
- {
- if ( length == allocated ) {
- allocated *= 2;
- data = (char*) realloc( data, allocated );
- }
- data[length++] = p;
- }
-
- void clear() { length = 0; }
-
- char *data;
- int allocated;
- int length;
-};
-
-#endif /* _BUFFER_H */
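Buffer above grows by doubling and never shrinks, so appends are amortized O(1). A rough Python rendering of the same strategy (Python's bytearray already amortizes growth internally; this only mirrors the C++ structure above):

    class GrowBuffer:
        BUFFER_INITIAL_SIZE = 4096

        def __init__(self):
            self.data = bytearray(self.BUFFER_INITIAL_SIZE)
            self.length = 0

        def append(self, byte):
            if self.length == len(self.data):
                # Double the capacity, as Buffer::append() does.
                self.data.extend(bytes(len(self.data)))
            self.data[self.length] = byte
            self.length += 1

        def clear(self):
            # Reuse the space; never downsize.
            self.length = 0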
diff --git a/contrib/tools/ragel5/common/common.cpp b/contrib/tools/ragel5/common/common.cpp
deleted file mode 100644
index 4484dcbd73..0000000000
--- a/contrib/tools/ragel5/common/common.cpp
+++ /dev/null
@@ -1,296 +0,0 @@
-/*
- * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "pcheck.h"
-#include "common.h"
-#include <string.h>
-#include <assert.h>
-
-#ifdef _WIN32
-#include <malloc.h>
-#else
-#include <alloca.h>
-#endif
-
-HostType hostTypesC[] =
-{
- { "char", 0, true, CHAR_MIN, CHAR_MAX, sizeof(char) },
- { "unsigned", "char", false, 0, UCHAR_MAX, sizeof(unsigned char) },
- { "short", 0, true, SHRT_MIN, SHRT_MAX, sizeof(short) },
- { "unsigned", "short", false, 0, USHRT_MAX, sizeof(unsigned short) },
- { "int", 0, true, INT_MIN, INT_MAX, sizeof(int) },
- { "unsigned", "int", false, 0, UINT_MAX, sizeof(unsigned int) },
- { "long", 0, true, LONG_MIN, LONG_MAX, sizeof(long) },
- { "unsigned", "long", false, 0, (long long)ULONG_MAX, sizeof(unsigned long) }
-};
-
-HostType hostTypesD[] =
-{
- { "byte", 0, true, CHAR_MIN, CHAR_MAX, 1 },
- { "ubyte", 0, false, 0, UCHAR_MAX, 1 },
- { "char", 0, false, 0, UCHAR_MAX, 1 },
- { "short", 0, true, SHRT_MIN, SHRT_MAX, 2 },
- { "ushort", 0, false, 0, USHRT_MAX, 2 },
- { "wchar", 0, false, 0, USHRT_MAX, 2 },
- { "int", 0, true, INT_MIN, INT_MAX, 4 },
- { "uint", 0, false, 0, UINT_MAX, 4 },
- { "dchar", 0, false, 0, UINT_MAX, 4 }
-};
-
-HostType hostTypesJava[] =
-{
- { "byte", 0, true, CHAR_MIN, CHAR_MAX, 1 },
- { "short", 0, true, SHRT_MIN, SHRT_MAX, 2 },
- { "char", 0, false, 0, USHRT_MAX, 2 },
- { "int", 0, true, INT_MIN, INT_MAX, 4 },
-};
-
-HostType hostTypesRuby[] =
-{
- { "byte", 0, true, CHAR_MIN, CHAR_MAX, 1 },
- { "short", 0, true, SHRT_MIN, SHRT_MAX, 2 },
- { "char", 0, false, 0, USHRT_MAX, 2 },
- { "int", 0, true, INT_MIN, INT_MAX, 4 },
-};
-
-HostLang hostLangC = { hostTypesC, 8, hostTypesC+0, true };
-HostLang hostLangD = { hostTypesD, 9, hostTypesD+2, true };
-HostLang hostLangJava = { hostTypesJava, 4, hostTypesJava+2, false };
-HostLang hostLangRuby = { hostTypesRuby, 4, hostTypesRuby+2, false };
-
-HostLang *hostLang = &hostLangC;
-HostLangType hostLangType = CCode;
-
-/* Construct a new parameter checker for paramSpec. */
-ParamCheck::ParamCheck(const char *paramSpec, int argc, char **argv)
-:
- state(noparam),
- argOffset(0),
- curArg(0),
- iCurArg(1),
- paramSpec(paramSpec),
- argc(argc),
- argv(argv)
-{
-}
-
-/* Check a single option. Returns false when the argument list is exhausted.
- * Sets parameter to the arg character if valid, 0 otherwise. Sets parameterArg
- * to the parameter's argument if there is one, NULL otherwise. */
-bool ParamCheck::check()
-{
- bool requiresParam;
-
- if ( iCurArg >= argc ) { /* Off the end of the arg list. */
- state = noparam;
- return false;
- }
-
- if ( argOffset != 0 && *argOffset == 0 ) {
- /* We are at the end of an arg string. */
- iCurArg += 1;
- if ( iCurArg >= argc ) {
- state = noparam;
- return false;
- }
- argOffset = 0;
- }
-
- if ( argOffset == 0 ) {
- /* Set the current arg. */
- curArg = argv[iCurArg];
-
- /* We are at the beginning of an arg string. */
- if ( argv[iCurArg] == 0 || /* Argv[iCurArg] is null. */
- argv[iCurArg][0] != '-' || /* Not a param. */
- argv[iCurArg][1] == 0 ) { /* Only a dash. */
- parameter = 0;
- parameterArg = 0;
-
- iCurArg += 1;
- state = noparam;
- return true;
- }
- argOffset = argv[iCurArg] + 1;
- }
-
- /* Get the arg char. */
- char argChar = *argOffset;
-
- /* Loop over all the parms and look for a match. */
- const char *pSpec = paramSpec;
- while ( *pSpec != 0 ) {
- char pSpecChar = *pSpec;
-
- /* If there is a ':' following the char then
- * it requires a parm. If a parm is required
- * then move ahead two in the parmspec. Otherwise
- * move ahead one in the parm spec. */
- if ( pSpec[1] == ':' ) {
- requiresParam = true;
- pSpec += 2;
- }
- else {
- requiresParam = false;
- pSpec += 1;
- }
-
- /* Do we have a match. */
- if ( argChar == pSpecChar ) {
- if ( requiresParam ) {
- if ( argOffset[1] == 0 ) {
- /* The param must follow. */
- if ( iCurArg + 1 == argc ) {
- /* We are the last arg so there
- * cannot be a parameter to it. */
- parameter = argChar;
- parameterArg = 0;
- iCurArg += 1;
- argOffset = 0;
- state = invalid;
- return true;
- }
- else {
- /* the parameter to the arg is the next arg. */
- parameter = pSpecChar;
- parameterArg = argv[iCurArg + 1];
- iCurArg += 2;
- argOffset = 0;
- state = match;
- return true;
- }
- }
- else {
- /* The param for the arg is built in. */
- parameter = pSpecChar;
- parameterArg = argOffset + 1;
- iCurArg += 1;
- argOffset = 0;
- state = match;
- return true;
- }
- }
- else {
- /* Good, we matched the parm and no
- * arg is required. */
- parameter = pSpecChar;
- parameterArg = 0;
- argOffset += 1;
- state = match;
- return true;
- }
- }
- }
-
- /* We did not find a match. Bad Argument. */
- parameter = argChar;
- parameterArg = 0;
- argOffset += 1;
- state = invalid;
- return true;
-}
-
-void NormalizeWinPath(char* input) {
- const size_t len = strlen(input);
- char* res = static_cast<char*>(alloca(len + 1));
- for (size_t i = 0, j = 0; i <= len; ++i, ++j) {
- if (input[i] == '\\') {
- res[j] = '/';
- if (i < len - 2 && input[i + 1] == '\\')
- ++i;
- } else {
- res[j] = input[i];
- }
- }
- strcpy(input, res);
-}
-
-/* Counts newlines before sending sync. */
-int output_filter::sync( )
-{
- line += 1;
- return std::filebuf::sync();
-}
-
-/* Counts newlines before sending data out to file. */
-std::streamsize output_filter::xsputn( const char *s, std::streamsize n )
-{
- for ( int i = 0; i < n; i++ ) {
- if ( s[i] == '\n' )
- line += 1;
- }
- return std::filebuf::xsputn( s, n );
-}
-
-/* Scans a string looking for the file extension. If there is a file
- * extension then the returned pointer points into the string
- * passed in. Otherwise returns null. */
-char *findFileExtension( char *stemFile )
-{
- char *ppos = stemFile + strlen(stemFile) - 1;
-
- /* Scan backwards from the end looking for the first dot.
- * If we encounter a '/' before the first dot, then stop the scan. */
- while ( 1 ) {
- /* If we found a dot or got to the beginning of the string then
- * we are done. */
- if ( ppos == stemFile || *ppos == '.' )
- break;
-
- /* If we hit a / then there is no extension. Done. */
- if ( *ppos == '/' ) {
- ppos = stemFile;
- break;
- }
- ppos--;
- }
-
- /* If we got to the front of the string then bail; we
- * did not find an extension. */
- if ( ppos == stemFile )
- ppos = 0;
-
- return ppos;
-}
-
-/* Make a file name from a stem. Removes the old filename suffix and
- * replaces it with a new one. Returns a newly allocated string. */
-char *fileNameFromStem( char *stemFile, const char *suffix )
-{
- int len = strlen( stemFile );
- assert( len > 0 );
-
- /* Get the extension. */
- char *ppos = findFileExtension( stemFile );
-
- /* If an extension was found, then shorten what we think the len is. */
- if ( ppos != 0 )
- len = ppos - stemFile;
-
- /* Make the return string from the stem and the suffix. */
- char *retVal = new char[ len + strlen( suffix ) + 1 ];
- strncpy( retVal, stemFile, len );
- strcpy( retVal + len, suffix );
-
- return retVal;
-}
-
-
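findFileExtension() and fileNameFromStem() above scan backwards for the last dot, give up if a '/' (or the start of the string) is reached first, and then splice a new suffix onto the stem. A compact Python sketch of the same rule, given only for illustration:

    def file_name_from_stem(stem, suffix):
        dot = stem.rfind(".")
        slash = stem.rfind("/")
        if dot > 0 and dot > slash:  # a dot after the last '/' is a real extension
            stem = stem[:dot]
        return stem + suffix

    # file_name_from_stem("dir/machine.rl", ".cpp") -> "dir/machine.cpp"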
diff --git a/contrib/tools/ragel5/common/common.h b/contrib/tools/ragel5/common/common.h
deleted file mode 100644
index aae6f85add..0000000000
--- a/contrib/tools/ragel5/common/common.h
+++ /dev/null
@@ -1,308 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _COMMON_H
-#define _COMMON_H
-
-#include <fstream>
-#include <climits>
-#include "dlist.h"
-
-typedef unsigned long long Size;
-
-struct Key
-{
-private:
- long key;
-
-public:
- friend inline Key operator+(const Key key1, const Key key2);
- friend inline Key operator-(const Key key1, const Key key2);
- friend inline Key operator/(const Key key1, const Key key2);
- friend inline long operator&(const Key key1, const Key key2);
-
- friend inline bool operator<( const Key key1, const Key key2 );
- friend inline bool operator<=( const Key key1, const Key key2 );
- friend inline bool operator>( const Key key1, const Key key2 );
- friend inline bool operator>=( const Key key1, const Key key2 );
- friend inline bool operator==( const Key key1, const Key key2 );
- friend inline bool operator!=( const Key key1, const Key key2 );
-
- friend struct KeyOps;
-
- Key( ) {}
- Key( const Key &key ) : key(key.key) {}
- Key( long key ) : key(key) {}
-
- /* Returns the value used to represent the key. This value must be
- * interpreted based on signedness. */
- long getVal() const { return key; };
-
- /* Returns the key cast to a long long. This form of the key does not
- * require any signedness interpretation. */
- long long getLongLong() const;
-
- bool isUpper() const { return ( 'A' <= key && key <= 'Z' ); }
- bool isLower() const { return ( 'a' <= key && key <= 'z' ); }
- bool isPrintable() const
- {
- return ( 7 <= key && key <= 13 ) || ( 32 <= key && key < 127 );
- }
-
- Key toUpper() const
- { return Key( 'A' + ( key - 'a' ) ); }
- Key toLower() const
- { return Key( 'a' + ( key - 'A' ) ); }
-
- void operator+=( const Key other )
- {
- /* FIXME: must be made aware of isSigned. */
- key += other.key;
- }
-
- void operator-=( const Key other )
- {
- /* FIXME: must be made aware of isSigned. */
- key -= other.key;
- }
-
- void operator|=( const Key other )
- {
- /* FIXME: must be made aware of isSigned. */
- key |= other.key;
- }
-
- /* Decrement. Needed only for ranges. */
- inline void decrement();
- inline void increment();
-};
-
-struct HostType
-{
- const char *data1;
- const char *data2;
- bool isSigned;
- long long minVal;
- long long maxVal;
- unsigned int size;
-};
-
-struct HostLang
-{
- HostType *hostTypes;
- int numHostTypes;
- HostType *defaultAlphType;
- bool explicitUnsigned;
-};
-
-
-/* Target language. */
-enum HostLangType
-{
- CCode,
- DCode,
- JavaCode,
- RubyCode
-};
-
-extern HostLang *hostLang;
-extern HostLangType hostLangType;
-
-extern HostLang hostLangC;
-extern HostLang hostLangD;
-extern HostLang hostLangJava;
-extern HostLang hostLangRuby;
-
-/* An abstraction of the key operators that manages key operations such as
- * comparison and increment according to the signedness of the key. */
-struct KeyOps
-{
- /* Default to signed alphabet. */
- KeyOps() :
- isSigned(true),
- alphType(0)
- {}
-
- /* Default to signed alphabet. */
- KeyOps( bool isSigned )
- :isSigned(isSigned) {}
-
- bool isSigned;
- Key minKey, maxKey;
- HostType *alphType;
-
- void setAlphType( HostType *alphType )
- {
- this->alphType = alphType;
- isSigned = alphType->isSigned;
- if ( isSigned ) {
- minKey = (long) alphType->minVal;
- maxKey = (long) alphType->maxVal;
- }
- else {
- minKey = (long) (unsigned long) alphType->minVal;
- maxKey = (long) (unsigned long) alphType->maxVal;
- }
- }
-
- /* Compute the distance between two keys. */
- Size span( Key key1, Key key2 )
- {
- return isSigned ?
- (unsigned long long)(
- (long long)key2.key -
- (long long)key1.key + 1) :
- (unsigned long long)(
- (unsigned long)key2.key) -
- (unsigned long long)((unsigned long)key1.key) + 1;
- }
-
- Size alphSize()
- { return span( minKey, maxKey ); }
-
- HostType *typeSubsumes( long long maxVal )
- {
- for ( int i = 0; i < hostLang->numHostTypes; i++ ) {
- if ( maxVal <= hostLang->hostTypes[i].maxVal )
- return hostLang->hostTypes + i;
- }
- return 0;
- }
-
- HostType *typeSubsumes( bool isSigned, long long maxVal )
- {
- for ( int i = 0; i < hostLang->numHostTypes; i++ ) {
- if ( ( isSigned == hostLang->hostTypes[i].isSigned ) &&
- maxVal <= hostLang->hostTypes[i].maxVal )
- return hostLang->hostTypes + i;
- }
- return 0;
- }
-};
-
-extern KeyOps *keyOps;
-
-inline bool operator<( const Key key1, const Key key2 )
-{
- return keyOps->isSigned ? key1.key < key2.key :
- (unsigned long)key1.key < (unsigned long)key2.key;
-}
-
-inline bool operator<=( const Key key1, const Key key2 )
-{
- return keyOps->isSigned ? key1.key <= key2.key :
- (unsigned long)key1.key <= (unsigned long)key2.key;
-}
-
-inline bool operator>( const Key key1, const Key key2 )
-{
- return keyOps->isSigned ? key1.key > key2.key :
- (unsigned long)key1.key > (unsigned long)key2.key;
-}
-
-inline bool operator>=( const Key key1, const Key key2 )
-{
- return keyOps->isSigned ? key1.key >= key2.key :
- (unsigned long)key1.key >= (unsigned long)key2.key;
-}
-
-inline bool operator==( const Key key1, const Key key2 )
-{
- return key1.key == key2.key;
-}
-
-inline bool operator!=( const Key key1, const Key key2 )
-{
- return key1.key != key2.key;
-}
-
-/* Decrement. Needed only for ranges. */
-inline void Key::decrement()
-{
- key = keyOps->isSigned ? key - 1 : ((unsigned long)key)-1;
-}
-
-/* Increment. Needed only for ranges. */
-inline void Key::increment()
-{
- key = keyOps->isSigned ? key+1 : ((unsigned long)key)+1;
-}
-
-inline long long Key::getLongLong() const
-{
- return keyOps->isSigned ? (long long)key : (long long)(unsigned long)key;
-}
-
-inline Key operator+(const Key key1, const Key key2)
-{
- /* FIXME: must be made aware of isSigned. */
- return Key( key1.key + key2.key );
-}
-
-inline Key operator-(const Key key1, const Key key2)
-{
- /* FIXME: must be made aware of isSigned. */
- return Key( key1.key - key2.key );
-}
-
-inline long operator&(const Key key1, const Key key2)
-{
- /* FIXME: must be made aware of isSigned. */
- return key1.key & key2.key;
-}
-
-inline Key operator/(const Key key1, const Key key2)
-{
- /* FIXME: must be made aware of isSigned. */
- return key1.key / key2.key;
-}
-
-/* Filter on the output stream that keeps track of the number of lines
- * output. */
-class output_filter : public std::filebuf
-{
-public:
- output_filter( char *fileName ) : fileName(fileName), line(1) { }
-
- virtual int sync();
- virtual std::streamsize xsputn(const char* s, std::streamsize n);
-
- char *fileName;
- int line;
-};
-
-char *findFileExtension( char *stemFile );
-char *fileNameFromStem( char *stemFile, const char *suffix );
-
-struct Export
-{
- Export(const char *name, Key key )
- : name(name), key(key) {}
-
- const char *name;
- Key key;
-
- Export *prev, *next;
-};
-
-typedef DList<Export> ExportList;
-
-#endif /* _COMMON_H */
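The Key comparison operators above choose a signed or an unsigned interpretation of the stored long at compare time, based on keyOps->isSigned. A small Python sketch of that idea, assuming 32-bit keys purely for illustration:

    def key_lt(a, b, is_signed, bits=32):
        # Reinterpret the same bit pattern as unsigned when requested.
        if not is_signed:
            mask = (1 << bits) - 1
            a, b = a & mask, b & mask
        return a < b

    print(key_lt(-1, 1, is_signed=True))   # True: -1 < 1 as signed
    print(key_lt(-1, 1, is_signed=False))  # False: 0xFFFFFFFF > 1 as unsigned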
diff --git a/contrib/tools/ragel5/common/config.h b/contrib/tools/ragel5/common/config.h
deleted file mode 100644
index 405cfd6c3b..0000000000
--- a/contrib/tools/ragel5/common/config.h
+++ /dev/null
@@ -1,39 +0,0 @@
-/* common/config.h. Generated by configure. */
-/*
- * Copyright 2001 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _CONFIG_H
-#define _CONFIG_H
-
-/* Programs. */
-/* #undef GDC */
-#define GOBJC gcc -x objective-c
-#define CXX c++
-#define CC cc
-/* #undef JAVAC */
-/* #undef TXL */
-/* #undef RUBY */
-
-#ifdef WIN32
-#define strcasecmp _stricmp
-#endif
-
-#endif /* _CONFIG_H */
diff --git a/contrib/tools/ragel5/common/pcheck.h b/contrib/tools/ragel5/common/pcheck.h
deleted file mode 100644
index 5f95dc3c12..0000000000
--- a/contrib/tools/ragel5/common/pcheck.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright 2001, 2002 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PCHECK_H
-#define _PCHECK_H
-
-class ParamCheck
-{
-public:
- ParamCheck(const char *paramSpec, int argc, char **argv);
-
- bool check();
-
- char *parameterArg; /* The argument to the parameter. */
- char parameter; /* The parameter matched. */
- enum { match, invalid, noparam } state;
-
- char *argOffset; /* If we are reading params inside an
- * arg this points to the offset. */
-
- char *curArg; /* Pointer to the current arg. */
- int iCurArg; /* Index to the current arg. */
-
-private:
- const char *paramSpec; /* Parameter spec supplied by the coder. */
- int argc; /* Argument data from the command line. */
- char **argv;
-
-};
-
-void NormalizeWinPath(char* input);
-
-#endif /* _PCHECK_H */
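The paramSpec convention used by ParamCheck ("x:" means -x takes an argument) is the classic short-option syntax; Python's getopt module follows the same rule, which makes for a quick comparison. The option letters and file names below are made up for illustration:

    import getopt

    argv = ["-o", "out.c", "-V", "input.rl"]  # hypothetical command line
    opts, rest = getopt.getopt(argv, "o:V")
    print(opts)  # [('-o', 'out.c'), ('-V', '')]
    print(rest)  # ['input.rl']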
diff --git a/contrib/tools/ragel5/common/version.h b/contrib/tools/ragel5/common/version.h
deleted file mode 100644
index dba4eb2154..0000000000
--- a/contrib/tools/ragel5/common/version.h
+++ /dev/null
@@ -1,2 +0,0 @@
-#define VERSION "5.19"
-#define PUBDATE "March 2007"
diff --git a/contrib/tools/ragel5/common/ya.make b/contrib/tools/ragel5/common/ya.make
deleted file mode 100644
index 7448cd2af3..0000000000
--- a/contrib/tools/ragel5/common/ya.make
+++ /dev/null
@@ -1,20 +0,0 @@
-LIBRARY()
-
-LICENSE(GPL-2.0-or-later)
-
-NO_UTIL()
-NO_COMPILER_WARNINGS()
-
-ADDINCL(
- GLOBAL contrib/tools/ragel5/common
-)
-
-PEERDIR(
- contrib/tools/ragel5/aapl
-)
-
-SRCS(
- common.cpp
-)
-
-END()
diff --git a/contrib/tools/ragel5/ragel/fsmap.cpp b/contrib/tools/ragel5/ragel/fsmap.cpp
deleted file mode 100644
index 551aea0391..0000000000
--- a/contrib/tools/ragel5/ragel/fsmap.cpp
+++ /dev/null
@@ -1,840 +0,0 @@
-/*
- * Copyright 2002-2004 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "fsmgraph.h"
-#include <iostream>
-using std::cerr;
-using std::endl;
-
-CondData *condData = 0;
-KeyOps *keyOps = 0;
-
-/* Insert an action into an action table. */
-void ActionTable::setAction( int ordering, Action *action )
-{
- /* Multi-insert in case specific instances of an action appear in a
- * transition more than once. */
- insertMulti( ordering, action );
-}
-
-/* Set all the action from another action table in this table. */
-void ActionTable::setActions( const ActionTable &other )
-{
- for ( ActionTable::Iter action = other; action.lte(); action++ )
- insertMulti( action->key, action->value );
-}
-
-void ActionTable::setActions( int *orderings, Action **actions, int nActs )
-{
- for ( int a = 0; a < nActs; a++ )
- insertMulti( orderings[a], actions[a] );
-}
-
-bool ActionTable::hasAction( Action *action )
-{
- for ( int a = 0; a < length(); a++ ) {
- if ( data[a].value == action )
- return true;
- }
- return false;
-}
-
-/* Insert an action into an action table. */
-void LmActionTable::setAction( int ordering, LongestMatchPart *action )
-{
- /* Multi-insert in case specific instances of an action appear in a
- * transition more than once. */
- insertMulti( ordering, action );
-}
-
-/* Set all the action from another action table in this table. */
-void LmActionTable::setActions( const LmActionTable &other )
-{
- for ( LmActionTable::Iter action = other; action.lte(); action++ )
- insertMulti( action->key, action->value );
-}
-
-void ErrActionTable::setAction( int ordering, Action *action, int transferPoint )
-{
- insertMulti( ErrActionTableEl( action, ordering, transferPoint ) );
-}
-
-void ErrActionTable::setActions( const ErrActionTable &other )
-{
- for ( ErrActionTable::Iter act = other; act.lte(); act++ )
- insertMulti( ErrActionTableEl( act->action, act->ordering, act->transferPoint ) );
-}
-
-/* Insert a priority into this priority table. Looks out for priorities on
- * duplicate keys. */
-void PriorTable::setPrior( int ordering, PriorDesc *desc )
-{
- PriorEl *lastHit = 0;
- PriorEl *insed = insert( PriorEl(ordering, desc), &lastHit );
- if ( insed == 0 ) {
- /* This already has a priority on the same key as desc. Overwrite the
- * priority if the ordering is larger (later in time). */
- if ( ordering >= lastHit->ordering )
- *lastHit = PriorEl( ordering, desc );
- }
-}
-
-/* Set all the priorities from a priorTable in this table. */
-void PriorTable::setPriors( const PriorTable &other )
-{
- /* Loop src priorities once to overwrite duplicates. */
- PriorTable::Iter priorIt = other;
- for ( ; priorIt.lte(); priorIt++ )
- setPrior( priorIt->ordering, priorIt->desc );
-}
-
-/* Set the priority of starting transitions. Isolates the start state so it has
- * no other entry points, then sets the priorities of all the transitions out
- * of the start state. If the start state is final, then the outPrior of the
- * start state is also set. The idea is that a machine that accepts the null
- * string can still specify the starting trans prior for when it accepts the
- * null word. */
-void FsmAp::startFsmPrior( int ordering, PriorDesc *prior )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
-
- /* Walk all transitions out of the start state. */
- for ( TransList::Iter trans = startState->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 )
- trans->priorTable.setPrior( ordering, prior );
- }
-}
-
-/* Set the priority of all transitions in a graph. Walks all transition lists
- * and all def transitions. */
-void FsmAp::allTransPrior( int ordering, PriorDesc *prior )
-{
- /* Walk the list of all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Walk the out list of the state. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 )
- trans->priorTable.setPrior( ordering, prior );
- }
- }
-}
-
-/* Set the priority of all transitions that go into a final state. Note that if
- * any entry states are final, we will not be setting the priority of any
- * transitions that may go into those states in the future. The graph does not
- * support pending in transitions in the same way pending out transitions are
- * supported. */
-void FsmAp::finishFsmPrior( int ordering, PriorDesc *prior )
-{
- /* Walk all final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ ) {
- /* Walk all in transitions of the final state. */
- for ( TransInList::Iter trans = (*state)->inList; trans.lte(); trans++ )
- trans->priorTable.setPrior( ordering, prior );
- }
-}
-
-/* Set the priority of any future out transitions that may be made going out of
- * this state machine. */
-void FsmAp::leaveFsmPrior( int ordering, PriorDesc *prior )
-{
- /* Set priority in all final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->outPriorTable.setPrior( ordering, prior );
-}
-
-
-/* Set actions to execute on starting transitions. Isolates the start state
- * so it has no other entry points, then adds to the transition functions
- * of all the transitions out of the start state. If the start state is final,
- * then the func is also added to the start state's out func list. The idea is
- * that a machine that accepts the null string can execute a start func when it
- * matches the null word, which can only be done when leaving the start/final
- * state. */
-void FsmAp::startFsmAction( int ordering, Action *action )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
-
- /* Walk the start state's transitions, setting functions. */
- for ( TransList::Iter trans = startState->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 )
- trans->actionTable.setAction( ordering, action );
- }
-}
-
-/* Set functions to execute on all transitions. Walks the out lists of all
- * states. */
-void FsmAp::allTransAction( int ordering, Action *action )
-{
- /* Walk all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Walk the out list of the state. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 )
- trans->actionTable.setAction( ordering, action );
- }
- }
-}
-
-/* Specify functions to execute upon entering final states. If the start state
- * is final we can't really specify a function to execute upon entering that
- * final state the first time. So function really means whenever entering a
- * final state from within the same fsm. */
-void FsmAp::finishFsmAction( int ordering, Action *action )
-{
- /* Walk all final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ ) {
- /* Walk the final state's in list. */
- for ( TransInList::Iter trans = (*state)->inList; trans.lte(); trans++ )
- trans->actionTable.setAction( ordering, action );
- }
-}
-
-/* Add functions to any future out transitions that may be made going out of
- * this state machine. */
-void FsmAp::leaveFsmAction( int ordering, Action *action )
-{
- /* Insert the action in the outActionTable of all final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->outActionTable.setAction( ordering, action );
-}
-
-/* Add functions to the longest match action table for constructing scanners. */
-void FsmAp::longMatchAction( int ordering, LongestMatchPart *lmPart )
-{
- /* Walk all final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ ) {
- /* Walk the final state's in list. */
- for ( TransInList::Iter trans = (*state)->inList; trans.lte(); trans++ )
- trans->lmActionTable.setAction( ordering, lmPart );
- }
-}
-
-void FsmAp::fillGaps( StateAp *state )
-{
- if ( state->outList.length() == 0 ) {
- /* Add the range on the lower and upper bound. */
- attachNewTrans( state, 0, keyOps->minKey, keyOps->maxKey );
- }
- else {
- TransList srcList;
- srcList.transfer( state->outList );
-
- /* Check for a gap at the beginning. */
- TransList::Iter trans = srcList, next;
- if ( keyOps->minKey < trans->lowKey ) {
- /* Make the high key and append. */
- Key highKey = trans->lowKey;
- highKey.decrement();
-
- attachNewTrans( state, 0, keyOps->minKey, highKey );
- }
-
- /* Write the transition. */
- next = trans.next();
- state->outList.append( trans );
-
- /* Keep the last high end. */
- Key lastHigh = trans->highKey;
-
- /* Loop each source range. */
- for ( trans = next; trans.lte(); trans = next ) {
- /* Make the next key following the last range. */
- Key nextKey = lastHigh;
- nextKey.increment();
-
- /* Check for a gap from last up to here. */
- if ( nextKey < trans->lowKey ) {
- /* Make the high end of the range that fills the gap. */
- Key highKey = trans->lowKey;
- highKey.decrement();
-
- attachNewTrans( state, 0, nextKey, highKey );
- }
-
- /* Reduce the transition. If it reduced to anything then add it. */
- next = trans.next();
- state->outList.append( trans );
-
- /* Keep the last high end. */
- lastHigh = trans->highKey;
- }
-
- /* Now check for a gap on the end to fill. */
- if ( lastHigh < keyOps->maxKey ) {
- /* Get a copy of the default. */
- lastHigh.increment();
-
- attachNewTrans( state, 0, lastHigh, keyOps->maxKey );
- }
- }
-}
-
-void FsmAp::setErrorAction( StateAp *state, int ordering, Action *action )
-{
- /* Fill any gaps in the out list with an error transition. */
- fillGaps( state );
-
- /* Set error transitions in the transitions that go to error. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- if ( trans->toState == 0 )
- trans->actionTable.setAction( ordering, action );
- }
-}
-
-
-/* Give a target state for error transitions. */
-void FsmAp::setErrorTarget( StateAp *state, StateAp *target, int *orderings,
- Action **actions, int nActs )
-{
- /* Fill any gaps in the out list with an error transition. */
- fillGaps( state );
-
- /* Set error target in the transitions that go to error. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- if ( trans->toState == 0 ) {
- /* The trans goes to error, redirect it. */
- redirectErrorTrans( trans->fromState, target, trans );
- trans->actionTable.setActions( orderings, actions, nActs );
- }
- }
-}
-
-void FsmAp::transferErrorActions( StateAp *state, int transferPoint )
-{
- for ( int i = 0; i < state->errActionTable.length(); ) {
- ErrActionTableEl *act = state->errActionTable.data + i;
- if ( act->transferPoint == transferPoint ) {
- /* Transfer the error action and remove it. */
- setErrorAction( state, act->ordering, act->action );
- state->errActionTable.vremove( i );
- }
- else {
- /* Not transferring and deleting, skip over the item. */
- i += 1;
- }
- }
-}
-
-/* Set error actions in the start state. */
-void FsmAp::startErrorAction( int ordering, Action *action, int transferPoint )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
-
- /* Add the actions. */
- startState->errActionTable.setAction( ordering, action, transferPoint );
-}
-
-/* Set error actions in all states where there is a transition out. */
-void FsmAp::allErrorAction( int ordering, Action *action, int transferPoint )
-{
- /* Insert actions in the error action table of all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- state->errActionTable.setAction( ordering, action, transferPoint );
-}
-
-/* Set error actions in final states. */
-void FsmAp::finalErrorAction( int ordering, Action *action, int transferPoint )
-{
- /* Add the action to the error table of final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->errActionTable.setAction( ordering, action, transferPoint );
-}
-
-void FsmAp::notStartErrorAction( int ordering, Action *action, int transferPoint )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState )
- state->errActionTable.setAction( ordering, action, transferPoint );
- }
-}
-
-void FsmAp::notFinalErrorAction( int ordering, Action *action, int transferPoint )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( ! state->isFinState() )
- state->errActionTable.setAction( ordering, action, transferPoint );
- }
-}
-
-/* Set error actions in the states that have transitions into a final state. */
-void FsmAp::middleErrorAction( int ordering, Action *action, int transferPoint )
-{
- /* Set the action in all states that are not the start state
- * and not final. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState && ! state->isFinState() )
- state->errActionTable.setAction( ordering, action, transferPoint );
- }
-}
-
-/* Set EOF actions in the start state. */
-void FsmAp::startEOFAction( int ordering, Action *action )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
-
- /* Add the actions. */
- startState->eofActionTable.setAction( ordering, action );
-}
-
-/* Set EOF actions in all states where there is a transition out. */
-void FsmAp::allEOFAction( int ordering, Action *action )
-{
- /* Insert actions in the EOF action table of all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- state->eofActionTable.setAction( ordering, action );
-}
-
-/* Set EOF actions in final states. */
-void FsmAp::finalEOFAction( int ordering, Action *action )
-{
- /* Add the action to the error table of final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->eofActionTable.setAction( ordering, action );
-}
-
-void FsmAp::notStartEOFAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState )
- state->eofActionTable.setAction( ordering, action );
- }
-}
-
-void FsmAp::notFinalEOFAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( ! state->isFinState() )
- state->eofActionTable.setAction( ordering, action );
- }
-}
-
-/* Set EOF actions in the states that have transitions into a final state. */
-void FsmAp::middleEOFAction( int ordering, Action *action )
-{
- /* Set the actions in all states that are not the start state and not final. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState && ! state->isFinState() )
- state->eofActionTable.setAction( ordering, action );
- }
-}
-
-/*
- * Set To State Actions.
- */
-
-/* Set to state actions in the start state. */
-void FsmAp::startToStateAction( int ordering, Action *action )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
- startState->toStateActionTable.setAction( ordering, action );
-}
-
-/* Set to state actions in all states. */
-void FsmAp::allToStateAction( int ordering, Action *action )
-{
- /* Insert the action on all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- state->toStateActionTable.setAction( ordering, action );
-}
-
-/* Set to state actions in final states. */
-void FsmAp::finalToStateAction( int ordering, Action *action )
-{
- /* Add the action to the error table of final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->toStateActionTable.setAction( ordering, action );
-}
-
-void FsmAp::notStartToStateAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState )
- state->toStateActionTable.setAction( ordering, action );
- }
-}
-
-void FsmAp::notFinalToStateAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( ! state->isFinState() )
- state->toStateActionTable.setAction( ordering, action );
- }
-}
-
-/* Set to state actions in states that are not final and not the start state. */
-void FsmAp::middleToStateAction( int ordering, Action *action )
-{
- /* Set the action in all states that are not the start state and not final. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState && ! state->isFinState() )
- state->toStateActionTable.setAction( ordering, action );
- }
-}
-
-/*
- * Set From State Actions.
- */
-
-void FsmAp::startFromStateAction( int ordering, Action *action )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
- startState->fromStateActionTable.setAction( ordering, action );
-}
-
-void FsmAp::allFromStateAction( int ordering, Action *action )
-{
- /* Insert the action on all states. */
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- state->fromStateActionTable.setAction( ordering, action );
-}
-
-void FsmAp::finalFromStateAction( int ordering, Action *action )
-{
- /* Add the action to the error table of final states. */
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->fromStateActionTable.setAction( ordering, action );
-}
-
-void FsmAp::notStartFromStateAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState )
- state->fromStateActionTable.setAction( ordering, action );
- }
-}
-
-void FsmAp::notFinalFromStateAction( int ordering, Action *action )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( ! state->isFinState() )
- state->fromStateActionTable.setAction( ordering, action );
- }
-}
-
-void FsmAp::middleFromStateAction( int ordering, Action *action )
-{
- /* Set the action in all states that are not the start state and not final. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- if ( state != startState && ! state->isFinState() )
- state->fromStateActionTable.setAction( ordering, action );
- }
-}
-
-/* Shift the function ordering of the start transitions to start
- * at fromOrder and increase in units of 1. Useful before starring.
- * Returns the maximum number of order numbers used. */
-int FsmAp::shiftStartActionOrder( int fromOrder )
-{
- int maxUsed = 0;
-
- /* Walk the start state's transitions, shifting function ordering. */
- for ( TransList::Iter trans = startState->outList; trans.lte(); trans++ ) {
- /* Walk the function data for the transition and set the keys to
- * increasing values starting at fromOrder. */
- int curFromOrder = fromOrder;
- ActionTable::Iter action = trans->actionTable;
- for ( ; action.lte(); action++ )
- action->key = curFromOrder++;
-
- /* Keep track of the max number of orders used. */
- if ( curFromOrder - fromOrder > maxUsed )
- maxUsed = curFromOrder - fromOrder;
- }
-
- return maxUsed;
-}
-
-/* Remove all priorities. */
-void FsmAp::clearAllPriorities()
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Clear out priority data. */
- state->outPriorTable.empty();
-
- /* Clear transition data from the out transitions. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ )
- trans->priorTable.empty();
- }
-}
-
-/* Zeros out the function ordering keys. This may be called before minimization
- * when it is known that no more fsm operations are going to be done. This
- * will achieve greater reduction as states will not be separated on the basis
- * of function ordering. */
-void FsmAp::nullActionKeys( )
-{
- /* For each state... */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Walk the transitions for the state. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- /* Walk the action table for the transition. */
- for ( ActionTable::Iter action = trans->actionTable;
- action.lte(); action++ )
- action->key = 0;
-
- /* Walk the action table for the transition. */
- for ( LmActionTable::Iter action = trans->lmActionTable;
- action.lte(); action++ )
- action->key = 0;
- }
-
- /* Null the action keys of the to state action table. */
- for ( ActionTable::Iter action = state->toStateActionTable;
- action.lte(); action++ )
- action->key = 0;
-
- /* Null the action keys of the from state action table. */
- for ( ActionTable::Iter action = state->fromStateActionTable;
- action.lte(); action++ )
- action->key = 0;
-
- /* Null the action keys of the out transitions. */
- for ( ActionTable::Iter action = state->outActionTable;
- action.lte(); action++ )
- action->key = 0;
-
- /* Null the action keys of the error action table. */
- for ( ErrActionTable::Iter action = state->errActionTable;
- action.lte(); action++ )
- action->ordering = 0;
-
- /* Null the action keys of the eof action table. */
- for ( ActionTable::Iter action = state->eofActionTable;
- action.lte(); action++ )
- action->key = 0;
- }
-}
-
-/* Walk the list of states and verify that non final states do not have out
- * data, that all stateBits are cleared, and that there are no states with
- * zero foreign in transitions. */
-void FsmAp::verifyStates()
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Non final states should not have leaving data. */
- if ( ! (state->stateBits & SB_ISFINAL) ) {
- assert( state->outActionTable.length() == 0 );
- assert( state->outCondSet.length() == 0 );
- assert( state->outPriorTable.length() == 0 );
- }
-
- /* Data used in algorithms should be cleared. */
- assert( (state->stateBits & SB_BOTH) == 0 );
- assert( state->foreignInTrans > 0 );
- }
-}
-
-/* Compare two transitions according to their relative priority. Since the
- * base transition has no priority associated with it, the default is to
- * return equal. */
-int FsmAp::comparePrior( const PriorTable &priorTable1, const PriorTable &priorTable2 )
-{
- /* Looking for differing priorities on same keys. Need to concurrently
- * scan the priority lists. */
- PriorTable::Iter pd1 = priorTable1;
- PriorTable::Iter pd2 = priorTable2;
- while ( pd1.lte() && pd2.lte() ) {
- /* Check keys. */
- if ( pd1->desc->key < pd2->desc->key )
- pd1.increment();
- else if ( pd1->desc->key > pd2->desc->key )
- pd2.increment();
- /* Keys are the same, check priorities. */
- else if ( pd1->desc->priority < pd2->desc->priority )
- return -1;
- else if ( pd1->desc->priority > pd2->desc->priority )
- return 1;
- else {
- /* Keys and priorities are equal, advance both. */
- pd1.increment();
- pd2.increment();
- }
- }
-
- /* No differing priorities on the same key. */
- return 0;
-}
-
-/* Compares two transitions according to priority and functions. Pointers
- * should not be null. Does not consider to state or from state. Compare two
- * transitions according to the data contained in the transitions. Data means
- * any properties added to user transitions that may differentiate them. Since
- * the base transition has no data, the default is to return equal. */
-int FsmAp::compareTransData( TransAp *trans1, TransAp *trans2 )
-{
- /* Compare the prior table. */
- int cmpRes = CmpPriorTable::compare( trans1->priorTable,
- trans2->priorTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Compare longest match action tables. */
- cmpRes = CmpLmActionTable::compare(trans1->lmActionTable,
- trans2->lmActionTable);
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Compare action tables. */
- return CmpActionTable::compare(trans1->actionTable,
- trans2->actionTable);
-}
-
-/* Callback invoked when another trans (or possibly this) is added into this
- * transition during the merging process. Draw in any properties of srcTrans
- * into this transition. AddInTrans is called when a new transition is made
- * that will be a duplicate of another transition or a combination of several
- * other transitions. AddInTrans will be called for each transition that the
- * new transition is to represent. */
-void FsmAp::addInTrans( TransAp *destTrans, TransAp *srcTrans )
-{
- /* Protect against adding in from ourselves. */
- if ( srcTrans == destTrans ) {
- /* Adding in ourselves, need to make a copy of the source transitions.
- * The priorities are not copied in as that would have no effect. */
- destTrans->lmActionTable.setActions( LmActionTable(srcTrans->lmActionTable) );
- destTrans->actionTable.setActions( ActionTable(srcTrans->actionTable) );
- }
- else {
- /* Not a copy of ourself, get the functions and priorities. */
- destTrans->lmActionTable.setActions( srcTrans->lmActionTable );
- destTrans->actionTable.setActions( srcTrans->actionTable );
- destTrans->priorTable.setPriors( srcTrans->priorTable );
- }
-}
-
-/* Compare the properties of states that are embedded by users. Compares out
- * priorities, out transitions, to, from, out, error and eof action tables. */
-int FsmAp::compareStateData( const StateAp *state1, const StateAp *state2 )
-{
- /* Compare the out priority table. */
- int cmpRes = CmpPriorTable::
- compare( state1->outPriorTable, state2->outPriorTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test to state action tables. */
- cmpRes = CmpActionTable::compare( state1->toStateActionTable,
- state2->toStateActionTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test from state action tables. */
- cmpRes = CmpActionTable::compare( state1->fromStateActionTable,
- state2->fromStateActionTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test out action tables. */
- cmpRes = CmpActionTable::compare( state1->outActionTable,
- state2->outActionTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test out condition sets. */
- cmpRes = CmpActionSet::compare( state1->outCondSet,
- state2->outCondSet );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test out error action tables. */
- cmpRes = CmpErrActionTable::compare( state1->errActionTable,
- state2->errActionTable );
- if ( cmpRes != 0 )
- return cmpRes;
-
- /* Test eof action tables. */
- return CmpActionTable::compare( state1->eofActionTable,
- state2->eofActionTable );
-}
-
-
-/* Invoked when a state loses its final state status and the leaving
- * transition embedding data should be deleted. */
-void FsmAp::clearOutData( StateAp *state )
-{
- /* Kill the out actions and priorities. */
- state->outActionTable.empty();
- state->outCondSet.empty();
- state->outPriorTable.empty();
-}
-
-bool FsmAp::hasOutData( StateAp *state )
-{
- return ( state->outActionTable.length() > 0 ||
- state->outCondSet.length() > 0 ||
- state->outPriorTable.length() > 0 );
-}
-
-/*
- * Setting Conditions.
- */
-
-
-void logNewExpansion( Expansion *exp );
-void logCondSpace( CondSpace *condSpace );
-
-CondSpace *FsmAp::addCondSpace( const CondSet &condSet )
-{
- CondSpace *condSpace = condData->condSpaceMap.find( condSet );
- if ( condSpace == 0 ) {
- Key baseKey = condData->nextCondKey;
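- /* Reserve a block of keys for this space: one copy of the alphabet for
- * each of the 2^n combinations of the n conditions in the set. */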
- condData->nextCondKey += (1 << condSet.length() ) * keyOps->alphSize();
-
- condSpace = new CondSpace( condSet );
- condSpace->baseKey = baseKey;
- condData->condSpaceMap.insert( condSpace );
-
- #ifdef LOG_CONDS
- cerr << "adding new condition space" << endl;
- cerr << " condition set: ";
- logCondSpace( condSpace );
- cerr << endl;
- cerr << " baseKey: " << baseKey.getVal() << endl;
- #endif
- }
- return condSpace;
-}
-
-void FsmAp::startFsmCondition( Action *condAction )
-{
- /* Make sure the start state has no other entry points. */
- isolateStartState();
- embedCondition( startState, condAction );
-}
-
-void FsmAp::allTransCondition( Action *condAction )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- embedCondition( state, condAction );
-}
-
-void FsmAp::leaveFsmCondition( Action *condAction )
-{
- for ( StateSet::Iter state = finStateSet; state.lte(); state++ )
- (*state)->outCondSet.insert( condAction );
-}
diff --git a/contrib/tools/ragel5/ragel/fsmattach.cpp b/contrib/tools/ragel5/ragel/fsmattach.cpp
deleted file mode 100644
index 6a90df658a..0000000000
--- a/contrib/tools/ragel5/ragel/fsmattach.cpp
+++ /dev/null
@@ -1,425 +0,0 @@
-/*
- * Copyright 2001 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <string.h>
-#include <assert.h>
-#include "fsmgraph.h"
-
-#include <iostream>
-using namespace std;
-
-/* Insert a transition into an inlist. The head must be supplied. */
-void FsmAp::attachToInList( StateAp *from, StateAp *to,
- TransAp *&head, TransAp *trans )
-{
- trans->ilnext = head;
- trans->ilprev = 0;
-
- /* If in trans list is not empty, set the head->prev to trans. */
- if ( head != 0 )
- head->ilprev = trans;
-
- /* Now insert ourselves at the front of the list. */
- head = trans;
-
- /* Keep track of foreign transitions for from and to. */
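- /* A transition counts as foreign only when its source state differs from
- * its target state; self transitions do not count. */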
- if ( from != to ) {
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions is about to go up to 1 then
- * move it from the misfit list to the main list. */
- if ( to->foreignInTrans == 0 )
- stateList.append( misfitList.detach( to ) );
- }
-
- to->foreignInTrans += 1;
- }
-}
-
-/* Detach a transition from an inlist. The head of the inlist must be supplied. */
-void FsmAp::detachFromInList( StateAp *from, StateAp *to,
- TransAp *&head, TransAp *trans )
-{
- /* Detach in the inTransList. */
- if ( trans->ilprev == 0 )
- head = trans->ilnext;
- else
- trans->ilprev->ilnext = trans->ilnext;
-
- if ( trans->ilnext != 0 )
- trans->ilnext->ilprev = trans->ilprev;
-
- /* Keep track of foreign transitions for from and to. */
- if ( from != to ) {
- to->foreignInTrans -= 1;
-
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions goes down to 0 then move it
- * from the main list to the misfit list. */
- if ( to->foreignInTrans == 0 )
- misfitList.append( stateList.detach( to ) );
- }
- }
-}
-
-/* Attach states on the default transition, range list or on out/in list key.
- * First makes a new transition. If there is already a transition out from
- * fromState on the default, then an assertion will fail. */
-TransAp *FsmAp::attachNewTrans( StateAp *from, StateAp *to, Key lowKey, Key highKey )
-{
- /* Make the new transition. */
- TransAp *retVal = new TransAp();
-
- /* The transition is now attached. Remember the parties involved. */
- retVal->fromState = from;
- retVal->toState = to;
-
- /* Make the entry in the out list for the transitions. */
- from->outList.append( retVal );
-
- /* Set the keys of the new trans. */
- retVal->lowKey = lowKey;
- retVal->highKey = highKey;
-
- /* Attach using inList as the head pointer. */
- if ( to != 0 )
- attachToInList( from, to, to->inList.head, retVal );
-
- return retVal;
-}
-
-/* Attach for range lists or for the default transition. This attach should
- * be used when a transition already is allocated and must be attached to a
- * target state. Does not handle adding the transition into the out list. */
-void FsmAp::attachTrans( StateAp *from, StateAp *to, TransAp *trans )
-{
- assert( trans->fromState == 0 && trans->toState == 0 );
- trans->fromState = from;
- trans->toState = to;
-
- if ( to != 0 ) {
- /* Attach using the inList pointer as the head pointer. */
- attachToInList( from, to, to->inList.head, trans );
- }
-}
-
-/* Redirect a transition away from error and towards some state. This is just
- * like attachTrans except it requires fromState to be set and does not touch
- * it. */
-void FsmAp::redirectErrorTrans( StateAp *from, StateAp *to, TransAp *trans )
-{
- assert( trans->fromState != 0 && trans->toState == 0 );
- trans->toState = to;
-
- if ( to != 0 ) {
- /* Attach using the inList pointer as the head pointer. */
- attachToInList( from, to, to->inList.head, trans );
- }
-}
-
-/* Detach for out/in lists or for default transition. */
-void FsmAp::detachTrans( StateAp *from, StateAp *to, TransAp *trans )
-{
- assert( trans->fromState == from && trans->toState == to );
- trans->fromState = 0;
- trans->toState = 0;
-
- if ( to != 0 ) {
- /* Detach using to's inList pointer as the head. */
- detachFromInList( from, to, to->inList.head, trans );
- }
-}
-
-
-/* Detach a state from the graph. Detaches and deletes transitions in and out
- * of the state. Empties inList and outList. Removes the state from the final
- * state set. A detached state becomes useless and should be deleted. */
-void FsmAp::detachState( StateAp *state )
-{
- /* Detach the in transitions from the inList list of transitions. */
- while ( state->inList.head != 0 ) {
- /* Get pointers to the trans and the state. */
- TransAp *trans = state->inList.head;
- StateAp *fromState = trans->fromState;
-
- /* Detach the transitions from the source state. */
- detachTrans( fromState, state, trans );
-
- /* Ok to delete the transition. */
- fromState->outList.detach( trans );
- delete trans;
- }
-
- /* Remove the entry points into the machine. */
- while ( state->entryIds.length() > 0 )
- unsetEntry( state->entryIds[0], state );
-
- /* Detach out range transitions. */
- for ( TransList::Iter trans = state->outList; trans.lte(); ) {
- TransList::Iter next = trans.next();
- detachTrans( state, trans->toState, trans );
- delete trans;
- trans = next;
- }
-
- /* Delete all of the out range pointers. */
- state->outList.abandon();
-
- /* Unset final stateness before detaching from graph. */
- if ( state->stateBits & SB_ISFINAL )
- finStateSet.remove( state );
-}
-
-
-/* Duplicate a transition. Makes a new transition that is attached to the same
- * dest as srcTrans. The new transition has functions and priority taken from
- * srcTrans. Used for merging a transition in to a free spot. The trans can
- * just be dropped in. It does not conflict with an existing trans and need
- * not be crossed. Returns the new transition. */
-TransAp *FsmAp::dupTrans( StateAp *from, TransAp *srcTrans )
-{
- /* Make a new transition. */
- TransAp *newTrans = new TransAp();
-
- /* We can attach the transition, one does not exist. */
- attachTrans( from, srcTrans->toState, newTrans );
-
- /* Call the user callback to add in the original source transition. */
- addInTrans( newTrans, srcTrans );
-
- return newTrans;
-}
-
-/* In crossing, src trans and dest trans both go to existing states. Make one
- * state from the sets of states that src and dest trans go to. */
-TransAp *FsmAp::fsmAttachStates( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans )
-{
- /* The priorities are equal. We must merge the transitions. Does the
- * existing trans go to the state we are to attach to? ie, are we to
- * simply double up the transition? */
- StateAp *toState = srcTrans->toState;
- StateAp *existingState = destTrans->toState;
-
- if ( existingState == toState ) {
- /* The transition is a double up to the same state. Copy the src
- * trans into itself. We don't need to merge in the from out trans
- * data, that was done already. */
- addInTrans( destTrans, srcTrans );
- }
- else {
- /* The trans is not a double up. Dest trans cannot be the same as src
- * trans. Set up the state set. */
- StateSet stateSet;
-
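- /* A state with no stateDictEl stands only for itself; otherwise it
- * stands for the set of states recorded in its dict element. */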
- /* We go to all the states the existing trans goes to, plus... */
- if ( existingState->stateDictEl == 0 )
- stateSet.insert( existingState );
- else
- stateSet.insert( existingState->stateDictEl->stateSet );
-
- /* ... all the states that we have been told to go to. */
- if ( toState->stateDictEl == 0 )
- stateSet.insert( toState );
- else
- stateSet.insert( toState->stateDictEl->stateSet );
-
- /* Look for the state. If it is not there already, make it. */
- StateDictEl *lastFound;
- if ( md.stateDict.insert( stateSet, &lastFound ) ) {
- /* Make a new state representing the combination of states in
- * stateSet. It gets added to the fill list. This means that we
- * need to fill in its transitions sometime in the future. We
- * don't do that now (ie, do not recurse). */
- StateAp *combinState = addState();
-
- /* Link up the dict element and the state. */
- lastFound->targState = combinState;
- combinState->stateDictEl = lastFound;
-
- /* Add to the fill list. */
- md.fillListAppend( combinState );
- }
-
- /* Get the state that was inserted or found. */
- StateAp *targ = lastFound->targState;
-
- /* Detach the trans from the existing state. */
- detachTrans( from, existingState, destTrans );
-
- /* Re-attach to the new target. */
- attachTrans( from, targ, destTrans );
-
- /* Add in src trans to the existing transition that we redirected to
- * the new state. We don't need to merge in the from out trans data,
- * that was done already. */
- addInTrans( destTrans, srcTrans );
- }
-
- return destTrans;
-}
-
-/* Two transitions are to be crossed, handle the possibility of either going
- * to the error state. */
-TransAp *FsmAp::mergeTrans( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans )
-{
- TransAp *retTrans = 0;
- if ( destTrans->toState == 0 && srcTrans->toState == 0 ) {
- /* Error added into error. */
- addInTrans( destTrans, srcTrans );
- retTrans = destTrans;
- }
- else if ( destTrans->toState == 0 && srcTrans->toState != 0 ) {
- /* Non-error added into error; we need to detach and reattach. */
- detachTrans( from, destTrans->toState, destTrans );
- attachTrans( from, srcTrans->toState, destTrans );
- addInTrans( destTrans, srcTrans );
- retTrans = destTrans;
- }
- else if ( srcTrans->toState == 0 ) {
- /* Dest goes somewhere but src doesn't, just add it in. */
- addInTrans( destTrans, srcTrans );
- retTrans = destTrans;
- }
- else {
- /* Both go somewhere, run the actual cross. */
- retTrans = fsmAttachStates( md, from, destTrans, srcTrans );
- }
-
- return retTrans;
-}
-
-/* Find the trans with the higher priority. If src is lower priority than dest, then
- * src is ignored. If src is higher priority than dest, then src overwrites dest. If
- * the priorities are equal, then they are merged. */
-TransAp *FsmAp::crossTransitions( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans )
-{
- TransAp *retTrans;
-
- /* Compare the priority of the dest and src transitions. */
- int compareRes = comparePrior( destTrans->priorTable, srcTrans->priorTable );
- if ( compareRes < 0 ) {
- /* Src trans has a higher priority than dest, src overwrites dest.
- * Detach dest and return a copy of src. */
- detachTrans( from, destTrans->toState, destTrans );
- retTrans = dupTrans( from, srcTrans );
- }
- else if ( compareRes > 0 ) {
- /* The dest trans has a higher priority, use dest. */
- retTrans = destTrans;
- }
- else {
- /* Src trans and dest trans have the same priority, they must be merged. */
- retTrans = mergeTrans( md, from, destTrans, srcTrans );
- }
-
- /* Return the transition that resulted from the cross. */
- return retTrans;
-}
-
-/* Copy the transitions in srcList to the outlist of dest. The srcList should
- * not be the outList of dest, otherwise you would be copying the contents of
- * srcList into itself as it's iterated: bad news. */
-void FsmAp::outTransCopy( MergeData &md, StateAp *dest, TransAp *srcList )
-{
- /* The destination list. */
- TransList destList;
-
- /* Set up an iterator to stop at breaks. */
- PairIter<TransAp> outPair( dest->outList.head, srcList );
- for ( ; !outPair.end(); outPair++ ) {
- switch ( outPair.userState ) {
- case RangeInS1: {
- /* The pair iter is the authority on the keys. It may have needed
- * to break the dest range. */
- TransAp *destTrans = outPair.s1Tel.trans;
- destTrans->lowKey = outPair.s1Tel.lowKey;
- destTrans->highKey = outPair.s1Tel.highKey;
- destList.append( destTrans );
- break;
- }
- case RangeInS2: {
- /* Src range may get crossed with dest's default transition. */
- TransAp *newTrans = dupTrans( dest, outPair.s2Tel.trans );
-
- /* Set up the transition's keys and append to the dest list. */
- newTrans->lowKey = outPair.s2Tel.lowKey;
- newTrans->highKey = outPair.s2Tel.highKey;
- destList.append( newTrans );
- break;
- }
- case RangeOverlap: {
- /* Exact overlap, cross them. */
- TransAp *newTrans = crossTransitions( md, dest,
- outPair.s1Tel.trans, outPair.s2Tel.trans );
-
- /* Set up the transition's keys and append to the dest list. */
- newTrans->lowKey = outPair.s1Tel.lowKey;
- newTrans->highKey = outPair.s1Tel.highKey;
- destList.append( newTrans );
- break;
- }
- case BreakS1: {
- /* Since we are always writing to the dest trans, the dest needs
- * to be copied when it is broken. The copy goes into the first
- * half of the break to "break it off". */
- outPair.s1Tel.trans = dupTrans( dest, outPair.s1Tel.trans );
- break;
- }
- case BreakS2:
- break;
- }
- }
-
- /* Abandon the old outList and transfer destList into it. */
- dest->outList.transfer( destList );
-}
-
-
-/* Move all the transitions that go into src so that they go into dest. */
-void FsmAp::inTransMove( StateAp *dest, StateAp *src )
-{
- /* Do not try to move in trans to and from the same state. */
- assert( dest != src );
-
- /* If src is the start state, dest becomes the start state. */
- if ( src == startState ) {
- unsetStartState();
- setStartState( dest );
- }
-
- /* For each entry point into src, create an entry point into dest. When the
- * state is detached, the entry points to src will be removed. */
- for ( EntryIdSet::Iter enId = src->entryIds; enId.lte(); enId++ )
- changeEntry( *enId, dest, src );
-
- /* Move the transitions in inList. */
- while ( src->inList.head != 0 ) {
- /* Get trans and from state. */
- TransAp *trans = src->inList.head;
- StateAp *fromState = trans->fromState;
-
- /* Detach from src, reattach to dest. */
- detachTrans( fromState, src, trans );
- attachTrans( fromState, dest, trans );
- }
-}
diff --git a/contrib/tools/ragel5/ragel/fsmbase.cpp b/contrib/tools/ragel5/ragel/fsmbase.cpp
deleted file mode 100644
index f1d7141c09..0000000000
--- a/contrib/tools/ragel5/ragel/fsmbase.cpp
+++ /dev/null
@@ -1,598 +0,0 @@
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <string.h>
-#include <assert.h>
-#include "fsmgraph.h"
-
-/* Simple singly linked list append routine for the fill list. The new state
- * goes to the end of the list. */
-void MergeData::fillListAppend( StateAp *state )
-{
- state->alg.next = 0;
-
- if ( stfillHead == 0 ) {
- /* List is empty, state becomes head and tail. */
- stfillHead = state;
- stfillTail = state;
- }
- else {
- /* List is not empty, state goes after last element. */
- stfillTail->alg.next = state;
- stfillTail = state;
- }
-}
-
-/* Graph constructor. */
-FsmAp::FsmAp()
-:
- /* No start state. */
- startState(0),
- errState(0),
-
- /* Misfit accounting is a switch, turned on only at specific times. It
- * controls what happens when states have no way in from the outside
- * world. */
- misfitAccounting(false)
-{
-}
-
-/* Copy all graph data including transitions. */
-FsmAp::FsmAp( const FsmAp &graph )
-:
- /* Lists start empty. Will be filled by copy. */
- stateList(),
- misfitList(),
-
- /* Copy in the entry points,
- * pointers will be resolved later. */
- entryPoints(graph.entryPoints),
- startState(graph.startState),
- errState(0),
-
- /* Will be filled by copy. */
- finStateSet(),
-
- /* Misfit accounting is only on during merging. */
- misfitAccounting(false)
-{
- /* Create the states and record their map in the original state. */
- StateList::Iter origState = graph.stateList;
- for ( ; origState.lte(); origState++ ) {
- /* Make the new state. */
- StateAp *newState = new StateAp( *origState );
-
- /* Add the state to the list. */
- stateList.append( newState );
-
- /* Set the mapsTo item of the old state. */
- origState->alg.stateMap = newState;
- }
-
- /* Dereference all the state maps. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- /* This points to the original in the src machine. The target's duplicate
- * is in the state map. */
- StateAp *toState = trans->toState != 0 ? trans->toState->alg.stateMap : 0;
-
- /* Attach the transition to the duplicate. */
- trans->toState = 0;
- attachTrans( state, toState, trans );
- }
- }
-
- /* Fix the state pointers in the entry points array. */
- EntryMapEl *eel = entryPoints.data;
- for ( int e = 0; e < entryPoints.length(); e++, eel++ ) {
- /* Get the duplicate of the state. */
- eel->value = eel->value->alg.stateMap;
-
- /* Foreign in transitions must be built up when duping machines so
- * increment it here. */
- eel->value->foreignInTrans += 1;
- }
-
- /* Fix the start state pointer and the new start state's count of in
- * transitions. */
- startState = startState->alg.stateMap;
- startState->foreignInTrans += 1;
-
- /* Build the final state set. */
- StateSet::Iter st = graph.finStateSet;
- for ( ; st.lte(); st++ )
- finStateSet.insert((*st)->alg.stateMap);
-}
-
-/* Deletes all transition data then deletes each state. */
-FsmAp::~FsmAp()
-{
- /* Delete all the transitions. */
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Iterate the out transitions, deleting them. */
- state->outList.empty();
- }
-
- /* Delete all the states. */
- stateList.empty();
-}
-
-/* Set a state final. The state has its isFinState set to true and the state
- * is added to the finStateSet. */
-void FsmAp::setFinState( StateAp *state )
-{
- /* Is it already a fin state. */
- if ( state->stateBits & SB_ISFINAL )
- return;
-
- state->stateBits |= SB_ISFINAL;
- finStateSet.insert( state );
-}
-
-/* Set a state non-final. The state has its isFinState set to false and the state
- * is removed from the final state set. */
-void FsmAp::unsetFinState( StateAp *state )
-{
- /* Is it already a non-final state? */
- if ( ! (state->stateBits & SB_ISFINAL) )
- return;
-
- /* When a state loses its final state status it must relinquish all the
- * properties that are allowed only for final states. */
- clearOutData( state );
-
- state->stateBits &= ~ SB_ISFINAL;
- finStateSet.remove( state );
-}
-
-/* Set and unset a state as the start state. */
-void FsmAp::setStartState( StateAp *state )
-{
- /* Should change from unset to set. */
- assert( startState == 0 );
- startState = state;
-
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions is about to go up to 1 then
- * take it off the misfit list and put it on the head list. */
- if ( state->foreignInTrans == 0 )
- stateList.append( misfitList.detach( state ) );
- }
-
- /* Up the foreign in transitions to the state. */
- state->foreignInTrans += 1;
-}
-
-void FsmAp::unsetStartState()
-{
- /* Should change from set to unset. */
- assert( startState != 0 );
-
- /* Decrement the entry's count of foreign entries. */
- startState->foreignInTrans -= 1;
-
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions just went down to 0 then take
- * it off the main list and put it on the misfit list. */
- if ( startState->foreignInTrans == 0 )
- misfitList.append( stateList.detach( startState ) );
- }
-
- startState = 0;
-}
-
-/* Associate an id with a state. Makes the state a named entry point. Has no
- * effect if the entry point is already mapped to the state. */
-void FsmAp::setEntry( int id, StateAp *state )
-{
- /* Insert the id into the state. If the state is already labelled with id,
- * nothing to do. */
- if ( state->entryIds.insert( id ) ) {
- /* Insert the entry and assert that it succeeds. */
- entryPoints.insertMulti( id, state );
-
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions is about to go up to 1 then
- * take it off the misfit list and put it on the head list. */
- if ( state->foreignInTrans == 0 )
- stateList.append( misfitList.detach( state ) );
- }
-
- /* Up the foreign in transitions to the state. */
- state->foreignInTrans += 1;
- }
-}
-
-/* Remove the association of an id with a state. The state loses its entry
- * point status. Assumes that the id is indeed mapped to state. */
-void FsmAp::unsetEntry( int id, StateAp *state )
-{
- /* Find the entry point on id. */
- EntryMapEl *enLow = 0, *enHigh = 0;
- entryPoints.findMulti( id, enLow, enHigh );
- while ( enLow->value != state )
- enLow += 1;
-
- /* Remove the record from the map. */
- entryPoints.remove( enLow );
-
- /* Remove the state's sense of the link. */
- state->entryIds.remove( id );
- state->foreignInTrans -= 1;
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions just went down to 0 then take
- * it off the main list and put it on the misfit list. */
- if ( state->foreignInTrans == 0 )
- misfitList.append( stateList.detach( state ) );
- }
-}
-
-/* Remove all association of an id with states. Assumes that the id is indeed
- * mapped to a state. */
-void FsmAp::unsetEntry( int id )
-{
- /* Find the entry points on id. */
- EntryMapEl *enLow = 0, *enHigh = 0;
- entryPoints.findMulti( id, enLow, enHigh );
- for ( EntryMapEl *mel = enLow; mel <= enHigh; mel++ ) {
- /* Remove the state's sense of the link. */
- mel->value->entryIds.remove( id );
- mel->value->foreignInTrans -= 1;
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions just went down to 0
- * then take it off the main list and put it on the misfit list. */
- if ( mel->value->foreignInTrans == 0 )
- misfitList.append( stateList.detach( mel->value ) );
- }
- }
-
- /* Remove the records from the entry points map. */
- entryPoints.removeMulti( enLow, enHigh );
-}
-
-
-void FsmAp::changeEntry( int id, StateAp *to, StateAp *from )
-{
- /* Find the entry in the entry map. */
- EntryMapEl *enLow = 0, *enHigh = 0;
- entryPoints.findMulti( id, enLow, enHigh );
- while ( enLow->value != from )
- enLow += 1;
-
- /* Change it to the new target. */
- enLow->value = to;
-
- /* Remove from's sense of the link. */
- from->entryIds.remove( id );
- from->foreignInTrans -= 1;
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions just went down to 0 then take
- * it off the main list and put it on the misfit list. */
- if ( from->foreignInTrans == 0 )
- misfitList.append( stateList.detach( from ) );
- }
-
- /* Add to's sense of the link. */
- if ( to->entryIds.insert( id ) != 0 ) {
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions is about to go up to 1 then
- * take it off the misfit list and put it on the head list. */
- if ( to->foreignInTrans == 0 )
- stateList.append( misfitList.detach( to ) );
- }
-
- /* Up the foreign in transitions to the state. */
- to->foreignInTrans += 1;
- }
-}
-
-
-/* Clear all entry points from a machine. */
-void FsmAp::unsetAllEntryPoints()
-{
- for ( EntryMap::Iter en = entryPoints; en.lte(); en++ ) {
- /* Kill all the state's entry points at once. */
- if ( en->value->entryIds.length() > 0 ) {
- en->value->foreignInTrans -= en->value->entryIds.length();
-
- if ( misfitAccounting ) {
- /* If the number of foreign in transitions just went down to 0
- * then take it off the main list and put it on the misfit
- * list. */
- if ( en->value->foreignInTrans == 0 )
- misfitList.append( stateList.detach( en->value ) );
- }
-
- /* Clear the set of ids out all at once. */
- en->value->entryIds.empty();
- }
- }
-
- /* Now clear out the entry map all at once. */
- entryPoints.empty();
-}
-
-/* Assigning an epsilon transition into final states. */
-void FsmAp::epsilonTrans( int id )
-{
- for ( StateSet::Iter fs = finStateSet; fs.lte(); fs++ )
- (*fs)->epsilonTrans.append( id );
-}
-
-/* Mark all states reachable from state. Traverses transitions forward. Used
- * for removing states that have no path into them. */
-void FsmAp::markReachableFromHere( StateAp *state )
-{
- /* Base case: return; */
- if ( state->stateBits & SB_ISMARKED )
- return;
-
- /* Set this state as processed. We are going to visit all states that this
- * state has a transition to. */
- state->stateBits |= SB_ISMARKED;
-
- /* Recurse on all out transitions. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 )
- markReachableFromHere( trans->toState );
- }
-}
-
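-/* Same forward marking as markReachableFromHere, except that the walk does
- * not continue past final states. */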
-void FsmAp::markReachableFromHereStopFinal( StateAp *state )
-{
- /* Base case: return; */
- if ( state->stateBits & SB_ISMARKED )
- return;
-
- /* Set this state as processed. We are going to visit all states that this
- * state has a transition to. */
- state->stateBits |= SB_ISMARKED;
-
- /* Recurse on all out transitions. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- StateAp *toState = trans->toState;
- if ( toState != 0 && !toState->isFinState() )
- markReachableFromHereStopFinal( toState );
- }
-}
-
-/* Mark all states reachable from state. Traverse transitions backwards. Used
- * for removing dead end paths in graphs. */
-void FsmAp::markReachableFromHereReverse( StateAp *state )
-{
- /* Base case: return; */
- if ( state->stateBits & SB_ISMARKED )
- return;
-
- /* Set this state as processed. We are going to visit all states with
- * transitions into this state. */
- state->stateBits |= SB_ISMARKED;
-
- /* Recurse on all items in transitions. */
- for ( TransInList::Iter trans = state->inList; trans.lte(); trans++ )
- markReachableFromHereReverse( trans->fromState );
-}
-
-/* Determine if there are any ways into the start state other than through its
- * being the start state. Setting starting transitions requires that the start state be
- * isolated. In most cases a start state will already be isolated. */
-bool FsmAp::isStartStateIsolated()
-{
- /* If there are any in transitions then the state is not isolated. */
- if ( startState->inList.head != 0 )
- return false;
-
- /* If there are any entry points then it is not isolated. */
- if ( startState->entryIds.length() > 0 )
- return false;
-
- return true;
-}
-
-/* Bring in other's entry points. Assumes other's states are going to be
- * copied into this machine. */
-void FsmAp::copyInEntryPoints( FsmAp *other )
-{
- /* Use insert multi because names are not unique. */
- for ( EntryMap::Iter en = other->entryPoints; en.lte(); en++ )
- entryPoints.insertMulti( en->key, en->value );
-}
-
-
-void FsmAp::unsetAllFinStates()
-{
- for ( StateSet::Iter st = finStateSet; st.lte(); st++ )
- (*st)->stateBits &= ~ SB_ISFINAL;
- finStateSet.empty();
-}
-
-void FsmAp::setFinBits( int finStateBits )
-{
- for ( int s = 0; s < finStateSet.length(); s++ )
- finStateSet.data[s]->stateBits |= finStateBits;
-}
-
-
-/* Tests the integrity of the transition lists and the fromStates. */
-void FsmAp::verifyIntegrity()
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ ) {
- /* Walk the out transitions and assert fromState is correct. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ )
- assert( trans->fromState == state );
-
- /* Walk the inlist and assert toState is correct. */
- for ( TransInList::Iter trans = state->inList; trans.lte(); trans++ )
- assert( trans->toState == state );
- }
-}
-
-void FsmAp::verifyReachability()
-{
- /* Mark all the states that can be reached
- * through the set of entry points. */
- markReachableFromHere( startState );
- for ( EntryMap::Iter en = entryPoints; en.lte(); en++ )
- markReachableFromHere( en->value );
-
- /* Check that everything got marked. */
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- /* Assert it got marked and then clear the mark. */
- assert( st->stateBits & SB_ISMARKED );
- st->stateBits &= ~ SB_ISMARKED;
- }
-}
-
-void FsmAp::verifyNoDeadEndStates()
-{
- /* Mark all states that have paths to the final states. */
- for ( StateSet::Iter pst = finStateSet; pst.lte(); pst++ )
- markReachableFromHereReverse( *pst );
-
- /* Start state gets honorary marking. Must be done AFTER recursive call. */
- startState->stateBits |= SB_ISMARKED;
-
- /* Make sure everything got marked. */
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- /* Assert the state got marked and unmark it. */
- assert( st->stateBits & SB_ISMARKED );
- st->stateBits &= ~ SB_ISMARKED;
- }
-}
-
-void FsmAp::depthFirstOrdering( StateAp *state )
-{
- /* Nothing to do if the state is already on the list. */
- if ( state->stateBits & SB_ONLIST )
- return;
-
- /* Doing depth first, put state on the list. */
- state->stateBits |= SB_ONLIST;
- stateList.append( state );
-
- /* Recurse on all out transitions. */
- for ( TransList::Iter tel = state->outList; tel.lte(); tel++ ) {
- if ( tel->toState != 0 )
- depthFirstOrdering( tel->toState );
- }
-}
-
-/* Ordering states by transition connections. */
-void FsmAp::depthFirstOrdering()
-{
- /* Clear the on-list flags. */
- for ( StateList::Iter st = stateList; st.lte(); st++ )
- st->stateBits &= ~SB_ONLIST;
-
- /* Clear out the state list, we will rebuild it. */
- int stateListLen = stateList.length();
- stateList.abandon();
-
- /* Add back to the state list from the start state and all other entry
- * points. */
- if ( errState != 0 )
- depthFirstOrdering( errState );
- depthFirstOrdering( startState );
- for ( EntryMap::Iter en = entryPoints; en.lte(); en++ )
- depthFirstOrdering( en->value );
-
- /* Make sure we put everything back on. */
- assert( stateListLen == stateList.length() );
-}
-
-/* Stable sort the states by final state status. */
-void FsmAp::sortStatesByFinal()
-{
- /* Move forward through the list and throw final states onto the end. */
- StateAp *state = 0;
- StateAp *next = stateList.head;
- StateAp *last = stateList.tail;
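- /* Stop at the original tail; final states appended during the walk are
- * already in position and must not be revisited. */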
- while ( state != last ) {
- /* Move forward and load up the next. */
- state = next;
- next = state->next;
-
- /* Throw to the end? */
- if ( state->isFinState() ) {
- stateList.detach( state );
- stateList.append( state );
- }
- }
-}
-
-void FsmAp::setStateNumbers( int base )
-{
- for ( StateList::Iter state = stateList; state.lte(); state++ )
- state->alg.stateNum = base++;
-}
-
-
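-/* Check for an implicit error transition just below this transition: true if
- * the transition itself goes to the error state, or if there is a gap in key
- * coverage between the previous transition (or the minimum key) and this one. */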
-bool FsmAp::checkErrTrans( StateAp *state, TransAp *trans )
-{
- /* Might go directly to error state. */
- if ( trans->toState == 0 )
- return true;
-
- if ( trans->prev == 0 ) {
- /* If this is the first transition. */
- if ( keyOps->minKey < trans->lowKey )
- return true;
- }
- else {
- /* Not the first transition. Compare against the prev. */
- TransAp *prev = trans->prev;
- Key nextKey = prev->highKey;
- nextKey.increment();
- if ( nextKey < trans->lowKey )
- return true;
- }
- return false;
-}
-
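-/* Check for an implicit error transition after the last out transition: true
- * if the state has no out transitions at all, or if there is a gap between
- * the last transition's high key and the maximum key. */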
-bool FsmAp::checkErrTransFinish( StateAp *state )
-{
- /* Check if there are any ranges already. */
- if ( state->outList.length() == 0 )
- return true;
- else {
- /* Get the last and check for a gap on the end. */
- TransAp *last = state->outList.tail;
- if ( last->highKey < keyOps->maxKey )
- return true;
- }
- return false;
-}
-
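-/* Returns true if any state has a transition to the error state, either
- * explicitly or through a gap in the key coverage of its out transitions. */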
-bool FsmAp::hasErrorTrans()
-{
- bool result;
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- for ( TransList::Iter tr = st->outList; tr.lte(); tr++ ) {
- result = checkErrTrans( st, tr );
- if ( result )
- return true;
- }
- result = checkErrTransFinish( st );
- if ( result )
- return true;
- }
- return false;
-}
diff --git a/contrib/tools/ragel5/ragel/fsmgraph.cpp b/contrib/tools/ragel5/ragel/fsmgraph.cpp
deleted file mode 100644
index d7d0ba4fe2..0000000000
--- a/contrib/tools/ragel5/ragel/fsmgraph.cpp
+++ /dev/null
@@ -1,1426 +0,0 @@
-/*
- * Copyright 2001, 2002, 2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <assert.h>
-#include <iostream>
-
-#include "fsmgraph.h"
-#include "mergesort.h"
-#include "parsedata.h"
-
-using std::cerr;
-using std::endl;
-
-/* Make a new state. The new state will be put on the graph's
- * list of states. The new state can be created final or non final. */
-StateAp *FsmAp::addState()
-{
- /* Make the new state to return. */
- StateAp *state = new StateAp();
-
- if ( misfitAccounting ) {
- /* Create the new state on the misfit list. All states are created
- * with no foreign in transitions. */
- misfitList.append( state );
- }
- else {
- /* Create the new state. */
- stateList.append( state );
- }
-
- return state;
-}
-
-/* Construct an FSM that is the concatenation of an array of characters. A new
- * machine will be made that has len+1 states with one transition between each
- * state for each integer in str. IsSigned determines if the integers are to
- * be considered as signed or unsigned ints. */
-void FsmAp::concatFsm( Key *str, int len )
-{
- /* Make the first state and set it as the start state. */
- StateAp *last = addState();
- setStartState( last );
-
- /* Attach subsequent states. */
- for ( int i = 0; i < len; i++ ) {
- StateAp *newState = addState();
- attachNewTrans( last, newState, str[i], str[i] );
- last = newState;
- }
-
- /* Make the last state the final state. */
- setFinState( last );
-}
-
-/* Case insensitive version of concatFsm. */
-void FsmAp::concatFsmCI( Key *str, int len )
-{
- /* Make the first state and set it as the start state. */
- StateAp *last = addState();
- setStartState( last );
-
- /* Attach subsequent states. */
- for ( int i = 0; i < len; i++ ) {
- StateAp *newState = addState();
-
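- /* Build the set of keys to attach: the character itself plus its
- * opposite case, if it has one. */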
- KeySet keySet;
- if ( str[i].isLower() )
- keySet.insert( str[i].toUpper() );
- if ( str[i].isUpper() )
- keySet.insert( str[i].toLower() );
- keySet.insert( str[i] );
-
- for ( int j = 0; j < keySet.length(); j++ )
- attachNewTrans( last, newState, keySet[j], keySet[j] );
-
- last = newState;
- }
-
- /* Make the last state the final state. */
- setFinState( last );
-}
-
-/* Construct a machine that matches one character. A new machine will be made
- * that has two states with a single transition between the states. IsSigned
- * determines if the integers are to be considered as signed or unsigned ints. */
-void FsmAp::concatFsm( Key chr )
-{
- /* Two states first start, second final. */
- setStartState( addState() );
-
- StateAp *end = addState();
- setFinState( end );
-
- /* Attach on the character. */
- attachNewTrans( startState, end, chr, chr );
-}
-
-/* Construct a machine that matches any character in set. A new machine will
- * be made that has two states and len transitions between them. The set
- * should be ordered correctly according to KeyOps and should not contain
- * any duplicates. */
-void FsmAp::orFsm( Key *set, int len )
-{
- /* Two states first start, second final. */
- setStartState( addState() );
-
- StateAp *end = addState();
- setFinState( end );
-
- for ( int i = 1; i < len; i++ )
- assert( set[i-1] < set[i] );
-
- /* Attach on all the integers in the given string of ints. */
- for ( int i = 0; i < len; i++ )
- attachNewTrans( startState, end, set[i], set[i] );
-}
-
-/* Construct a machine that matches a range of characters. A new machine will
- * be made with two states and a range transition between them. The range will
- * match any characters from low to high inclusive. Low should be less than or
- * equal to high otherwise undefined behaviour results. IsSigned determines
- * if the integers are to be considered as signed or unsigned ints. */
-void FsmAp::rangeFsm( Key low, Key high )
-{
- /* Two states first start, second final. */
- setStartState( addState() );
-
- StateAp *end = addState();
- setFinState( end );
-
- /* Attach using the range of characters. */
- attachNewTrans( startState, end, low, high );
-}
-
-/* Construct a machine that matches a repeated range of characters. */
-void FsmAp::rangeStarFsm( Key low, Key high)
-{
- /* One state which is final and is the start state. */
- setStartState( addState() );
- setFinState( startState );
-
- /* Attach start to start using range of characters. */
- attachNewTrans( startState, startState, low, high );
-}
-
-/* Construct a machine that matches the empty string. A new machine will be
- * made with only one state. The new state will be both a start and final
- * state. IsSigned determines if the machine has a signed or unsigned
- * alphabet. Fsm operations must be done on machines with the same alphabet
- * signedness. */
-void FsmAp::lambdaFsm( )
-{
- /* Give it one state with no transitions making it
- * the start state and final state. */
- setStartState( addState() );
- setFinState( startState );
-}
-
-/* Construct a machine that matches nothing at all. A new machine will be
- * made with only one state. It will not be final. */
-void FsmAp::emptyFsm( )
-{
- /* Give it one state with no transitions making it
- * the start state and final state. */
- setStartState( addState() );
-}
-
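-/* Copy srcState's out action table and out priority table onto each of
- * destState's out transitions that do not go to the error state. */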
-void FsmAp::transferOutData( StateAp *destState, StateAp *srcState )
-{
- for ( TransList::Iter trans = destState->outList; trans.lte(); trans++ ) {
- if ( trans->toState != 0 ) {
- /* Get the actions data from the outActionTable. */
- trans->actionTable.setActions( srcState->outActionTable );
-
- /* Get the priorities from the outPriorTable. */
- trans->priorTable.setPriors( srcState->outPriorTable );
- }
- }
-}
-
-/* Kleene star operator. Makes this machine the kleene star of itself. Any
- * transitions made going out of the machine and back into itself will be
- * notified that they are leaving transitions by having the leavingFromState
- * callback invoked. */
-void FsmAp::starOp( )
-{
- /* For the merging process. */
- MergeData md;
-
- /* Turn on misfit accounting to possibly catch the old start state. */
- setMisfitAccounting( true );
-
- /* Create the new start state. It will be set final after the merging
- * of the final states with the start state is complete. */
- StateAp *prevStartState = startState;
- unsetStartState();
- setStartState( addState() );
-
- /* Merge the new start state with the old one to isolate it. */
- mergeStates( md, startState, prevStartState );
-
- /* Merge the start state into all final states. Except the start state on
- * the first pass. If the start state is set final we will be doubling up
- * its transitions, which will get transferred to any final states that
- * follow it in the final state set. This will be determined by the order
- * of items in the final state set. To prevent this we just merge with the
- * start on a second pass. */
- for ( StateSet::Iter st = finStateSet; st.lte(); st++ ) {
- if ( *st != startState )
- mergeStatesLeaving( md, *st, startState );
- }
-
- /* Now it is safe to merge the start state with itself (provided it
- * is set final). */
- if ( startState->isFinState() )
- mergeStatesLeaving( md, startState, startState );
-
- /* Now ensure the new start state is a final state. */
- setFinState( startState );
-
- /* Fill in any states that were newed up as combinations of others. */
- fillInStates( md );
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-void FsmAp::repeatOp( int times )
-{
- /* Must be 1 and up. 0 produces null machine and requires deleting this. */
- assert( times > 0 );
-
- /* A repeat of one does absolutely nothing. */
- if ( times == 1 )
- return;
-
- /* Make a machine to make copies from. */
- FsmAp *copyFrom = new FsmAp( *this );
-
- /* Concatenate duplicates onto the end up until before the last. */
- for ( int i = 1; i < times-1; i++ ) {
- FsmAp *dup = new FsmAp( *copyFrom );
- doConcat( dup, 0, false );
- }
-
- /* Now use the copyFrom on the end. */
- doConcat( copyFrom, 0, false );
-}
-
-void FsmAp::optionalRepeatOp( int times )
-{
- /* Must be 1 and up. 0 produces null machine and requires deleting this. */
- assert( times > 0 );
-
- /* An optional repeat of one merely allows the empty string. */
- if ( times == 1 ) {
- setFinState( startState );
- return;
- }
-
- /* Make a machine to make copies from. */
- FsmAp *copyFrom = new FsmAp( *this );
-
- /* The state set used on the from end of the concatenation. Starts with
- * the initial final state set; after each concatenation it gets set to
- * the final states that come from the duplicate. */
- StateSet lastFinSet( finStateSet );
-
- /* Set the initial state final to allow zero copies. */
- setFinState( startState );
-
- /* Concatenate duplicates onto the end up until before the last. */
- for ( int i = 1; i < times-1; i++ ) {
- /* Make a duplicate for concatenating and set the fin bits to graph 2 so we
- * can pick out its final states after the optional-style concat. */
- FsmAp *dup = new FsmAp( *copyFrom );
- dup->setFinBits( SB_GRAPH2 );
- doConcat( dup, &lastFinSet, true );
-
- /* Clear the last final state set and make the new one by taking only
- * the final states that come from graph 2.*/
- lastFinSet.empty();
- for ( int i = 0; i < finStateSet.length(); i++ ) {
- /* If the state came from graph 2, add it to the last set and clear
- * the bits. */
- StateAp *fs = finStateSet[i];
- if ( fs->stateBits & SB_GRAPH2 ) {
- lastFinSet.insert( fs );
- fs->stateBits &= ~SB_GRAPH2;
- }
- }
- }
-
- /* Now use the copyFrom on the end, no bits set, no bits to clear. */
- doConcat( copyFrom, &lastFinSet, true );
-}
-
-
-/* Fsm concatenation worker. Supports treating the concatenation as optional,
- * which essentially leaves the final states of machine one as final. */
-void FsmAp::doConcat( FsmAp *other, StateSet *fromStates, bool optional )
-{
- /* For the merging process. */
- StateSet finStateSetCopy, startStateSet;
- MergeData md;
-
- /* Turn on misfit accounting for both graphs. */
- setMisfitAccounting( true );
- other->setMisfitAccounting( true );
-
- /* Get the other's start state. */
- StateAp *otherStartState = other->startState;
-
- /* Unset other's start state before bringing in the entry points. */
- other->unsetStartState();
-
- /* Bring in the rest of other's entry points. */
- copyInEntryPoints( other );
- other->entryPoints.empty();
-
- /* Bring in other's states into our state lists. */
- stateList.append( other->stateList );
- misfitList.append( other->misfitList );
-
- /* If from states is not set, then get a copy of our final state set before
- * we clobber it and use it instead. */
- if ( fromStates == 0 ) {
- finStateSetCopy = finStateSet;
- fromStates = &finStateSetCopy;
- }
-
- /* Unset all of our final states and get the final states from other. */
- if ( !optional )
- unsetAllFinStates();
- finStateSet.insert( other->finStateSet );
-
- /* Since other's lists are empty, we can delete the fsm without
- * affecting any states. */
- delete other;
-
- /* Merge our former final states with the start state of other. */
- for ( int i = 0; i < fromStates->length(); i++ ) {
- StateAp *state = fromStates->data[i];
-
- /* Merge the former final state with other's start state. */
- mergeStatesLeaving( md, state, otherStartState );
-
- /* If the former final state was not reset final then we must clear
- * the state's out trans data. If it got reset final then it gets to
- * keep its out trans data. This must be done before fillInStates gets
- * called to prevent the data from being sourced. */
- if ( ! state->isFinState() )
- clearOutData( state );
- }
-
- /* Fill in any new states made from merging. */
- fillInStates( md );
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-/* Concatenates other to the end of this machine. Other is deleted. Any
- * transitions made leaving this machine and entering into other are notified
- * that they are leaving transitions by having the leavingFromState callback
- * invoked. */
-void FsmAp::concatOp( FsmAp *other )
-{
- /* Assert same signedness and return graph concatenation op. */
- doConcat( other, 0, false );
-}
-
-
-void FsmAp::doOr( FsmAp *other )
-{
- /* For the merging process. */
- MergeData md;
-
- /* Build a state set consisting of both start states */
- StateSet startStateSet;
- startStateSet.insert( startState );
- startStateSet.insert( other->startState );
-
- /* Both of the original start states lose their start state status. */
- unsetStartState();
- other->unsetStartState();
-
- /* Bring in the rest of other's entry points. */
- copyInEntryPoints( other );
- other->entryPoints.empty();
-
- /* Merge the lists. This will move all the states from other
- * into this. No states will be deleted. */
- stateList.append( other->stateList );
- misfitList.append( other->misfitList );
-
- /* Move the final set data from other into this. */
- finStateSet.insert(other->finStateSet);
- other->finStateSet.empty();
-
- /* Since other's list is empty, we can delete the fsm without
- * affecting any states. */
- delete other;
-
- /* Create a new start state. */
- setStartState( addState() );
-
- /* Merge the start states. */
- mergeStates( md, startState, startStateSet.data, startStateSet.length() );
-
- /* Fill in any new states made from merging. */
- fillInStates( md );
-}
-
-/* Unions other with this machine. Other is deleted. */
-void FsmAp::unionOp( FsmAp *other )
-{
- /* Turn on misfit accounting for both graphs. */
- setMisfitAccounting( true );
- other->setMisfitAccounting( true );
-
- /* Call Worker routine. */
- doOr( other );
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-/* Intersects other with this machine. Other is deleted. */
-void FsmAp::intersectOp( FsmAp *other )
-{
- /* Turn on misfit accounting for both graphs. */
- setMisfitAccounting( true );
- other->setMisfitAccounting( true );
-
- /* Set the fin bits on this and other to want each other. */
- setFinBits( SB_GRAPH1 );
- other->setFinBits( SB_GRAPH2 );
-
- /* Call worker Or routine. */
- doOr( other );
-
- /* Unset any final states that are no longer to
- * be final due to final bits. */
- unsetIncompleteFinals();
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-
- /* Remove states that have no path to a final state. */
- removeDeadEndStates();
-}
-
-/* Set subtracts other machine from this machine. Other is deleted. */
-void FsmAp::subtractOp( FsmAp *other )
-{
- /* Turn on misfit accounting for both graphs. */
- setMisfitAccounting( true );
- other->setMisfitAccounting( true );
-
- /* Set the fin bits of other to be killers. */
- other->setFinBits( SB_GRAPH1 );
-
- /* Call worker Or routine. */
- doOr( other );
-
- /* Unset any final states that are no longer to
- * be final due to final bits. */
- unsetKilledFinals();
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-
- /* Remove states that have no path to a final state. */
- removeDeadEndStates();
-}
-
-bool FsmAp::inEptVect( EptVect *eptVect, StateAp *state )
-{
- if ( eptVect != 0 ) {
- /* Vect is there, walk it looking for state. */
- for ( int i = 0; i < eptVect->length(); i++ ) {
- if ( eptVect->data[i].targ == state )
- return true;
- }
- }
- return false;
-}
-
-/* Fill epsilon vectors in a root state from a given starting point. Employs
- * a depth first search through the graph of epsilon transitions. */
-void FsmAp::epsilonFillEptVectFrom( StateAp *root, StateAp *from, bool parentLeaving )
-{
- /* Walk the epsilon transitions out of the state. */
- for ( EpsilonTrans::Iter ep = from->epsilonTrans; ep.lte(); ep++ ) {
- /* Find the entry point; if it does not resolve, ignore it. */
- EntryMapEl *enLow, *enHigh;
- if ( entryPoints.findMulti( *ep, enLow, enHigh ) ) {
- /* Loop the targets. */
- for ( EntryMapEl *en = enLow; en <= enHigh; en++ ) {
- /* Do not add the root or states already in eptVect. */
- StateAp *targ = en->value;
- if ( targ != from && !inEptVect(root->eptVect, targ) ) {
- /* Maybe need to create the eptVect. */
- if ( root->eptVect == 0 )
- root->eptVect = new EptVect();
-
- /* If moving to a different graph or if any parent is
- * leaving then we are leaving. */
- bool leaving = parentLeaving ||
- root->owningGraph != targ->owningGraph;
-
- /* All ok, add the target epsilon and recurse. */
- root->eptVect->append( EptVectEl(targ, leaving) );
- epsilonFillEptVectFrom( root, targ, leaving );
- }
- }
- }
- }
-}
-
-void FsmAp::shadowReadWriteStates( MergeData &md )
-{
- /* Init isolatedShadow algorithm data. */
- for ( StateList::Iter st = stateList; st.lte(); st++ )
- st->isolatedShadow = 0;
-
- /* Any states that may be both read from and written to must
- * be shadowed. */
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- /* Find such states by looping through eptVect lists, which give us
- * the states that will be read from. May cause us to visit the states
- * that we are interested in more than once. */
- if ( st->eptVect != 0 ) {
- /* For all states that will be read from. */
- for ( EptVect::Iter ept = *st->eptVect; ept.lte(); ept++ ) {
- /* Check for read and write to the same state. */
- StateAp *targ = ept->targ;
- if ( targ->eptVect != 0 ) {
- /* State is to be written to, if the shadow is not already
- * there, create it. */
- if ( targ->isolatedShadow == 0 ) {
- StateAp *shadow = addState();
- mergeStates( md, shadow, targ );
- targ->isolatedShadow = shadow;
- }
-
- /* Write shadow into the state vector so that it is the
- * state that the epsilon transition will read from. */
- ept->targ = targ->isolatedShadow;
- }
- }
- }
- }
-}
-
-void FsmAp::resolveEpsilonTrans( MergeData &md )
-{
- /* Walk the state list and invoke recursive worker on each state. */
- for ( StateList::Iter st = stateList; st.lte(); st++ )
- epsilonFillEptVectFrom( st, st, false );
-
- /* Prevent reading from and writing to the same state. */
- shadowReadWriteStates( md );
-
- /* For all states that have epsilon transitions out, draw the transitions,
- * clear the epsilon transitions. */
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- /* If there is a state vector, then create the pre-merge state. */
- if ( st->eptVect != 0 ) {
- /* Merge all the epsilon targets into the state. */
- for ( EptVect::Iter ept = *st->eptVect; ept.lte(); ept++ ) {
- if ( ept->leaving )
- mergeStatesLeaving( md, st, ept->targ );
- else
- mergeStates( md, st, ept->targ );
- }
-
- /* Clean up the target list. */
- delete st->eptVect;
- st->eptVect = 0;
- }
-
- /* Clear the epsilon transitions vector. */
- st->epsilonTrans.empty();
- }
-}
-
-void FsmAp::epsilonOp()
-{
- /* For merging process. */
- MergeData md;
-
- setMisfitAccounting( true );
-
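- /* Place every state in the same owning graph so that no epsilon merge
- * below is treated as a leaving merge. */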
- for ( StateList::Iter st = stateList; st.lte(); st++ )
- st->owningGraph = 0;
-
- /* Perform merges. */
- resolveEpsilonTrans( md );
-
- /* Epsilons can cause merges which leave behind unreachable states. */
- fillInStates( md );
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-/* Make a new machine by joining together a bunch of machines without making
- * any transitions between them. A negative finalId results in there being no
- * final id. */
-void FsmAp::joinOp( int startId, int finalId, FsmAp **others, int numOthers )
-{
- /* For the merging process. */
- MergeData md;
-
- /* Set the owning machines. Start at one. Zero is reserved for the start
- * and final states. */
- for ( StateList::Iter st = stateList; st.lte(); st++ )
- st->owningGraph = 1;
- for ( int m = 0; m < numOthers; m++ ) {
- for ( StateList::Iter st = others[m]->stateList; st.lte(); st++ )
- st->owningGraph = 2+m;
- }
-
- /* All machines lose start state status. */
- unsetStartState();
- for ( int m = 0; m < numOthers; m++ )
- others[m]->unsetStartState();
-
- /* Bring the other machines into this. */
- for ( int m = 0; m < numOthers; m++ ) {
- /* Bring in the rest of other's entry points. */
- copyInEntryPoints( others[m] );
- others[m]->entryPoints.empty();
-
- /* Merge the lists. This will move all the states from other into
- * this. No states will be deleted. */
- stateList.append( others[m]->stateList );
- assert( others[m]->misfitList.length() == 0 );
-
- /* Move the final set data from other into this. */
- finStateSet.insert( others[m]->finStateSet );
- others[m]->finStateSet.empty();
-
- /* Since other's list is empty, we can delete the fsm without
- * affecting any states. */
- delete others[m];
- }
-
- /* Look up the start entry point. */
- EntryMapEl *enLow = 0, *enHigh = 0;
- bool findRes = entryPoints.findMulti( startId, enLow, enHigh );
- if ( ! findRes ) {
- /* No start state. Set a default one and proceed with the join. Note
- * that the result of the join will be a very uninteresting machine. */
- setStartState( addState() );
- }
- else {
- /* There is at least one start state, create a state that will become
- * the new start state. */
- StateAp *newStart = addState();
- setStartState( newStart );
-
-		/* The start state is in an owning machine class all its own. */
- newStart->owningGraph = 0;
-
- /* Create the set of states to merge from. */
- StateSet stateSet;
- for ( EntryMapEl *en = enLow; en <= enHigh; en++ )
- stateSet.insert( en->value );
-
- /* Merge in the set of start states into the new start state. */
- mergeStates( md, newStart, stateSet.data, stateSet.length() );
- }
-
- /* Take a copy of the final state set, before unsetting them all. This
- * will allow us to call clearOutData on the states that don't get
-	 * final state status back. */
- StateSet finStateSetCopy = finStateSet;
-
- /* Now all final states are unset. */
- unsetAllFinStates();
-
- if ( finalId >= 0 ) {
- /* Create the implicit final state. */
- StateAp *finState = addState();
- setFinState( finState );
-
- /* Assign an entry into the final state on the final state entry id. Note
- * that there may already be an entry on this id. That's ok. Also set the
-	 * final state owning machine id. It's in a class all its own. */
- setEntry( finalId, finState );
- finState->owningGraph = 0;
- }
-
- /* Hand over to workers for resolving epsilon trans. This will merge states
- * with the targets of their epsilon transitions. */
- resolveEpsilonTrans( md );
-
- /* Invoke the relinquish final callback on any states that did not get
- * final state status back. */
- for ( StateSet::Iter st = finStateSetCopy; st.lte(); st++ ) {
- if ( !((*st)->stateBits & SB_ISFINAL) )
- clearOutData( *st );
- }
-
- /* Fill in any new states made from merging. */
- fillInStates( md );
-
- /* Joining can be messy. Instead of having misfit accounting on (which is
- * tricky here) do a full cleaning. */
- removeUnreachableStates();
-}
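
/* A minimal usage sketch of joinOp, for illustration only; mainFsm, machA and
 * machB are assumed, already-constructed machines and the entry ids are
 * arbitrary, not taken from the Ragel sources. */
FsmAp *others[2] = { machA, machB };
mainFsm->joinOp( 1 /* startId */, 2 /* finalId */, others, 2 );
/* After the call machA and machB have been deleted and their states absorbed
 * into mainFsm. Entry id 1 selected the start states merged into the new
 * start state, and entry id 2 names the implicit final state; a negative
 * finalId would skip creation of the implicit final state. */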
-
-void FsmAp::globOp( FsmAp **others, int numOthers )
-{
-	/* All other machines lose start state status. */
- for ( int m = 0; m < numOthers; m++ )
- others[m]->unsetStartState();
-
- /* Bring the other machines into this. */
- for ( int m = 0; m < numOthers; m++ ) {
- /* Bring in the rest of other's entry points. */
- copyInEntryPoints( others[m] );
- others[m]->entryPoints.empty();
-
- /* Merge the lists. This will move all the states from other into
- * this. No states will be deleted. */
- stateList.append( others[m]->stateList );
- assert( others[m]->misfitList.length() == 0 );
-
- /* Move the final set data from other into this. */
- finStateSet.insert( others[m]->finStateSet );
- others[m]->finStateSet.empty();
-
- /* Since other's list is empty, we can delete the fsm without
- * affecting any states. */
- delete others[m];
- }
-}
-
-void FsmAp::deterministicEntry()
-{
- /* For the merging process. */
- MergeData md;
-
-	/* States may lose their entry points; turn on misfit accounting. */
- setMisfitAccounting( true );
-
- /* Get a copy of the entry map then clear all the entry points. As we
- * iterate the old entry map finding duplicates we will add the entry
- * points for the new states that we create. */
- EntryMap prevEntry = entryPoints;
- unsetAllEntryPoints();
-
- for ( int enId = 0; enId < prevEntry.length(); ) {
- /* Count the number of states on this entry key. */
- int highId = enId;
- while ( highId < prevEntry.length() && prevEntry[enId].key == prevEntry[highId].key )
- highId += 1;
-
- int numIds = highId - enId;
- if ( numIds == 1 ) {
- /* Only a single entry point, just set the entry. */
- setEntry( prevEntry[enId].key, prevEntry[enId].value );
- }
- else {
- /* Multiple entry points, need to create a new state and merge in
- * all the targets of entry points. */
- StateAp *newEntry = addState();
- for ( int en = enId; en < highId; en++ )
- mergeStates( md, newEntry, prevEntry[en].value );
-
- /* Add the new state as the single entry point. */
- setEntry( prevEntry[enId].key, newEntry );
- }
-
- enId += numIds;
- }
-
- /* The old start state may be unreachable. Remove the misfits and turn off
- * misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
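
/* Illustration only (values assumed): if the old entry map held (3,s1),
 * (5,s2), (5,s3), (5,s4), then id 3 keeps s1 as its entry, while a new state
 * merging s2, s3 and s4 becomes the single entry point for id 5. */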
-
-/* Unset any final states that are no longer to be final due to final bits. */
-void FsmAp::unsetKilledFinals()
-{
- /* Duplicate the final state set before we begin modifying it. */
- StateSet fin( finStateSet );
-
- for ( int s = 0; s < fin.length(); s++ ) {
- /* Check for killing bit. */
- StateAp *state = fin.data[s];
- if ( state->stateBits & SB_GRAPH1 ) {
- /* One final state is a killer, set to non-final. */
- unsetFinState( state );
- }
-
- /* Clear all killing bits. Non final states should never have had those
- * state bits set in the first place. */
- state->stateBits &= ~SB_GRAPH1;
- }
-}
-
-/* Unset any final states that are no longer to be final due to final bits. */
-void FsmAp::unsetIncompleteFinals()
-{
- /* Duplicate the final state set before we begin modifying it. */
- StateSet fin( finStateSet );
-
- for ( int s = 0; s < fin.length(); s++ ) {
- /* Check for one set but not the other. */
- StateAp *state = fin.data[s];
- if ( state->stateBits & SB_BOTH &&
- (state->stateBits & SB_BOTH) != SB_BOTH )
- {
- /* One state wants the other but it is not there. */
- unsetFinState( state );
- }
-
- /* Clear wanting bits. Non final states should never have had those
- * state bits set in the first place. */
- state->stateBits &= ~SB_BOTH;
- }
-}
-
-/* Ensure that the start state is free of entry points (aside from the fact
- * that it is the start state). If the start state has entry points then make a
- * new start state by merging with the old one. Useful before modifying start
- * transitions. If the existing start state has any entry points other than the
- * start state entry then modifying its transitions changes more than the start
- * transitions. So isolate the start state by separating it out such that it
- * only has start stateness as its entry point. */
-void FsmAp::isolateStartState( )
-{
- /* For the merging process. */
- MergeData md;
-
- /* Bail out if the start state is already isolated. */
- if ( isStartStateIsolated() )
- return;
-
- /* Turn on misfit accounting to possibly catch the old start state. */
- setMisfitAccounting( true );
-
- /* This will be the new start state. The existing start
- * state is merged with it. */
- StateAp *prevStartState = startState;
- unsetStartState();
- setStartState( addState() );
-
- /* Merge the new start state with the old one to isolate it. */
- mergeStates( md, startState, prevStartState );
-
-	/* The stfill list and stateDict will be empty because the merging of the
-	 * old start state into the new one will not have any conflicting
-	 * transitions. */
- assert( md.stateDict.treeSize == 0 );
- assert( md.stfillHead == 0 );
-
- /* The old start state may be unreachable. Remove the misfits and turn off
- * misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-#ifdef LOG_CONDS
-void logCondSpace( CondSpace *condSpace )
-{
- if ( condSpace == 0 )
- cerr << "<empty>";
- else {
- for ( CondSet::Iter csi = condSpace->condSet.last(); csi.gtb(); csi-- ) {
- if ( ! csi.last() )
- cerr << ',';
- (*csi)->actionName( cerr );
- }
- }
-}
-
-void logNewExpansion( Expansion *exp )
-{
- cerr << "created expansion:" << endl;
- cerr << " range: " << exp->lowKey.getVal() << " .. " <<
- exp->highKey.getVal() << endl;
-
- cerr << " fromCondSpace: ";
- logCondSpace( exp->fromCondSpace );
- cerr << endl;
- cerr << " fromVals: " << exp->fromVals << endl;
-
- cerr << " toCondSpace: ";
- logCondSpace( exp->toCondSpace );
- cerr << endl;
- cerr << " toValsList: ";
- for ( LongVect::Iter to = exp->toValsList; to.lte(); to++ )
- cerr << " " << *to;
- cerr << endl;
-}
-#endif
-
-
-void FsmAp::findTransExpansions( ExpansionList &expansionList,
- StateAp *destState, StateAp *srcState )
-{
- PairIter<TransAp, StateCond> transCond( destState->outList.head,
- srcState->stateCondList.head );
- for ( ; !transCond.end(); transCond++ ) {
- if ( transCond.userState == RangeOverlap ) {
- Expansion *expansion = new Expansion( transCond.s1Tel.lowKey,
- transCond.s1Tel.highKey );
- expansion->fromTrans = new TransAp(*transCond.s1Tel.trans);
- expansion->fromTrans->fromState = 0;
- expansion->fromTrans->toState = transCond.s1Tel.trans->toState;
- expansion->fromCondSpace = 0;
- expansion->fromVals = 0;
- CondSpace *srcCS = transCond.s2Tel.trans->condSpace;
- expansion->toCondSpace = srcCS;
-
- long numTargVals = (1 << srcCS->condSet.length());
- for ( long targVals = 0; targVals < numTargVals; targVals++ )
- expansion->toValsList.append( targVals );
-
- #ifdef LOG_CONDS
- logNewExpansion( expansion );
- #endif
- expansionList.append( expansion );
- }
- }
-}
-
-void FsmAp::findCondExpInTrans( ExpansionList &expansionList, StateAp *state,
- Key lowKey, Key highKey, CondSpace *fromCondSpace, CondSpace *toCondSpace,
- long fromVals, LongVect &toValsList )
-{
- TransAp searchTrans;
- searchTrans.lowKey = fromCondSpace->baseKey + fromVals * keyOps->alphSize() +
- (lowKey - keyOps->minKey);
- searchTrans.highKey = fromCondSpace->baseKey + fromVals * keyOps->alphSize() +
- (highKey - keyOps->minKey);
- searchTrans.prev = searchTrans.next = 0;
-
- PairIter<TransAp> pairIter( state->outList.head, &searchTrans );
- for ( ; !pairIter.end(); pairIter++ ) {
- if ( pairIter.userState == RangeOverlap ) {
- Expansion *expansion = new Expansion( lowKey, highKey );
- expansion->fromTrans = new TransAp(*pairIter.s1Tel.trans);
- expansion->fromTrans->fromState = 0;
- expansion->fromTrans->toState = pairIter.s1Tel.trans->toState;
- expansion->fromCondSpace = fromCondSpace;
- expansion->fromVals = fromVals;
- expansion->toCondSpace = toCondSpace;
- expansion->toValsList = toValsList;
-
- expansionList.append( expansion );
- #ifdef LOG_CONDS
- logNewExpansion( expansion );
- #endif
- }
- }
-}
-
-void FsmAp::findCondExpansions( ExpansionList &expansionList,
- StateAp *destState, StateAp *srcState )
-{
- PairIter<StateCond, StateCond> condCond( destState->stateCondList.head,
- srcState->stateCondList.head );
- for ( ; !condCond.end(); condCond++ ) {
- if ( condCond.userState == RangeOverlap ) {
-			/* Loop over all existing condVals. */
- CondSet &destCS = condCond.s1Tel.trans->condSpace->condSet;
- long destLen = destCS.length();
-
- /* Find the items in src cond set that are not in dest
- * cond set. These are the items that we must expand. */
- CondSet srcOnlyCS = condCond.s2Tel.trans->condSpace->condSet;
- for ( CondSet::Iter dcsi = destCS; dcsi.lte(); dcsi++ )
- srcOnlyCS.remove( *dcsi );
- long srcOnlyLen = srcOnlyCS.length();
-
- if ( srcOnlyCS.length() > 0 ) {
- #ifdef LOG_CONDS
- cerr << "there are " << srcOnlyCS.length() << " item(s) that are "
- "only in the srcCS" << endl;
- #endif
-
- CondSet mergedCS = destCS;
- mergedCS.insert( condCond.s2Tel.trans->condSpace->condSet );
-
- CondSpace *fromCondSpace = addCondSpace( destCS );
- CondSpace *toCondSpace = addCondSpace( mergedCS );
-
- /* Loop all values in the dest space. */
- for ( long destVals = 0; destVals < (1 << destLen); destVals++ ) {
- long basicVals = 0;
- for ( CondSet::Iter csi = destCS; csi.lte(); csi++ ) {
- if ( destVals & (1 << csi.pos()) ) {
- Action **cim = mergedCS.find( *csi );
- long bitPos = (cim - mergedCS.data);
- basicVals |= 1 << bitPos;
- }
- }
-
- /* Loop all new values. */
- LongVect expandToVals;
- for ( long soVals = 0; soVals < (1 << srcOnlyLen); soVals++ ) {
- long targVals = basicVals;
- for ( CondSet::Iter csi = srcOnlyCS; csi.lte(); csi++ ) {
- if ( soVals & (1 << csi.pos()) ) {
- Action **cim = mergedCS.find( *csi );
- long bitPos = (cim - mergedCS.data);
- targVals |= 1 << bitPos;
- }
- }
- expandToVals.append( targVals );
- }
-
- findCondExpInTrans( expansionList, destState,
- condCond.s1Tel.lowKey, condCond.s1Tel.highKey,
- fromCondSpace, toCondSpace, destVals, expandToVals );
- }
- }
- }
- }
-}
-
-void FsmAp::doExpand( MergeData &md, StateAp *destState, ExpansionList &expList1 )
-{
- for ( ExpansionList::Iter exp = expList1; exp.lte(); exp++ ) {
- for ( LongVect::Iter to = exp->toValsList; to.lte(); to++ ) {
- long targVals = *to;
-
- /* We will use the copy of the transition that was made when the
- * expansion was created. It will get used multiple times. Each
- * time we must set up the keys, everything else is constant and
-			 * already prepared. */
- TransAp *srcTrans = exp->fromTrans;
-
- srcTrans->lowKey = exp->toCondSpace->baseKey +
- targVals * keyOps->alphSize() + (exp->lowKey - keyOps->minKey);
- srcTrans->highKey = exp->toCondSpace->baseKey +
- targVals * keyOps->alphSize() + (exp->highKey - keyOps->minKey);
-
- TransList srcList;
- srcList.append( srcTrans );
- outTransCopy( md, destState, srcList.head );
- srcList.abandon();
- }
- }
-}
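
/* Worked example of the condition-space key arithmetic used above; the
 * concrete numbers are invented for illustration. Both doExpand and
 * findCondExpInTrans translate a plain key into a key inside a condition
 * space with:
 *
 *   condSpace->baseKey + vals * keyOps->alphSize() + (key - keyOps->minKey)
 *
 * Each combination of condition truth values therefore owns a full copy of
 * the alphabet, laid out consecutively above baseKey. Assuming
 * alphSize() == 256, minKey == 0, baseKey == 1000 and vals == 2 (only the
 * second condition true), the character 'a' (97) maps to
 * 1000 + 2*256 + 97 == 1609. */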
-
-
-void FsmAp::doRemove( MergeData &md, StateAp *destState, ExpansionList &expList1 )
-{
- for ( ExpansionList::Iter exp = expList1; exp.lte(); exp++ ) {
- Removal removal;
- if ( exp->fromCondSpace == 0 ) {
- removal.lowKey = exp->lowKey;
- removal.highKey = exp->highKey;
- }
- else {
- removal.lowKey = exp->fromCondSpace->baseKey +
- exp->fromVals * keyOps->alphSize() + (exp->lowKey - keyOps->minKey);
- removal.highKey = exp->fromCondSpace->baseKey +
- exp->fromVals * keyOps->alphSize() + (exp->highKey - keyOps->minKey);
- }
- removal.next = 0;
-
- TransList destList;
- PairIter<TransAp, Removal> pairIter( destState->outList.head, &removal );
- for ( ; !pairIter.end(); pairIter++ ) {
- switch ( pairIter.userState ) {
- case RangeInS1: {
- TransAp *destTrans = pairIter.s1Tel.trans;
- destTrans->lowKey = pairIter.s1Tel.lowKey;
- destTrans->highKey = pairIter.s1Tel.highKey;
- destList.append( destTrans );
- break;
- }
- case RangeInS2:
- break;
- case RangeOverlap: {
- TransAp *trans = pairIter.s1Tel.trans;
- detachTrans( trans->fromState, trans->toState, trans );
- delete trans;
- break;
- }
- case BreakS1: {
- pairIter.s1Tel.trans = dupTrans( destState,
- pairIter.s1Tel.trans );
- break;
- }
- case BreakS2:
- break;
- }
- }
- destState->outList.transfer( destList );
- }
-}
-
-void FsmAp::mergeStateConds( StateAp *destState, StateAp *srcState )
-{
- StateCondList destList;
- PairIter<StateCond> pairIter( destState->stateCondList.head,
- srcState->stateCondList.head );
- for ( ; !pairIter.end(); pairIter++ ) {
- switch ( pairIter.userState ) {
- case RangeInS1: {
- StateCond *destCond = pairIter.s1Tel.trans;
- destCond->lowKey = pairIter.s1Tel.lowKey;
- destCond->highKey = pairIter.s1Tel.highKey;
- destList.append( destCond );
- break;
- }
- case RangeInS2: {
- StateCond *newCond = new StateCond( *pairIter.s2Tel.trans );
- newCond->lowKey = pairIter.s2Tel.lowKey;
- newCond->highKey = pairIter.s2Tel.highKey;
- destList.append( newCond );
- break;
- }
- case RangeOverlap: {
- StateCond *destCond = pairIter.s1Tel.trans;
- StateCond *srcCond = pairIter.s2Tel.trans;
- CondSet mergedCondSet;
- mergedCondSet.insert( destCond->condSpace->condSet );
- mergedCondSet.insert( srcCond->condSpace->condSet );
- destCond->condSpace = addCondSpace( mergedCondSet );
-
- destCond->lowKey = pairIter.s1Tel.lowKey;
- destCond->highKey = pairIter.s1Tel.highKey;
- destList.append( destCond );
- break;
- }
- case BreakS1:
- pairIter.s1Tel.trans = new StateCond( *pairIter.s1Tel.trans );
- break;
-
- case BreakS2:
- break;
- }
- }
- destState->stateCondList.transfer( destList );
-}
-
-/* A state merge which represents the drawing in of leaving transitions. If
- * there is any out data then we duplicate the source state, transfer the out
- * data, then merge in the state. The new state will be reaped because it will
- * not be given any in transitions. */
-void FsmAp::mergeStatesLeaving( MergeData &md, StateAp *destState, StateAp *srcState )
-{
- if ( !hasOutData( destState ) )
- mergeStates( md, destState, srcState );
- else {
- StateAp *ssMutable = addState();
- mergeStates( md, ssMutable, srcState );
- transferOutData( ssMutable, destState );
-
- for ( ActionSet::Iter cond = destState->outCondSet; cond.lte(); cond++ )
- embedCondition( md, ssMutable, *cond );
-
- mergeStates( md, destState, ssMutable );
- }
-}
-
-void FsmAp::mergeStates( MergeData &md, StateAp *destState,
- StateAp **srcStates, int numSrc )
-{
- for ( int s = 0; s < numSrc; s++ )
- mergeStates( md, destState, srcStates[s] );
-}
-
-void FsmAp::mergeStates( MergeData &md, StateAp *destState, StateAp *srcState )
-{
- ExpansionList expList1;
- ExpansionList expList2;
-
- findTransExpansions( expList1, destState, srcState );
- findCondExpansions( expList1, destState, srcState );
- findTransExpansions( expList2, srcState, destState );
- findCondExpansions( expList2, srcState, destState );
-
- mergeStateConds( destState, srcState );
-
- outTransCopy( md, destState, srcState->outList.head );
-
- doExpand( md, destState, expList1 );
- doExpand( md, destState, expList2 );
-
- doRemove( md, destState, expList1 );
- doRemove( md, destState, expList2 );
-
- expList1.empty();
- expList2.empty();
-
- /* Get its bits and final state status. */
- destState->stateBits |= ( srcState->stateBits & ~SB_ISFINAL );
- if ( srcState->isFinState() )
- setFinState( destState );
-
- /* Draw in any properties of srcState into destState. */
- if ( srcState == destState ) {
- /* Duplicate the list to protect against write to source. The
-		 * priority sets are not copied in because that would have no
- * effect. */
- destState->epsilonTrans.append( EpsilonTrans( srcState->epsilonTrans ) );
-
- /* Get all actions, duplicating to protect against write to source. */
- destState->toStateActionTable.setActions(
- ActionTable( srcState->toStateActionTable ) );
- destState->fromStateActionTable.setActions(
- ActionTable( srcState->fromStateActionTable ) );
- destState->outActionTable.setActions( ActionTable( srcState->outActionTable ) );
- destState->outCondSet.insert( ActionSet( srcState->outCondSet ) );
- destState->errActionTable.setActions( ErrActionTable( srcState->errActionTable ) );
- destState->eofActionTable.setActions( ActionTable( srcState->eofActionTable ) );
- }
- else {
- /* Get the epsilons, out priorities. */
- destState->epsilonTrans.append( srcState->epsilonTrans );
- destState->outPriorTable.setPriors( srcState->outPriorTable );
-
- /* Get all actions. */
- destState->toStateActionTable.setActions( srcState->toStateActionTable );
- destState->fromStateActionTable.setActions( srcState->fromStateActionTable );
- destState->outActionTable.setActions( srcState->outActionTable );
- destState->outCondSet.insert( srcState->outCondSet );
- destState->errActionTable.setActions( srcState->errActionTable );
- destState->eofActionTable.setActions( srcState->eofActionTable );
- }
-}
-
-void FsmAp::fillInStates( MergeData &md )
-{
-	/* Merge any states that are awaiting merging. This will likely cause
- * other states to be added to the stfil list. */
- StateAp *state = md.stfillHead;
- while ( state != 0 ) {
- StateSet *stateSet = &state->stateDictEl->stateSet;
- mergeStates( md, state, stateSet->data, stateSet->length() );
- state = state->alg.next;
- }
-
- /* Delete the state sets of all states that are on the fill list. */
- state = md.stfillHead;
- while ( state != 0 ) {
- /* Delete and reset the state set. */
- delete state->stateDictEl;
- state->stateDictEl = 0;
-
- /* Next state in the stfill list. */
- state = state->alg.next;
- }
-
-	/* StateDict will still have its ptrs/size set but all of its elements
- * will be deleted so we don't need to clean it up. */
-}
-
-void FsmAp::findEmbedExpansions( ExpansionList &expansionList,
- StateAp *destState, Action *condAction )
-{
- StateCondList destList;
- PairIter<TransAp, StateCond> transCond( destState->outList.head,
- destState->stateCondList.head );
- for ( ; !transCond.end(); transCond++ ) {
- switch ( transCond.userState ) {
- case RangeInS1: {
- if ( transCond.s1Tel.lowKey <= keyOps->maxKey ) {
- assert( transCond.s1Tel.highKey <= keyOps->maxKey );
-
- /* Make a new state cond. */
- StateCond *newStateCond = new StateCond( transCond.s1Tel.lowKey,
- transCond.s1Tel.highKey );
- newStateCond->condSpace = addCondSpace( CondSet( condAction ) );
- destList.append( newStateCond );
-
- /* Create the expansion. */
- Expansion *expansion = new Expansion( transCond.s1Tel.lowKey,
- transCond.s1Tel.highKey );
- expansion->fromTrans = new TransAp(*transCond.s1Tel.trans);
- expansion->fromTrans->fromState = 0;
- expansion->fromTrans->toState = transCond.s1Tel.trans->toState;
- expansion->fromCondSpace = 0;
- expansion->fromVals = 0;
- expansion->toCondSpace = newStateCond->condSpace;
- expansion->toValsList.append( 1 );
- #ifdef LOG_CONDS
- logNewExpansion( expansion );
- #endif
- expansionList.append( expansion );
- }
- break;
- }
- case RangeInS2: {
- /* Enhance state cond and find the expansion. */
- StateCond *stateCond = transCond.s2Tel.trans;
- stateCond->lowKey = transCond.s2Tel.lowKey;
- stateCond->highKey = transCond.s2Tel.highKey;
-
- CondSet &destCS = stateCond->condSpace->condSet;
- long destLen = destCS.length();
- CondSpace *fromCondSpace = stateCond->condSpace;
-
- CondSet mergedCS = destCS;
- mergedCS.insert( condAction );
- CondSpace *toCondSpace = addCondSpace( mergedCS );
- stateCond->condSpace = toCondSpace;
- destList.append( stateCond );
-
- /* Loop all values in the dest space. */
- for ( long destVals = 0; destVals < (1 << destLen); destVals++ ) {
- long basicVals = 0;
- for ( CondSet::Iter csi = destCS; csi.lte(); csi++ ) {
- if ( destVals & (1 << csi.pos()) ) {
- Action **cim = mergedCS.find( *csi );
- long bitPos = (cim - mergedCS.data);
- basicVals |= 1 << bitPos;
- }
- }
-
- long targVals = basicVals;
- Action **cim = mergedCS.find( condAction );
- long bitPos = (cim - mergedCS.data);
- targVals |= 1 << bitPos;
-
- LongVect expandToVals( targVals );
- findCondExpInTrans( expansionList, destState,
- transCond.s2Tel.lowKey, transCond.s2Tel.highKey,
- fromCondSpace, toCondSpace, destVals, expandToVals );
- }
- break;
- }
-
-
- case RangeOverlap:
- case BreakS1:
- case BreakS2:
- assert( false );
- break;
- }
- }
-
- destState->stateCondList.transfer( destList );
-}
-
-void FsmAp::embedCondition( StateAp *state, Action *condAction )
-{
- MergeData md;
- ExpansionList expList;
-
- /* Turn on misfit accounting to possibly catch the old start state. */
- setMisfitAccounting( true );
-
- /* Worker. */
- embedCondition( md, state, condAction );
-
- /* Fill in any states that were newed up as combinations of others. */
- fillInStates( md );
-
- /* Remove the misfits and turn off misfit accounting. */
- removeMisfits();
- setMisfitAccounting( false );
-}
-
-void FsmAp::embedCondition( MergeData &md, StateAp *state, Action *condAction )
-{
- ExpansionList expList;
-
- findEmbedExpansions( expList, state, condAction );
- doExpand( md, state, expList );
- doRemove( md, state, expList );
- expList.empty();
-}
-
-/* Check if a machine defines a single character. This is useful in validating
- * ranges and machines to export. */
-bool FsmAp::checkSingleCharMachine()
-{
- /* Must have two states. */
- if ( stateList.length() != 2 )
- return false;
- /* The start state cannot be final. */
- if ( startState->isFinState() )
- return false;
- /* There should be only one final state. */
- if ( finStateSet.length() != 1 )
- return false;
- /* The final state cannot have any transitions out. */
- if ( finStateSet[0]->outList.length() != 0 )
- return false;
- /* The start state should have only one transition out. */
- if ( startState->outList.length() != 1 )
- return false;
-	/* The single transition out of the start state should not be a range. */
- TransAp *startTrans = startState->outList.head;
- if ( startTrans->lowKey != startTrans->highKey )
- return false;
- return true;
-}
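
/* Illustration only: a machine built from the single literal 'x' passes all
 * of these checks; one built from the range 'a'..'z' fails the final
 * lowKey == highKey test, and one built from the string "xy" has three
 * states and fails the first test. */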
-
diff --git a/contrib/tools/ragel5/ragel/fsmgraph.h b/contrib/tools/ragel5/ragel/fsmgraph.h
deleted file mode 100644
index 062031c3aa..0000000000
--- a/contrib/tools/ragel5/ragel/fsmgraph.h
+++ /dev/null
@@ -1,1482 +0,0 @@
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FSMGRAPH_H
-#define _FSMGRAPH_H
-
-#include <assert.h>
-#include <iostream>
-#include "common.h"
-#include "vector.h"
-#include "bstset.h"
-#include "compare.h"
-#include "avltree.h"
-#include "dlist.h"
-#include "bstmap.h"
-#include "sbstmap.h"
-#include "sbstset.h"
-#include "sbsttable.h"
-#include "avlset.h"
-#include "avlmap.h"
-#include "ragel.h"
-
-//#define LOG_CONDS
-
-/* Flags that control merging. */
-#define SB_GRAPH1 0x01
-#define SB_GRAPH2 0x02
-#define SB_BOTH 0x03
-#define SB_ISFINAL 0x04
-#define SB_ISMARKED 0x08
-#define SB_ONLIST 0x10
-
-using std::ostream;
-
-struct TransAp;
-struct StateAp;
-struct FsmAp;
-struct Action;
-struct LongestMatchPart;
-
-/* State list element for unambiguous access to list element. */
-struct FsmListEl
-{
- StateAp *prev, *next;
-};
-
-/* This is the marked index for a state pair. Used in minimization. It keeps
- * track of whether or not the state pair is marked. */
-struct MarkIndex
-{
- MarkIndex(int states);
- ~MarkIndex();
-
- void markPair(int state1, int state2);
- bool isPairMarked(int state1, int state2);
-
-private:
- int numStates;
- bool *array;
-};
-
-extern KeyOps *keyOps;
-
-/* Transition Action Element. */
-typedef SBstMapEl< int, Action* > ActionTableEl;
-
-/* Nodes in the tree that use this action. */
-struct NameInst;
-struct InlineList;
-typedef Vector<NameInst*> ActionRefs;
-
-/* Element in list of actions. Contains the string for the code to execute. */
-struct Action
-:
- public DListEl<Action>,
- public AvlTreeEl<Action>
-{
-public:
-
- Action( const InputLoc &loc, const char *name, InlineList *inlineList, int condId )
- :
- loc(loc),
- name(name),
- inlineList(inlineList),
- actionId(-1),
- numTransRefs(0),
- numToStateRefs(0),
- numFromStateRefs(0),
- numEofRefs(0),
- numCondRefs(0),
- anyCall(false),
- isLmAction(false),
- condId(condId)
- {
- }
-
- /* Key for action dictionary. */
- const char *getKey() const { return name; }
-
- /* Data collected during parse. */
- InputLoc loc;
- const char *name;
- InlineList *inlineList;
- int actionId;
-
- void actionName( ostream &out )
- {
- if ( name != 0 )
- out << name;
- else
- out << loc.line << ":" << loc.col;
- }
-
- /* Places in the input text that reference the action. */
- ActionRefs actionRefs;
-
- /* Number of references in the final machine. */
- int numRefs()
- { return numTransRefs + numToStateRefs + numFromStateRefs + numEofRefs; }
- int numTransRefs;
- int numToStateRefs;
- int numFromStateRefs;
- int numEofRefs;
- int numCondRefs;
- bool anyCall;
-
- bool isLmAction;
- int condId;
-};
-
-struct CmpCondId
-{
- static inline int compare( const Action *cond1, const Action *cond2 )
- {
- if ( cond1->condId < cond2->condId )
- return -1;
- else if ( cond1->condId > cond2->condId )
- return 1;
- return 0;
- }
-};
-
-/* A list of actions. */
-typedef DList<Action> ActionList;
-typedef AvlTree<Action, char *, CmpStr> ActionDict;
-
-/* Structure for reverse action mapping. */
-struct RevActionMapEl
-{
- char *name;
- InputLoc location;
-};
-
-
-/* Transition Action Table. */
-struct ActionTable
- : public SBstMap< int, Action*, CmpOrd<int> >
-{
- void setAction( int ordering, Action *action );
- void setActions( int *orderings, Action **actions, int nActs );
- void setActions( const ActionTable &other );
-
- bool hasAction( Action *action );
-};
-
-typedef SBstSet< Action*, CmpOrd<Action*> > ActionSet;
-typedef CmpSTable< Action*, CmpOrd<Action*> > CmpActionSet;
-
-/* Transition Action Element. */
-typedef SBstMapEl< int, LongestMatchPart* > LmActionTableEl;
-
-/* Transition Action Table. */
-struct LmActionTable
- : public SBstMap< int, LongestMatchPart*, CmpOrd<int> >
-{
- void setAction( int ordering, LongestMatchPart *action );
- void setActions( const LmActionTable &other );
-};
-
-/* Compare of a whole action table element (key & value). */
-struct CmpActionTableEl
-{
- static int compare( const ActionTableEl &action1,
- const ActionTableEl &action2 )
- {
- if ( action1.key < action2.key )
- return -1;
- else if ( action1.key > action2.key )
- return 1;
- else if ( action1.value < action2.value )
- return -1;
- else if ( action1.value > action2.value )
- return 1;
- return 0;
- }
-};
-
-/* Compare for ActionTable. */
-typedef CmpSTable< ActionTableEl, CmpActionTableEl > CmpActionTable;
-
-/* Compare of a whole lm action table element (key & value). */
-struct CmpLmActionTableEl
-{
- static int compare( const LmActionTableEl &lmAction1,
- const LmActionTableEl &lmAction2 )
- {
- if ( lmAction1.key < lmAction2.key )
- return -1;
- else if ( lmAction1.key > lmAction2.key )
- return 1;
- else if ( lmAction1.value < lmAction2.value )
- return -1;
- else if ( lmAction1.value > lmAction2.value )
- return 1;
- return 0;
- }
-};
-
-/* Compare for ActionTable. */
-typedef CmpSTable< LmActionTableEl, CmpLmActionTableEl > CmpLmActionTable;
-
-/* Action table element for error action tables. Adds the encoding of transfer
- * point. */
-struct ErrActionTableEl
-{
- ErrActionTableEl( Action *action, int ordering, int transferPoint )
- : ordering(ordering), action(action), transferPoint(transferPoint) { }
-
- /* Ordering and id of the action embedding. */
- int ordering;
- Action *action;
-
-	/* Id of point of transfer from Error action table to transitions and
- * eofActionTable. */
- int transferPoint;
-
- int getKey() const { return ordering; }
-};
-
-struct ErrActionTable
- : public SBstTable< ErrActionTableEl, int, CmpOrd<int> >
-{
- void setAction( int ordering, Action *action, int transferPoint );
- void setActions( const ErrActionTable &other );
-};
-
-/* Compare of an error action table element (key & value). */
-struct CmpErrActionTableEl
-{
- static int compare( const ErrActionTableEl &action1,
- const ErrActionTableEl &action2 )
- {
- if ( action1.ordering < action2.ordering )
- return -1;
- else if ( action1.ordering > action2.ordering )
- return 1;
- else if ( action1.action < action2.action )
- return -1;
- else if ( action1.action > action2.action )
- return 1;
- else if ( action1.transferPoint < action2.transferPoint )
- return -1;
- else if ( action1.transferPoint > action2.transferPoint )
- return 1;
- return 0;
- }
-};
-
-/* Compare for ErrActionTable. */
-typedef CmpSTable< ErrActionTableEl, CmpErrActionTableEl > CmpErrActionTable;
-
-
-/* Describe a priority, shared among PriorEls.
- * Has key and whether or not used. */
-struct PriorDesc
-{
- int key;
- int priority;
-};
-
-/* Element in the arrays of priorities for transitions and arrays. Ordering is
- * unique among instantiations of machines, desc is shared. */
-struct PriorEl
-{
- PriorEl( int ordering, PriorDesc *desc )
- : ordering(ordering), desc(desc) { }
-
- int ordering;
- PriorDesc *desc;
-};
-
-/* Compare priority elements, which are ordered by the priority descriptor
- * key. */
-struct PriorElCmp
-{
- static inline int compare( const PriorEl &pel1, const PriorEl &pel2 )
- {
- if ( pel1.desc->key < pel2.desc->key )
- return -1;
- else if ( pel1.desc->key > pel2.desc->key )
- return 1;
- else
- return 0;
- }
-};
-
-
-/* Priority Table. */
-struct PriorTable
- : public SBstSet< PriorEl, PriorElCmp >
-{
- void setPrior( int ordering, PriorDesc *desc );
- void setPriors( const PriorTable &other );
-};
-
-/* Compare of prior table elements for distinguishing state data. */
-struct CmpPriorEl
-{
- static inline int compare( const PriorEl &pel1, const PriorEl &pel2 )
- {
- if ( pel1.desc < pel2.desc )
- return -1;
- else if ( pel1.desc > pel2.desc )
- return 1;
- else if ( pel1.ordering < pel2.ordering )
- return -1;
- else if ( pel1.ordering > pel2.ordering )
- return 1;
- return 0;
- }
-};
-
-/* Compare of PriorTable distinguishing state data. Using a compare of the
- * pointers is a little more strict than it needs to be. It requires that
- * priority tables have the exact same set of priority assignment operators
- * (from the input lang) to be considered equal.
- *
- * Really only key-value pairs need be tested and ordering be merged. However
- * this would require that in the fusing of states, priority descriptors be
- * chosen for the new fused state based on priority. Since the out transition
- * lists and ranges aren't necessarily going to line up, this is more work for
- * little gain. Final compression resets all priorities first, so this would
- * only be useful for compression at every operator, which is only an
- * undocumented test feature.
- */
-typedef CmpSTable<PriorEl, CmpPriorEl> CmpPriorTable;
-
-/* Plain action list that imposes no ordering. */
-typedef Vector<int> TransFuncList;
-
-/* Comparison for TransFuncList. */
-typedef CmpTable< int, CmpOrd<int> > TransFuncListCompare;
-
-/* Transition class that implements actions and priorities. */
-struct TransAp
-{
- TransAp() : fromState(0), toState(0) {}
- TransAp( const TransAp &other ) :
- lowKey(other.lowKey),
- highKey(other.highKey),
- fromState(0), toState(0),
- actionTable(other.actionTable),
- priorTable(other.priorTable)
- {
- assert( lmActionTable.length() == 0 && other.lmActionTable.length() == 0 );
- }
-
- Key lowKey, highKey;
- StateAp *fromState;
- StateAp *toState;
-
- /* Pointers for outlist. */
- TransAp *prev, *next;
-
- /* Pointers for in-list. */
- TransAp *ilprev, *ilnext;
-
- /* The function table and priority for the transition. */
- ActionTable actionTable;
- PriorTable priorTable;
-
- LmActionTable lmActionTable;
-};
-
-/* In transition list. Like DList except only has head pointers, which is all
- * that is required. Insertion and deletion is handled by the graph. This
- * class provides the iterator of a single list. */
-struct TransInList
-{
- TransInList() : head(0) { }
-
- TransAp *head;
-
- struct Iter
- {
- /* Default construct. */
- Iter() : ptr(0) { }
-
- /* Construct, assign from a list. */
- Iter( const TransInList &il ) : ptr(il.head) { }
- Iter &operator=( const TransInList &dl ) { ptr = dl.head; return *this; }
-
- /* At the end */
- bool lte() const { return ptr != 0; }
- bool end() const { return ptr == 0; }
-
- /* At the first, last element. */
- bool first() const { return ptr && ptr->ilprev == 0; }
- bool last() const { return ptr && ptr->ilnext == 0; }
-
- /* Cast, dereference, arrow ops. */
- operator TransAp*() const { return ptr; }
- TransAp &operator *() const { return *ptr; }
- TransAp *operator->() const { return ptr; }
-
- /* Increment, decrement. */
- inline void operator++(int) { ptr = ptr->ilnext; }
- inline void operator--(int) { ptr = ptr->ilprev; }
-
- /* The iterator is simply a pointer. */
- TransAp *ptr;
- };
-};
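
/* A minimal sketch of walking a state's in-transition list with the iterator
 * above; someState is an assumed StateAp pointer, not from the sources. */
for ( TransInList::Iter in = someState->inList; in.lte(); in++ ) {
	/* in->fromState is a predecessor of someState. */
}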
-
-typedef DList<TransAp> TransList;
-
-/* Set of states, list of states. */
-typedef BstSet<StateAp*> StateSet;
-typedef DList<StateAp> StateList;
-
-/* An element in a state dict. */
-struct StateDictEl
-:
- public AvlTreeEl<StateDictEl>
-{
- StateDictEl(const StateSet &stateSet)
- : stateSet(stateSet) { }
-
- const StateSet &getKey() { return stateSet; }
- StateSet stateSet;
- StateAp *targState;
-};
-
-/* Dictionary mapping a set of states to a target state. */
-typedef AvlTree< StateDictEl, StateSet, CmpTable<StateAp*> > StateDict;
-
-/* Data needed for a merge operation. */
-struct MergeData
-{
- MergeData()
- : stfillHead(0), stfillTail(0) { }
-
- StateDict stateDict;
-
- StateAp *stfillHead;
- StateAp *stfillTail;
-
- void fillListAppend( StateAp *state );
-};
-
-struct TransEl
-{
- /* Constructors. */
- TransEl() { }
- TransEl( Key lowKey, Key highKey )
- : lowKey(lowKey), highKey(highKey) { }
- TransEl( Key lowKey, Key highKey, TransAp *value )
- : lowKey(lowKey), highKey(highKey), value(value) { }
-
- Key lowKey, highKey;
- TransAp *value;
-};
-
-struct CmpKey
-{
- static int compare( const Key key1, const Key key2 )
- {
- if ( key1 < key2 )
- return -1;
- else if ( key1 > key2 )
- return 1;
- else
- return 0;
- }
-};
-
-/* Vector based set of key items. */
-typedef BstSet<Key, CmpKey> KeySet;
-
-struct MinPartition
-{
- MinPartition() : active(false) { }
-
- StateList list;
- bool active;
-
- MinPartition *prev, *next;
-};
-
-/* Epsilon transition stored in a state. Specifies the target. */
-typedef Vector<int> EpsilonTrans;
-
-/* List of states that are to be drawn into this. */
-struct EptVectEl
-{
- EptVectEl( StateAp *targ, bool leaving )
- : targ(targ), leaving(leaving) { }
-
- StateAp *targ;
- bool leaving;
-};
-typedef Vector<EptVectEl> EptVect;
-
-/* Set of entry ids that go into this state. */
-typedef BstSet<int> EntryIdSet;
-
-/* Set of longest match items that may be active in a given state. */
-typedef BstSet<LongestMatchPart*> LmItemSet;
-
-/* Conditions. */
-typedef BstSet< Action*, CmpCondId > CondSet;
-typedef CmpTable< Action*, CmpCondId > CmpCondSet;
-
-struct CondSpace
- : public AvlTreeEl<CondSpace>
-{
- CondSpace( const CondSet &condSet )
- : condSet(condSet) {}
-
- const CondSet &getKey() { return condSet; }
-
- CondSet condSet;
- Key baseKey;
- long condSpaceId;
-};
-
-typedef Vector<CondSpace*> CondSpaceVect;
-
-typedef AvlTree<CondSpace, CondSet, CmpCondSet> CondSpaceMap;
-
-struct StateCond
-{
- StateCond( Key lowKey, Key highKey ) :
- lowKey(lowKey), highKey(highKey) {}
-
- Key lowKey;
- Key highKey;
- CondSpace *condSpace;
-
- StateCond *prev, *next;
-};
-
-typedef DList<StateCond> StateCondList;
-typedef Vector<long> LongVect;
-
-struct Expansion
-{
- Expansion( Key lowKey, Key highKey ) :
- lowKey(lowKey), highKey(highKey),
- fromTrans(0), fromCondSpace(0),
- toCondSpace(0) {}
-
- ~Expansion()
- {
- if ( fromTrans != 0 )
- delete fromTrans;
- }
-
- Key lowKey;
- Key highKey;
-
- TransAp *fromTrans;
- CondSpace *fromCondSpace;
- long fromVals;
-
- CondSpace *toCondSpace;
- LongVect toValsList;
-
- Expansion *prev, *next;
-};
-
-typedef DList<Expansion> ExpansionList;
-
-struct Removal
-{
- Key lowKey;
- Key highKey;
-
- Removal *next;
-};
-
-struct CondData
-{
- CondData() : nextCondKey(0) {}
-
- /* Condition info. */
- Key nextCondKey;
-
- CondSpaceMap condSpaceMap;
-};
-
-extern CondData *condData;
-
-/* State class that implements actions and priorities. */
-struct StateAp
-{
- StateAp();
- StateAp(const StateAp &other);
- ~StateAp();
-
- /* Is the state final? */
- bool isFinState() { return stateBits & SB_ISFINAL; }
-
- /* Out transition list and the pointer for the default out trans. */
- TransList outList;
-
- /* In transition Lists. */
- TransInList inList;
-
- /* Entry points into the state. */
- EntryIdSet entryIds;
-
- /* Epsilon transitions. */
- EpsilonTrans epsilonTrans;
-
- /* Condition info. */
- StateCondList stateCondList;
-
- /* Number of in transitions from states other than ourselves. */
- int foreignInTrans;
-
- /* Temporary data for various algorithms. */
- union {
- /* When duplicating the fsm we need to map each
- * state to the new state representing it. */
- StateAp *stateMap;
-
- /* When minimizing machines by partitioning, this maps to the group
- * the state is in. */
- MinPartition *partition;
-
- /* When merging states (state machine operations) this next pointer is
- * used for the list of states that need to be filled in. */
- StateAp *next;
-
- /* Identification for printing and stable minimization. */
- int stateNum;
-
- } alg;
-
- /* Data used in epsilon operation, maybe fit into alg? */
- StateAp *isolatedShadow;
- int owningGraph;
-
- /* A pointer to a dict element that contains the set of states this state
- * represents. This cannot go into alg, because alg.next is used during
- * the merging process. */
- StateDictEl *stateDictEl;
-
- /* When drawing epsilon transitions, holds the list of states to merge
- * with. */
- EptVect *eptVect;
-
- /* Bits controlling the behaviour of the state during collapsing to dfa. */
- int stateBits;
-
- /* State list elements. */
- StateAp *next, *prev;
-
- /*
- * Priority and Action data.
- */
-
-	/* Out priorities transferred to out transitions. */
- PriorTable outPriorTable;
-
-	/* The following two action tables are distinguished by the fact that
-	 * toState actions are executed immediately after the transition actions of
-	 * incoming transitions and the current character will be the same as the
-	 * one available then. The fromState actions are executed immediately
-	 * before the transition actions of outgoing transitions and the current
-	 * character is the same as the one available then. */
-
- /* Actions to execute upon entering into a state. */
- ActionTable toStateActionTable;
-
- /* Actions to execute when going from the state to the transition. */
- ActionTable fromStateActionTable;
-
- /* Actions to add to any future transitions that leave via this state. */
- ActionTable outActionTable;
-
-	/* Conditions to add to any future transitions that leave via this state. */
- ActionSet outCondSet;
-
- /* Error action tables. */
- ErrActionTable errActionTable;
-
- /* Actions to execute on eof. */
- ActionTable eofActionTable;
-
- /* Set of longest match items that may be active in this state. */
- LmItemSet lmItemSet;
-};
-
-template <class ListItem> struct NextTrans
-{
- Key lowKey, highKey;
- ListItem *trans;
- ListItem *next;
-
- void load() {
- if ( trans == 0 )
- next = 0;
- else {
- next = trans->next;
- lowKey = trans->lowKey;
- highKey = trans->highKey;
- }
- }
-
- void set( ListItem *t ) {
- trans = t;
- load();
- }
-
- void increment() {
- trans = next;
- load();
- }
-};
-
-
-/* Encodes the different states that are meaningful to the user of the iterator. */
-enum PairIterUserState
-{
- RangeInS1, RangeInS2,
- RangeOverlap,
- BreakS1, BreakS2
-};
-
-template <class ListItem1, class ListItem2 = ListItem1> struct PairIter
-{
- /* Encodes the different states that an fsm iterator can be in. */
- enum IterState {
- Begin,
- ConsumeS1Range, ConsumeS2Range,
- OnlyInS1Range, OnlyInS2Range,
- S1SticksOut, S1SticksOutBreak,
- S2SticksOut, S2SticksOutBreak,
- S1DragsBehind, S1DragsBehindBreak,
- S2DragsBehind, S2DragsBehindBreak,
- ExactOverlap, End
- };
-
- PairIter( ListItem1 *list1, ListItem2 *list2 );
-
- /* Query iterator. */
- bool lte() { return itState != End; }
- bool end() { return itState == End; }
- void operator++(int) { findNext(); }
- void operator++() { findNext(); }
-
- /* Iterator state. */
- ListItem1 *list1;
- ListItem2 *list2;
- IterState itState;
- PairIterUserState userState;
-
- NextTrans<ListItem1> s1Tel;
- NextTrans<ListItem2> s2Tel;
- Key bottomLow, bottomHigh;
- ListItem1 *bottomTrans1;
- ListItem2 *bottomTrans2;
-
-private:
- void findNext();
-};
-
-/* Init the iterator by advancing to the first item. */
-template <class ListItem1, class ListItem2> PairIter<ListItem1, ListItem2>::PairIter(
- ListItem1 *list1, ListItem2 *list2 )
-:
- list1(list1),
- list2(list2),
- itState(Begin)
-{
- findNext();
-}
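
/* A minimal sketch of how PairIter is typically driven (see the merging
 * routines in fsmgraph.cc); destState and srcState are assumed StateAp
 * pointers, not taken from the sources. */
PairIter<TransAp> pairIter( destState->outList.head, srcState->outList.head );
for ( ; !pairIter.end(); pairIter++ ) {
	switch ( pairIter.userState ) {
	case RangeInS1:    /* current range exists only in the first list */  break;
	case RangeInS2:    /* current range exists only in the second list */ break;
	case RangeOverlap: /* the two ranges cover exactly the same keys */   break;
	case BreakS1:      /* the first range is about to be split */         break;
	case BreakS2:      /* the second range is about to be split */        break;
	}
}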
-
-/* Return and re-entry for the co-routine iterators. This should ALWAYS be
- * used inside of a block. */
-#define CO_RETURN(label) \
- itState = label; \
- return; \
- entry##label: backIn = true
-
-/* Return and re-entry for the co-routine iterators. This should ALWAYS be
- * used inside of a block. */
-#define CO_RETURN2(label, uState) \
- itState = label; \
- userState = uState; \
- return; \
- entry##label: backIn = true
-
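/* A toy resumable iterator showing the pattern that CO_RETURN/CO_RETURN2
 * implement and that findNext below relies on: the iterator state selects a
 * goto target, the macro records where to resume and returns, and the next
 * call jumps straight back to that point. Illustrative sketch only, not part
 * of the Ragel sources. */
struct CountIter
{
	enum IterState { Begin, Yielded, End };
	IterState itState;
	int cur, max;

	CountIter( int max ) : itState(Begin), cur(0), max(max) { findNext(); }
	bool end() { return itState == End; }
	void operator++(int) { findNext(); }

	void findNext()
	{
		bool backIn;
		switch ( itState ) {
			case Begin: goto entryBegin;
			case Yielded: goto entryYielded;
			case End: goto entryEnd;
		}
	entryBegin:
		while ( cur < max ) {
			/* Yield the current value; resumes right here on the next call. */
			CO_RETURN( Yielded );
			cur += 1;
		}
		CO_RETURN( End );
	}
};
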
-/* Advance to the next transition. When returns, trans points to the next
- * transition, unless there are no more, in which case end() returns true. */
-template <class ListItem1, class ListItem2> void PairIter<ListItem1, ListItem2>::findNext()
-{
- /* This variable is used in dummy statements that follow the entry
- * goto labels. The compiler needs some statement to follow the label. */
- bool backIn;
-
-	/* Jump into the iterator routine based on the iterator state. */
- switch ( itState ) {
- case Begin: goto entryBegin;
- case ConsumeS1Range: goto entryConsumeS1Range;
- case ConsumeS2Range: goto entryConsumeS2Range;
- case OnlyInS1Range: goto entryOnlyInS1Range;
- case OnlyInS2Range: goto entryOnlyInS2Range;
- case S1SticksOut: goto entryS1SticksOut;
- case S1SticksOutBreak: goto entryS1SticksOutBreak;
- case S2SticksOut: goto entryS2SticksOut;
- case S2SticksOutBreak: goto entryS2SticksOutBreak;
- case S1DragsBehind: goto entryS1DragsBehind;
- case S1DragsBehindBreak: goto entryS1DragsBehindBreak;
- case S2DragsBehind: goto entryS2DragsBehind;
- case S2DragsBehindBreak: goto entryS2DragsBehindBreak;
- case ExactOverlap: goto entryExactOverlap;
- case End: goto entryEnd;
- }
-
-entryBegin:
- /* Set up the next structs at the head of the transition lists. */
- s1Tel.set( list1 );
- s2Tel.set( list2 );
-
- /* Concurrently scan both out ranges. */
- while ( true ) {
- if ( s1Tel.trans == 0 ) {
- /* We are at the end of state1's ranges. Process the rest of
- * state2's ranges. */
- while ( s2Tel.trans != 0 ) {
- /* Range is only in s2. */
- CO_RETURN2( ConsumeS2Range, RangeInS2 );
- s2Tel.increment();
- }
- break;
- }
- else if ( s2Tel.trans == 0 ) {
- /* We are at the end of state2's ranges. Process the rest of
- * state1's ranges. */
- while ( s1Tel.trans != 0 ) {
- /* Range is only in s1. */
- CO_RETURN2( ConsumeS1Range, RangeInS1 );
- s1Tel.increment();
- }
- break;
- }
- /* Both state1's and state2's transition elements are good.
-		 * The signature of no overlap is a back key being in front of a
- * front key. */
- else if ( s1Tel.highKey < s2Tel.lowKey ) {
- /* A range exists in state1 that does not overlap with state2. */
- CO_RETURN2( OnlyInS1Range, RangeInS1 );
- s1Tel.increment();
- }
- else if ( s2Tel.highKey < s1Tel.lowKey ) {
- /* A range exists in state2 that does not overlap with state1. */
- CO_RETURN2( OnlyInS2Range, RangeInS2 );
- s2Tel.increment();
- }
- /* There is overlap, must mix the ranges in some way. */
- else if ( s1Tel.lowKey < s2Tel.lowKey ) {
- /* Range from state1 sticks out front. Must break it into
-			 * non-overlapping and overlapping segments. */
- bottomLow = s2Tel.lowKey;
- bottomHigh = s1Tel.highKey;
- s1Tel.highKey = s2Tel.lowKey;
- s1Tel.highKey.decrement();
- bottomTrans1 = s1Tel.trans;
-
- /* Notify the caller that we are breaking s1. This gives them a
- * chance to duplicate s1Tel[0,1].value. */
- CO_RETURN2( S1SticksOutBreak, BreakS1 );
-
- /* Broken off range is only in s1. */
- CO_RETURN2( S1SticksOut, RangeInS1 );
-
- /* Advance over the part sticking out front. */
- s1Tel.lowKey = bottomLow;
- s1Tel.highKey = bottomHigh;
- s1Tel.trans = bottomTrans1;
- }
- else if ( s2Tel.lowKey < s1Tel.lowKey ) {
- /* Range from state2 sticks out front. Must break it into
-			 * non-overlapping and overlapping segments. */
- bottomLow = s1Tel.lowKey;
- bottomHigh = s2Tel.highKey;
- s2Tel.highKey = s1Tel.lowKey;
- s2Tel.highKey.decrement();
- bottomTrans2 = s2Tel.trans;
-
- /* Notify the caller that we are breaking s2. This gives them a
- * chance to duplicate s2Tel[0,1].value. */
- CO_RETURN2( S2SticksOutBreak, BreakS2 );
-
- /* Broken off range is only in s2. */
- CO_RETURN2( S2SticksOut, RangeInS2 );
-
- /* Advance over the part sticking out front. */
- s2Tel.lowKey = bottomLow;
- s2Tel.highKey = bottomHigh;
- s2Tel.trans = bottomTrans2;
- }
- /* Low ends are even. Are the high ends even? */
- else if ( s1Tel.highKey < s2Tel.highKey ) {
- /* Range from state2 goes longer than the range from state1. We
-			 * must break the range from state2 into an evenly overlapping
- * segment. */
- bottomLow = s1Tel.highKey;
- bottomLow.increment();
- bottomHigh = s2Tel.highKey;
- s2Tel.highKey = s1Tel.highKey;
- bottomTrans2 = s2Tel.trans;
-
- /* Notify the caller that we are breaking s2. This gives them a
- * chance to duplicate s2Tel[0,1].value. */
- CO_RETURN2( S2DragsBehindBreak, BreakS2 );
-
- /* Breaking s2 produces exact overlap. */
- CO_RETURN2( S2DragsBehind, RangeOverlap );
-
- /* Advance over the front we just broke off of range 2. */
- s2Tel.lowKey = bottomLow;
- s2Tel.highKey = bottomHigh;
- s2Tel.trans = bottomTrans2;
-
- /* Advance over the entire s1Tel. We have consumed it. */
- s1Tel.increment();
- }
- else if ( s2Tel.highKey < s1Tel.highKey ) {
- /* Range from state1 goes longer than the range from state2. We
-			 * must break the range from state1 into an evenly overlapping
- * segment. */
- bottomLow = s2Tel.highKey;
- bottomLow.increment();
- bottomHigh = s1Tel.highKey;
- s1Tel.highKey = s2Tel.highKey;
- bottomTrans1 = s1Tel.trans;
-
- /* Notify the caller that we are breaking s1. This gives them a
-			 * chance to duplicate s1Tel[0,1].value. */
- CO_RETURN2( S1DragsBehindBreak, BreakS1 );
-
- /* Breaking s1 produces exact overlap. */
- CO_RETURN2( S1DragsBehind, RangeOverlap );
-
- /* Advance over the front we just broke off of range 1. */
- s1Tel.lowKey = bottomLow;
- s1Tel.highKey = bottomHigh;
- s1Tel.trans = bottomTrans1;
-
- /* Advance over the entire s2Tel. We have consumed it. */
- s2Tel.increment();
- }
- else {
- /* There is an exact overlap. */
- CO_RETURN2( ExactOverlap, RangeOverlap );
-
- s1Tel.increment();
- s2Tel.increment();
- }
- }
-
- /* Done, go into end state. */
- CO_RETURN( End );
-}
-
-
-/* Compare lists of epsilon transitions. Entries are name ids of targets. */
-typedef CmpTable< int, CmpOrd<int> > CmpEpsilonTrans;
-
-/* Compare class for the Approximate minimization. */
-class ApproxCompare
-{
-public:
- ApproxCompare() { }
- int compare( const StateAp *pState1, const StateAp *pState2 );
-};
-
-/* Compare class for the initial partitioning of a partition minimization. */
-class InitPartitionCompare
-{
-public:
- InitPartitionCompare() { }
- int compare( const StateAp *pState1, const StateAp *pState2 );
-};
-
-/* Compare class for the regular partitioning of a partition minimization. */
-class PartitionCompare
-{
-public:
- PartitionCompare() { }
- int compare( const StateAp *pState1, const StateAp *pState2 );
-};
-
-/* Compare class for a minimization that marks pairs. Provides the shouldMark
- * routine. */
-class MarkCompare
-{
-public:
- MarkCompare() { }
- bool shouldMark( MarkIndex &markIndex, const StateAp *pState1,
- const StateAp *pState2 );
-};
-
-/* List of partitions. */
-typedef DList< MinPartition > PartitionList;
-
-/* List of transitions out of a state. */
-typedef Vector<TransEl> TransListVect;
-
-/* Entry point map used for keeping track of entry points in a machine. */
-typedef BstSet< int > EntryIdSet;
-typedef BstMapEl< int, StateAp* > EntryMapEl;
-typedef BstMap< int, StateAp* > EntryMap;
-typedef Vector<EntryMapEl> EntryMapBase;
-
-/* Graph class that implements actions and priorities. */
-struct FsmAp
-{
- /* Constructors/Destructors. */
- FsmAp( );
- FsmAp( const FsmAp &graph );
- ~FsmAp();
-
- /* The list of states. */
- StateList stateList;
- StateList misfitList;
-
- /* The map of entry points. */
- EntryMap entryPoints;
-
- /* The start state. */
- StateAp *startState;
-
- /* Error state, possibly created only when the final machine has been
- * created and the XML machine is about to be written. No transitions
- * point to this state. */
- StateAp *errState;
-
- /* The set of final states. */
- StateSet finStateSet;
-
- /* Misfit Accounting. Are misfits put on a separate list. */
- bool misfitAccounting;
-
- /*
- * Transition actions and priorities.
- */
-
-	/* Set priorities on transitions. */
- void startFsmPrior( int ordering, PriorDesc *prior );
- void allTransPrior( int ordering, PriorDesc *prior );
- void finishFsmPrior( int ordering, PriorDesc *prior );
- void leaveFsmPrior( int ordering, PriorDesc *prior );
-
- /* Action setting support. */
- void transferErrorActions( StateAp *state, int transferPoint );
- void setErrorAction( StateAp *state, int ordering, Action *action );
-
- /* Fill all spaces in a transition list with an error transition. */
- void fillGaps( StateAp *state );
-
- /* Similar to setErrorAction, instead gives a state to go to on error. */
- void setErrorTarget( StateAp *state, StateAp *target, int *orderings,
- Action **actions, int nActs );
-
- /* Set actions to execute. */
- void startFsmAction( int ordering, Action *action );
- void allTransAction( int ordering, Action *action );
- void finishFsmAction( int ordering, Action *action );
- void leaveFsmAction( int ordering, Action *action );
- void longMatchAction( int ordering, LongestMatchPart *lmPart );
-
- /* Set conditions. */
- CondSpace *addCondSpace( const CondSet &condSet );
-
- void findEmbedExpansions( ExpansionList &expansionList,
- StateAp *destState, Action *condAction );
- void embedCondition( MergeData &md, StateAp *state, Action *condAction );
- void embedCondition( StateAp *state, Action *condAction );
-
- void startFsmCondition( Action *condAction );
- void allTransCondition( Action *condAction );
- void leaveFsmCondition( Action *condAction );
-
- /* Set error actions to execute. */
- void startErrorAction( int ordering, Action *action, int transferPoint );
- void allErrorAction( int ordering, Action *action, int transferPoint );
- void finalErrorAction( int ordering, Action *action, int transferPoint );
- void notStartErrorAction( int ordering, Action *action, int transferPoint );
- void notFinalErrorAction( int ordering, Action *action, int transferPoint );
- void middleErrorAction( int ordering, Action *action, int transferPoint );
-
- /* Set EOF actions. */
- void startEOFAction( int ordering, Action *action );
- void allEOFAction( int ordering, Action *action );
- void finalEOFAction( int ordering, Action *action );
- void notStartEOFAction( int ordering, Action *action );
- void notFinalEOFAction( int ordering, Action *action );
- void middleEOFAction( int ordering, Action *action );
-
- /* Set To State actions. */
- void startToStateAction( int ordering, Action *action );
- void allToStateAction( int ordering, Action *action );
- void finalToStateAction( int ordering, Action *action );
- void notStartToStateAction( int ordering, Action *action );
- void notFinalToStateAction( int ordering, Action *action );
- void middleToStateAction( int ordering, Action *action );
-
- /* Set From State actions. */
- void startFromStateAction( int ordering, Action *action );
- void allFromStateAction( int ordering, Action *action );
- void finalFromStateAction( int ordering, Action *action );
- void notStartFromStateAction( int ordering, Action *action );
- void notFinalFromStateAction( int ordering, Action *action );
- void middleFromStateAction( int ordering, Action *action );
-
- /* Shift the action ordering of the start transitions to start at
- * fromOrder and increase in units of 1. Useful before kleene star
- * operation. */
- int shiftStartActionOrder( int fromOrder );
-
-	/* Clear all priorities from the fsm so they won't affect minimization
- * of the final fsm. */
- void clearAllPriorities();
-
- /* Zero out all the function keys. */
- void nullActionKeys();
-
- /* Walk the list of states and verify state properties. */
- void verifyStates();
-
- /* Misfit Accounting. Are misfits put on a separate list. */
- void setMisfitAccounting( bool val )
- { misfitAccounting = val; }
-
- /* Set and Unset a state as final. */
- void setFinState( StateAp *state );
- void unsetFinState( StateAp *state );
-
- void setStartState( StateAp *state );
- void unsetStartState( );
-
- /* Set and unset a state as an entry point. */
- void setEntry( int id, StateAp *state );
- void changeEntry( int id, StateAp *to, StateAp *from );
- void unsetEntry( int id, StateAp *state );
- void unsetEntry( int id );
- void unsetAllEntryPoints();
-
- /* Epsilon transitions. */
- void epsilonTrans( int id );
- void shadowReadWriteStates( MergeData &md );
-
- /*
- * Basic attaching and detaching.
- */
-
- /* Common to attaching/detaching list and default. */
- void attachToInList( StateAp *from, StateAp *to, TransAp *&head, TransAp *trans );
- void detachFromInList( StateAp *from, StateAp *to, TransAp *&head, TransAp *trans );
-
- /* Attach with a new transition. */
- TransAp *attachNewTrans( StateAp *from, StateAp *to,
- Key onChar1, Key onChar2 );
-
-	/* Attach with an existing transition that is already in an out list. */
- void attachTrans( StateAp *from, StateAp *to, TransAp *trans );
-
- /* Redirect a transition away from error and towards some state. */
- void redirectErrorTrans( StateAp *from, StateAp *to, TransAp *trans );
-
- /* Detach a transition from a target state. */
- void detachTrans( StateAp *from, StateAp *to, TransAp *trans );
-
- /* Detach a state from the graph. */
- void detachState( StateAp *state );
-
- /*
- * NFA to DFA conversion routines.
- */
-
-	/* Duplicate a transition that will drop in to a free spot. */
- TransAp *dupTrans( StateAp *from, TransAp *srcTrans );
-
- /* In crossing, two transitions both go to real states. */
- TransAp *fsmAttachStates( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans );
-
- /* Two transitions are to be crossed, handle the possibility of either
- * going to the error state. */
- TransAp *mergeTrans( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans );
-
-	/* Compare to determine the relative priorities of two transition tables. */
- int comparePrior( const PriorTable &priorTable1, const PriorTable &priorTable2 );
-
- /* Cross a src transition with one that is already occupying a spot. */
- TransAp *crossTransitions( MergeData &md, StateAp *from,
- TransAp *destTrans, TransAp *srcTrans );
-
- void outTransCopy( MergeData &md, StateAp *dest, TransAp *srcList );
-
- void doRemove( MergeData &md, StateAp *destState, ExpansionList &expList1 );
- void doExpand( MergeData &md, StateAp *destState, ExpansionList &expList1 );
- void findCondExpInTrans( ExpansionList &expansionList, StateAp *state,
- Key lowKey, Key highKey, CondSpace *fromCondSpace, CondSpace *toCondSpace,
- long destVals, LongVect &toValsList );
- void findTransExpansions( ExpansionList &expansionList,
- StateAp *destState, StateAp *srcState );
- void findCondExpansions( ExpansionList &expansionList,
- StateAp *destState, StateAp *srcState );
- void mergeStateConds( StateAp *destState, StateAp *srcState );
-
-	/* Merge a set of states into destState. */
- void mergeStates( MergeData &md, StateAp *destState,
- StateAp **srcStates, int numSrc );
- void mergeStatesLeaving( MergeData &md, StateAp *destState, StateAp *srcState );
- void mergeStates( MergeData &md, StateAp *destState, StateAp *srcState );
-
-	/* Fill in the out transitions of all states that are combinations of
-	 * other states and have not yet been filled in. This will
-	 * empty out stateDict and stFil. */
- void fillInStates( MergeData &md );
-
- /*
- * Transition Comparison.
- */
-
- /* Compare transition data. Either of the pointers may be null. */
- static inline int compareDataPtr( TransAp *trans1, TransAp *trans2 );
-
- /* Compare target state and transition data. Either pointer may be null. */
- static inline int compareFullPtr( TransAp *trans1, TransAp *trans2 );
-
- /* Compare target partitions. Either pointer may be null. */
- static inline int comparePartPtr( TransAp *trans1, TransAp *trans2 );
-
- /* Check marked status of target states. Either pointer may be null. */
- static inline bool shouldMarkPtr( MarkIndex &markIndex,
- TransAp *trans1, TransAp *trans2 );
-
- /*
- * Callbacks.
- */
-
- /* Compare priority and function table of transitions. */
- static int compareTransData( TransAp *trans1, TransAp *trans2 );
-
-	/* Add in the properties of srcTrans into destTrans. */
- void addInTrans( TransAp *destTrans, TransAp *srcTrans );
-
- /* Compare states on data stored in the states. */
- static int compareStateData( const StateAp *state1, const StateAp *state2 );
-
- /* Out transition data. */
- void clearOutData( StateAp *state );
- bool hasOutData( StateAp *state );
- void transferOutData( StateAp *destState, StateAp *srcState );
-
- /*
- * Allocation.
- */
-
- /* New up a state and add it to the graph. */
- StateAp *addState();
-
- /*
- * Building basic machines
- */
-
- void concatFsm( Key c );
- void concatFsm( Key *str, int len );
- void concatFsmCI( Key *str, int len );
- void orFsm( Key *set, int len );
- void rangeFsm( Key low, Key high );
- void rangeStarFsm( Key low, Key high );
- void emptyFsm( );
- void lambdaFsm( );
-
- /*
- * Fsm operators.
- */
-
- void starOp( );
- void repeatOp( int times );
- void optionalRepeatOp( int times );
- void concatOp( FsmAp *other );
- void unionOp( FsmAp *other );
- void intersectOp( FsmAp *other );
- void subtractOp( FsmAp *other );
- void epsilonOp();
- void joinOp( int startId, int finalId, FsmAp **others, int numOthers );
- void globOp( FsmAp **others, int numOthers );
- void deterministicEntry();
-
- /*
- * Operator workers
- */
-
-	/* Determine if there are any entry points into the start state other
-	 * than the start state entry itself. */
- bool isStartStateIsolated();
-
- /* Make a new start state that has no entry points. Will not change the
- * identity of the fsm. */
- void isolateStartState();
-
- /* Workers for resolving epsilon transitions. */
- bool inEptVect( EptVect *eptVect, StateAp *targ );
- void epsilonFillEptVectFrom( StateAp *root, StateAp *from, bool parentLeaving );
- void resolveEpsilonTrans( MergeData &md );
-
- /* Workers for concatenation and union. */
- void doConcat( FsmAp *other, StateSet *fromStates, bool optional );
- void doOr( FsmAp *other );
-
- /*
- * Final states
- */
-
- /* Unset any final states that are no longer to be final
- * due to final bits. */
- void unsetIncompleteFinals();
- void unsetKilledFinals();
-
-	/* Bring in other's entry points. Assumes the other machine's states are going to be
- * copied into this machine. */
- void copyInEntryPoints( FsmAp *other );
-
- /* Ordering states. */
- void depthFirstOrdering( StateAp *state );
- void depthFirstOrdering();
- void sortStatesByFinal();
-
-	/* Set sequential state numbers starting at base. */
- void setStateNumbers( int base );
-
- /* Unset all final states. */
- void unsetAllFinStates();
-
- /* Set the bits of final states and clear the bits of non final states. */
- void setFinBits( int finStateBits );
-
- /*
- * Self-consistency checks.
- */
-
- /* Run a sanity check on the machine. */
- void verifyIntegrity();
-
- /* Verify that there are no unreachable states, or dead end states. */
- void verifyReachability();
- void verifyNoDeadEndStates();
-
- /*
- * Path pruning
- */
-
-	/* Mark all states that can reach the given state (walks transitions in reverse). */
- void markReachableFromHereReverse( StateAp *state );
-
- /* Mark all states reachable from state. */
- void markReachableFromHere( StateAp *state );
- void markReachableFromHereStopFinal( StateAp *state );
-
-	/* Removes states from which no final state can be reached and are
-	 * thus wasted silicon. */
- void removeDeadEndStates();
-
- /* Removes states that cannot be reached by any path in the fsm and are
- * thus wasted silicon. */
- void removeUnreachableStates();
-
- /* Remove error actions from states on which the error transition will
- * never be taken. */
- bool outListCovers( StateAp *state );
- bool anyErrorRange( StateAp *state );
-
- /* Remove states that are on the misfit list. */
- void removeMisfits();
-
- /*
- * FSM Minimization
- */
-
- /* Minimization by partitioning. */
- void minimizePartition1();
- void minimizePartition2();
-
-	/* Minimize the final state machine. The result is the minimal fsm. Slow
-	 * but stable, correct minimization. Uses n^2 space (look out) and average
-	 * n^2 time. Worst case n^3 time, but that is a very rare case. */
- void minimizeStable();
-
- /* Minimize the final state machine. Does not find the minimal fsm, but a
- * pretty good approximation. Does not use any extra space. Average n^2
-	 * time. Worst case n^3 time, but that is a very rare case. */
- void minimizeApproximate();
-
- /* This is the worker for the minimize approximate solution. It merges
- * states that have identical out transitions. */
- bool minimizeRound( );
-
-	/* Given an initial partitioning of states, split partitions that have out trans
- * to differing partitions. */
- int partitionRound( StateAp **statePtrs, MinPartition *parts, int numParts );
-
- /* Split partitions that have a transition to a previously split partition, until
- * there are no more partitions to split. */
- int splitCandidates( StateAp **statePtrs, MinPartition *parts, int numParts );
-
- /* Fuse together states in the same partition. */
- void fusePartitions( MinPartition *parts, int numParts );
-
-	/* Mark pairs where final state status differs, where out trans data
-	 * differs, or where trans pairs go to a marked pair. Should mark
-	 * a lot of pairs. */
- void initialMarkRound( MarkIndex &markIndex );
-
- /* One marking round on all state pairs. Considers if trans pairs go
- * to a marked state only. Returns whether or not a pair was marked. */
- bool markRound( MarkIndex &markIndex );
-
- /* Move the in trans into src into dest. */
- void inTransMove(StateAp *dest, StateAp *src);
-
- /* Make state src and dest the same state. */
- void fuseEquivStates(StateAp *dest, StateAp *src);
-
- /* Find any states that didn't get marked by the marking algorithm and
- * merge them into the primary states of their equivalence class. */
- void fuseUnmarkedPairs( MarkIndex &markIndex );
-
-	/* Merge neighboring transitions that go to the same state and have the
-	 * same transition data. */
- void compressTransitions();
-
-	/* Returns true if there is a transition (either explicit or by a gap) to
- * the error state. */
- bool checkErrTrans( StateAp *state, TransAp *trans );
- bool checkErrTransFinish( StateAp *state );
- bool hasErrorTrans();
-
- /* Check if a machine defines a single character. This is useful in
- * validating ranges and machines to export. */
- bool checkSingleCharMachine( );
-};
-
-
-#endif /* _FSMGRAPH_H */
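For orientation, a hedged usage sketch (not part of the patch) of how the builder and operator methods declared in this header compose. The FsmAp instances are assumed to be constructed elsewhere, Key is assumed constructible from a long as in parsedata.cpp further below, and ownership/lifetime details are omitted; the clean-up-then-minimize ordering mirrors afterOpMinimize() in parsedata.cpp.

#include "fsmgraph.h"

/* Sketch only: build 'a'*, union it with a second machine, then clean up
 * and minimize. All calls are declared in the header above. */
void sketchCompose( FsmAp *fsm, FsmAp *other )
{
	fsm->concatFsm( Key( (long)'a' ) );   /* basic machine matching the key 'a' */
	fsm->starOp();                        /* kleene star */
	fsm->unionOp( other );                /* union with the second machine */
	fsm->removeUnreachableStates();       /* drop states orphaned by the operators */
	fsm->minimizePartition2();            /* partition-based minimization */
}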
diff --git a/contrib/tools/ragel5/ragel/fsmmin.cpp b/contrib/tools/ragel5/ragel/fsmmin.cpp
deleted file mode 100644
index 046d11afa6..0000000000
--- a/contrib/tools/ragel5/ragel/fsmmin.cpp
+++ /dev/null
@@ -1,732 +0,0 @@
-/*
- * Copyright 2002 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "fsmgraph.h"
-#include "mergesort.h"
-
-int FsmAp::partitionRound( StateAp **statePtrs, MinPartition *parts, int numParts )
-{
- /* Need a mergesort object and a single partition compare. */
- MergeSort<StateAp*, PartitionCompare> mergeSort;
- PartitionCompare partCompare;
-
- /* For each partition. */
- for ( int p = 0; p < numParts; p++ ) {
- /* Fill the pointer array with the states in the partition. */
- StateList::Iter state = parts[p].list;
- for ( int s = 0; state.lte(); state++, s++ )
- statePtrs[s] = state;
-
- /* Sort the states using the partitioning compare. */
- int numStates = parts[p].list.length();
- mergeSort.sort( statePtrs, numStates );
-
- /* Assign the states into partitions based on the results of the sort. */
- int destPart = p, firstNewPart = numParts;
- for ( int s = 1; s < numStates; s++ ) {
- /* If this state differs from the last then move to the next partition. */
- if ( partCompare.compare( statePtrs[s-1], statePtrs[s] ) < 0 ) {
- /* The new partition is the next avail spot. */
- destPart = numParts;
- numParts += 1;
- }
-
- /* If the state is not staying in the first partition, then
- * transfer it to its destination partition. */
- if ( destPart != p ) {
- StateAp *state = parts[p].list.detach( statePtrs[s] );
- parts[destPart].list.append( state );
- }
- }
-
- /* Fix the partition pointer for all the states that got moved to a new
-		 * partition. This must be done after the states are transferred so the
- * result of the sort is not altered. */
- for ( int newPart = firstNewPart; newPart < numParts; newPart++ ) {
- StateList::Iter state = parts[newPart].list;
- for ( ; state.lte(); state++ )
- state->alg.partition = &parts[newPart];
- }
- }
-
- return numParts;
-}
-
-/**
- * \brief Minimize by partitioning version 1.
- *
- * Repeatedly tries to split partitions until all partitions are unsplittable.
- * Produces the most minimal FSM possible.
- */
-void FsmAp::minimizePartition1()
-{
- /* Need one mergesort object and partition compares. */
- MergeSort<StateAp*, InitPartitionCompare> mergeSort;
- InitPartitionCompare initPartCompare;
-
- /* Nothing to do if there are no states. */
- if ( stateList.length() == 0 )
- return;
-
- /*
- * First thing is to partition the states by final state status and
- * transition functions. This gives us an initial partitioning to work
- * with.
- */
-
-	/* Make an array of pointers to states. */
- int numStates = stateList.length();
- StateAp** statePtrs = new StateAp*[numStates];
-
- /* Fill up an array of pointers to the states for easy sorting. */
- StateList::Iter state = stateList;
- for ( int s = 0; state.lte(); state++, s++ )
- statePtrs[s] = state;
-
- /* Sort the states using the array of states. */
- mergeSort.sort( statePtrs, numStates );
-
- /* An array of lists of states is used to partition the states. */
- MinPartition *parts = new MinPartition[numStates];
-
- /* Assign the states into partitions. */
- int destPart = 0;
- for ( int s = 0; s < numStates; s++ ) {
- /* If this state differs from the last then move to the next partition. */
- if ( s > 0 && initPartCompare.compare( statePtrs[s-1], statePtrs[s] ) < 0 ) {
- /* Move to the next partition. */
- destPart += 1;
- }
-
- /* Put the state into its partition. */
- statePtrs[s]->alg.partition = &parts[destPart];
- parts[destPart].list.append( statePtrs[s] );
- }
-
- /* We just moved all the states from the main list into partitions without
- * taking them off the main list. So clean up the main list now. */
- stateList.abandon();
-
- /* Split partitions. */
- int numParts = destPart + 1;
- while ( true ) {
- /* Test all partitions for splitting. */
- int newNum = partitionRound( statePtrs, parts, numParts );
-
- /* When no partitions can be split, stop. */
- if ( newNum == numParts )
- break;
-
- numParts = newNum;
- }
-
- /* Fuse states in the same partition. The states will end up back on the
- * main list. */
- fusePartitions( parts, numParts );
-
- /* Cleanup. */
- delete[] statePtrs;
- delete[] parts;
-}
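To make the refinement loop in minimizePartition1() concrete, here is a small self-contained sketch of the same idea on a toy DFA: group states by a signature built from their current partition and the partitions reached on each symbol, and repeat until the partition count stops changing. The vector/map representation and all names are invented for the sketch; it is not the FsmAp code.

#include <cstdio>
#include <map>
#include <utility>
#include <vector>

/* Toy DFA: delta[s][c] is the target of state s on symbol c. */
typedef std::vector<std::vector<int> > Delta;

/* One refinement pass in the spirit of partitionRound(): renumber states
 * by (current partition, partitions reached on each symbol). Returns the
 * new partition count. */
static int refineOnce( const Delta &delta, std::vector<int> &part )
{
	std::map<std::vector<int>, int> sig2part;
	std::vector<int> next( part.size() );
	for ( size_t s = 0; s < delta.size(); s++ ) {
		std::vector<int> sig;
		sig.push_back( part[s] );
		for ( size_t c = 0; c < delta[s].size(); c++ )
			sig.push_back( part[ delta[s][c] ] );
		std::map<std::vector<int>, int>::iterator it = sig2part.find( sig );
		if ( it == sig2part.end() )
			it = sig2part.insert( std::make_pair( sig, (int)sig2part.size() ) ).first;
		next[s] = it->second;
	}
	part.swap( next );
	return (int)sig2part.size();
}

int main()
{
	/* Four states over symbols {0,1}; state 3 is final. States 0 and 1
	 * are equivalent and should end up in the same partition. */
	Delta delta( 4, std::vector<int>( 2 ) );
	delta[0][0] = 1; delta[0][1] = 2;
	delta[1][0] = 0; delta[1][1] = 2;
	delta[2][0] = 3; delta[2][1] = 3;
	delta[3][0] = 3; delta[3][1] = 3;

	/* Initial partition: final vs. non-final, as in minimizePartition1(). */
	std::vector<int> part( 4, 0 );
	part[3] = 1;

	int numParts = 2, newNum;
	while ( (newNum = refineOnce( delta, part )) != numParts )
		numParts = newNum;

	for ( size_t s = 0; s < part.size(); s++ )
		printf( "state %lu -> partition %d\n", (unsigned long)s, part[s] );
	return 0;
}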
-
-/* Split partitions that need splitting, decide which partitions might need
- * to be split as a result, continue until there are no more that might need
- * to be split. */
-int FsmAp::splitCandidates( StateAp **statePtrs, MinPartition *parts, int numParts )
-{
- /* Need a mergesort and a partition compare. */
- MergeSort<StateAp*, PartitionCompare> mergeSort;
- PartitionCompare partCompare;
-
-	/* The lists of unsplittable (partList) and splittable partitions.
-	 * Only partitions in the splittable list are checked for needing splitting. */
- PartitionList partList, splittable;
-
- /* Initially, all partitions are born from a split (the initial
- * partitioning) and can cause other partitions to be split. So any
- * partition with a state with a transition out to another partition is a
- * candidate for splitting. This will make every partition except possibly
- * partitions of final states split candidates. */
- for ( int p = 0; p < numParts; p++ ) {
- /* Assume not active. */
- parts[p].active = false;
-
- /* Look for a trans out of any state in the partition. */
- for ( StateList::Iter state = parts[p].list; state.lte(); state++ ) {
- /* If there is at least one transition out to another state then
- * the partition becomes splittable. */
- if ( state->outList.length() > 0 ) {
- parts[p].active = true;
- break;
- }
- }
-
- /* If it was found active then it goes on the splittable list. */
- if ( parts[p].active )
- splittable.append( &parts[p] );
- else
- partList.append( &parts[p] );
- }
-
- /* While there are partitions that are splittable, pull one off and try
- * to split it. If it splits, determine which partitions may now be split
- * as a result of the newly split partition. */
- while ( splittable.length() > 0 ) {
- MinPartition *partition = splittable.detachFirst();
-
- /* Fill the pointer array with the states in the partition. */
- StateList::Iter state = partition->list;
- for ( int s = 0; state.lte(); state++, s++ )
- statePtrs[s] = state;
-
- /* Sort the states using the partitioning compare. */
- int numStates = partition->list.length();
- mergeSort.sort( statePtrs, numStates );
-
- /* Assign the states into partitions based on the results of the sort. */
- MinPartition *destPart = partition;
- int firstNewPart = numParts;
- for ( int s = 1; s < numStates; s++ ) {
- /* If this state differs from the last then move to the next partition. */
- if ( partCompare.compare( statePtrs[s-1], statePtrs[s] ) < 0 ) {
- /* The new partition is the next avail spot. */
- destPart = &parts[numParts];
- numParts += 1;
- }
-
- /* If the state is not staying in the first partition, then
- * transfer it to its destination partition. */
- if ( destPart != partition ) {
- StateAp *state = partition->list.detach( statePtrs[s] );
- destPart->list.append( state );
- }
- }
-
- /* Fix the partition pointer for all the states that got moved to a new
-		 * partition. This must be done after the states are transferred so the
- * result of the sort is not altered. */
- int newPart;
- for ( newPart = firstNewPart; newPart < numParts; newPart++ ) {
- StateList::Iter state = parts[newPart].list;
- for ( ; state.lte(); state++ )
- state->alg.partition = &parts[newPart];
- }
-
- /* Put the partition we just split and any new partitions that came out
- * of the split onto the inactive list. */
- partition->active = false;
- partList.append( partition );
- for ( newPart = firstNewPart; newPart < numParts; newPart++ ) {
- parts[newPart].active = false;
- partList.append( &parts[newPart] );
- }
-
- if ( destPart == partition )
- continue;
-
- /* Now determine which partitions are splittable as a result of
- * splitting partition by walking the in lists of the states in
- * partitions that got split. Partition is the faked first item in the
- * loop. */
- MinPartition *causalPart = partition;
- newPart = firstNewPart - 1;
- while ( newPart < numParts ) {
- /* Loop all states in the causal partition. */
- StateList::Iter state = causalPart->list;
- for ( ; state.lte(); state++ ) {
-				/* Walk all transitions into the state and put the partition
- * that the from state is in onto the splittable list. */
- for ( TransInList::Iter trans = state->inList; trans.lte(); trans++ ) {
- MinPartition *fromPart = trans->fromState->alg.partition;
- if ( ! fromPart->active ) {
- fromPart->active = true;
- partList.detach( fromPart );
- splittable.append( fromPart );
- }
- }
- }
-
- newPart += 1;
- causalPart = &parts[newPart];
- }
- }
- return numParts;
-}
-
-
-/**
- * \brief Minimize by partitioning version 2 (best alg).
- *
- * Repeatedly tries to split partitions that may be splittable until there are no
- * more partitions that might possibly need splitting. Runs faster than
- * version 1. Produces the most minimal fsm possible.
- */
-void FsmAp::minimizePartition2()
-{
- /* Need a mergesort and an initial partition compare. */
- MergeSort<StateAp*, InitPartitionCompare> mergeSort;
- InitPartitionCompare initPartCompare;
-
- /* Nothing to do if there are no states. */
- if ( stateList.length() == 0 )
- return;
-
- /*
- * First thing is to partition the states by final state status and
- * transition functions. This gives us an initial partitioning to work
- * with.
- */
-
-	/* Make an array of pointers to states. */
- int numStates = stateList.length();
- StateAp** statePtrs = new StateAp*[numStates];
-
- /* Fill up an array of pointers to the states for easy sorting. */
- StateList::Iter state = stateList;
- for ( int s = 0; state.lte(); state++, s++ )
- statePtrs[s] = state;
-
- /* Sort the states using the array of states. */
- mergeSort.sort( statePtrs, numStates );
-
- /* An array of lists of states is used to partition the states. */
- MinPartition *parts = new MinPartition[numStates];
-
- /* Assign the states into partitions. */
- int destPart = 0;
- for ( int s = 0; s < numStates; s++ ) {
- /* If this state differs from the last then move to the next partition. */
- if ( s > 0 && initPartCompare.compare( statePtrs[s-1], statePtrs[s] ) < 0 ) {
- /* Move to the next partition. */
- destPart += 1;
- }
-
- /* Put the state into its partition. */
- statePtrs[s]->alg.partition = &parts[destPart];
- parts[destPart].list.append( statePtrs[s] );
- }
-
- /* We just moved all the states from the main list into partitions without
- * taking them off the main list. So clean up the main list now. */
- stateList.abandon();
-
- /* Split partitions. */
- int numParts = splitCandidates( statePtrs, parts, destPart+1 );
-
- /* Fuse states in the same partition. The states will end up back on the
- * main list. */
- fusePartitions( parts, numParts );
-
- /* Cleanup. */
- delete[] statePtrs;
- delete[] parts;
-}
-
-void FsmAp::initialMarkRound( MarkIndex &markIndex )
-{
- /* P and q for walking pairs. */
- StateAp *p = stateList.head, *q;
-
- /* Need an initial partition compare. */
- InitPartitionCompare initPartCompare;
-
- /* Walk all unordered pairs of (p, q) where p != q.
- * The second depth of the walk stops before reaching p. This
- * gives us all unordered pairs of states (p, q) where p != q. */
- while ( p != 0 ) {
- q = stateList.head;
- while ( q != p ) {
- /* If the states differ on final state status, out transitions or
- * any transition data then they should be separated on the initial
- * round. */
- if ( initPartCompare.compare( p, q ) != 0 )
- markIndex.markPair( p->alg.stateNum, q->alg.stateNum );
-
- q = q->next;
- }
- p = p->next;
- }
-}
-
-bool FsmAp::markRound( MarkIndex &markIndex )
-{
-	/* P and q for walking pairs. Take note if any pair gets marked. */
- StateAp *p = stateList.head, *q;
- bool pairWasMarked = false;
-
- /* Need a mark comparison. */
- MarkCompare markCompare;
-
- /* Walk all unordered pairs of (p, q) where p != q.
- * The second depth of the walk stops before reaching p. This
- * gives us all unordered pairs of states (p, q) where p != q. */
- while ( p != 0 ) {
- q = stateList.head;
- while ( q != p ) {
- /* Should we mark the pair? */
- if ( !markIndex.isPairMarked( p->alg.stateNum, q->alg.stateNum ) ) {
- if ( markCompare.shouldMark( markIndex, p, q ) ) {
- markIndex.markPair( p->alg.stateNum, q->alg.stateNum );
- pairWasMarked = true;
- }
- }
- q = q->next;
- }
- p = p->next;
- }
-
- return pairWasMarked;
-}
-
-
-/**
- * \brief Minimize by pair marking.
- *
- * Decides if each pair of states is distinct or not. Uses O(n^2) memory and
- * should only be used on small graphs. Produces the most minimal FSM
- * possible.
- */
-void FsmAp::minimizeStable()
-{
- /* Set the state numbers. */
- setStateNumbers( 0 );
-
- /* This keeps track of which pairs have been marked. */
- MarkIndex markIndex( stateList.length() );
-
- /* Mark pairs where final stateness, out trans, or trans data differ. */
- initialMarkRound( markIndex );
-
-	/* While the last round of marking succeeded in marking a pair,
-	 * continue to do another round. */
-	bool modified = markRound( markIndex );
- while (modified)
- modified = markRound( markIndex );
-
- /* Merge pairs that are unmarked. */
- fuseUnmarkedPairs( markIndex );
-}
-
-bool FsmAp::minimizeRound()
-{
- /* Nothing to do if there are no states. */
- if ( stateList.length() == 0 )
- return false;
-
- /* Need a mergesort on approx compare and an approx compare. */
- MergeSort<StateAp*, ApproxCompare> mergeSort;
- ApproxCompare approxCompare;
-
- /* Fill up an array of pointers to the states. */
- StateAp **statePtrs = new StateAp*[stateList.length()];
- StateList::Iter state = stateList;
- for ( int s = 0; state.lte(); state++, s++ )
- statePtrs[s] = state;
-
- bool modified = false;
-
- /* Sort The list. */
- mergeSort.sort( statePtrs, stateList.length() );
-
- /* Walk the list looking for duplicates next to each other,
- * merge in any duplicates. */
- StateAp **pLast = statePtrs;
- StateAp **pState = statePtrs + 1;
- for ( int i = 1; i < stateList.length(); i++, pState++ ) {
- if ( approxCompare.compare( *pLast, *pState ) == 0 ) {
-			/* Last and pState are the same, so fuse them together. Move forward
-			 * with pState but not with pLast. If any more are identical, they
-			 * must be fused into pLast as well. */
- fuseEquivStates( *pLast, *pState );
- modified = true;
- }
- else {
-			/* Last and this are different, so do not merge them. Move
- * pLast to the current (it may be way behind from merging many
- * states) and pState forward one to consider the next pair. */
- pLast = pState;
- }
- }
- delete[] statePtrs;
- return modified;
-}
-
-/**
- * \brief Minimize by an approximation.
- *
- * Repeatedly tries to find states with transitions out to the same set of
- * states on the same set of keys until no more identical states can be found.
- * Does not produce the most minimal FSM possible.
- */
-void FsmAp::minimizeApproximate()
-{
- /* While the last minimization round succeeded in compacting states,
- * continue to try to compact states. */
- while ( true ) {
- bool modified = minimizeRound();
- if ( ! modified )
- break;
- }
-}
-
-
-/* Remove states that have no path to them from the start state. Recursively
- * traverses the graph marking states that have paths into them. Then removes
- * all states that did not get marked. */
-void FsmAp::removeUnreachableStates()
-{
- /* Misfit accounting should be off and there should be no states on the
- * misfit list. */
- assert( !misfitAccounting && misfitList.length() == 0 );
-
- /* Mark all the states that can be reached
- * through the existing set of entry points. */
- markReachableFromHere( startState );
- for ( EntryMap::Iter en = entryPoints; en.lte(); en++ )
- markReachableFromHere( en->value );
-
- /* Delete all states that are not marked
- * and unmark the ones that are marked. */
- StateAp *state = stateList.head;
- while ( state ) {
- StateAp *next = state->next;
-
- if ( state->stateBits & SB_ISMARKED )
- state->stateBits &= ~ SB_ISMARKED;
- else {
- detachState( state );
- stateList.detach( state );
- delete state;
- }
-
- state = next;
- }
-}
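removeUnreachableStates() is a plain mark-and-sweep: mark everything reachable from the start state and the entry points, then delete whatever stayed unmarked. A minimal self-contained sketch of that pattern on an invented adjacency-list graph (not the FsmAp representation):

#include <cstdio>
#include <vector>

/* Depth-first marking from a root, analogous to markReachableFromHere(). */
static void mark( const std::vector<std::vector<int> > &adj,
		std::vector<bool> &seen, int s )
{
	if ( seen[s] )
		return;
	seen[s] = true;
	for ( size_t i = 0; i < adj[s].size(); i++ )
		mark( adj, seen, adj[s][i] );
}

int main()
{
	/* 0 -> 1 -> 2, while 3 only points in and is itself unreachable. */
	std::vector<std::vector<int> > adj( 4 );
	adj[0].push_back( 1 );
	adj[1].push_back( 2 );
	adj[3].push_back( 1 );

	std::vector<bool> seen( adj.size(), false );
	mark( adj, seen, 0 );  /* start state; the real code also marks entry points */

	/* Sweep: anything unmarked would be detached and deleted. */
	for ( size_t s = 0; s < adj.size(); s++ ) {
		if ( !seen[s] )
			printf( "state %lu is unreachable\n", (unsigned long)s );
	}
	return 0;
}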
-
-bool FsmAp::outListCovers( StateAp *state )
-{
- /* Must be at least one range to cover. */
- if ( state->outList.length() == 0 )
- return false;
-
- /* The first must start at the lower bound. */
- TransList::Iter trans = state->outList.first();
- if ( keyOps->minKey < trans->lowKey )
- return false;
-
- /* Loop starts at second el. */
- trans.increment();
-
- /* Loop checks lower against prev upper. */
- for ( ; trans.lte(); trans++ ) {
- /* Lower end of the trans must be one greater than the
- * previous' high end. */
- Key lowKey = trans->lowKey;
- lowKey.decrement();
- if ( trans->prev->highKey < lowKey )
- return false;
- }
-
- /* Require that the last range extends to the upper bound. */
- trans = state->outList.last();
- if ( trans->highKey < keyOps->maxKey )
- return false;
-
- return true;
-}
-
-/* Remove states that do not lead to a final state. Works by recursively traversing
- * the graph in reverse (starting from all final states) and marking seen states. Then
- * removes states that did not get marked. */
-void FsmAp::removeDeadEndStates()
-{
- /* Misfit accounting should be off and there should be no states on the
- * misfit list. */
- assert( !misfitAccounting && misfitList.length() == 0 );
-
- /* Mark all states that have paths to the final states. */
- StateAp **st = finStateSet.data;
- int nst = finStateSet.length();
- for ( int i = 0; i < nst; i++, st++ )
- markReachableFromHereReverse( *st );
-
- /* Start state gets honorary marking. If the machine accepts nothing we
- * still want the start state to hang around. This must be done after the
- * recursive call on all the final states so that it does not cause the
- * start state in transitions to be skipped when the start state is
- * visited by the traversal. */
- startState->stateBits |= SB_ISMARKED;
-
- /* Delete all states that are not marked
- * and unmark the ones that are marked. */
- StateAp *state = stateList.head;
- while ( state != 0 ) {
- StateAp *next = state->next;
-
- if ( state->stateBits & SB_ISMARKED )
- state->stateBits &= ~ SB_ISMARKED;
- else {
- detachState( state );
- stateList.detach( state );
- delete state;
- }
-
- state = next;
- }
-}
-
-/* Remove states on the misfit list. To work properly misfit accounting should
- * be on when this is called. The detaching of a state will likely cause
- * another misfit to be collected and it can then be removed. */
-void FsmAp::removeMisfits()
-{
- while ( misfitList.length() > 0 ) {
- /* Get the first state. */
- StateAp *state = misfitList.head;
-
- /* Detach and delete. */
- detachState( state );
-
- /* The state was previously on the misfit list and detaching can only
- * remove in transitions so the state must still be on the misfit
- * list. */
- misfitList.detach( state );
- delete state;
- }
-}
-
-/* Fuse src into dest because they have been deemed equivalent states.
- * Involves moving transitions into src to go into dest and invoking
- * callbacks. Src is detached from the graph and deleted. */
-void FsmAp::fuseEquivStates( StateAp *dest, StateAp *src )
-{
- /* This would get ugly. */
- assert( dest != src );
-
-	/* Src is a duplicate of dest, so move its in transitions into dest. */
- inTransMove( dest, src );
-
- detachState( src );
- stateList.detach( src );
- delete src;
-}
-
-void FsmAp::fuseUnmarkedPairs( MarkIndex &markIndex )
-{
- StateAp *p = stateList.head, *nextP, *q;
-
-	/* Definition: The primary state of an equivalence class is the first state
-	 * encountered that belongs to the equivalence class. All equivalence
-	 * classes have a primary state, including equivalence classes with one
-	 * state in them. */
-
- /* For each unmarked pair merge p into q and delete p. q is always the
-	 * primary state of its equivalence class. We wouldn't have landed on it
- * here if it were not, because it would have been deleted.
- *
-	 * Proof that q is the primary state of its equivalence class: Assume q
-	 * is not the primary state of its equivalence class, then it would be
- * merged into some state that came before it and thus p would be
- * equivalent to that state. But q is the first state that p is equivalent
- * to so we have a contradiction. */
-
- /* Walk all unordered pairs of (p, q) where p != q.
- * The second depth of the walk stops before reaching p. This
- * gives us all unordered pairs of states (p, q) where p != q. */
- while ( p != 0 ) {
- nextP = p->next;
-
- q = stateList.head;
- while ( q != p ) {
-			/* If the pair is unmarked then p and q are equivalent; fuse p into q. */
- if ( ! markIndex.isPairMarked( p->alg.stateNum, q->alg.stateNum ) ) {
- fuseEquivStates( q, p );
- break;
- }
- q = q->next;
- }
- p = nextP;
- }
-}
-
-void FsmAp::fusePartitions( MinPartition *parts, int numParts )
-{
- /* For each partition, fuse state 2, 3, ... into state 1. */
- for ( int p = 0; p < numParts; p++ ) {
- /* Assume that there will always be at least one state. */
- StateAp *first = parts[p].list.head, *toFuse = first->next;
-
- /* Put the first state back onto the main state list. Don't bother
- * removing it from the partition list first. */
- stateList.append( first );
-
-		/* Fuse the rest of the states into the first. */
- while ( toFuse != 0 ) {
- /* Save the next. We will trash it before it is needed. */
- StateAp *next = toFuse->next;
-
-			/* Put the state to be fused into the first back onto the main
-			 * list before it is fused. The state needs to be on
- * the main list for the detach from the graph to work. Don't
- * bother removing the state from the partition list first. We
- * need not maintain it. */
- stateList.append( toFuse );
-
- /* Now fuse to the first. */
- fuseEquivStates( first, toFuse );
-
- /* Go to the next that we saved before trashing the next pointer. */
- toFuse = next;
- }
-
-		/* We transferred the states from the partition list into the main list without
- * removing the states from the partition list first. Clean it up. */
- parts[p].list.abandon();
- }
-}
-
-
-/* Merge neighboring transitions that go to the same state and have the
- * same transition data. */
-void FsmAp::compressTransitions()
-{
- for ( StateList::Iter st = stateList; st.lte(); st++ ) {
- if ( st->outList.length() > 1 ) {
- for ( TransList::Iter trans = st->outList, next = trans.next(); next.lte(); ) {
- Key nextLow = next->lowKey;
- nextLow.decrement();
- if ( trans->highKey == nextLow && trans->toState == next->toState &&
- CmpActionTable::compare( trans->actionTable, next->actionTable ) == 0 )
- {
- trans->highKey = next->highKey;
- st->outList.detach( next );
- detachTrans( next->fromState, next->toState, next );
- delete next;
- next = trans.next();
- }
- else {
- trans.increment();
- next.increment();
- }
- }
- }
- }
-}
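compressTransitions() above merges a transition into its predecessor when the key ranges are adjacent, the target states match and the action tables compare equal. A self-contained sketch of the same merge on an invented sorted range list (integer keys, integer targets, no action tables):

#include <cstdio>
#include <vector>

/* Invented stand-in for a sorted, non-overlapping out-transition list. */
struct Range { int low, high, target; };

/* Extend the previous range when the next one starts right after it and
 * leads to the same target; otherwise start a new range. */
static std::vector<Range> compress( const std::vector<Range> &in )
{
	std::vector<Range> out;
	for ( size_t i = 0; i < in.size(); i++ ) {
		if ( !out.empty() && out.back().high + 1 == in[i].low &&
				out.back().target == in[i].target )
			out.back().high = in[i].high;
		else
			out.push_back( in[i] );
	}
	return out;
}

int main()
{
	Range r[] = { { 'a', 'c', 1 }, { 'd', 'f', 1 }, { 'g', 'g', 2 } };
	std::vector<Range> merged = compress( std::vector<Range>( r, r + 3 ) );
	for ( size_t i = 0; i < merged.size(); i++ )
		printf( "[%c-%c] -> %d\n", merged[i].low, merged[i].high, merged[i].target );
	return 0;
}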
diff --git a/contrib/tools/ragel5/ragel/fsmstate.cpp b/contrib/tools/ragel5/ragel/fsmstate.cpp
deleted file mode 100644
index 4322c1060f..0000000000
--- a/contrib/tools/ragel5/ragel/fsmstate.cpp
+++ /dev/null
@@ -1,463 +0,0 @@
-/*
- * Copyright 2002 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <string.h>
-#include <assert.h>
-#include "fsmgraph.h"
-
-#include <iostream>
-using namespace std;
-
-/* Construct a mark index for a specified number of states. Must new up
- * an array that is states^2 in size. */
-MarkIndex::MarkIndex( int states ) : numStates(states)
-{
- /* Total pairs is states^2. Actually only use half of these, but we allocate
- * them all to make indexing into the array easier. */
- int total = states * states;
-
-	/* New up the array of bools and zero out the memory manually. */
- array = new bool[total];
- memset( array, 0, sizeof(bool) * total );
-}
-
-/* Free the array used to store state pairs. */
-MarkIndex::~MarkIndex()
-{
- delete[] array;
-}
-
-/* Mark a pair of states. States are specified by their number. The
- * pair is recorded in the pair index array. */
-void MarkIndex::markPair(int state1, int state2)
-{
- int pos = ( state1 >= state2 ) ?
- ( state1 * numStates ) + state2 :
- ( state2 * numStates ) + state1;
-
- array[pos] = true;
-}
-
-/* Returns true if the pair of states are marked. Returns false otherwise.
- * Ordering of states given does not matter. */
-bool MarkIndex::isPairMarked(int state1, int state2)
-{
- int pos = ( state1 >= state2 ) ?
- ( state1 * numStates ) + state2 :
- ( state2 * numStates ) + state1;
-
- return array[pos];
-}
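MarkIndex stores an unordered pair in a states*states array by always indexing with the larger state number first, so (a, b) and (b, a) land on the same cell and only half of the array is ever used. A short self-contained sketch of that indexing scheme (the PairIndex name and std::vector storage are invented; the idea is the same):

#include <cassert>
#include <vector>

class PairIndex
{
public:
	PairIndex( int states ) : numStates(states), cells( states * states, false ) {}

	/* Both orderings of the pair map to the same cell. */
	void mark( int s1, int s2 )
		{ cells[ pos( s1, s2 ) ] = true; }
	bool marked( int s1, int s2 ) const
		{ return cells[ pos( s1, s2 ) ]; }

private:
	int pos( int s1, int s2 ) const
		{ return s1 >= s2 ? s1 * numStates + s2 : s2 * numStates + s1; }

	int numStates;
	std::vector<bool> cells;
};

int main()
{
	PairIndex idx( 4 );
	idx.mark( 1, 3 );
	assert( idx.marked( 3, 1 ) );   /* order does not matter */
	assert( !idx.marked( 0, 2 ) );
	return 0;
}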
-
-/* Create a new fsm state. The state has no out or in transitions, no out
- * transition data and no state number. */
-StateAp::StateAp()
-:
- /* No out or in transitions. */
- outList(),
- inList(),
-
- /* No entry points, or epsilon trans. */
- entryIds(),
- epsilonTrans(),
-
- /* Conditions. */
- stateCondList(),
-
- /* No transitions in from other states. */
- foreignInTrans(0),
-
- /* Only used during merging. Normally null. */
- stateDictEl(0),
- eptVect(0),
-
- /* No state identification bits. */
- stateBits(0),
-
- /* No Priority data. */
- outPriorTable(),
-
- /* No Action data. */
- toStateActionTable(),
- fromStateActionTable(),
- outActionTable(),
- outCondSet(),
- errActionTable(),
- eofActionTable()
-{
-}
-
-/* Copy everything except the actual transitions. That is left up to the
- * FsmAp copy constructor. */
-StateAp::StateAp(const StateAp &other)
-:
- /* All lists are cleared. They will be filled in when the
- * individual transitions are duplicated and attached. */
- outList(),
- inList(),
-
- /* Duplicate the entry id set and epsilon transitions. These
- * are sets of integers and as such need no fixing. */
- entryIds(other.entryIds),
- epsilonTrans(other.epsilonTrans),
-
- /* Copy in the elements of the conditions. */
- stateCondList( other.stateCondList ),
-
- /* No transitions in from other states. */
- foreignInTrans(0),
-
- /* This is only used during merging. Normally null. */
- stateDictEl(0),
- eptVect(0),
-
- /* Fsm state data. */
- stateBits(other.stateBits),
-
- /* Copy in priority data. */
- outPriorTable(other.outPriorTable),
-
- /* Copy in action data. */
- toStateActionTable(other.toStateActionTable),
- fromStateActionTable(other.fromStateActionTable),
- outActionTable(other.outActionTable),
- outCondSet(other.outCondSet),
- errActionTable(other.errActionTable),
- eofActionTable(other.eofActionTable)
-{
- /* Duplicate all the transitions. */
- for ( TransList::Iter trans = other.outList; trans.lte(); trans++ ) {
-		/* Duplicate and store the original target in the transition. This will
- * be corrected once all the states have been created. */
- TransAp *newTrans = new TransAp(*trans);
- newTrans->toState = trans->toState;
- outList.append( newTrans );
- }
-}
-
-/* If there is a state dict element, then delete it. Everything else is left
- * up to the FsmGraph destructor. */
-StateAp::~StateAp()
-{
- if ( stateDictEl != 0 )
- delete stateDictEl;
-}
-
-/* Compare two states using pointers to the states. With the approximate
- * compare the idea is that if the compare finds them the same, they can
- * immediately be merged. */
-int ApproxCompare::compare( const StateAp *state1 , const StateAp *state2 )
-{
- int compareRes;
-
- /* Test final state status. */
- if ( (state1->stateBits & SB_ISFINAL) && !(state2->stateBits & SB_ISFINAL) )
- return -1;
- else if ( !(state1->stateBits & SB_ISFINAL) && (state2->stateBits & SB_ISFINAL) )
- return 1;
-
- /* Test epsilon transition sets. */
- compareRes = CmpEpsilonTrans::compare( state1->epsilonTrans,
- state2->epsilonTrans );
- if ( compareRes != 0 )
- return compareRes;
-
- /* Compare the out transitions. */
- compareRes = FsmAp::compareStateData( state1, state2 );
- if ( compareRes != 0 )
- return compareRes;
-
- /* Use a pair iterator to get the transition pairs. */
- PairIter<TransAp> outPair( state1->outList.head, state2->outList.head );
- for ( ; !outPair.end(); outPair++ ) {
- switch ( outPair.userState ) {
-
- case RangeInS1:
- compareRes = FsmAp::compareFullPtr( outPair.s1Tel.trans, 0 );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeInS2:
- compareRes = FsmAp::compareFullPtr( 0, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeOverlap:
- compareRes = FsmAp::compareFullPtr(
- outPair.s1Tel.trans, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case BreakS1:
- case BreakS2:
- break;
- }
- }
-
- /* Got through the entire state comparison, deem them equal. */
- return 0;
-}
-
-/* Compare class for the sort that does the initial partitioning for compaction. */
-int InitPartitionCompare::compare( const StateAp *state1 , const StateAp *state2 )
-{
- int compareRes;
-
- /* Test final state status. */
- if ( (state1->stateBits & SB_ISFINAL) && !(state2->stateBits & SB_ISFINAL) )
- return -1;
- else if ( !(state1->stateBits & SB_ISFINAL) && (state2->stateBits & SB_ISFINAL) )
- return 1;
-
- /* Test epsilon transition sets. */
- compareRes = CmpEpsilonTrans::compare( state1->epsilonTrans,
- state2->epsilonTrans );
- if ( compareRes != 0 )
- return compareRes;
-
- /* Compare the out transitions. */
- compareRes = FsmAp::compareStateData( state1, state2 );
- if ( compareRes != 0 )
- return compareRes;
-
- /* Use a pair iterator to test the condition pairs. */
- PairIter<StateCond> condPair( state1->stateCondList.head, state2->stateCondList.head );
- for ( ; !condPair.end(); condPair++ ) {
- switch ( condPair.userState ) {
- case RangeInS1:
- return 1;
- case RangeInS2:
- return -1;
-
- case RangeOverlap: {
- CondSpace *condSpace1 = condPair.s1Tel.trans->condSpace;
- CondSpace *condSpace2 = condPair.s2Tel.trans->condSpace;
- if ( condSpace1 < condSpace2 )
- return -1;
- else if ( condSpace1 > condSpace2 )
- return 1;
- break;
- }
- case BreakS1:
- case BreakS2:
- break;
- }
- }
-
- /* Use a pair iterator to test the transition pairs. */
- PairIter<TransAp> outPair( state1->outList.head, state2->outList.head );
- for ( ; !outPair.end(); outPair++ ) {
- switch ( outPair.userState ) {
-
- case RangeInS1:
- compareRes = FsmAp::compareDataPtr( outPair.s1Tel.trans, 0 );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeInS2:
- compareRes = FsmAp::compareDataPtr( 0, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeOverlap:
- compareRes = FsmAp::compareDataPtr(
- outPair.s1Tel.trans, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case BreakS1:
- case BreakS2:
- break;
- }
- }
-
- return 0;
-}
-
-/* Compare class for the sort that does the partitioning. */
-int PartitionCompare::compare( const StateAp *state1, const StateAp *state2 )
-{
- int compareRes;
-
- /* Use a pair iterator to get the transition pairs. */
- PairIter<TransAp> outPair( state1->outList.head, state2->outList.head );
- for ( ; !outPair.end(); outPair++ ) {
- switch ( outPair.userState ) {
-
- case RangeInS1:
- compareRes = FsmAp::comparePartPtr( outPair.s1Tel.trans, 0 );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeInS2:
- compareRes = FsmAp::comparePartPtr( 0, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case RangeOverlap:
- compareRes = FsmAp::comparePartPtr(
- outPair.s1Tel.trans, outPair.s2Tel.trans );
- if ( compareRes != 0 )
- return compareRes;
- break;
-
- case BreakS1:
- case BreakS2:
- break;
- }
- }
-
- return 0;
-}
-
-/* Mark compare. Decides whether a pair of states should be marked as distinct. */
-bool MarkCompare::shouldMark( MarkIndex &markIndex, const StateAp *state1,
- const StateAp *state2 )
-{
- /* Use a pair iterator to get the transition pairs. */
- PairIter<TransAp> outPair( state1->outList.head, state2->outList.head );
- for ( ; !outPair.end(); outPair++ ) {
- switch ( outPair.userState ) {
-
- case RangeInS1:
- if ( FsmAp::shouldMarkPtr( markIndex, outPair.s1Tel.trans, 0 ) )
- return true;
- break;
-
- case RangeInS2:
- if ( FsmAp::shouldMarkPtr( markIndex, 0, outPair.s2Tel.trans ) )
- return true;
- break;
-
- case RangeOverlap:
- if ( FsmAp::shouldMarkPtr( markIndex,
- outPair.s1Tel.trans, outPair.s2Tel.trans ) )
- return true;
- break;
-
- case BreakS1:
- case BreakS2:
- break;
- }
- }
-
- return false;
-}
-
-/*
- * Transition Comparison.
- */
-
-/* Compare target partitions. Either pointer may be null. */
-int FsmAp::comparePartPtr( TransAp *trans1, TransAp *trans2 )
-{
- if ( trans1 != 0 ) {
- /* If trans1 is set then so should trans2. The initial partitioning
- * guarantees this for us. */
- if ( trans1->toState == 0 && trans2->toState != 0 )
- return -1;
- else if ( trans1->toState != 0 && trans2->toState == 0 )
- return 1;
- else if ( trans1->toState != 0 ) {
- /* Both of targets are set. */
- return CmpOrd< MinPartition* >::compare(
- trans1->toState->alg.partition, trans2->toState->alg.partition );
- }
- }
- return 0;
-}
-
-
-/* Compares two transition pointers according to priority and functions.
- * Either pointer may be null. Does not consider to state or from state. */
-int FsmAp::compareDataPtr( TransAp *trans1, TransAp *trans2 )
-{
- if ( trans1 == 0 && trans2 != 0 )
- return -1;
- else if ( trans1 != 0 && trans2 == 0 )
- return 1;
- else if ( trans1 != 0 ) {
- /* Both of the transition pointers are set. */
- int compareRes = compareTransData( trans1, trans2 );
- if ( compareRes != 0 )
- return compareRes;
- }
- return 0;
-}
-
-/* Compares two transitions according to target state, priority and functions.
- * Does not consider from state. Either of the pointers may be null. */
-int FsmAp::compareFullPtr( TransAp *trans1, TransAp *trans2 )
-{
- if ( (trans1 != 0) ^ (trans2 != 0) ) {
- /* Exactly one of the transitions is set. */
- if ( trans1 != 0 )
- return -1;
- else
- return 1;
- }
- else if ( trans1 != 0 ) {
- /* Both of the transition pointers are set. Test target state,
- * priority and funcs. */
- if ( trans1->toState < trans2->toState )
- return -1;
- else if ( trans1->toState > trans2->toState )
- return 1;
- else if ( trans1->toState != 0 ) {
- /* Test transition data. */
- int compareRes = compareTransData( trans1, trans2 );
- if ( compareRes != 0 )
- return compareRes;
- }
- }
- return 0;
-}
-
-
-bool FsmAp::shouldMarkPtr( MarkIndex &markIndex, TransAp *trans1,
- TransAp *trans2 )
-{
- if ( (trans1 != 0) ^ (trans2 != 0) ) {
- /* Exactly one of the transitions is set. The initial mark round
- * should rule out this case. */
- assert( false );
- }
- else if ( trans1 != 0 ) {
- /* Both of the transitions are set. If the target pair is marked, then
- * the pair we are considering gets marked. */
- return markIndex.isPairMarked( trans1->toState->alg.stateNum,
- trans2->toState->alg.stateNum );
- }
-
-	/* Neither of the transitions is set. */
- return false;
-}
-
-
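The compare classes above all follow the same pattern: walk the two states' sorted out-transition lists in parallel with a PairIter, classify each step as a range present only in s1, only in s2, or an overlap, and compare the transitions at each step. A simplified self-contained sketch of that parallel walk over invented integer key ranges; a range present on only one side orders that state first, loosely mirroring compareFullPtr() with a null argument.

#include <cstdio>
#include <vector>

/* Invented stand-in for a state's sorted, non-overlapping out ranges. */
struct Range { int low, high, target; };

static int compareRangeLists( const std::vector<Range> &a, const std::vector<Range> &b )
{
	size_t i = 0, j = 0;
	while ( i < a.size() && j < b.size() ) {
		if ( a[i].high < b[j].low )
			return -1;                      /* range only in a */
		if ( b[j].high < a[i].low )
			return 1;                       /* range only in b */

		/* Overlapping span: compare the targets. */
		if ( a[i].target < b[j].target ) return -1;
		if ( a[i].target > b[j].target ) return 1;

		/* Advance past whichever range ends first. */
		if ( a[i].high < b[j].high ) i++;
		else if ( b[j].high < a[i].high ) j++;
		else { i++; j++; }
	}
	if ( i < a.size() ) return -1;
	if ( j < b.size() ) return 1;
	return 0;
}

int main()
{
	Range r1[] = { { 'a', 'f', 1 } };
	Range r2[] = { { 'a', 'c', 1 }, { 'd', 'f', 2 } };
	std::vector<Range> s1( r1, r1 + 1 ), s2( r2, r2 + 2 );
	printf( "%d\n", compareRangeLists( s1, s2 ) );  /* -1: targets differ on d-f */
	return 0;
}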
diff --git a/contrib/tools/ragel5/ragel/main.cpp b/contrib/tools/ragel5/ragel/main.cpp
deleted file mode 100644
index a22a34f1b0..0000000000
--- a/contrib/tools/ragel5/ragel/main.cpp
+++ /dev/null
@@ -1,355 +0,0 @@
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <iostream>
-#include <fstream>
-#ifndef _WIN32
-# include <unistd.h>
-#endif
-#include <sstream>
-
-/* Parsing. */
-#include "ragel.h"
-#include "rlscan.h"
-
-/* Parameters and output. */
-#include "pcheck.h"
-#include "vector.h"
-#include "version.h"
-#include "common.h"
-
-#ifdef _MSC_VER
-# define strncasecmp _strnicmp
-# define strcasecmp _stricmp
-#endif
-
-using std::istream;
-using std::ostream;
-using std::ifstream;
-using std::ofstream;
-using std::cin;
-using std::cout;
-using std::cerr;
-using std::endl;
-
-/* Controls minimization. */
-MinimizeLevel minimizeLevel = MinimizePartition2;
-MinimizeOpt minimizeOpt = MinimizeMostOps;
-
-/* Graphviz dot file generation. */
-char *machineSpec = 0, *machineName = 0;
-bool machineSpecFound = false;
-
-bool printStatistics = false;
-
-/* Print a summary of the options. */
-void usage()
-{
- cout <<
-"usage: ragel [options] file\n"
-"general:\n"
-" -h, -H, -?, --help Print this usage and exit\n"
-" -v, --version Print version information and exit\n"
-" -o <file> Write output to <file>\n"
-" -s Print some statistics on stderr\n"
-"fsm minimization:\n"
-" -n Do not perform minimization\n"
-" -m Minimize at the end of the compilation\n"
-" -l Minimize after most operations (default)\n"
-" -e Minimize after every operation\n"
-"machine selection:\n"
-" -S <spec> FSM specification to output for -V\n"
-" -M <machine> Machine definition/instantiation to output for -V\n"
-"host language:\n"
-" -C The host language is C, C++, Obj-C or Obj-C++ (default)\n"
-" -D The host language is D\n"
-" -J The host language is Java\n"
-" -R The host language is Ruby\n"
- ;
-}
-
-/* Print version information. */
-void version()
-{
- cout << "Ragel State Machine Compiler version " VERSION << " " PUBDATE << endl <<
- "Copyright (c) 2001-2006 by Adrian Thurston" << endl;
-}
-
-/* Total error count. */
-int gblErrorCount = 0;
-
-/* Print the opening to a warning in the input, then return the error ostream. */
-ostream &warning( const InputLoc &loc )
-{
- assert( loc.fileName != 0 );
- cerr << loc.fileName << ":" << loc.line << ":" <<
- loc.col << ": warning: ";
- return cerr;
-}
-
-/* Print the opening to a program error, then return the error stream. */
-ostream &error()
-{
- gblErrorCount += 1;
- cerr << PROGNAME ": ";
- return cerr;
-}
-
-ostream &error( const InputLoc &loc )
-{
- gblErrorCount += 1;
- assert( loc.fileName != 0 );
- cerr << loc.fileName << ":" << loc.line << ": ";
- return cerr;
-}
-
-void escapeLineDirectivePath( std::ostream &out, char *path )
-{
- for ( char *pc = path; *pc != 0; pc++ ) {
- if ( *pc == '\\' )
- out << "\\\\";
- else
- out << *pc;
- }
-}
-
-/* Main, process args and call yyparse to start scanning input. */
-int main(int argc, char **argv)
-{
- ParamCheck pc("o:nmleabjkS:M:CDJRvHh?-:s", argc, argv);
- char *inputFileName = 0;
- char inputFileNameArr[] = "<stdin>";
- char *outputFileName = 0;
-
- while ( pc.check() ) {
- switch ( pc.state ) {
- case ParamCheck::match:
- switch ( pc.parameter ) {
- /* Output. */
- case 'o':
- if ( *pc.parameterArg == 0 )
- error() << "a zero length output file name was given" << endl;
- else if ( outputFileName != 0 )
- error() << "more than one output file name was given" << endl;
- else {
- /* Ok, remember the output file name. */
- outputFileName = pc.parameterArg;
- }
- break;
-
- /* Minimization, mostly hidden options. */
- case 'n':
- minimizeOpt = MinimizeNone;
- break;
- case 'm':
- minimizeOpt = MinimizeEnd;
- break;
- case 'l':
- minimizeOpt = MinimizeMostOps;
- break;
- case 'e':
- minimizeOpt = MinimizeEveryOp;
- break;
- case 'a':
- minimizeLevel = MinimizeApprox;
- break;
- case 'b':
- minimizeLevel = MinimizeStable;
- break;
- case 'j':
- minimizeLevel = MinimizePartition1;
- break;
- case 'k':
- minimizeLevel = MinimizePartition2;
- break;
-
- /* Machine spec. */
- case 'S':
- if ( *pc.parameterArg == 0 )
- error() << "please specify an argument to -S" << endl;
- else if ( machineSpec != 0 )
- error() << "more than one -S argument was given" << endl;
- else {
- /* Ok, remember the path to the machine to generate. */
- machineSpec = pc.parameterArg;
- }
- break;
-
- /* Machine path. */
- case 'M':
- if ( *pc.parameterArg == 0 )
- error() << "please specify an argument to -M" << endl;
- else if ( machineName != 0 )
- error() << "more than one -M argument was given" << endl;
- else {
- /* Ok, remember the machine name to generate. */
- machineName = pc.parameterArg;
- }
- break;
-
- /* Host language types. */
- case 'C':
- hostLangType = CCode;
- hostLang = &hostLangC;
- break;
- case 'D':
- hostLangType = DCode;
- hostLang = &hostLangD;
- break;
- case 'J':
- hostLangType = JavaCode;
- hostLang = &hostLangJava;
- break;
- case 'R':
- hostLangType = RubyCode;
- hostLang = &hostLangRuby;
- break;
-
- /* Version and help. */
- case 'v':
- version();
- exit(0);
- case 'H': case 'h': case '?':
- usage();
- exit(0);
- case 's':
- printStatistics = true;
- break;
- case '-':
- if ( strcasecmp(pc.parameterArg, "help") == 0 ) {
- usage();
- exit(0);
- }
- else if ( strcasecmp(pc.parameterArg, "version") == 0 ) {
- version();
- exit(0);
- }
- else {
- error() << "--" << pc.parameterArg <<
- " is an invalid argument" << endl;
- }
- }
- break;
-
- case ParamCheck::invalid:
- error() << "-" << pc.parameter << " is an invalid argument" << endl;
- break;
-
- case ParamCheck::noparam:
- /* It is interpreted as an input file. */
- if ( *pc.curArg == 0 )
- error() << "a zero length input file name was given" << endl;
- else if ( inputFileName != 0 )
- error() << "more than one input file name was given" << endl;
- else {
- /* OK, Remember the filename. */
- inputFileName = pc.curArg;
- }
- break;
- }
- }
-
- /* Bail on above errors. */
- if ( gblErrorCount > 0 )
- exit(1);
-
- /* Make sure we are not writing to the same file as the input file. */
- if ( inputFileName != 0 && outputFileName != 0 &&
- strcmp( inputFileName, outputFileName ) == 0 )
- {
- error() << "output file \"" << outputFileName <<
- "\" is the same as the input file" << endl;
- }
-
- /* Open the input file for reading. */
- istream *inStream;
- if ( inputFileName != 0 ) {
- /* Open the input file for reading. */
- ifstream *inFile = new ifstream( inputFileName );
- inStream = inFile;
- if ( ! inFile->is_open() )
- error() << "could not open " << inputFileName << " for reading" << endl;
- }
- else {
- inStream = &cin;
- }
-
-
- /* Bail on above errors. */
- if ( gblErrorCount > 0 )
- exit(1);
-
- std::ostringstream outputBuffer;
-
- if ( machineSpec == 0 && machineName == 0 )
- outputBuffer << "<host line=\"1\" col=\"1\">";
-
-#if defined _WIN32 || defined _WIN64
- if (inputFileName != 0) {
- NormalizeWinPath(inputFileName);
- }
-#endif
- if (inputFileName == 0) {
- inputFileName = inputFileNameArr;
- }
-
- if (strrchr(inputFileName, '/') == NULL) {
- error() << "input file path should be absolute: " << inputFileName << endl;
- exit(1);
- }
-
- Scanner scanner( inputFileName, *inStream, outputBuffer, 0, 0, 0, false );
- scanner.do_scan();
-
-	/* Finished, final check for errors. */
- if ( gblErrorCount > 0 )
- return 1;
-
- /* Now send EOF to all parsers. */
- terminateAllParsers();
-
-	/* Finished, final check for errors. */
- if ( gblErrorCount > 0 )
- return 1;
-
- if ( machineSpec == 0 && machineName == 0 )
- outputBuffer << "</host>\n";
-
- if ( gblErrorCount > 0 )
- return 1;
-
- ostream *outputFile = 0;
- if ( outputFileName != 0 )
- outputFile = new ofstream( outputFileName );
- else
- outputFile = &cout;
-
- /* Write the machines, then the surrounding code. */
- writeMachines( *outputFile, outputBuffer.str(), inputFileName );
-
- if ( outputFileName != 0 )
- delete outputFile;
-
- return 0;
-}
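The minimization flags accepted above map onto two independent knobs: when to minimize (-n/-m/-l/-e) and which algorithm to use (-a/-b/-j/-k). A hypothetical standalone sketch of just that mapping; it does not use ParamCheck and handles no other options.

#include <cstdio>
#include <cstring>

enum MinimizeLevel { MinimizeApprox, MinimizeStable, MinimizePartition1, MinimizePartition2 };
enum MinimizeOpt { MinimizeNone, MinimizeEnd, MinimizeMostOps, MinimizeEveryOp };

int main( int argc, char **argv )
{
	/* Defaults as in main.cpp above. */
	MinimizeLevel level = MinimizePartition2;
	MinimizeOpt opt = MinimizeMostOps;

	for ( int i = 1; i < argc; i++ ) {
		if ( strcmp( argv[i], "-n" ) == 0 ) opt = MinimizeNone;
		else if ( strcmp( argv[i], "-m" ) == 0 ) opt = MinimizeEnd;
		else if ( strcmp( argv[i], "-l" ) == 0 ) opt = MinimizeMostOps;
		else if ( strcmp( argv[i], "-e" ) == 0 ) opt = MinimizeEveryOp;
		else if ( strcmp( argv[i], "-a" ) == 0 ) level = MinimizeApprox;
		else if ( strcmp( argv[i], "-b" ) == 0 ) level = MinimizeStable;
		else if ( strcmp( argv[i], "-j" ) == 0 ) level = MinimizePartition1;
		else if ( strcmp( argv[i], "-k" ) == 0 ) level = MinimizePartition2;
	}

	printf( "opt=%d level=%d\n", (int)opt, (int)level );
	return 0;
}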
diff --git a/contrib/tools/ragel5/ragel/parsedata.cpp b/contrib/tools/ragel5/ragel/parsedata.cpp
deleted file mode 100644
index 3e14cc618a..0000000000
--- a/contrib/tools/ragel5/ragel/parsedata.cpp
+++ /dev/null
@@ -1,1505 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <iostream>
-#include <iomanip>
-#include <errno.h>
-#include <stdlib.h>
-#include <limits.h>
-
-#include "ragel.h"
-#include "rlparse.h"
-#include "parsedata.h"
-#include "parsetree.h"
-#include "mergesort.h"
-#include "xmlcodegen.h"
-
-using namespace std;
-
-char mainMachine[] = "main";
-
-void Token::set(const char *str, int len )
-{
- length = len;
- data = new char[len+1];
- memcpy( data, str, len );
- data[len] = 0;
-}
-
-void Token::append( const Token &other )
-{
- int newLength = length + other.length;
- char *newString = new char[newLength+1];
- memcpy( newString, data, length );
- memcpy( newString + length, other.data, other.length );
- newString[newLength] = 0;
- data = newString;
- length = newLength;
-}
-
-/* Perform minimization after an operation according
- * to the command line args. */
-void afterOpMinimize( FsmAp *fsm, bool lastInSeq )
-{
-	/* Switch on the preferred minimization algorithm. */
- if ( minimizeOpt == MinimizeEveryOp || minimizeOpt == MinimizeMostOps && lastInSeq ) {
- /* First clean up the graph. FsmAp operations may leave these
-		 * lying around. There should be no dead end states. The subtract
-		 * and intersection operators are the only places where they may be
- * created and those operators clean them up. */
- fsm->removeUnreachableStates();
-
- switch ( minimizeLevel ) {
- case MinimizeApprox:
- fsm->minimizeApproximate();
- break;
- case MinimizePartition1:
- fsm->minimizePartition1();
- break;
- case MinimizePartition2:
- fsm->minimizePartition2();
- break;
- case MinimizeStable:
- fsm->minimizeStable();
- break;
- }
- }
-}
-
-/* Count the transitions in the fsm by walking the state list. */
-int countTransitions( FsmAp *fsm )
-{
- int numTrans = 0;
- StateAp *state = fsm->stateList.head;
- while ( state != 0 ) {
- numTrans += state->outList.length();
- state = state->next;
- }
- return numTrans;
-}
-
-Key makeFsmKeyHex( char *str, const InputLoc &loc, ParseData *pd )
-{
- /* Reset errno so we can check for overflow or underflow. In the event of
- * an error, sets the return val to the upper or lower bound being tested
- * against. */
- errno = 0;
- unsigned int size = keyOps->alphType->size;
- bool unusedBits = size < sizeof(unsigned long);
-
- unsigned long ul = strtoul( str, 0, 16 );
-
- if ( errno == ERANGE || unusedBits && ul >> (size * 8) ) {
- error(loc) << "literal " << str << " overflows the alphabet type" << endl;
- ul = 1 << (size * 8);
- }
-
- if ( unusedBits && keyOps->alphType->isSigned && ul >> (size * 8 - 1) )
- ul |= (0xffffffff >> (size*8 ) ) << (size*8);
-
- return Key( (long)ul );
-}
-
-#ifdef _MSC_VER
-# define strtoll _strtoi64
-#endif
-
-Key makeFsmKeyDec( char *str, const InputLoc &loc, ParseData *pd )
-{
-	/* Convert the decimal string to a number. First reset errno so we can check
- * for overflow or underflow. */
- errno = 0;
- long long minVal = keyOps->alphType->minVal;
- long long maxVal = keyOps->alphType->maxVal;
-
- long long ll = strtoll( str, 0, 10 );
-
- /* Check for underflow. */
- if ( errno == ERANGE && ll < 0 || ll < minVal) {
- error(loc) << "literal " << str << " underflows the alphabet type" << endl;
- ll = minVal;
- }
- /* Check for overflow. */
- else if ( errno == ERANGE && ll > 0 || ll > maxVal ) {
- error(loc) << "literal " << str << " overflows the alphabet type" << endl;
- ll = maxVal;
- }
-
- if ( keyOps->alphType->isSigned )
- return Key( (long)ll );
- else
- return Key( (unsigned long)ll );
-}
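/* Illustration (not part of the Ragel sources): a minimal standalone sketch of
 * the errno-based range check used by the key conversions above, relying only
 * on the C standard library. parseClamped() is a hypothetical helper name. */
#include <cerrno>
#include <cstdlib>

long long parseClamped( const char *str, long long minVal, long long maxVal, bool &outOfRange )
{
	errno = 0;                               /* reset so ERANGE is meaningful */
	long long ll = strtoll( str, 0, 10 );
	outOfRange = false;

	if ( (errno == ERANGE && ll < 0) || ll < minVal ) {
		outOfRange = true;               /* underflow: clamp to the lower bound */
		ll = minVal;
	}
	else if ( (errno == ERANGE && ll > 0) || ll > maxVal ) {
		outOfRange = true;               /* overflow: clamp to the upper bound */
		ll = maxVal;
	}
	return ll;
}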
-
-/* Make an fsm key in int format (what the fsm graph uses) from an alphabet
- * number returned by the parser. Validates that the number doesn't overflow
- * the alphabet type. */
-Key makeFsmKeyNum( char *str, const InputLoc &loc, ParseData *pd )
-{
- /* Switch on hex/decimal format. */
- if ( str[0] == '0' && str[1] == 'x' )
- return makeFsmKeyHex( str, loc, pd );
- else
- return makeFsmKeyDec( str, loc, pd );
-}
-
-/* Make an fsm key in int format (what the fsm graph uses) from a single character.
- * Performs proper conversion depending on signed/unsigned property of the
- * alphabet. */
-Key makeFsmKeyChar( char c, ParseData *pd )
-{
- if ( keyOps->isSigned ) {
- /* Copy from a char type. */
- return Key( c );
- }
- else {
- /* Copy from an unsigned byte type. */
- return Key( (unsigned char)c );
- }
-}
-
-/* Make an fsm key array in int format (what the fsm graph uses) from a string
- * of characters. Performs proper conversion depending on signed/unsigned
- * property of the alphabet. */
-void makeFsmKeyArray( Key *result, char *data, int len, ParseData *pd )
-{
- if ( keyOps->isSigned ) {
- /* Copy from a char star type. */
- char *src = data;
- for ( int i = 0; i < len; i++ )
- result[i] = Key(src[i]);
- }
- else {
- /* Copy from an unsigned byte ptr type. */
- unsigned char *src = (unsigned char*) data;
- for ( int i = 0; i < len; i++ )
- result[i] = Key(src[i]);
- }
-}
-
-/* Like makeFsmKeyArray except the result has only unique keys. The ordering
- * will be changed. */
-void makeFsmUniqueKeyArray( KeySet &result, char *data, int len,
- bool caseInsensitive, ParseData *pd )
-{
- /* Use a transitions list for getting unique keys. */
- if ( keyOps->isSigned ) {
- /* Copy from a char star type. */
- char *src = data;
- for ( int si = 0; si < len; si++ ) {
- Key key( src[si] );
- result.insert( key );
- if ( caseInsensitive ) {
- if ( key.isLower() )
- result.insert( key.toUpper() );
- else if ( key.isUpper() )
- result.insert( key.toLower() );
- }
- }
- }
- else {
- /* Copy from an unsigned byte ptr type. */
- unsigned char *src = (unsigned char*) data;
- for ( int si = 0; si < len; si++ ) {
- Key key( src[si] );
- result.insert( key );
- if ( caseInsensitive ) {
- if ( key.isLower() )
- result.insert( key.toUpper() );
- else if ( key.isUpper() )
- result.insert( key.toLower() );
- }
- }
- }
-}
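/* Illustration (not part of the Ragel sources): the same unique-key collection
 * expressed with std::set<int> in place of Ragel's KeySet/Key types. */
#include <cctype>
#include <set>

void collectUniqueKeys( std::set<int> &result, const char *data, int len, bool caseInsensitive )
{
	for ( int i = 0; i < len; i++ ) {
		unsigned char c = (unsigned char)data[i];
		result.insert( c );
		if ( caseInsensitive ) {
			if ( islower( c ) )
				result.insert( toupper( c ) );  /* also admit the upper-case twin */
			else if ( isupper( c ) )
				result.insert( tolower( c ) );  /* also admit the lower-case twin */
		}
	}
}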
-
-FsmAp *dotFsm( ParseData *pd )
-{
- FsmAp *retFsm = new FsmAp();
- retFsm->rangeFsm( keyOps->minKey, keyOps->maxKey );
- return retFsm;
-}
-
-FsmAp *dotStarFsm( ParseData *pd )
-{
- FsmAp *retFsm = new FsmAp();
- retFsm->rangeStarFsm( keyOps->minKey, keyOps->maxKey );
- return retFsm;
-}
-
-/* Make a builtin type. Depends on the signed nature of the alphabet type. */
-FsmAp *makeBuiltin( BuiltinMachine builtin, ParseData *pd )
-{
- /* FsmAp created to return. */
- FsmAp *retFsm = 0;
- bool isSigned = keyOps->isSigned;
-
- switch ( builtin ) {
- case BT_Any: {
- /* All characters. */
- retFsm = dotFsm( pd );
- break;
- }
- case BT_Ascii: {
- /* Ascii characters 0 to 127. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( 0, 127 );
- break;
- }
- case BT_Extend: {
- /* Ascii extended characters. This is the full byte range. Dependent
-		 * on signed vs. unsigned. If the alphabet is one byte then just use
- * dot fsm. */
- if ( isSigned ) {
- retFsm = new FsmAp();
- retFsm->rangeFsm( -128, 127 );
- }
- else {
- retFsm = new FsmAp();
- retFsm->rangeFsm( 0, 255 );
- }
- break;
- }
- case BT_Alpha: {
- /* Alpha [A-Za-z]. */
- FsmAp *upper = new FsmAp(), *lower = new FsmAp();
- upper->rangeFsm( 'A', 'Z' );
- lower->rangeFsm( 'a', 'z' );
- upper->unionOp( lower );
- upper->minimizePartition2();
- retFsm = upper;
- break;
- }
- case BT_Digit: {
- /* Digits [0-9]. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( '0', '9' );
- break;
- }
- case BT_Alnum: {
- /* Alpha numerics [0-9A-Za-z]. */
- FsmAp *digit = new FsmAp(), *lower = new FsmAp();
- FsmAp *upper = new FsmAp();
- digit->rangeFsm( '0', '9' );
- upper->rangeFsm( 'A', 'Z' );
- lower->rangeFsm( 'a', 'z' );
- digit->unionOp( upper );
- digit->unionOp( lower );
- digit->minimizePartition2();
- retFsm = digit;
- break;
- }
- case BT_Lower: {
- /* Lower case characters. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( 'a', 'z' );
- break;
- }
- case BT_Upper: {
- /* Upper case characters. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( 'A', 'Z' );
- break;
- }
- case BT_Cntrl: {
- /* Control characters. */
- FsmAp *cntrl = new FsmAp();
- FsmAp *highChar = new FsmAp();
- cntrl->rangeFsm( 0, 31 );
- highChar->concatFsm( 127 );
- cntrl->unionOp( highChar );
- cntrl->minimizePartition2();
- retFsm = cntrl;
- break;
- }
- case BT_Graph: {
- /* Graphical ascii characters [!-~]. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( '!', '~' );
- break;
- }
- case BT_Print: {
- /* Printable characters. Same as graph except includes space. */
- retFsm = new FsmAp();
- retFsm->rangeFsm( ' ', '~' );
- break;
- }
- case BT_Punct: {
- /* Punctuation. */
- FsmAp *range1 = new FsmAp();
- FsmAp *range2 = new FsmAp();
- FsmAp *range3 = new FsmAp();
- FsmAp *range4 = new FsmAp();
- range1->rangeFsm( '!', '/' );
- range2->rangeFsm( ':', '@' );
- range3->rangeFsm( '[', '`' );
- range4->rangeFsm( '{', '~' );
- range1->unionOp( range2 );
- range1->unionOp( range3 );
- range1->unionOp( range4 );
- range1->minimizePartition2();
- retFsm = range1;
- break;
- }
- case BT_Space: {
- /* Whitespace: [\t\v\f\n\r ]. */
- FsmAp *cntrl = new FsmAp();
- FsmAp *space = new FsmAp();
- cntrl->rangeFsm( '\t', '\r' );
- space->concatFsm( ' ' );
- cntrl->unionOp( space );
- cntrl->minimizePartition2();
- retFsm = cntrl;
- break;
- }
- case BT_Xdigit: {
- /* Hex digits [0-9A-Fa-f]. */
- FsmAp *digit = new FsmAp();
- FsmAp *upper = new FsmAp();
- FsmAp *lower = new FsmAp();
- digit->rangeFsm( '0', '9' );
- upper->rangeFsm( 'A', 'F' );
- lower->rangeFsm( 'a', 'f' );
- digit->unionOp( upper );
- digit->unionOp( lower );
- digit->minimizePartition2();
- retFsm = digit;
- break;
- }
- case BT_Lambda: {
- retFsm = new FsmAp();
- retFsm->lambdaFsm();
- break;
- }
- case BT_Empty: {
- retFsm = new FsmAp();
- retFsm->emptyFsm();
- break;
- }}
-
- return retFsm;
-}
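/* Illustration (not part of the Ragel sources): each builtin above is just a
 * union of character ranges. A sketch with std::pair ranges standing in for
 * the FsmAp range/union machinery; makeAlnumRanges() is a hypothetical name. */
#include <utility>
#include <vector>

typedef std::vector< std::pair<char, char> > CharClass;

CharClass makeAlnumRanges()
{
	CharClass cls;
	cls.push_back( std::make_pair( '0', '9' ) );  /* digit */
	cls.push_back( std::make_pair( 'A', 'Z' ) );  /* upper */
	cls.push_back( std::make_pair( 'a', 'z' ) );  /* lower */
	return cls;  /* alnum = digit + upper + lower, as in the BT_Alnum case */
}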
-
-/* Check if this name inst or any name inst below is referenced. */
-bool NameInst::anyRefsRec()
-{
- if ( numRefs > 0 )
- return true;
-
- /* Recurse on children until true. */
- for ( NameVect::Iter ch = childVect; ch.lte(); ch++ ) {
- if ( (*ch)->anyRefsRec() )
- return true;
- }
-
- return false;
-}
-
-/*
- * ParseData
- */
-
-/* Initialize the structure that will collect info during the parse of a
- * machine. */
-ParseData::ParseData(const char *fileName, char *sectionName,
- const InputLoc &sectionLoc )
-:
- sectionGraph(0),
- generatingSectionSubset(false),
- nextPriorKey(0),
- /* 0 is reserved for global error actions. */
- nextLocalErrKey(1),
- nextNameId(0),
- nextCondId(0),
- alphTypeSet(false),
- getKeyExpr(0),
- accessExpr(0),
- curStateExpr(0),
- lowerNum(0),
- upperNum(0),
- fileName(fileName),
- sectionName(sectionName),
- sectionLoc(sectionLoc),
- errorCount(0),
- curActionOrd(0),
- curPriorOrd(0),
- rootName(0),
- exportsRootName(0),
- nextEpsilonResolvedLink(0),
- nextLongestMatchId(1),
- lmRequiresErrorState(false)
-{
- /* Initialize the dictionary of graphs. This is our symbol table. The
- * initialization needs to be done on construction which happens at the
- * beginning of a machine spec so any assignment operators can reference
- * the builtins. */
- initGraphDict();
-}
-
-/* Clean up the data collected during a parse. */
-ParseData::~ParseData()
-{
- /* Delete all the nodes in the action list. Will cause all the
- * string data that represents the actions to be deallocated. */
- actionList.empty();
-}
-
-/* Make a name id in the current name instantiation scope if it is not
- * already there. */
-NameInst *ParseData::addNameInst( const InputLoc &loc, const char *data, bool isLabel )
-{
-	/* Create the name instantiation object and insert it. */
- NameInst *newNameInst = new NameInst( loc, curNameInst, data, nextNameId++, isLabel );
- curNameInst->childVect.append( newNameInst );
- if ( data != 0 )
- curNameInst->children.insertMulti( data, newNameInst );
- return newNameInst;
-}
-
-void ParseData::initNameWalk()
-{
- curNameInst = rootName;
- curNameChild = 0;
-}
-
-void ParseData::initExportsNameWalk()
-{
- curNameInst = exportsRootName;
- curNameChild = 0;
-}
-
-/* Goes into the next child scope. The number of the child is already set up.
- * We need this for the synchronous name tree and parse tree walk to work
- * properly. It is reset on entry into a scope and advanced on popping of a
- * scope. A call to enterNameScope should be accompanied by a corresponding
- * popNameScope. */
-NameFrame ParseData::enterNameScope( bool isLocal, int numScopes )
-{
- /* Save off the current data. */
- NameFrame retFrame;
- retFrame.prevNameInst = curNameInst;
- retFrame.prevNameChild = curNameChild;
- retFrame.prevLocalScope = localNameScope;
-
- /* Enter into the new name scope. */
- for ( int i = 0; i < numScopes; i++ ) {
- curNameInst = curNameInst->childVect[curNameChild];
- curNameChild = 0;
- }
-
- if ( isLocal )
- localNameScope = curNameInst;
-
- return retFrame;
-}
-
-/* Return from a child scope to a parent. The parent info must be specified as
- * an argument and is obtained from the corresponding call to enterNameScope.
- * */
-void ParseData::popNameScope( const NameFrame &frame )
-{
- /* Pop the name scope. */
- curNameInst = frame.prevNameInst;
- curNameChild = frame.prevNameChild+1;
- localNameScope = frame.prevLocalScope;
-}
-
-void ParseData::resetNameScope( const NameFrame &frame )
-{
- /* Pop the name scope. */
- curNameInst = frame.prevNameInst;
- curNameChild = frame.prevNameChild;
- localNameScope = frame.prevLocalScope;
-}
-
-
-void ParseData::unsetObsoleteEntries( FsmAp *graph )
-{
- /* Loop the reference names and increment the usage. Names that are no
- * longer needed will be unset in graph. */
- for ( NameVect::Iter ref = curNameInst->referencedNames; ref.lte(); ref++ ) {
- /* Get the name. */
- NameInst *name = *ref;
- name->numUses += 1;
-
- /* If the name is no longer needed unset its corresponding entry. */
- if ( name->numUses == name->numRefs ) {
- assert( graph->entryPoints.find( name->id ) != 0 );
- graph->unsetEntry( name->id );
- assert( graph->entryPoints.find( name->id ) == 0 );
- }
- }
-}
-
-NameSet ParseData::resolvePart( NameInst *refFrom, const char *data, bool recLabelsOnly )
-{
- /* Queue needed for breadth-first search, load it with the start node. */
- NameInstList nameQueue;
- nameQueue.append( refFrom );
-
- NameSet result;
- while ( nameQueue.length() > 0 ) {
- /* Pull the next from location off the queue. */
- NameInst *from = nameQueue.detachFirst();
-
- /* Look for the name. */
- NameMapEl *low, *high;
- if ( from->children.findMulti( data, low, high ) ) {
- /* Record all instances of the name. */
- for ( ; low <= high; low++ )
- result.insert( low->value );
- }
-
- /* Name not there, do breadth-first operation of appending all
-		 * children to the processing queue. */
- for ( NameVect::Iter name = from->childVect; name.lte(); name++ ) {
- if ( !recLabelsOnly || (*name)->isLabel )
- nameQueue.append( *name );
- }
- }
-
- /* Queue exhausted and name never found. */
- return result;
-}
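/* Illustration (not part of the Ragel sources): the lookup above is a plain
 * breadth-first walk of the name tree. A self-contained sketch with std::queue
 * and a hypothetical Node type in place of NameInst. */
#include <cstring>
#include <queue>
#include <vector>

struct Node
{
	const char *name;
	std::vector<Node*> children;
};

std::vector<Node*> findAllByName( Node *root, const char *name )
{
	std::vector<Node*> result;
	std::queue<Node*> work;
	work.push( root );
	while ( !work.empty() ) {
		Node *cur = work.front();
		work.pop();
		if ( cur->name != 0 && strcmp( cur->name, name ) == 0 )
			result.push_back( cur );            /* record every match */
		for ( size_t i = 0; i < cur->children.size(); i++ )
			work.push( cur->children[i] );      /* breadth-first: enqueue children */
	}
	return result;
}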
-
-void ParseData::resolveFrom( NameSet &result, NameInst *refFrom,
- const NameRef &nameRef, int namePos )
-{
- /* Look for the name in the owning scope of the factor with aug. */
- NameSet partResult = resolvePart( refFrom, nameRef[namePos], false );
-
- /* If there are more parts to the name then continue on. */
- if ( ++namePos < nameRef.length() ) {
- /* There are more components to the name, search using all the part
- * results as the base. */
- for ( NameSet::Iter name = partResult; name.lte(); name++ )
- resolveFrom( result, *name, nameRef, namePos );
- }
- else {
- /* This is the last component, append the part results to the final
- * results. */
- result.insert( partResult );
- }
-}
-
-/* Write out a name reference. */
-ostream &operator<<( ostream &out, const NameRef &nameRef )
-{
- int pos = 0;
- if ( nameRef[pos] == 0 ) {
- out << "::";
- pos += 1;
- }
- out << nameRef[pos++];
- for ( ; pos < nameRef.length(); pos++ )
- out << "::" << nameRef[pos];
- return out;
-}
-
-ostream &operator<<( ostream &out, const NameInst &nameInst )
-{
- /* Count the number fully qualified name parts. */
- int numParents = 0;
- NameInst *curParent = nameInst.parent;
- while ( curParent != 0 ) {
- numParents += 1;
- curParent = curParent->parent;
- }
-
- /* Make an array and fill it in. */
- curParent = nameInst.parent;
- NameInst **parents = new NameInst*[numParents];
- for ( int p = numParents-1; p >= 0; p-- ) {
- parents[p] = curParent;
- curParent = curParent->parent;
- }
-
- /* Write the parents out, skip the root. */
- for ( int p = 1; p < numParents; p++ )
- out << "::" << ( parents[p]->name != 0 ? parents[p]->name : "<ANON>" );
-
- /* Write the name and cleanup. */
- out << "::" << ( nameInst.name != 0 ? nameInst.name : "<ANON>" );
- delete[] parents;
- return out;
-}
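/* Illustration (not part of the Ragel sources): printing a fully qualified
 * name amounts to collecting the ancestor chain and emitting it root-first.
 * TreeNode and qualifiedName() are hypothetical stand-ins. */
#include <string>
#include <vector>

struct TreeNode { const char *name; TreeNode *parent; };

std::string qualifiedName( const TreeNode &node )
{
	std::vector<const TreeNode*> chain;
	for ( const TreeNode *cur = &node; cur != 0; cur = cur->parent )
		chain.push_back( cur );                     /* leaf first, root last */

	std::string out;
	for ( size_t i = chain.size(); i-- > 0; ) {     /* emit root first */
		out += "::";
		out += ( chain[i]->name != 0 ? chain[i]->name : "<ANON>" );
	}
	return out;
}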
-
-struct CmpNameInstLoc
-{
- static int compare( const NameInst *ni1, const NameInst *ni2 )
- {
- if ( ni1->loc.line < ni2->loc.line )
- return -1;
- else if ( ni1->loc.line > ni2->loc.line )
- return 1;
- else if ( ni1->loc.col < ni2->loc.col )
- return -1;
- else if ( ni1->loc.col > ni2->loc.col )
- return 1;
- return 0;
- }
-};
-
-void errorStateLabels( const NameSet &resolved )
-{
- MergeSort<NameInst*, CmpNameInstLoc> mergeSort;
- mergeSort.sort( resolved.data, resolved.length() );
- for ( NameSet::Iter res = resolved; res.lte(); res++ )
- error((*res)->loc) << " -> " << **res << endl;
-}
-
-
-NameInst *ParseData::resolveStateRef( const NameRef &nameRef, InputLoc &loc, Action *action )
-{
- NameInst *nameInst = 0;
-
- /* Do the local search if the name is not strictly a root level name
- * search. */
- if ( nameRef[0] != 0 ) {
- /* If the action is referenced, resolve all of them. */
- if ( action != 0 && action->actionRefs.length() > 0 ) {
- /* Look for the name in all referencing scopes. */
- NameSet resolved;
- for ( ActionRefs::Iter actRef = action->actionRefs; actRef.lte(); actRef++ )
- resolveFrom( resolved, *actRef, nameRef, 0 );
-
- if ( resolved.length() > 0 ) {
- /* Take the first one. */
- nameInst = resolved[0];
- if ( resolved.length() > 1 ) {
- /* Complain about the multiple references. */
- error(loc) << "state reference " << nameRef <<
- " resolves to multiple entry points" << endl;
- errorStateLabels( resolved );
- }
- }
- }
- }
-
- /* If not found in the local scope, look in global. */
- if ( nameInst == 0 ) {
- NameSet resolved;
- int fromPos = nameRef[0] != 0 ? 0 : 1;
- resolveFrom( resolved, rootName, nameRef, fromPos );
-
- if ( resolved.length() > 0 ) {
- /* Take the first. */
- nameInst = resolved[0];
- if ( resolved.length() > 1 ) {
- /* Complain about the multiple references. */
- error(loc) << "state reference " << nameRef <<
- " resolves to multiple entry points" << endl;
- errorStateLabels( resolved );
- }
- }
- }
-
- if ( nameInst == 0 ) {
- /* If not found then complain. */
- error(loc) << "could not resolve state reference " << nameRef << endl;
- }
- return nameInst;
-}
-
-void ParseData::resolveNameRefs( InlineList *inlineList, Action *action )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Entry: case InlineItem::Goto:
- case InlineItem::Call: case InlineItem::Next: {
- /* Resolve, pass action for local search. */
- NameInst *target = resolveStateRef( *item->nameRef, item->loc, action );
-
- /* Check if the target goes into a longest match. */
- NameInst *search = target->parent;
- while ( search != 0 ) {
- if ( search->isLongestMatch ) {
- error(item->loc) << "cannot enter inside a longest "
- "match construction as an entry point" << endl;
- break;
- }
- search = search->parent;
- }
-
- /* Note the reference in the name. This will cause the entry
- * point to survive to the end of the graph generating walk. */
- if ( target != 0 )
- target->numRefs += 1;
- item->nameTarg = target;
- break;
- }
- default:
- break;
- }
-
- /* Some of the item types may have children. */
- if ( item->children != 0 )
- resolveNameRefs( item->children, action );
- }
-}
-
-/* Resolve references to labels in actions. */
-void ParseData::resolveActionNameRefs()
-{
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Only care about the actions that are referenced. */
- if ( act->actionRefs.length() > 0 )
- resolveNameRefs( act->inlineList, act );
- }
-}
-
-/* Walk a name tree starting at from and fill the name index. */
-void ParseData::fillNameIndex( NameInst *from )
-{
- /* Fill the value for from in the name index. */
- nameIndex[from->id] = from;
-
- /* Recurse on the implicit final state and then all children. */
- if ( from->final != 0 )
- fillNameIndex( from->final );
- for ( NameVect::Iter name = from->childVect; name.lte(); name++ )
- fillNameIndex( *name );
-}
-
-void ParseData::makeRootNames()
-{
- /* Create the root name. */
- rootName = new NameInst( InputLoc(), 0, 0, nextNameId++, false );
- exportsRootName = new NameInst( InputLoc(), 0, 0, nextNameId++, false );
-}
-
-/* Build the name tree and supporting data structures. */
-void ParseData::makeNameTree( GraphDictEl *dictEl )
-{
- /* Set up curNameInst for the walk. */
- initNameWalk();
-
- if ( dictEl != 0 ) {
- /* A start location has been specified. */
- dictEl->value->makeNameTree( dictEl->loc, this );
- }
- else {
- /* First make the name tree. */
- for ( GraphList::Iter glel = instanceList; glel.lte(); glel++ ) {
- /* Recurse on the instance. */
- glel->value->makeNameTree( glel->loc, this );
- }
- }
-
- /* The number of nodes in the tree can now be given by nextNameId */
- nameIndex = new NameInst*[nextNameId];
- memset( nameIndex, 0, sizeof(NameInst*)*nextNameId );
- fillNameIndex( rootName );
- fillNameIndex( exportsRootName );
-}
-
-
-void ParseData::createBuiltin(const char *name, BuiltinMachine builtin )
-{
- Expression *expression = new Expression( builtin );
- Join *join = new Join( expression );
- JoinOrLm *joinOrLm = new JoinOrLm( join );
- VarDef *varDef = new VarDef( name, joinOrLm );
- GraphDictEl *graphDictEl = new GraphDictEl( name, varDef );
- graphDict.insert( graphDictEl );
-}
-
-/* Initialize the graph dict with builtin types. */
-void ParseData::initGraphDict( )
-{
- createBuiltin( "any", BT_Any );
- createBuiltin( "ascii", BT_Ascii );
- createBuiltin( "extend", BT_Extend );
- createBuiltin( "alpha", BT_Alpha );
- createBuiltin( "digit", BT_Digit );
- createBuiltin( "alnum", BT_Alnum );
- createBuiltin( "lower", BT_Lower );
- createBuiltin( "upper", BT_Upper );
- createBuiltin( "cntrl", BT_Cntrl );
- createBuiltin( "graph", BT_Graph );
- createBuiltin( "print", BT_Print );
- createBuiltin( "punct", BT_Punct );
- createBuiltin( "space", BT_Space );
- createBuiltin( "xdigit", BT_Xdigit );
- createBuiltin( "null", BT_Lambda );
- createBuiltin( "zlen", BT_Lambda );
- createBuiltin( "empty", BT_Empty );
-}
-
-/* Set the alphabet type. If the types are not valid returns false. */
-bool ParseData::setAlphType( char *s1, char *s2 )
-{
- bool valid = false;
- for ( int i = 0; i < hostLang->numHostTypes; i++ ) {
- if ( strcmp( s1, hostLang->hostTypes[i].data1 ) == 0 &&
- hostLang->hostTypes[i].data2 != 0 &&
- strcmp( s2, hostLang->hostTypes[i].data2 ) == 0 )
- {
- valid = true;
- userAlphType = hostLang->hostTypes + i;
- break;
- }
- }
-
- alphTypeSet = true;
- return valid;
-}
-
-/* Set the alphabet type. If the types are not valid returns false. */
-bool ParseData::setAlphType( char *s1 )
-{
- bool valid = false;
- for ( int i = 0; i < hostLang->numHostTypes; i++ ) {
- if ( strcmp( s1, hostLang->hostTypes[i].data1 ) == 0 &&
- hostLang->hostTypes[i].data2 == 0 )
- {
- valid = true;
- userAlphType = hostLang->hostTypes + i;
- break;
- }
- }
-
- alphTypeSet = true;
- return valid;
-}
-
-/* Initialize the key operators object that will be referenced by all fsms
- * created. */
-void ParseData::initKeyOps( )
-{
- /* Signedness and bounds. */
- HostType *alphType = alphTypeSet ? userAlphType : hostLang->defaultAlphType;
- thisKeyOps.setAlphType( alphType );
-
- if ( lowerNum != 0 ) {
- /* If ranges are given then interpret the alphabet type. */
- thisKeyOps.minKey = makeFsmKeyNum( lowerNum, rangeLowLoc, this );
- thisKeyOps.maxKey = makeFsmKeyNum( upperNum, rangeHighLoc, this );
- }
-
- thisCondData.nextCondKey = thisKeyOps.maxKey;
- thisCondData.nextCondKey.increment();
-}
-
-void ParseData::printNameInst( NameInst *nameInst, int level )
-{
- for ( int i = 0; i < level; i++ )
- cerr << " ";
- cerr << (nameInst->name != 0 ? nameInst->name : "<ANON>") <<
- " id: " << nameInst->id <<
- " refs: " << nameInst->numRefs <<
- " uses: " << nameInst->numUses << endl;
- for ( NameVect::Iter name = nameInst->childVect; name.lte(); name++ )
- printNameInst( *name, level+1 );
-}
-
-/* Remove duplicates of unique actions from an action table. */
-void ParseData::removeDups( ActionTable &table )
-{
- /* Scan through the table looking for unique actions to
- * remove duplicates of. */
- for ( int i = 0; i < table.length(); i++ ) {
- /* Remove any duplicates ahead of i. */
- for ( int r = i+1; r < table.length(); ) {
- if ( table[r].value == table[i].value )
- table.vremove(r);
- else
- r += 1;
- }
- }
-}
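/* Illustration (not part of the Ragel sources): the same first-occurrence-wins
 * de-duplication over a plain std::vector (ActionTable is Ragel's own type). */
#include <vector>

template <class T>
void removeDuplicates( std::vector<T> &table )
{
	for ( size_t i = 0; i < table.size(); i++ ) {
		for ( size_t r = i + 1; r < table.size(); ) {
			if ( table[r] == table[i] )
				table.erase( table.begin() + r );  /* drop the later duplicate */
			else
				r += 1;
		}
	}
}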
-
-/* Remove duplicates from action lists. This operates only on transition and
- * eof action lists and so should be called once all actions have been
- * transferred to their final resting place. */
-void ParseData::removeActionDups( FsmAp *graph )
-{
- /* Loop all states. */
- for ( StateList::Iter state = graph->stateList; state.lte(); state++ ) {
- /* Loop all transitions. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ )
- removeDups( trans->actionTable );
- removeDups( state->toStateActionTable );
- removeDups( state->fromStateActionTable );
- removeDups( state->eofActionTable );
- }
-}
-
-Action *ParseData::newAction(const char *name, InlineList *inlineList )
-{
- InputLoc loc;
- loc.line = 1;
- loc.col = 1;
- loc.fileName = "<NONE>";
-
- Action *action = new Action( loc, name, inlineList, nextCondId++ );
- action->actionRefs.append( rootName );
- actionList.append( action );
- return action;
-}
-
-void ParseData::initLongestMatchData()
-{
- if ( lmList.length() > 0 ) {
- /* The initTokStart action resets the token start. */
- InlineList *il1 = new InlineList;
- il1->append( new InlineItem( InputLoc(), InlineItem::LmInitTokStart ) );
- initTokStart = newAction( "initts", il1 );
- initTokStart->isLmAction = true;
-
- /* The initActId action gives act a default value. */
- InlineList *il4 = new InlineList;
- il4->append( new InlineItem( InputLoc(), InlineItem::LmInitAct ) );
- initActId = newAction( "initact", il4 );
- initActId->isLmAction = true;
-
- /* The setTokStart action sets tokstart. */
- InlineList *il5 = new InlineList;
- il5->append( new InlineItem( InputLoc(), InlineItem::LmSetTokStart ) );
- setTokStart = newAction( "tokstart", il5 );
- setTokStart->isLmAction = true;
-
- /* The setTokEnd action sets tokend. */
- InlineList *il3 = new InlineList;
- il3->append( new InlineItem( InputLoc(), InlineItem::LmSetTokEnd ) );
- setTokEnd = newAction( "tokend", il3 );
- setTokEnd->isLmAction = true;
-
- /* The action will also need an ordering: ahead of all user action
- * embeddings. */
- initTokStartOrd = curActionOrd++;
- initActIdOrd = curActionOrd++;
- setTokStartOrd = curActionOrd++;
- setTokEndOrd = curActionOrd++;
- }
-}
-
-/* After building the graph, do some extra processing to ensure the runtime
- * data of the longest match operators is consistent. */
-void ParseData::setLongestMatchData( FsmAp *graph )
-{
- if ( lmList.length() > 0 ) {
- /* Make sure all entry points (targets of fgoto, fcall, fnext, fentry)
- * init the tokstart. */
- for ( EntryMap::Iter en = graph->entryPoints; en.lte(); en++ ) {
-			/* This is run after duplicates are removed, so we must guard against
- * inserting a duplicate. */
- ActionTable &actionTable = en->value->toStateActionTable;
- if ( ! actionTable.hasAction( initTokStart ) )
- actionTable.setAction( initTokStartOrd, initTokStart );
- }
-
- /* Find the set of states that are the target of transitions with
- * actions that have calls. These states will be targeted by fret
- * statements. */
- StateSet states;
- for ( StateList::Iter state = graph->stateList; state.lte(); state++ ) {
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- for ( ActionTable::Iter ati = trans->actionTable; ati.lte(); ati++ ) {
- if ( ati->value->anyCall && trans->toState != 0 )
- states.insert( trans->toState );
- }
- }
- }
-
-
- /* Init tokstart upon entering the above collected states. */
- for ( StateSet::Iter ps = states; ps.lte(); ps++ ) {
-			/* This is run after duplicates are removed, so we must guard against
- * inserting a duplicate. */
- ActionTable &actionTable = (*ps)->toStateActionTable;
- if ( ! actionTable.hasAction( initTokStart ) )
- actionTable.setAction( initTokStartOrd, initTokStart );
- }
- }
-}
-
-/* Make the graph from a graph dict node. Does minimization and state sorting. */
-FsmAp *ParseData::makeInstance( GraphDictEl *gdNode )
-{
- /* Build the graph from a walk of the parse tree. */
- FsmAp *graph = gdNode->value->walk( this );
-
- /* Resolve any labels that point to multiple states. Any labels that are
- * still around are referenced only by gotos and calls and they need to be
- * made into deterministic entry points. */
- graph->deterministicEntry();
-
- /*
- * All state construction is now complete.
- */
-
- /* Transfer global error actions. */
- for ( StateList::Iter state = graph->stateList; state.lte(); state++ )
- graph->transferErrorActions( state, 0 );
-
- removeActionDups( graph );
-
- /* Remove unreachable states. There should be no dead end states. The
- * subtract and intersection operators are the only places where they may
- * be created and those operators clean them up. */
- graph->removeUnreachableStates();
-
- /* No more fsm operations are to be done. Action ordering numbers are
- * no longer of use and will just hinder minimization. Clear them. */
- graph->nullActionKeys();
-
- /* Transition priorities are no longer of use. We can clear them
- * because they will just hinder minimization as well. Clear them. */
- graph->clearAllPriorities();
-
- if ( minimizeOpt != MinimizeNone ) {
- /* Minimize here even if we minimized at every op. Now that function
- * keys have been cleared we may get a more minimal fsm. */
- switch ( minimizeLevel ) {
- case MinimizeApprox:
- graph->minimizeApproximate();
- break;
- case MinimizeStable:
- graph->minimizeStable();
- break;
- case MinimizePartition1:
- graph->minimizePartition1();
- break;
- case MinimizePartition2:
- graph->minimizePartition2();
- break;
- }
- }
-
- graph->compressTransitions();
-
- return graph;
-}
-
-void ParseData::printNameTree()
-{
- /* Print the name instance map. */
- for ( NameVect::Iter name = rootName->childVect; name.lte(); name++ )
- printNameInst( *name, 0 );
-
- cerr << "name index:" << endl;
- /* Show that the name index is correct. */
- for ( int ni = 0; ni < nextNameId; ni++ ) {
- cerr << ni << ": ";
- const char *name = nameIndex[ni]->name;
- cerr << ( name != 0 ? name : "<ANON>" ) << endl;
- }
-}
-
-FsmAp *ParseData::makeSpecific( GraphDictEl *gdNode )
-{
- /* Build the name tree and supporting data structures. */
- makeNameTree( gdNode );
-
-	/* Resolve name references from gdNode. */
- initNameWalk();
- gdNode->value->resolveNameRefs( this );
-
- /* Do not resolve action references. Since we are not building the entire
- * graph there's a good chance that many name references will fail. This
- * is okay since generating part of the graph is usually only done when
- * inspecting the compiled machine. */
-
- /* Same story for extern entry point references. */
-
- /* Flag this case so that the XML code generator is aware that we haven't
- * looked up name references in actions. It can then avoid segfaulting. */
- generatingSectionSubset = true;
-
- /* Just building the specified graph. */
- initNameWalk();
- FsmAp *mainGraph = makeInstance( gdNode );
-
- return mainGraph;
-}
-
-FsmAp *ParseData::makeAll()
-{
- /* Build the name tree and supporting data structures. */
- makeNameTree( 0 );
-
-	/* Resolve name references in the tree. */
- initNameWalk();
- for ( GraphList::Iter glel = instanceList; glel.lte(); glel++ )
- glel->value->resolveNameRefs( this );
-
- /* Resolve action code name references. */
- resolveActionNameRefs();
-
- /* Force name references to the top level instantiations. */
- for ( NameVect::Iter inst = rootName->childVect; inst.lte(); inst++ )
- (*inst)->numRefs += 1;
-
- FsmAp *mainGraph = 0;
- FsmAp **graphs = new FsmAp*[instanceList.length()];
- int numOthers = 0;
-
- /* Make all the instantiations, we know that main exists in this list. */
- initNameWalk();
- for ( GraphList::Iter glel = instanceList; glel.lte(); glel++ ) {
- if ( strcmp( glel->key, mainMachine ) == 0 ) {
- /* Main graph is always instantiated. */
- mainGraph = makeInstance( glel );
- }
- else {
- /* Instantiate and store in others array. */
- graphs[numOthers++] = makeInstance( glel );
- }
- }
-
- if ( mainGraph == 0 )
- mainGraph = graphs[--numOthers];
-
- if ( numOthers > 0 ) {
- /* Add all the other graphs into main. */
- mainGraph->globOp( graphs, numOthers );
- }
-
- delete[] graphs;
- return mainGraph;
-}
-
-void ParseData::analyzeAction( Action *action, InlineList *inlineList )
-{
- /* FIXME: Actions used as conditions should be very constrained. */
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- if ( item->type == InlineItem::Call || item->type == InlineItem::CallExpr )
- action->anyCall = true;
-
- /* Need to recurse into longest match items. */
- if ( item->type == InlineItem::LmSwitch ) {
- LongestMatch *lm = item->longestMatch;
- for ( LmPartList::Iter lmi = *lm->longestMatchList; lmi.lte(); lmi++ ) {
- if ( lmi->action != 0 )
- analyzeAction( action, lmi->action->inlineList );
- }
- }
-
- if ( item->type == InlineItem::LmOnLast ||
- item->type == InlineItem::LmOnNext ||
- item->type == InlineItem::LmOnLagBehind )
- {
- LongestMatchPart *lmi = item->longestMatchPart;
- if ( lmi->action != 0 )
- analyzeAction( action, lmi->action->inlineList );
- }
-
- if ( item->children != 0 )
- analyzeAction( action, item->children );
- }
-}
-
-
-/* Check actions for bad uses of fsm directives. We don't go inside longest
- * match items in actions created by ragel, since we just want the user
- * actions. */
-void ParseData::checkInlineList( Action *act, InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- /* EOF checks. */
- if ( act->numEofRefs > 0 ) {
- switch ( item->type ) {
- case InlineItem::PChar:
- error(item->loc) << "pointer to current element does not exist in "
- "EOF action code" << endl;
- break;
- case InlineItem::Char:
- error(item->loc) << "current element does not exist in "
- "EOF action code" << endl;
- break;
- case InlineItem::Hold:
- error(item->loc) << "changing the current element not possible in "
- "EOF action code" << endl;
- break;
- case InlineItem::Exec:
- error(item->loc) << "changing the current element not possible in "
- "EOF action code" << endl;
- break;
- case InlineItem::Goto: case InlineItem::Call:
- case InlineItem::Next: case InlineItem::GotoExpr:
- case InlineItem::CallExpr: case InlineItem::NextExpr:
- case InlineItem::Ret:
- error(item->loc) << "changing the current state not possible in "
- "EOF action code" << endl;
- break;
- default:
- break;
- }
- }
-
- /* Recurse. */
- if ( item->children != 0 )
- checkInlineList( act, item->children );
- }
-}
-
-void ParseData::checkAction( Action *action )
-{
- /* Check for actions with calls that are embedded within a longest match
- * machine. */
- if ( !action->isLmAction && action->numRefs() > 0 && action->anyCall ) {
- for ( ActionRefs::Iter ar = action->actionRefs; ar.lte(); ar++ ) {
- NameInst *check = *ar;
- while ( check != 0 ) {
- if ( check->isLongestMatch ) {
- error(action->loc) << "within a scanner, fcall is permitted"
- " only in pattern actions" << endl;
- break;
- }
- check = check->parent;
- }
- }
- }
-
- checkInlineList( action, action->inlineList );
-}
-
-
-void ParseData::analyzeGraph( FsmAp *graph )
-{
- for ( ActionList::Iter act = actionList; act.lte(); act++ )
- analyzeAction( act, act->inlineList );
-
- for ( StateList::Iter st = graph->stateList; st.lte(); st++ ) {
- /* The transition list. */
- for ( TransList::Iter trans = st->outList; trans.lte(); trans++ ) {
- for ( ActionTable::Iter at = trans->actionTable; at.lte(); at++ )
- at->value->numTransRefs += 1;
- }
-
- for ( ActionTable::Iter at = st->toStateActionTable; at.lte(); at++ )
- at->value->numToStateRefs += 1;
-
- for ( ActionTable::Iter at = st->fromStateActionTable; at.lte(); at++ )
- at->value->numFromStateRefs += 1;
-
- for ( ActionTable::Iter at = st->eofActionTable; at.lte(); at++ )
- at->value->numEofRefs += 1;
-
- for ( StateCondList::Iter sc = st->stateCondList; sc.lte(); sc++ ) {
- for ( CondSet::Iter sci = sc->condSpace->condSet; sci.lte(); sci++ )
- (*sci)->numCondRefs += 1;
- }
- }
-
- /* Checks for bad usage of directives in action code. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ )
- checkAction( act );
-}
-
-void ParseData::makeExportsNameTree()
-{
- /* Make a name tree for the exports. */
- initExportsNameWalk();
-
- /* First make the name tree. */
- for ( GraphDict::Iter gdel = graphDict; gdel.lte(); gdel++ ) {
- if ( gdel->value->isExport ) {
- /* Recurse on the instance. */
- gdel->value->makeNameTree( gdel->loc, this );
- }
- }
-}
-
-void ParseData::makeExports()
-{
- makeExportsNameTree();
-
-	/* Resolve name references in the tree. */
- initExportsNameWalk();
- for ( GraphDict::Iter gdel = graphDict; gdel.lte(); gdel++ ) {
- if ( gdel->value->isExport )
- gdel->value->resolveNameRefs( this );
- }
-
-	/* Build each of the exported definitions. */
- initExportsNameWalk();
- for ( GraphDict::Iter gdel = graphDict; gdel.lte(); gdel++ ) {
- /* Check if this var def is an export. */
- if ( gdel->value->isExport ) {
- /* Build the graph from a walk of the parse tree. */
- FsmAp *graph = gdel->value->walk( this );
-
-			/* An export must define a single character machine. */
- if ( !graph->checkSingleCharMachine() ) {
- error(gdel->loc) << "bad export machine, must define "
- "a single character" << endl;
- }
- else {
- /* Safe to extract the key and declare the export. */
- Key exportKey = graph->startState->outList.head->lowKey;
- exportList.append( new Export( gdel->value->name, exportKey ) );
- }
- }
- }
-
-}
-
-void ParseData::prepareMachineGen( GraphDictEl *graphDictEl )
-{
- beginProcessing();
- initKeyOps();
- makeRootNames();
- initLongestMatchData();
-
- /* Make the graph, do minimization. */
- if ( graphDictEl == 0 )
- sectionGraph = makeAll();
- else
- sectionGraph = makeSpecific( graphDictEl );
-
- /* Compute exports from the export definitions. */
- makeExports();
-
-	/* If any errors have occurred in the input file then don't write anything. */
- if ( gblErrorCount > 0 )
- return;
-
- analyzeGraph( sectionGraph );
-
- /* Depends on the graph analysis. */
- setLongestMatchData( sectionGraph );
-
- /* Decide if an error state is necessary.
- * 1. There is an error transition
- * 2. There is a gap in the transitions
- * 3. The longest match operator requires it. */
- if ( lmRequiresErrorState || sectionGraph->hasErrorTrans() )
- sectionGraph->errState = sectionGraph->addState();
-
- /* State numbers need to be assigned such that all final states have a
- * larger state id number than all non-final states. This enables the
- * first_final mechanism to function correctly. We also want states to be
- * ordered in a predictable fashion. So we first apply a depth-first
- * search, then do a stable sort by final state status, then assign
- * numbers. */
-
- sectionGraph->depthFirstOrdering();
- sectionGraph->sortStatesByFinal();
- sectionGraph->setStateNumbers( 0 );
-}
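/* Illustration (not part of the Ragel sources): a sketch of the numbering rule
 * described above -- keep a deterministic (depth-first) order, stably move
 * final states after non-final ones, then hand out ids. State here is a
 * hypothetical stand-in for Ragel's StateAp. */
#include <algorithm>
#include <vector>

struct State { bool isFinal; int id; };

static bool nonFinalFirst( const State *a, const State *b )
{
	return !a->isFinal && b->isFinal;  /* non-final states sort ahead of final ones */
}

void numberStates( std::vector<State*> &depthFirstOrder )
{
	/* stable_sort preserves the depth-first order within each group */
	std::stable_sort( depthFirstOrder.begin(), depthFirstOrder.end(), nonFinalFirst );
	for ( size_t i = 0; i < depthFirstOrder.size(); i++ )
		depthFirstOrder[i]->id = (int)i;   /* every final id exceeds every non-final id */
}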
-
-void ParseData::generateXML( ostream &out )
-{
- beginProcessing();
-
- /* Make the generator. */
- XMLCodeGen codeGen( sectionName, this, sectionGraph, out );
-
- /* Write out with it. */
- codeGen.writeXML();
-
- if ( printStatistics ) {
- cerr << "fsm name : " << sectionName << endl;
- cerr << "num states: " << sectionGraph->stateList.length() << endl;
- cerr << endl;
- }
-}
-
-/* Send eof to all parsers. */
-void terminateAllParsers( )
-{
-	/* FIXME: a proper token is needed here. Presumably we should use the
- * location of EOF in the last file that the parser was referenced in. */
- InputLoc loc;
- loc.fileName = "<EOF>";
- loc.line = 0;
- loc.col = 0;
- for ( ParserDict::Iter pdel = parserDict; pdel.lte(); pdel++ )
- pdel->value->token( loc, _eof, 0, 0 );
-}
-
-void writeLanguage( std::ostream &out )
-{
- out << " lang=\"";
- switch ( hostLangType ) {
- case CCode: out << "C"; break;
- case DCode: out << "D"; break;
- case JavaCode: out << "Java"; break;
- case RubyCode: out << "Ruby"; break;
- }
- out << "\"";
-
-}
-
-void writeMachines( std::ostream &out, std::string hostData, const char *inputFileName )
-{
- if ( machineSpec == 0 && machineName == 0 ) {
- /* No machine spec or machine name given. Generate everything. */
- for ( ParserDict::Iter parser = parserDict; parser.lte(); parser++ ) {
- ParseData *pd = parser->value->pd;
- if ( pd->instanceList.length() > 0 )
- pd->prepareMachineGen( 0 );
- }
-
- if ( gblErrorCount == 0 ) {
- out << "<ragel filename=\"" << inputFileName << "\"";
- writeLanguage( out );
- out << ">\n";
- for ( ParserDict::Iter parser = parserDict; parser.lte(); parser++ ) {
- ParseData *pd = parser->value->pd;
- if ( pd->instanceList.length() > 0 )
- pd->generateXML( out );
- }
- out << hostData;
- out << "</ragel>\n";
- }
- }
- else if ( parserDict.length() > 0 ) {
- /* There is either a machine spec or machine name given. */
- ParseData *parseData = 0;
- GraphDictEl *graphDictEl = 0;
-
- /* Traverse the sections, break out when we find a section/machine
- * that matches the one specified. */
- for ( ParserDict::Iter parser = parserDict; parser.lte(); parser++ ) {
- ParseData *checkPd = parser->value->pd;
- if ( machineSpec == 0 || strcmp( checkPd->sectionName, machineSpec ) == 0 ) {
- GraphDictEl *checkGdEl = 0;
- if ( machineName == 0 || (checkGdEl =
- checkPd->graphDict.find( machineName )) != 0 )
- {
- /* Have a machine spec and/or machine name that matches
- * the -M/-S options. */
- parseData = checkPd;
- graphDictEl = checkGdEl;
- break;
- }
- }
- }
-
- if ( parseData == 0 )
- error() << "could not locate machine specified with -S and/or -M" << endl;
- else {
- /* Section/Machine to emit was found. Prepare and emit it. */
- parseData->prepareMachineGen( graphDictEl );
- if ( gblErrorCount == 0 ) {
- out << "<ragel filename=\"" << inputFileName << "\"";
- writeLanguage( out );
- out << ">\n";
- parseData->generateXML( out );
- out << hostData;
- out << "</ragel>\n";
- }
- }
- }
-}
diff --git a/contrib/tools/ragel5/ragel/parsedata.h b/contrib/tools/ragel5/ragel/parsedata.h
deleted file mode 100644
index 2baa7373d2..0000000000
--- a/contrib/tools/ragel5/ragel/parsedata.h
+++ /dev/null
@@ -1,401 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PARSEDATA_H
-#define _PARSEDATA_H
-
-#include <iostream>
-#include <limits.h>
-#include "avlmap.h"
-#include "bstmap.h"
-#include "vector.h"
-#include "dlist.h"
-#include "fsmgraph.h"
-#include "compare.h"
-#include "vector.h"
-#include "common.h"
-#include "parsetree.h"
-
-/* Forwards. */
-using std::ostream;
-
-struct VarDef;
-struct Join;
-struct Expression;
-struct Term;
-struct FactorWithAug;
-struct FactorWithLabel;
-struct FactorWithRep;
-struct FactorWithNeg;
-struct Factor;
-struct Literal;
-struct Range;
-struct RegExpr;
-struct ReItem;
-struct ReOrBlock;
-struct ReOrItem;
-struct LongestMatch;
-typedef DList<LongestMatch> LmList;
-
-/* Graph dictionary. */
-struct GraphDictEl
-:
- public AvlTreeEl<GraphDictEl>,
- public DListEl<GraphDictEl>
-{
- GraphDictEl(const char *k )
- : key(k), value(0), isInstance(false) { }
- GraphDictEl(const char *k, VarDef *value )
- : key(k), value(value), isInstance(false) { }
-
- const char *getKey() { return key; }
-
- const char *key;
- VarDef *value;
- bool isInstance;
-
- /* Location info of graph definition. Points to variable name of assignment. */
- InputLoc loc;
-};
-
-typedef AvlTree<GraphDictEl, char*, CmpStr> GraphDict;
-typedef DList<GraphDictEl> GraphList;
-
-/* Priority name dictionary. */
-typedef AvlMapEl<char*, int> PriorDictEl;
-typedef AvlMap<char*, int, CmpStr> PriorDict;
-
-/* Local error name dictionary. */
-typedef AvlMapEl<const char*, int> LocalErrDictEl;
-typedef AvlMap<const char*, int, CmpStr> LocalErrDict;
-
-/* Tree of instantiated names. */
-typedef BstMapEl<const char*, NameInst*> NameMapEl;
-typedef BstMap<const char*, NameInst*, CmpStr> NameMap;
-typedef Vector<NameInst*> NameVect;
-typedef BstSet<NameInst*> NameSet;
-
-/* Node in the tree of instantiated names. */
-struct NameInst
-{
- NameInst( const InputLoc &loc, NameInst *parent, const char *name, int id, bool isLabel ) :
- loc(loc), parent(parent), name(name), id(id), isLabel(isLabel),
- isLongestMatch(false), numRefs(0), numUses(0), start(0), final(0) {}
-
- InputLoc loc;
-
- /* Keep parent pointers in the name tree to retrieve
-	 * fully qualified names. */
- NameInst *parent;
-
- const char *name;
- int id;
- bool isLabel;
- bool isLongestMatch;
-
- int numRefs;
- int numUses;
-
- /* Names underneath us, excludes anonymous names. */
- NameMap children;
-
- /* All names underneath us in order of appearance. */
- NameVect childVect;
-
- /* Join scopes need an implicit "final" target. */
- NameInst *start, *final;
-
- /* During a fsm generation walk, lists the names that are referenced by
- * epsilon operations in the current scope. After the link is made by the
- * epsilon reference and the join operation is complete, the label can
- * have its refcount decremented. Once there are no more references the
- * entry point can be removed from the fsm returned. */
- NameVect referencedNames;
-
- /* Pointers for the name search queue. */
- NameInst *prev, *next;
-
- /* Check if this name inst or any name inst below is referenced. */
- bool anyRefsRec();
-};
-
-typedef DList<NameInst> NameInstList;
-
-/* Stack frame used in walking the name tree. */
-struct NameFrame
-{
- NameInst *prevNameInst;
- int prevNameChild;
- NameInst *prevLocalScope;
-};
-
-/* Class to collect information about the machine during the
- * parse of input. */
-struct ParseData
-{
- /* Create a new parse data object. This is done at the beginning of every
- * fsm specification. */
- ParseData(const char *fileName, char *sectionName, const InputLoc &sectionLoc );
- ~ParseData();
-
- /*
- * Setting up the graph dict.
- */
-
- /* Initialize a graph dict with the basic fsms. */
- void initGraphDict();
- void createBuiltin(const char *name, BuiltinMachine builtin );
-
- /* Make a name id in the current name instantiation scope if it is not
- * already there. */
- NameInst *addNameInst( const InputLoc &loc, const char *data, bool isLabel );
- void makeRootNames();
- void makeNameTree( GraphDictEl *gdNode );
- void makeExportsNameTree();
- void fillNameIndex( NameInst *from );
- void printNameTree();
-
- /* Increments the usage count on entry names. Names that are no longer
- * needed will have their entry points unset. */
- void unsetObsoleteEntries( FsmAp *graph );
-
-	/* Resolve name references in action code and epsilon transitions. */
- NameSet resolvePart( NameInst *refFrom, const char *data, bool recLabelsOnly );
- void resolveFrom( NameSet &result, NameInst *refFrom,
- const NameRef &nameRef, int namePos );
- NameInst *resolveStateRef( const NameRef &nameRef, InputLoc &loc, Action *action );
- void resolveNameRefs( InlineList *inlineList, Action *action );
- void resolveActionNameRefs();
-
-	/* Set the alphabet type. If the types are not valid, returns false. */
- bool setAlphType( char *s1, char *s2 );
- bool setAlphType( char *s1 );
-
- /* Unique actions. */
- void removeDups( ActionTable &actionTable );
- void removeActionDups( FsmAp *graph );
-
- /* Dumping the name instantiation tree. */
- void printNameInst( NameInst *nameInst, int level );
-
- /* Make the graph from a graph dict node. Does minimization. */
- FsmAp *makeInstance( GraphDictEl *gdNode );
- FsmAp *makeSpecific( GraphDictEl *gdNode );
- FsmAp *makeAll();
-
- /* Checking the contents of actions. */
- void checkAction( Action *action );
- void checkInlineList( Action *act, InlineList *inlineList );
-
- void analyzeAction( Action *action, InlineList *inlineList );
- void analyzeGraph( FsmAp *graph );
- void makeExports();
-
- void prepareMachineGen( GraphDictEl *graphDictEl );
- void generateXML( ostream &out );
- FsmAp *sectionGraph;
- bool generatingSectionSubset;
-
- void initKeyOps();
-
- /*
- * Data collected during the parse.
- */
-
- /* Dictionary of graphs. Both instances and non-instances go here. */
- GraphDict graphDict;
-
- /* The list of instances. */
- GraphList instanceList;
-
- /* Dictionary of actions. Lets actions be defined and then referenced. */
- ActionDict actionDict;
-
- /* Dictionary of named priorities. */
- PriorDict priorDict;
-
- /* Dictionary of named local errors. */
- LocalErrDict localErrDict;
-
- /* List of actions. Will be pasted into a switch statement. */
- ActionList actionList;
-
- /* The id of the next priority name and label. */
- int nextPriorKey, nextLocalErrKey, nextNameId, nextCondId;
-
- /* The default priority number key for a machine. This is active during
- * the parse of the rhs of a machine assignment. */
- int curDefPriorKey;
-
- int curDefLocalErrKey;
-
- /* Alphabet type. */
- HostType *userAlphType;
- bool alphTypeSet;
-
- /* Element type and get key expression. */
- InlineList *getKeyExpr;
- InlineList *accessExpr;
- InlineList *curStateExpr;
-
- /* The alphabet range. */
- char *lowerNum, *upperNum;
- Key lowKey, highKey;
- InputLoc rangeLowLoc, rangeHighLoc;
-
- /* The name of the file the fsm is from, and the spec name. */
- const char *fileName;
- char *sectionName;
- InputLoc sectionLoc;
-
- /* Number of errors encountered parsing the fsm spec. */
- int errorCount;
-
- /* Counting the action and priority ordering. */
- int curActionOrd;
- int curPriorOrd;
-
- /* Root of the name tree. One root is for the instantiated machines. The
- * other root is for exported definitions. */
- NameInst *rootName;
- NameInst *exportsRootName;
-
- /* Name tree walking. */
- NameInst *curNameInst;
- int curNameChild;
-
- /* The place where resolved epsilon transitions go. These cannot go into
- * the parse tree because a single epsilon op can resolve more than once
- * to different nameInsts if the machine it's in is used more than once. */
- NameVect epsilonResolvedLinks;
- int nextEpsilonResolvedLink;
-
- /* Root of the name tree used for doing local name searches. */
- NameInst *localNameScope;
-
- void setLmInRetLoc( InlineList *inlineList );
- void initLongestMatchData();
- void setLongestMatchData( FsmAp *graph );
- void initNameWalk();
- void initExportsNameWalk();
- NameInst *nextNameScope() { return curNameInst->childVect[curNameChild]; }
- NameFrame enterNameScope( bool isLocal, int numScopes );
- void popNameScope( const NameFrame &frame );
- void resetNameScope( const NameFrame &frame );
-
- /* Make name ids to name inst pointers. */
- NameInst **nameIndex;
-
- /* Counter for assigning ids to longest match items. */
- int nextLongestMatchId;
- bool lmRequiresErrorState;
-
- /* List of all longest match parse tree items. */
- LmList lmList;
-
- Action *newAction(const char *name, InlineList *inlineList );
-
- Action *initTokStart;
- int initTokStartOrd;
-
- Action *setTokStart;
- int setTokStartOrd;
-
- Action *initActId;
- int initActIdOrd;
-
- Action *setTokEnd;
- int setTokEndOrd;
-
- void beginProcessing()
- {
- ::condData = &thisCondData;
- ::keyOps = &thisKeyOps;
- }
-
- CondData thisCondData;
- KeyOps thisKeyOps;
-
- ExportList exportList;
-};
-
-void afterOpMinimize( FsmAp *fsm, bool lastInSeq = true );
-Key makeFsmKeyHex( char *str, const InputLoc &loc, ParseData *pd );
-Key makeFsmKeyDec( char *str, const InputLoc &loc, ParseData *pd );
-Key makeFsmKeyNum( char *str, const InputLoc &loc, ParseData *pd );
-Key makeFsmKeyChar( char c, ParseData *pd );
-void makeFsmKeyArray( Key *result, char *data, int len, ParseData *pd );
-void makeFsmUniqueKeyArray( KeySet &result, char *data, int len,
- bool caseInsensitive, ParseData *pd );
-FsmAp *makeBuiltin( BuiltinMachine builtin, ParseData *pd );
-FsmAp *dotFsm( ParseData *pd );
-FsmAp *dotStarFsm( ParseData *pd );
-
-void errorStateLabels( const NameSet &locations );
-
-/* Data used by the parser specific to the current file. Supports the include
- * system, since a new parser is executed for each included file. */
-struct InputData
-{
- InputData( char *fileName, char *includeSpec, char *includeTo ) :
- pd(0), sectionName(0), defaultParseData(0),
- first_line(1), first_column(1),
- last_line(1), last_column(0),
- fileName(fileName), includeSpec(includeSpec),
- includeTo(includeTo), active(true)
- {}
-
-	/* For collecting name references. */
- NameRef nameRef;
- NameRefList nameRefList;
-
- /* The parse data. For each fsm spec, the parser collects things that it parses
- * in data structures in here. */
- ParseData *pd;
-
- char *sectionName;
- ParseData *defaultParseData;
-
- int first_line;
- int first_column;
- int last_line;
- int last_column;
-
- char *fileName;
-
- /* If this is an included file, this contains the specification to search
-	 * for. IncludeTo will contain the spec name that does the including. */
- char *includeSpec;
- char *includeTo;
-
- bool active;
- InputLoc sectionLoc;
-};
-
-struct Parser;
-
-typedef AvlMap<char*, Parser *, CmpStr> ParserDict;
-typedef AvlMapEl<char*, Parser *> ParserDictEl;
-
-extern ParserDict parserDict;
-
-
-#endif /* _PARSEDATA_H */
diff --git a/contrib/tools/ragel5/ragel/parsetree.cpp b/contrib/tools/ragel5/ragel/parsetree.cpp
deleted file mode 100644
index 4755e3085b..0000000000
--- a/contrib/tools/ragel5/ragel/parsetree.cpp
+++ /dev/null
@@ -1,2089 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <iostream>
-#include <iomanip>
-#include <errno.h>
-#include <limits.h>
-#include <stdlib.h>
-
-/* Parsing. */
-#include "ragel.h"
-#include "rlparse.h"
-#include "parsetree.h"
-
-using namespace std;
-ostream &operator<<( ostream &out, const NameRef &nameRef );
-ostream &operator<<( ostream &out, const NameInst &nameInst );
-
-/* Convert the literal string which comes in from the scanner into an array of
- * characters with escapes and options interpreted. Also null terminates the
- * string. Though this null termination should not be relied on for
- * interpreting literals in the parser because the string may itself
- * contain \0. */
-void Token::prepareLitString( Token &result, bool &caseInsensitive )
-{
- result.data = new char[this->length+1];
- caseInsensitive = false;
-
- char *src = this->data + 1;
- char *end = this->data + this->length - 1;
-
- while ( *end != '\'' && *end != '\"' ) {
- if ( *end == 'i' )
- caseInsensitive = true;
- else {
- error( this->loc ) << "literal string '" << *end <<
- "' option not supported" << endl;
- }
- end -= 1;
- }
-
- char *dest = result.data;
- int len = 0;
- while ( src != end ) {
- if ( *src == '\\' ) {
- switch ( src[1] ) {
- case '0': dest[len++] = '\0'; break;
- case 'a': dest[len++] = '\a'; break;
- case 'b': dest[len++] = '\b'; break;
- case 't': dest[len++] = '\t'; break;
- case 'n': dest[len++] = '\n'; break;
- case 'v': dest[len++] = '\v'; break;
- case 'f': dest[len++] = '\f'; break;
- case 'r': dest[len++] = '\r'; break;
- case '\n': break;
- default: dest[len++] = src[1]; break;
- }
- src += 2;
- }
- else {
- dest[len++] = *src++;
- }
- }
- result.length = len;
- result.data[result.length] = 0;
-}
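/* Illustration (not part of the Ragel sources): a compact sketch of the escape
 * interpretation performed above, operating on std::string instead of the
 * Token buffers. interpretEscapes() is a hypothetical helper name. */
#include <string>

std::string interpretEscapes( const std::string &src )
{
	std::string out;
	for ( std::string::size_type i = 0; i < src.size(); i++ ) {
		if ( src[i] == '\\' && i + 1 < src.size() ) {
			switch ( src[i+1] ) {
			case '0': out += '\0'; break;
			case 'a': out += '\a'; break;
			case 'b': out += '\b'; break;
			case 't': out += '\t'; break;
			case 'n': out += '\n'; break;
			case 'v': out += '\v'; break;
			case 'f': out += '\f'; break;
			case 'r': out += '\r'; break;
			case '\n': break;                  /* escaped newline is dropped */
			default: out += src[i+1]; break;   /* unknown escape: keep the character */
			}
			i += 1;                            /* consume the escaped character */
		}
		else {
			out += src[i];
		}
	}
	return out;
}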
-
-
-FsmAp *VarDef::walk( ParseData *pd )
-{
- /* We enter into a new name scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* Recurse on the expression. */
- FsmAp *rtnVal = joinOrLm->walk( pd );
-
-	/* Do the transfer of local error actions. */
- LocalErrDictEl *localErrDictEl = pd->localErrDict.find( name );
- if ( localErrDictEl != 0 ) {
- for ( StateList::Iter state = rtnVal->stateList; state.lte(); state++ )
- rtnVal->transferErrorActions( state, localErrDictEl->value );
- }
-
- /* If the expression below is a join operation with multiple expressions
-	 * then it just had epsilon transitions resolved. If it is a join
- * with only a single expression then run the epsilon op now. */
- if ( joinOrLm->type == JoinOrLm::JoinType && joinOrLm->join->exprList.length() == 1 )
- rtnVal->epsilonOp();
-
-	/* We can now unset entry points that are no longer used. */
- pd->unsetObsoleteEntries( rtnVal );
-
- /* If the name of the variable is referenced then add the entry point to
- * the graph. */
- if ( pd->curNameInst->numRefs > 0 )
- rtnVal->setEntry( pd->curNameInst->id, rtnVal->startState );
-
- /* Pop the name scope. */
- pd->popNameScope( nameFrame );
- return rtnVal;
-}
-
-void VarDef::makeNameTree( const InputLoc &loc, ParseData *pd )
-{
- /* The variable definition enters a new scope. */
- NameInst *prevNameInst = pd->curNameInst;
- pd->curNameInst = pd->addNameInst( loc, name, false );
-
- if ( joinOrLm->type == JoinOrLm::LongestMatchType )
- pd->curNameInst->isLongestMatch = true;
-
- /* Recurse. */
- joinOrLm->makeNameTree( pd );
-
- /* The name scope ends, pop the name instantiation. */
- pd->curNameInst = prevNameInst;
-}
-
-void VarDef::resolveNameRefs( ParseData *pd )
-{
- /* Entering into a new scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* Recurse. */
- joinOrLm->resolveNameRefs( pd );
-
- /* The name scope ends, pop the name instantiation. */
- pd->popNameScope( nameFrame );
-}
-
-InputLoc LongestMatchPart::getLoc()
-{
- return action != 0 ? action->loc : semiLoc;
-}
-
-/*
- * If there are any LMs then all of the following entry points must reset
- * tokstart:
- *
- * 1. fentry(StateRef)
- * 2. fgoto(StateRef), fcall(StateRef), fnext(StateRef)
- * 3. target of any transition that has an fcall (the return loc).
- * 4. start state of all longest match routines.
- */
-
-Action *LongestMatch::newAction( ParseData *pd, const InputLoc &loc,
- const char *name, InlineList *inlineList )
-{
- Action *action = new Action( loc, name, inlineList, pd->nextCondId++ );
- action->actionRefs.append( pd->curNameInst );
- pd->actionList.append( action );
- action->isLmAction = true;
- return action;
-}
-
-void LongestMatch::makeActions( ParseData *pd )
-{
- /* Make actions that set the action id. */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ ) {
- /* For each part create actions for setting the match type. We need
- * to do this so that the actions will go into the actionIndex. */
- InlineList *inlineList = new InlineList;
- inlineList->append( new InlineItem( lmi->getLoc(), this, lmi, InlineItem::LmSetActId ) );
- char *actName = new char[50];
- sprintf( actName, "store%i", lmi->longestMatchId );
- lmi->setActId = newAction( pd, lmi->getLoc(), actName, inlineList );
- }
-
- /* Make actions that execute the user action and restart on the last character. */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ ) {
- /* For each part create actions for setting the match type. We need
- * to do this so that the actions will go into the actionIndex. */
- InlineList *inlineList = new InlineList;
- inlineList->append( new InlineItem( lmi->getLoc(), this, lmi,
- InlineItem::LmOnLast ) );
- char *actName = new char[50];
- sprintf( actName, "imm%i", lmi->longestMatchId );
- lmi->actOnLast = newAction( pd, lmi->getLoc(), actName, inlineList );
- }
-
- /* Make actions that execute the user action and restart on the next
- * character. These actions will set tokend themselves (it is the current
- * char). */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ ) {
- /* For each part create actions for setting the match type. We need
- * to do this so that the actions will go into the actionIndex. */
- InlineList *inlineList = new InlineList;
- inlineList->append( new InlineItem( lmi->getLoc(), this, lmi,
- InlineItem::LmOnNext ) );
- char *actName = new char[50];
- sprintf( actName, "lagh%i", lmi->longestMatchId );
- lmi->actOnNext = newAction( pd, lmi->getLoc(), actName, inlineList );
- }
-
- /* Make actions that execute the user action and restart at tokend. These
- * actions execute some time after matching the last char. */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ ) {
- /* For each part create actions for setting the match type. We need
- * to do this so that the actions will go into the actionIndex. */
- InlineList *inlineList = new InlineList;
- inlineList->append( new InlineItem( lmi->getLoc(), this, lmi,
- InlineItem::LmOnLagBehind ) );
- char *actName = new char[50];
- sprintf( actName, "lag%i", lmi->longestMatchId );
- lmi->actLagBehind = newAction( pd, lmi->getLoc(), actName, inlineList );
- }
-
- InputLoc loc;
- loc.line = 1;
- loc.col = 1;
-
- /* Create the error action. */
- InlineList *il6 = new InlineList;
- il6->append( new InlineItem( loc, this, 0, InlineItem::LmSwitch ) );
- lmActSelect = newAction( pd, loc, "lagsel", il6 );
-}
-
-void LongestMatch::findName( ParseData *pd )
-{
- NameInst *nameInst = pd->curNameInst;
- while ( nameInst->name == 0 ) {
- nameInst = nameInst->parent;
-		/* Since every machine must have a name, we should always find a
- * name for the longest match. */
- assert( nameInst != 0 );
- }
- name = nameInst->name;
-}
-
-void LongestMatch::makeNameTree( ParseData *pd )
-{
- /* Create an anonymous scope for the longest match. Will be used for
-	 * restarting the machine after matching a token. */
- NameInst *prevNameInst = pd->curNameInst;
- pd->curNameInst = pd->addNameInst( loc, 0, false );
-
- /* Recurse into all parts of the longest match operator. */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ )
- lmi->join->makeNameTree( pd );
-
- /* Traverse the name tree upwards to find a name for this lm. */
- findName( pd );
-
- /* Also make the longest match's actions at this point. */
- makeActions( pd );
-
- /* The name scope ends, pop the name instantiation. */
- pd->curNameInst = prevNameInst;
-}
-
-void LongestMatch::resolveNameRefs( ParseData *pd )
-{
- /* The longest match gets its own name scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* Take an action reference for each longest match item and recurse. */
- for ( LmPartList::Iter lmi = *longestMatchList; lmi.lte(); lmi++ ) {
- /* Record the reference if the item has an action. */
- if ( lmi->action != 0 )
- lmi->action->actionRefs.append( pd->localNameScope );
-
- /* Recurse down the join. */
- lmi->join->resolveNameRefs( pd );
- }
-
- /* The name scope ends, pop the name instantiation. */
- pd->popNameScope( nameFrame );
-}
-
-void LongestMatch::restart( FsmAp *graph, TransAp *trans )
-{
- StateAp *fromState = trans->fromState;
- graph->detachTrans( fromState, trans->toState, trans );
- graph->attachTrans( fromState, graph->startState, trans );
-}
-
-void LongestMatch::runLonestMatch( ParseData *pd, FsmAp *graph )
-{
- graph->markReachableFromHereStopFinal( graph->startState );
- for ( StateList::Iter ms = graph->stateList; ms.lte(); ms++ ) {
- if ( ms->stateBits & SB_ISMARKED ) {
- ms->lmItemSet.insert( 0 );
- ms->stateBits &= ~ SB_ISMARKED;
- }
- }
-
- /* Transfer the first item of non-empty lmAction tables to the item sets
- * of the states that follow. Exclude states that have no transitions out.
- * This must happen on a separate pass so that on each iteration of the
- * next pass we have the item set entries from all lmAction tables. */
- for ( StateList::Iter st = graph->stateList; st.lte(); st++ ) {
- for ( TransList::Iter trans = st->outList; trans.lte(); trans++ ) {
- if ( trans->lmActionTable.length() > 0 ) {
- LmActionTableEl *lmAct = trans->lmActionTable.data;
- StateAp *toState = trans->toState;
- assert( toState );
-
- /* Check if there are transitions out, this may be a very
- * close approximation? Out transitions going nowhere?
- * FIXME: Check. */
- if ( toState->outList.length() > 0 ) {
- /* Fill the item sets. */
- graph->markReachableFromHereStopFinal( toState );
- for ( StateList::Iter ms = graph->stateList; ms.lte(); ms++ ) {
- if ( ms->stateBits & SB_ISMARKED ) {
- ms->lmItemSet.insert( lmAct->value );
- ms->stateBits &= ~ SB_ISMARKED;
- }
- }
- }
- }
- }
- }
-
- /* The lmItem sets are now filled, telling us which longest match rules
- * can succeed in which states. First determine if we need to make sure
- * act is defaulted to zero. We need to do this if there are any states
-	 * with lmItemSet.length() > 1 and NULL is included. That is, the
- * switch may get called when in fact nothing has been matched. */
- int maxItemSetLength = 0;
- graph->markReachableFromHereStopFinal( graph->startState );
- for ( StateList::Iter ms = graph->stateList; ms.lte(); ms++ ) {
- if ( ms->stateBits & SB_ISMARKED ) {
- if ( ms->lmItemSet.length() > maxItemSetLength )
- maxItemSetLength = ms->lmItemSet.length();
- ms->stateBits &= ~ SB_ISMARKED;
- }
- }
-
- /* The actions executed on starting to match a token. */
- graph->startState->toStateActionTable.setAction( pd->initTokStartOrd, pd->initTokStart );
- graph->startState->fromStateActionTable.setAction( pd->setTokStartOrd, pd->setTokStart );
- if ( maxItemSetLength > 1 ) {
- /* The longest match action switch may be called when tokens are
- * matched, in which case act must be initialized, there must be a
- * case to handle the error, and the generated machine will require an
- * error state. */
- lmSwitchHandlesError = true;
- pd->lmRequiresErrorState = true;
- graph->startState->toStateActionTable.setAction( pd->initActIdOrd, pd->initActId );
- }
-
-	/* The place to store transitions to restart. It may be possible for the
- * restarting to affect the searching through the graph that follows. For
- * now take the safe route and save the list of transitions to restart
- * until after all searching is done. */
- Vector<TransAp*> restartTrans;
-
- /* Set actions that do immediate token recognition, set the longest match part
- * id and set the token ending. */
- for ( StateList::Iter st = graph->stateList; st.lte(); st++ ) {
- for ( TransList::Iter trans = st->outList; trans.lte(); trans++ ) {
- if ( trans->lmActionTable.length() > 0 ) {
- LmActionTableEl *lmAct = trans->lmActionTable.data;
- StateAp *toState = trans->toState;
- assert( toState );
-
- /* Check if there are transitions out, this may be a very
- * close approximation? Out transitions going nowhere?
- * FIXME: Check. */
- if ( toState->outList.length() == 0 ) {
- /* Can execute the immediate action for the longest match
- * part. Redirect the action to the start state. */
- trans->actionTable.setAction( lmAct->key,
- lmAct->value->actOnLast );
- restartTrans.append( trans );
- }
- else {
- /* Look for non final states that have a non-empty item
- * set. If these are present then we need to record the
-					 * end of the token. Also find the highest item set
-					 * length reachable from here (excluding transitions to
- * final states). */
- bool nonFinalNonEmptyItemSet = false;
- maxItemSetLength = 0;
- graph->markReachableFromHereStopFinal( toState );
- for ( StateList::Iter ms = graph->stateList; ms.lte(); ms++ ) {
- if ( ms->stateBits & SB_ISMARKED ) {
- if ( ms->lmItemSet.length() > 0 && !ms->isFinState() )
- nonFinalNonEmptyItemSet = true;
- if ( ms->lmItemSet.length() > maxItemSetLength )
- maxItemSetLength = ms->lmItemSet.length();
- ms->stateBits &= ~ SB_ISMARKED;
- }
- }
-
- /* If there are reachable states that are not final and
- * have non empty item sets or that have an item set
- * length greater than one then we need to set tokend
- * because the error action that matches the token will
- * require it. */
- if ( nonFinalNonEmptyItemSet || maxItemSetLength > 1 )
- trans->actionTable.setAction( pd->setTokEndOrd, pd->setTokEnd );
-
- /* Some states may not know which longest match item to
- * execute, must set it. */
- if ( maxItemSetLength > 1 ) {
- /* There are transitions out, another match may come. */
- trans->actionTable.setAction( lmAct->key,
- lmAct->value->setActId );
- }
- }
- }
- }
- }
-
-	/* Now that all graph searching is done it is certainly safe to set the
- * restarting. It may be safe above, however this must be verified. */
- for ( Vector<TransAp*>::Iter pt = restartTrans; pt.lte(); pt++ )
- restart( graph, *pt );
-
- int lmErrActionOrd = pd->curActionOrd++;
-
- /* Embed the error for recognizing a char. */
- for ( StateList::Iter st = graph->stateList; st.lte(); st++ ) {
- if ( st->lmItemSet.length() == 1 && st->lmItemSet[0] != 0 ) {
- if ( st->isFinState() ) {
-				/* On error execute the actOnNext action, which knows that
- * the last character of the token was one back and restart. */
- graph->setErrorTarget( st, graph->startState, &lmErrActionOrd,
- &st->lmItemSet[0]->actOnNext, 1 );
- }
- else {
- graph->setErrorTarget( st, graph->startState, &lmErrActionOrd,
- &st->lmItemSet[0]->actLagBehind, 1 );
- }
- }
- else if ( st->lmItemSet.length() > 1 ) {
-			/* Need to use the select. Take note of which items the select
- * is needed for so only the necessary actions are included. */
- for ( LmItemSet::Iter plmi = st->lmItemSet; plmi.lte(); plmi++ ) {
- if ( *plmi != 0 )
- (*plmi)->inLmSelect = true;
- }
- /* On error, execute the action select and go to the start state. */
- graph->setErrorTarget( st, graph->startState, &lmErrActionOrd,
- &lmActSelect, 1 );
- }
- }
-
- /* Finally, the start state should be made final. */
- graph->setFinState( graph->startState );
-}
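runLonestMatch leans on one idiom throughout: mark every state reachable from a point (stopping at final states), read a property off the marked states, then clear the SB_ISMARKED bits so the next query starts clean. The toy version below shows that mark/collect/clear idiom on a plain adjacency list, with no FsmAp types involved.

#include <iostream>
#include <vector>

// Mark/collect/clear idiom: flood-fill marks, harvest, then reset.
struct Graph
{
    std::vector<std::vector<int>> adj;   // adjacency list
    std::vector<bool> marked;

    void mark(int s)
    {
        if (marked[s]) return;
        marked[s] = true;
        for (int t : adj[s]) mark(t);
    }
};

int main()
{
    Graph g;
    g.adj = { {1, 2}, {2}, {} };
    g.marked.assign(3, false);

    g.mark(0);                            // mark everything reachable from 0

    int reachable = 0;
    for (std::size_t i = 0; i < g.marked.size(); i++) {
        if (g.marked[i]) {
            reachable++;                  // harvest the property...
            g.marked[i] = false;          // ...and clear for the next query
        }
    }
    std::cout << reachable << " reachable states\n";   // prints 3
    return 0;
}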
-
-FsmAp *LongestMatch::walk( ParseData *pd )
-{
-	/* The longest match has its own name scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* Make each part of the longest match. */
- FsmAp **parts = new FsmAp*[longestMatchList->length()];
- LmPartList::Iter lmi = *longestMatchList;
- for ( int i = 0; lmi.lte(); lmi++, i++ ) {
- /* Create the machine and embed the setting of the longest match id. */
- parts[i] = lmi->join->walk( pd );
- parts[i]->longMatchAction( pd->curActionOrd++, lmi );
- }
-
- /* Union machines one and up with machine zero. The grammar dictates that
- * there will always be at least one part. */
- FsmAp *rtnVal = parts[0];
- for ( int i = 1; i < longestMatchList->length(); i++ ) {
- rtnVal->unionOp( parts[i] );
- afterOpMinimize( rtnVal );
- }
-
- runLonestMatch( pd, rtnVal );
-
- /* Pop the name scope. */
- pd->popNameScope( nameFrame );
-
- delete[] parts;
- return rtnVal;
-}
-
-FsmAp *JoinOrLm::walk( ParseData *pd )
-{
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case JoinType:
- rtnVal = join->walk( pd );
- break;
- case LongestMatchType:
- rtnVal = longestMatch->walk( pd );
- break;
- }
- return rtnVal;
-}
-
-void JoinOrLm::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case JoinType:
- join->makeNameTree( pd );
- break;
- case LongestMatchType:
- longestMatch->makeNameTree( pd );
- break;
- }
-}
-
-void JoinOrLm::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case JoinType:
- join->resolveNameRefs( pd );
- break;
- case LongestMatchType:
- longestMatch->resolveNameRefs( pd );
- break;
- }
-}
-
-
-/* Construct with a location and the first expression. */
-Join::Join( const InputLoc &loc, Expression *expr )
-:
- loc(loc)
-{
- exprList.append( expr );
-}
-
-/* Construct with just the first expression; the location is left unset. */
-Join::Join( Expression *expr )
-{
- exprList.append( expr );
-}
-
-/* Walk an expression node. */
-FsmAp *Join::walk( ParseData *pd )
-{
- if ( exprList.length() > 1 )
- return walkJoin( pd );
- else
- return exprList.head->walk( pd );
-}
-
-/* There is a list of expressions to join. */
-FsmAp *Join::walkJoin( ParseData *pd )
-{
- /* We enter into a new name scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* Evaluate the machines. */
- FsmAp **fsms = new FsmAp*[exprList.length()];
- ExprList::Iter expr = exprList;
- for ( int e = 0; e < exprList.length(); e++, expr++ )
- fsms[e] = expr->walk( pd );
-
- /* Get the start and final names. Final is
- * guaranteed to exist, start is not. */
- NameInst *startName = pd->curNameInst->start;
- NameInst *finalName = pd->curNameInst->final;
-
- int startId = -1;
- if ( startName != 0 ) {
- /* Take note that there was an implicit link to the start machine. */
- pd->localNameScope->referencedNames.append( startName );
- startId = startName->id;
- }
-
- /* A final id of -1 indicates there is no epsilon that references the
-	 * final state, therefore do not create one or set an entry point to it. */
- int finalId = -1;
- if ( finalName->numRefs > 0 )
- finalId = finalName->id;
-
- /* Join machines 1 and up onto machine 0. */
- FsmAp *retFsm = fsms[0];
- retFsm->joinOp( startId, finalId, fsms+1, exprList.length()-1 );
-
-	/* We can now unset entry points that are no longer used. */
- pd->unsetObsoleteEntries( retFsm );
-
- /* Pop the name scope. */
- pd->popNameScope( nameFrame );
-
- delete[] fsms;
- return retFsm;
-}
-
-void Join::makeNameTree( ParseData *pd )
-{
- if ( exprList.length() > 1 ) {
- /* Create the new anonymous scope. */
- NameInst *prevNameInst = pd->curNameInst;
- pd->curNameInst = pd->addNameInst( loc, 0, false );
-
- /* Join scopes need an implicit "final" target. */
- pd->curNameInst->final = new NameInst( InputLoc(), pd->curNameInst, "final",
- pd->nextNameId++, false );
-
- /* Recurse into all expressions in the list. */
- for ( ExprList::Iter expr = exprList; expr.lte(); expr++ )
- expr->makeNameTree( pd );
-
- /* The name scope ends, pop the name instantiation. */
- pd->curNameInst = prevNameInst;
- }
- else {
- /* Recurse into the single expression. */
- exprList.head->makeNameTree( pd );
- }
-}
-
-
-void Join::resolveNameRefs( ParseData *pd )
-{
- /* Branch on whether or not there is to be a join. */
- if ( exprList.length() > 1 ) {
- /* The variable definition enters a new scope. */
- NameFrame nameFrame = pd->enterNameScope( true, 1 );
-
- /* The join scope must contain a start label. */
- NameSet resolved = pd->resolvePart( pd->localNameScope, "start", true );
- if ( resolved.length() > 0 ) {
- /* Take the first. */
- pd->curNameInst->start = resolved[0];
- if ( resolved.length() > 1 ) {
- /* Complain about the multiple references. */
- error(loc) << "multiple start labels" << endl;
- errorStateLabels( resolved );
- }
- }
-
- /* Make sure there is a start label. */
- if ( pd->curNameInst->start != 0 ) {
- /* There is an implicit reference to start name. */
- pd->curNameInst->start->numRefs += 1;
- }
- else {
-			/* No start label. Complain and recover by ignoring the problem. */
- error(loc) << "no start label" << endl;
- }
-
- /* Recurse into all expressions in the list. */
- for ( ExprList::Iter expr = exprList; expr.lte(); expr++ )
- expr->resolveNameRefs( pd );
-
- /* The name scope ends, pop the name instantiation. */
- pd->popNameScope( nameFrame );
- }
- else {
- /* Recurse into the single expression. */
- exprList.head->resolveNameRefs( pd );
- }
-}
-
-/* Clean up after an expression node. */
-Expression::~Expression()
-{
- switch ( type ) {
- case OrType: case IntersectType: case SubtractType:
- case StrongSubtractType:
- delete expression;
- delete term;
- break;
- case TermType:
- delete term;
- break;
- case BuiltinType:
- break;
- }
-}
-
-/* Evaluate a single expression node. */
-FsmAp *Expression::walk( ParseData *pd, bool lastInSeq )
-{
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case OrType: {
- /* Evaluate the expression. */
- rtnVal = expression->walk( pd, false );
- /* Evaluate the term. */
- FsmAp *rhs = term->walk( pd );
- /* Perform union. */
- rtnVal->unionOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case IntersectType: {
- /* Evaluate the expression. */
- rtnVal = expression->walk( pd );
- /* Evaluate the term. */
- FsmAp *rhs = term->walk( pd );
- /* Perform intersection. */
- rtnVal->intersectOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case SubtractType: {
- /* Evaluate the expression. */
- rtnVal = expression->walk( pd );
- /* Evaluate the term. */
- FsmAp *rhs = term->walk( pd );
- /* Perform subtraction. */
- rtnVal->subtractOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case StrongSubtractType: {
- /* Evaluate the expression. */
- rtnVal = expression->walk( pd );
-
- /* Evaluate the term and pad it with any* machines. */
- FsmAp *rhs = dotStarFsm( pd );
- FsmAp *termFsm = term->walk( pd );
- FsmAp *trailAnyStar = dotStarFsm( pd );
- rhs->concatOp( termFsm );
- rhs->concatOp( trailAnyStar );
-
- /* Perform subtraction. */
- rtnVal->subtractOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case TermType: {
- /* Return result of the term. */
- rtnVal = term->walk( pd );
- break;
- }
- case BuiltinType: {
- /* Duplicate the builtin. */
- rtnVal = makeBuiltin( builtin, pd );
- break;
- }
- }
-
- return rtnVal;
-}
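The StrongSubtractType case above builds any* term any* and subtracts it, so the result keeps only those strings of the left machine that do not contain a match of the term anywhere. The tiny string-level check below illustrates that semantics; it is a sketch of the language-level meaning, not the FSM construction.

#include <iostream>
#include <string>

// expr -- term keeps a string only if no occurrence of term appears in it,
// i.e. expr - (any* term any*). This checks just the "contains" part.
static bool survivesStrongSubtract(const std::string &w, const std::string &term)
{
    return w.find(term) == std::string::npos;
}

int main()
{
    std::cout << survivesStrongSubtract("abcdef", "cd") << "\n";  // 0: removed
    std::cout << survivesStrongSubtract("abef", "cd") << "\n";    // 1: kept
    return 0;
}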
-
-void Expression::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case OrType:
- case IntersectType:
- case SubtractType:
- case StrongSubtractType:
- expression->makeNameTree( pd );
- term->makeNameTree( pd );
- break;
- case TermType:
- term->makeNameTree( pd );
- break;
- case BuiltinType:
- break;
- }
-}
-
-void Expression::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case OrType:
- case IntersectType:
- case SubtractType:
- case StrongSubtractType:
- expression->resolveNameRefs( pd );
- term->resolveNameRefs( pd );
- break;
- case TermType:
- term->resolveNameRefs( pd );
- break;
- case BuiltinType:
- break;
- }
-}
-
-/* Clean up after a term node. */
-Term::~Term()
-{
- switch ( type ) {
- case ConcatType:
- case RightStartType:
- case RightFinishType:
- case LeftType:
- delete term;
- delete factorWithAug;
- break;
- case FactorWithAugType:
- delete factorWithAug;
- break;
- }
-}
-
-/* Evaluate a term node. */
-FsmAp *Term::walk( ParseData *pd, bool lastInSeq )
-{
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case ConcatType: {
- /* Evaluate the Term. */
- rtnVal = term->walk( pd, false );
- /* Evaluate the FactorWithRep. */
- FsmAp *rhs = factorWithAug->walk( pd );
- /* Perform concatenation. */
- rtnVal->concatOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case RightStartType: {
- /* Evaluate the Term. */
- rtnVal = term->walk( pd );
-
- /* Evaluate the FactorWithRep. */
- FsmAp *rhs = factorWithAug->walk( pd );
-
- /* Set up the priority descriptors. The left machine gets the
-		 * lower priority whereas the right gets the higher start priority. */
- priorDescs[0].key = pd->nextPriorKey++;
- priorDescs[0].priority = 0;
- rtnVal->allTransPrior( pd->curPriorOrd++, &priorDescs[0] );
-
-		/* The start transitions of the right machine get the higher priority.
- * Use the same unique key. */
- priorDescs[1].key = priorDescs[0].key;
- priorDescs[1].priority = 1;
- rhs->startFsmPrior( pd->curPriorOrd++, &priorDescs[1] );
-
- /* Perform concatenation. */
- rtnVal->concatOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case RightFinishType: {
- /* Evaluate the Term. */
- rtnVal = term->walk( pd );
-
- /* Evaluate the FactorWithRep. */
- FsmAp *rhs = factorWithAug->walk( pd );
-
- /* Set up the priority descriptors. The left machine gets the
-		 * lower priority whereas the finishing transitions to the right
- * get the higher priority. */
- priorDescs[0].key = pd->nextPriorKey++;
- priorDescs[0].priority = 0;
- rtnVal->allTransPrior( pd->curPriorOrd++, &priorDescs[0] );
-
- /* The finishing transitions of the right machine get the higher
- * priority. Use the same unique key. */
- priorDescs[1].key = priorDescs[0].key;
- priorDescs[1].priority = 1;
- rhs->finishFsmPrior( pd->curPriorOrd++, &priorDescs[1] );
-
- /* Perform concatenation. */
- rtnVal->concatOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case LeftType: {
- /* Evaluate the Term. */
- rtnVal = term->walk( pd );
-
- /* Evaluate the FactorWithRep. */
- FsmAp *rhs = factorWithAug->walk( pd );
-
- /* Set up the priority descriptors. The left machine gets the
- * higher priority. */
- priorDescs[0].key = pd->nextPriorKey++;
- priorDescs[0].priority = 1;
- rtnVal->allTransPrior( pd->curPriorOrd++, &priorDescs[0] );
-
- /* The right machine gets the lower priority. Since
- * startTransPrior might unnecessarily increase the number of
- * states during the state machine construction process (due to
- * isolation), we use allTransPrior instead, which has the same
- * effect. */
- priorDescs[1].key = priorDescs[0].key;
- priorDescs[1].priority = 0;
- rhs->allTransPrior( pd->curPriorOrd++, &priorDescs[1] );
-
- /* Perform concatenation. */
- rtnVal->concatOp( rhs );
- afterOpMinimize( rtnVal, lastInSeq );
- break;
- }
- case FactorWithAugType: {
- rtnVal = factorWithAug->walk( pd );
- break;
- }
- }
- return rtnVal;
-}
-
-void Term::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case ConcatType:
- case RightStartType:
- case RightFinishType:
- case LeftType:
- term->makeNameTree( pd );
- factorWithAug->makeNameTree( pd );
- break;
- case FactorWithAugType:
- factorWithAug->makeNameTree( pd );
- break;
- }
-}
-
-void Term::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case ConcatType:
- case RightStartType:
- case RightFinishType:
- case LeftType:
- term->resolveNameRefs( pd );
- factorWithAug->resolveNameRefs( pd );
- break;
- case FactorWithAugType:
- factorWithAug->resolveNameRefs( pd );
- break;
- }
-}
-
-/* Clean up after a factor with augmentation node. */
-FactorWithAug::~FactorWithAug()
-{
- delete factorWithRep;
-
- /* Walk the vector of parser actions, deleting function names. */
-
- /* Clean up priority descriptors. */
- if ( priorDescs != 0 )
- delete[] priorDescs;
-}
-
-void FactorWithAug::assignActions( ParseData *pd, FsmAp *graph, int *actionOrd )
-{
- /* Assign actions. */
- for ( int i = 0; i < actions.length(); i++ ) {
- switch ( actions[i].type ) {
- /* Transition actions. */
- case at_start:
- graph->startFsmAction( actionOrd[i], actions[i].action );
- afterOpMinimize( graph );
- break;
- case at_all:
- graph->allTransAction( actionOrd[i], actions[i].action );
- break;
- case at_finish:
- graph->finishFsmAction( actionOrd[i], actions[i].action );
- break;
- case at_leave:
- graph->leaveFsmAction( actionOrd[i], actions[i].action );
- break;
-
- /* Global error actions. */
- case at_start_gbl_error:
- graph->startErrorAction( actionOrd[i], actions[i].action, 0 );
- afterOpMinimize( graph );
- break;
- case at_all_gbl_error:
- graph->allErrorAction( actionOrd[i], actions[i].action, 0 );
- break;
- case at_final_gbl_error:
- graph->finalErrorAction( actionOrd[i], actions[i].action, 0 );
- break;
- case at_not_start_gbl_error:
- graph->notStartErrorAction( actionOrd[i], actions[i].action, 0 );
- break;
- case at_not_final_gbl_error:
- graph->notFinalErrorAction( actionOrd[i], actions[i].action, 0 );
- break;
- case at_middle_gbl_error:
- graph->middleErrorAction( actionOrd[i], actions[i].action, 0 );
- break;
-
- /* Local error actions. */
- case at_start_local_error:
- graph->startErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- afterOpMinimize( graph );
- break;
- case at_all_local_error:
- graph->allErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- break;
- case at_final_local_error:
- graph->finalErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- break;
- case at_not_start_local_error:
- graph->notStartErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- break;
- case at_not_final_local_error:
- graph->notFinalErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- break;
- case at_middle_local_error:
- graph->middleErrorAction( actionOrd[i], actions[i].action,
- actions[i].localErrKey );
- break;
-
- /* EOF actions. */
- case at_start_eof:
- graph->startEOFAction( actionOrd[i], actions[i].action );
- afterOpMinimize( graph );
- break;
- case at_all_eof:
- graph->allEOFAction( actionOrd[i], actions[i].action );
- break;
- case at_final_eof:
- graph->finalEOFAction( actionOrd[i], actions[i].action );
- break;
- case at_not_start_eof:
- graph->notStartEOFAction( actionOrd[i], actions[i].action );
- break;
- case at_not_final_eof:
- graph->notFinalEOFAction( actionOrd[i], actions[i].action );
- break;
- case at_middle_eof:
- graph->middleEOFAction( actionOrd[i], actions[i].action );
- break;
-
- /* To State Actions. */
- case at_start_to_state:
- graph->startToStateAction( actionOrd[i], actions[i].action );
- afterOpMinimize( graph );
- break;
- case at_all_to_state:
- graph->allToStateAction( actionOrd[i], actions[i].action );
- break;
- case at_final_to_state:
- graph->finalToStateAction( actionOrd[i], actions[i].action );
- break;
- case at_not_start_to_state:
- graph->notStartToStateAction( actionOrd[i], actions[i].action );
- break;
- case at_not_final_to_state:
- graph->notFinalToStateAction( actionOrd[i], actions[i].action );
- break;
- case at_middle_to_state:
- graph->middleToStateAction( actionOrd[i], actions[i].action );
- break;
-
- /* From State Actions. */
- case at_start_from_state:
- graph->startFromStateAction( actionOrd[i], actions[i].action );
- afterOpMinimize( graph );
- break;
- case at_all_from_state:
- graph->allFromStateAction( actionOrd[i], actions[i].action );
- break;
- case at_final_from_state:
- graph->finalFromStateAction( actionOrd[i], actions[i].action );
- break;
- case at_not_start_from_state:
- graph->notStartFromStateAction( actionOrd[i], actions[i].action );
- break;
- case at_not_final_from_state:
- graph->notFinalFromStateAction( actionOrd[i], actions[i].action );
- break;
- case at_middle_from_state:
- graph->middleFromStateAction( actionOrd[i], actions[i].action );
- break;
-
- /* Remaining cases, prevented by the parser. */
- default:
- assert( false );
- break;
- }
- }
-}
-
-void FactorWithAug::assignPriorities( FsmAp *graph, int *priorOrd )
-{
- /* Assign priorities. */
- for ( int i = 0; i < priorityAugs.length(); i++ ) {
- switch ( priorityAugs[i].type ) {
- case at_start:
- graph->startFsmPrior( priorOrd[i], &priorDescs[i]);
- /* Start fsm priorities are a special case that may require
- * minimization afterwards. */
- afterOpMinimize( graph );
- break;
- case at_all:
- graph->allTransPrior( priorOrd[i], &priorDescs[i] );
- break;
- case at_finish:
- graph->finishFsmPrior( priorOrd[i], &priorDescs[i] );
- break;
- case at_leave:
- graph->leaveFsmPrior( priorOrd[i], &priorDescs[i] );
- break;
-
- default:
- /* Parser Prevents this case. */
- break;
- }
- }
-}
-
-void FactorWithAug::assignConditions( FsmAp *graph )
-{
- for ( int i = 0; i < conditions.length(); i++ ) {
- switch ( conditions[i].type ) {
- /* Transition actions. */
- case at_start:
- graph->startFsmCondition( conditions[i].action );
- afterOpMinimize( graph );
- break;
- case at_all:
- graph->allTransCondition( conditions[i].action );
- break;
- case at_leave:
- graph->leaveFsmCondition( conditions[i].action );
- break;
- default:
- break;
- }
- }
-}
-
-
-/* Evaluate a factor with augmentation node. */
-FsmAp *FactorWithAug::walk( ParseData *pd )
-{
- /* Enter into the scopes created for the labels. */
- NameFrame nameFrame = pd->enterNameScope( false, labels.length() );
-
- /* Make the array of function orderings. */
- int *actionOrd = 0;
- if ( actions.length() > 0 )
- actionOrd = new int[actions.length()];
-
- /* First walk the list of actions, assigning order to all starting
- * actions. */
- for ( int i = 0; i < actions.length(); i++ ) {
- if ( actions[i].type == at_start ||
- actions[i].type == at_start_gbl_error ||
- actions[i].type == at_start_local_error ||
- actions[i].type == at_start_to_state ||
- actions[i].type == at_start_from_state ||
- actions[i].type == at_start_eof )
- actionOrd[i] = pd->curActionOrd++;
- }
-
- /* Evaluate the factor with repetition. */
- FsmAp *rtnVal = factorWithRep->walk( pd );
-
- /* Compute the remaining action orderings. */
- for ( int i = 0; i < actions.length(); i++ ) {
- if ( actions[i].type != at_start &&
- actions[i].type != at_start_gbl_error &&
- actions[i].type != at_start_local_error &&
- actions[i].type != at_start_to_state &&
- actions[i].type != at_start_from_state &&
- actions[i].type != at_start_eof )
- actionOrd[i] = pd->curActionOrd++;
- }
-
- /* Embed conditions. */
- assignConditions( rtnVal );
-
- /* Embed actions. */
- assignActions( pd, rtnVal , actionOrd );
-
- /* Make the array of priority orderings. Orderings are local to this walk
- * of the factor with augmentation. */
- int *priorOrd = 0;
- if ( priorityAugs.length() > 0 )
- priorOrd = new int[priorityAugs.length()];
-
- /* Walk all priorities, assigning the priority ordering. */
- for ( int i = 0; i < priorityAugs.length(); i++ )
- priorOrd[i] = pd->curPriorOrd++;
-
- /* If the priority descriptors have not been made, make them now. Make
-	 * priority descriptors for each priority assignment that will be passed to
- * the fsm. Used to keep track of the key, value and used bit. */
- if ( priorDescs == 0 && priorityAugs.length() > 0 ) {
- priorDescs = new PriorDesc[priorityAugs.length()];
- for ( int i = 0; i < priorityAugs.length(); i++ ) {
- /* Init the prior descriptor for the priority setting. */
- priorDescs[i].key = priorityAugs[i].priorKey;
- priorDescs[i].priority = priorityAugs[i].priorValue;
- }
- }
-
- /* Assign priorities into the machine. */
- assignPriorities( rtnVal, priorOrd );
-
- /* Assign epsilon transitions. */
- for ( int e = 0; e < epsilonLinks.length(); e++ ) {
- /* Get the name, which may not exist. If it doesn't then silently
- * ignore it because an error has already been reported. */
- NameInst *epTarg = pd->epsilonResolvedLinks[pd->nextEpsilonResolvedLink++];
- if ( epTarg != 0 ) {
- /* Make the epsilon transitions. */
- rtnVal->epsilonTrans( epTarg->id );
-
- /* Note that we have made a link to the name. */
- pd->localNameScope->referencedNames.append( epTarg );
- }
- }
-
- /* Set entry points for labels. */
- if ( labels.length() > 0 ) {
- /* Pop the names. */
- pd->resetNameScope( nameFrame );
-
- /* Make labels that are referenced into entry points. */
- for ( int i = 0; i < labels.length(); i++ ) {
- pd->enterNameScope( false, 1 );
-
- /* Will always be found. */
- NameInst *name = pd->curNameInst;
-
- /* If the name is referenced then set the entry point. */
- if ( name->numRefs > 0 )
- rtnVal->setEntry( name->id, rtnVal->startState );
- }
-
- pd->popNameScope( nameFrame );
- }
-
- if ( priorOrd != 0 )
- delete[] priorOrd;
- if ( actionOrd != 0 )
- delete[] actionOrd;
- return rtnVal;
-}
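The ordering scheme in FactorWithAug::walk is easy to miss: start-flavoured actions receive their ordinal before the sub-machine is walked (so they sort ahead of anything embedded inside it), and all remaining actions are numbered afterwards. Below is a minimal sketch of that two-pass numbering, using a plain enum and counter rather than the real ParseData.

#include <iostream>
#include <vector>

enum class Kind { Start, Other };

int main()
{
    std::vector<Kind> actions = { Kind::Other, Kind::Start, Kind::Other };
    std::vector<int> ord(actions.size());
    int curOrd = 0;

    // Pass 1: start-type actions are ordered before the sub-machine walk.
    for (std::size_t i = 0; i < actions.size(); i++)
        if (actions[i] == Kind::Start) ord[i] = curOrd++;

    curOrd += 10;  // pretend the sub-machine walk consumed some orderings

    // Pass 2: everything else is ordered after the walk.
    for (std::size_t i = 0; i < actions.size(); i++)
        if (actions[i] != Kind::Start) ord[i] = curOrd++;

    for (int o : ord) std::cout << o << ' ';   // prints "11 0 12"
    std::cout << "\n";
    return 0;
}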
-
-void FactorWithAug::makeNameTree( ParseData *pd )
-{
- /* Add the labels to the tree of instantiated names. Each label
- * makes a new scope. */
- NameInst *prevNameInst = pd->curNameInst;
- for ( int i = 0; i < labels.length(); i++ )
- pd->curNameInst = pd->addNameInst( labels[i].loc, labels[i].data, true );
-
- /* Recurse, then pop the names. */
- factorWithRep->makeNameTree( pd );
- pd->curNameInst = prevNameInst;
-}
-
-
-void FactorWithAug::resolveNameRefs( ParseData *pd )
-{
- /* Enter into the name scope created by any labels. */
- NameFrame nameFrame = pd->enterNameScope( false, labels.length() );
-
- /* Note action references. */
- for ( int i = 0; i < actions.length(); i++ )
- actions[i].action->actionRefs.append( pd->localNameScope );
-
- /* Recurse first. IMPORTANT: we must do the exact same traversal as when
- * the tree is constructed. */
- factorWithRep->resolveNameRefs( pd );
-
- /* Resolve epsilon transitions. */
- for ( int ep = 0; ep < epsilonLinks.length(); ep++ ) {
- /* Get the link. */
- EpsilonLink &link = epsilonLinks[ep];
- NameInst *resolvedName = 0;
-
- if ( link.target.length() == 1 && strcmp( link.target.data[0], "final" ) == 0 ) {
- /* Epsilon drawn to an implicit final state. An implicit final is
- * only available in join operations. */
- resolvedName = pd->localNameScope->final;
- }
- else {
-			/* Do a search for the name. */
- NameSet resolved;
- pd->resolveFrom( resolved, pd->localNameScope, link.target, 0 );
- if ( resolved.length() > 0 ) {
- /* Take the first one. */
- resolvedName = resolved[0];
- if ( resolved.length() > 1 ) {
- /* Complain about the multiple references. */
- error(link.loc) << "state reference " << link.target <<
- " resolves to multiple entry points" << endl;
- errorStateLabels( resolved );
- }
- }
- }
-
- /* This is tricky, we stuff resolved epsilon transitions into one long
- * vector in the parse data structure. Since the name resolution and
- * graph generation both do identical walks of the parse tree we
- * should always find the link resolutions in the right place. */
- pd->epsilonResolvedLinks.append( resolvedName );
-
- if ( resolvedName != 0 ) {
-			/* Found the name, bump up the reference count on it. */
- resolvedName->numRefs += 1;
- }
- else {
- /* Complain, no recovery action, the epsilon op will ignore any
- * epsilon transitions whose names did not resolve. */
- error(link.loc) << "could not resolve label " << link.target << endl;
- }
- }
-
- if ( labels.length() > 0 )
- pd->popNameScope( nameFrame );
-}
-
-
-/* Clean up after a factor with repetition node. */
-FactorWithRep::~FactorWithRep()
-{
- switch ( type ) {
- case StarType: case StarStarType: case OptionalType: case PlusType:
- case ExactType: case MaxType: case MinType: case RangeType:
- delete factorWithRep;
- break;
- case FactorWithNegType:
- delete factorWithNeg;
- break;
- }
-}
-
-/* Evaluate a factor with repetition node. */
-FsmAp *FactorWithRep::walk( ParseData *pd )
-{
- FsmAp *retFsm = 0;
-
- switch ( type ) {
- case StarType: {
- /* Evaluate the FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying kleene star to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* Shift over the start action orders then do the kleene star. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
- retFsm->starOp( );
- afterOpMinimize( retFsm );
- break;
- }
- case StarStarType: {
- /* Evaluate the FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying kleene star to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* Set up the prior descs. All gets priority one, whereas leaving gets
- * priority zero. Make a unique key so that these priorities don't
- * interfere with any priorities set by the user. */
- priorDescs[0].key = pd->nextPriorKey++;
- priorDescs[0].priority = 1;
- retFsm->allTransPrior( pd->curPriorOrd++, &priorDescs[0] );
-
-		/* Leaving gets priority 0. Use the same unique key. */
- priorDescs[1].key = priorDescs[0].key;
- priorDescs[1].priority = 0;
- retFsm->leaveFsmPrior( pd->curPriorOrd++, &priorDescs[1] );
-
- /* Shift over the start action orders then do the kleene star. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
- retFsm->starOp( );
- afterOpMinimize( retFsm );
- break;
- }
- case OptionalType: {
- /* Make the null fsm. */
- FsmAp *nu = new FsmAp();
- nu->lambdaFsm( );
-
- /* Evaluate the FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
-
- /* Perform the question operator. */
- retFsm->unionOp( nu );
- afterOpMinimize( retFsm );
- break;
- }
- case PlusType: {
- /* Evaluate the FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying plus operator to a machine that "
-					"accepts zero length word" << endl;
- }
-
-		/* Need a duplicate for the star end. */
- FsmAp *dup = new FsmAp( *retFsm );
-
- /* The start func orders need to be shifted before doing the star. */
- pd->curActionOrd += dup->shiftStartActionOrder( pd->curActionOrd );
-
- /* Star the duplicate. */
- dup->starOp( );
- afterOpMinimize( dup );
-
- retFsm->concatOp( dup );
- afterOpMinimize( retFsm );
- break;
- }
- case ExactType: {
- /* Get an int from the repetition amount. */
- if ( lowerRep == 0 ) {
- /* No copies. Don't need to evaluate the factorWithRep.
-			 * This defeats the purpose so give a warning. */
- warning(loc) << "exactly zero repetitions results "
- "in the null machine" << endl;
-
- retFsm = new FsmAp();
- retFsm->lambdaFsm();
- }
- else {
- /* Evaluate the first FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying repetition to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* The start func orders need to be shifted before doing the
- * repetition. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
-
- /* Do the repetition on the machine. Already guarded against n == 0 */
- retFsm->repeatOp( lowerRep );
- afterOpMinimize( retFsm );
- }
- break;
- }
- case MaxType: {
- /* Get an int from the repetition amount. */
- if ( upperRep == 0 ) {
- /* No copies. Don't need to evaluate the factorWithRep.
-			 * This defeats the purpose so give a warning. */
- warning(loc) << "max zero repetitions results "
- "in the null machine" << endl;
-
- retFsm = new FsmAp();
- retFsm->lambdaFsm();
- }
- else {
- /* Evaluate the first FactorWithRep. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying max repetition to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* The start func orders need to be shifted before doing the
- * repetition. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
-
- /* Do the repetition on the machine. Already guarded against n == 0 */
- retFsm->optionalRepeatOp( upperRep );
- afterOpMinimize( retFsm );
- }
- break;
- }
- case MinType: {
- /* Evaluate the repeated machine. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying min repetition to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* The start func orders need to be shifted before doing the repetition
- * and the kleene star. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
-
- if ( lowerRep == 0 ) {
- /* Acts just like a star op on the machine to return. */
- retFsm->starOp( );
- afterOpMinimize( retFsm );
- }
- else {
- /* Take a duplicate for the plus. */
- FsmAp *dup = new FsmAp( *retFsm );
-
- /* Do repetition on the first half. */
- retFsm->repeatOp( lowerRep );
- afterOpMinimize( retFsm );
-
- /* Star the duplicate. */
- dup->starOp( );
- afterOpMinimize( dup );
-
-			/* Tack on the kleene star. */
- retFsm->concatOp( dup );
- afterOpMinimize( retFsm );
- }
- break;
- }
- case RangeType: {
- /* Check for bogus range. */
- if ( upperRep - lowerRep < 0 ) {
- error(loc) << "invalid range repetition" << endl;
-
- /* Return null machine as recovery. */
- retFsm = new FsmAp();
- retFsm->lambdaFsm();
- }
- else if ( lowerRep == 0 && upperRep == 0 ) {
- /* No copies. Don't need to evaluate the factorWithRep. This
- * defeats the purpose so give a warning. */
- warning(loc) << "zero to zero repetitions results "
- "in the null machine" << endl;
-
- retFsm = new FsmAp();
- retFsm->lambdaFsm();
- }
- else {
- /* Now need to evaluate the repeated machine. */
- retFsm = factorWithRep->walk( pd );
- if ( retFsm->startState->isFinState() ) {
- warning(loc) << "applying range repetition to a machine that "
- "accepts zero length word" << endl;
- }
-
- /* The start func orders need to be shifted before doing both kinds
- * of repetition. */
- pd->curActionOrd += retFsm->shiftStartActionOrder( pd->curActionOrd );
-
- if ( lowerRep == 0 ) {
- /* Just doing max repetition. Already guarded against n == 0. */
- retFsm->optionalRepeatOp( upperRep );
- afterOpMinimize( retFsm );
- }
- else if ( lowerRep == upperRep ) {
- /* Just doing exact repetition. Already guarded against n == 0. */
- retFsm->repeatOp( lowerRep );
- afterOpMinimize( retFsm );
- }
- else {
- /* This is the case that 0 < lowerRep < upperRep. Take a
- * duplicate for the optional repeat. */
- FsmAp *dup = new FsmAp( *retFsm );
-
- /* Do repetition on the first half. */
- retFsm->repeatOp( lowerRep );
- afterOpMinimize( retFsm );
-
- /* Do optional repetition on the second half. */
- dup->optionalRepeatOp( upperRep - lowerRep );
- afterOpMinimize( dup );
-
-				/* Tack on the duplicate machine. */
- retFsm->concatOp( dup );
- afterOpMinimize( retFsm );
- }
- }
- break;
- }
- case FactorWithNegType: {
- /* Evaluate the Factor. Pass it up. */
- retFsm = factorWithNeg->walk( pd );
- break;
- }}
- return retFsm;
-}
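The repetition cases above all reduce to three primitives: exact repetition, optional ("up to n") repetition, and the kleene star, with {n,} built as repeat(n) followed by a star and {n,m} as repeat(n) followed by optionalRepeat(m-n). The sketch below reproduces that decomposition on plain regex-like strings (not the FsmAp API) to make the mapping explicit.

#include <iostream>
#include <string>

// Build a regex-like string for e{low,high}; high < 0 means "no upper bound".
// This mirrors the branch structure of FactorWithRep::walk, not its FSM ops.
static std::string repeatRange(const std::string &e, int low, int high)
{
    auto exact = [&](int n) {                 // e repeated exactly n times
        std::string r;
        for (int i = 0; i < n; i++) r += e;
        return r;
    };
    auto upTo = [&](int n) {                  // zero to n copies of e
        std::string r;
        for (int i = 0; i < n; i++) r += "(" + e + ")?";
        return r;
    };

    if (high < 0)                             // e{low,} = exact(low) . e*
        return exact(low) + "(" + e + ")*";
    if (low == 0)                             // e{0,high} = upTo(high)
        return upTo(high);
    if (low == high)                          // e{n} = exact(n)
        return exact(low);
    return exact(low) + upTo(high - low);     // e{low,high}, 0 < low < high
}

int main()
{
    std::cout << repeatRange("a", 2, 4) << "\n";   // aa(a)?(a)?
    std::cout << repeatRange("a", 3, -1) << "\n";  // aaa(a)*
    return 0;
}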
-
-void FactorWithRep::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case StarType:
- case StarStarType:
- case OptionalType:
- case PlusType:
- case ExactType:
- case MaxType:
- case MinType:
- case RangeType:
- factorWithRep->makeNameTree( pd );
- break;
- case FactorWithNegType:
- factorWithNeg->makeNameTree( pd );
- break;
- }
-}
-
-void FactorWithRep::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case StarType:
- case StarStarType:
- case OptionalType:
- case PlusType:
- case ExactType:
- case MaxType:
- case MinType:
- case RangeType:
- factorWithRep->resolveNameRefs( pd );
- break;
- case FactorWithNegType:
- factorWithNeg->resolveNameRefs( pd );
- break;
- }
-}
-
-/* Clean up after a factor with negation node. */
-FactorWithNeg::~FactorWithNeg()
-{
- switch ( type ) {
- case NegateType:
- case CharNegateType:
- delete factorWithNeg;
- break;
- case FactorType:
- delete factor;
- break;
- }
-}
-
-/* Evaluate a factor with negation node. */
-FsmAp *FactorWithNeg::walk( ParseData *pd )
-{
- FsmAp *retFsm = 0;
-
- switch ( type ) {
- case NegateType: {
- /* Evaluate the factorWithNeg. */
- FsmAp *toNegate = factorWithNeg->walk( pd );
-
- /* Negation is subtract from dot-star. */
- retFsm = dotStarFsm( pd );
- retFsm->subtractOp( toNegate );
- afterOpMinimize( retFsm );
- break;
- }
- case CharNegateType: {
- /* Evaluate the factorWithNeg. */
- FsmAp *toNegate = factorWithNeg->walk( pd );
-
- /* CharNegation is subtract from dot. */
- retFsm = dotFsm( pd );
- retFsm->subtractOp( toNegate );
- afterOpMinimize( retFsm );
- break;
- }
- case FactorType: {
- /* Evaluate the Factor. Pass it up. */
- retFsm = factor->walk( pd );
- break;
- }}
- return retFsm;
-}
-
-void FactorWithNeg::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case NegateType:
- case CharNegateType:
- factorWithNeg->makeNameTree( pd );
- break;
- case FactorType:
- factor->makeNameTree( pd );
- break;
- }
-}
-
-void FactorWithNeg::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case NegateType:
- case CharNegateType:
- factorWithNeg->resolveNameRefs( pd );
- break;
- case FactorType:
- factor->resolveNameRefs( pd );
- break;
- }
-}
-
-/* Clean up after a factor node. */
-Factor::~Factor()
-{
- switch ( type ) {
- case LiteralType:
- delete literal;
- break;
- case RangeType:
- delete range;
- break;
- case OrExprType:
- delete reItem;
- break;
- case RegExprType:
- delete regExpr;
- break;
- case ReferenceType:
- break;
- case ParenType:
- delete join;
- break;
- case LongestMatchType:
- delete longestMatch;
- break;
- }
-}
-
-/* Evaluate a factor node. */
-FsmAp *Factor::walk( ParseData *pd )
-{
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case LiteralType:
- rtnVal = literal->walk( pd );
- break;
- case RangeType:
- rtnVal = range->walk( pd );
- break;
- case OrExprType:
- rtnVal = reItem->walk( pd, 0 );
- break;
- case RegExprType:
- rtnVal = regExpr->walk( pd, 0 );
- break;
- case ReferenceType:
- rtnVal = varDef->walk( pd );
- break;
- case ParenType:
- rtnVal = join->walk( pd );
- break;
- case LongestMatchType:
- rtnVal = longestMatch->walk( pd );
- break;
- }
-
- return rtnVal;
-}
-
-void Factor::makeNameTree( ParseData *pd )
-{
- switch ( type ) {
- case LiteralType:
- case RangeType:
- case OrExprType:
- case RegExprType:
- break;
- case ReferenceType:
- varDef->makeNameTree( loc, pd );
- break;
- case ParenType:
- join->makeNameTree( pd );
- break;
- case LongestMatchType:
- longestMatch->makeNameTree( pd );
- break;
- }
-}
-
-void Factor::resolveNameRefs( ParseData *pd )
-{
- switch ( type ) {
- case LiteralType:
- case RangeType:
- case OrExprType:
- case RegExprType:
- break;
- case ReferenceType:
- varDef->resolveNameRefs( pd );
- break;
- case ParenType:
- join->resolveNameRefs( pd );
- break;
- case LongestMatchType:
- longestMatch->resolveNameRefs( pd );
- break;
- }
-}
-
-/* Clean up a range object. Must delete the two literals. */
-Range::~Range()
-{
- delete lowerLit;
- delete upperLit;
-}
-
-/* Evaluate a range. Gets the lower and upper key and makes an fsm range. */
-FsmAp *Range::walk( ParseData *pd )
-{
- /* Construct and verify the suitability of the lower end of the range. */
- FsmAp *lowerFsm = lowerLit->walk( pd );
- if ( !lowerFsm->checkSingleCharMachine() ) {
- error(lowerLit->token.loc) <<
- "bad range lower end, must be a single character" << endl;
- }
-
- /* Construct and verify the upper end. */
- FsmAp *upperFsm = upperLit->walk( pd );
- if ( !upperFsm->checkSingleCharMachine() ) {
- error(upperLit->token.loc) <<
- "bad range upper end, must be a single character" << endl;
- }
-
- /* Grab the keys from the machines, then delete them. */
- Key lowKey = lowerFsm->startState->outList.head->lowKey;
- Key highKey = upperFsm->startState->outList.head->lowKey;
- delete lowerFsm;
- delete upperFsm;
-
- /* Validate the range. */
- if ( lowKey > highKey ) {
- /* Recover by setting upper to lower; */
-		error(lowerLit->token.loc) << "lower end of range is greater than upper end" << endl;
- highKey = lowKey;
- }
-
- /* Return the range now that it is validated. */
- FsmAp *retFsm = new FsmAp();
- retFsm->rangeFsm( lowKey, highKey );
- return retFsm;
-}
-
-/* Evaluate a literal object. */
-FsmAp *Literal::walk( ParseData *pd )
-{
- /* FsmAp to return, is the alphabet signed. */
- FsmAp *rtnVal = 0;
-
- switch ( type ) {
- case Number: {
- /* Make the fsm key in int format. */
- Key fsmKey = makeFsmKeyNum( token.data, token.loc, pd );
- /* Make the new machine. */
- rtnVal = new FsmAp();
- rtnVal->concatFsm( fsmKey );
- break;
- }
- case LitString: {
- /* Make the array of keys in int format. */
- Token interp;
- bool caseInsensitive;
- token.prepareLitString( interp, caseInsensitive );
- Key *arr = new Key[interp.length];
- makeFsmKeyArray( arr, interp.data, interp.length, pd );
-
- /* Make the new machine. */
- rtnVal = new FsmAp();
- if ( caseInsensitive )
- rtnVal->concatFsmCI( arr, interp.length );
- else
- rtnVal->concatFsm( arr, interp.length );
- delete[] interp.data;
- delete[] arr;
- break;
- }}
- return rtnVal;
-}
-
-/* Clean up after a regular expression object. */
-RegExpr::~RegExpr()
-{
- switch ( type ) {
- case RecurseItem:
- delete regExpr;
- delete item;
- break;
- case Empty:
- break;
- }
-}
-
-/* Evaluate a regular expression object. */
-FsmAp *RegExpr::walk( ParseData *pd, RegExpr *rootRegex )
-{
- /* This is the root regex, pass down a pointer to this. */
- if ( rootRegex == 0 )
- rootRegex = this;
-
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case RecurseItem: {
- /* Walk both items. */
- rtnVal = regExpr->walk( pd, rootRegex );
- FsmAp *fsm2 = item->walk( pd, rootRegex );
- rtnVal->concatOp( fsm2 );
- break;
- }
- case Empty: {
- rtnVal = new FsmAp();
- rtnVal->lambdaFsm();
- break;
- }
- }
- return rtnVal;
-}
-
-/* Clean up after an item in a regular expression. */
-ReItem::~ReItem()
-{
- switch ( type ) {
- case Data:
- case Dot:
- break;
- case OrBlock:
- case NegOrBlock:
- delete orBlock;
- break;
- }
-}
-
-/* Evaluate a regular expression object. */
-FsmAp *ReItem::walk( ParseData *pd, RegExpr *rootRegex )
-{
- /* The fsm to return, is the alphabet signed? */
- FsmAp *rtnVal = 0;
-
- switch ( type ) {
- case Data: {
- /* Move the data into an integer array and make a concat fsm. */
- Key *arr = new Key[token.length];
- makeFsmKeyArray( arr, token.data, token.length, pd );
-
- /* Make the concat fsm. */
- rtnVal = new FsmAp();
- if ( rootRegex != 0 && rootRegex->caseInsensitive )
- rtnVal->concatFsmCI( arr, token.length );
- else
- rtnVal->concatFsm( arr, token.length );
- delete[] arr;
- break;
- }
- case Dot: {
- /* Make the dot fsm. */
- rtnVal = dotFsm( pd );
- break;
- }
- case OrBlock: {
-		/* Get the or block and minimize it. */
- rtnVal = orBlock->walk( pd, rootRegex );
- if ( rtnVal == 0 ) {
- rtnVal = new FsmAp();
- rtnVal->lambdaFsm();
- }
- rtnVal->minimizePartition2();
- break;
- }
- case NegOrBlock: {
- /* Get the or block and minimize it. */
- FsmAp *fsm = orBlock->walk( pd, rootRegex );
- fsm->minimizePartition2();
-
- /* Make a dot fsm and subtract from it. */
- rtnVal = dotFsm( pd );
- rtnVal->subtractOp( fsm );
- rtnVal->minimizePartition2();
- break;
- }
- }
-
- /* If the item is followed by a star, then apply the star op. */
- if ( star ) {
- if ( rtnVal->startState->isFinState() ) {
- warning(loc) << "applying kleene star to a machine that "
-				"accepts zero length word" << endl;
- }
-
- rtnVal->starOp();
- rtnVal->minimizePartition2();
- }
- return rtnVal;
-}
-
-/* Clean up after an or block of a regular expression. */
-ReOrBlock::~ReOrBlock()
-{
- switch ( type ) {
- case RecurseItem:
- delete orBlock;
- delete item;
- break;
- case Empty:
- break;
- }
-}
-
-
-/* Evaluate an or block of a regular expression. */
-FsmAp *ReOrBlock::walk( ParseData *pd, RegExpr *rootRegex )
-{
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case RecurseItem: {
- /* Evaluate the two fsm. */
- FsmAp *fsm1 = orBlock->walk( pd, rootRegex );
- FsmAp *fsm2 = item->walk( pd, rootRegex );
- if ( fsm1 == 0 )
- rtnVal = fsm2;
- else {
- fsm1->unionOp( fsm2 );
- rtnVal = fsm1;
- }
- break;
- }
- case Empty: {
- rtnVal = 0;
- break;
- }
- }
-	return rtnVal;
-}
-
-/* Evaluate an or block item of a regular expression. */
-FsmAp *ReOrItem::walk( ParseData *pd, RegExpr *rootRegex )
-{
- /* The return value, is the alphabet signed? */
- FsmAp *rtnVal = 0;
- switch ( type ) {
- case Data: {
- /* Make the or machine. */
- rtnVal = new FsmAp();
-
- /* Put the or data into an array of ints. Note that we find unique
- * keys. Duplicates are silently ignored. The alternative would be to
-	 * issue a warning or an error, but since we can't with [a0-9a] or
-	 * 'a' | 'a', don't bother here. */
- KeySet keySet;
- makeFsmUniqueKeyArray( keySet, token.data, token.length,
- rootRegex != 0 ? rootRegex->caseInsensitive : false, pd );
-
- /* Run the or operator. */
- rtnVal->orFsm( keySet.data, keySet.length() );
- break;
- }
- case Range: {
- /* Make the upper and lower keys. */
- Key lowKey = makeFsmKeyChar( lower, pd );
- Key highKey = makeFsmKeyChar( upper, pd );
-
- /* Validate the range. */
- if ( lowKey > highKey ) {
- /* Recover by setting upper to lower; */
-			error(loc) << "lower end of range is greater than upper end" << endl;
- highKey = lowKey;
- }
-
- /* Make the range machine. */
- rtnVal = new FsmAp();
- rtnVal->rangeFsm( lowKey, highKey );
-
- if ( rootRegex != 0 && rootRegex->caseInsensitive ) {
- if ( lowKey <= 'Z' && 'A' <= highKey ) {
- Key otherLow = lowKey < 'A' ? Key('A') : lowKey;
- Key otherHigh = 'Z' < highKey ? Key('Z') : highKey;
-
- otherLow = 'a' + ( otherLow - 'A' );
- otherHigh = 'a' + ( otherHigh - 'A' );
-
- FsmAp *otherRange = new FsmAp();
- otherRange->rangeFsm( otherLow, otherHigh );
- rtnVal->unionOp( otherRange );
- rtnVal->minimizePartition2();
- }
- else if ( lowKey <= 'z' && 'a' <= highKey ) {
- Key otherLow = lowKey < 'a' ? Key('a') : lowKey;
- Key otherHigh = 'z' < highKey ? Key('z') : highKey;
-
- otherLow = 'A' + ( otherLow - 'a' );
- otherHigh = 'A' + ( otherHigh - 'a' );
-
- FsmAp *otherRange = new FsmAp();
- otherRange->rangeFsm( otherLow, otherHigh );
- rtnVal->unionOp( otherRange );
- rtnVal->minimizePartition2();
- }
- }
-
- break;
- }}
- return rtnVal;
-}
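A pattern worth calling out before the header below: every node type in this file implements the same three traversals — makeNameTree builds the scope tree, resolveNameRefs binds label references, and walk constructs the machines — and the comments stress that the passes must visit nodes in exactly the same order so that resolved links line up by position. The toy sketch below shows that discipline with hypothetical node types, not the Ragel classes.

#include <iostream>
#include <memory>
#include <string>
#include <vector>

// Toy parse-tree node mirroring the three-pass shape used in parsetree.cpp.
struct Node
{
    std::string name;
    std::vector<std::unique_ptr<Node>> kids;

    // Pass 1: declare scopes, in traversal order.
    void makeNameTree(std::vector<std::string> &names) const
    {
        names.push_back(name);
        for (const auto &k : kids) k->makeNameTree(names);
    }
    // Pass 2: resolve references; the shared cursor works only because the
    // traversal order is identical to pass 1.
    void resolveNameRefs(std::size_t &cursor,
                         const std::vector<std::string> &names) const
    {
        std::cout << "resolve " << names[cursor++] << "\n";
        for (const auto &k : kids) k->resolveNameRefs(cursor, names);
    }
    // Pass 3: build machines, again consuming positions in the same order.
    void walk(std::size_t &cursor,
              const std::vector<std::string> &names) const
    {
        std::cout << "build " << names[cursor++] << "\n";
        for (const auto &k : kids) k->walk(cursor, names);
    }
};

int main()
{
    Node root;
    root.name = "root";
    root.kids.push_back(std::unique_ptr<Node>(new Node));
    root.kids.back()->name = "child";

    std::vector<std::string> names;
    root.makeNameTree(names);              // pass 1: collect scopes

    std::size_t c1 = 0, c2 = 0;
    root.resolveNameRefs(c1, names);       // pass 2: bind references
    root.walk(c2, names);                  // pass 3: build machines
    return 0;
}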
diff --git a/contrib/tools/ragel5/ragel/parsetree.h b/contrib/tools/ragel5/ragel/parsetree.h
deleted file mode 100644
index 4f398683a9..0000000000
--- a/contrib/tools/ragel5/ragel/parsetree.h
+++ /dev/null
@@ -1,755 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _PARSETREE_H
-#define _PARSETREE_H
-
-#include "ragel.h"
-#include "avlmap.h"
-#include "bstmap.h"
-#include "vector.h"
-#include "dlist.h"
-
-struct NameInst;
-
-/* Types of builtin machines. */
-enum BuiltinMachine
-{
- BT_Any,
- BT_Ascii,
- BT_Extend,
- BT_Alpha,
- BT_Digit,
- BT_Alnum,
- BT_Lower,
- BT_Upper,
- BT_Cntrl,
- BT_Graph,
- BT_Print,
- BT_Punct,
- BT_Space,
- BT_Xdigit,
- BT_Lambda,
- BT_Empty
-};
-
-
-struct ParseData;
-
-/* Leaf type. */
-struct Literal;
-
-/* Tree nodes. */
-
-struct Term;
-struct FactorWithAug;
-struct FactorWithRep;
-struct FactorWithNeg;
-struct Factor;
-struct Expression;
-struct Join;
-struct JoinOrLm;
-struct LongestMatch;
-struct LongestMatchPart;
-struct LmPartList;
-struct Range;
-
-/* Type of augmentation. Describes locations in the machine. */
-enum AugType
-{
- /* Transition actions/priorities. */
- at_start,
- at_all,
- at_finish,
- at_leave,
-
- /* Global error actions. */
- at_start_gbl_error,
- at_all_gbl_error,
- at_final_gbl_error,
- at_not_start_gbl_error,
- at_not_final_gbl_error,
- at_middle_gbl_error,
-
- /* Local error actions. */
- at_start_local_error,
- at_all_local_error,
- at_final_local_error,
- at_not_start_local_error,
- at_not_final_local_error,
- at_middle_local_error,
-
- /* To State Action embedding. */
- at_start_to_state,
- at_all_to_state,
- at_final_to_state,
- at_not_start_to_state,
- at_not_final_to_state,
- at_middle_to_state,
-
- /* From State Action embedding. */
- at_start_from_state,
- at_all_from_state,
- at_final_from_state,
- at_not_start_from_state,
- at_not_final_from_state,
- at_middle_from_state,
-
- /* EOF Action embedding. */
- at_start_eof,
- at_all_eof,
- at_final_eof,
- at_not_start_eof,
- at_not_final_eof,
- at_middle_eof
-};
-
-/* IMPORTANT: These must follow the same order as the state augs in AugType
- * since we will be using this to compose AugType. */
-enum StateAugType
-{
- sat_start = 0,
- sat_all,
- sat_final,
- sat_not_start,
- sat_not_final,
- sat_middle
-};
-
-struct Action;
-struct PriorDesc;
-struct RegExpr;
-struct ReItem;
-struct ReOrBlock;
-struct ReOrItem;
-struct ExplicitMachine;
-struct InlineItem;
-struct InlineList;
-
-/* Reference to a named state. */
-typedef Vector<char*> NameRef;
-typedef Vector<NameRef*> NameRefList;
-typedef Vector<NameInst*> NameTargList;
-
-/* Structure for storing the location of epsilon transitions. */
-struct EpsilonLink
-{
- EpsilonLink( const InputLoc &loc, NameRef &target )
- : loc(loc), target(target) { }
-
- InputLoc loc;
- NameRef target;
-};
-
-struct Label
-{
- Label( const InputLoc &loc, char *data )
- : loc(loc), data(data) { }
-
- InputLoc loc;
- char *data;
-};
-
-/* Structure represents an action assigned to some FactorWithAug node. The
- * factor with aug will keep an array of these. */
-struct ParserAction
-{
- ParserAction( const InputLoc &loc, AugType type, int localErrKey, Action *action )
- : loc(loc), type(type), localErrKey(localErrKey), action(action) { }
-
- InputLoc loc;
- AugType type;
- int localErrKey;
- Action *action;
-};
-
-struct Token
-{
- char *data;
- int length;
- InputLoc loc;
-
- void prepareLitString( Token &result, bool &caseInsensitive );
- void append( const Token &other );
- void set(const char *str, int len );
-};
-
-/* Store the value and type of a priority augmentation. */
-struct PriorityAug
-{
- PriorityAug( AugType type, int priorKey, int priorValue ) :
- type(type), priorKey(priorKey), priorValue(priorValue) { }
-
- AugType type;
- int priorKey;
- int priorValue;
-};
-
-/*
- * A Variable Definition
- */
-struct VarDef
-{
- VarDef(const char *name, JoinOrLm *joinOrLm )
- : name(name), joinOrLm(joinOrLm), isExport(false) { }
-
- /* Parse tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( const InputLoc &loc, ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- const char *name;
- JoinOrLm *joinOrLm;
- bool isExport;
-};
-
-
-/*
- * LongestMatch
- *
- * Wherever possible the item match will execute on the character. If not
- * possible the item match will execute on a lookahead character and either
- * hold the current char (if one away) or back up.
- *
- * How to handle the problem of backing up over a buffer break?
- *
- * Don't want to use pending out transitions for embedding item match because
- * the role of item match action is different: it may sometimes match on the
- * final transition, or may match on a lookahead character.
- *
- * Don't want to invent a new operator just for this. So just trail action
- * after machine, this means we can only use literal actions.
- *
- * The item action may
- *
- * Which states of the machine will be final? The item actions that wrap around
- * on the last character will go straight to the start state.
- *
- * Some transitions will be lookahead transitions; they will hold the current
- * character. Crossing them with regular transitions must be restricted
- * because it does not make sense. The transition cannot simultaneously hold
- * and consume the current character.
- */
-struct LongestMatchPart
-{
- LongestMatchPart( Join *join, Action *action,
- InputLoc &semiLoc, int longestMatchId )
- :
- join(join), action(action), semiLoc(semiLoc),
- longestMatchId(longestMatchId), inLmSelect(false) { }
-
- InputLoc getLoc();
-
- Join *join;
- Action *action;
- InputLoc semiLoc;
-
- Action *setActId;
- Action *actOnLast;
- Action *actOnNext;
- Action *actLagBehind;
- int longestMatchId;
- bool inLmSelect;
- LongestMatch *longestMatch;
-
- LongestMatchPart *prev, *next;
-};
-
-/* Declare a new type so that ptreetypes.h need not include dlist.h. */
-struct LmPartList : DList<LongestMatchPart> {};
-
-struct LongestMatch
-{
- /* Construct with a list of joins */
- LongestMatch( const InputLoc &loc, LmPartList *longestMatchList ) :
- loc(loc), longestMatchList(longestMatchList), name(0),
- lmSwitchHandlesError(false) { }
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
- void runLonestMatch( ParseData *pd, FsmAp *graph );
- Action *newAction( ParseData *pd, const InputLoc &loc, const char *name,
- InlineList *inlineList );
- void makeActions( ParseData *pd );
- void findName( ParseData *pd );
- void restart( FsmAp *graph, TransAp *trans );
-
- InputLoc loc;
- LmPartList *longestMatchList;
- const char *name;
-
- Action *lmActSelect;
- bool lmSwitchHandlesError;
-
- LongestMatch *next, *prev;
-};
-
-
-/* List of Expressions. */
-typedef DList<Expression> ExprList;
-
-struct JoinOrLm
-{
- enum Type {
- JoinType,
- LongestMatchType
- };
-
- JoinOrLm( Join *join ) :
- join(join), type(JoinType) {}
- JoinOrLm( LongestMatch *longestMatch ) :
- longestMatch(longestMatch), type(LongestMatchType) {}
-
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- Join *join;
- LongestMatch *longestMatch;
- Type type;
-};
-
-/*
- * Join
- */
-struct Join
-{
- /* Construct with the first expression. */
- Join( Expression *expr );
- Join( const InputLoc &loc, Expression *expr );
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- FsmAp *walkJoin( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- /* Data. */
- InputLoc loc;
- ExprList exprList;
-};
-
-/*
- * Expression
- */
-struct Expression
-{
- enum Type {
- OrType,
- IntersectType,
- SubtractType,
- StrongSubtractType,
- TermType,
- BuiltinType
- };
-
- /* Construct with an expression on the left and a term on the right. */
- Expression( Expression *expression, Term *term, Type type ) :
- expression(expression), term(term),
- builtin(builtin), type(type), prev(this), next(this) { }
-
- /* Construct with only a term. */
- Expression( Term *term ) :
- expression(0), term(term), builtin(builtin),
- type(TermType) , prev(this), next(this) { }
-
- /* Construct with a builtin type. */
- Expression( BuiltinMachine builtin ) :
- expression(0), term(0), builtin(builtin),
- type(BuiltinType), prev(this), next(this) { }
-
- ~Expression();
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd, bool lastInSeq = true );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- /* Node data. */
- Expression *expression;
- Term *term;
- BuiltinMachine builtin;
- Type type;
-
- Expression *prev, *next;
-};
-
-/*
- * Term
- */
-struct Term
-{
- enum Type {
- ConcatType,
- RightStartType,
- RightFinishType,
- LeftType,
- FactorWithAugType
- };
-
- Term( Term *term, FactorWithAug *factorWithAug ) :
- term(term), factorWithAug(factorWithAug), type(ConcatType) { }
-
- Term( Term *term, FactorWithAug *factorWithAug, Type type ) :
- term(term), factorWithAug(factorWithAug), type(type) { }
-
- Term( FactorWithAug *factorWithAug ) :
- term(0), factorWithAug(factorWithAug), type(FactorWithAugType) { }
-
- ~Term();
-
- FsmAp *walk( ParseData *pd, bool lastInSeq = true );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- Term *term;
- FactorWithAug *factorWithAug;
- Type type;
-
- /* Priority descriptor for RightFinish type. */
- PriorDesc priorDescs[2];
-};
-
-
-/* Third level of precedence. Augmenting nodes with actions and priorities. */
-struct FactorWithAug
-{
- FactorWithAug( FactorWithRep *factorWithRep ) :
- priorDescs(0), factorWithRep(factorWithRep) { }
- ~FactorWithAug();
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- void assignActions( ParseData *pd, FsmAp *graph, int *actionOrd );
- void assignPriorities( FsmAp *graph, int *priorOrd );
-
- void assignConditions( FsmAp *graph );
-
- /* Actions and priorities assigned to the factor node. */
- Vector<ParserAction> actions;
- Vector<PriorityAug> priorityAugs;
- PriorDesc *priorDescs;
- Vector<Label> labels;
- Vector<EpsilonLink> epsilonLinks;
- Vector<ParserAction> conditions;
-
- FactorWithRep *factorWithRep;
-};
-
-/* Fourth level of precedence. Trailing unary operators. Provide Kleene star,
- * optional and plus. */
-struct FactorWithRep
-{
- enum Type {
- StarType,
- StarStarType,
- OptionalType,
- PlusType,
- ExactType,
- MaxType,
- MinType,
- RangeType,
- FactorWithNegType
- };
-
- FactorWithRep( const InputLoc &loc, FactorWithRep *factorWithRep,
- int lowerRep, int upperRep, Type type ) :
- loc(loc), factorWithRep(factorWithRep),
- factorWithNeg(0), lowerRep(lowerRep),
- upperRep(upperRep), type(type) { }
-
- FactorWithRep( FactorWithNeg *factorWithNeg )
- : factorWithNeg(factorWithNeg), type(FactorWithNegType) { }
-
- ~FactorWithRep();
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- InputLoc loc;
- FactorWithRep *factorWithRep;
- FactorWithNeg *factorWithNeg;
- int lowerRep, upperRep;
- Type type;
-
- /* Priority descriptor for StarStar type. */
- PriorDesc priorDescs[2];
-};
-
-/* Fifth level of precedence. Provides Negation. */
-struct FactorWithNeg
-{
- enum Type {
- NegateType,
- CharNegateType,
- FactorType
- };
-
- FactorWithNeg( const InputLoc &loc, FactorWithNeg *factorWithNeg, Type type) :
- loc(loc), factorWithNeg(factorWithNeg), factor(0), type(type) { }
-
- FactorWithNeg( Factor *factor ) :
- factorWithNeg(0), factor(factor), type(FactorType) { }
-
- ~FactorWithNeg();
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- InputLoc loc;
- FactorWithNeg *factorWithNeg;
- Factor *factor;
- Type type;
-};
-
-/*
- * Factor
- */
-struct Factor
-{
- /* Language elements a factor node can be. */
- enum Type {
- LiteralType,
- RangeType,
- OrExprType,
- RegExprType,
- ReferenceType,
- ParenType,
- LongestMatchType,
- };
-
- /* Construct with a literal fsm. */
- Factor( Literal *literal ) :
- literal(literal), type(LiteralType) { }
-
- /* Construct with a range. */
- Factor( Range *range ) :
- range(range), type(RangeType) { }
-
- /* Construct with the or part of a regular expression. */
- Factor( ReItem *reItem ) :
- reItem(reItem), type(OrExprType) { }
-
- /* Construct with a regular expression. */
- Factor( RegExpr *regExpr ) :
- regExpr(regExpr), type(RegExprType) { }
-
- /* Construct with a reference to a var def. */
- Factor( const InputLoc &loc, VarDef *varDef ) :
- loc(loc), varDef(varDef), type(ReferenceType) {}
-
- /* Construct with a parenthesized join. */
- Factor( Join *join ) :
- join(join), type(ParenType) {}
-
- /* Construct with a longest match operator. */
- Factor( LongestMatch *longestMatch ) :
- longestMatch(longestMatch), type(LongestMatchType) {}
-
- /* Cleanup. */
- ~Factor();
-
- /* Tree traversal. */
- FsmAp *walk( ParseData *pd );
- void makeNameTree( ParseData *pd );
- void resolveNameRefs( ParseData *pd );
-
- InputLoc loc;
- Literal *literal;
- Range *range;
- ReItem *reItem;
- RegExpr *regExpr;
- VarDef *varDef;
- Join *join;
- LongestMatch *longestMatch;
- int lower, upper;
- Type type;
-};
-
-/* A range machine. Only ever composed of two literals. */
-struct Range
-{
- Range( Literal *lowerLit, Literal *upperLit )
- : lowerLit(lowerLit), upperLit(upperLit) { }
-
- ~Range();
- FsmAp *walk( ParseData *pd );
-
- Literal *lowerLit;
- Literal *upperLit;
-};
-
-/* Some literal machine. Can be a number or literal string. */
-struct Literal
-{
- enum LiteralType { Number, LitString };
-
- Literal( const Token &token, LiteralType type )
- : token(token), type(type) { }
-
- FsmAp *walk( ParseData *pd );
-
- Token token;
- LiteralType type;
-};
-
-/* Regular expression. */
-struct RegExpr
-{
- enum RegExpType { RecurseItem, Empty };
-
- /* Constructors. */
- RegExpr() :
- type(Empty), caseInsensitive(false) { }
- RegExpr(RegExpr *regExpr, ReItem *item) :
- regExpr(regExpr), item(item),
- type(RecurseItem), caseInsensitive(false) { }
-
- ~RegExpr();
- FsmAp *walk( ParseData *pd, RegExpr *rootRegex );
-
- RegExpr *regExpr;
- ReItem *item;
- RegExpType type;
- bool caseInsensitive;
-};
-
-/* An item in a regular expression. */
-struct ReItem
-{
- enum ReItemType { Data, Dot, OrBlock, NegOrBlock };
-
- ReItem( const InputLoc &loc, const Token &token )
- : loc(loc), token(token), star(false), type(Data) { }
- ReItem( const InputLoc &loc, ReItemType type )
- : loc(loc), star(false), type(type) { }
- ReItem( const InputLoc &loc, ReOrBlock *orBlock, ReItemType type )
- : loc(loc), orBlock(orBlock), star(false), type(type) { }
-
- ~ReItem();
- FsmAp *walk( ParseData *pd, RegExpr *rootRegex );
-
- InputLoc loc;
- Token token;
- ReOrBlock *orBlock;
- bool star;
- ReItemType type;
-};
-
-/* An or block item. */
-struct ReOrBlock
-{
- enum ReOrBlockType { RecurseItem, Empty };
-
- /* Constructors. */
- ReOrBlock()
- : type(Empty) { }
- ReOrBlock(ReOrBlock *orBlock, ReOrItem *item)
- : orBlock(orBlock), item(item), type(RecurseItem) { }
-
- ~ReOrBlock();
- FsmAp *walk( ParseData *pd, RegExpr *rootRegex );
-
- ReOrBlock *orBlock;
- ReOrItem *item;
- ReOrBlockType type;
-};
-
-/* An item in an or block. */
-struct ReOrItem
-{
- enum ReOrItemType { Data, Range };
-
- ReOrItem( const InputLoc &loc, const Token &token )
- : loc(loc), token(token), type(Data) {}
- ReOrItem( const InputLoc &loc, char lower, char upper )
- : loc(loc), lower(lower), upper(upper), type(Range) { }
-
- FsmAp *walk( ParseData *pd, RegExpr *rootRegex );
-
- InputLoc loc;
- Token token;
- char lower;
- char upper;
- ReOrItemType type;
-};
-
-
-/*
- * Inline code tree
- */
-struct InlineList;
-struct InlineItem
-{
- enum Type
- {
- Text, Goto, Call, Next, GotoExpr, CallExpr, NextExpr, Ret, PChar,
- Char, Hold, Curs, Targs, Entry, Exec, LmSwitch, LmSetActId,
- LmSetTokEnd, LmOnLast, LmOnNext, LmOnLagBehind, LmInitAct,
- LmInitTokStart, LmSetTokStart, Break
- };
-
- InlineItem( const InputLoc &loc, char *data, Type type ) :
- loc(loc), data(data), nameRef(0), children(0), type(type) { }
-
- InlineItem( const InputLoc &loc, NameRef *nameRef, Type type ) :
- loc(loc), data(0), nameRef(nameRef), children(0), type(type) { }
-
- InlineItem( const InputLoc &loc, LongestMatch *longestMatch,
- LongestMatchPart *longestMatchPart, Type type ) : loc(loc), data(0),
- nameRef(0), children(0), longestMatch(longestMatch),
- longestMatchPart(longestMatchPart), type(type) { }
-
- InlineItem( const InputLoc &loc, NameInst *nameTarg, Type type ) :
- loc(loc), data(0), nameRef(0), nameTarg(nameTarg), children(0),
- type(type) { }
-
- InlineItem( const InputLoc &loc, Type type ) :
- loc(loc), data(0), nameRef(0), children(0), type(type) { }
-
- InputLoc loc;
- char *data;
- NameRef *nameRef;
- NameInst *nameTarg;
- InlineList *children;
- LongestMatch *longestMatch;
- LongestMatchPart *longestMatchPart;
- Type type;
-
- InlineItem *prev, *next;
-};
-
-/* Normally this would be a typedef, but that would entail including DList from
- * ptreetypes, which should be just typedef forwards. */
-struct InlineList : public DList<InlineItem> { };
-
-
-
-#endif /* _PARSETREE_H */
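The header above encodes operator precedence as a chain of node types: a Join holds Expressions, an Expression holds a Term, a Term holds a FactorWithAug, which in turn wraps FactorWithRep (repetition), FactorWithNeg (negation) and finally Factor. The sketch below is a simplified stand-in (its Node struct is hypothetical, not the classes above) showing how a starred literal such as 'a'* ends up wrapped once per level, with the star recorded at the FactorWithRep step.

// Simplified illustration of the precedence nesting documented in parsetree.h.
#include <cstdio>
#include <memory>
#include <string>

struct Node
{
	std::string level;
	std::unique_ptr<Node> child;
	Node( std::string l, std::unique_ptr<Node> c = nullptr )
		: level(std::move(l)), child(std::move(c)) {}
};

int main()
{
	/* Build the chain bottom-up, the same direction the grammar reduces. */
	auto factor     = std::make_unique<Node>( "Factor('a')" );
	auto withNeg    = std::make_unique<Node>( "FactorWithNeg", std::move(factor) );
	auto withRep    = std::make_unique<Node>( "FactorWithRep(StarType)", std::move(withNeg) );
	auto withAug    = std::make_unique<Node>( "FactorWithAug", std::move(withRep) );
	auto term       = std::make_unique<Node>( "Term", std::move(withAug) );
	auto expression = std::make_unique<Node>( "Expression", std::move(term) );
	auto join       = std::make_unique<Node>( "Join", std::move(expression) );

	for ( Node *n = join.get(); n != nullptr; n = n->child.get() )
		std::printf( "%s\n", n->level.c_str() );
	return 0;
}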
diff --git a/contrib/tools/ragel5/ragel/ragel.h b/contrib/tools/ragel5/ragel/ragel.h
deleted file mode 100644
index 736369c0ce..0000000000
--- a/contrib/tools/ragel5/ragel/ragel.h
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _RAGEL_H
-#define _RAGEL_H
-
-#include <stdio.h>
-#include <iostream>
-#include <fstream>
-#include <string>
-#include "config.h"
-
-#define PROGNAME "ragel"
-
-/* To what degree machines are minimized. */
-enum MinimizeLevel {
- MinimizeApprox,
- MinimizeStable,
- MinimizePartition1,
- MinimizePartition2
-};
-
-enum MinimizeOpt {
- MinimizeNone,
- MinimizeEnd,
- MinimizeMostOps,
- MinimizeEveryOp
-};
-
-/* Options. */
-extern MinimizeLevel minimizeLevel;
-extern MinimizeOpt minimizeOpt;
-extern char *machineSpec, *machineName;
-extern bool printStatistics;
-
-extern int gblErrorCount;
-extern char mainMachine[];
-
-/* Location in an input file. */
-struct InputLoc
-{
- const char *fileName;
- int line;
- int col;
-};
-
-/* Error reporting. */
-std::ostream &error();
-std::ostream &error( const InputLoc &loc );
-std::ostream &warning( const InputLoc &loc );
-
-void terminateAllParsers( );
-void writeMachines( std::ostream &out, std::string hostData, const char *inputFileName );
-void xmlEscapeHost( std::ostream &out, char *data, int len );
-
-#endif /* _RAGEL_H */
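The error-reporting declarations above (error(const InputLoc &) returning a std::ostream) are used throughout the deleted sources as error(loc) << "message" << endl, for example in parsetree.cc. The real implementation lives in another of the removed files; what follows is only a hedged, self-contained sketch of one plausible shape, with the local InputLoc mirror and the gblErrorCount bump as illustrative assumptions rather than Ragel's actual code.

// Hedged sketch of an ostream-returning error reporter in the style declared
// by ragel.h: print "file:line:col: error: ", count the error, return the stream.
#include <iostream>

struct InputLoc { const char *fileName; int line; int col; };

static int gblErrorCount = 0;

static std::ostream &error( const InputLoc &loc )
{
	gblErrorCount += 1;
	std::cerr << loc.fileName << ":" << loc.line << ":" << loc.col << ": error: ";
	return std::cerr;
}

int main()
{
	InputLoc loc = { "example.rl", 12, 5 };
	error( loc ) << "lower end of range is greater than upper end" << std::endl;
	return gblErrorCount != 0; /* non-zero exit when errors were reported */
}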
diff --git a/contrib/tools/ragel5/ragel/rlparse.cpp b/contrib/tools/ragel5/ragel/rlparse.cpp
deleted file mode 100644
index cd6fbde218..0000000000
--- a/contrib/tools/ragel5/ragel/rlparse.cpp
+++ /dev/null
@@ -1,6088 +0,0 @@
-/* Automatically generated by Kelbt from "rlparse.kl".
- *
- * Parts of this file are copied from Kelbt source covered by the GNU
- * GPL. As a special exception, you may use the parts of this file copied
- * from Kelbt source without restriction. The remainder is derived from
- * "rlparse.kl" and inherits the copyright status of that file.
- */
-
-#line 1 "rlparse.kl"
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlparse.h"
-#include "ragel.h"
-#include <iostream>
-#include <errno.h>
-
-#include <stdlib.h>
-//#include <malloc.h>
-
-using std::cout;
-using std::cerr;
-using std::endl;
-
-ParserDict parserDict;
-
-#line 93 "rlparse.kh"
-#line 96 "rlparse.kh"
-#line 126 "rlparse.kh"
-#line 1370 "rlparse.kl"
-
-
-#line 50 "rlparse.cpp"
-struct Parser_Lel_action_ref
-{
-#line 682 "rlparse.kl"
-
- Action *action;
-
-
-#line 57 "rlparse.cpp"
-};
-
-struct Parser_Lel_aug_type
-{
-#line 475 "rlparse.kl"
-
- InputLoc loc;
- AugType augType;
-
-
-#line 68 "rlparse.cpp"
-};
-
-struct Parser_Lel_expression
-{
-#line 297 "rlparse.kl"
-
- Expression *expression;
-
-
-#line 78 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor
-{
-#line 907 "rlparse.kl"
-
- Factor *factor;
-
-
-#line 88 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_rep_num
-{
-#line 861 "rlparse.kl"
-
- int rep;
-
-
-#line 98 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_with_aug
-{
-#line 392 "rlparse.kl"
-
- FactorWithAug *factorWithAug;
-
-
-#line 108 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_with_ep
-{
-#line 376 "rlparse.kl"
-
- FactorWithAug *factorWithAug;
-
-
-#line 118 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_with_label
-{
-#line 360 "rlparse.kl"
-
- FactorWithAug *factorWithAug;
-
-
-#line 128 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_with_neg
-{
-#line 887 "rlparse.kl"
-
- FactorWithNeg *factorWithNeg;
-
-
-#line 138 "rlparse.cpp"
-};
-
-struct Parser_Lel_factor_with_rep
-{
-#line 811 "rlparse.kl"
-
- FactorWithRep *factorWithRep;
-
-
-#line 148 "rlparse.cpp"
-};
-
-struct Parser_Lel_inline_item
-{
-#line 1160 "rlparse.kl"
-
- InlineItem *inlineItem;
-
-
-#line 158 "rlparse.cpp"
-};
-
-struct Parser_Lel_inline_list
-{
-#line 1139 "rlparse.kl"
-
- InlineList *inlineList;
-
-
-#line 168 "rlparse.cpp"
-};
-
-struct Parser_Lel_join
-{
-#line 281 "rlparse.kl"
-
- Join *join;
-
-
-#line 178 "rlparse.cpp"
-};
-
-struct Parser_Lel_join_or_lm
-{
-#line 204 "rlparse.kl"
-
- JoinOrLm *joinOrLm;
-
-
-#line 188 "rlparse.cpp"
-};
-
-struct Parser_Lel_lm_part_list
-{
-#line 224 "rlparse.kl"
-
- LmPartList *lmPartList;
-
-
-#line 198 "rlparse.cpp"
-};
-
-struct Parser_Lel_local_err_name
-{
-#line 790 "rlparse.kl"
-
- int error_name;
-
-
-#line 208 "rlparse.cpp"
-};
-
-struct Parser_Lel_longest_match_part
-{
-#line 243 "rlparse.kl"
-
- LongestMatchPart *lmPart;
-
-
-#line 218 "rlparse.cpp"
-};
-
-struct Parser_Lel_opt_export
-{
-#line 64 "rlparse.kl"
-
- bool isSet;
-
-
-#line 228 "rlparse.cpp"
-};
-
-struct Parser_Lel_opt_lm_part_action
-{
-#line 262 "rlparse.kl"
-
- Action *action;
-
-
-#line 238 "rlparse.cpp"
-};
-
-struct Parser_Lel_priority_aug
-{
-#line 741 "rlparse.kl"
-
- int priorityNum;
-
-
-#line 248 "rlparse.cpp"
-};
-
-struct Parser_Lel_priority_name
-{
-#line 723 "rlparse.kl"
-
- int priorityName;
-
-
-#line 258 "rlparse.cpp"
-};
-
-struct Parser_Lel_range_lit
-{
-#line 975 "rlparse.kl"
-
- Literal *literal;
-
-
-#line 268 "rlparse.cpp"
-};
-
-struct Parser_Lel_regular_expr
-{
-#line 1013 "rlparse.kl"
-
- RegExpr *regExpr;
-
-
-#line 278 "rlparse.cpp"
-};
-
-struct Parser_Lel_regular_expr_char
-{
-#line 1062 "rlparse.kl"
-
- ReItem *reItem;
-
-
-#line 288 "rlparse.cpp"
-};
-
-struct Parser_Lel_regular_expr_item
-{
-#line 1046 "rlparse.kl"
-
- ReItem *reItem;
-
-
-#line 298 "rlparse.cpp"
-};
-
-struct Parser_Lel_regular_expr_or_char
-{
-#line 1121 "rlparse.kl"
-
- ReOrItem *reOrItem;
-
-
-#line 308 "rlparse.cpp"
-};
-
-struct Parser_Lel_regular_expr_or_data
-{
-#line 1088 "rlparse.kl"
-
- ReOrBlock *reOrBlock;
-
-
-#line 318 "rlparse.cpp"
-};
-
-struct Parser_Lel_term
-{
-#line 329 "rlparse.kl"
-
- Term *term;
-
-
-#line 328 "rlparse.cpp"
-};
-
-struct Parser_Lel_token_type
-{
-#line 104 "rlparse.kl"
-
- Token token;
-
-
-#line 338 "rlparse.cpp"
-};
-
-union Parser_UserData
-{
- struct Parser_Lel_action_ref action_ref;
- struct Parser_Lel_aug_type aug_type;
- struct Parser_Lel_expression expression;
- struct Parser_Lel_factor factor;
- struct Parser_Lel_factor_rep_num factor_rep_num;
- struct Parser_Lel_factor_with_aug factor_with_aug;
- struct Parser_Lel_factor_with_ep factor_with_ep;
- struct Parser_Lel_factor_with_label factor_with_label;
- struct Parser_Lel_factor_with_neg factor_with_neg;
- struct Parser_Lel_factor_with_rep factor_with_rep;
- struct Parser_Lel_inline_item inline_item;
- struct Parser_Lel_inline_list inline_list;
- struct Parser_Lel_join join;
- struct Parser_Lel_join_or_lm join_or_lm;
- struct Parser_Lel_lm_part_list lm_part_list;
- struct Parser_Lel_local_err_name local_err_name;
- struct Parser_Lel_longest_match_part longest_match_part;
- struct Parser_Lel_opt_export opt_export;
- struct Parser_Lel_opt_lm_part_action opt_lm_part_action;
- struct Parser_Lel_priority_aug priority_aug;
- struct Parser_Lel_priority_name priority_name;
- struct Parser_Lel_range_lit range_lit;
- struct Parser_Lel_regular_expr regular_expr;
- struct Parser_Lel_regular_expr_char regular_expr_char;
- struct Parser_Lel_regular_expr_item regular_expr_item;
- struct Parser_Lel_regular_expr_or_char regular_expr_or_char;
- struct Parser_Lel_regular_expr_or_data regular_expr_or_data;
- struct Parser_Lel_term term;
- struct Parser_Lel_token_type token_type;
- struct Token token;
-};
-
-struct Parser_LangEl
-{
- char *file;
- int line;
- int type;
- int reduction;
- int state;
- union Parser_UserData user;
- unsigned int retry;
- struct Parser_LangEl *next, *child;
-};
-
-#line 388 "rlparse.cpp"
-unsigned int Parser_startState = 0;
-
-short Parser_indicies[] = {
- 151, -1, -1, -1, -1, -1, 151, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 151, 151, 151, 151, -1, -1,
- -1, -1, -1, -1, 151, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 151, 151, -1, 151, 1, 0, 393,
- 153, -1, -1, -1, -1, -1, 153, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 153, 153, 153, 153, -1, -1,
- -1, -1, -1, -1, 153, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 153, 153, -1, 149, -1, -1, 2,
- 157, -1, -1, -1, -1, -1, 150, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 4, 5, 6, 7, -1, -1,
- -1, -1, -1, -1, 154, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 9, 8, -1, -1, -1, -1, -1,
- 152, 384, 385, 386, 387, 388, 389, 390,
- 391, 392, 10, 3, 161, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 24, 11, 12, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 318, 320, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 13, 356, 356, 356, -1, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- -1, -1, -1, -1, -1, -1, 356, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 356, 356, 356,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- 356, -1, -1, -1, 356, 356, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 20, 356, 356, 356, -1, 356, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 356, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 356, -1, -1,
- -1, -1, -1, -1, 356, 356, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 356, 356, 356, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 356, 356, -1,
- -1, -1, 356, 356, 356, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 22, 170,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 170, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 21, 23, -1, -1, -1, -1, -1,
- -1, -1, -1, 155, 25, 164, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 26, 14,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 318, 320, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 27, 319,
- 368, 369, 370, -1, 367, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 166, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 366, -1, -1, -1,
- -1, -1, -1, 364, 365, -1, -1, -1,
- -1, -1, -1, -1, -1, 371, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 360, 361, 362, 363, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 372, 373, -1, -1,
- -1, 374, 375, 28, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 357, -1, 359, -1, 355, 358,
- 29, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 169, 368,
- 369, 370, -1, 367, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 167, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 366, -1, -1, -1, -1,
- -1, -1, 364, 365, -1, -1, -1, -1,
- -1, -1, -1, -1, 371, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 360, 361, 362, 363, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 372, 373, -1, -1, -1,
- 374, 375, 28, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 357, -1, 359, -1, 355, 358, 153,
- -1, -1, -1, -1, -1, -1, 153, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 153, 153, 153, 153, -1, -1, -1, -1,
- -1, -1, 153, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 153,
- 153, -1, -1, -1, -1, 30, 31, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 32, 334, 334, 334, -1, 334,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 334, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 334, -1, -1, -1, -1, -1, -1, 334,
- -1, -1, -1, -1, -1, -1, 334, 334,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 334, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 334, 334, 334,
- 334, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 334, 334, 334, 334,
- 334, 334, 334, 334, 334, 334, 334, 334,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 33, 163,
- 165, 34, 356, 356, 356, -1, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 356, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, -1,
- -1, -1, -1, -1, -1, 356, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 356, 356, 356, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, 356,
- -1, -1, -1, 356, 356, 356, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 35,
- 158, -1, -1, -1, -1, -1, -1, 157,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 4, 5, 6, 7, -1, -1, -1,
- -1, -1, -1, 154, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 9, 8, -1, -1, -1, -1, -1, 152,
- 384, 385, 386, 387, 388, 389, 390, 391,
- 392, 10, 3, 44, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, 14,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 45, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 39, 46,
- -1, -1, -1, -1, -1, 318, 320, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 50, 48, 49, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 36, -1, -1, 47, -1,
- -1, -1, -1, -1, -1, -1, 37, 38,
- 193, 41, -1, 42, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 43, -1,
- -1, -1, 300, 304, -1, -1, 51, 44,
- -1, -1, -1, -1, -1, -1, 52, -1,
- -1, -1, -1, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 45, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 39, 46, -1, -1, -1, -1,
- -1, 318, 320, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 55, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 50, 48, 49, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 54,
- 53, -1, 47, -1, -1, -1, -1, -1,
- -1, -1, 37, 38, 193, 41, -1, 42,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 43, -1, -1, -1, 300, 304,
- -1, -1, 51, 340, 341, 342, -1, 338,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 339, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 162, -1, -1, -1, -1, -1, -1, 366,
- -1, -1, -1, -1, -1, -1, 364, 365,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 343, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 360, 361, 362,
- 363, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 62, 57, 56, 372,
- 373, 58, 60, 61, 374, 375, 28, 59,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 333, 337, 335, 336, 344,
- 381, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 380, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 63, -1, -1, -1, -1, 64, 368,
- 369, 370, -1, 367, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 168, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 366, -1, -1, -1, -1,
- -1, -1, 364, 365, -1, -1, -1, -1,
- -1, -1, -1, -1, 371, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 360, 361, 362, 363, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 372, 373, -1, -1, -1,
- 374, 375, 28, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 357, -1, 359, -1, 355, 358, 70,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 159, 72,
- -1, -1, 182, -1, -1, 182, 73, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 182, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 182, 71, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 182, -1, -1, -1,
- 74, 44, -1, -1, -1, -1, 187, -1,
- 52, 187, -1, -1, 187, 19, 75, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 187, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 187, 187, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, 76,
- 77, 78, -1, 187, -1, -1, -1, 187,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 188, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 307, -1, -1,
- 307, 307, 307, -1, 307, 307, 307, 307,
- 307, 307, 307, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 66, 307,
- 307, -1, 307, 307, 307, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 307, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 307,
- 307, -1, -1, -1, -1, -1, -1, -1,
- 307, 307, -1, -1, -1, -1, -1, 307,
- 307, -1, -1, 307, 307, 307, 307, 307,
- 307, -1, -1, 307, 307, 307, 307, 307,
- 307, 307, 307, 307, 307, 307, 307, 307,
- 307, 307, 307, 307, 307, 307, 307, 307,
- 307, 307, 307, 307, 307, 307, 307, 307,
- 307, 307, 307, 307, 307, 307, 307, 307,
- 307, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 307, 195,
- -1, -1, -1, -1, 195, -1, 195, 195,
- -1, -1, 195, 195, 195, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 195, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 195, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 195, 195, -1, -1, -1, -1, -1,
- -1, -1, 195, 195, -1, -1, -1, -1,
- -1, 195, 195, -1, -1, 195, 195, 195,
- 79, 195, -1, -1, -1, 195, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 195, 195, 195, 197, -1, -1, 89, 88,
- 197, -1, 197, 197, -1, -1, 197, 197,
- 197, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 197, 91, -1,
- 90, -1, 87, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 197, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 197, 197, -1,
- -1, -1, -1, -1, -1, -1, 197, 197,
- -1, -1, -1, -1, -1, 197, 197, -1,
- -1, 197, 197, 197, 197, 197, -1, -1,
- -1, 197, 213, 215, 217, 92, 256, 260,
- 262, 264, 258, 266, 268, 272, 274, 276,
- 270, 278, 244, 248, 250, 252, 246, 254,
- 220, 224, 226, 228, 222, 230, 232, 236,
- 238, 240, 234, 242, 197, 197, 197, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 219, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 80, -1, -1, 81,
- 82, 83, 84, 85, 86, 208, -1, -1,
- 208, 208, 208, -1, 208, 208, 292, 295,
- 208, 208, 208, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 208,
- 208, -1, 208, 294, 208, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 208, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 93,
- 208, -1, -1, -1, -1, -1, -1, -1,
- 208, 208, -1, -1, -1, -1, -1, 208,
- 208, -1, -1, 208, 208, 208, 208, 208,
- 293, -1, -1, 208, 208, 208, 208, 208,
- 208, 208, 208, 208, 208, 208, 208, 208,
- 208, 208, 208, 208, 208, 208, 208, 208,
- 208, 208, 208, 208, 208, 208, 208, 208,
- 208, 208, 208, 208, 208, 208, 208, 208,
- 208, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 208, 44,
- -1, -1, -1, -1, -1, -1, 52, -1,
- -1, -1, -1, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 45, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 308, 46, -1, -1, -1, -1,
- -1, 318, 320, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 50, 48, 49, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 47, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 302, 304,
- -1, -1, 51, 44, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, 14,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 45, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 308, 46,
- -1, -1, -1, -1, -1, 318, 320, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 50, 48, 49, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 47, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 303, 304, -1, -1, 51, 305,
- -1, -1, 305, 305, 305, -1, 305, 305,
- 305, 305, 305, 305, 305, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 305, 305, -1, 305, 305, 305, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 305, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 305, 305, -1, -1, -1, -1, -1,
- -1, -1, 305, 305, -1, -1, -1, -1,
- -1, 305, 305, -1, 314, 305, 305, 305,
- 305, 305, 305, -1, -1, 305, 305, 305,
- 305, 305, 305, 305, 305, 305, 305, 305,
- 305, 305, 305, 305, 305, 305, 305, 305,
- 305, 305, 305, 305, 305, 305, 305, 305,
- 305, 305, 305, 305, 305, 305, 305, 305,
- 305, 305, 305, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 305, 306, -1, -1, 306, 306, 306, -1,
- 306, 306, 306, 306, 306, 306, 306, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 306, 306, -1, 306, 306,
- 306, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 306, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 306, 306, -1, -1, -1,
- -1, -1, -1, -1, 306, 306, -1, -1,
- -1, -1, -1, 306, 306, -1, 316, 306,
- 306, 306, 306, 306, 306, -1, -1, 306,
- 306, 306, 306, 306, 306, 306, 306, 306,
- 306, 306, 306, 306, 306, 306, 306, 306,
- 306, 306, 306, 306, 306, 306, 306, 306,
- 306, 306, 306, 306, 306, 306, 306, 306,
- 306, 306, 306, 306, 306, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 306, 330, -1, -1, -1, 330,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 68, 330, -1, -1, -1, 330, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 69, 322,
- 322, 322, -1, 322, -1, -1, 322, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 67, 94, 44, -1, -1, -1, -1, -1,
- -1, 52, -1, -1, -1, -1, 14, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 45,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 39, 46, -1,
- -1, -1, -1, -1, 318, 320, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 50, 48, 49, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 65, -1, -1, 47, -1, -1,
- -1, -1, -1, -1, -1, 37, 38, 193,
- 41, -1, 42, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 43, -1, -1,
- -1, 300, 304, -1, -1, 51, 160, 70,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 171, 44,
- -1, -1, -1, -1, -1, -1, 52, -1,
- -1, -1, -1, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 45, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 40, 46, -1, -1, -1, -1,
- -1, 318, 320, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 50, 48, 49, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 4, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 154, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 176, -1, 175, -1, -1,
- -1, -1, -1, -1, 156, 97, -1, 96,
- -1, -1, 47, -1, -1, 95, 174, -1,
- -1, -1, 37, 38, 193, 41, -1, 42,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 43, -1, -1, -1, 300, 304,
- -1, -1, 51, 345, 356, 356, 356, -1,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, -1, -1, -1, -1, -1, -1, 356,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, 356,
- 356, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, 356, -1, -1, -1, 356, 356, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 98, 100, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 381, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 380, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 99, -1, -1,
- -1, -1, 64, 104, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 381, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 380, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 103, -1,
- -1, -1, -1, 64, 102, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 381, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 380,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 101,
- -1, -1, -1, -1, 64, 353, 354, 376,
- 383, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 105, 313, -1,
- -1, 70, 44, -1, -1, -1, -1, -1,
- -1, 52, -1, -1, -1, -1, 14, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 45,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 39, 46, -1,
- -1, -1, -1, -1, 318, 320, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 50, 48, 49, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 47, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 194,
- 41, -1, 42, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 43, -1, -1,
- -1, 300, 304, -1, -1, 51, 311, 107,
- 108, -1, 327, -1, -1, 328, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 321, 106, 309, -1, -1, -1, 109,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 329, 310, -1,
- -1, -1, 109, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 329, 44, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, 14, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, 110, 38, 193, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, -1, -1, 52, -1, -1, -1,
- -1, 14, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, 113, 193, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 44, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, 14, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, 111, 193, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, -1, -1, 52, -1, -1, -1,
- -1, 14, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, 112, 193, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 44, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, 14, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, 114, 193, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, -1, -1, 52, -1, -1, -1,
- -1, 14, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 189, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 44, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, 14, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 190, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, -1, -1, 52, -1, -1, -1,
- -1, 14, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 191, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 44, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, 14, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 192, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 378, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 196, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 115, 116, -1, -1, 118, -1, 119,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 117, -1, -1,
- -1, -1, -1, -1, -1, -1, 284, -1,
- -1, -1, -1, -1, -1, 288, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 198, 282, -1, -1,
- -1, -1, -1, -1, -1, 199, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 280,
- 287, 120, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 117, -1, -1, -1,
- -1, -1, -1, -1, -1, 284, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 201, 282, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 280, 120,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 117, -1, -1, -1, -1, -1,
- -1, -1, -1, 284, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 202, 282, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 280, 120, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 117, -1, -1, -1, -1, -1, -1, -1,
- -1, 284, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 203,
- 282, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 280, 120, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 117, -1,
- -1, -1, -1, -1, -1, -1, -1, 284,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 204, 282, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 280, 120, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 117, -1, -1, -1,
- -1, -1, -1, -1, -1, 284, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 205, 282, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 280, 121,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 117, -1, -1, -1, -1, -1,
- -1, -1, -1, 284, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 206, 282, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 280, 209, -1, -1,
- 209, -1, 209, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 209, -1, -1, -1, -1, -1, -1, -1,
- -1, 209, -1, -1, -1, -1, -1, -1,
- 209, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 253, 265, 277, 229, 241, 210, -1, -1,
- 210, -1, 210, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 210, -1, -1, -1, -1, -1, -1, -1,
- -1, 210, -1, -1, -1, -1, -1, -1,
- 210, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 218,
- 251, 263, 275, 227, 239, 211, -1, -1,
- 211, -1, 211, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 211, -1, -1, -1, -1, -1, -1, -1,
- -1, 211, -1, -1, -1, -1, -1, -1,
- 211, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 216,
- 249, 261, 273, 225, 237, 212, -1, -1,
- 212, -1, 212, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 212, -1, -1, -1, -1, -1, -1, -1,
- -1, 212, -1, -1, -1, -1, -1, -1,
- 212, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 214,
- 245, 257, 269, 221, 233, 247, 259, 271,
- 223, 235, 255, 267, 279, 231, 243, 123,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 301, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 122, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 315, -1, -1, -1, -1,
- -1, 318, 320, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 317, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 312, 44, -1, -1, -1, -1,
- -1, -1, 52, -1, 127, -1, -1, 14,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 45, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 40, 46,
- -1, -1, -1, -1, -1, 318, 320, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 50, 48, 49, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 4, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 154, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 176,
- -1, 175, -1, -1, -1, -1, -1, -1,
- 156, 97, -1, 96, -1, -1, 47, -1,
- -1, -1, 173, -1, -1, -1, 37, 38,
- 193, 41, -1, 42, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 43, -1,
- -1, -1, 300, 304, -1, -1, 51, 70,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 180, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 117, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 126, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 125, -1, 179, 161,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 124, 368, 369, 370, -1,
- 367, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 346,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 366, -1, -1, -1, -1, -1, -1, 364,
- 365, -1, -1, -1, -1, -1, -1, -1,
- -1, 371, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 360, 361,
- 362, 363, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 372, 373, -1, -1, -1, 374, 375, 28,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 357, -1,
- 359, -1, 355, 358, 347, 356, 356, 356,
- -1, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 356, -1, -1, -1, -1, -1, -1,
- 356, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, 356, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- 356, 356, 356, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 356, 356, -1, -1, -1, 356, 356,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 128, 351, 356, 356, 356, -1,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, -1, -1, -1, -1, -1, -1, 356,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, 356,
- 356, 356, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, 356, -1, -1, -1, 356, 356, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 129, 349, 356, 356, 356, -1, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- -1, -1, -1, -1, -1, -1, 356, 356,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 356, 356, 356,
- 356, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 356,
- 356, -1, -1, -1, 356, 356, 356, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 130, 379, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 379, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 131, 324, 324,
- 324, -1, 324, 323, -1, 324, 330, -1,
- -1, -1, 330, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 132, 330, -1, -1, -1,
- 330, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 133, 331, -1, -1, 134, 331, 72,
- -1, -1, 181, -1, -1, 181, 73, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 181, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 181, 71, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 181, -1, -1, -1,
- 74, 44, -1, -1, -1, -1, 184, -1,
- 52, 184, -1, -1, 184, 16, 75, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 184, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 184, 184, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, 76,
- 77, 78, -1, 184, -1, -1, -1, 184,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 188, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, 185, -1, 52, 185, -1, -1,
- 185, 17, 75, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 185,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 185,
- 185, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, 76, 77, 78, -1, 185,
- -1, -1, -1, 185, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 188, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 44, -1, -1, -1, -1, 183, -1,
- 52, 183, -1, -1, 183, 15, 75, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 183, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 45, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 183, 183, -1, -1, -1,
- -1, -1, -1, -1, 39, 46, -1, -1,
- -1, -1, -1, 318, 320, -1, -1, 76,
- 77, 78, -1, 183, -1, -1, -1, 183,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 50, 48, 49, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 47, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 188, 41,
- -1, 42, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 43, -1, -1, -1,
- 300, 304, -1, -1, 51, 44, -1, -1,
- -1, -1, 186, -1, 52, 186, -1, -1,
- 186, 18, 75, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 186,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 45, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 186,
- 186, -1, -1, -1, -1, -1, -1, -1,
- 39, 46, -1, -1, -1, -1, -1, 318,
- 320, -1, -1, 76, 77, 78, -1, 186,
- -1, -1, -1, 186, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 50, 48,
- 49, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 47, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 188, 41, -1, 42, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 43, -1, -1, -1, 300, 304, -1, -1,
- 51, 383, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 135, 138,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 136,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 137, 334, 334, 334, -1, 334, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 334, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 334, -1,
- -1, -1, -1, -1, -1, 334, -1, -1,
- -1, -1, -1, -1, 334, 334, -1, -1,
- -1, -1, -1, -1, -1, -1, 334, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 334, 334, 334, 334, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 334, 334, 334, 334, 334, 334,
- 334, 334, 334, 334, 334, 334, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 139, 289, 290, 284,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 137, 141, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 140, -1, 137, 143, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 296, 301, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 142, 31, 177, 120, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 117,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 284, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 178, 282,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 280, 172, 368, 369, 370, -1, 367,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 348, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 366,
- -1, -1, -1, -1, -1, -1, 364, 365,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 371, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 360, 361, 362,
- 363, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 372,
- 373, -1, -1, -1, 374, 375, 28, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 357, -1, 359,
- -1, 355, 358, 368, 369, 370, -1, 367,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 352, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 366,
- -1, -1, -1, -1, -1, -1, 364, 365,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 371, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 360, 361, 362,
- 363, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 372,
- 373, -1, -1, -1, 374, 375, 28, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 357, -1, 359,
- -1, 355, 358, 368, 369, 370, -1, 367,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 350, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 366,
- -1, -1, -1, -1, -1, -1, 364, 365,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 371, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 360, 361, 362,
- 363, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 372,
- 373, -1, -1, -1, 374, 375, 28, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 357, -1, 359,
- -1, 355, 358, 382, 325, -1, -1, -1,
- 109, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 329, 326,
- -1, -1, -1, 109, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 329, 332, 377, -1, -1, -1, -1,
- 377, -1, 377, 377, -1, -1, 377, 377,
- 377, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 377, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 377, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 377, 377, -1,
- -1, -1, -1, -1, -1, -1, 377, 377,
- -1, -1, -1, -1, -1, 377, 377, -1,
- -1, 377, 377, 377, 377, 377, -1, 131,
- -1, 377, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 377, 377, 377, 144,
- 281, 283, -1, -1, 286, 340, 341, 342,
- -1, 338, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 339, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 285, -1, -1, -1, -1, -1,
- -1, 366, -1, -1, -1, -1, -1, -1,
- 364, 365, -1, -1, -1, -1, -1, -1,
- -1, -1, 343, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 360,
- 361, 362, 363, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 62, 57,
- 56, 372, 373, 58, 60, 61, 374, 375,
- 28, 59, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 333, 337, 335,
- 336, 344, 145, 283, -1, -1, 291, 297,
- 298, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 301, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 146, 118, -1, 119, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 288, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 147, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 287, 120,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 117, -1, -1, -1, -1, -1,
- -1, -1, -1, 284, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 148, 282, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 280, 299, 200, 207,
-
-};
-
-unsigned short Parser_keys[] = {
- 132, 226, 224, 224, 132, 227, 132, 239,
- 132, 240, 132, 132, 132, 132, 45, 244,
- 40, 245, 40, 245, 132, 246, 123, 132,
- 123, 123, 59, 132, 45, 244, 139, 139,
- 40, 287, 132, 194, 40, 287, 125, 227,
- 61, 137, 40, 243, 59, 59, 59, 59,
- 40, 40, 40, 245, 125, 239, 33, 276,
- 33, 276, 40, 284, 132, 290, 40, 287,
- 44, 59, 38, 151, 33, 276, 33, 202,
- 33, 188, 33, 266, 33, 202, 33, 276,
- 33, 276, 33, 202, 33, 202, 189, 274,
- 189, 274, 186, 275, 142, 142, 33, 276,
- 59, 59, 44, 59, 33, 276, 59, 59,
- 40, 245, 42, 290, 42, 290, 42, 290,
- 59, 59, 59, 59, 41, 41, 132, 289,
- 41, 44, 33, 276, 186, 278, 189, 279,
- 189, 279, 33, 276, 33, 276, 33, 276,
- 33, 276, 33, 276, 33, 276, 33, 276,
- 33, 276, 33, 276, 132, 288, 40, 270,
- 40, 269, 40, 269, 40, 269, 40, 269,
- 40, 269, 40, 269, 40, 207, 40, 207,
- 40, 207, 40, 207, 203, 207, 203, 207,
- 44, 271, 45, 276, 33, 276, 44, 251,
- 132, 240, 40, 287, 59, 59, 40, 245,
- 59, 59, 40, 245, 59, 59, 40, 245,
- 41, 149, 186, 193, 189, 274, 189, 274,
- 189, 193, 38, 151, 33, 276, 33, 276,
- 33, 276, 33, 276, 132, 289, 132, 269,
- 40, 243, 139, 139, 139, 139, 132, 269,
- 132, 269, 44, 125, 139, 271, 61, 61,
- 59, 59, 40, 269, 124, 124, 40, 287,
- 40, 287, 40, 287, 132, 132, 189, 279,
- 189, 279, 193, 193, 33, 188, 44, 44,
- 41, 41, 41, 44, 40, 284, 44, 44,
- 41, 44, 125, 125, 125, 271, 43, 270,
- 40, 269, 125, 125, 41, 41, 41, 41,
- 0, 0
-};
-
-unsigned int Parser_offsets[] = {
- 0, 95, 96, 192, 300, 409, 410, 411,
- 611, 817, 1023, 1138, 1148, 1149, 1223, 1423,
- 1424, 1672, 1735, 1983, 2086, 2163, 2367, 2368,
- 2369, 2370, 2576, 2691, 2935, 3179, 3424, 3583,
- 3831, 3847, 3961, 4205, 4375, 4531, 4765, 4935,
- 5179, 5423, 5593, 5763, 5849, 5935, 6025, 6026,
- 6270, 6271, 6287, 6531, 6532, 6738, 6987, 7236,
- 7485, 7486, 7487, 7488, 7646, 7650, 7894, 7987,
- 8078, 8169, 8413, 8657, 8901, 9145, 9389, 9633,
- 9877, 10121, 10365, 10522, 10753, 10983, 11213, 11443,
- 11673, 11903, 12133, 12301, 12469, 12637, 12805, 12810,
- 12815, 13043, 13275, 13519, 13727, 13836, 14084, 14085,
- 14291, 14292, 14498, 14499, 14705, 14814, 14822, 14908,
- 14994, 14999, 15113, 15357, 15601, 15845, 16089, 16247,
- 16385, 16589, 16590, 16591, 16729, 16867, 16949, 17082,
- 17083, 17084, 17314, 17315, 17563, 17811, 18059, 18060,
- 18151, 18242, 18243, 18399, 18400, 18401, 18405, 18650,
- 18651, 18655, 18656, 18803, 19031, 19261, 19262, 19263,
- 19264
-};
-
-unsigned short Parser_targs[] = {
- 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 15,
- 15, 15, 15, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35,
- 35, 36, 37, 38, 39, 40, 41, 42,
- 43, 44, 45, 46, 47, 48, 49, 50,
- 51, 52, 53, 54, 55, 56, 57, 58,
- 59, 60, 61, 62, 63, 64, 65, 66,
- 67, 68, 69, 70, 71, 72, 73, 74,
- 75, 76, 77, 78, 79, 80, 81, 82,
- 83, 84, 85, 86, 87, 88, 89, 90,
- 91, 92, 93, 94, 95, 96, 97, 98,
- 99, 100, 101, 102, 103, 104, 105, 106,
- 107, 108, 109, 110, 111, 112, 113, 114,
- 115, 116, 117, 118, 119, 120, 121, 122,
- 123, 124, 125, 126, 127, 128, 129, 130,
- 131, 132, 133, 134, 135, 136, 137, 138,
- 139, 140, 141, 142, 143, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144, 144, 144, 144, 144, 144, 144,
- 144, 144
-};
-
-unsigned int Parser_actInds[] = {
- 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 24, 26, 28, 30,
- 33, 36, 39, 42, 45, 47, 49, 51,
- 53, 55, 57, 59, 61, 63, 65, 67,
- 69, 71, 73, 75, 77, 79, 81, 83,
- 85, 88, 90, 92, 94, 96, 98, 100,
- 102, 104, 106, 108, 110, 112, 114, 116,
- 118, 120, 122, 124, 126, 128, 130, 132,
- 134, 136, 138, 140, 142, 144, 146, 148,
- 150, 152, 154, 156, 158, 160, 162, 164,
- 166, 168, 170, 172, 174, 176, 178, 180,
- 182, 184, 186, 188, 190, 192, 195, 197,
- 199, 201, 203, 205, 207, 209, 211, 213,
- 215, 217, 219, 221, 223, 225, 227, 229,
- 231, 233, 235, 237, 239, 241, 243, 245,
- 247, 249, 251, 253, 255, 257, 259, 261,
- 263, 265, 267, 269, 271, 273, 275, 277,
- 279, 281, 283, 285, 287, 289, 291, 293,
- 295, 297, 299, 301, 303, 305, 307, 309,
- 311, 313, 315, 317, 319, 321, 323, 325,
- 327, 329, 331, 333, 335, 337, 339, 341,
- 343, 345, 347, 349, 351, 353, 355, 357,
- 359, 361, 363, 365, 367, 369, 371, 373,
- 375, 377, 379, 381, 383, 385, 387, 389,
- 391, 393, 395, 397, 399, 401, 403, 405,
- 407, 409, 411, 413, 415, 417, 419, 421,
- 423, 425, 427, 429, 431, 433, 435, 437,
- 439, 441, 443, 445, 447, 449, 451, 453,
- 455, 457, 459, 461, 463, 465, 467, 469,
- 471, 473, 475, 477, 479, 481, 483, 485,
- 487, 489, 491, 493, 495, 497, 499, 501,
- 503, 505, 507, 509, 511, 513, 515, 517,
- 519, 521, 523, 525, 527, 529, 531, 533,
- 535, 537, 539, 541, 543, 545, 547, 549,
- 551, 553, 555, 557, 559, 561, 563, 565,
- 567, 569, 571, 573, 575, 577, 579, 581,
- 583, 585, 587, 589, 591, 593, 595, 597,
- 599, 601, 603, 605, 607, 609, 611, 613,
- 615, 617, 619, 621, 623, 625, 627, 629,
- 631, 633, 635, 637, 639, 641, 643, 645,
- 647, 649, 651, 653, 655, 657, 659, 661,
- 663, 665, 667, 669, 671, 673, 675, 677,
- 679, 681, 683, 685, 687, 689, 691, 693,
- 695, 697, 699, 701, 703, 705, 707, 709,
- 711, 713, 715, 717, 719, 721, 723, 725,
- 727, 729, 731, 733, 735, 737, 739, 741,
- 743, 745, 747, 749, 751, 753, 755, 757,
- 759, 761, 763, 765, 767, 769, 771, 773,
- 775, 777, 779, 781, 783, 785, 787, 789,
- 791, 793
-};
-
-unsigned int Parser_actions[] = {
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 170, 1,
- 0, 174, 1, 0, 178, 1, 0, 182,
- 1, 0, 186, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 66, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 270, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 1, 0, 1, 0, 1, 0, 1,
- 0, 2, 0, 7, 0, 10, 0, 15,
- 0, 18, 0, 59, 0, 62, 0, 63,
- 0, 66, 0, 71, 0, 75, 0, 79,
- 0, 83, 0, 87, 0, 91, 0, 95,
- 0, 99, 0, 103, 0, 107, 0, 111,
- 0, 115, 0, 118, 0, 122, 0, 127,
- 0, 131, 0, 135, 0, 139, 0, 143,
- 0, 147, 0, 151, 0, 155, 0, 158,
- 0, 162, 0, 166, 0, 170, 0, 174,
- 0, 178, 0, 182, 0, 186, 0, 191,
- 0, 195, 0, 199, 0, 203, 0, 207,
- 0, 211, 0, 215, 0, 218, 0, 223,
- 0, 226, 0, 231, 0, 235, 0, 239,
- 0, 243, 0, 247, 0, 251, 0, 255,
- 0, 259, 0, 263, 0, 267, 0, 270,
- 0, 274, 0, 278, 0, 282, 0, 286,
- 0, 291, 0, 295, 0, 299, 0, 303,
- 0, 307, 0, 311, 0, 315, 0, 319,
- 0, 323, 0, 327, 0, 331, 0, 335,
- 0, 339, 0, 343, 0, 347, 0, 351,
- 0, 355, 0, 359, 0, 363, 0, 367,
- 0, 371, 0, 375, 0, 379, 0, 383,
- 0, 387, 0, 391, 0, 395, 0, 399,
- 0, 403, 0, 407, 0, 411, 0, 415,
- 0, 419, 0, 423, 0, 427, 0, 431,
- 0, 435, 0, 439, 0, 443, 0, 447,
- 0, 451, 0, 455, 0, 459, 0, 463,
- 0, 467, 0, 471, 0, 475, 0, 479,
- 0, 483, 0, 487, 0, 491, 0, 495,
- 0, 499, 0, 503, 0, 507, 0, 511,
- 0, 515, 0, 519, 0, 523, 0, 527,
- 0, 531, 0, 535, 0, 539, 0, 543,
- 0, 547, 0, 551, 0, 555, 0, 559,
- 0, 563, 0, 567, 0, 570, 0, 571,
- 0, 575, 0, 578, 0, 583, 0, 587,
- 0, 591, 0, 595, 0, 598, 0, 603,
- 0, 607, 0, 611, 0, 615, 0, 619,
- 0, 623, 0, 627, 0, 631, 0, 635,
- 0, 639, 0, 643, 0, 647, 0, 651,
- 0, 654, 0, 658, 0, 662, 0, 663,
- 0, 667, 0, 671, 0, 675, 0, 679,
- 0, 683, 0, 686, 0, 687, 0, 690,
- 0, 691, 0, 695, 0, 699, 0, 703,
- 0, 707, 0, 710, 0, 715, 0, 718,
- 0, 723, 0, 727, 0, 731, 0, 735,
- 0, 739, 0, 742, 0, 746, 0, 751,
- 0, 755, 0, 758, 0, 763, 0, 767,
- 0, 771, 0, 775, 0, 779, 0, 783,
- 0, 787, 0, 791, 0, 795, 0, 799,
- 0, 803, 0, 807, 0, 811, 0, 815,
- 0, 819, 0, 823, 0, 827, 0, 831,
- 0, 835, 0, 839, 0, 843, 0, 846,
- 0, 851, 0, 855, 0, 859, 0, 863,
- 0, 867, 0, 871, 0, 875, 0, 879,
- 0, 883, 0, 887, 0, 891, 0, 895,
- 0, 899, 0, 903, 0, 907, 0, 911,
- 0, 915, 0, 919, 0, 923, 0, 927,
- 0, 930, 0, 934, 0, 938, 0, 943,
- 0, 946, 0, 951, 0, 955, 0, 23,
- 0, 27, 0, 31, 0, 35, 0, 39,
- 0, 43, 0, 47, 0, 51, 0, 55,
- 0, 1, 0
-};
-
-int Parser_commitLen[] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 2
-};
-
-unsigned int Parser_fssProdIdIndex[] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143,
- 144, 145, 146, 147, 148, 149, 150, 151,
- 152, 153, 154, 155, 156, 157, 158, 159,
- 160, 161, 162, 163, 164, 165, 166, 167,
- 168, 169, 170, 171, 172, 173, 174, 175,
- 176, 177, 178, 179, 180, 181, 182, 183,
- 184, 185, 186, 187, 188, 189, 190, 191,
- 192, 193, 194, 195, 196, 197, 198, 199,
- 200, 201, 202, 203, 204, 205, 206, 207,
- 208, 209, 210, 211, 212, 213, 214, 215,
- 216, 217, 218, 219, 220, 221, 222, 223,
- 224, 225, 226, 227, 228, 229, 230, 231,
- 232, 233, 234, 235, 236, 237, 238, 239
-};
-
-char Parser_fssProdLengths[] = {
- 1, 3, 0, 2, 0, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 0, 4, 5, 5, 1, 5, 4, 3,
- 4, 3, 3, 5, 2, 0, 1, 4,
- 2, 1, 1, 1, 3, 2, 1, 0,
- 3, 1, 3, 3, 3, 3, 1, 2,
- 3, 3, 3, 3, 1, 3, 1, 3,
- 1, 3, 3, 7, 3, 3, 3, 3,
- 3, 3, 7, 1, 1, 1, 1, 1,
- 1, 2, 1, 2, 1, 2, 1, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 2, 1, 2, 1,
- 2, 1, 2, 1, 3, 1, 1, 3,
- 1, 1, 1, 2, 2, 1, 2, 2,
- 2, 2, 4, 5, 5, 6, 1, 1,
- 2, 2, 1, 1, 1, 1, 3, 3,
- 3, 3, 3, 1, 1, 1, 2, 1,
- 2, 0, 2, 1, 3, 3, 1, 1,
- 2, 0, 1, 3, 2, 0, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 2, 3, 3, 4, 3, 4, 3, 4,
- 2, 2, 2, 0, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 4,
- 2, 0, 2, 1, 0, 3, 1, 1
-};
-
-unsigned short Parser_prodLhsIds[] = {
- 226, 225, 225, 227, 227, 228, 228, 228,
- 228, 228, 228, 228, 228, 228, 238, 239,
- 239, 237, 229, 230, 240, 231, 232, 232,
- 233, 234, 235, 236, 246, 246, 242, 242,
- 247, 247, 248, 248, 248, 249, 249, 249,
- 241, 241, 252, 252, 252, 252, 252, 253,
- 253, 253, 253, 253, 253, 254, 254, 255,
- 255, 257, 257, 257, 257, 257, 257, 257,
- 257, 257, 257, 257, 258, 258, 258, 258,
- 261, 261, 261, 261, 261, 261, 261, 262,
- 262, 262, 262, 262, 262, 262, 262, 262,
- 262, 262, 262, 263, 263, 263, 263, 263,
- 263, 263, 263, 263, 263, 263, 263, 264,
- 264, 264, 264, 264, 264, 264, 264, 264,
- 264, 264, 264, 265, 265, 265, 265, 265,
- 265, 265, 265, 265, 265, 265, 265, 266,
- 266, 266, 266, 266, 266, 266, 266, 266,
- 266, 266, 266, 250, 250, 250, 269, 251,
- 260, 259, 270, 270, 270, 267, 268, 268,
- 268, 268, 268, 268, 268, 268, 268, 271,
- 272, 272, 272, 273, 273, 273, 273, 273,
- 273, 273, 273, 276, 276, 244, 244, 244,
- 275, 275, 277, 277, 278, 278, 278, 278,
- 274, 274, 279, 279, 243, 243, 280, 280,
- 280, 283, 283, 283, 283, 283, 283, 281,
- 281, 281, 281, 281, 281, 281, 281, 281,
- 281, 281, 245, 245, 286, 286, 286, 282,
- 282, 282, 282, 282, 282, 282, 287, 287,
- 287, 287, 287, 284, 284, 284, 284, 284,
- 256, 288, 285, 290, 290, 289, 289, 291
-};
-
-const char *Parser_prodNames[] = {
- "start-1",
- "section_list-1",
- "section_list-2",
- "statement_list-1",
- "statement_list-2",
- "statement-1",
- "statement-2",
- "statement-3",
- "statement-4",
- "statement-5",
- "statement-6",
- "statement-7",
- "statement-8",
- "statement-9",
- "export_open-1",
- "opt_export-1",
- "opt_export-2",
- "export_block-1",
- "assignment-1",
- "instantiation-1",
- "machine_name-1",
- "action_spec-1",
- "alphtype_spec-1",
- "alphtype_spec-2",
- "range_spec-1",
- "getkey_spec-1",
- "access_spec-1",
- "variable_spec-1",
- "opt_whitespace-1",
- "opt_whitespace-2",
- "join_or_lm-1",
- "join_or_lm-2",
- "lm_part_list-1",
- "lm_part_list-2",
- "longest_match_part-1",
- "longest_match_part-2",
- "longest_match_part-3",
- "opt_lm_part_action-1",
- "opt_lm_part_action-2",
- "opt_lm_part_action-3",
- "join-1",
- "join-2",
- "expression-1",
- "expression-2",
- "expression-3",
- "expression-4",
- "expression-5",
- "term-1",
- "term-2",
- "term-3",
- "term-4",
- "term-5",
- "term-6",
- "factor_with_label-1",
- "factor_with_label-2",
- "factor_with_ep-1",
- "factor_with_ep-2",
- "factor_with_aug-1",
- "factor_with_aug-2",
- "factor_with_aug-3",
- "factor_with_aug-4",
- "factor_with_aug-5",
- "factor_with_aug-6",
- "factor_with_aug-7",
- "factor_with_aug-8",
- "factor_with_aug-9",
- "factor_with_aug-10",
- "factor_with_aug-11",
- "aug_type_base-1",
- "aug_type_base-2",
- "aug_type_base-3",
- "aug_type_base-4",
- "aug_type_cond-1",
- "aug_type_cond-2",
- "aug_type_cond-3",
- "aug_type_cond-4",
- "aug_type_cond-5",
- "aug_type_cond-6",
- "aug_type_cond-7",
- "aug_type_to_state-1",
- "aug_type_to_state-2",
- "aug_type_to_state-3",
- "aug_type_to_state-4",
- "aug_type_to_state-5",
- "aug_type_to_state-6",
- "aug_type_to_state-7",
- "aug_type_to_state-8",
- "aug_type_to_state-9",
- "aug_type_to_state-10",
- "aug_type_to_state-11",
- "aug_type_to_state-12",
- "aug_type_from_state-1",
- "aug_type_from_state-2",
- "aug_type_from_state-3",
- "aug_type_from_state-4",
- "aug_type_from_state-5",
- "aug_type_from_state-6",
- "aug_type_from_state-7",
- "aug_type_from_state-8",
- "aug_type_from_state-9",
- "aug_type_from_state-10",
- "aug_type_from_state-11",
- "aug_type_from_state-12",
- "aug_type_eof-1",
- "aug_type_eof-2",
- "aug_type_eof-3",
- "aug_type_eof-4",
- "aug_type_eof-5",
- "aug_type_eof-6",
- "aug_type_eof-7",
- "aug_type_eof-8",
- "aug_type_eof-9",
- "aug_type_eof-10",
- "aug_type_eof-11",
- "aug_type_eof-12",
- "aug_type_gbl_error-1",
- "aug_type_gbl_error-2",
- "aug_type_gbl_error-3",
- "aug_type_gbl_error-4",
- "aug_type_gbl_error-5",
- "aug_type_gbl_error-6",
- "aug_type_gbl_error-7",
- "aug_type_gbl_error-8",
- "aug_type_gbl_error-9",
- "aug_type_gbl_error-10",
- "aug_type_gbl_error-11",
- "aug_type_gbl_error-12",
- "aug_type_local_error-1",
- "aug_type_local_error-2",
- "aug_type_local_error-3",
- "aug_type_local_error-4",
- "aug_type_local_error-5",
- "aug_type_local_error-6",
- "aug_type_local_error-7",
- "aug_type_local_error-8",
- "aug_type_local_error-9",
- "aug_type_local_error-10",
- "aug_type_local_error-11",
- "aug_type_local_error-12",
- "action_embed-1",
- "action_embed-2",
- "action_embed-3",
- "action_embed_word-1",
- "action_embed_block-1",
- "priority_name-1",
- "priority_aug-1",
- "priority_aug_num-1",
- "priority_aug_num-2",
- "priority_aug_num-3",
- "local_err_name-1",
- "factor_with_rep-1",
- "factor_with_rep-2",
- "factor_with_rep-3",
- "factor_with_rep-4",
- "factor_with_rep-5",
- "factor_with_rep-6",
- "factor_with_rep-7",
- "factor_with_rep-8",
- "factor_with_rep-9",
- "factor_rep_num-1",
- "factor_with_neg-1",
- "factor_with_neg-2",
- "factor_with_neg-3",
- "factor-1",
- "factor-2",
- "factor-3",
- "factor-4",
- "factor-5",
- "factor-6",
- "factor-7",
- "factor-8",
- "range_lit-1",
- "range_lit-2",
- "alphabet_num-1",
- "alphabet_num-2",
- "alphabet_num-3",
- "regular_expr-1",
- "regular_expr-2",
- "regular_expr_item-1",
- "regular_expr_item-2",
- "regular_expr_char-1",
- "regular_expr_char-2",
- "regular_expr_char-3",
- "regular_expr_char-4",
- "regular_expr_or_data-1",
- "regular_expr_or_data-2",
- "regular_expr_or_char-1",
- "regular_expr_or_char-2",
- "inline_block-1",
- "inline_block-2",
- "inline_block_item-1",
- "inline_block_item-2",
- "inline_block_item-3",
- "inline_block_symbol-1",
- "inline_block_symbol-2",
- "inline_block_symbol-3",
- "inline_block_symbol-4",
- "inline_block_symbol-5",
- "inline_block_symbol-6",
- "inline_block_interpret-1",
- "inline_block_interpret-2",
- "inline_block_interpret-3",
- "inline_block_interpret-4",
- "inline_block_interpret-5",
- "inline_block_interpret-6",
- "inline_block_interpret-7",
- "inline_block_interpret-8",
- "inline_block_interpret-9",
- "inline_block_interpret-10",
- "inline_block_interpret-11",
- "inline_expr-1",
- "inline_expr-2",
- "inline_expr_item-1",
- "inline_expr_item-2",
- "inline_expr_item-3",
- "inline_expr_any-1",
- "inline_expr_any-2",
- "inline_expr_any-3",
- "inline_expr_any-4",
- "inline_expr_any-5",
- "inline_expr_any-6",
- "inline_expr_any-7",
- "inline_expr_symbol-1",
- "inline_expr_symbol-2",
- "inline_expr_symbol-3",
- "inline_expr_symbol-4",
- "inline_expr_symbol-5",
- "inline_expr_interpret-1",
- "inline_expr_interpret-2",
- "inline_expr_interpret-3",
- "inline_expr_interpret-4",
- "inline_expr_interpret-5",
- "local_state_ref-1",
- "no_name_sep-1",
- "state_ref-1",
- "opt_name_sep-1",
- "opt_name_sep-2",
- "state_ref_names-1",
- "state_ref_names-2",
- "_start-1"
-};
-
-const char *Parser_lelNames[] = {
- "D-0",
- "D-1",
- "D-2",
- "D-3",
- "D-4",
- "D-5",
- "D-6",
- "D-7",
- "D-8",
- "D-9",
- "D-10",
- "D-11",
- "D-12",
- "D-13",
- "D-14",
- "D-15",
- "D-16",
- "D-17",
- "D-18",
- "D-19",
- "D-20",
- "D-21",
- "D-22",
- "D-23",
- "D-24",
- "D-25",
- "D-26",
- "D-27",
- "D-28",
- "D-29",
- "D-30",
- "D-31",
- "D-32",
- "!",
- "\"",
- "#",
- "$",
- "%",
- "&",
- "'",
- "(",
- ")",
- "*",
- "+",
- ",",
- "-",
- ".",
- "/",
- "0",
- "1",
- "2",
- "3",
- "4",
- "5",
- "6",
- "7",
- "8",
- "9",
- ":",
- ";",
- "<",
- "=",
- ">",
- "?",
- "@",
- "A",
- "B",
- "C",
- "D",
- "E",
- "F",
- "G",
- "H",
- "I",
- "J",
- "K",
- "L",
- "M",
- "N",
- "O",
- "P",
- "Q",
- "R",
- "S",
- "T",
- "U",
- "V",
- "W",
- "X",
- "Y",
- "Z",
- "[",
- "\\",
- "]",
- "^",
- "_",
- "`",
- "a",
- "b",
- "c",
- "d",
- "e",
- "f",
- "g",
- "h",
- "i",
- "j",
- "k",
- "l",
- "m",
- "n",
- "o",
- "p",
- "q",
- "r",
- "s",
- "t",
- "u",
- "v",
- "w",
- "x",
- "y",
- "z",
- "{",
- "|",
- "}",
- "~",
- "D-127",
- "KW_Machine",
- "KW_Include",
- "KW_Import",
- "KW_Write",
- "TK_Word",
- "TK_Literal",
- "TK_Number",
- "TK_Inline",
- "TK_Reference",
- "TK_ColonEquals",
- "TK_EndSection",
- "TK_UInt",
- "TK_Hex",
- "TK_BaseClause",
- "TK_DotDot",
- "TK_ColonGt",
- "TK_ColonGtGt",
- "TK_LtColon",
- "TK_Arrow",
- "TK_DoubleArrow",
- "TK_StarStar",
- "TK_NameSep",
- "TK_BarStar",
- "TK_DashDash",
- "TK_StartCond",
- "TK_AllCond",
- "TK_LeavingCond",
- "TK_Middle",
- "TK_StartGblError",
- "TK_AllGblError",
- "TK_FinalGblError",
- "TK_NotFinalGblError",
- "TK_NotStartGblError",
- "TK_MiddleGblError",
- "TK_StartLocalError",
- "TK_AllLocalError",
- "TK_FinalLocalError",
- "TK_NotFinalLocalError",
- "TK_NotStartLocalError",
- "TK_MiddleLocalError",
- "TK_StartEOF",
- "TK_AllEOF",
- "TK_FinalEOF",
- "TK_NotFinalEOF",
- "TK_NotStartEOF",
- "TK_MiddleEOF",
- "TK_StartToState",
- "TK_AllToState",
- "TK_FinalToState",
- "TK_NotFinalToState",
- "TK_NotStartToState",
- "TK_MiddleToState",
- "TK_StartFromState",
- "TK_AllFromState",
- "TK_FinalFromState",
- "TK_NotFinalFromState",
- "TK_NotStartFromState",
- "TK_MiddleFromState",
- "RE_Slash",
- "RE_SqOpen",
- "RE_SqOpenNeg",
- "RE_SqClose",
- "RE_Dot",
- "RE_Star",
- "RE_Dash",
- "RE_Char",
- "IL_WhiteSpace",
- "IL_Comment",
- "IL_Literal",
- "IL_Symbol",
- "KW_Action",
- "KW_AlphType",
- "KW_Range",
- "KW_GetKey",
- "KW_When",
- "KW_Eof",
- "KW_Err",
- "KW_Lerr",
- "KW_To",
- "KW_From",
- "KW_Export",
- "KW_Break",
- "KW_Exec",
- "KW_Hold",
- "KW_PChar",
- "KW_Char",
- "KW_Goto",
- "KW_Call",
- "KW_Ret",
- "KW_CurState",
- "KW_TargState",
- "KW_Entry",
- "KW_Next",
- "KW_Variable",
- "KW_Access",
- "TK_Semi",
- "_eof",
- "section_list",
- "start",
- "statement_list",
- "statement",
- "assignment",
- "instantiation",
- "action_spec",
- "alphtype_spec",
- "range_spec",
- "getkey_spec",
- "access_spec",
- "variable_spec",
- "export_block",
- "export_open",
- "opt_export",
- "machine_name",
- "join",
- "join_or_lm",
- "inline_block",
- "alphabet_num",
- "inline_expr",
- "opt_whitespace",
- "lm_part_list",
- "longest_match_part",
- "opt_lm_part_action",
- "action_embed",
- "action_embed_block",
- "expression",
- "term",
- "factor_with_label",
- "factor_with_ep",
- "local_state_ref",
- "factor_with_aug",
- "aug_type_base",
- "priority_aug",
- "priority_name",
- "aug_type_cond",
- "aug_type_to_state",
- "aug_type_from_state",
- "aug_type_eof",
- "aug_type_gbl_error",
- "aug_type_local_error",
- "local_err_name",
- "factor_with_rep",
- "action_embed_word",
- "priority_aug_num",
- "factor_rep_num",
- "factor_with_neg",
- "factor",
- "regular_expr_or_data",
- "regular_expr",
- "range_lit",
- "regular_expr_item",
- "regular_expr_char",
- "regular_expr_or_char",
- "inline_block_item",
- "inline_block_interpret",
- "inline_expr_any",
- "inline_block_symbol",
- "inline_expr_interpret",
- "state_ref",
- "inline_expr_item",
- "inline_expr_symbol",
- "no_name_sep",
- "state_ref_names",
- "opt_name_sep",
- "_start"
-};
-
-#line 1375 "rlparse.kl"
-
-
-void Parser::init()
-{
- #line 3769 "rlparse.cpp"
- curs = Parser_startState;
- pool = 0;
- freshEl = (struct Parser_LangEl*) malloc( sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- stackTop = freshEl;
- stackTop->type = 0;
- stackTop->state = -1;
- stackTop->next = 0;
- stackTop->child = 0;
- freshPos = 1;
- lastFinal = stackTop;
- numRetry = 0;
- numNodes = 0;
- errCount = 0;
-#line 1380 "rlparse.kl"
-}
-
-int Parser::parseLangEl( int type, const Token *token )
-{
- #line 3791 "rlparse.cpp"
-#define reject() induceReject = 1
-
- int pos, targState;
- unsigned int *action;
- int rhsLen;
- struct Parser_LangEl *rhs[32];
- struct Parser_LangEl *lel;
- struct Parser_LangEl *input;
- char induceReject;
-
- if ( curs < 0 )
- return 0;
-
- if ( pool == 0 ) {
- if ( freshPos == 8128 ) {
- freshEl = (struct Parser_LangEl*) malloc(
- sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- freshPos = 0;
- }
- input = freshEl + freshPos++;
- }
- else {
- input = pool;
- pool = pool->next;
- }
- numNodes += 1;
- input->type = type;
- input->user.token = *token;
- input->next = 0;
- input->retry = 0;
- input->child = 0;
-
-again:
- if ( input == 0 )
- goto _out;
-
- lel = input;
- if ( lel->type < Parser_keys[curs<<1] || lel->type > Parser_keys[(curs<<1)+1] )
- goto parseError;
-
- pos = Parser_indicies[Parser_offsets[curs] + (lel->type - Parser_keys[curs<<1])];
- if ( pos < 0 )
- goto parseError;
-
- induceReject = 0;
- targState = Parser_targs[pos];
- action = Parser_actions + Parser_actInds[pos];
- if ( lel->retry & 0x0000ffff )
- action += (lel->retry & 0x0000ffff);
-
- if ( *action & 0x1 ) {
- #ifdef LOG_ACTIONS
- cerr << "shifted: " << Parser_lelNames[lel->type];
- #endif
- input = input->next;
- lel->state = curs;
- lel->next = stackTop;
- stackTop = lel;
-
- if ( action[1] == 0 )
- lel->retry &= 0xffff0000;
- else {
- lel->retry += 1;
- numRetry += 1;
- #ifdef LOG_ACTIONS
- cerr << " retry: " << stackTop;
- #endif
- }
- #ifdef LOG_ACTIONS
- cerr << endl;
- #endif
- }
-
- if ( Parser_commitLen[pos] != 0 ) {
- struct Parser_LangEl *commitHead = stackTop;
- int absCommitLen = Parser_commitLen[pos];
-
- #ifdef LOG_ACTIONS
- cerr << "running commit of length: " << Parser_commitLen[pos] << endl;
- #endif
-
- if ( absCommitLen < 0 ) {
- commitHead = commitHead->next;
- absCommitLen = -1 * absCommitLen;
- }
- {
- struct Parser_LangEl *lel = commitHead;
- struct Parser_LangEl **cmStack = (struct Parser_LangEl**) malloc( sizeof(struct Parser_LangEl) * numNodes);
- int n = absCommitLen, depth = 0, sp = 0;
-
-commit_head:
- if ( lel->retry > 0 ) {
- if ( lel->retry & 0x0000ffff )
- numRetry -= 1;
- if ( lel->retry & 0xffff0000 )
- numRetry -= 1;
- lel->retry = 0;
- }
-
- /* If depth is > 0 then move over lel freely, otherwise, make
- * sure that we have not already done n steps down the line. */
- if ( lel->next != 0 && ( depth > 0 || n > 1 ) ) {
- cmStack[sp++] = lel;
- lel = lel->next;
-
- /* If we are at the top level count the steps down the line. */
- if ( depth == 0 )
- n -= 1;
- goto commit_head;
- }
-
-commit_reverse:
- if ( lel->child != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->child;
-
- /* When we move down we need to increment the depth. */
- depth += 1;
- goto commit_head;
- }
-
-commit_upwards:
- if ( sp > 0 ) {
- /* Figure out which place to return to. */
- if ( cmStack[sp-1]->next == lel ) {
- lel = cmStack[--sp];
- goto commit_reverse;
- }
- else {
- /* Going back up, adjust the depth. */
- lel = cmStack[--sp];
- depth -= 1;
- goto commit_upwards;
- }
- }
- free( cmStack );
- }
- if ( numRetry == 0 ) {
- #ifdef LOG_ACTIONS
- cerr << "number of retries is zero, "
- "executing final actions" << endl;
- #endif
- {
- struct Parser_LangEl *lel = commitHead;
- struct Parser_LangEl **cmStack = (struct Parser_LangEl**) malloc( sizeof( struct Parser_LangEl) * numNodes);
- int sp = 0;
- char doExec = 0;
-
-final_head:
- if ( lel == lastFinal ) {
- doExec = 1;
- goto hit_final;
- }
-
- if ( lel->next != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->next;
- goto final_head;
- }
-
-final_reverse:
-
- if ( lel->child != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->child;
- goto final_head;
- }
-
-final_upwards:
-
- if ( doExec ) {
-{
- if ( lel->type < 225 ) {
- }
- else {
- struct Parser_LangEl *redLel = lel;
- if ( redLel->child != 0 ) {
- int r = Parser_fssProdLengths[redLel->reduction] - 1;
- struct Parser_LangEl *rhsEl = redLel->child;
- while ( rhsEl != 0 ) {
- rhs[r--] = rhsEl;
- rhsEl = rhsEl->next;
- }
- }
-switch ( lel->reduction ) {
-case 14: {
-#line 59 "rlparse.kl"
-
- exportContext.append( true );
-
-
-#line 3985 "rlparse.cpp"
-} break;
-case 15: {
-#line 68 "rlparse.kl"
- (&redLel->user.opt_export)->isSet = true;
-
-#line 3991 "rlparse.cpp"
-} break;
-case 16: {
-#line 69 "rlparse.kl"
- (&redLel->user.opt_export)->isSet = false;
-
-#line 3997 "rlparse.cpp"
-} break;
-case 17: {
-#line 72 "rlparse.kl"
-
- exportContext.remove( exportContext.length()-1 );
-
-
-#line 4005 "rlparse.cpp"
-} break;
-case 18: {
-#line 77 "rlparse.kl"
-
- /* Main machine must be an instance. */
- bool isInstance = false;
- if ( strcmp((&rhs[1]->user.token_type)->token.data, mainMachine) == 0 ) {
- warning((&rhs[1]->user.token_type)->token.loc) <<
- "main machine will be implicitly instantiated" << endl;
- isInstance = true;
- }
-
- /* Generic creation of machine for instantiation and assignment. */
- JoinOrLm *joinOrLm = new JoinOrLm( (&rhs[3]->user.join)->join );
- tryMachineDef( (&rhs[1]->user.token_type)->token.loc, (&rhs[1]->user.token_type)->token.data, joinOrLm, isInstance );
-
- if ( (&rhs[0]->user.opt_export)->isSet )
- exportContext.remove( exportContext.length()-1 );
-
-
-#line 4026 "rlparse.cpp"
-} break;
-case 19: {
-#line 95 "rlparse.kl"
-
- /* Generic creation of machine for instantiation and assignment. */
- tryMachineDef( (&rhs[1]->user.token_type)->token.loc, (&rhs[1]->user.token_type)->token.data, (&rhs[3]->user.join_or_lm)->joinOrLm, true );
-
- if ( (&rhs[0]->user.opt_export)->isSet )
- exportContext.remove( exportContext.length()-1 );
-
-
-#line 4038 "rlparse.cpp"
-} break;
-case 20: {
-#line 111 "rlparse.kl"
-
- /* Make/get the priority key. The name may have already been referenced
- * and therefore exist. */
- PriorDictEl *priorDictEl;
- if ( pd->priorDict.insert( (&rhs[0]->user.token)->data, pd->nextPriorKey, &priorDictEl ) )
- pd->nextPriorKey += 1;
- pd->curDefPriorKey = priorDictEl->value;
-
- /* Make/get the local error key. */
- LocalErrDictEl *localErrDictEl;
- if ( pd->localErrDict.insert( (&rhs[0]->user.token)->data, pd->nextLocalErrKey, &localErrDictEl ) )
- pd->nextLocalErrKey += 1;
- pd->curDefLocalErrKey = localErrDictEl->value;
-
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-
-#line 4059 "rlparse.cpp"
-} break;
-case 21: {
-#line 129 "rlparse.kl"
-
- if ( pd->actionDict.find( (&rhs[1]->user.token)->data ) ) {
- /* Recover by just ignoring the duplicate. */
- error((&rhs[1]->user.token)->loc) << "action \"" << (&rhs[1]->user.token)->data << "\" already defined" << endl;
- }
- else {
- //cerr << "NEW ACTION " << $2->data << " " << $4->inlineList << endl;
- /* Add the action to the list of actions. */
- Action *newAction = new Action( (&rhs[2]->user.token)->loc, (&rhs[1]->user.token)->data,
- (&rhs[3]->user.inline_list)->inlineList, pd->nextCondId++ );
-
- /* Insert to list and dict. */
- pd->actionList.append( newAction );
- pd->actionDict.insert( newAction );
- }
-
-
-#line 4080 "rlparse.cpp"
-} break;
-case 22: {
-#line 149 "rlparse.kl"
-
- if ( ! pd->setAlphType( (&rhs[1]->user.token)->data, (&rhs[2]->user.token)->data ) ) {
- // Recover by ignoring the alphtype statement.
- error((&rhs[1]->user.token)->loc) << "\"" << (&rhs[1]->user.token)->data <<
- " " << (&rhs[2]->user.token)->data << "\" is not a valid alphabet type" << endl;
- }
-
-
-#line 4092 "rlparse.cpp"
-} break;
-case 23: {
-#line 158 "rlparse.kl"
-
- if ( ! pd->setAlphType( (&rhs[1]->user.token)->data ) ) {
- // Recover by ignoring the alphtype statement.
- error((&rhs[1]->user.token)->loc) << "\"" << (&rhs[1]->user.token)->data <<
- "\" is not a valid alphabet type" << endl;
- }
-
-
-#line 4104 "rlparse.cpp"
-} break;
-case 24: {
-#line 168 "rlparse.kl"
-
- // Save the upper and lower ends of the range and emit the line number.
- pd->lowerNum = (&rhs[1]->user.token_type)->token.data;
- pd->upperNum = (&rhs[2]->user.token_type)->token.data;
- pd->rangeLowLoc = (&rhs[1]->user.token_type)->token.loc;
- pd->rangeHighLoc = (&rhs[2]->user.token_type)->token.loc;
-
-
-#line 4116 "rlparse.cpp"
-} break;
-case 25: {
-#line 177 "rlparse.kl"
-
- pd->getKeyExpr = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 4124 "rlparse.cpp"
-} break;
-case 26: {
-#line 182 "rlparse.kl"
-
- pd->accessExpr = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 4132 "rlparse.cpp"
-} break;
-case 27: {
-#line 187 "rlparse.kl"
-
- /* FIXME: Need to implement the rest of this. */
- if ( strcmp( (&rhs[2]->user.token)->data, "curstate" ) == 0 )
- pd->curStateExpr = (&rhs[3]->user.inline_list)->inlineList;
- else {
-		error((&rhs[2]->user.token)->loc) << "sorry, unimplemented" << endl;
- }
-
-
-#line 4145 "rlparse.cpp"
-} break;
-case 30: {
-#line 209 "rlparse.kl"
-
- (&redLel->user.join_or_lm)->joinOrLm = new JoinOrLm( (&rhs[0]->user.join)->join );
-
-
-#line 4153 "rlparse.cpp"
-} break;
-case 31: {
-#line 213 "rlparse.kl"
-
- /* Create a new factor going to a longest match structure. Record
- * in the parse data that we have a longest match. */
- LongestMatch *lm = new LongestMatch( (&rhs[0]->user.token)->loc, (&rhs[1]->user.lm_part_list)->lmPartList );
- pd->lmList.append( lm );
- for ( LmPartList::Iter lmp = *((&rhs[1]->user.lm_part_list)->lmPartList); lmp.lte(); lmp++ )
- lmp->longestMatch = lm;
- (&redLel->user.join_or_lm)->joinOrLm = new JoinOrLm( lm );
-
-
-#line 4167 "rlparse.cpp"
-} break;
-case 32: {
-#line 229 "rlparse.kl"
-
- if ( (&rhs[1]->user.longest_match_part)->lmPart != 0 )
- (&rhs[0]->user.lm_part_list)->lmPartList->append( (&rhs[1]->user.longest_match_part)->lmPart );
- (&redLel->user.lm_part_list)->lmPartList = (&rhs[0]->user.lm_part_list)->lmPartList;
-
-
-#line 4177 "rlparse.cpp"
-} break;
-case 33: {
-#line 235 "rlparse.kl"
-
- /* Create a new list with the part. */
- (&redLel->user.lm_part_list)->lmPartList = new LmPartList;
- if ( (&rhs[0]->user.longest_match_part)->lmPart != 0 )
- (&redLel->user.lm_part_list)->lmPartList->append( (&rhs[0]->user.longest_match_part)->lmPart );
-
-
-#line 4188 "rlparse.cpp"
-} break;
-case 34: {
-#line 248 "rlparse.kl"
- (&redLel->user.longest_match_part)->lmPart = 0;
-
-#line 4194 "rlparse.cpp"
-} break;
-case 35: {
-#line 250 "rlparse.kl"
- (&redLel->user.longest_match_part)->lmPart = 0;
-
-#line 4200 "rlparse.cpp"
-} break;
-case 36: {
-#line 252 "rlparse.kl"
-
- (&redLel->user.longest_match_part)->lmPart = 0;
- Action *action = (&rhs[1]->user.opt_lm_part_action)->action;
- if ( action != 0 )
- action->isLmAction = true;
- (&redLel->user.longest_match_part)->lmPart = new LongestMatchPart( (&rhs[0]->user.join)->join, action,
- (&rhs[2]->user.token)->loc, pd->nextLongestMatchId++ );
-
-
-#line 4213 "rlparse.cpp"
-} break;
-case 37: {
-#line 267 "rlparse.kl"
-
- (&redLel->user.opt_lm_part_action)->action = (&rhs[1]->user.action_ref)->action;
-
-
-#line 4221 "rlparse.cpp"
-} break;
-case 38: {
-#line 271 "rlparse.kl"
-
- (&redLel->user.opt_lm_part_action)->action = (&rhs[0]->user.action_ref)->action;
-
-
-#line 4229 "rlparse.cpp"
-} break;
-case 39: {
-#line 275 "rlparse.kl"
-
- (&redLel->user.opt_lm_part_action)->action = 0;
-
-
-#line 4237 "rlparse.cpp"
-} break;
-case 40: {
-#line 286 "rlparse.kl"
-
- /* Append the expression to the list and return it. */
- (&rhs[0]->user.join)->join->exprList.append( (&rhs[2]->user.expression)->expression );
- (&redLel->user.join)->join = (&rhs[0]->user.join)->join;
-
-
-#line 4247 "rlparse.cpp"
-} break;
-case 41: {
-#line 292 "rlparse.kl"
-
- (&redLel->user.join)->join = new Join( (&rhs[0]->user.expression)->expression );
-
-
-#line 4255 "rlparse.cpp"
-} break;
-case 42: {
-#line 302 "rlparse.kl"
-
- (&redLel->user.expression)->expression = new Expression( (&rhs[0]->user.expression)->expression,
- (&rhs[2]->user.term)->term, Expression::OrType );
-
-
-#line 4264 "rlparse.cpp"
-} break;
-case 43: {
-#line 307 "rlparse.kl"
-
- (&redLel->user.expression)->expression = new Expression( (&rhs[0]->user.expression)->expression,
- (&rhs[2]->user.term)->term, Expression::IntersectType );
-
-
-#line 4273 "rlparse.cpp"
-} break;
-case 44: {
-#line 314 "rlparse.kl"
-
- (&redLel->user.expression)->expression = new Expression( (&rhs[0]->user.expression)->expression,
- (&rhs[2]->user.term)->term, Expression::SubtractType );
-
-
-#line 4282 "rlparse.cpp"
-} break;
-case 45: {
-#line 319 "rlparse.kl"
-
- (&redLel->user.expression)->expression = new Expression( (&rhs[0]->user.expression)->expression,
- (&rhs[2]->user.term)->term, Expression::StrongSubtractType );
-
-
-#line 4291 "rlparse.cpp"
-} break;
-case 46: {
-#line 324 "rlparse.kl"
-
- (&redLel->user.expression)->expression = new Expression( (&rhs[0]->user.term)->term );
-
-
-#line 4299 "rlparse.cpp"
-} break;
-case 47: {
-#line 334 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.term)->term, (&rhs[1]->user.factor_with_label)->factorWithAug );
-
-
-#line 4307 "rlparse.cpp"
-} break;
-case 48: {
-#line 338 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.term)->term, (&rhs[2]->user.factor_with_label)->factorWithAug );
-
-
-#line 4315 "rlparse.cpp"
-} break;
-case 49: {
-#line 342 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.term)->term, (&rhs[2]->user.factor_with_label)->factorWithAug, Term::RightStartType );
-
-
-#line 4323 "rlparse.cpp"
-} break;
-case 50: {
-#line 346 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.term)->term, (&rhs[2]->user.factor_with_label)->factorWithAug, Term::RightFinishType );
-
-
-#line 4331 "rlparse.cpp"
-} break;
-case 51: {
-#line 350 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.term)->term,
- (&rhs[2]->user.factor_with_label)->factorWithAug, Term::LeftType );
-
-
-#line 4340 "rlparse.cpp"
-} break;
-case 52: {
-#line 355 "rlparse.kl"
-
- (&redLel->user.term)->term = new Term( (&rhs[0]->user.factor_with_label)->factorWithAug );
-
-
-#line 4348 "rlparse.cpp"
-} break;
-case 53: {
-#line 365 "rlparse.kl"
-
- /* Add the label to the list and pass the factor up. */
- (&rhs[2]->user.factor_with_label)->factorWithAug->labels.prepend( Label((&rhs[0]->user.token)->loc, (&rhs[0]->user.token)->data) );
- (&redLel->user.factor_with_label)->factorWithAug = (&rhs[2]->user.factor_with_label)->factorWithAug;
-
-
-#line 4358 "rlparse.cpp"
-} break;
-case 54: {
-#line 371 "rlparse.kl"
-
- (&redLel->user.factor_with_label)->factorWithAug = (&rhs[0]->user.factor_with_ep)->factorWithAug;
-
-
-#line 4366 "rlparse.cpp"
-} break;
-case 55: {
-#line 381 "rlparse.kl"
-
- /* Add the target to the list and return the factor object. */
- (&rhs[0]->user.factor_with_ep)->factorWithAug->epsilonLinks.append( EpsilonLink( (&rhs[1]->user.token)->loc, nameRef ) );
- (&redLel->user.factor_with_ep)->factorWithAug = (&rhs[0]->user.factor_with_ep)->factorWithAug;
-
-
-#line 4376 "rlparse.cpp"
-} break;
-case 56: {
-#line 387 "rlparse.kl"
-
- (&redLel->user.factor_with_ep)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4384 "rlparse.cpp"
-} break;
-case 57: {
-#line 397 "rlparse.kl"
-
-	/* Append the action to the factorWithAug, record the reference from
- * factorWithAug to the action and pass up the factorWithAug. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append(
- ParserAction( (&rhs[1]->user.aug_type)->loc, (&rhs[1]->user.aug_type)->augType, 0, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4396 "rlparse.cpp"
-} break;
-case 58: {
-#line 405 "rlparse.kl"
-
- /* Append the named priority to the factorWithAug and pass it up. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->priorityAugs.append(
- PriorityAug( (&rhs[1]->user.aug_type)->augType, pd->curDefPriorKey, (&rhs[2]->user.priority_aug)->priorityNum ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4407 "rlparse.cpp"
-} break;
-case 59: {
-#line 412 "rlparse.kl"
-
- /* Append the priority using a default name. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->priorityAugs.append(
- PriorityAug( (&rhs[1]->user.aug_type)->augType, (&rhs[3]->user.priority_name)->priorityName, (&rhs[5]->user.priority_aug)->priorityNum ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4418 "rlparse.cpp"
-} break;
-case 60: {
-#line 419 "rlparse.kl"
-
- (&rhs[0]->user.factor_with_aug)->factorWithAug->conditions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, 0, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4428 "rlparse.cpp"
-} break;
-case 61: {
-#line 425 "rlparse.kl"
-
- /* Append the action, pass it up. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, 0, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4439 "rlparse.cpp"
-} break;
-case 62: {
-#line 432 "rlparse.kl"
-
- /* Append the action, pass it up. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, 0, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4450 "rlparse.cpp"
-} break;
-case 63: {
-#line 439 "rlparse.kl"
-
- /* Append the action, pass it up. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, 0, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4461 "rlparse.cpp"
-} break;
-case 64: {
-#line 446 "rlparse.kl"
-
-	/* Append the action to the factorWithAug, record the reference from
- * factorWithAug to the action and pass up the factorWithAug. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, pd->curDefLocalErrKey, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4473 "rlparse.cpp"
-} break;
-case 65: {
-#line 454 "rlparse.kl"
-
-	/* Append the action to the factorWithAug, record the reference from
- * factorWithAug to the action and pass up the factorWithAug. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, pd->curDefLocalErrKey, (&rhs[2]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4485 "rlparse.cpp"
-} break;
-case 66: {
-#line 462 "rlparse.kl"
-
-	/* Append the action to the factorWithAug, record the reference from
- * factorWithAug to the action and pass up the factorWithAug. */
- (&rhs[0]->user.factor_with_aug)->factorWithAug->actions.append( ParserAction( (&rhs[1]->user.aug_type)->loc,
- (&rhs[1]->user.aug_type)->augType, (&rhs[3]->user.local_err_name)->error_name, (&rhs[5]->user.action_ref)->action ) );
- (&redLel->user.factor_with_aug)->factorWithAug = (&rhs[0]->user.factor_with_aug)->factorWithAug;
-
-
-#line 4497 "rlparse.cpp"
-} break;
-case 67: {
-#line 470 "rlparse.kl"
-
- (&redLel->user.factor_with_aug)->factorWithAug = new FactorWithAug( (&rhs[0]->user.factor_with_rep)->factorWithRep );
-
-
-#line 4505 "rlparse.cpp"
-} break;
-case 68: {
-#line 483 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_finish;
-
-#line 4511 "rlparse.cpp"
-} break;
-case 69: {
-#line 484 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_leave;
-
-#line 4517 "rlparse.cpp"
-} break;
-case 70: {
-#line 485 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all;
-
-#line 4523 "rlparse.cpp"
-} break;
-case 71: {
-#line 486 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start;
-
-#line 4529 "rlparse.cpp"
-} break;
-case 72: {
-#line 491 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start;
-
-#line 4535 "rlparse.cpp"
-} break;
-case 73: {
-#line 492 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start;
-
-#line 4541 "rlparse.cpp"
-} break;
-case 74: {
-#line 493 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all;
-
-#line 4547 "rlparse.cpp"
-} break;
-case 75: {
-#line 494 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all;
-
-#line 4553 "rlparse.cpp"
-} break;
-case 76: {
-#line 495 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_leave;
-
-#line 4559 "rlparse.cpp"
-} break;
-case 77: {
-#line 496 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_leave;
-
-#line 4565 "rlparse.cpp"
-} break;
-case 78: {
-#line 497 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all;
-
-#line 4571 "rlparse.cpp"
-} break;
-case 79: {
-#line 506 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_to_state;
-
-#line 4577 "rlparse.cpp"
-} break;
-case 80: {
-#line 508 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_to_state;
-
-#line 4583 "rlparse.cpp"
-} break;
-case 81: {
-#line 511 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_to_state;
-
-#line 4589 "rlparse.cpp"
-} break;
-case 82: {
-#line 513 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_to_state;
-
-#line 4595 "rlparse.cpp"
-} break;
-case 83: {
-#line 516 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_to_state;
-
-#line 4601 "rlparse.cpp"
-} break;
-case 84: {
-#line 518 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_to_state;
-
-#line 4607 "rlparse.cpp"
-} break;
-case 85: {
-#line 521 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_to_state;
-
-#line 4613 "rlparse.cpp"
-} break;
-case 86: {
-#line 523 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_to_state;
-
-#line 4619 "rlparse.cpp"
-} break;
-case 87: {
-#line 526 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_to_state;
-
-#line 4625 "rlparse.cpp"
-} break;
-case 88: {
-#line 528 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_to_state;
-
-#line 4631 "rlparse.cpp"
-} break;
-case 89: {
-#line 531 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_to_state;
-
-#line 4637 "rlparse.cpp"
-} break;
-case 90: {
-#line 533 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_to_state;
-
-#line 4643 "rlparse.cpp"
-} break;
-case 91: {
-#line 542 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_from_state;
-
-#line 4649 "rlparse.cpp"
-} break;
-case 92: {
-#line 544 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_from_state;
-
-#line 4655 "rlparse.cpp"
-} break;
-case 93: {
-#line 547 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_from_state;
-
-#line 4661 "rlparse.cpp"
-} break;
-case 94: {
-#line 549 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_from_state;
-
-#line 4667 "rlparse.cpp"
-} break;
-case 95: {
-#line 552 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_from_state;
-
-#line 4673 "rlparse.cpp"
-} break;
-case 96: {
-#line 554 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_from_state;
-
-#line 4679 "rlparse.cpp"
-} break;
-case 97: {
-#line 557 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_from_state;
-
-#line 4685 "rlparse.cpp"
-} break;
-case 98: {
-#line 559 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_from_state;
-
-#line 4691 "rlparse.cpp"
-} break;
-case 99: {
-#line 562 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_from_state;
-
-#line 4697 "rlparse.cpp"
-} break;
-case 100: {
-#line 564 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_from_state;
-
-#line 4703 "rlparse.cpp"
-} break;
-case 101: {
-#line 567 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_from_state;
-
-#line 4709 "rlparse.cpp"
-} break;
-case 102: {
-#line 569 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_from_state;
-
-#line 4715 "rlparse.cpp"
-} break;
-case 103: {
-#line 578 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_eof;
-
-#line 4721 "rlparse.cpp"
-} break;
-case 104: {
-#line 580 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_eof;
-
-#line 4727 "rlparse.cpp"
-} break;
-case 105: {
-#line 583 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_eof;
-
-#line 4733 "rlparse.cpp"
-} break;
-case 106: {
-#line 585 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_eof;
-
-#line 4739 "rlparse.cpp"
-} break;
-case 107: {
-#line 588 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_eof;
-
-#line 4745 "rlparse.cpp"
-} break;
-case 108: {
-#line 590 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_eof;
-
-#line 4751 "rlparse.cpp"
-} break;
-case 109: {
-#line 593 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_eof;
-
-#line 4757 "rlparse.cpp"
-} break;
-case 110: {
-#line 595 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_eof;
-
-#line 4763 "rlparse.cpp"
-} break;
-case 111: {
-#line 598 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_eof;
-
-#line 4769 "rlparse.cpp"
-} break;
-case 112: {
-#line 600 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_eof;
-
-#line 4775 "rlparse.cpp"
-} break;
-case 113: {
-#line 603 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_eof;
-
-#line 4781 "rlparse.cpp"
-} break;
-case 114: {
-#line 605 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_eof;
-
-#line 4787 "rlparse.cpp"
-} break;
-case 115: {
-#line 614 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_gbl_error;
-
-#line 4793 "rlparse.cpp"
-} break;
-case 116: {
-#line 616 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_gbl_error;
-
-#line 4799 "rlparse.cpp"
-} break;
-case 117: {
-#line 619 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_gbl_error;
-
-#line 4805 "rlparse.cpp"
-} break;
-case 118: {
-#line 621 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_gbl_error;
-
-#line 4811 "rlparse.cpp"
-} break;
-case 119: {
-#line 624 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_gbl_error;
-
-#line 4817 "rlparse.cpp"
-} break;
-case 120: {
-#line 626 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_gbl_error;
-
-#line 4823 "rlparse.cpp"
-} break;
-case 121: {
-#line 629 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_gbl_error;
-
-#line 4829 "rlparse.cpp"
-} break;
-case 122: {
-#line 631 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_gbl_error;
-
-#line 4835 "rlparse.cpp"
-} break;
-case 123: {
-#line 634 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_gbl_error;
-
-#line 4841 "rlparse.cpp"
-} break;
-case 124: {
-#line 636 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_gbl_error;
-
-#line 4847 "rlparse.cpp"
-} break;
-case 125: {
-#line 639 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_gbl_error;
-
-#line 4853 "rlparse.cpp"
-} break;
-case 126: {
-#line 641 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_gbl_error;
-
-#line 4859 "rlparse.cpp"
-} break;
-case 127: {
-#line 651 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_local_error;
-
-#line 4865 "rlparse.cpp"
-} break;
-case 128: {
-#line 653 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_start_local_error;
-
-#line 4871 "rlparse.cpp"
-} break;
-case 129: {
-#line 656 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_local_error;
-
-#line 4877 "rlparse.cpp"
-} break;
-case 130: {
-#line 658 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_start_local_error;
-
-#line 4883 "rlparse.cpp"
-} break;
-case 131: {
-#line 661 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_local_error;
-
-#line 4889 "rlparse.cpp"
-} break;
-case 132: {
-#line 663 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_all_local_error;
-
-#line 4895 "rlparse.cpp"
-} break;
-case 133: {
-#line 666 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_local_error;
-
-#line 4901 "rlparse.cpp"
-} break;
-case 134: {
-#line 668 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_final_local_error;
-
-#line 4907 "rlparse.cpp"
-} break;
-case 135: {
-#line 671 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_local_error;
-
-#line 4913 "rlparse.cpp"
-} break;
-case 136: {
-#line 673 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_not_final_local_error;
-
-#line 4919 "rlparse.cpp"
-} break;
-case 137: {
-#line 676 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_local_error;
-
-#line 4925 "rlparse.cpp"
-} break;
-case 138: {
-#line 678 "rlparse.kl"
- (&redLel->user.aug_type)->loc = (&rhs[0]->user.token)->loc; (&redLel->user.aug_type)->augType = at_middle_local_error;
-
-#line 4931 "rlparse.cpp"
-} break;
-case 139: {
-#line 691 "rlparse.kl"
- (&redLel->user.action_ref)->action = (&rhs[0]->user.action_ref)->action;
-
-#line 4937 "rlparse.cpp"
-} break;
-case 140: {
-#line 692 "rlparse.kl"
- (&redLel->user.action_ref)->action = (&rhs[1]->user.action_ref)->action;
-
-#line 4943 "rlparse.cpp"
-} break;
-case 141: {
-#line 693 "rlparse.kl"
- (&redLel->user.action_ref)->action = (&rhs[0]->user.action_ref)->action;
-
-#line 4949 "rlparse.cpp"
-} break;
-case 142: {
-#line 698 "rlparse.kl"
-
- /* Set the name in the actionDict. */
- Action *action = pd->actionDict.find( (&rhs[0]->user.token)->data );
- if ( action != 0 ) {
- /* Pass up the action element */
- (&redLel->user.action_ref)->action = action;
- }
- else {
- /* Will recover by returning null as the action. */
- error((&rhs[0]->user.token)->loc) << "action lookup of \"" << (&rhs[0]->user.token)->data << "\" failed" << endl;
- (&redLel->user.action_ref)->action = 0;
- }
-
-
-#line 4967 "rlparse.cpp"
-} break;
-case 143: {
-#line 715 "rlparse.kl"
-
- /* Create the action, add it to the list and pass up. */
- Action *newAction = new Action( (&rhs[0]->user.token)->loc, 0, (&rhs[1]->user.inline_list)->inlineList, pd->nextCondId++ );
- pd->actionList.append( newAction );
- (&redLel->user.action_ref)->action = newAction;
-
-
-#line 4978 "rlparse.cpp"
-} break;
-case 144: {
-#line 730 "rlparse.kl"
-
- // Lookup/create the priority key.
- PriorDictEl *priorDictEl;
- if ( pd->priorDict.insert( (&rhs[0]->user.token)->data, pd->nextPriorKey, &priorDictEl ) )
- pd->nextPriorKey += 1;
-
- // Use the inserted/found priority key.
- (&redLel->user.priority_name)->priorityName = priorDictEl->value;
-
-
-#line 4992 "rlparse.cpp"
-} break;
-case 145: {
-#line 747 "rlparse.kl"
-
- // Convert the priority number to a long. Check for overflow.
- errno = 0;
- //cerr << "PRIOR AUG: " << $1->token.data << endl;
- long aug = strtol( (&rhs[0]->user.token_type)->token.data, 0, 10 );
- if ( errno == ERANGE && aug == LONG_MAX ) {
- /* Priority number too large. Recover by setting the priority to 0. */
- error((&rhs[0]->user.token_type)->token.loc) << "priority number " << (&rhs[0]->user.token_type)->token.data <<
- " overflows" << endl;
- (&redLel->user.priority_aug)->priorityNum = 0;
- }
- else if ( errno == ERANGE && aug == LONG_MIN ) {
-		/* Priority number too large in the negative direction. Recover by using 0. */
- error((&rhs[0]->user.token_type)->token.loc) << "priority number " << (&rhs[0]->user.token_type)->token.data <<
- " underflows" << endl;
- (&redLel->user.priority_aug)->priorityNum = 0;
- }
- else {
- /* No overflow or underflow. */
- (&redLel->user.priority_aug)->priorityNum = aug;
- }
-
-
-#line 5019 "rlparse.cpp"
-} break;
-case 146: {
-#line 773 "rlparse.kl"
-
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-
-#line 5027 "rlparse.cpp"
-} break;
-case 147: {
-#line 777 "rlparse.kl"
-
- (&redLel->user.token_type)->token.set( "+", 1 );
- (&redLel->user.token_type)->token.loc = (&rhs[0]->user.token)->loc;
- (&redLel->user.token_type)->token.append( *(&rhs[1]->user.token) );
-
-
-#line 5037 "rlparse.cpp"
-} break;
-case 148: {
-#line 783 "rlparse.kl"
-
- (&redLel->user.token_type)->token.set( "-", 1 );
- (&redLel->user.token_type)->token.loc = (&rhs[0]->user.token)->loc;
- (&redLel->user.token_type)->token.append( *(&rhs[1]->user.token) );
-
-
-#line 5047 "rlparse.cpp"
-} break;
-case 149: {
-#line 795 "rlparse.kl"
-
-	/* Lookup/create the local error key. */
- LocalErrDictEl *localErrDictEl;
- if ( pd->localErrDict.insert( (&rhs[0]->user.token)->data, pd->nextLocalErrKey, &localErrDictEl ) )
- pd->nextLocalErrKey += 1;
-
-	/* Use the inserted/found local error key. */
- (&redLel->user.local_err_name)->error_name = localErrDictEl->value;
-
-
-#line 5061 "rlparse.cpp"
-} break;
-case 150: {
-#line 816 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- 0, 0, FactorWithRep::StarType );
-
-
-#line 5070 "rlparse.cpp"
-} break;
-case 151: {
-#line 821 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- 0, 0, FactorWithRep::StarStarType );
-
-
-#line 5079 "rlparse.cpp"
-} break;
-case 152: {
-#line 826 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- 0, 0, FactorWithRep::OptionalType );
-
-
-#line 5088 "rlparse.cpp"
-} break;
-case 153: {
-#line 831 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- 0, 0, FactorWithRep::PlusType );
-
-
-#line 5097 "rlparse.cpp"
-} break;
-case 154: {
-#line 836 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- (&rhs[2]->user.factor_rep_num)->rep, 0, FactorWithRep::ExactType );
-
-
-#line 5106 "rlparse.cpp"
-} break;
-case 155: {
-#line 841 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- 0, (&rhs[3]->user.factor_rep_num)->rep, FactorWithRep::MaxType );
-
-
-#line 5115 "rlparse.cpp"
-} break;
-case 156: {
-#line 846 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- (&rhs[2]->user.factor_rep_num)->rep, 0, FactorWithRep::MinType );
-
-
-#line 5124 "rlparse.cpp"
-} break;
-case 157: {
-#line 851 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[1]->user.token)->loc, (&rhs[0]->user.factor_with_rep)->factorWithRep,
- (&rhs[2]->user.factor_rep_num)->rep, (&rhs[4]->user.factor_rep_num)->rep, FactorWithRep::RangeType );
-
-
-#line 5133 "rlparse.cpp"
-} break;
-case 158: {
-#line 856 "rlparse.kl"
-
- (&redLel->user.factor_with_rep)->factorWithRep = new FactorWithRep( (&rhs[0]->user.factor_with_neg)->factorWithNeg );
-
-
-#line 5141 "rlparse.cpp"
-} break;
-case 159: {
-#line 866 "rlparse.kl"
-
- // Convert the priority number to a long. Check for overflow.
- errno = 0;
- long rep = strtol( (&rhs[0]->user.token)->data, 0, 10 );
- if ( errno == ERANGE && rep == LONG_MAX ) {
-		// Repetition too large. Recover by returning repetition 1.
- error((&rhs[0]->user.token)->loc) << "repetition number " << (&rhs[0]->user.token)->data << " overflows" << endl;
- (&redLel->user.factor_rep_num)->rep = 1;
- }
- else {
-		// Cannot be negative, so no underflow.
- (&redLel->user.factor_rep_num)->rep = rep;
- }
-
-
-#line 5160 "rlparse.cpp"
-} break;
-case 160: {
-#line 892 "rlparse.kl"
-
- (&redLel->user.factor_with_neg)->factorWithNeg = new FactorWithNeg( (&rhs[0]->user.token)->loc,
- (&rhs[1]->user.factor_with_neg)->factorWithNeg, FactorWithNeg::NegateType );
-
-
-#line 5169 "rlparse.cpp"
-} break;
-case 161: {
-#line 897 "rlparse.kl"
-
- (&redLel->user.factor_with_neg)->factorWithNeg = new FactorWithNeg( (&rhs[0]->user.token)->loc,
- (&rhs[1]->user.factor_with_neg)->factorWithNeg, FactorWithNeg::CharNegateType );
-
-
-#line 5178 "rlparse.cpp"
-} break;
-case 162: {
-#line 902 "rlparse.kl"
-
- (&redLel->user.factor_with_neg)->factorWithNeg = new FactorWithNeg( (&rhs[0]->user.factor)->factor );
-
-
-#line 5186 "rlparse.cpp"
-} break;
-case 163: {
-#line 912 "rlparse.kl"
-
- /* Create a new factor node going to a concat literal. */
- (&redLel->user.factor)->factor = new Factor( new Literal( *(&rhs[0]->user.token), Literal::LitString ) );
-
-
-#line 5195 "rlparse.cpp"
-} break;
-case 164: {
-#line 917 "rlparse.kl"
-
- /* Create a new factor node going to a literal number. */
- (&redLel->user.factor)->factor = new Factor( new Literal( (&rhs[0]->user.token_type)->token, Literal::Number ) );
-
-
-#line 5204 "rlparse.cpp"
-} break;
-case 165: {
-#line 922 "rlparse.kl"
-
- /* Find the named graph. */
- GraphDictEl *gdNode = pd->graphDict.find( (&rhs[0]->user.token)->data );
- if ( gdNode == 0 ) {
- /* Recover by returning null as the factor node. */
- error((&rhs[0]->user.token)->loc) << "graph lookup of \"" << (&rhs[0]->user.token)->data << "\" failed" << endl;
- (&redLel->user.factor)->factor = 0;
- }
- else if ( gdNode->isInstance ) {
-		/* Recover by returning null as the factor node. */
- error((&rhs[0]->user.token)->loc) << "references to graph instantiations not allowed "
- "in expressions" << endl;
- (&redLel->user.factor)->factor = 0;
- }
- else {
- /* Create a factor node that is a lookup of an expression. */
- (&redLel->user.factor)->factor = new Factor( (&rhs[0]->user.token)->loc, gdNode->value );
- }
-
-
-#line 5228 "rlparse.cpp"
-} break;
-case 166: {
-#line 942 "rlparse.kl"
-
- /* Create a new factor node going to an OR expression. */
- (&redLel->user.factor)->factor = new Factor( new ReItem( (&rhs[0]->user.token)->loc, (&rhs[1]->user.regular_expr_or_data)->reOrBlock, ReItem::OrBlock ) );
-
-
-#line 5237 "rlparse.cpp"
-} break;
-case 167: {
-#line 947 "rlparse.kl"
-
- /* Create a new factor node going to a negated OR expression. */
- (&redLel->user.factor)->factor = new Factor( new ReItem( (&rhs[0]->user.token)->loc, (&rhs[1]->user.regular_expr_or_data)->reOrBlock, ReItem::NegOrBlock ) );
-
-
-#line 5246 "rlparse.cpp"
-} break;
-case 168: {
-#line 952 "rlparse.kl"
-
- if ( (&rhs[2]->user.token)->length > 1 ) {
- for ( char *p = (&rhs[2]->user.token)->data; *p != 0; p++ ) {
- if ( *p == 'i' )
- (&rhs[1]->user.regular_expr)->regExpr->caseInsensitive = true;
- }
- }
-
- /* Create a new factor node going to a regular exp. */
- (&redLel->user.factor)->factor = new Factor( (&rhs[1]->user.regular_expr)->regExpr );
-
-
-#line 5262 "rlparse.cpp"
-} break;
-case 169: {
-#line 964 "rlparse.kl"
-
- /* Create a new factor node going to a range. */
- (&redLel->user.factor)->factor = new Factor( new Range( (&rhs[0]->user.range_lit)->literal, (&rhs[2]->user.range_lit)->literal ) );
-
-
-#line 5271 "rlparse.cpp"
-} break;
-case 170: {
-#line 969 "rlparse.kl"
-
- /* Create a new factor going to a parenthesized join. */
- (&redLel->user.factor)->factor = new Factor( (&rhs[1]->user.join)->join );
-
-
-#line 5280 "rlparse.cpp"
-} break;
-case 171: {
-#line 981 "rlparse.kl"
-
-	/* Range literals must have only one char. We restrict this in the parse tree. */
- (&redLel->user.range_lit)->literal = new Literal( *(&rhs[0]->user.token), Literal::LitString );
-
-
-#line 5289 "rlparse.cpp"
-} break;
-case 172: {
-#line 986 "rlparse.kl"
-
- /* Create a new literal number. */
- (&redLel->user.range_lit)->literal = new Literal( (&rhs[0]->user.token_type)->token, Literal::Number );
-
-
-#line 5298 "rlparse.cpp"
-} break;
-case 173: {
-#line 995 "rlparse.kl"
-
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-
-#line 5306 "rlparse.cpp"
-} break;
-case 174: {
-#line 999 "rlparse.kl"
-
- (&redLel->user.token_type)->token.set( "-", 1 );
- (&redLel->user.token_type)->token.loc = (&rhs[0]->user.token)->loc;
- (&redLel->user.token_type)->token.append( *(&rhs[1]->user.token) );
-
-
-#line 5316 "rlparse.cpp"
-} break;
-case 175: {
-#line 1005 "rlparse.kl"
-
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-
-#line 5324 "rlparse.cpp"
-} break;
-case 176: {
-#line 1020 "rlparse.kl"
-
- /* An optimization to lessen the tree size. If a non-starred char is
- * directly under the left side on the right and the right side is
- * another non-starred char then paste them together and return the
- * left side. Otherwise just put the two under a new reg exp node. */
- if ( (&rhs[1]->user.regular_expr_item)->reItem->type == ReItem::Data && !(&rhs[1]->user.regular_expr_item)->reItem->star &&
- (&rhs[0]->user.regular_expr)->regExpr->type == RegExpr::RecurseItem &&
- (&rhs[0]->user.regular_expr)->regExpr->item->type == ReItem::Data && !(&rhs[0]->user.regular_expr)->regExpr->item->star )
- {
- /* Append the right side to the right side of the left and toss the
- * right side. */
- (&rhs[0]->user.regular_expr)->regExpr->item->token.append( (&rhs[1]->user.regular_expr_item)->reItem->token );
- delete (&rhs[1]->user.regular_expr_item)->reItem;
- (&redLel->user.regular_expr)->regExpr = (&rhs[0]->user.regular_expr)->regExpr;
- }
- else {
- (&redLel->user.regular_expr)->regExpr = new RegExpr( (&rhs[0]->user.regular_expr)->regExpr, (&rhs[1]->user.regular_expr_item)->reItem );
- }
-
-
-#line 5348 "rlparse.cpp"
-} break;
-case 177: {
-#line 1040 "rlparse.kl"
-
- /* Can't optimize the tree. */
- (&redLel->user.regular_expr)->regExpr = new RegExpr();
-
-
-#line 5357 "rlparse.cpp"
-} break;
-case 178: {
-#line 1052 "rlparse.kl"
-
- (&rhs[0]->user.regular_expr_char)->reItem->star = true;
- (&redLel->user.regular_expr_item)->reItem = (&rhs[0]->user.regular_expr_char)->reItem;
-
-
-#line 5366 "rlparse.cpp"
-} break;
-case 179: {
-#line 1057 "rlparse.kl"
-
- (&redLel->user.regular_expr_item)->reItem = (&rhs[0]->user.regular_expr_char)->reItem;
-
-
-#line 5374 "rlparse.cpp"
-} break;
-case 180: {
-#line 1069 "rlparse.kl"
-
- (&redLel->user.regular_expr_char)->reItem = new ReItem( (&rhs[0]->user.token)->loc, (&rhs[1]->user.regular_expr_or_data)->reOrBlock, ReItem::OrBlock );
-
-
-#line 5382 "rlparse.cpp"
-} break;
-case 181: {
-#line 1073 "rlparse.kl"
-
- (&redLel->user.regular_expr_char)->reItem = new ReItem( (&rhs[0]->user.token)->loc, (&rhs[1]->user.regular_expr_or_data)->reOrBlock, ReItem::NegOrBlock );
-
-
-#line 5390 "rlparse.cpp"
-} break;
-case 182: {
-#line 1077 "rlparse.kl"
-
- (&redLel->user.regular_expr_char)->reItem = new ReItem( (&rhs[0]->user.token)->loc, ReItem::Dot );
-
-
-#line 5398 "rlparse.cpp"
-} break;
-case 183: {
-#line 1081 "rlparse.kl"
-
- (&redLel->user.regular_expr_char)->reItem = new ReItem( (&rhs[0]->user.token)->loc, *(&rhs[0]->user.token) );
-
-
-#line 5406 "rlparse.cpp"
-} break;
-case 184: {
-#line 1093 "rlparse.kl"
-
- /* An optimization to lessen the tree size. If an or char is directly
- * under the left side on the right and the right side is another or
- * char then paste them together and return the left side. Otherwise
- * just put the two under a new or data node. */
- if ( (&rhs[1]->user.regular_expr_or_char)->reOrItem->type == ReOrItem::Data &&
- (&rhs[0]->user.regular_expr_or_data)->reOrBlock->type == ReOrBlock::RecurseItem &&
- (&rhs[0]->user.regular_expr_or_data)->reOrBlock->item->type == ReOrItem::Data )
- {
-		/* Append the right side to the right side of the left and toss the
- * right side. */
- (&rhs[0]->user.regular_expr_or_data)->reOrBlock->item->token.append( (&rhs[1]->user.regular_expr_or_char)->reOrItem->token );
- delete (&rhs[1]->user.regular_expr_or_char)->reOrItem;
- (&redLel->user.regular_expr_or_data)->reOrBlock = (&rhs[0]->user.regular_expr_or_data)->reOrBlock;
- }
- else {
- /* Can't optimize, put the left and right under a new node. */
- (&redLel->user.regular_expr_or_data)->reOrBlock = new ReOrBlock( (&rhs[0]->user.regular_expr_or_data)->reOrBlock, (&rhs[1]->user.regular_expr_or_char)->reOrItem );
- }
-
-
-#line 5431 "rlparse.cpp"
-} break;
-case 185: {
-#line 1114 "rlparse.kl"
-
- (&redLel->user.regular_expr_or_data)->reOrBlock = new ReOrBlock();
-
-
-#line 5439 "rlparse.cpp"
-} break;
-case 186: {
-#line 1126 "rlparse.kl"
-
- (&redLel->user.regular_expr_or_char)->reOrItem = new ReOrItem( (&rhs[0]->user.token)->loc, *(&rhs[0]->user.token) );
-
-
-#line 5447 "rlparse.cpp"
-} break;
-case 187: {
-#line 1130 "rlparse.kl"
-
- (&redLel->user.regular_expr_or_char)->reOrItem = new ReOrItem( (&rhs[1]->user.token)->loc, (&rhs[0]->user.token)->data[0], (&rhs[2]->user.token)->data[0] );
-
-
-#line 5455 "rlparse.cpp"
-} break;
-case 188: {
-#line 1147 "rlparse.kl"
-
- /* Append the item to the list, return the list. */
- (&redLel->user.inline_list)->inlineList = (&rhs[0]->user.inline_list)->inlineList;
- (&redLel->user.inline_list)->inlineList->append( (&rhs[1]->user.inline_item)->inlineItem );
-
-
-#line 5465 "rlparse.cpp"
-} break;
-case 189: {
-#line 1154 "rlparse.kl"
-
- /* Start with empty list. */
- (&redLel->user.inline_list)->inlineList = new InlineList;
-
-
-#line 5474 "rlparse.cpp"
-} break;
-case 190: {
-#line 1169 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token_type)->token.loc, (&rhs[0]->user.token_type)->token.data, InlineItem::Text );
-
-
-#line 5482 "rlparse.cpp"
-} break;
-case 191: {
-#line 1175 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token_type)->token.loc, (&rhs[0]->user.token_type)->token.data, InlineItem::Text );
-
-
-#line 5490 "rlparse.cpp"
-} break;
-case 192: {
-#line 1181 "rlparse.kl"
-
- /* Pass the inline item up. */
- (&redLel->user.inline_item)->inlineItem = (&rhs[0]->user.inline_item)->inlineItem;
-
-
-#line 5499 "rlparse.cpp"
-} break;
-case 193: {
-#line 1188 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5505 "rlparse.cpp"
-} break;
-case 194: {
-#line 1189 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5511 "rlparse.cpp"
-} break;
-case 195: {
-#line 1190 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5517 "rlparse.cpp"
-} break;
-case 196: {
-#line 1191 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5523 "rlparse.cpp"
-} break;
-case 197: {
-#line 1192 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5529 "rlparse.cpp"
-} break;
-case 198: {
-#line 1193 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5535 "rlparse.cpp"
-} break;
-case 199: {
-#line 1197 "rlparse.kl"
-
- /* Pass up interpreted items of inline expressions. */
- (&redLel->user.inline_item)->inlineItem = (&rhs[0]->user.inline_item)->inlineItem;
-
-
-#line 5544 "rlparse.cpp"
-} break;
-case 200: {
-#line 1202 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Hold );
-
-
-#line 5552 "rlparse.cpp"
-} break;
-case 201: {
-#line 1206 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Exec );
- (&redLel->user.inline_item)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 5561 "rlparse.cpp"
-} break;
-case 202: {
-#line 1211 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc,
- new NameRef(nameRef), InlineItem::Goto );
-
-
-#line 5570 "rlparse.cpp"
-} break;
-case 203: {
-#line 1216 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::GotoExpr );
- (&redLel->user.inline_item)->inlineItem->children = (&rhs[2]->user.inline_list)->inlineList;
-
-
-#line 5579 "rlparse.cpp"
-} break;
-case 204: {
-#line 1221 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, new NameRef(nameRef), InlineItem::Next );
-
-
-#line 5587 "rlparse.cpp"
-} break;
-case 205: {
-#line 1225 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::NextExpr );
- (&redLel->user.inline_item)->inlineItem->children = (&rhs[2]->user.inline_list)->inlineList;
-
-
-#line 5596 "rlparse.cpp"
-} break;
-case 206: {
-#line 1230 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, new NameRef(nameRef), InlineItem::Call );
-
-
-#line 5604 "rlparse.cpp"
-} break;
-case 207: {
-#line 1234 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::CallExpr );
- (&redLel->user.inline_item)->inlineItem->children = (&rhs[2]->user.inline_list)->inlineList;
-
-
-#line 5613 "rlparse.cpp"
-} break;
-case 208: {
-#line 1239 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Ret );
-
-
-#line 5621 "rlparse.cpp"
-} break;
-case 209: {
-#line 1243 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Break );
-
-
-#line 5629 "rlparse.cpp"
-} break;
-case 210: {
-#line 1251 "rlparse.kl"
-
- (&redLel->user.inline_list)->inlineList = (&rhs[0]->user.inline_list)->inlineList;
- (&redLel->user.inline_list)->inlineList->append( (&rhs[1]->user.inline_item)->inlineItem );
-
-
-#line 5638 "rlparse.cpp"
-} break;
-case 211: {
-#line 1256 "rlparse.kl"
-
- /* Init the list used for this expr. */
- (&redLel->user.inline_list)->inlineList = new InlineList;
-
-
-#line 5647 "rlparse.cpp"
-} break;
-case 212: {
-#line 1265 "rlparse.kl"
-
- /* Return a text segment. */
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token_type)->token.loc, (&rhs[0]->user.token_type)->token.data, InlineItem::Text );
-
-
-#line 5656 "rlparse.cpp"
-} break;
-case 213: {
-#line 1271 "rlparse.kl"
-
- /* Return a text segment, must heap alloc the text. */
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token_type)->token.loc, (&rhs[0]->user.token_type)->token.data, InlineItem::Text );
-
-
-#line 5665 "rlparse.cpp"
-} break;
-case 214: {
-#line 1277 "rlparse.kl"
-
- /* Pass the inline item up. */
- (&redLel->user.inline_item)->inlineItem = (&rhs[0]->user.inline_item)->inlineItem;
-
-
-#line 5674 "rlparse.cpp"
-} break;
-case 227: {
-#line 1307 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::PChar );
-
-
-#line 5682 "rlparse.cpp"
-} break;
-case 228: {
-#line 1312 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Char );
-
-
-#line 5690 "rlparse.cpp"
-} break;
-case 229: {
-#line 1317 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Curs );
-
-
-#line 5698 "rlparse.cpp"
-} break;
-case 230: {
-#line 1322 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc, InlineItem::Targs );
-
-
-#line 5706 "rlparse.cpp"
-} break;
-case 231: {
-#line 1327 "rlparse.kl"
-
- (&redLel->user.inline_item)->inlineItem = new InlineItem( (&rhs[0]->user.token)->loc,
- new NameRef(nameRef), InlineItem::Entry );
-
-
-#line 5715 "rlparse.cpp"
-} break;
-case 233: {
-#line 1338 "rlparse.kl"
-
- nameRef.empty();
-
-
-#line 5723 "rlparse.cpp"
-} break;
-case 235: {
-#line 1348 "rlparse.kl"
-
- /* Insert an initial null pointer val to indicate the existence of the
-	 * initial name separator. */
- nameRef.setAs( 0 );
-
-
-#line 5733 "rlparse.cpp"
-} break;
-case 236: {
-#line 1354 "rlparse.kl"
-
- nameRef.empty();
-
-
-#line 5741 "rlparse.cpp"
-} break;
-case 237: {
-#line 1361 "rlparse.kl"
-
- nameRef.append( (&rhs[2]->user.token)->data );
-
-
-#line 5749 "rlparse.cpp"
-} break;
-case 238: {
-#line 1366 "rlparse.kl"
-
- nameRef.append( (&rhs[0]->user.token)->data );
-
-
-#line 5757 "rlparse.cpp"
-} break;
-}
- }
-}
-
- if ( lel->child != 0 ) {
- struct Parser_LangEl *first = lel->child;
- struct Parser_LangEl *child = lel->child;
- numNodes -= 1;
- lel->child = 0;
- while ( child->next != 0 ) {
- child = child->next;
- numNodes -= 1;
- }
- child->next = pool;
- pool = first;
- }
- }
-
-hit_final:
- if ( sp > 0 ) {
- /* Figure out which place to return to. */
- if ( cmStack[sp-1]->next == lel ) {
- lel = cmStack[--sp];
- goto final_reverse;
- }
- else {
- lel = cmStack[--sp];
- goto final_upwards;
- }
- }
-
- lastFinal = lel;
- free( cmStack );
- }
- }
- }
-
- if ( *action & 0x2 ) {
- int fssRed = *action >> 2;
- int reduction = Parser_fssProdIdIndex[fssRed];
- struct Parser_LangEl *redLel;
- if ( pool == 0 ) {
- if ( freshPos == 8128 ) {
- freshEl = (struct Parser_LangEl*) malloc(
- sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- freshPos = 0;
- }
- redLel = freshEl + freshPos++;
- }
- else {
- redLel = pool;
- pool = pool->next;
- }
- numNodes += 1;
- redLel->type = Parser_prodLhsIds[reduction];
- redLel->reduction = reduction;
- redLel->child = 0;
- redLel->next = 0;
- redLel->retry = (lel->retry << 16);
- lel->retry &= 0xffff0000;
-
- rhsLen = Parser_fssProdLengths[fssRed];
- if ( rhsLen > 0 ) {
- int r;
- for ( r = rhsLen-1; r > 0; r-- ) {
- rhs[r] = stackTop;
- stackTop = stackTop->next;
- }
- rhs[0] = stackTop;
- stackTop = stackTop->next;
- rhs[0]->next = 0;
- }
-switch ( reduction ) {
-case 215: {
-#line 1284 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5839 "rlparse.cpp"
-} break;
-case 216: {
-#line 1285 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5845 "rlparse.cpp"
-} break;
-case 217: {
-#line 1286 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5851 "rlparse.cpp"
-} break;
-case 218: {
-#line 1287 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5857 "rlparse.cpp"
-} break;
-case 219: {
-#line 1288 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5863 "rlparse.cpp"
-} break;
-case 220: {
-#line 1289 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5869 "rlparse.cpp"
-} break;
-case 221: {
-#line 1290 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5875 "rlparse.cpp"
-} break;
-case 222: {
-#line 1297 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5881 "rlparse.cpp"
-} break;
-case 223: {
-#line 1298 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5887 "rlparse.cpp"
-} break;
-case 224: {
-#line 1299 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5893 "rlparse.cpp"
-} break;
-case 225: {
-#line 1300 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5899 "rlparse.cpp"
-} break;
-case 226: {
-#line 1301 "rlparse.kl"
- (&redLel->user.token_type)->token = *(&rhs[0]->user.token);
-
-#line 5905 "rlparse.cpp"
-} break;
-}
- #ifdef LOG_ACTIONS
- cerr << "reduced: "
- << Parser_prodNames[reduction]
- << " rhsLen: " << rhsLen;
- #endif
- if ( action[1] == 0 )
- redLel->retry = 0;
- else {
- redLel->retry += 0x10000;
- numRetry += 1;
- #ifdef LOG_ACTIONS
- cerr << " retry: " << redLel;
- #endif
- }
-
- #ifdef LOG_ACTIONS
- cerr << endl;
- #endif
-
- if ( rhsLen == 0 ) {
- redLel->file = lel->file;
- redLel->line = lel->line;
- targState = curs;
- }
- else {
- redLel->child = rhs[rhsLen-1];
- redLel->file = rhs[0]->file;
- redLel->line = rhs[0]->line;
- targState = rhs[0]->state;
- }
-
- if ( induceReject ) {
- #ifdef LOG_ACTIONS
- cerr << "error induced during reduction of " <<
- Parser_lelNames[redLel->type] << endl;
- #endif
- redLel->state = curs;
- redLel->next = stackTop;
- stackTop = redLel;
- curs = targState;
- goto parseError;
- }
- else {
- redLel->next = input;
- input = redLel;
- }
- }
-
-
- curs = targState;
- goto again;
-
-parseError:
- #ifdef LOG_BACKTRACK
- cerr << "hit error" << endl;
- #endif
- if ( numRetry > 0 ) {
- while ( 1 ) {
- struct Parser_LangEl *redLel = stackTop;
- if ( stackTop->type < 225 ) {
- #ifdef LOG_BACKTRACK
- cerr << "backing up over terminal: " <<
- Parser_lelNames[stackTop->type] << endl;
- #endif
- stackTop = stackTop->next;
- redLel->next = input;
- input = redLel;
- }
- else {
- #ifdef LOG_BACKTRACK
- cerr << "backing up over non-terminal: " <<
- Parser_lelNames[stackTop->type] << endl;
- #endif
- stackTop = stackTop->next;
- struct Parser_LangEl *first = redLel->child;
- if ( first == 0 )
- rhsLen = 0;
- else {
- rhsLen = 1;
- while ( first->next != 0 ) {
- first = first->next;
- rhsLen += 1;
- }
- first->next = stackTop;
- stackTop = redLel->child;
-
- struct Parser_LangEl *rhsEl = stackTop;
- int p = rhsLen;
- while ( p > 0 ) {
- rhs[--p] = rhsEl;
- rhsEl = rhsEl->next;
- }
- }
- redLel->next = pool;
- pool = redLel;
- numNodes -= 1;
- }
-
- if ( redLel->retry > 0 ) {
- #ifdef LOG_BACKTRACK
- cerr << "found retry targ: " << redLel << endl;
- #endif
- numRetry -= 1;
- #ifdef LOG_BACKTRACK
- cerr << "found retry: " << redLel << endl;
- #endif
- if ( redLel->retry & 0x0000ffff )
- curs = input->state;
- else {
- input->retry = redLel->retry >> 16;
- if ( stackTop->state < 0 )
- curs = Parser_startState;
- else {
- curs = Parser_targs[(int)Parser_indicies[Parser_offsets[stackTop->state] + (stackTop->type - Parser_keys[stackTop->state<<1])]];
- }
- }
- goto again;
- }
- }
- }
- curs = -1;
- errCount += 1;
-_out: {}
-#line 1385 "rlparse.kl"
- return errCount == 0 ? 0 : -1;
-}
-
-void Parser::tryMachineDef( InputLoc &loc, char *name,
- JoinOrLm *joinOrLm, bool isInstance )
-{
- GraphDictEl *newEl = pd->graphDict.insert( name );
- if ( newEl != 0 ) {
- /* New element in the dict, all good. */
- newEl->value = new VarDef( name, joinOrLm );
- newEl->isInstance = isInstance;
- newEl->loc = loc;
- newEl->value->isExport = exportContext[exportContext.length()-1];
-
-		/* If it is an instance, put it on the instance list. */
- if ( isInstance )
- pd->instanceList.append( newEl );
- }
- else {
- // Recover by ignoring the duplicate.
- error(loc) << "fsm \"" << name << "\" previously defined" << endl;
- }
-}
-
-ostream &Parser::parse_error( int tokId, Token &token )
-{
- /* Maintain the error count. */
- gblErrorCount += 1;
-
- cerr << token.loc.fileName << ":" << token.loc.line << ":" << token.loc.col << ": ";
- cerr << "at token ";
- if ( tokId < 128 )
- cerr << "\"" << Parser_lelNames[tokId] << "\"";
- else
- cerr << Parser_lelNames[tokId];
- if ( token.data != 0 )
- cerr << " with data \"" << token.data << "\"";
- cerr << ": ";
-
- return cerr;
-}
-
-int Parser::token( InputLoc &loc, int tokId, char *tokstart, int toklen )
-{
- Token token;
- token.data = tokstart;
- token.length = toklen;
- token.loc = loc;
- int res = parseLangEl( tokId, &token );
- if ( res < 0 ) {
- parse_error(tokId, token) << "parse error" << endl;
- exit(1);
- }
- return res;
-}
diff --git a/contrib/tools/ragel5/ragel/rlparse.h b/contrib/tools/ragel5/ragel/rlparse.h
deleted file mode 100644
index 957db0fd69..0000000000
--- a/contrib/tools/ragel5/ragel/rlparse.h
+++ /dev/null
@@ -1,184 +0,0 @@
-/* Automatically generated by Kelbt from "rlparse.kh".
- *
- * Parts of this file are copied from Kelbt source covered by the GNU
- * GPL. As a special exception, you may use the parts of this file copied
- * from Kelbt source without restriction. The remainder is derived from
- * "rlparse.kh" and inherits the copyright status of that file.
- */
-
-#line 1 "rlparse.kh"
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef RLPARSE_H
-#define RLPARSE_H
-
-#include <iostream>
-#include "avltree.h"
-#include "parsedata.h"
-
-struct Parser
-{
-#line 93 "rlparse.kh"
-
-
- #line 44 "rlparse.h"
- struct Parser_LangEl *freshEl;
- int freshPos;
- struct Parser_LangEl *pool;
- int numRetry;
- int numNodes;
- struct Parser_LangEl *stackTop;
- struct Parser_LangEl *lastFinal;
- int errCount;
- int curs;
-#line 96 "rlparse.kh"
-
- void init();
- int parseLangEl( int type, const Token *token );
-
- Parser(const char *fileName, char *sectionName, InputLoc &sectionLoc )
- : sectionName(sectionName)
- {
- pd = new ParseData( fileName, sectionName, sectionLoc );
- exportContext.append( false );
- }
-
- int token( InputLoc &loc, int tokId, char *tokstart, int toklen );
- void tryMachineDef( InputLoc &loc, char *name,
- JoinOrLm *joinOrLm, bool isInstance );
-
- /* Report an error encountered by the parser. */
- ostream &parse_error( int tokId, Token &token );
-
- ParseData *pd;
-
- /* The name of the root section, this does not change during an include. */
- char *sectionName;
-
- NameRef nameRef;
- NameRefList nameRefList;
-
- Vector<bool> exportContext;
-};
-
-#line 84 "rlparse.h"
-#define KW_Machine 128
-#define KW_Include 129
-#define KW_Import 130
-#define KW_Write 131
-#define TK_Word 132
-#define TK_Literal 133
-#define TK_Number 134
-#define TK_Inline 135
-#define TK_Reference 136
-#define TK_ColonEquals 137
-#define TK_EndSection 138
-#define TK_UInt 139
-#define TK_Hex 140
-#define TK_BaseClause 141
-#define TK_DotDot 142
-#define TK_ColonGt 143
-#define TK_ColonGtGt 144
-#define TK_LtColon 145
-#define TK_Arrow 146
-#define TK_DoubleArrow 147
-#define TK_StarStar 148
-#define TK_NameSep 149
-#define TK_BarStar 150
-#define TK_DashDash 151
-#define TK_StartCond 152
-#define TK_AllCond 153
-#define TK_LeavingCond 154
-#define TK_Middle 155
-#define TK_StartGblError 156
-#define TK_AllGblError 157
-#define TK_FinalGblError 158
-#define TK_NotFinalGblError 159
-#define TK_NotStartGblError 160
-#define TK_MiddleGblError 161
-#define TK_StartLocalError 162
-#define TK_AllLocalError 163
-#define TK_FinalLocalError 164
-#define TK_NotFinalLocalError 165
-#define TK_NotStartLocalError 166
-#define TK_MiddleLocalError 167
-#define TK_StartEOF 168
-#define TK_AllEOF 169
-#define TK_FinalEOF 170
-#define TK_NotFinalEOF 171
-#define TK_NotStartEOF 172
-#define TK_MiddleEOF 173
-#define TK_StartToState 174
-#define TK_AllToState 175
-#define TK_FinalToState 176
-#define TK_NotFinalToState 177
-#define TK_NotStartToState 178
-#define TK_MiddleToState 179
-#define TK_StartFromState 180
-#define TK_AllFromState 181
-#define TK_FinalFromState 182
-#define TK_NotFinalFromState 183
-#define TK_NotStartFromState 184
-#define TK_MiddleFromState 185
-#define RE_Slash 186
-#define RE_SqOpen 187
-#define RE_SqOpenNeg 188
-#define RE_SqClose 189
-#define RE_Dot 190
-#define RE_Star 191
-#define RE_Dash 192
-#define RE_Char 193
-#define IL_WhiteSpace 194
-#define IL_Comment 195
-#define IL_Literal 196
-#define IL_Symbol 197
-#define KW_Action 198
-#define KW_AlphType 199
-#define KW_Range 200
-#define KW_GetKey 201
-#define KW_When 202
-#define KW_Eof 203
-#define KW_Err 204
-#define KW_Lerr 205
-#define KW_To 206
-#define KW_From 207
-#define KW_Export 208
-#define KW_Break 209
-#define KW_Exec 210
-#define KW_Hold 211
-#define KW_PChar 212
-#define KW_Char 213
-#define KW_Goto 214
-#define KW_Call 215
-#define KW_Ret 216
-#define KW_CurState 217
-#define KW_TargState 218
-#define KW_Entry 219
-#define KW_Next 220
-#define KW_Variable 221
-#define KW_Access 222
-#define TK_Semi 223
-#define _eof 224
-
-#line 126 "rlparse.kh"
-
-#endif
diff --git a/contrib/tools/ragel5/ragel/rlscan.cpp b/contrib/tools/ragel5/ragel/rlscan.cpp
deleted file mode 100644
index 47a7f02148..0000000000
--- a/contrib/tools/ragel5/ragel/rlscan.cpp
+++ /dev/null
@@ -1,4876 +0,0 @@
-#line 1 "rlscan.rl"
-/*
- * Copyright 2006-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <iostream>
-#include <fstream>
-#include <string.h>
-
-#include "ragel.h"
-#include "rlscan.h"
-
-//#define LOG_TOKENS
-
-using std::ifstream;
-using std::istream;
-using std::ostream;
-using std::cout;
-using std::cerr;
-using std::endl;
-
-enum InlineBlockType
-{
- CurlyDelimited,
- SemiTerminated
-};
-
-
-/*
- * The Scanner for Importing
- */
-
-#define IMP_Word 128
-#define IMP_Literal 129
-#define IMP_UInt 130
-#define IMP_Define 131
-
-#line 124 "rlscan.rl"
-
-
-
-#line 60 "rlscan.cpp"
-static const int inline_token_scan_start = 2;
-
-static const int inline_token_scan_first_final = 2;
-
-static const int inline_token_scan_error = -1;
-
-#line 127 "rlscan.rl"
-
-void Scanner::flushImport()
-{
- int *p = token_data;
- int *pe = token_data + cur_token;
-
-
-#line 75 "rlscan.cpp"
- {
- tok_cs = inline_token_scan_start;
- tok_tokstart = 0;
- tok_tokend = 0;
- tok_act = 0;
- }
-#line 134 "rlscan.rl"
-
-#line 84 "rlscan.cpp"
- {
- if ( p == pe )
- goto _out;
- switch ( tok_cs )
- {
-tr0:
-#line 122 "rlscan.rl"
- {{p = (( tok_tokend))-1;}}
- goto st2;
-tr1:
-#line 108 "rlscan.rl"
- { tok_tokend = p+1;{
- int base = tok_tokstart - token_data;
- int nameOff = 0;
- int litOff = 2;
-
- directToParser( inclToParser, fileName, line, column, TK_Word,
- token_strings[base+nameOff], token_lens[base+nameOff] );
- directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
- directToParser( inclToParser, fileName, line, column, TK_Literal,
- token_strings[base+litOff], token_lens[base+litOff] );
- directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
- }{p = (( tok_tokend))-1;}}
- goto st2;
-tr2:
-#line 80 "rlscan.rl"
- { tok_tokend = p+1;{
- int base = tok_tokstart - token_data;
- int nameOff = 0;
- int numOff = 2;
-
- directToParser( inclToParser, fileName, line, column, TK_Word,
- token_strings[base+nameOff], token_lens[base+nameOff] );
- directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
- directToParser( inclToParser, fileName, line, column, TK_UInt,
- token_strings[base+numOff], token_lens[base+numOff] );
- directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
- }{p = (( tok_tokend))-1;}}
- goto st2;
-tr3:
-#line 94 "rlscan.rl"
- { tok_tokend = p+1;{
- int base = tok_tokstart - token_data;
- int nameOff = 1;
- int litOff = 2;
-
- directToParser( inclToParser, fileName, line, column, TK_Word,
- token_strings[base+nameOff], token_lens[base+nameOff] );
- directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
- directToParser( inclToParser, fileName, line, column, TK_Literal,
- token_strings[base+litOff], token_lens[base+litOff] );
- directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
- }{p = (( tok_tokend))-1;}}
- goto st2;
-tr4:
-#line 66 "rlscan.rl"
- { tok_tokend = p+1;{
- int base = tok_tokstart - token_data;
- int nameOff = 1;
- int numOff = 2;
-
- directToParser( inclToParser, fileName, line, column, TK_Word,
- token_strings[base+nameOff], token_lens[base+nameOff] );
- directToParser( inclToParser, fileName, line, column, '=', 0, 0 );
- directToParser( inclToParser, fileName, line, column, TK_UInt,
- token_strings[base+numOff], token_lens[base+numOff] );
- directToParser( inclToParser, fileName, line, column, ';', 0, 0 );
- }{p = (( tok_tokend))-1;}}
- goto st2;
-tr5:
-#line 122 "rlscan.rl"
- { tok_tokend = p+1;{p = (( tok_tokend))-1;}}
- goto st2;
-tr8:
-#line 122 "rlscan.rl"
- { tok_tokend = p;{p = (( tok_tokend))-1;}}
- goto st2;
-st2:
-#line 1 "rlscan.rl"
- { tok_tokstart = 0;}
- if ( ++p == pe )
- goto _out2;
-case 2:
-#line 1 "rlscan.rl"
- { tok_tokstart = p;}
-#line 170 "rlscan.cpp"
- switch( (*p) ) {
- case 128: goto tr6;
- case 131: goto tr7;
- }
- goto tr5;
-tr6:
-#line 1 "rlscan.rl"
- { tok_tokend = p+1;}
- goto st3;
-st3:
- if ( ++p == pe )
- goto _out3;
-case 3:
-#line 184 "rlscan.cpp"
- if ( (*p) == 61 )
- goto st0;
- goto tr8;
-st0:
- if ( ++p == pe )
- goto _out0;
-case 0:
- switch( (*p) ) {
- case 129: goto tr1;
- case 130: goto tr2;
- }
- goto tr0;
-tr7:
-#line 1 "rlscan.rl"
- { tok_tokend = p+1;}
- goto st4;
-st4:
- if ( ++p == pe )
- goto _out4;
-case 4:
-#line 205 "rlscan.cpp"
- if ( (*p) == 128 )
- goto st1;
- goto tr8;
-st1:
- if ( ++p == pe )
- goto _out1;
-case 1:
- switch( (*p) ) {
- case 129: goto tr3;
- case 130: goto tr4;
- }
- goto tr0;
- }
- _out2: tok_cs = 2; goto _out;
- _out3: tok_cs = 3; goto _out;
- _out0: tok_cs = 0; goto _out;
- _out4: tok_cs = 4; goto _out;
- _out1: tok_cs = 1; goto _out;
-
- _out: {}
- }
-#line 135 "rlscan.rl"
-
- if ( tok_tokstart == 0 )
- cur_token = 0;
- else {
- cur_token = pe - tok_tokstart;
- int ts_offset = tok_tokstart - token_data;
- memmove( token_data, token_data+ts_offset, cur_token*sizeof(token_data[0]) );
- memmove( token_strings, token_strings+ts_offset, cur_token*sizeof(token_strings[0]) );
- memmove( token_lens, token_lens+ts_offset, cur_token*sizeof(token_lens[0]) );
- }
-}
-
-void Scanner::directToParser( Parser *toParser, const char *tokFileName, int tokLine,
- int tokColumn, int type, char *tokdata, int toklen )
-{
- InputLoc loc;
-
- #ifdef LOG_TOKENS
- cerr << "scanner:" << tokLine << ":" << tokColumn <<
- ": sending token to the parser " << Parser_lelNames[type];
- cerr << " " << toklen;
- if ( tokdata != 0 )
- cerr << " " << tokdata;
- cerr << endl;
- #endif
-
- loc.fileName = tokFileName;
- loc.line = tokLine;
- loc.col = tokColumn;
-
- toParser->token( loc, type, tokdata, toklen );
-}
-
-void Scanner::importToken( int token, char *start, char *end )
-{
- if ( cur_token == max_tokens )
- flushImport();
-
- token_data[cur_token] = token;
- if ( start == 0 ) {
- token_strings[cur_token] = 0;
- token_lens[cur_token] = 0;
- }
- else {
- int toklen = end-start;
- token_lens[cur_token] = toklen;
- token_strings[cur_token] = new char[toklen+1];
- memcpy( token_strings[cur_token], start, toklen );
- token_strings[cur_token][toklen] = 0;
- }
- cur_token++;
-}
-
-void Scanner::pass( int token, char *start, char *end )
-{
- if ( importMachines )
- importToken( token, start, end );
- pass();
-}
-
-void Scanner::pass()
-{
- updateCol();
-
- /* If no errors and we are at the bottom of the include stack (the
- * source file listed on the command line) then write out the data. */
- if ( includeDepth == 0 && machineSpec == 0 && machineName == 0 )
- xmlEscapeHost( output, tokstart, tokend-tokstart );
-}
-
-/*
- * The scanner for processing sections, includes, imports, etc.
- */
-
-
-#line 303 "rlscan.cpp"
-static const int section_parse_start = 10;
-
-static const int section_parse_first_final = 10;
-
-static const int section_parse_error = 0;
-
-#line 213 "rlscan.rl"
-
-
-
-void Scanner::init( )
-{
-
-#line 317 "rlscan.cpp"
- {
- cs = section_parse_start;
- }
-#line 219 "rlscan.rl"
-}
-
-bool Scanner::active()
-{
- if ( ignoreSection )
- return false;
-
- if ( parser == 0 && ! parserExistsError ) {
- scan_error() << "there is no previous specification name" << endl;
- parserExistsError = true;
- }
-
- if ( parser == 0 )
- return false;
-
- return true;
-}
-
-ostream &Scanner::scan_error()
-{
- /* Maintain the error count. */
- gblErrorCount += 1;
- cerr << fileName << ":" << line << ":" << column << ": ";
- return cerr;
-}
-
-bool Scanner::recursiveInclude(const char *inclFileName, char *inclSectionName )
-{
- for ( IncludeStack::Iter si = includeStack; si.lte(); si++ ) {
- if ( strcmp( si->fileName, inclFileName ) == 0 &&
- strcmp( si->sectionName, inclSectionName ) == 0 )
- {
- return true;
- }
- }
- return false;
-}
-
-void Scanner::updateCol()
-{
- char *from = lastnl;
- if ( from == 0 )
- from = tokstart;
- //cerr << "adding " << tokend - from << " to column" << endl;
- column += tokend - from;
- lastnl = 0;
-}
-
-#line 442 "rlscan.rl"
-
-
-void Scanner::token( int type, char c )
-{
- token( type, &c, &c + 1 );
-}
-
-void Scanner::token( int type )
-{
- token( type, 0, 0 );
-}
-
-void Scanner::token( int type, char *start, char *end )
-{
- char *tokdata = 0;
- int toklen = 0;
- if ( start != 0 ) {
- toklen = end-start;
- tokdata = new char[toklen+1];
- memcpy( tokdata, start, toklen );
- tokdata[toklen] = 0;
- }
-
- processToken( type, tokdata, toklen );
-}
-
-void Scanner::processToken( int type, char *tokdata, int toklen )
-{
- int *p = &type;
- int *pe = &type + 1;
-
-
-#line 403 "rlscan.cpp"
- {
- if ( p == pe )
- goto _out;
- switch ( cs )
- {
-tr2:
-#line 289 "rlscan.rl"
- {
- /* Assign a name to the machine. */
- char *machine = word;
-
- if ( !importMachines && inclSectionTarg == 0 ) {
- ignoreSection = false;
-
- ParserDictEl *pdEl = parserDict.find( machine );
- if ( pdEl == 0 ) {
- pdEl = new ParserDictEl( machine );
- pdEl->value = new Parser( fileName, machine, sectionLoc );
- pdEl->value->init();
- parserDict.insert( pdEl );
- }
-
- parser = pdEl->value;
- }
- else if ( !importMachines && strcmp( inclSectionTarg, machine ) == 0 ) {
- /* found include target */
- ignoreSection = false;
- parser = inclToParser;
- }
- else {
- /* ignoring section */
- ignoreSection = true;
- parser = 0;
- }
- }
- goto st10;
-tr6:
-#line 323 "rlscan.rl"
- {
- if ( active() ) {
- char *inclSectionName = word;
- const char *inclFileName = 0;
-
- /* Implement defaults for the input file and section name. */
- if ( inclSectionName == 0 )
- inclSectionName = parser->sectionName;
-
- if ( lit != 0 )
- inclFileName = prepareFileName( lit, lit_len );
- else
- inclFileName = fileName;
-
- /* Check for a recursive include structure. Add the current file/section
- * name then check if what we are including is already in the stack. */
- includeStack.append( IncludeStackItem( fileName, parser->sectionName ) );
-
- if ( recursiveInclude( inclFileName, inclSectionName ) )
- scan_error() << "include: this is a recursive include operation" << endl;
- else {
- /* Open the input file for reading. */
- ifstream *inFile = new ifstream( inclFileName );
- if ( ! inFile->is_open() ) {
- scan_error() << "include: could not open " <<
- inclFileName << " for reading" << endl;
- }
-
- Scanner scanner( inclFileName, *inFile, output, parser,
- inclSectionName, includeDepth+1, false );
- scanner.do_scan( );
- delete inFile;
- }
-
- /* Remove the last element (len-1) */
- includeStack.remove( -1 );
- }
- }
- goto st10;
-tr10:
-#line 372 "rlscan.rl"
- {
- if ( active() ) {
- char *importFileName = prepareFileName( lit, lit_len );
-
- /* Open the input file for reading. */
- ifstream *inFile = new ifstream( importFileName );
- if ( ! inFile->is_open() ) {
- scan_error() << "import: could not open " <<
- importFileName << " for reading" << endl;
- }
-
- Scanner scanner( importFileName, *inFile, output, parser,
- 0, includeDepth+1, true );
- scanner.do_scan( );
- scanner.importToken( 0, 0, 0 );
- scanner.flushImport();
- delete inFile;
- }
- }
- goto st10;
-tr13:
-#line 414 "rlscan.rl"
- {
- if ( active() && machineSpec == 0 && machineName == 0 )
- output << "</write>\n";
- }
- goto st10;
-tr14:
-#line 425 "rlscan.rl"
- {
- /* Send the token off to the parser. */
- if ( active() )
- directToParser( parser, fileName, line, column, type, tokdata, toklen );
- }
- goto st10;
-st10:
- if ( ++p == pe )
- goto _out10;
-case 10:
-#line 522 "rlscan.cpp"
- switch( (*p) ) {
- case 128: goto st1;
- case 129: goto st3;
- case 130: goto st6;
- case 131: goto tr18;
- }
- goto tr14;
-st1:
- if ( ++p == pe )
- goto _out1;
-case 1:
- if ( (*p) == 132 )
- goto tr1;
- goto tr0;
-tr0:
-#line 283 "rlscan.rl"
- { scan_error() << "bad machine statement" << endl; }
- goto st0;
-tr3:
-#line 284 "rlscan.rl"
- { scan_error() << "bad include statement" << endl; }
- goto st0;
-tr8:
-#line 285 "rlscan.rl"
- { scan_error() << "bad import statement" << endl; }
- goto st0;
-tr11:
-#line 286 "rlscan.rl"
- { scan_error() << "bad write statement" << endl; }
- goto st0;
-#line 553 "rlscan.cpp"
-st0:
- goto _out0;
-tr1:
-#line 280 "rlscan.rl"
- { word = tokdata; word_len = toklen; }
- goto st2;
-st2:
- if ( ++p == pe )
- goto _out2;
-case 2:
-#line 564 "rlscan.cpp"
- if ( (*p) == 59 )
- goto tr2;
- goto tr0;
-st3:
- if ( ++p == pe )
- goto _out3;
-case 3:
- switch( (*p) ) {
- case 132: goto tr4;
- case 133: goto tr5;
- }
- goto tr3;
-tr4:
-#line 279 "rlscan.rl"
- { word = lit = 0; word_len = lit_len = 0; }
-#line 280 "rlscan.rl"
- { word = tokdata; word_len = toklen; }
- goto st4;
-st4:
- if ( ++p == pe )
- goto _out4;
-case 4:
-#line 587 "rlscan.cpp"
- switch( (*p) ) {
- case 59: goto tr6;
- case 133: goto tr7;
- }
- goto tr3;
-tr5:
-#line 279 "rlscan.rl"
- { word = lit = 0; word_len = lit_len = 0; }
-#line 281 "rlscan.rl"
- { lit = tokdata; lit_len = toklen; }
- goto st5;
-tr7:
-#line 281 "rlscan.rl"
- { lit = tokdata; lit_len = toklen; }
- goto st5;
-st5:
- if ( ++p == pe )
- goto _out5;
-case 5:
-#line 607 "rlscan.cpp"
- if ( (*p) == 59 )
- goto tr6;
- goto tr3;
-st6:
- if ( ++p == pe )
- goto _out6;
-case 6:
- if ( (*p) == 133 )
- goto tr9;
- goto tr8;
-tr9:
-#line 281 "rlscan.rl"
- { lit = tokdata; lit_len = toklen; }
- goto st7;
-st7:
- if ( ++p == pe )
- goto _out7;
-case 7:
-#line 626 "rlscan.cpp"
- if ( (*p) == 59 )
- goto tr10;
- goto tr8;
-tr18:
-#line 397 "rlscan.rl"
- {
- if ( active() && machineSpec == 0 && machineName == 0 ) {
- output << "<write"
- " def_name=\"" << parser->sectionName << "\""
- " line=\"" << line << "\""
- " col=\"" << column << "\""
- ">";
- }
- }
- goto st8;
-st8:
- if ( ++p == pe )
- goto _out8;
-case 8:
-#line 646 "rlscan.cpp"
- if ( (*p) == 132 )
- goto tr12;
- goto tr11;
-tr12:
-#line 408 "rlscan.rl"
- {
- if ( active() && machineSpec == 0 && machineName == 0 )
- output << "<arg>" << tokdata << "</arg>";
- }
- goto st9;
-st9:
- if ( ++p == pe )
- goto _out9;
-case 9:
-#line 661 "rlscan.cpp"
- switch( (*p) ) {
- case 59: goto tr13;
- case 132: goto tr12;
- }
- goto tr11;
- }
- _out10: cs = 10; goto _out;
- _out1: cs = 1; goto _out;
- _out0: cs = 0; goto _out;
- _out2: cs = 2; goto _out;
- _out3: cs = 3; goto _out;
- _out4: cs = 4; goto _out;
- _out5: cs = 5; goto _out;
- _out6: cs = 6; goto _out;
- _out7: cs = 7; goto _out;
- _out8: cs = 8; goto _out;
- _out9: cs = 9; goto _out;
-
- _out: {}
- }
-#line 476 "rlscan.rl"
-
-
- updateCol();
-
- /* Record the last token for use in controlling the scan of subsequent
- * tokens. */
- lastToken = type;
-}
-
-void Scanner::startSection( )
-{
- parserExistsError = false;
-
- if ( includeDepth == 0 ) {
- if ( machineSpec == 0 && machineName == 0 )
- output << "</host>\n";
- }
-
- sectionLoc.fileName = fileName;
- sectionLoc.line = line;
- sectionLoc.col = 0;
-}
-
-void Scanner::endSection( )
-{
- /* Execute the eof actions for the section parser. */
-
-#line 710 "rlscan.cpp"
- {
- switch ( cs ) {
- case 1:
- case 2:
-#line 283 "rlscan.rl"
- { scan_error() << "bad machine statement" << endl; }
- break;
- case 3:
- case 4:
- case 5:
-#line 284 "rlscan.rl"
- { scan_error() << "bad include statement" << endl; }
- break;
- case 6:
- case 7:
-#line 285 "rlscan.rl"
- { scan_error() << "bad import statement" << endl; }
- break;
- case 8:
- case 9:
-#line 286 "rlscan.rl"
- { scan_error() << "bad write statement" << endl; }
- break;
-#line 734 "rlscan.cpp"
- }
- }
-
-#line 505 "rlscan.rl"
-
-
- /* Close off the section with the parser. */
- if ( active() ) {
- InputLoc loc;
- loc.fileName = fileName;
- loc.line = line;
- loc.col = 0;
-
- parser->token( loc, TK_EndSection, 0, 0 );
- }
-
- if ( includeDepth == 0 ) {
- if ( machineSpec == 0 && machineName == 0 ) {
- /* The end section may include a newline on the end, so
- * we use the last line, which will count the newline. */
- output << "<host line=\"" << line << "\">";
- }
- }
-}
-
-#line 917 "rlscan.rl"
-
-
-
-#line 764 "rlscan.cpp"
-static const int rlscan_start = 23;
-
-static const int rlscan_first_final = 23;
-
-static const int rlscan_error = 0;
-
-#line 920 "rlscan.rl"
-
-void Scanner::do_scan()
-{
- int bufsize = 8;
- char *buf = new char[bufsize];
- const char last_char = 0;
- int cs, act, have = 0;
- int top, stack[1];
- int curly_count = 0;
- bool execute = true;
- bool singleLineSpec = false;
- InlineBlockType inlineBlockType = CurlyDelimited;
-
- /* Init the section parser and the character scanner. */
- init();
-
-#line 788 "rlscan.cpp"
- {
- cs = rlscan_start;
- top = 0;
- tokstart = 0;
- tokend = 0;
- act = 0;
- }
-#line 936 "rlscan.rl"
-
- while ( execute ) {
- char *p = buf + have;
- int space = bufsize - have;
-
- if ( space == 0 ) {
- /* We filled up the buffer trying to scan a token. Grow it. */
- bufsize = bufsize * 2;
- char *newbuf = new char[bufsize];
-
- /* Recompute p and space. */
- p = newbuf + have;
- space = bufsize - have;
-
- /* Patch up pointers possibly in use. */
- if ( tokstart != 0 )
- tokstart = newbuf + ( tokstart - buf );
- tokend = newbuf + ( tokend - buf );
-
- /* Copy the new buffer in. */
- memcpy( newbuf, buf, have );
- delete[] buf;
- buf = newbuf;
- }
-
- input.read( p, space );
- int len = input.gcount();
-
- /* If we see eof then append the EOF char. */
- if ( len == 0 ) {
- p[0] = last_char, len = 1;
- execute = false;
- }
-
- char *pe = p + len;
-
-#line 833 "rlscan.cpp"
- {
- if ( p == pe )
- goto _out;
- goto _resume;
-
-_again:
- switch ( cs ) {
- case 23: goto st23;
- case 24: goto st24;
- case 25: goto st25;
- case 1: goto st1;
- case 2: goto st2;
- case 26: goto st26;
- case 27: goto st27;
- case 28: goto st28;
- case 3: goto st3;
- case 4: goto st4;
- case 29: goto st29;
- case 5: goto st5;
- case 6: goto st6;
- case 7: goto st7;
- case 30: goto st30;
- case 31: goto st31;
- case 32: goto st32;
- case 33: goto st33;
- case 34: goto st34;
- case 35: goto st35;
- case 36: goto st36;
- case 37: goto st37;
- case 38: goto st38;
- case 39: goto st39;
- case 8: goto st8;
- case 9: goto st9;
- case 40: goto st40;
- case 10: goto st10;
- case 11: goto st11;
- case 41: goto st41;
- case 12: goto st12;
- case 13: goto st13;
- case 14: goto st14;
- case 42: goto st42;
- case 43: goto st43;
- case 15: goto st15;
- case 44: goto st44;
- case 45: goto st45;
- case 46: goto st46;
- case 47: goto st47;
- case 48: goto st48;
- case 49: goto st49;
- case 50: goto st50;
- case 51: goto st51;
- case 52: goto st52;
- case 53: goto st53;
- case 54: goto st54;
- case 55: goto st55;
- case 56: goto st56;
- case 57: goto st57;
- case 58: goto st58;
- case 59: goto st59;
- case 60: goto st60;
- case 61: goto st61;
- case 62: goto st62;
- case 63: goto st63;
- case 64: goto st64;
- case 65: goto st65;
- case 66: goto st66;
- case 67: goto st67;
- case 68: goto st68;
- case 69: goto st69;
- case 70: goto st70;
- case 71: goto st71;
- case 72: goto st72;
- case 73: goto st73;
- case 74: goto st74;
- case 75: goto st75;
- case 76: goto st76;
- case 77: goto st77;
- case 78: goto st78;
- case 79: goto st79;
- case 80: goto st80;
- case 81: goto st81;
- case 82: goto st82;
- case 83: goto st83;
- case 84: goto st84;
- case 85: goto st85;
- case 0: goto st0;
- case 86: goto st86;
- case 87: goto st87;
- case 88: goto st88;
- case 89: goto st89;
- case 90: goto st90;
- case 16: goto st16;
- case 91: goto st91;
- case 17: goto st17;
- case 92: goto st92;
- case 18: goto st18;
- case 93: goto st93;
- case 94: goto st94;
- case 95: goto st95;
- case 19: goto st19;
- case 20: goto st20;
- case 96: goto st96;
- case 97: goto st97;
- case 98: goto st98;
- case 99: goto st99;
- case 100: goto st100;
- case 21: goto st21;
- case 101: goto st101;
- case 102: goto st102;
- case 103: goto st103;
- case 104: goto st104;
- case 105: goto st105;
- case 106: goto st106;
- case 107: goto st107;
- case 108: goto st108;
- case 109: goto st109;
- case 110: goto st110;
- case 111: goto st111;
- case 112: goto st112;
- case 113: goto st113;
- case 114: goto st114;
- case 115: goto st115;
- case 116: goto st116;
- case 117: goto st117;
- case 118: goto st118;
- case 119: goto st119;
- case 120: goto st120;
- case 121: goto st121;
- case 122: goto st122;
- case 123: goto st123;
- case 124: goto st124;
- case 125: goto st125;
- case 126: goto st126;
- case 127: goto st127;
- case 128: goto st128;
- case 129: goto st129;
- case 130: goto st130;
- case 131: goto st131;
- case 132: goto st132;
- case 133: goto st133;
- case 134: goto st134;
- case 135: goto st135;
- case 136: goto st136;
- case 137: goto st137;
- case 138: goto st138;
- case 139: goto st139;
- case 140: goto st140;
- case 141: goto st141;
- case 142: goto st142;
- case 143: goto st143;
- case 144: goto st144;
- case 145: goto st145;
- case 146: goto st146;
- case 147: goto st147;
- case 148: goto st148;
- case 149: goto st149;
- case 150: goto st150;
- case 151: goto st151;
- case 152: goto st152;
- case 153: goto st153;
- case 154: goto st154;
- case 155: goto st155;
- case 156: goto st156;
- case 157: goto st157;
- case 158: goto st158;
- case 159: goto st159;
- case 160: goto st160;
- case 161: goto st161;
- case 162: goto st162;
- case 163: goto st163;
- case 164: goto st164;
- case 165: goto st165;
- case 166: goto st166;
- case 167: goto st167;
- case 168: goto st168;
- case 169: goto st169;
- case 170: goto st170;
- case 171: goto st171;
- case 172: goto st172;
- case 173: goto st173;
- case 174: goto st174;
- case 22: goto st22;
- default: break;
- }
-
- if ( ++p == pe )
- goto _out;
-_resume:
- switch ( cs )
- {
-tr2:
-#line 899 "rlscan.rl"
- {tokend = p+1;{ pass( IMP_Literal, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st23;
-tr10:
-#line 898 "rlscan.rl"
- {tokend = p+1;{ pass(); }{p = ((tokend))-1;}}
- goto st23;
-tr12:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
-#line 898 "rlscan.rl"
- {tokend = p+1;{ pass(); }{p = ((tokend))-1;}}
- goto st23;
-tr41:
-#line 915 "rlscan.rl"
- {tokend = p+1;{ pass( *tokstart, 0, 0 ); }{p = ((tokend))-1;}}
- goto st23;
-tr42:
-#line 914 "rlscan.rl"
- {tokend = p+1;{p = ((tokend))-1;}}
- goto st23;
-tr52:
-#line 913 "rlscan.rl"
- {tokend = p;{ pass(); }{p = ((tokend))-1;}}
- goto st23;
-tr53:
-#line 915 "rlscan.rl"
- {tokend = p;{ pass( *tokstart, 0, 0 ); }{p = ((tokend))-1;}}
- goto st23;
-tr55:
-#line 907 "rlscan.rl"
- {tokend = p;{
- updateCol();
- singleLineSpec = true;
- startSection();
- {{p = ((tokend))-1;}{goto st88;}}
- }{p = ((tokend))-1;}}
- goto st23;
-tr56:
-#line 901 "rlscan.rl"
- {tokend = p+1;{
- updateCol();
- singleLineSpec = false;
- startSection();
- {{p = ((tokend))-1;}{goto st88;}}
- }{p = ((tokend))-1;}}
- goto st23;
-tr57:
-#line 897 "rlscan.rl"
- {tokend = p;{ pass( IMP_UInt, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st23;
-tr58:
-#line 1 "rlscan.rl"
- { switch( act ) {
- case 137:
- { pass( IMP_Define, 0, 0 ); }
- break;
- case 138:
- { pass( IMP_Word, tokstart, tokend ); }
- break;
- default: break;
- }
- {p = ((tokend))-1;}}
- goto st23;
-tr59:
-#line 896 "rlscan.rl"
- {tokend = p;{ pass( IMP_Word, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st23;
-st23:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out23;
-case 23:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 1105 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr42;
- case 9: goto st24;
- case 10: goto tr44;
- case 32: goto st24;
- case 34: goto tr45;
- case 37: goto st26;
- case 39: goto tr47;
- case 47: goto tr48;
- case 95: goto tr50;
- case 100: goto st32;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st30;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr41;
-tr44:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st24;
-st24:
- if ( ++p == pe )
- goto _out24;
-case 24:
-#line 1139 "rlscan.cpp"
- switch( (*p) ) {
- case 9: goto st24;
- case 10: goto tr44;
- case 32: goto st24;
- }
- goto tr52;
-tr45:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st25;
-st25:
- if ( ++p == pe )
- goto _out25;
-case 25:
-#line 1154 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr1;
- case 34: goto tr2;
- case 92: goto st2;
- }
- goto st1;
-tr1:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st1;
-st1:
- if ( ++p == pe )
- goto _out1;
-case 1:
-#line 1173 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr1;
- case 34: goto tr2;
- case 92: goto st2;
- }
- goto st1;
-st2:
- if ( ++p == pe )
- goto _out2;
-case 2:
- if ( (*p) == 10 )
- goto tr1;
- goto st1;
-st26:
- if ( ++p == pe )
- goto _out26;
-case 26:
- if ( (*p) == 37 )
- goto st27;
- goto tr53;
-st27:
- if ( ++p == pe )
- goto _out27;
-case 27:
- if ( (*p) == 123 )
- goto tr56;
- goto tr55;
-tr47:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st28;
-st28:
- if ( ++p == pe )
- goto _out28;
-case 28:
-#line 1209 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr5;
- case 39: goto tr2;
- case 92: goto st4;
- }
- goto st3;
-tr5:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st3;
-st3:
- if ( ++p == pe )
- goto _out3;
-case 3:
-#line 1228 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr5;
- case 39: goto tr2;
- case 92: goto st4;
- }
- goto st3;
-st4:
- if ( ++p == pe )
- goto _out4;
-case 4:
- if ( (*p) == 10 )
- goto tr5;
- goto st3;
-tr48:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st29;
-st29:
- if ( ++p == pe )
- goto _out29;
-case 29:
-#line 1250 "rlscan.cpp"
- switch( (*p) ) {
- case 42: goto st5;
- case 47: goto st7;
- }
- goto tr53;
-tr8:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st5;
-st5:
- if ( ++p == pe )
- goto _out5;
-case 5:
-#line 1268 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr8;
- case 42: goto st6;
- }
- goto st5;
-st6:
- if ( ++p == pe )
- goto _out6;
-case 6:
- switch( (*p) ) {
- case 10: goto tr8;
- case 42: goto st6;
- case 47: goto tr10;
- }
- goto st5;
-st7:
- if ( ++p == pe )
- goto _out7;
-case 7:
- if ( (*p) == 10 )
- goto tr12;
- goto st7;
-st30:
- if ( ++p == pe )
- goto _out30;
-case 30:
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st30;
- goto tr57;
-tr50:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 896 "rlscan.rl"
- {act = 138;}
- goto st31;
-tr64:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 895 "rlscan.rl"
- {act = 137;}
- goto st31;
-st31:
- if ( ++p == pe )
- goto _out31;
-case 31:
-#line 1314 "rlscan.cpp"
- if ( (*p) == 95 )
- goto tr50;
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr58;
-st32:
- if ( ++p == pe )
- goto _out32;
-case 32:
- switch( (*p) ) {
- case 95: goto tr50;
- case 101: goto st33;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr59;
-st33:
- if ( ++p == pe )
- goto _out33;
-case 33:
- switch( (*p) ) {
- case 95: goto tr50;
- case 102: goto st34;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr59;
-st34:
- if ( ++p == pe )
- goto _out34;
-case 34:
- switch( (*p) ) {
- case 95: goto tr50;
- case 105: goto st35;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr59;
-st35:
- if ( ++p == pe )
- goto _out35;
-case 35:
- switch( (*p) ) {
- case 95: goto tr50;
- case 110: goto st36;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr59;
-st36:
- if ( ++p == pe )
- goto _out36;
-case 36:
- switch( (*p) ) {
- case 95: goto tr50;
- case 101: goto tr64;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr50;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr50;
- } else
- goto tr50;
- goto tr59;
-tr15:
-#line 606 "rlscan.rl"
- {tokend = p+1;{ token( IL_Literal, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr23:
-#line 612 "rlscan.rl"
- {tokend = p+1;{ token( IL_Comment, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr25:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
-#line 612 "rlscan.rl"
- {tokend = p+1;{ token( IL_Comment, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr26:
-#line 602 "rlscan.rl"
- {{ token( TK_UInt, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr65:
-#line 659 "rlscan.rl"
- {tokend = p+1;{ token( IL_Symbol, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr66:
-#line 654 "rlscan.rl"
- {tokend = p+1;{
- scan_error() << "unterminated code block" << endl;
- }{p = ((tokend))-1;}}
- goto st37;
-tr71:
-#line 634 "rlscan.rl"
- {tokend = p+1;{ token( *tokstart, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr72:
-#line 629 "rlscan.rl"
- {tokend = p+1;{
- whitespaceOn = true;
- token( *tokstart, tokstart, tokend );
- }{p = ((tokend))-1;}}
- goto st37;
-tr77:
-#line 622 "rlscan.rl"
- {tokend = p+1;{
- whitespaceOn = true;
- token( *tokstart, tokstart, tokend );
- if ( inlineBlockType == SemiTerminated )
- {{p = ((tokend))-1;}{goto st88;}}
- }{p = ((tokend))-1;}}
- goto st37;
-tr80:
-#line 636 "rlscan.rl"
- {tokend = p+1;{
- token( IL_Symbol, tokstart, tokend );
- curly_count += 1;
- }{p = ((tokend))-1;}}
- goto st37;
-tr81:
-#line 641 "rlscan.rl"
- {tokend = p+1;{
- if ( --curly_count == 0 && inlineBlockType == CurlyDelimited ) {
- /* Inline code block ends. */
- token( '}' );
- {{p = ((tokend))-1;}{goto st88;}}
- }
- else {
- /* Either a semi terminated inline block or only the closing
- * brace of some inner scope, not the block's closing brace. */
- token( IL_Symbol, tokstart, tokend );
- }
- }{p = ((tokend))-1;}}
- goto st37;
-tr82:
-#line 608 "rlscan.rl"
- {tokend = p;{
- if ( whitespaceOn )
- token( IL_WhiteSpace, tokstart, tokend );
- }{p = ((tokend))-1;}}
- goto st37;
-tr83:
-#line 659 "rlscan.rl"
- {tokend = p;{ token( IL_Symbol, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr84:
-#line 602 "rlscan.rl"
- {tokend = p;{ token( TK_UInt, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr86:
-#line 603 "rlscan.rl"
- {tokend = p;{ token( TK_Hex, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr87:
-#line 614 "rlscan.rl"
- {tokend = p+1;{ token( TK_NameSep, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr88:
-#line 1 "rlscan.rl"
- { switch( act ) {
- case 1:
- { token( KW_PChar ); }
- break;
- case 3:
- { token( KW_CurState ); }
- break;
- case 4:
- { token( KW_TargState ); }
- break;
- case 5:
- {
- whitespaceOn = false;
- token( KW_Entry );
- }
- break;
- case 6:
- {
- whitespaceOn = false;
- token( KW_Hold );
- }
- break;
- case 7:
- { token( KW_Exec, 0, 0 ); }
- break;
- case 8:
- {
- whitespaceOn = false;
- token( KW_Goto );
- }
- break;
- case 9:
- {
- whitespaceOn = false;
- token( KW_Next );
- }
- break;
- case 10:
- {
- whitespaceOn = false;
- token( KW_Call );
- }
- break;
- case 11:
- {
- whitespaceOn = false;
- token( KW_Ret );
- }
- break;
- case 12:
- {
- whitespaceOn = false;
- token( KW_Break );
- }
- break;
- case 13:
- { token( TK_Word, tokstart, tokend ); }
- break;
- default: break;
- }
- {p = ((tokend))-1;}}
- goto st37;
-tr89:
-#line 600 "rlscan.rl"
- {tokend = p;{ token( TK_Word, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st37;
-tr103:
-#line 565 "rlscan.rl"
- {tokend = p;{ token( KW_Char ); }{p = ((tokend))-1;}}
- goto st37;
-st37:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out37;
-case 37:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 1588 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr66;
- case 9: goto st38;
- case 10: goto tr68;
- case 32: goto st38;
- case 34: goto tr69;
- case 39: goto tr70;
- case 40: goto tr71;
- case 44: goto tr71;
- case 47: goto tr73;
- case 48: goto tr74;
- case 58: goto st45;
- case 59: goto tr77;
- case 95: goto tr78;
- case 102: goto st47;
- case 123: goto tr80;
- case 125: goto tr81;
- }
- if ( (*p) < 49 ) {
- if ( 41 <= (*p) && (*p) <= 42 )
- goto tr72;
- } else if ( (*p) > 57 ) {
- if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else if ( (*p) >= 65 )
- goto tr78;
- } else
- goto st43;
- goto tr65;
-tr68:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st38;
-st38:
- if ( ++p == pe )
- goto _out38;
-case 38:
-#line 1631 "rlscan.cpp"
- switch( (*p) ) {
- case 9: goto st38;
- case 10: goto tr68;
- case 32: goto st38;
- }
- goto tr82;
-tr69:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st39;
-st39:
- if ( ++p == pe )
- goto _out39;
-case 39:
-#line 1646 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr14;
- case 34: goto tr15;
- case 92: goto st9;
- }
- goto st8;
-tr14:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st8;
-st8:
- if ( ++p == pe )
- goto _out8;
-case 8:
-#line 1665 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr14;
- case 34: goto tr15;
- case 92: goto st9;
- }
- goto st8;
-st9:
- if ( ++p == pe )
- goto _out9;
-case 9:
- if ( (*p) == 10 )
- goto tr14;
- goto st8;
-tr70:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st40;
-st40:
- if ( ++p == pe )
- goto _out40;
-case 40:
-#line 1687 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr18;
- case 39: goto tr15;
- case 92: goto st11;
- }
- goto st10;
-tr18:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st10;
-st10:
- if ( ++p == pe )
- goto _out10;
-case 10:
-#line 1706 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr18;
- case 39: goto tr15;
- case 92: goto st11;
- }
- goto st10;
-st11:
- if ( ++p == pe )
- goto _out11;
-case 11:
- if ( (*p) == 10 )
- goto tr18;
- goto st10;
-tr73:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st41;
-st41:
- if ( ++p == pe )
- goto _out41;
-case 41:
-#line 1728 "rlscan.cpp"
- switch( (*p) ) {
- case 42: goto st12;
- case 47: goto st14;
- }
- goto tr83;
-tr21:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st12;
-st12:
- if ( ++p == pe )
- goto _out12;
-case 12:
-#line 1746 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr21;
- case 42: goto st13;
- }
- goto st12;
-st13:
- if ( ++p == pe )
- goto _out13;
-case 13:
- switch( (*p) ) {
- case 10: goto tr21;
- case 42: goto st13;
- case 47: goto tr23;
- }
- goto st12;
-st14:
- if ( ++p == pe )
- goto _out14;
-case 14:
- if ( (*p) == 10 )
- goto tr25;
- goto st14;
-tr74:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st42;
-st42:
- if ( ++p == pe )
- goto _out42;
-case 42:
-#line 1777 "rlscan.cpp"
- if ( (*p) == 120 )
- goto st15;
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st43;
- goto tr84;
-st43:
- if ( ++p == pe )
- goto _out43;
-case 43:
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st43;
- goto tr84;
-st15:
- if ( ++p == pe )
- goto _out15;
-case 15:
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st44;
- } else if ( (*p) > 70 ) {
- if ( 97 <= (*p) && (*p) <= 102 )
- goto st44;
- } else
- goto st44;
- goto tr26;
-st44:
- if ( ++p == pe )
- goto _out44;
-case 44:
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st44;
- } else if ( (*p) > 70 ) {
- if ( 97 <= (*p) && (*p) <= 102 )
- goto st44;
- } else
- goto st44;
- goto tr86;
-st45:
- if ( ++p == pe )
- goto _out45;
-case 45:
- if ( (*p) == 58 )
- goto tr87;
- goto tr83;
-tr78:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 600 "rlscan.rl"
- {act = 13;}
- goto st46;
-tr102:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 595 "rlscan.rl"
- {act = 12;}
- goto st46;
-tr107:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 587 "rlscan.rl"
- {act = 10;}
- goto st46;
-tr109:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 566 "rlscan.rl"
- {act = 3;}
- goto st46;
-tr114:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 568 "rlscan.rl"
- {act = 5;}
- goto st46;
-tr116:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 578 "rlscan.rl"
- {act = 7;}
- goto st46;
-tr119:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 579 "rlscan.rl"
- {act = 8;}
- goto st46;
-tr122:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 574 "rlscan.rl"
- {act = 6;}
- goto st46;
-tr125:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 583 "rlscan.rl"
- {act = 9;}
- goto st46;
-tr126:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 564 "rlscan.rl"
- {act = 1;}
- goto st46;
-tr128:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 591 "rlscan.rl"
- {act = 11;}
- goto st46;
-tr132:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 567 "rlscan.rl"
- {act = 4;}
- goto st46;
-st46:
- if ( ++p == pe )
- goto _out46;
-case 46:
-#line 1899 "rlscan.cpp"
- if ( (*p) == 95 )
- goto tr78;
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr88;
-st47:
- if ( ++p == pe )
- goto _out47;
-case 47:
- switch( (*p) ) {
- case 95: goto tr78;
- case 98: goto st48;
- case 99: goto st52;
- case 101: goto st57;
- case 103: goto st63;
- case 104: goto st66;
- case 110: goto st69;
- case 112: goto st72;
- case 114: goto st73;
- case 116: goto st75;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st48:
- if ( ++p == pe )
- goto _out48;
-case 48:
- switch( (*p) ) {
- case 95: goto tr78;
- case 114: goto st49;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st49:
- if ( ++p == pe )
- goto _out49;
-case 49:
- switch( (*p) ) {
- case 95: goto tr78;
- case 101: goto st50;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st50:
- if ( ++p == pe )
- goto _out50;
-case 50:
- switch( (*p) ) {
- case 95: goto tr78;
- case 97: goto st51;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st51:
- if ( ++p == pe )
- goto _out51;
-case 51:
- switch( (*p) ) {
- case 95: goto tr78;
- case 107: goto tr102;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st52:
- if ( ++p == pe )
- goto _out52;
-case 52:
- switch( (*p) ) {
- case 95: goto tr78;
- case 97: goto st53;
- case 117: goto st55;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr103;
-st53:
- if ( ++p == pe )
- goto _out53;
-case 53:
- switch( (*p) ) {
- case 95: goto tr78;
- case 108: goto st54;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st54:
- if ( ++p == pe )
- goto _out54;
-case 54:
- switch( (*p) ) {
- case 95: goto tr78;
- case 108: goto tr107;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st55:
- if ( ++p == pe )
- goto _out55;
-case 55:
- switch( (*p) ) {
- case 95: goto tr78;
- case 114: goto st56;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st56:
- if ( ++p == pe )
- goto _out56;
-case 56:
- switch( (*p) ) {
- case 95: goto tr78;
- case 115: goto tr109;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st57:
- if ( ++p == pe )
- goto _out57;
-case 57:
- switch( (*p) ) {
- case 95: goto tr78;
- case 110: goto st58;
- case 120: goto st61;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st58:
- if ( ++p == pe )
- goto _out58;
-case 58:
- switch( (*p) ) {
- case 95: goto tr78;
- case 116: goto st59;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st59:
- if ( ++p == pe )
- goto _out59;
-case 59:
- switch( (*p) ) {
- case 95: goto tr78;
- case 114: goto st60;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st60:
- if ( ++p == pe )
- goto _out60;
-case 60:
- switch( (*p) ) {
- case 95: goto tr78;
- case 121: goto tr114;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st61:
- if ( ++p == pe )
- goto _out61;
-case 61:
- switch( (*p) ) {
- case 95: goto tr78;
- case 101: goto st62;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st62:
- if ( ++p == pe )
- goto _out62;
-case 62:
- switch( (*p) ) {
- case 95: goto tr78;
- case 99: goto tr116;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st63:
- if ( ++p == pe )
- goto _out63;
-case 63:
- switch( (*p) ) {
- case 95: goto tr78;
- case 111: goto st64;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st64:
- if ( ++p == pe )
- goto _out64;
-case 64:
- switch( (*p) ) {
- case 95: goto tr78;
- case 116: goto st65;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st65:
- if ( ++p == pe )
- goto _out65;
-case 65:
- switch( (*p) ) {
- case 95: goto tr78;
- case 111: goto tr119;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st66:
- if ( ++p == pe )
- goto _out66;
-case 66:
- switch( (*p) ) {
- case 95: goto tr78;
- case 111: goto st67;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st67:
- if ( ++p == pe )
- goto _out67;
-case 67:
- switch( (*p) ) {
- case 95: goto tr78;
- case 108: goto st68;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st68:
- if ( ++p == pe )
- goto _out68;
-case 68:
- switch( (*p) ) {
- case 95: goto tr78;
- case 100: goto tr122;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st69:
- if ( ++p == pe )
- goto _out69;
-case 69:
- switch( (*p) ) {
- case 95: goto tr78;
- case 101: goto st70;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st70:
- if ( ++p == pe )
- goto _out70;
-case 70:
- switch( (*p) ) {
- case 95: goto tr78;
- case 120: goto st71;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st71:
- if ( ++p == pe )
- goto _out71;
-case 71:
- switch( (*p) ) {
- case 95: goto tr78;
- case 116: goto tr125;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st72:
- if ( ++p == pe )
- goto _out72;
-case 72:
- switch( (*p) ) {
- case 95: goto tr78;
- case 99: goto tr126;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st73:
- if ( ++p == pe )
- goto _out73;
-case 73:
- switch( (*p) ) {
- case 95: goto tr78;
- case 101: goto st74;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st74:
- if ( ++p == pe )
- goto _out74;
-case 74:
- switch( (*p) ) {
- case 95: goto tr78;
- case 116: goto tr128;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st75:
- if ( ++p == pe )
- goto _out75;
-case 75:
- switch( (*p) ) {
- case 95: goto tr78;
- case 97: goto st76;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st76:
- if ( ++p == pe )
- goto _out76;
-case 76:
- switch( (*p) ) {
- case 95: goto tr78;
- case 114: goto st77;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st77:
- if ( ++p == pe )
- goto _out77;
-case 77:
- switch( (*p) ) {
- case 95: goto tr78;
- case 103: goto st78;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-st78:
- if ( ++p == pe )
- goto _out78;
-case 78:
- switch( (*p) ) {
- case 95: goto tr78;
- case 115: goto tr132;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr78;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr78;
- } else
- goto tr78;
- goto tr89;
-tr133:
-#line 686 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st79;
-tr134:
-#line 681 "rlscan.rl"
- {tokend = p+1;{
- scan_error() << "unterminated OR literal" << endl;
- }{p = ((tokend))-1;}}
- goto st79;
-tr135:
-#line 676 "rlscan.rl"
- {tokend = p+1;{ token( RE_Dash, 0, 0 ); }{p = ((tokend))-1;}}
- goto st79;
-tr137:
-#line 679 "rlscan.rl"
- {tokend = p+1;{ token( RE_SqClose ); {{p = ((tokend))-1;}{cs = stack[--top]; goto _again;}} }{p = ((tokend))-1;}}
- goto st79;
-tr138:
-#line 673 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, tokstart+1, tokend ); }{p = ((tokend))-1;}}
- goto st79;
-tr139:
-#line 672 "rlscan.rl"
- {tokend = p+1;{ updateCol(); }{p = ((tokend))-1;}}
- goto st79;
-tr140:
-#line 664 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\0' ); }{p = ((tokend))-1;}}
- goto st79;
-tr141:
-#line 665 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\a' ); }{p = ((tokend))-1;}}
- goto st79;
-tr142:
-#line 666 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\b' ); }{p = ((tokend))-1;}}
- goto st79;
-tr143:
-#line 670 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\f' ); }{p = ((tokend))-1;}}
- goto st79;
-tr144:
-#line 668 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\n' ); }{p = ((tokend))-1;}}
- goto st79;
-tr145:
-#line 671 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\r' ); }{p = ((tokend))-1;}}
- goto st79;
-tr146:
-#line 667 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\t' ); }{p = ((tokend))-1;}}
- goto st79;
-tr147:
-#line 669 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\v' ); }{p = ((tokend))-1;}}
- goto st79;
-st79:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out79;
-case 79:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 2531 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr134;
- case 45: goto tr135;
- case 92: goto st80;
- case 93: goto tr137;
- }
- goto tr133;
-st80:
- if ( ++p == pe )
- goto _out80;
-case 80:
- switch( (*p) ) {
- case 10: goto tr139;
- case 48: goto tr140;
- case 97: goto tr141;
- case 98: goto tr142;
- case 102: goto tr143;
- case 110: goto tr144;
- case 114: goto tr145;
- case 116: goto tr146;
- case 118: goto tr147;
- }
- goto tr138;
-tr148:
-#line 721 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st81;
-tr149:
-#line 716 "rlscan.rl"
- {tokend = p+1;{
- scan_error() << "unterminated regular expression" << endl;
- }{p = ((tokend))-1;}}
- goto st81;
-tr150:
-#line 711 "rlscan.rl"
- {tokend = p+1;{ token( RE_Star ); }{p = ((tokend))-1;}}
- goto st81;
-tr151:
-#line 710 "rlscan.rl"
- {tokend = p+1;{ token( RE_Dot ); }{p = ((tokend))-1;}}
- goto st81;
-tr155:
-#line 704 "rlscan.rl"
- {tokend = p;{
- token( RE_Slash, tokstart, tokend );
- {{p = ((tokend))-1;}{goto st88;}}
- }{p = ((tokend))-1;}}
- goto st81;
-tr156:
-#line 704 "rlscan.rl"
- {tokend = p+1;{
- token( RE_Slash, tokstart, tokend );
- {{p = ((tokend))-1;}{goto st88;}}
- }{p = ((tokend))-1;}}
- goto st81;
-tr157:
-#line 713 "rlscan.rl"
- {tokend = p;{ token( RE_SqOpen ); {{p = ((tokend))-1;}{stack[top++] = 81; goto st79;}} }{p = ((tokend))-1;}}
- goto st81;
-tr158:
-#line 714 "rlscan.rl"
- {tokend = p+1;{ token( RE_SqOpenNeg ); {{p = ((tokend))-1;}{stack[top++] = 81; goto st79;}} }{p = ((tokend))-1;}}
- goto st81;
-tr159:
-#line 701 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, tokstart+1, tokend ); }{p = ((tokend))-1;}}
- goto st81;
-tr160:
-#line 700 "rlscan.rl"
- {tokend = p+1;{ updateCol(); }{p = ((tokend))-1;}}
- goto st81;
-tr161:
-#line 692 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\0' ); }{p = ((tokend))-1;}}
- goto st81;
-tr162:
-#line 693 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\a' ); }{p = ((tokend))-1;}}
- goto st81;
-tr163:
-#line 694 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\b' ); }{p = ((tokend))-1;}}
- goto st81;
-tr164:
-#line 698 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\f' ); }{p = ((tokend))-1;}}
- goto st81;
-tr165:
-#line 696 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\n' ); }{p = ((tokend))-1;}}
- goto st81;
-tr166:
-#line 699 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\r' ); }{p = ((tokend))-1;}}
- goto st81;
-tr167:
-#line 695 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\t' ); }{p = ((tokend))-1;}}
- goto st81;
-tr168:
-#line 697 "rlscan.rl"
- {tokend = p+1;{ token( RE_Char, '\v' ); }{p = ((tokend))-1;}}
- goto st81;
-st81:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out81;
-case 81:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 2643 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr149;
- case 42: goto tr150;
- case 46: goto tr151;
- case 47: goto st82;
- case 91: goto st83;
- case 92: goto st84;
- }
- goto tr148;
-st82:
- if ( ++p == pe )
- goto _out82;
-case 82:
- if ( (*p) == 105 )
- goto tr156;
- goto tr155;
-st83:
- if ( ++p == pe )
- goto _out83;
-case 83:
- if ( (*p) == 94 )
- goto tr158;
- goto tr157;
-st84:
- if ( ++p == pe )
- goto _out84;
-case 84:
- switch( (*p) ) {
- case 10: goto tr160;
- case 48: goto tr161;
- case 97: goto tr162;
- case 98: goto tr163;
- case 102: goto tr164;
- case 110: goto tr165;
- case 114: goto tr166;
- case 116: goto tr167;
- case 118: goto tr168;
- }
- goto tr159;
-tr169:
-#line 730 "rlscan.rl"
- {tokend = p+1;{
- scan_error() << "unterminated write statement" << endl;
- }{p = ((tokend))-1;}}
- goto st85;
-tr172:
-#line 728 "rlscan.rl"
- {tokend = p+1;{ token( ';' ); {{p = ((tokend))-1;}{goto st88;}} }{p = ((tokend))-1;}}
- goto st85;
-tr174:
-#line 727 "rlscan.rl"
- {tokend = p;{ updateCol(); }{p = ((tokend))-1;}}
- goto st85;
-tr175:
-#line 726 "rlscan.rl"
- {tokend = p;{ token( TK_Word, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st85;
-st85:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out85;
-case 85:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 2709 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr169;
- case 32: goto st86;
- case 59: goto tr172;
- case 95: goto st87;
- }
- if ( (*p) < 65 ) {
- if ( 9 <= (*p) && (*p) <= 10 )
- goto st86;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto st87;
- } else
- goto st87;
- goto st0;
-st0:
- goto _out0;
-st86:
- if ( ++p == pe )
- goto _out86;
-case 86:
- if ( (*p) == 32 )
- goto st86;
- if ( 9 <= (*p) && (*p) <= 10 )
- goto st86;
- goto tr174;
-st87:
- if ( ++p == pe )
- goto _out87;
-case 87:
- if ( (*p) == 95 )
- goto st87;
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st87;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto st87;
- } else
- goto st87;
- goto tr175;
-tr33:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
-#line 790 "rlscan.rl"
- {tokend = p+1;{ updateCol(); }{p = ((tokend))-1;}}
- goto st88;
-tr37:
-#line 777 "rlscan.rl"
- {{ token( TK_UInt, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr39:
-#line 890 "rlscan.rl"
- {{ token( *tokstart ); }{p = ((tokend))-1;}}
- goto st88;
-tr40:
-#line 858 "rlscan.rl"
- {tokend = p+1;{
- updateCol();
- endSection();
- {{p = ((tokend))-1;}{goto st23;}}
- }{p = ((tokend))-1;}}
- goto st88;
-tr176:
-#line 890 "rlscan.rl"
- {tokend = p+1;{ token( *tokstart ); }{p = ((tokend))-1;}}
- goto st88;
-tr177:
-#line 886 "rlscan.rl"
- {tokend = p+1;{
- scan_error() << "unterminated ragel section" << endl;
- }{p = ((tokend))-1;}}
- goto st88;
-tr179:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
-#line 867 "rlscan.rl"
- {tokend = p+1;{
- updateCol();
- if ( singleLineSpec ) {
- endSection();
- {{p = ((tokend))-1;}{goto st23;}}
- }
- }{p = ((tokend))-1;}}
- goto st88;
-tr188:
-#line 787 "rlscan.rl"
- {tokend = p+1;{ token( RE_Slash ); {{p = ((tokend))-1;}{goto st81;}} }{p = ((tokend))-1;}}
- goto st88;
-tr208:
-#line 875 "rlscan.rl"
- {tokend = p+1;{
- if ( lastToken == KW_Export || lastToken == KW_Entry )
- token( '{' );
- else {
- token( '{' );
- curly_count = 1;
- inlineBlockType = CurlyDelimited;
- {{p = ((tokend))-1;}{goto st37;}}
- }
- }{p = ((tokend))-1;}}
- goto st88;
-tr211:
-#line 864 "rlscan.rl"
- {tokend = p;{ updateCol(); }{p = ((tokend))-1;}}
- goto st88;
-tr212:
-#line 782 "rlscan.rl"
- {tokend = p;{ token( TK_Literal, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr213:
-#line 782 "rlscan.rl"
- {tokend = p+1;{ token( TK_Literal, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr214:
-#line 890 "rlscan.rl"
- {tokend = p;{ token( *tokstart ); }{p = ((tokend))-1;}}
- goto st88;
-tr215:
-#line 820 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr216:
-#line 804 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr217:
-#line 812 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr218:
-#line 839 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllCond ); }{p = ((tokend))-1;}}
- goto st88;
-tr219:
-#line 828 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr220:
-#line 796 "rlscan.rl"
- {tokend = p+1;{ token( TK_AllToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr221:
-#line 821 "rlscan.rl"
- {tokend = p+1;{ token( TK_FinalGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr222:
-#line 805 "rlscan.rl"
- {tokend = p+1;{ token( TK_FinalFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr223:
-#line 813 "rlscan.rl"
- {tokend = p+1;{ token( TK_FinalEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr224:
-#line 840 "rlscan.rl"
- {tokend = p+1;{ token( TK_LeavingCond ); }{p = ((tokend))-1;}}
- goto st88;
-tr225:
-#line 829 "rlscan.rl"
- {tokend = p+1;{ token( TK_FinalLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr226:
-#line 797 "rlscan.rl"
- {tokend = p+1;{ token( TK_FinalToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr227:
-#line 843 "rlscan.rl"
- {tokend = p+1;{ token( TK_StarStar ); }{p = ((tokend))-1;}}
- goto st88;
-tr228:
-#line 844 "rlscan.rl"
- {tokend = p+1;{ token( TK_DashDash ); }{p = ((tokend))-1;}}
- goto st88;
-tr229:
-#line 845 "rlscan.rl"
- {tokend = p+1;{ token( TK_Arrow ); }{p = ((tokend))-1;}}
- goto st88;
-tr230:
-#line 842 "rlscan.rl"
- {tokend = p+1;{ token( TK_DotDot ); }{p = ((tokend))-1;}}
- goto st88;
-tr231:
-#line 777 "rlscan.rl"
- {tokend = p;{ token( TK_UInt, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr233:
-#line 778 "rlscan.rl"
- {tokend = p;{ token( TK_Hex, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr234:
-#line 856 "rlscan.rl"
- {tokend = p+1;{ token( TK_NameSep, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr235:
-#line 792 "rlscan.rl"
- {tokend = p+1;{ token( TK_ColonEquals ); }{p = ((tokend))-1;}}
- goto st88;
-tr237:
-#line 848 "rlscan.rl"
- {tokend = p;{ token( TK_ColonGt ); }{p = ((tokend))-1;}}
- goto st88;
-tr238:
-#line 849 "rlscan.rl"
- {tokend = p+1;{ token( TK_ColonGtGt ); }{p = ((tokend))-1;}}
- goto st88;
-tr239:
-#line 822 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotStartGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr240:
-#line 806 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotStartFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr241:
-#line 814 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotStartEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr242:
-#line 850 "rlscan.rl"
- {tokend = p+1;{ token( TK_LtColon ); }{p = ((tokend))-1;}}
- goto st88;
-tr244:
-#line 830 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotStartLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr245:
-#line 798 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotStartToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr246:
-#line 835 "rlscan.rl"
- {tokend = p;{ token( TK_Middle ); }{p = ((tokend))-1;}}
- goto st88;
-tr247:
-#line 824 "rlscan.rl"
- {tokend = p+1;{ token( TK_MiddleGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr248:
-#line 808 "rlscan.rl"
- {tokend = p+1;{ token( TK_MiddleFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr249:
-#line 816 "rlscan.rl"
- {tokend = p+1;{ token( TK_MiddleEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr250:
-#line 832 "rlscan.rl"
- {tokend = p+1;{ token( TK_MiddleLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr251:
-#line 800 "rlscan.rl"
- {tokend = p+1;{ token( TK_MiddleToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr252:
-#line 846 "rlscan.rl"
- {tokend = p+1;{ token( TK_DoubleArrow ); }{p = ((tokend))-1;}}
- goto st88;
-tr253:
-#line 819 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr254:
-#line 803 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr255:
-#line 811 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr256:
-#line 838 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartCond ); }{p = ((tokend))-1;}}
- goto st88;
-tr257:
-#line 827 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr258:
-#line 795 "rlscan.rl"
- {tokend = p+1;{ token( TK_StartToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr259:
-#line 823 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotFinalGblError ); }{p = ((tokend))-1;}}
- goto st88;
-tr260:
-#line 807 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotFinalFromState ); }{p = ((tokend))-1;}}
- goto st88;
-tr261:
-#line 815 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotFinalEOF ); }{p = ((tokend))-1;}}
- goto st88;
-tr262:
-#line 831 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotFinalLocalError ); }{p = ((tokend))-1;}}
- goto st88;
-tr263:
-#line 799 "rlscan.rl"
- {tokend = p+1;{ token( TK_NotFinalToState ); }{p = ((tokend))-1;}}
- goto st88;
-tr264:
-#line 1 "rlscan.rl"
- { switch( act ) {
- case 62:
- { token( KW_Machine ); }
- break;
- case 63:
- { token( KW_Include ); }
- break;
- case 64:
- { token( KW_Import ); }
- break;
- case 65:
- {
- token( KW_Write );
- {{p = ((tokend))-1;}{goto st85;}}
- }
- break;
- case 66:
- { token( KW_Action ); }
- break;
- case 67:
- { token( KW_AlphType ); }
- break;
- case 68:
- {
- token( KW_GetKey );
- inlineBlockType = SemiTerminated;
- {{p = ((tokend))-1;}{goto st37;}}
- }
- break;
- case 69:
- {
- token( KW_Access );
- inlineBlockType = SemiTerminated;
- {{p = ((tokend))-1;}{goto st37;}}
- }
- break;
- case 70:
- {
- token( KW_Variable );
- inlineBlockType = SemiTerminated;
- {{p = ((tokend))-1;}{goto st37;}}
- }
- break;
- case 71:
- { token( KW_When ); }
- break;
- case 72:
- { token( KW_Eof ); }
- break;
- case 73:
- { token( KW_Err ); }
- break;
- case 74:
- { token( KW_Lerr ); }
- break;
- case 75:
- { token( KW_To ); }
- break;
- case 76:
- { token( KW_From ); }
- break;
- case 77:
- { token( KW_Export ); }
- break;
- case 78:
- { token( TK_Word, tokstart, tokend ); }
- break;
- default: break;
- }
- {p = ((tokend))-1;}}
- goto st88;
-tr265:
-#line 784 "rlscan.rl"
- {tokend = p;{ token( RE_SqOpen ); {{p = ((tokend))-1;}{stack[top++] = 88; goto st79;}} }{p = ((tokend))-1;}}
- goto st88;
-tr266:
-#line 785 "rlscan.rl"
- {tokend = p+1;{ token( RE_SqOpenNeg ); {{p = ((tokend))-1;}{stack[top++] = 88; goto st79;}} }{p = ((tokend))-1;}}
- goto st88;
-tr267:
-#line 774 "rlscan.rl"
- {tokend = p;{ token( TK_Word, tokstart, tokend ); }{p = ((tokend))-1;}}
- goto st88;
-tr336:
-#line 853 "rlscan.rl"
- {tokend = p+1;{ token( TK_BarStar ); }{p = ((tokend))-1;}}
- goto st88;
-st88:
-#line 1 "rlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out88;
-case 88:
-#line 1 "rlscan.rl"
- {tokstart = p;}
-#line 3117 "rlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr177;
- case 9: goto st89;
- case 10: goto tr179;
- case 13: goto st89;
- case 32: goto st89;
- case 34: goto tr180;
- case 35: goto tr181;
- case 36: goto st93;
- case 37: goto st94;
- case 39: goto tr184;
- case 42: goto st96;
- case 45: goto st97;
- case 46: goto st98;
- case 47: goto tr188;
- case 48: goto tr189;
- case 58: goto st102;
- case 60: goto st104;
- case 61: goto st106;
- case 62: goto st107;
- case 64: goto st108;
- case 91: goto st110;
- case 95: goto tr196;
- case 97: goto st111;
- case 101: goto st125;
- case 102: goto st132;
- case 103: goto st135;
- case 105: goto st140;
- case 108: goto st150;
- case 109: goto st153;
- case 116: goto st159;
- case 118: goto st160;
- case 119: goto st167;
- case 123: goto tr208;
- case 124: goto st173;
- case 125: goto tr210;
- }
- if ( (*p) < 65 ) {
- if ( 49 <= (*p) && (*p) <= 57 )
- goto st100;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr176;
-st89:
- if ( ++p == pe )
- goto _out89;
-case 89:
- switch( (*p) ) {
- case 9: goto st89;
- case 13: goto st89;
- case 32: goto st89;
- }
- goto tr211;
-tr180:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st90;
-st90:
- if ( ++p == pe )
- goto _out90;
-case 90:
-#line 3182 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr29;
- case 34: goto st91;
- case 92: goto st17;
- }
- goto st16;
-tr29:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st16;
-st16:
- if ( ++p == pe )
- goto _out16;
-case 16:
-#line 3201 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr29;
- case 34: goto st91;
- case 92: goto st17;
- }
- goto st16;
-st91:
- if ( ++p == pe )
- goto _out91;
-case 91:
- if ( (*p) == 105 )
- goto tr213;
- goto tr212;
-st17:
- if ( ++p == pe )
- goto _out17;
-case 17:
- if ( (*p) == 10 )
- goto tr29;
- goto st16;
-tr181:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st92;
-st92:
- if ( ++p == pe )
- goto _out92;
-case 92:
-#line 3230 "rlscan.cpp"
- if ( (*p) == 10 )
- goto tr33;
- goto st18;
-st18:
- if ( ++p == pe )
- goto _out18;
-case 18:
- if ( (*p) == 10 )
- goto tr33;
- goto st18;
-st93:
- if ( ++p == pe )
- goto _out93;
-case 93:
- switch( (*p) ) {
- case 33: goto tr215;
- case 42: goto tr216;
- case 47: goto tr217;
- case 63: goto tr218;
- case 94: goto tr219;
- case 126: goto tr220;
- }
- goto tr214;
-st94:
- if ( ++p == pe )
- goto _out94;
-case 94:
- switch( (*p) ) {
- case 33: goto tr221;
- case 42: goto tr222;
- case 47: goto tr223;
- case 63: goto tr224;
- case 94: goto tr225;
- case 126: goto tr226;
- }
- goto tr214;
-tr184:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st95;
-st95:
- if ( ++p == pe )
- goto _out95;
-case 95:
-#line 3275 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr35;
- case 39: goto st91;
- case 92: goto st20;
- }
- goto st19;
-tr35:
-#line 532 "rlscan.rl"
- {
- lastnl = p;
- column = 0;
- line++;
- }
- goto st19;
-st19:
- if ( ++p == pe )
- goto _out19;
-case 19:
-#line 3294 "rlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr35;
- case 39: goto st91;
- case 92: goto st20;
- }
- goto st19;
-st20:
- if ( ++p == pe )
- goto _out20;
-case 20:
- if ( (*p) == 10 )
- goto tr35;
- goto st19;
-st96:
- if ( ++p == pe )
- goto _out96;
-case 96:
- if ( (*p) == 42 )
- goto tr227;
- goto tr214;
-st97:
- if ( ++p == pe )
- goto _out97;
-case 97:
- switch( (*p) ) {
- case 45: goto tr228;
- case 62: goto tr229;
- }
- goto tr214;
-st98:
- if ( ++p == pe )
- goto _out98;
-case 98:
- if ( (*p) == 46 )
- goto tr230;
- goto tr214;
-tr189:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st99;
-st99:
- if ( ++p == pe )
- goto _out99;
-case 99:
-#line 3339 "rlscan.cpp"
- if ( (*p) == 120 )
- goto st21;
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st100;
- goto tr231;
-st100:
- if ( ++p == pe )
- goto _out100;
-case 100:
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st100;
- goto tr231;
-st21:
- if ( ++p == pe )
- goto _out21;
-case 21:
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st101;
- } else if ( (*p) > 70 ) {
- if ( 97 <= (*p) && (*p) <= 102 )
- goto st101;
- } else
- goto st101;
- goto tr37;
-st101:
- if ( ++p == pe )
- goto _out101;
-case 101:
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto st101;
- } else if ( (*p) > 70 ) {
- if ( 97 <= (*p) && (*p) <= 102 )
- goto st101;
- } else
- goto st101;
- goto tr233;
-st102:
- if ( ++p == pe )
- goto _out102;
-case 102:
- switch( (*p) ) {
- case 58: goto tr234;
- case 61: goto tr235;
- case 62: goto st103;
- }
- goto tr214;
-st103:
- if ( ++p == pe )
- goto _out103;
-case 103:
- if ( (*p) == 62 )
- goto tr238;
- goto tr237;
-st104:
- if ( ++p == pe )
- goto _out104;
-case 104:
- switch( (*p) ) {
- case 33: goto tr239;
- case 42: goto tr240;
- case 47: goto tr241;
- case 58: goto tr242;
- case 62: goto st105;
- case 94: goto tr244;
- case 126: goto tr245;
- }
- goto tr214;
-st105:
- if ( ++p == pe )
- goto _out105;
-case 105:
- switch( (*p) ) {
- case 33: goto tr247;
- case 42: goto tr248;
- case 47: goto tr249;
- case 94: goto tr250;
- case 126: goto tr251;
- }
- goto tr246;
-st106:
- if ( ++p == pe )
- goto _out106;
-case 106:
- if ( (*p) == 62 )
- goto tr252;
- goto tr214;
-st107:
- if ( ++p == pe )
- goto _out107;
-case 107:
- switch( (*p) ) {
- case 33: goto tr253;
- case 42: goto tr254;
- case 47: goto tr255;
- case 63: goto tr256;
- case 94: goto tr257;
- case 126: goto tr258;
- }
- goto tr214;
-st108:
- if ( ++p == pe )
- goto _out108;
-case 108:
- switch( (*p) ) {
- case 33: goto tr259;
- case 42: goto tr260;
- case 47: goto tr261;
- case 94: goto tr262;
- case 126: goto tr263;
- }
- goto tr214;
-tr196:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 774 "rlscan.rl"
- {act = 78;}
- goto st109;
-tr274:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 755 "rlscan.rl"
- {act = 69;}
- goto st109;
-tr277:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 744 "rlscan.rl"
- {act = 66;}
- goto st109;
-tr283:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 745 "rlscan.rl"
- {act = 67;}
- goto st109;
-tr287:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 766 "rlscan.rl"
- {act = 72;}
- goto st109;
-tr288:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 767 "rlscan.rl"
- {act = 73;}
- goto st109;
-tr292:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 771 "rlscan.rl"
- {act = 77;}
- goto st109;
-tr295:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 770 "rlscan.rl"
- {act = 76;}
- goto st109;
-tr300:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 750 "rlscan.rl"
- {act = 68;}
- goto st109;
-tr306:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 739 "rlscan.rl"
- {act = 64;}
- goto st109;
-tr311:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 738 "rlscan.rl"
- {act = 63;}
- goto st109;
-tr314:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 768 "rlscan.rl"
- {act = 74;}
- goto st109;
-tr320:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 737 "rlscan.rl"
- {act = 62;}
- goto st109;
-tr321:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 769 "rlscan.rl"
- {act = 75;}
- goto st109;
-tr328:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 760 "rlscan.rl"
- {act = 70;}
- goto st109;
-tr332:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 765 "rlscan.rl"
- {act = 71;}
- goto st109;
-tr335:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
-#line 740 "rlscan.rl"
- {act = 65;}
- goto st109;
-st109:
- if ( ++p == pe )
- goto _out109;
-case 109:
-#line 3559 "rlscan.cpp"
- if ( (*p) == 95 )
- goto tr196;
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr264;
-st110:
- if ( ++p == pe )
- goto _out110;
-case 110:
- if ( (*p) == 94 )
- goto tr266;
- goto tr265;
-st111:
- if ( ++p == pe )
- goto _out111;
-case 111:
- switch( (*p) ) {
- case 95: goto tr196;
- case 99: goto st112;
- case 108: goto st119;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st112:
- if ( ++p == pe )
- goto _out112;
-case 112:
- switch( (*p) ) {
- case 95: goto tr196;
- case 99: goto st113;
- case 116: goto st116;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st113:
- if ( ++p == pe )
- goto _out113;
-case 113:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto st114;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st114:
- if ( ++p == pe )
- goto _out114;
-case 114:
- switch( (*p) ) {
- case 95: goto tr196;
- case 115: goto st115;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st115:
- if ( ++p == pe )
- goto _out115;
-case 115:
- switch( (*p) ) {
- case 95: goto tr196;
- case 115: goto tr274;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st116:
- if ( ++p == pe )
- goto _out116;
-case 116:
- switch( (*p) ) {
- case 95: goto tr196;
- case 105: goto st117;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st117:
- if ( ++p == pe )
- goto _out117;
-case 117:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto st118;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st118:
- if ( ++p == pe )
- goto _out118;
-case 118:
- switch( (*p) ) {
- case 95: goto tr196;
- case 110: goto tr277;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st119:
- if ( ++p == pe )
- goto _out119;
-case 119:
- switch( (*p) ) {
- case 95: goto tr196;
- case 112: goto st120;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st120:
- if ( ++p == pe )
- goto _out120;
-case 120:
- switch( (*p) ) {
- case 95: goto tr196;
- case 104: goto st121;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st121:
- if ( ++p == pe )
- goto _out121;
-case 121:
- switch( (*p) ) {
- case 95: goto tr196;
- case 116: goto st122;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st122:
- if ( ++p == pe )
- goto _out122;
-case 122:
- switch( (*p) ) {
- case 95: goto tr196;
- case 121: goto st123;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st123:
- if ( ++p == pe )
- goto _out123;
-case 123:
- switch( (*p) ) {
- case 95: goto tr196;
- case 112: goto st124;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st124:
- if ( ++p == pe )
- goto _out124;
-case 124:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto tr283;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st125:
- if ( ++p == pe )
- goto _out125;
-case 125:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto st126;
- case 114: goto st127;
- case 120: goto st128;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st126:
- if ( ++p == pe )
- goto _out126;
-case 126:
- switch( (*p) ) {
- case 95: goto tr196;
- case 102: goto tr287;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st127:
- if ( ++p == pe )
- goto _out127;
-case 127:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto tr288;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st128:
- if ( ++p == pe )
- goto _out128;
-case 128:
- switch( (*p) ) {
- case 95: goto tr196;
- case 112: goto st129;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st129:
- if ( ++p == pe )
- goto _out129;
-case 129:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto st130;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st130:
- if ( ++p == pe )
- goto _out130;
-case 130:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto st131;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st131:
- if ( ++p == pe )
- goto _out131;
-case 131:
- switch( (*p) ) {
- case 95: goto tr196;
- case 116: goto tr292;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st132:
- if ( ++p == pe )
- goto _out132;
-case 132:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto st133;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st133:
- if ( ++p == pe )
- goto _out133;
-case 133:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto st134;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st134:
- if ( ++p == pe )
- goto _out134;
-case 134:
- switch( (*p) ) {
- case 95: goto tr196;
- case 109: goto tr295;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st135:
- if ( ++p == pe )
- goto _out135;
-case 135:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto st136;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st136:
- if ( ++p == pe )
- goto _out136;
-case 136:
- switch( (*p) ) {
- case 95: goto tr196;
- case 116: goto st137;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st137:
- if ( ++p == pe )
- goto _out137;
-case 137:
- switch( (*p) ) {
- case 95: goto tr196;
- case 107: goto st138;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st138:
- if ( ++p == pe )
- goto _out138;
-case 138:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto st139;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st139:
- if ( ++p == pe )
- goto _out139;
-case 139:
- switch( (*p) ) {
- case 95: goto tr196;
- case 121: goto tr300;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st140:
- if ( ++p == pe )
- goto _out140;
-case 140:
- switch( (*p) ) {
- case 95: goto tr196;
- case 109: goto st141;
- case 110: goto st145;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st141:
- if ( ++p == pe )
- goto _out141;
-case 141:
- switch( (*p) ) {
- case 95: goto tr196;
- case 112: goto st142;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st142:
- if ( ++p == pe )
- goto _out142;
-case 142:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto st143;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st143:
- if ( ++p == pe )
- goto _out143;
-case 143:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto st144;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st144:
- if ( ++p == pe )
- goto _out144;
-case 144:
- switch( (*p) ) {
- case 95: goto tr196;
- case 116: goto tr306;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st145:
- if ( ++p == pe )
- goto _out145;
-case 145:
- switch( (*p) ) {
- case 95: goto tr196;
- case 99: goto st146;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st146:
- if ( ++p == pe )
- goto _out146;
-case 146:
- switch( (*p) ) {
- case 95: goto tr196;
- case 108: goto st147;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st147:
- if ( ++p == pe )
- goto _out147;
-case 147:
- switch( (*p) ) {
- case 95: goto tr196;
- case 117: goto st148;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st148:
- if ( ++p == pe )
- goto _out148;
-case 148:
- switch( (*p) ) {
- case 95: goto tr196;
- case 100: goto st149;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st149:
- if ( ++p == pe )
- goto _out149;
-case 149:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto tr311;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st150:
- if ( ++p == pe )
- goto _out150;
-case 150:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto st151;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st151:
- if ( ++p == pe )
- goto _out151;
-case 151:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto st152;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st152:
- if ( ++p == pe )
- goto _out152;
-case 152:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto tr314;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st153:
- if ( ++p == pe )
- goto _out153;
-case 153:
- switch( (*p) ) {
- case 95: goto tr196;
- case 97: goto st154;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st154:
- if ( ++p == pe )
- goto _out154;
-case 154:
- switch( (*p) ) {
- case 95: goto tr196;
- case 99: goto st155;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st155:
- if ( ++p == pe )
- goto _out155;
-case 155:
- switch( (*p) ) {
- case 95: goto tr196;
- case 104: goto st156;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st156:
- if ( ++p == pe )
- goto _out156;
-case 156:
- switch( (*p) ) {
- case 95: goto tr196;
- case 105: goto st157;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st157:
- if ( ++p == pe )
- goto _out157;
-case 157:
- switch( (*p) ) {
- case 95: goto tr196;
- case 110: goto st158;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st158:
- if ( ++p == pe )
- goto _out158;
-case 158:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto tr320;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st159:
- if ( ++p == pe )
- goto _out159;
-case 159:
- switch( (*p) ) {
- case 95: goto tr196;
- case 111: goto tr321;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st160:
- if ( ++p == pe )
- goto _out160;
-case 160:
- switch( (*p) ) {
- case 95: goto tr196;
- case 97: goto st161;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st161:
- if ( ++p == pe )
- goto _out161;
-case 161:
- switch( (*p) ) {
- case 95: goto tr196;
- case 114: goto st162;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st162:
- if ( ++p == pe )
- goto _out162;
-case 162:
- switch( (*p) ) {
- case 95: goto tr196;
- case 105: goto st163;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st163:
- if ( ++p == pe )
- goto _out163;
-case 163:
- switch( (*p) ) {
- case 95: goto tr196;
- case 97: goto st164;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 98 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st164:
- if ( ++p == pe )
- goto _out164;
-case 164:
- switch( (*p) ) {
- case 95: goto tr196;
- case 98: goto st165;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st165:
- if ( ++p == pe )
- goto _out165;
-case 165:
- switch( (*p) ) {
- case 95: goto tr196;
- case 108: goto st166;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st166:
- if ( ++p == pe )
- goto _out166;
-case 166:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto tr328;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st167:
- if ( ++p == pe )
- goto _out167;
-case 167:
- switch( (*p) ) {
- case 95: goto tr196;
- case 104: goto st168;
- case 114: goto st170;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st168:
- if ( ++p == pe )
- goto _out168;
-case 168:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto st169;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st169:
- if ( ++p == pe )
- goto _out169;
-case 169:
- switch( (*p) ) {
- case 95: goto tr196;
- case 110: goto tr332;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st170:
- if ( ++p == pe )
- goto _out170;
-case 170:
- switch( (*p) ) {
- case 95: goto tr196;
- case 105: goto st171;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st171:
- if ( ++p == pe )
- goto _out171;
-case 171:
- switch( (*p) ) {
- case 95: goto tr196;
- case 116: goto st172;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st172:
- if ( ++p == pe )
- goto _out172;
-case 172:
- switch( (*p) ) {
- case 95: goto tr196;
- case 101: goto tr335;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr196;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr196;
- } else
- goto tr196;
- goto tr267;
-st173:
- if ( ++p == pe )
- goto _out173;
-case 173:
- if ( (*p) == 42 )
- goto tr336;
- goto tr214;
-tr210:
-#line 1 "rlscan.rl"
- {tokend = p+1;}
- goto st174;
-st174:
- if ( ++p == pe )
- goto _out174;
-case 174:
-#line 4653 "rlscan.cpp"
- if ( (*p) == 37 )
- goto st22;
- goto tr214;
-st22:
- if ( ++p == pe )
- goto _out22;
-case 22:
- if ( (*p) == 37 )
- goto tr40;
- goto tr39;
- }
- _out23: cs = 23; goto _out;
- _out24: cs = 24; goto _out;
- _out25: cs = 25; goto _out;
- _out1: cs = 1; goto _out;
- _out2: cs = 2; goto _out;
- _out26: cs = 26; goto _out;
- _out27: cs = 27; goto _out;
- _out28: cs = 28; goto _out;
- _out3: cs = 3; goto _out;
- _out4: cs = 4; goto _out;
- _out29: cs = 29; goto _out;
- _out5: cs = 5; goto _out;
- _out6: cs = 6; goto _out;
- _out7: cs = 7; goto _out;
- _out30: cs = 30; goto _out;
- _out31: cs = 31; goto _out;
- _out32: cs = 32; goto _out;
- _out33: cs = 33; goto _out;
- _out34: cs = 34; goto _out;
- _out35: cs = 35; goto _out;
- _out36: cs = 36; goto _out;
- _out37: cs = 37; goto _out;
- _out38: cs = 38; goto _out;
- _out39: cs = 39; goto _out;
- _out8: cs = 8; goto _out;
- _out9: cs = 9; goto _out;
- _out40: cs = 40; goto _out;
- _out10: cs = 10; goto _out;
- _out11: cs = 11; goto _out;
- _out41: cs = 41; goto _out;
- _out12: cs = 12; goto _out;
- _out13: cs = 13; goto _out;
- _out14: cs = 14; goto _out;
- _out42: cs = 42; goto _out;
- _out43: cs = 43; goto _out;
- _out15: cs = 15; goto _out;
- _out44: cs = 44; goto _out;
- _out45: cs = 45; goto _out;
- _out46: cs = 46; goto _out;
- _out47: cs = 47; goto _out;
- _out48: cs = 48; goto _out;
- _out49: cs = 49; goto _out;
- _out50: cs = 50; goto _out;
- _out51: cs = 51; goto _out;
- _out52: cs = 52; goto _out;
- _out53: cs = 53; goto _out;
- _out54: cs = 54; goto _out;
- _out55: cs = 55; goto _out;
- _out56: cs = 56; goto _out;
- _out57: cs = 57; goto _out;
- _out58: cs = 58; goto _out;
- _out59: cs = 59; goto _out;
- _out60: cs = 60; goto _out;
- _out61: cs = 61; goto _out;
- _out62: cs = 62; goto _out;
- _out63: cs = 63; goto _out;
- _out64: cs = 64; goto _out;
- _out65: cs = 65; goto _out;
- _out66: cs = 66; goto _out;
- _out67: cs = 67; goto _out;
- _out68: cs = 68; goto _out;
- _out69: cs = 69; goto _out;
- _out70: cs = 70; goto _out;
- _out71: cs = 71; goto _out;
- _out72: cs = 72; goto _out;
- _out73: cs = 73; goto _out;
- _out74: cs = 74; goto _out;
- _out75: cs = 75; goto _out;
- _out76: cs = 76; goto _out;
- _out77: cs = 77; goto _out;
- _out78: cs = 78; goto _out;
- _out79: cs = 79; goto _out;
- _out80: cs = 80; goto _out;
- _out81: cs = 81; goto _out;
- _out82: cs = 82; goto _out;
- _out83: cs = 83; goto _out;
- _out84: cs = 84; goto _out;
- _out85: cs = 85; goto _out;
- _out0: cs = 0; goto _out;
- _out86: cs = 86; goto _out;
- _out87: cs = 87; goto _out;
- _out88: cs = 88; goto _out;
- _out89: cs = 89; goto _out;
- _out90: cs = 90; goto _out;
- _out16: cs = 16; goto _out;
- _out91: cs = 91; goto _out;
- _out17: cs = 17; goto _out;
- _out92: cs = 92; goto _out;
- _out18: cs = 18; goto _out;
- _out93: cs = 93; goto _out;
- _out94: cs = 94; goto _out;
- _out95: cs = 95; goto _out;
- _out19: cs = 19; goto _out;
- _out20: cs = 20; goto _out;
- _out96: cs = 96; goto _out;
- _out97: cs = 97; goto _out;
- _out98: cs = 98; goto _out;
- _out99: cs = 99; goto _out;
- _out100: cs = 100; goto _out;
- _out21: cs = 21; goto _out;
- _out101: cs = 101; goto _out;
- _out102: cs = 102; goto _out;
- _out103: cs = 103; goto _out;
- _out104: cs = 104; goto _out;
- _out105: cs = 105; goto _out;
- _out106: cs = 106; goto _out;
- _out107: cs = 107; goto _out;
- _out108: cs = 108; goto _out;
- _out109: cs = 109; goto _out;
- _out110: cs = 110; goto _out;
- _out111: cs = 111; goto _out;
- _out112: cs = 112; goto _out;
- _out113: cs = 113; goto _out;
- _out114: cs = 114; goto _out;
- _out115: cs = 115; goto _out;
- _out116: cs = 116; goto _out;
- _out117: cs = 117; goto _out;
- _out118: cs = 118; goto _out;
- _out119: cs = 119; goto _out;
- _out120: cs = 120; goto _out;
- _out121: cs = 121; goto _out;
- _out122: cs = 122; goto _out;
- _out123: cs = 123; goto _out;
- _out124: cs = 124; goto _out;
- _out125: cs = 125; goto _out;
- _out126: cs = 126; goto _out;
- _out127: cs = 127; goto _out;
- _out128: cs = 128; goto _out;
- _out129: cs = 129; goto _out;
- _out130: cs = 130; goto _out;
- _out131: cs = 131; goto _out;
- _out132: cs = 132; goto _out;
- _out133: cs = 133; goto _out;
- _out134: cs = 134; goto _out;
- _out135: cs = 135; goto _out;
- _out136: cs = 136; goto _out;
- _out137: cs = 137; goto _out;
- _out138: cs = 138; goto _out;
- _out139: cs = 139; goto _out;
- _out140: cs = 140; goto _out;
- _out141: cs = 141; goto _out;
- _out142: cs = 142; goto _out;
- _out143: cs = 143; goto _out;
- _out144: cs = 144; goto _out;
- _out145: cs = 145; goto _out;
- _out146: cs = 146; goto _out;
- _out147: cs = 147; goto _out;
- _out148: cs = 148; goto _out;
- _out149: cs = 149; goto _out;
- _out150: cs = 150; goto _out;
- _out151: cs = 151; goto _out;
- _out152: cs = 152; goto _out;
- _out153: cs = 153; goto _out;
- _out154: cs = 154; goto _out;
- _out155: cs = 155; goto _out;
- _out156: cs = 156; goto _out;
- _out157: cs = 157; goto _out;
- _out158: cs = 158; goto _out;
- _out159: cs = 159; goto _out;
- _out160: cs = 160; goto _out;
- _out161: cs = 161; goto _out;
- _out162: cs = 162; goto _out;
- _out163: cs = 163; goto _out;
- _out164: cs = 164; goto _out;
- _out165: cs = 165; goto _out;
- _out166: cs = 166; goto _out;
- _out167: cs = 167; goto _out;
- _out168: cs = 168; goto _out;
- _out169: cs = 169; goto _out;
- _out170: cs = 170; goto _out;
- _out171: cs = 171; goto _out;
- _out172: cs = 172; goto _out;
- _out173: cs = 173; goto _out;
- _out174: cs = 174; goto _out;
- _out22: cs = 22; goto _out;
-
- _out: {}
- }
-#line 972 "rlscan.rl"
-
- /* Check if we failed. */
- if ( cs == rlscan_error ) {
- /* Machine failed before finding a token. I'm not yet sure if this
- * is reachable. */
- scan_error() << "scanner error" << endl;
- exit(1);
- }
-
- /* Decide if we need to preserve anything. */
- char *preserve = tokstart;
-
- /* Now set up the prefix. */
- if ( preserve == 0 )
- have = 0;
- else {
- /* There is data that needs to be shifted over. */
- have = pe - preserve;
- memmove( buf, preserve, have );
- unsigned int shiftback = preserve - buf;
- if ( tokstart != 0 )
- tokstart -= shiftback;
- tokend -= shiftback;
-
- preserve = buf;
- }
- }
-
- delete[] buf;
-}
-
-void scan( char *fileName, istream &input, ostream &output )
-{
-}
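The deleted rlscan.cpp above is Ragel output: each numbered state consumes one character, takes a dedicated branch when that character is the next letter of a keyword, and otherwise falls back to the generic identifier transition (tr196) for '_', digits, and letters. A minimal hand-written sketch of that fallback test, using a hypothetical isIdentChar helper that is not part of the generated code:

    #include <cctype>

    // Sketch only: mirrors the ranges tested by the generated states
    // (95 = '_', 48-57 = '0'-'9', 65-90 = 'A'-'Z', 97-122 = 'a'-'z').
    static bool isIdentChar(unsigned char c)
    {
        return c == '_' || std::isdigit(c) || std::isalpha(c);
    }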
diff --git a/contrib/tools/ragel5/ragel/rlscan.h b/contrib/tools/ragel5/ragel/rlscan.h
deleted file mode 100644
index e6302aa4c9..0000000000
--- a/contrib/tools/ragel5/ragel/rlscan.h
+++ /dev/null
@@ -1,161 +0,0 @@
-/*
- * Copyright 2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _RLSCAN_H
-#define _RLSCAN_H
-
-#include <iostream>
-#include "rlscan.h"
-#include "vector.h"
-#include "rlparse.h"
-#include "parsedata.h"
-#include "avltree.h"
-#include "vector.h"
-
-using std::istream;
-using std::ostream;
-
-extern char *Parser_lelNames[];
-
-/* This is used for tracking the current stack of include file/machine pairs. It
- * is used to detect a recursive include structure. */
-struct IncludeStackItem
-{
- IncludeStackItem(const char *fileName, char *sectionName )
- : fileName(fileName), sectionName(sectionName) {}
-
- const char *fileName;
- char *sectionName;
-};
-
-typedef Vector<IncludeStackItem> IncludeStack;
-
-inline char* resolvePath(const char* rel, const char* abs) {
- const size_t l1 = strlen(rel);
- const size_t l2 = strlen(abs);
- char* ret = new char[l1 + l2 + 1];
-
- const char* p = strrchr(abs, '/') + 1;
- const size_t l3 = p - abs;
-
- memcpy(ret, abs, l3);
- strcpy(ret + l3, rel);
-
- return ret;
-}
-
-struct Scanner
-{
- Scanner(const char *fileName, istream &input, ostream &output,
- Parser *inclToParser, char *inclSectionTarg,
- int includeDepth, bool importMachines )
- :
- fileName(fileName), input(input), output(output),
- inclToParser(inclToParser),
- inclSectionTarg(inclSectionTarg),
- includeDepth(includeDepth),
- importMachines(importMachines),
- cur_token(0),
- line(1), column(1), lastnl(0),
- parser(0), ignoreSection(false),
- parserExistsError(false),
- whitespaceOn(true),
- lastToken(0)
- {}
-
- bool recursiveInclude(const char *inclFileName, char *inclSectionName );
-
- char *prepareFileName( char *inclFileName, int len )
- {
- if (*inclFileName == '\"') {
- inclFileName[len - 1] = 0;
- ++inclFileName;
- }
- char* res = resolvePath(inclFileName, fileName); // there was a memory leak in the original too
- return res;
- }
-
- void init();
- void token( int type, char *start, char *end );
- void token( int type, char c );
- void token( int type );
- void processToken( int type, char *tokdata, int toklen );
- void directToParser( Parser *toParser, const char *tokFileName, int tokLine,
- int tokColumn, int type, char *tokdata, int toklen );
- void flushImport( );
- void importToken( int type, char *start, char *end );
- void pass( int token, char *start, char *end );
- void pass();
- void updateCol();
- void startSection();
- void endSection();
- void do_scan();
- bool active();
- ostream &scan_error();
-
- const char *fileName;
- istream &input;
- ostream &output;
- Parser *inclToParser;
- char *inclSectionTarg;
- int includeDepth;
- bool importMachines;
-
- /* For import parsing. */
- int tok_cs, tok_act;
- int *tok_tokstart, *tok_tokend;
- int cur_token;
- static const int max_tokens = 32;
- int token_data[max_tokens];
- char *token_strings[max_tokens];
- int token_lens[max_tokens];
-
- /* For section processing. */
- int cs;
- char *word, *lit;
- int word_len, lit_len;
-
- /* For character scanning. */
- int line;
- InputLoc sectionLoc;
- char *tokstart, *tokend;
- int column;
- char *lastnl;
-
- /* Set by machine statements, these persist from section to section
- * allowing for unnamed sections. */
- Parser *parser;
- bool ignoreSection;
- IncludeStack includeStack;
-
- /* This is set if ragel has already emitted an error stating that
- * no section name has been seen and thus no parser exists. */
- bool parserExistsError;
-
- /* This is for inline code. By default it is on. It goes off for
- * statements and values in inline blocks which are parsed. */
- bool whitespaceOn;
-
- /* Keeps a record of the previous token sent to the section parser. */
- int lastToken;
-};
-
-#endif /* _RLSCAN_H */
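resolvePath above joins a relative include name with the directory of the including file: it copies everything up to and including the last '/' of the absolute path, then appends the relative name, so it assumes the including file's path contains a '/'. A hedged usage sketch with illustrative paths:

    // The caller owns the returned buffer; the comment in prepareFileName
    // acknowledges that the original code sometimes leaked it.
    char *full = resolvePath("inc.rl", "/src/dir/main.rl");  // -> "/src/dir/inc.rl"
    delete[] full;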
diff --git a/contrib/tools/ragel5/ragel/xmlcodegen.cpp b/contrib/tools/ragel5/ragel/xmlcodegen.cpp
deleted file mode 100644
index 021c97e87d..0000000000
--- a/contrib/tools/ragel5/ragel/xmlcodegen.cpp
+++ /dev/null
@@ -1,713 +0,0 @@
-/*
- * Copyright 2005, 2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include "ragel.h"
-#include "xmlcodegen.h"
-#include "parsedata.h"
-#include "fsmgraph.h"
-#include <string.h>
-
-using namespace std;
-
-XMLCodeGen::XMLCodeGen( char *fsmName, ParseData *pd, FsmAp *fsm,
- std::ostream &out )
-:
- fsmName(fsmName),
- pd(pd),
- fsm(fsm),
- out(out),
- nextActionTableId(0)
-{
-}
-
-
-void XMLCodeGen::writeActionList()
-{
- /* Determine which actions to write. */
- int nextActionId = 0;
- for ( ActionList::Iter act = pd->actionList; act.lte(); act++ ) {
- if ( act->numRefs() > 0 || act->numCondRefs > 0 )
- act->actionId = nextActionId++;
- }
-
- /* Write the list. */
- out << " <action_list length=\"" << nextActionId << "\">\n";
- for ( ActionList::Iter act = pd->actionList; act.lte(); act++ ) {
- if ( act->actionId >= 0 )
- writeAction( act );
- }
- out << " </action_list>\n";
-}
-
-void XMLCodeGen::writeActionTableList()
-{
- /* Must first order the action tables based on their id. */
- int numTables = nextActionTableId;
- RedActionTable **tables = new RedActionTable*[numTables];
- for ( ActionTableMap::Iter at = actionTableMap; at.lte(); at++ )
- tables[at->id] = at;
-
- out << " <action_table_list length=\"" << numTables << "\">\n";
- for ( int t = 0; t < numTables; t++ ) {
- out << " <action_table id=\"" << t << "\" length=\"" <<
- tables[t]->key.length() << "\">";
- for ( ActionTable::Iter atel = tables[t]->key; atel.lte(); atel++ ) {
- out << atel->value->actionId;
- if ( ! atel.last() )
- out << " ";
- }
- out << "</action_table>\n";
- }
- out << " </action_table_list>\n";
-
- delete[] tables;
-}
-
-void XMLCodeGen::reduceActionTables()
-{
- /* Reduce the actions tables to a set. */
- for ( StateList::Iter st = fsm->stateList; st.lte(); st++ ) {
- RedActionTable *actionTable = 0;
-
- /* Reduce To State Actions. */
- if ( st->toStateActionTable.length() > 0 ) {
- if ( actionTableMap.insert( st->toStateActionTable, &actionTable ) )
- actionTable->id = nextActionTableId++;
- }
-
- /* Reduce From State Actions. */
- if ( st->fromStateActionTable.length() > 0 ) {
- if ( actionTableMap.insert( st->fromStateActionTable, &actionTable ) )
- actionTable->id = nextActionTableId++;
- }
-
- /* Reduce EOF actions. */
- if ( st->eofActionTable.length() > 0 ) {
- if ( actionTableMap.insert( st->eofActionTable, &actionTable ) )
- actionTable->id = nextActionTableId++;
- }
-
- /* Loop the transitions and reduce their actions. */
- for ( TransList::Iter trans = st->outList; trans.lte(); trans++ ) {
- if ( trans->actionTable.length() > 0 ) {
- if ( actionTableMap.insert( trans->actionTable, &actionTable ) )
- actionTable->id = nextActionTableId++;
- }
- }
- }
-}
-
-void XMLCodeGen::appendTrans( TransListVect &outList, Key lowKey,
- Key highKey, TransAp *trans )
-{
- if ( trans->toState != 0 || trans->actionTable.length() > 0 )
- outList.append( TransEl( lowKey, highKey, trans ) );
-}
-
-void XMLCodeGen::writeKey( Key key )
-{
- if ( keyOps->isSigned )
- out << key.getVal();
- else
- out << (unsigned long) key.getVal();
-}
-
-void XMLCodeGen::writeTrans( Key lowKey, Key highKey, TransAp *trans )
-{
- /* First reduce the action. */
- RedActionTable *actionTable = 0;
- if ( trans->actionTable.length() > 0 )
- actionTable = actionTableMap.find( trans->actionTable );
-
- /* Write the transition. */
- out << " <t>";
- writeKey( lowKey );
- out << " ";
- writeKey( highKey );
-
- if ( trans->toState != 0 )
- out << " " << trans->toState->alg.stateNum;
- else
- out << " x";
-
- if ( actionTable != 0 )
- out << " " << actionTable->id;
- else
- out << " x";
- out << "</t>\n";
-}
-
-void XMLCodeGen::writeTransList( StateAp *state )
-{
- TransListVect outList;
-
- /* If there are no ranges the task is simple. */
- if ( state->outList.length() > 0 ) {
- /* Loop each source range. */
- for ( TransList::Iter trans = state->outList; trans.lte(); trans++ ) {
- /* Reduce the transition. If it reduced to anything then add it. */
- appendTrans( outList, trans->lowKey, trans->highKey, trans );
- }
- }
-
- out << " <trans_list length=\"" << outList.length() << "\">\n";
- for ( TransListVect::Iter tvi = outList; tvi.lte(); tvi++ )
- writeTrans( tvi->lowKey, tvi->highKey, tvi->value );
- out << " </trans_list>\n";
-}
-
-void XMLCodeGen::writeLmSwitch( InlineItem *item )
-{
- LongestMatch *longestMatch = item->longestMatch;
-
- out << "<lm_switch";
- if ( longestMatch->lmSwitchHandlesError )
- out << " handles_error=\"t\"";
- out << ">\n";
-
- for ( LmPartList::Iter lmi = *longestMatch->longestMatchList; lmi.lte(); lmi++ ) {
- if ( lmi->inLmSelect && lmi->action != 0 ) {
- /* Open the action. Write it with the context that sets up _p
- * when doing control flow changes from inside the machine. */
- out << " <sub_action id=\"" << lmi->longestMatchId << "\">";
- writeInlineList( lmi->action->inlineList, item );
- out << "</sub_action>\n";
- }
- }
-
- out << " </lm_switch><exec><get_tokend></get_tokend></exec>";
-}
-
-void XMLCodeGen::writeText( InlineItem *item )
-{
- if ( item->prev == 0 || item->prev->type != InlineItem::Text )
- out << "<text>";
- xmlEscapeHost( out, item->data, strlen(item->data) );
- if ( item->next == 0 || item->next->type != InlineItem::Text )
- out << "</text>";
-}
-
-void XMLCodeGen::writeCtrlFlow( InlineItem *item, InlineItem *context )
-{
- if ( context != 0 ) {
- out << "<sub_action>";
-
- switch ( context->type ) {
- case InlineItem::LmOnLast:
- out << "<exec><get_tokend></get_tokend></exec>";
- break;
- case InlineItem::LmOnNext:
- out << "<exec><get_tokend></get_tokend></exec>";
- break;
- case InlineItem::LmOnLagBehind:
- out << "<exec><get_tokend></get_tokend></exec>";
- break;
- case InlineItem::LmSwitch:
- out << "<exec><get_tokend></get_tokend></exec>";
- break;
- default: break;
- }
- }
-
- switch ( item->type ) {
- case InlineItem::Goto:
- writeGoto( item, context );
- break;
- case InlineItem::GotoExpr:
- writeGotoExpr( item, context );
- break;
- case InlineItem::Call:
- writeCall( item, context );
- break;
- case InlineItem::CallExpr:
- writeCallExpr( item, context );
- break;
- case InlineItem::Next:
- writeNext( item, context );
- break;
- case InlineItem::NextExpr:
- writeNextExpr( item, context );
- break;
- case InlineItem::Break:
- out << "<break></break>";
- break;
- case InlineItem::Ret:
- out << "<ret></ret>";
- break;
- default: break;
- }
-
- if ( context != 0 )
- out << "</sub_action>";
-}
-
-void XMLCodeGen::writePtrMod( InlineItem *item, InlineItem *context )
-{
- if ( context != 0 && ( context->type == InlineItem::LmOnNext ||
- context->type == InlineItem::LmOnLagBehind ||
- context->type == InlineItem::LmSwitch ) )
- {
- switch ( item->type ) {
- case InlineItem::Hold:
- out << "<holdte></holdte>";
- break;
- case InlineItem::Exec:
- writeActionExecTE( item );
- break;
- default: break;
- }
- }
- else {
- switch ( item->type ) {
- case InlineItem::Hold:
- out << "<hold></hold>";
- break;
- case InlineItem::Exec:
- writeActionExec( item );
- break;
- default: break;
- }
- }
-}
-
-
-void XMLCodeGen::writeGoto( InlineItem *item, InlineItem *context )
-{
- if ( pd->generatingSectionSubset )
- out << "<goto>-1</goto>";
- else {
- EntryMapEl *targ = fsm->entryPoints.find( item->nameTarg->id );
- out << "<goto>" << targ->value->alg.stateNum << "</goto>";
- }
-}
-
-void XMLCodeGen::writeCall( InlineItem *item, InlineItem *context )
-{
- if ( pd->generatingSectionSubset )
- out << "<call>-1</call>";
- else {
- EntryMapEl *targ = fsm->entryPoints.find( item->nameTarg->id );
- out << "<call>" << targ->value->alg.stateNum << "</call>";
- }
-}
-
-void XMLCodeGen::writeNext( InlineItem *item, InlineItem *context )
-{
- if ( pd->generatingSectionSubset )
- out << "<next>-1</next>";
- else {
- EntryMapEl *targ = fsm->entryPoints.find( item->nameTarg->id );
- out << "<next>" << targ->value->alg.stateNum << "</next>";
- }
-}
-
-void XMLCodeGen::writeGotoExpr( InlineItem *item, InlineItem *context )
-{
- out << "<goto_expr>";
- writeInlineList( item->children, 0 );
- out << "</goto_expr>";
-}
-
-void XMLCodeGen::writeCallExpr( InlineItem *item, InlineItem *context )
-{
- out << "<call_expr>";
- writeInlineList( item->children, 0 );
- out << "</call_expr>";
-}
-
-void XMLCodeGen::writeNextExpr( InlineItem *item, InlineItem *context )
-{
- out << "<next_expr>";
- writeInlineList( item->children, 0 );
- out << "</next_expr>";
-}
-
-void XMLCodeGen::writeEntry( InlineItem * item )
-{
- if ( pd->generatingSectionSubset )
- out << "<entry>-1</entry>";
- else {
- EntryMapEl *targ = fsm->entryPoints.find( item->nameTarg->id );
- out << "<entry>" << targ->value->alg.stateNum << "</entry>";
- }
-}
-
-void XMLCodeGen::writeActionExec( InlineItem *item )
-{
- out << "<exec>";
- writeInlineList( item->children, 0 );
- out << "</exec>";
-}
-
-void XMLCodeGen::writeActionExecTE( InlineItem *item )
-{
- out << "<execte>";
- writeInlineList( item->children, 0 );
- out << "</execte>";
-}
-
-void XMLCodeGen::writeLmOnLast( InlineItem *item )
-{
- out << "<set_tokend>1</set_tokend>";
- if ( item->longestMatchPart->action != 0 ) {
- out << "<sub_action>";
- writeInlineList( item->longestMatchPart->action->inlineList, item );
- out << "</sub_action>";
- }
- out << "<exec><get_tokend></get_tokend></exec>";
-}
-
-void XMLCodeGen::writeLmOnNext( InlineItem *item )
-{
- out << "<set_tokend>0</set_tokend>";
- if ( item->longestMatchPart->action != 0 ) {
- out << "<sub_action>";
- writeInlineList( item->longestMatchPart->action->inlineList, item );
- out << "</sub_action>";
- }
- out << "<exec><get_tokend></get_tokend></exec>";
-}
-
-void XMLCodeGen::writeLmOnLagBehind( InlineItem *item )
-{
- if ( item->longestMatchPart->action != 0 ) {
- out << "<sub_action>";
- writeInlineList( item->longestMatchPart->action->inlineList, item );
- out << "</sub_action>";
- }
- out << "<exec><get_tokend></get_tokend></exec>";
-}
-
-
-void XMLCodeGen::writeInlineList( InlineList *inlineList, InlineItem *context )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Text:
- writeText( item );
- break;
- case InlineItem::Goto: case InlineItem::GotoExpr:
- case InlineItem::Call: case InlineItem::CallExpr:
- case InlineItem::Next: case InlineItem::NextExpr:
- case InlineItem::Break: case InlineItem::Ret:
- writeCtrlFlow( item, context );
- break;
- case InlineItem::PChar:
- out << "<pchar></pchar>";
- break;
- case InlineItem::Char:
- out << "<char></char>";
- break;
- case InlineItem::Curs:
- out << "<curs></curs>";
- break;
- case InlineItem::Targs:
- out << "<targs></targs>";
- break;
- case InlineItem::Entry:
- writeEntry( item );
- break;
-
- case InlineItem::Hold:
- case InlineItem::Exec:
- writePtrMod( item, context );
- break;
-
- case InlineItem::LmSwitch:
- writeLmSwitch( item );
- break;
- case InlineItem::LmSetActId:
- out << "<set_act>" <<
- item->longestMatchPart->longestMatchId <<
- "</set_act>";
- break;
- case InlineItem::LmSetTokEnd:
- out << "<set_tokend>1</set_tokend>";
- break;
- case InlineItem::LmOnLast:
- writeLmOnLast( item );
- break;
- case InlineItem::LmOnNext:
- writeLmOnNext( item );
- break;
- case InlineItem::LmOnLagBehind:
- writeLmOnLagBehind( item );
- break;
- case InlineItem::LmInitAct:
- out << "<init_act></init_act>";
- break;
- case InlineItem::LmInitTokStart:
- out << "<init_tokstart></init_tokstart>";
- break;
- case InlineItem::LmSetTokStart:
- out << "<set_tokstart></set_tokstart>";
- break;
- }
- }
-}
-
-void XMLCodeGen::writeAction( Action *action )
-{
- out << " <action id=\"" << action->actionId << "\"";
- if ( action->name != 0 )
- out << " name=\"" << action->name << "\"";
- out << " line=\"" << action->loc.line << "\" col=\"" << action->loc.col << "\">";
- writeInlineList( action->inlineList, 0 );
- out << "</action>\n";
-}
-
-void xmlEscapeHost( std::ostream &out, char *data, int len )
-{
- char *end = data + len;
- while ( data != end ) {
- switch ( *data ) {
- case '<': out << "&lt;"; break;
- case '>': out << "&gt;"; break;
- case '&': out << "&amp;"; break;
- default: out << *data; break;
- }
- data += 1;
- }
-}
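xmlEscapeHost escapes only the three characters that would break the surrounding XML markup ('<', '>', '&'). An illustrative call (the input string is made up):

    // Prints: a&lt;b&amp;c
    xmlEscapeHost(std::cout, const_cast<char *>("a<b&c"), 5);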
-
-void XMLCodeGen::writeStateActions( StateAp *state )
-{
- RedActionTable *toStateActions = 0;
- if ( state->toStateActionTable.length() > 0 )
- toStateActions = actionTableMap.find( state->toStateActionTable );
-
- RedActionTable *fromStateActions = 0;
- if ( state->fromStateActionTable.length() > 0 )
- fromStateActions = actionTableMap.find( state->fromStateActionTable );
-
- RedActionTable *eofActions = 0;
- if ( state->eofActionTable.length() > 0 )
- eofActions = actionTableMap.find( state->eofActionTable );
-
- if ( toStateActions != 0 || fromStateActions != 0 || eofActions != 0 ) {
- out << " <state_actions>";
- if ( toStateActions != 0 )
- out << toStateActions->id;
- else
- out << "x";
-
- if ( fromStateActions != 0 )
- out << " " << fromStateActions->id;
- else
- out << " x";
-
- if ( eofActions != 0 )
- out << " " << eofActions->id;
- else
- out << " x";
- out << "</state_actions>\n";
- }
-}
-
-void XMLCodeGen::writeStateConditions( StateAp *state )
-{
- if ( state->stateCondList.length() > 0 ) {
- out << " <cond_list length=\"" << state->stateCondList.length() << "\">\n";
- for ( StateCondList::Iter scdi = state->stateCondList; scdi.lte(); scdi++ ) {
- out << " <c>";
- writeKey( scdi->lowKey );
- out << " ";
- writeKey( scdi->highKey );
- out << " ";
- out << scdi->condSpace->condSpaceId;
- out << "</c>\n";
- }
- out << " </cond_list>\n";
- }
-}
-
-void XMLCodeGen::writeStateList()
-{
- /* Write the list of states. */
- out << " <state_list length=\"" << fsm->stateList.length() << "\">\n";
- for ( StateList::Iter st = fsm->stateList; st.lte(); st++ ) {
- out << " <state id=\"" << st->alg.stateNum << "\"";
- if ( st->isFinState() )
- out << " final=\"t\"";
- out << ">\n";
-
- writeStateActions( st );
- writeStateConditions( st );
- writeTransList( st );
-
- out << " </state>\n";
-
- if ( !st.last() )
- out << "\n";
- }
- out << " </state_list>\n";
-}
-
-bool XMLCodeGen::writeNameInst( NameInst *nameInst )
-{
- bool written = false;
- if ( nameInst->parent != 0 )
- written = writeNameInst( nameInst->parent );
-
- if ( nameInst->name != 0 ) {
- if ( written )
- out << '_';
- out << nameInst->name;
- written = true;
- }
-
- return written;
-}
-
-void XMLCodeGen::writeEntryPoints()
-{
- /* List of entry points other than start state. */
- if ( fsm->entryPoints.length() > 0 || pd->lmRequiresErrorState ) {
- out << " <entry_points";
- if ( pd->lmRequiresErrorState )
- out << " error=\"t\"";
- out << ">\n";
- for ( EntryMap::Iter en = fsm->entryPoints; en.lte(); en++ ) {
- /* Get the name instantiation from nameIndex. */
- NameInst *nameInst = pd->nameIndex[en->key];
- StateAp *state = en->value;
- out << " <entry name=\"";
- writeNameInst( nameInst );
- out << "\">" << state->alg.stateNum << "</entry>\n";
- }
- out << " </entry_points>\n";
- }
-}
-
-void XMLCodeGen::writeMachine()
-{
- /* Open the machine. */
- out << " <machine>\n";
-
- /* Action tables. */
- reduceActionTables();
-
- writeActionList();
- writeActionTableList();
- writeConditions();
-
- /* Start state. */
- GraphDictEl *mainEl = pd->graphDict.find( mainMachine );
- if ( mainEl != 0 ) {
- out << " <start_state>" << fsm->startState->alg.stateNum <<
- "</start_state>\n";
- }
-
- /* Error state. */
- if ( fsm->errState != 0 ) {
- out << " <error_state>" << fsm->errState->alg.stateNum <<
- "</error_state>\n";
- }
-
- writeEntryPoints();
- writeStateList();
-
- out << " </machine>\n";
-}
-
-void XMLCodeGen::writeAlphType()
-{
- out << " <alphtype>" <<
- (keyOps->alphType - hostLang->hostTypes) << "</alphtype>\n";
-}
-
-void XMLCodeGen::writeGetKeyExpr()
-{
- out << " <getkey>";
- writeInlineList( pd->getKeyExpr, 0 );
- out << "</getkey>\n";
-}
-
-void XMLCodeGen::writeAccessExpr()
-{
- out << " <access>";
- writeInlineList( pd->accessExpr, 0 );
- out << "</access>\n";
-}
-
-void XMLCodeGen::writeCurStateExpr()
-{
- out << " <curstate>";
- writeInlineList( pd->curStateExpr, 0 );
- out << "</curstate>\n";
-}
-
-void XMLCodeGen::writeConditions()
-{
- if ( condData->condSpaceMap.length() > 0 ) {
- long nextCondSpaceId = 0;
- for ( CondSpaceMap::Iter cs = condData->condSpaceMap; cs.lte(); cs++ )
- cs->condSpaceId = nextCondSpaceId++;
-
- out << " <cond_space_list length=\"" << condData->condSpaceMap.length() << "\">\n";
- for ( CondSpaceMap::Iter cs = condData->condSpaceMap; cs.lte(); cs++ ) {
- out << " <cond_space id=\"" << cs->condSpaceId <<
- "\" length=\"" << cs->condSet.length() << "\">";
- writeKey( cs->baseKey );
- for ( CondSet::Iter csi = cs->condSet; csi.lte(); csi++ )
- out << " " << (*csi)->actionId;
- out << "</cond_space>\n";
- }
- out << " </cond_space_list>\n";
- }
-}
-
-void XMLCodeGen::writeExports()
-{
- if ( pd->exportList.length() > 0 ) {
- out << " <exports>\n";
- for ( ExportList::Iter exp = pd->exportList; exp.lte(); exp++ ) {
- out << " <ex name=\"" << exp->name << "\">";
- writeKey( exp->key );
- out << "</ex>\n";
- }
- out << " </exports>\n";
- }
-}
-
-void XMLCodeGen::writeXML()
-{
- /* Open the definition. */
- out << "<ragel_def name=\"" << fsmName << "\">\n";
- writeAlphType();
-
- if ( pd->getKeyExpr != 0 )
- writeGetKeyExpr();
-
- if ( pd->accessExpr != 0 )
- writeAccessExpr();
-
- if ( pd->curStateExpr != 0 )
- writeCurStateExpr();
-
- writeExports();
-
- writeMachine();
-
- out <<
- "</ragel_def>\n";
-}
-
diff --git a/contrib/tools/ragel5/ragel/xmlcodegen.h b/contrib/tools/ragel5/ragel/xmlcodegen.h
deleted file mode 100644
index 99b985395a..0000000000
--- a/contrib/tools/ragel5/ragel/xmlcodegen.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright 2005, 2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _XMLDOTGEN_H
-#define _XMLDOTGEN_H
-
-#include <iostream>
-#include "avltree.h"
-#include "fsmgraph.h"
-#include "parsedata.h"
-
-/* Forwards. */
-struct TransAp;
-struct FsmAp;
-struct ParseData;
-
-struct RedActionTable
-:
- public AvlTreeEl<RedActionTable>
-{
- RedActionTable( const ActionTable &key )
- :
- key(key),
- id(0)
- { }
-
- const ActionTable &getKey()
- { return key; }
-
- ActionTable key;
- int id;
-};
-
-typedef AvlTree<RedActionTable, ActionTable, CmpActionTable> ActionTableMap;
-
-struct NextRedTrans
-{
- Key lowKey, highKey;
- TransAp *trans;
- TransAp *next;
-
- void load() {
- if ( trans != 0 ) {
- next = trans->next;
- lowKey = trans->lowKey;
- highKey = trans->highKey;
- }
- }
-
- NextRedTrans( TransAp *t ) {
- trans = t;
- load();
- }
-
- void increment() {
- trans = next;
- load();
- }
-};
-
-class XMLCodeGen
-{
-public:
- XMLCodeGen( char *fsmName, ParseData *pd, FsmAp *fsm, std::ostream &out );
- void writeXML( );
-
-private:
- void appendTrans( TransListVect &outList, Key lowKey, Key highKey, TransAp *trans );
- void writeStateActions( StateAp *state );
- void writeStateList();
- void writeStateConditions( StateAp *state );
-
- void writeKey( Key key );
- void writeText( InlineItem *item );
- void writeCtrlFlow( InlineItem *item, InlineItem *context );
- void writePtrMod( InlineItem *item, InlineItem *context );
- void writeGoto( InlineItem *item, InlineItem *context );
- void writeGotoExpr( InlineItem *item, InlineItem *context );
- void writeCall( InlineItem *item, InlineItem *context );
- void writeCallExpr( InlineItem *item, InlineItem *context );
- void writeNext( InlineItem *item, InlineItem *context );
- void writeNextExpr( InlineItem *item, InlineItem *context );
- void writeEntry( InlineItem *item );
- void writeLmSetActId( InlineItem *item );
- void writeLmOnLast( InlineItem *item );
- void writeLmOnNext( InlineItem *item );
- void writeLmOnLagBehind( InlineItem *item );
-
- void writeExports();
- bool writeNameInst( NameInst *nameInst );
- void writeEntryPoints();
- void writeGetKeyExpr();
- void writeAccessExpr();
- void writeCurStateExpr();
- void writeConditions();
- void writeInlineList( InlineList *inlineList, InlineItem *context );
- void writeAlphType();
- void writeActionList();
- void writeActionTableList();
- void reduceTrans( TransAp *trans );
- void reduceActionTables();
- void writeTransList( StateAp *state );
- void writeTrans( Key lowKey, Key highKey, TransAp *defTrans );
- void writeAction( Action *action );
- void writeLmSwitch( InlineItem *item );
- void writeMachine();
- void writeActionExec( InlineItem *item );
- void writeActionExecTE( InlineItem *item );
-
- char *fsmName;
- ParseData *pd;
- FsmAp *fsm;
- std::ostream &out;
- ActionTableMap actionTableMap;
- int nextActionTableId;
-};
-
-
-#endif /* _XMLDOTGEN_H */
diff --git a/contrib/tools/ragel5/ragel/ya.make b/contrib/tools/ragel5/ragel/ya.make
deleted file mode 100644
index 6966321b7c..0000000000
--- a/contrib/tools/ragel5/ragel/ya.make
+++ /dev/null
@@ -1,26 +0,0 @@
-PROGRAM(ragel5)
-
-NO_UTIL()
-NO_COMPILER_WARNINGS()
-
-PEERDIR(
- contrib/tools/ragel5/aapl
- contrib/tools/ragel5/common
-)
-
-SRCS(
- fsmap.cpp
- fsmattach.cpp
- fsmbase.cpp
- fsmgraph.cpp
- fsmmin.cpp
- fsmstate.cpp
- main.cpp
- parsedata.cpp
- parsetree.cpp
- rlparse.cpp
- rlscan.cpp
- xmlcodegen.cpp
-)
-
-END()
diff --git a/contrib/tools/ragel5/redfsm/gendata.cpp b/contrib/tools/ragel5/redfsm/gendata.cpp
deleted file mode 100644
index b0893ccdc2..0000000000
--- a/contrib/tools/ragel5/redfsm/gendata.cpp
+++ /dev/null
@@ -1,717 +0,0 @@
-/*
- * Copyright 2005-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "gendata.h"
-#include <iostream>
-
-using std::cerr;
-using std::endl;
-
-CodeGenData::CodeGenData( ostream &out )
-:
- sourceFileName(0),
- fsmName(0),
- out(out),
- redFsm(0),
- allActions(0),
- allActionTables(0),
- allConditions(0),
- allCondSpaces(0),
- allStates(0),
- nameIndex(0),
- startState(-1),
- errState(-1),
- getKeyExpr(0),
- accessExpr(0),
- curStateExpr(0),
- wantComplete(0),
- hasLongestMatch(false),
- codeGenErrCount(0),
- hasEnd(true),
- dataPrefix(true),
- writeFirstFinal(true),
- writeErr(true)
-{}
-
-
-void CodeGenData::createMachine()
-{
- redFsm = new RedFsmAp();
-}
-
-void CodeGenData::initActionList( unsigned long length )
-{
- allActions = new Action[length];
- for ( unsigned long a = 0; a < length; a++ )
- actionList.append( allActions+a );
-}
-
-void CodeGenData::newAction( int anum, char *name, int line,
- int col, InlineList *inlineList )
-{
- allActions[anum].actionId = anum;
- allActions[anum].name = name;
- allActions[anum].loc.line = line;
- allActions[anum].loc.col = col;
- allActions[anum].inlineList = inlineList;
-}
-
-void CodeGenData::initActionTableList( unsigned long length )
-{
- allActionTables = new RedAction[length];
-}
-
-void CodeGenData::initStateList( unsigned long length )
-{
- allStates = new RedStateAp[length];
- for ( unsigned long s = 0; s < length; s++ )
- redFsm->stateList.append( allStates+s );
-
- /* We get the start state as an offset, set the pointer now. */
- if ( startState >= 0 )
- redFsm->startState = allStates + startState;
- if ( errState >= 0 )
- redFsm->errState = allStates + errState;
- for ( EntryIdVect::Iter en = entryPointIds; en.lte(); en++ )
- redFsm->entryPoints.insert( allStates + *en );
-
- /* The nextStateId is no longer used to assign state ids (they now arrive
- * already set from the frontend); however, the generation code still depends
- * on it. Should eventually remove this variable. */
- redFsm->nextStateId = redFsm->stateList.length();
-}
-
-void CodeGenData::setStartState( unsigned long startState )
-{
- this->startState = startState;
-}
-
-void CodeGenData::setErrorState( unsigned long errState )
-{
- this->errState = errState;
-}
-
-void CodeGenData::addEntryPoint( char *name, unsigned long entryState )
-{
- entryPointIds.append( entryState );
- entryPointNames.append( name );
-}
-
-void CodeGenData::initTransList( int snum, unsigned long length )
-{
- /* Could preallocate the out range to save time growing it. For now do
- * nothing. */
-}
-
-void CodeGenData::newTrans( int snum, int tnum, Key lowKey,
- Key highKey, long targ, long action )
-{
- /* Get the current state and range. */
- RedStateAp *curState = allStates + snum;
- RedTransList &destRange = curState->outRange;
-
- if ( curState == redFsm->errState )
- return;
-
- /* Make the new transitions. */
- RedStateAp *targState = targ >= 0 ? (allStates + targ) :
- wantComplete ? redFsm->getErrorState() : 0;
- RedAction *actionTable = action >= 0 ? (allActionTables + action) : 0;
- RedTransAp *trans = redFsm->allocateTrans( targState, actionTable );
- RedTransEl transEl( lowKey, highKey, trans );
-
- if ( wantComplete ) {
- /* If the machine is to be complete then we need to fill any gaps with
- * the error transitions. */
- if ( destRange.length() == 0 ) {
- /* Range is currently empty. */
- if ( keyOps->minKey < lowKey ) {
- /* The first range doesn't start at the low end. */
- Key fillHighKey = lowKey;
- fillHighKey.decrement();
-
- /* Create the filler with the state's error transition. */
- RedTransEl newTel( keyOps->minKey, fillHighKey, redFsm->getErrorTrans() );
- destRange.append( newTel );
- }
- }
- else {
- /* The range list is not empty, get the last range. */
- RedTransEl *last = &destRange[destRange.length()-1];
- Key nextKey = last->highKey;
- nextKey.increment();
- if ( nextKey < lowKey ) {
- /* There is a gap to fill. Make the high key. */
- Key fillHighKey = lowKey;
- fillHighKey.decrement();
-
- /* Create the filler with the state's error transition. */
- RedTransEl newTel( nextKey, fillHighKey, redFsm->getErrorTrans() );
- destRange.append( newTel );
- }
- }
- }
-
- /* Filler taken care of. Append the range. */
- destRange.append( RedTransEl( lowKey, highKey, trans ) );
-}
-
-void CodeGenData::finishTransList( int snum )
-{
- /* Get the current state and range. */
- RedStateAp *curState = allStates + snum;
- RedTransList &destRange = curState->outRange;
-
- if ( curState == redFsm->errState )
- return;
-
- /* If building a complete machine we may need filler on the end. */
- if ( wantComplete ) {
- /* Check if there are any ranges already. */
- if ( destRange.length() == 0 ) {
- /* Fill with the whole alphabet. */
- /* Add the range on the lower and upper bound. */
- RedTransEl newTel( keyOps->minKey, keyOps->maxKey, redFsm->getErrorTrans() );
- destRange.append( newTel );
- }
- else {
- /* Get the last and check for a gap on the end. */
- RedTransEl *last = &destRange[destRange.length()-1];
- if ( last->highKey < keyOps->maxKey ) {
- /* Make the high key. */
- Key fillLowKey = last->highKey;
- fillLowKey.increment();
-
- /* Create the new range with the error trans and append it. */
- RedTransEl newTel( fillLowKey, keyOps->maxKey, redFsm->getErrorTrans() );
- destRange.append( newTel );
- }
- }
- }
-}
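For a complete machine, newTrans and finishTransList together ensure a state's outRange covers the whole alphabet by appending ranges that target the error state wherever the incoming ranges leave gaps. A standalone sketch of the same idea, assuming ranges arrive sorted and non-overlapping (the Range type and fillGaps name are mine, not Ragel's):

    #include <vector>

    struct Range { int low, high, targ; };  // targ == -1 stands for the error state

    // Pad a sorted, non-overlapping range list so it spans [minKey, maxKey].
    std::vector<Range> fillGaps(const std::vector<Range> &in, int minKey, int maxKey)
    {
        std::vector<Range> out;
        int next = minKey;
        for (const Range &r : in) {
            if (next < r.low)
                out.push_back({next, r.low - 1, -1});  // filler before this range
            out.push_back(r);
            next = r.high + 1;
        }
        if (next <= maxKey)
            out.push_back({next, maxKey, -1});         // trailing filler
        return out;
    }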
-
-void CodeGenData::setId( int snum, int id )
-{
- RedStateAp *curState = allStates + snum;
- curState->id = id;
-}
-
-void CodeGenData::setFinal( int snum )
-{
- RedStateAp *curState = allStates + snum;
- curState->isFinal = true;
-}
-
-
-void CodeGenData::setStateActions( int snum, long toStateAction,
- long fromStateAction, long eofAction )
-{
- RedStateAp *curState = allStates + snum;
- if ( toStateAction >= 0 )
- curState->toStateAction = allActionTables + toStateAction;
- if ( fromStateAction >= 0 )
- curState->fromStateAction = allActionTables + fromStateAction;
- if ( eofAction >= 0 )
- curState->eofAction = allActionTables + eofAction;
-}
-
-void CodeGenData::resolveTargetStates( InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Goto: case InlineItem::Call:
- case InlineItem::Next: case InlineItem::Entry:
- item->targState = allStates + item->targId;
- break;
- default:
- break;
- }
-
- if ( item->children != 0 )
- resolveTargetStates( item->children );
- }
-}
-
-void CodeGenData::closeMachine()
-{
- for ( ActionList::Iter a = actionList; a.lte(); a++ )
- resolveTargetStates( a->inlineList );
-
- /* Note that even if we want a complete graph we do not give the error
- * state a default transition. All machines break out of the processing
- * loop when in the error state. */
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- for ( StateCondList::Iter sci = st->stateCondList; sci.lte(); sci++ )
- st->stateCondVect.append( sci );
- }
-}
-
-
-bool CodeGenData::setAlphType( char *data )
-{
- /* FIXME: This should validate the alphabet type selection. */
- HostType *alphType = hostLang->hostTypes + atoi(data);
- thisKeyOps.setAlphType( alphType );
- return true;
-}
-
-void CodeGenData::initCondSpaceList( ulong length )
-{
- allCondSpaces = new CondSpace[length];
- for ( ulong c = 0; c < length; c++ )
- condSpaceList.append( allCondSpaces + c );
-}
-
-void CodeGenData::newCondSpace( int cnum, int condSpaceId, Key baseKey )
-{
- CondSpace *cond = allCondSpaces + cnum;
- cond->condSpaceId = condSpaceId;
- cond->baseKey = baseKey;
-}
-
-void CodeGenData::condSpaceItem( int cnum, long condActionId )
-{
- CondSpace *cond = allCondSpaces + cnum;
- cond->condSet.append( allActions + condActionId );
-}
-
-void CodeGenData::initStateCondList( int snum, ulong length )
-{
- /* Could preallocate these, as we could with transitions. */
-}
-
-void CodeGenData::addStateCond( int snum, Key lowKey, Key highKey, long condNum )
-{
- RedStateAp *curState = allStates + snum;
-
- /* Create the new state condition. */
- StateCond *stateCond = new StateCond;
- stateCond->lowKey = lowKey;
- stateCond->highKey = highKey;
-
- /* Assign it a cond space. */
- CondSpace *condSpace = allCondSpaces + condNum;
- stateCond->condSpace = condSpace;
-
- curState->stateCondList.append( stateCond );
-}
-
-
-CondSpace *CodeGenData::findCondSpace( Key lowKey, Key highKey )
-{
- for ( CondSpaceList::Iter cs = condSpaceList; cs.lte(); cs++ ) {
- Key csHighKey = cs->baseKey;
- csHighKey += keyOps->alphSize() * (1 << cs->condSet.length());
-
- if ( lowKey >= cs->baseKey && highKey <= csHighKey )
- return cs;
- }
- return 0;
-}
-
-Condition *CodeGenData::findCondition( Key key )
-{
- for ( ConditionList::Iter cond = conditionList; cond.lte(); cond++ ) {
- Key upperKey = cond->baseKey + (1 << cond->condSet.length());
- if ( cond->baseKey <= key && key <= upperKey )
- return cond;
- }
- return 0;
-}
-
-Key CodeGenData::findMaxKey()
-{
- Key maxKey = keyOps->maxKey;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- assert( st->outSingle.length() == 0 );
- assert( st->defTrans == 0 );
-
- long rangeLen = st->outRange.length();
- if ( rangeLen > 0 ) {
- Key highKey = st->outRange[rangeLen-1].highKey;
- if ( highKey > maxKey )
- maxKey = highKey;
- }
- }
- return maxKey;
-}
-
-void CodeGenData::findFinalActionRefs()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Reference count out of single transitions. */
- for ( RedTransList::Iter rtel = st->outSingle; rtel.lte(); rtel++ ) {
- if ( rtel->value->action != 0 ) {
- rtel->value->action->numTransRefs += 1;
- for ( ActionTable::Iter item = rtel->value->action->key; item.lte(); item++ )
- item->value->numTransRefs += 1;
- }
- }
-
- /* Reference count out of range transitions. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- if ( rtel->value->action != 0 ) {
- rtel->value->action->numTransRefs += 1;
- for ( ActionTable::Iter item = rtel->value->action->key; item.lte(); item++ )
- item->value->numTransRefs += 1;
- }
- }
-
- /* Reference count default transition. */
- if ( st->defTrans != 0 && st->defTrans->action != 0 ) {
- st->defTrans->action->numTransRefs += 1;
- for ( ActionTable::Iter item = st->defTrans->action->key; item.lte(); item++ )
- item->value->numTransRefs += 1;
- }
-
- /* Reference count to state actions. */
- if ( st->toStateAction != 0 ) {
- st->toStateAction->numToStateRefs += 1;
- for ( ActionTable::Iter item = st->toStateAction->key; item.lte(); item++ )
- item->value->numToStateRefs += 1;
- }
-
- /* Reference count from state actions. */
- if ( st->fromStateAction != 0 ) {
- st->fromStateAction->numFromStateRefs += 1;
- for ( ActionTable::Iter item = st->fromStateAction->key; item.lte(); item++ )
- item->value->numFromStateRefs += 1;
- }
-
- /* Reference count EOF actions. */
- if ( st->eofAction != 0 ) {
- st->eofAction->numEofRefs += 1;
- for ( ActionTable::Iter item = st->eofAction->key; item.lte(); item++ )
- item->value->numEofRefs += 1;
- }
- }
-}
-
-void CodeGenData::analyzeAction( Action *act, InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- /* Only consider actions that are referenced. */
- if ( act->numRefs() > 0 ) {
- if ( item->type == InlineItem::Goto || item->type == InlineItem::GotoExpr )
- redFsm->bAnyActionGotos = true;
- else if ( item->type == InlineItem::Call || item->type == InlineItem::CallExpr )
- redFsm->bAnyActionCalls = true;
- else if ( item->type == InlineItem::Ret )
- redFsm->bAnyActionRets = true;
- }
-
- /* Check for various things in regular actions. */
- if ( act->numTransRefs > 0 || act->numToStateRefs > 0 || act->numFromStateRefs > 0 ) {
- /* Any returns in regular actions? */
- if ( item->type == InlineItem::Ret )
- redFsm->bAnyRegActionRets = true;
-
- /* Any next statements in the regular actions? */
- if ( item->type == InlineItem::Next || item->type == InlineItem::NextExpr )
- redFsm->bAnyRegNextStmt = true;
-
- /* Any by value control in regular actions? */
- if ( item->type == InlineItem::CallExpr || item->type == InlineItem::GotoExpr )
- redFsm->bAnyRegActionByValControl = true;
-
- /* Any references to the current state in regular actions? */
- if ( item->type == InlineItem::Curs )
- redFsm->bAnyRegCurStateRef = true;
-
- if ( item->type == InlineItem::Break )
- redFsm->bAnyRegBreak = true;
-
- if ( item->type == InlineItem::LmSwitch && item->handlesError )
- redFsm->bAnyLmSwitchError = true;
- }
-
- if ( item->children != 0 )
- analyzeAction( act, item->children );
- }
-}
-
-void CodeGenData::analyzeActionList( RedAction *redAct, InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- /* Any next statements in the action table? */
- if ( item->type == InlineItem::Next || item->type == InlineItem::NextExpr )
- redAct->bAnyNextStmt = true;
-
- /* Any references to the current state. */
- if ( item->type == InlineItem::Curs )
- redAct->bAnyCurStateRef = true;
-
- if ( item->type == InlineItem::Break )
- redAct->bAnyBreakStmt = true;
-
- if ( item->children != 0 )
- analyzeActionList( redAct, item->children );
- }
-}
-
-/* Assign ids to referenced actions. */
-void CodeGenData::assignActionIds()
-{
- int nextActionId = 0;
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Only ever interested in referenced actions. */
- if ( act->numRefs() > 0 )
- act->actionId = nextActionId++;
- }
-}
-
-void CodeGenData::setValueLimits()
-{
- redFsm->maxSingleLen = 0;
- redFsm->maxRangeLen = 0;
- redFsm->maxKeyOffset = 0;
- redFsm->maxIndexOffset = 0;
- redFsm->maxActListId = 0;
- redFsm->maxActionLoc = 0;
- redFsm->maxActArrItem = 0;
- redFsm->maxSpan = 0;
- redFsm->maxCondSpan = 0;
- redFsm->maxFlatIndexOffset = 0;
- redFsm->maxCondOffset = 0;
- redFsm->maxCondLen = 0;
- redFsm->maxCondSpaceId = 0;
- redFsm->maxCondIndexOffset = 0;
-
- /* In both of these cases the 0 index is reserved for no value, so the max
- * is one more than it would be if they started at 0. */
- redFsm->maxIndex = redFsm->transSet.length();
- redFsm->maxCond = condSpaceList.length();
-
- /* The nextStateId - 1 is the last state id assigned. */
- redFsm->maxState = redFsm->nextStateId - 1;
-
- for ( CondSpaceList::Iter csi = condSpaceList; csi.lte(); csi++ ) {
- if ( csi->condSpaceId > redFsm->maxCondSpaceId )
- redFsm->maxCondSpaceId = csi->condSpaceId;
- }
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Maximum cond length. */
- if ( st->stateCondList.length() > redFsm->maxCondLen )
- redFsm->maxCondLen = st->stateCondList.length();
-
- /* Maximum single length. */
- if ( st->outSingle.length() > redFsm->maxSingleLen )
- redFsm->maxSingleLen = st->outSingle.length();
-
- /* Maximum range length. */
- if ( st->outRange.length() > redFsm->maxRangeLen )
- redFsm->maxRangeLen = st->outRange.length();
-
-		/* The key offset and index offset for the state after the last are not used; skip them. */
- if ( ! st.last() ) {
- redFsm->maxCondOffset += st->stateCondList.length();
- redFsm->maxKeyOffset += st->outSingle.length() + st->outRange.length()*2;
- redFsm->maxIndexOffset += st->outSingle.length() + st->outRange.length() + 1;
- }
-
- /* Max cond span. */
- if ( st->condList != 0 ) {
- unsigned long long span = keyOps->span( st->condLowKey, st->condHighKey );
- if ( span > redFsm->maxCondSpan )
- redFsm->maxCondSpan = span;
- }
-
- /* Max key span. */
- if ( st->transList != 0 ) {
- unsigned long long span = keyOps->span( st->lowKey, st->highKey );
- if ( span > redFsm->maxSpan )
- redFsm->maxSpan = span;
- }
-
- /* Max cond index offset. */
- if ( ! st.last() ) {
- if ( st->condList != 0 )
- redFsm->maxCondIndexOffset += keyOps->span( st->condLowKey, st->condHighKey );
- }
-
- /* Max flat index offset. */
- if ( ! st.last() ) {
- if ( st->transList != 0 )
- redFsm->maxFlatIndexOffset += keyOps->span( st->lowKey, st->highKey );
- redFsm->maxFlatIndexOffset += 1;
- }
- }
-
- for ( ActionTableMap::Iter at = redFsm->actionMap; at.lte(); at++ ) {
- /* Maximum id of action lists. */
- if ( at->actListId+1 > redFsm->maxActListId )
- redFsm->maxActListId = at->actListId+1;
-
- /* Maximum location of items in action array. */
- if ( at->location+1 > redFsm->maxActionLoc )
- redFsm->maxActionLoc = at->location+1;
-
- /* Maximum values going into the action array. */
- if ( at->key.length() > redFsm->maxActArrItem )
- redFsm->maxActArrItem = at->key.length();
- for ( ActionTable::Iter item = at->key; item.lte(); item++ ) {
- if ( item->value->actionId > redFsm->maxActArrItem )
- redFsm->maxActArrItem = item->value->actionId;
- }
- }
-}
-
-
-
-/* Gather various info on the machine. */
-void CodeGenData::analyzeMachine()
-{
- /* Find the true count of action references. */
- findFinalActionRefs();
-
- /* Check if there are any calls in action code. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Record the occurrence of various kinds of actions. */
- if ( act->numToStateRefs > 0 )
- redFsm->bAnyToStateActions = true;
- if ( act->numFromStateRefs > 0 )
- redFsm->bAnyFromStateActions = true;
- if ( act->numEofRefs > 0 )
- redFsm->bAnyEofActions = true;
- if ( act->numTransRefs > 0 )
- redFsm->bAnyRegActions = true;
-
- /* Recurse through the action's parse tree looking for various things. */
- analyzeAction( act, act->inlineList );
- }
-
- /* Analyze reduced action lists. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- for ( ActionTable::Iter act = redAct->key; act.lte(); act++ )
- analyzeActionList( redAct, act->value->inlineList );
- }
-
- /* Find states that have transitions with actions that have next
- * statements. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
-		/* Check any actions out of outSingle. */
- for ( RedTransList::Iter rtel = st->outSingle; rtel.lte(); rtel++ ) {
- if ( rtel->value->action != 0 && rtel->value->action->anyCurStateRef() )
- st->bAnyRegCurStateRef = true;
- }
-
- /* Check any actions out of outRange. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- if ( rtel->value->action != 0 && rtel->value->action->anyCurStateRef() )
- st->bAnyRegCurStateRef = true;
- }
-
- /* Check any action out of default. */
- if ( st->defTrans != 0 && st->defTrans->action != 0 &&
- st->defTrans->action->anyCurStateRef() )
- st->bAnyRegCurStateRef = true;
-
- if ( st->stateCondList.length() > 0 )
- redFsm->bAnyConditions = true;
- }
-
- /* Assign ids to actions that are referenced. */
- assignActionIds();
-
- /* Set the maximums of various values used for deciding types. */
- setValueLimits();
-}
-
-void CodeGenData::writeStatement( InputLoc &loc, int nargs, char **args )
-{
- /* FIXME: This should be moved to the virtual functions in the code
- * generators.
- *
- * Force a newline. */
- out << "\n";
- genLineDirective( out );
-
- if ( strcmp( args[0], "data" ) == 0 ) {
- for ( int i = 1; i < nargs; i++ ) {
- if ( strcmp( args[i], "noerror" ) == 0 )
- writeErr = false;
- else if ( strcmp( args[i], "noprefix" ) == 0 )
- dataPrefix = false;
- else if ( strcmp( args[i], "nofinal" ) == 0 )
- writeFirstFinal = false;
- else {
- source_warning(loc) << "unrecognized write option \"" <<
- args[i] << "\"" << endl;
- }
- }
- writeData();
- }
- else if ( strcmp( args[0], "init" ) == 0 ) {
- for ( int i = 1; i < nargs; i++ ) {
- source_warning(loc) << "unrecognized write option \"" <<
- args[i] << "\"" << endl;
- }
- writeInit();
- }
- else if ( strcmp( args[0], "exec" ) == 0 ) {
- for ( int i = 1; i < nargs; i++ ) {
- if ( strcmp( args[i], "noend" ) == 0 )
- hasEnd = false;
- else {
- source_warning(loc) << "unrecognized write option \"" <<
- args[i] << "\"" << endl;
- }
- }
- writeExec();
- }
- else if ( strcmp( args[0], "eof" ) == 0 ) {
- for ( int i = 1; i < nargs; i++ ) {
- source_warning(loc) << "unrecognized write option \"" <<
- args[i] << "\"" << endl;
- }
- writeEOF();
- }
- else if ( strcmp( args[0], "exports" ) == 0 ) {
- for ( int i = 1; i < nargs; i++ ) {
- source_warning(loc) << "unrecognized write option \"" <<
- args[i] << "\"" << endl;
- }
- writeExports();
- }
- else {
-		/* Emit an error here. */
- source_error(loc) << "unrecognized write command \"" <<
- args[0] << "\"" << endl;
- }
-}
-
-ostream &CodeGenData::source_warning( const InputLoc &loc )
-{
- cerr << sourceFileName << ":" << loc.line << ":" << loc.col << ": warning: ";
- return cerr;
-}
-
-ostream &CodeGenData::source_error( const InputLoc &loc )
-{
- codeGenErrCount += 1;
- assert( sourceFileName != 0 );
- cerr << sourceFileName << ":" << loc.line << ":" << loc.col << ": ";
- return cerr;
-}
-
-
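The comment in analyzeMachine() notes that setValueLimits() records maxima "used for deciding types": the table-based generators pick the narrowest array element type that can hold each maximum. A minimal sketch of that selection step follows; the helper name arrayTypeFor and its thresholds are assumptions for illustration, not part of Ragel.

#include <iostream>
#include <string>

/* Illustrative only: map a maximum gathered by setValueLimits() (for example
 * redFsm->maxState) to the narrowest unsigned type for a generated table. */
static std::string arrayTypeFor( unsigned long long maxVal )
{
	if ( maxVal <= 0xffULL )
		return "unsigned char";
	else if ( maxVal <= 0xffffULL )
		return "unsigned short";
	else if ( maxVal <= 0xffffffffULL )
		return "unsigned int";
	else
		return "unsigned long long";
}

int main()
{
	unsigned long long maxState = 300;               /* e.g. redFsm->maxState */
	std::cout << arrayTypeFor( maxState ) << "\n";   /* prints "unsigned short" */
	return 0;
}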
diff --git a/contrib/tools/ragel5/redfsm/gendata.h b/contrib/tools/ragel5/redfsm/gendata.h
deleted file mode 100644
index 855e0710a7..0000000000
--- a/contrib/tools/ragel5/redfsm/gendata.h
+++ /dev/null
@@ -1,167 +0,0 @@
-/*
- * Copyright 2005-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _GENDATA_H
-#define _GENDATA_H
-
-#include <iostream>
-#include "redfsm.h"
-#include "common.h"
-
-using std::ostream;
-
-struct NameInst;
-typedef DList<Action> ActionList;
-
-typedef unsigned long ulong;
-
-struct FsmCodeGen;
-struct CodeGenData;
-
-typedef AvlMap<char *, CodeGenData*, CmpStr> CodeGenMap;
-typedef AvlMapEl<char *, CodeGenData*> CodeGenMapEl;
-
-/*
- * The interface to the parser
- */
-
-/* These functions must be implemented by the code generation executable.
- * The openOutput function is invoked when the root element is opened. The
- * makeCodeGen function is invoked when a ragel_def element is opened. */
-std::ostream *openOutput( char *inputFile );
-CodeGenData *makeCodeGen( char *sourceFileName,
- char *fsmName, ostream &out, bool wantComplete );
-
-void lineDirective( ostream &out, char *fileName, int line );
-void genLineDirective( ostream &out );
-
-/*********************************/
-
-struct CodeGenData
-{
- /*
- * The interface to the code generator.
- */
- virtual void finishRagelDef() {}
-
- /* These are invoked by the corresponding write statements. */
- virtual void writeData() {};
- virtual void writeInit() {};
- virtual void writeExec() {};
- virtual void writeEOF() {};
- virtual void writeExports() {};
-
-	/* This can also be overridden to modify the processing of write
- * statements. */
- virtual void writeStatement( InputLoc &loc, int nargs, char **args );
-
- /********************/
-
- CodeGenData( ostream &out );
- virtual ~CodeGenData() {}
-
- /*
- * Collecting the machine.
- */
-
- char *sourceFileName;
- char *fsmName;
- ostream &out;
- RedFsmAp *redFsm;
- Action *allActions;
- RedAction *allActionTables;
- Condition *allConditions;
- CondSpace *allCondSpaces;
- RedStateAp *allStates;
- NameInst **nameIndex;
- int startState;
- int errState;
- ActionList actionList;
- ConditionList conditionList;
- CondSpaceList condSpaceList;
- InlineList *getKeyExpr;
- InlineList *accessExpr;
- InlineList *curStateExpr;
- KeyOps thisKeyOps;
- bool wantComplete;
- EntryIdVect entryPointIds;
- EntryNameVect entryPointNames;
- bool hasLongestMatch;
- int codeGenErrCount;
- ExportList exportList;
-
- /* Write options. */
- bool hasEnd;
- bool dataPrefix;
- bool writeFirstFinal;
- bool writeErr;
-
- void createMachine();
- void initActionList( unsigned long length );
- void newAction( int anum, char *name, int line, int col, InlineList *inlineList );
- void initActionTableList( unsigned long length );
- void initStateList( unsigned long length );
- void setStartState( unsigned long startState );
- void setErrorState( unsigned long errState );
- void addEntryPoint( char *name, unsigned long entryState );
- void setId( int snum, int id );
- void setFinal( int snum );
- void initTransList( int snum, unsigned long length );
- void newTrans( int snum, int tnum, Key lowKey, Key highKey,
- long targ, long act );
- void finishTransList( int snum );
- void setStateActions( int snum, long toStateAction,
- long fromStateAction, long eofAction );
- void setForcedErrorState()
- { redFsm->forcedErrorState = true; }
-
-
- void initCondSpaceList( ulong length );
- void condSpaceItem( int cnum, long condActionId );
- void newCondSpace( int cnum, int condSpaceId, Key baseKey );
-
- void initStateCondList( int snum, ulong length );
- void addStateCond( int snum, Key lowKey, Key highKey, long condNum );
-
- CondSpace *findCondSpace( Key lowKey, Key highKey );
- Condition *findCondition( Key key );
-
- bool setAlphType( char *data );
-
- void resolveTargetStates( InlineList *inlineList );
- Key findMaxKey();
-
- /* Gather various info on the machine. */
- void analyzeActionList( RedAction *redAct, InlineList *inlineList );
- void analyzeAction( Action *act, InlineList *inlineList );
- void findFinalActionRefs();
- void analyzeMachine();
-
- void closeMachine();
- void setValueLimits();
- void assignActionIds();
-
- ostream &source_warning( const InputLoc &loc );
- ostream &source_error( const InputLoc &loc );
-};
-
-
-#endif /* _GENDATA_H */
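gendata.h above declares the two hooks a code-generation executable must supply: openOutput() when the root element is opened and makeCodeGen() when a ragel_def element is opened. The sketch below shows only the shape of that wiring under stated assumptions: StubCodeGenData stands in for a CodeGenData subclass and const char* replaces the declared char* parameters so the example stays self-contained.

#include <iostream>

struct StubCodeGenData                   /* stand-in for a CodeGenData subclass */
{
	explicit StubCodeGenData( std::ostream &o ) : out(o) {}
	std::ostream &out;
};

static std::ostream *openOutput( const char *inputFile )
{
	(void)inputFile;                 /* a real backend derives an output name here */
	return &std::cout;
}

static StubCodeGenData *makeCodeGen( const char *sourceFileName, const char *fsmName,
		std::ostream &out, bool wantComplete )
{
	(void)sourceFileName; (void)fsmName; (void)wantComplete;
	return new StubCodeGenData( out );
}

int main()
{
	std::ostream *out = openOutput( "example.rl" );
	StubCodeGenData *cgd = makeCodeGen( "example.rl", "machine", *out, true );
	cgd->out << "/* generated code would be written here */\n";
	delete cgd;
	return 0;
}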
diff --git a/contrib/tools/ragel5/redfsm/phash.h b/contrib/tools/ragel5/redfsm/phash.h
deleted file mode 100644
index 11ce7502a6..0000000000
--- a/contrib/tools/ragel5/redfsm/phash.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#pragma once
-
-class Perfect_Hash
-{
-private:
- static inline unsigned int hash (const char *str, unsigned int len);
-
-public:
- static struct XMLTagHashPair *in_word_set (const char *str, unsigned int len);
-};
diff --git a/contrib/tools/ragel5/redfsm/redfsm.cpp b/contrib/tools/ragel5/redfsm/redfsm.cpp
deleted file mode 100644
index 6a55b22ec7..0000000000
--- a/contrib/tools/ragel5/redfsm/redfsm.cpp
+++ /dev/null
@@ -1,559 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "redfsm.h"
-#include "avlmap.h"
-#include <iostream>
-#include <sstream>
-
-using std::ostringstream;
-
-KeyOps *keyOps = 0;
-
-string Action::nameOrLoc()
-{
- if ( name != 0 )
- return string(name);
- else {
- ostringstream ret;
- ret << loc.line << ":" << loc.col;
- return ret.str();
- }
-}
-
-RedFsmAp::RedFsmAp()
-:
- wantComplete(false),
- forcedErrorState(false),
- nextActionId(0),
- nextTransId(0),
- startState(0),
- errState(0),
- errTrans(0),
- firstFinState(0),
- numFinStates(0),
- bAnyToStateActions(false),
- bAnyFromStateActions(false),
- bAnyRegActions(false),
- bAnyEofActions(false),
- bAnyActionGotos(false),
- bAnyActionCalls(false),
- bAnyActionRets(false),
- bAnyRegActionRets(false),
- bAnyRegActionByValControl(false),
- bAnyRegNextStmt(false),
- bAnyRegCurStateRef(false),
- bAnyRegBreak(false),
- bAnyLmSwitchError(false),
- bAnyConditions(false)
-{
-}
-
-/* Does the machine have any actions? */
-bool RedFsmAp::anyActions()
-{
- return actionMap.length() > 0;
-}
-
-void RedFsmAp::depthFirstOrdering( RedStateAp *state )
-{
- /* Nothing to do if the state is already on the list. */
- if ( state->onStateList )
- return;
-
- /* Doing depth first, put state on the list. */
- state->onStateList = true;
- stateList.append( state );
-
- /* At this point transitions should only be in ranges. */
- assert( state->outSingle.length() == 0 );
- assert( state->defTrans == 0 );
-
-	/* Recurse on all the ranges. */
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ ) {
- if ( rtel->value->targ != 0 )
- depthFirstOrdering( rtel->value->targ );
- }
-}
-
-/* Ordering states by transition connections. */
-void RedFsmAp::depthFirstOrdering()
-{
- /* Init on state list flags. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ )
- st->onStateList = false;
-
- /* Clear out the state list, we will rebuild it. */
- int stateListLen = stateList.length();
- stateList.abandon();
-
- /* Add back to the state list from the start state and all other entry
- * points. */
- if ( startState != 0 )
- depthFirstOrdering( startState );
- for ( RedStateSet::Iter en = entryPoints; en.lte(); en++ )
- depthFirstOrdering( *en );
- if ( forcedErrorState )
- depthFirstOrdering( errState );
-
- /* Make sure we put everything back on. */
- assert( stateListLen == stateList.length() );
-}
-
-/* Assign state ids by appearance in the state list. */
-void RedFsmAp::sequentialStateIds()
-{
- /* Table based machines depend on the state numbers starting at zero. */
- nextStateId = 0;
- for ( RedStateList::Iter st = stateList; st.lte(); st++ )
- st->id = nextStateId++;
-}
-
-/* Stable sort the states by final state status. */
-void RedFsmAp::sortStatesByFinal()
-{
- /* Move forward through the list and throw final states onto the end. */
- RedStateAp *state = 0;
- RedStateAp *next = stateList.head;
- RedStateAp *last = stateList.tail;
- while ( state != last ) {
- /* Move forward and load up the next. */
- state = next;
- next = state->next;
-
- /* Throw to the end? */
- if ( state->isFinal ) {
- stateList.detach( state );
- stateList.append( state );
- }
- }
-}
-
-/* Assign state ids by final state status. */
-void RedFsmAp::sortStateIdsByFinal()
-{
- /* Table based machines depend on this starting at zero. */
- nextStateId = 0;
-
- /* First pass to assign non final ids. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- if ( ! st->isFinal )
- st->id = nextStateId++;
- }
-
- /* Second pass to assign final ids. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- if ( st->isFinal )
- st->id = nextStateId++;
- }
-}
-
-void RedFsmAp::sortByStateId()
-{
- /* FIXME: Implement. */
-}
-
-/* Find the final state with the lowest id. */
-void RedFsmAp::findFirstFinState()
-{
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- if ( st->isFinal && (firstFinState == 0 || st->id < firstFinState->id) )
- firstFinState = st;
- }
-}
-
-void RedFsmAp::assignActionLocs()
-{
- int nextLocation = 0;
- for ( ActionTableMap::Iter act = actionMap; act.lte(); act++ ) {
- /* Store the loc, skip over the array and a null terminator. */
- act->location = nextLocation;
- nextLocation += act->key.length() + 1;
- }
-}
-
-/* Check if we can extend the current range by displacing any ranges
- * ahead to the singles. */
-bool RedFsmAp::canExtend( const RedTransList &list, int pos )
-{
- /* Get the transition that we want to extend. */
- RedTransAp *extendTrans = list[pos].value;
-
- /* Look ahead in the transition list. */
- for ( int next = pos + 1; next < list.length(); pos++, next++ ) {
- /* If they are not continuous then cannot extend. */
- Key nextKey = list[next].lowKey;
- nextKey.decrement();
- if ( list[pos].highKey != nextKey )
- break;
-
-		/* Check for the extension property. */
- if ( extendTrans == list[next].value )
- return true;
-
- /* If the span of the next element is more than one, then don't keep
- * checking, it won't be moved to single. */
- unsigned long long nextSpan = keyOps->span( list[next].lowKey, list[next].highKey );
- if ( nextSpan > 1 )
- break;
- }
- return false;
-}
-
-/* Move ranges to the singles list. */
-void RedFsmAp::moveTransToSingle( RedStateAp *state )
-{
- RedTransList &range = state->outRange;
- RedTransList &single = state->outSingle;
- for ( int rpos = 0; rpos < range.length(); ) {
- /* Check if this is a range we can extend. */
- if ( canExtend( range, rpos ) ) {
- /* Transfer singles over. */
- while ( range[rpos].value != range[rpos+1].value ) {
- /* Transfer the range to single. */
- single.append( range[rpos+1] );
- range.remove( rpos+1 );
- }
-
- /* Extend. */
- range[rpos].highKey = range[rpos+1].highKey;
- range.remove( rpos+1 );
- }
- /* Maybe move it to the singles. */
- else if ( keyOps->span( range[rpos].lowKey, range[rpos].highKey ) == 1 ) {
- single.append( range[rpos] );
- range.remove( rpos );
- }
- else {
- /* Keeping it in the ranges. */
- rpos += 1;
- }
- }
-}
-
-/* Look through ranges and choose suitable single character transitions. */
-void RedFsmAp::chooseSingle()
-{
- /* Loop the states. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- /* Rewrite the transition list taking out the suitable single
-		 * transitions. */
- moveTransToSingle( st );
- }
-}
-
-void RedFsmAp::makeFlat()
-{
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- if ( st->stateCondList.length() == 0 ) {
- st->condLowKey = 0;
- st->condHighKey = 0;
- }
- else {
- st->condLowKey = st->stateCondList.head->lowKey;
- st->condHighKey = st->stateCondList.tail->highKey;
-
- unsigned long long span = keyOps->span( st->condLowKey, st->condHighKey );
- st->condList = new CondSpace*[ span ];
- memset( st->condList, 0, sizeof(CondSpace*)*span );
-
- for ( StateCondList::Iter sci = st->stateCondList; sci.lte(); sci++ ) {
- unsigned long long base, trSpan;
- base = keyOps->span( st->condLowKey, sci->lowKey )-1;
- trSpan = keyOps->span( sci->lowKey, sci->highKey );
- for ( unsigned long long pos = 0; pos < trSpan; pos++ )
- st->condList[base+pos] = sci->condSpace;
- }
- }
-
- if ( st->outRange.length() == 0 ) {
- st->lowKey = st->highKey = 0;
- st->transList = 0;
- }
- else {
- st->lowKey = st->outRange[0].lowKey;
- st->highKey = st->outRange[st->outRange.length()-1].highKey;
- unsigned long long span = keyOps->span( st->lowKey, st->highKey );
- st->transList = new RedTransAp*[ span ];
- memset( st->transList, 0, sizeof(RedTransAp*)*span );
-
- for ( RedTransList::Iter trans = st->outRange; trans.lte(); trans++ ) {
- unsigned long long base, trSpan;
- base = keyOps->span( st->lowKey, trans->lowKey )-1;
- trSpan = keyOps->span( trans->lowKey, trans->highKey );
- for ( unsigned long long pos = 0; pos < trSpan; pos++ )
- st->transList[base+pos] = trans->value;
- }
-
- /* Fill in the gaps with the default transition. */
- for ( unsigned long long pos = 0; pos < span; pos++ ) {
- if ( st->transList[pos] == 0 )
- st->transList[pos] = st->defTrans;
- }
- }
- }
-}
-
-
-/* A default transition has been picked, move it from the outRange to the
- * default pointer. */
-void RedFsmAp::moveToDefault( RedTransAp *defTrans, RedStateAp *state )
-{
- /* Rewrite the outRange, omitting any ranges that use
- * the picked default. */
- RedTransList outRange;
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ ) {
- /* If it does not take the default, copy it over. */
- if ( rtel->value != defTrans )
- outRange.append( *rtel );
- }
-
- /* Save off the range we just created into the state's range. */
- state->outRange.transfer( outRange );
-
- /* Store the default. */
- state->defTrans = defTrans;
-}
-
-bool RedFsmAp::alphabetCovered( RedTransList &outRange )
-{
- /* Cannot cover without any out ranges. */
- if ( outRange.length() == 0 )
- return false;
-
-	/* If the first range doesn't start at the lower bound then the
- * alphabet is not covered. */
- RedTransList::Iter rtel = outRange;
- if ( keyOps->minKey < rtel->lowKey )
- return false;
-
- /* Check that every range is next to the previous one. */
- rtel.increment();
- for ( ; rtel.lte(); rtel++ ) {
- Key highKey = rtel[-1].highKey;
- highKey.increment();
- if ( highKey != rtel->lowKey )
- return false;
- }
-
- /* The last must extend to the upper bound. */
- RedTransEl *last = &outRange[outRange.length()-1];
- if ( last->highKey < keyOps->maxKey )
- return false;
-
- return true;
-}
-
-RedTransAp *RedFsmAp::chooseDefaultSpan( RedStateAp *state )
-{
- /* Make a set of transitions from the outRange. */
- RedTransSet stateTransSet;
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ )
- stateTransSet.insert( rtel->value );
-
-	/* For each transition in the set, find how many alphabet characters the
- * transition spans. */
- unsigned long long *span = new unsigned long long[stateTransSet.length()];
- memset( span, 0, sizeof(unsigned long long) * stateTransSet.length() );
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ ) {
- /* Lookup the transition in the set. */
- RedTransAp **inSet = stateTransSet.find( rtel->value );
- int pos = inSet - stateTransSet.data;
- span[pos] += keyOps->span( rtel->lowKey, rtel->highKey );
- }
-
- /* Find the max span, choose it for making the default. */
- RedTransAp *maxTrans = 0;
- unsigned long long maxSpan = 0;
- for ( RedTransSet::Iter rtel = stateTransSet; rtel.lte(); rtel++ ) {
- if ( span[rtel.pos()] > maxSpan ) {
- maxSpan = span[rtel.pos()];
- maxTrans = *rtel;
- }
- }
-
- delete[] span;
- return maxTrans;
-}
-
-/* Pick default transitions from ranges for the states. */
-void RedFsmAp::chooseDefaultSpan()
-{
- /* Loop the states. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- /* Only pick a default transition if the alphabet is covered. This
- * avoids any transitions in the out range that go to error and avoids
- * the need for an ERR state. */
- if ( alphabetCovered( st->outRange ) ) {
- /* Pick a default transition by largest span. */
- RedTransAp *defTrans = chooseDefaultSpan( st );
-
- /* Rewrite the transition list taking out the transition we picked
- * as the default and store the default. */
- moveToDefault( defTrans, st );
- }
- }
-}
-
-RedTransAp *RedFsmAp::chooseDefaultGoto( RedStateAp *state )
-{
- /* Make a set of transitions from the outRange. */
- RedTransSet stateTransSet;
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ ) {
- if ( rtel->value->targ == state->next )
- return rtel->value;
- }
- return 0;
-}
-
-void RedFsmAp::chooseDefaultGoto()
-{
- /* Loop the states. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- /* Pick a default transition. */
- RedTransAp *defTrans = chooseDefaultGoto( st );
- if ( defTrans == 0 )
- defTrans = chooseDefaultSpan( st );
-
- /* Rewrite the transition list taking out the transition we picked
- * as the default and store the default. */
- moveToDefault( defTrans, st );
- }
-}
-
-RedTransAp *RedFsmAp::chooseDefaultNumRanges( RedStateAp *state )
-{
- /* Make a set of transitions from the outRange. */
- RedTransSet stateTransSet;
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ )
- stateTransSet.insert( rtel->value );
-
-	/* For each transition in the set, find how many ranges use the transition. */
- int *numRanges = new int[stateTransSet.length()];
- memset( numRanges, 0, sizeof(int) * stateTransSet.length() );
- for ( RedTransList::Iter rtel = state->outRange; rtel.lte(); rtel++ ) {
- /* Lookup the transition in the set. */
- RedTransAp **inSet = stateTransSet.find( rtel->value );
- numRanges[inSet - stateTransSet.data] += 1;
- }
-
- /* Find the max number of ranges. */
- RedTransAp *maxTrans = 0;
- int maxNumRanges = 0;
- for ( RedTransSet::Iter rtel = stateTransSet; rtel.lte(); rtel++ ) {
- if ( numRanges[rtel.pos()] > maxNumRanges ) {
- maxNumRanges = numRanges[rtel.pos()];
- maxTrans = *rtel;
- }
- }
-
- delete[] numRanges;
- return maxTrans;
-}
-
-void RedFsmAp::chooseDefaultNumRanges()
-{
- /* Loop the states. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- /* Pick a default transition. */
- RedTransAp *defTrans = chooseDefaultNumRanges( st );
-
- /* Rewrite the transition list taking out the transition we picked
- * as the default and store the default. */
- moveToDefault( defTrans, st );
- }
-}
-
-RedTransAp *RedFsmAp::getErrorTrans( )
-{
-	/* If the error trans has not been made already, make it. */
- if ( errTrans == 0 ) {
- /* This insert should always succeed since no transition created by
- * the user can point to the error state. */
- errTrans = new RedTransAp( getErrorState(), 0, nextTransId++ );
- RedTransAp *inRes = transSet.insert( errTrans );
- assert( inRes != 0 );
- }
- return errTrans;
-}
-
-RedStateAp *RedFsmAp::getErrorState()
-{
- /* Something went wrong. An error state is needed but one was not supplied
- * by the frontend. */
- assert( errState != 0 );
- return errState;
-}
-
-
-RedTransAp *RedFsmAp::allocateTrans( RedStateAp *targ, RedAction *action )
-{
-	/* Create a reduced trans and look for it in the transition set. */
- RedTransAp redTrans( targ, action, 0 );
- RedTransAp *inDict = transSet.find( &redTrans );
- if ( inDict == 0 ) {
- inDict = new RedTransAp( targ, action, nextTransId++ );
- transSet.insert( inDict );
- }
- return inDict;
-}
-
-void RedFsmAp::partitionFsm( int nparts )
-{
- /* At this point the states are ordered by a depth-first traversal. We
- * will allocate to partitions based on this ordering. */
- this->nParts = nparts;
- int partSize = stateList.length() / nparts;
- int remainder = stateList.length() % nparts;
- int numInPart = partSize;
- int partition = 0;
- if ( remainder-- > 0 )
- numInPart += 1;
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- st->partition = partition;
-
- numInPart -= 1;
- if ( numInPart == 0 ) {
- partition += 1;
- numInPart = partSize;
- if ( remainder-- > 0 )
- numInPart += 1;
- }
- }
-}
-
-void RedFsmAp::setInTrans()
-{
- /* First pass counts the number of transitions. */
- for ( TransApSet::Iter trans = transSet; trans.lte(); trans++ )
- trans->targ->numInTrans += 1;
-
- /* Pass over states to allocate the needed memory. Reset the counts so we
- * can use them as the current size. */
- for ( RedStateList::Iter st = stateList; st.lte(); st++ ) {
- st->inTrans = new RedTransAp*[st->numInTrans];
- st->numInTrans = 0;
- }
-
- /* Second pass over transitions copies pointers into the in trans list. */
- for ( TransApSet::Iter trans = transSet; trans.lte(); trans++ )
- trans->targ->inTrans[trans->targ->numInTrans++] = trans;
-}
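RedFsmAp::partitionFsm() above spreads the depth-first-ordered states over nparts contiguous partitions, giving the first (stateList.length() % nparts) partitions one extra state. A standalone restatement of that arithmetic, using std::vector and an illustrative function name rather than Ragel's types:

#include <iostream>
#include <vector>

/* Sizes of nParts contiguous partitions over nStates states: each gets
 * nStates/nParts, and the first nStates%nParts partitions get one more. */
static std::vector<int> partitionSizes( int nStates, int nParts )
{
	std::vector<int> sizes( nParts, nStates / nParts );
	for ( int p = 0; p < nStates % nParts; p++ )
		sizes[p] += 1;
	return sizes;
}

int main()
{
	/* 10 states over 4 partitions -> 3 3 2 2 */
	for ( int s : partitionSizes( 10, 4 ) )
		std::cout << s << " ";
	std::cout << "\n";
	return 0;
}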
diff --git a/contrib/tools/ragel5/redfsm/redfsm.h b/contrib/tools/ragel5/redfsm/redfsm.h
deleted file mode 100644
index 515b1b621b..0000000000
--- a/contrib/tools/ragel5/redfsm/redfsm.h
+++ /dev/null
@@ -1,534 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _REDFSM_H
-#define _REDFSM_H
-
-#include <assert.h>
-#include <string.h>
-#include <string>
-#include "common.h"
-#include "vector.h"
-#include "dlist.h"
-#include "compare.h"
-#include "bstmap.h"
-#include "bstset.h"
-#include "avlmap.h"
-#include "avltree.h"
-#include "avlbasic.h"
-#include "mergesort.h"
-#include "sbstmap.h"
-#include "sbstset.h"
-#include "sbsttable.h"
-
-#define TRANS_ERR_TRANS 0
-#define STATE_ERR_STATE 0
-#define FUNC_NO_FUNC 0
-
-using std::string;
-
-struct RedStateAp;
-struct InlineList;
-struct Action;
-
-/* Location in an input file. */
-struct InputLoc
-{
- int line;
- int col;
-};
-
-/*
- * Inline code tree
- */
-struct InlineItem
-{
- enum Type
- {
- Text, Goto, Call, Next, GotoExpr, CallExpr, NextExpr, Ret,
- PChar, Char, Hold, Exec, HoldTE, ExecTE, Curs, Targs, Entry,
- LmSwitch, LmSetActId, LmSetTokEnd, LmGetTokEnd, LmInitTokStart,
- LmInitAct, LmSetTokStart, SubAction, Break
- };
-
- InlineItem( const InputLoc &loc, Type type ) :
- loc(loc), data(0), targId(0), targState(0),
- lmId(0), children(0), offset(0),
- handlesError(false), type(type) { }
-
- InputLoc loc;
- char *data;
- int targId;
- RedStateAp *targState;
- int lmId;
- InlineList *children;
- int offset;
- bool handlesError;
- Type type;
-
- InlineItem *prev, *next;
-};
-
-/* Normally this would be a typedef, but that would entail including DList from
- * ptreetypes, which should be just typedef forwards. */
-struct InlineList : public DList<InlineItem> { };
-
-/* Element in list of actions. Contains the string for the code to execute. */
-struct Action
-:
- public DListEl<Action>
-{
- Action( )
- :
- name(0),
- inlineList(0),
- actionId(0),
- numTransRefs(0),
- numToStateRefs(0),
- numFromStateRefs(0),
- numEofRefs(0)
- {
- }
-
- /* Data collected during parse. */
- InputLoc loc;
- char *name;
- InlineList *inlineList;
- int actionId;
-
- string nameOrLoc();
-
- /* Number of references in the final machine. */
- int numRefs()
- { return numTransRefs + numToStateRefs + numFromStateRefs + numEofRefs; }
- int numTransRefs;
- int numToStateRefs;
- int numFromStateRefs;
- int numEofRefs;
-};
-
-
-/* Forwards. */
-struct RedStateAp;
-struct StateAp;
-
-/* Transition Action Element. */
-typedef SBstMapEl< int, Action* > ActionTableEl;
-
-/* Transition Action Table. */
-struct ActionTable
- : public SBstMap< int, Action*, CmpOrd<int> >
-{
- void setAction( int ordering, Action *action );
- void setActions( int *orderings, Action **actions, int nActs );
- void setActions( const ActionTable &other );
-};
-
-/* Compare of a whole action table element (key & value). */
-struct CmpActionTableEl
-{
- static int compare( const ActionTableEl &action1,
- const ActionTableEl &action2 )
- {
- if ( action1.key < action2.key )
- return -1;
- else if ( action1.key > action2.key )
- return 1;
- else if ( action1.value < action2.value )
- return -1;
- else if ( action1.value > action2.value )
- return 1;
- return 0;
- }
-};
-
-/* Compare for ActionTable. */
-typedef CmpSTable< ActionTableEl, CmpActionTableEl > CmpActionTable;
-
-/* Set of states. */
-typedef BstSet<RedStateAp*> RedStateSet;
-typedef BstSet<int> IntSet;
-
-/* Reduced action. */
-struct RedAction
-:
- public AvlTreeEl<RedAction>
-{
- RedAction( )
- :
- key(),
- eofRefs(0),
- numTransRefs(0),
- numToStateRefs(0),
- numFromStateRefs(0),
- numEofRefs(0),
- bAnyNextStmt(false),
- bAnyCurStateRef(false),
- bAnyBreakStmt(false)
- { }
-
- const ActionTable &getKey()
- { return key; }
-
- ActionTable key;
- int actListId;
- int location;
- IntSet *eofRefs;
-
- /* Number of references in the final machine. */
- int numRefs()
- { return numTransRefs + numToStateRefs + numFromStateRefs + numEofRefs; }
- int numTransRefs;
- int numToStateRefs;
- int numFromStateRefs;
- int numEofRefs;
-
- bool anyNextStmt() { return bAnyNextStmt; }
- bool anyCurStateRef() { return bAnyCurStateRef; }
- bool anyBreakStmt() { return bAnyBreakStmt; }
-
- bool bAnyNextStmt;
- bool bAnyCurStateRef;
- bool bAnyBreakStmt;
-};
-typedef AvlTree<RedAction, ActionTable, CmpActionTable> ActionTableMap;
-
-/* Reduced transition. */
-struct RedTransAp
-:
- public AvlTreeEl<RedTransAp>
-{
- RedTransAp( RedStateAp *targ, RedAction *action, int id )
- : targ(targ), action(action), id(id), labelNeeded(true) { }
-
- RedStateAp *targ;
- RedAction *action;
- int id;
- bool partitionBoundary;
- bool labelNeeded;
-};
-
-/* Compare of transitions for the final reduction of transitions. Comparison
- * is on target and the pointer to the shared action table. It is assumed that
- * when this is used the action tables have been reduced. */
-struct CmpRedTransAp
-{
- static int compare( const RedTransAp &t1, const RedTransAp &t2 )
- {
- if ( t1.targ < t2.targ )
- return -1;
- else if ( t1.targ > t2.targ )
- return 1;
- else if ( t1.action < t2.action )
- return -1;
- else if ( t1.action > t2.action )
- return 1;
- else
- return 0;
- }
-};
-
-typedef AvlBasic<RedTransAp, CmpRedTransAp> TransApSet;
-
-/* Element in out range. */
-struct RedTransEl
-{
- /* Constructors. */
- RedTransEl( Key lowKey, Key highKey, RedTransAp *value )
- : lowKey(lowKey), highKey(highKey), value(value) { }
-
- Key lowKey, highKey;
- RedTransAp *value;
-};
-
-typedef Vector<RedTransEl> RedTransList;
-typedef Vector<RedStateAp*> RedStateVect;
-
-typedef BstMapEl<RedStateAp*, unsigned long long> RedSpanMapEl;
-typedef BstMap<RedStateAp*, unsigned long long> RedSpanMap;
-
-/* Compare used by span map sort. Reverse sorts by the span. */
-struct CmpRedSpanMapEl
-{
- static int compare( const RedSpanMapEl &smel1, const RedSpanMapEl &smel2 )
- {
- if ( smel1.value > smel2.value )
- return -1;
- else if ( smel1.value < smel2.value )
- return 1;
- else
- return 0;
- }
-};
-
-/* Sorting state-span map entries by span. */
-typedef MergeSort<RedSpanMapEl, CmpRedSpanMapEl> RedSpanMapSort;
-
-/* Set of entry ids that go into this state. */
-typedef Vector<int> EntryIdVect;
-typedef Vector<char*> EntryNameVect;
-
-typedef Vector< Action* > CondSet;
-
-struct Condition
-{
- Condition( )
- : key(0), baseKey(0) {}
-
- Key key;
- Key baseKey;
- CondSet condSet;
-
- Condition *next, *prev;
-};
-typedef DList<Condition> ConditionList;
-
-struct CondSpace
-{
- Key baseKey;
- CondSet condSet;
- int condSpaceId;
-
- CondSpace *next, *prev;
-};
-typedef DList<CondSpace> CondSpaceList;
-
-struct StateCond
-{
- Key lowKey;
- Key highKey;
-
- CondSpace *condSpace;
-
- StateCond *prev, *next;
-};
-typedef DList<StateCond> StateCondList;
-typedef Vector<StateCond*> StateCondVect;
-
-/* Reduced state. */
-struct RedStateAp
-{
- RedStateAp()
- :
- defTrans(0),
- condList(0),
- transList(0),
- isFinal(false),
- labelNeeded(false),
- outNeeded(false),
- onStateList(false),
- toStateAction(0),
- fromStateAction(0),
- eofAction(0),
- id(0),
- bAnyRegCurStateRef(false),
- partitionBoundary(false),
- inTrans(0),
- numInTrans(0)
- { }
-
- /* Transitions out. */
- RedTransList outSingle;
- RedTransList outRange;
- RedTransAp *defTrans;
-
- /* For flat conditions. */
- Key condLowKey, condHighKey;
- CondSpace **condList;
-
- /* For flat keys. */
- Key lowKey, highKey;
- RedTransAp **transList;
-
- /* The list of states that transitions from this state go to. */
- RedStateVect targStates;
-
- bool isFinal;
- bool labelNeeded;
- bool outNeeded;
- bool onStateList;
- RedAction *toStateAction;
- RedAction *fromStateAction;
- RedAction *eofAction;
- int id;
- StateCondList stateCondList;
- StateCondVect stateCondVect;
-
- /* Pointers for the list of states. */
- RedStateAp *prev, *next;
-
- bool anyRegCurStateRef() { return bAnyRegCurStateRef; }
- bool bAnyRegCurStateRef;
-
- int partition;
- bool partitionBoundary;
-
- RedTransAp **inTrans;
- int numInTrans;
-};
-
-/* List of states. */
-typedef DList<RedStateAp> RedStateList;
-
-/* Set of reduced transitions. Comparison is by pointer. */
-typedef BstSet< RedTransAp*, CmpOrd<RedTransAp*> > RedTransSet;
-
-/* Next version of the fsm machine. */
-struct RedFsmAp
-{
- RedFsmAp();
-
- bool wantComplete;
- bool forcedErrorState;
-
- int nextActionId;
- int nextTransId;
-
- /* Next State Id doubles as the total number of state ids. */
- int nextStateId;
-
- TransApSet transSet;
- ActionTableMap actionMap;
- RedStateList stateList;
- RedStateSet entryPoints;
- RedStateAp *startState;
- RedStateAp *errState;
- RedTransAp *errTrans;
- RedTransAp *errActionTrans;
- RedStateAp *firstFinState;
- int numFinStates;
- int nParts;
-
- bool bAnyToStateActions;
- bool bAnyFromStateActions;
- bool bAnyRegActions;
- bool bAnyEofActions;
- bool bAnyActionGotos;
- bool bAnyActionCalls;
- bool bAnyActionRets;
- bool bAnyRegActionRets;
- bool bAnyRegActionByValControl;
- bool bAnyRegNextStmt;
- bool bAnyRegCurStateRef;
- bool bAnyRegBreak;
- bool bAnyLmSwitchError;
- bool bAnyConditions;
-
- int maxState;
- int maxSingleLen;
- int maxRangeLen;
- int maxKeyOffset;
- int maxIndexOffset;
- int maxIndex;
- int maxActListId;
- int maxActionLoc;
- int maxActArrItem;
- unsigned long long maxSpan;
- unsigned long long maxCondSpan;
- int maxFlatIndexOffset;
- Key maxKey;
- int maxCondOffset;
- int maxCondLen;
- int maxCondSpaceId;
- int maxCondIndexOffset;
- int maxCond;
-
- bool anyActions();
- bool anyToStateActions() { return bAnyToStateActions; }
- bool anyFromStateActions() { return bAnyFromStateActions; }
- bool anyRegActions() { return bAnyRegActions; }
- bool anyEofActions() { return bAnyEofActions; }
- bool anyActionGotos() { return bAnyActionGotos; }
- bool anyActionCalls() { return bAnyActionCalls; }
- bool anyActionRets() { return bAnyActionRets; }
- bool anyRegActionRets() { return bAnyRegActionRets; }
- bool anyRegActionByValControl() { return bAnyRegActionByValControl; }
- bool anyRegNextStmt() { return bAnyRegNextStmt; }
- bool anyRegCurStateRef() { return bAnyRegCurStateRef; }
- bool anyRegBreak() { return bAnyRegBreak; }
- bool anyLmSwitchError() { return bAnyLmSwitchError; }
- bool anyConditions() { return bAnyConditions; }
-
-
-	/* Is it possible to extend a range by bumping ranges that span only
-	 * one character to the singles array? */
- bool canExtend( const RedTransList &list, int pos );
-
- /* Pick single transitions from the ranges. */
- void moveTransToSingle( RedStateAp *state );
- void chooseSingle();
-
- void makeFlat();
-
- /* Move a selected transition from ranges to default. */
- void moveToDefault( RedTransAp *defTrans, RedStateAp *state );
-
- /* Pick a default transition by largest span. */
- RedTransAp *chooseDefaultSpan( RedStateAp *state );
- void chooseDefaultSpan();
-
- /* Pick a default transition by most number of ranges. */
- RedTransAp *chooseDefaultNumRanges( RedStateAp *state );
- void chooseDefaultNumRanges();
-
- /* Pick a default transition tailored towards goto driven machine. */
- RedTransAp *chooseDefaultGoto( RedStateAp *state );
- void chooseDefaultGoto();
-
- /* Ordering states by transition connections. */
- void optimizeStateOrdering( RedStateAp *state );
- void optimizeStateOrdering();
-
- /* Ordering states by transition connections. */
- void depthFirstOrdering( RedStateAp *state );
- void depthFirstOrdering();
-
- /* Set state ids. */
- void sequentialStateIds();
- void sortStateIdsByFinal();
-
-	/* Arrange states by final id. This is a stable sort. */
- void sortStatesByFinal();
-
- /* Sorting states by id. */
- void sortByStateId();
-
- /* Locating the first final state. This is the final state with the lowest
- * id. */
- void findFirstFinState();
-
- void assignActionLocs();
-
- RedTransAp *getErrorTrans();
- RedStateAp *getErrorState();
-
- /* Is every char in the alphabet covered? */
- bool alphabetCovered( RedTransList &outRange );
-
- RedTransAp *allocateTrans( RedStateAp *targState, RedAction *actionTable );
-
- void partitionFsm( int nParts );
-
- void setInTrans();
-};
-
-
-#endif /* _REDFSM_H */
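CmpRedTransAp above orders reduced transitions by their target-state and action-table pointers, which is what lets allocateTrans() in redfsm.cpp reuse a single RedTransAp for every transition with the same (targ, action) pair. The sketch below mirrors that deduplication with std::set and stand-in types; State and Act are placeholders, not Ragel's classes.

#include <cassert>
#include <set>
#include <utility>

struct State {};   /* stand-in for RedStateAp */
struct Act {};     /* stand-in for RedAction */

int main()
{
	std::set< std::pair<const State*, const Act*> > transSet;
	State s1, s2;
	Act a1;

	transSet.insert( std::make_pair( &s1, &a1 ) );
	transSet.insert( std::make_pair( &s1, &a1 ) );   /* same targ and action: collapsed */
	transSet.insert( std::make_pair( &s2, &a1 ) );   /* different target: new entry */

	assert( transSet.size() == 2 );
	return 0;
}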
diff --git a/contrib/tools/ragel5/redfsm/xmlparse.cpp b/contrib/tools/ragel5/redfsm/xmlparse.cpp
deleted file mode 100644
index 6da8c50e91..0000000000
--- a/contrib/tools/ragel5/redfsm/xmlparse.cpp
+++ /dev/null
@@ -1,3549 +0,0 @@
-/* Automatically generated by Kelbt from "xmlparse.kl".
- *
- * Parts of this file are copied from Kelbt source covered by the GNU
- * GPL. As a special exception, you may use the parts of this file copied
- * from Kelbt source without restriction. The remainder is derived from
- * "xmlparse.kl" and inherits the copyright status of that file.
- */
-
-#line 1 "xmlparse.kl"
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "xmlparse.h"
-#include "common.h"
-#include "gendata.h"
-#include <iostream>
-
-#include <stdlib.h>
-//#include <malloc.h>
-
-using std::cout;
-using std::ostream;
-using std::istream;
-using std::cerr;
-using std::endl;
-
-Key readKey( char *td, char **end );
-long readOffsetPtr( char *td, char **end );
-unsigned long readLength( char *td );
-
-#line 117 "xmlparse.kh"
-#line 120 "xmlparse.kh"
-#line 163 "xmlparse.kh"
-#line 846 "xmlparse.kl"
-
-
-#line 54 "xmlparse.cpp"
-struct Parser_Lel_inline_item_type
-{
-#line 499 "xmlparse.kl"
-
- InlineItem *inlineItem;
-
-
-#line 61 "xmlparse.cpp"
-};
-
-struct Parser_Lel_inline_list
-{
-#line 480 "xmlparse.kl"
-
- InlineList *inlineList;
-
-
-#line 71 "xmlparse.cpp"
-};
-
-struct Parser_Lel_lm_action_list
-{
-#line 716 "xmlparse.kl"
-
- InlineList *inlineList;
-
-
-#line 81 "xmlparse.cpp"
-};
-
-struct Parser_Lel_tag_arg
-{
-#line 256 "xmlparse.kl"
-
- char *option;
-
-
-#line 91 "xmlparse.cpp"
-};
-
-struct Parser_Lel_tag_write_head
-{
-#line 220 "xmlparse.kl"
-
- InputLoc loc;
-
-
-#line 101 "xmlparse.cpp"
-};
-
-union Parser_UserData
-{
- struct Parser_Lel_inline_item_type inline_item_type;
- struct Parser_Lel_inline_list inline_list;
- struct Parser_Lel_lm_action_list lm_action_list;
- struct Parser_Lel_tag_arg tag_arg;
- struct Parser_Lel_tag_write_head tag_write_head;
- struct Token token;
-};
-
-struct Parser_LangEl
-{
- char *file;
- int line;
- int type;
- int reduction;
- int state;
- union Parser_UserData user;
- unsigned int retry;
- struct Parser_LangEl *next, *child;
-};
-
-#line 127 "xmlparse.cpp"
-unsigned int Parser_startState = 0;
-
-short Parser_indicies[] = {
- 142, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 140, 139, 0, 1, 283, 144, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 144, 144, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 144, -1, -1, -1, -1, -1,
- -1, -1, -1, 2, 146, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 151,
- 146, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 146, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 3, 143, -1, -1, -1,
- 4, 5, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 6, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 169, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 145, 147, 148, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 7, 153, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 153, -1, -1, -1, -1,
- -1, -1, 153, -1, 153, -1, -1, -1,
- -1, -1, -1, -1, 153, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 153, 153, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 8,
- 141, 9, 171, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 171, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 10, 11, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 174, -1, -1,
- -1, -1, -1, -1, 12, -1, 13, -1,
- -1, -1, -1, -1, -1, -1, 16, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 15, 14, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 152, 154, 155, 156, 157, 158,
- 159, -1, -1, -1, -1, -1, -1, 17,
- 149, 18, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 19, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 170, 150, 20, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 217, -1, -1, -1, -1,
- -1, -1, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, -1, 217, 217, 217, 217,
- 217, 217, 217, -1, -1, -1, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 21, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 217, -1, -1, -1, -1,
- -1, -1, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, -1, 217, 217, 217, 217,
- 217, 217, 217, -1, -1, -1, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 24, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 217, -1, -1, -1, -1,
- -1, -1, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, -1, 217, 217, 217, 217,
- 217, 217, 217, -1, -1, -1, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 23, 162, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 162, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 22, 176, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 176, -1, -1, -1, -1, 176, 176,
- 176, 176, -1, -1, -1, -1, -1, -1,
- 176, -1, 176, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 25, 168, 26, 164, 27, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 52, -1, -1, -1, -1, -1, -1,
- 28, 29, 30, 31, 32, 33, 34, 35,
- 37, 38, 39, 40, 41, 42, 43, 44,
- 45, -1, 53, 47, 51, 50, 48, 46,
- 49, -1, -1, -1, 36, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 216, -1, 218, 219,
- 220, 221, 222, 223, 224, 225, 226, 227,
- 228, 229, 230, 231, 232, 233, 234, 235,
- 236, 237, 238, 239, 240, 241, 242, 243,
- 54, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 55, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 161, 56,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 52, -1, -1, -1,
- -1, -1, -1, 28, 29, 30, 31, 32,
- 33, 34, 35, 37, 38, 39, 40, 41,
- 42, 43, 44, 45, -1, 53, 47, 51,
- 50, 48, 46, 49, -1, -1, -1, 36,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 216,
- -1, 218, 219, 220, 221, 222, 223, 224,
- 225, 226, 227, 228, 229, 230, 231, 232,
- 233, 234, 235, 236, 237, 238, 239, 240,
- 241, 242, 243, 57, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 52, -1, -1, -1, -1, -1, -1, 28,
- 29, 30, 31, 32, 33, 34, 35, 37,
- 38, 39, 40, 41, 42, 43, 44, 45,
- -1, 53, 47, 51, 50, 48, 46, 49,
- -1, -1, -1, 36, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 216, -1, 218, 219, 220,
- 221, 222, 223, 224, 225, 226, 227, 228,
- 229, 230, 231, 232, 233, 234, 235, 236,
- 237, 238, 239, 240, 241, 242, 243, 58,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 191, -1, -1, -1,
- -1, 59, 60, 212, 274, -1, -1, -1,
- -1, -1, -1, 61, -1, 279, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 175, 177, 178, 179,
- 180, 181, 182, 183, -1, -1, 62, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 63, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 64, -1, -1,
- 65, 172, 165, 67, 68, 69, 70, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 217, -1, -1, -1,
- -1, -1, -1, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, -1, 217, 217, 217,
- 217, 217, 217, 217, -1, -1, -1, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 71, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 217, -1, -1, -1,
- -1, -1, -1, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, -1, 217, 217, 217,
- 217, 217, 217, 217, -1, -1, -1, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 72, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 217, -1, -1, -1,
- -1, -1, -1, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, -1, 217, 217, 217,
- 217, 217, 217, 217, -1, -1, -1, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 73, 74,
- 91, 75, 76, 77, 217, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 217, -1, -1, -1, -1, -1, -1,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, -1, 217, 217, 217, 217, 217, 217,
- 217, -1, -1, -1, 217, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 78, 79, 217, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 217, -1, -1, -1, -1, -1,
- -1, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, -1, 217, 217, 217, 217, 217,
- 217, 217, -1, -1, -1, 217, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 80, 81, 82, 83,
- 89, 85, 88, 90, 87, 86, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 217, -1, -1, -1, -1,
- -1, -1, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, -1, 217, 217, 217, 217,
- 217, 217, 217, -1, -1, -1, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 66, 271, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 271, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 84, 160, 92, 167, 166, 173,
- 93, 94, 188, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 188, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 95,
- 193, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 193, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 96, 214, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 214,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 97,
- 276, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 276,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 98,
- 100, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 99, 281, 101, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, -1,
- -1, 28, 29, 30, 31, 32, 33, 34,
- 35, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, -1, 53, 47, 51, 50, 48,
- 46, 49, -1, -1, -1, 36, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 216, -1, 218,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 229, 230, 231, 232, 233, 234,
- 235, 236, 237, 238, 239, 240, 241, 242,
- 243, 244, 245, 246, 247, 102, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, -1,
- -1, 28, 29, 30, 31, 32, 33, 34,
- 35, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, -1, 53, 47, 51, 50, 48,
- 46, 49, -1, -1, -1, 36, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 216, -1, 218,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 229, 230, 231, 232, 233, 234,
- 235, 236, 237, 238, 239, 240, 241, 242,
- 243, 103, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 52, -1,
- -1, -1, -1, -1, -1, 28, 29, 30,
- 31, 32, 33, 34, 35, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, -1, 53,
- 47, 51, 50, 48, 46, 49, -1, -1,
- -1, 36, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 216, -1, 218, 219, 220, 221, 222,
- 223, 224, 225, 226, 227, 228, 229, 230,
- 231, 232, 233, 234, 235, 236, 237, 238,
- 239, 240, 241, 242, 243, 104, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, -1,
- -1, 28, 29, 30, 31, 32, 33, 34,
- 35, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, -1, 53, 47, 51, 50, 48,
- 46, 49, -1, -1, -1, 36, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 216, -1, 218,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 229, 230, 231, 232, 233, 234,
- 235, 236, 237, 238, 239, 240, 241, 242,
- 243, 251, 253, 254, 255, 105, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 52, -1, -1, -1, -1, -1,
- -1, 28, 29, 30, 31, 32, 33, 34,
- 35, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, -1, 53, 47, 51, 50, 48,
- 46, 49, -1, -1, -1, 36, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 216, -1, 218,
- 219, 220, 221, 222, 223, 224, 225, 226,
- 227, 228, 229, 230, 231, 232, 233, 234,
- 235, 236, 237, 238, 239, 240, 241, 242,
- 243, 257, 106, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 52,
- -1, -1, -1, -1, -1, -1, 28, 29,
- 30, 31, 32, 33, 34, 35, 37, 38,
- 39, 40, 41, 42, 43, 44, 45, -1,
- 53, 47, 51, 50, 48, 46, 49, -1,
- -1, -1, 36, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 216, -1, 218, 219, 220, 221,
- 222, 223, 224, 225, 226, 227, 228, 229,
- 230, 231, 232, 233, 234, 235, 236, 237,
- 238, 239, 240, 241, 242, 243, 259, 260,
- 261, 107, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 108, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 270, 263,
- 267, 266, 264, 262, 265, 252, 163, 184,
- 185, 109, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 110, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 187,
- 111, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 112, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 192, 113, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- 114, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 213, 115, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 116, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 275, 118, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 100, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 280, 117,
- 268, 248, 249, 250, 256, 258, 269, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 217, -1, -1, -1,
- -1, -1, -1, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, -1, 217, 217, 217,
- 217, 217, 217, 217, -1, -1, -1, 217,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 119, 186,
- 120, 190, 196, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, 196, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 196, -1, -1,
- -1, -1, 196, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, 121, 211, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 217, -1, -1, -1, -1,
- -1, -1, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, 217, 217, 217, 217, 217,
- 217, 217, 217, -1, 217, 217, 217, 217,
- 217, 217, 217, -1, -1, -1, 217, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, 122, 273, 123,
- 282, 278, 124, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 52,
- -1, -1, -1, -1, -1, -1, 28, 29,
- 30, 31, 32, 33, 34, 35, 37, 38,
- 39, 40, 41, 42, 43, 44, 45, -1,
- 53, 47, 51, 50, 48, 46, 49, -1,
- -1, -1, 36, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 216, -1, 218, 219, 220, 221,
- 222, 223, 224, 225, 226, 227, 228, 229,
- 230, 231, 232, 233, 234, 235, 236, 237,
- 238, 239, 240, 241, 242, 243, 189, 125,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 207, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 126, -1, -1, -1, -1, 202,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 195, 197, 198, 199, 127, -1,
- -1, 128, 129, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 52,
- -1, -1, -1, -1, -1, -1, 28, 29,
- 30, 31, 32, 33, 34, 35, 37, 38,
- 39, 40, 41, 42, 43, 44, 45, -1,
- 53, 47, 51, 50, 48, 46, 49, -1,
- -1, -1, 36, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 216, -1, 218, 219, 220, 221,
- 222, 223, 224, 225, 226, 227, 228, 229,
- 230, 231, 232, 233, 234, 235, 236, 237,
- 238, 239, 240, 241, 242, 243, 277, 272,
- 194, 130, 204, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 204, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 131, 209, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 209, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, 132, 215,
- 200, 133, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 134, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, 203, 135, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, 136, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, -1,
- -1, -1, -1, -1, -1, -1, -1, 208,
- 201, 137, 206, 138, 205, 210,
-};
-
-unsigned short Parser_keys[] = {
- 129, 188, 185, 185, 47, 189, 47, 195,
- 47, 207, 47, 196, 129, 129, 47, 47,
- 47, 208, 47, 210, 131, 131, 47, 209,
- 130, 130, 47, 47, 47, 206, 47, 206,
- 47, 206, 47, 204, 47, 211, 180, 180,
- 47, 47, 143, 143, 47, 266, 47, 205,
- 47, 266, 47, 266, 47, 272, 184, 184,
- 145, 145, 47, 47, 47, 47, 47, 47,
- 47, 47, 47, 206, 47, 206, 47, 206,
- 47, 47, 47, 47, 47, 47, 47, 47,
- 47, 47, 47, 206, 47, 47, 47, 206,
- 47, 47, 47, 47, 47, 47, 47, 47,
- 47, 47, 47, 47, 47, 47, 47, 47,
- 47, 47, 47, 206, 47, 267, 153, 153,
- 47, 47, 181, 181, 182, 182, 136, 136,
- 47, 47, 47, 47, 47, 220, 47, 223,
- 47, 237, 47, 270, 150, 274, 47, 266,
- 155, 155, 156, 156, 157, 157, 158, 158,
- 47, 266, 47, 266, 47, 266, 162, 162,
- 163, 163, 164, 164, 165, 165, 47, 266,
- 167, 167, 47, 266, 169, 169, 170, 170,
- 171, 171, 47, 268, 174, 174, 175, 175,
- 176, 176, 177, 177, 178, 178, 179, 179,
- 183, 183, 154, 154, 137, 137, 138, 138,
- 47, 221, 47, 224, 47, 238, 47, 271,
- 47, 274, 47, 47, 148, 148, 159, 159,
- 160, 160, 161, 161, 166, 166, 168, 168,
- 173, 173, 47, 206, 147, 147, 47, 47,
- 132, 132, 47, 225, 139, 139, 47, 206,
- 140, 140, 47, 47, 150, 150, 149, 149,
- 47, 266, 171, 171, 47, 233, 47, 266,
- 142, 142, 148, 148, 133, 133, 47, 47,
- 47, 231, 47, 234, 141, 141, 146, 146,
- 47, 232, 47, 235, 151, 151, 47, 47,
- 134, 134, 47, 47, 152, 152, 135, 135,
- 0, 0
-};
-
-unsigned int Parser_offsets[] = {
- 0, 60, 61, 204, 353, 514, 664, 665,
- 666, 828, 992, 993, 1156, 1157, 1158, 1318,
- 1478, 1638, 1796, 1961, 1962, 1963, 1964, 2184,
- 2343, 2563, 2783, 3009, 3010, 3011, 3012, 3013,
- 3014, 3015, 3175, 3335, 3495, 3496, 3497, 3498,
- 3499, 3500, 3660, 3661, 3821, 3822, 3823, 3824,
- 3825, 3826, 3827, 3828, 3829, 3830, 3990, 4211,
- 4212, 4213, 4214, 4215, 4216, 4217, 4218, 4392,
- 4569, 4760, 4984, 5109, 5329, 5330, 5331, 5332,
- 5333, 5553, 5773, 5993, 5994, 5995, 5996, 5997,
- 6217, 6218, 6438, 6439, 6440, 6441, 6663, 6664,
- 6665, 6666, 6667, 6668, 6669, 6670, 6671, 6672,
- 6673, 6848, 7026, 7218, 7443, 7671, 7672, 7673,
- 7674, 7675, 7676, 7677, 7678, 7679, 7839, 7840,
- 7841, 7842, 8021, 8022, 8182, 8183, 8184, 8185,
- 8186, 8406, 8407, 8594, 8814, 8815, 8816, 8817,
- 8818, 9003, 9191, 9192, 9193, 9379, 9568, 9569,
- 9570, 9571, 9572, 9573, 9574
-};
-
-unsigned short Parser_targs[] = {
- 1, 2, 3, 4, 5, 6, 7, 8,
- 9, 10, 11, 12, 13, 14, 15, 16,
- 17, 18, 19, 20, 21, 22, 23, 24,
- 25, 26, 27, 28, 29, 30, 31, 32,
- 33, 34, 35, 36, 37, 38, 39, 40,
- 41, 42, 43, 44, 45, 46, 47, 48,
- 49, 50, 51, 52, 53, 54, 55, 56,
- 57, 58, 59, 60, 61, 62, 63, 64,
- 65, 66, 67, 68, 69, 70, 71, 72,
- 73, 74, 75, 76, 77, 78, 79, 80,
- 81, 82, 83, 84, 85, 86, 87, 88,
- 89, 90, 91, 92, 93, 94, 95, 96,
- 97, 98, 99, 100, 101, 102, 103, 104,
- 105, 106, 107, 108, 109, 110, 111, 112,
- 113, 114, 115, 116, 117, 118, 119, 120,
- 121, 122, 123, 124, 125, 126, 127, 128,
- 129, 130, 131, 132, 133, 134, 135, 136,
- 137, 138, 139, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140, 140, 140, 140, 140,
- 140, 140, 140, 140
-};
-
-unsigned int Parser_actInds[] = {
- 0, 2, 4, 6, 8, 10, 12, 14,
- 16, 18, 20, 22, 24, 26, 28, 30,
- 32, 34, 36, 38, 40, 42, 44, 46,
- 48, 50, 52, 54, 56, 58, 60, 62,
- 64, 66, 68, 70, 72, 74, 76, 78,
- 80, 82, 84, 86, 88, 90, 92, 94,
- 96, 98, 100, 102, 104, 106, 108, 110,
- 112, 114, 116, 118, 120, 122, 124, 126,
- 128, 130, 132, 134, 136, 138, 140, 142,
- 144, 146, 148, 150, 152, 154, 156, 158,
- 160, 162, 164, 166, 168, 170, 172, 174,
- 176, 178, 180, 182, 184, 186, 188, 190,
- 192, 194, 196, 198, 200, 202, 204, 206,
- 208, 210, 212, 214, 216, 218, 220, 222,
- 224, 226, 228, 230, 232, 234, 236, 238,
- 240, 242, 244, 246, 248, 250, 252, 254,
- 256, 258, 260, 262, 264, 266, 268, 270,
- 272, 274, 276, 278, 280, 282, 284, 286,
- 288, 290, 292, 294, 296, 298, 300, 302,
- 304, 306, 308, 310, 312, 314, 316, 318,
- 320, 322, 324, 326, 328, 330, 332, 334,
- 336, 338, 340, 342, 344, 346, 348, 350,
- 352, 354, 356, 358, 360, 362, 364, 366,
- 368, 370, 372, 374, 376, 378, 380, 382,
- 384, 386, 388, 390, 392, 394, 396, 398,
- 400, 402, 404, 406, 408, 410, 412, 414,
- 416, 418, 420, 422, 424, 426, 428, 430,
- 432, 434, 436, 438, 440, 442, 444, 446,
- 448, 450, 452, 454, 456, 458, 460, 462,
- 464, 466, 468, 470, 472, 474, 476, 478,
- 480, 482, 484, 486, 488, 490, 492, 494,
- 496, 498, 500, 502, 504, 506, 508, 510,
- 512, 514, 516, 518, 520, 522, 524, 526,
- 528, 530, 532, 534, 536, 538, 540, 542,
- 544, 546, 548, 550, 552, 554, 556, 558,
- 560, 562, 564, 566
-};
-
-unsigned int Parser_actions[] = {
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 1, 0,
- 1, 0, 1, 0, 1, 0, 3, 0,
- 6, 0, 11, 0, 15, 0, 19, 0,
- 22, 0, 27, 0, 30, 0, 35, 0,
- 39, 0, 43, 0, 47, 0, 51, 0,
- 55, 0, 58, 0, 63, 0, 67, 0,
- 71, 0, 75, 0, 79, 0, 83, 0,
- 87, 0, 91, 0, 94, 0, 99, 0,
- 103, 0, 107, 0, 111, 0, 115, 0,
- 119, 0, 123, 0, 127, 0, 130, 0,
- 135, 0, 139, 0, 143, 0, 147, 0,
- 150, 0, 155, 0, 159, 0, 163, 0,
- 167, 0, 171, 0, 175, 0, 179, 0,
- 183, 0, 187, 0, 191, 0, 195, 0,
- 198, 0, 203, 0, 207, 0, 211, 0,
- 215, 0, 218, 0, 223, 0, 227, 0,
- 230, 0, 235, 0, 239, 0, 243, 0,
- 247, 0, 251, 0, 255, 0, 259, 0,
- 262, 0, 267, 0, 271, 0, 275, 0,
- 279, 0, 282, 0, 287, 0, 291, 0,
- 295, 0, 299, 0, 302, 0, 307, 0,
- 311, 0, 314, 0, 319, 0, 323, 0,
- 327, 0, 331, 0, 335, 0, 339, 0,
- 343, 0, 347, 0, 351, 0, 355, 0,
- 359, 0, 363, 0, 367, 0, 371, 0,
- 375, 0, 379, 0, 383, 0, 387, 0,
- 391, 0, 395, 0, 399, 0, 403, 0,
- 407, 0, 411, 0, 415, 0, 419, 0,
- 423, 0, 427, 0, 431, 0, 435, 0,
- 439, 0, 443, 0, 447, 0, 451, 0,
- 455, 0, 459, 0, 463, 0, 467, 0,
- 471, 0, 475, 0, 479, 0, 483, 0,
- 487, 0, 491, 0, 495, 0, 499, 0,
- 503, 0, 507, 0, 511, 0, 515, 0,
- 519, 0, 523, 0, 527, 0, 530, 0,
- 535, 0, 539, 0, 543, 0, 547, 0,
- 550, 0, 555, 0, 559, 0, 563, 0,
- 567, 0, 571, 0, 575, 0, 1, 0
-};
-
-int Parser_commitLen[] = {
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 0, 0, 0, 0, 0,
- 0, 0, 0, 2
-};
-
-unsigned int Parser_fssProdIdIndex[] = {
- 0, 1, 2, 3, 4, 5, 6, 7,
- 8, 9, 10, 11, 12, 13, 14, 15,
- 16, 17, 18, 19, 20, 21, 22, 23,
- 24, 25, 26, 27, 28, 29, 30, 31,
- 32, 33, 34, 35, 36, 37, 38, 39,
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63,
- 64, 65, 66, 67, 68, 69, 70, 71,
- 72, 73, 74, 75, 76, 77, 78, 79,
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127,
- 128, 129, 130, 131, 132, 133, 134, 135,
- 136, 137, 138, 139, 140, 141, 142, 143,
- 144
-};
-
-char Parser_fssProdLengths[] = {
- 1, 0, 5, 1, 2, 0, 2, 0,
- 1, 1, 3, 4, 1, 2, 0, 1,
- 1, 1, 1, 1, 1, 4, 2, 0,
- 3, 3, 4, 4, 4, 4, 1, 2,
- 0, 3, 4, 1, 2, 0, 1, 1,
- 1, 1, 1, 1, 1, 3, 3, 4,
- 2, 0, 3, 4, 1, 2, 0, 4,
- 2, 0, 1, 1, 1, 3, 4, 1,
- 2, 0, 3, 4, 1, 2, 0, 3,
- 4, 1, 2, 0, 4, 2, 0, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 1, 1, 1, 1, 1, 1, 1,
- 1, 3, 3, 3, 3, 4, 4, 4,
- 3, 3, 3, 3, 3, 4, 3, 4,
- 3, 3, 3, 3, 3, 3, 3, 3,
- 3, 4, 4, 2, 0, 4, 4, 1,
- 2, 0, 3, 4, 1, 2, 1, 3,
- 1
-};
-
-unsigned short Parser_prodLhsIds[] = {
- 187, 187, 186, 188, 189, 189, 190, 190,
- 192, 192, 193, 191, 195, 196, 196, 197,
- 197, 197, 197, 197, 197, 202, 204, 204,
- 205, 198, 199, 200, 201, 194, 207, 208,
- 208, 209, 203, 210, 211, 211, 212, 212,
- 212, 212, 212, 212, 212, 213, 214, 215,
- 220, 220, 221, 216, 222, 223, 223, 224,
- 225, 225, 226, 226, 226, 227, 228, 230,
- 231, 231, 232, 229, 233, 234, 234, 235,
- 217, 236, 237, 237, 238, 206, 206, 239,
- 239, 239, 239, 239, 239, 239, 239, 239,
- 239, 239, 239, 239, 239, 239, 239, 239,
- 239, 239, 239, 239, 239, 239, 239, 239,
- 239, 241, 242, 243, 244, 245, 246, 247,
- 248, 249, 250, 251, 252, 253, 254, 255,
- 256, 257, 258, 259, 260, 261, 262, 263,
- 264, 265, 266, 267, 267, 268, 218, 269,
- 270, 270, 271, 219, 272, 273, 273, 274,
- 275
-};
-
-const char *Parser_prodNames[] = {
- "start-1",
- "start-2",
- "tag_ragel-1",
- "tag_ragel_head-1",
- "ragel_def_list-1",
- "ragel_def_list-2",
- "host_or_write_list-1",
- "host_or_write_list-2",
- "host_or_write-1",
- "host_or_write-2",
- "tag_host-1",
- "ragel_def-1",
- "tag_ragel_def_head-1",
- "ragel_def_item_list-1",
- "ragel_def_item_list-2",
- "ragel_def_item-1",
- "ragel_def_item-2",
- "ragel_def_item-3",
- "ragel_def_item-4",
- "ragel_def_item-5",
- "ragel_def_item-6",
- "tag_export_list-1",
- "export_list-1",
- "export_list-2",
- "tag_export-1",
- "tag_alph_type-1",
- "tag_getkey_expr-1",
- "tag_access_expr-1",
- "tag_curstate_expr-1",
- "tag_write-1",
- "tag_write_head-1",
- "write_option_list-1",
- "write_option_list-2",
- "tag_arg-1",
- "tag_machine-1",
- "tag_machine_head-1",
- "machine_item_list-1",
- "machine_item_list-2",
- "machine_item-1",
- "machine_item-2",
- "machine_item-3",
- "machine_item-4",
- "machine_item-5",
- "machine_item-6",
- "machine_item-7",
- "tag_start_state-1",
- "tag_error_state-1",
- "tag_entry_points-1",
- "entry_point_list-1",
- "entry_point_list-2",
- "tag_entry-1",
- "tag_state_list-1",
- "tag_state_list_head-1",
- "state_list-1",
- "state_list-2",
- "tag_state-1",
- "state_item_list-1",
- "state_item_list-2",
- "state_item-1",
- "state_item-2",
- "state_item-3",
- "tag_state_actions-1",
- "tag_state_cond_list-1",
- "tag_state_cond_list_head-1",
- "state_cond_list-1",
- "state_cond_list-2",
- "state_cond-1",
- "tag_trans_list-1",
- "tag_trans_list_head-1",
- "trans_list-1",
- "trans_list-2",
- "tag_trans-1",
- "tag_action_list-1",
- "tag_action_list_head-1",
- "action_list-1",
- "action_list-2",
- "tag_action-1",
- "inline_list-1",
- "inline_list-2",
- "inline_item-1",
- "inline_item-2",
- "inline_item-3",
- "inline_item-4",
- "inline_item-5",
- "inline_item-6",
- "inline_item-7",
- "inline_item-8",
- "inline_item-9",
- "inline_item-10",
- "inline_item-11",
- "inline_item-12",
- "inline_item-13",
- "inline_item-14",
- "inline_item-15",
- "inline_item-16",
- "inline_item-17",
- "inline_item-18",
- "inline_item-19",
- "inline_item-20",
- "inline_item-21",
- "inline_item-22",
- "inline_item-23",
- "inline_item-24",
- "inline_item-25",
- "inline_item-26",
- "tag_text-1",
- "tag_goto-1",
- "tag_call-1",
- "tag_next-1",
- "tag_goto_expr-1",
- "tag_call_expr-1",
- "tag_next_expr-1",
- "tag_ret-1",
- "tag_break-1",
- "tag_pchar-1",
- "tag_char-1",
- "tag_hold-1",
- "tag_exec-1",
- "tag_holdte-1",
- "tag_execte-1",
- "tag_curs-1",
- "tag_targs-1",
- "tag_il_entry-1",
- "tag_init_tokstart-1",
- "tag_init_act-1",
- "tag_get_tokend-1",
- "tag_set_tokstart-1",
- "tag_set_tokend-1",
- "tag_set_act-1",
- "tag_sub_action-1",
- "tag_lm_switch-1",
- "lm_action_list-1",
- "lm_action_list-2",
- "tag_inline_action-1",
- "tag_action_table_list-1",
- "tag_action_table_list_head-1",
- "action_table_list-1",
- "action_table_list-2",
- "tag_action_table-1",
- "tag_cond_space_list-1",
- "tag_cond_space_list_head-1",
- "cond_space_list-1",
- "cond_space_list-2",
- "tag_cond_space-1",
- "_start-1"
-};
-
-const char *Parser_lelNames[] = {
- "D-0",
- "D-1",
- "D-2",
- "D-3",
- "D-4",
- "D-5",
- "D-6",
- "D-7",
- "D-8",
- "D-9",
- "D-10",
- "D-11",
- "D-12",
- "D-13",
- "D-14",
- "D-15",
- "D-16",
- "D-17",
- "D-18",
- "D-19",
- "D-20",
- "D-21",
- "D-22",
- "D-23",
- "D-24",
- "D-25",
- "D-26",
- "D-27",
- "D-28",
- "D-29",
- "D-30",
- "D-31",
- "D-32",
- "!",
- "\"",
- "#",
- "$",
- "%",
- "&",
- "'",
- "(",
- ")",
- "*",
- "+",
- ",",
- "-",
- ".",
- "/",
- "0",
- "1",
- "2",
- "3",
- "4",
- "5",
- "6",
- "7",
- "8",
- "9",
- ":",
- ";",
- "<",
- "=",
- ">",
- "?",
- "@",
- "A",
- "B",
- "C",
- "D",
- "E",
- "F",
- "G",
- "H",
- "I",
- "J",
- "K",
- "L",
- "M",
- "N",
- "O",
- "P",
- "Q",
- "R",
- "S",
- "T",
- "U",
- "V",
- "W",
- "X",
- "Y",
- "Z",
- "[",
- "\\",
- "]",
- "^",
- "_",
- "`",
- "a",
- "b",
- "c",
- "d",
- "e",
- "f",
- "g",
- "h",
- "i",
- "j",
- "k",
- "l",
- "m",
- "n",
- "o",
- "p",
- "q",
- "r",
- "s",
- "t",
- "u",
- "v",
- "w",
- "x",
- "y",
- "z",
- "{",
- "|",
- "}",
- "~",
- "D-127",
- "TAG_unknown",
- "TAG_ragel",
- "TAG_ragel_def",
- "TAG_host",
- "TAG_state_list",
- "TAG_state",
- "TAG_trans_list",
- "TAG_t",
- "TAG_machine",
- "TAG_start_state",
- "TAG_error_state",
- "TAG_action_list",
- "TAG_action_table_list",
- "TAG_action",
- "TAG_action_table",
- "TAG_alphtype",
- "TAG_element",
- "TAG_getkey",
- "TAG_state_actions",
- "TAG_entry_points",
- "TAG_sub_action",
- "TAG_cond_space_list",
- "TAG_cond_space",
- "TAG_cond_list",
- "TAG_c",
- "TAG_exports",
- "TAG_ex",
- "TAG_text",
- "TAG_goto",
- "TAG_call",
- "TAG_next",
- "TAG_goto_expr",
- "TAG_call_expr",
- "TAG_next_expr",
- "TAG_ret",
- "TAG_pchar",
- "TAG_char",
- "TAG_hold",
- "TAG_exec",
- "TAG_holdte",
- "TAG_execte",
- "TAG_curs",
- "TAG_targs",
- "TAG_entry",
- "TAG_data",
- "TAG_lm_switch",
- "TAG_init_act",
- "TAG_set_act",
- "TAG_set_tokend",
- "TAG_get_tokend",
- "TAG_init_tokstart",
- "TAG_set_tokstart",
- "TAG_write",
- "TAG_curstate",
- "TAG_access",
- "TAG_break",
- "TAG_arg",
- "_eof",
- "tag_ragel",
- "start",
- "tag_ragel_head",
- "ragel_def_list",
- "host_or_write_list",
- "ragel_def",
- "host_or_write",
- "tag_host",
- "tag_write",
- "tag_ragel_def_head",
- "ragel_def_item_list",
- "ragel_def_item",
- "tag_alph_type",
- "tag_getkey_expr",
- "tag_access_expr",
- "tag_curstate_expr",
- "tag_export_list",
- "tag_machine",
- "export_list",
- "tag_export",
- "inline_list",
- "tag_write_head",
- "write_option_list",
- "tag_arg",
- "tag_machine_head",
- "machine_item_list",
- "machine_item",
- "tag_start_state",
- "tag_error_state",
- "tag_entry_points",
- "tag_state_list",
- "tag_action_list",
- "tag_action_table_list",
- "tag_cond_space_list",
- "entry_point_list",
- "tag_entry",
- "tag_state_list_head",
- "state_list",
- "tag_state",
- "state_item_list",
- "state_item",
- "tag_state_actions",
- "tag_state_cond_list",
- "tag_trans_list",
- "tag_state_cond_list_head",
- "state_cond_list",
- "state_cond",
- "tag_trans_list_head",
- "trans_list",
- "tag_trans",
- "tag_action_list_head",
- "action_list",
- "tag_action",
- "inline_item",
- "inline_item_type",
- "tag_text",
- "tag_goto",
- "tag_call",
- "tag_next",
- "tag_goto_expr",
- "tag_call_expr",
- "tag_next_expr",
- "tag_ret",
- "tag_break",
- "tag_pchar",
- "tag_char",
- "tag_hold",
- "tag_exec",
- "tag_holdte",
- "tag_execte",
- "tag_curs",
- "tag_targs",
- "tag_il_entry",
- "tag_init_tokstart",
- "tag_init_act",
- "tag_get_tokend",
- "tag_set_tokstart",
- "tag_set_tokend",
- "tag_set_act",
- "tag_sub_action",
- "tag_lm_switch",
- "lm_action_list",
- "tag_inline_action",
- "tag_action_table_list_head",
- "action_table_list",
- "tag_action_table",
- "tag_cond_space_list_head",
- "cond_space_list",
- "tag_cond_space",
- "_start"
-};
-
-#line 851 "xmlparse.kl"
-
-
-void Parser::init()
-{
- #line 2079 "xmlparse.cpp"
- curs = Parser_startState;
- pool = 0;
- freshEl = (struct Parser_LangEl*) malloc( sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- stackTop = freshEl;
- stackTop->type = 0;
- stackTop->state = -1;
- stackTop->next = 0;
- stackTop->child = 0;
- freshPos = 1;
- lastFinal = stackTop;
- numRetry = 0;
- numNodes = 0;
- errCount = 0;
-#line 856 "xmlparse.kl"
-}
-
-int Parser::parseLangEl( int type, const Token *token )
-{
- #line 2101 "xmlparse.cpp"
-#define reject() induceReject = 1
-
- int pos, targState;
- unsigned int *action;
- int rhsLen;
- struct Parser_LangEl *rhs[32];
- struct Parser_LangEl *lel;
- struct Parser_LangEl *input;
- char induceReject;
-
- if ( curs < 0 )
- return 0;
-
- if ( pool == 0 ) {
- if ( freshPos == 8128 ) {
- freshEl = (struct Parser_LangEl*) malloc(
- sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- freshPos = 0;
- }
- input = freshEl + freshPos++;
- }
- else {
- input = pool;
- pool = pool->next;
- }
- numNodes += 1;
- input->type = type;
- input->user.token = *token;
- input->next = 0;
- input->retry = 0;
- input->child = 0;
-
-again:
- if ( input == 0 )
- goto _out;
-
- lel = input;
- if ( lel->type < Parser_keys[curs<<1] || lel->type > Parser_keys[(curs<<1)+1] )
- goto parseError;
-
- pos = Parser_indicies[Parser_offsets[curs] + (lel->type - Parser_keys[curs<<1])];
- if ( pos < 0 )
- goto parseError;
-
- induceReject = 0;
- targState = Parser_targs[pos];
- action = Parser_actions + Parser_actInds[pos];
- if ( lel->retry & 0x0000ffff )
- action += (lel->retry & 0x0000ffff);
-
- if ( *action & 0x1 ) {
- #ifdef LOG_ACTIONS
- cerr << "shifted: " << Parser_lelNames[lel->type];
- #endif
- input = input->next;
- lel->state = curs;
- lel->next = stackTop;
- stackTop = lel;
-
- if ( action[1] == 0 )
- lel->retry &= 0xffff0000;
- else {
- lel->retry += 1;
- numRetry += 1;
- #ifdef LOG_ACTIONS
- cerr << " retry: " << stackTop;
- #endif
- }
- #ifdef LOG_ACTIONS
- cerr << endl;
- #endif
- }
-
- if ( Parser_commitLen[pos] != 0 ) {
- struct Parser_LangEl *commitHead = stackTop;
- int absCommitLen = Parser_commitLen[pos];
-
- #ifdef LOG_ACTIONS
- cerr << "running commit of length: " << Parser_commitLen[pos] << endl;
- #endif
-
- if ( absCommitLen < 0 ) {
- commitHead = commitHead->next;
- absCommitLen = -1 * absCommitLen;
- }
- {
- struct Parser_LangEl *lel = commitHead;
- struct Parser_LangEl **cmStack = (struct Parser_LangEl**) malloc( sizeof(struct Parser_LangEl) * numNodes);
- int n = absCommitLen, depth = 0, sp = 0;
-
-commit_head:
- if ( lel->retry > 0 ) {
- if ( lel->retry & 0x0000ffff )
- numRetry -= 1;
- if ( lel->retry & 0xffff0000 )
- numRetry -= 1;
- lel->retry = 0;
- }
-
- /* If depth is > 0 then move over lel freely, otherwise, make
- * sure that we have not already done n steps down the line. */
- if ( lel->next != 0 && ( depth > 0 || n > 1 ) ) {
- cmStack[sp++] = lel;
- lel = lel->next;
-
- /* If we are at the top level count the steps down the line. */
- if ( depth == 0 )
- n -= 1;
- goto commit_head;
- }
-
-commit_reverse:
- if ( lel->child != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->child;
-
- /* When we move down we need to increment the depth. */
- depth += 1;
- goto commit_head;
- }
-
-commit_upwards:
- if ( sp > 0 ) {
- /* Figure out which place to return to. */
- if ( cmStack[sp-1]->next == lel ) {
- lel = cmStack[--sp];
- goto commit_reverse;
- }
- else {
- /* Going back up, adjust the depth. */
- lel = cmStack[--sp];
- depth -= 1;
- goto commit_upwards;
- }
- }
- free( cmStack );
- }
- if ( numRetry == 0 ) {
- #ifdef LOG_ACTIONS
- cerr << "number of retries is zero, "
- "executing final actions" << endl;
- #endif
- {
- struct Parser_LangEl *lel = commitHead;
- struct Parser_LangEl **cmStack = (struct Parser_LangEl**) malloc( sizeof( struct Parser_LangEl) * numNodes);
- int sp = 0;
- char doExec = 0;
-
-final_head:
- if ( lel == lastFinal ) {
- doExec = 1;
- goto hit_final;
- }
-
- if ( lel->next != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->next;
- goto final_head;
- }
-
-final_reverse:
-
- if ( lel->child != 0 ) {
- cmStack[sp++] = lel;
- lel = lel->child;
- goto final_head;
- }
-
-final_upwards:
-
- if ( doExec ) {
-{
- if ( lel->type < 186 ) {
- }
- else {
- struct Parser_LangEl *redLel = lel;
- if ( redLel->child != 0 ) {
- int r = Parser_fssProdLengths[redLel->reduction] - 1;
- struct Parser_LangEl *rhsEl = redLel->child;
- while ( rhsEl != 0 ) {
- rhs[r--] = rhsEl;
- rhsEl = rhsEl->next;
- }
- }
-switch ( lel->reduction ) {
-case 1: {
-#line 46 "xmlparse.kl"
-
- /* If we get no input the assumption is that the frontend died and
- * emitted an error. */
- errCount += 1;
-
-
-#line 2297 "xmlparse.cpp"
-} break;
-case 3: {
-#line 55 "xmlparse.kl"
-
- Attribute *fileNameAttr = (&rhs[0]->user.token)->tag->findAttr( "filename" );
- if ( fileNameAttr == 0 ) {
- error((&rhs[0]->user.token)->loc) << "tag <ragel> requires a filename attribute" << endl;
- exit(1);
- }
- else {
- sourceFileName = fileNameAttr->value;
-
- Attribute *langAttr = (&rhs[0]->user.token)->tag->findAttr( "lang" );
- if ( langAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <ragel> requires a lang attribute" << endl;
- else {
- if ( strcmp( langAttr->value, "C" ) == 0 ) {
- hostLangType = CCode;
- hostLang = &hostLangC;
- }
- else if ( strcmp( langAttr->value, "D" ) == 0 ) {
- hostLangType = DCode;
- hostLang = &hostLangD;
- }
- else if ( strcmp( langAttr->value, "Java" ) == 0 ) {
- hostLangType = JavaCode;
- hostLang = &hostLangJava;
- }
- else if ( strcmp( langAttr->value, "Ruby" ) == 0 ) {
- hostLangType = RubyCode;
- hostLang = &hostLangRuby;
- }
- else {
- error((&rhs[0]->user.token)->loc) << "expecting lang attribute to be "
- "one of C, D, Java or Ruby" << endl;
- }
-
- outStream = openOutput( sourceFileName );
- }
- }
-
-
-#line 2340 "xmlparse.cpp"
-} break;
-case 10: {
-#line 105 "xmlparse.kl"
-
- Attribute *lineAttr = (&rhs[0]->user.token)->tag->findAttr( "line" );
- if ( lineAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <host> requires a line attribute" << endl;
- else {
- int line = atoi( lineAttr->value );
- if ( outputActive )
- lineDirective( *outStream, sourceFileName, line );
- }
-
- if ( outputActive )
- *outStream << (&rhs[2]->user.token)->tag->content;
-
-
-#line 2358 "xmlparse.cpp"
-} break;
-case 11: {
-#line 121 "xmlparse.kl"
-
- /* Do this before distributing transitions out to singles and defaults
- * makes life easier. */
- cgd->redFsm->maxKey = cgd->findMaxKey();
-
- cgd->redFsm->assignActionLocs();
-
- /* Find the first final state (The final state with the lowest id). */
- cgd->redFsm->findFirstFinState();
-
- /* Call the user's callback. */
- cgd->finishRagelDef();
-
-
-#line 2376 "xmlparse.cpp"
-} break;
-case 12: {
-#line 136 "xmlparse.kl"
-
- char *fsmName = 0;
- Attribute *nameAttr = (&rhs[0]->user.token)->tag->findAttr( "name" );
- if ( nameAttr != 0 ) {
- fsmName = nameAttr->value;
-
- CodeGenMapEl *mapEl = codeGenMap.find( fsmName );
- if ( mapEl != 0 )
- cgd = mapEl->value;
- else {
- cgd = makeCodeGen( sourceFileName, fsmName, *outStream, wantComplete );
- codeGenMap.insert( fsmName, cgd );
- }
- }
- else {
- cgd = makeCodeGen( sourceFileName, fsmName,
- *outStream, wantComplete );
- }
-
- ::keyOps = &cgd->thisKeyOps;
-
-
-#line 2402 "xmlparse.cpp"
-} break;
-case 24: {
-#line 174 "xmlparse.kl"
-
- Attribute *nameAttr = (&rhs[0]->user.token)->tag->findAttr( "name" );
- if ( nameAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <ex> requires a name attribute" << endl;
- else {
- char *td = (&rhs[2]->user.token)->tag->content;
- Key exportKey = readKey( td, &td );
- cgd->exportList.append( new Export( nameAttr->value, exportKey ) );
- }
-
-
-#line 2417 "xmlparse.cpp"
-} break;
-case 25: {
-#line 186 "xmlparse.kl"
-
- if ( ! cgd->setAlphType( (&rhs[2]->user.token)->tag->content ) )
- error((&rhs[0]->user.token)->loc) << "tag <alphtype> specifies unknown alphabet type" << endl;
-
-
-#line 2426 "xmlparse.cpp"
-} break;
-case 26: {
-#line 192 "xmlparse.kl"
-
- cgd->getKeyExpr = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2434 "xmlparse.cpp"
-} break;
-case 27: {
-#line 197 "xmlparse.kl"
-
- cgd->accessExpr = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2442 "xmlparse.cpp"
-} break;
-case 28: {
-#line 202 "xmlparse.kl"
-
- cgd->curStateExpr = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2450 "xmlparse.cpp"
-} break;
-case 29: {
-#line 207 "xmlparse.kl"
-
- /* Terminate the options list and call the write statement handler. */
- writeOptions.append(0);
- cgd->writeStatement( (&rhs[0]->user.tag_write_head)->loc, writeOptions.length()-1, writeOptions.data );
-
- /* CodeGenData may have issued an error. */
- errCount += cgd->codeGenErrCount;
-
- /* Clear the options in prep for the next write statement. */
- writeOptions.empty();
-
-
-#line 2466 "xmlparse.cpp"
-} break;
-case 30: {
-#line 225 "xmlparse.kl"
-
- Attribute *nameAttr = (&rhs[0]->user.token)->tag->findAttr( "def_name" );
- Attribute *lineAttr = (&rhs[0]->user.token)->tag->findAttr( "line" );
- Attribute *colAttr = (&rhs[0]->user.token)->tag->findAttr( "col" );
-
- if ( nameAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <write> requires a def_name attribute" << endl;
- if ( lineAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <write> requires a line attribute" << endl;
- if ( colAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <write> requires a col attribute" << endl;
-
- if ( nameAttr != 0 && lineAttr != 0 && colAttr != 0 ) {
- CodeGenMapEl *mapEl = codeGenMap.find( nameAttr->value );
- if ( mapEl == 0 )
- error((&rhs[0]->user.token)->loc) << "internal error: cannot find codeGen" << endl;
- else {
- cgd = mapEl->value;
- ::keyOps = &cgd->thisKeyOps;
- }
-
- (&redLel->user.tag_write_head)->loc.line = atoi(lineAttr->value);
- (&redLel->user.tag_write_head)->loc.col = atoi(colAttr->value);
- }
-
-
-#line 2496 "xmlparse.cpp"
-} break;
-case 33: {
-#line 261 "xmlparse.kl"
-
- writeOptions.append( (&rhs[2]->user.token)->tag->content );
-
-
-#line 2504 "xmlparse.cpp"
-} break;
-case 34: {
-#line 266 "xmlparse.kl"
-
- cgd->closeMachine();
-
-
-#line 2512 "xmlparse.cpp"
-} break;
-case 35: {
-#line 271 "xmlparse.kl"
-
- cgd->createMachine();
-
-
-#line 2520 "xmlparse.cpp"
-} break;
-case 45: {
-#line 291 "xmlparse.kl"
-
- unsigned long startState = strtoul( (&rhs[2]->user.token)->tag->content, 0, 10 );
- cgd->setStartState( startState );
-
-
-#line 2529 "xmlparse.cpp"
-} break;
-case 46: {
-#line 297 "xmlparse.kl"
-
- unsigned long errorState = strtoul( (&rhs[2]->user.token)->tag->content, 0, 10 );
- cgd->setErrorState( errorState );
-
-
-#line 2538 "xmlparse.cpp"
-} break;
-case 47: {
-#line 303 "xmlparse.kl"
-
- Attribute *errorAttr = (&rhs[0]->user.token)->tag->findAttr( "error" );
- if ( errorAttr != 0 )
- cgd->setForcedErrorState();
-
-
-#line 2548 "xmlparse.cpp"
-} break;
-case 50: {
-#line 313 "xmlparse.kl"
-
- Attribute *nameAttr = (&rhs[0]->user.token)->tag->findAttr( "name" );
- if ( nameAttr == 0 ) {
- error((&rhs[0]->user.token)->loc) << "tag <entry_points>::<entry> "
- "requires a name attribute" << endl;
- }
- else {
- char *data = (&rhs[2]->user.token)->tag->content;
- unsigned long entry = strtoul( data, &data, 10 );
- cgd->addEntryPoint( nameAttr->value, entry );
- }
-
-
-#line 2565 "xmlparse.cpp"
-} break;
-case 52: {
-#line 329 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <state_list> requires a length attribute" << endl;
- else {
- unsigned long length = strtoul( lengthAttr->value, 0, 10 );
- cgd->initStateList( length );
- curState = 0;
- }
-
-
-#line 2580 "xmlparse.cpp"
-} break;
-case 55: {
-#line 344 "xmlparse.kl"
-
- Attribute *idAttr = (&rhs[0]->user.token)->tag->findAttr( "id" );
- if ( idAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <state> requires an id attribute" << endl;
- else {
- int id = atoi( idAttr->value );
- cgd->setId( curState, id );
- }
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "final" );
- if ( lengthAttr != 0 )
- cgd->setFinal( curState );
- curState += 1;
-
-
-#line 2599 "xmlparse.cpp"
-} break;
-case 61: {
-#line 367 "xmlparse.kl"
-
- char *ad = (&rhs[2]->user.token)->tag->content;
-
- long toStateAction = readOffsetPtr( ad, &ad );
- long fromStateAction = readOffsetPtr( ad, &ad );
- long eofAction = readOffsetPtr( ad, &ad );
-
- cgd->setStateActions( curState, toStateAction,
- fromStateAction, eofAction );
-
-
-#line 2614 "xmlparse.cpp"
-} break;
-case 63: {
-#line 381 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <cond_list> requires a length attribute" << endl;
- else {
- ulong length = readLength( lengthAttr->value );
- cgd->initStateCondList( curState, length );
- curStateCond = 0;
- }
-
-
-#line 2629 "xmlparse.cpp"
-} break;
-case 66: {
-#line 396 "xmlparse.kl"
-
- char *td = (&rhs[2]->user.token)->tag->content;
- Key lowKey = readKey( td, &td );
- Key highKey = readKey( td, &td );
- long condId = readOffsetPtr( td, &td );
- cgd->addStateCond( curState, lowKey, highKey, condId );
-
-
-#line 2641 "xmlparse.cpp"
-} break;
-case 67: {
-#line 405 "xmlparse.kl"
-
- cgd->finishTransList( curState );
-
-
-#line 2649 "xmlparse.cpp"
-} break;
-case 68: {
-#line 410 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <trans_list> requires a length attribute" << endl;
- else {
- unsigned long length = strtoul( lengthAttr->value, 0, 10 );
- cgd->initTransList( curState, length );
- curTrans = 0;
- }
-
-
-#line 2664 "xmlparse.cpp"
-} break;
-case 71: {
-#line 425 "xmlparse.kl"
-
- char *td = (&rhs[2]->user.token)->tag->content;
- Key lowKey = readKey( td, &td );
- Key highKey = readKey( td, &td );
- long targ = readOffsetPtr( td, &td );
- long action = readOffsetPtr( td, &td );
-
- cgd->newTrans( curState, curTrans++, lowKey, highKey, targ, action );
-
-
-#line 2678 "xmlparse.cpp"
-} break;
-case 73: {
-#line 442 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <action_list> requires a length attribute" << endl;
- else {
- unsigned long length = strtoul( lengthAttr->value, 0, 10 );
- cgd->initActionList( length );
- curAction = 0;
- }
-
-
-#line 2693 "xmlparse.cpp"
-} break;
-case 76: {
-#line 461 "xmlparse.kl"
-
- Attribute *lineAttr = (&rhs[0]->user.token)->tag->findAttr( "line" );
- Attribute *colAttr = (&rhs[0]->user.token)->tag->findAttr( "col" );
- Attribute *nameAttr = (&rhs[0]->user.token)->tag->findAttr( "name" );
- if ( lineAttr == 0 || colAttr == 0)
-		error((&rhs[0]->user.token)->loc) << "tag <action> requires line and col attributes" << endl;
- else {
- unsigned long line = strtoul( lineAttr->value, 0, 10 );
- unsigned long col = strtoul( colAttr->value, 0, 10 );
-
- char *name = 0;
- if ( nameAttr != 0 )
- name = nameAttr->value;
-
- cgd->newAction( curAction++, name, line, col, (&rhs[1]->user.inline_list)->inlineList );
- }
-
-
-#line 2715 "xmlparse.cpp"
-} break;
-case 77: {
-#line 486 "xmlparse.kl"
-
- /* Append the item to the list, return the list. */
- (&rhs[0]->user.inline_list)->inlineList->append( (&rhs[1]->user.inline_item_type)->inlineItem );
- (&redLel->user.inline_list)->inlineList = (&rhs[0]->user.inline_list)->inlineList;
-
-
-#line 2725 "xmlparse.cpp"
-} break;
-case 78: {
-#line 493 "xmlparse.kl"
-
- /* Start with empty list. */
- (&redLel->user.inline_list)->inlineList = new InlineList;
-
-
-#line 2734 "xmlparse.cpp"
-} break;
-case 79: {
-#line 505 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2740 "xmlparse.cpp"
-} break;
-case 80: {
-#line 506 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2746 "xmlparse.cpp"
-} break;
-case 81: {
-#line 507 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2752 "xmlparse.cpp"
-} break;
-case 82: {
-#line 508 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2758 "xmlparse.cpp"
-} break;
-case 83: {
-#line 509 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2764 "xmlparse.cpp"
-} break;
-case 84: {
-#line 510 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2770 "xmlparse.cpp"
-} break;
-case 85: {
-#line 511 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2776 "xmlparse.cpp"
-} break;
-case 86: {
-#line 512 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2782 "xmlparse.cpp"
-} break;
-case 87: {
-#line 513 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2788 "xmlparse.cpp"
-} break;
-case 88: {
-#line 514 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2794 "xmlparse.cpp"
-} break;
-case 89: {
-#line 515 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2800 "xmlparse.cpp"
-} break;
-case 90: {
-#line 516 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2806 "xmlparse.cpp"
-} break;
-case 91: {
-#line 517 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2812 "xmlparse.cpp"
-} break;
-case 92: {
-#line 518 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2818 "xmlparse.cpp"
-} break;
-case 93: {
-#line 519 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2824 "xmlparse.cpp"
-} break;
-case 94: {
-#line 520 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2830 "xmlparse.cpp"
-} break;
-case 95: {
-#line 521 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2836 "xmlparse.cpp"
-} break;
-case 96: {
-#line 522 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2842 "xmlparse.cpp"
-} break;
-case 97: {
-#line 523 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2848 "xmlparse.cpp"
-} break;
-case 98: {
-#line 524 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2854 "xmlparse.cpp"
-} break;
-case 99: {
-#line 525 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2860 "xmlparse.cpp"
-} break;
-case 100: {
-#line 526 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2866 "xmlparse.cpp"
-} break;
-case 101: {
-#line 527 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2872 "xmlparse.cpp"
-} break;
-case 102: {
-#line 528 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2878 "xmlparse.cpp"
-} break;
-case 103: {
-#line 529 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2884 "xmlparse.cpp"
-} break;
-case 104: {
-#line 530 "xmlparse.kl"
- (&redLel->user.inline_item_type)->inlineItem = (&rhs[0]->user.inline_item_type)->inlineItem;
-
-#line 2890 "xmlparse.cpp"
-} break;
-case 105: {
-#line 560 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Text );
- (&redLel->user.inline_item_type)->inlineItem->data = (&rhs[2]->user.token)->tag->content;
-
-
-#line 2899 "xmlparse.cpp"
-} break;
-case 106: {
-#line 566 "xmlparse.kl"
-
- int targ = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Goto );
- (&redLel->user.inline_item_type)->inlineItem->targId = targ;
-
-
-#line 2909 "xmlparse.cpp"
-} break;
-case 107: {
-#line 573 "xmlparse.kl"
-
- int targ = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Call );
- (&redLel->user.inline_item_type)->inlineItem->targId = targ;
-
-
-#line 2919 "xmlparse.cpp"
-} break;
-case 108: {
-#line 580 "xmlparse.kl"
-
- int targ = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Next );
- (&redLel->user.inline_item_type)->inlineItem->targId = targ;
-
-
-#line 2929 "xmlparse.cpp"
-} break;
-case 109: {
-#line 587 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::GotoExpr );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2938 "xmlparse.cpp"
-} break;
-case 110: {
-#line 593 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::CallExpr );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2947 "xmlparse.cpp"
-} break;
-case 111: {
-#line 599 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::NextExpr );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 2956 "xmlparse.cpp"
-} break;
-case 112: {
-#line 605 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Ret );
-
-
-#line 2964 "xmlparse.cpp"
-} break;
-case 113: {
-#line 610 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Break );
-
-
-#line 2972 "xmlparse.cpp"
-} break;
-case 114: {
-#line 615 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::PChar );
-
-
-#line 2980 "xmlparse.cpp"
-} break;
-case 115: {
-#line 620 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Char );
-
-
-#line 2988 "xmlparse.cpp"
-} break;
-case 116: {
-#line 625 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Hold );
-
-
-#line 2996 "xmlparse.cpp"
-} break;
-case 117: {
-#line 630 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Exec );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 3005 "xmlparse.cpp"
-} break;
-case 118: {
-#line 636 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::HoldTE );
-
-
-#line 3013 "xmlparse.cpp"
-} break;
-case 119: {
-#line 641 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::ExecTE );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 3022 "xmlparse.cpp"
-} break;
-case 120: {
-#line 647 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Curs );
-
-
-#line 3030 "xmlparse.cpp"
-} break;
-case 121: {
-#line 652 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Targs );
-
-
-#line 3038 "xmlparse.cpp"
-} break;
-case 122: {
-#line 657 "xmlparse.kl"
-
- int targ = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::Entry );
- (&redLel->user.inline_item_type)->inlineItem->targId = targ;
-
-
-#line 3048 "xmlparse.cpp"
-} break;
-case 123: {
-#line 664 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmInitTokStart );
-
-
-#line 3056 "xmlparse.cpp"
-} break;
-case 124: {
-#line 669 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmInitAct );
-
-
-#line 3064 "xmlparse.cpp"
-} break;
-case 125: {
-#line 674 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmGetTokEnd );
-
-
-#line 3072 "xmlparse.cpp"
-} break;
-case 126: {
-#line 679 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmSetTokStart );
- cgd->hasLongestMatch = true;
-
-
-#line 3081 "xmlparse.cpp"
-} break;
-case 127: {
-#line 685 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmSetTokEnd );
- (&redLel->user.inline_item_type)->inlineItem->offset = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
-
-
-#line 3090 "xmlparse.cpp"
-} break;
-case 128: {
-#line 691 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmSetActId );
- (&redLel->user.inline_item_type)->inlineItem->lmId = strtol( (&rhs[2]->user.token)->tag->content, 0, 10 );
-
-
-#line 3099 "xmlparse.cpp"
-} break;
-case 129: {
-#line 697 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::SubAction );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
-
-#line 3108 "xmlparse.cpp"
-} break;
-case 130: {
-#line 704 "xmlparse.kl"
-
- bool handlesError = false;
- Attribute *handlesErrorAttr = (&rhs[0]->user.token)->tag->findAttr( "handles_error" );
- if ( handlesErrorAttr != 0 )
- handlesError = true;
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::LmSwitch );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.lm_action_list)->inlineList;
- (&redLel->user.inline_item_type)->inlineItem->handlesError = handlesError;
-
-
-#line 3123 "xmlparse.cpp"
-} break;
-case 131: {
-#line 721 "xmlparse.kl"
-
- (&redLel->user.lm_action_list)->inlineList = (&rhs[0]->user.lm_action_list)->inlineList;
- (&redLel->user.lm_action_list)->inlineList->append( (&rhs[1]->user.inline_item_type)->inlineItem );
-
-
-#line 3132 "xmlparse.cpp"
-} break;
-case 132: {
-#line 726 "xmlparse.kl"
-
- (&redLel->user.lm_action_list)->inlineList = new InlineList;
-
-
-#line 3140 "xmlparse.cpp"
-} break;
-case 133: {
-#line 733 "xmlparse.kl"
-
- (&redLel->user.inline_item_type)->inlineItem = new InlineItem( InputLoc(), InlineItem::SubAction );
- (&redLel->user.inline_item_type)->inlineItem->children = (&rhs[1]->user.inline_list)->inlineList;
-
- Attribute *idAttr = (&rhs[0]->user.token)->tag->findAttr( "id" );
- if ( idAttr != 0 ) {
- unsigned long id = strtoul( idAttr->value, 0, 10 );
- (&redLel->user.inline_item_type)->inlineItem->lmId = id;
- }
-
-
-#line 3155 "xmlparse.cpp"
-} break;
-case 135: {
-#line 752 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 ) {
- error((&rhs[0]->user.token)->loc) << "tag <action_table_list> requires "
- "a length attribute" << endl;
- }
- else {
- unsigned long length = strtoul( lengthAttr->value, 0, 10 );
- cgd->initActionTableList( length );
- curActionTable = 0;
- }
-
-
-#line 3172 "xmlparse.cpp"
-} break;
-case 138: {
-#line 769 "xmlparse.kl"
-
- /* Find the length of the action table. */
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <at> requires a length attribute" << endl;
- else {
- unsigned long length = strtoul( lengthAttr->value, 0, 10 );
-
- /* Collect the action table. */
- RedAction *redAct = cgd->allActionTables + curActionTable;
- redAct->actListId = curActionTable;
- redAct->key.setAsNew( length );
- char *ptr = (&rhs[2]->user.token)->tag->content;
- int pos = 0;
- while ( *ptr != 0 ) {
- unsigned long actionId = strtoul( ptr, &ptr, 10 );
- redAct->key[pos].key = 0;
- redAct->key[pos].value = cgd->allActions+actionId;
- pos += 1;
- }
-
- /* Insert into the action table map. */
- cgd->redFsm->actionMap.insert( redAct );
- }
-
- curActionTable += 1;
-
-
-#line 3204 "xmlparse.cpp"
-} break;
-case 140: {
-#line 804 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- if ( lengthAttr == 0 ) {
- error((&rhs[0]->user.token)->loc) << "tag <cond_space_list> "
- "requires a length attribute" << endl;
- }
- else {
- ulong length = readLength( lengthAttr->value );
- cgd->initCondSpaceList( length );
- curCondSpace = 0;
- }
-
-
-#line 3221 "xmlparse.cpp"
-} break;
-case 143: {
-#line 821 "xmlparse.kl"
-
- Attribute *lengthAttr = (&rhs[0]->user.token)->tag->findAttr( "length" );
- Attribute *idAttr = (&rhs[0]->user.token)->tag->findAttr( "id" );
- if ( lengthAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <cond_space> requires a length attribute" << endl;
- else {
-		if ( idAttr == 0 )
- error((&rhs[0]->user.token)->loc) << "tag <cond_space> requires an id attribute" << endl;
- else {
- unsigned long condSpaceId = strtoul( idAttr->value, 0, 10 );
- ulong length = readLength( lengthAttr->value );
-
- char *td = (&rhs[2]->user.token)->tag->content;
- Key baseKey = readKey( td, &td );
-
- cgd->newCondSpace( curCondSpace, condSpaceId, baseKey );
- for ( ulong a = 0; a < length; a++ ) {
- long actionOffset = readOffsetPtr( td, &td );
- cgd->condSpaceItem( curCondSpace, actionOffset );
- }
- curCondSpace += 1;
- }
- }
-
-
-#line 3250 "xmlparse.cpp"
-} break;
-}
- }
-}
-
- if ( lel->child != 0 ) {
- struct Parser_LangEl *first = lel->child;
- struct Parser_LangEl *child = lel->child;
- numNodes -= 1;
- lel->child = 0;
- while ( child->next != 0 ) {
- child = child->next;
- numNodes -= 1;
- }
- child->next = pool;
- pool = first;
- }
- }
-
-hit_final:
- if ( sp > 0 ) {
- /* Figure out which place to return to. */
- if ( cmStack[sp-1]->next == lel ) {
- lel = cmStack[--sp];
- goto final_reverse;
- }
- else {
- lel = cmStack[--sp];
- goto final_upwards;
- }
- }
-
- lastFinal = lel;
- free( cmStack );
- }
- }
- }
-
- if ( *action & 0x2 ) {
- int fssRed = *action >> 2;
- int reduction = Parser_fssProdIdIndex[fssRed];
- struct Parser_LangEl *redLel;
- if ( pool == 0 ) {
- if ( freshPos == 8128 ) {
- freshEl = (struct Parser_LangEl*) malloc(
- sizeof(struct Parser_LangEl)*8128);
- #ifdef LOG_ACTIONS
- cerr << "allocating 8128 LangEls" << endl;
- #endif
- freshPos = 0;
- }
- redLel = freshEl + freshPos++;
- }
- else {
- redLel = pool;
- pool = pool->next;
- }
- numNodes += 1;
- redLel->type = Parser_prodLhsIds[reduction];
- redLel->reduction = reduction;
- redLel->child = 0;
- redLel->next = 0;
- redLel->retry = (lel->retry << 16);
- lel->retry &= 0xffff0000;
-
- rhsLen = Parser_fssProdLengths[fssRed];
- if ( rhsLen > 0 ) {
- int r;
- for ( r = rhsLen-1; r > 0; r-- ) {
- rhs[r] = stackTop;
- stackTop = stackTop->next;
- }
- rhs[0] = stackTop;
- stackTop = stackTop->next;
- rhs[0]->next = 0;
- }
- #ifdef LOG_ACTIONS
- cerr << "reduced: "
- << Parser_prodNames[reduction]
- << " rhsLen: " << rhsLen;
- #endif
- if ( action[1] == 0 )
- redLel->retry = 0;
- else {
- redLel->retry += 0x10000;
- numRetry += 1;
- #ifdef LOG_ACTIONS
- cerr << " retry: " << redLel;
- #endif
- }
-
- #ifdef LOG_ACTIONS
- cerr << endl;
- #endif
-
- if ( rhsLen == 0 ) {
- redLel->file = lel->file;
- redLel->line = lel->line;
- targState = curs;
- }
- else {
- redLel->child = rhs[rhsLen-1];
- redLel->file = rhs[0]->file;
- redLel->line = rhs[0]->line;
- targState = rhs[0]->state;
- }
-
- if ( induceReject ) {
- #ifdef LOG_ACTIONS
- cerr << "error induced during reduction of " <<
- Parser_lelNames[redLel->type] << endl;
- #endif
- redLel->state = curs;
- redLel->next = stackTop;
- stackTop = redLel;
- curs = targState;
- goto parseError;
- }
- else {
- redLel->next = input;
- input = redLel;
- }
- }
-
-
- curs = targState;
- goto again;
-
-parseError:
- #ifdef LOG_BACKTRACK
- cerr << "hit error" << endl;
- #endif
- if ( numRetry > 0 ) {
- while ( 1 ) {
- struct Parser_LangEl *redLel = stackTop;
- if ( stackTop->type < 186 ) {
- #ifdef LOG_BACKTRACK
- cerr << "backing up over terminal: " <<
- Parser_lelNames[stackTop->type] << endl;
- #endif
- stackTop = stackTop->next;
- redLel->next = input;
- input = redLel;
- }
- else {
- #ifdef LOG_BACKTRACK
- cerr << "backing up over non-terminal: " <<
- Parser_lelNames[stackTop->type] << endl;
- #endif
- stackTop = stackTop->next;
- struct Parser_LangEl *first = redLel->child;
- if ( first == 0 )
- rhsLen = 0;
- else {
- rhsLen = 1;
- while ( first->next != 0 ) {
- first = first->next;
- rhsLen += 1;
- }
- first->next = stackTop;
- stackTop = redLel->child;
-
- struct Parser_LangEl *rhsEl = stackTop;
- int p = rhsLen;
- while ( p > 0 ) {
- rhs[--p] = rhsEl;
- rhsEl = rhsEl->next;
- }
- }
- redLel->next = pool;
- pool = redLel;
- numNodes -= 1;
- }
-
- if ( redLel->retry > 0 ) {
- #ifdef LOG_BACKTRACK
- cerr << "found retry targ: " << redLel << endl;
- #endif
- numRetry -= 1;
- #ifdef LOG_BACKTRACK
- cerr << "found retry: " << redLel << endl;
- #endif
- if ( redLel->retry & 0x0000ffff )
- curs = input->state;
- else {
- input->retry = redLel->retry >> 16;
- if ( stackTop->state < 0 )
- curs = Parser_startState;
- else {
- curs = Parser_targs[(int)Parser_indicies[Parser_offsets[stackTop->state] + (stackTop->type - Parser_keys[stackTop->state<<1])]];
- }
- }
- goto again;
- }
- }
- }
- curs = -1;
- errCount += 1;
-_out: {}
-#line 861 "xmlparse.kl"
- return errCount == 0 ? 0 : -1;
-}
-
-
-unsigned long readLength( char *td )
-{
- return strtoul( td, 0, 10 );
-}
-
-Key readKey( char *td, char **end )
-{
- if ( keyOps->isSigned )
- return Key( strtol( td, end, 10 ) );
- else
- return Key( strtoul( td, end, 10 ) );
-}
-
-long readOffsetPtr( char *td, char **end )
-{
- while ( *td == ' ' || *td == '\t' )
- td++;
-
- if ( *td == 'x' ) {
- if ( end != 0 )
- *end = td + 1;
- return -1;
- }
-
- return strtol( td, end, 10 );
-}
-
-ostream &Parser::warning( const InputLoc &loc )
-{
- cerr << fileName << ":" << loc.line << ":" << loc.col << ": warning: ";
- return cerr;
-}
-
-ostream &Parser::error( const InputLoc &loc )
-{
- errCount += 1;
- assert( fileName != 0 );
- cerr << fileName << ":" << loc.line << ":" << loc.col << ": ";
- return cerr;
-}
-
-
-ostream &Parser::parser_error( int tokId, Token &token )
-{
- errCount += 1;
- assert( fileName != 0 );
- cerr << fileName << ":" << token.loc.line << ":" << token.loc.col;
- if ( token.tag != 0 ) {
- if ( token.tag->tagId == 0 )
- cerr << ": at unknown tag";
- else
- cerr << ": at tag <" << token.tag->tagId->name << ">";
- }
- cerr << ": ";
-
- return cerr;
-}
-
-int Parser::token( int tokenId, Token &tok )
-{
- int res = parseLangEl( tokenId, &tok );
- if ( res < 0 ) {
- parser_error( tokenId, tok ) << "parse error" << endl;
- exit(1);
- }
- return res;
-}
-
-int Parser::token( int tokenId, int col, int line )
-{
- Token tok;
- tok.loc.col = col;
- tok.loc.line = line;
- tok.tag = 0;
- return token( tokenId, tok );
-}
-
-int Parser::token( XMLTag *tag, int col, int line )
-{
- Token tok;
- tok.loc.col = col;
- tok.loc.line = line;
- tok.tag = tag;
-
- if ( tag->type == XMLTag::Close ) {
- int res = token( '/', tok );
- if ( res < 0 )
- return res;
- }
-
- tok.tag = tag;
- return token( tag->tagId != 0 ? tag->tagId->id : TAG_unknown, tok );
-}
diff --git a/contrib/tools/ragel5/redfsm/xmlparse.h b/contrib/tools/ragel5/redfsm/xmlparse.h
deleted file mode 100644
index b51a7cd67a..0000000000
--- a/contrib/tools/ragel5/redfsm/xmlparse.h
+++ /dev/null
@@ -1,228 +0,0 @@
-/* Automatically generated by Kelbt from "xmlparse.kh".
- *
- * Parts of this file are copied from Kelbt source covered by the GNU
- * GPL. As a special exception, you may use the parts of this file copied
- * from Kelbt source without restriction. The remainder is derived from
- * "xmlparse.kh" and inherits the copyright status of that file.
- */
-
-#line 1 "xmlparse.kh"
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#ifndef _XMLPARSE_H
-#define _XMLPARSE_H
-
-#include "vector.h"
-#include "gendata.h"
-#include <iostream>
-
-using std::ostream;
-
-struct AttrMarker
-{
- char *id;
- int idLen;
- char *value;
- int valueLen;
-};
-
-struct Attribute
-{
- char *id;
- char *value;
-};
-
-typedef Vector<AttrMarker> AttrMkList;
-typedef Vector<Attribute> AttrList;
-struct XMLTagHashPair;
-
-struct XMLTag
-{
- enum TagType { Open, Close };
-
- XMLTag( XMLTagHashPair *tagId, TagType type ) :
- tagId(tagId), type(type),
- content(0), attrList(0) {}
-
- Attribute *findAttr(const char *id )
- {
- if ( attrList != 0 ) {
- for ( AttrList::Iter attr = *attrList; attr.lte(); attr++ ) {
- if ( strcmp( id, attr->id ) == 0 )
- return attr;
- }
- }
- return 0;
- }
-
- XMLTagHashPair *tagId;
- TagType type;
-
-	/* Content is associated with closing tags. */
- char *content;
-
- /* Attribute lists are associated with opening tags. */
- AttrList *attrList;
-};
-
-
-struct XMLTagHashPair
-{
- const char *name;
- int id;
-};
-
-struct Token
-{
- XMLTag *tag;
- InputLoc loc;
-};
-
-struct InlineItem;
-struct InlineList;
-
-struct LmSwitchVect;
-struct LmSwitchAction;
-
-struct Parser
-{
- #line 117 "xmlparse.kh"
-
-
- #line 111 "xmlparse.h"
- struct Parser_LangEl *freshEl;
- int freshPos;
- struct Parser_LangEl *pool;
- int numRetry;
- int numNodes;
- struct Parser_LangEl *stackTop;
- struct Parser_LangEl *lastFinal;
- int errCount;
- int curs;
-#line 120 "xmlparse.kh"
-
- void init();
- int parseLangEl( int type, const Token *token );
-
- Parser(const char *fileName, bool outputActive, bool wantComplete ) :
- fileName(fileName), sourceFileName(0), outStream(0),
- outputActive(outputActive), wantComplete(wantComplete),
- cgd(0) { }
-
- int token( int tokenId, Token &token );
- int token( int tokenId, int col, int line );
- int token( XMLTag *tag, int col, int line );
-
- /* Report an error encountered by the parser. */
- ostream &warning( const InputLoc &loc );
- ostream &error();
- ostream &error( const InputLoc &loc );
- ostream &parser_error( int tokId, Token &token );
-
-	/* The name of the root section; this does not change during an include. */
- const char *fileName;
- char *sourceFileName;
- ostream *outStream;
- bool outputActive;
- bool wantComplete;
-
- /* Collected during parsing. */
- char *attrKey;
- char *attrValue;
- int curAction;
- int curActionTable;
- int curTrans;
- int curState;
- int curCondSpace;
- int curStateCond;
-
- CodeGenData *cgd;
- CodeGenMap codeGenMap;
-
- Vector <char*> writeOptions;
-};
-
-#line 164 "xmlparse.h"
-#define TAG_unknown 128
-#define TAG_ragel 129
-#define TAG_ragel_def 130
-#define TAG_host 131
-#define TAG_state_list 132
-#define TAG_state 133
-#define TAG_trans_list 134
-#define TAG_t 135
-#define TAG_machine 136
-#define TAG_start_state 137
-#define TAG_error_state 138
-#define TAG_action_list 139
-#define TAG_action_table_list 140
-#define TAG_action 141
-#define TAG_action_table 142
-#define TAG_alphtype 143
-#define TAG_element 144
-#define TAG_getkey 145
-#define TAG_state_actions 146
-#define TAG_entry_points 147
-#define TAG_sub_action 148
-#define TAG_cond_space_list 149
-#define TAG_cond_space 150
-#define TAG_cond_list 151
-#define TAG_c 152
-#define TAG_exports 153
-#define TAG_ex 154
-#define TAG_text 155
-#define TAG_goto 156
-#define TAG_call 157
-#define TAG_next 158
-#define TAG_goto_expr 159
-#define TAG_call_expr 160
-#define TAG_next_expr 161
-#define TAG_ret 162
-#define TAG_pchar 163
-#define TAG_char 164
-#define TAG_hold 165
-#define TAG_exec 166
-#define TAG_holdte 167
-#define TAG_execte 168
-#define TAG_curs 169
-#define TAG_targs 170
-#define TAG_entry 171
-#define TAG_data 172
-#define TAG_lm_switch 173
-#define TAG_init_act 174
-#define TAG_set_act 175
-#define TAG_set_tokend 176
-#define TAG_get_tokend 177
-#define TAG_init_tokstart 178
-#define TAG_set_tokstart 179
-#define TAG_write 180
-#define TAG_curstate 181
-#define TAG_access 182
-#define TAG_break 183
-#define TAG_arg 184
-#define _eof 185
-
-#line 163 "xmlparse.kh"
-
-int xml_parse( std::istream &input, const char *fileName,
- bool outputActive, bool wantComplete );
-
-#endif /* _XMLPARSE_H */
diff --git a/contrib/tools/ragel5/redfsm/xmlscan.cpp b/contrib/tools/ragel5/redfsm/xmlscan.cpp
deleted file mode 100644
index a3d979a0ff..0000000000
--- a/contrib/tools/ragel5/redfsm/xmlscan.cpp
+++ /dev/null
@@ -1,925 +0,0 @@
-#line 1 "xmlscan.rl"
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-#include <iostream>
-#include <string.h>
-#include "vector.h"
-#include "xmlparse.h"
-#include "buffer.h"
-
-using std::istream;
-using std::cout;
-using std::cerr;
-using std::endl;
-
-#define BUFSIZE 4096
-
-
-#line 37 "xmlscan.cpp"
-static const int Scanner_start = 20;
-
-static const int Scanner_first_final = 20;
-
-static const int Scanner_error = 0;
-
-#line 37 "xmlscan.rl"
-
-#include "phash.h"
-
-struct Scanner
-{
- Scanner(const char *fileName, istream &input ) :
- fileName(fileName),
- input(input),
- curline(1),
- curcol(1),
- p(0), pe(0),
- done(false),
- data(0), data_len(0),
- value(0)
- {
-
-#line 69 "xmlscan.cpp"
- {
- cs = Scanner_start;
- tokstart = 0;
- tokend = 0;
- act = 0;
- }
-#line 63 "xmlscan.rl"
-
- }
-
- int scan();
- void adjustAttrPointers( int distance );
- std::ostream &error();
-
- const char *fileName;
- istream &input;
-
- /* Scanner State. */
- int cs, act, have, curline, curcol;
- char *tokstart, *tokend;
- char *p, *pe;
- int done;
-
- /* Token data */
- char *data;
- int data_len;
- int value;
- AttrMkList attrMkList;
- Buffer buffer;
- char *tag_id_start;
- int tag_id_len;
- int token_col, token_line;
-
- char buf[BUFSIZE];
-};
-
-
-#define TK_NO_TOKEN (-1)
-#define TK_ERR 1
-#define TK_SPACE 2
-#define TK_EOF 3
-#define TK_OpenTag 4
-#define TK_CloseTag 5
-
-#define ret_tok( _tok ) token = (_tok); data = tokstart
-
-void Scanner::adjustAttrPointers( int distance )
-{
- for ( AttrMkList::Iter attr = attrMkList; attr.lte(); attr++ ) {
- attr->id -= distance;
- attr->value -= distance;
- }
-}
-
-/* There is no claim that this is a proper XML parser, but it is good
- * enough for our purposes. */
-#line 178 "xmlscan.rl"
-
-
-int Scanner::scan( )
-{
- int token = TK_NO_TOKEN;
- int space = 0, readlen = 0;
- char *attr_id_start = 0;
- char *attr_value_start = 0;
- int attr_id_len = 0;
- int attr_value_len = 0;
-
- attrMkList.empty();
- buffer.clear();
-
- while ( 1 ) {
- if ( p == pe ) {
- //printf("scanner: need more data\n");
-
- if ( tokstart == 0 )
- have = 0;
- else {
- /* There is data that needs to be shifted over. */
- //printf("scanner: buffer broken mid token\n");
- have = pe - tokstart;
- memmove( buf, tokstart, have );
-
- int distance = tokstart - buf;
- tokend -= distance;
- tag_id_start -= distance;
- attr_id_start -= distance;
- attr_value_start -= distance;
- adjustAttrPointers( distance );
- tokstart = buf;
- }
-
- p = buf + have;
- space = BUFSIZE - have;
-
- if ( space == 0 ) {
- /* We filled up the buffer trying to scan a token. */
- return TK_SPACE;
- }
-
- if ( done ) {
- //printf("scanner: end of file\n");
- p[0] = 0;
- readlen = 1;
- }
- else {
- input.read( p, space );
- readlen = input.gcount();
- if ( input.eof() ) {
- //printf("scanner: setting done flag\n");
- done = 1;
- }
- }
-
- pe = p + readlen;
- }
-
-
-#line 188 "xmlscan.cpp"
- {
- if ( p == pe )
- goto _out;
- switch ( cs )
- {
-tr6:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 168 "xmlscan.rl"
- {tokend = p+1;{ buffer.append( '&' ); }{p = ((tokend))-1;}}
- goto st20;
-tr8:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 172 "xmlscan.rl"
- {tokend = p+1;{ buffer.append( '>' ); }{p = ((tokend))-1;}}
- goto st20;
-tr10:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 170 "xmlscan.rl"
- {tokend = p+1;{ buffer.append( '<' ); }{p = ((tokend))-1;}}
- goto st20;
-tr20:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 160 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_CloseTag ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr23:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 160 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_CloseTag ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr27:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 157 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_OpenTag ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr30:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 157 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_OpenTag ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr46:
-#line 132 "xmlscan.rl"
- {
- attr_value_len = p - attr_value_start;
-
- AttrMarker newAttr;
- newAttr.id = attr_id_start;
- newAttr.idLen = attr_id_len;
- newAttr.value = attr_value_start;
- newAttr.valueLen = attr_value_len;
- attrMkList.append( newAttr );
- }
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 157 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_OpenTag ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr48:
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 164 "xmlscan.rl"
- {tokend = p+1;{ buffer.append( *p ); }{p = ((tokend))-1;}}
- goto st20;
-tr49:
-#line 116 "xmlscan.rl"
- { token_col = curcol; token_line = curline; }
-#line 175 "xmlscan.rl"
- {tokend = p+1;{ ret_tok( TK_EOF ); {{p = ((tokend))-1;}goto _out20;} }{p = ((tokend))-1;}}
- goto st20;
-tr50:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
-#line 164 "xmlscan.rl"
- {tokend = p+1;{ buffer.append( *p ); }{p = ((tokend))-1;}}
- goto st20;
-st20:
-#line 1 "xmlscan.rl"
- {tokstart = 0;}
- if ( ++p == pe )
- goto _out20;
-case 20:
-#line 1 "xmlscan.rl"
- {tokstart = p;}
-#line 285 "xmlscan.cpp"
- switch( (*p) ) {
- case 0: goto tr49;
- case 10: goto tr50;
- case 38: goto tr51;
- case 60: goto tr52;
- }
- goto tr48;
-tr51:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st1;
-st1:
- if ( ++p == pe )
- goto _out1;
-case 1:
-#line 301 "xmlscan.cpp"
- switch( (*p) ) {
- case 97: goto tr0;
- case 103: goto tr2;
- case 108: goto tr3;
- }
- goto st0;
-st0:
- goto _out0;
-tr0:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st2;
-st2:
- if ( ++p == pe )
- goto _out2;
-case 2:
-#line 318 "xmlscan.cpp"
- if ( (*p) == 109 )
- goto tr4;
- goto st0;
-tr4:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st3;
-st3:
- if ( ++p == pe )
- goto _out3;
-case 3:
-#line 330 "xmlscan.cpp"
- if ( (*p) == 112 )
- goto tr5;
- goto st0;
-tr5:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st4;
-st4:
- if ( ++p == pe )
- goto _out4;
-case 4:
-#line 342 "xmlscan.cpp"
- if ( (*p) == 59 )
- goto tr6;
- goto st0;
-tr2:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st5;
-st5:
- if ( ++p == pe )
- goto _out5;
-case 5:
-#line 354 "xmlscan.cpp"
- if ( (*p) == 116 )
- goto tr7;
- goto st0;
-tr7:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st6;
-st6:
- if ( ++p == pe )
- goto _out6;
-case 6:
-#line 366 "xmlscan.cpp"
- if ( (*p) == 59 )
- goto tr8;
- goto st0;
-tr3:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st7;
-st7:
- if ( ++p == pe )
- goto _out7;
-case 7:
-#line 378 "xmlscan.cpp"
- if ( (*p) == 116 )
- goto tr9;
- goto st0;
-tr9:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st8;
-st8:
- if ( ++p == pe )
- goto _out8;
-case 8:
-#line 390 "xmlscan.cpp"
- if ( (*p) == 59 )
- goto tr10;
- goto st0;
-tr11:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st9;
-tr12:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st9;
-tr52:
-#line 116 "xmlscan.rl"
- { token_col = curcol; token_line = curline; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st9;
-st9:
- if ( ++p == pe )
- goto _out9;
-case 9:
-#line 414 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr11;
- case 10: goto tr12;
- case 13: goto tr11;
- case 32: goto tr11;
- case 47: goto tr13;
- case 95: goto tr14;
- }
- if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr14;
- } else if ( (*p) >= 65 )
- goto tr14;
- goto st0;
-tr13:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st10;
-tr15:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st10;
-st10:
- if ( ++p == pe )
- goto _out10;
-case 10:
-#line 443 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr13;
- case 10: goto tr15;
- case 13: goto tr13;
- case 32: goto tr13;
- case 95: goto tr16;
- }
- if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr16;
- } else if ( (*p) >= 65 )
- goto tr16;
- goto st0;
-tr19:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st11;
-tr16:
-#line 149 "xmlscan.rl"
- { tag_id_start = p; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st11;
-st11:
- if ( ++p == pe )
- goto _out11;
-case 11:
-#line 471 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr17;
- case 10: goto tr18;
- case 13: goto tr17;
- case 32: goto tr17;
- case 62: goto tr20;
- case 95: goto tr19;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr19;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr19;
- } else
- goto tr19;
- goto st0;
-tr21:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st12;
-tr22:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st12;
-tr17:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st12;
-tr18:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st12;
-st12:
- if ( ++p == pe )
- goto _out12;
-case 12:
-#line 517 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr21;
- case 10: goto tr22;
- case 13: goto tr21;
- case 32: goto tr21;
- case 62: goto tr23;
- }
- goto st0;
-tr26:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st13;
-tr14:
-#line 149 "xmlscan.rl"
- { tag_id_start = p; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st13;
-st13:
- if ( ++p == pe )
- goto _out13;
-case 13:
-#line 540 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr24;
- case 10: goto tr25;
- case 13: goto tr24;
- case 32: goto tr24;
- case 62: goto tr27;
- case 95: goto tr26;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr26;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr26;
- } else
- goto tr26;
- goto st0;
-tr28:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-tr29:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-tr24:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-tr25:
-#line 150 "xmlscan.rl"
- { tag_id_len = p - tag_id_start; }
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-tr44:
-#line 132 "xmlscan.rl"
- {
- attr_value_len = p - attr_value_start;
-
- AttrMarker newAttr;
- newAttr.id = attr_id_start;
- newAttr.idLen = attr_id_len;
- newAttr.value = attr_value_start;
- newAttr.valueLen = attr_value_len;
- attrMkList.append( newAttr );
- }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-tr45:
-#line 132 "xmlscan.rl"
- {
- attr_value_len = p - attr_value_start;
-
- AttrMarker newAttr;
- newAttr.id = attr_id_start;
- newAttr.idLen = attr_id_len;
- newAttr.value = attr_value_start;
- newAttr.valueLen = attr_value_len;
- attrMkList.append( newAttr );
- }
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st14;
-st14:
- if ( ++p == pe )
- goto _out14;
-case 14:
-#line 618 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr28;
- case 10: goto tr29;
- case 13: goto tr28;
- case 32: goto tr28;
- case 62: goto tr30;
- case 95: goto tr31;
- }
- if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr31;
- } else if ( (*p) >= 65 )
- goto tr31;
- goto st0;
-tr34:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st15;
-tr31:
-#line 124 "xmlscan.rl"
- { attr_id_start = p; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st15;
-tr47:
-#line 132 "xmlscan.rl"
- {
- attr_value_len = p - attr_value_start;
-
- AttrMarker newAttr;
- newAttr.id = attr_id_start;
- newAttr.idLen = attr_id_len;
- newAttr.value = attr_value_start;
- newAttr.valueLen = attr_value_len;
- attrMkList.append( newAttr );
- }
-#line 124 "xmlscan.rl"
- { attr_id_start = p; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st15;
-st15:
- if ( ++p == pe )
- goto _out15;
-case 15:
-#line 664 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr32;
- case 10: goto tr33;
- case 13: goto tr32;
- case 32: goto tr32;
- case 61: goto tr35;
- case 95: goto tr34;
- }
- if ( (*p) < 65 ) {
- if ( 48 <= (*p) && (*p) <= 57 )
- goto tr34;
- } else if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr34;
- } else
- goto tr34;
- goto st0;
-tr36:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st16;
-tr37:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st16;
-tr32:
-#line 125 "xmlscan.rl"
- { attr_id_len = p - attr_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st16;
-tr33:
-#line 125 "xmlscan.rl"
- { attr_id_len = p - attr_id_start; }
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st16;
-st16:
- if ( ++p == pe )
- goto _out16;
-case 16:
-#line 710 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr36;
- case 10: goto tr37;
- case 13: goto tr36;
- case 32: goto tr36;
- case 61: goto tr38;
- }
- goto st0;
-tr38:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st17;
-tr39:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st17;
-tr35:
-#line 125 "xmlscan.rl"
- { attr_id_len = p - attr_id_start; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st17;
-st17:
- if ( ++p == pe )
- goto _out17;
-case 17:
-#line 739 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr38;
- case 10: goto tr39;
- case 13: goto tr38;
- case 32: goto tr38;
- case 34: goto tr40;
- }
- goto st0;
-tr41:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st18;
-tr42:
-#line 117 "xmlscan.rl"
- { curcol = 0; curline++; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st18;
-tr40:
-#line 130 "xmlscan.rl"
- { attr_value_start = p; }
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st18;
-st18:
- if ( ++p == pe )
- goto _out18;
-case 18:
-#line 768 "xmlscan.cpp"
- switch( (*p) ) {
- case 10: goto tr42;
- case 34: goto tr43;
- }
- goto tr41;
-tr43:
-#line 115 "xmlscan.rl"
- { curcol++; }
- goto st19;
-st19:
- if ( ++p == pe )
- goto _out19;
-case 19:
-#line 782 "xmlscan.cpp"
- switch( (*p) ) {
- case 9: goto tr44;
- case 10: goto tr45;
- case 13: goto tr44;
- case 32: goto tr44;
- case 62: goto tr46;
- case 95: goto tr47;
- }
- if ( (*p) > 90 ) {
- if ( 97 <= (*p) && (*p) <= 122 )
- goto tr47;
- } else if ( (*p) >= 65 )
- goto tr47;
- goto st0;
- }
- _out20: cs = 20; goto _out;
- _out1: cs = 1; goto _out;
- _out0: cs = 0; goto _out;
- _out2: cs = 2; goto _out;
- _out3: cs = 3; goto _out;
- _out4: cs = 4; goto _out;
- _out5: cs = 5; goto _out;
- _out6: cs = 6; goto _out;
- _out7: cs = 7; goto _out;
- _out8: cs = 8; goto _out;
- _out9: cs = 9; goto _out;
- _out10: cs = 10; goto _out;
- _out11: cs = 11; goto _out;
- _out12: cs = 12; goto _out;
- _out13: cs = 13; goto _out;
- _out14: cs = 14; goto _out;
- _out15: cs = 15; goto _out;
- _out16: cs = 16; goto _out;
- _out17: cs = 17; goto _out;
- _out18: cs = 18; goto _out;
- _out19: cs = 19; goto _out;
-
- _out: {}
- }
-#line 239 "xmlscan.rl"
-
- if ( cs == Scanner_error )
- return TK_ERR;
-
- if ( token != TK_NO_TOKEN ) {
- /* fbreak does not advance p, so we do it manually. */
- p = p + 1;
- data_len = p - data;
- return token;
- }
- }
-}
-
-int xml_parse( std::istream &input, const char *fileName,
- bool outputActive, bool wantComplete )
-{
- Scanner scanner( fileName, input );
- Parser parser( fileName, outputActive, wantComplete );
-
- parser.init();
-
- while ( 1 ) {
- int token = scanner.scan();
- if ( token == TK_NO_TOKEN ) {
-			cerr << "xmlscan: internal error: scanner returned NO_TOKEN" << endl;
- exit(1);
- }
- else if ( token == TK_EOF ) {
- parser.token( _eof, scanner.token_col, scanner.token_line );
- break;
- }
- else if ( token == TK_ERR ) {
- scanner.error() << "scanner error" << endl;
- break;
- }
- else if ( token == TK_SPACE ) {
- scanner.error() << "scanner is out of buffer space" << endl;
- break;
- }
- else {
- /* All other tokens are either open or close tags. */
- XMLTagHashPair *tagId = Perfect_Hash::in_word_set(
- scanner.tag_id_start, scanner.tag_id_len );
-
- XMLTag *tag = new XMLTag( tagId, token == TK_OpenTag ?
- XMLTag::Open : XMLTag::Close );
-
- if ( tagId != 0 ) {
- /* Get attributes for open tags. */
- if ( token == TK_OpenTag && scanner.attrMkList.length() > 0 ) {
- tag->attrList = new AttrList;
- for ( AttrMkList::Iter attr = scanner.attrMkList;
- attr.lte(); attr++ )
- {
- Attribute newAttr;
- newAttr.id = new char[attr->idLen+1];
- memcpy( newAttr.id, attr->id, attr->idLen );
- newAttr.id[attr->idLen] = 0;
-
- /* Exclude the surrounding quotes. */
- newAttr.value = new char[attr->valueLen-1];
- memcpy( newAttr.value, attr->value+1, attr->valueLen-2 );
- newAttr.value[attr->valueLen-2] = 0;
-
- tag->attrList->append( newAttr );
- }
- }
-
- /* Get content for closing tags. */
- if ( token == TK_CloseTag ) {
- switch ( tagId->id ) {
- case TAG_host: case TAG_arg:
- case TAG_t: case TAG_alphtype:
- case TAG_text: case TAG_goto:
- case TAG_call: case TAG_next:
- case TAG_entry: case TAG_set_tokend:
- case TAG_set_act: case TAG_start_state:
- case TAG_error_state: case TAG_state_actions:
- case TAG_action_table: case TAG_cond_space:
- case TAG_c: case TAG_ex:
- tag->content = new char[scanner.buffer.length+1];
- memcpy( tag->content, scanner.buffer.data,
- scanner.buffer.length );
- tag->content[scanner.buffer.length] = 0;
- break;
- }
- }
- }
-
- #if 0
- cerr << "parser_driver: " << (tag->type == XMLTag::Open ? "open" : "close") <<
- ": " << (tag->tagId != 0 ? tag->tagId->name : "<unknown>") << endl;
- if ( tag->attrList != 0 ) {
- for ( AttrList::Iter attr = *tag->attrList; attr.lte(); attr++ )
- cerr << " " << attr->id << ": " << attr->value << endl;
- }
- if ( tag->content != 0 )
- cerr << " content: " << tag->content << endl;
- #endif
-
- parser.token( tag, scanner.token_col, scanner.token_line );
- }
- }
-
- return 0;
-}
-
-std::ostream &Scanner::error()
-{
- cerr << fileName << ":" << curline << ":" << curcol << ": ";
- return cerr;
-}
diff --git a/contrib/tools/ragel5/redfsm/xmltags.cpp b/contrib/tools/ragel5/redfsm/xmltags.cpp
deleted file mode 100644
index 5fbfabab1d..0000000000
--- a/contrib/tools/ragel5/redfsm/xmltags.cpp
+++ /dev/null
@@ -1,244 +0,0 @@
-/* C++ code produced by gperf version 3.0.1 */
-/* Command-line: gperf -L C++ -t xmltags.gperf */
-/* Computed positions: -k'1,3' */
-
-#if !((' ' == 32) && ('!' == 33) && ('"' == 34) && ('#' == 35) \
- && ('%' == 37) && ('&' == 38) && ('\'' == 39) && ('(' == 40) \
- && (')' == 41) && ('*' == 42) && ('+' == 43) && (',' == 44) \
- && ('-' == 45) && ('.' == 46) && ('/' == 47) && ('0' == 48) \
- && ('1' == 49) && ('2' == 50) && ('3' == 51) && ('4' == 52) \
- && ('5' == 53) && ('6' == 54) && ('7' == 55) && ('8' == 56) \
- && ('9' == 57) && (':' == 58) && (';' == 59) && ('<' == 60) \
- && ('=' == 61) && ('>' == 62) && ('?' == 63) && ('A' == 65) \
- && ('B' == 66) && ('C' == 67) && ('D' == 68) && ('E' == 69) \
- && ('F' == 70) && ('G' == 71) && ('H' == 72) && ('I' == 73) \
- && ('J' == 74) && ('K' == 75) && ('L' == 76) && ('M' == 77) \
- && ('N' == 78) && ('O' == 79) && ('P' == 80) && ('Q' == 81) \
- && ('R' == 82) && ('S' == 83) && ('T' == 84) && ('U' == 85) \
- && ('V' == 86) && ('W' == 87) && ('X' == 88) && ('Y' == 89) \
- && ('Z' == 90) && ('[' == 91) && ('\\' == 92) && (']' == 93) \
- && ('^' == 94) && ('_' == 95) && ('a' == 97) && ('b' == 98) \
- && ('c' == 99) && ('d' == 100) && ('e' == 101) && ('f' == 102) \
- && ('g' == 103) && ('h' == 104) && ('i' == 105) && ('j' == 106) \
- && ('k' == 107) && ('l' == 108) && ('m' == 109) && ('n' == 110) \
- && ('o' == 111) && ('p' == 112) && ('q' == 113) && ('r' == 114) \
- && ('s' == 115) && ('t' == 116) && ('u' == 117) && ('v' == 118) \
- && ('w' == 119) && ('x' == 120) && ('y' == 121) && ('z' == 122) \
- && ('{' == 123) && ('|' == 124) && ('}' == 125) && ('~' == 126))
-/* The character set is not based on ISO-646. */
-#error "gperf generated tables don't work with this execution character set. Please report a bug to <bug-gnu-gperf@gnu.org>."
-#endif
-
-#line 23 "xmltags.gperf"
-
-#include <string.h>
-#include "xmlparse.h"
-#line 28 "xmltags.gperf"
-struct XMLTagHashPair;
-
-#define TOTAL_KEYWORDS 55
-#define MIN_WORD_LENGTH 1
-#define MAX_WORD_LENGTH 17
-#define MIN_HASH_VALUE 5
-#define MAX_HASH_VALUE 84
-/* maximum key range = 80, duplicates = 0 */
-
-#include "phash.h"
-
-inline unsigned int
-Perfect_Hash::hash (register const char *str, register unsigned int len)
-{
- static const unsigned char asso_values[] =
- {
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 20, 85, 5, 41, 35,
- 5, 35, 85, 15, 10, 0, 85, 85, 40, 0,
- 15, 85, 40, 85, 25, 0, 10, 85, 85, 0,
- 56, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85, 85, 85, 85, 85,
- 85, 85, 85, 85, 85, 85
- };
- int hval = len;
-
- switch (hval)
- {
- default:
- hval += asso_values[(unsigned char)str[2]];
- /*FALLTHROUGH*/
- case 2:
- case 1:
- hval += asso_values[(unsigned char)str[0]];
- break;
- }
- return hval;
-}
-
-struct XMLTagHashPair *
-Perfect_Hash::in_word_set (register const char *str, register unsigned int len)
-{
- static struct XMLTagHashPair wordlist[] =
- {
- {""}, {""}, {""}, {""}, {""},
-#line 74 "xmltags.gperf"
- {"write", TAG_write},
- {""}, {""},
-#line 68 "xmltags.gperf"
- {"init_act", TAG_init_act},
- {""},
-#line 34 "xmltags.gperf"
- {"state", TAG_state},
-#line 36 "xmltags.gperf"
- {"t", TAG_t},
- {""},
-#line 72 "xmltags.gperf"
- {"init_tokstart", TAG_init_tokstart},
-#line 32 "xmltags.gperf"
- {"host", TAG_host},
-#line 33 "xmltags.gperf"
- {"state_list", TAG_state_list},
-#line 38 "xmltags.gperf"
- {"start_state", TAG_start_state},
-#line 69 "xmltags.gperf"
- {"set_act", TAG_set_act},
-#line 46 "xmltags.gperf"
- {"state_actions", TAG_state_actions},
-#line 65 "xmltags.gperf"
- {"data", TAG_data},
-#line 71 "xmltags.gperf"
- {"set_tokend", TAG_set_tokend},
-#line 41 "xmltags.gperf"
- {"action", TAG_action},
-#line 73 "xmltags.gperf"
- {"set_tokstart", TAG_set_tokstart},
-#line 78 "xmltags.gperf"
- {"arg", TAG_arg},
- {""},
-#line 35 "xmltags.gperf"
- {"trans_list", TAG_trans_list},
-#line 40 "xmltags.gperf"
- {"action_list", TAG_action_list},
-#line 43 "xmltags.gperf"
- {"action_table", TAG_action_table},
- {""},
-#line 49 "xmltags.gperf"
- {"goto", TAG_goto},
- {""},
-#line 45 "xmltags.gperf"
- {"getkey", TAG_getkey},
-#line 42 "xmltags.gperf"
- {"action_table_list", TAG_action_table_list},
- {""},
-#line 52 "xmltags.gperf"
- {"goto_expr", TAG_goto_expr},
-#line 70 "xmltags.gperf"
- {"get_tokend", TAG_get_tokend},
-#line 82 "xmltags.gperf"
- {"c", TAG_c},
-#line 84 "xmltags.gperf"
- {"ex", TAG_ex},
-#line 55 "xmltags.gperf"
- {"ret", TAG_ret},
- {""},
-#line 63 "xmltags.gperf"
- {"targs", TAG_targs},
- {""},
-#line 37 "xmltags.gperf"
- {"machine", TAG_machine},
- {""},
-#line 57 "xmltags.gperf"
- {"char", TAG_char},
-#line 30 "xmltags.gperf"
- {"ragel", TAG_ragel},
-#line 76 "xmltags.gperf"
- {"access", TAG_access},
- {""}, {""},
-#line 31 "xmltags.gperf"
- {"ragel_def", TAG_ragel_def},
-#line 64 "xmltags.gperf"
- {"entry", TAG_entry},
-#line 67 "xmltags.gperf"
- {"sub_action", TAG_sub_action},
- {""},
-#line 44 "xmltags.gperf"
- {"alphtype", TAG_alphtype},
-#line 58 "xmltags.gperf"
- {"hold", TAG_hold},
-#line 56 "xmltags.gperf"
- {"pchar", TAG_pchar},
-#line 60 "xmltags.gperf"
- {"holdte", TAG_holdte},
-#line 47 "xmltags.gperf"
- {"entry_points", TAG_entry_points},
- {""},
-#line 81 "xmltags.gperf"
- {"cond_list", TAG_cond_list},
-#line 80 "xmltags.gperf"
- {"cond_space", TAG_cond_space},
- {""}, {""}, {""},
-#line 62 "xmltags.gperf"
- {"curs", TAG_curs},
-#line 79 "xmltags.gperf"
- {"cond_space_list", TAG_cond_space_list},
- {""}, {""},
-#line 75 "xmltags.gperf"
- {"curstate", TAG_curstate},
-#line 66 "xmltags.gperf"
- {"lm_switch", TAG_lm_switch},
-#line 48 "xmltags.gperf"
- {"text", TAG_text},
-#line 39 "xmltags.gperf"
- {"error_state", TAG_error_state},
- {""}, {""},
-#line 59 "xmltags.gperf"
- {"exec", TAG_exec},
-#line 51 "xmltags.gperf"
- {"next", TAG_next},
-#line 61 "xmltags.gperf"
- {"execte", TAG_execte},
- {""}, {""},
-#line 50 "xmltags.gperf"
- {"call", TAG_call},
-#line 54 "xmltags.gperf"
- {"next_expr", TAG_next_expr},
-#line 77 "xmltags.gperf"
- {"break", TAG_break},
-#line 83 "xmltags.gperf"
- {"exports", TAG_exports},
- {""},
-#line 53 "xmltags.gperf"
- {"call_expr", TAG_call_expr}
- };
-
- if (len <= MAX_WORD_LENGTH && len >= MIN_WORD_LENGTH)
- {
- int key = hash (str, len);
-
- if (key <= MAX_HASH_VALUE && key >= 0)
- {
- const char *s = wordlist[key].name;
-
- if (*str == *s && !strncmp (str + 1, s + 1, len - 1) && s[len] == '\0')
- return &wordlist[key];
- }
- }
- return 0;
-}
diff --git a/contrib/tools/ragel5/redfsm/ya.make b/contrib/tools/ragel5/redfsm/ya.make
deleted file mode 100644
index 8bb2b97d44..0000000000
--- a/contrib/tools/ragel5/redfsm/ya.make
+++ /dev/null
@@ -1,25 +0,0 @@
-LIBRARY()
-
-LICENSE(GPL-2.0-or-later)
-
-NO_UTIL()
-NO_COMPILER_WARNINGS()
-
-ADDINCL(
- GLOBAL contrib/tools/ragel5/redfsm
-)
-
-PEERDIR(
- contrib/tools/ragel5/aapl
- contrib/tools/ragel5/common
-)
-
-SRCS(
- gendata.cpp
- redfsm.cpp
- xmlparse.cpp
- xmlscan.cpp
- xmltags.cpp
-)
-
-END()
diff --git a/contrib/tools/ragel5/rlgen-cd/fflatcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/fflatcodegen.cpp
deleted file mode 100644
index 813347fd2b..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fflatcodegen.cpp
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright 2004-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "fflatcodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-
-std::ostream &FFlatCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->actListId+1;
- out << act;
- return out;
-}
-
-std::ostream &FFlatCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->actListId+1;
- out << act;
- return out;
-}
-
-std::ostream &FFlatCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->actListId+1;
- out << act;
- return out;
-}
-
-/* Write out the function for a transition. */
-std::ostream &FFlatCodeGen::TRANS_ACTION( RedTransAp *trans )
-{
- int action = 0;
- if ( trans->action != 0 )
- action = trans->action->actListId+1;
- out << action;
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FFlatCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numToStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FFlatCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numFromStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &FFlatCodeGen::EOF_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numEofRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, true );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FFlatCodeGen::ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numTransRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-void FFlatCodeGen::writeData()
-{
- if ( redFsm->anyConditions() ) {
- OPEN_ARRAY( WIDE_ALPH_TYPE(), CK() );
- COND_KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondSpan), CSP() );
- COND_KEY_SPANS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCond), C() );
- CONDS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondIndexOffset), CO() );
- COND_INDEX_OFFSET();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), K() );
- KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxSpan), SP() );
- KEY_SPANS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxFlatIndexOffset), IO() );
- FLAT_INDEX_OFFSET();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndex), I() );
- INDICIES();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), TA() );
- TRANS_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void FFlatCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out <<
- " {\n"
- " int _slen";
-
- if ( redFsm->anyRegCurStateRef() )
- out << ", _ps";
-
- out << ";\n";
- out << " int _trans";
-
- if ( redFsm->anyConditions() )
- out << ", _cond";
-
- out << ";\n";
-
- out <<
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_keys;\n"
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxIndex) << POINTER() << "_inds;\n";
-
- if ( redFsm->anyConditions() ) {
- out <<
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxCond) << POINTER() << "_conds;\n"
- " " << WIDE_ALPH_TYPE() << " _widec;\n";
- }
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->errState != 0 ) {
- outLabelUsed = true;
- out <<
- " if ( " << CS() << " == " << redFsm->errState->id << " )\n"
- " goto _out;\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " switch ( " << FSA() << "[" << CS() << "] ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyConditions() )
- COND_TRANSLATE();
-
- LOCATE_TRANS();
-
- if ( redFsm->anyRegCurStateRef() )
- out << " _ps = " << CS() << ";\n";
-
- out <<
- " " << CS() << " = " << TT() << "[_trans];\n\n";
-
- if ( redFsm->anyRegActions() ) {
- out <<
- " if ( " << TA() << "[_trans] == 0 )\n"
- " goto _again;\n"
- "\n"
- " switch ( " << TA() << "[_trans] ) {\n";
- ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyRegActions() || redFsm->anyActionGotos() ||
- redFsm->anyActionCalls() || redFsm->anyActionRets() )
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " switch ( " << TSA() << "[" << CS() << "] ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-void FFlatCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " switch ( " << EA() << "[" << CS() << "] ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-}
diff --git a/contrib/tools/ragel5/rlgen-cd/fflatcodegen.h b/contrib/tools/ragel5/rlgen-cd/fflatcodegen.h
deleted file mode 100644
index cf92fd9baf..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fflatcodegen.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2004-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FFLATCODEGEN_H
-#define _FFLATCODEGEN_H
-
-#include <iostream>
-#include "flatcodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-
-/*
- * FFlatCodeGen
- */
-class FFlatCodeGen : public FlatCodeGen
-{
-protected:
- FFlatCodeGen( ostream &out ) : FsmCodeGen(out), FlatCodeGen(out) {}
-
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &EOF_ACTION_SWITCH();
- std::ostream &ACTION_SWITCH();
-
- virtual std::ostream &TO_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &FROM_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &EOF_ACTION( RedStateAp *state );
- virtual std::ostream &TRANS_ACTION( RedTransAp *trans );
-
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
-};
-
-/*
- * CFFlatCodeGen
- */
-struct CFFlatCodeGen
- : public FFlatCodeGen, public CCodeGen
-{
- CFFlatCodeGen( ostream &out ) :
- FsmCodeGen(out), FFlatCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * DFFlatCodeGen
- */
-struct DFFlatCodeGen
- : public FFlatCodeGen, public DCodeGen
-{
- DFFlatCodeGen( ostream &out ) :
- FsmCodeGen(out), FFlatCodeGen(out), DCodeGen(out) {}
-};
-
-#endif /* _FFLATCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/fgotocodegen.cpp b/contrib/tools/ragel5/rlgen-cd/fgotocodegen.cpp
deleted file mode 100644
index 9c4f039f39..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fgotocodegen.cpp
+++ /dev/null
@@ -1,262 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "fgotocodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-#include "bstmap.h"
-
-std::ostream &FGotoCodeGen::EXEC_ACTIONS()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numTransRefs > 0 ) {
- /* We are at the start of a glob, write the case. */
- out << "f" << redAct->actListId << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tgoto _again;\n";
- }
- }
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FGotoCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numToStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FGotoCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numFromStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &FGotoCodeGen::EOF_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numEofRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, true );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-
-std::ostream &FGotoCodeGen::FINISH_CASES()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* States that are final and have an out action need a case. */
- if ( st->eofAction != 0 ) {
- /* Write the case label. */
- out << "\t\tcase " << st->id << ": ";
-
- /* Jump to the func. */
- out << "goto f" << st->eofAction->actListId << ";\n";
- }
- }
-
- return out;
-}
-
-unsigned int FGotoCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->actListId+1;
- return act;
-}
-
-unsigned int FGotoCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->actListId+1;
- return act;
-}
-
-unsigned int FGotoCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->actListId+1;
- return act;
-}
-
-void FGotoCodeGen::writeData()
-{
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void FGotoCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out << " {\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " int _ps = 0;\n";
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " switch ( " << FSA() << "[" << CS() << "] ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- out <<
- " switch ( " << CS() << " ) {\n";
- STATE_GOTOS();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- TRANSITIONS() <<
- "\n";
-
- if ( redFsm->anyRegActions() )
- EXEC_ACTIONS() << "\n";
-
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " switch ( " << TSA() << "[" << CS() << "] ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-void FGotoCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " switch ( " << EA() << "[" << CS() << "] ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-}
diff --git a/contrib/tools/ragel5/rlgen-cd/fgotocodegen.h b/contrib/tools/ragel5/rlgen-cd/fgotocodegen.h
deleted file mode 100644
index 076f5c4f7f..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fgotocodegen.h
+++ /dev/null
@@ -1,76 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FGOTOCODEGEN_H
-#define _FGOTOCODEGEN_H
-
-#include <iostream>
-#include "gotocodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-
-
-/*
- * class FGotoCodeGen
- */
-class FGotoCodeGen : public GotoCodeGen
-{
-public:
- FGotoCodeGen( ostream &out ) : FsmCodeGen(out), GotoCodeGen(out) {}
-
- std::ostream &EXEC_ACTIONS();
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &FINISH_CASES();
- std::ostream &EOF_ACTION_SWITCH();
- unsigned int TO_STATE_ACTION( RedStateAp *state );
- unsigned int FROM_STATE_ACTION( RedStateAp *state );
- unsigned int EOF_ACTION( RedStateAp *state );
-
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
-};
-
-/*
- * class CFGotoCodeGen
- */
-struct CFGotoCodeGen
- : public FGotoCodeGen, public CCodeGen
-{
- CFGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), FGotoCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * class DFGotoCodeGen
- */
-struct DFGotoCodeGen
- : public FGotoCodeGen, public DCodeGen
-{
- DFGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), FGotoCodeGen(out), DCodeGen(out) {}
-};
-
-#endif /* _FGOTOCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/flatcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/flatcodegen.cpp
deleted file mode 100644
index 117f3798c9..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/flatcodegen.cpp
+++ /dev/null
@@ -1,766 +0,0 @@
-/*
- * Copyright 2004-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "flatcodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-
-std::ostream &FlatCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->location+1;
- out << act;
- return out;
-}
-
-std::ostream &FlatCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->location+1;
- out << act;
- return out;
-}
-
-std::ostream &FlatCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->location+1;
- out << act;
- return out;
-}
-
-std::ostream &FlatCodeGen::TRANS_ACTION( RedTransAp *trans )
-{
- /* If there are actions, emit them. Otherwise emit zero. */
- int act = 0;
- if ( trans->action != 0 )
- act = trans->action->location+1;
- out << act;
- return out;
-}
-
-std::ostream &FlatCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numToStateRefs > 0 ) {
- /* Write the case label, the action and the case break */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &FlatCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numFromStateRefs > 0 ) {
- /* Write the case label, the action and the case break */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &FlatCodeGen::EOF_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numEofRefs > 0 ) {
- /* Write the case label, the action and the case break */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, true );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-
-std::ostream &FlatCodeGen::ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numTransRefs > 0 ) {
- /* Write the case label, the action and the case break */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-
-std::ostream &FlatCodeGen::FLAT_INDEX_OFFSET()
-{
- out << "\t";
- int totalStateNum = 0, curIndOffset = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write the index offset. */
- out << curIndOffset;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
-
- /* Move the index offset ahead. */
- if ( st->transList != 0 )
- curIndOffset += keyOps->span( st->lowKey, st->highKey );
-
- if ( st->defTrans != 0 )
- curIndOffset += 1;
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::KEY_SPANS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write singles length. */
- unsigned long long span = 0;
- if ( st->transList != 0 )
- span = keyOps->span( st->lowKey, st->highKey );
- out << span;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::TO_STATE_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write any eof action. */
- TO_STATE_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::FROM_STATE_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write any eof action. */
- FROM_STATE_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::EOF_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write any eof action. */
- EOF_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::COND_KEYS()
-{
- out << '\t';
- int totalTrans = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Emit just cond low key and cond high key. */
- out << KEY( st->condLowKey ) << ", ";
- out << KEY( st->condHighKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::COND_KEY_SPANS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write singles length. */
- unsigned long long span = 0;
- if ( st->condList != 0 )
- span = keyOps->span( st->condLowKey, st->condHighKey );
- out << span;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::CONDS()
-{
- int totalTrans = 0;
- out << '\t';
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->condList != 0 ) {
- /* Walk the singles. */
- unsigned long long span = keyOps->span( st->condLowKey, st->condHighKey );
- for ( unsigned long long pos = 0; pos < span; pos++ ) {
- if ( st->condList[pos] != 0 )
- out << st->condList[pos]->condSpaceId + 1 << ", ";
- else
- out << "0, ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::COND_INDEX_OFFSET()
-{
- out << "\t";
- int totalStateNum = 0, curIndOffset = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write the index offset. */
- out << curIndOffset;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
-
- /* Move the index offset ahead. */
- if ( st->condList != 0 )
- curIndOffset += keyOps->span( st->condLowKey, st->condHighKey );
- }
- out << "\n";
- return out;
-}
-
-
-std::ostream &FlatCodeGen::KEYS()
-{
- out << '\t';
- int totalTrans = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Emit just low key and high key. */
- out << KEY( st->lowKey ) << ", ";
- out << KEY( st->highKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::INDICIES()
-{
- int totalTrans = 0;
- out << '\t';
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->transList != 0 ) {
- /* Walk the singles. */
- unsigned long long span = keyOps->span( st->lowKey, st->highKey );
- for ( unsigned long long pos = 0; pos < span; pos++ ) {
- out << st->transList[pos]->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* The state's default index goes next. */
- if ( st->defTrans != 0 )
- out << st->defTrans->id << ", ";
-
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &FlatCodeGen::TRANS_TARGS()
-{
- /* Transitions must be written ordered by their id. */
- RedTransAp **transPtrs = new RedTransAp*[redFsm->transSet.length()];
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- transPtrs[trans->id] = trans;
-
- /* Keep a count of the num of items in the array written. */
- out << '\t';
- int totalStates = 0;
- for ( int t = 0; t < redFsm->transSet.length(); t++ ) {
- /* Write out the target state. */
- RedTransAp *trans = transPtrs[t];
- out << trans->targ->id;
- if ( t < redFsm->transSet.length()-1 ) {
- out << ", ";
- if ( ++totalStates % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] transPtrs;
- return out;
-}
-
-
-std::ostream &FlatCodeGen::TRANS_ACTIONS()
-{
- /* Transitions must be written ordered by their id. */
- RedTransAp **transPtrs = new RedTransAp*[redFsm->transSet.length()];
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- transPtrs[trans->id] = trans;
-
- /* Keep a count of the num of items in the array written. */
- out << '\t';
- int totalAct = 0;
- for ( int t = 0; t < redFsm->transSet.length(); t++ ) {
- /* Write the function for the transition. */
- RedTransAp *trans = transPtrs[t];
- TRANS_ACTION( trans );
- if ( t < redFsm->transSet.length()-1 ) {
- out << ", ";
- if ( ++totalAct % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] transPtrs;
- return out;
-}
-
-void FlatCodeGen::LOCATE_TRANS()
-{
- out <<
- " _keys = " << ARR_OFF( K(), "(" + CS() + "<<1)" ) << ";\n"
- " _inds = " << ARR_OFF( I(), IO() + "[" + CS() + "]" ) << ";\n"
- "\n"
- " _slen = " << SP() << "[" << CS() << "];\n"
- " _trans = _inds[ _slen > 0 && _keys[0] <=" << GET_WIDE_KEY() << " &&\n"
- " " << GET_WIDE_KEY() << " <= _keys[1] ?\n"
- " " << GET_WIDE_KEY() << " - _keys[0] : _slen ];\n"
- "\n";
-}
-
-void FlatCodeGen::GOTO( ostream &ret, int gotoDest, bool inFinish )
-{
- ret << "{" << CS() << " = " << gotoDest << "; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void FlatCodeGen::GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << "{" << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void FlatCodeGen::CURS( ostream &ret, bool inFinish )
-{
- ret << "(_ps)";
-}
-
-void FlatCodeGen::TARGS( ostream &ret, bool inFinish, int targState )
-{
- ret << "(" << CS() << ")";
-}
-
-void FlatCodeGen::NEXT( ostream &ret, int nextDest, bool inFinish )
-{
- ret << CS() << " = " << nextDest << ";";
-}
-
-void FlatCodeGen::NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << ");";
-}
-
-void FlatCodeGen::CALL( ostream &ret, int callDest, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = " <<
- callDest << "; " << CTRL_FLOW() << "goto _again;}";
-}
-
-
-void FlatCodeGen::CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, targState, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-
-void FlatCodeGen::RET( ostream &ret, bool inFinish )
-{
- ret << "{" << CS() << " = " << STACK() << "[--" << TOP() << "]; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void FlatCodeGen::BREAK( ostream &ret, int targState )
-{
- outLabelUsed = true;
- ret << CTRL_FLOW() << "goto _out;";
-}
-
-void FlatCodeGen::writeData()
-{
-	/* If there are any transition functions then output the array. If there
- * are none, don't bother emitting an empty array that won't be used. */
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActArrItem), A() );
- ACTIONS_ARRAY();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyConditions() ) {
- OPEN_ARRAY( WIDE_ALPH_TYPE(), CK() );
- COND_KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondSpan), CSP() );
- COND_KEY_SPANS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCond), C() );
- CONDS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondIndexOffset), CO() );
- COND_INDEX_OFFSET();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), K() );
- KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxSpan), SP() );
- KEY_SPANS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxFlatIndexOffset), IO() );
- FLAT_INDEX_OFFSET();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndex), I() );
- INDICIES();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TA() );
- TRANS_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void FlatCodeGen::COND_TRANSLATE()
-{
- out <<
- " _widec = " << GET_KEY() << ";\n";
-
- out <<
- " _keys = " << ARR_OFF( CK(), "(" + CS() + "<<1)" ) << ";\n"
- " _conds = " << ARR_OFF( C(), CO() + "[" + CS() + "]" ) << ";\n"
- "\n"
- " _slen = " << CSP() << "[" << CS() << "];\n"
- " _cond = _slen > 0 && _keys[0] <=" << GET_WIDE_KEY() << " &&\n"
- " " << GET_WIDE_KEY() << " <= _keys[1] ?\n"
- " _conds[" << GET_WIDE_KEY() << " - _keys[0]] : 0;\n"
- "\n";
-
- out <<
- " switch ( _cond ) {\n";
- for ( CondSpaceList::Iter csi = condSpaceList; csi.lte(); csi++ ) {
- CondSpace *condSpace = csi;
- out << " case " << condSpace->condSpaceId + 1 << ": {\n";
- out << TABS(2) << "_widec = " << CAST(WIDE_ALPH_TYPE()) << "(" <<
- KEY(condSpace->baseKey) << " + (" << GET_KEY() <<
- " - " << KEY(keyOps->minKey) << "));\n";
-
- for ( CondSet::Iter csi = condSpace->condSet; csi.lte(); csi++ ) {
- out << TABS(2) << "if ( ";
- CONDITION( out, *csi );
- Size condValOffset = ((1 << csi.pos()) * keyOps->alphSize());
- out << " ) _widec += " << condValOffset << ";\n";
- }
-
- out << " }\n";
- out << " break;\n";
- }
-
- SWITCH_DEFAULT();
-
- out <<
- " }\n";
-}
-
-void FlatCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out <<
- " {\n"
- " int _slen";
-
- if ( redFsm->anyRegCurStateRef() )
- out << ", _ps";
-
- out <<
- ";\n"
- " int _trans";
-
- if ( redFsm->anyConditions() )
- out << ", _cond";
- out << ";\n";
-
- if ( redFsm->anyToStateActions() ||
- redFsm->anyRegActions() || redFsm->anyFromStateActions() )
- {
- out <<
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts;\n"
- " " << UINT() << " _nacts;\n";
- }
-
- out <<
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_keys;\n"
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxIndex) << POINTER() << "_inds;\n";
-
- if ( redFsm->anyConditions() ) {
- out <<
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxCond) << POINTER() << "_conds;\n"
- " " << WIDE_ALPH_TYPE() << " _widec;\n";
- }
-
- out << "\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->errState != 0 ) {
- outLabelUsed = true;
- out <<
- " if ( " << CS() << " == " << redFsm->errState->id << " )\n"
- " goto _out;\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), FSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyConditions() )
- COND_TRANSLATE();
-
- LOCATE_TRANS();
-
- if ( redFsm->anyRegCurStateRef() )
- out << " _ps = " << CS() << ";\n";
-
- out <<
- " " << CS() << " = " << TT() << "[_trans];\n"
- "\n";
-
- if ( redFsm->anyRegActions() ) {
- out <<
- " if ( " << TA() << "[_trans] == 0 )\n"
- " goto _again;\n"
- "\n"
- " _acts = " << ARR_OFF( A(), TA() + "[_trans]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *(_acts++) )\n {\n";
- ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyRegActions() || redFsm->anyActionGotos() ||
- redFsm->anyActionCalls() || redFsm->anyActionRets() )
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), TSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-void FlatCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts = " <<
- ARR_OFF( A(), EA() + "[" + CS() + "]" ) << ";\n"
- " " << UINT() << " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- " }\n"
- "\n";
- }
-}
diff --git a/contrib/tools/ragel5/rlgen-cd/flatcodegen.h b/contrib/tools/ragel5/rlgen-cd/flatcodegen.h
deleted file mode 100644
index 27dee2ef92..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/flatcodegen.h
+++ /dev/null
@@ -1,108 +0,0 @@
-/*
- * Copyright 2004-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FLATCODEGEN_H
-#define _FLATCODEGEN_H
-
-#include <iostream>
-#include "fsmcodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-struct NameInst;
-struct RedTransAp;
-struct RedStateAp;
-
-/*
- * FlatCodeGen
- */
-class FlatCodeGen : virtual public FsmCodeGen
-{
-public:
- FlatCodeGen( ostream &out ) : FsmCodeGen(out) {}
- virtual ~FlatCodeGen() { }
-
-protected:
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &EOF_ACTION_SWITCH();
- std::ostream &ACTION_SWITCH();
- std::ostream &KEYS();
- std::ostream &INDICIES();
- std::ostream &FLAT_INDEX_OFFSET();
- std::ostream &KEY_SPANS();
- std::ostream &TO_STATE_ACTIONS();
- std::ostream &FROM_STATE_ACTIONS();
- std::ostream &EOF_ACTIONS();
- std::ostream &TRANS_TARGS();
- std::ostream &TRANS_ACTIONS();
- void LOCATE_TRANS();
-
- std::ostream &COND_INDEX_OFFSET();
- void COND_TRANSLATE();
- std::ostream &CONDS();
- std::ostream &COND_KEYS();
- std::ostream &COND_KEY_SPANS();
-
- void GOTO( ostream &ret, int gotoDest, bool inFinish );
- void CALL( ostream &ret, int callDest, int targState, bool inFinish );
- void NEXT( ostream &ret, int nextDest, bool inFinish );
- void GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish );
- void CURS( ostream &ret, bool inFinish );
- void TARGS( ostream &ret, bool inFinish, int targState );
- void RET( ostream &ret, bool inFinish );
- void BREAK( ostream &ret, int targState );
-
- virtual std::ostream &TO_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &FROM_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &EOF_ACTION( RedStateAp *state );
- virtual std::ostream &TRANS_ACTION( RedTransAp *trans );
-
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
-};
-
-/*
- * CFlatCodeGen
- */
-struct CFlatCodeGen
- : public FlatCodeGen, public CCodeGen
-{
- CFlatCodeGen( ostream &out ) :
- FsmCodeGen(out), FlatCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * DFlatCodeGen
- */
-struct DFlatCodeGen
- : public FlatCodeGen, public DCodeGen
-{
- DFlatCodeGen( ostream &out ) :
- FsmCodeGen(out), FlatCodeGen(out), DCodeGen(out) {}
-};
-
-#endif /* _FLATCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/fsmcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/fsmcodegen.cpp
deleted file mode 100644
index c0fc4b00f5..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fsmcodegen.cpp
+++ /dev/null
@@ -1,749 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "fsmcodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-#include <sstream>
-#include <string>
-#include <assert.h>
-
-
-using std::ostream;
-using std::ostringstream;
-using std::string;
-using std::cerr;
-using std::endl;
-
-void lineDirective( ostream &out, char *fileName, int line )
-{
- if ( noLineDirectives )
- out << "/* ";
-
-	/* Write the preprocessor line info for the input file. */
- out << "#line " << line << " \"";
- for ( char *pc = fileName; *pc != 0; pc++ ) {
- if ( *pc == '\\' )
- out << "\\\\";
- else
- out << *pc;
- }
- out << '"';
-
- if ( noLineDirectives )
- out << " */";
-
- out << '\n';
-}
-
-void genLineDirective( ostream &out )
-{
- std::streambuf *sbuf = out.rdbuf();
- output_filter *filter = static_cast<output_filter*>(sbuf);
- lineDirective( out, filter->fileName, filter->line + 1 );
-}
-
-
-/* Init code gen with in parameters. */
-FsmCodeGen::FsmCodeGen( ostream &out )
-:
- CodeGenData(out)
-{
-}
-
-unsigned int FsmCodeGen::arrayTypeSize( unsigned long maxVal )
-{
- long long maxValLL = (long long) maxVal;
- HostType *arrayType = keyOps->typeSubsumes( maxValLL );
- assert( arrayType != 0 );
- return arrayType->size;
-}
-
-string FsmCodeGen::ARRAY_TYPE( unsigned long maxVal )
-{
- long long maxValLL = (long long) maxVal;
- HostType *arrayType = keyOps->typeSubsumes( maxValLL );
- assert( arrayType != 0 );
-
- string ret = arrayType->data1;
- if ( arrayType->data2 != 0 ) {
- ret += " ";
- ret += arrayType->data2;
- }
- return ret;
-}
-
-
-/* Write out the fsm name. */
-string FsmCodeGen::FSM_NAME()
-{
- return fsmName;
-}
-
-/* Emit the offset of the start state as a decimal integer. */
-string FsmCodeGen::START_STATE_ID()
-{
- ostringstream ret;
- ret << redFsm->startState->id;
- return ret.str();
-};
-
-/* Write out the array of actions. */
-std::ostream &FsmCodeGen::ACTIONS_ARRAY()
-{
- out << "\t0, ";
- int totalActions = 1;
- for ( ActionTableMap::Iter act = redFsm->actionMap; act.lte(); act++ ) {
- /* Write out the length, which will never be the last character. */
- out << act->key.length() << ", ";
- /* Put in a line break every 8 */
- if ( totalActions++ % 8 == 7 )
- out << "\n\t";
-
- for ( ActionTable::Iter item = act->key; item.lte(); item++ ) {
- out << item->value->actionId;
- if ( ! (act.last() && item.last()) )
- out << ", ";
-
- /* Put in a line break every 8 */
- if ( totalActions++ % 8 == 7 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-
-string FsmCodeGen::CS()
-{
- ostringstream ret;
- if ( curStateExpr != 0 ) {
- /* Emit the user supplied method of retrieving the key. */
- ret << "(";
- INLINE_LIST( ret, curStateExpr, 0, false );
- ret << ")";
- }
- else {
- /* Expression for retrieving the key, use simple dereference. */
- ret << ACCESS() << "cs";
- }
- return ret.str();
-}
-
-string FsmCodeGen::ACCESS()
-{
- ostringstream ret;
- if ( accessExpr != 0 )
- INLINE_LIST( ret, accessExpr, 0, false );
- return ret.str();
-}
-
-string FsmCodeGen::GET_WIDE_KEY()
-{
- if ( redFsm->anyConditions() )
- return "_widec";
- else
- return GET_KEY();
-}
-
-string FsmCodeGen::GET_WIDE_KEY( RedStateAp *state )
-{
- if ( state->stateCondList.length() > 0 )
- return "_widec";
- else
- return GET_KEY();
-}
-
-string FsmCodeGen::GET_KEY()
-{
- ostringstream ret;
- if ( getKeyExpr != 0 ) {
- /* Emit the user supplied method of retrieving the key. */
- ret << "(";
- INLINE_LIST( ret, getKeyExpr, 0, false );
- ret << ")";
- }
- else {
- /* Expression for retrieving the key, use simple dereference. */
- ret << "(*" << P() << ")";
- }
- return ret.str();
-}
-
-/* Write out level number of tabs. Makes the nested binary search nice
- * looking. */
-string FsmCodeGen::TABS( int level )
-{
- string result;
- while ( level-- > 0 )
- result += "\t";
- return result;
-}
-
-/* Write out a key from the fsm code gen. Depends on whether or not the key is
- * signed. */
-string FsmCodeGen::KEY( Key key )
-{
- ostringstream ret;
- if ( keyOps->isSigned || !hostLang->explicitUnsigned )
- ret << key.getVal();
- else
- ret << (unsigned long) key.getVal() << 'u';
- return ret.str();
-}
-
-void FsmCodeGen::EXEC( ostream &ret, InlineItem *item, int targState, int inFinish )
-{
- /* The parser gives fexec two children. The double brackets are for D
- * code. If the inline list is a single word it will get interpreted as a
- * C-style cast by the D compiler. */
- ret << "{" << P() << " = ((";
- INLINE_LIST( ret, item->children, targState, inFinish );
- ret << "))-1;}";
-}
-
-void FsmCodeGen::EXECTE( ostream &ret, InlineItem *item, int targState, int inFinish )
-{
- /* Tokend version of exec. */
-
- /* The parser gives fexec two children. The double brackets are for D
- * code. If the inline list is a single word it will get interpreted as a
- * C-style cast by the D compiler. */
- ret << "{" << TOKEND() << " = ((";
- INLINE_LIST( ret, item->children, targState, inFinish );
- ret << "));}";
-}
-
-
-void FsmCodeGen::LM_SWITCH( ostream &ret, InlineItem *item,
- int targState, int inFinish )
-{
- ret <<
- " switch( " << ACT() << " ) {\n";
-
- /* If the switch handles error then we also forced the error state. It
- * will exist. */
- if ( item->handlesError ) {
- ret << " case 0: " << TOKEND() << " = " << TOKSTART() << "; ";
- GOTO( ret, redFsm->errState->id, inFinish );
- ret << "\n";
- }
-
- for ( InlineList::Iter lma = *item->children; lma.lte(); lma++ ) {
- /* Write the case label, the action and the case break. */
- ret << " case " << lma->lmId << ":\n";
-
- /* Write the block and close it off. */
- ret << " {";
- INLINE_LIST( ret, lma->children, targState, inFinish );
- ret << "}\n";
-
- ret << " break;\n";
- }
- /* Default required for D code. */
- ret <<
- " default: break;\n"
- " }\n"
- "\t";
-}
-
-void FsmCodeGen::SET_ACT( ostream &ret, InlineItem *item )
-{
- ret << ACT() << " = " << item->lmId << ";";
-}
-
-void FsmCodeGen::SET_TOKEND( ostream &ret, InlineItem *item )
-{
- /* The tokend action sets tokend. */
- ret << TOKEND() << " = " << P();
- if ( item->offset != 0 )
- out << "+" << item->offset;
- out << ";";
-}
-
-void FsmCodeGen::GET_TOKEND( ostream &ret, InlineItem *item )
-{
- ret << TOKEND();
-}
-
-void FsmCodeGen::INIT_TOKSTART( ostream &ret, InlineItem *item )
-{
- ret << TOKSTART() << " = " << NULL_ITEM() << ";";
-}
-
-void FsmCodeGen::INIT_ACT( ostream &ret, InlineItem *item )
-{
- ret << ACT() << " = 0;";
-}
-
-void FsmCodeGen::SET_TOKSTART( ostream &ret, InlineItem *item )
-{
- ret << TOKSTART() << " = " << P() << ";";
-}
-
-void FsmCodeGen::SUB_ACTION( ostream &ret, InlineItem *item,
- int targState, bool inFinish )
-{
- if ( item->children->length() > 0 ) {
- /* Write the block and close it off. */
- ret << "{";
- INLINE_LIST( ret, item->children, targState, inFinish );
- ret << "}";
- }
-}
-
-
-/* Write out an inline tree structure. Walks the list and possibly calls out
- * to virtual functions that handle language specific items in the tree. */
-void FsmCodeGen::INLINE_LIST( ostream &ret, InlineList *inlineList,
- int targState, bool inFinish )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Text:
- ret << item->data;
- break;
- case InlineItem::Goto:
- GOTO( ret, item->targState->id, inFinish );
- break;
- case InlineItem::Call:
- CALL( ret, item->targState->id, targState, inFinish );
- break;
- case InlineItem::Next:
- NEXT( ret, item->targState->id, inFinish );
- break;
- case InlineItem::Ret:
- RET( ret, inFinish );
- break;
- case InlineItem::PChar:
- ret << P();
- break;
- case InlineItem::Char:
- ret << GET_KEY();
- break;
- case InlineItem::Hold:
- ret << P() << "--;";
- break;
- case InlineItem::Exec:
- EXEC( ret, item, targState, inFinish );
- break;
- case InlineItem::HoldTE:
- ret << TOKEND() << "--;";
- break;
- case InlineItem::ExecTE:
- EXECTE( ret, item, targState, inFinish );
- break;
- case InlineItem::Curs:
- CURS( ret, inFinish );
- break;
- case InlineItem::Targs:
- TARGS( ret, inFinish, targState );
- break;
- case InlineItem::Entry:
- ret << item->targState->id;
- break;
- case InlineItem::GotoExpr:
- GOTO_EXPR( ret, item, inFinish );
- break;
- case InlineItem::CallExpr:
- CALL_EXPR( ret, item, targState, inFinish );
- break;
- case InlineItem::NextExpr:
- NEXT_EXPR( ret, item, inFinish );
- break;
- case InlineItem::LmSwitch:
- LM_SWITCH( ret, item, targState, inFinish );
- break;
- case InlineItem::LmSetActId:
- SET_ACT( ret, item );
- break;
- case InlineItem::LmSetTokEnd:
- SET_TOKEND( ret, item );
- break;
- case InlineItem::LmGetTokEnd:
- GET_TOKEND( ret, item );
- break;
- case InlineItem::LmInitTokStart:
- INIT_TOKSTART( ret, item );
- break;
- case InlineItem::LmInitAct:
- INIT_ACT( ret, item );
- break;
- case InlineItem::LmSetTokStart:
- SET_TOKSTART( ret, item );
- break;
- case InlineItem::SubAction:
- SUB_ACTION( ret, item, targState, inFinish );
- break;
- case InlineItem::Break:
- BREAK( ret, targState );
- break;
- }
- }
-}
-/* Write out paths in line directives. Escapes any special characters. */
-string FsmCodeGen::LDIR_PATH( char *path )
-{
- ostringstream ret;
- for ( char *pc = path; *pc != 0; pc++ ) {
- if ( *pc == '\\' )
- ret << "\\\\";
- else
- ret << *pc;
- }
- return ret.str();
-}
-
-void FsmCodeGen::ACTION( ostream &ret, Action *action, int targState, bool inFinish )
-{
- /* Write the preprocessor line info for going into the source file. */
- lineDirective( ret, sourceFileName, action->loc.line );
-
- /* Write the block and close it off. */
- ret << "\t{";
- INLINE_LIST( ret, action->inlineList, targState, inFinish );
- ret << "}\n";
-}
-
-void FsmCodeGen::CONDITION( ostream &ret, Action *condition )
-{
- ret << "\n";
- lineDirective( ret, sourceFileName, condition->loc.line );
- INLINE_LIST( ret, condition->inlineList, 0, false );
-}
-
-string FsmCodeGen::ERROR_STATE()
-{
- ostringstream ret;
- if ( redFsm->errState != 0 )
- ret << redFsm->errState->id;
- else
- ret << "-1";
- return ret.str();
-}
-
-string FsmCodeGen::FIRST_FINAL_STATE()
-{
- ostringstream ret;
- if ( redFsm->firstFinState != 0 )
- ret << redFsm->firstFinState->id;
- else
- ret << redFsm->nextStateId;
- return ret.str();
-}
-
-void FsmCodeGen::writeInit()
-{
- out << " {\n";
-
- if ( redFsm->startState != 0 )
- out << "\t" << CS() << " = " << START() << ";\n";
-
- /* If there are any calls, then the stack top needs initialization. */
- if ( redFsm->anyActionCalls() || redFsm->anyActionRets() )
- out << "\t" << TOP() << " = 0;\n";
-
- if ( hasLongestMatch ) {
- out <<
- " " << TOKSTART() << " = " << NULL_ITEM() << ";\n"
- " " << TOKEND() << " = " << NULL_ITEM() << ";\n"
- " " << ACT() << " = 0;\n";
- }
- out << " }\n";
-}
-
-string FsmCodeGen::DATA_PREFIX()
-{
- if ( dataPrefix )
- return FSM_NAME() + "_";
- return "";
-}
-
-/* Emit the alphabet data type. */
-string FsmCodeGen::ALPH_TYPE()
-{
- string ret = keyOps->alphType->data1;
- if ( keyOps->alphType->data2 != 0 ) {
- ret += " ";
- ret += + keyOps->alphType->data2;
- }
- return ret;
-}
-
-/* Emit the alphabet data type. */
-string FsmCodeGen::WIDE_ALPH_TYPE()
-{
- string ret;
- if ( redFsm->maxKey <= keyOps->maxKey )
- ret = ALPH_TYPE();
- else {
- long long maxKeyVal = redFsm->maxKey.getLongLong();
- HostType *wideType = keyOps->typeSubsumes( keyOps->isSigned, maxKeyVal );
- assert( wideType != 0 );
-
- ret = wideType->data1;
- if ( wideType->data2 != 0 ) {
- ret += " ";
- ret += wideType->data2;
- }
- }
- return ret;
-}
-
-void FsmCodeGen::STATE_IDS()
-{
- if ( redFsm->startState != 0 )
- STATIC_VAR( "int", START() ) << " = " << START_STATE_ID() << "};\n";
-
- if ( writeFirstFinal )
- STATIC_VAR( "int" , FIRST_FINAL() ) << " = " << FIRST_FINAL_STATE() << "};\n";
-
- if ( writeErr )
- STATIC_VAR( "int", ERROR() ) << " = " << ERROR_STATE() << "};\n";
-
- out << "\n";
-
- if ( entryPointNames.length() > 0 ) {
- for ( EntryNameVect::Iter en = entryPointNames; en.lte(); en++ ) {
- STATIC_VAR( "int", DATA_PREFIX() + "en_" + *en ) <<
- " = " << entryPointIds[en.pos()] << "};\n";
- }
- out << "\n";
- }
-}
-
-
-/*
- * Language-specific, but style-independent code generator functions.
- */
-
-string CCodeGen::PTR_CONST()
-{
- return "const ";
-}
-
-std::ostream &CCodeGen::OPEN_ARRAY( const string& type, const string& name )
-{
- out << "#if defined(__GNUC__)\n";
- out << "static __attribute__((used)) const " << type << " " << name << "[] = {\n";
- out << "#else\n";
- out << "static const " << type << " " << name << "[] = {\n";
- out << "#endif\n";
- return out;
-}
-
-std::ostream &CCodeGen::CLOSE_ARRAY()
-{
- return out << "};\n";
-}
-
-std::ostream &CCodeGen::STATIC_VAR( const string& type, const string& name )
-{
- out << "enum {" << name;
- return out;
-}
-
-string CCodeGen::UINT( )
-{
- return "unsigned int";
-}
-
-string CCodeGen::ARR_OFF( const string& ptr, const string& offset )
-{
- return ptr + " + " + offset;
-}
-
-string CCodeGen::CAST( const string& type )
-{
- return "(" + type + ")";
-}
-
-string CCodeGen::NULL_ITEM()
-{
- return "0";
-}
-
-string CCodeGen::POINTER()
-{
- return " *";
-}
-
-std::ostream &CCodeGen::SWITCH_DEFAULT()
-{
- return out;
-}
-
-string CCodeGen::CTRL_FLOW()
-{
- return "";
-}
-
-void CCodeGen::writeExports()
-{
- if ( exportList.length() > 0 ) {
- for ( ExportList::Iter ex = exportList; ex.lte(); ex++ ) {
- out << "#define " << DATA_PREFIX() << "ex_" << ex->name << " " <<
- KEY(ex->key) << "\n";
- }
- out << "\n";
- }
-}
-
-/*
- * D Specific
- */
-
-string DCodeGen::NULL_ITEM()
-{
- return "null";
-}
-
-string DCodeGen::POINTER()
-{
-	// Multiple items separated by commas can also be pointer types.
- return "* ";
-}
-
-string DCodeGen::PTR_CONST()
-{
- return "";
-}
-
-std::ostream &DCodeGen::OPEN_ARRAY( const string& type, const string& name )
-{
- out << "static const " << type << "[] " << name << " = [\n";
- return out;
-}
-
-std::ostream &DCodeGen::CLOSE_ARRAY()
-{
- return out << "];\n";
-}
-
-std::ostream &DCodeGen::STATIC_VAR( const string& type, const string& name )
-{
- out << "static const " << type << " " << name;
- return out;
-}
-
-string DCodeGen::ARR_OFF( const string& ptr, const string& offset )
-{
- return "&" + ptr + "[" + offset + "]";
-}
-
-string DCodeGen::CAST( const string& type )
-{
- return "cast(" + type + ")";
-}
-
-string DCodeGen::UINT( )
-{
- return "uint";
-}
-
-std::ostream &DCodeGen::SWITCH_DEFAULT()
-{
- out << " default: break;\n";
- return out;
-}
-
-string DCodeGen::CTRL_FLOW()
-{
- return "if (true) ";
-}
-
-void DCodeGen::writeExports()
-{
- if ( exportList.length() > 0 ) {
- for ( ExportList::Iter ex = exportList; ex.lte(); ex++ ) {
- out << "static const " << ALPH_TYPE() << " " << DATA_PREFIX() <<
- "ex_" << ex->name << " = " << KEY(ex->key) << ";\n";
- }
- out << "\n";
- }
-}
-
-/*
- * End D-specific code.
- */
-
-void FsmCodeGen::finishRagelDef()
-{
- if ( codeStyle == GenGoto || codeStyle == GenFGoto ||
- codeStyle == GenIpGoto || codeStyle == GenSplit )
- {
- /* For directly executable machines there is no required state
- * ordering. Choose a depth-first ordering to increase the
- * potential for fall-throughs. */
- redFsm->depthFirstOrdering();
- }
- else {
- /* The frontend will do this for us, but it may be a good idea to
- * force it if the intermediate file is edited. */
- redFsm->sortByStateId();
- }
-
- /* Choose default transitions and the single transition. */
- redFsm->chooseDefaultSpan();
-
- /* Maybe do flat expand, otherwise choose single. */
- if ( codeStyle == GenFlat || codeStyle == GenFFlat )
- redFsm->makeFlat();
- else
- redFsm->chooseSingle();
-
-	/* If any errors have occurred in the input file then don't write anything. */
- if ( gblErrorCount > 0 )
- return;
-
- if ( codeStyle == GenSplit )
- redFsm->partitionFsm( numSplitPartitions );
-
- if ( codeStyle == GenIpGoto || codeStyle == GenSplit )
- redFsm->setInTrans();
-
-	/* analyzeMachine() will find the final action reference counts, among
- * other things. We will use these in reporting the usage
- * of fsm directives in action code. */
- analyzeMachine();
-
-	/* Determine if we should use indices. */
- calcIndexSize();
-}
-
-ostream &FsmCodeGen::source_warning( const InputLoc &loc )
-{
- cerr << sourceFileName << ":" << loc.line << ":" << loc.col << ": warning: ";
- return cerr;
-}
-
-ostream &FsmCodeGen::source_error( const InputLoc &loc )
-{
- gblErrorCount += 1;
- assert( sourceFileName != 0 );
- cerr << sourceFileName << ":" << loc.line << ":" << loc.col << ": ";
- return cerr;
-}
-
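
To make the C-side emitters above easier to follow, here is a rough sketch of the output shape that FsmCodeGen::STATE_IDS() and CCodeGen::OPEN_ARRAY()/CLOSE_ARRAY() produce together. The machine name "fsm", the state ids and the key values are invented for illustration, not taken from a real run.

/* Illustrative only: approximate output for a machine named "fsm". */
enum {fsm_start = 1};
enum {fsm_first_final = 4};
enum {fsm_error = 0};

enum {fsm_en_main = 1};

#if defined(__GNUC__)
static __attribute__((used)) const char _fsm_trans_keys[] = {
#else
static const char _fsm_trans_keys[] = {
#endif
	97, 122, 0
};

The enum form comes from CCodeGen::STATIC_VAR() opening "enum {" and STATE_IDS() closing it with "};", which keeps the ids out of the data segment.
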
diff --git a/contrib/tools/ragel5/rlgen-cd/fsmcodegen.h b/contrib/tools/ragel5/rlgen-cd/fsmcodegen.h
deleted file mode 100644
index 77c76f1b1a..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/fsmcodegen.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FSMCODEGEN_H
-#define _FSMCODEGEN_H
-
-#include <iostream>
-#include <string>
-#include <stdio.h>
-#include "common.h"
-#include "gendata.h"
-
-using std::string;
-using std::ostream;
-
-/* Integer array line length. */
-#define IALL 8
-
-/* Forwards. */
-struct RedFsmAp;
-struct RedStateAp;
-struct CodeGenData;
-struct Action;
-struct NameInst;
-struct InlineItem;
-struct InlineList;
-struct RedAction;
-struct LongestMatch;
-struct LongestMatchPart;
-
-inline string itoa( int i )
-{
- char buf[16];
- sprintf( buf, "%i", i );
- return buf;
-}
-
-/*
- * class FsmCodeGen
- */
-class FsmCodeGen : public CodeGenData
-{
-public:
- FsmCodeGen( ostream &out );
- virtual ~FsmCodeGen() {}
-
- virtual void finishRagelDef();
- virtual void writeInit();
-
-protected:
- string FSM_NAME();
- string START_STATE_ID();
- ostream &ACTIONS_ARRAY();
- string GET_WIDE_KEY();
- string GET_WIDE_KEY( RedStateAp *state );
- string TABS( int level );
- string KEY( Key key );
- string LDIR_PATH( char *path );
- void ACTION( ostream &ret, Action *action, int targState, bool inFinish );
- void CONDITION( ostream &ret, Action *condition );
- string ALPH_TYPE();
- string WIDE_ALPH_TYPE();
- string ARRAY_TYPE( unsigned long maxVal );
-
- virtual string ARR_OFF( const string& ptr, const string& offset ) = 0;
- virtual string CAST( const string& type ) = 0;
- virtual string UINT() = 0;
- virtual string NULL_ITEM() = 0;
- virtual string POINTER() = 0;
- virtual string GET_KEY();
- virtual ostream &SWITCH_DEFAULT() = 0;
-
- string P() { return "p"; }
- string PE() { return "pe"; }
-
- string ACCESS();
- string CS();
- string STACK() { return ACCESS() + "stack"; }
- string TOP() { return ACCESS() + "top"; }
- string TOKSTART() { return ACCESS() + "tokstart"; }
- string TOKEND() { return ACCESS() + "tokend"; }
- string ACT() { return ACCESS() + "act"; }
-
- string DATA_PREFIX();
- string PM() { return "_" + DATA_PREFIX() + "partition_map"; }
- string C() { return "_" + DATA_PREFIX() + "cond_spaces"; }
- string CK() { return "_" + DATA_PREFIX() + "cond_keys"; }
- string K() { return "_" + DATA_PREFIX() + "trans_keys"; }
- string I() { return "_" + DATA_PREFIX() + "indicies"; }
- string CO() { return "_" + DATA_PREFIX() + "cond_offsets"; }
- string KO() { return "_" + DATA_PREFIX() + "key_offsets"; }
- string IO() { return "_" + DATA_PREFIX() + "index_offsets"; }
- string CL() { return "_" + DATA_PREFIX() + "cond_lengths"; }
- string SL() { return "_" + DATA_PREFIX() + "single_lengths"; }
- string RL() { return "_" + DATA_PREFIX() + "range_lengths"; }
- string A() { return "_" + DATA_PREFIX() + "actions"; }
- string TA() { return "_" + DATA_PREFIX() + "trans_actions_wi"; }
- string TT() { return "_" + DATA_PREFIX() + "trans_targs_wi"; }
- string TSA() { return "_" + DATA_PREFIX() + "to_state_actions"; }
- string FSA() { return "_" + DATA_PREFIX() + "from_state_actions"; }
- string EA() { return "_" + DATA_PREFIX() + "eof_actions"; }
- string SP() { return "_" + DATA_PREFIX() + "key_spans"; }
- string CSP() { return "_" + DATA_PREFIX() + "cond_key_spans"; }
- string START() { return DATA_PREFIX() + "start"; }
- string ERROR() { return DATA_PREFIX() + "error"; }
- string FIRST_FINAL() { return DATA_PREFIX() + "first_final"; }
- string CTXDATA() { return DATA_PREFIX() + "ctxdata"; }
-
- void INLINE_LIST( ostream &ret, InlineList *inlineList, int targState, bool inFinish );
- virtual void GOTO( ostream &ret, int gotoDest, bool inFinish ) = 0;
- virtual void CALL( ostream &ret, int callDest, int targState, bool inFinish ) = 0;
- virtual void NEXT( ostream &ret, int nextDest, bool inFinish ) = 0;
- virtual void GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish ) = 0;
- virtual void NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish ) = 0;
- virtual void CALL_EXPR( ostream &ret, InlineItem *ilItem,
- int targState, bool inFinish ) = 0;
- virtual void RET( ostream &ret, bool inFinish ) = 0;
- virtual void BREAK( ostream &ret, int targState ) = 0;
- virtual void CURS( ostream &ret, bool inFinish ) = 0;
- virtual void TARGS( ostream &ret, bool inFinish, int targState ) = 0;
- void EXEC( ostream &ret, InlineItem *item, int targState, int inFinish );
- void EXECTE( ostream &ret, InlineItem *item, int targState, int inFinish );
- void LM_SWITCH( ostream &ret, InlineItem *item, int targState, int inFinish );
- void SET_ACT( ostream &ret, InlineItem *item );
- void INIT_TOKSTART( ostream &ret, InlineItem *item );
- void INIT_ACT( ostream &ret, InlineItem *item );
- void SET_TOKSTART( ostream &ret, InlineItem *item );
- void SET_TOKEND( ostream &ret, InlineItem *item );
- void GET_TOKEND( ostream &ret, InlineItem *item );
- void SUB_ACTION( ostream &ret, InlineItem *item,
- int targState, bool inFinish );
- void STATE_IDS();
-
- string ERROR_STATE();
- string FIRST_FINAL_STATE();
-
- virtual string PTR_CONST() = 0;
- virtual ostream &OPEN_ARRAY( const string& type, const string& name ) = 0;
- virtual ostream &CLOSE_ARRAY() = 0;
- virtual ostream &STATIC_VAR( const string& type, const string& name ) = 0;
-
- virtual string CTRL_FLOW() = 0;
-
- ostream &source_warning(const InputLoc &loc);
- ostream &source_error(const InputLoc &loc);
-
- unsigned int arrayTypeSize( unsigned long maxVal );
-
- bool outLabelUsed;
- bool againLabelUsed;
- bool useIndicies;
-
-public:
-	/* Determine if we should use indices. */
- virtual void calcIndexSize() {}
-};
-
-class CCodeGen : virtual public FsmCodeGen
-{
-public:
- CCodeGen( ostream &out ) : FsmCodeGen(out) {}
-
- virtual string NULL_ITEM();
- virtual string POINTER();
- virtual ostream &SWITCH_DEFAULT();
- virtual ostream &OPEN_ARRAY( const string& type, const string& name );
- virtual ostream &CLOSE_ARRAY();
- virtual ostream &STATIC_VAR( const string& type, const string& name );
- virtual string ARR_OFF( const string& ptr, const string& offset );
- virtual string CAST( const string& type );
- virtual string UINT();
- virtual string PTR_CONST();
- virtual string CTRL_FLOW();
-
- virtual void writeExports();
-};
-
-class DCodeGen : virtual public FsmCodeGen
-{
-public:
- DCodeGen( ostream &out ) : FsmCodeGen(out) {}
-
- virtual string NULL_ITEM();
- virtual string POINTER();
- virtual ostream &SWITCH_DEFAULT();
- virtual ostream &OPEN_ARRAY( const string& type, const string& name );
- virtual ostream &CLOSE_ARRAY();
- virtual ostream &STATIC_VAR( const string& type, const string& name );
- virtual string ARR_OFF( const string& ptr, const string& offset );
- virtual string CAST( const string& type );
- virtual string UINT();
- virtual string PTR_CONST();
- virtual string CTRL_FLOW();
-
- virtual void writeExports();
-};
-
-#endif /* _FSMCODEGEN_H */
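
The header above combines one style class (table, goto, split, etc.) with one language class (CCodeGen or DCodeGen), both deriving virtually from FsmCodeGen. A minimal stand-alone sketch of that pattern, with invented names that are not from the tree, shows why every most-derived generator has to initialize FsmCodeGen(out) itself:

#include <iostream>

struct Base {                   // stands in for FsmCodeGen
	explicit Base( std::ostream &o ) : out(o) {}
	std::ostream &out;
};

struct Style : virtual Base {   // stands in for TabCodeGen, GotoCodeGen, ...
	explicit Style( std::ostream &o ) : Base(o) {}
};

struct Lang : virtual Base {    // stands in for CCodeGen / DCodeGen
	explicit Lang( std::ostream &o ) : Base(o) {}
};

struct Concrete : Style, Lang { // stands in for CFTabCodeGen, DGotoCodeGen, ...
	/* The virtual base is initialized by the most-derived class only;
	 * the Base(o) calls inside Style and Lang are ignored here. */
	explicit Concrete( std::ostream &o ) : Base(o), Style(o), Lang(o) {}
};

int main()
{
	Concrete gen( std::cout );
	gen.out << "one shared FsmCodeGen subobject\n";
	return 0;
}
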
diff --git a/contrib/tools/ragel5/rlgen-cd/ftabcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/ftabcodegen.cpp
deleted file mode 100644
index 1d65e7102c..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/ftabcodegen.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "ftabcodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-
-/* Determine if we should use indices or not. */
-void FTabCodeGen::calcIndexSize()
-{
- int sizeWithInds = 0, sizeWithoutInds = 0;
-
-	/* Calculate the cost of using indices. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- int totalIndex = st->outSingle.length() + st->outRange.length() +
- (st->defTrans == 0 ? 0 : 1);
- sizeWithInds += arrayTypeSize(redFsm->maxIndex) * totalIndex;
- }
- sizeWithInds += arrayTypeSize(redFsm->maxState) * redFsm->transSet.length();
- if ( redFsm->anyActions() )
- sizeWithInds += arrayTypeSize(redFsm->maxActListId) * redFsm->transSet.length();
-
-	/* Calculate the cost of not using indices. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- int totalIndex = st->outSingle.length() + st->outRange.length() +
- (st->defTrans == 0 ? 0 : 1);
- sizeWithoutInds += arrayTypeSize(redFsm->maxState) * totalIndex;
- if ( redFsm->anyActions() )
- sizeWithoutInds += arrayTypeSize(redFsm->maxActListId) * totalIndex;
- }
-
-	/* If using indices reduces the size, use them. */
- useIndicies = sizeWithInds < sizeWithoutInds;
-}
-
-std::ostream &FTabCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->actListId+1;
- out << act;
- return out;
-}
-
-std::ostream &FTabCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->actListId+1;
- out << act;
- return out;
-}
-
-std::ostream &FTabCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->actListId+1;
- out << act;
- return out;
-}
-
-
-/* Write out the function for a transition. */
-std::ostream &FTabCodeGen::TRANS_ACTION( RedTransAp *trans )
-{
- int action = 0;
- if ( trans->action != 0 )
- action = trans->action->actListId+1;
- out << action;
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FTabCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numToStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FTabCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numFromStateRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &FTabCodeGen::EOF_ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numEofRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, true );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-/* Write out the function switch. This switch is keyed on the values
- * of the func index. */
-std::ostream &FTabCodeGen::ACTION_SWITCH()
-{
- /* Loop the actions. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numTransRefs > 0 ) {
- /* Write the entry label. */
- out << "\tcase " << redAct->actListId+1 << ":\n";
-
- /* Write each action in the list of action items. */
- for ( ActionTable::Iter item = redAct->key; item.lte(); item++ )
- ACTION( out, item->value, 0, false );
-
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-void FTabCodeGen::writeData()
-{
- if ( redFsm->anyConditions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondOffset), CO() );
- COND_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondLen), CL() );
- COND_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), CK() );
- COND_KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondSpaceId), C() );
- COND_SPACES();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxKeyOffset), KO() );
- KEY_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), K() );
- KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxSingleLen), SL() );
- SINGLE_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxRangeLen), RL() );
- RANGE_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndexOffset), IO() );
- INDEX_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( useIndicies ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndex), I() );
- INDICIES();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS_WI();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), TA() );
- TRANS_ACTIONS_WI();
- CLOSE_ARRAY() <<
- "\n";
- }
- }
- else {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), TA() );
- TRANS_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
- }
-
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActListId), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void FTabCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out <<
- " {\n"
- " int _klen";
-
- if ( redFsm->anyRegCurStateRef() )
- out << ", _ps";
-
- out <<
- ";\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_keys;\n"
- " int _trans;\n";
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- out << "\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->errState != 0 ) {
- outLabelUsed = true;
- out <<
- " if ( " << CS() << " == " << redFsm->errState->id << " )\n"
- " goto _out;\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " switch ( " << FSA() << "[" << CS() << "] ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyConditions() )
- COND_TRANSLATE();
-
- LOCATE_TRANS();
-
- out << "_match:\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " _ps = " << CS() << ";\n";
-
- if ( useIndicies )
- out << " _trans = " << I() << "[_trans];\n";
-
- out <<
- " " << CS() << " = " << TT() << "[_trans];\n"
- "\n";
-
- if ( redFsm->anyRegActions() ) {
- out <<
- " if ( " << TA() << "[_trans] == 0 )\n"
- " goto _again;\n"
- "\n"
- " switch ( " << TA() << "[_trans] ) {\n";
- ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyRegActions() || redFsm->anyActionGotos() ||
- redFsm->anyActionCalls() || redFsm->anyActionRets() )
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " switch ( " << TSA() << "[" << CS() << "] ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-
-void FTabCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " switch ( " << EA() << "[" << CS() << "] ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-}
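
calcIndexSize() above picks the smaller of two encodings: a transition table of indices plus one target (and action id) per distinct transition, or a table that stores targets and action ids directly in every slot. A self-contained sketch of that comparison, with invented figures rather than values from a real machine:

#include <iostream>

int main()
{
	/* Invented figures: 3 states with 4 table slots each, 5 distinct
	 * transitions after sharing, actions present, and 1-byte array
	 * element types (the arrayTypeSize() results). */
	const int slots = 3 * 4, transSet = 5;
	const int szIndex = 1, szState = 1, szActList = 1;

	/* With indices: every slot stores an index, and each distinct
	 * transition stores its target and action-list id exactly once. */
	int withInds = slots * szIndex + transSet * ( szState + szActList );

	/* Without indices: every slot stores a target and an action-list id. */
	int withoutInds = slots * ( szState + szActList );

	std::cout << withInds << " vs " << withoutInds << "\n"; /* 22 vs 24 */
	std::cout << ( withInds < withoutInds ? "use indices\n" : "no indices\n" );
	return 0;
}

Here sharing five transitions among twelve slots makes the indexed layout cheaper, which is exactly the condition useIndicies records.
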
diff --git a/contrib/tools/ragel5/rlgen-cd/ftabcodegen.h b/contrib/tools/ragel5/rlgen-cd/ftabcodegen.h
deleted file mode 100644
index 9d26d1cadd..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/ftabcodegen.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _FTABCODEGEN_H
-#define _FTABCODEGEN_H
-
-#include <iostream>
-#include "tabcodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-
-
-/*
- * FTabCodeGen
- */
-class FTabCodeGen : public TabCodeGen
-{
-protected:
- FTabCodeGen( ostream &out ) : FsmCodeGen(out), TabCodeGen(out) {}
-
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &EOF_ACTION_SWITCH();
- std::ostream &ACTION_SWITCH();
-
- virtual std::ostream &TO_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &FROM_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &EOF_ACTION( RedStateAp *state );
- virtual std::ostream &TRANS_ACTION( RedTransAp *trans );
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
- virtual void calcIndexSize();
-};
-
-
-/*
- * CFTabCodeGen
- */
-struct CFTabCodeGen
- : public FTabCodeGen, public CCodeGen
-{
- CFTabCodeGen( ostream &out ) :
- FsmCodeGen(out), FTabCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * class DFTabCodeGen
- */
-struct DFTabCodeGen
- : public FTabCodeGen, public DCodeGen
-{
- DFTabCodeGen( ostream &out ) :
- FsmCodeGen(out), FTabCodeGen(out), DCodeGen(out) {}
-};
-
-#endif /* _FTABCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/gotocodegen.cpp b/contrib/tools/ragel5/rlgen-cd/gotocodegen.cpp
deleted file mode 100644
index 13be67d097..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/gotocodegen.cpp
+++ /dev/null
@@ -1,742 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "gotocodegen.h"
-#include "redfsm.h"
-#include "bstmap.h"
-#include "gendata.h"
-
-/* Emit the goto to take for a given transition. */
-std::ostream &GotoCodeGen::TRANS_GOTO( RedTransAp *trans, int level )
-{
- out << TABS(level) << "goto tr" << trans->id << ";";
- return out;
-}
-
-std::ostream &GotoCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numToStateRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &GotoCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numFromStateRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &GotoCodeGen::EOF_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numEofRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, true );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &GotoCodeGen::ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numTransRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-void GotoCodeGen::GOTO_HEADER( RedStateAp *state )
-{
- /* Label the state. */
- out << "case " << state->id << ":\n";
-}
-
-
-void GotoCodeGen::emitSingleSwitch( RedStateAp *state )
-{
- /* Load up the singles. */
- int numSingles = state->outSingle.length();
- RedTransEl *data = state->outSingle.data;
-
- if ( numSingles == 1 ) {
- /* If there is a single single key then write it out as an if. */
- out << "\tif ( " << GET_WIDE_KEY(state) << " == " <<
- KEY(data[0].lowKey) << " )\n\t\t";
-
- /* Virtual function for writing the target of the transition. */
- TRANS_GOTO(data[0].value, 0) << "\n";
- }
- else if ( numSingles > 1 ) {
- /* Write out single keys in a switch if there is more than one. */
- out << "\tswitch( " << GET_WIDE_KEY(state) << " ) {\n";
-
-		/* Write out the single indices. */
- for ( int j = 0; j < numSingles; j++ ) {
- out << "\t\tcase " << KEY(data[j].lowKey) << ": ";
- TRANS_GOTO(data[j].value, 0) << "\n";
- }
-
- /* Emits a default case for D code. */
- SWITCH_DEFAULT();
-
- /* Close off the transition switch. */
- out << "\t}\n";
- }
-}
-
-void GotoCodeGen::emitRangeBSearch( RedStateAp *state, int level, int low, int high )
-{
- /* Get the mid position, staying on the lower end of the range. */
- int mid = (low + high) >> 1;
- RedTransEl *data = state->outRange.data;
-
- /* Determine if we need to look higher or lower. */
- bool anyLower = mid > low;
- bool anyHigher = mid < high;
-
- /* Determine if the keys at mid are the limits of the alphabet. */
- bool limitLow = data[mid].lowKey == keyOps->minKey;
- bool limitHigh = data[mid].highKey == keyOps->maxKey;
-
- if ( anyLower && anyHigher ) {
- /* Can go lower and higher than mid. */
- out << TABS(level) << "if ( " << GET_WIDE_KEY(state) << " < " <<
- KEY(data[mid].lowKey) << " ) {\n";
- emitRangeBSearch( state, level+1, low, mid-1 );
- out << TABS(level) << "} else if ( " << GET_WIDE_KEY(state) << " > " <<
- KEY(data[mid].highKey) << " ) {\n";
- emitRangeBSearch( state, level+1, mid+1, high );
- out << TABS(level) << "} else\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else if ( anyLower && !anyHigher ) {
- /* Can go lower than mid but not higher. */
- out << TABS(level) << "if ( " << GET_WIDE_KEY(state) << " < " <<
- KEY(data[mid].lowKey) << " ) {\n";
- emitRangeBSearch( state, level+1, low, mid-1 );
-
-		/* If the high end is the highest in the alphabet then there is no
-		 * sense in testing it. */
- if ( limitHigh ) {
- out << TABS(level) << "} else\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else {
- out << TABS(level) << "} else if ( " << GET_WIDE_KEY(state) << " <= " <<
- KEY(data[mid].highKey) << " )\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- }
- else if ( !anyLower && anyHigher ) {
- /* Can go higher than mid but not lower. */
- out << TABS(level) << "if ( " << GET_WIDE_KEY(state) << " > " <<
- KEY(data[mid].highKey) << " ) {\n";
- emitRangeBSearch( state, level+1, mid+1, high );
-
- /* If the lower end is the lowest in the alphabet then there is no
- * sense testing it. */
- if ( limitLow ) {
- out << TABS(level) << "} else\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else {
- out << TABS(level) << "} else if ( " << GET_WIDE_KEY(state) << " >= " <<
- KEY(data[mid].lowKey) << " )\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- }
- else {
- /* Cannot go higher or lower than mid. It's mid or bust. What
- * tests to do depends on limits of alphabet. */
- if ( !limitLow && !limitHigh ) {
- out << TABS(level) << "if ( " << KEY(data[mid].lowKey) << " <= " <<
- GET_WIDE_KEY(state) << " && " << GET_WIDE_KEY(state) << " <= " <<
- KEY(data[mid].highKey) << " )\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else if ( limitLow && !limitHigh ) {
- out << TABS(level) << "if ( " << GET_WIDE_KEY(state) << " <= " <<
- KEY(data[mid].highKey) << " )\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else if ( !limitLow && limitHigh ) {
- out << TABS(level) << "if ( " << KEY(data[mid].lowKey) << " <= " <<
- GET_WIDE_KEY(state) << " )\n";
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- else {
- /* Both high and low are at the limit. No tests to do. */
- TRANS_GOTO(data[mid].value, level+1) << "\n";
- }
- }
-}
-
-void GotoCodeGen::STATE_GOTO_ERROR()
-{
- /* Label the state and bail immediately. */
- outLabelUsed = true;
- RedStateAp *state = redFsm->errState;
- out << "case " << state->id << ":\n";
- out << " goto _out;\n";
-}
-
-void GotoCodeGen::COND_TRANSLATE( StateCond *stateCond, int level )
-{
- CondSpace *condSpace = stateCond->condSpace;
- out << TABS(level) << "_widec = " << CAST(WIDE_ALPH_TYPE()) << "(" <<
- KEY(condSpace->baseKey) << " + (" << GET_KEY() <<
- " - " << KEY(keyOps->minKey) << "));\n";
-
- for ( CondSet::Iter csi = condSpace->condSet; csi.lte(); csi++ ) {
- out << TABS(level) << "if ( ";
- CONDITION( out, *csi );
- Size condValOffset = ((1 << csi.pos()) * keyOps->alphSize());
- out << " ) _widec += " << condValOffset << ";\n";
- }
-}
-
-void GotoCodeGen::emitCondBSearch( RedStateAp *state, int level, int low, int high )
-{
- /* Get the mid position, staying on the lower end of the range. */
- int mid = (low + high) >> 1;
- StateCond **data = state->stateCondVect.data;
-
- /* Determine if we need to look higher or lower. */
- bool anyLower = mid > low;
- bool anyHigher = mid < high;
-
- /* Determine if the keys at mid are the limits of the alphabet. */
- bool limitLow = data[mid]->lowKey == keyOps->minKey;
- bool limitHigh = data[mid]->highKey == keyOps->maxKey;
-
- if ( anyLower && anyHigher ) {
- /* Can go lower and higher than mid. */
- out << TABS(level) << "if ( " << GET_KEY() << " < " <<
- KEY(data[mid]->lowKey) << " ) {\n";
- emitCondBSearch( state, level+1, low, mid-1 );
- out << TABS(level) << "} else if ( " << GET_KEY() << " > " <<
- KEY(data[mid]->highKey) << " ) {\n";
- emitCondBSearch( state, level+1, mid+1, high );
- out << TABS(level) << "} else {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else if ( anyLower && !anyHigher ) {
- /* Can go lower than mid but not higher. */
- out << TABS(level) << "if ( " << GET_KEY() << " < " <<
- KEY(data[mid]->lowKey) << " ) {\n";
- emitCondBSearch( state, level+1, low, mid-1 );
-
-		/* If the high end is the highest in the alphabet then there is no
-		 * sense in testing it. */
- if ( limitHigh ) {
- out << TABS(level) << "} else {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else {
- out << TABS(level) << "} else if ( " << GET_KEY() << " <= " <<
- KEY(data[mid]->highKey) << " ) {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- }
- else if ( !anyLower && anyHigher ) {
- /* Can go higher than mid but not lower. */
- out << TABS(level) << "if ( " << GET_KEY() << " > " <<
- KEY(data[mid]->highKey) << " ) {\n";
- emitCondBSearch( state, level+1, mid+1, high );
-
- /* If the lower end is the lowest in the alphabet then there is no
- * sense testing it. */
- if ( limitLow ) {
- out << TABS(level) << "} else {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else {
- out << TABS(level) << "} else if ( " << GET_KEY() << " >= " <<
- KEY(data[mid]->lowKey) << " ) {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- }
- else {
- /* Cannot go higher or lower than mid. It's mid or bust. What
- * tests to do depends on limits of alphabet. */
- if ( !limitLow && !limitHigh ) {
- out << TABS(level) << "if ( " << KEY(data[mid]->lowKey) << " <= " <<
- GET_KEY() << " && " << GET_KEY() << " <= " <<
- KEY(data[mid]->highKey) << " ) {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else if ( limitLow && !limitHigh ) {
- out << TABS(level) << "if ( " << GET_KEY() << " <= " <<
- KEY(data[mid]->highKey) << " ) {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else if ( !limitLow && limitHigh ) {
- out << TABS(level) << "if ( " << KEY(data[mid]->lowKey) << " <= " <<
-				GET_KEY() << " ) {\n";
- COND_TRANSLATE(data[mid], level+1);
- out << TABS(level) << "}\n";
- }
- else {
- /* Both high and low are at the limit. No tests to do. */
- COND_TRANSLATE(data[mid], level);
- }
- }
-}
-
-std::ostream &GotoCodeGen::STATE_GOTOS()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st == redFsm->errState )
- STATE_GOTO_ERROR();
- else {
- /* Writing code above state gotos. */
- GOTO_HEADER( st );
-
- if ( st->stateCondVect.length() > 0 ) {
- out << " _widec = " << GET_KEY() << ";\n";
- emitCondBSearch( st, 1, 0, st->stateCondVect.length() - 1 );
- }
-
- /* Try singles. */
- if ( st->outSingle.length() > 0 )
- emitSingleSwitch( st );
-
-			/* Default case is to binary search the ranges; if that fails, take the default transition below. */
- if ( st->outRange.length() > 0 )
- emitRangeBSearch( st, 1, 0, st->outRange.length() - 1 );
-
- /* Write the default transition. */
- TRANS_GOTO( st->defTrans, 1 ) << "\n";
- }
- }
- return out;
-}
-
-std::ostream &GotoCodeGen::TRANSITIONS()
-{
-	/* Emit a label for every transition so that it can be jumped to. */
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ ) {
- /* Write the label for the transition so it can be jumped to. */
- out << " tr" << trans->id << ": ";
-
- /* Destination state. */
- if ( trans->action != 0 && trans->action->anyCurStateRef() )
- out << "_ps = " << CS() << ";";
- out << CS() << " = " << trans->targ->id << "; ";
-
- if ( trans->action != 0 ) {
- /* Write out the transition func. */
- out << "goto f" << trans->action->actListId << ";\n";
- }
- else {
- /* No code to execute, just loop around. */
- out << "goto _again;\n";
- }
- }
- return out;
-}
-
-std::ostream &GotoCodeGen::EXEC_FUNCS()
-{
-	/* Make labels that set acts and jump to execFuncs. Loop func indices. */
- for ( ActionTableMap::Iter redAct = redFsm->actionMap; redAct.lte(); redAct++ ) {
- if ( redAct->numTransRefs > 0 ) {
- out << " f" << redAct->actListId << ": " <<
- "_acts = " << ARR_OFF(A(), itoa( redAct->location+1 ) ) << ";"
- " goto execFuncs;\n";
- }
- }
-
- out <<
- "\n"
- "execFuncs:\n"
- " _nacts = *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- " goto _again;\n";
- return out;
-}
-
-unsigned int GotoCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->location+1;
- return act;
-}
-
-unsigned int GotoCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->location+1;
- return act;
-}
-
-unsigned int GotoCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->location+1;
- return act;
-}
-
-std::ostream &GotoCodeGen::TO_STATE_ACTIONS()
-{
-	/* Take one off for the pseudo start state. */
- int numStates = redFsm->stateList.length();
- unsigned int *vals = new unsigned int[numStates];
- memset( vals, 0, sizeof(unsigned int)*numStates );
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- vals[st->id] = TO_STATE_ACTION(st);
-
- out << "\t";
- for ( int st = 0; st < redFsm->nextStateId; st++ ) {
- /* Write any eof action. */
- out << vals[st];
- if ( st < numStates-1 ) {
- out << ", ";
- if ( (st+1) % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] vals;
- return out;
-}
-
-std::ostream &GotoCodeGen::FROM_STATE_ACTIONS()
-{
-	/* Take one off for the pseudo start state. */
- int numStates = redFsm->stateList.length();
- unsigned int *vals = new unsigned int[numStates];
- memset( vals, 0, sizeof(unsigned int)*numStates );
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- vals[st->id] = FROM_STATE_ACTION(st);
-
- out << "\t";
- for ( int st = 0; st < redFsm->nextStateId; st++ ) {
- /* Write any eof action. */
- out << vals[st];
- if ( st < numStates-1 ) {
- out << ", ";
- if ( (st+1) % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] vals;
- return out;
-}
-
-std::ostream &GotoCodeGen::EOF_ACTIONS()
-{
-	/* Take one off for the pseudo start state. */
- int numStates = redFsm->stateList.length();
- unsigned int *vals = new unsigned int[numStates];
- memset( vals, 0, sizeof(unsigned int)*numStates );
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- vals[st->id] = EOF_ACTION(st);
-
- out << "\t";
- for ( int st = 0; st < redFsm->nextStateId; st++ ) {
- /* Write any eof action. */
- out << vals[st];
- if ( st < numStates-1 ) {
- out << ", ";
- if ( (st+1) % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] vals;
- return out;
-}
-
-std::ostream &GotoCodeGen::FINISH_CASES()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* States that are final and have an out action need a case. */
- if ( st->eofAction != 0 ) {
- /* Write the case label. */
- out << "\t\tcase " << st->id << ": ";
-
- /* Write the goto func. */
- out << "goto f" << st->eofAction->actListId << ";\n";
- }
- }
-
- return out;
-}
-
-void GotoCodeGen::GOTO( ostream &ret, int gotoDest, bool inFinish )
-{
- ret << "{" << CS() << " = " << gotoDest << "; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void GotoCodeGen::GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << "{" << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void GotoCodeGen::CURS( ostream &ret, bool inFinish )
-{
- ret << "(_ps)";
-}
-
-void GotoCodeGen::TARGS( ostream &ret, bool inFinish, int targState )
-{
- ret << "(" << CS() << ")";
-}
-
-void GotoCodeGen::NEXT( ostream &ret, int nextDest, bool inFinish )
-{
- ret << CS() << " = " << nextDest << ";";
-}
-
-void GotoCodeGen::NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << ");";
-}
-
-void GotoCodeGen::CALL( ostream &ret, int callDest, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = " <<
- callDest << "; " << CTRL_FLOW() << "goto _again;}";
-}
-
-void GotoCodeGen::CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, targState, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void GotoCodeGen::RET( ostream &ret, bool inFinish )
-{
- ret << "{" << CS() << " = " << STACK() << "[--" << TOP() << "]; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void GotoCodeGen::BREAK( ostream &ret, int targState )
-{
- outLabelUsed = true;
- ret << CTRL_FLOW() << "goto _out;";
-}
-
-void GotoCodeGen::writeData()
-{
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActArrItem), A() );
- ACTIONS_ARRAY();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void GotoCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out << " {\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " int _ps = 0;\n";
-
- if ( redFsm->anyToStateActions() || redFsm->anyRegActions()
- || redFsm->anyFromStateActions() )
- {
- out <<
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts;\n"
- " " << UINT() << " _nacts;\n";
- }
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- out << "\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), FSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- out <<
- " switch ( " << CS() << " ) {\n";
- STATE_GOTOS();
- SWITCH_DEFAULT() <<
- " }\n"
- "\n";
- TRANSITIONS() <<
- "\n";
-
- if ( redFsm->anyRegActions() )
- EXEC_FUNCS() << "\n";
-
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), TSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-void GotoCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts = " <<
- ARR_OFF( A(), EA() + "[" + CS() + "]" ) << ";\n"
- " " << UINT() << " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- " }\n"
- "\n";
- }
-}
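
For orientation, this is roughly the shape of code that STATE_GOTOS(), TRANSITIONS() and EXEC_FUNCS() above emit for a tiny machine. State ids, keys and action offsets are invented, and only the C flavour is shown:

	switch ( cs ) {
case 1:
	if ( (*p) == 97 )
		goto tr1;
	goto tr0;
case 2:
	goto tr0;
	}

	tr0: cs = 1; goto _again;
	tr1: cs = 2; goto f0;

	f0: _acts = _fsm_actions + 1; goto execFuncs;

execFuncs:
	_nacts = *_acts++;
	while ( _nacts-- > 0 ) {
		switch ( *_acts++ ) {
	case 0:
	{ /* user action code runs here */ }
	break;
		}
	}
	goto _again;

Transitions without actions loop straight back through _again, while transitions with actions detour through their f label and the shared execFuncs dispatcher.
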
diff --git a/contrib/tools/ragel5/rlgen-cd/gotocodegen.h b/contrib/tools/ragel5/rlgen-cd/gotocodegen.h
deleted file mode 100644
index 625c2c23bd..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/gotocodegen.h
+++ /dev/null
@@ -1,111 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _GOTOCODEGEN_H
-#define _GOTOCODEGEN_H
-
-#include <iostream>
-#include "fsmcodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-struct NameInst;
-struct RedTransAp;
-struct RedStateAp;
-struct StateCond;
-
-/*
- * Goto driven fsm.
- */
-class GotoCodeGen : virtual public FsmCodeGen
-{
-public:
- GotoCodeGen( ostream &out ) : FsmCodeGen(out) {}
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &EOF_ACTION_SWITCH();
- std::ostream &ACTION_SWITCH();
- std::ostream &STATE_GOTOS();
- std::ostream &TRANSITIONS();
- std::ostream &EXEC_FUNCS();
- std::ostream &FINISH_CASES();
-
- void GOTO( ostream &ret, int gotoDest, bool inFinish );
- void CALL( ostream &ret, int callDest, int targState, bool inFinish );
- void NEXT( ostream &ret, int nextDest, bool inFinish );
- void GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish );
- void CURS( ostream &ret, bool inFinish );
- void TARGS( ostream &ret, bool inFinish, int targState );
- void RET( ostream &ret, bool inFinish );
- void BREAK( ostream &ret, int targState );
-
- virtual unsigned int TO_STATE_ACTION( RedStateAp *state );
- virtual unsigned int FROM_STATE_ACTION( RedStateAp *state );
- virtual unsigned int EOF_ACTION( RedStateAp *state );
-
- std::ostream &TO_STATE_ACTIONS();
- std::ostream &FROM_STATE_ACTIONS();
- std::ostream &EOF_ACTIONS();
-
- void COND_TRANSLATE( StateCond *stateCond, int level );
- void emitCondBSearch( RedStateAp *state, int level, int low, int high );
- void STATE_CONDS( RedStateAp *state, bool genDefault );
-
- virtual std::ostream &TRANS_GOTO( RedTransAp *trans, int level );
-
- void emitSingleSwitch( RedStateAp *state );
- void emitRangeBSearch( RedStateAp *state, int level, int low, int high );
-
- /* Called from STATE_GOTOS just before writing the gotos */
- virtual void GOTO_HEADER( RedStateAp *state );
- virtual void STATE_GOTO_ERROR();
-
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
-};
-
-/*
- * class CGotoCodeGen
- */
-struct CGotoCodeGen
- : public GotoCodeGen, public CCodeGen
-{
- CGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), GotoCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * class DGotoCodeGen
- */
-struct DGotoCodeGen
- : public GotoCodeGen, public DCodeGen
-{
- DGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), GotoCodeGen(out), DCodeGen(out) {}
-};
-
-
-#endif /* _GOTOCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.cpp b/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.cpp
deleted file mode 100644
index ed65be5fe0..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.cpp
+++ /dev/null
@@ -1,414 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "ipgotocodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-#include "bstmap.h"
-
-bool IpGotoCodeGen::useAgainLabel()
-{
- return redFsm->anyRegActionRets() ||
- redFsm->anyRegActionByValControl() ||
- redFsm->anyRegNextStmt();
-}
-
-void IpGotoCodeGen::GOTO( ostream &ret, int gotoDest, bool inFinish )
-{
- ret << "{" << CTRL_FLOW() << "goto st" << gotoDest << ";}";
-}
-
-void IpGotoCodeGen::CALL( ostream &ret, int callDest, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << targState <<
- "; " << CTRL_FLOW() << "goto st" << callDest << ";}";
-}
-
-void IpGotoCodeGen::RET( ostream &ret, bool inFinish )
-{
- ret << "{" << CS() << " = " << STACK() << "[--" << TOP() << "]; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void IpGotoCodeGen::GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << "{" << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void IpGotoCodeGen::CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << targState << "; " << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void IpGotoCodeGen::NEXT( ostream &ret, int nextDest, bool inFinish )
-{
- ret << CS() << " = " << nextDest << ";";
-}
-
-void IpGotoCodeGen::NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << ");";
-}
-
-void IpGotoCodeGen::CURS( ostream &ret, bool inFinish )
-{
- ret << "(_ps)";
-}
-
-void IpGotoCodeGen::TARGS( ostream &ret, bool inFinish, int targState )
-{
- ret << targState;
-}
-
-void IpGotoCodeGen::BREAK( ostream &ret, int targState )
-{
- ret << CTRL_FLOW() << "goto _out" << targState << ";";
-}
-
-bool IpGotoCodeGen::IN_TRANS_ACTIONS( RedStateAp *state )
-{
- bool anyWritten = false;
-
- /* Emit any transitions that have actions and that go to this state. */
- for ( int it = 0; it < state->numInTrans; it++ ) {
- RedTransAp *trans = state->inTrans[it];
- if ( trans->action != 0 && trans->labelNeeded ) {
- /* Remember that we wrote an action so we know to write the
- * line directive for going back to the output. */
- anyWritten = true;
-
- /* Write the label for the transition so it can be jumped to. */
- out << "tr" << trans->id << ":\n";
-
- /* If the action contains a next, then we must preload the current
- * state since the action may or may not set it. */
- if ( trans->action->anyNextStmt() )
- out << " " << CS() << " = " << trans->targ->id << ";\n";
-
- /* Write each action in the list. */
- for ( ActionTable::Iter item = trans->action->key; item.lte(); item++ )
- ACTION( out, item->value, trans->targ->id, false );
-
- /* If the action contains a next then we need to reload, otherwise
- * jump directly to the target state. */
- if ( trans->action->anyNextStmt() )
- out << "\tgoto _again;\n";
- else
- out << "\tgoto st" << trans->targ->id << ";\n";
- }
- }
-
- return anyWritten;
-}
-
-/* Called from GotoCodeGen::STATE_GOTOS just before writing the gotos for each
- * state. */
-void IpGotoCodeGen::GOTO_HEADER( RedStateAp *state )
-{
- bool anyWritten = IN_TRANS_ACTIONS( state );
-
- if ( state->labelNeeded )
- out << "st" << state->id << ":\n";
-
- if ( state->toStateAction != 0 ) {
- /* Remember that we wrote an action. Write every action in the list. */
- anyWritten = true;
- for ( ActionTable::Iter item = state->toStateAction->key; item.lte(); item++ )
- ACTION( out, item->value, state->id, false );
- }
-
- /* Advance and test buffer pos. */
- if ( state->labelNeeded ) {
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " == " << PE() << " )\n"
- " goto _out" << state->id << ";\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n";
- }
- }
-
- /* Give the state a switch case. */
- out << "case " << state->id << ":\n";
-
- if ( state->fromStateAction != 0 ) {
- /* Remember that we wrote an action. Write every action in the list. */
- anyWritten = true;
- for ( ActionTable::Iter item = state->fromStateAction->key; item.lte(); item++ )
- ACTION( out, item->value, state->id, false );
- }
-
- if ( anyWritten )
- genLineDirective( out );
-
- /* Record the prev state if necessary. */
- if ( state->anyRegCurStateRef() )
- out << " _ps = " << state->id << ";\n";
-}
-
-void IpGotoCodeGen::STATE_GOTO_ERROR()
-{
- /* In the error state we need to emit some stuff that usually goes into
- * the header. */
- RedStateAp *state = redFsm->errState;
- bool anyWritten = IN_TRANS_ACTIONS( state );
-
- /* No case label needed since we don't switch on the error state. */
- if ( anyWritten )
- genLineDirective( out );
-
- if ( state->labelNeeded )
- out << "st" << state->id << ":\n";
-
- /* Break out here. */
- out << " goto _out" << state->id << ";\n";
-}
-
-
-/* Emit the goto to take for a given transition. */
-std::ostream &IpGotoCodeGen::TRANS_GOTO( RedTransAp *trans, int level )
-{
- if ( trans->action != 0 ) {
- /* Go to the transition which will go to the state. */
- out << TABS(level) << "goto tr" << trans->id << ";";
- }
- else {
- /* Go directly to the target state. */
- out << TABS(level) << "goto st" << trans->targ->id << ";";
- }
- return out;
-}
-
-std::ostream &IpGotoCodeGen::EXIT_STATES()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->outNeeded ) {
- outLabelUsed = true;
- out << " _out" << st->id << ": " << CS() << " = " <<
- st->id << "; goto _out; \n";
- }
- }
- return out;
-}
-
-std::ostream &IpGotoCodeGen::AGAIN_CASES()
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- out <<
- " case " << st->id << ": goto st" << st->id << ";\n";
- }
- return out;
-}
-
-std::ostream &IpGotoCodeGen::FINISH_CASES()
-{
- bool anyWritten = false;
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->eofAction != 0 ) {
- if ( st->eofAction->eofRefs == 0 )
- st->eofAction->eofRefs = new IntSet;
- st->eofAction->eofRefs->insert( st->id );
- }
- }
-
- for ( ActionTableMap::Iter act = redFsm->actionMap; act.lte(); act++ ) {
- if ( act->eofRefs != 0 ) {
- for ( IntSet::Iter pst = *act->eofRefs; pst.lte(); pst++ )
- out << " case " << *pst << ": \n";
-
- /* Remember that we wrote a trans so we know to write the
- * line directive for going back to the output. */
- anyWritten = true;
-
- /* Write each action in the eof action list. */
- for ( ActionTable::Iter item = act->key; item.lte(); item++ )
- ACTION( out, item->value, STATE_ERR_STATE, true );
- out << "\tbreak;\n";
- }
- }
-
- if ( anyWritten )
- genLineDirective( out );
- return out;
-}
-
-void IpGotoCodeGen::setLabelsNeeded( InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Goto: case InlineItem::Call: {
- /* Mark the target as needing a label. */
- item->targState->labelNeeded = true;
- break;
- }
- default: break;
- }
-
- if ( item->children != 0 )
- setLabelsNeeded( item->children );
- }
-}
-
-/* Set up labelNeeded flag for each state. */
-void IpGotoCodeGen::setLabelsNeeded()
-{
-	/* If we use the _again label, then we need the _again switch, which uses
-	 * all labels. */
- if ( useAgainLabel() ) {
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->labelNeeded = true;
- }
- else {
- /* Do not use all labels by default, init all labelNeeded vars to false. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->labelNeeded = false;
-
- if ( redFsm->errState != 0 && redFsm->anyLmSwitchError() )
- redFsm->errState->labelNeeded = true;
-
- /* Walk all transitions and set only those that have targs. */
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ ) {
- /* If there is no action with a next statement, then the label will be
- * needed. */
- if ( trans->action == 0 || !trans->action->anyNextStmt() )
- trans->targ->labelNeeded = true;
-
- /* Need labels for states that have goto or calls in action code
- * invoked on characters (ie, not from out action code). */
- if ( trans->action != 0 ) {
- /* Loop the actions. */
- for ( ActionTable::Iter act = trans->action->key; act.lte(); act++ ) {
-				/* Get the action and walk its tree. */
- setLabelsNeeded( act->value->inlineList );
- }
- }
- }
- }
-
- if ( hasEnd ) {
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->outNeeded = st->labelNeeded;
- }
- else {
- if ( redFsm->errState != 0 )
- redFsm->errState->outNeeded = true;
-
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ ) {
- /* Any state with a transition in that has a break will need an
- * out label. */
- if ( trans->action != 0 && trans->action->anyBreakStmt() )
- trans->targ->outNeeded = true;
- }
- }
-}
-
-void IpGotoCodeGen::writeData()
-{
- STATE_IDS();
-}
-
-void IpGotoCodeGen::writeExec()
-{
- /* Must set labels immediately before writing because we may depend on the
- * noend write option. */
- setLabelsNeeded();
- outLabelUsed = false;
-
- out << " {\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " int _ps = 0;\n";
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- if ( useAgainLabel() ) {
- out <<
- " goto _resume;\n"
- "\n"
- "_again:\n"
- " switch ( " << CS() << " ) {\n";
- AGAIN_CASES() <<
- " default: break;\n"
- " }\n"
- "\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( ++" << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n";
- }
-
- out << "_resume:\n";
- }
-
- out <<
- " switch ( " << CS() << " )\n {\n";
- STATE_GOTOS();
- SWITCH_DEFAULT() <<
- " }\n";
- EXIT_STATES() <<
- "\n";
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out <<
- " }\n";
-}
-
-void IpGotoCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " switch ( " << CS() << " ) {\n";
- FINISH_CASES();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-}
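
For orientation: the -G2 (IpGoto) style deleted above emits one label per state, direct gotos between states and transitions, and a switch on the current state only for entry and re-entry (the _again/_resume machinery). Below is a minimal hand-written C++ sketch of that shape for a machine accepting (ab)*; it illustrates the technique only, is not actual rlgen-cd output, and the names in it (match_ab_star, S1, S2) are invented:

    // Direct goto-driven scanner: one label per state, entry via a switch.
    bool match_ab_star( const char *p, const char *pe )
    {
        enum { ERR = 0, S1 = 1, S2 = 2 };
        int cs = S1;                        // current state, kept for re-entry

        switch ( cs ) {                     // entry/re-entry dispatch
            case S1: goto st1;
            case S2: goto st2;
            default: goto done;
        }

    st1:                                    // accepting: even number of chars seen
        if ( p == pe ) goto done;
        if ( *p == 'a' ) { ++p; goto st2; }
        cs = ERR; goto done;

    st2:                                    // expecting the matching 'b'
        if ( p == pe ) { cs = ERR; goto done; }
        if ( *p == 'b' ) { ++p; goto st1; }
        cs = ERR; goto done;

    done:
        return cs == S1 && p == pe;
    }

Because every state is a plain label, dispatch after entry is a direct jump rather than a table lookup, which is why usage() in main.cpp below advertises this style as the "really fast" one.
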
diff --git a/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.h b/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.h
deleted file mode 100644
index f32678baba..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/ipgotocodegen.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _IPGCODEGEN_H
-#define _IPGCODEGEN_H
-
-#include <iostream>
-#include "gotocodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-
-/*
- * class FGotoCodeGen
- */
-class IpGotoCodeGen : public GotoCodeGen
-{
-public:
- IpGotoCodeGen( ostream &out ) : FsmCodeGen(out), GotoCodeGen(out) {}
-
- std::ostream &EXIT_STATES();
- std::ostream &TRANS_GOTO( RedTransAp *trans, int level );
- std::ostream &FINISH_CASES();
- std::ostream &AGAIN_CASES();
-
- void GOTO( ostream &ret, int gotoDest, bool inFinish );
- void CALL( ostream &ret, int callDest, int targState, bool inFinish );
- void NEXT( ostream &ret, int nextDest, bool inFinish );
- void GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish );
- void RET( ostream &ret, bool inFinish );
- void CURS( ostream &ret, bool inFinish );
- void TARGS( ostream &ret, bool inFinish, int targState );
- void BREAK( ostream &ret, int targState );
-
- virtual void writeData();
- virtual void writeEOF();
- virtual void writeExec();
-
-protected:
- bool useAgainLabel();
-
- /* Called from GotoCodeGen::STATE_GOTOS just before writing the gotos for
- * each state. */
- bool IN_TRANS_ACTIONS( RedStateAp *state );
- void GOTO_HEADER( RedStateAp *state );
- void STATE_GOTO_ERROR();
-
- /* Set up labelNeeded flag for each state. */
- void setLabelsNeeded( InlineList *inlineList );
- void setLabelsNeeded();
-};
-
-
-/*
- * class CIpGotoCodeGen
- */
-struct CIpGotoCodeGen
- : public IpGotoCodeGen, public CCodeGen
-{
- CIpGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), IpGotoCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * class DIpGotoCodeGen
- */
-struct DIpGotoCodeGen
- : public IpGotoCodeGen, public DCodeGen
-{
- DIpGotoCodeGen( ostream &out ) :
- FsmCodeGen(out), IpGotoCodeGen(out), DCodeGen(out) {}
-};
-
-
-#endif /* _IPGCODEGEN_H */
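
The most-derived classes here (CIpGotoCodeGen, DIpGotoCodeGen) name FsmCodeGen first in their initializer lists even though they inherit it only indirectly; together with `class TabCodeGen : virtual public FsmCodeGen` in tabcodegen.h further down, this indicates the usual virtual-base diamond: one shared FsmCodeGen carrying the output stream, with a style mixin and a language mixin layered on top. A minimal sketch of that C++ rule, with placeholder names standing in for the real classes:

    #include <iostream>
    #include <ostream>

    struct Base {                        // stands in for FsmCodeGen
        explicit Base( std::ostream &o ) : out( o ) {}
        std::ostream &out;
    };
    struct Style : virtual Base {        // stands in for IpGotoCodeGen
        explicit Style( std::ostream &o ) : Base( o ) {}
    };
    struct Lang : virtual Base {         // stands in for CCodeGen
        explicit Lang( std::ostream &o ) : Base( o ) {}
    };
    struct Combined : Style, Lang {      // stands in for CIpGotoCodeGen
        // The most-derived class must initialize the virtual base itself;
        // the Base(o) calls written inside Style and Lang are ignored here.
        explicit Combined( std::ostream &o ) : Base( o ), Style( o ), Lang( o ) {}
    };

    int main()
    {
        Combined c( std::cout );
        c.out << "one shared stream\n";
        return 0;
    }
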
diff --git a/contrib/tools/ragel5/rlgen-cd/main.cpp b/contrib/tools/ragel5/rlgen-cd/main.cpp
deleted file mode 100644
index cabe4bd97d..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/main.cpp
+++ /dev/null
@@ -1,394 +0,0 @@
-/*
- * Copyright 2001-2007 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include <stdlib.h>
-#include <string.h>
-#include <stdio.h>
-#include <iostream>
-#include <fstream>
-#ifndef _WIN32
-# include <unistd.h>
-#endif
-
-#include "common.h"
-#include "rlgen-cd.h"
-#include "xmlparse.h"
-#include "pcheck.h"
-#include "vector.h"
-#include "version.h"
-
-/* Code generators. */
-#include "tabcodegen.h"
-#include "ftabcodegen.h"
-#include "flatcodegen.h"
-#include "fflatcodegen.h"
-#include "gotocodegen.h"
-#include "fgotocodegen.h"
-#include "ipgotocodegen.h"
-#include "splitcodegen.h"
-
-using std::istream;
-using std::ifstream;
-using std::ostream;
-using std::ios;
-using std::cin;
-using std::cout;
-using std::cerr;
-using std::endl;
-
-/* Target language and output style. */
-CodeStyleEnum codeStyle = GenTables;
-
-/* Io globals. */
-istream *inStream = 0;
-ostream *outStream = 0;
-output_filter *outFilter = 0;
-char *outputFileName = 0;
-
-/* Graphviz dot file generation. */
-bool graphvizDone = false;
-
-int numSplitPartitions = 0;
-bool noLineDirectives = false;
-bool printPrintables = false;
-
-/* Print a summary of the options. */
-void usage()
-{
- cout <<
-"usage: " PROGNAME " [options] file\n"
-"general:\n"
-" -h, -H, -?, --help Print this usage and exit\n"
-" -v, --version Print version information and exit\n"
-" -o <file> Write output to <file>\n"
-"code generation options:\n"
-" -l Inhibit writing of #line directives\n"
-"generated code style:\n"
-" -T0 Table driven FSM (default)\n"
-" -T1 Faster table driven FSM\n"
-" -F0 Flat table driven FSM\n"
-" -F1 Faster flat table-driven FSM\n"
-" -G0 Goto-driven FSM\n"
-" -G1 Faster goto-driven FSM\n"
-" -G2 Really fast goto-driven FSM\n"
-" -P<N> N-Way Split really fast goto-driven FSM\n"
- ;
-}
-
-/* Print version information. */
-void version()
-{
- cout << "Ragel Code Generator for C, C++, Objective-C and D" << endl <<
- "Version " VERSION << ", " PUBDATE << endl <<
- "Copyright (c) 2001-2007 by Adrian Thurston" << endl;
-}
-
-/* Total error count. */
-int gblErrorCount = 0;
-
-ostream &error()
-{
- gblErrorCount += 1;
- cerr << PROGNAME ": ";
- return cerr;
-}
-
-/*
- * Callbacks invoked by the XML data parser.
- */
-
-/* Invoked by the parser when the root element is opened. */
-ostream *openOutput( char *inputFile )
-{
- if ( hostLangType != CCode && hostLangType != DCode ) {
- error() << "this code generator is for C and D only" << endl;
- exit(1);
- }
-
- /* If the output format is code and no output file name is given, then
- * make a default. */
- if ( outputFileName == 0 ) {
- char *ext = findFileExtension( inputFile );
- if ( ext != 0 && strcmp( ext, ".rh" ) == 0 )
- outputFileName = fileNameFromStem( inputFile, ".h" );
- else {
- const char *defExtension = 0;
- switch ( hostLangType ) {
- case CCode: defExtension = ".c"; break;
- case DCode: defExtension = ".d"; break;
- default: break;
- }
- outputFileName = fileNameFromStem( inputFile, defExtension );
- }
- }
-
- /* Make sure we are not writing to the same file as the input file. */
- if ( outputFileName != 0 && strcmp( inputFile, outputFileName ) == 0 ) {
- error() << "output file \"" << outputFileName <<
- "\" is the same as the input file" << endl;
- }
-
- if ( outputFileName != 0 ) {
- /* Create the filter on the output and open it. */
- outFilter = new output_filter( outputFileName );
- outFilter->open( outputFileName, ios::out|ios::trunc );
- if ( !outFilter->is_open() ) {
- error() << "error opening " << outputFileName << " for writing" << endl;
- exit(1);
- }
-
- /* Open the output stream, attaching it to the filter. */
- outStream = new ostream( outFilter );
- }
- else {
-		/* Writing output to stdout. */
- outStream = &cout;
- }
- return outStream;
-}
-
-/* Invoked by the parser when a ragel definition is opened. */
-CodeGenData *makeCodeGen( char *sourceFileName, char *fsmName,
- ostream &out, bool wantComplete )
-{
- CodeGenData *codeGen = 0;
- switch ( hostLangType ) {
- case CCode:
- switch ( codeStyle ) {
- case GenTables:
- codeGen = new CTabCodeGen(out);
- break;
- case GenFTables:
- codeGen = new CFTabCodeGen(out);
- break;
- case GenFlat:
- codeGen = new CFlatCodeGen(out);
- break;
- case GenFFlat:
- codeGen = new CFFlatCodeGen(out);
- break;
- case GenGoto:
- codeGen = new CGotoCodeGen(out);
- break;
- case GenFGoto:
- codeGen = new CFGotoCodeGen(out);
- break;
- case GenIpGoto:
- codeGen = new CIpGotoCodeGen(out);
- break;
- case GenSplit:
- codeGen = new CSplitCodeGen(out);
- break;
- }
- break;
-
- case DCode:
- switch ( codeStyle ) {
- case GenTables:
- codeGen = new DTabCodeGen(out);
- break;
- case GenFTables:
- codeGen = new DFTabCodeGen(out);
- break;
- case GenFlat:
- codeGen = new DFlatCodeGen(out);
- break;
- case GenFFlat:
- codeGen = new DFFlatCodeGen(out);
- break;
- case GenGoto:
- codeGen = new DGotoCodeGen(out);
- break;
- case GenFGoto:
- codeGen = new DFGotoCodeGen(out);
- break;
- case GenIpGoto:
- codeGen = new DIpGotoCodeGen(out);
- break;
- case GenSplit:
- codeGen = new DSplitCodeGen(out);
- break;
- }
- break;
-
- default: break;
- }
-
- codeGen->sourceFileName = sourceFileName;
- codeGen->fsmName = fsmName;
- codeGen->wantComplete = wantComplete;
-
- return codeGen;
-}
-
-
-
-/* Main, process args and parse the XML input. */
-int main(int argc, char **argv)
-{
- ParamCheck pc("-:Hh?vlo:T:F:G:P:", argc, argv);
- const char *xmlInputFileName = 0;
-
- while ( pc.check() ) {
- switch ( pc.state ) {
- case ParamCheck::match:
- switch ( pc.parameter ) {
- /* Output. */
- case 'o':
- if ( *pc.parameterArg == 0 )
- error() << "a zero length output file name was given" << endl;
- else if ( outputFileName != 0 )
- error() << "more than one output file name was given" << endl;
- else {
- /* Ok, remember the output file name. */
- outputFileName = pc.parameterArg;
- }
- break;
-
- case 'l':
- noLineDirectives = true;
- break;
-
- /* Code style. */
- case 'T':
- if ( pc.parameterArg[0] == '0' )
- codeStyle = GenTables;
- else if ( pc.parameterArg[0] == '1' )
- codeStyle = GenFTables;
- else {
- error() << "-T" << pc.parameterArg[0] <<
- " is an invalid argument" << endl;
- exit(1);
- }
- break;
- case 'F':
- if ( pc.parameterArg[0] == '0' )
- codeStyle = GenFlat;
- else if ( pc.parameterArg[0] == '1' )
- codeStyle = GenFFlat;
- else {
- error() << "-F" << pc.parameterArg[0] <<
- " is an invalid argument" << endl;
- exit(1);
- }
- break;
- case 'G':
- if ( pc.parameterArg[0] == '0' )
- codeStyle = GenGoto;
- else if ( pc.parameterArg[0] == '1' )
- codeStyle = GenFGoto;
- else if ( pc.parameterArg[0] == '2' )
- codeStyle = GenIpGoto;
- else {
- error() << "-G" << pc.parameterArg[0] <<
- " is an invalid argument" << endl;
- exit(1);
- }
- break;
- case 'P':
- codeStyle = GenSplit;
- numSplitPartitions = atoi( pc.parameterArg );
- break;
-
- /* Version and help. */
- case 'v':
- version();
- exit(0);
- case 'H': case 'h': case '?':
- usage();
- exit(0);
- case '-':
- if ( strcasecmp(pc.parameterArg, "help") == 0 ) {
- usage();
- exit(0);
- }
- else if ( strcasecmp(pc.parameterArg, "version") == 0 ) {
- version();
- exit(0);
- }
- else {
- error() << "--" << pc.parameterArg <<
- " is an invalid argument" << endl;
- break;
- }
- }
- break;
-
- case ParamCheck::invalid:
- error() << "-" << pc.parameter << " is an invalid argument" << endl;
- break;
-
- case ParamCheck::noparam:
- if ( *pc.curArg == 0 )
- error() << "a zero length input file name was given" << endl;
- else if ( xmlInputFileName != 0 )
- error() << "more than one input file name was given" << endl;
- else {
- /* OK, Remember the filename. */
- xmlInputFileName = pc.curArg;
- }
- break;
- }
- }
-
- /* Bail on above errors. */
- if ( gblErrorCount > 0 )
- exit(1);
-
- /* Open the input file for reading. */
- if ( xmlInputFileName != 0 ) {
- /* Open the input file for reading. */
- ifstream *inFile = new ifstream( xmlInputFileName );
- inStream = inFile;
- if ( ! inFile->is_open() )
- error() << "could not open " << xmlInputFileName << " for reading" << endl;
- }
- else {
- xmlInputFileName = "<stdin>";
- inStream = &cin;
- }
-
- /* Bail on above errors. */
- if ( gblErrorCount > 0 )
- exit(1);
-
- bool wantComplete = true;
- bool outputActive = true;
-
- /* Parse the input! */
- xml_parse( *inStream, xmlInputFileName, outputActive, wantComplete );
-
- /* If writing to a file, delete the ostream, causing it to flush.
- * Standard out is flushed automatically. */
- if ( outputFileName != 0 ) {
- delete outStream;
- delete outFilter;
- }
-
-	/* Finished, final check for errors. */
- if ( gblErrorCount > 0 ) {
- /* If we opened an output file, remove it. */
- if ( outputFileName != 0 )
- unlink( outputFileName );
- exit(1);
- }
- return 0;
-}
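
A note on the stream plumbing in openOutput() and the cleanup at the end of main(): the generator writes through a bare std::ostream constructed on top of a project-specific output_filter, and tears both down to get the output onto disk. A small generic sketch of that wrap-a-streambuf pattern, using std::filebuf in place of output_filter (whose definition lives elsewhere in the tree) and an invented file name:

    #include <fstream>
    #include <ostream>

    int main()
    {
        // Open a file buffer, then hand it to a plain ostream: the same
        // shape as outStream = new ostream( outFilter ) above.
        std::filebuf fb;
        fb.open( "out.c", std::ios::out | std::ios::trunc );
        if ( !fb.is_open() )
            return 1;

        std::ostream *outStream = new std::ostream( &fb );
        *outStream << "/* generated */\n";

        // Tear-down order matters: drop the ostream first, then close the
        // buffer; closing the filebuf is what flushes the bytes to disk.
        delete outStream;
        fb.close();
        return 0;
    }
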
diff --git a/contrib/tools/ragel5/rlgen-cd/rlgen-cd.h b/contrib/tools/ragel5/rlgen-cd/rlgen-cd.h
deleted file mode 100644
index 93acd99bae..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/rlgen-cd.h
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _RLCODEGEN_H
-#define _RLCODEGEN_H
-
-#include <stdio.h>
-#include <iostream>
-#include "avltree.h"
-#include "vector.h"
-#include "config.h"
-
-#define PROGNAME "rlgen-cd"
-
-/* Target output style. */
-enum CodeStyleEnum
-{
- GenTables,
- GenFTables,
- GenFlat,
- GenFFlat,
- GenGoto,
- GenFGoto,
- GenIpGoto,
- GenSplit
-};
-
-extern CodeStyleEnum codeStyle;
-
-
-/* IO filenames and stream. */
-extern bool graphvizDone;
-
-extern int gblErrorCount;
-
-/* Options. */
-extern int numSplitPartitions;
-extern bool noLineDirectives;
-
-std::ostream &error();
-
-#endif /* _RLCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/splitcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/splitcodegen.cpp
deleted file mode 100644
index d703b37eea..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/splitcodegen.cpp
+++ /dev/null
@@ -1,521 +0,0 @@
-/*
- * Copyright 2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-
-#include "rlgen-cd.h"
-#include "splitcodegen.h"
-#include "gendata.h"
-#include <assert.h>
-
-using std::ostream;
-using std::ios;
-using std::endl;
-
-/* Emit the goto to take for a given transition. */
-std::ostream &SplitCodeGen::TRANS_GOTO( RedTransAp *trans, int level )
-{
- if ( trans->targ->partition == currentPartition ) {
- if ( trans->action != 0 ) {
- /* Go to the transition which will go to the state. */
- out << TABS(level) << "goto tr" << trans->id << ";";
- }
- else {
- /* Go directly to the target state. */
- out << TABS(level) << "goto st" << trans->targ->id << ";";
- }
- }
- else {
- if ( trans->action != 0 ) {
- /* Go to the transition which will go to the state. */
- out << TABS(level) << "goto ptr" << trans->id << ";";
- trans->partitionBoundary = true;
- }
- else {
- /* Go directly to the target state. */
- out << TABS(level) << "goto pst" << trans->targ->id << ";";
- trans->targ->partitionBoundary = true;
- }
- }
- return out;
-}
-
-/* Called just before writing the gotos for each state. */
-void SplitCodeGen::GOTO_HEADER( RedStateAp *state, bool stateInPartition )
-{
- bool anyWritten = IN_TRANS_ACTIONS( state );
-
- if ( state->labelNeeded )
- out << "st" << state->id << ":\n";
-
- if ( state->toStateAction != 0 ) {
- /* Remember that we wrote an action. Write every action in the list. */
- anyWritten = true;
- for ( ActionTable::Iter item = state->toStateAction->key; item.lte(); item++ )
- ACTION( out, item->value, state->id, false );
- }
-
- /* Advance and test buffer pos. */
- if ( state->labelNeeded ) {
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " == " << PE() << " )\n"
- " goto _out" << state->id << ";\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n";
- }
- }
-
- /* Give the state a switch case. */
- out << "case " << state->id << ":\n";
-
- if ( state->fromStateAction != 0 ) {
- /* Remember that we wrote an action. Write every action in the list. */
- anyWritten = true;
- for ( ActionTable::Iter item = state->fromStateAction->key; item.lte(); item++ )
- ACTION( out, item->value, state->id, false );
- }
-
- if ( anyWritten )
- genLineDirective( out );
-
- /* Record the prev state if necessary. */
- if ( state->anyRegCurStateRef() )
- out << " _ps = " << state->id << ";\n";
-}
-
-std::ostream &SplitCodeGen::STATE_GOTOS( int partition )
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->partition == partition ) {
- if ( st == redFsm->errState )
- STATE_GOTO_ERROR();
- else {
-				/* We call into the goto base, which calls back into us
-				 * through virtual functions. Set the current partition here
-				 * rather than threading it through as a parameter. */
- currentPartition = partition;
-
- /* Writing code above state gotos. */
- GOTO_HEADER( st, st->partition == partition );
-
- if ( st->stateCondVect.length() > 0 ) {
- out << " _widec = " << GET_KEY() << ";\n";
- emitCondBSearch( st, 1, 0, st->stateCondVect.length() - 1 );
- }
-
- /* Try singles. */
- if ( st->outSingle.length() > 0 )
- emitSingleSwitch( st );
-
-				/* Default case is to binary search for the ranges; if that
-				 * fails, the default transition below is taken. */
- if ( st->outRange.length() > 0 )
- emitRangeBSearch( st, 1, 0, st->outRange.length() - 1 );
-
- /* Write the default transition. */
- TRANS_GOTO( st->defTrans, 1 ) << "\n";
- }
- }
- }
- return out;
-}
-
-
-std::ostream &SplitCodeGen::PART_TRANS( int partition )
-{
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ ) {
- if ( trans->partitionBoundary ) {
- out <<
- "ptr" << trans->id << ":\n";
-
- if ( trans->action != 0 ) {
- /* If the action contains a next, then we must preload the current
- * state since the action may or may not set it. */
- if ( trans->action->anyNextStmt() )
- out << " " << CS() << " = " << trans->targ->id << ";\n";
-
- /* Write each action in the list. */
- for ( ActionTable::Iter item = trans->action->key; item.lte(); item++ )
- ACTION( out, item->value, trans->targ->id, false );
- }
-
- out <<
- " goto pst" << trans->targ->id << ";\n";
- trans->targ->partitionBoundary = true;
- }
- }
-
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->partitionBoundary ) {
- out <<
- " pst" << st->id << ":\n"
- " " << CS() << " = " << st->id << ";\n";
-
- if ( st->toStateAction != 0 ) {
- /* Remember that we wrote an action. Write every action in the list. */
- for ( ActionTable::Iter item = st->toStateAction->key; item.lte(); item++ )
- ACTION( out, item->value, st->id, false );
- genLineDirective( out );
- }
-
- ptOutLabelUsed = true;
- out << " goto _pt_out; \n";
- }
- }
- return out;
-}
-
-std::ostream &SplitCodeGen::EXIT_STATES( int partition )
-{
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- if ( st->partition == partition && st->outNeeded ) {
- outLabelUsed = true;
- out << " _out" << st->id << ": " << CS() << " = " <<
- st->id << "; goto _out; \n";
- }
- }
- return out;
-}
-
-
-std::ostream &SplitCodeGen::PARTITION( int partition )
-{
- outLabelUsed = false;
- ptOutLabelUsed = false;
-
-	/* Initialize the partition boundaries, which get set during the writing
-	 * of states. After the state writing we emit the cross-partition
-	 * transitions that were flagged. */
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- trans->partitionBoundary = false;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->partitionBoundary = false;
-
- out << " " << ALPH_TYPE() << " *p = *_pp, *pe = *_ppe;\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " int _ps = 0;\n";
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- if ( useAgainLabel() ) {
- out <<
- " goto _resume;\n"
- "\n"
- "_again:\n"
- " switch ( " << CS() << " ) {\n";
- AGAIN_CASES() <<
- " default: break;\n"
- " }\n"
- "\n";
-
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( ++" << P() << " == " << PE() << " )\n"
- " goto _out;\n";
-
- }
- else {
- out <<
- " " << P() << " += 1;\n";
- }
-
- out <<
- "_resume:\n";
- }
-
- out <<
- " switch ( " << CS() << " )\n {\n";
- STATE_GOTOS( partition );
- SWITCH_DEFAULT() <<
- " }\n";
- PART_TRANS( partition );
- EXIT_STATES( partition );
-
- if ( outLabelUsed ) {
- out <<
- "\n"
- " _out:\n"
- " *_pp = p;\n"
- " *_ppe = pe;\n"
- " return 0;\n";
- }
-
- if ( ptOutLabelUsed ) {
- out <<
- "\n"
- " _pt_out:\n"
- " *_pp = p;\n"
- " *_ppe = pe;\n"
- " return 1;\n";
- }
-
- return out;
-}
-
-std::ostream &SplitCodeGen::PART_MAP()
-{
- int *partMap = new int[redFsm->stateList.length()];
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- partMap[st->id] = st->partition;
-
- out << "\t";
- int totalItem = 0;
- for ( int i = 0; i < redFsm->stateList.length(); i++ ) {
- out << partMap[i];
- if ( i != redFsm->stateList.length() - 1 ) {
- out << ", ";
- if ( ++totalItem % IALL == 0 )
- out << "\n\t";
- }
- }
-
- delete[] partMap;
- return out;
-}
-
-void SplitCodeGen::writeData()
-{
- out <<
- "static const int " << START() << " = " << START_STATE_ID() << ";\n"
- "\n";
-
- if ( writeFirstFinal ) {
- out <<
- "static const int " << FIRST_FINAL() << " = " << FIRST_FINAL_STATE() << ";\n"
- "\n";
- }
-
- if ( writeErr ) {
- out <<
- "static const int " << ERROR() << " = " << ERROR_STATE() << ";\n"
- "\n";
- }
-
-
- OPEN_ARRAY( ARRAY_TYPE(numSplitPartitions), PM() );
- PART_MAP();
- CLOSE_ARRAY() <<
- "\n";
-
- for ( int p = 0; p < redFsm->nParts; p++ ) {
- out << "int partition" << p << "( " << ALPH_TYPE() << " **_pp, " << ALPH_TYPE() <<
- " **_ppe, struct " << FSM_NAME() << " *fsm );\n";
- }
- out << "\n";
-}
-
-std::ostream &SplitCodeGen::ALL_PARTITIONS()
-{
-	/* Compute the format string. */
- int width = 0, high = redFsm->nParts - 1;
- while ( high > 0 ) {
- width++;
- high /= 10;
- }
- assert( width <= 8 );
- char suffFormat[] = "_%6.6d.c";
- suffFormat[2] = suffFormat[4] = ( '0' + width );
-
- for ( int p = 0; p < redFsm->nParts; p++ ) {
- char suffix[10];
- sprintf( suffix, suffFormat, p );
- char *fn = fileNameFromStem( sourceFileName, suffix );
- char *include = fileNameFromStem( sourceFileName, ".h" );
-
- /* Create the filter on the output and open it. */
- output_filter *partFilter = new output_filter( fn );
- partFilter->open( fn, ios::out|ios::trunc );
- if ( !partFilter->is_open() ) {
- error() << "error opening " << fn << " for writing" << endl;
- exit(1);
- }
-
- /* Attach the new file to the output stream. */
- std::streambuf *prev_rdbuf = out.rdbuf( partFilter );
-
- out <<
- "#include \"" << include << "\"\n"
- "int partition" << p << "( " << ALPH_TYPE() << " **_pp, " << ALPH_TYPE() <<
- " **_ppe, struct " << FSM_NAME() << " *fsm )\n"
- "{\n";
- PARTITION( p ) <<
- "}\n\n";
- out.flush();
-
- /* Fix the output stream. */
- out.rdbuf( prev_rdbuf );
- }
- return out;
-}
-
-
-void SplitCodeGen::writeExec()
-{
- /* Must set labels immediately before writing because we may depend on the
- * noend write option. */
- setLabelsNeeded();
- out <<
- " {\n"
- " int _stat = 0;\n";
-
- if ( hasEnd ) {
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << " goto _resume;\n";
-
- /* In this reentry, to-state actions have already been executed on the
- * partition-switch exit from the last partition. */
- out << "_reenter:\n";
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n";
- }
-
- out << "_resume:\n";
-
- out <<
- " switch ( " << PM() << "[" << CS() << "] ) {\n";
- for ( int p = 0; p < redFsm->nParts; p++ ) {
- out <<
- " case " << p << ":\n"
- " _stat = partition" << p << "( &p, &pe, fsm );\n"
- " break;\n";
- }
- out <<
- " }\n"
- " if ( _stat )\n"
- " goto _reenter;\n";
-
- if ( hasEnd )
- out << " _out: {}\n";
-
- out <<
- " }\n";
-
- ALL_PARTITIONS();
-}
-
-void SplitCodeGen::setLabelsNeeded( RedStateAp *fromState, InlineList *inlineList )
-{
- for ( InlineList::Iter item = *inlineList; item.lte(); item++ ) {
- switch ( item->type ) {
- case InlineItem::Goto: case InlineItem::Call: {
-			/* In the split code gen, in-partition gotos get ordinary state
-			 * labels; jumps across partitions go through the pst/ptr labels
-			 * instead. */
- if ( fromState->partition == item->targState->partition ){
- /* Mark the target as needing a label. */
- item->targState->labelNeeded = true;
- }
- break;
- }
- default: break;
- }
-
- if ( item->children != 0 )
- setLabelsNeeded( fromState, item->children );
- }
-}
-
-void SplitCodeGen::setLabelsNeeded( RedStateAp *fromState, RedTransAp *trans )
-{
- /* In the split code gen we don't need labels for transitions across
- * partitions. */
- if ( fromState->partition == trans->targ->partition ) {
- /* If there is no action with a next statement, then the label will be
- * needed. */
- trans->labelNeeded = true;
- if ( trans->action == 0 || !trans->action->anyNextStmt() )
- trans->targ->labelNeeded = true;
- }
-
- /* Need labels for states that have goto or calls in action code
- * invoked on characters (ie, not from out action code). */
- if ( trans->action != 0 ) {
- /* Loop the actions. */
- for ( ActionTable::Iter act = trans->action->key; act.lte(); act++ ) {
-			/* Get the action and walk its tree. */
- setLabelsNeeded( fromState, act->value->inlineList );
- }
- }
-}
-
-/* Set up labelNeeded flag for each state. */
-void SplitCodeGen::setLabelsNeeded()
-{
-	/* If we use the _again label, then we use the _again switch, which uses
-	 * all labels. */
- if ( useAgainLabel() ) {
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->labelNeeded = true;
- }
- else {
- /* Do not use all labels by default, init all labelNeeded vars to false. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->labelNeeded = false;
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- trans->labelNeeded = false;
-
- if ( redFsm->errState != 0 && redFsm->anyLmSwitchError() )
- redFsm->errState->labelNeeded = true;
-
- /* Walk all transitions and set only those that have targs. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- for ( RedTransList::Iter tel = st->outRange; tel.lte(); tel++ )
- setLabelsNeeded( st, tel->value );
-
- for ( RedTransList::Iter tel = st->outSingle; tel.lte(); tel++ )
- setLabelsNeeded( st, tel->value );
-
- if ( st->defTrans != 0 )
- setLabelsNeeded( st, st->defTrans );
- }
- }
-
- if ( hasEnd ) {
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ )
- st->outNeeded = st->labelNeeded;
- }
- else {
- if ( redFsm->errState != 0 )
- redFsm->errState->outNeeded = true;
-
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ ) {
- /* Any state with a transition in that has a break will need an
- * out label. */
- if ( trans->action != 0 && trans->action->anyBreakStmt() )
- trans->targ->outNeeded = true;
- }
- }
-}
-
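
ALL_PARTITIONS() above sends one ostream to a sequence of per-partition files by swapping its streambuf and restoring it afterwards, so every PARTITION() body still writes to `out`. A stripped-down sketch of that rdbuf swap; the function and file names here are placeholders:

    #include <fstream>
    #include <iostream>

    // Redirect `out` to fileName for one block of output, then restore it.
    void write_part( std::ostream &out, const char *fileName )
    {
        std::filebuf partBuf;
        partBuf.open( fileName, std::ios::out | std::ios::trunc );
        if ( !partBuf.is_open() )
            return;

        std::streambuf *prev = out.rdbuf( &partBuf );   // redirect
        out << "/* partition body */\n";
        out.flush();                                    // drain before restoring
        out.rdbuf( prev );                              // restore
    }                                                   // partBuf closes here

    int main()
    {
        write_part( std::cout, "part_0.c" );
        std::cout << "back on the original buffer\n";
        return 0;
    }
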
diff --git a/contrib/tools/ragel5/rlgen-cd/splitcodegen.h b/contrib/tools/ragel5/rlgen-cd/splitcodegen.h
deleted file mode 100644
index 82fc37150e..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/splitcodegen.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright 2006 Adrian Thurston <thurston@cs.queensu.ca>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _SPLITCODEGEN_H
-#define _SPLITCODEGEN_H
-
-#include "ipgotocodegen.h"
-
-class SplitCodeGen : public IpGotoCodeGen
-{
-public:
- SplitCodeGen( ostream &out ) : FsmCodeGen(out), IpGotoCodeGen(out) {}
-
- bool ptOutLabelUsed;
-
- std::ostream &PART_MAP();
- std::ostream &EXIT_STATES( int partition );
- std::ostream &PART_TRANS( int partition );
- std::ostream &TRANS_GOTO( RedTransAp *trans, int level );
- void GOTO_HEADER( RedStateAp *state, bool stateInPartition );
- std::ostream &STATE_GOTOS( int partition );
- std::ostream &PARTITION( int partition );
- std::ostream &ALL_PARTITIONS();
- void writeData();
- void writeExec();
- void writeParts();
-
- void setLabelsNeeded( RedStateAp *fromState, InlineList *inlineList );
- void setLabelsNeeded( RedStateAp *fromState, RedTransAp *trans );
- void setLabelsNeeded();
-
- int currentPartition;
-};
-
-struct CSplitCodeGen
- : public SplitCodeGen, public CCodeGen
-{
- CSplitCodeGen( ostream &out ) :
- FsmCodeGen(out), SplitCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * class DIpGotoCodeGen
- */
-struct DSplitCodeGen
- : public SplitCodeGen, public DCodeGen
-{
- DSplitCodeGen( ostream &out ) :
- FsmCodeGen(out), SplitCodeGen(out), DCodeGen(out) {}
-};
-
-
-#endif /* _SPLITCODEGEN_H */
diff --git a/contrib/tools/ragel5/rlgen-cd/tabcodegen.cpp b/contrib/tools/ragel5/rlgen-cd/tabcodegen.cpp
deleted file mode 100644
index 22f09534b2..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/tabcodegen.cpp
+++ /dev/null
@@ -1,988 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#include "rlgen-cd.h"
-#include "tabcodegen.h"
-#include "redfsm.h"
-#include "gendata.h"
-
-/* Determine if we should use indices or not. */
-void TabCodeGen::calcIndexSize()
-{
- int sizeWithInds = 0, sizeWithoutInds = 0;
-
-	/* Calculate the cost of using indices. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- int totalIndex = st->outSingle.length() + st->outRange.length() +
- (st->defTrans == 0 ? 0 : 1);
- sizeWithInds += arrayTypeSize(redFsm->maxIndex) * totalIndex;
- }
- sizeWithInds += arrayTypeSize(redFsm->maxState) * redFsm->transSet.length();
- if ( redFsm->anyActions() )
- sizeWithInds += arrayTypeSize(redFsm->maxActionLoc) * redFsm->transSet.length();
-
-	/* Calculate the cost of not using indices. */
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- int totalIndex = st->outSingle.length() + st->outRange.length() +
- (st->defTrans == 0 ? 0 : 1);
- sizeWithoutInds += arrayTypeSize(redFsm->maxState) * totalIndex;
- if ( redFsm->anyActions() )
- sizeWithoutInds += arrayTypeSize(redFsm->maxActionLoc) * totalIndex;
- }
-
-	/* If using indices reduces the size, use them. */
- useIndicies = sizeWithInds < sizeWithoutInds;
-}
-
-std::ostream &TabCodeGen::TO_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->toStateAction != 0 )
- act = state->toStateAction->location+1;
- out << act;
- return out;
-}
-
-std::ostream &TabCodeGen::FROM_STATE_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->fromStateAction != 0 )
- act = state->fromStateAction->location+1;
- out << act;
- return out;
-}
-
-std::ostream &TabCodeGen::EOF_ACTION( RedStateAp *state )
-{
- int act = 0;
- if ( state->eofAction != 0 )
- act = state->eofAction->location+1;
- out << act;
- return out;
-}
-
-
-std::ostream &TabCodeGen::TRANS_ACTION( RedTransAp *trans )
-{
- /* If there are actions, emit them. Otherwise emit zero. */
- int act = 0;
- if ( trans->action != 0 )
- act = trans->action->location+1;
- out << act;
- return out;
-}
-
-std::ostream &TabCodeGen::TO_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numToStateRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &TabCodeGen::FROM_STATE_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numFromStateRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &TabCodeGen::EOF_ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numEofRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, true );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-
-std::ostream &TabCodeGen::ACTION_SWITCH()
-{
- /* Walk the list of functions, printing the cases. */
- for ( ActionList::Iter act = actionList; act.lte(); act++ ) {
- /* Write out referenced actions. */
- if ( act->numTransRefs > 0 ) {
- /* Write the case label, the action and the case break. */
- out << "\tcase " << act->actionId << ":\n";
- ACTION( out, act, 0, false );
- out << "\tbreak;\n";
- }
- }
-
- genLineDirective( out );
- return out;
-}
-
-std::ostream &TabCodeGen::COND_OFFSETS()
-{
- out << "\t";
- int totalStateNum = 0, curKeyOffset = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write the key offset. */
- out << curKeyOffset;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
-
- /* Move the key offset ahead. */
- curKeyOffset += st->stateCondList.length();
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::KEY_OFFSETS()
-{
- out << "\t";
- int totalStateNum = 0, curKeyOffset = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write the key offset. */
- out << curKeyOffset;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
-
- /* Move the key offset ahead. */
- curKeyOffset += st->outSingle.length() + st->outRange.length()*2;
- }
- out << "\n";
- return out;
-}
-
-
-std::ostream &TabCodeGen::INDEX_OFFSETS()
-{
- out << "\t";
- int totalStateNum = 0, curIndOffset = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write the index offset. */
- out << curIndOffset;
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
-
- /* Move the index offset ahead. */
- curIndOffset += st->outSingle.length() + st->outRange.length();
- if ( st->defTrans != 0 )
- curIndOffset += 1;
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::COND_LENS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
-		/* Write the length of the state's condition list. */
- out << st->stateCondList.length();
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-
-std::ostream &TabCodeGen::SINGLE_LENS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write singles length. */
- out << st->outSingle.length();
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::RANGE_LENS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Emit length of range index. */
- out << st->outRange.length();
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::TO_STATE_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
-		/* Write any to-state action. */
- TO_STATE_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::FROM_STATE_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
-		/* Write any from-state action. */
- FROM_STATE_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::EOF_ACTIONS()
-{
- out << "\t";
- int totalStateNum = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Write any eof action. */
- EOF_ACTION(st);
- if ( !st.last() ) {
- out << ", ";
- if ( ++totalStateNum % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::COND_KEYS()
-{
- out << '\t';
- int totalTrans = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Loop the state's transitions. */
- for ( StateCondList::Iter sc = st->stateCondList; sc.lte(); sc++ ) {
- /* Lower key. */
- out << KEY( sc->lowKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
-
- /* Upper key. */
- out << KEY( sc->highKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::COND_SPACES()
-{
- out << '\t';
- int totalTrans = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Loop the state's transitions. */
- for ( StateCondList::Iter sc = st->stateCondList; sc.lte(); sc++ ) {
- /* Cond Space id. */
- out << sc->condSpace->condSpaceId << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::KEYS()
-{
- out << '\t';
- int totalTrans = 0;
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Loop the singles. */
- for ( RedTransList::Iter stel = st->outSingle; stel.lte(); stel++ ) {
- out << KEY( stel->lowKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Loop the state's transitions. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- /* Lower key. */
- out << KEY( rtel->lowKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
-
- /* Upper key. */
- out << KEY( rtel->highKey ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::INDICIES()
-{
- int totalTrans = 0;
- out << '\t';
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Walk the singles. */
- for ( RedTransList::Iter stel = st->outSingle; stel.lte(); stel++ ) {
- out << stel->value->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Walk the ranges. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- out << rtel->value->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* The state's default index goes next. */
- if ( st->defTrans != 0 ) {
- out << st->defTrans->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::TRANS_TARGS()
-{
- int totalTrans = 0;
- out << '\t';
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Walk the singles. */
- for ( RedTransList::Iter stel = st->outSingle; stel.lte(); stel++ ) {
- RedTransAp *trans = stel->value;
- out << trans->targ->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Walk the ranges. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- RedTransAp *trans = rtel->value;
- out << trans->targ->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* The state's default target state. */
- if ( st->defTrans != 0 ) {
- RedTransAp *trans = st->defTrans;
- out << trans->targ->id << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-
-std::ostream &TabCodeGen::TRANS_ACTIONS()
-{
- int totalTrans = 0;
- out << '\t';
- for ( RedStateList::Iter st = redFsm->stateList; st.lte(); st++ ) {
- /* Walk the singles. */
- for ( RedTransList::Iter stel = st->outSingle; stel.lte(); stel++ ) {
- RedTransAp *trans = stel->value;
- TRANS_ACTION( trans ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* Walk the ranges. */
- for ( RedTransList::Iter rtel = st->outRange; rtel.lte(); rtel++ ) {
- RedTransAp *trans = rtel->value;
- TRANS_ACTION( trans ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
-
- /* The state's default index goes next. */
- if ( st->defTrans != 0 ) {
- RedTransAp *trans = st->defTrans;
- TRANS_ACTION( trans ) << ", ";
- if ( ++totalTrans % IALL == 0 )
- out << "\n\t";
- }
- }
-
- /* Output one last number so we don't have to figure out when the last
- * entry is and avoid writing a comma. */
- out << 0 << "\n";
- return out;
-}
-
-std::ostream &TabCodeGen::TRANS_TARGS_WI()
-{
- /* Transitions must be written ordered by their id. */
- RedTransAp **transPtrs = new RedTransAp*[redFsm->transSet.length()];
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- transPtrs[trans->id] = trans;
-
- /* Keep a count of the num of items in the array written. */
- out << '\t';
- int totalStates = 0;
- for ( int t = 0; t < redFsm->transSet.length(); t++ ) {
- /* Write out the target state. */
- RedTransAp *trans = transPtrs[t];
- out << trans->targ->id;
- if ( t < redFsm->transSet.length()-1 ) {
- out << ", ";
- if ( ++totalStates % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] transPtrs;
- return out;
-}
-
-
-std::ostream &TabCodeGen::TRANS_ACTIONS_WI()
-{
- /* Transitions must be written ordered by their id. */
- RedTransAp **transPtrs = new RedTransAp*[redFsm->transSet.length()];
- for ( TransApSet::Iter trans = redFsm->transSet; trans.lte(); trans++ )
- transPtrs[trans->id] = trans;
-
- /* Keep a count of the num of items in the array written. */
- out << '\t';
- int totalAct = 0;
- for ( int t = 0; t < redFsm->transSet.length(); t++ ) {
- /* Write the function for the transition. */
- RedTransAp *trans = transPtrs[t];
- TRANS_ACTION( trans );
- if ( t < redFsm->transSet.length()-1 ) {
- out << ", ";
- if ( ++totalAct % IALL == 0 )
- out << "\n\t";
- }
- }
- out << "\n";
- delete[] transPtrs;
- return out;
-}
-
-void TabCodeGen::LOCATE_TRANS()
-{
- out <<
- " _keys = " << ARR_OFF( K(), KO() + "[" + CS() + "]" ) << ";\n"
- " _trans = " << IO() << "[" << CS() << "];\n"
- "\n"
- " _klen = " << SL() << "[" << CS() << "];\n"
- " if ( _klen > 0 ) {\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_lower = _keys;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_mid;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_upper = _keys + _klen - 1;\n"
- " while (1) {\n"
- " if ( _upper < _lower )\n"
- " break;\n"
- "\n"
- " _mid = _lower + ((_upper-_lower) >> 1);\n"
- " if ( " << GET_WIDE_KEY() << " < *_mid )\n"
- " _upper = _mid - 1;\n"
- " else if ( " << GET_WIDE_KEY() << " > *_mid )\n"
- " _lower = _mid + 1;\n"
- " else {\n"
- " _trans += (_mid - _keys);\n"
- " goto _match;\n"
- " }\n"
- " }\n"
- " _keys += _klen;\n"
- " _trans += _klen;\n"
- " }\n"
- "\n"
- " _klen = " << RL() << "[" << CS() << "];\n"
- " if ( _klen > 0 ) {\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_lower = _keys;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_mid;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_upper = _keys + (_klen<<1) - 2;\n"
- " while (1) {\n"
- " if ( _upper < _lower )\n"
- " break;\n"
- "\n"
- " _mid = _lower + (((_upper-_lower) >> 1) & ~1);\n"
- " if ( " << GET_WIDE_KEY() << " < _mid[0] )\n"
- " _upper = _mid - 2;\n"
- " else if ( " << GET_WIDE_KEY() << " > _mid[1] )\n"
- " _lower = _mid + 2;\n"
- " else {\n"
- " _trans += ((_mid - _keys)>>1);\n"
- " goto _match;\n"
- " }\n"
- " }\n"
- " _trans += _klen;\n"
- " }\n"
- "\n";
-}
-
-void TabCodeGen::GOTO( ostream &ret, int gotoDest, bool inFinish )
-{
- ret << "{" << CS() << " = " << gotoDest << "; " <<
- CTRL_FLOW() << "goto _again;}";
-}
-
-void TabCodeGen::GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << "{" << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void TabCodeGen::CURS( ostream &ret, bool inFinish )
-{
- ret << "(_ps)";
-}
-
-void TabCodeGen::TARGS( ostream &ret, bool inFinish, int targState )
-{
- ret << "(" << CS() << ")";
-}
-
-void TabCodeGen::NEXT( ostream &ret, int nextDest, bool inFinish )
-{
- ret << CS() << " = " << nextDest << ";";
-}
-
-void TabCodeGen::NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish )
-{
- ret << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, 0, inFinish );
- ret << ");";
-}
-
-void TabCodeGen::CALL( ostream &ret, int callDest, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = " <<
- callDest << "; " << CTRL_FLOW() << "goto _again;}";
-}
-
-void TabCodeGen::CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish )
-{
- ret << "{" << STACK() << "[" << TOP() << "++] = " << CS() << "; " << CS() << " = (";
- INLINE_LIST( ret, ilItem->children, targState, inFinish );
- ret << "); " << CTRL_FLOW() << "goto _again;}";
-}
-
-void TabCodeGen::RET( ostream &ret, bool inFinish )
-{
- ret << "{" << CS() << " = " << STACK() << "[--" <<
- TOP() << "]; " << CTRL_FLOW() << "goto _again;}";
-}
-
-void TabCodeGen::BREAK( ostream &ret, int targState )
-{
- outLabelUsed = true;
- ret << CTRL_FLOW() << "goto _out;";
-}
-
-void TabCodeGen::writeData()
-{
-	/* If there are any transition functions then output the array. If there
- * are none, don't bother emitting an empty array that won't be used. */
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActArrItem), A() );
- ACTIONS_ARRAY();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyConditions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondOffset), CO() );
- COND_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondLen), CL() );
- COND_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), CK() );
- COND_KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxCondSpaceId), C() );
- COND_SPACES();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxKeyOffset), KO() );
- KEY_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( WIDE_ALPH_TYPE(), K() );
- KEYS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxSingleLen), SL() );
- SINGLE_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxRangeLen), RL() );
- RANGE_LENS();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndexOffset), IO() );
- INDEX_OFFSETS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( useIndicies ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxIndex), I() );
- INDICIES();
- CLOSE_ARRAY() <<
- "\n";
-
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS_WI();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TA() );
- TRANS_ACTIONS_WI();
- CLOSE_ARRAY() <<
- "\n";
- }
- }
- else {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxState), TT() );
- TRANS_TARGS();
- CLOSE_ARRAY() <<
- "\n";
-
- if ( redFsm->anyActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TA() );
- TRANS_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
- }
-
- if ( redFsm->anyToStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), TSA() );
- TO_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), FSA() );
- FROM_STATE_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- if ( redFsm->anyEofActions() ) {
- OPEN_ARRAY( ARRAY_TYPE(redFsm->maxActionLoc), EA() );
- EOF_ACTIONS();
- CLOSE_ARRAY() <<
- "\n";
- }
-
- STATE_IDS();
-}
-
-void TabCodeGen::COND_TRANSLATE()
-{
- out <<
- " _widec = " << GET_KEY() << ";\n"
- " _klen = " << CL() << "[" << CS() << "];\n"
- " _keys = " << ARR_OFF( CK(), "(" + CO() + "[" + CS() + "]*2)" ) << ";\n"
- " if ( _klen > 0 ) {\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_lower = _keys;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_mid;\n"
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_upper = _keys + (_klen<<1) - 2;\n"
- " while (1) {\n"
- " if ( _upper < _lower )\n"
- " break;\n"
- "\n"
- " _mid = _lower + (((_upper-_lower) >> 1) & ~1);\n"
- " if ( " << GET_WIDE_KEY() << " < _mid[0] )\n"
- " _upper = _mid - 2;\n"
- " else if ( " << GET_WIDE_KEY() << " > _mid[1] )\n"
- " _lower = _mid + 2;\n"
- " else {\n"
- " switch ( " << C() << "[" << CO() << "[" << CS() << "]"
- " + ((_mid - _keys)>>1)] ) {\n";
-
- for ( CondSpaceList::Iter csi = condSpaceList; csi.lte(); csi++ ) {
- CondSpace *condSpace = csi;
- out << " case " << condSpace->condSpaceId << ": {\n";
- out << TABS(2) << "_widec = " << CAST(WIDE_ALPH_TYPE()) << "(" <<
- KEY(condSpace->baseKey) << " + (" << GET_KEY() <<
- " - " << KEY(keyOps->minKey) << "));\n";
-
- for ( CondSet::Iter csi = condSpace->condSet; csi.lte(); csi++ ) {
- out << TABS(2) << "if ( ";
- CONDITION( out, *csi );
- Size condValOffset = ((1 << csi.pos()) * keyOps->alphSize());
- out << " ) _widec += " << condValOffset << ";\n";
- }
-
- out <<
- " break;\n"
- " }\n";
- }
-
- SWITCH_DEFAULT();
-
- out <<
- " }\n"
- " break;\n"
- " }\n"
- " }\n"
- " }\n"
- "\n";
-}
-
-void TabCodeGen::writeExec()
-{
- outLabelUsed = false;
-
- out <<
- " {\n"
- " int _klen";
-
- if ( redFsm->anyRegCurStateRef() )
- out << ", _ps";
-
- out <<
- ";\n"
- " " << UINT() << " _trans;\n";
-
- if ( redFsm->anyConditions() )
- out << " " << WIDE_ALPH_TYPE() << " _widec;\n";
-
- if ( redFsm->anyToStateActions() || redFsm->anyRegActions()
- || redFsm->anyFromStateActions() )
- {
- out <<
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts;\n"
- " " << UINT() << " _nacts;\n";
- }
-
- out <<
- " " << PTR_CONST() << WIDE_ALPH_TYPE() << POINTER() << "_keys;\n"
- "\n";
-
- if ( hasEnd ) {
- outLabelUsed = true;
- out <<
- " if ( " << P() << " == " << PE() << " )\n"
- " goto _out;\n";
- }
-
- out << "_resume:\n";
-
- if ( redFsm->errState != 0 ) {
- outLabelUsed = true;
- out <<
- " if ( " << CS() << " == " << redFsm->errState->id << " )\n"
- " goto _out;\n";
- }
-
- if ( redFsm->anyFromStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), FSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- FROM_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyConditions() )
- COND_TRANSLATE();
-
- LOCATE_TRANS();
-
- out << "_match:\n";
-
- if ( redFsm->anyRegCurStateRef() )
- out << " _ps = " << CS() << ";\n";
-
- if ( useIndicies )
- out << " _trans = " << I() << "[_trans];\n";
-
- out <<
- " " << CS() << " = " << TT() << "[_trans];\n"
- "\n";
-
- if ( redFsm->anyRegActions() ) {
- out <<
- " if ( " << TA() << "[_trans] == 0 )\n"
- " goto _again;\n"
- "\n"
- " _acts = " << ARR_OFF( A(), TA() + "[_trans]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 )\n {\n"
- " switch ( *_acts++ )\n {\n";
- ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( redFsm->anyRegActions() || redFsm->anyActionGotos() ||
- redFsm->anyActionCalls() || redFsm->anyActionRets() )
- out << "_again:\n";
-
- if ( redFsm->anyToStateActions() ) {
- out <<
- " _acts = " << ARR_OFF( A(), TSA() + "[" + CS() + "]" ) << ";\n"
- " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- TO_STATE_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- "\n";
- }
-
- if ( hasEnd ) {
- out <<
- " if ( ++" << P() << " != " << PE() << " )\n"
- " goto _resume;\n";
- }
- else {
- out <<
- " " << P() << " += 1;\n"
- " goto _resume;\n";
- }
-
- if ( outLabelUsed )
- out << " _out: {}\n";
-
- out << " }\n";
-}
-
-
-void TabCodeGen::writeEOF()
-{
- if ( redFsm->anyEofActions() ) {
- out <<
- " {\n"
- " " << PTR_CONST() << ARRAY_TYPE(redFsm->maxActArrItem) << POINTER() << "_acts = " <<
- ARR_OFF( A(), EA() + "[" + CS() + "]" ) << ";\n"
- " " << UINT() << " _nacts = " << CAST(UINT()) << " *_acts++;\n"
- " while ( _nacts-- > 0 ) {\n"
- " switch ( *_acts++ ) {\n";
- EOF_ACTION_SWITCH();
- SWITCH_DEFAULT() <<
- " }\n"
- " }\n"
- " }\n"
- "\n";
- }
-}
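
For orientation, the writeExec() routine removed above emits a goto-driven scanning loop around the tables produced by writeData(). The following is only a rough sketch of the shape of that generated code, with placeholder names (p, pe, cs, trans_targs, error_state) standing in for whatever P(), PE(), CS(), TT() and the error-state id expand to for the selected host language; it is not literal generator output.

    /* Sketch of the loop structure TabCodeGen::writeExec() writes out. */
    void fsm_exec_sketch(const char *p, const char *pe, int &cs,
                         const unsigned char *trans_targs, unsigned int trans,
                         int error_state)
    {
        if (p == pe)                 /* emitted only when hasEnd is set */
            goto _out;
    _resume:
        if (cs == error_state)       /* emitted only when an error state exists */
            goto _out;
        /* from-state actions, COND_TRANSLATE() output and LOCATE_TRANS() output land here */
    _match:
        cs = trans_targs[trans];
        /* transition actions run here, guarded by a zero check on the action index */
    _again:
        /* to-state actions run here */
        if (++p != pe)
            goto _resume;
    _out: ;
    }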
diff --git a/contrib/tools/ragel5/rlgen-cd/tabcodegen.h b/contrib/tools/ragel5/rlgen-cd/tabcodegen.h
deleted file mode 100644
index 745eb18d81..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/tabcodegen.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright 2001-2006 Adrian Thurston <thurston@cs.queensu.ca>
- * 2004 Erich Ocean <eric.ocean@ampede.com>
- * 2005 Alan West <alan@alanz.com>
- */
-
-/* This file is part of Ragel.
- *
- * Ragel is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * Ragel is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with Ragel; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
- */
-
-#ifndef _TABCODEGEN_H
-#define _TABCODEGEN_H
-
-#include <iostream>
-#include "fsmcodegen.h"
-
-/* Forwards. */
-struct CodeGenData;
-struct NameInst;
-struct RedTransAp;
-struct RedStateAp;
-
-/*
- * TabCodeGen
- */
-class TabCodeGen : virtual public FsmCodeGen
-{
-public:
- TabCodeGen( ostream &out ) : FsmCodeGen(out) {}
- virtual ~TabCodeGen() { }
- virtual void writeData();
- virtual void writeExec();
-
-protected:
- std::ostream &TO_STATE_ACTION_SWITCH();
- std::ostream &FROM_STATE_ACTION_SWITCH();
- std::ostream &EOF_ACTION_SWITCH();
- std::ostream &ACTION_SWITCH();
-
- std::ostream &COND_KEYS();
- std::ostream &COND_SPACES();
- std::ostream &KEYS();
- std::ostream &INDICIES();
- std::ostream &COND_OFFSETS();
- std::ostream &KEY_OFFSETS();
- std::ostream &INDEX_OFFSETS();
- std::ostream &COND_LENS();
- std::ostream &SINGLE_LENS();
- std::ostream &RANGE_LENS();
- std::ostream &TO_STATE_ACTIONS();
- std::ostream &FROM_STATE_ACTIONS();
- std::ostream &EOF_ACTIONS();
- std::ostream &TRANS_TARGS();
- std::ostream &TRANS_ACTIONS();
- std::ostream &TRANS_TARGS_WI();
- std::ostream &TRANS_ACTIONS_WI();
- void LOCATE_TRANS();
-
- void COND_TRANSLATE();
-
- void GOTO( ostream &ret, int gotoDest, bool inFinish );
- void CALL( ostream &ret, int callDest, int targState, bool inFinish );
- void NEXT( ostream &ret, int nextDest, bool inFinish );
- void GOTO_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void NEXT_EXPR( ostream &ret, InlineItem *ilItem, bool inFinish );
- void CALL_EXPR( ostream &ret, InlineItem *ilItem, int targState, bool inFinish );
- void CURS( ostream &ret, bool inFinish );
- void TARGS( ostream &ret, bool inFinish, int targState );
- void RET( ostream &ret, bool inFinish );
- void BREAK( ostream &ret, int targState );
-
- virtual std::ostream &TO_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &FROM_STATE_ACTION( RedStateAp *state );
- virtual std::ostream &EOF_ACTION( RedStateAp *state );
- virtual std::ostream &TRANS_ACTION( RedTransAp *trans );
- virtual void calcIndexSize();
- virtual void writeEOF();
-};
-
-
-/*
- * CTabCodeGen
- */
-struct CTabCodeGen
- : public TabCodeGen, public CCodeGen
-{
- CTabCodeGen( ostream &out ) :
- FsmCodeGen(out), TabCodeGen(out), CCodeGen(out) {}
-};
-
-/*
- * DTabCodeGen
- */
-struct DTabCodeGen
- : public TabCodeGen, public DCodeGen
-{
- DTabCodeGen( ostream &out ) :
- FsmCodeGen(out), TabCodeGen(out), DCodeGen(out) {}
-};
-
-
-#endif /* _TABCODEGEN_H */
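
CTabCodeGen and DTabCodeGen only combine the shared table logic with C- or D-specific formatting, which is why their constructors also initialise the virtual FsmCodeGen base explicitly. A minimal driver sketch, assuming this header is available; makeTabCodeGen() is an invented helper for illustration, the real dispatch lives in rlgen-cd's main.cpp (not shown here).

    #include <iostream>
    #include "tabcodegen.h"

    /* Hypothetical helper: pick the table-driven generator for the requested host language. */
    static TabCodeGen *makeTabCodeGen(bool wantD, std::ostream &out)
    {
        /* Virtual inheritance means both concrete classes share one FsmCodeGen
           subobject, so the same output stream reaches every base exactly once. */
        if (wantD)
            return new DTabCodeGen(out);   /* D output */
        return new CTabCodeGen(out);       /* C/C++ output */
    }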
diff --git a/contrib/tools/ragel5/rlgen-cd/ya.make b/contrib/tools/ragel5/rlgen-cd/ya.make
deleted file mode 100644
index ef2a59f8c2..0000000000
--- a/contrib/tools/ragel5/rlgen-cd/ya.make
+++ /dev/null
@@ -1,25 +0,0 @@
-PROGRAM()
-
-NO_UTIL()
-NO_COMPILER_WARNINGS()
-
-PEERDIR(
- contrib/tools/ragel5/aapl
- contrib/tools/ragel5/common
- contrib/tools/ragel5/redfsm
-)
-
-SRCS(
- fflatcodegen.cpp
- fgotocodegen.cpp
- flatcodegen.cpp
- fsmcodegen.cpp
- ftabcodegen.cpp
- gotocodegen.cpp
- ipgotocodegen.cpp
- main.cpp
- splitcodegen.cpp
- tabcodegen.cpp
-)
-
-END()
diff --git a/contrib/tools/swig/Lib/exception.i b/contrib/tools/swig/Lib/exception.i
deleted file mode 100644
index 5cdea58e8b..0000000000
--- a/contrib/tools/swig/Lib/exception.i
+++ /dev/null
@@ -1,332 +0,0 @@
-/* -----------------------------------------------------------------------------
- * exception.i
- *
- * SWIG library file providing language independent exception handling
- * ----------------------------------------------------------------------------- */
-
-#if defined(SWIGUTL)
-#error "This version of exception.i should not be used"
-#endif
-
-
-%insert("runtime") "swigerrors.swg"
-
-
-#ifdef SWIGPHP
-%{
-#if PHP_MAJOR_VERSION >= 8
-# define SWIG_HANDLE_VALUE_ERROR_FOR_PHP8(code) code == SWIG_ValueError ? zend_ce_value_error :
-#else
-# define SWIG_HANDLE_VALUE_ERROR_FOR_PHP8(code)
-#endif
-#define SWIG_exception(code, msg) do { zend_throw_exception( \
- code == SWIG_TypeError ? zend_ce_type_error : \
- SWIG_HANDLE_VALUE_ERROR_FOR_PHP8(code) \
- code == SWIG_DivisionByZero ? zend_ce_division_by_zero_error : \
- code == SWIG_SyntaxError ? zend_ce_parse_error : \
- code == SWIG_OverflowError ? zend_ce_arithmetic_error : \
- NULL, msg, code); SWIG_fail; } while (0)
-%}
-#endif
-
-#ifdef SWIGGUILE
-%{
- SWIGINTERN void SWIG_exception_ (int code, const char *msg,
- const char *subr) {
-#define ERROR(scmerr) \
- scm_error(scm_from_locale_string((char *) (scmerr)), \
- (char *) subr, (char *) msg, \
- SCM_EOL, SCM_BOOL_F)
-#define MAP(swigerr, scmerr) \
- case swigerr: \
- ERROR(scmerr); \
- break
- switch (code) {
- MAP(SWIG_MemoryError, "swig-memory-error");
- MAP(SWIG_IOError, "swig-io-error");
- MAP(SWIG_RuntimeError, "swig-runtime-error");
- MAP(SWIG_IndexError, "swig-index-error");
- MAP(SWIG_TypeError, "swig-type-error");
- MAP(SWIG_DivisionByZero, "swig-division-by-zero");
- MAP(SWIG_OverflowError, "swig-overflow-error");
- MAP(SWIG_SyntaxError, "swig-syntax-error");
- MAP(SWIG_ValueError, "swig-value-error");
- MAP(SWIG_SystemError, "swig-system-error");
- default:
- ERROR("swig-error");
- }
-#undef ERROR
-#undef MAP
- }
-
-#define SWIG_exception(a,b) SWIG_exception_(a, b, FUNC_NAME)
-%}
-#endif
-
-#ifdef SWIGMZSCHEME
-
-%{
-SWIGINTERN void SWIG_exception_ (int code, const char *msg) {
-#define ERROR(errname) \
- scheme_signal_error(errname " (%s)", msg);
-#define MAP(swigerr, errname) \
- case swigerr: \
- ERROR(errname); \
- break
- switch (code) {
- MAP(SWIG_MemoryError, "swig-memory-error");
- MAP(SWIG_IOError, "swig-io-error");
- MAP(SWIG_RuntimeError, "swig-runtime-error");
- MAP(SWIG_IndexError, "swig-index-error");
- MAP(SWIG_TypeError, "swig-type-error");
- MAP(SWIG_DivisionByZero, "swig-division-by-zero");
- MAP(SWIG_OverflowError, "swig-overflow-error");
- MAP(SWIG_SyntaxError, "swig-syntax-error");
- MAP(SWIG_ValueError, "swig-value-error");
- MAP(SWIG_SystemError, "swig-system-error");
- default:
- ERROR("swig-error");
- }
-#undef ERROR
-#undef MAP
- }
-
-#define SWIG_exception(a,b) SWIG_exception_(a, b)
-%}
-#endif
-
-#ifdef SWIGJAVA
-%{
-SWIGINTERN void SWIG_JavaException(JNIEnv *jenv, int code, const char *msg) {
- SWIG_JavaExceptionCodes exception_code = SWIG_JavaUnknownError;
- switch(code) {
- case SWIG_MemoryError:
- exception_code = SWIG_JavaOutOfMemoryError;
- break;
- case SWIG_IOError:
- exception_code = SWIG_JavaIOException;
- break;
- case SWIG_SystemError:
- case SWIG_RuntimeError:
- exception_code = SWIG_JavaRuntimeException;
- break;
- case SWIG_OverflowError:
- case SWIG_IndexError:
- exception_code = SWIG_JavaIndexOutOfBoundsException;
- break;
- case SWIG_DivisionByZero:
- exception_code = SWIG_JavaArithmeticException;
- break;
- case SWIG_SyntaxError:
- case SWIG_ValueError:
- case SWIG_TypeError:
- exception_code = SWIG_JavaIllegalArgumentException;
- break;
- case SWIG_UnknownError:
- default:
- exception_code = SWIG_JavaUnknownError;
- break;
- }
- SWIG_JavaThrowException(jenv, exception_code, msg);
-}
-%}
-
-#define SWIG_exception(code, msg)\
-{ SWIG_JavaException(jenv, code, msg); return $null; }
-#endif // SWIGJAVA
-
-#ifdef SWIGOCAML
-%{
-SWIGINTERN void SWIG_OCamlException(int code, const char *msg) {
- CAMLparam0();
-
- SWIG_OCamlExceptionCodes exception_code = SWIG_OCamlUnknownError;
- switch (code) {
- case SWIG_DivisionByZero:
- exception_code = SWIG_OCamlArithmeticException;
- break;
- case SWIG_IndexError:
- exception_code = SWIG_OCamlIndexOutOfBoundsException;
- break;
- case SWIG_IOError:
- case SWIG_SystemError:
- exception_code = SWIG_OCamlSystemException;
- break;
- case SWIG_MemoryError:
- exception_code = SWIG_OCamlOutOfMemoryError;
- break;
- case SWIG_OverflowError:
- exception_code = SWIG_OCamlOverflowException;
- break;
- case SWIG_RuntimeError:
- exception_code = SWIG_OCamlRuntimeException;
- break;
- case SWIG_SyntaxError:
- case SWIG_TypeError:
- case SWIG_ValueError:
- exception_code = SWIG_OCamlIllegalArgumentException;
- break;
- case SWIG_UnknownError:
- default:
- exception_code = SWIG_OCamlUnknownError;
- break;
- }
- SWIG_OCamlThrowException(exception_code, msg);
- CAMLreturn0;
-}
-#define SWIG_exception(code, msg) SWIG_OCamlException(code, msg)
-%}
-#endif
-
-
-#ifdef SWIGCSHARP
-%{
-SWIGINTERN void SWIG_CSharpException(int code, const char *msg) {
- if (code == SWIG_ValueError) {
- SWIG_CSharpExceptionArgumentCodes exception_code = SWIG_CSharpArgumentOutOfRangeException;
- SWIG_CSharpSetPendingExceptionArgument(exception_code, msg, 0);
- } else {
- SWIG_CSharpExceptionCodes exception_code = SWIG_CSharpApplicationException;
- switch(code) {
- case SWIG_MemoryError:
- exception_code = SWIG_CSharpOutOfMemoryException;
- break;
- case SWIG_IndexError:
- exception_code = SWIG_CSharpIndexOutOfRangeException;
- break;
- case SWIG_DivisionByZero:
- exception_code = SWIG_CSharpDivideByZeroException;
- break;
- case SWIG_IOError:
- exception_code = SWIG_CSharpIOException;
- break;
- case SWIG_OverflowError:
- exception_code = SWIG_CSharpOverflowException;
- break;
- case SWIG_RuntimeError:
- case SWIG_TypeError:
- case SWIG_SyntaxError:
- case SWIG_SystemError:
- case SWIG_UnknownError:
- default:
- exception_code = SWIG_CSharpApplicationException;
- break;
- }
- SWIG_CSharpSetPendingException(exception_code, msg);
- }
-}
-%}
-
-#define SWIG_exception(code, msg)\
-{ SWIG_CSharpException(code, msg); return $null; }
-#endif // SWIGCSHARP
-
-#ifdef SWIGLUA
-
-%{
-#define SWIG_exception(a,b)\
-{ lua_pushfstring(L,"%s:%s",#a,b);SWIG_fail; }
-%}
-
-#endif // SWIGLUA
-
-#ifdef SWIGD
-%{
-SWIGINTERN void SWIG_DThrowException(int code, const char *msg) {
- SWIG_DExceptionCodes exception_code;
- switch(code) {
- case SWIG_IndexError:
- exception_code = SWIG_DNoSuchElementException;
- break;
- case SWIG_IOError:
- exception_code = SWIG_DIOException;
- break;
- case SWIG_ValueError:
- exception_code = SWIG_DIllegalArgumentException;
- break;
- case SWIG_DivisionByZero:
- case SWIG_MemoryError:
- case SWIG_OverflowError:
- case SWIG_RuntimeError:
- case SWIG_TypeError:
- case SWIG_SyntaxError:
- case SWIG_SystemError:
- case SWIG_UnknownError:
- default:
- exception_code = SWIG_DException;
- break;
- }
- SWIG_DSetPendingException(exception_code, msg);
-}
-%}
-
-#define SWIG_exception(code, msg)\
-{ SWIG_DThrowException(code, msg); return $null; }
-#endif // SWIGD
-
-#ifdef __cplusplus
-/*
- You can use the SWIG_CATCH_STDEXCEPT macro with the %exception
- directive as follows:
-
- %exception {
- try {
- $action
- }
- catch (my_except& e) {
- ...
- }
- SWIG_CATCH_STDEXCEPT // catch std::exception
- catch (...) {
- SWIG_exception(SWIG_UnknownError, "Unknown exception");
- }
- }
-*/
-%{
-#include <typeinfo>
-#include <stdexcept>
-%}
-%define SWIG_CATCH_STDEXCEPT
- /* catching std::exception */
- catch (std::invalid_argument& e) {
- SWIG_exception(SWIG_ValueError, e.what() );
- } catch (std::domain_error& e) {
- SWIG_exception(SWIG_ValueError, e.what() );
- } catch (std::overflow_error& e) {
- SWIG_exception(SWIG_OverflowError, e.what() );
- } catch (std::out_of_range& e) {
- SWIG_exception(SWIG_IndexError, e.what() );
- } catch (std::length_error& e) {
- SWIG_exception(SWIG_IndexError, e.what() );
- } catch (std::runtime_error& e) {
- SWIG_exception(SWIG_RuntimeError, e.what() );
- } catch (std::bad_cast& e) {
- SWIG_exception(SWIG_TypeError, e.what() );
- } catch (std::exception& e) {
- SWIG_exception(SWIG_SystemError, e.what() );
- }
-%enddef
-%define SWIG_CATCH_UNKNOWN
- catch (std::exception& e) {
- SWIG_exception(SWIG_SystemError, e.what() );
- }
- catch (...) {
- SWIG_exception(SWIG_UnknownError, "unknown exception");
- }
-%enddef
-
-/* rethrow the unknown exception */
-
-#if defined(SWIGCSHARP) || defined(SWIGD)
-%typemap(throws,noblock=1, canthrow=1) (...) {
- SWIG_exception(SWIG_RuntimeError,"unknown exception");
-}
-#else
-%typemap(throws,noblock=1) (...) {
- SWIG_exception(SWIG_RuntimeError,"unknown exception");
-}
-#endif
-
-#endif /* __cplusplus */
-
-/* exception.i ends here */
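
The SWIG_CATCH_STDEXCEPT macro defined above expands to the catch cascade shown in its %define, so a wrapper guarded by the documented %exception block roughly takes the following shape. call_wrapped_action() is a stand-in for whatever $action expands to, and SWIG_exception itself resolves to the language-specific definition earlier in this file.

    try {
        call_wrapped_action();                                   /* $action */
    } catch (std::invalid_argument &e) {
        SWIG_exception(SWIG_ValueError, e.what());
    } catch (std::domain_error &e) {
        SWIG_exception(SWIG_ValueError, e.what());
    } catch (std::overflow_error &e) {
        SWIG_exception(SWIG_OverflowError, e.what());
    } catch (std::out_of_range &e) {
        SWIG_exception(SWIG_IndexError, e.what());
    } catch (std::length_error &e) {
        SWIG_exception(SWIG_IndexError, e.what());
    } catch (std::runtime_error &e) {
        SWIG_exception(SWIG_RuntimeError, e.what());
    } catch (std::bad_cast &e) {
        SWIG_exception(SWIG_TypeError, e.what());
    } catch (std::exception &e) {
        SWIG_exception(SWIG_SystemError, e.what());
    } catch (...) {
        SWIG_exception(SWIG_UnknownError, "Unknown exception");
    }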
diff --git a/contrib/tools/swig/Lib/go/exception.i b/contrib/tools/swig/Lib/go/exception.i
deleted file mode 100644
index 5abd306a4e..0000000000
--- a/contrib/tools/swig/Lib/go/exception.i
+++ /dev/null
@@ -1,7 +0,0 @@
-%typemap(throws,noblock=1) (...) {
- SWIG_exception(SWIG_RuntimeError,"unknown exception");
-}
-
-%insert("runtime") %{
-#define SWIG_exception(code, msg) _swig_gopanic(msg)
-%}
diff --git a/contrib/tools/swig/Lib/go/std_common.i b/contrib/tools/swig/Lib/go/std_common.i
deleted file mode 100644
index c010facacd..0000000000
--- a/contrib/tools/swig/Lib/go/std_common.i
+++ /dev/null
@@ -1,4 +0,0 @@
-%include <std_except.i>
-
-%apply size_t { std::size_t };
-%apply const size_t& { const std::size_t& };
diff --git a/contrib/tools/swig/Lib/go/std_except.i b/contrib/tools/swig/Lib/go/std_except.i
deleted file mode 100644
index 4f021a1264..0000000000
--- a/contrib/tools/swig/Lib/go/std_except.i
+++ /dev/null
@@ -1,31 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_except.i
- *
- * Typemaps used by the STL wrappers that throw exceptions.
- * These typemaps are used when methods are declared with an STL exception specification, such as
- * size_t at() const throw (std::out_of_range);
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <typeinfo>
-#include <stdexcept>
-%}
-
-namespace std
-{
- %ignore exception;
- struct exception {};
-}
-
-%typemap(throws) std::bad_cast %{_swig_gopanic($1.what());%}
-%typemap(throws) std::bad_exception %{_swig_gopanic($1.what());%}
-%typemap(throws) std::domain_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::exception %{_swig_gopanic($1.what());%}
-%typemap(throws) std::invalid_argument %{_swig_gopanic($1.what());%}
-%typemap(throws) std::length_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::logic_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::out_of_range %{_swig_gopanic($1.what());%}
-%typemap(throws) std::overflow_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::range_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::runtime_error %{_swig_gopanic($1.what());%}
-%typemap(throws) std::underflow_error %{_swig_gopanic($1.what());%}
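
In practice this means any STL exception escaping a wrapped method reaches the Go caller as a panic carrying what(). A hypothetical declaration in the style of the header comment above:

    #include <cstddef>
    #include <stdexcept>

    /* Hypothetical wrapped class: with the typemaps above, an std::out_of_range
       thrown by at() is delivered to Go via _swig_gopanic(e.what()). */
    class Buffer {
    public:
        std::size_t at(std::size_t i) const throw (std::out_of_range);
    };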
diff --git a/contrib/tools/swig/Lib/go/std_string.i b/contrib/tools/swig/Lib/go/std_string.i
deleted file mode 100644
index 35b4a5e46b..0000000000
--- a/contrib/tools/swig/Lib/go/std_string.i
+++ /dev/null
@@ -1,162 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_string.i
- *
- * Typemaps for std::string and const std::string&
- * These are mapped to a Go string and are passed around by value.
- *
- * To use non-const std::string references use the following %apply. Note
- * that they are passed by value.
- * %apply const std::string & {std::string &};
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <string>
-%}
-
-namespace std {
-
-%naturalvar string;
-
-class string;
-
-%typemap(gotype) string, const string & "string"
-
-%typemap(in) string
-%{ $1.assign($input.p, $input.n); %}
-
-%typemap(godirectorout) string
-%{
- {
- p := Swig_malloc(len($input))
- s := (*[1<<30]byte)(unsafe.Pointer(p))[:len($input)]
- copy(s, $input)
- $result = *(*string)(unsafe.Pointer(&s))
- }
-%}
-
-%typemap(directorout) string
-%{
- $result.assign($input.p, $input.n);
- free($input.p);
-%}
-
-%typemap(out,fragment="AllocateString") string
-%{ $result = Swig_AllocateString($1.data(), $1.length()); %}
-
-%typemap(goout,fragment="CopyString") string
-%{ $result = swigCopyString($1) %}
-
-%typemap(directorin,fragment="AllocateString") string
-%{ $input = Swig_AllocateString($1.data(), $1.length()); %}
-
-%typemap(godirectorin,fragment="CopyString") string
-%{ $result = swigCopyString($input) %}
-
-%typemap(throws) string
-%{ _swig_gopanic($1.c_str()); %}
-
-%typemap(in) const string &
-%{
- $*1_ltype $1_str($input.p, $input.n);
- $1 = &$1_str;
-%}
-
-%typemap(godirectorout) const string &
-%{
- {
- p := Swig_malloc(len($input))
- s := (*[1<<30]byte)(unsafe.Pointer(p))[:len($input)]
- copy(s, $input)
- $result = *(*string)(unsafe.Pointer(&s))
- }
-%}
-
-%typemap(directorout,warning=SWIGWARN_TYPEMAP_THREAD_UNSAFE_MSG) const string &
-%{
- static $*1_ltype $1_str;
- $1_str.assign($input.p, $input.n);
- free($input.p);
- $result = &$1_str;
-%}
-
-%typemap(out,fragment="AllocateString") const string &
-%{ $result = Swig_AllocateString((*$1).data(), (*$1).length()); %}
-
-%typemap(goout,fragment="CopyString") const string &
-%{ $result = swigCopyString($1) %}
-
-%typemap(directorin,fragment="AllocateString") const string &
-%{ $input = Swig_AllocateString($1.data(), $1.length()); %}
-
-%typemap(godirectorin,fragment="CopyString") const string &
-%{ $result = swigCopyString($input) %}
-
-%typemap(throws) const string &
-%{ _swig_gopanic($1.c_str()); %}
-
-
-%typemap(gotype) string * "*string"
-
-%typemap(in) string * (string temp)
-%{
- if ($input) {
- temp.assign($input->p, $input->n);
- $1 = &temp;
- } else
- $1 = 0;
-%}
-
-%typemap(godirectorout) string *
-%{
- if $input != nil {
- p := Swig_malloc(len(*$input))
- s := (*[1<<30]byte)(unsafe.Pointer(p))[:len(*$input)]
- copy(s, *$input)
- $result = (*string)(unsafe.Pointer(&s))
- } else {
- $result = nil
- }
-%}
-
-%typemap(directorout) string * (string temp)
-%{
- temp.assign($input->p, $input->n);
- $result = &temp;
- free($input.p);
-%}
-
-%typemap(out,fragment="AllocateString") string * (_gostring_ temp)
-%{
- temp = Swig_AllocateString($1->data(), $1->length());
- $result = &temp;
-%}
-
-%typemap(goout,fragment="CopyString") string *
-%{ *$result = swigCopyString(*$1) %}
-
-%typemap(directorin,fragment="AllocateString") string * (_gostring_ temp)
-%{
- if ($1) {
- temp = Swig_AllocateString($1->data(), $1->length());
- $input = &temp;
- } else
- $input = 0;
-%}
-
-%typemap(godirectorin,fragment="CopyString") string *
-%{ *$result = swigCopyString(*$input); %}
-
-%typemap(argout,fragment="AllocateString") string *
-%{
- if ($1)
- *$input = Swig_AllocateString($1->data(), $1->length());
-%}
-
-%typemap(goargout,fragment="CopyString") string *
-%{
- if $input != nil {
- *$1 = swigCopyString(*$input)
- }
-%}
-
-}
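
A small module sketch (hypothetical declarations) showing what these typemaps provide; the %apply line is the opt-in mentioned in the header comment for non-const references, which are still passed by value.

    %include "std_string.i"
    %apply const std::string & { std::string & };   // opt-in from the header comment

    %{
    #include <string>
    %}

    // Hypothetical functions: each surfaces in Go with plain string parameters/returns.
    std::string greet(const std::string &name);
    std::string echo(std::string s);
    void        shout(std::string &s);   // modification is not visible to the Go caller (passed by value)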
diff --git a/contrib/tools/swig/Lib/go/std_vector.i b/contrib/tools/swig/Lib/go/std_vector.i
deleted file mode 100644
index 679c707596..0000000000
--- a/contrib/tools/swig/Lib/go/std_vector.i
+++ /dev/null
@@ -1,92 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_vector.i
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <vector>
-#include <stdexcept>
-%}
-
-namespace std {
-
- template<class T> class vector {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
-
- vector();
- vector(size_type n);
- vector(const vector& other);
-
- size_type size() const;
- size_type capacity() const;
- void reserve(size_type n);
- %rename(isEmpty) empty;
- bool empty() const;
- void clear();
- %rename(add) push_back;
- void push_back(const value_type& x);
- %extend {
- const_reference get(int i) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- return (*self)[i];
- else
- throw std::out_of_range("vector index out of range");
- }
- void set(int i, const value_type& val) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- (*self)[i] = val;
- else
- throw std::out_of_range("vector index out of range");
- }
- }
- };
-
- // bool specialization
- template<> class vector<bool> {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef bool value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef bool const_reference;
-
- vector();
- vector(size_type n);
- vector(const vector& other);
-
- size_type size() const;
- size_type capacity() const;
- void reserve(size_type n);
- %rename(isEmpty) empty;
- bool empty() const;
- void clear();
- %rename(add) push_back;
- void push_back(const value_type& x);
- %extend {
- bool get(int i) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- return (*self)[i];
- else
- throw std::out_of_range("vector index out of range");
- }
- void set(int i, const value_type& val) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- (*self)[i] = val;
- else
- throw std::out_of_range("vector index out of range");
- }
- }
- };
-}
diff --git a/contrib/tools/swig/Lib/java/std_common.i b/contrib/tools/swig/Lib/java/std_common.i
deleted file mode 100644
index cee11e8caa..0000000000
--- a/contrib/tools/swig/Lib/java/std_common.i
+++ /dev/null
@@ -1,5 +0,0 @@
-%include <std_except.i>
-
-%apply size_t { std::size_t };
-%apply const size_t& { const std::size_t& };
-
diff --git a/contrib/tools/swig/Lib/java/std_except.i b/contrib/tools/swig/Lib/java/std_except.i
deleted file mode 100644
index 91d2f92cf1..0000000000
--- a/contrib/tools/swig/Lib/java/std_except.i
+++ /dev/null
@@ -1,32 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_except.i
- *
- * Typemaps used by the STL wrappers that throw exceptions.
- * These typemaps are used when methods are declared with an STL exception specification, such as
- * size_t at() const throw (std::out_of_range);
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <typeinfo>
-#include <stdexcept>
-%}
-
-namespace std
-{
- %ignore exception;
- struct exception {};
-}
-
-%typemap(throws) std::bad_cast "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::bad_exception "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::domain_error "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::exception "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::invalid_argument "SWIG_JavaThrowException(jenv, SWIG_JavaIllegalArgumentException, $1.what());\n return $null;"
-%typemap(throws) std::length_error "SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, $1.what());\n return $null;"
-%typemap(throws) std::logic_error "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::out_of_range "SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, $1.what());\n return $null;"
-%typemap(throws) std::overflow_error "SWIG_JavaThrowException(jenv, SWIG_JavaArithmeticException, $1.what());\n return $null;"
-%typemap(throws) std::range_error "SWIG_JavaThrowException(jenv, SWIG_JavaIndexOutOfBoundsException, $1.what());\n return $null;"
-%typemap(throws) std::runtime_error "SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.what());\n return $null;"
-%typemap(throws) std::underflow_error "SWIG_JavaThrowException(jenv, SWIG_JavaArithmeticException, $1.what());\n return $null;"
-
diff --git a/contrib/tools/swig/Lib/java/std_string.i b/contrib/tools/swig/Lib/java/std_string.i
deleted file mode 100644
index 830a896119..0000000000
--- a/contrib/tools/swig/Lib/java/std_string.i
+++ /dev/null
@@ -1,121 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_string.i
- *
- * Typemaps for std::string and const std::string&
- * These are mapped to a Java String and are passed around by value.
- *
- * To use non-const std::string references use the following %apply. Note
- * that they are passed by value.
- * %apply const std::string & {std::string &};
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <string>
-%}
-
-namespace std {
-
-%naturalvar string;
-
-class string;
-
-// string
-%typemap(jni) string "jstring"
-%typemap(jtype) string "String"
-%typemap(jstype) string "String"
-%typemap(javadirectorin) string "$jniinput"
-%typemap(javadirectorout) string "$javacall"
-
-%typemap(in) string
-%{ if(!$input) {
- SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
- return $null;
- }
- const char *$1_pstr = (const char *)jenv->GetStringUTFChars($input, 0);
- if (!$1_pstr) return $null;
- $1.assign($1_pstr);
- jenv->ReleaseStringUTFChars($input, $1_pstr); %}
-
-%typemap(directorout) string
-%{ if(!$input) {
- if (!jenv->ExceptionCheck()) {
- SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
- }
- return $null;
- }
- const char *$1_pstr = (const char *)jenv->GetStringUTFChars($input, 0);
- if (!$1_pstr) return $null;
- $result.assign($1_pstr);
- jenv->ReleaseStringUTFChars($input, $1_pstr); %}
-
-%typemap(directorin,descriptor="Ljava/lang/String;") string
-%{ $input = jenv->NewStringUTF($1.c_str());
- Swig::LocalRefGuard $1_refguard(jenv, $input); %}
-
-%typemap(out) string
-%{ $result = jenv->NewStringUTF($1.c_str()); %}
-
-%typemap(javain) string "$javainput"
-
-%typemap(javaout) string {
- return $jnicall;
- }
-
-%typemap(typecheck) string = char *;
-
-%typemap(throws) string
-%{ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.c_str());
- return $null; %}
-
-// const string &
-%typemap(jni) const string & "jstring"
-%typemap(jtype) const string & "String"
-%typemap(jstype) const string & "String"
-%typemap(javadirectorin) const string & "$jniinput"
-%typemap(javadirectorout) const string & "$javacall"
-
-%typemap(in) const string &
-%{ if(!$input) {
- SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
- return $null;
- }
- const char *$1_pstr = (const char *)jenv->GetStringUTFChars($input, 0);
- if (!$1_pstr) return $null;
- $*1_ltype $1_str($1_pstr);
- $1 = &$1_str;
- jenv->ReleaseStringUTFChars($input, $1_pstr); %}
-
-%typemap(directorout,warning=SWIGWARN_TYPEMAP_THREAD_UNSAFE_MSG) const string &
-%{ if(!$input) {
- SWIG_JavaThrowException(jenv, SWIG_JavaNullPointerException, "null string");
- return $null;
- }
- const char *$1_pstr = (const char *)jenv->GetStringUTFChars($input, 0);
- if (!$1_pstr) return $null;
- /* possible thread/reentrant code problem */
- static $*1_ltype $1_str;
- $1_str = $1_pstr;
- $result = &$1_str;
- jenv->ReleaseStringUTFChars($input, $1_pstr); %}
-
-%typemap(directorin,descriptor="Ljava/lang/String;") const string &
-%{ $input = jenv->NewStringUTF($1.c_str());
- Swig::LocalRefGuard $1_refguard(jenv, $input); %}
-
-%typemap(out) const string &
-%{ $result = jenv->NewStringUTF($1->c_str()); %}
-
-%typemap(javain) const string & "$javainput"
-
-%typemap(javaout) const string & {
- return $jnicall;
- }
-
-%typemap(typecheck) const string & = char *;
-
-%typemap(throws) const string &
-%{ SWIG_JavaThrowException(jenv, SWIG_JavaRuntimeException, $1.c_str());
- return $null; %}
-
-}
-
diff --git a/contrib/tools/swig/Lib/java/std_vector.i b/contrib/tools/swig/Lib/java/std_vector.i
deleted file mode 100644
index 60ee23ebbe..0000000000
--- a/contrib/tools/swig/Lib/java/std_vector.i
+++ /dev/null
@@ -1,185 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_vector.i
- *
- * SWIG typemaps for std::vector.
- * The Java proxy class extends java.util.AbstractList and implements
- * java.util.RandomAccess. The std::vector container looks and feels much like a
- * java.util.ArrayList from Java.
- * ----------------------------------------------------------------------------- */
-
-%include <std_common.i>
-
-%{
-#include <vector>
-#include <stdexcept>
-%}
-
-%fragment("SWIG_VectorSize", "header", fragment="SWIG_JavaIntFromSize_t") {
-SWIGINTERN jint SWIG_VectorSize(size_t size) {
- jint sz = SWIG_JavaIntFromSize_t(size);
- if (sz == -1)
- throw std::out_of_range("vector size is too large to fit into a Java int");
- return sz;
-}
-}
-
-%define SWIG_STD_VECTOR_MINIMUM_INTERNAL(CTYPE, CONST_REFERENCE)
-%typemap(javabase) std::vector< CTYPE > "java.util.AbstractList<$typemap(jboxtype, CTYPE)>"
-%typemap(javainterfaces) std::vector< CTYPE > "java.util.RandomAccess"
-%proxycode %{
- public $javaclassname($typemap(jstype, CTYPE)[] initialElements) {
- this();
- reserve(initialElements.length);
-
- for ($typemap(jstype, CTYPE) element : initialElements) {
- add(element);
- }
- }
-
- public $javaclassname(Iterable<$typemap(jboxtype, CTYPE)> initialElements) {
- this();
- for ($typemap(jstype, CTYPE) element : initialElements) {
- add(element);
- }
- }
-
- public $typemap(jboxtype, CTYPE) get(int index) {
- return doGet(index);
- }
-
- public $typemap(jboxtype, CTYPE) set(int index, $typemap(jboxtype, CTYPE) e) {
- return doSet(index, e);
- }
-
- public boolean add($typemap(jboxtype, CTYPE) e) {
- modCount++;
- doAdd(e);
- return true;
- }
-
- public void add(int index, $typemap(jboxtype, CTYPE) e) {
- modCount++;
- doAdd(index, e);
- }
-
- public $typemap(jboxtype, CTYPE) remove(int index) {
- modCount++;
- return doRemove(index);
- }
-
- protected void removeRange(int fromIndex, int toIndex) {
- modCount++;
- doRemoveRange(fromIndex, toIndex);
- }
-
- public int size() {
- return doSize();
- }
-%}
-
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef CTYPE value_type;
- typedef CTYPE *pointer;
- typedef CTYPE const *const_pointer;
- typedef CTYPE &reference;
- typedef CONST_REFERENCE const_reference;
-
- vector();
- vector(const vector &other);
-
- size_type capacity() const;
- void reserve(size_type n) throw (std::length_error);
- %rename(isEmpty) empty;
- bool empty() const;
- void clear();
- %extend {
- %fragment("SWIG_VectorSize");
-
- vector(jint count, const CTYPE &value) throw (std::out_of_range) {
- if (count < 0)
- throw std::out_of_range("vector count must be positive");
- return new std::vector< CTYPE >(static_cast<std::vector< CTYPE >::size_type>(count), value);
- }
-
- jint doSize() const throw (std::out_of_range) {
- return SWIG_VectorSize(self->size());
- }
-
- void doAdd(const value_type& x) {
- self->push_back(x);
- }
-
- void doAdd(jint index, const value_type& x) throw (std::out_of_range) {
- jint size = static_cast<jint>(self->size());
- if (0 <= index && index <= size) {
- self->insert(self->begin() + index, x);
- } else {
- throw std::out_of_range("vector index out of range");
- }
- }
-
- value_type doRemove(jint index) throw (std::out_of_range) {
- jint size = static_cast<jint>(self->size());
- if (0 <= index && index < size) {
- CTYPE const old_value = (*self)[index];
- self->erase(self->begin() + index);
- return old_value;
- } else {
- throw std::out_of_range("vector index out of range");
- }
- }
-
- CONST_REFERENCE doGet(jint index) throw (std::out_of_range) {
- jint size = static_cast<jint>(self->size());
- if (index >= 0 && index < size)
- return (*self)[index];
- else
- throw std::out_of_range("vector index out of range");
- }
-
- value_type doSet(jint index, const value_type& val) throw (std::out_of_range) {
- jint size = static_cast<jint>(self->size());
- if (index >= 0 && index < size) {
- CTYPE const old_value = (*self)[index];
- (*self)[index] = val;
- return old_value;
- }
- else
- throw std::out_of_range("vector index out of range");
- }
-
- void doRemoveRange(jint fromIndex, jint toIndex) throw (std::out_of_range) {
- jint size = static_cast<jint>(self->size());
- if (0 <= fromIndex && fromIndex <= toIndex && toIndex <= size) {
- self->erase(self->begin() + fromIndex, self->begin() + toIndex);
- } else {
- throw std::out_of_range("vector index out of range");
- }
- }
- }
-%enddef
-
-%javamethodmodifiers std::vector::doSize "private";
-%javamethodmodifiers std::vector::doAdd "private";
-%javamethodmodifiers std::vector::doGet "private";
-%javamethodmodifiers std::vector::doSet "private";
-%javamethodmodifiers std::vector::doRemove "private";
-%javamethodmodifiers std::vector::doRemoveRange "private";
-
-namespace std {
-
- template<class T> class vector {
- SWIG_STD_VECTOR_MINIMUM_INTERNAL(T, const value_type&)
- };
-
- // bool specialization
- template<> class vector<bool> {
- SWIG_STD_VECTOR_MINIMUM_INTERNAL(bool, bool)
- };
-}
-
-%define specialize_std_vector(T)
-#warning "specialize_std_vector - specialization for type T no longer needed"
-%enddef
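
To expose a concrete container, a module instantiates the template; a minimal sketch, where the module name and element type are arbitrary examples.

    %module example
    %include "std_vector.i"

    // Generates a DoubleVector proxy that extends java.util.AbstractList<Double>,
    // implements java.util.RandomAccess, and routes get/set/add/remove through the
    // private doGet/doSet/doAdd/doRemove helpers defined above.
    %template(DoubleVector) std::vector<double>;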
diff --git a/contrib/tools/swig/Lib/perl5/exception.i b/contrib/tools/swig/Lib/perl5/exception.i
deleted file mode 100644
index b786f25e29..0000000000
--- a/contrib/tools/swig/Lib/perl5/exception.i
+++ /dev/null
@@ -1,5 +0,0 @@
-%include <typemaps/exception.swg>
-
-%insert("runtime") {
- %define_as(SWIG_exception(code, msg), %block(%error(code, msg); SWIG_fail; ))
-}
diff --git a/contrib/tools/swig/Lib/perl5/std_common.i b/contrib/tools/swig/Lib/perl5/std_common.i
deleted file mode 100644
index 7c1ff23289..0000000000
--- a/contrib/tools/swig/Lib/perl5/std_common.i
+++ /dev/null
@@ -1,28 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_common.i
- *
- * SWIG typemaps for STL - common utilities
- * ----------------------------------------------------------------------------- */
-
-%include <std/std_except.i>
-
-%apply size_t { std::size_t };
-
-%fragment("<string>");
-%{
-SWIGINTERN
-double SwigSvToNumber(SV* sv) {
- return SvIOK(sv) ? double(SvIVX(sv)) : SvNVX(sv);
-}
-SWIGINTERN
-std::string SwigSvToString(SV* sv) {
- STRLEN len;
- char *ptr = SvPV(sv, len);
- return std::string(ptr, len);
-}
-SWIGINTERN
-void SwigSvFromString(SV* sv, const std::string& s) {
- sv_setpvn(sv,s.data(),s.size());
-}
-%}
-
diff --git a/contrib/tools/swig/Lib/perl5/std_except.i b/contrib/tools/swig/Lib/perl5/std_except.i
deleted file mode 100644
index af98428f65..0000000000
--- a/contrib/tools/swig/Lib/perl5/std_except.i
+++ /dev/null
@@ -1 +0,0 @@
-%include <typemaps/std_except.swg>
diff --git a/contrib/tools/swig/Lib/perl5/std_string.i b/contrib/tools/swig/Lib/perl5/std_string.i
deleted file mode 100644
index 6f34f18475..0000000000
--- a/contrib/tools/swig/Lib/perl5/std_string.i
+++ /dev/null
@@ -1,2 +0,0 @@
-%include <perlstrings.swg>
-%include <typemaps/std_string.swg>
diff --git a/contrib/tools/swig/Lib/perl5/std_vector.i b/contrib/tools/swig/Lib/perl5/std_vector.i
deleted file mode 100644
index 5bfd2c5ac8..0000000000
--- a/contrib/tools/swig/Lib/perl5/std_vector.i
+++ /dev/null
@@ -1,592 +0,0 @@
-/* -----------------------------------------------------------------------------
- * std_vector.i
- *
- * SWIG typemaps for std::vector types
- * ----------------------------------------------------------------------------- */
-
-%include <std_common.i>
-
-// ------------------------------------------------------------------------
-// std::vector
-//
-// The aim of all that follows would be to integrate std::vector with
-// Perl as much as possible, namely, to allow the user to pass and
-// be returned Perl arrays.
-// const declarations are used to guess the intent of the function being
-// exported; therefore, the following rationale is applied:
-//
-// -- f(std::vector<T>), f(const std::vector<T>&), f(const std::vector<T>*):
-// the parameter being read-only, either a Perl sequence or a
-// previously wrapped std::vector<T> can be passed.
-// -- f(std::vector<T>&), f(std::vector<T>*):
-// the parameter must be modified; therefore, only a wrapped std::vector
-// can be passed.
-// -- std::vector<T> f():
-// the vector is returned by copy; therefore, a Perl sequence of T:s
-// is returned which is most easily used in other Perl functions
-// -- std::vector<T>& f(), std::vector<T>* f(), const std::vector<T>& f(),
-// const std::vector<T>* f():
-// the vector is returned by reference; therefore, a wrapped std::vector
-// is returned
-// ------------------------------------------------------------------------
-
-%{
-#include <vector>
-%}
-%fragment("<algorithm>");
-%fragment("<stdexcept>");
-
-// exported class
-
-namespace std {
-
- template<class T> class vector {
- %typemap(in) vector<T> (std::vector<T>* v) {
- if (SWIG_ConvertPtr($input,(void **) &v,
- $&1_descriptor,1) != -1) {
- $1 = *v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- SV **tv;
- I32 len = av_len(av) + 1;
- T* obj;
- for (int i=0; i<len; i++) {
- tv = av_fetch(av, i, 0);
- if (SWIG_ConvertPtr(*tv, (void **)&obj,
- $descriptor(T *),0) != -1) {
- $1.push_back(*obj);
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(in) const vector<T>& (std::vector<T> temp,
- std::vector<T>* v),
- const vector<T>* (std::vector<T> temp,
- std::vector<T>* v) {
- if (SWIG_ConvertPtr($input,(void **) &v,
- $1_descriptor,1) != -1) {
- $1 = v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- SV **tv;
- I32 len = av_len(av) + 1;
- T* obj;
- for (int i=0; i<len; i++) {
- tv = av_fetch(av, i, 0);
- if (SWIG_ConvertPtr(*tv, (void **)&obj,
- $descriptor(T *),0) != -1) {
- temp.push_back(*obj);
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- $1 = &temp;
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(out) vector<T> {
- size_t len = $1.size();
- SV **svs = new SV*[len];
- for (size_t i=0; i<len; i++) {
- T* ptr = new T($1[i]);
- svs[i] = sv_newmortal();
- SWIG_MakePtr(svs[i], (void*) ptr,
- $descriptor(T *), $shadow|$owner);
- }
- AV *myav = av_make(len, svs);
- delete[] svs;
- $result = newRV_noinc((SV*) myav);
- sv_2mortal($result);
- argvi++;
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) vector<T> {
- {
- /* wrapped vector? */
- std::vector< T >* v;
- if (SWIG_ConvertPtr($input,(void **) &v,
- $&1_descriptor,0) != -1) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- T* obj;
- SV **tv = av_fetch(av, 0, 0);
- if (SWIG_ConvertPtr(*tv, (void **)&obj,
- $descriptor(T *),0) != -1)
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) const vector<T>&,
- const vector<T>* {
- {
- /* wrapped vector? */
- std::vector< T >* v;
- if (SWIG_ConvertPtr($input,(void **) &v,
- $1_descriptor,0) != -1) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- T* obj;
- SV **tv = av_fetch(av, 0, 0);
- if (SWIG_ConvertPtr(*tv, (void **)&obj,
- $descriptor(T *),0) != -1)
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
-
- vector(unsigned int size = 0);
- vector(unsigned int size, const T& value);
- vector(const vector& other);
-
- unsigned int size() const;
- bool empty() const;
- void clear();
- %rename(push) push_back;
- void push_back(const T& x);
- %extend {
- T pop() throw (std::out_of_range) {
- if (self->size() == 0)
- throw std::out_of_range("pop from empty vector");
- T x = self->back();
- self->pop_back();
- return x;
- }
- T& get(int i) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- return (*self)[i];
- else
- throw std::out_of_range("vector index out of range");
- }
- void set(int i, const T& x) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- (*self)[i] = x;
- else
- throw std::out_of_range("vector index out of range");
- }
- }
- };
-
- // specializations for pointers
- template<class T> class vector<T*> {
- %typemap(in) vector<T*> (std::vector<T*>* v) {
- int res = SWIG_ConvertPtr($input,(void **) &v, $&1_descriptor,0);
- if (SWIG_IsOK(res)){
- $1 = *v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- I32 len = av_len(av) + 1;
- for (int i=0; i<len; i++) {
- void *v;
- SV **tv = av_fetch(av, i, 0);
- int res = SWIG_ConvertPtr(*tv, &v, $descriptor(T *),0);
- if (SWIG_IsOK(res)) {
- $1.push_back(%static_cast(v, T *));
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(in) const vector<T *>& (std::vector<T *> temp,std::vector<T *>* v),
- const vector<T *>* (std::vector<T *> temp,std::vector<T *>* v) {
- int res = SWIG_ConvertPtr($input,(void **) &v, $1_descriptor,0);
- if (SWIG_IsOK(res)) {
- $1 = v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- I32 len = av_len(av) + 1;
- for (int i=0; i<len; i++) {
- void *v;
- SV **tv = av_fetch(av, i, 0);
- int res = SWIG_ConvertPtr(*tv, &v, $descriptor(T *),0);
- if (SWIG_IsOK(res)) {
- temp.push_back(%static_cast(v, T *));
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- $1 = &temp;
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(out) vector<T *> {
- size_t len = $1.size();
- SV **svs = new SV*[len];
- for (size_t i=0; i<len; i++) {
- T *x = (($1_type &)$1)[i];
- svs[i] = sv_newmortal();
- sv_setsv(svs[i], SWIG_NewPointerObj(x, $descriptor(T *), 0));
- }
- AV *myav = av_make(len, svs);
- delete[] svs;
- $result = newRV_noinc((SV*) myav);
- sv_2mortal($result);
- argvi++;
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) vector<T *> {
- {
- /* wrapped vector? */
- std::vector< T *>* v;
- int res = SWIG_ConvertPtr($input,(void **) &v, $&1_descriptor,0);
- if (SWIG_IsOK(res)) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- void *v;
- SV **tv = av_fetch(av, 0, 0);
- int res = SWIG_ConvertPtr(*tv, &v, $descriptor(T *),0);
- if (SWIG_IsOK(res))
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) const vector<T *>&,const vector<T *>* {
- {
- /* wrapped vector? */
- std::vector< T *> *v;
- int res = SWIG_ConvertPtr($input,%as_voidptrptr(&v), $1_descriptor,0);
- if (SWIG_IsOK(res)) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- void *v;
- SV **tv = av_fetch(av, 0, 0);
- int res = SWIG_ConvertPtr(*tv, &v, $descriptor(T *),0);
- if (SWIG_IsOK(res))
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T* value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
-
- vector(unsigned int size = 0);
- vector(unsigned int size, T *value);
- vector(const vector& other);
-
- unsigned int size() const;
- bool empty() const;
- void clear();
- %rename(push) push_back;
- void push_back(T *x);
- %extend {
- T *pop() throw (std::out_of_range) {
- if (self->size() == 0)
- throw std::out_of_range("pop from empty vector");
- T *x = self->back();
- self->pop_back();
- return x;
- }
- T *get(int i) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- return (*self)[i];
- else
- throw std::out_of_range("vector index out of range");
- }
- void set(int i, T *x) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- (*self)[i] = x;
- else
- throw std::out_of_range("vector index out of range");
- }
- }
- };
-
-
- // specializations for built-ins
-
- %define specialize_std_vector(T,CHECK_T,TO_T,FROM_T)
- template<> class vector<T> {
- %typemap(in) vector<T> (std::vector<T>* v) {
- if (SWIG_ConvertPtr($input,(void **) &v,
- $&1_descriptor,1) != -1){
- $1 = *v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- SV **tv;
- I32 len = av_len(av) + 1;
- for (int i=0; i<len; i++) {
- tv = av_fetch(av, i, 0);
- if (CHECK_T(*tv)) {
- $1.push_back((T)TO_T(*tv));
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(in) const vector<T>& (std::vector<T> temp,
- std::vector<T>* v),
- const vector<T>* (std::vector<T> temp,
- std::vector<T>* v) {
- if (SWIG_ConvertPtr($input,(void **) &v,
- $1_descriptor,1) != -1) {
- $1 = v;
- } else if (SvROK($input)) {
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) != SVt_PVAV)
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- SV **tv;
- I32 len = av_len(av) + 1;
- for (int i=0; i<len; i++) {
- tv = av_fetch(av, i, 0);
- if (CHECK_T(*tv)) {
- temp.push_back((T)TO_T(*tv));
- } else {
- SWIG_croak("Type error in argument $argnum of "
- "$symname. "
- "Expected an array of " #T);
- }
- }
- $1 = &temp;
- } else {
- SWIG_croak("Type error in argument $argnum of $symname. "
- "Expected an array of " #T);
- }
- }
- %typemap(out) vector<T> {
- size_t len = $1.size();
- SV **svs = new SV*[len];
- for (size_t i=0; i<len; i++) {
- svs[i] = sv_newmortal();
- FROM_T(svs[i], $1[i]);
- }
- AV *myav = av_make(len, svs);
- delete[] svs;
- $result = newRV_noinc((SV*) myav);
- sv_2mortal($result);
- argvi++;
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) vector<T> {
- {
- /* wrapped vector? */
- std::vector< T >* v;
- if (SWIG_ConvertPtr($input,(void **) &v,
- $&1_descriptor,0) != -1) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- SV **tv = av_fetch(av, 0, 0);
- if (CHECK_T(*tv))
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- %typecheck(SWIG_TYPECHECK_VECTOR) const vector<T>&,
- const vector<T>* {
- {
- /* wrapped vector? */
- std::vector< T >* v;
- if (SWIG_ConvertPtr($input,(void **) &v,
- $1_descriptor,0) != -1) {
- $1 = 1;
- } else if (SvROK($input)) {
- /* native sequence? */
- AV *av = (AV *)SvRV($input);
- if (SvTYPE(av) == SVt_PVAV) {
- I32 len = av_len(av) + 1;
- if (len == 0) {
- /* an empty sequence can be of any type */
- $1 = 1;
- } else {
- /* check the first element only */
- SV **tv = av_fetch(av, 0, 0);
- if (CHECK_T(*tv))
- $1 = 1;
- else
- $1 = 0;
- }
- }
- } else {
- $1 = 0;
- }
- }
- }
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef T value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
-
- vector(unsigned int size = 0);
- vector(unsigned int size, T value);
- vector(const vector& other);
-
- unsigned int size() const;
- bool empty() const;
- void clear();
- %rename(push) push_back;
- void push_back(T x);
- %extend {
- T pop() throw (std::out_of_range) {
- if (self->size() == 0)
- throw std::out_of_range("pop from empty vector");
- T x = self->back();
- self->pop_back();
- return x;
- }
- T get(int i) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- return (*self)[i];
- else
- throw std::out_of_range("vector index out of range");
- }
- void set(int i, T x) throw (std::out_of_range) {
- int size = int(self->size());
- if (i>=0 && i<size)
- (*self)[i] = x;
- else
- throw std::out_of_range("vector index out of range");
- }
- }
- };
- %enddef
-
- specialize_std_vector(bool,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(char,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(int,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(short,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(long,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(unsigned char,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(unsigned int,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(unsigned short,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(unsigned long,SvIOK,SvIVX,sv_setiv);
- specialize_std_vector(float,SvNIOK,SwigSvToNumber,sv_setnv);
- specialize_std_vector(double,SvNIOK,SwigSvToNumber,sv_setnv);
- specialize_std_vector(std::string,SvPOK,SwigSvToString,SwigSvFromString);
-}
-
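
The rationale comment at the top of this file is easiest to read against concrete signatures; the hypothetical declarations below illustrate the four cases it describes.

    #include <vector>

    // Read-only parameter: accepts either a wrapped std::vector or a Perl array reference.
    double sum(const std::vector<double> &values);

    // Mutable parameter: only a previously wrapped std::vector may be passed.
    void fill(std::vector<double> &values);

    // Returned by copy: comes back to Perl as an array of elements.
    std::vector<double> make_sequence(int n);

    // Returned by reference: comes back as a wrapped std::vector object.
    const std::vector<double> &view_sequence();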
diff --git a/contrib/tools/swig/Lib/python/exception.i b/contrib/tools/swig/Lib/python/exception.i
deleted file mode 100644
index bb0b15c9dd..0000000000
--- a/contrib/tools/swig/Lib/python/exception.i
+++ /dev/null
@@ -1,6 +0,0 @@
-%include <typemaps/exception.swg>
-
-
-%insert("runtime") {
- %define_as(SWIG_exception(code, msg), %block(%error(code, msg); SWIG_fail; ))
-}
diff --git a/contrib/tools/swig/Lib/python/pycontainer.swg b/contrib/tools/swig/Lib/python/pycontainer.swg
deleted file mode 100644
index d6fdff0871..0000000000
--- a/contrib/tools/swig/Lib/python/pycontainer.swg
+++ /dev/null
@@ -1,1082 +0,0 @@
-/* -----------------------------------------------------------------------------
- * pycontainer.swg
- *
- * Python sequence <-> C++ container wrapper
- *
- * This wrapper, and its iterator, allows a general use (and reuse) of
- * the mapping between C++ and Python, thanks to the C++ templates.
- *
- * Of course, it needs the C++ compiler to support templates, but
- * since we will use this wrapper with the STL containers, that should
- * be the case.
- * ----------------------------------------------------------------------------- */
-
-%{
-#include <iostream>
-
-#if PY_VERSION_HEX >= 0x03020000
-# define SWIGPY_SLICEOBJECT PyObject
-#else
-# define SWIGPY_SLICEOBJECT PySliceObject
-#endif
-%}
-
-
-#if !defined(SWIG_NO_EXPORT_ITERATOR_METHODS)
-# if !defined(SWIG_EXPORT_ITERATOR_METHODS)
-# define SWIG_EXPORT_ITERATOR_METHODS SWIG_EXPORT_ITERATOR_METHODS
-# endif
-#endif
-
-%include <pyiterators.swg>
-
-/**** The PySequence C++ Wrap ***/
-
-%fragment("<stdexcept>");
-
-%include <std_except.i>
-
-%fragment("container_owner_attribute_init", "init") {
- // thread safe initialization
- swig::container_owner_attribute();
-}
-
-%fragment("reference_container_owner", "header", fragment="container_owner_attribute_init") {
-namespace swig {
- static PyObject* container_owner_attribute() {
- static PyObject* attr = SWIG_Python_str_FromChar("__swig_container");
- return attr;
- }
-
- template <typename T>
- struct container_owner {
- // By default, do not add the back-reference (for value types)
- // Specialization below will check the reference for pointer types.
- static bool back_reference(PyObject* /*child*/, PyObject* /*owner*/) {
- return false;
- }
- };
-
- template <>
- struct container_owner<swig::pointer_category> {
- /*
- * Call to add a back-reference to the owning object when returning a
- * reference from a container. Will only set the reference if child
- * is a SWIG wrapper object that does not own the pointer.
- *
- * returns whether the reference was set or not
- */
- static bool back_reference(PyObject* child, PyObject* owner) {
- SwigPyObject* swigThis = SWIG_Python_GetSwigThis(child);
- if (swigThis && (swigThis->own & SWIG_POINTER_OWN) != SWIG_POINTER_OWN) {
- return PyObject_SetAttr(child, container_owner_attribute(), owner) != -1;
- }
- return false;
- }
- };
-}
-}
-
-%fragment(SWIG_Traits_frag(swig::SwigPtr_PyObject),"header",fragment="StdTraits") {
-namespace swig {
- template <> struct traits<SwigPtr_PyObject > {
- typedef value_category category;
- static const char* type_name() { return "SwigPtr_PyObject"; }
- };
-
- template <> struct traits_from<SwigPtr_PyObject> {
- typedef SwigPtr_PyObject value_type;
- static PyObject *from(const value_type& val) {
- PyObject *obj = static_cast<PyObject *>(val);
- Py_XINCREF(obj);
- return obj;
- }
- };
-
- template <>
- struct traits_check<SwigPtr_PyObject, value_category> {
- static bool check(SwigPtr_PyObject) {
- return true;
- }
- };
-
- template <> struct traits_asval<SwigPtr_PyObject > {
- typedef SwigPtr_PyObject value_type;
- static int asval(PyObject *obj, value_type *val) {
- if (val) *val = obj;
- return SWIG_OK;
- }
- };
-}
-}
-
-%fragment(SWIG_Traits_frag(swig::SwigVar_PyObject),"header",fragment="StdTraits") {
-namespace swig {
- template <> struct traits<SwigVar_PyObject > {
- typedef value_category category;
- static const char* type_name() { return "SwigVar_PyObject"; }
- };
-
- template <> struct traits_from<SwigVar_PyObject> {
- typedef SwigVar_PyObject value_type;
- static PyObject *from(const value_type& val) {
- PyObject *obj = static_cast<PyObject *>(val);
- Py_XINCREF(obj);
- return obj;
- }
- };
-
- template <>
- struct traits_check<SwigVar_PyObject, value_category> {
- static bool check(SwigVar_PyObject) {
- return true;
- }
- };
-
- template <> struct traits_asval<SwigVar_PyObject > {
- typedef SwigVar_PyObject value_type;
- static int asval(PyObject *obj, value_type *val) {
- if (val) *val = obj;
- return SWIG_OK;
- }
- };
-}
-}
-
-%fragment("SwigPySequence_Base","header",fragment="<stddef.h>",fragment="StdTraits")
-{
-%#include <functional>
-
-namespace std {
- template <>
- struct less <PyObject *>
- {
- bool
- operator()(PyObject * v, PyObject *w) const
- {
- bool res;
- SWIG_PYTHON_THREAD_BEGIN_BLOCK;
- res = PyObject_RichCompareBool(v, w, Py_LT) ? true : false;
-      /* The comparison result may be inconsistent,
-         e.g. ObjA > ObjX > ObjB
-              but ObjA < ObjB
-       */
- if( PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_TypeError) )
- {
-        /* The objects cannot be compared (this mostly happens under Python 3), */
-        /* so fall back to comparing their pointers directly as a workaround.   */
- res = (v < w);
- PyErr_Clear();
- }
- SWIG_PYTHON_THREAD_END_BLOCK;
- return res;
- }
- };
-
- template <>
- struct less <swig::SwigPtr_PyObject>
- {
- bool
- operator()(const swig::SwigPtr_PyObject& v, const swig::SwigPtr_PyObject& w) const
- {
- return std::less<PyObject *>()(v, w);
- }
- };
-
- template <>
- struct less <swig::SwigVar_PyObject>
- {
- bool
- operator()(const swig::SwigVar_PyObject& v, const swig::SwigVar_PyObject& w) const
- {
- return std::less<PyObject *>()(v, w);
- }
- };
-
-}
-
-namespace swig {
- template <> struct traits<PyObject *> {
- typedef value_category category;
- static const char* type_name() { return "PyObject *"; }
- };
-
- template <> struct traits_asval<PyObject * > {
- typedef PyObject * value_type;
- static int asval(PyObject *obj, value_type *val) {
- if (val) *val = obj;
- return SWIG_OK;
- }
- };
-
- template <>
- struct traits_check<PyObject *, value_category> {
- static bool check(PyObject *) {
- return true;
- }
- };
-
- template <> struct traits_from<PyObject *> {
- typedef PyObject * value_type;
- static PyObject *from(const value_type& val) {
- Py_XINCREF(val);
- return val;
- }
- };
-
-}
-
-namespace swig {
- template <class Difference>
- inline size_t
- check_index(Difference i, size_t size, bool insert = false) {
- if ( i < 0 ) {
- if ((size_t) (-i) <= size)
- return (size_t) (i + size);
- } else if ( (size_t) i < size ) {
- return (size_t) i;
- } else if (insert && ((size_t) i == size)) {
- return size;
- }
- throw std::out_of_range("index out of range");
- }
-
- template <class Difference>
- void
- slice_adjust(Difference i, Difference j, Py_ssize_t step, size_t size, Difference &ii, Difference &jj, bool insert = false) {
- if (step == 0) {
- throw std::invalid_argument("slice step cannot be zero");
- } else if (step > 0) {
- // Required range: 0 <= i < size, 0 <= j < size, i <= j
- if (i < 0) {
- ii = 0;
- } else if (i < (Difference)size) {
- ii = i;
- } else if (insert && (i >= (Difference)size)) {
- ii = (Difference)size;
- }
- if (j < 0) {
- jj = 0;
- } else {
- jj = (j < (Difference)size) ? j : (Difference)size;
- }
- if (jj < ii)
- jj = ii;
- } else {
- // Required range: -1 <= i < size-1, -1 <= j < size-1, i >= j
- if (i < -1) {
- ii = -1;
- } else if (i < (Difference) size) {
- ii = i;
- } else if (i >= (Difference)(size-1)) {
- ii = (Difference)(size-1);
- }
- if (j < -1) {
- jj = -1;
- } else {
- jj = (j < (Difference)size ) ? j : (Difference)(size-1);
- }
- if (ii < jj)
- ii = jj;
- }
- }
-
- template <class Sequence, class Difference>
- inline typename Sequence::iterator
- getpos(Sequence* self, Difference i) {
- typename Sequence::iterator pos = self->begin();
- std::advance(pos, check_index(i,self->size()));
- return pos;
- }
-
- template <class Sequence, class Difference>
- inline typename Sequence::const_iterator
- cgetpos(const Sequence* self, Difference i) {
- typename Sequence::const_iterator pos = self->begin();
- std::advance(pos, check_index(i,self->size()));
- return pos;
- }
-
- template <class Sequence>
- inline void
- erase(Sequence* seq, const typename Sequence::iterator& position) {
- seq->erase(position);
- }
-
- template <class Sequence>
- struct traits_reserve {
- static void reserve(Sequence & /*seq*/, typename Sequence::size_type /*n*/) {
- // This should be specialized for types that support reserve
- }
- };
-
- template <class Sequence, class Difference>
- inline Sequence*
- getslice(const Sequence* self, Difference i, Difference j, Py_ssize_t step) {
- typename Sequence::size_type size = self->size();
- Difference ii = 0;
- Difference jj = 0;
- swig::slice_adjust(i, j, step, size, ii, jj);
-
- if (step > 0) {
- typename Sequence::const_iterator sb = self->begin();
- typename Sequence::const_iterator se = self->begin();
- std::advance(sb,ii);
- std::advance(se,jj);
- if (step == 1) {
- return new Sequence(sb, se);
- } else {
- Sequence *sequence = new Sequence();
- swig::traits_reserve<Sequence>::reserve(*sequence, (jj - ii + step - 1) / step);
- typename Sequence::const_iterator it = sb;
- while (it!=se) {
- sequence->push_back(*it);
- for (Py_ssize_t c=0; c<step && it!=se; ++c)
- it++;
- }
- return sequence;
- }
- } else {
- Sequence *sequence = new Sequence();
- swig::traits_reserve<Sequence>::reserve(*sequence, (ii - jj - step - 1) / -step);
- typename Sequence::const_reverse_iterator sb = self->rbegin();
- typename Sequence::const_reverse_iterator se = self->rbegin();
- std::advance(sb,size-ii-1);
- std::advance(se,size-jj-1);
- typename Sequence::const_reverse_iterator it = sb;
- while (it!=se) {
- sequence->push_back(*it);
- for (Py_ssize_t c=0; c<-step && it!=se; ++c)
- it++;
- }
- return sequence;
- }
- }
-
- template <class Sequence, class Difference, class InputSeq>
- inline void
- setslice(Sequence* self, Difference i, Difference j, Py_ssize_t step, const InputSeq& is = InputSeq()) {
- typename Sequence::size_type size = self->size();
- Difference ii = 0;
- Difference jj = 0;
- swig::slice_adjust(i, j, step, size, ii, jj, true);
- if (step > 0) {
- if (step == 1) {
- size_t ssize = jj - ii;
- if (ssize <= is.size()) {
- // expanding/staying the same size
- swig::traits_reserve<Sequence>::reserve(*self, self->size() - ssize + is.size());
- typename Sequence::iterator sb = self->begin();
- typename InputSeq::const_iterator isit = is.begin();
- std::advance(sb,ii);
- std::advance(isit, jj - ii);
- self->insert(std::copy(is.begin(), isit, sb), isit, is.end());
- } else {
- // shrinking
- typename Sequence::iterator sb = self->begin();
- typename Sequence::iterator se = self->begin();
- std::advance(sb,ii);
- std::advance(se,jj);
- self->erase(sb,se);
- sb = self->begin();
- std::advance(sb,ii);
- self->insert(sb, is.begin(), is.end());
- }
- } else {
- size_t replacecount = (jj - ii + step - 1) / step;
- if (is.size() != replacecount) {
- char msg[1024];
- sprintf(msg, "attempt to assign sequence of size %lu to extended slice of size %lu", (unsigned long)is.size(), (unsigned long)replacecount);
- throw std::invalid_argument(msg);
- }
- typename Sequence::const_iterator isit = is.begin();
- typename Sequence::iterator it = self->begin();
- std::advance(it,ii);
- for (size_t rc=0; rc<replacecount && it != self->end(); ++rc) {
- *it++ = *isit++;
- for (Py_ssize_t c=0; c<(step-1) && it != self->end(); ++c)
- it++;
- }
- }
- } else {
- size_t replacecount = (ii - jj - step - 1) / -step;
- if (is.size() != replacecount) {
- char msg[1024];
- sprintf(msg, "attempt to assign sequence of size %lu to extended slice of size %lu", (unsigned long)is.size(), (unsigned long)replacecount);
- throw std::invalid_argument(msg);
- }
- typename Sequence::const_iterator isit = is.begin();
- typename Sequence::reverse_iterator it = self->rbegin();
- std::advance(it,size-ii-1);
- for (size_t rc=0; rc<replacecount && it != self->rend(); ++rc) {
- *it++ = *isit++;
- for (Py_ssize_t c=0; c<(-step-1) && it != self->rend(); ++c)
- it++;
- }
- }
- }
-
- template <class Sequence, class Difference>
- inline void
- delslice(Sequence* self, Difference i, Difference j, Py_ssize_t step) {
- typename Sequence::size_type size = self->size();
- Difference ii = 0;
- Difference jj = 0;
- swig::slice_adjust(i, j, step, size, ii, jj, true);
- if (step > 0) {
- typename Sequence::iterator sb = self->begin();
- std::advance(sb,ii);
- if (step == 1) {
- typename Sequence::iterator se = self->begin();
- std::advance(se,jj);
- self->erase(sb,se);
- } else {
- typename Sequence::iterator it = sb;
- size_t delcount = (jj - ii + step - 1) / step;
- while (delcount) {
- it = self->erase(it);
- for (Py_ssize_t c=0; c<(step-1) && it != self->end(); ++c)
- it++;
- delcount--;
- }
- }
- } else {
- typename Sequence::reverse_iterator sb = self->rbegin();
- std::advance(sb,size-ii-1);
- typename Sequence::reverse_iterator it = sb;
- size_t delcount = (ii - jj - step - 1) / -step;
- while (delcount) {
- it = typename Sequence::reverse_iterator(self->erase((++it).base()));
- for (Py_ssize_t c=0; c<(-step-1) && it != self->rend(); ++c)
- it++;
- delcount--;
- }
- }
- }
-}
-}
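
The index normalization in check_index mirrors Python's indexing rules. A standalone sketch of just that logic (copied from the fragment above; not part of the original file):

    #include <cstddef>
    #include <iostream>
    #include <stdexcept>

    // Same rules as swig::check_index above: negative indices count from the
    // end, and an index equal to size is accepted only when inserting.
    static size_t check_index_demo(std::ptrdiff_t i, size_t size, bool insert = false) {
      if (i < 0) {
        if ((size_t)(-i) <= size)
          return (size_t)(i + size);
      } else if ((size_t)i < size) {
        return (size_t)i;
      } else if (insert && (size_t)i == size) {
        return size;
      }
      throw std::out_of_range("index out of range");
    }

    int main() {
      std::cout << check_index_demo(-1, 5) << "\n";      // 4, i.e. v[-1]
      std::cout << check_index_demo(3, 5) << "\n";       // 3
      std::cout << check_index_demo(5, 5, true) << "\n"; // 5, valid only for insertion
      try {
        check_index_demo(5, 5);                          // past the end, not inserting
      } catch (const std::out_of_range &) {
        std::cout << "out_of_range\n";
      }
      return 0;
    }
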
-
-%fragment("SwigPySequence_Cont","header",
- fragment="StdTraits",
- fragment="SwigPySequence_Base",
- fragment="SwigPyIterator_T")
-{
-namespace swig
-{
- template <class T>
- struct SwigPySequence_Ref
- {
- SwigPySequence_Ref(PyObject* seq, Py_ssize_t index)
- : _seq(seq), _index(index)
- {
- }
-
- operator T () const
- {
- swig::SwigVar_PyObject item = PySequence_GetItem(_seq, _index);
- try {
- return swig::as<T>(item);
- } catch (const std::invalid_argument& e) {
- char msg[1024];
- sprintf(msg, "in sequence element %d ", (int)_index);
- if (!PyErr_Occurred()) {
- ::%type_error(swig::type_name<T>());
- }
- SWIG_Python_AddErrorMsg(msg);
- SWIG_Python_AddErrorMsg(e.what());
- throw;
- }
- }
-
- SwigPySequence_Ref& operator=(const T& v)
- {
- PySequence_SetItem(_seq, _index, swig::from<T>(v));
- return *this;
- }
-
- private:
- PyObject* _seq;
- Py_ssize_t _index;
- };
-
- template <class T>
- struct SwigPySequence_ArrowProxy
- {
- SwigPySequence_ArrowProxy(const T& x): m_value(x) {}
- const T* operator->() const { return &m_value; }
- operator const T*() const { return &m_value; }
- T m_value;
- };
-
- template <class T, class Reference >
- struct SwigPySequence_InputIterator
- {
- typedef SwigPySequence_InputIterator<T, Reference > self;
-
- typedef std::random_access_iterator_tag iterator_category;
- typedef Reference reference;
- typedef T value_type;
- typedef T* pointer;
- typedef Py_ssize_t difference_type;
-
- SwigPySequence_InputIterator()
- {
- }
-
- SwigPySequence_InputIterator(PyObject* seq, Py_ssize_t index)
- : _seq(seq), _index(index)
- {
- }
-
- reference operator*() const
- {
- return reference(_seq, _index);
- }
-
- SwigPySequence_ArrowProxy<T>
- operator->() const {
- return SwigPySequence_ArrowProxy<T>(operator*());
- }
-
- bool operator==(const self& ri) const
- {
- return (_index == ri._index) && (_seq == ri._seq);
- }
-
- bool operator!=(const self& ri) const
- {
- return !(operator==(ri));
- }
-
- self& operator ++ ()
- {
- ++_index;
- return *this;
- }
-
- self& operator -- ()
- {
- --_index;
- return *this;
- }
-
- self& operator += (difference_type n)
- {
- _index += n;
- return *this;
- }
-
- self operator +(difference_type n) const
- {
- return self(_seq, _index + n);
- }
-
- self& operator -= (difference_type n)
- {
- _index -= n;
- return *this;
- }
-
- self operator -(difference_type n) const
- {
- return self(_seq, _index - n);
- }
-
- difference_type operator - (const self& ri) const
- {
- return _index - ri._index;
- }
-
- bool operator < (const self& ri) const
- {
- return _index < ri._index;
- }
-
- reference
- operator[](difference_type n) const
- {
- return reference(_seq, _index + n);
- }
-
- private:
- PyObject* _seq;
- difference_type _index;
- };
-
- // STL container wrapper around a Python sequence
- template <class T>
- struct SwigPySequence_Cont
- {
- typedef SwigPySequence_Ref<T> reference;
- typedef const SwigPySequence_Ref<T> const_reference;
- typedef T value_type;
- typedef T* pointer;
- typedef Py_ssize_t difference_type;
- typedef size_t size_type;
- typedef const pointer const_pointer;
- typedef SwigPySequence_InputIterator<T, reference> iterator;
- typedef SwigPySequence_InputIterator<T, const_reference> const_iterator;
-
- SwigPySequence_Cont(PyObject* seq) : _seq(0)
- {
- if (!PySequence_Check(seq)) {
- throw std::invalid_argument("a sequence is expected");
- }
- _seq = seq;
- Py_INCREF(_seq);
- }
-
- ~SwigPySequence_Cont()
- {
- Py_XDECREF(_seq);
- }
-
- size_type size() const
- {
- return static_cast<size_type>(PySequence_Size(_seq));
- }
-
- bool empty() const
- {
- return size() == 0;
- }
-
- iterator begin()
- {
- return iterator(_seq, 0);
- }
-
- const_iterator begin() const
- {
- return const_iterator(_seq, 0);
- }
-
- iterator end()
- {
- return iterator(_seq, size());
- }
-
- const_iterator end() const
- {
- return const_iterator(_seq, size());
- }
-
- reference operator[](difference_type n)
- {
- return reference(_seq, n);
- }
-
- const_reference operator[](difference_type n) const
- {
- return const_reference(_seq, n);
- }
-
- bool check() const
- {
- Py_ssize_t s = size();
- for (Py_ssize_t i = 0; i < s; ++i) {
- swig::SwigVar_PyObject item = PySequence_GetItem(_seq, i);
- if (!swig::check<value_type>(item))
- return false;
- }
- return true;
- }
-
- private:
- PyObject* _seq;
- };
-
-}
-}
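
A hedged usage sketch for SwigPySequence_Cont (it assumes a SWIG-generated module, so swig::check<int>/swig::as<int> and the int traits from std_common.i are emitted, and that <vector> and <stdexcept> are included as they are in generated wrapper code; the list construction is only illustrative):

    static void cont_demo() {
      // Wrap a Python list as an STL-style container of int.
      PyObject *pylist = Py_BuildValue("[iii]", 1, 2, 3);
      try {
        swig::SwigPySequence_Cont<int> cont(pylist);    // throws std::invalid_argument
                                                        // if pylist is not a sequence
        if (cont.check()) {                             // every element converts to int?
          std::vector<int> v(cont.begin(), cont.end()); // reads go through swig::as<int>
          cont[0] = 42;                                 // writes go through PySequence_SetItem
        }
      } catch (const std::invalid_argument &) {
        // not a sequence at all
      }
      Py_XDECREF(pylist);
    }
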
-
-%define %swig_sequence_iterator(Sequence...)
- %swig_sequence_iterator_with_making_function(swig::make_output_iterator,Sequence...)
-%enddef
-
-%define %swig_sequence_forward_iterator(Sequence...)
- %swig_sequence_iterator_with_making_function(swig::make_output_forward_iterator,Sequence...)
-%enddef
-
-%define %swig_sequence_iterator_with_making_function(Make_output_iterator,Sequence...)
-#if defined(SWIG_EXPORT_ITERATOR_METHODS)
- class iterator;
- class reverse_iterator;
- class const_iterator;
- class const_reverse_iterator;
-
- %typemap(out,noblock=1,fragment="SwigPySequence_Cont")
- iterator, reverse_iterator, const_iterator, const_reverse_iterator {
- $result = SWIG_NewPointerObj(Make_output_iterator(%static_cast($1,const $type &)),
- swig::SwigPyIterator::descriptor(),SWIG_POINTER_OWN);
- }
- %typemap(out,noblock=1,fragment="SwigPySequence_Cont")
- std::pair<iterator, iterator>, std::pair<const_iterator, const_iterator> {
- $result = PyTuple_New(2);
- PyTuple_SetItem($result,0,SWIG_NewPointerObj(Make_output_iterator(%static_cast($1,const $type &).first),
- swig::SwigPyIterator::descriptor(),SWIG_POINTER_OWN));
- PyTuple_SetItem($result,1,SWIG_NewPointerObj(Make_output_iterator(%static_cast($1,const $type &).second),
- swig::SwigPyIterator::descriptor(),SWIG_POINTER_OWN));
- }
-
- %fragment("SwigPyPairBoolOutputIterator","header",fragment=SWIG_From_frag(bool),fragment="SwigPySequence_Cont") {}
-
- %typemap(out,noblock=1,fragment="SwigPyPairBoolOutputIterator")
- std::pair<iterator, bool>, std::pair<const_iterator, bool> {
- $result = PyTuple_New(2);
- PyTuple_SetItem($result,0,SWIG_NewPointerObj(Make_output_iterator(%static_cast($1,const $type &).first),
- swig::SwigPyIterator::descriptor(),SWIG_POINTER_OWN));
- PyTuple_SetItem($result,1,SWIG_From(bool)(%static_cast($1,const $type &).second));
- }
-
- %typemap(in,noblock=1,fragment="SwigPySequence_Cont")
- iterator(swig::SwigPyIterator *iter = 0, int res),
- reverse_iterator(swig::SwigPyIterator *iter = 0, int res),
- const_iterator(swig::SwigPyIterator *iter = 0, int res),
- const_reverse_iterator(swig::SwigPyIterator *iter = 0, int res) {
- res = SWIG_ConvertPtr($input, %as_voidptrptr(&iter), swig::SwigPyIterator::descriptor(), 0);
- if (!SWIG_IsOK(res) || !iter) {
- %argument_fail(SWIG_TypeError, "$type", $symname, $argnum);
- } else {
- swig::SwigPyIterator_T<$type > *iter_t = dynamic_cast<swig::SwigPyIterator_T<$type > *>(iter);
- if (iter_t) {
- $1 = iter_t->get_current();
- } else {
- %argument_fail(SWIG_TypeError, "$type", $symname, $argnum);
- }
- }
- }
-
- %typecheck(%checkcode(ITERATOR),noblock=1,fragment="SwigPySequence_Cont")
- iterator, reverse_iterator, const_iterator, const_reverse_iterator {
- swig::SwigPyIterator *iter = 0;
- int res = SWIG_ConvertPtr($input, %as_voidptrptr(&iter), swig::SwigPyIterator::descriptor(), 0);
- $1 = (SWIG_IsOK(res) && iter && (dynamic_cast<swig::SwigPyIterator_T<$type > *>(iter) != 0));
- }
-
- %fragment("SwigPySequence_Cont");
-
- %newobject iterator(PyObject **PYTHON_SELF);
- %extend {
- swig::SwigPyIterator* iterator(PyObject **PYTHON_SELF) {
- return Make_output_iterator(self->begin(), self->begin(), self->end(), *PYTHON_SELF);
- }
-
-#if defined(SWIGPYTHON_BUILTIN)
- %feature("python:slot", "tp_iter", functype="getiterfunc") iterator;
-#else
- %pythoncode %{def __iter__(self):
- return self.iterator()%}
-#endif
- }
-
-#endif //SWIG_EXPORT_ITERATOR_METHODS
-%enddef
-
-
-/**** The python container methods ****/
-
-%define %swig_container_methods(Container...)
-
-/* deprecated in Python 2 */
-#if 1
- %newobject __getslice__;
-#endif
- %newobject __getitem__(SWIGPY_SLICEOBJECT *slice);
-
-#if defined(SWIGPYTHON_BUILTIN)
- %feature("python:slot", "nb_nonzero", functype="inquiry") __nonzero__;
- %feature("python:slot", "sq_length", functype="lenfunc") __len__;
-#endif // SWIGPYTHON_BUILTIN
-
- %extend {
- bool __nonzero__() const {
- return !(self->empty());
- }
-
- /* Alias for Python 3 compatibility */
- bool __bool__() const {
- return !(self->empty());
- }
-
- size_type __len__() const {
- return self->size();
- }
-
- // Although __getitem__, front, back actually use a const value_type& return type, the typemaps below
- // use non-const so that they can be easily overridden by users if necessary.
- %typemap(ret, fragment="reference_container_owner", noblock=1) value_type& __getitem__, value_type& front, value_type& back {
- (void)swig::container_owner<swig::traits<$*1_ltype>::category>::back_reference($result, $self);
- }
- }
-%enddef
-
-
-
-%define %swig_sequence_methods_common(Sequence...)
- %swig_sequence_iterator(%arg(Sequence))
- %swig_container_methods(%arg(Sequence))
-
- %fragment("SwigPySequence_Base");
-
-#if defined(SWIGPYTHON_BUILTIN)
- //%feature("python:slot", "sq_item", functype="ssizeargfunc") __getitem__;
- //%feature("python:slot", "sq_slice", functype="ssizessizeargfunc") __getslice__;
- //%feature("python:slot", "sq_ass_item", functype="ssizeobjargproc") __setitem__;
- //%feature("python:slot", "sq_ass_slice", functype="ssizessizeobjargproc") __setslice__;
- %feature("python:slot", "mp_subscript", functype="binaryfunc") __getitem__;
- %feature("python:slot", "mp_ass_subscript", functype="objobjargproc") __setitem__;
-#endif // SWIGPYTHON_BUILTIN
-
- %extend {
- /* typemap for slice object support */
- %typemap(in) SWIGPY_SLICEOBJECT* {
- if (!PySlice_Check($input)) {
- %argument_fail(SWIG_TypeError, "$type", $symname, $argnum);
- }
- $1 = (SWIGPY_SLICEOBJECT *) $input;
- }
- %typemap(typecheck,precedence=SWIG_TYPECHECK_POINTER) SWIGPY_SLICEOBJECT* {
- $1 = PySlice_Check($input);
- }
-
-/* deprecated in Python 2 */
-#if 1
- Sequence* __getslice__(difference_type i, difference_type j) throw (std::out_of_range, std::invalid_argument) {
- return swig::getslice(self, i, j, 1);
- }
-
- void __setslice__(difference_type i, difference_type j) throw (std::out_of_range, std::invalid_argument) {
- swig::setslice(self, i, j, 1, Sequence());
- }
-
- void __setslice__(difference_type i, difference_type j, const Sequence& v) throw (std::out_of_range, std::invalid_argument) {
- swig::setslice(self, i, j, 1, v);
- }
-
- void __delslice__(difference_type i, difference_type j) throw (std::out_of_range, std::invalid_argument) {
- swig::delslice(self, i, j, 1);
- }
-#endif
-
- void __delitem__(difference_type i) throw (std::out_of_range, std::invalid_argument) {
- swig::erase(self, swig::getpos(self, i));
- }
-
- /* Overloaded methods for Python 3 compatibility
- * (Also useful in Python 2.x)
- */
- Sequence* __getitem__(SWIGPY_SLICEOBJECT *slice) throw (std::out_of_range, std::invalid_argument) {
- Py_ssize_t i, j, step;
- if( !PySlice_Check(slice) ) {
- SWIG_Error(SWIG_TypeError, "Slice object expected.");
- return NULL;
- }
- PySlice_GetIndices(slice, (Py_ssize_t)self->size(), &i, &j, &step);
- Sequence::difference_type id = i;
- Sequence::difference_type jd = j;
- return swig::getslice(self, id, jd, step);
- }
-
- void __setitem__(SWIGPY_SLICEOBJECT *slice, const Sequence& v) throw (std::out_of_range, std::invalid_argument) {
- Py_ssize_t i, j, step;
- if( !PySlice_Check(slice) ) {
- SWIG_Error(SWIG_TypeError, "Slice object expected.");
- return;
- }
- PySlice_GetIndices(slice, (Py_ssize_t)self->size(), &i, &j, &step);
- Sequence::difference_type id = i;
- Sequence::difference_type jd = j;
- swig::setslice(self, id, jd, step, v);
- }
-
- void __setitem__(SWIGPY_SLICEOBJECT *slice) throw (std::out_of_range, std::invalid_argument) {
- Py_ssize_t i, j, step;
- if( !PySlice_Check(slice) ) {
- SWIG_Error(SWIG_TypeError, "Slice object expected.");
- return;
- }
- PySlice_GetIndices(slice, (Py_ssize_t)self->size(), &i, &j, &step);
- Sequence::difference_type id = i;
- Sequence::difference_type jd = j;
- swig::delslice(self, id, jd, step);
- }
-
- void __delitem__(SWIGPY_SLICEOBJECT *slice) throw (std::out_of_range, std::invalid_argument) {
- Py_ssize_t i, j, step;
- if( !PySlice_Check(slice) ) {
- SWIG_Error(SWIG_TypeError, "Slice object expected.");
- return;
- }
- PySlice_GetIndices(slice, (Py_ssize_t)self->size(), &i, &j, &step);
- Sequence::difference_type id = i;
- Sequence::difference_type jd = j;
- swig::delslice(self, id, jd, step);
- }
-
- }
-%enddef
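
The slice form of __getitem__ generated above ends up in swig::getslice; a hedged illustration of that helper on a plain std::vector (assumes the SwigPySequence_Base fragment above is compiled in; the values are illustrative):

    // Equivalent of the Python expression v[1:6:2] on a wrapped vector.
    static void getslice_demo() {
      std::vector<int> v;
      for (int k = 0; k < 6; ++k)
        v.push_back(k);                                  // v == {0,1,2,3,4,5}
      std::vector<int> *odd =
          swig::getslice(&v, (std::ptrdiff_t)1, (std::ptrdiff_t)6, 2);
      // *odd == {1, 3, 5}; the result is newly allocated, which is why the
      // macro marks __getitem__(slice) with %newobject.
      delete odd;
    }
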
-
-%define %swig_sequence_methods_non_resizable(Sequence...)
- %swig_sequence_methods_common(%arg(Sequence))
- %extend {
- const value_type& __getitem__(difference_type i) const throw (std::out_of_range) {
- return *(swig::cgetpos(self, i));
- }
-
- void __setitem__(difference_type i, const value_type& x) throw (std::out_of_range) {
- *(swig::getpos(self,i)) = x;
- }
-
-#if defined(SWIGPYTHON_BUILTIN)
- // This will be called through the mp_ass_subscript slot to delete an entry.
- void __setitem__(difference_type i) throw (std::out_of_range, std::invalid_argument) {
- swig::erase(self, swig::getpos(self, i));
- }
-#endif
-
- }
-%enddef
-
-%define %swig_sequence_methods(Sequence...)
- %swig_sequence_methods_non_resizable(%arg(Sequence))
- %extend {
- value_type pop() throw (std::out_of_range) {
- if (self->size() == 0)
- throw std::out_of_range("pop from empty container");
- Sequence::value_type x = self->back();
- self->pop_back();
- return x;
- }
-
- void append(const value_type& x) {
- self->push_back(x);
- }
- }
-%enddef
-
-%define %swig_sequence_methods_non_resizable_val(Sequence...)
- %swig_sequence_methods_common(%arg(Sequence))
- %extend {
- value_type __getitem__(difference_type i) throw (std::out_of_range) {
- return *(swig::cgetpos(self, i));
- }
-
- void __setitem__(difference_type i, value_type x) throw (std::out_of_range) {
- *(swig::getpos(self,i)) = x;
- }
-
-#if defined(SWIGPYTHON_BUILTIN)
- // This will be called through the mp_ass_subscript slot to delete an entry.
- void __setitem__(difference_type i) throw (std::out_of_range, std::invalid_argument) {
- swig::erase(self, swig::getpos(self, i));
- }
-#endif
- }
-%enddef
-
-%define %swig_sequence_methods_val(Sequence...)
- %swig_sequence_methods_non_resizable_val(%arg(Sequence))
- %extend {
- value_type pop() throw (std::out_of_range) {
- if (self->size() == 0)
- throw std::out_of_range("pop from empty container");
- Sequence::value_type x = self->back();
- self->pop_back();
- return x;
- }
-
- void append(value_type x) {
- self->push_back(x);
- }
- }
-%enddef
-
-
-
-//
-// Common fragments
-//
-
-%fragment("StdSequenceTraits","header",
- fragment="StdTraits",
- fragment="SwigPySequence_Cont")
-{
-namespace swig {
- template <class SwigPySeq, class Seq>
- inline void
- assign(const SwigPySeq& swigpyseq, Seq* seq) {
- // seq->assign(swigpyseq.begin(), swigpyseq.end()); // not used as not always implemented
- typedef typename SwigPySeq::value_type value_type;
- typename SwigPySeq::const_iterator it = swigpyseq.begin();
- for (;it != swigpyseq.end(); ++it) {
- seq->insert(seq->end(),(value_type)(*it));
- }
- }
-
- template <class Seq, class T = typename Seq::value_type >
- struct traits_asptr_stdseq {
- typedef Seq sequence;
- typedef T value_type;
-
- static int asptr(PyObject *obj, sequence **seq) {
- if (obj == Py_None || SWIG_Python_GetSwigThis(obj)) {
- sequence *p;
- swig_type_info *descriptor = swig::type_info<sequence>();
- if (descriptor && SWIG_IsOK(::SWIG_ConvertPtr(obj, (void **)&p, descriptor, 0))) {
- if (seq) *seq = p;
- return SWIG_OLDOBJ;
- }
- } else if (PySequence_Check(obj)) {
- try {
- SwigPySequence_Cont<value_type> swigpyseq(obj);
- if (seq) {
- sequence *pseq = new sequence();
- assign(swigpyseq, pseq);
- *seq = pseq;
- return SWIG_NEWOBJ;
- } else {
- return swigpyseq.check() ? SWIG_OK : SWIG_ERROR;
- }
- } catch (std::exception& e) {
- if (seq) {
- if (!PyErr_Occurred()) {
- PyErr_SetString(PyExc_TypeError, e.what());
- }
- }
- return SWIG_ERROR;
- }
- }
- return SWIG_ERROR;
- }
- };
-
- template <class Seq, class T = typename Seq::value_type >
- struct traits_from_stdseq {
- typedef Seq sequence;
- typedef T value_type;
- typedef typename Seq::size_type size_type;
- typedef typename sequence::const_iterator const_iterator;
-
- static PyObject *from(const sequence& seq) {
-%#ifdef SWIG_PYTHON_EXTRA_NATIVE_CONTAINERS
- swig_type_info *desc = swig::type_info<sequence>();
- if (desc && desc->clientdata) {
- return SWIG_InternalNewPointerObj(new sequence(seq), desc, SWIG_POINTER_OWN);
- }
-%#endif
- size_type size = seq.size();
- if (size <= (size_type)INT_MAX) {
- PyObject *obj = PyTuple_New((Py_ssize_t)size);
- Py_ssize_t i = 0;
- for (const_iterator it = seq.begin(); it != seq.end(); ++it, ++i) {
- PyTuple_SetItem(obj,i,swig::from<value_type>(*it));
- }
- return obj;
- } else {
- PyErr_SetString(PyExc_OverflowError,"sequence size not valid in python");
- return NULL;
- }
- }
- };
-}
-}
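
A hedged sketch of how traits_asptr_stdseq is driven (assumes a generated module that wraps std::vector<int> and emits the int traits; py_obj is an assumed input object):

    static void asptr_demo(PyObject *py_obj) {
      std::vector<int> *vec = 0;
      int res = swig::traits_asptr_stdseq<std::vector<int> >::asptr(py_obj, &vec);
      if (SWIG_IsOK(res)) {
        // SWIG_OLDOBJ: vec points into an already-wrapped std::vector<int>.
        // SWIG_NEWOBJ: vec was freshly built from a plain Python sequence,
        //              so the caller owns it.
        if (SWIG_IsNewObj(res))
          delete vec;
      }
    }
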
diff --git a/contrib/tools/swig/Lib/python/pyiterators.swg b/contrib/tools/swig/Lib/python/pyiterators.swg
deleted file mode 100644
index cb15e35cda..0000000000
--- a/contrib/tools/swig/Lib/python/pyiterators.swg
+++ /dev/null
@@ -1,458 +0,0 @@
-/* -----------------------------------------------------------------------------
- * pyiterators.swg
- *
- * Implement a Python 'output' iterator for Python 2.2 or higher.
- *
- * Users can derive from the SwigPyIterator to implement their
- * own iterators. As an example (a real one, since it is used for the STL/STD
- * containers), the template SwigPyIterator_T provides the
- * implementation for generic C++ iterators.
- * ----------------------------------------------------------------------------- */
-
-%include <std_common.i>
-
-%fragment("SwigPyIterator","header",fragment="<stddef.h>") {
-namespace swig {
- struct stop_iteration {
- };
-
- struct SwigPyIterator {
- private:
- SwigPtr_PyObject _seq;
-
- protected:
- SwigPyIterator(PyObject *seq) : _seq(seq)
- {
- }
-
- public:
- virtual ~SwigPyIterator() {}
-
- // Access iterator method, required by Python
- virtual PyObject *value() const = 0;
-
- // Forward iterator method, required by Python
- virtual SwigPyIterator *incr(size_t n = 1) = 0;
-
- // Backward iterator method, very common in C++, but not required in Python
- virtual SwigPyIterator *decr(size_t /*n*/ = 1)
- {
- throw stop_iteration();
- }
-
- // Random access iterator methods, but not required in Python
- virtual ptrdiff_t distance(const SwigPyIterator &/*x*/) const
- {
- throw std::invalid_argument("operation not supported");
- }
-
- virtual bool equal (const SwigPyIterator &/*x*/) const
- {
- throw std::invalid_argument("operation not supported");
- }
-
- // C++ common/needed methods
- virtual SwigPyIterator *copy() const = 0;
-
- PyObject *next()
- {
- SWIG_PYTHON_THREAD_BEGIN_BLOCK; // disable threads
- PyObject *obj = value();
- incr();
- SWIG_PYTHON_THREAD_END_BLOCK; // re-enable threads
- return obj;
- }
-
- /* Make an alias for Python 3.x */
- PyObject *__next__()
- {
- return next();
- }
-
- PyObject *previous()
- {
- SWIG_PYTHON_THREAD_BEGIN_BLOCK; // disable threads
- decr();
- PyObject *obj = value();
- SWIG_PYTHON_THREAD_END_BLOCK; // re-enable threads
- return obj;
- }
-
- SwigPyIterator *advance(ptrdiff_t n)
- {
- return (n > 0) ? incr(n) : decr(-n);
- }
-
- bool operator == (const SwigPyIterator& x) const
- {
- return equal(x);
- }
-
- bool operator != (const SwigPyIterator& x) const
- {
- return ! operator==(x);
- }
-
- SwigPyIterator& operator += (ptrdiff_t n)
- {
- return *advance(n);
- }
-
- SwigPyIterator& operator -= (ptrdiff_t n)
- {
- return *advance(-n);
- }
-
- SwigPyIterator* operator + (ptrdiff_t n) const
- {
- return copy()->advance(n);
- }
-
- SwigPyIterator* operator - (ptrdiff_t n) const
- {
- return copy()->advance(-n);
- }
-
- ptrdiff_t operator - (const SwigPyIterator& x) const
- {
- return x.distance(*this);
- }
-
- static swig_type_info* descriptor() {
- static int init = 0;
- static swig_type_info* desc = 0;
- if (!init) {
- desc = SWIG_TypeQuery("swig::SwigPyIterator *");
- init = 1;
- }
- return desc;
- }
- };
-
-%#if defined(SWIGPYTHON_BUILTIN)
- inline PyObject* make_output_iterator_builtin (PyObject *pyself)
- {
- Py_INCREF(pyself);
- return pyself;
- }
-%#endif
-}
-}
-
-%fragment("SwigPyIterator_T","header",fragment="<stddef.h>",fragment="SwigPyIterator",fragment="StdTraits",fragment="StdIteratorTraits") {
-namespace swig {
- template<typename OutIterator>
- class SwigPyIterator_T : public SwigPyIterator
- {
- public:
- typedef OutIterator out_iterator;
- typedef typename std::iterator_traits<out_iterator>::value_type value_type;
- typedef SwigPyIterator_T<out_iterator> self_type;
-
- SwigPyIterator_T(out_iterator curr, PyObject *seq)
- : SwigPyIterator(seq), current(curr)
- {
- }
-
- const out_iterator& get_current() const
- {
- return current;
- }
-
-
- bool equal (const SwigPyIterator &iter) const
- {
- const self_type *iters = dynamic_cast<const self_type *>(&iter);
- if (iters) {
- return (current == iters->get_current());
- } else {
- throw std::invalid_argument("bad iterator type");
- }
- }
-
- ptrdiff_t distance(const SwigPyIterator &iter) const
- {
- const self_type *iters = dynamic_cast<const self_type *>(&iter);
- if (iters) {
- return std::distance(current, iters->get_current());
- } else {
- throw std::invalid_argument("bad iterator type");
- }
- }
-
- protected:
- out_iterator current;
- };
-
- template <class ValueType>
- struct from_oper
- {
- typedef const ValueType& argument_type;
- typedef PyObject *result_type;
- result_type operator()(argument_type v) const
- {
- return swig::from(v);
- }
- };
-
- template<typename OutIterator,
- typename ValueType = typename std::iterator_traits<OutIterator>::value_type,
- typename FromOper = from_oper<ValueType> >
- class SwigPyForwardIteratorOpen_T : public SwigPyIterator_T<OutIterator>
- {
- public:
- FromOper from;
- typedef OutIterator out_iterator;
- typedef ValueType value_type;
- typedef SwigPyIterator_T<out_iterator> base;
- typedef SwigPyForwardIteratorOpen_T<OutIterator, ValueType, FromOper> self_type;
-
- SwigPyForwardIteratorOpen_T(out_iterator curr, PyObject *seq)
- : SwigPyIterator_T<OutIterator>(curr, seq)
- {
- }
-
- PyObject *value() const {
- return from(static_cast<const value_type&>(*(base::current)));
- }
-
- SwigPyIterator *copy() const
- {
- return new self_type(*this);
- }
-
- SwigPyIterator *incr(size_t n = 1)
- {
- while (n--) {
- ++base::current;
- }
- return this;
- }
-
- };
-
- template<typename OutIterator,
- typename ValueType = typename std::iterator_traits<OutIterator>::value_type,
- typename FromOper = from_oper<ValueType> >
- class SwigPyIteratorOpen_T : public SwigPyForwardIteratorOpen_T<OutIterator, ValueType, FromOper>
- {
- public:
- FromOper from;
- typedef OutIterator out_iterator;
- typedef ValueType value_type;
- typedef SwigPyIterator_T<out_iterator> base;
- typedef SwigPyIteratorOpen_T<OutIterator, ValueType, FromOper> self_type;
-
- SwigPyIteratorOpen_T(out_iterator curr, PyObject *seq)
- : SwigPyForwardIteratorOpen_T<OutIterator>(curr, seq)
- {
- }
-
- SwigPyIterator *decr(size_t n = 1)
- {
- while (n--) {
- --base::current;
- }
- return this;
- }
- };
-
- template<typename OutIterator,
- typename ValueType = typename std::iterator_traits<OutIterator>::value_type,
- typename FromOper = from_oper<ValueType> >
- class SwigPyForwardIteratorClosed_T : public SwigPyIterator_T<OutIterator>
- {
- public:
- FromOper from;
- typedef OutIterator out_iterator;
- typedef ValueType value_type;
- typedef SwigPyIterator_T<out_iterator> base;
- typedef SwigPyForwardIteratorClosed_T<OutIterator, ValueType, FromOper> self_type;
-
- SwigPyForwardIteratorClosed_T(out_iterator curr, out_iterator first, out_iterator last, PyObject *seq)
- : SwigPyIterator_T<OutIterator>(curr, seq), begin(first), end(last)
- {
- }
-
- PyObject *value() const {
- if (base::current == end) {
- throw stop_iteration();
- } else {
- return from(static_cast<const value_type&>(*(base::current)));
- }
- }
-
- SwigPyIterator *copy() const
- {
- return new self_type(*this);
- }
-
- SwigPyIterator *incr(size_t n = 1)
- {
- while (n--) {
- if (base::current == end) {
- throw stop_iteration();
- } else {
- ++base::current;
- }
- }
- return this;
- }
-
- protected:
- out_iterator begin;
- out_iterator end;
- };
-
- template<typename OutIterator,
- typename ValueType = typename std::iterator_traits<OutIterator>::value_type,
- typename FromOper = from_oper<ValueType> >
- class SwigPyIteratorClosed_T : public SwigPyForwardIteratorClosed_T<OutIterator,ValueType,FromOper>
- {
- public:
- FromOper from;
- typedef OutIterator out_iterator;
- typedef ValueType value_type;
- typedef SwigPyIterator_T<out_iterator> base;
- typedef SwigPyForwardIteratorClosed_T<OutIterator, ValueType, FromOper> base0;
- typedef SwigPyIteratorClosed_T<OutIterator, ValueType, FromOper> self_type;
-
- SwigPyIteratorClosed_T(out_iterator curr, out_iterator first, out_iterator last, PyObject *seq)
- : SwigPyForwardIteratorClosed_T<OutIterator,ValueType,FromOper>(curr, first, last, seq)
- {
- }
-
- SwigPyIterator *decr(size_t n = 1)
- {
- while (n--) {
- if (base::current == base0::begin) {
- throw stop_iteration();
- } else {
- --base::current;
- }
- }
- return this;
- }
- };
-
-
- template<typename OutIter>
- inline SwigPyIterator*
- make_output_forward_iterator(const OutIter& current, const OutIter& begin,const OutIter& end, PyObject *seq = 0)
- {
- return new SwigPyForwardIteratorClosed_T<OutIter>(current, begin, end, seq);
- }
-
- template<typename OutIter>
- inline SwigPyIterator*
- make_output_iterator(const OutIter& current, const OutIter& begin,const OutIter& end, PyObject *seq = 0)
- {
- return new SwigPyIteratorClosed_T<OutIter>(current, begin, end, seq);
- }
-
- template<typename OutIter>
- inline SwigPyIterator*
- make_output_forward_iterator(const OutIter& current, PyObject *seq = 0)
- {
- return new SwigPyForwardIteratorOpen_T<OutIter>(current, seq);
- }
-
- template<typename OutIter>
- inline SwigPyIterator*
- make_output_iterator(const OutIter& current, PyObject *seq = 0)
- {
- return new SwigPyIteratorOpen_T<OutIter>(current, seq);
- }
-
-}
-}
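
A hedged sketch of the factory functions above, mirroring what the %extend iterator() method in pycontainer.swg generates (a generated module is assumed so the int traits exist; the vector and names are illustrative):

    static void iterator_demo() {
      std::vector<int> data;
      data.push_back(1);
      data.push_back(2);

      // A "closed" iterator: value() wraps the current element via swig::from<int>()
      // and both value() and incr() throw swig::stop_iteration once past the end.
      swig::SwigPyIterator *it =
          swig::make_output_iterator(data.begin(), data.begin(), data.end());
      PyObject *first = it->value();   // Python int 1
      it->incr();                      // now at 2
      it->incr();                      // now at end(); the next value()/incr() throws
      Py_XDECREF(first);
      delete it;                       // SwigPyIterator has a virtual destructor
    }
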
-
-
-%fragment("SwigPyIterator");
-namespace swig
-{
- /*
- Throw a StopIteration exception
- */
- %ignore stop_iteration;
- struct stop_iteration {};
-
- %typemap(throws) stop_iteration {
- (void)$1;
- SWIG_SetErrorObj(PyExc_StopIteration, SWIG_Py_Void());
- SWIG_fail;
- }
-
- /*
- Mark methods that return new objects
- */
- %newobject SwigPyIterator::copy;
- %newobject SwigPyIterator::operator + (ptrdiff_t n) const;
- %newobject SwigPyIterator::operator - (ptrdiff_t n) const;
-
- %nodirector SwigPyIterator;
-
-#if defined(SWIGPYTHON_BUILTIN)
- %feature("python:tp_iter") SwigPyIterator "&swig::make_output_iterator_builtin";
- %feature("python:slot", "tp_iternext", functype="iternextfunc") SwigPyIterator::__next__;
-#else
- %extend SwigPyIterator {
- %pythoncode %{def __iter__(self):
- return self%}
- }
-#endif
-
- %catches(swig::stop_iteration) SwigPyIterator::value() const;
- %catches(swig::stop_iteration) SwigPyIterator::incr(size_t n = 1);
- %catches(swig::stop_iteration) SwigPyIterator::decr(size_t n = 1);
- %catches(std::invalid_argument) SwigPyIterator::distance(const SwigPyIterator &x) const;
- %catches(std::invalid_argument) SwigPyIterator::equal (const SwigPyIterator &x) const;
- %catches(swig::stop_iteration) SwigPyIterator::__next__();
- %catches(swig::stop_iteration) SwigPyIterator::next();
- %catches(swig::stop_iteration) SwigPyIterator::previous();
- %catches(swig::stop_iteration) SwigPyIterator::advance(ptrdiff_t n);
- %catches(swig::stop_iteration) SwigPyIterator::operator += (ptrdiff_t n);
- %catches(swig::stop_iteration) SwigPyIterator::operator -= (ptrdiff_t n);
- %catches(swig::stop_iteration) SwigPyIterator::operator + (ptrdiff_t n) const;
- %catches(swig::stop_iteration) SwigPyIterator::operator - (ptrdiff_t n) const;
-
- struct SwigPyIterator
- {
- protected:
- SwigPyIterator(PyObject *seq);
-
- public:
- virtual ~SwigPyIterator();
-
- // Access iterator method, required by Python
- virtual PyObject *value() const = 0;
-
- // Forward iterator method, required by Python
- virtual SwigPyIterator *incr(size_t n = 1) = 0;
-
- // Backward iterator method, very common in C++, but not required in Python
- virtual SwigPyIterator *decr(size_t n = 1);
-
- // Random access iterator methods, but not required in Python
- virtual ptrdiff_t distance(const SwigPyIterator &x) const;
-
- virtual bool equal (const SwigPyIterator &x) const;
-
- // C++ common/needed methods
- virtual SwigPyIterator *copy() const = 0;
-
- PyObject *next();
- PyObject *__next__();
- PyObject *previous();
- SwigPyIterator *advance(ptrdiff_t n);
-
- bool operator == (const SwigPyIterator& x) const;
- bool operator != (const SwigPyIterator& x) const;
- SwigPyIterator& operator += (ptrdiff_t n);
- SwigPyIterator& operator -= (ptrdiff_t n);
- SwigPyIterator* operator + (ptrdiff_t n) const;
- SwigPyIterator* operator - (ptrdiff_t n) const;
- ptrdiff_t operator - (const SwigPyIterator& x) const;
- };
-}
-
diff --git a/contrib/tools/swig/Lib/python/pystdcommon.swg b/contrib/tools/swig/Lib/python/pystdcommon.swg
deleted file mode 100644
index afa71350a9..0000000000
--- a/contrib/tools/swig/Lib/python/pystdcommon.swg
+++ /dev/null
@@ -1,265 +0,0 @@
-%fragment("StdTraits","header",fragment="StdTraitsCommon")
-{
-namespace swig {
- /*
-    Traits that provide the from method
- */
- template <class Type> struct traits_from_ptr {
- static PyObject *from(Type *val, int owner = 0) {
- return SWIG_InternalNewPointerObj(val, type_info<Type>(), owner);
- }
- };
-
- template <class Type> struct traits_from {
- static PyObject *from(const Type& val) {
- return traits_from_ptr<Type>::from(new Type(val), 1);
- }
- };
-
- template <class Type> struct traits_from<Type *> {
- static PyObject *from(Type* val) {
- return traits_from_ptr<Type>::from(val, 0);
- }
- };
-
- template <class Type> struct traits_from<const Type *> {
- static PyObject *from(const Type* val) {
- return traits_from_ptr<Type>::from(const_cast<Type*>(val), 0);
- }
- };
-
-
- template <class Type>
- inline PyObject *from(const Type& val) {
- return traits_from<Type>::from(val);
- }
-
- template <class Type>
- inline PyObject *from_ptr(Type* val, int owner) {
- return traits_from_ptr<Type>::from(val, owner);
- }
-
- /*
-    Traits that provide the asval/as/check methods
- */
- template <class Type>
- struct traits_asptr {
- static int asptr(PyObject *obj, Type **val) {
- int res = SWIG_ERROR;
- swig_type_info *descriptor = type_info<Type>();
- if (val) {
- Type *p = 0;
- int newmem = 0;
- res = descriptor ? SWIG_ConvertPtrAndOwn(obj, (void **)&p, descriptor, 0, &newmem) : SWIG_ERROR;
- if (SWIG_IsOK(res)) {
- if (newmem & SWIG_CAST_NEW_MEMORY) {
- res |= SWIG_NEWOBJMASK;
- }
- *val = p;
- }
- } else {
- res = descriptor ? SWIG_ConvertPtr(obj, 0, descriptor, 0) : SWIG_ERROR;
- }
- return res;
- }
- };
-
- template <class Type>
- inline int asptr(PyObject *obj, Type **vptr) {
- return traits_asptr<Type>::asptr(obj, vptr);
- }
-
- template <class Type>
- struct traits_asval {
- static int asval(PyObject *obj, Type *val) {
- if (val) {
- Type *p = 0;
- int res = traits_asptr<Type>::asptr(obj, &p);
- if (!SWIG_IsOK(res)) return res;
- if (p) {
- typedef typename noconst_traits<Type>::noconst_type noconst_type;
- *(const_cast<noconst_type*>(val)) = *p;
- if (SWIG_IsNewObj(res)){
- %delete(p);
- res = SWIG_DelNewMask(res);
- }
- return res;
- } else {
- return SWIG_ERROR;
- }
- } else {
- return traits_asptr<Type>::asptr(obj, (Type **)(0));
- }
- }
- };
-
- template <class Type> struct traits_asval<Type*> {
- static int asval(PyObject *obj, Type **val) {
- if (val) {
- typedef typename noconst_traits<Type>::noconst_type noconst_type;
- noconst_type *p = 0;
- int res = traits_asptr<noconst_type>::asptr(obj, &p);
- if (SWIG_IsOK(res)) {
- *(const_cast<noconst_type**>(val)) = p;
- }
- return res;
- } else {
- return traits_asptr<Type>::asptr(obj, (Type **)(0));
- }
- }
- };
-
- template <class Type>
- inline int asval(PyObject *obj, Type *val) {
- return traits_asval<Type>::asval(obj, val);
- }
-
- template <class Type>
- struct traits_as<Type, value_category> {
- static Type as(PyObject *obj) {
- Type v;
- int res = asval(obj, &v);
- if (!obj || !SWIG_IsOK(res)) {
- if (!PyErr_Occurred()) {
- ::%type_error(swig::type_name<Type>());
- }
- throw std::invalid_argument("bad type");
- }
- return v;
- }
- };
-
- template <class Type>
- struct traits_as<Type, pointer_category> {
- static Type as(PyObject *obj) {
- Type *v = 0;
- int res = (obj ? traits_asptr<Type>::asptr(obj, &v) : SWIG_ERROR);
- if (SWIG_IsOK(res) && v) {
- if (SWIG_IsNewObj(res)) {
- Type r(*v);
- %delete(v);
- return r;
- } else {
- return *v;
- }
- } else {
- if (!PyErr_Occurred()) {
- %type_error(swig::type_name<Type>());
- }
- throw std::invalid_argument("bad type");
- }
- }
- };
-
- template <class Type>
- struct traits_as<Type*, pointer_category> {
- static Type* as(PyObject *obj) {
- Type *v = 0;
- int res = (obj ? traits_asptr<Type>::asptr(obj, &v) : SWIG_ERROR);
- if (SWIG_IsOK(res)) {
- return v;
- } else {
- if (!PyErr_Occurred()) {
- %type_error(swig::type_name<Type>());
- }
- throw std::invalid_argument("bad type");
- }
- }
- };
-
- template <class Type>
- inline Type as(PyObject *obj) {
- return traits_as<Type, typename traits<Type>::category>::as(obj);
- }
-
- template <class Type>
- struct traits_check<Type, value_category> {
- static bool check(PyObject *obj) {
- int res = obj ? asval(obj, (Type *)(0)) : SWIG_ERROR;
- return SWIG_IsOK(res) ? true : false;
- }
- };
-
- template <class Type>
- struct traits_check<Type, pointer_category> {
- static bool check(PyObject *obj) {
- int res = obj ? asptr(obj, (Type **)(0)) : SWIG_ERROR;
- return SWIG_IsOK(res) ? true : false;
- }
- };
-
- template <class Type>
- inline bool check(PyObject *obj) {
- return traits_check<Type, typename traits<Type>::category>::check(obj);
- }
-}
-}
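
A hedged sketch of the as/from/check round trip these traits provide; Foo is a hypothetical wrapped class, and a generated module where Foo's traits exist (e.g. via %traits_swigtype) is assumed:

    static void traits_demo(const Foo &f) {
      PyObject *obj = swig::from(f);     // new Python wrapper owning a copy of f
      if (swig::check<Foo>(obj)) {       // can obj be converted back to a Foo?
        Foo g = swig::as<Foo>(obj);      // throws std::invalid_argument on a bad type
        (void)g;
      }
      Py_DECREF(obj);
    }
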
-
-//
-// Backward compatibility
-//
-
-#ifdef SWIG_PYTHON_BACKWARD_COMP
-%fragment("<string>");
-%{
-PyObject* SwigInt_FromBool(bool b) {
- return PyInt_FromLong(b ? 1L : 0L);
-}
-double SwigNumber_Check(PyObject* o) {
- return PyFloat_Check(o) || PyInt_Check(o) || PyLong_Check(o);
-}
-double SwigNumber_AsDouble(PyObject* o) {
- return PyFloat_Check(o) ? PyFloat_AsDouble(o)
- : (PyInt_Check(o) ? double(PyInt_AsLong(o))
- : double(PyLong_AsLong(o)));
-}
-PyObject* SwigString_FromString(const std::string& s) {
- return PyString_FromStringAndSize(s.data(),s.size());
-}
-std::string SwigString_AsString(PyObject* o) {
- return std::string(PyString_AsString(o));
-}
-%}
-
-#endif
-
-
-%define %specialize_std_container(Type,Check,As,From)
-%{
-namespace swig {
- template <> struct traits_asval<Type > {
- typedef Type value_type;
- static int asval(PyObject *obj, value_type *val) {
- if (Check(obj)) {
- if (val) *val = As(obj);
- return SWIG_OK;
- }
- return SWIG_ERROR;
- }
- };
- template <> struct traits_from<Type > {
- typedef Type value_type;
- static PyObject *from(const value_type& val) {
- return From(val);
- }
- };
-
- template <>
- struct traits_check<Type, value_category> {
- static int check(PyObject *obj) {
- int res = Check(obj);
- return obj && res ? res : 0;
- }
- };
-}
-%}
-%enddef
-
-
-#define specialize_std_vector(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_list(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_deque(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_set(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_multiset(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_unordered_set(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
-#define specialize_std_unordered_multiset(Type,Check,As,From) %specialize_std_container(%arg(Type),Check,As,From)
diff --git a/contrib/tools/swig/Lib/python/pywstrings.swg b/contrib/tools/swig/Lib/python/pywstrings.swg
deleted file mode 100644
index 0e5a78df5e..0000000000
--- a/contrib/tools/swig/Lib/python/pywstrings.swg
+++ /dev/null
@@ -1,85 +0,0 @@
-/* ------------------------------------------------------------
- * utility methods for wchar_t strings
- * ------------------------------------------------------------ */
-
-%{
-#if PY_VERSION_HEX >= 0x03020000
-# define SWIGPY_UNICODE_ARG(obj) ((PyObject*) (obj))
-#else
-# define SWIGPY_UNICODE_ARG(obj) ((PyUnicodeObject*) (obj))
-#endif
-%}
-
-%fragment("SWIG_AsWCharPtrAndSize","header",fragment="<wchar.h>",fragment="SWIG_pwchar_descriptor") {
-SWIGINTERN int
-SWIG_AsWCharPtrAndSize(PyObject *obj, wchar_t **cptr, size_t *psize, int *alloc)
-{
- PyObject *tmp = 0;
- int isunicode = PyUnicode_Check(obj);
-%#if PY_VERSION_HEX < 0x03000000 && !defined(SWIG_PYTHON_STRICT_UNICODE_WCHAR)
- if (!isunicode && PyString_Check(obj)) {
- tmp = PyUnicode_FromObject(obj);
- if (tmp) {
- isunicode = 1;
- obj = tmp;
- } else {
- PyErr_Clear();
- return SWIG_TypeError;
- }
- }
-%#endif
- if (isunicode) {
-%#if PY_VERSION_HEX >= 0x03030000
- Py_ssize_t len = PyUnicode_GetLength(obj);
-%#else
- Py_ssize_t len = PyUnicode_GetSize(obj);
-%#endif
- if (cptr) {
- Py_ssize_t length;
- *cptr = %new_array(len + 1, wchar_t);
- length = PyUnicode_AsWideChar(SWIGPY_UNICODE_ARG(obj), *cptr, len);
- if (length == -1) {
- PyErr_Clear();
- Py_XDECREF(tmp);
- return SWIG_TypeError;
- }
- (*cptr)[length] = 0;
- }
- if (psize) *psize = (size_t) len + 1;
- if (alloc) *alloc = cptr ? SWIG_NEWOBJ : 0;
- Py_XDECREF(tmp);
- return SWIG_OK;
- } else {
- swig_type_info* pwchar_descriptor = SWIG_pwchar_descriptor();
- if (pwchar_descriptor) {
- void * vptr = 0;
- if (SWIG_ConvertPtr(obj, &vptr, pwchar_descriptor, 0) == SWIG_OK) {
- if (cptr) *cptr = (wchar_t *)vptr;
- if (psize) *psize = vptr ? (wcslen((wchar_t *)vptr) + 1) : 0;
- return SWIG_OK;
- }
- }
- }
- return SWIG_TypeError;
-}
-}
-
-%fragment("SWIG_FromWCharPtrAndSize","header",fragment="<wchar.h>",fragment="SWIG_pwchar_descriptor") {
-SWIGINTERNINLINE PyObject *
-SWIG_FromWCharPtrAndSize(const wchar_t * carray, size_t size)
-{
- if (carray) {
- if (size > INT_MAX) {
- swig_type_info* pwchar_descriptor = SWIG_pwchar_descriptor();
- return pwchar_descriptor ?
- SWIG_InternalNewPointerObj(%const_cast(carray,wchar_t *), pwchar_descriptor, 0) : SWIG_Py_Void();
- } else {
- return PyUnicode_FromWideChar(carray, %numeric_cast(size, Py_ssize_t));
- }
- } else {
- return SWIG_Py_Void();
- }
-}
-}
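
A hedged sketch combining the two helpers (the SWIG Python runtime is assumed; py_obj is an assumed unicode object, and freeing the buffer with delete[] is an assumption about what %new_array/%delete_array expand to in C++ mode):

    static PyObject *wchar_roundtrip(PyObject *py_obj) {
      wchar_t *buf = 0;
      size_t len = 0;                    // set to the length including the NUL
      int alloc = 0;
      if (!SWIG_IsOK(SWIG_AsWCharPtrAndSize(py_obj, &buf, &len, &alloc)))
        return NULL;                     // neither unicode nor a wrapped wchar_t*
      PyObject *copy = SWIG_FromWCharPtrAndSize(buf, len ? len - 1 : 0);
      if (alloc == SWIG_NEWOBJ)
        delete[] buf;                    // buffer was allocated by SWIG_AsWCharPtrAndSize
      return copy;
    }
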
-
-
diff --git a/contrib/tools/swig/Lib/python/std_alloc.i b/contrib/tools/swig/Lib/python/std_alloc.i
deleted file mode 100644
index 35dc051bea..0000000000
--- a/contrib/tools/swig/Lib/python/std_alloc.i
+++ /dev/null
@@ -1 +0,0 @@
-%include <std/std_alloc.i>
diff --git a/contrib/tools/swig/Lib/python/std_char_traits.i b/contrib/tools/swig/Lib/python/std_char_traits.i
deleted file mode 100644
index bf4e6c47dd..0000000000
--- a/contrib/tools/swig/Lib/python/std_char_traits.i
+++ /dev/null
@@ -1 +0,0 @@
-%include <std/std_char_traits.i>
diff --git a/contrib/tools/swig/Lib/python/std_common.i b/contrib/tools/swig/Lib/python/std_common.i
deleted file mode 100644
index 605766238b..0000000000
--- a/contrib/tools/swig/Lib/python/std_common.i
+++ /dev/null
@@ -1,74 +0,0 @@
-%include <std/std_except.i>
-%include <pystdcommon.swg>
-
-
-/*
- Generate the traits for a 'primitive' type, such as 'double',
- for which the SWIG_AsVal and SWIG_From methods are already defined.
-*/
-
-%define %traits_ptypen(Type...)
- %fragment(SWIG_Traits_frag(Type),"header",
- fragment=SWIG_AsVal_frag(Type),
- fragment=SWIG_From_frag(Type),
- fragment="StdTraits") {
-namespace swig {
- template <> struct traits< Type > {
- typedef value_category category;
- static const char* type_name() { return #Type; }
- };
- template <> struct traits_asval< Type > {
- typedef Type value_type;
- static int asval(PyObject *obj, value_type *val) {
- return SWIG_AsVal(Type)(obj, val);
- }
- };
- template <> struct traits_from< Type > {
- typedef Type value_type;
- static PyObject *from(const value_type& val) {
- return SWIG_From(Type)(val);
- }
- };
-}
-}
-%enddef
-
-/* Traits for enums. This is a bit of a sneaky trick, needed because a generic template specialization of enums
- is not possible (unless using template meta-programming which SWIG doesn't support because of the explicit
- instantiations required using %template). The STL containers define the 'front' method and the typemap
- below is used whenever the front method is wrapped returning an enum. This typemap simply picks up the
- standard enum typemap, but additionally drags in a fragment containing the traits_asval and traits_from
- required in the generated code for enums. */
-
-%define %traits_enum(Type...)
- %fragment("SWIG_Traits_enum_"{Type},"header",
- fragment=SWIG_AsVal_frag(int),
- fragment=SWIG_From_frag(int),
- fragment="StdTraits") {
-namespace swig {
- template <> struct traits_asval< Type > {
- typedef Type value_type;
- static int asval(PyObject *obj, value_type *val) {
- return SWIG_AsVal(int)(obj, (int *)val);
- }
- };
- template <> struct traits_from< Type > {
- typedef Type value_type;
- static PyObject *from(const value_type& val) {
- return SWIG_From(int)((int)val);
- }
- };
-}
-}
-%typemap(out, fragment="SWIG_Traits_enum_"{Type}) const enum SWIGTYPE& front %{$typemap(out, const enum SWIGTYPE&)%}
-%enddef
-
-
-%include <std/std_common.i>
-
-//
-// Generates the traits for all the known primitive
-// C++ types (int, double, ...)
-//
-%apply_cpptypes(%traits_ptypen);
-
diff --git a/contrib/tools/swig/Lib/python/std_container.i b/contrib/tools/swig/Lib/python/std_container.i
deleted file mode 100644
index d24c1570f1..0000000000
--- a/contrib/tools/swig/Lib/python/std_container.i
+++ /dev/null
@@ -1,2 +0,0 @@
-%include <pycontainer.swg>
-%include <std/std_container.i>
diff --git a/contrib/tools/swig/Lib/python/std_except.i b/contrib/tools/swig/Lib/python/std_except.i
deleted file mode 100644
index af98428f65..0000000000
--- a/contrib/tools/swig/Lib/python/std_except.i
+++ /dev/null
@@ -1 +0,0 @@
-%include <typemaps/std_except.swg>
diff --git a/contrib/tools/swig/Lib/python/std_string.i b/contrib/tools/swig/Lib/python/std_string.i
deleted file mode 100644
index dc1378ae6d..0000000000
--- a/contrib/tools/swig/Lib/python/std_string.i
+++ /dev/null
@@ -1 +0,0 @@
-%include <typemaps/std_string.swg>
diff --git a/contrib/tools/swig/Lib/python/std_vector.i b/contrib/tools/swig/Lib/python/std_vector.i
deleted file mode 100644
index 2ac41a54d7..0000000000
--- a/contrib/tools/swig/Lib/python/std_vector.i
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- Vectors
-*/
-
-%fragment("StdVectorTraits","header",fragment="StdSequenceTraits")
-%{
- namespace swig {
- template <class T>
- struct traits_reserve<std::vector<T> > {
- static void reserve(std::vector<T> &seq, typename std::vector<T>::size_type n) {
- seq.reserve(n);
- }
- };
-
- template <class T>
- struct traits_asptr<std::vector<T> > {
- static int asptr(PyObject *obj, std::vector<T> **vec) {
- return traits_asptr_stdseq<std::vector<T> >::asptr(obj, vec);
- }
- };
-
- template <class T>
- struct traits_from<std::vector<T> > {
- static PyObject *from(const std::vector<T>& vec) {
- return traits_from_stdseq<std::vector<T> >::from(vec);
- }
- };
- }
-%}
-
-#define %swig_vector_methods(Type...) %swig_sequence_methods(Type)
-#define %swig_vector_methods_val(Type...) %swig_sequence_methods_val(Type);
-
-%include <std/std_vector.i>
diff --git a/contrib/tools/swig/Lib/python/wchar.i b/contrib/tools/swig/Lib/python/wchar.i
deleted file mode 100644
index 308139a3ab..0000000000
--- a/contrib/tools/swig/Lib/python/wchar.i
+++ /dev/null
@@ -1,21 +0,0 @@
-#ifdef __cplusplus
-
-%{
-#include <cwchar>
-%}
-
-#else
-
-%{
-#include <wchar.h>
-%}
-
-#endif
-
-%types(wchar_t *);
-%include <pywstrings.swg>
-
-/*
- Enable swig wchar support.
-*/
-#define SWIG_WCHAR
diff --git a/contrib/tools/swig/Lib/std/std_alloc.i b/contrib/tools/swig/Lib/std/std_alloc.i
deleted file mode 100644
index e460dc3eaf..0000000000
--- a/contrib/tools/swig/Lib/std/std_alloc.i
+++ /dev/null
@@ -1,77 +0,0 @@
-namespace std
-{
- /**
- * @brief The "standard" allocator, as per [20.4].
- *
- * The private _Alloc is "SGI" style. (See comments at the top
- * of stl_alloc.h.)
- *
- * The underlying allocator behaves as follows.
- * - __default_alloc_template is used via two typedefs
- * - "__single_client_alloc" typedef does no locking for threads
- * - "__alloc" typedef is threadsafe via the locks
- * - __new_alloc is used for memory requests
- *
- * (See @link Allocators allocators info @endlink for more.)
- */
- template<typename _Tp>
- class allocator
- {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef _Tp* pointer;
- typedef const _Tp* const_pointer;
- typedef _Tp& reference;
- typedef const _Tp& const_reference;
- typedef _Tp value_type;
-
- template<typename _Tp1>
- struct rebind;
-
- allocator() throw();
-
- allocator(const allocator& other) throw();
- template<typename _Tp1>
- allocator(const allocator<_Tp1>& other) throw();
- ~allocator() throw();
-
-
- pointer
- address(reference __x) const;
-
-
- const_pointer
- address(const_reference __x) const;
-
-
- // NB: __n is permitted to be 0. The C++ standard says nothing
- // about what the return value is when __n == 0.
- _Tp*
- allocate(size_type __n, const void* = 0);
-
- // __p is not permitted to be a null pointer.
- void
- deallocate(pointer __p, size_type __n);
-
- size_type
- max_size() const throw();
-
- void construct(pointer __p, const _Tp& __val);
- void destroy(pointer __p);
- };
-
- template<>
- class allocator<void>
- {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef void* pointer;
- typedef const void* const_pointer;
- typedef void value_type;
-
- template<typename _Tp1>
- struct rebind;
- };
-} // namespace std
diff --git a/contrib/tools/swig/Lib/std/std_basic_string.i b/contrib/tools/swig/Lib/std/std_basic_string.i
deleted file mode 100644
index e95cb47653..0000000000
--- a/contrib/tools/swig/Lib/std/std_basic_string.i
+++ /dev/null
@@ -1,276 +0,0 @@
-%include <exception.i>
-%include <std_container.i>
-%include <std_alloc.i>
-%include <std_char_traits.i>
-
-%fragment("<string>");
-
-namespace std
-{
- %naturalvar basic_string;
-}
-
-
-namespace std {
-
- template <class _CharT, class _Traits = char_traits<_CharT>, typename _Alloc = allocator<_CharT> >
- class basic_string
- {
-#if !defined(SWIG_STD_MODERN_STL) || defined(SWIG_STD_NOMODERN_STL)
- %ignore push_back;
- %ignore clear;
- %ignore compare;
- %ignore append;
-#endif
-
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef _CharT value_type;
- typedef value_type reference;
- typedef value_type const_reference;
- typedef _Alloc allocator_type;
-
- static const size_type npos;
-
-#ifdef SWIG_EXPORT_ITERATOR_METHODS
- class iterator;
- class reverse_iterator;
- class const_iterator;
- class const_reverse_iterator;
-#endif
-
-
- %traits_swigtype(_CharT);
- %fragment(SWIG_Traits_frag(_CharT));
-
-
- basic_string(const _CharT* __s, size_type __n);
-
- // Capacity:
-
- size_type length() const;
-
- size_type max_size() const;
-
- size_type capacity() const;
-
- void reserve(size_type __res_arg);
- %extend {
- void shrink_to_fit() {
- %#if __cplusplus >= 202002L
- self->shrink_to_fit();
- %#else
- self->reserve();
- %#endif
- }
- }
-
-
- // Modifiers:
-
- basic_string&
- append(const basic_string& __str);
-
- basic_string&
- append(const basic_string& __str, size_type __pos, size_type __n);
-
- basic_string&
- append(const _CharT* __s, size_type __n);
-
- basic_string&
- append(size_type __n, _CharT __c);
-
- basic_string&
- assign(const basic_string& __str);
-
- basic_string&
- assign(const basic_string& __str, size_type __pos, size_type __n);
-
- basic_string&
- assign(const _CharT* __s, size_type __n);
-
- basic_string&
- insert(size_type __pos1, const basic_string& __str);
-
- basic_string&
- insert(size_type __pos1, const basic_string& __str,
- size_type __pos2, size_type __n);
-
- basic_string&
- insert(size_type __pos, const _CharT* __s, size_type __n);
-
- basic_string&
- insert(size_type __pos, size_type __n, _CharT __c);
-
- basic_string&
- erase(size_type __pos = 0, size_type __n = npos);
-
- basic_string&
- replace(size_type __pos, size_type __n, const basic_string& __str);
-
- basic_string&
- replace(size_type __pos1, size_type __n1, const basic_string& __str,
- size_type __pos2, size_type __n2);
-
- basic_string&
- replace(size_type __pos, size_type __n1, const _CharT* __s,
- size_type __n2);
-
- basic_string&
- replace(size_type __pos, size_type __n1, size_type __n2, _CharT __c);
-
-
- size_type
- copy(_CharT* __s, size_type __n, size_type __pos = 0) const;
-
- // String operations:
- const _CharT* c_str() const;
-
- size_type
- find(const _CharT* __s, size_type __pos, size_type __n) const;
-
- size_type
- find(const basic_string& __str, size_type __pos = 0) const;
-
- size_type
- find(_CharT __c, size_type __pos = 0) const;
-
- size_type
- rfind(const basic_string& __str, size_type __pos = npos) const;
-
- size_type
- rfind(const _CharT* __s, size_type __pos, size_type __n) const;
-
- size_type
- rfind(_CharT __c, size_type __pos = npos) const;
-
- size_type
- find_first_of(const basic_string& __str, size_type __pos = 0) const;
-
- size_type
- find_first_of(const _CharT* __s, size_type __pos, size_type __n) const;
-
- size_type
- find_first_of(_CharT __c, size_type __pos = 0) const;
-
- size_type
- find_last_of(const basic_string& __str, size_type __pos = npos) const;
-
- size_type
- find_last_of(const _CharT* __s, size_type __pos, size_type __n) const;
-
- size_type
- find_last_of(_CharT __c, size_type __pos = npos) const;
-
- size_type
- find_first_not_of(const basic_string& __str, size_type __pos = 0) const;
-
- size_type
- find_first_not_of(const _CharT* __s, size_type __pos,
- size_type __n) const;
-
- size_type
- find_first_not_of(_CharT __c, size_type __pos = 0) const;
-
- size_type
- find_last_not_of(const basic_string& __str, size_type __pos = npos) const;
-
- size_type
- find_last_not_of(const _CharT* __s, size_type __pos,
- size_type __n) const;
-
- size_type
- find_last_not_of(_CharT __c, size_type __pos = npos) const;
-
- basic_string
- substr(size_type __pos = 0, size_type __n = npos) const;
-
- int
- compare(const basic_string& __str) const;
-
- int
- compare(size_type __pos, size_type __n, const basic_string& __str) const;
-
- int
- compare(size_type __pos1, size_type __n1, const basic_string& __str,
- size_type __pos2, size_type __n2) const;
-
-
- %ignore pop_back();
- %ignore front() const;
- %ignore back() const;
- %ignore basic_string(size_type n);
- %std_sequence_methods_val(basic_string);
-
-
- %ignore pop();
-
-
-#ifdef %swig_basic_string
- // Add swig/language extra methods
- %swig_basic_string(std::basic_string< _CharT, _Traits, _Alloc >);
-#endif
-
-#ifdef SWIG_EXPORT_ITERATOR_METHODS
-
-
- class iterator;
- class reverse_iterator;
- class const_iterator;
- class const_reverse_iterator;
-
-
- void
- insert(iterator __p, size_type __n, _CharT __c);
-
- basic_string&
- replace(iterator __i1, iterator __i2, const basic_string& __str);
-
- basic_string&
- replace(iterator __i1, iterator __i2, const _CharT* __s, size_type __n);
-
- basic_string&
- replace(iterator __i1, iterator __i2, size_type __n, _CharT __c);
-
-
- basic_string&
- replace(iterator __i1, iterator __i2, const _CharT* __k1, const _CharT* __k2);
-
- basic_string&
- replace(iterator __i1, iterator __i2, const_iterator __k1, const_iterator __k2);
-#endif
-
- basic_string& operator +=(const basic_string& v);
-
- %newobject __add__;
- %newobject __radd__;
- %extend {
-
- std::basic_string< _CharT,_Traits,_Alloc >* __add__(const basic_string& v) {
- std::basic_string< _CharT,_Traits,_Alloc >* res = new std::basic_string< _CharT,_Traits,_Alloc >(*self);
- *res += v;
- return res;
- }
-
- std::basic_string< _CharT,_Traits,_Alloc >* __radd__(const basic_string& v) {
- std::basic_string< _CharT,_Traits,_Alloc >* res = new std::basic_string< _CharT,_Traits,_Alloc >(v);
- *res += *self;
- return res;
- }
-
- std::basic_string< _CharT,_Traits,_Alloc > __str__() {
- return *self;
- }
-
- std::basic_ostream<_CharT, std::char_traits<_CharT> >&
- __rlshift__(std::basic_ostream<_CharT, std::char_traits<_CharT> >& out) {
- out << *self;
- return out;
- }
- }
-
- };
-}
-
-
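The shrink_to_fit %extend above calls the real shrink_to_fit() only when compiling as C++20 or later and otherwise falls back to the zero-argument reserve(), which earlier standards treat as a non-binding shrink request. A standalone C++ sketch of the same guard follows; shrink() is a hypothetical helper, not part of the interface file.

    #include <string>

    // Release unused capacity in a way that compiles across standard versions:
    // before C++20, reserve() with no argument is a non-binding shrink request;
    // from C++20 on that overload is gone, so shrink_to_fit() is used instead.
    void shrink(std::string& s) {
    #if __cplusplus >= 202002L
        s.shrink_to_fit();
    #else
        s.reserve();
    #endif
    }

    int main() {
        std::string s(1000, 'x');
        s.erase(10);      // keep the first 10 characters
        shrink(s);
        return static_cast<int>(s.size());  // 10
    }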
diff --git a/contrib/tools/swig/Lib/std/std_char_traits.i b/contrib/tools/swig/Lib/std/std_char_traits.i
deleted file mode 100644
index b9b4def32f..0000000000
--- a/contrib/tools/swig/Lib/std/std_char_traits.i
+++ /dev/null
@@ -1,140 +0,0 @@
-%include <std_common.i>
-#if defined(SWIG_WCHAR)
-%include <wchar.i>
-#endif
-
-namespace std
-{
-
- /// 21.1.2 Basis for explicit _Traits specialization
-  /// NB: For any given actual character type, this definition is
- /// probably wrong.
- template<class _CharT>
- struct char_traits
- {
- };
-
-
- /// 21.1.4 char_traits specializations
- template<>
- struct char_traits<char> {
- typedef char char_type;
- typedef int int_type;
- typedef streampos pos_type;
- typedef streamoff off_type;
- typedef mbstate_t state_type;
-
- static void
- assign(char_type& __c1, const char_type& __c2);
-
- static bool
- eq(const char_type& __c1, const char_type& __c2);
-
- static bool
- lt(const char_type& __c1, const char_type& __c2);
-
- static int
- compare(const char_type* __s1, const char_type* __s2, size_t __n);
-
- static size_t
- length(const char_type* __s);
-
- static const char_type*
- find(const char_type* __s, size_t __n, const char_type& __a);
-
- static char_type*
- move(char_type* __s1, const char_type* __s2, size_t __n);
-
- static char_type*
- copy(char_type* __s1, const char_type* __s2, size_t __n);
-
- static char_type*
- assign(char_type* __s, size_t __n, char_type __a);
-
- static char_type
- to_char_type(const int_type& __c);
-
- // To keep both the byte 0xff and the eof symbol 0xffffffff
- // from ending up as 0xffffffff.
- static int_type
- to_int_type(const char_type& __c);
-
- static bool
- eq_int_type(const int_type& __c1, const int_type& __c2);
-
- static int_type
-    eof();
-
- static int_type
- not_eof(const int_type& __c);
- };
-
-
-#if defined(SWIG_WCHAR)
- template<>
- struct char_traits<wchar_t>
- {
- typedef wchar_t char_type;
- typedef wint_t int_type;
- typedef streamoff off_type;
- typedef wstreampos pos_type;
- typedef mbstate_t state_type;
-
- static void
- assign(char_type& __c1, const char_type& __c2);
-
- static bool
- eq(const char_type& __c1, const char_type& __c2);
-
- static bool
- lt(const char_type& __c1, const char_type& __c2);
-
- static int
- compare(const char_type* __s1, const char_type* __s2, size_t __n);
-
- static size_t
- length(const char_type* __s);
-
- static const char_type*
- find(const char_type* __s, size_t __n, const char_type& __a);
-
- static char_type*
- move(char_type* __s1, const char_type* __s2, int_type __n);
-
- static char_type*
- copy(char_type* __s1, const char_type* __s2, size_t __n);
-
- static char_type*
- assign(char_type* __s, size_t __n, char_type __a);
-
- static char_type
-    to_char_type(const int_type& __c);
-
-    static int_type
-    to_int_type(const char_type& __c);
-
-    static bool
-    eq_int_type(const int_type& __c1, const int_type& __c2);
-
-    static int_type
-    eof();
-
- static int_type
- not_eof(const int_type& __c);
- };
-#endif
-}
-
-namespace std {
-#ifndef SWIG_STL_WRAP_TRAITS
-%template() char_traits<char>;
-#if defined(SWIG_WCHAR)
-%template() char_traits<wchar_t>;
-#endif
-#else
-%template(char_traits_c) char_traits<char>;
-#if defined(SWIG_WCHAR)
-%template(char_traits_w) char_traits<wchar_t>;
-#endif
-#endif
-}
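The comment above to_int_type() notes that the byte 0xff must not collapse into the eof value. A small self-contained check of that behaviour against the real std::char_traits<char>:

    #include <cstdio>
    #include <string>

    // to_int_type() converts through unsigned char, so the byte 0xff maps to 255,
    // while eof() is a negative value (typically -1); the two never compare equal.
    int main() {
        using T = std::char_traits<char>;
        char c = static_cast<char>(0xff);
        std::printf("to_int_type: %d, eof: %d\n", T::to_int_type(c), T::eof());
        return T::eq_int_type(T::to_int_type(c), T::eof()) ? 1 : 0;  // 0: distinct
    }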
diff --git a/contrib/tools/swig/Lib/std/std_common.i b/contrib/tools/swig/Lib/std/std_common.i
deleted file mode 100644
index 708f3ceedf..0000000000
--- a/contrib/tools/swig/Lib/std/std_common.i
+++ /dev/null
@@ -1,250 +0,0 @@
-%include <std/std_except.i>
-
-//
-// Use the following macro with modern STL implementations
-//
-//#define SWIG_STD_MODERN_STL
-//
-// Use this to deactivate the previous definition, when using gcc-2.95
-// or similar old compilers.
-//
-//#define SWIG_STD_NOMODERN_STL
-
-// Here, we identify compilers we know have problems with STL.
-%{
-#if defined(__GNUC__)
-# if __GNUC__ == 2 && __GNUC_MINOR__ <= 96
-# define SWIG_STD_NOMODERN_STL
-# endif
-#endif
-%}
-
-//
-// Common code for supporting the C++ std namespace
-//
-
-%fragment("<string>");
-%fragment("<stdexcept>");
-%fragment("<stddef.h>");
-
-
-%fragment("StdIteratorTraits","header",fragment="<stddef.h>") %{
-#if defined(__SUNPRO_CC) && defined(_RWSTD_VER)
-# if !defined(SWIG_NO_STD_NOITERATOR_TRAITS_STL)
-# define SWIG_STD_NOITERATOR_TRAITS_STL
-# endif
-#endif
-
-#if !defined(SWIG_STD_NOITERATOR_TRAITS_STL)
-#include <iterator>
-#else
-namespace std {
- template <class Iterator>
- struct iterator_traits {
- typedef ptrdiff_t difference_type;
- typedef typename Iterator::value_type value_type;
- };
-
- template <class Iterator, class Category,class T, class Reference, class Pointer, class Distance>
- struct iterator_traits<__reverse_bi_iterator<Iterator,Category,T,Reference,Pointer,Distance> > {
- typedef Distance difference_type;
- typedef T value_type;
- };
-
- template <class T>
- struct iterator_traits<T*> {
- typedef T value_type;
- typedef ptrdiff_t difference_type;
- };
-
- template<typename _InputIterator>
- inline typename iterator_traits<_InputIterator>::difference_type
- distance(_InputIterator __first, _InputIterator __last)
- {
- typename iterator_traits<_InputIterator>::difference_type __n = 0;
- while (__first != __last) {
- ++__first; ++__n;
- }
- return __n;
- }
-}
-#endif
-%}
-
-%fragment("StdTraitsCommon","header",fragment="<string>") %{
-namespace swig {
- template <class Type>
- struct noconst_traits {
- typedef Type noconst_type;
- };
-
- template <class Type>
- struct noconst_traits<const Type> {
- typedef Type noconst_type;
- };
-
- /*
- type categories
- */
- struct pointer_category { };
- struct value_category { };
-
- /*
- General traits that provides type_name and type_info
- */
- template <class Type> struct traits { };
-
- template <class Type>
- inline const char* type_name() {
- return traits<typename noconst_traits<Type >::noconst_type >::type_name();
- }
-
- template <class Type> struct traits_info {
- static swig_type_info *type_query(std::string name) {
- name += " *";
- return SWIG_TypeQuery(name.c_str());
- }
- static swig_type_info *type_info() {
- static swig_type_info *info = type_query(type_name<Type>());
- return info;
- }
- };
-
- /*
- Partial specialization for pointers (traits_info)
- */
- template <class Type> struct traits_info<Type *> {
- static swig_type_info *type_query(std::string name) {
- name += " *";
- return SWIG_TypeQuery(name.c_str());
- }
- static swig_type_info *type_info() {
- static swig_type_info *info = type_query(type_name<Type>());
- return info;
- }
- };
-
- template <class Type>
- inline swig_type_info *type_info() {
- return traits_info<Type>::type_info();
- }
-
- /*
- Partial specialization for pointers (traits)
- */
- template <class Type> struct traits <Type *> {
- typedef pointer_category category;
- static std::string make_ptr_name(const char* name) {
- std::string ptrname = name;
- ptrname += " *";
- return ptrname;
- }
- static const char* type_name() {
- static std::string name = make_ptr_name(swig::type_name<Type>());
- return name.c_str();
- }
- };
-
- template <class Type, class Category>
- struct traits_as { };
-
- template <class Type, class Category>
- struct traits_check { };
-
-}
-%}
-
-/*
- Generate the traits for a swigtype
-*/
-
-%define %traits_swigtype(Type...)
-%fragment(SWIG_Traits_frag(Type),"header",fragment="StdTraits") {
- namespace swig {
- template <> struct traits< Type > {
- typedef pointer_category category;
- static const char* type_name() { return #Type; }
- };
- }
-}
-%enddef
-
-
-
-/*
- Generate the typemaps for a class that has 'value' traits
-*/
-
-%define %typemap_traits(Code,Type...)
- %typemaps_asvalfrom(%arg(Code),
- %arg(swig::asval< Type >),
- %arg(swig::from),
- %arg(SWIG_Traits_frag(Type)),
- %arg(SWIG_Traits_frag(Type)),
- Type);
-%enddef
-
-/*
- Generate the typemaps for a class that behaves more like a 'pointer' or
- plain wrapped Swigtype.
-*/
-
-%define %typemap_traits_ptr(Code,Type...)
- %typemaps_asptrfrom(%arg(Code),
- %arg(swig::asptr),
- %arg(swig::from),
- %arg(SWIG_Traits_frag(Type)),
- %arg(SWIG_Traits_frag(Type)),
- Type);
-%enddef
-
-
-/*
- Equality methods
-*/
-%define %std_equal_methods(Type...)
-%extend Type {
- bool operator == (const Type& v) {
- return *self == v;
- }
-
- bool operator != (const Type& v) {
- return *self != v;
- }
-}
-
-%enddef
-
-/*
- Order methods
-*/
-
-%define %std_order_methods(Type...)
-%extend Type {
- bool operator > (const Type& v) {
- return *self > v;
- }
-
- bool operator < (const Type& v) {
- return *self < v;
- }
-
- bool operator >= (const Type& v) {
- return *self >= v;
- }
-
- bool operator <= (const Type& v) {
- return *self <= v;
- }
-}
-%enddef
-
-/*
- Comparison methods
-*/
-
-%define %std_comp_methods(Type...)
-%std_equal_methods(Type )
-%std_order_methods(Type )
-%enddef
-
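The %traits_swigtype macro above emits a swig::traits specialization whose type_name() returns the stringified type. A self-contained C++ sketch of roughly what that expansion looks like, using a hypothetical MyType and minimal stand-ins for the swig:: templates defined in the StdTraitsCommon fragment:

    #include <cstdio>

    struct MyType {};

    namespace swig {
        // Minimal stand-ins for the fragment definitions above.
        struct pointer_category {};
        template <class Type> struct traits {};

        // Roughly what %traits_swigtype(MyType) expands to.
        template <> struct traits<MyType> {
            typedef pointer_category category;
            static const char* type_name() { return "MyType"; }
        };
    }

    int main() {
        std::puts(swig::traits<MyType>::type_name());  // prints: MyType
        return 0;
    }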
diff --git a/contrib/tools/swig/Lib/std/std_container.i b/contrib/tools/swig/Lib/std/std_container.i
deleted file mode 100644
index 570dfde484..0000000000
--- a/contrib/tools/swig/Lib/std/std_container.i
+++ /dev/null
@@ -1,169 +0,0 @@
-%include <std_common.i>
-%include <exception.i>
-%include <std_alloc.i>
-
-%{
-#include <algorithm>
-%}
-
-// Common non-resizable container methods
-
-%define %std_container_methods_non_resizable(container...)
-
- container();
- container(const container& other);
-
- bool empty() const;
- size_type size() const;
- void swap(container& v);
-
-%enddef
-
-%define %std_container_methods_forward_iterators(container...)
-
- #ifdef SWIG_EXPORT_ITERATOR_METHODS
- class iterator;
- class const_iterator;
- iterator begin();
- iterator end();
- #endif
-
-%enddef
-
-%define %std_container_methods_reverse_iterators(container...)
-
- #ifdef SWIG_EXPORT_ITERATOR_METHODS
- class reverse_iterator;
- class const_reverse_iterator;
- reverse_iterator rbegin();
- reverse_iterator rend();
- #endif
-
-%enddef
-
-// Common container methods
-
-%define %std_container_methods(container...)
-
- %std_container_methods_non_resizable(%arg(container))
- %std_container_methods_forward_iterators(%arg(container))
- %std_container_methods_reverse_iterators(%arg(container))
-
- void clear();
- allocator_type get_allocator() const;
-
-%enddef
-
-%define %std_container_methods_without_reverse_iterators(container...)
-
- %std_container_methods_non_resizable(%arg(container))
- %std_container_methods_forward_iterators(%arg(container))
-
- void clear();
- allocator_type get_allocator() const;
-
-%enddef
-
-// Common sequence
-
-%define %std_sequence_methods_common(sequence)
-
- %std_container_methods(%arg(sequence));
-
- sequence(size_type size);
- void pop_back();
-
- void resize(size_type new_size);
-
- #ifdef SWIG_EXPORT_ITERATOR_METHODS
-%extend {
- // %extend wrapper used for differing definitions of these methods introduced in C++11
- iterator erase(iterator pos) { return $self->erase(pos); }
- iterator erase(iterator first, iterator last) { return $self->erase(first, last); }
-}
- #endif
-
-%enddef
-
-%define %std_sequence_methods_non_resizable(sequence)
-
- %std_container_methods_non_resizable(%arg(sequence))
-  %std_container_methods_forward_iterators(%arg(sequence))
-  %std_container_methods_reverse_iterators(%arg(sequence))
-
- const value_type& front() const;
- const value_type& back() const;
-
-%enddef
-
-%define %std_sequence_methods(sequence)
-
- %std_sequence_methods_common(%arg(sequence));
-
- sequence(size_type size, const value_type& value);
- void push_back(const value_type& x);
-
- const value_type& front() const;
- const value_type& back() const;
-
- void assign(size_type n, const value_type& x);
- void resize(size_type new_size, const value_type& x);
-
- #ifdef SWIG_EXPORT_ITERATOR_METHODS
-%extend {
- // %extend wrapper used for differing definitions of these methods introduced in C++11
- iterator insert(iterator pos, const value_type& x) { return $self->insert(pos, x); }
- void insert(iterator pos, size_type n, const value_type& x) { $self->insert(pos, n, x); }
-}
- #endif
-
-%enddef
-
-%define %std_sequence_methods_non_resizable_val(sequence...)
-
- %std_container_methods_non_resizable(%arg(sequence))
-  %std_container_methods_forward_iterators(%arg(sequence))
-  %std_container_methods_reverse_iterators(%arg(sequence))
-
- value_type front() const;
- value_type back() const;
-
-
-%enddef
-
-%define %std_sequence_methods_val(sequence...)
-
- %std_sequence_methods_common(%arg(sequence));
-
- sequence(size_type size, value_type value);
- void push_back(value_type x);
-
- value_type front() const;
- value_type back() const;
-
- void assign(size_type n, value_type x);
- void resize(size_type new_size, value_type x);
-
- #ifdef SWIG_EXPORT_ITERATOR_METHODS
-%extend {
- // %extend wrapper used for differing definitions of these methods introduced in C++11
- iterator insert(iterator pos, value_type x) { return $self->insert(pos, x); }
- void insert(iterator pos, size_type n, value_type x) { $self->insert(pos, n, x); }
-}
- #endif
-
-%enddef
-
-
-//
-// Ignore member methods for Type with no default constructor
-//
-%define %std_nodefconst_type(Type...)
-%feature("ignore") std::vector< Type >::vector(size_type size);
-%feature("ignore") std::vector< Type >::resize(size_type size);
-%feature("ignore") std::deque< Type >::deque(size_type size);
-%feature("ignore") std::deque< Type >::resize(size_type size);
-%feature("ignore") std::list< Type >::list(size_type size);
-%feature("ignore") std::list< Type >::resize(size_type size);
-%enddef
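Several %extend blocks above are annotated as wrappers for "differing definitions of these methods introduced in C++11": erase() and insert() switched from iterator to const_iterator parameters in C++11. A minimal C++ sketch of why forwarding through a thin wrapper keeps a single declared signature valid on both sides of that change; erase_at() is a hypothetical helper, not part of the interface file.

    #include <cstddef>
    #include <vector>

    // The wrapper's declared parameter stays a plain index, while the call
    // itself resolves to erase(const_iterator) on C++11 libraries and to
    // erase(iterator) on pre-C++11 ones.
    std::vector<int>::iterator erase_at(std::vector<int>& v, std::size_t index) {
        return v.erase(v.begin() + static_cast<std::ptrdiff_t>(index));
    }

    int main() {
        std::vector<int> v;
        v.push_back(1);
        v.push_back(2);
        v.push_back(3);
        erase_at(v, 1);
        return static_cast<int>(v.size());  // 2
    }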
diff --git a/contrib/tools/swig/Lib/std/std_except.i b/contrib/tools/swig/Lib/std/std_except.i
deleted file mode 100644
index 728b9c8b52..0000000000
--- a/contrib/tools/swig/Lib/std/std_except.i
+++ /dev/null
@@ -1,73 +0,0 @@
-#if defined(SWIGJAVA) || defined(SWIGCSHARP)
-#error "do not use this version of std_except.i"
-#endif
-
-%{
-#include <typeinfo>
-#include <stdexcept>
-%}
-
-#if defined(SWIG_STD_EXCEPTIONS_AS_CLASSES)
-
-namespace std {
- struct exception
- {
- virtual ~exception() throw();
- virtual const char* what() const throw();
- };
-
- struct bad_cast : exception
- {
- };
-
- struct bad_exception : exception
- {
- };
-
- struct logic_error : exception
- {
- logic_error(const string& msg);
- };
-
- struct domain_error : logic_error
- {
- domain_error(const string& msg);
- };
-
- struct invalid_argument : logic_error
- {
- invalid_argument(const string& msg);
- };
-
- struct length_error : logic_error
- {
- length_error(const string& msg);
- };
-
- struct out_of_range : logic_error
- {
- out_of_range(const string& msg);
- };
-
- struct runtime_error : exception
- {
- runtime_error(const string& msg);
- };
-
- struct range_error : runtime_error
- {
- range_error(const string& msg);
- };
-
- struct overflow_error : runtime_error
- {
- overflow_error(const string& msg);
- };
-
- struct underflow_error : runtime_error
- {
- underflow_error(const string& msg);
- };
-}
-
-#endif
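The declarations above mirror the standard exception hierarchy, so a handler written against a base class also catches the derived types. A small standalone check:

    #include <cstdio>
    #include <stdexcept>

    // out_of_range derives from logic_error, which derives from exception, so a
    // handler for the base class also catches the derived throw.
    int main() {
        try {
            throw std::out_of_range("index 7 is past the end");
        } catch (const std::logic_error& e) {
            std::printf("caught logic_error: %s\n", e.what());
        }
        return 0;
    }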
diff --git a/contrib/tools/swig/Lib/std/std_string.i b/contrib/tools/swig/Lib/std/std_string.i
deleted file mode 100644
index 35fcdd16c6..0000000000
--- a/contrib/tools/swig/Lib/std/std_string.i
+++ /dev/null
@@ -1,13 +0,0 @@
-%include <std/std_basic_string.i>
-
-/* plain strings */
-
-namespace std
-{
- %std_comp_methods(basic_string<char>);
- %naturalvar string;
- typedef basic_string<char> string;
-}
-
-
-%template(string) std::basic_string<char>;
diff --git a/contrib/tools/swig/Lib/std/std_vector.i b/contrib/tools/swig/Lib/std/std_vector.i
deleted file mode 100644
index b35f03bea2..0000000000
--- a/contrib/tools/swig/Lib/std/std_vector.i
+++ /dev/null
@@ -1,225 +0,0 @@
-//
-// std::vector
-//
-
-%include <std_container.i>
-
-// Vector
-
-%define %std_vector_methods(vector...)
- %std_sequence_methods(vector)
-
- void reserve(size_type n);
- size_type capacity() const;
-%enddef
-
-
-%define %std_vector_methods_val(vector...)
- %std_sequence_methods_val(vector)
-
- void reserve(size_type n);
- size_type capacity() const;
-%enddef
-
-
-// ------------------------------------------------------------------------
-// std::vector
-//
-// The aim of all that follows is to integrate std::vector with the target
-// language as much as possible, namely, to allow the user to pass and
-// be returned native sequences such as tuples or lists.
-// const declarations are used to guess the intent of the function being
-// exported; therefore, the following rationale is applied:
-//
-// -- f(std::vector<T>), f(const std::vector<T>&):
-// the parameter being read-only, either a sequence or a
-// previously wrapped std::vector<T> can be passed.
-// -- f(std::vector<T>&), f(std::vector<T>*):
-// the parameter may be modified; therefore, only a wrapped std::vector
-// can be passed.
-// -- std::vector<T> f(), const std::vector<T>& f():
-//      the vector is returned by copy; therefore, a sequence of T values
-//      is returned, which is most easily used in other functions
-// -- std::vector<T>& f(), std::vector<T>* f():
-// the vector is returned by reference; therefore, a wrapped std::vector
-// is returned
-// -- const std::vector<T>* f(), f(const std::vector<T>*):
-// for consistency, they expect and return a plain vector pointer.
-// ------------------------------------------------------------------------
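A minimal C++ sketch of the four signature categories described in the comment block above; the functions (sum, push_zero, make_pair_vec, storage) are hypothetical examples, and only their parameter and return shapes matter.

    #include <cstddef>
    #include <vector>

    // Read-only parameter: any target-language sequence can be converted in.
    int sum(const std::vector<int>& v) {
        int s = 0;
        for (std::size_t i = 0; i < v.size(); ++i) s += v[i];
        return s;
    }

    // Mutable parameter: only an already wrapped std::vector can be passed.
    void push_zero(std::vector<int>& v) { v.push_back(0); }

    // Returned by copy: surfaces as a native sequence in the target language.
    std::vector<int> make_pair_vec() { return std::vector<int>(2, 7); }

    // Returned by reference: surfaces as a wrapped std::vector object.
    std::vector<int>& storage() {
        static std::vector<int> v;
        return v;
    }

    int main() {
        std::vector<int> v = make_pair_vec();
        push_zero(v);
        storage() = v;
        return sum(storage());  // 7 + 7 + 0 = 14
    }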
-
-%{
-#include <vector>
-%}
-
-// exported classes
-
-
-namespace std {
-
- template<class _Tp, class _Alloc = allocator< _Tp > >
- class vector {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef _Tp value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef _Alloc allocator_type;
-
- %traits_swigtype(_Tp);
- %traits_enum(_Tp);
-
- %fragment(SWIG_Traits_frag(std::vector< _Tp, _Alloc >), "header",
- fragment=SWIG_Traits_frag(_Tp),
- fragment="StdVectorTraits") {
- namespace swig {
- template <> struct traits<std::vector< _Tp, _Alloc > > {
- typedef pointer_category category;
- static const char* type_name() {
- return "std::vector<" #_Tp "," #_Alloc " >";
- }
- };
- }
- }
-
- %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector< _Tp, _Alloc >);
-
-#ifdef %swig_vector_methods
- // Add swig/language extra methods
- %swig_vector_methods(std::vector< _Tp, _Alloc >);
-#endif
-
- %std_vector_methods(vector);
- };
-
- // ***
- // This specialization should disappear or get simplified when
- // a 'const SWIGTYPE*&' can be defined
- // ***
- template<class _Tp, class _Alloc >
- class vector< _Tp*, _Alloc > {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef _Tp* value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef _Alloc allocator_type;
-
- %traits_swigtype(_Tp);
-
- %fragment(SWIG_Traits_frag(std::vector< _Tp*, _Alloc >), "header",
- fragment=SWIG_Traits_frag(_Tp),
- fragment="StdVectorTraits") {
- namespace swig {
- template <> struct traits<std::vector< _Tp*, _Alloc > > {
- typedef value_category category;
- static const char* type_name() {
- return "std::vector<" #_Tp " *," #_Alloc " >";
- }
- };
- }
- }
-
- %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector< _Tp*, _Alloc >);
-
-#ifdef %swig_vector_methods_val
- // Add swig/language extra methods
- %swig_vector_methods_val(std::vector< _Tp*, _Alloc >);
-#endif
-
- %std_vector_methods_val(vector);
- };
-
- // ***
- // const pointer specialization
- // ***
- template<class _Tp, class _Alloc >
- class vector< _Tp const *, _Alloc > {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef _Tp const * value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef const value_type& const_reference;
- typedef _Alloc allocator_type;
-
- %traits_swigtype(_Tp);
-
- %fragment(SWIG_Traits_frag(std::vector< _Tp const*, _Alloc >), "header",
- fragment=SWIG_Traits_frag(_Tp),
- fragment="StdVectorTraits") {
- namespace swig {
- template <> struct traits<std::vector< _Tp const*, _Alloc > > {
- typedef value_category category;
- static const char* type_name() {
- return "std::vector<" #_Tp " const*," #_Alloc " >";
- }
- };
- }
- }
-
- %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector< _Tp const*, _Alloc >);
-
-#ifdef %swig_vector_methods_val
- // Add swig/language extra methods
- %swig_vector_methods_val(std::vector< _Tp const*, _Alloc >);
-#endif
-
- %std_vector_methods_val(vector);
- };
-
- // ***
- // bool specialization
- // ***
-
- template<class _Alloc >
- class vector<bool,_Alloc > {
- public:
- typedef size_t size_type;
- typedef ptrdiff_t difference_type;
- typedef bool value_type;
- typedef value_type* pointer;
- typedef const value_type* const_pointer;
- typedef value_type& reference;
- typedef bool const_reference;
- typedef _Alloc allocator_type;
-
- %traits_swigtype(bool);
-
- %fragment(SWIG_Traits_frag(std::vector<bool, _Alloc >), "header",
- fragment=SWIG_Traits_frag(bool),
- fragment="StdVectorTraits") {
- namespace swig {
- template <> struct traits<std::vector<bool, _Alloc > > {
- typedef value_category category;
- static const char* type_name() {
- return "std::vector<bool, _Alloc >";
- }
- };
- }
- }
-
- %typemap_traits_ptr(SWIG_TYPECHECK_VECTOR, std::vector<bool, _Alloc >);
-
-
-#ifdef %swig_vector_methods_val
- // Add swig/language extra methods
- %swig_vector_methods_val(std::vector<bool, _Alloc >);
-#endif
-
- %std_vector_methods_val(vector);
-
-#if defined(SWIG_STD_MODERN_STL) && !defined(SWIG_STD_NOMODERN_STL)
- void flip();
-#endif
-
- };
-
-}
diff --git a/contrib/tools/swig/Lib/typemaps/std_except.swg b/contrib/tools/swig/Lib/typemaps/std_except.swg
deleted file mode 100644
index 75d066490f..0000000000
--- a/contrib/tools/swig/Lib/typemaps/std_except.swg
+++ /dev/null
@@ -1,37 +0,0 @@
-%include <typemaps/exception.swg>
-
-/*
- Mark all of std exception classes as "exception classes" via
- the "exceptionclass" feature.
-
- If needed, you can disable it by using %noexceptionclass.
-*/
-
-%define %std_exception_map(Exception, Code)
- %exceptionclass Exception;
-#if !defined(SWIG_STD_EXCEPTIONS_AS_CLASSES)
- %typemap(throws,noblock=1) Exception {
- SWIG_exception_fail(Code, $1.what());
- }
- %ignore Exception;
- struct Exception {
- };
-#endif
-%enddef
-
-namespace std {
- %std_exception_map(bad_cast, SWIG_TypeError);
- %std_exception_map(bad_exception, SWIG_SystemError);
- %std_exception_map(domain_error, SWIG_ValueError);
- %std_exception_map(exception, SWIG_SystemError);
- %std_exception_map(invalid_argument, SWIG_ValueError);
- %std_exception_map(length_error, SWIG_IndexError);
- %std_exception_map(logic_error, SWIG_RuntimeError);
- %std_exception_map(out_of_range, SWIG_IndexError);
- %std_exception_map(overflow_error, SWIG_OverflowError);
- %std_exception_map(range_error, SWIG_OverflowError);
- %std_exception_map(runtime_error, SWIG_RuntimeError);
- %std_exception_map(underflow_error, SWIG_OverflowError);
-}
-
-%include <std/std_except.i>
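The %std_exception_map macro above installs a throws typemap that turns a thrown C++ exception into a target-language error with the mapped code (for example, std::out_of_range becomes SWIG_IndexError). A heavily simplified C++ sketch of that control flow; report_error() and wrapped_at() are hypothetical stand-ins, not SWIG APIs.

    #include <cstdio>
    #include <stdexcept>

    // Hypothetical stand-in for SWIG_exception_fail(code, msg).
    void report_error(const char* code, const char* msg) {
        std::printf("%s: %s\n", code, msg);
    }

    int wrapped_at(int i) {
        if (i < 0 || i >= 3) throw std::out_of_range("index out of range");
        return i * i;
    }

    // Shape of the wrapper the throws typemap produces: the C++ exception is
    // caught and forwarded under the mapped error code.
    int main() {
        try {
            return wrapped_at(7);
        } catch (const std::out_of_range& e) {
            report_error("SWIG_IndexError", e.what());
        }
        return 0;
    }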
diff --git a/contrib/tools/swig/Lib/typemaps/std_string.swg b/contrib/tools/swig/Lib/typemaps/std_string.swg
deleted file mode 100644
index 5b57beab56..0000000000
--- a/contrib/tools/swig/Lib/typemaps/std_string.swg
+++ /dev/null
@@ -1,25 +0,0 @@
-//
-// String
-//
-
-
-#ifndef SWIG_STD_BASIC_STRING
-#define SWIG_STD_STRING
-
-%include <typemaps/std_strings.swg>
-
-%fragment("<string>");
-
-namespace std
-{
- %naturalvar string;
- class string;
-}
-
-%typemaps_std_string(std::string, char, SWIG_AsCharPtrAndSize, SWIG_FromCharPtrAndSize, %checkcode(STDSTRING));
-
-#else
-
-%include <std/std_string.i>
-
-#endif
diff --git a/contrib/tools/swig/Lib/typemaps/std_strings.swg b/contrib/tools/swig/Lib/typemaps/std_strings.swg
deleted file mode 100644
index e9c23ba915..0000000000
--- a/contrib/tools/swig/Lib/typemaps/std_strings.swg
+++ /dev/null
@@ -1,78 +0,0 @@
-
-/* defining the String asptr/from methods */
-
-%define %std_string_asptr(String, Char, SWIG_AsCharPtrAndSize, Frag)
-%fragment(SWIG_AsPtr_frag(String),"header",fragment=Frag) {
-SWIGINTERN int
-SWIG_AsPtr_dec(String)(SWIG_Object obj, String **val)
-{
-  Char* buf = 0; size_t size = 0; int alloc = SWIG_OLDOBJ;
- if (SWIG_IsOK((SWIG_AsCharPtrAndSize(obj, &buf, &size, &alloc)))) {
- if (buf) {
- if (val) *val = new String(buf, size - 1);
- if (alloc == SWIG_NEWOBJ) %delete_array(buf);
- return SWIG_NEWOBJ;
- } else {
- if (val) *val = 0;
- return SWIG_OLDOBJ;
- }
- } else {
- static int init = 0;
- static swig_type_info* descriptor = 0;
- if (!init) {
- descriptor = SWIG_TypeQuery(#String " *");
- init = 1;
- }
- if (descriptor) {
- String *vptr;
- int res = SWIG_ConvertPtr(obj, (void**)&vptr, descriptor, 0);
- if (SWIG_IsOK(res) && val) *val = vptr;
- return res;
- }
- }
- return SWIG_ERROR;
-}
-}
-%enddef
-
-%define %std_string_from(String, SWIG_FromCharPtrAndSize, Frag)
-%fragment(SWIG_From_frag(String),"header",fragment=Frag) {
-SWIGINTERNINLINE SWIG_Object
-SWIG_From_dec(String)(const String& s)
-{
- return SWIG_FromCharPtrAndSize(s.data(), s.size());
-}
-}
-%enddef
-
-%define %std_string_asval(String)
-%fragment(SWIG_AsVal_frag(String),"header", fragment=SWIG_AsPtr_frag(String)) {
-SWIGINTERN int
-SWIG_AsVal_dec(String)(SWIG_Object obj, String *val)
-{
- String* v = (String *) 0;
- int res = SWIG_AsPtr(String)(obj, &v);
- if (!SWIG_IsOK(res)) return res;
- if (v) {
- if (val) *val = *v;
- if (SWIG_IsNewObj(res)) {
- %delete(v);
- res = SWIG_DelNewMask(res);
- }
- return res;
- }
- return SWIG_ERROR;
-}
-}
-%enddef
-
-
-%define %typemaps_std_string(String, Char, AsPtrMethod, FromMethod, CheckCode)
-
-%std_string_asptr(String, Char, AsPtrMethod, #AsPtrMethod)
-%std_string_asval(String)
-%std_string_from(String, FromMethod, #FromMethod)
-
-%typemaps_asptrfromn(%arg(CheckCode), String);
-
-%enddef
diff --git a/contrib/tools/swig/Lib/wchar.i b/contrib/tools/swig/Lib/wchar.i
deleted file mode 100644
index 14de346346..0000000000
--- a/contrib/tools/swig/Lib/wchar.i
+++ /dev/null
@@ -1,11 +0,0 @@
-/* -----------------------------------------------------------------------------
- * wchar.i
- * ----------------------------------------------------------------------------- */
-
-/*
- wchar_t not supported, unless otherwise specified in the target language.
-*/
-
-#if defined(SWIG_WCHAR)
-#undef SWIG_WCHAR
-#endif
diff --git a/devtools/dummy_arcadia/hello_world/hello.cpp b/devtools/dummy_arcadia/hello_world/hello.cpp
deleted file mode 100644
index 72e4b6f4a2..0000000000
--- a/devtools/dummy_arcadia/hello_world/hello.cpp
+++ /dev/null
@@ -1,3 +0,0 @@
-int main() {
- return 0;
-}
diff --git a/devtools/dummy_arcadia/hello_world/ya.make b/devtools/dummy_arcadia/hello_world/ya.make
deleted file mode 100644
index 691c88f86c..0000000000
--- a/devtools/dummy_arcadia/hello_world/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-PROGRAM()
-
-SUBSCRIBER(
- spreis
- g:arcadia-devtools
-)
-
-ALLOCATOR(FAKE)
-NO_PLATFORM()
-
-SRCS(
- hello.cpp
-)
-
-END()
diff --git a/devtools/ya/chameleon_bin/__main__.py b/devtools/ya/chameleon_bin/__main__.py
deleted file mode 100644
index f79b940fdb..0000000000
--- a/devtools/ya/chameleon_bin/__main__.py
+++ /dev/null
@@ -1,59 +0,0 @@
-import logging
-import os
-import shutil
-from library.python.testing.recipe import declare_recipe
-import yatest.common
-
-
-class ChameleonRecipe:
- def __init__(self):
- self.bin2_root: str = None
- self.bin3_root: str = None
- self.bin2: str = None
- self.bin3: str = None
-
- def post_init(self):
- self.bin2_root = yatest.common.build_path("devtools/ya/bin")
- self.bin3_root = yatest.common.build_path("devtools/ya/bin3")
- self.bin2 = os.path.join(self.bin2_root, "ya-bin")
- self.bin3 = os.path.join(self.bin3_root, "ya-bin")
-
- logging.info("ya-bin2 exists: %s", os.path.exists(self.bin2))
- logging.info("ya-bin3 exists: %s", os.path.exists(self.bin3))
-
- @classmethod
- def check_argv_py3(cls, argv):
- return argv[0] == "yes"
-
- def start(self, argv):
- self.post_init()
-
- logging.info("args: %s", argv)
-
- assert not (os.path.exists(self.bin2) and os.path.exists(self.bin3)), "Only one version of ya-bin can be in DEPENDS"
-
- if self.check_argv_py3(argv):
- if not os.path.exists(self.bin2):
- if not os.path.exists(self.bin2_root):
- os.mkdir(self.bin2_root)
- logging.debug("Create bin2 root folder: %s", self.bin2_root)
- else:
- logging.debug("Maybe test was restarted")
- shutil.copy(self.bin3, self.bin2)
- logging.debug("copy ya-bin3 to bin2 root: %s -> %s", self.bin3, self.bin2)
-
-
- def stop(self, argv):
- self.post_init()
-
- logging.info("args: %s", argv)
-
- if self.check_argv_py3(argv):
- if os.path.exists(self.bin2):
- logging.debug("Remove bin2 executable: %s", self.bin2)
- # TOFIX v-korovin
- os.remove(self.bin2)
-
-if __name__ == "__main__":
- recipe = ChameleonRecipe()
- declare_recipe(recipe.start, recipe.stop)
diff --git a/devtools/ya/chameleon_bin/recipe.inc b/devtools/ya/chameleon_bin/recipe.inc
deleted file mode 100644
index 4e705cc996..0000000000
--- a/devtools/ya/chameleon_bin/recipe.inc
+++ /dev/null
@@ -1,32 +0,0 @@
-DEPENDS(devtools/ya/chameleon_bin)
-
-INCLUDE(${ARCADIA_ROOT}/devtools/ya/opensource.inc)
-
-IF (PYTHON3)
- DEPENDS(devtools/ya/bin3)
-ELSE()
- IF (NOT OPENSOURCE)
- DEPENDS(devtools/ya/bin)
- ENDIF()
-ENDIF()
-
-IF (NEBIUS)
- DATA(
- arcadia/nebius/devtools/ya-bin/ya.conf
- )
-ELSEIF (OPENSOURCE)
- DATA(
- arcadia/devtools/ya/opensource/ya.conf
- )
-ELSE()
- DATA(
- arcadia/ya.conf
- )
-ENDIF()
-
-DATA(
- arcadia/ya
-)
-
-USE_RECIPE(devtools/ya/chameleon_bin/chameleon_bin ${PYTHON3})
-
diff --git a/devtools/ya/chameleon_bin/ya.make b/devtools/ya/chameleon_bin/ya.make
deleted file mode 100644
index 3b418c2651..0000000000
--- a/devtools/ya/chameleon_bin/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-PY3_PROGRAM()
-
-PY_SRCS(__main__.py)
-
-PEERDIR(
- library/python/testing/recipe
- library/python/testing/yatest_common
-)
-
-NO_LINT()
-
-END()
diff --git a/devtools/ya/handlers/dump/__init__.py b/devtools/ya/handlers/dump/__init__.py
deleted file mode 100644
index 77eb2f45d2..0000000000
--- a/devtools/ya/handlers/dump/__init__.py
+++ /dev/null
@@ -1,1161 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-import csv
-import pickle
-import exts.yjson as json
-import logging
-import os
-import sys
-import enum
-import six
-
-from build.build_facade import (
- gen_all_loops,
- gen_dir_loops,
- gen_graph,
- gen_licenses_list,
- gen_forced_deps_list,
- gen_modules,
- gen_module_info,
- gen_plan,
- gen_json_graph,
- gen_uids,
- gen_srcdeps,
- gen_dependencies,
- gen_test_dart,
- gen_conf,
- gen_filelist,
- gen_relation,
-)
-from handlers.dump.gen_conf_docs import dump_mmm_docs
-from build.dir_graph import reachable, gen_dir_graph
-from build.compilation_database import dump_compilation_database, COMPILATION_DATABASE_OPTS
-from exts.strtobool import strtobool
-from exts.tmp import temp_dir
-from core.yarg import (
- CompositeHandler,
- OptsHandler,
- ArgConsumer,
- FreeArgConsumer,
- SetValueHook,
- Options,
- SetConstValueHook,
- SetConstAppendHook,
- UpdateValueHook,
- SetAppendHook,
- ExtendHook,
- ArgsValidatingException,
- Group,
-)
-from yalibrary.vcs import vcsversion
-from core.imprint import imprint
-from build.build_opts import (
- YMakeDebugOptions,
- YMakeBinOptions,
- FlagsOptions,
- CustomFetcherOptions,
- ToolsOptions,
- SandboxAuthOptions,
- JavaSpecificOptions,
-)
-from build.build_opts import (
- BuildTypeOptions,
- BuildTargetsOptions,
- ShowHelpOptions,
- CustomBuildRootOptions,
- ContinueOnFailOptions,
-)
-from build.build_opts import YMakeRetryOptions, ConfigurationPresetsOptions
-from core.common_opts import CrossCompilationOptions, YaBin3Options
-from test.explore import generate_tests_by_dart
-from devtools.ya.test.dartfile import decode_recipe_cmdline
-
-import app
-import app_config
-
-logger = logging.getLogger(__name__)
-
-
-class DumpYaHandler(CompositeHandler):
- common_opts = [ShowHelpOptions()]
-
- @staticmethod
- def common_build_facade_opts(with_free_targets=True):
- return [
- FlagsOptions(),
- CustomFetcherOptions(),
- SandboxAuthOptions(),
- YMakeBinOptions(),
- YMakeRetryOptions(),
- CrossCompilationOptions(),
- BuildTypeOptions('release'),
- ContinueOnFailOptions(),
- BuildTargetsOptions(with_free=with_free_targets),
- ToolsOptions(),
- ConfigurationPresetsOptions(),
- YaBin3Options(),
- ]
-
- def __init__(self):
- CompositeHandler.__init__(self, description='Repository related information')
- self['modules'] = OptsHandler(
- action=app.execute(action=do_modules),
- description='All modules',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- DepFilterOptions(),
- PeerDirectoriesOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['module-info'] = OptsHandler(
- action=app.execute(action=do_module_info),
- description='Modules info',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- DepFilterOptions(),
- PeerDirectoriesOptions(),
- DataOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- DumpModuleInfoOptions(),
- ],
- )
- self['src-deps'] = OptsHandler(
- action=app.execute(action=do_dump_srcdeps),
- description='Dump of all source dependencies',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DumpSrcDepsOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['dir-graph'] = OptsHandler(
- action=app.execute(action=do_dir_graph),
- description='Dependencies between directories',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DumpDirOptions(),
- SplitByTypeOptions(),
- DumpTestListOptions(),
- DepTraverseOptions(),
- DepFilterOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- LegacyDepsOptions(),
- ],
- )
- self['dep-graph'] = OptsHandler(
- action=app.execute(action=do_dep_graph),
- description='Dependency internal graph',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepGraphOutputOptions(),
- DepTraverseOptions(),
- DepFilterOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- LegacyDepsOptions(),
- ],
- )
- self['dot-graph'] = OptsHandler(
- action=app.execute(action=do_dot_graph),
- description='Dependency between directories in dot format',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- DepFilterOptions(),
- PeerDirectoriesOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['json-dep-graph'] = OptsHandler(
- action=app.execute(action=do_json_dep_graph),
- description='Dependency graph as json',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- DepFilterOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- LegacyDepsOptions(),
- ],
- )
- self['build-plan'] = OptsHandler(
- action=app.execute(action=do_build_plan),
- description='Build plan',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- CustomBuildRootOptions(),
- YMakeDebugOptions(),
- ],
- )
- self['compile-commands'] = OptsHandler(
- action=app.execute(action=dump_compilation_database),
- description='JSON compilation database',
- opts=COMPILATION_DATABASE_OPTS + [ToolsOptions(), CustomFetcherOptions(), SandboxAuthOptions()],
- )
- self['compilation-database'] = OptsHandler(
- action=app.execute(action=dump_compilation_database),
- description='Alias for compile-commands',
- opts=COMPILATION_DATABASE_OPTS + [ToolsOptions(), CustomFetcherOptions(), SandboxAuthOptions()],
- )
- self['relation'] = OptsHandler(
- action=app.execute(action=do_relation),
- description='PEERDIR relations. Please don\'t run from the arcadia root.',
- opts=self.common_opts
- + self.common_build_facade_opts(False)
- + [
- RelationSrcDstOptions(free_dst=True),
- DepTraverseOptions(),
- DepFilterOptions(),
- PeerDirectoriesOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['all-relations'] = OptsHandler(
- action=app.execute(action=do_all_relations),
- description='All relations between internal graph nodes in dot format. Please don\'t run from the arcadia root.',
- opts=self.common_opts
- + self.common_build_facade_opts(False)
- + [
- RelationSrcDstOptions(free_dst=True),
- DumpAllRelationsOptions(),
- DepTraverseOptions(),
- DepFilterOptions(),
- PeerDirectoriesOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['files'] = OptsHandler(
- action=app.execute(action=do_files),
- description='File list',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- FilesFilterOptions(),
- DepTraverseOptions(),
- DepFilterOptions(),
- DataOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['loops'] = OptsHandler(
- action=app.execute(action=do_loops),
- description='All loops in arcadia',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['peerdir-loops'] = OptsHandler(
- action=app.execute(action=do_peerdir_loops),
- description='Loops by peerdirs',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- DepTraverseOptions(),
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- ],
- )
- self['imprint'] = OptsHandler(
- action=app.execute(action=do_imprint),
- description='Directory imprint',
- opts=self.common_opts + [CustomFetcherOptions(), SandboxAuthOptions(), ToolsOptions()],
- )
- self['uids'] = OptsHandler(
- action=app.execute(action=do_uids),
- description='All targets uids',
- opts=self.common_opts + self.common_build_facade_opts(),
- visible=False,
- )
- self['test-list'] = OptsHandler(
- action=app.execute(action=do_test_list),
- description='All test entries',
- opts=self.common_opts + self.common_build_facade_opts() + [DumpTestListOptions()],
- )
- self['json-test-list'] = OptsHandler(
- action=app.execute(action=do_json_test_list),
- description='All test entries as json',
- opts=self.common_opts + self.common_build_facade_opts() + [DumpTestListOptions()],
- )
- self['conf'] = OptsHandler(
- action=app.execute(action=do_conf),
- description='Print build conf',
- opts=self.common_opts
- + [
- FlagsOptions(),
- CustomFetcherOptions(),
- SandboxAuthOptions(),
- CrossCompilationOptions(),
- ToolsOptions(),
- BuildTypeOptions('release'),
- JavaSpecificOptions(),
- ],
- visible=False,
- )
- self['licenses'] = OptsHandler(
- action=app.execute(action=do_licenses),
- description='Print known licenses grouped by their properties',
- opts=self.common_opts
- + self.common_build_facade_opts()
- + [
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- DumpLicenseOptions(),
- ],
- visible=False,
- )
- self['forced-deps'] = OptsHandler(
- action=app.execute(action=do_forced_deps),
- description='Print known forced dependency management',
- opts=FullForcedDepsOptions(),
- visible=False,
- )
- self['conf-docs'] = OptsHandler(
- action=app.execute(action=do_conf_docs),
- description='Print descriptions of entities (modules, macros, multimodules, etc.)',
- opts=self.common_opts + self.common_build_facade_opts() + [DumpDescriptionOptions()],
- )
- self['root'] = OptsHandler(
- action=app.execute(lambda params: sys.stdout.write(params.arc_root) and 0),
- description='Print Arcadia root',
- opts=self.common_opts,
- )
- self['vcs-info'] = OptsHandler(
- action=app.execute(action=do_dump_vcs_info),
- description='Print VCS revision information.',
- opts=self.common_opts + self.common_build_facade_opts(),
- visible=False,
- )
- self['raw-vcs-info'] = OptsHandler(
- action=app.execute(action=do_dump_raw_vcs_info),
- description='Print VCS revision information.',
- opts=self.common_opts + self.common_build_facade_opts(),
- visible=False,
- )
- self['svn-revision'] = OptsHandler(
- action=app.execute(action=do_dump_svn_revision),
- description='Print SVN revision information.',
- opts=self.common_opts + self.common_build_facade_opts(),
- visible=False,
- )
- self['recipes'] = OptsHandler(
- action=app.execute(action=do_recipes),
- description='All recipes used in tests',
- opts=self.common_opts + self.common_build_facade_opts() + [DumpTestListOptions(), DumpRecipesOptions()],
- )
- if app_config.in_house:
- import devtools.ya.handlers.dump.arcadia_specific as arcadia_specific
- from handlers.dump.debug import debug_handler
-
- self['groups'] = arcadia_specific.GroupsHandler()
- self['atd-revisions'] = OptsHandler(
- action=app.execute(action=arcadia_specific.do_atd_revisions),
- description='Dump revisions of trunk/arcadia_tests_data',
- opts=self.common_opts
- + [DumpAtdRevisionOptions(), CustomFetcherOptions(), SandboxAuthOptions(), ToolsOptions()],
- )
- self['debug'] = debug_handler
-
-
-def _do_dump(gen_func, params, debug_options=[], write_stdout=True, build_root=None, **kwargs):
- for name in 'debug_options', 'filter_opts', 'peerdir_opts':
- if hasattr(params, name):
- debug_options.extend(getattr(params, name))
- logger.debug('abs_targets: %s', params.abs_targets)
- with temp_dir() as tmp:
- res = gen_func(
- build_root=build_root or tmp,
- build_type=params.build_type,
- build_targets=params.abs_targets,
- debug_options=debug_options,
- flags=params.flags,
- ymake_bin=getattr(params, 'ymake_bin', None),
- host_platform=params.host_platform,
- target_platforms=params.target_platforms,
- **kwargs
- )
- if write_stdout:
- sys.stdout.write(res.stdout)
- return res
-
-
-def do_modules(params):
- _do_dump(gen_modules, params)
-
-
-def do_module_info(params, write_stdout=True):
- if params.with_data:
- params.flags['YMAKE_ADD_DATA'] = 'yes'
- return _do_dump(
- gen_func=gen_module_info,
- params=params,
- modules_info_filter=getattr(params, 'modules_info_filter'),
- modules_info_file=getattr(params, 'modules_info_file'),
- write_stdout=write_stdout,
- )
-
-
-def do_dir_graph(params):
- def merge_lists(one, two):
- return sorted(set(one + two))
-
- def get_test_dependencies(deps, arc_root, test, test_data=None):
- data = merge_lists(test_data or [], deps)
- return merge_lists(data, get_canondata_paths(arc_root, test))
-
- if params.dump_deps:
- deps = _do_dump(gen_dependencies, params, params.legacy_deps_opts, write_stdout=False)
- json.dump(json.loads(deps.stdout), sys.stdout, indent=4, sort_keys=True)
- return
-
- if params.dump_reachable_dirs:
- _do_dump(gen_graph, params, params.legacy_deps_opts + ['x', 'M'])
- return
-
- dg = _do_dump(
- gen_dir_graph, params, params.legacy_deps_opts, write_stdout=False, split_by_types=params.split_by_type
- )
-
- tests = get_tests(params)
- for test in tests:
- test_data = test.get_arcadia_test_data()
- if test.project_path in test_data:
- test_data.remove(test.project_path)
- if params.split_by_type:
- if test.project_path not in dg:
- dg[test.project_path] = {}
- dg[test.project_path]["INCLUDE"] = get_test_dependencies(
- dg[test.project_path].get("INCLUDE", []), params.arc_root, test
- )
- dg[test.project_path]["DATA"] = merge_lists(dg[test.project_path].get("DATA", []), test_data)
- else:
- dg[test.project_path] = get_test_dependencies(
- dg.get(test.project_path, []), params.arc_root, test, test_data
- )
-
- if params.trace_from is not None:
- print('\n'.join(reachable(dg, params.trace_from, params.split_by_type)))
-
- elif params.plain_dump:
-
- def get_plain_deps():
- plain_deps = set()
- for k, v in six.iteritems(dg):
- plain_deps.add(k)
- plain_deps |= set(v)
- return sorted(list(plain_deps))
-
- json.dump(get_plain_deps(), sys.stdout, indent=4, sort_keys=True)
- else:
- json.dump(dg, sys.stdout, indent=4, sort_keys=True)
-
-
-def get_canondata_paths(arc_root, test):
- paths = []
- canondata_path = os.path.join(arc_root, test.project_path, "canondata")
- if os.path.exists(canondata_path):
- for root, dirs, files in os.walk(canondata_path):
- paths.append(os.path.relpath(root, arc_root))
- return paths
-
-
-DUMP_OPTS_GROUP = Group("Dump options", 0)
-
-
-class SplitByTypeOptions(Options):
- def __init__(self):
- self.split_by_type = False
-
- @staticmethod
- def consumer():
- return ArgConsumer(
- ['--split'],
- help='split by type',
- hook=SetConstValueHook('split_by_type', True),
- group=DUMP_OPTS_GROUP,
- )
-
-
-class DumpDirOptions(Options):
- def __init__(self):
- self.trace_from = None
- self.plain_dump = False
- self.dump_deps = False
- self.dump_reachable_dirs = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--trace'],
- help='trace from sources',
- hook=SetValueHook('trace_from'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--plain'],
- help='plain dump mode',
- hook=SetConstValueHook('plain_dump', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--dump-deps'],
- help='Dump dependencies (legacy)',
- hook=SetConstValueHook('dump_deps', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--reachable-dirs'],
- help='Dump reachable dirs',
- hook=SetConstValueHook('dump_reachable_dirs', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class PeerDirectoriesOptions(Options):
- def __init__(self):
- self.peerdir_opts = []
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--old-peerdirs'],
- help='Include indirect (module->dir->module) PEERDIRs',
- hook=SetConstAppendHook('peerdir_opts', 'I'),
- group=DUMP_OPTS_GROUP,
- visible=False,
- )
- ]
-
-
-class LegacyDepsOptions(Options):
- def __init__(self):
- self.legacy_deps_opts = ['I']
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--no-legacy-deps'],
-                help='Exclude legacy dependencies (indirect PEERDIR relations)',
- hook=SetConstValueHook('legacy_deps_opts', []),
- group=DUMP_OPTS_GROUP,
- )
- ]
-
-
-class DumpSrcDepsOptions(Options):
- def __init__(self):
- self.with_yamake = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--with-yamakes'],
-                help='Include ya.make files that are only used as a build configuration',
- hook=SetConstValueHook('with_yamake', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DepFilterOptions(Options):
- def __init__(self):
- self.filter_opts = []
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--no-tools'],
- help='Exclude tools dependencies',
- hook=SetConstAppendHook('filter_opts', 'V'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--no-addincls'],
- help='Exclude ADDINCLs dependencies',
- hook=SetConstAppendHook('filter_opts', 'T'),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DepGraphOutputType(enum.Enum):
- CUSTOM = 0
- FLAT_JSON = 1
- FLAT_JSON_FILES = 2
-
-
-class DepGraphOutputOptions(Options):
- def __init__(self):
- self.output_type = DepGraphOutputType.CUSTOM
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--flat-json'],
- help='Dump dep-graph in flat json format',
- hook=SetConstValueHook('output_type', DepGraphOutputType.FLAT_JSON),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--flat-json-files'],
- help='Dump dep-graph in flat json format without commands',
- hook=SetConstValueHook('output_type', DepGraphOutputType.FLAT_JSON_FILES),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DepTraverseOptions(Options):
- @staticmethod
- def consumer():
- def test_depends_updater(flags):
- flags['TRAVERSE_DEPENDS'] = 'yes'
- flags['TRAVERSE_RECURSE_FOR_TESTS'] = 'yes'
- return flags
-
- def ignore_recurses_updater(flags):
- flags['TRAVERSE_RECURSE'] = 'no'
- return flags
-
- # Hooks update flags property from FlagsOptions
- return [
- ArgConsumer(
- ['-t', '--force-build-depends'],
- help='Include DEPENDS and RECURSE_FOR_TESTS dependencies',
- hook=UpdateValueHook('flags', test_depends_updater),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['-A'],
- help='Same as -t',
- hook=UpdateValueHook('flags', test_depends_updater),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--ignore-recurses'],
- help='Exclude all RECURSE dependencies',
- hook=UpdateValueHook('flags', ignore_recurses_updater),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DataOptions(Options):
- def __init__(self):
- self.with_data = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--with-data'],
- help='Include DATA files and dirs',
- hook=SetConstValueHook('with_data', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DumpTestListOptions(Options):
- def __init__(self):
- self.follow_dependencies = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--skip-deps'],
- help='Now default and ignored',
- hook=SetConstValueHook('follow_dependencies', False),
- group=DUMP_OPTS_GROUP,
- visible=False,
- deprecated=True,
- ),
- ]
-
-
-class DumpLicenseOptions(Options):
- def __init__(self):
- self.json_licenses = False
- self.link_type = 'static'
- self.custom_tags = []
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--json'],
- help='Machine oriented json representation',
- hook=SetConstValueHook('json_licenses', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--link-type'],
- help='Specify library link type (static|dynamic)',
- hook=SetValueHook('link_type'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--custom-license-tag'],
-                help='Add a custom license tag (may be specified multiple times)',
- hook=SetAppendHook('custom_tags'),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DumpForcedDepsOptions(Options):
- def __init__(self):
- self.json_forced_deps = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--json'],
- help='Json forced dependency representation',
- hook=SetConstValueHook('json_forced_deps', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-def FullForcedDepsOptions():
- return (
- DumpYaHandler.common_opts
- + DumpYaHandler.common_build_facade_opts()
- + [
- YMakeDebugOptions(),
- CustomBuildRootOptions(),
- DumpForcedDepsOptions(),
- ]
- )
-
-
-class RelationSrcDstOptions(Options):
- def __init__(self, free_dst=False):
- self._free_dst = free_dst
- self.recursive = False
- self.relation_src = []
- self.relation_dst = []
-
- def consumer(self):
- res = [
- ArgConsumer(
- ['--from'],
- help='Dump relations from this target (path relative to the arcadia root)',
- hook=SetAppendHook('relation_src'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--to'],
- help='Dump relations to this target (path relative to the arcadia root)',
- hook=SetAppendHook('relation_dst'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--recursive'],
- help='Show relations between RELATION_SRC and all modules from RELATION_DST directories',
- hook=SetConstValueHook('recursive', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
- if self._free_dst:
- res.append(
- FreeArgConsumer(
- help='relation destination target',
- hook=ExtendHook('relation_dst'),
- )
- )
- return res
-
- def postprocess(self):
- if len(self.relation_dst) == 0:
- raise ArgsValidatingException('Error: no target is set')
-
-
-class FilesFilterOptions(Options):
- def __init__(self):
- self.skip_make_files = False
- self.mark_make_files = False
-
- def consumer(self):
- res = [
- ArgConsumer(
- ['--skip-make-files'],
- help='Skip all make files',
- hook=SetConstValueHook('skip_make_files', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--mark-make-files'],
- help='Mark all make files as Makefile',
- hook=SetConstValueHook('mark_make_files', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
- return res
-
-
-class DumpAllRelationsOptions(Options):
- def __init__(self):
- self.json_format = False
- self.show_targets_deps = False
-
- def consumer(self):
- res = [
- ArgConsumer(
- ['--json'],
- help='Dump relations in json format',
- hook=SetConstValueHook('json_format', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--show-targets-deps'],
- help='Show dependencies between RELATION_DST targets',
- hook=SetConstValueHook('show_targets_deps', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
- return res
-
-
-class DumpDescriptionOptions(Options):
- def __init__(self):
- self.dump_all_conf_docs = False
- self.conf_docs_json = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--dump-all'],
- help='Dump information for all entities including internal',
- hook=SetConstValueHook('dump_all_conf_docs', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--json'],
-                help='Dump information for all entities (including internal) in json format; uses are included',
- hook=SetConstValueHook('conf_docs_json', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DumpAtdRevisionOptions(Options):
- def __init__(self):
- self.human_readable = False
- self.print_size = False
- self.arcadia_revision = None
- self.path = None
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--human-readable'],
- help='Add human-readable paths to comments',
- hook=SetConstValueHook('human_readable', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--print-size'],
- help='Print sizes',
- hook=SetConstValueHook('print_size', True),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--revision'],
- help='Gather revisions for this arcadia revision',
- hook=SetValueHook('arcadia_revision'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--path'],
- help='Gather revisions for this path recursively',
- hook=SetValueHook('path'),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DumpModuleInfoOptions(Options):
- def __init__(self):
- self.modules_info_file = ''
- self.modules_info_filter = ''
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--modules-info-file'],
- help='Save modules information into the specified file',
- hook=SetValueHook('modules_info_file'),
- group=DUMP_OPTS_GROUP,
- ),
- ArgConsumer(
- ['--modules-info-filter'],
- help='Dump information only for modules matching regexp',
- hook=SetValueHook('modules_info_filter'),
- group=DUMP_OPTS_GROUP,
- ),
- ]
-
-
-class DumpRecipesOptions(Options):
- def __init__(self):
- self.json_format = False
-
- def consumer(self):
- res = [
- ArgConsumer(
- ['--json'],
- help='Dump recipes in json format',
- hook=SetConstValueHook('json_format', True),
- group=DUMP_OPTS_GROUP,
- ),
- ]
- return res
-
-
-def do_dump_srcdeps(params):
- debug_options = []
- params.flags['YMAKE_ADD_DATA'] = 'yes'
- if params.with_yamake:
- debug_options.append('mkf')
- _do_dump(gen_srcdeps, params, debug_options)
-
-
-def do_dot_graph(params):
- _do_dump(gen_modules, params, ['D'])
-
-
-def do_dep_graph(params):
- options = params.legacy_deps_opts
- if params.output_type == DepGraphOutputType.FLAT_JSON:
- options.append('flat-json-with-cmds')
- elif params.output_type == DepGraphOutputType.FLAT_JSON_FILES:
- options.append('flat-json')
- _do_dump(gen_graph, params, options)
-
-
-def do_json_dep_graph(params):
- _do_dump(gen_json_graph, params, params.legacy_deps_opts)
-
-
-def do_licenses(params):
- _do_dump(
- gen_licenses_list,
- params,
- write_stdout=True,
- debug_options=['lic-json' if params.json_licenses else 'lic'],
- lic_link_type=params.link_type,
- lic_custom_tags=params.custom_tags,
- )
-
-
-def do_forced_deps(params, write_stdout=True):
- res = _do_dump(
- gen_forced_deps_list,
- params,
- write_stdout=write_stdout,
- debug_options=['fdm-json' if params.json_forced_deps else 'fdm'],
- )
- return '' if write_stdout else res.stdout
-
-
-# TODO: support host/target platforms opts
-def do_build_plan(params):
- with temp_dir() as tmp:
- graph = gen_plan(
- arc_root=params.arc_root,
- build_root=tmp,
- build_type=params.build_type,
- build_targets=params.abs_targets,
- debug_options=params.debug_options,
- flags=params.flags,
- ymake_bin=params.ymake_bin,
- no_ymake_resource=params.no_ymake_resource,
- vcs_file=params.vcs_file,
- )
- json.dump(graph, sys.stdout, indent=4, sort_keys=True)
-
-
-def do_relation(params):
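- # Unlike do_all_relations, this dumps a separate relation report for each destination target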
- options = []
- if params.recursive:
- options.append('recursive')
- for dst in params.relation_dst:
- _do_dump(
- gen_relation,
- params,
- options,
- find_path_from=params.relation_src,
- find_path_to=[dst],
- )
-
-
-def do_all_relations(params):
- options = ['J' if params.json_format else 'D']
- if params.recursive:
- options.append('recursive')
- if params.show_targets_deps:
- options.append('show-targets-deps')
- _do_dump(
- gen_relation,
- params,
- options,
- find_path_from=params.relation_src,
- find_path_to=params.relation_dst,
- )
-
-
-def do_files(params):
- options = []
- if params.with_data:
- options = ['dump-data']
- params.flags['YMAKE_ADD_DATA'] = 'yes'
- if params.skip_make_files:
- options.append('skip-make-files')
- if params.mark_make_files:
- options.append('mark-make-files')
- _do_dump(gen_filelist, params, options)
-
-
-def do_loops(params):
- _do_dump(gen_all_loops, params)
-
-
-def do_peerdir_loops(params):
- _do_dump(gen_dir_loops, params)
-
-
-def do_imprint(params):
- lines = imprint.generate_detailed_imprints(os.curdir)
- writer = csv.writer(sys.stdout, delimiter='\t')
- for line in lines:
- writer.writerow(line)
-
-
-# TODO: support host/target platforms opts
-def do_uids(params):
- uids = gen_uids(
- arc_root=params.arc_root,
- build_root=None,
- build_type=params.build_type,
- build_targets=params.abs_targets,
- debug_options=[],
- flags={},
- ymake_bin=None,
- )
- json.dump(uids, sys.stdout, indent=4, sort_keys=True)
-
-
-def get_tests(params):
- params.flags['TRAVERSE_RECURSE_FOR_TESTS'] = 'yes'
- kwargs = {
- 'build_root': params.bld_root,
- 'build_type': params.build_type,
- 'abs_targets': params.abs_targets,
- 'debug_options': [],
- 'flags': params.flags,
- 'ymake_bin': params.ymake_bin,
- 'arcadia_tests_data_path': 'arcadia_tests_data',
- }
- (
- _,
- test_dart,
- ) = gen_test_dart(**kwargs)
-
- return sorted(
- generate_tests_by_dart(
- test_dart,
- opts=params,
- ),
- key=lambda test: os.path.join(test.project_path, test.name),
- )
-
-
-def do_json_test_list(params):
- json.dump([t.save() for t in get_tests(params)], sys.stdout, indent=4, sort_keys=True)
-
-
-def do_test_list(params):
- print(pickle.dumps(get_tests(params)))
-
-
-def do_conf(params):
- with temp_dir() as tmp:
- generation_conf = _do_dump(gen_conf, params, write_stdout=False, build_root=tmp)
- print(open(generation_conf, 'r').read())
-
-
-def do_conf_docs(params):
- _do_dump(dump_mmm_docs, params, dump_all_conf_docs=params.dump_all_conf_docs, conf_docs_json=params.conf_docs_json)
-
-
-def do_dump_raw_vcs_info(params):
- sys.stdout.write(json.dumps(vcsversion.get_raw_version_info(params.arc_root, params.bld_root)))
-
-
-def do_dump_vcs_info(params):
- fake_data = strtobool(params.flags.get('NO_VCS_DEPENDS', 'no'))
- fake_build_info = strtobool(params.flags.get('CONSISTENT_BUILD', 'no'))
- sys.stdout.write(vcsversion.get_version_info(params.arc_root, params.bld_root, fake_data, fake_build_info) + '\n')
-
-
-def do_dump_svn_revision(params):
- sys.stdout.write(str(vcsversion.repo_config(params.arc_root)))
-
-
-def do_recipes(params):
- tests = get_tests(params)
- test_recipes = []
- for test in tests:
- encoded_recipes = test.recipes
- if not encoded_recipes:
- continue
- decoded_recipes = decode_recipe_cmdline(encoded_recipes)
- test_recipes.append(decoded_recipes)
-
- recipes = set()
- for test_recipe in test_recipes:
- for recipe in test_recipe:
- recipes.add(tuple(recipe))
-
- if params.json_format:
- output = json.dumps(sorted(recipes), indent=4, sort_keys=True)
- else:
- recipes_str = [' '.join(recipe) for recipe in recipes]
- output = ' '.join(recipes_str)
- print(output)
diff --git a/devtools/ya/handlers/dump/gen_conf_docs.py b/devtools/ya/handlers/dump/gen_conf_docs.py
deleted file mode 100644
index 4b9978f226..0000000000
--- a/devtools/ya/handlers/dump/gen_conf_docs.py
+++ /dev/null
@@ -1,384 +0,0 @@
-from __future__ import absolute_import, unicode_literals
-import exts.yjson as json
-import re
-import os
-import six
-
-import core.config
-
-import build.genconf
-import build.ymake2
-
-from yalibrary.vcs import vcsversion
-from build.build_facade import gen_conf
-from exts.strtobool import strtobool
-
-
-class _Markdown:
- header = '#'
- alink = 'https://a.yandex-team.ru/arc/trunk/arcadia/'
- # Section nesting count
- scount = 2
- # Description nesting count
- dcount = 6
- # Avoid too many entries in toc for macros
- chunks_in_toc = 10
- # Checks description
- internal_pattern = re.compile(r'^\s*@?internal[.]?\s*$', flags=re.IGNORECASE)
- # Checks description
- deprecated_pattern = re.compile(r'^\s*@?deprecated[.]?\s*$', flags=re.IGNORECASE)
- # Checks header
- h_patterns = {
- 'internal': re.compile('.*#.*internal.*', flags=re.IGNORECASE),
- 'deprecated': re.compile('.*#.*deprecated.*', flags=re.IGNORECASE),
- }
-
- def __init__(self, arc_root, dump_all_descs, use_svn):
- self.descs = {'macros': {}, 'modules': {}, 'multimodules': {}, 'unknowns': {}}
- self.links = {}
- self.anchors = {}
- try:
- self.svn_revision, _ = vcsversion.repo_config(arc_root) if use_svn else (-1, '')
- except Exception:
- self.svn_revision = -1
- self.dump_all_descs = dump_all_descs
-
- def process_entry(self, file, entry):
- if 'kind' not in entry or entry['kind'] != 'node':
- return
-
- self._add_entry_optionally(file, entry)
-
- def dump(self):
- res = self._dump_toc()
-
- for type in ['multimodules', 'modules', 'macros', 'unknowns']:
- if self.descs[type]:
- res += _Markdown._format_section(type)
-
- for name in sorted(self.descs[type]):
- for d in self.descs[type][name]['text']:
- res += six.ensure_str(d)
-
- for type in ['multimodules', 'modules', 'macros', 'unknowns']:
- if self.descs[type]:
- for name in sorted(self.descs[type]):
- link = self.descs[type][name]['link']
- res += six.ensure_str(link)
-
- return res
-
- def _add_entry_optionally(self, file, entry):
- props = entry['properties']
- doc = {'name': props['name'], 'type': props['type'], 'file': file, 'line': entry['range']['line']}
-
- if 'comment' in props:
- doc['long desc'] = props['comment']
- if 'usage' in props:
- doc['usage'] = props['usage']
-
- doc['revision'] = self.svn_revision
-
- descs, link = self._format_entry(doc)
-
- dictionary = (
- self.descs[doc['type'] + 's']
- if doc['type'] in ['macro', 'module', 'multimodule']
- else self.descs['unknowns']
- )
-
- if not self.dump_all_descs and _Markdown._is_internal(descs[0]):
- return
-
- dictionary[doc['name']] = {'text': descs, 'link': link, 'src_data': doc}
-
- @classmethod
- def _is_internal(cls, header):
- return cls.h_patterns['internal'].match(header)
-
- def _dump_toc(self):
- res = '*Do not edit, this file is generated from comments to macros definitions using `ya dump conf-docs{all}`.*\n\n'.format(
- all=' --dump-all' if self.dump_all_descs else ''
- )
- res += '{markup} ya.make {all}commands\n\n'.format(
- markup=_Markdown.header, all='and core.conf ' if self.dump_all_descs else ''
- )
- res += (
- 'General info: [How to write ya.make files](https://wiki.yandex-team.ru/yatool/HowToWriteYaMakeFiles)\n\n'
- )
- res += '{markup} Table of contents\n\n'.format(markup=_Markdown.header * 2)
-
- for type in ['multimodules', 'modules', 'macros', 'unknowns']:
- if self.descs[type]:
- res += _Markdown._format_toc_section(type)
- if type != 'macros':
- for name in sorted(self.descs[type]):
- res += _Markdown._format_toc_header(self.descs[type][name]['src_data'])
- else:
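- # Macros are grouped into runs of chunks_in_toc entries to keep the table of contents short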
- chunk_cnt = 0
- first_macro = {}
- last_macro = {}
- for name in sorted(self.descs[type]):
- if chunk_cnt == 0:
- first_macro = self.descs[type][name]['src_data']
- chunk_cnt += 1
- last_macro = self.descs[type][name]['src_data']
-
- if chunk_cnt == _Markdown.chunks_in_toc:
- chunk_cnt = 0
- res += _Markdown._format_toc_macro_header(first_macro, last_macro)
-
- if chunk_cnt != 0:
- res += _Markdown._format_toc_macro_header(first_macro, last_macro)
- return res
-
- @classmethod
- def _format_entry(cls, doc):
- descs = []
-
- lines, special_tags = _Markdown._format_desc(doc)
- descs.append(_Markdown._format_header(doc, special_tags))
- descs.append(lines)
- return (descs, _Markdown._format_link(doc))
-
- @classmethod
- def _format_toc_section(cls, type):
- return '{indent} * [{section}](#{anchor})\n'.format(
- indent=' ' * cls.scount, section=type.capitalize(), anchor=type
- )
-
- @classmethod
- def _format_section(cls, type):
- return '{markup} {text} <a name="{anchor}"></a>\n\n'.format(
- markup=cls.header * cls.scount, text=type.capitalize(), anchor=type
- )
-
- @staticmethod
- def _format_header_anchor(doc):
- return '<a name="{atype}_{aname}"></a>'.format(atype=doc['type'], aname=doc['name'])
-
- @classmethod
- def _format_toc_header(cls, doc):
- qual = doc['type'].capitalize() if doc['type'] in ['macro', 'module', 'multimodule'] else "Unknown"
- return '{indent} - {qual} [{name}](#{type}_{name})\n'.format(
- indent=' ' * cls.dcount, qual=qual, name=doc['name'], type=doc['type']
- )
-
- @classmethod
- def _format_toc_macro_header(cls, fdoc, ldoc):
- return '{indent} - Macros [{fname}](#{ftype}_{fname}) .. [{lname}](#{ltype}_{lname})\n'.format(
- indent=' ' * cls.dcount, fname=fdoc['name'], ftype=fdoc['type'], lname=ldoc['name'], ltype=ldoc['type']
- )
-
- # Also adds special tags 'internal' and 'deprecated'
- @classmethod
- def _format_header(cls, doc, special_tags):
- name = doc['name']
- usage = doc['usage'] if 'usage' in doc else ""
- usage = name if not usage else usage
-
- qual = doc['type'].capitalize() if doc['type'] in ['macro', 'module', 'multimodule'] else "Unknown"
-
- usage = _Markdown._remove_formatting(usage.rstrip().lstrip())
- name = _Markdown._remove_formatting(name)
-
- usage = usage.replace(name, '[' + name + "][]", 1)
-
- if special_tags:
- special = ''
- for tag in special_tags:
- if not cls.h_patterns[tag].match(usage):
- special += ' ' + tag
- # Emphasis
- if usage.find("#") != -1:
- usage += special
- else:
- usage += ' #' + special[1:]
-
- # Emphasis
- if usage.find("#") != -1:
- usage = usage.replace('#', "_#", 1)
- usage += "_"
-
- return '{markup} {type} {usage} {anchor}\n'.format(
- markup=cls.header * cls.dcount, type=qual, usage=usage, anchor=_Markdown._format_header_anchor(doc)
- )
-
- # Prints verbatim. Strips unnecessary indent and escapes '_'/'*'.
- @classmethod
- def _format_desc(cls, doc):
- result = ""
-
- desc = ""
- if 'long desc' in doc:
- desc = doc['long desc'].rstrip()
- if not desc:
- desc = " Not documented yet.\n"
-
- lines = _Markdown._strip_blanks(desc.splitlines())
- lines = _Markdown._remove_formatting_markdown(lines)
-
- result += '\n'.join(lines) + "\n\n"
-
- special_tags = []
- if doc['name'].startswith('_') or any([cls.internal_pattern.match(x) for x in lines]):
- special_tags.append("internal")
- if any([cls.deprecated_pattern.match(x) for x in lines]):
- special_tags.append("deprecated")
-
- return (result, special_tags)
-
- @staticmethod
- def _format_link(doc):
- return ' [{tag_name}]: {baselink}{file}{rev}#L{line}\n'.format(
- tag_name=_Markdown._remove_formatting(doc['name']),
- baselink=_Markdown.alink,
- file=doc['file'],
- rev='?rev=' + str(doc['revision']) if doc['revision'] > 0 else '',
- line=doc['line'],
- )
-
- @staticmethod
- def _strip_blanks(lines):
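- # Drop leading and trailing blank lines, then remove the common leading indentation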
- first = 0
- for line in lines:
- if not line.lstrip().rstrip():
- first += 1
- else:
- break
- last = 0
- for line in reversed(lines):
- if not line.lstrip().rstrip():
- last += 1
- else:
- break
- lines = lines[first : (len(lines) - last)]
- lines = [x.rstrip().expandtabs(4) for x in lines]
- indent = 10000
- for line in lines:
- if line:
- indent = min(indent, len(line) - len(line.lstrip()))
-
- if indent > 0:
- lines = [x.replace(' ' * indent, '', 1) for x in lines]
-
- return lines
-
- # Conditionally removes formatting.
- # Code blocks are not modified
- @staticmethod
- def _remove_formatting_markdown(lines):
- code = False
- new_paragraph = True
- backtick_block = False
-
- res = []
- for line in lines:
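- # The code state is only re-evaluated on the first line after a blank line: paragraphs indented with a tab or four spaces keep their formatting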
- if new_paragraph:
- if line.startswith('\t') or line.startswith(' ' * 4):
- code = True
-
- if not line.startswith('\t') and not line.startswith(' ' * 4):
- code = False
-
- new_paragraph = not line
-
- if line.lstrip() == '```':
- backtick_block = not backtick_block
-
- res.append(line if code or backtick_block else _Markdown._remove_formatting(line))
- return res
-
- # Unconditionally escapes '_' and '*' to disable markdown formatting
- @staticmethod
- def _remove_formatting(x):
- return x.replace("_", r"\_").replace("*", r"\*")
-
-
-def _gen(
- custom_build_directory,
- build_type,
- build_targets,
- debug_options,
- flags=None,
- warn_mode=None,
- ymake_bin=None,
- platform=None,
- host_platform=None,
- target_platforms=None,
- **kwargs
-):
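- # Generate a ymake configuration first, then run ymake in dump mode against it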
- generation_conf = gen_conf(
- build_root=custom_build_directory,
- build_type=build_type,
- build_targets=build_targets,
- flags=flags,
- host_platform=host_platform,
- target_platforms=target_platforms,
- )
- res, evlog_dump = build.ymake2.ymake_dump(
- custom_build_directory=custom_build_directory,
- build_type=build_type,
- abs_targets=build_targets,
- debug_options=debug_options,
- warn_mode=warn_mode,
- flags=flags,
- ymake_bin=ymake_bin,
- platform=platform,
- grab_stderr=True,
- custom_conf=generation_conf,
- **kwargs
- )
- return res
-
-
-def dump_mmm_docs(
- build_root,
- build_type,
- build_targets,
- debug_options,
- flags,
- dump_all_conf_docs=None,
- conf_docs_json=None,
- ymake_bin=None,
- platform=None,
- host_platform=None,
- target_platforms=None,
-):
- json_dump_name = os.path.join(build_root, 'ymake.dump.ydx.json')
- arc_root = core.config.find_root_from(build_targets)
- null_ya_make = os.path.join(arc_root, 'build', 'docs', 'empty')
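- # For the markdown dump ymake is pointed at this empty project: loading the configuration is enough to index every macro and module without traversing real targets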
-
- if not conf_docs_json:
- if not os.path.exists(null_ya_make):
- raise "Empty project not found, dump conf-docs may work too long"
-
- res = _gen(
- custom_build_directory=build_root,
- build_type=build_type,
- # Override target
- build_targets=[null_ya_make] if not conf_docs_json else build_targets,
- debug_options=debug_options,
- flags=flags,
- ymake_bin=ymake_bin,
- platform=platform,
- host_platform=host_platform,
- target_platforms=target_platforms,
- yndex_file=json_dump_name,
- )
-
- if conf_docs_json:
- with open(json_dump_name, 'r') as jfile:
- res.stdout += jfile.read()
- else:
- with open(json_dump_name, 'r') as jfile:
- contents = jfile.read()
- jdata = json.loads(contents)
- no_svn = True if 'NO_SVN_DEPENDS' in flags and strtobool(flags['NO_SVN_DEPENDS']) else False
- doc = _Markdown(arc_root, dump_all_conf_docs, not no_svn)
- for efile in jdata:
- for entry in jdata[efile]:
- doc.process_entry(efile, entry)
- res.stdout += doc.dump()
-
- return res
diff --git a/devtools/ya/handlers/dump/ya.make b/devtools/ya/handlers/dump/ya.make
deleted file mode 100644
index adbc66ad9f..0000000000
--- a/devtools/ya/handlers/dump/ya.make
+++ /dev/null
@@ -1,52 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.dump
- __init__.py
- gen_conf_docs.py
-)
-
-PEERDIR(
- contrib/python/pathlib2
- contrib/python/six
- contrib/python/toml
- devtools/ya/app
- devtools/ya/build
- devtools/ya/build/build_facade
- devtools/ya/build/build_opts
- devtools/ya/build/genconf
- devtools/ya/build/ymake2
- devtools/ya/core
- devtools/ya/core/config
- devtools/ya/core/imprint
- devtools/ya/core/yarg
- devtools/ya/exts
- devtools/ya/test/dartfile
- devtools/ya/test/explore
- devtools/ya/yalibrary/debug_store
- devtools/ya/yalibrary/debug_store/processor
- devtools/ya/yalibrary/debug_store/store
- devtools/ya/yalibrary/tools
- devtools/ya/yalibrary/vcs
- devtools/ya/yalibrary/vcs/vcsversion
- devtools/ya/yalibrary/yandex/sandbox/misc
- library/python/fs
- library/python/func
- library/python/tmp
-)
-
-IF (NOT YA_OPENSOURCE)
- PEERDIR(
- devtools/ya/handlers/dump/arcadia_specific
- devtools/ya/handlers/dump/debug
- )
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/gc/__init__.py b/devtools/ya/handlers/gc/__init__.py
deleted file mode 100644
index 824a35edf2..0000000000
--- a/devtools/ya/handlers/gc/__init__.py
+++ /dev/null
@@ -1,342 +0,0 @@
-from __future__ import absolute_import
-import glob
-import os
-import logging
-import sys
-import time
-import six
-
-import app
-import core.yarg
-import core.common_opts
-import core.config as cc
-import build.ya_make as ym
-from exts import fs
-from core.common_opts import CustomBuildRootOptions
-from build.build_opts import LocalCacheOptions, DistCacheSetupOptions, parse_size_arg, parse_timespan_arg
-from exts.windows import on_win
-from yalibrary.runner import result_store
-import yalibrary.toolscache as tc
-
-if six.PY3:
- long = int
-
-logger = logging.getLogger(__name__)
-
-
-def _to_size_in_gb(size):
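- # A bare number is interpreted as GiB; anything else is passed to the generic size parser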
- try:
- return int(float(size) * 1024 * 1024 * 1024)
- except ValueError:
- pass
-
- return parse_size_arg(size)
-
-
-def _to_size_in_mb(size):
- try:
- return int(float(size) * 1024 * 1024)
- except ValueError:
- pass
-
- return parse_size_arg(size)
-
-
-def _to_timespan_in_hours(timespan):
- try:
- return int(float(timespan) * 60 * 60)
- except ValueError:
- pass
-
- return parse_timespan_arg(timespan)
-
-
-class CollectCacheOptions(LocalCacheOptions):
- def __init__(self):
- super(CollectCacheOptions, self).__init__()
- self.object_size_limit = None
- self.age_limit = None
- self.symlinks_ttl = 0
-
- def consumer(self):
- return super(CollectCacheOptions, self).consumer() + [
- core.yarg.ArgConsumer(
- ['--size-limit'],
- help='Strip build cache to size (in GiB if not set explicitly)',
- hook=core.yarg.SetValueHook(
- 'cache_size',
- transform=_to_size_in_gb,
- default_value=lambda x: str(_to_size_in_gb(x) * 1.0 / 1024 / 1024 / 1024),
- ),
- group=core.yarg.BULLET_PROOF_OPT_GROUP,
- visible=False,
- ),
- core.yarg.ArgConsumer(
- ['--object-size-limit'],
- help='Strip build cache from large objects (in MiB if not set explicitly)',
- hook=core.yarg.SetValueHook('object_size_limit', transform=_to_size_in_mb),
- group=core.yarg.BULLET_PROOF_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--age-limit'],
- help='Strip build cache from old objects (in hours if not set explicitly)',
- hook=core.yarg.SetValueHook('age_limit', transform=_to_timespan_in_hours),
- group=core.yarg.BULLET_PROOF_OPT_GROUP,
- ),
- ]
-
-
-class GarbageCollectionYaHandler(core.yarg.CompositeHandler):
- def __init__(self):
- core.yarg.CompositeHandler.__init__(self, description='Collect garbage')
- self['cache'] = core.yarg.OptsHandler(
- action=app.execute(action=do_cache, respawn=app.RespawnType.OPTIONAL),
- description='Strip build cache and old build directories',
- opts=[core.common_opts.ShowHelpOptions(), CollectCacheOptions(), CustomBuildRootOptions()],
- visible=True,
- )
- self['dist_cache'] = core.yarg.OptsHandler(
- action=app.execute(action=do_strip_yt_cache, respawn=app.RespawnType.NONE),
- description='Strip distributed (YT) cache',
- opts=[core.common_opts.ShowHelpOptions(), DistCacheSetupOptions()],
- visible=False,
- )
-
-
-class FilterBySize(object):
- def __init__(self, size_limit):
- self.size_limit = size_limit
- self.total_size = 0
- self._items = {}
-
- def __call__(self, item):
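- # Accumulate sizes of distinct items and keep returning True until the running total reaches the limit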
- if item.uid in self._items:
- return False
- size = item.size
- self.total_size += size
- self._items[item.uid] = item
- return self.total_size < self.size_limit
-
-
-class FilterByObjectSize(object):
- def __init__(self, size_limit):
- self.size_limit = size_limit
-
- def __call__(self, item):
- return item.size < self.size_limit
-
-
-class FilterByAge(object):
- def __init__(self, age_limit):
- self.age_limit = age_limit
- self.now = time.time()
-
- def __call__(self, item):
- return item.timestamp > self.now - self.age_limit
-
-
-def _clean_dir_selectively(logs, file_dir, file_name):
- try:
- for i in os.listdir(logs):
- date_dir = os.path.join(logs, i)
- if date_dir != file_dir:
- fs.remove_tree_safe(date_dir)
-
- if file_dir:
- for i in os.listdir(file_dir):
- if i != file_name:
- fs.remove_tree_safe(os.path.join(file_dir, i))
- except OSError as e:
- logger.debug('Cleaning %s root failed %s', logs, e)
-
-
-def _clean_logs():
- logger.debug('Cleaning logs root')
-
- try:
- import app_ctx
-
- file_dir, file_name = os.path.split(app_ctx.file_log)
- except Exception as e:
- file_dir, file_name = ('', '')
- logger.debug('Log name was not obtained %s', e)
-
- logs = os.path.join(cc.misc_root(), 'logs')
- _clean_dir_selectively(logs, file_dir, file_name)
-
-
-def _clean_evlogs():
- logger.debug('Cleaning evlogs root')
-
- try:
- import app_ctx
-
- file_dir, file_name = os.path.split(app_ctx.evlog._fileobj.name)
- except Exception as e:
- file_dir, file_name = ('', '')
- logger.debug('Log name was not obtained %s', e)
-
- evlogs = os.path.join(cc.misc_root(), 'evlogs')
- _clean_dir_selectively(evlogs, file_dir, file_name)
-
-
-def _clean_tools():
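- # Remove tool directories except the toolscache ('v4', unless it has corrupted entries),
- # the directory of the running ya binary, and small marker files that reference it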
- running_ya_bin_dir = os.path.dirname(sys.argv[0])
- base_running_ya_bin_dir = os.path.basename(running_ya_bin_dir).replace('_d', '')
- errors = 0
- for i in os.listdir(os.path.dirname(cc.tool_root())):
- full_path = os.path.join(os.path.dirname(cc.tool_root()), i)
- if i == 'v4':
- if len(glob.glob(os.path.join(full_path, '*.corrupted'))) == 0:
- tc.unlock_all()
- continue
- else:
- errors += 1
- elif full_path == running_ya_bin_dir:
- continue
- elif os.path.isfile(full_path):
- try:
- if os.path.getsize(full_path) < 1024 * 100:
- with open(full_path, 'r') as f:
- if base_running_ya_bin_dir in f.read():
- continue
- except Exception:
- errors += 1
- logger.debug('Cleaning tool %s', full_path)
- fs.remove_tree_safe(full_path)
- return errors
-
-
-def do_cache(opts):
- build_root = opts.custom_build_directory or cc.build_root()
-
- cache = None
- try:
- cache = ym.make_cache(opts, build_root)
- except Exception:
- logger.exception("While initializing cache")
-
- lock = ym.make_lock(opts, build_root, write_lock=True)
-
- with lock:
- if opts.cache_stat:
- import app_ctx
-
- if not cache:
- app_ctx.display.emit_message("Cache not initialized, can't show stats")
- else:
- cache.analyze(app_ctx.display)
- errors = 0
- else:
- errors = _do_collect_cache(cache, build_root, opts)
-
- if cache:
- cache.flush()
-
- logger.debug(
- "ya gc stats %s",
- {
- 'cache_size': opts.cache_size,
- 'object_size_limit': opts.object_size_limit,
- 'age_limit': opts.age_limit,
- 'symlinks_ttl': opts.symlinks_ttl,
- 'errors': errors,
- },
- )
-
-
-def _do_collect_cache(cache, build_root, opts):
- logger.debug('Cleaning tmp root')
- fs.remove_tree_safe(cc.tmp_path())
-
- logger.debug('Cleaning snowden root')
- fs.remove_tree_safe(os.path.join(cc.misc_root(), 'snowden'))
-
- logger.debug('Cleaning build root')
- fs.remove_tree_safe(os.path.join(build_root, 'build_root'))
-
- logger.debug('Cleaning conf root')
- fs.remove_tree_safe(os.path.join(build_root, 'conf'))
-
- errors = _clean_tools()
-
- logger.debug('Cleaning tmp root')
- fs.remove_tree_safe(os.path.join(cc.misc_root(), 'tmp'))
-
- _clean_logs()
- _clean_evlogs()
-
- if not on_win():
- logger.debug('Cleaning symres')
- symres_dir = os.path.join(build_root, 'symres')
- src_dir = cc.find_root(fail_on_error=False) or ""
- if os.path.isdir(symres_dir):
- logger.debug('Cleaning symres %s for %s, ttl=%s', symres_dir, src_dir, opts.symlinks_ttl)
- result_store.SymlinkResultStore(symres_dir, src_dir).sieve(
- state=None, ttl=opts.symlinks_ttl, cleanup=opts.symlinks_ttl == 0
- )
-
- if hasattr(cache, '_store_path'):
- for dir in os.listdir(os.path.join(build_root, 'cache')):
- full_path = os.path.join(build_root, 'cache', dir)
- if cache._store_path == full_path:
- if len(glob.glob(os.path.join(full_path, '*.corrupted'))) == 0:
- continue
- else:
- errors += 1
- logger.debug('Cleaning cache directory %s', full_path)
- fs.remove_tree_safe(full_path)
-
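- # Cache implementations expose different stripping APIs: prefer the generic strip() filter and fall back to the specialised methods; total-size stripping only runs when no object-size or age limit is given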
- if opts.cache_size is not None and opts.object_size_limit is None and opts.age_limit is None:
- logger.debug('Cleaning for total size %s', opts.cache_size)
- if hasattr(cache, 'strip'):
- cache.strip(FilterBySize(opts.cache_size))
- elif hasattr(cache, 'strip_total_size'):
- cache.strip_total_size(opts.cache_size)
-
- from yalibrary.toolscache import tc_force_gc
-
- tc_force_gc(opts.cache_size)
-
- if opts.object_size_limit is not None:
- logger.debug('Cleaning for object size %s', opts.object_size_limit)
- if hasattr(cache, 'strip'):
- cache.strip(FilterByObjectSize(opts.object_size_limit))
- elif hasattr(cache, 'strip_max_object_size'):
- cache.strip_max_object_size(opts.object_size_limit)
-
- if opts.age_limit is not None:
- logger.debug('Cleaning for age %s', opts.age_limit)
- if hasattr(cache, 'strip'):
- cache.strip(FilterByAge(opts.age_limit))
- elif hasattr(cache, 'strip_max_age'):
- cache.strip_max_age(opts.age_limit)
-
- return errors
-
-
-def do_strip_yt_cache(opts):
- try:
- from yalibrary.store.yt_store import yt_store
- except ImportError as e:
- logger.warn("YT store is not available: %s", e)
-
- token = opts.yt_token or opts.oauth_token
- cache = yt_store.YtStore(
- opts.yt_proxy,
- opts.yt_dir,
- None,
- token=token,
- readonly=opts.yt_readonly,
- max_cache_size=opts.yt_max_cache_size,
- ttl=opts.yt_store_ttl,
- )
-
- counters = cache.strip()
- if counters:
- logger.info(
- 'Deleted: meta rows:%d, data rows:%d, net data size:%d',
- counters['meta_rows'],
- counters['data_rows'],
- counters['data_size'],
- )
diff --git a/devtools/ya/handlers/gc/ya.make b/devtools/ya/handlers/gc/ya.make
deleted file mode 100644
index 1b8296d1ee..0000000000
--- a/devtools/ya/handlers/gc/ya.make
+++ /dev/null
@@ -1,37 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.gc
- __init__.py
-)
-
-PEERDIR(
- contrib/python/six
- devtools/ya/app
- devtools/ya/build
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/config
- devtools/ya/core/yarg
- devtools/ya/exts
- devtools/ya/yalibrary/runner
- devtools/ya/yalibrary/store
- devtools/ya/yalibrary/toolscache
- library/python/fs
- library/python/windows
-)
-
-IF (NOT YA_OPENSOURCE)
- PEERDIR(
- devtools/ya/yalibrary/store/yt_store # see YA-938
- )
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/gen_config/__init__.py b/devtools/ya/handlers/gen_config/__init__.py
deleted file mode 100644
index e771dac871..0000000000
--- a/devtools/ya/handlers/gen_config/__init__.py
+++ /dev/null
@@ -1,48 +0,0 @@
-from __future__ import absolute_import
-import core.common_opts
-import core.yarg
-
-from . import gen_config
-
-
-class GenConfigOptions(core.yarg.Options):
- def __init__(self):
- self.output = None
- self.dump_defaults = False
-
- @staticmethod
- def consumer():
- return [
- core.yarg.SingleFreeArgConsumer(
- help='ya.conf',
- hook=core.yarg.SetValueHook('output'),
- required=False,
- ),
- core.yarg.ArgConsumer(
- ['--dump-defaults'],
- help='Dump default values as JSON',
- hook=core.yarg.SetConstValueHook('dump_defaults', True),
- ),
- ]
-
-
-class GenConfigYaHandler(core.yarg.OptsHandler):
- description = 'Generate default ya config'
-
- def __init__(self):
- self._root_handler = None
- super(GenConfigYaHandler, self).__init__(
- action=self.do_generate,
- description=self.description,
- opts=[
- core.common_opts.ShowHelpOptions(),
- GenConfigOptions(),
- ],
- )
-
- def handle(self, root_handler, args, prefix):
- self._root_handler = root_handler
- super(GenConfigYaHandler, self).handle(root_handler, args, prefix)
-
- def do_generate(self, args):
- gen_config.generate_config(self._root_handler, args.output, args.dump_defaults)
diff --git a/devtools/ya/handlers/gen_config/gen_config.py b/devtools/ya/handlers/gen_config/gen_config.py
deleted file mode 100644
index 56a0d6fc64..0000000000
--- a/devtools/ya/handlers/gen_config/gen_config.py
+++ /dev/null
@@ -1,282 +0,0 @@
-from __future__ import absolute_import
-import sys
-import itertools
-import collections
-import json
-
-import toml
-
-import core.yarg
-import six
-
-toml_encoder = toml.TomlEncoder()
-
-NOT_SET = -1
-
-
-def is_jsonable(x):
- try:
- json.dumps(x)
- return True
- except (TypeError, OverflowError):
- return False
-
-
-def iter_ya_options(root_handler, all_opts=False):
- # prefer setvalue over setconstvalue to provide correct description and default value
- hook_prefer_order = [core.yarg.SetValueHook, core.yarg.SetConstValueHook]
-
- def get_hook_priority(hook, consumer):
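- # ConfigConsumer gets the lowest priority, so names and help text taken from ArgConsumer always win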
- if isinstance(consumer, core.yarg.ConfigConsumer):
- return -1
-
- htype = type(hook)
- if htype in hook_prefer_order:
- pos = hook_prefer_order.index(htype)
- else:
- pos = len(hook_prefer_order)
- return len(hook_prefer_order) - pos
-
- for handler_name, command in six.iteritems(root_handler.sub_handlers):
- if not command.visible:
- continue
-
- top_handler = command.command()
- handlers = collections.deque([([handler_name], top_handler)])
-
- while handlers:
- trace, handler = handlers.pop()
-
- sub_handlers = handler.sub_handlers or {}
- for sub_name in sorted(sub_handlers):
- handlers.append((trace + [sub_name], sub_handlers[sub_name]))
-
- options = getattr(handler, "options", None)
- if not options:
- continue
-
- if all_opts:
- for opt in options:
- for name in vars(opt).keys():
- default = getattr(opt, name)
- if is_jsonable(default):
- yield name, "", default, "", "", ""
- continue
-
- top_consumer = options.consumer()
- if isinstance(top_consumer, core.yarg.Compound):
- consumers = top_consumer.parts
- else:
- consumers = [top_consumer]
-
- # pairwise config option with command line name if it's possible
- params = {}
- for consumer in consumers:
- if not isinstance(consumer, (core.yarg.ArgConsumer, core.yarg.ConfigConsumer)):
- continue
-
- hook = consumer.hook
- if not hook:
- continue
-
- opt_name = getattr(hook, 'name', None)
- if not opt_name or opt_name.startswith("_"):
- continue
-
- if opt_name not in params:
- params[opt_name] = {
- 'configurable': False,
- }
-
- entry = params[opt_name]
- priority = get_hook_priority(hook, consumer)
-
- entry['configurable'] |= isinstance(consumer, core.yarg.ConfigConsumer)
- if priority > entry.get('_priority', -2):
- entry['cmdline_names'] = get_arg_consumer_names(consumer) or entry.get('cmdline_names')
- entry['trace'] = trace or entry.get('trace')
- entry['description'] = getattr(consumer, "help", "") or entry.get('description')
- entry['group'] = getattr(consumer, "group", "") or entry.get('group', 0)
- # default value might be empty list or dict
- if getattr(options, opt_name, None) is None:
- entry['default'] = entry.get('default')
- else:
- entry['default'] = getattr(options, opt_name)
- entry['_priority'] = priority
-
- for name, x in params.items():
- if x['configurable']:
- yield name, x['cmdline_names'], x['default'], x['group'], x['description'], x['trace']
-
-
-def get_arg_consumer_names(consumer):
- if isinstance(consumer, core.yarg.ArgConsumer):
- return list([_f for _f in [consumer.short_name, consumer.long_name] if _f])
- return []
-
-
-def split(iterable, func):
- d1 = []
- d2 = []
- for entry in iterable:
- if func(entry):
- d1.append(entry)
- else:
- d2.append(entry)
- return d1, d2
-
-
-def get_comment(entry):
- parts = []
- if entry['desc']:
- parts.append(entry['desc'])
- if entry['cmdline_names']:
- parts.append("({})".format(", ".join(entry['cmdline_names'])))
- return " ".join(parts)
-
-
-def compress_keys(data, result, trace=None):
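- # Flatten nested config sections into dotted keys, e.g. data['ide']['idea'] becomes result['ide.idea']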
- trace = trace or []
- for key, val in six.iteritems(data):
- if isinstance(val, dict):
- result[".".join(trace + [key])] = val
- compress_keys(val, result, trace + [key])
-
-
-def dump_options(entries, output, config, section="", print_comment=True):
- if section:
- output.append("[{}]".format(section))
- for entry in sorted(entries, key=lambda x: x['name']):
- parts = []
- if print_comment:
- comment = get_comment(entry)
- if comment:
- parts.append("# {}".format(comment))
- name = entry['name']
- if name in config and config[name] != entry['default']:
- parts.append("{} = {}".format(name, toml_encoder.dump_value(config[name])))
- else:
- parts.append("# {} = {}".format(name, toml_encoder.dump_value(entry['default'])))
-
- output.append("\n".join(parts))
-
-
-def dump_subgroups(subgroups, output, config, section="", print_comment=True):
- def get_section(name):
- return ".".join([_f for _f in [section, name] if _f])
-
- for entry in subgroups:
- parts = []
- if print_comment:
- comment = get_comment(entry)
- if comment:
- parts.append("# {}".format(comment))
-
- name = entry['name']
- if name in config and config[name] != entry['default']:
- parts.append("[{}]".format(get_section(name)))
- for key, val in config[name].items():
- parts.append("{} = {}".format(key, toml_encoder.dump_value(val)))
- else:
- parts.append("# [{}]".format(get_section(name)))
- for key, val in entry['default'].items():
- parts.append("# {} = {}".format(key, toml_encoder.dump_value(val)))
-
- output.append("\n".join(parts))
-
-
-def dump_config(options, handler_map, user_config):
- def sort_func(x):
- return getattr(x['group'], 'index', NOT_SET)
-
- def get_group_title(name):
- return "{} {} {}".format("=" * 10, name, "=" * (65 - len(name)))
-
- blocks = [
- "# Save config to the junk/{USER}/ya.conf or to the ~/.ya/ya.conf\n# For more info see https://docs.yandex-team.ru/yatool/commands/gen_config"
- ]
-
- # dump all options
- subgroups = []
- data = sorted(options.values(), key=sort_func)
- for group_index, entries in itertools.groupby(data, sort_func):
- entries = list(entries)
-
- if group_index != NOT_SET:
- group_name = entries[0]['group'].name
- if group_name:
- blocks.append("# " + get_group_title(group_name))
-
- subs, opts = split(entries, lambda x: isinstance(x['default'], dict))
- dump_options(opts, blocks, user_config)
- # move all subgroup options out of the default section
- # otherwise every option defined after section will be related to the section
- subgroups += subs
-
- if subgroups:
- blocks.append("# " + get_group_title("Various table options"))
- blocks.append("# Uncomment table name with parameters")
- dump_subgroups(subgroups, blocks, user_config)
-
- # Save user redefined opts for specific handlers
- specific_opts = {}
- user_handler_map = {}
- compress_keys(user_config, user_handler_map)
- for section, keys in six.iteritems(user_handler_map):
- if section not in handler_map:
- continue
- entries = []
- for optname in keys:
- if optname in handler_map[section]:
- entries.append(options[optname])
- if entries:
- specific_opts[section] = entries
-
- if specific_opts:
- blocks.append("# " + get_group_title("Redefined options for specific handlers"))
- for section, entries in sorted(specific_opts.items(), key=lambda x: x[0]):
- subs, opts = split(entries, lambda x: isinstance(x['default'], dict))
- if opts:
- dump_options(opts, blocks, user_handler_map[section], section, print_comment=False)
- if subgroups:
- dump_subgroups(subs, blocks, user_handler_map[section], section, print_comment=False)
-
- return "\n#\n".join(blocks)
-
-
-def generate_config(root_handler, output=None, dump_defaults=None):
- # Don't load global config files to avoid penetration of the global options into user config
- config_files = core.yarg.get_config_files(global_config=False)
- user_config = core.yarg.load_config(config_files)
-
- options = {}
- handler_map = {}
- for name, cmdline_names, default, group, desc, trace in iter_ya_options(root_handler, dump_defaults):
- if name not in options:
- options[name] = {
- 'cmdline_names': cmdline_names,
- 'default': default,
- 'desc': desc,
- 'group': group,
- 'name': name,
- }
- else:
- entry = options[name]
- entry['desc'] = entry['desc'] or desc
- entry['group'] = entry['group'] or group
-
- target = ".".join(trace)
- if target not in handler_map:
- handler_map[target] = []
- handler_map[target].append(name)
-
- if dump_defaults:
- json.dump({k: v['default'] for k, v in options.items()}, sys.stdout, indent=2)
- return
-
- data = dump_config(options, handler_map, user_config)
- if output:
- with open(output, 'w') as afile:
- afile.write(data)
- else:
- sys.stdout.write(data + '\n')
diff --git a/devtools/ya/handlers/gen_config/ya.make b/devtools/ya/handlers/gen_config/ya.make
deleted file mode 100644
index b5fdeece7c..0000000000
--- a/devtools/ya/handlers/gen_config/ya.make
+++ /dev/null
@@ -1,23 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.gen_config
- __init__.py
- gen_config.py
-)
-
-PEERDIR(
- contrib/python/six
- contrib/python/toml
- devtools/ya/core
- devtools/ya/core/yarg
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/ide/__init__.py b/devtools/ya/handlers/ide/__init__.py
deleted file mode 100644
index 4d28d29a69..0000000000
--- a/devtools/ya/handlers/ide/__init__.py
+++ /dev/null
@@ -1,734 +0,0 @@
-from __future__ import absolute_import
-import os.path
-
-import six
-
-import core.yarg
-import core.config
-import core.common_opts
-
-import build.build_opts
-import build.compilation_database as bcd
-
-import ide.ide_common
-import ide.msvs
-import ide.msvs_lite
-import ide.clion2016
-import ide.idea
-import ide.qt
-import ide.remote_ide_qt
-import ide.goland
-import ide.pycharm
-import ide.venv
-import ide.vscode_all
-import ide.vscode_clangd
-import ide.vscode_go
-import ide.vscode_py
-import ide.vscode_ts
-import ide.vscode.opts
-
-import yalibrary.platform_matcher as pm
-
-import app
-import app_config
-
-if app_config.in_house:
- import devtools.ya.ide.fsnotifier
-
-from core.yarg.help_level import HelpLevel
-
-if six.PY3:
- import ide.gradle
-
-
-class TidyOptions(core.yarg.Options):
- def __init__(self):
- self.setup_tidy = False
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- ['--setup-tidy'],
- help="Setup default arcadia's clang-tidy config in a project",
- hook=core.yarg.SetConstValueHook('setup_tidy', True),
- group=core.yarg.BULLET_PROOF_OPT_GROUP,
- ),
- core.yarg.ConfigConsumer('setup_tidy'),
- ]
-
-
-class CLionOptions(core.yarg.Options):
- CLION_OPT_GROUP = core.yarg.Group('CLion project options', 0)
-
- def __init__(self):
- self.filters = []
- self.lite_mode = False
- self.remote_toolchain = None
- self.remote_deploy_config = None
- self.remote_repo_path = None
- self.remote_build_path = None
- self.remote_deploy_host = None
- self.use_sync_server = False
- self.content_root = None
- self.strip_non_final_targets = False
- self.full_targets = False
- self.add_py_targets = False
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- ['--filter', '-f'],
- help='Only consider filtered content',
- hook=core.yarg.SetAppendHook('filters'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--mini', '-m'],
- help='Lite mode for solution (fast open, without build)',
- hook=core.yarg.SetConstValueHook('lite_mode', True),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--remote-toolchain'],
- help='Generate configurations for remote toolchain with this name',
- hook=core.yarg.SetValueHook('remote_toolchain'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--remote-deploy-config'],
- help='Name of the remote server configuration tied to the remote toolchain',
- hook=core.yarg.SetValueHook('remote_deploy_config'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--remote-repo-path'],
- help='Path to the arc repository at the remote host',
- hook=core.yarg.SetValueHook('remote_repo_path'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--remote-build-path'],
- help='Path to the directory for CMake output at the remote host',
- hook=core.yarg.SetValueHook('remote_build_path'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--remote-host'],
- help='Hostname associated with remote server configuration',
- hook=core.yarg.SetValueHook('remote_deploy_host'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--use-sync-server'],
- help='Deploy local files via sync server instead of file watchers',
- hook=core.yarg.SetConstValueHook('use_sync_server', True),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--project-root', '-r'],
- help='Root directory for a CLion project',
- hook=core.yarg.SetValueHook('content_root'),
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--strip-non-final-targets'],
- hook=core.yarg.SetConstValueHook('strip_non_final_targets', True),
- help='Do not create targets for non-final nodes',
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--full-targets'],
- hook=core.yarg.SetConstValueHook('full_targets', True),
- help='Old Mode: Enable full targets graph generation for project.',
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--add-py3-targets'],
- hook=core.yarg.SetConstValueHook('add_py_targets', True),
- help='Add Python 3 targets to project',
- group=CLionOptions.CLION_OPT_GROUP,
- ),
- core.yarg.ConfigConsumer('filters'),
- core.yarg.ConfigConsumer('lite_mode'),
- core.yarg.ConfigConsumer('remote_toolchain'),
- core.yarg.ConfigConsumer('remote_deploy_config'),
- core.yarg.ConfigConsumer('remote_repo_path'),
- core.yarg.ConfigConsumer('remote_build_path'),
- core.yarg.ConfigConsumer('remote_deploy_host'),
- core.yarg.ConfigConsumer('use_sync_server'),
- core.yarg.ConfigConsumer('content_root'),
- core.yarg.ConfigConsumer('strip_non_final_targets'),
- core.yarg.ConfigConsumer('full_targets'),
- core.yarg.ConfigConsumer('add_py_targets'),
- ]
-
- def postprocess2(self, params):
- if ' ' in params.project_title:
- raise core.yarg.ArgsValidatingException('CLion project title must not contain spaces')
- if params.add_py_targets and params.full_targets:
- raise core.yarg.ArgsValidatingException('--add-py3-targets must be used without --full-targets')
-
-
-class IdeaOptions(core.yarg.Options):
- IDEA_OPT_GROUP = core.yarg.Group('Idea project options', 0)
- IDE_PLUGIN_INTEGRATION_GROUP = core.yarg.Group('Integration with IDE plugin', 1)
-
- def __init__(self):
- self.idea_project_root = None
- self.local = False
- self.group_modules = None
- self.dry_run = False
- self.ymake_bin = None
- self.iml_in_project_root = False
- self.iml_keep_relative_paths = False
- self.idea_files_root = None
- self.project_name = None
- self.minimal = False
- self.directory_based = True
- self.omit_test_data = False
- self.with_content_root_modules = False
- self.external_content_root_modules = []
- self.generate_tests_run = False
- self.generate_tests_for_deps = False
- self.separate_tests_modules = False
- self.auto_exclude_symlinks = False
- self.exclude_dirs = []
- self.with_common_jvm_args_in_junit_template = False
- self.with_long_library_names = False
- self.copy_shared_index_config = False
- self.idea_jdk_version = None
- self.regenerate_with_project_update = False
- self.project_update_targets = []
- self.project_update_kind = None
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- ['-r', '--project-root'],
- help='IntelliJ IDEA project root path',
- hook=core.yarg.SetValueHook('idea_project_root'),
- group=IdeaOptions.IDEA_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['-P', '--project-output'],
- help='IntelliJ IDEA project root path. Please use this instead of -r',
- hook=core.yarg.SetValueHook('idea_project_root'),
- group=IdeaOptions.IDEA_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['-l', '--local'],
- help='Only make idea modules for RECURSE-reachable projects',
- hook=core.yarg.SetConstValueHook('local', True),
- group=IdeaOptions.IDEA_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--group-modules'],
- help='Group idea modules according to paths: (tree, flat)',
- hook=core.yarg.SetValueHook('group_modules', values=('tree', 'flat')),
- group=IdeaOptions.IDEA_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['-n', '--dry-run'],
- help='Emulate create project, but do nothing',
- hook=core.yarg.SetConstValueHook('dry_run', True),
- group=IdeaOptions.IDEA_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--ymake-bin'], help='Path to ymake binary', hook=core.yarg.SetValueHook('ymake_bin'), visible=False
- ),
- core.yarg.ArgConsumer(
- ['--iml-in-project-root'],
- help='Store ".iml" files in project root tree(stores in source root tree by default)',
- hook=core.yarg.SetConstValueHook('iml_in_project_root', True),
- ),
- core.yarg.ArgConsumer(
- ['--iml-keep-relative-paths'],
- help='Keep relative paths in ".iml" files (works with --iml-in-project-root)',
- hook=core.yarg.SetConstValueHook('iml_keep_relative_paths', True),
- ),
- core.yarg.ArgConsumer(
- ['--idea-files-root'],
- help='Root for .ipr and .iws files',
- hook=core.yarg.SetValueHook('idea_files_root'),
- ),
- core.yarg.ArgConsumer(
- ['--project-name'],
- help='Idea project name (.ipr and .iws file)',
- hook=core.yarg.SetValueHook('project_name'),
- ),
- core.yarg.ArgConsumer(
- ['--ascetic'],
- help='Create the minimum set of project settings',
- hook=core.yarg.SetConstValueHook('minimal', True),
- ),
- core.yarg.ArgConsumer(
- ['--directory-based'],
- help='Create project in the current (directory-based) format',
- hook=core.yarg.SetConstValueHook('directory_based', True),
- visible=False,
- ),
- core.yarg.ArgConsumer(
- ['--omit-test-data'],
- help='Do not export test_data',
- hook=core.yarg.SetConstValueHook('omit_test_data', True),
- ),
- core.yarg.ArgConsumer(
- ['--with-content-root-modules'],
- help='Generate content root modules',
- hook=core.yarg.SetConstValueHook('with_content_root_modules', True),
- ),
- core.yarg.ArgConsumer(
- ['--external-content-root-module'],
- help='Add external content root modules',
- hook=core.yarg.SetAppendHook('external_content_root_modules'),
- ),
- core.yarg.ArgConsumer(
- ['--generate-junit-run-configurations'],
- help='Generate run configuration for junit tests',
- hook=core.yarg.SetConstValueHook('generate_tests_run', True),
- ),
- core.yarg.ArgConsumer(
- ['--generate-tests-for-dependencies'],
- help='Generate tests for PEERDIR dependencies',
- hook=core.yarg.SetConstValueHook('generate_tests_for_deps', True),
- ),
- core.yarg.ArgConsumer(
- ['--separate-tests-modules'],
- help='Do not merge tests modules with their own libraries',
- hook=core.yarg.SetConstValueHook('separate_tests_modules', True),
- ),
- core.yarg.ArgConsumer(
- ['--auto-exclude-symlinks'],
- help='Add all symlink-dirs in modules to exclude dirs',
- hook=core.yarg.SetConstValueHook('auto_exclude_symlinks', True),
- ),
- core.yarg.ArgConsumer(
- ['--exclude-dirs'],
- help='Exclude dirs with specific names from all modules',
- hook=core.yarg.SetAppendHook('exclude_dirs'),
- ),
- core.yarg.ArgConsumer(
- ['--with-common-jvm-args-in-junit-template'],
- help='Add common JVM_ARGS flags to default junit template',
- hook=core.yarg.SetConstValueHook('with_common_jvm_args_in_junit_template', True),
- ),
- core.yarg.ArgConsumer(
- ['--with-long-library-names'],
- help='Generate long library names',
- hook=core.yarg.SetConstValueHook('with_long_library_names', True),
- ),
- core.yarg.ArgConsumer(
- ['--copy-shared-index-config'],
- help='Copy project config for Shared Indexes if it exists',
- hook=core.yarg.SetConstValueHook('copy_shared_index_config', True),
- ),
- core.yarg.ArgConsumer(
- ['--idea-jdk-version'],
- help='Project JDK version',
- hook=core.yarg.SetValueHook('idea_jdk_version'),
- ),
- core.yarg.ArgConsumer(
- ['-U', '--regenerate-with-project-update'],
- help='Run `ya project update` upon regeneration from Idea',
- group=IdeaOptions.IDE_PLUGIN_INTEGRATION_GROUP,
- hook=core.yarg.SetConstValueHook('regenerate_with_project_update', True),
- ),
- core.yarg.ArgConsumer(
- ['--project-update-targets'],
- help='Run `ya project update` for these dirs upon regeneration from Idea',
- hook=core.yarg.SetAppendHook('project_update_targets'),
- group=IdeaOptions.IDE_PLUGIN_INTEGRATION_GROUP,
- visible=HelpLevel.ADVANCED,
- ),
- core.yarg.ArgConsumer(
- ['--project-update-kind'],
- help='Type of a project to use in `ya project update` upon regeneration from Idea',
- hook=core.yarg.SetValueHook('project_update_kind'),
- group=IdeaOptions.IDE_PLUGIN_INTEGRATION_GROUP,
- visible=HelpLevel.ADVANCED,
- ),
- core.yarg.ConfigConsumer('idea_project_root'),
- core.yarg.ConfigConsumer('local'),
- core.yarg.ConfigConsumer('group_modules'),
- core.yarg.ConfigConsumer('dry_run'),
- core.yarg.ConfigConsumer('iml_in_project_root'),
- core.yarg.ConfigConsumer('iml_keep_relative_paths'),
- core.yarg.ConfigConsumer('idea_files_root'),
- core.yarg.ConfigConsumer('project_name'),
- core.yarg.ConfigConsumer('minimal'),
- core.yarg.ConfigConsumer('directory_based'),
- core.yarg.ConfigConsumer('omit_test_data'),
- core.yarg.ConfigConsumer('with_content_root_modules'),
- core.yarg.ConfigConsumer('external_content_root_modules'),
- core.yarg.ConfigConsumer('generate_tests_run'),
- core.yarg.ConfigConsumer('generate_tests_for_deps'),
- core.yarg.ConfigConsumer('separate_tests_modules'),
- core.yarg.ConfigConsumer('auto_exclude_symlinks'),
- core.yarg.ConfigConsumer('exclude_dirs'),
- core.yarg.ConfigConsumer('with_common_jvm_args_in_junit_template'),
- core.yarg.ConfigConsumer('with_long_library_names'),
- core.yarg.ConfigConsumer('copy_shared_index_config'),
- core.yarg.ConfigConsumer('idea_jdk_version'),
- core.yarg.ConfigConsumer('regenerate_with_project_update'),
- core.yarg.ConfigConsumer('project_update_targets'),
- core.yarg.ConfigConsumer('project_update_kind'),
- ]
-
- def postprocess(self):
- if self.idea_project_root is None:
- raise core.yarg.ArgsValidatingException('Idea project root (-r, --project-root) must be specified.')
-
- self.idea_project_root = os.path.abspath(self.idea_project_root)
-
- if self.iml_keep_relative_paths and not self.iml_in_project_root:
- raise core.yarg.ArgsValidatingException(
- '--iml-keep-relative-paths can be used only with --iml-in-project-root'
- )
-
- if self.generate_tests_run and not self.directory_based:
- raise core.yarg.ArgsValidatingException(
- 'run configurations may be generated only for directory-based project'
- )
-
- for p in self.exclude_dirs:
- if os.path.isabs(p):
- raise core.yarg.ArgsValidatingException('Absolute paths are not allowed in --exclude-dirs')
-
-
-class GradleOptions(core.yarg.Options):
- GRADLE_OPT_GROUP = core.yarg.Group('Gradle project options', 0)
-
- def __init__(self):
- self.gradle_name = None
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- ['--gradle-name'],
- help='Set project name manually',
- hook=core.yarg.SetValueHook('gradle_name'),
- group=GradleOptions.GRADLE_OPT_GROUP,
- )
- ]
-
-
-class PycharmOptions(core.yarg.Options):
- PYCHARM_OPT_GROUP = core.yarg.Group('Pycharm project options', 0)
- PYTHON_WRAPPER_NAME = 'pycharm_python_wrapper'
-
- def __init__(self):
- self.only_generate_wrapper = False
- self.wrapper_name = PycharmOptions.PYTHON_WRAPPER_NAME
- self.list_ide = False
- self.ide_version = None
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- ['--only-generate-wrapper'],
- help="Don not generate Pycharm project, only wrappers",
- hook=core.yarg.SetConstValueHook('only_generate_wrapper', True),
- group=PycharmOptions.PYCHARM_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--wrapper-name'],
- help='Name of the generated python wrapper. Use `python` to manually add the wrapper as a Python SDK.',
- hook=core.yarg.SetValueHook('wrapper_name'),
- group=PycharmOptions.PYCHARM_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--list-ide'],
- help='List available JB IDEs for patching the SDK list.',
- hook=core.yarg.SetConstValueHook('list_ide', True),
- group=PycharmOptions.PYCHARM_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--ide-version'],
- help='Change IDE version for patching the SDK list. Available IDEs: {}'.format(
- ", ".join(ide.pycharm.find_available_ide())
- ),
- hook=core.yarg.SetValueHook('ide_version'),
- group=PycharmOptions.PYCHARM_OPT_GROUP,
- ),
- ]
-
- def postprocess(self):
- if PycharmOptions.PYTHON_WRAPPER_NAME != self.wrapper_name and not self.only_generate_wrapper:
- raise core.yarg.ArgsValidatingException(
- "Custom wrapper name can be used with option --only-generate-wrapper"
- )
-
-
-MSVS_OPTS = ide.msvs.MSVS_OPTS + [ide.ide_common.YaExtraArgsOptions(), core.common_opts.YaBin3Options()]
-
-
-def gen_msvs_solution(params):
- impl = ide.msvs_lite if params.lite else ide.msvs
- return impl.gen_msvs_solution(params)
-
-
-def get_description(text, ref_name):
- if app_config.in_house:
- ref = {
- "c": "https://docs.yandex-team.ru/ya-make/usage/ya_ide/vscode#c",
- "golang": "https://docs.yandex-team.ru/ya-make/usage/ya_ide/vscode#golang",
- "multi": "https://docs.yandex-team.ru/ya-make/usage/ya_ide/vscode#multi",
- "python": "https://docs.yandex-team.ru/ya-make/usage/ya_ide/vscode#python",
- "typescript": "https://docs.yandex-team.ru/ya-make/usage/ya_ide/vscode#typescript",
- }[ref_name]
- return "{}\nDocs: [[c:dark-cyan]]{}[[rst]]".format(text, ref)
- else:
- return text
-
-
-class IdeYaHandler(core.yarg.CompositeHandler):
- description = 'Generate project for IDE'
-
- def __init__(self):
- core.yarg.CompositeHandler.__init__(self, description=self.description)
- self['msvs'] = core.yarg.OptsHandler(
- action=app.execute(gen_msvs_solution),
- description='[[imp]]ya ide msvs[[rst]] is deprecated, please use clangd-based tooling instead',
- opts=MSVS_OPTS,
- examples=[
- core.yarg.UsageExample(
- '{prefix} util/generic util/datetime',
- 'Generate solution for util/generic, util/datetime and all their dependencies',
- ),
- core.yarg.UsageExample('{prefix} -P Output', 'Generate solution in Output directory'),
- core.yarg.UsageExample('{prefix} -T my_solution', 'Generate solution titled my_solution.sln'),
- ],
- visible=(pm.my_platform() == 'win32'),
- )
- self['clion'] = core.yarg.OptsHandler(
- action=app.execute(ide.clion2016.do_clion),
- description='[[imp]]ya ide clion[[rst]] is deprecated, please use clangd-based tooling instead',
- opts=ide.ide_common.ide_via_ya_make_opts()
- + [
- CLionOptions(),
- TidyOptions(),
- core.common_opts.YaBin3Options(),
- ],
- )
-
- self['idea'] = core.yarg.OptsHandler(
- action=app.execute(ide.idea.do_idea),
- description='Generate stub for IntelliJ IDEA',
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.ide_common.IdeYaMakeOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- IdeaOptions(),
- core.common_opts.OutputStyleOptions(),
- core.common_opts.CrossCompilationOptions(),
- core.common_opts.PrintStatisticsOptions(),
- build.build_opts.ContinueOnFailOptions(),
- build.build_opts.YMakeDebugOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.DistCacheOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.IgnoreRecursesOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ExecutorOptions(),
- build.build_opts.CustomFetcherOptions(),
- build.build_opts.SandboxAuthOptions(),
- core.common_opts.YaBin3Options(),
- ],
- unknown_args_as_free=True,
- )
- ide_gradle_opts = ide.ide_common.ide_minimal_opts(targets_free=True) + [
- ide.ide_common.YaExtraArgsOptions(),
- GradleOptions(),
- core.yarg.ShowHelpOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.CustomFetcherOptions(),
- build.build_opts.SandboxAuthOptions(),
- core.common_opts.CrossCompilationOptions(),
- build.build_opts.ToolsOptions(),
- build.build_opts.BuildTypeOptions('release'),
- build.build_opts.JavaSpecificOptions(),
- ]
- if six.PY2:
- self['gradle'] = core.yarg.OptsHandler(
- action=app.execute(
- lambda *a, **k: None,
- handler_python_major_version=3,
- ),
- description='Generate Gradle project',
- opts=ide_gradle_opts,
- visible=False,
- )
- if six.PY3:
- self['gradle'] = core.yarg.OptsHandler(
- action=app.execute(
- ide.gradle.do_gradle,
- handler_python_major_version=3,
- ),
- description='Generate Gradle project',
- opts=ide_gradle_opts,
- visible=False,
- )
- self['qt'] = core.yarg.OptsHandler(
- action=app.execute(self._choose_qt_handler),
- description='[[imp]]ya ide qt[[rst]] is deprecated, please use clangd-based tooling instead',
- opts=ide.qt.QT_OPTS + [core.common_opts.YaBin3Options()],
- )
- self['goland'] = core.yarg.OptsHandler(
- action=app.execute(ide.goland.do_goland),
- description='Generate stub for Goland',
- opts=ide.ide_common.ide_via_ya_make_opts()
- + [
- ide.goland.GolandOptions(),
- core.common_opts.YaBin3Options(),
- ],
- )
- self['pycharm'] = core.yarg.OptsHandler(
- action=app.execute(ide.pycharm.do_pycharm),
- description='Generate PyCharm project.',
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- PycharmOptions(),
- ide.ide_common.IdeYaMakeOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- build.build_opts.DistCacheOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=(pm.my_platform() != 'win32'),
- )
- self['vscode-clangd'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_clangd.gen_vscode_workspace),
- description=get_description('Generate VSCode clangd C++ project.', ref_name='c'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode_clangd.VSCodeClangdOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- bcd.CompilationDatabaseOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.OutputOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ArcPrefetchOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ToolsOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=(pm.my_platform() != 'win32'),
- )
- self['vscode-go'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_go.gen_vscode_workspace),
- description=get_description('Generate VSCode Go project.', ref_name='golang'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode_go.VSCodeGoOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ArcPrefetchOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ToolsOptions(),
- core.common_opts.YaBin3Options(),
- ],
- )
- self['vscode-py'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_py.gen_vscode_workspace),
- description=get_description('Generate VSCode Python project.', ref_name='python'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode_py.VSCodePyOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ArcPrefetchOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ToolsOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=(pm.my_platform() != 'win32'),
- )
- self['vscode-ts'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_py.gen_vscode_workspace),
- description=get_description('Generate VSCode TypeScript project.', ref_name='typescript'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode_py.VSCodePyOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ArcPrefetchOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ToolsOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=(pm.my_platform() != 'win32'),
- )
- self['vscode'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_all.gen_vscode_workspace),
- description=get_description('Generate VSCode multi-language project.', ref_name='multi'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode.opts.VSCodeAllOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- bcd.CompilationDatabaseOptions(),
- build.build_opts.ArcPrefetchOptions(prefetch=True, visible=False),
- build.build_opts.FlagsOptions(),
- build.build_opts.OutputOptions(),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.ToolsOptions(),
- core.common_opts.YaBin3Options(),
- ],
- )
- self['vscode-ts'] = core.yarg.OptsHandler(
- action=app.execute(ide.vscode_ts.gen_vscode_workspace),
- description=get_description('Generate VSCode TypeScript project.', ref_name='typescript'),
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- ide.vscode_ts.VSCodeTypeScriptOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=False, # FIXME: remove when ready for public release
- )
- self['venv'] = core.yarg.OptsHandler(
- action=app.execute(ide.venv.do_venv),
- description='Create or update python venv',
- opts=ide.ide_common.ide_minimal_opts(targets_free=True)
- + [
- build.build_opts.BuildTypeOptions('release'),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- build.build_opts.ExecutorOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.IgnoreRecursesOptions(),
- build.build_opts.RebuildOptions(),
- core.common_opts.BeVerboseOptions(),
- core.common_opts.CrossCompilationOptions(),
- ide.ide_common.YaExtraArgsOptions(),
- ide.venv.VenvOptions(),
- core.common_opts.YaBin3Options(),
- ],
- visible=(pm.my_platform() != 'win32'),
- )
- if app_config.in_house:
- self['fix-jb-fsnotifier'] = core.yarg.OptsHandler(
- action=app.execute(devtools.ya.ide.fsnotifier.fix_fsnotifier),
- description='Replace fsnotifier for JB IDEs.',
- opts=[
- devtools.ya.ide.fsnotifier.FixFsNotifierOptions(),
- core.common_opts.ShowHelpOptions(),
- core.common_opts.DumpDebugOptions(),
- core.common_opts.AuthOptions(),
- core.common_opts.YaBin3Options(),
- ],
- )
-
- @staticmethod
- def _choose_qt_handler(params):
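- # Dispatch 'ya ide qt': run Qt Creator when --run is given, generate a remote
- # project when a remote host is set, otherwise generate a local Qt project.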
- if params.run:
- ide.qt.run_qtcreator(params)
- elif params.remote_host:
- ide.remote_ide_qt.generate_remote_project(params)
- else:
- ide.qt.gen_qt_project(params)
diff --git a/devtools/ya/handlers/ide/ya.make b/devtools/ya/handlers/ide/ya.make
deleted file mode 100644
index c0802be535..0000000000
--- a/devtools/ya/handlers/ide/ya.make
+++ /dev/null
@@ -1,31 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.ide
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/config
- devtools/ya/core/yarg
- devtools/ya/ide
- devtools/ya/yalibrary/platform_matcher
-)
-
-IF (NOT YA_OPENSOURCE)
- PEERDIR(
- devtools/ya/ide/fsnotifier
- )
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/java/__init__.py b/devtools/ya/handlers/java/__init__.py
deleted file mode 100644
index 5c4c7c44e3..0000000000
--- a/devtools/ya/handlers/java/__init__.py
+++ /dev/null
@@ -1,49 +0,0 @@
-from __future__ import absolute_import
-from . import helpers
-
-import app
-import core.yarg
-from build import build_opts
-import devtools.ya.test.opts as test_opts
-
-
-def default_options():
- return [
- build_opts.BuildTargetsOptions(with_free=True),
- build_opts.BeVerboseOptions(),
- build_opts.ShowHelpOptions(),
- build_opts.YMakeDebugOptions(),
- build_opts.YMakeBinOptions(),
- build_opts.YMakeRetryOptions(),
- build_opts.FlagsOptions(),
- ]
-
-
-class JavaYaHandler(core.yarg.CompositeHandler):
- def __init__(self):
- super(JavaYaHandler, self).__init__(description='Java build helpers')
-
- self['dependency-tree'] = core.yarg.OptsHandler(
- action=app.execute(action=helpers.print_ymake_dep_tree),
- description='Print dependency tree',
- opts=default_options() + [build_opts.BuildTypeOptions('release')],
- visible=True,
- )
- self['classpath'] = core.yarg.OptsHandler(
- action=app.execute(action=helpers.print_classpath),
- description='Print classpath',
- opts=default_options() + [build_opts.BuildTypeOptions('release')],
- visible=True,
- )
- self['test-classpath'] = core.yarg.OptsHandler(
- action=app.execute(action=helpers.print_test_classpath),
- description='Print run classpath for test module',
- opts=default_options() + [test_opts.RunTestOptions()],
- visible=True,
- )
- self['find-all-paths'] = core.yarg.OptsHandler(
- action=app.execute(action=helpers.find_all_paths),
- description='Find all PEERDIR paths between two targets',
- opts=default_options() + [build_opts.FindPathOptions()],
- visible=True,
- )
diff --git a/devtools/ya/handlers/java/helpers.py b/devtools/ya/handlers/java/helpers.py
deleted file mode 100644
index 8a5a9cd35a..0000000000
--- a/devtools/ya/handlers/java/helpers.py
+++ /dev/null
@@ -1,250 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-import time
-import sys
-
-import core.yarg
-import build.build_opts as build_opts
-import build.graph as build_graph
-import build.ya_make as ya_make
-from build.build_facade import gen_managed_dep_tree, gen_targets_classpath
-from exts.tmp import temp_dir
-import yalibrary.formatter as yaformatter
-
-from jbuild.gen import base
-from jbuild.gen import consts
-from six.moves import map
-
-
-def fix_windows(path):
- return path.replace('\\', '/')
-
-
-def get_java_ctx_with_tests(opts):
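- # Build the project graph in "ya ide idea" mode (YA_IDE_IDEA=yes, sources dumped,
- # no symlinks) and return the java build context together with the discovered tests.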
- jopts = core.yarg.merge_opts(build_opts.ya_make_options(free_build_targets=True)).params()
- jopts.dump_sources = True
- jopts.create_symlinks = False
- jopts.__dict__.update(opts.__dict__)
- jopts.debug_options.append('x')
- jopts.flags["YA_IDE_IDEA"] = "yes"
-
- import app_ctx # XXX: via args
-
- _, tests, _, ctx, _ = build_graph.build_graph_and_tests(
- jopts, check=True, ev_listener=ya_make.get_print_listener(jopts, app_ctx.display), display=app_ctx.display
- )
-
- return ctx, tests
-
-
-def get_java_ctx(opts):
- ctx, _ = get_java_ctx_with_tests(opts)
- return ctx
-
-
-NORMAL = '[[imp]]{path}[[rst]]'
-EXCLUDED = '[[unimp]]{path} (omitted because of [[c:red]]EXCLUDE[[unimp]])[[rst]]'
-CONFLICT = '[[unimp]]{path} (omitted because of [[c:yellow]]conflict with {conflict_resolution}[[unimp]])[[rst]]'
-MANAGED = '[[imp]]{path}[[unimp]] (replaced from [[c:blue]]{orig}[[unimp]] because of [[c:blue]]DEPENDENCY_MANAGEMENT[[unimp]])[[rst]]'
-IGNORED = '[[unimp]]{path} (omitted as contrib proxy library)[[rst]]'
-DUPLICATE = '[[unimp]]{path} (*)[[rst]]'
-DIRECT_MANAGED = '[[imp]]{path}[[unimp]] (replaced from [[c:blue]]unversioned[[unimp]] because of [[c:blue]]DEPENDENCY_MANAGEMENT[[unimp]])[[rst]]'
-DIRECT_DEFAULT = (
- '[[imp]]{path}[[unimp]] (replaced from [[c:magenta]]unversioned[[unimp]] to [[c:magenta]]default[[unimp]])[[rst]]'
-)
-
-
-def arrow_str(length):
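- # Render the tree prefix for a node at the given depth: one '| ' per intermediate
- # level followed by '|-->', dimmed with the [[unimp]] markup.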
- s = ''
-
- if length:
- s += '| ' * (length - 1) + '|-->'
-
- return '[[unimp]]' + s + '[[rst]]'
-
-
-def node_str(dep, excluded=False, expanded_above=False):
- if expanded_above:
- return DUPLICATE.format(path=dep.path)
-
- elif excluded:
- return EXCLUDED.format(path=dep.path)
-
- elif dep.omitted_for_conflict:
- return CONFLICT.format(path=dep.path, conflict_resolution=base.basename_unix_path(dep.conflict_resolution_path))
-
- elif dep.replaced_for_dependency_management:
- return MANAGED.format(path=dep.path, orig=base.basename_unix_path(dep.old_path_for_dependency_management))
-
- elif dep.is_managed:
- return DIRECT_MANAGED.format(path=dep.path)
-
- elif dep.is_default:
- return DIRECT_DEFAULT.format(path=dep.path)
-
- else:
- return NORMAL.format(path=dep.path)
-
-
-def graph_line(depth, dep, excluded=False, expanded_above=False):
- return arrow_str(depth) + node_str(dep, excluded=excluded, expanded_above=expanded_above)
-
-
-def raise_not_a_java_path(path):
- raise WrongInputException('{} is not a java module'.format(path))
-
-
-class WrongInputException(Exception):
- mute = True
-
-
-def print_ymake_dep_tree(opts):
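- # Ask ymake for the managed dependency tree of the given targets, print it through
- # the markup formatter, and return True if any error events were emitted.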
- with temp_dir() as tmp:
- res, evlog = gen_managed_dep_tree(
- build_root=tmp,
- build_type=opts.build_type,
- build_targets=opts.abs_targets,
- debug_options=opts.debug_options,
- flags=opts.flags,
- ymake_bin=opts.ymake_bin,
- )
-
- import app_ctx
-
- ev_listener = ya_make.get_print_listener(opts, app_ctx.display)
- for ev in evlog:
- ev_listener(ev)
-
- formatter = yaformatter.new_formatter(is_tty=sys.stdout.isatty())
- print(formatter.format_message(res.stdout))
- return any('Type' in ev and ev['Type'] == 'Error' for ev in evlog)
-
-
-def print_classpath(opts):
- opts.flags['TRAVERSE_RECURSE'] = 'no'
- opts.flags['TRAVERSE_RECURS_FOR_TEST'] = 'no'
- with temp_dir() as tmp:
- res, evlog = gen_targets_classpath(
- build_root=tmp,
- build_type=opts.build_type,
- build_targets=opts.abs_targets,
- debug_options=opts.debug_options,
- flags=opts.flags,
- ymake_bin=opts.ymake_bin,
- )
-
- import app_ctx
-
- ev_listener = ya_make.get_print_listener(opts, app_ctx.display)
- for ev in evlog:
- ev_listener(ev)
-
- formatter = yaformatter.new_formatter(is_tty=sys.stdout.isatty())
- print(formatter.format_message(res.stdout))
- return any('Type' in ev and ev['Type'] == 'Error' for ev in evlog)
-
-
-def print_test_classpath(opts):
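- # Print the runtime classpath (paths relative to the build root) for every requested
- # Java test module and report targets that are not Java tests.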
- opts.flags['IGNORE_JAVA_DEPENDENCIES_CONFIGURATION'] = 'yes'
- opts.run_tests = 3
- ctx, tests = get_java_ctx_with_tests(opts)
- remaining_roots = set(map(fix_windows, opts.rel_targets))
-
- formatter = yaformatter.new_formatter(is_tty=sys.stdout.isatty())
- for test in tests:
- if test.project_path not in remaining_roots:
- continue
- classpath = test.get_classpath()
- if classpath is not None:
- classpath = [item[len(consts.BUILD_ROOT) + 1 :] for item in classpath]
- print(formatter.format_message("[[imp]]{}[[rst]]:\n\t{}".format(test.project_path, '\n\t'.join(classpath))))
- remaining_roots.discard(test.project_path)
-
- for path in remaining_roots:
- print(formatter.format_message("[[imp]]{}[[rst]] is not a Java test".format(path)))
-
-
-def iter_all_routes(ctx, src_path, dest_path, route_callback, max_dist=None):
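- # Depth-first enumeration of every dependency route from src_path to dest_path.
- # Nodes from which dest_path cannot be reached are pruned via the memoized
- # `achievable` map; routes longer than max_dist edges are cut off early.
- # route_callback receives the current path stack once per complete route.
- # Illustrative call (hypothetical module paths):
- #   iter_all_routes(ctx, 'a/lib', 'b/util', lambda route: print(route), max_dist=5)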
- achievable = {}
-
- def dest_achievable_from(path):
- if path not in achievable:
- achievable[path] = path == dest_path or any(dest_achievable_from(d.path) for d in ctx.by_path[path].deps)
-
- return achievable[path]
-
- stack = []
-
- def visit(path):
- stack.append(path)
-
- if path == dest_path:
- route_callback(stack)
- stack.pop()
- return # no loops
-
- if max_dist is not None and len(stack) - 1 >= max_dist:
- stack.pop()
- return
-
- for dep in ctx.by_path[path].deps:
- dep_path = dep.path
-
- if dest_achievable_from(dep_path):
- visit(dep_path)
-
- stack.pop()
-
- if dest_achievable_from(src_path):
- visit(src_path)
-
-
-def print_route(route):
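- # Format one route as '<hops>: a-->b-->c', colour-coding the hop count green (<4),
- # yellow (<7) or red (>=7).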
- dist = len(route) - 1
-
- if dist < 4:
- s = '[[c:green]]{}[[rst]]: '.format(dist)
- elif dist < 7:
- s = '[[c:yellow]]{}[[rst]]: '.format(dist)
- else:
- s = '[[c:red]]{}[[rst]]: '.format(dist)
-
- s += '\n[[unimp]]-->'.join(['[[imp]]' + p for p in route])
-
- return s
-
-
-def find_all_paths(opts):
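- # Expect exactly two targets (<from> <to>); print every PEERDIR route between them
- # and finish with a summary of how many routes were found and how long it took.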
- start_time = time.time()
- opts.flags['IGNORE_JAVA_DEPENDENCIES_CONFIGURATION'] = 'yes'
- ctx = get_java_ctx(opts)
-
- if len(opts.rel_targets) != 2:
- raise WrongInputException(
- 'Expected: <from> <to>\n' + 'Got: {}'.format(' '.join(['<' + p + '>' for p in opts.rel_targets]))
- )
-
- src_path = fix_windows(opts.rel_targets[0])
- dest_path = fix_windows(opts.rel_targets[1])
-
- if src_path not in ctx.by_path:
- raise_not_a_java_path(src_path)
-
- if dest_path not in ctx.by_path:
- raise_not_a_java_path(dest_path)
-
- count = [0]
-
- formatter = yaformatter.new_formatter(is_tty=sys.stdout.isatty())
-
- def route_callback(route):
- print(formatter.format_message(print_route(route)))
- count[0] += 1
-
- try:
- iter_all_routes(ctx, src_path, dest_path, route_callback, max_dist=opts.max_dist)
- finally:
- report = 'Found [[c:{}]]{}[[rst]] paths in [[c:yellow]]{}[[rst]] seconds'.format(
- 'green' if count[0] else 'red', count[0], time.time() - start_time
- )
- print(formatter.format_message(report))
diff --git a/devtools/ya/handlers/java/ya.make b/devtools/ya/handlers/java/ya.make
deleted file mode 100644
index 96140c1bf1..0000000000
--- a/devtools/ya/handlers/java/ya.make
+++ /dev/null
@@ -1,24 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.java
- __init__.py
- helpers.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/core/yarg
- devtools/ya/build
- devtools/ya/build/build_opts
- devtools/ya/test/opts
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/make/__init__.py b/devtools/ya/handlers/make/__init__.py
deleted file mode 100644
index e9d774f562..0000000000
--- a/devtools/ya/handlers/make/__init__.py
+++ /dev/null
@@ -1,32 +0,0 @@
-from __future__ import absolute_import
-from build.build_handler import do_ya_make
-from build.build_opts import ya_make_options
-
-import core.yarg
-
-import app
-
-
-class MakeYaHandler(core.yarg.OptsHandler):
- description = 'Build and run tests\nTo see more help use [[imp]]-hh[[rst]]/[[imp]]-hhh[[rst]]'
-
- def __init__(self):
- core.yarg.OptsHandler.__init__(
- self,
- action=app.execute(action=do_ya_make),
- examples=[
- core.yarg.UsageExample('{prefix} -r', 'Build current directory in release mode', good_looking=100),
- core.yarg.UsageExample(
- '{prefix} -t -j16 library', 'Build and test library with 16 threads', good_looking=99
- ),
- core.yarg.UsageExample(
- '{prefix} --checkout -j0', 'Checkout absent directories without build', good_looking=98
- ),
- ],
- description=self.description,
- opts=ya_make_options(
- free_build_targets=True,
- strip_idle_build_results=True,
- ),
- visible=True,
- )
diff --git a/devtools/ya/handlers/make/ya.make b/devtools/ya/handlers/make/ya.make
deleted file mode 100644
index 275750e1f7..0000000000
--- a/devtools/ya/handlers/make/ya.make
+++ /dev/null
@@ -1,26 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.make
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/build
- devtools/ya/build/build_opts
- devtools/ya/core/yarg
-)
-
-END()
-
-RECURSE(
- tests
-)
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/maven_import/__init__.py b/devtools/ya/handlers/maven_import/__init__.py
deleted file mode 100644
index f1296c0c83..0000000000
--- a/devtools/ya/handlers/maven_import/__init__.py
+++ /dev/null
@@ -1,31 +0,0 @@
-from __future__ import absolute_import
-import core.yarg
-import jbuild.maven.maven_import as mi
-import core.common_opts as common_opts
-import build.build_opts as build_opts
-import app
-import app_config
-
-from core.yarg.help_level import HelpLevel
-
-
-class MavenImportYaHandler(core.yarg.OptsHandler):
- description = 'Import specified artifact from remote maven repository to arcadia/contrib/java.'
- visible = app_config.in_house
-
- def __init__(self):
- super(MavenImportYaHandler, self).__init__(
- action=app.execute(mi.do_import),
- opts=[
- build_opts.MavenImportOptions(visible=HelpLevel.BASIC),
- build_opts.BuildThreadsOptions(build_threads=None),
- common_opts.OutputStyleOptions(),
- common_opts.TransportOptions(),
- common_opts.ShowHelpOptions(),
- build_opts.YMakeBinOptions(),
- build_opts.CustomFetcherOptions(),
- build_opts.ToolsOptions(),
- ]
- + build_opts.checkout_options(),
- description=self.description,
- )
diff --git a/devtools/ya/handlers/maven_import/ya.make b/devtools/ya/handlers/maven_import/ya.make
deleted file mode 100644
index c2e16c6d35..0000000000
--- a/devtools/ya/handlers/maven_import/ya.make
+++ /dev/null
@@ -1,28 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.maven_import
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/yarg
- devtools/ya/jbuild
- devtools/ya/yalibrary/tools
-)
-
-END()
-
-RECURSE(
- tests
-)
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/package/__init__.py b/devtools/ya/handlers/package/__init__.py
deleted file mode 100644
index adaf2980f1..0000000000
--- a/devtools/ya/handlers/package/__init__.py
+++ /dev/null
@@ -1,88 +0,0 @@
-from __future__ import absolute_import
-import logging
-
-import app
-import core.yarg
-import core.common_opts
-import build.ya_make
-import build.targets_deref
-import build.build_opts
-import devtools.ya.test.opts as test_opts
-
-import package.docker
-import package.packager
-import devtools.ya.handlers.package.opts as package_opts
-from core.yarg.help_level import HelpLevel
-
-logger = logging.getLogger(__name__)
-
-
-class PackageYaHandler(core.yarg.OptsHandler):
- description = """Build package using json package description in the release build type by default.
-For more info see https://docs.yandex-team.ru/ya-make/usage/ya_package"""
-
- def __init__(self):
- super(PackageYaHandler, self).__init__(
- action=app.execute(package.packager.do_package, respawn=app.RespawnType.OPTIONAL),
- description=self.description,
- examples=[
- core.yarg.UsageExample(
- cmd='{prefix} <path to json description>',
- description='Create tarball package from json description',
- )
- ],
- opts=[
- package_opts.PackageOperationalOptions(),
- package_opts.PackageCustomizableOptions(),
- package_opts.InterimOptions(),
- core.common_opts.LogFileOptions(),
- core.common_opts.EventLogFileOptions(),
- build.build_opts.BuildTypeOptions('release'),
- build.build_opts.BuildThreadsOptions(build_threads=None),
- core.common_opts.CrossCompilationOptions(),
- build.build_opts.ArcPrefetchOptions(),
- build.build_opts.ContentUidsOptions(),
- build.build_opts.KeepTempsOptions(),
- build.build_opts.RebuildOptions(),
- build.build_opts.StrictInputsOptions(),
- build.build_opts.DumpReportOptions(),
- build.build_opts.OutputOptions(),
- build.build_opts.AuthOptions(),
- build.build_opts.YMakeDumpGraphOptions(),
- build.build_opts.YMakeDebugOptions(),
- build.build_opts.YMakeBinOptions(),
- build.build_opts.YMakeRetryOptions(),
- build.build_opts.ExecutorOptions(),
- build.build_opts.ForceDependsOptions(),
- build.build_opts.IgnoreRecursesOptions(),
- core.common_opts.CustomSourceRootOptions(),
- core.common_opts.CustomBuildRootOptions(),
- core.common_opts.ShowHelpOptions(),
- core.common_opts.BeVerboseOptions(),
- core.common_opts.HtmlDisplayOptions(),
- core.common_opts.CommonUploadOptions(),
- build.build_opts.SandboxUploadOptions(ssh_key_option_name="--ssh-key", visible=HelpLevel.BASIC),
- build.build_opts.MDSUploadOptions(visible=HelpLevel.BASIC),
- core.common_opts.TransportOptions(),
- build.build_opts.CustomFetcherOptions(),
- build.build_opts.DistCacheOptions(),
- build.build_opts.FlagsOptions(),
- build.build_opts.PGOOptions(),
- test_opts.RunTestOptions(),
- test_opts.DebuggingOptions(),
- # strip_idle_build_results must be False to avoid removal of build nodes which are
- # reachable due to RECURSE and used in the package, but not required for tests
- test_opts.DepsOptions(strip_idle_build_results=False),
- test_opts.FileReportsOptions(),
- test_opts.FilteringOptions(test_size_filters=None),
- test_opts.PytestOptions(),
- test_opts.JUnitOptions(),
- test_opts.RuntimeEnvironOptions(),
- test_opts.TestToolOptions(),
- test_opts.UidCalculationOptions(cache_tests=False),
- core.common_opts.YaBin3Options(),
- ]
- + build.build_opts.distbs_options()
- + build.build_opts.checkout_options()
- + build.build_opts.svn_checkout_options(),
- )
diff --git a/devtools/ya/handlers/package/opts/__init__.py b/devtools/ya/handlers/package/opts/__init__.py
deleted file mode 100644
index e65532ef6f..0000000000
--- a/devtools/ya/handlers/package/opts/__init__.py
+++ /dev/null
@@ -1,691 +0,0 @@
-import collections
-import logging
-
-import build.build_opts
-import core.yarg
-import devtools.ya.test.opts as test_opts
-from devtools.ya.package import const
-
-logger = logging.getLogger(__name__)
-
-
-COMMON_SUBGROUP = core.yarg.Group('Common', 1)
-TAR_SUBGROUP = core.yarg.Group('Tar', 2)
-DEB_SUBGROUP = core.yarg.Group('Debian', 3)
-DOCKER_SUBGROUP = core.yarg.Group('Docker', 4)
-AAR_SUBGROUP = core.yarg.Group('Aar', 5)
-RPM_SUBGROUP = core.yarg.Group('Rpm', 6)
-NPM_SUBGROUP = core.yarg.Group('Npm', 7)
-PYTHON_WHEEL_SUBGROUP = core.yarg.Group('Python wheel', 8)
-
-
-class PackageOperationalOptions(core.yarg.Options):
- def __init__(self):
- self.artifactory_password_path = None
- self.build_debian_scripts = False
- self.build_only = False
- self.change_log = None
- self.cleanup = True
- self.codec = None
- self.convert = None
- self.custom_data_root = None
- self.custom_tests_data_root = None
- self.debian_distribution = 'unstable'
- self.debian_upload_token = None # please do not remove: it is needed in the opensource nebius ya
- self.docker_no_cache = False
- self.docker_push_image = False
- self.docker_remote_image_version = None
- self.docker_use_remote_cache = False
- self.dump_build_targets = None
- self.dump_inputs = None
- self.ignore_fail_tests = False
- self.list_codecs = False
- self.nanny_release = None
- self.package_output = None
- self.packages = []
- self.publish_to = {}
- self.raw_package_path = None
- self.run_long_tests = False
- self.sandbox_download_protocols = []
- self.upload = False
- self.wheel_access_key_path = None
- self.wheel_secret_key_path = None
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- names=['--publish-to'],
- help='Publish package to the specified dist',
- hook=core.yarg.DictPutHook('publish_to'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--build-only'],
- hook=core.yarg.SetConstValueHook('build_only', True),
- visible=False,
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--change-log'],
- help='Change log text or path to the existing changelog file',
- hook=core.yarg.SetValueHook('change_log'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--new'],
- help='Use new ya package json format',
- hook=core.yarg.SetConstValueHook('convert', False),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--old'],
- help='Use old ya package json format',
- hook=core.yarg.SetConstValueHook('convert', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--tests-data-root'],
- help="Custom location for arcadia_tests_data dir, defaults to <source root>/../arcadia_tests_data",
- hook=core.yarg.SetValueHook('custom_tests_data_root'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--data-root'],
- help="Custom location for data dir, defaults to <source root>/../data",
- hook=core.yarg.SetValueHook('custom_data_root'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--artifactory-password-path'],
- help='Path to file with artifactory password',
- hook=core.yarg.SetValueHook('artifactory_password_path'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.EnvConsumer(
- 'YA_ARTIFACTORY_PASSWORD_PATH',
- help='Path to file with artifactory password',
- hook=core.yarg.SetValueHook('artifactory_password_path'),
- ),
- core.yarg.ArgConsumer(
- names=['--dump-arcadia-inputs'],
- help='Only dump inputs, do not build package',
- hook=core.yarg.SetValueHook('dump_inputs'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--ignore-fail-tests'],
- help='Create the package even if tests failed',
- hook=core.yarg.SetConstValueHook('ignore_fail_tests', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--codec'],
- help='Codec name for uc compression',
- hook=core.yarg.SetValueHook('codec'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--codecs-list'],
- help='Show available codecs for --uc',
- hook=core.yarg.SetConstValueHook('list_codecs', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- ["-O", "--package-output"],
- help="Specifies directory for package output",
- hook=core.yarg.SetValueHook('package_output'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.FreeArgConsumer(
- help='Package description file name(s)',
- hook=core.yarg.SetValueHook(name='packages'),
- ),
- core.yarg.ArgConsumer(
- ['--sandbox-download-protocol'],
- help='Sandbox download protocols comma-separated (default: http,http_tgz)',
- hook=core.yarg.SetValueHook(
- 'sandbox_download_protocols', transform=lambda val: [_f for _f in val.split(",") if _f]
- ),
- visible=False,
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--wheel-repo-access-key'],
- help='Path to access key for wheel repository',
- hook=core.yarg.SetValueHook('wheel_access_key_path'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=PYTHON_WHEEL_SUBGROUP,
- ),
- core.yarg.EnvConsumer(
- 'YA_WHEEL_REPO_ACCESS_KEY_PATH',
- help='Path to access key for wheel repository',
- hook=core.yarg.SetValueHook('wheel_access_key_path'),
- ),
- core.yarg.ArgConsumer(
- names=['--wheel-repo-secret-key'],
- help='Path to secret key for wheel repository',
- hook=core.yarg.SetValueHook('wheel_secret_key_path'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=PYTHON_WHEEL_SUBGROUP,
- ),
- core.yarg.EnvConsumer(
- 'YA_WHEEL_SECRET_KEY_PATH',
- help='Path to secret key for wheel repository',
- hook=core.yarg.SetValueHook('wheel_secret_key_path'),
- ),
- core.yarg.ArgConsumer(
- names=['--raw-package-path'],
- help="Custom path for raw-package (implies --raw-package)",
- hook=core.yarg.SetValueHook('raw_package_path'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--no-cleanup'],
- help='Do not clean the temporary directory',
- hook=core.yarg.SetConstValueHook('cleanup', False),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--build-debian-scripts'],
- hook=core.yarg.SetConstValueHook('build_debian_scripts', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--debian-distribution'],
- help='Debian distribution',
- hook=core.yarg.SetValueHook('debian_distribution'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.EnvConsumer(
- 'YA_DEBIAN_UPLOAD_TOKEN',
- help='IAM token or path to IAM token for the nebiuscloud debian repository',
- hook=core.yarg.SetValueHook('debian_upload_token'),
- ),
- core.yarg.ArgConsumer(
- names=['--docker-push'],
- help='Push docker image to registry',
- hook=core.yarg.SetConstValueHook('docker_push_image', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-no-cache'],
- help='Disable docker cache',
- hook=core.yarg.SetConstValueHook('docker_no_cache', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ConfigConsumer("docker_no_cache"),
- core.yarg.ArgConsumer(
- names=['--dump-build-targets'],
- hook=core.yarg.SetValueHook('dump_build_targets'),
- visible=False,
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-use-remote-cache'],
- help='Use image from registry as cache source',
- hook=core.yarg.SetConstValueHook('docker_use_remote_cache', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-remote-image-version'],
- help='Specify image version to be used as cache source',
- hook=core.yarg.SetValueHook('docker_remote_image_version'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--nanny-release'],
- help='Notify nanny about new release',
- hook=core.yarg.SetValueHook(
- 'nanny_release', transform=lambda s: s.upper(), values=const.NANNY_RELEASE_TYPES
- ),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- ['--upload'],
- help='Upload created package to sandbox',
- hook=core.yarg.SetConstValueHook('upload', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- ]
-
- def postprocess(self):
- if self.convert is not None:
- logger.warning("Package format will be detected automatically, no need to use --new and --old")
- if self.nanny_release and not self.docker_push_image:
- raise core.yarg.ArgsValidatingException("Using --nanny-release without --docker-push is pointless")
-
- def postprocess2(self, params):
- if params.raw_package_path and not params.raw_package:
- params.raw_package = True
- # old params compatibility
- if getattr(params, 'run_long_tests', False):
- params.run_tests = test_opts.RunTestOptions.RunAllTests
-
-
-class PackageCustomizableOptions(core.yarg.Options):
- """
- Don't add parameters here by default, otherwise user could use them in package.json.
- For more info see https://docs.yandex-team.ru/ya-make/usage/ya_package/json#params
- """
-
- deb_compression_levels = collections.OrderedDict(
- sorted(
- {
- 'none': 0,
- 'low': 3,
- 'medium': 6,
- 'high': 9,
- }.items(),
- key=lambda i: i[1],
- )
- )
-
- def __init__(self):
- self.arch_all = False
- self.artifactory = None
- self.compress_archive = True
- self.compression_filter = None
- self.compression_level = None
- self.create_dbg = False
- self.custom_version = None
- self.debian_arch = None
- self.debian_compression_level = None
- self.debian_compression_type = 'gzip'
- self.docker_add_host = []
- self.docker_build_arg = {}
- self.docker_build_network = None
- self.docker_platform = None
- self.docker_registry = "registry.yandex.net"
- self.docker_repository = ""
- self.docker_save_image = False
- self.docker_secrets = []
- self.docker_target = None
- self.dupload_max_attempts = 1
- self.dupload_no_mail = False
- self.ensure_package_published = False
- self.force_dupload = False
- self.format = None
- self.full_strip = False
- self.key = None
- self.overwrite_read_only_files = False
- self.raw_package = False
- self.resource_attrs = {}
- self.resource_type = "YA_PACKAGE"
- self.sandbox_task_id = 0
- self.sign = True
- self.sloppy_deb = False
- self.store_debian = True
- self.strip = False
- self.wheel_platform = ""
- self.wheel_python3 = False
-
- @staticmethod
- def consumer():
- return [
- core.yarg.ArgConsumer(
- names=['--strip'],
- help='Strip binaries (only debug symbols: "strip -g")',
- hook=core.yarg.SetConstValueHook('strip', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--full-strip'],
- help='Strip binaries',
- hook=core.yarg.SetConstValueHook('full_strip', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--set-sandbox-task-id'],
- visible=False,
- help='Use the provided task id for the package version if needed',
- hook=core.yarg.SetValueHook('sandbox_task_id', int),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--wheel-platform'],
- visible=True,
- help='Set wheel package platform',
- hook=core.yarg.SetValueHook('wheel_platform'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=PYTHON_WHEEL_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--key'],
- help='The key to use for signing',
- hook=core.yarg.SetValueHook('key'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--debian'],
- help='Build debian package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.DEBIAN),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--tar'],
- help='Build tarball package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.TAR),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--no-compression'],
- help="Don't compress tar archive (for --tar only)",
- hook=core.yarg.SetConstValueHook('compress_archive', False),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--create-dbg'],
- help='Create a separate package with debug info (works only with --strip or --full-strip)',
- hook=core.yarg.SetConstValueHook('create_dbg', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- ["--compression-filter"],
- help="Specifies compression filter (gzip/zstd)",
- hook=core.yarg.SetValueHook('compression_filter'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- ["--compression-level"],
- help="Specifies compression level (0-9 for gzip [6 is default], 0-22 for zstd [3 is default])",
- hook=core.yarg.SetValueHook('compression_level', transform=lambda s: int(s)),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker'],
- help='Build docker',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.DOCKER),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--rpm'],
- help='Build rpm package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.RPM),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=RPM_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--aar'],
- help='Build aar package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.AAR),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=AAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--npm'],
- help='Build npm package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.NPM),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=NPM_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--wheel'],
- help='Build wheel package',
- hook=core.yarg.SetConstValueHook('format', const.PackageFormat.WHEEL),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=PYTHON_WHEEL_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--wheel-python3'],
- help='Use python3 when building the wheel package',
- hook=core.yarg.SetConstValueHook('wheel_python3', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=PYTHON_WHEEL_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--artifactory'],
- help='Build package and upload it to artifactory',
- hook=core.yarg.SetConstValueHook("artifactory", True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-add-host'],
- help='Docker --add-host',
- hook=core.yarg.SetAppendHook('docker_add_host'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-secret'],
- help='Same as Docker --secret. Several secrets can be passed at the same time',
- hook=core.yarg.SetAppendHook('docker_secrets'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-registry'],
- help='Docker registry',
- hook=core.yarg.SetValueHook('docker_registry'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-repository'],
- help='Specify private repository',
- hook=core.yarg.SetValueHook('docker_repository'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-save-image'],
- help='Save docker image to archive',
- hook=core.yarg.SetConstValueHook('docker_save_image', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-network'],
- help='--network parameter for `docker build` command',
- hook=core.yarg.SetValueHook('docker_build_network'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-platform'],
- help='Specify platform for docker build (requires buildx)',
- hook=core.yarg.SetValueHook('docker_platform'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-build-arg'],
- help='--build-arg parameter for `docker build` command, set it in the <key>=<value> form',
- hook=core.yarg.DictPutHook('docker_build_arg'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--docker-target'],
- help='Specify the target build stage (--target)',
- hook=core.yarg.SetValueHook('docker_target'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DOCKER_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--raw-package'],
- help="Used with --tar to get package content without tarring",
- hook=core.yarg.SetConstValueHook('raw_package', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=TAR_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--sloppy-and-fast-debian'],
- help="Fewer checks and no compression when building debian package",
- hook=core.yarg.SetConstValueHook('sloppy_deb', True),
- visible=False,
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--not-sign-debian'],
- help='Do not sign debian package',
- hook=core.yarg.SetConstValueHook('sign', False),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--custom-version'],
- help='Custom package version',
- hook=core.yarg.SetValueHook('custom_version'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--debian-arch'],
- help='Debian arch (passed to debuild as `-a`)',
- hook=core.yarg.SetValueHook('debian_arch'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--arch-all'],
- help='Use "Architecture: all" in debian',
- hook=core.yarg.SetConstValueHook('arch_all', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--force-dupload'],
- help='dupload --force',
- hook=core.yarg.SetConstValueHook('force_dupload', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['-z', '--debian-compression'],
- help="deb-file compression level ({})".format(
- ", ".join(list(PackageCustomizableOptions.deb_compression_levels.keys()))
- ),
- hook=core.yarg.SetValueHook(
- 'debian_compression_level', values=list(PackageCustomizableOptions.deb_compression_levels.keys())
- ),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['-Z', '--debian-compression-type'],
- help="deb-file compression type used when building deb-file (allowed types: {}, gzip (default), xz, bzip2, lzma, none)".format(
- const.DEBIAN_HOST_DEFAULT_COMPRESSION_LEVEL
- ),
- hook=core.yarg.SetValueHook('debian_compression_type'),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--dont-store-debian'],
- help="Save debian package in a separate archive",
- hook=core.yarg.SetConstValueHook('store_debian', False),
- visible=False,
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- ['--upload-resource-type'],
- help='Created resource type',
- hook=core.yarg.SetValueHook('resource_type'),
- group=build.build_opts.SANDBOX_UPLOAD_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- ['--upload-resource-attr'],
- help='Resource attr, set it in the <name>=<value> form',
- hook=core.yarg.DictPutHook(name='resource_attrs'),
- group=build.build_opts.SANDBOX_UPLOAD_OPT_GROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--dupload-max-attempts'],
- help='How many times to try running dupload if it fails',
- hook=core.yarg.SetValueHook('dupload_max_attempts', int),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--dupload-no-mail'],
- help='dupload --no-mail',
- hook=core.yarg.SetConstValueHook('dupload_no_mail', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=DEB_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--overwrite-read-only-files'],
- help='Overwrite read-only files in package',
- hook=core.yarg.SetConstValueHook('overwrite_read_only_files', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- core.yarg.ArgConsumer(
- names=['--ensure-package-published'],
- help='Ensure that package is available in the repository',
- hook=core.yarg.SetConstValueHook('ensure_package_published', True),
- group=core.yarg.PACKAGE_OPT_GROUP,
- subgroup=COMMON_SUBGROUP,
- ),
- ]
-
- def postprocess(self):
- if self.debian_compression_level is not None:
- self.debian_compression_level = self.deb_compression_levels[self.debian_compression_level]
- if self.create_dbg:
- if not self.full_strip:
- self.strip = True
- if self.compression_filter not in (None, 'gzip', 'zstd'):
- raise core.yarg.ArgsValidatingException(
- "Using unsupported compression filter: {}".format(self.compression_filter)
- )
-
-
-class InterimOptions(core.yarg.Options):
- Visible = False
-
- def __init__(self):
- self.verify_patterns_usage = True
-
- # All these options
- # - !! should never be available in YA_PACKAGE sandbox task !!
- # - will be removed when work is done
- def consumer(self):
- return [
- core.yarg.ArgConsumer(
- names=['--fixme-CHEMODAN-80080'],
- help='See CHEMODAN-80080 and DEVTOOLSSUPPORT-12411 for more info',
- hook=core.yarg.SetConstValueHook('verify_patterns_usage', False),
- visible=self.Visible,
- ),
- ]
diff --git a/devtools/ya/handlers/package/opts/ya.make b/devtools/ya/handlers/package/opts/ya.make
deleted file mode 100644
index f13c20f4b5..0000000000
--- a/devtools/ya/handlers/package/opts/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-PY23_LIBRARY()
-
-PY_SRCS(
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/yarg
- devtools/ya/exts
- devtools/ya/package/const
- devtools/ya/test/const
- devtools/ya/test/opts
-)
-
-STYLE_PYTHON()
-
-END()
diff --git a/devtools/ya/handlers/package/ya.make b/devtools/ya/handlers/package/ya.make
deleted file mode 100644
index 98c8a47dd9..0000000000
--- a/devtools/ya/handlers/package/ya.make
+++ /dev/null
@@ -1,27 +0,0 @@
-PY23_LIBRARY()
-
-PY_SRCS(
- NAMESPACE handlers.package
- __init__.py
-)
-
-PEERDIR(
- contrib/python/pathlib2
- devtools/ya/app
- devtools/ya/build
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/yarg
- devtools/ya/handlers/package/opts
- devtools/ya/package
- devtools/ya/test/opts
-)
-
-STYLE_PYTHON()
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- opts
-)
diff --git a/devtools/ya/handlers/test/__init__.py b/devtools/ya/handlers/test/__init__.py
deleted file mode 100644
index 2b3b2e4de4..0000000000
--- a/devtools/ya/handlers/test/__init__.py
+++ /dev/null
@@ -1,40 +0,0 @@
-import app
-
-from build.build_handler import do_ya_make
-from build.build_opts import ya_make_options
-
-import core.yarg
-
-
-class YaTestYaHandler(core.yarg.OptsHandler):
- description = 'Build and run all tests\n[[imp]]ya test[[rst]] is an alias for [[imp]]ya make -A[[rst]]'
-
- def __init__(self):
- core.yarg.OptsHandler.__init__(
- self,
- action=app.execute(action=do_ya_make),
- examples=[
- core.yarg.UsageExample(
- '{prefix}',
- 'Build and run all tests',
- good_looking=101,
- ),
- core.yarg.UsageExample(
- '{prefix} -t',
- 'Build and run small tests only',
- good_looking=102,
- ),
- core.yarg.UsageExample(
- '{prefix} -tt',
- 'Build and run small and medium tests',
- good_looking=103,
- ),
- ],
- description=self.description,
- opts=ya_make_options(
- free_build_targets=True,
- is_ya_test=True,
- strip_idle_build_results=True,
- ),
- visible=True,
- )
diff --git a/devtools/ya/handlers/test/ya.make b/devtools/ya/handlers/test/ya.make
deleted file mode 100644
index 70e9dee8a1..0000000000
--- a/devtools/ya/handlers/test/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.test
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/build
- devtools/ya/build/build_opts
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/tool/__init__.py b/devtools/ya/handlers/tool/__init__.py
deleted file mode 100644
index b4ac4ccdca..0000000000
--- a/devtools/ya/handlers/tool/__init__.py
+++ /dev/null
@@ -1,281 +0,0 @@
-from __future__ import absolute_import
-from __future__ import print_function
-import os
-import sys
-import logging
-
-from core.common_opts import CrossCompilationOptions
-from core.yarg import (
- ArgConsumer,
- CompositeHandler,
- EnvConsumer,
- SetConstValueHook,
- SetValueHook,
- Options,
- OptsHandler,
- FreeArgConsumer,
- ConfigConsumer,
- ExtendHook,
- ShowHelpException,
- SetAppendHook,
- BaseHook,
-)
-
-import app
-
-from build.build_opts import CustomFetcherOptions, SandboxAuthOptions, ToolsOptions
-from core.yarg.groups import PRINT_CONTROL_GROUP
-from core.yarg.help_level import HelpLevel
-from yalibrary.tools import environ, param, resource_id, task_id, tool, tools, toolchain_root, toolchain_sys_libs
-from yalibrary.toolscache import lock_resource
-from yalibrary.platform_matcher import is_darwin_arm64
-import core.config
-import core.respawn
-import exts.process
-import exts.windows
-
-logger = logging.getLogger(__name__)
-
-
-class ToolYaHandler(CompositeHandler):
- description = 'Execute specific tool'
-
- @staticmethod
- def common_download_options():
- return [SandboxAuthOptions(), CustomFetcherOptions(), ToolsOptions()]
-
- def __init__(self):
- CompositeHandler.__init__(
- self,
- description=self.description,
- examples=[
- core.yarg.UsageExample('{prefix} --ya-help', 'Print yatool specific options', good_looking=20),
- core.yarg.UsageExample('{prefix} --print-path', 'Print path to tool executable file', good_looking=10),
- core.yarg.UsageExample(
- '{prefix} --force-update',
- 'Check tool for updates before the update interval elapses',
- good_looking=10,
- ),
- ],
- )
- for x in tools():
- self[x.name] = OptsHandler(
- action=app.execute(action=do_tool, respawn=app.RespawnType.OPTIONAL),
- description=x.description,
- visible=x.visible,
- opts=[ToolOptions(x.name)] + self.common_download_options(),
- unknown_args_as_free=True,
- )
-
-
-class DummyHook(BaseHook):
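- # No-op hook: accepts the flag without storing anything; used below so that
- # --print-fastpath-error is recognised but ignored.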
- def __call__(self, to, *args):
- # type: ("Options", tp.Optional[tp.Any]) -> None
- pass
-
- @staticmethod
- def need_value():
- return False
-
-
-class ToolOptions(Options):
- def __init__(self, tool):
- Options.__init__(self)
- self.tool = tool
- self.print_path = None
- self.print_toolchain_path = None
- self.print_toolchain_sys_libs = None
- self.toolchain = None
- self.param = None
- self.platform = None
- self.target_platforms = []
- self.need_task_id = None
- self.need_resource_id = None
- self.show_help = False
- self.tail_args = []
- self.host_platform = None
- self.hide_arm64_host_warning = False
- self.force_update = False
- self.force_refetch = False
-
- @staticmethod
- def consumer():
- return [
- ArgConsumer(
- ['--print-path'],
- help='Only print path to tool, do not execute',
- hook=SetConstValueHook('print_path', True),
- ),
- ArgConsumer(
- ['--print-toolchain-path'],
- help='Print path to toolchain root',
- hook=SetConstValueHook('print_toolchain_path', True),
- ),
- ArgConsumer(
- ['--print-toolchain-sys-libs'],
- help='Print paths to toolchain system libraries',
- hook=SetConstValueHook('print_toolchain_sys_libs', True),
- ),
- ArgConsumer(['--platform'], help="Set specific platform", hook=SetValueHook('platform')),
- ArgConsumer(['--host-platform'], help="Set host platform", hook=SetValueHook('host_platform')),
- EnvConsumer('YA_TOOL_HOST_PLATFORM', hook=SetValueHook('host_platform')),
- ArgConsumer(['--toolchain'], help="Specify toolchain", hook=SetValueHook('toolchain')),
- ArgConsumer(['--get-param'], help="Get specified param", hook=SetValueHook('param')),
- ArgConsumer(
- ['--get-resource-id'],
- help="Get resource id for specific platform (the platform should be specified)",
- hook=SetConstValueHook('need_resource_id', True),
- ),
- ArgConsumer(['--get-task-id'], help="Get task id", hook=SetConstValueHook('need_task_id', True)),
- ArgConsumer(['--ya-help'], help="Show help", hook=SetConstValueHook('show_help', True)),
- ArgConsumer(
- ['--target-platform'],
- help='Target platform',
- hook=SetAppendHook('target_platforms', values=CrossCompilationOptions.generate_target_platforms_cxx()),
- ),
- ArgConsumer(
- ['--hide-arm64-host-warning'],
- help='Hide MacOS arm64 host warning',
- hook=SetConstValueHook('hide_arm64_host_warning', True),
- group=PRINT_CONTROL_GROUP,
- visible=HelpLevel.EXPERT if is_darwin_arm64() else False,
- ),
- EnvConsumer('YA_TOOL_HIDE_ARM64_HOST_WARNING', hook=SetConstValueHook('hide_arm64_host_warning', True)),
- ConfigConsumer('hide_arm64_host_warning'),
- ArgConsumer(
- ['--force-update'],
- help='Check tool for updates before the update interval elapses',
- hook=SetConstValueHook('force_update', True),
- ),
- ArgConsumer(['--force-refetch'], help='Refetch toolchain', hook=SetConstValueHook('force_refetch', True)),
- ArgConsumer(['--print-fastpath-error'], help='Print fast path failure error', hook=DummyHook()),
- FreeArgConsumer(help='arg', hook=ExtendHook(name='tail_args')),
- ]
-
- def postprocess(self):
- if self.show_help:
- raise ShowHelpException()
- if self.toolchain and self.target_platforms:
- raise core.yarg.ArgsValidatingException("Do not use --toolchain and --target-platform args together")
- if self.force_update:
- os.environ['YA_TOOL_FORCE_UPDATE'] = "1"
-
-
-def _replace(s, transformations):
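- # Substitute every '$(KEY)' placeholder in s with its value from transformations,
- # e.g. _replace('$(ROOT)/bin', {'ROOT': '/opt/tc'}) -> '/opt/tc/bin' (illustrative values).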
- for k, v in transformations.items():
- s = s.replace('$({})'.format(k), v)
- return s
-
-
-def _useful_env_vars():
- return {'YA_TOOL': sys.argv[0]}
-
-
-def do_tool(params):
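- # Resolve the requested tool binary (optionally for a specific toolchain/platform),
- # handle the informational flags (--print-path, --get-task-id, ...), and otherwise
- # exec the tool with a cleaned-up environment.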
- tool_name = params.tool
- extra_args = params.tail_args
- target_platform = params.target_platforms
- host_platform = params.host_platform
- if target_platform:
- if len(target_platform) > 1:
- raise Exception('Multiple target platforms are not supported by this code for now')
- target_platform = target_platform[0]
- else:
- target_platform = None
-
- if is_darwin_arm64() and not host_platform:
- host_platform = 'darwin'
- if not params.hide_arm64_host_warning:
- try:
- import app_ctx
-
- app_ctx.display.emit_message("You are using the x86_64 version of the selected tool.")
- except Exception as e:
- logger.error("Can't print arm64 warning message: {}".format(e))
-
- tool_path = tool(
- tool_name,
- params.toolchain,
- target_platform=target_platform,
- for_platform=host_platform,
- force_refetch=params.force_refetch,
- )
- if exts.windows.on_win() and not tool_path.endswith('.exe'): # XXX: hack. Think about ya.conf.json format
- logger.debug('Rename tool for win: %s', tool_path)
- tool_path += '.exe'
-
- lock_result = False
- for_platform = params.platform or params.host_platform or None
-
- if params.need_task_id:
- tid = task_id(tool_name, params.toolchain)
- if tid is not None:
- print(tid)
- else:
- raise Exception("Tool '{}' has no task id".format(tool_name))
- elif params.need_resource_id:
- print(resource_id(tool_name, params.toolchain, for_platform))
- elif params.param:
- print(param(tool_name, params.toolchain, params.param))
- elif params.print_toolchain_path:
- print(toolchain_root(tool_name, params.toolchain, for_platform))
- lock_result = True
- elif params.print_toolchain_sys_libs:
- print(toolchain_sys_libs(tool_name, params.toolchain, for_platform))
- lock_result = True
- elif params.print_path:
- print(tool_path)
- lock_result = True
- elif os.path.isfile(tool_path):
- env = core.respawn.filter_env(os.environ.copy())
-
- # Remove environment variables set by 'ya' wrapper.
- # They are actually one-time ya-bin parameters rather than inheritable environment
- # for all descendant processes.
- for key in ('YA_SOURCE_ROOT', 'YA_PYVER_REQUIRE', 'YA_PYVER_SET_FORCED'):
- env.pop(key, None)
-
- env.update(_useful_env_vars())
- for key, value in environ(tool_name, params.toolchain).items():
- env[key] = _replace(
- os.pathsep.join(value), {'ROOT': toolchain_root(tool_name, params.toolchain, for_platform)}
- )
- if tool_name == 'gdb':
- # gdb does not fit in 8 MB stack with large cores (DEVTOOLS-5040).
- try:
- import resource as r
- except ImportError:
- pass
- else:
- soft, hard = r.getrlimit(r.RLIMIT_STACK)
- new = 128 << 20
- logger.debug("Limit info: soft=%d hard=%d new=%d", soft, hard, new)
- if hard != r.RLIM_INFINITY:
- new = min(new, hard)
- logger.debug("Limit info: new=%d", new)
- if new > soft:
- logger.debug("Limit info: setting new limits=(%d, %d)", new, hard)
- try:
- r.setrlimit(r.RLIMIT_STACK, (new, hard))
- except ValueError as e:
- logger.error("Failure while setting RLIMIT_STACK ({}, {}), {}".format(new, hard, e))
- logger.exception("While setting RLIMIT_STACK")
- arc_root = core.config.find_root(fail_on_error=False)
- if arc_root is not None:
- logger.debug('Arcadia root found: %s', arc_root)
- extra_args = ['-ex', 'set substitute-path /-S/ {}/'.format(arc_root)] + extra_args
- extra_args = ['-ex', 'set filename-display absolute'] + extra_args
- if (
- tool_name == 'arc'
- and params.username not in {'sandbox', 'root'}
- and os.getenv('YA_ALLOW_TOOL_ARC', 'no') != 'yes'
- ):
- message = (
- 'Please, use natively installed arc, install guide:'
- ' https://docs.yandex-team.ru/devtools/intro/quick-start-guide#arc-setup'
- )
- raise core.yarg.ArgsValidatingException(message)
- exts.process.execve(tool_path, extra_args, env=env)
-
- if lock_result:
- lock_resource(toolchain_root(tool_name, params.toolchain, for_platform))
diff --git a/devtools/ya/handlers/tool/ya.make b/devtools/ya/handlers/tool/ya.make
deleted file mode 100644
index 6021f3ea29..0000000000
--- a/devtools/ya/handlers/tool/ya.make
+++ /dev/null
@@ -1,33 +0,0 @@
-PY23_LIBRARY()
-
-STYLE_PYTHON()
-
-PY_SRCS(
- NAMESPACE handlers.tool
- __init__.py
-)
-
-PEERDIR(
- devtools/ya/app
- devtools/ya/build/build_opts
- devtools/ya/core
- devtools/ya/core/config
- devtools/ya/core/respawn
- devtools/ya/core/yarg
- devtools/ya/exts
- devtools/ya/yalibrary/platform_matcher
- devtools/ya/yalibrary/tools
- devtools/ya/yalibrary/toolscache
- library/python/windows
-)
-
-END()
-
-RECURSE(
- tests
-)
-
-RECURSE_FOR_TESTS(
- bin
- tests
-)
diff --git a/devtools/ya/handlers/ya.make b/devtools/ya/handlers/ya.make
deleted file mode 100644
index d88914356d..0000000000
--- a/devtools/ya/handlers/ya.make
+++ /dev/null
@@ -1,151 +0,0 @@
-PY23_LIBRARY(ya-lib)
-
-SRCDIR(devtools/ya)
-
-PEERDIR(
- devtools/ya/handlers/make
- devtools/ya/handlers/package
- devtools/ya/handlers/test
- devtools/ya/handlers/tool
- devtools/ya/handlers/ide
- devtools/ya/handlers/dump
- devtools/ya/handlers/gc
- devtools/ya/handlers/gen_config
- devtools/ya/handlers/maven_import
- devtools/ya/handlers/java
- # devtools/ya/handlers/analyze_make
-)
-
-IF (NOT YA_OPENSOURCE)
- PEERDIR(
- devtools/ya/handlers/__trace__
- devtools/ya/handlers/addremove
- devtools/ya/handlers/analyze_make
- devtools/ya/handlers/buf
- devtools/ya/handlers/check
- devtools/ya/handlers/clang_tidy
- devtools/ya/handlers/clone
- devtools/ya/handlers/completion
- devtools/ya/handlers/dctl
- devtools/ya/handlers/download
- devtools/ya/handlers/exec
- devtools/ya/handlers/fetch
- devtools/ya/handlers/fix_includes
- devtools/ya/handlers/krevedko
- devtools/ya/handlers/notify
- devtools/ya/handlers/paste
- devtools/ya/handlers/pr
- devtools/ya/handlers/profile
- devtools/ya/handlers/project
- devtools/ya/handlers/py
- devtools/ya/handlers/remote_gdb
- devtools/ya/handlers/repo_check
- devtools/ya/handlers/shell
- devtools/ya/handlers/shelve
- devtools/ya/handlers/stat
- devtools/ya/handlers/style
- devtools/ya/handlers/svn
- devtools/ya/handlers/unshelve
- devtools/ya/handlers/upload
- devtools/ya/handlers/vmctl
- devtools/ya/handlers/webide
- devtools/ya/handlers/whoami
- devtools/ya/handlers/wine
- devtools/ya/handlers/yav
-
- devtools/ya/handlers/py23migration/_ya0bin2
- devtools/ya/handlers/py23migration/_ya0bin3
- devtools/ya/handlers/py23migration/_ya2bin0
- devtools/ya/handlers/py23migration/_ya2bin2
- devtools/ya/handlers/py23migration/_ya3bin0
- devtools/ya/handlers/py23migration/_ya3bin3
- )
- IF (PYTHON3)
- PEERDIR(
- devtools/ya/handlers/py23migration/py23_utils
- devtools/ya/handlers/vim
- devtools/ya/handlers/curl
- devtools/ya/handlers/neovim
- devtools/ya/handlers/gdb
- devtools/ya/handlers/emacs
- devtools/ya/handlers/grep
- devtools/ya/handlers/jstyle
- devtools/ya/handlers/nile
- devtools/ya/handlers/sed
- devtools/ya/handlers/ydb
- devtools/ya/handlers/yql
- )
- ENDIF()
-ENDIF()
-END()
-
-RECURSE(
- __trace__
- addremove
- analyze_make
- autocheck
- buf
- check
- clang_tidy
- clone
- completion
- curl
- dctl
- download
- dump
- emacs
- exec
- fetch
- fix_includes
- gc
- gdb
- gen_config
- grep
- ide
- java
- jstyle
- krevedko
- make
- maven_import
- neovim
- nile
- notify
- package
- paste
- pr
- profile
- project
- py
- remote_gdb
- repo_check
- sed
- shell
- shelve
- stat
- style
- svn
- test
- tool
- unshelve
- upload
- vim
- vmctl
- webide
- whoami
- wine
- yav
- ydb
- yql
-)
-
-IF (NOT OPENSOURCE)
- RECURSE(
- py23migration/_ya0bin2
- py23migration/_ya0bin3
- py23migration/_ya2bin0
- py23migration/_ya3bin0
- py23migration/_ya3bin3
- py23migration/_ya2bin2
- py23migration/py23_utils
- )
-ENDIF()
diff --git a/devtools/ya/opensource/ya.conf b/devtools/ya/opensource/ya.conf
deleted file mode 100644
index fb140d072a..0000000000
--- a/devtools/ya/opensource/ya.conf
+++ /dev/null
@@ -1,92 +0,0 @@
-# Please keep this in sync with arcadia/ya.conf
-
-build_cache = true
-build_cache_conf = ['cas_logging=true', 'graph_info=true']
-build_cache_master = true
-cache_codec = ''
-cache_size = 150374182400
-content_uids = true
-dir_outputs = true
-dir_outputs_test_mode = true
-dump_debug_enabled = true
-fail_maven_export_with_tests = true
-incremental_build_dirs_cleanup = true
-oauth_exchange_ssh_keys = true
-remove_implicit_data_path = true
-remove_result_node = true
-tools_cache = true
-tools_cache_master = true
-use_atd_revisions_info = true
-use_jstyle_server = true
-use_command_file_in_testtool = true
-
-[test_tool3_handlers]
-build_clang_coverage_report = true
-build_go_coverage_report = true
-build_python_coverage_report = false
-build_sancov_coverage_report = true
-build_ts_coverage_report = true
-canonization_result_node = true
-canonize = true
-check_external = true
-check_mds = true
-check_resource = true
-checkout = true
-cov_merge_vfs = false
-create_allure_report = true
-download = true
-list_result_node = true
-list_tests = true
-merge_coverage_inplace = true
-merge_python_coverage = true
-minimize_fuzz_corpus = true
-populate_token_to_sandbox_vault = true
-resolve_clang_coverage = true
-resolve_go_coverage = true
-resolve_java_coverage = true
-resolve_python_coverage = false
-resolve_sancov_coverage = true
-resolve_ts_coverage = true
-result_node = true
-results_accumulator = true
-results_merger = true
-run_boost_test = true
-run_check = true
-run_clang_tidy = true
-run_classpath_clash = true
-run_coverage_extractor = true
-run_custom_lint = true
-run_diff_test = false
-run_eslint = true
-run_exectest = true
-run_fuzz = true
-run_fuzz_result_node = true
-run_g_benchmark = true
-run_go_fmt = true
-run_go_test = true
-run_go_vet = true
-run_gtest = true
-run_hermione = true
-run_javastyle = true
-run_jest = true
-run_ktlint_test = true
-run_pyimports = true
-run_skipped_test = true
-run_test = true
-run_ut = true
-run_y_benchmark = true
-sandbox_run_test = true
-unify_clang_coverage = true
-upload = false
-upload_coverage = false
-ytexec_run_test = true
-
-# ===== opensource only table params =====
-
-[host_platform_flags]
-OPENSOURCE = "yes"
-USE_PREBUILT_TOOLS = "no"
-
-[flags]
-OPENSOURCE = "yes"
-USE_PREBUILT_TOOLS = "no"
diff --git a/library/cpp/CMakeLists.darwin-arm64.txt b/library/cpp/CMakeLists.darwin-arm64.txt
index 58a86cdbb5..918b7e4c77 100644
--- a/library/cpp/CMakeLists.darwin-arm64.txt
+++ b/library/cpp/CMakeLists.darwin-arm64.txt
@@ -7,7 +7,6 @@
add_subdirectory(accurate_accumulate)
-add_subdirectory(actors)
add_subdirectory(archive)
add_subdirectory(balloc)
add_subdirectory(binsaver)
diff --git a/library/cpp/CMakeLists.darwin-x86_64.txt b/library/cpp/CMakeLists.darwin-x86_64.txt
index 5692463e65..4656f580ad 100644
--- a/library/cpp/CMakeLists.darwin-x86_64.txt
+++ b/library/cpp/CMakeLists.darwin-x86_64.txt
@@ -7,7 +7,6 @@
add_subdirectory(accurate_accumulate)
-add_subdirectory(actors)
add_subdirectory(archive)
add_subdirectory(balloc)
add_subdirectory(binsaver)
diff --git a/library/cpp/CMakeLists.linux-aarch64.txt b/library/cpp/CMakeLists.linux-aarch64.txt
index 58a86cdbb5..918b7e4c77 100644
--- a/library/cpp/CMakeLists.linux-aarch64.txt
+++ b/library/cpp/CMakeLists.linux-aarch64.txt
@@ -7,7 +7,6 @@
add_subdirectory(accurate_accumulate)
-add_subdirectory(actors)
add_subdirectory(archive)
add_subdirectory(balloc)
add_subdirectory(binsaver)
diff --git a/library/cpp/CMakeLists.linux-x86_64.txt b/library/cpp/CMakeLists.linux-x86_64.txt
index 5692463e65..4656f580ad 100644
--- a/library/cpp/CMakeLists.linux-x86_64.txt
+++ b/library/cpp/CMakeLists.linux-x86_64.txt
@@ -7,7 +7,6 @@
add_subdirectory(accurate_accumulate)
-add_subdirectory(actors)
add_subdirectory(archive)
add_subdirectory(balloc)
add_subdirectory(binsaver)
diff --git a/library/cpp/CMakeLists.windows-x86_64.txt b/library/cpp/CMakeLists.windows-x86_64.txt
index d8d81b3185..7867eb1061 100644
--- a/library/cpp/CMakeLists.windows-x86_64.txt
+++ b/library/cpp/CMakeLists.windows-x86_64.txt
@@ -7,7 +7,6 @@
add_subdirectory(accurate_accumulate)
-add_subdirectory(actors)
add_subdirectory(archive)
add_subdirectory(balloc)
add_subdirectory(binsaver)
diff --git a/library/cpp/actors/CMakeLists.txt b/library/cpp/actors/CMakeLists.txt
deleted file mode 100644
index becd73cd24..0000000000
--- a/library/cpp/actors/CMakeLists.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(actor_type)
-add_subdirectory(core)
-add_subdirectory(cppcoro)
-add_subdirectory(dnscachelib)
-add_subdirectory(dnsresolver)
-add_subdirectory(examples)
-add_subdirectory(helpers)
-add_subdirectory(http)
-add_subdirectory(interconnect)
-add_subdirectory(log_backend)
-add_subdirectory(memory_log)
-add_subdirectory(prof)
-add_subdirectory(protos)
-add_subdirectory(testlib)
-add_subdirectory(util)
-add_subdirectory(wilson)
diff --git a/library/cpp/actors/README.md b/library/cpp/actors/README.md
deleted file mode 100644
index 22502c391a..0000000000
--- a/library/cpp/actors/README.md
+++ /dev/null
@@ -1,107 +0,0 @@
-## Actor library
-
-### Part one: introduction.
-Sometimes you have to develop asynchronous, heavily parallel, partly distributed programs. Sometimes the internal logic is also non-trivial and heterogeneous, and is written by several teams over many years. Everything we love. Humanity has invented only a handful of ways to organize the internal structure and code of such programs. Most of them are bad (and it is precisely because of bad approaches that the development of asynchronous, multithreaded programs earned its bad reputation). Some are better. As usual, there is no silver bullet.
-
-When we started developing Yandex Database (then still KiKiMR), it was clear from the start that simple ad-hoc solutions would not do (at least not well enough to be proud of). As the foundation we chose message passing and the actor model, and we have not regretted it. Over time this approach spread to neighbouring projects.
-
-### Basic concepts.
-Stripped of the fluff, we represent a service (a program, in the case of a standalone binary) as an ensemble of independent agents that interact by sending asynchronous messages within a shared environment. Every word here matters:
-
-Independent – they share neither state nor a thread of execution.
-Message passing – we formalize protocols, not interfaces.
-
-Asynchronous – we do not block on sending a message.
-Shared environment – all agents share a common pool of resources, and any of them, knowing an address, can send a message to any other.
-
-In trendier terms, this is very similar to co-located microservices, just one level lower. And yes, we deliberately chose not to hide asynchrony and parallelism from the developer, exposing the raw machinery.
-
-### IActor.
-https://a.yandex-team.ru/arcadia/library/cpp/actors/core/actor.h?rev=r11291267#L310
-The base class of all agents; it is normally not used directly. Instantiate either TActor or TActorBootstrapped. Practically all of a program's useful code lives inside actors.
-(An important note: in the code you will see methods with and without TActorContext that are similar in name and purpose. The TActorContext variants are now considered legacy; new code should be written without them.)
-Important methods (a short sketch tying them together follows the list):
-
-PassAway – the only correct way for a registered actor to die. May only be called from inside a message handler.
-Send – send a message to a known recipient address. The actor provides a helper that takes the message directly; the base call, which takes a full event handle, is available in the context.
-
-Become – install the message handler function that will be used for the next incoming message.
-
-Register – register a new actor in the actor system, allocating a new mailbox for it. Important: from the moment of the call, ownership of the actor passes to the actor system, i.e. by the time the call returns the actor may already be running on another thread; you must neither call it directly nor even assume it is still alive.
-
-Schedule – register a message that will be sent after at least the requested delay. The actor provides a helper that wraps the message into a handle addressed to itself; in the context you can pass a full handle.
-
-SelfId – obtain the actor's own address. The returned TActorIdentity object can be passed around when you need to delegate sending messages on behalf of the actor (for example, when writing useful code with passive objects).
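-
-A minimal sketch (not code from the library or its examples) tying the calls above together; `CreateWorker()` is an assumed factory for some other actor, everything else is the API described in this section:
-
-```cpp
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <util/datetime/base.h>
-
-NActors::IActor* CreateWorker();  // assumed to be defined elsewhere
-
-class TSupervisor: public NActors::TActorBootstrapped<TSupervisor> {
-    NActors::TActorId Worker;
-
-public:
-    void Bootstrap() {
-        Become(&TSupervisor::StateWork);                   // install the handler for subsequent messages
-        Worker = Register(CreateWorker());                 // ownership passes to the actor system immediately
-        Schedule(TDuration::Seconds(5), new NActors::TEvents::TEvWakeup());  // delayed message to self
-    }
-
-    STFUNC(StateWork) {
-        switch (ev->GetTypeRewrite()) {
-            cFunc(NActors::TEvents::TSystem::Wakeup, HandleWakeup);
-        }
-    }
-
-    void HandleWakeup() {
-        Send(Worker, new NActors::TEvents::TEvPoison());   // non-blocking send to a known address
-        PassAway();                                        // the only correct way to terminate
-    }
-};
-```
-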
-Sending a message is cheap, so there is no need to be overly frugal with it (but it is not free either, so sending messages purely for the sake of sending messages is not worth it).
-
-Instantiating actors is also cheap; an actor per request, or per request phase, is perfectly normal practice. Multiplexing the processing of several requests in a single actor is just as normal. Our code has plenty of examples of both; use common sense and your own taste.
-Since an actor occupies a thread from the actor system's pool while it processes a message, long computations are better moved to a separate, dedicated actor (either relocated to a separate actor-system pool, or with its parallelism controlled by a resource broker); making blocking calls is almost always a mistake, and the urge to write a mutex is heresy.
-Actors are identified by their TActorID, which is unique; you must not invent it out of thin air, but obtain it from registration (for actors you spawn) or be told it by someone who legitimately knows it.
-
-Sending to a non-existent (already dead) actor is safe: the message is simply dropped when it is processed (how to handle message non-delivery in protocols is described below).
-
-Besides regular TActorIDs there are also service IDs (composed of a string and a node number). A real actor may be registered under such an ID, and when a message arrives at a service address it is forwarded to the current actual actor. This lets you place well-known services at well-known addresses without building a parallel discovery machinery.
-
-Whether to build a finite state machine out of an actor by switching its handler function is a per-case choice: sometimes it is more convenient, sometimes it is easier to dump everything into a single state, and sometimes a hybrid works best (part of the lifecycle – usually initialization and shutdown – is expressed as transitions, the rest is not).
-Less talk, more action: this is already enough to read the simplest example. https://a.yandex-team.ru/arc/trunk/arcadia/library/cpp/actors/examples/01_ping_pong
-Here you can see a sample of the simplest actor, which just tosses messages back and forth and uses all of the main calls. By turning the various knobs (the number of threads in the thread pool, the number of actor pairs exchanging messages) you can also watch how the system's behaviour changes (hint: in such simple scenarios peak performance is reached with a single thread per thread pool).
-
-### Event and Event Handle.
-The message payload is wrapped in a descendant of IEventBase, which has two important methods: serialization and loading. Serialization is virtual, loading is not, so to parse a message from a byte sequence the receiving side has to match the numeric event type identifier with a C++ type. That is exactly what the macros from hfunc.h do. In practice events are created either as descendants of TEventLocal<> (for strictly local messages) or as descendants of TEventPB<> (for messages that may be sent over the network, typed by a protobuf message).
-
-Besides the event itself (as a structure or as a byte string), sending a message requires a set of extra fields:
-
-Recipient
-
-Sender
-
-Message type
-
-Cookie
-
-Flags
-
-Message + extra fields = IEventHandle. Handles are what the actor system actually operates on. <event-type>::TPtr in the example above is precisely a pointer to such a typed handle.
-
-Technically the message type can be any number that the sender and the receiver have agreed to interpret as a message identifier. The established practice is to allocate a range of identifiers with the EventSpaceBegin macro (effectively in blocks of 64K), starting from the ES_USERSPACE block.
-The cookie is an uninterpreted ui64 number carried along with the handle. It is good practice for a service's reply to carry the cookie of the original message, especially for services that may be used concurrently.
-
-Of the flags, several bits are reserved for flags declaring how special situations must be handled, and 12 bits hold the interconnect channel number in which the message will be sent (for local messages the channel number does not matter in the existing implementations, although one could imagine an implementation with independent queues per channel).
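-
-A hedged sketch of declaring user events as described above; the event names and payloads are invented for the example, only the base classes and ES_USERSPACE come from the library:
-
-```cpp
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/core/events.h>
-#include <util/generic/string.h>
-#include <utility>
-
-using namespace NActors;
-
-struct TEvExample {
-    enum EEv {
-        EvRequest = EventSpaceBegin(TEvents::ES_USERSPACE),  // reserve a 64K block of identifiers
-        EvResponse,
-        EvEnd
-    };
-
-    // Strictly local events: they never leave the process, so no serialization is defined.
-    struct TEvRequest: TEventLocal<TEvRequest, EvRequest> {
-        TString Query;
-        explicit TEvRequest(TString query)
-            : Query(std::move(query))
-        {}
-    };
-
-    struct TEvResponse: TEventLocal<TEvResponse, EvResponse> {
-        TString Result;
-        explicit TEvResponse(TString result)
-            : Result(std::move(result))
-        {}
-    };
-};
-
-// Inside the serving actor, a reply would echo the incoming cookie back so that a
-// concurrent caller can match the response to its request:
-//     Send(ev->Sender, new TEvExample::TEvResponse(result), 0 /*flags*/, ev->Cookie);
-```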
-
-### Thread pools and mailboxes.
-Several independent thread pools can coexist within one actor system; each actor is registered on a specific one and cannot migrate during its lifetime (although it can spawn new actors on an arbitrary pool). This is used for coarse-grained resource partitioning, either between different activities (one class of requests is processed here, another one there) or between different activity profiles (fast requests here, slow ones there, batch work over there). For example, YDB runs a system thread pool (for the actors YDB itself needs to function, where we make sure handlers never block for long), a user thread pool (where requests are processed and handlers may take longer without affecting the infrastructure), a batch thread pool (where long-running work is offloaded: disk compactions, table scans and the like) and, on fat nodes, an interconnect thread pool (as the most latency-sensitive one).
-Message passing between actors of different thread pools within the same local actor system stays local; no forced serialization of the message takes place.
-
-Upon registration an actor is attached to a mailbox (typically its own, but when really needed you can, from inside message processing, attach the spawned actor to the currently active mailbox – see RegisterWithSameMailbox (formerly RegisterLocal) – which then guarantees the absence of concurrent message processing). Send itself is nothing more than wrapping the event into a handle, putting the handle into the mailbox queue and adding the mailbox to the thread pool's activation queue. Within a single mailbox processing is FIFO; between mailboxes there is no such guarantee, although we try to activate mailboxes roughly in the order messages appear in them.
-
-When registering an actor you can choose the mailbox type; they differ slightly in insertion cost: either cheap but worse under contention, or almost wait-free but more expensive. See the comments on TMailboxType for up-to-date hints on which is which.
-
-Useful helpers (illustrated in the sketch after this list).
-
-STFUNC – declares a state function; I recommend always using exactly this form for the declaration, because it is easier to grep for later.
-
-hFunc – handler-selection macro that passes the event into the handler.
-
-cFunc – handler-selection macro that does not pass the event into the handler.
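-
-A hedged sketch of the helpers above, plus the per-registration mailbox and pool choice mentioned earlier; `CreateWorker()`/`CreateHelper()` and the pool number are assumptions for the example:
-
-```cpp
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/hfunc.h>
-
-NActors::IActor* CreateWorker();  // assumed factories, defined elsewhere
-NActors::IActor* CreateHelper();
-
-class TDispatcher: public NActors::TActor<TDispatcher> {
-public:
-    TDispatcher()
-        : NActors::TActor<TDispatcher>(&TDispatcher::StateWork)
-    {}
-
-    STFUNC(StateWork) {
-        switch (ev->GetTypeRewrite()) {
-            hFunc(NActors::TEvents::TEvWakeup, Handle);          // the typed event is passed to the handler
-            cFunc(NActors::TEvents::TSystem::Poison, PassAway);  // only the type is matched, nothing is passed
-        }
-    }
-
-    void Handle(NActors::TEvents::TEvWakeup::TPtr& ev) {
-        Y_UNUSED(ev);
-        // A child on an explicit pool and mailbox type (cheap vs. better under contention)...
-        Register(CreateWorker(), NActors::TMailboxType::HTSwap, /* poolId = */ 1);
-        // ...or one sharing the current mailbox, which rules out concurrent processing with the parent.
-        RegisterWithSameMailbox(CreateHelper());
-    }
-};
-```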
-
-### Failure handling.
-Within the local actor system message delivery is guaranteed. If for some reason a message is not delivered (important: not delivered – whether a delivered message is actually processed is the receiving actor's responsibility), one of the following happens:
-
-If the FlagForwardOnNondelivery flag is set, the message is forwarded to the actor passed as forwardOnNondelivery when the handle was constructed. Useful, for example, when some services are created on demand and, for not-yet-created services, we want to route the message to a router. Works only within the local actor system.
-
-Otherwise, if the FlagTrackDelivery flag is set, a TEvUndelivered event is generated for the sender on behalf of the unreachable actor. Receiving such a message guarantees that the original event was not processed and no effects took place. Generation and delivery of this notification is guaranteed within the local actor system; in a distributed one it is best-effort and may get lost.
-
-Otherwise, if no flags are set, the message is simply dropped.
-
-Since in a distributed system delivery of non-delivery notifications is not guaranteed, reliable failure handling needs one more mechanism: with the FlagSubscribeOnSession flag, when a message crosses a node boundary the sender is subscribed to a notification about the breakdown of the network session within which the message was sent. When the network session breaks, the sender learns that the message may have been lost (or may have been delivered – we do not know) and can react. Do not forget to unsubscribe from session-break notifications, otherwise they accumulate until the next break (which may be a long time coming).
-
-To summarize: to control delivery within the local actor system, set FlagTrackDelivery and handle TEvUndelivered. For the distributed case, add FlagSubscribeOnSession and additionally handle TEvNodeDisconnected, not forgetting to drop subscriptions that are no longer needed.
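-
-A hedged sketch of the flags above; the request event, the remote actor id and the "just give up" reaction are placeholders, while the flags and the undelivered/disconnect events are the ones named in this section:
-
-```cpp
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/interconnect.h>
-
-using namespace NActors;
-
-// Placeholder request event for the sketch; a real cross-node event would derive from TEventPB<>.
-struct TEvSketchRequest: TEventLocal<TEvSketchRequest, EventSpaceBegin(TEvents::ES_USERSPACE)> {};
-
-class TReliableCaller: public TActorBootstrapped<TReliableCaller> {
-    TActorId RemoteActor;  // may live on another node
-
-public:
-    explicit TReliableCaller(const TActorId& remote)
-        : RemoteActor(remote)
-    {}
-
-    void Bootstrap() {
-        Become(&TReliableCaller::StateWork);
-        // Track non-delivery locally and subscribe to the network session for the remote case.
-        Send(RemoteActor, new TEvSketchRequest(),
-             IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession);
-    }
-
-    STFUNC(StateWork) {
-        switch (ev->GetTypeRewrite()) {
-            hFunc(TEvents::TEvUndelivered, Handle);               // definitely not processed
-            hFunc(TEvInterconnect::TEvNodeDisconnected, Handle);  // unknown: it may or may not have arrived
-        }
-    }
-
-    void Handle(TEvents::TEvUndelivered::TPtr&) {
-        PassAway();  // or retry, depending on the protocol
-    }
-
-    void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr&) {
-        // The session is gone, so the subscription died with it. If the subscription simply became
-        // unnecessary while the session was alive, we would send TEvents::TEvUnsubscribe to it instead.
-        PassAway();
-    }
-};
-```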
-
-### Interconnect.
-The local actor system is only half of the pie; the ability to join several of them into a distributed one is the other half. The interconnect implementation is available out of the box and can:
-Transfer messages over a single TCP connection
-Multiplex different streams (a.k.a. channels) within one connection, guaranteeing ordering within a channel
-It tries to do all of this well.
-Within a distributed system each local actor system must be assigned a unique number (for example via a static table, or by implementing dynamic assignment of node numbers), and each local actor system must run a local name service (for example driven by a table remapping node numbers to network addresses, or acting as a cache of an authoritative name service).
-Now look at the second example: https://a.yandex-team.ru/arcadia/library/cpp/actors/examples/02_discovery
-Here a distributed actor system is configured across five nodes (in the example all five run inside one binary, but they could just as well be run separately). Each node starts a replica for publishing strings and an endpoint actor (each with its own port). The endpoints use a publisher actor to publish their coordinates to the distributed storage (handling abnormal situations and keeping the data up to date). Next to it lies an implementation of a storage query that lists what has been published, by majority. This is essentially a simplified version, cleaned of specifics, of the code YDB uses to publish and discover the current endpoints of a user database.
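-
-Purely illustrative sketch of the "table remapping node numbers to network addresses" idea mentioned above; the struct, names and addresses are assumptions for the example, not the library's name service API:
-
-```cpp
-#include <util/generic/map.h>
-#include <util/generic/string.h>
-#include <util/system/types.h>
-
-struct TNodeAddress {
-    TString Host;
-    ui16 Port;
-};
-
-// Each local actor system gets a unique node number; the local name service resolves it to an address.
-const TMap<ui32, TNodeAddress> StaticNodeTable = {
-    {1, {"node-1.example.net", 2137}},
-    {2, {"node-2.example.net", 2137}},
-    {3, {"node-3.example.net", 2137}},
-};
-```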
diff --git a/library/cpp/actors/actor_type/CMakeLists.darwin-arm64.txt b/library/cpp/actors/actor_type/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 825e96d2d8..0000000000
--- a/library/cpp/actors/actor_type/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-actor_type)
-target_link_libraries(cpp-actors-actor_type PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-util
- cpp-actors-prof
- tools-enum_parser-enum_serialization_runtime
-)
-target_sources(cpp-actors-actor_type PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/indexes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/index_constructor.cpp
-)
-generate_enum_serilization(cpp-actors-actor_type
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.h
- INCLUDE_HEADERS
- library/cpp/actors/actor_type/common.h
-)
diff --git a/library/cpp/actors/actor_type/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/actor_type/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 825e96d2d8..0000000000
--- a/library/cpp/actors/actor_type/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-actor_type)
-target_link_libraries(cpp-actors-actor_type PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-util
- cpp-actors-prof
- tools-enum_parser-enum_serialization_runtime
-)
-target_sources(cpp-actors-actor_type PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/indexes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/index_constructor.cpp
-)
-generate_enum_serilization(cpp-actors-actor_type
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.h
- INCLUDE_HEADERS
- library/cpp/actors/actor_type/common.h
-)
diff --git a/library/cpp/actors/actor_type/CMakeLists.linux-aarch64.txt b/library/cpp/actors/actor_type/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index dc1ebb7a22..0000000000
--- a/library/cpp/actors/actor_type/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-actor_type)
-target_link_libraries(cpp-actors-actor_type PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-util
- cpp-actors-prof
- tools-enum_parser-enum_serialization_runtime
-)
-target_sources(cpp-actors-actor_type PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/indexes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/index_constructor.cpp
-)
-generate_enum_serilization(cpp-actors-actor_type
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.h
- INCLUDE_HEADERS
- library/cpp/actors/actor_type/common.h
-)
diff --git a/library/cpp/actors/actor_type/CMakeLists.linux-x86_64.txt b/library/cpp/actors/actor_type/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index dc1ebb7a22..0000000000
--- a/library/cpp/actors/actor_type/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-actor_type)
-target_link_libraries(cpp-actors-actor_type PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-util
- cpp-actors-prof
- tools-enum_parser-enum_serialization_runtime
-)
-target_sources(cpp-actors-actor_type PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/indexes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/index_constructor.cpp
-)
-generate_enum_serilization(cpp-actors-actor_type
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.h
- INCLUDE_HEADERS
- library/cpp/actors/actor_type/common.h
-)
diff --git a/library/cpp/actors/actor_type/CMakeLists.windows-x86_64.txt b/library/cpp/actors/actor_type/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 825e96d2d8..0000000000
--- a/library/cpp/actors/actor_type/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-actor_type)
-target_link_libraries(cpp-actors-actor_type PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-util
- cpp-actors-prof
- tools-enum_parser-enum_serialization_runtime
-)
-target_sources(cpp-actors-actor_type PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/indexes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/index_constructor.cpp
-)
-generate_enum_serilization(cpp-actors-actor_type
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/actor_type/common.h
- INCLUDE_HEADERS
- library/cpp/actors/actor_type/common.h
-)
diff --git a/library/cpp/actors/actor_type/common.cpp b/library/cpp/actors/actor_type/common.cpp
deleted file mode 100644
index be8569b7f5..0000000000
--- a/library/cpp/actors/actor_type/common.cpp
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "common.h"
-
-namespace NActors {
-
-}
diff --git a/library/cpp/actors/actor_type/common.h b/library/cpp/actors/actor_type/common.h
deleted file mode 100644
index cfda827c3f..0000000000
--- a/library/cpp/actors/actor_type/common.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#pragma once
-namespace NActors {
-
-struct TActorActivityTag {};
-
-}
-
-namespace NActors {
-enum class EInternalActorType {
- OTHER = 0,
- INCORRECT_ACTOR_TYPE_INDEX,
- ACTOR_SYSTEM,
- ACTORLIB_COMMON,
- ACTORLIB_STATS,
- LOG_ACTOR,
- INTERCONNECT_PROXY_TCP,
- INTERCONNECT_SESSION_TCP,
- INTERCONNECT_COMMON,
- SELF_PING_ACTOR,
- TEST_ACTOR_RUNTIME,
- INTERCONNECT_HANDSHAKE,
- INTERCONNECT_POLLER,
- INTERCONNECT_SESSION_KILLER,
- ACTOR_SYSTEM_SCHEDULER_ACTOR,
- ACTOR_FUTURE_CALLBACK,
- INTERCONNECT_MONACTOR,
- INTERCONNECT_LOAD_ACTOR,
- INTERCONNECT_LOAD_RESPONDER,
- NAMESERVICE,
- DNS_RESOLVER,
- INTERCONNECT_PROXY_WRAPPER,
- ACTOR_COROUTINE
-};
-}
diff --git a/library/cpp/actors/actor_type/index_constructor.cpp b/library/cpp/actors/actor_type/index_constructor.cpp
deleted file mode 100644
index 667d3617c3..0000000000
--- a/library/cpp/actors/actor_type/index_constructor.cpp
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "index_constructor.h"
-
-namespace NActors {
-
-}
diff --git a/library/cpp/actors/actor_type/index_constructor.h b/library/cpp/actors/actor_type/index_constructor.h
deleted file mode 100644
index b533643a61..0000000000
--- a/library/cpp/actors/actor_type/index_constructor.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#pragma once
-#include "common.h"
-#include <library/cpp/actors/util/local_process_key.h>
-#include <library/cpp/actors/prof/tag.h>
-
-template <>
-class TLocalProcessKeyStateIndexConstructor<NActors::TActorActivityTag> {
-public:
- static ui32 BuildCurrentIndex(const TStringBuf name, const ui32 /*currentNamesCount*/) {
- return NProfiling::MakeTag(name.data());
- }
-};
diff --git a/library/cpp/actors/actor_type/indexes.cpp b/library/cpp/actors/actor_type/indexes.cpp
deleted file mode 100644
index 628eae08d8..0000000000
--- a/library/cpp/actors/actor_type/indexes.cpp
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "indexes.h"
-
-namespace NActors {
-
-}
diff --git a/library/cpp/actors/actor_type/indexes.h b/library/cpp/actors/actor_type/indexes.h
deleted file mode 100644
index a971847056..0000000000
--- a/library/cpp/actors/actor_type/indexes.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-#include "common.h"
-#include "index_constructor.h"
-#include <library/cpp/actors/util/local_process_key.h>
-
-namespace NActors {
-
-class TActorTypeOperator {
-public:
- static constexpr ui32 GetMaxAvailableActorsCount() {
- return TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount();
- }
-
- template <class TEnum>
- static ui32 GetEnumActivityType(const TEnum enumValue) {
- return TEnumProcessKey<TActorActivityTag, TEnum>::GetIndex(enumValue);
- }
-
- static ui32 GetActorSystemIndex() {
- return TEnumProcessKey<TActorActivityTag, EInternalActorType>::GetIndex(EInternalActorType::ACTOR_SYSTEM);
- }
-
- static ui32 GetOtherActivityIndex() {
- return TEnumProcessKey<TActorActivityTag, EInternalActorType>::GetIndex(EInternalActorType::OTHER);
- }
-
- static ui32 GetActorActivityIncorrectIndex() {
- return TEnumProcessKey<TActorActivityTag, EInternalActorType>::GetIndex(EInternalActorType::INCORRECT_ACTOR_TYPE_INDEX);
- }
-};
-}
diff --git a/library/cpp/actors/actor_type/ya.make b/library/cpp/actors/actor_type/ya.make
deleted file mode 100644
index 736e99cf49..0000000000
--- a/library/cpp/actors/actor_type/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-LIBRARY()
-
-SRCS(
- common.cpp
- indexes.cpp
- index_constructor.cpp
-)
-
-PEERDIR(
- library/cpp/actors/util
- library/cpp/actors/prof
-)
-
-GENERATE_ENUM_SERIALIZATION(common.h)
-
-END()
diff --git a/library/cpp/actors/core/CMakeLists.darwin-arm64.txt b/library/cpp/actors/core/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 2090b64ba1..0000000000
--- a/library/cpp/actors/core/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-core)
-target_link_libraries(cpp-actors-core PUBLIC
- contrib-libs-cxxsupp
- yutil
- tools-enum_parser-enum_serialization_runtime
- cpp-actors-actor_type
- cpp-actors-memory_log
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- library-cpp-execprofile
- cpp-json-writer
- library-cpp-logger
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- library-cpp-svnversion
- library-cpp-time_provider
- cpp-threading-future
-)
-target_sources(cpp-actors-core PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_virtual.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorid.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/av_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/callstack.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/cpu_manager.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/events_undelivered.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_base.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_io.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_thread.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/harmonizer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/interconnect.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/io_dispatcher.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_settings.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mailbox.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic_provider.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/worker_context.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/probes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/process_stats.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_cookie.cpp
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/defs.h
- INCLUDE_HEADERS
- library/cpp/actors/core/defs.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.h
- INCLUDE_HEADERS
- library/cpp/actors/core/actor.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_iface.h
- INCLUDE_HEADERS
- library/cpp/actors/core/log_iface.h
-)
diff --git a/library/cpp/actors/core/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/core/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 2090b64ba1..0000000000
--- a/library/cpp/actors/core/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-core)
-target_link_libraries(cpp-actors-core PUBLIC
- contrib-libs-cxxsupp
- yutil
- tools-enum_parser-enum_serialization_runtime
- cpp-actors-actor_type
- cpp-actors-memory_log
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- library-cpp-execprofile
- cpp-json-writer
- library-cpp-logger
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- library-cpp-svnversion
- library-cpp-time_provider
- cpp-threading-future
-)
-target_sources(cpp-actors-core PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_virtual.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorid.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/av_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/callstack.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/cpu_manager.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/events_undelivered.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_base.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_io.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_thread.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/harmonizer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/interconnect.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/io_dispatcher.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_settings.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mailbox.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic_provider.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/worker_context.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/probes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/process_stats.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_cookie.cpp
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/defs.h
- INCLUDE_HEADERS
- library/cpp/actors/core/defs.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.h
- INCLUDE_HEADERS
- library/cpp/actors/core/actor.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_iface.h
- INCLUDE_HEADERS
- library/cpp/actors/core/log_iface.h
-)
diff --git a/library/cpp/actors/core/CMakeLists.linux-aarch64.txt b/library/cpp/actors/core/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 24152462c9..0000000000
--- a/library/cpp/actors/core/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,102 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-core)
-target_link_libraries(cpp-actors-core PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- tools-enum_parser-enum_serialization_runtime
- cpp-actors-actor_type
- cpp-actors-memory_log
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- library-cpp-execprofile
- cpp-json-writer
- library-cpp-logger
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- library-cpp-svnversion
- library-cpp-time_provider
- cpp-threading-future
-)
-target_sources(cpp-actors-core PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_virtual.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorid.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/av_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/callstack.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/cpu_manager.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/events_undelivered.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_base.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_io.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_thread.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/harmonizer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/interconnect.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/io_dispatcher.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_settings.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mailbox.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic_provider.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/worker_context.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/probes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/process_stats.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_cookie.cpp
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/defs.h
- INCLUDE_HEADERS
- library/cpp/actors/core/defs.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.h
- INCLUDE_HEADERS
- library/cpp/actors/core/actor.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_iface.h
- INCLUDE_HEADERS
- library/cpp/actors/core/log_iface.h
-)
diff --git a/library/cpp/actors/core/CMakeLists.linux-x86_64.txt b/library/cpp/actors/core/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 24152462c9..0000000000
--- a/library/cpp/actors/core/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,102 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-core)
-target_link_libraries(cpp-actors-core PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- tools-enum_parser-enum_serialization_runtime
- cpp-actors-actor_type
- cpp-actors-memory_log
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- library-cpp-execprofile
- cpp-json-writer
- library-cpp-logger
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- library-cpp-svnversion
- library-cpp-time_provider
- cpp-threading-future
-)
-target_sources(cpp-actors-core PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_virtual.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorid.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/av_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/callstack.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/cpu_manager.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/events_undelivered.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_base.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_io.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_thread.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/harmonizer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/interconnect.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/io_dispatcher.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_settings.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mailbox.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic_provider.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/worker_context.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/probes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/process_stats.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_cookie.cpp
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/defs.h
- INCLUDE_HEADERS
- library/cpp/actors/core/defs.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.h
- INCLUDE_HEADERS
- library/cpp/actors/core/actor.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_iface.h
- INCLUDE_HEADERS
- library/cpp/actors/core/log_iface.h
-)
diff --git a/library/cpp/actors/core/CMakeLists.windows-x86_64.txt b/library/cpp/actors/core/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 2090b64ba1..0000000000
--- a/library/cpp/actors/core/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,101 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-get_built_tool_path(
- TOOL_enum_parser_bin
- TOOL_enum_parser_dependency
- tools/enum_parser/enum_parser
- enum_parser
-)
-
-add_library(cpp-actors-core)
-target_link_libraries(cpp-actors-core PUBLIC
- contrib-libs-cxxsupp
- yutil
- tools-enum_parser-enum_serialization_runtime
- cpp-actors-actor_type
- cpp-actors-memory_log
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- library-cpp-execprofile
- cpp-json-writer
- library-cpp-logger
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- library-cpp-svnversion
- library-cpp-time_provider
- cpp-threading-future
-)
-target_sources(cpp-actors-core PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_virtual.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorid.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/av_bootstrapped.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/callstack.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/cpu_manager.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/events_undelivered.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_base.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_io.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_thread.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/harmonizer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/interconnect.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/io_dispatcher.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_settings.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_buffer.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mailbox.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/monotonic_provider.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/worker_context.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/probes.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/process_stats.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_basic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_cookie.cpp
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/defs.h
- INCLUDE_HEADERS
- library/cpp/actors/core/defs.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor.h
- INCLUDE_HEADERS
- library/cpp/actors/core/actor.h
-)
-generate_enum_serilization(cpp-actors-core
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_iface.h
- INCLUDE_HEADERS
- library/cpp/actors/core/log_iface.h
-)
diff --git a/library/cpp/actors/core/README.md b/library/cpp/actors/core/README.md
deleted file mode 100644
index cb5fb9dfbd..0000000000
--- a/library/cpp/actors/core/README.md
+++ /dev/null
@@ -1,51 +0,0 @@
-# ActorSystem
-
-## Sending
-
-A regular send (Send) of a message proceeds as follows:
-
-1) The MailBox is looked up by the recipient
-2) The message is put into the mailbox
-3) We check whether it is the only message in the mailbox; if not, nothing more is done
-4) Otherwise the mailbox is put into the activation queue and, if there are sleeping threads, one of them is woken up
-
-It follows that we always try to wake a thread.
-For example, if two actors keep sending a message back and forth, they will run alternately on different threads.
-
-Yet they could just as well run on a single thread, and most likely that would be more efficient:
-
-* Caches are not lost by switching from one thread to another.
-* Less time is spent waking threads.
-
-For this purpose two other sending modes were added: Send<ESendingType::Lazy> and Send<ESendingType::Tail>.
-
-Send<ESendingType::Lazy> tries to hold on to the mailbox the message was sent to until processing of the current mailbox finishes. It works as follows:
-
-1) The MailBox is looked up by the recipient
-2) The message is put into the mailbox
-3) We check whether it is the only message in the mailbox; if not, nothing more is done
-4) The mailbox is captured
-5) If a mailbox had already been captured, the old one is put into the activation queue and we try to wake a sleeping thread
-6) After processing of the current mailbox finishes, we check whether the activation queue contains an activation. If it does, we take a mailbox from the activation queue and put the captured one into it; if the activation queue was empty, we process the captured mailbox
-
-On the plus side, it may avoid waking a thread and process the messages on the same thread.
-
-On the minus side, if the current mailbox takes a long time to process after Send<ESendingType::Lazy> is used, that time is added to the waiting time of the sent message, because its mailbox is captured by the thread and is not being processed. Also, under heavy load, when the activation queue is always large, sent messages pick up extra latency, since we do not hand the message off immediately but wait until processing of the current mailbox finishes. And because the activation queue is never empty in that case, we put the mailbox into the activation queue with a delay, although we could have done it right away.
-
-It is best used right before an actor's death, when it is known that the actor will not process anything else.
-
-For cases when we do not want to wait for the end of mailbox processing or for the actor's death, and want a guarantee that the sent message is processed on the same thread, Send<ESendingType::Tail> should be used.
-
-After the current message is processed, processing of the mailbox is interrupted and processing of the captured mailbox begins, inheriting the quota the first mailbox was processed with. Thanks to this, two actors sending messages to each other cannot block a thread: at some point the quota runs out, either by time or by the number of processed messages.
-
-Send<ESendingType::Tail> works as follows:
-
-1) The MailBox is looked up by the recipient
-2) The message is put into the mailbox
-3) We check whether it is the only message in the mailbox; if not, nothing more is done
-4) The mailbox is captured
-5) All other message sends behave like a regular Send
-6) After processing of the current message finishes, processing of the mailbox is interrupted and processing of the captured mailbox begins with the old mailbox's quota
-7) When the quota is exhausted, the captured mailbox is handled as in Send<ESendingType::Lazy>
-
-It is needed when it is important to continue the chain of work in the next actor while the caches for the message are still warm. Compared with Send<ESendingType::Lazy>, it is guaranteed to continue processing the message and has no issue with added processing delay. A minimal usage sketch of all three modes follows below.
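Below is a minimal, hypothetical sketch of the three sending modes from an actor's point of view. It only uses primitives that appear later in this patch (TActorBootstrapped, TEvents::TEvPing/TEvPong, STFUNC, the Send<ESendingType::...> overloads); TPingPongActor and its Peer field are illustrative names, not part of the library.

```cpp
#include <library/cpp/actors/core/actor_bootstrapped.h>
#include <library/cpp/actors/core/events.h>

namespace NExample {

using namespace NActors;

class TPingPongActor: public TActorBootstrapped<TPingPongActor> {
    const TActorId Peer;

public:
    explicit TPingPongActor(const TActorId& peer)
        : Peer(peer)
    {}

    void Bootstrap(const TActorContext& ctx) {
        Become(&TPingPongActor::StateFunc);
        // Common send: the peer's mailbox goes to the activation queue right away
        // and a sleeping thread may be woken to process it.
        ctx.Send(Peer, new TEvents::TEvPing());
    }

    STFUNC(StateFunc) {
        switch (ev->GetTypeRewrite()) {
        case TEvents::TEvPong::EventType:
            // Lazy send: the peer's mailbox is held until the current mailbox is
            // drained, so the next hop likely stays on this thread.
            Send<ESendingType::Lazy>(Peer, new TEvents::TEvPing());
            break;
        case TEvents::TEvPing::EventType:
            // Tail send: after this event the thread switches to the captured
            // mailbox, carrying over the remaining time/event quota.
            Send<ESendingType::Tail>(ev->Sender, new TEvents::TEvPong());
            break;
        }
    }
};

} // namespace NExample
```

With Lazy the next hop tends to stay on the current thread once its mailbox is drained; with Tail the thread switches to the captured mailbox right after the current event, carrying over the remaining quota.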
diff --git a/library/cpp/actors/core/actor.cpp b/library/cpp/actors/core/actor.cpp
deleted file mode 100644
index 6d6c92f431..0000000000
--- a/library/cpp/actors/core/actor.cpp
+++ /dev/null
@@ -1,250 +0,0 @@
-#include "actor.h"
-#include "actor_virtual.h"
-#include "actorsystem.h"
-#include "executor_thread.h"
-#include <library/cpp/actors/util/datetime.h>
-
-namespace NActors {
- Y_POD_THREAD(TThreadContext*) TlsThreadContext(nullptr);
- thread_local TActivationContext *TActivationContextHolder::Value = nullptr;
- TActivationContextHolder TlsActivationContext;
-
- [[gnu::noinline]] TActivationContextHolder::operator bool() const {
- asm volatile("");
- return Value != nullptr;
- }
-
- [[gnu::noinline]] TActivationContextHolder::operator TActivationContext*() const {
- asm volatile("");
- return Value;
- }
-
- [[gnu::noinline]] TActivationContext *TActivationContextHolder::operator ->() {
- asm volatile("");
- return Value;
- }
-
- [[gnu::noinline]] TActivationContext& TActivationContextHolder::operator *() {
- asm volatile("");
- return *Value;
- }
-
- [[gnu::noinline]] TActivationContextHolder& TActivationContextHolder::operator =(TActivationContext *context) {
- asm volatile("");
- Value = context;
- return *this;
- }
-
- template<i64 Increment>
- static void UpdateQueueSizeAndTimestamp(TActorUsageImpl<true>& impl, ui64 time) {
- ui64 usedTimeIncrement = 0;
- using T = TActorUsageImpl<true>;
-
- for (;;) {
- uint64_t value = impl.QueueSizeAndTimestamp.load();
- ui64 count = value >> T::TimestampBits;
-
- count += Increment;
- Y_ABORT_UNLESS((count & ~T::CountMask) == 0);
-
- ui64 timestamp = value;
- if (Increment == 1 && count == 1) {
- timestamp = time;
- } else if (Increment == -1 && count == 0) {
- usedTimeIncrement = (static_cast<ui64>(time) - timestamp) & T::TimestampMask;
- timestamp = 0; // reset timestamp to some zero value
- }
-
- const ui64 updated = (timestamp & T::TimestampMask) | (count << T::TimestampBits);
- if (impl.QueueSizeAndTimestamp.compare_exchange_weak(value, updated)) {
- break;
- }
- }
-
- if (usedTimeIncrement && impl.LastUsageTimestamp <= time) {
- impl.UsedTime += usedTimeIncrement;
- }
- }
-
- void TActorUsageImpl<true>::OnEnqueueEvent(ui64 time) {
- UpdateQueueSizeAndTimestamp<+1>(*this, time);
- }
-
- void TActorUsageImpl<true>::OnDequeueEvent() {
- UpdateQueueSizeAndTimestamp<-1>(*this, GetCycleCountFast());
- }
-
- double TActorUsageImpl<true>::GetUsage(ui64 time) {
- ui64 used = UsedTime.exchange(0);
- if (const ui64 value = QueueSizeAndTimestamp.load(); value >> TimestampBits) {
- used += (static_cast<ui64>(time) - value) & TimestampMask;
- }
-
- Y_ABORT_UNLESS(LastUsageTimestamp <= time);
- ui64 passed = time - LastUsageTimestamp;
- LastUsageTimestamp = time;
-
- if (!passed) {
- return 0;
- }
-
- return (double)Min(passed, used) / passed;
- }
-
- void IActor::Describe(IOutputStream &out) const noexcept {
- SelfActorId.Out(out);
- }
-
- bool IActor::Send(TAutoPtr<IEventHandle> ev) const noexcept {
- return TActivationContext::Send(ev);
- }
-
- bool IActor::Send(const TActorId& recipient, IEventBase* ev, ui32 flags, ui64 cookie, NWilson::TTraceId traceId) const noexcept {
- return SelfActorId.Send(recipient, ev, flags, cookie, std::move(traceId));
- }
-
- void TActivationContext::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- TlsActivationContext->ExecutorThread.Schedule(deadline, ev, cookie);
- }
-
- void TActivationContext::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- TlsActivationContext->ExecutorThread.Schedule(deadline, ev, cookie);
- }
-
- void TActivationContext::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- TlsActivationContext->ExecutorThread.Schedule(delta, ev, cookie);
- }
-
- void TActorIdentity::Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie) const {
- return TActivationContext::Schedule(deadline, new IEventHandle(*this, {}, ev), cookie);
- }
-
- void TActorIdentity::Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie) const {
- return TActivationContext::Schedule(deadline, new IEventHandle(*this, {}, ev), cookie);
- }
-
- void TActorIdentity::Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie) const {
- return TActivationContext::Schedule(delta, new IEventHandle(*this, {}, ev), cookie);
- }
-
- TActorId TActivationContext::RegisterWithSameMailbox(IActor* actor, TActorId parentId) {
- Y_DEBUG_ABORT_UNLESS(parentId);
- auto& ctx = *TlsActivationContext;
- return ctx.ExecutorThread.RegisterActor(actor, &ctx.Mailbox, parentId.Hint(), parentId);
- }
-
- TActorId TActorContext::RegisterWithSameMailbox(IActor* actor) const {
- return ExecutorThread.RegisterActor(actor, &Mailbox, SelfID.Hint(), SelfID);
- }
-
- TActorId IActor::RegisterWithSameMailbox(IActor* actor) const noexcept {
- return TlsActivationContext->ExecutorThread.RegisterActor(actor, &TlsActivationContext->Mailbox, SelfActorId.Hint(), SelfActorId);
- }
-
- TActorId TActivationContext::InterconnectProxy(ui32 destinationNodeId) {
- return TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(destinationNodeId);
- }
-
- TActorSystem* TActivationContext::ActorSystem() {
- return TlsActivationContext->ExecutorThread.ActorSystem;
- }
-
- i64 TActivationContext::GetCurrentEventTicks() {
- return GetCycleCountFast() - TlsActivationContext->EventStart;
- }
-
- double TActivationContext::GetCurrentEventTicksAsSeconds() {
- return NHPTimer::GetSeconds(GetCurrentEventTicks());
- }
-
- TActorId IActor::Register(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId) const noexcept {
- return TlsActivationContext->ExecutorThread.RegisterActor(actor, mailboxType, poolId, SelfActorId);
- }
-
- void TActorContext::Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie) const {
- ExecutorThread.Schedule(deadline, new IEventHandle(SelfID, TActorId(), ev), cookie);
- }
-
- void TActorContext::Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie) const {
- ExecutorThread.Schedule(deadline, new IEventHandle(SelfID, TActorId(), ev), cookie);
- }
-
- void TActorContext::Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie) const {
- ExecutorThread.Schedule(delta, new IEventHandle(SelfID, TActorId(), ev), cookie);
- }
-
- void IActor::Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie) const noexcept {
- TlsActivationContext->ExecutorThread.Schedule(deadline, new IEventHandle(SelfActorId, TActorId(), ev), cookie);
- }
-
- void IActor::Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie) const noexcept {
- TlsActivationContext->ExecutorThread.Schedule(deadline, new IEventHandle(SelfActorId, TActorId(), ev), cookie);
- }
-
- void IActor::Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie) const noexcept {
- TlsActivationContext->ExecutorThread.Schedule(delta, new IEventHandle(SelfActorId, TActorId(), ev), cookie);
- }
-
- TInstant TActivationContext::Now() {
- return TlsActivationContext->ExecutorThread.ActorSystem->Timestamp();
- }
-
- TMonotonic TActivationContext::Monotonic() {
- return TlsActivationContext->ExecutorThread.ActorSystem->Monotonic();
- }
-
- TInstant TActorContext::Now() const {
- return ExecutorThread.ActorSystem->Timestamp();
- }
-
- TMonotonic TActorContext::Monotonic() const {
- return ExecutorThread.ActorSystem->Monotonic();
- }
-
- NLog::TSettings* TActivationContext::LoggerSettings() const {
- return ExecutorThread.ActorSystem->LoggerSettings();
- }
-
- std::pair<ui32, ui32> TActorContext::CountMailboxEvents(ui32 maxTraverse) const {
- return Mailbox.CountMailboxEvents(SelfID.LocalId(), maxTraverse);
- }
-
- std::pair<ui32, ui32> IActor::CountMailboxEvents(ui32 maxTraverse) const {
- return TlsActivationContext->Mailbox.CountMailboxEvents(SelfActorId.LocalId(), maxTraverse);
- }
-
- void IActor::Die(const TActorContext& ctx) {
- if (ctx.SelfID)
- Y_ABORT_UNLESS(ctx.SelfID == SelfActorId);
- PassAway();
- }
-
- void IActor::PassAway() {
- auto& cx = *TlsActivationContext;
- cx.ExecutorThread.UnregisterActor(&cx.Mailbox, SelfActorId);
- }
-
- double IActor::GetElapsedTicksAsSeconds() const {
- return NHPTimer::GetSeconds(ElapsedTicks);
- }
-
- void TActorCallbackBehaviour::Receive(IActor* actor, TAutoPtr<IEventHandle>& ev) {
- (actor->*StateFunc)(ev);
- }
-
- void TActorVirtualBehaviour::Receive(IActor* actor, std::unique_ptr<IEventHandle> ev) {
- Y_ABORT_UNLESS(!!ev && ev->GetBase());
- ev->GetBase()->Execute(actor, std::move(ev));
- }
-
- void IActor::Registered(TActorSystem* sys, const TActorId& owner) {
- // fallback to legacy method, do not use it anymore
- if (auto eh = AfterRegister(SelfId(), owner)) {
- if (!TlsThreadContext || TlsThreadContext->SendingType == ESendingType::Common) {
- sys->Send(eh);
- } else {
- sys->SpecificSend(eh);
- }
- }
- }
-}
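The usage-tracking code above packs a 24-bit queue size and a 40-bit truncated timestamp into a single 64-bit atomic that is updated with a compare_exchange_weak retry loop. A standalone sketch of just the packing arithmetic, using the same constants as TActorUsageImpl<true> (declared in actor.h below):

```cpp
#include <cassert>
#include <cstdint>

// Layout mirrors TActorUsageImpl<true>: the low 40 bits hold a truncated
// timestamp, the upper 24 bits hold the number of queued events.
constexpr int TimestampBits = 40;
constexpr std::uint64_t TimestampMask = (std::uint64_t(1) << TimestampBits) - 1;

constexpr std::uint64_t Pack(std::uint64_t count, std::uint64_t timestamp) {
    return (timestamp & TimestampMask) | (count << TimestampBits);
}

constexpr std::uint64_t CountOf(std::uint64_t packed) { return packed >> TimestampBits; }
constexpr std::uint64_t TimeOf(std::uint64_t packed)  { return packed & TimestampMask; }

int main() {
    const std::uint64_t v = Pack(/*count=*/3, /*timestamp=*/0x123456789Aull);
    assert(CountOf(v) == 3);
    assert(TimeOf(v) == 0x123456789Aull);
    return 0;
}
```

In the real code the packed value lives in the QueueSizeAndTimestamp atomic, and UpdateQueueSizeAndTimestamp retries the exchange until the increment or decrement is applied without losing a concurrent update.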
diff --git a/library/cpp/actors/core/actor.h b/library/cpp/actors/core/actor.h
deleted file mode 100644
index 96a8f5d05a..0000000000
--- a/library/cpp/actors/core/actor.h
+++ /dev/null
@@ -1,999 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "event.h"
-#include "executor_thread.h"
-#include "monotonic.h"
-#include "thread_context.h"
-
-#include <library/cpp/actors/actor_type/indexes.h>
-#include <library/cpp/actors/util/local_process_key.h>
-
-#include <util/system/tls.h>
-#include <util/generic/noncopyable.h>
-
-namespace NActors {
- class TActorSystem;
- class TMailboxTable;
- struct TMailboxHeader;
-
- class TExecutorThread;
- class IActor;
- class ISchedulerCookie;
- class IExecutorPool;
-
- namespace NLog {
- struct TSettings;
- }
-
- struct TActorContext;
- struct TActivationContext;
-
- class TActivationContextHolder {
- static thread_local TActivationContext *Value;
-
- public:
- [[gnu::noinline]] operator bool() const;
- [[gnu::noinline]] operator TActivationContext*() const;
- [[gnu::noinline]] TActivationContext *operator ->();
- [[gnu::noinline]] TActivationContext& operator *();
- [[gnu::noinline]] TActivationContextHolder& operator=(TActivationContext *context);
- };
-
- extern TActivationContextHolder TlsActivationContext;
-
- struct TActivationContext {
- public:
- TMailboxHeader& Mailbox;
- TExecutorThread& ExecutorThread;
- const NHPTimer::STime EventStart;
-
- protected:
- explicit TActivationContext(TMailboxHeader& mailbox, TExecutorThread& executorThread, NHPTimer::STime eventStart)
- : Mailbox(mailbox)
- , ExecutorThread(executorThread)
- , EventStart(eventStart)
- {
- }
-
- public:
- template <ESendingType SendingType = ESendingType::Common>
- static bool Send(TAutoPtr<IEventHandle> ev);
-
- template <ESendingType SendingType = ESendingType::Common>
- static bool Send(std::unique_ptr<IEventHandle> &&ev);
-
- template <ESendingType SendingType = ESendingType::Common>
- static bool Forward(TAutoPtr<IEventHandle>& ev, const TActorId& recipient);
-
- template <ESendingType SendingType = ESendingType::Common>
- static bool Forward(THolder<IEventHandle>& ev, const TActorId& recipient);
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the wallclock time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- static void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
- static void Schedule(TInstant deadline, std::unique_ptr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) {
- return Schedule(deadline, TAutoPtr<IEventHandle>(ev.release()), cookie);
- }
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the monotonic time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- static void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
- static void Schedule(TMonotonic deadline, std::unique_ptr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) {
- return Schedule(deadline, TAutoPtr<IEventHandle>(ev.release()), cookie);
- }
-
- /**
- * Schedule a one-shot event that will be sent after the given delay.
- *
- * @param delta the time from now to delay event sending
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- static void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
- static void Schedule(TDuration delta, std::unique_ptr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) {
- return Schedule(delta, TAutoPtr<IEventHandle>(ev.release()), cookie);
- }
-
- static TInstant Now();
- static TMonotonic Monotonic();
- NLog::TSettings* LoggerSettings() const;
-
- // register new actor in ActorSystem on new fresh mailbox.
- template <ESendingType SendingType = ESendingType::Common>
- static TActorId Register(IActor* actor, TActorId parentId = TActorId(), TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>());
-
- // Register new actor in ActorSystem on same _mailbox_ as current actor.
- // There is one thread per mailbox to execute actors, which means
- // no _cpu core scalability_ for such actors.
- // This method of registration can be useful if multiple actors share
- // some memory.
- static TActorId RegisterWithSameMailbox(IActor* actor, TActorId parentId);
-
- static const TActorContext& AsActorContext();
- static TActorContext ActorContextFor(TActorId id);
-
- static TActorId InterconnectProxy(ui32 nodeid);
- static TActorSystem* ActorSystem();
-
- static i64 GetCurrentEventTicks();
- static double GetCurrentEventTicksAsSeconds();
- };
-
- struct TActorContext: public TActivationContext {
- const TActorId SelfID;
- using TEventFlags = IEventHandle::TEventFlags;
- explicit TActorContext(TMailboxHeader& mailbox, TExecutorThread& executorThread, NHPTimer::STime eventStart, const TActorId& selfID)
- : TActivationContext(mailbox, executorThread, eventStart)
- , SelfID(selfID)
- {
- }
-
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const;
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(const TActorId& recipient, THolder<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send<SendingType>(recipient, ev.Release(), flags, cookie, std::move(traceId));
- }
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(const TActorId& recipient, std::unique_ptr<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send<SendingType>(recipient, ev.release(), flags, cookie, std::move(traceId));
- }
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(TAutoPtr<IEventHandle> ev) const;
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(std::unique_ptr<IEventHandle> &&ev) const {
- return Send<SendingType>(TAutoPtr<IEventHandle>(ev.release()));
- }
- template <ESendingType SendingType = ESendingType::Common>
- bool Forward(TAutoPtr<IEventHandle>& ev, const TActorId& recipient) const;
- template <ESendingType SendingType = ESendingType::Common>
- bool Forward(THolder<IEventHandle>& ev, const TActorId& recipient) const;
-
- TInstant Now() const;
- TMonotonic Monotonic() const;
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the wallclock time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- void Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the monotonic time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- void Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
-
- /**
- * Schedule a one-shot event that will be sent after the given delay.
- *
- * @param delta the time from now to delay event sending
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- void Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
-
- TActorContext MakeFor(const TActorId& otherId) const {
- return TActorContext(Mailbox, ExecutorThread, EventStart, otherId);
- }
-
- // register new actor in ActorSystem on new fresh mailbox.
- template <ESendingType SendingType = ESendingType::Common>
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>()) const;
-
- // Register new actor in ActorSystem on same _mailbox_ as current actor.
- // There is one thread per mailbox to execute actors, which means
- // no _cpu core scalability_ for such actors.
- // This method of registration can be useful if multiple actors share
- // some memory.
- TActorId RegisterWithSameMailbox(IActor* actor) const;
-
- std::pair<ui32, ui32> CountMailboxEvents(ui32 maxTraverse = Max<ui32>()) const;
- };
-
- struct TActorIdentity: public TActorId {
- using TEventFlags = IEventHandle::TEventFlags;
- explicit TActorIdentity(TActorId actorId)
- : TActorId(actorId)
- {
- }
-
- void operator=(TActorId actorId) {
- *this = TActorIdentity(actorId);
- }
-
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const;
- bool SendWithContinuousExecution(const TActorId& recipient, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const;
- void Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
- void Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
- void Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const;
- };
-
- class IActor;
-
- class IActorOps : TNonCopyable {
- public:
- virtual void Describe(IOutputStream&) const noexcept = 0;
- virtual bool Send(const TActorId& recipient, IEventBase*, IEventHandle::TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const noexcept = 0;
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the wallclock time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- virtual void Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept = 0;
-
- /**
- * Schedule a one-shot event that will be sent at the given time point in the future.
- *
- * @param deadline the monotonic time point in the future when the event must be sent
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- virtual void Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept = 0;
-
- /**
- * Schedule a one-shot event that will be sent after the given delay.
- *
- * @param delta the time from now to delay event sending
- * @param ev the event to send
- * @param cookie cookie that will be piggybacked with event
- */
- virtual void Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept = 0;
-
- virtual TActorId Register(IActor*, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>()) const noexcept = 0;
- virtual TActorId RegisterWithSameMailbox(IActor*) const noexcept = 0;
- };
-
- class TDecorator;
-
- class TActorVirtualBehaviour {
- public:
- static void Receive(IActor* actor, std::unique_ptr<IEventHandle> ev);
- public:
- };
-
- class TActorCallbackBehaviour {
- private:
- using TBase = IActor;
- friend class TDecorator;
- public:
- using TReceiveFunc = void (IActor::*)(TAutoPtr<IEventHandle>& ev);
- private:
- TReceiveFunc StateFunc = nullptr;
- public:
- TActorCallbackBehaviour() = default;
- TActorCallbackBehaviour(TReceiveFunc stateFunc)
- : StateFunc(stateFunc) {
- }
- bool Initialized() const {
- return !!StateFunc;
- }
-
- // NOTE: exceptions must not escape the state function, but if an exception hasn't been caught
- // by the actor then we want to crash and see the stack
- void Receive(IActor* actor, TAutoPtr<IEventHandle>& ev);
-
- template <typename T>
- void Become(T stateFunc) {
- StateFunc = static_cast<TReceiveFunc>(stateFunc);
- }
-
- template <typename T, typename... TArgs>
- void Become(T stateFunc, const TActorContext& ctx, TArgs&&... args) {
- StateFunc = static_cast<TReceiveFunc>(stateFunc);
- ctx.Schedule(std::forward<TArgs>(args)...);
- }
-
- TReceiveFunc CurrentStateFunc() const {
- return StateFunc;
- }
-
- };
-
- template<bool>
- struct TActorUsageImpl {
- void OnEnqueueEvent(ui64 /*time*/) {} // called asynchronously when event is put in the mailbox
- void OnDequeueEvent() {} // called when processed by Executor
- double GetUsage(ui64 /*time*/) { return 0; } // called from collector thread
- void DoActorInit() {}
- };
-
- template<>
- struct TActorUsageImpl<true> {
- static constexpr int TimestampBits = 40;
- static constexpr int CountBits = 24;
- static constexpr ui64 TimestampMask = ((ui64)1 << TimestampBits) - 1;
- static constexpr ui64 CountMask = ((ui64)1 << CountBits) - 1;
-
- std::atomic_uint64_t QueueSizeAndTimestamp = 0;
- std::atomic_uint64_t UsedTime = 0; // how much time did we consume since last GetUsage() call
- ui64 LastUsageTimestamp = 0; // when GetUsage() was called the last time
-
- void OnEnqueueEvent(ui64 time);
- void OnDequeueEvent();
- double GetUsage(ui64 time);
- void DoActorInit() { LastUsageTimestamp = GetCycleCountFast(); }
- };
-
- class IActor
- : protected IActorOps
- , public TActorUsageImpl<ActorLibCollectUsageStats>
- {
- private:
- TActorIdentity SelfActorId;
- i64 ElapsedTicks;
- friend void DoActorInit(TActorSystem*, IActor*, const TActorId&, const TActorId&);
- friend class TDecorator;
-
- private: // stuck actor monitoring
- TMonotonic LastReceiveTimestamp;
- size_t StuckIndex = Max<size_t>();
- friend class TExecutorPoolBaseMailboxed;
- friend class TExecutorThread;
-
- IActor(const ui32 activityType)
- : SelfActorId(TActorId())
- , ElapsedTicks(0)
- , ActivityType(activityType)
- , HandledEvents(0) {
- }
-
- protected:
- TActorCallbackBehaviour CImpl;
- public:
- using TEventFlags = IEventHandle::TEventFlags;
- using TReceiveFunc = TActorCallbackBehaviour::TReceiveFunc;
- /// @sa services.proto NKikimrServices::TActivity::EType
- using EActorActivity = EInternalActorType;
- using EActivityType = EActorActivity;
- ui32 ActivityType;
-
- protected:
- ui64 HandledEvents;
-
- template <typename EEnum = EActivityType, typename std::enable_if<std::is_enum<EEnum>::value, bool>::type v = true>
- IActor(const EEnum activityEnumType = EActivityType::OTHER)
- : IActor(TEnumProcessKey<TActorActivityTag, EEnum>::GetIndex(activityEnumType)) {
- }
-
- IActor(TActorCallbackBehaviour&& cImpl, const ui32 activityType)
- : SelfActorId(TActorId())
- , ElapsedTicks(0)
- , CImpl(std::move(cImpl))
- , ActivityType(activityType)
- , HandledEvents(0)
- {
- }
-
- template <typename EEnum = EActivityType, typename std::enable_if<std::is_enum<EEnum>::value, bool>::type v = true>
- IActor(TActorCallbackBehaviour&& cImpl, const EEnum activityEnumType = EActivityType::OTHER)
- : IActor(std::move(cImpl), TEnumProcessKey<TActorActivityTag, EEnum>::GetIndex(activityEnumType)) {
- }
-
- public:
- template <class TEventBase>
- class TEventSenderFromActor: ::TNonCopyable {
- private:
- TEventFlags Flags = 0;
- ui64 Cookie = 0;
- const TActorIdentity SenderId;
- NWilson::TTraceId TraceId = {};
- std::unique_ptr<TEventBase> Event;
- public:
- template <class... Types>
- TEventSenderFromActor(const IActor* owner, Types&&... args)
- : SenderId(owner->SelfId())
- , Event(new TEventBase(std::forward<Types>(args)...)) {
-
- }
-
- TEventSenderFromActor& SetFlags(const TEventFlags flags) {
- Flags = flags;
- return *this;
- }
-
- TEventSenderFromActor& SetCookie(const ui64 value) {
- Cookie = value;
- return *this;
- }
-
- TEventSenderFromActor& SetTraceId(NWilson::TTraceId&& value) {
- TraceId = std::move(value);
- return *this;
- }
-
- bool SendTo(const TActorId& recipient) {
- return SenderId.Send(recipient, Event.release(), Flags, Cookie, std::move(TraceId));
- }
- };
-
- template <class TEvent, class... Types>
- TEventSenderFromActor<TEvent> Sender(Types&&... args) const {
- return TEventSenderFromActor<TEvent>(this, std::forward<Types>(args)...);
- }
-
- virtual ~IActor() {
- } // must not be called for registered actors, see Die method instead
-
- protected:
- virtual void Die(const TActorContext& ctx); // unregisters the actor, so call it exactly once and only from inside message processing
- virtual void PassAway();
-
- protected:
- void SetActivityType(ui32 activityType) {
- ActivityType = activityType;
- }
-
- public:
- class TPassAwayGuard: TMoveOnly {
- private:
- IActor* Owner = nullptr;
- public:
- TPassAwayGuard(TPassAwayGuard&& item) {
- Owner = item.Owner;
- item.Owner = nullptr;
- }
-
- TPassAwayGuard(IActor* owner)
- : Owner(owner)
- {
-
- }
-
- ~TPassAwayGuard() {
- if (Owner) {
- Owner->PassAway();
- }
- }
- };
-
- TPassAwayGuard PassAwayGuard() {
- return TPassAwayGuard(this);
- }
-
- // must be called to wrap any call transitions from one actor to another
- template<typename TActor, typename TMethod, typename... TArgs>
- static std::invoke_result_t<TMethod, TActor, TArgs...> InvokeOtherActor(TActor& actor, TMethod&& method, TArgs&&... args) {
- struct TRecurseContext : TActorContext {
- TActivationContext* const Prev;
-
- TRecurseContext(const TActorId& actorId)
- : TActorContext(TActivationContext::ActorContextFor(actorId))
- , Prev(TlsActivationContext)
- {
- TlsActivationContext = this;
- }
-
- ~TRecurseContext() {
- Y_ABORT_UNLESS(TlsActivationContext == this, "TlsActivationContext mismatch; probably InvokeOtherActor was invoked from a coroutine");
- TlsActivationContext = Prev;
- }
- } context(actor.SelfId());
-
- return std::invoke(std::forward<TMethod>(method), actor, std::forward<TArgs>(args)...);
- }
-
- virtual void Registered(TActorSystem* sys, const TActorId& owner);
-
- virtual TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parentId) {
- Y_UNUSED(self);
- Y_UNUSED(parentId);
- return TAutoPtr<IEventHandle>();
- }
-
- i64 GetElapsedTicks() const {
- return ElapsedTicks;
- }
- double GetElapsedTicksAsSeconds() const;
- void AddElapsedTicks(i64 ticks) {
- ElapsedTicks += ticks;
- }
- ui32 GetActivityType() const {
- return ActivityType;
- }
- ui64 GetHandledEvents() const {
- return HandledEvents;
- }
- TActorIdentity SelfId() const {
- return SelfActorId;
- }
-
- void Receive(TAutoPtr<IEventHandle>& ev) {
- ++HandledEvents;
- LastReceiveTimestamp = TActivationContext::Monotonic();
- if (CImpl.Initialized()) {
- CImpl.Receive(this, ev);
- } else {
- TActorVirtualBehaviour::Receive(this, std::unique_ptr<IEventHandle>(ev.Release()));
- }
- }
-
- TActorContext ActorContext() const {
- return TActivationContext::ActorContextFor(SelfId());
- }
-
- protected:
- void SetEnoughCpu(bool isEnough) {
- if (TlsThreadContext) {
- TlsThreadContext->IsEnoughCpu = isEnough;
- }
- }
-
- void Describe(IOutputStream&) const noexcept override;
- bool Send(TAutoPtr<IEventHandle> ev) const noexcept;
- bool Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const noexcept final;
- bool Send(const TActorId& recipient, THolder<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send(recipient, ev.Release(), flags, cookie, std::move(traceId));
- }
- bool Send(const TActorId& recipient, std::unique_ptr<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send(recipient, ev.release(), flags, cookie, std::move(traceId));
- }
-
- template <class TEvent, class ... TEventArgs>
- bool Send(TActorId recipient, TEventArgs&& ... args) const {
- return Send(recipient, MakeHolder<TEvent>(std::forward<TEventArgs>(args)...));
- }
-
- template <ESendingType SendingType>
- bool Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const;
- template <ESendingType SendingType>
- bool Send(const TActorId& recipient, THolder<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send(recipient, ev.Release(), flags, cookie, std::move(traceId));
- }
- template <ESendingType SendingType>
- bool Send(const TActorId& recipient, std::unique_ptr<IEventBase> ev, TEventFlags flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) const {
- return Send(recipient, ev.release(), flags, cookie, std::move(traceId));
- }
-
- static bool Forward(TAutoPtr<IEventHandle>& ev, const TActorId& recipient) {
- return TActivationContext::Forward(ev, recipient);
- }
-
- static bool Forward(THolder<IEventHandle>& ev, const TActorId& recipient) {
- return TActivationContext::Forward(ev, recipient);
- }
-
- template <typename TEventHandle>
- static bool Forward(TAutoPtr<TEventHandle>& ev, const TActorId& recipient) {
- TAutoPtr<IEventHandle> evi(ev.Release());
- return TActivationContext::Forward(evi, recipient);
- }
-
- void Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept final;
- void Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept final;
- void Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie = nullptr) const noexcept final;
-
- // register new actor in ActorSystem on new fresh mailbox.
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>()) const noexcept final;
-
- template <ESendingType SendingType>
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>()) const noexcept;
-
- // Register new actor in ActorSystem on same _mailbox_ as current actor.
- // There is one thread per mailbox to execute actors, which means
- // no _cpu core scalability_ for such actors.
- // This method of registration can be useful if multiple actors share
- // some memory.
- TActorId RegisterWithSameMailbox(IActor* actor) const noexcept final;
-
- std::pair<ui32, ui32> CountMailboxEvents(ui32 maxTraverse = Max<ui32>()) const;
-
- private:
- void ChangeSelfId(TActorId actorId) {
- SelfActorId = actorId;
- }
- };
-
- inline size_t GetActivityTypeCount() {
- return TLocalProcessKeyState<TActorActivityTag>::GetInstance().GetCount();
- }
-
- inline TStringBuf GetActivityTypeName(size_t index) {
- return TLocalProcessKeyState<TActorActivityTag>::GetInstance().GetNameByIndex(index);
- }
-
- class IActorCallback: public IActor {
- protected:
- template <class TEnum = IActor::EActivityType>
- IActorCallback(TReceiveFunc stateFunc, const TEnum activityType = IActor::EActivityType::OTHER)
- : IActor(TActorCallbackBehaviour(stateFunc), activityType) {
-
- }
-
- IActorCallback(TReceiveFunc stateFunc, const ui32 activityType)
- : IActor(TActorCallbackBehaviour(stateFunc), activityType) {
-
- }
-
- public:
- template <typename T>
- void Become(T stateFunc) {
- CImpl.Become(stateFunc);
- }
-
- template <typename T, typename... TArgs>
- void Become(T stateFunc, const TActorContext& ctx, TArgs&&... args) {
- CImpl.Become(stateFunc, ctx, std::forward<TArgs>(args)...);
- }
-
- template <typename T, typename... TArgs>
- void Become(T stateFunc, TArgs&&... args) {
- CImpl.Become(stateFunc);
- Schedule(std::forward<TArgs>(args)...);
- }
-
- TReceiveFunc CurrentStateFunc() const {
- return CImpl.CurrentStateFunc();
- }
- };
-
- template <typename TDerived>
- class TActor: public IActorCallback {
- private:
- using TDerivedReceiveFunc = void (TDerived::*)(TAutoPtr<IEventHandle>& ev);
-
- template <typename T, typename = const char*>
- struct HasActorName: std::false_type {};
- template <typename T>
- struct HasActorName<T, decltype((void)T::ActorName, (const char*)nullptr)>: std::true_type {};
-
- template <typename T, typename = const char*>
- struct HasActorActivityType: std::false_type {};
- template <typename T>
- struct HasActorActivityType<T, decltype((void)T::ActorActivityType, (const char*)nullptr)>: std::true_type {};
-
- static ui32 GetActivityTypeIndexImpl() {
- if constexpr(HasActorName<TDerived>::value) {
- return TLocalProcessKey<TActorActivityTag, TDerived::ActorName>::GetIndex();
- } else if constexpr (HasActorActivityType<TDerived>::value) {
- using TActorActivity = decltype(((TDerived*)nullptr)->ActorActivityType());
- static_assert(std::is_enum<TActorActivity>::value);
- return TEnumProcessKey<TActorActivityTag, TActorActivity>::GetIndex(TDerived::ActorActivityType());
- } else {
- // 200 characters is the limit for a Solomon metric tag length
- return TLocalProcessExtKey<TActorActivityTag, TDerived, 200>::GetIndex();
- }
- }
-
- static ui32 GetActivityTypeIndex() {
- static const ui32 result = GetActivityTypeIndexImpl();
- return result;
- }
-
- protected:
- // static constexpr char ActorName[] = "UNNAMED";
-
- TActor(TDerivedReceiveFunc func)
- : IActorCallback(static_cast<TReceiveFunc>(func), GetActivityTypeIndex()) {
- }
-
- template <class TEnum = EActivityType>
- TActor(TDerivedReceiveFunc func, const TEnum activityEnumType = EActivityType::OTHER)
- : IActorCallback(static_cast<TReceiveFunc>(func), activityEnumType) {
- }
-
- TActor(TDerivedReceiveFunc func, const TString& actorName)
- : IActorCallback(static_cast<TReceiveFunc>(func), TLocalProcessKeyState<TActorActivityTag>::GetInstance().Register(actorName)) {
- }
-
- public:
- typedef TDerived TThis;
-
- // UnsafeBecome methods don't verify the bindings of the stateFunc to the TDerived
- template <typename T>
- void UnsafeBecome(T stateFunc) {
- this->IActorCallback::Become(stateFunc);
- }
-
- template <typename T, typename... TArgs>
- void UnsafeBecome(T stateFunc, const TActorContext& ctx, TArgs&&... args) {
- this->IActorCallback::Become(stateFunc, ctx, std::forward<TArgs>(args)...);
- }
-
- template <typename T, typename... TArgs>
- void UnsafeBecome(T stateFunc, TArgs&&... args) {
- this->IActorCallback::Become(stateFunc, std::forward<TArgs>(args)...);
- }
-
- template <typename T>
- void Become(T stateFunc) {
- // TODO(kruall): have to uncomment asserts after end of sync contrib/ydb
- // static_assert(std::is_convertible_v<T, TDerivedReceiveFunc>);
- this->IActorCallback::Become(stateFunc);
- }
-
- template <typename T, typename... TArgs>
- void Become(T stateFunc, const TActorContext& ctx, TArgs&&... args) {
- // static_assert(std::is_convertible_v<T, TDerivedReceiveFunc>);
- this->IActorCallback::Become(stateFunc, ctx, std::forward<TArgs>(args)...);
- }
-
- template <typename T, typename... TArgs>
- void Become(T stateFunc, TArgs&&... args) {
- // static_assert(std::is_convertible_v<T, TDerivedReceiveFunc>);
- this->IActorCallback::Become(stateFunc, std::forward<TArgs>(args)...);
- }
- };
-
-
-#define STFUNC_SIG TAutoPtr<::NActors::IEventHandle>& ev
-#define STATEFN_SIG TAutoPtr<::NActors::IEventHandle>& ev
-#define STFUNC(funcName) void funcName(TAutoPtr<::NActors::IEventHandle>& ev)
-#define STATEFN(funcName) void funcName(TAutoPtr<::NActors::IEventHandle>& ev)
-
-#define STFUNC_STRICT_UNHANDLED_MSG_HANDLER Y_DEBUG_ABORT_UNLESS(false, "%s: unexpected message type 0x%08" PRIx32, __func__, etype);
-
-#define STFUNC_BODY(HANDLERS, UNHANDLED_MSG_HANDLER) \
- switch (const ui32 etype = ev->GetTypeRewrite()) { \
- HANDLERS \
- default: \
- UNHANDLED_MSG_HANDLER \
- }
-
-#define STRICT_STFUNC_BODY(HANDLERS) STFUNC_BODY(HANDLERS, STFUNC_STRICT_UNHANDLED_MSG_HANDLER)
-
-#define STRICT_STFUNC(NAME, HANDLERS) \
- void NAME(STFUNC_SIG) { \
- STRICT_STFUNC_BODY(HANDLERS) \
- }
-
-#define STRICT_STFUNC_EXC(NAME, HANDLERS, EXCEPTION_HANDLERS) \
- void NAME(STFUNC_SIG) { \
- try { \
- STRICT_STFUNC_BODY(HANDLERS) \
- } \
- EXCEPTION_HANDLERS \
- }
-
- inline const TActorContext& TActivationContext::AsActorContext() {
- TActivationContext* tls = TlsActivationContext;
- return *static_cast<TActorContext*>(tls);
- }
-
- inline TActorContext TActivationContext::ActorContextFor(TActorId id) {
- auto& tls = *TlsActivationContext;
- return TActorContext(tls.Mailbox, tls.ExecutorThread, tls.EventStart, id);
- }
-
- class TDecorator : public IActorCallback {
- protected:
- THolder<IActor> Actor;
-
- public:
- TDecorator(THolder<IActor>&& actor)
- : IActorCallback(static_cast<TReceiveFunc>(&TDecorator::State), actor->GetActivityType())
- , Actor(std::move(actor))
- {
- }
-
- void Registered(TActorSystem* sys, const TActorId& owner) override {
- Actor->ChangeSelfId(SelfId());
- Actor->Registered(sys, owner);
- }
-
- virtual bool DoBeforeReceiving(TAutoPtr<IEventHandle>& /*ev*/, const TActorContext& /*ctx*/) {
- return true;
- }
-
- virtual void DoAfterReceiving(const TActorContext& /*ctx*/)
- {
- }
-
- STFUNC(State) {
- auto ctx(ActorContext());
- if (DoBeforeReceiving(ev, ctx)) {
- Actor->Receive(ev);
- DoAfterReceiving(ctx);
- }
- }
- };
-
- // TTestDecorator doesn't work with the real actor system
- struct TTestDecorator : public TDecorator {
- TTestDecorator(THolder<IActor>&& actor)
- : TDecorator(std::move(actor))
- {
- }
-
- virtual ~TTestDecorator() = default;
-
- // This method must be called in the test actor system
- bool BeforeSending(TAutoPtr<IEventHandle>& ev)
- {
- bool send = true;
- TTestDecorator *decorator = dynamic_cast<TTestDecorator*>(Actor.Get());
- if (decorator) {
- send = decorator->BeforeSending(ev);
- }
- return send && ev && DoBeforeSending(ev);
- }
-
- virtual bool DoBeforeSending(TAutoPtr<IEventHandle>& /*ev*/) {
- return true;
- }
- };
-
-
- template <ESendingType SendingType>
- bool TExecutorThread::Send(TAutoPtr<IEventHandle> ev) {
-#ifdef USE_ACTOR_CALLSTACK
- do {
- (ev)->Callstack = TCallstack::GetTlsCallstack();
- (ev)->Callstack.Trace();
- } while (false);
-#endif
- Ctx.IncrementSentEvents();
- return ActorSystem->Send<SendingType>(ev);
- }
-
- template <ESendingType SendingType>
- TActorId TExecutorThread::RegisterActor(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId,
- TActorId parentId)
- {
- if (!parentId) {
- parentId = CurrentRecipient;
- }
- if (poolId == Max<ui32>()) {
- if constexpr (SendingType == ESendingType::Common) {
- return Ctx.Executor->Register(actor, mailboxType, ++RevolvingWriteCounter, parentId);
- } else if (!TlsThreadContext) {
- return Ctx.Executor->Register(actor, mailboxType, ++RevolvingWriteCounter, parentId);
- } else {
- ESendingType previousType = std::exchange(TlsThreadContext->SendingType, SendingType);
- TActorId id = Ctx.Executor->Register(actor, mailboxType, ++RevolvingWriteCounter, parentId);
- TlsThreadContext->SendingType = previousType;
- return id;
- }
- } else {
- return ActorSystem->Register<SendingType>(actor, mailboxType, poolId, ++RevolvingWriteCounter, parentId);
- }
- }
-
- template <ESendingType SendingType>
- TActorId TExecutorThread::RegisterActor(IActor* actor, TMailboxHeader* mailbox, ui32 hint, TActorId parentId) {
- if (!parentId) {
- parentId = CurrentRecipient;
- }
- if constexpr (SendingType == ESendingType::Common) {
- return Ctx.Executor->Register(actor, mailbox, hint, parentId);
- } else if (!TlsActivationContext) {
- return Ctx.Executor->Register(actor, mailbox, hint, parentId);
- } else {
- ESendingType previousType = std::exchange(TlsThreadContext->SendingType, SendingType);
- TActorId id = Ctx.Executor->Register(actor, mailbox, hint, parentId);
- TlsThreadContext->SendingType = previousType;
- return id;
- }
- }
-
-
- template <ESendingType SendingType>
- bool TActivationContext::Send(TAutoPtr<IEventHandle> ev) {
- return TlsActivationContext->ExecutorThread.Send<SendingType>(ev);
- }
-
- template <ESendingType SendingType>
- bool TActivationContext::Send(std::unique_ptr<IEventHandle> &&ev) {
- return TlsActivationContext->ExecutorThread.Send<SendingType>(ev.release());
- }
-
- template <ESendingType SendingType>
- bool TActivationContext::Forward(TAutoPtr<IEventHandle>& ev, const TActorId& recipient) {
- return Send(IEventHandle::Forward(ev, recipient));
- }
-
- template <ESendingType SendingType>
- bool TActivationContext::Forward(THolder<IEventHandle>& ev, const TActorId& recipient) {
- return Send(IEventHandle::Forward(ev, recipient));
- }
-
- template <ESendingType SendingType>
- bool TActorContext::Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags, ui64 cookie, NWilson::TTraceId traceId) const {
- return Send<SendingType>(new IEventHandle(recipient, SelfID, ev, flags, cookie, nullptr, std::move(traceId)));
- }
-
- template <ESendingType SendingType>
- bool TActorContext::Send(TAutoPtr<IEventHandle> ev) const {
- return ExecutorThread.Send<SendingType>(ev);
- }
-
- template <ESendingType SendingType>
- bool TActorContext::Forward(TAutoPtr<IEventHandle>& ev, const TActorId& recipient) const {
- return ExecutorThread.Send<SendingType>(IEventHandle::Forward(ev, recipient));
- }
-
- template <ESendingType SendingType>
- bool TActorContext::Forward(THolder<IEventHandle>& ev, const TActorId& recipient) const {
- return ExecutorThread.Send<SendingType>(IEventHandle::Forward(ev, recipient));
- }
-
- template <ESendingType SendingType>
- TActorId TActivationContext::Register(IActor* actor, TActorId parentId, TMailboxType::EType mailboxType, ui32 poolId) {
- return TlsActivationContext->ExecutorThread.RegisterActor<SendingType>(actor, mailboxType, poolId, parentId);
- }
-
- template <ESendingType SendingType>
- TActorId TActorContext::Register(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId) const {
- return ExecutorThread.RegisterActor<SendingType>(actor, mailboxType, poolId, SelfID);
- }
-
- template <ESendingType SendingType>
- bool TActorIdentity::Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags, ui64 cookie, NWilson::TTraceId traceId) const {
- return TActivationContext::Send<SendingType>(new IEventHandle(recipient, *this, ev, flags, cookie, nullptr, std::move(traceId)));
- }
-
- template <ESendingType SendingType>
- bool IActor::Send(const TActorId& recipient, IEventBase* ev, TEventFlags flags, ui64 cookie, NWilson::TTraceId traceId) const {
- return SelfActorId.Send<SendingType>(recipient, ev, flags, cookie, std::move(traceId));
- }
-
- template <ESendingType SendingType>
- TActorId IActor::Register(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId) const noexcept {
- Y_ABORT_UNLESS(actor);
- return TlsActivationContext->ExecutorThread.RegisterActor<SendingType>(actor, mailboxType, poolId, SelfActorId);
- }
-
-
- template <ESendingType SendingType>
- TActorId TActorSystem::Register(IActor* actor, TMailboxType::EType mailboxType, ui32 executorPool,
- ui64 revolvingCounter, const TActorId& parentId) {
- Y_ABORT_UNLESS(actor);
- Y_ABORT_UNLESS(executorPool < ExecutorPoolCount, "executorPool# %" PRIu32 ", ExecutorPoolCount# %" PRIu32,
- (ui32)executorPool, (ui32)ExecutorPoolCount);
- if constexpr (SendingType == ESendingType::Common) {
- return CpuManager->GetExecutorPool(executorPool)->Register(actor, mailboxType, revolvingCounter, parentId);
- } else if (!TlsThreadContext) {
- return CpuManager->GetExecutorPool(executorPool)->Register(actor, mailboxType, revolvingCounter, parentId);
- } else {
- ESendingType previousType = std::exchange(TlsThreadContext->SendingType, SendingType);
- TActorId id = CpuManager->GetExecutorPool(executorPool)->Register(actor, mailboxType, revolvingCounter, parentId);
- TlsThreadContext->SendingType = previousType;
- return id;
- }
- }
-
- template <ESendingType SendingType>
- bool TActorSystem::Send(TAutoPtr<IEventHandle> ev) const {
- if constexpr (SendingType == ESendingType::Common) {
- return this->GenericSend< &IExecutorPool::Send>(ev);
- } else {
- return this->SpecificSend(ev, SendingType);
- }
- }
-
-}
-
-template <>
-inline void Out<NActors::TActorIdentity>(IOutputStream& o, const NActors::TActorIdentity& x) {
- return x.Out(o);
-}
-
-template <>
-struct THash<NActors::TActorIdentity> {
- inline ui64 operator()(const NActors::TActorIdentity& x) const {
- return x.Hash();
- }
-};
-
-template<> struct std::hash<NActors::TActorIdentity> : THash<NActors::TActorIdentity> {};
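To show how the pieces declared above fit together, here is a hypothetical actor built on TActor<TDerived> and the STRICT_STFUNC macro; TEchoActor and ECHO_ACTOR are illustrative names. Because ActorName is defined, the activity type index is derived from that name (see GetActivityTypeIndexImpl).

```cpp
#include <library/cpp/actors/core/actor.h>
#include <library/cpp/actors/core/events.h>

namespace NExample {

using namespace NActors;

class TEchoActor: public TActor<TEchoActor> {
public:
    // Picked up via HasActorName<TDerived> and used as the activity type name.
    static constexpr char ActorName[] = "ECHO_ACTOR";

    TEchoActor()
        : TActor(&TEchoActor::StateFunc)
    {}

private:
    // STRICT_STFUNC expands into a switch over ev->GetTypeRewrite() and aborts
    // (in debug builds) on any event type that has no handler.
    STRICT_STFUNC(StateFunc,
        case TEvents::TEvPing::EventType: {
            Send(ev->Sender, new TEvents::TEvPong()); // reply to the sender
            PassAway();                               // unregister this actor
            break;
        }
    )
};

} // namespace NExample
```

Such an actor would then be started through one of the Register overloads above, for example ctx.Register(new NExample::TEchoActor()).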
diff --git a/library/cpp/actors/core/actor_benchmark_helper.h b/library/cpp/actors/core/actor_benchmark_helper.h
deleted file mode 100644
index 7e9651eb35..0000000000
--- a/library/cpp/actors/core/actor_benchmark_helper.h
+++ /dev/null
@@ -1,763 +0,0 @@
-#include "actor.h"
-#include "events.h"
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "scheduler_basic.h"
-#include "actor_bootstrapped.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/actors/util/threadparkpad.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/threading/chunk_queue/queue.h>
-
-#include <util/generic/algorithm.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/rwlock.h>
-#include <util/system/hp_timer.h>
-#include <vector>
-
-namespace NActors::NTests {
-
-struct TTestEndDecorator : TDecorator {
- TThreadParkPad* Pad;
- TAtomic* ActorsAlive;
-
- TTestEndDecorator(THolder<IActor>&& actor, TThreadParkPad* pad, TAtomic* actorsAlive)
- : TDecorator(std::move(actor))
- , Pad(pad)
- , ActorsAlive(actorsAlive)
- {
- AtomicIncrement(*ActorsAlive);
- }
-
- ~TTestEndDecorator() {
- if (AtomicDecrement(*ActorsAlive) == 0) {
- Pad->Unpark();
- }
- }
-};
-
-
-struct TActorBenchmarkSettings {
- static constexpr bool DefaultNoRealtime = true;
- static constexpr ui32 DefaultSpinThreshold = 1'000'000;
- static constexpr ui32 TotalEventsAmountPerThread = 1'000;
-
- static constexpr auto MailboxTypes = {
- TMailboxType::Simple,
- TMailboxType::Revolving,
- TMailboxType::HTSwap,
- TMailboxType::ReadAsFilled,
- TMailboxType::TinyReadAsFilled
- };
-};
-
-
-template <typename TSettings_ = TActorBenchmarkSettings>
-struct TActorBenchmark {
- using TSettings = TSettings_;
-
- class TDummyActor : public TActor<TDummyActor> {
- public:
- TDummyActor() : TActor<TDummyActor>(&TDummyActor::StateFunc) {}
- STFUNC(StateFunc) {
- (void)ev;
- }
- };
-
- enum class ERole {
- Leader,
- Follower
- };
-
- struct TEvOwnedPing : TEvents::TEvPing {
- TEvOwnedPing(TActorId owner)
- : TEvPing()
- , Owner(owner)
- {}
-
- TActorId Owner;
- };
-
- struct TEventSharedCounters {
- TEventSharedCounters(ui32 count)
- : NotStarted(count)
- , Finished(0)
- , Counters(count)
- , StartedCounters(count)
- , EndedCounters(count)
- {
- for (ui32 idx = 0; idx < count; ++idx) {
- Counters[idx].store(0);
- StartedCounters[idx].store(0);
- EndedCounters[idx].store(0);
- }
- }
-
- std::atomic<ui64> NotStarted = 0;
- std::atomic<ui64> Finished = 0;
- std::vector<NThreading::TPadded<std::atomic<ui64>>> Counters;
- std::vector<NThreading::TPadded<std::atomic<ui64>>> StartedCounters;
- std::vector<NThreading::TPadded<std::atomic<ui64>>> EndedCounters;
- std::atomic<ui64> StartTimeTs = 0;
- std::atomic<ui64> EndTimeTs = 0;
- std::atomic<bool> DoStop = false;
- };
-
- struct TSendReceiveActorParams {
- ui64 OwnEvents = 0;
- ui64 OtherEvents = 0;
- bool EndlessSending = false;
- double *ElapsedTime = nullptr;
- std::vector<TActorId> Receivers;
- bool Allocation = false;
- ESendingType SendingType = ESendingType::Common;
- ui32 Neighbours = 0;
- TEventSharedCounters *SharedCounters;
- ui32 InFlight = 1;
- };
-
- class TSendReceiveActor : public TActorBootstrapped<TSendReceiveActor> {
- public:
- static constexpr auto ActorActivityType() {
- return IActorCallback::EActivityType::ACTORLIB_COMMON;
- }
-
- TSendReceiveActor(const TSendReceiveActorParams &params, ui32 idx=0)
- : OwnEventsCounter(params.OwnEvents)
- , OtherEventsCounter(params.OtherEvents)
- , ElapsedTime(params.ElapsedTime)
- , Receivers(params.Receivers)
- , AllocatesMemory(params.Allocation)
- , SendingType(params.SendingType)
- , MailboxNeighboursCount(params.Neighbours)
- , SharedCounters(params.SharedCounters)
- , PairIdx(idx)
- , EndlessSending(params.EndlessSending)
- , IsLeader(OwnEventsCounter)
- , InFlight(params.InFlight)
- {}
-
- void StoreCounters(std::vector<NThreading::TPadded<std::atomic<ui64>>> &dest) {
- for (ui32 idx = 0; idx < dest.size(); ++idx) {
- dest[idx].store(SharedCounters->Counters[idx]);
- }
- }
-
- void Bootstrap(const TActorContext &ctx) {
- if (SharedCounters && IsLeader) {
- ui32 count = --SharedCounters->NotStarted;
- if (!count) {
- SharedCounters->StartTimeTs = GetCycleCountFast();
- StoreCounters(SharedCounters->StartedCounters);
- }
- }
- if (Receivers.empty() && OwnEventsCounter) {
- Receivers.push_back(this->SelfId());
- }
- Timer.Reset();
- this->Become(&TSendReceiveActor::StateFunc);
- for (ui32 i = 0; i < MailboxNeighboursCount; ++i) {
- ctx.RegisterWithSameMailbox(new TDummyActor());
- }
- for (TActorId receiver : Receivers) {
- for (ui32 eventIdx = 0; eventIdx < InFlight; ++eventIdx) {
- TAutoPtr<IEventHandle> ev = new IEventHandle(receiver, this->SelfId(), new TEvOwnedPing(this->SelfId()));
- SpecialSend(ev, ctx, true);
- }
- }
- }
-
- void SpecialSend(TAutoPtr<IEventHandle> ev, const TActorContext &ctx, bool own) {
- EventsCounter++;
- if (own) {
- --OwnEventsCounter;
- }
- if (SendingType == ESendingType::Lazy) {
- ctx.Send<ESendingType::Lazy>(ev);
- } else if (SendingType == ESendingType::Tail) {
- ctx.Send<ESendingType::Tail>(ev);
- } else {
- ctx.Send(ev);
- }
- }
-
- void Stop() {
- if (SharedCounters && IsLeader) {
- if (!SharedCounters->NotStarted++) {
- StoreCounters(SharedCounters->EndedCounters);
- SharedCounters->EndTimeTs = GetCycleCountFast();
- }
- }
- if (ElapsedTime != nullptr) {
- if (Receivers.size() && Receivers[0] != this->SelfId()) {
- *ElapsedTime = Timer.Passed() / EventsCounter;
- } else {
- *ElapsedTime = Timer.Passed() * 2 / EventsCounter;
- }
- }
- this->PassAway();
- }
-
- bool CheckWorkIsDone() {
- if (OwnEventsCounter || OtherEventsCounter || EndlessSending) {
- return false;
- }
- Stop();
- return true;
- }
-
- STFUNC(StateFunc) {
- ++EventsCounter;
- ui32 counter = ++ReceiveTurn;
- if (SharedCounters) {
- if (counter % 128 == 0) {
- if (IsLeader) {
- SharedCounters->Counters[PairIdx].store(EventsCounter);
- }
- if (SharedCounters->DoStop) {
- Stop();
- return;
- }
- }
- }
- bool own = ev->Get<TEvOwnedPing>()->Owner == this->SelfId();
- if (!own) {
- --OtherEventsCounter;
- }
- if (CheckWorkIsDone())
- return;
-
- auto ctx(this->ActorContext());
- if (AllocatesMemory) {
- SpecialSend(new IEventHandle(ev->Sender, this->SelfId(), new TEvOwnedPing(ev->Get<TEvOwnedPing>()->Owner)), ctx, own);
- } else {
- std::swap(*const_cast<TActorId*>(&ev->Sender), *const_cast<TActorId*>(&ev->Recipient));
- ev->DropRewrite();
- SpecialSend(ev, ctx, own);
- }
-
- CheckWorkIsDone();
- }
-
- private:
- THPTimer Timer;
- ui64 OwnEventsCounter;
- ui64 OtherEventsCounter;
- double* ElapsedTime;
- std::vector<TActorId> Receivers;
- bool AllocatesMemory;
- ESendingType SendingType;
- ui32 MailboxNeighboursCount;
- ui32 EventsCounter = 0;
- TEventSharedCounters *SharedCounters;
- ui32 PairIdx = 0;
- bool EndlessSending = false;
- bool IsLeader = false;
- ui32 InFlight = 1;
- ui32 ReceiveTurn = 0;
- };
-
- static void AddBasicPool(THolder<TActorSystemSetup>& setup, ui32 threads, bool activateEveryEvent, i16 sharedExecutorsCount) {
- TBasicExecutorPoolConfig basic;
- basic.PoolId = setup->GetExecutorsCount();
- basic.PoolName = TStringBuilder() << "b" << basic.PoolId;
- basic.Threads = threads;
- basic.SpinThreshold = TSettings::DefaultSpinThreshold;
- basic.TimePerMailbox = TDuration::Hours(1);
- basic.SharedExecutorsCount = sharedExecutorsCount;
- basic.SoftProcessingDurationTs = Us2Ts(100);
- if (activateEveryEvent) {
- basic.EventsPerMailbox = 1;
- }
- setup->CpuManager.Basic.emplace_back(std::move(basic));
- }
-
- static void AddUnitedPool(THolder<TActorSystemSetup>& setup, ui32 concurrency, bool activateEveryEvent) {
- TUnitedExecutorPoolConfig united;
- united.PoolId = setup->GetExecutorsCount();
- united.PoolName = TStringBuilder() << "u" << united.PoolId;
- united.Concurrency = concurrency;
- united.TimePerMailbox = TDuration::Hours(1);
- if (activateEveryEvent) {
- united.EventsPerMailbox = 1;
- } else {
- united.EventsPerMailbox = ::Max<ui32>();
- }
- setup->CpuManager.United.emplace_back(std::move(united));
- }
-
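- // Builds a minimal single-node setup; with preemption disabled the united-workers limits are set so high that preemption effectively never triggers.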
- static THolder<TActorSystemSetup> GetActorSystemSetup(ui32 unitedCpuCount, bool preemption) {
- auto setup = MakeHolder<NActors::TActorSystemSetup>();
- setup->NodeId = 1;
- setup->CpuManager.UnitedWorkers.CpuCount = unitedCpuCount;
- setup->CpuManager.UnitedWorkers.SpinThresholdUs = TSettings::DefaultSpinThreshold;
- setup->CpuManager.UnitedWorkers.NoRealtime = TSettings::DefaultNoRealtime;
- if (preemption) {
- setup->CpuManager.UnitedWorkers.PoolLimitUs = 500;
- setup->CpuManager.UnitedWorkers.EventLimitUs = 100;
- setup->CpuManager.UnitedWorkers.LimitPrecisionUs = 100;
- } else {
- setup->CpuManager.UnitedWorkers.PoolLimitUs = 100'000'000'000;
- setup->CpuManager.UnitedWorkers.EventLimitUs = 10'000'000'000;
- setup->CpuManager.UnitedWorkers.LimitPrecisionUs = 10'000'000'000;
- }
- setup->Scheduler = new TBasicSchedulerThread(NActors::TSchedulerConfig(512, 0));
- return setup;
- }
-
- enum class EPoolType {
- Basic,
- United
- };
-
- static THolder<TActorSystemSetup> InitActorSystemSetup(EPoolType poolType, ui32 poolsCount, ui32 threads, bool activateEveryEvent, bool preemption) {
- if (poolType == EPoolType::Basic) {
- THolder<TActorSystemSetup> setup = GetActorSystemSetup(0, false);
- for (ui32 i = 0; i < poolsCount; ++i) {
- AddBasicPool(setup, threads, activateEveryEvent, 0);
- }
- return setup;
- } else if (poolType == EPoolType::United) {
- THolder<TActorSystemSetup> setup = GetActorSystemSetup(poolsCount * threads, preemption);
- for (ui32 i = 0; i < poolsCount; ++i) {
- AddUnitedPool(setup, threads, activateEveryEvent);
- }
- return setup;
- }
- Y_ABORT();
- }
-
- static double BenchSendReceive(bool allocation, NActors::TMailboxType::EType mType, EPoolType poolType, ESendingType sendingType) {
- THolder<TActorSystemSetup> setup = InitActorSystemSetup(poolType, 1, 1, false, false);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
- double elapsedTime = 0;
- THolder<IActor> endActor{new TTestEndDecorator(
- THolder(new TSendReceiveActor(
- TSendReceiveActorParams{
- .OwnEvents=TSettings::TotalEventsAmountPerThread,
- .OtherEvents=0,
- .ElapsedTime=&elapsedTime,
- .Allocation=allocation,
- .SendingType=sendingType,
- }
- )),
- &pad,
- &actorsAlive
- )};
-
- actorSystem.Register(endActor.Release(), mType);
-
- pad.Park();
- actorSystem.Stop();
-
- return 1e9 * elapsedTime;
- }
-
- static double BenchSendActivateReceive(ui32 poolsCount, ui32 threads, bool allocation, EPoolType poolType, ESendingType sendingType) {
- THolder<TActorSystemSetup> setup = InitActorSystemSetup(poolType, poolsCount, threads, true, false);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
- double elapsedTime = 0;
- ui32 followerPoolId = 0;
-
- ui32 leaderPoolId = poolsCount == 1 ? 0 : 1;
- ui64 eventsPerPair = TSettings::TotalEventsAmountPerThread;
-
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OtherEvents=eventsPerPair / 2, .Allocation=allocation}
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{
- .OwnEvents=eventsPerPair / 2,
- .ElapsedTime=&elapsedTime,
- .Receivers={followerId},
- .Allocation=allocation,
- .SendingType=sendingType,
- }
- )),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
-
- pad.Park();
- actorSystem.Stop();
-
- return 1e9 * elapsedTime;
- }
-
- static double BenchSendActivateReceiveWithMailboxNeighbours(ui32 MailboxNeighbourActors, EPoolType poolType, ESendingType sendingType) {
- THolder<TActorSystemSetup> setup = InitActorSystemSetup(poolType, 1, 1, false, false);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
- double elapsedTime = 0;
-
- ui64 eventsPerPair = TSettings::TotalEventsAmountPerThread;
-
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{
- .OtherEvents=eventsPerPair / 2,
- .Allocation=false,
- .Neighbours=MailboxNeighbourActors,
- }
- ),
- TMailboxType::HTSwap
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{
- .OwnEvents=eventsPerPair / 2,
- .ElapsedTime=&elapsedTime,
- .Receivers={followerId},
- .Allocation=false,
- .SendingType=sendingType,
- .Neighbours=MailboxNeighbourActors,
- }
- )),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap);
-
- pad.Park();
- actorSystem.Stop();
-
- return 1e9 * elapsedTime;
- }
-
- struct TBenchResult {
- double ElapsedTime;
- ui64 SentEvents;
- ui64 MinPairSentEvents;
- ui64 MaxPairSentEvents;
- };
-
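- // Contention benchmark: actorsPairsCount leader/follower pairs share a single pool with the given number of threads. With a zero testDuration each pair works through a fixed event budget; otherwise the pairs send endlessly and the shared counters are sampled after sleeping for testDuration.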
- static auto BenchContentedThreads(ui32 threads, ui32 actorsPairsCount, EPoolType poolType, ESendingType sendingType, TDuration testDuration = TDuration::Zero(), ui32 inFlight = 1) {
- THolder<TActorSystemSetup> setup = InitActorSystemSetup(poolType, 1, threads, false, false);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
-
- TEventSharedCounters sharedCounters(actorsPairsCount);
-
- ui64 totalEvents = TSettings::TotalEventsAmountPerThread * threads;
- ui64 eventsPerPair = totalEvents / actorsPairsCount;
-
- for (ui32 i = 0; i < actorsPairsCount; ++i) {
- ui32 followerPoolId = 0;
- ui32 leaderPoolId = 0;
- TActorId followerId = actorSystem.Register(
- new TSendReceiveActor(
- TSendReceiveActorParams{
- .OtherEvents = eventsPerPair / 2,
- .EndlessSending = bool(testDuration),
- .Allocation = false,
- .SharedCounters = &sharedCounters,
- }
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TSendReceiveActor(TSendReceiveActorParams{
- .OwnEvents = eventsPerPair / 2,
- .EndlessSending = bool(testDuration),
- .Receivers={followerId},
- .Allocation = false,
- .SendingType=sendingType,
- .SharedCounters=&sharedCounters,
- .InFlight = inFlight
- }, i)),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
-
- if (testDuration) {
- Sleep(testDuration);
- for (ui32 idx = 0; idx < actorsPairsCount; ++idx) {
- sharedCounters.EndedCounters[idx].store(sharedCounters.Counters[idx]);
- }
- sharedCounters.EndTimeTs = GetCycleCountFast();
- } else {
- pad.Park();
- }
- actorSystem.Stop();
-
- ui64 sentEvents = sharedCounters.EndedCounters[0] - sharedCounters.StartedCounters[0];
- ui64 minSentEvents = sentEvents;
- ui64 maxSentEvents = sentEvents;
- for (ui32 pairIdx = 1; pairIdx < actorsPairsCount; ++pairIdx) {
- ui64 count = sharedCounters.EndedCounters[pairIdx] - sharedCounters.StartedCounters[pairIdx];
- sentEvents += count;
- minSentEvents = ::Min(minSentEvents, count);
- maxSentEvents = ::Max(maxSentEvents, count);
- }
-
- return TBenchResult {
- .ElapsedTime = 1000 * Ts2Us(sharedCounters.EndTimeTs - sharedCounters.StartTimeTs),
- .SentEvents = sentEvents,
- .MinPairSentEvents = minSentEvents,
- .MaxPairSentEvents = maxSentEvents
- };
- }
-
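- // Star-topology variant of BenchContentedThreads: each leader fans its events out to starMultiply followers instead of a single peer.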
- static auto BenchStarContentedThreads(ui32 threads, ui32 actorsPairsCount, EPoolType poolType, ESendingType sendingType, TDuration testDuration = TDuration::Zero(), ui32 starMultiply=10) {
- THolder<TActorSystemSetup> setup = InitActorSystemSetup(poolType, 1, threads, true, false);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
-
- TEventSharedCounters sharedCounters(actorsPairsCount);
-
- ui64 totalEvents = TSettings::TotalEventsAmountPerThread * threads;
- ui64 eventsPerPair = totalEvents / actorsPairsCount;
-
- for (ui32 i = 0; i < actorsPairsCount; ++i) {
- ui32 followerPoolId = 0;
- ui32 leaderPoolId = 0;
- std::vector<TActorId> receivers;
- for (ui32 idx = 0; idx < starMultiply; ++idx) {
- TActorId followerId = actorSystem.Register(
- new TSendReceiveActor(
- TSendReceiveActorParams{
- .OtherEvents = eventsPerPair / 2 / starMultiply,
- .EndlessSending = bool(testDuration),
- .Allocation = false,
- .SharedCounters = &sharedCounters,
- }
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- receivers.push_back(followerId);
- }
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TSendReceiveActor(TSendReceiveActorParams{
- .OwnEvents = eventsPerPair / 2,
- .EndlessSending = bool(testDuration),
- .Receivers=receivers,
- .Allocation = false,
- .SendingType=sendingType,
- .SharedCounters=&sharedCounters,
- }, i)),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
-
- if (testDuration) {
- Sleep(testDuration);
- for (ui32 idx = 0; idx < actorsPairsCount; ++idx) {
- sharedCounters.EndedCounters[idx].store(sharedCounters.Counters[idx]);
- }
- sharedCounters.EndTimeTs = GetCycleCountFast();
- } else {
- pad.Park();
- }
- actorSystem.Stop();
-
- ui64 sentEvents = sharedCounters.EndedCounters[0] - sharedCounters.StartedCounters[0];
- ui64 minSentEvents = sentEvents;
- ui64 maxSentEvents = sentEvents;
- for (ui32 pairIdx = 1; pairIdx < actorsPairsCount; ++pairIdx) {
- ui64 count = sharedCounters.EndedCounters[pairIdx] - sharedCounters.StartedCounters[pairIdx];
- sentEvents += count;
- minSentEvents = ::Min(minSentEvents, count);
- maxSentEvents = ::Max(maxSentEvents, count);
- }
-
- return TBenchResult {
- .ElapsedTime = 1000 * Ts2Us(sharedCounters.EndTimeTs - sharedCounters.StartTimeTs),
- .SentEvents = sentEvents,
- .MinPairSentEvents = minSentEvents,
- .MaxPairSentEvents = maxSentEvents
- };
- }
-
-
- static auto Mean(const std::vector<double>& data) {
- return Accumulate(data.begin(), data.end(), 0.0) / data.size();
- }
-
- static auto Deviation(const std::vector<double>& data) {
- auto mean = Mean(data);
- double deviation = 0.0;
- for (const auto& x : data) {
- deviation += (x - mean) * (x - mean);
- }
- return std::sqrt(deviation / data.size());
- }
-
- static double Min(const std::vector<double>& data) {
- return *std::min_element(data.begin(), data.end());
- }
-
- static double Max(const std::vector<double>& data) {
- return *std::max_element(data.begin(), data.end());
- }
-
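- // Mean, (population) standard deviation, min and max over repeated runs; the Measurment callable supplies the unit suffix printed by ToString().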
- template <auto Measurment>
- struct TStats {
- double Mean;
- double Deviation;
- double Min;
- double Max;
-
- TStats(const std::vector<double> &data)
- : Mean(TActorBenchmark::Mean(data))
- , Deviation(TActorBenchmark::Deviation(data))
- , Min(TActorBenchmark::Min(data))
- , Max(TActorBenchmark::Max(data))
- {
- }
-
- TString ToString() {
- return TStringBuilder() << Mean << " ± " << Deviation << " " << Measurment()
- << " " << std::ceil(Deviation / Mean * 1000) / 10.0 << "%"
- << " min " << Min << " " << Measurment() << " max " << Max << " " << Measurment();
- }
- };
-
- static constexpr auto EmptyMsr = []{return "";};
- static constexpr auto NsMsr = []{return "ns";};
-
- struct TStatsBenchResult {
- TStats<NsMsr> ElapsedTime;
- TStats<EmptyMsr> SentEvents;
- TStats<EmptyMsr> MinPairSentEvents;
- TStats<EmptyMsr> MaxPairSentEvents;
-
- TString ToString() {
- return TStringBuilder() << ElapsedTime.ToString() << Endl << SentEvents.ToString() << Endl << MinPairSentEvents.ToString() << Endl << MaxPairSentEvents.ToString();
- }
- };
-
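- // Runs func itersCount times and aggregates the results: benchmarks returning a bare double yield a single TStats, benchmarks returning TBenchResult yield per-field statistics.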
- template <typename Func>
- static auto CountStats(Func func, ui32 itersCount = 5) {
- if constexpr (std::is_same_v<double, std::decay_t<decltype(func())>>) {
- std::vector<double> elapsedTimes;
- for (ui32 i = 0; i < itersCount; ++i) {
- auto elapsedTime = func();
- elapsedTimes.push_back(elapsedTime);
- }
- return TStats<NsMsr>(elapsedTimes);
- } else {
- std::vector<double> elapsedTimes;
- std::vector<double> sentEvents;
- std::vector<double> minPairSentEvents;
- std::vector<double> maxPairSentEvents;
- for (ui32 i = 0; i < itersCount; ++i) {
- TBenchResult result = func();
- elapsedTimes.push_back(result.ElapsedTime);
- sentEvents.push_back(result.SentEvents);
- minPairSentEvents.push_back(result.MinPairSentEvents);
- maxPairSentEvents.push_back(result.MaxPairSentEvents);
- }
- return TStatsBenchResult {
- .ElapsedTime = TStats<NsMsr>(elapsedTimes),
- .SentEvents = TStats<EmptyMsr>(sentEvents),
- .MinPairSentEvents = TStats<EmptyMsr>(minPairSentEvents),
- .MaxPairSentEvents = TStats<EmptyMsr>(maxPairSentEvents),
- };
- }
- }
-
- static void RunBenchSendActivateReceive(ui32 poolsCount, ui32 threads, bool allocation, EPoolType poolType) {
- auto stats = CountStats([=] {
- return BenchSendActivateReceive(poolsCount, threads, allocation, poolType, ESendingType::Common);
- });
- Cerr << stats.ToString() << Endl;
- stats = CountStats([=] {
- return BenchSendActivateReceive(poolsCount, threads, allocation, poolType, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " Lazy" << Endl;
- stats = CountStats([=] {
- return BenchSendActivateReceive(poolsCount, threads, allocation, poolType, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " Tail" << Endl;
- }
-
- static void RunBenchContentedThreads(ui32 threads, EPoolType poolType) {
- for (ui32 actorPairs = 1; actorPairs <= 2 * threads; actorPairs++) {
- auto stats = CountStats([threads, actorPairs, poolType] {
- return BenchContentedThreads(threads, actorPairs, poolType, ESendingType::Common);
- });
- Cerr << stats.ToString() << " actorPairs: " << actorPairs << Endl;
- stats = CountStats([threads, actorPairs, poolType] {
- return BenchContentedThreads(threads, actorPairs, poolType, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " actorPairs: " << actorPairs << " Lazy"<< Endl;
- stats = CountStats([threads, actorPairs, poolType] {
- return BenchContentedThreads(threads, actorPairs, poolType, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " actorPairs: " << actorPairs << " Tail"<< Endl;
- }
- }
-
- static void RunSendActivateReceiveCSV(const std::vector<ui32> &threadsList, const std::vector<ui32> &actorPairsList, const std::vector<ui32> &inFlights, TDuration subtestDuration) {
- Cout << "threads,actorPairs,in_flight,msgs_per_sec,elapsed_seconds,min_pair_sent_msgs,max_pair_sent_msgs" << Endl;
- for (ui32 threads : threadsList) {
- for (ui32 actorPairs : actorPairsList) {
- for (ui32 inFlight : inFlights) {
- auto stats = CountStats([threads, actorPairs, inFlight, subtestDuration] {
- return BenchContentedThreads(threads, actorPairs, EPoolType::Basic, ESendingType::Common, subtestDuration, inFlight);
- }, 3);
- double elapsedSeconds = stats.ElapsedTime.Mean / 1e9;
- ui64 eventsPerSecond = stats.SentEvents.Mean / elapsedSeconds;
- Cout << threads << "," << actorPairs << "," << inFlight << "," << eventsPerSecond << "," << elapsedSeconds << "," << stats.MinPairSentEvents.Min << "," << stats.MaxPairSentEvents.Max << Endl;
- }
- }
- }
- }
-
-
- static void RunStarSendActivateReceiveCSV(const std::vector<ui32> &threadsList, const std::vector<ui32> &actorPairsList, const std::vector<ui32> &starsList) {
- Cout << "threads,actorPairs,star_multiply,msgs_per_sec,elapsed_seconds,min_pair_sent_msgs,max_pair_sent_msgs" << Endl;
- for (ui32 threads : threadsList) {
- for (ui32 actorPairs : actorPairsList) {
- for (ui32 stars : starsList) {
- auto stats = CountStats([threads, actorPairs, stars] {
- return BenchStarContentedThreads(threads, actorPairs, EPoolType::Basic, ESendingType::Common, TDuration::Seconds(1), stars);
- }, 3);
- double elapsedSeconds = stats.ElapsedTime.Mean / 1e9;
- ui64 eventsPerSecond = stats.SentEvents.Mean / elapsedSeconds;
- Cout << threads << "," << actorPairs << "," << stars << "," << eventsPerSecond << "," << elapsedSeconds << "," << stats.MinPairSentEvents.Min << "," << stats.MaxPairSentEvents.Max << Endl;
- }
- }
- }
- }
-};
-
-} // NActors::NTests
diff --git a/library/cpp/actors/core/actor_bootstrapped.cpp b/library/cpp/actors/core/actor_bootstrapped.cpp
deleted file mode 100644
index 8c1effcd5d..0000000000
--- a/library/cpp/actors/core/actor_bootstrapped.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "actor_bootstrapped.h"
-
-namespace NActors {
-}
diff --git a/library/cpp/actors/core/actor_bootstrapped.h b/library/cpp/actors/core/actor_bootstrapped.h
deleted file mode 100644
index 70a6163bc5..0000000000
--- a/library/cpp/actors/core/actor_bootstrapped.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "actor.h"
-#include "events.h"
-#include <util/generic/noncopyable.h>
-
-namespace NActors {
- template<typename T> struct dependent_false : std::false_type {};
-
- template<typename TDerived>
- class TActorBootstrapped: public TActor<TDerived> {
- protected:
- TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parentId) override {
- return new IEventHandle(TEvents::TSystem::Bootstrap, 0, self, parentId, {}, 0);
- }
-
- STFUNC(StateBootstrap) {
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvents::TSystem::Bootstrap, "Unexpected bootstrap message");
- using T = decltype(&TDerived::Bootstrap);
- TDerived& self = static_cast<TDerived&>(*this);
- if constexpr (std::is_invocable_v<T, TDerived, const TActorContext&>) {
- self.Bootstrap(TActivationContext::ActorContextFor(TActor<TDerived>::SelfId()));
- } else if constexpr (std::is_invocable_v<T, TDerived, const TActorId&, const TActorContext&>) {
- self.Bootstrap(ev->Sender, TActivationContext::ActorContextFor(TActor<TDerived>::SelfId()));
- } else if constexpr (std::is_invocable_v<T, TDerived>) {
- self.Bootstrap();
- } else if constexpr (std::is_invocable_v<T, TDerived, const TActorId&>) {
- self.Bootstrap(ev->Sender);
- } else {
- static_assert(dependent_false<TDerived>::value, "No correct Bootstrap() signature");
- }
- }
-
- TActorBootstrapped()
- : TActor<TDerived>(&TDerived::StateBootstrap) {
- }
-
- template <class TEnum>
- TActorBootstrapped(const TEnum activityType)
- : TActor<TDerived>(&TDerived::StateBootstrap, activityType) {
- }
-
- TActorBootstrapped(const TString& activityName)
- : TActor<TDerived>(&TDerived::StateBootstrap, activityName) {
- }
- };
-}
diff --git a/library/cpp/actors/core/actor_coroutine.cpp b/library/cpp/actors/core/actor_coroutine.cpp
deleted file mode 100644
index 32390eaae3..0000000000
--- a/library/cpp/actors/core/actor_coroutine.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-#include "actor_coroutine.h"
-#include "executor_thread.h"
-
-#include <util/system/sanitizers.h>
-#include <util/system/type_name.h>
-#include <util/system/info.h>
-#include <util/system/protect.h>
-
-namespace NActors {
- static const size_t PageSize = NSystemInfo::GetPageSize();
-
-#if !CORO_THROUGH_THREADS
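- // Rounds the requested stack size up to a whole number of pages; debug builds add one extra page that the constructor later protects as a guard page against stack overflow.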
- static size_t AlignStackSize(size_t size) {
- size += PageSize - (size & PageSize - 1) & PageSize - 1;
-#ifndef NDEBUG
- size += PageSize;
-#endif
- return size;
- }
-#endif
-
- TActorCoroImpl::TActorCoroImpl(size_t stackSize, bool allowUnhandledDtor)
- : AllowUnhandledDtor(allowUnhandledDtor)
-#if !CORO_THROUGH_THREADS
- , Stack(AlignStackSize(stackSize))
- , FiberClosure{this, TArrayRef(Stack.Begin(), Stack.End())}
- , FiberContext(FiberClosure)
-#endif
- {
- Y_UNUSED(stackSize);
-#if !CORO_THROUGH_THREADS && !defined(NDEBUG)
- ProtectMemory(STACK_GROW_DOWN ? Stack.Begin() : Stack.End() - PageSize, PageSize, EProtectMemoryMode::PM_NONE);
-#endif
- }
-
- void TActorCoroImpl::Destroy() {
- if (!Finished) { // only resume when we have bootstrapped, Run() has been entered and has not yet finished; otherwise simply terminate
- InvokedFromDtor = true;
- Resume(nullptr);
- }
-#if CORO_THROUGH_THREADS
- if (WorkerThread.joinable()) {
- WorkerThread.join();
- }
-#endif
- }
-
- bool TActorCoroImpl::Send(TAutoPtr<IEventHandle> ev) {
- return GetActorContext().ExecutorThread.Send(ev);
- }
-
- THolder<IEventHandle> TActorCoroImpl::WaitForEvent(TMonotonic deadline) {
- IEventHandle *timeoutEv = nullptr;
- if (deadline != TMonotonic::Max()) {
- TActivationContext::Schedule(deadline, timeoutEv = new IEventHandle(TEvents::TSystem::CoroTimeout, 0,
- SelfActorId, {}, nullptr, 0));
- }
-
- // ensure we have no unprocessed event and return to the actor system to receive one
- Y_ABORT_UNLESS(!Finished);
-
- // obtain pending event and ensure we've got one
- while (THolder<IEventHandle> event = ReturnToActorSystem()) {
- if (event->GetTypeRewrite() != TEvents::TSystem::CoroTimeout) {
- return event;
- } else if (event.Get() == timeoutEv) {
- return nullptr; // this is not a race -- we've got the timeout for exactly our current wait
- }
- }
- Y_ABORT("no pending event");
- }
-
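- // Entered from the actor-system side for every incoming event: the first (Bootstrap) event captures SelfActorId/ParentActorId, then each call resumes the coroutine; returns true once Run() has finished.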
- bool TActorCoroImpl::ProcessEvent(THolder<IEventHandle> ev) {
- if (!SelfActorId) { // process bootstrap message, extract actor ids
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvents::TSystem::Bootstrap);
- SelfActorId = ev->Recipient;
- ParentActorId = ev->Sender;
- ev.Reset();
-#if CORO_THROUGH_THREADS
- WorkerThread = std::thread(std::bind(&TActorCoroImpl::DoRun, this));
-#endif
- }
-
- // prepare actor context for in-coroutine use
- TActivationContext *ac = TlsActivationContext;
- TActorContext actorContext(ac->Mailbox, ac->ExecutorThread, ac->EventStart, SelfActorId);
- TlsActivationContext = &actorContext;
-
- Resume(std::move(ev));
-
- // drop actor context
- TlsActivationContext = ac;
-
- return Finished;
- }
-
- void TActorCoroImpl::Resume(THolder<IEventHandle> ev) {
- BeforeResume();
-
- Y_ABORT_UNLESS(!PendingEvent);
- PendingEvent.Swap(ev);
-
-#if CORO_THROUGH_THREADS
- ActivationContext = TlsActivationContext;
- InEvent.Signal();
- OutEvent.Wait();
-#else
- // save caller context for a later return
- Y_ABORT_UNLESS(!ActorSystemContext);
- TExceptionSafeContext actorSystemContext;
- ActorSystemContext = &actorSystemContext;
-
- // go to actor coroutine
- ActorSystemContext->SwitchTo(&FiberContext);
-#endif
-
- Y_ABORT_UNLESS(!PendingEvent);
- }
-
- void TActorCoroImpl::DoRun() {
-#if CORO_THROUGH_THREADS
- InEvent.Wait();
- TlsActivationContext = ActivationContext;
-#endif
- if (!InvokedFromDtor) {
- try {
- Run();
- } catch (const TDtorException& /*ex*/) {
- if (!AllowUnhandledDtor) {
- Y_ABORT("unhandled TDtorException");
- }
- } catch (const std::exception& ex) {
- Y_ABORT("unhandled exception of type %s", TypeName(ex).data());
- } catch (...) {
- Y_ABORT("unhandled exception of type not derived from std::exception");
- }
- }
- Finished = true;
- ReturnToActorSystem();
- }
-
- THolder<IEventHandle> TActorCoroImpl::ReturnToActorSystem() {
-#if CORO_THROUGH_THREADS
- OutEvent.Signal();
- if (Finished) {
- return nullptr;
- } else {
- InEvent.Wait(); // wait for reentry
- TlsActivationContext = ActivationContext;
- }
-#else
- TExceptionSafeContext* returnContext = std::exchange(ActorSystemContext, nullptr);
- Y_ABORT_UNLESS(returnContext);
- if (StoreTlsState) {
- StoreTlsState(this);
- }
- FiberContext.SwitchTo(returnContext);
- if (RestoreTlsState) {
- RestoreTlsState(this);
- }
-#endif
-
- if (THolder<IEventHandle> ev = std::exchange(PendingEvent, nullptr)) {
- return ev;
- } else {
- // we have returned from the actor system and it kindly asks us to terminate the coroutine as it is being
- // stopped
- Y_ABORT_UNLESS(InvokedFromDtor);
- throw TDtorException();
- }
- }
-
- TActorCoro::~TActorCoro() {
- Impl->Destroy();
- }
-
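- // Every event is forwarded to the coroutine implementation; once it reports completion the wrapper actor passes away.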
- STATEFN(TActorCoro::StateFunc) {
- if (Impl->ProcessEvent(ev)) {
- PassAway();
- }
- }
-}
diff --git a/library/cpp/actors/core/actor_coroutine.h b/library/cpp/actors/core/actor_coroutine.h
deleted file mode 100644
index a098c334cf..0000000000
--- a/library/cpp/actors/core/actor_coroutine.h
+++ /dev/null
@@ -1,230 +0,0 @@
-#pragma once
-
-#include <util/system/context.h>
-#include <util/system/filemap.h>
-
-#include "actor_bootstrapped.h"
-#include "executor_thread.h"
-#include "event_local.h"
-
-#include <thread>
-
-namespace NActors {
-
- class TActorCoro;
-
-#ifndef CORO_THROUGH_THREADS
-# ifdef _tsan_enabled_
-# define CORO_THROUGH_THREADS 1
-# else
-# define CORO_THROUGH_THREADS 0
-# endif
-#endif
-
- class TActorCoroImpl : public ITrampoLine {
- const bool AllowUnhandledDtor;
- bool Finished = false;
- bool InvokedFromDtor = false;
-#if CORO_THROUGH_THREADS
- TAutoEvent InEvent;
- TAutoEvent OutEvent;
- TActivationContext *ActivationContext = nullptr;
- std::thread WorkerThread;
-#else
- TMappedAllocation Stack;
- TContClosure FiberClosure;
- TExceptionSafeContext FiberContext;
- TExceptionSafeContext* ActorSystemContext = nullptr;
-#endif
- THolder<IEventHandle> PendingEvent;
-
- protected:
- TActorIdentity SelfActorId = TActorIdentity(TActorId());
- TActorId ParentActorId;
-
- // Pre-leave and pre-enter hook functions are called by coroutine actor code to conserve the state of required TLS
- // variables.
- //
- // They are called in the following order:
- //
- // 1. coroutine executes WaitForEvent
- // 2. StoreTlsState() is called
- // 3. control is returned to the actor system
- // 4. some event is received and its handler is called (possibly in a different thread, where unconserved TLS variables have changed!)
- // 5. handler transfers control to the coroutine
- // 6. RestoreTlsState() is called
- //
- // These hooks may be used in the following way:
- //
- // thread_local TMyClass *MyObject = nullptr;
- //
- // class TMyCoroImpl : public TActorCoroImpl {
- // TMyClass *SavedMyObject;
- // ...
- // public:
- // TMyCoroImpl()
- // : TActorCoroImpl(...)
- // {
- // StoreTlsState = RestoreTlsState = &TMyCoroImpl::ConserveState;
- // }
- //
- // static void ConserveState(TActorCoroImpl *p) {
- // TMyCoroImpl *my = static_cast<TMyCoroImpl*>(p);
- // std::swap(my->SavedMyObject, MyObject);
- // }
- //
- // ...
- // }
- void (*StoreTlsState)(TActorCoroImpl*) = nullptr;
- void (*RestoreTlsState)(TActorCoroImpl*) = nullptr;
-
-
- private:
- template <typename TFirstEvent, typename... TOtherEvents>
- struct TIsOneOf: public TIsOneOf<TOtherEvents...> {
- bool operator()(IEventHandle& ev) const {
- return ev.GetTypeRewrite() == TFirstEvent::EventType || TIsOneOf<TOtherEvents...>()(ev);
- }
- };
-
- template <typename TSingleEvent>
- struct TIsOneOf<TSingleEvent> {
- bool operator()(IEventHandle& ev) const {
- return ev.GetTypeRewrite() == TSingleEvent::EventType;
- }
- };
-
- protected:
- struct TDtorException : yexception {};
-
- public:
- TActorCoroImpl(size_t stackSize, bool allowUnhandledDtor = false);
- // specify stackSize explicitly for each actor; don't forget about overflow control gap
-
- virtual ~TActorCoroImpl() = default;
-
- virtual void Run() = 0;
-
- virtual void BeforeResume() {}
-
- // Release execution ownership and wait for some event to arrive.
- THolder<IEventHandle> WaitForEvent(TMonotonic deadline = TMonotonic::Max());
-
- // Wait for a specific event selected by the filter functor. The function returns the first event that matches the
- // filter; on any other kind of event processUnexpectedEvent() is called.
- //
- // Example: WaitForSpecificEvent([](IEventHandle& ev) { return ev.Cookie == 42; });
- template <typename TFunc, typename TCallback, typename = std::enable_if_t<std::is_invocable_v<TCallback, TAutoPtr<IEventHandle>>>>
- THolder<IEventHandle> WaitForSpecificEvent(TFunc&& filter, TCallback processUnexpectedEvent, TMonotonic deadline = TMonotonic::Max()) {
- for (;;) {
- if (THolder<IEventHandle> event = WaitForEvent(deadline); !event) {
- return nullptr;
- } else if (filter(*event)) {
- return event;
- } else {
- processUnexpectedEvent(event);
- }
- }
- }
-
- template <typename TFunc, typename TDerived, typename = std::enable_if_t<std::is_base_of_v<TActorCoroImpl, TDerived>>>
- THolder<IEventHandle> WaitForSpecificEvent(TFunc&& filter, void (TDerived::*processUnexpectedEvent)(TAutoPtr<IEventHandle>),
- TMonotonic deadline = TMonotonic::Max()) {
- auto callback = [&](TAutoPtr<IEventHandle> ev) { (static_cast<TDerived&>(*this).*processUnexpectedEvent)(ev); };
- return WaitForSpecificEvent(std::forward<TFunc>(filter), callback, deadline);
- }
-
- // Wait for a specific event or a set of events. The function returns the first event whose type is among those
- // listed; on any other kind of event processUnexpectedEvent() is called.
- //
- // Example: WaitForSpecificEvent<TEvReadResult, TEvFinished>();
- template <typename TFirstEvent, typename TSecondEvent, typename... TOtherEvents, typename TCallback>
- THolder<IEventHandle> WaitForSpecificEvent(TCallback&& callback, TMonotonic deadline = TMonotonic::Max()) {
- TIsOneOf<TFirstEvent, TSecondEvent, TOtherEvents...> filter;
- return WaitForSpecificEvent(filter, std::forward<TCallback>(callback), deadline);
- }
-
- // Wait for single specific event.
- template <typename TEventType, typename TCallback>
- THolder<typename TEventType::THandle> WaitForSpecificEvent(TCallback&& callback, TMonotonic deadline = TMonotonic::Max()) {
- auto filter = [](IEventHandle& ev) {
- return ev.GetTypeRewrite() == TEventType::EventType;
- };
- THolder<IEventHandle> event = WaitForSpecificEvent(filter, std::forward<TCallback>(callback), deadline);
- return THolder<typename TEventType::THandle>(static_cast<typename TEventType::THandle*>(event ? event.Release() : nullptr));
- }
-
- protected: // Actor System compatibility section
- const TActorContext& GetActorContext() const { return TActivationContext::AsActorContext(); }
- TActorSystem *GetActorSystem() const { return GetActorContext().ExecutorThread.ActorSystem; }
- TInstant Now() const { return GetActorContext().Now(); }
- TMonotonic Monotonic() const { return GetActorContext().Monotonic(); }
-
- bool Send(const TActorId& recipient, IEventBase* ev, ui32 flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) {
- return GetActorContext().Send(recipient, ev, flags, cookie, std::move(traceId));
- }
-
- bool Send(const TActorId& recipient, THolder<IEventBase> ev, ui32 flags = 0, ui64 cookie = 0, NWilson::TTraceId traceId = {}) {
- return GetActorContext().Send(recipient, ev.Release(), flags, cookie, std::move(traceId));
- }
-
- bool Send(TAutoPtr<IEventHandle> ev);
-
- bool Forward(THolder<IEventHandle>& ev, const TActorId& recipient) {
- return Send(IEventHandle::Forward(ev, recipient).Release());
- }
-
- void Schedule(TDuration delta, IEventBase* ev, ISchedulerCookie* cookie = nullptr) {
- return GetActorContext().Schedule(delta, ev, cookie);
- }
-
- void Schedule(TInstant deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) {
- return GetActorContext().Schedule(deadline, ev, cookie);
- }
-
- void Schedule(TMonotonic deadline, IEventBase* ev, ISchedulerCookie* cookie = nullptr) {
- return GetActorContext().Schedule(deadline, ev, cookie);
- }
-
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>()) {
- return GetActorContext().Register(actor, mailboxType, poolId);
- }
-
- TActorId RegisterWithSameMailbox(IActor* actor) {
- return GetActorContext().RegisterWithSameMailbox(actor);
- }
-
- private:
- friend class TActorCoro;
- bool ProcessEvent(THolder<IEventHandle> ev);
- void Destroy();
-
- private:
- /* Resume() switches to the actor coroutine context and continues (or starts) executing it until the actor finishes
- * its job or blocks in WaitForEvent. Then the function returns. */
- void Resume(THolder<IEventHandle> ev);
- THolder<IEventHandle> ReturnToActorSystem();
- void DoRun() override final;
- };
-
- class TActorCoro : public IActorCallback {
- THolder<TActorCoroImpl> Impl;
-
- public:
- template <class TEnumActivityType = IActor::EActivityType>
- TActorCoro(THolder<TActorCoroImpl> impl, const TEnumActivityType activityType = IActor::EActivityType::ACTOR_COROUTINE)
- : IActorCallback(static_cast<TReceiveFunc>(&TActorCoro::StateFunc), activityType)
- , Impl(std::move(impl))
- {}
-
- ~TActorCoro();
-
- TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parent) override {
- return new IEventHandle(TEvents::TSystem::Bootstrap, 0, self, parent, {}, 0);
- }
-
- private:
- STATEFN(StateFunc);
- };
-
-}
diff --git a/library/cpp/actors/core/actor_coroutine_ut.cpp b/library/cpp/actors/core/actor_coroutine_ut.cpp
deleted file mode 100644
index 4567cd142e..0000000000
--- a/library/cpp/actors/core/actor_coroutine_ut.cpp
+++ /dev/null
@@ -1,145 +0,0 @@
-#include "actor_coroutine.h"
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "scheduler_basic.h"
-#include "events.h"
-#include "event_local.h"
-#include "hfunc.h"
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/system/sanitizers.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(ActorCoro) {
- enum {
- Begin = EventSpaceBegin(TEvents::ES_USERSPACE),
- Request,
- Response,
- Enough
- };
-
- struct TEvRequest: public TEventLocal<TEvRequest, Request> {
- };
-
- struct TEvResponse: public TEventLocal<TEvResponse, Response> {
- };
-
- struct TEvEnough: public TEventLocal<TEvEnough, Enough> {
- };
-
- class TBasicResponderActor: public TActorBootstrapped<TBasicResponderActor> {
- TDeque<TActorId> RespondTo;
-
- public:
- TBasicResponderActor() {
- }
-
- void Bootstrap(const TActorContext& /*ctx*/) {
- Become(&TBasicResponderActor::StateFunc);
- }
-
- STFUNC(StateFunc) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvRequest, Handle);
- HFunc(TEvents::TEvWakeup, Handle);
- HFunc(TEvents::TEvPoisonPill, Handle);
- }
- }
-
- void Handle(TEvRequest::TPtr& ev, const TActorContext& ctx) {
- RespondTo.push_back(ev->Sender);
- ctx.Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup);
- }
-
- void Handle(TEvents::TEvWakeup::TPtr& /*ev*/, const TActorContext& ctx) {
- ctx.Send(RespondTo.front(), new TEvResponse());
- RespondTo.pop_front();
- }
-
- void Handle(TEvents::TEvPoisonPill::TPtr& /*ev*/, const TActorContext& ctx) {
- Die(ctx);
- }
- };
-
- class TCoroActor: public TActorCoroImpl {
- TManualEvent& DoneEvent;
- TAtomic& ItemsProcessed;
- bool Finish;
-
- struct TPoisonPillException {};
-
- public:
- TCoroActor(TManualEvent& doneEvent, TAtomic& itemsProcessed)
- : TActorCoroImpl(1 << 20)
- , DoneEvent(doneEvent)
- , ItemsProcessed(itemsProcessed)
- , Finish(false)
- {
- }
-
- void Run() override {
- TActorId child = GetActorContext().Register(new TBasicResponderActor);
- ui32 itemsProcessed = 0;
- try {
- while (!Finish) {
- GetActorContext().Send(child, new TEvRequest());
- THolder<IEventHandle> resp = WaitForSpecificEvent<TEvResponse>(&TCoroActor::ProcessUnexpectedEvent);
- UNIT_ASSERT_EQUAL(resp->GetTypeRewrite(), TEvResponse::EventType);
- ++itemsProcessed;
- }
- } catch (const TPoisonPillException& /*ex*/) {
- }
- GetActorContext().Send(child, new TEvents::TEvPoisonPill);
-
- AtomicSet(ItemsProcessed, itemsProcessed);
- DoneEvent.Signal();
- }
-
- void ProcessUnexpectedEvent(TAutoPtr<IEventHandle> event) {
- if (event->GetTypeRewrite() == Enough) {
- Finish = true;
- } else if (event->GetTypeRewrite() == TEvents::TSystem::Poison) {
- throw TPoisonPillException();
- }
- }
- };
-
- void Check(THolder<IEventBase> && message) {
- THolder<TActorSystemSetup> setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 0;
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
- for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
- setup->Executors[i] = new TBasicExecutorPool(i, 5, 10, "basic");
- }
- setup->Scheduler = new TBasicSchedulerThread;
-
- TActorSystem actorSystem(setup);
-
- actorSystem.Start();
-
- TManualEvent doneEvent;
- TAtomic itemsProcessed = 0;
- TActorId actor = actorSystem.Register(new TActorCoro(MakeHolder<TCoroActor>(doneEvent, itemsProcessed)));
- NanoSleep(3UL * 1000 * 1000 * 1000);
- actorSystem.Send(actor, message.Release());
- doneEvent.WaitI();
-
- UNIT_ASSERT(AtomicGet(itemsProcessed) >= 2);
-
- actorSystem.Stop();
- }
-
- Y_UNIT_TEST(Basic) {
- if (NSan::TSanIsOn()) {
- // TODO https://st.yandex-team.ru/DEVTOOLS-3154
- return;
- }
- Check(MakeHolder<TEvEnough>());
- }
-
- Y_UNIT_TEST(PoisonPill) {
- Check(MakeHolder<TEvents::TEvPoisonPill>());
- }
-}
diff --git a/library/cpp/actors/core/actor_ut.cpp b/library/cpp/actors/core/actor_ut.cpp
deleted file mode 100644
index 52d3cbad9c..0000000000
--- a/library/cpp/actors/core/actor_ut.cpp
+++ /dev/null
@@ -1,511 +0,0 @@
-#include "actor.h"
-#include "events.h"
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "scheduler_basic.h"
-#include "actor_bootstrapped.h"
-#include "actor_benchmark_helper.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/actors/util/threadparkpad.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/generic/algorithm.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/rwlock.h>
-#include <util/system/hp_timer.h>
-
-using namespace NActors;
-using namespace NActors::NTests;
-
-Y_UNIT_TEST_SUITE(ActorBenchmark) {
-
- using TActorBenchmark = ::NActors::NTests::TActorBenchmark<>;
- using TSettings = TActorBenchmark::TSettings;
- using TSendReceiveActorParams = TActorBenchmark::TSendReceiveActorParams;
-
- Y_UNIT_TEST(WithSharedExecutors) {
- THolder<TActorSystemSetup> setup = TActorBenchmark::GetActorSystemSetup(0, false);
- TActorBenchmark::AddBasicPool(setup, 2, 1, 0);
- TActorBenchmark::AddBasicPool(setup, 2, 1, 1);
-
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
- THPTimer Timer;
-
- ui64 eventsPerPair = TSettings::TotalEventsAmountPerThread * 4 / 60;
-
- Timer.Reset();
- for (ui32 i = 0; i < 50; ++i) {
- ui32 followerPoolId = 0;
- ui32 leaderPoolId = 0;
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OtherEvents=eventsPerPair / 2, .Allocation=true}
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OwnEvents=eventsPerPair / 2, .Receivers={followerId}, .Allocation=true}
- )),
- &pad,
- &actorsAlive)
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
- for (ui32 i = 0; i < 10; ++i) {
- ui32 followerPoolId = 1;
- ui32 leaderPoolId = 1;
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OtherEvents=eventsPerPair / 2, .Allocation=true}
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OwnEvents=eventsPerPair / 2, .Receivers={followerId}, .Allocation=true}
- )),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
-
- pad.Park();
- auto elapsedTime = Timer.Passed() / (TSettings::TotalEventsAmountPerThread * 4);
- actorSystem.Stop();
-
- Cerr << "Completed " << 1e9 * elapsedTime << Endl;
- }
-
- Y_UNIT_TEST(WithoutSharedExecutors) {
- THolder<TActorSystemSetup> setup = TActorBenchmark::GetActorSystemSetup(0, false);
- TActorBenchmark::AddBasicPool(setup, 2, 1, 0);
- TActorBenchmark::AddBasicPool(setup, 2, 1, 0);
-
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
- THPTimer Timer;
-
- ui64 eventsPerPair = TSettings::TotalEventsAmountPerThread * 4 / 60;
-
- Timer.Reset();
- for (ui32 i = 0; i < 50; ++i) {
- ui32 followerPoolId = 0;
- ui32 leaderPoolId = 0;
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OtherEvents=eventsPerPair / 2, .Allocation=true}
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OwnEvents=eventsPerPair / 2, .Receivers={followerId}, .Allocation=true}
- )),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
- for (ui32 i = 0; i < 10; ++i) {
- ui32 followerPoolId = 1;
- ui32 leaderPoolId = 1;
- TActorId followerId = actorSystem.Register(
- new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OtherEvents=eventsPerPair / 2, .Allocation=true}
- ),
- TMailboxType::HTSwap,
- followerPoolId
- );
- THolder<IActor> leader{
- new TTestEndDecorator(
- THolder(new TActorBenchmark::TSendReceiveActor(
- TSendReceiveActorParams{.OwnEvents=eventsPerPair / 2, .Receivers={followerId}, .Allocation=true}
- )),
- &pad,
- &actorsAlive
- )
- };
- actorSystem.Register(leader.Release(), TMailboxType::HTSwap, leaderPoolId);
- }
-
- pad.Park();
- auto elapsedTime = Timer.Passed() / (4 * TSettings::TotalEventsAmountPerThread);
- actorSystem.Stop();
-
- Cerr << "Completed " << 1e9 * elapsedTime << Endl;
- }
-
- Y_UNIT_TEST(SendReceive1Pool1ThreadAlloc) {
- for (const auto& mType : TSettings::MailboxTypes) {
- auto stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Common);
- });
- Cerr << stats.ToString() << " " << mType << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " " << mType << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " " << mType << " Tail" << Endl;
- }
- }
-
- Y_UNIT_TEST(SendReceive1Pool1ThreadAllocUnited) {
- for (const auto& mType : TSettings::MailboxTypes) {
- auto stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::United, ESendingType::Common);
- });
- Cerr << stats.ToString() << " " << mType << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::United, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " " << mType << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(true, mType, TActorBenchmark::EPoolType::United, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " " << mType << " Tail" << Endl;
- }
- }
-
- Y_UNIT_TEST(SendReceive1Pool1ThreadNoAlloc) {
- for (const auto& mType : TSettings::MailboxTypes) {
- auto stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Common);
- });
- Cerr << stats.ToString() << " " << mType << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " " << mType << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::Basic, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " " << mType << " Tail" << Endl;
- }
- }
-
- Y_UNIT_TEST(SendReceive1Pool1ThreadNoAllocUnited) {
- for (const auto& mType : TSettings::MailboxTypes) {
- auto stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::United, ESendingType::Common);
- });
- Cerr << stats.ToString() << " " << mType << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::United, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " " << mType << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([mType] {
- return TActorBenchmark::BenchSendReceive(false, mType, TActorBenchmark::EPoolType::United, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " " << mType << " Tail" << Endl;
- }
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool1ThreadAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 1, true, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool1ThreadAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 1, true, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool1ThreadNoAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 1, false, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool1ThreadNoAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 1, false, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool2ThreadsAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 2, true, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool2ThreadsAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 2, true, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool2ThreadsNoAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 2, false, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool2ThreadsNoAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(1, 2, false, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive2Pool1ThreadAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(2, 1, true, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive2Pool1ThreadAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(2, 1, true, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive2Pool1ThreadNoAlloc) {
- TActorBenchmark::RunBenchSendActivateReceive(2, 1, false, TActorBenchmark::EPoolType::Basic);
- }
-
- Y_UNIT_TEST(SendActivateReceive2Pool1ThreadNoAllocUnited) {
- TActorBenchmark::RunBenchSendActivateReceive(2, 1, false, TActorBenchmark::EPoolType::United);
- }
-
- Y_UNIT_TEST(SendActivateReceive1Pool1Threads) { TActorBenchmark::RunBenchContentedThreads(1, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool1ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(1, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool2Threads) { TActorBenchmark::RunBenchContentedThreads(2, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool2ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(2, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool3Threads) { TActorBenchmark::RunBenchContentedThreads(3, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool3ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(3, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool4Threads) { TActorBenchmark::RunBenchContentedThreads(4, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool4ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(4, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool5Threads) { TActorBenchmark::RunBenchContentedThreads(5, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool5ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(5, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool6Threads) { TActorBenchmark::RunBenchContentedThreads(6, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool6ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(6, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool7Threads) { TActorBenchmark::RunBenchContentedThreads(7, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool7ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(7, TActorBenchmark::EPoolType::United); }
- Y_UNIT_TEST(SendActivateReceive1Pool8Threads) { TActorBenchmark::RunBenchContentedThreads(8, TActorBenchmark::EPoolType::Basic); }
- Y_UNIT_TEST(SendActivateReceive1Pool8ThreadsUnited) { TActorBenchmark::RunBenchContentedThreads(8, TActorBenchmark::EPoolType::United); }
-
- Y_UNIT_TEST(SendActivateReceiveCSV) {
- std::vector<ui32> threadsList;
- for (ui32 threads = 1; threads <= 32; threads *= 2) {
- threadsList.push_back(threads);
- }
- std::vector<ui32> actorPairsList;
- for (ui32 actorPairs = 1; actorPairs <= 2 * 32; actorPairs *= 2) {
- actorPairsList.push_back(actorPairs);
- }
- TActorBenchmark::RunSendActivateReceiveCSV(threadsList, actorPairsList, {1}, TDuration::MilliSeconds(100));
- }
-
- Y_UNIT_TEST(SendActivateReceiveWithMailboxNeighbours) {
- TVector<ui32> NeighbourActors = {0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 128, 256};
- for (const auto& neighbour : NeighbourActors) {
- auto stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::Basic, ESendingType::Common);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << Endl;
- stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::Basic, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::Basic, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << " Tail" << Endl;
- }
- }
-
- Y_UNIT_TEST(SendActivateReceiveWithMailboxNeighboursUnited) {
- TVector<ui32> NeighbourActors = {0, 1, 2, 3, 4, 5, 6, 7, 8, 16, 32, 64, 128, 256};
- for (const auto& neighbour : NeighbourActors) {
- auto stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::United, ESendingType::Common);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << Endl;
- stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::United, ESendingType::Lazy);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << " Lazy" << Endl;
- stats = TActorBenchmark::CountStats([neighbour] {
- return TActorBenchmark::BenchSendActivateReceiveWithMailboxNeighbours(neighbour, TActorBenchmark::EPoolType::United, ESendingType::Tail);
- });
- Cerr << stats.ToString() << " neighbourActors: " << neighbour << " Tail" << Endl;
- }
- }
-}
-
-Y_UNIT_TEST_SUITE(TestDecorator) {
- struct TPingDecorator : TDecorator {
- TAutoPtr<IEventHandle> SavedEvent = nullptr;
- ui64* Counter;
-
- TPingDecorator(THolder<IActor>&& actor, ui64* counter)
- : TDecorator(std::move(actor))
- , Counter(counter)
- {
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle>& ev, const TActorContext&) override {
- *Counter += 1;
- if (ev->Type != TEvents::THelloWorld::Pong) {
- TAutoPtr<IEventHandle> pingEv = new IEventHandle(SelfId(), SelfId(), new TEvents::TEvPing());
- SavedEvent = ev;
- Actor->Receive(pingEv);
- } else {
- Actor->Receive(SavedEvent);
- }
- return false;
- }
- };
-
- struct TPongDecorator : TDecorator {
- ui64* Counter;
-
- TPongDecorator(THolder<IActor>&& actor, ui64* counter)
- : TDecorator(std::move(actor))
- , Counter(counter)
- {
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle>& ev, const TActorContext&) override {
- *Counter += 1;
- if (ev->Type == TEvents::THelloWorld::Ping) {
- // answer the ping with a pong sent to our own mailbox
- Send(SelfId(), new TEvents::TEvPong());
- return false;
- }
- return true;
- }
- };
-
- struct TTestActor : TActorBootstrapped<TTestActor> {
- static constexpr char ActorName[] = "TestActor";
-
- void Bootstrap()
- {
- const auto& activityTypeIndex = GetActivityType();
- Y_ENSURE(activityTypeIndex < GetActivityTypeCount());
- Y_ENSURE(GetActivityTypeName(activityTypeIndex) == "TestActor");
- PassAway();
- }
- };
-
- Y_UNIT_TEST(Basic) {
- THolder<TActorSystemSetup> setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 0;
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
-
- ui64 ts = GetCycleCountFast();
- THolder<IHarmonizer> harmonizer(MakeHarmonizer(ts));
- for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
- setup->Executors[i] = new TBasicExecutorPool(i, 1, 10, "basic", harmonizer.Get());
- harmonizer->AddPool(setup->Executors[i].Get());
- }
- setup->Scheduler = new TBasicSchedulerThread;
-
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- THolder<IActor> innerActor = MakeHolder<TTestActor>();
- ui64 pongCounter = 0;
- THolder<IActor> pongActor = MakeHolder<TPongDecorator>(std::move(innerActor), &pongCounter);
- ui64 pingCounter = 0;
- THolder<IActor> pingActor = MakeHolder<TPingDecorator>(std::move(pongActor), &pingCounter);
-
- TThreadParkPad pad;
- TAtomic actorsAlive = 0;
-
- THolder<IActor> endActor = MakeHolder<TTestEndDecorator>(std::move(pingActor), &pad, &actorsAlive);
- actorSystem.Register(endActor.Release(), TMailboxType::HTSwap);
-
- pad.Park();
- actorSystem.Stop();
- UNIT_ASSERT(pongCounter == 2 && pingCounter == 2);
- }
-
- Y_UNIT_TEST(LocalProcessKey) {
- static constexpr char ActorName[] = "TestActor";
-
- UNIT_ASSERT((TEnumProcessKey<TActorActivityTag, IActor::EActorActivity>::GetName(IActor::EActivityType::INTERCONNECT_PROXY_TCP) == "INTERCONNECT_PROXY_TCP"));
- UNIT_ASSERT((TLocalProcessKey<TActorActivityTag, ActorName>::GetName() == ActorName));
- }
-}
-
-Y_UNIT_TEST_SUITE(TestStateFunc) {
- struct TTestActorWithExceptionsStateFunc : TActor<TTestActorWithExceptionsStateFunc> {
- static constexpr char ActorName[] = "TestActorWithExceptionsStateFunc";
-
- TTestActorWithExceptionsStateFunc()
- : TActor<TTestActorWithExceptionsStateFunc>(&TTestActorWithExceptionsStateFunc::StateFunc)
- {
- }
-
- STRICT_STFUNC_EXC(StateFunc,
- hFunc(TEvents::TEvWakeup, Handle),
- ExceptionFunc(yexception, HandleException)
- ExceptionFuncEv(std::exception, HandleException)
- AnyExceptionFunc(HandleException)
- )
-
- void Handle(TEvents::TEvWakeup::TPtr& ev) {
- Owner = ev->Sender;
- switch (ev->Get()->Tag) {
- case ETag::NoException:
- SendResponse(ETag::NoException);
- break;
- case ETag::YException:
- Cerr << "Throw yexception" << Endl;
- throw yexception();
- case ETag::StdException:
- Cerr << "Throw std::exception" << Endl;
- throw std::runtime_error("trololo");
- case ETag::OtherException:
- Cerr << "Throw trash" << Endl;
- throw TString("1");
- default:
- UNIT_ASSERT(false);
- }
- }
-
- void HandleException(const yexception&) {
- Cerr << "Handle yexception" << Endl;
- SendResponse(ETag::YException);
- }
-
- void HandleException(const std::exception&, TAutoPtr<::NActors::IEventHandle>& ev) {
- Cerr << "Handle std::exception from event with type " << ev->Type << Endl;
- SendResponse(ETag::StdException);
- }
-
- void HandleException() {
- Cerr << "Handle trash" << Endl;
- SendResponse(ETag::OtherException);
- }
-
- enum ETag : ui64 {
- NoException,
- YException,
- StdException,
- OtherException,
- };
-
- void SendResponse(ETag tag) {
- Send(Owner, new TEvents::TEvWakeup(tag));
- }
-
- TActorId Owner;
- };
-
- Y_UNIT_TEST(StateFuncWithExceptions) {
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- auto testActor = runtime.Register(new TTestActorWithExceptionsStateFunc());
- for (ui64 tag = 0; tag < 4; ++tag) {
- runtime.Send(new IEventHandle(testActor, sender, new TEvents::TEvWakeup(tag)), 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvents::TEvWakeup>(sender);
- UNIT_ASSERT_VALUES_EQUAL(ev->Get()->Tag, tag);
- }
- }
-}
diff --git a/library/cpp/actors/core/actor_virtual.cpp b/library/cpp/actors/core/actor_virtual.cpp
deleted file mode 100644
index 709cd7be15..0000000000
--- a/library/cpp/actors/core/actor_virtual.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-#include "actor_virtual.h"
-
-namespace NActors {
-
-
-}
diff --git a/library/cpp/actors/core/actor_virtual.h b/library/cpp/actors/core/actor_virtual.h
deleted file mode 100644
index 78da45138d..0000000000
--- a/library/cpp/actors/core/actor_virtual.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#pragma once
-#include "event.h"
-#include "actor.h"
-
-namespace NActors {
-
-template <class TEvent>
-class TEventContext {
-private:
- TEvent* Event;
- std::unique_ptr<IEventHandle> Handle;
-public:
- const TEvent* operator->() const {
- return Event;
- }
- const IEventHandle& GetHandle() const {
- return *Handle;
- }
- TEventContext(std::unique_ptr<IEventHandle> handle)
- : Handle(std::move(handle))
- {
- Y_DEBUG_ABORT_UNLESS(dynamic_cast<TEvent*>(Handle->GetBase()));
- Event = static_cast<TEvent*>(Handle->GetBase());
- Y_ABORT_UNLESS(Event);
- }
-};
-
-template <class TEvent, class TExpectedActor>
-class IEventForActor: public IEventBase {
-protected:
- virtual bool DoExecute(IActor* actor, std::unique_ptr<IEventHandle> eventPtr) override {
- Y_DEBUG_ABORT_UNLESS(dynamic_cast<TExpectedActor*>(actor));
- auto* actorCorrect = static_cast<TExpectedActor*>(actor);
- TEventContext<TEvent> context(std::move(eventPtr));
- actorCorrect->ProcessEvent(context);
- return true;
- }
-public:
-};
-
-template <class TBaseEvent, class TEvent, class TExpectedObject>
-class IEventForAnything: public TBaseEvent {
-protected:
- virtual bool DoExecute(IActor* actor, std::unique_ptr<IEventHandle> eventPtr) override {
- auto* objImpl = dynamic_cast<TExpectedObject*>(actor);
- if (!objImpl) {
- return false;
- }
- TEventContext<TEvent> context(std::move(eventPtr));
- objImpl->ProcessEvent(context);
- return true;
- }
-public:
-};
-
-template <class TEvent, class TActor>
-class TEventLocalForActor: public IEventForActor<TEvent, TActor> {
-private:
- using TBase = IEventForActor<TEvent, TActor>;
- static TString GetClassTitle() {
- return TStringBuilder() << typeid(TEvent).name() << "->" << typeid(TActor).name();
- }
- static i64 LocalClassId;
-public:
- virtual ui32 Type() const override {
- return LocalClassId;
- }
- virtual TString ToStringHeader() const override {
- return GetClassTitle();
- }
-
- virtual bool SerializeToArcadiaStream(TChunkSerializer* /*serializer*/) const override {
- Y_ABORT("Serialization of local event %s->%s", typeid(TEvent).name(), typeid(TActor).name());
- }
-
- virtual bool IsSerializable() const override {
- return false;
- }
-
- static IEventBase* Load(TEventSerializedData*) {
- Y_ABORT("Loading of local event %s->%s", typeid(TEvent).name(), typeid(TActor).name());
- }
-};
-
-template <class TEvent, class TActor>
-i64 TEventLocalForActor<TEvent, TActor>::LocalClassId = Singleton<TAtomicCounter>()->Inc();
-
-}
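For reference, the intended usage of the TEventLocalForActor machinery removed above: an event type derives from TEventLocalForActor<TEvent, TActor>, and the target actor provides a typed ProcessEvent handler that IEventForActor::DoExecute invokes after a checked downcast. A minimal sketch, assuming the hypothetical names TEvDoWork and TMyActor (not part of the deleted sources):

    class TMyActor;  // some IActor implementation (hypothetical)

    class TEvDoWork
        : public NActors::TEventLocalForActor<TEvDoWork, TMyActor> {
    public:
        ui64 Payload = 0;
    };

    // Inside TMyActor, the only requirement imposed by this machinery is the typed handler:
    //
    //     void ProcessEvent(NActors::TEventContext<TEvDoWork>& ctx) {
    //         // ctx->Payload           - typed access to the event
    //         // ctx.GetHandle().Sender - access to the enclosing IEventHandle
    //     }
    //
    // When such an event reaches TMyActor, DoExecute downcasts the actor (checked in debug
    // builds with Y_DEBUG_ABORT_UNLESS) and passes a TEventContext<TEvDoWork> to it.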
diff --git a/library/cpp/actors/core/actorid.cpp b/library/cpp/actors/core/actorid.cpp
deleted file mode 100644
index ccda035eac..0000000000
--- a/library/cpp/actors/core/actorid.cpp
+++ /dev/null
@@ -1,34 +0,0 @@
-#include "actorid.h"
-#include <util/string/builder.h>
-#include <util/string/cast.h>
-
-namespace NActors {
- void TActorId::Out(IOutputStream& o) const {
- o << "[" << NodeId() << ":" << LocalId() << ":" << Hint() << "]";
- }
-
- TString TActorId::ToString() const {
- TString x;
- TStringOutput o(x);
- Out(o);
- return x;
- }
-
- bool TActorId::Parse(const char* buf, ui32 sz) {
- if (sz < 4 || buf[0] != '[' || buf[sz - 1] != ']')
- return false;
-
- size_t semicolons[2];
- TStringBuf str(buf, sz);
- semicolons[0] = str.find(':', 1);
- if (semicolons[0] == TStringBuf::npos)
- return false;
- semicolons[1] = str.find(':', semicolons[0] + 1);
- if (semicolons[1] == TStringBuf::npos)
- return false;
-
- bool success = TryFromString(buf + 1, semicolons[0] - 1, Raw.N.NodeId) && TryFromString(buf + semicolons[0] + 1, semicolons[1] - semicolons[0] - 1, Raw.N.LocalId) && TryFromString(buf + semicolons[1] + 1, sz - semicolons[1] - 2, Raw.N.Hint);
-
- return success;
- }
-}
diff --git a/library/cpp/actors/core/actorid.h b/library/cpp/actors/core/actorid.h
deleted file mode 100644
index 4e9a7bc3c3..0000000000
--- a/library/cpp/actors/core/actorid.h
+++ /dev/null
@@ -1,198 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <util/stream/output.h> // for IOutputStream
-#include <util/generic/hash.h>
-
-namespace NActors {
-    // Used as the globally unique address of an actor.
-    // It can also transport a service id (a 12-byte string placed in the hint+localid bytes).
-    // Highest bit of the node field - service id mark.
-    // Next 11 bits of the node field - pool id.
-    // Lowest 20 bits - the node id itself.
-
- struct TActorId {
- static constexpr ui32 MaxServiceIDLength = 12;
- static constexpr ui32 MaxPoolID = 0x000007FF;
- static constexpr ui32 MaxNodeId = 0x000FFFFF;
- static constexpr ui32 PoolIndexShift = 20;
- static constexpr ui32 PoolIndexMask = MaxPoolID << PoolIndexShift;
- static constexpr ui32 ServiceMask = 0x80000000;
- static constexpr ui32 NodeIdMask = MaxNodeId;
-
- private:
- union {
- struct {
- ui64 LocalId;
- ui32 Hint;
- ui32 NodeId;
- } N;
-
- struct {
- ui64 X1;
- ui64 X2;
- } X;
-
- ui8 Buf[16];
- } Raw;
-
- public:
- TActorId() noexcept {
- Raw.X.X1 = 0;
- Raw.X.X2 = 0;
- }
-
- explicit TActorId(ui32 nodeId, ui32 poolId, ui64 localId, ui32 hint) noexcept {
- Y_DEBUG_ABORT_UNLESS(poolId <= MaxPoolID);
- Raw.N.LocalId = localId;
- Raw.N.Hint = hint;
- Raw.N.NodeId = nodeId | (poolId << PoolIndexShift);
- }
-
- explicit TActorId(ui32 nodeId, const TStringBuf& x) noexcept {
- Y_ABORT_UNLESS(x.size() <= MaxServiceIDLength, "service id is too long");
- Raw.N.LocalId = 0;
- Raw.N.Hint = 0;
- Raw.N.NodeId = nodeId | ServiceMask;
- memcpy(Raw.Buf, x.data(), x.size());
- }
-
- explicit TActorId(ui64 x1, ui64 x2) noexcept {
- Raw.X.X1 = x1;
- Raw.X.X2 = x2;
- }
-
- explicit operator bool() const noexcept {
- return Raw.X.X1 != 0 || Raw.X.X2 != 0;
- }
-
- ui64 LocalId() const noexcept {
- return Raw.N.LocalId;
- }
-
- ui32 Hint() const noexcept {
- return Raw.N.Hint;
- }
-
- ui32 NodeId() const noexcept {
- return Raw.N.NodeId & NodeIdMask;
- }
-
- bool IsService() const noexcept {
- return (Raw.N.NodeId & ServiceMask);
- }
-
- TStringBuf ServiceId() const noexcept {
- Y_DEBUG_ABORT_UNLESS(IsService());
- return TStringBuf((const char*)Raw.Buf, MaxServiceIDLength);
- }
-
- static ui32 PoolIndex(ui32 nodeid) noexcept {
- return ((nodeid & PoolIndexMask) >> PoolIndexShift);
- }
-
- ui32 PoolID() const noexcept {
- return PoolIndex(Raw.N.NodeId);
- }
-
- ui64 RawX1() const noexcept {
- return Raw.X.X1;
- }
-
- ui64 RawX2() const noexcept {
- return Raw.X.X2;
- }
-
- bool operator<(const TActorId& x) const noexcept {
- const ui64 s1 = Raw.X.X1;
- const ui64 s2 = Raw.X.X2;
- const ui64 x1 = x.Raw.X.X1;
- const ui64 x2 = x.Raw.X.X2;
-
- return (s1 != x1) ? (s1 < x1) : (s2 < x2);
- }
-
- bool operator!=(const TActorId& x) const noexcept {
- return Raw.X.X1 != x.Raw.X.X1 || Raw.X.X2 != x.Raw.X.X2;
- }
-
- bool operator==(const TActorId& x) const noexcept {
- return !(x != *this);
- }
-
- ui64 Hash() const noexcept {
- const ui32* x = (const ui32*)Raw.Buf;
-
- const ui64 x1 = x[0] * 0x001DFF3D8DC48F5Dull;
- const ui64 x2 = x[1] * 0x179CA10C9242235Dull;
- const ui64 x3 = x[2] * 0x0F530CAD458B0FB1ull;
- const ui64 x4 = x[3] * 0xB5026F5AA96619E9ull;
-
- const ui64 z1 = x1 + x2;
- const ui64 z2 = x3 + x4;
-
- const ui64 sum = 0x5851F42D4C957F2D + z1 + z2;
-
- return (sum >> 32) | (sum << 32);
- }
-
- ui32 Hash32() const noexcept {
- const ui32* x = (const ui32*)Raw.Buf;
-
- const ui64 x1 = x[0] * 0x001DFF3D8DC48F5Dull;
- const ui64 x2 = x[1] * 0x179CA10C9242235Dull;
- const ui64 x3 = x[2] * 0x0F530CAD458B0FB1ull;
- const ui64 x4 = x[3] * 0xB5026F5AA96619E9ull;
-
- const ui64 z1 = x1 + x2;
- const ui64 z2 = x3 + x4;
-
- const ui64 sum = 0x5851F42D4C957F2D + z1 + z2;
-
- return sum >> 32;
- }
-
- struct THash {
- ui64 operator()(const TActorId& actorId) const noexcept {
- return actorId.Hash();
- }
- };
-
- struct THash32 {
- ui64 operator()(const TActorId& actorId) const noexcept {
- return actorId.Hash();
- }
- };
-
- struct TOrderedCmp {
- bool operator()(const TActorId &left, const TActorId &right) const noexcept {
- Y_DEBUG_ABORT_UNLESS(!left.IsService() && !right.IsService(), "ordered compare works for plain actorids only");
- const ui32 n1 = left.NodeId();
- const ui32 n2 = right.NodeId();
-
- return (n1 != n2) ? (n1 < n2) : left.LocalId() < right.LocalId();
- }
- };
-
- TString ToString() const;
- void Out(IOutputStream& o) const;
- bool Parse(const char* buf, ui32 sz);
- };
-
- static_assert(sizeof(TActorId) == 16, "expect sizeof(TActorId) == 16");
- static_assert(MaxPools < TActorId::MaxPoolID); // current implementation of united pool has limit MaxPools on pool id
-}
-
-template <>
-inline void Out<NActors::TActorId>(IOutputStream& o, const NActors::TActorId& x) {
- return x.Out(o);
-}
-
-template <>
-struct THash<NActors::TActorId> {
- inline ui64 operator()(const NActors::TActorId& x) const {
- return x.Hash();
- }
-};
-
-template<> struct std::hash<NActors::TActorId> : THash<NActors::TActorId> {};
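To make the bit layout described at the top of the removed actorid.h concrete, a short sketch (the values are illustrative only):

    void ActorIdLayoutExample() {
        using namespace NActors;

        // A plain actor id: pool 2 on node 5. NodeId() reads the low 20 bits of the packed
        // node field, PoolID() bits 20..30, IsService() the top bit.
        TActorId plain(/*nodeId*/ 5, /*poolId*/ 2, /*localId*/ 0x1234, /*hint*/ 1);
        Y_ABORT_UNLESS(plain.NodeId() == 5 && plain.PoolID() == 2 && !plain.IsService());

        // A service id: the (up to 12-byte) name is stored in the LocalId+Hint bytes and the
        // service mark bit is set in the node field.
        TActorId service(/*nodeId*/ 5, TStringBuf("my-service"));
        Y_ABORT_UNLESS(service.IsService());
    }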
diff --git a/library/cpp/actors/core/actorsystem.cpp b/library/cpp/actors/core/actorsystem.cpp
deleted file mode 100644
index c09a208ef5..0000000000
--- a/library/cpp/actors/core/actorsystem.cpp
+++ /dev/null
@@ -1,323 +0,0 @@
-#include "defs.h"
-#include "actorsystem.h"
-#include "callstack.h"
-#include "cpu_manager.h"
-#include "mailbox.h"
-#include "events.h"
-#include "interconnect.h"
-#include "servicemap.h"
-#include "scheduler_queue.h"
-#include "scheduler_actor.h"
-#include "log.h"
-#include "probes.h"
-#include "ask.h"
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <util/generic/hash.h>
-#include <util/system/rwlock.h>
-#include <util/random/random.h>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- TActorSetupCmd::TActorSetupCmd()
- : MailboxType(TMailboxType::HTSwap)
- , PoolId(0)
- , Actor(nullptr)
- {
- }
-
- TActorSetupCmd::TActorSetupCmd(TActorSetupCmd&&) = default;
- TActorSetupCmd& TActorSetupCmd::operator=(TActorSetupCmd&&) = default;
- TActorSetupCmd::~TActorSetupCmd() = default;
-
- TActorSetupCmd::TActorSetupCmd(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId)
- : MailboxType(mailboxType)
- , PoolId(poolId)
- , Actor(actor)
- {
- }
-
- TActorSetupCmd::TActorSetupCmd(std::unique_ptr<IActor> actor, TMailboxType::EType mailboxType, ui32 poolId)
- : MailboxType(mailboxType)
- , PoolId(poolId)
- , Actor(std::move(actor))
- {
- }
-
- void TActorSetupCmd::Set(std::unique_ptr<IActor> actor, TMailboxType::EType mailboxType, ui32 poolId) {
- MailboxType = mailboxType;
- PoolId = poolId;
- Actor = std::move(actor);
- }
-
- struct TActorSystem::TServiceMap : TNonCopyable {
- NActors::TServiceMap<TActorId, TActorId, TActorId::THash> LocalMap;
- TTicketLock Lock;
-
- TActorId RegisterLocalService(const TActorId& serviceId, const TActorId& actorId) {
- TTicketLock::TGuard guard(&Lock);
- const TActorId old = LocalMap.Update(serviceId, actorId);
- return old;
- }
-
- TActorId LookupLocal(const TActorId& x) {
- return LocalMap.Find(x);
- }
- };
-
- TActorSystem::TActorSystem(THolder<TActorSystemSetup>& setup, void* appData,
- TIntrusivePtr<NLog::TSettings> loggerSettings)
- : NodeId(setup->NodeId)
- , CpuManager(new TCpuManager(setup))
- , ExecutorPoolCount(CpuManager->GetExecutorsCount())
- , Scheduler(setup->Scheduler)
- , InterconnectCount((ui32)setup->Interconnect.ProxyActors.size())
- , CurrentTimestamp(0)
- , CurrentMonotonic(0)
- , CurrentIDCounter(RandomNumber<ui64>())
- , SystemSetup(setup.Release())
- , DefSelfID(NodeId, "actorsystem")
- , AppData0(appData)
- , LoggerSettings0(loggerSettings)
- , StartExecuted(false)
- , StopExecuted(false)
- , CleanupExecuted(false)
- {
- ServiceMap.Reset(new TServiceMap());
- }
-
- TActorSystem::~TActorSystem() {
- Cleanup();
- }
-
- template <TActorSystem::TEPSendFunction EPSpecificSend>
- bool TActorSystem::GenericSend(TAutoPtr<IEventHandle> ev) const {
- if (Y_UNLIKELY(!ev))
- return false;
-
-#ifdef USE_ACTOR_CALLSTACK
- ev->Callstack.TraceIfEmpty();
-#endif
-
- TActorId recipient = ev->GetRecipientRewrite();
- const ui32 recpNodeId = recipient.NodeId();
-
- if (recpNodeId != NodeId && recpNodeId != 0) {
- // if recipient is not local one - rewrite with forward instruction
- Y_DEBUG_ABORT_UNLESS(!ev->HasEvent() || ev->GetBase()->IsSerializable());
- Y_ABORT_UNLESS(ev->Recipient == recipient,
- "Event rewrite from %s to %s would be lost via interconnect",
- ev->Recipient.ToString().c_str(),
- recipient.ToString().c_str());
- recipient = InterconnectProxy(recpNodeId);
- ev->Rewrite(TEvInterconnect::EvForward, recipient);
- }
- if (recipient.IsService()) {
- TActorId target = ServiceMap->LookupLocal(recipient);
- if (!target && IsInterconnectProxyId(recipient) && ProxyWrapperFactory) {
- const TActorId actorId = ProxyWrapperFactory(const_cast<TActorSystem*>(this),
- GetInterconnectProxyNode(recipient));
- with_lock(ProxyCreationLock) {
- target = ServiceMap->LookupLocal(recipient);
- if (!target) {
- target = actorId;
- ServiceMap->RegisterLocalService(recipient, target);
- }
- }
- if (target != actorId) {
-                    // a race has occurred, terminate the newly created actor
- Send(new IEventHandle(TEvents::TSystem::Poison, 0, actorId, {}, nullptr, 0));
- }
- }
- recipient = target;
- ev->Rewrite(ev->GetTypeRewrite(), recipient);
- }
-
- Y_DEBUG_ABORT_UNLESS(recipient == ev->GetRecipientRewrite());
- const ui32 recpPool = recipient.PoolID();
- if (recipient && recpPool < ExecutorPoolCount) {
- if ((CpuManager->GetExecutorPool(recpPool)->*EPSpecificSend)(ev)) {
- return true;
- }
- }
- if (ev) {
- Send(IEventHandle::ForwardOnNondelivery(ev, TEvents::TEvUndelivered::ReasonActorUnknown));
- }
- return false;
- }
-
- template
- bool TActorSystem::GenericSend<&IExecutorPool::Send>(TAutoPtr<IEventHandle> ev) const;
- template
- bool TActorSystem::GenericSend<&IExecutorPool::SpecificSend>(TAutoPtr<IEventHandle> ev) const;
-
- bool TActorSystem::Send(const TActorId& recipient, IEventBase* ev, ui32 flags, ui64 cookie) const {
- return this->Send(new IEventHandle(recipient, DefSelfID, ev, flags, cookie));
- }
-
- bool TActorSystem::SpecificSend(TAutoPtr<IEventHandle> ev) const {
- return this->GenericSend<&IExecutorPool::SpecificSend>(ev);
- }
-
- bool TActorSystem::SpecificSend(TAutoPtr<IEventHandle> ev, ESendingType sendingType) const {
- if (!TlsThreadContext) {
- return this->GenericSend<&IExecutorPool::Send>(ev);
- } else {
- ESendingType previousType = std::exchange(TlsThreadContext->SendingType, sendingType);
- bool isSent = this->GenericSend<&IExecutorPool::SpecificSend>(ev);
- TlsThreadContext->SendingType = previousType;
- return isSent;
- }
- }
-
- void TActorSystem::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) const {
- Schedule(deadline - Timestamp(), ev, cookie);
- }
-
- void TActorSystem::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) const {
- const auto current = Monotonic();
- if (deadline < current)
- deadline = current;
-
- TTicketLock::TGuard guard(&ScheduleLock);
- ScheduleQueue->Writer.Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TActorSystem::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) const {
- const auto deadline = Monotonic() + delta;
-
- TTicketLock::TGuard guard(&ScheduleLock);
- ScheduleQueue->Writer.Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- NThreading::TFuture<THolder<IEventBase>> TActorSystem::AskGeneric(TMaybe<ui32> expectedEventType,
- TActorId recipient, THolder<IEventBase> event,
- TDuration timeout) {
- auto promise = NThreading::NewPromise<THolder<IEventBase>>();
- Register(MakeAskActor(expectedEventType, recipient, std::move(event), timeout, promise).Release());
- return promise.GetFuture();
- }
-
- ui64 TActorSystem::AllocateIDSpace(ui64 count) {
- Y_DEBUG_ABORT_UNLESS(count < Max<ui32>() / 65536);
-
- static_assert(sizeof(TAtomic) == sizeof(ui64), "expect sizeof(TAtomic) == sizeof(ui64)");
-
- // get high 32 bits as seconds from epoch
-        // it wraps roughly every century, but we don't expect any actor reference to live that long, so such a wrap does no harm
- const ui64 timeFromEpoch = TInstant::MicroSeconds(RelaxedLoad(&CurrentTimestamp)).Seconds();
-
- // get low 32 bits as counter value
- ui32 lowPartEnd = (ui32)(AtomicAdd(CurrentIDCounter, count));
- while (lowPartEnd < count) // if our request crosses 32bit boundary - retry
- lowPartEnd = (ui32)(AtomicAdd(CurrentIDCounter, count));
-
- const ui64 lowPart = lowPartEnd - count;
- const ui64 ret = (timeFromEpoch << 32) | lowPart;
-
- return ret;
- }
-
- TActorId TActorSystem::InterconnectProxy(ui32 destinationNode) const {
- if (destinationNode < InterconnectCount)
- return Interconnect[destinationNode];
- else if (destinationNode != NodeId)
- return MakeInterconnectProxyId(destinationNode);
- else
- return TActorId();
- }
-
- ui32 TActorSystem::BroadcastToProxies(const std::function<IEventHandle*(const TActorId&)>& eventFabric) {
- // TODO: get rid of this method
- for (ui32 i = 0; i < InterconnectCount; ++i) {
- Send(eventFabric(Interconnect[i]));
- }
- return InterconnectCount;
- }
-
- TActorId TActorSystem::LookupLocalService(const TActorId& x) const {
- return ServiceMap->LookupLocal(x);
- }
-
- TActorId TActorSystem::RegisterLocalService(const TActorId& serviceId, const TActorId& actorId) {
- // TODO: notify old actor about demotion
- return ServiceMap->RegisterLocalService(serviceId, actorId);
- }
-
- void TActorSystem::GetPoolStats(ui32 poolId, TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- CpuManager->GetPoolStats(poolId, poolStats, statsCopy);
- }
-
- THarmonizerStats TActorSystem::GetHarmonizerStats() const {
- return CpuManager->GetHarmonizerStats();
- }
-
- void TActorSystem::Start() {
- Y_ABORT_UNLESS(StartExecuted == false);
- StartExecuted = true;
-
- ScheduleQueue.Reset(new NSchedulerQueue::TQueueType());
- TVector<NSchedulerQueue::TReader*> scheduleReaders;
- scheduleReaders.push_back(&ScheduleQueue->Reader);
- CpuManager->PrepareStart(scheduleReaders, this);
- Scheduler->Prepare(this, &CurrentTimestamp, &CurrentMonotonic);
- Scheduler->PrepareSchedules(&scheduleReaders.front(), (ui32)scheduleReaders.size());
-
- // setup interconnect proxies
- {
- TInterconnectSetup& setup = SystemSetup->Interconnect;
- Interconnect.Reset(new TActorId[InterconnectCount + 1]);
- for (ui32 i = 0, e = InterconnectCount; i != e; ++i) {
- TActorSetupCmd& x = setup.ProxyActors[i];
- if (x.Actor) {
- Interconnect[i] = Register(x.Actor.release(), x.MailboxType, x.PoolId, i);
- Y_ABORT_UNLESS(!!Interconnect[i]);
- }
- }
- ProxyWrapperFactory = std::move(SystemSetup->Interconnect.ProxyWrapperFactory);
- }
-
- // setup local services
- {
- for (ui32 i = 0, e = (ui32)SystemSetup->LocalServices.size(); i != e; ++i) {
- std::pair<TActorId, TActorSetupCmd>& x = SystemSetup->LocalServices[i];
- const TActorId xid = Register(x.second.Actor.release(), x.second.MailboxType, x.second.PoolId, i);
- Y_ABORT_UNLESS(!!xid);
- if (!!x.first)
- RegisterLocalService(x.first, xid);
- }
- }
-
- Scheduler->PrepareStart();
- CpuManager->Start();
- Send(MakeSchedulerActorId(), new TEvSchedulerInitialize(scheduleReaders, &CurrentTimestamp, &CurrentMonotonic));
- Scheduler->Start();
- }
-
- void TActorSystem::Stop() {
- if (StopExecuted || !StartExecuted)
- return;
-
- StopExecuted = true;
-
- for (auto&& fn : std::exchange(DeferredPreStop, {})) {
- fn();
- }
-
- Scheduler->PrepareStop();
- CpuManager->PrepareStop();
- Scheduler->Stop();
- CpuManager->Shutdown();
- }
-
- void TActorSystem::Cleanup() {
- Stop();
- if (CleanupExecuted || !StartExecuted)
- return;
- CleanupExecuted = true;
- CpuManager->Cleanup();
- Scheduler.Destroy();
- }
-}
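A worked illustration of the AllocateIDSpace encoding above (seconds since epoch in the high 32 bits, a rolling counter in the low 32 bits); actorSystem here is assumed to be an already running TActorSystem:

    const ui64 base = actorSystem.AllocateIDSpace(100);   // reserves 100 consecutive ids
    const ui64 seconds = base >> 32;                      // high 32 bits: wall-clock seconds at allocation
    const ui32 counter = static_cast<ui32>(base);         // low 32 bits: counter value the range starts at
    Y_UNUSED(seconds);
    Y_UNUSED(counter);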
diff --git a/library/cpp/actors/core/actorsystem.h b/library/cpp/actors/core/actorsystem.h
deleted file mode 100644
index df5de39090..0000000000
--- a/library/cpp/actors/core/actorsystem.h
+++ /dev/null
@@ -1,311 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include "balancer.h"
-#include "config.h"
-#include "event.h"
-#include "executor_pool.h"
-#include "log_settings.h"
-#include "scheduler_cookie.h"
-#include "cpu_manager.h"
-#include "executor_thread.h"
-
-#include <library/cpp/threading/future/future.h>
-#include <library/cpp/actors/util/ticket_lock.h>
-
-#include <util/generic/vector.h>
-#include <util/datetime/base.h>
-#include <util/system/mutex.h>
-
-namespace NActors {
- class IActor;
- class TActorSystem;
- class TCpuManager;
- struct TWorkerContext;
-
- inline TActorId MakeInterconnectProxyId(ui32 destNodeId) {
- char data[12];
- memcpy(data, "ICProxy@", 8);
- memcpy(data + 8, &destNodeId, sizeof(ui32));
- return TActorId(0, TStringBuf(data, 12));
- }
-
- inline bool IsInterconnectProxyId(const TActorId& actorId) {
- return actorId.IsService() && !memcmp(actorId.ServiceId().data(), "ICProxy@", 8);
- }
-
- inline ui32 GetInterconnectProxyNode(const TActorId& actorId) {
- ui32 nodeId;
- memcpy(&nodeId, actorId.ServiceId().data() + 8, sizeof(ui32));
- return nodeId;
- }
-
- namespace NSchedulerQueue {
- class TReader;
- struct TQueueType;
- }
-
-    // could be a proxy to in-pool schedulers (for NUMA-aware executors)
- class ISchedulerThread : TNonCopyable {
- public:
- virtual ~ISchedulerThread() {
- }
-
- virtual void Prepare(TActorSystem* actorSystem, volatile ui64* currentTimestamp, volatile ui64* currentMonotonic) = 0;
- virtual void PrepareSchedules(NSchedulerQueue::TReader** readers, ui32 scheduleReadersCount) = 0;
- virtual void PrepareStart() { /* empty */ }
- virtual void Start() = 0;
- virtual void PrepareStop() = 0;
- virtual void Stop() = 0;
- };
-
- struct TActorSetupCmd {
- TMailboxType::EType MailboxType;
- ui32 PoolId;
- std::unique_ptr<IActor> Actor;
-
- TActorSetupCmd();
- TActorSetupCmd(const TActorSetupCmd&) = delete;
- TActorSetupCmd(TActorSetupCmd&&);
- TActorSetupCmd& operator=(const TActorSetupCmd&) = delete;
- TActorSetupCmd& operator=(TActorSetupCmd&&);
- TActorSetupCmd(std::unique_ptr<IActor> actor, TMailboxType::EType mailboxType, ui32 poolId);
- ~TActorSetupCmd();
-
- // For legacy code, please do not use
- TActorSetupCmd(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId);
-
- void Set(std::unique_ptr<IActor> actor, TMailboxType::EType mailboxType, ui32 poolId);
- };
-
- using TProxyWrapperFactory = std::function<TActorId(TActorSystem*, ui32)>;
-
- struct TInterconnectSetup {
- TVector<TActorSetupCmd> ProxyActors;
- TProxyWrapperFactory ProxyWrapperFactory;
- };
-
- struct TActorSystemSetup {
- ui32 NodeId = 0;
-
- // Either Executors or CpuManager must be initialized
- ui32 ExecutorsCount = 0;
- TArrayHolder<TAutoPtr<IExecutorPool>> Executors;
-
- TAutoPtr<IBalancer> Balancer; // main implementation will be implicitly created if not set
-
- TCpuManagerConfig CpuManager;
-
- TAutoPtr<ISchedulerThread> Scheduler;
-
- TInterconnectSetup Interconnect;
-
- bool MonitorStuckActors = false;
-
- using TLocalServices = TVector<std::pair<TActorId, TActorSetupCmd>>;
- TLocalServices LocalServices;
-
- ui32 GetExecutorsCount() const {
- return Executors ? ExecutorsCount : CpuManager.GetExecutorsCount();
- }
-
- TString GetPoolName(ui32 poolId) const {
- return Executors ? Executors[poolId]->GetName() : CpuManager.GetPoolName(poolId);
- }
-
- ui32 GetThreads(ui32 poolId) const {
- auto result = GetThreadsOptional(poolId);
- Y_ABORT_UNLESS(result, "undefined pool id: %" PRIu32, (ui32)poolId);
- return *result;
- }
-
- std::optional<ui32> GetThreadsOptional(const ui32 poolId) const {
- if (Y_LIKELY(Executors)) {
- if (Y_LIKELY(poolId < ExecutorsCount)) {
- return Executors[poolId]->GetDefaultThreadCount();
- } else {
- return {};
- }
- } else {
- return CpuManager.GetThreadsOptional(poolId);
- }
- }
- };
-
- class TActorSystem : TNonCopyable {
- struct TServiceMap;
-
- public:
- const ui32 NodeId;
-
- private:
- THolder<TCpuManager> CpuManager;
- const ui32 ExecutorPoolCount;
-
- TAutoPtr<ISchedulerThread> Scheduler;
- THolder<TServiceMap> ServiceMap;
-
- const ui32 InterconnectCount;
- TArrayHolder<TActorId> Interconnect;
-
- volatile ui64 CurrentTimestamp;
- volatile ui64 CurrentMonotonic;
- volatile ui64 CurrentIDCounter;
-
- THolder<NSchedulerQueue::TQueueType> ScheduleQueue;
- mutable TTicketLock ScheduleLock;
-
- friend class TExecutorThread;
-
- THolder<TActorSystemSetup> SystemSetup;
- TActorId DefSelfID;
- void* AppData0;
- TIntrusivePtr<NLog::TSettings> LoggerSettings0;
- TProxyWrapperFactory ProxyWrapperFactory;
- TMutex ProxyCreationLock;
-
- bool StartExecuted;
- bool StopExecuted;
- bool CleanupExecuted;
-
- std::deque<std::function<void()>> DeferredPreStop;
- public:
- TActorSystem(THolder<TActorSystemSetup>& setup, void* appData = nullptr,
- TIntrusivePtr<NLog::TSettings> loggerSettings = TIntrusivePtr<NLog::TSettings>(nullptr));
- ~TActorSystem();
-
- void Start();
- void Stop();
- void Cleanup();
-
- template <ESendingType SendingType = ESendingType::Common>
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 executorPool = 0,
- ui64 revolvingCounter = 0, const TActorId& parentId = TActorId());
-
- bool MonitorStuckActors() const { return SystemSetup->MonitorStuckActors; }
-
- private:
- typedef bool (IExecutorPool::*TEPSendFunction)(TAutoPtr<IEventHandle>& ev);
-
- template <TEPSendFunction EPSpecificSend>
- bool GenericSend(TAutoPtr<IEventHandle> ev) const;
-
- public:
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(TAutoPtr<IEventHandle> ev) const;
-
- bool SpecificSend(TAutoPtr<IEventHandle> ev, ESendingType sendingType) const;
- bool SpecificSend(TAutoPtr<IEventHandle> ev) const;
-
- bool Send(const TActorId& recipient, IEventBase* ev, ui32 flags = 0, ui64 cookie = 0) const;
-
- /**
-         * Schedule a one-shot event that will be sent at the given time point in the future.
-         *
-         * @param deadline the wallclock time point in the future when the event must be sent
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
- */
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) const;
-
- /**
-         * Schedule a one-shot event that will be sent at the given time point in the future.
-         *
-         * @param deadline the monotonic time point in the future when the event must be sent
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
- */
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) const;
-
- /**
-         * Schedule a one-shot event that will be sent after the given delay.
-         *
-         * @param delta the delay from now after which the event will be sent
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
- */
- void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr) const;
-
- /**
- * A way to interact with actors from non-actor context.
- *
-         * This method sends the `event` to the `recipient` and then waits for a response. When the response arrives,
-         * it is passed to the future. If the response is not of type `T`, the future resolves into an exception.
-         *
-         * @tparam T expected response type. Must be derived from `IEventBase`;
-         *           use `IEventBase` itself to accept any response.
-         * @param recipient who will receive the request.
-         * @param event the request message.
-         * @param timeout how long to wait for the response before failing the future with an "ask timeout" exception.
-         * @return future that is resolved when a message from `recipient` arrives.
- */
- template <typename T>
- [[nodiscard]]
- NThreading::TFuture<THolder<T>> Ask(TActorId recipient, THolder<IEventBase> event, TDuration timeout = TDuration::Max()) {
- if constexpr (std::is_same_v<T, IEventBase>) {
- return AskGeneric(Nothing(), recipient, std::move(event), timeout);
- } else {
- return AskGeneric(T::EventType, recipient, std::move(event), timeout)
- .Apply([](const NThreading::TFuture<THolder<IEventBase>>& ev) {
- return THolder<T>(static_cast<T*>(const_cast<THolder<IEventBase>&>(ev.GetValueSync()).Release())); // =(
- });
- }
- }
-
- [[nodiscard]]
- NThreading::TFuture<THolder<IEventBase>> AskGeneric(
- TMaybe<ui32> expectedEventType,
- TActorId recipient,
- THolder<IEventBase> event,
- TDuration timeout);
-
- ui64 AllocateIDSpace(ui64 count);
-
- TActorId InterconnectProxy(ui32 destinationNode) const;
- ui32 BroadcastToProxies(const std::function<IEventHandle*(const TActorId&)>&);
-
- void UpdateLinkStatus(ui8 status, ui32 destinationNode);
- ui8 LinkStatus(ui32 destinationNode);
-
- TActorId LookupLocalService(const TActorId& x) const;
- TActorId RegisterLocalService(const TActorId& serviceId, const TActorId& actorId);
-
- TInstant Timestamp() const {
- return TInstant::MicroSeconds(RelaxedLoad(&CurrentTimestamp));
- }
-
- TMonotonic Monotonic() const {
- return TMonotonic::MicroSeconds(RelaxedLoad(&CurrentMonotonic));
- }
-
- template <typename T>
- T* AppData() const {
- return (T*)AppData0;
- }
-
- NLog::TSettings* LoggerSettings() const {
- return LoggerSettings0.Get();
- }
-
- void GetPoolStats(ui32 poolId, TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const;
-
- THarmonizerStats GetHarmonizerStats() const;
-
- std::optional<ui32> GetPoolThreadsCount(const ui32 poolId) const {
- if (!SystemSetup) {
- return {};
- }
- return SystemSetup->GetThreadsOptional(poolId);
- }
-
- void DeferPreStop(std::function<void()> fn) {
- DeferredPreStop.push_back(std::move(fn));
- }
-
- TVector<IExecutorPool*> GetBasicExecutorPools() const {
- return CpuManager->GetBasicExecutorPools();
- }
-
- };
-}
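A brief usage sketch for the Schedule overloads declared above, from non-actor code; actorSystem, someActor and the choice of TEvWakeup are assumptions made for illustration:

    // Deliver a TEvWakeup to someActor five seconds from now.
    actorSystem.Schedule(TDuration::Seconds(5),
                         new IEventHandle(someActor, TActorId(), new TEvents::TEvWakeup()));

    // Or pin it to a wall-clock deadline; internally this is converted to a delta
    // against the actor system's current Timestamp().
    actorSystem.Schedule(TInstant::Now() + TDuration::Minutes(1),
                         new IEventHandle(someActor, TActorId(), new TEvents::TEvWakeup()));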
diff --git a/library/cpp/actors/core/actorsystem_ut.cpp b/library/cpp/actors/core/actorsystem_ut.cpp
deleted file mode 100644
index 231d6f0ca1..0000000000
--- a/library/cpp/actors/core/actorsystem_ut.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-#include "actorsystem.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(TActorSystemTest) {
-
- class TTestActor: public TActor<TTestActor> {
- public:
- TTestActor()
- : TActor{&TThis::Main}
- {
- }
-
- STATEFN(Main) {
- Y_UNUSED(ev);
- }
- };
-
- THolder<TTestActorRuntimeBase> CreateRuntime() {
- auto runtime = MakeHolder<TTestActorRuntimeBase>();
- runtime->SetScheduledEventFilter([](auto&&, auto&&, auto&&, auto&&) { return false; });
- runtime->Initialize();
- return runtime;
- }
-
- Y_UNIT_TEST(LocalService) {
- THolder<TTestActorRuntimeBase> runtime = CreateRuntime();
- auto actorA = runtime->Register(new TTestActor);
- auto actorB = runtime->Register(new TTestActor);
-
- TActorId myServiceId{0, TStringBuf{"my-service"}};
-
- auto prevActorId = runtime->RegisterService(myServiceId, actorA);
- UNIT_ASSERT(!prevActorId);
- UNIT_ASSERT_EQUAL(runtime->GetLocalServiceId(myServiceId), actorA);
-
- prevActorId = runtime->RegisterService(myServiceId, actorB);
- UNIT_ASSERT(prevActorId);
- UNIT_ASSERT_EQUAL(prevActorId, actorA);
- UNIT_ASSERT_EQUAL(runtime->GetLocalServiceId(myServiceId), actorB);
- }
-}
diff --git a/library/cpp/actors/core/ask.cpp b/library/cpp/actors/core/ask.cpp
deleted file mode 100644
index 40c6748d56..0000000000
--- a/library/cpp/actors/core/ask.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#include "ask.h"
-
-#include "actor_bootstrapped.h"
-#include "actorid.h"
-#include "event.h"
-#include "hfunc.h"
-
-namespace NActors {
- namespace {
- class TAskActor: public TActorBootstrapped<TAskActor> {
- enum {
- Timeout = EventSpaceBegin(TEvents::ES_PRIVATE),
- };
-
- // We can't use the standard timeout event because recipient may send us one.
- struct TTimeout: public TEventLocal<TTimeout, Timeout> {
- };
-
- public:
- TAskActor(
- TMaybe<ui32> expectedEventType,
- TActorId recipient,
- THolder<IEventBase> event,
- TDuration timeout,
- const NThreading::TPromise<THolder<IEventBase>>& promise)
- : ExpectedEventType_(expectedEventType)
- , Recipient_(recipient)
- , Event_(std::move(event))
- , Timeout_(timeout)
- , Promise_(promise)
- {
- }
-
- static constexpr char ActorName[] = "ASK_ACTOR";
-
- public:
- void Bootstrap() {
- Send(Recipient_, std::move(Event_));
- Become(&TAskActor::Waiting);
-
- if (Timeout_ != TDuration::Max()) {
- Schedule(Timeout_, new TTimeout);
- }
- }
-
- STATEFN(Waiting) {
- if (ev->GetTypeRewrite() == TTimeout::EventType) {
- Promise_.SetException(std::make_exception_ptr(yexception() << "ask timeout"));
- } else if (!ExpectedEventType_ || ev->GetTypeRewrite() == ExpectedEventType_) {
- Promise_.SetValue(ev.Get()->ReleaseBase());
- } else {
- Promise_.SetException(std::make_exception_ptr(yexception() << "received unexpected response " << ev.Get()->GetBase()->ToString()));
- }
-
- PassAway();
- }
-
- public:
- TMaybe<ui32> ExpectedEventType_;
- TActorId Recipient_;
- THolder<IEventBase> Event_;
- TDuration Timeout_;
- NThreading::TPromise<THolder<IEventBase>> Promise_;
- };
- }
-
- THolder<IActor> MakeAskActor(
- TMaybe<ui32> expectedEventType,
- TActorId recipient,
- THolder<IEventBase> event,
- TDuration timeout,
- const NThreading::TPromise<THolder<IEventBase>>& promise)
- {
- return MakeHolder<TAskActor>(expectedEventType, std::move(recipient), std::move(event), timeout, promise);
- }
-}
diff --git a/library/cpp/actors/core/ask.h b/library/cpp/actors/core/ask.h
deleted file mode 100644
index 036f1833a4..0000000000
--- a/library/cpp/actors/core/ask.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#pragma once
-
-#include "actor.h"
-#include "event.h"
-
-#include <library/cpp/threading/future/future.h>
-
-namespace NActors {
- /**
- * See `TActorSystem::Ask`.
- */
- THolder<IActor> MakeAskActor(
- TMaybe<ui32> expectedEventType,
- TActorId recipient,
- THolder<IEventBase> event,
- TDuration timeout,
- const NThreading::TPromise<THolder<IEventBase>>& promise);
-}
diff --git a/library/cpp/actors/core/ask_ut.cpp b/library/cpp/actors/core/ask_ut.cpp
deleted file mode 100644
index e72ebdba9b..0000000000
--- a/library/cpp/actors/core/ask_ut.cpp
+++ /dev/null
@@ -1,131 +0,0 @@
-#include <library/cpp/testing/unittest/registar.h>
-
-#include "actorsystem.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-
-using namespace NActors;
-
-class TPingPong: public TActor<TPingPong> {
-public:
- TPingPong()
- : TActor(&TPingPong::Main)
- {
- }
-
- STATEFN(Main) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvents::TEvPing, OnPing);
- hFunc(TEvents::TEvBlob, OnBlob);
- }
- }
-
- void OnPing(const TEvents::TEvPing::TPtr& ev) {
- Send(ev->Sender, new TEvents::TEvPong);
- }
-
- void OnBlob(const TEvents::TEvBlob::TPtr& ev) {
- Send(ev->Sender, ev->Release().Release());
- }
-};
-
-class TPing: public TActor<TPing> {
-public:
- TPing()
- : TActor(&TPing::Main)
- {
- }
-
- STATEFN(Main) {
- Y_UNUSED(ev);
- }
-};
-
-THolder<TTestActorRuntimeBase> CreateRuntime() {
- auto runtime = MakeHolder<TTestActorRuntimeBase>();
- runtime->SetScheduledEventFilter([](auto&&, auto&&, auto&&, auto&&) { return false; });
- runtime->Initialize();
- return runtime;
-}
-
-Y_UNIT_TEST_SUITE(AskActor) {
- Y_UNIT_TEST(Ok) {
- auto runtime = CreateRuntime();
- auto pingpong = runtime->Register(new TPingPong);
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<TEvents::TEvPong>(
- pingpong,
- THolder(new TEvents::TEvPing));
- runtime->DispatchEvents();
- fut.ExtractValueSync();
- }
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<TEvents::TEvBlob>(
- pingpong,
- THolder(new TEvents::TEvBlob("hello!")));
- runtime->DispatchEvents();
- auto ev = fut.ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(ev->Blob, "hello!");
- }
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<IEventBase>(
- pingpong,
- THolder(new TEvents::TEvPing));
- runtime->DispatchEvents();
- auto ev = fut.ExtractValueSync();
- UNIT_ASSERT_VALUES_EQUAL(ev->Type(), TEvents::TEvPong::EventType);
- }
- }
-
- Y_UNIT_TEST(Err) {
- auto runtime = CreateRuntime();
- auto pingpong = runtime->Register(new TPingPong);
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<TEvents::TEvBlob>(
- pingpong,
- THolder(new TEvents::TEvPing));
- runtime->DispatchEvents();
- UNIT_ASSERT_EXCEPTION_CONTAINS(
- fut.ExtractValueSync(),
- yexception,
- "received unexpected response HelloWorld: Pong");
- }
- }
-
- Y_UNIT_TEST(Timeout) {
- auto runtime = CreateRuntime();
- auto ping = runtime->Register(new TPing);
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<TEvents::TEvPong>(
- ping,
- THolder(new TEvents::TEvPing),
- TDuration::Seconds(1));
- auto start = runtime->GetCurrentTime();
- runtime->DispatchEvents({}, TDuration::Seconds(5));
- UNIT_ASSERT_EXCEPTION_CONTAINS(
- fut.ExtractValueSync(),
- yexception,
- "ask timeout");
- UNIT_ASSERT_VALUES_EQUAL(runtime->GetCurrentTime() - start, TDuration::Seconds(1));
- }
-
- {
- auto fut = runtime->GetAnyNodeActorSystem()->Ask<IEventBase>(
- ping,
- THolder(new TEvents::TEvPing),
- TDuration::Seconds(1));
- auto start = runtime->GetCurrentTime();
- runtime->DispatchEvents({}, TDuration::Seconds(5));
- UNIT_ASSERT_EXCEPTION_CONTAINS(
- fut.ExtractValueSync(),
- yexception,
- "ask timeout");
- UNIT_ASSERT_VALUES_EQUAL(runtime->GetCurrentTime() - start, TDuration::Seconds(1));
- }
- }
-}
diff --git a/library/cpp/actors/core/av_bootstrapped.cpp b/library/cpp/actors/core/av_bootstrapped.cpp
deleted file mode 100644
index 771177242e..0000000000
--- a/library/cpp/actors/core/av_bootstrapped.cpp
+++ /dev/null
@@ -1,17 +0,0 @@
-#include "av_bootstrapped.h"
-
-namespace NActors {
-
-class TEventForStart: public TEventLocalForActor<TEventForStart, TActorAutoStart> {
-public:
-};
-
-TAutoPtr<NActors::IEventHandle> TActorAutoStart::AfterRegister(const TActorId& self, const TActorId& parentId) {
- return new IEventHandle(self, parentId, new TEventForStart, 0);
-}
-
-void TActorAutoStart::ProcessEvent(TEventContext<TEventForStart>& ev) {
- DoOnStart(ev.GetHandle().Sender);
-}
-
-}
diff --git a/library/cpp/actors/core/av_bootstrapped.h b/library/cpp/actors/core/av_bootstrapped.h
deleted file mode 100644
index 65bd224152..0000000000
--- a/library/cpp/actors/core/av_bootstrapped.h
+++ /dev/null
@@ -1,18 +0,0 @@
-#pragma once
-#include "actor_virtual.h"
-
-namespace NActors {
-
-class TEventForStart;
-
-class TActorAutoStart: public IActor {
-protected:
- virtual void DoOnStart(const TActorId& senderActorId) = 0;
- TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parentId) override;
-public:
- void ProcessEvent(TEventContext<TEventForStart>& ev);
-
- TActorAutoStart()
- {}
-};
-}
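A minimal sketch of how the removed TActorAutoStart was meant to be subclassed (the class name is hypothetical, and any further IActor requirements are omitted):

    class TMyAutoStartActor: public NActors::TActorAutoStart {
    protected:
        // Invoked when the TEventForStart sent from AfterRegister is processed;
        // senderActorId is the parent that registered this actor.
        void DoOnStart(const NActors::TActorId& senderActorId) override {
            Y_UNUSED(senderActorId);
        }
    };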
diff --git a/library/cpp/actors/core/balancer.cpp b/library/cpp/actors/core/balancer.cpp
deleted file mode 100644
index 5e8a280a8b..0000000000
--- a/library/cpp/actors/core/balancer.cpp
+++ /dev/null
@@ -1,311 +0,0 @@
-#include "balancer.h"
-
-#include "probes.h"
-
-#include <library/cpp/actors/util/cpu_load_log.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/intrinsics.h>
-
-#include <util/system/spinlock.h>
-
-#include <algorithm>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
-    // Describes the balancing-related state of a pool; the most notable field is `Importance`, which decides whether the pool should receive another cpu
- struct TLevel {
- // Balancer will try to give more cpu to overloaded pools
- enum ELoadClass {
- Underloaded = 0,
- Moderate = 1,
- Overloaded = 2,
- };
-
- double ScaleFactor;
- ELoadClass LoadClass;
-        ui64 Importance; // a pool with lower importance may pass a cpu to a pool with higher importance, but not the other way around
-
- TLevel() {}
-
- TLevel(const TBalancingConfig& cfg, TPoolId poolId, ui64 currentCpus, double cpuIdle, ui64 addLatencyUs, ui64 worstLatencyUs) {
- ScaleFactor = double(currentCpus) / cfg.Cpus;
-            if ((worstLatencyUs + addLatencyUs) < 2000 && cpuIdle > 1.0) { // Underload criterion, based on estimated latency without one cpu
- LoadClass = Underloaded;
- } else if (worstLatencyUs > 2000 || cpuIdle < 0.2) { // Overload criterion, based on latency
- LoadClass = Overloaded;
- } else {
- LoadClass = Moderate;
- }
- Importance = MakeImportance(LoadClass, cfg.Priority, ScaleFactor, cpuIdle, poolId);
- }
-
- private:
-        // Importance is a simple ui64 value (fields from the highest bits to the lowest):
- // 2 Bits: LoadClass
- // 8 Bits: Priority
- // 10 Bits: -ScaleFactor (for max-min fairness with weights equal to TBalancingConfig::Cpus)
- // 10 Bits: -CpuIdle
- // 6 Bits: PoolId
- static ui64 MakeImportance(ELoadClass load, ui8 priority, double scaleFactor, double cpuIdle, TPoolId poolId) {
- ui64 idle = std::clamp<i64>(1024 - cpuIdle * 512, 0, 1023);
- ui64 scale = std::clamp<i64>(1024 - scaleFactor * 32, 0, 1023);
-
- Y_ABORT_UNLESS(ui64(load) < (1ull << 2ull));
- Y_ABORT_UNLESS(ui64(priority) < (1ull << 8ull));
- Y_ABORT_UNLESS(ui64(scale) < (1ull << 10ull));
- Y_ABORT_UNLESS(ui64(idle) < (1ull << 10ull));
- Y_ABORT_UNLESS(ui64(poolId) < (1ull << 6ull));
-
- static_assert(ui64(MaxPools) <= (1ull << 6ull));
-
- ui64 importance =
- (ui64(load) << ui64(6 + 10 + 10 + 8)) |
- (ui64(priority) << ui64(6 + 10 + 10)) |
- (ui64(scale) << ui64(6 + 10)) |
- (ui64(idle) << ui64(6)) |
- ui64(poolId);
- return importance;
- }
- };
-
-    // Main balancer implementation
- class TBalancer: public IBalancer {
- private:
- struct TCpu;
- struct TPool;
-
- bool Disabled = true;
- TSpinLock Lock;
- ui64 NextBalanceTs;
- TVector<TCpu> Cpus; // Indexed by CpuId, can have gaps
- TVector<TPool> Pools; // Indexed by PoolId, can have gaps
- TBalancerConfig Config;
-
- public:
-
- ui64 GetPeriodUs() override;
- // Setup
- TBalancer(const TBalancerConfig& config, const TVector<TUnitedExecutorPoolConfig>& unitedPools, ui64 ts);
- bool AddCpu(const TCpuAllocation& cpuAlloc, TCpuState* cpu) override;
- ~TBalancer();
-
- // Balancing
- bool TryLock(ui64 ts) override;
- void SetPoolStats(TPoolId pool, const TBalancerStats& stats) override;
- void Balance() override;
- void Unlock() override;
-
- private:
- void MoveCpu(TPool& from, TPool& to);
- };
-
- struct TBalancer::TPool {
- TBalancingConfig Config;
- TPoolId PoolId;
- TString PoolName;
-
- // Input data for balancing
- TBalancerStats Prev;
- TBalancerStats Next;
-
- // Derived stats
- double CpuLoad;
- double CpuIdle;
-
- // Classification
- // NOTE: We want to avoid passing cpu back and forth, so we must consider not only current level,
-        // NOTE: but also the expected levels after movements
- TLevel CurLevel; // Level with current amount of cpu
- TLevel AddLevel; // Level after one cpu acception
- TLevel SubLevel; // Level after one cpu donation
-
- // Balancing state
-        ui64 CurrentCpus = 0; // Total number of cpus assigned to this pool (zero means the pool is not balanced)
- ui64 PrevCpus = 0; // Cpus in last period
-
- explicit TPool(const TBalancingConfig& cfg = {})
- : Config(cfg)
- {}
-
- void Configure(const TBalancingConfig& cfg, const TString& poolName) {
- Config = cfg;
- // Enforce constraints
- if (Config.Cpus > 0) {
- Config.MinCpus = std::clamp<ui32>(Config.MinCpus, 1, Config.Cpus);
- Config.MaxCpus = Max<ui32>(Config.MaxCpus, Config.Cpus);
- } else {
- Y_ABORT_UNLESS(Config.Cpus == 0,
- "Unexpected negative Config.Cpus# %" PRIi64,
- (i64)Config.Cpus);
- Config.MinCpus = 0;
- Config.MaxCpus = 0;
- }
- PoolName = poolName;
- }
- };
-
- struct TBalancer::TCpu {
- TCpuState* State = nullptr; // Cpu state, nullptr means cpu is not used (gap)
- TCpuAllocation Alloc;
- TPoolId Current;
- TPoolId Assigned;
- };
-
- TBalancer::TBalancer(const TBalancerConfig& config, const TVector<TUnitedExecutorPoolConfig>& unitedPools, ui64 ts)
- : NextBalanceTs(ts)
- , Config(config)
- {
- for (TPoolId pool = 0; pool < MaxPools; pool++) {
- Pools.emplace_back();
- Pools.back().PoolId = pool;
- }
- for (const TUnitedExecutorPoolConfig& united : unitedPools) {
- Pools[united.PoolId].Configure(united.Balancing, united.PoolName);
- }
- }
-
- TBalancer::~TBalancer() {
- }
-
- bool TBalancer::AddCpu(const TCpuAllocation& cpuAlloc, TCpuState* state) {
- // Setup
- TCpuId cpuId = cpuAlloc.CpuId;
- if (Cpus.size() <= cpuId) {
- Cpus.resize(cpuId + 1);
- }
- TCpu& cpu = Cpus[cpuId];
- cpu.State = state;
- cpu.Alloc = cpuAlloc;
-
- // Fill every pool with cpus up to TBalancingConfig::Cpus
- TPoolId pool = 0;
- for (TPool& p : Pools) {
- if (p.CurrentCpus < p.Config.Cpus) {
- p.CurrentCpus++;
- break;
- }
- pool++;
- }
- if (pool != MaxPools) { // cpu under balancer control
- state->SwitchPool(pool);
- state->AssignPool(pool);
- Disabled = false;
- return true;
- }
- return false; // non-balanced cpu
- }
-
- bool TBalancer::TryLock(ui64 ts) {
- if (!Disabled && NextBalanceTs < ts && Lock.TryAcquire()) {
- NextBalanceTs = ts + Us2Ts(Config.PeriodUs);
- return true;
- }
- return false;
- }
-
- void TBalancer::SetPoolStats(TPoolId pool, const TBalancerStats& stats) {
- Y_ABORT_UNLESS(pool < MaxPools);
- TPool& p = Pools[pool];
- p.Prev = p.Next;
- p.Next = stats;
- }
-
- void TBalancer::Balance() {
- // Update every cpu state
- for (TCpu& cpu : Cpus) {
- if (cpu.State) {
- cpu.State->Load(cpu.Assigned, cpu.Current);
- if (cpu.Current < MaxPools && cpu.Current != cpu.Assigned) {
- return; // previous movement has not been applied yet, wait
- }
- }
- }
-
- // Process stats, classify and compute pool importance
- TStackVec<TPool*, MaxPools> order;
- for (TPool& pool : Pools) {
- if (pool.Config.Cpus == 0) {
- continue; // skip gaps (non-existent or non-united pools)
- }
- if (pool.Prev.Ts == 0 || pool.Prev.Ts >= pool.Next.Ts) {
- return; // invalid stats
- }
-
- // Compute derived stats
- pool.CpuLoad = (pool.Next.CpuUs - pool.Prev.CpuUs) / Ts2Us(pool.Next.Ts - pool.Prev.Ts);
- if (pool.Prev.IdleUs == ui64(-1) || pool.Next.IdleUs == ui64(-1)) {
- pool.CpuIdle = pool.CurrentCpus - pool.CpuLoad; // for tests
- } else {
- pool.CpuIdle = (pool.Next.IdleUs - pool.Prev.IdleUs) / Ts2Us(pool.Next.Ts - pool.Prev.Ts);
- }
-
- // Compute levels
- pool.CurLevel = TLevel(pool.Config, pool.PoolId, pool.CurrentCpus, pool.CpuIdle,
- pool.Next.ExpectedLatencyIncreaseUs, pool.Next.WorstActivationTimeUs);
- pool.AddLevel = TLevel(pool.Config, pool.PoolId, pool.CurrentCpus + 1, pool.CpuIdle,
-                                   0, pool.Next.WorstActivationTimeUs); // we expect the taken cpu to become utilized
- pool.SubLevel = TLevel(pool.Config, pool.PoolId, pool.CurrentCpus - 1, pool.CpuIdle - 1,
- pool.Next.ExpectedLatencyIncreaseUs, pool.Next.WorstActivationTimeUs);
-
- // Prepare for balancing
- pool.PrevCpus = pool.CurrentCpus;
- order.push_back(&pool);
- }
-
- // Sort pools by importance
- std::sort(order.begin(), order.end(), [] (TPool* l, TPool* r) {return l->CurLevel.Importance < r->CurLevel.Importance; });
- for (TPool* pool : order) {
- LWPROBE(PoolStats, pool->PoolId, pool->PoolName, pool->CurrentCpus, pool->CurLevel.LoadClass, pool->Config.Priority, pool->CurLevel.ScaleFactor, pool->CpuIdle, pool->CpuLoad, pool->CurLevel.Importance, pool->AddLevel.Importance, pool->SubLevel.Importance);
- }
-
- // Move cpus from lower importance to higher importance pools
- for (auto toIter = order.rbegin(); toIter != order.rend(); ++toIter) {
- TPool& to = **toIter;
- if (to.CurLevel.LoadClass == TLevel::Overloaded && // if pool is overloaded
- to.CurrentCpus < to.Config.MaxCpus) // and constraints would not be violated
- {
- for (auto fromIter = order.begin(); (*fromIter)->CurLevel.Importance < to.CurLevel.Importance; ++fromIter) {
- TPool& from = **fromIter;
- if (from.CurrentCpus == from.PrevCpus && // if not balanced yet
- from.CurrentCpus > from.Config.MinCpus && // and constraints would not be violated
-                        from.SubLevel.Importance <= to.AddLevel.Importance) // and the relative importance of the two pools would not flip after the move
- {
- MoveCpu(from, to);
- from.CurrentCpus--;
- to.CurrentCpus++;
- break;
- }
- }
- }
- }
- }
-
- void TBalancer::MoveCpu(TBalancer::TPool& from, TBalancer::TPool& to) {
- for (auto ci = Cpus.rbegin(), ce = Cpus.rend(); ci != ce; ci++) {
- TCpu& cpu = *ci;
- if (!cpu.State) {
- continue;
- }
- if (cpu.Assigned == from.PoolId) {
- cpu.State->AssignPool(to.PoolId);
- cpu.Assigned = to.PoolId;
- LWPROBE(MoveCpu, from.PoolId, to.PoolId, from.PoolName, to.PoolName, cpu.Alloc.CpuId);
- return;
- }
- }
- Y_ABORT();
- }
-
- void TBalancer::Unlock() {
- Lock.Release();
- }
-
- ui64 TBalancer::GetPeriodUs() {
- return Config.PeriodUs;
- }
-
- IBalancer* MakeBalancer(const TBalancerConfig& config, const TVector<TUnitedExecutorPoolConfig>& unitedPools, ui64 ts) {
- return new TBalancer(config, unitedPools, ts);
- }
-}
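A worked example of the MakeImportance packing used above, showing why load class dominates priority when pools are ordered (field widths as in the comment: 2 bits load, 8 bits priority, 10+10 bits scale/idle, 6 bits pool id):

    Overloaded, priority 0   :  importance ~ (2 << 34)                 = 0x800000000
    Moderate,   priority 255 :  importance ~ (1 << 34) | (255 << 26)   = 0x7FC000000

Since 0x800000000 > 0x7FC000000, an overloaded pool always outranks a moderately loaded one regardless of priority; the scale, idle and pool-id fields only break ties in the lower bits.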
diff --git a/library/cpp/actors/core/balancer.h b/library/cpp/actors/core/balancer.h
deleted file mode 100644
index e1f6f33bf3..0000000000
--- a/library/cpp/actors/core/balancer.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "config.h"
-#include "cpu_state.h"
-
-namespace NActors {
- // Per-pool statistics used by balancer
- struct TBalancerStats {
- ui64 Ts = 0; // Measurement timestamp
- ui64 CpuUs = 0; // Total cpu microseconds consumed by pool on all cpus since start
- ui64 IdleUs = ui64(-1); // Total cpu microseconds in spinning or waiting on futex
- ui64 WorstActivationTimeUs = 0;
- ui64 ExpectedLatencyIncreaseUs = 0;
- };
-
- // Pool cpu balancer
- struct IBalancer {
- virtual ~IBalancer() {}
- virtual bool AddCpu(const TCpuAllocation& cpuAlloc, TCpuState* cpu) = 0;
- virtual bool TryLock(ui64 ts) = 0;
- virtual void SetPoolStats(TPoolId pool, const TBalancerStats& stats) = 0;
- virtual void Balance() = 0;
- virtual void Unlock() = 0;
- virtual ui64 GetPeriodUs() = 0;
- // TODO: add method for reconfiguration on fly
- };
-
- IBalancer* MakeBalancer(const TBalancerConfig& config, const TVector<TUnitedExecutorPoolConfig>& unitedPools, ui64 ts);
-}
diff --git a/library/cpp/actors/core/balancer_ut.cpp b/library/cpp/actors/core/balancer_ut.cpp
deleted file mode 100644
index 7e5e95f4b9..0000000000
--- a/library/cpp/actors/core/balancer_ut.cpp
+++ /dev/null
@@ -1,225 +0,0 @@
-#include "balancer.h"
-
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/lwtrace/all.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/stream/str.h>
-
-using namespace NActors;
-
-////////////////////////////////////////////////////////////////////////////////
-
-Y_UNIT_TEST_SUITE(PoolCpuBalancer) {
- struct TTest {
- TCpuManagerConfig Config;
- TCpuMask Available;
- THolder<IBalancer> Balancer;
- TVector<TCpuState> CpuStates;
- TVector<ui64> CpuUs;
- ui64 Now = 0;
-
- void SetCpuCount(size_t count) {
- Config.UnitedWorkers.CpuCount = count;
- for (TCpuId cpuId = 0; cpuId < count; cpuId++) {
- Available.Set(cpuId);
- }
- }
-
- void AddPool(ui32 minCpus, ui32 cpus, ui32 maxCpus, ui8 priority = 0) {
- TUnitedExecutorPoolConfig u;
- u.PoolId = TPoolId(Config.United.size());
- u.Balancing.Cpus = cpus;
- u.Balancing.MinCpus = minCpus;
- u.Balancing.MaxCpus = maxCpus;
- u.Balancing.Priority = priority;
- Config.United.push_back(u);
- }
-
- void Start() {
- TCpuAllocationConfig allocation(Available, Config);
- Balancer.Reset(MakeBalancer(Config.UnitedWorkers.Balancer, Config.United, 0));
- CpuStates.resize(allocation.Items.size()); // do not resize it later to avoid dangling pointers
- CpuUs.resize(CpuStates.size());
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- bool added = Balancer->AddCpu(cpuAlloc, &CpuStates[cpuAlloc.CpuId]);
- UNIT_ASSERT(added);
- }
- }
-
- void Balance(ui64 deltaTs, const TVector<ui64>& cpuUs) {
- Now += deltaTs;
- ui64 ts = Now;
- if (Balancer->TryLock(ts)) {
- for (TPoolId pool = 0; pool < cpuUs.size(); pool++) {
- CpuUs[pool] += cpuUs[pool];
- TBalancerStats stats;
- stats.Ts = ts;
- stats.CpuUs = CpuUs[pool];
- Balancer->SetPoolStats(pool, stats);
- }
- Balancer->Balance();
- Balancer->Unlock();
- }
- }
-
- void ApplyMovements() {
- for (TCpuState& state : CpuStates) {
- TPoolId current;
- TPoolId assigned;
- state.Load(assigned, current);
- state.SwitchPool(assigned);
- }
- }
-
- static TString ToStr(const TVector<ui64>& values) {
- TStringStream ss;
- ss << "{";
- for (auto v : values) {
- ss << " " << v;
- }
- ss << " }";
- return ss.Str();
- }
-
- void AssertPoolsCurrentCpus(const TVector<ui64>& cpuRequired) {
- TVector<ui64> cpuCurrent;
- cpuCurrent.resize(cpuRequired.size());
- for (TCpuState& state : CpuStates) {
- TPoolId current;
- TPoolId assigned;
- state.Load(assigned, current);
- cpuCurrent[current]++;
- }
- for (TPoolId pool = 0; pool < cpuRequired.size(); pool++) {
- UNIT_ASSERT_C(cpuCurrent[pool] == cpuRequired[pool],
- "cpu distribution mismatch, required " << ToStr(cpuRequired) << " but got " << ToStr(cpuCurrent));
- }
- }
- };
-
- Y_UNIT_TEST(StartLwtrace) {
- NLWTrace::StartLwtraceFromEnv();
- }
-
- Y_UNIT_TEST(AllOverloaded) {
- TTest t;
- int cpus = 10;
- t.SetCpuCount(cpus);
- t.AddPool(1, 1, 10); // pool=0
- t.AddPool(1, 2, 10); // pool=1
- t.AddPool(1, 3, 10); // pool=2
-        t.AddPool(1, 4, 10); // pool=3
- t.Start();
- ui64 dts = 1.01 * Us2Ts(t.Config.UnitedWorkers.Balancer.PeriodUs);
- ui64 totalCpuUs = cpus * Ts2Us(dts); // pretend every pool has consumed as whole actorsystem, overload
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {totalCpuUs, totalCpuUs, totalCpuUs, totalCpuUs});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 2, 3, 4});
- }
-
- Y_UNIT_TEST(OneOverloaded) {
- TTest t;
- int cpus = 10;
- t.SetCpuCount(cpus);
- t.AddPool(1, 1, 10); // pool=0
- t.AddPool(1, 2, 10); // pool=1
- t.AddPool(1, 3, 10); // pool=2
-        t.AddPool(1, 4, 10); // pool=3
- t.Start();
- ui64 dts = 1.01 * Us2Ts(t.Config.UnitedWorkers.Balancer.PeriodUs);
- ui64 totalCpuUs = cpus * Ts2Us(dts);
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {totalCpuUs, 0, 0, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({7, 1, 1, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, totalCpuUs, 0, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 7, 1, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, 0, totalCpuUs, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 1, 7, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, 0, 0, totalCpuUs});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 1, 1, 7});
- }
-
- Y_UNIT_TEST(TwoOverloadedFairness) {
- TTest t;
- int cpus = 10;
- t.SetCpuCount(cpus);
- t.AddPool(1, 1, 10); // pool=0
- t.AddPool(1, 2, 10); // pool=1
- t.AddPool(1, 3, 10); // pool=2
-        t.AddPool(1, 4, 10); // pool=3
- t.Start();
- ui64 dts = 1.01 * Us2Ts(t.Config.UnitedWorkers.Balancer.PeriodUs);
- ui64 totalCpuUs = cpus * Ts2Us(dts);
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {totalCpuUs, totalCpuUs, 0, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({3, 5, 1, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {totalCpuUs, 0, totalCpuUs, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({2, 1, 6, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {totalCpuUs, 0, 0, totalCpuUs});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({2, 1, 1, 6});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, totalCpuUs, totalCpuUs, 0});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 3, 5, 1});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, totalCpuUs, 0, totalCpuUs});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 3, 1, 5});
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {0, 0, totalCpuUs, totalCpuUs});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({1, 1, 3, 5});
- }
-
- Y_UNIT_TEST(TwoOverloadedPriority) {
- TTest t;
- int cpus = 20;
- t.SetCpuCount(cpus);
- t.AddPool(1, 5, 20, 0); // pool=0
- t.AddPool(1, 5, 20, 1); // pool=1
- t.AddPool(1, 5, 20, 2); // pool=2
- t.AddPool(1, 5, 20, 3); // pool=3
- t.Start();
- ui64 dts = 1.01 * Us2Ts(t.Config.UnitedWorkers.Balancer.PeriodUs);
- ui64 mErlang = Ts2Us(dts) / 1000;
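-        // 1000 mErlang is one cpu fully busy for a balancer period, so the load below corresponds to
-        // 20 / 2.5 / 4.5 / 9.5 cpus for pools 0..3 respectively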
- for (int i = 0; i < cpus; i++) {
- t.Balance(dts, {20000 * mErlang, 2500 * mErlang, 4500 * mErlang, 9500 * mErlang});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({2, 3, 5, 10});
- t.Balance(dts, {20000 * mErlang, 2500 * mErlang, 4500 * mErlang, 8500 * mErlang});
- t.ApplyMovements();
- t.AssertPoolsCurrentCpus({3, 3, 5, 9});
-        // NOTE: this operation requires only one move, but we do not perform global analysis, so multiple steps (1->2 & 0->1) are needed (can be optimized later)
- for (int i = 0; i < 3; i++) {
- t.Balance(dts, {20000 * mErlang, 2500 * mErlang, 5500 * mErlang, 8500 * mErlang});
- t.ApplyMovements();
- }
- t.AssertPoolsCurrentCpus({2, 3, 6, 9});
- }
-}
diff --git a/library/cpp/actors/core/benchmark_ut.cpp b/library/cpp/actors/core/benchmark_ut.cpp
deleted file mode 100644
index 380e983b92..0000000000
--- a/library/cpp/actors/core/benchmark_ut.cpp
+++ /dev/null
@@ -1,1111 +0,0 @@
-#include "actorsystem.h"
-#include "actor_bootstrapped.h"
-#include "config.h"
-#include "executor_pool_basic.h"
-#include "hfunc.h"
-#include "scheduler_basic.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/thread/pool.h>
-
-#include <algorithm>
-#include <atomic>
-#include <chrono>
-#include <condition_variable>
-#include <iostream>
-#include <memory>
-#include <mutex>
-#include <optional>
-#include <span>
-#include <string>
-#include <thread>
-#include <utility>
-#include <unordered_set>
-#include <unordered_map>
-#include <vector>
-
-#define BENCH_START(label) auto label##Start = std::chrono::steady_clock::now()
-#define BENCH_END(label) std::chrono::steady_clock::now() - label##Start
-
-using namespace NActors;
-using namespace std::chrono_literals;
-
-Y_UNIT_TEST_SUITE(ActorSystemBenchmark) {
-
- enum ESimpleEventType {
- EvQuickSort,
- EvSumVector,
- EvSumVectorResult,
- EvSumSendRequests,
- EvKvSearch,
- EvKvSendRequests,
- };
-
- using TContainer = std::vector<i32>;
- using TActorIds = std::span<TActorId>;
-
- template <typename TId>
- class TActiveEntityRegistry {
- private:
- std::unordered_set<TId> ActiveIds_;
- std::mutex ActiveIdsMutex_;
- std::condition_variable ActiveIdsCv_;
-
- public:
- void SetActive(TId id) {
- std::unique_lock lock(ActiveIdsMutex_);
- ActiveIds_.insert(std::move(id));
- }
-
- void SetInactive(const TId& id) {
- std::unique_lock lock(ActiveIdsMutex_);
- ActiveIds_.erase(id);
- if (ActiveIds_.empty()) {
- ActiveIdsCv_.notify_all();
- }
- }
-
- bool WaitForAllInactive(std::chrono::microseconds timeout = 1ms) {
- std::unique_lock lock(ActiveIdsMutex_);
- ActiveIdsCv_.wait_for(lock, timeout, [this] {
- return ActiveIds_.empty();
- });
- return ActiveIds_.empty();
- }
- };
-
- class TQuickSortEngine {
- public:
- struct TParameters {
- TContainer &Container;
- i32 Left;
- i32 Right;
- void* CustomData = nullptr;
-
- TParameters() = delete;
- TParameters(TContainer& container, i32 left, i32 right, void* customData)
- : Container(container)
- , Left(left)
- , Right(right)
- , CustomData(customData)
- {}
- };
-
- public:
- void Sort(TQuickSortEngine::TParameters& params) {
- auto [newRight, newLeft] = Partition(params.Container, params.Left, params.Right);
- if (!(params.Left < newRight || newLeft < params.Right)) {
- return;
- }
-
- auto [leftParams, rightParams] = SplitParameters(params, newRight, newLeft);
-
- bool ranAsync = false;
-
- if (newLeft < params.Right) {
- ranAsync = TryRunAsync(rightParams);
-                if (!ranAsync) {
- Sort(rightParams);
- }
- }
- if (params.Left < newRight) {
-                if (ranAsync || !TryRunAsync(leftParams)) {
- Sort(leftParams);
- }
- }
- }
-
- // returns bounds of left and right sub-arrays for the next iteration
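-        // e.g. Partition({5, 3, 8, 2}, 0, 3) picks pivot 3, reorders the data to {2, 3, 8, 5}
-        // and returns {0, 2}: the sub-ranges [0, 0] and [2, 3] are handled by subsequent calls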
- std::pair<i32, i32> Partition(TContainer& container, i32 left, i32 right) {
- ui32 pivotIndex = (left + right) / 2;
- auto pivot = container[pivotIndex];
- while (left <= right) {
- while (container[left] < pivot) {
- left++;
- }
- while (container[right] > pivot) {
- right--;
- }
-
- if (left <= right) {
- std::swap(container[left++], container[right--]);
- }
- }
- return {right, left};
- }
-
- protected:
- virtual std::pair<TParameters, TParameters> SplitParameters(const TParameters& params, i32 newRight, i32 newLeft) = 0;
- virtual bool TryRunAsync(const TParameters& params) = 0;
- };
-
- class TQuickSortTask : public TQuickSortEngine, public IObjectInQueue {
- public:
- using TParameters = TQuickSortEngine::TParameters;
- struct TThreadPoolParameters {
- const ui32 ThreadsLimit = 0;
- std::atomic<ui32> ThreadsUsed = 0;
- TThreadPool& ThreadPool;
-
- TThreadPoolParameters(ui32 threadsLimit, ui32 threadsUsed, TThreadPool &threadPool)
- : ThreadsLimit(threadsLimit)
- , ThreadsUsed(threadsUsed)
- , ThreadPool(threadPool)
- {}
- };
- TParameters Params;
- TActiveEntityRegistry<TQuickSortTask*>& ActiveThreadRegistry;
-
- public:
- TQuickSortTask() = delete;
- TQuickSortTask(TParameters params, TActiveEntityRegistry<TQuickSortTask*>& activeThreadRegistry)
- : Params(params)
- , ActiveThreadRegistry(activeThreadRegistry)
- {
- ActiveThreadRegistry.SetActive(this);
- }
-
- void Process(void*) override {
- Sort(Params);
- ActiveThreadRegistry.SetInactive(this);
- }
-
- protected:
- std::pair<TParameters, TParameters> SplitParameters(const TParameters& params, i32 newRight, i32 newLeft) override {
- return {
- {params.Container, params.Left, newRight, params.CustomData},
- {params.Container, newLeft, params.Right, params.CustomData}
- };
- }
-
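-        // ThreadsUsed is bumped optimistically and rolled back when ThreadsLimit has already been
-        // reached, capping the total number of sub-sorts handed to the thread pool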
- bool TryRunAsync(const TParameters& params) override {
- auto threadPoolParams = static_cast<TThreadPoolParameters*>(params.CustomData);
- if (threadPoolParams->ThreadsUsed++ >= threadPoolParams->ThreadsLimit) {
- threadPoolParams->ThreadsUsed--;
- return false;
- }
- return threadPoolParams->ThreadPool.AddAndOwn(THolder(new TQuickSortTask(params, ActiveThreadRegistry)));
- }
- };
-
- class TEvQuickSort : public TEventLocal<TEvQuickSort, EvQuickSort> {
- public:
- using TParameters = TQuickSortEngine::TParameters;
- struct TActorSystemParameters {
- TActorIds ActorIds;
- std::atomic<ui32> ActorIdsUsed = 0;
- TActorSystemParameters() = delete;
- TActorSystemParameters(const TActorIds& actorIds, ui32 actorIdsUsed = 0)
- : ActorIds(actorIds)
- , ActorIdsUsed(actorIdsUsed)
- {}
- };
-
- TQuickSortEngine::TParameters Params;
-
- public:
- TEvQuickSort() = delete;
- TEvQuickSort(TParameters params, TActiveEntityRegistry<TEvQuickSort*>& activeEventRegistry)
- : Params(params)
- , ActiveEventRegistry_(activeEventRegistry)
- {
- Y_ABORT_UNLESS(!Params.Container.empty());
- Y_ABORT_UNLESS(Params.Right - Params.Left + 1 <= static_cast<i32>(Params.Container.size()),
- "left: %d, right: %d, cont.size: %d", Params.Left, Params.Right, static_cast<i32>(Params.Container.size()));
- ActiveEventRegistry_.SetActive(this);
- }
-
- virtual ~TEvQuickSort() {
- ActiveEventRegistry_.SetInactive(this);
- }
-
- private:
- TActiveEntityRegistry<TEvQuickSort*>& ActiveEventRegistry_;
- };
-
- class TQuickSortActor : public TQuickSortEngine, public TActorBootstrapped<TQuickSortActor> {
- public:
- using TParameters = TQuickSortEngine::TParameters;
-
- private:
- TActiveEntityRegistry<TEvQuickSort*>& ActiveEventRegistry_;
-
- public:
- TQuickSortActor() = delete;
- TQuickSortActor(TActiveEntityRegistry<TEvQuickSort*>& activeEventRegistry)
- : TActorBootstrapped<TQuickSortActor>()
- , ActiveEventRegistry_(activeEventRegistry)
- {}
-
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvQuickSort, Handle);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- }
-
- protected:
- std::pair<TParameters, TParameters> SplitParameters(const TParameters& params, i32 newRight, i32 newLeft) override {
- return {
- {params.Container, params.Left, newRight, params.CustomData},
- {params.Container, newLeft, params.Right, params.CustomData}
- };
- }
-
- bool TryRunAsync(const TParameters& params) override {
- auto actorSystemParams = static_cast<TEvQuickSort::TActorSystemParameters*>(params.CustomData);
- const auto actorIdIndex = actorSystemParams->ActorIdsUsed++;
- if (actorIdIndex >= actorSystemParams->ActorIds.size()) {
- actorSystemParams->ActorIdsUsed--;
- return false;
- }
- auto targetActorId = actorSystemParams->ActorIds[actorIdIndex];
- Send(targetActorId, new TEvQuickSort(params, ActiveEventRegistry_));
- return true;
- }
-
- private:
- void Handle(TEvQuickSort::TPtr& ev) {
- auto evPtr = ev->Get();
- Sort(evPtr->Params);
- }
- };
-
- std::vector<i32> PrepareVectorToSort(ui32 n) {
- std::vector<i32> numbers(n);
- for (ui32 i = 0; i < numbers.size(); i++) {
- numbers[i] = numbers.size() - i;
- }
- return numbers;
- }
-
- std::unique_ptr<TActorSystem> PrepareActorSystem(ui32 poolThreads, TAffinity* affinity = nullptr) {
- auto setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 1;
-
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
-
- ui32 poolId = 0;
-        ui64 poolSpinThreshold = 20;
- setup->Executors[0].Reset(new TBasicExecutorPool(
-            poolId, poolThreads, poolSpinThreshold, "", nullptr, affinity));
- TSchedulerConfig schedulerConfig;
- schedulerConfig.ResolutionMicroseconds = 512;
- schedulerConfig.SpinThreshold = 100;
- setup->Scheduler.Reset(new TBasicSchedulerThread(schedulerConfig));
-
- return std::make_unique<TActorSystem>(setup);
- }
-
- std::vector<TActorId> prepareQuickSortActors(
- TActorSystem* actorSystem, ui32 actorsNum, TActiveEntityRegistry<TEvQuickSort*>& activeEventRegistry
- ) {
- std::vector<TActorId> actorIds;
- actorIds.reserve(actorsNum);
-
- for (ui32 i = 0; i < actorsNum; i++) {
- auto actor = new TQuickSortActor(activeEventRegistry);
- auto actorId = actorSystem->Register(actor);
- actorIds.push_back(actorId);
- }
-
- return actorIds;
- }
-
- std::pair<std::chrono::microseconds, std::vector<i32>> BenchmarkQuickSortActor(
- ui32 threads,
- ui32 iterations,
- ui32 vectorSize
- ) {
- auto actorSystem = PrepareActorSystem(threads);
- actorSystem->Start();
-
- TActiveEntityRegistry<TEvQuickSort*> activeEventRegistry;
- auto actorIds = prepareQuickSortActors(actorSystem.get(), threads, activeEventRegistry);
-
- std::vector<i32> actorSortResult;
- auto actorQsDurationTotal = 0us;
- for (ui32 i = 0; i < iterations; i++) {
- auto numbers = PrepareVectorToSort(vectorSize);
-
- TEvQuickSort::TActorSystemParameters actorSystemParams(actorIds, 1);
- TEvQuickSort::TParameters params(numbers, 0, numbers.size() - 1, &actorSystemParams);
- auto ev3 = new TEvQuickSort(params, activeEventRegistry);
-
- BENCH_START(qs);
-
- actorSystem->Send(actorIds.front(), ev3);
- UNIT_ASSERT_C(activeEventRegistry.WaitForAllInactive(60s), "timeout");
-
- actorQsDurationTotal += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(qs));
-
- if (i + 1 == iterations) {
- actorSortResult = numbers;
- }
- }
-
- return {actorQsDurationTotal / iterations, actorSortResult};
- }
-
- std::pair<std::chrono::microseconds, std::vector<i32>> BenchmarkQuickSortThreadPool(
- ui32 threads,
- ui32 iterations,
- ui32 vectorSize
- ) {
- TThreadPool threadPool;
- threadPool.Start(threads);
- TActiveEntityRegistry<TQuickSortTask*> activeThreadRegistry;
-
-        auto threadPoolSortDurationTotal = 0us;
- std::vector<i32> threadPoolSortResult;
- for (ui32 i = 0; i < iterations; i++) {
- auto numbers = PrepareVectorToSort(vectorSize);
-
- TQuickSortTask::TThreadPoolParameters threadPoolParams(threads, 1, threadPool);
- TQuickSortTask::TParameters params(numbers, 0, numbers.size() - 1, &threadPoolParams);
-
- BENCH_START(thread);
-
- Y_ABORT_UNLESS(threadPool.AddAndOwn(THolder(new TQuickSortTask(params, activeThreadRegistry))));
- UNIT_ASSERT_C(activeThreadRegistry.WaitForAllInactive(60s), "timeout");
-
-            threadPoolSortDurationTotal += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(thread));
-
- if (i + 1 == iterations) {
- threadPoolSortResult = numbers;
- }
- }
-
- threadPool.Stop();
-
-        return {threadPoolSortDurationTotal / iterations, threadPoolSortResult};
- }
-
- Y_UNIT_TEST(QuickSortActor) {
- const std::vector<ui32> threadss{1, 4};
- const std::vector<ui32> vectorSizes{100, 1'000, 1'000'000};
- const ui32 iterations = 3;
-
- std::cout << "sep=," << std::endl;
- std::cout << "size,threads,actor_time(us),thread_pool_time(us)" << std::endl;
-
- for (auto vectorSize : vectorSizes) {
- for (auto threads : threadss) {
- std::cerr << "vector size: " << vectorSize << ", threads: " << threads << std::endl;
-
- auto [actorSortDuration, actorSortResult] = BenchmarkQuickSortActor(threads, iterations, vectorSize);
- std::cerr << "actor sort duration: " << actorSortDuration.count() << "us" << std::endl;
-
- auto [threadPoolSortDuration, threadPoolSortResult] = BenchmarkQuickSortThreadPool(threads, iterations, vectorSize);
- std::cerr << "thread pool sort duration: " << threadPoolSortDuration.count() << "us" << std::endl;
-
- auto referenceVector = PrepareVectorToSort(vectorSize);
- std::sort(referenceVector.begin(), referenceVector.end());
-
- UNIT_ASSERT_EQUAL_C(actorSortResult, referenceVector,
- "vector size: " << vectorSize << "; threads: " << threads);
- UNIT_ASSERT_EQUAL_C(threadPoolSortResult, referenceVector,
- "vector size: " << vectorSize << "; threads: " << threads);
-
- std::cout << vectorSize << ","
- << threads << ","
- << actorSortDuration.count() << ","
- << threadPoolSortDuration.count() << std::endl;
- }
-
- std::cerr << "-----" << std::endl << std::endl;
- }
- }
-
- // KV-storage benchmark
-
- using TKvKey = std::string;
- using TKvValue = i32;
- using TDict = std::unordered_map<TKvKey, TKvValue>;
-
- struct TSearchStat {
- ui32 Found = 0;
- ui32 NotFound = 0;
-
-        bool operator==(const TSearchStat& other) const {
- return Found == other.Found && NotFound == other.NotFound;
- }
- };
-
- class TKvSearchTask : public IObjectInQueue {
- private:
- TKvKey Key_;
- const TDict& Dict_;
- TSearchStat SearchStat_ = {};
-
- public:
- TKvSearchTask() = delete;
- TKvSearchTask(TKvKey key, const TDict& dict)
- : Key_(key)
- , Dict_(dict)
- {}
-
- void Process(void*) override {
- if (Dict_.contains(Key_)) {
- SearchStat_.Found++;
- } else {
- SearchStat_.NotFound++;
- }
- }
- };
-
- class TEvKvSearch : public TEventLocal<TEvKvSearch, EvKvSearch> {
- public:
- TKvKey Key;
-
- public:
- TEvKvSearch() = delete;
- TEvKvSearch(TKvKey key)
- : Key(std::move(key))
- {}
- };
-
- class TEvKvSendRequests : public TEventLocal<TEvKvSendRequests, EvKvSendRequests> {
- public:
- const std::vector<std::string>& KeysToSearch;
- const std::vector<TActorId> SearchActorIds;
-
- public:
- TEvKvSendRequests() = delete;
- TEvKvSendRequests(const std::vector<std::string>& keysToSearch, std::vector<TActorId>&& searchActorIds)
- : KeysToSearch(keysToSearch)
- , SearchActorIds(std::move(searchActorIds))
- {}
- };
-
- class TKvSendRequestActor : public TActorBootstrapped<TKvSendRequestActor> {
- public:
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvKvSendRequests, Handle);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- }
-
- private:
- void Handle(TEvKvSendRequests::TPtr& ev) {
- auto evPtr = ev->Get();
- ui32 actorIdx = 0;
- for (auto& key : evPtr->KeysToSearch) {
- auto actorId = evPtr->SearchActorIds[actorIdx];
- actorIdx = (actorIdx + 1) % evPtr->SearchActorIds.size();
-
- Send(actorId, new TEvKvSearch(key));
- }
- }
- };
-
- class TKvSearchActor : public TActorBootstrapped<TKvSearchActor> {
- private:
- const TDict& Dict_;
- TSearchStat SearchStat_ = {};
- std::atomic<ui32> CompletedEvents_ = 0;
-
- public:
- TKvSearchActor() = delete;
- TKvSearchActor(const TDict& dict)
- : TActorBootstrapped<TKvSearchActor>()
- , Dict_(dict)
- {}
-
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvKvSearch, Handle);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- }
-
- const TSearchStat& SearchStat() {
- return SearchStat_;
- }
-
- ui32 CompletedEvents() {
- return CompletedEvents_;
- }
- private:
- void Handle(TEvKvSearch::TPtr& ev) {
- auto evPtr = ev->Get();
-
- if (Dict_.contains(evPtr->Key)) {
- SearchStat_.Found++;
- } else {
- SearchStat_.NotFound++;
- }
- CompletedEvents_++;
- }
- };
-
- TDict prepareKvSearchDict(const i32 dictSize) {
- std::string permutableString = "abcdefghijklm";
- TDict dict;
- for (i32 i = 0; i < dictSize; i++) {
- dict.emplace(permutableString, i);
- std::next_permutation(permutableString.begin(), permutableString.end());
- }
-
- return dict;
- }
-
- std::vector<std::string> prepareKeysToSearch(const TDict &dict, ui32 requestsNumber) {
- std::vector<std::string> keys;
- auto keyAppearances = requestsNumber / dict.size() + 1;
- keys.reserve(keyAppearances * dict.size());
-
- for (auto& [key, _] : dict) {
- for (ui32 i = 0; i < keyAppearances; i++) {
- keys.push_back(key);
-
- // keep the original key value to search
- if (i % 4 == 0) {
- continue;
- }
-
-                // make a non-existing key
- keys.back() += "nonexistingkey";
- }
- }
-
- Y_ABORT_UNLESS(keys.size() >= requestsNumber);
-
- std::random_shuffle(keys.begin(), keys.end());
- keys.resize(requestsNumber);
-
- return keys;
- }
-
- std::pair<std::vector<TKvSearchActor*>, std::vector<TActorId>> prepareKvSearchActors(
- TActorSystem* actorSystem, ui32 searchActorsNum, const std::vector<TDict>& dicts
- ) {
- std::vector<TKvSearchActor*> searchActors;
- std::vector<TActorId> searchActorIds;
- searchActors.reserve(searchActorsNum);
- searchActorIds.reserve(searchActorsNum);
- for (ui32 i = 0, dictIdx = 0; i < searchActorsNum; i++) {
- const auto& dict = dicts[dictIdx];
- dictIdx = (dictIdx + 1) % dicts.size();
-
- auto kvSearchActor = new TKvSearchActor(dict);
- auto kvSearchActorId = actorSystem->Register(kvSearchActor);
- searchActors.push_back(kvSearchActor);
- searchActorIds.push_back(kvSearchActorId);
- }
-
- return {searchActors, searchActorIds};
- }
-
- ui32 CalculateCompletedEvents(const std::vector<TKvSearchActor*>& actors) {
- ui32 completedEvents = 0;
- for (auto actor : actors) {
- completedEvents += actor->CompletedEvents();
- }
- return completedEvents;
- }
-
- TSearchStat CollectKvSearchActorStat(const std::vector<TKvSearchActor*>& actors) {
- TSearchStat stat;
- for (auto actor : actors) {
- stat.Found += actor->SearchStat().Found;
- stat.NotFound += actor->SearchStat().NotFound;
- }
- return stat;
- }
-
- std::pair<std::chrono::microseconds, TSearchStat> BenchmarkKvActor(
- ui32 threads, ui32 actors, ui32 iterations, const std::vector<TDict>& dicts, const std::vector<std::string>& keysToSearch
- ) {
- TSearchStat stat = {};
- auto kvSearchActorDuration = 0us;
-
- for (ui32 i = 0; i < iterations; i++) {
- auto actorSystem = PrepareActorSystem(threads);
- actorSystem->Start();
-
- auto [kvSearchActors, kvSearchActorIds] = prepareKvSearchActors(actorSystem.get(), actors, dicts);
-
- auto kvSendRequestActorId = actorSystem->Register(new TKvSendRequestActor());
-
- BENCH_START(kvSearch);
- actorSystem->Send(kvSendRequestActorId, new TEvKvSendRequests(keysToSearch, std::move(kvSearchActorIds)));
-
-            // CondVar logic adds too much overhead (2-10 times more than a plain sleep_for)
- while (CalculateCompletedEvents(kvSearchActors) < keysToSearch.size()) {
- std::this_thread::sleep_for(1us);
- }
-
- kvSearchActorDuration += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(kvSearch));
-
- if (i + 1 == iterations) {
- stat = CollectKvSearchActorStat(kvSearchActors);
- }
- }
-
- return {kvSearchActorDuration / iterations, stat};
- }
-
- std::pair<std::chrono::microseconds, TSearchStat> BenchmarkKvActorExternalSender(
- ui32 threads, ui32 actors, ui32 iterations, const std::vector<TDict>& dicts, const std::vector<std::string>& keysToSearch
- ) {
- TSearchStat stat = {};
- auto kvSearchActorDuration = 0us;
- for (ui32 i = 0; i < iterations; i++) {
- auto actorSystem = PrepareActorSystem(threads);
- actorSystem->Start();
-
- auto [kvSearchActors, kvSearchActorIds] = prepareKvSearchActors(actorSystem.get(), actors, dicts);
-
- BENCH_START(kvSearch);
- ui32 actorIdToUseIndex = 0;
- for (auto& key : keysToSearch) {
- actorSystem->Send(kvSearchActorIds[actorIdToUseIndex], new TEvKvSearch(key));
- actorIdToUseIndex = (actorIdToUseIndex + 1) % kvSearchActorIds.size();
- }
-
-            // CondVar logic adds too much overhead (2-10 times more than a plain sleep_for)
- while (CalculateCompletedEvents(kvSearchActors) < keysToSearch.size()) {
- std::this_thread::sleep_for(1us);
- }
-
- kvSearchActorDuration += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(kvSearch));
-
- if (i + 1 == iterations) {
- stat = CollectKvSearchActorStat(kvSearchActors);
- }
- }
-
- return {kvSearchActorDuration / iterations, stat};
- }
-
- std::chrono::microseconds BenchmarkKvThreadPool(
- ui32 threads, ui32 iterations, const TDict& dict, const std::vector<std::string>& keysToSearch
- ) {
- TThreadPool threadPool;
-
- auto kvSearchActorDuration = 0us;
- for (ui32 i = 0; i < iterations; i++) {
- threadPool.Start(threads);
-
- BENCH_START(kvSearch);
-
- for (auto& key : keysToSearch) {
- Y_ABORT_UNLESS(threadPool.AddAndOwn(THolder(new TKvSearchTask(key, dict))));
- }
-
-            // CondVar logic adds too much overhead (2-10 times more than a plain sleep_for)
- while (threadPool.Size() > 0) {
- std::this_thread::sleep_for(1us);
- }
- threadPool.Stop();
-
- kvSearchActorDuration += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(kvSearch));
- }
-
- return {kvSearchActorDuration / iterations};
- }
-
- std::pair<std::chrono::microseconds, TSearchStat> BenchmarkKvSingleThread(
- ui32 iterations, const TDict& dict, const std::vector<std::string>& keysToSearch
- ) {
- TSearchStat stat = {};
- auto kvSearchDuration = 0us;
- for (ui32 i = 0; i < iterations; i++) {
- TSearchStat iterationStat = {};
- BENCH_START(kvSearch);
- for (auto& key : keysToSearch) {
- if (dict.contains(key)) {
- iterationStat.Found++;
- } else {
- iterationStat.NotFound++;
- }
- }
- kvSearchDuration += std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(kvSearch));
-
- if (i + 1 == iterations) {
- stat = iterationStat;
- }
- }
-
- return {kvSearchDuration / iterations, stat};
- }
-
- Y_UNIT_TEST(KvActor) {
- const bool forCI = true;
-
- using TNumbers = std::vector<ui32>;
- const TNumbers threadNumbers = forCI ? TNumbers{1} : TNumbers{1, 4, 8};
- const TNumbers actorNumbers = forCI ? TNumbers{1, 8} : TNumbers{1, 4, 8, 16, 32, 64};
- const TNumbers dictSizes = forCI ? TNumbers{1'000} : TNumbers{1'000, 1'000'000};
- const TNumbers dictsNumbers = forCI ? TNumbers{1} : TNumbers{1, 8};
- const ui32 iterations = 5;
-
- std::cout << "sep=," << std::endl;
- std::cout << "requests_number,dicts_number,dict_size,threads,actors,actor_time(us),actor_ext_time(us),thread_pool_time(us),single_thread_time(us)" << std::endl;
-
- for (auto dictsNumber : dictsNumbers) {
- for (auto dictSize : dictSizes) {
- const auto dict = prepareKvSearchDict(dictSize);
- const ui32 requestsNumber = forCI ? 10'000 : 1'000'000;
- const auto keysToSearch = prepareKeysToSearch(dict, requestsNumber);
-
- for (auto threads : threadNumbers) {
- std::cerr << "requestsNumber: " << requestsNumber
- << ", dictSize: " << dictSize
- << ", threads: " << threads << std::endl;
-
- auto tpKvDuration = BenchmarkKvThreadPool(threads, iterations, dict, keysToSearch);
- std::cerr << "kv search threadpool duration: " << tpKvDuration.count() << "us" << std::endl;
-
- auto [singleThreadKvDuration, singleThreadKvStat] = BenchmarkKvSingleThread(iterations, dict, keysToSearch);
- std::cerr << "kv search single thread duration: " << singleThreadKvDuration.count() << "us" << std::endl;
-
- std::vector<TDict> dicts(dictsNumber, dict);
-
- for (auto actors : actorNumbers) {
- std::cerr << "----" << std::endl
- << "requestsNumber: " << requestsNumber
- << ", dictsNumber: " << dictsNumber
- << ", dictSize: " << dictSize
- << ", threads: " << threads
- << ", actors: " << actors << std::endl;
-
- auto [actorKvDuration, actorKvStat] = BenchmarkKvActor(threads, actors, iterations, dicts, keysToSearch);
- std::cerr << "kv search actor duration: " << actorKvDuration.count() << "us" << std::endl;
-
- auto [actorKvExtDuration, actorKvExtStat] =
- BenchmarkKvActorExternalSender(threads, actors, iterations, dicts, keysToSearch);
- std::cerr << "kv search actor with external message sender duration: "
- << actorKvExtDuration.count() << "us" << std::endl;
- Y_UNUSED(actorKvExtStat);
-
-
- UNIT_ASSERT_EQUAL_C(actorKvStat, singleThreadKvStat,
- "single thread found/not found: " << singleThreadKvStat.Found << "/" << singleThreadKvStat.NotFound << "; "
- "actor stat found/not found: " << actorKvStat.Found << "/" << actorKvStat.NotFound);
-
- std::cout << requestsNumber << ","
- << dictsNumber << ","
- << dictSize << ","
- << threads << ","
- << actors << ","
- << actorKvDuration.count() << ","
- << actorKvExtDuration.count() << ","
- << tpKvDuration.count() << ","
- << singleThreadKvDuration.count() << std::endl;
- }
- std::cerr << "----" << std::endl;
- }
- }
- }
- }
-
- // vector sum benchmark
-
- i64 CalculateOddSum(const TContainer& numbers) {
- i64 result = 0;
- for (auto x : numbers) {
- if (x % 2 == 1) {
- result += x;
- }
- }
-
- return result;
- }
-
- TContainer prepareVectorToSum(const ui32 vectorSize) {
- TContainer numbers;
- numbers.reserve(vectorSize);
- for (ui32 i = 0; i < vectorSize; i++) {
- numbers.push_back(i + 1);
- }
-
- return numbers;
- }
-
- class TEvSumVector : public TEventLocal<TEvSumVector, EvSumVector> {
- public:
- const TContainer Numbers;
-
- public:
- TEvSumVector() = delete;
-
- TEvSumVector(TContainer&& numbers)
- : Numbers(std::move(numbers))
- {}
- };
-
- class TEvSumVectorResult : public TEventLocal<TEvSumVectorResult, EvSumVectorResult> {
- public:
- const i64 Sum = 0;
-
- public:
- TEvSumVectorResult(i64 sum)
- : Sum(sum)
- {}
- };
-
- class TEvSumSendRequests : public TEventLocal<TEvSumSendRequests, EvSumSendRequests> {
- public:
- const ui32 VectorSize;
- const ui32 RequestsNumber;
- const TActorIds ActorIds;
-
- public:
- TEvSumSendRequests() = delete;
- TEvSumSendRequests(ui32 vectorSize, ui32 requestsNumber, TActorIds actorIds)
- : VectorSize(vectorSize)
- , RequestsNumber(requestsNumber)
- , ActorIds(actorIds)
- {}
- };
-
- class TSumProxyActor : public TActorBootstrapped<TSumProxyActor> {
- private:
- i64 LastSum_ = 0;
- ui32 NumberOfResults_ = 0;
- ui32 ExpectedResults_ = 0;
- ui32 VectorSize_ = 0;
- TActorIds SumVectorActorIds_ = {};
- ui32 LastUsedActor_ = 0;
-
- std::mutex NumberOfResultsMutex_;
- std::condition_variable NumberOfResultsCv_;
-
- public:
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvSumSendRequests, HandleRequest);
- hFunc(TEvSumVectorResult, HandleResult);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- }
-
- i64 LastSum() {
- return LastSum_;
- }
-
- bool WaitForResults(std::chrono::microseconds timeout = 1ms, bool nonZero = true) {
- std::unique_lock lock(NumberOfResultsMutex_);
- NumberOfResultsCv_.wait_for(lock, timeout, [this, nonZero] {
- return ((nonZero && NumberOfResults_ != 0) || !nonZero)
- && NumberOfResults_ == ExpectedResults_;
- });
- return NumberOfResults_ == ExpectedResults_;
- }
-
- void ShiftLastUsedActor(ui32 shift) {
- LastUsedActor_ += shift;
- }
-
- private:
- TActorId NextActorId() {
- auto actorId = SumVectorActorIds_[LastUsedActor_ % SumVectorActorIds_.size()];
- LastUsedActor_ = (LastUsedActor_ + 1) % SumVectorActorIds_.size();
-
- return actorId;
- }
-
- bool SendVectorIfNeeded() {
- if (NumberOfResults_ < ExpectedResults_) {
- Send(NextActorId(), new TEvSumVector(prepareVectorToSum(VectorSize_)));
- return true;
- }
- return false;
- }
-
- void HandleRequest(TEvSumSendRequests::TPtr& ev) {
- auto evPtr = ev->Get();
- ExpectedResults_ = evPtr->RequestsNumber;
- VectorSize_ = evPtr->VectorSize;
- SumVectorActorIds_ = evPtr->ActorIds;
-
- {
- std::unique_lock lock(NumberOfResultsMutex_);
- NumberOfResults_ = 0;
-
- SendVectorIfNeeded();
- }
- }
-
- void HandleResult(TEvSumVectorResult::TPtr& ev) {
- LastSum_ = ev->Get()->Sum;
- {
- std::unique_lock lock(NumberOfResultsMutex_);
- NumberOfResults_++;
-
- if (!SendVectorIfNeeded()) {
- NumberOfResultsCv_.notify_all();
- }
- }
- }
- };
-
- class TSumVectorActor : public TActorBootstrapped<TSumVectorActor> {
- private:
- TActorId ResultActorId_;
-
- public:
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvSumVector, Handle);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- }
-
- private:
- void Handle(TEvSumVector::TPtr& ev) {
- auto evPtr = ev->Get();
- auto oddSum = CalculateOddSum(evPtr->Numbers);
-
- Send(ev->Sender, new TEvSumVectorResult(oddSum));
- }
- };
-
- std::vector<TActorId> prepareSumActors(TActorSystem* actorSystem, ui32 actorsNumber) {
- std::vector<TActorId> actorIds;
- actorIds.reserve(actorsNumber);
- for (ui32 i = 0; i < actorsNumber; i++) {
- actorIds.push_back(actorSystem->Register(new TSumVectorActor()));
- }
- return actorIds;
- }
-
- std::pair<std::vector<TSumProxyActor*>, std::vector<TActorId>> prepareProxyActors(
- TActorSystem* actorSystem, ui32 actorsNumber
- ) {
- std::pair<std::vector<TSumProxyActor*>, std::vector<TActorId>> result;
- auto& [actors, actorIds] = result;
- actors.reserve(actorsNumber);
- actorIds.reserve(actorsNumber);
- for (ui32 i = 0; i < actorsNumber; i++) {
- actors.push_back(new TSumProxyActor());
- actorIds.push_back(actorSystem->Register(actors.back()));
- actors.back()->ShiftLastUsedActor(i);
- }
-
- return result;
- }
-
- std::chrono::microseconds calcTimeoutForSumVector(
- ui32 vectorSize, ui32 iterations, ui32 proxyActorsNum, ui32 sumActorsNum, ui32 threadsNum
- ) {
- auto expectedMaxTimePerMillion = 100000us;
- auto vectorSizeRatio = vectorSize / 1000000 + 1;
-
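-        // e.g. vectorSize = 1'000'000, iterations = 30 and a single proxy actor, sum actor and
-        // thread yield 100'000us * 2 * 30 = 6s of allowed wall time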
- return expectedMaxTimePerMillion * vectorSizeRatio * iterations * proxyActorsNum / std::min(threadsNum, sumActorsNum);
- }
-
- bool WaitForSumActorResult(const std::vector<TSumProxyActor*>& actors, std::chrono::microseconds timeout = 1ms) {
- for (auto& actor : actors) {
- if (!actor->WaitForResults(timeout)) {
- return false;
- }
- }
- return true;
- }
-
- std::pair<std::chrono::microseconds, i64> BenchmarkSumVectorActor(
- ui32 threads,
- ui32 proxyActorsNumber,
- ui32 sumActorsNumber,
- ui32 iterations,
- ui32 vectorSize
- ) {
- auto actorSystem = PrepareActorSystem(threads);
- actorSystem->Start();
-
- auto sumActorIds = prepareSumActors(actorSystem.get(), sumActorsNumber);
- auto [proxyActors, proxyActorIds] = prepareProxyActors(actorSystem.get(), proxyActorsNumber);
-
- auto timeout = calcTimeoutForSumVector(vectorSize, iterations, proxyActorsNumber, sumActorsNumber, threads);
-
- BENCH_START(sumVectorActor);
-
- for (auto proxyActorId : proxyActorIds) {
- actorSystem->Send(proxyActorId, new TEvSumSendRequests(vectorSize, iterations, sumActorIds));
- }
-
- UNIT_ASSERT_C(WaitForSumActorResult(proxyActors, timeout), "timeout");
-
- auto totalDuration = std::chrono::duration_cast<std::chrono::microseconds>(BENCH_END(sumVectorActor));
- auto checkSum = proxyActors.back()->LastSum();
-
- return {totalDuration / iterations, checkSum};
- }
-
- Y_UNIT_TEST(SumVector) {
- using TVui64 = std::vector<ui64>;
- const bool forCI = true;
- const TVui64 vectorSizes = forCI ?
- TVui64{1'000, 1'000'000} : TVui64{1'000, 1'000'000, 10'000'000, 100'000'000};
- const TVui64 threadsNumbers = forCI ? TVui64{1} : TVui64{1, 4};
- const TVui64 proxyActorsNumbers = forCI ? TVui64{1} : TVui64{1, 4};
- const TVui64 sumActorsNumbers = forCI ? TVui64{1} : TVui64{1, 8, 32};
- const ui32 iterations = 30;
-
- std::cout << "sep=," << std::endl;
- std::cout << "size,threads,proxy_actors,sum_actors,duration(us)" << std::endl;
-
- for (auto vectorSize : vectorSizes) {
- for (auto threads : threadsNumbers) {
- for (auto proxyActors : proxyActorsNumbers) {
- for (auto sumActors : sumActorsNumbers) {
- std::cerr << "vector size: " << vectorSize
- << ", threads: " << threads
- << ", proxy actors: " << proxyActors
- << ", sum actors: " << sumActors << std::endl;
-
- auto [duration, resultSum] = BenchmarkSumVectorActor(
- threads, proxyActors, sumActors, iterations, vectorSize);
- std::cerr << "duration: " << duration.count() << "us" << std::endl;
-
- const i64 referenceSum = vectorSize * vectorSize / 4;
- UNIT_ASSERT_EQUAL_C(
- resultSum, referenceSum,
- resultSum << "!=" << referenceSum << "; failed on vectorSize=" << vectorSize
- << ", threads=" << threads
- << ", proxyActors=" << proxyActors
- << ", sumActors=" << sumActors);
-
- std::cout << vectorSize << ","
- << threads << ","
- << proxyActors << ","
- << sumActors << ","
- << duration.count()
- << std::endl;
- }
- }
- }
- }
- }
-}
diff --git a/library/cpp/actors/core/buffer.cpp b/library/cpp/actors/core/buffer.cpp
deleted file mode 100644
index 91ff4dde68..0000000000
--- a/library/cpp/actors/core/buffer.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-#include "buffer.h"
-
-#include <util/system/yassert.h>
-
-#include <algorithm>
-
-TBufferBase::TBufferBase(size_t size) noexcept
- : Size(size)
-{
-}
-
-size_t
-TBufferBase::GetSize() const noexcept {
- return Size;
-}
-
-void TBufferBase::SetSize(size_t size) noexcept {
- Size = size;
-}
-
-/////////////////////////////////////////////////////////////////////
-
-template <typename PointerType>
-TBufferBaseT<PointerType>::TBufferBaseT(PointerType data, size_t size) noexcept
- : TBufferBase(size)
- , Data(data)
-{
-}
-
-template <typename PointerType>
-PointerType
-TBufferBaseT<PointerType>::GetPointer() const noexcept {
- return Data;
-}
-
-template <typename PointerType>
-void TBufferBaseT<PointerType>::Assign(PointerType data, size_t size) noexcept {
- Data = data;
- Size = size;
-}
-
-template <>
-void TBufferBaseT<void*>::Cut(size_t offset) noexcept {
- Y_DEBUG_ABORT_UNLESS(offset <= Size);
- Data = static_cast<char*>(Data) + offset;
- TBufferBase::Size -= offset;
-}
-
-template <>
-void TBufferBaseT<const void*>::Cut(size_t offset) noexcept {
- Y_DEBUG_ABORT_UNLESS(offset <= Size);
- Data = static_cast<const char*>(Data) + offset;
- TBufferBase::Size -= offset;
-}
-
-template class TBufferBaseT<void*>;
-template class TBufferBaseT<const void*>;
-
-/////////////////////////////////////////////////////////////////////
-
-TConstBuffer::TConstBuffer(const void* data, size_t size) noexcept
- : TBufferBaseT<const void*>(data, size)
-{
-}
-
-TConstBuffer::TConstBuffer(const TMutableBuffer& buffer) noexcept
- : TBufferBaseT<const void*>(buffer.GetPointer(), buffer.GetSize())
-{
-}
-
-TConstBuffer
-TConstBuffer::Offset(ptrdiff_t offset, size_t size) const noexcept {
- return TConstBuffer(static_cast<const char*>(Data) + offset, std::min(Size - offset, size));
-}
-
-////////////////////////////////////////////////////////////////////////////////
-
-TMutableBuffer::TMutableBuffer(void* data, size_t size) noexcept
- : TBufferBaseT<void*>(data, size)
-{
-}
-
-TMutableBuffer
-TMutableBuffer::Offset(ptrdiff_t offset, size_t size) const noexcept {
- return TMutableBuffer(static_cast<char*>(Data) + offset, std::min(Size - offset, size));
-}
-
-size_t
-TMutableBuffer::CopyFrom(const TConstBuffer& buffer) const noexcept {
- const auto size = std::min(Size, buffer.Size);
- std::memcpy(Data, buffer.Data, size);
- return size;
-}
diff --git a/library/cpp/actors/core/buffer.h b/library/cpp/actors/core/buffer.h
deleted file mode 100644
index 95425046d6..0000000000
--- a/library/cpp/actors/core/buffer.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-
-#include <limits>
-
-class TConstBuffer;
-class TMutableBuffer;
-
-class TBufferBase {
-public:
- size_t GetSize() const noexcept;
-
- void SetSize(size_t newSize) noexcept;
-
-protected:
- TBufferBase(size_t size = 0) noexcept;
-
- size_t Size;
-};
-
-template <typename PointerType>
-class TBufferBaseT: public TBufferBase {
-public:
- PointerType GetPointer() const noexcept;
-
- void Cut(size_t offset) noexcept;
-
- void Assign(PointerType data = nullptr, size_t size = 0U) noexcept;
-
-protected:
- TBufferBaseT(PointerType data, size_t size) noexcept;
-
- PointerType Data;
-};
-
-/// Represents a constant memory buffer but does not own it.
-class TConstBuffer: public TBufferBaseT<const void*> {
- friend class TMutableBuffer;
-
-public:
- TConstBuffer(const TMutableBuffer& buffer) noexcept;
-
- TConstBuffer(const void* data = nullptr, size_t size = 0U) noexcept;
-
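-    // e.g. Offset(8) skips the first 8 bytes, Offset(8, 4) additionally narrows the view to 4 bytes
-    // (the length is clamped to the remaining size)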
- TConstBuffer Offset(ptrdiff_t offset, size_t size = std::numeric_limits<size_t>::max()) const noexcept;
-};
-
-/// Represents a mutable memory buffer but does not own it.
-class TMutableBuffer: public TBufferBaseT<void*> {
- friend class TConstBuffer;
-
-public:
- TMutableBuffer(void* data = nullptr, size_t size = 0U) noexcept;
-
- TMutableBuffer(const TMutableBuffer& value) noexcept
- : TBufferBaseT<void*>(value)
- {
- }
-
- TMutableBuffer Offset(ptrdiff_t offset, size_t size = std::numeric_limits<size_t>::max()) const noexcept;
-
- size_t CopyFrom(const TConstBuffer& buffer) const noexcept;
-};
diff --git a/library/cpp/actors/core/callstack.cpp b/library/cpp/actors/core/callstack.cpp
deleted file mode 100644
index 559cc73550..0000000000
--- a/library/cpp/actors/core/callstack.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-#include "callstack.h"
-#include <util/thread/singleton.h>
-
-#ifdef USE_ACTOR_CALLSTACK
-
-namespace NActors {
- namespace {
- void (*PreviousFormatBackTrace)(IOutputStream*) = 0;
- ui32 ActorBackTraceEnableCounter = 0;
- }
-
- void ActorFormatBackTrace(IOutputStream* out) {
- TStringStream str;
- PreviousFormatBackTrace(&str);
- str << Endl;
- TCallstack::DumpCallstack(str);
- *out << str.Str();
- }
-
- void EnableActorCallstack() {
- if (ActorBackTraceEnableCounter == 0) {
- Y_ABORT_UNLESS(PreviousFormatBackTrace == 0);
- PreviousFormatBackTrace = SetFormatBackTraceFn(ActorFormatBackTrace);
- }
-
- ++ActorBackTraceEnableCounter;
- }
-
- void DisableActorCallstack() {
- --ActorBackTraceEnableCounter;
-
- if (ActorBackTraceEnableCounter == 0) {
- Y_ABORT_UNLESS(PreviousFormatBackTrace);
- SetFormatBackTraceFn(PreviousFormatBackTrace);
- PreviousFormatBackTrace = 0;
- }
- }
-
- TCallstack::TCallstack()
- : BeginIdx(0)
- , Size(0)
- , LinesToSkip(0)
- {
- }
-
- void TCallstack::SetLinesToSkip() {
- TTrace record;
- LinesToSkip = BackTrace(record.Data, TTrace::CAPACITY);
- }
-
- void TCallstack::Trace() {
- size_t currentIdx = (BeginIdx + Size) % RECORDS;
- if (Size == RECORDS) {
- ++BeginIdx;
- } else {
- ++Size;
- }
- TTrace& record = Record[currentIdx];
- record.Size = BackTrace(record.Data, TTrace::CAPACITY);
- record.LinesToSkip = LinesToSkip;
- }
-
- void TCallstack::TraceIfEmpty() {
- if (Size == 0) {
- LinesToSkip = 0;
- Trace();
- }
- }
-
- TCallstack& TCallstack::GetTlsCallstack() {
- return *FastTlsSingleton<TCallstack>();
- }
-
- void TCallstack::DumpCallstack(TStringStream& str) {
- TCallstack& callstack = GetTlsCallstack();
- for (int i = callstack.Size - 1; i >= 0; --i) {
- TTrace& record = callstack.Record[(callstack.BeginIdx + i) % RECORDS];
- str << Endl << "Trace entry " << i << Endl << Endl;
- size_t size = record.Size;
- if (size > record.LinesToSkip && size < TTrace::CAPACITY) {
- size -= record.LinesToSkip;
- }
- if (size > RECORDS_TO_SKIP) {
- FormatBackTrace(&str, &record.Data[RECORDS_TO_SKIP], size - RECORDS_TO_SKIP);
- } else {
- FormatBackTrace(&str, record.Data, size);
- }
- str << Endl;
- }
- }
-}
-
-#endif
diff --git a/library/cpp/actors/core/callstack.h b/library/cpp/actors/core/callstack.h
deleted file mode 100644
index 034d0becf1..0000000000
--- a/library/cpp/actors/core/callstack.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#pragma once
-
-#ifndef NDEBUG
-//#define ENABLE_ACTOR_CALLSTACK
-#endif
-
-#ifdef ENABLE_ACTOR_CALLSTACK
-#include "defs.h"
-#include <util/system/backtrace.h>
-#include <util/stream/str.h>
-#include <util/generic/deque.h>
-#define USE_ACTOR_CALLSTACK
-
-namespace NActors {
- struct TCallstack {
- struct TTrace {
- static const size_t CAPACITY = 50;
- void* Data[CAPACITY];
- size_t Size;
- size_t LinesToSkip;
-
- TTrace()
- : Size(0)
- , LinesToSkip(0)
- {
- }
- };
-
- static const size_t RECORDS = 8;
- static const size_t RECORDS_TO_SKIP = 2;
- TTrace Record[RECORDS];
- size_t BeginIdx;
- size_t Size;
- size_t LinesToSkip;
-
- TCallstack();
- void SetLinesToSkip();
- void Trace();
- void TraceIfEmpty();
- static TCallstack& GetTlsCallstack();
- static void DumpCallstack(TStringStream& str);
- };
-
- void EnableActorCallstack();
- void DisableActorCallstack();
-
-}
-
-#else
-
-namespace NActors {
- inline void EnableActorCallstack(){}
-
- inline void DisableActorCallstack(){}
-
-}
-
-#endif
diff --git a/library/cpp/actors/core/config.h b/library/cpp/actors/core/config.h
deleted file mode 100644
index 04bdd6aebe..0000000000
--- a/library/cpp/actors/core/config.h
+++ /dev/null
@@ -1,260 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <library/cpp/actors/util/cpumask.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <util/datetime/base.h>
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/generic/vector.h>
-
-namespace NActors {
-
- struct TBalancingConfig {
-        // Default cpu count (used during overload). A zero value disables balancing for this pool
- // 1) Sum of `Cpus` on all pools cannot be changed without restart
- // (changing cpu mode between Shared and Assigned is not implemented yet)
- // 2) This sum must be equal to TUnitedWorkersConfig::CpuCount,
- // otherwise `CpuCount - SUM(Cpus)` cpus will be in Shared mode (i.e. actorsystem 2.0)
- ui32 Cpus = 0;
-
- ui32 MinCpus = 0; // Lower balancing bound, should be at least 1, and not greater than `Cpus`
-        ui32 MaxCpus = 0; // Upper balancing bound, should not be lower than `Cpus`
- ui8 Priority = 0; // Priority of pool to obtain cpu due to balancing (higher is better)
- ui64 ToleratedLatencyUs = 0; // p100-latency threshold indicating that more cpus are required by pool
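-        // Example: four united pools with Cpus = {1, 2, 3, 4} and TUnitedWorkersConfig::CpuCount = 10
-        // satisfy (2), so no cpus fall back to Shared mode; the balancer then moves cpus between the
-        // pools only within their [MinCpus, MaxCpus] bounds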
- };
-
- struct TBalancerConfig {
- ui64 PeriodUs = 15000000; // Time between balancer steps
- };
-
- enum class EASProfile {
- Default,
- LowCpuConsumption,
- LowLatency,
- };
-
- struct TBasicExecutorPoolConfig {
- static constexpr TDuration DEFAULT_TIME_PER_MAILBOX = TDuration::MilliSeconds(10);
- static constexpr ui32 DEFAULT_EVENTS_PER_MAILBOX = 100;
-
- ui32 PoolId = 0;
- TString PoolName;
- ui32 Threads = 1;
- ui64 SpinThreshold = 100;
- TCpuMask Affinity; // Executor thread affinity
- TDuration TimePerMailbox = DEFAULT_TIME_PER_MAILBOX;
- ui32 EventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX;
- int RealtimePriority = 0;
- i16 MinThreadCount = 0;
- i16 MaxThreadCount = 0;
- i16 DefaultThreadCount = 0;
- i16 Priority = 0;
- i16 SharedExecutorsCount = 0;
- i16 SoftProcessingDurationTs = 0;
- EASProfile ActorSystemProfile = EASProfile::Default;
- };
-
- struct TIOExecutorPoolConfig {
- ui32 PoolId = 0;
- TString PoolName;
- ui32 Threads = 1;
- TCpuMask Affinity; // Executor thread affinity
- };
-
- struct TUnitedExecutorPoolConfig {
- static constexpr TDuration DEFAULT_TIME_PER_MAILBOX = TDuration::MilliSeconds(10);
- static constexpr ui32 DEFAULT_EVENTS_PER_MAILBOX = 100;
-
- ui32 PoolId = 0;
- TString PoolName;
-
- // Resource sharing
-        ui32 Concurrency = 0; // Limits the number of simultaneously running mailboxes if set to a non-zero value (do not set if Balancing.Cpus != 0)
- TPoolWeight Weight = 0; // Weight in fair cpu-local pool scheduler
- TCpuMask Allowed; // Allowed CPUs for workers to run this pool on (ignored if balancer works, i.e. actorsystem 1.5)
-
- // Single mailbox execution limits
- TDuration TimePerMailbox = DEFAULT_TIME_PER_MAILBOX;
- ui32 EventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX;
-
- // Long-term balancing
- TBalancingConfig Balancing;
- };
-
- struct TUnitedWorkersConfig {
- ui32 CpuCount = 0; // Total CPUs running united workers (i.e. TBasicExecutorPoolConfig::Threads analog); set to zero to disable united workers
- ui64 SpinThresholdUs = 100; // Limit for active spinning in case all pools became idle
- ui64 PoolLimitUs = 500; // Soft limit on pool execution
- ui64 EventLimitUs = 100; // Hard limit on last event execution exceeding pool limit
- ui64 LimitPrecisionUs = 100; // Maximum delay of timer on limit excess (delay needed to avoid settimer syscall on every pool switch)
- ui64 FastWorkerPriority = 10; // Real-time priority of workers not exceeding hard limits
- ui64 IdleWorkerPriority = 20; // Real-time priority of standby workers waiting for hard preemption on timers (should be greater than FastWorkerPriority)
- TCpuMask Allowed; // Allowed CPUs for workers to run on (every worker has affinity for exactly one cpu)
- bool NoRealtime = false; // For environments w/o permissions for RT-threads
- bool NoAffinity = false; // For environments w/o permissions for cpu affinity
- TBalancerConfig Balancer;
- };
-
- struct TSelfPingInfo {
- NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounter;
- NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounterWithSmallWindow;
- ui32 MaxAvgPingUs;
- };
-
- struct TCpuManagerConfig {
- TUnitedWorkersConfig UnitedWorkers;
- TVector<TBasicExecutorPoolConfig> Basic;
- TVector<TIOExecutorPoolConfig> IO;
- TVector<TUnitedExecutorPoolConfig> United;
- TVector<TSelfPingInfo> PingInfoByPool;
-
- ui32 GetExecutorsCount() const {
- return Basic.size() + IO.size() + United.size();
- }
-
- TString GetPoolName(ui32 poolId) const {
- for (const auto& p : Basic) {
- if (p.PoolId == poolId) {
- return p.PoolName;
- }
- }
- for (const auto& p : IO) {
- if (p.PoolId == poolId) {
- return p.PoolName;
- }
- }
- for (const auto& p : United) {
- if (p.PoolId == poolId) {
- return p.PoolName;
- }
- }
- Y_ABORT("undefined pool id: %" PRIu32, (ui32)poolId);
- }
-
- std::optional<ui32> GetThreadsOptional(ui32 poolId) const {
- for (const auto& p : Basic) {
- if (p.PoolId == poolId) {
- return p.DefaultThreadCount;
- }
- }
- for (const auto& p : IO) {
- if (p.PoolId == poolId) {
- return p.Threads;
- }
- }
- for (const auto& p : United) {
- if (p.PoolId == poolId) {
- return p.Concurrency ? p.Concurrency : UnitedWorkers.CpuCount;
- }
- }
- return {};
- }
-
- ui32 GetThreads(ui32 poolId) const {
- auto result = GetThreadsOptional(poolId);
- Y_ABORT_UNLESS(result, "undefined pool id: %" PRIu32, (ui32)poolId);
- return *result;
- }
- };
-
- struct TSchedulerConfig {
- TSchedulerConfig(
- ui64 resolution = 1024,
- ui64 spinThreshold = 100,
- ui64 progress = 10000,
- bool useSchedulerActor = false)
- : ResolutionMicroseconds(resolution)
- , SpinThreshold(spinThreshold)
- , ProgressThreshold(progress)
- , UseSchedulerActor(useSchedulerActor)
- {}
-
- ui64 ResolutionMicroseconds = 1024;
- ui64 SpinThreshold = 100;
- ui64 ProgressThreshold = 10000;
- bool UseSchedulerActor = false; // False is default because tests use scheduler thread
- ui64 RelaxedSendPaceEventsPerSecond = 200000;
- ui64 RelaxedSendPaceEventsPerCycle = RelaxedSendPaceEventsPerSecond * ResolutionMicroseconds / 1000000;
- // For resolution >= 250000 microseconds threshold is SendPace
- // For resolution <= 250 microseconds threshold is 20 * SendPace
- ui64 RelaxedSendThresholdEventsPerSecond = RelaxedSendPaceEventsPerSecond *
- (20 - ((20 - 1) * ClampVal(ResolutionMicroseconds, ui64(250), ui64(250000)) - 250) / (250000 - 250));
- ui64 RelaxedSendThresholdEventsPerCycle = RelaxedSendThresholdEventsPerSecond * ResolutionMicroseconds / 1000000;
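-        // e.g. for the default ResolutionMicroseconds = 1024 the clamped term rounds down to zero,
-        // so the threshold stays at 20 * SendPace: 4'000'000 events/s and 4096 events per cycle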
-
- // Optional subsection for scheduler counters (usually subsystem=utils)
- NMonitoring::TDynamicCounterPtr MonCounters = nullptr;
- };
-
- struct TCpuAllocation {
- struct TPoolAllocation {
- TPoolId PoolId;
- TPoolWeight Weight;
-
- TPoolAllocation(TPoolId poolId = 0, TPoolWeight weight = 0)
- : PoolId(poolId)
- , Weight(weight)
- {}
- };
-
- TCpuId CpuId;
- TVector<TPoolAllocation> AllowedPools;
-
- TPoolsMask GetPoolsMask() const {
- TPoolsMask mask = 0;
- for (const auto& pa : AllowedPools) {
- if (pa.PoolId < MaxPools) {
-                    mask |= (1ull << pa.PoolId);
- }
- }
- return mask;
- }
-
- bool HasPool(TPoolId pool) const {
- for (const auto& pa : AllowedPools) {
- if (pa.PoolId == pool) {
- return true;
- }
- }
- return false;
- }
- };
-
- struct TCpuAllocationConfig {
- TVector<TCpuAllocation> Items;
-
- TCpuAllocationConfig(const TCpuMask& available, const TCpuManagerConfig& cfg) {
- for (const TUnitedExecutorPoolConfig& pool : cfg.United) {
- Y_ABORT_UNLESS(pool.PoolId < MaxPools, "wrong PoolId of united executor pool: %s(%d)",
- pool.PoolName.c_str(), (pool.PoolId));
- }
- ui32 allocated[MaxPools] = {0};
- for (TCpuId cpu = 0; cpu < available.Size() && Items.size() < cfg.UnitedWorkers.CpuCount; cpu++) {
- if (available.IsSet(cpu)) {
- TCpuAllocation item;
- item.CpuId = cpu;
- for (const TUnitedExecutorPoolConfig& pool : cfg.United) {
- if (cfg.UnitedWorkers.Allowed.IsEmpty() || cfg.UnitedWorkers.Allowed.IsSet(cpu)) {
- if (pool.Allowed.IsEmpty() || pool.Allowed.IsSet(cpu)) {
- item.AllowedPools.emplace_back(pool.PoolId, pool.Weight);
- allocated[pool.PoolId]++;
- }
- }
- }
- if (!item.AllowedPools.empty()) {
- Items.push_back(item);
- }
- }
- }
- for (const TUnitedExecutorPoolConfig& pool : cfg.United) {
- Y_ABORT_UNLESS(allocated[pool.PoolId] > 0, "unable to allocate cpu for united executor pool: %s(%d)",
- pool.PoolName.c_str(), (pool.PoolId));
- }
- }
-
- operator bool() const {
- return !Items.empty();
- }
- };
-
-}
diff --git a/library/cpp/actors/core/cpu_manager.cpp b/library/cpp/actors/core/cpu_manager.cpp
deleted file mode 100644
index 24b3161e3c..0000000000
--- a/library/cpp/actors/core/cpu_manager.cpp
+++ /dev/null
@@ -1,148 +0,0 @@
-#include "cpu_manager.h"
-#include "probes.h"
-
-#include "executor_pool_basic.h"
-#include "executor_pool_io.h"
-#include "executor_pool_united.h"
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- TCpuManager::TCpuManager(THolder<TActorSystemSetup>& setup)
- : ExecutorPoolCount(setup->GetExecutorsCount())
- , Balancer(setup->Balancer)
- , Config(setup->CpuManager)
- {
- if (setup->Executors) { // Explicit mode w/o united pools
- Executors.Reset(setup->Executors.Release());
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- IExecutorPool* pool = Executors[excIdx].Get();
- Y_ABORT_UNLESS(dynamic_cast<TUnitedExecutorPool*>(pool) == nullptr,
- "united executor pool is prohibited in explicit mode of NActors::TCpuManager");
- }
- } else {
- Setup();
- }
- }
-
- void TCpuManager::Setup() {
- TAffinity available;
- available.Current();
- TCpuAllocationConfig allocation(available, Config);
-
- if (allocation) {
- if (!Balancer) {
- Balancer.Reset(MakeBalancer(Config.UnitedWorkers.Balancer, Config.United, GetCycleCountFast()));
- }
- UnitedWorkers.Reset(new TUnitedWorkers(Config.UnitedWorkers, Config.United, allocation, Balancer.Get()));
- }
-
- ui64 ts = GetCycleCountFast();
- Harmonizer.Reset(MakeHarmonizer(ts));
-
- Executors.Reset(new TAutoPtr<IExecutorPool>[ExecutorPoolCount]);
-
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- Executors[excIdx].Reset(CreateExecutorPool(excIdx));
- if (excIdx < Config.PingInfoByPool.size()) {
- Harmonizer->AddPool(Executors[excIdx].Get(), &Config.PingInfoByPool[excIdx]);
- } else {
- Harmonizer->AddPool(Executors[excIdx].Get());
- }
- }
- }
-
- void TCpuManager::PrepareStart(TVector<NSchedulerQueue::TReader*>& scheduleReaders, TActorSystem* actorSystem) {
- if (UnitedWorkers) {
- UnitedWorkers->Prepare(actorSystem, scheduleReaders);
- }
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- NSchedulerQueue::TReader* readers;
- ui32 readersCount = 0;
- Executors[excIdx]->Prepare(actorSystem, &readers, &readersCount);
- for (ui32 i = 0; i != readersCount; ++i, ++readers) {
- scheduleReaders.push_back(readers);
- }
- }
- }
-
- void TCpuManager::Start() {
- if (UnitedWorkers) {
- UnitedWorkers->Start();
- }
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- Executors[excIdx]->Start();
- }
- }
-
- void TCpuManager::PrepareStop() {
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- Executors[excIdx]->PrepareStop();
- }
- if (UnitedWorkers) {
- UnitedWorkers->PrepareStop();
- }
- }
-
- void TCpuManager::Shutdown() {
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- Executors[excIdx]->Shutdown();
- }
- if (UnitedWorkers) {
- UnitedWorkers->Shutdown();
- }
- for (ui32 round = 0, done = 0; done < ExecutorPoolCount && round < 3; ++round) {
- done = 0;
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- if (Executors[excIdx]->Cleanup()) {
- ++done;
- }
- }
- }
- }
-
- void TCpuManager::Cleanup() {
- for (ui32 round = 0, done = 0; done < ExecutorPoolCount; ++round) {
- Y_ABORT_UNLESS(round < 10, "actorsystem cleanup could not be completed in 10 rounds");
- done = 0;
- for (ui32 excIdx = 0; excIdx != ExecutorPoolCount; ++excIdx) {
- if (Executors[excIdx]->Cleanup()) {
- ++done;
- }
- }
- }
- Executors.Destroy();
- UnitedWorkers.Destroy();
- }
-
- IExecutorPool* TCpuManager::CreateExecutorPool(ui32 poolId) {
- for (TBasicExecutorPoolConfig& cfg : Config.Basic) {
- if (cfg.PoolId == poolId) {
- return new TBasicExecutorPool(cfg, Harmonizer.Get());
- }
- }
- for (TIOExecutorPoolConfig& cfg : Config.IO) {
- if (cfg.PoolId == poolId) {
- return new TIOExecutorPool(cfg);
- }
- }
- for (TUnitedExecutorPoolConfig& cfg : Config.United) {
- if (cfg.PoolId == poolId) {
- IExecutorPool* result = new TUnitedExecutorPool(cfg, UnitedWorkers.Get());
- return result;
- }
- }
- Y_ABORT("missing PoolId: %d", int(poolId));
- }
-
- TVector<IExecutorPool*> TCpuManager::GetBasicExecutorPools() const {
- TVector<IExecutorPool*> pools;
- for (ui32 idx = 0; idx < ExecutorPoolCount; ++idx) {
- if (auto basicPool = dynamic_cast<TBasicExecutorPool*>(Executors[idx].Get()); basicPool != nullptr) {
- pools.push_back(basicPool);
- }
- }
- return pools;
- }
-
-}
diff --git a/library/cpp/actors/core/cpu_manager.h b/library/cpp/actors/core/cpu_manager.h
deleted file mode 100644
index 26ba97aa39..0000000000
--- a/library/cpp/actors/core/cpu_manager.h
+++ /dev/null
@@ -1,55 +0,0 @@
-#pragma once
-
-#include "harmonizer.h"
-#include "executor_pool.h"
-#include "executor_pool_united_workers.h"
-#include "balancer.h"
-
-namespace NActors {
- struct TActorSystemSetup;
-
- class TCpuManager : public TNonCopyable {
- const ui32 ExecutorPoolCount;
- TArrayHolder<TAutoPtr<IExecutorPool>> Executors;
- THolder<TUnitedWorkers> UnitedWorkers;
- THolder<IBalancer> Balancer;
- THolder<IHarmonizer> Harmonizer;
- TCpuManagerConfig Config;
-
- public:
- explicit TCpuManager(THolder<TActorSystemSetup>& setup);
-
- void Setup();
- void PrepareStart(TVector<NSchedulerQueue::TReader*>& scheduleReaders, TActorSystem* actorSystem);
- void Start();
- void PrepareStop();
- void Shutdown();
- void Cleanup();
-
- TVector<IExecutorPool*> GetBasicExecutorPools() const;
-
- ui32 GetExecutorsCount() const {
- return ExecutorPoolCount;
- }
-
- IExecutorPool* GetExecutorPool(ui32 poolId) {
- return Executors[poolId].Get();
- }
-
- void GetPoolStats(ui32 poolId, TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- if (poolId < ExecutorPoolCount) {
- Executors[poolId]->GetCurrentStats(poolStats, statsCopy);
- }
- }
-
- THarmonizerStats GetHarmonizerStats() const {
- if (Harmonizer) {
- return Harmonizer->GetStats();
- }
- return {};
- }
-
- private:
- IExecutorPool* CreateExecutorPool(ui32 poolId);
- };
-}
diff --git a/library/cpp/actors/core/cpu_state.h b/library/cpp/actors/core/cpu_state.h
deleted file mode 100644
index 3f779d5623..0000000000
--- a/library/cpp/actors/core/cpu_state.h
+++ /dev/null
@@ -1,215 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include <library/cpp/actors/util/futex.h>
-
-namespace NActors {
-
- class alignas(64) TCpuState {
-        // Atomic cacheline-aligned 64-bit state, see description below
- TAtomic State = 0;
- char Padding[64 - sizeof(TAtomic)];
-
- // Bits 0-31: Currently executing pool
- // - value less than MaxPools means cpu is executing corresponding pool (fast-worker is executing or waiting for slow-workers)
- // - one of Cpu* values in case of idle cpu
- // - used as futex by blocked fast-worker
- static constexpr ui64 CurrentBits = 32;
- static constexpr ui64 CurrentMask = ui64((1ull << CurrentBits) - 1);
-
- // Bits 32-63: Assigned pool
- // - value is set by balancer
- // - NOT used as futex
- // - Not balanced
- static constexpr ui64 AssignedOffs = 32;
- static constexpr ui64 AssignedMask = ~CurrentMask;
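-        // e.g. State == 0x0000000300000001 decodes to assigned pool 3 (upper 32 bits) and
-        // currently executing pool 1 (lower 32 bits)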
-
- public:
- TCpuState() {
- Y_UNUSED(Padding);
- }
-
- void Load(TPoolId& assigned, TPoolId& current) const {
- TAtomicBase state = AtomicLoad(&State);
- assigned = (state & AssignedMask) >> AssignedOffs;
- current = state & CurrentMask;
- }
-
- TPoolId CurrentPool() const {
- return TPoolId(AtomicLoad(&State) & CurrentMask);
- }
-
- void SwitchPool(TPoolId pool) {
- while (true) {
- TAtomicBase state = AtomicLoad(&State);
- if (AtomicCas(&State, (state & ~CurrentMask) | pool, state)) {
- return;
- }
- }
- }
-
- TPoolId AssignedPool() const {
- return TPoolId((AtomicLoad(&State) & AssignedMask) >> AssignedOffs);
- }
-
- // Assigns new pool to cpu and wakes it up if cpu is idle
- void AssignPool(TPoolId pool) {
- while (true) {
- TAtomicBase state = AtomicLoad(&State);
- TPoolId current(state & CurrentMask);
- if (Y_UNLIKELY(current == CpuStopped)) {
-                    return; // it would be better to shut down instead of balancing
- }
- // Idle cpu must be woken up after balancing to handle pending tokens (if any) in assigned/schedulable pool(s)
- if (current == CpuSpinning) {
- if (AtomicCas(&State, (ui64(pool) << AssignedOffs) | pool, state)) {
- return; // successfully woken up
- }
- } else if (current == CpuBlocked) {
- if (AtomicCas(&State, (ui64(pool) << AssignedOffs) | pool, state)) {
- FutexWake();
- return; // successfully woken up
- }
- } else {
- if (AtomicCas(&State, (ui64(pool) << AssignedOffs) | (state & ~AssignedMask), state)) {
- return; // wakeup is not required
- }
- }
- }
- }
-
- void Stop() {
- while (true) {
- TAtomicBase state = AtomicLoad(&State);
- if (AtomicCas(&State, (state & ~CurrentMask) | CpuStopped, state)) {
- FutexWake();
- return; // successfully stopped
- }
- }
- }
-
-        // Start waiting; returns false in case of actor-system shutdown
- bool StartSpinning() {
- while (true) {
- TAtomicBase state = AtomicLoad(&State);
- TPoolId current(state & CurrentMask);
- if (Y_UNLIKELY(current == CpuStopped)) {
- return false;
- }
- Y_DEBUG_ABORT_UNLESS(current < MaxPools, "unexpected already waiting state of cpu (%d)", (int)current);
- if (AtomicCas(&State, (state & ~CurrentMask) | CpuSpinning, state)) { // successfully marked as spinning
- return true;
- }
- }
- }
-
- bool StartBlocking() {
- while (true) {
- TAtomicBase state = AtomicLoad(&State);
- TPoolId current(state & CurrentMask);
- if (current == CpuSpinning) {
- if (AtomicCas(&State, (state & ~CurrentMask) | CpuBlocked, state)) {
- return false; // successful switch
- }
- } else {
- return true; // wakeup
- }
- }
- }
-
- bool Block(ui64 timeoutNs, TPoolId& result) {
-#ifdef _linux_
- timespec timeout;
- timeout.tv_sec = timeoutNs / 1'000'000'000;
- timeout.tv_nsec = timeoutNs % 1'000'000'000;
- SysFutex(Futex(), FUTEX_WAIT_PRIVATE, CpuBlocked, &timeout, nullptr, 0);
-#else
- NanoSleep(timeoutNs); // non-linux wake is not supported, cpu will go idle on wake after blocked state
-#endif
- TAtomicBase state = AtomicLoad(&State);
- TPoolId current(state & CurrentMask);
- if (current == CpuBlocked) {
- return false; // timeout
- } else {
- result = current;
- return true; // wakeup
- }
- }
-
- enum EWakeResult {
- Woken, // successfully woken up
- NotIdle, // cpu is already not idle
- Forbidden, // cpu is assigned to another pool
- Stopped, // cpu is shutdown
- };
-
- EWakeResult WakeWithoutToken(TPoolId pool) {
- while (true) {
- TAtomicBase state = RelaxedLoad(&State);
- TPoolId current(state & CurrentMask);
- TPoolId assigned((state & AssignedMask) >> AssignedOffs);
- if (assigned == CpuShared || assigned == pool) {
- if (current == CpuSpinning) {
- if (AtomicCas(&State, (state & ~CurrentMask) | pool, state)) {
- return Woken;
- }
- } else if (current == CpuBlocked) {
- if (AtomicCas(&State, (state & ~CurrentMask) | pool, state)) {
- FutexWake();
- return Woken;
- }
- } else if (current == CpuStopped) {
- return Stopped;
- } else {
- return NotIdle;
- }
- } else {
- return Forbidden;
- }
- }
- }
-
- EWakeResult WakeWithTokenAcquired(TPoolId token) {
- while (true) {
- TAtomicBase state = RelaxedLoad(&State);
- TPoolId current(state & CurrentMask);
-            // NOTE: We ignore the assigned value because we already hold a token, so
-            // NOTE: a pool that is not assigned may run here. This will be fixed
-            // NOTE: after we finish with the current activation
- if (current == CpuSpinning) {
- if (AtomicCas(&State, (state & ~CurrentMask) | token, state)) {
- return Woken;
- }
- } else if (current == CpuBlocked) {
- if (AtomicCas(&State, (state & ~CurrentMask) | token, state)) {
- FutexWake();
- return Woken;
- }
- } else if (current == CpuStopped) {
- return Stopped;
- } else {
- return NotIdle;
- }
- }
- }
-
- bool IsPoolReassigned(TPoolId current) const {
- TAtomicBase state = AtomicLoad(&State);
- TPoolId assigned((state & AssignedMask) >> AssignedOffs);
- return assigned != current;
- }
-
- private:
- void* Futex() {
- return (void*)&State; // little endian assumed
- }
-
- void FutexWake() {
-#ifdef _linux_
- SysFutex(Futex(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
-#endif
- }
- };
-
-}
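
The TCpuState class removed above packs two pool ids into one atomic 64-bit word: the currently executing pool in bits 0-31 and the balancer-assigned pool in bits 32-63. A minimal standalone sketch of that packing, using the same constants as CurrentMask/AssignedOffs above and purely hypothetical pool ids (illustrative only, not part of the diff):

#include <cstdint>
#include <cassert>

int main() {
    constexpr uint64_t CurrentBits = 32;
    constexpr uint64_t CurrentMask = (1ull << CurrentBits) - 1;
    constexpr uint64_t AssignedOffs = 32;

    // hypothetical pool ids, chosen only for illustration
    uint64_t assignedPool = 3;
    uint64_t currentPool = 5;

    // pack: assigned pool in the high half, current pool in the low half
    uint64_t state = (assignedPool << AssignedOffs) | currentPool;

    assert((state & CurrentMask) == currentPool);                      // what CurrentPool() reads
    assert(((state & ~CurrentMask) >> AssignedOffs) == assignedPool);  // what AssignedPool() reads
    return 0;
}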
diff --git a/library/cpp/actors/core/defs.h b/library/cpp/actors/core/defs.h
deleted file mode 100644
index 64b90e995d..0000000000
--- a/library/cpp/actors/core/defs.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#pragma once
-
-// unique tag to fix pragma once gcc glueing: ./library/actorlib/core/defs.h
-
-#include <library/cpp/actors/util/defs.h>
-#include <util/generic/hash.h>
-#include <util/string/printf.h>
-
-// Enables collection of
-// event send/receive counts
-// activation time histograms
-// event processing time histograms
-#define ACTORSLIB_COLLECT_EXEC_STATS
-
-static constexpr bool ActorLibCollectUsageStats = false;
-
-namespace NActors {
- using TPoolId = ui8;
- using TPoolsMask = ui64;
- static constexpr TPoolId PoolBits = 6;
-    static constexpr TPoolId MaxPools = (1 << PoolBits) - 1; // maximum number of pools (poolid=63 is reserved)
- static constexpr TPoolsMask WaitPoolsFlag = (1ull << MaxPools); // wait-for-slow-workers flag bitmask
-
- // Special TPoolId values used by TCpuState
- static constexpr TPoolId CpuSpinning = MaxPools; // fast-worker is actively spinning, no slow-workers
- static constexpr TPoolId CpuBlocked = MaxPools + 1; // fast-worker is blocked, no slow-workers
- static constexpr TPoolId CpuStopped = TPoolId(-1); // special value indicating worker should stop
- static constexpr TPoolId CpuShared = MaxPools; // special value for `assigned` meaning balancer disabled, pool scheduler is used instead
-
- using TPoolWeight = ui16;
- static constexpr TPoolWeight MinPoolWeight = 1;
- static constexpr TPoolWeight DefPoolWeight = 32;
- static constexpr TPoolWeight MaxPoolWeight = 1024;
-
- using TWorkerId = i16;
- static constexpr TWorkerId WorkerBits = 11;
- static constexpr TWorkerId MaxWorkers = 1 << WorkerBits;
-
- using TThreadId = ui64;
- static constexpr TThreadId UnknownThreadId = ui64(-1);
-
- struct TMailboxType {
- enum EType {
- Inherited = -1, // inherit mailbox from parent
- Simple = 0, // simplest queue under producer lock. fastest in no-contention case
- Revolving = 1, // somewhat outdated, tries to be wait-free. replaced by ReadAsFilled
- HTSwap = 2, // other simple lf queue, suggested for low-contention case
- ReadAsFilled = 3, // wait-free queue, suggested for high-contention or latency critical
- TinyReadAsFilled = 4, // same as 3 but with lower overhead
- //Inplace;
- //Direct;
- //Virtual
- };
- };
-
- struct TScopeId : std::pair<ui64, ui64> {
- using TBase = std::pair<ui64, ui64>;
- using TBase::TBase;
- static const TScopeId LocallyGenerated;
- };
-
- static inline TString ScopeIdToString(const TScopeId& scopeId) {
- return Sprintf("<%" PRIu64 ":%" PRIu64 ">", scopeId.first, scopeId.second);
- }
-
- enum class ESendingType {
- Common,
- Lazy,
- Tail,
- };
-
-}
-
-template<>
-struct hash<NActors::TScopeId> : hash<std::pair<ui64, ui64>> {};
-
-class TAffinity;
diff --git a/library/cpp/actors/core/event.cpp b/library/cpp/actors/core/event.cpp
deleted file mode 100644
index 6ffe42f65b..0000000000
--- a/library/cpp/actors/core/event.cpp
+++ /dev/null
@@ -1,51 +0,0 @@
-#include "event.h"
-#include "event_pb.h"
-
-namespace NActors {
-
- const TScopeId TScopeId::LocallyGenerated{
- Max<ui64>(), Max<ui64>()
- };
-
- TString IEventHandle::GetTypeName() const {
- return HasEvent() ? TypeName(*(const_cast<IEventHandle*>(this)->GetBase())) : TypeName(*this);
- }
-
- TString IEventHandle::ToString() const {
- return HasEvent() ? const_cast<IEventHandle*>(this)->GetBase()->ToString().data() : "serialized?";
- }
-
- std::unique_ptr<IEventHandle> IEventHandle::Forward(std::unique_ptr<IEventHandle>&& ev, TActorId recipient) {
- return std::unique_ptr<IEventHandle>(ev->Forward(recipient).Release());
- }
-
- TIntrusivePtr<TEventSerializedData> IEventHandle::ReleaseChainBuffer() {
- if (Buffer) {
- TIntrusivePtr<TEventSerializedData> result;
- DoSwap(result, Buffer);
- Event.Reset();
- return result;
- }
- if (Event) {
- TAllocChunkSerializer serializer;
- Event->SerializeToArcadiaStream(&serializer);
- auto chainBuf = serializer.Release(Event->CreateSerializationInfo());
- Event.Reset();
- return chainBuf;
- }
- return new TEventSerializedData;
- }
-
- TIntrusivePtr<TEventSerializedData> IEventHandle::GetChainBuffer() {
- if (Buffer) {
- return Buffer;
- }
- if (Event) {
- TAllocChunkSerializer serializer;
- Event->SerializeToArcadiaStream(&serializer);
- Buffer = serializer.Release(Event->CreateSerializationInfo());
- return Buffer;
- }
- return new TEventSerializedData;
- }
-}
diff --git a/library/cpp/actors/core/event.h b/library/cpp/actors/core/event.h
deleted file mode 100644
index 3517dc7a68..0000000000
--- a/library/cpp/actors/core/event.h
+++ /dev/null
@@ -1,389 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "actorid.h"
-#include "callstack.h"
-#include "event_load.h"
-
-#include <library/cpp/actors/wilson/wilson_trace.h>
-
-#include <util/system/hp_timer.h>
-#include <util/generic/maybe.h>
-
-namespace NActors {
- class TChunkSerializer;
- class IActor;
- class ISerializerToStream {
- public:
- virtual bool SerializeToArcadiaStream(TChunkSerializer*) const = 0;
- };
-
- class IEventBase
- : TNonCopyable,
- public ISerializerToStream {
- protected:
- // for compatibility with virtual actors
- virtual bool DoExecute(IActor* /*actor*/, std::unique_ptr<IEventHandle> /*eventPtr*/) {
- Y_DEBUG_ABORT_UNLESS(false);
- return false;
- }
- public:
- // actual typing is performed by IEventHandle
-
- virtual ~IEventBase() {
- }
-
- bool Execute(IActor* actor, std::unique_ptr<IEventHandle> eventPtr) {
- return DoExecute(actor, std::move(eventPtr));
- }
-
- virtual TString ToStringHeader() const = 0;
- virtual TString ToString() const {
- return ToStringHeader();
- }
- virtual ui32 CalculateSerializedSize() const {
- return 0;
- }
- virtual ui32 Type() const = 0;
- virtual bool SerializeToArcadiaStream(TChunkSerializer*) const = 0;
- virtual bool IsSerializable() const = 0;
- virtual ui32 CalculateSerializedSizeCached() const {
- return CalculateSerializedSize();
- }
- virtual TEventSerializationInfo CreateSerializationInfo() const { return {}; }
- };
-
- // fat handle
- class IEventHandle : TNonCopyable {
- struct TOnNondelivery {
- TActorId Recipient;
-
- TOnNondelivery(const TActorId& recipient)
- : Recipient(recipient)
- {
- }
- };
-
- public:
- template <typename TEv>
- inline TEv* CastAsLocal() const noexcept {
- auto fits = GetTypeRewrite() == TEv::EventType;
-
- return fits ? static_cast<TEv*>(Event.Get()) : nullptr;
- }
-
- template <typename TEventType>
- TEventType* Get() {
- if (Type != TEventType::EventType)
- Y_ABORT("Event type %" PRIu32 " doesn't match the expected type %" PRIu32, Type, TEventType::EventType);
-
- if (!Event) {
- static TEventSerializedData empty;
- Event.Reset(TEventType::Load(Buffer ? Buffer.Get() : &empty));
- }
-
- if (Event) {
- return static_cast<TEventType*>(Event.Get());
- }
-
- Y_ABORT("Failed to Load() event type %" PRIu32 " class %s", Type, TypeName<TEventType>().data());
- }
-
- template <typename T>
- TAutoPtr<T> Release() {
- TAutoPtr<T> x = Get<T>();
- Y_UNUSED(Event.Release());
- Buffer.Reset();
- return x;
- }
-
- enum EFlags: ui32 {
- FlagTrackDelivery = 1 << 0,
- FlagForwardOnNondelivery = 1 << 1,
- FlagSubscribeOnSession = 1 << 2,
- FlagUseSubChannel = 1 << 3,
- FlagGenerateUnsureUndelivered = 1 << 4,
- FlagExtendedFormat = 1 << 5,
- };
- using TEventFlags = ui32;
-
- const ui32 Type;
- const TEventFlags Flags;
- const TActorId Recipient;
- TActorId Sender;
- const ui64 Cookie;
- const TScopeId OriginScopeId = TScopeId::LocallyGenerated; // filled in when the message is received from Interconnect
-
- // if set, used by ActorSystem/Interconnect to report tracepoints
- NWilson::TTraceId TraceId;
-
-        // filled in if fed by the interconnect session
- const TActorId InterconnectSession;
-
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- ::NHPTimer::STime SendTime;
-#endif
-
- static const size_t ChannelBits = 12;
- static const size_t ChannelShift = (sizeof(ui32) << 3) - ChannelBits;
-
-#ifdef USE_ACTOR_CALLSTACK
- TCallstack Callstack;
-#endif
- ui16 GetChannel() const noexcept {
- return Flags >> ChannelShift;
- }
-
- ui64 GetSubChannel() const noexcept {
- return Flags & FlagUseSubChannel ? Sender.LocalId() : 0ULL;
- }
-
- static ui32 MakeFlags(ui32 channel, TEventFlags flags) {
- Y_ABORT_UNLESS(channel < (1 << ChannelBits));
- Y_ABORT_UNLESS(flags < (1 << ChannelShift));
- return (flags | (channel << ChannelShift));
- }
-
- private:
- THolder<IEventBase> Event;
- TIntrusivePtr<TEventSerializedData> Buffer;
-
- TActorId RewriteRecipient;
- ui32 RewriteType;
-
- THolder<TOnNondelivery> OnNondeliveryHolder; // only for local events
-
- public:
- void Rewrite(ui32 typeRewrite, TActorId recipientRewrite) {
- RewriteRecipient = recipientRewrite;
- RewriteType = typeRewrite;
- }
-
- void DropRewrite() {
- RewriteRecipient = Recipient;
- RewriteType = Type;
- }
-
- const TActorId& GetRecipientRewrite() const {
- return RewriteRecipient;
- }
-
- ui32 GetTypeRewrite() const {
- return RewriteType;
- }
-
- TActorId GetForwardOnNondeliveryRecipient() const {
- return OnNondeliveryHolder.Get() ? OnNondeliveryHolder->Recipient : TActorId();
- }
-
- IEventHandle(const TActorId& recipient, const TActorId& sender, IEventBase* ev, TEventFlags flags = 0, ui64 cookie = 0,
- const TActorId* forwardOnNondelivery = nullptr, NWilson::TTraceId traceId = {})
- : Type(ev->Type())
- , Flags(flags)
- , Recipient(recipient)
- , Sender(sender)
- , Cookie(cookie)
- , TraceId(std::move(traceId))
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- , SendTime(0)
-#endif
- , Event(ev)
- , RewriteRecipient(Recipient)
- , RewriteType(Type)
- {
- if (forwardOnNondelivery)
- OnNondeliveryHolder.Reset(new TOnNondelivery(*forwardOnNondelivery));
- }
-
- IEventHandle(ui32 type,
- TEventFlags flags,
- const TActorId& recipient,
- const TActorId& sender,
- TIntrusivePtr<TEventSerializedData> buffer,
- ui64 cookie,
- const TActorId* forwardOnNondelivery = nullptr,
- NWilson::TTraceId traceId = {})
- : Type(type)
- , Flags(flags)
- , Recipient(recipient)
- , Sender(sender)
- , Cookie(cookie)
- , TraceId(std::move(traceId))
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- , SendTime(0)
-#endif
- , Buffer(std::move(buffer))
- , RewriteRecipient(Recipient)
- , RewriteType(Type)
- {
- if (forwardOnNondelivery)
- OnNondeliveryHolder.Reset(new TOnNondelivery(*forwardOnNondelivery));
- }
-
- // Special ctor for events from interconnect.
- IEventHandle(const TActorId& session,
- ui32 type,
- TEventFlags flags,
- const TActorId& recipient,
- const TActorId& sender,
- TIntrusivePtr<TEventSerializedData> buffer,
- ui64 cookie,
- TScopeId originScopeId,
- NWilson::TTraceId traceId) noexcept
- : Type(type)
- , Flags(flags)
- , Recipient(recipient)
- , Sender(sender)
- , Cookie(cookie)
- , OriginScopeId(originScopeId)
- , TraceId(std::move(traceId))
- , InterconnectSession(session)
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- , SendTime(0)
-#endif
- , Buffer(std::move(buffer))
- , RewriteRecipient(Recipient)
- , RewriteType(Type)
- {
- }
-
- TIntrusivePtr<TEventSerializedData> GetChainBuffer();
- TIntrusivePtr<TEventSerializedData> ReleaseChainBuffer();
-
- ui32 GetSize() const {
- if (Buffer) {
- return Buffer->GetSize();
- } else if (Event) {
- return Event->CalculateSerializedSize();
- } else {
- return 0;
- }
- }
-
- bool HasBuffer() const {
- return bool(Buffer);
- }
-
- bool HasEvent() const {
- return bool(Event);
- }
-
- IEventBase* GetBase() {
- if (!Event) {
- if (!Buffer)
- return nullptr;
- else
- ythrow TWithBackTrace<yexception>() << "don't know how to load the event from buffer";
- }
-
- return Event.Get();
- }
-
- TAutoPtr<IEventBase> ReleaseBase() {
- TAutoPtr<IEventBase> x = GetBase();
- Y_UNUSED(Event.Release());
- Buffer.Reset();
- return x;
- }
-
- TAutoPtr<IEventHandle> Forward(const TActorId& dest) {
- if (Event)
- return new IEventHandle(dest, Sender, Event.Release(), Flags, Cookie, nullptr, std::move(TraceId));
- else
- return new IEventHandle(Type, Flags, dest, Sender, Buffer, Cookie, nullptr, std::move(TraceId));
- }
-
- TString GetTypeName() const;
- TString ToString() const;
-
- [[nodiscard]] static std::unique_ptr<IEventHandle> Forward(std::unique_ptr<IEventHandle>&& ev, TActorId recipient);
- [[nodiscard]] static std::unique_ptr<IEventHandle> ForwardOnNondelivery(std::unique_ptr<IEventHandle>&& ev, ui32 reason, bool unsure = false);
-
- [[nodiscard]] static TAutoPtr<IEventHandle> Forward(TAutoPtr<IEventHandle>&& ev, TActorId recipient) {
- return Forward(std::unique_ptr<IEventHandle>(ev.Release()), recipient).release();
- }
-
- [[nodiscard]] static THolder<IEventHandle> Forward(THolder<IEventHandle>&& ev, TActorId recipient) {
- return THolder(Forward(std::unique_ptr<IEventHandle>(ev.Release()), recipient).release());
- }
-
- [[nodiscard]] static TAutoPtr<IEventHandle> ForwardOnNondelivery(TAutoPtr<IEventHandle>&& ev, ui32 reason, bool unsure = false) {
- return ForwardOnNondelivery(std::unique_ptr<IEventHandle>(ev.Release()), reason, unsure).release();
- }
-
- [[nodiscard]] static THolder<IEventHandle> ForwardOnNondelivery(THolder<IEventHandle>&& ev, ui32 reason, bool unsure = false) {
- return THolder(ForwardOnNondelivery(std::unique_ptr<IEventHandle>(ev.Release()), reason, unsure).release());
- }
-
- template<typename T>
- static TAutoPtr<T> Release(TAutoPtr<IEventHandle>& ev) {
- return ev->Release<T>();
- }
-
- template<typename T>
- static TAutoPtr<T> Release(THolder<IEventHandle>& ev) {
- return ev->Release<T>();
- }
-
- template <typename TEv>
- inline TEv* StaticCastAsLocal() const noexcept { // blind cast
- return static_cast<TEv*>(Event.Get());
- }
- };
-
- template <typename TEventType>
- class TEventHandle: public IEventHandle {
-        TEventHandle(); // we never make instances of TEventHandle
- public:
- TEventType* Get() {
- return IEventHandle::Get<TEventType>();
- }
-
- TAutoPtr<TEventType> Release() {
- return IEventHandle::Release<TEventType>();
- }
- };
-
- static_assert(sizeof(TEventHandle<IEventBase>) == sizeof(IEventHandle), "expect sizeof(TEventHandle<IEventBase>) == sizeof(IEventHandle)");
-
- template <typename TEventType, ui32 EventType0>
- class TEventBase: public IEventBase {
- public:
- static constexpr ui32 EventType = EventType0;
- ui32 Type() const override {
- return EventType0;
- }
- // still abstract
-
- typedef TEventHandle<TEventType> THandle;
- typedef TAutoPtr<THandle> TPtr;
- };
-
-#define DEFINE_SIMPLE_LOCAL_EVENT(eventType, header) \
- TString ToStringHeader() const override { \
- return TString(header); \
- } \
- bool SerializeToArcadiaStream(NActors::TChunkSerializer*) const override { \
- Y_ABORT("Local event " #eventType " is not serializable"); \
- } \
- static IEventBase* Load(NActors::TEventSerializedData*) { \
- Y_ABORT("Local event " #eventType " has no load method"); \
- } \
- bool IsSerializable() const override { \
- return false; \
- }
-
-#define DEFINE_SIMPLE_NONLOCAL_EVENT(eventType, header) \
- TString ToStringHeader() const override { \
- return TString(header); \
- } \
- bool SerializeToArcadiaStream(NActors::TChunkSerializer*) const override { \
- return true; \
- } \
- static IEventBase* Load(NActors::TEventSerializedData*) { \
- return new eventType(); \
- } \
- bool IsSerializable() const override { \
- return true; \
- }
-}
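
IEventHandle above stores the channel number in the upper 12 bits of the 32-bit Flags word (see ChannelBits, ChannelShift, MakeFlags and GetChannel). A short illustrative sketch of that packing, assuming arbitrary example values for channel and flags (not taken from the diff):

#include <cstddef>
#include <cstdint>
#include <cassert>

int main() {
    constexpr size_t ChannelBits = 12;
    constexpr size_t ChannelShift = (sizeof(uint32_t) << 3) - ChannelBits; // 32 - 12 = 20

    uint32_t channel = 7;                       // hypothetical channel
    uint32_t flags = (1u << 0) | (1u << 2);     // hypothetical flag bits below ChannelShift

    uint32_t packed = flags | (channel << ChannelShift);   // what MakeFlags() produces
    assert((packed >> ChannelShift) == channel);           // what GetChannel() recovers
    return 0;
}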
diff --git a/library/cpp/actors/core/event_load.cpp b/library/cpp/actors/core/event_load.cpp
deleted file mode 100644
index 2171678bfb..0000000000
--- a/library/cpp/actors/core/event_load.cpp
+++ /dev/null
@@ -1,5 +0,0 @@
-#include "event_load.h"
-
-namespace NActors {
-
-}
diff --git a/library/cpp/actors/core/event_load.h b/library/cpp/actors/core/event_load.h
deleted file mode 100644
index c776026cc4..0000000000
--- a/library/cpp/actors/core/event_load.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#pragma once
-
-#include <util/stream/walk.h>
-#include <util/system/types.h>
-#include <util/generic/string.h>
-#include <library/cpp/actors/util/rope.h>
-#include <library/cpp/actors/wilson/wilson_trace.h>
-
-namespace NActors {
- class IEventHandle;
-
- struct TConstIoVec {
- const void* Data;
- size_t Size;
- };
-
- struct TIoVec {
- void* Data;
- size_t Size;
- };
-
- struct TEventSectionInfo {
- size_t Headroom = 0; // headroom to be created on the receiving side
- size_t Size = 0; // full size of serialized event section (a chunk in rope)
- size_t Tailroom = 0; // tailroom for the chunk
- size_t Alignment = 0; // required alignment
- bool IsInline = false; // if true, goes through ordinary channel
- };
-
- struct TEventSerializationInfo {
- bool IsExtendedFormat = {};
- std::vector<TEventSectionInfo> Sections;
- // total sum of Size for every section must match actual serialized size of the event
- };
-
- class TEventSerializedData
- : public TThrRefBase
- {
- TRope Rope;
- TEventSerializationInfo SerializationInfo;
-
- public:
- TEventSerializedData() = default;
-
- TEventSerializedData(TRope&& rope, TEventSerializationInfo&& serializationInfo)
- : Rope(std::move(rope))
- , SerializationInfo(std::move(serializationInfo))
- {}
-
- TEventSerializedData(const TEventSerializedData& original, TString extraBuffer)
- : Rope(original.Rope)
- , SerializationInfo(original.SerializationInfo)
- {
- if (!SerializationInfo.Sections.empty()) {
- SerializationInfo.Sections.push_back(TEventSectionInfo{0, extraBuffer.size(), 0, 0, true});
- }
- Append(std::move(extraBuffer));
- }
-
- TEventSerializedData(TString buffer, TEventSerializationInfo&& serializationInfo)
- : SerializationInfo(std::move(serializationInfo))
- {
- Append(std::move(buffer));
- }
-
- void SetSerializationInfo(TEventSerializationInfo&& serializationInfo) {
- SerializationInfo = std::move(serializationInfo);
- }
-
- const TEventSerializationInfo& GetSerializationInfo() const {
- return SerializationInfo;
- }
-
- TRope::TConstIterator GetBeginIter() const {
- return Rope.Begin();
- }
-
- size_t GetSize() const {
- return Rope.GetSize();
- }
-
- TString GetString() const {
- TString result;
- result.reserve(GetSize());
- for (auto it = Rope.Begin(); it.Valid(); it.AdvanceToNextContiguousBlock()) {
- result.append(it.ContiguousData(), it.ContiguousSize());
- }
- return result;
- }
-
- TRope GetRope() const {
- return TRope(Rope);
- }
-
- TRope EraseBack(size_t count) {
- Y_ABORT_UNLESS(count <= Rope.GetSize());
- TRope::TIterator iter = Rope.End();
- iter -= count;
- return Rope.Extract(iter, Rope.End());
- }
-
- void Append(TRope&& from) {
- Rope.Insert(Rope.End(), std::move(from));
- }
-
- void Append(TString buffer) {
- if (buffer) {
- Rope.Insert(Rope.End(), TRope(std::move(buffer)));
- }
- }
- };
-}
-
-class TChainBufWalk : public IWalkInput {
- TIntrusivePtr<NActors::TEventSerializedData> Buffer;
- TRope::TConstIterator Iter;
-
-public:
- TChainBufWalk(TIntrusivePtr<NActors::TEventSerializedData> buffer)
- : Buffer(std::move(buffer))
- , Iter(Buffer->GetBeginIter())
- {}
-
-private:
- size_t DoUnboundedNext(const void **ptr) override {
- const size_t size = Iter.ContiguousSize();
- *ptr = Iter.ContiguousData();
- if (Iter.Valid()) {
- Iter.AdvanceToNextContiguousBlock();
- }
- return size;
- }
-};
diff --git a/library/cpp/actors/core/event_local.h b/library/cpp/actors/core/event_local.h
deleted file mode 100644
index da0f740ba8..0000000000
--- a/library/cpp/actors/core/event_local.h
+++ /dev/null
@@ -1,74 +0,0 @@
-#pragma once
-
-#include "event.h"
-#include "scheduler_cookie.h"
-#include "event_load.h"
-#include <util/system/type_name.h>
-
-namespace NActors {
- template <typename TEv, ui32 TEventType>
- class TEventLocal: public TEventBase<TEv, TEventType> {
- public:
- TString ToStringHeader() const override {
- return TypeName<TEv>();
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer* /*serializer*/) const override {
- Y_ABORT("Serialization of local event %s type %" PRIu32, TypeName<TEv>().data(), TEventType);
- }
-
- bool IsSerializable() const override {
- return false;
- }
-
- static IEventBase* Load(TEventSerializedData*) {
- Y_ABORT("Loading of local event %s type %" PRIu32, TypeName<TEv>().data(), TEventType);
- }
- };
-
- template <typename TEv, ui32 TEventType>
- class TEventScheduler: public TEventLocal<TEv, TEventType> {
- public:
- TSchedulerCookieHolder Cookie;
-
- TEventScheduler(ISchedulerCookie* cookie)
- : Cookie(cookie)
- {
- }
- };
-
- template <ui32 TEventType>
- class TEventSchedulerEv: public TEventScheduler<TEventSchedulerEv<TEventType>, TEventType> {
- public:
- TEventSchedulerEv(ISchedulerCookie* cookie)
- : TEventScheduler<TEventSchedulerEv<TEventType>, TEventType>(cookie)
- {
- }
- };
-
- template <typename TEv, ui32 TEventType>
- class TEventSimple: public TEventBase<TEv, TEventType> {
- public:
- TString ToStringHeader() const override {
- static TString header(TypeName<TEv>());
- return header;
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer* /*serializer*/) const override {
- static_assert(sizeof(TEv) == sizeof(TEventSimple<TEv, TEventType>), "Descendant should be an empty class");
- return true;
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- static IEventBase* Load(NActors::TEventSerializedData*) {
- return new TEv();
- }
-
- static IEventBase* Load(const TString&) {
- return new TEv();
- }
- };
-}
diff --git a/library/cpp/actors/core/event_pb.cpp b/library/cpp/actors/core/event_pb.cpp
deleted file mode 100644
index e6a863c44e..0000000000
--- a/library/cpp/actors/core/event_pb.cpp
+++ /dev/null
@@ -1,224 +0,0 @@
-#include "event_pb.h"
-
-namespace NActors {
- bool TRopeStream::Next(const void** data, int* size) {
- *data = Iter.ContiguousData();
- *size = Iter.ContiguousSize();
- if (size_t(*size + TotalByteCount) > Size) {
- *size = Size - TotalByteCount;
- Iter += *size;
- } else if (Iter.Valid()) {
- Iter.AdvanceToNextContiguousBlock();
- }
- TotalByteCount += *size;
- return *size != 0;
- }
-
- void TRopeStream::BackUp(int count) {
- Y_ABORT_UNLESS(count <= TotalByteCount);
- Iter -= count;
- TotalByteCount -= count;
- }
-
- bool TRopeStream::Skip(int count) {
- if (static_cast<size_t>(TotalByteCount + count) > Size) {
- count = Size - TotalByteCount;
- }
- Iter += count;
- TotalByteCount += count;
- return static_cast<size_t>(TotalByteCount) != Size;
- }
-
- TCoroutineChunkSerializer::TCoroutineChunkSerializer()
- : TotalSerializedDataSize(0)
- , Stack(64 * 1024)
- , SelfClosure{this, TArrayRef(Stack.Begin(), Stack.End())}
- , InnerContext(SelfClosure)
- {}
-
- TCoroutineChunkSerializer::~TCoroutineChunkSerializer() {
- CancelFlag = true;
- Resume();
- Y_ABORT_UNLESS(Finished);
- }
-
- bool TCoroutineChunkSerializer::AllowsAliasing() const {
- return true;
- }
-
- void TCoroutineChunkSerializer::Produce(const void *data, size_t size) {
- Y_ABORT_UNLESS(size <= SizeRemain);
- SizeRemain -= size;
- TotalSerializedDataSize += size;
-
- if (!Chunks.empty()) {
- auto& last = Chunks.back();
- if (last.first + last.second == data) {
- last.second += size; // just extend the last buffer
- return;
- }
- }
-
- Chunks.emplace_back(static_cast<const char*>(data), size);
- }
-
- bool TCoroutineChunkSerializer::WriteAliasedRaw(const void* data, int size) {
- Y_ABORT_UNLESS(!CancelFlag);
- Y_ABORT_UNLESS(!AbortFlag);
- Y_ABORT_UNLESS(size >= 0);
- while (size) {
- if (const size_t bytesToAppend = Min<size_t>(size, SizeRemain)) {
- const void *produce = data;
- if ((reinterpret_cast<uintptr_t>(data) & 63) + bytesToAppend <= 64 &&
- (Chunks.empty() || data != Chunks.back().first + Chunks.back().second)) {
- memcpy(BufferPtr, data, bytesToAppend);
- produce = BufferPtr;
- BufferPtr += bytesToAppend;
- }
- Produce(produce, bytesToAppend);
- data = static_cast<const char*>(data) + bytesToAppend;
- size -= bytesToAppend;
- } else {
- InnerContext.SwitchTo(BufFeedContext);
- if (CancelFlag || AbortFlag) {
- return false;
- }
- }
- }
- return true;
- }
-
- bool TCoroutineChunkSerializer::Next(void** data, int* size) {
- Y_ABORT_UNLESS(!CancelFlag);
- Y_ABORT_UNLESS(!AbortFlag);
- if (!SizeRemain) {
- InnerContext.SwitchTo(BufFeedContext);
- if (CancelFlag || AbortFlag) {
- return false;
- }
- }
- Y_ABORT_UNLESS(SizeRemain);
- *data = BufferPtr;
- *size = SizeRemain;
- BufferPtr += SizeRemain;
- Produce(*data, *size);
- return true;
- }
-
- void TCoroutineChunkSerializer::BackUp(int count) {
- if (!count) {
- return;
- }
- Y_ABORT_UNLESS(count > 0);
- Y_ABORT_UNLESS(!Chunks.empty());
- TChunk& buf = Chunks.back();
- Y_ABORT_UNLESS((size_t)count <= buf.second);
- Y_ABORT_UNLESS(buf.first + buf.second == BufferPtr, "buf# %p:%zu BufferPtr# %p SizeRemain# %zu NumChunks# %zu",
- buf.first, buf.second, BufferPtr, SizeRemain, Chunks.size());
- buf.second -= count;
- if (!buf.second) {
- Chunks.pop_back();
- }
- BufferPtr -= count;
- SizeRemain += count;
- TotalSerializedDataSize -= count;
- }
-
- void TCoroutineChunkSerializer::Resume() {
- TContMachineContext feedContext;
- BufFeedContext = &feedContext;
- feedContext.SwitchTo(&InnerContext);
- BufFeedContext = nullptr;
- }
-
- bool TCoroutineChunkSerializer::WriteRope(const TRope *rope) {
- for (auto iter = rope->Begin(); iter.Valid(); iter.AdvanceToNextContiguousBlock()) {
- if (!WriteAliasedRaw(iter.ContiguousData(), iter.ContiguousSize())) {
- return false;
- }
- }
- return true;
- }
-
- bool TCoroutineChunkSerializer::WriteString(const TString *s) {
- return WriteAliasedRaw(s->data(), s->length());
- }
-
- std::span<TCoroutineChunkSerializer::TChunk> TCoroutineChunkSerializer::FeedBuf(void* data, size_t size) {
- // fill in base params
- BufferPtr = static_cast<char*>(data);
- SizeRemain = size;
- Y_DEBUG_ABORT_UNLESS(size);
-
- // transfer control to the coroutine
- Y_ABORT_UNLESS(Event);
- Chunks.clear();
- Resume();
-
- return Chunks;
- }
-
- void TCoroutineChunkSerializer::SetSerializingEvent(const IEventBase *event) {
- Y_ABORT_UNLESS(Event == nullptr);
- Event = event;
- TotalSerializedDataSize = 0;
- AbortFlag = false;
- }
-
- void TCoroutineChunkSerializer::Abort() {
- Y_ABORT_UNLESS(Event);
- AbortFlag = true;
- Resume();
- }
-
- void TCoroutineChunkSerializer::DoRun() {
- while (!CancelFlag) {
- Y_ABORT_UNLESS(Event);
- SerializationSuccess = !AbortFlag && Event->SerializeToArcadiaStream(this);
- Event = nullptr;
- if (!CancelFlag) { // cancel flag may have been received during serialization
- InnerContext.SwitchTo(BufFeedContext);
- }
- }
- Finished = true;
- InnerContext.SwitchTo(BufFeedContext);
- }
-
- bool TAllocChunkSerializer::Next(void** pdata, int* psize) {
- if (Backup) {
- // we have some data in backup rope -- move the first chunk from the backup rope to the buffer and return
- // pointer to the buffer; it is safe to remove 'const' here as we uniquely own this buffer
- TRope::TIterator iter = Backup.Begin();
- *pdata = const_cast<char*>(iter.ContiguousData());
- *psize = iter.ContiguousSize();
- iter.AdvanceToNextContiguousBlock();
- Buffers->Append(Backup.Extract(Backup.Begin(), iter));
- } else {
-            // no backup buffer, so we have to create a new one
- auto item = TRopeAlignedBuffer::Allocate(4096);
- *pdata = item->GetBuffer();
- *psize = item->GetCapacity();
- Buffers->Append(TRope(std::move(item)));
- }
- return true;
- }
-
- void TAllocChunkSerializer::BackUp(int count) {
- Backup.Insert(Backup.Begin(), Buffers->EraseBack(count));
- }
-
- bool TAllocChunkSerializer::WriteAliasedRaw(const void*, int) {
- Y_ABORT_UNLESS(false);
- return false;
- }
-
- bool TAllocChunkSerializer::WriteRope(const TRope *rope) {
- Buffers->Append(TRope(*rope));
- return true;
- }
-
- bool TAllocChunkSerializer::WriteString(const TString *s) {
- Buffers->Append(*s);
- return true;
- }
-}
diff --git a/library/cpp/actors/core/event_pb.h b/library/cpp/actors/core/event_pb.h
deleted file mode 100644
index 5fce7c830f..0000000000
--- a/library/cpp/actors/core/event_pb.h
+++ /dev/null
@@ -1,654 +0,0 @@
-#pragma once
-
-#include "event.h"
-#include "event_load.h"
-
-#include <google/protobuf/io/zero_copy_stream.h>
-#include <google/protobuf/arena.h>
-#include <library/cpp/actors/protos/actors.pb.h>
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-#include <util/generic/deque.h>
-#include <util/system/context.h>
-#include <util/system/filemap.h>
-#include <util/string/builder.h>
-#include <util/thread/lfstack.h>
-#include <array>
-#include <span>
-
-// enable only after the patch with this macro has been successfully deployed
-#define USE_EXTENDED_PAYLOAD_FORMAT 0
-
-namespace NActors {
-
- class TRopeStream : public NProtoBuf::io::ZeroCopyInputStream {
- TRope::TConstIterator Iter;
- const size_t Size;
-
- public:
- TRopeStream(TRope::TConstIterator iter, size_t size)
- : Iter(iter)
- , Size(size)
- {}
-
- bool Next(const void** data, int* size) override;
- void BackUp(int count) override;
- bool Skip(int count) override;
- int64_t ByteCount() const override {
- return TotalByteCount;
- }
-
- private:
- int64_t TotalByteCount = 0;
- };
-
- class TChunkSerializer : public NProtoBuf::io::ZeroCopyOutputStream {
- public:
- TChunkSerializer() = default;
- virtual ~TChunkSerializer() = default;
-
- virtual bool WriteRope(const TRope *rope) = 0;
- virtual bool WriteString(const TString *s) = 0;
- };
-
- class TAllocChunkSerializer final : public TChunkSerializer {
- public:
- bool Next(void** data, int* size) override;
- void BackUp(int count) override;
- int64_t ByteCount() const override {
- return Buffers->GetSize();
- }
- bool WriteAliasedRaw(const void* data, int size) override;
-
-        // WARNING: these methods require the owner to retain ownership and immutability of the passed objects
- bool WriteRope(const TRope *rope) override;
- bool WriteString(const TString *s) override;
-
- inline TIntrusivePtr<TEventSerializedData> Release(TEventSerializationInfo&& serializationInfo) {
- Buffers->SetSerializationInfo(std::move(serializationInfo));
- return std::move(Buffers);
- }
-
- protected:
- TIntrusivePtr<TEventSerializedData> Buffers = new TEventSerializedData;
- TRope Backup;
- };
-
- class TCoroutineChunkSerializer final : public TChunkSerializer, protected ITrampoLine {
- public:
- using TChunk = std::pair<const char*, size_t>;
-
- TCoroutineChunkSerializer();
- ~TCoroutineChunkSerializer();
-
- void SetSerializingEvent(const IEventBase *event);
- void Abort();
- std::span<TChunk> FeedBuf(void* data, size_t size);
- bool IsComplete() const {
- return !Event;
- }
- bool IsSuccessfull() const {
- return SerializationSuccess;
- }
- const IEventBase *GetCurrentEvent() const {
- return Event;
- }
-
- bool Next(void** data, int* size) override;
- void BackUp(int count) override;
- int64_t ByteCount() const override {
- return TotalSerializedDataSize;
- }
- bool WriteAliasedRaw(const void* data, int size) override;
- bool AllowsAliasing() const override;
-
- bool WriteRope(const TRope *rope) override;
- bool WriteString(const TString *s) override;
-
- protected:
- void DoRun() override;
- void Resume();
- void Produce(const void *data, size_t size);
-
- i64 TotalSerializedDataSize;
- TMappedAllocation Stack;
- TContClosure SelfClosure;
- TContMachineContext InnerContext;
- TContMachineContext *BufFeedContext = nullptr;
- char *BufferPtr;
- size_t SizeRemain;
- std::vector<TChunk> Chunks;
- const IEventBase *Event = nullptr;
- bool CancelFlag = false;
- bool AbortFlag;
- bool SerializationSuccess;
- bool Finished = false;
- };
-
- struct TProtoArenaHolder : public TAtomicRefCount<TProtoArenaHolder> {
- google::protobuf::Arena Arena;
- TProtoArenaHolder() = default;
-
- explicit TProtoArenaHolder(const google::protobuf::ArenaOptions& arenaOptions)
- : Arena(arenaOptions)
- {};
-
- google::protobuf::Arena* Get() {
- return &Arena;
- }
-
- template<typename TRecord>
- TRecord* Allocate() {
- return google::protobuf::Arena::CreateMessage<TRecord>(&Arena);
- }
- };
-
- static const size_t EventMaxByteSize = 140 << 20; // (140MB)
-
- template <typename TEv, typename TRecord /*protobuf record*/, ui32 TEventType, typename TRecHolder>
- class TEventPBBase: public TEventBase<TEv, TEventType> , public TRecHolder {
- // a vector of data buffers referenced by record; if filled, then extended serialization mechanism applies
- TVector<TRope> Payload;
- size_t TotalPayloadSize = 0;
-
- public:
- using TRecHolder::Record;
-
- public:
- using ProtoRecordType = TRecord;
-
- TEventPBBase() = default;
-
- explicit TEventPBBase(const TRecord& rec)
- : TRecHolder(rec)
- {}
-
- explicit TEventPBBase(TRecord&& rec)
- : TRecHolder(rec)
- {}
-
- explicit TEventPBBase(TIntrusivePtr<TProtoArenaHolder> arena)
- : TRecHolder(std::move(arena))
- {}
-
- TString ToStringHeader() const override {
- return Record.GetTypeName();
- }
-
- TString ToString() const override {
- TStringStream ss;
- ss << ToStringHeader() << " " << Record.ShortDebugString();
- return ss.Str();
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer* chunker) const override {
- return SerializeToArcadiaStreamImpl(chunker, TString());
- }
-
- ui32 CalculateSerializedSize() const override {
- ssize_t result = Record.ByteSize();
- if (result >= 0 && Payload) {
- ++result; // marker
- char buf[MaxNumberBytes];
- result += SerializeNumber(Payload.size(), buf);
- for (const TRope& rope : Payload) {
- result += SerializeNumber(rope.GetSize(), buf);
- }
- result += TotalPayloadSize;
- }
- return result;
- }
-
- static IEventBase* Load(TEventSerializedData *input) {
- THolder<TEventPBBase> ev(new TEv());
- if (!input->GetSize()) {
- Y_PROTOBUF_SUPPRESS_NODISCARD ev->Record.ParseFromString(TString());
- } else {
- TRope::TConstIterator iter = input->GetBeginIter();
- ui64 size = input->GetSize();
-
- if (const auto& info = input->GetSerializationInfo(); info.IsExtendedFormat) {
- // check marker
- if (!iter.Valid() || (*iter.ContiguousData() != PayloadMarker && *iter.ContiguousData() != ExtendedPayloadMarker)) {
- Y_ABORT("invalid event");
- }
-
- const bool dataIsSeparate = *iter.ContiguousData() == ExtendedPayloadMarker; // ropes go after sizes
-
- auto fetchRope = [&](size_t len) {
- TRope::TConstIterator begin = iter;
- iter += len;
- size -= len;
- ev->Payload.emplace_back(begin, iter);
- ev->TotalPayloadSize += len;
- };
-
- // skip marker
- iter += 1;
- --size;
- // parse number of payload ropes
- size_t numRopes = DeserializeNumber(iter, size);
- if (numRopes == Max<size_t>()) {
- Y_ABORT("invalid event");
- }
- TStackVec<size_t, 16> ropeLens;
- if (dataIsSeparate) {
- ropeLens.reserve(numRopes);
- }
- while (numRopes--) {
- // parse length of the rope
- const size_t len = DeserializeNumber(iter, size);
- if (len == Max<size_t>() || size < len) {
- Y_ABORT("invalid event len# %zu size# %" PRIu64, len, size);
- }
- // extract the rope
- if (dataIsSeparate) {
- ropeLens.push_back(len);
- } else {
- fetchRope(len);
- }
- }
- for (size_t len : ropeLens) {
- fetchRope(len);
- }
- }
-
- // parse the protobuf
- TRopeStream stream(iter, size);
- if (!ev->Record.ParseFromZeroCopyStream(&stream)) {
- Y_ABORT("Failed to parse protobuf event type %" PRIu32 " class %s", TEventType, TypeName(ev->Record).data());
- }
- }
- ev->CachedByteSize = input->GetSize();
- return ev.Release();
- }
-
- size_t GetCachedByteSize() const {
- if (CachedByteSize == 0) {
- CachedByteSize = CalculateSerializedSize();
- }
- return CachedByteSize;
- }
-
- ui32 CalculateSerializedSizeCached() const override {
- return GetCachedByteSize();
- }
-
- void InvalidateCachedByteSize() {
- CachedByteSize = 0;
- }
-
- TEventSerializationInfo CreateSerializationInfo() const override {
- return CreateSerializationInfoImpl(0);
- }
-
- bool AllowExternalDataChannel() const {
- return TotalPayloadSize >= 4096;
- }
-
- public:
- void ReservePayload(size_t size) {
- Payload.reserve(size);
- }
-
- ui32 AddPayload(TRope&& rope) {
- const ui32 id = Payload.size();
- TotalPayloadSize += rope.size();
- Payload.push_back(std::move(rope));
- InvalidateCachedByteSize();
- return id;
- }
-
- const TRope& GetPayload(ui32 id) const {
- Y_ABORT_UNLESS(id < Payload.size());
- return Payload[id];
- }
-
- ui32 GetPayloadCount() const {
- return Payload.size();
- }
-
- void StripPayload() {
- Payload.clear();
- TotalPayloadSize = 0;
- }
-
- protected:
- TEventSerializationInfo CreateSerializationInfoImpl(size_t preserializedSize) const {
- TEventSerializationInfo info;
- info.IsExtendedFormat = static_cast<bool>(Payload);
-
- if (static_cast<const TEv&>(*this).AllowExternalDataChannel()) {
- if (Payload) {
- char temp[MaxNumberBytes];
-#if USE_EXTENDED_PAYLOAD_FORMAT
- size_t headerLen = 1 + SerializeNumber(Payload.size(), temp);
- for (const TRope& rope : Payload) {
- headerLen += SerializeNumber(rope.size(), temp);
- }
- info.Sections.push_back(TEventSectionInfo{0, headerLen, 0, 0, true});
- for (const TRope& rope : Payload) {
- info.Sections.push_back(TEventSectionInfo{0, rope.size(), 0, 0, false});
- }
-#else
- info.Sections.push_back(TEventSectionInfo{0, 1 + SerializeNumber(Payload.size(), temp), 0, 0, true}); // payload marker and rope count
- for (const TRope& rope : Payload) {
- const size_t ropeSize = rope.GetSize();
- info.Sections.back().Size += SerializeNumber(ropeSize, temp);
- info.Sections.push_back(TEventSectionInfo{0, ropeSize, 0, 0, false}); // data as a separate section
- }
-#endif
- }
-
- const size_t byteSize = Max<ssize_t>(0, Record.ByteSize()) + preserializedSize;
- info.Sections.push_back(TEventSectionInfo{0, byteSize, 0, 0, true}); // protobuf itself
-
-#ifndef NDEBUG
- size_t total = 0;
- for (const auto& section : info.Sections) {
- total += section.Size;
- }
- size_t serialized = CalculateSerializedSize();
- Y_ABORT_UNLESS(total == serialized, "total# %zu serialized# %zu byteSize# %zd Payload.size# %zu", total,
- serialized, byteSize, Payload.size());
-#endif
- }
-
- return info;
- }
-
- bool SerializeToArcadiaStreamImpl(TChunkSerializer* chunker, const TString& preserialized) const {
- // serialize payload first
- if (Payload) {
- void *data;
- int size = 0;
- auto append = [&](const char *p, size_t len) {
- while (len) {
- if (size) {
- const size_t numBytesToCopy = std::min<size_t>(size, len);
- memcpy(data, p, numBytesToCopy);
- data = static_cast<char*>(data) + numBytesToCopy;
- size -= numBytesToCopy;
- p += numBytesToCopy;
- len -= numBytesToCopy;
- } else if (!chunker->Next(&data, &size)) {
- return false;
- }
- }
- return true;
- };
- auto appendNumber = [&](size_t number) {
- char buf[MaxNumberBytes];
- return append(buf, SerializeNumber(number, buf));
- };
-
-#if USE_EXTENDED_PAYLOAD_FORMAT
- char marker = ExtendedPayloadMarker;
- append(&marker, 1);
- if (!appendNumber(Payload.size())) {
- return false;
- }
- for (const TRope& rope : Payload) {
- if (!appendNumber(rope.GetSize())) {
- return false;
- }
- }
- if (size) {
- chunker->BackUp(std::exchange(size, 0));
- }
- for (const TRope& rope : Payload) {
- if (!chunker->WriteRope(&rope)) {
- return false;
- }
- }
-#else
- char marker = PayloadMarker;
- append(&marker, 1);
- if (!appendNumber(Payload.size())) {
- return false;
- }
- for (const TRope& rope : Payload) {
- if (!appendNumber(rope.GetSize())) {
- return false;
- }
- if (rope) {
- if (size) {
- chunker->BackUp(std::exchange(size, 0));
- }
- if (!chunker->WriteRope(&rope)) {
- return false;
- }
- }
- }
- if (size) {
- chunker->BackUp(size);
- }
-#endif
- }
-
- if (preserialized && !chunker->WriteString(&preserialized)) {
- return false;
- }
-
- return Record.SerializeToZeroCopyStream(chunker);
- }
-
- protected:
- mutable size_t CachedByteSize = 0;
-
- static constexpr char ExtendedPayloadMarker = 0x06;
- static constexpr char PayloadMarker = 0x07;
- static constexpr size_t MaxNumberBytes = (sizeof(size_t) * CHAR_BIT + 6) / 7;
-
- static size_t SerializeNumber(size_t num, char *buffer) {
- char *begin = buffer;
- do {
- *buffer++ = (num & 0x7F) | (num >= 128 ? 0x80 : 0x00);
- num >>= 7;
- } while (num);
- return buffer - begin;
- }
-
- static size_t DeserializeNumber(const char **ptr, const char *end) {
- const char *p = *ptr;
- size_t res = 0;
- size_t offset = 0;
- for (;;) {
- if (p == end) {
- return Max<size_t>();
- }
- const char byte = *p++;
- res |= (static_cast<size_t>(byte) & 0x7F) << offset;
- offset += 7;
- if (!(byte & 0x80)) {
- break;
- }
- }
- *ptr = p;
- return res;
- }
-
- static size_t DeserializeNumber(TRope::TConstIterator& iter, ui64& size) {
- size_t res = 0;
- size_t offset = 0;
- for (;;) {
- if (!iter.Valid()) {
- return Max<size_t>();
- }
- const char byte = *iter.ContiguousData();
- iter += 1;
- --size;
- res |= (static_cast<size_t>(byte) & 0x7F) << offset;
- offset += 7;
- if (!(byte & 0x80)) {
- break;
- }
- }
- return res;
- }
- };
-
- // Protobuf record not using arena
- template <typename TRecord>
- struct TRecordHolder {
- TRecord Record;
-
- TRecordHolder() = default;
- TRecordHolder(const TRecord& rec)
- : Record(rec)
- {}
-
- TRecordHolder(TRecord&& rec)
- : Record(std::move(rec))
- {}
- };
-
- // Protobuf arena and a record allocated on it
- template <typename TRecord, size_t InitialBlockSize, size_t MaxBlockSize>
- struct TArenaRecordHolder {
- TIntrusivePtr<TProtoArenaHolder> Arena;
- TRecord& Record;
-
-        // Arena requires the block size to be a multiple of 8 for correctness
- // FIXME: uncomment these asserts when code is synchronized between repositories
- // static_assert((InitialBlockSize & 7) == 0, "Misaligned InitialBlockSize");
- // static_assert((MaxBlockSize & 7) == 0, "Misaligned MaxBlockSize");
-
- static const google::protobuf::ArenaOptions GetArenaOptions() {
- google::protobuf::ArenaOptions opts;
- opts.initial_block_size = InitialBlockSize;
- opts.max_block_size = MaxBlockSize;
- return opts;
- }
-
- TArenaRecordHolder()
- : Arena(MakeIntrusive<TProtoArenaHolder>(GetArenaOptions()))
- , Record(*Arena->Allocate<TRecord>())
- {};
-
- TArenaRecordHolder(const TRecord& rec)
- : TArenaRecordHolder()
- {
- Record.CopyFrom(rec);
- }
-
-        // not allowed to move from another protobuf, as that would be a potential copy
- TArenaRecordHolder(TRecord&& rec) = delete;
-
- TArenaRecordHolder(TIntrusivePtr<TProtoArenaHolder> arena)
- : Arena(std::move(arena))
- , Record(*Arena->Allocate<TRecord>())
- {};
- };
-
- template <typename TEv, typename TRecord, ui32 TEventType>
- class TEventPB : public TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > {
- typedef TEventPBBase<TEv, TRecord, TEventType, TRecordHolder<TRecord> > TPbBase;
- // NOTE: No extra fields allowed: TEventPB must be a "template typedef"
- public:
- using TPbBase::TPbBase;
- };
-
- template <typename TEv, typename TRecord, ui32 TEventType, size_t InitialBlockSize = 512, size_t MaxBlockSize = 16*1024>
- using TEventPBWithArena = TEventPBBase<TEv, TRecord, TEventType, TArenaRecordHolder<TRecord, InitialBlockSize, MaxBlockSize> >;
-
- template <typename TEv, typename TRecord, ui32 TEventType>
- class TEventShortDebugPB: public TEventPB<TEv, TRecord, TEventType> {
- public:
- using TBase = TEventPB<TEv, TRecord, TEventType>;
- TEventShortDebugPB() = default;
- explicit TEventShortDebugPB(const TRecord& rec)
- : TBase(rec)
- {
- }
- explicit TEventShortDebugPB(TRecord&& rec)
- : TBase(std::move(rec))
- {
- }
- TString ToString() const override {
- return TypeName<TEv>() + " { " + TBase::Record.ShortDebugString() + " }";
- }
- };
-
- template <typename TEv, typename TRecord, ui32 TEventType>
- class TEventPreSerializedPB: public TEventPB<TEv, TRecord, TEventType> {
- protected:
- using TBase = TEventPB<TEv, TRecord, TEventType>;
- using TSelf = TEventPreSerializedPB<TEv, TRecord, TEventType>;
- using TBase::Record;
-
- public:
- TString PreSerializedData; // already serialized PB data (using message::SerializeToString)
-
- TEventPreSerializedPB() = default;
-
- explicit TEventPreSerializedPB(const TRecord& rec)
- : TBase(rec)
- {
- }
-
- explicit TEventPreSerializedPB(TRecord&& rec)
- : TBase(std::move(rec))
- {
- }
-
-        // when a remote event is received locally, this method merges the preserialized data
- const TRecord& GetRecord() {
- TRecord& base(TBase::Record);
- if (!PreSerializedData.empty()) {
- TRecord copy;
- Y_PROTOBUF_SUPPRESS_NODISCARD copy.ParseFromString(PreSerializedData);
- copy.MergeFrom(base);
- base.Swap(&copy);
- PreSerializedData.clear();
- }
- return TBase::Record;
- }
-
- const TRecord& GetRecord() const {
- return const_cast<TSelf*>(this)->GetRecord();
- }
-
- TRecord* MutableRecord() {
- GetRecord(); // Make sure PreSerializedData is parsed
- return &(TBase::Record);
- }
-
- TString ToString() const override {
- return GetRecord().ShortDebugString();
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer* chunker) const override {
- return TBase::SerializeToArcadiaStreamImpl(chunker, PreSerializedData);
- }
-
- ui32 CalculateSerializedSize() const override {
- return PreSerializedData.size() + TBase::CalculateSerializedSize();
- }
-
- size_t GetCachedByteSize() const {
- return PreSerializedData.size() + TBase::GetCachedByteSize();
- }
-
- ui32 CalculateSerializedSizeCached() const override {
- return GetCachedByteSize();
- }
-
- TEventSerializationInfo CreateSerializationInfo() const override {
- return TBase::CreateSerializationInfoImpl(PreSerializedData.size());
- }
- };
-
- inline TActorId ActorIdFromProto(const NActorsProto::TActorId& actorId) {
- return TActorId(actorId.GetRawX1(), actorId.GetRawX2());
- }
-
- inline void ActorIdToProto(const TActorId& src, NActorsProto::TActorId* dest) {
- Y_DEBUG_ABORT_UNLESS(dest);
- dest->SetRawX1(src.RawX1());
- dest->SetRawX2(src.RawX2());
- }
-}
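
SerializeNumber and DeserializeNumber above implement a base-128 varint: each byte carries seven bits of the value and the high bit marks that more bytes follow. A self-contained sketch of the encoder with one worked example; the byte values shown follow from the scheme itself and are not taken from the diff:

#include <cstddef>
#include <cassert>

// same 7-bit-per-byte encoding as SerializeNumber in the removed header
static size_t Encode(size_t num, char* buffer) {
    char* begin = buffer;
    do {
        *buffer++ = (num & 0x7F) | (num >= 128 ? 0x80 : 0x00); // set high bit if more bytes follow
        num >>= 7;
    } while (num);
    return buffer - begin;
}

int main() {
    char buf[10];
    size_t len = Encode(300, buf);          // 300 = 0b1_0010_1100
    assert(len == 2);
    assert((unsigned char)buf[0] == 0xAC);  // low 7 bits (0x2C) with continuation bit 0x80
    assert((unsigned char)buf[1] == 0x02);  // remaining high bits
    return 0;
}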
diff --git a/library/cpp/actors/core/event_pb_payload_ut.cpp b/library/cpp/actors/core/event_pb_payload_ut.cpp
deleted file mode 100644
index c75169db44..0000000000
--- a/library/cpp/actors/core/event_pb_payload_ut.cpp
+++ /dev/null
@@ -1,154 +0,0 @@
-#include "event_pb.h"
-#include "events.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/protos/unittests.pb.h>
-
-using namespace NActors;
-
-enum {
- EvMessageWithPayload = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvArenaMessage,
- EvArenaMessageBig,
- EvMessageWithPayloadPreSerialized
-};
-
-struct TEvMessageWithPayload : TEventPB<TEvMessageWithPayload, TMessageWithPayload, EvMessageWithPayload> {
- TEvMessageWithPayload() = default;
- explicit TEvMessageWithPayload(const TMessageWithPayload& p)
- : TEventPB<TEvMessageWithPayload, TMessageWithPayload, EvMessageWithPayload>(p)
- {}
-};
-
-struct TEvMessageWithPayloadPreSerialized : TEventPreSerializedPB<TEvMessageWithPayloadPreSerialized, TMessageWithPayload, EvMessageWithPayloadPreSerialized> {
-};
-
-
-TRope MakeStringRope(const TString& message) {
- return message ? TRope(message) : TRope();
-}
-
-TString MakeString(size_t len) {
- TString res;
- for (size_t i = 0; i < len; ++i) {
- res += RandomNumber<char>();
- }
- return res;
-}
-
-Y_UNIT_TEST_SUITE(TEventProtoWithPayload) {
-
- template <class TEventFrom, class TEventTo>
- void TestSerializeDeserialize(size_t size1, size_t size2) {
- static_assert(TEventFrom::EventType == TEventTo::EventType, "Must be same event type");
-
- TEventFrom msg;
- msg.Record.SetMeta("hello, world!");
- msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size1))));
- msg.Record.AddPayloadId(msg.AddPayload(MakeStringRope(MakeString(size2))));
- msg.Record.AddSomeData(MakeString((size1 + size2) % 50 + 11));
-
- auto serializer = MakeHolder<TAllocChunkSerializer>();
- msg.SerializeToArcadiaStream(serializer.Get());
- auto buffers = serializer->Release(msg.CreateSerializationInfo());
- UNIT_ASSERT_VALUES_EQUAL(buffers->GetSize(), msg.CalculateSerializedSize());
- TString ser = buffers->GetString();
-
- TString chunkerRes;
- TCoroutineChunkSerializer chunker;
- chunker.SetSerializingEvent(&msg);
- while (!chunker.IsComplete()) {
- char buffer[4096];
- auto range = chunker.FeedBuf(buffer, sizeof(buffer));
- for (auto [data, size] : range) {
- chunkerRes += TString(data, size);
- }
- }
- UNIT_ASSERT_VALUES_EQUAL(chunkerRes, ser);
-
- THolder<IEventBase> ev2 = THolder(TEventTo::Load(buffers.Get()));
- TEventTo& msg2 = static_cast<TEventTo&>(*ev2);
- UNIT_ASSERT_VALUES_EQUAL(msg2.Record.GetMeta(), msg.Record.GetMeta());
- UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(0)), msg.GetPayload(msg.Record.GetPayloadId(0)));
- UNIT_ASSERT_EQUAL(msg2.GetPayload(msg2.Record.GetPayloadId(1)), msg.GetPayload(msg.Record.GetPayloadId(1)));
- }
-
- template <class TEvent>
- void TestAllSizes(size_t step1 = 100, size_t step2 = 111) {
- for (size_t size1 = 0; size1 < 10000; size1 += step1) {
- for (size_t size2 = 0; size2 < 10000; size2 += step2) {
- TestSerializeDeserialize<TEvent, TEvent>(size1, size2);
- }
- }
- }
-
-#if (!defined(_tsan_enabled_))
- Y_UNIT_TEST(SerializeDeserialize) {
- TestAllSizes<TEvMessageWithPayload>();
- }
-#endif
-
-
- struct TEvArenaMessage : TEventPBWithArena<TEvArenaMessage, TMessageWithPayload, EvArenaMessage> {
- };
-
- Y_UNIT_TEST(SerializeDeserializeArena) {
- TestAllSizes<TEvArenaMessage>(500, 111);
- }
-
-
- struct TEvArenaMessageBig : TEventPBWithArena<TEvArenaMessageBig, TMessageWithPayload, EvArenaMessageBig, 4000, 32000> {
- };
-
- Y_UNIT_TEST(SerializeDeserializeArenaBig) {
- TestAllSizes<TEvArenaMessageBig>(111, 500);
- }
-
-
- // Compatible with TEvArenaMessage but doesn't use arenas
- struct TEvArenaMessageWithoutArena : TEventPB<TEvArenaMessageWithoutArena, TMessageWithPayload, EvArenaMessage> {
- };
-
- Y_UNIT_TEST(Compatibility) {
- TestSerializeDeserialize<TEvArenaMessage, TEvArenaMessageWithoutArena>(200, 14010);
- TestSerializeDeserialize<TEvArenaMessageWithoutArena, TEvArenaMessage>(2000, 4010);
- }
-
- Y_UNIT_TEST(PreSerializedCompatibility) {
-        // ensure TEventPreSerializedPB and TEventPB are interchangeable with no compatibility issues
- TMessageWithPayload msg;
- msg.SetMeta("hello, world!");
- msg.AddPayloadId(123);
- msg.AddPayloadId(999);
- msg.AddSomeData("abc");
- msg.AddSomeData("xyzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzzz");
-
- TEvMessageWithPayloadPreSerialized e1;
- Y_PROTOBUF_SUPPRESS_NODISCARD msg.SerializeToString(&e1.PreSerializedData);
-
- auto serializer1 = MakeHolder<TAllocChunkSerializer>();
- e1.SerializeToArcadiaStream(serializer1.Get());
- auto buffers1 = serializer1->Release(e1.CreateSerializationInfo());
- UNIT_ASSERT_VALUES_EQUAL(buffers1->GetSize(), e1.CalculateSerializedSize());
- TString ser1 = buffers1->GetString();
-
- TEvMessageWithPayload e2(msg);
- auto serializer2 = MakeHolder<TAllocChunkSerializer>();
- e2.SerializeToArcadiaStream(serializer2.Get());
- auto buffers2 = serializer2->Release(e2.CreateSerializationInfo());
- UNIT_ASSERT_VALUES_EQUAL(buffers2->GetSize(), e2.CalculateSerializedSize());
- TString ser2 = buffers2->GetString();
- UNIT_ASSERT_VALUES_EQUAL(ser1, ser2);
-
- // deserialize
- auto data = MakeIntrusive<TEventSerializedData>(ser1, TEventSerializationInfo{});
- THolder<TEvMessageWithPayloadPreSerialized> parsedEvent(static_cast<TEvMessageWithPayloadPreSerialized*>(TEvMessageWithPayloadPreSerialized::Load(data.Get())));
- UNIT_ASSERT_VALUES_EQUAL(parsedEvent->PreSerializedData, ""); // this field is empty after deserialization
- auto& record = parsedEvent->GetRecord();
- UNIT_ASSERT_VALUES_EQUAL(record.GetMeta(), msg.GetMeta());
- UNIT_ASSERT_VALUES_EQUAL(record.PayloadIdSize(), msg.PayloadIdSize());
- UNIT_ASSERT_VALUES_EQUAL(record.PayloadIdSize(), 2);
- UNIT_ASSERT_VALUES_EQUAL(record.GetPayloadId(0), msg.GetPayloadId(0));
- UNIT_ASSERT_VALUES_EQUAL(record.GetPayloadId(1), msg.GetPayloadId(1));
- }
-}
diff --git a/library/cpp/actors/core/event_pb_ut.cpp b/library/cpp/actors/core/event_pb_ut.cpp
deleted file mode 100644
index 0dfd173651..0000000000
--- a/library/cpp/actors/core/event_pb_ut.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-#include "event_pb.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/protos/unittests.pb.h>
-
-Y_UNIT_TEST_SUITE(TEventSerialization) {
- struct TMockEvent: public NActors::IEventBase {
- TBigMessage* msg;
- bool
- SerializeToArcadiaStream(NActors::TChunkSerializer* chunker) const override {
- return msg->SerializeToZeroCopyStream(chunker);
- }
- bool IsSerializable() const override {
- return true;
- }
- TString ToStringHeader() const override {
- return TString();
- }
- virtual TString Serialize() const {
- return TString();
- }
- ui32 Type() const override {
- return 0;
- };
- };
-
- Y_UNIT_TEST(Coroutine) {
- TString strA(507, 'a');
- TString strB(814, 'b');
- TString strC(198, 'c');
-
- TBigMessage bm;
-
- TSimple* simple0 = bm.AddSimples();
- simple0->SetStr1(strA);
- simple0->SetStr2(strB);
- simple0->SetNumber1(213431324);
-
- TSimple* simple1 = bm.AddSimples();
- simple1->SetStr1(strC);
- simple1->SetStr2(strA);
- simple1->SetNumber1(21039313);
-
- bm.AddManyStr(strA);
- bm.AddManyStr(strC);
- bm.AddManyStr(strB);
-
- bm.SetOneMoreStr(strB);
- bm.SetYANumber(394143);
-
- TString bmSerialized;
- Y_PROTOBUF_SUPPRESS_NODISCARD bm.SerializeToString(&bmSerialized);
- UNIT_ASSERT_UNEQUAL(bmSerialized.size(), 0);
-
- NActors::TCoroutineChunkSerializer chunker;
- for (int i = 0; i < 4; ++i) {
- TMockEvent event;
- event.msg = &bm;
- chunker.SetSerializingEvent(&event);
- char buf1[87];
- TString bmChunkedSerialized;
- while (!chunker.IsComplete()) {
- auto range = chunker.FeedBuf(&buf1[0], sizeof(buf1));
- for (auto [data, size] : range) {
- bmChunkedSerialized.append(data, size);
- }
- }
- UNIT_ASSERT_EQUAL(bmSerialized, bmChunkedSerialized);
- }
- }
-}
diff --git a/library/cpp/actors/core/events.h b/library/cpp/actors/core/events.h
deleted file mode 100644
index 911a76d35e..0000000000
--- a/library/cpp/actors/core/events.h
+++ /dev/null
@@ -1,224 +0,0 @@
-#pragma once
-
-#include "event.h"
-#include "event_pb.h"
-
-#include <library/cpp/actors/protos/actors.pb.h>
-#include <util/system/unaligned_mem.h>
-
-namespace NActors {
- struct TEvents {
- enum EEventSpace {
- ES_HELLOWORLD = 0,
- ES_SYSTEM = 1,
- ES_INTERCONNECT = 2,
- ES_INTERCONNECT_MSGBUS = 3,
- ES_DNS = 4,
- ES_SOCKET_POLLER = 5,
- ES_LOGGER = 6,
- ES_MON = 7,
- ES_INTERCONNECT_TCP = 8,
- ES_PROFILER = 9,
- ES_YF = 10,
- ES_HTTP = 11,
- ES_PGWIRE = 12,
-
- ES_USERSPACE = 4096,
-
- ES_PRIVATE = (1 << 15) - 16,
- ES_MAX = (1 << 15),
- };
-
-#define EventSpaceBegin(eventSpace) (eventSpace << 16u)
-#define EventSpaceEnd(eventSpace) ((eventSpace << 16u) + (1u << 16u))
-
- struct THelloWorld {
- enum {
- Start = EventSpaceBegin(ES_HELLOWORLD),
- Ping,
- Pong,
- Blob,
- End
- };
-
- static_assert(End < EventSpaceEnd(ES_HELLOWORLD), "expect End < EventSpaceEnd(ES_HELLOWORLD)");
- };
-
- struct TEvPing: public TEventBase<TEvPing, THelloWorld::Ping> {
- DEFINE_SIMPLE_NONLOCAL_EVENT(TEvPing, "HelloWorld: Ping");
- };
-
- struct TEvPong: public TEventBase<TEvPong, THelloWorld::Pong> {
- DEFINE_SIMPLE_NONLOCAL_EVENT(TEvPong, "HelloWorld: Pong");
- };
-
- struct TEvBlob: public TEventBase<TEvBlob, THelloWorld::Blob> {
- const TString Blob;
-
- TEvBlob(const TString& blob) noexcept
- : Blob(blob)
- {
- }
-
- TString ToStringHeader() const noexcept override {
- return "THelloWorld::Blob";
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override {
- return serializer->WriteString(&Blob);
- }
-
- static IEventBase* Load(TEventSerializedData* bufs) noexcept {
- return new TEvBlob(bufs->GetString());
- }
-
- bool IsSerializable() const override {
- return true;
- }
- };
-
- struct TSystem {
- enum {
- Start = EventSpaceBegin(ES_SYSTEM),
- Bootstrap, // generic bootstrap event
- Wakeup, // generic timeout
- Subscribe, // generic subscribe to something
- Unsubscribe, // generic unsubscribe from something
- Delivered, // event delivered
- Undelivered, // event undelivered
- Poison, // request actor to shutdown
- Completed, // generic async job result event
- PoisonTaken, // generic Poison taken (reply to PoisonPill event, i.e. died completely)
- FlushLog,
- CallbackCompletion,
- CallbackException,
- Gone, // Generic notification of actor death
- TrackActor,
- UntrackActor,
- InvokeResult,
- CoroTimeout,
- InvokeQuery,
- Wilson,
- End,
-
- // Compatibility section
- PoisonPill = Poison,
- ActorDied = Gone,
- };
-
- static_assert(End < EventSpaceEnd(ES_SYSTEM), "expect End < EventSpaceEnd(ES_SYSTEM)");
- };
-
- struct TEvBootstrap: public TEventBase<TEvBootstrap, TSystem::Bootstrap> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvBootstrap, "System: TEvBootstrap")
- };
-
- struct TEvPoison : public TEventBase<TEvPoison, TSystem::Poison> {
- DEFINE_SIMPLE_NONLOCAL_EVENT(TEvPoison, "System: TEvPoison")
- };
-
- struct TEvWakeup: public TEventBase<TEvWakeup, TSystem::Wakeup> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvWakeup, "System: TEvWakeup")
-
- TEvWakeup(ui64 tag = 0) : Tag(tag) { }
-
- const ui64 Tag = 0;
- };
-
- struct TEvSubscribe: public TEventBase<TEvSubscribe, TSystem::Subscribe> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSubscribe, "System: TEvSubscribe")
- };
-
- struct TEvUnsubscribe: public TEventBase<TEvUnsubscribe, TSystem::Unsubscribe> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvUnsubscribe, "System: TEvUnsubscribe")
- };
-
- struct TEvUndelivered: public TEventBase<TEvUndelivered, TSystem::Undelivered> {
- enum EReason {
- ReasonUnknown,
- ReasonActorUnknown,
- Disconnected
- };
- const ui32 SourceType;
- const EReason Reason;
- const bool Unsure;
- const TString Data;
-
- TEvUndelivered(ui32 sourceType, ui32 reason, bool unsure = false)
- : SourceType(sourceType)
- , Reason(static_cast<EReason>(reason))
- , Unsure(unsure)
- , Data(MakeData(sourceType, reason))
- {}
-
- TString ToStringHeader() const override;
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override;
- static IEventBase* Load(TEventSerializedData* bufs);
- bool IsSerializable() const override;
-
- ui32 CalculateSerializedSize() const override { return 2 * sizeof(ui32); }
-
- static void Out(IOutputStream& o, EReason x);
-
- private:
- static TString MakeData(ui32 sourceType, ui32 reason) {
- TString s = TString::Uninitialized(sizeof(ui32) + sizeof(ui32));
- char *p = s.Detach();
- WriteUnaligned<ui32>(p + 0, sourceType);
- WriteUnaligned<ui32>(p + 4, reason);
- return s;
- }
- };
-
- struct TEvCompleted: public TEventBase<TEvCompleted, TSystem::Completed> {
- const ui32 Id;
- const ui32 Status;
- TEvCompleted(ui32 id = 0, ui32 status = 0)
- : Id(id)
- , Status(status)
- {
- }
-
- DEFINE_SIMPLE_LOCAL_EVENT(TEvCompleted, "System: TEvCompleted")
- };
-
- struct TEvPoisonTaken: public TEventBase<TEvPoisonTaken, TSystem::PoisonTaken> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvPoisonTaken, "System: TEvPoisonTaken")
- };
-
- struct TEvFlushLog: public TEventBase<TEvFlushLog, TSystem::FlushLog> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvFlushLog, "System: TEvFlushLog")
- };
-
- struct TEvCallbackException: public TEventPB<TEvCallbackException,
- NActorsProto::TCallbackException,
- TSystem::CallbackException> {
- TEvCallbackException(const TActorId& id, const TString& msg) {
- ActorIdToProto(id, Record.MutableActorId());
- Record.SetExceptionMessage(msg);
- }
- };
-
- struct TEvCallbackCompletion: public TEventPB<TEvCallbackCompletion,
- NActorsProto::TActorId,
- TSystem::CallbackCompletion> {
- TEvCallbackCompletion(const TActorId& id) {
- ActorIdToProto(id, &Record);
- }
- };
-
- struct TEvGone: public TEventBase<TEvGone, TSystem::Gone> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvGone, "System: TEvGone")
- };
-
- struct TEvInvokeResult;
-
- using TEvPoisonPill = TEvPoison; // Legacy name, deprecated
- using TEvActorDied = TEvGone;
- };
-}
-
-template <>
-inline void Out<NActors::TEvents::TEvUndelivered::EReason>(IOutputStream& o, NActors::TEvents::TEvUndelivered::EReason x) {
- NActors::TEvents::TEvUndelivered::Out(o, x);
-}
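The deleted events.h numbers every event type as (event space << 16) + ordinal, so each space owns a contiguous 64K range guarded by the static_assert against EventSpaceEnd. A minimal standalone sketch of that numbering, reusing the macro definitions from the header above (the enum values are illustrative):

#include <cstdint>

#define EventSpaceBegin(eventSpace) (eventSpace << 16u)
#define EventSpaceEnd(eventSpace) ((eventSpace << 16u) + (1u << 16u))

enum EEventSpace { ES_HELLOWORLD = 0, ES_SYSTEM = 1 };

enum EHelloWorld : uint32_t {
    Start = EventSpaceBegin(ES_HELLOWORLD), // 0 << 16 == 0
    Ping,                                   // Start + 1
    Pong,                                   // Start + 2
    End
};

// Each type id must stay inside its 64K-wide space, exactly as the deleted header asserts.
static_assert(End < EventSpaceEnd(ES_HELLOWORLD), "expect End < EventSpaceEnd(ES_HELLOWORLD)");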
diff --git a/library/cpp/actors/core/events_undelivered.cpp b/library/cpp/actors/core/events_undelivered.cpp
deleted file mode 100644
index 7804d5c09d..0000000000
--- a/library/cpp/actors/core/events_undelivered.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-#include "events.h"
-#include "actorsystem.h"
-
-namespace NActors {
- TString TEvents::TEvUndelivered::ToStringHeader() const {
- return "TSystem::Undelivered";
- }
-
- bool TEvents::TEvUndelivered::SerializeToArcadiaStream(TChunkSerializer *serializer) const {
- Y_ABORT_UNLESS(!Unsure); // these are local-only events generated by Interconnect
- return serializer->WriteString(&Data);
- }
-
- void TEvents::TEvUndelivered::Out(IOutputStream& o, EReason x) {
- switch (x) {
- case ReasonActorUnknown:
- o << "ActorUnknown";
- break;
- case Disconnected:
- o << "Disconnected";
- break;
- default:
- o << "Undefined";
- break;
- }
- }
-
- bool TEvents::TEvUndelivered::IsSerializable() const {
- return true;
- }
-
- IEventBase* TEvents::TEvUndelivered::Load(TEventSerializedData* bufs) {
- TString str = bufs->GetString();
- Y_ABORT_UNLESS(str.size() == (sizeof(ui32) + sizeof(ui32)));
- const char* p = str.data();
- const ui64 sourceType = ReadUnaligned<ui32>(p + 0);
- const ui64 reason = ReadUnaligned<ui32>(p + 4);
- return new TEvUndelivered(sourceType, reason);
- }
-
- std::unique_ptr<IEventHandle> IEventHandle::ForwardOnNondelivery(std::unique_ptr<IEventHandle>&& ev, ui32 reason, bool unsure) {
- if (ev->Flags & FlagForwardOnNondelivery) {
- const ui32 updatedFlags = ev->Flags & ~(FlagForwardOnNondelivery | FlagSubscribeOnSession);
- const TActorId recp = ev->OnNondeliveryHolder ? ev->OnNondeliveryHolder->Recipient : TActorId();
-
- if (ev->Event)
- return std::unique_ptr<IEventHandle>(new IEventHandle(recp, ev->Sender, ev->Event.Release(), updatedFlags, ev->Cookie, &ev->Recipient, std::move(ev->TraceId)));
- else
- return std::unique_ptr<IEventHandle>(new IEventHandle(ev->Type, updatedFlags, recp, ev->Sender, ev->Buffer, ev->Cookie, &ev->Recipient, std::move(ev->TraceId)));
- }
-
- if (ev->Flags & FlagTrackDelivery) {
- const ui32 updatedFlags = ev->Flags & ~(FlagTrackDelivery | FlagSubscribeOnSession | FlagGenerateUnsureUndelivered);
- return std::unique_ptr<IEventHandle>(new IEventHandle(ev->Sender, ev->Recipient, new TEvents::TEvUndelivered(ev->Type, reason, unsure), updatedFlags,
- ev->Cookie, nullptr, std::move(ev->TraceId)));
- }
- return {};
- }
-}
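TEvUndelivered above travels as exactly eight bytes: sourceType at offset 0 and reason at offset 4, written by MakeData with WriteUnaligned and read back in Load with ReadUnaligned. A standalone round-trip sketch of that layout, with std::memcpy standing in for the library's unaligned helpers and illustrative values:

#include <cassert>
#include <cstdint>
#include <cstring>
#include <string>

int main() {
    const uint32_t sourceType = 42, reason = 1; // illustrative values

    // Pack: two 32-bit words back to back in native byte order, as MakeData does.
    std::string data(2 * sizeof(uint32_t), '\0');
    std::memcpy(&data[0], &sourceType, sizeof(uint32_t));
    std::memcpy(&data[4], &reason, sizeof(uint32_t));

    // Unpack: the same offsets Load reads from the received buffer.
    uint32_t parsedSource = 0, parsedReason = 0;
    std::memcpy(&parsedSource, data.data() + 0, sizeof(uint32_t));
    std::memcpy(&parsedReason, data.data() + 4, sizeof(uint32_t));

    assert(parsedSource == sourceType && parsedReason == reason);
    return 0;
}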
diff --git a/library/cpp/actors/core/executelater.h b/library/cpp/actors/core/executelater.h
deleted file mode 100644
index 1d5b5fa5a9..0000000000
--- a/library/cpp/actors/core/executelater.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#pragma once
-
-#include "actor_bootstrapped.h"
-
-#include <utility>
-
-namespace NActors {
- template <typename TCallback>
- class TExecuteLater: public TActorBootstrapped<TExecuteLater<TCallback>> {
- public:
-
- static constexpr char ActorName[] = "AS_EXECUTE_LATER";
-
- TExecuteLater(
- TCallback&& callback,
- IActor::EActivityType activityType,
- ui32 channel = 0,
- ui64 cookie = 0,
- const TActorId& reportCompletionTo = TActorId(),
- const TActorId& reportExceptionTo = TActorId()) noexcept
- : Callback(std::move(callback))
- , Channel(channel)
- , Cookie(cookie)
- , ReportCompletionTo(reportCompletionTo)
- , ReportExceptionTo(reportExceptionTo)
- {
- this->SetActivityType(activityType);
- }
-
- void Bootstrap(const TActorContext& ctx) noexcept {
- try {
- {
- /* RAII, Callback should be destroyed right before sending
- TEvCallbackCompletion */
-
- auto local = std::move(Callback);
- using T = decltype(local);
-
- if constexpr (std::is_invocable_v<T, const TActorContext&>) {
- local(ctx);
- } else {
- local();
- }
- }
-
- if (ReportCompletionTo) {
- ctx.Send(ReportCompletionTo,
- new TEvents::TEvCallbackCompletion(ctx.SelfID),
- Channel, Cookie);
- }
- } catch (...) {
- if (ReportExceptionTo) {
- const TString msg = CurrentExceptionMessage();
- ctx.Send(ReportExceptionTo,
- new TEvents::TEvCallbackException(ctx.SelfID, msg),
- Channel, Cookie);
- }
- }
-
- this->Die(ctx);
- }
-
- private:
- TCallback Callback;
- const ui32 Channel;
- const ui64 Cookie;
- const TActorId ReportCompletionTo;
- const TActorId ReportExceptionTo;
- };
-
- template <typename T>
- IActor* CreateExecuteLaterActor(
- T&& func,
- IActor::EActivityType activityType,
- ui32 channel = 0,
- ui64 cookie = 0,
- const TActorId& reportCompletionTo = TActorId(),
- const TActorId& reportExceptionTo = TActorId()) noexcept {
- return new TExecuteLater<T>(std::forward<T>(func),
- activityType,
- channel,
- cookie,
- reportCompletionTo,
- reportExceptionTo);
- }
-}
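CreateExecuteLaterActor above wraps an arbitrary callable in a one-shot actor that reports completion or exceptions back to the caller. A usage sketch under the assumption that it is called from inside another actor, where ctx.SelfID and ctx.Register(...) are available from the actor runtime; the empty activity type is a placeholder, not a recommendation:

#include "executelater.h"

void ScheduleDeferredWork(const NActors::TActorContext& ctx) {
    NActors::IActor* later = NActors::CreateExecuteLaterActor(
        [] {
            // deferred work runs exactly once inside the spawned actor
        },
        NActors::IActor::EActivityType{}, // placeholder; real code passes a registered activity value
        /*channel=*/0,
        /*cookie=*/0,
        /*reportCompletionTo=*/ctx.SelfID, // receives TEvCallbackCompletion on success
        /*reportExceptionTo=*/ctx.SelfID); // receives TEvCallbackException if the callback throws
    ctx.Register(later);                   // registration call assumed from the surrounding library
}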
diff --git a/library/cpp/actors/core/executor_pool.h b/library/cpp/actors/core/executor_pool.h
deleted file mode 100644
index 5498a6403e..0000000000
--- a/library/cpp/actors/core/executor_pool.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#pragma once
-
-#include "event.h"
-#include "scheduler_queue.h"
-
-namespace NActors {
- class TActorSystem;
- struct TMailboxHeader;
- struct TWorkerContext;
- struct TExecutorPoolStats;
- struct TExecutorThreadStats;
- class ISchedulerCookie;
-
- struct TCpuConsumption {
- double ConsumedUs = 0;
- double BookedUs = 0;
- ui64 NotEnoughCpuExecutions = 0;
-
- void Add(const TCpuConsumption& other) {
- ConsumedUs += other.ConsumedUs;
- BookedUs += other.BookedUs;
- NotEnoughCpuExecutions += other.NotEnoughCpuExecutions;
- }
- };
-
- class IExecutorPool : TNonCopyable {
- public:
- const ui32 PoolId;
-
- TAtomic ActorRegistrations;
- TAtomic DestroyedActors;
-
- IExecutorPool(ui32 poolId)
- : PoolId(poolId)
- , ActorRegistrations(0)
- , DestroyedActors(0)
- {
- }
-
- virtual ~IExecutorPool() {
- }
-
- // for workers
- virtual void Initialize(TWorkerContext& wctx) {
- Y_UNUSED(wctx);
- }
- virtual ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) = 0;
- virtual void ReclaimMailbox(TMailboxType::EType mailboxType, ui32 hint, TWorkerId workerId, ui64 revolvingCounter) = 0;
- virtual TMailboxHeader *ResolveMailbox(ui32 hint) = 0;
-
- /**
-         * Schedule a one-shot event that will be sent at the given time point in the future.
-         *
-         * @param deadline the wallclock time point in the future when the event must be sent
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
-         * @param workerId index of the thread that will perform event dispatching
- */
- virtual void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) = 0;
-
- /**
-         * Schedule a one-shot event that will be sent at the given time point in the future.
-         *
-         * @param deadline the monotonic time point in the future when the event must be sent
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
-         * @param workerId index of the thread that will perform event dispatching
- */
- virtual void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) = 0;
-
- /**
-         * Schedule a one-shot event that will be sent after the given delay.
-         *
-         * @param delta the time from now to delay event sending
-         * @param ev the event to send
-         * @param cookie cookie that will be piggybacked with the event
-         * @param workerId index of the thread that will perform event dispatching
- */
- virtual void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) = 0;
-
- // for actorsystem
- virtual bool Send(TAutoPtr<IEventHandle>& ev) = 0;
- virtual bool SpecificSend(TAutoPtr<IEventHandle>& ev) = 0;
- virtual void ScheduleActivation(ui32 activation) = 0;
- virtual void SpecificScheduleActivation(ui32 activation) = 0;
- virtual void ScheduleActivationEx(ui32 activation, ui64 revolvingCounter) = 0;
- virtual TActorId Register(IActor* actor, TMailboxType::EType mailboxType, ui64 revolvingCounter, const TActorId& parentId) = 0;
- virtual TActorId Register(IActor* actor, TMailboxHeader* mailbox, ui32 hint, const TActorId& parentId) = 0;
-
- // lifecycle stuff
- virtual void Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) = 0;
- virtual void Start() = 0;
- virtual void PrepareStop() = 0;
- virtual void Shutdown() = 0;
- virtual bool Cleanup() = 0;
-
- virtual void GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- // TODO: make pure virtual and override everywhere
- Y_UNUSED(poolStats);
- Y_UNUSED(statsCopy);
- }
-
- virtual TString GetName() const {
- return TString();
- }
-
- virtual ui32 GetThreads() const {
- return 1;
- }
-
- virtual i16 GetPriority() const {
- return 0;
- }
-
- // generic
- virtual TAffinity* Affinity() const = 0;
-
- virtual void SetRealTimeMode() const {}
-
- virtual i16 GetThreadCount() const {
- return 1;
- }
-
- virtual void SetThreadCount(i16 threads) {
- Y_UNUSED(threads);
- }
-
- virtual void SetSpinThresholdCycles(ui32 cycles) {
- Y_UNUSED(cycles);
- }
-
- virtual i16 GetBlockingThreadCount() const {
- return 0;
- }
-
- virtual i16 GetDefaultThreadCount() const {
- return 1;
- }
-
- virtual i16 GetMinThreadCount() const {
- return 1;
- }
-
- virtual i16 GetMaxThreadCount() const {
- return 1;
- }
-
- virtual TCpuConsumption GetThreadCpuConsumption(i16 threadIdx) {
- Y_UNUSED(threadIdx);
- return TCpuConsumption{0.0, 0.0};
- }
- };
-
-}
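The three Schedule overloads above express the same operation with different clocks: a wallclock deadline, a monotonic deadline, or a relative delay. A sketch of how a concrete pool can funnel the wallclock form into the delta form, mirroring the deleted TBasicExecutorPool::Schedule(TInstant, ...) later in this patch (TMyExecutorPool is a hypothetical subclass):

void TMyExecutorPool::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
    // Convert the wallclock deadline into a delay against the actor system clock
    // and delegate to the TDuration overload, as TBasicExecutorPool does.
    Schedule(deadline - ActorSystem->Timestamp(), ev, cookie, workerId);
}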
diff --git a/library/cpp/actors/core/executor_pool_base.cpp b/library/cpp/actors/core/executor_pool_base.cpp
deleted file mode 100644
index 1c9a536b9b..0000000000
--- a/library/cpp/actors/core/executor_pool_base.cpp
+++ /dev/null
@@ -1,271 +0,0 @@
-#include "actorsystem.h"
-#include "actor.h"
-#include "executor_pool_base.h"
-#include "executor_pool_basic_feature_flags.h"
-#include "executor_thread.h"
-#include "mailbox.h"
-#include "probes.h"
-#include <library/cpp/actors/util/datetime.h>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- void DoActorInit(TActorSystem* sys, IActor* actor, const TActorId& self, const TActorId& owner) {
- actor->SelfActorId = self;
- actor->DoActorInit();
- actor->Registered(sys, owner);
- }
-
- TExecutorPoolBaseMailboxed::TExecutorPoolBaseMailboxed(ui32 poolId)
- : IExecutorPool(poolId)
- , ActorSystem(nullptr)
- , MailboxTable(new TMailboxTable)
- {}
-
- TExecutorPoolBaseMailboxed::~TExecutorPoolBaseMailboxed() {
- MailboxTable.Destroy();
- }
-
-#if defined(ACTORSLIB_COLLECT_EXEC_STATS)
- void TExecutorPoolBaseMailboxed::RecalculateStuckActors(TExecutorThreadStats& stats) const {
- if (!ActorSystem || !ActorSystem->MonitorStuckActors()) {
- return;
- }
-
- const TMonotonic now = ActorSystem->Monotonic();
-
- for (auto& u : stats.UsageByActivity) {
- u.fill(0);
- }
-
- auto accountUsage = [&](ui32 activityType, double usage) {
- Y_ABORT_UNLESS(0 <= usage);
- Y_ABORT_UNLESS(usage <= 1);
- int bin = Min<int>(9, usage * 10);
- ++stats.UsageByActivity[activityType][bin];
- };
-
- std::fill(stats.StuckActorsByActivity.begin(), stats.StuckActorsByActivity.end(), 0);
-
- with_lock (StuckObserverMutex) {
- for (size_t i = 0; i < Actors.size(); ++i) {
- IActor *actor = Actors[i];
- Y_ABORT_UNLESS(actor->StuckIndex == i);
- const TDuration delta = now - actor->LastReceiveTimestamp;
- if (delta > TDuration::Seconds(30)) {
- ++stats.StuckActorsByActivity[actor->GetActivityType()];
- }
- accountUsage(actor->GetActivityType(), actor->GetUsage(GetCycleCountFast()));
- }
- for (const auto& [activityType, usage] : DeadActorsUsage) {
- accountUsage(activityType, usage);
- }
- DeadActorsUsage.clear();
- }
- }
-#endif
-
- TExecutorPoolBase::TExecutorPoolBase(ui32 poolId, ui32 threads, TAffinity* affinity)
- : TExecutorPoolBaseMailboxed(poolId)
- , PoolThreads(threads)
- , ThreadsAffinity(affinity)
- {}
-
- TExecutorPoolBase::~TExecutorPoolBase() {
- while (Activations.Pop(0))
- ;
- }
-
- void TExecutorPoolBaseMailboxed::ReclaimMailbox(TMailboxType::EType mailboxType, ui32 hint, TWorkerId workerId, ui64 revolvingWriteCounter) {
- Y_UNUSED(workerId);
- MailboxTable->ReclaimMailbox(mailboxType, hint, revolvingWriteCounter);
- }
-
- TMailboxHeader *TExecutorPoolBaseMailboxed::ResolveMailbox(ui32 hint) {
- return MailboxTable->Get(hint);
- }
-
- ui64 TExecutorPoolBaseMailboxed::AllocateID() {
- return ActorSystem->AllocateIDSpace(1);
- }
-
- bool TExecutorPoolBaseMailboxed::Send(TAutoPtr<IEventHandle>& ev) {
- Y_DEBUG_ABORT_UNLESS(ev->GetRecipientRewrite().PoolID() == PoolId);
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- RelaxedStore(&ev->SendTime, (::NHPTimer::STime)GetCycleCountFast());
-#endif
- if (TlsThreadContext) {
- TlsThreadContext->IsCurrentRecipientAService = ev->Recipient.IsService();
- }
- return MailboxTable->SendTo(ev, this);
- }
-
- bool TExecutorPoolBaseMailboxed::SpecificSend(TAutoPtr<IEventHandle>& ev) {
- Y_DEBUG_ABORT_UNLESS(ev->GetRecipientRewrite().PoolID() == PoolId);
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- RelaxedStore(&ev->SendTime, (::NHPTimer::STime)GetCycleCountFast());
-#endif
- if (TlsThreadContext) {
- TlsThreadContext->IsCurrentRecipientAService = ev->Recipient.IsService();
- }
- return MailboxTable->SpecificSendTo(ev, this);
- }
-
- void TExecutorPoolBase::ScheduleActivation(ui32 activation) {
- ScheduleActivationEx(activation, AtomicIncrement(ActivationsRevolvingCounter));
- }
-
- Y_FORCE_INLINE bool IsAllowedToCapture(IExecutorPool *self) {
- if (TlsThreadContext->Pool != self || TlsThreadContext->CapturedType == ESendingType::Tail) {
- return false;
- }
- return TlsThreadContext->SendingType != ESendingType::Common;
- }
-
- Y_FORCE_INLINE bool IsTailSend(IExecutorPool *self) {
- return TlsThreadContext->Pool == self && TlsThreadContext->SendingType == ESendingType::Tail && TlsThreadContext->CapturedType != ESendingType::Tail;
- }
-
- void TExecutorPoolBase::SpecificScheduleActivation(ui32 activation) {
- if (NFeatures::IsCommon() && IsAllowedToCapture(this) || IsTailSend(this)) {
- std::swap(TlsThreadContext->CapturedActivation, activation);
- TlsThreadContext->CapturedType = TlsThreadContext->SendingType;
- }
- if (activation) {
- ScheduleActivationEx(activation, AtomicIncrement(ActivationsRevolvingCounter));
- }
- }
-
- TActorId TExecutorPoolBaseMailboxed::Register(IActor* actor, TMailboxType::EType mailboxType, ui64 revolvingWriteCounter, const TActorId& parentId) {
- NHPTimer::STime hpstart = GetCycleCountFast();
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- ui32 at = actor->GetActivityType();
- Y_DEBUG_ABORT_UNLESS(at < Stats.ActorsAliveByActivity.size());
- if (at >= Stats.MaxActivityType()) {
- at = TActorTypeOperator::GetActorActivityIncorrectIndex();
- Y_ABORT_UNLESS(at < Stats.ActorsAliveByActivity.size());
- }
- AtomicIncrement(Stats.ActorsAliveByActivity[at]);
-#endif
- AtomicIncrement(ActorRegistrations);
-
-        // first step - find a good enough mailbox
- ui32 hint = 0;
- TMailboxHeader* mailbox = nullptr;
-
- if (revolvingWriteCounter == 0)
- revolvingWriteCounter = AtomicIncrement(RegisterRevolvingCounter);
-
- {
- ui32 hintBackoff = 0;
-
- while (hint == 0) {
- hint = MailboxTable->AllocateMailbox(mailboxType, ++revolvingWriteCounter);
- mailbox = MailboxTable->Get(hint);
-
- if (!mailbox->LockFromFree()) {
- MailboxTable->ReclaimMailbox(mailboxType, hintBackoff, ++revolvingWriteCounter);
- hintBackoff = hint;
- hint = 0;
- }
- }
-
- MailboxTable->ReclaimMailbox(mailboxType, hintBackoff, ++revolvingWriteCounter);
- }
-
- const ui64 localActorId = AllocateID();
-
- // ok, got mailbox
- mailbox->AttachActor(localActorId, actor);
-
- // do init
- const TActorId actorId(ActorSystem->NodeId, PoolId, localActorId, hint);
- DoActorInit(ActorSystem, actor, actorId, parentId);
-
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- if (ActorSystem->MonitorStuckActors()) {
- with_lock (StuckObserverMutex) {
- Y_ABORT_UNLESS(actor->StuckIndex == Max<size_t>());
- actor->StuckIndex = Actors.size();
- Actors.push_back(actor);
- }
- }
-#endif
-
- // Once we unlock the mailbox the actor starts running and we cannot use the pointer any more
- actor = nullptr;
-
- switch (mailboxType) {
- case TMailboxType::Simple:
- UnlockFromExecution((TMailboxTable::TSimpleMailbox*)mailbox, this, false, hint, MaxWorkers, ++revolvingWriteCounter);
- break;
- case TMailboxType::Revolving:
- UnlockFromExecution((TMailboxTable::TRevolvingMailbox*)mailbox, this, false, hint, MaxWorkers, ++revolvingWriteCounter);
- break;
- case TMailboxType::HTSwap:
- UnlockFromExecution((TMailboxTable::THTSwapMailbox*)mailbox, this, false, hint, MaxWorkers, ++revolvingWriteCounter);
- break;
- case TMailboxType::ReadAsFilled:
- UnlockFromExecution((TMailboxTable::TReadAsFilledMailbox*)mailbox, this, false, hint, MaxWorkers, ++revolvingWriteCounter);
- break;
- case TMailboxType::TinyReadAsFilled:
- UnlockFromExecution((TMailboxTable::TTinyReadAsFilledMailbox*)mailbox, this, false, hint, MaxWorkers, ++revolvingWriteCounter);
- break;
- default:
- Y_ABORT();
- }
-
- NHPTimer::STime elapsed = GetCycleCountFast() - hpstart;
- if (elapsed > 1000000) {
- LWPROBE(SlowRegisterNew, PoolId, NHPTimer::GetSeconds(elapsed) * 1000.0);
- }
-
- return actorId;
- }
-
- TActorId TExecutorPoolBaseMailboxed::Register(IActor* actor, TMailboxHeader* mailbox, ui32 hint, const TActorId& parentId) {
- NHPTimer::STime hpstart = GetCycleCountFast();
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- ui32 at = actor->GetActivityType();
- if (at >= Stats.MaxActivityType())
- at = 0;
- AtomicIncrement(Stats.ActorsAliveByActivity[at]);
-#endif
- AtomicIncrement(ActorRegistrations);
-
- const ui64 localActorId = AllocateID();
- mailbox->AttachActor(localActorId, actor);
-
- const TActorId actorId(ActorSystem->NodeId, PoolId, localActorId, hint);
- DoActorInit(ActorSystem, actor, actorId, parentId);
-
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- if (ActorSystem->MonitorStuckActors()) {
- with_lock (StuckObserverMutex) {
- Y_ABORT_UNLESS(actor->StuckIndex == Max<size_t>());
- actor->StuckIndex = Actors.size();
- Actors.push_back(actor);
- }
- }
-#endif
-
- NHPTimer::STime elapsed = GetCycleCountFast() - hpstart;
- if (elapsed > 1000000) {
- LWPROBE(SlowRegisterAdd, PoolId, NHPTimer::GetSeconds(elapsed) * 1000.0);
- }
-
- return actorId;
- }
-
- TAffinity* TExecutorPoolBase::Affinity() const {
- return ThreadsAffinity.Get();
- }
-
- bool TExecutorPoolBaseMailboxed::Cleanup() {
- return MailboxTable->Cleanup();
- }
-
- ui32 TExecutorPoolBase::GetThreads() const {
- return PoolThreads;
- }
-}
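RecalculateStuckActors above folds each actor's utilization fraction into a ten-bucket histogram with Min<int>(9, usage * 10). A standalone sketch of that binning rule, with std::min in place of the library's Min helper:

#include <algorithm>
#include <cassert>

int UsageBucket(double usage) {
    // usage is a fraction in [0, 1]; the clamp keeps the exact value 1.0 in bucket 9
    return std::min(9, static_cast<int>(usage * 10));
}

int main() {
    assert(UsageBucket(0.0) == 0);
    assert(UsageBucket(0.25) == 2);
    assert(UsageBucket(0.99) == 9);
    assert(UsageBucket(1.0) == 9);
    return 0;
}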
diff --git a/library/cpp/actors/core/executor_pool_base.h b/library/cpp/actors/core/executor_pool_base.h
deleted file mode 100644
index 6bfabb527f..0000000000
--- a/library/cpp/actors/core/executor_pool_base.h
+++ /dev/null
@@ -1,62 +0,0 @@
-#pragma once
-
-#include "executor_pool.h"
-#include "executor_thread.h"
-#include "mon_stats.h"
-#include "scheduler_queue.h"
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/unordered_cache.h>
-#include <library/cpp/actors/util/threadparkpad.h>
-
-namespace NActors {
- class TActorSystem;
-
- class TExecutorPoolBaseMailboxed: public IExecutorPool {
- protected:
- TActorSystem* ActorSystem;
- THolder<TMailboxTable> MailboxTable;
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
-        // Need to have a per-pool object to collect stats like actor registrations (because
- // registrations might be done in threads from other pools)
- TExecutorThreadStats Stats;
-
- // Stuck actor monitoring
- TMutex StuckObserverMutex;
- std::vector<IActor*> Actors;
- mutable std::vector<std::tuple<ui32, double>> DeadActorsUsage;
- friend class TExecutorThread;
- void RecalculateStuckActors(TExecutorThreadStats& stats) const;
-#endif
- TAtomic RegisterRevolvingCounter = 0;
- ui64 AllocateID();
- public:
- explicit TExecutorPoolBaseMailboxed(ui32 poolId);
- ~TExecutorPoolBaseMailboxed();
- void ReclaimMailbox(TMailboxType::EType mailboxType, ui32 hint, TWorkerId workerId, ui64 revolvingWriteCounter) override;
- TMailboxHeader *ResolveMailbox(ui32 hint) override;
- bool Send(TAutoPtr<IEventHandle>& ev) override;
- bool SpecificSend(TAutoPtr<IEventHandle>& ev) override;
- TActorId Register(IActor* actor, TMailboxType::EType mailboxType, ui64 revolvingWriteCounter, const TActorId& parentId) override;
- TActorId Register(IActor* actor, TMailboxHeader* mailbox, ui32 hint, const TActorId& parentId) override;
- bool Cleanup() override;
- };
-
- class TExecutorPoolBase: public TExecutorPoolBaseMailboxed {
- protected:
- const i16 PoolThreads;
- TIntrusivePtr<TAffinity> ThreadsAffinity;
- TAtomic Semaphore = 0;
- TUnorderedCache<ui32, 512, 4> Activations;
- TAtomic ActivationsRevolvingCounter = 0;
- volatile bool StopFlag = false;
- public:
- TExecutorPoolBase(ui32 poolId, ui32 threads, TAffinity* affinity);
- ~TExecutorPoolBase();
- void ScheduleActivation(ui32 activation) override;
- void SpecificScheduleActivation(ui32 activation) override;
- TAffinity* Affinity() const override;
- ui32 GetThreads() const override;
- };
-
- void DoActorInit(TActorSystem*, IActor*, const TActorId&, const TActorId&);
-}
diff --git a/library/cpp/actors/core/executor_pool_basic.cpp b/library/cpp/actors/core/executor_pool_basic.cpp
deleted file mode 100644
index 4a9019e26d..0000000000
--- a/library/cpp/actors/core/executor_pool_basic.cpp
+++ /dev/null
@@ -1,749 +0,0 @@
-#include "executor_pool_basic.h"
-#include "executor_pool_basic_feature_flags.h"
-#include "actor.h"
-#include "probes.h"
-#include "mailbox.h"
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/datetime.h>
-
-#ifdef _linux_
-#include <pthread.h>
-#endif
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
-
- const double TWaitingStatsConstants::HistogramResolutionUs = MaxSpinThersholdUs / BucketCount;
- const ui64 TWaitingStatsConstants::HistogramResolution = NHPTimer::GetCyclesPerSecond() * 0.000001 * HistogramResolutionUs;
-
- constexpr TDuration TBasicExecutorPool::DEFAULT_TIME_PER_MAILBOX;
-
- TBasicExecutorPool::TBasicExecutorPool(
- ui32 poolId,
- ui32 threads,
- ui64 spinThreshold,
- const TString& poolName,
- IHarmonizer *harmonizer,
- TAffinity* affinity,
- TDuration timePerMailbox,
- ui32 eventsPerMailbox,
- int realtimePriority,
- ui32 maxActivityType,
- i16 minThreadCount,
- i16 maxThreadCount,
- i16 defaultThreadCount,
- i16 priority)
- : TExecutorPoolBase(poolId, threads, affinity)
- , DefaultSpinThresholdCycles(spinThreshold * NHPTimer::GetCyclesPerSecond() * 0.000001) // convert microseconds to cycles
- , SpinThresholdCycles(DefaultSpinThresholdCycles)
- , SpinThresholdCyclesPerThread(new NThreading::TPadded<std::atomic<ui64>>[threads])
- , Threads(new NThreading::TPadded<TThreadCtx>[threads])
- , WaitingStats(new TWaitingStats<ui64>[threads])
- , PoolName(poolName)
- , TimePerMailbox(timePerMailbox)
- , EventsPerMailbox(eventsPerMailbox)
- , RealtimePriority(realtimePriority)
- , ThreadUtilization(0)
- , MaxUtilizationCounter(0)
- , MaxUtilizationAccumulator(0)
- , WrongWakenedThreadCount(0)
- , ThreadCount(threads)
- , MinThreadCount(minThreadCount)
- , MaxThreadCount(maxThreadCount)
- , DefaultThreadCount(defaultThreadCount)
- , Harmonizer(harmonizer)
- , Priority(priority)
- {
- if constexpr (NFeatures::IsLocalQueues()) {
- LocalQueues.Reset(new NThreading::TPadded<std::queue<ui32>>[threads]);
- if constexpr (NFeatures::TLocalQueuesFeatureFlags::FIXED_LOCAL_QUEUE_SIZE) {
- LocalQueueSize = *NFeatures::TLocalQueuesFeatureFlags::FIXED_LOCAL_QUEUE_SIZE;
- } else {
- LocalQueueSize = NFeatures::TLocalQueuesFeatureFlags::MIN_LOCAL_QUEUE_SIZE;
- }
- }
- if constexpr (NFeatures::TSpinFeatureFlags::CalcPerThread) {
- for (ui32 idx = 0; idx < threads; ++idx) {
- SpinThresholdCyclesPerThread[idx].store(0);
- }
- }
- if constexpr (NFeatures::TSpinFeatureFlags::UsePseudoMovingWindow) {
- MovingWaitingStats.Reset(new TWaitingStats<double>[threads]);
- }
-
- Y_UNUSED(maxActivityType);
- i16 limit = Min(threads, (ui32)Max<i16>());
- if (DefaultThreadCount) {
- DefaultThreadCount = Min(DefaultThreadCount, limit);
- } else {
- DefaultThreadCount = limit;
- }
-
- MaxThreadCount = Min(Max(MaxThreadCount, DefaultThreadCount), limit);
-
- if (MinThreadCount) {
- MinThreadCount = Max((i16)1, Min(MinThreadCount, DefaultThreadCount));
- } else {
- MinThreadCount = DefaultThreadCount;
- }
- ThreadCount = MaxThreadCount;
- auto semaphore = TSemaphore();
- semaphore.CurrentThreadCount = ThreadCount;
- Semaphore = semaphore.ConverToI64();
- }
-
- TBasicExecutorPool::TBasicExecutorPool(const TBasicExecutorPoolConfig& cfg, IHarmonizer *harmonizer)
- : TBasicExecutorPool(
- cfg.PoolId,
- cfg.Threads,
- cfg.SpinThreshold,
- cfg.PoolName,
- harmonizer,
- new TAffinity(cfg.Affinity),
- cfg.TimePerMailbox,
- cfg.EventsPerMailbox,
- cfg.RealtimePriority,
- 0,
- cfg.MinThreadCount,
- cfg.MaxThreadCount,
- cfg.DefaultThreadCount,
- cfg.Priority
- )
- {
- SetSharedExecutorsCount(cfg.SharedExecutorsCount);
- SoftProcessingDurationTs = cfg.SoftProcessingDurationTs;
- ActorSystemProfile = cfg.ActorSystemProfile;
- }
-
- TBasicExecutorPool::~TBasicExecutorPool() {
- Threads.Destroy();
- }
-
- bool TBasicExecutorPool::GoToSleep(TThreadCtx& threadCtx, TTimers &timers) {
- do {
- timers.HPNow = GetCycleCountFast();
- timers.Elapsed += timers.HPNow - timers.HPStart;
- if (threadCtx.WaitingPad.Park()) // interrupted
- return true;
- timers.HPStart = GetCycleCountFast();
- timers.Parked += timers.HPStart - timers.HPNow;
- } while (AtomicLoad(&threadCtx.WaitingFlag) == TThreadCtx::WS_BLOCKED && !RelaxedLoad(&StopFlag));
- return false;
- }
-
- ui32 TBasicExecutorPool::GoToSpin(TThreadCtx& threadCtx, i64 start, i64 &end) {
- ui32 spinPauseCount = 0;
- i64 spinThresholdCycles = 0;
- if constexpr (NFeatures::TSpinFeatureFlags::CalcPerThread) {
- spinThresholdCycles = SpinThresholdCyclesPerThread[TlsThreadContext->WorkerId].load();
- } else {
- spinThresholdCycles = SpinThresholdCycles.load();
- }
- do {
- end = GetCycleCountFast();
- if (end >= (start + spinThresholdCycles) || AtomicLoad(&threadCtx.WaitingFlag) != TThreadCtx::WS_ACTIVE) {
- return spinPauseCount;
- }
-
- SpinLockPause();
- spinPauseCount++;
- } while (!RelaxedLoad(&StopFlag));
-
- return spinPauseCount;
- }
-
- bool TBasicExecutorPool::GoToWaiting(TThreadCtx& threadCtx, TTimers &timers, bool needToBlock) {
-#if defined ACTORSLIB_COLLECT_EXEC_STATS
- if (AtomicGetAndIncrement(ThreadUtilization) == 0) {
- // Initially counter contains -t0, the pool start timestamp
- // When the first thread goes to sleep we add t1, so the counter
- // becomes t1-t0 >= 0, or the duration of max utilization so far.
- // If the counter was negative and becomes positive, that means
- // counter just turned into a duration and we should store that
- // duration. Otherwise another thread raced with us and
- // subtracted some other timestamp t2.
- const i64 t = GetCycleCountFast();
- const i64 x = AtomicGetAndAdd(MaxUtilizationCounter, t);
- if (x < 0 && x + t > 0)
- AtomicStore(&MaxUtilizationAccumulator, x + t);
- }
-#endif
-
- i64 startWaiting = GetCycleCountFast();
- i64 endSpinning = 0;
- TAtomic state = AtomicLoad(&threadCtx.WaitingFlag);
- bool wasSleeping = false;
- Y_ABORT_UNLESS(state == TThreadCtx::WS_NONE, "WaitingFlag# %d", int(state));
-
- if (SpinThresholdCycles > 0 && !needToBlock) {
-            // spin for the configured period
- AtomicSet(threadCtx.WaitingFlag, TThreadCtx::WS_ACTIVE);
- ui32 spinPauseCount = GoToSpin(threadCtx, startWaiting, endSpinning);
- SpinningTimeUs += endSpinning - startWaiting;
- // then - sleep
- if (AtomicLoad(&threadCtx.WaitingFlag) == TThreadCtx::WS_ACTIVE) {
- if (AtomicCas(&threadCtx.WaitingFlag, TThreadCtx::WS_BLOCKED, TThreadCtx::WS_ACTIVE)) {
- if (NFeatures::TCommonFeatureFlags::ProbeSpinCycles) {
- LWPROBE(SpinCycles, PoolId, PoolName, spinPauseCount, true);
- }
-
- wasSleeping = true;
- if (GoToSleep(threadCtx, timers)) { // interrupted
- return true;
- }
- AllThreadsSleep.store(false);
- }
- }
- if (NFeatures::TCommonFeatureFlags::ProbeSpinCycles && !wasSleeping) {
- LWPROBE(SpinCycles, PoolId, PoolName, spinPauseCount, false);
- }
- } else {
- AtomicSet(threadCtx.WaitingFlag, TThreadCtx::WS_BLOCKED);
- wasSleeping = true;
- if (GoToSleep(threadCtx, timers)) { // interrupted
- return true;
- }
- AllThreadsSleep.store(false);
- }
-
- i64 needTimeTs = threadCtx.StartWakingTs.exchange(0);
- if (wasSleeping && needTimeTs) {
- ui64 waitingDuration = std::max<i64>(0, needTimeTs - startWaiting);
- ui64 awakingDuration = std::max<i64>(0, GetCycleCountFast() - needTimeTs);
- WaitingStats[TlsThreadContext->WorkerId].AddAwakening(waitingDuration, awakingDuration);
- } else {
- ui64 waitingDuration = std::max<i64>(0, endSpinning - startWaiting);
- if (wasSleeping) {
- WaitingStats[TlsThreadContext->WorkerId].AddFastAwakening(waitingDuration);
- } else {
- WaitingStats[TlsThreadContext->WorkerId].Add(waitingDuration);
- }
- }
-
- Y_DEBUG_ABORT_UNLESS(AtomicLoad(&StopFlag) || AtomicLoad(&threadCtx.WaitingFlag) == TThreadCtx::WS_NONE);
-
-#if defined ACTORSLIB_COLLECT_EXEC_STATS
- if (AtomicDecrement(ThreadUtilization) == 0) {
- // When we started sleeping counter contained t1-t0, or the
- // last duration of max utilization. Now we subtract t2 >= t1,
- // which turns counter negative again, and the next sleep cycle
- // at timestamp t3 would be adding some new duration t3-t2.
- // If the counter was positive and becomes negative that means
- // there are no current races with other threads and we should
- // store the last positive duration we observed. Multiple
- // threads may be adding and subtracting values in potentially
- // arbitrary order, which would cause counter to oscillate
- // around zero. When it crosses zero is a good indication of a
- // correct value.
- const i64 t = GetCycleCountFast();
- const i64 x = AtomicGetAndAdd(MaxUtilizationCounter, -t);
- if (x > 0 && x - t < 0)
- AtomicStore(&MaxUtilizationAccumulator, x);
- }
-#endif
- return false;
- }
-
- void TBasicExecutorPool::AskToGoToSleep(bool *needToWait, bool *needToBlock) {
- TAtomic x = AtomicGet(Semaphore);
- do {
- i64 oldX = x;
-            TSemaphore semaphore = TSemaphore::GetSemaphore(x);
- if (semaphore.CurrentSleepThreadCount < 0) {
- semaphore.CurrentSleepThreadCount++;
- x = AtomicGetAndCas(&Semaphore, semaphore.ConverToI64(), x);
- if (x == oldX) {
- *needToWait = true;
- *needToBlock = true;
- return;
- }
- continue;
- }
-
- if (semaphore.OldSemaphore == 0) {
- semaphore.CurrentSleepThreadCount++;
- if (semaphore.CurrentSleepThreadCount == AtomicLoad(&ThreadCount)) {
- AllThreadsSleep.store(true);
- }
- x = AtomicGetAndCas(&Semaphore, semaphore.ConverToI64(), x);
- if (x == oldX) {
- *needToWait = true;
- *needToBlock = false;
- return;
- }
- continue;
- }
-
- *needToWait = false;
- *needToBlock = false;
- return;
- } while (true);
- }
-
- ui32 TBasicExecutorPool::GetReadyActivationCommon(TWorkerContext& wctx, ui64 revolvingCounter) {
- TWorkerId workerId = wctx.WorkerId;
- Y_DEBUG_ABORT_UNLESS(workerId < PoolThreads);
-
- TTimers timers;
-
- if (Harmonizer) {
- LWPROBE(TryToHarmonize, PoolId, PoolName);
- Harmonizer->Harmonize(timers.HPStart);
- }
-
- if (workerId >= 0) {
- AtomicSet(Threads[workerId].WaitingFlag, TThreadCtx::WS_NONE);
- }
-
- TAtomic x = AtomicGet(Semaphore);
- TSemaphore semaphore = TSemaphore::GetSemaphore(x);
- while (!RelaxedLoad(&StopFlag)) {
- if (!semaphore.OldSemaphore || semaphore.CurrentSleepThreadCount < 0) {
- if (workerId < 0 || !wctx.IsNeededToWaitNextActivation) {
- timers.HPNow = GetCycleCountFast();
- wctx.AddElapsedCycles(ActorSystemIndex, timers.HPNow - timers.HPStart);
- return 0;
- }
-
- bool needToWait = false;
- bool needToBlock = false;
- AskToGoToSleep(&needToWait, &needToBlock);
- if (needToWait) {
- if (GoToWaiting(Threads[workerId], timers, needToBlock)) { // interrupted
- return 0;
- }
- }
- } else {
- if (const ui32 activation = Activations.Pop(++revolvingCounter)) {
- if (workerId >= 0) {
- AtomicSet(Threads[workerId].WaitingFlag, TThreadCtx::WS_RUNNING);
- }
- AtomicDecrement(Semaphore);
- timers.HPNow = GetCycleCountFast();
- timers.Elapsed += timers.HPNow - timers.HPStart;
- wctx.AddElapsedCycles(ActorSystemIndex, timers.Elapsed);
- if (timers.Parked > 0) {
- wctx.AddParkedCycles(timers.Parked);
- }
-
- return activation;
- }
- semaphore.CurrentSleepThreadCount++;
- }
-
- SpinLockPause();
- x = AtomicGet(Semaphore);
- semaphore = TSemaphore::GetSemaphore(x);
- }
-
- return 0;
- }
-
- ui32 TBasicExecutorPool::GetReadyActivationLocalQueue(TWorkerContext& wctx, ui64 revolvingCounter) {
- TWorkerId workerId = wctx.WorkerId;
- Y_DEBUG_ABORT_UNLESS(workerId < static_cast<i32>(PoolThreads));
-
- if (workerId >= 0 && LocalQueues[workerId].size()) {
- ui32 activation = LocalQueues[workerId].front();
- LocalQueues[workerId].pop();
- return activation;
- } else {
- TlsThreadContext->WriteTurn = 0;
- TlsThreadContext->LocalQueueSize = LocalQueueSize.load(std::memory_order_relaxed);
- }
- return GetReadyActivationCommon(wctx, revolvingCounter);
- }
-
- ui32 TBasicExecutorPool::GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) {
- if constexpr (NFeatures::IsLocalQueues()) {
- if (SharedExecutorsCount) {
- return GetReadyActivationCommon(wctx, revolvingCounter);
- }
- return GetReadyActivationLocalQueue(wctx, revolvingCounter);
- } else {
- return GetReadyActivationCommon(wctx, revolvingCounter);
- }
- return 0;
- }
-
- inline void TBasicExecutorPool::WakeUpLoop(i16 currentThreadCount) {
- if (AllThreadsSleep) {
- TThreadCtx& hotThreadCtx = Threads[0];
- if (AtomicCas(&hotThreadCtx.WaitingFlag, TThreadCtx::WS_NONE, TThreadCtx::WS_ACTIVE)) {
- return;
- }
-
- TThreadCtx& coldThreadCtx = Threads[AtomicLoad(&ThreadCount) - 1];
- if (AtomicCas(&coldThreadCtx.WaitingFlag, TThreadCtx::WS_NONE, TThreadCtx::WS_BLOCKED)) {
- if (TlsThreadContext && TlsThreadContext->WaitingStats) {
- ui64 beforeUnpark = GetCycleCountFast();
- coldThreadCtx.StartWakingTs = beforeUnpark;
- coldThreadCtx.WaitingPad.Unpark();
- TlsThreadContext->WaitingStats->AddWakingUp(GetCycleCountFast() - beforeUnpark);
- } else {
- coldThreadCtx.WaitingPad.Unpark();
- }
- return;
- }
- }
- for (i16 i = 0;;) {
- TThreadCtx& threadCtx = Threads[i];
- TThreadCtx::EWaitState state = static_cast<TThreadCtx::EWaitState>(AtomicLoad(&threadCtx.WaitingFlag));
- switch (state) {
- case TThreadCtx::WS_NONE:
- case TThreadCtx::WS_RUNNING:
- if (++i >= MaxThreadCount - SharedExecutorsCount) {
- i = 0;
- }
- break;
- case TThreadCtx::WS_ACTIVE:
- case TThreadCtx::WS_BLOCKED:
- if (AtomicCas(&threadCtx.WaitingFlag, TThreadCtx::WS_NONE, state)) {
- if (state == TThreadCtx::WS_BLOCKED) {
- ui64 beforeUnpark = GetCycleCountFast();
- threadCtx.StartWakingTs = beforeUnpark;
- if (TlsThreadContext && TlsThreadContext->WaitingStats) {
- threadCtx.WaitingPad.Unpark();
- TlsThreadContext->WaitingStats->AddWakingUp(GetCycleCountFast() - beforeUnpark);
- } else {
- threadCtx.WaitingPad.Unpark();
- }
- }
- if (i >= currentThreadCount) {
- AtomicIncrement(WrongWakenedThreadCount);
- }
- return;
- }
- break;
- default:
- Y_ABORT();
- }
- }
- }
-
- void TBasicExecutorPool::ScheduleActivationExCommon(ui32 activation, ui64 revolvingCounter, TAtomic x) {
- TSemaphore semaphore = TSemaphore::GetSemaphore(x);
-
- Activations.Push(activation, revolvingCounter);
- bool needToWakeUp = false;
-
- do {
- needToWakeUp = semaphore.CurrentSleepThreadCount > SharedExecutorsCount;
- i64 oldX = semaphore.ConverToI64();
- semaphore.OldSemaphore++;
- if (needToWakeUp) {
- semaphore.CurrentSleepThreadCount--;
- }
- x = AtomicGetAndCas(&Semaphore, semaphore.ConverToI64(), oldX);
- if (x == oldX) {
- break;
- }
- semaphore = TSemaphore::GetSemaphore(x);
- } while (true);
-
-        if (needToWakeUp) { // we must find someone to wake up
- WakeUpLoop(semaphore.CurrentThreadCount);
- }
- }
-
- void TBasicExecutorPool::ScheduleActivationExLocalQueue(ui32 activation, ui64 revolvingWriteCounter) {
- if (TlsThreadContext && TlsThreadContext->Pool == this && TlsThreadContext->WorkerId >= 0) {
- if (++TlsThreadContext->WriteTurn < TlsThreadContext->LocalQueueSize) {
- LocalQueues[TlsThreadContext->WorkerId].push(activation);
- return;
- }
- if (ActorSystemProfile != EASProfile::Default) {
- TAtomic x = AtomicGet(Semaphore);
- TSemaphore semaphore = TSemaphore::GetSemaphore(x);
- if constexpr (NFeatures::TLocalQueuesFeatureFlags::UseIfAllOtherThreadsAreSleeping) {
- if (semaphore.CurrentSleepThreadCount == semaphore.CurrentThreadCount - 1 && semaphore.OldSemaphore == 0) {
- if (LocalQueues[TlsThreadContext->WorkerId].empty()) {
- LocalQueues[TlsThreadContext->WorkerId].push(activation);
- return;
- }
- }
- }
-
- if constexpr (NFeatures::TLocalQueuesFeatureFlags::UseOnMicroburst) {
- if (semaphore.OldSemaphore >= semaphore.CurrentThreadCount) {
- if (LocalQueues[TlsThreadContext->WorkerId].empty() && TlsThreadContext->WriteTurn < 1) {
- TlsThreadContext->WriteTurn++;
- LocalQueues[TlsThreadContext->WorkerId].push(activation);
- return;
- }
- }
- }
- ScheduleActivationExCommon(activation, revolvingWriteCounter, x);
- return;
- }
- }
- ScheduleActivationExCommon(activation, revolvingWriteCounter, AtomicGet(Semaphore));
- }
-
- void TBasicExecutorPool::ScheduleActivationEx(ui32 activation, ui64 revolvingCounter) {
- if constexpr (NFeatures::IsLocalQueues()) {
- ScheduleActivationExLocalQueue(activation, revolvingCounter);
- } else {
- ScheduleActivationExCommon(activation, revolvingCounter, AtomicGet(Semaphore));
- }
- }
-
- void TBasicExecutorPool::GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- poolStats.MaxUtilizationTime = RelaxedLoad(&MaxUtilizationAccumulator) / (i64)(NHPTimer::GetCyclesPerSecond() / 1000);
- poolStats.WrongWakenedThreadCount = RelaxedLoad(&WrongWakenedThreadCount);
- poolStats.CurrentThreadCount = RelaxedLoad(&ThreadCount);
- poolStats.DefaultThreadCount = DefaultThreadCount;
- poolStats.MaxThreadCount = MaxThreadCount;
- poolStats.SpinningTimeUs = Ts2Us(SpinningTimeUs);
- poolStats.SpinThresholdUs = Ts2Us(SpinThresholdCycles);
- if (Harmonizer) {
- TPoolHarmonizerStats stats = Harmonizer->GetPoolStats(PoolId);
- poolStats.IsNeedy = stats.IsNeedy;
- poolStats.IsStarved = stats.IsStarved;
- poolStats.IsHoggish = stats.IsHoggish;
- poolStats.IncreasingThreadsByNeedyState = stats.IncreasingThreadsByNeedyState;
- poolStats.IncreasingThreadsByExchange = stats.IncreasingThreadsByExchange;
- poolStats.DecreasingThreadsByStarvedState = stats.DecreasingThreadsByStarvedState;
- poolStats.DecreasingThreadsByHoggishState = stats.DecreasingThreadsByHoggishState;
- poolStats.DecreasingThreadsByExchange = stats.DecreasingThreadsByExchange;
- poolStats.PotentialMaxThreadCount = stats.PotentialMaxThreadCount;
- poolStats.MaxConsumedCpuUs = stats.MaxConsumedCpu;
- poolStats.MinConsumedCpuUs = stats.MinConsumedCpu;
- poolStats.MaxBookedCpuUs = stats.MaxBookedCpu;
- poolStats.MinBookedCpuUs = stats.MinBookedCpu;
- }
-
- statsCopy.resize(PoolThreads + 1);
- // Save counters from the pool object
- statsCopy[0] = TExecutorThreadStats();
- statsCopy[0].Aggregate(Stats);
-#if defined(ACTORSLIB_COLLECT_EXEC_STATS)
- RecalculateStuckActors(statsCopy[0]);
-#endif
- // Per-thread stats
- for (i16 i = 0; i < PoolThreads; ++i) {
- Threads[i].Thread->GetCurrentStats(statsCopy[i + 1]);
- }
- }
-
- void TBasicExecutorPool::Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) {
- TAffinityGuard affinityGuard(Affinity());
-
- ActorSystem = actorSystem;
-
- ScheduleReaders.Reset(new NSchedulerQueue::TReader[PoolThreads]);
- ScheduleWriters.Reset(new NSchedulerQueue::TWriter[PoolThreads]);
-
- for (i16 i = 0; i != PoolThreads; ++i) {
- if (i < MaxThreadCount - SharedExecutorsCount) {
- Threads[i].Thread.Reset(
- new TExecutorThread(
- i,
- 0, // CpuId is not used in BASIC pool
- actorSystem,
- this,
- MailboxTable.Get(),
- PoolName,
- TimePerMailbox,
- EventsPerMailbox));
- } else {
- Threads[i].Thread.Reset(
- new TExecutorThread(
- i,
- actorSystem,
- actorSystem->GetBasicExecutorPools(),
- PoolName,
- SoftProcessingDurationTs,
- TimePerMailbox,
- EventsPerMailbox));
- }
- ScheduleWriters[i].Init(ScheduleReaders[i]);
- }
-
- *scheduleReaders = ScheduleReaders.Get();
- *scheduleSz = PoolThreads;
- }
-
- void TBasicExecutorPool::Start() {
- TAffinityGuard affinityGuard(Affinity());
-
- ThreadUtilization = 0;
- AtomicAdd(MaxUtilizationCounter, -(i64)GetCycleCountFast());
-
- for (i16 i = 0; i != PoolThreads; ++i) {
- Threads[i].Thread->Start();
- }
- }
-
- void TBasicExecutorPool::PrepareStop() {
- AtomicStore(&StopFlag, true);
- for (i16 i = 0; i != PoolThreads; ++i) {
- Threads[i].Thread->StopFlag = true;
- Threads[i].WaitingPad.Interrupt();
- }
- }
-
- void TBasicExecutorPool::Shutdown() {
- for (i16 i = 0; i != PoolThreads; ++i)
- Threads[i].Thread->Join();
- }
-
- void TBasicExecutorPool::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_DEBUG_ABORT_UNLESS(workerId < PoolThreads);
-
- Schedule(deadline - ActorSystem->Timestamp(), ev, cookie, workerId);
- }
-
- void TBasicExecutorPool::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_DEBUG_ABORT_UNLESS(workerId < PoolThreads);
-
- const auto current = ActorSystem->Monotonic();
- if (deadline < current)
- deadline = current;
-
- ScheduleWriters[workerId].Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TBasicExecutorPool::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_DEBUG_ABORT_UNLESS(workerId < PoolThreads);
-
- const auto deadline = ActorSystem->Monotonic() + delta;
- ScheduleWriters[workerId].Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TBasicExecutorPool::SetRealTimeMode() const {
-// TODO: musl-libc version of `sched_param` struct is for some reason different from pthread
-// version in Ubuntu 12.04
-#if defined(_linux_) && !defined(_musl_)
- if (RealtimePriority != 0) {
- pthread_t threadSelf = pthread_self();
- sched_param param = {RealtimePriority};
- if (pthread_setschedparam(threadSelf, SCHED_FIFO, &param)) {
- Y_ABORT("Cannot set realtime priority");
- }
- }
-#else
- Y_UNUSED(RealtimePriority);
-#endif
- }
-
- i16 TBasicExecutorPool::GetThreadCount() const {
- return AtomicGet(ThreadCount);
- }
-
- void TBasicExecutorPool::SetThreadCount(i16 threads) {
- threads = Max(i16(1), Min(PoolThreads, threads));
- with_lock (ChangeThreadsLock) {
- i16 prevCount = GetThreadCount();
- AtomicSet(ThreadCount, threads);
- TSemaphore semaphore = TSemaphore::GetSemaphore(AtomicGet(Semaphore));
- i64 oldX = semaphore.ConverToI64();
- semaphore.CurrentThreadCount = threads;
- if (threads > prevCount) {
- semaphore.CurrentSleepThreadCount += (i64)threads - prevCount;
- } else {
- semaphore.CurrentSleepThreadCount -= (i64)prevCount - threads;
- }
- AtomicAdd(Semaphore, semaphore.ConverToI64() - oldX);
- LWPROBE(ThreadCount, PoolId, PoolName, threads, MinThreadCount, MaxThreadCount, DefaultThreadCount);
- }
- }
-
- i16 TBasicExecutorPool::GetDefaultThreadCount() const {
- return DefaultThreadCount;
- }
-
- i16 TBasicExecutorPool::GetMinThreadCount() const {
- return MinThreadCount;
- }
-
- i16 TBasicExecutorPool::GetMaxThreadCount() const {
- return MaxThreadCount;
- }
-
- TCpuConsumption TBasicExecutorPool::GetThreadCpuConsumption(i16 threadIdx) {
- if (threadIdx >= PoolThreads) {
- return {0.0, 0.0};
- }
- TThreadCtx& threadCtx = Threads[threadIdx];
- TExecutorThreadStats stats;
- threadCtx.Thread->GetCurrentStats(stats);
- return {Ts2Us(stats.SafeElapsedTicks), static_cast<double>(stats.CpuUs), stats.NotEnoughCpuExecutions};
- }
-
- i16 TBasicExecutorPool::GetBlockingThreadCount() const {
- TAtomic x = AtomicGet(Semaphore);
- TSemaphore semaphore = TSemaphore::GetSemaphore(x);
- return -Min<i16>(semaphore.CurrentSleepThreadCount, 0);
- }
-
- i16 TBasicExecutorPool::GetPriority() const {
- return Priority;
- }
-
- void TBasicExecutorPool::SetSharedExecutorsCount(i16 count) {
- SharedExecutorsCount = count;
- }
-
- void TBasicExecutorPool::SetLocalQueueSize(ui16 size) {
- if constexpr (!NFeatures::TLocalQueuesFeatureFlags::FIXED_LOCAL_QUEUE_SIZE) {
- LocalQueueSize.store(std::max(size, NFeatures::TLocalQueuesFeatureFlags::MAX_LOCAL_QUEUE_SIZE), std::memory_order_relaxed);
- }
- }
-
- void TBasicExecutorPool::Initialize(TWorkerContext& wctx) {
- if (wctx.WorkerId >= 0) {
- TlsThreadContext->WaitingStats = &WaitingStats[wctx.WorkerId];
- }
- }
-
- void TBasicExecutorPool::SetSpinThresholdCycles(ui32 cycles) {
- if (ActorSystemProfile == EASProfile::LowLatency) {
- if (DefaultSpinThresholdCycles > cycles) {
- cycles = DefaultSpinThresholdCycles;
- }
- }
- SpinThresholdCycles = cycles;
- double resolutionUs = TWaitingStatsConstants::HistogramResolutionUs;
- ui32 bucketIdx = cycles / TWaitingStatsConstants::HistogramResolution;
- LWPROBE(ChangeSpinThreshold, PoolId, PoolName, cycles, resolutionUs * bucketIdx, bucketIdx);
- }
-
- void TBasicExecutorPool::GetWaitingStats(TWaitingStats<ui64> &acc) const {
- acc.Clear();
- double resolutionUs = TWaitingStatsConstants::HistogramResolutionUs;
- for (ui32 idx = 0; idx < ThreadCount; ++idx) {
- for (ui32 bucketIdx = 0; bucketIdx < TWaitingStatsConstants::BucketCount; ++bucketIdx) {
- LWPROBE(WaitingHistogramPerThread, PoolId, PoolName, idx, resolutionUs * bucketIdx, resolutionUs * (bucketIdx + 1), WaitingStats[idx].WaitingUntilNeedsTimeHist[bucketIdx].load());
- }
- acc.Add(WaitingStats[idx]);
- }
- for (ui32 bucketIdx = 0; bucketIdx < TWaitingStatsConstants::BucketCount; ++bucketIdx) {
- LWPROBE(WaitingHistogram, PoolId, PoolName, resolutionUs * bucketIdx, resolutionUs * (bucketIdx + 1), acc.WaitingUntilNeedsTimeHist[bucketIdx].load());
- }
- }
-
- void TBasicExecutorPool::ClearWaitingStats() const {
- for (ui32 idx = 0; idx < ThreadCount; ++idx) {
- WaitingStats[idx].Clear();
- }
- }
-
- void TBasicExecutorPool::CalcSpinPerThread(ui64 wakingUpConsumption) {
- for (i16 threadIdx = 0; threadIdx < PoolThreads; ++threadIdx) {
- ui64 newSpinThreshold = 0;
- if constexpr (NFeatures::TSpinFeatureFlags::UsePseudoMovingWindow) {
- MovingWaitingStats[threadIdx].Add(WaitingStats[threadIdx], 0.8, 0.2);
- newSpinThreshold = MovingWaitingStats[threadIdx].CalculateGoodSpinThresholdCycles(wakingUpConsumption);
- } else {
- newSpinThreshold = WaitingStats[threadIdx].CalculateGoodSpinThresholdCycles(wakingUpConsumption);
- }
- SpinThresholdCyclesPerThread[threadIdx].store(newSpinThreshold);
-
- double resolutionUs = TWaitingStatsConstants::HistogramResolutionUs;
- ui32 bucketIdx = newSpinThreshold / TWaitingStatsConstants::HistogramResolution;
- LWPROBE(ChangeSpinThresholdPerThread, PoolId, PoolName, threadIdx, newSpinThreshold, resolutionUs * bucketIdx, bucketIdx);
- }
- }
-}
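The MaxUtilizationCounter bookkeeping in GoToWaiting above relies on a sign trick: while no thread is parked the counter holds a negated timestamp, and when the first thread goes back to waiting, adding the current timestamp turns the counter into the length of the fully-utilized interval, which is stored when the sign flips. A single-threaded arithmetic sketch of that transition (the timestamps are illustrative, not real cycle counts):

#include <cassert>
#include <cstdint>

int main() {
    int64_t counter = 0;
    int64_t accumulator = 0;

    const int64_t t0 = 100;        // pool becomes fully utilized: subtract the timestamp
    counter -= t0;                 // counter == -t0, negative while everyone is busy

    const int64_t t1 = 160;        // first thread goes to sleep: add the timestamp
    const int64_t before = counter;
    counter += t1;                 // counter == t1 - t0, the fully-utilized duration
    if (before < 0 && counter > 0) {
        accumulator = counter;     // sign flipped: remember the duration, as the pool does
    }

    assert(accumulator == 60);     // 160 - 100 "cycles" at full utilization
    return 0;
}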
diff --git a/library/cpp/actors/core/executor_pool_basic.h b/library/cpp/actors/core/executor_pool_basic.h
deleted file mode 100644
index d1a4c9fd27..0000000000
--- a/library/cpp/actors/core/executor_pool_basic.h
+++ /dev/null
@@ -1,291 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "executor_thread.h"
-#include "executor_pool_basic_feature_flags.h"
-#include "scheduler_queue.h"
-#include "executor_pool_base.h"
-#include "harmonizer.h"
-#include <library/cpp/actors/actor_type/indexes.h>
-#include <library/cpp/actors/util/unordered_cache.h>
-#include <library/cpp/actors/util/threadparkpad.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-#include <library/cpp/threading/chunk_queue/queue.h>
-
-#include <util/system/mutex.h>
-
-#include <queue>
-
-namespace NActors {
-
- struct TWaitingStatsConstants {
- static constexpr ui64 BucketCount = 128;
- static constexpr double MaxSpinThersholdUs = 12.8;
-
- static constexpr ui64 KnownAvgWakingUpTime = 4250;
- static constexpr ui64 KnownAvgAwakeningTime = 7000;
-
- static const double HistogramResolutionUs;
- static const ui64 HistogramResolution;
- };
-
- template <typename T>
- struct TWaitingStats : TWaitingStatsConstants {
- std::array<std::atomic<T>, BucketCount> WaitingUntilNeedsTimeHist;
-
- std::atomic<T> WakingUpTotalTime;
- std::atomic<T> WakingUpCount;
- std::atomic<T> AwakingTotalTime;
- std::atomic<T> AwakingCount;
-
- TWaitingStats()
- {
- Clear();
- }
-
- void Clear() {
- std::fill(WaitingUntilNeedsTimeHist.begin(), WaitingUntilNeedsTimeHist.end(), 0);
- WakingUpTotalTime = 0;
- WakingUpCount = 0;
- AwakingTotalTime = 0;
- AwakingCount = 0;
- }
-
- void Add(ui64 waitingUntilNeedsTime) {
- ui64 waitIdx = std::min(waitingUntilNeedsTime / HistogramResolution, BucketCount - 1);
- WaitingUntilNeedsTimeHist[waitIdx]++;
- }
-
- void AddAwakening(ui64 waitingUntilNeedsTime, ui64 awakingTime) {
- Add(waitingUntilNeedsTime);
- AwakingTotalTime += awakingTime;
- AwakingCount++;
- }
-
- void AddFastAwakening(ui64 waitingUntilNeedsTime) {
- Add(waitingUntilNeedsTime - HistogramResolution);
- }
-
- void AddWakingUp(ui64 wakingUpTime) {
- WakingUpTotalTime += wakingUpTime;
- WakingUpCount++;
- }
-
- void Add(const TWaitingStats<T> &stats) {
- for (ui32 idx = 0; idx < BucketCount; ++idx) {
- WaitingUntilNeedsTimeHist[idx] += stats.WaitingUntilNeedsTimeHist[idx];
- }
- WakingUpTotalTime += stats.WakingUpTotalTime;
- WakingUpCount += stats.WakingUpCount;
- AwakingTotalTime += stats.AwakingTotalTime;
- AwakingCount += stats.AwakingCount;
- }
-
- template <typename T2>
- void Add(const TWaitingStats<T2> &stats, double oldK, double newK) {
- for (ui32 idx = 0; idx < BucketCount; ++idx) {
- WaitingUntilNeedsTimeHist[idx] = oldK * WaitingUntilNeedsTimeHist[idx] + newK * stats.WaitingUntilNeedsTimeHist[idx];
- }
- WakingUpTotalTime = oldK * WakingUpTotalTime + newK * stats.WakingUpTotalTime;
- WakingUpCount = oldK * WakingUpCount + newK * stats.WakingUpCount;
- AwakingTotalTime = oldK * AwakingTotalTime + newK * stats.AwakingTotalTime;
- AwakingCount = oldK * AwakingCount + newK * stats.AwakingCount;
- }
-
- ui32 CalculateGoodSpinThresholdCycles(ui64 avgWakingUpConsumption) {
- auto &bucketCount = TWaitingStatsConstants::BucketCount;
- auto &resolution = TWaitingStatsConstants::HistogramResolution;
-
- T waitingsCount = std::accumulate(WaitingUntilNeedsTimeHist.begin(), WaitingUntilNeedsTimeHist.end(), T{}); // init with T{} so double histograms are not truncated to int
-
- ui32 bestBucketIdx = 0;
- T bestCpuConsumption = Max<T>();
-
- T spinTime = 0;
- T spinCount = 0;
-
- for (ui32 bucketIdx = 0; bucketIdx < bucketCount; ++bucketIdx) {
- auto &bucket = WaitingUntilNeedsTimeHist[bucketIdx];
- ui64 imaginarySpinThreshold = resolution * bucketIdx;
- T cpuConsumption = spinTime + (waitingsCount - spinCount) * (avgWakingUpConsumption + imaginarySpinThreshold);
- if (bestCpuConsumption > cpuConsumption) {
- bestCpuConsumption = cpuConsumption;
- bestBucketIdx = bucketIdx;
- }
- spinTime += (2 * imaginarySpinThreshold + resolution) * bucket / 2;
- spinCount += bucket;
- // LWPROBE(WaitingHistogram, Pool->PoolId, Pool->GetName(), resolutionUs * bucketIdx, resolutionUs * (bucketIdx + 1), bucket);
- }
- ui64 result = resolution * bestBucketIdx;
- return result;
- }
- };
-
- class TBasicExecutorPool: public TExecutorPoolBase {
- struct TThreadCtx {
- TAutoPtr<TExecutorThread> Thread;
- TThreadParkPad WaitingPad;
- TAtomic WaitingFlag;
- std::atomic<i64> StartWakingTs;
- std::atomic<i64> EndWakingTs;
-
- enum EWaitState {
- WS_NONE,
- WS_ACTIVE,
- WS_BLOCKED,
- WS_RUNNING
- };
-
- TThreadCtx()
- : WaitingFlag(WS_NONE)
- {
- }
- };
-
- struct TTimers {
- NHPTimer::STime Elapsed = 0;
- NHPTimer::STime Parked = 0;
- NHPTimer::STime HPStart = GetCycleCountFast();
- NHPTimer::STime HPNow;
- };
-
- NThreading::TPadded<std::atomic_bool> AllThreadsSleep = true;
- const ui64 DefaultSpinThresholdCycles;
- std::atomic<ui64> SpinThresholdCycles;
- std::unique_ptr<NThreading::TPadded<std::atomic<ui64>>[]> SpinThresholdCyclesPerThread;
-
- TArrayHolder<NThreading::TPadded<TThreadCtx>> Threads;
- static_assert(sizeof(std::decay_t<decltype(Threads[0])>) == PLATFORM_CACHE_LINE);
- TArrayHolder<NThreading::TPadded<std::queue<ui32>>> LocalQueues;
- TArrayHolder<TWaitingStats<ui64>> WaitingStats;
- TArrayHolder<TWaitingStats<double>> MovingWaitingStats;
- std::atomic<ui16> LocalQueueSize;
-
-
- TArrayHolder<NSchedulerQueue::TReader> ScheduleReaders;
- TArrayHolder<NSchedulerQueue::TWriter> ScheduleWriters;
-
- const TString PoolName;
- const TDuration TimePerMailbox;
- const ui32 EventsPerMailbox;
- EASProfile ActorSystemProfile;
-
- const int RealtimePriority;
-
- TAtomic ThreadUtilization;
- TAtomic MaxUtilizationCounter;
- TAtomic MaxUtilizationAccumulator;
- TAtomic WrongWakenedThreadCount;
- std::atomic<ui64> SpinningTimeUs;
-
- TAtomic ThreadCount;
- TMutex ChangeThreadsLock;
-
- i16 MinThreadCount;
- i16 MaxThreadCount;
- i16 DefaultThreadCount;
- IHarmonizer *Harmonizer;
- i16 SharedExecutorsCount = 0;
- ui64 SoftProcessingDurationTs = 0;
-
- const i16 Priority = 0;
- const ui32 ActorSystemIndex = NActors::TActorTypeOperator::GetActorSystemIndex();
- public:
- struct TSemaphore {
- i64 OldSemaphore = 0; // 34 bits
- // Sign bit
- i16 CurrentSleepThreadCount = 0; // 14 bits
- // Sign bit
- i16 CurrentThreadCount = 0; // 14 bits
-
- inline i64 ConverToI64() {
- i64 value = (1ll << 34) + OldSemaphore;
- return value
- | (((i64)CurrentSleepThreadCount + (1 << 14)) << 35)
- | ((i64)CurrentThreadCount << 50);
- }
-
- static inline TSemaphore GetSemaphore(i64 value) {
- TSemaphore semaphore;
- semaphore.OldSemaphore = (value & 0x7ffffffffll) - (1ll << 34);
- semaphore.CurrentSleepThreadCount = ((value >> 35) & 0x7fff) - (1 << 14);
- semaphore.CurrentThreadCount = (value >> 50) & 0x3fff;
- return semaphore;
- }
- };
-
- static constexpr TDuration DEFAULT_TIME_PER_MAILBOX = TBasicExecutorPoolConfig::DEFAULT_TIME_PER_MAILBOX;
- static constexpr ui32 DEFAULT_EVENTS_PER_MAILBOX = TBasicExecutorPoolConfig::DEFAULT_EVENTS_PER_MAILBOX;
-
- TBasicExecutorPool(ui32 poolId,
- ui32 threads,
- ui64 spinThreshold,
- const TString& poolName = "",
- IHarmonizer *harmonizer = nullptr,
- TAffinity* affinity = nullptr,
- TDuration timePerMailbox = DEFAULT_TIME_PER_MAILBOX,
- ui32 eventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX,
- int realtimePriority = 0,
- ui32 maxActivityType = 0 /* deprecated */,
- i16 minThreadCount = 0,
- i16 maxThreadCount = 0,
- i16 defaultThreadCount = 0,
- i16 priority = 0);
- explicit TBasicExecutorPool(const TBasicExecutorPoolConfig& cfg, IHarmonizer *harmonizer);
- ~TBasicExecutorPool();
-
- void SetSharedExecutorsCount(i16 count);
-
- void Initialize(TWorkerContext& wctx) override;
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingReadCounter) override;
- ui32 GetReadyActivationCommon(TWorkerContext& wctx, ui64 revolvingReadCounter);
- ui32 GetReadyActivationLocalQueue(TWorkerContext& wctx, ui64 revolvingReadCounter);
-
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
-
- void ScheduleActivationEx(ui32 activation, ui64 revolvingWriteCounter) override;
- void ScheduleActivationExCommon(ui32 activation, ui64 revolvingWriteCounter, TAtomic semaphoreValue);
- void ScheduleActivationExLocalQueue(ui32 activation, ui64 revolvingWriteCounter);
-
- void SetLocalQueueSize(ui16 size);
-
- void Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) override;
- void Start() override;
- void PrepareStop() override;
- void Shutdown() override;
-
- void GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const override;
- TString GetName() const override {
- return PoolName;
- }
-
- void SetRealTimeMode() const override;
-
- i16 GetThreadCount() const override;
- void SetThreadCount(i16 threads) override;
- i16 GetDefaultThreadCount() const override;
- i16 GetMinThreadCount() const override;
- i16 GetMaxThreadCount() const override;
- TCpuConsumption GetThreadCpuConsumption(i16 threadIdx) override;
- i16 GetBlockingThreadCount() const override;
- i16 GetPriority() const override;
-
- void SetSpinThresholdCycles(ui32 cycles) override;
-
- void GetWaitingStats(TWaitingStats<ui64> &acc) const;
- void CalcSpinPerThread(ui64 wakingUpConsumption);
- void ClearWaitingStats() const;
-
- private:
- void AskToGoToSleep(bool *needToWait, bool *needToBlock);
-
- void WakeUpLoop(i16 currentThreadCount);
- bool GoToWaiting(TThreadCtx& threadCtx, TTimers &timers, bool needToBlock);
- ui32 GoToSpin(TThreadCtx& threadCtx, i64 start, i64 &end);
- bool GoToSleep(TThreadCtx& threadCtx, TTimers &timers);
- bool GoToBeBlocked(TThreadCtx& threadCtx, TTimers &timers);
- };
-}
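Aside, illustrative only (not from the deleted header): the 64-bit packing used by TBasicExecutorPool::TSemaphore above can be exercised with a small round-trip check. TSemaphoreSketch and the main below are assumptions made for this example; the bit layout mirrors ConverToI64/GetSemaphore (OldSemaphore biased by 2^34 in the low 35 bits, CurrentSleepThreadCount biased by 2^14 in bits 35..49, CurrentThreadCount in bits 50..63).

#include <cassert>
#include <cstdint>

struct TSemaphoreSketch {
    int64_t OldSemaphore = 0;
    int16_t CurrentSleepThreadCount = 0;
    int16_t CurrentThreadCount = 0;

    int64_t Pack() const {
        int64_t value = (1ll << 34) + OldSemaphore;
        return value
            | ((int64_t(CurrentSleepThreadCount) + (1 << 14)) << 35)
            | (int64_t(CurrentThreadCount) << 50);
    }

    static TSemaphoreSketch Unpack(int64_t value) {
        TSemaphoreSketch s;
        s.OldSemaphore = (value & 0x7ffffffffll) - (1ll << 34);
        s.CurrentSleepThreadCount = int16_t(((value >> 35) & 0x7fff) - (1 << 14));
        s.CurrentThreadCount = int16_t((value >> 50) & 0x3fff);
        return s;
    }
};

int main() {
    TSemaphoreSketch s;
    s.OldSemaphore = -5;            // negative: more waiters than queued activations
    s.CurrentSleepThreadCount = 3;  // three threads parked
    s.CurrentThreadCount = 8;
    TSemaphoreSketch r = TSemaphoreSketch::Unpack(s.Pack());
    assert(r.OldSemaphore == -5);
    assert(r.CurrentSleepThreadCount == 3);
    assert(r.CurrentThreadCount == 8);
    return 0;
}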
diff --git a/library/cpp/actors/core/executor_pool_basic_feature_flags.h b/library/cpp/actors/core/executor_pool_basic_feature_flags.h
deleted file mode 100644
index 17f2a81df4..0000000000
--- a/library/cpp/actors/core/executor_pool_basic_feature_flags.h
+++ /dev/null
@@ -1,50 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include <optional>
-
-
-namespace NActors::NFeatures {
-
- enum class EActorSystemOptimizationType {
- Common,
- LocalQueues,
- };
-
- struct TCommonFeatureFlags {
- static constexpr EActorSystemOptimizationType OptimizationType = EActorSystemOptimizationType::Common;
-
- static constexpr bool ProbeSpinCycles = false;
- };
-
- struct TLocalQueuesFeatureFlags {
- static constexpr EActorSystemOptimizationType OptimizationType = EActorSystemOptimizationType::LocalQueues;
-
- static constexpr ui16 MIN_LOCAL_QUEUE_SIZE = 0;
- static constexpr ui16 MAX_LOCAL_QUEUE_SIZE = 16;
- static constexpr std::optional<ui16> FIXED_LOCAL_QUEUE_SIZE = std::nullopt;
-
- static constexpr bool UseIfAllOtherThreadsAreSleeping = false;
- static constexpr bool UseOnMicroburst = false;
- };
-
- struct TSpinFeatureFlags {
- static constexpr bool DoNotSpinLower = false;
- static constexpr bool UsePseudoMovingWindow = true;
-
- static constexpr bool HotColdThreads = false;
- static constexpr bool CalcPerThread = false;
- };
-
- using TFeatureFlags = TCommonFeatureFlags;
-
- consteval bool IsCommon() {
- return TFeatureFlags::OptimizationType == EActorSystemOptimizationType::Common;
- }
-
- consteval bool IsLocalQueues() {
- return TFeatureFlags::OptimizationType == EActorSystemOptimizationType::LocalQueues;
- }
-
-}
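Aside, illustrative only: the feature-flag structs above are meant to be consumed at compile time, the way CalcSpinPerThread branches on TSpinFeatureFlags::UsePseudoMovingWindow. A minimal sketch with assumed names (TSpinFlagsSketch, UpdateSpinThreshold) showing that the untaken branch is compiled out by if constexpr:

#include <iostream>

struct TSpinFlagsSketch {
    static constexpr bool UsePseudoMovingWindow = true;
};

template <typename TFlags>
void UpdateSpinThreshold() {
    if constexpr (TFlags::UsePseudoMovingWindow) {
        // Smoothed path: blend the fresh histogram into a moving window first.
        std::cout << "using pseudo moving window\n";
    } else {
        // Plain path: compute the threshold from the raw per-period histogram.
        std::cout << "using raw waiting stats\n";
    }
}

int main() {
    UpdateSpinThreshold<TSpinFlagsSketch>();
    return 0;
}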
diff --git a/library/cpp/actors/core/executor_pool_basic_ut.cpp b/library/cpp/actors/core/executor_pool_basic_ut.cpp
deleted file mode 100644
index df8df52635..0000000000
--- a/library/cpp/actors/core/executor_pool_basic_ut.cpp
+++ /dev/null
@@ -1,666 +0,0 @@
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "hfunc.h"
-#include "scheduler_basic.h"
-
-#include <library/cpp/actors/util/should_continue.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-
-#define VALUES_EQUAL(a, b, ...) \
- UNIT_ASSERT_VALUES_EQUAL_C((a), (b), (i64)semaphore.OldSemaphore \
- << ' ' << (i64)semaphore.CurrentSleepThreadCount \
- << ' ' << (i64)semaphore.CurrentThreadCount __VA_ARGS__);
-
-////////////////////////////////////////////////////////////////////////////////
-
-struct TEvMsg : public NActors::TEventBase<TEvMsg, 10347> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvMsg, "ExecutorPoolTest: Msg");
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
-class TTestSenderActor : public IActorCallback {
-private:
- using EActivityType = IActor::EActivityType ;
- using EActorActivity = IActor::EActorActivity;
-
-private:
- TAtomic Counter;
- TActorId Receiver;
-
- std::function<void(void)> Action;
-
-public:
- TTestSenderActor(std::function<void(void)> action = [](){},
- EActivityType activityType = EActorActivity::OTHER)
- : IActorCallback(static_cast<TReceiveFunc>(&TTestSenderActor::Execute), activityType)
- , Action(action)
- {}
-
- void Start(TActorId receiver, size_t count)
- {
- AtomicSet(Counter, count);
- Receiver = receiver;
- }
-
- void Stop() {
- while (true) {
- if (GetCounter() == 0) {
- break;
- }
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- size_t GetCounter() const {
- return AtomicGet(Counter);
- }
-
-private:
- STFUNC(Execute)
- {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvMsg, Handle);
- }
- }
-
- void Handle(TEvMsg::TPtr &ev)
- {
- Y_UNUSED(ev);
- Action();
- TAtomicBase count = AtomicDecrement(Counter);
- Y_ABORT_UNLESS(count != Max<TAtomicBase>());
- if (count) {
- Send(Receiver, new TEvMsg());
- }
- }
-};
-
-THolder<TActorSystemSetup> GetActorSystemSetup(TBasicExecutorPool* pool)
-{
- auto setup = MakeHolder<NActors::TActorSystemSetup>();
- setup->NodeId = 1;
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<NActors::IExecutorPool>[1]);
- setup->Executors[0] = pool;
- setup->Scheduler = new TBasicSchedulerThread(NActors::TSchedulerConfig(512, 0));
- return setup;
-}
-
-Y_UNIT_TEST_SUITE(WaitingBenchs) {
-
- Y_UNIT_TEST(SpinPause) {
- const ui32 count = 1'000'000;
- ui64 startTs = GetCycleCountFast();
- for (ui32 idx = 0; idx < count; ++idx) {
- SpinLockPause();
- }
- ui64 stopTs = GetCycleCountFast();
- Cerr << Ts2Us(stopTs - startTs) / count << Endl;
- Cerr << double(stopTs - startTs) / count << Endl;
- }
-
- struct TThread : public ISimpleThread {
- static const ui64 CyclesInMicroSecond;
- std::array<ui64, 128> Hist;
- ui64 WakingTime = 0;
- ui64 AwakeningTime = 0;
- ui64 SleepTime = 0;
- ui64 IterationCount = 0;
-
- std::atomic<ui64> Awakens = 0;
- std::atomic<ui64> *OtherAwaken;
-
- TThreadParkPad OwnPad;
- TThreadParkPad *OtherPad;
-
- bool IsWaiting = false;
-
- void GoToWait() {
- ui64 start = GetCycleCountFast();
- OwnPad.Park();
- ui64 elapsed = GetCycleCountFast() - start;
- AwakeningTime += elapsed;
- ui64 idx = std::min(Hist.size() - 1, (elapsed - 20 * CyclesInMicroSecond) / CyclesInMicroSecond);
- Hist[idx]++;
- Awakens++;
- }
-
- void GoToWakeUp() {
- ui64 start = GetCycleCountFast();
- OtherPad->Unpark();
- ui64 elapsed = GetCycleCountFast() - start;
- WakingTime += elapsed;
- ui64 idx = std::min(Hist.size() - 1, elapsed / CyclesInMicroSecond);
- Hist[idx]++;
- }
-
- void GoToSleep() {
- ui64 start = GetCycleCountFast();
- ui64 stop = start;
- while (stop - start < 20 * CyclesInMicroSecond) {
- SpinLockPause();
- stop = GetCycleCountFast();
- }
- SleepTime += stop - start;
- }
-
- void* ThreadProc() {
- for (ui32 idx = 0; idx < IterationCount; ++idx) {
- if (IsWaiting) {
- GoToWait();
- } else {
- GoToSleep();
- GoToWakeUp();
- while(OtherAwaken->load() == idx) {
- SpinLockPause();
- }
- }
- }
- return nullptr;
- }
- };
-
- const ui64 TThread::CyclesInMicroSecond = NHPTimer::GetCyclesPerSecond() * 0.000001;
-
- Y_UNIT_TEST(WakingUpTest) {
- TThread a, b;
- constexpr ui64 iterations = 100'000;
- std::fill(a.Hist.begin(), a.Hist.end(), 0);
- std::fill(b.Hist.begin(), b.Hist.end(), 0);
- a.IterationCount = iterations;
- b.IterationCount = iterations;
- a.IsWaiting = true;
- b.IsWaiting = false;
- b.OtherAwaken = &a.Awakens;
- a.OtherPad = &b.OwnPad;
- b.OtherPad = &a.OwnPad;
- a.Start();
- b.Start();
- a.Join();
- b.Join();
-
- ui64 awakeningTime = a.AwakeningTime + b.AwakeningTime - a.SleepTime - b.SleepTime;
- ui64 wakingUpTime = a.WakingTime + b.WakingTime;
-
- Cerr << "AvgAwakeningCycles: " << double(awakeningTime) / iterations << Endl;
- Cerr << "AvgAwakeningUs: " << Ts2Us(awakeningTime) / iterations << Endl;
- Cerr << "AvgSleep20usCycles:" << double(b.SleepTime) / iterations << Endl;
- Cerr << "AvgSleep20usUs:" << Ts2Us(b.SleepTime) / iterations << Endl;
- Cerr << "AvgWakingUpCycles: " << double(wakingUpTime) / iterations << Endl;
- Cerr << "AvgWakingUpUs: " << Ts2Us(wakingUpTime) / iterations << Endl;
-
- Cerr << "AwakeningHist:\n";
- for (ui32 idx = 0; idx < a.Hist.size(); ++idx) {
- if (a.Hist[idx]) {
- if (idx + 1 != a.Hist.size()) {
- Cerr << " [" << idx << "us - " << idx + 1 << "us] " << a.Hist[idx] << Endl;
- } else {
- Cerr << " [" << idx << "us - ...] " << a.Hist[idx] << Endl;
- }
- }
- }
-
- Cerr << "WakingUpHist:\n";
- for (ui32 idx = 0; idx < b.Hist.size(); ++idx) {
- if (b.Hist[idx]) {
- if (idx + 1 != b.Hist.size()) {
- Cerr << " [" << idx << "us - " << idx + 1 << "us] " << b.Hist[idx] << Endl;
- } else {
- Cerr << " [" << idx << "us - ...] " << b.Hist[idx] << Endl;
- }
- }
- }
- }
-
-}
-
-Y_UNIT_TEST_SUITE(BasicExecutorPool) {
-
- Y_UNIT_TEST(Semaphore) {
- TBasicExecutorPool::TSemaphore semaphore;
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(0);
-
- VALUES_EQUAL(0, semaphore.ConverToI64());
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(-1);
- VALUES_EQUAL(-1, semaphore.ConverToI64());
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(1);
- VALUES_EQUAL(1, semaphore.ConverToI64());
-
- for (i64 value = -1'000'000; value <= 1'000'000; ++value) {
- VALUES_EQUAL(TBasicExecutorPool::TSemaphore::GetSemaphore(value).ConverToI64(), value);
- }
-
- for (i8 sleepThreads = -10; sleepThreads <= 10; ++sleepThreads) {
-
- semaphore = TBasicExecutorPool::TSemaphore();
- semaphore.CurrentSleepThreadCount = sleepThreads;
- i64 initialValue = semaphore.ConverToI64();
-
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(initialValue - 1);
- VALUES_EQUAL(-1, semaphore.OldSemaphore);
-
- i64 value = initialValue;
- value -= 100;
- for (i32 expected = -100; expected <= 100; ++expected) {
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(value);
- UNIT_ASSERT_VALUES_EQUAL_C(expected, semaphore.OldSemaphore, (i64)semaphore.OldSemaphore
- << ' ' << (i64)semaphore.CurrentSleepThreadCount
- << ' ' << (i64)semaphore.CurrentThreadCount);
- UNIT_ASSERT_VALUES_EQUAL_C(sleepThreads, semaphore.CurrentSleepThreadCount, (i64)semaphore.OldSemaphore
- << ' ' << (i64)semaphore.CurrentSleepThreadCount
- << ' ' << (i64)semaphore.CurrentThreadCount);
- semaphore = TBasicExecutorPool::TSemaphore();
- semaphore.OldSemaphore = expected;
- semaphore.CurrentSleepThreadCount = sleepThreads;
- UNIT_ASSERT_VALUES_EQUAL(semaphore.ConverToI64(), value);
- value++;
- }
-
- for (i32 expected = 101; expected >= -101; --expected) {
- semaphore = TBasicExecutorPool::TSemaphore::GetSemaphore(value);
- UNIT_ASSERT_VALUES_EQUAL_C(expected, semaphore.OldSemaphore, (i64)semaphore.OldSemaphore
- << ' ' << (i64)semaphore.CurrentSleepThreadCount
- << ' ' << (i64)semaphore.CurrentThreadCount);
- UNIT_ASSERT_VALUES_EQUAL_C(sleepThreads, semaphore.CurrentSleepThreadCount, (i64)semaphore.OldSemaphore
- << ' ' << (i64)semaphore.CurrentSleepThreadCount
- << ' ' << (i64)semaphore.CurrentThreadCount);
- value--;
- }
- }
-
- //UNIT_ASSERT_VALUES_EQUAL_C(-1, TBasicExecutorPool::TSemaphore::GetSemaphore(value-1).OldSemaphore);
- }
-
- Y_UNIT_TEST(CheckCompleteOne) {
- const size_t size = 4;
- const size_t msgCount = 1e4;
- TBasicExecutorPool* executorPool = new TBasicExecutorPool(0, size, 50);
-
- auto setup = GetActorSystemSetup(executorPool);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- auto actor = new TTestSenderActor();
- auto actorId = actorSystem.Register(actor);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
-
- while (actor->GetCounter()) {
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Counter is " << actor->GetCounter());
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- Y_UNIT_TEST(CheckCompleteAll) {
- const size_t size = 4;
- const size_t msgCount = 1e4;
- TBasicExecutorPool* executorPool = new TBasicExecutorPool(0, size, 50);
-
- auto setup = GetActorSystemSetup(executorPool);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TTestSenderActor* actors[size];
- TActorId actorIds[size];
-
- for (size_t i = 0; i < size; ++i) {
- actors[i] = new TTestSenderActor();
- actorIds[i] = actorSystem.Register(actors[i]);
- }
- for (size_t i = 0; i < size; ++i) {
- actors[i]->Start(actors[i]->SelfId(), msgCount);
- }
- for (size_t i = 0; i < size; ++i) {
- actorSystem.Send(actorIds[i], new TEvMsg());
- }
-
-
- while (true) {
- size_t maxCounter = 0;
- for (size_t i = 0; i < size; ++i) {
- maxCounter = Max(maxCounter, actors[i]->GetCounter());
- }
-
- if (maxCounter == 0) {
- break;
- }
-
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Max counter is " << maxCounter);
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- Y_UNIT_TEST(CheckCompleteOver) {
- const size_t size = 4;
- const size_t actorsCount = size * 2;
- const size_t msgCount = 1e4;
- TBasicExecutorPool* executorPool = new TBasicExecutorPool(0, size, 50);
-
- auto setup = GetActorSystemSetup(executorPool);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TTestSenderActor* actors[actorsCount];
- TActorId actorIds[actorsCount];
-
- for (size_t i = 0; i < actorsCount; ++i) {
- actors[i] = new TTestSenderActor();
- actorIds[i] = actorSystem.Register(actors[i]);
- }
- for (size_t i = 0; i < actorsCount; ++i) {
- actors[i]->Start(actors[i]->SelfId(), msgCount);
- }
- for (size_t i = 0; i < actorsCount; ++i) {
- actorSystem.Send(actorIds[i], new TEvMsg());
- }
-
-
- while (true) {
- size_t maxCounter = 0;
- for (size_t i = 0; i < actorsCount; ++i) {
- maxCounter = Max(maxCounter, actors[i]->GetCounter());
- }
-
- if (maxCounter == 0) {
- break;
- }
-
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Max counter is " << maxCounter);
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- Y_UNIT_TEST(CheckCompleteRoundRobinOver) {
- const size_t size = 4;
- const size_t actorsCount = size * 2;
- const size_t msgCount = 1e2;
- TBasicExecutorPool* executorPool = new TBasicExecutorPool(0, size, 50);
-
- auto setup = GetActorSystemSetup(executorPool);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TTestSenderActor* actors[actorsCount];
- TActorId actorIds[actorsCount];
-
- for (size_t i = 0; i < actorsCount; ++i) {
- actors[i] = new TTestSenderActor();
- actorIds[i] = actorSystem.Register(actors[i]);
- }
- for (size_t i = 0; i < actorsCount; ++i) {
- actors[i]->Start(actorIds[(i + 1) % actorsCount], msgCount);
- }
- for (size_t i = 0; i < actorsCount; ++i) {
- actorSystem.Send(actorIds[i], new TEvMsg());
- }
-
- while (true) {
- size_t maxCounter = 0;
- for (size_t i = 0; i < actorsCount; ++i) {
- maxCounter = Max(maxCounter, actors[i]->GetCounter());
- }
-
- if (maxCounter == 0) {
- break;
- }
-
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Max counter is " << maxCounter);
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- Y_UNIT_TEST(CheckStats) {
- const size_t size = 4;
- const size_t msgCount = 1e4;
- TBasicExecutorPool* executorPool = new TBasicExecutorPool(0, size, 50);
-
- auto setup = GetActorSystemSetup(executorPool);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- auto actor = new TTestSenderActor();
- auto actorId = actorSystem.Register(actor);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
-
- while (actor->GetCounter()) {
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Counter is " << actor->GetCounter());
-
- Sleep(TDuration::MilliSeconds(1));
- }
-
- TVector<TExecutorThreadStats> stats;
- TExecutorPoolStats poolStats;
- actorSystem.GetPoolStats(0, poolStats, stats);
- // Sum all per-thread counters into the 0th element
- for (ui32 idx = 1; idx < stats.size(); ++idx) {
- stats[0].Aggregate(stats[idx]);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(stats[0].SentEvents, msgCount - 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEvents, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PreemptedEvents, 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].NonDeliveredEvents, 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EmptyMailboxActivation, 0);
- //UNIT_ASSERT_VALUES_EQUAL(stats[0].CpuUs, 0); // depends on total duration of test, so undefined
- UNIT_ASSERT(stats[0].ElapsedTicks > 0);
- UNIT_ASSERT(stats[0].ParkedTicks > 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].BlockedTicks, 0);
- UNIT_ASSERT(stats[0].ActivationTimeHistogram.TotalSamples >= msgCount / TBasicExecutorPoolConfig::DEFAULT_EVENTS_PER_MAILBOX);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EventDeliveryTimeHistogram.TotalSamples, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EventProcessingCountHistogram.TotalSamples, msgCount);
- UNIT_ASSERT(stats[0].EventProcessingTimeHistogram.TotalSamples > 0);
- UNIT_ASSERT(stats[0].ElapsedTicksByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()] > 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEventsByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ActorsAliveByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ScheduledEventsByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolActorRegistrations, 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolDestroyedActors, 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolAllocatedMailboxes, 4095); // one line
- UNIT_ASSERT(stats[0].MailboxPushedOutByTime + stats[0].MailboxPushedOutByEventCount >= msgCount / TBasicExecutorPoolConfig::DEFAULT_EVENTS_PER_MAILBOX);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].MailboxPushedOutBySoftPreemption, 0);
- }
-}
-
-Y_UNIT_TEST_SUITE(ChangingThreadsCountInBasicExecutorPool) {
-
- struct TMockState {
- void ActorDo() {}
- };
-
- struct TTestActors {
- const size_t Count;
- TArrayHolder<TTestSenderActor*> Actors;
- TArrayHolder<TActorId> ActorIds;
-
- TTestActors(size_t count)
- : Count(count)
- , Actors(new TTestSenderActor*[count])
- , ActorIds(new TActorId[count])
- { }
-
- void Start(TActorSystem &actorSystem, size_t msgCount) {
- for (size_t i = 0; i < Count; ++i) {
- Actors[i]->Start(Actors[i]->SelfId(), msgCount);
- }
- for (size_t i = 0; i < Count; ++i) {
- actorSystem.Send(ActorIds[i], new TEvMsg());
- }
- }
-
- void Stop() {
- for (size_t i = 0; i < Count; ++i) {
- Actors[i]->Stop();
- }
- }
- };
-
- template <typename TState = TMockState>
- struct TTestCtx {
- const size_t MaxThreadCount;
- const size_t SendingMessageCount;
- std::unique_ptr<TBasicExecutorPool> ExecutorPool;
- THolder<TActorSystemSetup> Setup;
- TActorSystem ActorSystem;
-
- TState State;
-
- TTestCtx(size_t maxThreadCount, size_t sendingMessageCount)
- : MaxThreadCount(maxThreadCount)
- , SendingMessageCount(sendingMessageCount)
- , ExecutorPool(new TBasicExecutorPool(0, MaxThreadCount, 50))
- , Setup(GetActorSystemSetup(ExecutorPool.get()))
- , ActorSystem(Setup)
- {
- }
-
- TTestCtx(size_t maxThreadCount, size_t sendingMessageCount, const TState &state)
- : MaxThreadCount(maxThreadCount)
- , SendingMessageCount(sendingMessageCount)
- , ExecutorPool(new TBasicExecutorPool(0, MaxThreadCount, 50))
- , Setup(GetActorSystemSetup(ExecutorPool.get()))
- , ActorSystem(Setup)
- , State(state)
- {
- }
-
- ~TTestCtx() {
- ExecutorPool.release();
- }
-
- TTestActors RegisterCheckActors(size_t actorCount) {
- TTestActors res(actorCount);
- for (size_t i = 0; i < actorCount; ++i) {
- res.Actors[i] = new TTestSenderActor([&] {
- State.ActorDo();
- });
- res.ActorIds[i] = ActorSystem.Register(res.Actors[i]);
- }
- return res;
- }
- };
-
- struct TCheckingInFlightState {
- TAtomic ExpectedMaximum = 0;
- TAtomic CurrentInFlight = 0;
-
- void ActorStartProcessing() {
- ui32 inFlight = AtomicIncrement(CurrentInFlight);
- ui32 maximum = AtomicGet(ExpectedMaximum);
- if (maximum) {
- UNIT_ASSERT_C(inFlight <= maximum, "inFlight# " << inFlight << " maximum# " << maximum);
- }
- }
-
- void ActorStopProcessing() {
- AtomicDecrement(CurrentInFlight);
- }
-
- void ActorDo() {
- ActorStartProcessing();
- NanoSleep(1'000'000);
- ActorStopProcessing();
- }
- };
-
- Y_UNIT_TEST(DecreaseIncreaseThreadCount) {
- const size_t msgCount = 1e2;
- const size_t size = 4;
- const size_t testCount = 2;
- TTestCtx<TCheckingInFlightState> ctx(size, msgCount);
- ctx.ActorSystem.Start();
-
- TVector<TExecutorThreadStats> statsCopy[testCount];
-
- TTestActors testActors = ctx.RegisterCheckActors(size);
-
- const size_t N = 6;
- const size_t threadsCounts[N] = { 1, 3, 2, 3, 1, 4 };
- for (ui32 idx = 0; idx < 4 * N; ++idx) {
- size_t currentThreadCount = threadsCounts[idx % N];
- ctx.ExecutorPool->SetThreadCount(currentThreadCount);
- AtomicSet(ctx.State.ExpectedMaximum, currentThreadCount);
-
- for (size_t testIdx = 0; testIdx < testCount; ++testIdx) {
- testActors.Start(ctx.ActorSystem, msgCount);
- Sleep(TDuration::MilliSeconds(100));
- testActors.Stop();
- }
- Sleep(TDuration::MilliSeconds(10));
- }
- ctx.ActorSystem.Stop();
- }
-
- Y_UNIT_TEST(ContinuousChangingThreadCount) {
- const size_t msgCount = 1e2;
- const size_t size = 4;
-
- auto begin = TInstant::Now();
- TTestCtx<TCheckingInFlightState> ctx(size, msgCount, TCheckingInFlightState{msgCount});
- ctx.ActorSystem.Start();
- TTestActors testActors = ctx.RegisterCheckActors(size);
-
- testActors.Start(ctx.ActorSystem, msgCount);
-
- const size_t N = 6;
- const size_t threadsCounts[N] = { 1, 3, 2, 3, 1, 4 };
-
- ui64 counter = 0;
-
- TTestSenderActor* changerActor = new TTestSenderActor([&]{
- ctx.State.ActorStartProcessing();
- AtomicSet(ctx.State.ExpectedMaximum, 0);
- ctx.ExecutorPool->SetThreadCount(threadsCounts[counter]);
- NanoSleep(10'000'000);
- AtomicSet(ctx.State.ExpectedMaximum, threadsCounts[counter]);
- counter++;
- if (counter == N) {
- counter = 0;
- }
- ctx.State.ActorStopProcessing();
- });
- TActorId changerActorId = ctx.ActorSystem.Register(changerActor);
- changerActor->Start(changerActorId, msgCount);
- ctx.ActorSystem.Send(changerActorId, new TEvMsg());
-
- while (true) {
- size_t maxCounter = 0;
- for (size_t i = 0; i < size; ++i) {
- maxCounter = Max(maxCounter, testActors.Actors[i]->GetCounter());
- }
- if (maxCounter == 0) {
- break;
- }
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Max counter is " << maxCounter);
- Sleep(TDuration::MilliSeconds(1));
- }
-
- changerActor->Stop();
- ctx.ActorSystem.Stop();
- }
-}
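Aside, illustrative only: the tests above repeat the same poll-until-zero-with-a-deadline loop around GetCounter(). A hypothetical standard-library helper such as the one below (not present in the deleted file) captures that pattern; callers would pass a lambda like [&] { return actor->GetCounter(); }.

#include <chrono>
#include <functional>
#include <thread>

// Polls `counter` every millisecond; returns false if it has not reached zero
// before `deadline` elapses, true otherwise.
inline bool WaitForCounterToDrain(const std::function<size_t()>& counter,
                                  std::chrono::seconds deadline = std::chrono::seconds(5)) {
    const auto begin = std::chrono::steady_clock::now();
    while (counter() != 0) {
        if (std::chrono::steady_clock::now() - begin > deadline) {
            return false;
        }
        std::this_thread::sleep_for(std::chrono::milliseconds(1));
    }
    return true;
}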
diff --git a/library/cpp/actors/core/executor_pool_io.cpp b/library/cpp/actors/core/executor_pool_io.cpp
deleted file mode 100644
index 78e1d8e1ea..0000000000
--- a/library/cpp/actors/core/executor_pool_io.cpp
+++ /dev/null
@@ -1,157 +0,0 @@
-#include "executor_pool_io.h"
-#include "actor.h"
-#include "mailbox.h"
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/datetime.h>
-
-namespace NActors {
- TIOExecutorPool::TIOExecutorPool(ui32 poolId, ui32 threads, const TString& poolName, TAffinity* affinity)
- : TExecutorPoolBase(poolId, threads, affinity)
- , Threads(new TThreadCtx[threads])
- , PoolName(poolName)
- {}
-
- TIOExecutorPool::TIOExecutorPool(const TIOExecutorPoolConfig& cfg)
- : TIOExecutorPool(
- cfg.PoolId,
- cfg.Threads,
- cfg.PoolName,
- new TAffinity(cfg.Affinity)
- )
- {}
-
- TIOExecutorPool::~TIOExecutorPool() {
- Threads.Destroy();
- while (ThreadQueue.Pop(0))
- ;
- }
-
- ui32 TIOExecutorPool::GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) {
- i16 workerId = wctx.WorkerId;
- Y_DEBUG_ABORT_UNLESS(workerId < PoolThreads);
-
- NHPTimer::STime elapsed = 0;
- NHPTimer::STime parked = 0;
- NHPTimer::STime hpstart = GetCycleCountFast();
- NHPTimer::STime hpnow;
-
- const TAtomic x = AtomicDecrement(Semaphore);
- if (x < 0) {
- TThreadCtx& threadCtx = Threads[workerId];
- ThreadQueue.Push(workerId + 1, revolvingCounter);
- hpnow = GetCycleCountFast();
- elapsed += hpnow - hpstart;
- if (threadCtx.Pad.Park())
- return 0;
- hpstart = GetCycleCountFast();
- parked += hpstart - hpnow;
- }
-
- while (!RelaxedLoad(&StopFlag)) {
- if (const ui32 activation = Activations.Pop(++revolvingCounter)) {
- hpnow = GetCycleCountFast();
- elapsed += hpnow - hpstart;
- wctx.AddElapsedCycles(ActorSystemIndex, elapsed);
- if (parked > 0) {
- wctx.AddParkedCycles(parked);
- }
- return activation;
- }
- SpinLockPause();
- }
-
- return 0;
- }
-
- void TIOExecutorPool::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Schedule(deadline - ActorSystem->Timestamp(), ev, cookie, workerId);
- }
-
- void TIOExecutorPool::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_UNUSED(workerId);
-
- const auto current = ActorSystem->Monotonic();
- if (deadline < current)
- deadline = current;
-
- TTicketLock::TGuard guard(&ScheduleLock);
- ScheduleQueue->Writer.Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TIOExecutorPool::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_UNUSED(workerId);
- const auto deadline = ActorSystem->Monotonic() + delta;
-
- TTicketLock::TGuard guard(&ScheduleLock);
- ScheduleQueue->Writer.Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TIOExecutorPool::ScheduleActivationEx(ui32 activation, ui64 revolvingWriteCounter) {
- Activations.Push(activation, revolvingWriteCounter);
- const TAtomic x = AtomicIncrement(Semaphore);
- if (x <= 0) {
- for (;; ++revolvingWriteCounter) {
- if (const ui32 x = ThreadQueue.Pop(revolvingWriteCounter)) {
- const ui32 threadIdx = x - 1;
- Threads[threadIdx].Pad.Unpark();
- return;
- }
- SpinLockPause();
- }
- }
- }
-
- void TIOExecutorPool::Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) {
- TAffinityGuard affinityGuard(Affinity());
-
- ActorSystem = actorSystem;
-
- ScheduleQueue.Reset(new NSchedulerQueue::TQueueType());
-
- for (i16 i = 0; i != PoolThreads; ++i) {
- Threads[i].Thread.Reset(new TExecutorThread(i, 0, actorSystem, this, MailboxTable.Get(), PoolName));
- }
-
- *scheduleReaders = &ScheduleQueue->Reader;
- *scheduleSz = 1;
- }
-
- void TIOExecutorPool::Start() {
- TAffinityGuard affinityGuard(Affinity());
-
- for (i16 i = 0; i != PoolThreads; ++i)
- Threads[i].Thread->Start();
- }
-
- void TIOExecutorPool::PrepareStop() {
- AtomicStore(&StopFlag, true);
- for (i16 i = 0; i != PoolThreads; ++i) {
- Threads[i].Thread->StopFlag = true;
- Threads[i].Pad.Interrupt();
- }
- }
-
- void TIOExecutorPool::Shutdown() {
- for (i16 i = 0; i != PoolThreads; ++i)
- Threads[i].Thread->Join();
- }
-
- void TIOExecutorPool::GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- poolStats.CurrentThreadCount = PoolThreads;
- poolStats.DefaultThreadCount = PoolThreads;
- poolStats.MaxThreadCount = PoolThreads;
- poolStats.PotentialMaxThreadCount = PoolThreads;
- statsCopy.resize(PoolThreads + 1);
- // Save counters from the pool object
- statsCopy[0] = TExecutorThreadStats();
- statsCopy[0].Aggregate(Stats);
- // Per-thread stats
- for (i16 i = 0; i < PoolThreads; ++i) {
- Threads[i].Thread->GetCurrentStats(statsCopy[i + 1]);
- }
- }
-
- TString TIOExecutorPool::GetName() const {
- return PoolName;
- }
-}
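Aside, illustrative only: the park/unpark handshake TIOExecutorPool implements above with AtomicDecrement/AtomicIncrement and TThreadParkPad can be sketched with the standard library. The class name and the condition-variable details below are assumptions, not the actor library's API; the invariant is the same: a worker that drives the counter negative parks, and a producer that observes a non-positive counter after incrementing wakes one worker.

#include <atomic>
#include <condition_variable>
#include <mutex>

class TParkingSemaphoreSketch {
public:
    // Called by a worker before looking for work; blocks when nothing is queued.
    void AcquireOrPark() {
        if (Semaphore.fetch_sub(1, std::memory_order_acq_rel) - 1 < 0) {
            std::unique_lock<std::mutex> lock(Lock);
            Parked.wait(lock, [this] { return Wakeups > 0; });
            --Wakeups;
        }
    }

    // Called by a producer after pushing an activation; wakes a parked worker
    // if the new count shows somebody is (or is about to be) waiting.
    void ReleaseAndWake() {
        if (Semaphore.fetch_add(1, std::memory_order_acq_rel) + 1 <= 0) {
            std::lock_guard<std::mutex> lock(Lock);
            ++Wakeups;
            Parked.notify_one();
        }
    }

private:
    std::atomic<long long> Semaphore{0};  // >0: queued work, <0: parked/parking workers
    std::mutex Lock;
    std::condition_variable Parked;
    long long Wakeups = 0;                // pending unparks, guarded by Lock
};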
diff --git a/library/cpp/actors/core/executor_pool_io.h b/library/cpp/actors/core/executor_pool_io.h
deleted file mode 100644
index f3f1a11819..0000000000
--- a/library/cpp/actors/core/executor_pool_io.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "executor_thread.h"
-#include "scheduler_queue.h"
-#include "executor_pool_base.h"
-#include <library/cpp/actors/actor_type/indexes.h>
-#include <library/cpp/actors/util/ticket_lock.h>
-#include <library/cpp/actors/util/unordered_cache.h>
-#include <library/cpp/actors/util/threadparkpad.h>
-#include <util/system/condvar.h>
-
-namespace NActors {
- class TIOExecutorPool: public TExecutorPoolBase {
- struct TThreadCtx {
- TAutoPtr<TExecutorThread> Thread;
- TThreadParkPad Pad;
- };
-
- TArrayHolder<TThreadCtx> Threads;
- TUnorderedCache<ui32, 512, 4> ThreadQueue;
-
- THolder<NSchedulerQueue::TQueueType> ScheduleQueue;
- TTicketLock ScheduleLock;
-
- const TString PoolName;
- const ui32 ActorSystemIndex = NActors::TActorTypeOperator::GetActorSystemIndex();
- public:
- TIOExecutorPool(ui32 poolId, ui32 threads, const TString& poolName = "", TAffinity* affinity = nullptr);
- explicit TIOExecutorPool(const TIOExecutorPoolConfig& cfg);
- ~TIOExecutorPool();
-
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) override;
-
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
-
- void ScheduleActivationEx(ui32 activation, ui64 revolvingWriteCounter) override;
-
- void Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) override;
- void Start() override;
- void PrepareStop() override;
- void Shutdown() override;
-
- void GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const override;
- TString GetName() const override;
- };
-}
diff --git a/library/cpp/actors/core/executor_pool_united.cpp b/library/cpp/actors/core/executor_pool_united.cpp
deleted file mode 100644
index 960545ffb5..0000000000
--- a/library/cpp/actors/core/executor_pool_united.cpp
+++ /dev/null
@@ -1,1455 +0,0 @@
-#include "executor_pool_united.h"
-#include "executor_pool_united_workers.h"
-
-#include "actor.h"
-#include "balancer.h"
-#include "cpu_state.h"
-#include "executor_thread.h"
-#include "probes.h"
-#include "mailbox.h"
-#include "scheduler_queue.h"
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/cpu_load_log.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/futex.h>
-#include <library/cpp/actors/util/intrinsics.h>
-#include <library/cpp/actors/util/timerfd.h>
-
-#include <util/system/datetime.h>
-#include <util/system/hp_timer.h>
-
-#include <algorithm>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- struct TUnitedWorkers::TWorker: public TNonCopyable {
- TAutoPtr<TExecutorThread> Thread;
- volatile TThreadId ThreadId = UnknownThreadId;
- NSchedulerQueue::TQueueType SchedulerQueue;
- };
-
- struct TUnitedWorkers::TPool: public TNonCopyable {
- TAtomic Waiters = 0; // Number of idle cpus, waiting for activations in this pool
- char Padding[64 - sizeof(TAtomic)];
-
- TUnorderedCache<ui32, 512, 4> Activations; // MPMC-queue for mailbox activations
- TAtomic Active = 0; // Number of mailboxes ready for execution or currently executing
- TAtomic Tokens = 0; // Pending tokens (token is required for worker to start execution, guarantees concurrency limit and activation availability)
- volatile bool StopFlag = false;
-
- // Configuration
- TPoolId PoolId;
- TAtomicBase Concurrency; // Max concurrent workers running this pool
- IExecutorPool* ExecutorPool;
- TMailboxTable* MailboxTable;
- ui64 TimePerMailboxTs;
- ui32 EventsPerMailbox;
-
- // Cpus this pool is allowed to run on
- // Cpus are specified in wake order
- TStackVec<TCpu*, 15> WakeOrderCpus;
-
- ~TPool() {
- while (Activations.Pop(0)) {}
- }
-
- void Stop() {
- AtomicStore(&StopFlag, true);
- }
-
- bool IsUnited() const {
- return WakeOrderCpus.size();
- }
-
- // Add activation of newly scheduled mailbox. Returns generated token (unless concurrency is exceeded)
- bool PushActivation(ui32 activation, ui64 revolvingCounter) {
- Activations.Push(activation, revolvingCounter);
- TAtomicBase active = AtomicIncrement(Active);
- if (active <= Concurrency) { // token generated
- AtomicIncrement(Tokens);
- return true;
- }
- return false;
- }
-
- template <bool Relaxed>
- static bool TryAcquireTokenImpl(TAtomic* tokens) {
- while (true) {
- TAtomicBase value;
- if constexpr (Relaxed) {
- value = RelaxedLoad(tokens);
- } else {
- value = AtomicLoad(tokens);
- }
- if (value > 0) {
- if (AtomicCas(tokens, value - 1, value)) {
- return true; // token acquired
- }
- } else {
- return false; // no more tokens
- }
- }
- }
-
- // Try acquire pending token. Must be done before execution
- bool TryAcquireToken() {
- return TryAcquireTokenImpl<false>(&Tokens);
- }
-
- // Try acquire pending token. Must be done before execution
- bool TryAcquireTokenRelaxed() {
- return TryAcquireTokenImpl<true>(&Tokens);
- }
-
- // Get activation. Requires acquired token.
- void BeginExecution(ui32& activation, ui64 revolvingCounter) {
- while (!RelaxedLoad(&StopFlag)) {
- if (activation = Activations.Pop(++revolvingCounter)) {
- return;
- }
- SpinLockPause();
- }
- activation = 0; // should stop
- }
-
- // End currently active execution and start new one if token is available.
- // Reuses token if it's not destroyed.
- // Returned `true` means successful switch, `activation` is filled.
- // Returned `false` means execution has ended, no need to call StopExecution()
- bool NextExecution(ui32& activation, ui64 revolvingCounter) {
- if (AtomicDecrement(Active) >= Concurrency) { // reuse just released token
- BeginExecution(activation, revolvingCounter);
- return true;
- } else if (TryAcquireToken()) { // another token acquired
- BeginExecution(activation, revolvingCounter);
- return true;
- }
- return false; // no more tokens available
- }
-
- // Stop active execution. Returns released token (unless it is destroyed)
- bool StopExecution() {
- TAtomicBase active = AtomicDecrement(Active);
- if (active >= Concurrency) { // token released
- AtomicIncrement(Tokens);
- return true;
- }
- return false; // token destroyed
- }
-
- // Switch worker context into this pool
- void Switch(TWorkerContext& wctx, ui64 softDeadlineTs, TExecutorThreadStats& stats) {
- wctx.Switch(ExecutorPool, MailboxTable, TimePerMailboxTs, EventsPerMailbox, softDeadlineTs, &stats);
- }
- };
-
- class TPoolScheduler {
- class TSchedulable {
- // Lower PoolBits store PoolId
- // All other higher bits store virtual runtime in cycles
- using TValue = ui64;
- TValue Value;
-
- static constexpr ui64 PoolIdMask = ui64((1ull << PoolBits) - 1);
- static constexpr ui64 VRunTsMask = ~PoolIdMask;
-
- public:
- explicit TSchedulable(TPoolId poolId = MaxPools, ui64 vrunts = 0)
- : Value((poolId & PoolIdMask) | (vrunts & VRunTsMask))
- {}
-
- TPoolId GetPoolId() const {
- return Value & PoolIdMask;
- }
-
- ui64 GetVRunTs() const {
- // Do not truncate pool id
- // NOTE: it decreases accuracy, but improves performance
- return Value;
- }
-
- ui64 GetPreciseVRunTs() const {
- return Value & VRunTsMask;
- }
-
- void SetVRunTs(ui64 vrunts) {
- Value = (Value & PoolIdMask) | (vrunts & VRunTsMask);
- }
-
- void Account(ui64 base, ui64 ts) {
- // Add at least minimum amount to change Value
- SetVRunTs(base + Max(ts, PoolIdMask + 1));
- }
- };
-
- // For min-heap of Items
- struct TCmp {
- bool operator()(TSchedulable lhs, TSchedulable rhs) const {
- return lhs.GetVRunTs() > rhs.GetVRunTs();
- }
- };
-
- TPoolId Size = 0; // total number of pools on this cpu
- TPoolId Current = 0; // index of current pool in `Items`
-
- // The first `Current` items are organized as a binary min-heap -- ready to be scheduled
- // The rest `Size - Current` items are unordered (required to keep track of last vrunts)
- TSchedulable Items[MaxPools]; // virtual runtime in cycles for each pool
- ui64 MinVRunTs = 0; // virtual runtime used by waking pools (system's vrunts)
- ui64 Ts = 0; // real timestamp of current execution start (for accounting)
-
- // Maps PoolId into it's inverse weight
- ui64 InvWeights[MaxPools];
- static constexpr ui64 VRunTsOverflow = ui64(1ull << 62ull) / MaxPoolWeight;
-
- public:
- void AddPool(TPoolId pool, TPoolWeight weight) {
- Items[Size] = TSchedulable(pool, MinVRunTs);
- Size++;
- InvWeights[pool] = MaxPoolWeight / std::clamp(weight ? weight : DefPoolWeight, MinPoolWeight, MaxPoolWeight);
- }
-
- // Iterate over pools in scheduling order
- // intended to be used in a loop of the form:
- // for (TPoolId pool = Begin(); pool != End(); pool = Next())
- TPoolId Begin() {
- // Wrap vruntime around to avoid overflow, if required
- if (Y_UNLIKELY(MinVRunTs >= VRunTsOverflow)) {
- for (TPoolId i = 0; i < Size; i++) {
- ui64 ts = Items[i].GetPreciseVRunTs();
- Items[i].SetVRunTs(ts >= VRunTsOverflow ? ts - VRunTsOverflow : 0);
- }
- MinVRunTs -= VRunTsOverflow;
- }
- Current = Size;
- std::make_heap(Items, Items + Current, TCmp());
- return Next();
- }
-
- constexpr TPoolId End() const {
- return MaxPools;
- }
-
- TPoolId Next() {
- if (Current > 0) {
- std::pop_heap(Items, Items + Current, TCmp());
- Current--;
- return CurrentPool();
- } else {
- return End();
- }
- }
-
- // Scheduling was successful, we are going to run CurrentPool()
- void Scheduled() {
- MinVRunTs = Max(MinVRunTs, Items[Current].GetPreciseVRunTs());
- // NOTE: Ts is propagated on Account() to avoid gaps
- }
-
- // Schedule specific pool that woke up cpu after idle
- void ScheduledAfterIdle(TPoolId pool, ui64 ts) {
- if (Y_UNLIKELY(ts < Ts)) { // anomaly: time goes backwards (e.g. rdtsc is reset to zero on cpu reset)
- Ts = ts; // just skip anomalous time slice
- return;
- }
- MinVRunTs += (ts - Ts) * (MaxPoolWeight / DefPoolWeight); // propagate system's vrunts to blur difference between pools
- Ts = ts; // propagate time w/o accounting to any pool
-
- // Set the specified pool as current; this requires a scan
- for (Current = 0; Current < Size && pool != Items[Current].GetPoolId(); Current++) {}
- Y_ABORT_UNLESS(Current < Size);
- }
-
- // Account currently running pool till now (ts)
- void Account(ui64 ts) {
- // Skip time slice for the first run and when time goes backwards (e.g. rdtsc is reset to zero on cpu reset)
- if (Y_LIKELY(Ts > 0 && Ts <= ts)) {
- TPoolId pool = CurrentPool();
- Y_ABORT_UNLESS(pool < MaxPools);
- Items[Current].Account(MinVRunTs, (ts - Ts) * InvWeights[pool]);
- }
- Ts = ts; // propagate time
- }
-
- TPoolId CurrentPool() const {
- return Items[Current].GetPoolId();
- }
- };
-
- // Cyclic array of timers for idle workers to wait for hard preemption on
- struct TIdleQueue: public TNonCopyable {
- TArrayHolder<TTimerFd> Timers;
- size_t Size;
- TAtomic EnqueueCounter = 0;
- TAtomic DequeueCounter = 0;
-
- explicit TIdleQueue(size_t size)
- : Timers(new TTimerFd[size])
- , Size(size)
- {}
-
- void Stop() {
- for (size_t i = 0; i < Size; i++) {
- Timers[i].Wake();
- }
- }
-
- // Returns timer which new idle-worker should wait for
- TTimerFd* Enqueue() {
- return &Timers[AtomicGetAndIncrement(EnqueueCounter) % Size];
- }
-
- // Returns timer that hard preemption should trigger to wake idle-worker
- TTimerFd* Dequeue() {
- return &Timers[AtomicGetAndIncrement(DequeueCounter) % Size];
- }
- };
-
- // Base class for cpu-local managers that help workers on a single cpu cooperate
- struct TCpuLocalManager: public TThrRefBase {
- TUnitedWorkers* United;
-
- explicit TCpuLocalManager(TUnitedWorkers* united)
- : United(united)
- {}
-
- virtual TWorkerId WorkerCount() const = 0;
- virtual void AddWorker(TWorkerId workerId) = 0;
- virtual void Stop() = 0;
- };
-
- // Represents a cpu with a single associated worker that is able to execute any pool.
- // It always executes the pool assigned by the balancer and switches pools only if the assigned pool has changed
- struct TAssignedCpu: public TCpuLocalManager {
- bool Started = false;
-
- TAssignedCpu(TUnitedWorkers* united)
- : TCpuLocalManager(united)
- {}
-
- TWorkerId WorkerCount() const override {
- return 1;
- }
-
- void AddWorker(TWorkerId workerId) override {
- Y_UNUSED(workerId);
- }
-
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) {
- ui32 activation;
- if (Y_UNLIKELY(!Started)) {
- Started = true;
- } else if (Y_UNLIKELY(United->IsPoolReassigned(wctx))) {
- United->StopExecution(wctx.PoolId); // stop current execution and switch pool if reassigned
- } else if (United->NextExecution(wctx.PoolId, activation, revolvingCounter)) {
- return activation; // another activation from currently executing pool (or 0 if stopped)
- }
-
- // Switch to another pool, it blocks until token is acquired
- if (Y_UNLIKELY(!SwitchPool(wctx))) {
- return 0; // stopped
- }
- United->SwitchPool(wctx, 0);
- United->BeginExecution(wctx.PoolId, activation, revolvingCounter);
- return activation;
- }
-
- void Stop() override {
- }
-
- private:
- // Sets next pool to run, and acquires token, blocks if there are no tokens
- bool SwitchPool(TWorkerContext& wctx) {
- if (Y_UNLIKELY(United->IsStopped())) {
- return false;
- }
-
- // Run balancer (if it's time to)
- United->Balance();
-
- // Select pool to execute
- wctx.PoolId = United->AssignedPool(wctx);
- Y_ABORT_UNLESS(wctx.PoolId != CpuShared);
- if (United->TryAcquireToken(wctx.PoolId)) {
- return true;
- }
-
- // No more work -- wait for activations (spinning, then blocked)
- wctx.PoolId = United->Idle(wctx.PoolId, wctx);
-
- // Wakeup or stop occurred
- if (Y_UNLIKELY(wctx.PoolId == CpuStopped)) {
- return false;
- }
- return true; // United->Idle() has already acquired token
- }
- };
-
- // Lock-free data structure that helps workers on a single cpu discover their state and perform hard preemptions
- struct TSharedCpu: public TCpuLocalManager {
- // Current lease
- volatile TLease::TValue CurrentLease;
- char Padding1[64 - sizeof(TLease)];
-
- // Slow pools
- // the highest bit: 1=wait-for-slow-workers mode 0=else
- // any lower bit (poolId is bit position): 1=pool-is-slow 0=pool-is-fast
- volatile TPoolsMask SlowPoolsMask = 0;
- char Padding2[64 - sizeof(TPoolsMask)];
-
- // Must be accessed under never expiring lease to avoid races
- TPoolScheduler PoolSched;
- TWorkerId FastWorker = MaxWorkers;
- TTimerFd* PreemptionTimer = nullptr;
- ui64 HardPreemptionTs = 0;
- bool Started = false;
-
- TIdleQueue IdleQueue;
-
- struct TConfig {
- const TCpuId CpuId;
- const TWorkerId Workers;
- ui64 SoftLimitTs;
- ui64 HardLimitTs;
- ui64 EventLimitTs;
- ui64 LimitPrecisionTs;
- const int IdleWorkerPriority;
- const int FastWorkerPriority;
- const bool NoRealtime;
- const bool NoAffinity;
- const TCpuAllocation CpuAlloc;
-
- TConfig(const TCpuAllocation& allocation, const TUnitedWorkersConfig& united)
- : CpuId(allocation.CpuId)
- , Workers(allocation.AllowedPools.size() + 1)
- , SoftLimitTs(Us2Ts(united.PoolLimitUs))
- , HardLimitTs(Us2Ts(united.PoolLimitUs + united.EventLimitUs))
- , EventLimitTs(Us2Ts(united.EventLimitUs))
- , LimitPrecisionTs(Us2Ts(united.LimitPrecisionUs))
- , IdleWorkerPriority(std::clamp<ui64>(united.IdleWorkerPriority ? united.IdleWorkerPriority : 20, 1, 99))
- , FastWorkerPriority(std::clamp<ui64>(united.FastWorkerPriority ? united.FastWorkerPriority : 10, 1, IdleWorkerPriority - 1))
- , NoRealtime(united.NoRealtime)
- , NoAffinity(united.NoAffinity)
- , CpuAlloc(allocation)
- {}
- };
-
- TConfig Config;
- TVector<TWorkerId> Workers;
-
- TSharedCpu(const TConfig& cfg, TUnitedWorkers* united)
- : TCpuLocalManager(united)
- , IdleQueue(cfg.Workers)
- , Config(cfg)
- {
- for (const auto& pa : Config.CpuAlloc.AllowedPools) {
- PoolSched.AddPool(pa.PoolId, pa.Weight);
- }
- }
-
- TWorkerId WorkerCount() const override {
- return Config.Workers;
- }
-
- void AddWorker(TWorkerId workerId) override {
- if (Workers.empty()) {
- // Grant lease to the first worker
- AtomicStore(&CurrentLease, TLease(workerId, NeverExpire).Value);
- }
- Workers.push_back(workerId);
- }
-
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) {
- ui32 activation;
- if (!wctx.Lease.IsNeverExpiring()) {
- if (wctx.SoftDeadlineTs < GetCycleCountFast()) { // stop if lease has expired or is near to be expired
- United->StopExecution(wctx.PoolId);
- } else if (United->NextExecution(wctx.PoolId, activation, revolvingCounter)) {
- return activation; // another activation from currently executing pool (or 0 if stopped)
- }
- }
-
- // Switch to another pool, it blocks until token is acquired
- if (Y_UNLIKELY(!SwitchPool(wctx))) {
- return 0; // stopped
- }
- United->BeginExecution(wctx.PoolId, activation, revolvingCounter);
- return activation;
- }
-
- void Stop() override {
- IdleQueue.Stop();
- }
-
- private:
- enum EPriority {
- IdlePriority, // highest (real-time, Config.IdleWorkerPriority)
- FastPriority, // normal (real-time, Config.FastWorkerPriority)
- SlowPriority, // lowest (not real-time)
- };
-
- enum EWorkerAction {
- // Fast-worker
- ExecuteFast,
- WaitForSlow,
-
- // Slow-worker
- BecameIdle,
- WakeFast,
-
- // Idle-worker
- BecameFast,
- Standby,
-
- // Common
- Stopped,
- };
-
- // Thread-safe; should be called from worker
- // Blocks for idle-workers; sets lease and next pool to run
- bool SwitchPool(TWorkerContext& wctx) {
- TTimerFd* idleTimer = nullptr;
- while (true) {
- if (DisablePreemptionAndTryExtend(wctx.Lease)) { // if fast-worker
- if (Y_UNLIKELY(!Started)) {
- SetPriority(0, FastPriority);
- Started = true;
- }
- while (true) {
- switch (FastWorkerAction(wctx)) {
- case ExecuteFast:
- United->SwitchPool(wctx, wctx.Lease.GetPreciseExpireTs() - Config.EventLimitTs);
- EnablePreemptionAndGrant(wctx.Lease);
- return true;
- case WaitForSlow:
- FastWorkerSleep(GetCycleCountFast() + Config.SoftLimitTs);
- break;
- case Stopped: return false;
- default: Y_ABORT();
- }
- }
- } else if (wctx.Lease.IsNeverExpiring()) { // if idle-worker
- switch (IdleWorkerAction(idleTimer, wctx.Lease.GetWorkerId())) {
- case BecameFast:
- SetPriority(0, FastPriority);
- break; // try acquire new lease
- case Standby:
- if (!idleTimer) {
- idleTimer = IdleQueue.Enqueue();
- }
- SetPriority(0, IdlePriority);
- idleTimer->Wait();
- break;
- case Stopped: return false;
- default: Y_ABORT();
- }
- } else { // lease has expired and hard preemption occurred, so we are executing in a slow-worker
- wctx.IncrementPreemptedEvents();
- switch (SlowWorkerAction(wctx.PoolId)) {
- case WakeFast:
- WakeFastWorker();
- [[fallthrough]]; // no break; pass through
- case BecameIdle:
- wctx.Lease = wctx.Lease.NeverExpire();
- wctx.PoolId = MaxPools;
- idleTimer = nullptr;
- break;
- case Stopped: return false;
- default: Y_ABORT();
- }
- }
- }
- }
-
- enum ETryRunPool {
- RunFastPool,
- RunSlowPool,
- NoTokens,
- };
-
- ETryRunPool TryRun(TPoolId pool) {
- while (true) {
- // updates WaitPoolsFlag in SlowPoolsMask according to scheduled pool slowness
- TPoolsMask slow = AtomicLoad(&SlowPoolsMask);
- if ((1ull << pool) & slow) { // we are about to execute slow pool (fast-worker will just wait, token is NOT required)
- if (slow & WaitPoolsFlag) {
- return RunSlowPool; // wait flag is already set
- } else {
- if (AtomicCas(&SlowPoolsMask, slow | WaitPoolsFlag, slow)) { // try set wait flag
- return RunSlowPool; // wait flag has been successfully set
- }
- }
- } else { // we are about to execute fast pool, token required
- if (slow & WaitPoolsFlag) { // reset wait flag if required
- if (AtomicCas(&SlowPoolsMask, slow & ~WaitPoolsFlag, slow)) { // try reset wait flag
- return United->TryAcquireToken(pool) ? RunFastPool : NoTokens; // wait flag has been successfully reset
- }
- } else {
- return United->TryAcquireToken(pool) ? RunFastPool : NoTokens; // wait flag is already reset
- }
- }
- }
- }
-
- EWorkerAction FastWorkerAction(TWorkerContext& wctx) {
- if (Y_UNLIKELY(United->IsStopped())) {
- return Stopped;
- }
-
- // Account current pool
- ui64 ts = GetCycleCountFast();
- PoolSched.Account(ts);
-
- // Select next pool to execute
- for (wctx.PoolId = PoolSched.Begin(); wctx.PoolId != PoolSched.End(); wctx.PoolId = PoolSched.Next()) {
- switch (TryRun(wctx.PoolId)) {
- case RunFastPool:
- PoolSched.Scheduled();
- wctx.Lease = PostponePreemption(wctx.Lease.GetWorkerId(), ts);
- return ExecuteFast;
- case RunSlowPool:
- PoolSched.Scheduled();
- ResetPreemption(wctx.Lease.GetWorkerId(), ts); // there is no point in preemption during wait
- return WaitForSlow;
- case NoTokens: // concurrency limit reached, or no more work in pool
- break; // just try next pool (if any)
- }
- }
-
- // No more work, no slow-workers -- wait for activations (active, then blocked)
- wctx.PoolId = United->Idle(CpuShared, wctx);
-
- // Wakeup or stop occurred
- if (Y_UNLIKELY(wctx.PoolId == CpuStopped)) {
- return Stopped;
- }
- ts = GetCycleCountFast();
- PoolSched.ScheduledAfterIdle(wctx.PoolId, ts);
- wctx.Lease = PostponePreemption(wctx.Lease.GetWorkerId(), ts);
- return ExecuteFast; // United->Idle() has already acquired token
- }
-
- EWorkerAction IdleWorkerAction(TTimerFd* idleTimer, TWorkerId workerId) {
- if (Y_UNLIKELY(United->IsStopped())) {
- return Stopped;
- }
- if (!idleTimer) { // either the worker has just started or has become idle -- hard preemption is not required
- return Standby;
- }
-
- TLease lease = TLease(AtomicLoad(&CurrentLease));
- ui64 ts = GetCycleCountFast();
- if (lease.GetExpireTs() < ts) { // current lease has expired
- if (TryBeginHardPreemption(lease)) {
- SetPoolIsSlowFlag(PoolSched.CurrentPool());
- TWorkerId preempted = lease.GetWorkerId();
- SetPriority(United->GetWorkerThreadId(preempted), SlowPriority);
- LWPROBE(HardPreemption, Config.CpuId, PoolSched.CurrentPool(), preempted, workerId);
- EndHardPreemption(workerId);
- return BecameFast;
- } else {
- // Lease has been changed just now, no way we need preemption right now, so no retry needed
- return Standby;
- }
- } else {
- // Lease has not expired yet (maybe never expiring lease)
- return Standby;
- }
- }
-
- EWorkerAction SlowWorkerAction(TPoolId pool) {
- if (Y_UNLIKELY(United->IsStopped())) {
- return Stopped;
- }
- while (true) {
- TPoolsMask slow = AtomicLoad(&SlowPoolsMask);
- if (slow & (1ull << pool)) {
- if (slow == ((1ull << pool) | WaitPoolsFlag)) { // the last slow pool is about to become fast
- if (AtomicCas(&SlowPoolsMask, 0, slow)) { // reset both pool-is-slow flag and WaitPoolsFlag
- return WakeFast;
- }
- } else { // there are (a) several slow-workers or (b) one slow-worker without a waiting fast-worker
- if (AtomicCas(&SlowPoolsMask, slow & ~(1ull << pool), slow)) { // reset pool-is-slow flag
- return BecameIdle;
- }
- }
- } else {
- // SlowWorkerAction has been called between TryBeginHardPreemption and SetPoolIsSlowFlag
- // flag for this pool is not set yet, but we can be sure pool is slow:
- // - because SlowWorkerAction has been called;
- // - this means the lease has expired and hard preemption occurred.
- // So just wait for the other worker to call SetPoolIsSlowFlag
- LWPROBE(SlowWorkerActionRace, Config.CpuId, pool, slow);
- }
- }
- }
-
- void SetPoolIsSlowFlag(TPoolId pool) {
- while (true) {
- TPoolsMask slow = AtomicLoad(&SlowPoolsMask);
- if ((slow & (1ull << pool)) == 0) { // if pool is fast
- if (AtomicCas(&SlowPoolsMask, slow | (1ull << pool), slow)) { // set pool-is-slow flag
- return;
- }
- } else {
- Y_ABORT("two slow-workers executing the same pool on the same core");
- return; // pool is already slow
- }
- }
- }
-
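- // Hard preemption handshake: an idle worker CASes the expired lease to the HardPreemptionLease
- // sentinel (only one contender can win), demotes the preempted worker, and then EndHardPreemption()
- // installs a fresh never-expiring lease for the new fast worker.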
- bool TryBeginHardPreemption(TLease lease) {
- return AtomicCas(&CurrentLease, HardPreemptionLease, lease);
- }
-
- void EndHardPreemption(TWorkerId to) {
- ATOMIC_COMPILER_BARRIER();
- if (!AtomicCas(&CurrentLease, TLease(to, NeverExpire), HardPreemptionLease)) {
- Y_ABORT("hard preemption failed");
- }
- }
-
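- // The two helpers below temporarily replace the current lease with its never-expiring variant so
- // the holder cannot be hard-preempted in between; EnablePreemptionAndGrant() then installs the
- // given lease back and aborts if the never-expiring lease was changed concurrently.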
- bool DisablePreemptionAndTryExtend(TLease lease) {
- return AtomicCas(&CurrentLease, lease.NeverExpire(), lease);
- }
-
- void EnablePreemptionAndGrant(TLease lease) {
- ATOMIC_COMPILER_BARRIER();
- if (!AtomicCas(&CurrentLease, lease, lease.NeverExpire())) {
- Y_ABORT("lease grant failed");
- }
- }
-
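- // Sleep until either the last slow pool clears WaitPoolsFlag (WakeFast) or the deadline passes.
- // On linux this parks on a futex over the upper half of SlowPoolsMask, so WakeFastWorker() can
- // interrupt the wait early; on other platforms it falls back to a plain NanoSleep.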
- void FastWorkerSleep(ui64 deadlineTs) {
- while (true) {
- TPoolsMask slow = AtomicLoad(&SlowPoolsMask);
- if ((slow & WaitPoolsFlag) == 0) {
- return; // woken by WakeFast action
- }
- ui64 ts = GetCycleCountFast();
- if (deadlineTs <= ts) {
- if (AtomicCas(&SlowPoolsMask, slow & ~WaitPoolsFlag, slow)) { // try reset wait flag
- return; // wait flag has been successfully reset after timeout
- }
- } else { // should wait
- ui64 timeoutNs = Ts2Ns(deadlineTs - ts);
-#ifdef _linux_
- timespec timeout;
- timeout.tv_sec = timeoutNs / 1'000'000'000;
- timeout.tv_nsec = timeoutNs % 1'000'000'000;
- SysFutex(FastWorkerFutex(), FUTEX_WAIT_PRIVATE, FastWorkerFutexValue(slow), &timeout, nullptr, 0);
-#else
- NanoSleep(timeoutNs); // non-linux wake is not supported, cpu will go idle on slow -> fast switch
-#endif
- }
- }
- }
-
- void WakeFastWorker() {
-#ifdef _linux_
- SysFutex(FastWorkerFutex(), FUTEX_WAKE_PRIVATE, 1, nullptr, nullptr, 0);
-#endif
- }
-
-#ifdef _linux_
- ui32* FastWorkerFutex() {
- // Actually we wait on the single highest bit, but the futex value size is 4 bytes on all platforms
- static_assert(sizeof(TPoolsMask) >= 4, "cannot be used as futex value on linux");
- return (ui32*)&SlowPoolsMask + 1; // higher 32 bits (little endian assumed)
- }
-
- ui32 FastWorkerFutexValue(TPoolsMask slow) {
- return ui32(slow >> 32); // higher 32 bits
- }
-#endif
-
- void SetPriority(TThreadId tid, EPriority priority) {
- if (Config.NoRealtime) {
- return;
- }
-#ifdef _linux_
- int policy;
- struct sched_param param;
- switch (priority) {
- case IdlePriority:
- policy = SCHED_FIFO;
- param.sched_priority = Config.IdleWorkerPriority;
- break;
- case FastPriority:
- policy = SCHED_FIFO;
- param.sched_priority = Config.FastWorkerPriority;
- break;
- case SlowPriority:
- policy = SCHED_OTHER;
- param.sched_priority = 0;
- break;
- }
- int ret = sched_setscheduler(tid, policy, &param);
- switch (ret) {
- case 0: return;
- case EINVAL:
- Y_ABORT("sched_setscheduler(%" PRIu64 ", %d, %d) -> EINVAL", tid, policy, param.sched_priority);
- case EPERM:
- // Requirements:
- // * CAP_SYS_NICE capability to run real-time processes and set cpu affinity.
- // Either run under root or set application capabilities:
- // sudo setcap cap_sys_nice=eip BINARY
- // * Non-zero rt-runtime (in case cgroups are used).
- // Either (a) disable global limit on RT processes bandwidth:
- // sudo sysctl -w kernel.sched_rt_runtime_us=-1
- // Or (b) set non-zero rt-runtime for your cgroup:
- // echo -1 > /sys/fs/cgroup/cpu/[cgroup]/cpu.rt_runtime_us
- // (also set the same value for every parent cgroup)
- // https://www.kernel.org/doc/Documentation/scheduler/sched-rt-group.txt
- Y_ABORT("sched_setscheduler(%" PRIu64 ", %d, %d) -> EPERM", tid, policy, param.sched_priority);
- case ESRCH:
- Y_ABORT("sched_setscheduler(%" PRIu64 ", %d, %d) -> ESRCH", tid, policy, param.sched_priority);
- default:
- Y_ABORT("sched_setscheduler(%" PRIu64 ", %d, %d) -> %d", tid, policy, param.sched_priority, ret);
- }
-#else
- Y_UNUSED(tid);
- Y_UNUSED(priority);
-#endif
- }
-
- void ResetPreemption(TWorkerId fastWorkerId, ui64 ts) {
- if (Y_UNLIKELY(!PreemptionTimer)) {
- return;
- }
- if (FastWorker == fastWorkerId && HardPreemptionTs > 0) {
- PreemptionTimer->Reset();
- LWPROBE(ResetPreemptionTimer, Config.CpuId, FastWorker, PreemptionTimer->Fd, Ts2Ms(ts), Ts2Ms(HardPreemptionTs));
- HardPreemptionTs = 0;
- }
- }
-
- TLease PostponePreemption(TWorkerId fastWorkerId, ui64 ts) {
- // Select new timer after hard preemption
- if (FastWorker != fastWorkerId) {
- FastWorker = fastWorkerId;
- PreemptionTimer = IdleQueue.Dequeue();
- HardPreemptionTs = 0;
- }
-
- ui64 hardPreemptionTs = ts + Config.HardLimitTs;
- if (hardPreemptionTs > HardPreemptionTs) {
- // Reset the timer (at most once per TickIntervalTs, sacrificing precision)
- HardPreemptionTs = hardPreemptionTs + Config.LimitPrecisionTs;
- PreemptionTimer->Set(HardPreemptionTs);
- LWPROBE(SetPreemptionTimer, Config.CpuId, FastWorker, PreemptionTimer->Fd, Ts2Ms(ts), Ts2Ms(HardPreemptionTs));
- }
-
- return TLease(fastWorkerId, hardPreemptionTs);
- }
- };
-
- // Proxy for starting and switching TUnitedExecutorPool-s on a single cpu via GetReadyActivation()
- // (does not implement any other method of IExecutorPool)
- class TCpuExecutorPool: public IExecutorPool {
- const TString Name;
-
- public:
- explicit TCpuExecutorPool(const TString& name)
- : IExecutorPool(MaxPools)
- , Name(name)
- {}
-
- TString GetName() const override {
- return Name;
- }
-
- void SetRealTimeMode() const override {
- // derived classes control rt-priority - do nothing
- }
-
- // Should never be called
- void ReclaimMailbox(TMailboxType::EType, ui32, TWorkerId, ui64) override { Y_ABORT(); }
- TMailboxHeader *ResolveMailbox(ui32) override { Y_ABORT(); }
- void Schedule(TInstant, TAutoPtr<IEventHandle>, ISchedulerCookie*, TWorkerId) override { Y_ABORT(); }
- void Schedule(TMonotonic, TAutoPtr<IEventHandle>, ISchedulerCookie*, TWorkerId) override { Y_ABORT(); }
- void Schedule(TDuration, TAutoPtr<IEventHandle>, ISchedulerCookie*, TWorkerId) override { Y_ABORT(); }
- bool Send(TAutoPtr<IEventHandle>&) override { Y_ABORT(); }
- bool SpecificSend(TAutoPtr<IEventHandle>&) override { Y_ABORT(); }
- void ScheduleActivation(ui32) override { Y_ABORT(); }
- void SpecificScheduleActivation(ui32) override { Y_ABORT(); }
- void ScheduleActivationEx(ui32, ui64) override { Y_ABORT(); }
- TActorId Register(IActor*, TMailboxType::EType, ui64, const TActorId&) override { Y_ABORT(); }
- TActorId Register(IActor*, TMailboxHeader*, ui32, const TActorId&) override { Y_ABORT(); }
- void Prepare(TActorSystem*, NSchedulerQueue::TReader**, ui32*) override { Y_ABORT(); }
- void Start() override { Y_ABORT(); }
- void PrepareStop() override { Y_ABORT(); }
- void Shutdown() override { Y_ABORT(); }
- bool Cleanup() override { Y_ABORT(); }
- };
-
- // Proxy executor pool working with cpu-local scheduler (aka actorsystem 2.0)
- class TSharedCpuExecutorPool: public TCpuExecutorPool {
- TSharedCpu* Local;
- TIntrusivePtr<TAffinity> SingleCpuAffinity; // no migration support yet
- public:
- explicit TSharedCpuExecutorPool(TSharedCpu* local, const TUnitedWorkersConfig& config)
- : TCpuExecutorPool("u-" + ToString(local->Config.CpuId))
- , Local(local)
- , SingleCpuAffinity(config.NoAffinity ? nullptr : new TAffinity(TCpuMask(local->Config.CpuId)))
- {}
-
- TAffinity* Affinity() const override {
- return SingleCpuAffinity.Get();
- }
-
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) override {
- return Local->GetReadyActivation(wctx, revolvingCounter);
- }
- };
-
- // Proxy executor pool working with balancer and assigned pools (aka actorsystem 1.5)
- class TAssignedCpuExecutorPool: public TCpuExecutorPool {
- TAssignedCpu* Local;
- TIntrusivePtr<TAffinity> CpuAffinity;
- public:
- explicit TAssignedCpuExecutorPool(TAssignedCpu* local, const TUnitedWorkersConfig& config)
- : TCpuExecutorPool("United")
- , Local(local)
- , CpuAffinity(config.NoAffinity ? nullptr : new TAffinity(config.Allowed))
- {}
-
- TAffinity* Affinity() const override {
- return CpuAffinity.Get();
- }
-
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) override {
- return Local->GetReadyActivation(wctx, revolvingCounter);
- }
- };
-
- // Representation of a single cpu and its state visible to other cpus and pools
- struct TUnitedWorkers::TCpu: public TNonCopyable {
- struct TScopedWaiters {
- TCpu& Cpu;
- TPool* AssignedPool; // nullptr if CpuShared
-
- // Subscribe on wakeups from allowed pools
- TScopedWaiters(TCpu& cpu, TPool* assignedPool) : Cpu(cpu), AssignedPool(assignedPool) {
- if (!AssignedPool) {
- for (TPool* pool : Cpu.AllowedPools) {
- AtomicIncrement(pool->Waiters);
- }
- } else {
- AtomicIncrement(AssignedPool->Waiters);
- }
- }
-
- // Unsubscribe from pools we've subscribed on
- ~TScopedWaiters() {
- if (!AssignedPool) {
- for (TPool* pool : Cpu.AllowedPools) {
- AtomicDecrement(pool->Waiters);
- }
- } else {
- AtomicDecrement(AssignedPool->Waiters);
- }
- }
- };
-
- // Current cpu state important for other cpus and balancer
- TCpuState State;
-
- // Thread-safe per pool stats
- // NOTE: It's guaranteed that a cpu never executes two instances of the same pool
- TVector<TExecutorThreadStats> PoolStats;
- TCpuLoadLog<1024> LoadLog;
-
-
- // Configuration
- TCpuId CpuId;
- THolder<TCpuLocalManager> LocalManager;
- THolder<TCpuExecutorPool> ExecutorPool;
-
- // Pools allowed to run on this cpu
- TStackVec<TPool*, 15> AllowedPools;
-
- void Stop() {
- if (LocalManager) {
- State.Stop();
- LocalManager->Stop();
- }
- }
-
- bool StartSpinning(TUnitedWorkers* united, TPool* assignedPool, TPoolId& result) {
- // Mark cpu as idle
- if (Y_UNLIKELY(!State.StartSpinning())) {
- result = CpuStopped;
- return true;
- }
-
- // Avoid using multiple atomic seq_cst loads in the loop, use a barrier once and relaxed ops
- AtomicBarrier();
-
- // Check there are no pending tokens (they can be released before the Waiters increment)
- if (!assignedPool) {
- for (TPool* pool : AllowedPools) {
- if (pool->TryAcquireTokenRelaxed()) {
- result = WakeWithTokenAcquired(united, pool->PoolId);
- return true; // token acquired or stop
- }
- }
- } else {
- if (assignedPool->TryAcquireTokenRelaxed()) {
- result = WakeWithTokenAcquired(united, assignedPool->PoolId);
- return true; // token acquired or stop
- }
- }
-
- // At this point we can be sure wakeup won't be lost
- // So we can actively spin or block w/o checking for pending tokens
- return false;
- }
-
- bool ActiveWait(ui64 spinThresholdTs, TPoolId& result) {
- ui64 ts = GetCycleCountFast();
- LoadLog.RegisterBusyPeriod(ts);
- ui64 deadline = ts + spinThresholdTs;
- while (GetCycleCountFast() < deadline) {
- for (ui32 i = 0; i < 12; ++i) {
- TPoolId current = State.CurrentPool();
- if (current == CpuSpinning) {
- SpinLockPause();
- } else {
- result = current;
- LoadLog.RegisterIdlePeriod(GetCycleCountFast());
- return true; // wakeup
- }
- }
- }
- return false; // spin threshold exceeded, no wakeups
- }
-
- bool StartBlocking(TPoolId& result) {
- // Switch into blocked state
- if (State.StartBlocking()) {
- result = State.CurrentPool();
- return true;
- } else {
- return false;
- }
- }
-
- bool BlockedWait(TPoolId& result, ui64 timeoutNs) {
- return State.Block(timeoutNs, result);
- }
-
- void SwitchPool(TPoolId pool) {
- return State.SwitchPool(pool);
- }
-
- private:
- TPoolId WakeWithTokenAcquired(TUnitedWorkers* united, TPoolId token) {
- switch (State.WakeWithTokenAcquired(token)) {
- case TCpuState::Woken: // we've got token and successfully woken up this cpu
- // NOTE: the sending thread may also wake up another worker, which won't be able to acquire a token and will go idle (that's ok)
- return token;
- case TCpuState::NotIdle: { // a wakeup event has also occurred
- TPoolId wakeup = State.CurrentPool();
- if (wakeup != token) { // token and wakeup for different pools
- united->TryWake(wakeup); // rewake another cpu to avoid losing wakeup
- }
- return token;
- }
- case TCpuState::Forbidden:
- Y_ABORT();
- case TCpuState::Stopped:
- return CpuStopped;
- }
- }
- };
-
- TUnitedWorkers::TUnitedWorkers(
- const TUnitedWorkersConfig& config,
- const TVector<TUnitedExecutorPoolConfig>& unitedPools,
- const TCpuAllocationConfig& allocation,
- IBalancer* balancer)
- : Balancer(balancer)
- , Config(config)
- , Allocation(allocation)
- {
- // Find max pool id and initialize pools
- PoolCount = 0;
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- for (const auto& pa : cpuAlloc.AllowedPools) {
- PoolCount = Max<size_t>(PoolCount, pa.PoolId + 1);
- }
- }
- Pools.Reset(new TPool[PoolCount]);
-
- // Find max cpu id and initialize cpus
- CpuCount = 0;
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- CpuCount = Max<size_t>(CpuCount, cpuAlloc.CpuId + 1);
- }
- Cpus.Reset(new TCpu[CpuCount]);
-
- // Setup allocated cpus
- // NOTE: leave gaps for not allocated cpus (default-initialized)
- WorkerCount = 0;
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- TCpu& cpu = Cpus[cpuAlloc.CpuId];
- cpu.CpuId = cpuAlloc.CpuId;
- cpu.PoolStats.resize(PoolCount); // NOTE: also may have gaps
- for (const auto& pa : cpuAlloc.AllowedPools) {
- cpu.AllowedPools.emplace_back(&Pools[pa.PoolId]);
- }
-
- // Setup balancing and cpu-local manager
- if (!Balancer->AddCpu(cpuAlloc, &cpu.State)) {
- cpu.State.SwitchPool(0); // set initial state to non-idle to avoid losing wakeups on start
- cpu.State.AssignPool(CpuShared);
- TSharedCpu* local = new TSharedCpu(TSharedCpu::TConfig(cpuAlloc, Config), this);
- cpu.LocalManager.Reset(local);
- cpu.ExecutorPool.Reset(new TSharedCpuExecutorPool(local, Config));
- } else {
- TAssignedCpu* local = new TAssignedCpu(this);
- cpu.LocalManager.Reset(local);
- cpu.ExecutorPool.Reset(new TAssignedCpuExecutorPool(local, Config));
- }
- WorkerCount += cpu.LocalManager->WorkerCount();
- }
-
- // Initialize workers
- Workers.Reset(new TWorker[WorkerCount]);
-
- // Setup pools
- // NOTE: leave gaps for not united pools (default-initialized)
- for (const TUnitedExecutorPoolConfig& cfg : unitedPools) {
- TPool& pool = Pools[cfg.PoolId];
- Y_ABORT_UNLESS(cfg.PoolId < MaxPools);
- pool.PoolId = cfg.PoolId;
- pool.Concurrency = cfg.Concurrency ? cfg.Concurrency : Config.CpuCount;
- pool.ExecutorPool = nullptr; // should be set later using SetupPool()
- pool.MailboxTable = nullptr; // should be set later using SetupPool()
- pool.TimePerMailboxTs = DurationToCycles(cfg.TimePerMailbox);
- pool.EventsPerMailbox = cfg.EventsPerMailbox;
-
- // Reinitialize per cpu pool stats with right MaxActivityType
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- TCpu& cpu = Cpus[cpuAlloc.CpuId];
- cpu.PoolStats[cfg.PoolId] = TExecutorThreadStats();
- }
-
- // Setup WakeOrderCpus: left-to-right exclusive cpus, then left-to-right shared cpus.
- // Waking exclusive cpus first reduces load on shared cpus and improves latency isolation, which is
- // the point of using exclusive cpus. But note that the number of actively spinning idle cpus may increase,
- // so cpu consumption under light load is higher.
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- TCpu& cpu = Cpus[cpuAlloc.CpuId];
- if (cpu.AllowedPools.size() == 1 && cpu.AllowedPools[0] == &pool) {
- pool.WakeOrderCpus.emplace_back(&cpu);
- }
- }
- for (const TCpuAllocation& cpuAlloc : allocation.Items) {
- TCpu& cpu = Cpus[cpuAlloc.CpuId];
- if (cpu.AllowedPools.size() > 1 && cpuAlloc.HasPool(pool.PoolId)) {
- pool.WakeOrderCpus.emplace_back(&cpu);
- }
- }
- }
- }
-
- TUnitedWorkers::~TUnitedWorkers() {
- }
-
- void TUnitedWorkers::Prepare(TActorSystem* actorSystem, TVector<NSchedulerQueue::TReader*>& scheduleReaders) {
- // Setup allocated cpus
- // NOTE: leave gaps for not allocated cpus (default-initialized)
- TWorkerId workers = 0;
- for (TCpuId cpuId = 0; cpuId < CpuCount; cpuId++) {
- TCpu& cpu = Cpus[cpuId];
-
- // Setup cpu-local workers
- if (cpu.LocalManager) {
- for (i16 i = 0; i < cpu.LocalManager->WorkerCount(); i++) {
- TWorkerId workerId = workers++;
- cpu.LocalManager->AddWorker(workerId);
-
- // Setup worker
- Y_ABORT_UNLESS(workerId < WorkerCount);
- Workers[workerId].Thread.Reset(new TExecutorThread(
- workerId,
- cpu.CpuId,
- actorSystem,
- cpu.ExecutorPool.Get(), // use cpu-local manager as proxy executor for all workers on cpu
- nullptr, // MailboxTable is pool-specific, will be set on pool switch
- cpu.ExecutorPool->GetName()));
- // NOTE: TWorker::ThreadId will be initialized later in Start()
-
- scheduleReaders.push_back(&Workers[workerId].SchedulerQueue.Reader);
- }
- }
- }
- }
-
- void TUnitedWorkers::Start() {
- for (TWorkerId workerId = 0; workerId < WorkerCount; workerId++) {
- Workers[workerId].Thread->Start();
- }
- for (TWorkerId workerId = 0; workerId < WorkerCount; workerId++) {
- AtomicStore(&Workers[workerId].ThreadId, Workers[workerId].Thread->GetThreadId());
- }
- }
-
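- // On linux, spin until the worker has published its thread id (stored by Start() above);
- // other platforms just return the stored value.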
- inline TThreadId TUnitedWorkers::GetWorkerThreadId(TWorkerId workerId) const {
- volatile TThreadId* threadId = &Workers[workerId].ThreadId;
-#ifdef _linux_
- while (AtomicLoad(threadId) == UnknownThreadId) {
- NanoSleep(1000);
- }
-#endif
- return AtomicLoad(threadId);
- }
-
- inline NSchedulerQueue::TWriter* TUnitedWorkers::GetScheduleWriter(TWorkerId workerId) const {
- return &Workers[workerId].SchedulerQueue.Writer;
- }
-
- void TUnitedWorkers::SetupPool(TPoolId pool, IExecutorPool* executorPool, TMailboxTable* mailboxTable) {
- Pools[pool].ExecutorPool = executorPool;
- Pools[pool].MailboxTable = mailboxTable;
- }
-
- void TUnitedWorkers::PrepareStop() {
- AtomicStore(&StopFlag, true);
- for (TPoolId pool = 0; pool < PoolCount; pool++) {
- Pools[pool].Stop();
- }
- for (TCpuId cpuId = 0; cpuId < CpuCount; cpuId++) {
- Cpus[cpuId].Stop();
- }
- }
-
- void TUnitedWorkers::Shutdown() {
- for (TWorkerId workerId = 0; workerId < WorkerCount; workerId++) {
- Workers[workerId].Thread->Join();
- }
- }
-
- inline void TUnitedWorkers::PushActivation(TPoolId pool, ui32 activation, ui64 revolvingCounter) {
- if (Pools[pool].PushActivation(activation, revolvingCounter)) { // token generated
- TryWake(pool);
- }
- }
-
- inline bool TUnitedWorkers::TryAcquireToken(TPoolId pool) {
- return Pools[pool].TryAcquireToken();
- }
-
- inline void TUnitedWorkers::TryWake(TPoolId pool) {
- // Avoid using multiple atomic seq_cst loads in the loop, use a barrier once
- AtomicBarrier();
-
- // Scan every allowed cpu in pool's wakeup order and try to wake the first idle cpu
- if (RelaxedLoad(&Pools[pool].Waiters) > 0) {
- for (TCpu* cpu : Pools[pool].WakeOrderCpus) {
- if (cpu->State.WakeWithoutToken(pool) == TCpuState::Woken) {
- return; // successful wake up
- }
- }
- }
-
- // Cpu has not been woken up
- }
-
- inline void TUnitedWorkers::BeginExecution(TPoolId pool, ui32& activation, ui64 revolvingCounter) {
- Pools[pool].BeginExecution(activation, revolvingCounter);
- }
-
- inline bool TUnitedWorkers::NextExecution(TPoolId pool, ui32& activation, ui64 revolvingCounter) {
- return Pools[pool].NextExecution(activation, revolvingCounter);
- }
-
- inline void TUnitedWorkers::StopExecution(TPoolId pool) {
- if (Pools[pool].StopExecution()) { // pending token
- TryWake(pool);
- }
- }
-
- inline void TUnitedWorkers::Balance() {
- ui64 ts = GetCycleCountFast();
- if (Balancer->TryLock(ts)) {
- for (TPoolId pool = 0; pool < PoolCount; pool++) {
- if (Pools[pool].IsUnited()) {
- ui64 ElapsedTs = 0;
- ui64 ParkedTs = 0;
- TStackVec<TCpuLoadLog<1024>*, 128> logs;
- ui64 worstActivationTimeUs = 0;
- for (TCpu* cpu : Pools[pool].WakeOrderCpus) {
- TExecutorThreadStats& cpuStats = cpu->PoolStats[pool];
- ElapsedTs += cpuStats.ElapsedTicks;
- ParkedTs += cpuStats.ParkedTicks;
- worstActivationTimeUs = Max(worstActivationTimeUs, cpuStats.WorstActivationTimeUs);
- AtomicStore<decltype(cpuStats.WorstActivationTimeUs)>(&cpuStats.WorstActivationTimeUs, 0ul);
- logs.push_back(&cpu->LoadLog);
- }
- ui64 minPeriodTs = Min(ui64(Us2Ts(Balancer->GetPeriodUs())), ui64((1024ull-2ull)*64ull*128ull*1024ull));
- ui64 estimatedTs = MinusOneCpuEstimator.MaxLatencyIncreaseWithOneLessCpu(
- &logs[0], logs.size(), ts, minPeriodTs);
- TBalancerStats stats;
- stats.Ts = ts;
- stats.CpuUs = Ts2Us(ElapsedTs);
- stats.IdleUs = Ts2Us(ParkedTs);
- stats.ExpectedLatencyIncreaseUs = Ts2Us(estimatedTs);
- stats.WorstActivationTimeUs = worstActivationTimeUs;
- Balancer->SetPoolStats(pool, stats);
- }
- }
- Balancer->Balance();
- Balancer->Unlock();
- }
- }
-
- inline TPoolId TUnitedWorkers::AssignedPool(TWorkerContext& wctx) {
- return Cpus[wctx.CpuId].State.AssignedPool();
- }
-
- inline bool TUnitedWorkers::IsPoolReassigned(TWorkerContext& wctx) {
- return Cpus[wctx.CpuId].State.IsPoolReassigned(wctx.PoolId);
- }
-
- inline void TUnitedWorkers::SwitchPool(TWorkerContext& wctx, ui64 softDeadlineTs) {
- Pools[wctx.PoolId].Switch(wctx, softDeadlineTs, Cpus[wctx.CpuId].PoolStats[wctx.PoolId]);
- Cpus[wctx.CpuId].SwitchPool(wctx.PoolId);
- }
-
- TPoolId TUnitedWorkers::Idle(TPoolId assigned, TWorkerContext& wctx) {
- wctx.SwitchToIdle();
-
- TPoolId result;
- TTimeTracker timeTracker;
- TCpu& cpu = Cpus[wctx.CpuId];
- TPool* assignedPool = assigned == CpuShared ? nullptr : &Pools[assigned];
- TCpu::TScopedWaiters scopedWaiters(cpu, assignedPool);
- while (true) {
- if (cpu.StartSpinning(this, assignedPool, result)) {
- break; // token already acquired (or stop)
- }
- result = WaitSequence(cpu, wctx, timeTracker);
- if (Y_UNLIKELY(result == CpuStopped) || TryAcquireToken(result)) {
- break; // token acquired (or stop)
- }
- }
-
- wctx.AddElapsedCycles(ActorSystemIndex, timeTracker.Elapsed());
- return result;
- }
-
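- // Wait sequence: spin actively for up to SpinThresholdUs, then switch the cpu into the blocked state
- // and park until a wakeup arrives, re-parking every balancer period; busy/idle periods are recorded
- // in the load log so the balancer can estimate utilization.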
- TPoolId TUnitedWorkers::WaitSequence(TCpu& cpu, TWorkerContext& wctx, TTimeTracker& timeTracker) {
- TPoolId result;
- if (cpu.ActiveWait(Us2Ts(Config.SpinThresholdUs), result)) {
- wctx.AddElapsedCycles(ActorSystemIndex, timeTracker.Elapsed());
- return result;
- }
- if (cpu.StartBlocking(result)) {
- wctx.AddElapsedCycles(ActorSystemIndex, timeTracker.Elapsed());
- return result;
- }
- wctx.AddElapsedCycles(ActorSystemIndex, timeTracker.Elapsed());
- cpu.LoadLog.RegisterBusyPeriod(GetCycleCountFast());
- bool wakeup;
- do {
- wakeup = cpu.BlockedWait(result, Config.Balancer.PeriodUs * 1000);
- wctx.AddParkedCycles(timeTracker.Elapsed());
- } while (!wakeup);
- cpu.LoadLog.RegisterIdlePeriod(GetCycleCountFast());
- return result;
- }
-
- void TUnitedWorkers::GetCurrentStats(TPoolId pool, TVector<TExecutorThreadStats>& statsCopy) const {
- size_t idx = 1;
- statsCopy.resize(idx + Pools[pool].WakeOrderCpus.size());
- for (TCpu* cpu : Pools[pool].WakeOrderCpus) {
- TExecutorThreadStats& s = statsCopy[idx++];
- s = TExecutorThreadStats();
- s.Aggregate(cpu->PoolStats[pool]);
- }
- }
-
- TUnitedExecutorPool::TUnitedExecutorPool(const TUnitedExecutorPoolConfig& cfg, TUnitedWorkers* united)
- : TExecutorPoolBaseMailboxed(cfg.PoolId)
- , United(united)
- , PoolName(cfg.PoolName)
- {
- United->SetupPool(TPoolId(cfg.PoolId), this, MailboxTable.Get());
- }
-
- void TUnitedExecutorPool::Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) {
- ActorSystem = actorSystem;
-
- // Schedule readers are initialized through TUnitedWorkers::Prepare
- *scheduleReaders = nullptr;
- *scheduleSz = 0;
- }
-
- void TUnitedExecutorPool::Start() {
- // workers are actually started in TUnitedWorkers::Start()
- }
-
- void TUnitedExecutorPool::PrepareStop() {
- }
-
- void TUnitedExecutorPool::Shutdown() {
- // workers are actually joined in TUnitedWorkers::Shutdown()
- }
-
- TAffinity* TUnitedExecutorPool::Affinity() const {
- Y_ABORT(); // should never be called, TCpuExecutorPool is used instead
- }
-
- ui32 TUnitedExecutorPool::GetThreads() const {
- return 0;
- }
-
- ui32 TUnitedExecutorPool::GetReadyActivation(TWorkerContext&, ui64) {
- Y_ABORT(); // should never be called, TCpu*ExecutorPool is used instead
- }
-
- inline void TUnitedExecutorPool::ScheduleActivation(ui32 activation) {
- TUnitedExecutorPool::ScheduleActivationEx(activation, AtomicIncrement(ActivationsRevolvingCounter));
- }
-
- inline void TUnitedExecutorPool::SpecificScheduleActivation(ui32 activation) {
- TUnitedExecutorPool::ScheduleActivation(activation);
- }
-
- inline void TUnitedExecutorPool::ScheduleActivationEx(ui32 activation, ui64 revolvingCounter) {
- United->PushActivation(PoolId, activation, revolvingCounter);
- }
-
- void TUnitedExecutorPool::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- TUnitedExecutorPool::Schedule(deadline - ActorSystem->Timestamp(), ev, cookie, workerId);
- }
-
- void TUnitedExecutorPool::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_DEBUG_ABORT_UNLESS(workerId < United->GetWorkerCount());
- const auto current = ActorSystem->Monotonic();
- if (deadline < current) {
- deadline = current;
- }
- United->GetScheduleWriter(workerId)->Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TUnitedExecutorPool::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) {
- Y_DEBUG_ABORT_UNLESS(workerId < United->GetWorkerCount());
- const auto deadline = ActorSystem->Monotonic() + delta;
- United->GetScheduleWriter(workerId)->Push(deadline.MicroSeconds(), ev.Release(), cookie);
- }
-
- void TUnitedExecutorPool::GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const {
- Y_UNUSED(poolStats);
- if (statsCopy.empty()) {
- statsCopy.resize(1);
- }
- statsCopy[0] = TExecutorThreadStats();
- statsCopy[0].Aggregate(Stats);
- United->GetCurrentStats(PoolId, statsCopy);
- }
-}
diff --git a/library/cpp/actors/core/executor_pool_united.h b/library/cpp/actors/core/executor_pool_united.h
deleted file mode 100644
index c0563a9053..0000000000
--- a/library/cpp/actors/core/executor_pool_united.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "balancer.h"
-#include "scheduler_queue.h"
-#include "executor_pool_base.h"
-
-#include <library/cpp/actors/util/unordered_cache.h>
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/actors/util/cpu_load_log.h>
-#include <library/cpp/actors/util/unordered_cache.h>
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-
-#include <util/generic/noncopyable.h>
-
-namespace NActors {
- class TMailboxTable;
-
- class TUnitedExecutorPool: public TExecutorPoolBaseMailboxed {
- TUnitedWorkers* United;
- const TString PoolName;
- TAtomic ActivationsRevolvingCounter = 0;
- public:
- TUnitedExecutorPool(const TUnitedExecutorPoolConfig& cfg, TUnitedWorkers* united);
-
- void Prepare(TActorSystem* actorSystem, NSchedulerQueue::TReader** scheduleReaders, ui32* scheduleSz) override;
- void Start() override;
- void PrepareStop() override;
- void Shutdown() override;
-
- TAffinity* Affinity() const override;
- ui32 GetThreads() const override;
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingReadCounter) override;
- void ScheduleActivation(ui32 activation) override;
- void SpecificScheduleActivation(ui32 activation) override;
- void ScheduleActivationEx(ui32 activation, ui64 revolvingWriteCounter) override;
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
- void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie, TWorkerId workerId) override;
-
- void GetCurrentStats(TExecutorPoolStats& poolStats, TVector<TExecutorThreadStats>& statsCopy) const override;
-
- TString GetName() const override {
- return PoolName;
- }
- };
-}
diff --git a/library/cpp/actors/core/executor_pool_united_ut.cpp b/library/cpp/actors/core/executor_pool_united_ut.cpp
deleted file mode 100644
index df3e2d29d8..0000000000
--- a/library/cpp/actors/core/executor_pool_united_ut.cpp
+++ /dev/null
@@ -1,341 +0,0 @@
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "hfunc.h"
-#include "scheduler_basic.h"
-
-#include <library/cpp/actors/util/should_continue.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/protos/unittests.pb.h>
-
-using namespace NActors;
-
-////////////////////////////////////////////////////////////////////////////////
-
-struct TEvMsg : public NActors::TEventBase<TEvMsg, 10347> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvMsg, "ExecutorPoolTest: Msg");
-};
-
-////////////////////////////////////////////////////////////////////////////////
-
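-// Busy-spins until the calling thread has consumed workUs microseconds of thread cpu time
-// (re-checks ThreadCPUTime because wall-clock cycles may overestimate actual cpu time) and
-// returns the cpu time actually spent.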
-inline ui64 DoTimedWork(ui64 workUs) {
- ui64 startUs = ThreadCPUTime();
- ui64 endUs = startUs + workUs;
- ui64 nowUs = startUs;
- do {
- ui64 endTs = GetCycleCountFast() + Us2Ts(endUs - nowUs);
- while (GetCycleCountFast() <= endTs) {}
- nowUs = ThreadCPUTime();
- } while (nowUs <= endUs);
- return nowUs - startUs;
-}
-
-class TTestSenderActor : public IActorCallback {
-private:
- using EActivityType = IActor::EActivityType ;
- using EActorActivity = IActor::EActorActivity;
-
-private:
- TAtomic Counter;
- TActorId Receiver;
-
- std::function<void(void)> Action;
-
-public:
- TTestSenderActor(std::function<void(void)> action = [](){},
- EActivityType activityType = EActorActivity::OTHER)
- : IActorCallback(static_cast<TReceiveFunc>(&TTestSenderActor::Execute), activityType)
- , Action(action)
- {}
-
- void Start(TActorId receiver, size_t count) {
- AtomicSet(Counter, count);
- Receiver = receiver;
- }
-
- void Stop() {
- while (true) {
- if (GetCounter() == 0) {
- break;
- }
-
- Sleep(TDuration::MilliSeconds(1));
- }
- }
-
- size_t GetCounter() const {
- return AtomicGet(Counter);
- }
-
-private:
- STFUNC(Execute) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvMsg, Handle);
- }
- }
-
- void Handle(TEvMsg::TPtr &ev) {
- Y_UNUSED(ev);
- Action();
- TAtomicBase count = AtomicDecrement(Counter);
- Y_ABORT_UNLESS(count != Max<TAtomicBase>());
- if (count) {
- Send(Receiver, new TEvMsg());
- }
- }
-};
-
-// Single cpu balancer that switches pool on every activation; not thread-safe
-struct TRoundRobinBalancer: public IBalancer {
- TCpuState* State;
- TMap<TPoolId, TPoolId> NextPool;
-
- bool AddCpu(const TCpuAllocation& cpuAlloc, TCpuState* cpu) override {
- State = cpu;
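- // Build a circular successor map over the allowed pools (last -> first, then each pool -> the next),
- // so Balance() assigns pools round-robin.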
- TPoolId prev = cpuAlloc.AllowedPools.rbegin()->PoolId;
- for (auto& p : cpuAlloc.AllowedPools) {
- NextPool[prev] = p.PoolId;
- prev = p.PoolId;
- }
- return true;
- }
-
- bool TryLock(ui64) override { return true; }
- void SetPoolStats(TPoolId, const TBalancerStats&) override {}
- void Unlock() override {}
-
- void Balance() override {
- TPoolId assigned;
- TPoolId current;
- State->Load(assigned, current);
- State->AssignPool(NextPool[assigned]);
- }
-
- ui64 GetPeriodUs() override {
- return 1000;
- }
-};
-
-void AddUnitedPool(THolder<TActorSystemSetup>& setup, ui32 concurrency = 0) {
- TUnitedExecutorPoolConfig united;
- united.PoolId = setup->GetExecutorsCount();
- united.Concurrency = concurrency;
- setup->CpuManager.United.emplace_back(std::move(united));
-}
-
-THolder<TActorSystemSetup> GetActorSystemSetup(ui32 cpuCount) {
- auto setup = MakeHolder<NActors::TActorSystemSetup>();
- setup->NodeId = 1;
- setup->CpuManager.UnitedWorkers.CpuCount = cpuCount;
- setup->CpuManager.UnitedWorkers.NoRealtime = true; // unavailable in test environment
- setup->Scheduler = new TBasicSchedulerThread(NActors::TSchedulerConfig(512, 0));
- return setup;
-}
-
-Y_UNIT_TEST_SUITE(UnitedExecutorPool) {
-
-#ifdef _linux_
-
- Y_UNIT_TEST(OnePoolManyCpus) {
- const size_t msgCount = 1e4;
- auto setup = GetActorSystemSetup(4);
- AddUnitedPool(setup);
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- auto actor = new TTestSenderActor();
- auto actorId = actorSystem.Register(actor);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
-
- while (actor->GetCounter()) {
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "Counter is " << actor->GetCounter());
-
- Sleep(TDuration::MilliSeconds(1));
- }
-
- TVector<TExecutorThreadStats> stats;
- TExecutorPoolStats poolStats;
- actorSystem.GetPoolStats(0, poolStats, stats);
- // Sum all per-thread counters into the 0th element
- for (ui32 idx = 1; idx < stats.size(); ++idx) {
- stats[0].Aggregate(stats[idx]);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(stats[0].SentEvents, msgCount - 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEvents, msgCount);
- //UNIT_ASSERT_VALUES_EQUAL(stats[0].PreemptedEvents, 0); // depends on execution time and system load, so may be non-zero
- UNIT_ASSERT_VALUES_EQUAL(stats[0].NonDeliveredEvents, 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EmptyMailboxActivation, 0);
- //UNIT_ASSERT_VALUES_EQUAL(stats[0].CpuUs, 0); // depends on total duration of test, so undefined
- UNIT_ASSERT(stats[0].ElapsedTicks > 0);
- //UNIT_ASSERT(stats[0].ParkedTicks == 0); // per-pool parked time does not make sense for united pools
- UNIT_ASSERT_VALUES_EQUAL(stats[0].BlockedTicks, 0);
- UNIT_ASSERT(stats[0].ActivationTimeHistogram.TotalSamples >= msgCount / TBasicExecutorPoolConfig::DEFAULT_EVENTS_PER_MAILBOX);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EventDeliveryTimeHistogram.TotalSamples, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].EventProcessingCountHistogram.TotalSamples, msgCount);
- UNIT_ASSERT(stats[0].EventProcessingTimeHistogram.TotalSamples > 0);
- UNIT_ASSERT(stats[0].ElapsedTicksByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()] > 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEventsByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ActorsAliveByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ScheduledEventsByActivity[NActors::TActorTypeOperator::GetOtherActivityIndex()], 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolActorRegistrations, 1);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolDestroyedActors, 0);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolAllocatedMailboxes, 4095); // exactly one mailbox table line
- UNIT_ASSERT(stats[0].MailboxPushedOutByTime + stats[0].MailboxPushedOutByEventCount + stats[0].MailboxPushedOutBySoftPreemption >= msgCount / TBasicExecutorPoolConfig::DEFAULT_EVENTS_PER_MAILBOX);
- }
-
- Y_UNIT_TEST(ManyPoolsOneSharedCpu) {
- const size_t msgCount = 1e4;
- const size_t pools = 4;
- auto setup = GetActorSystemSetup(1);
- for (size_t pool = 0; pool < pools; pool++) {
- AddUnitedPool(setup);
- }
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TVector<TTestSenderActor*> actors;
- for (size_t pool = 0; pool < pools; pool++) {
- auto actor = new TTestSenderActor();
- auto actorId = actorSystem.Register(actor, TMailboxType::HTSwap, pool);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
- actors.push_back(actor);
- }
-
- while (true) {
- size_t left = 0;
- for (auto actor : actors) {
- left += actor->GetCounter();
- }
- if (left == 0) {
- break;
- }
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "left " << left);
- Sleep(TDuration::MilliSeconds(1));
- }
-
- for (size_t pool = 0; pool < pools; pool++) {
- TVector<TExecutorThreadStats> stats;
- TExecutorPoolStats poolStats;
- actorSystem.GetPoolStats(pool, poolStats, stats);
- // Sum all per-thread counters into the 0th element
- for (ui32 idx = 1; idx < stats.size(); ++idx) {
- stats[0].Aggregate(stats[idx]);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEvents, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolActorRegistrations, 1);
- }
- }
-
- Y_UNIT_TEST(ManyPoolsOneAssignedCpu) {
- const size_t msgCount = 1e4;
- const size_t pools = 4;
- auto setup = GetActorSystemSetup(1);
- setup->Balancer.Reset(new TRoundRobinBalancer());
- for (size_t pool = 0; pool < pools; pool++) {
- AddUnitedPool(setup);
- }
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TVector<TTestSenderActor*> actors;
- for (size_t pool = 0; pool < pools; pool++) {
- auto actor = new TTestSenderActor();
- auto actorId = actorSystem.Register(actor, TMailboxType::HTSwap, pool);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
- actors.push_back(actor);
- }
-
- while (true) {
- size_t left = 0;
- for (auto actor : actors) {
- left += actor->GetCounter();
- }
- if (left == 0) {
- break;
- }
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(5), "left " << left);
- Sleep(TDuration::MilliSeconds(1));
- }
-
- for (size_t pool = 0; pool < pools; pool++) {
- TVector<TExecutorThreadStats> stats;
- TExecutorPoolStats poolStats;
- actorSystem.GetPoolStats(pool, poolStats, stats);
- // Sum all per-thread counters into the 0th element
- for (ui32 idx = 1; idx < stats.size(); ++idx) {
- stats[0].Aggregate(stats[idx]);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEvents, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolActorRegistrations, 1);
- }
- }
-
- Y_UNIT_TEST(ManyPoolsOneCpuSlowEvents) {
- const size_t msgCount = 3;
- const size_t pools = 4;
- auto setup = GetActorSystemSetup(1);
- for (size_t pool = 0; pool < pools; pool++) {
- AddUnitedPool(setup);
- }
- TActorSystem actorSystem(setup);
- actorSystem.Start();
-
- auto begin = TInstant::Now();
-
- TVector<TTestSenderActor*> actors;
- for (size_t pool = 0; pool < pools; pool++) {
- auto actor = new TTestSenderActor([]() {
- DoTimedWork(100'000);
- });
- auto actorId = actorSystem.Register(actor, TMailboxType::HTSwap, pool);
- actor->Start(actor->SelfId(), msgCount);
- actorSystem.Send(actorId, new TEvMsg());
- actors.push_back(actor);
- }
-
- while (true) {
- size_t left = 0;
- for (auto actor : actors) {
- left += actor->GetCounter();
- }
- if (left == 0) {
- break;
- }
- auto now = TInstant::Now();
- UNIT_ASSERT_C(now - begin < TDuration::Seconds(15), "left " << left);
- Sleep(TDuration::MilliSeconds(1));
- }
-
- for (size_t pool = 0; pool < pools; pool++) {
- TVector<TExecutorThreadStats> stats;
- TExecutorPoolStats poolStats;
- actorSystem.GetPoolStats(pool, poolStats, stats);
- // Sum all per-thread counters into the 0th element
- for (ui32 idx = 1; idx < stats.size(); ++idx) {
- stats[0].Aggregate(stats[idx]);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(stats[0].ReceivedEvents, msgCount);
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PreemptedEvents, msgCount); // every 100ms event should be preempted
- UNIT_ASSERT_VALUES_EQUAL(stats[0].PoolActorRegistrations, 1);
- }
- }
-
-#endif
-
-}
diff --git a/library/cpp/actors/core/executor_pool_united_workers.h b/library/cpp/actors/core/executor_pool_united_workers.h
deleted file mode 100644
index c683ae7d9f..0000000000
--- a/library/cpp/actors/core/executor_pool_united_workers.h
+++ /dev/null
@@ -1,105 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "balancer.h"
-#include "scheduler_queue.h"
-
-#include <library/cpp/actors/actor_type/indexes.h>
-#include <library/cpp/actors/util/cpu_load_log.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <util/generic/noncopyable.h>
-
-namespace NActors {
- class TActorSystem;
- class TMailboxTable;
-
- class TUnitedWorkers: public TNonCopyable {
- struct TWorker;
- struct TPool;
- struct TCpu;
-
- i16 WorkerCount;
- TArrayHolder<TWorker> Workers; // indexed by WorkerId
- size_t PoolCount;
- TArrayHolder<TPool> Pools; // indexed by PoolId, so may include not used (not united) pools
- size_t CpuCount;
- TArrayHolder<TCpu> Cpus; // indexed by CpuId, so may include not allocated CPUs
-
- IBalancer* Balancer; // external pool cpu balancer
-
- TUnitedWorkersConfig Config;
- TCpuAllocationConfig Allocation;
-
- volatile bool StopFlag = false;
- TMinusOneCpuEstimator<1024> MinusOneCpuEstimator;
- const ui32 ActorSystemIndex = NActors::TActorTypeOperator::GetActorSystemIndex();
- public:
- TUnitedWorkers(
- const TUnitedWorkersConfig& config,
- const TVector<TUnitedExecutorPoolConfig>& unitedPools,
- const TCpuAllocationConfig& allocation,
- IBalancer* balancer);
- ~TUnitedWorkers();
- void Prepare(TActorSystem* actorSystem, TVector<NSchedulerQueue::TReader*>& scheduleReaders);
- void Start();
- void PrepareStop();
- void Shutdown();
-
- bool IsStopped() const {
- return RelaxedLoad(&StopFlag);
- }
-
- TWorkerId GetWorkerCount() const {
- return WorkerCount;
- }
-
- // Returns thread id of a worker
- TThreadId GetWorkerThreadId(TWorkerId workerId) const;
-
- // Returns per worker schedule writers
- NSchedulerQueue::TWriter* GetScheduleWriter(TWorkerId workerId) const;
-
- // Sets executor for specified pool
- void SetupPool(TPoolId pool, IExecutorPool* executorPool, TMailboxTable* mailboxTable);
-
- // Add activation of newly scheduled mailbox and wake cpu to execute it if required
- void PushActivation(TPoolId pool, ui32 activation, ui64 revolvingCounter);
-
- // Try acquire pending token. Must be done before execution
- bool TryAcquireToken(TPoolId pool);
-
- // Try to wake idle cpu waiting for tokens on specified pool
- void TryWake(TPoolId pool);
-
- // Get activation from pool; requires pool's token
- void BeginExecution(TPoolId pool, ui32& activation, ui64 revolvingCounter);
-
- // Stop currently active execution and start new one if token is available
- // NOTE: Reuses token if it's not destroyed
- bool NextExecution(TPoolId pool, ui32& activation, ui64 revolvingCounter);
-
- // Stop active execution
- void StopExecution(TPoolId pool);
-
- // Runs balancer to assign pools to cpus
- void Balance();
-
- // Returns pool to be executed by worker or `CpuShared`
- TPoolId AssignedPool(TWorkerContext& wctx);
-
- // Checks if balancer has assigned another pool for worker's cpu
- bool IsPoolReassigned(TWorkerContext& wctx);
-
- // Switch worker context into specified pool
- void SwitchPool(TWorkerContext& wctx, ui64 softDeadlineTs);
-
- // Wait for tokens from any pool allowed on specified cpu
- TPoolId Idle(TPoolId assigned, TWorkerContext& wctx);
-
- // Fill stats for specified pool
- void GetCurrentStats(TPoolId pool, TVector<TExecutorThreadStats>& statsCopy) const;
-
- private:
- TPoolId WaitSequence(TCpu& cpu, TWorkerContext& wctx, TTimeTracker& timeTracker);
- };
-}
diff --git a/library/cpp/actors/core/executor_thread.cpp b/library/cpp/actors/core/executor_thread.cpp
deleted file mode 100644
index 4aab7b3e31..0000000000
--- a/library/cpp/actors/core/executor_thread.cpp
+++ /dev/null
@@ -1,714 +0,0 @@
-#include "executor_thread.h"
-#include "actorsystem.h"
-#include "actor.h"
-#include "callstack.h"
-#include "mailbox.h"
-#include "event.h"
-#include "events.h"
-#include "executor_pool_base.h"
-#include "probes.h"
-
-#include <library/cpp/actors/prof/tag.h>
-#include <library/cpp/actors/util/affinity.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/thread.h>
-
-#ifdef BALLOC
-#include <library/cpp/balloc/optional/operators.h>
-#endif
-
-#ifdef _linux_
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#include <util/system/type_name.h>
-#include <util/system/datetime.h>
-
-LWTRACE_USING(ACTORLIB_PROVIDER)
-
-namespace NActors {
- constexpr TDuration TExecutorThread::DEFAULT_TIME_PER_MAILBOX;
-
- TExecutorThread::TExecutorThread(
- TWorkerId workerId,
- TWorkerId cpuId,
- TActorSystem* actorSystem,
- IExecutorPool* executorPool,
- TMailboxTable* mailboxTable,
- const TString& threadName,
- TDuration timePerMailbox,
- ui32 eventsPerMailbox)
- : ActorSystem(actorSystem)
- , ExecutorPool(executorPool)
- , Ctx(workerId, cpuId)
- , ThreadName(threadName)
- , IsUnitedWorker(true)
- , TimePerMailbox(timePerMailbox)
- , EventsPerMailbox(eventsPerMailbox)
- {
- Ctx.Switch(
- ExecutorPool,
- mailboxTable,
- NHPTimer::GetClockRate() * timePerMailbox.SecondsFloat(),
- eventsPerMailbox,
- ui64(-1), // infinite soft deadline
- &Ctx.WorkerStats);
- }
-
- TExecutorThread::TExecutorThread(TWorkerId workerId,
- TActorSystem* actorSystem,
- TVector<IExecutorPool*> executorPools,
- const TString& threadName,
- ui64 softProcessingDurationTs,
- TDuration timePerMailbox,
- ui32 eventsPerMailbox)
- : ActorSystem(actorSystem)
- , AvailableExecutorPools(executorPools)
- , Ctx(workerId, 0)
- , ThreadName(threadName)
- , IsUnitedWorker(false)
- , TimePerMailbox(timePerMailbox)
- , EventsPerMailbox(eventsPerMailbox)
- , SoftProcessingDurationTs(softProcessingDurationTs)
- {}
-
-
-
- TExecutorThread::~TExecutorThread()
- { }
-
- void TExecutorThread::UnregisterActor(TMailboxHeader* mailbox, TActorId actorId) {
- Y_DEBUG_ABORT_UNLESS(IsUnitedWorker || actorId.PoolID() == ExecutorPool->PoolId && ExecutorPool->ResolveMailbox(actorId.Hint()) == mailbox);
- IActor* actor = mailbox->DetachActor(actorId.LocalId());
- Ctx.DecrementActorsAliveByActivity(actor->GetActivityType());
- DyingActors.push_back(THolder(actor));
- }
-
- void TExecutorThread::DropUnregistered() {
-#if defined(ACTORSLIB_COLLECT_EXEC_STATS)
- if (ActorSystem->MonitorStuckActors()) {
- if (auto *pool = dynamic_cast<TExecutorPoolBaseMailboxed*>(ExecutorPool)) {
- with_lock (pool->StuckObserverMutex) {
- for (const auto& actor : DyingActors) {
- const size_t i = actor->StuckIndex;
- auto& actorPtr = pool->Actors[i];
- actorPtr = pool->Actors.back();
- actorPtr->StuckIndex = i;
- pool->Actors.pop_back();
- pool->DeadActorsUsage.emplace_back(actor->GetActivityType(), actor->GetUsage(GetCycleCountFast()));
- }
- }
- }
- }
-#endif
- DyingActors.clear(); // here is actual destruction of actors
- }
-
- void TExecutorThread::Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- ++CurrentActorScheduledEventsCounter;
- Ctx.Executor->Schedule(deadline, ev, cookie, Ctx.WorkerId);
- }
-
- void TExecutorThread::Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- ++CurrentActorScheduledEventsCounter;
- Ctx.Executor->Schedule(deadline, ev, cookie, Ctx.WorkerId);
- }
-
- void TExecutorThread::Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie) {
- ++CurrentActorScheduledEventsCounter;
- Ctx.Executor->Schedule(delta, ev, cookie, Ctx.WorkerId);
- }
-
- template <class T>
- inline TString SafeTypeName(const T* t) {
- if (t == nullptr) {
- return "nullptr";
- }
- try {
- return TypeName(*t);
- } catch (...) {
- return "unknown-type";
- }
- }
-
- inline void LwTraceSlowDelivery(IEventHandle* ev, const std::type_info* actorType, ui32 poolId, const TActorId& currentRecipient,
- double delivMs, double sinceActivationMs, ui32 eventsExecutedBefore) {
- LWPROBE(EventSlowDelivery,
- poolId,
- delivMs,
- sinceActivationMs,
- eventsExecutedBefore,
- ev && ev->HasEvent() ? ev->GetTypeName() : (ev ? ToString(ev->Type) : TString("nullptr")),
- currentRecipient.ToString(),
- SafeTypeName(actorType));
- }
-
- inline void LwTraceSlowEvent(IEventHandle* ev, ui32 evTypeForTracing, const std::type_info* actorType, ui32 poolId,
- const TActorId& currentRecipient, double eventMs) {
- // Event could have been destroyed by actor->Receive();
- LWPROBE(SlowEvent,
- poolId,
- eventMs,
- ev && ev->HasEvent() ? ev->GetTypeName() : ToString(evTypeForTracing),
- currentRecipient.ToString(),
- SafeTypeName(actorType));
- }
-
- template <typename TMailbox>
- bool TExecutorThread::Execute(TMailbox* mailbox, ui32 hint, bool isTailExecution) {
- Y_DEBUG_ABORT_UNLESS(DyingActors.empty());
-
- bool reclaimAsFree = false;
-
- if (!isTailExecution) {
- Ctx.HPStart = GetCycleCountFast();
- Ctx.ExecutedEvents = 0;
- }
- NHPTimer::STime hpprev = Ctx.HPStart;
-
- IActor* actor = nullptr;
- const std::type_info* actorType = nullptr;
- ui32 prevActivityType = std::numeric_limits<ui32>::max();
- TActorId recipient;
- bool firstEvent = true;
- bool preempted = false;
- for (; Ctx.ExecutedEvents < Ctx.EventsPerMailbox; ++Ctx.ExecutedEvents) {
- if (TAutoPtr<IEventHandle> evExt = mailbox->Pop()) {
- mailbox->ProcessEvents(mailbox);
- NHPTimer::STime hpnow;
- recipient = evExt->GetRecipientRewrite();
- TActorContext ctx(*mailbox, *this, hpprev, recipient);
- TlsActivationContext = &ctx; // ensure dtor (if any) is called within actor system
- // move so the event is destroyed before ctx
- auto ev = std::move(evExt);
- if (actor = mailbox->FindActor(recipient.LocalId())) {
- // Since actor is not null there should be no exceptions
- actorType = &typeid(*actor);
-
-#ifdef USE_ACTOR_CALLSTACK
- TCallstack::GetTlsCallstack() = ev->Callstack;
- TCallstack::GetTlsCallstack().SetLinesToSkip();
-#endif
- CurrentRecipient = recipient;
- CurrentActorScheduledEventsCounter = 0;
-
- if (firstEvent) {
- double usec = Ctx.AddActivationStats(AtomicLoad(&mailbox->ScheduleMoment), hpprev);
- if (usec > 500) {
- GLOBAL_LWPROBE(ACTORLIB_PROVIDER, SlowActivation, Ctx.PoolId, usec / 1000.0);
- }
- firstEvent = false;
- }
-
- i64 usecDeliv = Ctx.AddEventDeliveryStats(ev->SendTime, hpprev);
- if (usecDeliv > 5000) {
- double sinceActivationMs = NHPTimer::GetSeconds(hpprev - Ctx.HPStart) * 1000.0;
- LwTraceSlowDelivery(ev.Get(), actorType, Ctx.PoolId, CurrentRecipient, NHPTimer::GetSeconds(hpprev - ev->SendTime) * 1000.0, sinceActivationMs, Ctx.ExecutedEvents);
- }
-
- ui32 evTypeForTracing = ev->Type;
-
- ui32 activityType = actor->GetActivityType();
- if (activityType != prevActivityType) {
- prevActivityType = activityType;
- NProfiling::TMemoryTagScope::Reset(activityType);
- }
-
- actor->Receive(ev);
- mailbox->ProcessEvents(mailbox);
- actor->OnDequeueEvent();
-
- size_t dyingActorsCnt = DyingActors.size();
- Ctx.UpdateActorsStats(dyingActorsCnt);
- if (dyingActorsCnt) {
- DropUnregistered();
- mailbox->ProcessEvents(mailbox);
- actor = nullptr;
- }
-
- if (mailbox->IsEmpty()) // was not-free and become free, we must reclaim mailbox
- reclaimAsFree = true;
-
- hpnow = GetCycleCountFast();
- NHPTimer::STime elapsed = Ctx.AddEventProcessingStats(hpprev, hpnow, activityType, CurrentActorScheduledEventsCounter);
- if (elapsed > 1000000) {
- LwTraceSlowEvent(ev.Get(), evTypeForTracing, actorType, Ctx.PoolId, CurrentRecipient, NHPTimer::GetSeconds(elapsed) * 1000.0);
- }
-
- // The actor might have been destroyed
- if (actor)
- actor->AddElapsedTicks(elapsed);
-
- CurrentRecipient = TActorId();
- } else {
- actorType = nullptr;
-
- TAutoPtr<IEventHandle> nonDelivered = IEventHandle::ForwardOnNondelivery(std::move(ev), TEvents::TEvUndelivered::ReasonActorUnknown);
- if (nonDelivered.Get()) {
- ActorSystem->Send(nonDelivered);
- } else {
- Ctx.IncrementNonDeliveredEvents();
- }
- hpnow = GetCycleCountFast();
- }
-
- hpprev = hpnow;
-
- if (TlsThreadContext->CapturedType == ESendingType::Tail) {
- AtomicStore(&mailbox->ScheduleMoment, hpnow);
- Ctx.IncrementMailboxPushedOutByTailSending();
- LWTRACK(MailboxPushedOutByTailSending,
- Ctx.Orbit,
- Ctx.PoolId,
- Ctx.Executor->GetName(),
- Ctx.ExecutedEvents + 1,
- CyclesToDuration(hpnow - Ctx.HPStart),
- Ctx.WorkerId,
- recipient.ToString(),
- SafeTypeName(actorType));
- break;
- }
-
- // Soft preemption in united pool
- if (Ctx.SoftDeadlineTs < (ui64)hpnow) {
- AtomicStore(&mailbox->ScheduleMoment, hpnow);
- Ctx.IncrementMailboxPushedOutBySoftPreemption();
- LWTRACK(MailboxPushedOutBySoftPreemption,
- Ctx.Orbit,
- Ctx.PoolId,
- Ctx.Executor->GetName(),
- Ctx.ExecutedEvents + 1,
- CyclesToDuration(hpnow - Ctx.HPStart),
- Ctx.WorkerId,
- recipient.ToString(),
- SafeTypeName(actorType));
- preempted = true;
- break;
- }
-
- // time limit inside one mailbox passed, let others do some work
- if (hpnow - Ctx.HPStart > (i64)Ctx.TimePerMailboxTs) {
- AtomicStore(&mailbox->ScheduleMoment, hpnow);
- Ctx.IncrementMailboxPushedOutByTime();
- LWTRACK(MailboxPushedOutByTime,
- Ctx.Orbit,
- Ctx.PoolId,
- Ctx.Executor->GetName(),
- Ctx.ExecutedEvents + 1,
- CyclesToDuration(hpnow - Ctx.HPStart),
- Ctx.WorkerId,
- recipient.ToString(),
- SafeTypeName(actorType));
- preempted = true;
- break;
- }
-
- if (Ctx.ExecutedEvents + 1 == Ctx.EventsPerMailbox) {
- AtomicStore(&mailbox->ScheduleMoment, hpnow);
- Ctx.IncrementMailboxPushedOutByEventCount();
- LWTRACK(MailboxPushedOutByEventCount,
- Ctx.Orbit,
- Ctx.PoolId,
- Ctx.Executor->GetName(),
- Ctx.ExecutedEvents + 1,
- CyclesToDuration(hpnow - Ctx.HPStart),
- Ctx.WorkerId,
- recipient.ToString(),
- SafeTypeName(actorType));
- preempted = true;
- break;
- }
- } else {
- if (Ctx.ExecutedEvents == 0)
- Ctx.IncrementEmptyMailboxActivation();
- LWTRACK(MailboxEmpty,
- Ctx.Orbit,
- Ctx.PoolId,
- Ctx.Executor->GetName(),
- Ctx.ExecutedEvents,
- CyclesToDuration(GetCycleCountFast() - Ctx.HPStart),
- Ctx.WorkerId,
- recipient.ToString(),
- SafeTypeName(actor));
- break; // empty queue, leave
- }
- }
-
- NProfiling::TMemoryTagScope::Reset(0);
- TlsActivationContext = nullptr;
- UnlockFromExecution(mailbox, Ctx.Executor, reclaimAsFree, hint, Ctx.WorkerId, RevolvingWriteCounter);
- return preempted;
- }
-
- TThreadId TExecutorThread::GetThreadId() const {
-#ifdef _linux_
- while (AtomicLoad(&ThreadId) == UnknownThreadId) {
- NanoSleep(1000);
- }
-#endif
- return ThreadId;
- }
-
- TWorkerId TExecutorThread::GetWorkerId() const {
- return Ctx.WorkerId;
- }
-
- void TExecutorThread::ProcessExecutorPool(IExecutorPool *pool, bool isSharedThread) {
- ExecutorPool = pool;
- TThreadContext threadCtx;
- TlsThreadContext = &threadCtx;
- TlsThreadContext->Pool = static_cast<IExecutorPool*>(ExecutorPool);
- TlsThreadContext->WorkerId = Ctx.WorkerId;
- pool->Initialize(Ctx);
-
- ExecutorPool->SetRealTimeMode();
- TAffinityGuard affinity(ExecutorPool->Affinity());
-
- NHPTimer::STime hpnow = GetCycleCountFast();
- NHPTimer::STime hpprev = hpnow;
- ui64 execCount = 0;
- ui64 readyActivationCount = 0;
- i64 execCycles = 0;
- i64 nonExecCycles = 0;
-
- bool needToStop = false;
-
- auto executeActivation = [&](ui32 activation, bool isTailExecution) {
- LWTRACK(ActivationBegin, Ctx.Orbit, Ctx.CpuId, Ctx.PoolId, Ctx.WorkerId, NHPTimer::GetSeconds(Ctx.Lease.GetPreciseExpireTs()) * 1e3);
- readyActivationCount++;
- if (TMailboxHeader* header = Ctx.MailboxTable->Get(activation)) {
- if (header->LockForExecution()) {
- hpnow = GetCycleCountFast();
- nonExecCycles += hpnow - hpprev;
- hpprev = hpnow;
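-// Dispatch helper: instantiate Execute<> for the concrete runtime mailbox type of this activation.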
-#define EXECUTE_MAILBOX(type) \
- case TMailboxType:: type: \
- { \
- using TMailBox = TMailboxTable:: T ## type ## Mailbox ; \
- if (Execute<TMailBox>(static_cast<TMailBox*>(header), activation, isTailExecution)) { \
- TlsThreadContext->CapturedType = ESendingType::Lazy; \
- } \
- } \
- break \
-// EXECUTE_MAILBOX
- switch (header->Type) {
- EXECUTE_MAILBOX(Simple);
- EXECUTE_MAILBOX(Revolving);
- EXECUTE_MAILBOX(HTSwap);
- EXECUTE_MAILBOX(ReadAsFilled);
- EXECUTE_MAILBOX(TinyReadAsFilled);
- }
-#undef EXECUTE_MAILBOX
- hpnow = GetCycleCountFast();
- i64 currentExecCycles = hpnow - hpprev;
- execCycles += currentExecCycles;
- hpprev = hpnow;
- execCount++;
- if (execCycles + nonExecCycles > 39000000) { // every 15 ms at 2.6GHz, so 1000 items is 15 sec (solomon interval)
- LWPROBE(ExecutorThreadStats, ExecutorPool->PoolId, ExecutorPool->GetName(), Ctx.WorkerId,
- execCount, readyActivationCount,
- NHPTimer::GetSeconds(execCycles) * 1000.0, NHPTimer::GetSeconds(nonExecCycles) * 1000.0);
- execCount = 0;
- readyActivationCount = 0;
- execCycles = 0;
- nonExecCycles = 0;
- Ctx.UpdateThreadTime();
- }
-
- if (isSharedThread && (ui64)hpnow > Ctx.SoftDeadlineTs) {
- needToStop = true;
- }
-
- if (!TlsThreadContext->IsEnoughCpu) {
- Ctx.IncreaseNotEnoughCpuExecutions();
- TlsThreadContext->IsEnoughCpu = true;
- }
- }
- }
- LWTRACK(ActivationEnd, Ctx.Orbit, Ctx.CpuId, Ctx.PoolId, Ctx.WorkerId);
- Ctx.Orbit.Reset();
- };
-
- while (!needToStop && !StopFlag.load(std::memory_order_relaxed)) {
- if (TlsThreadContext->CapturedType == ESendingType::Tail) {
- TlsThreadContext->CapturedType = ESendingType::Lazy;
- ui32 activation = std::exchange(TlsThreadContext->CapturedActivation, 0);
- executeActivation(activation, true);
- continue;
- }
- Ctx.IsNeededToWaitNextActivation = !TlsThreadContext->CapturedActivation && !isSharedThread;
- ui32 activation = ExecutorPool->GetReadyActivation(Ctx, ++RevolvingReadCounter);
- if (!activation) {
- activation = std::exchange(TlsThreadContext->CapturedActivation, 0);
- } else if (TlsThreadContext->CapturedActivation) {
- ui32 capturedActivation = std::exchange(TlsThreadContext->CapturedActivation, 0);
- ExecutorPool->ScheduleActivation(capturedActivation);
- }
- if (!activation) {
- break;
- }
- executeActivation(activation, false);
- }
- }
-
- void* TExecutorThread::ThreadProc() {
-#ifdef _linux_
- pid_t tid = syscall(SYS_gettid);
- AtomicSet(ThreadId, (ui64)tid);
-#endif
-
-#ifdef BALLOC
- ThreadDisableBalloc();
-#endif
-
- if (ThreadName) {
- ::SetCurrentThreadName(ThreadName);
- }
-
-
- std::vector<TExecutorPoolBaseMailboxed*> pools;
- pools.reserve(AvailableExecutorPools.size());
- for (auto pool : AvailableExecutorPools) {
- TExecutorPoolBaseMailboxed* mailboxedPool = dynamic_cast<TExecutorPoolBaseMailboxed*>(pool);
- if (mailboxedPool) {
- pools.push_back(mailboxedPool);
- }
- }
-
- if (pools.size() == 1) {
- ExecutorPool = pools[0];
- Ctx.Switch(
- pools[0],
- pools[0]->MailboxTable.Get(),
- NHPTimer::GetClockRate() * TimePerMailbox.SecondsFloat(),
- EventsPerMailbox,
- GetCycleCountFast() + SoftProcessingDurationTs,
- &Ctx.WorkerStats);
- }
-
- if (pools.size() <= 1) {
- ProcessExecutorPool(ExecutorPool, false);
- } else {
- while (!StopFlag.load(std::memory_order_relaxed)) {
- for (auto pool : pools) {
- Ctx.Switch(
- pool,
- pool->MailboxTable.Get(),
- NHPTimer::GetClockRate() * TimePerMailbox.SecondsFloat(),
- EventsPerMailbox,
- GetCycleCountFast() + SoftProcessingDurationTs,
- &Ctx.WorkerStats);
- Ctx.WorkerId = -1;
- ProcessExecutorPool(pool, true);
- }
- }
- }
-
- return nullptr;
- }
-
- // There must be a barrier and a check-read followed by a CAS,
- // or just a CAS without the read,
- // or queue unlocks must be performed with an exchange rather than a plain write.
- // TODO: check the performance of these options under contention.
-
- // Placed here in the hope of better compiler optimization.
- // (A simplified standalone sketch of the TMailboxHeader state machine below follows this file's diff.)
-
- bool TMailboxHeader::MarkForSchedule() {
- AtomicBarrier();
- for (;;) {
- const ui32 state = AtomicLoad(&ExecutionState);
- switch (state) {
- case TExecutionState::Inactive:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Scheduled, TExecutionState::Inactive))
- return true;
- break;
- case TExecutionState::Scheduled:
- return false;
- case TExecutionState::Leaving:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::LeavingMarked, TExecutionState::Leaving))
- return true;
- break;
- case TExecutionState::Executing:
- case TExecutionState::LeavingMarked:
- return false;
- case TExecutionState::Free:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeScheduled, TExecutionState::Free))
- return true;
- break;
- case TExecutionState::FreeScheduled:
- return false;
- case TExecutionState::FreeLeaving:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeLeavingMarked, TExecutionState::FreeLeaving))
- return true;
- break;
- case TExecutionState::FreeExecuting:
- case TExecutionState::FreeLeavingMarked:
- return false;
- default:
- Y_ABORT();
- }
- }
- }
-
- bool TMailboxHeader::LockForExecution() {
- AtomicBarrier(); // strictly speaking a barrier is required here, but since the mailbox came from the queue the barrier is already implied and this call could be removed
- for (;;) {
- const ui32 state = AtomicLoad(&ExecutionState);
- switch (state) {
- case TExecutionState::Inactive:
- return false;
- case TExecutionState::Scheduled:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Executing, TExecutionState::Scheduled))
- return true;
- break;
- case TExecutionState::Leaving:
- case TExecutionState::Executing:
- case TExecutionState::LeavingMarked:
- return false;
- case TExecutionState::Free:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeExecuting, TExecutionState::Free))
- return true;
- break;
- case TExecutionState::FreeScheduled:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeExecuting, TExecutionState::FreeScheduled))
- return true;
- break;
- case TExecutionState::FreeLeaving:
- case TExecutionState::FreeExecuting:
- case TExecutionState::FreeLeavingMarked:
- return false;
- default:
- Y_ABORT();
- }
- }
- }
-
- bool TMailboxHeader::LockFromFree() {
- AtomicBarrier();
- for (;;) {
- const ui32 state = AtomicLoad(&ExecutionState);
- switch (state) {
- case TExecutionState::Inactive:
- case TExecutionState::Scheduled:
- case TExecutionState::Leaving:
- case TExecutionState::Executing:
- case TExecutionState::LeavingMarked:
- Y_ABORT();
- case TExecutionState::Free:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Executing, TExecutionState::Free))
- return true;
- break;
- case TExecutionState::FreeScheduled:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Executing, TExecutionState::FreeScheduled))
- return true;
- break;
- case TExecutionState::FreeLeaving:
- case TExecutionState::FreeExecuting:
- case TExecutionState::FreeLeavingMarked:
- return false;
- default:
- Y_ABORT();
- }
- }
- }
-
- void TMailboxHeader::UnlockFromExecution1() {
- const ui32 state = AtomicLoad(&ExecutionState);
- if (state == TExecutionState::Executing)
- AtomicStore(&ExecutionState, (ui32)TExecutionState::Leaving);
- else if (state == TExecutionState::FreeExecuting)
- AtomicStore(&ExecutionState, (ui32)TExecutionState::FreeLeaving);
- else
- Y_ABORT();
- AtomicBarrier();
- }
-
- bool TMailboxHeader::UnlockFromExecution2(bool wouldReschedule) {
- AtomicBarrier();
- for (;;) {
- const ui32 state = AtomicLoad(&ExecutionState);
- switch (state) {
- case TExecutionState::Inactive:
- case TExecutionState::Scheduled:
- Y_ABORT();
- case TExecutionState::Leaving:
- if (!wouldReschedule) {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Inactive, TExecutionState::Leaving))
- return false;
- } else {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Scheduled, TExecutionState::Leaving))
- return true;
- }
- break;
- case TExecutionState::Executing:
- Y_ABORT();
- case TExecutionState::LeavingMarked:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Scheduled, TExecutionState::LeavingMarked))
- return true;
- break;
- case TExecutionState::Free:
- case TExecutionState::FreeScheduled:
- Y_ABORT();
- case TExecutionState::FreeLeaving:
- if (!wouldReschedule) {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Free, TExecutionState::FreeLeaving))
- return false;
- } else {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeScheduled, TExecutionState::FreeLeaving))
- return true;
- }
- break;
- case TExecutionState::FreeExecuting:
- Y_ABORT();
- case TExecutionState::FreeLeavingMarked:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeScheduled, TExecutionState::FreeLeavingMarked))
- return true;
- break;
- default:
- Y_ABORT();
- }
- }
- }
-
- bool TMailboxHeader::UnlockAsFree(bool wouldReschedule) {
- AtomicBarrier();
- for (;;) {
- const ui32 state = AtomicLoad(&ExecutionState);
- switch (state) {
- case TExecutionState::Inactive:
- case TExecutionState::Scheduled:
- Y_ABORT();
- case TExecutionState::Leaving:
- if (!wouldReschedule) {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::Free, TExecutionState::Leaving))
- return false;
- } else {
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeScheduled, TExecutionState::Leaving))
- return true;
- }
- break;
- case TExecutionState::Executing:
- Y_ABORT();
- case TExecutionState::LeavingMarked:
- if (AtomicUi32Cas(&ExecutionState, TExecutionState::FreeScheduled, TExecutionState::LeavingMarked))
- return true;
- break;
- case TExecutionState::Free:
- case TExecutionState::FreeScheduled:
- case TExecutionState::FreeLeaving:
- case TExecutionState::FreeExecuting:
- case TExecutionState::FreeLeavingMarked:
- Y_ABORT();
- default:
- Y_ABORT();
- }
- }
- }
-
- void TExecutorThread::GetCurrentStats(TExecutorThreadStats& statsCopy) const {
- Ctx.GetCurrentStats(statsCopy);
- }
-
-}
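
The TMailboxHeader methods above implement a lock-free execution-state machine: producers call MarkForSchedule to decide whether the mailbox must be pushed to a ready queue, a worker claims it with LockForExecution, and the two-phase UnlockFromExecution1/UnlockFromExecution2 decides whether it goes back to the queue. A minimal standalone sketch of the same handoff, reduced to the non-Free states and written against std::atomic instead of the library's atomics (names and the reduced state set are illustrative, not the NActors implementation), might look like this:

#include <atomic>
#include <cstdint>
#include <cstdio>

enum State : uint32_t { Inactive, Scheduled, Executing, Leaving, LeavingMarked };

struct MiniMailbox {
    std::atomic<uint32_t> State_{Inactive};

    // Producer side: returns true when the mailbox has to be pushed to the ready queue.
    bool MarkForSchedule() {
        uint32_t s = State_.load();
        for (;;) {
            switch (s) {
            case Inactive:
                if (State_.compare_exchange_weak(s, Scheduled))
                    return true;
                break;                                   // CAS failed, s was reloaded
            case Leaving:
                if (State_.compare_exchange_weak(s, LeavingMarked))
                    return true;                         // the executor will notice the new work
                break;
            default:
                return false;                            // already scheduled or executing
            }
        }
    }

    // Worker side: claim a mailbox just popped from the ready queue.
    bool LockForExecution() {
        uint32_t expected = Scheduled;
        return State_.compare_exchange_strong(expected, Executing);
    }

    // Two-phase unlock: announce Leaving, then decide whether to reschedule.
    bool Unlock(bool wouldReschedule) {
        State_.store(Leaving);
        uint32_t s = State_.load();
        for (;;) {
            if (s == Leaving) {
                if (State_.compare_exchange_weak(s, wouldReschedule ? Scheduled : Inactive))
                    return wouldReschedule;
            } else if (s == LeavingMarked) {             // a producer raced with us while leaving
                if (State_.compare_exchange_weak(s, Scheduled))
                    return true;
            }
        }
    }
};

int main() {
    MiniMailbox m;
    std::printf("schedule: %d\n", m.MarkForSchedule());  // 1 -> push to the ready queue
    std::printf("lock:     %d\n", m.LockForExecution()); // 1 -> run its events
    std::printf("unlock:   %d\n", m.Unlock(false));      // 0 -> back to Inactive
}
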
diff --git a/library/cpp/actors/core/executor_thread.h b/library/cpp/actors/core/executor_thread.h
deleted file mode 100644
index b466ad2423..0000000000
--- a/library/cpp/actors/core/executor_thread.h
+++ /dev/null
@@ -1,126 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "event.h"
-#include "callstack.h"
-#include "probes.h"
-#include "worker_context.h"
-#include "log_settings.h"
-
-#include <library/cpp/actors/util/datetime.h>
-
-#include <util/system/thread.h>
-
-namespace NActors {
- class IActor;
- class TActorSystem;
-
- class TExecutorThread: public ISimpleThread {
- public:
- static constexpr TDuration DEFAULT_TIME_PER_MAILBOX =
- TDuration::MilliSeconds(10);
- static constexpr ui32 DEFAULT_EVENTS_PER_MAILBOX = 100;
-
- TExecutorThread(TWorkerId workerId,
- TWorkerId cpuId,
- TActorSystem* actorSystem,
- IExecutorPool* executorPool,
- TMailboxTable* mailboxTable,
- const TString& threadName,
- TDuration timePerMailbox = DEFAULT_TIME_PER_MAILBOX,
- ui32 eventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX);
-
- TExecutorThread(TWorkerId workerId,
- TActorSystem* actorSystem,
- IExecutorPool* executorPool,
- TMailboxTable* mailboxTable,
- const TString& threadName,
- TDuration timePerMailbox = DEFAULT_TIME_PER_MAILBOX,
- ui32 eventsPerMailbox = DEFAULT_EVENTS_PER_MAILBOX)
- : TExecutorThread(workerId, 0, actorSystem, executorPool, mailboxTable, threadName, timePerMailbox, eventsPerMailbox)
- {}
-
- TExecutorThread(TWorkerId workerId,
- TActorSystem* actorSystem,
- TVector<IExecutorPool*> executorPools,
- const TString& threadName,
- ui64 softProcessingDurationTs,
- TDuration timePerMailbox,
- ui32 eventsPerMailbox);
-
- virtual ~TExecutorThread();
-
- template <ESendingType SendingType = ESendingType::Common>
- TActorId RegisterActor(IActor* actor, TMailboxType::EType mailboxType = TMailboxType::HTSwap, ui32 poolId = Max<ui32>(),
- TActorId parentId = TActorId());
- template <ESendingType SendingType = ESendingType::Common>
- TActorId RegisterActor(IActor* actor, TMailboxHeader* mailbox, ui32 hint, TActorId parentId = TActorId());
- void UnregisterActor(TMailboxHeader* mailbox, TActorId actorId);
- void DropUnregistered();
- const std::vector<THolder<IActor>>& GetUnregistered() const { return DyingActors; }
-
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
- void Schedule(TDuration delta, TAutoPtr<IEventHandle> ev, ISchedulerCookie* cookie = nullptr);
-
- template <ESendingType SendingType = ESendingType::Common>
- bool Send(TAutoPtr<IEventHandle> ev);
-
- void GetCurrentStats(TExecutorThreadStats& statsCopy) const;
-
- TThreadId GetThreadId() const; // blocks, must be called after Start()
- TWorkerId GetWorkerId() const;
-
- private:
- void* ThreadProc();
-
- void ProcessExecutorPool(IExecutorPool *pool, bool isSharedThread);
-
- template <typename TMailbox>
- bool Execute(TMailbox* mailbox, ui32 hint, bool isTailExecution);
-
- public:
- TActorSystem* const ActorSystem;
- std::atomic<bool> StopFlag = false;
-
- private:
- // Pool-specific
- IExecutorPool* ExecutorPool;
- TVector<IExecutorPool*> AvailableExecutorPools;
-
- // Event-specific (currently executing)
- TVector<THolder<IActor>> DyingActors;
- TActorId CurrentRecipient;
- ui64 CurrentActorScheduledEventsCounter = 0;
-
- // Thread-specific
- TWorkerContext Ctx;
- ui64 RevolvingReadCounter = 0;
- ui64 RevolvingWriteCounter = 0;
- const TString ThreadName;
- volatile TThreadId ThreadId = UnknownThreadId;
- bool IsUnitedWorker = false;
-
- TDuration TimePerMailbox;
- ui32 EventsPerMailbox;
- ui64 SoftProcessingDurationTs;
- };
-
- template <typename TMailbox>
- void UnlockFromExecution(TMailbox* mailbox, IExecutorPool* executorPool, bool asFree, ui32 hint, TWorkerId workerId, ui64& revolvingWriteCounter) {
- mailbox->UnlockFromExecution1();
- const bool needReschedule1 = (nullptr != mailbox->Head());
- if (!asFree) {
- if (mailbox->UnlockFromExecution2(needReschedule1)) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- executorPool->ScheduleActivationEx(hint, ++revolvingWriteCounter);
- }
- } else {
- if (mailbox->UnlockAsFree(needReschedule1)) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- executorPool->ScheduleActivationEx(hint, ++revolvingWriteCounter);
- }
- executorPool->ReclaimMailbox(TMailbox::MailboxType, hint, workerId, ++revolvingWriteCounter);
- }
- }
-}
diff --git a/library/cpp/actors/core/harmonizer.cpp b/library/cpp/actors/core/harmonizer.cpp
deleted file mode 100644
index 4464603dc8..0000000000
--- a/library/cpp/actors/core/harmonizer.cpp
+++ /dev/null
@@ -1,700 +0,0 @@
-#include "harmonizer.h"
-
-#include "probes.h"
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "executor_pool_basic_feature_flags.h"
-
-#include <library/cpp/actors/util/cpu_load_log.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/intrinsics.h>
-
-#include <util/system/spinlock.h>
-
-#include <algorithm>
-
-namespace NActors {
-
-LWTRACE_USING(ACTORLIB_PROVIDER);
-
-constexpr bool CheckBinaryPower(ui64 value) {
- return !(value & (value - 1));
-}
-
-template <ui8 HistoryBufferSize = 8>
-struct TValueHistory {
- static_assert(CheckBinaryPower(HistoryBufferSize));
-
- double History[HistoryBufferSize] = {0.0};
- ui64 HistoryIdx = 0;
- ui64 LastTs = Max<ui64>();
- double LastUs = 0.0;
- double AccumulatedUs = 0.0;
- ui64 AccumulatedTs = 0;
-
- template <bool WithTail=false>
- double Accumulate(auto op, auto comb, ui8 seconds) {
- double acc = AccumulatedUs;
- size_t idx = HistoryIdx;
- ui8 leftSeconds = seconds;
- if constexpr (!WithTail) {
- idx--;
- leftSeconds--;
- if (idx >= HistoryBufferSize) {
- idx = HistoryBufferSize - 1;
- }
- acc = History[idx];
- }
- do {
- idx--;
- leftSeconds--;
- if (idx >= HistoryBufferSize) {
- idx = HistoryBufferSize - 1;
- }
- if constexpr (WithTail) {
- acc = op(acc, History[idx]);
- } else if (leftSeconds) {
- acc = op(acc, History[idx]);
- } else {
- ui64 tsInSecond = Us2Ts(1'000'000.0);
- acc = op(acc, History[idx] * (tsInSecond - AccumulatedTs) / tsInSecond);
- }
- } while (leftSeconds);
- double duration = 1'000'000.0 * seconds;
- if constexpr (WithTail) {
- duration += Ts2Us(AccumulatedTs);
- }
- return comb(acc, duration);
- }
-
- template <bool WithTail=false>
- double GetAvgPartForLastSeconds(ui8 seconds) {
- auto sum = [](double acc, double value) {
- return acc + value;
- };
- auto avg = [](double sum, double duration) {
- return sum / duration;
- };
- return Accumulate<WithTail>(sum, avg, seconds);
- }
-
- double GetAvgPart() {
- return GetAvgPartForLastSeconds<true>(HistoryBufferSize);
- }
-
- double GetMaxForLastSeconds(ui8 seconds) {
- auto max = [](const double& acc, const double& value) {
- return Max(acc, value);
- };
- auto fst = [](const double& value, const double&) { return value; };
- return Accumulate<false>(max, fst, seconds);
- }
-
- double GetMax() {
- return GetMaxForLastSeconds(HistoryBufferSize);
- }
-
- i64 GetMaxInt() {
- return static_cast<i64>(GetMax());
- }
-
- double GetMinForLastSeconds(ui8 seconds) {
- auto min = [](const double& acc, const double& value) {
- return Min(acc, value);
- };
- auto fst = [](const double& value, const double&) { return value; };
- return Accumulate<false>(min, fst, seconds);
- }
-
- double GetMin() {
- return GetMinForLastSeconds(HistoryBufferSize);
- }
-
- i64 GetMinInt() {
- return static_cast<i64>(GetMin());
- }
-
- void Register(ui64 ts, double valueUs) {
- if (ts < LastTs) {
- LastTs = ts;
- LastUs = valueUs;
- AccumulatedUs = 0.0;
- AccumulatedTs = 0;
- return;
- }
- ui64 lastTs = std::exchange(LastTs, ts);
- ui64 dTs = ts - lastTs;
- double lastUs = std::exchange(LastUs, valueUs);
- double dUs = valueUs - lastUs;
- LWPROBE(RegisterValue, ts, lastTs, dTs, Us2Ts(8'000'000.0), valueUs, lastUs, dUs);
-
- if (dTs > Us2Ts(8'000'000.0)) {
- dUs = dUs * 1'000'000.0 / Ts2Us(dTs);
- for (size_t idx = 0; idx < HistoryBufferSize; ++idx) {
- History[idx] = dUs;
- }
- AccumulatedUs = 0.0;
- AccumulatedTs = 0;
- return;
- }
-
- while (dTs > 0) {
- if (AccumulatedTs + dTs < Us2Ts(1'000'000.0)) {
- AccumulatedTs += dTs;
- AccumulatedUs += dUs;
- break;
- } else {
- ui64 addTs = Us2Ts(1'000'000.0) - AccumulatedTs;
- double addUs = dUs * addTs / dTs;
- dTs -= addTs;
- dUs -= addUs;
- History[HistoryIdx] = AccumulatedUs + addUs;
- HistoryIdx = (HistoryIdx + 1) % HistoryBufferSize;
- AccumulatedUs = 0.0;
- AccumulatedTs = 0;
- }
- }
- }
-};
-
-struct TThreadInfo {
- TValueHistory<8> Consumed;
- TValueHistory<8> Booked;
-};
-
-struct TPoolInfo {
- std::vector<TThreadInfo> ThreadInfo;
- IExecutorPool* Pool = nullptr;
- TBasicExecutorPool* BasicPool = nullptr;
- i16 DefaultThreadCount = 0;
- i16 MinThreadCount = 0;
- i16 MaxThreadCount = 0;
- i16 Priority = 0;
- NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounter;
- NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounterWithSmallWindow;
- ui32 MaxAvgPingUs = 0;
- ui64 LastUpdateTs = 0;
- ui64 NotEnoughCpuExecutions = 0;
- ui64 NewNotEnoughCpuExecutions = 0;
- ui16 LocalQueueSize = NFeatures::TLocalQueuesFeatureFlags::MIN_LOCAL_QUEUE_SIZE;
-
- TAtomic LastFlags = 0; // 0 - isNeedy; 1 - isStarved; 2 - isHoggish
- TAtomic IncreasingThreadsByNeedyState = 0;
- TAtomic IncreasingThreadsByExchange = 0;
- TAtomic DecreasingThreadsByStarvedState = 0;
- TAtomic DecreasingThreadsByHoggishState = 0;
- TAtomic DecreasingThreadsByExchange = 0;
- TAtomic PotentialMaxThreadCount = 0;
-
- TValueHistory<16> Consumed;
- TValueHistory<16> Booked;
-
- TAtomic MaxConsumedCpu = 0;
- TAtomic MinConsumedCpu = 0;
- TAtomic MaxBookedCpu = 0;
- TAtomic MinBookedCpu = 0;
-
- std::unique_ptr<TWaitingStats<ui64>> WaitingStats;
- std::unique_ptr<TWaitingStats<double>> MovingWaitingStats;
-
- double GetBooked(i16 threadIdx);
- double GetlastSecondPoolBooked(i16 threadIdx);
- double GetConsumed(i16 threadIdx);
- double GetlastSecondPoolConsumed(i16 threadIdx);
- TCpuConsumption PullStats(ui64 ts);
- i16 GetThreadCount();
- void SetThreadCount(i16 threadCount);
- bool IsAvgPingGood();
-};
-
-double TPoolInfo::GetBooked(i16 threadIdx) {
- if ((size_t)threadIdx < ThreadInfo.size()) {
- return ThreadInfo[threadIdx].Booked.GetAvgPart();
- }
- return 0.0;
-}
-
-double TPoolInfo::GetlastSecondPoolBooked(i16 threadIdx) {
- if ((size_t)threadIdx < ThreadInfo.size()) {
- return ThreadInfo[threadIdx].Booked.GetAvgPartForLastSeconds(1);
- }
- return 0.0;
-}
-
-double TPoolInfo::GetConsumed(i16 threadIdx) {
- if ((size_t)threadIdx < ThreadInfo.size()) {
- return ThreadInfo[threadIdx].Consumed.GetAvgPart();
- }
- return 0.0;
-}
-
-double TPoolInfo::GetlastSecondPoolConsumed(i16 threadIdx) {
- if ((size_t)threadIdx < ThreadInfo.size()) {
- return ThreadInfo[threadIdx].Consumed.GetAvgPartForLastSeconds(1);
- }
- return 0.0;
-}
-
-#define UNROLL_HISTORY(history) (history)[0], (history)[1], (history)[2], (history)[3], (history)[4], (history)[5], (history)[6], (history)[7]
-TCpuConsumption TPoolInfo::PullStats(ui64 ts) {
- TCpuConsumption acc;
- for (i16 threadIdx = 0; threadIdx < MaxThreadCount; ++threadIdx) {
- TThreadInfo &threadInfo = ThreadInfo[threadIdx];
- TCpuConsumption cpuConsumption = Pool->GetThreadCpuConsumption(threadIdx);
- acc.Add(cpuConsumption);
- threadInfo.Consumed.Register(ts, cpuConsumption.ConsumedUs);
- LWPROBE(SavedValues, Pool->PoolId, Pool->GetName(), "consumed", UNROLL_HISTORY(threadInfo.Consumed.History));
- threadInfo.Booked.Register(ts, cpuConsumption.BookedUs);
- LWPROBE(SavedValues, Pool->PoolId, Pool->GetName(), "booked", UNROLL_HISTORY(threadInfo.Booked.History));
- }
- Consumed.Register(ts, acc.ConsumedUs);
- RelaxedStore(&MaxConsumedCpu, Consumed.GetMaxInt());
- RelaxedStore(&MinConsumedCpu, Consumed.GetMinInt());
- Booked.Register(ts, acc.BookedUs);
- RelaxedStore(&MaxBookedCpu, Booked.GetMaxInt());
- RelaxedStore(&MinBookedCpu, Booked.GetMinInt());
- NewNotEnoughCpuExecutions = acc.NotEnoughCpuExecutions - NotEnoughCpuExecutions;
- NotEnoughCpuExecutions = acc.NotEnoughCpuExecutions;
- if (WaitingStats && BasicPool) {
- WaitingStats->Clear();
- BasicPool->GetWaitingStats(*WaitingStats);
- if constexpr (!NFeatures::TSpinFeatureFlags::CalcPerThread) {
- MovingWaitingStats->Add(*WaitingStats, 0.8, 0.2);
- }
- }
- return acc;
-}
-#undef UNROLL_HISTORY
-
-i16 TPoolInfo::GetThreadCount() {
- return Pool->GetThreadCount();
-}
-
-void TPoolInfo::SetThreadCount(i16 threadCount) {
- Pool->SetThreadCount(threadCount);
-}
-
-bool TPoolInfo::IsAvgPingGood() {
- bool res = true;
- if (AvgPingCounter) {
- res &= *AvgPingCounter > MaxAvgPingUs;
- }
- if (AvgPingCounterWithSmallWindow) {
- res &= *AvgPingCounterWithSmallWindow > MaxAvgPingUs;
- }
- return res;
-}
-
-class THarmonizer: public IHarmonizer {
-private:
- std::atomic<bool> IsDisabled = false;
- TSpinLock Lock;
- std::atomic<ui64> NextHarmonizeTs = 0;
- std::vector<TPoolInfo> Pools;
- std::vector<ui16> PriorityOrder;
-
- TValueHistory<16> Consumed;
- TValueHistory<16> Booked;
-
- TAtomic MaxConsumedCpu = 0;
- TAtomic MinConsumedCpu = 0;
- TAtomic MaxBookedCpu = 0;
- TAtomic MinBookedCpu = 0;
-
- std::atomic<double> AvgAwakeningTimeUs = 0;
- std::atomic<double> AvgWakingUpTimeUs = 0;
-
- void PullStats(ui64 ts);
- void HarmonizeImpl(ui64 ts);
- void CalculatePriorityOrder();
-public:
- THarmonizer(ui64 ts);
- virtual ~THarmonizer();
- double Rescale(double value) const;
- void Harmonize(ui64 ts) override;
- void DeclareEmergency(ui64 ts) override;
- void AddPool(IExecutorPool* pool, TSelfPingInfo *pingInfo) override;
- void Enable(bool enable) override;
- TPoolHarmonizerStats GetPoolStats(i16 poolId) const override;
- THarmonizerStats GetStats() const override;
-};
-
-THarmonizer::THarmonizer(ui64 ts) {
- NextHarmonizeTs = ts;
-}
-
-THarmonizer::~THarmonizer() {
-}
-
-double THarmonizer::Rescale(double value) const {
- return Max(0.0, Min(1.0, value * (1.0/0.9)));
-}
-
-void THarmonizer::PullStats(ui64 ts) {
- TCpuConsumption acc;
- for (TPoolInfo &pool : Pools) {
- TCpuConsumption consumption = pool.PullStats(ts);
- acc.Add(consumption);
- }
- Consumed.Register(ts, acc.ConsumedUs);
- RelaxedStore(&MaxConsumedCpu, Consumed.GetMaxInt());
- RelaxedStore(&MinConsumedCpu, Consumed.GetMinInt());
- Booked.Register(ts, acc.BookedUs);
- RelaxedStore(&MaxBookedCpu, Booked.GetMaxInt());
- RelaxedStore(&MinBookedCpu, Booked.GetMinInt());
-}
-
-Y_FORCE_INLINE bool IsStarved(double consumed, double booked) {
- return Max(consumed, booked) > 0.1 && consumed < booked * 0.7;
-}
-
-Y_FORCE_INLINE bool IsHoggish(double booked, ui16 currentThreadCount) {
- return booked < currentThreadCount - 1;
-}
-
-void THarmonizer::HarmonizeImpl(ui64 ts) {
- bool isStarvedPresent = false;
- double booked = 0.0;
- double consumed = 0.0;
- double lastSecondBooked = 0.0;
- i64 beingStopped = 0;
- i64 total = 0;
- TStackVec<size_t, 8> needyPools;
- TStackVec<size_t, 8> hoggishPools;
- TStackVec<bool, 8> isNeedyByPool;
-
- size_t sumOfAdditionalThreads = 0;
-
-
- ui64 TotalWakingUpTime = 0;
- ui64 TotalWakingUps = 0;
- ui64 TotalAwakeningTime = 0;
- ui64 TotalAwakenings = 0;
- for (size_t poolIdx = 0; poolIdx < Pools.size(); ++poolIdx) {
- TPoolInfo& pool = Pools[poolIdx];
- if (pool.WaitingStats) {
- TotalWakingUpTime += pool.WaitingStats->WakingUpTotalTime;
- TotalWakingUps += pool.WaitingStats->WakingUpCount;
- TotalAwakeningTime += pool.WaitingStats->AwakingTotalTime;
- TotalAwakenings += pool.WaitingStats->AwakingCount;
- }
- }
-
- constexpr ui64 knownAvgWakingUpTime = TWaitingStatsConstants::KnownAvgWakingUpTime;
- constexpr ui64 knownAvgAwakeningUpTime = TWaitingStatsConstants::KnownAvgAwakeningTime;
-
- ui64 realAvgWakingUpTime = (TotalWakingUps ? TotalWakingUpTime / TotalWakingUps : knownAvgWakingUpTime);
- ui64 avgWakingUpTime = realAvgWakingUpTime;
- if (avgWakingUpTime > 2 * knownAvgWakingUpTime || !realAvgWakingUpTime) {
- avgWakingUpTime = knownAvgWakingUpTime;
- }
- AvgWakingUpTimeUs = Ts2Us(avgWakingUpTime);
-
- ui64 realAvgAwakeningTime = (TotalAwakenings ? TotalAwakeningTime / TotalAwakenings : knownAvgAwakeningUpTime);
- ui64 avgAwakeningTime = realAvgAwakeningTime;
- if (avgAwakeningTime > 2 * knownAvgAwakeningUpTime || !realAvgAwakeningTime) {
- avgAwakeningTime = knownAvgAwakeningUpTime;
- }
- AvgAwakeningTimeUs = Ts2Us(avgAwakeningTime);
-
- ui64 avgWakingUpConsumption = avgWakingUpTime + avgAwakeningTime;
- LWPROBE(WakingUpConsumption, Ts2Us(avgWakingUpTime), Ts2Us(realAvgWakingUpTime), Ts2Us(avgAwakeningTime), Ts2Us(realAvgAwakeningTime), Ts2Us(avgWakingUpConsumption));
-
- for (size_t poolIdx = 0; poolIdx < Pools.size(); ++poolIdx) {
- TPoolInfo& pool = Pools[poolIdx];
- if (!pool.BasicPool) {
- continue;
- }
- if constexpr (NFeatures::TSpinFeatureFlags::CalcPerThread) {
- pool.BasicPool->CalcSpinPerThread(avgWakingUpConsumption);
- } else if constexpr (NFeatures::TSpinFeatureFlags::UsePseudoMovingWindow) {
- ui64 newSpinThreshold = pool.MovingWaitingStats->CalculateGoodSpinThresholdCycles(avgWakingUpConsumption);
- pool.BasicPool->SetSpinThresholdCycles(newSpinThreshold);
- } else {
- ui64 newSpinThreshold = pool.WaitingStats->CalculateGoodSpinThresholdCycles(avgWakingUpConsumption);
- pool.BasicPool->SetSpinThresholdCycles(newSpinThreshold);
- }
- pool.BasicPool->ClearWaitingStats();
- }
-
- for (size_t poolIdx = 0; poolIdx < Pools.size(); ++poolIdx) {
- TPoolInfo& pool = Pools[poolIdx];
- total += pool.DefaultThreadCount;
-
- ui32 currentThreadCount = pool.GetThreadCount();
- sumOfAdditionalThreads += currentThreadCount - pool.DefaultThreadCount;
-
- double poolBooked = 0.0;
- double poolConsumed = 0.0;
- double lastSecondPoolBooked = 0.0;
- double lastSecondPoolConsumed = 0.0;
- beingStopped += pool.Pool->GetBlockingThreadCount();
- for (i16 threadIdx = 0; threadIdx < pool.MaxThreadCount; ++threadIdx) {
- poolBooked += Rescale(pool.GetBooked(threadIdx));
- lastSecondPoolBooked += Rescale(pool.GetlastSecondPoolBooked(threadIdx));
- poolConsumed += Rescale(pool.GetConsumed(threadIdx));
- lastSecondPoolConsumed += Rescale(pool.GetlastSecondPoolConsumed(threadIdx));
- }
- bool isStarved = IsStarved(poolConsumed, poolBooked) || IsStarved(lastSecondPoolConsumed, lastSecondPoolBooked);
- if (isStarved) {
- isStarvedPresent = true;
- }
-
- bool isNeedy = (pool.IsAvgPingGood() || pool.NewNotEnoughCpuExecutions) && poolBooked >= currentThreadCount;
- if (pool.AvgPingCounter) {
- if (pool.LastUpdateTs + Us2Ts(3'000'000ull) > ts) {
- isNeedy = false;
- } else {
- pool.LastUpdateTs = ts;
- }
- }
- isNeedyByPool.push_back(isNeedy);
- if (isNeedy) {
- needyPools.push_back(poolIdx);
- }
- bool isHoggish = IsHoggish(poolBooked, currentThreadCount)
- || IsHoggish(lastSecondPoolBooked, currentThreadCount);
- if (isHoggish) {
- hoggishPools.push_back(poolIdx);
- }
- booked += poolBooked;
- consumed += poolConsumed;
- AtomicSet(pool.LastFlags, (i64)isNeedy | ((i64)isStarved << 1) | ((i64)isHoggish << 2));
- LWPROBE(HarmonizeCheckPool, poolIdx, pool.Pool->GetName(), poolBooked, poolConsumed, lastSecondPoolBooked, lastSecondPoolConsumed, pool.GetThreadCount(), pool.MaxThreadCount, isStarved, isNeedy, isHoggish);
- }
- double budget = total - Max(booked, lastSecondBooked);
- i16 budgetInt = static_cast<i16>(Max(budget, 0.0));
- if (budget < -0.1) {
- isStarvedPresent = true;
- }
- for (size_t poolIdx = 0; poolIdx < Pools.size(); ++poolIdx) {
- TPoolInfo& pool = Pools[poolIdx];
- AtomicSet(pool.PotentialMaxThreadCount, Min(pool.MaxThreadCount, budgetInt));
- }
- double overbooked = consumed - booked;
- if (overbooked < 0) {
- isStarvedPresent = false;
- }
-
- if (needyPools.size()) {
- Sort(needyPools.begin(), needyPools.end(), [&] (i16 lhs, i16 rhs) {
- if (Pools[lhs].Priority != Pools[rhs].Priority) {
- return Pools[lhs].Priority > Pools[rhs].Priority;
- }
- return Pools[lhs].Pool->PoolId < Pools[rhs].Pool->PoolId;
- });
- }
-
- if (isStarvedPresent) {
- // last_starved_at_consumed_value = sum of consumed over all pools;
- // TODO(cthulhu): use this as a limit, smoothly drive that limit toward total,
- // and use it instead of total
- if (beingStopped && beingStopped >= overbooked) {
- // do nothing
- } else {
- for (ui16 poolIdx : PriorityOrder) {
- TPoolInfo &pool = Pools[poolIdx];
- i64 threadCount = pool.GetThreadCount();
- while (threadCount > pool.DefaultThreadCount) {
- pool.SetThreadCount(--threadCount);
- AtomicIncrement(pool.DecreasingThreadsByStarvedState);
- overbooked--;
- sumOfAdditionalThreads--;
-
- LWPROBE(HarmonizeOperation, poolIdx, pool.Pool->GetName(), "decrease by starving", threadCount - 1, pool.DefaultThreadCount, pool.MaxThreadCount);
- if (overbooked < 1) {
- break;
- }
- }
- if (overbooked < 1) {
- break;
- }
- }
- }
- } else {
- for (size_t needyPoolIdx : needyPools) {
- TPoolInfo &pool = Pools[needyPoolIdx];
- i64 threadCount = pool.GetThreadCount();
- if (budget >= 1.0) {
- if (threadCount + 1 <= pool.MaxThreadCount) {
- AtomicIncrement(pool.IncreasingThreadsByNeedyState);
- isNeedyByPool[needyPoolIdx] = false;
- sumOfAdditionalThreads++;
- pool.SetThreadCount(threadCount + 1);
- budget -= 1.0;
- LWPROBE(HarmonizeOperation, needyPoolIdx, pool.Pool->GetName(), "increase by needs", threadCount + 1, pool.DefaultThreadCount, pool.MaxThreadCount);
- }
- }
- if constexpr (NFeatures::IsLocalQueues()) {
- bool needToExpandLocalQueue = budget < 1.0 || threadCount >= pool.MaxThreadCount;
- needToExpandLocalQueue &= (bool)pool.BasicPool;
- needToExpandLocalQueue &= (pool.MaxThreadCount > 1);
- needToExpandLocalQueue &= (pool.LocalQueueSize < NFeatures::TLocalQueuesFeatureFlags::MAX_LOCAL_QUEUE_SIZE);
- if (needToExpandLocalQueue) {
- pool.BasicPool->SetLocalQueueSize(++pool.LocalQueueSize);
- }
- }
- }
- }
-
- if (budget < 1.0) {
- size_t takingAwayThreads = 0;
- for (size_t needyPoolIdx : needyPools) {
- TPoolInfo &pool = Pools[needyPoolIdx];
- i64 threadCount = pool.GetThreadCount();
- sumOfAdditionalThreads -= threadCount - pool.DefaultThreadCount;
- if (sumOfAdditionalThreads < takingAwayThreads + 1) {
- break;
- }
- if (!isNeedyByPool[needyPoolIdx]) {
- continue;
- }
- AtomicIncrement(pool.IncreasingThreadsByExchange);
- isNeedyByPool[needyPoolIdx] = false;
- takingAwayThreads++;
- pool.SetThreadCount(threadCount + 1);
-
- LWPROBE(HarmonizeOperation, needyPoolIdx, pool.Pool->GetName(), "increase by exchanging", threadCount + 1, pool.DefaultThreadCount, pool.MaxThreadCount);
- }
-
- for (ui16 poolIdx : PriorityOrder) {
- if (takingAwayThreads <= 0) {
- break;
- }
-
- TPoolInfo &pool = Pools[poolIdx];
- size_t threadCount = pool.GetThreadCount();
- size_t additionalThreadsCount = Max<size_t>(0L, threadCount - pool.DefaultThreadCount);
- size_t currentTakingAwayThreads = Min(additionalThreadsCount, takingAwayThreads);
-
- if (!currentTakingAwayThreads) {
- continue;
- }
- takingAwayThreads -= currentTakingAwayThreads;
- pool.SetThreadCount(threadCount - currentTakingAwayThreads);
-
- AtomicAdd(pool.DecreasingThreadsByExchange, currentTakingAwayThreads);
- LWPROBE(HarmonizeOperation, poolIdx, pool.Pool->GetName(), "decrease by exchanging", threadCount - currentTakingAwayThreads, pool.DefaultThreadCount, pool.MaxThreadCount);
- }
- }
-
- for (size_t hoggishPoolIdx : hoggishPools) {
- TPoolInfo &pool = Pools[hoggishPoolIdx];
- i64 threadCount = pool.GetThreadCount();
- if (pool.BasicPool && pool.LocalQueueSize > NFeatures::TLocalQueuesFeatureFlags::MIN_LOCAL_QUEUE_SIZE) {
- pool.LocalQueueSize = std::min<ui16>(NFeatures::TLocalQueuesFeatureFlags::MIN_LOCAL_QUEUE_SIZE, pool.LocalQueueSize / 2);
- pool.BasicPool->SetLocalQueueSize(pool.LocalQueueSize);
- }
- if (threadCount > pool.MinThreadCount) {
- AtomicIncrement(pool.DecreasingThreadsByHoggishState);
- LWPROBE(HarmonizeOperation, hoggishPoolIdx, pool.Pool->GetName(), "decrease by hoggish", threadCount - 1, pool.DefaultThreadCount, pool.MaxThreadCount);
- pool.SetThreadCount(threadCount - 1);
- }
- }
-}
-
-void THarmonizer::CalculatePriorityOrder() {
- PriorityOrder.resize(Pools.size());
- Iota(PriorityOrder.begin(), PriorityOrder.end(), 0);
- Sort(PriorityOrder.begin(), PriorityOrder.end(), [&] (i16 lhs, i16 rhs) {
- if (Pools[lhs].Priority != Pools[rhs].Priority) {
- return Pools[lhs].Priority < Pools[rhs].Priority;
- }
- return Pools[lhs].Pool->PoolId > Pools[rhs].Pool->PoolId;
- });
-}
-
-void THarmonizer::Harmonize(ui64 ts) {
- if (IsDisabled || NextHarmonizeTs > ts || !Lock.TryAcquire()) {
- LWPROBE(TryToHarmonizeFailed, ts, NextHarmonizeTs, IsDisabled, false);
- return;
- }
- // Check again under the lock
- if (IsDisabled) {
- LWPROBE(TryToHarmonizeFailed, ts, NextHarmonizeTs, IsDisabled, true);
- Lock.Release();
- return;
- }
- // This line is never reached while the harmonizer is disabled
- ui64 previousNextHarmonizeTs = NextHarmonizeTs.exchange(ts + Us2Ts(1'000'000ull));
- LWPROBE(TryToHarmonizeSuccess, ts, NextHarmonizeTs, previousNextHarmonizeTs);
-
- if (PriorityOrder.empty()) {
- CalculatePriorityOrder();
- }
-
- PullStats(ts);
- HarmonizeImpl(ts);
-
- Lock.Release();
-}
-
-void THarmonizer::DeclareEmergency(ui64 ts) {
- NextHarmonizeTs = ts;
-}
-
-void THarmonizer::AddPool(IExecutorPool* pool, TSelfPingInfo *pingInfo) {
- TGuard<TSpinLock> guard(Lock);
- TPoolInfo poolInfo;
- poolInfo.Pool = pool;
- poolInfo.BasicPool = dynamic_cast<TBasicExecutorPool*>(pool);
- poolInfo.DefaultThreadCount = pool->GetDefaultThreadCount();
- poolInfo.MinThreadCount = pool->GetMinThreadCount();
- poolInfo.MaxThreadCount = pool->GetMaxThreadCount();
- poolInfo.ThreadInfo.resize(poolInfo.MaxThreadCount);
- poolInfo.Priority = pool->GetPriority();
- pool->SetThreadCount(poolInfo.DefaultThreadCount);
- if (pingInfo) {
- poolInfo.AvgPingCounter = pingInfo->AvgPingCounter;
- poolInfo.AvgPingCounterWithSmallWindow = pingInfo->AvgPingCounterWithSmallWindow;
- poolInfo.MaxAvgPingUs = pingInfo->MaxAvgPingUs;
- }
- if (poolInfo.BasicPool) {
- poolInfo.WaitingStats.reset(new TWaitingStats<ui64>());
- poolInfo.MovingWaitingStats.reset(new TWaitingStats<double>());
- }
- Pools.push_back(std::move(poolInfo));
- PriorityOrder.clear();
-}
-
-void THarmonizer::Enable(bool enable) {
- TGuard<TSpinLock> guard(Lock);
- IsDisabled = enable;
-}
-
-IHarmonizer* MakeHarmonizer(ui64 ts) {
- return new THarmonizer(ts);
-}
-
-TPoolHarmonizerStats THarmonizer::GetPoolStats(i16 poolId) const {
- const TPoolInfo &pool = Pools[poolId];
- ui64 flags = RelaxedLoad(&pool.LastFlags);
- return TPoolHarmonizerStats{
- .IncreasingThreadsByNeedyState = static_cast<ui64>(RelaxedLoad(&pool.IncreasingThreadsByNeedyState)),
- .IncreasingThreadsByExchange = static_cast<ui64>(RelaxedLoad(&pool.IncreasingThreadsByExchange)),
- .DecreasingThreadsByStarvedState = static_cast<ui64>(RelaxedLoad(&pool.DecreasingThreadsByStarvedState)),
- .DecreasingThreadsByHoggishState = static_cast<ui64>(RelaxedLoad(&pool.DecreasingThreadsByHoggishState)),
- .DecreasingThreadsByExchange = static_cast<ui64>(RelaxedLoad(&pool.DecreasingThreadsByExchange)),
- .MaxConsumedCpu = static_cast<i64>(RelaxedLoad(&pool.MaxConsumedCpu)),
- .MinConsumedCpu = static_cast<i64>(RelaxedLoad(&pool.MinConsumedCpu)),
- .MaxBookedCpu = static_cast<i64>(RelaxedLoad(&pool.MaxBookedCpu)),
- .MinBookedCpu = static_cast<i64>(RelaxedLoad(&pool.MinBookedCpu)),
- .PotentialMaxThreadCount = static_cast<i16>(RelaxedLoad(&pool.PotentialMaxThreadCount)),
- .IsNeedy = static_cast<bool>(flags & 1),
- .IsStarved = static_cast<bool>(flags & 2),
- .IsHoggish = static_cast<bool>(flags & 4),
- };
-}
-
-THarmonizerStats THarmonizer::GetStats() const {
- return THarmonizerStats{
- .MaxConsumedCpu = static_cast<i64>(RelaxedLoad(&MaxConsumedCpu)),
- .MinConsumedCpu = static_cast<i64>(RelaxedLoad(&MinConsumedCpu)),
- .MaxBookedCpu = static_cast<i64>(RelaxedLoad(&MaxBookedCpu)),
- .MinBookedCpu = static_cast<i64>(RelaxedLoad(&MinBookedCpu)),
- .AvgAwakeningTimeUs = AvgAwakeningTimeUs,
- .AvgWakingUpTimeUs = AvgWakingUpTimeUs,
- };
-}
-
-}
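
The TValueHistory template above spreads the delta between consecutive samples across whole one-second buckets of a power-of-two ring buffer, accumulating the remainder until the current second completes. A minimal standalone sketch of that bucketing, working in plain seconds instead of CPU cycles (Us2Ts/Ts2Us elided) and with invented sample values, might look like this:

#include <array>
#include <cstddef>
#include <cstdio>

struct MiniHistory {
    static constexpr size_t N = 8;                // a power of two, like HistoryBufferSize
    std::array<double, N> History{};
    size_t Idx = 0;
    double LastTs = -1.0, LastValue = 0.0;
    double AccValue = 0.0, AccTime = 0.0;         // the partially filled current bucket

    void Register(double ts, double value) {
        if (LastTs < 0.0) { LastTs = ts; LastValue = value; return; }
        double dTime = ts - LastTs, dValue = value - LastValue;
        LastTs = ts; LastValue = value;
        while (dTime > 0.0) {
            double room = 1.0 - AccTime;          // time left in the current one-second bucket
            if (dTime < room) { AccTime += dTime; AccValue += dValue; break; }
            double part = dValue * room / dTime;  // value proportional to the filled remainder
            History[Idx] = AccValue + part;
            Idx = (Idx + 1) % N;
            dTime -= room; dValue -= part;
            AccValue = 0.0; AccTime = 0.0;
        }
    }

    double AvgOverLastSeconds(size_t seconds) const {
        double sum = 0.0;
        for (size_t i = 1; i <= seconds; ++i)
            sum += History[(Idx + N - i) % N];
        return sum / seconds;
    }
};

int main() {
    MiniHistory h;
    h.Register(0.0, 0.0);
    h.Register(2.5, 1.25);                        // 1.25 "cpu-seconds" over 2.5 s, i.e. a rate of 0.5
    std::printf("avg over last 2 s: %.2f\n", h.AvgOverLastSeconds(2)); // 0.50
}
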
diff --git a/library/cpp/actors/core/harmonizer.h b/library/cpp/actors/core/harmonizer.h
deleted file mode 100644
index ba98048e49..0000000000
--- a/library/cpp/actors/core/harmonizer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "config.h"
-
-namespace NActors {
- class IExecutorPool;
-
- template <typename T>
- struct TWaitingStats;
-
- struct TPoolHarmonizerStats {
- ui64 IncreasingThreadsByNeedyState = 0;
- ui64 IncreasingThreadsByExchange = 0;
- ui64 DecreasingThreadsByStarvedState = 0;
- ui64 DecreasingThreadsByHoggishState = 0;
- ui64 DecreasingThreadsByExchange = 0;
- i64 MaxConsumedCpu = 0;
- i64 MinConsumedCpu = 0;
- i64 MaxBookedCpu = 0;
- i64 MinBookedCpu = 0;
- i16 PotentialMaxThreadCount = 0;
- bool IsNeedy = false;
- bool IsStarved = false;
- bool IsHoggish = false;
- };
-
- struct THarmonizerStats {
- i64 MaxConsumedCpu = 0;
- i64 MinConsumedCpu = 0;
- i64 MaxBookedCpu = 0;
- i64 MinBookedCpu = 0;
-
- double AvgAwakeningTimeUs = 0;
- double AvgWakingUpTimeUs = 0;
- };
-
- // Pool CPU harmonizer
- class IHarmonizer {
- public:
- virtual ~IHarmonizer() {}
- virtual void Harmonize(ui64 ts) = 0;
- virtual void DeclareEmergency(ui64 ts) = 0;
- virtual void AddPool(IExecutorPool* pool, TSelfPingInfo *pingInfo = nullptr) = 0;
- virtual void Enable(bool enable) = 0;
- virtual TPoolHarmonizerStats GetPoolStats(i16 poolId) const = 0;
- virtual THarmonizerStats GetStats() const = 0;
- };
-
- IHarmonizer* MakeHarmonizer(ui64 ts);
-}
diff --git a/library/cpp/actors/core/hfunc.h b/library/cpp/actors/core/hfunc.h
deleted file mode 100644
index 6d1aeeecc3..0000000000
--- a/library/cpp/actors/core/hfunc.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#pragma once
-
-#include "actor.h"
-#include "executor_thread.h"
-
-#include <util/system/defaults.h>
-
-#define HFuncCtx(TEvType, HandleFunc, Ctx) \
- case TEvType::EventType: { \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x, Ctx); \
- break; \
- }
-
-#define HFunc(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x, this->ActorContext()); \
- break; \
- }
-
-#define hFunc(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x); \
- break; \
- }
-
-#define HFuncTraced(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- TRACE_EVENT_TYPE(Y_STRINGIZE(TEvType)); \
- TEvType::TPtr* x = reinterpret_cast<TEvType::TPtr*>(&ev); \
- HandleFunc(*x, this->ActorContext()); \
- break; \
- }
-
-#define hFuncTraced(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- TRACE_EVENT_TYPE(Y_STRINGIZE(TEvType)); \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x); \
- break; \
- }
-
-#define HTemplFunc(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x, this->ActorContext()); \
- break; \
- }
-
-#define hTemplFunc(TEvType, HandleFunc) \
- case TEvType::EventType: { \
- typename TEvType::TPtr* x = reinterpret_cast<typename TEvType::TPtr*>(&ev); \
- HandleFunc(*x); \
- break; \
- }
-
-#define SFunc(TEvType, HandleFunc) \
- case TEvType::EventType: \
- HandleFunc(this->ActorContext()); \
- break;
-
-#define sFunc(TEvType, HandleFunc) \
- case TEvType::EventType: \
- HandleFunc(); \
- break;
-
-#define CFunc(TEventType, HandleFunc) \
- case TEventType: \
- HandleFunc(this->ActorContext()); \
- break;
-
-#define CFuncCtx(TEventType, HandleFunc, ctx) \
- case TEventType: \
- HandleFunc(ctx); \
- break;
-
-#define cFunc(TEventType, HandleFunc) \
- case TEventType: \
- HandleFunc(); \
- break;
-
-#define FFunc(TEventType, HandleFunc) \
- case TEventType: \
- HandleFunc(ev, this->ActorContext()); \
- break;
-
-#define fFunc(TEventType, HandleFunc) \
- case TEventType: \
- HandleFunc(ev); \
- break;
-
-#define IgnoreFunc(TEvType) \
- case TEvType::EventType: \
- break;
-
-#define ExceptionFunc(ExceptionType, HandleFunc) \
- catch (const ExceptionType& exception) { \
- HandleFunc(exception); \
- }
-
-#define ExceptionFuncEv(ExceptionType, HandleFunc) \
- catch (const ExceptionType& exception) { \
- HandleFunc(exception, ev); \
- }
-
-#define AnyExceptionFunc(HandleFunc) \
- catch (...) { \
- HandleFunc(); \
- }
-
-#define AnyExceptionFuncEv(HandleFunc) \
- catch (...) { \
- HandleFunc(ev); \
- }
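
The macros above are meant to be expanded inside an actor's state-function switch over ev->GetTypeRewrite(). A hedged usage sketch follows; TPingActor, its private TEvPing event and the handler bodies are invented for illustration, while the macros, STFUNC, TEventLocal and TEvents come from the actor library headers included below:

#include <library/cpp/actors/core/actor.h>
#include <library/cpp/actors/core/event_local.h>
#include <library/cpp/actors/core/events.h>
#include <library/cpp/actors/core/hfunc.h>

class TPingActor: public NActors::TActor<TPingActor> {
    using TBase = NActors::TActor<TPingActor>;

    struct TEvPrivate {
        enum EEv {
            EvPing = EventSpaceBegin(NActors::TEvents::ES_PRIVATE),
        };
        struct TEvPing: public NActors::TEventLocal<TEvPing, EvPing> {};
    };

public:
    TPingActor()
        : TBase(&TPingActor::StateWork)
    {}

private:
    void Handle(TEvPrivate::TEvPing::TPtr& ev) {
        Y_UNUSED(ev);                              // the handler receives the typed event pointer
    }

    void HandleWakeup() {
        Send(SelfId(), new TEvPrivate::TEvPing);   // sFunc handlers take no arguments
    }

    STFUNC(StateWork) {
        switch (ev->GetTypeRewrite()) {
            hFunc(TEvPrivate::TEvPing, Handle);
            sFunc(NActors::TEvents::TEvWakeup, HandleWakeup);
            cFunc(NActors::TEvents::TSystem::Poison, PassAway);
        }
    }
};
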
diff --git a/library/cpp/actors/core/interconnect.cpp b/library/cpp/actors/core/interconnect.cpp
deleted file mode 100644
index b477f71e57..0000000000
--- a/library/cpp/actors/core/interconnect.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-#include "interconnect.h"
-#include <util/digest/murmur.h>
-#include <google/protobuf/text_format.h>
-
-namespace NActors {
-
- TNodeLocation::TNodeLocation(const NActorsInterconnect::TNodeLocation& location) {
- const NProtoBuf::Descriptor *descriptor = NActorsInterconnect::TNodeLocation::descriptor();
- const NActorsInterconnect::TNodeLocation *locp = &location;
- NActorsInterconnect::TNodeLocation temp; // for legacy location case
-
- // WalleConfig compatibility section
- if (locp->HasBody()) {
- if (locp == &location) {
- temp.CopyFrom(*locp);
- locp = &temp;
- }
- temp.SetUnit(::ToString(temp.GetBody()));
- temp.ClearBody();
- }
-
- // legacy value processing
- if (locp->HasDataCenterNum() || locp->HasRoomNum() || locp->HasRackNum() || locp->HasBodyNum()) {
- if (locp == &location) {
- temp.CopyFrom(*locp);
- locp = &temp;
- }
- LegacyValue = TLegacyValue{temp.GetDataCenterNum(), temp.GetRoomNum(), temp.GetRackNum(), temp.GetBodyNum()};
- temp.ClearDataCenterNum();
- temp.ClearRoomNum();
- temp.ClearRackNum();
- temp.ClearBodyNum();
-
- const NProtoBuf::Reflection *reflection = temp.GetReflection();
- bool fieldsFromNewFormat = false;
- for (int i = 0, count = descriptor->field_count(); !fieldsFromNewFormat && i < count; ++i) {
- fieldsFromNewFormat |= reflection->HasField(temp, descriptor->field(i));
- }
- if (!fieldsFromNewFormat) {
- const auto& v = LegacyValue->DataCenter;
- const char *p = reinterpret_cast<const char*>(&v);
- temp.SetDataCenter(TString(p, strnlen(p, sizeof(ui32))));
- temp.SetModule(::ToString(LegacyValue->Room));
- temp.SetRack(::ToString(LegacyValue->Rack));
- temp.SetUnit(::ToString(LegacyValue->Body));
- }
- }
-
- auto makeString = [&] {
- NProtoBuf::TextFormat::Printer p;
- p.SetSingleLineMode(true);
- TString s;
- p.PrintToString(*locp, &s);
- return s;
- };
-
- // modern format parsing
- const NProtoBuf::Reflection *reflection = locp->GetReflection();
- for (int i = 0, count = descriptor->field_count(); i < count; ++i) {
- const NProtoBuf::FieldDescriptor *field = descriptor->field(i);
- if (reflection->HasField(*locp, field)) {
- Y_ABORT_UNLESS(field->type() == NProtoBuf::FieldDescriptor::TYPE_STRING, "Location# %s", makeString().data());
- Items.emplace_back(TKeys::E(field->number()), reflection->GetString(*locp, field));
- }
- }
- const NProtoBuf::UnknownFieldSet& unknown = locp->unknown_fields();
- for (int i = 0, count = unknown.field_count(); i < count; ++i) {
- const NProtoBuf::UnknownField& field = unknown.field(i);
- Y_ABORT_UNLESS(field.type() == NProtoBuf::UnknownField::TYPE_LENGTH_DELIMITED, "Location# %s", makeString().data());
- Items.emplace_back(TKeys::E(field.number()), field.length_delimited());
- }
- std::sort(Items.begin(), Items.end());
- }
-
- TNodeLocation::TNodeLocation(TFromSerialized, const TString& s)
- : TNodeLocation(ParseLocation(s))
- {}
-
- TNodeLocation::TNodeLocation(const TString& DataCenter, const TString& Module, const TString& Rack, const TString& Unit) {
- if (DataCenter) Items.emplace_back(TKeys::DataCenter, DataCenter);
- if (Module) Items.emplace_back(TKeys::Module, Module);
- if (Rack) Items.emplace_back(TKeys::Rack, Rack);
- if (Unit) Items.emplace_back(TKeys::Unit, Unit);
- }
-
- NActorsInterconnect::TNodeLocation TNodeLocation::ParseLocation(const TString& s) {
- NActorsInterconnect::TNodeLocation res;
- const bool success = res.ParseFromString(s);
- Y_ABORT_UNLESS(success);
- return res;
- }
-
- TString TNodeLocation::ToStringUpTo(TKeys::E upToKey) const {
- const NProtoBuf::Descriptor *descriptor = NActorsInterconnect::TNodeLocation::descriptor();
-
- TStringBuilder res;
- for (const auto& [key, value] : Items) {
- if (upToKey < key) {
- break;
- }
- TString name;
- if (const NProtoBuf::FieldDescriptor *field = descriptor->FindFieldByNumber(key)) {
- name = field->options().GetExtension(NActorsInterconnect::PrintName);
- } else {
- name = ::ToString(int(key));
- }
- if (key != upToKey) {
- res << name << "=" << value << "/";
- } else {
- res << value;
- }
- }
- return res;
- }
-
- void TNodeLocation::Serialize(NActorsInterconnect::TNodeLocation *pb, bool compatibleWithOlderVersions) const {
- const NProtoBuf::Descriptor *descriptor = NActorsInterconnect::TNodeLocation::descriptor();
- const NProtoBuf::Reflection *reflection = pb->GetReflection();
- NProtoBuf::UnknownFieldSet *unknown = pb->mutable_unknown_fields();
- for (const auto& [key, value] : Items) {
- if (const NProtoBuf::FieldDescriptor *field = descriptor->FindFieldByNumber(key)) {
- reflection->SetString(pb, field, value);
- } else {
- unknown->AddLengthDelimited(key)->assign(value);
- }
- }
- if (compatibleWithOlderVersions) {
- GetLegacyValue().Serialize(pb);
- }
- }
-
- TString TNodeLocation::GetSerializedLocation() const {
- NActorsInterconnect::TNodeLocation pb;
- Serialize(&pb, false);
- TString s;
- const bool success = pb.SerializeToString(&s);
- Y_ABORT_UNLESS(success);
- return s;
- }
-
- TNodeLocation::TLegacyValue TNodeLocation::GetLegacyValue() const {
- if (LegacyValue) {
- return *LegacyValue;
- }
-
- ui32 dataCenterId = 0, moduleId = 0, rackId = 0, unitId = 0;
-
- for (const auto& [key, value] : Items) {
- switch (key) {
- case TKeys::DataCenter:
- memcpy(&dataCenterId, value.data(), Min<size_t>(sizeof(dataCenterId), value.length()));
- break;
-
- case TKeys::Module: {
- const bool success = TryFromString(value, moduleId);
- Y_ABORT_UNLESS(success);
- break;
- }
-
- case TKeys::Rack:
- // hacky way to obtain a numeric id from a rack name
- if (!TryFromString(value, rackId)) {
- rackId = MurmurHash<ui32>(value.data(), value.length());
- }
- break;
-
- case TKeys::Unit: {
- const bool success = TryFromString(value, unitId);
- Y_ABORT_UNLESS(success);
- break;
- }
-
- default:
- Y_ABORT("unexpected legacy key# %d", key);
- }
- }
-
- return {dataCenterId, moduleId, rackId, unitId};
- }
-
-} // NActors
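
GetLegacyValue above derives numeric ids from the textual location: the data center id is taken from the first bytes of the name, module and unit are parsed as integers, and a non-numeric rack name falls back to a 32-bit hash. A standalone sketch of that rack-id fallback, with std::hash standing in for the library's MurmurHash (so the produced ids differ from the real ones), might look like this:

#include <charconv>
#include <cstdint>
#include <cstdio>
#include <functional>
#include <string>

static uint32_t RackIdFromName(const std::string& name) {
    uint32_t id = 0;
    auto [ptr, ec] = std::from_chars(name.data(), name.data() + name.size(), id);
    if (ec == std::errc{} && ptr == name.data() + name.size())
        return id;                                               // purely numeric rack name
    return static_cast<uint32_t>(std::hash<std::string>{}(name)); // non-numeric: hash it
}

int main() {
    std::printf("%u\n", RackIdFromName("42"));      // 42
    std::printf("%u\n", RackIdFromName("rack-a"));  // hash-derived id
}
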
diff --git a/library/cpp/actors/core/interconnect.h b/library/cpp/actors/core/interconnect.h
deleted file mode 100644
index 46d4fd5303..0000000000
--- a/library/cpp/actors/core/interconnect.h
+++ /dev/null
@@ -1,268 +0,0 @@
-#pragma once
-
-#include "events.h"
-#include "event_local.h"
-#include <library/cpp/actors/protos/interconnect.pb.h>
-#include <util/string/cast.h>
-#include <util/string/builder.h>
-
-namespace NActors {
- class TNodeLocation {
- public:
- struct TKeys {
- enum E : int {
- DataCenter = 10,
- Module = 20,
- Rack = 30,
- Unit = 40,
- };
- };
-
- struct TLegacyValue {
- ui32 DataCenter;
- ui32 Room;
- ui32 Rack;
- ui32 Body;
-
- auto ConvertToTuple() const { return std::make_tuple(DataCenter, Room, Rack, Body); }
-
- int Compare(const TLegacyValue& other) const {
- const auto x = ConvertToTuple();
- const auto y = other.ConvertToTuple();
- if (x < y) {
- return -1;
- } else if (y < x) {
- return 1;
- } else {
- return 0;
- }
- }
-
- friend bool operator ==(const TLegacyValue& x, const TLegacyValue& y) { return x.Compare(y) == 0; }
-
- void Serialize(NActorsInterconnect::TNodeLocation *pb) const {
- pb->SetDataCenterNum(DataCenter);
- pb->SetRoomNum(Room);
- pb->SetRackNum(Rack);
- pb->SetBodyNum(Body);
- }
- };
-
- private:
- std::optional<TLegacyValue> LegacyValue;
- std::vector<std::pair<TKeys::E, TString>> Items;
-
- public:
- // generic ctors
- TNodeLocation() = default;
- TNodeLocation(const TNodeLocation&) = default;
- TNodeLocation(TNodeLocation&&) = default;
- TNodeLocation(const TString& DataCenter, const TString& Module = "", const TString& Rack = "", const TString& Unit = "");
-
- // protobuf-parser ctor
- explicit TNodeLocation(const NActorsInterconnect::TNodeLocation& location);
-
- // serialized protobuf ctor
- static constexpr struct TFromSerialized {} FromSerialized {};
- TNodeLocation(TFromSerialized, const TString& s);
-
- // parser helper function
- static NActorsInterconnect::TNodeLocation ParseLocation(const TString& s);
-
- // assignment operators
- TNodeLocation& operator =(const TNodeLocation&) = default;
- TNodeLocation& operator =(TNodeLocation&&) = default;
-
- // compatibleWithOlderVersions should be set to true when this protobuf may be delivered to a 21-4 peer
- void Serialize(NActorsInterconnect::TNodeLocation *pb, bool compatibleWithOlderVersions) const;
- TString GetSerializedLocation() const;
-
- TString GetDataCenterId() const { return ToStringUpTo(TKeys::DataCenter); }
- TString GetModuleId() const { return ToStringUpTo(TKeys::Module); }
- TString GetRackId() const { return ToStringUpTo(TKeys::Rack); }
- TString GetUnitId() const { return ToStringUpTo(TKeys::Unit); }
- TString ToString() const { return ToStringUpTo(TKeys::E(Max<int>())); }
- TString ToStringUpTo(TKeys::E upToKey) const;
-
- TLegacyValue GetLegacyValue() const;
-
- const std::vector<std::pair<TKeys::E, TString>>& GetItems() const { return Items; }
-
- bool HasKey(TKeys::E key) const {
- auto comp = [](const auto& p, TKeys::E value) { return p.first < value; };
- const auto it = std::lower_bound(Items.begin(), Items.end(), key, comp);
- return it != Items.end() && it->first == key;
- }
-
- int Compare(const TNodeLocation& other) const {
- if (LegacyValue || other.LegacyValue) {
- return GetLegacyValue().Compare(other.GetLegacyValue());
- } else if (Items < other.Items) {
- return -1;
- } else if (other.Items < Items) {
- return 1;
- } else {
- return 0;
- }
- }
-
- void InheritLegacyValue(const TNodeLocation& other) {
- LegacyValue = other.GetLegacyValue();
- }
-
- friend bool operator ==(const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) == 0; }
- friend bool operator !=(const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) != 0; }
- friend bool operator < (const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) < 0; }
- friend bool operator <=(const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) <= 0; }
- friend bool operator > (const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) > 0; }
- friend bool operator >=(const TNodeLocation& x, const TNodeLocation& y) { return x.Compare(y) >= 0; }
- };
-
- struct TEvInterconnect {
- enum EEv {
- EvForward = EventSpaceBegin(TEvents::ES_INTERCONNECT),
- EvResolveNode, // resolve info about node (internal)
- EvNodeAddress, // node info (internal)
- EvConnectNode, // request proxy to establish connection (like: we would send something there soon)
- EvAcceptIncoming,
- EvNodeConnected, // node connected notify
- EvNodeDisconnected, // node disconnected notify
- EvRegisterNode,
- EvRegisterNodeResult,
- EvListNodes,
- EvNodesInfo,
- EvDisconnect,
- EvGetNode,
- EvNodeInfo,
- EvClosePeerSocket,
- EvCloseInputSession,
- EvPoisonSession,
- EvTerminate,
- EvEnd
- };
-
- enum ESubscribes {
- SubConnected,
- SubDisconnected,
- };
-
- static_assert(EvEnd < EventSpaceEnd(TEvents::ES_INTERCONNECT), "expect EvEnd < EventSpaceEnd(TEvents::ES_INTERCONNECT)");
-
- struct TEvResolveNode;
- struct TEvNodeAddress;
-
- struct TEvConnectNode: public TEventBase<TEvConnectNode, EvConnectNode> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvConnectNode, "TEvInterconnect::TEvConnectNode")
- };
-
- struct TEvAcceptIncoming;
-
- struct TEvNodeConnected: public TEventLocal<TEvNodeConnected, EvNodeConnected> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvNodeConnected, "TEvInterconnect::TEvNodeConnected")
- TEvNodeConnected(ui32 node) noexcept
- : NodeId(node)
- {
- }
- const ui32 NodeId;
- };
-
- struct TEvNodeDisconnected: public TEventLocal<TEvNodeDisconnected, EvNodeDisconnected> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvNodeDisconnected, "TEvInterconnect::TEvNodeDisconnected")
- TEvNodeDisconnected(ui32 node) noexcept
- : NodeId(node)
- {
- }
- const ui32 NodeId;
- };
-
- struct TEvRegisterNode;
- struct TEvRegisterNodeResult;
-
- struct TEvListNodes: public TEventLocal<TEvListNodes, EvListNodes> {
- const bool SubscribeToStaticNodeChanges = false;
-
- TEvListNodes() = default;
-
- TEvListNodes(bool subscribeToStaticNodeChanges)
- : SubscribeToStaticNodeChanges(subscribeToStaticNodeChanges)
- {}
- };
-
- struct TNodeInfo {
- ui32 NodeId;
- TString Address;
- TString Host;
- TString ResolveHost;
- ui16 Port;
- TNodeLocation Location;
- bool IsStatic = true;
-
- TNodeInfo() = default;
- TNodeInfo(const TNodeInfo&) = default;
- TNodeInfo& operator =(const TNodeInfo&) = default;
- TNodeInfo(ui32 nodeId,
- const TString& address,
- const TString& host,
- const TString& resolveHost,
- ui16 port,
- const TNodeLocation& location,
- bool isStatic = true)
- : NodeId(nodeId)
- , Address(address)
- , Host(host)
- , ResolveHost(resolveHost)
- , Port(port)
- , Location(location)
- , IsStatic(isStatic)
- {
- }
-
- operator ui32() const {
- return NodeId;
- }
- };
-
- struct TEvNodesInfo: public TEventLocal<TEvNodesInfo, EvNodesInfo> {
- TVector<TNodeInfo> Nodes;
-
- const TNodeInfo* GetNodeInfo(ui32 nodeId) const {
- for (const auto& x : Nodes) {
- if (x.NodeId == nodeId)
- return &x;
- }
- return nullptr;
- }
- };
-
- struct TEvDisconnect;
-
- struct TEvGetNode: public TEventLocal<TEvGetNode, EvGetNode> {
- ui32 NodeId;
- TInstant Deadline;
-
- TEvGetNode(ui32 nodeId, TInstant deadline = TInstant::Max())
- : NodeId(nodeId)
- , Deadline(deadline)
- {
- }
- };
-
- struct TEvNodeInfo: public TEventLocal<TEvNodeInfo, EvNodeInfo> {
- TEvNodeInfo(ui32 nodeId)
- : NodeId(nodeId)
- {
- }
-
- ui32 NodeId;
- THolder<TNodeInfo> Node;
- };
-
- struct TEvClosePeerSocket : TEventLocal<TEvClosePeerSocket, EvClosePeerSocket> {};
-
- struct TEvCloseInputSession : TEventLocal<TEvCloseInputSession, EvCloseInputSession> {};
-
- struct TEvPoisonSession : TEventLocal<TEvPoisonSession, EvPoisonSession> {};
-
- struct TEvTerminate : TEventLocal<TEvTerminate, EvTerminate> {};
- };
-}
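
A hedged usage sketch for TNodeLocation, based only on the constructors and accessors declared in the header above; the location values are invented:

#include <library/cpp/actors/core/interconnect.h>
#include <util/stream/output.h>
#include <util/system/yassert.h>

void PrintLocationExample() {
    using namespace NActors;

    TNodeLocation loc("dc-1", "module-7", "rack-12", "unit-3");

    Cout << loc.ToString() << Endl;    // full path; the key names come from the proto PrintName options
    Cout << loc.GetRackId() << Endl;   // prefix of the location up to and including the rack value

    // Round-trip through the serialized form used on the wire.
    TNodeLocation parsed(TNodeLocation::FromSerialized, loc.GetSerializedLocation());
    Y_ABORT_UNLESS(parsed == loc);
}
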
diff --git a/library/cpp/actors/core/invoke.h b/library/cpp/actors/core/invoke.h
deleted file mode 100644
index c9e29effe2..0000000000
--- a/library/cpp/actors/core/invoke.h
+++ /dev/null
@@ -1,145 +0,0 @@
-#pragma once
-
-#include "actor_bootstrapped.h"
-#include "events.h"
-#include "event_local.h"
-
-#include <any>
-#include <type_traits>
-#include <utility>
-#include <variant>
-
-#include <util/system/type_name.h>
-
-namespace NActors {
-
- struct TEvents::TEvInvokeResult
- : TEventLocal<TEvInvokeResult, TSystem::InvokeResult>
- {
- using TProcessCallback = std::function<void(TEvInvokeResult&, const TActorContext&)>;
- TProcessCallback ProcessCallback;
- std::variant<std::any /* the value */, std::exception_ptr> Result;
-
- // This constructor creates a TEvInvokeResult holding either the result of calling callback(args...) or an
- // exception_ptr, if an exception occurs during evaluation.
- template<typename TCallback, typename... TArgs>
- TEvInvokeResult(TProcessCallback&& process, TCallback&& callback, TArgs&&... args)
- : ProcessCallback(std::move(process))
- {
- try {
- if constexpr (std::is_void_v<std::invoke_result_t<TCallback, TArgs...>>) {
- // just invoke callback without saving any value
- std::invoke(std::forward<TCallback>(callback), std::forward<TArgs>(args)...);
- } else {
- Result.emplace<std::any>(std::invoke(std::forward<TCallback>(callback), std::forward<TArgs>(args)...));
- }
- } catch (...) {
- Result.emplace<std::exception_ptr>(std::current_exception());
- }
- }
-
- void Process(const TActorContext& ctx) {
- ProcessCallback(*this, ctx);
- }
-
- template<typename TCallback>
- std::invoke_result_t<TCallback, const TActorContext&> GetResult() {
- using T = std::invoke_result_t<TCallback, const TActorContext&>;
- return std::visit([](auto& arg) -> T {
- using TArg = std::decay_t<decltype(arg)>;
- if constexpr (std::is_same_v<TArg, std::exception_ptr>) {
- std::rethrow_exception(arg);
- } else if constexpr (std::is_void_v<T>) {
- Y_ABORT_UNLESS(!arg.has_value());
- } else if (auto *value = std::any_cast<T>(&arg)) {
- return std::move(*value);
- } else {
- Y_ABORT("unspported return type for TEvInvokeResult: actual# %s != expected# %s",
- TypeName(arg.type()).data(), TypeName<T>().data());
- }
- }, Result);
- }
- };
-
- // The invoke actor is used to run arbitrary procedure calls in a specific thread pool.
- //
- // The actor is created by CreateInvokeActor(callback, complete, activity), where `callback` is the function
- // invoked upon actor registration; it then issues a TEvInvokeResult to the parent actor with the result of the
- // called function. If the called function throws an exception, the exception arrives in the result. The receiver
- // of this message can either handle it on its own by calling ev.GetResult() (which rethrows the exception if one
- // occurred in the called function, or returns its return value; note that even when there is no return value,
- // GetResult() should still be called so that a possible exception is not lost), or invoke ev.Process(), which
- // calls the callback provided as the `complete` parameter of CreateInvokeActor. The complete handler is invoked
- // with a result-getter lambda as the first argument and the actor system context as the second one. The
- // result-getter should be called to obtain the resulting value or exception, just like the GetResult() method of
- // the TEvInvokeResult event.
- //
- // Note that `callback` usually executes in a separate actor on a separate mailbox and must not touch the parent
- // actor's state. The `complete` handler, however, is invoked in the parent's context and may use its contents.
- // Do not forget to handle the TEvInvokeResult event by calling Process() or GetResult(), whichever is needed.
- // (A hedged usage sketch follows this file's diff.)
-
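- // Usage sketch (illustrative only; TMyActor and the EMyActivity enumerator are assumptions, not part of this header):
- //
- //   void TMyActor::Bootstrap(const TActorContext& ctx) {
- //       auto callback = [](const TActorContext&) { return 42; };   // runs inside the invoke actor
- //       auto complete = [](auto getResult, const TActorContext&) {
- //           const int value = getResult();                         // rethrows here if the callback threw
- //           Y_UNUSED(value);
- //       };
- //       ctx.Register(CreateInvokeActor(std::move(callback), std::move(complete), EMyActivity::INVOKE).release());
- //       // TMyActor must handle TEvents::TEvInvokeResult and call ev->Get()->Process(ctx) (or GetResult()) there.
- //   }
-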
- template<typename TCallback, typename TCompletion, class TEnum>
- class TInvokeActor : public TActorBootstrapped<TInvokeActor<TCallback, TCompletion, TEnum>> {
- private:
- using TBase = TActorBootstrapped<TInvokeActor<TCallback, TCompletion, TEnum>>;
- TCallback Callback;
- TCompletion Complete;
- const TEnum Activity;
- static_assert(std::is_enum<TEnum>::value);
- public:
- TInvokeActor(TCallback&& callback, TCompletion&& complete, const TEnum activity)
- : TBase(activity)
- , Callback(std::move(callback))
- , Complete(std::move(complete))
- , Activity(activity)
- {}
-
- void Bootstrap(const TActorId& parentId, const TActorContext& ctx) {
- auto process = [complete = std::move(Complete)](TEvents::TEvInvokeResult& res, const TActorContext& ctx) {
- complete([&] { return res.GetResult<TCallback>(); }, ctx);
- };
- ctx.Send(parentId, new TEvents::TEvInvokeResult(std::move(process), std::move(Callback), ctx));
- TActorBootstrapped<TInvokeActor>::Die(ctx);
- }
- };
-
- template<typename TEnum, typename TCallback, typename TCompletion>
- std::unique_ptr<IActor> CreateInvokeActor(TCallback&& callback, TCompletion&& complete, const TEnum activity) {
- return std::make_unique<TInvokeActor<std::decay_t<TCallback>, std::decay_t<TCompletion>, TEnum>>(
- std::forward<TCallback>(callback), std::forward<TCompletion>(complete), activity);
- }
-
- template <class TInvokeExecutor>
- class TScheduledInvokeActivity: public TActor<TScheduledInvokeActivity<TInvokeExecutor>> {
- private:
- using TBase = TActor<TScheduledInvokeActivity<TInvokeExecutor>>;
- const TMonotonic Timestamp;
- TInvokeExecutor Executor;
- public:
- TScheduledInvokeActivity(TInvokeExecutor&& executor, const TMonotonic timestamp)
- : TBase(&TBase::TThis::StateFunc)
- , Timestamp(timestamp)
- , Executor(std::move(executor)) {
- }
-
- void StateFunc(STFUNC_SIG) {
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvents::TSystem::Wakeup);
- auto g = TBase::PassAwayGuard();
- Executor();
- }
-
- void Registered(TActorSystem* sys, const TActorId& owner) override {
- sys->Schedule(Timestamp, new IEventHandle(TEvents::TSystem::Wakeup, 0, TBase::SelfId(), owner, nullptr, 0));
- }
- };
-
- template<class TInvokeExecutor>
- void ScheduleInvokeActivity(TInvokeExecutor&& executor, const TDuration d) {
- TActivationContext::Register(new TScheduledInvokeActivity<TInvokeExecutor>(std::move(executor), TMonotonic::Now() + d));
- }
-
- template<class TInvokeExecutor>
- void ScheduleInvokeActivity(TInvokeExecutor&& executor, const TMonotonic timestamp) {
- TActivationContext::Register(new TScheduledInvokeActivity<TInvokeExecutor>(std::move(executor), timestamp));
- }
-
-} // NActors
diff --git a/library/cpp/actors/core/io_dispatcher.cpp b/library/cpp/actors/core/io_dispatcher.cpp
deleted file mode 100644
index c7d28c63e0..0000000000
--- a/library/cpp/actors/core/io_dispatcher.cpp
+++ /dev/null
@@ -1,236 +0,0 @@
-#include "io_dispatcher.h"
-#include "actor_bootstrapped.h"
-#include "hfunc.h"
-#include <util/system/mutex.h>
-#include <util/system/condvar.h>
-#include <util/system/thread.h>
-#include <map>
-#include <list>
-
-namespace NActors {
-
- class TIoDispatcherActor : public TActorBootstrapped<TIoDispatcherActor> {
- enum {
- EvNotifyThreadStopped = EventSpaceBegin(TEvents::ES_PRIVATE),
- };
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // IO task queue
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- class TTask {
- TInstant Timestamp;
- std::function<void()> Callback;
-
- public:
- TTask(TInstant timestamp, TEvInvokeQuery *ev)
- : Timestamp(timestamp)
- , Callback(std::move(ev->Callback))
- {}
-
- void Execute() {
- Callback();
- }
-
- TInstant GetTimestamp() const {
- return Timestamp;
- }
- };
-
- class TTaskQueue {
- std::list<TTask> Tasks;
- TMutex Mutex;
- TCondVar CondVar;
- size_t NumThreadsToStop = 0;
-
- public:
- void Enqueue(TInstant timestamp, TEvInvokeQuery *ev) {
- std::list<TTask> list;
- list.emplace_back(timestamp, ev);
- with_lock (Mutex) {
- Tasks.splice(Tasks.end(), std::move(list));
- }
- CondVar.Signal();
- }
-
- bool Dequeue(std::list<TTask>& list, bool *sendNotify) {
- with_lock (Mutex) {
- CondVar.Wait(Mutex, [&] { return NumThreadsToStop || !Tasks.empty(); });
- if (NumThreadsToStop) {
- *sendNotify = NumThreadsToStop != Max<size_t>();
- if (*sendNotify) {
- --NumThreadsToStop;
- }
- return false;
- } else {
- list.splice(list.end(), Tasks, Tasks.begin());
- return true;
- }
- }
- }
-
- void Stop() {
- with_lock (Mutex) {
- NumThreadsToStop = Max<size_t>();
- }
- CondVar.BroadCast();
- }
-
- void StopOne() {
- with_lock (Mutex) {
- ++NumThreadsToStop;
- Y_ABORT_UNLESS(NumThreadsToStop);
- }
- CondVar.Signal();
- }
-
- std::optional<TInstant> GetEarliestTaskTimestamp() {
- with_lock (Mutex) {
- return Tasks.empty() ? std::nullopt : std::make_optional(Tasks.front().GetTimestamp());
- }
- }
- };
-
- TTaskQueue TaskQueue;
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // IO dispatcher threads
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- class TThread : public ISimpleThread {
- TIoDispatcherActor& Actor;
- TActorSystem* const ActorSystem;
-
- public:
- TThread(TIoDispatcherActor& actor, TActorSystem *actorSystem)
- : Actor(actor)
- , ActorSystem(actorSystem)
- {
- Start();
- }
-
- void *ThreadProc() override {
- SetCurrentThreadName("kikimr IO");
- for (;;) {
- std::list<TTask> tasks;
- bool sendNotify;
- if (!Actor.TaskQueue.Dequeue(tasks, &sendNotify)) {
- if (sendNotify) {
- ActorSystem->Send(new IEventHandle(EvNotifyThreadStopped, 0, Actor.SelfId(), TActorId(),
- nullptr, TThread::CurrentThreadId()));
- }
- break;
- }
- for (TTask& task : tasks) {
- task.Execute();
- ++*Actor.TasksCompleted;
- }
- }
- return nullptr;
- }
- };
-
- static constexpr size_t MinThreadCount = 4;
- static constexpr size_t MaxThreadCount = 64;
- std::map<TThread::TId, std::unique_ptr<TThread>> Threads;
- size_t NumRunningThreads = 0;
-
- void StartThread() {
- auto thread = std::make_unique<TThread>(*this, TlsActivationContext->ExecutorThread.ActorSystem);
- const TThread::TId id = thread->Id();
- Threads.emplace(id, std::move(thread));
- *NumThreads = ++NumRunningThreads;
- ++*ThreadsStarted;
- }
-
- void StopThread() {
- Y_ABORT_UNLESS(Threads.size());
- TaskQueue.StopOne();
- *NumThreads = --NumRunningThreads;
- ++*ThreadsStopped;
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Counters
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- NMonitoring::TDynamicCounters::TCounterPtr NumThreads;
- NMonitoring::TDynamicCounters::TCounterPtr TasksAdded;
- NMonitoring::TDynamicCounters::TCounterPtr TasksCompleted;
- NMonitoring::TDynamicCounters::TCounterPtr ThreadsStarted;
- NMonitoring::TDynamicCounters::TCounterPtr ThreadsStopped;
-
- public:
- TIoDispatcherActor(const NMonitoring::TDynamicCounterPtr& counters)
- : NumThreads(counters->GetCounter("NumThreads"))
- , TasksAdded(counters->GetCounter("TasksAdded", true))
- , TasksCompleted(counters->GetCounter("TasksCompleted", true))
- , ThreadsStarted(counters->GetCounter("ThreadsStarted", true))
- , ThreadsStopped(counters->GetCounter("ThreadsStopped", true))
- {}
-
- ~TIoDispatcherActor() override {
- TaskQueue.Stop();
- }
-
- static constexpr char ActorName[] = "IO_DISPATCHER_ACTOR";
-
- void Bootstrap() {
- while (NumRunningThreads < MinThreadCount) {
- StartThread();
- }
- HandleWakeup();
- Become(&TThis::StateFunc);
- }
-
- void HandleThreadStopped(TAutoPtr<IEventHandle> ev) {
- auto it = Threads.find(ev->Cookie);
- Y_ABORT_UNLESS(it != Threads.end());
- it->second->Join();
- Threads.erase(it);
- }
-
- void Handle(TEvInvokeQuery::TPtr ev) {
- ++*TasksAdded;
- TaskQueue.Enqueue(TActivationContext::Now(), ev->Get());
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Thread usage counter logic
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- std::optional<TInstant> IdleTimestamp;
- static constexpr TDuration ThreadStartTime = TDuration::MilliSeconds(500);
- static constexpr TDuration ThreadStopTime = TDuration::MilliSeconds(500);
-
- void HandleWakeup() {
- const TInstant now = TActivationContext::Now();
- std::optional<TInstant> earliest = TaskQueue.GetEarliestTaskTimestamp();
- if (earliest) {
- if (now >= *earliest + ThreadStartTime && NumRunningThreads < MaxThreadCount) {
- StartThread();
- }
- IdleTimestamp.reset();
- } else if (!IdleTimestamp) {
- IdleTimestamp = now;
- } else if (now >= *IdleTimestamp + ThreadStopTime) {
- IdleTimestamp.reset();
- if (NumRunningThreads > MinThreadCount) {
- StopThread();
- }
- }
- Schedule(TDuration::MilliSeconds(100), new TEvents::TEvWakeup);
- }
-
- STRICT_STFUNC(StateFunc, {
- fFunc(EvNotifyThreadStopped, HandleThreadStopped);
- hFunc(TEvInvokeQuery, Handle);
- cFunc(TEvents::TSystem::Wakeup, HandleWakeup);
- cFunc(TEvents::TSystem::Poison, PassAway);
- })
- };
-
- IActor *CreateIoDispatcherActor(const NMonitoring::TDynamicCounterPtr& counters) {
- return new TIoDispatcherActor(counters);
- }
-
-} // NActors
diff --git a/library/cpp/actors/core/io_dispatcher.h b/library/cpp/actors/core/io_dispatcher.h
deleted file mode 100644
index b0e4e60d1a..0000000000
--- a/library/cpp/actors/core/io_dispatcher.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#pragma once
-
-#include "actor.h"
-#include "event_local.h"
-#include "events.h"
-#include "actorsystem.h"
-#include "executor_thread.h"
-#include "executelater.h"
-
-namespace NActors {
-
- struct TEvInvokeQuery : TEventLocal<TEvInvokeQuery, TEvents::TSystem::InvokeQuery> {
- std::function<void()> Callback;
-
- TEvInvokeQuery(std::function<void()>&& callback)
- : Callback(std::move(callback))
- {}
- };
-
- inline TActorId MakeIoDispatcherActorId() {
- return TActorId(0, TStringBuf("IoDispatcher", 12));
- }
-
- extern IActor *CreateIoDispatcherActor(const NMonitoring::TDynamicCounterPtr& counters);
-
- /* InvokeIoCallback enqueues callback() to be executed in the IO thread pool and then returns the result in a
- * TEvInvokeResult message to the parentId actor (see the usage sketch below).
- */
- template<typename TCallback>
- static void InvokeIoCallback(TCallback&& callback, ui32 poolId, IActor::EActivityType activityType) {
- if (!TActivationContext::Send(new IEventHandle(MakeIoDispatcherActorId(), TActorId(),
- new TEvInvokeQuery(callback)))) {
- TActivationContext::Register(CreateExecuteLaterActor(std::move(callback), activityType), TActorId(),
- TMailboxType::HTSwap, poolId);
- }
- }
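-
- // Usage sketch (illustrative; `ioPoolId` and `activity` are placeholders supplied by the caller):
- //
- //   InvokeIoCallback([] {
- //       // blocking work, e.g. a long synchronous read
- //   }, ioPoolId, activity);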
-
-} // NActors
diff --git a/library/cpp/actors/core/lease.h b/library/cpp/actors/core/lease.h
deleted file mode 100644
index 650ae7b122..0000000000
--- a/library/cpp/actors/core/lease.h
+++ /dev/null
@@ -1,56 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-namespace NActors {
- // Value representing a specific worker's permission for exclusive use of the CPU until a specific deadline
- struct TLease {
- // The lower WorkerBits bits store the current fast worker id
- // All higher bits store the expiration (hard preemption) timestamp
- using TValue = ui64;
- TValue Value;
-
- static constexpr ui64 WorkerIdMask = ui64((1ull << WorkerBits) - 1);
- static constexpr ui64 ExpireTsMask = ~WorkerIdMask;
-
- explicit constexpr TLease(ui64 value)
- : Value(value)
- {}
-
- constexpr TLease(TWorkerId workerId, ui64 expireTs)
- : Value((workerId & WorkerIdMask) | (expireTs & ExpireTsMask))
- {}
-
- TWorkerId GetWorkerId() const {
- return Value & WorkerIdMask;
- }
-
- TLease NeverExpire() const {
- return TLease(Value | ExpireTsMask);
- }
-
- bool IsNeverExpiring() const {
- return (Value & ExpireTsMask) == ExpireTsMask;
- }
-
- ui64 GetExpireTs() const {
- // Do not truncate the worker id
- // NOTE: this decreases accuracy but improves performance
- return Value;
- }
-
- ui64 GetPreciseExpireTs() const {
- return Value & ExpireTsMask;
- }
-
- operator TValue() const {
- return Value;
- }
- };
-
- // Special expire timestamp values
- static constexpr ui64 NeverExpire = ui64(-1);
-
- // Special hard-preemption-in-progress lease
- static constexpr TLease HardPreemptionLease = TLease(TLease::WorkerIdMask, NeverExpire);
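-
- // Usage sketch (illustrative; workerId, nowTs and budgetTs are placeholders provided by the scheduler):
- //
- //   TLease lease(workerId, nowTs + budgetTs);       // pack worker id and hard-preemption deadline together
- //   TWorkerId w = lease.GetWorkerId();              // low WorkerBits bits
- //   ui64 deadline = lease.GetPreciseExpireTs();     // deadline with the worker-id bits masked out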
-}
diff --git a/library/cpp/actors/core/log.cpp b/library/cpp/actors/core/log.cpp
deleted file mode 100644
index 1a8880d15d..0000000000
--- a/library/cpp/actors/core/log.cpp
+++ /dev/null
@@ -1,768 +0,0 @@
-#include "log.h"
-
-static_assert(int(NActors::NLog::PRI_EMERG) == int(::TLOG_EMERG), "expect int(NActors::NLog::PRI_EMERG) == int(::TLOG_EMERG)");
-static_assert(int(NActors::NLog::PRI_ALERT) == int(::TLOG_ALERT), "expect int(NActors::NLog::PRI_ALERT) == int(::TLOG_ALERT)");
-static_assert(int(NActors::NLog::PRI_CRIT) == int(::TLOG_CRIT), "expect int(NActors::NLog::PRI_CRIT) == int(::TLOG_CRIT)");
-static_assert(int(NActors::NLog::PRI_ERROR) == int(::TLOG_ERR), "expect int(NActors::NLog::PRI_ERROR) == int(::TLOG_ERR)");
-static_assert(int(NActors::NLog::PRI_WARN) == int(::TLOG_WARNING), "expect int(NActors::NLog::PRI_WARN) == int(::TLOG_WARNING)");
-static_assert(int(NActors::NLog::PRI_NOTICE) == int(::TLOG_NOTICE), "expect int(NActors::NLog::PRI_NOTICE) == int(::TLOG_NOTICE)");
-static_assert(int(NActors::NLog::PRI_INFO) == int(::TLOG_INFO), "expect int(NActors::NLog::PRI_INFO) == int(::TLOG_INFO)");
-static_assert(int(NActors::NLog::PRI_DEBUG) == int(::TLOG_DEBUG), "expect int(NActors::NLog::PRI_DEBUG) == int(::TLOG_DEBUG)");
-static_assert(int(NActors::NLog::PRI_TRACE) == int(::TLOG_RESOURCES), "expect int(NActors::NLog::PRI_TRACE) == int(::TLOG_RESOURCES)");
-
-namespace {
- struct TRecordWithNewline {
- ELogPriority Priority;
- TTempBuf Buf;
-
- TRecordWithNewline(const TLogRecord& rec)
- : Priority(rec.Priority)
- , Buf(rec.Len + 1)
- {
- Buf.Append(rec.Data, rec.Len);
- *Buf.Proceed(1) = '\n';
- }
-
- operator TLogRecord() const {
- return TLogRecord(Priority, Buf.Data(), Buf.Filled());
- }
- };
-}
-
-namespace NActors {
- TLoggerActor::TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- TAutoPtr<TLogBackend> logBackend,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
- : TActor(&TLoggerActor::StateFunc)
- , Settings(settings)
- , LogBackend(logBackend.Release())
- , Metrics(std::make_unique<TLoggerCounters>(counters))
- , LogBuffer(*Metrics, *Settings)
- {
- }
-
- TLoggerActor::TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- std::shared_ptr<TLogBackend> logBackend,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
- : TActor(&TLoggerActor::StateFunc)
- , Settings(settings)
- , LogBackend(logBackend)
- , Metrics(std::make_unique<TLoggerCounters>(counters))
- , LogBuffer(*Metrics, *Settings)
- {
- }
-
- TLoggerActor::TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- TAutoPtr<TLogBackend> logBackend,
- std::shared_ptr<NMonitoring::TMetricRegistry> metrics)
- : TActor(&TLoggerActor::StateFunc)
- , Settings(settings)
- , LogBackend(logBackend.Release())
- , Metrics(std::make_unique<TLoggerMetrics>(metrics))
- , LogBuffer(*Metrics, *Settings)
- {
- }
-
- TLoggerActor::TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- std::shared_ptr<TLogBackend> logBackend,
- std::shared_ptr<NMonitoring::TMetricRegistry> metrics)
- : TActor(&TLoggerActor::StateFunc)
- , Settings(settings)
- , LogBackend(logBackend)
- , Metrics(std::make_unique<TLoggerMetrics>(metrics))
- , LogBuffer(*Metrics, *Settings)
- {
- }
-
- TLoggerActor::~TLoggerActor() {
- }
-
- void TLoggerActor::Log(TInstant time, NLog::EPriority priority, NLog::EComponent component, const char* c, ...) {
- Metrics->IncDirectMsgs();
- if (Settings && Settings->Satisfies(priority, component, 0ull)) {
- va_list params;
- va_start(params, c);
- TString formatted;
- vsprintf(formatted, c, params);
-
- auto ok = OutputRecord(time, NLog::EPrio(priority), component, formatted);
- Y_UNUSED(ok);
- va_end(params);
- }
- }
-
- void TLoggerActor::Throttle(const NLog::TSettings& settings) {
- // throttling via Sleep was removed since it causes unexpected
- // incidents when users try to set AllowDrop=false.
- Y_UNUSED(settings);
- }
-
- void TLoggerActor::FlushLogBufferMessage() {
- if (!LogBuffer.IsEmpty()) {
- NLog::TEvLog *log = LogBuffer.Pop();
- if (!OutputRecord(log)) {
- BecomeDefunct();
- }
- delete log;
- }
- }
-
- void TLoggerActor::FlushLogBufferMessageEvent(TFlushLogBuffer::TPtr& ev, const NActors::TActorContext& ctx) {
- Y_UNUSED(ev);
- FlushLogBufferMessage();
-
- ui64 ignoredCount = LogBuffer.GetIgnoredCount();
- if (ignoredCount > 0) {
- NLog::EPrio prio = LogBuffer.GetIgnoredHighestPrio();
- TString message = Sprintf("Logger overflow! Ignored %" PRIu64 " log records with priority [%s] or lower!", ignoredCount, PriorityToString(prio));
- if (!OutputRecord(ctx.Now(), NActors::NLog::EPrio::Error, Settings->LoggerComponent, message)) {
- BecomeDefunct();
- }
- LogBuffer.ClearIgnoredCount();
- }
-
- if (!LogBuffer.IsEmpty()) {
- ctx.Send(ctx.SelfID, ev->Release().Release());
- }
- }
-
- void TLoggerActor::WriteMessageStat(const NLog::TEvLog& ev) {
- Metrics->IncActorMsgs();
-
- const auto prio = ev.Level.ToPrio();
-
- switch (prio) {
- case ::NActors::NLog::EPrio::Alert:
- Metrics->IncAlertMsgs();
- break;
- case ::NActors::NLog::EPrio::Emerg:
- Metrics->IncEmergMsgs();
- break;
- default:
- break;
- }
-
- }
-
- void TLoggerActor::HandleLogEvent(NLog::TEvLog::TPtr& ev, const NActors::TActorContext& ctx) {
- i64 delayMillisec = (ctx.Now() - ev->Get()->Stamp).MilliSeconds();
- WriteMessageStat(*ev->Get());
- if (Settings->AllowDrop) {
- if (PassedCount > 10 && delayMillisec > (i64)Settings->TimeThresholdMs || !LogBuffer.IsEmpty() || LogBuffer.CheckLogIgnoring()) {
- if (LogBuffer.IsEmpty() && !LogBuffer.CheckLogIgnoring()) {
- ctx.Send(ctx.SelfID, new TFlushLogBuffer());
- }
- LogBuffer.AddLog(ev->Release().Release());
- PassedCount = 0;
-
- if (delayMillisec < (i64)Settings->TimeThresholdMs && !LogBuffer.CheckLogIgnoring()) {
- FlushLogBufferMessage();
- }
- return;
- }
-
- PassedCount++;
- }
-
- if (!OutputRecord(ev->Get())) {
- BecomeDefunct();
- }
- }
-
- void TLoggerActor::BecomeDefunct() {
- Become(&TThis::StateDefunct);
- Schedule(WakeupInterval, new TEvents::TEvWakeup);
- }
-
- void TLoggerActor::HandleLogComponentLevelRequest(TLogComponentLevelRequest::TPtr& ev, const NActors::TActorContext& ctx) {
- Metrics->IncLevelRequests();
- TString explanation;
- int code = Settings->SetLevel(ev->Get()->Priority, ev->Get()->Component, explanation);
- ctx.Send(ev->Sender, new TLogComponentLevelResponse(code, explanation));
- }
-
- void TLoggerActor::RenderComponentPriorities(IOutputStream& str) {
- using namespace NLog;
- HTML(str) {
- TAG(TH4) {
- str << "Priority Settings for the Components";
- }
- TABLE_SORTABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Component";
- }
- TABLEH() {
- str << "Level";
- }
- TABLEH() {
- str << "Sampling Level";
- }
- TABLEH() {
- str << "Sampling Rate";
- }
- }
- }
- TABLEBODY() {
- for (EComponent i = Settings->MinVal; i < Settings->MaxVal; i++) {
- auto name = Settings->ComponentName(i);
- if (!*name)
- continue;
- NLog::TComponentSettings componentSettings = Settings->GetComponentSettings(i);
-
- TABLER() {
- TABLED() {
- str << "<a href='logger?c=" << i << "'>" << name << "</a>";
- }
- TABLED() {
- str << PriorityToString(EPrio(componentSettings.Raw.X.Level));
- }
- TABLED() {
- str << PriorityToString(EPrio(componentSettings.Raw.X.SamplingLevel));
- }
- TABLED() {
- str << componentSettings.Raw.X.SamplingRate;
- }
- }
- }
- }
- }
- }
- }
-
- /*
- * Logger INFO:
- * 1. Current priority settings from components
- * 2. Number of log messages (via actor events and direct calls)
- * 3. Number of messages per components, per priority
- * 4. Log level changes (last N changes)
- */
- void TLoggerActor::HandleMonInfo(NMon::TEvHttpInfo::TPtr& ev, const TActorContext& ctx) {
- const auto& params = ev->Get()->Request.GetParams();
- NLog::EComponent component = NLog::InvalidComponent;
- NLog::EPriority priority = NLog::PRI_DEBUG;
- NLog::EPriority samplingPriority = NLog::PRI_DEBUG;
- ui32 samplingRate = 0;
- bool hasComponent = false;
- bool hasPriority = false;
- bool hasSamplingPriority = false;
- bool hasSamplingRate = false;
- bool hasAllowDrop = false;
- int allowDrop = 0;
- if (params.Has("c")) {
- if (TryFromString(params.Get("c"), component) && (component == NLog::InvalidComponent || Settings->IsValidComponent(component))) {
- hasComponent = true;
- if (params.Has("p")) {
- int rawPriority;
- if (TryFromString(params.Get("p"), rawPriority) && NLog::TSettings::IsValidPriority((NLog::EPriority)rawPriority)) {
- priority = (NLog::EPriority)rawPriority;
- hasPriority = true;
- }
- }
- if (params.Has("sp")) {
- int rawPriority;
- if (TryFromString(params.Get("sp"), rawPriority) && NLog::TSettings::IsValidPriority((NLog::EPriority)rawPriority)) {
- samplingPriority = (NLog::EPriority)rawPriority;
- hasSamplingPriority = true;
- }
- }
- if (params.Has("sr")) {
- if (TryFromString(params.Get("sr"), samplingRate)) {
- hasSamplingRate = true;
- }
- }
- }
- }
- if (params.Has("allowdrop")) {
- if (TryFromString(params.Get("allowdrop"), allowDrop)) {
- hasAllowDrop = true;
- }
- }
-
- TStringStream str;
- if (hasComponent && !hasPriority && !hasSamplingPriority && !hasSamplingRate) {
- NLog::TComponentSettings componentSettings = Settings->GetComponentSettings(component);
- ui32 samplingRate = componentSettings.Raw.X.SamplingRate;
- HTML(str) {
- DIV_CLASS("row") {
- DIV_CLASS("col-md-12") {
- TAG(TH4) {
- str << "Current log settings for " << Settings->ComponentName(component) << Endl;
- }
- UL() {
- LI() {
- str << "Priority: "
- << NLog::PriorityToString(NLog::EPrio(componentSettings.Raw.X.Level));
- }
- LI() {
- str << "Sampling priority: "
- << NLog::PriorityToString(NLog::EPrio(componentSettings.Raw.X.SamplingLevel));
- }
- LI() {
- str << "Sampling rate: "
- << samplingRate;
- }
- }
- }
- }
-
- DIV_CLASS("row") {
- DIV_CLASS("col-md-12") {
- TAG(TH4) {
- str << "Change priority" << Endl;
- }
- UL() {
- for (int p = NLog::PRI_EMERG; p <= NLog::PRI_TRACE; ++p) {
- LI() {
- str << "<a href='logger?c=" << component << "&p=" << p << "'>"
- << NLog::PriorityToString(NLog::EPrio(p)) << "</a>";
- }
- }
- }
- TAG(TH4) {
- str << "Change sampling priority" << Endl;
- }
- UL() {
- for (int p = NLog::PRI_EMERG; p <= NLog::PRI_TRACE; ++p) {
- LI() {
- str << "<a href='logger?c=" << component << "&sp=" << p << "'>"
- << NLog::PriorityToString(NLog::EPrio(p)) << "</a>";
- }
- }
- }
- TAG(TH4) {
- str << "Change sampling rate" << Endl;
- }
- str << "<form method=\"GET\">" << Endl;
- str << "Rate: <input type=\"number\" name=\"sr\" value=\"" << samplingRate << "\"/>" << Endl;
- str << "<input type=\"hidden\" name=\"c\" value=\"" << component << "\">" << Endl;
- str << "<input class=\"btn btn-primary\" type=\"submit\" value=\"Change\"/>" << Endl;
- str << "</form>" << Endl;
- TAG(TH4) {
- str << "<a href='logger'>Cancel</a>" << Endl;
- }
- }
- }
- }
-
- } else {
- TString explanation;
- if (hasComponent && hasPriority) {
- Settings->SetLevel(priority, component, explanation);
- }
- if (hasComponent && hasSamplingPriority) {
- Settings->SetSamplingLevel(samplingPriority, component, explanation);
- }
- if (hasComponent && hasSamplingRate) {
- Settings->SetSamplingRate(samplingRate, component, explanation);
- }
- if (hasAllowDrop) {
- Settings->SetAllowDrop(allowDrop);
- }
-
- HTML(str) {
- if (!explanation.empty()) {
- DIV_CLASS("row") {
- DIV_CLASS("col-md-12 alert alert-info") {
- str << explanation;
- }
- }
- }
-
- DIV_CLASS("row") {
- DIV_CLASS("col-md-6") {
- RenderComponentPriorities(str);
- }
- DIV_CLASS("col-md-6") {
- TAG(TH4) {
- str << "Change priority for all components";
- }
- TABLE_CLASS("table table-condensed") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Priority";
- }
- }
- }
- TABLEBODY() {
- for (int p = NLog::PRI_EMERG; p <= NLog::PRI_TRACE; ++p) {
- TABLER() {
- TABLED() {
- str << "<a href = 'logger?c=-1&p=" << p << "'>"
- << NLog::PriorityToString(NLog::EPrio(p)) << "</a>";
- }
- }
- }
- }
- }
- TAG(TH4) {
- str << "Change sampling priority for all components";
- }
- TABLE_CLASS("table table-condensed") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Priority";
- }
- }
- }
- TABLEBODY() {
- for (int p = NLog::PRI_EMERG; p <= NLog::PRI_TRACE; ++p) {
- TABLER() {
- TABLED() {
- str << "<a href = 'logger?c=-1&sp=" << p << "'>"
- << NLog::PriorityToString(NLog::EPrio(p)) << "</a>";
- }
- }
- }
- }
- }
- TAG(TH4) {
- str << "Change sampling rate for all components";
- }
- str << "<form method=\"GET\">" << Endl;
- str << "Rate: <input type=\"number\" name=\"sr\" value=\"0\"/>" << Endl;
- str << "<input type=\"hidden\" name=\"c\" value=\"-1\">" << Endl;
- str << "<input class=\"btn btn-primary\" type=\"submit\" value=\"Change\"/>" << Endl;
- str << "</form>" << Endl;
- TAG(TH4) {
- str << "Drop log entries in case of overflow: "
- << (Settings->AllowDrop ? "Enabled" : "Disabled");
- }
- str << "<form method=\"GET\">" << Endl;
- str << "<input type=\"hidden\" name=\"allowdrop\" value=\"" << (Settings->AllowDrop ? "0" : "1") << "\"/>" << Endl;
- str << "<input class=\"btn btn-primary\" type=\"submit\" value=\"" << (Settings->AllowDrop ? "Disable" : "Enable") << "\"/>" << Endl;
- str << "</form>" << Endl;
- }
- }
- Metrics->GetOutputHtml(str);
- }
- }
-
- ctx.Send(ev->Sender, new NMon::TEvHttpInfoRes(str.Str()));
- }
-
- constexpr size_t TimeBufSize = 512;
-
- bool TLoggerActor::OutputRecord(NLog::TEvLog *evLog) noexcept {
- return OutputRecord(evLog->Stamp, evLog->Level.ToPrio(), evLog->Component, evLog->Line);
- }
-
- bool TLoggerActor::OutputRecord(TInstant time, NLog::EPrio priority, NLog::EComponent component,
- const TString& formatted) noexcept try {
- const auto logPrio = ::ELogPriority(ui16(priority));
-
- char buf[TimeBufSize];
- switch (Settings->Format) {
- case NActors::NLog::TSettings::PLAIN_FULL_FORMAT: {
- TStringBuilder logRecord;
- if (Settings->UseLocalTimestamps) {
- logRecord << FormatLocalTimestamp(time, buf);
- } else {
- logRecord << time;
- }
- logRecord
- << Settings->MessagePrefix
- << " :" << Settings->ComponentName(component)
- << " " << PriorityToString(priority)
- << ": " << formatted;
- LogBackend->WriteData(
- TLogRecord(logPrio, logRecord.data(), logRecord.size()));
- } break;
-
- case NActors::NLog::TSettings::PLAIN_SHORT_FORMAT: {
- TStringBuilder logRecord;
- logRecord
- << Settings->ComponentName(component)
- << ": " << formatted;
- LogBackend->WriteData(
- TLogRecord(logPrio, logRecord.data(), logRecord.size()));
- } break;
-
- case NActors::NLog::TSettings::JSON_FORMAT: {
- NJsonWriter::TBuf json;
- json.BeginObject()
- .WriteKey("@timestamp")
- .WriteString(Settings->UseLocalTimestamps ? FormatLocalTimestamp(time, buf) : time.ToString().data())
- .WriteKey("microseconds")
- .WriteULongLong(time.MicroSeconds())
- .WriteKey("host")
- .WriteString(Settings->ShortHostName)
- .WriteKey("cluster")
- .WriteString(Settings->ClusterName)
- .WriteKey("priority")
- .WriteString(PriorityToString(priority))
- .WriteKey("npriority")
- .WriteInt((int)priority)
- .WriteKey("component")
- .WriteString(Settings->ComponentName(component))
- .WriteKey("tag")
- .WriteString("KIKIMR")
- .WriteKey("revision")
- .WriteInt(GetProgramSvnRevision())
- .WriteKey("message")
- .WriteString(formatted)
- .EndObject();
- auto logRecord = json.Str();
- LogBackend->WriteData(
- TLogRecord(logPrio, logRecord.data(), logRecord.size()));
- } break;
- }
-
- return true;
- } catch (...) {
- return false;
- }
-
- void TLoggerActor::HandleLogEventDrop(const NLog::TEvLog::TPtr& ev) {
- WriteMessageStat(*ev->Get());
- Metrics->IncDroppedMsgs();
- }
-
- void TLoggerActor::HandleWakeup() {
- Become(&TThis::StateFunc);
- }
-
- const char* TLoggerActor::FormatLocalTimestamp(TInstant time, char* buf) {
- struct tm localTime;
- time.LocalTime(&localTime);
- int r = strftime(buf, TimeBufSize, "%Y-%m-%d-%H-%M-%S", &localTime);
- Y_ABORT_UNLESS(r != 0);
- return buf;
- }
-
- TAutoPtr<TLogBackend> CreateSysLogBackend(const TString& ident,
- bool logPError, bool logCons) {
- int flags = 0;
- if (logPError)
- flags |= TSysLogBackend::LogPerror;
- if (logCons)
- flags |= TSysLogBackend::LogCons;
-
- return new TSysLogBackend(ident.data(), TSysLogBackend::TSYSLOG_LOCAL1, flags);
- }
-
- class TStderrBackend: public TLogBackend {
- public:
- TStderrBackend() {
- }
- void WriteData(const TLogRecord& rec) override {
-#ifdef _MSC_VER
- if (IsDebuggerPresent()) {
- TString x;
- x.reserve(rec.Len + 2);
- x.append(rec.Data, rec.Len);
- x.append('\n');
- OutputDebugString(x.c_str());
- }
-#endif
- bool isOk = false;
- do {
- try {
- TRecordWithNewline r(rec);
- Cerr.Write(r.Buf.Data(), r.Buf.Filled());
- isOk = true;
- } catch (TSystemError err) {
- // Interrupted system call
- Y_UNUSED(err);
- }
- } while (!isOk);
- }
-
- void ReopenLog() override {
- }
-
- private:
- const TString Indent;
- };
-
- class TLineFileLogBackend: public TFileLogBackend {
- public:
- TLineFileLogBackend(const TString& path)
- : TFileLogBackend(path)
- {
- }
-
- // Append newline after every record
- void WriteData(const TLogRecord& rec) override {
- TFileLogBackend::WriteData(TRecordWithNewline(rec));
- }
- };
-
- class TCompositeLogBackend: public TLogBackend {
- public:
- TCompositeLogBackend(TVector<TAutoPtr<TLogBackend>>&& underlyingBackends)
- : UnderlyingBackends(std::move(underlyingBackends))
- {
- }
-
- void WriteData(const TLogRecord& rec) override {
- for (auto& b: UnderlyingBackends) {
- b->WriteData(rec);
- }
- }
-
- void ReopenLog() override {
- }
-
- private:
- TVector<TAutoPtr<TLogBackend>> UnderlyingBackends;
- };
-
- TAutoPtr<TLogBackend> CreateStderrBackend() {
- return new TStderrBackend();
- }
-
- TAutoPtr<TLogBackend> CreateFileBackend(const TString& fileName) {
- return new TLineFileLogBackend(fileName);
- }
-
- TAutoPtr<TLogBackend> CreateNullBackend() {
- return new TNullLogBackend();
- }
-
- TAutoPtr<TLogBackend> CreateCompositeLogBackend(TVector<TAutoPtr<TLogBackend>>&& underlyingBackends) {
- return new TCompositeLogBackend(std::move(underlyingBackends));
- }
-
- class TLogContext: TNonCopyable {
- private:
- class TStackedContext {
- private:
- const TLogContextGuard* Guard;
- std::optional<NLog::EComponent> Component;
- mutable std::optional<TString> CurrentHeader;
- public:
- TStackedContext(const TLogContextGuard* guard, std::optional<NLog::EComponent>&& component)
- : Guard(guard)
- , Component(std::move(component))
- {
- }
-
- ui64 GetId() const {
- return Guard->GetId();
- }
-
- const std::optional<NLog::EComponent>& GetComponent() const {
- return Component;
- }
-
- TString GetCurrentHeader() const {
- if (!CurrentHeader) {
- CurrentHeader = Guard->GetResult();
- }
- return *CurrentHeader;
- }
- };
-
- std::vector<TStackedContext> Stack;
- public:
- void Push(const TLogContextGuard& context) {
- std::optional<NLog::EComponent> component;
- if (Stack.empty() || context.GetComponent()) {
- component = context.GetComponent();
- } else {
- component = Stack.back().GetComponent();
- }
- Stack.emplace_back(&context, std::move(component));
- }
-
- void Pop(const TLogContextGuard& context) {
- Y_ABORT_UNLESS(Stack.size() && Stack.back().GetId() == context.GetId());
- Stack.pop_back();
- }
-
- std::optional<NLog::EComponent> GetCurrentComponent() const {
- if (Stack.empty()) {
- return {};
- }
- return Stack.back().GetComponent();
- }
-
- TString GetCurrentHeader() {
- TStringBuilder sb;
- for (auto&& i : Stack) {
- sb << i.GetCurrentHeader();
- }
- return sb;
- }
- };
-
- namespace {
- Y_POD_THREAD(ui64) GuardId;
- Y_THREAD(TLogContext) TlsLogContext;
-
- }
-
- TLogContextGuard::~TLogContextGuard() {
- TlsLogContext.Get().Pop(*this);
- }
-
- TLogContextGuard::TLogContextGuard(const TLogContextBuilder& builder)
- : TBase(builder.GetResult())
- , Component(builder.GetComponent())
- , Id(++GuardId)
- {
- TlsLogContext.Get().Push(*this);
- }
-
- int TLogContextGuard::GetCurrentComponent(const ::NActors::NLog::EComponent defComponent) {
- return TlsLogContext.Get().GetCurrentComponent().value_or(defComponent);
- }
-
- TLogRecordConstructor::TLogRecordConstructor() {
- TBase::WriteDirectly(TlsLogContext.Get().GetCurrentHeader());
- }
-
- TFormattedRecordWriter::TFormattedRecordWriter(::NActors::NLog::EPriority priority, ::NActors::NLog::EComponent component)
- : ActorContext(NActors::TlsActivationContext ? &NActors::TlsActivationContext->AsActorContext() : nullptr)
- , Priority(priority)
- , Component(component) {
- TBase::WriteDirectly(TlsLogContext.Get().GetCurrentHeader());
- }
-
- TFormattedRecordWriter::~TFormattedRecordWriter() {
- if (ActorContext) {
- ::NActors::MemLogAdapter(*ActorContext, Priority, Component, TBase::GetResult());
- } else {
- Cerr << "FALLBACK_ACTOR_LOGGING;priority=" << Priority << ";component=" << Component << ";" << TBase::GetResult() << Endl;
- }
- }
-
- TVerifyFormattedRecordWriter::TVerifyFormattedRecordWriter(const TString& conditionText)
- : ConditionText(conditionText) {
- TBase::WriteDirectly(TlsLogContext.Get().GetCurrentHeader());
- TBase::Write("verification", ConditionText);
-
- }
-
- TVerifyFormattedRecordWriter::~TVerifyFormattedRecordWriter() {
- const TString data = TBase::GetResult();
- Y_ABORT("%s", data.data());
- }
-
- TEnsureFormattedRecordWriter::TEnsureFormattedRecordWriter(const TString& conditionText)
- : ConditionText(conditionText) {
- TBase::WriteDirectly(TlsLogContext.Get().GetCurrentHeader());
- TBase::Write("verification", ConditionText);
-
- }
-
- TEnsureFormattedRecordWriter::~TEnsureFormattedRecordWriter() noexcept(false) {
- const TString data = TBase::GetResult();
- if (NActors::TlsActivationContext) {
- ::NActors::MemLogAdapter(NActors::TlsActivationContext->AsActorContext(), NLog::EPriority::PRI_ERROR, 0, data);
- } else {
- Cerr << "FALLBACK_EXCEPTION_LOGGING;component=EXCEPTION;" << data << Endl;
- }
- if (!std::uncaught_exceptions()) {
- Y_ENSURE(false, data.data());
- } else {
- Y_ABORT("%s", data.data());
- }
- }
-
-}
diff --git a/library/cpp/actors/core/log.h b/library/cpp/actors/core/log.h
deleted file mode 100644
index 9f1d367932..0000000000
--- a/library/cpp/actors/core/log.h
+++ /dev/null
@@ -1,627 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include "log_iface.h"
-#include "log_settings.h"
-#include "log_metrics.h"
-#include "log_buffer.h"
-#include "actorsystem.h"
-#include "events.h"
-#include "event_local.h"
-#include "hfunc.h"
-#include "mon.h"
-
-#include <util/generic/vector.h>
-#include <util/string/printf.h>
-#include <util/string/builder.h>
-#include <util/system/yassert.h>
-#include <library/cpp/logger/all.h>
-#include <library/cpp/json/writer/json.h>
-#include <library/cpp/svnversion/svnversion.h>
-
-#include <library/cpp/actors/memory_log/memlog.h>
-
-// TODO: limit number of messages per second
-// TODO: make TLogComponentLevelRequest/Response network messages
-
-#define IS_LOG_PRIORITY_ENABLED(priority, component) \
- [p = static_cast<::NActors::NLog::EPriority>(priority), c = static_cast<::NActors::NLog::EComponent>(component)]() -> bool { \
- ::NActors::TActivationContext *context = ::NActors::TlsActivationContext; \
- return !context || context->LoggerSettings()->Satisfies(p, c, 0ull); \
- }()
-
-#define IS_EMERG_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_EMERG, component)
-#define IS_ALERT_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_ALERT, component)
-#define IS_CRIT_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_CRIT, component)
-#define IS_ERROR_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_ERROR, component)
-#define IS_WARN_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_WARN, component)
-#define IS_NOTICE_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_NOTICE, component)
-#define IS_INFO_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_INFO, component)
-#define IS_DEBUG_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_DEBUG, component)
-#define IS_TRACE_LOG_ENABLED(component) IS_LOG_PRIORITY_ENABLED(NActors::NLog::PRI_TRACE, component)
-
-#define LOG_LOG_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, ...) \
- do { \
- ::NActors::NLog::TSettings* mSettings = static_cast<::NActors::NLog::TSettings*>((actorCtxOrSystem).LoggerSettings()); \
- ::NActors::NLog::EPriority mPriority = static_cast<::NActors::NLog::EPriority>(priority); \
- ::NActors::NLog::EComponent mComponent = static_cast<::NActors::NLog::EComponent>(component); \
- if (mSettings && mSettings->Satisfies(mPriority, mComponent, sampleBy)) { \
- ::NActors::MemLogAdapter( \
- actorCtxOrSystem, priority, component, __VA_ARGS__); \
- } \
- } while (0) /**/
-
-#define LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, stream) \
- LOG_LOG_SAMPLED_BY(actorCtxOrSystem, priority, component, sampleBy, [&]() -> TString { \
- TStringBuilder logStringBuilder; \
- logStringBuilder << stream; \
- return std::move(logStringBuilder); \
- }())
-
-#define LOG_LOG(actorCtxOrSystem, priority, component, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, priority, component, 0ull, __VA_ARGS__)
-#define LOG_LOG_S(actorCtxOrSystem, priority, component, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, priority, component, 0ull, stream)
-
-// Use these macros for logging via the actor system or an actor context (a usage sketch follows below)
-#define LOG_EMERG(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_EMERG, component, __VA_ARGS__)
-#define LOG_ALERT(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_ALERT, component, __VA_ARGS__)
-#define LOG_CRIT(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_CRIT, component, __VA_ARGS__)
-#define LOG_ERROR(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_ERROR, component, __VA_ARGS__)
-#define LOG_WARN(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_WARN, component, __VA_ARGS__)
-#define LOG_NOTICE(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, component, __VA_ARGS__)
-#define LOG_INFO(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_INFO, component, __VA_ARGS__)
-#define LOG_DEBUG(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, component, __VA_ARGS__)
-#define LOG_TRACE(actorCtxOrSystem, component, ...) LOG_LOG(actorCtxOrSystem, NActors::NLog::PRI_TRACE, component, __VA_ARGS__)
-
-#define LOG_EMERG_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_EMERG, component, stream)
-#define LOG_ALERT_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_ALERT, component, stream)
-#define LOG_CRIT_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_CRIT, component, stream)
-#define LOG_ERROR_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_ERROR, component, stream)
-#define LOG_WARN_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_WARN, component, stream)
-#define LOG_NOTICE_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, component, stream)
-#define LOG_INFO_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_INFO, component, stream)
-#define LOG_DEBUG_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, component, stream)
-#define LOG_TRACE_S(actorCtxOrSystem, component, stream) LOG_LOG_S(actorCtxOrSystem, NActors::NLog::PRI_TRACE, component, stream)
-
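-// Usage sketch (illustrative; SOME stands for any registered log component, as in the throttler example below):
-//
-//   LOG_DEBUG(ctx, SOME, "processed %" PRIu32 " items", count);
-//   LOG_ERROR_S(ctx, SOME, "unexpected state " << state);
-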
-#define ALOG_EMERG(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_EMERG, component, stream)
-#define ALOG_ALERT(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_ALERT, component, stream)
-#define ALOG_CRIT(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_CRIT, component, stream)
-#define ALOG_ERROR(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_ERROR, component, stream)
-#define ALOG_WARN(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_WARN, component, stream)
-#define ALOG_NOTICE(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_NOTICE, component, stream)
-#define ALOG_INFO(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_INFO, component, stream)
-#define ALOG_DEBUG(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_DEBUG, component, stream)
-#define ALOG_TRACE(component, stream) LOG_LOG_S(*NActors::TlsActivationContext, NActors::NLog::PRI_TRACE, component, stream)
-
-#define LOG_EMERG_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_EMERG, component, sampleBy, __VA_ARGS__)
-#define LOG_ALERT_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_ALERT, component, sampleBy, __VA_ARGS__)
-#define LOG_CRIT_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_CRIT, component, sampleBy, __VA_ARGS__)
-#define LOG_ERROR_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_ERROR, component, sampleBy, __VA_ARGS__)
-#define LOG_WARN_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_WARN, component, sampleBy, __VA_ARGS__)
-#define LOG_NOTICE_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, component, sampleBy, __VA_ARGS__)
-#define LOG_INFO_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_INFO, component, sampleBy, __VA_ARGS__)
-#define LOG_DEBUG_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, component, sampleBy, __VA_ARGS__)
-#define LOG_TRACE_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, ...) LOG_LOG_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_TRACE, component, sampleBy, __VA_ARGS__)
-
-#define LOG_EMERG_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_EMERG, component, sampleBy, stream)
-#define LOG_ALERT_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_ALERT, component, sampleBy, stream)
-#define LOG_CRIT_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_CRIT, component, sampleBy, stream)
-#define LOG_ERROR_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_ERROR, component, sampleBy, stream)
-#define LOG_WARN_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_WARN, component, sampleBy, stream)
-#define LOG_NOTICE_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_NOTICE, component, sampleBy, stream)
-#define LOG_INFO_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_INFO, component, sampleBy, stream)
-#define LOG_DEBUG_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_DEBUG, component, sampleBy, stream)
-#define LOG_TRACE_S_SAMPLED_BY(actorCtxOrSystem, component, sampleBy, stream) LOG_LOG_S_SAMPLED_BY(actorCtxOrSystem, NActors::NLog::PRI_TRACE, component, sampleBy, stream)
-
-// Log Throttling
-#define LOG_LOG_THROTTLE(throttler, actorCtxOrSystem, priority, component, ...) \
- do { \
- if ((throttler).Kick()) { \
- LOG_LOG(actorCtxOrSystem, priority, component, __VA_ARGS__); \
- } \
- } while (0) /**/
-
-#define LOG_LOG_S_THROTTLE(throttler, actorCtxOrSystem, priority, component, stream) \
- do { \
- if ((throttler).Kick()) { \
- LOG_LOG_S(actorCtxOrSystem, priority, component, stream); \
- } \
- } while (0) /**/
-
-#define TRACE_EVENT(component) \
- const auto& currentTracer = component; \
- if (ev->HasEvent()) { \
- LOG_TRACE(*TlsActivationContext, currentTracer, "%s, received event# %" PRIu32 ", Sender %s, Recipient %s: %s", \
- __FUNCTION__, ev->Type, ev->Sender.ToString().data(), SelfId().ToString().data(), ev->ToString().substr(0, 1000).data()); \
- } else { \
- LOG_TRACE(*TlsActivationContext, currentTracer, "%s, received event# %" PRIu32 ", Sender %s, Recipient %s", \
- __FUNCTION__, ev->Type, ev->Sender.ToString().data(), ev->Recipient.ToString().data()); \
- }
-#define TRACE_EVENT_TYPE(eventType) LOG_TRACE(*TlsActivationContext, currentTracer, "%s, processing event %s", __FUNCTION__, eventType)
-
-class TLog;
-class TLogBackend;
-
-namespace NActors {
- class TLoggerActor;
-
- ////////////////////////////////////////////////////////////////////////////////
- // SET LOG LEVEL FOR A COMPONENT
- ////////////////////////////////////////////////////////////////////////////////
- class TLogComponentLevelRequest: public TEventLocal<TLogComponentLevelRequest, int(NLog::EEv::LevelReq)> {
- public:
- // set given priority for the component
- TLogComponentLevelRequest(NLog::EPriority priority, NLog::EComponent component)
- : Priority(priority)
- , Component(component)
- {
- }
-
- // set given priority for all components
- TLogComponentLevelRequest(NLog::EPriority priority)
- : Priority(priority)
- , Component(NLog::InvalidComponent)
- {
- }
-
- protected:
- NLog::EPriority Priority;
- NLog::EComponent Component;
-
- friend class TLoggerActor;
- };
-
- class TLogComponentLevelResponse: public TEventLocal<TLogComponentLevelResponse, int(NLog::EEv::LevelResp)> {
- public:
- TLogComponentLevelResponse(int code, const TString& explanation)
- : Code(code)
- , Explanation(explanation)
- {
- }
-
- int GetCode() const {
- return Code;
- }
-
- const TString& GetExplanation() const {
- return Explanation;
- }
-
- protected:
- int Code;
- TString Explanation;
- };
-
- class TFlushLogBuffer: public TEventLocal<TFlushLogBuffer, int(NLog::EEv::Buffer)> {
- public:
- TFlushLogBuffer() {
- }
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // LOGGER ACTOR
- ////////////////////////////////////////////////////////////////////////////////
- class TLoggerActor: public TActor<TLoggerActor> {
- public:
- static IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::LOG_ACTOR;
- }
-
- TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- TAutoPtr<TLogBackend> logBackend,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters);
- TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- std::shared_ptr<TLogBackend> logBackend,
- TIntrusivePtr<NMonitoring::TDynamicCounters> counters);
- TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- TAutoPtr<TLogBackend> logBackend,
- std::shared_ptr<NMonitoring::TMetricRegistry> metrics);
- TLoggerActor(TIntrusivePtr<NLog::TSettings> settings,
- std::shared_ptr<TLogBackend> logBackend,
- std::shared_ptr<NMonitoring::TMetricRegistry> metrics);
- ~TLoggerActor();
-
- void StateFunc(TAutoPtr<IEventHandle>& ev) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TFlushLogBuffer, FlushLogBufferMessageEvent);
- HFunc(NLog::TEvLog, HandleLogEvent);
- HFunc(TLogComponentLevelRequest, HandleLogComponentLevelRequest);
- HFunc(NMon::TEvHttpInfo, HandleMonInfo);
- }
- }
-
- STFUNC(StateDefunct) {
- switch (ev->GetTypeRewrite()) {
- hFunc(NLog::TEvLog, HandleLogEventDrop);
- HFunc(TLogComponentLevelRequest, HandleLogComponentLevelRequest);
- HFunc(NMon::TEvHttpInfo, HandleMonInfo);
- cFunc(TEvents::TEvWakeup::EventType, HandleWakeup);
- }
- }
-
- // Directly call logger instead of sending a message
- void Log(TInstant time, NLog::EPriority priority, NLog::EComponent component, const char* c, ...);
-
- static void Throttle(const NLog::TSettings& settings);
-
- private:
- TIntrusivePtr<NLog::TSettings> Settings;
- std::shared_ptr<TLogBackend> LogBackend;
- ui64 PassedCount = 0;
- TDuration WakeupInterval{TDuration::Seconds(5)};
- std::unique_ptr<ILoggerMetrics> Metrics;
- TLogBuffer LogBuffer;
-
- void BecomeDefunct();
- void FlushLogBufferMessageEvent(TFlushLogBuffer::TPtr& ev, const NActors::TActorContext& ctx);
- void HandleLogEvent(NLog::TEvLog::TPtr& ev, const TActorContext& ctx);
- void HandleLogEventDrop(const NLog::TEvLog::TPtr& ev);
- void HandleLogComponentLevelRequest(TLogComponentLevelRequest::TPtr& ev, const TActorContext& ctx);
- void HandleMonInfo(NMon::TEvHttpInfo::TPtr& ev, const TActorContext& ctx);
- void HandleWakeup();
- [[nodiscard]] bool OutputRecord(NLog::TEvLog *evLog) noexcept;
- [[nodiscard]] bool OutputRecord(TInstant time, NLog::EPrio priority, NLog::EComponent component, const TString& formatted) noexcept;
- void RenderComponentPriorities(IOutputStream& str);
- void FlushLogBufferMessage();
- void WriteMessageStat(const NLog::TEvLog& ev);
- static const char* FormatLocalTimestamp(TInstant time, char* buf);
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // LOG THROTTLING
- // TTrivialLogThrottler -- logs at most one message per 'period' duration
- // Use case:
- // TTrivialLogThrottler throttler(TDuration::Minutes(1));
- // ....
- // LOG_LOG_THROTTLE(throttler, ctx, NActors::NLog::PRI_ERROR, SOME, "Error");
- ////////////////////////////////////////////////////////////////////////////////
- class TTrivialLogThrottler {
- public:
- TTrivialLogThrottler(TDuration period)
- : Period(period)
- {
- }
-
- // return value:
- // true -- write to log
- // false -- don't write to log, throttle
- bool Kick() {
- auto now = TInstant::Now();
- if (now >= (LastWrite + Period)) {
- LastWrite = now;
- return true;
- } else {
- return false;
- }
- }
-
- private:
- TInstant LastWrite;
- TDuration Period;
- };
-
- ////////////////////////////////////////////////////////////////////////////////
- // SYSLOG BACKEND
- ////////////////////////////////////////////////////////////////////////////////
- TAutoPtr<TLogBackend> CreateSysLogBackend(const TString& ident,
- bool logPError, bool logCons);
- TAutoPtr<TLogBackend> CreateStderrBackend();
- TAutoPtr<TLogBackend> CreateFileBackend(const TString& fileName);
- TAutoPtr<TLogBackend> CreateNullBackend();
- TAutoPtr<TLogBackend> CreateCompositeLogBackend(TVector<TAutoPtr<TLogBackend>>&& underlyingBackends);
-
- /////////////////////////////////////////////////////////////////////
- // Logging adaptors for memory log and logging into filesystem
- /////////////////////////////////////////////////////////////////////
-
- namespace NDetail {
- inline void Y_PRINTF_FORMAT(2, 3) PrintfV(TString& dst, const char* format, ...) {
- va_list params;
- va_start(params, format);
- vsprintf(dst, format, params);
- va_end(params);
- }
-
- inline void PrintfV(TString& dst, const char* format, va_list params) {
- vsprintf(dst, format, params);
- }
- } // namespace NDetail
-
- template <typename TCtx>
- inline void DeliverLogMessage(TCtx& ctx, NLog::EPriority mPriority, NLog::EComponent mComponent, TString &&str)
- {
- const NLog::TSettings *mSettings = ctx.LoggerSettings();
- TLoggerActor::Throttle(*mSettings);
- ctx.Send(new IEventHandle(mSettings->LoggerActorId, TActorId(), new NLog::TEvLog(mPriority, mComponent, std::move(str))));
- }
-
- template <typename TCtx, typename... TArgs>
- inline void MemLogAdapter(
- TCtx& actorCtxOrSystem,
- NLog::EPriority mPriority,
- NLog::EComponent mComponent,
- const char* format, TArgs&&... params) {
- TString Formatted;
-
- if constexpr (sizeof... (params) > 0) {
- NDetail::PrintfV(Formatted, format, std::forward<TArgs>(params)...);
- } else {
- NDetail::PrintfV(Formatted, "%s", format);
- }
-
- MemLogWrite(Formatted.data(), Formatted.size(), true);
- DeliverLogMessage(actorCtxOrSystem, mPriority, mComponent, std::move(Formatted));
- }
-
- template <typename TCtx>
- Y_WRAPPER inline void MemLogAdapter(
- TCtx& actorCtxOrSystem,
- NLog::EPriority mPriority,
- NLog::EComponent mComponent,
- const TString& str) {
-
- MemLogWrite(str.data(), str.size(), true);
- DeliverLogMessage(actorCtxOrSystem, mPriority, mComponent, TString(str));
- }
-
- template <typename TCtx>
- Y_WRAPPER inline void MemLogAdapter(
- TCtx& actorCtxOrSystem,
- NLog::EPriority mPriority,
- NLog::EComponent mComponent,
- TString&& str) {
-
- MemLogWrite(str.data(), str.size(), true);
- DeliverLogMessage(actorCtxOrSystem, mPriority, mComponent, std::move(str));
- }
-
- class TRecordWriter: public TStringBuilder {
- private:
- const TActorContext* ActorContext = nullptr;
- ::NActors::NLog::EPriority Priority = ::NActors::NLog::EPriority::PRI_INFO;
- ::NActors::NLog::EComponent Component = 0;
- public:
- TRecordWriter(::NActors::NLog::EPriority priority, ::NActors::NLog::EComponent component)
- : ActorContext(NActors::TlsActivationContext ? &NActors::TlsActivationContext->AsActorContext() : nullptr)
- , Priority(priority)
- , Component(component) {
-
- }
-
- ~TRecordWriter() {
- if (ActorContext) {
- ::NActors::MemLogAdapter(*ActorContext, Priority, Component, *this);
- } else {
- Cerr << "FALLBACK_ACTOR_LOGGING;priority=" << Priority << ";component=" << Component << ";" << static_cast<const TStringBuilder&>(*this) << Endl;
- }
- }
- };
-
- class TFormatedStreamWriter: TNonCopyable {
- private:
- TStringBuilder Builder;
- protected:
- template <class TKey, class TValue>
- TFormatedStreamWriter& Write(const TKey& pName, const TValue& pValue) {
- Builder << pName << "=" << pValue << ";";
- return *this;
- }
-
- template <class TKey, class TValue>
- TFormatedStreamWriter& Write(const TKey& pName, const std::optional<TValue>& pValue) {
- if (pValue) {
- Builder << pName << "=" << *pValue << ";";
- } else {
- Builder << pName << "=NO_VALUE_OPTIONAL;";
- }
- return *this;
- }
-
- TFormatedStreamWriter& WriteDirectly(const TString& data) {
- Builder << data;
- return *this;
- }
- public:
- TFormatedStreamWriter() = default;
- TFormatedStreamWriter(const TString& info) {
- Builder << info;
- }
- const TString& GetResult() const {
- return Builder;
- }
- };
-
- class TLogContextBuilder: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- std::optional<::NActors::NLog::EComponent> Component;
- TLogContextBuilder(std::optional<::NActors::NLog::EComponent> component)
- : Component(component) {
- }
- public:
-
- template <class TKey, class TValue>
- TLogContextBuilder& operator()(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
-
- const std::optional<::NActors::NLog::EComponent>& GetComponent() const {
- return Component;
- }
-
- static TLogContextBuilder Build(std::optional<::NActors::NLog::EComponent> component = {}) {
- return TLogContextBuilder(component);
- }
- };
-
- class TLogContextGuard: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- std::optional<::NActors::NLog::EComponent> Component;
- const ui64 Id = 0;
- public:
- TLogContextGuard(const TLogContextBuilder& builder);
-
- ~TLogContextGuard();
-
- static int GetCurrentComponent(const ::NActors::NLog::EComponent defComponent = 0);
-
- const std::optional<::NActors::NLog::EComponent>& GetComponent() const {
- return Component;
- }
-
- ui64 GetId() const {
- return Id;
- }
-
- template <class TKey, class TValue>
- TLogContextGuard& Write(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
-
- };
-
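- // Usage sketch (illustrative; SOME, tabletId and txId are placeholders): push a scoped key/value context so
- // that records written via the formatted-log macros while the guard is alive carry the same prefix.
- //
- //   TLogContextGuard guard(TLogContextBuilder::Build(SOME)("tablet_id", tabletId)("tx_id", txId));
- //   AFL_DEBUG(SOME)("event", "start");   // the record includes tablet_id/tx_id from the guard
-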
- class TLogRecordConstructor: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- public:
-
- TLogRecordConstructor();
-
- template <class TKey, class TValue>
- TLogRecordConstructor& operator()(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
- };
-
- class TFormattedRecordWriter: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- const TActorContext* ActorContext = nullptr;
- ::NActors::NLog::EPriority Priority = ::NActors::NLog::EPriority::PRI_INFO;
- ::NActors::NLog::EComponent Component = 0;
- public:
-
- TFormattedRecordWriter(::NActors::NLog::EPriority priority, ::NActors::NLog::EComponent component);
-
- template <class TKey, class TValue>
- TFormattedRecordWriter& operator()(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
-
- ~TFormattedRecordWriter();
- };
-
- class TVerifyFormattedRecordWriter: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- const TString ConditionText;
- public:
-
- TVerifyFormattedRecordWriter(const TString& conditionText);
-
- template <class TKey, class TValue>
- TVerifyFormattedRecordWriter& operator()(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
-
- ~TVerifyFormattedRecordWriter();
- };
-
- class TEnsureFormattedRecordWriter: public TFormatedStreamWriter {
- private:
- using TBase = TFormatedStreamWriter;
- const TString ConditionText;
- public:
-
- TEnsureFormattedRecordWriter(const TString& conditionText);
-
- template <class TKey, class TValue>
- TEnsureFormattedRecordWriter& operator()(const TKey& pName, const TValue& pValue) {
- TBase::Write(pName, pValue);
- return *this;
- }
-
- ~TEnsureFormattedRecordWriter() noexcept(false);
- };
-}
-
-#define AFL_VERIFY(condition) if (condition); else NActors::TVerifyFormattedRecordWriter(#condition)("fline", TStringBuilder() << TStringBuf(__FILE__).RAfter(LOCSLASH_C) << ":" << __LINE__)
-#define AFL_ENSURE(condition) if (condition); else NActors::TEnsureFormattedRecordWriter(#condition)("fline", TStringBuilder() << TStringBuf(__FILE__).RAfter(LOCSLASH_C) << ":" << __LINE__)
-
-#ifndef NDEBUG
-/// Assert that depend on NDEBUG macro and outputs message like printf
-#define AFL_VERIFY_DEBUG AFL_VERIFY
-#else
-#define AFL_VERIFY_DEBUG(condition) if (true); else NActors::TVerifyFormattedRecordWriter(#condition)("fline", TStringBuilder() << TStringBuf(__FILE__).RAfter(LOCSLASH_C) << ":" << __LINE__)
-#endif
-
-#define ACTORS_FORMATTED_LOG(mPriority, mComponent) \
- if (NActors::TlsActivationContext && !IS_LOG_PRIORITY_ENABLED(mPriority, mComponent));\
- else NActors::TFormattedRecordWriter(\
- static_cast<::NActors::NLog::EPriority>(mPriority), static_cast<::NActors::NLog::EComponent>(mComponent)\
- )("fline", TStringBuilder() << TStringBuf(__FILE__).RAfter(LOCSLASH_C) << ":" << __LINE__)
-
-#define ACTORS_LOG_STREAM(mPriority, mComponent) \
- if (NActors::TlsActivationContext && !IS_LOG_PRIORITY_ENABLED(mPriority, mComponent));\
- else NActors::TRecordWriter(\
- static_cast<::NActors::NLog::EPriority>(mPriority), static_cast<::NActors::NLog::EComponent>(mComponent)\
- ) << TStringBuf(__FILE__).RAfter(LOCSLASH_C) << ":" << __LINE__ << " :"
-
-#define ALS_TRACE(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_TRACE, component)
-#define ALS_DEBUG(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_DEBUG, component)
-#define ALS_INFO(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_INFO, component)
-#define ALS_NOTICE(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_NOTICE, component)
-#define ALS_WARN(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_WARN, component)
-#define ALS_ERROR(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_ERROR, component)
-#define ALS_CRIT(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_CRIT, component)
-#define ALS_ALERT(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_ALERT, component)
-#define ALS_EMERG(component) ACTORS_LOG_STREAM(NActors::NLog::PRI_EMERG, component)
-
-#define AFL_TRACE(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_TRACE, component)
-#define AFL_DEBUG(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_DEBUG, component)
-#define AFL_INFO(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_INFO, component)
-#define AFL_NOTICE(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_NOTICE, component)
-#define AFL_WARN(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_WARN, component)
-#define AFL_ERROR(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ERROR, component)
-#define AFL_CRIT(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_CRIT, component)
-#define AFL_ALERT(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ALERT, component)
-#define AFL_EMERG(component) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_EMERG, component)
-
-#define DETECT_LOG_MACRO(_1, _2, NAME, ...) NAME
-
-#define BASE_CFL_TRACE2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_TRACE, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_DEBUG2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_DEBUG, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_INFO2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_INFO, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_NOTICE2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_NOTICE, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_WARN2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_WARN, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_ERROR2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ERROR, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_CRIT2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_CRIT, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_ALERT2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ALERT, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-#define BASE_CFL_EMERG2(k, v) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_EMERG, ::NActors::TLogContextGuard::GetCurrentComponent())(k, v)
-
-#define BASE_CFL_TRACE1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_TRACE, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_DEBUG1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_DEBUG, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_INFO1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_INFO, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_NOTICE1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_NOTICE, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_WARN1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_WARN, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_ERROR1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ERROR, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_CRIT1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_CRIT, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_ALERT1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_ALERT, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-#define BASE_CFL_EMERG1(defaultComponent) ACTORS_FORMATTED_LOG(NActors::NLog::PRI_EMERG, ::NActors::TLogContextGuard::GetCurrentComponent(defaultComponent))
-
-#define ACFL_TRACE(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_TRACE2, BASE_CFL_TRACE1)(__VA_ARGS__)
-#define ACFL_DEBUG(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_DEBUG2, BASE_CFL_DEBUG1)(__VA_ARGS__)
-#define ACFL_INFO(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_INFO2, BASE_CFL_INFO1)(__VA_ARGS__)
-#define ACFL_NOTICE(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_NOTICE2, BASE_CFL_NOTICE1)(__VA_ARGS__)
-#define ACFL_WARN(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_WARN2, BASE_CFL_WARN1)(__VA_ARGS__)
-#define ACFL_ERROR(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_ERROR2, BASE_CFL_ERROR1)(__VA_ARGS__)
-#define ACFL_CRIT(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_CRIT2, BASE_CFL_CRIT1)(__VA_ARGS__)
-#define ACFL_ALERT(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_ALERT2, BASE_CFL_ALERT1)(__VA_ARGS__)
-#define ACFL_EMERG(...) DETECT_LOG_MACRO(__VA_ARGS__, BASE_CFL_EMERG2, BASE_CFL_EMERG1)(__VA_ARGS__)
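For context, a minimal usage sketch of the structured-logging helpers deleted above (the AFL_*/ACFL_* macros, AFL_VERIFY and TLogContextGuard). MY_COMPONENT is a placeholder for any registered ::NActors::NLog::EComponent value, not a real component name, and the sketch assumes the declarations from the removed log header are visible.

static constexpr ::NActors::NLog::EComponent MY_COMPONENT = 1; // placeholder component id

void DoWork(int shardId) {
    // Single-component form: the macro opens a TFormattedRecordWriter and each
    // operator() call appends one "key=value;" pair; the record is emitted when
    // the writer is destroyed at the end of the full expression.
    AFL_DEBUG(MY_COMPONENT)("event", "start")("shard", shardId);

    // Scoped context: the guard remembers the component and accumulated pairs,
    // so nested code can use the ACFL_* form without repeating the component.
    NActors::TLogContextGuard guard(
        NActors::TLogContextBuilder::Build(MY_COMPONENT)("shard", shardId));
    ACFL_DEBUG("event", "processing");

    // Structured verification: the writer is only constructed when the condition
    // is false, and its destructor (implemented outside this header) fails with
    // the accumulated key=value details.
    AFL_VERIFY(shardId >= 0)("shard", shardId);
}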
diff --git a/library/cpp/actors/core/log_buffer.cpp b/library/cpp/actors/core/log_buffer.cpp
deleted file mode 100644
index 8c80f1d054..0000000000
--- a/library/cpp/actors/core/log_buffer.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-#include "log_buffer.h"
-
-#include <util/system/yassert.h>
-#include <algorithm>
-
-using namespace NActors::NLog;
-
-namespace NActors {
-TLogBuffer::TLogBuffer(ILoggerMetrics &metrics, const NLog::TSettings &settings)
- : Metrics(metrics)
- , Settings(settings)
-{}
-
-size_t TLogBuffer::GetLogCostInBytes(NLog::TEvLog *log) const {
- return LOG_STRUCTURE_BYTES + log->Line.length();
-}
-
-ui16 TLogBuffer::GetPrioIndex(NLog::EPrio prio) {
- return Min(ui16(prio), ui16(LOG_PRIORITIES_NUMBER - 1));
-}
-
-TIntrusiveList<NLog::TEvLog, NLog::TEvLogBufferLevelListTag> &TLogBuffer::GetPrioLogs(NLog::EPrio prio) {
- return PrioLogsList[GetPrioIndex(prio)];
-}
-
-void TLogBuffer::AddLog(NLog::TEvLog *log) {
- NLog::EPrio prio = log->Level.ToPrio();
- if (!CheckSize(log) && prio > NLog::EPrio::Emerg) { // always keep logs with prio Emerg = 0
- HandleIgnoredLog(log);
- return;
- }
-
- SizeBytes += GetLogCostInBytes(log);
- Logs.PushBack(log);
- GetPrioLogs(prio).PushBack(log);
-}
-
-NLog::TEvLog* TLogBuffer::Pop() {
- NLog::TEvLog* log = Logs.PopFront();
- static_cast<TIntrusiveListItem<TEvLog, TEvLogBufferLevelListTag>&>(*log).Unlink();
-
- SizeBytes -= GetLogCostInBytes(log);
-
- return log;
-}
-
-bool TLogBuffer::IsEmpty() const {
- return Logs.Empty();
-}
-
-bool TLogBuffer::CheckLogIgnoring() const {
- return IgnoredCount > 0;
-}
-
-bool TLogBuffer::CheckSize(NLog::TEvLog *log) {
- size_t startSizeBytes = SizeBytes;
-
- size_t logSize = GetLogCostInBytes(log);
- if (SizeBytes + logSize <= Settings.BufferSizeLimitBytes) {
- return true;
- }
-
- ui16 scanHighestPrio = Max((ui16)1, GetPrioIndex(log->Level.ToPrio())); // always keep logs with prio Emerg = 0
- for (ui16 scanPrio = LOG_PRIORITIES_NUMBER - 1; scanPrio >= scanHighestPrio; scanPrio--) {
- TIntrusiveList<NLog::TEvLog, NLog::TEvLogBufferLevelListTag> &scanLogs = PrioLogsList[scanPrio];
- while (!scanLogs.Empty()) {
- NLog::TEvLog* log = scanLogs.PopFront();
- SizeBytes -= GetLogCostInBytes(log);
- HandleIgnoredLog(log);
-
- if (SizeBytes + logSize <= Settings.BufferSizeLimitBytes) {
- return true;
- }
- }
- }
-
- if (startSizeBytes > SizeBytes) {
- return true;
- }
-
- return false;
-}
-
-void TLogBuffer::HandleIgnoredLog(NLog::TEvLog *log) {
- ui16 logPrio = GetPrioIndex(log->Level.ToPrio());
- Metrics.IncIgnoredMsgs();
- if (IgnoredHighestPrio > logPrio) {
- IgnoredHighestPrio = logPrio;
- }
- IgnoredCount++;
- delete log;
-}
-
-ui64 TLogBuffer::GetIgnoredCount() {
- return IgnoredCount;
-}
-
-NLog::EPrio TLogBuffer::GetIgnoredHighestPrio() {
- NLog::EPrio prio = static_cast<NLog::EPrio>(IgnoredHighestPrio);
- return prio;
-}
-
-void TLogBuffer::ClearIgnoredCount() {
- IgnoredHighestPrio = LOG_PRIORITIES_NUMBER - 1;
- IgnoredCount = 0;
-}
-
-}

diff --git a/library/cpp/actors/core/log_buffer.h b/library/cpp/actors/core/log_buffer.h
deleted file mode 100644
index 60bc09cc85..0000000000
--- a/library/cpp/actors/core/log_buffer.h
+++ /dev/null
@@ -1,40 +0,0 @@
-#pragma once
-
-#include "log_metrics.h"
-#include "log_iface.h"
-#include "log_settings.h"
-
-#include <util/generic/intrlist.h>
-
-namespace NActors {
-class TLogBuffer {
- static const size_t LOG_STRUCTURE_BYTES = sizeof(NLog::TEvLog);
- static const ui16 LOG_PRIORITIES_NUMBER = 9;
-
- ILoggerMetrics &Metrics;
- const NLog::TSettings &Settings;
-
- TIntrusiveListWithAutoDelete<NLog::TEvLog, TDelete, NLog::TEvLogBufferMainListTag> Logs;
- TIntrusiveList<NLog::TEvLog, NLog::TEvLogBufferLevelListTag> PrioLogsList[LOG_PRIORITIES_NUMBER];
-
- ui64 SizeBytes = 0;
- ui64 IgnoredCount = 0;
- ui16 IgnoredHighestPrio = LOG_PRIORITIES_NUMBER - 1;
-
- size_t GetLogCostInBytes(NLog::TEvLog *log) const;
- void HandleIgnoredLog(NLog::TEvLog *log);
- bool CheckSize(NLog::TEvLog *log);
- static inline ui16 GetPrioIndex(NLog::EPrio);
- inline TIntrusiveList<NLog::TEvLog, NLog::TEvLogBufferLevelListTag> &GetPrioLogs(NLog::EPrio);
-
- public:
- TLogBuffer(ILoggerMetrics &metrics, const NLog::TSettings &Settings);
- void AddLog(NLog::TEvLog *log);
- NLog::TEvLog *Pop();
- bool IsEmpty() const;
- bool CheckLogIgnoring() const;
- ui64 GetIgnoredCount();
- NLog::EPrio GetIgnoredHighestPrio();
- void ClearIgnoredCount();
-};
-}
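A rough sketch of how the removed TLogBuffer trades capacity for priority: once the byte budget from TSettings::BufferSizeLimitBytes is exceeded, CheckSize evicts buffered events starting from the lowest-priority bucket and never touches bucket 0 (Emerg). The metrics and settings arguments stand for whatever ILoggerMetrics implementation and NLog::TSettings instance the caller already has.

#include <library/cpp/actors/core/log_buffer.h> // header removed by this commit
#include <memory>

void BufferSketch(NActors::ILoggerMetrics& metrics, const NActors::NLog::TSettings& settings) {
    using namespace NActors::NLog;

    NActors::TLogBuffer buffer(metrics, settings);

    // Low-priority events consume the byte budget first...
    buffer.AddLog(new TEvLog(EPriority::PRI_DEBUG, /*comp=*/0, TString("debug line")));
    // ...while an Emerg event is always accepted: CheckSize frees space by
    // dropping events from the lowest-priority list upwards, never bucket 0.
    buffer.AddLog(new TEvLog(EPriority::PRI_EMERG, /*comp=*/0, TString("emergency")));

    // Drain in arrival order; ownership of each popped event passes to the caller.
    while (!buffer.IsEmpty()) {
        std::unique_ptr<TEvLog> log(buffer.Pop());
        Cerr << log->Line << Endl;
    }
}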
diff --git a/library/cpp/actors/core/log_iface.h b/library/cpp/actors/core/log_iface.h
deleted file mode 100644
index b0195f5581..0000000000
--- a/library/cpp/actors/core/log_iface.h
+++ /dev/null
@@ -1,117 +0,0 @@
-#pragma once
-
-#include "events.h"
-#include "event_local.h"
-
-namespace NActors {
- namespace NLog {
- using EComponent = int;
-
- enum EPriority : ui16 { // migrate it to EPrio whenever possible
- PRI_EMERG /* "EMERG" */,
- PRI_ALERT /* "ALERT" */,
- PRI_CRIT /* "CRIT" */,
- PRI_ERROR /* "ERROR" */,
- PRI_WARN /* "WARN" */,
- PRI_NOTICE /* "NOTICE" */,
- PRI_INFO /* "INFO" */,
- PRI_DEBUG /* "DEBUG" */,
- PRI_TRACE /* "TRACE" */
- };
-
- enum class EPrio : ui16 {
- Emerg = 0,
- Alert = 1,
- Crit = 2,
- Error = 3,
- Warn = 4,
- Notice = 5,
- Info = 6,
- Debug = 7,
- Trace = 8,
- };
-
- struct TLevel {
- TLevel(ui32 raw)
- : Raw(raw)
- {
- }
-
- TLevel(EPrio prio)
- : Raw((ui16(prio) + 1) << 8)
- {
- }
-
- EPrio ToPrio() const noexcept {
- const auto major = Raw >> 8;
-
- return major > 0 ? EPrio(major - 1) : EPrio::Emerg;
- }
-
- bool IsUrgentAbortion() const noexcept {
- return (Raw >> 8) == 0;
- }
-
-            /* Generalized monotonic level value composed of major and minor
-               levels. Minor is used for verbosity within a major level; a basic
-               EPrio maps to (EPrio + 1, 0), and Major = 0 is reserved as a special
-               space that behaves like EPrio::Emerg but with extended actions.
-               Thus a logger should map Major = 0 to EPrio::Emerg if it has no
-               idea how to handle the special emergency actions.
-             */
-
- ui32 Raw = 0; // ((ui16(EPrio) + 1) << 8) | ui8(minor)
- };
-
- enum class EEv {
- Log = EventSpaceBegin(TEvents::ES_LOGGER),
- LevelReq,
- LevelResp,
- Ignored,
- Buffer,
- End
- };
-
- static_assert(int(EEv::End) < EventSpaceEnd(TEvents::ES_LOGGER), "");
-
- struct TEvLogBufferMainListTag {};
- struct TEvLogBufferLevelListTag {};
-
- class TEvLog
- : public TEventLocal<TEvLog, int(EEv::Log)>
- , public TIntrusiveListItem<TEvLog, TEvLogBufferMainListTag>
- , public TIntrusiveListItem<TEvLog, TEvLogBufferLevelListTag>
- {
- public:
- TEvLog(TInstant stamp, TLevel level, EComponent comp, const TString &line)
- : Stamp(stamp)
- , Level(level)
- , Component(comp)
- , Line(line)
- {
- }
-
- TEvLog(TInstant stamp, TLevel level, EComponent comp, TString &&line)
- : Stamp(stamp)
- , Level(level)
- , Component(comp)
- , Line(std::move(line))
- {
- }
-
- TEvLog(EPriority prio, EComponent comp, TString line, TInstant time = TInstant::Now())
- : Stamp(time)
- , Level(EPrio(prio))
- , Component(comp)
- , Line(std::move(line))
- {
- }
-
- const TInstant Stamp = TInstant::Max();
- const TLevel Level;
- const EComponent Component = 0;
- TString Line;
- };
-
- }
-}
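A worked example of the TLevel packing described in the struct's comment: Raw stores ((EPrio + 1) << 8) | minor, and a major value of 0 is the reserved urgent space that ToPrio() folds back into Emerg. The assertions only restate what the constructors above compute.

#include <library/cpp/actors/core/log_iface.h> // header removed by this commit
#include <util/system/yassert.h>

void LevelPackingExample() {
    using NActors::NLog::EPrio;
    using NActors::NLog::TLevel;

    TLevel warn(EPrio::Warn);                        // Raw == (4 + 1) << 8 == 0x0500
    Y_ABORT_UNLESS(warn.Raw == 0x0500);
    Y_ABORT_UNLESS(warn.ToPrio() == EPrio::Warn);
    Y_ABORT_UNLESS(!warn.IsUrgentAbortion());

    TLevel urgent(ui32(0x0001));                     // major == 0: reserved urgent space
    Y_ABORT_UNLESS(urgent.IsUrgentAbortion());
    Y_ABORT_UNLESS(urgent.ToPrio() == EPrio::Emerg); // folded back to Emerg
}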
diff --git a/library/cpp/actors/core/log_metrics.h b/library/cpp/actors/core/log_metrics.h
deleted file mode 100644
index 5005b2b776..0000000000
--- a/library/cpp/actors/core/log_metrics.h
+++ /dev/null
@@ -1,152 +0,0 @@
-#pragma once
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/monlib/metrics/metric_registry.h>
-#include <library/cpp/monlib/service/pages/templates.h>
-
-namespace NActors {
-class ILoggerMetrics {
-public:
- virtual ~ILoggerMetrics() = default;
-
- virtual void IncActorMsgs() = 0;
- virtual void IncDirectMsgs() = 0;
- virtual void IncLevelRequests() = 0;
- virtual void IncIgnoredMsgs() = 0;
- virtual void IncAlertMsgs() = 0;
- virtual void IncEmergMsgs() = 0;
- virtual void IncDroppedMsgs() = 0;
-
- virtual void GetOutputHtml(IOutputStream&) = 0;
-};
-
-class TLoggerCounters : public ILoggerMetrics {
-public:
- TLoggerCounters(TIntrusivePtr<NMonitoring::TDynamicCounters> counters)
- : DynamicCounters(counters)
- {
- ActorMsgs_ = DynamicCounters->GetCounter("ActorMsgs", true);
- DirectMsgs_ = DynamicCounters->GetCounter("DirectMsgs", true);
- LevelRequests_ = DynamicCounters->GetCounter("LevelRequests", true);
- IgnoredMsgs_ = DynamicCounters->GetCounter("IgnoredMsgs", true);
- DroppedMsgs_ = DynamicCounters->GetCounter("DroppedMsgs", true);
-
- AlertMsgs_ = DynamicCounters->GetCounter("AlertMsgs", true);
- EmergMsgs_ = DynamicCounters->GetCounter("EmergMsgs", true);
- }
-
- ~TLoggerCounters() = default;
-
- void IncActorMsgs() override {
- ++*ActorMsgs_;
- }
- void IncDirectMsgs() override {
- ++*DirectMsgs_;
- }
- void IncLevelRequests() override {
- ++*LevelRequests_;
- }
- void IncIgnoredMsgs() override {
- ++*IgnoredMsgs_;
- }
- void IncAlertMsgs() override {
- ++*AlertMsgs_;
- }
- void IncEmergMsgs() override {
- ++*EmergMsgs_;
- }
- void IncDroppedMsgs() override {
- DroppedMsgs_->Inc();
- }
-
- void GetOutputHtml(IOutputStream& str) override {
- HTML(str) {
- DIV_CLASS("row") {
- DIV_CLASS("col-md-12") {
- TAG(TH4) {
- str << "Counters" << Endl;
- }
- DynamicCounters->OutputHtml(str);
- }
- }
- }
- }
-
-private:
- NMonitoring::TDynamicCounters::TCounterPtr ActorMsgs_;
- NMonitoring::TDynamicCounters::TCounterPtr DirectMsgs_;
- NMonitoring::TDynamicCounters::TCounterPtr LevelRequests_;
- NMonitoring::TDynamicCounters::TCounterPtr IgnoredMsgs_;
- NMonitoring::TDynamicCounters::TCounterPtr AlertMsgs_;
- NMonitoring::TDynamicCounters::TCounterPtr EmergMsgs_;
- // Dropped while the logger backend was unavailable
- NMonitoring::TDynamicCounters::TCounterPtr DroppedMsgs_;
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> DynamicCounters;
-};
-
-class TLoggerMetrics : public ILoggerMetrics {
-public:
- TLoggerMetrics(std::shared_ptr<NMonitoring::TMetricRegistry> metrics)
- : Metrics(metrics)
- {
- ActorMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.actor_msgs"}});
- DirectMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.direct_msgs"}});
- LevelRequests_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.level_requests"}});
- IgnoredMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.ignored_msgs"}});
- DroppedMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.dropped_msgs"}});
-
- AlertMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.alert_msgs"}});
- EmergMsgs_ = Metrics->Rate(NMonitoring::TLabels{{"sensor", "logger.emerg_msgs"}});
- }
-
- ~TLoggerMetrics() = default;
-
- void IncActorMsgs() override {
- ActorMsgs_->Inc();
- }
- void IncDirectMsgs() override {
- DirectMsgs_->Inc();
- }
- void IncLevelRequests() override {
- LevelRequests_->Inc();
- }
- void IncIgnoredMsgs() override {
- IgnoredMsgs_->Inc();
- }
- void IncAlertMsgs() override {
- AlertMsgs_->Inc();
- }
- void IncEmergMsgs() override {
- EmergMsgs_->Inc();
- }
- void IncDroppedMsgs() override {
- DroppedMsgs_->Inc();
- }
-
- void GetOutputHtml(IOutputStream& str) override {
- HTML(str) {
- DIV_CLASS("row") {
- DIV_CLASS("col-md-12") {
- TAG(TH4) {
- str << "Metrics" << Endl;
- }
-                    // TODO: TMetricRegistry does not currently provide a GetOutputHtml function
- }
- }
- }
- }
-
-private:
- NMonitoring::TRate* ActorMsgs_;
- NMonitoring::TRate* DirectMsgs_;
- NMonitoring::TRate* LevelRequests_;
- NMonitoring::TRate* IgnoredMsgs_;
- NMonitoring::TRate* AlertMsgs_;
- NMonitoring::TRate* EmergMsgs_;
- // Dropped while the logger backend was unavailable
- NMonitoring::TRate* DroppedMsgs_;
-
- std::shared_ptr<NMonitoring::TMetricRegistry> Metrics;
-};
-}
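A small sketch of wiring up the counter-backed implementation removed above: TLoggerCounters registers its sensors ("ActorMsgs", "DirectMsgs", "IgnoredMsgs", ...) on the dynamic-counters group it is given, so the logger only needs the ILoggerMetrics increment calls.

#include <library/cpp/actors/core/log_metrics.h> // header removed by this commit
#include <util/stream/str.h>

void MetricsSketch() {
    auto group = MakeIntrusive<NMonitoring::TDynamicCounters>();
    NActors::TLoggerCounters metrics(group);

    metrics.IncActorMsgs();      // bumps the "ActorMsgs" counter
    metrics.IncIgnoredMsgs();    // bumps the "IgnoredMsgs" counter

    TStringStream html;
    metrics.GetOutputHtml(html); // renders the whole counter group as HTML
}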
diff --git a/library/cpp/actors/core/log_settings.cpp b/library/cpp/actors/core/log_settings.cpp
deleted file mode 100644
index fafaf892eb..0000000000
--- a/library/cpp/actors/core/log_settings.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-#include "log_settings.h"
-
-#include <util/stream/str.h>
-
-namespace NActors {
- namespace NLog {
- TSettings::TSettings(const TActorId& loggerActorId, const EComponent loggerComponent,
- EComponent minVal, EComponent maxVal, EComponentToStringFunc func,
- EPriority defPriority, EPriority defSamplingPriority,
- ui32 defSamplingRate, ui64 timeThresholdMs, ui64 bufferSizeLimitBytes)
- : LoggerActorId(loggerActorId)
- , LoggerComponent(loggerComponent)
- , TimeThresholdMs(timeThresholdMs)
- , BufferSizeLimitBytes(bufferSizeLimitBytes)
- , AllowDrop(true)
- , ThrottleDelay(TDuration::MilliSeconds(100))
- , MinVal(0)
- , MaxVal(0)
- , Mask(0)
- , DefPriority(defPriority)
- , DefSamplingPriority(defSamplingPriority)
- , DefSamplingRate(defSamplingRate)
- , UseLocalTimestamps(false)
- , Format(PLAIN_FULL_FORMAT)
- , ShortHostName("")
- , ClusterName("")
- {
- Append(minVal, maxVal, func);
- }
-
- TSettings::TSettings(const TActorId& loggerActorId, const EComponent loggerComponent,
- EPriority defPriority, EPriority defSamplingPriority,
- ui32 defSamplingRate, ui64 timeThresholdMs, ui64 bufferSizeLimitBytes)
- : LoggerActorId(loggerActorId)
- , LoggerComponent(loggerComponent)
- , TimeThresholdMs(timeThresholdMs)
- , BufferSizeLimitBytes(bufferSizeLimitBytes)
- , AllowDrop(true)
- , ThrottleDelay(TDuration::MilliSeconds(100))
- , MinVal(0)
- , MaxVal(0)
- , Mask(0)
- , DefPriority(defPriority)
- , DefSamplingPriority(defSamplingPriority)
- , DefSamplingRate(defSamplingRate)
- , UseLocalTimestamps(false)
- , Format(PLAIN_FULL_FORMAT)
- , ShortHostName("")
- , ClusterName("")
- {
- }
-
- void TSettings::Append(EComponent minVal, EComponent maxVal, EComponentToStringFunc func) {
- Y_ABORT_UNLESS(minVal >= 0, "NLog::TSettings: minVal must be non-negative");
- Y_ABORT_UNLESS(maxVal > minVal, "NLog::TSettings: maxVal must be greater than minVal");
-
- // update bounds
- if (!MaxVal || minVal < MinVal) {
- MinVal = minVal;
- }
-
- if (!MaxVal || maxVal > MaxVal) {
- MaxVal = maxVal;
-
- // expand ComponentNames to the new bounds
- auto oldMask = Mask;
- Mask = PowerOf2Mask(MaxVal);
-
- TArrayHolder<TAtomic> oldComponentInfo(new TAtomic[Mask + 1]);
- ComponentInfo.Swap(oldComponentInfo);
- int startVal = oldMask ? oldMask + 1 : 0;
- for (int i = 0; i < startVal; i++) {
- AtomicSet(ComponentInfo[i], AtomicGet(oldComponentInfo[i]));
- }
-
- TComponentSettings defSetting(DefPriority, DefSamplingPriority, DefSamplingRate);
- for (int i = startVal; i < Mask + 1; i++) {
- AtomicSet(ComponentInfo[i], defSetting.Raw.Data);
- }
-
- ComponentNames.resize(Mask + 1);
- }
-
- // assign new names but validate if newly added members were not used before
- for (int i = minVal; i <= maxVal; i++) {
- Y_ABORT_UNLESS(!ComponentNames[i], "component name at %d already set: %s",
- i, ComponentNames[i].data());
- ComponentNames[i] = func(i);
- }
- }
-
- int TSettings::SetLevelImpl(
- const TString& name, bool isSampling,
- EPriority priority, EComponent component, TString& explanation) {
- TString titleName(name);
- titleName.to_title();
-
- // check priority
- if (!IsValidPriority(priority)) {
- TStringStream str;
- str << "Invalid " << name;
- explanation = str.Str();
- return 1;
- }
-
- if (component == InvalidComponent) {
- for (int i = 0; i < Mask + 1; i++) {
- TComponentSettings settings = AtomicGet(ComponentInfo[i]);
- if (isSampling) {
- settings.Raw.X.SamplingLevel = priority;
- } else {
- settings.Raw.X.Level = priority;
- }
- AtomicSet(ComponentInfo[i], settings.Raw.Data);
- }
-
- TStringStream str;
-
- str << titleName
- << " for all components has been changed to "
- << PriorityToString(EPrio(priority));
- explanation = str.Str();
- return 0;
- } else {
- if (!IsValidComponent(component)) {
- explanation = "Invalid component";
- return 1;
- }
- TComponentSettings settings = AtomicGet(ComponentInfo[component]);
- EPriority oldPriority;
- if (isSampling) {
- oldPriority = (EPriority)settings.Raw.X.SamplingLevel;
- settings.Raw.X.SamplingLevel = priority;
- } else {
- oldPriority = (EPriority)settings.Raw.X.Level;
- settings.Raw.X.Level = priority;
- }
- AtomicSet(ComponentInfo[component], settings.Raw.Data);
- TStringStream str;
- str << titleName << " for the component " << ComponentNames[component]
- << " has been changed from " << PriorityToString(EPrio(oldPriority))
- << " to " << PriorityToString(EPrio(priority));
- explanation = str.Str();
- return 0;
- }
- }
-
- int TSettings::SetLevel(EPriority priority, EComponent component, TString& explanation) {
- return SetLevelImpl("priority", false,
- priority, component, explanation);
- }
-
- int TSettings::SetSamplingLevel(EPriority priority, EComponent component, TString& explanation) {
- return SetLevelImpl("sampling priority", true,
- priority, component, explanation);
- }
-
- int TSettings::SetSamplingRate(ui32 sampling, EComponent component, TString& explanation) {
- if (component == InvalidComponent) {
- for (int i = 0; i < Mask + 1; i++) {
- TComponentSettings settings = AtomicGet(ComponentInfo[i]);
- settings.Raw.X.SamplingRate = sampling;
- AtomicSet(ComponentInfo[i], settings.Raw.Data);
- }
- TStringStream str;
- str << "Sampling rate for all components has been changed to " << sampling;
- explanation = str.Str();
- } else {
- if (!IsValidComponent(component)) {
- explanation = "Invalid component";
- return 1;
- }
- TComponentSettings settings = AtomicGet(ComponentInfo[component]);
- ui32 oldSampling = settings.Raw.X.SamplingRate;
- settings.Raw.X.SamplingRate = sampling;
- AtomicSet(ComponentInfo[component], settings.Raw.Data);
- TStringStream str;
- str << "Sampling rate for the component " << ComponentNames[component]
- << " has been changed from " << oldSampling
- << " to " << sampling;
- explanation = str.Str();
- }
- return 0;
- }
-
- int TSettings::PowerOf2Mask(int val) {
- int mask = 1;
- while ((val & mask) != val) {
- mask <<= 1;
- mask |= 1;
- }
- return mask;
- }
-
- bool TSettings::IsValidPriority(EPriority priority) {
- return priority == PRI_EMERG || priority == PRI_ALERT ||
- priority == PRI_CRIT || priority == PRI_ERROR ||
- priority == PRI_WARN || priority == PRI_NOTICE ||
- priority == PRI_INFO || priority == PRI_DEBUG || priority == PRI_TRACE;
- }
-
- bool TSettings::IsValidComponent(EComponent component) {
- return (MinVal <= component) && (component <= MaxVal) && !ComponentNames[component].empty();
- }
-
- void TSettings::SetAllowDrop(bool val) {
- AllowDrop = val;
- }
-
- void TSettings::SetThrottleDelay(TDuration value) {
- ThrottleDelay = value;
- }
-
- void TSettings::SetUseLocalTimestamps(bool value) {
- UseLocalTimestamps = value;
- }
-
- EComponent TSettings::FindComponent(const TStringBuf& componentName) const {
- if (componentName.empty())
- return InvalidComponent;
-
- for (EComponent component = MinVal; component <= MaxVal; ++component) {
- if (ComponentNames[component] == componentName)
- return component;
- }
-
- return InvalidComponent;
- }
-
- }
-
-}
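A worked example for PowerOf2Mask above: it returns the smallest mask of the form 2^k - 1 that covers every bit of the input, which is what lets GetComponentSettings index ComponentInfo[component & Mask] without ever leaving the array.

#include <library/cpp/actors/core/log_settings.h> // header removed by this commit
#include <util/system/yassert.h>

void PowerOf2MaskExample() {
    using NActors::NLog::TSettings;
    Y_ABORT_UNLESS(TSettings::PowerOf2Mask(5) == 7);     // 0b101       -> 0b111
    Y_ABORT_UNLESS(TSettings::PowerOf2Mask(8) == 15);    // 0b1000      -> 0b1111
    Y_ABORT_UNLESS(TSettings::PowerOf2Mask(400) == 511); // 0b110010000 -> 0b111111111
}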
diff --git a/library/cpp/actors/core/log_settings.h b/library/cpp/actors/core/log_settings.h
deleted file mode 100644
index f62f55c200..0000000000
--- a/library/cpp/actors/core/log_settings.h
+++ /dev/null
@@ -1,175 +0,0 @@
-#pragma once
-
-#include "log_iface.h"
-#include <util/generic/vector.h>
-#include <util/digest/murmur.h>
-#include <util/random/easy.h>
-
-namespace NActors {
- namespace NLog {
- inline const char* PriorityToString(EPrio priority) {
- switch (priority) {
- case EPrio::Emerg:
- return "EMERG";
- case EPrio::Alert:
- return "ALERT";
- case EPrio::Crit:
- return "CRIT";
- case EPrio::Error:
- return "ERROR";
- case EPrio::Warn:
- return "WARN";
- case EPrio::Notice:
- return "NOTICE";
- case EPrio::Info:
- return "INFO";
- case EPrio::Debug:
- return "DEBUG";
- case EPrio::Trace:
- return "TRACE";
- default:
- return "UNKNOWN";
- }
- }
-
-        // You can structure your program to have multiple logical components.
-        // In this case you can set different log priorities for different
-        // components, and you can change a component's priority while the system
-        // is running. Suspect a component has a bug? Turn the DEBUG priority level
-        // on for that component.
-        static const int InvalidComponent = -1;
-
-        // Function that converts an EComponent id to its string name
- using EComponentToStringFunc = std::function<const TString&(EComponent)>;
-
- // Log settings
- struct TComponentSettings {
- union {
- struct {
- ui32 SamplingRate;
- ui8 SamplingLevel;
- ui8 Level;
- } X;
-
- ui64 Data;
- } Raw;
-
- TComponentSettings(TAtomicBase data) {
- Raw.Data = (ui64)data;
- }
-
- TComponentSettings(ui8 level, ui8 samplingLevel, ui32 samplingRate) {
- Raw.X.Level = level;
- Raw.X.SamplingLevel = samplingLevel;
- Raw.X.SamplingRate = samplingRate;
- }
- };
-
- struct TSettings: public TThrRefBase {
- public:
- TActorId LoggerActorId;
- EComponent LoggerComponent;
- ui64 TimeThresholdMs;
- ui64 BufferSizeLimitBytes;
- bool AllowDrop;
- TDuration ThrottleDelay;
- TArrayHolder<TAtomic> ComponentInfo;
- TVector<TString> ComponentNames;
- EComponent MinVal;
- EComponent MaxVal;
- EComponent Mask;
- EPriority DefPriority;
- EPriority DefSamplingPriority;
- ui32 DefSamplingRate;
- bool UseLocalTimestamps;
-
- enum ELogFormat {
- PLAIN_FULL_FORMAT,
- PLAIN_SHORT_FORMAT,
- JSON_FORMAT
- };
- ELogFormat Format;
- TString ShortHostName;
- TString ClusterName;
- TString MessagePrefix;
-
-            // The best way to provide minVal, maxVal and func is to have a
-            // protobuf enumeration of components. In that case protoc
-            // automatically generates YOURTYPE_MIN, YOURTYPE_MAX and
-            // YOURTYPE_Name for you.
- TSettings(const TActorId& loggerActorId, const EComponent loggerComponent,
- EComponent minVal, EComponent maxVal, EComponentToStringFunc func,
- EPriority defPriority, EPriority defSamplingPriority = PRI_DEBUG,
- ui32 defSamplingRate = 0, ui64 timeThresholdMs = 1000, ui64 bufferSizeLimitBytes = 1024 * 1024 * 300);
-
- TSettings(const TActorId& loggerActorId, const EComponent loggerComponent,
- EPriority defPriority, EPriority defSamplingPriority = PRI_DEBUG,
- ui32 defSamplingRate = 0, ui64 timeThresholdMs = 1000, ui64 bufferSizeLimitBytes = 1024 * 1024 * 300);
-
- void Append(EComponent minVal, EComponent maxVal, EComponentToStringFunc func);
-
- template <typename T>
- void Append(T minVal, T maxVal, const TString& (*func)(T)) {
- Append(
- static_cast<EComponent>(minVal),
- static_cast<EComponent>(maxVal),
- [func](EComponent c) -> const TString& {
- return func(static_cast<T>(c));
- }
- );
- }
-
- inline bool Satisfies(EPriority priority, EComponent component, ui64 sampleBy = 0) const {
- // by using Mask we don't get outside of array boundaries
- TComponentSettings settings = GetComponentSettings(component);
- if (priority > settings.Raw.X.Level) {
- if (priority > settings.Raw.X.SamplingLevel) {
- return false; // priority > both levels ==> do not log
- }
- // priority <= sampling level ==> apply sampling
- ui32 samplingRate = settings.Raw.X.SamplingRate;
- if (samplingRate) {
- ui32 samplingValue = sampleBy ? MurmurHash<ui32>((const char*)&sampleBy, sizeof(sampleBy))
- : samplingRate != 1 ? RandomNumber<ui32>() : 0;
- return (samplingValue % samplingRate == 0);
- } else {
- // sampling rate not set ==> do not log
- return false;
- }
- } else {
- // priority <= log level ==> log
- return true;
- }
- }
-
- inline TComponentSettings GetComponentSettings(EComponent component) const {
- Y_DEBUG_ABORT_UNLESS((component & Mask) == component);
- // by using Mask we don't get outside of array boundaries
- return TComponentSettings(AtomicGet(ComponentInfo[component & Mask]));
- }
-
- const char* ComponentName(EComponent component) const {
- Y_DEBUG_ABORT_UNLESS((component & Mask) == component);
- return ComponentNames[component & Mask].data();
- }
-
- int SetLevel(EPriority priority, EComponent component, TString& explanation);
- int SetSamplingLevel(EPriority priority, EComponent component, TString& explanation);
- int SetSamplingRate(ui32 sampling, EComponent component, TString& explanation);
- EComponent FindComponent(const TStringBuf& componentName) const;
- static int PowerOf2Mask(int val);
- static bool IsValidPriority(EPriority priority);
- bool IsValidComponent(EComponent component);
- void SetAllowDrop(bool val);
- void SetThrottleDelay(TDuration value);
- void SetUseLocalTimestamps(bool value);
-
- private:
- int SetLevelImpl(
- const TString& name, bool isSampling,
- EPriority priority, EComponent component, TString& explanation);
- };
-
- }
-
-}
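A short sketch of how the Satisfies logic above is typically consulted. It assumes component 0 has been registered through Append (as the unit tests below do); traceId is any stable key the caller wants sampling decisions to hash on.

#include <library/cpp/actors/core/log_settings.h> // header removed by this commit

bool ShouldLog(const NActors::NLog::TSettings& settings, ui64 traceId) {
    using namespace NActors::NLog;

    // Plain check: passes whenever PRI_INFO is at or below the configured Level.
    if (settings.Satisfies(EPriority::PRI_INFO, /*component=*/0)) {
        return true;
    }

    // Sampled check: a priority above Level but within SamplingLevel is kept for
    // roughly one in SamplingRate calls; hashing on traceId makes the decision
    // deterministic, so all records sharing that key are sampled together.
    return settings.Satisfies(EPriority::PRI_DEBUG, /*component=*/0, /*sampleBy=*/traceId);
}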
diff --git a/library/cpp/actors/core/log_ut.cpp b/library/cpp/actors/core/log_ut.cpp
deleted file mode 100644
index 995e3c4121..0000000000
--- a/library/cpp/actors/core/log_ut.cpp
+++ /dev/null
@@ -1,251 +0,0 @@
-#include "log.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-
-using namespace NMonitoring;
-using namespace NActors;
-using namespace NActors::NLog;
-
-namespace {
- const TString& ServiceToString(int) {
- static const TString FAKE{"FAKE"};
- return FAKE;
- }
-
- TIntrusivePtr<TSettings> DefaultSettings() {
- auto loggerId = TActorId{0, "Logger"};
- auto s = MakeIntrusive<TSettings>(loggerId, 0, EPriority::PRI_TRACE);
- s->SetAllowDrop(false);
- s->Append(0, 1, ServiceToString);
- return s;
- }
-
- TIntrusivePtr<TSettings> DroppingSettings(ui64 timeThresholdMs) {
- auto loggerId = TActorId{0, "Logger"};
- auto s = MakeIntrusive<TSettings>(
- loggerId,
- 0,
- EPriority::PRI_TRACE,
- EPriority::PRI_DEBUG,
- (ui32)0,
- timeThresholdMs,
- (ui64)0);
- s->Append(0, 1, ServiceToString);
- return s;
- }
-
- TIntrusivePtr<TSettings> BufferSettings(ui64 bufferSizeLimitBytes) {
- auto loggerId = TActorId{0, "Logger"};
- auto s = MakeIntrusive<TSettings>(
- loggerId,
- 0,
- EPriority::PRI_TRACE,
- EPriority::PRI_DEBUG,
- (ui32)0,
- (ui32)0,
- bufferSizeLimitBytes);
- s->Append(0, 1, ServiceToString);
- s->SetAllowDrop(true);
- return s;
- }
-
- TIntrusivePtr<TSettings> NoBufferSettings() {
- return BufferSettings(0);
- }
-
- class TMockBackend: public TLogBackend {
- public:
- using TWriteImpl = std::function<void(const TLogRecord&)>;
- using TReopenImpl = std::function<void()>;
-
- static void REOPEN_NOP() { }
-
- TMockBackend(TWriteImpl writeImpl, TReopenImpl reopenImpl = REOPEN_NOP)
- : WriteImpl_{writeImpl}
- , ReopenImpl_{reopenImpl}
- {
- }
-
- void WriteData(const TLogRecord& r) override {
- WriteImpl_(r);
- }
-
- void ReopenLog() override { }
-
- void SetWriteImpl(TWriteImpl writeImpl) {
- WriteImpl_ = writeImpl;
- }
-
- private:
- TWriteImpl WriteImpl_;
- TReopenImpl ReopenImpl_;
- };
-
- void ThrowAlways(const TLogRecord&) {
- ythrow yexception();
- };
-
- struct TFixture {
- TFixture(
- TIntrusivePtr<TSettings> settings,
- TMockBackend::TWriteImpl writeImpl = ThrowAlways)
- {
- Runtime.Initialize();
- LogBackend.reset(new TMockBackend{writeImpl});
- LoggerActor = Runtime.Register(new TLoggerActor{settings, LogBackend, Counters});
- Runtime.SetScheduledEventFilter([] (auto&&, auto&&, auto&&, auto) {
- return false;
- });
- }
-
- TFixture(TMockBackend::TWriteImpl writeImpl = ThrowAlways)
- : TFixture(DefaultSettings(), writeImpl)
- {}
-
- void WriteLog() {
- Runtime.Send(new IEventHandle{LoggerActor, {}, new TEvLog(TInstant::Zero(), TLevel{EPrio::Emerg}, 0, "foo")});
- }
-
- void WriteLog(TInstant ts, EPrio prio = EPrio::Emerg, TString msg = "foo") {
- Runtime.Send(new IEventHandle{LoggerActor, {}, new TEvLog(ts, TLevel{prio}, 0, msg)});
- }
-
- void FlushLogBuffer() {
- Runtime.Send(new IEventHandle{LoggerActor, {}, new TFlushLogBuffer()});
- }
-
- void Wakeup() {
- Runtime.Send(new IEventHandle{LoggerActor, {}, new TEvents::TEvWakeup});
- }
-
- TIntrusivePtr<TDynamicCounters> Counters{MakeIntrusive<TDynamicCounters>()};
- std::shared_ptr<TMockBackend> LogBackend;
- TActorId LoggerActor;
- TTestActorRuntimeBase Runtime;
- };
-}
-
-
-Y_UNIT_TEST_SUITE(TLoggerActorTest) {
- Y_UNIT_TEST(NoCrashOnWriteFailure) {
- TFixture test;
- test.WriteLog();
- // everything is okay as long as we get here
- }
-
- Y_UNIT_TEST(SubsequentWritesAreIgnored) {
- size_t count{0};
- auto countWrites = [&count] (auto&& r) {
- count++;
- ThrowAlways(r);
- };
-
- TFixture test{countWrites};
- test.WriteLog();
- UNIT_ASSERT_VALUES_EQUAL(count, 1);
-
- // at this point we should have started dropping messages
- for (auto i = 0; i < 5; ++i) {
- test.WriteLog();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(count, 1);
- }
-
- Y_UNIT_TEST(LoggerCanRecover) {
- TFixture test;
- test.WriteLog();
-
- TVector<TString> messages;
- auto acceptWrites = [&] (const TLogRecord& r) {
- messages.emplace_back(r.Data, r.Len);
- };
-
- auto scheduled = test.Runtime.CaptureScheduledEvents();
- UNIT_ASSERT_VALUES_EQUAL(scheduled.size(), 1);
-
- test.LogBackend->SetWriteImpl(acceptWrites);
- test.Wakeup();
-
- const auto COUNT = 10;
- for (auto i = 0; i < COUNT; ++i) {
- test.WriteLog();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(messages.size(), COUNT);
- }
-
- Y_UNIT_TEST(ShouldObeyTimeThresholdMsWhenOverloaded) {
- TFixture test{DroppingSettings(5000)};
-
- TVector<TString> messages;
- auto acceptWrites = [&] (const TLogRecord& r) {
- messages.emplace_back(r.Data, r.Len);
- };
-
- test.LogBackend->SetWriteImpl(acceptWrites);
- test.Wakeup();
-
- const auto COUNT = 11;
- for (auto i = 0; i < COUNT; ++i) {
- test.WriteLog();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(messages.size(), COUNT);
-
- test.Runtime.AdvanceCurrentTime(TDuration::Seconds(20));
- auto now = test.Runtime.GetCurrentTime();
-
- test.WriteLog(now - TDuration::Seconds(5));
-
- UNIT_ASSERT_VALUES_EQUAL(messages.size(), COUNT + 1);
-
- test.WriteLog(now - TDuration::Seconds(6));
-
- UNIT_ASSERT_VALUES_EQUAL(messages.size(), COUNT + 1);
- }
-
- int BufferTest(TFixture &test, const int COUNT) {
- TVector<TString> messages;
- auto acceptWrites = [&] (const TLogRecord& r) {
- messages.emplace_back(r.Data, r.Len);
- };
-
- test.LogBackend->SetWriteImpl(acceptWrites);
- test.Wakeup();
- test.Runtime.AdvanceCurrentTime(TDuration::Days(1));
- auto now = test.Runtime.GetCurrentTime();
-
- for (auto i = 0; i < COUNT; ++i) {
- test.WriteLog(now - TDuration::Seconds(10), EPrio::Debug, std::to_string(i));
- }
-
- for (auto i = 0; i < COUNT; ++i) {
- test.FlushLogBuffer();
- }
-
- for (ui64 i = 0; i < messages.size(); ++i) {
- Cerr << messages[i] << Endl;
- }
-
- return messages.size();
- }
-
- Y_UNIT_TEST(ShouldUseLogBufferWhenOverloaded) {
- TFixture test{BufferSettings(1024 * 1024 * 300)};
- const auto LOG_COUNT = 100;
- auto outputLogSize = BufferTest(test, LOG_COUNT);
-
- UNIT_ASSERT_VALUES_EQUAL(outputLogSize, LOG_COUNT);
- }
-
- Y_UNIT_TEST(ShouldLoseLogsIfBufferZeroSize) {
- TFixture test{NoBufferSettings()};
- const auto LOG_COUNT = 100;
- auto outputLogSize = BufferTest(test, LOG_COUNT);
-
- UNIT_ASSERT(outputLogSize < LOG_COUNT);
- }
-}
diff --git a/library/cpp/actors/core/mailbox.cpp b/library/cpp/actors/core/mailbox.cpp
deleted file mode 100644
index d11ff9cbcb..0000000000
--- a/library/cpp/actors/core/mailbox.cpp
+++ /dev/null
@@ -1,590 +0,0 @@
-#include "mailbox.h"
-#include "actorsystem.h"
-#include "actor.h"
-
-#include <library/cpp/actors/util/datetime.h>
-
-#include <util/system/sanitizers.h>
-
-namespace NActors {
- TMailboxTable::TMailboxTable()
- : LastAllocatedLine(0)
- , AllocatedMailboxCount(0)
- , CachedSimpleMailboxes(0)
- , CachedRevolvingMailboxes(0)
- , CachedHTSwapMailboxes(0)
- , CachedReadAsFilledMailboxes(0)
- , CachedTinyReadAsFilledMailboxes(0)
- {
- memset((void*)Lines, 0, sizeof(Lines));
- }
-
- bool IsGoodForCleanup(const TMailboxHeader* header) {
- switch (AtomicLoad(&header->ExecutionState)) {
- case TMailboxHeader::TExecutionState::Inactive:
- case TMailboxHeader::TExecutionState::Scheduled:
- return true;
- case TMailboxHeader::TExecutionState::Leaving:
- case TMailboxHeader::TExecutionState::Executing:
- case TMailboxHeader::TExecutionState::LeavingMarked:
- return false;
- case TMailboxHeader::TExecutionState::Free:
- case TMailboxHeader::TExecutionState::FreeScheduled:
- return true;
- case TMailboxHeader::TExecutionState::FreeLeaving:
- case TMailboxHeader::TExecutionState::FreeExecuting:
- case TMailboxHeader::TExecutionState::FreeLeavingMarked:
- return false;
- default:
- Y_ABORT();
- }
- }
-
- template <typename TMailbox>
- void DestructMailboxLine(ui8* begin, ui8* end) {
- const ui32 sx = TMailbox::AlignedSize();
- for (ui8* x = begin; x + sx <= end; x += sx) {
- TMailbox* mailbox = reinterpret_cast<TMailbox*>(x);
- Y_ABORT_UNLESS(IsGoodForCleanup(mailbox));
- mailbox->ExecutionState = Max<ui32>();
- mailbox->~TMailbox();
- }
- }
-
- template <typename TMailbox>
- bool CleanupMailboxLine(ui8* begin, ui8* end) {
- const ui32 sx = TMailbox::AlignedSize();
- bool done = true;
- for (ui8* x = begin; x + sx <= end; x += sx) {
- TMailbox* mailbox = reinterpret_cast<TMailbox*>(x);
- Y_ABORT_UNLESS(IsGoodForCleanup(mailbox));
- done &= mailbox->CleanupActors() && mailbox->CleanupEvents();
- }
- return done;
- }
-
- TMailboxTable::~TMailboxTable() {
- // on cleanup we must traverse everything and free stuff
- for (ui32 i = 0; i < LastAllocatedLine; ++i) {
- if (TMailboxLineHeader* lineHeader = Lines[i]) {
- switch (lineHeader->MailboxType) {
- case TMailboxType::Simple:
- DestructMailboxLine<TSimpleMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::Revolving:
- DestructMailboxLine<TRevolvingMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::HTSwap:
- DestructMailboxLine<THTSwapMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::ReadAsFilled:
- DestructMailboxLine<TReadAsFilledMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::TinyReadAsFilled:
- DestructMailboxLine<TTinyReadAsFilledMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- default:
- Y_ABORT();
- }
-
- lineHeader->~TMailboxLineHeader();
- free(lineHeader);
- Lines[i] = nullptr;
- }
- }
-
- while (MailboxCacheSimple.Pop(0))
- ;
- while (MailboxCacheRevolving.Pop(0))
- ;
- while (MailboxCacheHTSwap.Pop(0))
- ;
- while (MailboxCacheReadAsFilled.Pop(0))
- ;
- while (MailboxCacheTinyReadAsFilled.Pop(0))
- ;
- }
-
- bool TMailboxTable::Cleanup() {
- bool done = true;
- for (ui32 i = 0; i < LastAllocatedLine; ++i) {
- if (TMailboxLineHeader* lineHeader = Lines[i]) {
- switch (lineHeader->MailboxType) {
- case TMailboxType::Simple:
- done &= CleanupMailboxLine<TSimpleMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::Revolving:
- done &= CleanupMailboxLine<TRevolvingMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::HTSwap:
- done &= CleanupMailboxLine<THTSwapMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::ReadAsFilled:
- done &= CleanupMailboxLine<TReadAsFilledMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- case TMailboxType::TinyReadAsFilled:
- done &= CleanupMailboxLine<TTinyReadAsFilledMailbox>((ui8*)lineHeader + 64, (ui8*)lineHeader + LineSize);
- break;
- default:
- Y_ABORT();
- }
- }
- }
- return done;
- }
-
- TMailboxHeader* TMailboxTable::Get(ui32 hint) {
- // get line
- const ui32 lineIndex = (hint & LineIndexMask) >> LineIndexShift;
- const ui32 lineHint = hint & LineHintMask;
-
- Y_ABORT_UNLESS((lineIndex < MaxLines) && (lineHint < LineSize / 64));
- if (lineHint == 0)
- return nullptr;
-
- if (TMailboxLineHeader* const x = AtomicLoad(Lines + lineIndex)) {
- switch (x->MailboxType) {
- case TMailboxType::Simple:
- return TSimpleMailbox::Get(lineHint, x);
- case TMailboxType::Revolving:
- return TRevolvingMailbox::Get(lineHint, x);
- case TMailboxType::HTSwap:
- return THTSwapMailbox::Get(lineHint, x);
- case TMailboxType::ReadAsFilled:
- return TReadAsFilledMailbox::Get(lineHint, x);
- case TMailboxType::TinyReadAsFilled:
- return TTinyReadAsFilledMailbox::Get(lineHint, x);
- default:
- Y_DEBUG_ABORT_UNLESS(false);
- break;
- }
- }
-
- return nullptr;
- }
-
- template <TMailboxTable::TEPScheduleActivationFunction EPSpecificScheduleActivation>
- bool TMailboxTable::GenericSendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool) {
- const TActorId& recipient = ev->GetRecipientRewrite();
- const ui32 hint = recipient.Hint();
-
- // copy-paste from Get to avoid duplicated type-switches
- const ui32 lineIndex = (hint & LineIndexMask) >> LineIndexShift;
- const ui32 lineHint = hint & LineHintMask;
-
- Y_ABORT_UNLESS((lineIndex < MaxLines) && (lineHint < LineSize / 64));
- if (lineHint == 0)
- return false;
-
- if (TMailboxLineHeader* const x = AtomicLoad(Lines + lineIndex)) {
- switch (x->MailboxType) {
- case TMailboxType::Simple: {
- TSimpleMailbox* const mailbox = TSimpleMailbox::Get(lineHint, x);
- mailbox->Push(recipient.LocalId());
-#if (!defined(_tsan_enabled_))
- Y_DEBUG_ABORT_UNLESS(mailbox->Type == (ui32)x->MailboxType);
-#endif
- mailbox->Queue.Push(ev.Release());
- if (mailbox->MarkForSchedule()) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- (executorPool->*EPSpecificScheduleActivation)(hint);
- }
- }
- return true;
- case TMailboxType::Revolving: {
-                    // The actor id could be stale and coming from a different machine. If the local process has restarted,
-                    // the stale actor id coming from a remote machine might reference an actor with a simple mailbox,
-                    // which is smaller than a revolving mailbox. In such cases the 'lineHint' index might be greater than the
-                    // actual array size. Normally it is ok to store a stale event in another actor's valid mailbox, because
-                    // Receive will compare the receiver actor id and discard the stale event. But in this case we should
-                    // discard the event right away instead of trying to enqueue it into a mailbox at an invalid address.
-                    // NOTE: lineHint is 1-based
- static_assert(TSimpleMailbox::AlignedSize() <= TRevolvingMailbox::AlignedSize(),
- "We expect that one line can store more simple mailboxes than revolving mailboxes");
- if (lineHint > TRevolvingMailbox::MaxMailboxesInLine())
- return false;
-
- TRevolvingMailbox* const mailbox = TRevolvingMailbox::Get(lineHint, x);
- mailbox->Push(recipient.LocalId());
-#if (!defined(_tsan_enabled_))
- Y_DEBUG_ABORT_UNLESS(mailbox->Type == (ui32)x->MailboxType);
-#endif
- mailbox->QueueWriter.Push(ev.Release());
- if (mailbox->MarkForSchedule()) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- (executorPool->*EPSpecificScheduleActivation)(hint);
- }
- }
- return true;
- case TMailboxType::HTSwap: {
- THTSwapMailbox* const mailbox = THTSwapMailbox::Get(lineHint, x);
- mailbox->Push(recipient.LocalId());
-#if (!defined(_tsan_enabled_))
- Y_DEBUG_ABORT_UNLESS(mailbox->Type == (ui32)x->MailboxType);
-#endif
- mailbox->Queue.Push(ev.Release());
- if (mailbox->MarkForSchedule()) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- (executorPool->*EPSpecificScheduleActivation)(hint);
- }
- }
- return true;
- case TMailboxType::ReadAsFilled: {
- if (lineHint > TReadAsFilledMailbox::MaxMailboxesInLine())
- return false;
-
- TReadAsFilledMailbox* const mailbox = TReadAsFilledMailbox::Get(lineHint, x);
- mailbox->Push(recipient.LocalId());
-#if (!defined(_tsan_enabled_))
- Y_DEBUG_ABORT_UNLESS(mailbox->Type == (ui32)x->MailboxType);
-#endif
- mailbox->Queue.Push(ev.Release());
- if (mailbox->MarkForSchedule()) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- (executorPool->*EPSpecificScheduleActivation)(hint);
- }
- }
- return true;
- case TMailboxType::TinyReadAsFilled: {
- if (lineHint > TTinyReadAsFilledMailbox::MaxMailboxesInLine())
- return false;
-
- TTinyReadAsFilledMailbox* const mailbox = TTinyReadAsFilledMailbox::Get(lineHint, x);
- mailbox->Push(recipient.LocalId());
-#if (!defined(_tsan_enabled_))
- Y_DEBUG_ABORT_UNLESS(mailbox->Type == (ui32)x->MailboxType);
-#endif
- mailbox->Queue.Push(ev.Release());
- if (mailbox->MarkForSchedule()) {
- RelaxedStore<NHPTimer::STime>(&mailbox->ScheduleMoment, GetCycleCountFast());
- (executorPool->*EPSpecificScheduleActivation)(hint);
- }
- }
- return true;
- default:
- Y_ABORT("unknown mailbox type");
- }
- }
-
- return false;
- }
-
- ui32 TMailboxTable::AllocateMailbox(TMailboxType::EType type, ui64 revolvingCounter) {
- ui32 x = TryAllocateMailbox(type, revolvingCounter);
- if (x == 0)
- x = AllocateNewLine(type);
- return x;
- }
-
- ui32 TMailboxTable::TryAllocateMailbox(TMailboxType::EType type, ui64 revolvingCounter) {
- switch (type) {
- case TMailboxType::Simple:
- do {
- if (ui32 ret = MailboxCacheSimple.Pop(revolvingCounter)) {
- AtomicDecrement(CachedSimpleMailboxes);
- return ret;
- }
- } while (AtomicGet(CachedSimpleMailboxes) > (MailboxCacheSimple.Concurrency * 512));
- return 0;
- case TMailboxType::Revolving:
- do {
- if (ui32 ret = MailboxCacheRevolving.Pop(revolvingCounter)) {
- AtomicDecrement(CachedRevolvingMailboxes);
- return ret;
- }
- } while (AtomicGet(CachedRevolvingMailboxes) > (MailboxCacheRevolving.Concurrency * 512));
- return 0;
- case TMailboxType::HTSwap:
- do {
- if (ui32 ret = MailboxCacheHTSwap.Pop(revolvingCounter)) {
- AtomicDecrement(CachedHTSwapMailboxes);
- return ret;
- }
- } while (AtomicGet(CachedHTSwapMailboxes) > (MailboxCacheHTSwap.Concurrency * 512));
- return 0;
- case TMailboxType::ReadAsFilled:
- do {
- if (ui32 ret = MailboxCacheReadAsFilled.Pop(revolvingCounter)) {
- AtomicDecrement(CachedReadAsFilledMailboxes);
- return ret;
- }
- } while (AtomicGet(CachedReadAsFilledMailboxes) > (MailboxCacheReadAsFilled.Concurrency * 512));
- return 0;
- case TMailboxType::TinyReadAsFilled:
- do {
- if (ui32 ret = MailboxCacheTinyReadAsFilled.Pop(revolvingCounter)) {
- AtomicDecrement(CachedTinyReadAsFilledMailboxes);
- return ret;
- }
- } while (AtomicGet(CachedTinyReadAsFilledMailboxes) > (MailboxCacheTinyReadAsFilled.Concurrency * 512));
- return 0;
- default:
- Y_ABORT("Unknown mailbox type");
- }
- }
-
-
- template
- bool TMailboxTable::GenericSendTo<&IExecutorPool::ScheduleActivation>(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool);
- template
- bool TMailboxTable::GenericSendTo<&IExecutorPool::SpecificScheduleActivation>(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool);
-
- void TMailboxTable::ReclaimMailbox(TMailboxType::EType type, ui32 hint, ui64 revolvingCounter) {
- if (hint != 0) {
- switch (type) {
- case TMailboxType::Simple:
- MailboxCacheSimple.Push(hint, revolvingCounter);
- AtomicIncrement(CachedSimpleMailboxes);
- break;
- case TMailboxType::Revolving:
- MailboxCacheRevolving.Push(hint, revolvingCounter);
- AtomicIncrement(CachedRevolvingMailboxes);
- break;
- case TMailboxType::HTSwap:
- MailboxCacheHTSwap.Push(hint, revolvingCounter);
- AtomicIncrement(CachedHTSwapMailboxes);
- break;
- case TMailboxType::ReadAsFilled:
- MailboxCacheReadAsFilled.Push(hint, revolvingCounter);
- AtomicIncrement(CachedReadAsFilledMailboxes);
- break;
- case TMailboxType::TinyReadAsFilled:
- MailboxCacheTinyReadAsFilled.Push(hint, revolvingCounter);
- AtomicIncrement(CachedTinyReadAsFilledMailboxes);
- break;
- default:
- Y_ABORT();
- }
- }
- }
-
- TMailboxHeader::TMailboxHeader(TMailboxType::EType type)
- : ExecutionState(TExecutionState::Free)
- , Reserved(0)
- , Type(type)
- , ActorPack(TMailboxActorPack::Simple)
- , Knobs(0)
- {
- ActorsInfo.Simple.ActorId = 0;
- ActorsInfo.Simple.Actor = nullptr;
- }
-
- TMailboxHeader::~TMailboxHeader() {
- CleanupActors();
- }
-
- bool TMailboxHeader::CleanupActors() {
- bool done = true;
- switch (ActorPack) {
- case TMailboxActorPack::Simple: {
- if (ActorsInfo.Simple.ActorId != 0) {
- delete ActorsInfo.Simple.Actor;
- done = false;
- }
- break;
- }
- case TMailboxActorPack::Map: {
- for (auto& [actorId, actor] : *ActorsInfo.Map.ActorsMap) {
- delete actor;
- }
- delete ActorsInfo.Map.ActorsMap;
- done = false;
- break;
- }
- case TMailboxActorPack::Array: {
- for (ui64 i = 0; i < ActorsInfo.Array.ActorsCount; ++i) {
- delete ActorsInfo.Array.ActorsArray->Actors[i].Actor;
- }
- delete ActorsInfo.Array.ActorsArray;
- done = false;
- break;
- }
- }
- ActorPack = TMailboxActorPack::Simple;
- ActorsInfo.Simple.ActorId = 0;
- ActorsInfo.Simple.Actor = nullptr;
- return done;
- }
-
- std::pair<ui32, ui32> TMailboxHeader::CountMailboxEvents(ui64 localActorId, ui32 maxTraverse) {
- switch (Type) {
- case TMailboxType::Simple:
- return static_cast<TMailboxTable::TSimpleMailbox*>(this)->CountSimpleMailboxEvents(localActorId, maxTraverse);
- case TMailboxType::Revolving:
- return static_cast<TMailboxTable::TRevolvingMailbox*>(this)->CountRevolvingMailboxEvents(localActorId, maxTraverse);
- default:
- return {0, 0};
- }
- }
-
- TMailboxUsageImpl<true>::~TMailboxUsageImpl() {
- while (auto *e = PendingEventQueue.Pop()) {
- delete e;
- }
- }
-
- void TMailboxUsageImpl<true>::Push(ui64 localId) {
- PendingEventQueue.Push(new TPendingEvent{localId, GetCycleCountFast()});
- }
-
- void TMailboxUsageImpl<true>::ProcessEvents(TMailboxHeader *mailbox) {
- while (std::unique_ptr<TPendingEvent> e{PendingEventQueue.Pop()}) {
- if (IActor *actor = mailbox->FindActor(e->LocalId)) {
- actor->OnEnqueueEvent(e->Timestamp);
- }
- }
- }
-
- TMailboxTable::TSimpleMailbox::TSimpleMailbox()
- : TMailboxHeader(TMailboxType::Simple)
- , ScheduleMoment(0)
- {
- }
-
- TMailboxTable::TSimpleMailbox::~TSimpleMailbox() {
- CleanupEvents();
- }
-
- bool TMailboxTable::TSimpleMailbox::CleanupEvents() {
- const bool done = (Queue.Head() == nullptr);
- while (IEventHandle* ev = Queue.Pop())
- delete ev;
- return done;
- }
-
- std::pair<ui32, ui32> TMailboxTable::TSimpleMailbox::CountSimpleMailboxEvents(ui64 localActorId, ui32 maxTraverse) {
- ui32 local = 0;
- ui32 total = 0;
-
- auto it = Queue.ReadIterator();
- while (IEventHandle* x = it.Next()) {
- ++total;
- if (x->GetRecipientRewrite().LocalId() == localActorId)
- ++local;
- if (total >= maxTraverse)
- break;
- }
-
- return std::make_pair(local, total);
- }
-
- TMailboxTable::TRevolvingMailbox::TRevolvingMailbox()
- : TMailboxHeader(TMailboxType::Revolving)
- , QueueWriter(QueueReader)
- , Reserved1(0)
- , Reserved2(0)
- , ScheduleMoment(0)
- {
- }
-
- TMailboxTable::TRevolvingMailbox::~TRevolvingMailbox() {
- CleanupEvents();
- }
-
- bool TMailboxTable::TRevolvingMailbox::CleanupEvents() {
- const bool done = (QueueReader.Head() == nullptr);
- while (IEventHandle* ev = QueueReader.Pop())
- delete ev;
- return done;
- }
-
- std::pair<ui32, ui32> TMailboxTable::TRevolvingMailbox::CountRevolvingMailboxEvents(ui64 localActorId, ui32 maxTraverse) {
- ui32 local = 0;
- ui32 total = 0;
-
- auto it = QueueReader.Iterator();
-
- while (IEventHandle* x = it.Next()) {
- ++total;
- if (x->GetRecipientRewrite().LocalId() == localActorId)
- ++local;
- if (total >= maxTraverse)
- break;
- }
-
- return std::make_pair(local, total);
- }
-
- template <typename T>
- static ui32 InitNewLine(ui8* x, ui8* end) {
- const ui32 sx = T::AlignedSize();
-
- for (ui32 index = 1; x + sx <= end; x += sx, ++index)
- ::new (x) T();
-
- return sx;
- }
-
- ui32 TMailboxTable::AllocateNewLine(TMailboxType::EType type) {
- ui8* ptr = (ui8*)malloc(LineSize);
- ui8* end = ptr + LineSize;
-
- const ui32 lineIndex = (ui32)AtomicIncrement(LastAllocatedLine) - 1;
- const ui32 lineIndexMask = (lineIndex << LineIndexShift) & LineIndexMask;
-
- // first 64 bytes is TMailboxLineHeader
- TMailboxLineHeader* header = ::new (ptr) TMailboxLineHeader(type, lineIndex);
-
- ui8* x = ptr + 64;
- ui32 sx = 0;
- TMailboxCache* cache = nullptr;
- TAtomic* counter = nullptr;
-
- switch (type) {
- case TMailboxType::Simple:
- sx = InitNewLine<TSimpleMailbox>(x, end);
- cache = &MailboxCacheSimple;
- counter = &CachedSimpleMailboxes;
- break;
- case TMailboxType::Revolving:
- sx = InitNewLine<TRevolvingMailbox>(x, end);
- cache = &MailboxCacheRevolving;
- counter = &CachedRevolvingMailboxes;
- break;
- case TMailboxType::HTSwap:
- sx = InitNewLine<THTSwapMailbox>(x, end);
- cache = &MailboxCacheHTSwap;
- counter = &CachedHTSwapMailboxes;
- break;
- case TMailboxType::ReadAsFilled:
- sx = InitNewLine<TReadAsFilledMailbox>(x, end);
- cache = &MailboxCacheReadAsFilled;
- counter = &CachedReadAsFilledMailboxes;
- break;
- case TMailboxType::TinyReadAsFilled:
- sx = InitNewLine<TTinyReadAsFilledMailbox>(x, end);
- cache = &MailboxCacheTinyReadAsFilled;
- counter = &CachedTinyReadAsFilledMailboxes;
- break;
- default:
- Y_ABORT();
- }
-
- AtomicStore(Lines + lineIndex, header);
-
- ui32 ret = lineIndexMask | 1;
-
- ui32 index = 2;
- for (ui32 endIndex = LineSize / sx; index != endIndex;) {
- const ui32 bufSize = 8;
- ui32 buf[bufSize];
- ui32 bufIndex;
- for (bufIndex = 0; index != endIndex && bufIndex != bufSize; ++bufIndex, ++index)
- buf[bufIndex] = lineIndexMask | index;
- cache->PushBulk(buf, bufIndex, index);
- AtomicAdd(*counter, bufIndex);
- }
-
- AtomicAdd(AllocatedMailboxCount, index - 1);
-
- return ret;
- }
-
- bool TMailboxTable::SendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool) {
- return GenericSendTo<&IExecutorPool::ScheduleActivation>(ev, executorPool);
- }
-
- bool TMailboxTable::SpecificSendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool) {
- return GenericSendTo<&IExecutorPool::SpecificScheduleActivation>(ev, executorPool);
- }
-}
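SendTo and SpecificSendTo above share a single body by taking the executor-pool scheduling method as a compile-time member-function-pointer template parameter instead of a runtime argument. A standalone sketch of the same pattern; the names here are illustrative and not part of the library.

struct IPool {
    virtual ~IPool() = default;
    virtual void ScheduleActivation(unsigned hint) = 0;
    virtual void SpecificScheduleActivation(unsigned hint) = 0;
};

using TScheduleFn = void (IPool::*)(unsigned);

// One delivery body, parameterized by which scheduling entry point to call.
template <TScheduleFn Schedule>
void Deliver(IPool* pool, unsigned hint) {
    // ... enqueue the event into the mailbox selected by `hint` ...
    (pool->*Schedule)(hint); // dispatch through the chosen member function
}

// Explicit instantiations, mirroring the two at the end of mailbox.cpp.
template void Deliver<&IPool::ScheduleActivation>(IPool*, unsigned);
template void Deliver<&IPool::SpecificScheduleActivation>(IPool*, unsigned);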
diff --git a/library/cpp/actors/core/mailbox.h b/library/cpp/actors/core/mailbox.h
deleted file mode 100644
index 4697dedcfd..0000000000
--- a/library/cpp/actors/core/mailbox.h
+++ /dev/null
@@ -1,571 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "event.h"
-#include "executor_pool.h"
-#include "mailbox_queue_simple.h"
-#include "mailbox_queue_revolving.h"
-#include <library/cpp/actors/util/unordered_cache.h>
-#include <library/cpp/threading/queue/mpsc_htswap.h>
-#include <library/cpp/threading/queue/mpsc_read_as_filled.h>
-#include <util/generic/hash.h>
-#include <util/system/hp_timer.h>
-#include <util/generic/ptr.h>
-// TODO: clean all broken arcadia atomic stuff and replace with intrinsics
-
-namespace NActors {
- class IActor;
- class IExecutorPool;
-
- const ui64 ARRAY_CAPACITY = 8;
-
- // structure of hint:
- // 1 bit: is service or direct hint
- // 2 bits: pool index
- // 17 bits: line
- // 12 bits: index of mailbox inside of line
-
- struct TMailboxHeader;
-
- template<bool>
- struct TMailboxUsageImpl {
- void Push(ui64 /*localId*/) {}
- void ProcessEvents(TMailboxHeader* /*mailbox*/) {}
- };
-
- template<>
- struct TMailboxUsageImpl<true> {
- struct TPendingEvent {
- ui64 LocalId;
- ui64 Timestamp;
- };
- NThreading::TReadAsFilledQueue<TPendingEvent> PendingEventQueue;
-
- ~TMailboxUsageImpl();
- void Push(ui64 localId);
- void ProcessEvents(TMailboxHeader *mailbox);
- };
-
- struct TMailboxHeader
- : TMailboxUsageImpl<ActorLibCollectUsageStats>
- {
- struct TMailboxActorPack {
- enum EType {
- Simple = 0,
- Array = 1,
- Map = 2
- };
- };
-
- using TActorMap = THashMap<ui64, IActor*>;
-
- struct TExecutionState {
- enum EState {
- // normal states
- Inactive = 0,
- Scheduled = 1,
- Leaving = 2,
- Executing = 3,
- LeavingMarked = 4,
- // states for free mailboxes (they can still be scheduled so we need duplicates)
- Free = 5,
- FreeScheduled = 6,
- FreeLeaving = 7,
- FreeExecuting = 8,
- FreeLeavingMarked = 9,
- };
- };
-
- volatile ui32 ExecutionState;
- ui32 Reserved : 4; // never changes, always zero
- ui32 Type : 4; // never changes
- ui32 ActorPack : 2;
- ui32 Knobs : 22;
-
- struct TActorPair {
- IActor *Actor;
- ui64 ActorId;
- };
-
- struct alignas(64) TActorArray {
- TActorPair Actors[ARRAY_CAPACITY];
- };
-
- union TActorsInfo {
- TActorPair Simple;
- struct {
- TActorArray* ActorsArray;
- ui64 ActorsCount;
- } Array;
- struct {
- TActorMap* ActorsMap;
- } Map;
- } ActorsInfo;
-
- TMailboxHeader(TMailboxType::EType type);
- ~TMailboxHeader();
-
- bool CleanupActors();
-
- // this interface is used exclusively by the executor thread, so the implementation lives there
-
- bool MarkForSchedule(); // we put something into the queue; check whether we should schedule
-
- bool LockForExecution(); // we got activation, try to lock mailbox
- bool LockFromFree(); // try to claim a recycled mailbox (could fail if another thread is processing garbage)
-
- void UnlockFromExecution1(); // prepare for releasing lock
- bool UnlockFromExecution2(bool wouldReschedule); // proceed with releasing lock
- bool UnlockAsFree(bool wouldReschedule); // proceed with releasing the lock, but mark the mailbox as free
-
- bool IsEmpty() const noexcept {
- return (ActorPack == TMailboxActorPack::Simple && ActorsInfo.Simple.ActorId == 0);
- }
-
- template<typename T>
- void ForEach(T&& callback) noexcept {
- switch (ActorPack) {
- case TMailboxActorPack::Simple:
- if (ActorsInfo.Simple.ActorId) {
- callback(ActorsInfo.Simple.ActorId, ActorsInfo.Simple.Actor);
- }
- break;
-
- case TMailboxActorPack::Map:
- for (const auto& [actorId, actor] : *ActorsInfo.Map.ActorsMap) {
- callback(actorId, actor);
- }
- break;
-
- case TMailboxActorPack::Array:
- for (ui64 i = 0; i < ActorsInfo.Array.ActorsCount; ++i) {
- auto& row = ActorsInfo.Array.ActorsArray->Actors[i];
- callback(row.ActorId, row.Actor);
- }
- break;
- }
- }
-
- IActor* FindActor(ui64 localActorId) noexcept {
- switch (ActorPack) {
- case TMailboxActorPack::Simple: {
- if (ActorsInfo.Simple.ActorId == localActorId)
- return ActorsInfo.Simple.Actor;
- break;
- }
- case TMailboxActorPack::Map: {
- TActorMap::iterator it = ActorsInfo.Map.ActorsMap->find(localActorId);
- if (it != ActorsInfo.Map.ActorsMap->end())
- return it->second;
- break;
- }
- case TMailboxActorPack::Array: {
- for (ui64 i = 0; i < ActorsInfo.Array.ActorsCount; ++i) {
- if (ActorsInfo.Array.ActorsArray->Actors[i].ActorId == localActorId) {
- return ActorsInfo.Array.ActorsArray->Actors[i].Actor;
- }
- }
- break;
- }
- default:
- Y_ABORT();
- }
- return nullptr;
- }
-
- void AttachActor(ui64 localActorId, IActor* actor) noexcept {
- switch (ActorPack) {
- case TMailboxActorPack::Simple: {
- if (ActorsInfo.Simple.ActorId == 0) {
- ActorsInfo.Simple.ActorId = localActorId;
- ActorsInfo.Simple.Actor = actor;
- return;
- } else {
- auto ar = new TActorArray;
- ar->Actors[0] = ActorsInfo.Simple;
- ar->Actors[1] = TActorPair{actor, localActorId};
- ActorsInfo.Array.ActorsCount = 2;
- ActorPack = TMailboxActorPack::Array;
- ActorsInfo.Array.ActorsArray = ar;
- }
- break;
- }
- case TMailboxActorPack::Map: {
- ActorsInfo.Map.ActorsMap->insert(TActorMap::value_type(localActorId, actor));
- break;
- }
- case TMailboxActorPack::Array: {
- if (ActorsInfo.Array.ActorsCount == ARRAY_CAPACITY) {
- TActorMap* mp = new TActorMap();
- for (ui64 i = 0; i < ARRAY_CAPACITY; ++i) {
- mp->emplace(ActorsInfo.Array.ActorsArray->Actors[i].ActorId, ActorsInfo.Array.ActorsArray->Actors[i].Actor);
- }
- mp->emplace(localActorId, actor);
- ActorPack = TMailboxActorPack::Map;
- ActorsInfo.Array.ActorsCount = 0;
- delete ActorsInfo.Array.ActorsArray;
- ActorsInfo.Map.ActorsMap = mp;
- } else {
- ActorsInfo.Array.ActorsArray->Actors[ActorsInfo.Array.ActorsCount++] = TActorPair{actor, localActorId};
- }
- break;
- }
- default:
- Y_ABORT();
- }
- }
-
- IActor* DetachActor(ui64 localActorId) noexcept {
- Y_DEBUG_ABORT_UNLESS(FindActor(localActorId) != nullptr);
-
- IActor* actorToDestruct = nullptr;
-
- switch (ActorPack) {
- case TMailboxActorPack::Simple: {
- Y_ABORT_UNLESS(ActorsInfo.Simple.ActorId == localActorId);
- actorToDestruct = ActorsInfo.Simple.Actor;
-
- ActorsInfo.Simple.ActorId = 0;
- ActorsInfo.Simple.Actor = nullptr;
- break;
- }
- case TMailboxActorPack::Map: {
- TActorMap::iterator it = ActorsInfo.Map.ActorsMap->find(localActorId);
- Y_ABORT_UNLESS(it != ActorsInfo.Map.ActorsMap->end());
-
- actorToDestruct = it->second;
- ActorsInfo.Map.ActorsMap->erase(it);
-
- if (ActorsInfo.Map.ActorsMap->size() == ARRAY_CAPACITY) {
- auto ar = new TActorArray;
- ui64 i = 0;
- for (auto& [actorId, actor] : *ActorsInfo.Map.ActorsMap) {
- ar->Actors[i++] = TActorPair{actor, actorId};
- }
- delete ActorsInfo.Map.ActorsMap;
- ActorPack = TMailboxActorPack::Array;
- ActorsInfo.Array.ActorsArray = ar;
- ActorsInfo.Array.ActorsCount = ARRAY_CAPACITY;
- }
- break;
- }
- case TMailboxActorPack::Array: {
- bool found = false;
- for (ui64 i = 0; i < ActorsInfo.Array.ActorsCount; ++i) {
- if (ActorsInfo.Array.ActorsArray->Actors[i].ActorId == localActorId) {
- found = true;
- actorToDestruct = ActorsInfo.Array.ActorsArray->Actors[i].Actor;
- ActorsInfo.Array.ActorsArray->Actors[i] = ActorsInfo.Array.ActorsArray->Actors[ActorsInfo.Array.ActorsCount - 1];
- ActorsInfo.Array.ActorsCount -= 1;
- break;
- }
- }
- Y_ABORT_UNLESS(found);
-
- if (ActorsInfo.Array.ActorsCount == 1) {
- const TActorPair Actor = ActorsInfo.Array.ActorsArray->Actors[0];
- delete ActorsInfo.Array.ActorsArray;
- ActorPack = TMailboxActorPack::Simple;
- ActorsInfo.Simple = Actor;
- }
- break;
- }
- }
-
- return actorToDestruct;
- }
-
- std::pair<ui32, ui32> CountMailboxEvents(ui64 localActorId, ui32 maxTraverse);
- };
-
- class TMailboxTable : TNonCopyable {
- private:
- struct TMailboxLineHeader {
- const TMailboxType::EType MailboxType;
- const ui32 Index;
- // some more data in the first cache line; the mailboxes follow
- ui8 Padding[52];
-
- TMailboxLineHeader(TMailboxType::EType type, ui32 index)
- : MailboxType(type)
- , Index(index)
- {
- }
- };
- static_assert(sizeof(TMailboxLineHeader) <= 64, "expect sizeof(TMailboxLineHeader) <= 64");
-
- constexpr static ui64 MaxLines = 131000; // somewhat less than 2^17.
- constexpr static ui64 LineSize = 262144; // 64 * 2^12.
-
- TAtomic LastAllocatedLine;
- TAtomic AllocatedMailboxCount;
-
- typedef TUnorderedCache<ui32, 512, 4> TMailboxCache;
- TMailboxCache MailboxCacheSimple;
- TAtomic CachedSimpleMailboxes;
- TMailboxCache MailboxCacheRevolving;
- TAtomic CachedRevolvingMailboxes;
- TMailboxCache MailboxCacheHTSwap;
- TAtomic CachedHTSwapMailboxes;
- TMailboxCache MailboxCacheReadAsFilled;
- TAtomic CachedReadAsFilledMailboxes;
- TMailboxCache MailboxCacheTinyReadAsFilled;
- TAtomic CachedTinyReadAsFilledMailboxes;
-
- // and here goes a large chunk of lines,
- // presented as a statically sized array to avoid synchronization on access
- TMailboxLineHeader* volatile Lines[MaxLines];
-
- ui32 AllocateNewLine(TMailboxType::EType type);
- ui32 TryAllocateMailbox(TMailboxType::EType type, ui64 revolvingCounter);
-
- public:
- TMailboxTable();
- ~TMailboxTable();
-
- bool Cleanup(); // returns true if nothing was found to destruct (so nothing new can be created)
-
- static const ui32 LineIndexShift = 12;
- static const ui32 LineIndexMask = 0x1FFFFu << LineIndexShift;
- static const ui32 LineHintMask = 0xFFFu;
- static const ui32 PoolIndexShift = TActorId::PoolIndexShift;
- static const ui32 PoolIndexMask = TActorId::PoolIndexMask;
-
- static ui32 LineIndex(ui32 hint) {
- return ((hint & LineIndexMask) >> LineIndexShift);
- }
- static ui32 PoolIndex(ui32 hint) {
- return TActorId::PoolIndex(hint);
- }
-
- TMailboxHeader* Get(ui32 hint);
- ui32 AllocateMailbox(TMailboxType::EType type, ui64 revolvingCounter);
- void ReclaimMailbox(TMailboxType::EType type, ui32 hint, ui64 revolvingCounter);
- ui64 GetAllocatedMailboxCount() const {
- return RelaxedLoad(&AllocatedMailboxCount);
- }
-
- private:
- typedef void (IExecutorPool::*TEPScheduleActivationFunction)(ui32 activation);
-
- template <TEPScheduleActivationFunction EPSpecificScheduleActivation>
- bool GenericSendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool);
-
- public:
- bool SendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool);
- bool SpecificSendTo(TAutoPtr<IEventHandle>& ev, IExecutorPool* executorPool);
-
- struct TSimpleMailbox: public TMailboxHeader {
- // 4 bytes - state
- // 4 bytes - knobs
- // 8 bytes - actorid
- // 8 bytes - actor*
- TSimpleMailboxQueue<IEventHandle*, 64> Queue; // 24 + 8 bytes (body, lock)
- NHPTimer::STime ScheduleMoment;
-
- TSimpleMailbox();
- ~TSimpleMailbox();
-
- IEventHandle* Pop() {
- return Queue.Pop();
- }
- IEventHandle* Head() {
- return Queue.Head();
- }
-
- static TSimpleMailbox* Get(ui32 hint, void* line) {
- return (TSimpleMailbox*)((ui8*)line + 64 + (hint - 1) * AlignedSize());
- }
- static const TMailboxType::EType MailboxType = TMailboxType::Simple;
- constexpr static ui32 AlignedSize() {
- return ((sizeof(TSimpleMailbox) + 63) / 64) * 64;
- }
-
- std::pair<ui32, ui32> CountSimpleMailboxEvents(ui64 localActorId, ui32 maxTraverse);
- bool CleanupEvents();
- };
-
- struct TRevolvingMailbox: public TMailboxHeader {
- // 4 bytes - state
- // 4 bytes - knobs
- // 8 bytes - actorid
- // 8 bytes - actor*
- TRevolvingMailboxQueue<IEventHandle*, 3, 128>::TReader QueueReader; // 8 * 3 + 4 * 3 + (padding): 40 bytes
- // the next cache line starts here, so there is less writer <-> reader interference
- TRevolvingMailboxQueue<IEventHandle*, 3, 128>::TWriter QueueWriter; // 8 * 3 + 4 * 3 + 8 : 48 bytes
- ui32 Reserved1;
- ui32 Reserved2;
- NHPTimer::STime ScheduleMoment;
-
- TRevolvingMailbox();
- ~TRevolvingMailbox();
-
- IEventHandle* Pop() {
- return QueueReader.Pop();
- }
- IEventHandle* Head() {
- return QueueReader.Head();
- }
-
- static TRevolvingMailbox* Get(ui32 hint, void* line) {
- return (TRevolvingMailbox*)((ui8*)line + 64 + (hint - 1) * AlignedSize());
- }
-
- constexpr static ui64 MaxMailboxesInLine() {
- return (LineSize - 64) / AlignedSize();
- }
- static const TMailboxType::EType MailboxType = TMailboxType::Revolving;
- constexpr static ui32 AlignedSize() {
- return ((sizeof(TRevolvingMailbox) + 63) / 64) * 64;
- }
-
- std::pair<ui32, ui32> CountRevolvingMailboxEvents(ui64 localActorId, ui32 maxTraverse);
- bool CleanupEvents();
- };
-
- struct THTSwapMailbox: public TMailboxHeader {
- using TQueueType = NThreading::THTSwapQueue<IEventHandle*>;
-
- TQueueType Queue;
- NHPTimer::STime ScheduleMoment;
- char Padding_[16];
-
- THTSwapMailbox()
- : TMailboxHeader(TMailboxType::HTSwap)
- , ScheduleMoment(0)
- {
- }
-
- ~THTSwapMailbox() {
- CleanupEvents();
- }
-
- IEventHandle* Pop() {
- return Queue.Pop();
- }
-
- IEventHandle* Head() {
- return Queue.Peek();
- }
-
- static THTSwapMailbox* Get(ui32 hint, void* line) {
- return (THTSwapMailbox*)((ui8*)line + 64 + (hint - 1) * AlignedSize());
- }
-
- constexpr static ui64 MaxMailboxesInLine() {
- return (LineSize - 64) / AlignedSize();
- }
-
- static const TMailboxType::EType MailboxType = TMailboxType::HTSwap;
-
- constexpr static ui32 AlignedSize() {
- return ((sizeof(THTSwapMailbox) + 63) / 64) * 64;
- }
-
- bool CleanupEvents() {
- const bool done = (Queue.Peek() == nullptr);
- while (IEventHandle* ev = Queue.Pop())
- delete ev;
- return done;
- }
- };
-
- struct TReadAsFilledMailbox: public TMailboxHeader {
- using TQueueType = NThreading::TReadAsFilledQueue<IEventHandle>;
-
- TQueueType Queue;
- NHPTimer::STime ScheduleMoment;
- char Padding_[8];
-
- TReadAsFilledMailbox()
- : TMailboxHeader(TMailboxType::ReadAsFilled)
- , ScheduleMoment(0)
- {
- }
-
- ~TReadAsFilledMailbox() {
- CleanupEvents();
- }
-
- IEventHandle* Pop() {
- return Queue.Pop();
- }
-
- IEventHandle* Head() {
- return Queue.Peek();
- }
-
- static TReadAsFilledMailbox* Get(ui32 hint, void* line) {
- return (TReadAsFilledMailbox*)((ui8*)line + 64 + (hint - 1) * AlignedSize());
- }
-
- constexpr static ui64 MaxMailboxesInLine() {
- return (LineSize - 64) / AlignedSize();
- }
-
- static const TMailboxType::EType MailboxType =
- TMailboxType::ReadAsFilled;
-
- constexpr static ui32 AlignedSize() {
- return ((sizeof(TReadAsFilledMailbox) + 63) / 64) * 64;
- }
-
- bool CleanupEvents() {
- const bool done = (Queue.Peek() == nullptr);
- while (IEventHandle* ev = Queue.Pop())
- delete ev;
- return done;
- }
- };
-
- struct TTinyReadAsFilledMailbox: public TMailboxHeader {
- using TQueueType = NThreading::TReadAsFilledQueue<
- IEventHandle,
- NThreading::TRaFQueueBunchSize<4>>;
-
- TQueueType Queue;
- NHPTimer::STime ScheduleMoment;
- char Padding_[8];
-
- TTinyReadAsFilledMailbox()
- : TMailboxHeader(TMailboxType::TinyReadAsFilled)
- , ScheduleMoment(0)
- {
- }
-
- ~TTinyReadAsFilledMailbox() {
- CleanupEvents();
- }
-
- IEventHandle* Pop() {
- return Queue.Pop();
- }
-
- IEventHandle* Head() {
- return Queue.Peek();
- }
-
- static TTinyReadAsFilledMailbox* Get(ui32 hint, void* line) {
- return (TTinyReadAsFilledMailbox*)((ui8*)line + 64 + (hint - 1) * AlignedSize());
- }
-
- constexpr static ui64 MaxMailboxesInLine() {
- return (LineSize - 64) / AlignedSize();
- }
-
- static const TMailboxType::EType MailboxType =
- TMailboxType::TinyReadAsFilled;
-
- constexpr static ui32 AlignedSize() {
- return ((sizeof(TTinyReadAsFilledMailbox) + 63) / 64) * 64;
- }
-
- bool CleanupEvents() {
- const bool done = (Queue.Peek() == nullptr);
- while (IEventHandle* ev = Queue.Pop())
- delete ev;
- return done;
- }
- };
- };
-}
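
The hint constants in TMailboxTable above encode the mailbox location: bits 12..28 select the line, the low 12 bits select the slot inside it, and the pool bits are handled by TActorId. A rough standalone sketch of the decoding, with the constants copied from the deleted header and the pool bits ignored (not the actual NActors types):

    #include <cstdint>
    #include <cstdio>

    constexpr uint32_t LineIndexShift = 12;
    constexpr uint32_t LineIndexMask  = 0x1FFFFu << LineIndexShift;
    constexpr uint32_t LineHintMask   = 0xFFFu;

    int main() {
        const uint32_t hint = (3u << LineIndexShift) | 17u;  // hypothetical: line 3, slot 17
        const uint32_t line = (hint & LineIndexMask) >> LineIndexShift;
        const uint32_t slot = hint & LineHintMask;           // 1-based slot inside the line
        std::printf("line=%u slot=%u\n", line, slot);        // prints line=3 slot=17
        return 0;
    }
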
diff --git a/library/cpp/actors/core/mailbox_queue_revolving.h b/library/cpp/actors/core/mailbox_queue_revolving.h
deleted file mode 100644
index e1a25dd352..0000000000
--- a/library/cpp/actors/core/mailbox_queue_revolving.h
+++ /dev/null
@@ -1,214 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <library/cpp/actors/util/queue_chunk.h>
-
-namespace NActors {
- // add some concurrency to the basic queue to avoid hangs under contention (we pay with memory, so use it only when you really expect contention)
- // ordering: every completed push is guaranteed to be seen before any not-yet-initiated push; parallel pushes could reorder (which is natural for concurrent queues).
- // try to place the reader and writers on different cache lines to avoid congestion between them.
- // if strict ordering does not matter - look at TManyOneQueue.
-
- template <typename T, ui32 TWriteConcurrency = 3, ui32 TSize = 128>
- class TRevolvingMailboxQueue {
- static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-
- struct TValTagPair {
- volatile T Value;
- volatile ui64 Tag;
- };
-
- typedef TQueueChunk<TValTagPair, TSize> TChunk;
-
- static_assert(sizeof(TAtomic) == sizeof(TChunk*), "expect sizeof(TAtomic) == sizeof(TChunk*)");
- static_assert(sizeof(TAtomic) == sizeof(ui64), "expect sizeof(TAtomic) == sizeof(ui64)");
-
- public:
- class TWriter;
-
- class TReader {
- TChunk* ReadFrom[TWriteConcurrency];
- ui32 ReadPosition[TWriteConcurrency];
-
- friend class TRevolvingMailboxQueue<T, TWriteConcurrency, TSize>::TWriter; // for access to ReadFrom in constructor
-
- bool ChunkHead(ui32 idx, ui64* tag, T* value) {
- TChunk* head = ReadFrom[idx];
- const ui32 pos = ReadPosition[idx];
- if (pos != TChunk::EntriesCount) {
- if (const T xval = AtomicLoad(&head->Entries[pos].Value)) {
- const ui64 xtag = head->Entries[pos].Tag;
- if (xtag < *tag) {
- *value = xval;
- *tag = xtag;
- return true;
- }
- }
- } else if (TChunk* next = AtomicLoad(&head->Next)) {
- ReadFrom[idx] = next;
- delete head;
- ReadPosition[idx] = 0;
- return ChunkHead(idx, tag, value);
- }
-
- return false;
- }
-
- T Head(bool pop) {
- ui64 tag = Max<ui64>();
- T ret = T{};
- ui32 idx = 0;
-
- for (ui32 i = 0; i < TWriteConcurrency; ++i)
- if (ChunkHead(i, &tag, &ret))
- idx = i;
-
- // w/o second pass we could reorder updates with 'already scanned' range
- if (ret) {
- for (ui32 i = 0; i < TWriteConcurrency; ++i)
- if (ChunkHead(i, &tag, &ret))
- idx = i;
- }
-
- if (pop && ret)
- ++ReadPosition[idx];
-
- return ret;
- }
-
- public:
- TReader() {
- for (ui32 i = 0; i != TWriteConcurrency; ++i) {
- ReadFrom[i] = new TChunk();
- ReadPosition[i] = 0;
- }
- }
-
- ~TReader() {
- Y_DEBUG_ABORT_UNLESS(Head() == 0);
- for (ui32 i = 0; i < TWriteConcurrency; ++i)
- delete ReadFrom[i];
- }
-
- T Pop() {
- return Head(true);
- }
-
- T Head() {
- return Head(false);
- }
-
- class TReadIterator {
- TChunk* ReadFrom[TWriteConcurrency];
- ui32 ReadPosition[TWriteConcurrency];
-
- bool ChunkHead(ui32 idx, ui64* tag, T* value) {
- TChunk* head = ReadFrom[idx];
- const ui32 pos = ReadPosition[idx];
- if (pos != TChunk::EntriesCount) {
- if (const T xval = AtomicLoad(&head->Entries[pos].Value)) {
- const ui64 xtag = head->Entries[pos].Tag;
- if (xtag < *tag) {
- *value = xval;
- *tag = xtag;
- return true;
- }
- }
- } else if (TChunk* next = AtomicLoad(&head->Next)) {
- ReadFrom[idx] = next;
- ReadPosition[idx] = 0;
- return ChunkHead(idx, tag, value);
- }
-
- return false;
- }
-
- public:
- TReadIterator(TChunk* const* readFrom, const ui32* readPosition) {
- memcpy(ReadFrom, readFrom, TWriteConcurrency * sizeof(TChunk*));
- memcpy(ReadPosition, readPosition, TWriteConcurrency * sizeof(ui32));
- }
-
- T Next() {
- ui64 tag = Max<ui64>();
- T ret = T{};
- ui32 idx = 0;
-
- for (ui32 i = 0; i < TWriteConcurrency; ++i)
- if (ChunkHead(i, &tag, &ret))
- idx = i;
-
- // w/o second pass we could reorder updates with 'already scanned' range
- if (ret) {
- for (ui32 i = 0; i < TWriteConcurrency; ++i)
- if (ChunkHead(i, &tag, &ret))
- idx = i;
- }
-
- if (ret)
- ++ReadPosition[idx];
-
- return ret;
- }
- };
-
- TReadIterator Iterator() const {
- return TReadIterator(ReadFrom, ReadPosition);
- }
- };
-
- class TWriter {
- TChunk* volatile WriteTo[TWriteConcurrency];
- volatile ui64 Tag;
- ui32 WritePosition[TWriteConcurrency];
-
- public:
- TWriter(const TReader& reader)
- : Tag(0)
- {
- for (ui32 i = 0; i != TWriteConcurrency; ++i) {
- WriteTo[i] = reader.ReadFrom[i];
- WritePosition[i] = 0;
- }
- }
-
- bool TryPush(T x) {
- Y_ABORT_UNLESS(x != 0);
-
- for (ui32 i = 0; i != TWriteConcurrency; ++i) {
- if (RelaxedLoad(&WriteTo[i]) != nullptr) {
- if (TChunk* writeTo = AtomicSwap(&WriteTo[i], nullptr)) {
- const ui64 nextTag = AtomicIncrement(Tag);
- Y_DEBUG_ABORT_UNLESS(nextTag < Max<ui64>());
- const ui32 writePosition = WritePosition[i];
- if (writePosition != TChunk::EntriesCount) {
- writeTo->Entries[writePosition].Tag = nextTag;
- AtomicStore(&writeTo->Entries[writePosition].Value, x);
- ++WritePosition[i];
- } else {
- TChunk* next = new TChunk();
- next->Entries[0].Tag = nextTag;
- next->Entries[0].Value = x;
- AtomicStore(&writeTo->Next, next);
- writeTo = next;
- WritePosition[i] = 1;
- }
- AtomicStore(WriteTo + i, writeTo);
- return true;
- }
- }
- }
- return false;
- }
-
- ui32 Push(T x) {
- ui32 spins = 0;
- while (!TryPush(x)) {
- ++spins;
- SpinLockPause();
- }
- return spins;
- }
- };
- };
-}
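
The reader above restores a total order across the writer lanes by always taking the head with the smallest tag. A simplified single-threaded model of that selection rule (std::deque lanes stand in for the lock-free chunk lists; this only shows the ordering idea, not the real implementation):

    #include <array>
    #include <cstdint>
    #include <cstdio>
    #include <deque>
    #include <optional>

    struct TEntry { uint64_t Tag; int Value; };

    // pop the entry with the smallest tag across all lanes, if any
    std::optional<int> PopSmallestTag(std::array<std::deque<TEntry>, 3>& lanes) {
        uint64_t bestTag = UINT64_MAX;
        int bestLane = -1;
        for (int i = 0; i < 3; ++i) {
            if (!lanes[i].empty() && lanes[i].front().Tag < bestTag) {
                bestTag = lanes[i].front().Tag;
                bestLane = i;
            }
        }
        if (bestLane < 0)
            return std::nullopt;
        int value = lanes[bestLane].front().Value;
        lanes[bestLane].pop_front();
        return value;
    }

    int main() {
        std::array<std::deque<TEntry>, 3> lanes;
        lanes[0] = {{1, 10}, {4, 40}};
        lanes[1] = {{2, 20}};
        lanes[2] = {{3, 30}};
        while (auto v = PopSmallestTag(lanes))
            std::printf("%d ", *v);   // prints 10 20 30 40
        std::printf("\n");
        return 0;
    }
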
diff --git a/library/cpp/actors/core/mailbox_queue_simple.h b/library/cpp/actors/core/mailbox_queue_simple.h
deleted file mode 100644
index 2e44c21adb..0000000000
--- a/library/cpp/actors/core/mailbox_queue_simple.h
+++ /dev/null
@@ -1,34 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <library/cpp/actors/util/ticket_lock.h>
-#include <library/cpp/actors/util/queue_oneone_inplace.h>
-
-namespace NActors {
- // dead-simple one-to-one queue, based on the serializability guarantees of x64 and a ticket lock to ensure writer uniqueness.
- template <typename T, ui32 TSize>
- class TSimpleMailboxQueue {
- TOneOneQueueInplace<T, TSize> Queue;
- TTicketLock Lock;
-
- public:
- ui32 Push(T x) noexcept {
- const ui32 spins = Lock.Acquire();
- Queue.Push(x);
- Lock.Release();
- return spins;
- }
-
- T Head() {
- return Queue.Head();
- }
-
- T Pop() {
- return Queue.Pop();
- }
-
- typename TOneOneQueueInplace<T, TSize>::TReadIterator ReadIterator() {
- return Queue.Iterator();
- }
- };
-}
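
TSimpleMailboxQueue above lets many producers serialize their pushes through a ticket lock while the single consumer reads from an in-place one-to-one queue without taking it. A minimal portable model of that contract (std::mutex and std::deque stand in for the real primitives; the reader locks too here, because std::deque, unlike the in-place queue, is not safe for unlocked concurrent reads):

    #include <deque>
    #include <mutex>
    #include <optional>
    #include <utility>

    template <typename T>
    class TSimpleMailboxQueueModel {
        std::deque<T> Queue;
        std::mutex Lock;
    public:
        void Push(T x) {                                 // many producers
            std::lock_guard<std::mutex> guard(Lock);     // enforces writer uniqueness
            Queue.push_back(std::move(x));
        }
        std::optional<T> Pop() {                         // single consumer
            std::lock_guard<std::mutex> guard(Lock);     // the real in-place queue avoids this lock
            if (Queue.empty())
                return std::nullopt;
            T x = std::move(Queue.front());
            Queue.pop_front();
            return x;
        }
    };
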
diff --git a/library/cpp/actors/core/mon.h b/library/cpp/actors/core/mon.h
deleted file mode 100644
index ba5debbd17..0000000000
--- a/library/cpp/actors/core/mon.h
+++ /dev/null
@@ -1,267 +0,0 @@
-#pragma once
-
-#include "events.h"
-#include "event_local.h"
-#include <library/cpp/monlib/service/monservice.h>
-#include <library/cpp/monlib/service/pages/mon_page.h>
-
-namespace NActors {
- namespace NMon {
- enum {
- HttpInfo = EventSpaceBegin(NActors::TEvents::ES_MON),
- HttpInfoRes,
- RemoteHttpInfo,
- RemoteHttpInfoRes,
- RemoteJsonInfoRes,
- RemoteBinaryInfoRes,
- End
- };
-
- static_assert(End < EventSpaceEnd(NActors::TEvents::ES_MON), "expect End < EventSpaceEnd(NActors::TEvents::ES_MON)");
-
- // request info from an actor in HTML format
- struct TEvHttpInfo: public NActors::TEventLocal<TEvHttpInfo, HttpInfo> {
- TEvHttpInfo(const NMonitoring::IMonHttpRequest& request, int subReqId = 0)
- : Request(request)
- , SubRequestId(subReqId)
- {
- }
-
- TEvHttpInfo(const NMonitoring::IMonHttpRequest& request, const TString& userToken)
- : Request(request)
- , UserToken(userToken)
- , SubRequestId(0)
- {
- }
-
- const NMonitoring::IMonHttpRequest& Request;
- TString UserToken; // built and serialized
- // SubRequestId != 0 means that we assemble the reply from multiple parts, and SubRequestId contains this part's id
- int SubRequestId;
- };
-
- // base class for HTTP info response
- struct IEvHttpInfoRes: public NActors::TEventLocal<IEvHttpInfoRes, HttpInfoRes> {
- enum EContentType {
- Html,
- Custom,
- };
-
- IEvHttpInfoRes() {
- }
-
- virtual ~IEvHttpInfoRes() {
- }
-
- virtual void Output(IOutputStream& out) const = 0;
- virtual EContentType GetContentType() const = 0;
- };
-
- // Ready to output HTML in TString
- struct TEvHttpInfoRes: public IEvHttpInfoRes {
- TEvHttpInfoRes(const TString& answer, int subReqId = 0, EContentType contentType = Html)
- : Answer(answer)
- , SubRequestId(subReqId)
- , ContentType(contentType)
- {
- }
-
- void Output(IOutputStream& out) const override {
- out << Answer;
- }
-
- EContentType GetContentType() const override {
- return ContentType;
- }
-
- const TString Answer;
- const int SubRequestId;
- const EContentType ContentType;
- };
-
- struct TEvRemoteHttpInfo: public NActors::TEventBase<TEvRemoteHttpInfo, RemoteHttpInfo> {
- TEvRemoteHttpInfo() = default;
-
- TEvRemoteHttpInfo(const TString& query, HTTP_METHOD method = HTTP_METHOD_UNDEFINED)
- : Query(query)
- , Method(method)
- {
- }
-
- TEvRemoteHttpInfo(NActorsProto::TRemoteHttpInfo info)
- : Query(MakeSerializedQuery(info))
- , ExtendedQuery(std::move(info))
- {}
-
- static TString MakeSerializedQuery(const NActorsProto::TRemoteHttpInfo& info) {
- TString s(1, '\0');
- const bool success = info.AppendToString(&s);
- Y_ABORT_UNLESS(success);
- return s;
- }
-
- TString Query;
- HTTP_METHOD Method = HTTP_METHOD_UNDEFINED;
- std::optional<NActorsProto::TRemoteHttpInfo> ExtendedQuery;
-
- TString PathInfo() const {
- if (ExtendedQuery) {
- return ExtendedQuery->GetPath();
- } else {
- const size_t pos = Query.find('?');
- return (pos == TString::npos) ? TString() : Query.substr(0, pos);
- }
- }
-
- TCgiParameters Cgi() const {
- if (ExtendedQuery) {
- TCgiParameters params;
- for (const auto& kv : ExtendedQuery->GetQueryParams()) {
- params.emplace(kv.GetKey(), kv.GetValue());
- }
- return params;
- } else {
- const size_t pos = Query.find('?');
- return TCgiParameters((pos == TString::npos) ? TString() : Query.substr(pos + 1));
- }
- }
-
- HTTP_METHOD GetMethod() const {
- return ExtendedQuery ? static_cast<HTTP_METHOD>(ExtendedQuery->GetMethod()) : Method;
- }
-
- TString ToStringHeader() const override {
- return "TEvRemoteHttpInfo";
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override {
- return serializer->WriteString(&Query);
- }
-
- ui32 CalculateSerializedSize() const override {
- return Query.size();
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- static IEventBase* Load(TEventSerializedData* bufs) {
- TString s = bufs->GetString();
- if (s.size() && s[0] == '\0') {
- TRope::TConstIterator iter = bufs->GetBeginIter();
- ui64 size = bufs->GetSize();
- iter += 1, --size; // skip '\0'
- TRopeStream stream(iter, size);
-
- auto res = std::make_unique<TEvRemoteHttpInfo>();
- res->Query = s;
- res->ExtendedQuery.emplace();
- const bool success = res->ExtendedQuery->ParseFromZeroCopyStream(&stream);
- Y_ABORT_UNLESS(success);
- return res.release();
- } else {
- return new TEvRemoteHttpInfo(s);
- }
- }
- };
-
- struct TEvRemoteHttpInfoRes: public NActors::TEventBase<TEvRemoteHttpInfoRes, RemoteHttpInfoRes> {
- TEvRemoteHttpInfoRes() {
- }
-
- TEvRemoteHttpInfoRes(const TString& html)
- : Html(html)
- {
- }
-
- TString Html;
-
- TString ToStringHeader() const override {
- return "TEvRemoteHttpInfoRes";
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override {
- return serializer->WriteString(&Html);
- }
-
- ui32 CalculateSerializedSize() const override {
- return Html.size();
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- static IEventBase* Load(TEventSerializedData* bufs) {
- return new TEvRemoteHttpInfoRes(bufs->GetString());
- }
- };
-
- struct TEvRemoteJsonInfoRes: public NActors::TEventBase<TEvRemoteJsonInfoRes, RemoteJsonInfoRes> {
- TEvRemoteJsonInfoRes() {
- }
-
- TEvRemoteJsonInfoRes(const TString& json)
- : Json(json)
- {
- }
-
- TString Json;
-
- TString ToStringHeader() const override {
- return "TEvRemoteJsonInfoRes";
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override {
- return serializer->WriteString(&Json);
- }
-
- ui32 CalculateSerializedSize() const override {
- return Json.size();
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- static IEventBase* Load(TEventSerializedData* bufs) {
- return new TEvRemoteJsonInfoRes(bufs->GetString());
- }
- };
-
- struct TEvRemoteBinaryInfoRes: public NActors::TEventBase<TEvRemoteBinaryInfoRes, RemoteBinaryInfoRes> {
- TEvRemoteBinaryInfoRes() {
- }
-
- TEvRemoteBinaryInfoRes(const TString& blob)
- : Blob(blob)
- {
- }
-
- TString Blob;
-
- TString ToStringHeader() const override {
- return "TEvRemoteBinaryInfoRes";
- }
-
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override {
- return serializer->WriteString(&Blob);
- }
-
- ui32 CalculateSerializedSize() const override {
- return Blob.size();
- }
-
- bool IsSerializable() const override {
- return true;
- }
-
- static IEventBase* Load(TEventSerializedData* bufs) {
- return new TEvRemoteBinaryInfoRes(bufs->GetString());
- }
- };
-
- }
-
-}
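
TEvRemoteHttpInfo above distinguishes its two payload shapes by the first byte: a leading '\0' marks a protobuf-encoded TRemoteHttpInfo following it, anything else is treated as a plain query string. A rough standalone sketch of just that dispatch (protobuf parsing omitted, names are illustrative):

    #include <cstdio>
    #include <string>

    enum class ERemoteHttpPayload { PlainQuery, ExtendedProto };

    ERemoteHttpPayload ClassifyPayload(const std::string& s) {
        if (!s.empty() && s[0] == '\0')
            return ERemoteHttpPayload::ExtendedProto;   // skip the '\0', then parse the proto
        return ERemoteHttpPayload::PlainQuery;          // e.g. "path?key=value"
    }

    int main() {
        std::string plain = "actors/overview?page=1";
        std::string extended(1, '\0');
        extended += "<serialized TRemoteHttpInfo bytes>";
        std::printf("%d %d\n",
                    static_cast<int>(ClassifyPayload(plain)),      // 0: PlainQuery
                    static_cast<int>(ClassifyPayload(extended)));  // 1: ExtendedProto
        return 0;
    }
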
diff --git a/library/cpp/actors/core/mon_stats.h b/library/cpp/actors/core/mon_stats.h
deleted file mode 100644
index 4fb49d70c0..0000000000
--- a/library/cpp/actors/core/mon_stats.h
+++ /dev/null
@@ -1,191 +0,0 @@
-#pragma once
-
-#include "defs.h"
-//#include "actor.h"
-#include <library/cpp/actors/util/local_process_key.h>
-#include <library/cpp/monlib/metrics/histogram_snapshot.h>
-#include <util/system/hp_timer.h>
-
-namespace NActors {
- struct TLogHistogram : public NMonitoring::IHistogramSnapshot {
- TLogHistogram() {
- memset(Buckets, 0, sizeof(Buckets));
- }
-
- inline void Add(ui64 val, ui64 inc = 1) {
- size_t ind = 0;
-#if defined(__clang__) && __clang_major__ == 3 && __clang_minor__ == 7
- asm volatile("" ::
- : "memory");
-#endif
- if (val > 1) {
- ind = GetValueBitCount(val - 1);
- }
-#if defined(__clang__) && __clang_major__ == 3 && __clang_minor__ == 7
- asm volatile("" ::
- : "memory");
-#endif
- RelaxedStore(&TotalSamples, RelaxedLoad(&TotalSamples) + inc);
- RelaxedStore(&Buckets[ind], RelaxedLoad(&Buckets[ind]) + inc);
- }
-
- void Aggregate(const TLogHistogram& other) {
- const ui64 inc = RelaxedLoad(&other.TotalSamples);
- RelaxedStore(&TotalSamples, RelaxedLoad(&TotalSamples) + inc);
- for (size_t i = 0; i < Y_ARRAY_SIZE(Buckets); ++i) {
- Buckets[i] += RelaxedLoad(&other.Buckets[i]);
- }
- }
-
- // IHistogramSnapshot
- ui32 Count() const override {
- return Y_ARRAY_SIZE(Buckets);
- }
-
- NMonitoring::TBucketBound UpperBound(ui32 index) const override {
- Y_ASSERT(index < Y_ARRAY_SIZE(Buckets));
- if (index == 0) {
- return 1;
- }
- return NMonitoring::TBucketBound(1ull << (index - 1)) * 2.0;
- }
-
- NMonitoring::TBucketValue Value(ui32 index) const override {
- Y_ASSERT(index < Y_ARRAY_SIZE(Buckets));
- return Buckets[index];
- }
-
- ui64 TotalSamples = 0;
- ui64 Buckets[65];
- };
-
- struct TExecutorPoolStats {
- ui64 MaxUtilizationTime = 0;
- ui64 IncreasingThreadsByNeedyState = 0;
- ui64 IncreasingThreadsByExchange = 0;
- ui64 DecreasingThreadsByStarvedState = 0;
- ui64 DecreasingThreadsByHoggishState = 0;
- ui64 DecreasingThreadsByExchange = 0;
- i64 MaxConsumedCpuUs = 0;
- i64 MinConsumedCpuUs = 0;
- i64 MaxBookedCpuUs = 0;
- i64 MinBookedCpuUs = 0;
- double SpinningTimeUs = 0;
- double SpinThresholdUs = 0;
- i16 WrongWakenedThreadCount = 0;
- i16 CurrentThreadCount = 0;
- i16 PotentialMaxThreadCount = 0;
- i16 DefaultThreadCount = 0;
- i16 MaxThreadCount = 0;
- bool IsNeedy = false;
- bool IsStarved = false;
- bool IsHoggish = false;
- };
-
- struct TExecutorThreadStats {
- ui64 SentEvents = 0;
- ui64 ReceivedEvents = 0;
- ui64 PreemptedEvents = 0; // Number of events that experienced hard preemption
- ui64 NonDeliveredEvents = 0;
- ui64 EmptyMailboxActivation = 0;
- ui64 CpuUs = 0; // microseconds the thread was executing on CPU (accounts for preemption)
- ui64 SafeElapsedTicks = 0;
- ui64 WorstActivationTimeUs = 0;
- NHPTimer::STime ElapsedTicks = 0;
- NHPTimer::STime ParkedTicks = 0;
- NHPTimer::STime BlockedTicks = 0;
- TLogHistogram ActivationTimeHistogram;
- TLogHistogram EventDeliveryTimeHistogram;
- TLogHistogram EventProcessingCountHistogram;
- TLogHistogram EventProcessingTimeHistogram;
- TVector<NHPTimer::STime> ElapsedTicksByActivity;
- TVector<ui64> ReceivedEventsByActivity;
- TVector<i64> ActorsAliveByActivity; // the sum should be positive, but per-thread might be negative
- TVector<ui64> ScheduledEventsByActivity;
- TVector<ui64> StuckActorsByActivity;
- TVector<std::array<ui64, 10>> UsageByActivity;
- ui64 PoolActorRegistrations = 0;
- ui64 PoolDestroyedActors = 0;
- ui64 PoolAllocatedMailboxes = 0;
- ui64 MailboxPushedOutByTailSending = 0;
- ui64 MailboxPushedOutBySoftPreemption = 0;
- ui64 MailboxPushedOutByTime = 0;
- ui64 MailboxPushedOutByEventCount = 0;
- ui64 NotEnoughCpuExecutions = 0;
-
- TExecutorThreadStats() // must not be empty, as 0 is used as the default
- : ElapsedTicksByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- , ReceivedEventsByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- , ActorsAliveByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- , ScheduledEventsByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- , StuckActorsByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- , UsageByActivity(TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount())
- {}
-
- template <typename T>
- static void AggregateOne(TVector<T>& self, const TVector<T>& other) {
- const size_t selfSize = self.size();
- const size_t otherSize = other.size();
- if (selfSize < otherSize)
- self.resize(otherSize);
- for (size_t at = 0; at < otherSize; ++at)
- self[at] += RelaxedLoad(&other[at]);
- }
-
- void Aggregate(const TExecutorThreadStats& other) {
- SentEvents += RelaxedLoad(&other.SentEvents);
- ReceivedEvents += RelaxedLoad(&other.ReceivedEvents);
- PreemptedEvents += RelaxedLoad(&other.PreemptedEvents);
- NonDeliveredEvents += RelaxedLoad(&other.NonDeliveredEvents);
- EmptyMailboxActivation += RelaxedLoad(&other.EmptyMailboxActivation);
- CpuUs += RelaxedLoad(&other.CpuUs);
- SafeElapsedTicks += RelaxedLoad(&other.SafeElapsedTicks);
- RelaxedStore(
- &WorstActivationTimeUs,
- std::max(RelaxedLoad(&WorstActivationTimeUs), RelaxedLoad(&other.WorstActivationTimeUs)));
- ElapsedTicks += RelaxedLoad(&other.ElapsedTicks);
- ParkedTicks += RelaxedLoad(&other.ParkedTicks);
- BlockedTicks += RelaxedLoad(&other.BlockedTicks);
- MailboxPushedOutByTailSending += RelaxedLoad(&other.MailboxPushedOutByTailSending);
- MailboxPushedOutBySoftPreemption += RelaxedLoad(&other.MailboxPushedOutBySoftPreemption);
- MailboxPushedOutByTime += RelaxedLoad(&other.MailboxPushedOutByTime);
- MailboxPushedOutByEventCount += RelaxedLoad(&other.MailboxPushedOutByEventCount);
- NotEnoughCpuExecutions += RelaxedLoad(&other.NotEnoughCpuExecutions);
-
- ActivationTimeHistogram.Aggregate(other.ActivationTimeHistogram);
- EventDeliveryTimeHistogram.Aggregate(other.EventDeliveryTimeHistogram);
- EventProcessingCountHistogram.Aggregate(other.EventProcessingCountHistogram);
- EventProcessingTimeHistogram.Aggregate(other.EventProcessingTimeHistogram);
-
- AggregateOne(ElapsedTicksByActivity, other.ElapsedTicksByActivity);
- AggregateOne(ReceivedEventsByActivity, other.ReceivedEventsByActivity);
- AggregateOne(ActorsAliveByActivity, other.ActorsAliveByActivity);
- AggregateOne(ScheduledEventsByActivity, other.ScheduledEventsByActivity);
- AggregateOne(StuckActorsByActivity, other.StuckActorsByActivity);
-
- if (UsageByActivity.size() < other.UsageByActivity.size()) {
- UsageByActivity.resize(other.UsageByActivity.size());
- }
- for (size_t i = 0; i < UsageByActivity.size(); ++i) {
- for (size_t j = 0; j < 10; ++j) {
- UsageByActivity[i][j] += RelaxedLoad(&other.UsageByActivity[i][j]);
- }
- }
-
- RelaxedStore(
- &PoolActorRegistrations,
- std::max(RelaxedLoad(&PoolActorRegistrations), RelaxedLoad(&other.PoolActorRegistrations)));
- RelaxedStore(
- &PoolDestroyedActors,
- std::max(RelaxedLoad(&PoolDestroyedActors), RelaxedLoad(&other.PoolDestroyedActors)));
- RelaxedStore(
- &PoolAllocatedMailboxes,
- std::max(RelaxedLoad(&PoolAllocatedMailboxes), RelaxedLoad(&other.PoolAllocatedMailboxes)));
- }
-
- size_t MaxActivityType() const {
- return ActorsAliveByActivity.size();
- }
- };
-
-}
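
TLogHistogram above bins samples logarithmically: values 0 and 1 land in bucket 0, and a value v > 1 lands in bucket GetValueBitCount(v - 1), i.e. ceil(log2(v)), so bucket i covers (2^(i-1), 2^i]. A small standalone sketch of that rule, with the Arcadia helper replaced by a portable bit count:

    #include <cstdint>
    #include <cstdio>

    uint32_t BitCount(uint64_t x) {            // number of significant bits
        uint32_t n = 0;
        while (x) { ++n; x >>= 1; }
        return n;
    }

    uint32_t LogHistogramBucket(uint64_t val) {
        return val > 1 ? BitCount(val - 1) : 0;
    }

    int main() {
        const uint64_t samples[] = {0, 1, 2, 3, 4, 5, 1000};
        for (uint64_t v : samples)
            std::printf("value %llu -> bucket %u\n",
                        static_cast<unsigned long long>(v), LogHistogramBucket(v));
        // value 1000 -> bucket 10, since 2^9 < 1000 <= 2^10
        return 0;
    }
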
diff --git a/library/cpp/actors/core/mon_ut.cpp b/library/cpp/actors/core/mon_ut.cpp
deleted file mode 100644
index fa5dbbe71e..0000000000
--- a/library/cpp/actors/core/mon_ut.cpp
+++ /dev/null
@@ -1,29 +0,0 @@
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/core/mon.h>
-
-using namespace NActors;
-using namespace NMon;
-
-Y_UNIT_TEST_SUITE(ActorSystemMon) {
- Y_UNIT_TEST(SerializeEv) {
- NActorsProto::TRemoteHttpInfo info;
- info.SetPath("hello");
-
- auto ev = std::make_unique<TEvRemoteHttpInfo>(info);
- UNIT_ASSERT(ev->ExtendedQuery);
- UNIT_ASSERT_VALUES_EQUAL(ev->ExtendedQuery->GetPath(), info.GetPath());
- UNIT_ASSERT_VALUES_EQUAL(ev->PathInfo(), info.GetPath());
-
- TAllocChunkSerializer ser;
- const bool success = ev->SerializeToArcadiaStream(&ser);
- Y_ABORT_UNLESS(success);
- auto buffer = ser.Release(ev->CreateSerializationInfo());
- std::unique_ptr<TEvRemoteHttpInfo> restored(dynamic_cast<TEvRemoteHttpInfo*>(TEvRemoteHttpInfo::Load(buffer.Get())));
- UNIT_ASSERT(restored->Query == ev->Query);
- UNIT_ASSERT(restored->Query.size());
- UNIT_ASSERT(restored->Query[0] == '\0');
- UNIT_ASSERT(restored->ExtendedQuery);
- UNIT_ASSERT_VALUES_EQUAL(restored->ExtendedQuery->GetPath(), ev->ExtendedQuery->GetPath());
- UNIT_ASSERT_VALUES_EQUAL(restored->PathInfo(), ev->PathInfo());
- }
-}
diff --git a/library/cpp/actors/core/monotonic.cpp b/library/cpp/actors/core/monotonic.cpp
deleted file mode 100644
index 581d80d849..0000000000
--- a/library/cpp/actors/core/monotonic.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "monotonic.h"
diff --git a/library/cpp/actors/core/monotonic.h b/library/cpp/actors/core/monotonic.h
deleted file mode 100644
index 2c53785390..0000000000
--- a/library/cpp/actors/core/monotonic.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#pragma once
-
-#include <util/datetime/base.h>
-#include <library/cpp/time_provider/monotonic.h>
-
-namespace NActors {
-
-using NMonotonic::GetMonotonicMicroSeconds;
-
-using TMonotonic = NMonotonic::TMonotonic;
-
-} // namespace NActors
diff --git a/library/cpp/actors/core/monotonic_provider.cpp b/library/cpp/actors/core/monotonic_provider.cpp
deleted file mode 100644
index f8d91a4eec..0000000000
--- a/library/cpp/actors/core/monotonic_provider.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "monotonic_provider.h"
diff --git a/library/cpp/actors/core/monotonic_provider.h b/library/cpp/actors/core/monotonic_provider.h
deleted file mode 100644
index befe4f7b90..0000000000
--- a/library/cpp/actors/core/monotonic_provider.h
+++ /dev/null
@@ -1,11 +0,0 @@
-#pragma once
-
-#include <library/cpp/time_provider/monotonic_provider.h>
-
-namespace NActors {
-
-using IMonotonicTimeProvider = NMonotonic::IMonotonicTimeProvider;
-
-using NMonotonic::CreateDefaultMonotonicTimeProvider;
-
-} // namespace NActors
diff --git a/library/cpp/actors/core/performance_ut.cpp b/library/cpp/actors/core/performance_ut.cpp
deleted file mode 100644
index 3c1a0ed143..0000000000
--- a/library/cpp/actors/core/performance_ut.cpp
+++ /dev/null
@@ -1,182 +0,0 @@
-#include "actorsystem.h"
-#include "av_bootstrapped.h"
-#include "actor_virtual.h"
-#include "actor_bootstrapped.h"
-#include "config.h"
-#include "event_local.h"
-#include "executor_pool_basic.h"
-#include "executor_pool_io.h"
-#include "executor_pool_base.h"
-#include "scheduler_basic.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(ActorSystemPerformance) {
-
- class TQueueTestRuntime {
- std::unique_ptr<TActorSystem> ActorSystem;
- public:
- TQueueTestRuntime() {
- auto setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 1;
- setup->ExecutorsCount = 2;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[2]);
- setup->Executors[0].Reset(new TBasicExecutorPool(0, 4, 20));
- setup->Executors[1].Reset(new TIOExecutorPool(1, 10));
-
- setup->Scheduler.Reset(new TBasicSchedulerThread(TSchedulerConfig(512, 100)));
-
- ActorSystem.reset(new TActorSystem{ setup });
- }
-
- void Start() {
- ActorSystem->Start();
- }
-
- void Stop() {
- ActorSystem->Stop();
- ActorSystem.reset();
- }
-
- void Send(std::unique_ptr<IEventHandle>&& ev) {
- ActorSystem->Send(ev.release());
- }
-
- void Register(IActor* actor) {
- ActorSystem->Register(actor);
- }
- };
-
- enum EEvSubscribe {
- EvDolbExecute,
- EvEnd
- };
-
- class TEventLocalDolbilkaOld: public NActors::TEventLocal<TEventLocalDolbilkaOld, EvDolbExecute> {
- public:
-
- };
-
- class TDolbilkaCommon {
- protected:
- ui32 Counter = 0;
- ui32 CounterLimit = 10000000;
- const TInstant Start = Now();
- std::atomic<TDuration> Duration;
- std::atomic<bool> Ready = false;
- public:
- bool MakeStep() {
- if (++Counter >= CounterLimit) {
- TDolbilkaCommon::Finish();
- return false;
- }
- return true;
- }
- TDuration GetDurationInProgress() const {
- return Now() - Start;
- }
- double GetOperationDuration() const {
- return 1.0 * Duration.load().MicroSeconds() / Counter * 1000;
- }
- void Finish() {
- if (!Ready.exchange(true)) {
- Duration = GetDurationInProgress();
- }
- }
- bool IsFinished() const {
- return Ready;
- }
- };
-
- class TDolbilkaOld: public TDolbilkaCommon, public NActors::TActorBootstrapped<TDolbilkaOld> {
- private:
- using TBase = NActors::TActorBootstrapped<TDolbilkaOld>;
- public:
-
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEventLocalDolbilkaOld, Handle);
- default:
- Y_ABORT_UNLESS(false);
- }
- }
-
- void Handle(TEventLocalDolbilkaOld::TPtr& /*ev*/, const TActorContext&) {
- if (MakeStep()) {
- Sender<TEventLocalDolbilkaOld>().SendTo(SelfId());
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateInit);
- Sender<TEventLocalDolbilkaOld>().SendTo(SelfId());
- }
- };
-
- class TDolbilkaNew;
- class TEventLocalDolbilkaNew: public NActors::TEventLocalForActor<TEventLocalDolbilkaNew, TDolbilkaNew> {
- public:
-
- };
-
- class TDolbilkaNew: public TDolbilkaCommon, public NActors::TActorAutoStart {
- private:
- using TBase = NActors::TActorAutoStart;
- protected:
- virtual void DoOnStart(const TActorId& /*senderActorId*/) override {
- Sender<TEventLocalDolbilkaNew>().SendTo(SelfId());
- }
-
- public:
- void ProcessEvent(NActors::TEventContext<TEventLocalDolbilkaNew>& /*ev*/) {
- if (MakeStep()) {
- Sender<TEventLocalDolbilkaNew>().SendTo(SelfId());
- }
- }
- };
-
- class IDolbilkaSimple {
- public:
- virtual ~IDolbilkaSimple() = default;
- virtual bool ProcessEvent() = 0;
- };
-
- class TDolbilkaSimple: public TDolbilkaCommon, public IDolbilkaSimple {
- private:
-// TMutex Mutex;
- public:
- virtual bool ProcessEvent() override {
-// TGuard<TMutex> g(Mutex);
- return MakeStep();
- }
- };
-
- Y_UNIT_TEST(PerfTest) {
- THolder<TQueueTestRuntime> runtime(new TQueueTestRuntime);
- runtime->Start();
- TDolbilkaNew* dNew = new TDolbilkaNew;
- runtime->Register(dNew);
- while (dNew->GetDurationInProgress() < TDuration::Seconds(1000) && !dNew->IsFinished()) {
- Sleep(TDuration::Seconds(1));
- }
- Y_ABORT_UNLESS(dNew->IsFinished());
- TDolbilkaOld* dOld = new TDolbilkaOld;
- runtime->Register(dOld);
- while (dOld->GetDurationInProgress() < TDuration::Seconds(1000) && !dOld->IsFinished()) {
- Sleep(TDuration::Seconds(1));
- }
- Y_ABORT_UNLESS(dOld->IsFinished());
- std::unique_ptr<TDolbilkaSimple> dSimple(new TDolbilkaSimple);
- IDolbilkaSimple* dSimpleIface = dSimple.get();
- while (dSimpleIface->ProcessEvent()) {
-
- }
- Cerr << "DURATION_OLD: " << 1.0 * dOld->GetOperationDuration() << "ns" << Endl;
- Cerr << "DURATION_NEW: " << 1.0 * dNew->GetOperationDuration() << "ns" << Endl;
- Cerr << "DURATION_SIMPLE: " << 1.0 * dSimple->GetOperationDuration() << "ns" << Endl;
-
- }
-}
diff --git a/library/cpp/actors/core/probes.cpp b/library/cpp/actors/core/probes.cpp
deleted file mode 100644
index 7ace83e102..0000000000
--- a/library/cpp/actors/core/probes.cpp
+++ /dev/null
@@ -1,28 +0,0 @@
-#include "probes.h"
-
-#include "actorsystem.h"
-
-#include <util/string/builder.h>
-
-LWTRACE_DEFINE_PROVIDER(ACTORLIB_PROVIDER);
-
-namespace NActors {
- TVector<NLWTrace::TDashboard> LWTraceDashboards(TActorSystemSetup* setup) {
- TVector<NLWTrace::TDashboard> result;
-
- NLWTrace::TDashboard slowDash;
- ui32 pools = setup->GetExecutorsCount();
- size_t top = 30;
- slowDash.SetName("ActorSystem slow events");
- slowDash.SetDescription(TStringBuilder() << "TOP" << top << " slow event executions >1M cycles for every pool (refresh page to update)");
- for (ui32 pool = 0; pool < pools; pool++) {
- auto* row = slowDash.AddRows();
- auto* cell = row->AddCells();
- cell->SetTitle(TStringBuilder() << pool << ":" << setup->GetPoolName(pool));
- cell->SetUrl(TStringBuilder() << "?mode=log&id=.ACTORLIB_PROVIDER.SlowEvent.ppoolId=" << pool << "&s=eventMs&reverse=y&head=30");
- }
- result.push_back(slowDash);
-
- return result;
- }
-}
diff --git a/library/cpp/actors/core/probes.h b/library/cpp/actors/core/probes.h
deleted file mode 100644
index 531923b5ad..0000000000
--- a/library/cpp/actors/core/probes.h
+++ /dev/null
@@ -1,221 +0,0 @@
-#pragma once
-
-#include <library/cpp/lwtrace/all.h>
-#include <util/generic/vector.h>
-
-#define LWACTORID(x) (x).RawX1(), (x).RawX2(), (x).NodeId(), (x).PoolID()
-#define LWTYPE_ACTORID ui64, ui64, ui32, ui32
-#define LWNAME_ACTORID(n) n "Raw1", n "Raw2", n "NodeId", n "PoolId"
-
-#define ACTORLIB_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \
- PROBE(SlowEvent, GROUPS("ActorLibSlow"), \
- TYPES(ui32, double, TString, TString, TString), \
- NAMES("poolId", "eventMs", "eventType", "actorId", "actorType")) \
- PROBE(EventSlowDelivery, GROUPS("ActorLibSlow"), \
- TYPES(ui32, double, double, ui64, TString, TString, TString), \
- NAMES("poolId", "deliveryMs", "sinceActivationMs", "eventProcessedBefore", "eventType", "actorId", "actorType")) \
- PROBE(SlowActivation, GROUPS("ActorLibSlow"), \
- TYPES(ui32, double), \
- NAMES("poolId", "activationMs")) \
- PROBE(SlowRegisterNew, GROUPS("ActorLibSlow"), \
- TYPES(ui32, double), \
- NAMES("poolId", "registerNewMs")) \
- PROBE(SlowRegisterAdd, GROUPS("ActorLibSlow"), \
- TYPES(ui32, double), \
- NAMES("poolId", "registerAddMs")) \
- PROBE(MailboxPushedOutByTailSending, GROUPS("ActorLibMailbox", "ActorLibMailboxPushedOut"), \
- TYPES(ui32, TString, ui32, TDuration, ui64, TString, TString), \
- NAMES("poolId", "pool", "eventsProcessed", "procTimeMs", "workerId", "actorId", "actorType")) \
- PROBE(MailboxPushedOutBySoftPreemption, GROUPS("ActorLibMailbox", "ActorLibMailboxPushedOut"), \
- TYPES(ui32, TString, ui32, TDuration, ui64, TString, TString), \
- NAMES("poolId", "pool", "eventsProcessed", "procTimeMs", "workerId", "actorId", "actorType")) \
- PROBE(MailboxPushedOutByTime, GROUPS("ActorLibMailbox", "ActorLibMailboxPushedOut"), \
- TYPES(ui32, TString, ui32, TDuration, ui64, TString, TString), \
- NAMES("poolId", "pool", "eventsProcessed", "procTimeMs", "workerId", "actorId", "actorType")) \
- PROBE(MailboxPushedOutByEventCount, GROUPS("ActorLibMailbox", "ActorLibMailboxPushedOut"), \
- TYPES(ui32, TString, ui32, TDuration, ui64, TString, TString), \
- NAMES("poolId", "pool", "eventsProcessed", "procTimeMs", "workerId", "actorId", "actorType")) \
- PROBE(MailboxEmpty, GROUPS("ActorLibMailbox"), \
- TYPES(ui32, TString, ui32, TDuration, ui64, TString, TString), \
- NAMES("poolId", "pool", "eventsProcessed", "procTimeMs", "workerId", "actorId", "actorType")) \
- PROBE(ActivationBegin, GROUPS(), \
- TYPES(ui32, ui32, ui32, double), \
- NAMES("cpu", "poolId", "workerId", "expireMs")) \
- PROBE(ActivationEnd, GROUPS(), \
- TYPES(ui32, ui32, ui32), \
- NAMES("cpu", "poolId", "workerId")) \
- PROBE(ExecutorThreadStats, GROUPS("ActorLibStats"), \
- TYPES(ui32, TString, ui64, ui64, ui64, double, double), \
- NAMES("poolId", "pool", "workerId", "execCount", "readyActivationCount", "execMs", "nonExecMs")) \
- PROBE(SlowICReadLoopAdjustSize, GROUPS("ActorLibSlowIC"), \
- TYPES(double), \
- NAMES("icReadLoopAdjustSizeMs")) \
- PROBE(SlowICReadFromSocket, GROUPS("ActorLibSlowIC"), \
- TYPES(double), \
- NAMES("icReadFromSocketMs")) \
- PROBE(SlowICReadLoopSend, GROUPS("ActorLibSlowIC"), \
- TYPES(double), \
- NAMES("icReadLoopSendMs")) \
- PROBE(SlowICAllocPacketBuffer, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icAllocPacketBufferMs")) \
- PROBE(SlowICFillSendingBuffer, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icFillSendingBufferMs")) \
- PROBE(SlowICPushSentPackets, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icPushSentPacketsMs")) \
- PROBE(SlowICPushSendQueue, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icPushSendQueueMs")) \
- PROBE(SlowICWriteData, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icWriteDataMs")) \
- PROBE(SlowICDropConfirmed, GROUPS("ActorLibSlowIC"), \
- TYPES(ui32, double), \
- NAMES("peerId", "icDropConfirmedMs")) \
- PROBE(ActorsystemScheduler, GROUPS("Durations"), \
- TYPES(ui64, ui64, ui32, ui32, ui64, ui64), \
- NAMES("timeUs", "timerfd_expirations", "eventsGottenFromQueues", "eventsSent", \
- "eventsInSendQueue", "eventSchedulingErrorUs")) \
- PROBE(ForwardEvent, GROUPS("Orbit", "InterconnectSessionTCP"), \
- TYPES(ui32, ui32, ui32, LWTYPE_ACTORID, LWTYPE_ACTORID, ui64, ui32), \
- NAMES("peerId", "type", "flags", LWNAME_ACTORID("r"), LWNAME_ACTORID("s"), \
- "cookie", "eventSerializedSize")) \
- PROBE(EnqueueEvent, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui64, TDuration, ui16, ui64, ui64), \
- NAMES("peerId", "numEventsInReadyChannels", "enqueueBlockedTotalMs", "channelId", "queueSizeInEvents", "queueSizeInBytes")) \
- PROBE(SerializeToPacketBegin, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui16, ui64), \
- NAMES("peerId", "channelId", "outputQueueSize")) \
- PROBE(SerializeToPacketEnd, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui16, ui64, ui64), \
- NAMES("peerId", "channelId", "outputQueueSize", "offsetInPacket")) \
- PROBE(FillSendingBuffer, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui32, ui64, TDuration), \
- NAMES("peerId", "taskBytesGenerated", "numEventsInReadyChannelsBehind", "fillBlockedTotalMs")) \
- PROBE(PacketGenerated, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui64, ui64, ui64, ui64), \
- NAMES("peerId", "bytesUnwritten", "inflightBytes", "packetsGenerated", "packetSize")) \
- PROBE(PacketWrittenToSocket, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui64, bool, ui64, ui64, TDuration, int), \
- NAMES("peerId", "packetsWrittenToSocket", "triedWriting", "packetDataSize", "bytesUnwritten", "writeBlockedTotalMs", "fd")) \
- PROBE(GenerateTraffic, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double, ui64, ui32, ui64), \
- NAMES("peerId", "generateTrafficMs", "dataBytesSent", "generatedPackets", "generatedBytes")) \
- PROBE(WriteToSocket, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui64, ui64, ui64, ui64, TDuration, int), \
- NAMES("peerId", "bytesWritten", "packetsWritten", "packetsWrittenToSocket", "bytesUnwritten", "writeBlockedTotalMs", "fd")) \
- PROBE(UpdateFromInputSession, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "pingMs")) \
- PROBE(UnblockByDropConfirmed, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "updateDeliveryMs")) \
- PROBE(DropConfirmed, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, ui64, ui64), \
- NAMES("peerId", "droppedBytes", "inflightBytes")) \
- PROBE(StartRam, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32), \
- NAMES("peerId")) \
- PROBE(FinishRam, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "ramMs")) \
- PROBE(SkipGenerateTraffic, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "elapsedSinceRamMs")) \
- PROBE(StartBatching, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "batchPeriodMs")) \
- PROBE(FinishBatching, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double), \
- NAMES("peerId", "finishBatchDeliveryMs")) \
- PROBE(BlockedWrite, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double, ui64), \
- NAMES("peerId", "sendQueueSize", "writtenBytes")) \
- PROBE(ReadyWrite, GROUPS("InterconnectSessionTCP"), \
- TYPES(ui32, double, double), \
- NAMES("peerId", "readyWriteDeliveryMs", "blockMs")) \
- PROBE(EpollStartWaitIn, GROUPS("EpollThread"), \
- TYPES(), \
- NAMES()) \
- PROBE(EpollFinishWaitIn, GROUPS("EpollThread"), \
- TYPES(i32), \
- NAMES("eventsCount")) \
- PROBE(EpollWaitOut, GROUPS("EpollThread"), \
- TYPES(i32), \
- NAMES("eventsCount")) \
- PROBE(EpollSendReadyRead, GROUPS("EpollThread"), \
- TYPES(bool, bool, int), \
- NAMES("hangup", "event", "fd")) \
- PROBE(EpollSendReadyWrite, GROUPS("EpollThread"), \
- TYPES(bool, bool, int), \
- NAMES("hangup", "event", "fd")) \
- PROBE(HardPreemption, GROUPS("UnitedWorker"), \
- TYPES(ui32, ui32, ui32, ui32), \
- NAMES("cpu", "prevPoolId", "prevWorkerId", "nextWorkerId")) \
- PROBE(SetPreemptionTimer, GROUPS("UnitedWorker", "PreemptionTimer"), \
- TYPES(ui32, ui32, int, double, double), \
- NAMES("cpu", "workerId", "fd", "nowMs", "preemptMs")) \
- PROBE(ResetPreemptionTimer, GROUPS("UnitedWorker", "PreemptionTimer"), \
- TYPES(ui32, ui32, int, double, double), \
- NAMES("cpu", "workerId", "fd", "nowMs", "preemptMs")) \
- PROBE(SlowWorkerActionRace, GROUPS("UnitedWorker"), \
- TYPES(ui32, ui32, ui64), \
- NAMES("cpu", "poolId", "slowPoolsMask")) \
- PROBE(PoolStats, GROUPS("PoolCpuBalancer"), \
- TYPES(ui32, TString, ui64, ui8, ui8, double, double, double, ui64, ui64, ui64), \
- NAMES("poolId", "pool", "currentCpus", "loadClass", "priority", "scaleFactor", "cpuIdle", "cpuLoad", "importance", "addImportance", "subImportance")) \
- PROBE(MoveCpu, GROUPS("PoolCpuBalancer"), \
- TYPES(ui32, ui64, TString, TString, ui32), \
- NAMES("fromPoolId", "toPoolId", "fromPool", "toPool", "cpu")) \
- PROBE(ThreadCount, GROUPS("BasicThreadPool"), \
- TYPES(ui32, TString, ui32, ui32, ui32, ui32), \
- NAMES("poolId", "pool", "threacCount", "minThreadCount", "maxThreadCount", "defaultThreadCount")) \
- PROBE(HarmonizeCheckPool, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, double, double, double, double, ui32, ui32, bool, bool, bool), \
- NAMES("poolId", "pool", "booked", "consumed", "lastSecondBooked", "lastSecondConsumed", "threadCount", "maxThreadCount", "isStarved", "isNeedy", "isHoggish")) \
- PROBE(WakingUpConsumption, GROUPS("Harmonizer"), \
- TYPES(double, double, double, double, double), \
- NAMES("avgWakingUpUs", "realAvgWakingUpUs", "avgAwakeningUs", "realAvgAwakeningUs", "total")) \
- PROBE(ChangeSpinThreshold, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, ui64, double, ui64), \
- NAMES("poolId", "pool", "spinThreshold", "spinThresholdUs", "bucketIdx")) \
- PROBE(WaitingHistogram, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, double, double, ui64), \
- NAMES("poolId", "pool", "fromUs", "toUs", "count")) \
- PROBE(HarmonizeOperation, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, TString, ui32, ui32, ui32), \
- NAMES("poolId", "pool", "operation", "newCount", "minCount", "maxCount")) \
- PROBE(TryToHarmonize, GROUPS("Harmonizer"), \
- TYPES(ui32, TString), \
- NAMES("poolId", "pool")) \
- PROBE(SavedValues, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, TString, double, double, double, double, double, double, double, double), \
- NAMES("poolId", "pool", "valueName", "[0]", "[1]", "[2]", "[3]", "[4]", "[5]", "[6]", "[7]")) \
- PROBE(RegisterValue, GROUPS("Harmonizer"), \
- TYPES(ui64, ui64, ui64, ui64, double, double, double), \
- NAMES("ts", "lastTs", "dTs", "8sTs", "us", "lastUs", "dUs")) \
- PROBE(TryToHarmonizeFailed, GROUPS("Harmonizer"), \
- TYPES(ui64, ui64, bool, bool), \
- NAMES("ts", "nextHarmonizeTs", "isDisabled", "withLock")) \
- PROBE(TryToHarmonizeSuccess, GROUPS("Harmonizer"), \
- TYPES(ui64, ui64, ui64), \
- NAMES("ts", "nextHarmonizeTs", "previousNextHarmonizeTs")) \
- PROBE(SpinCycles, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, ui64, bool), \
- NAMES("poolId", "pool", "spinPauseCount", "IsInterrupted")) \
- PROBE(WaitingHistogramPerThread, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, ui32, double, double, ui64), \
- NAMES("poolId", "pool", "threadIdx", "fromUs", "toUs", "count")) \
- PROBE(ChangeSpinThresholdPerThread, GROUPS("Harmonizer"), \
- TYPES(ui32, TString, ui32, ui64, double, ui64), \
- NAMES("poolId", "pool", "threadIdx", "spinThreshold", "spinThresholdUs", "bucketIdx")) \
- /**/
-
-LWTRACE_DECLARE_PROVIDER(ACTORLIB_PROVIDER)
-
-namespace NActors {
- struct TActorSystemSetup;
- TVector<NLWTrace::TDashboard> LWTraceDashboards(TActorSystemSetup* setup);
-}
diff --git a/library/cpp/actors/core/process_stats.cpp b/library/cpp/actors/core/process_stats.cpp
deleted file mode 100644
index f9028537c5..0000000000
--- a/library/cpp/actors/core/process_stats.cpp
+++ /dev/null
@@ -1,358 +0,0 @@
-#include "actorsystem.h"
-#include "actor_bootstrapped.h"
-#include "hfunc.h"
-#include "process_stats.h"
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/monlib/metrics/metric_registry.h>
-
-#include <util/datetime/uptime.h>
-#include <util/system/defaults.h>
-#include <util/stream/file.h>
-#include <util/system/fs.h>
-#include <util/string/vector.h>
-#include <util/string/split.h>
-
-#ifndef _win_
-#include <sys/user.h>
-#endif
-
-namespace NActors {
-#ifdef _linux_
-
- namespace {
- template <typename TVal>
- static bool ExtractVal(const TString& str, const TString& name, TVal& res) {
- if (!str.StartsWith(name))
- return false;
- size_t pos = name.size();
- while (pos < str.size() && (str[pos] == ' ' || str[pos] == '\t')) {
- pos++;
- }
- res = atol(str.data() + pos);
- return true;
- }
-
- float TicksPerMillisec() {
-#ifdef _SC_CLK_TCK
- return sysconf(_SC_CLK_TCK) / 1000.0;
-#else
- return 1.f;
-#endif
- }
- }
-
- bool TProcStat::Fill(pid_t pid) {
- try {
- TString strPid(ToString(pid));
- TFileInput proc("/proc/" + strPid + "/status");
- TString str;
- while (proc.ReadLine(str)) {
- if (ExtractVal(str, "VmRSS:", Rss))
- continue;
- if (ExtractVal(str, "voluntary_ctxt_switches:", VolCtxSwtch))
- continue;
- if (ExtractVal(str, "nonvoluntary_ctxt_switches:", NonvolCtxSwtch))
- continue;
- }
- // Convert from kB to bytes
- Rss *= 1024;
-
- float tickPerMillisec = TicksPerMillisec();
-
- TFileInput procStat("/proc/" + strPid + "/stat");
- procStat.ReadLine(str);
- if (!str.empty()) {
- sscanf(str.data(),
- "%d %*s %c %d %d %d %d %d %u %lu %lu "
- "%lu %lu %lu %lu %ld %ld %ld %ld %ld "
- "%ld %llu %lu %ld %lu",
- &Pid, &State, &Ppid, &Pgrp, &Session, &TtyNr, &TPgid, &Flags, &MinFlt, &CMinFlt,
- &MajFlt, &CMajFlt, &Utime, &Stime, &CUtime, &CStime, &Priority, &Nice, &NumThreads,
- &ItRealValue, &StartTime, &Vsize, &RssPages, &RssLim);
- Utime /= tickPerMillisec;
- Stime /= tickPerMillisec;
- CUtime /= tickPerMillisec;
- CStime /= tickPerMillisec;
- SystemUptime = ::Uptime();
- Uptime = SystemUptime - TDuration::MilliSeconds(StartTime / TicksPerMillisec());
- }
-
- TFileInput statm("/proc/" + strPid + "/statm");
- statm.ReadLine(str);
- TVector<TString> fields;
- StringSplitter(str).Split(' ').SkipEmpty().Collect(&fields);
- if (fields.size() >= 7) {
- ui64 resident = FromString<ui64>(fields[1]);
- ui64 shared = FromString<ui64>(fields[2]);
- if (PageSize == 0) {
- PageSize = ObtainPageSize();
- }
- FileRss = shared * PageSize;
- AnonRss = (resident - shared) * PageSize;
- }
-
- TFileInput cgroup("/proc/" + strPid + "/cgroup");
- TString line;
- TString memoryCGroup;
- while (cgroup.ReadLine(line) > 0) {
- StringSplitter(line).Split(':').Collect(&fields);
- if (fields.size() > 2 && fields[1] == "memory") {
- memoryCGroup = fields[2];
- break;
- }
- }
-
- TString cgroupFileName = "/sys/fs/cgroup/memory" + memoryCGroup + "/memory.limit_in_bytes";
- if (!NFs::Exists(cgroupFileName)) {
- // fallback for mk8s
- cgroupFileName = "/sys/fs/cgroup/memory/memory.limit_in_bytes";
- }
- TFileInput limit(cgroupFileName);
- if (limit.ReadLine(line) > 0) {
- CGroupMemLim = FromString<ui64>(line);
- if (CGroupMemLim > (1ULL << 40)) {
- CGroupMemLim = 0;
- }
- }
-
- } catch (...) {
- return false;
- }
- return true;
- }
-
- long TProcStat::ObtainPageSize() {
- long sz = sysconf(_SC_PAGESIZE);
- return sz;
- }
-
-#else
-
- bool TProcStat::Fill(pid_t pid) {
- Y_UNUSED(pid);
- return false;
- }
-
- long TProcStat::ObtainPageSize() {
- return 0;
- }
-
-#endif
-
-namespace {
- // Periodically collects process stats and exposes them as mon counters
- template <typename TDerived>
- class TProcStatCollectingActor: public TActorBootstrapped<TProcStatCollectingActor<TDerived>> {
- public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::ACTORLIB_STATS;
- }
-
- TProcStatCollectingActor(TDuration interval)
- : Interval(interval)
- {
- }
-
- void Bootstrap(const TActorContext& ctx) {
- TryUpdateCounters();
- ctx.Schedule(Interval, new TEvents::TEvWakeup());
- static_cast<TDerived*>(this)->Become(&TDerived::StateWork);
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- CFunc(TEvents::TSystem::Wakeup, Wakeup);
- }
- }
-
- private:
- void Wakeup(const TActorContext& ctx) {
- TryUpdateCounters();
- ctx.Schedule(Interval, new TEvents::TEvWakeup());
- }
-
- void TryUpdateCounters() {
- if (ProcStat.Fill(getpid())) {
- static_cast<TDerived*>(this)->UpdateCounters(ProcStat);
- }
- }
-
- private:
- const TDuration Interval;
- TProcStat ProcStat;
- };
-
- // Periodically collects process stats and exposes them as mon counters
- class TDynamicCounterCollector: public TProcStatCollectingActor<TDynamicCounterCollector> {
- using TBase = TProcStatCollectingActor<TDynamicCounterCollector>;
- public:
- TDynamicCounterCollector(
- ui32 intervalSeconds,
- NMonitoring::TDynamicCounterPtr counters)
- : TBase{TDuration::Seconds(intervalSeconds)}
- {
- ProcStatGroup = counters->GetSubgroup("counters", "utils");
-
- VmSize = ProcStatGroup->GetCounter("Process/VmSize", false);
- AnonRssSize = ProcStatGroup->GetCounter("Process/AnonRssSize", false);
- FileRssSize = ProcStatGroup->GetCounter("Process/FileRssSize", false);
- CGroupMemLimit = ProcStatGroup->GetCounter("Process/CGroupMemLimit", false);
- UserTime = ProcStatGroup->GetCounter("Process/UserTime", true);
- SysTime = ProcStatGroup->GetCounter("Process/SystemTime", true);
- MinorPageFaults = ProcStatGroup->GetCounter("Process/MinorPageFaults", true);
- MajorPageFaults = ProcStatGroup->GetCounter("Process/MajorPageFaults", true);
- UptimeSeconds = ProcStatGroup->GetCounter("Process/UptimeSeconds", false);
- NumThreads = ProcStatGroup->GetCounter("Process/NumThreads", false);
- SystemUptimeSeconds = ProcStatGroup->GetCounter("System/UptimeSeconds", false);
- }
-
- void UpdateCounters(const TProcStat& procStat) {
- *VmSize = procStat.Vsize;
- *AnonRssSize = procStat.AnonRss;
- *FileRssSize = procStat.FileRss;
- if (procStat.CGroupMemLim) {
- *CGroupMemLimit = procStat.CGroupMemLim;
- }
- *UserTime = procStat.Utime;
- *SysTime = procStat.Stime;
- *MinorPageFaults = procStat.MinFlt;
- *MajorPageFaults = procStat.MajFlt;
- *UptimeSeconds = procStat.Uptime.Seconds();
- *NumThreads = procStat.NumThreads;
- *SystemUptimeSeconds = procStat.SystemUptime.Seconds();
- }
-
- private:
- NMonitoring::TDynamicCounterPtr ProcStatGroup;
- NMonitoring::TDynamicCounters::TCounterPtr VmSize;
- NMonitoring::TDynamicCounters::TCounterPtr AnonRssSize;
- NMonitoring::TDynamicCounters::TCounterPtr FileRssSize;
- NMonitoring::TDynamicCounters::TCounterPtr CGroupMemLimit;
- NMonitoring::TDynamicCounters::TCounterPtr UserTime;
- NMonitoring::TDynamicCounters::TCounterPtr SysTime;
- NMonitoring::TDynamicCounters::TCounterPtr MinorPageFaults;
- NMonitoring::TDynamicCounters::TCounterPtr MajorPageFaults;
- NMonitoring::TDynamicCounters::TCounterPtr UptimeSeconds;
- NMonitoring::TDynamicCounters::TCounterPtr NumThreads;
- NMonitoring::TDynamicCounters::TCounterPtr SystemUptimeSeconds;
- };
-
- class TRegistryCollector: public TProcStatCollectingActor<TRegistryCollector> {
- using TBase = TProcStatCollectingActor<TRegistryCollector>;
- public:
- TRegistryCollector(TDuration interval, NMonitoring::TMetricRegistry& registry)
- : TBase{interval}
- {
- VmSize = registry.IntGauge({{"sensor", "process.VmSize"}});
- AnonRssSize = registry.IntGauge({{"sensor", "process.AnonRssSize"}});
- FileRssSize = registry.IntGauge({{"sensor", "process.FileRssSize"}});
- CGroupMemLimit = registry.IntGauge({{"sensor", "process.CGroupMemLimit"}});
- UptimeSeconds = registry.IntGauge({{"sensor", "process.UptimeSeconds"}});
- NumThreads = registry.IntGauge({{"sensor", "process.NumThreads"}});
- SystemUptimeSeconds = registry.IntGauge({{"sensor", "system.UptimeSeconds"}});
-
- UserTime = registry.Rate({{"sensor", "process.UserTime"}});
- SysTime = registry.Rate({{"sensor", "process.SystemTime"}});
- MinorPageFaults = registry.Rate({{"sensor", "process.MinorPageFaults"}});
- MajorPageFaults = registry.Rate({{"sensor", "process.MajorPageFaults"}});
- }
-
- void UpdateCounters(const TProcStat& procStat) {
- VmSize->Set(procStat.Vsize);
- AnonRssSize->Set(procStat.AnonRss);
- FileRssSize->Set(procStat.FileRss);
- CGroupMemLimit->Set(procStat.CGroupMemLim);
- UptimeSeconds->Set(procStat.Uptime.Seconds());
- NumThreads->Set(procStat.NumThreads);
- SystemUptimeSeconds->Set(procStat.SystemUptime.Seconds());
-
- // It is OK to reset and then add the metric value here, because the
- // mutation is performed in a single-threaded context
-
- UserTime->Reset();
- UserTime->Add(procStat.Utime);
-
- SysTime->Reset();
- SysTime->Add(procStat.Stime);
-
- MinorPageFaults->Reset();
- MinorPageFaults->Add(procStat.MinFlt);
-
- MajorPageFaults->Reset();
- MajorPageFaults->Add(procStat.MajFlt);
- }
-
- private:
- NMonitoring::TIntGauge* VmSize;
- NMonitoring::TIntGauge* AnonRssSize;
- NMonitoring::TIntGauge* FileRssSize;
- NMonitoring::TIntGauge* CGroupMemLimit;
- NMonitoring::TRate* UserTime;
- NMonitoring::TRate* SysTime;
- NMonitoring::TRate* MinorPageFaults;
- NMonitoring::TRate* MajorPageFaults;
- NMonitoring::TIntGauge* UptimeSeconds;
- NMonitoring::TIntGauge* NumThreads;
- NMonitoring::TIntGauge* SystemUptimeSeconds;
- };
-
- class TRegistryCollectorShared: public TProcStatCollectingActor<TRegistryCollectorShared> {
- using TBase = TProcStatCollectingActor<TRegistryCollectorShared>;
- public:
- TRegistryCollectorShared(TDuration interval, std::weak_ptr<NMonitoring::TMetricRegistry> registry)
- : TBase{interval}
- , Registry(std::move(registry))
- {
- }
-
- void UpdateCounters(const TProcStat& procStat) {
- std::shared_ptr<NMonitoring::TMetricRegistry> registry = Registry.lock();
- if (registry) {
- registry->IntGauge({{"sensor", "process.VmSize"}})->Set(procStat.Vsize);
- registry->IntGauge({{"sensor", "process.AnonRssSize"}})->Set(procStat.AnonRss);
- registry->IntGauge({{"sensor", "process.FileRssSize"}})->Set(procStat.FileRss);
- registry->IntGauge({{"sensor", "process.CGroupMemLimit"}})->Set(procStat.CGroupMemLim);
- registry->IntGauge({{"sensor", "process.UptimeSeconds"}})->Set(procStat.Uptime.Seconds());
- registry->IntGauge({{"sensor", "process.NumThreads"}})->Set(procStat.NumThreads);
- registry->IntGauge({{"sensor", "system.UptimeSeconds"}})->Set(procStat.SystemUptime.Seconds());
-
- // It is OK to reset and then add the metric value here, because the
- // mutation is performed in a single-threaded context
-
- NMonitoring::TRate* userTime = registry->Rate({{"sensor", "process.UserTime"}});
- NMonitoring::TRate* sysTime = registry->Rate({{"sensor", "process.SystemTime"}});
- NMonitoring::TRate* minorPageFaults = registry->Rate({{"sensor", "process.MinorPageFaults"}});
- NMonitoring::TRate* majorPageFaults = registry->Rate({{"sensor", "process.MajorPageFaults"}});
-
- userTime->Reset();
- userTime->Add(procStat.Utime);
-
- sysTime->Reset();
- sysTime->Add(procStat.Stime);
-
- minorPageFaults->Reset();
- minorPageFaults->Add(procStat.MinFlt);
-
- majorPageFaults->Reset();
- majorPageFaults->Add(procStat.MajFlt);
- }
- }
-
- private:
- std::weak_ptr<NMonitoring::TMetricRegistry> Registry;
- };
-} // namespace
-
- IActor* CreateProcStatCollector(ui32 intervalSec, NMonitoring::TDynamicCounterPtr counters) {
- return new TDynamicCounterCollector(intervalSec, counters);
- }
-
- IActor* CreateProcStatCollector(TDuration interval, NMonitoring::TMetricRegistry& registry) {
- return new TRegistryCollector(interval, registry);
- }
-
- IActor* CreateProcStatCollector(TDuration interval, std::weak_ptr<NMonitoring::TMetricRegistry> registry) {
- return new TRegistryCollectorShared(interval, std::move(registry));
- }
-}
diff --git a/library/cpp/actors/core/process_stats.h b/library/cpp/actors/core/process_stats.h
deleted file mode 100644
index 5681f0eb1a..0000000000
--- a/library/cpp/actors/core/process_stats.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "actor.h"
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-namespace NMonitoring {
- class TMetricRegistry;
-}
-
-namespace NActors {
- struct TProcStat {
- ui64 Rss;
- ui64 VolCtxSwtch;
- ui64 NonvolCtxSwtch;
-
- int Pid;
- char State;
- int Ppid;
- int Pgrp;
- int Session;
- int TtyNr;
- int TPgid;
- unsigned Flags;
- unsigned long MinFlt;
- unsigned long CMinFlt;
- unsigned long MajFlt;
- unsigned long CMajFlt;
- unsigned long Utime;
- unsigned long Stime;
- long CUtime;
- long CStime;
- long Priority;
- long Nice;
- long NumThreads;
- long ItRealValue;
- // StartTime is measured from system boot
- unsigned long long StartTime;
- unsigned long Vsize;
- long RssPages;
- unsigned long RssLim;
- ui64 FileRss;
- ui64 AnonRss;
- ui64 CGroupMemLim = 0;
-
- TDuration Uptime;
- TDuration SystemUptime;
- // ...
-
- TProcStat() {
- Zero(*this);
- Y_UNUSED(PageSize);
- }
-
- bool Fill(pid_t pid);
-
- private:
- long PageSize = 0;
-
- long ObtainPageSize();
- };
-
- IActor* CreateProcStatCollector(ui32 intervalSec, NMonitoring::TDynamicCounterPtr counters);
- IActor* CreateProcStatCollector(TDuration interval, NMonitoring::TMetricRegistry& registry);
- IActor* CreateProcStatCollector(TDuration interval, std::weak_ptr<NMonitoring::TMetricRegistry> registry);
-}
diff --git a/library/cpp/actors/core/scheduler_actor.cpp b/library/cpp/actors/core/scheduler_actor.cpp
deleted file mode 100644
index 73fd6bd183..0000000000
--- a/library/cpp/actors/core/scheduler_actor.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-#include "actor_bootstrapped.h"
-#include "hfunc.h"
-#include "probes.h"
-#include "scheduler_actor.h"
-#include "scheduler_queue.h"
-
-#include <library/cpp/actors/interconnect/poller_actor.h>
-#include <util/system/hp_timer.h>
-
-#ifdef __linux__
-#include <sys/timerfd.h>
-#include <errno.h>
-
-LWTRACE_USING(ACTORLIB_PROVIDER);
-
-namespace NActors {
- class TTimerDescriptor: public TSharedDescriptor {
- const int Descriptor;
-
- public:
- TTimerDescriptor()
- : Descriptor(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK))
- {
- Y_ABORT_UNLESS(Descriptor != -1, "timerfd_create() failed with %s", strerror(errno));
- }
-
- ~TTimerDescriptor() override {
- close(Descriptor);
- }
-
- int GetDescriptor() override {
- return Descriptor;
- }
- };
-
- class TSchedulerActor: public TActor<TSchedulerActor> {
- const TSchedulerConfig Cfg;
- TIntrusivePtr<TSharedDescriptor> TimerDescriptor;
-
- TVector<NSchedulerQueue::TReader*> Readers;
-
- TActorId PollerActor;
- TPollerToken::TPtr PollerToken;
-
- ui64 RealTime;
- ui64 MonotonicTime;
-
- ui64 ActiveTick;
- typedef TMap<ui64, TAutoPtr<NSchedulerQueue::TQueueType>> TMomentMap; // intrasecond queues
- typedef THashMap<ui64, TAutoPtr<TMomentMap>> TScheduleMap; // over-second schedule
-
- TScheduleMap ScheduleMap;
-
- THolder<NThreading::TLegacyFuture<void, false>> MainCycle;
-
- static const ui64 IntrasecondThreshold = 1048576; // 2^20 microseconds, ~1 second
- TAutoPtr<TMomentMap> ActiveSec;
- volatile ui64* CurrentTimestamp = nullptr;
- volatile ui64* CurrentMonotonic = nullptr;
- TDeque<TAutoPtr<IEventHandle>> EventsToBeSent;
-
- public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::ACTOR_SYSTEM_SCHEDULER_ACTOR;
- }
-
- TSchedulerActor(const TSchedulerConfig& cfg)
- : TActor(&TSchedulerActor::StateFunc)
- , Cfg(cfg)
- , TimerDescriptor(new TTimerDescriptor())
- , PollerActor(MakePollerActorId())
- {
- Y_ASSERT(Cfg.ResolutionMicroseconds != 0);
- Y_ASSERT(Cfg.ProgressThreshold != 0);
- Become(&TSchedulerActor::StateFunc);
- }
-
- void Handle(TEvSchedulerInitialize::TPtr& ev, const TActorContext& ctx) {
- const TEvSchedulerInitialize& evInitialize = *ev->Get();
- Y_ASSERT(evInitialize.ScheduleReaders.size() != 0);
- Readers.resize(evInitialize.ScheduleReaders.size());
- Copy(evInitialize.ScheduleReaders.begin(), evInitialize.ScheduleReaders.end(), Readers.begin());
-
- Y_ASSERT(evInitialize.CurrentTimestamp != nullptr);
- CurrentTimestamp = evInitialize.CurrentTimestamp;
-
- Y_ASSERT(evInitialize.CurrentMonotonic != nullptr);
- CurrentMonotonic = evInitialize.CurrentMonotonic;
-
- struct itimerspec new_time;
- memset(&new_time, 0, sizeof(new_time));
- new_time.it_value.tv_nsec = Cfg.ResolutionMicroseconds * 1000;
- new_time.it_interval.tv_nsec = Cfg.ResolutionMicroseconds * 1000;
- int ret = timerfd_settime(TimerDescriptor->GetDescriptor(), 0, &new_time, NULL);
- Y_ABORT_UNLESS(ret != -1, "timerfd_settime() failed with %s", strerror(errno));
- const bool success = ctx.Send(PollerActor, new TEvPollerRegister(TimerDescriptor, SelfId(), {}));
- Y_ABORT_UNLESS(success);
-
- RealTime = RelaxedLoad(CurrentTimestamp);
- MonotonicTime = RelaxedLoad(CurrentMonotonic);
-
- ActiveTick = AlignUp<ui64>(MonotonicTime, IntrasecondThreshold);
- }
-
- void Handle(TEvPollerRegisterResult::TPtr ev, const TActorContext& ctx) {
- PollerToken = ev->Get()->PollerToken;
- HandleSchedule(ctx);
- }
-
- void UpdateTime() {
- RealTime = TInstant::Now().MicroSeconds();
- MonotonicTime = Max(MonotonicTime, GetMonotonicMicroSeconds());
- AtomicStore(CurrentTimestamp, RealTime);
- AtomicStore(CurrentMonotonic, MonotonicTime);
- }
-
- void TryUpdateTime(NHPTimer::STime* lastTimeUpdate) {
- NHPTimer::STime hpnow;
- GetTimeFast(&hpnow);
- const ui64 elapsedCycles = hpnow > *lastTimeUpdate ? hpnow - *lastTimeUpdate : 0;
- if (elapsedCycles > Cfg.ResolutionMicroseconds * (NHPTimer::GetCyclesPerSecond() / IntrasecondThreshold)) {
- UpdateTime();
- GetTimeFast(lastTimeUpdate);
- }
- }
-
- void HandleSchedule(const TActorContext& ctx) {
- for (;;) {
- NHPTimer::STime schedulingStart;
- GetTimeFast(&schedulingStart);
- NHPTimer::STime lastTimeUpdate = schedulingStart;
-
- ui64 expired;
- ssize_t bytesRead;
- bytesRead = read(TimerDescriptor->GetDescriptor(), &expired, sizeof(expired));
- if (bytesRead == -1) {
- if (errno == EAGAIN) {
- PollerToken->Request(true, false);
- break;
- } else if (errno == EINTR) {
- continue;
- }
- }
- Y_ABORT_UNLESS(bytesRead == sizeof(expired), "Error while reading from timerfd, strerror# %s", strerror(errno));
- UpdateTime();
-
- ui32 eventsGottenFromQueues = 0;
- // collect everything from queues
- for (ui32 i = 0; i != Readers.size(); ++i) {
- while (NSchedulerQueue::TEntry* x = Readers[i]->Pop()) {
- const ui64 instant = AlignUp<ui64>(x->InstantMicroseconds, Cfg.ResolutionMicroseconds);
- IEventHandle* const ev = x->Ev;
- ISchedulerCookie* const cookie = x->Cookie;
-
- // Checking whether the cookie is still valid here would hurt performance without a significant memory saving
-
- if (instant <= ActiveTick) {
- if (!ActiveSec)
- ActiveSec.Reset(new TMomentMap());
- TAutoPtr<NSchedulerQueue::TQueueType>& queue = (*ActiveSec)[instant];
- if (!queue)
- queue.Reset(new NSchedulerQueue::TQueueType());
- queue->Writer.Push(instant, ev, cookie);
- } else {
- const ui64 intrasecond = AlignUp<ui64>(instant, IntrasecondThreshold);
- TAutoPtr<TMomentMap>& msec = ScheduleMap[intrasecond];
- if (!msec)
- msec.Reset(new TMomentMap());
- TAutoPtr<NSchedulerQueue::TQueueType>& queue = (*msec)[instant];
- if (!queue)
- queue.Reset(new NSchedulerQueue::TQueueType());
- queue->Writer.Push(instant, ev, cookie);
- }
- ++eventsGottenFromQueues;
- TryUpdateTime(&lastTimeUpdate);
- }
- }
-
- ui64 eventSchedulingErrorUs = 0;
- // send everything triggered on schedule
- for (;;) {
- while (!!ActiveSec && !ActiveSec->empty()) {
- TMomentMap::iterator it = ActiveSec->begin();
- if (it->first <= MonotonicTime) {
- if (NSchedulerQueue::TQueueType* q = it->second.Get()) {
- while (NSchedulerQueue::TEntry* x = q->Reader.Pop()) {
- Y_DEBUG_ABORT_UNLESS(x->InstantMicroseconds <= ActiveTick);
- if (eventSchedulingErrorUs == 0 && MonotonicTime > x->InstantMicroseconds) {
- eventSchedulingErrorUs = MonotonicTime - x->InstantMicroseconds;
- }
- IEventHandle* ev = x->Ev;
- ISchedulerCookie* cookie = x->Cookie;
- if (cookie) {
- if (cookie->Detach()) {
- EventsToBeSent.push_back(ev);
- } else {
- delete ev;
- }
- } else {
- EventsToBeSent.push_back(ev);
- }
- TryUpdateTime(&lastTimeUpdate);
- }
- }
- ActiveSec->erase(it);
- } else {
- break;
- }
- }
-
- if (ActiveTick <= MonotonicTime) {
- Y_DEBUG_ABORT_UNLESS(!ActiveSec || ActiveSec->empty());
- ActiveSec.Destroy();
- ActiveTick += IntrasecondThreshold;
- TScheduleMap::iterator it = ScheduleMap.find(ActiveTick);
- if (it != ScheduleMap.end()) {
- ActiveSec = it->second;
- ScheduleMap.erase(it);
- }
- continue;
- }
-
- // nothing is ready at this point, so this send step is complete
- break;
- }
-
- // Send all from buffer queue
- const ui64 eventsToBeSentSize = EventsToBeSent.size();
- ui32 sentCount = 0;
- if (eventsToBeSentSize > Cfg.RelaxedSendThresholdEventsPerCycle) {
- sentCount = Cfg.RelaxedSendPaceEventsPerCycle +
- (eventsToBeSentSize - Cfg.RelaxedSendThresholdEventsPerCycle) / 2;
- } else {
- sentCount = Min(eventsToBeSentSize, Cfg.RelaxedSendPaceEventsPerCycle);
- }
- for (ui32 i = 0; i < sentCount; ++i) {
- ctx.Send(EventsToBeSent.front().Release());
- EventsToBeSent.pop_front();
- }
-
- NHPTimer::STime hpnow;
- GetTimeFast(&hpnow);
- const ui64 processingTime = hpnow > schedulingStart ? hpnow - schedulingStart : 0;
- const ui64 elapsedTimeMicroseconds = processingTime / (NHPTimer::GetCyclesPerSecond() / IntrasecondThreshold);
- LWPROBE(ActorsystemScheduler, elapsedTimeMicroseconds, expired, eventsGottenFromQueues, sentCount,
- eventsToBeSentSize, eventSchedulingErrorUs);
- TryUpdateTime(&lastTimeUpdate);
- }
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvSchedulerInitialize, Handle)
- CFunc(TEvPollerReady::EventType, HandleSchedule)
- CFunc(TEvents::TSystem::PoisonPill, Die)
- HFunc(TEvPollerRegisterResult, Handle)
- )
- };
-
- IActor* CreateSchedulerActor(const TSchedulerConfig& cfg) {
- if (cfg.UseSchedulerActor) {
- return new TSchedulerActor(cfg);
- } else {
- return nullptr;
- }
- }
-
-}
-
-#else // linux
-
-namespace NActors {
- IActor* CreateSchedulerActor(const TSchedulerConfig& cfg) {
- Y_UNUSED(cfg);
- return nullptr;
- }
-
-}
-
-#endif // linux
diff --git a/library/cpp/actors/core/scheduler_actor.h b/library/cpp/actors/core/scheduler_actor.h
deleted file mode 100644
index c2c561b43d..0000000000
--- a/library/cpp/actors/core/scheduler_actor.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-#include "actor.h"
-#include "event_local.h"
-#include "events.h"
-#include "scheduler_basic.h"
-
-namespace NActors {
- struct TEvSchedulerInitialize : TEventLocal<TEvSchedulerInitialize, TEvents::TSystem::Bootstrap> {
- TVector<NSchedulerQueue::TReader*> ScheduleReaders;
- volatile ui64* CurrentTimestamp;
- volatile ui64* CurrentMonotonic;
-
- TEvSchedulerInitialize(const TVector<NSchedulerQueue::TReader*>& scheduleReaders, volatile ui64* currentTimestamp, volatile ui64* currentMonotonic)
- : ScheduleReaders(scheduleReaders)
- , CurrentTimestamp(currentTimestamp)
- , CurrentMonotonic(currentMonotonic)
- {
- }
- };
-
- IActor* CreateSchedulerActor(const TSchedulerConfig& cfg);
-
- inline TActorId MakeSchedulerActorId() {
- char x[12] = {'s', 'c', 'h', 'e', 'd', 'u', 'l', 'e', 'r', 's', 'e', 'r'};
- return TActorId(0, TStringBuf(x, 12));
- }
-
-}
diff --git a/library/cpp/actors/core/scheduler_actor_ut.cpp b/library/cpp/actors/core/scheduler_actor_ut.cpp
deleted file mode 100644
index 09b7369d36..0000000000
--- a/library/cpp/actors/core/scheduler_actor_ut.cpp
+++ /dev/null
@@ -1,100 +0,0 @@
-#include "actor_coroutine.h"
-#include "actorsystem.h"
-#include "executor_pool_basic.h"
-#include "scheduler_actor.h"
-#include "scheduler_basic.h"
-#include "events.h"
-#include "event_local.h"
-#include "hfunc.h"
-#include <library/cpp/actors/interconnect/poller_actor.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/system/sanitizers.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(SchedulerActor) {
- class TTestActor: public TActorBootstrapped<TTestActor> {
- TManualEvent& DoneEvent;
- TAtomic& EventsProcessed;
- TInstant LastWakeup;
- const TAtomicBase EventsTotalCount;
- const TDuration ScheduleDelta;
-
- public:
- TTestActor(TManualEvent& doneEvent, TAtomic& eventsProcessed, TAtomicBase eventsTotalCount, ui32 scheduleDeltaMs)
- : DoneEvent(doneEvent)
- , EventsProcessed(eventsProcessed)
- , EventsTotalCount(eventsTotalCount)
- , ScheduleDelta(TDuration::MilliSeconds(scheduleDeltaMs))
- {
- }
-
- void Bootstrap(const TActorContext& ctx) {
- LastWakeup = ctx.Now();
- Become(&TThis::StateFunc);
- ctx.Schedule(ScheduleDelta, new TEvents::TEvWakeup());
- }
-
- void Handle(TEvents::TEvWakeup::TPtr& /*ev*/, const TActorContext& ctx) {
- const TInstant now = ctx.Now();
- UNIT_ASSERT(now - LastWakeup >= ScheduleDelta);
- LastWakeup = now;
-
- if (AtomicIncrement(EventsProcessed) == EventsTotalCount) {
- DoneEvent.Signal();
- } else {
- ctx.Schedule(ScheduleDelta, new TEvents::TEvWakeup());
- }
- }
-
- STRICT_STFUNC(StateFunc, {HFunc(TEvents::TEvWakeup, Handle)})
- };
-
- void Test(TAtomicBase eventsTotalCount, ui32 scheduleDeltaMs) {
- THolder<TActorSystemSetup> setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 0;
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
- for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
- setup->Executors[i] = new TBasicExecutorPool(i, 5, 10, "basic");
- }
- // create poller actor (if the platform supports it)
- TActorId pollerActorId;
- if (IActor* poller = CreatePollerActor()) {
- pollerActorId = MakePollerActorId();
- setup->LocalServices.emplace_back(pollerActorId, TActorSetupCmd(poller, TMailboxType::ReadAsFilled, 0));
- }
- TActorId schedulerActorId;
- if (IActor* schedulerActor = CreateSchedulerActor(TSchedulerConfig())) {
- schedulerActorId = MakeSchedulerActorId();
- setup->LocalServices.emplace_back(schedulerActorId, TActorSetupCmd(schedulerActor, TMailboxType::ReadAsFilled, 0));
- }
- setup->Scheduler = CreateSchedulerThread(TSchedulerConfig());
-
- TActorSystem actorSystem(setup);
-
- actorSystem.Start();
-
- TManualEvent doneEvent;
- TAtomic eventsProcessed = 0;
- actorSystem.Register(new TTestActor(doneEvent, eventsProcessed, eventsTotalCount, scheduleDeltaMs));
- doneEvent.WaitI();
-
- UNIT_ASSERT(AtomicGet(eventsProcessed) == eventsTotalCount);
-
- actorSystem.Stop();
- }
-
- Y_UNIT_TEST(LongEvents) {
- Test(10, 500);
- }
-
- Y_UNIT_TEST(MediumEvents) {
- Test(100, 50);
- }
-
- Y_UNIT_TEST(QuickEvents) {
- Test(1000, 5);
- }
-}
diff --git a/library/cpp/actors/core/scheduler_basic.cpp b/library/cpp/actors/core/scheduler_basic.cpp
deleted file mode 100644
index 5d66224f05..0000000000
--- a/library/cpp/actors/core/scheduler_basic.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-#include "scheduler_basic.h"
-#include "scheduler_queue.h"
-#include "actor.h"
-
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/thread.h>
-
-#ifdef BALLOC
-#include <library/cpp/balloc/optional/operators.h>
-#endif
-
-namespace NActors {
-
- struct TBasicSchedulerThread::TMonCounters {
- NMonitoring::TDynamicCounters::TCounterPtr TimeDelayMs;
- NMonitoring::TDynamicCounters::TCounterPtr QueueSize;
- NMonitoring::TDynamicCounters::TCounterPtr EventsSent;
- NMonitoring::TDynamicCounters::TCounterPtr EventsDropped;
- NMonitoring::TDynamicCounters::TCounterPtr EventsAdded;
- NMonitoring::TDynamicCounters::TCounterPtr Iterations;
- NMonitoring::TDynamicCounters::TCounterPtr Sleeps;
- NMonitoring::TDynamicCounters::TCounterPtr ElapsedMicrosec;
-
- TMonCounters(const NMonitoring::TDynamicCounterPtr& counters)
- : TimeDelayMs(counters->GetCounter("Scheduler/TimeDelayMs", false))
- , QueueSize(counters->GetCounter("Scheduler/QueueSize", false))
- , EventsSent(counters->GetCounter("Scheduler/EventsSent", true))
- , EventsDropped(counters->GetCounter("Scheduler/EventsDropped", true))
- , EventsAdded(counters->GetCounter("Scheduler/EventsAdded", true))
- , Iterations(counters->GetCounter("Scheduler/Iterations", true))
- , Sleeps(counters->GetCounter("Scheduler/Sleeps", true))
- , ElapsedMicrosec(counters->GetCounter("Scheduler/ElapsedMicrosec", true))
- { }
- };
-
- TBasicSchedulerThread::TBasicSchedulerThread(const TSchedulerConfig& config)
- : Config(config)
- , MonCounters(Config.MonCounters ? new TMonCounters(Config.MonCounters) : nullptr)
- , ActorSystem(nullptr)
- , CurrentTimestamp(nullptr)
- , CurrentMonotonic(nullptr)
- , TotalReaders(0)
- , StopFlag(false)
- , ScheduleMap(3600)
- {
- Y_ABORT_UNLESS(!Config.UseSchedulerActor, "Cannot create scheduler thread because Config.UseSchedulerActor# true");
- }
-
- TBasicSchedulerThread::~TBasicSchedulerThread() {
- Y_ABORT_UNLESS(!MainCycle);
- }
-
- void TBasicSchedulerThread::CycleFunc() {
-#ifdef BALLOC
- ThreadDisableBalloc();
-#endif
- ::SetCurrentThreadName("Scheduler");
-
- ui64 currentMonotonic = RelaxedLoad(CurrentMonotonic);
- ui64 throttledMonotonic = currentMonotonic;
-
- ui64 activeTick = AlignUp<ui64>(throttledMonotonic, IntrasecondThreshold);
- TAutoPtr<TMomentMap> activeSec;
-
- NHPTimer::STime hpprev = GetCycleCountFast();
- ui64 nextTimestamp = TInstant::Now().MicroSeconds();
- ui64 nextMonotonic = Max(currentMonotonic, GetMonotonicMicroSeconds());
-
- while (!AtomicLoad(&StopFlag)) {
- {
- const ui64 delta = nextMonotonic - throttledMonotonic;
- const ui64 elapsedDelta = nextMonotonic - currentMonotonic;
- const ui64 threshold = Max(Min(Config.ProgressThreshold, 2 * elapsedDelta), ui64(1));
-
- throttledMonotonic = (delta > threshold) ? throttledMonotonic + threshold : nextMonotonic;
-
- if (MonCounters) {
- *MonCounters->TimeDelayMs = (nextMonotonic - throttledMonotonic) / 1000;
- }
- }
- AtomicStore(CurrentTimestamp, nextTimestamp);
- AtomicStore(CurrentMonotonic, nextMonotonic);
- currentMonotonic = nextMonotonic;
-
- if (MonCounters) {
- ++*MonCounters->Iterations;
- }
-
- bool somethingDone = false;
-
- // first step - send everything triggered on schedule
- ui64 eventsSent = 0;
- ui64 eventsDropped = 0;
- for (;;) {
- while (!!activeSec && !activeSec->empty()) {
- TMomentMap::iterator it = activeSec->begin();
- if (it->first <= throttledMonotonic) {
- if (NSchedulerQueue::TQueueType* q = it->second.Get()) {
- while (NSchedulerQueue::TEntry* x = q->Reader.Pop()) {
- somethingDone = true;
- Y_DEBUG_ABORT_UNLESS(x->InstantMicroseconds <= activeTick);
- IEventHandle* ev = x->Ev;
- ISchedulerCookie* cookie = x->Cookie;
- // TODO: lazy send with a backoff queue so that we do not get stuck on contended mailboxes
- if (cookie) {
- if (cookie->Detach()) {
- ActorSystem->Send(ev);
- ++eventsSent;
- } else {
- delete ev;
- ++eventsDropped;
- }
- } else {
- ActorSystem->Send(ev);
- ++eventsSent;
- }
- }
- }
- activeSec->erase(it);
- } else
- break;
- }
-
- if (activeTick <= throttledMonotonic) {
- Y_DEBUG_ABORT_UNLESS(!activeSec || activeSec->empty());
- activeSec.Destroy();
- activeTick += IntrasecondThreshold;
- TScheduleMap::iterator it = ScheduleMap.find(activeTick);
- if (it != ScheduleMap.end()) {
- activeSec = it->second;
- ScheduleMap.erase(it);
- }
- continue;
- }
-
- // nothing is ready at this point, so this send step is complete
- break;
- }
-
- // second step - collect everything from queues
-
- ui64 eventsAdded = 0;
- for (ui32 i = 0; i != TotalReaders; ++i) {
- while (NSchedulerQueue::TEntry* x = Readers[i]->Pop()) {
- somethingDone = true;
- const ui64 instant = AlignUp<ui64>(x->InstantMicroseconds, Config.ResolutionMicroseconds);
- IEventHandle* const ev = x->Ev;
- ISchedulerCookie* const cookie = x->Cookie;
-
- // Checking whether the cookie is still valid here would hurt performance without a significant memory saving
-
- if (instant <= activeTick) {
- if (!activeSec)
- activeSec.Reset(new TMomentMap());
- TAutoPtr<NSchedulerQueue::TQueueType>& queue = (*activeSec)[instant];
- if (!queue)
- queue.Reset(new NSchedulerQueue::TQueueType());
- queue->Writer.Push(instant, ev, cookie);
- } else {
- const ui64 intrasecond = AlignUp<ui64>(instant, IntrasecondThreshold);
- TAutoPtr<TMomentMap>& msec = ScheduleMap[intrasecond];
- if (!msec)
- msec.Reset(new TMomentMap());
- TAutoPtr<NSchedulerQueue::TQueueType>& queue = (*msec)[instant];
- if (!queue)
- queue.Reset(new NSchedulerQueue::TQueueType());
- queue->Writer.Push(instant, ev, cookie);
- }
-
- ++eventsAdded;
- }
- }
-
- NHPTimer::STime hpnow = GetCycleCountFast();
-
- if (MonCounters) {
- *MonCounters->QueueSize -= eventsSent + eventsDropped;
- *MonCounters->QueueSize += eventsAdded;
- *MonCounters->EventsSent += eventsSent;
- *MonCounters->EventsDropped += eventsDropped;
- *MonCounters->EventsAdded += eventsAdded;
- *MonCounters->ElapsedMicrosec += NHPTimer::GetSeconds(hpnow - hpprev) * 1000000;
- }
-
- hpprev = hpnow;
- nextTimestamp = TInstant::Now().MicroSeconds();
- nextMonotonic = Max(currentMonotonic, GetMonotonicMicroSeconds());
-
- // ok complete, if nothing left - sleep
- if (!somethingDone) {
- const ui64 nextInstant = AlignDown<ui64>(throttledMonotonic + Config.ResolutionMicroseconds, Config.ResolutionMicroseconds);
- if (nextMonotonic >= nextInstant) // already in next time-slice
- continue;
-
- const ui64 delta = nextInstant - nextMonotonic;
- if (delta < Config.SpinThreshold) // not so much time left, just spin
- continue;
-
- if (MonCounters) {
- ++*MonCounters->Sleeps;
- }
-
- NanoSleep(delta * 1000); // ok, looks like we should sleep a bit.
-
- // Don't count sleep in elapsed microseconds
- hpprev = GetCycleCountFast();
- nextTimestamp = TInstant::Now().MicroSeconds();
- nextMonotonic = Max(currentMonotonic, GetMonotonicMicroSeconds());
- }
- }
- // ok, die!
- }
-
- void TBasicSchedulerThread::Prepare(TActorSystem* actorSystem, volatile ui64* currentTimestamp, volatile ui64* currentMonotonic) {
- ActorSystem = actorSystem;
- CurrentTimestamp = currentTimestamp;
- CurrentMonotonic = currentMonotonic;
- *CurrentTimestamp = TInstant::Now().MicroSeconds();
- *CurrentMonotonic = GetMonotonicMicroSeconds();
- }
-
- void TBasicSchedulerThread::PrepareSchedules(NSchedulerQueue::TReader** readers, ui32 scheduleReadersCount) {
- Y_ABORT_UNLESS(scheduleReadersCount > 0);
- TotalReaders = scheduleReadersCount;
- Readers.Reset(new NSchedulerQueue::TReader*[scheduleReadersCount]);
- Copy(readers, readers + scheduleReadersCount, Readers.Get());
- }
-
- void TBasicSchedulerThread::PrepareStart() {
- // Called after actor system is initialized, but before executor threads
- // are started, giving us a chance to update current timestamp with a
- // more recent value, taking initialization time into account. This is
- // safe to do, since scheduler thread is not started yet, so no other
- // threads are updating time concurrently.
- AtomicStore(CurrentTimestamp, TInstant::Now().MicroSeconds());
- AtomicStore(CurrentMonotonic, Max(RelaxedLoad(CurrentMonotonic), GetMonotonicMicroSeconds()));
- }
-
- void TBasicSchedulerThread::Start() {
- MainCycle.Reset(new NThreading::TLegacyFuture<void, false>(std::bind(&TBasicSchedulerThread::CycleFunc, this)));
- }
-
- void TBasicSchedulerThread::PrepareStop() {
- AtomicStore(&StopFlag, true);
- }
-
- void TBasicSchedulerThread::Stop() {
- MainCycle->Get();
- MainCycle.Destroy();
- }
-
-}
-
-#ifdef __linux__
-
-namespace NActors {
- ISchedulerThread* CreateSchedulerThread(const TSchedulerConfig& config) {
- if (config.UseSchedulerActor) {
- return new TMockSchedulerThread();
- } else {
- return new TBasicSchedulerThread(config);
- }
- }
-
-}
-
-#else // __linux__
-
-namespace NActors {
- ISchedulerThread* CreateSchedulerThread(const TSchedulerConfig& config) {
- return new TBasicSchedulerThread(config);
- }
-}
-
-#endif // __linux__
diff --git a/library/cpp/actors/core/scheduler_basic.h b/library/cpp/actors/core/scheduler_basic.h
deleted file mode 100644
index 2ccde39235..0000000000
--- a/library/cpp/actors/core/scheduler_basic.h
+++ /dev/null
@@ -1,81 +0,0 @@
-#pragma once
-
-#include "actorsystem.h"
-#include "monotonic.h"
-#include "scheduler_queue.h"
-#include <library/cpp/actors/util/queue_chunk.h>
-#include <library/cpp/threading/future/legacy_future.h>
-#include <util/generic/hash.h>
-#include <util/generic/map.h>
-
-namespace NActors {
-
- class TBasicSchedulerThread: public ISchedulerThread {
- // TODO: replace with NUMA-local threads and per-thread schedules
- const TSchedulerConfig Config;
-
- struct TMonCounters;
- const THolder<TMonCounters> MonCounters;
-
- TActorSystem* ActorSystem;
- volatile ui64* CurrentTimestamp;
- volatile ui64* CurrentMonotonic;
-
- ui32 TotalReaders;
- TArrayHolder<NSchedulerQueue::TReader*> Readers;
-
- volatile bool StopFlag;
-
- typedef TMap<ui64, TAutoPtr<NSchedulerQueue::TQueueType>> TMomentMap; // intrasecond queues
- typedef THashMap<ui64, TAutoPtr<TMomentMap>> TScheduleMap; // over-second schedule
-
- TScheduleMap ScheduleMap;
-
- THolder<NThreading::TLegacyFuture<void, false>> MainCycle;
-
- static const ui64 IntrasecondThreshold = 1048576; // 2^20 microseconds, ~1 second
-
- void CycleFunc();
-
- public:
- TBasicSchedulerThread(const TSchedulerConfig& config = TSchedulerConfig());
- ~TBasicSchedulerThread();
-
- void Prepare(TActorSystem* actorSystem, volatile ui64* currentTimestamp, volatile ui64* currentMonotonic) override;
- void PrepareSchedules(NSchedulerQueue::TReader** readers, ui32 scheduleReadersCount) override;
-
- void PrepareStart() override;
- void Start() override;
- void PrepareStop() override;
- void Stop() override;
- };
-
- class TMockSchedulerThread: public ISchedulerThread {
- public:
- virtual ~TMockSchedulerThread() override {
- }
-
- void Prepare(TActorSystem* actorSystem, volatile ui64* currentTimestamp, volatile ui64* currentMonotonic) override {
- Y_UNUSED(actorSystem);
- *currentTimestamp = TInstant::Now().MicroSeconds();
- *currentMonotonic = GetMonotonicMicroSeconds();
- }
-
- void PrepareSchedules(NSchedulerQueue::TReader** readers, ui32 scheduleReadersCount) override {
- Y_UNUSED(readers);
- Y_UNUSED(scheduleReadersCount);
- }
-
- void Start() override {
- }
-
- void PrepareStop() override {
- }
-
- void Stop() override {
- }
- };
-
- ISchedulerThread* CreateSchedulerThread(const TSchedulerConfig& cfg);
-
-}
diff --git a/library/cpp/actors/core/scheduler_cookie.cpp b/library/cpp/actors/core/scheduler_cookie.cpp
deleted file mode 100644
index b975a80c07..0000000000
--- a/library/cpp/actors/core/scheduler_cookie.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-#include "scheduler_cookie.h"
-
-namespace NActors {
- class TSchedulerCookie2Way: public ISchedulerCookie {
- TAtomic Value;
-
- public:
- TSchedulerCookie2Way()
- : Value(2)
- {
- }
-
- bool IsArmed() noexcept override {
- return (AtomicGet(Value) == 2);
- }
-
- bool Detach() noexcept override {
- const ui64 x = AtomicDecrement(Value);
- if (x == 1)
- return true;
-
- if (x == 0) {
- delete this;
- return false;
- }
-
- Y_ABORT();
- }
-
- bool DetachEvent() noexcept override {
- Y_ABORT();
- }
- };
-
- ISchedulerCookie* ISchedulerCookie::Make2Way() {
- return new TSchedulerCookie2Way();
- }
-
- class TSchedulerCookie3Way: public ISchedulerCookie {
- TAtomic Value;
-
- public:
- TSchedulerCookie3Way()
- : Value(3)
- {
- }
-
- bool IsArmed() noexcept override {
- return (AtomicGet(Value) == 3);
- }
-
- bool Detach() noexcept override {
- const ui64 x = AtomicDecrement(Value);
- if (x == 2)
- return true;
- if (x == 1)
- return false;
- if (x == 0) {
- delete this;
- return false;
- }
-
- Y_ABORT();
- }
-
- bool DetachEvent() noexcept override {
- const ui64 x = AtomicDecrement(Value);
- if (x == 2)
- return false;
- if (x == 1)
- return true;
- if (x == 0) {
- delete this;
- return false;
- }
-
- Y_ABORT();
- }
- };
-
- ISchedulerCookie* ISchedulerCookie::Make3Way() {
- return new TSchedulerCookie3Way();
- }
-}
diff --git a/library/cpp/actors/core/scheduler_cookie.h b/library/cpp/actors/core/scheduler_cookie.h
deleted file mode 100644
index 2c20ca67f3..0000000000
--- a/library/cpp/actors/core/scheduler_cookie.h
+++ /dev/null
@@ -1,78 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <util/generic/noncopyable.h>
-
-namespace NActors {
- class ISchedulerCookie : TNonCopyable {
- protected:
- virtual ~ISchedulerCookie() {
- }
-
- public:
- virtual bool Detach() noexcept = 0;
- virtual bool DetachEvent() noexcept = 0;
- virtual bool IsArmed() noexcept = 0;
-
- static ISchedulerCookie* Make2Way();
- static ISchedulerCookie* Make3Way();
- };
-
- class TSchedulerCookieHolder : TNonCopyable {
- ISchedulerCookie* Cookie;
-
- public:
- TSchedulerCookieHolder()
- : Cookie(nullptr)
- {
- }
-
- TSchedulerCookieHolder(ISchedulerCookie* x)
- : Cookie(x)
- {
- }
-
- ~TSchedulerCookieHolder() {
- Detach();
- }
-
- bool operator==(const TSchedulerCookieHolder& x) const noexcept {
- return (Cookie == x.Cookie);
- }
-
- ISchedulerCookie* Get() const {
- return Cookie;
- }
-
- ISchedulerCookie* Release() {
- ISchedulerCookie* result = Cookie;
- Cookie = nullptr;
- return result;
- }
-
- void Reset(ISchedulerCookie* cookie) {
- Detach();
- Cookie = cookie;
- }
-
- bool Detach() noexcept {
- if (Cookie) {
- const bool res = Cookie->Detach();
- Cookie = nullptr;
- return res;
- } else {
- return false;
- }
- }
-
- bool DetachEvent() noexcept {
- if (Cookie) {
- const bool res = Cookie->DetachEvent();
- Cookie = nullptr;
- return res;
- } else {
- return false;
- }
- }
- };
-}
diff --git a/library/cpp/actors/core/scheduler_queue.h b/library/cpp/actors/core/scheduler_queue.h
deleted file mode 100644
index 8d827e1ce4..0000000000
--- a/library/cpp/actors/core/scheduler_queue.h
+++ /dev/null
@@ -1,123 +0,0 @@
-#pragma once
-
-#include "scheduler_cookie.h"
-
-#include <library/cpp/actors/util/queue_chunk.h>
-#include <library/cpp/actors/core/event.h>
-
-namespace NActors {
- class IEventHandle;
- class ISchedulerCookie;
-
- namespace NSchedulerQueue {
- struct TEntry {
- ui64 InstantMicroseconds;
- IEventHandle* Ev;
- ISchedulerCookie* Cookie;
- };
-
- struct TChunk : TQueueChunkDerived<TEntry, 512, TChunk> {};
-
- class TReader;
- class TWriter;
- class TWriterWithPadding;
-
- class TReader : ::TNonCopyable {
- TChunk* ReadFrom;
- ui32 ReadPosition;
-
- friend class TWriter;
-
- public:
- TReader()
- : ReadFrom(new TChunk())
- , ReadPosition(0)
- {
- }
-
- ~TReader() {
- while (TEntry* x = Pop()) {
- if (x->Cookie)
- x->Cookie->Detach();
- delete x->Ev;
- }
- delete ReadFrom;
- }
-
- TEntry* Pop() {
- TChunk* head = ReadFrom;
- if (ReadPosition != TChunk::EntriesCount) {
- if (AtomicLoad(&head->Entries[ReadPosition].InstantMicroseconds) != 0)
- return const_cast<TEntry*>(&head->Entries[ReadPosition++]);
- else
- return nullptr;
- } else if (TChunk* next = AtomicLoad(&head->Next)) {
- ReadFrom = next;
- delete head;
- ReadPosition = 0;
- return Pop();
- }
-
- return nullptr;
- }
- };
-
- class TWriter : ::TNonCopyable {
- TChunk* WriteTo;
- ui32 WritePosition;
-
- public:
- TWriter()
- : WriteTo(nullptr)
- , WritePosition(0)
- {
- }
-
- void Init(const TReader& reader) {
- WriteTo = reader.ReadFrom;
- WritePosition = 0;
- }
-
- void Push(ui64 instantMicroseconds, IEventHandle* ev, ISchedulerCookie* cookie) {
- if (Y_UNLIKELY(instantMicroseconds == 0)) {
- // Protect against Pop() getting stuck forever
- instantMicroseconds = 1;
- }
- if (WritePosition != TChunk::EntriesCount) {
- volatile TEntry& entry = WriteTo->Entries[WritePosition];
- entry.Cookie = cookie;
- entry.Ev = ev;
- AtomicStore(&entry.InstantMicroseconds, instantMicroseconds);
- ++WritePosition;
- } else {
- TChunk* next = new TChunk();
- volatile TEntry& entry = next->Entries[0];
- entry.Cookie = cookie;
- entry.Ev = ev;
- entry.InstantMicroseconds = instantMicroseconds;
- AtomicStore(&WriteTo->Next, next);
- WriteTo = next;
- WritePosition = 1;
- }
- }
- };
-
- class TWriterWithPadding: public TWriter {
- private:
- ui8 CacheLinePadding[64 - sizeof(TWriter)];
-
- void UnusedCacheLinePadding() {
- Y_UNUSED(CacheLinePadding);
- }
- };
-
- struct TQueueType {
- TReader Reader;
- TWriter Writer;
-
- TQueueType() {
- Writer.Init(Reader);
- }
- };
- }
-}
diff --git a/library/cpp/actors/core/servicemap.h b/library/cpp/actors/core/servicemap.h
deleted file mode 100644
index d72e50cae5..0000000000
--- a/library/cpp/actors/core/servicemap.h
+++ /dev/null
@@ -1,168 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-namespace NActors {
- // Wait-free single-writer multi-reader hash tree for service mapping purposes.
- // Fast updates of the same key may produce false negatives; we accept this, since such cases are already broken by the service-map application logic.
-
- template <typename TKey, typename TValue, typename THash, ui64 BaseSize = 256 * 1024, ui64 ExtCount = 4, ui64 ExtBranching = 4>
- class TServiceMap : TNonCopyable {
- struct TEntry : TNonCopyable {
- ui32 CounterIn;
- ui32 CounterOut;
- TKey Key;
- TValue Value;
-
- TEntry()
- : CounterIn(0)
- , CounterOut(0)
- , Key()
- , Value()
- {
- }
- };
-
- struct TBranch : TNonCopyable {
- TEntry Entries[ExtCount];
- TBranch* Branches[ExtBranching];
-
- TBranch() {
- Fill(Branches, Branches + ExtBranching, (TBranch*)nullptr);
- }
- };
-
- ui32 Counter;
- TBranch* Line[BaseSize];
-
- bool ScanBranch(TBranch* branch, const TKey& key, ui64 hash, TValue& ret) {
- for (ui32 i = 0; i != ExtCount; ++i) {
- const TEntry& entry = branch->Entries[i];
- const ui32 counterIn = AtomicLoad(&entry.CounterIn);
- if (counterIn != 0 && entry.Key == key) {
- ret = entry.Value;
- const ui32 counterOut = AtomicLoad(&entry.CounterOut);
- if (counterOut == counterIn)
- return true;
- }
- }
-
- const ui64 hash0 = hash % ExtBranching;
- if (TBranch* next = AtomicLoad(branch->Branches + hash0))
- return ScanBranch(next, key, hash / ExtBranching, ret);
-
- return false;
- }
-
- void ScanZeroOld(TBranch* branch, const TKey& key, ui64 hash, TEntry** zeroEntry, TEntry*& oldEntry) {
- for (ui32 i = 0; i != ExtCount; ++i) {
- TEntry& entry = branch->Entries[i];
- if (entry.CounterIn == 0) {
- if (zeroEntry && !*zeroEntry) {
- *zeroEntry = &entry;
- if (oldEntry != nullptr)
- return;
- }
- } else {
- if (entry.Key == key) {
- oldEntry = &entry;
- if (!zeroEntry || *zeroEntry)
- return;
- }
- }
- }
-
- const ui64 hash0 = hash % ExtBranching;
- if (TBranch* next = branch->Branches[hash0]) {
- ScanZeroOld(next, key, hash / ExtBranching, zeroEntry, oldEntry);
- } else { // reached the tail; if zeroEntry was requested but not found yet, insert a new branch
- if (zeroEntry && !*zeroEntry) {
- TBranch* next = new TBranch();
- *zeroEntry = next->Entries;
- AtomicStore(branch->Branches + hash0, next);
- }
- }
- }
-
- public:
- TServiceMap()
- : Counter(0)
- {
- Fill(Line, Line + BaseSize, (TBranch*)nullptr);
- }
-
- ~TServiceMap() {
- for (ui64 i = 0; i < BaseSize; ++i) {
- delete Line[i];
- }
- }
-
- TValue Find(const TKey& key) {
- THash hashOp;
- const ui64 hash = hashOp(key);
- const ui64 hash0 = hash % BaseSize;
-
- if (TBranch* branch = AtomicLoad(Line + hash0)) {
- TValue ret;
- if (ScanBranch(branch, key, hash / BaseSize, ret))
- return ret;
- }
-
- return TValue();
- }
-
- // returns the previous value on update, or a default-constructed TValue on insert
- TValue Update(const TKey& key, const TValue& value) {
- THash hashOp;
- const ui64 hash = hashOp(key);
- const ui64 hash0 = hash % BaseSize;
-
- TEntry* zeroEntry = nullptr;
- TEntry* oldEntry = nullptr;
-
- if (TBranch* branch = Line[hash0]) {
- ScanZeroOld(branch, key, hash / BaseSize, &zeroEntry, oldEntry);
- } else {
- TBranch* next = new TBranch();
- zeroEntry = next->Entries;
- AtomicStore(Line + hash0, next);
- }
-
- // now we have both entries; first, publish the new one
- const ui32 counter = AtomicUi32Increment(&Counter);
- AtomicStore(&zeroEntry->CounterOut, counter);
- zeroEntry->Key = key;
- zeroEntry->Value = value;
- AtomicStore(&zeroEntry->CounterIn, counter);
-
- if (oldEntry != nullptr) {
- const TValue ret = oldEntry->Value;
- AtomicStore<ui32>(&oldEntry->CounterOut, 0);
- AtomicStore<ui32>(&oldEntry->CounterIn, 0);
- return ret;
- } else {
- return TValue();
- }
- }
-
- bool Erase(const TKey& key) {
- THash hashOp;
- const ui64 hash = hashOp(key);
- const ui64 hash0 = hash % BaseSize;
-
- TEntry* oldEntry = 0;
-
- if (TBranch* branch = Line[hash0]) {
- ScanZeroOld(branch, key, hash / BaseSize, 0, oldEntry);
- }
-
- if (oldEntry != 0) {
- AtomicStore<ui32>(&oldEntry->CounterOut, 0);
- AtomicStore<ui32>(&oldEntry->CounterIn, 0);
- return true;
- } else {
- return false;
- }
- }
- };
-}
diff --git a/library/cpp/actors/core/thread_context.h b/library/cpp/actors/core/thread_context.h
deleted file mode 100644
index 13e493f855..0000000000
--- a/library/cpp/actors/core/thread_context.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include <util/system/tls.h>
-
-
-namespace NActors {
-
- class IExecutorPool;
-
- template <typename T>
- struct TWaitingStats;
-
- struct TThreadContext {
- IExecutorPool *Pool = nullptr;
- ui32 CapturedActivation = 0;
- ESendingType CapturedType = ESendingType::Lazy;
- ESendingType SendingType = ESendingType::Common;
- bool IsEnoughCpu = true;
- ui32 WriteTurn = 0;
- TWorkerId WorkerId;
- ui16 LocalQueueSize = 0;
- TWaitingStats<ui64> *WaitingStats = nullptr;
- bool IsCurrentRecipientAService = false;
- };
-
- extern Y_POD_THREAD(TThreadContext*) TlsThreadContext; // in actor.cpp
-
-}
diff --git a/library/cpp/actors/core/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/core/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 83199b12b6..0000000000
--- a/library/cpp/actors/core/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,88 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut)
-target_include_directories(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core
-)
-target_link_libraries(library-cpp-actors-core-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-core-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/benchmark_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/performance_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_payload_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mon_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut
- TEST_TARGET
- library-cpp-actors-core-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-core-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut)
diff --git a/library/cpp/actors/core/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/core/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index e56713f557..0000000000
--- a/library/cpp/actors/core/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,89 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut)
-target_include_directories(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core
-)
-target_link_libraries(library-cpp-actors-core-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-core-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/benchmark_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/performance_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_payload_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mon_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut
- TEST_TARGET
- library-cpp-actors-core-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-core-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut)
diff --git a/library/cpp/actors/core/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/core/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index bfd1f0f226..0000000000
--- a/library/cpp/actors/core/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,92 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut)
-target_include_directories(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core
-)
-target_link_libraries(library-cpp-actors-core-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-core-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/benchmark_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/performance_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_payload_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mon_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut
- TEST_TARGET
- library-cpp-actors-core-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-core-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-core-ut)
diff --git a/library/cpp/actors/core/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/core/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 145a291b45..0000000000
--- a/library/cpp/actors/core/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,94 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut)
-target_include_directories(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core
-)
-target_link_libraries(library-cpp-actors-core-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-core-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/benchmark_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/performance_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_payload_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mon_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut
- TEST_TARGET
- library-cpp-actors-core-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-core-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-core-ut)
diff --git a/library/cpp/actors/core/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/core/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 8e7e6bd499..0000000000
--- a/library/cpp/actors/core/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut)
-target_include_directories(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core
-)
-target_link_libraries(library-cpp-actors-core-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- cpp-actors-testlib
-)
-target_sources(library-cpp-actors-core-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_coroutine_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/benchmark_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/actorsystem_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/performance_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ask_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/balancer_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_payload_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/event_pb_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_basic_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/executor_pool_united_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/mon_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/scheduler_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut
- TEST_TARGET
- library-cpp-actors-core-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-core-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut)
diff --git a/library/cpp/actors/core/ut/ya.make b/library/cpp/actors/core/ut/ya.make
deleted file mode 100644
index 44803e7619..0000000000
--- a/library/cpp/actors/core/ut/ya.make
+++ /dev/null
@@ -1,43 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/core)
-
-FORK_SUBTESTS()
-IF (SANITIZER_TYPE)
- SIZE(LARGE)
- TIMEOUT(1200)
- TAG(ya:fat)
- SPLIT_FACTOR(20)
- REQUIREMENTS(
- ram:32
- )
-ELSE()
- SIZE(MEDIUM)
- TIMEOUT(600)
- REQUIREMENTS(
- ram:16
- )
-ENDIF()
-
-
-PEERDIR(
- library/cpp/actors/interconnect
- library/cpp/actors/testlib
-)
-
-SRCS(
- actor_coroutine_ut.cpp
- benchmark_ut.cpp
- actor_ut.cpp
- actorsystem_ut.cpp
- performance_ut.cpp
- ask_ut.cpp
- balancer_ut.cpp
- event_pb_payload_ut.cpp
- event_pb_ut.cpp
- executor_pool_basic_ut.cpp
- executor_pool_united_ut.cpp
- log_ut.cpp
- mon_ut.cpp
- scheduler_actor_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.darwin-arm64.txt b/library/cpp/actors/core/ut_fat/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index c5420870b1..0000000000
--- a/library/cpp/actors/core/ut_fat/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut_fat)
-target_link_libraries(library-cpp-actors-core-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-core-ut_fat PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-core-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 20
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut_fat
- TEST_TARGET
- library-cpp-actors-core-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- TIMEOUT
- 1200
-)
-target_allocator(library-cpp-actors-core-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 88a9860fc1..0000000000
--- a/library/cpp/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut_fat)
-target_link_libraries(library-cpp-actors-core-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-core-ut_fat PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-core-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 20
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut_fat
- TEST_TARGET
- library-cpp-actors-core-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- TIMEOUT
- 1200
-)
-target_allocator(library-cpp-actors-core-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.linux-aarch64.txt b/library/cpp/actors/core/ut_fat/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 653d98fe60..0000000000
--- a/library/cpp/actors/core/ut_fat/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut_fat)
-target_link_libraries(library-cpp-actors-core-ut_fat PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-core-ut_fat PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-core-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 20
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut_fat
- TEST_TARGET
- library-cpp-actors-core-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- TIMEOUT
- 1200
-)
-target_allocator(library-cpp-actors-core-ut_fat
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.linux-x86_64.txt b/library/cpp/actors/core/ut_fat/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index e929e6f394..0000000000
--- a/library/cpp/actors/core/ut_fat/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut_fat)
-target_link_libraries(library-cpp-actors-core-ut_fat PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-core-ut_fat PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-core-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 20
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut_fat
- TEST_TARGET
- library-cpp-actors-core-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- TIMEOUT
- 1200
-)
-target_allocator(library-cpp-actors-core-ut_fat
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.windows-x86_64.txt b/library/cpp/actors/core/ut_fat/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index cd693cd949..0000000000
--- a/library/cpp/actors/core/ut_fat/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-core-ut_fat)
-target_link_libraries(library-cpp-actors-core-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
-)
-target_sources(library-cpp-actors-core-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-core-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 20
-)
-add_yunittest(
- NAME
- library-cpp-actors-core-ut_fat
- TEST_TARGET
- library-cpp-actors-core-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-core-ut_fat
- PROPERTY
- TIMEOUT
- 1200
-)
-target_allocator(library-cpp-actors-core-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/ut_fat/actor_benchmark.cpp b/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
deleted file mode 100644
index d47cae6ebb..0000000000
--- a/library/cpp/actors/core/ut_fat/actor_benchmark.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-
-
-#include <library/cpp/actors/core/actor_benchmark_helper.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-
-
-using namespace NActors;
-using namespace NActors::NTests;
-
-
-struct THeavyActorBenchmarkSettings : TActorBenchmarkSettings {
- static constexpr ui32 TotalEventsAmountPerThread = 1'000'000;
-
- static constexpr auto MailboxTypes = {
- TMailboxType::HTSwap,
- };
-};
-
-
-Y_UNIT_TEST_SUITE(HeavyActorBenchmark) {
-
- using TActorBenchmark = ::NActors::NTests::TActorBenchmark<THeavyActorBenchmarkSettings>;
- using TSettings = TActorBenchmark::TSettings;
-
-
- Y_UNIT_TEST(SendActivateReceiveCSV) {
- std::vector<ui32> threadsList;
- for (ui32 threads = 1; threads <= 28; threads++) {
- threadsList.push_back(threads);
- }
- std::vector<ui32> actorPairsList = {512};
- TActorBenchmark::RunSendActivateReceiveCSV(threadsList, actorPairsList, {1,100, 200}, TDuration::Seconds(1));
- }
-
- Y_UNIT_TEST(StarSendActivateReceiveCSV) {
- std::vector<ui32> threadsList;
- for (ui32 threads = 1; threads <= 28; threads++) {
- threadsList.push_back(threads);
- }
- std::vector<ui32> actorPairsList = {512};
- std::vector<ui32> starsList = {10};
- TActorBenchmark::RunStarSendActivateReceiveCSV(threadsList, actorPairsList, starsList);
- }
-
-}
diff --git a/library/cpp/actors/core/ut_fat/ya.make b/library/cpp/actors/core/ut_fat/ya.make
deleted file mode 100644
index 937e59720d..0000000000
--- a/library/cpp/actors/core/ut_fat/ya.make
+++ /dev/null
@@ -1,32 +0,0 @@
-UNITTEST()
-
-FORK_SUBTESTS()
-
-IF (SANITIZER_TYPE)
- SIZE(LARGE)
- TIMEOUT(2400)
- TAG(ya:fat)
- SPLIT_FACTOR(20)
- REQUIREMENTS(
- ram:32
- )
-ELSE()
- SIZE(LARGE)
- TIMEOUT(1200)
- TAG(ya:fat)
- SPLIT_FACTOR(20)
- REQUIREMENTS(
- ram:16
- )
-ENDIF()
-
-
-PEERDIR(
- library/cpp/actors/core
-)
-
-SRCS(
- actor_benchmark.cpp
-)
-
-END()
diff --git a/library/cpp/actors/core/worker_context.cpp b/library/cpp/actors/core/worker_context.cpp
deleted file mode 100644
index ada6c997d4..0000000000
--- a/library/cpp/actors/core/worker_context.cpp
+++ /dev/null
@@ -1,7 +0,0 @@
-#include "worker_context.h"
-#include "probes.h"
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
-}
diff --git a/library/cpp/actors/core/worker_context.h b/library/cpp/actors/core/worker_context.h
deleted file mode 100644
index b51ff55cd3..0000000000
--- a/library/cpp/actors/core/worker_context.h
+++ /dev/null
@@ -1,192 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-//#include "actorsystem.h"
-#include "event.h"
-#include "executor_pool.h"
-#include "lease.h"
-#include "mailbox.h"
-#include "mon_stats.h"
-
-#include <library/cpp/actors/util/cpumask.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/intrinsics.h>
-#include <library/cpp/actors/util/thread.h>
-
-#include <library/cpp/lwtrace/shuttle.h>
-
-namespace NActors {
- struct TWorkerContext {
- TWorkerId WorkerId;
- const TCpuId CpuId;
- TLease Lease;
- IExecutorPool* Executor = nullptr;
- TMailboxTable* MailboxTable = nullptr;
- ui64 TimePerMailboxTs = 0;
- ui32 EventsPerMailbox = 0;
- ui64 SoftDeadlineTs = ui64(-1);
- TExecutorThreadStats* Stats = &WorkerStats; // pool stats
- TExecutorThreadStats WorkerStats;
- TPoolId PoolId = MaxPools;
- mutable NLWTrace::TOrbit Orbit;
- bool IsNeededToWaitNextActivation = true;
- i64 HPStart = 0;
- ui32 ExecutedEvents = 0;
-
- TWorkerContext(TWorkerId workerId, TCpuId cpuId)
- : WorkerId(workerId)
- , CpuId(cpuId)
- , Lease(WorkerId, NeverExpire)
- {}
-
-#ifdef ACTORSLIB_COLLECT_EXEC_STATS
- void GetCurrentStats(TExecutorThreadStats& statsCopy) const {
- statsCopy = TExecutorThreadStats();
- statsCopy.Aggregate(*Stats);
- }
-
- void AddElapsedCycles(ui32 activityType, i64 elapsed) {
- Y_DEBUG_ABORT_UNLESS(activityType < Stats->MaxActivityType());
- RelaxedStore(&Stats->ElapsedTicks, RelaxedLoad(&Stats->ElapsedTicks) + elapsed);
- RelaxedStore(&Stats->ElapsedTicksByActivity[activityType], RelaxedLoad(&Stats->ElapsedTicksByActivity[activityType]) + elapsed);
- }
-
- void AddParkedCycles(i64 elapsed) {
- RelaxedStore(&Stats->ParkedTicks, RelaxedLoad(&Stats->ParkedTicks) + elapsed);
- }
-
- void AddBlockedCycles(i64 elapsed) {
- RelaxedStore(&Stats->BlockedTicks, RelaxedLoad(&Stats->BlockedTicks) + elapsed);
- }
-
- void IncrementSentEvents() {
- RelaxedStore(&Stats->SentEvents, RelaxedLoad(&Stats->SentEvents) + 1);
- }
-
- void IncrementPreemptedEvents() {
- RelaxedStore(&Stats->PreemptedEvents, RelaxedLoad(&Stats->PreemptedEvents) + 1);
- }
-
- void DecrementActorsAliveByActivity(ui32 activityType) {
- if (activityType >= Stats->MaxActivityType()) {
- activityType = 0;
- }
- RelaxedStore(&Stats->ActorsAliveByActivity[activityType], Stats->ActorsAliveByActivity[activityType] - 1);
- }
-
- inline void IncrementNonDeliveredEvents() {
- RelaxedStore(&Stats->NonDeliveredEvents, RelaxedLoad(&Stats->NonDeliveredEvents) + 1);
- }
-
- inline void IncrementMailboxPushedOutByTailSending() {
- RelaxedStore(&Stats->MailboxPushedOutByTailSending, RelaxedLoad(&Stats->MailboxPushedOutByTailSending) + 1);
- }
-
- inline void IncrementMailboxPushedOutBySoftPreemption() {
- RelaxedStore(&Stats->MailboxPushedOutBySoftPreemption, RelaxedLoad(&Stats->MailboxPushedOutBySoftPreemption) + 1);
- }
-
- inline void IncrementMailboxPushedOutByTime() {
- RelaxedStore(&Stats->MailboxPushedOutByTime, RelaxedLoad(&Stats->MailboxPushedOutByTime) + 1);
- }
-
- inline void IncrementMailboxPushedOutByEventCount() {
- RelaxedStore(&Stats->MailboxPushedOutByEventCount, RelaxedLoad(&Stats->MailboxPushedOutByEventCount) + 1);
- }
-
- inline void IncrementEmptyMailboxActivation() {
- RelaxedStore(&Stats->EmptyMailboxActivation, RelaxedLoad(&Stats->EmptyMailboxActivation) + 1);
- }
-
- double AddActivationStats(i64 scheduleTs, i64 deliveredTs) {
- i64 ts = deliveredTs > scheduleTs ? deliveredTs - scheduleTs : 0;
- double usec = NHPTimer::GetSeconds(ts) * 1000000.0;
- Stats->ActivationTimeHistogram.Add(usec);
- RelaxedStore(&Stats->WorstActivationTimeUs, Max(Stats->WorstActivationTimeUs, (ui64)usec));
- return usec;
- }
-
- ui64 AddEventDeliveryStats(i64 sentTs, i64 deliveredTs) {
- ui64 usecDeliv = deliveredTs > sentTs ? NHPTimer::GetSeconds(deliveredTs - sentTs) * 1000000 : 0;
- Stats->EventDeliveryTimeHistogram.Add(usecDeliv);
- return usecDeliv;
- }
-
- i64 AddEventProcessingStats(i64 deliveredTs, i64 processedTs, ui32 activityType, ui64 scheduled) {
- i64 elapsed = processedTs - deliveredTs;
- ui64 usecElapsed = NHPTimer::GetSeconds(elapsed) * 1000000;
- activityType = (activityType >= Stats->MaxActivityType()) ? 0 : activityType;
- Stats->EventProcessingCountHistogram.Add(usecElapsed);
- Stats->EventProcessingTimeHistogram.Add(usecElapsed, elapsed);
- RelaxedStore(&Stats->ReceivedEvents, RelaxedLoad(&Stats->ReceivedEvents) + 1);
- RelaxedStore(&Stats->ReceivedEventsByActivity[activityType], RelaxedLoad(&Stats->ReceivedEventsByActivity[activityType]) + 1);
- RelaxedStore(&Stats->ScheduledEventsByActivity[activityType], RelaxedLoad(&Stats->ScheduledEventsByActivity[activityType]) + scheduled);
- AddElapsedCycles(activityType, elapsed);
- return elapsed;
- }
-
- void UpdateActorsStats(size_t dyingActorsCnt) {
- if (dyingActorsCnt) {
- AtomicAdd(Executor->DestroyedActors, dyingActorsCnt);
- }
- RelaxedStore(&Stats->PoolDestroyedActors, (ui64)RelaxedLoad(&Executor->DestroyedActors));
- RelaxedStore(&Stats->PoolActorRegistrations, (ui64)RelaxedLoad(&Executor->ActorRegistrations));
- RelaxedStore(&Stats->PoolAllocatedMailboxes, MailboxTable->GetAllocatedMailboxCount());
- }
-
- void UpdateThreadTime() {
- RelaxedStore(&WorkerStats.SafeElapsedTicks, (ui64)RelaxedLoad(&WorkerStats.ElapsedTicks));
- RelaxedStore(&WorkerStats.CpuUs, ThreadCPUTime());
- }
-
- void IncreaseNotEnoughCpuExecutions() {
- RelaxedStore(&WorkerStats.NotEnoughCpuExecutions,
- (ui64)RelaxedLoad(&WorkerStats.NotEnoughCpuExecutions) + 1);
- }
-#else
- void GetCurrentStats(TExecutorThreadStats&) const {}
- inline void AddElapsedCycles(ui32, i64) {}
- inline void AddParkedCycles(i64) {}
- inline void AddBlockedCycles(i64) {}
- inline void IncrementSentEvents() {}
- inline void IncrementPreemptedEvents() {}
- inline void IncrementMailboxPushedOutByTailSending() {}
- inline void IncrementMailboxPushedOutBySoftPreemption() {}
- inline void IncrementMailboxPushedOutByTime() {}
- inline void IncrementMailboxPushedOutByEventCount() {}
- inline void IncrementEmptyMailboxActivation() {}
- void DecrementActorsAliveByActivity(ui32) {}
- void IncrementNonDeliveredEvents() {}
- double AddActivationStats(i64, i64) { return 0; }
- ui64 AddEventDeliveryStats(i64, i64) { return 0; }
- i64 AddEventProcessingStats(i64, i64, ui32, ui64) { return 0; }
- void UpdateActorsStats(size_t, IExecutorPool*) {}
- void UpdateThreadTime() {}
- void IncreaseNotEnoughCpuExecutions() {}
-#endif
-
- void Switch(IExecutorPool* executor,
- TMailboxTable* mailboxTable,
- ui64 timePerMailboxTs,
- ui32 eventsPerMailbox,
- ui64 softDeadlineTs,
- TExecutorThreadStats* stats)
- {
- Executor = executor;
- MailboxTable = mailboxTable;
- TimePerMailboxTs = timePerMailboxTs;
- EventsPerMailbox = eventsPerMailbox;
- SoftDeadlineTs = softDeadlineTs;
- Stats = stats;
- PoolId = Executor ? Executor->PoolId : MaxPools;
- }
-
- void SwitchToIdle() {
- Executor = nullptr;
- MailboxTable = nullptr;
- //Stats = &WorkerStats; // TODO: in actorsystem 2.0 idle stats cannot be related to specific pool
- PoolId = MaxPools;
- }
- };
-}
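
The TWorkerContext removed above accumulates per-worker statistics with actorlib's RelaxedStore/RelaxedLoad helpers: each counter is written only by its owning worker thread and read concurrently by monitoring code, so a relaxed load followed by a relaxed store suffices and no atomic read-modify-write is needed. A minimal sketch of the same pattern in standard C++, assuming plain std::atomic counters (the names below are illustrative and not part of the removed code):

    #include <atomic>
    #include <cstdint>
    #include <cstdio>

    // Per-worker counters: written by a single worker thread, read by monitoring threads.
    struct TWorkerStats {
        std::atomic<std::uint64_t> SentEvents{0};
        std::atomic<std::uint64_t> ReceivedEvents{0};
    };

    // Relaxed load + relaxed store is enough because only the owning worker writes,
    // mirroring the RelaxedStore(&x, RelaxedLoad(&x) + 1) idiom in the removed header.
    inline void IncrementRelaxed(std::atomic<std::uint64_t>& counter) {
        counter.store(counter.load(std::memory_order_relaxed) + 1,
                      std::memory_order_relaxed);
    }

    int main() {
        TWorkerStats stats;
        IncrementRelaxed(stats.SentEvents);
        IncrementRelaxed(stats.ReceivedEvents);
        // A monitoring thread would snapshot the counters with relaxed loads.
        std::printf("sent=%llu received=%llu\n",
                    static_cast<unsigned long long>(stats.SentEvents.load(std::memory_order_relaxed)),
                    static_cast<unsigned long long>(stats.ReceivedEvents.load(std::memory_order_relaxed)));
    }
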
diff --git a/library/cpp/actors/core/ya.make b/library/cpp/actors/core/ya.make
deleted file mode 100644
index 8dadea5fdc..0000000000
--- a/library/cpp/actors/core/ya.make
+++ /dev/null
@@ -1,133 +0,0 @@
-LIBRARY()
-
-NO_WSHADOW()
-
-IF (PROFILE_MEMORY_ALLOCATIONS)
- CFLAGS(-DPROFILE_MEMORY_ALLOCATIONS)
-ENDIF()
-
-IF (ALLOCATOR == "B" OR ALLOCATOR == "BS" OR ALLOCATOR == "C")
- CXXFLAGS(-DBALLOC)
- PEERDIR(
- library/cpp/balloc/optional
- )
-ENDIF()
-
-SRCS(
- actor_bootstrapped.cpp
- actor_coroutine.cpp
- actor_coroutine.h
- actor.cpp
- actor.h
- actor_virtual.cpp
- actorid.cpp
- actorid.h
- actorsystem.cpp
- actorsystem.h
- ask.cpp
- ask.h
- av_bootstrapped.cpp
- balancer.h
- balancer.cpp
- buffer.cpp
- buffer.h
- callstack.cpp
- callstack.h
- config.h
- cpu_manager.cpp
- cpu_manager.h
- cpu_state.h
- defs.h
- event.cpp
- event.h
- event_load.cpp
- event_local.h
- event_pb.cpp
- event_pb.h
- events.h
- events_undelivered.cpp
- executelater.h
- executor_pool_base.cpp
- executor_pool_base.h
- executor_pool_basic.cpp
- executor_pool_basic.h
- executor_pool_io.cpp
- executor_pool_io.h
- executor_pool_united.cpp
- executor_pool_united.h
- executor_thread.cpp
- executor_thread.h
- harmonizer.cpp
- harmonizer.h
- hfunc.h
- interconnect.cpp
- interconnect.h
- invoke.h
- io_dispatcher.cpp
- io_dispatcher.h
- lease.h
- log.cpp
- log.h
- log_settings.cpp
- log_settings.h
- log_buffer.cpp
- log_buffer.h
- log_metrics.h
- mailbox.cpp
- mailbox.h
- mailbox_queue_revolving.h
- mailbox_queue_simple.h
- mon.h
- mon_stats.h
- monotonic.cpp
- monotonic.h
- monotonic_provider.cpp
- monotonic_provider.h
- worker_context.cpp
- worker_context.h
- probes.cpp
- probes.h
- process_stats.cpp
- process_stats.h
- scheduler_actor.cpp
- scheduler_actor.h
- scheduler_basic.cpp
- scheduler_basic.h
- scheduler_cookie.cpp
- scheduler_cookie.h
- scheduler_queue.h
- servicemap.h
-)
-
-GENERATE_ENUM_SERIALIZATION(defs.h)
-GENERATE_ENUM_SERIALIZATION(actor.h)
-GENERATE_ENUM_SERIALIZATION(log_iface.h)
-
-PEERDIR(
- library/cpp/actors/actor_type
- library/cpp/actors/memory_log
- library/cpp/actors/prof
- library/cpp/actors/protos
- library/cpp/actors/util
- library/cpp/execprofile
- library/cpp/json/writer
- library/cpp/logger
- library/cpp/lwtrace
- library/cpp/monlib/dynamic_counters
- library/cpp/svnversion
- library/cpp/time_provider
- library/cpp/threading/future
-)
-
-IF (SANITIZER_TYPE == "thread")
- SUPPRESSIONS(
- tsan.supp
- )
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
- ut_fat
-)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.darwin-arm64.txt b/library/cpp/actors/cppcoro/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 9795130141..0000000000
--- a/library/cpp/actors/cppcoro/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(corobenchmark)
-add_subdirectory(ut)
-
-add_library(cpp-actors-cppcoro)
-target_link_libraries(cpp-actors-cppcoro PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_sources(cpp-actors-cppcoro PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/await_callback.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_group.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_result.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task.cpp
-)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/cppcoro/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 9795130141..0000000000
--- a/library/cpp/actors/cppcoro/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(corobenchmark)
-add_subdirectory(ut)
-
-add_library(cpp-actors-cppcoro)
-target_link_libraries(cpp-actors-cppcoro PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_sources(cpp-actors-cppcoro PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/await_callback.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_group.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_result.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task.cpp
-)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.linux-aarch64.txt b/library/cpp/actors/cppcoro/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 514824dad2..0000000000
--- a/library/cpp/actors/cppcoro/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(corobenchmark)
-add_subdirectory(ut)
-
-add_library(cpp-actors-cppcoro)
-target_link_libraries(cpp-actors-cppcoro PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_sources(cpp-actors-cppcoro PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/await_callback.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_group.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_result.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task.cpp
-)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.linux-x86_64.txt b/library/cpp/actors/cppcoro/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 514824dad2..0000000000
--- a/library/cpp/actors/cppcoro/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(corobenchmark)
-add_subdirectory(ut)
-
-add_library(cpp-actors-cppcoro)
-target_link_libraries(cpp-actors-cppcoro PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_sources(cpp-actors-cppcoro PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/await_callback.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_group.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_result.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task.cpp
-)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.windows-x86_64.txt b/library/cpp/actors/cppcoro/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 9795130141..0000000000
--- a/library/cpp/actors/cppcoro/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(corobenchmark)
-add_subdirectory(ut)
-
-add_library(cpp-actors-cppcoro)
-target_link_libraries(cpp-actors-cppcoro PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_sources(cpp-actors-cppcoro PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/await_callback.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_group.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_result.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task.cpp
-)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt b/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 1043b6f834..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(corobenchmark)
-target_link_libraries(corobenchmark PUBLIC
- contrib-libs-cxxsupp
- yutil
- testing-benchmark-main
- cpp-actors-cppcoro
-)
-target_link_options(corobenchmark PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(corobenchmark PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/corobenchmark/main.cpp
-)
-target_allocator(corobenchmark
- system_allocator
-)
-vcs_info(corobenchmark)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 1b98f8aac0..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(corobenchmark)
-target_link_libraries(corobenchmark PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- testing-benchmark-main
- cpp-actors-cppcoro
-)
-target_link_options(corobenchmark PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(corobenchmark PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/corobenchmark/main.cpp
-)
-target_allocator(corobenchmark
- system_allocator
-)
-vcs_info(corobenchmark)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt b/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index f12dfdad8d..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(corobenchmark)
-target_link_libraries(corobenchmark PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- testing-benchmark-main
- cpp-actors-cppcoro
-)
-target_link_options(corobenchmark PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(corobenchmark PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/corobenchmark/main.cpp
-)
-target_allocator(corobenchmark
- cpp-malloc-jemalloc
-)
-vcs_info(corobenchmark)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt b/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index e5b37926d1..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,36 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(corobenchmark)
-target_link_libraries(corobenchmark PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- testing-benchmark-main
- cpp-actors-cppcoro
-)
-target_link_options(corobenchmark PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(corobenchmark PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/corobenchmark/main.cpp
-)
-target_allocator(corobenchmark
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(corobenchmark)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt b/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index c9a8359b4b..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(corobenchmark)
-target_link_libraries(corobenchmark PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- testing-benchmark-main
- cpp-actors-cppcoro
-)
-target_sources(corobenchmark PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/corobenchmark/main.cpp
-)
-target_allocator(corobenchmark
- system_allocator
-)
-vcs_info(corobenchmark)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/main.cpp b/library/cpp/actors/cppcoro/corobenchmark/main.cpp
deleted file mode 100644
index 49504e7105..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/main.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#include <library/cpp/actors/cppcoro/task.h>
-#include <library/cpp/actors/cppcoro/await_callback.h>
-#include <library/cpp/testing/benchmark/bench.h>
-
-using namespace NActors;
-
-namespace {
-
- int LastValue = 0;
-
- Y_NO_INLINE int NextFuncValue() {
- return ++LastValue;
- }
-
- Y_NO_INLINE void IterateFuncValues(size_t iterations) {
- for (size_t i = 0; i < iterations; ++i) {
- int value = NextFuncValue();
- Y_DO_NOT_OPTIMIZE_AWAY(value);
- }
- }
-
- Y_NO_INLINE TTask<int> NextTaskValue() {
- co_return ++LastValue;
- }
-
- Y_NO_INLINE TTask<void> IterateTaskValues(size_t iterations) {
- for (size_t i = 0; i < iterations; ++i) {
- int value = co_await NextTaskValue();
- Y_DO_NOT_OPTIMIZE_AWAY(value);
- }
- }
-
- std::coroutine_handle<> Paused;
-
- struct {
- static bool await_ready() noexcept {
- return false;
- }
- static void await_suspend(std::coroutine_handle<> h) noexcept {
- Paused = h;
- }
- static int await_resume() noexcept {
- return ++LastValue;
- }
- } Pause;
-
- Y_NO_INLINE TTask<void> IteratePauseValues(size_t iterations) {
- for (size_t i = 0; i < iterations; ++i) {
- int value = co_await Pause;
- Y_DO_NOT_OPTIMIZE_AWAY(value);
- }
- }
-
-} // namespace
-
-Y_CPU_BENCHMARK(FuncCalls, iface) {
- IterateFuncValues(iface.Iterations());
-}
-
-Y_CPU_BENCHMARK(TaskCalls, iface) {
- bool finished = false;
- AwaitThenCallback(IterateTaskValues(iface.Iterations()), [&]{
- finished = true;
- });
- Y_ABORT_UNLESS(finished);
-}
-
-Y_CPU_BENCHMARK(CoroAwaits, iface) {
- bool finished = false;
- AwaitThenCallback(IteratePauseValues(iface.Iterations()), [&]{
- finished = true;
- });
- while (!finished) {
- std::exchange(Paused, {}).resume();
- }
-}
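
The CoroAwaits benchmark removed above drives a coroutine through a hand-written awaiter: await_suspend parks the coroutine handle in a global, and the driver loop repeatedly resumes it until the coroutine runs to completion. A self-contained sketch of that suspend/resume round trip using only the standard C++20 <coroutine> facilities (the TDetached and TPause names are illustrative, not part of the removed benchmark):

    #include <coroutine>
    #include <cstdio>
    #include <exception>
    #include <utility>

    // Fire-and-forget coroutine type: starts eagerly and cleans up its frame on completion.
    struct TDetached {
        struct promise_type {
            TDetached get_return_object() noexcept { return {}; }
            std::suspend_never initial_suspend() noexcept { return {}; }
            std::suspend_never final_suspend() noexcept { return {}; }
            void return_void() noexcept {}
            void unhandled_exception() { std::terminate(); }
        };
    };

    // Global "parking spot" for the suspended coroutine, as in the removed Pause awaiter.
    std::coroutine_handle<> Paused;

    struct TPause {
        bool await_ready() const noexcept { return false; }
        void await_suspend(std::coroutine_handle<> h) noexcept { Paused = h; }
        int await_resume() const noexcept { return 42; }
    };

    TDetached Worker(bool& finished, size_t iterations) {
        for (size_t i = 0; i < iterations; ++i) {
            int value = co_await TPause{};   // suspends here; the driver resumes it below
            (void)value;
        }
        finished = true;
    }

    int main() {
        bool finished = false;
        Worker(finished, 3);
        // Driver loop: keep resuming the parked coroutine until it finishes.
        while (!finished) {
            std::exchange(Paused, {}).resume();
        }
        std::puts("done");
    }
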
diff --git a/library/cpp/actors/cppcoro/corobenchmark/ya.make b/library/cpp/actors/cppcoro/corobenchmark/ya.make
deleted file mode 100644
index ef5ad4135c..0000000000
--- a/library/cpp/actors/cppcoro/corobenchmark/ya.make
+++ /dev/null
@@ -1,11 +0,0 @@
-Y_BENCHMARK()
-
-PEERDIR(
- library/cpp/actors/cppcoro
-)
-
-SRCS(
- main.cpp
-)
-
-END()
diff --git a/library/cpp/actors/cppcoro/task_actor.cpp b/library/cpp/actors/cppcoro/task_actor.cpp
deleted file mode 100644
index 8a9451c8e5..0000000000
--- a/library/cpp/actors/cppcoro/task_actor.cpp
+++ /dev/null
@@ -1,183 +0,0 @@
-#include "task_actor.h"
-#include "await_callback.h"
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/hfunc.h>
-
-namespace NActors {
-
- class TTaskActorImpl;
-
- static Y_POD_THREAD(TTaskActorImpl*) TlsCurrentTaskActor{nullptr};
-
- struct TCurrentTaskActorGuard {
- TCurrentTaskActorGuard(TTaskActorImpl* current) noexcept {
- Y_ABORT_UNLESS(TlsCurrentTaskActor == nullptr);
- TlsCurrentTaskActor = current;
- }
-
- ~TCurrentTaskActorGuard() noexcept {
- TlsCurrentTaskActor = nullptr;
- }
- };
-
- enum : ui32 {
- EvResumeTask = EventSpaceBegin(TEvents::ES_SYSTEM) + 256,
- };
-
- struct TEvResumeTask : public TEventLocal<TEvResumeTask, EvResumeTask> {
- std::coroutine_handle<> Handle;
- TTaskResult<void>* Result;
-
- explicit TEvResumeTask(std::coroutine_handle<> handle, TTaskResult<void>* result) noexcept
- : Handle(handle)
- , Result(result)
- {}
-
- ~TEvResumeTask() noexcept {
- if (Handle) {
- Result->SetException(std::make_exception_ptr(TTaskCancelled()));
- Handle.resume();
- }
- }
- };
-
- class TTaskActorResult final : public TAtomicRefCount<TTaskActorResult> {
- public:
- bool Finished = false;
- };
-
- class TTaskActorImpl : public TActor<TTaskActorImpl> {
- friend class TTaskActor;
- friend class TAfterAwaiter;
- friend class TBindAwaiter;
-
- public:
- TTaskActorImpl(TTask<void>&& task)
- : TActor(&TThis::StateBoot)
- , Task(std::move(task))
- {
- Y_ABORT_UNLESS(Task);
- }
-
- ~TTaskActorImpl() {
- Stopped = true;
- while (EventAwaiter) {
- // Unblock event awaiter until task stops trying
- TCurrentTaskActorGuard guard(this);
- std::exchange(EventAwaiter, {}).resume();
- }
- }
-
- void Registered(TActorSystem* sys, const TActorId& parent) override {
- ParentId = parent;
- sys->Send(new IEventHandle(TEvents::TSystem::Bootstrap, 0, SelfId(), SelfId(), {}, 0));
- }
-
- STATEFN(StateBoot) {
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvents::TSystem::Bootstrap, "Expected bootstrap event");
- TCurrentTaskActorGuard guard(this);
- Become(&TThis::StateWork);
- AwaitThenCallback(std::move(Task).WhenDone(),
- [result = Result](TTaskResult<void>&& outcome) noexcept {
- result->Finished = true;
- try {
- outcome.Value();
- } catch (TTaskCancelled&) {
- // ignore
- }
- });
- Check();
- }
-
- STATEFN(StateWork) {
- TCurrentTaskActorGuard guard(this);
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvResumeTask, Handle);
- default:
- Y_ABORT_UNLESS(EventAwaiter);
- Event.reset(ev.Release());
- std::exchange(EventAwaiter, {}).resume();
- }
- Check();
- }
-
- void Handle(TEvResumeTask::TPtr& ev) {
- auto* msg = ev->Get();
- msg->Result->SetValue();
- std::exchange(msg->Handle, {}).resume();
- }
-
- bool Check() {
- if (Result->Finished) {
- Y_ABORT_UNLESS(!EventAwaiter, "Task terminated while waiting for the next event");
- PassAway();
- return false;
- }
-
- Y_ABORT_UNLESS(EventAwaiter, "Task suspended without waiting for the next event");
- return true;
- }
-
- void WaitForEvent(std::coroutine_handle<> h) noexcept {
- Y_ABORT_UNLESS(!EventAwaiter, "Task cannot have multiple awaiters for the next event");
- EventAwaiter = h;
- }
-
- std::unique_ptr<IEventHandle> FinishWaitForEvent() {
- if (Stopped) {
- throw TTaskCancelled();
- }
- Y_ABORT_UNLESS(Event, "Task does not have current event");
- return std::move(Event);
- }
-
- private:
- TIntrusivePtr<TTaskActorResult> Result = MakeIntrusive<TTaskActorResult>();
- TTask<void> Task;
- TActorId ParentId;
- std::coroutine_handle<> EventAwaiter;
- std::unique_ptr<IEventHandle> Event;
- bool Stopped = false;
- };
-
- void TTaskActorNextEvent::await_suspend(std::coroutine_handle<> h) noexcept {
- Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
- TlsCurrentTaskActor->WaitForEvent(h);
- }
-
- std::unique_ptr<IEventHandle> TTaskActorNextEvent::await_resume() {
- Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
- return TlsCurrentTaskActor->FinishWaitForEvent();
- }
-
- IActor* TTaskActor::Create(TTask<void>&& task) {
- return new TTaskActorImpl(std::move(task));
- }
-
- TActorIdentity TTaskActor::SelfId() noexcept {
- Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
- return TlsCurrentTaskActor->SelfId();
- }
-
- TActorId TTaskActor::ParentId() noexcept {
- Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
- return TlsCurrentTaskActor->ParentId;
- }
-
- void TAfterAwaiter::await_suspend(std::coroutine_handle<> h) noexcept {
- Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
- TlsCurrentTaskActor->Schedule(Duration, new TEvResumeTask(h, &Result));
- }
-
- bool TBindAwaiter::await_ready() noexcept {
- if (TlsCurrentTaskActor && TlsCurrentTaskActor->SelfId() == ActorId) {
- return true;
- }
- return false;
- }
-
- void TBindAwaiter::await_suspend(std::coroutine_handle<> h) noexcept {
- Sys->Send(new IEventHandle(ActorId, ActorId, new TEvResumeTask(h, &Result)));
- }
-
-} // namespace NActors
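
TTaskActorImpl removed above publishes itself through a thread-local pointer (TlsCurrentTaskActor) managed by an RAII guard, which is how free functions such as TTaskActor::SelfId() locate the task actor currently running on the thread. A minimal standalone sketch of that thread-local context-guard pattern in standard C++ (the type and function names are illustrative, not the library's API):

    #include <cassert>
    #include <cstdio>

    // Thread-local "current context" pointer, in the spirit of TlsCurrentTaskActor.
    struct TContext {
        const char* Name;
    };

    thread_local TContext* CurrentContext = nullptr;

    // RAII guard: publishes the context for the duration of a scope and forbids nesting.
    struct TCurrentContextGuard {
        explicit TCurrentContextGuard(TContext* ctx) noexcept {
            assert(CurrentContext == nullptr && "guard must not be nested");
            CurrentContext = ctx;
        }
        ~TCurrentContextGuard() noexcept {
            CurrentContext = nullptr;
        }
        TCurrentContextGuard(const TCurrentContextGuard&) = delete;
        TCurrentContextGuard& operator=(const TCurrentContextGuard&) = delete;
    };

    void DoWork() {
        // Free functions reach the ambient context without it being passed explicitly,
        // analogous to how SelfId()/ParentId() find the running task actor.
        assert(CurrentContext != nullptr);
        std::printf("working inside %s\n", CurrentContext->Name);
    }

    int main() {
        TContext ctx{"example-context"};
        {
            TCurrentContextGuard guard(&ctx);
            DoWork();
        }
        // Outside the scope the ambient context is cleared again.
        assert(CurrentContext == nullptr);
    }
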
diff --git a/library/cpp/actors/cppcoro/task_actor.h b/library/cpp/actors/cppcoro/task_actor.h
deleted file mode 100644
index 75d498a04e..0000000000
--- a/library/cpp/actors/cppcoro/task_actor.h
+++ /dev/null
@@ -1,107 +0,0 @@
-#include <library/cpp/actors/core/actor.h>
-#include "task.h"
-
-namespace NActors {
-
- struct TTaskActorNextEvent {
- static constexpr bool await_ready() noexcept { return false; }
-
- static void await_suspend(std::coroutine_handle<> h) noexcept;
-
- static std::unique_ptr<IEventHandle> await_resume();
- };
-
- class TAfterAwaiter {
- public:
- TAfterAwaiter(TDuration duration)
- : Duration(duration)
- {}
-
- static constexpr bool await_ready() noexcept { return false; }
-
- void await_suspend(std::coroutine_handle<> h) noexcept;
-
- void await_resume() {
- Result.Value();
- }
-
- private:
- TDuration Duration;
- TTaskResult<void> Result;
- };
-
- class TBindAwaiter {
- public:
- TBindAwaiter(TActorSystem* sys, const TActorId& actorId)
- : Sys(sys)
- , ActorId(actorId)
- {}
-
- bool await_ready() noexcept;
-
- void await_suspend(std::coroutine_handle<> h) noexcept;
-
- void await_resume() {
- Result.Value();
- }
-
- private:
- TActorSystem* Sys;
- TActorId ActorId;
- TTaskResult<void> Result;
- };
-
- class TTaskActor {
- public:
- /**
- * Creates a new actor that will run the specified task.
- */
- static IActor* Create(TTask<void>&& task);
-
- /**
- * Returns the next actor event when awaited
- */
- static constexpr TTaskActorNextEvent NextEvent{};
-
- /**
- * Returns the identity of current task actor.
- */
- static TActorIdentity SelfId() noexcept;
-
- /**
- * Returns an actor id of the actor that registered current task actor.
- */
- static TActorId ParentId() noexcept;
-
- /**
- * Returns awaiter that completes after the specified timeout.
- */
- static TAfterAwaiter After(TDuration duration) noexcept {
- return TAfterAwaiter{ duration };
- }
-
- /**
- * Returns awaiter that completes on actor thread when awaited.
- */
- static TBindAwaiter Bind() noexcept {
- TActorId actorId = SelfId();
- TActorSystem* sys = TActivationContext::ActorSystem();
- return TBindAwaiter{ sys, actorId };
- }
-
- /**
- * Returns a task that runs the specified task, but binds the result
- * back to the actor thread. Useful when the specified task may be
- * working with non-actor coroutines.
- */
- template<class T>
- static TTask<T> Bind(TTask<T>&& task) {
- return [](TTask<T> task, TBindAwaiter bindTask) -> TTask<T> {
- auto result = co_await std::move(task).WhenDone();
- co_await bindTask;
- co_return std::move(result).Value();
- }(std::move(task), Bind());
- }
- };
-
-} // namespace NActors
diff --git a/library/cpp/actors/cppcoro/task_actor_ut.cpp b/library/cpp/actors/cppcoro/task_actor_ut.cpp
deleted file mode 100644
index 43186bfc55..0000000000
--- a/library/cpp/actors/cppcoro/task_actor_ut.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-#include "task_actor.h"
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-
-Y_UNIT_TEST_SUITE(TaskActor) {
-
- using namespace NActors;
-
- enum : ui32 {
- EvBegin = EventSpaceBegin(TEvents::ES_USERSPACE),
- EvRequest,
- EvResponse,
- EvStop,
- };
-
- struct TEvRequest: public TEventLocal<TEvRequest, EvRequest> {
- };
-
- struct TEvResponse: public TEventLocal<TEvResponse, EvResponse> {
- };
-
- struct TEvStop: public TEventLocal<TEvStop, EvStop> {
- };
-
- TTask<void> SimpleResponder() {
- for (;;) {
- auto ev = co_await TTaskActor::NextEvent;
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvRequest::EventType);
- auto* msg = ev->Get<TEvRequest>();
- Y_UNUSED(msg);
- TTaskActor::SelfId().Send(ev->Sender, new TEvResponse);
- }
- }
-
- TTask<void> SimpleRequester(TActorId responder, TManualEvent& doneEvent, std::atomic<int>& itemsProcessed) {
- // Note: it's ok to use lambda capture because captures outlive this coroutine
- auto singleRequest = [&]() -> TTask<bool> {
- TTaskActor::SelfId().Send(responder, new TEvRequest);
- auto ev = co_await TTaskActor::NextEvent;
- switch (ev->GetTypeRewrite()) {
- case TEvResponse::EventType:
- co_return true;
- case TEvStop::EventType:
- co_return false;
- default:
- Y_ABORT("Unexpected event");
- }
- };
- while (co_await singleRequest()) {
- ++itemsProcessed;
- }
- doneEvent.Signal();
- }
-
- void Check(TDuration duration, std::unique_ptr<IEventBase> stopEvent) {
- THolder<TActorSystemSetup> setup = MakeHolder<TActorSystemSetup>();
- setup->NodeId = 0;
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
- for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
- setup->Executors[i] = new TBasicExecutorPool(i, 5, 10, "basic");
- }
- setup->Scheduler = new TBasicSchedulerThread;
-
- TActorSystem actorSystem(setup);
-
- actorSystem.Start();
-
- TManualEvent doneEvent;
- std::atomic<int> itemsProcessed{0};
-
- auto responder = actorSystem.Register(TTaskActor::Create(SimpleResponder()));
- auto requester = actorSystem.Register(TTaskActor::Create(SimpleRequester(responder, doneEvent, itemsProcessed)));
- auto deadline = TMonotonic::Now() + duration;
- while (itemsProcessed.load() < 10) {
- UNIT_ASSERT_C(TMonotonic::Now() < deadline, "cannot observe 10 responses in " << duration);
- Sleep(TDuration::MilliSeconds(100));
- }
- actorSystem.Send(requester, stopEvent.release());
- doneEvent.WaitI();
-
- UNIT_ASSERT_GE(itemsProcessed.load(), 10);
-
- actorSystem.Stop();
- }
-
- Y_UNIT_TEST(Basic) {
- Check(TDuration::Seconds(10), std::make_unique<TEvStop>());
- }
-
-} // Y_UNIT_TEST_SUITE(TaskActor)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 5ec40dbae9..0000000000
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-cppcoro-ut)
-target_include_directories(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro
-)
-target_link_libraries(library-cpp-actors-cppcoro-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-cppcoro
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-cppcoro-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-cppcoro-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-cppcoro-ut
- TEST_TARGET
- library-cpp-actors-cppcoro-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-cppcoro-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 7cebff01de..0000000000
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-cppcoro-ut)
-target_include_directories(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro
-)
-target_link_libraries(library-cpp-actors-cppcoro-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-cppcoro
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-cppcoro-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-cppcoro-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-cppcoro-ut
- TEST_TARGET
- library-cpp-actors-cppcoro-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-cppcoro-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 4a11af3456..0000000000
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-cppcoro-ut)
-target_include_directories(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro
-)
-target_link_libraries(library-cpp-actors-cppcoro-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-cppcoro
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-cppcoro-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-cppcoro-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-cppcoro-ut
- TEST_TARGET
- library-cpp-actors-cppcoro-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-cppcoro-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 2e2412f989..0000000000
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-cppcoro-ut)
-target_include_directories(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro
-)
-target_link_libraries(library-cpp-actors-cppcoro-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-cppcoro
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-cppcoro-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-cppcoro-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-cppcoro-ut
- TEST_TARGET
- library-cpp-actors-cppcoro-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-cppcoro-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index e3b8b019c8..0000000000
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,62 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-cppcoro-ut)
-target_include_directories(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro
-)
-target_link_libraries(library-cpp-actors-cppcoro-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-cppcoro
- cpp-actors-testlib
-)
-target_sources(library-cpp-actors-cppcoro-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/cppcoro/task_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-cppcoro-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-cppcoro-ut
- TEST_TARGET
- library-cpp-actors-cppcoro-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-cppcoro-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-cppcoro-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/ut/ya.make b/library/cpp/actors/cppcoro/ut/ya.make
deleted file mode 100644
index 24a9c73613..0000000000
--- a/library/cpp/actors/cppcoro/ut/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/cppcoro)
-
-PEERDIR(
- library/cpp/actors/testlib
-)
-
-SRCS(
- task_ut.cpp
- task_actor_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/cppcoro/ya.make b/library/cpp/actors/cppcoro/ya.make
deleted file mode 100644
index 4df4f05302..0000000000
--- a/library/cpp/actors/cppcoro/ya.make
+++ /dev/null
@@ -1,25 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- library/cpp/actors/core
-)
-
-SRCS(
- await_callback.cpp
- await_callback.h
- task_actor.cpp
- task_actor.h
- task_group.cpp
- task_group.h
- task_result.cpp
- task_result.h
- task.cpp
- task.h
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- corobenchmark
- ut
-)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.darwin-arm64.txt b/library/cpp/actors/dnscachelib/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index b769b26408..0000000000
--- a/library/cpp/actors/dnscachelib/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-dnscachelib)
-target_link_libraries(cpp-actors-dnscachelib PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-c-ares
- library-cpp-lwtrace
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-dnscachelib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/dnscache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/probes.cpp
-)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/dnscachelib/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index b769b26408..0000000000
--- a/library/cpp/actors/dnscachelib/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-dnscachelib)
-target_link_libraries(cpp-actors-dnscachelib PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-c-ares
- library-cpp-lwtrace
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-dnscachelib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/dnscache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/probes.cpp
-)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.linux-aarch64.txt b/library/cpp/actors/dnscachelib/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 60de9ca5ab..0000000000
--- a/library/cpp/actors/dnscachelib/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-dnscachelib)
-target_link_libraries(cpp-actors-dnscachelib PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-c-ares
- library-cpp-lwtrace
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-dnscachelib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/dnscache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/probes.cpp
-)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.linux-x86_64.txt b/library/cpp/actors/dnscachelib/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 60de9ca5ab..0000000000
--- a/library/cpp/actors/dnscachelib/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-dnscachelib)
-target_link_libraries(cpp-actors-dnscachelib PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-c-ares
- library-cpp-lwtrace
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-dnscachelib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/dnscache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/probes.cpp
-)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.windows-x86_64.txt b/library/cpp/actors/dnscachelib/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index b769b26408..0000000000
--- a/library/cpp/actors/dnscachelib/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-dnscachelib)
-target_link_libraries(cpp-actors-dnscachelib PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-c-ares
- library-cpp-lwtrace
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-dnscachelib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/dnscache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnscachelib/probes.cpp
-)
diff --git a/library/cpp/actors/dnscachelib/dnscache.cpp b/library/cpp/actors/dnscachelib/dnscache.cpp
deleted file mode 100644
index 91ed284c12..0000000000
--- a/library/cpp/actors/dnscachelib/dnscache.cpp
+++ /dev/null
@@ -1,458 +0,0 @@
-#include "dnscache.h"
-#include "probes.h"
-#include "timekeeper.h"
-
-#include <ares.h>
-#include <util/system/guard.h>
-#include <util/datetime/systime.h>
-
-const TDnsCache::THost TDnsCache::NullHost;
-
-LWTRACE_USING(DNSCACHELIB_PROVIDER);
-
-static_assert(sizeof(ares_channel) == sizeof(void*), "expect sizeof(ares_channel) == sizeof(void *)");
-
-TDnsCache::TDnsCache(bool allowIpv4, bool allowIpv6, time_t lifetime, time_t neg, ui32 timeout)
- : EntryLifetime(lifetime)
- , NegativeLifetime(neg)
- , Timeout(TDuration::MicroSeconds(timeout))
- , AllowIpV4(allowIpv4)
- , AllowIpV6(allowIpv6)
- , ACacheHits(0)
- , ACacheMisses(0)
- , PtrCacheHits(0)
- , PtrCacheMisses(0)
-{
-#ifdef _win_
- if (ares_library_init(ARES_LIB_INIT_WIN32) != ARES_SUCCESS) {
- LWPROBE(AresInitFailed);
- ythrow yexception() << "ares_library_init() failed";
- }
-#endif
-
- ares_channel chan;
-
- if (ares_init(&chan) != ARES_SUCCESS) {
- LWPROBE(AresInitFailed);
- ythrow yexception() << "ares_init() failed";
- }
- Channel = chan;
- LWPROBE(Created);
-}
-
-TDnsCache::~TDnsCache(void) {
- ares_channel chan = static_cast<ares_channel>(Channel);
-
- ares_cancel(chan);
- ares_destroy(chan);
- LWPROBE(Destroyed);
-
-#ifdef _win_
- ares_library_cleanup();
-#endif
-}
-
-TString TDnsCache::GetHostByAddr(const NAddr::IRemoteAddr& addr) {
- in6_addr key;
-
- if (addr.Addr()->sa_family == AF_INET6) {
- const struct sockaddr_in6* s6 = (const struct sockaddr_in6*)(addr.Addr());
- memcpy(&key, &s6->sin6_addr, sizeof(s6->sin6_addr));
- } else if (addr.Addr()->sa_family == AF_INET) {
- const struct sockaddr_in* s4 = (const struct sockaddr_in*)(addr.Addr());
- memset(&key, 0, sizeof(key));
- memcpy(&key, &s4->sin_addr, sizeof(s4->sin_addr));
- } else {
- return "";
- }
- const TAddr& host = ResolveAddr(key, addr.Addr()->sa_family);
-
- return host.Hostname;
-}
-
-TIpHost TDnsCache::Get(const TString& hostname) {
- if (!AllowIpV4)
- return TIpHost(-1);
-
- const THost& addr = Resolve(hostname, AF_INET);
-
- TGuard<TMutex> lock(CacheMtx);
- if (addr.AddrsV4.empty()) {
- return TIpHost(-1);
- }
- return addr.AddrsV4.front();
-}
-
-NAddr::IRemoteAddrPtr TDnsCache::GetAddr(
- const TString& hostname,
- int family,
- TIpPort port,
- bool cacheOnly) {
- if (family != AF_INET && AllowIpV6) {
- const THost& addr = Resolve(hostname, AF_INET6, cacheOnly);
-
- TGuard<TMutex> lock(CacheMtx);
- if (!addr.AddrsV6.empty()) {
- struct sockaddr_in6 sin6;
- Zero(sin6);
- sin6.sin6_family = AF_INET6;
- sin6.sin6_addr = addr.AddrsV6.front();
- sin6.sin6_port = HostToInet(port);
-
- return MakeHolder<NAddr::TIPv6Addr>(sin6);
- }
- }
-
- if (family != AF_INET6 && AllowIpV4) {
- const THost& addr = Resolve(hostname, AF_INET, cacheOnly);
-
- TGuard<TMutex> lock(CacheMtx);
- if (!addr.AddrsV4.empty()) {
- return MakeHolder<NAddr::TIPv4Addr>(TIpAddress(addr.AddrsV4.front(), port));
- }
- }
-
- LWPROBE(FamilyMismatch, family, AllowIpV4, AllowIpV6);
- return nullptr;
-}
-
-void TDnsCache::GetAllAddresses(
- const TString& hostname,
- TVector<NAddr::IRemoteAddrPtr>& addrs) {
- if (AllowIpV4) {
- const THost& addr4 = Resolve(hostname, AF_INET);
-
- TGuard<TMutex> lock(CacheMtx);
- for (size_t i = 0; i < addr4.AddrsV4.size(); i++) {
- addrs.push_back(MakeHolder<NAddr::TIPv4Addr>(TIpAddress(addr4.AddrsV4[i], 0)));
- }
- }
-
- if (AllowIpV6) {
- const THost& addr6 = Resolve(hostname, AF_INET6);
-
- struct sockaddr_in6 sin6;
- Zero(sin6);
- sin6.sin6_family = AF_INET6;
-
- TGuard<TMutex> lock(CacheMtx);
- for (size_t i = 0; i < addr6.AddrsV6.size(); i++) {
- sin6.sin6_addr = addr6.AddrsV6[i];
-
- addrs.push_back(MakeHolder<NAddr::TIPv6Addr>(sin6));
- }
- }
-}
-
-void TDnsCache::GetStats(ui64& a_cache_hits, ui64& a_cache_misses,
- ui64& ptr_cache_hits, ui64& ptr_cache_misses) {
- TGuard<TMutex> lock(CacheMtx);
-
- a_cache_hits = ACacheHits;
- a_cache_misses = ACacheMisses;
- ptr_cache_hits = PtrCacheHits;
- ptr_cache_misses = PtrCacheMisses;
-}
-
-bool TDnsCache::THost::IsStale(int family, const TDnsCache* ctx) const noexcept {
- time_t resolved = family == AF_INET ? ResolvedV4 : ResolvedV6;
- time_t notfound = family == AF_INET ? NotFoundV4 : NotFoundV6;
-
- if (TTimeKeeper::GetTime() - resolved < ctx->EntryLifetime)
- return false;
-
- if (TTimeKeeper::GetTime() - notfound < ctx->NegativeLifetime)
- return false;
-
- return true;
-}
-
-const TDnsCache::THost&
-TDnsCache::Resolve(const TString& hostname, int family, bool cacheOnly) {
- if (!ValidateHName(hostname)) {
- LWPROBE(ResolveNullHost, hostname, family);
- return NullHost;
- }
-
- THostCache::iterator p;
-
- Y_ASSERT(family == AF_INET || family == AF_INET6);
-
- {
- TGuard<TMutex> lock(CacheMtx);
- p = HostCache.find(hostname);
- if (p != HostCache.end()) {
- if (!p->second.IsStale(family, this)) {
- /* Recently resolved, just return cached value */
- ACacheHits += 1;
- THost& host = p->second;
- LWPROBE(ResolveFromCache, hostname, family, host.AddrsV4ToString(), host.AddrsV6ToString(), ACacheHits);
- return host;
- } else {
- LWPROBE(ResolveCacheTimeout, hostname);
- }
- } else {
- /* Never resolved, create cache entry */
- LWPROBE(ResolveCacheNew, hostname);
- p = HostCache.insert(std::make_pair(hostname, THost())).first;
- }
- ACacheMisses += 1;
- }
-
- if (cacheOnly)
- return NullHost;
-
- TAtomic& inprogress = (family == AF_INET ? p->second.InProgressV4 : p->second.InProgressV6);
-
- {
- /* This way only! CacheMtx should always be taken AFTER AresMtx,
- * because later in ares_process it can only be done this way.
- * Lock order reversal will cause deadlock in unfortunate moments.
- */
- TGuard<TMutex> areslock(AresMtx);
- TGuard<TMutex> cachelock(CacheMtx);
-
- if (!inprogress) {
- ares_channel chan = static_cast<ares_channel>(Channel);
- TGHBNContext* ctx = new TGHBNContext();
- ctx->Owner = this;
- ctx->Hostname = hostname;
- ctx->Family = family;
-
- AtomicSet(inprogress, 1);
- ares_gethostbyname(chan, hostname.c_str(), family,
- &TDnsCache::GHBNCallback, ctx);
- }
- }
-
- WaitTask(inprogress);
-
- LWPROBE(ResolveDone, hostname, family, p->second.AddrsV4ToString(), p->second.AddrsV6ToString());
- return p->second;
-}
-
-bool TDnsCache::ValidateHName(const TString& name) const noexcept {
- return name.size() > 0;
-}
-
-const TDnsCache::TAddr& TDnsCache::ResolveAddr(const in6_addr& addr, int family) {
- TAddrCache::iterator p;
-
- {
- TGuard<TMutex> lock(CacheMtx);
- p = AddrCache.find(addr);
- if (p != AddrCache.end()) {
- if (TTimeKeeper::GetTime() - p->second.Resolved < EntryLifetime || TTimeKeeper::GetTime() - p->second.NotFound < NegativeLifetime) {
- /* Recently resolved, just return cached value */
- PtrCacheHits += 1;
- return p->second;
- }
- } else {
- /* Never resolved, create cache entry */
-
- p = AddrCache.insert(std::make_pair(addr, TAddr())).first;
- }
- PtrCacheMisses += 1;
- }
-
- {
- /* This way only! CacheMtx should always be taken AFTER AresMtx,
- * because later in ares_process it can only be done this way.
- * Lock order reversal will cause deadlock in unfortunate moments.
- */
- TGuard<TMutex> areslock(AresMtx);
- TGuard<TMutex> cachelock(CacheMtx);
-
- if (!p->second.InProgress) {
- ares_channel chan = static_cast<ares_channel>(Channel);
- TGHBAContext* ctx = new TGHBAContext();
- ctx->Owner = this;
- ctx->Addr = addr;
-
- AtomicSet(p->second.InProgress, 1);
- ares_gethostbyaddr(chan, &addr,
- family == AF_INET ? sizeof(in_addr) : sizeof(in6_addr),
- family, &TDnsCache::GHBACallback, ctx);
- }
- }
-
- WaitTask(p->second.InProgress);
-
- return p->second;
-}
-
-void TDnsCache::WaitTask(TAtomic& flag) {
- const TInstant start = TInstant(TTimeKeeper::GetTimeval());
-
- while (AtomicGet(flag)) {
- ares_channel chan = static_cast<ares_channel>(Channel);
-
- struct pollfd pfd[ARES_GETSOCK_MAXNUM];
- int nfds;
- ares_socket_t socks[ARES_GETSOCK_MAXNUM];
- int bits;
-
- {
- TGuard<TMutex> lock(AresMtx);
- bits = ares_getsock(chan, socks, ARES_GETSOCK_MAXNUM);
- if (bits == 0) {
- /* other thread did our job */
- continue;
- }
- }
-
- for (nfds = 0; nfds < ARES_GETSOCK_MAXNUM; nfds++) {
- pfd[nfds].events = 0;
- pfd[nfds].revents = 0;
- if (ARES_GETSOCK_READABLE(bits, nfds)) {
- pfd[nfds].fd = socks[nfds];
- pfd[nfds].events |= POLLRDNORM | POLLIN;
- }
- if (ARES_GETSOCK_WRITABLE(bits, nfds)) {
- pfd[nfds].fd = socks[nfds];
- pfd[nfds].events |= POLLWRNORM | POLLOUT;
- }
- if (pfd[nfds].events == 0) {
- break;
- }
- }
-
- Y_ASSERT(nfds != 0);
-
- const TDuration left = TInstant(TTimeKeeper::GetTimeval()) - start;
- const TDuration wait = Max(Timeout - left, TDuration::Zero());
-
- int rv = poll(pfd, nfds, wait.MilliSeconds());
-
- if (rv == -1) {
- if (errno == EINTR) {
- continue;
- }
- /* Unknown error in poll(), can't recover. Just pretend there was no reply */
- rv = 0;
- }
-
- if (rv == 0) {
- /* poll() timed out */
- TGuard<TMutex> lock(AresMtx);
- ares_process_fd(chan, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
- } else {
- for (int i = 0; i < nfds; i++) {
- if (pfd[i].revents == 0) {
- continue;
- }
- TGuard<TMutex> lock(AresMtx);
- ares_process_fd(chan,
- pfd[i].revents & (POLLRDNORM | POLLIN)
- ? pfd[i].fd
- : ARES_SOCKET_BAD,
- pfd[i].revents & (POLLWRNORM | POLLOUT)
- ? pfd[i].fd
- : ARES_SOCKET_BAD);
- }
- }
-
- if (start + Timeout <= TInstant(TTimeKeeper::GetTimeval())) {
- break;
- }
- }
-}
-
-void TDnsCache::GHBNCallback(void* arg, int status, int, struct hostent* info) {
- THolder<TGHBNContext> ctx(static_cast<TGHBNContext*>(arg));
- TGuard<TMutex> lock(ctx->Owner->CacheMtx);
- THostCache::iterator p = ctx->Owner->HostCache.find(ctx->Hostname);
-
- Y_ASSERT(p != ctx->Owner->HostCache.end());
-
- time_t& resolved = (ctx->Family == AF_INET ? p->second.ResolvedV4 : p->second.ResolvedV6);
- time_t& notfound = (ctx->Family == AF_INET ? p->second.NotFoundV4 : p->second.NotFoundV6);
- TAtomic& inprogress = (ctx->Family == AF_INET ? p->second.InProgressV4 : p->second.InProgressV6);
-
- if (status == ARES_SUCCESS) {
- if (info->h_addrtype == AF_INET) {
- p->second.AddrsV4.clear();
- for (int i = 0; info->h_addr_list[i] != nullptr; i++) {
- p->second.AddrsV4.push_back(*(TIpHost*)(info->h_addr_list[i]));
- }
- /* It is possible to ask ares for IPv6 and have IPv4 addrs instead,
- so take care and set V4 timers anyway.
- */
- p->second.ResolvedV4 = TTimeKeeper::GetTime();
- p->second.NotFoundV4 = 0;
- AtomicSet(p->second.InProgressV4, 0);
- } else if (info->h_addrtype == AF_INET6) {
- p->second.AddrsV6.clear();
- for (int i = 0; info->h_addr_list[i] != nullptr; i++) {
- p->second.AddrsV6.push_back(*(struct in6_addr*)(info->h_addr_list[i]));
- }
- } else {
- Y_ABORT("unknown address type in ares callback");
- }
- resolved = TTimeKeeper::GetTime();
- notfound = 0;
- } else {
- notfound = TTimeKeeper::GetTime();
- resolved = 0;
- }
- AtomicSet(inprogress, 0);
-}
-
-void TDnsCache::GHBACallback(void* arg, int status, int, struct hostent* info) {
- THolder<TGHBAContext> ctx(static_cast<TGHBAContext*>(arg));
- TGuard<TMutex> lock(ctx->Owner->CacheMtx);
- TAddrCache::iterator p = ctx->Owner->AddrCache.find(ctx->Addr);
-
- Y_ASSERT(p != ctx->Owner->AddrCache.end());
-
- if (status == ARES_SUCCESS) {
- p->second.Hostname = info->h_name;
- p->second.Resolved = TTimeKeeper::GetTime();
- p->second.NotFound = 0;
- } else {
- p->second.NotFound = TTimeKeeper::GetTime();
- p->second.Resolved = 0;
- }
- AtomicSet(p->second.InProgress, 0);
-}
-
-TString TDnsCache::THost::AddrsV4ToString() const {
- TStringStream ss;
- bool first = true;
- for (TIpHost addr : AddrsV4) {
- ss << (first ? "" : " ") << IpToString(addr);
- first = false;
- }
- return ss.Str();
-}
-
-TString TDnsCache::THost::AddrsV6ToString() const {
- TStringStream ss;
- bool first = true;
- for (in6_addr addr : AddrsV6) {
- struct sockaddr_in6 sin6;
- Zero(sin6);
- sin6.sin6_family = AF_INET6;
- sin6.sin6_addr = addr;
-
- NAddr::TIPv6Addr addr6(sin6);
- ss << (first ? "" : " ") << NAddr::PrintHost(addr6);
- first = false;
- }
- return ss.Str();
-}
-
-TDnsCache::TAresLibInit::TAresLibInit() {
-#ifdef _win_
- const auto res = ares_library_init(ARES_LIB_INIT_ALL);
- Y_ABORT_UNLESS(res == 0);
-#endif
-}
-
-TDnsCache::TAresLibInit::~TAresLibInit() {
-#ifdef _win_
- ares_library_cleanup();
-#endif
-}
-
-TDnsCache::TAresLibInit TDnsCache::InitAresLib;
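
The comments in Resolve() and ResolveAddr() above rely on a fixed lock acquisition order (AresMtx before CacheMtx) to avoid deadlock. The following is a minimal standalone sketch of that discipline using std::mutex and std::scoped_lock instead of the library's TMutex/TGuard; the names ResolverMtx, CacheMtx and UpdateCacheEntry are illustrative only.

    #include <mutex>

    // Stand-ins for AresMtx and CacheMtx from the code above (hypothetical names).
    std::mutex ResolverMtx;
    std::mutex CacheMtx;

    void UpdateCacheEntry() {
        // Always take the resolver lock first, then the cache lock (or acquire
        // both atomically, as std::scoped_lock does); reversing the order in
        // another code path is what would create the deadlock described above.
        std::scoped_lock lock(ResolverMtx, CacheMtx);
        // ... mutate the cache entry while the resolver state is stable ...
    }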
diff --git a/library/cpp/actors/dnscachelib/dnscache.h b/library/cpp/actors/dnscachelib/dnscache.h
deleted file mode 100644
index ac36e6ddc0..0000000000
--- a/library/cpp/actors/dnscachelib/dnscache.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#pragma once
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-
-#include <util/generic/map.h>
-#include <util/generic/vector.h>
-#include <util/network/address.h>
-#include <util/system/mutex.h>
-#include <util/datetime/base.h>
-
-/** Asynchronous DNS resolver.
- *
- * This is NOT a general-purpose resolver! It is designed around very specific assumptions:
- * 1) there is a relatively small and rarely changing set of resolved names (e.g., a server pool in a cluster)
- * 2) these names are supposed to have addresses; a missing A record is treated as a DNS error
- * 3) most of the time IP addresses do not change
- * 4) it is OK to return a stale IP address when the DNS server does not respond in time
- */
-
-class TDnsCache {
-public:
- TDnsCache(bool allowIpv4 = true, bool allowIpv6 = true, time_t entry_lifetime = 1800, time_t neg_lifetime = 1, ui32 request_timeout = 500000);
- ~TDnsCache();
-
- TString GetHostByAddr(const NAddr::IRemoteAddr&);
-
- // ip in network byte order
- TIpHost Get(const TString& host);
-
- /* use with AF_INET, AF_INET6 or AF_UNSPEC */
- NAddr::IRemoteAddrPtr GetAddr(const TString& host,
- int family,
- TIpPort port = 0,
- bool cacheOnly = false);
-
- void GetAllAddresses(const TString& host, TVector<NAddr::IRemoteAddrPtr>&);
-
- void GetStats(ui64& a_cache_hits, ui64& a_cache_misses,
- ui64& ptr_cache_hits, ui64& ptr_cache_misses);
-
-protected:
- bool ValidateHName(const TString& host) const noexcept;
-
-private:
- struct TGHBNContext {
- TDnsCache* Owner;
- TString Hostname;
- int Family;
- };
-
- struct TGHBAContext {
- TDnsCache* Owner;
- in6_addr Addr;
- };
-
- struct THost {
- THost() noexcept {
- }
-
- TVector<TIpHost> AddrsV4;
- time_t ResolvedV4 = 0;
- time_t NotFoundV4 = 0;
- TAtomic InProgressV4 = 0;
-
- TVector<in6_addr> AddrsV6;
- time_t ResolvedV6 = 0;
- time_t NotFoundV6 = 0;
- TAtomic InProgressV6 = 0;
-
- TString AddrsV4ToString() const;
- TString AddrsV6ToString() const;
-
- bool IsStale(int family, const TDnsCache* ctx) const noexcept;
- };
-
- typedef TMap<TString, THost> THostCache;
-
- struct TAddr {
- TString Hostname;
- time_t Resolved = 0;
- time_t NotFound = 0;
- TAtomic InProgress = 0;
- };
- /* IRemoteAddr is annoyingly hard to use, so I'll use in6_addr as key
- * and put v4 addrs in it.
- */
- struct TAddrCmp {
- bool operator()(const in6_addr& left, const in6_addr& right) const {
- for (size_t i = 0; i < sizeof(left); i++) {
- if (left.s6_addr[i] < right.s6_addr[i]) {
- return true;
- } else if (left.s6_addr[i] > right.s6_addr[i]) {
- return false;
- }
- }
- // equal
- return false;
- }
- };
- typedef TMap<in6_addr, TAddr, TAddrCmp> TAddrCache;
-
- const THost& Resolve(const TString&, int family, bool cacheOnly = false);
-
- const TAddr& ResolveAddr(const in6_addr&, int family);
-
- void WaitTask(TAtomic&);
-
- static void GHBNCallback(void* arg, int status, int timeouts,
- struct hostent* info);
-
- static void GHBACallback(void* arg, int status, int timeouts,
- struct hostent* info);
-
- const time_t EntryLifetime;
- const time_t NegativeLifetime;
- const TDuration Timeout;
- const bool AllowIpV4;
- const bool AllowIpV6;
-
- TMutex CacheMtx;
- THostCache HostCache;
- TAddrCache AddrCache;
- ui64 ACacheHits;
- ui64 ACacheMisses;
- ui64 PtrCacheHits;
- ui64 PtrCacheMisses;
-
- const static THost NullHost;
-
- TMutex AresMtx;
- void* Channel;
-
- struct TAresLibInit {
- TAresLibInit();
- ~TAresLibInit();
- };
-
- static TAresLibInit InitAresLib;
-};
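
A minimal usage sketch for the class declared above, assuming only the public API shown in this header; "localhost" and port 80 are arbitrary placeholders.

    #include <library/cpp/actors/dnscachelib/dnscache.h>
    #include <util/stream/output.h>

    int main() {
        // Defaults from the constructor above: IPv4 and IPv6 allowed, 1800 s
        // positive lifetime, 1 s negative lifetime, 500 ms request timeout.
        TDnsCache cache;

        // First lookup hits DNS; repeated lookups within the lifetime are served
        // from the cache, possibly returning a slightly stale address.
        NAddr::IRemoteAddrPtr addr = cache.GetAddr("localhost", AF_UNSPEC, /*port=*/80);
        if (addr) {
            Cout << NAddr::PrintHost(*addr) << Endl;
        }

        ui64 aHits, aMisses, ptrHits, ptrMisses;
        cache.GetStats(aHits, aMisses, ptrHits, ptrMisses);
        Cout << "A cache: " << aHits << " hits, " << aMisses << " misses" << Endl;
        return 0;
    }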
diff --git a/library/cpp/actors/dnscachelib/probes.cpp b/library/cpp/actors/dnscachelib/probes.cpp
deleted file mode 100644
index 07734ab20f..0000000000
--- a/library/cpp/actors/dnscachelib/probes.cpp
+++ /dev/null
@@ -1,3 +0,0 @@
-#include "probes.h"
-
-LWTRACE_DEFINE_PROVIDER(DNSCACHELIB_PROVIDER)
diff --git a/library/cpp/actors/dnscachelib/probes.h b/library/cpp/actors/dnscachelib/probes.h
deleted file mode 100644
index 313b7b8712..0000000000
--- a/library/cpp/actors/dnscachelib/probes.h
+++ /dev/null
@@ -1,35 +0,0 @@
-#pragma once
-
-#include <library/cpp/lwtrace/all.h>
-
-#define DNSCACHELIB_PROVIDER(PROBE, EVENT, GROUPS, TYPES, NAMES) \
- PROBE(Created, GROUPS(), TYPES(), NAMES()) \
- PROBE(Destroyed, GROUPS(), TYPES(), NAMES()) \
- PROBE(AresInitFailed, GROUPS(), TYPES(), NAMES()) \
- PROBE(FamilyMismatch, \
- GROUPS(), \
- TYPES(int, bool, bool), \
- NAMES("family", "allowIpV4", "allowIpV6")) \
- PROBE(ResolveNullHost, \
- GROUPS(), \
- TYPES(TString, int), \
- NAMES("hostname", "family")) \
- PROBE(ResolveFromCache, \
- GROUPS(), \
- TYPES(TString, int, TString, TString, ui64), \
- NAMES("hostname", "family", "addrsV4", "addrsV6", "aCacheHits")) \
- PROBE(ResolveDone, \
- GROUPS(), \
- TYPES(TString, int, TString, TString), \
- NAMES("hostname", "family", "addrsV4", "addrsV6")) \
- PROBE(ResolveCacheTimeout, \
- GROUPS(), \
- TYPES(TString), \
- NAMES("hostname")) \
- PROBE(ResolveCacheNew, \
- GROUPS(), \
- TYPES(TString), \
- NAMES("hostname")) \
- /**/
-
-LWTRACE_DECLARE_PROVIDER(DNSCACHELIB_PROVIDER)
diff --git a/library/cpp/actors/dnscachelib/timekeeper.h b/library/cpp/actors/dnscachelib/timekeeper.h
deleted file mode 100644
index 0528d8549c..0000000000
--- a/library/cpp/actors/dnscachelib/timekeeper.h
+++ /dev/null
@@ -1,70 +0,0 @@
-#pragma once
-
-#include <util/datetime/base.h>
-#include <util/generic/singleton.h>
-#include <util/string/cast.h>
-#include <util/system/thread.h>
-#include <util/system/event.h>
-#include <util/system/env.h>
-
-#include <cstdlib>
-
-/* Keeps current time accurate up to 1/10 second */
-
-class TTimeKeeper {
-public:
- static TInstant GetNow(void) {
- return TInstant::MicroSeconds(GetTime());
- }
-
- static time_t GetTime(void) {
- return Singleton<TTimeKeeper>()->CurrentTime.tv_sec;
- }
-
- static const struct timeval& GetTimeval(void) {
- return Singleton<TTimeKeeper>()->CurrentTime;
- }
-
- TTimeKeeper()
- : Thread(&TTimeKeeper::Worker, this)
- {
- ConstTime = !!GetEnv("TEST_TIME");
- if (ConstTime) {
- try {
- CurrentTime.tv_sec = FromString<ui32>(GetEnv("TEST_TIME"));
- } catch (const TFromStringException&) {
- ConstTime = false;
- }
- }
- if (!ConstTime) {
- gettimeofday(&CurrentTime, nullptr);
- Thread.Start();
- }
- }
-
- ~TTimeKeeper() {
- if (!ConstTime) {
- Exit.Signal();
- Thread.Join();
- }
- }
-
-private:
- static const ui32 UpdateInterval = 100000;
- struct timeval CurrentTime;
- bool ConstTime;
- TSystemEvent Exit;
- TThread Thread;
-
- static void* Worker(void* arg) {
- TTimeKeeper* owner = static_cast<TTimeKeeper*>(arg);
-
- do {
- /* Race condition may occur here but locking looks too expensive */
-
- gettimeofday(&owner->CurrentTime, nullptr);
- } while (!owner->Exit.WaitT(TDuration::MicroSeconds(UpdateInterval)));
-
- return nullptr;
- }
-};
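
A small sketch of how the singleton above is typically consumed; the include path mirrors the file location, and the fixed-clock behaviour driven by the TEST_TIME environment variable comes from the constructor above.

    #include <library/cpp/actors/dnscachelib/timekeeper.h>
    #include <util/stream/output.h>

    int main() {
        // Both accessors read a timestamp refreshed roughly every 100 ms by a
        // background thread, so they avoid a gettimeofday() call per lookup.
        const TInstant now = TTimeKeeper::GetNow();
        const time_t seconds = TTimeKeeper::GetTime();
        Cout << now.ToString() << " (" << seconds << " s since epoch)" << Endl;
        return 0;
    }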
diff --git a/library/cpp/actors/dnscachelib/ya.make b/library/cpp/actors/dnscachelib/ya.make
deleted file mode 100644
index 62eaafc8f5..0000000000
--- a/library/cpp/actors/dnscachelib/ya.make
+++ /dev/null
@@ -1,23 +0,0 @@
-LIBRARY()
-
-SRCS(
- dnscache.cpp
- dnscache.h
- probes.cpp
- probes.h
- timekeeper.h
-)
-
-PEERDIR(
- contrib/libs/c-ares
- library/cpp/lwtrace
- library/cpp/deprecated/atomic
-)
-
-IF (NOT EXPORT_CMAKE)
- ADDINCL(
- contrib/libs/c-ares/include
- )
-ENDIF()
-
-END()
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.darwin-arm64.txt b/library/cpp/actors/dnsresolver/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 30d19f1b99..0000000000
--- a/library/cpp/actors/dnsresolver/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-dnsresolver)
-target_link_libraries(cpp-actors-dnsresolver PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- contrib-libs-c-ares
-)
-target_sources(cpp-actors-dnsresolver PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
-)
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/dnsresolver/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 30d19f1b99..0000000000
--- a/library/cpp/actors/dnsresolver/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-dnsresolver)
-target_link_libraries(cpp-actors-dnsresolver PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- contrib-libs-c-ares
-)
-target_sources(cpp-actors-dnsresolver PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
-)
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.linux-aarch64.txt b/library/cpp/actors/dnsresolver/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index f7dbdca2ef..0000000000
--- a/library/cpp/actors/dnsresolver/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-dnsresolver)
-target_link_libraries(cpp-actors-dnsresolver PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- contrib-libs-c-ares
-)
-target_sources(cpp-actors-dnsresolver PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
-)
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.linux-x86_64.txt b/library/cpp/actors/dnsresolver/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index f7dbdca2ef..0000000000
--- a/library/cpp/actors/dnsresolver/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-dnsresolver)
-target_link_libraries(cpp-actors-dnsresolver PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- contrib-libs-c-ares
-)
-target_sources(cpp-actors-dnsresolver PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
-)
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.windows-x86_64.txt b/library/cpp/actors/dnsresolver/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 30d19f1b99..0000000000
--- a/library/cpp/actors/dnsresolver/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-dnsresolver)
-target_link_libraries(cpp-actors-dnsresolver PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- contrib-libs-c-ares
-)
-target_sources(cpp-actors-dnsresolver PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
-)
diff --git a/library/cpp/actors/dnsresolver/dnsresolver.cpp b/library/cpp/actors/dnsresolver/dnsresolver.cpp
deleted file mode 100644
index d7d8c0e3b6..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver.cpp
+++ /dev/null
@@ -1,485 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/threading/queue/mpsc_htswap.h>
-#include <util/network/pair.h>
-#include <util/network/socket.h>
-#include <util/string/builder.h>
-#include <util/system/thread.h>
-
-#include <ares.h>
-
-#include <atomic>
-
-namespace NActors {
-namespace NDnsResolver {
-
- class TAresLibraryInitBase {
- protected:
- TAresLibraryInitBase() noexcept {
- int status = ares_library_init(ARES_LIB_INIT_ALL);
- Y_ABORT_UNLESS(status == ARES_SUCCESS, "Unexpected failure to initialize c-ares library");
- }
-
- ~TAresLibraryInitBase() noexcept {
- ares_library_cleanup();
- }
- };
-
- class TCallbackQueueBase {
- protected:
- TCallbackQueueBase() noexcept {
- int err = SocketPair(Sockets, false, true);
- Y_ABORT_UNLESS(err == 0, "Unexpected failure to create a socket pair");
- SetNonBlock(Sockets[0]);
- SetNonBlock(Sockets[1]);
- }
-
- ~TCallbackQueueBase() noexcept {
- closesocket(Sockets[0]);
- closesocket(Sockets[1]);
- }
-
- protected:
- using TCallback = std::function<void()>;
- using TCallbackQueue = NThreading::THTSwapQueue<TCallback>;
-
- void PushCallback(TCallback callback) {
- Y_ABORT_UNLESS(callback, "Cannot push an empty callback");
- CallbackQueue.Push(std::move(callback)); // this is a lockfree queue
-
- // Wake up worker thread on the first activation
- if (Activations.fetch_add(1, std::memory_order_acq_rel) == 0) {
- char ch = 'x';
- ssize_t ret;
-#ifdef _win_
- ret = send(SignalSock(), &ch, 1, 0);
- if (ret == -1) {
- Y_ABORT_UNLESS(WSAGetLastError() == WSAEWOULDBLOCK, "Unexpected send error");
- return;
- }
-#else
- do {
- ret = send(SignalSock(), &ch, 1, 0);
- } while (ret == -1 && errno == EINTR);
- if (ret == -1) {
- Y_ABORT_UNLESS(errno == EAGAIN || errno == EWOULDBLOCK, "Unexpected send error");
- return;
- }
-#endif
- Y_ABORT_UNLESS(ret == 1, "Unexpected send result");
- }
- }
-
- void RunCallbacks() noexcept {
- char ch[32];
- ssize_t ret;
- bool signalled = false;
- for (;;) {
- ret = recv(WaitSock(), ch, sizeof(ch), 0);
- if (ret > 0) {
- signalled = true;
- }
- if (ret == sizeof(ch)) {
- continue;
- }
- if (ret != -1) {
- break;
- }
-#ifdef _win_
- if (WSAGetLastError() == WSAEWOULDBLOCK) {
- break;
- }
- Y_ABORT("Unexpected recv error");
-#else
- if (errno == EAGAIN || errno == EWOULDBLOCK) {
- break;
- }
- Y_ABORT_UNLESS(errno == EINTR, "Unexpected recv error");
-#endif
- }
-
- if (signalled) {
- // There's exactly one write to SignalSock while Activations != 0
- // It's impossible to get signalled while Activations == 0
- // We must set Activations = 0 to receive new signals
- size_t count = Activations.exchange(0, std::memory_order_acq_rel);
- Y_ABORT_UNLESS(count != 0);
-
- // N.B. due to the way HTSwap works we may not be able to pop
- // all callbacks on this activation, however we expect a new
- // delayed activation to happen at a later time.
- while (auto callback = CallbackQueue.Pop()) {
- callback();
- }
- }
- }
-
- SOCKET SignalSock() {
- return Sockets[0];
- }
-
- SOCKET WaitSock() {
- return Sockets[1];
- }
-
- private:
- SOCKET Sockets[2];
- TCallbackQueue CallbackQueue;
- std::atomic<size_t> Activations{ 0 };
- };
-
- class TSimpleDnsResolver
- : public TActor<TSimpleDnsResolver>
- , private TAresLibraryInitBase
- , private TCallbackQueueBase
- {
- public:
- TSimpleDnsResolver(TSimpleDnsResolverOptions options) noexcept
- : TActor(&TThis::StateWork)
- , Options(std::move(options))
- , WorkerThread(&TThis::WorkerThreadStart, this)
- {
- InitAres();
-
- WorkerThread.Start();
- }
-
- ~TSimpleDnsResolver() noexcept override {
- if (!Stopped) {
- PushCallback([this] {
- // Mark as stopped first
- Stopped = true;
-
- // Cancel all current ares requests (will not send replies)
- ares_cancel(AresChannel);
- });
-
- WorkerThread.Join();
- }
-
- StopAres();
- }
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::DNS_RESOLVER;
- }
-
- private:
- void InitAres() noexcept {
- struct ares_options options;
- memset(&options, 0, sizeof(options));
- int optmask = 0;
-
- options.flags = ARES_FLAG_STAYOPEN;
- optmask |= ARES_OPT_FLAGS;
-
- options.sock_state_cb = &TThis::SockStateCallback;
- options.sock_state_cb_data = this;
- optmask |= ARES_OPT_SOCK_STATE_CB;
-
- options.timeout = Options.Timeout.MilliSeconds();
- if (options.timeout > 0) {
- optmask |= ARES_OPT_TIMEOUTMS;
- }
-
- options.tries = Options.Attempts;
- if (options.tries > 0) {
- optmask |= ARES_OPT_TRIES;
- }
-
- int err = ares_init_options(&AresChannel, &options, optmask);
- Y_ABORT_UNLESS(err == 0, "Unexpected failure to initialize c-ares channel");
-
- if (Options.Servers) {
- TStringBuilder csv;
- for (const TString& server : Options.Servers) {
- if (csv) {
- csv << ',';
- }
- csv << server;
- }
- err = ares_set_servers_ports_csv(AresChannel, csv.c_str());
- Y_ABORT_UNLESS(err == 0, "Unexpected failure to set a list of dns servers: %s", ares_strerror(err));
- }
- }
-
- void StopAres() noexcept {
- // Destroy the ares channel
- ares_destroy(AresChannel);
- AresChannel = nullptr;
- }
-
- private:
- STRICT_STFUNC(StateWork, {
- hFunc(TEvents::TEvPoison, Handle);
- hFunc(TEvDns::TEvGetHostByName, Handle);
- hFunc(TEvDns::TEvGetAddr, Handle);
- })
-
- void Handle(TEvents::TEvPoison::TPtr&) {
- Y_ABORT_UNLESS(!Stopped);
-
- PushCallback([this] {
- // Cancel all current ares requests (will send notifications)
- ares_cancel(AresChannel);
-
- // Mark as stopped last
- Stopped = true;
- });
-
- WorkerThread.Join();
- PassAway();
- }
-
- private:
- enum class ERequestType {
- GetHostByName,
- GetAddr,
- };
-
- struct TRequestContext : public TThrRefBase {
- using TPtr = TIntrusivePtr<TRequestContext>;
-
- TThis* Self;
- TActorSystem* ActorSystem;
- TActorId SelfId;
- TActorId Sender;
- ui64 Cookie;
- ERequestType Type;
-
- TRequestContext(TThis* self, TActorSystem* as, TActorId selfId, TActorId sender, ui64 cookie, ERequestType type)
- : Self(self)
- , ActorSystem(as)
- , SelfId(selfId)
- , Sender(sender)
- , Cookie(cookie)
- , Type(type)
- { }
- };
-
- private:
- void Handle(TEvDns::TEvGetHostByName::TPtr& ev) {
- auto* msg = ev->Get();
- auto reqCtx = MakeIntrusive<TRequestContext>(
- this, TActivationContext::ActorSystem(), SelfId(), ev->Sender, ev->Cookie, ERequestType::GetHostByName);
- PushCallback([this, reqCtx = std::move(reqCtx), name = std::move(msg->Name), family = msg->Family] () mutable {
- StartGetAddrInfo(std::move(reqCtx), std::move(name), family);
- });
- }
-
- void Handle(TEvDns::TEvGetAddr::TPtr& ev) {
- auto* msg = ev->Get();
- auto reqCtx = MakeIntrusive<TRequestContext>(
- this, TActivationContext::ActorSystem(), SelfId(), ev->Sender, ev->Cookie, ERequestType::GetAddr);
- PushCallback([this, reqCtx = std::move(reqCtx), name = std::move(msg->Name), family = msg->Family] () mutable {
- StartGetAddrInfo(std::move(reqCtx), std::move(name), family);
- });
- }
-
- void StartGetAddrInfo(TRequestContext::TPtr reqCtx, TString name, int family) noexcept {
- reqCtx->Ref();
- ares_addrinfo_hints hints;
- memset(&hints, 0, sizeof(hints));
- hints.ai_flags = ARES_AI_NOSORT;
- hints.ai_family = family;
- ares_getaddrinfo(AresChannel, name.c_str(), nullptr, &hints, &TThis::GetAddrInfoAresCallback, reqCtx.Get());
- }
-
- private:
- static void GetAddrInfoAresCallback(void* arg, int status, int timeouts, ares_addrinfo *result) {
- struct TDeleter {
- void operator ()(ares_addrinfo *ptr) const {
- ares_freeaddrinfo(ptr);
- }
- };
- std::unique_ptr<ares_addrinfo, TDeleter> ptr(result);
-
- Y_UNUSED(timeouts);
- TRequestContext::TPtr reqCtx(static_cast<TRequestContext*>(arg));
- reqCtx->UnRef();
-
- if (reqCtx->Self->Stopped) {
- // Don't send any replies after destruction
- return;
- }
-
- switch (reqCtx->Type) {
- case ERequestType::GetHostByName: {
- auto result = MakeHolder<TEvDns::TEvGetHostByNameResult>();
- if (status == ARES_SUCCESS) {
- for (auto *node = ptr->nodes; node; node = node->ai_next) {
- switch (node->ai_family) {
- case AF_INET: {
- result->AddrsV4.emplace_back(((sockaddr_in*)node->ai_addr)->sin_addr);
- break;
- }
- case AF_INET6: {
- result->AddrsV6.emplace_back(((sockaddr_in6*)node->ai_addr)->sin6_addr);
- break;
- }
- default:
- Y_ABORT("unknown address family in ares callback");
- }
- }
- } else {
- result->ErrorText = ares_strerror(status);
- }
- result->Status = status;
-
- reqCtx->ActorSystem->Send(new IEventHandle(reqCtx->Sender, reqCtx->SelfId, result.Release(), 0, reqCtx->Cookie));
- break;
- }
-
- case ERequestType::GetAddr: {
- auto result = MakeHolder<TEvDns::TEvGetAddrResult>();
- if (status == ARES_SUCCESS && Y_UNLIKELY(ptr->nodes == nullptr)) {
- status = ARES_ENODATA;
- }
- if (status == ARES_SUCCESS) {
- auto *node = ptr->nodes;
- switch (node->ai_family) {
- case AF_INET: {
- result->Addr = ((sockaddr_in*)node->ai_addr)->sin_addr;
- break;
- }
- case AF_INET6: {
- result->Addr = ((sockaddr_in6*)node->ai_addr)->sin6_addr;
- break;
- }
- default:
- Y_ABORT("unknown address family in ares callback");
- }
- } else {
- result->ErrorText = ares_strerror(status);
- }
- result->Status = status;
-
- reqCtx->ActorSystem->Send(new IEventHandle(reqCtx->Sender, reqCtx->SelfId, result.Release(), 0, reqCtx->Cookie));
- break;
- }
- }
- }
-
- private:
- static void SockStateCallback(void* data, ares_socket_t socket_fd, int readable, int writable) {
- static_cast<TThis*>(data)->DoSockStateCallback(socket_fd, readable, writable);
- }
-
- void DoSockStateCallback(ares_socket_t socket_fd, int readable, int writable) noexcept {
- int events = (readable ? (POLLRDNORM | POLLIN) : 0) | (writable ? (POLLWRNORM | POLLOUT) : 0);
- if (events == 0) {
- AresSockStates.erase(socket_fd);
- } else {
- AresSockStates[socket_fd].NeededEvents = events;
- }
- }
-
- private:
- static void* WorkerThreadStart(void* arg) noexcept {
- static_cast<TSimpleDnsResolver*>(arg)->WorkerThreadLoop();
- return nullptr;
- }
-
- void WorkerThreadLoop() noexcept {
- TThread::SetCurrentThreadName("DnsResolver");
-
- TVector<struct pollfd> fds;
- while (!Stopped) {
- fds.clear();
- fds.reserve(1 + AresSockStates.size());
- {
- auto& entry = fds.emplace_back();
- entry.fd = WaitSock();
- entry.events = POLLRDNORM | POLLIN;
- }
- for (auto& kv : AresSockStates) {
- auto& entry = fds.emplace_back();
- entry.fd = kv.first;
- entry.events = kv.second.NeededEvents;
- }
-
- int timeout = -1;
- struct timeval tv;
- if (ares_timeout(AresChannel, nullptr, &tv)) {
- timeout = tv.tv_sec * 1000 + tv.tv_usec / 1000;
- }
-
- int ret = poll(fds.data(), fds.size(), timeout);
- if (ret == -1) {
- if (errno == EINTR) {
- continue;
- }
- // we cannot handle failures, run callbacks and pretend everything is ok
- RunCallbacks();
- if (Stopped) {
- break;
- }
- ret = 0;
- }
-
- bool ares_called = false;
- if (ret > 0) {
- for (size_t i = 0; i < fds.size(); ++i) {
- auto& entry = fds[i];
-
- // Handle WaitSock activation and run callbacks
- if (i == 0) {
- if (entry.revents & (POLLRDNORM | POLLIN)) {
- RunCallbacks();
- if (Stopped) {
- break;
- }
- }
- continue;
- }
-
- // All other sockets belong to ares
- if (entry.revents == 0) {
- continue;
- }
- // Previous invocation of ares_process_fd might have removed some sockets
- if (Y_UNLIKELY(!AresSockStates.contains(entry.fd))) {
- continue;
- }
- ares_process_fd(
- AresChannel,
- entry.revents & (POLLRDNORM | POLLIN) ? entry.fd : ARES_SOCKET_BAD,
- entry.revents & (POLLWRNORM | POLLOUT) ? entry.fd : ARES_SOCKET_BAD);
- ares_called = true;
- }
-
- if (Stopped) {
- break;
- }
- }
-
- if (!ares_called) {
- // Let ares handle timeouts
- ares_process_fd(AresChannel, ARES_SOCKET_BAD, ARES_SOCKET_BAD);
- }
- }
- }
-
- private:
- struct TSockState {
- short NeededEvents = 0; // poll events
- };
-
- private:
- TSimpleDnsResolverOptions Options;
- TThread WorkerThread;
-
- ares_channel AresChannel;
- THashMap<SOCKET, TSockState> AresSockStates;
-
- bool Stopped = false;
- };
-
- IActor* CreateSimpleDnsResolver(TSimpleDnsResolverOptions options) {
- return new TSimpleDnsResolver(std::move(options));
- }
-
-} // namespace NDnsResolver
-} // namespace NActors
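
TCallbackQueueBase above wakes its worker thread through a non-blocking socket pair guarded by an activation counter, so only the first producer after a drain pays for a send() syscall. Below is a standalone sketch of just that wake-up scheme; TWakeupSketch is a hypothetical name, the lock-free callback queue is omitted, and the EINTR/EWOULDBLOCK handling from the original is left out for brevity.

    #include <util/network/pair.h>
    #include <util/network/socket.h>
    #include <util/system/yassert.h>

    #include <atomic>

    class TWakeupSketch {
    public:
        TWakeupSketch() {
            Y_ABORT_UNLESS(SocketPair(Sockets, false, true) == 0);
            SetNonBlock(Sockets[0]);
            SetNonBlock(Sockets[1]);
        }

        ~TWakeupSketch() {
            closesocket(Sockets[0]);
            closesocket(Sockets[1]);
        }

        void Notify() {
            // Only the 0 -> 1 transition writes a byte, so at most one byte is
            // ever in flight regardless of how many producers call Notify().
            if (Activations.fetch_add(1, std::memory_order_acq_rel) == 0) {
                char ch = 'x';
                send(Sockets[0], &ch, 1, 0);
            }
        }

        void Drain() {
            char buf[32];
            while (recv(Sockets[1], buf, sizeof(buf), 0) > 0) {
            }
            // Reset the counter so the next Notify() signals again.
            Activations.exchange(0, std::memory_order_acq_rel);
        }

    private:
        SOCKET Sockets[2];
        std::atomic<size_t> Activations{0};
    };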
diff --git a/library/cpp/actors/dnsresolver/dnsresolver.h b/library/cpp/actors/dnsresolver/dnsresolver.h
deleted file mode 100644
index 1121c31e51..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver.h
+++ /dev/null
@@ -1,128 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <util/network/address.h>
-#include <variant>
-
-namespace NActors {
-namespace NDnsResolver {
-
- struct TEvDns {
- enum EEv {
- EvGetHostByName = EventSpaceBegin(TEvents::ES_DNS),
- EvGetHostByNameResult,
- EvGetAddr,
- EvGetAddrResult,
- };
-
- /**
- * TEvGetHostByName returns the result of ares_gethostbyname
- */
- struct TEvGetHostByName : public TEventLocal<TEvGetHostByName, EvGetHostByName> {
- TString Name;
- int Family;
-
- explicit TEvGetHostByName(TString name, int family = AF_UNSPEC)
- : Name(std::move(name))
- , Family(family)
- { }
- };
-
- struct TEvGetHostByNameResult : public TEventLocal<TEvGetHostByNameResult, EvGetHostByNameResult> {
- TVector<struct in_addr> AddrsV4;
- TVector<struct in6_addr> AddrsV6;
- TString ErrorText;
- int Status = 0;
- };
-
- /**
- * TEvGetAddr returns a single address for a given hostname
- */
- struct TEvGetAddr : public TEventLocal<TEvGetAddr, EvGetAddr> {
- TString Name;
- int Family;
-
- explicit TEvGetAddr(TString name, int family = AF_UNSPEC)
- : Name(std::move(name))
- , Family(family)
- { }
- };
-
- struct TEvGetAddrResult : public TEventLocal<TEvGetAddrResult, EvGetAddrResult> {
- // N.B. "using" here doesn't work with Visual Studio compiler
- typedef struct in6_addr TIPv6Addr;
- typedef struct in_addr TIPv4Addr;
-
- std::variant<std::monostate, TIPv6Addr, TIPv4Addr> Addr;
- TString ErrorText;
- int Status = 0;
-
- bool IsV6() const {
- return std::holds_alternative<TIPv6Addr>(Addr);
- }
-
- bool IsV4() const {
- return std::holds_alternative<TIPv4Addr>(Addr);
- }
-
- const TIPv6Addr& GetAddrV6() const {
- const TIPv6Addr* p = std::get_if<TIPv6Addr>(&Addr);
- Y_ABORT_UNLESS(p, "Result is not an ipv6 address");
- return *p;
- }
-
- const TIPv4Addr& GetAddrV4() const {
- const TIPv4Addr* p = std::get_if<TIPv4Addr>(&Addr);
- Y_ABORT_UNLESS(p, "Result is not an ipv4 address");
- return *p;
- }
- };
- };
-
- struct TSimpleDnsResolverOptions {
- // Initial per-server timeout, grows exponentially with each retry
- TDuration Timeout = TDuration::Seconds(1);
- // Number of attempts per-server
- int Attempts = 2;
- // Optional list of custom dns servers (ip.v4[:port], ip::v6 or [ip::v6]:port format)
- TVector<TString> Servers;
- };
-
- IActor* CreateSimpleDnsResolver(TSimpleDnsResolverOptions options = TSimpleDnsResolverOptions());
-
- struct TCachingDnsResolverOptions {
- // Soft expire time specifies delay before name is refreshed in background
- TDuration SoftNegativeExpireTime = TDuration::Seconds(1);
- TDuration SoftPositiveExpireTime = TDuration::Seconds(10);
- // Hard expire time specifies delay before the last result is forgotten
- TDuration HardNegativeExpireTime = TDuration::Seconds(10);
- TDuration HardPositiveExpireTime = TDuration::Hours(2);
- // Allow these request families
- bool AllowIPv6 = true;
- bool AllowIPv4 = true;
- // Optional counters
- NMonitoring::TDynamicCounterPtr MonCounters = nullptr;
- };
-
- IActor* CreateCachingDnsResolver(TActorId upstream, TCachingDnsResolverOptions options = TCachingDnsResolverOptions());
-
- struct TOnDemandDnsResolverOptions
- : public TSimpleDnsResolverOptions
- , public TCachingDnsResolverOptions
- {
- };
-
- IActor* CreateOnDemandDnsResolver(TOnDemandDnsResolverOptions options = TOnDemandDnsResolverOptions());
-
- /**
- * Returns actor id of a globally registered dns resolver
- */
- inline TActorId MakeDnsResolverActorId() {
- return TActorId(0, TStringBuf("dnsresolver"));
- }
-
-} // namespace NDnsResolver
-} // namespace NActors
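
A sketch of how the pieces declared above compose, assuming an already running actor system; `actorSystem`, `selfId`, and "localhost" are placeholders for this illustration, not part of the library.

    #include <library/cpp/actors/core/actorsystem.h>
    #include <library/cpp/actors/dnsresolver/dnsresolver.h>

    using namespace NActors;
    using namespace NActors::NDnsResolver;

    void StartResolvers(TActorSystem& actorSystem, const TActorId& selfId) {
        // The simple resolver runs c-ares requests on a dedicated thread.
        const TActorId simple = actorSystem.Register(CreateSimpleDnsResolver());

        // The caching resolver sits in front of it and answers repeated names
        // from its soft/hard-expiring cache.
        const TActorId caching = actorSystem.Register(CreateCachingDnsResolver(simple));

        // Ask for a single address; the reply is delivered to selfId as a
        // TEvDns::TEvGetAddrResult with Status == 0 (ARES_SUCCESS) on success.
        actorSystem.Send(new IEventHandle(caching, selfId,
            new TEvDns::TEvGetAddr("localhost", AF_UNSPEC)));
    }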
diff --git a/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp b/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
deleted file mode 100644
index 83b1847962..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver_caching.cpp
+++ /dev/null
@@ -1,694 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <util/generic/intrlist.h>
-
-#include <ares.h>
-
-#include <queue>
-
-namespace NActors {
-namespace NDnsResolver {
-
- class TCachingDnsResolver : public TActor<TCachingDnsResolver> {
- public:
- struct TMonCounters {
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingInFlightV4;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingInFlightV6;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingInFlightUnspec;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingErrorsV4;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingErrorsV6;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingErrorsUnspec;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingTotalV4;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingTotalV6;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingTotalUnspec;
-
- NMonitoring::TDynamicCounters::TCounterPtr IncomingInFlight;
- NMonitoring::TDynamicCounters::TCounterPtr IncomingErrors;
- NMonitoring::TDynamicCounters::TCounterPtr IncomingTotal;
-
- NMonitoring::TDynamicCounters::TCounterPtr CacheSize;
- NMonitoring::TDynamicCounters::TCounterPtr CacheHits;
- NMonitoring::TDynamicCounters::TCounterPtr CacheMisses;
-
- TMonCounters(const NMonitoring::TDynamicCounterPtr& counters)
- : OutgoingInFlightV4(counters->GetCounter("DnsResolver/Outgoing/InFlight/V4", false))
- , OutgoingInFlightV6(counters->GetCounter("DnsResolver/Outgoing/InFlight/V6", false))
- , OutgoingInFlightUnspec(counters->GetCounter("DnsResolver/Outgoing/InFlight/Unspec", false))
- , OutgoingErrorsV4(counters->GetCounter("DnsResolver/Outgoing/Errors/V4", true))
- , OutgoingErrorsV6(counters->GetCounter("DnsResolver/Outgoing/Errors/V6", true))
- , OutgoingErrorsUnspec(counters->GetCounter("DnsResolver/Outgoing/Errors/Unspec", true))
- , OutgoingTotalV4(counters->GetCounter("DnsResolver/Outgoing/Total/V4", true))
- , OutgoingTotalV6(counters->GetCounter("DnsResolver/Outgoing/Total/V6", true))
- , OutgoingTotalUnspec(counters->GetCounter("DnsResolver/Outgoing/Total/Unspec", true))
- , IncomingInFlight(counters->GetCounter("DnsResolver/Incoming/InFlight", false))
- , IncomingErrors(counters->GetCounter("DnsResolver/Incoming/Errors", true))
- , IncomingTotal(counters->GetCounter("DnsResolver/Incoming/Total", true))
- , CacheSize(counters->GetCounter("DnsResolver/Cache/Size", false))
- , CacheHits(counters->GetCounter("DnsResolver/Cache/Hits", true))
- , CacheMisses(counters->GetCounter("DnsResolver/Cache/Misses", true))
- { }
-
- const NMonitoring::TDynamicCounters::TCounterPtr& OutgoingInFlightByFamily(int family) const {
- switch (family) {
- case AF_INET:
- return OutgoingInFlightV4;
- case AF_INET6:
- return OutgoingInFlightV6;
- case AF_UNSPEC:
- return OutgoingInFlightUnspec;
- default:
- Y_ABORT("Unexpected family %d", family);
- }
- }
-
- const NMonitoring::TDynamicCounters::TCounterPtr& OutgoingErrorsByFamily(int family) const {
- switch (family) {
- case AF_INET:
- return OutgoingErrorsV4;
- case AF_INET6:
- return OutgoingErrorsV6;
- case AF_UNSPEC:
- return OutgoingErrorsUnspec;
- default:
- Y_ABORT("Unexpected family %d", family);
- }
- }
-
- const NMonitoring::TDynamicCounters::TCounterPtr& OutgoingTotalByFamily(int family) const {
- switch (family) {
- case AF_INET:
- return OutgoingTotalV4;
- case AF_INET6:
- return OutgoingTotalV6;
- case AF_UNSPEC:
- return OutgoingTotalUnspec;
- default:
- Y_ABORT("Unexpected family %d", family);
- }
- }
- };
-
- public:
- TCachingDnsResolver(TActorId upstream, TCachingDnsResolverOptions options)
- : TActor(&TThis::StateWork)
- , Upstream(upstream)
- , Options(std::move(options))
- , MonCounters(Options.MonCounters ? new TMonCounters(Options.MonCounters) : nullptr)
- { }
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::DNS_RESOLVER;
- }
-
- private:
- STRICT_STFUNC(StateWork, {
- hFunc(TEvents::TEvPoison, Handle);
- hFunc(TEvDns::TEvGetHostByName, Handle);
- hFunc(TEvDns::TEvGetAddr, Handle);
- hFunc(TEvDns::TEvGetHostByNameResult, Handle);
- hFunc(TEvents::TEvUndelivered, Handle);
- });
-
- void Handle(TEvents::TEvPoison::TPtr&) {
- DropPending(ARES_ECANCELLED);
- PassAway();
- }
-
- void Handle(TEvDns::TEvGetHostByName::TPtr& ev) {
- auto req = MakeHolder<TIncomingRequest>();
- req->Type = EIncomingRequestType::GetHostByName;
- req->Sender = ev->Sender;
- req->Cookie = ev->Cookie;
- req->Name = std::move(ev->Get()->Name);
- req->Family = ev->Get()->Family;
- EnqueueRequest(std::move(req));
- }
-
- void Handle(TEvDns::TEvGetAddr::TPtr& ev) {
- auto req = MakeHolder<TIncomingRequest>();
- req->Type = EIncomingRequestType::GetAddr;
- req->Sender = ev->Sender;
- req->Cookie = ev->Cookie;
- req->Name = std::move(ev->Get()->Name);
- req->Family = ev->Get()->Family;
- EnqueueRequest(std::move(req));
- }
-
- void Handle(TEvDns::TEvGetHostByNameResult::TPtr& ev) {
- auto waitingIt = WaitingRequests.find(ev->Cookie);
- Y_ABORT_UNLESS(waitingIt != WaitingRequests.end(), "Unexpected reply, reqId=%" PRIu64, ev->Cookie);
- auto waitingInfo = waitingIt->second;
- WaitingRequests.erase(waitingIt);
-
- switch (waitingInfo.Family) {
- case AF_UNSPEC:
- case AF_INET6:
- case AF_INET:
- if (ev->Get()->Status) {
- ProcessError(waitingInfo.Family, waitingInfo.Position, ev->Get()->Status, std::move(ev->Get()->ErrorText));
- } else {
- ProcessAddrs(waitingInfo.Family, waitingInfo.Position, std::move(ev->Get()->AddrsV6), std::move(ev->Get()->AddrsV4));
- }
- break;
-
- default:
- Y_ABORT("Unexpected request family %d", waitingInfo.Family);
- }
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr& ev) {
- switch (ev->Get()->SourceType) {
- case TEvDns::TEvGetHostByName::EventType: {
- auto waitingIt = WaitingRequests.find(ev->Cookie);
- Y_ABORT_UNLESS(waitingIt != WaitingRequests.end(), "Unexpected TEvUndelivered, reqId=%" PRIu64, ev->Cookie);
- auto waitingInfo = waitingIt->second;
- WaitingRequests.erase(waitingIt);
-
- switch (waitingInfo.Family) {
- case AF_UNSPEC:
- case AF_INET6:
- case AF_INET:
- ProcessError(waitingInfo.Family, waitingInfo.Position, ARES_ENOTINITIALIZED, "Caching dns resolver cannot deliver to the underlying resolver");
- break;
-
- default:
- Y_ABORT("Unexpected request family %d", waitingInfo.Family);
- }
-
- break;
- }
-
- default:
- Y_ABORT("Unexpected TEvUndelievered, type=%" PRIu32, ev->Get()->SourceType);
- }
- }
-
- private:
- enum EIncomingRequestType {
- GetHostByName,
- GetAddr,
- };
-
- struct TIncomingRequest : public TIntrusiveListItem<TIncomingRequest> {
- EIncomingRequestType Type;
- TActorId Sender;
- ui64 Cookie;
- TString Name;
- int Family;
- };
-
- using TIncomingRequestList = TIntrusiveListWithAutoDelete<TIncomingRequest, TDelete>;
-
- void EnqueueRequest(THolder<TIncomingRequest> req) {
- if (MonCounters) {
- ++*MonCounters->IncomingTotal;
- }
-
- CleanupExpired(TActivationContext::Now());
-
- switch (req->Family) {
- case AF_UNSPEC:
- if (Options.AllowIPv6 && Options.AllowIPv4) {
- EnqueueRequest(AF_UNSPEC, std::move(req));
- return;
- }
- if (Options.AllowIPv6) {
- EnqueueRequest(AF_INET6, std::move(req));
- return;
- }
- if (Options.AllowIPv4) {
- EnqueueRequest(AF_INET, std::move(req));
- return;
- }
- break;
-
- case AF_INET6:
- if (Options.AllowIPv6) {
- EnqueueRequest(AF_INET6, std::move(req));
- return;
- }
- break;
-
- case AF_INET:
- if (Options.AllowIPv4) {
- EnqueueRequest(AF_INET, std::move(req));
- return;
- }
- break;
- }
-
- ReplyWithError(std::move(req), ARES_EBADFAMILY);
- }
-
- void EnqueueRequest(int family, THolder<TIncomingRequest> req) {
- auto now = TActivationContext::Now();
-
- auto& fullState = NameToState[req->Name];
- if (MonCounters) {
- *MonCounters->CacheSize = NameToState.size();
- }
-
- auto& state = fullState.StateByFamily(family);
- EnsureRequest(state, req->Name, family, now);
-
- if (state.IsHardExpired(now)) {
- Y_ABORT_UNLESS(state.Waiting);
- if (MonCounters) {
- ++*MonCounters->CacheMisses;
- }
- state.WaitingRequests.PushBack(req.Release());
- return;
- }
-
- if (MonCounters) {
- ++*MonCounters->CacheHits;
- }
-
- if (state.Status != 0) {
- ReplyWithError(std::move(req), state.Status, state.ErrorText);
- } else {
- ReplyWithAddrs(std::move(req), state.AddrsIPv6, state.AddrsIPv4);
- }
- }
-
- private:
- struct TFamilyState {
- TVector<struct in6_addr> AddrsIPv6;
- TVector<struct in_addr> AddrsIPv4;
- TIncomingRequestList WaitingRequests;
- TInstant SoftDeadline;
- TInstant HardDeadline;
- TInstant NextSoftDeadline;
- TInstant NextHardDeadline;
- TString ErrorText;
- int Status = -1; // never requested before
- bool InSoftHeap = false;
- bool InHardHeap = false;
- bool Waiting = false;
-
- bool Needed() const {
- return InSoftHeap || InHardHeap || Waiting;
- }
-
- bool ServerReplied() const {
- return ServerReplied(Status);
- }
-
- bool IsSoftExpired(TInstant now) const {
- return !InSoftHeap || NextSoftDeadline < now;
- }
-
- bool IsHardExpired(TInstant now) const {
- return !InHardHeap || NextHardDeadline < now;
- }
-
- static bool ServerReplied(int status) {
- return (
- status == ARES_SUCCESS ||
- status == ARES_ENODATA ||
- status == ARES_ENOTFOUND);
- }
- };
-
- struct TState {
- TFamilyState StateUnspec;
- TFamilyState StateIPv6;
- TFamilyState StateIPv4;
-
- bool Needed() const {
- return StateUnspec.Needed() || StateIPv6.Needed() || StateIPv4.Needed();
- }
-
- const TFamilyState& StateByFamily(int family) const {
- switch (family) {
- case AF_UNSPEC:
- return StateUnspec;
- case AF_INET6:
- return StateIPv6;
- case AF_INET:
- return StateIPv4;
- default:
- Y_ABORT("Unsupported family %d", family);
- }
- }
-
- TFamilyState& StateByFamily(int family) {
- switch (family) {
- case AF_UNSPEC:
- return StateUnspec;
- case AF_INET6:
- return StateIPv6;
- case AF_INET:
- return StateIPv4;
- default:
- Y_ABORT("Unsupported family %d", family);
- }
- }
- };
-
- using TNameToState = THashMap<TString, TState>;
-
- template<const TFamilyState TState::* StateToFamily,
- const TInstant TFamilyState::* FamilyToDeadline>
- struct THeapCompare {
- // returns true when b < a
- bool operator()(TNameToState::iterator a, TNameToState::iterator b) const {
- const TState& aState = a->second;
- const TState& bState = b->second;
- const TFamilyState& aFamily = aState.*StateToFamily;
- const TFamilyState& bFamily = bState.*StateToFamily;
- const TInstant& aDeadline = aFamily.*FamilyToDeadline;
- const TInstant& bDeadline = bFamily.*FamilyToDeadline;
- return bDeadline < aDeadline;
- }
- };
-
- template<const TFamilyState TState::* StateToFamily,
- const TInstant TFamilyState::* FamilyToDeadline>
- using TStateHeap = std::priority_queue<
- TNameToState::iterator,
- std::vector<TNameToState::iterator>,
- THeapCompare<StateToFamily, FamilyToDeadline>
- >;
-
- struct TWaitingInfo {
- TNameToState::iterator Position;
- int Family;
- };
-
- private:
- void EnsureRequest(TFamilyState& state, const TString& name, int family, TInstant now) {
- if (state.Waiting) {
- return; // request is already pending
- }
-
- if (!state.IsSoftExpired(now) && !state.IsHardExpired(now)) {
- return; // response is not expired yet
- }
-
- if (MonCounters) {
- ++*MonCounters->OutgoingInFlightByFamily(family);
- ++*MonCounters->OutgoingTotalByFamily(family);
- }
-
- ui64 reqId = ++LastRequestId;
- auto& req = WaitingRequests[reqId];
- req.Position = NameToState.find(name);
- req.Family = family;
- Y_ABORT_UNLESS(req.Position != NameToState.end());
-
- Send(Upstream, new TEvDns::TEvGetHostByName(name, family), IEventHandle::FlagTrackDelivery, reqId);
- state.Waiting = true;
- }
-
- template<TFamilyState TState::* StateToFamily,
- TInstant TFamilyState::* FamilyToDeadline,
- TInstant TFamilyState::* FamilyToNextDeadline,
- bool TFamilyState::* FamilyToFlag,
- class THeap>
- void PushToHeap(THeap& heap, TNameToState::iterator it, TInstant newDeadline) {
- auto& family = it->second.*StateToFamily;
- TInstant& deadline = family.*FamilyToDeadline;
- TInstant& nextDeadline = family.*FamilyToNextDeadline;
- bool& flag = family.*FamilyToFlag;
- nextDeadline = newDeadline;
- if (!flag) {
- deadline = newDeadline;
- heap.push(it);
- flag = true;
- }
- }
-
- void PushSoftUnspec(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateUnspec, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapUnspec, it, newDeadline);
- }
-
- void PushHardUnspec(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateUnspec, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapUnspec, it, newDeadline);
- }
-
- void PushSoftV6(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateIPv6, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapIPv6, it, newDeadline);
- }
-
- void PushHardV6(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateIPv6, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapIPv6, it, newDeadline);
- }
-
- void PushSoftV4(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateIPv4, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapIPv4, it, newDeadline);
- }
-
- void PushHardV4(TNameToState::iterator it, TInstant newDeadline) {
- PushToHeap<&TState::StateIPv4, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapIPv4, it, newDeadline);
- }
-
- void PushSoft(int family, TNameToState::iterator it, TInstant newDeadline) {
- switch (family) {
- case AF_UNSPEC:
- PushSoftUnspec(it, newDeadline);
- break;
- case AF_INET6:
- PushSoftV6(it, newDeadline);
- break;
- case AF_INET:
- PushSoftV4(it, newDeadline);
- break;
- default:
- Y_ABORT("Unexpected family %d", family);
- }
- }
-
- void PushHard(int family, TNameToState::iterator it, TInstant newDeadline) {
- switch (family) {
- case AF_UNSPEC:
- PushHardUnspec(it, newDeadline);
- break;
- case AF_INET6:
- PushHardV6(it, newDeadline);
- break;
- case AF_INET:
- PushHardV4(it, newDeadline);
- break;
- default:
- Y_ABORT("Unexpected family %d", family);
- }
- }
-
- void ProcessError(int family, TNameToState::iterator it, int status, TString errorText) {
- auto now = TActivationContext::Now();
- if (MonCounters) {
- --*MonCounters->OutgoingInFlightByFamily(family);
- ++*MonCounters->OutgoingErrorsByFamily(family);
- }
-
- auto& state = it->second.StateByFamily(family);
- Y_ABORT_UNLESS(state.Waiting, "Got error for a state we are not waiting");
- state.Waiting = false;
-
- // When we have a cached reply from the server, don't overwrite it with spurious transport errors
- const bool serverReplied = TFamilyState::ServerReplied(status);
- if (!serverReplied && state.ServerReplied() && !state.IsHardExpired(now)) {
- PushSoft(family, it, now + Options.SoftNegativeExpireTime);
- if (state.Status == ARES_SUCCESS) {
- SendAddrs(family, it);
- } else {
- SendErrors(family, it);
- }
- return;
- }
-
- state.Status = status;
- state.ErrorText = std::move(errorText);
- PushSoft(family, it, now + Options.SoftNegativeExpireTime);
- if (serverReplied) {
- // Server actually replied, so keep it cached for longer
- PushHard(family, it, now + Options.HardPositiveExpireTime);
- } else {
- PushHard(family, it, now + Options.HardNegativeExpireTime);
- }
-
- SendErrors(family, it);
- }
-
- void SendErrors(int family, TNameToState::iterator it) {
- auto& state = it->second.StateByFamily(family);
- while (state.WaitingRequests) {
- THolder<TIncomingRequest> req(state.WaitingRequests.PopFront());
- ReplyWithError(std::move(req), state.Status, state.ErrorText);
- }
- }
-
- void ProcessAddrs(int family, TNameToState::iterator it, TVector<struct in6_addr> addrs6, TVector<struct in_addr> addrs4) {
- if (Y_UNLIKELY(addrs6.empty() && addrs4.empty())) {
- // Probably unnecessary: we don't want to deal with empty address lists
- return ProcessError(family, it, ARES_ENODATA, ares_strerror(ARES_ENODATA));
- }
-
- auto now = TActivationContext::Now();
- if (MonCounters) {
- --*MonCounters->OutgoingInFlightByFamily(family);
- }
-
- auto& state = it->second.StateByFamily(family);
- Y_ABORT_UNLESS(state.Waiting, "Got reply for a state we are not waiting");
- state.Waiting = false;
-
- state.Status = ARES_SUCCESS;
- state.AddrsIPv6 = std::move(addrs6);
- state.AddrsIPv4 = std::move(addrs4);
- PushSoft(family, it, now + Options.SoftPositiveExpireTime);
- PushHard(family, it, now + Options.HardPositiveExpireTime);
-
- SendAddrs(family, it);
- }
-
- void SendAddrs(int family, TNameToState::iterator it) {
- auto& state = it->second.StateByFamily(family);
- while (state.WaitingRequests) {
- THolder<TIncomingRequest> req(state.WaitingRequests.PopFront());
- ReplyWithAddrs(std::move(req), state.AddrsIPv6, state.AddrsIPv4);
- }
- }
-
- private:
- template<TFamilyState TState::*StateToFamily,
- TInstant TFamilyState::* FamilyToDeadline,
- TInstant TFamilyState::* FamilyToNextDeadline,
- bool TFamilyState::* FamilyToFlag>
- void DoCleanupExpired(TStateHeap<StateToFamily, FamilyToDeadline>& heap, TInstant now) {
- while (!heap.empty()) {
- auto it = heap.top();
- auto& family = it->second.*StateToFamily;
- TInstant& deadline = family.*FamilyToDeadline;
- if (now <= deadline) {
- break;
- }
-
- bool& flag = family.*FamilyToFlag;
- Y_ABORT_UNLESS(flag);
- heap.pop();
- flag = false;
-
- TInstant& nextDeadline = family.*FamilyToNextDeadline;
- if (now < nextDeadline) {
- deadline = nextDeadline;
- heap.push(it);
- flag = true;
- continue;
- }
-
- // Remove unnecessary items
- if (!it->second.Needed()) {
- NameToState.erase(it);
- if (MonCounters) {
- *MonCounters->CacheSize = NameToState.size();
- }
- }
- }
- }
-
- void CleanupExpired(TInstant now) {
- DoCleanupExpired<&TState::StateUnspec, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapUnspec, now);
- DoCleanupExpired<&TState::StateUnspec, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapUnspec, now);
- DoCleanupExpired<&TState::StateIPv6, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapIPv6, now);
- DoCleanupExpired<&TState::StateIPv6, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapIPv6, now);
- DoCleanupExpired<&TState::StateIPv4, &TFamilyState::SoftDeadline, &TFamilyState::NextSoftDeadline, &TFamilyState::InSoftHeap>(SoftHeapIPv4, now);
- DoCleanupExpired<&TState::StateIPv4, &TFamilyState::HardDeadline, &TFamilyState::NextHardDeadline, &TFamilyState::InHardHeap>(HardHeapIPv4, now);
- }
-
- template<class TEvent>
- void SendError(TActorId replyTo, ui64 cookie, int status, const TString& errorText) {
- auto reply = MakeHolder<TEvent>();
- reply->Status = status;
- reply->ErrorText = errorText;
- this->Send(replyTo, reply.Release(), 0, cookie);
- }
-
- void ReplyWithError(THolder<TIncomingRequest> req, int status, const TString& errorText) {
- if (MonCounters) {
- ++*MonCounters->IncomingErrors;
- }
- switch (req->Type) {
- case EIncomingRequestType::GetHostByName: {
- SendError<TEvDns::TEvGetHostByNameResult>(req->Sender, req->Cookie, status, errorText);
- break;
- }
- case EIncomingRequestType::GetAddr: {
- SendError<TEvDns::TEvGetAddrResult>(req->Sender, req->Cookie, status, errorText);
- break;
- }
- }
- }
-
- void ReplyWithAddrs(THolder<TIncomingRequest> req, const TVector<struct in6_addr>& addrs6, const TVector<struct in_addr>& addrs4) {
- switch (req->Type) {
- case EIncomingRequestType::GetHostByName: {
- auto reply = MakeHolder<TEvDns::TEvGetHostByNameResult>();
- reply->AddrsV6 = addrs6;
- reply->AddrsV4 = addrs4;
- Send(req->Sender, reply.Release(), 0, req->Cookie);
- break;
- }
- case EIncomingRequestType::GetAddr: {
- auto reply = MakeHolder<TEvDns::TEvGetAddrResult>();
- if (!addrs6.empty()) {
- reply->Addr = addrs6.front();
- } else if (!addrs4.empty()) {
- reply->Addr = addrs4.front();
- } else {
- Y_ABORT("Unexpected reply with empty address list");
- }
- Send(req->Sender, reply.Release(), 0, req->Cookie);
- break;
- }
- }
- }
-
- void ReplyWithError(THolder<TIncomingRequest> req, int status) {
- ReplyWithError(std::move(req), status, ares_strerror(status));
- }
-
- void DropPending(TIncomingRequestList& list, int status, const TString& errorText) {
- while (list) {
- THolder<TIncomingRequest> req(list.PopFront());
- ReplyWithError(std::move(req), status, errorText);
- }
- }
-
- void DropPending(int status, const TString& errorText) {
- for (auto& [name, state] : NameToState) {
- DropPending(state.StateUnspec.WaitingRequests, status, errorText);
- DropPending(state.StateIPv6.WaitingRequests, status, errorText);
- DropPending(state.StateIPv4.WaitingRequests, status, errorText);
- }
- }
-
- void DropPending(int status) {
- DropPending(status, ares_strerror(status));
- }
-
- private:
- const TActorId Upstream;
- const TCachingDnsResolverOptions Options;
- const THolder<TMonCounters> MonCounters;
-
- TNameToState NameToState;
- TStateHeap<&TState::StateUnspec, &TFamilyState::SoftDeadline> SoftHeapUnspec;
- TStateHeap<&TState::StateUnspec, &TFamilyState::HardDeadline> HardHeapUnspec;
- TStateHeap<&TState::StateIPv6, &TFamilyState::SoftDeadline> SoftHeapIPv6;
- TStateHeap<&TState::StateIPv6, &TFamilyState::HardDeadline> HardHeapIPv6;
- TStateHeap<&TState::StateIPv4, &TFamilyState::SoftDeadline> SoftHeapIPv4;
- TStateHeap<&TState::StateIPv4, &TFamilyState::HardDeadline> HardHeapIPv4;
-
- THashMap<ui64, TWaitingInfo> WaitingRequests;
- ui64 LastRequestId = 0;
- };
-
- IActor* CreateCachingDnsResolver(TActorId upstream, TCachingDnsResolverOptions options) {
- return new TCachingDnsResolver(upstream, std::move(options));
- }
-
-} // namespace NDnsResolver
-} // namespace NActors
diff --git a/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp b/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
deleted file mode 100644
index 60a45f6fba..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
+++ /dev/null
@@ -1,648 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/string/builder.h>
-
-#include <ares.h>
-
-using namespace NActors;
-using namespace NActors::NDnsResolver;
-
-// FIXME: use a mock resolver
-Y_UNIT_TEST_SUITE(CachingDnsResolver) {
-
- struct TAddrToString {
- TString operator()(const std::monostate&) const {
- return "<nothing>";
- }
-
- TString operator()(const struct in6_addr& addr) const {
- char dst[INET6_ADDRSTRLEN];
- auto res = ares_inet_ntop(AF_INET6, &addr, dst, INET6_ADDRSTRLEN);
- Y_ABORT_UNLESS(res, "Cannot convert ipv6 address");
- return dst;
- }
-
- TString operator()(const struct in_addr& addr) const {
- char dst[INET_ADDRSTRLEN];
- auto res = ares_inet_ntop(AF_INET, &addr, dst, INET_ADDRSTRLEN);
- Y_ABORT_UNLESS(res, "Cannot convert ipv4 address");
- return dst;
- }
- };
-
- TString AddrToString(const std::variant<std::monostate, struct in6_addr, struct in_addr>& v) {
- return std::visit(TAddrToString(), v);
- }
-
- struct TMockReply {
- static constexpr TDuration DefaultDelay = TDuration::MilliSeconds(1);
-
- int Status = 0;
- TDuration Delay;
- TVector<struct in6_addr> AddrsV6;
- TVector<struct in_addr> AddrsV4;
-
- static TMockReply Error(int status, TDuration delay = DefaultDelay) {
- Y_ABORT_UNLESS(status != 0);
- TMockReply reply;
- reply.Status = status;
- reply.Delay = delay;
- return reply;
- }
-
- static TMockReply Empty(TDuration delay = DefaultDelay) {
- TMockReply reply;
- reply.Delay = delay;
- return reply;
- }
-
- static TMockReply ManyV6(const TVector<TString>& addrs, TDuration delay = DefaultDelay) {
- TMockReply reply;
- reply.Delay = delay;
- for (const TString& addr : addrs) {
- void* dst = &reply.AddrsV6.emplace_back();
- int status = ares_inet_pton(AF_INET6, addr.c_str(), dst);
- Y_ABORT_UNLESS(status == 1, "Invalid ipv6 address: %s", addr.c_str());
- }
- return reply;
- }
-
- static TMockReply ManyV4(const TVector<TString>& addrs, TDuration delay = DefaultDelay) {
- TMockReply reply;
- reply.Delay = delay;
- for (const TString& addr : addrs) {
- void* dst = &reply.AddrsV4.emplace_back();
- int status = ares_inet_pton(AF_INET, addr.c_str(), dst);
- Y_ABORT_UNLESS(status == 1, "Invalid ipv4 address: %s", addr.c_str());
- }
- return reply;
- }
-
- static TMockReply SingleV6(const TString& addr, TDuration delay = DefaultDelay) {
- return ManyV6({ addr }, delay);
- }
-
- static TMockReply SingleV4(const TString& addr, TDuration delay = DefaultDelay) {
- return ManyV4({ addr }, delay);
- }
-
- friend TMockReply operator+(const TMockReply& a, const TMockReply& b) {
- Y_ABORT_UNLESS(a.Status == b.Status);
- TMockReply result;
- result.Status = a.Status;
- result.Delay = Max(a.Delay, b.Delay);
- result.AddrsV6.insert(result.AddrsV6.end(), a.AddrsV6.begin(), a.AddrsV6.end());
- result.AddrsV6.insert(result.AddrsV6.end(), b.AddrsV6.begin(), b.AddrsV6.end());
- result.AddrsV4.insert(result.AddrsV4.end(), a.AddrsV4.begin(), a.AddrsV4.end());
- result.AddrsV4.insert(result.AddrsV4.end(), b.AddrsV4.begin(), b.AddrsV4.end());
- return result;
- }
- };
-
- using TMockDnsCallback = std::function<TMockReply (const TString&, int)>;
-
- class TMockDnsResolver : public TActor<TMockDnsResolver> {
- public:
- TMockDnsResolver(TMockDnsCallback callback)
- : TActor(&TThis::StateWork)
- , Callback(std::move(callback))
- { }
-
- private:
- struct TEvPrivate {
- enum EEv {
- EvScheduled = EventSpaceBegin(TEvents::ES_PRIVATE),
- };
-
- struct TEvScheduled : public TEventLocal<TEvScheduled, EvScheduled> {
- TActorId Sender;
- ui64 Cookie;
- TMockReply Reply;
-
- TEvScheduled(TActorId sender, ui64 cookie, TMockReply reply)
- : Sender(sender)
- , Cookie(cookie)
- , Reply(std::move(reply))
- { }
- };
- };
-
- private:
- STRICT_STFUNC(StateWork, {
- hFunc(TEvents::TEvPoison, Handle);
- hFunc(TEvDns::TEvGetHostByName, Handle);
- hFunc(TEvPrivate::TEvScheduled, Handle);
- });
-
- void Handle(TEvents::TEvPoison::TPtr&) {
- PassAway();
- }
-
- void Handle(TEvDns::TEvGetHostByName::TPtr& ev) {
- auto reply = Callback(ev->Get()->Name, ev->Get()->Family);
- if (reply.Delay) {
- Schedule(reply.Delay, new TEvPrivate::TEvScheduled(ev->Sender, ev->Cookie, std::move(reply)));
- } else {
- SendReply(ev->Sender, ev->Cookie, std::move(reply));
- }
- }
-
- void Handle(TEvPrivate::TEvScheduled::TPtr& ev) {
- SendReply(ev->Get()->Sender, ev->Get()->Cookie, std::move(ev->Get()->Reply));
- }
-
- private:
- void SendReply(const TActorId& sender, ui64 cookie, TMockReply&& reply) {
- auto res = MakeHolder<TEvDns::TEvGetHostByNameResult>();
- res->Status = reply.Status;
- if (res->Status != 0) {
- res->ErrorText = ares_strerror(res->Status);
- } else {
- res->AddrsV6 = std::move(reply.AddrsV6);
- res->AddrsV4 = std::move(reply.AddrsV4);
- }
- Send(sender, res.Release(), 0, cookie);
- }
-
- private:
- TMockDnsCallback Callback;
- };
-
- struct TCachingDnsRuntime : public TTestActorRuntimeBase {
- TCachingDnsResolverOptions ResolverOptions;
- TActorId MockResolver;
- TActorId Resolver;
- TActorId Sleeper;
- TString Section_;
-
- NMonitoring::TDynamicCounters::TCounterPtr InFlightUnspec;
- NMonitoring::TDynamicCounters::TCounterPtr InFlight6;
- NMonitoring::TDynamicCounters::TCounterPtr InFlight4;
- NMonitoring::TDynamicCounters::TCounterPtr TotalUnspec;
- NMonitoring::TDynamicCounters::TCounterPtr Total6;
- NMonitoring::TDynamicCounters::TCounterPtr Total4;
- NMonitoring::TDynamicCounters::TCounterPtr Misses;
- NMonitoring::TDynamicCounters::TCounterPtr Hits;
-
- THashMap<TString, TMockReply> ReplyV6;
- THashMap<TString, TMockReply> ReplyV4;
- THashMap<TString, TMockReply> ReplyUnspec;
-
- TCachingDnsRuntime() {
- SetScheduledEventFilter([](auto&&, auto&&, auto&&, auto&&) { return false; });
- ResolverOptions.MonCounters = new NMonitoring::TDynamicCounters;
-
- ReplyV6["localhost"] = TMockReply::SingleV6("::1");
- ReplyV4["localhost"] = TMockReply::SingleV4("127.0.0.1");
- ReplyV6["yandex.ru"] = TMockReply::SingleV6("2a02:6b8:a::a", TDuration::MilliSeconds(500));
- ReplyV4["yandex.ru"] = TMockReply::SingleV4("77.88.55.77", TDuration::MilliSeconds(250));
- ReplyV6["router.asus.com"] = TMockReply::Error(ARES_ENODATA);
- ReplyV4["router.asus.com"] = TMockReply::SingleV4("192.168.0.1");
- ReplyUnspec["localhost"] = ReplyV6.at("localhost") + ReplyV4.at("localhost");
- ReplyUnspec["yandex.ru"] = ReplyV6.at("yandex.ru") + ReplyV4.at("yandex.ru");
- ReplyUnspec["router.asus.com"] = ReplyV4.at("router.asus.com");
- }
-
- void Start(TMockDnsCallback callback) {
- MockResolver = Register(new TMockDnsResolver(std::move(callback)));
- EnableScheduleForActor(MockResolver);
- Resolver = Register(CreateCachingDnsResolver(MockResolver, ResolverOptions));
- Sleeper = AllocateEdgeActor();
-
- InFlightUnspec = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/InFlight/Unspec", false);
- InFlight6 = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/InFlight/V6", false);
- InFlight4 = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/InFlight/V4", false);
- TotalUnspec = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/Total/Unspec", true);
- Total6 = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/Total/V6", true);
- Total4 = ResolverOptions.MonCounters->GetCounter("DnsResolver/Outgoing/Total/V4", true);
- Misses = ResolverOptions.MonCounters->GetCounter("DnsResolver/Cache/Misses", true);
- Hits = ResolverOptions.MonCounters->GetCounter("DnsResolver/Cache/Hits", true);
- }
-
- void Start() {
- Start([this](const TString& name, int family) {
- switch (family) {
- case AF_UNSPEC: {
- auto it = ReplyUnspec.find(name);
- if (it != ReplyUnspec.end()) {
- return it->second;
- }
- break;
- }
- case AF_INET6: {
- auto it = ReplyV6.find(name);
- if (it != ReplyV6.end()) {
- return it->second;
- }
- break;
- }
- case AF_INET: {
- auto it = ReplyV4.find(name);
- if (it != ReplyV4.end()) {
- return it->second;
- }
- break;
- }
- }
- return TMockReply::Error(ARES_ENOTFOUND);
- });
- }
-
- void Section(const TString& section) {
- Section_ = section;
- }
-
- void Sleep(TDuration duration) {
- Schedule(new IEventHandle(Sleeper, Sleeper, new TEvents::TEvWakeup), duration);
- GrabEdgeEventRethrow<TEvents::TEvWakeup>(Sleeper);
- }
-
- void WaitNoInFlight() {
- if (*InFlightUnspec || *InFlight6 || *InFlight4) {
- TDispatchOptions options;
- options.CustomFinalCondition = [&]() {
- return !*InFlightUnspec && !*InFlight6 && !*InFlight4;
- };
- DispatchEvents(options);
- UNIT_ASSERT_C(!*InFlightUnspec && !*InFlight6 && !*InFlight4, "Failed to wait for no inflight in " << Section_);
- }
- }
-
- void SendGetHostByName(const TActorId& sender, const TString& name, int family = AF_UNSPEC) {
- Send(new IEventHandle(Resolver, sender, new TEvDns::TEvGetHostByName(name, family)), 0, true);
- }
-
- void SendGetAddr(const TActorId& sender, const TString& name, int family = AF_UNSPEC) {
- Send(new IEventHandle(Resolver, sender, new TEvDns::TEvGetAddr(name, family)), 0, true);
- }
-
- TEvDns::TEvGetHostByNameResult::TPtr WaitGetHostByName(const TActorId& sender) {
- return GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- }
-
- TEvDns::TEvGetAddrResult::TPtr WaitGetAddr(const TActorId& sender) {
- return GrabEdgeEventRethrow<TEvDns::TEvGetAddrResult>(sender);
- }
-
- void ExpectInFlightUnspec(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(InFlightUnspec->Val(), count, Section_);
- }
-
- void ExpectInFlight6(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(InFlight6->Val(), count, Section_);
- }
-
- void ExpectInFlight4(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(InFlight4->Val(), count, Section_);
- }
-
- void ExpectTotalUnspec(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(TotalUnspec->Val(), count, Section_);
- }
-
- void ExpectTotal6(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(Total6->Val(), count, Section_);
- }
-
- void ExpectTotal4(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(Total4->Val(), count, Section_);
- }
-
- void ExpectUnspec(i64 total, i64 inflight) {
- UNIT_ASSERT_C(
- TotalUnspec->Val() == total && InFlightUnspec->Val() == inflight,
- Section_ << ": Expect6(" << total << ", " << inflight << ") "
- << " but got (" << TotalUnspec->Val() << ", " << InFlightUnspec->Val() << ")");
- }
-
- void Expect6(i64 total, i64 inflight) {
- UNIT_ASSERT_C(
- Total6->Val() == total && InFlight6->Val() == inflight,
- Section_ << ": Expect6(" << total << ", " << inflight << ") "
- << " but got (" << Total6->Val() << ", " << InFlight6->Val() << ")");
- }
-
- void Expect4(i64 total, i64 inflight) {
- UNIT_ASSERT_C(
- Total4->Val() == total && InFlight4->Val() == inflight,
- Section_ << ": Expect4(" << total << ", " << inflight << ") "
- << " got (" << Total4->Val() << ", " << InFlight4->Val() << ")");
- }
-
- void ExpectMisses(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(Misses->Val(), count, Section_);
- }
-
- void ExpectHits(i64 count) {
- UNIT_ASSERT_VALUES_EQUAL_C(Hits->Val(), count, Section_);
- }
-
- void ExpectGetHostByNameError(const TActorId& sender, int status) {
- auto ev = WaitGetHostByName(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, status, Section_ << ": " << ev->Get()->ErrorText);
- }
-
- void ExpectGetAddrError(const TActorId& sender, int status) {
- auto ev = WaitGetAddr(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, status, Section_ << ": " << ev->Get()->ErrorText);
- }
-
- void ExpectGetHostByNameSuccess(const TActorId& sender, const TString& expected) {
- auto ev = WaitGetHostByName(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, Section_ << ": " << ev->Get()->ErrorText);
- TStringBuilder result;
- for (const auto& addr : ev->Get()->AddrsV6) {
- if (result) {
- result << ',';
- }
- result << TAddrToString()(addr);
- }
- for (const auto& addr : ev->Get()->AddrsV4) {
- if (result) {
- result << ',';
- }
- result << TAddrToString()(addr);
- }
- UNIT_ASSERT_VALUES_EQUAL_C(TString(result), expected, Section_);
- }
-
- void ExpectGetAddrSuccess(const TActorId& sender, const TString& expected) {
- auto ev = WaitGetAddr(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, Section_ << ": " << ev->Get()->ErrorText);
- TString result = AddrToString(ev->Get()->Addr);
- UNIT_ASSERT_VALUES_EQUAL_C(result, expected, Section_);
- }
- };
-
- Y_UNIT_TEST(UnusableResolver) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
-
- runtime.Send(new IEventHandle(runtime.MockResolver, { }, new TEvents::TEvPoison), 0, true);
- runtime.SendGetAddr(sender, "foo.ru", AF_UNSPEC);
- runtime.ExpectGetAddrError(sender, ARES_ENOTINITIALIZED);
- }
-
- Y_UNIT_TEST(ResolveCaching) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- // First time resolve, we expect AF_UNSPEC result to be cached
- runtime.Section("First time resolve");
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(1, 0);
- runtime.ExpectMisses(1);
- runtime.ExpectHits(0);
-
- // Second resolve, unspec is a cache hit, ipv6 and ipv4 result in cache misses
- runtime.Section("Second resolve");
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(1, 0);
- runtime.ExpectHits(1);
-
- // Wait until soft expiration and try unspec again
- // Will cause a cache hit, but will start a new request in background
- runtime.Section("Retry both after soft expiration");
- runtime.Sleep(TDuration::Seconds(15));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(2, 1);
- runtime.ExpectMisses(1);
- runtime.ExpectHits(2);
- runtime.WaitNoInFlight();
-
- // Wait until hard expiration and try unspec again
- // Will cause a cache miss and new resolve requests
- runtime.Section("Retry both after hard expiration");
- runtime.Sleep(TDuration::Hours(2));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(3, 0);
- runtime.ExpectMisses(2);
- runtime.ExpectHits(2);
-
- // Wait half the hard expiration time, must always result in a cache hit
- runtime.Section("Retry both after half hard expiration");
- for (ui64 i = 1; i <= 4; ++i) {
- runtime.Sleep(TDuration::Hours(1));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(3 + i, 1);
- runtime.ExpectHits(2 + i);
- runtime.WaitNoInFlight();
- }
-
- // Change unspec result to a timeout, must keep using cached result until hard expiration
- runtime.Section("Dns keeps timing out");
- runtime.ReplyUnspec["yandex.ru"] = TMockReply::Error(ARES_ETIMEOUT);
- for (ui64 i = 1; i <= 4; ++i) {
- runtime.Sleep(TDuration::Seconds(15));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.ExpectUnspec(7 + i, 1);
- runtime.ExpectHits(6 + i);
- runtime.WaitNoInFlight();
- }
-
- // Change unspec result to v4, must switch to a v4 result eventually
- runtime.Section("Host changes to being ipv4 only");
- runtime.ReplyUnspec["yandex.ru"] = runtime.ReplyV4.at("yandex.ru");
- runtime.Sleep(TDuration::Seconds(2));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
- runtime.WaitNoInFlight();
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "77.88.55.77");
- runtime.ExpectUnspec(12, 0);
- runtime.ExpectMisses(2);
-
- // Change unspec result to nxdomain, must start returning it
- runtime.Section("Host is removed from dns");
- runtime.ReplyUnspec["yandex.ru"] = TMockReply::Error(ARES_ENOTFOUND);
- runtime.Sleep(TDuration::Seconds(15));
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "77.88.55.77");
- runtime.WaitNoInFlight();
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- }
-
- Y_UNIT_TEST(ResolveCachingV4) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.Section("First request");
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "192.168.0.1");
- runtime.ExpectMisses(1);
-
- runtime.Section("Second request");
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "192.168.0.1");
- runtime.ExpectHits(1);
-
- runtime.Section("Dns keeps timing out");
- runtime.ReplyUnspec["router.asus.com"] = TMockReply::Error(ARES_ETIMEOUT);
- for (ui64 i = 1; i <= 4; ++i) {
- runtime.Sleep(TDuration::Seconds(15));
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "192.168.0.1");
- runtime.ExpectUnspec(1 + i, 1);
- runtime.ExpectHits(1 + i);
- runtime.WaitNoInFlight();
- }
-
- runtime.Section("Host is removed from dns");
- runtime.ReplyUnspec["router.asus.com"] = TMockReply::Error(ARES_ENOTFOUND);
- runtime.Sleep(TDuration::Seconds(15));
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetAddrSuccess(sender, "192.168.0.1");
- runtime.WaitNoInFlight();
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- }
-
- Y_UNIT_TEST(EventualTimeout) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.ReplyUnspec["notfound.ru"] = TMockReply::Error(ARES_ENOTFOUND);
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
-
- runtime.ReplyUnspec["notfound.ru"] = TMockReply::Error(ARES_ETIMEOUT);
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- runtime.WaitNoInFlight();
-
- bool timeout = false;
- for (ui64 i = 1; i <= 8; ++i) {
- runtime.Sleep(TDuration::Minutes(30));
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- auto ev = runtime.WaitGetAddr(sender);
- if (ev->Get()->Status == ARES_ETIMEOUT && i > 2) {
- timeout = true;
- break;
- }
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, ARES_ENOTFOUND,
- "Iteration " << i << ": " << ev->Get()->ErrorText);
- }
-
- UNIT_ASSERT_C(timeout, "DnsResolver did not reply with a timeout");
- }
-
- Y_UNIT_TEST(MultipleRequestsAndHosts) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.SendGetHostByName(sender, "router.asus.com", AF_UNSPEC);
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.SendGetHostByName(sender, "yandex.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameSuccess(sender, "192.168.0.1");
- runtime.ExpectGetAddrSuccess(sender, "192.168.0.1");
- runtime.ExpectGetHostByNameSuccess(sender, "2a02:6b8:a::a,77.88.55.77");
- runtime.ExpectGetAddrSuccess(sender, "2a02:6b8:a::a");
-
- runtime.SendGetHostByName(sender, "notfound.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameError(sender, ARES_ENOTFOUND);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- }
-
- Y_UNIT_TEST(DisabledIPv6) {
- TCachingDnsRuntime runtime;
- runtime.ResolverOptions.AllowIPv6 = false;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.SendGetHostByName(sender, "yandex.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameSuccess(sender, "77.88.55.77");
- runtime.ExpectGetAddrSuccess(sender, "77.88.55.77");
-
- runtime.SendGetHostByName(sender, "yandex.ru", AF_INET6);
- runtime.SendGetAddr(sender, "yandex.ru", AF_INET6);
- runtime.ExpectGetHostByNameError(sender, ARES_EBADFAMILY);
- runtime.ExpectGetAddrError(sender, ARES_EBADFAMILY);
-
- runtime.SendGetHostByName(sender, "yandex.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameSuccess(sender, "77.88.55.77");
- runtime.ExpectGetAddrSuccess(sender, "77.88.55.77");
-
- runtime.SendGetHostByName(sender, "notfound.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameError(sender, ARES_ENOTFOUND);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- }
-
- Y_UNIT_TEST(DisabledIPv4) {
- TCachingDnsRuntime runtime;
- runtime.ResolverOptions.AllowIPv4 = false;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.SendGetHostByName(sender, "router.asus.com", AF_UNSPEC);
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetHostByNameError(sender, ARES_ENODATA);
- runtime.ExpectGetAddrError(sender, ARES_ENODATA);
-
- runtime.SendGetHostByName(sender, "router.asus.com", AF_INET);
- runtime.SendGetAddr(sender, "router.asus.com", AF_INET);
- runtime.ExpectGetHostByNameError(sender, ARES_EBADFAMILY);
- runtime.ExpectGetAddrError(sender, ARES_EBADFAMILY);
-
- runtime.SendGetHostByName(sender, "router.asus.com", AF_UNSPEC);
- runtime.SendGetAddr(sender, "router.asus.com", AF_UNSPEC);
- runtime.ExpectGetHostByNameError(sender, ARES_ENODATA);
- runtime.ExpectGetAddrError(sender, ARES_ENODATA);
-
- runtime.SendGetHostByName(sender, "notfound.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "notfound.ru", AF_UNSPEC);
- runtime.ExpectGetHostByNameError(sender, ARES_ENOTFOUND);
- runtime.ExpectGetAddrError(sender, ARES_ENOTFOUND);
- }
-
- Y_UNIT_TEST(PoisonPill) {
- TCachingDnsRuntime runtime;
- runtime.Initialize();
- runtime.Start();
-
- auto sender = runtime.AllocateEdgeActor();
-
- runtime.SendGetHostByName(sender, "yandex.ru", AF_UNSPEC);
- runtime.SendGetAddr(sender, "yandex.ru", AF_UNSPEC);
- runtime.Send(new IEventHandle(runtime.Resolver, sender, new TEvents::TEvPoison), 0, true);
- runtime.ExpectGetHostByNameError(sender, ARES_ECANCELLED);
- runtime.ExpectGetAddrError(sender, ARES_ECANCELLED);
- }
-
-}
diff --git a/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp b/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
deleted file mode 100644
index 8b0ddf5a6d..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver_ondemand.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-
-namespace NActors {
-namespace NDnsResolver {
-
- class TOnDemandDnsResolver : public TActor<TOnDemandDnsResolver> {
- public:
- TOnDemandDnsResolver(TOnDemandDnsResolverOptions options)
- : TActor(&TThis::StateWork)
- , Options(std::move(options))
- { }
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::DNS_RESOLVER;
- }
-
- private:
- STRICT_STFUNC(StateWork, {
- cFunc(TEvents::TEvPoison::EventType, PassAway);
- fFunc(TEvDns::TEvGetHostByName::EventType, Forward);
- fFunc(TEvDns::TEvGetAddr::EventType, Forward);
- });
-
- void Forward(STATEFN_SIG) {
- ev->Rewrite(ev->GetTypeRewrite(), GetUpstream());
- TActivationContext::Send(ev.Release());
- }
-
- private:
- TActorId GetUpstream() {
- if (Y_UNLIKELY(!CachingResolverId)) {
- if (Y_LIKELY(!SimpleResolverId)) {
- SimpleResolverId = RegisterWithSameMailbox(CreateSimpleDnsResolver(Options));
- }
- CachingResolverId = RegisterWithSameMailbox(CreateCachingDnsResolver(SimpleResolverId, Options));
- }
- return CachingResolverId;
- }
-
- void PassAway() override {
- if (CachingResolverId) {
- Send(CachingResolverId, new TEvents::TEvPoison);
- CachingResolverId = { };
- }
- if (SimpleResolverId) {
- Send(SimpleResolverId, new TEvents::TEvPoison);
- SimpleResolverId = { };
- }
- }
-
- private:
- TOnDemandDnsResolverOptions Options;
- TActorId SimpleResolverId;
- TActorId CachingResolverId;
- };
-
- IActor* CreateOnDemandDnsResolver(TOnDemandDnsResolverOptions options) {
- return new TOnDemandDnsResolver(std::move(options));
- }
-
-} // namespace NDnsResolver
-} // namespace NActors
diff --git a/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp b/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
deleted file mode 100644
index 2758484552..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+++ /dev/null
@@ -1,24 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-using namespace NActors::NDnsResolver;
-
-Y_UNIT_TEST_SUITE(OnDemandDnsResolver) {
-
- Y_UNIT_TEST(ResolveLocalHost) {
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- auto resolver = runtime.Register(CreateOnDemandDnsResolver());
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetHostByName("localhost", AF_UNSPEC)),
- 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, ev->Get()->ErrorText);
- size_t addrs = ev->Get()->AddrsV4.size() + ev->Get()->AddrsV6.size();
- UNIT_ASSERT_C(addrs > 0, "Got " << addrs << " addresses");
- }
-
-}
diff --git a/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp b/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
deleted file mode 100644
index 93c4b832e2..0000000000
--- a/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
+++ /dev/null
@@ -1,98 +0,0 @@
-#include "dnsresolver.h"
-
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/string/builder.h>
-
-#include <ares.h>
-
-using namespace NActors;
-using namespace NActors::NDnsResolver;
-
-Y_UNIT_TEST_SUITE(DnsResolver) {
-
- struct TSilentUdpServer {
- TInetDgramSocket Socket;
- ui16 Port;
-
- TSilentUdpServer() {
- TSockAddrInet addr("127.0.0.1", 0);
- int err = Socket.Bind(&addr);
- Y_ABORT_UNLESS(err == 0, "Cannot bind a udp socket");
- Port = addr.GetPort();
- }
- };
-
- Y_UNIT_TEST(ResolveLocalHost) {
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- auto resolver = runtime.Register(CreateSimpleDnsResolver());
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetHostByName("localhost", AF_UNSPEC)),
- 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, ev->Get()->ErrorText);
- size_t addrs = ev->Get()->AddrsV4.size() + ev->Get()->AddrsV6.size();
- UNIT_ASSERT_C(addrs > 0, "Got " << addrs << " addresses");
- }
-
- Y_UNIT_TEST(ResolveYandexRu) {
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- auto resolver = runtime.Register(CreateSimpleDnsResolver());
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetHostByName("yandex.ru", AF_UNSPEC)),
- 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, ev->Get()->ErrorText);
- size_t addrs = ev->Get()->AddrsV4.size() + ev->Get()->AddrsV6.size();
- UNIT_ASSERT_C(addrs > 0, "Got " << addrs << " addresses");
- }
-
- Y_UNIT_TEST(GetAddrYandexRu) {
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- auto resolver = runtime.Register(CreateSimpleDnsResolver());
-
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetAddr("yandex.ru", AF_UNSPEC)),
- 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetAddrResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, 0, ev->Get()->ErrorText);
- UNIT_ASSERT_C(ev->Get()->IsV4() || ev->Get()->IsV6(), "Expect v4 or v6 address");
- }
-
- Y_UNIT_TEST(ResolveTimeout) {
- TSilentUdpServer server;
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- TSimpleDnsResolverOptions options;
- options.Timeout = TDuration::MilliSeconds(250);
- options.Attempts = 2;
- options.Servers.emplace_back(TStringBuilder() << "127.0.0.1:" << server.Port);
- auto resolver = runtime.Register(CreateSimpleDnsResolver(options));
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetHostByName("timeout.yandex.ru", AF_INET)),
- 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, ARES_ETIMEOUT, ev->Get()->ErrorText);
- }
-
- Y_UNIT_TEST(ResolveGracefulStop) {
- TSilentUdpServer server;
- TTestActorRuntimeBase runtime;
- runtime.Initialize();
- auto sender = runtime.AllocateEdgeActor();
- TSimpleDnsResolverOptions options;
- options.Timeout = TDuration::Seconds(5);
- options.Attempts = 5;
- options.Servers.emplace_back(TStringBuilder() << "127.0.0.1:" << server.Port);
- auto resolver = runtime.Register(CreateSimpleDnsResolver(options));
- runtime.Send(new IEventHandle(resolver, sender, new TEvDns::TEvGetHostByName("timeout.yandex.ru", AF_INET)),
- 0, true);
- runtime.Send(new IEventHandle(resolver, sender, new TEvents::TEvPoison), 0, true);
- auto ev = runtime.GrabEdgeEventRethrow<TEvDns::TEvGetHostByNameResult>(sender);
- UNIT_ASSERT_VALUES_EQUAL_C(ev->Get()->Status, ARES_ECANCELLED, ev->Get()->ErrorText);
- }
-
-}
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 8874bee8e5..0000000000
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-dnsresolver-ut)
-target_include_directories(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver
- ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
-)
-target_link_libraries(library-cpp-actors-dnsresolver-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-dnsresolver
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-dnsresolver-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-dnsresolver-ut
- TEST_TARGET
- library-cpp-actors-dnsresolver-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-dnsresolver-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 9e092bba80..0000000000
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-dnsresolver-ut)
-target_include_directories(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver
- ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
-)
-target_link_libraries(library-cpp-actors-dnsresolver-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-dnsresolver
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-dnsresolver-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-dnsresolver-ut
- TEST_TARGET
- library-cpp-actors-dnsresolver-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-dnsresolver-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index c8eb27acd0..0000000000
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-dnsresolver-ut)
-target_include_directories(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver
- ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
-)
-target_link_libraries(library-cpp-actors-dnsresolver-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-dnsresolver
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-dnsresolver-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-dnsresolver-ut
- TEST_TARGET
- library-cpp-actors-dnsresolver-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-dnsresolver-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index eae7bc85a3..0000000000
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-dnsresolver-ut)
-target_include_directories(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver
- ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
-)
-target_link_libraries(library-cpp-actors-dnsresolver-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-dnsresolver
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-dnsresolver-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-dnsresolver-ut
- TEST_TARGET
- library-cpp-actors-dnsresolver-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-dnsresolver-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 7705d206a3..0000000000
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,64 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-dnsresolver-ut)
-target_include_directories(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver
- ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
-)
-target_link_libraries(library-cpp-actors-dnsresolver-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-dnsresolver
- cpp-actors-testlib
-)
-target_sources(library-cpp-actors-dnsresolver-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_caching_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/dnsresolver/dnsresolver_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-dnsresolver-ut
- TEST_TARGET
- library-cpp-actors-dnsresolver-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-dnsresolver-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-dnsresolver-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/ya.make b/library/cpp/actors/dnsresolver/ut/ya.make
deleted file mode 100644
index ec4b117bf7..0000000000
--- a/library/cpp/actors/dnsresolver/ut/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/dnsresolver)
-
-PEERDIR(
- library/cpp/actors/testlib
-)
-
-SRCS(
- dnsresolver_caching_ut.cpp
- dnsresolver_ondemand_ut.cpp
- dnsresolver_ut.cpp
-)
-
-ADDINCL(contrib/libs/c-ares/include)
-
-TAG(ya:external)
-REQUIREMENTS(network:full)
-
-END()
diff --git a/library/cpp/actors/dnsresolver/ya.make b/library/cpp/actors/dnsresolver/ya.make
deleted file mode 100644
index 6d9318a254..0000000000
--- a/library/cpp/actors/dnsresolver/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-LIBRARY()
-
-SRCS(
- dnsresolver.cpp
- dnsresolver_caching.cpp
- dnsresolver_ondemand.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- contrib/libs/c-ares
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt b/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index b2acea01a5..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(example_01_ping_pong)
-target_link_libraries(example_01_ping_pong PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_link_options(example_01_ping_pong PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(example_01_ping_pong PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/01_ping_pong/main.cpp
-)
-target_allocator(example_01_ping_pong
- library-cpp-lfalloc
-)
-vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 1e053eb6b4..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,30 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(example_01_ping_pong)
-target_link_libraries(example_01_ping_pong PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
-)
-target_link_options(example_01_ping_pong PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(example_01_ping_pong PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/01_ping_pong/main.cpp
-)
-target_allocator(example_01_ping_pong
- library-cpp-lfalloc
-)
-vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt b/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 5884f06261..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(example_01_ping_pong)
-target_link_libraries(example_01_ping_pong PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
-)
-target_link_options(example_01_ping_pong PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(example_01_ping_pong PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/01_ping_pong/main.cpp
-)
-target_allocator(example_01_ping_pong
- library-cpp-lfalloc
-)
-vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt b/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 7320982029..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,34 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(example_01_ping_pong)
-target_link_libraries(example_01_ping_pong PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
-)
-target_link_options(example_01_ping_pong PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(example_01_ping_pong PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/01_ping_pong/main.cpp
-)
-target_allocator(example_01_ping_pong
- library-cpp-lfalloc
-)
-vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt b/library/cpp/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 22c41768c9..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(example_01_ping_pong)
-target_link_libraries(example_01_ping_pong PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
-)
-target_sources(example_01_ping_pong PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/01_ping_pong/main.cpp
-)
-target_allocator(example_01_ping_pong
- library-cpp-lfalloc
-)
-vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/examples/01_ping_pong/main.cpp b/library/cpp/actors/examples/01_ping_pong/main.cpp
deleted file mode 100644
index 437f06eadd..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/main.cpp
+++ /dev/null
@@ -1,129 +0,0 @@
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/util/should_continue.h>
-#include <util/system/sigset.h>
-#include <util/generic/xrange.h>
-
-using namespace NActors;
-
-static TProgramShouldContinue ShouldContinue;
-
-void OnTerminate(int) {
- ShouldContinue.ShouldStop();
-}
-
-class TPingActor : public TActorBootstrapped<TPingActor> {
- const TActorId Target;
- ui64 HandledEvents;
- TInstant PeriodStart;
-
- void Handle(TEvents::TEvPing::TPtr &ev) {
- Send(ev->Sender, new TEvents::TEvPong());
- Send(ev->Sender, new TEvents::TEvPing());
- Become(&TThis::StatePing);
- }
-
- void Handle(TEvents::TEvPong::TPtr &ev) {
- Y_UNUSED(ev);
- Become(&TThis::StateWait);
- }
-
- void PrintStats() {
- const i64 ms = (TInstant::Now() - PeriodStart).MilliSeconds();
- Cout << "Handled " << 2 * HandledEvents << " over " << ms << "ms" << Endl;
- ScheduleStats();
- }
-
- void ScheduleStats() {
- HandledEvents = 0;
- PeriodStart = TInstant::Now();
- Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup());
- }
-
-public:
- TPingActor(TActorId target)
- : Target(target)
- , HandledEvents(0)
- , PeriodStart(TInstant::Now())
- {}
-
- STFUNC(StateWait) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvents::TEvPing, Handle);
- sFunc(TEvents::TEvWakeup, PrintStats);
- }
-
- ++HandledEvents;
- }
-
- STFUNC(StatePing) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvents::TEvPong, Handle);
- sFunc(TEvents::TEvWakeup, PrintStats);
- }
-
- ++HandledEvents;
- }
-
- void Bootstrap() {
- if (Target) {
- Become(&TThis::StatePing);
- Send(Target, new TEvents::TEvPing());
- ScheduleStats();
- }
- else {
- Become(&TThis::StateWait);
- };
- }
-};
-
-THolder<TActorSystemSetup> BuildActorSystemSetup(ui32 threads, ui32 pools) {
- Y_ABORT_UNLESS(threads > 0 && threads < 100);
- Y_ABORT_UNLESS(pools > 0 && pools < 10);
-
- auto setup = MakeHolder<TActorSystemSetup>();
-
- setup->NodeId = 1;
-
- setup->ExecutorsCount = pools;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[pools]);
- for (ui32 idx : xrange(pools)) {
- setup->Executors[idx] = new TBasicExecutorPool(idx, threads, 50);
- }
-
- setup->Scheduler = new TBasicSchedulerThread(TSchedulerConfig(512, 0));
-
- return setup;
-}
-
-int main(int argc, char **argv) {
- Y_UNUSED(argc);
- Y_UNUSED(argv);
-
-#ifdef _unix_
- signal(SIGPIPE, SIG_IGN);
-#endif
- signal(SIGINT, &OnTerminate);
- signal(SIGTERM, &OnTerminate);
-
- THolder<TActorSystemSetup> actorSystemSetup = BuildActorSystemSetup(2, 1);
- TActorSystem actorSystem(actorSystemSetup);
-
- actorSystem.Start();
-
- const TActorId a = actorSystem.Register(new TPingActor(TActorId()));
- const TActorId b = actorSystem.Register(new TPingActor(a));
- Y_UNUSED(b);
-
- while (ShouldContinue.PollState() == TProgramShouldContinue::Continue) {
- Sleep(TDuration::MilliSeconds(200));
- }
-
- actorSystem.Stop();
- actorSystem.Cleanup();
-
- return ShouldContinue.GetReturnCode();
-}
diff --git a/library/cpp/actors/examples/01_ping_pong/ya.make b/library/cpp/actors/examples/01_ping_pong/ya.make
deleted file mode 100644
index d33cfd3456..0000000000
--- a/library/cpp/actors/examples/01_ping_pong/ya.make
+++ /dev/null
@@ -1,13 +0,0 @@
-PROGRAM(example_01_ping_pong)
-
-ALLOCATOR(LF)
-
-SRCS(
- main.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
-)
-
-END()
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt b/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 5e379dc7ca..0000000000
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_executable(example_02_discovery)
-target_link_libraries(example_02_discovery PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-dnsresolver
- cpp-actors-interconnect
- cpp-actors-http
- contrib-libs-protobuf
-)
-target_link_options(example_02_discovery PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_proto_messages(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/protocol.proto
-)
-target_sources(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/endpoint.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/lookup.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/main.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/publish.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/replica.cpp
-)
-target_allocator(example_02_discovery
- library-cpp-lfalloc
-)
-target_proto_addincls(example_02_discovery
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(example_02_discovery
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
-vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 1003ca94c5..0000000000
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_executable(example_02_discovery)
-target_link_libraries(example_02_discovery PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
- cpp-actors-dnsresolver
- cpp-actors-interconnect
- cpp-actors-http
- contrib-libs-protobuf
-)
-target_link_options(example_02_discovery PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_proto_messages(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/protocol.proto
-)
-target_sources(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/endpoint.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/lookup.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/main.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/publish.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/replica.cpp
-)
-target_allocator(example_02_discovery
- library-cpp-lfalloc
-)
-target_proto_addincls(example_02_discovery
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(example_02_discovery
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
-vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt b/library/cpp/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 564892d9f7..0000000000
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_executable(example_02_discovery)
-target_link_libraries(example_02_discovery PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-dnsresolver
- cpp-actors-interconnect
- cpp-actors-http
- contrib-libs-protobuf
-)
-target_link_options(example_02_discovery PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_proto_messages(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/protocol.proto
-)
-target_sources(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/endpoint.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/lookup.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/main.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/publish.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/replica.cpp
-)
-target_allocator(example_02_discovery
- library-cpp-lfalloc
-)
-target_proto_addincls(example_02_discovery
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(example_02_discovery
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
-vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt b/library/cpp/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index aed3404cb8..0000000000
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_executable(example_02_discovery)
-target_link_libraries(example_02_discovery PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
- cpp-actors-dnsresolver
- cpp-actors-interconnect
- cpp-actors-http
- contrib-libs-protobuf
-)
-target_link_options(example_02_discovery PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_proto_messages(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/protocol.proto
-)
-target_sources(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/endpoint.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/lookup.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/main.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/publish.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/replica.cpp
-)
-target_allocator(example_02_discovery
- library-cpp-lfalloc
-)
-target_proto_addincls(example_02_discovery
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(example_02_discovery
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
-vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt b/library/cpp/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 6bf59ae8c9..0000000000
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_executable(example_02_discovery)
-target_link_libraries(example_02_discovery PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-actors-core
- cpp-actors-dnsresolver
- cpp-actors-interconnect
- cpp-actors-http
- contrib-libs-protobuf
-)
-target_proto_messages(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/protocol.proto
-)
-target_sources(example_02_discovery PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/endpoint.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/lookup.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/main.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/publish.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/examples/02_discovery/replica.cpp
-)
-target_allocator(example_02_discovery
- library-cpp-lfalloc
-)
-target_proto_addincls(example_02_discovery
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(example_02_discovery
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
-vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/examples/02_discovery/endpoint.cpp b/library/cpp/actors/examples/02_discovery/endpoint.cpp
deleted file mode 100644
index 38c068ca8f..0000000000
--- a/library/cpp/actors/examples/02_discovery/endpoint.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-#include "services.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-
-#include <library/cpp/actors/http/http.h>
-#include <library/cpp/actors/http/http_proxy.h>
-
-#include <util/system/hostname.h>
-#include <util/string/builder.h>
-
-class TExampleHttpRequest : public TActor<TExampleHttpRequest> {
- TIntrusivePtr<TExampleStorageConfig> Config;
- const TString PublishKey;
-
- TActorId HttpProxy;
- NHttp::THttpIncomingRequestPtr Request;
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr &ev) {
- Request = std::move(ev->Get()->Request);
- HttpProxy = ev->Sender;
-
- Register(CreateLookupActor(Config.Get(), PublishKey, SelfId()));
- }
-
- void Handle(TEvExample::TEvInfo::TPtr &ev) {
- auto *msg = ev->Get();
-
- TStringBuilder body;
- for (const auto &x : msg->Payloads)
- body << x << Endl;
-
- auto response = Request->CreateResponseOK(body, "application/text; charset=utf-8");
- Send(HttpProxy, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
-
- PassAway();
- }
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExampleHttpRequest(TExampleStorageConfig *config, const TString &publishKey)
- : TActor(&TThis::StateWork)
- , Config(config)
- , PublishKey(publishKey)
- {}
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- hFunc(TEvExample::TEvInfo, Handle);
- }
- }
-};
-
-class TExampleHttpEndpoint : public TActorBootstrapped<TExampleHttpEndpoint> {
- TIntrusivePtr<TExampleStorageConfig> Config;
- const TString PublishKey;
- const ui16 HttpPort;
-
- TActorId PublishActor;
- TActorId HttpProxy;
-
- std::shared_ptr<NMonitoring::TMetricRegistry> SensorsRegistry = std::make_shared<NMonitoring::TMetricRegistry>();
-
- void PassAway() override {
- Send(PublishActor, new TEvents::TEvPoison());
- Send(HttpProxy, new TEvents::TEvPoison());
-
- return TActor::PassAway();
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr &ev) {
- const TActorId reqActor = Register(new TExampleHttpRequest(Config.Get(), PublishKey));
- TlsActivationContext->Send(ev->Forward(reqActor));
- }
-
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExampleHttpEndpoint(TExampleStorageConfig *config, const TString &publishKey, ui16 port)
- : Config(config)
- , PublishKey(publishKey)
- , HttpPort(port)
- {
- }
-
- void Bootstrap() {
- const TString publishPayload = ToString(HttpPort);
- PublishActor = Register(CreatePublishActor(Config.Get(), PublishKey, publishPayload));
- HttpProxy = Register(NHttp::CreateHttpProxy(SensorsRegistry));
-
- Send(HttpProxy, new NHttp::TEvHttpProxy::TEvAddListeningPort(HttpPort, FQDNHostName()));
- Send(HttpProxy, new NHttp::TEvHttpProxy::TEvRegisterHandler("/list", SelfId()));
-
- Become(&TThis::StateWork);
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- default:
- break;
- }
- }
-};
-
-IActor* CreateEndpointActor(TExampleStorageConfig *config, const TString &publishKey, ui16 port) {
- return new TExampleHttpEndpoint(config, publishKey, port);
-}
diff --git a/library/cpp/actors/examples/02_discovery/lookup.cpp b/library/cpp/actors/examples/02_discovery/lookup.cpp
deleted file mode 100644
index fb136a431c..0000000000
--- a/library/cpp/actors/examples/02_discovery/lookup.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#include "services.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/interconnect.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <util/generic/set.h>
-#include <util/generic/vector.h>
-
-class TExampleLookupRequestActor : public TActor<TExampleLookupRequestActor> {
- const TActorId Owner;
- const TActorId Replica;
- const TString Key;
-
- void Registered(TActorSystem* sys, const TActorId&) override {
- const auto flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession;
- sys->Send(new IEventHandle(Replica, SelfId(), new TEvExample::TEvReplicaLookup(Key), flags));
- }
-
- void PassAway() override {
- const ui32 replicaNode = Replica.NodeId();
- if (replicaNode != SelfId().NodeId()) {
- const TActorId &interconnectProxy = TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(Replica.NodeId());
- Send(interconnectProxy, new TEvents::TEvUnsubscribe());
- }
- return IActor::PassAway();
- }
-
- void Handle(TEvExample::TEvReplicaInfo::TPtr &ev) {
- Send(Owner, ev->Release().Release());
- return PassAway();
- }
-
- void HandleUndelivered() {
- Send(Owner, new TEvExample::TEvReplicaInfo(Key));
- return PassAway();
- }
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExampleLookupRequestActor(TActorId owner, TActorId replica, const TString &key)
- : TActor(&TThis::StateWork)
- , Owner(owner)
- , Replica(replica)
- , Key(key)
- {}
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvExample::TEvReplicaInfo, Handle);
- sFunc(TEvents::TEvUndelivered, HandleUndelivered);
- sFunc(TEvInterconnect::TEvNodeDisconnected, HandleUndelivered);
- default:
- break;
- }
- }
-};
-
-class TExampleLookupActor : public TActorBootstrapped<TExampleLookupActor> {
- TIntrusiveConstPtr<TExampleStorageConfig> Config;
- const TString Key;
- const TActorId ReplyTo;
- TVector<TActorId> RequestActors;
-
- ui32 TotalReplicas = 0;
- ui32 RepliedSuccess = 0;
- ui32 RepliedError = 0;
-
- TSet<TString> Payloads;
-
- void Handle(TEvExample::TEvReplicaInfo::TPtr &ev) {
- NActorsExample::TEvReplicaInfo &record = ev->Get()->Record;
- if (record.PayloadSize()) {
- ++RepliedSuccess;
- for (const TString &payload : record.GetPayload()) {
- Payloads.insert(payload);
- }
- }
- else {
- ++RepliedError;
- }
-
- const ui32 majority = (TotalReplicas / 2 + 1);
- if (RepliedSuccess == majority || (RepliedSuccess + RepliedError == TotalReplicas))
- return ReplyAndDie();
- }
-
- void ReplyAndDie() {
- TVector<TString> replyPayloads(Payloads.begin(), Payloads.end());
- Send(ReplyTo, new TEvExample::TEvInfo(Key, std::move(replyPayloads)));
- return PassAway();
- }
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExampleLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo)
- : Config(config)
- , Key(key)
- , ReplyTo(replyTo)
- {}
-
- void Bootstrap() {
- Y_ABORT_UNLESS(Config->Replicas.size() > 0);
-
- TotalReplicas = Config->Replicas.size();
- RequestActors.reserve(TotalReplicas);
- for (const auto &replica : Config->Replicas) {
- const TActorId requestActor = Register(new TExampleLookupRequestActor(SelfId(), replica, Key));
- RequestActors.emplace_back(requestActor);
- }
-
- Become(&TThis::StateWork);
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvExample::TEvReplicaInfo, Handle);
- default:
- break;
- }
- }
-};
-
-IActor* CreateLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo) {
- return new TExampleLookupActor(config, key, replyTo);
-}
diff --git a/library/cpp/actors/examples/02_discovery/main.cpp b/library/cpp/actors/examples/02_discovery/main.cpp
deleted file mode 100644
index 9dec850c77..0000000000
--- a/library/cpp/actors/examples/02_discovery/main.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-#include "services.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/dnsresolver/dnsresolver.h>
-#include <library/cpp/actors/interconnect/interconnect.h>
-#include <library/cpp/actors/interconnect/interconnect_common.h>
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-#include <library/cpp/actors/interconnect/interconnect_tcp_server.h>
-#include <library/cpp/actors/interconnect/poller_actor.h>
-#include <library/cpp/actors/interconnect/poller_tcp.h>
-#include <library/cpp/actors/util/should_continue.h>
-
-#include <util/system/sigset.h>
-#include <util/generic/xrange.h>
-
-using namespace NActors;
-using namespace NActors::NDnsResolver;
-
-static const ui32 CfgTotalReplicaNodes = 5;
-static const ui16 CfgBasePort = 13300;
-static const ui16 CfgHttpPort = 8881;
-static const TString PublishKey = "endpoint";
-
-static TProgramShouldContinue ShouldContinue;
-
-void OnTerminate(int) {
- ShouldContinue.ShouldStop();
-}
-
-THolder<TActorSystemSetup> BuildActorSystemSetup(ui32 nodeId, ui32 threads, NMonitoring::TDynamicCounters &counters) {
- Y_ABORT_UNLESS(threads > 0 && threads < 100);
-
- auto setup = MakeHolder<TActorSystemSetup>();
-
- setup->NodeId = nodeId;
-
- setup->ExecutorsCount = 1;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[1]);
- setup->Executors[0] = new TBasicExecutorPool(0, threads, 50);
- setup->Scheduler = new TBasicSchedulerThread(TSchedulerConfig(512, 0));
-
- setup->LocalServices.emplace_back(MakePollerActorId(), TActorSetupCmd(CreatePollerActor(), TMailboxType::ReadAsFilled, 0));
-
- TIntrusivePtr<TTableNameserverSetup> nameserverTable = new TTableNameserverSetup();
- for (ui32 xnode : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
- nameserverTable->StaticNodeTable[xnode] = std::make_pair("127.0.0.1", CfgBasePort + xnode);
- }
-
- setup->LocalServices.emplace_back(
- MakeDnsResolverActorId(),
- TActorSetupCmd(CreateOnDemandDnsResolver(), TMailboxType::ReadAsFilled, 0)
- );
-
- setup->LocalServices.emplace_back(
- GetNameserviceActorId(),
- TActorSetupCmd(CreateNameserverTable(nameserverTable), TMailboxType::ReadAsFilled, 0)
- );
-
- TIntrusivePtr<TInterconnectProxyCommon> icCommon = new TInterconnectProxyCommon();
- icCommon->NameserviceId = GetNameserviceActorId();
- icCommon->MonCounters = counters.GetSubgroup("counters", "interconnect");
- icCommon->TechnicalSelfHostName = "127.0.0.1";
-
- setup->Interconnect.ProxyActors.resize(CfgTotalReplicaNodes + 1);
- for (ui32 xnode : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
- if (xnode != nodeId) {
- IActor *actor = new TInterconnectProxyTCP(xnode, icCommon);
- setup->Interconnect.ProxyActors[xnode] = TActorSetupCmd(actor, TMailboxType::ReadAsFilled, 0);
- }
- else {
- IActor *listener = new TInterconnectListenerTCP("127.0.0.1", CfgBasePort + xnode, icCommon);
- setup->LocalServices.emplace_back(
- MakeInterconnectListenerActorId(false),
- TActorSetupCmd(listener, TMailboxType::ReadAsFilled, 0)
- );
- }
- }
-
- return setup;
-}
-
-int main(int argc, char **argv) {
- Y_UNUSED(argc);
- Y_UNUSED(argv);
-
-#ifdef _unix_
- signal(SIGPIPE, SIG_IGN);
-#endif
- signal(SIGINT, &OnTerminate);
- signal(SIGTERM, &OnTerminate);
-
- TIntrusivePtr<TExampleStorageConfig> config = new TExampleStorageConfig();
- for (ui32 nodeid : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
- config->Replicas.push_back(MakeReplicaId(nodeid));
- }
-
- TVector<THolder<TActorSystem>> actorSystemHolder;
- TVector<TIntrusivePtr<NMonitoring::TDynamicCounters>> countersHolder;
- for (ui32 nodeid : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
- countersHolder.emplace_back(new NMonitoring::TDynamicCounters());
- THolder<TActorSystemSetup> actorSystemSetup = BuildActorSystemSetup(nodeid, 2, *countersHolder.back());
- actorSystemSetup->LocalServices.emplace_back(
- TActorId(),
- TActorSetupCmd(CreateEndpointActor(config.Get(), PublishKey, CfgHttpPort + nodeid), TMailboxType::HTSwap, 0)
- );
-
- actorSystemSetup->LocalServices.emplace_back(
- MakeReplicaId(nodeid),
- TActorSetupCmd(CreateReplica(), TMailboxType::ReadAsFilled, 0)
- );
-
- actorSystemHolder.emplace_back(new TActorSystem(actorSystemSetup));
- }
-
- for (auto &xh : actorSystemHolder)
- xh->Start();
-
- while (ShouldContinue.PollState() == TProgramShouldContinue::Continue) {
- Sleep(TDuration::MilliSeconds(200));
- }
-
- // stop the actor system so it does not generate new requests to external services;
- // no events will be processed anymore
- for (auto &xh : actorSystemHolder)
- xh->Stop();
-
- // and then clean up the actor system;
- // from this moment on, working with the actor system is prohibited
- for (auto &xh : actorSystemHolder)
- xh->Cleanup();
-
- return ShouldContinue.GetReturnCode();
-}
diff --git a/library/cpp/actors/examples/02_discovery/publish.cpp b/library/cpp/actors/examples/02_discovery/publish.cpp
deleted file mode 100644
index d923283e6b..0000000000
--- a/library/cpp/actors/examples/02_discovery/publish.cpp
+++ /dev/null
@@ -1,110 +0,0 @@
-#include "services.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/interconnect.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <util/generic/set.h>
-#include <util/generic/vector.h>
-
-class TExamplePublishReplicaActor : public TActorBootstrapped<TExamplePublishReplicaActor> {
- const TActorId Owner;
- const TActorId Replica;
- const TString Key;
- const TString Payload;
-
- void PassAway() override {
- const ui32 replicaNode = Replica.NodeId();
- if (replicaNode != SelfId().NodeId()) {
- const TActorId &interconnectProxy = TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(Replica.NodeId());
- Send(interconnectProxy, new TEvents::TEvUnsubscribe());
- }
- return IActor::PassAway();
- }
-
- void SomeSleep() {
- Become(&TThis::StateSleep, TDuration::MilliSeconds(250), new TEvents::TEvWakeup());
- }
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExamplePublishReplicaActor(TActorId owner, TActorId replica, const TString &key, const TString &payload)
- : Owner(owner)
- , Replica(replica)
- , Key(key)
- , Payload(payload)
- {}
-
- void Bootstrap() {
- const ui32 flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession;
- Send(Replica, new TEvExample::TEvReplicaPublish(Key, Payload), flags);
- Become(&TThis::StatePublish);
- }
-
- STFUNC(StatePublish) {
- switch (ev->GetTypeRewrite()) {
- sFunc(TEvents::TEvPoison, PassAway);
- sFunc(TEvents::TEvUndelivered, SomeSleep);
- sFunc(TEvInterconnect::TEvNodeDisconnected, SomeSleep);
- default:
- break;
- }
- }
-
- STFUNC(StateSleep) {
- switch (ev->GetTypeRewrite()) {
- sFunc(TEvents::TEvPoison, PassAway);
- sFunc(TEvents::TEvWakeup, Bootstrap);
- default:
- break;
- }
- }
-};
-
-class TExamplePublishActor : public TActorBootstrapped<TExamplePublishActor> {
- TIntrusiveConstPtr<TExampleStorageConfig> Config;
- const TString Key;
- const TString Payload;
- TVector<TActorId> PublishActors;
-
- void PassAway() override {
- for (const auto &x : PublishActors)
- Send(x, new TEvents::TEvPoison());
- return IActor::PassAway();
- }
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExamplePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what)
- : Config(config)
- , Key(key)
- , Payload(what)
- {}
-
- void Bootstrap() {
- for (auto &replica : Config->Replicas) {
- const TActorId x = Register(new TExamplePublishReplicaActor(SelfId(), replica, Key, Payload));
- PublishActors.emplace_back(x);
- }
-
- Become(&TThis::StateWork);
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- sFunc(TEvents::TEvPoison, PassAway);
- default:
- break;
- }
- }
-};
-
-IActor* CreatePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what) {
- return new TExamplePublishActor(config, key, what);
-}
diff --git a/library/cpp/actors/examples/02_discovery/replica.cpp b/library/cpp/actors/examples/02_discovery/replica.cpp
deleted file mode 100644
index 96a6f5f475..0000000000
--- a/library/cpp/actors/examples/02_discovery/replica.cpp
+++ /dev/null
@@ -1,181 +0,0 @@
-#include "services.h"
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/interconnect.h>
-#include <util/generic/set.h>
-#include <util/generic/hash_set.h>
-#include <util/generic/vector.h>
-
-class TExampleReplicaActor : public TActor<TExampleReplicaActor> {
- using TOwnerIndex = TMap<TActorId, ui32, TActorId::TOrderedCmp>;
- using TKeyIndex = THashMap<TString, TSet<ui32>>;
-
- struct TEntry {
- TString Payload;
- TActorId Owner;
- TOwnerIndex::iterator OwnerIt;
- TKeyIndex::iterator KeyIt;
- };
-
- TVector<TEntry> Entries;
- TVector<ui32> AvailableEntries;
-
- TOwnerIndex IndexOwner;
- TKeyIndex IndexKey;
-
- ui32 AllocateEntry() {
- ui32 ret;
- if (AvailableEntries) {
- ret = AvailableEntries.back();
- AvailableEntries.pop_back();
- }
- else {
- ret = Entries.size();
- Entries.emplace_back();
- }
-
- return ret;
- }
-
- bool IsLastEntryOnNode(TOwnerIndex::iterator ownerIt) {
- const ui32 ownerNodeId = ownerIt->first.NodeId();
- if (ownerIt != IndexOwner.begin()) {
- auto x = ownerIt;
- --x;
- if (x->first.NodeId() == ownerNodeId)
- return false;
- }
-
- ++ownerIt;
- if (ownerIt != IndexOwner.end()) {
- if (ownerIt->first.NodeId() == ownerNodeId)
- return false;
- }
-
- return true;
- }
-
- void CleanupEntry(ui32 entryIndex) {
- TEntry &entry = Entries[entryIndex];
- entry.KeyIt->second.erase(entryIndex);
- if (entry.KeyIt->second.empty())
- IndexKey.erase(entry.KeyIt);
-
- if (IsLastEntryOnNode(entry.OwnerIt))
- Send(TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(entry.OwnerIt->first.NodeId()), new TEvents::TEvUnsubscribe());
-
- IndexOwner.erase(entry.OwnerIt);
-
- TString().swap(entry.Payload);
- entry.Owner = TActorId();
- entry.KeyIt = IndexKey.end();
- entry.OwnerIt = IndexOwner.end();
-
- AvailableEntries.emplace_back(entryIndex);
- }
-
- void Handle(TEvExample::TEvReplicaLookup::TPtr &ev) {
- auto &record = ev->Get()->Record;
- const auto &key = record.GetKey();
-
- auto keyIt = IndexKey.find(key);
- if (keyIt == IndexKey.end()) {
- Send(ev->Sender, new TEvExample::TEvReplicaInfo(key), 0, ev->Cookie);
- return;
- }
-
- auto reply = MakeHolder<TEvExample::TEvReplicaInfo>(key);
- reply->Record.MutablePayload()->Reserve(keyIt->second.size());
- for (ui32 entryIndex : keyIt->second) {
- const TEntry &entry = Entries[entryIndex];
- reply->Record.AddPayload(entry.Payload);
- }
-
- Send(ev->Sender, std::move(reply), 0, ev->Cookie);
- }
-
- void Handle(TEvExample::TEvReplicaPublish::TPtr &ev) {
- auto &record = ev->Get()->Record;
- const TString &key = record.GetKey();
- const TString &payload = record.GetPayload();
- const TActorId &owner = ev->Sender;
-
- auto ownerIt = IndexOwner.find(owner);
- if (ownerIt != IndexOwner.end()) {
- const ui32 entryIndex = ownerIt->second;
- TEntry &entry = Entries[entryIndex];
- if (entry.KeyIt->first != key) {
- // do not reply: the request looks suspicious
- return;
- }
-
- entry.Payload = payload;
- }
- else {
- const ui32 entryIndex = AllocateEntry();
- TEntry &entry = Entries[entryIndex];
-
- entry.Payload = payload;
- entry.Owner = owner;
-
- entry.OwnerIt = IndexOwner.emplace(owner, entryIndex).first;
- entry.KeyIt = IndexKey.emplace(std::make_pair(key, TSet<ui32>())).first;
- entry.KeyIt->second.emplace(entryIndex);
-
- Send(owner, new TEvExample::TEvReplicaPublishAck(), IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession, ev->Cookie);
- }
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr &ev) {
- auto ownerIt = IndexOwner.find(ev->Sender);
- if (ownerIt == IndexOwner.end())
- return;
-
- CleanupEntry(ownerIt->second);
- }
-
- void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr &ev) {
- auto *msg = ev->Get();
- const ui32 nodeId = msg->NodeId;
- auto ownerIt = IndexOwner.lower_bound(TActorId(nodeId, 0, 0, 0));
- while (ownerIt != IndexOwner.end() && ownerIt->first.NodeId() == nodeId) {
- const ui32 idx = ownerIt->second;
- ++ownerIt;
- CleanupEntry(idx);
- }
- }
-
-public:
- static constexpr IActor::EActivityType ActorActivityType() {
- // define app-specific activity tag to track elapsed cpu | handled events | actor count in Solomon
- return EActorActivity::ACTORLIB_COMMON;
- }
-
- TExampleReplicaActor()
- : TActor(&TThis::StateWork)
- {}
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvExample::TEvReplicaLookup, Handle);
- hFunc(TEvExample::TEvReplicaPublish, Handle);
- hFunc(TEvents::TEvUndelivered, Handle);
- hFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
-
- IgnoreFunc(TEvInterconnect::TEvNodeConnected);
- default:
- // this is the place to log a message about unknown events
- break;
- }
- }
-};
-
-IActor* CreateReplica() {
- return new TExampleReplicaActor();
-}
-
-TActorId MakeReplicaId(ui32 nodeid) {
- char x[12] = { 'r', 'p', 'l' };
- memcpy(x + 5, &nodeid, sizeof(ui32));
- return TActorId(nodeid, TStringBuf(x, 12));
-}
diff --git a/library/cpp/actors/examples/02_discovery/services.h b/library/cpp/actors/examples/02_discovery/services.h
deleted file mode 100644
index 266517c577..0000000000
--- a/library/cpp/actors/examples/02_discovery/services.h
+++ /dev/null
@@ -1,85 +0,0 @@
-#pragma once
-#include <library/cpp/actors/examples/02_discovery/protocol.pb.h>
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/event_local.h>
-
-#include <util/generic/vector.h>
-
-using namespace NActors;
-
-struct TExampleStorageConfig : public TThrRefBase {
- TVector<TActorId> Replicas;
-};
-
-struct TEvExample {
- enum EEv {
- EvReplicaLookup = EventSpaceBegin(TEvents::ES_USERSPACE + 1),
- EvReplicaPublish,
-
- EvReplicaInfo = EventSpaceBegin(TEvents::ES_USERSPACE + 2),
- EvReplicaPublishAck,
-
- EvInfo = EventSpaceBegin(TEvents::ES_USERSPACE + 3),
- };
-
- struct TEvReplicaLookup : public TEventPB<TEvReplicaLookup, NActorsExample::TEvReplicaLookup, EvReplicaLookup> {
- TEvReplicaLookup()
- {}
-
- TEvReplicaLookup(const TString &key)
- {
- Record.SetKey(key);
- }
- };
-
- struct TEvReplicaPublish : public TEventPB<TEvReplicaPublish, NActorsExample::TEvReplicaPublish, EvReplicaPublish> {
- TEvReplicaPublish()
- {}
-
- TEvReplicaPublish(const TString &key, const TString &payload)
- {
- Record.SetKey(key);
- Record.SetPayload(payload);
- }
- };
-
- struct TEvReplicaInfo : public TEventPB<TEvReplicaInfo, NActorsExample::TEvReplicaInfo, EvReplicaInfo> {
- TEvReplicaInfo()
- {}
-
- TEvReplicaInfo(const TString &key)
- {
- Record.SetKey(key);
- }
- };
-
- struct TEvReplicaPublishAck : public TEventPB<TEvReplicaPublishAck, NActorsExample::TEvReplicaPublishAck, EvReplicaPublishAck> {
- TEvReplicaPublishAck()
- {}
-
- TEvReplicaPublishAck(const TString &key)
- {
- Record.SetKey(key);
- }
- };
-
- struct TEvInfo : public TEventLocal<TEvInfo, EvInfo> {
- const TString Key;
- const TVector<TString> Payloads;
-
- TEvInfo(const TString &key, TVector<TString> &&payloads)
- : Key(key)
- , Payloads(payloads)
- {}
- };
-};
-
-IActor* CreateReplica();
-IActor* CreatePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what);
-IActor* CreateLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo);
-IActor* CreateEndpointActor(TExampleStorageConfig *config, const TString &publishKey, ui16 httpPort);
-
-TActorId MakeReplicaId(ui32 nodeid);
diff --git a/library/cpp/actors/examples/02_discovery/ya.make b/library/cpp/actors/examples/02_discovery/ya.make
deleted file mode 100644
index 953c13259c..0000000000
--- a/library/cpp/actors/examples/02_discovery/ya.make
+++ /dev/null
@@ -1,25 +0,0 @@
-PROGRAM(example_02_discovery)
-
-ALLOCATOR(LF)
-
-SRCS(
- endpoint.cpp
- lookup.cpp
- main.cpp
- publish.cpp
- replica.cpp
- services.h
-)
-
-SRCS(
- protocol.proto
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/dnsresolver
- library/cpp/actors/interconnect
- library/cpp/actors/http
-)
-
-END()
diff --git a/library/cpp/actors/helpers/CMakeLists.darwin-arm64.txt b/library/cpp/actors/helpers/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 7367c0b925..0000000000
--- a/library/cpp/actors/helpers/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-helpers)
-target_link_libraries(cpp-actors-helpers PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-monlib-dynamic_counters
-)
-target_sources(cpp-actors-helpers PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/activeactors.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/flow_controlled_queue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor.cpp
-)
diff --git a/library/cpp/actors/helpers/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/helpers/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 7367c0b925..0000000000
--- a/library/cpp/actors/helpers/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-helpers)
-target_link_libraries(cpp-actors-helpers PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-monlib-dynamic_counters
-)
-target_sources(cpp-actors-helpers PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/activeactors.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/flow_controlled_queue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor.cpp
-)
diff --git a/library/cpp/actors/helpers/CMakeLists.linux-aarch64.txt b/library/cpp/actors/helpers/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index da8ce6e8e6..0000000000
--- a/library/cpp/actors/helpers/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-helpers)
-target_link_libraries(cpp-actors-helpers PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-monlib-dynamic_counters
-)
-target_sources(cpp-actors-helpers PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/activeactors.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/flow_controlled_queue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor.cpp
-)
diff --git a/library/cpp/actors/helpers/CMakeLists.linux-x86_64.txt b/library/cpp/actors/helpers/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index da8ce6e8e6..0000000000
--- a/library/cpp/actors/helpers/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-helpers)
-target_link_libraries(cpp-actors-helpers PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-monlib-dynamic_counters
-)
-target_sources(cpp-actors-helpers PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/activeactors.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/flow_controlled_queue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor.cpp
-)
diff --git a/library/cpp/actors/helpers/CMakeLists.windows-x86_64.txt b/library/cpp/actors/helpers/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 7367c0b925..0000000000
--- a/library/cpp/actors/helpers/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-helpers)
-target_link_libraries(cpp-actors-helpers PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-monlib-dynamic_counters
-)
-target_sources(cpp-actors-helpers PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/activeactors.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/flow_controlled_queue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor.cpp
-)
diff --git a/library/cpp/actors/helpers/activeactors.cpp b/library/cpp/actors/helpers/activeactors.cpp
deleted file mode 100644
index 145e97dc57..0000000000
--- a/library/cpp/actors/helpers/activeactors.cpp
+++ /dev/null
@@ -1,2 +0,0 @@
-#include "activeactors.h"
-
diff --git a/library/cpp/actors/helpers/activeactors.h b/library/cpp/actors/helpers/activeactors.h
deleted file mode 100644
index ec482e93c8..0000000000
--- a/library/cpp/actors/helpers/activeactors.h
+++ /dev/null
@@ -1,42 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/events.h>
-#include <util/generic/hash_set.h>
-
-namespace NActors {
-
- ////////////////////////////////////////////////////////////////////////////
- // TActiveActors
- // This class helps manage created actors and kill them all on PoisonPill.
- ////////////////////////////////////////////////////////////////////////////
- class TActiveActors : public THashSet<TActorId> {
- public:
- void Insert(const TActorId &aid) {
- bool inserted = insert(aid).second;
- Y_ABORT_UNLESS(inserted);
- }
-
- void Insert(const TActiveActors &moreActors) {
- for (const auto &aid : moreActors) {
- Insert(aid);
- }
- }
-
- void Erase(const TActorId &aid) {
- auto num = erase(aid);
- Y_ABORT_UNLESS(num == 1);
- }
-
- size_t KillAndClear(const TActorContext &ctx) {
- size_t s = size(); // number of actors managed
- for (const auto &x: *this) {
- ctx.Send(x, new TEvents::TEvPoisonPill());
- }
- clear();
- return s; // how many actors we killed
- }
- };
-
-} // namespace NActors
-
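
The deleted TActiveActors above is a thin wrapper over THashSet<TActorId> that aborts on double insertion and sends TEvPoisonPill to every tracked actor in KillAndClear(). A minimal usage sketch (illustrative only, not part of the diff; TMyOwner, StateWork and CreateWorkerActor() are hypothetical names):

    #include <library/cpp/actors/core/actor_bootstrapped.h>
    #include <library/cpp/actors/helpers/activeactors.h>

    class TMyOwner : public NActors::TActorBootstrapped<TMyOwner> {
        NActors::TActiveActors ActiveActors;
    public:
        void Bootstrap(const NActors::TActorContext& ctx) {
            // Register a child and remember its id; Insert() aborts on duplicates.
            ActiveActors.Insert(ctx.Register(CreateWorkerActor()));
            Become(&TMyOwner::StateWork);
        }
        void HandlePoison(const NActors::TActorContext& ctx) {
            // Sends TEvPoisonPill to every tracked actor and clears the set.
            ActiveActors.KillAndClear(ctx);
            PassAway();
        }
        // StateWork and CreateWorkerActor() omitted; both are hypothetical here.
    };
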
diff --git a/library/cpp/actors/helpers/flow_controlled_queue.cpp b/library/cpp/actors/helpers/flow_controlled_queue.cpp
deleted file mode 100644
index 49ed7c79f0..0000000000
--- a/library/cpp/actors/helpers/flow_controlled_queue.cpp
+++ /dev/null
@@ -1,215 +0,0 @@
-#include "flow_controlled_queue.h"
-
-#include <library/cpp/actors/core/interconnect.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/util/datetime.h>
-
-#include <util/generic/deque.h>
-#include <util/datetime/cputimer.h>
-#include <util/generic/algorithm.h>
-
-namespace NActors {
-
-class TFlowControlledRequestQueue;
-
-class TFlowControlledRequestActor : public IActorCallback {
- TFlowControlledRequestQueue * const QueueActor;
-
- void HandleReply(TAutoPtr<IEventHandle> &ev);
- void HandleUndelivered(TEvents::TEvUndelivered::TPtr &ev);
-public:
- const TActorId Source;
- const ui64 Cookie;
- const ui32 Flags;
- const ui64 StartCounter;
-
- TFlowControlledRequestActor(ui32 activity, TFlowControlledRequestQueue *queue, TActorId source, ui64 cookie, ui32 flags)
- : IActorCallback(static_cast<TReceiveFunc>(&TFlowControlledRequestActor::StateWait), activity)
- , QueueActor(queue)
- , Source(source)
- , Cookie(cookie)
- , Flags(flags)
- , StartCounter(GetCycleCountFast())
- {}
-
- STATEFN(StateWait) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvents::TEvUndelivered, HandleUndelivered);
- default:
- HandleReply(ev);
- }
- }
-
- TDuration AccumulatedLatency() const {
- const ui64 cc = GetCycleCountFast() - StartCounter;
- return CyclesToDuration(cc);
- }
-
- using IActor::PassAway;
-};
-
-class TFlowControlledRequestQueue : public IActorCallback {
- const TActorId Target;
- const TFlowControlledQueueConfig Config;
-
- TDeque<THolder<IEventHandle>> UnhandledRequests;
- TDeque<TFlowControlledRequestActor *> RegisteredRequests;
-
- bool Subscribed = false;
-
- TDuration MinimalSeenLatency;
-
- bool CanRegister() {
- const ui64 inFly = RegisteredRequests.size();
- if (inFly <= Config.MinAllowedInFly) // <= for handling minAllowed == 0
- return true;
-
- if (inFly >= Config.MaxAllowedInFly)
- return false;
-
- if (Config.TargetDynamicRate) {
- if (const ui64 dynMax = MinimalSeenLatency.MicroSeconds() * Config.TargetDynamicRate / 1000000) {
- if (inFly >= dynMax)
- return false;
- }
- }
-
- const TDuration currentLatency = RegisteredRequests.front()->AccumulatedLatency();
- if (currentLatency <= Config.MinTrackedLatency)
- return true;
-
- if (currentLatency <= MinimalSeenLatency * Config.LatencyFactor)
- return true;
-
- return false;
- }
-
- void HandleForwardedEvent(TAutoPtr<IEventHandle> &ev) {
- if (CanRegister()) {
- RegisterReqActor(ev);
- } else {
- UnhandledRequests.emplace_back(ev.Release());
- }
- }
-
- void RegisterReqActor(THolder<IEventHandle> ev) {
- TFlowControlledRequestActor *reqActor = new TFlowControlledRequestActor(ActivityType, this, ev->Sender, ev->Cookie, ev->Flags);
- const TActorId reqActorId = RegisterWithSameMailbox(reqActor);
- RegisteredRequests.emplace_back(reqActor);
-
- if (!Subscribed && (Target.NodeId() != SelfId().NodeId())) {
- Send(TActivationContext::InterconnectProxy(Target.NodeId()), new TEvents::TEvSubscribe(), IEventHandle::FlagTrackDelivery);
- Subscribed = true;
- }
-
- TActivationContext::Send(new IEventHandle(Target, reqActorId, ev.Get()->ReleaseBase().Release(), IEventHandle::FlagTrackDelivery, ev->Cookie));
- }
-
- void PumpQueue() {
- while (RegisteredRequests && RegisteredRequests.front() == nullptr)
- RegisteredRequests.pop_front();
-
- while (UnhandledRequests && CanRegister()) {
- RegisterReqActor(std::move(UnhandledRequests.front()));
- UnhandledRequests.pop_front();
- }
- }
-
- void HandleDisconnected() {
- Subscribed = false;
-
- const ui32 nodeid = Target.NodeId();
- for (TFlowControlledRequestActor *reqActor : RegisteredRequests) {
- if (reqActor) {
- if (reqActor->Flags & IEventHandle::FlagSubscribeOnSession) {
- TActivationContext::Send(
- new IEventHandle(reqActor->Source, TActorId(), new TEvInterconnect::TEvNodeDisconnected(nodeid), 0, reqActor->Cookie)
- );
- }
- reqActor->PassAway();
- }
- }
-
- RegisteredRequests.clear();
-
- for (auto &ev : UnhandledRequests) {
- const auto reason = TEvents::TEvUndelivered::Disconnected;
- if (ev->Flags & IEventHandle::FlagTrackDelivery) {
- TActivationContext::Send(
- new IEventHandle(ev->Sender, ev->Recipient, new TEvents::TEvUndelivered(ev->GetTypeRewrite(), reason), 0, ev->Cookie)
- );
- }
- }
-
- UnhandledRequests.clear();
- }
-
- void HandlePoison() {
- HandleDisconnected();
-
- if (SelfId().NodeId() != Target.NodeId())
- Send(TActivationContext::InterconnectProxy(Target.NodeId()), new TEvents::TEvUnsubscribe());
-
- PassAway();
- }
-public:
- template <class TEnum>
- TFlowControlledRequestQueue(TActorId target, const TEnum activity, const TFlowControlledQueueConfig &config)
- : IActorCallback(static_cast<TReceiveFunc>(&TFlowControlledRequestQueue::StateWork), activity)
- , Target(target)
- , Config(config)
- , MinimalSeenLatency(TDuration::Seconds(1))
- {}
-
- STATEFN(StateWork) {
- switch (ev->GetTypeRewrite()) {
- cFunc(TEvInterconnect::TEvNodeDisconnected::EventType, HandleDisconnected);
- IgnoreFunc(TEvInterconnect::TEvNodeConnected);
- cFunc(TEvents::TEvUndelivered::EventType, HandleDisconnected);
- cFunc(TEvents::TEvPoison::EventType, HandlePoison);
- default:
- HandleForwardedEvent(ev);
- }
- }
-
- void HandleRequestReply(TAutoPtr<IEventHandle> &ev, TFlowControlledRequestActor *reqActor) {
- auto it = Find(RegisteredRequests, reqActor);
- if (it == RegisteredRequests.end())
- return;
- TActivationContext::Send(ev->Forward(reqActor->Source).Release());
- const TDuration reqLatency = reqActor->AccumulatedLatency();
- if (reqLatency < MinimalSeenLatency)
- MinimalSeenLatency = reqLatency;
-
- *it = nullptr;
- PumpQueue();
- }
-
- void HandleRequestUndelivered(TEvents::TEvUndelivered::TPtr &ev, TFlowControlledRequestActor *reqActor) {
- auto it = Find(RegisteredRequests, reqActor);
- if (it == RegisteredRequests.end())
- return;
-
- TActivationContext::Send(ev->Forward(reqActor->Source).Release());
-
- *it = nullptr;
- PumpQueue();
- }
-};
-
-void TFlowControlledRequestActor::HandleReply(TAutoPtr<IEventHandle> &ev) {
- QueueActor->HandleRequestReply(ev, this);
- PassAway();
-}
-
-void TFlowControlledRequestActor::HandleUndelivered(TEvents::TEvUndelivered::TPtr &ev) {
- QueueActor->HandleRequestUndelivered(ev, this);
- PassAway();
-}
-
-template <class TEnum>
-IActor* CreateFlowControlledRequestQueue(TActorId targetId, const TEnum activity, const TFlowControlledQueueConfig &config) {
- return new TFlowControlledRequestQueue(targetId, activity, config);
-}
-
-}
diff --git a/library/cpp/actors/helpers/flow_controlled_queue.h b/library/cpp/actors/helpers/flow_controlled_queue.h
deleted file mode 100644
index bbfffa18d7..0000000000
--- a/library/cpp/actors/helpers/flow_controlled_queue.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-
-namespace NActors {
-
- struct TFlowControlledQueueConfig {
- ui32 MinAllowedInFly = 20;
- ui32 MaxAllowedInFly = 100;
- ui32 TargetDynamicRate = 0;
-
- TDuration MinTrackedLatency = TDuration::MilliSeconds(20);
- ui32 LatencyFactor = 4;
- };
-
- template <class TEnum = IActor::EActivityType>
- IActor* CreateFlowControlledRequestQueue(TActorId targetId, const TEnum activity = IActor::EActivityType::ACTORLIB_COMMON, const TFlowControlledQueueConfig &config = TFlowControlledQueueConfig());
-
-}
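
The header above only declares the factory; the deleted flow_controlled_queue.cpp shows that the queue forwards requests to the target while the number of in-fly requests and their accumulated latency stay within the configured limits, and parks the rest. A configuration and registration sketch (illustrative only, not part of the diff; targetId and ctx are assumed to come from the surrounding actor code):

    NActors::TFlowControlledQueueConfig config;
    config.MinAllowedInFly = 10;                             // always admit a few requests
    config.MaxAllowedInFly = 200;                            // hard cap on in-fly requests
    config.MinTrackedLatency = TDuration::MilliSeconds(50);  // latencies below this never throttle

    NActors::IActor* queue = NActors::CreateFlowControlledRequestQueue(
        targetId, NActors::IActor::EActivityType::ACTORLIB_COMMON, config);
    const NActors::TActorId queueId = ctx.Register(queue);
    // Events sent to queueId are wrapped into per-request actors and forwarded
    // to targetId; excess requests wait in the queue until latency allows more.
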
diff --git a/library/cpp/actors/helpers/future_callback.h b/library/cpp/actors/helpers/future_callback.h
deleted file mode 100644
index 6626dd439d..0000000000
--- a/library/cpp/actors/helpers/future_callback.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/hfunc.h>
-
-namespace NActors {
-
-template <typename EventType>
-struct TActorFutureCallback : TActor<TActorFutureCallback<EventType>> {
- using TCallback = std::function<void(TAutoPtr<TEventHandle<EventType>>&)>;
- using TBase = TActor<TActorFutureCallback<EventType>>;
- TCallback Callback;
-
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::ACTOR_FUTURE_CALLBACK;
- }
-
- TActorFutureCallback(TCallback&& callback)
- : TBase(&TActorFutureCallback::StateWaitForEvent)
- , Callback(std::move(callback))
- {}
-
- STRICT_STFUNC(StateWaitForEvent,
- HFunc(EventType, Handle)
- )
-
- void Handle(typename EventType::TPtr ev, const TActorContext& ctx) {
- Callback(ev);
- TBase::Die(ctx);
- }
-};
-
-} // NActors
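
TActorFutureCallback above is a one-shot actor: it waits for a single event of the given type, invokes the stored callback and dies. A usage sketch (illustrative only, not part of the diff; TEvMyResponse and ctx are hypothetical or assumed from the surrounding code):

    auto* callback = new NActors::TActorFutureCallback<TEvMyResponse>(
        [](TAutoPtr<NActors::TEventHandle<TEvMyResponse>>& ev) {
            Cerr << "reply from " << ev->Sender << Endl;   // runs exactly once
        });
    const NActors::TActorId callbackId = ctx.Register(callback);
    // Use callbackId as the sender of a request; when TEvMyResponse arrives,
    // the lambda is called and the callback actor destroys itself.
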
diff --git a/library/cpp/actors/helpers/mon_histogram_helper.h b/library/cpp/actors/helpers/mon_histogram_helper.h
deleted file mode 100644
index 2c5ef0bbee..0000000000
--- a/library/cpp/actors/helpers/mon_histogram_helper.h
+++ /dev/null
@@ -1,86 +0,0 @@
-#pragma once
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-#include <util/string/cast.h>
-
-namespace NActors {
- namespace NMon {
- class THistogramCounterHelper {
- public:
- THistogramCounterHelper()
- : FirstBucketVal(0)
- , BucketCount(0)
- {
- }
-
- THistogramCounterHelper(const THistogramCounterHelper&) = default;
-
- void Init(NMonitoring::TDynamicCounters* group, const TString& baseName, const TString& unit,
- ui64 firstBucket, ui64 bucketCnt, bool useSensorLabelName = true)
- {
- Y_ASSERT(FirstBucketVal == 0);
- Y_ASSERT(BucketCount == 0);
-
- FirstBucketVal = firstBucket;
- BucketCount = bucketCnt;
- BucketsHolder.reserve(BucketCount);
- Buckets.reserve(BucketCount);
- for (size_t i = 0; i < BucketCount; ++i) {
- TString bucketName = GetBucketName(i) + " " + unit;
- auto labelName = useSensorLabelName ? "sensor" : "name";
- BucketsHolder.push_back(group->GetSubgroup(labelName, baseName)->GetNamedCounter("range", bucketName, true));
- Buckets.push_back(BucketsHolder.back().Get());
- }
- }
-
- void Add(ui64 val) {
- Y_ASSERT(FirstBucketVal != 0);
- Y_ASSERT(BucketCount != 0);
- Y_ABORT_UNLESS(val <= (1ULL << 63ULL));
- size_t ind = 0;
- if (val > FirstBucketVal) {
- ind = GetValueBitCount((2 * val - 1) / FirstBucketVal) - 1;
- if (ind >= BucketCount) {
- ind = BucketCount - 1;
- }
- }
- Buckets[ind]->Inc();
- }
-
- ui64 GetBucketCount() const {
- return BucketCount;
- }
-
- ui64 GetBucketValue(size_t index) const {
- Y_ASSERT(index < BucketCount);
- return Buckets[index]->Val();
- }
-
- void SetBucketValue(ui64 index, ui64 value) {
- Y_ASSERT(index < BucketCount);
- *Buckets[index] = value;
- }
-
- private:
- TString GetBucketName(size_t ind) const {
- Y_ASSERT(FirstBucketVal != 0);
- Y_ASSERT(BucketCount != 0);
- Y_ASSERT(ind < BucketCount);
- if (ind + 1 < BucketCount) {
- return ToString<ui64>(FirstBucketVal << ind);
- } else {
- // Last slot is up to +INF
- return "INF";
- }
- }
-
- private:
- ui64 FirstBucketVal;
- ui64 BucketCount;
- TVector<NMonitoring::TDynamicCounters::TCounterPtr> BucketsHolder;
- TVector<NMonitoring::TDeprecatedCounter*> Buckets;
- };
-
- }
-}
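
THistogramCounterHelper above builds an exponential histogram out of plain dynamic counters: bucket i covers values up to FirstBucketVal << i and the last bucket is labelled "INF". A worked sketch (illustrative only, not part of the diff; group is an assumed intrusive pointer to an NMonitoring::TDynamicCounters subgroup):

    NActors::NMon::THistogramCounterHelper histogram;
    // 8 buckets starting at 100 us: 100, 200, 400, 800, 1600, 3200, 6400, INF
    histogram.Init(group.Get(), "RequestLatencyUs", "us", /*firstBucket=*/100, /*bucketCnt=*/8);

    histogram.Add(90);       // <= firstBucket, lands in the "100 us" bucket
    histogram.Add(250);      // GetValueBitCount((2*250-1)/100) - 1 == 2, the "400 us" bucket
    histogram.Add(1u << 20); // beyond the largest named bucket, counted as "INF"
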
diff --git a/library/cpp/actors/helpers/selfping_actor.cpp b/library/cpp/actors/helpers/selfping_actor.cpp
deleted file mode 100644
index f9f7c297fc..0000000000
--- a/library/cpp/actors/helpers/selfping_actor.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-#include "selfping_actor.h"
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/hfunc.h>
-
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-#include <library/cpp/sliding_window/sliding_window.h>
-
-namespace NActors {
-
-ui64 MeasureTaskDurationNs() {
- // Prepare worm test data
- // 11 * 11 * 3 * 8 = 2904 bytes, fits in L1 cache
- constexpr ui64 Size = 11;
- // Align the data to reduce random alignment effects
- alignas(64) TStackVec<ui64, Size * Size * 3> data;
- ui64 s = 0;
- NHPTimer::STime beginTime;
- NHPTimer::STime endTime;
- // Prepare the data
- data.resize(Size * Size * 3);
- for (ui64 matrixIdx = 0; matrixIdx < 3; ++matrixIdx) {
- for (ui64 y = 0; y < Size; ++y) {
- for (ui64 x = 0; x < Size; ++x) {
- data[matrixIdx * (Size * Size) + y * Size + x] = y * Size + x;
- }
- }
- }
- // Warm-up the cache
- NHPTimer::GetTime(&beginTime);
- for (ui64 idx = 0; idx < data.size(); ++idx) {
- s += data[idx];
- }
- NHPTimer::GetTime(&endTime);
- s += (ui64)(1000000.0 * NHPTimer::GetSeconds(endTime - beginTime));
-
- // Measure the CPU performance
- // C = A * B with injected dependency to s
- NHPTimer::GetTime(&beginTime);
- for (ui64 y = 0; y < Size; ++y) {
- for (ui64 x = 0; x < Size; ++x) {
- for (ui64 i = 0; i < Size; ++i) {
- s += data[y * Size + i] * data[Size * Size + i * Size + x];
- }
- data[2 * Size * Size + y * Size + x] = s;
- s = 0;
- }
- }
- for (ui64 idx = 0; idx < data.size(); ++idx) {
- s += data[idx];
- }
- NHPTimer::GetTime(&endTime);
- // Prepare the result
- double d = 1000000000.0 * (NHPTimer::GetSeconds(endTime - beginTime) + 0.000000001 * (s & 1));
- return (ui64)d;
-}
-
-namespace {
-
-struct TEvPing: public TEventLocal<TEvPing, TEvents::THelloWorld::Ping> {
- TEvPing(double timeStart)
- : TimeStart(timeStart)
- {}
-
- const double TimeStart;
-};
-
-template <class TValueType_>
-struct TAvgOperation {
- struct TValueType {
- ui64 Count = 0;
- TValueType_ Sum = TValueType_();
- };
- using TValueVector = TVector<TValueType>;
-
- static constexpr TValueType InitialValue() {
- return TValueType(); // zero
- }
-
- // Updates value in current bucket and returns window value
- static TValueType UpdateBucket(TValueType windowValue, TValueVector& buckets, size_t index, TValueType newVal) {
- Y_ASSERT(index < buckets.size());
- buckets[index].Sum += newVal.Sum;
- buckets[index].Count += newVal.Count;
- windowValue.Sum += newVal.Sum;
- windowValue.Count += newVal.Count;
- return windowValue;
- }
-
- static TValueType ClearBuckets(TValueType windowValue, TValueVector& buckets, size_t firstElemIndex, size_t bucketsToClear) {
- Y_ASSERT(!buckets.empty());
- Y_ASSERT(firstElemIndex < buckets.size());
- Y_ASSERT(bucketsToClear <= buckets.size());
-
- const size_t arraySize = buckets.size();
- for (size_t i = 0; i < bucketsToClear; ++i) {
- TValueType& curVal = buckets[firstElemIndex];
- windowValue.Sum -= curVal.Sum;
- windowValue.Count -= curVal.Count;
- curVal = InitialValue();
- firstElemIndex = (firstElemIndex + 1) % arraySize;
- }
- return windowValue;
- }
-
-};
-
-class TSelfPingActor : public TActorBootstrapped<TSelfPingActor> {
-private:
- const TDuration SendInterval;
- const NMonitoring::TDynamicCounters::TCounterPtr MaxPingCounter;
- const NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounter;
- const NMonitoring::TDynamicCounters::TCounterPtr AvgPingCounterWithSmallWindow;
- const NMonitoring::TDynamicCounters::TCounterPtr CalculationTimeCounter;
-
- NSlidingWindow::TSlidingWindow<NSlidingWindow::TMaxOperation<ui64>> MaxPingSlidingWindow;
- NSlidingWindow::TSlidingWindow<TAvgOperation<ui64>> AvgPingSlidingWindow;
- NSlidingWindow::TSlidingWindow<TAvgOperation<ui64>> AvgPingSmallSlidingWindow;
- NSlidingWindow::TSlidingWindow<TAvgOperation<ui64>> CalculationSlidingWindow;
-
- THPTimer Timer;
-
-public:
- static constexpr auto ActorActivityType() {
- return EActivityType::SELF_PING_ACTOR;
- }
-
- TSelfPingActor(TDuration sendInterval,
- const NMonitoring::TDynamicCounters::TCounterPtr& maxPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingSmallWindowCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& calculationTimeCounter)
- : SendInterval(sendInterval)
- , MaxPingCounter(maxPingCounter)
- , AvgPingCounter(avgPingCounter)
- , AvgPingCounterWithSmallWindow(avgPingSmallWindowCounter)
- , CalculationTimeCounter(calculationTimeCounter)
- , MaxPingSlidingWindow(TDuration::Seconds(15), 100)
- , AvgPingSlidingWindow(TDuration::Seconds(15), 100)
- , AvgPingSmallSlidingWindow(TDuration::Seconds(1), 100)
- , CalculationSlidingWindow(TDuration::Seconds(15), 100)
- {
- }
-
- void Bootstrap(const TActorContext& ctx)
- {
- Become(&TSelfPingActor::RunningState);
- SchedulePing(ctx, Timer.Passed());
- }
-
- STFUNC(RunningState)
- {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvPing, HandlePing);
- default:
- Y_ABORT("TSelfPingActor::RunningState: unexpected event 0x%08" PRIx32, ev->GetTypeRewrite());
- }
- }
-
- void HandlePing(TEvPing::TPtr &ev, const TActorContext &ctx)
- {
- const auto now = ctx.Now();
- const double hpNow = Timer.Passed();
- const auto& e = *ev->Get();
- const double passedTime = hpNow - e.TimeStart;
- const ui64 delayUs = passedTime > 0.0 ? static_cast<ui64>(passedTime * 1e6) : 0;
-
- if (MaxPingCounter) {
- *MaxPingCounter = MaxPingSlidingWindow.Update(delayUs, now);
- }
- if (AvgPingCounter) {
- auto res = AvgPingSlidingWindow.Update({1, delayUs}, now);
- *AvgPingCounter = double(res.Sum) / double(res.Count + 1);
- }
- if (AvgPingCounterWithSmallWindow) {
- auto res = AvgPingSmallSlidingWindow.Update({1, delayUs}, now);
- *AvgPingCounterWithSmallWindow = double(res.Sum) / double(res.Count + 1);
- }
-
- if (CalculationTimeCounter) {
- ui64 d = MeasureTaskDurationNs();
- auto res = CalculationSlidingWindow.Update({1, d}, now);
- *CalculationTimeCounter = double(res.Sum) / double(res.Count + 1);
- }
-
- SchedulePing(ctx, hpNow);
- }
-
-private:
- void SchedulePing(const TActorContext &ctx, double hpNow) const
- {
- ctx.Schedule(SendInterval, new TEvPing(hpNow));
- }
-};
-
-} // namespace
-
-IActor* CreateSelfPingActor(
- TDuration sendInterval,
- const NMonitoring::TDynamicCounters::TCounterPtr& maxPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingSmallWindowCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& calculationTimeCounter)
-{
- return new TSelfPingActor(sendInterval, maxPingCounter, avgPingCounter, avgPingSmallWindowCounter, calculationTimeCounter);
-}
-
-} // NActors
diff --git a/library/cpp/actors/helpers/selfping_actor.h b/library/cpp/actors/helpers/selfping_actor.h
deleted file mode 100644
index a06bfe8292..0000000000
--- a/library/cpp/actors/helpers/selfping_actor.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-namespace NActors {
-
-ui64 MeasureTaskDurationNs();
-NActors::IActor* CreateSelfPingActor(
- TDuration sendInterval,
- const NMonitoring::TDynamicCounters::TCounterPtr& maxPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& avgPingSmallWindowCounter,
- const NMonitoring::TDynamicCounters::TCounterPtr& calculationTimeCounter);
-
-} // NActors
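
CreateSelfPingActor above publishes self-ping timings (max and sliding-window averages of the time elapsed since each ping was scheduled) plus the duration of a small CPU benchmark into the four supplied counters. A wiring sketch (illustrative only, not part of the diff; counters is an assumed NMonitoring::TDynamicCounters subgroup and actorSystem an assumed running TActorSystem):

    auto maxPing  = counters->GetCounter("SelfPingMaxUs");
    auto avgPing  = counters->GetCounter("SelfPingAvgUs");
    auto avgSmall = counters->GetCounter("SelfPingAvgUs1s");
    auto calcTime = counters->GetCounter("SelfPingCalcTimeNs");

    NActors::IActor* ping = NActors::CreateSelfPingActor(
        TDuration::MilliSeconds(10), maxPing, avgPing, avgSmall, calcTime);
    actorSystem->Register(ping);
    // Every 10 ms the actor schedules TEvPing to itself, measures the elapsed
    // time when it arrives and updates the counters with sliding-window values.
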
diff --git a/library/cpp/actors/helpers/selfping_actor_ut.cpp b/library/cpp/actors/helpers/selfping_actor_ut.cpp
deleted file mode 100644
index 542f817755..0000000000
--- a/library/cpp/actors/helpers/selfping_actor_ut.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "selfping_actor.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/testlib/test_runtime.h>
-
-namespace NActors {
-namespace Tests {
-
-THolder<TTestActorRuntimeBase> CreateRuntime() {
- auto runtime = MakeHolder<TTestActorRuntimeBase>();
- runtime->SetScheduledEventFilter([](auto&&, auto&&, auto&&, auto&&) { return false; });
- runtime->Initialize();
- return runtime;
-}
-
-Y_UNIT_TEST_SUITE(TSelfPingTest) {
- Y_UNIT_TEST(Basic)
- {
- auto runtime = CreateRuntime();
-
- //const TActorId sender = runtime.AllocateEdgeActor();
-
- NMonitoring::TDynamicCounters::TCounterPtr counter(new NMonitoring::TCounterForPtr());
- NMonitoring::TDynamicCounters::TCounterPtr counter2(new NMonitoring::TCounterForPtr());
- NMonitoring::TDynamicCounters::TCounterPtr counter3(new NMonitoring::TCounterForPtr());
- NMonitoring::TDynamicCounters::TCounterPtr counter4(new NMonitoring::TCounterForPtr());
-
- auto actor = CreateSelfPingActor(
- TDuration::MilliSeconds(100), // sendInterval (unused in test)
- counter, counter2, counter3, counter4);
-
- UNIT_ASSERT_VALUES_EQUAL(counter->Val(), 0);
- UNIT_ASSERT_VALUES_EQUAL(counter2->Val(), 0);
- UNIT_ASSERT_VALUES_EQUAL(counter3->Val(), 0);
- UNIT_ASSERT_VALUES_EQUAL(counter4->Val(), 0);
-
- const TActorId actorId = runtime->Register(actor);
- Y_UNUSED(actorId);
-
- //runtime.Send(new IEventHandle(actorId, sender, new TEvSelfPing::TEvPing(0.0)));
-
- // TODO check after events are handled
- //Sleep(TDuration::Seconds(1));
- //UNIT_ASSERT((intmax_t)counter->Val() >= (intmax_t)Delay.MicroSeconds());
- }
-}
-
-} // namespace Tests
-} // namespace NActors
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/helpers/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 0112181222..0000000000
--- a/library/cpp/actors/helpers/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-helpers-ut)
-target_include_directories(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers
-)
-target_link_libraries(library-cpp-actors-helpers-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-helpers
- cpp-actors-interconnect
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-helpers-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-helpers-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-helpers-ut
- TEST_TARGET
- library-cpp-actors-helpers-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-helpers-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-helpers-ut)
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/helpers/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 126b29e574..0000000000
--- a/library/cpp/actors/helpers/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,77 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-helpers-ut)
-target_include_directories(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers
-)
-target_link_libraries(library-cpp-actors-helpers-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-helpers
- cpp-actors-interconnect
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-helpers-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-helpers-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-helpers-ut
- TEST_TARGET
- library-cpp-actors-helpers-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-helpers-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-helpers-ut)
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/helpers/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 24da0dacd6..0000000000
--- a/library/cpp/actors/helpers/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-helpers-ut)
-target_include_directories(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers
-)
-target_link_libraries(library-cpp-actors-helpers-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-helpers
- cpp-actors-interconnect
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-helpers-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-helpers-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-helpers-ut
- TEST_TARGET
- library-cpp-actors-helpers-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-helpers-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-helpers-ut)
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/helpers/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index db118dc91e..0000000000
--- a/library/cpp/actors/helpers/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-helpers-ut)
-target_include_directories(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers
-)
-target_link_libraries(library-cpp-actors-helpers-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-helpers
- cpp-actors-interconnect
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-helpers-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-helpers-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-helpers-ut
- TEST_TARGET
- library-cpp-actors-helpers-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-helpers-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-helpers-ut)
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/helpers/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 7e68870a0e..0000000000
--- a/library/cpp/actors/helpers/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-helpers-ut)
-target_include_directories(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers
-)
-target_link_libraries(library-cpp-actors-helpers-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-helpers
- cpp-actors-interconnect
- cpp-actors-testlib
- cpp-actors-core
-)
-target_sources(library-cpp-actors-helpers-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/helpers/selfping_actor_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-helpers-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-helpers-ut
- TEST_TARGET
- library-cpp-actors-helpers-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-helpers-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-helpers-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-helpers-ut)
diff --git a/library/cpp/actors/helpers/ut/ya.make b/library/cpp/actors/helpers/ut/ya.make
deleted file mode 100644
index 10b298bb72..0000000000
--- a/library/cpp/actors/helpers/ut/ya.make
+++ /dev/null
@@ -1,31 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/helpers)
-
-FORK_SUBTESTS()
-IF (SANITIZER_TYPE)
- SIZE(LARGE)
- TIMEOUT(1200)
- TAG(ya:fat)
- SPLIT_FACTOR(20)
- REQUIREMENTS(
- ram:32
- )
-ELSE()
- SIZE(MEDIUM)
- TIMEOUT(600)
- REQUIREMENTS(
- ram:16
- )
-ENDIF()
-
-
-PEERDIR(
- library/cpp/actors/interconnect
- library/cpp/actors/testlib
- library/cpp/actors/core
-)
-
-SRCS(
- selfping_actor_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/helpers/ya.make b/library/cpp/actors/helpers/ya.make
deleted file mode 100644
index 94acdca726..0000000000
--- a/library/cpp/actors/helpers/ya.make
+++ /dev/null
@@ -1,23 +0,0 @@
-LIBRARY()
-
-SRCS(
- activeactors.cpp
- activeactors.h
- flow_controlled_queue.cpp
- flow_controlled_queue.h
- future_callback.h
- mon_histogram_helper.h
- selfping_actor.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/monlib/dynamic_counters
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
-
diff --git a/library/cpp/actors/http/CMakeLists.darwin-arm64.txt b/library/cpp/actors/http/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 1947b6fa39..0000000000
--- a/library/cpp/actors/http/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-http)
-target_link_libraries(cpp-actors-http PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-openssl
- contrib-libs-zlib
- cpp-actors-core
- cpp-actors-interconnect
- library-cpp-dns
- cpp-monlib-metrics
- cpp-string_utils-quote
-)
-target_sources(cpp-actors-http PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_cache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_compress.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_acceptor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_incoming.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_outgoing.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_static.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http.cpp
-)
diff --git a/library/cpp/actors/http/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/http/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 1947b6fa39..0000000000
--- a/library/cpp/actors/http/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-http)
-target_link_libraries(cpp-actors-http PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-openssl
- contrib-libs-zlib
- cpp-actors-core
- cpp-actors-interconnect
- library-cpp-dns
- cpp-monlib-metrics
- cpp-string_utils-quote
-)
-target_sources(cpp-actors-http PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_cache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_compress.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_acceptor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_incoming.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_outgoing.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_static.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http.cpp
-)
diff --git a/library/cpp/actors/http/CMakeLists.linux-aarch64.txt b/library/cpp/actors/http/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index a0e186fa07..0000000000
--- a/library/cpp/actors/http/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-http)
-target_link_libraries(cpp-actors-http PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-openssl
- contrib-libs-zlib
- cpp-actors-core
- cpp-actors-interconnect
- library-cpp-dns
- cpp-monlib-metrics
- cpp-string_utils-quote
-)
-target_sources(cpp-actors-http PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_cache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_compress.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_acceptor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_incoming.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_outgoing.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_static.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http.cpp
-)
diff --git a/library/cpp/actors/http/CMakeLists.linux-x86_64.txt b/library/cpp/actors/http/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index a0e186fa07..0000000000
--- a/library/cpp/actors/http/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,33 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-http)
-target_link_libraries(cpp-actors-http PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-openssl
- contrib-libs-zlib
- cpp-actors-core
- cpp-actors-interconnect
- library-cpp-dns
- cpp-monlib-metrics
- cpp-string_utils-quote
-)
-target_sources(cpp-actors-http PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_cache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_compress.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_acceptor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_incoming.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_outgoing.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_static.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http.cpp
-)
diff --git a/library/cpp/actors/http/CMakeLists.windows-x86_64.txt b/library/cpp/actors/http/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 1947b6fa39..0000000000
--- a/library/cpp/actors/http/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,32 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-http)
-target_link_libraries(cpp-actors-http PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-openssl
- contrib-libs-zlib
- cpp-actors-core
- cpp-actors-interconnect
- library-cpp-dns
- cpp-monlib-metrics
- cpp-string_utils-quote
-)
-target_sources(cpp-actors-http PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_cache.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_compress.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_acceptor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_incoming.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy_outgoing.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_static.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http.cpp
-)
diff --git a/library/cpp/actors/http/http.cpp b/library/cpp/actors/http/http.cpp
deleted file mode 100644
index 9da46e412b..0000000000
--- a/library/cpp/actors/http/http.cpp
+++ /dev/null
@@ -1,823 +0,0 @@
-#include "http.h"
-#include <library/cpp/string_utils/quote/quote.h>
-
-inline TStringBuf operator +(TStringBuf l, TStringBuf r) {
- if (l.empty()) {
- return r;
- }
- if (r.empty()) {
- return l;
- }
- if (l.end() == r.begin()) {
- return TStringBuf(l.data(), l.size() + r.size());
- }
- if (r.end() == l.begin()) {
- return TStringBuf(r.data(), l.size() + r.size());
- }
- Y_ABORT("oops");
- return TStringBuf();
-}
-
-inline TStringBuf operator +=(TStringBuf& l, TStringBuf r) {
- return l = l + r;
-}
-
-static bool is_not_number(TStringBuf v) {
- return v.empty() || std::find_if_not(v.begin(), v.end(), [](unsigned char c) { return std::isdigit(c); }) != v.end();
-}
-
-namespace NHttp {
-
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::Host>() { return "Host"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::Accept>() { return "Accept"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::Connection>() { return "Connection"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::ContentType>() { return "Content-Type"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::ContentLength>() { return "Content-Length"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::TransferEncoding>() { return "Transfer-Encoding"; }
-template <> TStringBuf THttpRequest::GetName<&THttpRequest::AcceptEncoding>() { return "Accept-Encoding"; }
-
-const TMap<TStringBuf, TStringBuf THttpRequest::*, TLessNoCase> THttpRequest::HeadersLocation = {
- { THttpRequest::GetName<&THttpRequest::Host>(), &THttpRequest::Host },
- { THttpRequest::GetName<&THttpRequest::Accept>(), &THttpRequest::Accept },
- { THttpRequest::GetName<&THttpRequest::Connection>(), &THttpRequest::Connection },
- { THttpRequest::GetName<&THttpRequest::ContentType>(), &THttpRequest::ContentType },
- { THttpRequest::GetName<&THttpRequest::ContentLength>(), &THttpRequest::ContentLength },
- { THttpRequest::GetName<&THttpRequest::TransferEncoding>(), &THttpRequest::TransferEncoding },
- { THttpRequest::GetName<&THttpRequest::AcceptEncoding>(), &THttpRequest::AcceptEncoding },
-};
-
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::Connection>() { return "Connection"; }
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::ContentType>() { return "Content-Type"; }
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::ContentLength>() { return "Content-Length"; }
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::TransferEncoding>() { return "Transfer-Encoding"; }
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::LastModified>() { return "Last-Modified"; }
-template <> TStringBuf THttpResponse::GetName<&THttpResponse::ContentEncoding>() { return "Content-Encoding"; }
-
-const TMap<TStringBuf, TStringBuf THttpResponse::*, TLessNoCase> THttpResponse::HeadersLocation = {
- { THttpResponse::GetName<&THttpResponse::Connection>(), &THttpResponse::Connection },
- { THttpResponse::GetName<&THttpResponse::ContentType>(), &THttpResponse::ContentType },
- { THttpResponse::GetName<&THttpResponse::ContentLength>(), &THttpResponse::ContentLength },
- { THttpResponse::GetName<&THttpResponse::TransferEncoding>(), &THttpResponse::TransferEncoding },
- { THttpResponse::GetName<&THttpResponse::LastModified>(), &THttpResponse::LastModified },
- { THttpResponse::GetName<&THttpResponse::ContentEncoding>(), &THttpResponse::ContentEncoding }
-};
-
-void THttpRequest::Clear() {
- // a dirty little trick
- this->~THttpRequest(); // basically, do nothing
- new (this) THttpRequest(); // reset all fields
-}
-
-template <>
-bool THttpParser<THttpRequest, TSocketBuffer>::HaveBody() const {
- if (!Body.empty()) {
- return true;
- }
- return !ContentLength.empty() || !TransferEncoding.empty();
-}
-
-template <>
-void THttpParser<THttpRequest, TSocketBuffer>::Advance(size_t len) {
- TStringBuf data(Pos(), len);
- while (!data.empty()) {
- if (Stage != EParseStage::Error) {
- LastSuccessStage = Stage;
- }
- switch (Stage) {
- case EParseStage::Method: {
- if (ProcessData(Method, data, ' ', MaxMethodSize)) {
- Stage = EParseStage::URL;
- }
- break;
- }
- case EParseStage::URL: {
- if (ProcessData(URL, data, ' ', MaxURLSize)) {
- Stage = EParseStage::Protocol;
- }
- break;
- }
- case EParseStage::Protocol: {
- if (ProcessData(Protocol, data, '/', MaxProtocolSize)) {
- Stage = EParseStage::Version;
- }
- break;
- }
- case EParseStage::Version: {
- if (ProcessData(Version, data, "\r\n", MaxVersionSize)) {
- Stage = EParseStage::Header;
- Headers = data;
- }
- break;
- }
- case EParseStage::Header: {
- if (ProcessData(Header, data, "\r\n", MaxHeaderSize)) {
- if (Header.empty()) {
- if (HaveBody() && (ContentLength.empty() || ContentLength != "0")) {
- Stage = EParseStage::Body;
- } else if (TotalSize.has_value() && !data.empty()) {
- Stage = EParseStage::Body;
- } else {
- Stage = EParseStage::Done;
- }
- } else {
- ProcessHeader(Header);
- }
- Headers = TStringBuf(Headers.data(), data.data() - Headers.data());
- }
- if (Stage != EParseStage::Body) {
- break;
- }
- [[fallthrough]];
- }
- case EParseStage::Body: {
- if (TEqNoCase()(TransferEncoding, "chunked")) {
- Stage = EParseStage::ChunkLength;
- } else if (!ContentLength.empty()) {
- if (is_not_number(ContentLength)) {
- // Invalid content length
- Stage = EParseStage::Error;
- } else if (ProcessData(Content, data, FromStringWithDefault(ContentLength, 0))) {
- Body = Content;
- Stage = EParseStage::Done;
- }
- } else if (TotalSize.has_value()) {
- if (ProcessData(Content, data, GetBodySizeFromTotalSize())) {
- Body = Content;
- Stage = EParseStage::Done;
- }
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- break;
- }
- case EParseStage::ChunkLength: {
- if (ProcessData(Line, data, "\r\n", MaxChunkLengthSize)) {
- if (!Line.empty()) {
- ChunkLength = ParseHex(Line);
- if (ChunkLength <= MaxChunkSize) {
- ContentSize = Content.size() + ChunkLength;
- if (ContentSize <= MaxChunkContentSize) {
- Stage = EParseStage::ChunkData;
- Line.Clear();
- } else {
- // Invalid chunk content length
- Stage = EParseStage::Error;
- }
- } else {
- // Invalid chunk length
- Stage = EParseStage::Error;
- }
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- }
- break;
- }
- case EParseStage::ChunkData: {
- if (!IsError()) {
- if (ProcessData(Content, data, ContentSize)) {
- if (ProcessData(Line, data, 2)) {
- if (Line == "\r\n") {
- if (ChunkLength == 0) {
- Body = Content;
- Stage = EParseStage::Done;
- } else {
- Stage = EParseStage::ChunkLength;
- }
- Line.Clear();
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- }
- }
- }
- break;
- }
-
- case EParseStage::Done:
- case EParseStage::Error: {
- data.Clear();
- break;
- }
- default:
- Y_ABORT("Invalid processing sequence");
- break;
- }
- }
- TSocketBuffer::Advance(len);
-}
-
-template <>
-THttpParser<THttpRequest, TSocketBuffer>::EParseStage THttpParser<THttpRequest, TSocketBuffer>::GetInitialStage() {
- return EParseStage::Method;
-}
-
-template <>
-bool THttpParser<THttpResponse, TSocketBuffer>::HaveBody() const {
- if (!Body.empty()) {
- return true;
- }
- return (!Status.starts_with("1") && Status != "204" && Status != "304")
- && (!ContentType.empty() || !ContentLength.empty() || !TransferEncoding.empty());
-}
-
-template <>
-THttpParser<THttpResponse, TSocketBuffer>::EParseStage THttpParser<THttpResponse, TSocketBuffer>::GetInitialStage() {
- return EParseStage::Protocol;
-}
-
-void THttpResponse::Clear() {
- // a dirty little trick
- this->~THttpResponse(); // basically, do nothing
- new (this) THttpResponse(); // reset all fields
-}
-
-template <>
-void THttpParser<THttpResponse, TSocketBuffer>::Advance(size_t len) {
- TStringBuf data(Pos(), len);
- while (!data.empty()) {
- if (Stage != EParseStage::Error) {
- LastSuccessStage = Stage;
- }
- switch (Stage) {
- case EParseStage::Protocol: {
- if (ProcessData(Protocol, data, '/', MaxProtocolSize)) {
- Stage = EParseStage::Version;
- }
- break;
- }
- case EParseStage::Version: {
- if (ProcessData(Version, data, ' ', MaxVersionSize)) {
- Stage = EParseStage::Status;
- }
- break;
- }
- case EParseStage::Status: {
- if (ProcessData(Status, data, ' ', MaxStatusSize)) {
- Stage = EParseStage::Message;
- }
- break;
- }
- case EParseStage::Message: {
- if (ProcessData(Message, data, "\r\n", MaxMessageSize)) {
- Stage = EParseStage::Header;
- Headers = TStringBuf(data.data(), size_t(0));
- }
- break;
- }
- case EParseStage::Header: {
- if (ProcessData(Header, data, "\r\n", MaxHeaderSize)) {
- if (Header.empty()) {
- if (HaveBody() && (ContentLength.empty() || ContentLength != "0")) {
- Stage = EParseStage::Body;
- } else if (TotalSize.has_value() && !data.empty()) {
- Stage = EParseStage::Body;
- } else {
- Stage = EParseStage::Done;
- }
- } else {
- ProcessHeader(Header);
- }
- Headers = TStringBuf(Headers.data(), data.data() - Headers.data());
- }
- if (Stage != EParseStage::Body) {
- break;
- }
- [[fallthrough]];
- }
- case EParseStage::Body: {
- if (TEqNoCase()(TransferEncoding, "chunked")) {
- Stage = EParseStage::ChunkLength;
- } else if (!ContentLength.empty()) {
- if (is_not_number(ContentLength)) {
- // Invalid content length
- Stage = EParseStage::Error;
- } else if (ProcessData(Body, data, FromStringWithDefault(ContentLength, 0))) {
- Stage = EParseStage::Done;
- if (Body && ContentEncoding == "deflate") {
- Content = DecompressDeflate(Body);
- Body = Content;
- }
- }
- } else if (TotalSize.has_value()) {
- if (ProcessData(Content, data, GetBodySizeFromTotalSize())) {
- Body = Content;
- Stage = EParseStage::Done;
- if (Body && ContentEncoding == "deflate") {
- Content = DecompressDeflate(Body);
- Body = Content;
- }
- }
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- break;
- }
- case EParseStage::ChunkLength: {
- if (ProcessData(Line, data, "\r\n", MaxChunkLengthSize)) {
- if (!Line.empty()) {
- ChunkLength = ParseHex(Line);
- if (ChunkLength <= MaxChunkSize) {
- ContentSize = Content.size() + ChunkLength;
- if (ContentSize <= MaxChunkContentSize) {
- Stage = EParseStage::ChunkData;
- Line.Clear();
- } else {
- // Invalid chunk content length
- Stage = EParseStage::Error;
- }
- } else {
- // Invalid chunk length
- Stage = EParseStage::Error;
- }
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- }
- break;
- }
- case EParseStage::ChunkData: {
- if (!IsError()) {
- if (ProcessData(Content, data, ContentSize)) {
- if (ProcessData(Line, data, 2)) {
- if (Line == "\r\n") {
- if (ChunkLength == 0) {
- Body = Content;
- Stage = EParseStage::Done;
- if (Body && ContentEncoding == "deflate") {
- Content = DecompressDeflate(Body);
- Body = Content;
- }
- } else {
- Stage = EParseStage::ChunkLength;
- }
- Line.Clear();
- } else {
- // Invalid body encoding
- Stage = EParseStage::Error;
- }
- }
- }
- }
- break;
- }
- case EParseStage::Done:
- case EParseStage::Error:
- data.Clear();
- break;
- default:
- // Invalid processing sequence
- Stage = EParseStage::Error;
- break;
- }
- }
- TSocketBuffer::Advance(len);
-}
-
-template <>
-void THttpParser<THttpResponse, TSocketBuffer>::ConnectionClosed() {
- if (Stage == EParseStage::Done) {
- return;
- }
- if (Stage == EParseStage::Body) {
- // ?
- Stage = EParseStage::Done;
- } else {
- LastSuccessStage = Stage;
- Stage = EParseStage::Error;
- }
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseString(TStringBuf data) {
- THttpParser<THttpResponse, TSocketBuffer> parser(data);
- THeadersBuilder headers(parser.Headers);
- if (!Endpoint->WorkerName.empty()) {
- headers.Set("X-Worker-Name", Endpoint->WorkerName);
- }
- THttpOutgoingResponsePtr response = new THttpOutgoingResponse(this);
- response->InitResponse(parser.Protocol, parser.Version, parser.Status, parser.Message);
- if (parser.HaveBody()) {
- if (parser.ContentType && !Endpoint->CompressContentTypes.empty()) {
- TStringBuf contentType = parser.ContentType.Before(';');
- Trim(contentType, ' ');
- if (Count(Endpoint->CompressContentTypes, contentType) != 0) {
- if (response->EnableCompression()) {
- headers.Erase("Content-Length"); // we will need new length after compression
- }
- }
- }
- headers.Erase("Transfer-Encoding"); // we erase transfer-encoding because we convert body to content-length
- response->Set(headers);
- response->SetBody(parser.Body);
- } else {
- headers.Erase("Transfer-Encoding"); // we erase transfer-encoding because we convert body to content-length
- response->Set(headers);
- if (!response->ContentLength) {
- response->Set<&THttpResponse::ContentLength>("0");
- }
- }
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseOK(TStringBuf body, TStringBuf contentType, TInstant lastModified) {
- return CreateResponse("200", "OK", contentType, body, lastModified);
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseBadRequest(TStringBuf html, TStringBuf contentType) {
- if (html.empty() && IsError()) {
- contentType = "text/plain";
- html = GetErrorText();
- }
- return CreateResponse("400", "Bad Request", contentType, html);
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseNotFound(TStringBuf html, TStringBuf contentType) {
- return CreateResponse("404", "Not Found", contentType, html);
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseServiceUnavailable(TStringBuf html, TStringBuf contentType) {
- return CreateResponse("503", "Service Unavailable", contentType, html);
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponseGatewayTimeout(TStringBuf html, TStringBuf contentType) {
- return CreateResponse("504", "Gateway Timeout", contentType, html);
-}
-
-THttpIncomingResponse::THttpIncomingResponse(THttpOutgoingRequestPtr request)
- : Request(request)
-{}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::ConstructResponse(TStringBuf status, TStringBuf message) {
- TStringBuf version = Version;
- if (version != "1.0" && version != "1.1") {
- version = "1.1";
- }
- THttpOutgoingResponsePtr response = new THttpOutgoingResponse(this, "HTTP", version, status, message);
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateIncompleteResponse(TStringBuf status, TStringBuf message, const THeaders& headers) {
- THttpOutgoingResponsePtr response = ConstructResponse(status, message);
- if (!headers.Has("Connection")) {
- response->Set<&THttpResponse::Connection>(GetConnection());
- }
- if (!headers.Has("X-Worker-Name")) {
- if (!Endpoint->WorkerName.empty()) {
- response->Set("X-Worker-Name", Endpoint->WorkerName);
- }
- }
- response->Set(headers);
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateIncompleteResponse(TStringBuf status, TStringBuf message, const THeaders& headers, TStringBuf body) {
- THttpOutgoingResponsePtr response = CreateIncompleteResponse(status, message, headers);
- if (!response->ContentType.empty() && !body.empty()) {
- if (!Endpoint->CompressContentTypes.empty()) {
- TStringBuf contentType = response->ContentType.Before(';');
- Trim(contentType, ' ');
- if (Count(Endpoint->CompressContentTypes, contentType) != 0) {
- response->EnableCompression();
- }
- }
- }
- return response;
-}
-
-void THttpIncomingRequest::FinishResponse(THttpOutgoingResponsePtr& response, TStringBuf body) {
- if (response->IsNeedBody() || !body.empty()) {
- if (Method == "HEAD") {
- response->Set<&THttpResponse::ContentLength>(ToString(body.size()));
- } else {
- response->SetBody(body);
- }
- }
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponse(TStringBuf status, TStringBuf message) {
- THttpOutgoingResponsePtr response = CreateIncompleteResponse(status, message);
- FinishResponse(response);
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponse(TStringBuf status, TStringBuf message, const THeaders& headers) {
- THttpOutgoingResponsePtr response = CreateIncompleteResponse(status, message, headers);
- FinishResponse(response);
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponse(TStringBuf status, TStringBuf message, const THeaders& headers, TStringBuf body) {
- THttpOutgoingResponsePtr response = CreateIncompleteResponse(status, message, headers, body);
- FinishResponse(response, body);
- return response;
-}
-
-THttpOutgoingResponsePtr THttpIncomingRequest::CreateResponse(TStringBuf status, TStringBuf message, TStringBuf contentType, TStringBuf body, TInstant lastModified) {
- NHttp::THeadersBuilder headers;
- if (!contentType.empty() && !body.empty()) {
- headers.Set("Content-Type", contentType);
- }
- if (lastModified) {
- headers.Set("Last-Modified", lastModified.FormatGmTime("%a, %d %b %Y %H:%M:%S GMT"));
- }
- return CreateResponse(status, message, headers, body);
-}
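A minimal usage sketch of the CreateResponse* helpers above, assuming the library headers are available and that a handler already holds a THttpIncomingRequestPtr; the function name Respond and the paths/bodies are illustrative only:

    #include "http.h"

    NHttp::THttpOutgoingResponsePtr Respond(NHttp::THttpIncomingRequestPtr request) {
        if (request->URL == "/ping") {
            // 200 with an explicit content type; Content-Length is filled in when the body is set
            return request->CreateResponseOK("pong", "text/plain");
        }
        NHttp::THeadersBuilder headers;
        headers.Set("Cache-Control", "no-store");
        return request->CreateResponse("404", "Not Found", headers, "nothing here");
    }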
-
-THttpIncomingRequestPtr THttpIncomingRequest::Duplicate() {
- THttpIncomingRequestPtr request = new THttpIncomingRequest(*this);
- request->Reparse();
- request->Timer.Reset();
- return request;
-}
-
-THttpIncomingResponsePtr THttpIncomingResponse::Duplicate(THttpOutgoingRequestPtr request) {
- THttpIncomingResponsePtr response = new THttpIncomingResponse(*this);
- response->Reparse();
- response->Request = request;
- return response;
-}
-
-THttpOutgoingResponsePtr THttpOutgoingResponse::Duplicate(THttpIncomingRequestPtr request) {
- THeadersBuilder headers(Headers);
- if (!request->Endpoint->WorkerName.empty()) {
- headers.Set("X-Worker-Name", request->Endpoint->WorkerName);
- }
- THttpOutgoingResponsePtr response = new THttpOutgoingResponse(request);
- response->InitResponse(Protocol, Version, Status, Message);
- if (Body) {
- if (ContentType && !request->Endpoint->CompressContentTypes.empty()) {
- TStringBuf contentType = ContentType.Before(';');
- Trim(contentType, ' ');
- if (Count(request->Endpoint->CompressContentTypes, contentType) != 0) {
- if (response->EnableCompression()) {
- headers.Erase("Content-Length"); // we will need new length after compression
- }
- }
- }
- response->Set(headers);
- response->SetBody(Body);
- } else {
- response->Set(headers);
- if (!response->ContentLength) {
- response->Set<&THttpResponse::ContentLength>("0");
- }
- }
- return response;
-}
-
-
-THttpOutgoingResponsePtr THttpIncomingResponse::Reverse(THttpIncomingRequestPtr request) {
- THttpOutgoingResponsePtr response = new THttpOutgoingResponse(request);
- response->Assign(Data(), Size());
- response->Reparse();
- return response;
-}
-
-THttpOutgoingRequest::THttpOutgoingRequest(TStringBuf method, TStringBuf scheme, TStringBuf host, TStringBuf uri, TStringBuf protocol, TStringBuf version) {
- Secure = (scheme == "https");
- TString urie = UrlEscapeRet(uri);
- InitRequest(method, urie, protocol, version);
- if (host) {
- Set<&THttpRequest::Host>(host);
- }
-}
-
-THttpOutgoingRequest::THttpOutgoingRequest(TStringBuf method, TStringBuf url, TStringBuf protocol, TStringBuf version) {
- TStringBuf scheme, host, uri;
- if (!CrackURL(url, scheme, host, uri)) {
- Y_ABORT("Invalid URL specified");
- }
- if (!scheme.empty() && scheme != "http" && scheme != "https") {
- Y_ABORT("Invalid URL specified");
- }
- Secure = (scheme == "https");
- TString urie = UrlEscapeRet(uri);
- InitRequest(method, urie, protocol, version);
- if (host) {
- Set<&THttpRequest::Host>(host);
- }
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequestString(const TString& data) {
- THttpOutgoingRequestPtr request = new THttpOutgoingRequest();
- request->Assign(data.data(), data.size());
- request->Reparse();
- return request;
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequestGet(TStringBuf url) {
- return CreateRequest("GET", url);
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequestGet(TStringBuf host, TStringBuf uri) {
- return CreateHttpRequest("GET", host, uri);
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequestPost(TStringBuf url, TStringBuf contentType, TStringBuf body) {
- return CreateRequest("POST", url, contentType, body);
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequestPost(TStringBuf host, TStringBuf uri, TStringBuf contentType, TStringBuf body) {
- return CreateHttpRequest("POST", host, uri, contentType, body);
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateRequest(TStringBuf method, TStringBuf url, TStringBuf contentType, TStringBuf body) {
- THttpOutgoingRequestPtr request = new THttpOutgoingRequest(method, url, "HTTP", "1.1");
- request->Set<&THttpRequest::Accept>("*/*");
- if (!contentType.empty()) {
- request->Set<&THttpRequest::ContentType>(contentType);
- request->Set<&THttpRequest::Body>(body);
- }
- return request;
-}
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::CreateHttpRequest(TStringBuf method, TStringBuf host, TStringBuf uri, TStringBuf contentType, TStringBuf body) {
- THttpOutgoingRequestPtr request = new THttpOutgoingRequest(method, "http", host, uri, "HTTP", "1.1");
- request->Set<&THttpRequest::Accept>("*/*");
- if (!contentType.empty()) {
- request->Set<&THttpRequest::ContentType>(contentType);
- request->Set<&THttpRequest::Body>(body);
- }
- return request;
-}
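A short sketch of the request factory helpers above; example.com and the JSON body are placeholders, and the returned pointers would normally be handed to the http proxy actor:

    auto get = NHttp::THttpOutgoingRequest::CreateRequestGet("http://example.com/health");
    auto post = NHttp::THttpOutgoingRequest::CreateRequestPost(
        "https://example.com/api/items", "application/json", "{\"name\":\"test\"}");
    // Both helpers set "Accept: */*"; the POST also gets Content-Type and a body.
    // A URL with an unsupported scheme aborts via Y_ABORT in the constructor.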
-
-THttpOutgoingRequestPtr THttpOutgoingRequest::Duplicate() {
- THttpOutgoingRequestPtr request = new THttpOutgoingRequest(*this);
- request->Reparse();
- return request;
-}
-
-THttpOutgoingResponse::THttpOutgoingResponse(THttpIncomingRequestPtr request)
- : Request(request)
-{}
-
-THttpOutgoingResponse::THttpOutgoingResponse(THttpIncomingRequestPtr request, TStringBuf protocol, TStringBuf version, TStringBuf status, TStringBuf message)
- : Request(request)
-{
- InitResponse(protocol, version, status, message);
-}
-
-const size_t THttpConfig::BUFFER_MIN_STEP;
-const TDuration THttpConfig::CONNECTION_TIMEOUT;
-
-TUrlParameters::TUrlParameters(TStringBuf url) {
- TStringBuf base;
- TStringBuf params;
- if (url.TrySplit('?', base, params)) {
- for (TStringBuf param = params.NextTok('&'); !param.empty(); param = params.NextTok('&')) {
- TStringBuf name = param.NextTok('=');
- Parameters[name] = param;
- }
- }
-}
-
-TString TUrlParameters::operator [](TStringBuf name) const {
- TString value(Get(name));
- CGIUnescape(value);
- return value;
-}
-
-bool TUrlParameters::Has(TStringBuf name) const {
- return Parameters.count(name) != 0;
-}
-
-TStringBuf TUrlParameters::Get(TStringBuf name) const {
- auto it = Parameters.find(name);
- if (it != Parameters.end()) {
- return it->second;
- }
- return TStringBuf();
-}
-
-TString TUrlParameters::Render() const {
- TStringBuilder parameters;
- for (const std::pair<TStringBuf, TStringBuf> parameter : Parameters) {
- if (parameters.empty()) {
- parameters << '?';
- } else {
- parameters << '&';
- }
- parameters << parameter.first;
- parameters << '=';
- parameters << parameter.second;
- }
- return parameters;
-}
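A sketch of TUrlParameters as implemented above; the query string is illustrative, and Get() returns raw views into the url passed to the constructor:

    NHttp::TUrlParameters params("/search?q=hello%20world&page=2");
    TString q = params["q"];           // "hello world" -- CGI-unescaped copy
    TStringBuf raw = params.Get("q");  // "hello%20world" -- raw view, no copy
    bool hasPage = params.Has("page"); // true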
-
-TCookies::TCookies(TStringBuf cookie) {
- for (TStringBuf param = cookie.NextTok(';'); !param.empty(); param = cookie.NextTok(';')) {
- param.SkipPrefix(" ");
- TStringBuf name = param.NextTok('=');
- Cookies[name] = param;
- }
-}
-
-TStringBuf TCookies::operator [](TStringBuf name) const {
- return Get(name);
-}
-
-bool TCookies::Has(TStringBuf name) const {
- return Cookies.count(name) != 0;
-}
-
-TStringBuf TCookies::Get(TStringBuf name) const {
- auto it = Cookies.find(name);
- if (it != Cookies.end()) {
- return it->second;
- }
- return TStringBuf();
-}
-
-TString TCookies::Render() const {
- TStringBuilder cookies;
- for (const std::pair<TStringBuf, TStringBuf> cookie : Cookies) {
- if (!cookies.empty()) {
- cookies << ' ';
- }
- cookies << cookie.first;
- cookies << '=';
- cookies << cookie.second;
- cookies << ';';
- }
- return cookies;
-}
-
-TCookiesBuilder::TCookiesBuilder()
- :TCookies(TStringBuf())
-{}
-
-void TCookiesBuilder::Set(TStringBuf name, TStringBuf data) {
- Data.emplace_back(name, data);
- Cookies[Data.back().first] = Data.back().second;
-}
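A sketch of the cookie helpers above; the cookie names and values are placeholders, and the parsed TCookies keeps views into the header string it was given:

    NHttp::TCookies parsed("session=abc123; theme=dark");
    TStringBuf session = parsed["session"];  // "abc123"

    NHttp::TCookiesBuilder builder;
    builder.Set("session", "abc123");
    TString header = builder.Render();       // "session=abc123;"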
-
-THeaders::THeaders(TStringBuf headers) {
- Parse(headers);
-}
-
-size_t THeaders::Parse(TStringBuf headers) {
- auto start = headers.begin();
- for (TStringBuf param = headers.NextTok("\r\n"); !param.empty(); param = headers.NextTok("\r\n")) {
- TStringBuf name = param.NextTok(":");
- param.SkipPrefix(" ");
- Headers[name] = param;
- }
- return headers.begin() - start;
-}
-
-const TStringBuf THeaders::operator [](TStringBuf name) const {
- return Get(name);
-}
-
-bool THeaders::Has(TStringBuf name) const {
- return Headers.count(name) != 0;
-}
-
-TStringBuf THeaders::Get(TStringBuf name) const {
- auto it = Headers.find(name);
- if (it != Headers.end()) {
- return it->second;
- }
- return TStringBuf();
-}
-
-TString THeaders::Render() const {
- TStringBuilder headers;
- for (const std::pair<TStringBuf, TStringBuf> header : Headers) {
- headers << header.first;
- headers << ": ";
- headers << header.second;
- headers << "\r\n";
- }
- return headers;
-}
-
-THeadersBuilder::THeadersBuilder()
- : THeaders(TStringBuf())
-{}
-
-THeadersBuilder::THeadersBuilder(TStringBuf headers)
- : THeaders(headers)
-{}
-
-THeadersBuilder::THeadersBuilder(const THeadersBuilder& builder) {
- for (const auto& pr : builder.Headers) {
- Set(pr.first, pr.second);
- }
-}
-
-void THeadersBuilder::Set(TStringBuf name, TStringBuf data) {
- Data.emplace_back(name, data);
- Headers[Data.back().first] = Data.back().second;
-}
-
-void THeadersBuilder::Erase(TStringBuf name) {
- Headers.erase(name);
-}
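A sketch of THeaders/THeadersBuilder as defined above; the header names are placeholders. Lookups go through the case-insensitive TLessNoCase comparator:

    NHttp::THeadersBuilder headers;
    headers.Set("Content-Type", "application/json");
    headers.Set("X-Request-Id", "12345");
    headers.Erase("X-Request-Id");
    bool has = headers.Has("content-type");  // true -- case-insensitive lookup
    TString wire = headers.Render();         // "Content-Type: application/json\r\n"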
-
-}
diff --git a/library/cpp/actors/http/http.h b/library/cpp/actors/http/http.h
deleted file mode 100644
index d96ab062e8..0000000000
--- a/library/cpp/actors/http/http.h
+++ /dev/null
@@ -1,877 +0,0 @@
-#pragma once
-#include <util/datetime/base.h>
-#include <util/string/builder.h>
-#include <util/system/thread.h>
-#include <util/system/hp_timer.h>
-#include <util/generic/hash_set.h>
-#include <util/generic/buffer.h>
-#include <util/generic/intrlist.h>
-#include "http_config.h"
-
-// TODO(xenoxeno): hide in implementation
-template <typename Type>
-struct THash<TIntrusivePtr<Type>> {
- size_t operator ()(const TIntrusivePtr<Type>& ptr) const { return reinterpret_cast<size_t>(ptr.Get()); }
-};
-
-template<>
-inline void Out<NHttp::THttpConfig::SocketAddressType>(IOutputStream& o, const NHttp::THttpConfig::SocketAddressType& x) {
- o << x->ToString();
-}
-
-namespace NHttp {
-
-bool IsIPv6(const TString& host);
-bool IsIPv4(const TString& host);
-bool CrackURL(TStringBuf url, TStringBuf& scheme, TStringBuf& host, TStringBuf& uri);
-void CrackAddress(const TString& address, TString& hostname, TIpPort& port);
-void TrimBegin(TStringBuf& target, char delim);
-void TrimEnd(TStringBuf& target, char delim);
-void Trim(TStringBuf& target, char delim);
-void TrimEnd(TString& target, char delim);
-TString CompressDeflate(TStringBuf source);
-TString DecompressDeflate(TStringBuf source);
-
-struct TLessNoCase {
- bool operator()(TStringBuf l, TStringBuf r) const {
- auto ll = l.length();
- auto rl = r.length();
- if (ll != rl) {
- return ll < rl;
- }
- return strnicmp(l.data(), r.data(), ll) < 0;
- }
-};
-
-struct TEqNoCase {
- bool operator()(TStringBuf l, TStringBuf r) const {
- auto ll = l.length();
- auto rl = r.length();
- if (ll != rl) {
- return false;
- }
- return strnicmp(l.data(), r.data(), ll) == 0;
- }
-};
-
-struct TSensors {
- TString Direction;
- TString Host;
- TString Url;
- TString Status;
- TDuration Time;
-
- TSensors(
- TStringBuf direction,
- TStringBuf host,
- TStringBuf url,
- TStringBuf status,
- TDuration time)
- : Direction(direction)
- , Host(host)
- , Url(url)
- , Status(status)
- , Time(time)
- {}
-};
-
-struct TUrlParameters {
- THashMap<TStringBuf, TStringBuf> Parameters;
-
- TUrlParameters(TStringBuf url);
- TString operator [](TStringBuf name) const;
- bool Has(TStringBuf name) const;
- TStringBuf Get(TStringBuf name) const; // raw
- TString Render() const;
-};
-
-struct TCookies {
- THashMap<TStringBuf, TStringBuf> Cookies;
-
- TCookies(TStringBuf cookie);
- TCookies(const TCookies&) = delete;
- TStringBuf operator [](TStringBuf name) const;
- bool Has(TStringBuf name) const;
- TStringBuf Get(TStringBuf name) const; // raw
- TString Render() const;
-};
-
-struct TCookiesBuilder : TCookies {
- TDeque<std::pair<TString, TString>> Data;
-
- TCookiesBuilder();
- void Set(TStringBuf name, TStringBuf data);
-};
-
-struct THeaders {
- TMap<TStringBuf, TStringBuf, TLessNoCase> Headers;
-
- THeaders() = default;
- THeaders(TStringBuf headers);
- THeaders(const THeaders&) = delete;
- const TStringBuf operator [](TStringBuf name) const;
- bool Has(TStringBuf name) const;
- TStringBuf Get(TStringBuf name) const; // raw
- size_t Parse(TStringBuf headers);
- TString Render() const;
-};
-
-struct THeadersBuilder : THeaders {
- TDeque<std::pair<TString, TString>> Data;
-
- THeadersBuilder();
- THeadersBuilder(TStringBuf headers);
- THeadersBuilder(const THeadersBuilder& builder);
- void Set(TStringBuf name, TStringBuf data);
- void Erase(TStringBuf name);
-};
-
-class TSocketBuffer : public TBuffer, public THttpConfig {
-public:
- TSocketBuffer()
- : TBuffer(BUFFER_SIZE)
- {}
-
- bool EnsureEnoughSpaceAvailable(size_t need) {
- size_t avail = Avail();
- if (avail < need) {
- Reserve(Capacity() + std::max(need, BUFFER_MIN_STEP));
- return false;
- }
- return true;
- }
-
- // non-destructive variant of AsString
- TString AsString() const {
- return TString(Data(), Size());
- }
-};
-
-class THttpRequest {
-public:
- TStringBuf Method;
- TStringBuf URL;
- TStringBuf Protocol;
- TStringBuf Version;
- TStringBuf Headers;
-
- TStringBuf Host;
- TStringBuf Accept;
- TStringBuf Connection;
- TStringBuf ContentType;
- TStringBuf ContentLength;
- TStringBuf AcceptEncoding;
- TStringBuf TransferEncoding;
-
- TStringBuf Body;
-
- static const TMap<TStringBuf, TStringBuf THttpRequest::*, TLessNoCase> HeadersLocation;
-
- template <TStringBuf THttpRequest::* Header>
- static TStringBuf GetName();
- void Clear();
-};
-
-class THttpResponse {
-public:
- TStringBuf Protocol;
- TStringBuf Version;
- TStringBuf Status;
- TStringBuf Message;
- TStringBuf Headers;
-
- TStringBuf Connection;
- TStringBuf ContentType;
- TStringBuf ContentLength;
- TStringBuf TransferEncoding;
- TStringBuf LastModified;
- TStringBuf ContentEncoding;
-
- TStringBuf Body;
-
- static const TMap<TStringBuf, TStringBuf THttpResponse::*, TLessNoCase> HeadersLocation;
-
- template <TStringBuf THttpResponse::* Header>
- static TStringBuf GetName();
- void Clear();
-};
-
-template <typename HeaderType, typename BufferType>
-class THttpParser : public HeaderType, public BufferType {
-public:
- enum class EParseStage : ui8 {
- Method,
- URL,
- Protocol,
- Version,
- Status,
- Message,
- Header,
- Body,
- ChunkLength,
- ChunkData,
- Done,
- Error,
- };
-
- static constexpr size_t MaxMethodSize = 8;
- static constexpr size_t MaxURLSize = 2048;
- static constexpr size_t MaxProtocolSize = 4;
- static constexpr size_t MaxVersionSize = 4;
- static constexpr size_t MaxStatusSize = 3;
- static constexpr size_t MaxMessageSize = 1024;
- static constexpr size_t MaxHeaderSize = 8192;
- static constexpr size_t MaxChunkLengthSize = 8;
- static constexpr size_t MaxChunkSize = 256 * 1024 * 1024;
- static constexpr size_t MaxChunkContentSize = 1 * 1024 * 1024 * 1024;
-
- EParseStage Stage;
- EParseStage LastSuccessStage;
- TStringBuf Line;
- TStringBuf& Header = Line;
- size_t ChunkLength = 0;
- size_t ContentSize = 0;
- TString Content; // body storage
- std::optional<size_t> TotalSize;
-
- THttpParser(const THttpParser& src)
- : HeaderType(src)
- , BufferType(src)
- , Stage(src.Stage)
- , LastSuccessStage(src.LastSuccessStage)
- , Line()
- , Header(Line)
- , ChunkLength(src.ChunkLength)
- , ContentSize(src.ContentSize)
- , Content(src.Content)
- {}
-
- template <typename StringType>
- bool ProcessData(StringType& target, TStringBuf& source, char delim, size_t maxLen) {
- TStringBuf maxSource(source.substr(0, maxLen + 1 - target.size()));
- size_t pos = maxSource.find(delim);
- target += maxSource.substr(0, pos);
- source.Skip(pos);
- if (target.size() > maxLen) {
- Stage = EParseStage::Error;
- return false;
- }
- if (!source.empty() && *source.begin() == delim) {
- source.Skip(1);
- }
- return pos != TStringBuf::npos;
- }
-
- template <typename StringType>
- bool ProcessData(StringType& target, TStringBuf& source, TStringBuf delim, size_t maxLen) {
- if (delim.empty()) {
- return false;
- }
- if (delim.size() == 1) {
- return ProcessData(target, source, delim[0], maxLen);
- }
- if (ProcessData(target, source, delim.back(), maxLen + 1)) {
- for (signed i = delim.size() - 2; i >= 0; --i) {
- TrimEnd(target, delim[i]);
- }
- return true;
- }
- return false;
- }
-
- template <typename StringType>
- bool ProcessData(StringType& target, TStringBuf& source, size_t size) {
- TStringBuf maxSource(source.substr(0, size - target.size()));
- target += maxSource;
- source.Skip(maxSource.size());
- if (target.size() > size && !source.empty()) {
- Stage = EParseStage::Error;
- return false;
- }
- return target.size() == size;
- }
-
- void ProcessHeader(TStringBuf& header) {
- TStringBuf name = header.NextTok(':');
- TrimBegin(name, ' ');
- TStringBuf value = header;
- Trim(value, ' ');
- auto cit = HeaderType::HeadersLocation.find(name);
- if (cit != HeaderType::HeadersLocation.end()) {
- this->*cit->second = value;
- }
- header.Clear();
- }
-
- size_t ParseHex(TStringBuf value) {
- size_t result = 0;
- for (char ch : value) {
- if (ch >= '0' && ch <= '9') {
- result *= 16;
- result += ch - '0';
- } else if (ch >= 'a' && ch <= 'f') {
- result *= 16;
- result += 10 + ch - 'a';
- } else if (ch >= 'A' && ch <= 'F') {
- result *= 16;
- result += 10 + ch - 'A';
- } else if (ch == ';') {
- break;
- } else if (isspace(ch)) {
- continue;
- } else {
- Stage = EParseStage::Error;
- return 0;
- }
- }
- return result;
- }
-
- void Advance(size_t len);
- void ConnectionClosed();
-
- size_t GetBodySizeFromTotalSize() const {
- return TotalSize.value() - (HeaderType::Headers.end() - BufferType::Data());
- }
-
- void Clear() {
- BufferType::Clear();
- HeaderType::Clear();
- Stage = GetInitialStage();
- Line.Clear();
- Content.clear();
- }
-
- bool IsReady() const {
- return Stage == EParseStage::Done;
- }
-
- bool IsError() const {
- return Stage == EParseStage::Error;
- }
-
- TStringBuf GetErrorText() const {
- switch (LastSuccessStage) {
- case EParseStage::Method:
- return "Invalid http method";
- case EParseStage::URL:
- return "Invalid url";
- case EParseStage::Protocol:
- return "Invalid http protocol";
- case EParseStage::Version:
- return "Invalid http version";
- case EParseStage::Status:
- return "Invalid http status";
- case EParseStage::Message:
- return "Invalid http message";
- case EParseStage::Header:
- return "Invalid http header";
- case EParseStage::Body:
- return "Invalid content body";
- case EParseStage::ChunkLength:
- case EParseStage::ChunkData:
- return "Broken chunked data";
- case EParseStage::Done:
- return "Everything is fine";
- case EParseStage::Error:
- return "Error on error"; // wat? ...because we don't want to include default label here
- }
- }
-
- bool IsDone() const {
- return IsReady() || IsError();
- }
-
- bool HaveBody() const;
-
- bool EnsureEnoughSpaceAvailable(size_t need = BufferType::BUFFER_MIN_STEP) {
- bool result = BufferType::EnsureEnoughSpaceAvailable(need);
- if (!result && !BufferType::Empty()) {
- Reparse();
- }
- return true;
- }
-
- void Reparse() {
- size_t size = BufferType::Size();
- Clear();
- Advance(size);
- }
-
- TStringBuf GetRawData() const {
- return TStringBuf(BufferType::Data(), BufferType::Size());
- }
-
- TString GetObfuscatedData() const {
- THeaders headers(HeaderType::Headers);
- TStringBuf authorization(headers["Authorization"]);
- TStringBuf cookie(headers["Cookie"]);
- TStringBuf x_ydb_auth_ticket(headers["x-ydb-auth-ticket"]);
- TStringBuf x_yacloud_subjecttoken(headers["x-yacloud-subjecttoken"]);
- TString data(GetRawData());
- if (!authorization.empty()) {
- auto pos = data.find(authorization);
- if (pos != TString::npos) {
- data.replace(pos, authorization.size(), TString("<obfuscated>"));
- }
- }
- if (!cookie.empty()) {
- auto pos = data.find(cookie);
- if (pos != TString::npos) {
- data.replace(pos, cookie.size(), TString("<obfuscated>"));
- }
- }
- if (!x_ydb_auth_ticket.empty()) {
- auto pos = data.find(x_ydb_auth_ticket);
- if (pos != TString::npos) {
- data.replace(pos, x_ydb_auth_ticket.size(), TString("<obfuscated>"));
- }
- }
- if (!x_yacloud_subjecttoken.empty()) {
- auto pos = data.find(x_yacloud_subjecttoken);
- if (pos != TString::npos) {
- data.replace(pos, x_yacloud_subjecttoken.size(), TString("<obfuscated>"));
- }
- }
- return data;
- }
-
- static EParseStage GetInitialStage();
-
- THttpParser()
- : Stage(GetInitialStage())
- , LastSuccessStage(Stage)
- {}
-
- THttpParser(TStringBuf data)
- : Stage(GetInitialStage())
- , LastSuccessStage(Stage)
- {
- BufferType::Assign(data.data(), data.size());
- BufferType::Clear(); // reset position to 0
- TotalSize = data.size();
- Advance(data.size());
- }
-};
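A sketch of driving THttpParser directly through the data-taking constructor above; the request line and headers are illustrative, and the actual parsing (Advance) is implemented in http.cpp:

    NHttp::THttpParser<NHttp::THttpRequest, NHttp::TSocketBuffer> parser(
        "GET /index.html HTTP/1.1\r\nHost: example.com\r\nConnection: close\r\n\r\n");
    if (parser.IsReady()) {                 // parsing reached EParseStage::Done
        TStringBuf method = parser.Method;  // "GET"
        TStringBuf host = parser.Host;      // "example.com"
    } else if (parser.IsError()) {
        TStringBuf reason = parser.GetErrorText();
    }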
-
-template <typename HeaderType, typename BufferType>
-class THttpRenderer : public HeaderType, public BufferType {
-public:
- enum class ERenderStage {
- Init,
- Header,
- Body,
- Done,
- Error,
- };
-
- ERenderStage Stage = ERenderStage::Init;
- TString Content; // body storage
-
- //THttpRenderer(TStringBuf method, TStringBuf url, TStringBuf protocol, TStringBuf version); // request
- void InitRequest(TStringBuf method, TStringBuf url, TStringBuf protocol, TStringBuf version) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Init);
- AppendParsedValue<&THttpRequest::Method>(method);
- Append(' ');
- AppendParsedValue<&THttpRequest::URL>(url);
- Append(' ');
- AppendParsedValue<&THttpRequest::Protocol>(protocol);
- Append('/');
- AppendParsedValue<&THttpRequest::Version>(version);
- Append("\r\n");
- Stage = ERenderStage::Header;
- HeaderType::Headers = TStringBuf(BufferType::Pos(), size_t(0));
- }
-
- //THttpRenderer(TStringBuf protocol, TStringBuf version, TStringBuf status, TStringBuf message); // response
- void InitResponse(TStringBuf protocol, TStringBuf version, TStringBuf status, TStringBuf message) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Init);
- AppendParsedValue<&THttpResponse::Protocol>(protocol);
- Append('/');
- AppendParsedValue<&THttpResponse::Version>(version);
- Append(' ');
- AppendParsedValue<&THttpResponse::Status>(status);
- Append(' ');
- AppendParsedValue<&THttpResponse::Message>(message);
- Append("\r\n");
- Stage = ERenderStage::Header;
- HeaderType::Headers = TStringBuf(BufferType::Pos(), size_t(0));
- }
-
- void Append(TStringBuf text) {
- EnsureEnoughSpaceAvailable(text.size());
- BufferType::Append(text.data(), text.size());
- }
-
- void Append(char c) {
- EnsureEnoughSpaceAvailable(sizeof(c));
- BufferType::Append(c);
- }
-
- template <TStringBuf HeaderType::* string>
- void AppendParsedValue(TStringBuf value) {
- Append(value);
- static_cast<HeaderType*>(this)->*string = TStringBuf(BufferType::Pos() - value.size(), value.size());
- }
-
- template <TStringBuf HeaderType::* name>
- void Set(TStringBuf value) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Header);
- Append(HeaderType::template GetName<name>());
- Append(": ");
- AppendParsedValue<name>(value);
- Append("\r\n");
- HeaderType::Headers = TStringBuf(HeaderType::Headers.Data(), BufferType::Pos() - HeaderType::Headers.Data());
- }
-
- void Set(TStringBuf name, TStringBuf value) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Header);
- Append(name);
- Append(": ");
- auto data = BufferType::Pos();
- Append(value);
- auto cit = HeaderType::HeadersLocation.find(name);
- if (cit != HeaderType::HeadersLocation.end()) {
- (this->*cit->second) = TStringBuf(data, BufferType::Pos());
- }
- Append("\r\n");
- HeaderType::Headers = TStringBuf(HeaderType::Headers.Data(), BufferType::Pos() - HeaderType::Headers.Data());
- }
-
- void Set(const THeaders& headers) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Header);
- for (const auto& [name, value] : headers.Headers) {
- Set(name, value);
- }
- HeaderType::Headers = TStringBuf(HeaderType::Headers.Data(), BufferType::Pos() - HeaderType::Headers.Data());
- }
-
- static constexpr TStringBuf ALLOWED_CONTENT_ENCODINGS[] = {"deflate"};
-
- void SetContentEncoding(TStringBuf contentEncoding) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Header);
- if (Count(ALLOWED_CONTENT_ENCODINGS, contentEncoding) != 0) {
- Set("Content-Encoding", contentEncoding);
- }
- }
-
- void FinishHeader() {
- Append("\r\n");
- HeaderType::Headers = TStringBuf(HeaderType::Headers.Data(), BufferType::Pos() - HeaderType::Headers.Data());
- Stage = ERenderStage::Body;
- }
-
- void SetBody(TStringBuf body) {
- Y_DEBUG_ABORT_UNLESS(Stage == ERenderStage::Header);
- if (HeaderType::ContentLength.empty()) {
- Set<&HeaderType::ContentLength>(ToString(body.size()));
- }
- FinishHeader();
- AppendParsedValue<&HeaderType::Body>(body);
- Stage = ERenderStage::Done;
- }
-
- void FinishBody() {
- Stage = ERenderStage::Done;
- }
-
- bool IsDone() const {
- return Stage == ERenderStage::Done;
- }
-
- void Finish() {
- switch (Stage) {
- case ERenderStage::Header:
- FinishHeader();
- FinishBody();
- break;
- case ERenderStage::Body:
- FinishBody();
- break;
- default:
- break;
- }
- }
-
- bool EnsureEnoughSpaceAvailable(size_t need = BufferType::BUFFER_MIN_STEP) {
- bool result = BufferType::EnsureEnoughSpaceAvailable(need);
- if (!result && !BufferType::Empty()) {
- Reparse();
- }
- return true;
- }
-
- void Clear() {
- BufferType::Clear();
- HeaderType::Clear();
- }
-
- void Reparse() {
-        // move-based round trip: hand the buffer to a temporary parser, reparse, then move the results back
- size_t size = BufferType::Size();
- THttpParser<HeaderType, BufferType> parser;
- // move the buffer to parser
- static_cast<BufferType&>(parser) = std::move(static_cast<BufferType&>(*this));
- // reparse
- parser.Clear();
- parser.Advance(size);
- // move buffer and result back
- bool needReassignBody = (parser.Body.data() == parser.Content.data());
- static_cast<HeaderType&>(*this) = std::move(static_cast<HeaderType&>(parser));
- static_cast<BufferType&>(*this) = std::move(static_cast<BufferType&>(parser));
- if (needReassignBody) {
- Content = std::move(parser.Content);
- HeaderType::Body = Content;
- }
- switch (parser.Stage) {
- case THttpParser<HeaderType, BufferType>::EParseStage::Method:
- case THttpParser<HeaderType, BufferType>::EParseStage::URL:
- case THttpParser<HeaderType, BufferType>::EParseStage::Protocol:
- case THttpParser<HeaderType, BufferType>::EParseStage::Version:
- case THttpParser<HeaderType, BufferType>::EParseStage::Status:
- case THttpParser<HeaderType, BufferType>::EParseStage::Message:
- Stage = ERenderStage::Init;
- break;
- case THttpParser<HeaderType, BufferType>::EParseStage::Header:
- Stage = ERenderStage::Header;
- break;
- case THttpParser<HeaderType, BufferType>::EParseStage::Body:
- case THttpParser<HeaderType, BufferType>::EParseStage::ChunkLength:
- case THttpParser<HeaderType, BufferType>::EParseStage::ChunkData:
- Stage = ERenderStage::Body;
- break;
- case THttpParser<HeaderType, BufferType>::EParseStage::Done:
- Stage = ERenderStage::Done;
- break;
- case THttpParser<HeaderType, BufferType>::EParseStage::Error:
- Stage = ERenderStage::Error;
- break;
- }
- Y_ABORT_UNLESS(size == BufferType::Size());
- }
-
- TStringBuf GetRawData() const {
- return TStringBuf(BufferType::Data(), BufferType::Size());
- }
-};
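A sketch of rendering a request by hand with THttpRenderer; the method, path and header values are placeholders. SetBody fills in Content-Length and closes the header section:

    NHttp::THttpRenderer<NHttp::THttpRequest, NHttp::TSocketBuffer> renderer;
    renderer.InitRequest("POST", "/upload", "HTTP", "1.1");
    renderer.Set("Host", "example.com");
    renderer.Set<&NHttp::THttpRequest::ContentType>("text/plain");
    renderer.SetBody("hello");                // sets "Content-Length: 5" and finishes the header
    TStringBuf wire = renderer.GetRawData();  // "POST /upload HTTP/1.1\r\n..."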
-
-template <>
-template <>
-inline void THttpRenderer<THttpResponse, TSocketBuffer>::Set<&THttpResponse::Body>(TStringBuf value) {
- SetBody(value);
-}
-
-template <>
-template <>
-inline void THttpRenderer<THttpRequest, TSocketBuffer>::Set<&THttpRequest::Body>(TStringBuf value) {
- SetBody(value);
-}
-
-template <>
-template <>
-inline void THttpRenderer<THttpResponse, TSocketBuffer>::Set<&THttpResponse::ContentEncoding>(TStringBuf value) {
- SetContentEncoding(value);
-}
-
-struct THttpEndpointInfo {
- TString WorkerName;
- bool Secure = false;
-    const std::vector<TString> CompressContentTypes; // content types that will be compressed automatically in responses
-
- THttpEndpointInfo() = default;
-
-protected:
- THttpEndpointInfo(std::vector<TString> compressContentTypes)
- : CompressContentTypes(std::move(compressContentTypes))
- {}
-};
-
-class THttpIncomingRequest;
-using THttpIncomingRequestPtr = TIntrusivePtr<THttpIncomingRequest>;
-
-class THttpOutgoingResponse;
-using THttpOutgoingResponsePtr = TIntrusivePtr<THttpOutgoingResponse>;
-
-class THttpIncomingRequest :
- public THttpParser<THttpRequest, TSocketBuffer>,
- public TRefCounted<THttpIncomingRequest, TAtomicCounter> {
-public:
- std::shared_ptr<THttpEndpointInfo> Endpoint;
- THttpConfig::SocketAddressType Address;
- THPTimer Timer;
-
- THttpIncomingRequest()
- : Endpoint(std::make_shared<THttpEndpointInfo>())
- {}
-
- THttpIncomingRequest(std::shared_ptr<THttpEndpointInfo> endpoint, const THttpConfig::SocketAddressType& address)
- : Endpoint(std::move(endpoint))
- , Address(address)
- {}
-
- THttpIncomingRequest(TStringBuf content, std::shared_ptr<THttpEndpointInfo> endpoint, const THttpConfig::SocketAddressType& address)
- : THttpParser(content)
- , Endpoint(std::move(endpoint))
- , Address(address)
- {}
-
- bool IsConnectionClose() const {
- if (Connection.empty()) {
- return Version == "1.0";
- } else {
- return TEqNoCase()(Connection, "close");
- }
- }
-
- TStringBuf GetConnection() const {
- if (!Connection.empty()) {
- if (TEqNoCase()(Connection, "keep-alive")) {
- return "keep-alive";
- }
- if (TEqNoCase()(Connection, "close")) {
- return "close";
- }
- }
- return Version == "1.0" ? "close" : "keep-alive";
- }
-
- THttpOutgoingResponsePtr CreateResponseOK(TStringBuf body, TStringBuf contentType = "text/html", TInstant lastModified = TInstant());
- THttpOutgoingResponsePtr CreateResponseString(TStringBuf data);
- THttpOutgoingResponsePtr CreateResponseBadRequest(TStringBuf html = TStringBuf(), TStringBuf contentType = "text/html"); // 400
- THttpOutgoingResponsePtr CreateResponseNotFound(TStringBuf html = TStringBuf(), TStringBuf contentType = "text/html"); // 404
- THttpOutgoingResponsePtr CreateResponseServiceUnavailable(TStringBuf html = TStringBuf(), TStringBuf contentType = "text/html"); // 503
- THttpOutgoingResponsePtr CreateResponseGatewayTimeout(TStringBuf html = TStringBuf(), TStringBuf contentType = "text/html"); // 504
- THttpOutgoingResponsePtr CreateResponse(TStringBuf status, TStringBuf message);
- THttpOutgoingResponsePtr CreateResponse(TStringBuf status, TStringBuf message, const THeaders& headers);
- THttpOutgoingResponsePtr CreateResponse(TStringBuf status, TStringBuf message, const THeaders& headers, TStringBuf body);
- THttpOutgoingResponsePtr CreateResponse(
- TStringBuf status,
- TStringBuf message,
- TStringBuf contentType,
- TStringBuf body = TStringBuf(),
- TInstant lastModified = TInstant());
-
- THttpOutgoingResponsePtr CreateIncompleteResponse(TStringBuf status, TStringBuf message, const THeaders& headers = {});
- THttpOutgoingResponsePtr CreateIncompleteResponse(TStringBuf status, TStringBuf message, const THeaders& headers, TStringBuf body);
-
- THttpIncomingRequestPtr Duplicate();
-
-private:
- THttpOutgoingResponsePtr ConstructResponse(TStringBuf status, TStringBuf message);
- void FinishResponse(THttpOutgoingResponsePtr& response, TStringBuf body = TStringBuf());
-};
-
-class THttpIncomingResponse;
-using THttpIncomingResponsePtr = TIntrusivePtr<THttpIncomingResponse>;
-
-class THttpOutgoingRequest;
-using THttpOutgoingRequestPtr = TIntrusivePtr<THttpOutgoingRequest>;
-
-class THttpIncomingResponse :
- public THttpParser<THttpResponse, TSocketBuffer>,
- public TRefCounted<THttpIncomingResponse, TAtomicCounter> {
-public:
- THttpIncomingResponse(THttpOutgoingRequestPtr request);
-
- THttpOutgoingRequestPtr GetRequest() const {
- return Request;
- }
-
- THttpIncomingResponsePtr Duplicate(THttpOutgoingRequestPtr request);
- THttpOutgoingResponsePtr Reverse(THttpIncomingRequestPtr request);
-
-protected:
- THttpOutgoingRequestPtr Request;
-};
-
-class THttpOutgoingRequest :
- public THttpRenderer<THttpRequest, TSocketBuffer>,
- public TRefCounted<THttpOutgoingRequest, TAtomicCounter> {
-public:
- THPTimer Timer;
- bool Secure = false;
-
- THttpOutgoingRequest() = default;
- THttpOutgoingRequest(TStringBuf method, TStringBuf url, TStringBuf protocol, TStringBuf version);
- THttpOutgoingRequest(TStringBuf method, TStringBuf scheme, TStringBuf host, TStringBuf uri, TStringBuf protocol, TStringBuf version);
- static THttpOutgoingRequestPtr CreateRequestString(TStringBuf data);
- static THttpOutgoingRequestPtr CreateRequestString(const TString& data);
- static THttpOutgoingRequestPtr CreateRequestGet(TStringBuf url);
- static THttpOutgoingRequestPtr CreateRequestGet(TStringBuf host, TStringBuf uri); // http only
- static THttpOutgoingRequestPtr CreateRequestPost(TStringBuf url, TStringBuf contentType = {}, TStringBuf body = {});
- static THttpOutgoingRequestPtr CreateRequestPost(TStringBuf host, TStringBuf uri, TStringBuf contentType, TStringBuf body); // http only
- static THttpOutgoingRequestPtr CreateRequest(TStringBuf method, TStringBuf url, TStringBuf contentType = TStringBuf(), TStringBuf body = TStringBuf());
- static THttpOutgoingRequestPtr CreateHttpRequest(TStringBuf method, TStringBuf host, TStringBuf uri, TStringBuf contentType = TStringBuf(), TStringBuf body = TStringBuf());
- THttpOutgoingRequestPtr Duplicate();
-};
-
-class THttpOutgoingResponse :
- public THttpRenderer<THttpResponse, TSocketBuffer>,
- public TRefCounted<THttpOutgoingResponse, TAtomicCounter> {
-public:
- THttpOutgoingResponse(THttpIncomingRequestPtr request);
- THttpOutgoingResponse(THttpIncomingRequestPtr request, TStringBuf protocol, TStringBuf version, TStringBuf status, TStringBuf message);
-
- bool IsConnectionClose() const {
- if (!Connection.empty()) {
- return TEqNoCase()(Connection, "close");
- } else {
- return Request->IsConnectionClose();
- }
- }
-
- bool IsNeedBody() const {
- return GetRequest()->Method != "HEAD" && Status != "204";
- }
-
- bool EnableCompression() {
- TStringBuf acceptEncoding = Request->AcceptEncoding;
- std::vector<TStringBuf> encodings;
- TStringBuf encoding;
- while (acceptEncoding.NextTok(',', encoding)) {
- Trim(encoding, ' ');
- if (Count(ALLOWED_CONTENT_ENCODINGS, encoding) != 0) {
- encodings.push_back(encoding);
- }
- }
- if (!encodings.empty()) {
- // TODO: prioritize encodings
- SetContentEncoding(encodings.front());
- return true;
- }
- return false;
- }
-
- void SetBody(TStringBuf body) {
- if (ContentEncoding == "deflate") {
- TString compressedBody = CompressDeflate(body);
- THttpRenderer<THttpResponse, TSocketBuffer>::SetBody(compressedBody);
- Body = Content = body;
- } else {
- THttpRenderer<THttpResponse, TSocketBuffer>::SetBody(body);
- }
- }
-
- void SetBody(const TString& body) {
- if (ContentEncoding == "deflate") {
- TString compressedBody = CompressDeflate(body);
- THttpRenderer<THttpResponse, TSocketBuffer>::SetBody(compressedBody);
- Body = Content = body;
- } else {
- THttpRenderer<THttpResponse, TSocketBuffer>::SetBody(body);
- }
- }
-
- THttpIncomingRequestPtr GetRequest() const {
- return Request;
- }
-
- THttpOutgoingResponsePtr Duplicate(THttpIncomingRequestPtr request);
-
-// temporarily accessible for cleanup
-//protected:
- THttpIncomingRequestPtr Request;
- std::unique_ptr<TSensors> Sensors;
-};
-
-}
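A hedged illustration of the negotiation THttpOutgoingResponse::EnableCompression performs on Accept-Encoding: the first client-offered encoding the server supports wins, and ALLOWED_CONTENT_ENCODINGS currently contains only "deflate". PickEncoding is a standalone helper written here for illustration, not part of the library:

    TStringBuf PickEncoding(TStringBuf acceptEncoding) {
        TStringBuf encoding;
        while (acceptEncoding.NextTok(',', encoding)) {
            NHttp::Trim(encoding, ' ');
            if (encoding == "deflate") {  // the only allowed content encoding
                return encoding;
            }
        }
        return TStringBuf();              // no supported encoding offered
    }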
diff --git a/library/cpp/actors/http/http_cache.cpp b/library/cpp/actors/http/http_cache.cpp
deleted file mode 100644
index d2856f70c2..0000000000
--- a/library/cpp/actors/http/http_cache.cpp
+++ /dev/null
@@ -1,608 +0,0 @@
-#include "http.h"
-#include "http_proxy.h"
-#include "http_cache.h"
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/http/http.h>
-#include <library/cpp/digest/md5/md5.h>
-#include <util/digest/multi.h>
-#include <util/generic/queue.h>
-#include <util/string/cast.h>
-
-namespace NHttp {
-
-static bool StatusSuccess(const TStringBuf& status) {
- return status.StartsWith("2");
-}
-
-class THttpOutgoingCacheActor : public NActors::TActorBootstrapped<THttpOutgoingCacheActor>, THttpConfig {
-public:
- using TBase = NActors::TActorBootstrapped<THttpOutgoingCacheActor>;
- NActors::TActorId HttpProxyId;
- TGetCachePolicy GetCachePolicy;
- static constexpr TDuration RefreshTimeout = TDuration::Seconds(1);
-
- struct TCacheKey {
- TString Host;
- TString URL;
- TString Headers;
-
- operator size_t() const {
- return MultiHash(Host, URL, Headers);
- }
-
- TString GetId() const {
- return MD5::Calc(Host + ':' + URL + ':' + Headers);
- }
- };
-
- struct TCacheRecord {
- TInstant RefreshTime;
- TInstant DeathTime;
- TCachePolicy CachePolicy;
- NHttp::THttpOutgoingRequestPtr Request;
- NHttp::THttpOutgoingRequestPtr OutgoingRequest;
- TDuration Timeout;
- NHttp::THttpIncomingResponsePtr Response;
- TString Error;
- TVector<NHttp::TEvHttpProxy::TEvHttpOutgoingRequest::TPtr> Waiters;
-
- TCacheRecord(const TCachePolicy cachePolicy)
- : CachePolicy(cachePolicy)
- {}
-
- bool IsValid() const {
- return Response != nullptr || !Error.empty();
- }
-
- void UpdateResponse(NHttp::THttpIncomingResponsePtr response, const TString& error, TInstant now) {
- if (error.empty() || Response == nullptr || !CachePolicy.KeepOnError) {
- Response = response;
- Error = error;
- }
- RefreshTime = now + CachePolicy.TimeToRefresh;
- if (CachePolicy.PaceToRefresh) {
- RefreshTime += TDuration::MilliSeconds(RandomNumber<ui64>() % CachePolicy.PaceToRefresh.MilliSeconds());
- }
- }
-
- TString GetName() const {
- return TStringBuilder() << (Request->Secure ? "https://" : "http://") << Request->Host << Request->URL;
- }
- };
-
- struct TRefreshRecord {
- TCacheKey Key;
- TInstant RefreshTime;
-
- bool operator <(const TRefreshRecord& b) const {
- return RefreshTime > b.RefreshTime;
- }
- };
-
- THashMap<TCacheKey, TCacheRecord> Cache;
- TPriorityQueue<TRefreshRecord> RefreshQueue;
- THashMap<THttpOutgoingRequest*, TCacheKey> OutgoingRequests;
-
- THttpOutgoingCacheActor(const NActors::TActorId& httpProxyId, TGetCachePolicy getCachePolicy)
- : HttpProxyId(httpProxyId)
- , GetCachePolicy(std::move(getCachePolicy))
- {}
-
- static constexpr char ActorName[] = "HTTP_OUT_CACHE_ACTOR";
-
- void Bootstrap(const NActors::TActorContext&) {
- //
- Become(&THttpOutgoingCacheActor::StateWork, RefreshTimeout, new NActors::TEvents::TEvWakeup());
- }
-
- static TString GetCacheHeadersKey(const NHttp::THttpOutgoingRequest* request, const TCachePolicy& policy) {
- TStringBuilder key;
- if (!policy.HeadersToCacheKey.empty()) {
- NHttp::THeaders headers(request->Headers);
- for (const TString& header : policy.HeadersToCacheKey) {
- key << headers[header];
- }
- }
- return key;
- }
-
- static TCacheKey GetCacheKey(const NHttp::THttpOutgoingRequest* request, const TCachePolicy& policy) {
- return { ToString(request->Host), ToString(request->URL), GetCacheHeadersKey(request, policy) };
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpOutgoingResponse::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvAddListeningPort::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvRegisterHandler::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingResponse::TPtr event, const NActors::TActorContext& ctx) {
- NHttp::THttpOutgoingRequestPtr request(event->Get()->Request);
- NHttp::THttpIncomingResponsePtr response(event->Get()->Response);
- auto itRequests = OutgoingRequests.find(request.Get());
- if (itRequests == OutgoingRequests.end()) {
- LOG_ERROR_S(ctx, HttpLog, "Cache received response to unknown request " << request->Host << request->URL);
- return;
- }
- auto key = itRequests->second;
- OutgoingRequests.erase(itRequests);
- auto it = Cache.find(key);
- if (it == Cache.end()) {
- LOG_ERROR_S(ctx, HttpLog, "Cache received response to unknown cache key " << request->Host << request->URL);
- return;
- }
- TCacheRecord& cacheRecord = it->second;
- cacheRecord.OutgoingRequest.Reset();
- for (auto& waiter : cacheRecord.Waiters) {
- NHttp::THttpIncomingResponsePtr response2;
- TString error2;
- if (response != nullptr) {
- response2 = response->Duplicate(waiter->Get()->Request);
- }
- if (!event->Get()->Error.empty()) {
- error2 = event->Get()->Error;
- }
- ctx.Send(waiter->Sender, new NHttp::TEvHttpProxy::TEvHttpIncomingResponse(waiter->Get()->Request, response2, error2));
- }
- cacheRecord.Waiters.clear();
- TString error;
- if (event->Get()->Error.empty()) {
- if (event->Get()->Response != nullptr && !StatusSuccess(event->Get()->Response->Status)) {
- error = event->Get()->Response->Message;
- }
- } else {
- error = event->Get()->Error;
- }
- if (!error.empty()) {
- LOG_WARN_S(ctx, HttpLog, "Error from " << cacheRecord.GetName() << ": " << error);
- }
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingUpdate " << cacheRecord.GetName());
- cacheRecord.UpdateResponse(response, event->Get()->Error, ctx.Now());
- RefreshQueue.push({it->first, it->second.RefreshTime});
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingSchedule " << cacheRecord.GetName() << " at " << cacheRecord.RefreshTime << " until " << cacheRecord.DeathTime);
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpOutgoingRequest::TPtr event, const NActors::TActorContext& ctx) {
- const NHttp::THttpOutgoingRequest* request = event->Get()->Request.Get();
- auto policy = GetCachePolicy(request);
- if (policy.TimeToExpire == TDuration()) {
- ctx.Send(event->Forward(HttpProxyId));
- return;
- }
- auto key = GetCacheKey(request, policy);
- auto it = Cache.find(key);
- if (it != Cache.end()) {
- if (it->second.IsValid()) {
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingRespond "
- << it->second.GetName()
- << " ("
- << ((it->second.Response != nullptr) ? ToString(it->second.Response->Size()) : TString("error"))
- << ")");
- NHttp::THttpIncomingResponsePtr response = it->second.Response;
- if (response != nullptr) {
- response = response->Duplicate(event->Get()->Request);
- }
- ctx.Send(event->Sender,
- new NHttp::TEvHttpProxy::TEvHttpIncomingResponse(event->Get()->Request,
- response,
- it->second.Error));
- it->second.DeathTime = ctx.Now() + it->second.CachePolicy.TimeToExpire; // prolong active cache items
- return;
- }
- } else {
- it = Cache.emplace(key, policy).first;
- it->second.Request = event->Get()->Request;
- it->second.Timeout = event->Get()->Timeout;
- it->second.OutgoingRequest = it->second.Request->Duplicate();
- OutgoingRequests[it->second.OutgoingRequest.Get()] = key;
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingInitiate " << it->second.GetName());
- ctx.Send(HttpProxyId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(it->second.OutgoingRequest, it->second.Timeout));
- }
- it->second.DeathTime = ctx.Now() + it->second.CachePolicy.TimeToExpire;
- it->second.Waiters.emplace_back(std::move(event));
- }
-
- void HandleRefresh(const NActors::TActorContext& ctx) {
- while (!RefreshQueue.empty() && RefreshQueue.top().RefreshTime <= ctx.Now()) {
- TRefreshRecord rrec = RefreshQueue.top();
- RefreshQueue.pop();
- auto it = Cache.find(rrec.Key);
- if (it != Cache.end()) {
- if (it->second.DeathTime > ctx.Now()) {
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingRefresh " << it->second.GetName());
- it->second.OutgoingRequest = it->second.Request->Duplicate();
- OutgoingRequests[it->second.OutgoingRequest.Get()] = it->first;
- ctx.Send(HttpProxyId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(it->second.OutgoingRequest, it->second.Timeout));
- } else {
- LOG_DEBUG_S(ctx, HttpLog, "OutgoingForget " << it->second.GetName());
- if (it->second.OutgoingRequest) {
- OutgoingRequests.erase(it->second.OutgoingRequest.Get());
- }
- Cache.erase(it);
- }
- }
- }
- ctx.Schedule(RefreshTimeout, new NActors::TEvents::TEvWakeup());
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingResponse, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpOutgoingRequest, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvAddListeningPort, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvRegisterHandler, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpOutgoingResponse, Handle);
- CFunc(NActors::TEvents::TSystem::Wakeup, HandleRefresh);
- }
- }
-};
-
-const TDuration THttpOutgoingCacheActor::RefreshTimeout;
-
-class THttpIncomingCacheActor : public NActors::TActorBootstrapped<THttpIncomingCacheActor>, THttpConfig {
-public:
- using TBase = NActors::TActorBootstrapped<THttpIncomingCacheActor>;
- NActors::TActorId HttpProxyId;
- TGetCachePolicy GetCachePolicy;
- static constexpr TDuration RefreshTimeout = TDuration::Seconds(1);
- THashMap<TString, TActorId> Handlers;
-
- struct TCacheKey {
- TString Host;
- TString URL;
- TString Headers;
-
- operator size_t() const {
- return MultiHash(Host, URL, Headers);
- }
-
- TString GetId() const {
- return MD5::Calc(Host + ':' + URL + ':' + Headers);
- }
- };
-
- struct TCacheRecord {
- TInstant RefreshTime;
- TInstant DeathTime;
- TCachePolicy CachePolicy;
- TString CacheId;
- NHttp::THttpIncomingRequestPtr Request;
- TDuration Timeout;
- NHttp::THttpOutgoingResponsePtr Response;
- TVector<NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr> Waiters;
- ui32 Retries = 0;
- bool Enqueued = false;
-
- TCacheRecord(const TCachePolicy cachePolicy)
- : CachePolicy(cachePolicy)
- {}
-
- bool IsValid() const {
- return Response != nullptr;
- }
-
- void InitRequest(NHttp::THttpIncomingRequestPtr request) {
- Request = request;
- if (CachePolicy.TimeToExpire) {
- DeathTime = NActors::TlsActivationContext->Now() + CachePolicy.TimeToExpire;
- }
- }
-
- void UpdateResponse(NHttp::THttpOutgoingResponsePtr response, const TString& error, TInstant now) {
- if (error.empty() || !CachePolicy.KeepOnError) {
- Response = response;
- }
- Retries = 0;
- if (CachePolicy.TimeToRefresh) {
- RefreshTime = now + CachePolicy.TimeToRefresh;
- if (CachePolicy.PaceToRefresh) {
- RefreshTime += TDuration::MilliSeconds(RandomNumber<ui64>() % CachePolicy.PaceToRefresh.MilliSeconds());
- }
- }
- }
-
- void UpdateExpireTime() {
- if (CachePolicy.TimeToExpire) {
- DeathTime = NActors::TlsActivationContext->Now() + CachePolicy.TimeToExpire;
- }
- }
-
- TString GetName() const {
- return TStringBuilder() << (Request->Endpoint->Secure ? "https://" : "http://") << Request->Host << Request->URL
- << " (" << CacheId << ")";
- }
- };
-
- struct TRefreshRecord {
- TCacheKey Key;
- TInstant RefreshTime;
-
- bool operator <(const TRefreshRecord& b) const {
- return RefreshTime > b.RefreshTime;
- }
- };
-
- THashMap<TCacheKey, TCacheRecord> Cache;
- TPriorityQueue<TRefreshRecord> RefreshQueue;
- THashMap<THttpIncomingRequest*, TCacheKey> IncomingRequests;
-
- THttpIncomingCacheActor(const NActors::TActorId& httpProxyId, TGetCachePolicy getCachePolicy)
- : HttpProxyId(httpProxyId)
- , GetCachePolicy(std::move(getCachePolicy))
- {}
-
- static constexpr char ActorName[] = "HTTP_IN_CACHE_ACTOR";
-
- void Bootstrap(const NActors::TActorContext&) {
- //
- Become(&THttpIncomingCacheActor::StateWork, RefreshTimeout, new NActors::TEvents::TEvWakeup());
- }
-
- static TString GetCacheHeadersKey(const NHttp::THttpIncomingRequest* request, const TCachePolicy& policy) {
- TStringBuilder key;
- if (!policy.HeadersToCacheKey.empty()) {
- NHttp::THeaders headers(request->Headers);
- for (const TString& header : policy.HeadersToCacheKey) {
- key << headers[header];
- }
- }
- return key;
- }
-
- static TCacheKey GetCacheKey(const NHttp::THttpIncomingRequest* request, const TCachePolicy& policy) {
- return { ToString(request->Host), ToString(request->URL), GetCacheHeadersKey(request, policy) };
- }
-
- TActorId GetRequestHandler(NHttp::THttpIncomingRequestPtr request) {
- TStringBuf url = request->URL.Before('?');
- THashMap<TString, TActorId>::iterator it;
- while (!url.empty()) {
- it = Handlers.find(url);
- if (it != Handlers.end()) {
- return it->second;
- } else {
- if (url.EndsWith('/')) {
- url.Trunc(url.size() - 1);
- }
- size_t pos = url.rfind('/');
- if (pos == TStringBuf::npos) {
- break;
- } else {
- url = url.substr(0, pos + 1);
- }
- }
- }
- return {};
- }
-
- void SendCacheRequest(const TCacheKey& cacheKey, TCacheRecord& cacheRecord, const NActors::TActorContext& ctx) {
- cacheRecord.Request = cacheRecord.Request->Duplicate();
- cacheRecord.Request->AcceptEncoding.Clear(); // disable compression
- IncomingRequests[cacheRecord.Request.Get()] = cacheKey;
- TActorId handler = GetRequestHandler(cacheRecord.Request);
- if (handler) {
- Send(handler, new NHttp::TEvHttpProxy::TEvHttpIncomingRequest(cacheRecord.Request));
- } else {
- LOG_ERROR_S(ctx, HttpLog, "Can't find cache handler for " << cacheRecord.GetName());
- }
- }
-
- void DropCacheRecord(THashMap<TCacheKey, TCacheRecord>::iterator it) {
- if (it->second.Request) {
- IncomingRequests.erase(it->second.Request.Get());
- }
- for (auto& waiter : it->second.Waiters) {
- NHttp::THttpOutgoingResponsePtr response;
- response = waiter->Get()->Request->CreateResponseGatewayTimeout("Timeout", "text/plain");
- Send(waiter->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- }
- Cache.erase(it);
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingResponse::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpOutgoingRequest::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvAddListeningPort::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(HttpProxyId));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvRegisterHandler::TPtr event, const NActors::TActorContext& ctx) {
- Handlers[event->Get()->Path] = event->Get()->Handler;
- ctx.Send(HttpProxyId, new NHttp::TEvHttpProxy::TEvRegisterHandler(event->Get()->Path, ctx.SelfID));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpOutgoingResponse::TPtr event, const NActors::TActorContext& ctx) {
- NHttp::THttpIncomingRequestPtr request(event->Get()->Response->GetRequest());
- NHttp::THttpOutgoingResponsePtr response(event->Get()->Response);
- auto itRequests = IncomingRequests.find(request.Get());
- if (itRequests == IncomingRequests.end()) {
- LOG_ERROR_S(ctx, HttpLog, "Cache received response to unknown request " << request->Host << request->URL);
- return;
- }
-
- TCacheKey key = itRequests->second;
- auto it = Cache.find(key);
- if (it == Cache.end()) {
- LOG_ERROR_S(ctx, HttpLog, "Cache received response to unknown cache key " << request->Host << request->URL);
- return;
- }
-
- IncomingRequests.erase(itRequests);
- TCacheRecord& cacheRecord = it->second;
- TStringBuf status;
- TString error;
-
- if (event->Get()->Response != nullptr) {
- status = event->Get()->Response->Status;
- if (!StatusSuccess(status)) {
- error = event->Get()->Response->Message;
- }
- }
- if (cacheRecord.CachePolicy.RetriesCount > 0) {
- auto itStatusToRetry = std::find(cacheRecord.CachePolicy.StatusesToRetry.begin(), cacheRecord.CachePolicy.StatusesToRetry.end(), status);
- if (itStatusToRetry != cacheRecord.CachePolicy.StatusesToRetry.end()) {
- if (cacheRecord.Retries < cacheRecord.CachePolicy.RetriesCount) {
- ++cacheRecord.Retries;
- LOG_WARN_S(ctx, HttpLog, "IncomingRetry " << cacheRecord.GetName() << ": " << status << " " << error);
- SendCacheRequest(key, cacheRecord, ctx);
- return;
- }
- }
- }
- for (auto& waiter : cacheRecord.Waiters) {
- NHttp::THttpOutgoingResponsePtr response2;
- response2 = response->Duplicate(waiter->Get()->Request);
- ctx.Send(waiter->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response2));
- }
- cacheRecord.Waiters.clear();
- if (!error.empty()) {
- LOG_WARN_S(ctx, HttpLog, "Error from " << cacheRecord.GetName() << ": " << error);
- if (!cacheRecord.Response) {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingDiscard " << cacheRecord.GetName());
- DropCacheRecord(it);
- return;
- }
- }
- if (cacheRecord.CachePolicy.TimeToRefresh) {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingUpdate " << cacheRecord.GetName());
- cacheRecord.UpdateResponse(response, error, ctx.Now());
- if (!cacheRecord.Enqueued) {
- RefreshQueue.push({it->first, it->second.RefreshTime});
- cacheRecord.Enqueued = true;
- }
- LOG_DEBUG_S(ctx, HttpLog, "IncomingSchedule " << cacheRecord.GetName() << " at " << cacheRecord.RefreshTime << " until " << cacheRecord.DeathTime);
- } else {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingDrop " << cacheRecord.GetName());
- DropCacheRecord(it);
- }
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr event, const NActors::TActorContext& ctx) {
- const NHttp::THttpIncomingRequest* request = event->Get()->Request.Get();
- TCachePolicy policy = GetCachePolicy(request);
- if (policy.TimeToExpire == TDuration() && policy.RetriesCount == 0) {
- TActorId handler = GetRequestHandler(event->Get()->Request);
- if (handler) {
- ctx.Send(event->Forward(handler));
- }
- return;
- }
- auto key = GetCacheKey(request, policy);
- auto it = Cache.find(key);
- if (it != Cache.end() && !policy.DiscardCache) {
- it->second.UpdateExpireTime();
- if (it->second.IsValid()) {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingRespond "
- << it->second.GetName()
- << " ("
- << ((it->second.Response != nullptr) ? ToString(it->second.Response->Size()) : TString("error"))
- << ")");
- NHttp::THttpOutgoingResponsePtr response = it->second.Response;
- if (response != nullptr) {
- response = response->Duplicate(event->Get()->Request);
- }
- ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- return;
- }
- } else {
- it = Cache.emplace(key, policy).first;
- it->second.CacheId = key.GetId(); // for debugging
- it->second.InitRequest(event->Get()->Request);
- if (policy.DiscardCache) {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingDiscardCache " << it->second.GetName());
- }
- LOG_DEBUG_S(ctx, HttpLog, "IncomingInitiate " << it->second.GetName());
- SendCacheRequest(key, it->second, ctx);
- }
- it->second.Waiters.emplace_back(std::move(event));
- }
-
- void HandleRefresh(const NActors::TActorContext& ctx) {
- while (!RefreshQueue.empty() && RefreshQueue.top().RefreshTime <= ctx.Now()) {
- TRefreshRecord rrec = RefreshQueue.top();
- RefreshQueue.pop();
- auto it = Cache.find(rrec.Key);
- if (it != Cache.end()) {
- it->second.Enqueued = false;
- if (it->second.DeathTime > ctx.Now()) {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingRefresh " << it->second.GetName());
- SendCacheRequest(it->first, it->second, ctx);
- } else {
- LOG_DEBUG_S(ctx, HttpLog, "IncomingForget " << it->second.GetName());
- DropCacheRecord(it);
- }
- }
- }
- ctx.Schedule(RefreshTimeout, new NActors::TEvents::TEvWakeup());
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingResponse, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpOutgoingRequest, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvAddListeningPort, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvRegisterHandler, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- HFunc(NHttp::TEvHttpProxy::TEvHttpOutgoingResponse, Handle);
- CFunc(NActors::TEvents::TSystem::Wakeup, HandleRefresh);
- }
- }
-};
-
-TCachePolicy GetDefaultCachePolicy(const THttpRequest* request, const TCachePolicy& defaultPolicy) {
- TCachePolicy policy = defaultPolicy;
- THeaders headers(request->Headers);
- TStringBuf cacheControl(headers["Cache-Control"]);
- while (TStringBuf cacheItem = cacheControl.NextTok(',')) {
- Trim(cacheItem, ' ');
- if (cacheItem == "no-store" || cacheItem == "no-cache") {
- policy.DiscardCache = true;
- }
- TStringBuf itemName = cacheItem.NextTok('=');
- TrimEnd(itemName, ' ');
- TrimBegin(cacheItem, ' ');
- if (itemName == "max-age") {
- policy.TimeToRefresh = policy.TimeToExpire = TDuration::Seconds(FromString(cacheItem));
- }
- if (itemName == "min-fresh") {
- policy.TimeToRefresh = policy.TimeToExpire = TDuration::Seconds(FromString(cacheItem));
- }
- if (itemName == "stale-if-error") {
- policy.KeepOnError = true;
- }
- }
- return policy;
-}
-
-NActors::IActor* CreateHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy) {
- return new THttpOutgoingCacheActor(httpProxyId, std::move(cachePolicy));
-}
-
-NActors::IActor* CreateOutgoingHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy) {
- return new THttpOutgoingCacheActor(httpProxyId, std::move(cachePolicy));
-}
-
-NActors::IActor* CreateIncomingHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy) {
- return new THttpIncomingCacheActor(httpProxyId, std::move(cachePolicy));
-}
-
-}
diff --git a/library/cpp/actors/http/http_cache.h b/library/cpp/actors/http/http_cache.h
deleted file mode 100644
index ac38bdcac8..0000000000
--- a/library/cpp/actors/http/http_cache.h
+++ /dev/null
@@ -1,27 +0,0 @@
-#pragma once
-#include <library/cpp/actors/core/actor.h>
-#include "http.h"
-
-namespace NHttp {
-
-struct TCachePolicy {
- TDuration TimeToExpire;
- TDuration TimeToRefresh;
- TDuration PaceToRefresh;
- bool KeepOnError = false;
- bool DiscardCache = false;
- TArrayRef<TString> HeadersToCacheKey;
- TArrayRef<TString> StatusesToRetry;
- ui32 RetriesCount = 0;
-
- TCachePolicy() = default;
-};
-
-using TGetCachePolicy = std::function<TCachePolicy(const THttpRequest*)>;
-
-NActors::IActor* CreateHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy);
-NActors::IActor* CreateOutgoingHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy);
-NActors::IActor* CreateIncomingHttpCache(const NActors::TActorId& httpProxyId, TGetCachePolicy cachePolicy);
-TCachePolicy GetDefaultCachePolicy(const THttpRequest* request, const TCachePolicy& policy = TCachePolicy());
-
-}
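
The header above is the whole public surface of the cache: a TCachePolicy record, the TGetCachePolicy callback and three factory functions. A minimal wiring sketch for the outgoing cache follows; it only uses names visible in this diff, except that the actor-system bootstrap calls in the trailing comments (Register/Send) are assumptions about the standard NActors runtime, not part of this library.

    #include <library/cpp/actors/http/http_cache.h>

    // Cache responses for 30 seconds and refresh them every 10, unless the server
    // already asked for something specific via Cache-Control; keep stale entries
    // when the upstream starts failing.
    NHttp::TCachePolicy MyCachePolicy(const NHttp::THttpRequest* request) {
        NHttp::TCachePolicy policy = NHttp::GetDefaultCachePolicy(request);
        if (policy.TimeToExpire == TDuration()) {
            policy.TimeToExpire = TDuration::Seconds(30);
            policy.TimeToRefresh = TDuration::Seconds(10);
        }
        policy.KeepOnError = true;
        return policy;
    }

    // During setup (assumed wiring):
    //   NActors::IActor* cache = NHttp::CreateOutgoingHttpCache(httpProxyId, MyCachePolicy);
    //   const NActors::TActorId cacheId = actorSystem.Register(cache);
    // Actors then send their TEvHttpOutgoingRequest events to cacheId instead of httpProxyId.

A policy of TimeToExpire == TDuration() with RetriesCount == 0 is also the signal the incoming cache actor above uses to bypass caching for a request entirely.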
diff --git a/library/cpp/actors/http/http_compress.cpp b/library/cpp/actors/http/http_compress.cpp
deleted file mode 100644
index b6593fe99d..0000000000
--- a/library/cpp/actors/http/http_compress.cpp
+++ /dev/null
@@ -1,76 +0,0 @@
-#include "http.h"
-
-#include <zlib.h>
-
-namespace NHttp {
-
-TString CompressDeflate(TStringBuf source) {
- int compressionlevel = Z_BEST_COMPRESSION;
- z_stream zs = {};
-
- if (deflateInit(&zs, compressionlevel) != Z_OK) {
- throw yexception() << "deflateInit failed while compressing";
- }
-
- zs.next_in = (Bytef*)source.data();
- zs.avail_in = source.size();
-
- int ret;
- char outbuffer[32768];
- TString result;
-
- // retrieve the compressed bytes blockwise
- do {
- zs.next_out = reinterpret_cast<Bytef*>(outbuffer);
- zs.avail_out = sizeof(outbuffer);
-
- ret = deflate(&zs, Z_FINISH);
-
- if (result.size() < zs.total_out) {
- result.append(outbuffer, zs.total_out - result.size());
- }
- } while (ret == Z_OK);
-
- deflateEnd(&zs);
-
- if (ret != Z_STREAM_END) {
- throw yexception() << "Exception during zlib compression: (" << ret << ") " << zs.msg;
- }
- return result;
-}
-
-TString DecompressDeflate(TStringBuf source) {
- z_stream zs = {};
-
- if (inflateInit(&zs) != Z_OK) {
- throw yexception() << "inflateInit failed while decompressing";
- }
-
- zs.next_in = (Bytef*)source.data();
- zs.avail_in = source.size();
-
- int ret;
- char outbuffer[32768];
- TString result;
-
- // retrieve the decompressed bytes blockwise
- do {
- zs.next_out = reinterpret_cast<Bytef*>(outbuffer);
- zs.avail_out = sizeof(outbuffer);
-
- ret = inflate(&zs, Z_NO_FLUSH);
-
- if (result.size() < zs.total_out) {
- result.append(outbuffer, zs.total_out - result.size());
- }
- } while (ret == Z_OK);
-
- inflateEnd(&zs);
-
- if (ret != Z_STREAM_END) {
- throw yexception() << "Exception during zlib decompression: (" << ret << ") " << zs.msg;
- }
- return result;
-}
-
-}
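
CompressDeflate and DecompressDeflate are exact inverses built on raw zlib streams, and both throw yexception when the stream is broken. A self-contained round-trip check, assuming the two functions are declared through http.h (the only header this file includes):

    #include <library/cpp/actors/http/http.h>
    #include <util/system/yassert.h>

    int main() {
        const TString original(100000, 'x');                       // highly compressible payload
        const TString packed = NHttp::CompressDeflate(original);
        const TString restored = NHttp::DecompressDeflate(packed);
        Y_ABORT_UNLESS(restored == original);                      // lossless round trip
        Y_ABORT_UNLESS(packed.size() < original.size());           // and actually smaller
        return 0;
    }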
diff --git a/library/cpp/actors/http/http_config.h b/library/cpp/actors/http/http_config.h
deleted file mode 100644
index 1a2f8646a3..0000000000
--- a/library/cpp/actors/http/http_config.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-#include <util/network/sock.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-
-class TInet64StreamSocket;
-
-namespace NHttp {
-
-struct THttpConfig {
- static constexpr NActors::NLog::EComponent HttpLog = NActorsServices::EServiceCommon::HTTP;
- static constexpr size_t BUFFER_SIZE = 64 * 1024;
- static constexpr size_t BUFFER_MIN_STEP = 10 * 1024;
- static constexpr int LISTEN_QUEUE = 10;
- static constexpr TDuration SOCKET_TIMEOUT = TDuration::MilliSeconds(60000);
- static constexpr TDuration CONNECTION_TIMEOUT = TDuration::MilliSeconds(60000);
- using SocketType = TInet64StreamSocket;
- using SocketAddressType = std::shared_ptr<ISockAddr>;
-};
-
-}
diff --git a/library/cpp/actors/http/http_proxy.cpp b/library/cpp/actors/http/http_proxy.cpp
deleted file mode 100644
index 74bf497632..0000000000
--- a/library/cpp/actors/http/http_proxy.cpp
+++ /dev/null
@@ -1,364 +0,0 @@
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/monlib/metrics/metric_registry.h>
-#include "http_proxy.h"
-
-namespace NHttp {
-
-class THttpProxy : public NActors::TActorBootstrapped<THttpProxy>, public THttpConfig {
-public:
- IActor* AddListeningPort(TEvHttpProxy::TEvAddListeningPort::TPtr event, const NActors::TActorContext& ctx) {
- IActor* listeningSocket = CreateHttpAcceptorActor(ctx.SelfID, Poller);
- TActorId acceptorId = ctx.Register(listeningSocket);
- ctx.Send(event->Forward(acceptorId));
- Acceptors.emplace_back(acceptorId);
- return listeningSocket;
- }
-
- IActor* AddOutgoingConnection(bool secure, const NActors::TActorContext& ctx) {
- IActor* connectionSocket = CreateOutgoingConnectionActor(ctx.SelfID, secure, Poller);
- TActorId connectionId = ctx.Register(connectionSocket);
- Connections.emplace(connectionId);
- return connectionSocket;
- }
-
- void Bootstrap(const NActors::TActorContext& ctx) {
- Poller = ctx.Register(NActors::CreatePollerActor());
- Become(&THttpProxy::StateWork);
- }
-
- THttpProxy(std::weak_ptr<NMonitoring::TMetricRegistry> registry)
- : Registry(std::move(registry))
- {}
-
- static constexpr char ActorName[] = "HTTP_PROXY_ACTOR";
-
-protected:
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHttpProxy::TEvAddListeningPort, Handle);
- HFunc(TEvHttpProxy::TEvRegisterHandler, Handle);
- HFunc(TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- HFunc(TEvHttpProxy::TEvHttpOutgoingRequest, Handle);
- HFunc(TEvHttpProxy::TEvHttpIncomingResponse, Handle);
- HFunc(TEvHttpProxy::TEvHttpOutgoingResponse, Handle);
- HFunc(TEvHttpProxy::TEvHttpAcceptorClosed, Handle);
- HFunc(TEvHttpProxy::TEvHttpConnectionClosed, Handle);
- HFunc(TEvHttpProxy::TEvResolveHostRequest, Handle);
- HFunc(TEvHttpProxy::TEvReportSensors, Handle);
- HFunc(NActors::TEvents::TEvPoison, Handle);
- }
- }
-
- void PassAway() override {
- Send(Poller, new NActors::TEvents::TEvPoisonPill());
- for (const NActors::TActorId& connection : Connections) {
- Send(connection, new NActors::TEvents::TEvPoisonPill());
- }
- for (const NActors::TActorId& acceptor : Acceptors) {
- Send(acceptor, new NActors::TEvents::TEvPoisonPill());
- }
- NActors::TActorBootstrapped<THttpProxy>::PassAway();
- }
-
- void Handle(TEvHttpProxy::TEvHttpIncomingRequest::TPtr event, const NActors::TActorContext& ctx) {
- TStringBuf url = event->Get()->Request->URL.Before('?');
- THashMap<TString, TActorId>::iterator it;
- while (!url.empty()) {
- it = Handlers.find(url);
- if (it != Handlers.end()) {
- ctx.Send(event->Forward(it->second));
- return;
- } else {
- if (url.EndsWith('/')) {
- url.Chop(1);
- } else {
- size_t pos = url.rfind('/');
- if (pos == TStringBuf::npos) {
- break;
- } else {
- url = url.substr(0, pos + 1);
- }
- }
- }
- }
- ctx.Send(event->Sender, new TEvHttpProxy::TEvHttpOutgoingResponse(event->Get()->Request->CreateResponseNotFound()));
- }
-
- void Handle(TEvHttpProxy::TEvHttpIncomingResponse::TPtr event, const NActors::TActorContext& ctx) {
- Y_UNUSED(event);
- Y_UNUSED(ctx);
- Y_ABORT("This event shouldn't be there, it should go to the http connection owner directly");
- }
-
- void Handle(TEvHttpProxy::TEvHttpOutgoingResponse::TPtr event, const NActors::TActorContext& ctx) {
- Y_UNUSED(event);
- Y_UNUSED(ctx);
- Y_ABORT("This event shouldn't be there, it should go to the http connection directly");
- }
-
- void Handle(TEvHttpProxy::TEvHttpOutgoingRequest::TPtr event, const NActors::TActorContext& ctx) {
- bool secure(event->Get()->Request->Secure);
- NActors::IActor* actor = AddOutgoingConnection(secure, ctx);
- ctx.Send(event->Forward(actor->SelfId()));
- }
-
- void Handle(TEvHttpProxy::TEvAddListeningPort::TPtr event, const NActors::TActorContext& ctx) {
- AddListeningPort(event, ctx);
- }
-
- void Handle(TEvHttpProxy::TEvHttpAcceptorClosed::TPtr event, const NActors::TActorContext&) {
- for (auto it = Acceptors.begin(); it != Acceptors.end(); ++it) {
- if (*it == event->Get()->ConnectionID) {
- Acceptors.erase(it);
- break;
- }
- }
- }
-
- void Handle(TEvHttpProxy::TEvHttpConnectionClosed::TPtr event, const NActors::TActorContext&) {
- Connections.erase(event->Get()->ConnectionID);
- }
-
- void Handle(TEvHttpProxy::TEvRegisterHandler::TPtr event, const NActors::TActorContext& ctx) {
- LOG_TRACE_S(ctx, HttpLog, "Register handler " << event->Get()->Path << " to " << event->Get()->Handler);
- Handlers[event->Get()->Path] = event->Get()->Handler;
- }
-
- void Handle(TEvHttpProxy::TEvResolveHostRequest::TPtr event, const NActors::TActorContext& ctx) {
- const TString& host(event->Get()->Host);
- auto it = Hosts.find(host);
- if (it == Hosts.end() || it->second.DeadlineTime > ctx.Now()) {
- TString addressPart;
- TIpPort portPart = 0;
- CrackAddress(host, addressPart, portPart);
- if (IsIPv6(addressPart)) {
- if (it == Hosts.end()) {
- it = Hosts.emplace(host, THostEntry()).first;
- }
- it->second.Address = std::make_shared<TSockAddrInet6>(addressPart.data(), portPart);
- it->second.DeadlineTime = ctx.Now() + HostsTimeToLive;
- } else if (IsIPv4(addressPart)) {
- if (it == Hosts.end()) {
- it = Hosts.emplace(host, THostEntry()).first;
- }
- it->second.Address = std::make_shared<TSockAddrInet>(addressPart.data(), portPart);
- it->second.DeadlineTime = ctx.Now() + HostsTimeToLive;
- } else {
- // TODO(xenoxeno): move to another, possible blocking actor
- try {
- const NDns::TResolvedHost* result = NDns::CachedResolve(NDns::TResolveInfo(addressPart, portPart));
- if (result != nullptr) {
- auto pAddr = result->Addr.Begin();
- while (pAddr != result->Addr.End() && pAddr->ai_family != AF_INET && pAddr->ai_family != AF_INET6) {
- ++pAddr;
- }
- if (pAddr == result->Addr.End()) {
- ctx.Send(event->Sender, new TEvHttpProxy::TEvResolveHostResponse("Invalid address family resolved"));
- return;
- }
- THttpConfig::SocketAddressType address;
- switch (pAddr->ai_family) {
- case AF_INET:
- address = std::make_shared<TSockAddrInet>();
- break;
- case AF_INET6:
- address = std::make_shared<TSockAddrInet6>();
- break;
- }
- if (address) {
- memcpy(address->SockAddr(), pAddr->ai_addr, pAddr->ai_addrlen);
- LOG_DEBUG_S(ctx, HttpLog, "Host " << host << " resolved to " << address->ToString());
- if (it == Hosts.end()) {
- it = Hosts.emplace(host, THostEntry()).first;
- }
- it->second.Address = address;
- it->second.DeadlineTime = ctx.Now() + HostsTimeToLive;
- }
- } else {
- ctx.Send(event->Sender, new TEvHttpProxy::TEvResolveHostResponse("Error resolving host"));
- return;
- }
- }
- catch (const yexception& e) {
- ctx.Send(event->Sender, new TEvHttpProxy::TEvResolveHostResponse(e.what()));
- return;
- }
- }
- }
- ctx.Send(event->Sender, new TEvHttpProxy::TEvResolveHostResponse(it->first, it->second.Address));
- }
-
- void Handle(TEvHttpProxy::TEvReportSensors::TPtr event, const NActors::TActorContext&) {
- const TEvHttpProxy::TEvReportSensors& sensors(*event->Get());
- const static TString urlNotFound = "not-found";
- const TString& url = (sensors.Status == "404" ? urlNotFound : sensors.Url);
-
- std::shared_ptr<NMonitoring::TMetricRegistry> registry = Registry.lock();
- if (registry) {
- registry->Rate(
- {
- {"sensor", "count"},
- {"direction", sensors.Direction},
- {"peer", sensors.Host},
- {"url", url},
- {"status", sensors.Status}
- })->Inc();
- registry->HistogramRate(
- {
- {"sensor", "time_us"},
- {"direction", sensors.Direction},
- {"peer", sensors.Host},
- {"url", url},
- {"status", sensors.Status}
- },
- NMonitoring::ExplicitHistogram({1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 30000, 60000}))->Record(sensors.Time.MicroSeconds());
- registry->HistogramRate(
- {
- {"sensor", "time_ms"},
- {"direction", sensors.Direction},
- {"peer", sensors.Host},
- {"url", url},
- {"status", sensors.Status}
- },
- NMonitoring::ExplicitHistogram({1, 5, 10, 50, 100, 500, 1000, 5000, 10000, 30000, 60000}))->Record(sensors.Time.MilliSeconds());
- }
- }
-
- void Handle(NActors::TEvents::TEvPoison::TPtr, const NActors::TActorContext&) {
- for (const TActorId& acceptor : Acceptors) {
- Send(acceptor, new NActors::TEvents::TEvPoisonPill());
- }
- for (const TActorId& connection : Connections) {
- Send(connection, new NActors::TEvents::TEvPoisonPill());
- }
- PassAway();
- }
-
- NActors::TActorId Poller;
- TVector<TActorId> Acceptors;
-
- struct THostEntry {
- THttpConfig::SocketAddressType Address;
- TInstant DeadlineTime;
- };
-
- static constexpr TDuration HostsTimeToLive = TDuration::Seconds(60);
-
- THashMap<TString, THostEntry> Hosts;
- THashMap<TString, TActorId> Handlers;
- THashSet<TActorId> Connections; // outgoing
- std::weak_ptr<NMonitoring::TMetricRegistry> Registry;
-};
-
-TEvHttpProxy::TEvReportSensors* BuildOutgoingRequestSensors(const THttpOutgoingRequestPtr& request, const THttpIncomingResponsePtr& response) {
- return new TEvHttpProxy::TEvReportSensors(
- "out",
- request->Host,
- request->URL.Before('?'),
- response ? response->Status : "504",
- TDuration::Seconds(std::abs(request->Timer.Passed()))
- );
-}
-
-TEvHttpProxy::TEvReportSensors* BuildIncomingRequestSensors(const THttpIncomingRequestPtr& request, const THttpOutgoingResponsePtr& response) {
- const auto& sensors = response->Sensors;
- if (sensors) {
- return new TEvHttpProxy::TEvReportSensors(*sensors);
- }
- return new TEvHttpProxy::TEvReportSensors(
- "in",
- request->Host,
- request->URL.Before('?'),
- response->Status,
- TDuration::Seconds(std::abs(request->Timer.Passed()))
- );
-}
-
-NActors::IActor* CreateHttpProxy(std::weak_ptr<NMonitoring::TMetricRegistry> registry) {
- return new THttpProxy(std::move(registry));
-}
-
-bool IsIPv6(const TString& host) {
- if (host.find_first_not_of(":0123456789abcdef") != TString::npos) {
- return false;
- }
- if (std::count(host.begin(), host.end(), ':') < 2) {
- return false;
- }
- return true;
-}
-
-bool IsIPv4(const TString& host) {
- if (host.find_first_not_of(".0123456789") != TString::npos) {
- return false;
- }
- if (std::count(host.begin(), host.end(), '.') != 3) {
- return false;
- }
- return true;
-}
-
-bool CrackURL(TStringBuf url, TStringBuf& scheme, TStringBuf& host, TStringBuf& uri) {
- url.TrySplit("://", scheme, url);
- auto pos = url.find('/');
- if (pos == TStringBuf::npos) {
- host = url;
- } else {
- host = url.substr(0, pos);
- uri = url.substr(pos);
- }
- return true;
-}
-
-void CrackAddress(const TString& address, TString& hostname, TIpPort& port) {
- size_t first_colon_pos = address.find(':');
- if (first_colon_pos != TString::npos) {
- size_t last_colon_pos = address.rfind(':');
- if (last_colon_pos == first_colon_pos) {
- // only one colon, simple case
- port = FromStringWithDefault<TIpPort>(address.substr(first_colon_pos + 1), 0);
- hostname = address.substr(0, first_colon_pos);
- } else {
- // ipv6?
- size_t closing_bracket_pos = address.rfind(']');
- if (closing_bracket_pos == TString::npos || closing_bracket_pos > last_colon_pos) {
- // whole address is ipv6 host
- hostname = address;
- } else {
- port = FromStringWithDefault<TIpPort>(address.substr(last_colon_pos + 1), 0);
- hostname = address.substr(0, last_colon_pos);
- }
- if (hostname.StartsWith('[') && hostname.EndsWith(']')) {
- hostname = hostname.substr(1, hostname.size() - 2);
- }
- }
- } else {
- hostname = address;
- }
-}
-
-
-void TrimBegin(TStringBuf& target, char delim) {
- while (!target.empty() && *target.begin() == delim) {
- target.Skip(1);
- }
-}
-
-void TrimEnd(TStringBuf& target, char delim) {
- while (!target.empty() && target.back() == delim) {
- target.Trunc(target.size() - 1);
- }
-}
-
-void Trim(TStringBuf& target, char delim) {
- TrimBegin(target, delim);
- TrimEnd(target, delim);
-}
-
-void TrimEnd(TString& target, char delim) {
- while (!target.empty() && target.back() == delim) {
- target.resize(target.size() - 1);
- }
-}
-
-}
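
The free functions at the bottom of this file (IsIPv4/IsIPv6, CrackURL, CrackAddress and the Trim helpers) are plain string utilities with no actor dependencies. Their expected behaviour, written as checks and assuming the declarations are exported through http.h:

    #include <library/cpp/actors/http/http.h>
    #include <util/system/yassert.h>

    void CheckHelpers() {
        TString host;
        TIpPort port = 0;
        NHttp::CrackAddress("[::1]:8080", host, port);
        Y_ABORT_UNLESS(host == "::1" && port == 8080);   // brackets stripped, port parsed

        NHttp::CrackAddress("example.com", host, port);
        Y_ABORT_UNLESS(host == "example.com");           // no colon: the port is left untouched

        TStringBuf scheme, hostPart, uri;
        NHttp::CrackURL("https://example.com/path?x=1", scheme, hostPart, uri);
        Y_ABORT_UNLESS(scheme == "https" && hostPart == "example.com" && uri == "/path?x=1");

        Y_ABORT_UNLESS(NHttp::IsIPv6("::1"));
        Y_ABORT_UNLESS(NHttp::IsIPv4("127.0.0.1"));
    }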
diff --git a/library/cpp/actors/http/http_proxy.h b/library/cpp/actors/http/http_proxy.h
deleted file mode 100644
index d9a2c6a71c..0000000000
--- a/library/cpp/actors/http/http_proxy.h
+++ /dev/null
@@ -1,246 +0,0 @@
-#pragma once
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/interconnect/poller_actor.h>
-#include <library/cpp/dns/cache.h>
-#include <library/cpp/monlib/metrics/metric_registry.h>
-#include <util/generic/variant.h>
-#include "http.h"
-#include "http_proxy_sock64.h"
-#include "http_proxy_ssl.h"
-
-namespace NHttp {
-
-struct TSocketDescriptor : NActors::TSharedDescriptor, THttpConfig {
- SocketType Socket;
-
- TSocketDescriptor() = default;
-
- TSocketDescriptor(int af)
- : Socket(af)
- {
- }
-
- TSocketDescriptor(SocketType&& s)
- : Socket(std::move(s))
- {}
-
- int GetDescriptor() override {
- return static_cast<SOCKET>(Socket);
- }
-};
-
-struct TEvHttpProxy {
- enum EEv {
- EvAddListeningPort = EventSpaceBegin(NActors::TEvents::ES_HTTP),
- EvConfirmListen,
- EvRegisterHandler,
- EvHttpIncomingRequest,
- EvHttpOutgoingRequest,
- EvHttpIncomingResponse,
- EvHttpOutgoingResponse,
- EvHttpConnectionOpened,
- EvHttpConnectionClosed,
- EvHttpAcceptorClosed,
- EvResolveHostRequest,
- EvResolveHostResponse,
- EvReportSensors,
- EvEnd
- };
-
- static_assert(EvEnd < EventSpaceEnd(NActors::TEvents::ES_HTTP), "ES_HTTP event space is too small.");
-
- struct TEvAddListeningPort : NActors::TEventLocal<TEvAddListeningPort, EvAddListeningPort> {
- TString Address;
- TIpPort Port;
- TString WorkerName;
- bool Secure = false;
- TString CertificateFile;
- TString PrivateKeyFile;
- TString SslCertificatePem;
- std::vector<TString> CompressContentTypes;
-
- TEvAddListeningPort() = default;
-
- TEvAddListeningPort(TIpPort port)
- : Port(port)
- {}
-
- TEvAddListeningPort(TIpPort port, const TString& workerName)
- : Port(port)
- , WorkerName(workerName)
- {}
- };
-
- struct TEvConfirmListen : NActors::TEventLocal<TEvConfirmListen, EvConfirmListen> {
- THttpConfig::SocketAddressType Address;
- std::shared_ptr<THttpEndpointInfo> Endpoint;
-
- TEvConfirmListen(const THttpConfig::SocketAddressType& address, std::shared_ptr<THttpEndpointInfo> endpoint)
- : Address(address)
- , Endpoint(std::move(endpoint))
- {}
- };
-
- struct TEvRegisterHandler : NActors::TEventLocal<TEvRegisterHandler, EvRegisterHandler> {
- TString Path;
- TActorId Handler;
-
- TEvRegisterHandler(const TString& path, const TActorId& handler)
- : Path(path)
- , Handler(handler)
- {}
- };
-
- struct TEvHttpIncomingRequest : NActors::TEventLocal<TEvHttpIncomingRequest, EvHttpIncomingRequest> {
- THttpIncomingRequestPtr Request;
-
- TEvHttpIncomingRequest(THttpIncomingRequestPtr request)
- : Request(std::move(request))
- {}
- };
-
- struct TEvHttpOutgoingRequest : NActors::TEventLocal<TEvHttpOutgoingRequest, EvHttpOutgoingRequest> {
- THttpOutgoingRequestPtr Request;
- TDuration Timeout;
-
- TEvHttpOutgoingRequest(THttpOutgoingRequestPtr request)
- : Request(std::move(request))
- {}
-
- TEvHttpOutgoingRequest(THttpOutgoingRequestPtr request, TDuration timeout)
- : Request(std::move(request))
- , Timeout(timeout)
- {}
- };
-
- struct TEvHttpIncomingResponse : NActors::TEventLocal<TEvHttpIncomingResponse, EvHttpIncomingResponse> {
- THttpOutgoingRequestPtr Request;
- THttpIncomingResponsePtr Response;
- TString Error;
-
- TEvHttpIncomingResponse(THttpOutgoingRequestPtr request, THttpIncomingResponsePtr response, const TString& error)
- : Request(std::move(request))
- , Response(std::move(response))
- , Error(error)
- {}
-
- TEvHttpIncomingResponse(THttpOutgoingRequestPtr request, THttpIncomingResponsePtr response)
- : Request(std::move(request))
- , Response(std::move(response))
- {}
-
- TString GetError() const {
- TStringBuilder error;
- if (Response != nullptr && !Response->Status.StartsWith('2')) {
- error << Response->Status << ' ' << Response->Message;
- }
- if (!Error.empty()) {
- if (!error.empty()) {
- error << ';';
- }
- error << Error;
- }
- return error;
- }
- };
-
- struct TEvHttpOutgoingResponse : NActors::TEventLocal<TEvHttpOutgoingResponse, EvHttpOutgoingResponse> {
- THttpOutgoingResponsePtr Response;
-
- TEvHttpOutgoingResponse(THttpOutgoingResponsePtr response)
- : Response(std::move(response))
- {}
- };
-
- struct TEvHttpConnectionOpened : NActors::TEventLocal<TEvHttpConnectionOpened, EvHttpConnectionOpened> {
- TString PeerAddress;
- TActorId ConnectionID;
-
- TEvHttpConnectionOpened(const TString& peerAddress, const TActorId& connectionID)
- : PeerAddress(peerAddress)
- , ConnectionID(connectionID)
- {}
- };
-
- struct TEvHttpConnectionClosed : NActors::TEventLocal<TEvHttpConnectionClosed, EvHttpConnectionClosed> {
- TActorId ConnectionID;
- TDeque<THttpIncomingRequestPtr> RecycledRequests;
-
- TEvHttpConnectionClosed(const TActorId& connectionID)
- : ConnectionID(connectionID)
- {}
-
- TEvHttpConnectionClosed(const TActorId& connectionID, TDeque<THttpIncomingRequestPtr> recycledRequests)
- : ConnectionID(connectionID)
- , RecycledRequests(std::move(recycledRequests))
- {}
- };
-
- struct TEvHttpAcceptorClosed : NActors::TEventLocal<TEvHttpAcceptorClosed, EvHttpAcceptorClosed> {
- TActorId ConnectionID;
-
- TEvHttpAcceptorClosed(const TActorId& connectionID)
- : ConnectionID(connectionID)
- {}
- };
-
- struct TEvResolveHostRequest : NActors::TEventLocal<TEvResolveHostRequest, EvResolveHostRequest> {
- TString Host;
-
- TEvResolveHostRequest(const TString& host)
- : Host(host)
- {}
- };
-
- struct TEvResolveHostResponse : NActors::TEventLocal<TEvResolveHostResponse, EvResolveHostResponse> {
- TString Host;
- THttpConfig::SocketAddressType Address;
- TString Error;
-
- TEvResolveHostResponse(const TString& host, THttpConfig::SocketAddressType address)
- : Host(host)
- , Address(address)
- {}
-
- TEvResolveHostResponse(const TString& error)
- : Error(error)
- {}
- };
-
- struct TEvReportSensors : TSensors, NActors::TEventLocal<TEvReportSensors, EvReportSensors> {
- using TSensors::TSensors;
-
- TEvReportSensors(const TSensors& sensors)
- : TSensors(sensors)
- {}
- };
-};
-
-struct TPrivateEndpointInfo : THttpEndpointInfo {
- TActorId Proxy;
- TActorId Owner;
- TSslHelpers::TSslHolder<SSL_CTX> SecureContext;
-
- TPrivateEndpointInfo(const std::vector<TString>& compressContentTypes)
- : THttpEndpointInfo(compressContentTypes)
- {}
-};
-
-NActors::IActor* CreateHttpProxy(std::weak_ptr<NMonitoring::TMetricRegistry> registry = NMonitoring::TMetricRegistry::SharedInstance());
-NActors::IActor* CreateHttpAcceptorActor(const TActorId& owner, const TActorId& poller);
-NActors::IActor* CreateOutgoingConnectionActor(const TActorId& owner, bool secure, const TActorId& poller);
-NActors::IActor* CreateIncomingConnectionActor(
- std::shared_ptr<TPrivateEndpointInfo> endpoint,
- TIntrusivePtr<TSocketDescriptor> socket,
- THttpConfig::SocketAddressType address,
- THttpIncomingRequestPtr recycledRequest = nullptr);
-TEvHttpProxy::TEvReportSensors* BuildOutgoingRequestSensors(const THttpOutgoingRequestPtr& request, const THttpIncomingResponsePtr& response);
-TEvHttpProxy::TEvReportSensors* BuildIncomingRequestSensors(const THttpIncomingRequestPtr& request, const THttpOutgoingResponsePtr& response);
-
-}
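
http_proxy.h defines the message protocol between user actors and the proxy. A client-side sketch: hand the proxy a TEvHttpOutgoingRequest and wait for the TEvHttpIncomingResponse that comes back. TFetchActor and the URL are illustrative, and CreateRequestGet() is assumed to be the request factory provided by http.h; it does not appear in this diff.

    #include <library/cpp/actors/http/http_proxy.h>

    class TFetchActor : public NActors::TActorBootstrapped<TFetchActor> {
        NActors::TActorId HttpProxyId;

    public:
        TFetchActor(const NActors::TActorId& httpProxyId)
            : HttpProxyId(httpProxyId)
        {}

        void Bootstrap(const NActors::TActorContext& ctx) {
            NHttp::THttpOutgoingRequestPtr request =
                NHttp::THttpOutgoingRequest::CreateRequestGet("http://localhost:8080/ping"); // assumed factory
            // the (request, timeout) constructor drives the per-request connection timeout
            ctx.Send(HttpProxyId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(request, TDuration::Seconds(5)));
            Become(&TFetchActor::StateWork);
        }

        void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingResponse::TPtr event, const NActors::TActorContext& ctx) {
            if (event->Get()->Response != nullptr) {
                // event->Get()->Response->Status carries the HTTP status of the reply
            } else {
                // event->Get()->GetError() describes the failure
            }
            Die(ctx);
        }

        STFUNC(StateWork) {
            switch (ev->GetTypeRewrite()) {
                HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingResponse, Handle);
            }
        }
    };

The timeout passed as the second constructor argument is picked up by the outgoing connection actor (HandleWaiting in http_proxy_outgoing.cpp below) and replaces its default CONNECTION_TIMEOUT.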
diff --git a/library/cpp/actors/http/http_proxy_acceptor.cpp b/library/cpp/actors/http/http_proxy_acceptor.cpp
deleted file mode 100644
index c007f747eb..0000000000
--- a/library/cpp/actors/http/http_proxy_acceptor.cpp
+++ /dev/null
@@ -1,158 +0,0 @@
-#include <util/network/sock.h>
-#include "http_proxy.h"
-#include "http_proxy_ssl.h"
-
-namespace NHttp {
-
-class TAcceptorActor : public NActors::TActor<TAcceptorActor>, public THttpConfig {
-public:
- using TBase = NActors::TActor<TAcceptorActor>;
- const TActorId Owner;
- const TActorId Poller;
- TIntrusivePtr<TSocketDescriptor> Socket;
- NActors::TPollerToken::TPtr PollerToken;
- THashSet<TActorId> Connections;
- TDeque<THttpIncomingRequestPtr> RecycledRequests;
- std::shared_ptr<TPrivateEndpointInfo> Endpoint;
-
- TAcceptorActor(const TActorId& owner, const TActorId& poller)
- : NActors::TActor<TAcceptorActor>(&TAcceptorActor::StateInit)
- , Owner(owner)
- , Poller(poller)
- {
- }
-
- static constexpr char ActorName[] = "HTTP_ACCEPTOR_ACTOR";
-
-protected:
- STFUNC(StateListening) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NActors::TEvPollerRegisterResult, Handle);
- HFunc(NActors::TEvPollerReady, Handle);
- HFunc(TEvHttpProxy::TEvHttpConnectionClosed, Handle);
- HFunc(TEvHttpProxy::TEvReportSensors, Handle);
- }
- }
-
- STFUNC(StateInit) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHttpProxy::TEvAddListeningPort, HandleInit);
- }
- }
-
- void HandleInit(TEvHttpProxy::TEvAddListeningPort::TPtr event, const NActors::TActorContext& ctx) {
- TString address = event->Get()->Address;
- ui16 port = event->Get()->Port;
- Socket = new TSocketDescriptor(SocketType::GuessAddressFamily(address));
- // for unit tests :(
- SetSockOpt(Socket->Socket, SOL_SOCKET, SO_REUSEADDR, (int)true);
-#ifdef SO_REUSEPORT
- SetSockOpt(Socket->Socket, SOL_SOCKET, SO_REUSEPORT, (int)true);
-#endif
- SocketAddressType bindAddress(Socket->Socket.MakeAddress(address, port));
- Endpoint = std::make_shared<TPrivateEndpointInfo>(event->Get()->CompressContentTypes);
- Endpoint->Owner = ctx.SelfID;
- Endpoint->Proxy = Owner;
- Endpoint->WorkerName = event->Get()->WorkerName;
- Endpoint->Secure = event->Get()->Secure;
- int err = 0;
- if (Endpoint->Secure) {
- if (!event->Get()->SslCertificatePem.empty()) {
- Endpoint->SecureContext = TSslHelpers::CreateServerContext(event->Get()->SslCertificatePem);
- } else {
- Endpoint->SecureContext = TSslHelpers::CreateServerContext(event->Get()->CertificateFile, event->Get()->PrivateKeyFile);
- }
- if (Endpoint->SecureContext == nullptr) {
- err = -1;
- LOG_WARN_S(ctx, HttpLog, "Failed to construct server security context");
- }
- }
- if (err == 0) {
- err = Socket->Socket.Bind(bindAddress.get());
- if (err != 0) {
- LOG_WARN_S(
- ctx,
- HttpLog,
- "Failed to bind " << bindAddress->ToString()
- << ", code: " << err);
- }
- }
- TStringBuf schema = Endpoint->Secure ? "https://" : "http://";
- if (err == 0) {
- err = Socket->Socket.Listen(LISTEN_QUEUE);
- if (err == 0) {
- LOG_INFO_S(ctx, HttpLog, "Listening on " << schema << bindAddress->ToString());
- SetNonBlock(Socket->Socket);
- ctx.Send(Poller, new NActors::TEvPollerRegister(Socket, SelfId(), SelfId()));
- TBase::Become(&TAcceptorActor::StateListening);
- ctx.Send(event->Sender, new TEvHttpProxy::TEvConfirmListen(bindAddress, Endpoint), 0, event->Cookie);
- return;
- } else {
- LOG_WARN_S(
- ctx,
- HttpLog,
- "Failed to listen on " << schema << bindAddress->ToString()
- << ", code: " << err);
- }
- }
- LOG_WARN_S(ctx, HttpLog, "Failed to init - retrying...");
- ctx.ExecutorThread.Schedule(TDuration::Seconds(1), event.Release());
- }
-
- void Die(const NActors::TActorContext& ctx) override {
- ctx.Send(Owner, new TEvHttpProxy::TEvHttpAcceptorClosed(ctx.SelfID));
- for (const NActors::TActorId& connection : Connections) {
- ctx.Send(connection, new NActors::TEvents::TEvPoisonPill());
- }
- }
-
- void Handle(NActors::TEvPollerRegisterResult::TPtr ev, const NActors::TActorContext& /*ctx*/) {
- PollerToken = std::move(ev->Get()->PollerToken);
- PollerToken->Request(true, false); // request read polling
- }
-
- void Handle(NActors::TEvPollerReady::TPtr, const NActors::TActorContext& ctx) {
- for (;;) {
- SocketAddressType addr;
- std::optional<SocketType> s = Socket->Socket.Accept(addr);
- if (!s) {
- if (errno == EAGAIN || errno == EWOULDBLOCK) {
- Y_ABORT_UNLESS(PollerToken);
- if (PollerToken->RequestReadNotificationAfterWouldBlock()) {
- continue; // we can try it again
- }
- }
- break;
- }
- TIntrusivePtr<TSocketDescriptor> socket = new TSocketDescriptor(std::move(s).value());
- NActors::IActor* connectionSocket = nullptr;
- if (RecycledRequests.empty()) {
- connectionSocket = CreateIncomingConnectionActor(Endpoint, socket, addr);
- } else {
- connectionSocket = CreateIncomingConnectionActor(Endpoint, socket, addr, std::move(RecycledRequests.front()));
- RecycledRequests.pop_front();
- }
- NActors::TActorId connectionId = ctx.Register(connectionSocket);
- ctx.Send(Poller, new NActors::TEvPollerRegister(socket, connectionId, connectionId));
- Connections.emplace(connectionId);
- }
- }
-
- void Handle(TEvHttpProxy::TEvHttpConnectionClosed::TPtr event, const NActors::TActorContext&) {
- Connections.erase(event->Get()->ConnectionID);
- for (auto& req : event->Get()->RecycledRequests) {
- req->Clear();
- RecycledRequests.push_back(std::move(req));
- }
- }
-
- void Handle(TEvHttpProxy::TEvReportSensors::TPtr event, const NActors::TActorContext& ctx) {
- ctx.Send(event->Forward(Owner));
- }
-};
-
-NActors::IActor* CreateHttpAcceptorActor(const TActorId& owner, const TActorId& poller) {
- return new TAcceptorActor(owner, poller);
-}
-
-}
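
From the outside the acceptor is driven by a single message: the proxy registers a TAcceptorActor and forwards TEvAddListeningPort to it, and the acceptor binds, listens and answers with TEvConfirmListen. A server-side wiring sketch; the TActorSystem Register/Send calls are assumptions about the standard NActors runtime, and the port, path and worker name are arbitrary examples.

    #include <library/cpp/actors/http/http_proxy.h>

    void StartHttpServer(NActors::TActorSystem& actorSystem, const NActors::TActorId& handlerId) {
        const NActors::TActorId proxyId = actorSystem.Register(NHttp::CreateHttpProxy());

        auto* listen = new NHttp::TEvHttpProxy::TEvAddListeningPort(8080, "http-worker");
        listen->Address = "::";   // empty or "::" binds on all interfaces
        listen->Secure = false;   // set CertificateFile/PrivateKeyFile (or SslCertificatePem) for TLS
        actorSystem.Send(proxyId, listen);

        // every request whose URL falls under /ping is routed to handlerId
        actorSystem.Send(proxyId, new NHttp::TEvHttpProxy::TEvRegisterHandler("/ping", handlerId));
    }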
diff --git a/library/cpp/actors/http/http_proxy_incoming.cpp b/library/cpp/actors/http/http_proxy_incoming.cpp
deleted file mode 100644
index b98b3c09f3..0000000000
--- a/library/cpp/actors/http/http_proxy_incoming.cpp
+++ /dev/null
@@ -1,310 +0,0 @@
-#include "http_proxy.h"
-#include "http_proxy_sock_impl.h"
-
-namespace NHttp {
-
-using namespace NActors;
-
-template <typename TSocketImpl>
-class TIncomingConnectionActor : public TActor<TIncomingConnectionActor<TSocketImpl>>, public TSocketImpl, virtual public THttpConfig {
-public:
- using TBase = TActor<TIncomingConnectionActor<TSocketImpl>>;
- static constexpr bool RecycleRequests = true;
-
- std::shared_ptr<TPrivateEndpointInfo> Endpoint;
- SocketAddressType Address;
- TList<THttpIncomingRequestPtr> Requests;
- THashMap<THttpIncomingRequestPtr, THttpOutgoingResponsePtr> Responses;
- THttpIncomingRequestPtr CurrentRequest;
- THttpOutgoingResponsePtr CurrentResponse;
- TDeque<THttpIncomingRequestPtr> RecycledRequests;
-
- THPTimer InactivityTimer;
- static constexpr TDuration InactivityTimeout = TDuration::Minutes(2);
- TEvPollerReady* InactivityEvent = nullptr;
-
- TPollerToken::TPtr PollerToken;
-
- TIncomingConnectionActor(
- std::shared_ptr<TPrivateEndpointInfo> endpoint,
- TIntrusivePtr<TSocketDescriptor> socket,
- SocketAddressType address,
- THttpIncomingRequestPtr recycledRequest = nullptr)
- : TBase(&TIncomingConnectionActor::StateAccepting)
- , TSocketImpl(std::move(socket))
- , Endpoint(std::move(endpoint))
- , Address(address)
- {
- if (recycledRequest != nullptr) {
- RecycledRequests.emplace_back(std::move(recycledRequest));
- }
- TSocketImpl::SetNonBlock();
- }
-
- static constexpr char ActorName[] = "IN_CONNECTION_ACTOR";
-
- void CleanupRequest(THttpIncomingRequestPtr& request) {
- if (RecycleRequests) {
- request->Clear();
- RecycledRequests.push_back(std::move(request));
- } else {
- request = nullptr;
- }
- }
-
- void CleanupResponse(THttpOutgoingResponsePtr& response) {
- CleanupRequest(response->Request);
- // TODO: maybe recycle too?
- response = nullptr;
- }
-
- TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parent) override {
- return new IEventHandle(self, parent, new TEvents::TEvBootstrap());
- }
-
- void Die(const TActorContext& ctx) override {
- ctx.Send(Endpoint->Owner, new TEvHttpProxy::TEvHttpConnectionClosed(ctx.SelfID, std::move(RecycledRequests)));
- TSocketImpl::Shutdown();
- TBase::Die(ctx);
- }
-
-protected:
- void Bootstrap(const TActorContext& ctx) {
- InactivityTimer.Reset();
- ctx.Schedule(InactivityTimeout, InactivityEvent = new TEvPollerReady(nullptr, false, false));
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") incoming connection opened");
- OnAccept(ctx);
- }
-
- void OnAccept(const NActors::TActorContext& ctx) {
- int res;
- bool read = false, write = false;
- for (;;) {
- if ((res = TSocketImpl::OnAccept(Endpoint, read, write)) != 1) {
- if (-res == EAGAIN) {
- if (PollerToken && PollerToken->RequestReadNotificationAfterWouldBlock()) {
- continue;
- }
- return; // wait for further notifications
- } else {
- LOG_ERROR_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed - error in Accept: " << strerror(-res));
- return Die(ctx);
- }
- }
- break;
- }
- TBase::Become(&TIncomingConnectionActor::StateConnected);
- ctx.Send(ctx.SelfID, new TEvPollerReady(nullptr, true, true));
- }
-
- void HandleAccepting(TEvPollerRegisterResult::TPtr ev, const NActors::TActorContext& ctx) {
- PollerToken = std::move(ev->Get()->PollerToken);
- OnAccept(ctx);
- }
-
- void HandleAccepting(NActors::TEvPollerReady::TPtr, const NActors::TActorContext& ctx) {
- OnAccept(ctx);
- }
-
- void HandleConnected(TEvPollerReady::TPtr event, const TActorContext& ctx) {
- if (event->Get()->Read) {
- for (;;) {
- if (CurrentRequest == nullptr) {
- if (RecycleRequests && !RecycledRequests.empty()) {
- CurrentRequest = std::move(RecycledRequests.front());
- RecycledRequests.pop_front();
- CurrentRequest->Address = Address;
- CurrentRequest->Endpoint = Endpoint;
- } else {
- CurrentRequest = new THttpIncomingRequest(Endpoint, Address);
- }
- }
- if (!CurrentRequest->EnsureEnoughSpaceAvailable()) {
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed - not enough space available");
- return Die(ctx);
- }
- ssize_t need = CurrentRequest->Avail();
- bool read = false, write = false;
- ssize_t res = TSocketImpl::Recv(CurrentRequest->Pos(), need, read, write);
- if (res > 0) {
- InactivityTimer.Reset();
- CurrentRequest->Advance(res);
- if (CurrentRequest->IsDone()) {
- Requests.emplace_back(CurrentRequest);
- CurrentRequest->Timer.Reset();
- if (CurrentRequest->IsReady()) {
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") -> (" << CurrentRequest->Method << " " << CurrentRequest->URL << ")");
- ctx.Send(Endpoint->Proxy, new TEvHttpProxy::TEvHttpIncomingRequest(CurrentRequest));
- CurrentRequest = nullptr;
- } else if (CurrentRequest->IsError()) {
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") -! (" << CurrentRequest->Method << " " << CurrentRequest->URL << ")");
- bool success = Respond(CurrentRequest->CreateResponseBadRequest(), ctx);
- if (!success) {
- return;
- }
- CurrentRequest = nullptr;
- }
- }
- } else if (-res == EAGAIN || -res == EWOULDBLOCK) {
- if (PollerToken) {
- if (!read && !write) {
- read = true;
- }
- if (PollerToken->RequestNotificationAfterWouldBlock(read, write)) {
- continue;
- }
- }
- break;
- } else if (-res == EINTR) {
- continue;
- } else if (!res) {
- // connection closed
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed");
- return Die(ctx);
- } else {
- LOG_ERROR_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed - error in Receive: " << strerror(-res));
- return Die(ctx);
- }
- }
- }
- if (event->Get() == InactivityEvent) {
- const TDuration passed = TDuration::Seconds(std::abs(InactivityTimer.Passed()));
- if (passed >= InactivityTimeout) {
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed by inactivity timeout");
- return Die(ctx); // timeout
- } else {
- ctx.Schedule(InactivityTimeout - passed, InactivityEvent = new TEvPollerReady(nullptr, false, false));
- }
- }
- if (event->Get()->Write) {
- FlushOutput(ctx);
- }
- }
-
- void HandleConnected(TEvPollerRegisterResult::TPtr ev, const TActorContext& /*ctx*/) {
- PollerToken = std::move(ev->Get()->PollerToken);
- PollerToken->Request(true, true);
- }
-
- void HandleConnected(TEvHttpProxy::TEvHttpOutgoingResponse::TPtr event, const TActorContext& ctx) {
- Respond(event->Get()->Response, ctx);
- }
-
- bool Respond(THttpOutgoingResponsePtr response, const TActorContext& ctx) {
- THttpIncomingRequestPtr request = response->GetRequest();
- response->Finish();
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") <- (" << response->Status << " " << response->Message << ")");
- if (!response->Status.StartsWith('2') && response->Status != "404") {
- static constexpr size_t MAX_LOGGED_SIZE = 1024;
- LOG_DEBUG_S(ctx, HttpLog,
- "(#"
- << TSocketImpl::GetRawSocket()
- << ","
- << Address
- << ") Request: "
- << request->GetObfuscatedData().substr(0, MAX_LOGGED_SIZE));
- LOG_DEBUG_S(ctx, HttpLog,
- "(#"
- << TSocketImpl::GetRawSocket()
- << ","
- << Address
- << ") Response: "
- << TString(response->GetRawData()).substr(0, MAX_LOGGED_SIZE));
- }
- THolder<TEvHttpProxy::TEvReportSensors> sensors(BuildIncomingRequestSensors(request, response));
- ctx.Send(Endpoint->Owner, sensors.Release());
- if (request == Requests.front() && CurrentResponse == nullptr) {
- CurrentResponse = response;
- return FlushOutput(ctx);
- } else {
- // we are ahead of our pipeline
- Responses.emplace(request, response);
- return true;
- }
- }
-
- bool FlushOutput(const TActorContext& ctx) {
- while (CurrentResponse != nullptr) {
- size_t size = CurrentResponse->Size();
- if (size == 0) {
- Y_ABORT_UNLESS(Requests.front() == CurrentResponse->GetRequest());
- bool close = CurrentResponse->IsConnectionClose();
- Requests.pop_front();
- CleanupResponse(CurrentResponse);
- if (!Requests.empty()) {
- auto it = Responses.find(Requests.front());
- if (it != Responses.end()) {
- CurrentResponse = it->second;
- Responses.erase(it);
- continue;
- } else {
- LOG_ERROR_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed - FlushOutput request not found");
- Die(ctx);
- return false;
- }
- } else {
- if (close) {
- LOG_DEBUG_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed");
- Die(ctx);
- return false;
- } else {
- continue;
- }
- }
- }
- bool read = false, write = false;
- ssize_t res = TSocketImpl::Send(CurrentResponse->Data(), size, read, write);
- if (res > 0) {
- CurrentResponse->ChopHead(res);
- } else if (-res == EINTR) {
- continue;
- } else if (-res == EAGAIN || -res == EWOULDBLOCK) {
- if (PollerToken) {
- if (!read && !write) {
- write = true;
- }
- if (PollerToken->RequestNotificationAfterWouldBlock(read, write)) {
- continue;
- }
- }
- break;
- } else {
- CleanupResponse(CurrentResponse);
- LOG_ERROR_S(ctx, HttpLog, "(#" << TSocketImpl::GetRawSocket() << "," << Address << ") connection closed - error in FlushOutput: " << strerror(-res));
- Die(ctx);
- return false;
- }
- }
- return true;
- }
-
- STFUNC(StateAccepting) {
- switch (ev->GetTypeRewrite()) {
- CFunc(TEvents::TEvBootstrap::EventType, Bootstrap);
- HFunc(TEvPollerReady, HandleAccepting);
- HFunc(TEvPollerRegisterResult, HandleAccepting);
- }
- }
-
- STFUNC(StateConnected) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvPollerReady, HandleConnected);
- HFunc(TEvHttpProxy::TEvHttpOutgoingResponse, HandleConnected);
- HFunc(TEvPollerRegisterResult, HandleConnected);
- }
- }
-};
-
-IActor* CreateIncomingConnectionActor(
- std::shared_ptr<TPrivateEndpointInfo> endpoint,
- TIntrusivePtr<TSocketDescriptor> socket,
- THttpConfig::SocketAddressType address,
- THttpIncomingRequestPtr recycledRequest) {
- if (endpoint->Secure) {
- return new TIncomingConnectionActor<TSecureSocketImpl>(std::move(endpoint), std::move(socket), address, std::move(recycledRequest));
- } else {
- return new TIncomingConnectionActor<TPlainSocketImpl>(std::move(endpoint), std::move(socket), address, std::move(recycledRequest));
- }
-}
-
-}
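
The connection actor above hands every parsed request to Endpoint->Proxy, which routes it by URL prefix to whatever actor was registered with TEvRegisterHandler. A matching handler sketch; TPingHandler is illustrative, and CreateResponseOK() is assumed to exist in http.h next to the CreateResponseNotFound()/CreateResponseBadRequest() helpers that do appear in this diff.

    #include <library/cpp/actors/http/http_proxy.h>

    class TPingHandler : public NActors::TActor<TPingHandler> {
    public:
        TPingHandler()
            : NActors::TActor<TPingHandler>(&TPingHandler::StateWork)
        {}

        void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr event, const NActors::TActorContext& ctx) {
            NHttp::THttpIncomingRequestPtr request = event->Get()->Request;
            NHttp::THttpOutgoingResponsePtr response;
            if (request->URL.StartsWith("/ping")) {
                response = request->CreateResponseOK("pong", "text/plain"); // assumed helper
            } else {
                response = request->CreateResponseNotFound();
            }
            // the reply goes back to the connection actor, which pipelines and flushes it
            ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
        }

        STFUNC(StateWork) {
            switch (ev->GetTypeRewrite()) {
                HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
            }
        }
    };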
diff --git a/library/cpp/actors/http/http_proxy_outgoing.cpp b/library/cpp/actors/http/http_proxy_outgoing.cpp
deleted file mode 100644
index b1f27c2c5a..0000000000
--- a/library/cpp/actors/http/http_proxy_outgoing.cpp
+++ /dev/null
@@ -1,335 +0,0 @@
-#include "http_proxy.h"
-#include "http_proxy_sock_impl.h"
-
-namespace NHttp {
-
-template <typename TSocketImpl>
-class TOutgoingConnectionActor : public NActors::TActor<TOutgoingConnectionActor<TSocketImpl>>, public TSocketImpl, virtual public THttpConfig {
-public:
- using TBase = NActors::TActor<TOutgoingConnectionActor<TSocketImpl>>;
- using TSelf = TOutgoingConnectionActor<TSocketImpl>;
- const TActorId Owner;
- const TActorId Poller;
- SocketAddressType Address;
- TActorId RequestOwner;
- THttpOutgoingRequestPtr Request;
- THttpIncomingResponsePtr Response;
- TInstant LastActivity;
- TDuration ConnectionTimeout = CONNECTION_TIMEOUT;
- NActors::TPollerToken::TPtr PollerToken;
-
- TOutgoingConnectionActor(const TActorId& owner, const TActorId& poller)
- : TBase(&TSelf::StateWaiting)
- , Owner(owner)
- , Poller(poller)
- {
- }
-
- static constexpr char ActorName[] = "OUT_CONNECTION_ACTOR";
-
- void Die(const NActors::TActorContext& ctx) override {
- ctx.Send(Owner, new TEvHttpProxy::TEvHttpConnectionClosed(ctx.SelfID));
- TSocketImpl::Shutdown(); // to avoid errors when connection already closed
- TBase::Die(ctx);
- }
-
- TString GetSocketName() {
- TStringBuilder builder;
- if (TSocketImpl::Socket) {
- builder << "(#" << TSocketImpl::GetRawSocket();
- if (Address && Address->SockAddr()->sa_family) {
- builder << "," << Address;
- }
- builder << ") ";
- }
- return builder;
- }
-
- void ReplyAndDie(const NActors::TActorContext& ctx) {
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "-> (" << Response->Status << " " << Response->Message << ")");
- ctx.Send(RequestOwner, new TEvHttpProxy::TEvHttpIncomingResponse(Request, Response));
- RequestOwner = TActorId();
- THolder<TEvHttpProxy::TEvReportSensors> sensors(BuildOutgoingRequestSensors(Request, Response));
- ctx.Send(Owner, sensors.Release());
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "connection closed");
- Die(ctx);
- }
-
- void ReplyErrorAndDie(const NActors::TActorContext& ctx, const TString& error) {
- LOG_ERROR_S(ctx, HttpLog, GetSocketName() << "connection closed with error: " << error);
- if (RequestOwner) {
- ctx.Send(RequestOwner, new TEvHttpProxy::TEvHttpIncomingResponse(Request, Response, error));
- RequestOwner = TActorId();
- THolder<TEvHttpProxy::TEvReportSensors> sensors(BuildOutgoingRequestSensors(Request, Response));
- ctx.Send(Owner, sensors.Release());
- Die(ctx);
- }
- }
-
-protected:
- void FailConnection(const NActors::TActorContext& ctx, const TString& error) {
- if (Request) {
- return ReplyErrorAndDie(ctx, error);
- }
- return TBase::Become(&TOutgoingConnectionActor::StateFailed);
- }
-
- void Connect(const NActors::TActorContext& ctx) {
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "connecting");
- TSocketImpl::Create(Address->SockAddr()->sa_family);
- TSocketImpl::SetNonBlock();
- TSocketImpl::SetTimeout(ConnectionTimeout);
- int res = TSocketImpl::Connect(Address);
- RegisterPoller(ctx);
- switch (-res) {
- case 0:
- return OnConnect(ctx);
- case EINPROGRESS:
- case EAGAIN:
- return TBase::Become(&TOutgoingConnectionActor::StateConnecting);
- default:
- return ReplyErrorAndDie(ctx, strerror(-res));
- }
- }
-
- void FlushOutput(const NActors::TActorContext& ctx) {
- if (Request != nullptr) {
- Request->Finish();
- while (auto size = Request->Size()) {
- bool read = false, write = false;
- ssize_t res = TSocketImpl::Send(Request->Data(), size, read, write);
- if (res > 0) {
- Request->ChopHead(res);
- } else if (-res == EINTR) {
- continue;
- } else if (-res == EAGAIN || -res == EWOULDBLOCK) {
- if (PollerToken) {
- if (!read && !write) {
- write = true;
- }
- if (PollerToken->RequestNotificationAfterWouldBlock(read, write)) {
- continue;
- }
- }
- break;
- } else {
- if (!res) {
- ReplyAndDie(ctx);
- } else {
- ReplyErrorAndDie(ctx, strerror(-res));
- }
- break;
- }
- }
- }
- }
-
- void PullInput(const NActors::TActorContext& ctx) {
- for (;;) {
- if (Response == nullptr) {
- Response = new THttpIncomingResponse(Request);
- }
- if (!Response->EnsureEnoughSpaceAvailable()) {
- return ReplyErrorAndDie(ctx, "Not enough space in socket buffer");
- }
- bool read = false, write = false;
- ssize_t res = TSocketImpl::Recv(Response->Pos(), Response->Avail(), read, write);
- if (res > 0) {
- Response->Advance(res);
- if (Response->IsDone() && Response->IsReady()) {
- return ReplyAndDie(ctx);
- }
- } else if (-res == EINTR) {
- continue;
- } else if (-res == EAGAIN || -res == EWOULDBLOCK) {
- if (PollerToken) {
- if (!read && !write) {
- read = true;
- }
- if (PollerToken->RequestNotificationAfterWouldBlock(read, write)) {
- continue;
- }
- }
- return;
- } else {
- if (!res) {
- Response->ConnectionClosed();
- }
- if (Response->IsDone() && Response->IsReady()) {
- return ReplyAndDie(ctx);
- }
- return ReplyErrorAndDie(ctx, strerror(-res));
- }
- }
- }
-
- void RegisterPoller(const NActors::TActorContext& ctx) {
- ctx.Send(Poller, new NActors::TEvPollerRegister(TSocketImpl::Socket, ctx.SelfID, ctx.SelfID));
- }
-
- void OnConnect(const NActors::TActorContext& ctx) {
- bool read = false, write = false;
- if (int res = TSocketImpl::OnConnect(read, write); res != 1) {
- if (-res == EAGAIN) {
- if (PollerToken) {
- PollerToken->Request(read, write);
- }
- return;
- } else {
- return ReplyErrorAndDie(ctx, strerror(-res));
- }
- }
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "outgoing connection opened");
- TBase::Become(&TOutgoingConnectionActor::StateConnected);
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "<- (" << Request->Method << " " << Request->URL << ")");
- ctx.Send(ctx.SelfID, new NActors::TEvPollerReady(nullptr, true, true));
- }
-
- static int GetPort(SocketAddressType address) {
- switch (address->SockAddr()->sa_family) {
- case AF_INET:
- return ntohs(reinterpret_cast<sockaddr_in*>(address->SockAddr())->sin_port);
- case AF_INET6:
- return ntohs(reinterpret_cast<sockaddr_in6*>(address->SockAddr())->sin6_port);
- }
- return {};
- }
-
- static void SetPort(SocketAddressType address, int port) {
- switch (address->SockAddr()->sa_family) {
- case AF_INET:
- reinterpret_cast<sockaddr_in*>(address->SockAddr())->sin_port = htons(port);
- break;
- case AF_INET6:
- reinterpret_cast<sockaddr_in6*>(address->SockAddr())->sin6_port = htons(port);
- break;
- }
- }
-
- void HandleResolving(TEvHttpProxy::TEvResolveHostResponse::TPtr event, const NActors::TActorContext& ctx) {
- LastActivity = ctx.Now();
- if (!event->Get()->Error.empty()) {
- return FailConnection(ctx, event->Get()->Error);
- }
- Address = event->Get()->Address;
- if (GetPort(Address) == 0) {
- SetPort(Address, Request->Secure ? 443 : 80);
- }
- Connect(ctx);
- }
-
- void HandleConnecting(NActors::TEvPollerReady::TPtr, const NActors::TActorContext& ctx) {
- LastActivity = ctx.Now();
- int res = TSocketImpl::GetError();
- if (res == 0) {
- OnConnect(ctx);
- } else {
- FailConnection(ctx, TStringBuilder() << strerror(res));
- }
- }
-
- void HandleConnecting(NActors::TEvPollerRegisterResult::TPtr ev, const NActors::TActorContext& ctx) {
- PollerToken = std::move(ev->Get()->PollerToken);
- LastActivity = ctx.Now();
- int res = TSocketImpl::GetError();
- if (res == 0) {
- OnConnect(ctx);
- } else {
- FailConnection(ctx, TStringBuilder() << strerror(res));
- }
- }
-
- void HandleWaiting(TEvHttpProxy::TEvHttpOutgoingRequest::TPtr event, const NActors::TActorContext& ctx) {
- LastActivity = ctx.Now();
- Request = std::move(event->Get()->Request);
- TSocketImpl::SetHost(TString(Request->Host));
- LOG_DEBUG_S(ctx, HttpLog, GetSocketName() << "resolving " << TSocketImpl::Host);
- Request->Timer.Reset();
- RequestOwner = event->Sender;
- ctx.Send(Owner, new TEvHttpProxy::TEvResolveHostRequest(TSocketImpl::Host));
- if (event->Get()->Timeout) {
- ConnectionTimeout = event->Get()->Timeout;
- }
- ctx.Schedule(ConnectionTimeout, new NActors::TEvents::TEvWakeup());
- LastActivity = ctx.Now();
- TBase::Become(&TOutgoingConnectionActor::StateResolving);
- }
-
- void HandleConnected(NActors::TEvPollerReady::TPtr event, const NActors::TActorContext& ctx) {
- LastActivity = ctx.Now();
- if (event->Get()->Write && RequestOwner) {
- FlushOutput(ctx);
- }
- if (event->Get()->Read && RequestOwner) {
- PullInput(ctx);
- }
- }
-
- void HandleConnected(NActors::TEvPollerRegisterResult::TPtr ev, const NActors::TActorContext& ctx) {
- PollerToken = std::move(ev->Get()->PollerToken);
- LastActivity = ctx.Now();
- PullInput(ctx);
- FlushOutput(ctx);
- }
-
- void HandleFailed(TEvHttpProxy::TEvHttpOutgoingRequest::TPtr event, const NActors::TActorContext& ctx) {
- Request = std::move(event->Get()->Request);
- RequestOwner = event->Sender;
- ReplyErrorAndDie(ctx, "Failed");
- }
-
- void HandleTimeout(const NActors::TActorContext& ctx) {
- TDuration inactivityTime = ctx.Now() - LastActivity;
- if (inactivityTime >= ConnectionTimeout) {
- FailConnection(ctx, "Connection timed out");
- } else {
- ctx.Schedule(Min(ConnectionTimeout - inactivityTime, TDuration::MilliSeconds(100)), new NActors::TEvents::TEvWakeup());
- }
- }
-
- STFUNC(StateWaiting) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHttpProxy::TEvHttpOutgoingRequest, HandleWaiting);
- CFunc(NActors::TEvents::TEvWakeup::EventType, HandleTimeout);
- }
- }
-
- STFUNC(StateResolving) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHttpProxy::TEvResolveHostResponse, HandleResolving);
- CFunc(NActors::TEvents::TEvWakeup::EventType, HandleTimeout);
- }
- }
-
- STFUNC(StateConnecting) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NActors::TEvPollerReady, HandleConnecting);
- CFunc(NActors::TEvents::TEvWakeup::EventType, HandleTimeout);
- HFunc(NActors::TEvPollerRegisterResult, HandleConnecting);
- }
- }
-
- STFUNC(StateConnected) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NActors::TEvPollerReady, HandleConnected);
- CFunc(NActors::TEvents::TEvWakeup::EventType, HandleTimeout);
- HFunc(NActors::TEvPollerRegisterResult, HandleConnected);
- }
- }
-
- STFUNC(StateFailed) {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvHttpProxy::TEvHttpOutgoingRequest, HandleFailed);
- }
- }
-};
-
-NActors::IActor* CreateOutgoingConnectionActor(const TActorId& owner, bool secure, const TActorId& poller) {
- if (secure) {
- return new TOutgoingConnectionActor<TSecureSocketImpl>(owner, poller);
- } else {
- return new TOutgoingConnectionActor<TPlainSocketImpl>(owner, poller);
- }
-}
-
-}
diff --git a/library/cpp/actors/http/http_proxy_sock64.h b/library/cpp/actors/http/http_proxy_sock64.h
deleted file mode 100644
index fa6d9a1e56..0000000000
--- a/library/cpp/actors/http/http_proxy_sock64.h
+++ /dev/null
@@ -1,147 +0,0 @@
-#pragma once
-#include <util/network/sock.h>
-#include "http.h"
-
-class TInet64StreamSocket: public TStreamSocket {
-protected:
- TInet64StreamSocket(const TInet64StreamSocket& parent, SOCKET fd)
- : TStreamSocket(fd)
- , AF(parent.AF)
- {
- }
-
-public:
- TInet64StreamSocket(int af = {}) {
- CreateSocket(af);
- }
-
- std::shared_ptr<ISockAddr> MakeAddress(const TString& address, int port) {
- if (!address) {
- if (AF == AF_INET6) {
- return std::make_shared<TSockAddrInet6>("::", port);
- } else {
- return std::make_shared<TSockAddrInet>(INADDR_ANY, port);
- }
- }
- if (NHttp::IsIPv6(address)) {
- return std::make_shared<TSockAddrInet6>(address.data(), port);
- } else if (NHttp::IsIPv4(address)) {
- return std::make_shared<TSockAddrInet>(address.data(), port);
- }
- struct addrinfo hints = {
- .ai_flags = AI_PASSIVE,
- .ai_family = AF,
- .ai_socktype = SOCK_STREAM,
- };
- struct addrinfo* gai_res = nullptr;
- int gai_ret = getaddrinfo(address.data(), nullptr, &hints, &gai_res);
- std::shared_ptr<ISockAddr> result;
- if (gai_ret == 0 && gai_res->ai_addr) {
- switch (gai_res->ai_addr->sa_family) {
- case AF_INET6: {
- std::shared_ptr<TSockAddrInet6> resultIp6 = std::make_shared<TSockAddrInet6>();
- if (resultIp6->Size() >= gai_res->ai_addrlen) {
- memcpy(resultIp6->SockAddr(), gai_res->ai_addr, gai_res->ai_addrlen);
- resultIp6->SetPort(port);
- result = std::move(resultIp6);
- }
- }
- break;
- case AF_INET: {
- std::shared_ptr<TSockAddrInet> resultIp4 = std::make_shared<TSockAddrInet>();
- if (resultIp4->Size() >= gai_res->ai_addrlen) {
- memcpy(resultIp4->SockAddr(), gai_res->ai_addr, gai_res->ai_addrlen);
- resultIp4->SetPort(port);
- result = std::move(resultIp4);
- }
- }
- break;
- }
- }
- if (gai_res) {
- freeaddrinfo(gai_res);
- }
- if (result) {
- return result;
- }
- throw yexception() << "Unable to resolve address " << address;
- }
-
- static int GuessAddressFamily(const TString& address) {
- if (!address) {
- return 0;
- }
- if (NHttp::IsIPv6(address)) {
- return AF_INET6;
- } else if (NHttp::IsIPv4(address)) {
- return AF_INET;
- }
- struct addrinfo hints = {
- .ai_flags = AI_PASSIVE,
- .ai_family = 0,
- .ai_socktype = SOCK_STREAM,
- };
- int result = 0;
- struct addrinfo* gai_res = nullptr;
- int gai_ret = getaddrinfo(address.data(), nullptr, &hints, &gai_res);
- if (gai_ret == 0 && gai_res->ai_addr) {
- switch (gai_res->ai_addr->sa_family) {
- case AF_INET:
- case AF_INET6:
- result = gai_res->ai_addr->sa_family;
- break;
- }
- }
- if (gai_res) {
- freeaddrinfo(gai_res);
- }
- return result;
- }
-
- static std::shared_ptr<ISockAddr> MakeAddress(const sockaddr_storage& storage) {
- std::shared_ptr<ISockAddr> addr;
- switch (storage.ss_family) {
- case AF_INET:
- addr = std::make_shared<TSockAddrInet>();
- break;
- case AF_INET6:
- addr = std::make_shared<TSockAddrInet6>();
- break;
- }
- if (addr) {
- memcpy(addr->SockAddr(), &storage, addr->Size());
- }
- return addr;
- }
-
- std::optional<TInet64StreamSocket> Accept(std::shared_ptr<ISockAddr>& acceptedAddr) {
- sockaddr_storage addrStorage = {};
- socklen_t addrLen = sizeof(addrStorage);
- SOCKET s = accept((SOCKET)*this, reinterpret_cast<sockaddr*>(&addrStorage), &addrLen);
- if (s == INVALID_SOCKET) {
- return {};
- }
- acceptedAddr = MakeAddress(addrStorage);
- return TInet64StreamSocket(*this, s);
- }
-
-protected:
- int AF = AF_UNSPEC;
-
- void CreateSocket(int af) {
- SOCKET s;
- if (af == 0) {
- s = socket(AF = AF_INET6, SOCK_STREAM, 0);
- if (s < 0) {
- s = socket(AF = AF_INET, SOCK_STREAM, 0);
- }
- } else {
- s = socket(AF = af, SOCK_STREAM, 0);
- }
- if (AF == AF_INET6) {
- SetSockOpt(s, SOL_SOCKET, IPV6_V6ONLY, (int)false);
- }
- TSocketHolder sock(s);
- sock.Swap(*this);
- }
-};
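
TInet64StreamSocket hides the IPv4/IPv6 split behind two helpers: GuessAddressFamily() picks a family for a textual address and MakeAddress() turns it into an ISockAddr, falling back to getaddrinfo for names (which can block). A small resolution sketch; the function name is illustrative.

    #include <library/cpp/actors/http/http_proxy_sock64.h>

    std::shared_ptr<ISockAddr> ResolveBindAddress(const TString& address, int port) {
        // family is AF_INET6, AF_INET or 0; with 0 the socket tries IPv6 first and falls back to IPv4
        TInet64StreamSocket socket(TInet64StreamSocket::GuessAddressFamily(address));
        // literal IPs are handled inline, everything else goes through getaddrinfo;
        // throws yexception if the address cannot be resolved at all
        return socket.MakeAddress(address, port);
    }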
diff --git a/library/cpp/actors/http/http_proxy_sock_impl.h b/library/cpp/actors/http/http_proxy_sock_impl.h
deleted file mode 100644
index 788c99d9b2..0000000000
--- a/library/cpp/actors/http/http_proxy_sock_impl.h
+++ /dev/null
@@ -1,274 +0,0 @@
-#pragma once
-
-#include "http.h"
-#include "http_proxy.h"
-
-namespace NHttp {
-
-struct TPlainSocketImpl : virtual public THttpConfig {
- TIntrusivePtr<TSocketDescriptor> Socket;
- TString Host;
-
- TPlainSocketImpl() = default;
-
- void Create(int af) {
- Socket = new TSocketDescriptor(af);
- }
-
- TPlainSocketImpl(TIntrusivePtr<TSocketDescriptor> socket)
- : Socket(std::move(socket))
- {}
-
- SOCKET GetRawSocket() const {
- return static_cast<SOCKET>(Socket->Socket);
- }
-
- void SetNonBlock(bool nonBlock = true) noexcept {
- try {
- ::SetNonBlock(Socket->Socket, nonBlock);
- }
- catch (const yexception&) {
- }
- }
-
- void SetTimeout(TDuration timeout) noexcept {
- try {
- ::SetSocketTimeout(Socket->Socket, timeout.Seconds(), timeout.MilliSecondsOfSecond());
- }
- catch (const yexception&) {
- }
- }
-
- void Shutdown() {
- //Socket->Socket.ShutDown(SHUT_RDWR); // KIKIMR-3895
- if (Socket) {
- ::shutdown(Socket->Socket, SHUT_RDWR);
- }
- }
-
- int Connect(SocketAddressType address) {
- return Socket->Socket.Connect(address.get());
- }
-
- static constexpr int OnConnect(bool&, bool&) {
- return 1;
- }
-
- static int OnAccept(std::shared_ptr<TPrivateEndpointInfo>, bool&, bool&) {
- return 1;
- }
-
- bool IsGood() {
- int res;
- GetSockOpt(Socket->Socket, SOL_SOCKET, SO_ERROR, res);
- return res == 0;
- }
-
- int GetError() {
- int res;
- GetSockOpt(Socket->Socket, SOL_SOCKET, SO_ERROR, res);
- return res;
- }
-
- ssize_t Send(const void* data, size_t size, bool&, bool&) {
- return Socket->Socket.Send(data, size);
- }
-
- ssize_t Recv(void* data, size_t size, bool&, bool&) {
- return Socket->Socket.Recv(data, size);
- }
-
- void SetHost(const TString& host) {
- Host = host;
- }
-};
-
-struct TSecureSocketImpl : TPlainSocketImpl, TSslHelpers {
- static TSecureSocketImpl* IO(BIO* bio) noexcept {
- return static_cast<TSecureSocketImpl*>(BIO_get_data(bio));
- }
-
- static int IoWrite(BIO* bio, const char* data, int dlen) noexcept {
- BIO_clear_retry_flags(bio);
- int res = IO(bio)->Socket->Socket.Send(data, dlen);
- if (-res == EAGAIN) {
- BIO_set_retry_write(bio);
- }
- return res;
- }
-
- static int IoRead(BIO* bio, char* data, int dlen) noexcept {
- BIO_clear_retry_flags(bio);
- int res = IO(bio)->Socket->Socket.Recv(data, dlen);
- if (-res == EAGAIN) {
- BIO_set_retry_read(bio);
- }
- return res;
- }
-
- static int IoPuts(BIO* bio, const char* buf) noexcept {
- Y_UNUSED(bio);
- Y_UNUSED(buf);
- return -2;
- }
-
- static int IoGets(BIO* bio, char* buf, int size) noexcept {
- Y_UNUSED(bio);
- Y_UNUSED(buf);
- Y_UNUSED(size);
- return -2;
- }
-
- static long IoCtrl(BIO* bio, int cmd, long larg, void* parg) noexcept {
- Y_UNUSED(larg);
- Y_UNUSED(parg);
-
- if (cmd == BIO_CTRL_FLUSH) {
- IO(bio)->Flush();
- return 1;
- }
-
- return -2;
- }
-
- static int IoCreate(BIO* bio) noexcept {
- BIO_set_data(bio, nullptr);
- BIO_set_init(bio, 1);
- return 1;
- }
-
- static int IoDestroy(BIO* bio) noexcept {
- BIO_set_data(bio, nullptr);
- BIO_set_init(bio, 0);
- return 1;
- }
-
- static BIO_METHOD* CreateIoMethod() {
- BIO_METHOD* method = BIO_meth_new(BIO_get_new_index() | BIO_TYPE_SOURCE_SINK, "SecureSocketImpl");
- BIO_meth_set_write(method, IoWrite);
- BIO_meth_set_read(method, IoRead);
- BIO_meth_set_puts(method, IoPuts);
- BIO_meth_set_gets(method, IoGets);
- BIO_meth_set_ctrl(method, IoCtrl);
- BIO_meth_set_create(method, IoCreate);
- BIO_meth_set_destroy(method, IoDestroy);
- return method;
- }
-
- static BIO_METHOD* IoMethod() {
- static BIO_METHOD* method = CreateIoMethod();
- return method;
- }
-
- TSslHolder<BIO> Bio;
- TSslHolder<SSL_CTX> Ctx;
- TSslHolder<SSL> Ssl;
-
- TSecureSocketImpl() = default;
-
- TSecureSocketImpl(TIntrusivePtr<TSocketDescriptor> socket)
- : TPlainSocketImpl(std::move(socket))
- {}
-
- void InitClientSsl() {
- Bio.Reset(BIO_new(IoMethod()));
- BIO_set_data(Bio.Get(), this);
- BIO_set_nbio(Bio.Get(), 1);
- Ctx = CreateClientContext();
- Ssl = ConstructSsl(Ctx.Get(), Bio.Get());
- if (!Host.Empty()) {
- SSL_set_tlsext_host_name(Ssl.Get(), Host.c_str());
- }
- SSL_set_connect_state(Ssl.Get());
- }
-
- void InitServerSsl(SSL_CTX* ctx) {
- Bio.Reset(BIO_new(IoMethod()));
- BIO_set_data(Bio.Get(), this);
- BIO_set_nbio(Bio.Get(), 1);
- Ssl = ConstructSsl(ctx, Bio.Get());
- SSL_set_accept_state(Ssl.Get());
- }
-
- void Flush() {}
-
- ssize_t Send(const void* data, size_t size, bool& read, bool& write) {
- ssize_t res = SSL_write(Ssl.Get(), data, size);
- if (res < 0) {
- res = SSL_get_error(Ssl.Get(), res);
- switch(res) {
- case SSL_ERROR_WANT_READ:
- read = true;
- return -EAGAIN;
- case SSL_ERROR_WANT_WRITE:
- write = true;
- return -EAGAIN;
- default:
- return -EIO;
- }
- }
- return res;
- }
-
- ssize_t Recv(void* data, size_t size, bool& read, bool& write) {
- ssize_t res = SSL_read(Ssl.Get(), data, size);
- if (res < 0) {
- res = SSL_get_error(Ssl.Get(), res);
- switch(res) {
- case SSL_ERROR_WANT_READ:
- read = true;
- return -EAGAIN;
- case SSL_ERROR_WANT_WRITE:
- write = true;
- return -EAGAIN;
- default:
- return -EIO;
- }
- }
- return res;
- }
-
- int OnConnect(bool& read, bool& write) {
- if (!Ssl) {
- InitClientSsl();
- }
- int res = SSL_connect(Ssl.Get());
- if (res <= 0) {
- res = SSL_get_error(Ssl.Get(), res);
- switch(res) {
- case SSL_ERROR_WANT_READ:
- read = true;
- return -EAGAIN;
- case SSL_ERROR_WANT_WRITE:
- write = true;
- return -EAGAIN;
- default:
- return -EIO;
- }
- }
- return res;
- }
-
- int OnAccept(std::shared_ptr<TPrivateEndpointInfo> endpoint, bool& read, bool& write) {
- if (!Ssl) {
- InitServerSsl(endpoint->SecureContext.Get());
- }
- int res = SSL_accept(Ssl.Get());
- if (res <= 0) {
- res = SSL_get_error(Ssl.Get(), res);
- switch(res) {
- case SSL_ERROR_WANT_READ:
- read = true;
- return -EAGAIN;
- case SSL_ERROR_WANT_WRITE:
- write = true;
- return -EAGAIN;
- default:
- return -EIO;
- }
- }
- return res;
- }
-};
-
-}
diff --git a/library/cpp/actors/http/http_proxy_ssl.h b/library/cpp/actors/http/http_proxy_ssl.h
deleted file mode 100644
index 9953430b1c..0000000000
--- a/library/cpp/actors/http/http_proxy_ssl.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#pragma once
-
-#include <openssl/bio.h>
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/tls1.h>
-
-namespace NHttp {
-
-struct TSslHelpers {
- struct TSslDestroy {
- static void Destroy(SSL_CTX* ctx) noexcept {
- SSL_CTX_free(ctx);
- }
-
- static void Destroy(SSL* ssl) noexcept {
- SSL_free(ssl);
- }
-
- static void Destroy(X509* cert) noexcept {
- X509_free(cert);
- }
-
- static void Destroy(EVP_PKEY* pkey) noexcept {
- EVP_PKEY_free(pkey);
- }
-
- static void Destroy(BIO* bio) noexcept {
- BIO_free(bio);
- }
- };
-
- template <typename T>
- using TSslHolder = THolder<T, TSslDestroy>;
-
- static TSslHolder<SSL_CTX> CreateSslCtx(const SSL_METHOD* method) {
- TSslHolder<SSL_CTX> ctx(SSL_CTX_new(method));
-
- if (ctx) {
- SSL_CTX_set_options(ctx.Get(), SSL_OP_NO_SSLv2);
- SSL_CTX_set_options(ctx.Get(), SSL_OP_NO_SSLv3);
- SSL_CTX_set_options(ctx.Get(), SSL_OP_NO_TLSv1);
- SSL_CTX_set_options(ctx.Get(), SSL_OP_NO_TLSv1_1);
- SSL_CTX_set_options(ctx.Get(), SSL_OP_MICROSOFT_SESS_ID_BUG);
- SSL_CTX_set_options(ctx.Get(), SSL_OP_NETSCAPE_CHALLENGE_BUG);
- }
-
- return ctx;
- }
-
- static TSslHolder<SSL_CTX> CreateClientContext() {
- return CreateSslCtx(SSLv23_client_method());
- }
-
- static TSslHolder<SSL_CTX> CreateServerContext(const TString& certificate, const TString& key) {
- TSslHolder<SSL_CTX> ctx = CreateSslCtx(SSLv23_server_method());
- SSL_CTX_set_ecdh_auto(ctx.Get(), 1);
- int res;
- res = SSL_CTX_use_certificate_chain_file(ctx.Get(), certificate.c_str());
- if (res < 0) {
- // TODO(xenoxeno): more diagnostics?
- return nullptr;
- }
- res = SSL_CTX_use_PrivateKey_file(ctx.Get(), key.c_str(), SSL_FILETYPE_PEM);
- if (res < 0) {
- // TODO(xenoxeno): more diagnostics?
- return nullptr;
- }
- return ctx;
- }
-
- static bool LoadX509Chain(TSslHolder<SSL_CTX>& ctx, const TString& pem) {
- TSslHolder<BIO> bio(BIO_new_mem_buf(pem.c_str(), pem.size()));
- if (bio == nullptr) {
- return false;
- }
- TSslHolder<X509> cert(PEM_read_bio_X509_AUX(bio.Get(), nullptr, nullptr, nullptr));
- if (cert == nullptr) {
- return false;
- }
- if (SSL_CTX_use_certificate(ctx.Get(), cert.Release()) <= 0) {
- return false;
- }
- SSL_CTX_clear_chain_certs(ctx.Get());
- while (true) {
- TSslHolder<X509> ca(PEM_read_bio_X509(bio.Get(), nullptr, nullptr, nullptr));
- if (ca == nullptr) {
- break;
- }
- if (!SSL_CTX_add0_chain_cert(ctx.Get(), ca.Release())) {
- return false;
- }
- }
- return true;
- }
-
- static bool LoadPrivateKey(TSslHolder<SSL_CTX>& ctx, const TString& pem) {
- TSslHolder<BIO> bio(BIO_new_mem_buf(pem.c_str(), pem.size()));
- if (bio == nullptr) {
- return false;
- }
- TSslHolder<EVP_PKEY> pkey(PEM_read_bio_PrivateKey(bio.Get(), nullptr, nullptr, nullptr));
- if (SSL_CTX_use_PrivateKey(ctx.Get(), pkey.Release()) <= 0) {
- return false;
- }
- return true;
- }
-
- static TSslHolder<SSL_CTX> CreateServerContext(const TString& pem) {
- TSslHolder<SSL_CTX> ctx = CreateSslCtx(SSLv23_server_method());
- SSL_CTX_set_ecdh_auto(ctx.Get(), 1);
- if (!LoadX509Chain(ctx, pem)) {
- return nullptr;
- }
- if (!LoadPrivateKey(ctx, pem)) {
- return nullptr;
- }
- return ctx;
- }
-
- static TSslHolder<SSL> ConstructSsl(SSL_CTX* ctx, BIO* bio) {
- TSslHolder<SSL> ssl(SSL_new(ctx));
-
- if (ssl) {
- BIO_up_ref(bio); // SSL_set_bio consumes only one reference if rbio and wbio are the same
- SSL_set_bio(ssl.Get(), bio, bio);
- }
-
- return ssl;
- }
-};
-
-}
diff --git a/library/cpp/actors/http/http_static.cpp b/library/cpp/actors/http/http_static.cpp
deleted file mode 100644
index ff36f5486d..0000000000
--- a/library/cpp/actors/http/http_static.cpp
+++ /dev/null
@@ -1,97 +0,0 @@
-#include "http_proxy.h"
-#include "http_static.h"
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/http/http.h>
-#include <library/cpp/resource/resource.h>
-#include <util/folder/path.h>
-#include <util/stream/file.h>
-
-namespace NHttp {
-
-class THttpStaticContentHandler : public NActors::TActor<THttpStaticContentHandler> {
-public:
- using TBase = NActors::TActor<THttpStaticContentHandler>;
- const TFsPath URL;
- const TFsPath FilePath;
- const TFsPath ResourcePath;
- const TFsPath Index;
-
- THttpStaticContentHandler(const TString& url, const TString& filePath, const TString& resourcePath, const TString& index)
- : TBase(&THttpStaticContentHandler::StateWork)
- , URL(url)
- , FilePath(filePath)
- , ResourcePath(resourcePath)
- , Index(index)
- {}
-
- static constexpr char ActorName[] = "HTTP_STATIC_ACTOR";
-
- static TInstant GetCompileTime() {
- tm compileTime;
- strptime(__DATE__ " " __TIME__, "%B %d %Y %H:%M:%S", &compileTime);
- return TInstant::Seconds(mktime(&compileTime));
- }
-
- void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr event, const NActors::TActorContext& ctx) {
- THttpOutgoingResponsePtr response;
- if (event->Get()->Request->Method != "GET") {
- response = event->Get()->Request->CreateResponseBadRequest("Wrong request");
- ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- return;
- }
- TFsPath url(event->Get()->Request->URL.Before('?'));
- if (!url.IsAbsolute()) {
- response = event->Get()->Request->CreateResponseBadRequest("Completely wrong URL");
- ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- return;
- }
- if (url.GetPath().EndsWith('/') && Index.IsDefined()) {
- url /= Index;
- }
- url = url.RelativeTo(URL);
- try {
- // TODO: caching?
- TString contentType = mimetypeByExt(url.GetExtension().c_str());
- TString data;
- TFileStat filestat;
- TFsPath resourcename(ResourcePath / url);
- if (NResource::FindExact(resourcename.GetPath(), &data)) {
- static TInstant compileTime(GetCompileTime());
- filestat.MTime = compileTime.Seconds();
- } else {
- TFsPath filename(FilePath / url);
- if (!filename.IsSubpathOf(FilePath) && filename != FilePath) {
- response = event->Get()->Request->CreateResponseBadRequest("Wrong URL");
- ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- return;
- }
- if (filename.Stat(filestat) && filestat.IsFile()) {
- data = TUnbufferedFileInput(filename).ReadAll();
- }
- }
- if (!filestat.IsNull()) {
- response = event->Get()->Request->CreateResponseOK(data, contentType, TInstant::Seconds(filestat.MTime));
- } else {
- response = event->Get()->Request->CreateResponseNotFound("File not found");
- }
- }
- catch (const yexception&) {
- response = event->Get()->Request->CreateResponseServiceUnavailable("Not available");
- }
- ctx.Send(event->Sender, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
- }
-
- STFUNC(StateWork) {
- switch (ev->GetTypeRewrite()) {
- HFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
- }
- }
-};
-
-NActors::IActor* CreateHttpStaticContentHandler(const TString& url, const TString& filePath, const TString& resourcePath, const TString& index) {
- return new THttpStaticContentHandler(url, filePath, resourcePath, index);
-}
-
-}
diff --git a/library/cpp/actors/http/http_static.h b/library/cpp/actors/http/http_static.h
deleted file mode 100644
index f91e15dfb1..0000000000
--- a/library/cpp/actors/http/http_static.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#pragma once
-#include <library/cpp/actors/core/actor.h>
-#include "http.h"
-
-namespace NHttp {
-
-NActors::IActor* CreateHttpStaticContentHandler(const TString& url, const TString& filePath, const TString& resourcePath, const TString& index = TString());
-
-}
diff --git a/library/cpp/actors/http/http_ut.cpp b/library/cpp/actors/http/http_ut.cpp
deleted file mode 100644
index e06de07867..0000000000
--- a/library/cpp/actors/http/http_ut.cpp
+++ /dev/null
@@ -1,509 +0,0 @@
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/testing/unittest/tests_data.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/testlib/test_runtime.h>
-#include <util/system/tempfile.h>
-#include "http.h"
-#include "http_proxy.h"
-
-
-
-enum EService : NActors::NLog::EComponent {
- MIN,
- Logger,
- MVP,
- MAX
-};
-
-namespace {
-
-template <typename HttpType>
-void EatWholeString(TIntrusivePtr<HttpType>& request, const TString& data) {
- request->EnsureEnoughSpaceAvailable(data.size());
- auto size = std::min(request->Avail(), data.size());
- memcpy(request->Pos(), data.data(), size);
- request->Advance(size);
-}
-
-template <typename HttpType>
-void EatPartialString(TIntrusivePtr<HttpType>& request, const TString& data) {
- for (char c : data) {
- request->EnsureEnoughSpaceAvailable(1);
- memcpy(request->Pos(), &c, 1);
- request->Advance(1);
- }
-}
-
-}
-
-Y_UNIT_TEST_SUITE(HttpProxy) {
- Y_UNIT_TEST(BasicParsing) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "GET /test HTTP/1.1\r\nHost: test\r\nSome-Header: 32344\r\n\r\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nSome-Header: 32344\r\n\r\n");
- }
-
- Y_UNIT_TEST(GetWithSpecifiedContentType) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "GET /test HTTP/1.1\r\nHost: test\r\nContent-Type: application/json\r\nSome-Header: 32344\r\n\r\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nContent-Type: application/json\r\nSome-Header: 32344\r\n\r\n");
- }
-
- Y_UNIT_TEST(BasicParsingChunkedBodyRequest) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "POST /Url HTTP/1.1\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nthis\r\n4\r\n is \r\n5\r\ntest.\r\n0\r\n\r\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "POST");
- UNIT_ASSERT_EQUAL(request->URL, "/Url");
- UNIT_ASSERT_EQUAL(request->Connection, "close");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->TransferEncoding, "chunked");
- UNIT_ASSERT_EQUAL(request->Body, "this is test.");
- }
-
- Y_UNIT_TEST(BasicPost) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "POST /Url HTTP/1.1\r\nConnection: close\r\nContent-Length: 13\r\n\r\nthis is test.");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "POST");
- UNIT_ASSERT_EQUAL(request->URL, "/Url");
- UNIT_ASSERT_EQUAL(request->Connection, "close");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->TransferEncoding, "");
- UNIT_ASSERT_EQUAL(request->Body, "this is test.");
- }
-
- Y_UNIT_TEST(BasicParsingChunkedBodyResponse) {
- NHttp::THttpOutgoingRequestPtr request = nullptr; //new NHttp::THttpOutgoingRequest();
- NHttp::THttpIncomingResponsePtr response = new NHttp::THttpIncomingResponse(request);
- EatWholeString(response, "HTTP/1.1 200 OK\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nthis\r\n4\r\n is \r\n5\r\ntest.\r\n0\r\n\r\n");
- UNIT_ASSERT_EQUAL(response->Stage, NHttp::THttpIncomingResponse::EParseStage::Done);
- UNIT_ASSERT_EQUAL(response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Connection, "close");
- UNIT_ASSERT_EQUAL(response->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(response->Version, "1.1");
- UNIT_ASSERT_EQUAL(response->TransferEncoding, "chunked");
- UNIT_ASSERT_EQUAL(response->Body, "this is test.");
- }
-
- Y_UNIT_TEST(InvalidParsingChunkedBody) {
- NHttp::THttpOutgoingRequestPtr request = nullptr; //new NHttp::THttpOutgoingRequest();
- NHttp::THttpIncomingResponsePtr response = new NHttp::THttpIncomingResponse(request);
- EatWholeString(response, "HTTP/1.1 200 OK\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n5\r\nthis\r\n4\r\n is \r\n5\r\ntest.\r\n0\r\n\r\n");
- UNIT_ASSERT(response->IsError());
- }
-
- Y_UNIT_TEST(AdvancedParsingChunkedBody) {
- NHttp::THttpOutgoingRequestPtr request = nullptr; //new NHttp::THttpOutgoingRequest();
- NHttp::THttpIncomingResponsePtr response = new NHttp::THttpIncomingResponse(request);
- EatWholeString(response, "HTTP/1.1 200 OK\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n6\r\nthis\r\n\r\n4\r\n is \r\n5\r\ntest.\r\n0\r\n\r\n");
- UNIT_ASSERT_EQUAL(response->Stage, NHttp::THttpIncomingResponse::EParseStage::Done);
- UNIT_ASSERT_EQUAL(response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Connection, "close");
- UNIT_ASSERT_EQUAL(response->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(response->Version, "1.1");
- UNIT_ASSERT_EQUAL(response->TransferEncoding, "chunked");
- UNIT_ASSERT_EQUAL(response->Body, "this\r\n is test.");
- }
-
- Y_UNIT_TEST(CreateCompressedResponse) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "GET /Url HTTP/1.1\r\nConnection: close\r\nAccept-Encoding: gzip, deflate\r\n\r\n");
- NHttp::THttpOutgoingResponsePtr response = new NHttp::THttpOutgoingResponse(request, "HTTP", "1.1", "200", "OK");
- TString compressedBody = "something very long to compress with deflate algorithm. something very long to compress with deflate algorithm.";
- response->EnableCompression();
- size_t size1 = response->Size();
- response->SetBody(compressedBody);
- size_t size2 = response->Size();
- size_t compressedBodySize = size2 - size1;
- UNIT_ASSERT_VALUES_EQUAL("deflate", response->ContentEncoding);
- UNIT_ASSERT(compressedBodySize < compressedBody.size());
- NHttp::THttpOutgoingResponsePtr response2 = response->Duplicate(request);
- UNIT_ASSERT_VALUES_EQUAL(response->Body, response2->Body);
- UNIT_ASSERT_VALUES_EQUAL(response->ContentLength, response2->ContentLength);
- UNIT_ASSERT_VALUES_EQUAL(response->Size(), response2->Size());
- }
-
- Y_UNIT_TEST(BasicPartialParsing) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatPartialString(request, "GET /test HTTP/1.1\r\nHost: test\r\nSome-Header: 32344\r\n\r\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nSome-Header: 32344\r\n\r\n");
- }
-
- Y_UNIT_TEST(BasicPartialParsingChunkedBody) {
- NHttp::THttpOutgoingRequestPtr request = nullptr; //new NHttp::THttpOutgoingRequest();
- NHttp::THttpIncomingResponsePtr response = new NHttp::THttpIncomingResponse(request);
- EatPartialString(response, "HTTP/1.1 200 OK\r\nConnection: close\r\nTransfer-Encoding: chunked\r\n\r\n4\r\nthis\r\n4\r\n is \r\n5\r\ntest.\r\n0\r\n\r\n");
- UNIT_ASSERT_EQUAL(response->Stage, NHttp::THttpIncomingResponse::EParseStage::Done);
- UNIT_ASSERT_EQUAL(response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Connection, "close");
- UNIT_ASSERT_EQUAL(response->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(response->Version, "1.1");
- UNIT_ASSERT_EQUAL(response->TransferEncoding, "chunked");
- UNIT_ASSERT_EQUAL(response->Body, "this is test.");
- }
-
- Y_UNIT_TEST(BasicParsingContentLength0) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatPartialString(request, "GET /test HTTP/1.1\r\nHost: test\r\nContent-Length: 0\r\n\r\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nContent-Length: 0\r\n\r\n");
- }
-
- Y_UNIT_TEST(AdvancedParsing) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "GE");
- EatWholeString(request, "T");
- EatWholeString(request, " ");
- EatWholeString(request, "/test");
- EatWholeString(request, " HTTP/1.1\r");
- EatWholeString(request, "\nHo");
- EatWholeString(request, "st: test");
- EatWholeString(request, "\r\n");
- EatWholeString(request, "Some-Header: 32344\r\n\r");
- EatWholeString(request, "\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nSome-Header: 32344\r\n\r\n");
- }
-
- Y_UNIT_TEST(AdvancedPartialParsing) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatPartialString(request, "GE");
- EatPartialString(request, "T");
- EatPartialString(request, " ");
- EatPartialString(request, "/test");
- EatPartialString(request, " HTTP/1.1\r");
- EatPartialString(request, "\nHo");
- EatPartialString(request, "st: test");
- EatPartialString(request, "\r\n");
- EatPartialString(request, "Some-Header: 32344\r\n\r");
- EatPartialString(request, "\n");
- UNIT_ASSERT_EQUAL(request->Stage, NHttp::THttpIncomingRequest::EParseStage::Done);
- UNIT_ASSERT_EQUAL(request->Method, "GET");
- UNIT_ASSERT_EQUAL(request->URL, "/test");
- UNIT_ASSERT_EQUAL(request->Protocol, "HTTP");
- UNIT_ASSERT_EQUAL(request->Version, "1.1");
- UNIT_ASSERT_EQUAL(request->Host, "test");
- UNIT_ASSERT_EQUAL(request->Headers, "Host: test\r\nSome-Header: 32344\r\n\r\n");
- }
-
- Y_UNIT_TEST(BasicRenderBodyWithHeadersAndCookies) {
- NHttp::THttpOutgoingRequestPtr request = NHttp::THttpOutgoingRequest::CreateRequestGet("http://www.yandex.ru/data/url");
- NHttp::THeadersBuilder headers;
- NHttp::TCookiesBuilder cookies;
- cookies.Set("cookie1", "123456");
- cookies.Set("cookie2", "45678");
- headers.Set("Cookie", cookies.Render());
- request->Set(headers);
- TString requestData = request->AsString();
- UNIT_ASSERT_VALUES_EQUAL(requestData, "GET /data/url HTTP/1.1\r\nHost: www.yandex.ru\r\nAccept: */*\r\nCookie: cookie1=123456; cookie2=45678;\r\n");
- }
-
- Y_UNIT_TEST(BasicRenderOutgoingResponse) {
- NHttp::THttpIncomingRequestPtr request = new NHttp::THttpIncomingRequest();
- EatWholeString(request, "GET /test HTTP/1.1\r\nHost: test\r\nSome-Header: 32344\r\n\r\n");
-
- NHttp::THttpOutgoingResponsePtr httpResponseOk = request->CreateResponseOK("response ok");
- UNIT_ASSERT_EQUAL(httpResponseOk->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseOk->Status, "200");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseOk->Message, "OK");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseOk->ContentType, "text/html");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseOk->Body, "response ok");
-
- NHttp::THttpOutgoingResponsePtr httpResponseBadRequest = request->CreateResponseBadRequest();
- UNIT_ASSERT_EQUAL(httpResponseBadRequest->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseBadRequest->Status, "400");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseBadRequest->Message, "Bad Request");
- UNIT_ASSERT(httpResponseBadRequest->ContentType.empty());
- UNIT_ASSERT(httpResponseBadRequest->Body.empty());
-
- NHttp::THttpOutgoingResponsePtr httpResponseNotFound = request->CreateResponseNotFound();
- UNIT_ASSERT_EQUAL(httpResponseNotFound->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseNotFound->Status, "404");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseNotFound->Message, "Not Found");
- UNIT_ASSERT(httpResponseNotFound->ContentType.empty());
- UNIT_ASSERT(httpResponseNotFound->Body.empty());
-
- NHttp::THttpOutgoingResponsePtr httpResponseServiceUnavailable = request->CreateResponseServiceUnavailable();
- UNIT_ASSERT_EQUAL(httpResponseServiceUnavailable->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseServiceUnavailable->Status, "503");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseServiceUnavailable->Message, "Service Unavailable");
- UNIT_ASSERT(httpResponseServiceUnavailable->ContentType.empty());
- UNIT_ASSERT(httpResponseServiceUnavailable->Body.empty());
-
- NHttp::THttpOutgoingResponsePtr httpResponseGatewayTimeout = request->CreateResponseGatewayTimeout("gateway timeout body");
- UNIT_ASSERT_EQUAL(httpResponseGatewayTimeout->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseGatewayTimeout->Status, "504");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseGatewayTimeout->Message, "Gateway Timeout");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseGatewayTimeout->ContentType, "text/html");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseGatewayTimeout->Body, "gateway timeout body");
-
- NHttp::THttpOutgoingResponsePtr httpIncompleteResponse = request->CreateIncompleteResponse("401", "Unauthorized");
- UNIT_ASSERT_EQUAL(httpIncompleteResponse->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Header);
- UNIT_ASSERT_STRINGS_EQUAL(httpIncompleteResponse->Status, "401");
- UNIT_ASSERT_STRINGS_EQUAL(httpIncompleteResponse->Message, "Unauthorized");
-
- NHttp::THttpOutgoingResponsePtr httpResponse = request->CreateResponse("401", "Unauthorized");
- UNIT_ASSERT_EQUAL(httpResponse->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponse->Status, "401");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponse->Message, "Unauthorized");
-
- NHttp::THeadersBuilder headers;
- NHttp::TCookiesBuilder cookies;
- cookies.Set("cookie1", "123456");
- headers.Set("Set-Cookie", cookies.Render());
- headers.Set("Location", "http://www.yandex.ru/data/url");
-
- NHttp::THttpOutgoingResponsePtr httpResponseRedirect = request->CreateResponse("302", "Found", headers);
- UNIT_ASSERT_EQUAL(httpResponseRedirect->Stage, NHttp::THttpOutgoingResponse::ERenderStage::Done);
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseRedirect->Status, "302");
- UNIT_ASSERT_STRINGS_EQUAL(httpResponseRedirect->Message, "Found");
- UNIT_ASSERT_STRING_CONTAINS(httpResponseRedirect->Headers, "Set-Cookie: cookie1=123456;");
- UNIT_ASSERT_STRING_CONTAINS(httpResponseRedirect->Headers, "Location: http://www.yandex.ru/data/url");
- }
-
- Y_UNIT_TEST(BasicRunning4) {
- NActors::TTestActorRuntimeBase actorSystem;
- TPortManager portManager;
- TIpPort port = portManager.GetTcpPort();
- TAutoPtr<NActors::IEventHandle> handle;
- actorSystem.Initialize();
- //actorSystem.SetLogPriority(NActorsServices::HTTP, NActors::NLog::PRI_DEBUG);
-
- NActors::IActor* proxy = NHttp::CreateHttpProxy();
- NActors::TActorId proxyId = actorSystem.Register(proxy);
- actorSystem.Send(new NActors::IEventHandle(proxyId, TActorId(), new NHttp::TEvHttpProxy::TEvAddListeningPort(port)), 0, true);
- actorSystem.DispatchEvents();
-
- NActors::TActorId serverId = actorSystem.AllocateEdgeActor();
- actorSystem.Send(new NActors::IEventHandle(proxyId, serverId, new NHttp::TEvHttpProxy::TEvRegisterHandler("/test", serverId)), 0, true);
-
- NActors::TActorId clientId = actorSystem.AllocateEdgeActor();
- NHttp::THttpOutgoingRequestPtr httpRequest = NHttp::THttpOutgoingRequest::CreateRequestGet("http://127.0.0.1:" + ToString(port) + "/test");
- actorSystem.Send(new NActors::IEventHandle(proxyId, clientId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(httpRequest)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingRequest* request = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingRequest>(handle);
-
- UNIT_ASSERT_EQUAL(request->Request->URL, "/test");
-
- NHttp::THttpOutgoingResponsePtr httpResponse = request->Request->CreateResponseString("HTTP/1.1 200 Found\r\nConnection: Close\r\nTransfer-Encoding: chunked\r\n\r\n6\r\npassed\r\n0\r\n\r\n");
- actorSystem.Send(new NActors::IEventHandle(handle->Sender, serverId, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(httpResponse)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingResponse* response = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingResponse>(handle);
-
- UNIT_ASSERT_EQUAL(response->Response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Response->Body, "passed");
- }
-
- Y_UNIT_TEST(BasicRunning6) {
- NActors::TTestActorRuntimeBase actorSystem;
- TPortManager portManager;
- TIpPort port = portManager.GetTcpPort();
- TAutoPtr<NActors::IEventHandle> handle;
- actorSystem.Initialize();
- //actorSystem.SetLogPriority(NActorsServices::HTTP, NActors::NLog::PRI_DEBUG);
-
- NActors::IActor* proxy = NHttp::CreateHttpProxy();
- NActors::TActorId proxyId = actorSystem.Register(proxy);
- actorSystem.Send(new NActors::IEventHandle(proxyId, TActorId(), new NHttp::TEvHttpProxy::TEvAddListeningPort(port)), 0, true);
- actorSystem.DispatchEvents();
-
- NActors::TActorId serverId = actorSystem.AllocateEdgeActor();
- actorSystem.Send(new NActors::IEventHandle(proxyId, serverId, new NHttp::TEvHttpProxy::TEvRegisterHandler("/test", serverId)), 0, true);
-
- NActors::TActorId clientId = actorSystem.AllocateEdgeActor();
- NHttp::THttpOutgoingRequestPtr httpRequest = NHttp::THttpOutgoingRequest::CreateRequestGet("http://[::1]:" + ToString(port) + "/test");
- actorSystem.Send(new NActors::IEventHandle(proxyId, clientId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(httpRequest)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingRequest* request = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingRequest>(handle);
-
- UNIT_ASSERT_EQUAL(request->Request->URL, "/test");
-
- NHttp::THttpOutgoingResponsePtr httpResponse = request->Request->CreateResponseString("HTTP/1.1 200 Found\r\nConnection: Close\r\nTransfer-Encoding: chunked\r\n\r\n6\r\npassed\r\n0\r\n\r\n");
- actorSystem.Send(new NActors::IEventHandle(handle->Sender, serverId, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(httpResponse)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingResponse* response = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingResponse>(handle);
-
- UNIT_ASSERT_EQUAL(response->Response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Response->Body, "passed");
- }
-
- Y_UNIT_TEST(TlsRunning) {
- NActors::TTestActorRuntimeBase actorSystem;
- TPortManager portManager;
- TIpPort port = portManager.GetTcpPort();
- TAutoPtr<NActors::IEventHandle> handle;
- actorSystem.Initialize();
-
- TString certificateContent = R"___(-----BEGIN PRIVATE KEY-----
-MIIEvwIBADANBgkqhkiG9w0BAQEFAASCBKkwggSlAgEAAoIBAQCzRZjodO7Aqe1w
-RyOj6kG6g2nn8ZGAxfao4mLT0jDTbVksrhV/h2s3uldLkFo5WrNQ8WZe+iIbXeFL
-s8tO6hslzreo9sih2IHoRcH5KnS/6YTqVhRTJb1jE2dM8NwYbwTi+T2Pe0FrBPjI
-kgVO50gAtYl9C+fc715uZiSKW+rRlP5OoFTwxrOjiU27RPZjFYyWK9wTI1Es9uRr
-lbZbLl5cY6dK2J1AViRraaYKCWO26VbOPWLsY4OD3e+ZXIc3OMCz6Yb0wmRPeJ60
-bbbkGfI8O27kDdv69MAWHIm0yYMzKEnom1dce7rNQNDEqJfocsYIsg+EvayT1yQ9
-KTBegw7LAgMBAAECggEBAKaOCrotqYQmXArsjRhFFDwMy+BKdzyEr93INrlFl0dX
-WHpCYobRcbOc1G3H94tB0UdqgAnNqtJyLlb+++ydZAuEOu4oGc8EL+10ofq0jzOd
-6Xct8kQt0/6wkFDTlii9PHUDy0X65ZRgUiNGRtg/2I2QG+SpowmI+trm2xwQueFs
-VaWrjc3cVvXx0b8Lu7hqZUv08kgC38stzuRk/n2T5VWSAr7Z4ZWQbO918Dv35HUw
-Wy/0jNUFP9CBCvFJ4l0OoH9nYhWFG+HXWzNdw6/Hca4jciRKo6esCiOZ9uWYv/ec
-/NvX9rgFg8G8/SrTisX10+Bbeq+R1RKwq/IG409TH4ECgYEA14L+3QsgNIUMeYAx
-jSCyk22R/tOHI1BM+GtKPUhnwHlAssrcPcxXMJovl6WL93VauYjym0wpCz9urSpA
-I2CqTsG8GYciA6Dr3mHgD6cK0jj9UPAU6EnZ5S0mjhPqKZqutu9QegzD2uESvuN8
-36xezwQthzAf0nI/P3sJGjVXjikCgYEA1POm5xcV6SmM6HnIdadEebhzZIJ9TXQz
-ry3Jj3a7CKyD5C7fAdkHUTCjgT/2ElxPi9ABkZnC+d/cW9GtJFa0II5qO/agm3KQ
-ZXYiutu9A7xACHYFXRiJEjVUdGG9dKMVOHUEa8IHEgrrcUVM/suy/GgutywIfaXs
-y58IFP24K9MCgYEAk6zjz7wL+XEiNy+sxLQfKf7vB9sSwxQHakK6wHuY/L8Zomp3
-uLEJHfjJm/SIkK0N2g0JkXkCtv5kbKyC/rsCeK0wo52BpVLjzaLr0k34kE0U6B1b
-dkEE2pGx1bG3x4KDLj+Wuct9ecK5Aa0IqIyI+vo16GkFpUM8K9e3SQo8UOECgYEA
-sCZYAkILYtJ293p9giz5rIISGasDAUXE1vxWBXEeJ3+kneTTnZCrx9Im/ewtnWR0
-fF90XL9HFDDD88POqAd8eo2zfKR2l/89SGBfPBg2EtfuU9FkgGyiPciVcqvC7q9U
-B15saMKX3KnhtdGwbfeLt9RqCCTJZT4SUSDcq5hwdvcCgYAxY4Be8mNipj8Cgg22
-mVWSolA0TEzbtUcNk6iGodpi+Z0LKpsPC0YRqPRyh1K+rIltG1BVdmUBHcMlOYxl
-lWWvbJH6PkJWy4n2MF7PO45kjN3pPZg4hgH63JjZeAineBwEArUGb9zHnvzcdRvF
-wuQ2pZHL/HJ0laUSieHDJ5917w==
------END PRIVATE KEY-----
-
-
------BEGIN CERTIFICATE-----
-MIIDjTCCAnWgAwIBAgIURt5IBx0J3xgEaQvmyrFH2A+NkpMwDQYJKoZIhvcNAQEL
-BQAwVjELMAkGA1UEBhMCUlUxDzANBgNVBAgMBk1vc2NvdzEPMA0GA1UEBwwGTW9z
-Y293MQ8wDQYDVQQKDAZZYW5kZXgxFDASBgNVBAMMC3Rlc3Qtc2VydmVyMB4XDTE5
-MDkyMDE3MTQ0MVoXDTQ3MDIwNDE3MTQ0MVowVjELMAkGA1UEBhMCUlUxDzANBgNV
-BAgMBk1vc2NvdzEPMA0GA1UEBwwGTW9zY293MQ8wDQYDVQQKDAZZYW5kZXgxFDAS
-BgNVBAMMC3Rlc3Qtc2VydmVyMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKC
-AQEAs0WY6HTuwKntcEcjo+pBuoNp5/GRgMX2qOJi09Iw021ZLK4Vf4drN7pXS5Ba
-OVqzUPFmXvoiG13hS7PLTuobJc63qPbIodiB6EXB+Sp0v+mE6lYUUyW9YxNnTPDc
-GG8E4vk9j3tBawT4yJIFTudIALWJfQvn3O9ebmYkilvq0ZT+TqBU8Mazo4lNu0T2
-YxWMlivcEyNRLPbka5W2Wy5eXGOnStidQFYka2mmCgljtulWzj1i7GODg93vmVyH
-NzjAs+mG9MJkT3ietG225BnyPDtu5A3b+vTAFhyJtMmDMyhJ6JtXXHu6zUDQxKiX
-6HLGCLIPhL2sk9ckPSkwXoMOywIDAQABo1MwUTAdBgNVHQ4EFgQUDv/xuJ4CvCgG
-fPrZP3hRAt2+/LwwHwYDVR0jBBgwFoAUDv/xuJ4CvCgGfPrZP3hRAt2+/LwwDwYD
-VR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAinKpMYaA2tjLpAnPVbjy
-/ZxSBhhB26RiQp3Re8XOKyhTWqgYE6kldYT0aXgK9x9mPC5obQannDDYxDc7lX+/
-qP/u1X81ZcDRo/f+qQ3iHfT6Ftt/4O3qLnt45MFM6Q7WabRm82x3KjZTqpF3QUdy
-tumWiuAP5DMd1IRDtnKjFHO721OsEsf6NLcqdX89bGeqXDvrkwg3/PNwTyW5E7cj
-feY8L2eWtg6AJUnIBu11wvfzkLiH3QKzHvO/SIZTGf5ihDsJ3aKEE9UNauTL3bVc
-CRA/5XcX13GJwHHj6LCoc3sL7mt8qV9HKY2AOZ88mpObzISZxgPpdKCfjsrdm63V
-6g==
------END CERTIFICATE-----)___";
-
- TTempFileHandle certificateFile;
-
- certificateFile.Write(certificateContent.data(), certificateContent.size());
-
- NActors::IActor* proxy = NHttp::CreateHttpProxy();
- NActors::TActorId proxyId = actorSystem.Register(proxy);
-
- THolder<NHttp::TEvHttpProxy::TEvAddListeningPort> add = MakeHolder<NHttp::TEvHttpProxy::TEvAddListeningPort>(port);
- ///////// https configuration
- add->Secure = true;
- add->CertificateFile = certificateFile.Name();
- add->PrivateKeyFile = certificateFile.Name();
- /////////
- actorSystem.Send(new NActors::IEventHandle(proxyId, TActorId(), add.Release()), 0, true);
- actorSystem.DispatchEvents();
-
- NActors::TActorId serverId = actorSystem.AllocateEdgeActor();
- actorSystem.Send(new NActors::IEventHandle(proxyId, serverId, new NHttp::TEvHttpProxy::TEvRegisterHandler("/test", serverId)), 0, true);
-
- NActors::TActorId clientId = actorSystem.AllocateEdgeActor();
- NHttp::THttpOutgoingRequestPtr httpRequest = NHttp::THttpOutgoingRequest::CreateRequestGet("https://[::1]:" + ToString(port) + "/test");
- actorSystem.Send(new NActors::IEventHandle(proxyId, clientId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(httpRequest)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingRequest* request = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingRequest>(handle);
-
- UNIT_ASSERT_EQUAL(request->Request->URL, "/test");
-
- NHttp::THttpOutgoingResponsePtr httpResponse = request->Request->CreateResponseString("HTTP/1.1 200 Found\r\nConnection: Close\r\nTransfer-Encoding: chunked\r\n\r\n6\r\npassed\r\n0\r\n\r\n");
- actorSystem.Send(new NActors::IEventHandle(handle->Sender, serverId, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(httpResponse)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingResponse* response = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingResponse>(handle);
-
- UNIT_ASSERT_EQUAL(response->Response->Status, "200");
- UNIT_ASSERT_EQUAL(response->Response->Body, "passed");
- }
-
- /*Y_UNIT_TEST(AdvancedRunning) {
- THolder<NActors::TActorSystemSetup> setup = MakeHolder<NActors::TActorSystemSetup>();
- setup->NodeId = 1;
- setup->ExecutorsCount = 1;
- setup->Executors = new TAutoPtr<NActors::IExecutorPool>[1];
- setup->Executors[0] = new NActors::TBasicExecutorPool(0, 2, 10);
- setup->Scheduler = new NActors::TBasicSchedulerThread(NActors::TSchedulerConfig(512, 100));
- NActors::TActorSystem actorSystem(setup);
- actorSystem.Start();
- NHttp::THttpProxy* incomingProxy = new NHttp::THttpProxy();
- NActors::TActorId incomingProxyId = actorSystem.Register(incomingProxy);
- actorSystem.Send(incomingProxyId, new NHttp::TEvHttpProxy::TEvAddListeningPort(13337));
-
- NHttp::THttpProxy* outgoingProxy = new NHttp::THttpProxy();
- NActors::TActorId outgoingProxyId = actorSystem.Register(outgoingProxy);
-
- THolder<NHttp::THttpStaticStringRequest> httpRequest = MakeHolder<NHttp::THttpStaticStringRequest>("GET /test HTTP/1.1\r\n\r\n");
- actorSystem.Send(outgoingProxyId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest("[::]:13337", std::move(httpRequest)));
-
- Sleep(TDuration::Minutes(60));
- }*/
-
- Y_UNIT_TEST(TooLongHeader) {
- NActors::TTestActorRuntimeBase actorSystem;
- actorSystem.SetUseRealInterconnect();
- TPortManager portManager;
- TIpPort port = portManager.GetTcpPort();
- TAutoPtr<NActors::IEventHandle> handle;
- actorSystem.Initialize();
-
- NActors::IActor* proxy = NHttp::CreateHttpProxy();
- NActors::TActorId proxyId = actorSystem.Register(proxy);
- actorSystem.Send(new NActors::IEventHandle(proxyId, TActorId(), new NHttp::TEvHttpProxy::TEvAddListeningPort(port)), 0, true);
- actorSystem.DispatchEvents();
-
- NActors::TActorId serverId = actorSystem.AllocateEdgeActor();
- actorSystem.Send(new NActors::IEventHandle(proxyId, serverId, new NHttp::TEvHttpProxy::TEvRegisterHandler("/test", serverId)), 0, true);
-
- NActors::TActorId clientId = actorSystem.AllocateEdgeActor();
- NHttp::THttpOutgoingRequestPtr httpRequest = NHttp::THttpOutgoingRequest::CreateRequestGet("http://[::1]:" + ToString(port) + "/test");
- httpRequest->Set("Connection", "close");
- TString longHeader;
- longHeader.append(9000, 'X');
- httpRequest->Set(longHeader, "data");
- actorSystem.Send(new NActors::IEventHandle(proxyId, clientId, new NHttp::TEvHttpProxy::TEvHttpOutgoingRequest(httpRequest)), 0, true);
-
- NHttp::TEvHttpProxy::TEvHttpIncomingResponse* response = actorSystem.GrabEdgeEvent<NHttp::TEvHttpProxy::TEvHttpIncomingResponse>(handle);
-
- UNIT_ASSERT_EQUAL(response->Response->Status, "400");
- UNIT_ASSERT_EQUAL(response->Response->Body, "Invalid http header");
- }
-}
diff --git a/library/cpp/actors/http/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/http/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index f9c9afac61..0000000000
--- a/library/cpp/actors/http/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-http-ut)
-target_include_directories(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http
-)
-target_link_libraries(library-cpp-actors-http-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-http
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-http-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-http-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-http-ut
- TEST_TARGET
- library-cpp-actors-http-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-http-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-http-ut)
diff --git a/library/cpp/actors/http/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/http/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 99677acae5..0000000000
--- a/library/cpp/actors/http/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-http-ut)
-target_include_directories(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http
-)
-target_link_libraries(library-cpp-actors-http-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-http
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-http-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-http-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-http-ut
- TEST_TARGET
- library-cpp-actors-http-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-http-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-http-ut)
diff --git a/library/cpp/actors/http/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/http/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 8818e4418f..0000000000
--- a/library/cpp/actors/http/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-http-ut)
-target_include_directories(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http
-)
-target_link_libraries(library-cpp-actors-http-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-http
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-http-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-http-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-http-ut
- TEST_TARGET
- library-cpp-actors-http-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-http-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-http-ut)
diff --git a/library/cpp/actors/http/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/http/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 620f66ad00..0000000000
--- a/library/cpp/actors/http/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-http-ut)
-target_include_directories(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http
-)
-target_link_libraries(library-cpp-actors-http-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-http
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-http-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http/http_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-http-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-http-ut
- TEST_TARGET
- library-cpp-actors-http-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-http-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-http-ut)
diff --git a/library/cpp/actors/http/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/http/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 73603d626c..0000000000
--- a/library/cpp/actors/http/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,58 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-http-ut)
-target_include_directories(library-cpp-actors-http-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/http
-)
-target_link_libraries(library-cpp-actors-http-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-http
- cpp-actors-testlib
-)
-set_property(
- TARGET
- library-cpp-actors-http-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-http-ut
- TEST_TARGET
- library-cpp-actors-http-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-http-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-http-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-http-ut)
diff --git a/library/cpp/actors/http/ut/ya.make b/library/cpp/actors/http/ut/ya.make
deleted file mode 100644
index 8404308053..0000000000
--- a/library/cpp/actors/http/ut/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/http)
-
-SIZE(SMALL)
-
-PEERDIR(
- library/cpp/actors/testlib
-)
-
-IF (NOT OS_WINDOWS)
-SRCS(
- http_ut.cpp
-)
-ELSE()
-ENDIF()
-
-END()
diff --git a/library/cpp/actors/http/ya.make b/library/cpp/actors/http/ya.make
deleted file mode 100644
index 9b66988ea9..0000000000
--- a/library/cpp/actors/http/ya.make
+++ /dev/null
@@ -1,36 +0,0 @@
-LIBRARY()
-
-SRCS(
- http_cache.cpp
- http_cache.h
- http_compress.cpp
- http_config.h
- http_proxy_acceptor.cpp
- http_proxy_incoming.cpp
- http_proxy_outgoing.cpp
- http_proxy_sock_impl.h
- http_proxy_sock64.h
- http_proxy_ssl.h
- http_proxy.cpp
- http_proxy.h
- http_static.cpp
- http_static.h
- http.cpp
- http.h
-)
-
-PEERDIR(
- contrib/libs/openssl
- contrib/libs/zlib
- library/cpp/actors/core
- library/cpp/actors/interconnect
- library/cpp/dns
- library/cpp/monlib/metrics
- library/cpp/string_utils/quote
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/actors/interconnect/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 92e1fec219..0000000000
--- a/library/cpp/actors/interconnect/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(mock)
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-add_subdirectory(ut_huge_cluster)
-
-add_library(cpp-actors-interconnect)
-target_link_libraries(cpp-actors-interconnect PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-libc_compat
- contrib-libs-openssl
- contrib-libs-xxhash
- cpp-actors-core
- cpp-actors-dnscachelib
- cpp-actors-dnsresolver
- cpp-actors-helpers
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- cpp-actors-wilson
- cpp-digest-crc32c
- library-cpp-json
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- cpp-monlib-metrics
- service-pages-resources
- service-pages-tablesorter
- cpp-openssl-init
- library-cpp-packedtypes
-)
-target_sources(cpp-actors-interconnect PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_address.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_channel.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_counters.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_handshake.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_mon.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_resolve.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_stream.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/packet.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/types.cpp
-)
diff --git a/library/cpp/actors/interconnect/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 92e1fec219..0000000000
--- a/library/cpp/actors/interconnect/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(mock)
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-add_subdirectory(ut_huge_cluster)
-
-add_library(cpp-actors-interconnect)
-target_link_libraries(cpp-actors-interconnect PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-libc_compat
- contrib-libs-openssl
- contrib-libs-xxhash
- cpp-actors-core
- cpp-actors-dnscachelib
- cpp-actors-dnsresolver
- cpp-actors-helpers
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- cpp-actors-wilson
- cpp-digest-crc32c
- library-cpp-json
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- cpp-monlib-metrics
- service-pages-resources
- service-pages-tablesorter
- cpp-openssl-init
- library-cpp-packedtypes
-)
-target_sources(cpp-actors-interconnect PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_address.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_channel.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_counters.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_handshake.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_mon.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_resolve.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_stream.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/packet.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/types.cpp
-)
diff --git a/library/cpp/actors/interconnect/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 9fb3219fcf..0000000000
--- a/library/cpp/actors/interconnect/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(mock)
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-add_subdirectory(ut_huge_cluster)
-
-add_library(cpp-actors-interconnect)
-target_link_libraries(cpp-actors-interconnect PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-libc_compat
- contrib-libs-openssl
- contrib-libs-xxhash
- cpp-actors-core
- cpp-actors-dnscachelib
- cpp-actors-dnsresolver
- cpp-actors-helpers
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- cpp-actors-wilson
- cpp-digest-crc32c
- library-cpp-json
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- cpp-monlib-metrics
- service-pages-resources
- service-pages-tablesorter
- cpp-openssl-init
- library-cpp-packedtypes
-)
-target_sources(cpp-actors-interconnect PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_address.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_channel.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_counters.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_handshake.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_mon.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_resolve.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_stream.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/packet.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/types.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp
-)
diff --git a/library/cpp/actors/interconnect/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 9fb3219fcf..0000000000
--- a/library/cpp/actors/interconnect/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(mock)
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-add_subdirectory(ut_huge_cluster)
-
-add_library(cpp-actors-interconnect)
-target_link_libraries(cpp-actors-interconnect PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-libc_compat
- contrib-libs-openssl
- contrib-libs-xxhash
- cpp-actors-core
- cpp-actors-dnscachelib
- cpp-actors-dnsresolver
- cpp-actors-helpers
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- cpp-actors-wilson
- cpp-digest-crc32c
- library-cpp-json
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- cpp-monlib-metrics
- service-pages-resources
- service-pages-tablesorter
- cpp-openssl-init
- library-cpp-packedtypes
-)
-target_sources(cpp-actors-interconnect PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_address.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_channel.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_counters.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_handshake.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_mon.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_resolve.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_stream.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/packet.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/types.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp
-)
diff --git a/library/cpp/actors/interconnect/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 92e1fec219..0000000000
--- a/library/cpp/actors/interconnect/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(mock)
-add_subdirectory(ut)
-add_subdirectory(ut_fat)
-add_subdirectory(ut_huge_cluster)
-
-add_library(cpp-actors-interconnect)
-target_link_libraries(cpp-actors-interconnect PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-libc_compat
- contrib-libs-openssl
- contrib-libs-xxhash
- cpp-actors-core
- cpp-actors-dnscachelib
- cpp-actors-dnsresolver
- cpp-actors-helpers
- cpp-actors-prof
- cpp-actors-protos
- cpp-actors-util
- cpp-actors-wilson
- cpp-digest-crc32c
- library-cpp-json
- library-cpp-lwtrace
- cpp-monlib-dynamic_counters
- cpp-monlib-metrics
- service-pages-resources
- service-pages-tablesorter
- cpp-openssl-init
- library-cpp-packedtypes
-)
-target_sources(cpp-actors-interconnect PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_address.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_channel.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_counters.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_handshake.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_mon.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_resolve.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_stream.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/load.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/packet.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_actor.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/types.cpp
-)
diff --git a/library/cpp/actors/interconnect/channel_scheduler.h b/library/cpp/actors/interconnect/channel_scheduler.h
deleted file mode 100644
index b0eac9debc..0000000000
--- a/library/cpp/actors/interconnect/channel_scheduler.h
+++ /dev/null
@@ -1,116 +0,0 @@
-#pragma once
-
-#include "interconnect_channel.h"
-#include "event_holder_pool.h"
-
-#include <memory>
-
-namespace NActors {
-
- class TChannelScheduler {
- const ui32 PeerNodeId;
- std::array<std::optional<TEventOutputChannel>, 16> ChannelArray;
- THashMap<ui16, TEventOutputChannel> ChannelMap;
- std::shared_ptr<IInterconnectMetrics> Metrics;
- TEventHolderPool& Pool;
- const ui32 MaxSerializedEventSize;
- const TSessionParams Params;
-
- struct THeapItem {
- TEventOutputChannel *Channel;
- ui64 WeightConsumed = 0;
-
- friend bool operator <(const THeapItem& x, const THeapItem& y) {
- return x.WeightConsumed > y.WeightConsumed;
- }
- };
-
- std::vector<THeapItem> Heap;
-
- public:
- TChannelScheduler(ui32 peerNodeId, const TChannelsConfig& predefinedChannels,
- std::shared_ptr<IInterconnectMetrics> metrics, TEventHolderPool& pool, ui32 maxSerializedEventSize,
- TSessionParams params)
- : PeerNodeId(peerNodeId)
- , Metrics(std::move(metrics))
- , Pool(pool)
- , MaxSerializedEventSize(maxSerializedEventSize)
- , Params(std::move(params))
- {
- for (const auto& item : predefinedChannels) {
- GetOutputChannel(item.first);
- }
- }
-
- TEventOutputChannel *PickChannelWithLeastConsumedWeight() {
- Y_ABORT_UNLESS(!Heap.empty());
- return Heap.front().Channel;
- }
-
- void AddToHeap(TEventOutputChannel& channel, ui64 counter) {
- Y_DEBUG_ABORT_UNLESS(channel.IsWorking());
- ui64 weight = channel.WeightConsumedOnPause;
- weight -= Min(weight, counter - channel.EqualizeCounterOnPause);
- Heap.push_back(THeapItem{&channel, weight});
- std::push_heap(Heap.begin(), Heap.end());
- }
-
- void FinishPick(ui64 weightConsumed, ui64 counter) {
- std::pop_heap(Heap.begin(), Heap.end());
- auto& item = Heap.back();
- item.WeightConsumed += weightConsumed;
- if (item.Channel->IsWorking()) { // reschedule
- std::push_heap(Heap.begin(), Heap.end());
- } else { // remove from heap
- item.Channel->EqualizeCounterOnPause = counter;
- item.Channel->WeightConsumedOnPause = item.WeightConsumed;
- Heap.pop_back();
- }
- }
-
- TEventOutputChannel& GetOutputChannel(ui16 channel) {
- if (channel < ChannelArray.size()) {
- auto& res = ChannelArray[channel];
- if (Y_UNLIKELY(!res)) {
- res.emplace(Pool, channel, PeerNodeId, MaxSerializedEventSize, Metrics,
- Params);
- }
- return *res;
- } else {
- auto it = ChannelMap.find(channel);
- if (Y_UNLIKELY(it == ChannelMap.end())) {
- it = ChannelMap.emplace(std::piecewise_construct, std::forward_as_tuple(channel),
- std::forward_as_tuple(Pool, channel, PeerNodeId, MaxSerializedEventSize,
- Metrics, Params)).first;
- }
- return it->second;
- }
- }
-
- ui64 Equalize() {
- if (Heap.empty()) {
- return 0; // nothing to do here -- no working channels
- }
-
- // find the minimum consumed weight among working channels and then adjust weights
- const ui64 min = Heap.front().WeightConsumed;
- for (THeapItem& item : Heap) {
- item.WeightConsumed -= min;
- }
- return min;
- }
-
- template<typename TCallback>
- void ForEach(TCallback&& callback) {
- for (auto& channel : ChannelArray) {
- if (channel) {
- callback(*channel);
- }
- }
- for (auto& [id, channel] : ChannelMap) {
- callback(channel);
- }
- }
- };
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/event_filter.h b/library/cpp/actors/interconnect/event_filter.h
deleted file mode 100644
index 47dabf5f16..0000000000
--- a/library/cpp/actors/interconnect/event_filter.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/event.h>
-
-namespace NActors {
-
- enum class ENodeClass {
- SYSTEM,
- LOCAL_TENANT,
- PEER_TENANT,
- COUNT
- };
-
- class TEventFilter : TNonCopyable {
- using TRouteMask = ui16;
-
- TVector<TVector<TRouteMask>> ScopeRoutes;
-
- public:
- TEventFilter()
- : ScopeRoutes(65536)
- {}
-
- void RegisterEvent(ui32 type, TRouteMask routes) {
- auto& evSpaceIndex = ScopeRoutes[type >> 16];
- const ui16 subtype = type & 65535;
- size_t size = (subtype + 512) & ~511;
- if (evSpaceIndex.size() < size) {
- evSpaceIndex.resize(size);
- }
- evSpaceIndex[subtype] = routes;
- }
-
- bool CheckIncomingEvent(const IEventHandle& ev, const TScopeId& localScopeId) const {
- TRouteMask routes = 0;
- if (const auto& evSpaceIndex = ScopeRoutes[ev.Type >> 16]) {
- const ui16 subtype = ev.Type & 65535;
- routes = subtype < evSpaceIndex.size() ? evSpaceIndex[subtype] : 0;
- } else {
- routes = ~TRouteMask(); // allow unfilled event spaces by default
- }
- return routes & MakeRouteMask(GetNodeClass(ev.OriginScopeId, localScopeId), GetNodeClass(localScopeId, ev.OriginScopeId));
- }
-
- static ENodeClass GetNodeClass(const TScopeId& scopeId, const TScopeId& localScopeId) {
- if (scopeId.first == 0) {
- // system scope, or null scope
- return scopeId.second ? ENodeClass::SYSTEM : ENodeClass::COUNT;
- } else if (scopeId == localScopeId) {
- return ENodeClass::LOCAL_TENANT;
- } else {
- return ENodeClass::PEER_TENANT;
- }
- }
-
- static TRouteMask MakeRouteMask(ENodeClass from, ENodeClass to) {
- if (from == ENodeClass::COUNT || to == ENodeClass::COUNT) {
- return 0;
- }
- return 1U << (static_cast<unsigned>(from) * static_cast<unsigned>(ENodeClass::COUNT) + static_cast<unsigned>(to));
- }
-
- static TRouteMask MakeRouteMask(std::initializer_list<std::pair<ENodeClass, ENodeClass>> items) {
- TRouteMask mask = 0;
- for (const auto& p : items) {
- mask |= MakeRouteMask(p.first, p.second);
- }
- return mask;
- }
- };
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/event_holder_pool.h b/library/cpp/actors/interconnect/event_holder_pool.h
deleted file mode 100644
index 0afa1d7a7c..0000000000
--- a/library/cpp/actors/interconnect/event_holder_pool.h
+++ /dev/null
@@ -1,127 +0,0 @@
-#pragma once
-
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-
-#include "packet.h"
-
-namespace NActors {
- struct TEvFreeItems : TEventLocal<TEvFreeItems, EventSpaceBegin(TEvents::ES_PRIVATE)> {
- static constexpr size_t MaxEvents = 256;
-
- std::list<TEventHolder> FreeQueue;
- TStackVec<THolder<IEventBase>, MaxEvents> Events;
- TStackVec<THolder<TEventSerializedData>, MaxEvents> Buffers;
- std::shared_ptr<std::atomic<TAtomicBase>> Counter;
- ui64 NumBytes = 0;
-
- ~TEvFreeItems() {
- if (Counter) {
- TAtomicBase res = Counter->fetch_sub(NumBytes) - NumBytes;
- Y_ABORT_UNLESS(res >= 0);
- }
- }
-
- bool GetInLineForDestruction(const TIntrusivePtr<TInterconnectProxyCommon>& common) {
- Y_ABORT_UNLESS(!Counter);
- const auto& counter = common->DestructorQueueSize;
- const auto& max = common->MaxDestructorQueueSize;
- if (counter && (TAtomicBase)(counter->fetch_add(NumBytes) + NumBytes) > max) {
- counter->fetch_sub(NumBytes);
- return false;
- }
- Counter = counter;
- return true;
- }
- };
-
- class TEventHolderPool {
- using TDestroyCallback = std::function<void(THolder<IEventBase>)>;
-
- static constexpr size_t MaxFreeQueueItems = 32;
- static constexpr size_t FreeQueueTrimThreshold = MaxFreeQueueItems * 2;
- static constexpr ui64 MaxBytesPerMessage = 10 * 1024 * 1024;
-
- TIntrusivePtr<TInterconnectProxyCommon> Common;
- std::list<TEventHolder> Cache;
- THolder<TEvFreeItems> PendingFreeEvent;
- TDestroyCallback DestroyCallback;
-
- public:
- TEventHolderPool(TIntrusivePtr<TInterconnectProxyCommon> common,
- TDestroyCallback destroyCallback)
- : Common(std::move(common))
- , DestroyCallback(std::move(destroyCallback))
- {}
-
- TEventHolder& Allocate(std::list<TEventHolder>& queue) {
- if (Cache.empty()) {
- queue.emplace_back();
- } else {
- queue.splice(queue.end(), Cache, Cache.begin());
- }
- return queue.back();
- }
-
- void Release(std::list<TEventHolder>& queue) {
- for (auto it = queue.begin(); it != queue.end(); ) {
- Release(queue, it++);
- }
- }
-
- void Release(std::list<TEventHolder>& queue, std::list<TEventHolder>::iterator event) {
- bool trim = false;
-
- // release held event, if any
- if (THolder<IEventBase> ev = std::move(event->Event)) {
- auto p = GetPendingEvent();
- p->NumBytes += event->EventSerializedSize;
- auto& events = p->Events;
- events.push_back(std::move(ev));
- trim = trim || events.size() >= TEvFreeItems::MaxEvents || p->NumBytes >= MaxBytesPerMessage;
- }
-
- // release buffer, if any
- if (event->Buffer && event->Buffer.RefCount() == 1) {
- auto p = GetPendingEvent();
- p->NumBytes += event->EventSerializedSize;
- auto& buffers = p->Buffers;
- buffers.emplace_back(event->Buffer.Release());
- trim = trim || buffers.size() >= TEvFreeItems::MaxEvents || p->NumBytes >= MaxBytesPerMessage;
- }
-
- // free event and trim the cache if its size is exceeded
- event->Clear();
- Cache.splice(Cache.end(), queue, event);
- if (Cache.size() >= FreeQueueTrimThreshold) {
- auto& freeQueue = GetPendingEvent()->FreeQueue;
- auto it = Cache.begin();
- std::advance(it, Cache.size() - MaxFreeQueueItems);
- freeQueue.splice(freeQueue.end(), Cache, Cache.begin(), it);
- trim = true;
- }
-
- // release items if we have hit the limit
- if (trim) {
- Trim();
- }
- }
-
- void Trim() {
- if (auto ev = std::move(PendingFreeEvent); ev && ev->GetInLineForDestruction(Common)) {
- DestroyCallback(std::move(ev));
- }
-
- // ensure it is dropped
- PendingFreeEvent.Reset();
- }
-
- private:
- TEvFreeItems* GetPendingEvent() {
- if (!PendingFreeEvent) {
- PendingFreeEvent.Reset(new TEvFreeItems);
- }
- return PendingFreeEvent.Get();
- }
- };
-
-}
diff --git a/library/cpp/actors/interconnect/events_local.h b/library/cpp/actors/interconnect/events_local.h
deleted file mode 100644
index 465899c335..0000000000
--- a/library/cpp/actors/interconnect/events_local.h
+++ /dev/null
@@ -1,438 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/protos/interconnect.pb.h>
-#include <util/generic/deque.h>
-#include <util/network/address.h>
-
-#include "interconnect_stream.h"
-#include "types.h"
-
-namespace NActors {
- enum class ENetwork : ui32 {
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // local messages
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- Start = EventSpaceBegin(TEvents::ES_INTERCONNECT_TCP),
-
- SocketReadyRead = Start,
- SocketReadyWrite,
- SocketError,
- Connect,
- Disconnect,
- IncomingConnection,
- HandshakeAsk,
- HandshakeAck,
- HandshakeNak,
- HandshakeDone,
- HandshakeFail,
- Kick,
- Flush,
- NodeInfo,
- BunchOfEventsToDestroy,
- HandshakeRequest,
- HandshakeReplyOK,
- HandshakeReplyError,
- ResolveAddress,
- AddressInfo,
- ResolveError,
- HTTPStreamStatus,
- HTTPSendContent,
- ConnectProtocolWakeup,
- HTTPProtocolRetry,
- EvPollerRegister,
- EvPollerRegisterResult,
- EvPollerReady,
- EvUpdateFromInputSession,
- EvConfirmUpdate,
- EvSessionBufferSizeRequest,
- EvSessionBufferSizeResponse,
- EvProcessPingRequest,
- EvGetSecureSocket,
- EvSecureSocket,
- HandshakeBrokerTake,
- HandshakeBrokerFree,
- HandshakeBrokerPermit,
-
- // external data channel messages
- EvSubscribeForConnection,
- EvReportConnection,
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // non-local messages; their indices must be preserved to keep working properly during rolling updates
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- // interconnect load test message
- EvLoadMessage = Start + 256,
- };
-
- struct TEvSocketReadyRead: public TEventLocal<TEvSocketReadyRead, ui32(ENetwork::SocketReadyRead)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSocketReadyRead, "Network: TEvSocketReadyRead")
- };
-
- struct TEvSocketReadyWrite: public TEventLocal<TEvSocketReadyWrite, ui32(ENetwork::SocketReadyWrite)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSocketReadyWrite, "Network: TEvSocketReadyWrite")
- };
-
- struct TEvSocketError: public TEventLocal<TEvSocketError, ui32(ENetwork::SocketError)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSocketError, ::strerror(Error))
- TString GetReason() const {
- return ::strerror(Error);
- }
- const int Error;
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
-
- TEvSocketError(int error, TIntrusivePtr<NInterconnect::TStreamSocket> sock)
- : Error(error)
- , Socket(std::move(sock))
- {
- }
- };
-
- struct TEvSocketConnect: public TEventLocal<TEvSocketConnect, ui32(ENetwork::Connect)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSocketConnect, "Network: TEvSocketConnect")
- };
-
- struct TEvSocketDisconnect: public TEventLocal<TEvSocketDisconnect, ui32(ENetwork::Disconnect)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSocketDisconnect, "Network: TEvSocketDisconnect")
- TDisconnectReason Reason;
-
- TEvSocketDisconnect(TDisconnectReason reason)
- : Reason(std::move(reason))
- {
- }
- };
-
- struct TEvHandshakeBrokerTake: TEventLocal<TEvHandshakeBrokerTake, ui32(ENetwork::HandshakeBrokerTake)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeBrokerTake, "Network: TEvHandshakeBrokerTake")
- };
-
- struct TEvHandshakeBrokerFree: TEventLocal<TEvHandshakeBrokerFree, ui32(ENetwork::HandshakeBrokerFree)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeBrokerFree, "Network: TEvHandshakeBrokerFree")
- };
-
- struct TEvHandshakeBrokerPermit: TEventLocal<TEvHandshakeBrokerPermit, ui32(ENetwork::HandshakeBrokerPermit)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeBrokerPermit, "Network: TEvHandshakeBrokerPermit")
- };
-
- struct TEvHandshakeAsk: public TEventLocal<TEvHandshakeAsk, ui32(ENetwork::HandshakeAsk)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeAsk, "Network: TEvHandshakeAsk")
- TEvHandshakeAsk(const TActorId& self,
- const TActorId& peer,
- ui64 counter)
- : Self(self)
- , Peer(peer)
- , Counter(counter)
- {
- }
- const TActorId Self;
- const TActorId Peer;
- const ui64 Counter;
- };
-
- struct TEvHandshakeAck: public TEventLocal<TEvHandshakeAck, ui32(ENetwork::HandshakeAck)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeAck, "Network: TEvHandshakeAck")
-
- TEvHandshakeAck(const TActorId& self, ui64 nextPacket, TSessionParams params)
- : Self(self)
- , NextPacket(nextPacket)
- , Params(std::move(params))
- {}
-
- const TActorId Self;
- const ui64 NextPacket;
- const TSessionParams Params;
- };
-
- struct TEvHandshakeNak : TEventLocal<TEvHandshakeNak, ui32(ENetwork::HandshakeNak)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeNak, "Network: TEvHandshakeNak")
- };
-
- struct TEvHandshakeRequest
- : public TEventLocal<TEvHandshakeRequest,
- ui32(ENetwork::HandshakeRequest)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeRequest,
- "Network: TEvHandshakeRequest")
-
- NActorsInterconnect::THandshakeRequest Record;
- };
-
- struct TEvHandshakeReplyOK
- : public TEventLocal<TEvHandshakeReplyOK,
- ui32(ENetwork::HandshakeReplyOK)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeReplyOK,
- "Network: TEvHandshakeReplyOK")
-
- NActorsInterconnect::THandshakeReply Record;
- };
-
- struct TEvHandshakeReplyError
- : public TEventLocal<TEvHandshakeReplyError,
- ui32(ENetwork::HandshakeReplyError)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeReplyError,
- "Network: TEvHandshakeReplyError")
-
- TEvHandshakeReplyError(TString error) {
- Record.SetErrorExplaination(error);
- }
-
- NActorsInterconnect::THandshakeReply Record;
- };
-
- struct TEvIncomingConnection: public TEventLocal<TEvIncomingConnection, ui32(ENetwork::IncomingConnection)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvIncomingConnection, "Network: TEvIncomingConnection")
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
- NInterconnect::TAddress Address;
-
- TEvIncomingConnection(TIntrusivePtr<NInterconnect::TStreamSocket> socket, NInterconnect::TAddress address)
- : Socket(std::move(socket))
- , Address(std::move(address))
- {}
- };
-
- struct TEvHandshakeDone: public TEventLocal<TEvHandshakeDone, ui32(ENetwork::HandshakeDone)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeDone, "Network: TEvHandshakeDone")
-
- TEvHandshakeDone(
- TIntrusivePtr<NInterconnect::TStreamSocket> socket,
- const TActorId& peer,
- const TActorId& self,
- ui64 nextPacket,
- TAutoPtr<TProgramInfo>&& programInfo,
- TSessionParams params,
- TIntrusivePtr<NInterconnect::TStreamSocket> xdcSocket)
- : Socket(std::move(socket))
- , Peer(peer)
- , Self(self)
- , NextPacket(nextPacket)
- , ProgramInfo(std::move(programInfo))
- , Params(std::move(params))
- , XdcSocket(std::move(xdcSocket))
- {
- }
-
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
- const TActorId Peer;
- const TActorId Self;
- const ui64 NextPacket;
- TAutoPtr<TProgramInfo> ProgramInfo;
- const TSessionParams Params;
- TIntrusivePtr<NInterconnect::TStreamSocket> XdcSocket;
- };
-
- struct TEvHandshakeFail: public TEventLocal<TEvHandshakeFail, ui32(ENetwork::HandshakeFail)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHandshakeFail, "Network: TEvHandshakeFail")
-
- enum EnumHandshakeFail {
- HANDSHAKE_FAIL_TRANSIENT,
- HANDSHAKE_FAIL_PERMANENT,
- HANDSHAKE_FAIL_SESSION_MISMATCH,
- };
-
- TEvHandshakeFail(EnumHandshakeFail temporary, TString explanation)
- : Temporary(temporary)
- , Explanation(std::move(explanation))
- {
- }
-
- const EnumHandshakeFail Temporary;
- const TString Explanation;
- };
-
- struct TEvKick: public TEventLocal<TEvKick, ui32(ENetwork::Kick)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvKick, "Network: TEvKick")
- };
-
- struct TEvFlush: public TEventLocal<TEvFlush, ui32(ENetwork::Flush)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvFlush, "Network: TEvFlush")
- };
-
- struct TEvLocalNodeInfo
- : public TEventLocal<TEvLocalNodeInfo, ui32(ENetwork::NodeInfo)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvLocalNodeInfo, "Network: TEvLocalNodeInfo")
-
- ui32 NodeId;
- std::vector<NInterconnect::TAddress> Addresses;
- };
-
- struct TEvBunchOfEventsToDestroy : TEventLocal<TEvBunchOfEventsToDestroy, ui32(ENetwork::BunchOfEventsToDestroy)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvBunchOfEventsToDestroy,
- "Network: TEvBunchOfEventsToDestroy")
-
- TEvBunchOfEventsToDestroy(TDeque<TAutoPtr<IEventBase>> events)
- : Events(std::move(events))
- {
- }
-
- TDeque<TAutoPtr<IEventBase>> Events;
- };
-
- struct TEvResolveAddress
- : public TEventLocal<TEvResolveAddress, ui32(ENetwork::ResolveAddress)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvResolveAddress, "Network: TEvResolveAddress")
-
- TString Address;
- ui16 Port;
- };
-
- struct TEvAddressInfo
- : public TEventLocal<TEvAddressInfo, ui32(ENetwork::AddressInfo)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvAddressInfo, "Network: TEvAddressInfo")
-
- NAddr::IRemoteAddrPtr Address;
- };
-
- struct TEvResolveError
- : public TEventLocal<TEvResolveError, ui32(ENetwork::ResolveError)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvResolveError, "Network: TEvResolveError")
-
- TString Explain;
- TString Host;
- };
-
- struct TEvHTTPStreamStatus
- : public TEventLocal<TEvHTTPStreamStatus, ui32(ENetwork::HTTPStreamStatus)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHTTPStreamStatus,
- "Network: TEvHTTPStreamStatus")
- enum EStatus {
- READY,
- COMPLETE,
- ERROR,
- };
-
- EStatus Status;
- TString Error;
- TString HttpHeaders;
- };
-
- struct TEvHTTPSendContent
- : public TEventLocal<TEvHTTPSendContent, ui32(ENetwork::HTTPSendContent)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHTTPSendContent, "Network: TEvHTTPSendContent")
-
- const char* Data;
- size_t Len;
- bool Last;
- };
-
- struct TEvConnectWakeup
- : public TEventLocal<TEvConnectWakeup,
- ui32(ENetwork::ConnectProtocolWakeup)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvConnectWakeup, "Protocols: TEvConnectWakeup")
- };
-
- struct TEvHTTPProtocolRetry
- : public TEventLocal<TEvHTTPProtocolRetry,
- ui32(ENetwork::HTTPProtocolRetry)> {
- DEFINE_SIMPLE_LOCAL_EVENT(TEvHTTPProtocolRetry,
- "Protocols: TEvHTTPProtocolRetry")
- };
-
- struct TEvLoadMessage
- : TEventPB<TEvLoadMessage, NActorsInterconnect::TEvLoadMessage, static_cast<ui32>(ENetwork::EvLoadMessage)> {
- TEvLoadMessage() = default;
-
- template <typename TContainer>
- TEvLoadMessage(const TContainer& route, const TString& id, const TString* payload) {
- for (const TActorId& actorId : route) {
- auto* hop = Record.AddHops();
- if (actorId) {
- ActorIdToProto(actorId, hop->MutableNextHop());
- }
- }
- Record.SetId(id);
- if (payload) {
- Record.SetPayload(*payload);
- }
- }
-
- template <typename TContainer>
- TEvLoadMessage(const TContainer& route, const TString& id, TRope&& payload) {
- for (const TActorId& actorId : route) {
- auto* hop = Record.AddHops();
- if (actorId) {
- ActorIdToProto(actorId, hop->MutableNextHop());
- }
- }
- Record.SetId(id);
- AddPayload(std::move(payload));
- }
- };
-
- struct TEvUpdateFromInputSession : TEventLocal<TEvUpdateFromInputSession, static_cast<ui32>(ENetwork::EvUpdateFromInputSession)> {
- ui64 ConfirmedByInput; // latest Confirm value from processed input packet
- ui64 NumDataBytes;
- TDuration Ping;
-
- TEvUpdateFromInputSession(ui64 confirmedByInput, ui64 numDataBytes, TDuration ping)
- : ConfirmedByInput(confirmedByInput)
- , NumDataBytes(numDataBytes)
- , Ping(ping)
- {
- }
- };
-
- struct TEvConfirmUpdate : TEventLocal<TEvConfirmUpdate, static_cast<ui32>(ENetwork::EvConfirmUpdate)>
- {};
-
- struct TEvSessionBufferSizeRequest : TEventLocal<TEvSessionBufferSizeRequest, static_cast<ui32>(ENetwork::EvSessionBufferSizeRequest)> {
- //DEFINE_SIMPLE_LOCAL_EVENT(TEvSessionBufferSizeRequest, "Session: TEvSessionBufferSizeRequest")
- DEFINE_SIMPLE_LOCAL_EVENT(TEvSessionBufferSizeRequest, "Network: TEvSessionBufferSizeRequest");
- };
-
- struct TEvSessionBufferSizeResponse : TEventLocal<TEvSessionBufferSizeResponse, static_cast<ui32>(ENetwork::EvSessionBufferSizeResponse)> {
- TEvSessionBufferSizeResponse(const TActorId& sessionId, ui64 outputBufferSize)
- : SessionID(sessionId)
- , BufferSize(outputBufferSize)
- {
- }
-
- TActorId SessionID;
- ui64 BufferSize;
- };
-
- struct TEvProcessPingRequest : TEventLocal<TEvProcessPingRequest, static_cast<ui32>(ENetwork::EvProcessPingRequest)> {
- const ui64 Payload;
-
- TEvProcessPingRequest(ui64 payload)
- : Payload(payload)
- {}
- };
-
- struct TEvGetSecureSocket : TEventLocal<TEvGetSecureSocket, (ui32)ENetwork::EvGetSecureSocket> {
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
-
- TEvGetSecureSocket(TIntrusivePtr<NInterconnect::TStreamSocket> socket)
- : Socket(std::move(socket))
- {}
- };
-
- struct TEvSecureSocket : TEventLocal<TEvSecureSocket, (ui32)ENetwork::EvSecureSocket> {
- TIntrusivePtr<NInterconnect::TSecureSocket> Socket;
-
- TEvSecureSocket(TIntrusivePtr<NInterconnect::TSecureSocket> socket)
- : Socket(std::move(socket))
- {}
- };
-
- struct TEvSubscribeForConnection : TEventLocal<TEvSubscribeForConnection, (ui32)ENetwork::EvSubscribeForConnection> {
- TString HandshakeId;
- bool Subscribe;
-
- TEvSubscribeForConnection(TString handshakeId, bool subscribe)
- : HandshakeId(std::move(handshakeId))
- , Subscribe(subscribe)
- {}
- };
-
- struct TEvReportConnection : TEventLocal<TEvReportConnection, (ui32)ENetwork::EvReportConnection> {
- TString HandshakeId;
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
-
- TEvReportConnection(TString handshakeId, TIntrusivePtr<NInterconnect::TStreamSocket> socket)
- : HandshakeId(std::move(handshakeId))
- , Socket(std::move(socket))
- {}
- };
-}
diff --git a/library/cpp/actors/interconnect/handshake_broker.h b/library/cpp/actors/interconnect/handshake_broker.h
deleted file mode 100644
index c850320bd2..0000000000
--- a/library/cpp/actors/interconnect/handshake_broker.h
+++ /dev/null
@@ -1,156 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/interconnect/events_local.h>
-
-#include <deque>
-
-namespace NActors {
- class TBrokerLeaseHolder {
- public:
- TBrokerLeaseHolder(TActorId waiterId, TActorId brokerId)
- : WaiterId(waiterId)
- , BrokerId(brokerId) {
- if (TActivationContext::Send(new IEventHandle(BrokerId, WaiterId, new TEvHandshakeBrokerTake()))) {
- LeaseRequested = true;
- }
- }
-
- ~TBrokerLeaseHolder() {
- if (LeaseRequested) {
- TActivationContext::Send(new IEventHandle(BrokerId, WaiterId, new TEvHandshakeBrokerFree()));
- }
- }
-
- bool IsLeaseRequested() {
- return LeaseRequested;
- }
-
- void ForgetLease() {
- // only call when TDtorException was caught
- LeaseRequested = false;
- }
-
- private:
- TActorId WaiterId;
- TActorId BrokerId;
- bool LeaseRequested = false;
- };
-
- class THandshakeBroker : public TActor<THandshakeBroker> {
- private:
- enum class ESelectionStrategy {
- FIFO = 0,
- LIFO,
- Random,
- };
-
- private:
- void PermitNext() {
- if (Capacity == 0 && !Waiters.empty()) {
- TActorId waiter;
-
- switch (SelectionStrategy) {
- case ESelectionStrategy::FIFO:
- waiter = Waiters.front();
- Waiters.pop_front();
- SelectionStrategy = ESelectionStrategy::LIFO;
- break;
-
- case ESelectionStrategy::LIFO:
- waiter = Waiters.back();
- Waiters.pop_back();
- SelectionStrategy = ESelectionStrategy::Random;
- break;
-
- case ESelectionStrategy::Random: {
- const auto it = WaiterLookup.begin();
- waiter = it->first;
- Waiters.erase(it->second);
- SelectionStrategy = ESelectionStrategy::FIFO;
- break;
- }
-
- default:
- Y_ABORT("Unimplimented selection strategy");
- }
-
- const size_t n = WaiterLookup.erase(waiter);
- Y_ABORT_UNLESS(n == 1);
-
- Send(waiter, new TEvHandshakeBrokerPermit());
- PermittedLeases.insert(waiter);
- } else {
- Capacity += 1;
- }
- }
-
- private:
- using TWaiters = std::list<TActorId>;
- TWaiters Waiters;
- std::unordered_map<TActorId, TWaiters::iterator> WaiterLookup;
- std::unordered_set<TActorId> PermittedLeases;
-
- ESelectionStrategy SelectionStrategy = ESelectionStrategy::FIFO;
-
- ui32 Capacity;
-
- void Handle(TEvHandshakeBrokerTake::TPtr &ev) {
- const TActorId sender = ev->Sender;
- if (Capacity > 0) {
- Capacity -= 1;
- PermittedLeases.insert(sender);
- Send(sender, new TEvHandshakeBrokerPermit());
- } else {
- const auto [it, inserted] = WaiterLookup.try_emplace(sender,
- Waiters.insert(Waiters.end(), sender));
- Y_ABORT_UNLESS(inserted);
- }
- }
-
- void Handle(TEvHandshakeBrokerFree::TPtr& ev) {
- const TActorId sender = ev->Sender;
- if (!PermittedLeases.erase(sender)) {
- // Lease was not permitted yet, remove sender from Waiters queue
- const auto it = WaiterLookup.find(sender);
- Y_ABORT_UNLESS(it != WaiterLookup.end());
- Waiters.erase(it->second);
- WaiterLookup.erase(it);
- }
- PermitNext();
- }
-
- public:
- THandshakeBroker(ui32 inflightLimit)
- : TActor(&TThis::StateFunc)
- , Capacity(inflightLimit)
- {
- }
-
- static constexpr char ActorName[] = "HANDSHAKE_BROKER_ACTOR";
-
- STFUNC(StateFunc) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvHandshakeBrokerTake, Handle);
- hFunc(TEvHandshakeBrokerFree, Handle);
-
- default:
- Y_ABORT("unexpected event 0x%08" PRIx32, ev->GetTypeRewrite());
- }
- }
-
- void Bootstrap() {
- Become(&TThis::StateFunc);
- }
- };
-
- inline IActor* CreateHandshakeBroker(ui32 maxCapacity) {
- return new THandshakeBroker(maxCapacity);
- }
-
- inline TActorId MakeHandshakeBrokerOutId() {
- char x[12] = {'I', 'C', 'H', 's', 'h', 'k', 'B', 'r', 'k', 'O', 'u', 't'};
- return TActorId(0, TStringBuf(std::begin(x), std::end(x)));
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect.h b/library/cpp/actors/interconnect/interconnect.h
deleted file mode 100644
index 38d8cd4781..0000000000
--- a/library/cpp/actors/interconnect/interconnect.h
+++ /dev/null
@@ -1,189 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/interconnect.h>
-#include <util/generic/map.h>
-#include <util/network/address.h>
-
-namespace NActors {
- struct TInterconnectGlobalState: public TThrRefBase {
- TString SelfAddress;
- ui32 SelfPort;
-
- TVector<TActorId> GlobalNameservers; // TODO: add some info (like expected reply time)
- };
-
- struct TInterconnectProxySetup: public TThrRefBase {
- // synchronous (session -> proxy)
- struct IProxy : TNonCopyable {
- virtual ~IProxy() {
- }
-
- virtual void ActivateSession(const TActorContext& ctx) = 0; // session activated
- virtual void DetachSession(const TActorContext& ctx) = 0; // session is dead
- };
-
- // synchronous (proxy -> session)
- struct ISession : TNonCopyable {
- virtual ~ISession() {
- }
-
- virtual void DetachSession(const TActorContext& ownerCtx, const TActorContext& sessionCtx) = 0; // kill yourself
- virtual void ForwardPacket(TAutoPtr<IEventHandle>& ev, const TActorContext& ownerCtx, const TActorContext& sessionCtx) = 0; // receive packet for forward
- virtual void Connect(const TActorContext& ownerCtx, const TActorContext& sessionCtx) = 0; // begin connection
- virtual bool ReceiveIncomingSession(TAutoPtr<IEventHandle>& ev, const TActorContext& ownerCtx, const TActorContext& sessionCtx) = 0; // handle incoming session; if it returns true, the session is dead and must be recreated
- };
-
- ui32 DestinationNode;
-
- TString StaticAddress; // if set - would be used as main destination address
- int StaticPort;
-
- TIntrusivePtr<TInterconnectGlobalState> GlobalState;
-
- virtual IActor* CreateSession(const TActorId& ownerId, IProxy* owner) = 0; // returned actor is session and would be attached to same mailbox as proxy to allow sync calls
- virtual TActorSetupCmd CreateAcceptor() = 0;
- };
-
- struct TNameserverSetup {
- TActorId ServiceID;
-
- TIntrusivePtr<TInterconnectGlobalState> GlobalState;
- };
-
- struct TTableNameserverSetup: public TThrRefBase {
- struct TNodeInfo {
- TString Address;
- TString Host;
- TString ResolveHost;
- ui16 Port;
- TNodeLocation Location;
- TString& first;
- ui16& second;
-
- TNodeInfo()
- : first(Address)
- , second(Port)
- {
- }
-
- TNodeInfo(const TNodeInfo&) = default;
-
- // for testing purposes only
- TNodeInfo(const TString& address, const TString& host, ui16 port)
- : TNodeInfo()
- {
- Address = address;
- Host = host;
- ResolveHost = host;
- Port = port;
- }
-
- TNodeInfo(const TString& address,
- const TString& host,
- const TString& resolveHost,
- ui16 port,
- const TNodeLocation& location)
- : TNodeInfo()
- {
- Address = address;
- Host = host;
- ResolveHost = resolveHost;
- Port = port;
- Location = location;
- }
-
- // for testing purposes only
- TNodeInfo& operator=(const std::pair<TString, ui32>& pr) {
- Address = pr.first;
- Host = pr.first;
- ResolveHost = pr.first;
- Port = pr.second;
- return *this;
- }
-
- TNodeInfo& operator=(const TNodeInfo& ni) {
- Address = ni.Address;
- Host = ni.Host;
- ResolveHost = ni.ResolveHost;
- Port = ni.Port;
- Location = ni.Location;
- return *this;
- }
-
- friend bool operator ==(const TNodeInfo& x, const TNodeInfo& y) {
- return x.Address == y.Address && x.Host == y.Host && x.ResolveHost == y.ResolveHost && x.Port == y.Port
- && x.Location == y.Location;
- }
-
- friend bool operator !=(const TNodeInfo& x, const TNodeInfo& y) {
- return !(x == y);
- }
- };
-
- TMap<ui32, TNodeInfo> StaticNodeTable;
-
- bool IsEntriesUnique() const;
- };
-
- struct TNodeRegistrarSetup {
- TActorId ServiceID;
-
- TIntrusivePtr<TInterconnectGlobalState> GlobalState;
- };
-
- TActorId GetNameserviceActorId();
-
- /**
- * Const table-lookup based name service
- */
-
- IActor* CreateNameserverTable(
- const TIntrusivePtr<TTableNameserverSetup>& setup,
- ui32 poolId = 0);
-
- /**
- * Name service which can be paired with external discovery service.
- * Copies information from setup on the start (table may be empty).
- * Handles TEvNodesInfo to change list of known nodes.
- *
- * If PendingPeriod is not zero, wait for unknown nodeId
- */
-
- IActor* CreateDynamicNameserver(
- const TIntrusivePtr<TTableNameserverSetup>& setup,
- const TDuration& pendingPeriod = TDuration::Zero(),
- ui32 poolId = 0);
-
- /**
- * Creates an actor that resolves host/port and replies with either:
- *
- * - TEvLocalNodeInfo on success
- * - TEvResolveError on errors
- *
- * Optional defaultAddress may be used as fallback.
- */
- IActor* CreateResolveActor(
- const TString& host, ui16 port, ui32 nodeId, const TString& defaultAddress,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline);
-
- inline IActor* CreateResolveActor(
- ui32 nodeId, const TTableNameserverSetup::TNodeInfo& nodeInfo,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline)
- {
- return CreateResolveActor(nodeInfo.ResolveHost, nodeInfo.Port, nodeId, nodeInfo.Address,
- replyTo, replyFrom, deadline);
- }
-
- /**
- * Creates an actor that resolves host/port and replies with either:
- *
- * - TEvAddressInfo on success
- * - TEvResolveError on errors
- */
- IActor* CreateResolveActor(
- const TString& host, ui16 port,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline);
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_address.cpp b/library/cpp/actors/interconnect/interconnect_address.cpp
deleted file mode 100644
index 124cd61325..0000000000
--- a/library/cpp/actors/interconnect/interconnect_address.cpp
+++ /dev/null
@@ -1,106 +0,0 @@
-#include "interconnect_address.h"
-
-#include <util/string/cast.h>
-#include <util/system/file.h>
-
-#if defined(_linux_)
-#include <sys/un.h>
-#include <sys/stat.h>
-#endif
-
-namespace NInterconnect {
- TAddress::TAddress() {
- memset(&Addr, 0, sizeof(Addr));
- }
-
- TAddress::TAddress(NAddr::IRemoteAddr& addr) {
- socklen_t len = addr.Len();
- Y_ABORT_UNLESS(len <= sizeof(Addr));
- memcpy(&Addr.Generic, addr.Addr(), len);
- }
-
- int TAddress::GetFamily() const {
- return Addr.Generic.sa_family;
- }
-
- socklen_t TAddress::Size() const {
- switch (Addr.Generic.sa_family) {
- case AF_INET6:
- return sizeof(sockaddr_in6);
- case AF_INET:
- return sizeof(sockaddr_in);
- default:
- return 0;
- }
- }
-
- sockaddr* TAddress::SockAddr() {
- return &Addr.Generic;
- }
-
- const sockaddr* TAddress::SockAddr() const {
- return &Addr.Generic;
- }
-
- ui16 TAddress::GetPort() const {
- switch (Addr.Generic.sa_family) {
- case AF_INET6:
- return ntohs(Addr.Ipv6.sin6_port);
- case AF_INET:
- return ntohs(Addr.Ipv4.sin_port);
- default:
- return 0;
- }
- }
-
- TString TAddress::ToString() const {
- return GetAddress() + ":" + ::ToString(GetPort());
- }
-
- TAddress::TAddress(const char* addr, ui16 port) {
- memset(&Addr, 0, sizeof(Addr));
- if (inet_pton(Addr.Ipv6.sin6_family = AF_INET6, addr, &Addr.Ipv6.sin6_addr) > 0) {
- Addr.Ipv6.sin6_port = htons(port);
- } else if (inet_pton(Addr.Ipv4.sin_family = AF_INET, addr, &Addr.Ipv4.sin_addr) > 0) {
- Addr.Ipv4.sin_port = htons(port);
- }
- }
-
- TAddress::TAddress(const TString& addr, ui16 port)
- : TAddress(addr.data(), port)
- {}
-
- TAddress::TAddress(in_addr addr, ui16 port) {
- Addr.Ipv4.sin_family = AF_INET;
- Addr.Ipv4.sin_port = htons(port);
- Addr.Ipv4.sin_addr = addr;
- }
-
- TAddress::TAddress(in6_addr addr, ui16 port) {
- Addr.Ipv6.sin6_family = AF_INET6;
- Addr.Ipv6.sin6_port = htons(port);
- Addr.Ipv6.sin6_addr = addr;
- }
-
- TString TAddress::GetAddress() const {
- const void *src;
- socklen_t size;
-
- switch (Addr.Generic.sa_family) {
- case AF_INET6:
- std::tie(src, size) = std::make_tuple(&Addr.Ipv6.sin6_addr, INET6_ADDRSTRLEN);
- break;
-
- case AF_INET:
- std::tie(src, size) = std::make_tuple(&Addr.Ipv4.sin_addr, INET_ADDRSTRLEN);
- break;
-
- default:
- return TString();
- }
-
- char *buffer = static_cast<char*>(alloca(size));
- const char *p = inet_ntop(Addr.Generic.sa_family, const_cast<void*>(src), buffer, size);
- return p ? TString(p) : TString();
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect_address.h b/library/cpp/actors/interconnect/interconnect_address.h
deleted file mode 100644
index b19d751806..0000000000
--- a/library/cpp/actors/interconnect/interconnect_address.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#pragma once
-
-#include <util/system/defaults.h>
-#include <util/network/init.h>
-#include <util/network/address.h>
-#include <util/generic/string.h>
-
-namespace NInterconnect {
- class TAddress {
- union {
- sockaddr Generic;
- sockaddr_in Ipv4;
- sockaddr_in6 Ipv6;
- } Addr;
-
- public:
- TAddress();
- TAddress(const char* addr, ui16 port);
- TAddress(const TString& addr, ui16 port);
- TAddress(in_addr addr, ui16 port);
- TAddress(in6_addr addr, ui16 port);
- TAddress(NAddr::IRemoteAddr& addr);
- int GetFamily() const;
- socklen_t Size() const;
- ::sockaddr* SockAddr();
- const ::sockaddr* SockAddr() const;
- ui16 GetPort() const;
- TString GetAddress() const;
- TString ToString() const;
-
- static TAddress AnyIPv4(ui16 port) {
- TAddress res;
- res.Addr.Ipv4.sin_family = AF_INET;
- res.Addr.Ipv4.sin_port = htons(port);
- res.Addr.Ipv4.sin_addr.s_addr = htonl(INADDR_ANY);
- return res;
- }
-
- static TAddress AnyIPv6(ui16 port) {
- TAddress res;
- res.Addr.Ipv6.sin6_family = AF_INET6;
- res.Addr.Ipv6.sin6_port = htons(port);
- res.Addr.Ipv6.sin6_addr = in6addr_any;
- return res;
- }
- };
-}
diff --git a/library/cpp/actors/interconnect/interconnect_channel.cpp b/library/cpp/actors/interconnect/interconnect_channel.cpp
deleted file mode 100644
index 71f4d6e5c3..0000000000
--- a/library/cpp/actors/interconnect/interconnect_channel.cpp
+++ /dev/null
@@ -1,360 +0,0 @@
-#include "interconnect_channel.h"
-
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/executor_thread.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/probes.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/actors/prof/tag.h>
-#include <library/cpp/digest/crc32c/crc32c.h>
-
-LWTRACE_USING(ACTORLIB_PROVIDER);
-
-namespace NActors {
- bool TEventOutputChannel::FeedDescriptor(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed) {
- const size_t amount = sizeof(TChannelPart) + sizeof(TEventDescr2);
- if (task.GetInternalFreeAmount() < amount) {
- return false;
- }
-
- auto traceId = event.Span.GetTraceId();
- event.Span.EndOk();
-
- Y_ABORT_UNLESS(SerializationInfo);
- const ui32 flags = (event.Descr.Flags & ~IEventHandle::FlagForwardOnNondelivery) |
- (SerializationInfo->IsExtendedFormat ? IEventHandle::FlagExtendedFormat : 0);
-
- // prepare descriptor record
- TEventDescr2 descr{
- event.Descr.Type,
- flags,
- event.Descr.Recipient,
- event.Descr.Sender,
- event.Descr.Cookie,
- {},
- event.Descr.Checksum,
-#if IC_FORCE_HARDENED_PACKET_CHECKS
- event.EventSerializedSize
-#endif
- };
- traceId.Serialize(&descr.TraceId);
-
- // and channel header before the descriptor
- TChannelPart part{
- .ChannelFlags = static_cast<ui16>(ChannelId | TChannelPart::LastPartFlag),
- .Size = sizeof(descr)
- };
-
- // append them to the packet
- task.Write<false>(&part, sizeof(part));
- task.Write<false>(&descr, sizeof(descr));
-
- *weightConsumed += amount;
- OutputQueueSize -= sizeof(TEventDescr2);
- Metrics->UpdateOutputChannelEvents(ChannelId);
-
- return true;
- }
-
- void TEventOutputChannel::DropConfirmed(ui64 confirm) {
- LOG_DEBUG_IC_SESSION("ICOCH98", "Dropping confirmed messages");
- for (auto it = NotYetConfirmed.begin(); it != NotYetConfirmed.end() && it->Serial <= confirm; ) {
- Pool.Release(NotYetConfirmed, it++);
- }
- }
-
- bool TEventOutputChannel::FeedBuf(TTcpPacketOutTask& task, ui64 serial, ui64 *weightConsumed) {
- for (;;) {
- Y_ABORT_UNLESS(!Queue.empty());
- TEventHolder& event = Queue.front();
-
- switch (State) {
- case EState::INITIAL:
- event.InitChecksum();
- if (event.Buffer) {
- State = EState::BODY;
- Iter = event.Buffer->GetBeginIter();
- SerializationInfo = &event.Buffer->GetSerializationInfo();
- SectionIndex = 0;
- PartLenRemain = 0;
- } else if (event.Event) {
- State = EState::BODY;
- IEventBase *base = event.Event.Get();
- if (event.EventSerializedSize) {
- Chunker.SetSerializingEvent(base);
- }
- SerializationInfoContainer = base->CreateSerializationInfo();
- SerializationInfo = &SerializationInfoContainer;
- SectionIndex = 0;
- PartLenRemain = 0;
- } else { // event without buffer and IEventBase instance
- State = EState::DESCRIPTOR;
- SerializationInfoContainer = {};
- SerializationInfo = &SerializationInfoContainer;
- }
- if (!event.EventSerializedSize) {
- State = EState::DESCRIPTOR;
- } else if (Params.UseExternalDataChannel && !SerializationInfo->Sections.empty()) {
- State = EState::SECTIONS;
- SectionIndex = 0;
- }
- break;
-
- case EState::BODY:
- if (FeedPayload(task, event, weightConsumed)) {
- State = EState::DESCRIPTOR;
- } else {
- return false;
- }
- break;
-
- case EState::DESCRIPTOR:
- if (!FeedDescriptor(task, event, weightConsumed)) {
- return false;
- }
- event.Serial = serial;
- NotYetConfirmed.splice(NotYetConfirmed.end(), Queue, Queue.begin()); // move event to not-yet-confirmed queue
- SerializationInfoContainer = {};
- SerializationInfo = nullptr;
- State = EState::INITIAL;
- return true; // we have processed whole event, signal to the caller
-
- case EState::SECTIONS: {
- if (SectionIndex == 0) {
- size_t totalSectionSize = 0;
- for (const auto& section : SerializationInfo->Sections) {
- totalSectionSize += section.Size;
- }
- Y_ABORT_UNLESS(totalSectionSize == event.EventSerializedSize);
- }
-
- while (SectionIndex != SerializationInfo->Sections.size()) {
- char sectionInfo[1 + NInterconnect::NDetail::MaxNumberBytes * 4];
- char *p = sectionInfo;
-
- const auto& section = SerializationInfo->Sections[SectionIndex];
- char& type = *p++;
- type = static_cast<ui8>(EXdcCommand::DECLARE_SECTION);
- p += NInterconnect::NDetail::SerializeNumber(section.Headroom, p);
- p += NInterconnect::NDetail::SerializeNumber(section.Size, p);
- p += NInterconnect::NDetail::SerializeNumber(section.Tailroom, p);
- p += NInterconnect::NDetail::SerializeNumber(section.Alignment, p);
- if (section.IsInline && Params.UseXdcShuffle) {
- type = static_cast<ui8>(EXdcCommand::DECLARE_SECTION_INLINE);
- }
- Y_ABORT_UNLESS(p <= std::end(sectionInfo));
-
- const size_t declareLen = p - sectionInfo;
- if (sizeof(TChannelPart) + XdcData.size() + declareLen <= task.GetInternalFreeAmount() &&
- XdcData.size() + declareLen <= Max<ui16>()) {
- XdcData.insert(XdcData.end(), sectionInfo, p);
- ++SectionIndex;
- } else {
- break;
- }
- }
-
- if (XdcData.empty()) {
- return false;
- }
-
- TChannelPart part{
- .ChannelFlags = static_cast<ui16>(ChannelId | TChannelPart::XdcFlag),
- .Size = static_cast<ui16>(XdcData.size())
- };
- task.Write<false>(&part, sizeof(part));
- task.Write<false>(XdcData.data(), XdcData.size());
- XdcData.clear();
-
- if (SectionIndex == SerializationInfo->Sections.size()) {
- State = EState::BODY;
- SectionIndex = 0;
- PartLenRemain = 0;
- }
-
- break;
- }
- }
- }
- }
-
- template<bool External>
- bool TEventOutputChannel::SerializeEvent(TTcpPacketOutTask& task, TEventHolder& event, size_t *bytesSerialized) {
- auto addChunk = [&](const void *data, size_t len, bool allowCopy) {
- event.UpdateChecksum(data, len);
- if (allowCopy && (reinterpret_cast<uintptr_t>(data) & 63) + len <= 64) {
- task.Write<External>(data, len);
- } else {
- task.Append<External>(data, len);
- }
- *bytesSerialized += len;
- Y_DEBUG_ABORT_UNLESS(len <= PartLenRemain);
- PartLenRemain -= len;
-
- event.EventActuallySerialized += len;
- if (event.EventActuallySerialized > MaxSerializedEventSize) {
- throw TExSerializedEventTooLarge(event.Descr.Type);
- }
- };
-
- bool complete = false;
- if (event.Event) {
- while (!complete) {
- TMutableContiguousSpan out = task.AcquireSpanForWriting<External>().SubSpan(0, PartLenRemain);
- if (!out.size()) {
- break;
- }
- for (const auto& [buffer, size] : Chunker.FeedBuf(out.data(), out.size())) {
- addChunk(buffer, size, false);
- }
- complete = Chunker.IsComplete();
- if (complete) {
- Y_ABORT_UNLESS(Chunker.IsSuccessfull());
- }
- }
- } else if (event.Buffer) {
- while (const size_t numb = Min<size_t>(External ? task.GetExternalFreeAmount() : task.GetInternalFreeAmount(),
- Iter.ContiguousSize(), PartLenRemain)) {
- const char *obuf = Iter.ContiguousData();
- addChunk(obuf, numb, true);
- Iter += numb;
- }
- complete = !Iter.Valid();
- } else {
- Y_ABORT();
- }
- Y_ABORT_UNLESS(!complete || event.EventActuallySerialized == event.EventSerializedSize,
- "EventActuallySerialized# %" PRIu32 " EventSerializedSize# %" PRIu32 " Type# 0x%08" PRIx32,
- event.EventActuallySerialized, event.EventSerializedSize, event.Descr.Type);
-
- return complete;
- }
-
- bool TEventOutputChannel::FeedPayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed) {
- for (;;) {
- // calculate inline or external part size (it may cover a few sections, not just single one)
- while (!PartLenRemain) {
- const auto& sections = SerializationInfo->Sections;
- if (!Params.UseExternalDataChannel || sections.empty()) {
- // all data goes inline
- IsPartInline = true;
- PartLenRemain = Max<size_t>();
- } else if (!Params.UseXdcShuffle) {
- // when the UseXdcShuffle feature is not supported by the remote side, we transfer the whole event over XDC
- IsPartInline = false;
- PartLenRemain = Max<size_t>();
- } else {
- Y_ABORT_UNLESS(SectionIndex < sections.size());
- IsPartInline = sections[SectionIndex].IsInline;
- while (SectionIndex < sections.size() && IsPartInline == sections[SectionIndex].IsInline) {
- PartLenRemain += sections[SectionIndex].Size;
- ++SectionIndex;
- }
- }
- }
-
- // serialize bytes
- const auto complete = IsPartInline
- ? FeedInlinePayload(task, event, weightConsumed)
- : FeedExternalPayload(task, event, weightConsumed);
- if (!complete) { // no space to serialize
- return false;
- } else if (*complete) { // event serialized
- return true;
- }
- }
- }
-
- std::optional<bool> TEventOutputChannel::FeedInlinePayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed) {
- if (task.GetInternalFreeAmount() <= sizeof(TChannelPart)) {
- return std::nullopt;
- }
-
- auto partBookmark = task.Bookmark(sizeof(TChannelPart));
-
- size_t bytesSerialized = 0;
- const bool complete = SerializeEvent<false>(task, event, &bytesSerialized);
-
- Y_DEBUG_ABORT_UNLESS(bytesSerialized);
- Y_ABORT_UNLESS(bytesSerialized <= Max<ui16>());
-
- TChannelPart part{
- .ChannelFlags = ChannelId,
- .Size = static_cast<ui16>(bytesSerialized)
- };
-
- task.WriteBookmark(std::move(partBookmark), &part, sizeof(part));
- *weightConsumed += sizeof(TChannelPart) + part.Size;
- OutputQueueSize -= part.Size;
-
- return complete;
- }
-
- std::optional<bool> TEventOutputChannel::FeedExternalPayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed) {
- const size_t partSize = sizeof(TChannelPart) + sizeof(ui8) + sizeof(ui16) + (Params.Encryption ? 0 : sizeof(ui32));
- if (task.GetInternalFreeAmount() < partSize || task.GetExternalFreeAmount() == 0) {
- return std::nullopt;
- }
-
- // clear external checksum for this chunk
- task.ExternalChecksum = 0;
-
- auto partBookmark = task.Bookmark(partSize);
-
- size_t bytesSerialized = 0;
- const bool complete = SerializeEvent<true>(task, event, &bytesSerialized);
-
- Y_ABORT_UNLESS(0 < bytesSerialized && bytesSerialized <= Max<ui16>());
-
- char buffer[partSize];
- TChannelPart *part = reinterpret_cast<TChannelPart*>(buffer);
- *part = {
- .ChannelFlags = static_cast<ui16>(ChannelId | TChannelPart::XdcFlag),
- .Size = static_cast<ui16>(partSize - sizeof(TChannelPart))
- };
- char *ptr = reinterpret_cast<char*>(part + 1);
- *ptr++ = static_cast<ui8>(EXdcCommand::PUSH_DATA);
- *reinterpret_cast<ui16*>(ptr) = bytesSerialized;
- ptr += sizeof(ui16);
- if (task.ChecksummingXxhash()) {
- XXH3_state_t state;
- XXH3_64bits_reset(&state);
- task.XdcStream.ScanLastBytes(bytesSerialized, [&state](TContiguousSpan span) {
- XXH3_64bits_update(&state, span.data(), span.size());
- });
- *reinterpret_cast<ui32*>(ptr) = XXH3_64bits_digest(&state);
- } else if (task.ChecksummingCrc32c()) {
- *reinterpret_cast<ui32*>(ptr) = task.ExternalChecksum;
- }
-
- task.WriteBookmark(std::move(partBookmark), buffer, partSize);
-
- *weightConsumed += partSize + bytesSerialized;
- OutputQueueSize -= bytesSerialized;
-
- return complete;
- }
-
- void TEventOutputChannel::NotifyUndelivered() {
- LOG_DEBUG_IC_SESSION("ICOCH89", "Notyfying about Undelivered messages! NotYetConfirmed size: %zu, Queue size: %zu", NotYetConfirmed.size(), Queue.size());
- if (State == EState::BODY && Queue.front().Event) {
- Y_ABORT_UNLESS(!Chunker.IsComplete()); // chunk must have an event being serialized
- Y_ABORT_UNLESS(!Queue.empty()); // this event must be the first event in queue
- TEventHolder& event = Queue.front();
- Y_ABORT_UNLESS(Chunker.GetCurrentEvent() == event.Event.Get()); // ensure the event is valid
- Chunker.Abort(); // stop serializing current event
- Y_ABORT_UNLESS(Chunker.IsComplete());
- }
- for (auto& item : NotYetConfirmed) {
- if (item.Descr.Flags & IEventHandle::FlagGenerateUnsureUndelivered) { // notify only when unsure flag is set
- item.ForwardOnNondelivery(true);
- }
- }
- Pool.Release(NotYetConfirmed);
- for (auto& item : Queue) {
- item.ForwardOnNondelivery(false);
- }
- Pool.Release(Queue);
- }
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_channel.h b/library/cpp/actors/interconnect/interconnect_channel.h
deleted file mode 100644
index ef2da2fda7..0000000000
--- a/library/cpp/actors/interconnect/interconnect_channel.h
+++ /dev/null
@@ -1,159 +0,0 @@
-#pragma once
-
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/event_load.h>
-#include <library/cpp/actors/util/rope.h>
-#include <util/generic/deque.h>
-#include <util/generic/vector.h>
-#include <util/generic/map.h>
-#include <util/stream/walk.h>
-#include <library/cpp/actors/wilson/wilson_span.h>
-
-#include "interconnect_common.h"
-#include "interconnect_counters.h"
-#include "packet.h"
-#include "event_holder_pool.h"
-
-namespace NActors {
-#pragma pack(push, 1)
-
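-    // Header of a single payload part within a packet: ChannelFlags packs the channel id (low
-    // bits, see ChannelMask) together with LastPartFlag and XdcFlag, and Size is the number of
-    // bytes that follow this header in the main stream.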
- struct TChannelPart {
- ui16 ChannelFlags;
- ui16 Size;
-
- static constexpr ui16 LastPartFlag = 0x8000;
- static constexpr ui16 XdcFlag = 0x4000;
- static constexpr ui16 ChannelMask = (1 << IEventHandle::ChannelBits) - 1;
-
- static_assert((LastPartFlag & ChannelMask) == 0);
- static_assert((XdcFlag & ChannelMask) == 0);
-
- ui16 GetChannel() const { return ChannelFlags & ChannelMask; }
- bool IsLastPart() const { return ChannelFlags & LastPartFlag; }
- bool IsXdc() const { return ChannelFlags & XdcFlag; }
-
- TString ToString() const {
- return TStringBuilder() << "{Channel# " << GetChannel()
- << " IsLastPart# " << IsLastPart()
- << " IsXdc# " << IsXdc()
- << " Size# " << Size << "}";
- }
- };
-
-#pragma pack(pop)
-
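-    // Commands embedded into the main stream to describe data carried over the external data
-    // channel (XDC); for instance, PUSH_DATA announces a block of externally transferred bytes
-    // (see FeedExternalPayload in interconnect_channel.cpp).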
- enum class EXdcCommand : ui8 {
- DECLARE_SECTION = 1,
- PUSH_DATA,
- DECLARE_SECTION_INLINE,
- };
-
- struct TExSerializedEventTooLarge : std::exception {
- const ui32 Type;
-
- TExSerializedEventTooLarge(ui32 type)
- : Type(type)
- {}
- };
-
- class TEventOutputChannel : public TInterconnectLoggingBase {
- public:
- TEventOutputChannel(TEventHolderPool& pool, ui16 id, ui32 peerNodeId, ui32 maxSerializedEventSize,
- std::shared_ptr<IInterconnectMetrics> metrics, TSessionParams params)
- : TInterconnectLoggingBase(Sprintf("OutputChannel %" PRIu16 " [node %" PRIu32 "]", id, peerNodeId))
- , Pool(pool)
- , PeerNodeId(peerNodeId)
- , ChannelId(id)
- , Metrics(std::move(metrics))
- , Params(std::move(params))
- , MaxSerializedEventSize(maxSerializedEventSize)
- {}
-
- ~TEventOutputChannel() {
- }
-
- std::pair<ui32, TEventHolder*> Push(IEventHandle& ev) {
- TEventHolder& event = Pool.Allocate(Queue);
- const ui32 bytes = event.Fill(ev) + sizeof(TEventDescr2);
- OutputQueueSize += bytes;
- if (event.Span = NWilson::TSpan(15 /*max verbosity*/, NWilson::TTraceId(ev.TraceId), "Interconnect.Queue")) {
- event.Span
- .Attribute("OutputQueueItems", static_cast<i64>(Queue.size()))
- .Attribute("OutputQueueSize", static_cast<i64>(OutputQueueSize));
- }
- return std::make_pair(bytes, &event);
- }
-
- void DropConfirmed(ui64 confirm);
-
- bool FeedBuf(TTcpPacketOutTask& task, ui64 serial, ui64 *weightConsumed);
-
- bool IsEmpty() const {
- return Queue.empty();
- }
-
- bool IsWorking() const {
- return !IsEmpty();
- }
-
- ui32 GetQueueSize() const {
- return (ui32)Queue.size();
- }
-
- ui64 GetBufferedAmountOfData() const {
- return OutputQueueSize;
- }
-
- void NotifyUndelivered();
-
- TEventHolderPool& Pool;
- const ui32 PeerNodeId;
- const ui16 ChannelId;
- std::shared_ptr<IInterconnectMetrics> Metrics;
- const TSessionParams Params;
- const ui32 MaxSerializedEventSize;
- ui64 UnaccountedTraffic = 0;
- ui64 EqualizeCounterOnPause = 0;
- ui64 WeightConsumedOnPause = 0;
-
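-        // Rough serialization progress of the event at the head of Queue; the exact transitions
-        // live in FeedBuf/FeedPayload, but BODY appears to mean that an event body is currently
-        // being chunked (see NotifyUndelivered).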
- enum class EState {
- INITIAL,
- BODY,
- DESCRIPTOR,
- SECTIONS,
- };
- EState State = EState::INITIAL;
-
- protected:
- ui64 OutputQueueSize = 0;
-
- std::list<TEventHolder> Queue;
- std::list<TEventHolder> NotYetConfirmed;
- TRope::TConstIterator Iter;
- TCoroutineChunkSerializer Chunker;
- TEventSerializationInfo SerializationInfoContainer;
- const TEventSerializationInfo *SerializationInfo = nullptr;
- bool IsPartInline = false;
- size_t PartLenRemain = 0;
- size_t SectionIndex = 0;
- std::vector<char> XdcData;
-
- template<bool External>
- bool SerializeEvent(TTcpPacketOutTask& task, TEventHolder& event, size_t *bytesSerialized);
-
- bool FeedPayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed);
- std::optional<bool> FeedInlinePayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed);
- std::optional<bool> FeedExternalPayload(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed);
-
- bool FeedDescriptor(TTcpPacketOutTask& task, TEventHolder& event, ui64 *weightConsumed);
-
- void AccountTraffic() {
- if (const ui64 amount = std::exchange(UnaccountedTraffic, 0)) {
- Metrics->UpdateOutputChannelTraffic(ChannelId, amount);
- }
- }
-
- friend class TInterconnectSessionTCP;
- };
-}
diff --git a/library/cpp/actors/interconnect/interconnect_common.h b/library/cpp/actors/interconnect/interconnect_common.h
deleted file mode 100644
index 300153d8de..0000000000
--- a/library/cpp/actors/interconnect/interconnect_common.h
+++ /dev/null
@@ -1,140 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actorid.h>
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/monlib/metrics/metric_registry.h>
-#include <util/generic/map.h>
-#include <util/generic/set.h>
-#include <util/system/datetime.h>
-
-#include "poller_tcp.h"
-#include "logging.h"
-#include "event_filter.h"
-
-#include <atomic>
-
-namespace NActors {
- enum class EEncryptionMode {
- DISABLED, // no encryption is required at all
- OPTIONAL, // encryption is enabled when supported by both peers
- REQUIRED, // encryption is mandatory
- };
-
- struct TInterconnectSettings {
- TDuration Handshake;
- TDuration DeadPeer;
- TDuration CloseOnIdle;
- ui32 SendBufferDieLimitInMB = 0;
- ui64 OutputBuffersTotalSizeLimitInMB = 0;
- ui32 TotalInflightAmountOfData = 0;
- bool MergePerPeerCounters = false;
- bool MergePerDataCenterCounters = false;
- ui32 TCPSocketBufferSize = 0;
- TDuration PingPeriod = TDuration::Seconds(3);
- TDuration ForceConfirmPeriod = TDuration::Seconds(1);
- TDuration LostConnection;
- TDuration BatchPeriod;
- bool BindOnAllAddresses = true;
- EEncryptionMode EncryptionMode = EEncryptionMode::DISABLED;
- bool TlsAuthOnly = false;
- TString Certificate; // certificate data in PEM format
- TString PrivateKey; // private key for the certificate in PEM format
- TString CaFilePath; // path to certificate authority file
- TString CipherList; // encryption algorithms
- TDuration MessagePendingTimeout = TDuration::Seconds(1); // timeout for which messages are queued while in PendingConnection state
- ui64 MessagePendingSize = Max<ui64>(); // size of the queue
- ui32 MaxSerializedEventSize = NActors::EventMaxByteSize;
- ui32 PreallocatedBufferSize = 8 << 10; // 8 KB
- ui32 NumPreallocatedBuffers = 16;
- bool EnableExternalDataChannel = false;
- bool ValidateIncomingPeerViaDirectLookup = false;
- ui32 SocketBacklogSize = 0; // SOMAXCONN if zero
-
- ui32 GetSendBufferSize() const {
- ui32 res = 512 * 1024; // 512 kb is the default value for send buffer
- if (TCPSocketBufferSize) {
- res = TCPSocketBufferSize;
- }
- return res;
- }
- };
-
- struct TWhiteboardSessionStatus {
- TActorSystem* ActorSystem;
- ui32 PeerId;
- TString Peer;
- bool Connected;
- bool Green;
- bool Yellow;
- bool Orange;
- bool Red;
- i64 ClockSkew;
-
- TWhiteboardSessionStatus(TActorSystem* actorSystem, ui32 peerId, const TString& peer, bool connected, bool green, bool yellow, bool orange, bool red, i64 clockSkew)
- : ActorSystem(actorSystem)
- , PeerId(peerId)
- , Peer(peer)
- , Connected(connected)
- , Green(green)
- , Yellow(yellow)
- , Orange(orange)
- , Red(red)
- , ClockSkew(clockSkew)
- {}
- };
-
- struct TChannelSettings {
- ui16 Weight;
- };
-
- typedef TMap<ui16, TChannelSettings> TChannelsConfig;
-
- using TRegisterMonPageCallback = std::function<void(const TString& path, const TString& title,
- TActorSystem* actorSystem, const TActorId& actorId)>;
-
- using TInitWhiteboardCallback = std::function<void(ui16 icPort, TActorSystem* actorSystem)>;
-
- using TUpdateWhiteboardCallback = std::function<void(const TWhiteboardSessionStatus& data)>;
-
- struct TInterconnectProxyCommon : TAtomicRefCount<TInterconnectProxyCommon> {
- TActorId NameserviceId;
- NMonitoring::TDynamicCounterPtr MonCounters;
- std::shared_ptr<NMonitoring::IMetricRegistry> Metrics;
- TChannelsConfig ChannelsConfig;
- TInterconnectSettings Settings;
- TRegisterMonPageCallback RegisterMonPage;
- TActorId DestructorId;
- std::shared_ptr<std::atomic<TAtomicBase>> DestructorQueueSize;
- TAtomicBase MaxDestructorQueueSize = 1024 * 1024 * 1024;
- TString ClusterUUID;
- TVector<TString> AcceptUUID;
- ui64 StartTime = GetCycleCountFast();
- TString TechnicalSelfHostName;
- TInitWhiteboardCallback InitWhiteboard;
- TUpdateWhiteboardCallback UpdateWhiteboard;
- ui32 HandshakeBallastSize = 0;
- TAtomic StartedSessionKiller = 0;
- TScopeId LocalScopeId;
- std::shared_ptr<TEventFilter> EventFilter;
-        TString Cookie; // unique identifier of a node instance (generated randomly at every start)
- std::unordered_map<ui16, TString> ChannelName;
- std::optional<ui32> OutgoingHandshakeInflightLimit;
-
- struct TVersionInfo {
- TString Tag; // version tag for this node
- TSet<TString> AcceptedTags; // we accept all enlisted version tags of peer nodes, but no others; empty = accept all
- };
-
- // obsolete compatibility control
- TMaybe<TVersionInfo> VersionInfo;
-
- std::optional<TString> CompatibilityInfo;
- std::function<bool(const TString&, TString&)> ValidateCompatibilityInfo;
- std::function<bool(const TInterconnectProxyCommon::TVersionInfo&, TString&)> ValidateCompatibilityOldFormat;
-
- using TPtr = TIntrusivePtr<TInterconnectProxyCommon>;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_counters.cpp b/library/cpp/actors/interconnect/interconnect_counters.cpp
deleted file mode 100644
index 1c55eab650..0000000000
--- a/library/cpp/actors/interconnect/interconnect_counters.cpp
+++ /dev/null
@@ -1,703 +0,0 @@
-#include "interconnect_counters.h"
-
-#include <library/cpp/monlib/metrics/metric_registry.h>
-#include <library/cpp/monlib/metrics/metric_sub_registry.h>
-
-#include <unordered_map>
-
-namespace NActors {
-
-namespace {
-
- class TInterconnectCounters: public IInterconnectMetrics {
- public:
- struct TOutputChannel {
- NMonitoring::TDynamicCounters::TCounterPtr Traffic;
- NMonitoring::TDynamicCounters::TCounterPtr Events;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingTraffic;
- NMonitoring::TDynamicCounters::TCounterPtr OutgoingEvents;
-
- TOutputChannel() = default;
-
- TOutputChannel(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters,
- NMonitoring::TDynamicCounters::TCounterPtr traffic,
- NMonitoring::TDynamicCounters::TCounterPtr events)
- : Traffic(std::move(traffic))
- , Events(std::move(events))
- , OutgoingTraffic(counters->GetCounter("OutgoingTraffic", true))
- , OutgoingEvents(counters->GetCounter("OutgoingEvents", true))
- {}
-
- TOutputChannel(const TOutputChannel&) = default;
- TOutputChannel &operator=(const TOutputChannel& other) = default;
- };
-
- struct TInputChannel {
- NMonitoring::TDynamicCounters::TCounterPtr Traffic;
- NMonitoring::TDynamicCounters::TCounterPtr Events;
- NMonitoring::TDynamicCounters::TCounterPtr ScopeErrors;
- NMonitoring::TDynamicCounters::TCounterPtr IncomingTraffic;
- NMonitoring::TDynamicCounters::TCounterPtr IncomingEvents;
-
- TInputChannel() = default;
-
- TInputChannel(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters,
- NMonitoring::TDynamicCounters::TCounterPtr traffic,
- NMonitoring::TDynamicCounters::TCounterPtr events,
- NMonitoring::TDynamicCounters::TCounterPtr scopeErrors)
- : Traffic(std::move(traffic))
- , Events(std::move(events))
- , ScopeErrors(std::move(scopeErrors))
- , IncomingTraffic(counters->GetCounter("IncomingTraffic", true))
- , IncomingEvents(counters->GetCounter("IncomingEvents", true))
- {}
-
- TInputChannel(const TInputChannel&) = default;
- TInputChannel &operator=(const TInputChannel& other) = default;
- };
-
- struct TInputChannels : std::unordered_map<ui16, TInputChannel> {
- TInputChannel OtherInputChannel;
-
- TInputChannels() = default;
-
- TInputChannels(const TIntrusivePtr<NMonitoring::TDynamicCounters>& counters,
- const std::unordered_map<ui16, TString>& names,
- NMonitoring::TDynamicCounters::TCounterPtr traffic,
- NMonitoring::TDynamicCounters::TCounterPtr events,
- NMonitoring::TDynamicCounters::TCounterPtr scopeErrors)
- : OtherInputChannel(counters->GetSubgroup("channel", "other"), traffic, events, scopeErrors)
- {
- for (const auto& [id, name] : names) {
- try_emplace(id, counters->GetSubgroup("channel", name), traffic, events, scopeErrors);
- }
- }
-
- TInputChannels(const TInputChannels&) = default;
- TInputChannels &operator=(const TInputChannels& other) = default;
-
- const TInputChannel& Get(ui16 id) const {
- const auto it = find(id);
- return it != end() ? it->second : OtherInputChannel;
- }
- };
-
- private:
- const TInterconnectProxyCommon::TPtr Common;
- const bool MergePerDataCenterCounters;
- const bool MergePerPeerCounters;
- NMonitoring::TDynamicCounterPtr Counters;
- NMonitoring::TDynamicCounterPtr PerSessionCounters;
- NMonitoring::TDynamicCounterPtr PerDataCenterCounters;
- NMonitoring::TDynamicCounterPtr& AdaptiveCounters;
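-        // reference bound in the constructor: points to PerDataCenterCounters, Counters or
-        // PerSessionCounters depending on the MergePerDataCenter/MergePerPeer settings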
-
- bool Initialized = false;
-
- NMonitoring::TDynamicCounters::TCounterPtr Traffic;
- NMonitoring::TDynamicCounters::TCounterPtr Events;
- NMonitoring::TDynamicCounters::TCounterPtr ScopeErrors;
-
- public:
- TInterconnectCounters(const TInterconnectProxyCommon::TPtr& common)
- : Common(common)
- , MergePerDataCenterCounters(common->Settings.MergePerDataCenterCounters)
- , MergePerPeerCounters(common->Settings.MergePerPeerCounters)
- , Counters(common->MonCounters)
- , AdaptiveCounters(MergePerDataCenterCounters
- ? PerDataCenterCounters :
- MergePerPeerCounters ? Counters : PerSessionCounters)
- {}
-
- void AddInflightDataAmount(ui64 value) override {
- *InflightDataAmount += value;
- }
-
- void SubInflightDataAmount(ui64 value) override {
- *InflightDataAmount -= value;
- }
-
- void AddTotalBytesWritten(ui64 value) override {
- *TotalBytesWritten += value;
- }
-
- void SetClockSkewMicrosec(i64 value) override {
- *ClockSkewMicrosec = value;
- }
-
- void IncSessionDeaths() override {
- ++*SessionDeaths;
- }
-
- void IncHandshakeFails() override {
- ++*HandshakeFails;
- }
-
- void SetConnected(ui32 value) override {
- *Connected = value;
- }
-
- void IncSubscribersCount() override {
- ++*SubscribersCount;
- }
-
- void SubSubscribersCount(ui32 value) override {
- *SubscribersCount -= value;
- }
-
- void SubOutputBuffersTotalSize(ui64 value) override {
- *OutputBuffersTotalSize -= value;
- }
-
- void AddOutputBuffersTotalSize(ui64 value) override {
- *OutputBuffersTotalSize += value;
- }
-
- ui64 GetOutputBuffersTotalSize() const override {
- return *OutputBuffersTotalSize;
- }
-
- void IncDisconnections() override {
- ++*Disconnections;
- }
-
- void IncUsefulWriteWakeups() override {
- ++*UsefulWriteWakeups;
- }
-
- void IncSpuriousWriteWakeups() override {
- ++*SpuriousWriteWakeups;
- }
-
- void IncSendSyscalls(ui64 ns) override {
- ++*SendSyscalls;
- *SendSyscallsNs += ns;
- }
-
- void IncInflyLimitReach() override {
- ++*InflyLimitReach;
- }
-
- void IncUsefulReadWakeups() override {
- ++*UsefulReadWakeups;
- }
-
- void IncSpuriousReadWakeups() override {
- ++*SpuriousReadWakeups;
- }
-
- void IncDisconnectByReason(const TString& s) override {
- if (auto it = DisconnectByReason.find(s); it != DisconnectByReason.end()) {
- it->second->Inc();
- }
- }
-
- void AddInputChannelsIncomingTraffic(ui16 channel, ui64 incomingTraffic) override {
- auto& ch = InputChannels.Get(channel);
- *ch.IncomingTraffic += incomingTraffic;
- }
-
- void IncInputChannelsIncomingEvents(ui16 channel) override {
- auto& ch = InputChannels.Get(channel);
- ++*ch.IncomingEvents;
- }
-
- void IncRecvSyscalls(ui64 ns) override {
- ++*RecvSyscalls;
- *RecvSyscallsNs += ns;
- }
-
- void AddTotalBytesRead(ui64 value) override {
- *TotalBytesRead += value;
- }
-
- void UpdatePingTimeHistogram(ui64 value) override {
- PingTimeHistogram->Collect(value);
- }
-
- void UpdateOutputChannelTraffic(ui16 channel, ui64 value) override {
- auto& ch = GetOutputChannel(channel);
- if (ch.OutgoingTraffic) {
- *ch.OutgoingTraffic += value;
- }
- if (ch.Traffic) {
- *ch.Traffic += value;
- }
- }
-
- void UpdateOutputChannelEvents(ui16 channel) override {
- auto& ch = GetOutputChannel(channel);
- if (ch.OutgoingEvents) {
- ++*ch.OutgoingEvents;
- }
- if (ch.Events) {
- ++*ch.Events;
- }
- }
-
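-        // (Re)binds counters to the proper subgroups: when the peer host name or data center id
-        // changes, the per-session / per-data-center subtrees are dropped and re-created, while
-        // global counters are registered only once.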
- void SetPeerInfo(const TString& name, const TString& dataCenterId) override {
- if (name != std::exchange(HumanFriendlyPeerHostName, name)) {
- PerSessionCounters.Reset();
- }
- VALGRIND_MAKE_READABLE(&DataCenterId, sizeof(DataCenterId));
- if (dataCenterId != std::exchange(DataCenterId, dataCenterId)) {
- PerDataCenterCounters.Reset();
- }
-
- const bool updatePerDataCenter = !PerDataCenterCounters && MergePerDataCenterCounters;
- if (updatePerDataCenter) {
- PerDataCenterCounters = Counters->GetSubgroup("dataCenterId", *DataCenterId);
- }
-
- const bool updatePerSession = !PerSessionCounters || updatePerDataCenter;
- if (updatePerSession) {
- auto base = MergePerDataCenterCounters ? PerDataCenterCounters : Counters;
- PerSessionCounters = base->GetSubgroup("peer", *HumanFriendlyPeerHostName);
- }
-
- const bool updateGlobal = !Initialized;
-
- const bool updateAdaptive =
- &AdaptiveCounters == &Counters ? updateGlobal :
- &AdaptiveCounters == &PerSessionCounters ? updatePerSession :
- &AdaptiveCounters == &PerDataCenterCounters ? updatePerDataCenter :
- false;
-
- if (updatePerSession) {
- Connected = PerSessionCounters->GetCounter("Connected");
- Disconnections = PerSessionCounters->GetCounter("Disconnections", true);
- ClockSkewMicrosec = PerSessionCounters->GetCounter("ClockSkewMicrosec");
- Traffic = PerSessionCounters->GetCounter("Traffic", true);
- Events = PerSessionCounters->GetCounter("Events", true);
- ScopeErrors = PerSessionCounters->GetCounter("ScopeErrors", true);
-
- for (const auto& [id, name] : Common->ChannelName) {
- OutputChannels.try_emplace(id, Counters->GetSubgroup("channel", name), Traffic, Events);
- }
- OtherOutputChannel = TOutputChannel(Counters->GetSubgroup("channel", "other"), Traffic, Events);
-
- InputChannels = TInputChannels(Counters, Common->ChannelName, Traffic, Events, ScopeErrors);
- }
-
- if (updateAdaptive) {
- SessionDeaths = AdaptiveCounters->GetCounter("Session_Deaths", true);
- HandshakeFails = AdaptiveCounters->GetCounter("Handshake_Fails", true);
- InflyLimitReach = AdaptiveCounters->GetCounter("InflyLimitReach", true);
- InflightDataAmount = AdaptiveCounters->GetCounter("Inflight_Data");
-
- PingTimeHistogram = AdaptiveCounters->GetHistogram(
- "PingTimeUs", NMonitoring::ExponentialHistogram(18, 2, 125));
- }
-
- if (updateGlobal) {
- OutputBuffersTotalSize = Counters->GetCounter("OutputBuffersTotalSize");
- SendSyscalls = Counters->GetCounter("SendSyscalls", true);
- SendSyscallsNs = Counters->GetCounter("SendSyscallsNs", true);
- RecvSyscalls = Counters->GetCounter("RecvSyscalls", true);
- RecvSyscallsNs = Counters->GetCounter("RecvSyscallsNs", true);
- SpuriousReadWakeups = Counters->GetCounter("SpuriousReadWakeups", true);
- UsefulReadWakeups = Counters->GetCounter("UsefulReadWakeups", true);
- SpuriousWriteWakeups = Counters->GetCounter("SpuriousWriteWakeups", true);
- UsefulWriteWakeups = Counters->GetCounter("UsefulWriteWakeups", true);
- SubscribersCount = AdaptiveCounters->GetCounter("SubscribersCount");
- TotalBytesWritten = Counters->GetCounter("TotalBytesWritten", true);
- TotalBytesRead = Counters->GetCounter("TotalBytesRead", true);
-
- auto disconnectReasonGroup = Counters->GetSubgroup("subsystem", "disconnectReason");
- for (const char *reason : TDisconnectReason::Reasons) {
- DisconnectByReason[reason] = disconnectReasonGroup->GetCounter(reason, true);
- }
- }
-
- Initialized = true;
- }
-
- const TOutputChannel& GetOutputChannel(ui16 index) const {
- Y_ABORT_UNLESS(Initialized);
- const auto it = OutputChannels.find(index);
- return it != OutputChannels.end() ? it->second : OtherOutputChannel;
- }
-
- private:
- NMonitoring::TDynamicCounters::TCounterPtr SessionDeaths;
- NMonitoring::TDynamicCounters::TCounterPtr HandshakeFails;
- NMonitoring::TDynamicCounters::TCounterPtr Connected;
- NMonitoring::TDynamicCounters::TCounterPtr Disconnections;
- NMonitoring::TDynamicCounters::TCounterPtr InflightDataAmount;
- NMonitoring::TDynamicCounters::TCounterPtr InflyLimitReach;
- NMonitoring::TDynamicCounters::TCounterPtr OutputBuffersTotalSize;
- NMonitoring::TDynamicCounters::TCounterPtr QueueUtilization;
- NMonitoring::TDynamicCounters::TCounterPtr SubscribersCount;
- NMonitoring::TDynamicCounters::TCounterPtr SendSyscalls;
- NMonitoring::TDynamicCounters::TCounterPtr SendSyscallsNs;
- NMonitoring::TDynamicCounters::TCounterPtr ClockSkewMicrosec;
- NMonitoring::TDynamicCounters::TCounterPtr RecvSyscalls;
- NMonitoring::TDynamicCounters::TCounterPtr RecvSyscallsNs;
- NMonitoring::TDynamicCounters::TCounterPtr UsefulReadWakeups;
- NMonitoring::TDynamicCounters::TCounterPtr SpuriousReadWakeups;
- NMonitoring::TDynamicCounters::TCounterPtr UsefulWriteWakeups;
- NMonitoring::TDynamicCounters::TCounterPtr SpuriousWriteWakeups;
- NMonitoring::THistogramPtr PingTimeHistogram;
-
- std::unordered_map<ui16, TOutputChannel> OutputChannels;
- TOutputChannel OtherOutputChannel;
- TInputChannels InputChannels;
- THashMap<TString, NMonitoring::TDynamicCounters::TCounterPtr> DisconnectByReason;
-
- NMonitoring::TDynamicCounters::TCounterPtr TotalBytesWritten, TotalBytesRead;
- };
-
- class TInterconnectMetrics: public IInterconnectMetrics {
- public:
- struct TOutputChannel {
- NMonitoring::IRate* Traffic;
- NMonitoring::IRate* Events;
- NMonitoring::IRate* OutgoingTraffic;
- NMonitoring::IRate* OutgoingEvents;
-
- TOutputChannel() = default;
-
- TOutputChannel(const std::shared_ptr<NMonitoring::IMetricRegistry>& metrics,
- NMonitoring::IRate* traffic,
- NMonitoring::IRate* events)
- : Traffic(traffic)
- , Events(events)
- , OutgoingTraffic(metrics->Rate(NMonitoring::MakeLabels({{"sensor", "interconnect.outgoing_traffic"}})))
- , OutgoingEvents(metrics->Rate(NMonitoring::MakeLabels({{"sensor", "interconnect.outgoing_events"}})))
- {}
-
- TOutputChannel(const TOutputChannel&) = default;
- TOutputChannel &operator=(const TOutputChannel& other) = default;
- };
-
- struct TInputChannel {
- NMonitoring::IRate* Traffic;
- NMonitoring::IRate* Events;
- NMonitoring::IRate* ScopeErrors;
- NMonitoring::IRate* IncomingTraffic;
- NMonitoring::IRate* IncomingEvents;
-
- TInputChannel() = default;
-
- TInputChannel(const std::shared_ptr<NMonitoring::IMetricRegistry>& metrics,
- NMonitoring::IRate* traffic, NMonitoring::IRate* events,
- NMonitoring::IRate* scopeErrors)
- : Traffic(traffic)
- , Events(events)
- , ScopeErrors(scopeErrors)
- , IncomingTraffic(metrics->Rate(NMonitoring::MakeLabels({{"sensor", "interconnect.incoming_traffic"}})))
- , IncomingEvents(metrics->Rate(NMonitoring::MakeLabels({{"sensor", "interconnect.incoming_events"}})))
- {}
-
- TInputChannel(const TInputChannel&) = default;
- TInputChannel &operator=(const TInputChannel& other) = default;
- };
-
- struct TInputChannels : std::unordered_map<ui16, TInputChannel> {
- TInputChannel OtherInputChannel;
-
- TInputChannels() = default;
-
- TInputChannels(const std::shared_ptr<NMonitoring::IMetricRegistry>& metrics,
- const std::unordered_map<ui16, TString>& names,
- NMonitoring::IRate* traffic, NMonitoring::IRate* events,
- NMonitoring::IRate* scopeErrors)
- : OtherInputChannel(std::make_shared<NMonitoring::TMetricSubRegistry>(
- NMonitoring::TLabels{{"channel", "other"}}, metrics), traffic, events, scopeErrors)
- {
- for (const auto& [id, name] : names) {
- try_emplace(id, std::make_shared<NMonitoring::TMetricSubRegistry>(NMonitoring::TLabels{{"channel", name}}, metrics),
- traffic, events, scopeErrors);
- }
- }
-
- TInputChannels(const TInputChannels&) = default;
- TInputChannels &operator=(const TInputChannels& other) = default;
-
- const TInputChannel& Get(ui16 id) const {
- const auto it = find(id);
- return it != end() ? it->second : OtherInputChannel;
- }
- };
-
- TInterconnectMetrics(const TInterconnectProxyCommon::TPtr& common)
- : Common(common)
- , MergePerDataCenterMetrics_(common->Settings.MergePerDataCenterCounters)
- , MergePerPeerMetrics_(common->Settings.MergePerPeerCounters)
- , Metrics_(common->Metrics)
- , AdaptiveMetrics_(MergePerDataCenterMetrics_
- ? PerDataCenterMetrics_ :
- MergePerPeerMetrics_ ? Metrics_ : PerSessionMetrics_)
- {}
-
- void AddInflightDataAmount(ui64 value) override {
- InflightDataAmount_->Add(value);
- }
-
- void SubInflightDataAmount(ui64 value) override {
- InflightDataAmount_->Add(-value);
- }
-
- void AddTotalBytesWritten(ui64 value) override {
- TotalBytesWritten_->Add(value);
- }
-
- void SetClockSkewMicrosec(i64 value) override {
- ClockSkewMicrosec_->Set(value);
- }
-
- void IncSessionDeaths() override {
- SessionDeaths_->Inc();
- }
-
- void IncHandshakeFails() override {
- HandshakeFails_->Inc();
- }
-
- void SetConnected(ui32 value) override {
- Connected_->Set(value);
- }
-
- void IncSubscribersCount() override {
- SubscribersCount_->Inc();
- }
-
- void SubSubscribersCount(ui32 value) override {
- SubscribersCount_->Add(-value);
- }
-
- void SubOutputBuffersTotalSize(ui64 value) override {
- OutputBuffersTotalSize_->Add(-value);
- }
-
- void AddOutputBuffersTotalSize(ui64 value) override {
- OutputBuffersTotalSize_->Add(value);
- }
-
- ui64 GetOutputBuffersTotalSize() const override {
- return OutputBuffersTotalSize_->Get();
- }
-
- void IncDisconnections() override {
- Disconnections_->Inc();
- }
-
- void IncUsefulWriteWakeups() override {
- UsefulWriteWakeups_->Inc();
- }
-
- void IncSpuriousWriteWakeups() override {
- SpuriousWriteWakeups_->Inc();
- }
-
- void IncSendSyscalls(ui64 /*ns*/) override {
- SendSyscalls_->Inc();
- }
-
- void IncInflyLimitReach() override {
- InflyLimitReach_->Inc();
- }
-
- void IncUsefulReadWakeups() override {
- UsefulReadWakeups_->Inc();
- }
-
- void IncSpuriousReadWakeups() override {
- SpuriousReadWakeups_->Inc();
- }
-
- void IncDisconnectByReason(const TString& s) override {
- if (auto it = DisconnectByReason_.find(s); it != DisconnectByReason_.end()) {
- it->second->Inc();
- }
- }
-
- void AddInputChannelsIncomingTraffic(ui16 channel, ui64 incomingTraffic) override {
- auto& ch = InputChannels_.Get(channel);
- ch.IncomingTraffic->Add(incomingTraffic);
- }
-
- void IncInputChannelsIncomingEvents(ui16 channel) override {
- auto& ch = InputChannels_.Get(channel);
- ch.IncomingEvents->Inc();
- }
-
- void IncRecvSyscalls(ui64 /*ns*/) override {
- RecvSyscalls_->Inc();
- }
-
- void AddTotalBytesRead(ui64 value) override {
- TotalBytesRead_->Add(value);
- }
-
- void UpdatePingTimeHistogram(ui64 value) override {
- PingTimeHistogram_->Record(value);
- }
-
- void UpdateOutputChannelTraffic(ui16 channel, ui64 value) override {
- auto& ch = GetOutputChannel(channel);
- if (ch.OutgoingTraffic) {
- ch.OutgoingTraffic->Add(value);
- }
- if (ch.Traffic) {
- ch.Traffic->Add(value);
- }
- }
-
- void UpdateOutputChannelEvents(ui16 channel) override {
- auto& ch = GetOutputChannel(channel);
- if (ch.OutgoingEvents) {
- ch.OutgoingEvents->Inc();
- }
- if (ch.Events) {
- ch.Events->Inc();
- }
- }
-
- void SetPeerInfo(const TString& name, const TString& dataCenterId) override {
- if (name != std::exchange(HumanFriendlyPeerHostName, name)) {
- PerSessionMetrics_.reset();
- }
- VALGRIND_MAKE_READABLE(&DataCenterId, sizeof(DataCenterId));
- if (dataCenterId != std::exchange(DataCenterId, dataCenterId)) {
- PerDataCenterMetrics_.reset();
- }
-
- const bool updatePerDataCenter = !PerDataCenterMetrics_ && MergePerDataCenterMetrics_;
- if (updatePerDataCenter) {
- PerDataCenterMetrics_ = std::make_shared<NMonitoring::TMetricSubRegistry>(
- NMonitoring::TLabels{{"datacenter_id", *DataCenterId}}, Metrics_);
- }
-
- const bool updatePerSession = !PerSessionMetrics_ || updatePerDataCenter;
- if (updatePerSession) {
- auto base = MergePerDataCenterMetrics_ ? PerDataCenterMetrics_ : Metrics_;
- PerSessionMetrics_ = std::make_shared<NMonitoring::TMetricSubRegistry>(
- NMonitoring::TLabels{{"peer", *HumanFriendlyPeerHostName}}, base);
- }
-
- const bool updateGlobal = !Initialized_;
-
- const bool updateAdaptive =
- &AdaptiveMetrics_ == &Metrics_ ? updateGlobal :
- &AdaptiveMetrics_ == &PerSessionMetrics_ ? updatePerSession :
- &AdaptiveMetrics_ == &PerDataCenterMetrics_ ? updatePerDataCenter :
- false;
-
- auto createRate = [](std::shared_ptr<NMonitoring::IMetricRegistry> metrics, TStringBuf name) mutable {
- return metrics->Rate(NMonitoring::MakeLabels(NMonitoring::TLabels{{"sensor", name}}));
- };
- auto createIntGauge = [](std::shared_ptr<NMonitoring::IMetricRegistry> metrics, TStringBuf name) mutable {
- return metrics->IntGauge(NMonitoring::MakeLabels(NMonitoring::TLabels{{"sensor", name}}));
- };
-
- if (updatePerSession) {
- Connected_ = createIntGauge(PerSessionMetrics_, "interconnect.connected");
- Disconnections_ = createRate(PerSessionMetrics_, "interconnect.disconnections");
- ClockSkewMicrosec_ = createIntGauge(PerSessionMetrics_, "interconnect.clock_skew_microsec");
- Traffic_ = createRate(PerSessionMetrics_, "interconnect.traffic");
- Events_ = createRate(PerSessionMetrics_, "interconnect.events");
- ScopeErrors_ = createRate(PerSessionMetrics_, "interconnect.scope_errors");
-
- for (const auto& [id, name] : Common->ChannelName) {
- OutputChannels_.try_emplace(id, std::make_shared<NMonitoring::TMetricSubRegistry>(
- NMonitoring::TLabels{{"channel", name}}, Metrics_), Traffic_, Events_);
- }
- OtherOutputChannel_ = TOutputChannel(std::make_shared<NMonitoring::TMetricSubRegistry>(
- NMonitoring::TLabels{{"channel", "other"}}, Metrics_), Traffic_, Events_);
-
- InputChannels_ = TInputChannels(Metrics_, Common->ChannelName, Traffic_, Events_, ScopeErrors_);
- }
-
- if (updateAdaptive) {
- SessionDeaths_ = createRate(AdaptiveMetrics_, "interconnect.session_deaths");
- HandshakeFails_ = createRate(AdaptiveMetrics_, "interconnect.handshake_fails");
- InflyLimitReach_ = createRate(AdaptiveMetrics_, "interconnect.infly_limit_reach");
- InflightDataAmount_ = createRate(AdaptiveMetrics_, "interconnect.inflight_data");
- PingTimeHistogram_ = AdaptiveMetrics_->HistogramRate(
- NMonitoring::MakeLabels({{"sensor", "interconnect.ping_time_us"}}), NMonitoring::ExponentialHistogram(18, 2, 125));
- }
-
- if (updateGlobal) {
- OutputBuffersTotalSize_ = createRate(Metrics_, "interconnect.output_buffers_total_size");
- SendSyscalls_ = createRate(Metrics_, "interconnect.send_syscalls");
- RecvSyscalls_ = createRate(Metrics_, "interconnect.recv_syscalls");
- SpuriousReadWakeups_ = createRate(Metrics_, "interconnect.spurious_read_wakeups");
- UsefulReadWakeups_ = createRate(Metrics_, "interconnect.useful_read_wakeups");
- SpuriousWriteWakeups_ = createRate(Metrics_, "interconnect.spurious_write_wakeups");
- UsefulWriteWakeups_ = createRate(Metrics_, "interconnect.useful_write_wakeups");
- SubscribersCount_ = createIntGauge(AdaptiveMetrics_, "interconnect.subscribers_count");
- TotalBytesWritten_ = createRate(Metrics_, "interconnect.total_bytes_written");
- TotalBytesRead_ = createRate(Metrics_, "interconnect.total_bytes_read");
-
- for (const char *reason : TDisconnectReason::Reasons) {
- DisconnectByReason_[reason] = Metrics_->Rate(
- NMonitoring::MakeLabels({
- {"sensor", "interconnect.disconnect_reason"},
- {"reason", reason},
- }));
- }
- }
-
- Initialized_ = true;
- }
-
- const TOutputChannel& GetOutputChannel(ui16 index) const {
- Y_ABORT_UNLESS(Initialized_);
- const auto it = OutputChannels_.find(index);
- return it != OutputChannels_.end() ? it->second : OtherOutputChannel_;
- }
-
- private:
- const TInterconnectProxyCommon::TPtr Common;
- const bool MergePerDataCenterMetrics_;
- const bool MergePerPeerMetrics_;
- std::shared_ptr<NMonitoring::IMetricRegistry> Metrics_;
- std::shared_ptr<NMonitoring::IMetricRegistry> PerSessionMetrics_;
- std::shared_ptr<NMonitoring::IMetricRegistry> PerDataCenterMetrics_;
- std::shared_ptr<NMonitoring::IMetricRegistry>& AdaptiveMetrics_;
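-        // same idea as AdaptiveCounters in TInterconnectCounters: a reference selected in the
-        // constructor according to the merge settings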
- bool Initialized_ = false;
-
- NMonitoring::IRate* Traffic_;
-
- NMonitoring::IRate* Events_;
- NMonitoring::IRate* ScopeErrors_;
- NMonitoring::IRate* Disconnections_;
- NMonitoring::IIntGauge* Connected_;
-
- NMonitoring::IRate* SessionDeaths_;
- NMonitoring::IRate* HandshakeFails_;
- NMonitoring::IRate* InflyLimitReach_;
- NMonitoring::IRate* InflightDataAmount_;
- NMonitoring::IRate* OutputBuffersTotalSize_;
- NMonitoring::IIntGauge* SubscribersCount_;
- NMonitoring::IRate* SendSyscalls_;
- NMonitoring::IRate* RecvSyscalls_;
- NMonitoring::IRate* SpuriousWriteWakeups_;
- NMonitoring::IRate* UsefulWriteWakeups_;
- NMonitoring::IRate* SpuriousReadWakeups_;
- NMonitoring::IRate* UsefulReadWakeups_;
- NMonitoring::IIntGauge* ClockSkewMicrosec_;
-
- NMonitoring::IHistogram* PingTimeHistogram_;
-
- THashMap<ui16, TOutputChannel> OutputChannels_;
- TOutputChannel OtherOutputChannel_;
- TInputChannels InputChannels_;
-
- THashMap<TString, NMonitoring::IRate*> DisconnectByReason_;
-
- NMonitoring::IRate* TotalBytesWritten_;
- NMonitoring::IRate* TotalBytesRead_;
- };
-
-} // namespace
-
-std::unique_ptr<IInterconnectMetrics> CreateInterconnectCounters(const TInterconnectProxyCommon::TPtr& common) {
- return std::make_unique<TInterconnectCounters>(common);
-}
-
-std::unique_ptr<IInterconnectMetrics> CreateInterconnectMetrics(const TInterconnectProxyCommon::TPtr& common) {
- return std::make_unique<TInterconnectMetrics>(common);
-}
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/interconnect_counters.h b/library/cpp/actors/interconnect/interconnect_counters.h
deleted file mode 100644
index 205ccf35b1..0000000000
--- a/library/cpp/actors/interconnect/interconnect_counters.h
+++ /dev/null
@@ -1,57 +0,0 @@
-#pragma once
-
-#include <util/system/valgrind.h>
-
-#include "types.h"
-
-#include "interconnect_common.h"
-
-#include <memory>
-#include <optional>
-
-namespace NActors {
-
-class IInterconnectMetrics {
-public:
- virtual ~IInterconnectMetrics() = default;
-
- virtual void AddInflightDataAmount(ui64 value) = 0;
- virtual void SubInflightDataAmount(ui64 value) = 0;
- virtual void AddTotalBytesWritten(ui64 value) = 0;
- virtual void SetClockSkewMicrosec(i64 value) = 0;
- virtual void IncSessionDeaths() = 0;
- virtual void IncHandshakeFails() = 0;
- virtual void SetConnected(ui32 value) = 0;
- virtual void IncSubscribersCount() = 0;
- virtual void SubSubscribersCount(ui32 value) = 0;
- virtual void SubOutputBuffersTotalSize(ui64 value) = 0;
- virtual void AddOutputBuffersTotalSize(ui64 value) = 0;
- virtual ui64 GetOutputBuffersTotalSize() const = 0;
- virtual void IncDisconnections() = 0;
- virtual void IncUsefulWriteWakeups() = 0;
- virtual void IncSpuriousWriteWakeups() = 0;
- virtual void IncSendSyscalls(ui64 ns) = 0;
- virtual void IncInflyLimitReach() = 0;
- virtual void IncDisconnectByReason(const TString& s) = 0;
- virtual void IncUsefulReadWakeups() = 0;
- virtual void IncSpuriousReadWakeups() = 0;
- virtual void SetPeerInfo(const TString& name, const TString& dataCenterId) = 0;
- virtual void AddInputChannelsIncomingTraffic(ui16 channel, ui64 incomingTraffic) = 0;
- virtual void IncInputChannelsIncomingEvents(ui16 channel) = 0;
- virtual void IncRecvSyscalls(ui64 ns) = 0;
- virtual void AddTotalBytesRead(ui64 value) = 0;
- virtual void UpdatePingTimeHistogram(ui64 value) = 0;
- virtual void UpdateOutputChannelTraffic(ui16 channel, ui64 value) = 0;
- virtual void UpdateOutputChannelEvents(ui16 channel) = 0;
- TString GetHumanFriendlyPeerHostName() const {
- return HumanFriendlyPeerHostName.value_or(TString());
- }
-
-protected:
- std::optional<TString> DataCenterId;
- std::optional<TString> HumanFriendlyPeerHostName;
-};
-
-std::unique_ptr<IInterconnectMetrics> CreateInterconnectCounters(const NActors::TInterconnectProxyCommon::TPtr& common);
-std::unique_ptr<IInterconnectMetrics> CreateInterconnectMetrics(const NActors::TInterconnectProxyCommon::TPtr& common);
-} // NActors
diff --git a/library/cpp/actors/interconnect/interconnect_handshake.cpp b/library/cpp/actors/interconnect/interconnect_handshake.cpp
deleted file mode 100644
index 8c347507b2..0000000000
--- a/library/cpp/actors/interconnect/interconnect_handshake.cpp
+++ /dev/null
@@ -1,1237 +0,0 @@
-#include "interconnect_handshake.h"
-#include "handshake_broker.h"
-#include "interconnect_tcp_proxy.h"
-
-#include <library/cpp/actors/core/actor_coroutine.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <util/system/getpid.h>
-#include <util/random/entropy.h>
-
-#include <google/protobuf/text_format.h>
-
-#include <variant>
-
-namespace NActors {
- static constexpr size_t StackSize = 64 * 1024; // 64k should be enough
-
- class THandshakeActor
- : public TActorCoroImpl
- , public TInterconnectLoggingBase
- {
- struct TExHandshakeFailed : yexception {};
- struct TExPoison {};
-
- static constexpr TDuration ResolveTimeout = TDuration::Seconds(1);
-
-#pragma pack(push, 1)
-
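-        // Fixed-size packet exchanged at the very beginning of a handshake: a plain header with
-        // the virtual actor ids, next packet serial and protocol version, protected by a CRC32C
-        // checksum (see Check()).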
- struct TInitialPacket {
- struct {
- TActorId SelfVirtualId;
- TActorId PeerVirtualId;
- ui64 NextPacket;
- ui64 Version;
- } Header;
- ui32 Checksum;
-
- TInitialPacket() = default;
-
- TInitialPacket(const TActorId& self, const TActorId& peer, ui64 nextPacket, ui64 version) {
- Header.SelfVirtualId = self;
- Header.PeerVirtualId = peer;
- Header.NextPacket = nextPacket;
- Header.Version = version;
- Checksum = Crc32cExtendMSanCompatible(0, &Header, sizeof(Header));
- }
-
- bool Check() const {
- return Checksum == Crc32cExtendMSanCompatible(0, &Header, sizeof(Header));
- }
-
- TString ToString() const {
- return TStringBuilder()
- << "{SelfVirtualId# " << Header.SelfVirtualId.ToString()
- << " PeerVirtualId# " << Header.PeerVirtualId.ToString()
- << " NextPacket# " << Header.NextPacket
- << " Version# " << Header.Version
- << "}";
- }
- };
-
- struct TExHeader {
- static constexpr ui32 MaxSize = 1024 * 1024;
-
- ui32 Checksum;
- ui32 Size;
-
- ui32 CalculateChecksum(const void* data, size_t len) const {
- return Crc32cExtendMSanCompatible(Crc32cExtendMSanCompatible(0, &Size, sizeof(Size)), data, len);
- }
-
- void Sign(const void* data, size_t len) {
- Checksum = CalculateChecksum(data, len);
- }
-
- bool Check(const void* data, size_t len) const {
- return Checksum == CalculateChecksum(data, len);
- }
- };
-
-#pragma pack(pop)
-
- private:
- class TConnection : TNonCopyable {
- THandshakeActor *Actor = nullptr;
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
- TPollerToken::TPtr PollerToken;
-
- public:
- TConnection(THandshakeActor *actor, TIntrusivePtr<NInterconnect::TStreamSocket> socket)
- : Actor(actor)
- , Socket(std::move(socket))
- {}
-
- void Connect(TString *peerAddr) {
- for (const NInterconnect::TAddress& address : Actor->ResolvePeer()) {
- // create the socket with matching address family
- int err = 0;
- Socket = NInterconnect::TStreamSocket::Make(address.GetFamily(), &err);
- if (err == EAFNOSUPPORT) {
- Reset();
- continue;
- } else if (*Socket == -1) {
- Actor->Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "System error: failed to create socket");
- }
-
- // extract peer address
- if (peerAddr) {
- *peerAddr = address.ToString();
- }
-
- // set up socket parameters
- SetupSocket();
-
- // start connecting
- err = -Socket->Connect(address);
- if (err == EINPROGRESS) {
- RegisterInPoller();
- WaitPoller(false, true, "WaitConnect");
- err = Socket->GetConnectStatus();
- } else if (!err) {
- RegisterInPoller();
- }
-
- // check if connection succeeded
- if (err) {
- Reset();
- } else {
- break;
- }
- }
-
- if (!Socket) {
- Actor->Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Couldn't connect to any resolved address", true);
- }
- }
-
- void Reset() {
- Socket.Reset();
- PollerToken.Reset();
- }
-
- void SetupSocket() {
- // switch to nonblocking mode
- try {
- SetNonBlock(*Socket);
- SetNoDelay(*Socket, true);
- } catch (...) {
-                    Actor->Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "System error: can't set nonblocking mode for socket");
- }
-
- // setup send buffer size
- Socket->SetSendBufferSize(Actor->Common->Settings.GetSendBufferSize());
- }
-
- void RegisterInPoller() {
- Y_ABORT_UNLESS(!PollerToken);
- const bool success = Actor->Send(MakePollerActorId(), new TEvPollerRegister(Socket, Actor->SelfActorId, Actor->SelfActorId));
- Y_ABORT_UNLESS(success);
- auto result = Actor->WaitForSpecificEvent<TEvPollerRegisterResult>("RegisterPoller");
- PollerToken = std::move(result->Get()->PollerToken);
- Y_ABORT_UNLESS(PollerToken);
- Y_ABORT_UNLESS(PollerToken->RefCount() == 1); // ensure exclusive ownership
- }
-
- void WaitPoller(bool read, bool write, TString state) {
- if (!PollerToken->RequestNotificationAfterWouldBlock(read, write)) {
- Actor->WaitForSpecificEvent<TEvPollerReady>(std::move(state));
- }
- }
-
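-            // Generic blocking-style send/receive loop over a nonblocking socket: retries after
-            // EAGAIN/EWOULDBLOCK by waiting on the poller, ignores EINTR and fails the handshake
-            // on any other error or unexpected connection close.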
- template <typename TDataPtr, typename TSendRecvFunc>
- void Process(TDataPtr buffer, size_t len, TSendRecvFunc&& sendRecv, bool read, bool write, TString state) {
- Y_ABORT_UNLESS(Socket);
- NInterconnect::TStreamSocket* sock = Socket.Get();
- ssize_t (NInterconnect::TStreamSocket::*pfn)(TDataPtr, size_t, TString*) const = sendRecv;
- size_t processed = 0;
-
- auto error = [&](TString msg) {
- Actor->Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT,
- Sprintf("Socket error# %s state# %s processed# %zu remain# %zu",
- msg.data(), state.data(), processed, len), true);
- };
-
- while (len) {
- TString err;
- ssize_t nbytes = (sock->*pfn)(buffer, len, &err);
- if (nbytes > 0) {
- buffer = (char*)buffer + nbytes;
- len -= nbytes;
- processed += nbytes;
- } else if (-nbytes == EAGAIN || -nbytes == EWOULDBLOCK) {
- WaitPoller(read, write, state);
- } else if (!nbytes) {
- error("connection unexpectedly closed");
- } else if (-nbytes != EINTR) {
- error(err ? err : TString(strerror(-nbytes)));
- }
- }
- }
-
- void SendData(const void* buffer, size_t len, TString state) {
- Process(buffer, len, &NInterconnect::TStreamSocket::Send, false, true, std::move(state));
- }
-
- void ReceiveData(void* buffer, size_t len, TString state) {
- Process(buffer, len, &NInterconnect::TStreamSocket::Recv, true, false, std::move(state));
- }
-
- void ResetPollerToken() {
- if (PollerToken) {
- Y_ABORT_UNLESS(PollerToken->RefCount() == 1);
-                    PollerToken.Reset(); // make sure the poller token is destroyed here, as the socket will be re-registered within another actor
- }
- }
-
- TIntrusivePtr<NInterconnect::TStreamSocket>& GetSocketRef() { return Socket; }
- operator bool() const { return static_cast<bool>(Socket); }
- };
-
- private:
- TInterconnectProxyCommon::TPtr Common;
- TActorId SelfVirtualId;
- TActorId PeerVirtualId;
- ui32 PeerNodeId = 0;
- ui64 NextPacketToPeer = 0;
- TMaybe<ui64> NextPacketFromPeer; // will be obtained from incoming initial packet
- TString PeerHostName;
- TString PeerAddr;
- TConnection MainChannel;
- TConnection ExternalDataChannel;
- TString State;
- TString HandshakeKind;
- TMaybe<THolder<TProgramInfo>> ProgramInfo; // filled in in case of successful handshake; even if null
- TSessionParams Params;
- std::optional<TInstant> LastLogNotice;
- const TDuration MuteDuration = TDuration::Seconds(15);
- TMonotonic Deadline;
- TActorId HandshakeBroker;
- std::optional<TBrokerLeaseHolder> BrokerLeaseHolder;
- std::optional<TString> HandshakeId; // for XDC
- bool SubscribedForConnection = false;
-
- public:
- THandshakeActor(TInterconnectProxyCommon::TPtr common, const TActorId& self, const TActorId& peer,
- ui32 nodeId, ui64 nextPacket, TString peerHostName, TSessionParams params)
- : TActorCoroImpl(StackSize, true)
- , Common(std::move(common))
- , SelfVirtualId(self)
- , PeerVirtualId(peer)
- , PeerNodeId(nodeId)
- , NextPacketToPeer(nextPacket)
- , PeerHostName(std::move(peerHostName))
- , MainChannel(this, nullptr)
- , ExternalDataChannel(this, nullptr)
- , HandshakeKind("outgoing handshake")
- , Params(std::move(params))
- {
- Y_ABORT_UNLESS(SelfVirtualId);
- Y_ABORT_UNLESS(SelfVirtualId.NodeId());
- Y_ABORT_UNLESS(PeerNodeId);
- HandshakeBroker = MakeHandshakeBrokerOutId();
-
- // generate random handshake id
- HandshakeId = TString::Uninitialized(32);
- EntropyPool().Read(HandshakeId->Detach(), HandshakeId->size());
- }
-
- THandshakeActor(TInterconnectProxyCommon::TPtr common, TSocketPtr socket)
- : TActorCoroImpl(StackSize, true)
- , Common(std::move(common))
- , MainChannel(this, std::move(socket))
- , ExternalDataChannel(this, nullptr)
- , HandshakeKind("incoming handshake")
- {
- Y_ABORT_UNLESS(MainChannel);
- PeerAddr = TString::Uninitialized(1024);
- if (GetRemoteAddr(*MainChannel.GetSocketRef(), PeerAddr.Detach(), PeerAddr.size())) {
- PeerAddr.resize(strlen(PeerAddr.data()));
- } else {
- PeerAddr.clear();
- }
- }
-
- void UpdatePrefix() {
- SetPrefix(Sprintf("Handshake %s [node %" PRIu32 "]", SelfActorId.ToString().data(), PeerNodeId));
- }
-
- void Run() override {
- try {
- RunImpl();
- } catch (const TDtorException&) {
- if (BrokerLeaseHolder) {
- BrokerLeaseHolder->ForgetLease();
- }
- throw;
- } catch (const TExPoison&) {
- // just stop execution, do nothing
- } catch (...) {
- Y_ABORT("unhandled exception");
- }
- if (SubscribedForConnection) {
- SendToProxy(MakeHolder<TEvSubscribeForConnection>(*HandshakeId, false));
- }
- }
-
- void RunImpl() {
- UpdatePrefix();
-
- if (!MainChannel && Common->OutgoingHandshakeInflightLimit) {
-                // Create a holder that requests a lease from the broker and automatically frees the slot when destroyed
- BrokerLeaseHolder.emplace(SelfActorId, HandshakeBroker);
- }
-
- if (BrokerLeaseHolder && BrokerLeaseHolder->IsLeaseRequested()) {
- WaitForSpecificEvent<TEvHandshakeBrokerPermit>("HandshakeBrokerPermit");
- }
-
- // set up overall handshake process timer
- TDuration timeout = Common->Settings.Handshake;
- if (timeout == TDuration::Zero()) {
- timeout = DEFAULT_HANDSHAKE_TIMEOUT;
- }
- timeout += ResolveTimeout * 2;
-
- if (MainChannel) {
- // Incoming handshakes have shorter timeout than outgoing
- timeout *= 0.9;
- }
-
- Deadline = TActivationContext::Monotonic() + timeout;
- Schedule(Deadline, new TEvents::TEvWakeup);
-
- try {
- const bool incoming = MainChannel;
- if (incoming) {
- PerformIncomingHandshake();
- } else {
- PerformOutgoingHandshake();
- }
-
- // establish encrypted channel, or, in case when encryption is disabled, check if it matches settings
- if (ProgramInfo) {
- if (Params.UseExternalDataChannel) {
- if (incoming) {
- Y_ABORT_UNLESS(SubscribedForConnection);
- auto ev = WaitForSpecificEvent<TEvReportConnection>("WaitInboundXdcStream");
- SubscribedForConnection = false;
- if (ev->Get()->HandshakeId != *HandshakeId) {
- Y_DEBUG_ABORT_UNLESS(false);
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Mismatching HandshakeId in external data channel");
- }
- ExternalDataChannel.GetSocketRef() = std::move(ev->Get()->Socket);
- } else {
- EstablishExternalDataChannel();
- }
- }
-
- if (Params.Encryption) {
- EstablishSecureConnection(MainChannel);
- if (ExternalDataChannel) {
- EstablishSecureConnection(ExternalDataChannel);
- }
- } else if (Common->Settings.EncryptionMode == EEncryptionMode::REQUIRED && !Params.AuthOnly) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Peer doesn't support encryption, which is required");
- }
- }
- } catch (const TExHandshakeFailed&) {
- ProgramInfo.Clear();
- }
-
- if (ProgramInfo) {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH04", NLog::PRI_INFO, "handshake succeeded");
- Y_ABORT_UNLESS(NextPacketFromPeer);
- MainChannel.ResetPollerToken();
- ExternalDataChannel.ResetPollerToken();
- Y_ABORT_UNLESS(!ExternalDataChannel == !Params.UseExternalDataChannel);
- SendToProxy(MakeHolder<TEvHandshakeDone>(std::move(MainChannel.GetSocketRef()), PeerVirtualId, SelfVirtualId,
- *NextPacketFromPeer, ProgramInfo->Release(), std::move(Params), std::move(ExternalDataChannel.GetSocketRef())));
- }
-
- MainChannel.Reset();
- ExternalDataChannel.Reset();
- }
-
- void EstablishSecureConnection(TConnection& connection) {
- // wrap current socket with secure one
- connection.ResetPollerToken();
- TIntrusivePtr<NInterconnect::TStreamSocket>& socketRef = connection.GetSocketRef();
- auto ev = AskProxy<TEvSecureSocket>(MakeHolder<TEvGetSecureSocket>(socketRef), "AskProxy(TEvSecureContext)");
- TIntrusivePtr<NInterconnect::TSecureSocket> secure = std::move(ev->Get()->Socket); // remember for further use
- socketRef = secure; // replace the socket within the connection
- connection.RegisterInPoller(); // re-register in poller
-
- const ui32 myNodeId = GetActorSystem()->NodeId;
- const bool server = myNodeId < PeerNodeId; // keep server/client role permanent to enable easy TLS session resuming
- for (;;) {
- TString err;
- switch (secure->Establish(server, Params.AuthOnly, err)) {
- case NInterconnect::TSecureSocket::EStatus::SUCCESS:
- if (Params.AuthOnly) {
- Params.Encryption = false;
- Params.AuthCN = secure->GetPeerCommonName();
- connection.ResetPollerToken();
- socketRef = secure->Detach();
- connection.RegisterInPoller();
- }
- return;
-
- case NInterconnect::TSecureSocket::EStatus::ERROR:
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, err, true);
- [[fallthrough]];
-
- case NInterconnect::TSecureSocket::EStatus::WANT_READ:
- connection.WaitPoller(true, false, "ReadEstablish");
- break;
-
- case NInterconnect::TSecureSocket::EStatus::WANT_WRITE:
- connection.WaitPoller(false, true, "WriteEstablish");
- break;
- }
- }
- }
-
- void ProcessUnexpectedEvent(TAutoPtr<IEventHandle> ev) {
- switch (const ui32 type = ev->GetTypeRewrite()) {
- case TEvents::TSystem::Wakeup:
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT, Sprintf("Handshake timed out, State# %s", State.data()), true);
- [[fallthrough]];
-
- case ui32(ENetwork::NodeInfo):
- case TEvInterconnect::EvNodeAddress:
- case ui32(ENetwork::ResolveError):
- break; // most likely a race with resolve timeout
-
- case TEvPollerReady::EventType:
- break;
-
- case TEvents::TSystem::Poison:
- throw TExPoison();
-
- default:
- Y_ABORT("unexpected event 0x%08" PRIx32, type);
- }
- }
-
- template<typename T>
- void SetupCompatibilityInfo(T& proto) {
- if (Common->CompatibilityInfo) {
- proto.SetCompatibilityInfo(*Common->CompatibilityInfo);
- }
- }
-
- template<typename T>
- void SetupVersionTag(T& proto) {
- if (Common->VersionInfo) {
- proto.SetVersionTag(Common->VersionInfo->Tag);
- for (const TString& accepted : Common->VersionInfo->AcceptedTags) {
- proto.AddAcceptedVersionTags(accepted);
- }
- }
- }
-
- template<typename T>
- void SetupClusterUUID(T& proto) {
- auto *pb = proto.MutableClusterUUIDs();
- pb->SetClusterUUID(Common->ClusterUUID);
- for (const TString& uuid : Common->AcceptUUID) {
- pb->AddAcceptUUID(uuid);
- }
- }
-
- template<typename T, typename TCallback>
- void ValidateCompatibilityInfo(const T& proto, TCallback&& errorCallback) {
- // if possible, use new CompatibilityInfo field
- if (proto.HasCompatibilityInfo() && Common->ValidateCompatibilityInfo) {
- TString errorReason;
- if (!Common->ValidateCompatibilityInfo(proto.GetCompatibilityInfo(), errorReason)) {
- TStringStream s("Local and peer CompatibilityInfo are incompatible");
- s << ", errorReason# " << errorReason;
- errorCallback(s.Str());
- }
- } else if (proto.HasVersionTag() && Common->ValidateCompatibilityOldFormat) {
- TInterconnectProxyCommon::TVersionInfo oldFormat;
- oldFormat.Tag = proto.GetVersionTag();
- for (ui32 i = 0; i < proto.AcceptedVersionTagsSize(); ++i) {
- oldFormat.AcceptedTags.insert(proto.GetAcceptedVersionTags(i));
- }
- TString errorReason;
- if (!Common->ValidateCompatibilityOldFormat(oldFormat, errorReason)) {
- TStringStream s("Local CompatibilityInfo and peer TVersionInfo are incompatible");
- s << ", errorReason# " << errorReason;
- errorCallback(s.Str());
- }
- } else if (proto.HasVersionTag()) {
- ValidateVersionTag(proto, std::forward<TCallback>(errorCallback));
- } else {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH09", NLog::PRI_WARN,
- "Neither CompatibilityInfo nor VersionTag of the peer can be validated, accepting by default");
- }
- }
-
- template<typename T, typename TCallback>
- void ValidateVersionTag(const T& proto, TCallback&& errorCallback) {
- // check if we will accept peer's version tag (if peer provides one and if we have accepted list non-empty)
- if (Common->VersionInfo) {
- if (!proto.HasVersionTag()) {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH06", NLog::PRI_WARN,
- "peer did not report VersionTag, accepting by default");
- } else if (!Common->VersionInfo->AcceptedTags.count(proto.GetVersionTag())) {
- // we will not accept peer's tag, so check if remote peer would accept our version tag
- size_t i;
- for (i = 0; i < proto.AcceptedVersionTagsSize() && Common->VersionInfo->Tag != proto.GetAcceptedVersionTags(i); ++i)
- {}
- if (i == proto.AcceptedVersionTagsSize()) {
-                        // the peer will not accept our version either -- this is a total failure
- TStringStream s("local/peer version tags did not match accepted ones");
- s << " local Tag# " << Common->VersionInfo->Tag << " accepted Tags# [";
- bool first = true;
- for (const auto& tag : Common->VersionInfo->AcceptedTags) {
- s << (std::exchange(first, false) ? "" : " ") << tag;
- }
- s << "] peer Tag# " << proto.GetVersionTag() << " accepted Tags# [";
- first = true;
- for (const auto& tag : proto.GetAcceptedVersionTags()) {
- s << (std::exchange(first, false) ? "" : " ") << tag;
- }
- s << "]";
- errorCallback(s.Str());
- }
- }
- }
- }
-
- template<typename T, typename TCallback>
- void ValidateClusterUUID(const T& proto, TCallback&& errorCallback, const TMaybe<TString>& uuid = {}) {
- auto formatList = [](const auto& list) {
- TStringStream s;
- s << "[";
- for (auto it = list.begin(); it != list.end(); ++it) {
- if (it != list.begin()) {
- s << " ";
- }
- s << *it;
- }
- s << "]";
- return s.Str();
- };
- if (!Common->AcceptUUID) {
- return; // promiscuous mode -- we accept every other peer
- }
- if (!proto.HasClusterUUIDs()) {
- if (uuid) {
-                    // old-style checking, the peer does not support the symmetric protocol
- bool matching = false;
- for (const TString& accepted : Common->AcceptUUID) {
- if (*uuid == accepted) {
- matching = true;
- break;
- }
- }
- if (!matching) {
- errorCallback(Sprintf("Peer ClusterUUID# %s mismatch, AcceptUUID# %s", uuid->data(), formatList(Common->AcceptUUID).data()));
- }
- }
- return; // remote side did not fill in this field -- old version, symmetric protocol is not supported
- }
-
- const auto& uuids = proto.GetClusterUUIDs();
-
- // check if our UUID matches remote accept list
- for (const TString& item : uuids.GetAcceptUUID()) {
- if (item == Common->ClusterUUID) {
- return; // match
- }
- }
-
- // check if remote UUID matches our accept list
- const TString& remoteUUID = uuids.GetClusterUUID();
- for (const TString& item : Common->AcceptUUID) {
- if (item == remoteUUID) {
- return; // match
- }
- }
-
- // no match
- errorCallback(Sprintf("Peer ClusterUUID# %s mismatch, AcceptUUID# %s", remoteUUID.data(), formatList(Common->AcceptUUID).data()));
- }
-
- void ParsePeerScopeId(const NActorsInterconnect::TScopeId& proto) {
- Params.PeerScopeId = {proto.GetX1(), proto.GetX2()};
- }
-
- void FillInScopeId(NActorsInterconnect::TScopeId& proto) {
- const TScopeId& scope = Common->LocalScopeId;
- proto.SetX1(scope.first);
- proto.SetX2(scope.second);
- }
-
- template<typename T>
- void ReportProto(const T& protobuf, const char *msg) {
- auto formatString = [&] {
- google::protobuf::TextFormat::Printer p;
- p.SetSingleLineMode(true);
- TString s;
- p.PrintToString(protobuf, &s);
- return s;
- };
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH07", NLog::PRI_DEBUG, "%s %s", msg,
- formatString().data());
- }
-
- bool CheckPeerCookie(const TString& cookie, TString *error) {
- // set up virtual self id to ensure peer will not drop our connection
- char buf[12] = {'c', 'o', 'o', 'k', 'i', 'e', ' ', 'c', 'h', 'e', 'c', 'k'};
- SelfVirtualId = TActorId(SelfActorId.NodeId(), TStringBuf(buf, 12));
-
- bool success = true;
- try {
- // issue connection and send initial packet
- TConnection tempConnection(this, nullptr);
- tempConnection.Connect(nullptr);
- SendInitialPacket(tempConnection);
-
- // wait for basic response
- TInitialPacket response;
- tempConnection.ReceiveData(&response, sizeof(response), "ReceiveResponse");
- if (!response.Check()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT, "Initial packet CRC error");
- } else if (response.Header.Version != INTERCONNECT_PROTOCOL_VERSION) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, Sprintf("Incompatible protocol %" PRIu64, response.Header.Version));
- }
-
- // issue cookie check request
- NActorsInterconnect::THandshakeRequest request;
- request.SetProtocol(INTERCONNECT_PROTOCOL_VERSION);
- request.SetProgramPID(0);
- request.SetProgramStartTime(0);
- request.SetSerial(0);
- request.SetReceiverNodeId(0);
- request.SetSenderActorId(TString());
- request.SetCookie(cookie);
- request.SetDoCheckCookie(true);
- SendExBlock(tempConnection, request, "ExBlockDoCheckCookie");
-
- // process cookie check reply
- NActorsInterconnect::THandshakeReply reply;
- if (!reply.ParseFromString(ReceiveExBlock(tempConnection, "ExBlockDoCheckCookie"))) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect packet from peer");
- } else if (reply.HasCookieCheckResult() && !reply.GetCookieCheckResult()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Cookie check error -- possible network problem");
- }
- } catch (const TExHandshakeFailed& e) {
- *error = e.what();
- success = false;
- }
-
- // restore state
- SelfVirtualId = TActorId();
- return success;
- }
-
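- // Opens the external data channel socket, sends the XDC stream initial packet and announces the
- // shared HandshakeId to the peer.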
- void EstablishExternalDataChannel() {
- ExternalDataChannel.Connect(nullptr);
- char buf[12] = {'x', 'd', 'c', ' ', 'p', 'a', 'y', 'l', 'o', 'a', 'd', 0};
- TInitialPacket packet(TActorId(SelfActorId.NodeId(), TStringBuf(buf, 12)), {}, 0, INTERCONNECT_XDC_STREAM_VERSION);
- ExternalDataChannel.SendData(&packet, sizeof(packet), "SendXdcStream");
- NActorsInterconnect::TExternalDataChannelParams params;
- params.SetHandshakeId(*HandshakeId);
- SendExBlock(ExternalDataChannel, params, "ExternalDataChannelParams");
- }
-
- void PerformOutgoingHandshake() {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH01", NLog::PRI_DEBUG,
- "starting outgoing handshake");
-
- // perform connection and log its result
- MainChannel.Connect(&PeerAddr);
- auto logPriority = std::exchange(LastLogNotice, std::nullopt) ? NActors::NLog::PRI_NOTICE : NActors::NLog::PRI_DEBUG;
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH05", logPriority, "connected to peer");
-
- // send initial request packet
- if (Params.UseExternalDataChannel && PeerVirtualId) { // special case for XDC continuation
- TInitialPacket packet(SelfVirtualId, PeerVirtualId, NextPacketToPeer, INTERCONNECT_XDC_CONTINUATION_VERSION);
- MainChannel.SendData(&packet, sizeof(packet), "SendInitialPacket");
- NActorsInterconnect::TContinuationParams request;
- request.SetHandshakeId(*HandshakeId);
- SendExBlock(MainChannel, request, "ExRequest");
- } else {
- TInitialPacket packet(SelfVirtualId, PeerVirtualId, NextPacketToPeer, INTERCONNECT_PROTOCOL_VERSION);
- MainChannel.SendData(&packet, sizeof(packet), "SendInitialPacket");
- }
-
- TInitialPacket response;
- MainChannel.ReceiveData(&response, sizeof(response), "ReceiveResponse");
- if (!response.Check()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT, "Initial packet CRC error");
- } else if (response.Header.Version != INTERCONNECT_PROTOCOL_VERSION) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, Sprintf("Incompatible protocol %" PRIu64, response.Header.Version));
- }
-
- // extract next packet
- NextPacketFromPeer = response.Header.NextPacket;
-
- if (!PeerVirtualId) {
- // creating a new session -- we have to generate a request
- NActorsInterconnect::THandshakeRequest request;
-
- request.SetProtocol(INTERCONNECT_PROTOCOL_VERSION);
- request.SetProgramPID(GetPID());
- request.SetProgramStartTime(Common->StartTime);
- request.SetSerial(SelfVirtualId.LocalId());
- request.SetReceiverNodeId(PeerNodeId);
- request.SetSenderActorId(SelfVirtualId.ToString());
- request.SetSenderHostName(Common->TechnicalSelfHostName);
- request.SetReceiverHostName(PeerHostName);
-
- if (Common->LocalScopeId != TScopeId()) {
- FillInScopeId(*request.MutableClientScopeId());
- }
-
- if (Common->Cookie) {
- request.SetCookie(Common->Cookie);
- }
- if (Common->ClusterUUID) {
- request.SetUUID(Common->ClusterUUID);
- }
- SetupClusterUUID(request);
- SetupCompatibilityInfo(request);
- SetupVersionTag(request);
-
- if (const ui32 size = Common->HandshakeBallastSize) {
- TString ballast(size, 0);
- char* data = ballast.Detach();
- for (ui32 i = 0; i < size; ++i) {
- data[i] = i;
- }
- request.SetBallast(ballast);
- }
-
- switch (Common->Settings.EncryptionMode) {
- case EEncryptionMode::DISABLED:
- break;
-
- case EEncryptionMode::OPTIONAL:
- request.SetRequireEncryption(false);
- break;
-
- case EEncryptionMode::REQUIRED:
- request.SetRequireEncryption(true);
- break;
- }
-
- request.SetRequestModernFrame(true);
- request.SetRequestAuthOnly(Common->Settings.TlsAuthOnly);
- request.SetRequestExtendedTraceFmt(true);
- request.SetRequestExternalDataChannel(Common->Settings.EnableExternalDataChannel);
- request.SetRequestXxhash(true);
- request.SetRequestXdcShuffle(true);
- request.SetHandshakeId(*HandshakeId);
-
- SendExBlock(MainChannel, request, "ExRequest");
-
- NActorsInterconnect::THandshakeReply reply;
- if (!reply.ParseFromString(ReceiveExBlock(MainChannel, "ExReply"))) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect THandshakeReply");
- }
- ReportProto(reply, "ReceiveExBlock ExReply");
-
- if (reply.HasErrorExplaination()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "error from peer: " + reply.GetErrorExplaination());
- } else if (!reply.HasSuccess()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "empty reply");
- }
-
- auto generateError = [this](TString msg) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, msg);
- };
-
- const auto& success = reply.GetSuccess();
- ValidateClusterUUID(success, generateError);
- ValidateCompatibilityInfo(success, generateError);
-
- const auto& s = success.GetSenderActorId();
- PeerVirtualId.Parse(s.data(), s.size());
-
- if (!success.GetUseModernFrame()) {
- generateError("UseModernFrame not set, obsolete peer version");
- } else if (!success.GetUseExtendedTraceFmt()) {
- generateError("UseExtendedTraceFmt not set, obsolete peer version");
- }
-
- // recover flags
- Params.Encryption = success.GetStartEncryption();
- Params.AuthOnly = Params.Encryption && success.GetAuthOnly();
- Params.UseExternalDataChannel = success.GetUseExternalDataChannel();
- Params.UseXxhash = success.GetUseXxhash();
- Params.UseXdcShuffle = success.GetUseXdcShuffle();
- if (success.HasServerScopeId()) {
- ParsePeerScopeId(success.GetServerScopeId());
- }
-
- // recover peer process info from peer's reply
- ProgramInfo = GetProgramInfo(success);
- } else if (!response.Header.SelfVirtualId) {
- // peer reported error -- empty ack was generated by proxy for this request
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_SESSION_MISMATCH, "Peer rejected session continuation handshake");
- } else if (response.Header.SelfVirtualId != PeerVirtualId || response.Header.PeerVirtualId != SelfVirtualId) {
- // resuming existing session; check that virtual ids of peers match each other
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_SESSION_MISMATCH, "Session virtual ID mismatch");
- } else {
- ProgramInfo.ConstructInPlace(); // successful handshake
- }
- }
-
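- // Asks the nameservice to resolve the peer node and returns the list of its addresses; fails the
- // handshake on resolve timeout or error.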
- std::vector<NInterconnect::TAddress> ResolvePeer() {
- // issue request to a nameservice to resolve peer node address
- const auto mono = TActivationContext::Monotonic();
- Send(Common->NameserviceId, new TEvInterconnect::TEvResolveNode(PeerNodeId, TActivationContext::Now() + (Deadline - mono)));
-
- // wait for the result
- auto ev = WaitForSpecificEvent<TEvResolveError, TEvLocalNodeInfo, TEvInterconnect::TEvNodeAddress>(
- "ValidateIncomingPeerViaDirectLookup", mono + ResolveTimeout);
-
- // extract address from the result
- std::vector<NInterconnect::TAddress> addresses;
- if (!ev) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "DNS resolve timed out", true);
- } else if (auto *p = ev->CastAsLocal<TEvLocalNodeInfo>()) {
- addresses = std::move(p->Addresses);
- if (addresses.empty()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "DNS resolve error: no address returned", true);
- }
- } else if (auto *p = ev->CastAsLocal<TEvInterconnect::TEvNodeAddress>()) {
- const auto& r = p->Record;
- if (!r.HasAddress() || !r.HasPort()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "DNS resolve error: no address returned", true);
- }
- addresses.emplace_back(r.GetAddress(), static_cast<ui16>(r.GetPort()));
- } else {
- Y_ABORT_UNLESS(ev->GetTypeRewrite() == ui32(ENetwork::ResolveError));
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "DNS resolve error: " + ev->Get<TEvResolveError>()->Explain
- + ", Unresolved host# " + ev->Get<TEvResolveError>()->Host, true);
- }
-
- return addresses;
- }
-
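- // Resolves the connecting peer through the nameservice and checks that PeerAddr is among the
- // resolved addresses; fails the handshake otherwise.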
- void ValidateIncomingPeerViaDirectLookup() {
- auto addresses = ResolvePeer();
-
- for (const auto& address : addresses) {
- if (address.GetAddress() == PeerAddr) {
- return;
- }
- }
-
- auto makeList = [&] {
- TStringStream s;
- s << '[';
- for (auto it = addresses.begin(); it != addresses.end(); ++it) {
- if (it != addresses.begin()) {
- s << ' ';
- }
- s << it->GetAddress();
- }
- s << ']';
- return s.Str();
- };
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, TStringBuilder() << "Connecting node does not resolve to peer address"
- << " PeerAddr# " << PeerAddr << " AddressList# " << makeList(), true);
- }
-
- void PerformIncomingHandshake() {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH02", NLog::PRI_DEBUG,
- "starting incoming handshake");
-
- // set up incoming socket
- MainChannel.SetupSocket();
- MainChannel.RegisterInPoller();
-
- // wait for initial request packet
- TInitialPacket request;
- MainChannel.ReceiveData(&request, sizeof(request), "ReceiveRequest");
- if (!request.Check()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT, "Initial packet CRC error");
- } else if (request.Header.Version != INTERCONNECT_PROTOCOL_VERSION &&
- request.Header.Version != INTERCONNECT_XDC_CONTINUATION_VERSION &&
- request.Header.Version != INTERCONNECT_XDC_STREAM_VERSION) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, Sprintf("Incompatible protocol %" PRIu64, request.Header.Version));
- }
-
- // extract peer node id from the initial packet
- PeerNodeId = request.Header.SelfVirtualId.NodeId();
- if (!PeerNodeId) {
- Y_DEBUG_ABORT_UNLESS(false, "PeerNodeId is zero request# %s", request.ToString().data());
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "SelfVirtualId.NodeId is empty in initial packet");
- }
- UpdatePrefix();
-
- // validate incoming peer if needed
- if (Common->Settings.ValidateIncomingPeerViaDirectLookup) {
- ValidateIncomingPeerViaDirectLookup();
- }
-
- // extract next packet
- NextPacketFromPeer = request.Header.NextPacket;
-
- // process some extra payload, if necessary
- switch (request.Header.Version) {
- case INTERCONNECT_XDC_CONTINUATION_VERSION: {
- NActorsInterconnect::TContinuationParams params;
- if (!params.ParseFromString(ReceiveExBlock(MainChannel, "ContinuationParams")) || !params.HasHandshakeId()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect packet from peer");
- }
- HandshakeId = params.GetHandshakeId();
- SendToProxy(MakeHolder<TEvSubscribeForConnection>(*HandshakeId, true));
- SubscribedForConnection = true;
- break;
- }
- case INTERCONNECT_XDC_STREAM_VERSION: {
- NActorsInterconnect::TExternalDataChannelParams params;
- if (!params.ParseFromString(ReceiveExBlock(MainChannel, "ExternalDataChannelParams")) || !params.HasHandshakeId()) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect packet from peer");
- }
- MainChannel.ResetPollerToken();
- SendToProxy(MakeHolder<TEvReportConnection>(params.GetHandshakeId(), std::move(MainChannel.GetSocketRef())));
- throw TExHandshakeFailed();
- }
- }
-
- if (request.Header.PeerVirtualId) {
- // issue request to the proxy and wait for the response
- auto reply = AskProxy<TEvHandshakeAck, TEvHandshakeNak>(MakeHolder<TEvHandshakeAsk>(
- request.Header.SelfVirtualId, request.Header.PeerVirtualId, request.Header.NextPacket),
- "TEvHandshakeAsk");
- if (auto *ack = reply->CastAsLocal<TEvHandshakeAck>()) {
- // extract self/peer virtual ids
- SelfVirtualId = ack->Self;
- PeerVirtualId = request.Header.SelfVirtualId;
- NextPacketToPeer = ack->NextPacket;
- Params = ack->Params;
-
- // only succeed when the proxy returned a valid SelfVirtualId; otherwise it wants us to terminate
- // the handshake process and does not expect the handshake reply
- ProgramInfo.ConstructInPlace();
- } else {
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH08", NLog::PRI_NOTICE,
- "Continuation request rejected by proxy");
-
- // report continuation rejection to the peer
- SelfVirtualId = TActorId();
- PeerVirtualId = TActorId();
- NextPacketToPeer = 0;
- }
-
- // issue response to the peer
- SendInitialPacket(MainChannel);
- } else {
- // peer wants a new session, clear fields and send initial packet
- SelfVirtualId = TActorId();
- PeerVirtualId = TActorId();
- NextPacketToPeer = 0;
- SendInitialPacket(MainChannel);
-
- // wait for extended request
- auto ev = MakeHolder<TEvHandshakeRequest>();
- auto& request = ev->Record;
- if (!request.ParseFromString(ReceiveExBlock(MainChannel, "ExRequest"))) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect THandshakeRequest");
- }
- ReportProto(request, "ReceiveExBlock ExRequest");
-
- auto generateError = [this](TString msg) {
- // issue reply to the peer to prevent repeated connection retries
- NActorsInterconnect::THandshakeReply reply;
- reply.SetErrorExplaination(msg);
- SendExBlock(MainChannel, reply, "ExReply");
-
- // terminate this handshake
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, msg);
- };
-
- // check request cookie
- TString error;
- if (request.HasDoCheckCookie()) {
- NActorsInterconnect::THandshakeReply reply;
- reply.SetCookieCheckResult(request.GetCookie() == Common->Cookie);
- SendExBlock(MainChannel, reply, "ExReplyDoCheckCookie");
- throw TExHandshakeFailed();
- } else if (request.HasCookie() && !CheckPeerCookie(request.GetCookie(), &error)) {
- generateError(TStringBuilder() << "Peer connectivity-checking failed, error# " << error);
- }
-
- // update log prefix with the reported peer host name
- PeerHostName = request.GetSenderHostName();
-
- // parse peer virtual id
- const auto& str = request.GetSenderActorId();
- PeerVirtualId.Parse(str.data(), str.size());
-
- // validate request
- ValidateClusterUUID(request, generateError, request.GetUUID());
- if (request.GetReceiverNodeId() != SelfActorId.NodeId()) {
- generateError(Sprintf("Incorrect ReceiverNodeId# %" PRIu32 " from the peer, expected# %" PRIu32,
- request.GetReceiverNodeId(), SelfActorId.NodeId()));
- } else if (request.GetReceiverHostName() != Common->TechnicalSelfHostName) {
- generateError(Sprintf("ReceiverHostName# %s mismatch, expected# %s", request.GetReceiverHostName().data(),
- Common->TechnicalSelfHostName.data()));
- }
- ValidateCompatibilityInfo(request, generateError);
-
- // check peer node
- auto peerNodeInfo = GetPeerNodeInfo();
- if (!peerNodeInfo) {
- generateError("Peer node not registered in nameservice");
- } else if (peerNodeInfo->Host != request.GetSenderHostName()) {
- generateError("SenderHostName mismatch");
- }
-
- // check request against encryption
- switch (Common->Settings.EncryptionMode) {
- case EEncryptionMode::DISABLED:
- if (request.GetRequireEncryption()) {
- generateError("Peer requested encryption, but it is disabled locally");
- }
- break;
-
- case EEncryptionMode::OPTIONAL:
- Params.Encryption = request.HasRequireEncryption();
- break;
-
- case EEncryptionMode::REQUIRED:
- if (!request.HasRequireEncryption()) {
- generateError("Peer did not request encryption, but it is required locally");
- }
- Params.Encryption = true;
- break;
- }
-
- if (!request.GetRequestModernFrame()) {
- generateError("RequestModernFrame not set, obsolete peer version");
- } else if (!request.GetRequestExtendedTraceFmt()) {
- generateError("RequestExtendedTraceFmt not set, obsolete peer version");
- }
-
- Params.AuthOnly = Params.Encryption && request.GetRequestAuthOnly() && Common->Settings.TlsAuthOnly;
- Params.UseExternalDataChannel = request.GetRequestExternalDataChannel() && Common->Settings.EnableExternalDataChannel;
- Params.UseXxhash = request.GetRequestXxhash();
- Params.UseXdcShuffle = request.GetRequestXdcShuffle();
-
- if (Params.UseExternalDataChannel) {
- if (request.HasHandshakeId()) {
- HandshakeId = request.GetHandshakeId();
- SendToProxy(MakeHolder<TEvSubscribeForConnection>(*HandshakeId, true));
- SubscribedForConnection = true;
- } else {
- generateError("Peer has requested ExternalDataChannel feature, but did not provide HandshakeId");
- }
- }
-
- if (request.HasClientScopeId()) {
- ParsePeerScopeId(request.GetClientScopeId());
- }
-
- // remember program info (assuming successful handshake)
- ProgramInfo = GetProgramInfo(request);
-
- // send to proxy
- auto reply = AskProxy<TEvHandshakeReplyOK, TEvHandshakeReplyError>(std::move(ev), "TEvHandshakeRequest");
-
- // parse it
- if (auto ev = reply->CastAsLocal<TEvHandshakeReplyOK>()) {
- // issue successful reply to the peer
- auto& record = ev->Record;
- Y_ABORT_UNLESS(record.HasSuccess());
- auto& success = *record.MutableSuccess();
- SetupClusterUUID(success);
- SetupCompatibilityInfo(success);
- SetupVersionTag(success);
- success.SetStartEncryption(Params.Encryption);
- if (Common->LocalScopeId != TScopeId()) {
- FillInScopeId(*success.MutableServerScopeId());
- }
- success.SetUseModernFrame(true);
- success.SetAuthOnly(Params.AuthOnly);
- success.SetUseExtendedTraceFmt(true);
- success.SetUseExternalDataChannel(Params.UseExternalDataChannel);
- success.SetUseXxhash(Params.UseXxhash);
- success.SetUseXdcShuffle(Params.UseXdcShuffle);
- SendExBlock(MainChannel, record, "ExReply");
-
- // extract sender actor id (self virtual id)
- const auto& str = success.GetSenderActorId();
- SelfVirtualId.Parse(str.data(), str.size());
- } else if (auto ev = reply->CastAsLocal<TEvHandshakeReplyError>()) {
- // in case of error just send reply to the peer and terminate handshake
- SendExBlock(MainChannel, ev->Record, "ExReply");
- ProgramInfo.Clear(); // do not issue reply to the proxy
- } else {
- Y_ABORT("unexpected event Type# 0x%08" PRIx32, reply->GetTypeRewrite());
- }
- }
- }
-
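- // Serializes the message, signs it with an extended header and sends the header followed by the payload.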
- template <typename T>
- void SendExBlock(TConnection& connection, const T& proto, const char* what) {
- TString data;
- Y_PROTOBUF_SUPPRESS_NODISCARD proto.SerializeToString(&data);
- Y_ABORT_UNLESS(data.size() <= TExHeader::MaxSize);
-
- ReportProto(proto, Sprintf("SendExBlock %s", what).data());
-
- TExHeader header;
- header.Size = data.size();
- header.Sign(data.data(), data.size());
- connection.SendData(&header, sizeof(header), Sprintf("Send%sHeader", what));
- connection.SendData(data.data(), data.size(), Sprintf("Send%sData", what));
- }
-
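- // Receives an extended block: reads the header, validates its size, then reads the payload and verifies its CRC.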
- TString ReceiveExBlock(TConnection& connection, const char* what) {
- TExHeader header;
- connection.ReceiveData(&header, sizeof(header), Sprintf("Receive%sHeader", what));
- if (header.Size > TExHeader::MaxSize) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT, "Incorrect extended header size");
- }
-
- TString data;
- data.resize(header.Size);
- connection.ReceiveData(data.Detach(), data.size(), Sprintf("Receive%sData", what));
-
- if (!header.Check(data.data(), data.size())) {
- Fail(TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT, "Extended header CRC error");
- }
-
- return data;
- }
-
- private:
- void SendToProxy(THolder<IEventBase> ev) {
- Y_ABORT_UNLESS(PeerNodeId);
- Send(GetActorSystem()->InterconnectProxy(PeerNodeId), ev.Release());
- }
-
- template <typename TEvent>
- THolder<typename TEvent::THandle> WaitForSpecificEvent(TString state, TMonotonic deadline = TMonotonic::Max()) {
- State = std::move(state);
- return TActorCoroImpl::WaitForSpecificEvent<TEvent>(&THandshakeActor::ProcessUnexpectedEvent, deadline);
- }
-
- template <typename T1, typename T2, typename... TEvents>
- THolder<IEventHandle> WaitForSpecificEvent(TString state, TMonotonic deadline = TMonotonic::Max()) {
- State = std::move(state);
- return TActorCoroImpl::WaitForSpecificEvent<T1, T2, TEvents...>(&THandshakeActor::ProcessUnexpectedEvent, deadline);
- }
-
- template <typename TEvent>
- THolder<typename TEvent::THandle> AskProxy(THolder<IEventBase> ev, TString state) {
- SendToProxy(std::move(ev));
- return WaitForSpecificEvent<TEvent>(std::move(state));
- }
-
- template <typename T1, typename T2, typename... TOther>
- THolder<IEventHandle> AskProxy(THolder<IEventBase> ev, TString state) {
- SendToProxy(std::move(ev));
- return WaitForSpecificEvent<T1, T2, TOther...>(std::move(state));
- }
-
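- // Logs the failure, notifies the proxy when the peer node id is known and aborts the handshake
- // coroutine by throwing TExHandshakeFailed.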
- void Fail(TEvHandshakeFail::EnumHandshakeFail reason, TString explanation, bool network = false) {
- TString msg = Sprintf("%s Peer# %s(%s) %s", HandshakeKind.data(), PeerHostName ? PeerHostName.data() : "<unknown>",
- PeerAddr.size() ? PeerAddr.data() : "<unknown>", explanation.data());
-
- if (network) {
- TInstant now = Now();
- NActors::NLog::EPriority logPriority = NActors::NLog::PRI_DEBUG;
- if (!LastLogNotice || now - *LastLogNotice > MuteDuration) {
- logPriority = NActors::NLog::PRI_NOTICE;
- LastLogNotice.emplace(now);
- }
- LOG_LOG_NET_X(logPriority, PeerNodeId, "network-related error occurred on handshake: %s", msg.data());
- } else {
- // calculate log severity based on failure type; permanent failures are logged at a higher priority
- auto severity = reason == TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT
- ? NActors::NLog::PRI_NOTICE
- : NActors::NLog::PRI_INFO;
-
- LOG_LOG_IC_X(NActorsServices::INTERCONNECT, "ICH03", severity, "handshake failed, explanation# %s", msg.data());
- }
-
- if (PeerNodeId) {
- SendToProxy(MakeHolder<TEvHandshakeFail>(reason, std::move(msg)));
- }
-
- throw TExHandshakeFailed() << explanation;
- }
-
- private:
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // COMMUNICATION BLOCK
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- void SendInitialPacket(TConnection& connection) {
- TInitialPacket packet(SelfVirtualId, PeerVirtualId, NextPacketToPeer, INTERCONNECT_PROTOCOL_VERSION);
- connection.SendData(&packet, sizeof(packet), "SendInitialPacket");
- }
-
- THolder<TEvInterconnect::TNodeInfo> GetPeerNodeInfo() {
- Y_ABORT_UNLESS(PeerNodeId);
- Send(Common->NameserviceId, new TEvInterconnect::TEvGetNode(PeerNodeId, TActivationContext::Now() +
- (Deadline - TActivationContext::Monotonic())));
- auto response = WaitForSpecificEvent<TEvInterconnect::TEvNodeInfo>("GetPeerNodeInfo");
- return std::move(response->Get()->Node);
- }
-
- template <typename T>
- static THolder<TProgramInfo> GetProgramInfo(const T& proto) {
- auto programInfo = MakeHolder<TProgramInfo>();
- programInfo->PID = proto.GetProgramPID();
- programInfo->StartTime = proto.GetProgramStartTime();
- programInfo->Serial = proto.GetSerial();
- return programInfo;
- }
- };
-
- IActor* CreateOutgoingHandshakeActor(TInterconnectProxyCommon::TPtr common, const TActorId& self,
- const TActorId& peer, ui32 nodeId, ui64 nextPacket, TString peerHostName,
- TSessionParams params) {
- return new TActorCoro(MakeHolder<THandshakeActor>(std::move(common), self, peer, nodeId, nextPacket,
- std::move(peerHostName), std::move(params)), IActor::EActivityType::INTERCONNECT_HANDSHAKE);
- }
-
- IActor* CreateIncomingHandshakeActor(TInterconnectProxyCommon::TPtr common, TSocketPtr socket) {
- return new TActorCoro(MakeHolder<THandshakeActor>(std::move(common), std::move(socket)),
- IActor::EActivityType::INTERCONNECT_HANDSHAKE);
- }
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_handshake.h b/library/cpp/actors/interconnect/interconnect_handshake.h
deleted file mode 100644
index c8a7437b80..0000000000
--- a/library/cpp/actors/interconnect/interconnect_handshake.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/events.h>
-
-#include "interconnect_common.h"
-#include "interconnect_impl.h"
-#include "poller_tcp.h"
-#include "events_local.h"
-
-namespace NActors {
- static constexpr TDuration DEFAULT_HANDSHAKE_TIMEOUT = TDuration::Seconds(5);
- static constexpr ui64 INTERCONNECT_PROTOCOL_VERSION = 2;
- static constexpr ui64 INTERCONNECT_XDC_CONTINUATION_VERSION = 3;
- static constexpr ui64 INTERCONNECT_XDC_STREAM_VERSION = 4;
-
- using TSocketPtr = TIntrusivePtr<NInterconnect::TStreamSocket>;
-
- IActor* CreateOutgoingHandshakeActor(TInterconnectProxyCommon::TPtr common, const TActorId& self,
- const TActorId& peer, ui32 nodeId, ui64 nextPacket, TString peerHostName,
- TSessionParams params);
-
- IActor* CreateIncomingHandshakeActor(TInterconnectProxyCommon::TPtr common, TSocketPtr socket);
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_impl.h b/library/cpp/actors/interconnect/interconnect_impl.h
deleted file mode 100644
index db598546cc..0000000000
--- a/library/cpp/actors/interconnect/interconnect_impl.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#pragma once
-
-#include "interconnect.h"
-#include <library/cpp/actors/protos/interconnect.pb.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-namespace NActors {
- // resolve node info
- struct TEvInterconnect::TEvResolveNode: public TEventPB<TEvInterconnect::TEvResolveNode, NActorsInterconnect::TEvResolveNode, TEvInterconnect::EvResolveNode> {
- TEvResolveNode() {
- }
-
- TEvResolveNode(ui32 nodeId, TInstant deadline = TInstant::Max()) {
- Record.SetNodeId(nodeId);
- if (deadline != TInstant::Max()) {
- Record.SetDeadline(deadline.GetValue());
- }
- }
- };
-
- // node info
- struct TEvInterconnect::TEvNodeAddress: public TEventPB<TEvInterconnect::TEvNodeAddress, NActorsInterconnect::TEvNodeInfo, TEvInterconnect::EvNodeAddress> {
- TEvNodeAddress() {
- }
-
- TEvNodeAddress(ui32 nodeId) {
- Record.SetNodeId(nodeId);
- }
- };
-
- // register node
- struct TEvInterconnect::TEvRegisterNode: public TEventBase<TEvInterconnect::TEvRegisterNode, TEvInterconnect::EvRegisterNode> {
- };
-
- // reply on register node
- struct TEvInterconnect::TEvRegisterNodeResult: public TEventBase<TEvInterconnect::TEvRegisterNodeResult, TEvInterconnect::EvRegisterNodeResult> {
- };
-
- // disconnect
- struct TEvInterconnect::TEvDisconnect: public TEventLocal<TEvInterconnect::TEvDisconnect, TEvInterconnect::EvDisconnect> {
- };
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_mon.cpp b/library/cpp/actors/interconnect/interconnect_mon.cpp
deleted file mode 100644
index f8245e1d72..0000000000
--- a/library/cpp/actors/interconnect/interconnect_mon.cpp
+++ /dev/null
@@ -1,279 +0,0 @@
-#include "interconnect_mon.h"
-#include "interconnect_tcp_proxy.h"
-
-#include <library/cpp/json/json_value.h>
-#include <library/cpp/json/json_writer.h>
-#include <library/cpp/monlib/service/pages/templates.h>
-
-#include <openssl/ssl.h>
-#include <openssl/pem.h>
-
-namespace NInterconnect {
-
- using namespace NActors;
-
- class TInterconnectMonActor : public TActor<TInterconnectMonActor> {
- class TQueryProcessor : public TActorBootstrapped<TQueryProcessor> {
- const TActorId Sender;
- const bool Json;
- TMap<ui32, TInterconnectProxyTCP::TProxyStats> Stats;
- ui32 PendingReplies = 0;
-
- public:
- static constexpr IActor::EActorActivity ActorActivityType() {
- return EActivityType::INTERCONNECT_MONACTOR;
- }
-
- TQueryProcessor(const TActorId& sender, bool json)
- : Sender(sender)
- , Json(json)
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TThis::StateFunc, ctx, TDuration::Seconds(5), new TEvents::TEvWakeup);
- Send(GetNameserviceActorId(), new TEvInterconnect::TEvListNodes);
- }
-
- void Handle(TEvInterconnect::TEvNodesInfo::TPtr ev, const TActorContext& ctx) {
- TActorSystem* const as = ctx.ExecutorThread.ActorSystem;
- for (const auto& node : ev->Get()->Nodes) {
- Send(as->InterconnectProxy(node.NodeId), new TInterconnectProxyTCP::TEvQueryStats, IEventHandle::FlagTrackDelivery);
- ++PendingReplies;
- }
- GenerateResultWhenReady(ctx);
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvInterconnect::TEvNodesInfo, Handle)
- HFunc(TInterconnectProxyTCP::TEvStats, Handle)
- CFunc(TEvents::TSystem::Undelivered, HandleUndelivered)
- CFunc(TEvents::TSystem::Wakeup, HandleWakeup)
- )
-
- void Handle(TInterconnectProxyTCP::TEvStats::TPtr& ev, const TActorContext& ctx) {
- auto *msg = ev->Get();
- Stats.emplace(msg->PeerNodeId, std::move(msg->ProxyStats));
- --PendingReplies;
- GenerateResultWhenReady(ctx);
- }
-
- void HandleUndelivered(const TActorContext& ctx) {
- --PendingReplies;
- GenerateResultWhenReady(ctx);
- }
-
- void HandleWakeup(const TActorContext& ctx) {
- PendingReplies = 0;
- GenerateResultWhenReady(ctx);
- }
-
- void GenerateResultWhenReady(const TActorContext& ctx) {
- if (!PendingReplies) {
- if (Json) {
- ctx.Send(Sender, new NMon::TEvHttpInfoRes(GenerateJson(), 0, NMon::IEvHttpInfoRes::EContentType::Custom));
- } else {
- ctx.Send(Sender, new NMon::TEvHttpInfoRes(GenerateHtml()));
- }
- Die(ctx);
- }
- }
-
- TString GenerateHtml() {
- TStringStream str;
- HTML(str) {
- TABLE_CLASS("table-sortable table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() { str << "Peer node id"; }
- TABLEH() { str << "State"; }
- TABLEH() { str << "Ping"; }
- TABLEH() { str << "Clock skew"; }
- TABLEH() { str << "Scope id"; }
- TABLEH() { str << "Encryption"; }
- TABLEH() { str << "LastSessionDieTime"; }
- TABLEH() { str << "TotalOutputQueueSize"; }
- TABLEH() { str << "Connected"; }
- TABLEH() { str << "XDC"; }
- TABLEH() { str << "Host"; }
- TABLEH() { str << "Port"; }
- TABLEH() { str << "LastErrorTimestamp"; }
- TABLEH() { str << "LastErrorKind"; }
- TABLEH() { str << "LastErrorExplanation"; }
- }
- }
- TABLEBODY() {
- for (const auto& kv : Stats) {
- TABLER() {
- TABLED() { str << "<a href='" << kv.second.Path << "'>" << kv.first << "</a>"; }
- TABLED() { str << kv.second.State; }
- TABLED() {
- if (kv.second.Ping != TDuration::Zero()) {
- str << kv.second.Ping;
- }
- }
- TABLED() {
- if (kv.second.ClockSkew < 0) {
- str << "-" << TDuration::MicroSeconds(-kv.second.ClockSkew);
- } else {
- str << "+" << TDuration::MicroSeconds(kv.second.ClockSkew);
- }
- }
- TABLED() { str << ScopeIdToString(kv.second.PeerScopeId); }
- TABLED() {
- const char *color = kv.second.Encryption != "none" ? "green" : "red";
- str << "<font color='" << color << "'>" << kv.second.Encryption << "</font>";
- }
- TABLED() {
- if (kv.second.LastSessionDieTime != TInstant::Zero()) {
- str << kv.second.LastSessionDieTime;
- }
- }
- TABLED() { str << kv.second.TotalOutputQueueSize; }
- TABLED() { str << (kv.second.Connected ? "yes" : "<strong>no</strong>"); }
- TABLED() { str << (kv.second.ExternalDataChannel ? "yes" : "no"); }
- TABLED() { str << kv.second.Host; }
- TABLED() { str << kv.second.Port; }
- TABLED() {
- str << "<strong>";
- if (kv.second.LastErrorTimestamp != TInstant::Zero()) {
- str << kv.second.LastErrorTimestamp;
- }
- str << "</strong>";
- }
- TABLED() { str << "<strong>" << kv.second.LastErrorKind << "</strong>"; }
- TABLED() { str << "<strong>" << kv.second.LastErrorExplanation << "</strong>"; }
- }
- }
- }
- }
- }
- return str.Str();
- }
-
- TString GenerateJson() {
- NJson::TJsonValue json;
- for (const auto& [nodeId, info] : Stats) {
- NJson::TJsonValue item;
- item["NodeId"] = nodeId;
-
- auto id = [](const auto& x) { return x; };
- auto toString = [](const auto& x) { return x.ToString(); };
-
-#define JSON(NAME, FUN) item[#NAME] = FUN(info.NAME);
- JSON(Path, id)
- JSON(State, id)
- JSON(PeerScopeId, ScopeIdToString)
- JSON(LastSessionDieTime, toString)
- JSON(TotalOutputQueueSize, id)
- JSON(Connected, id)
- JSON(ExternalDataChannel, id)
- JSON(Host, id)
- JSON(Port, id)
- JSON(LastErrorTimestamp, toString)
- JSON(LastErrorKind, id)
- JSON(LastErrorExplanation, id)
- JSON(Ping, toString)
- JSON(ClockSkew, id)
- JSON(Encryption, id)
-#undef JSON
-
- json[ToString(nodeId)] = item;
- }
- TStringStream str(NMonitoring::HTTPOKJSON);
- NJson::WriteJson(&str, &json);
- return str.Str();
- }
- };
-
- private:
- TIntrusivePtr<TInterconnectProxyCommon> Common;
-
- public:
- static constexpr IActor::EActorActivity ActorActivityType() {
- return EActivityType::INTERCONNECT_MONACTOR;
- }
-
- TInterconnectMonActor(TIntrusivePtr<TInterconnectProxyCommon> common)
- : TActor(&TThis::StateFunc)
- , Common(std::move(common))
- {}
-
- STRICT_STFUNC(StateFunc,
- HFunc(NMon::TEvHttpInfo, Handle)
- )
-
- void Handle(NMon::TEvHttpInfo::TPtr& ev, const TActorContext& ctx) {
- const auto& params = ev->Get()->Request.GetParams();
- int certinfo = 0;
- if (TryFromString(params.Get("certinfo"), certinfo) && certinfo) {
- ctx.Send(ev->Sender, new NMon::TEvHttpInfoRes(GetCertInfoJson(), ev->Get()->SubRequestId,
- NMon::TEvHttpInfoRes::Custom));
- } else {
- const bool json = params.Has("fmt") && params.Get("fmt") == "json";
- ctx.Register(new TQueryProcessor(ev->Sender, json));
- }
- }
-
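- // Parses the locally configured PEM certificate with OpenSSL and reports its subject, common name
- // and validity interval as JSON; any parsing error is returned in the "Error" field.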
- TString GetCertInfoJson() const {
- NJson::TJsonValue json(NJson::JSON_MAP);
- if (const TString cert = Common ? Common->Settings.Certificate : TString()) {
- struct TEx : yexception {};
- try {
- const auto& cert = Common->Settings.Certificate;
- std::unique_ptr<BIO, void(*)(BIO*)> bio(BIO_new_mem_buf(cert.data(), cert.size()), &BIO_vfree);
- if (!bio) {
- throw TEx() << "BIO_new_mem_buf failed";
- }
- std::unique_ptr<X509, void(*)(X509*)> x509(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr),
- &X509_free);
- if (!x509) {
- throw TEx() << "PEM_read_bio_X509 failed";
- }
- X509_NAME *name = X509_get_subject_name(x509.get());
- if (!name) {
- throw TEx() << "X509_get_subject_name failed";
- }
- char buffer[4096];
- if (char *p = X509_NAME_oneline(name, buffer, sizeof(buffer))) {
- json["Subject"] = p;
- }
- if (int loc = X509_NAME_get_index_by_NID(name, NID_commonName, -1); loc >= 0) {
- if (X509_NAME_ENTRY *entry = X509_NAME_get_entry(name, loc)) {
- if (ASN1_STRING *data = X509_NAME_ENTRY_get_data(entry)) {
- unsigned char *cn;
- if (const int len = ASN1_STRING_to_UTF8(&cn, data); len >= 0) {
- json["CommonName"] = TString(reinterpret_cast<char*>(cn), len);
- OPENSSL_free(cn);
- }
- }
- }
- }
- auto time = [](const ASN1_TIME *t, const char *name) -> TString {
- if (t) {
- struct tm tm;
- if (ASN1_TIME_to_tm(t, &tm)) {
- return Strftime("%Y-%m-%dT%H:%M:%S%z", &tm);
- } else {
- throw TEx() << "ASN1_TIME_to_tm failed";
- }
- } else {
- throw TEx() << name << " failed";
- }
- };
- json["NotBefore"] = time(X509_get0_notBefore(x509.get()), "X509_get0_notBefore");
- json["NotAfter"] = time(X509_get0_notAfter(x509.get()), "X509_get0_notAfter");
- } catch (const TEx& ex) {
- json["Error"] = ex.what();
- }
- }
- TStringStream str(NMonitoring::HTTPOKJSON);
- NJson::WriteJson(&str, &json);
- return str.Str();
- }
- };
-
- IActor *CreateInterconnectMonActor(TIntrusivePtr<TInterconnectProxyCommon> common) {
- return new TInterconnectMonActor(std::move(common));
- }
-
-} // NInterconnect
diff --git a/library/cpp/actors/interconnect/interconnect_mon.h b/library/cpp/actors/interconnect/interconnect_mon.h
deleted file mode 100644
index 3fb26053fb..0000000000
--- a/library/cpp/actors/interconnect/interconnect_mon.h
+++ /dev/null
@@ -1,15 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include "interconnect_common.h"
-
-namespace NInterconnect {
-
- NActors::IActor *CreateInterconnectMonActor(TIntrusivePtr<NActors::TInterconnectProxyCommon> common = nullptr);
-
- static inline NActors::TActorId MakeInterconnectMonActorId(ui32 nodeId) {
- char s[12] = {'I', 'C', 'O', 'v', 'e', 'r', 'v', 'i', 'e', 'w', 0, 0};
- return NActors::TActorId(nodeId, TStringBuf(s, 12));
- }
-
-} // NInterconnect
diff --git a/library/cpp/actors/interconnect/interconnect_nameserver_base.h b/library/cpp/actors/interconnect/interconnect_nameserver_base.h
deleted file mode 100644
index f9ce456d1c..0000000000
--- a/library/cpp/actors/interconnect/interconnect_nameserver_base.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#include "interconnect.h"
-#include "interconnect_impl.h"
-#include "interconnect_address.h"
-#include "events_local.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/memory_log/memlog.h>
-
-namespace NActors {
-
- template<typename TDerived>
- class TInterconnectNameserverBase : public TActor<TDerived> {
- protected:
- const TMap<ui32, TTableNameserverSetup::TNodeInfo>& NodeTable;
-
- TInterconnectNameserverBase(void (TDerived::*func)(TAutoPtr<IEventHandle>& ev)
- , const TMap<ui32, TTableNameserverSetup::TNodeInfo>& nodeTable)
- : TActor<TDerived>(func)
- , NodeTable(nodeTable)
- {
- }
- public:
-
- void HandleMissedNodeId(TEvInterconnect::TEvResolveNode::TPtr& ev,
- const TActorContext& ctx,
- const TInstant&) {
- auto reply = new TEvLocalNodeInfo;
- reply->NodeId = ev->Get()->Record.GetNodeId();
- ctx.Send(ev->Sender, reply);
- }
-
- void Handle(TEvInterconnect::TEvResolveNode::TPtr& ev,
- const TActorContext& ctx) {
- const TEvInterconnect::TEvResolveNode* request = ev->Get();
- auto& record = request->Record;
- const ui32 nodeId = record.GetNodeId();
- const TInstant deadline = record.HasDeadline() ? TInstant::FromValue(record.GetDeadline()) : TInstant::Max();
- auto it = NodeTable.find(nodeId);
-
- if (it == NodeTable.end()) {
- static_cast<TDerived*>(this)->HandleMissedNodeId(ev, ctx, deadline);
- } else {
- IActor::RegisterWithSameMailbox(
- CreateResolveActor(nodeId, it->second, ev->Sender, this->SelfId(), deadline));
- }
- }
-
- void Handle(TEvResolveAddress::TPtr& ev,
- const TActorContext&) {
- const TEvResolveAddress* request = ev->Get();
-
- IActor::RegisterWithSameMailbox(
- CreateResolveActor(request->Address, request->Port, ev->Sender, this->SelfId(), TInstant::Max()));
- }
-
- void Handle(TEvInterconnect::TEvListNodes::TPtr& ev,
- const TActorContext& ctx) {
- THolder<TEvInterconnect::TEvNodesInfo>
- reply(new TEvInterconnect::TEvNodesInfo());
- reply->Nodes.reserve(NodeTable.size());
- for (const auto& pr : NodeTable) {
- reply->Nodes.emplace_back(pr.first,
- pr.second.Address, pr.second.Host, pr.second.ResolveHost,
- pr.second.Port, pr.second.Location);
- }
- ctx.Send(ev->Sender, reply.Release());
- }
-
- void Handle(TEvInterconnect::TEvGetNode::TPtr& ev,
- const TActorContext& ctx) {
- ui32 nodeId = ev->Get()->NodeId;
- THolder<TEvInterconnect::TEvNodeInfo>
- reply(new TEvInterconnect::TEvNodeInfo(nodeId));
- auto it = NodeTable.find(nodeId);
- if (it != NodeTable.end()) {
- reply->Node = MakeHolder<TEvInterconnect::TNodeInfo>(it->first, it->second.Address,
- it->second.Host, it->second.ResolveHost,
- it->second.Port, it->second.Location);
- }
- ctx.Send(ev->Sender, reply.Release());
- }
- };
-}
diff --git a/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp b/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
deleted file mode 100644
index 867b4b5d39..0000000000
--- a/library/cpp/actors/interconnect/interconnect_nameserver_dynamic.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-#include "interconnect.h"
-#include "interconnect_impl.h"
-#include "interconnect_address.h"
-#include "interconnect_nameserver_base.h"
-#include "events_local.h"
-#include "logging.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/log.h>
-
-namespace NActors {
-
- class TInterconnectDynamicNameserver
- : public TInterconnectNameserverBase<TInterconnectDynamicNameserver>
- , public TInterconnectLoggingBase
- {
- struct TPendingRequest {
- TEvInterconnect::TEvResolveNode::TPtr Request;
- TInstant Deadline;
-
- TPendingRequest(TEvInterconnect::TEvResolveNode::TPtr request, const TInstant& deadline)
- : Request(request), Deadline(deadline)
- {
- }
- };
-
- TMap<ui32, TTableNameserverSetup::TNodeInfo> NodeTable;
- TVector<TPendingRequest> PendingRequests;
- TDuration PendingPeriod;
-
- void PrintInfo() {
- TString logMsg = TStringBuilder() << "Table size: " << NodeTable.size();
- for (const auto& [nodeId, node] : NodeTable) {
- TString str = TStringBuilder() << "\n > Node " << nodeId << " `" << node.Address << "`:" << node.Port << ", host: " << node.Host << ", resolveHost: " << node.ResolveHost;
- logMsg += str;
- }
- LOG_TRACE_IC("ICN01", "%s", logMsg.c_str());
- }
-
- bool IsNodeUpdated(const ui32 nodeId, const TString& address, const ui32 port) {
- bool printInfo = false;
- auto it = NodeTable.find(nodeId);
- if (it == NodeTable.end()) {
- LOG_TRACE_IC("ICN02", "New node %u `%s`: %u",
- nodeId, address.c_str(), port);
- printInfo = true;
- } else if (it->second.Address != address || it->second.Port != port) {
- LOG_TRACE_IC("ICN03", "Updated node %u `%s`: %u (from `%s`: %u)",
- nodeId, address.c_str(), port, it->second.Address.c_str(), it->second.Port);
- printInfo = true;
- Send(TActivationContext::InterconnectProxy(nodeId), new TEvInterconnect::TEvDisconnect);
- }
- return printInfo;
- }
-
- void DiscardTimedOutRequests(const TActorContext& ctx, ui32 compactionCount = 0) {
-
- auto now = Now();
-
- for (auto& pending : PendingRequests) {
- if (pending.Request && pending.Deadline > now) {
- LOG_ERROR_IC("ICN06", "Unknown nodeId: %u", pending.Request->Get()->Record.GetNodeId());
- auto reply = new TEvLocalNodeInfo;
- reply->NodeId = pending.Request->Get()->Record.GetNodeId();
- ctx.Send(pending.Request->Sender, reply);
- pending.Request.Reset();
- compactionCount++;
- }
- }
-
- if (compactionCount) {
- TVector<TPendingRequest> requests;
- if (compactionCount < PendingRequests.size()) { // sanity check
- requests.reserve(PendingRequests.size() - compactionCount);
- }
- for (auto& pending : PendingRequests) {
- if (pending.Request) {
- requests.emplace_back(pending.Request, pending.Deadline);
- }
- }
- PendingRequests.swap(requests);
- }
- }
-
- void SchedulePeriodic() {
- Schedule(TDuration::MilliSeconds(200), new TEvents::TEvWakeup());
- }
-
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::NAMESERVICE;
- }
-
- TInterconnectDynamicNameserver(const TIntrusivePtr<TTableNameserverSetup>& setup, const TDuration& pendingPeriod, ui32 /*resolvePoolId*/ )
- : TInterconnectNameserverBase<TInterconnectDynamicNameserver>(&TInterconnectDynamicNameserver::StateFunc, NodeTable)
- , NodeTable(setup->StaticNodeTable)
- , PendingPeriod(pendingPeriod)
- {
- Y_ABORT_UNLESS(setup->IsEntriesUnique());
- }
-
- STFUNC(StateFunc) {
- try {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvInterconnect::TEvResolveNode, Handle);
- HFunc(TEvResolveAddress, Handle);
- HFunc(TEvInterconnect::TEvListNodes, Handle);
- HFunc(TEvInterconnect::TEvGetNode, Handle);
- HFunc(TEvInterconnect::TEvNodesInfo, HandleUpdate);
- CFunc(TEvents::TEvWakeup::EventType, HandlePeriodic);
- }
- } catch (...) {
- LOG_ERROR_IC("ICN09", "%s", CurrentExceptionMessage().c_str());
- }
- }
-
- void HandleMissedNodeId(TEvInterconnect::TEvResolveNode::TPtr& ev,
- const TActorContext& ctx,
- const TInstant& deadline) {
- if (PendingPeriod) {
- if (PendingRequests.size() == 0) {
- SchedulePeriodic();
- }
- PendingRequests.emplace_back(std::move(ev), Min(deadline, Now() + PendingPeriod));
- } else {
- LOG_ERROR_IC("ICN07", "Unknown nodeId: %u", ev->Get()->Record.GetNodeId());
- TInterconnectNameserverBase::HandleMissedNodeId(ev, ctx, deadline);
- }
- }
-
- void HandleUpdate(TEvInterconnect::TEvNodesInfo::TPtr& ev,
- const TActorContext& ctx) {
-
- auto request = ev->Get();
- LOG_TRACE_IC("ICN04", "Update TEvNodesInfo with sz: %lu ", request->Nodes.size());
-
- bool printInfo = false;
- ui32 compactionCount = 0;
-
- for (const auto& node : request->Nodes) {
- printInfo |= IsNodeUpdated(node.NodeId, node.Address, node.Port);
-
- NodeTable[node.NodeId] = TTableNameserverSetup::TNodeInfo(
- node.Address, node.Host, node.ResolveHost, node.Port, node.Location);
-
- for (auto& pending : PendingRequests) {
- if (pending.Request && pending.Request->Get()->Record.GetNodeId() == node.NodeId) {
- LOG_TRACE_IC("ICN05", "Pending nodeId: %u discovered", node.NodeId);
- RegisterWithSameMailbox(
- CreateResolveActor(node.NodeId, NodeTable[node.NodeId], pending.Request->Sender, SelfId(), pending.Deadline));
- pending.Request.Reset();
- compactionCount++;
- }
- }
- }
-
- if (printInfo) {
- PrintInfo();
- }
-
- DiscardTimedOutRequests(ctx, compactionCount);
- }
-
- void HandlePeriodic(const TActorContext& ctx) {
- DiscardTimedOutRequests(ctx, 0);
- if (PendingRequests.size()) {
- SchedulePeriodic();
- }
- }
- };
-
- IActor* CreateDynamicNameserver(const TIntrusivePtr<TTableNameserverSetup>& setup,
- const TDuration& pendingPeriod,
- ui32 poolId) {
- return new TInterconnectDynamicNameserver(setup, pendingPeriod, poolId);
- }
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp b/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
deleted file mode 100644
index ac565f6f8c..0000000000
--- a/library/cpp/actors/interconnect/interconnect_nameserver_table.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-#include "interconnect.h"
-#include "interconnect_impl.h"
-#include "interconnect_address.h"
-#include "interconnect_nameserver_base.h"
-#include "events_local.h"
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/memory_log/memlog.h>
-
-namespace NActors {
-
- class TInterconnectNameserverTable: public TInterconnectNameserverBase<TInterconnectNameserverTable> {
- TIntrusivePtr<TTableNameserverSetup> Config;
-
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::NAMESERVICE;
- }
-
- TInterconnectNameserverTable(const TIntrusivePtr<TTableNameserverSetup>& setup, ui32 /*resolvePoolId*/)
- : TInterconnectNameserverBase<TInterconnectNameserverTable>(&TInterconnectNameserverTable::StateFunc, setup->StaticNodeTable)
- , Config(setup)
- {
- Y_ABORT_UNLESS(Config->IsEntriesUnique());
- }
-
- STFUNC(StateFunc) {
- try {
- switch (ev->GetTypeRewrite()) {
- HFunc(TEvInterconnect::TEvResolveNode, Handle);
- HFunc(TEvResolveAddress, Handle);
- HFunc(TEvInterconnect::TEvListNodes, Handle);
- HFunc(TEvInterconnect::TEvGetNode, Handle);
- }
- } catch (...) {
- // on error - do nothing
- }
- }
- };
-
- IActor* CreateNameserverTable(const TIntrusivePtr<TTableNameserverSetup>& setup, ui32 poolId) {
- return new TInterconnectNameserverTable(setup, poolId);
- }
-
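- // Returns false when two entries of the static node table share the same non-empty address and port,
- // or the same resolve host and port.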
- bool TTableNameserverSetup::IsEntriesUnique() const {
- TVector<const TNodeInfo*> infos;
- infos.reserve(StaticNodeTable.size());
- for (const auto& x : StaticNodeTable)
- infos.push_back(&x.second);
-
- auto CompareAddressLambda =
- [](const TNodeInfo* left, const TNodeInfo* right) {
- return left->Port == right->Port ? left->Address < right->Address : left->Port < right->Port;
- };
-
- Sort(infos, CompareAddressLambda);
-
- for (ui32 idx = 1, end = StaticNodeTable.size(); idx < end; ++idx) {
- const TNodeInfo* left = infos[idx - 1];
- const TNodeInfo* right = infos[idx];
- if (left->Address && left->Address == right->Address && left->Port == right->Port)
- return false;
- }
-
- auto CompareHostLambda =
- [](const TNodeInfo* left, const TNodeInfo* right) {
- return left->Port == right->Port ? left->ResolveHost < right->ResolveHost : left->Port < right->Port;
- };
-
- Sort(infos, CompareHostLambda);
-
- for (ui32 idx = 1, end = StaticNodeTable.size(); idx < end; ++idx) {
- const TNodeInfo* left = infos[idx - 1];
- const TNodeInfo* right = infos[idx];
- if (left->ResolveHost == right->ResolveHost && left->Port == right->Port)
- return false;
- }
-
- return true;
- }
-
- TActorId GetNameserviceActorId() {
- return TActorId(0, "namesvc");
- }
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp b/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
deleted file mode 100644
index 2c02c632c2..0000000000
--- a/library/cpp/actors/interconnect/interconnect_proxy_wrapper.cpp
+++ /dev/null
@@ -1,47 +0,0 @@
-#include "interconnect_proxy_wrapper.h"
-#include "interconnect_tcp_proxy.h"
-#include <library/cpp/actors/interconnect/mock/ic_mock.h>
-
-namespace NActors {
-
- class TInterconnectProxyWrapper : public IActorCallback {
- TIntrusivePtr<TInterconnectProxyCommon> Common;
- const ui32 NodeId;
- TInterconnectMock *Mock;
- IActor *Proxy = nullptr;
-
- public:
- TInterconnectProxyWrapper(TIntrusivePtr<TInterconnectProxyCommon> common, ui32 nodeId, TInterconnectMock *mock)
- : IActorCallback(static_cast<TReceiveFunc>(&TInterconnectProxyWrapper::StateFunc), EActivityType::INTERCONNECT_PROXY_WRAPPER)
- , Common(std::move(common))
- , NodeId(nodeId)
- , Mock(mock)
- {}
-
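- // Lazily creates the real proxy (or its mock) on the first event and forwards all events to it;
- // a Poison received before the proxy exists simply destroys the wrapper.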
- STFUNC(StateFunc) {
- if (ev->GetTypeRewrite() == TEvents::TSystem::Poison && !Proxy) {
- PassAway();
- } else {
- if (!Proxy) {
- IActor *actor = Mock
- ? Mock->CreateProxyMock(TActivationContext::ActorSystem()->NodeId, NodeId, Common)
- : new TInterconnectProxyTCP(NodeId, Common, &Proxy);
- RegisterWithSameMailbox(actor);
- if (Mock) {
- Proxy = actor;
- }
- Y_ABORT_UNLESS(Proxy);
- }
- InvokeOtherActor(*Proxy, &IActor::Receive, ev);
- }
- }
- };
-
- TProxyWrapperFactory CreateProxyWrapperFactory(TIntrusivePtr<TInterconnectProxyCommon> common, ui32 poolId,
- TInterconnectMock *mock) {
- return [=](TActorSystem *as, ui32 nodeId) -> TActorId {
- return as->Register(new TInterconnectProxyWrapper(common, nodeId, mock), TMailboxType::HTSwap, poolId);
- };
- }
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/interconnect_proxy_wrapper.h b/library/cpp/actors/interconnect/interconnect_proxy_wrapper.h
deleted file mode 100644
index e5942351a7..0000000000
--- a/library/cpp/actors/interconnect/interconnect_proxy_wrapper.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#pragma once
-
-#include "interconnect_common.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-
-namespace NActors {
-
- TProxyWrapperFactory CreateProxyWrapperFactory(TIntrusivePtr<TInterconnectProxyCommon> common, ui32 poolId,
- class TInterconnectMock *mock = nullptr);
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_resolve.cpp b/library/cpp/actors/interconnect/interconnect_resolve.cpp
deleted file mode 100644
index 071af2f7c4..0000000000
--- a/library/cpp/actors/interconnect/interconnect_resolve.cpp
+++ /dev/null
@@ -1,208 +0,0 @@
-#include "interconnect.h"
-#include "interconnect_address.h"
-#include "events_local.h"
-#include "logging.h"
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/dnsresolver/dnsresolver.h>
-
-namespace NActors {
-
- using namespace NActors::NDnsResolver;
-
- class TInterconnectResolveActor
- : public TActorBootstrapped<TInterconnectResolveActor>
- , public TInterconnectLoggingBase
- {
- public:
- TInterconnectResolveActor(
- const TString& host, ui16 port, ui32 nodeId, const TString& defaultAddress,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline)
- : Host(host)
- , NodeId(nodeId)
- , Port(port)
- , DefaultAddress(defaultAddress)
- , ReplyTo(replyTo)
- , ReplyFrom(replyFrom)
- , Deadline(deadline)
- { }
-
- TInterconnectResolveActor(
- const TString& host, ui16 port,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline)
- : Host(host)
- , Port(port)
- , ReplyTo(replyTo)
- , ReplyFrom(replyFrom)
- , Deadline(deadline)
- { }
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::NAMESERVICE;
- }
-
- void Bootstrap() {
- TMaybe<TString> errorText;
- if (auto addr = ExtractDefaultAddr(errorText)) {
- LOG_TRACE_IC("ICR01", "Host: %s, CACHED address: %s", Host.c_str(), DefaultAddress.c_str());
- if (NodeId) {
- return SendLocalNodeInfoAndDie({{*addr}});
- } else {
- return SendAddressInfoAndDie(std::move(addr));
- }
- }
-
- if (errorText) {
- SendErrorAndDie(*errorText);
- }
-
- auto now = TActivationContext::Now();
- if (Deadline < now) {
- SendErrorAndDie("Deadline");
- return;
- }
-
- LOG_DEBUG_IC("ICR02", "Host: %s, RESOLVING address ...", Host.c_str());
- Send(MakeDnsResolverActorId(),
- NodeId
- ? static_cast<IEventBase*>(new TEvDns::TEvGetHostByName(Host, AF_UNSPEC))
- : static_cast<IEventBase*>(new TEvDns::TEvGetAddr(Host, AF_UNSPEC)),
- IEventHandle::FlagTrackDelivery);
-
- if (Deadline != TInstant::Max()) {
- Schedule(Deadline, new TEvents::TEvWakeup);
- }
-
- Become(&TThis::StateWork);
- }
-
- STRICT_STFUNC(StateWork, {
- sFunc(TEvents::TEvWakeup, HandleTimeout);
- sFunc(TEvents::TEvUndelivered, HandleUndelivered);
- hFunc(TEvDns::TEvGetAddrResult, Handle);
- hFunc(TEvDns::TEvGetHostByNameResult, Handle);
- });
-
- void HandleTimeout() {
- SendErrorAndDie("Deadline");
- }
-
- void HandleUndelivered() {
- SendErrorAndDie("Dns resolver is unavailable");
- }
-
- void Handle(TEvDns::TEvGetAddrResult::TPtr& ev) {
- if (auto addr = ExtractAddr(ev->Get())) {
- SendAddressInfoAndDie(std::move(addr));
- } else {
- SendErrorAndDie(ev->Get()->ErrorText);
- }
- }
-
- void Handle(TEvDns::TEvGetHostByNameResult::TPtr& ev) {
- auto& msg = *ev->Get();
- if (msg.Status) {
- SendErrorAndDie(msg.ErrorText);
- } else {
- std::vector<NInterconnect::TAddress> addresses;
- for (const auto& ipv6 : msg.AddrsV6) {
- addresses.emplace_back(ipv6, Port);
- }
- for (const auto& ipv4 : msg.AddrsV4) {
- addresses.emplace_back(ipv4, Port);
- }
- SendLocalNodeInfoAndDie(std::move(addresses));
- }
- }
-
- void SendAddressInfoAndDie(NAddr::IRemoteAddrPtr addr) {
- LOG_DEBUG_IC("ICR03", "Host: %s, RESOLVED address", Host.c_str());
- auto reply = new TEvAddressInfo;
- reply->Address = std::move(addr);
- TActivationContext::Send(new IEventHandle(ReplyTo, ReplyFrom, reply));
- PassAway();
- }
-
- void SendLocalNodeInfoAndDie(std::vector<NInterconnect::TAddress> addresses) {
- LOG_DEBUG_IC("ICR04", "Host: %s, RESOLVED address", Host.c_str());
- auto reply = std::make_unique<TEvLocalNodeInfo>();
- reply->NodeId = *NodeId;
- reply->Addresses = std::move(addresses);
- TActivationContext::Send(new IEventHandle(ReplyTo, ReplyFrom, reply.release()));
- PassAway();
- }
-
- void SendErrorAndDie(const TString& errorText) {
- LOG_DEBUG_IC("ICR05", "Host: %s, ERROR resolving: %s", Host.c_str(), errorText.c_str());
- auto *event = new TEvResolveError;
- event->Explain = errorText;
- event->Host = Host;
- TActivationContext::Send(new IEventHandle(ReplyTo, ReplyFrom, event));
- PassAway();
- }
-
- NAddr::IRemoteAddrPtr ExtractAddr(TEvDns::TEvGetAddrResult* msg) {
- if (msg->Status == 0) {
- if (msg->IsV6()) {
- struct sockaddr_in6 sin6;
- Zero(sin6);
- sin6.sin6_family = AF_INET6;
- sin6.sin6_addr = msg->GetAddrV6();
- sin6.sin6_port = HostToInet(Port);
- return MakeHolder<NAddr::TIPv6Addr>(sin6);
- }
-
- if (msg->IsV4()) {
- return MakeHolder<NAddr::TIPv4Addr>(TIpAddress(msg->GetAddrV4().s_addr, Port));
- }
-
- Y_ABORT("Unexpected result address family");
- }
-
- return nullptr;
- }
-
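- // Converts the cached DefaultAddress, if any, into a remote address object; sets errorText and
- // returns nullptr for unsupported address families.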
- NAddr::IRemoteAddrPtr ExtractDefaultAddr(TMaybe<TString>& errorText) {
- if (DefaultAddress) {
- NInterconnect::TAddress address(DefaultAddress.data(), Port);
-
- switch (address.GetFamily()) {
- case AF_INET:
- return MakeHolder<NAddr::TIPv4Addr>(*(sockaddr_in*)address.SockAddr());
- case AF_INET6:
- return MakeHolder<NAddr::TIPv6Addr>(*(sockaddr_in6*)address.SockAddr());
- default:
- errorText = "Unsupported default address: " + DefaultAddress;
- break;
- }
- }
-
- return nullptr;
- }
-
- private:
- const TString Host;
- const std::optional<ui32> NodeId;
- const ui16 Port;
- const TString DefaultAddress;
- const TActorId ReplyTo;
- const TActorId ReplyFrom;
- const TInstant Deadline;
- };
-
- IActor* CreateResolveActor(
- const TString& host, ui16 port, ui32 nodeId, const TString& defaultAddress,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline)
- {
- return new TInterconnectResolveActor(host, port, nodeId, defaultAddress, replyTo, replyFrom, deadline);
- }
-
- IActor* CreateResolveActor(
- const TString& host, ui16 port,
- const TActorId& replyTo, const TActorId& replyFrom, TInstant deadline)
- {
- return new TInterconnectResolveActor(host, port, replyTo, replyFrom, deadline);
- }
-
-} // namespace NActors
diff --git a/library/cpp/actors/interconnect/interconnect_stream.cpp b/library/cpp/actors/interconnect/interconnect_stream.cpp
deleted file mode 100644
index f8db077fa4..0000000000
--- a/library/cpp/actors/interconnect/interconnect_stream.cpp
+++ /dev/null
@@ -1,680 +0,0 @@
-#include "interconnect_stream.h"
-#include "logging.h"
-#include "poller_actor.h"
-#include <library/cpp/openssl/init/init.h>
-#include <util/network/socket.h>
-#include <openssl/ssl.h>
-#include <openssl/err.h>
-#include <openssl/pem.h>
-
-#if defined(_win_)
-#include <util/system/file.h>
-#define SOCK_NONBLOCK 0
-#elif defined(_darwin_)
-#define SOCK_NONBLOCK 0
-#else
-#include <sys/un.h>
-#include <sys/stat.h>
-#endif //_win_
-
-#if !defined(_win_)
-#include <sys/ioctl.h>
-#endif
-
-#include <cerrno>
-
-namespace NInterconnect {
- namespace {
- inline int
- LastSocketError() {
-#if defined(_win_)
- return WSAGetLastError();
-#else
- return errno;
-#endif
- }
- }
-
- TSocket::TSocket(SOCKET fd)
- : Descriptor(fd)
- {
- }
-
- TSocket::~TSocket() {
- if (Descriptor == INVALID_SOCKET) {
- return;
- }
-
- auto const result = ::closesocket(Descriptor);
- if (result == 0)
- return;
- switch (LastSocketError()) {
- case EBADF:
- Y_ABORT("Close bad descriptor");
- case EINTR:
- break;
- case EIO:
- Y_ABORT("EIO");
- default:
- Y_ABORT("It's something unexpected");
- }
- }
-
- int TSocket::GetDescriptor() {
- return Descriptor;
- }
-
- int
- TSocket::Bind(const TAddress& addr) const {
- const auto ret = ::bind(Descriptor, addr.SockAddr(), addr.Size());
- if (ret < 0)
- return -LastSocketError();
-
- return 0;
- }
-
- int
- TSocket::Shutdown(int how) const {
- const auto ret = ::shutdown(Descriptor, how);
- if (ret < 0)
- return -LastSocketError();
-
- return 0;
- }
-
- int TSocket::GetConnectStatus() const {
- int err = 0;
- socklen_t len = sizeof(err);
- if (getsockopt(Descriptor, SOL_SOCKET, SO_ERROR, reinterpret_cast<char*>(&err), &len) == -1) {
- err = LastSocketError();
- }
- return err;
- }
-
- /////////////////////////////////////////////////////////////////
-
- TIntrusivePtr<TStreamSocket> TStreamSocket::Make(int domain, int *error) {
- const SOCKET res = ::socket(domain, SOCK_STREAM | SOCK_NONBLOCK, 0);
- if (res == -1) {
- const int err = LastSocketError();
- Y_ABORT_UNLESS(err != EMFILE && err != ENFILE);
- if (error) {
- *error = err;
- }
- }
- return MakeIntrusive<TStreamSocket>(res);
- }
-
- TStreamSocket::TStreamSocket(SOCKET fd)
- : TSocket(fd)
- {
- }
-
- ssize_t
- TStreamSocket::Send(const void* msg, size_t len, TString* /*err*/) const {
- const auto ret = ::send(Descriptor, static_cast<const char*>(msg), int(len), 0);
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- ssize_t
- TStreamSocket::Recv(void* buf, size_t len, TString* /*err*/) const {
- const auto ret = ::recv(Descriptor, static_cast<char*>(buf), int(len), 0);
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- ssize_t
- TStreamSocket::WriteV(const struct iovec* iov, int iovcnt) const {
-#ifndef _win_
- const auto ret = ::writev(Descriptor, iov, iovcnt);
- if (ret < 0)
- return -LastSocketError();
- return ret;
-#else
- Y_ABORT("WriteV() unsupported on Windows");
-#endif
- }
-
- ssize_t
- TStreamSocket::ReadV(const struct iovec* iov, int iovcnt) const {
-#ifndef _win_
- const auto ret = ::readv(Descriptor, iov, iovcnt);
- if (ret < 0)
- return -LastSocketError();
- return ret;
-#else
- Y_ABORT("ReadV() unsupported on Windows");
-#endif
- }
-
- ssize_t TStreamSocket::GetUnsentQueueSize() const {
- int num = -1;
-#ifndef _win_ // we have no means to determine output queue size on Windows
- if (ioctl(Descriptor, TIOCOUTQ, &num) == -1) {
- num = -1;
- }
-#endif
- return num;
- }
-
- int
- TStreamSocket::Connect(const TAddress& addr) const {
- const auto ret = ::connect(Descriptor, addr.SockAddr(), addr.Size());
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- int
- TStreamSocket::Connect(const NAddr::IRemoteAddr* addr) const {
- const auto ret = ::connect(Descriptor, addr->Addr(), addr->Len());
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- int
- TStreamSocket::Listen(int backlog) const {
- const auto ret = ::listen(Descriptor, backlog);
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- int
- TStreamSocket::Accept(TAddress& acceptedAddr) const {
- socklen_t acceptedSize = sizeof(::sockaddr_in6);
- const auto ret = ::accept(Descriptor, acceptedAddr.SockAddr(), &acceptedSize);
- if (ret == INVALID_SOCKET)
- return -LastSocketError();
-
- return ret;
- }
-
- void
- TStreamSocket::SetSendBufferSize(i32 len) const {
- (void)SetSockOpt(Descriptor, SOL_SOCKET, SO_SNDBUF, len);
- }
-
- ui32 TStreamSocket::GetSendBufferSize() const {
- ui32 res = 0;
- CheckedGetSockOpt(Descriptor, SOL_SOCKET, SO_SNDBUF, res, "SO_SNDBUF");
- return res;
- }
-
- void TStreamSocket::Request(NActors::TPollerToken& token, bool read, bool write) {
- token.Request(read, write);
- }
-
- bool TStreamSocket::RequestReadNotificationAfterWouldBlock(NActors::TPollerToken& token) {
- return token.RequestReadNotificationAfterWouldBlock();
- }
-
- bool TStreamSocket::RequestWriteNotificationAfterWouldBlock(NActors::TPollerToken& token) {
- return token.RequestWriteNotificationAfterWouldBlock();
- }
-
- size_t TStreamSocket::ExpectedWriteLength() const {
- return 0;
- }
-
- //////////////////////////////////////////////////////
-
- TDatagramSocket::TPtr TDatagramSocket::Make(int domain) {
- const SOCKET res = ::socket(domain, SOCK_DGRAM, 0);
- if (res == -1) {
- const int err = LastSocketError();
- Y_ABORT_UNLESS(err != EMFILE && err != ENFILE);
- }
- return std::make_shared<TDatagramSocket>(res);
- }
-
- TDatagramSocket::TDatagramSocket(SOCKET fd)
- : TSocket(fd)
- {
- }
-
- ssize_t
- TDatagramSocket::SendTo(const void* msg, size_t len, const TAddress& toAddr) const {
- const auto ret = ::sendto(Descriptor, static_cast<const char*>(msg), int(len), 0, toAddr.SockAddr(), toAddr.Size());
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
- ssize_t
- TDatagramSocket::RecvFrom(void* buf, size_t len, TAddress& fromAddr) const {
- socklen_t fromSize = sizeof(::sockaddr_in6);
- const auto ret = ::recvfrom(Descriptor, static_cast<char*>(buf), int(len), 0, fromAddr.SockAddr(), &fromSize);
- if (ret < 0)
- return -LastSocketError();
-
- return ret;
- }
-
-
- // deleter for SSL objects
- struct TDeleter {
- void operator ()(BIO *bio) const {
- BIO_free(bio);
- }
-
- void operator ()(X509 *x509) const {
- X509_free(x509);
- }
-
- void operator ()(RSA *rsa) const {
- RSA_free(rsa);
- }
-
- void operator ()(SSL_CTX *ctx) const {
- SSL_CTX_free(ctx);
- }
- };
-
- class TSecureSocketContext::TImpl {
- std::unique_ptr<SSL_CTX, TDeleter> Ctx;
-
- public:
- TImpl(const TString& certificate, const TString& privateKey, const TString& caFilePath,
- const TString& ciphers) {
- int ret;
- InitOpenSSL();
-#if OPENSSL_VERSION_NUMBER < 0x10100000L
- Ctx.reset(SSL_CTX_new(TLSv1_2_method()));
- Y_ABORT_UNLESS(Ctx, "SSL_CTX_new() failed");
-#else
- Ctx.reset(SSL_CTX_new(TLS_method()));
- Y_ABORT_UNLESS(Ctx, "SSL_CTX_new() failed");
- ret = SSL_CTX_set_min_proto_version(Ctx.get(), TLS1_2_VERSION);
- Y_ABORT_UNLESS(ret == 1, "failed to set min proto version");
- ret = SSL_CTX_set_max_proto_version(Ctx.get(), TLS1_2_VERSION);
- Y_ABORT_UNLESS(ret == 1, "failed to set max proto version");
-#endif
- SSL_CTX_set_verify(Ctx.get(), SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT, &Verify);
- SSL_CTX_set_mode(*this, SSL_MODE_ENABLE_PARTIAL_WRITE | SSL_MODE_ACCEPT_MOVING_WRITE_BUFFER);
-
- // apply certificates in SSL context
- if (certificate) {
- std::unique_ptr<BIO, TDeleter> bio(BIO_new_mem_buf(certificate.data(), certificate.size()));
- Y_ABORT_UNLESS(bio);
-
- // first certificate in the chain is expected to be a leaf
- std::unique_ptr<X509, TDeleter> cert(PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr));
- Y_ABORT_UNLESS(cert, "failed to parse certificate");
- ret = SSL_CTX_use_certificate(Ctx.get(), cert.get());
- Y_ABORT_UNLESS(ret == 1);
-
- // loading additional certificates in the chain, if any
- while(true) {
- X509 *ca = PEM_read_bio_X509(bio.get(), nullptr, nullptr, nullptr);
- if (ca == nullptr) {
- break;
- }
- ret = SSL_CTX_add0_chain_cert(Ctx.get(), ca);
- Y_ABORT_UNLESS(ret == 1);
-                    // do not free the certificate here: on success, SSL_CTX_add0_chain_cert takes ownership of it
- }
- }
- if (privateKey) {
- std::unique_ptr<BIO, TDeleter> bio(BIO_new_mem_buf(privateKey.data(), privateKey.size()));
- Y_ABORT_UNLESS(bio);
- std::unique_ptr<RSA, TDeleter> pkey(PEM_read_bio_RSAPrivateKey(bio.get(), nullptr, nullptr, nullptr));
- Y_ABORT_UNLESS(pkey);
- ret = SSL_CTX_use_RSAPrivateKey(Ctx.get(), pkey.get());
- Y_ABORT_UNLESS(ret == 1);
- }
- if (caFilePath) {
- ret = SSL_CTX_load_verify_locations(Ctx.get(), caFilePath.data(), nullptr);
- Y_ABORT_UNLESS(ret == 1);
- }
-
- int success = SSL_CTX_set_cipher_list(Ctx.get(), ciphers ? ciphers.data() : "AES128-GCM-SHA256");
- Y_ABORT_UNLESS(success, "failed to set cipher list");
- }
-
- operator SSL_CTX*() const {
- return Ctx.get();
- }
-
- static int GetExIndex() {
- static int index = SSL_get_ex_new_index(0, nullptr, nullptr, nullptr, nullptr);
- return index;
- }
-
- private:
- static int Verify(int preverify, X509_STORE_CTX *ctx) {
- if (!preverify) {
- X509 *badCert = X509_STORE_CTX_get_current_cert(ctx);
- int err = X509_STORE_CTX_get_error(ctx);
- int depth = X509_STORE_CTX_get_error_depth(ctx);
- SSL *ssl = static_cast<SSL*>(X509_STORE_CTX_get_ex_data(ctx, SSL_get_ex_data_X509_STORE_CTX_idx()));
- TString *errp = static_cast<TString*>(SSL_get_ex_data(ssl, GetExIndex()));
- char buffer[1024];
- X509_NAME_oneline(X509_get_subject_name(badCert), buffer, sizeof(buffer));
- TStringBuilder s;
- s << "Error during certificate validation"
- << " error# " << X509_verify_cert_error_string(err)
- << " depth# " << depth
- << " cert# " << buffer;
- if (err == X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT) {
- X509_NAME_oneline(X509_get_issuer_name(badCert), buffer, sizeof(buffer));
- s << " issuer# " << buffer;
- }
- *errp = s;
- }
- return preverify;
- }
- };
-
- TSecureSocketContext::TSecureSocketContext(const TString& certificate, const TString& privateKey,
- const TString& caFilePath, const TString& ciphers)
- : Impl(new TImpl(certificate, privateKey, caFilePath, ciphers))
- {}
-
- TSecureSocketContext::~TSecureSocketContext()
- {}
-
- class TSecureSocket::TImpl {
- SSL *Ssl;
- TString ErrorDescription;
- bool WantRead_ = false;
- bool WantWrite_ = false;
-
- public:
- TImpl(SSL_CTX *ctx, int fd)
- : Ssl(SSL_new(ctx))
- {
- Y_ABORT_UNLESS(Ssl, "SSL_new() failed");
- SSL_set_fd(Ssl, fd);
- SSL_set_ex_data(Ssl, TSecureSocketContext::TImpl::GetExIndex(), &ErrorDescription);
- }
-
- ~TImpl() {
- SSL_free(Ssl);
- }
-
- TString GetErrorStack() {
- if (ErrorDescription) {
- return ErrorDescription;
- }
- std::unique_ptr<BIO, int(*)(BIO*)> mem(BIO_new(BIO_s_mem()), BIO_free);
- ERR_print_errors(mem.get());
- char *p = nullptr;
- auto len = BIO_get_mem_data(mem.get(), &p);
- return TString(p, len);
- }
-
- EStatus ConvertResult(int res, TString& err) {
- switch (res) {
- case SSL_ERROR_NONE:
- return EStatus::SUCCESS;
-
- case SSL_ERROR_WANT_READ:
- return EStatus::WANT_READ;
-
- case SSL_ERROR_WANT_WRITE:
- return EStatus::WANT_WRITE;
-
- case SSL_ERROR_SYSCALL:
- err = TStringBuilder() << "syscall error: " << strerror(LastSocketError()) << ": " << GetErrorStack();
- break;
-
- case SSL_ERROR_ZERO_RETURN:
- err = "TLS negotiation failed";
- break;
-
- case SSL_ERROR_SSL:
- err = "SSL error: " + GetErrorStack();
- break;
-
- default:
- err = "unknown OpenSSL error";
- break;
- }
- return EStatus::ERROR;
- }
-
- enum EConnectState {
- CONNECT,
- SHUTDOWN,
- READ,
- } ConnectState = EConnectState::CONNECT;
-
- EStatus Establish(bool server, bool authOnly, TString& err) {
- switch (ConnectState) {
- case EConnectState::CONNECT: {
- auto callback = server ? SSL_accept : SSL_connect;
- const EStatus status = ConvertResult(SSL_get_error(Ssl, callback(Ssl)), err);
- if (status != EStatus::SUCCESS || !authOnly) {
- return status;
- }
- ConnectState = EConnectState::SHUTDOWN;
- [[fallthrough]];
- }
-
- case EConnectState::SHUTDOWN: {
- const int res = SSL_shutdown(Ssl);
- if (res == 1) {
- return EStatus::SUCCESS;
- } else if (res != 0) {
- return ConvertResult(SSL_get_error(Ssl, res), err);
- }
- ConnectState = EConnectState::READ;
- [[fallthrough]];
- }
-
- case EConnectState::READ: {
- char data[256];
- size_t numRead = 0;
- const int res = SSL_get_error(Ssl, SSL_read_ex(Ssl, data, sizeof(data), &numRead));
- if (res == SSL_ERROR_ZERO_RETURN) {
- return EStatus::SUCCESS;
- } else if (res != SSL_ERROR_NONE) {
- return ConvertResult(res, err);
- } else if (numRead) {
- err = "non-zero return from SSL_read_ex: " + ToString(numRead);
- return EStatus::ERROR;
- } else {
- return EStatus::SUCCESS;
- }
- }
- }
- Y_ABORT();
- }
-
- std::optional<std::pair<const void*, size_t>> BlockedSend;
-
- ssize_t Send(const void* msg, size_t len, TString *err) {
- if (BlockedSend && BlockedSend->first == msg && BlockedSend->second < len) {
- len = BlockedSend->second;
- }
- Y_ABORT_UNLESS(!BlockedSend || *BlockedSend == std::make_pair(msg, len));
- const ssize_t res = Operate(msg, len, &SSL_write_ex, err);
- if (res == -EAGAIN) {
- BlockedSend.emplace(msg, len);
- } else {
- BlockedSend.reset();
- }
- return res;
- }
-
- size_t ExpectedWriteLength() const {
- return BlockedSend ? BlockedSend->second : 0;
- }
-
- std::optional<std::pair<void*, size_t>> BlockedReceive;
-
- ssize_t Recv(void* msg, size_t len, TString *err) {
- if (BlockedReceive && BlockedReceive->first == msg && BlockedReceive->second < len) {
- len = BlockedReceive->second;
- }
- Y_ABORT_UNLESS(!BlockedReceive || *BlockedReceive == std::make_pair(msg, len));
- const ssize_t res = Operate(msg, len, &SSL_read_ex, err);
- if (res == -EAGAIN) {
- BlockedReceive.emplace(msg, len);
- } else {
- BlockedReceive.reset();
- }
- return res;
- }
-
- TString GetCipherName() const {
- return SSL_get_cipher_name(Ssl);
- }
-
- int GetCipherBits() const {
- return SSL_get_cipher_bits(Ssl, nullptr);
- }
-
- TString GetProtocolName() const {
- return SSL_get_cipher_version(Ssl);
- }
-
- TString GetPeerCommonName() const {
- TString res;
- if (X509 *cert = SSL_get_peer_certificate(Ssl)) {
- char buffer[256];
- memset(buffer, 0, sizeof(buffer));
- if (X509_NAME *name = X509_get_subject_name(cert)) {
- X509_NAME_get_text_by_NID(name, NID_commonName, buffer, sizeof(buffer));
- }
- X509_free(cert);
- res = TString(buffer, strnlen(buffer, sizeof(buffer)));
- }
- return res;
- }
-
- bool WantRead() const {
- return WantRead_;
- }
-
- bool WantWrite() const {
- return WantWrite_;
- }
-
- private:
- template<typename TBuffer, typename TOp>
- ssize_t Operate(TBuffer* buffer, size_t len, TOp&& op, TString *err) {
- WantRead_ = WantWrite_ = false;
- size_t processed = 0;
- int ret = op(Ssl, buffer, len, &processed);
- if (ret == 1) {
- return processed;
- }
- switch (const int status = SSL_get_error(Ssl, ret)) {
- case SSL_ERROR_ZERO_RETURN:
- return 0;
-
- case SSL_ERROR_WANT_READ:
- WantRead_ = true;
- return -EAGAIN;
-
- case SSL_ERROR_WANT_WRITE:
- WantWrite_ = true;
- return -EAGAIN;
-
- case SSL_ERROR_SYSCALL:
- return -LastSocketError();
-
- case SSL_ERROR_SSL:
- if (err) {
- *err = GetErrorStack();
- }
- return -EPROTO;
-
- default:
- Y_ABORT("unexpected SSL_get_error() status# %d", status);
- }
- }
- };
-
- TSecureSocket::TSecureSocket(TStreamSocket& socket, TSecureSocketContext::TPtr context)
- : TStreamSocket(socket.ReleaseDescriptor())
- , Context(std::move(context))
- , Impl(new TImpl(*Context->Impl, Descriptor))
- {}
-
- TSecureSocket::~TSecureSocket()
- {}
-
- TSecureSocket::EStatus TSecureSocket::Establish(bool server, bool authOnly, TString& err) const {
- return Impl->Establish(server, authOnly, err);
- }
-
- TIntrusivePtr<TStreamSocket> TSecureSocket::Detach() {
- return MakeIntrusive<TStreamSocket>(ReleaseDescriptor());
- }
-
- ssize_t TSecureSocket::Send(const void* msg, size_t len, TString *err) const {
- return Impl->Send(msg, len, err);
- }
-
- ssize_t TSecureSocket::Recv(void* msg, size_t len, TString *err) const {
- return Impl->Recv(msg, len, err);
- }
-
- ssize_t TSecureSocket::WriteV(const struct iovec* /*iov*/, int /*iovcnt*/) const {
- Y_ABORT("unsupported on SSL sockets");
- }
-
- ssize_t TSecureSocket::ReadV(const struct iovec* /*iov*/, int /*iovcnt*/) const {
- Y_ABORT("unsupported on SSL sockets");
- }
-
- TString TSecureSocket::GetCipherName() const {
- return Impl->GetCipherName();
- }
-
- int TSecureSocket::GetCipherBits() const {
- return Impl->GetCipherBits();
- }
-
- TString TSecureSocket::GetProtocolName() const {
- return Impl->GetProtocolName();
- }
-
- TString TSecureSocket::GetPeerCommonName() const {
- return Impl->GetPeerCommonName();
- }
-
- bool TSecureSocket::WantRead() const {
- return Impl->WantRead();
- }
-
- bool TSecureSocket::WantWrite() const {
- return Impl->WantWrite();
- }
-
- void TSecureSocket::Request(NActors::TPollerToken& token, bool /*read*/, bool /*write*/) {
- token.Request(WantRead(), WantWrite());
- }
-
- bool TSecureSocket::RequestReadNotificationAfterWouldBlock(NActors::TPollerToken& token) {
- bool result = false;
- if (WantRead()) {
- result |= token.RequestReadNotificationAfterWouldBlock();
- }
- if (WantWrite()) {
- result |= token.RequestWriteNotificationAfterWouldBlock();
- }
- return result;
- }
-
- bool TSecureSocket::RequestWriteNotificationAfterWouldBlock(NActors::TPollerToken& token) {
- return RequestReadNotificationAfterWouldBlock(token);
- }
-
- size_t TSecureSocket::ExpectedWriteLength() const {
- return Impl->ExpectedWriteLength();
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect_stream.h b/library/cpp/actors/interconnect/interconnect_stream.h
deleted file mode 100644
index b9ca804e0e..0000000000
--- a/library/cpp/actors/interconnect/interconnect_stream.h
+++ /dev/null
@@ -1,145 +0,0 @@
-#pragma once
-
-#include <util/generic/string.h>
-#include <util/generic/noncopyable.h>
-#include <util/network/address.h>
-#include <util/network/init.h>
-#include <util/system/defaults.h>
-
-#include "poller.h"
-
-#include "interconnect_address.h"
-
-#include <memory>
-
-#include <sys/uio.h>
-
-namespace NActors {
- class TPollerToken;
-}
-
-namespace NInterconnect {
- class TSocket: public NActors::TSharedDescriptor, public TNonCopyable {
- protected:
- TSocket(SOCKET fd);
-
- virtual ~TSocket() override;
-
- SOCKET Descriptor;
-
- virtual int GetDescriptor() override;
-
- private:
- friend class TSecureSocket;
-
- SOCKET ReleaseDescriptor() {
- return std::exchange(Descriptor, INVALID_SOCKET);
- }
-
- public:
- operator SOCKET() const {
- return Descriptor;
- }
-
- int Bind(const TAddress& addr) const;
- int Shutdown(int how) const;
- int GetConnectStatus() const;
- };
-
- class TStreamSocket: public TSocket {
- public:
- TStreamSocket(SOCKET fd);
-
- static TIntrusivePtr<TStreamSocket> Make(int domain, int *error = nullptr);
-
- virtual ssize_t Send(const void* msg, size_t len, TString *err = nullptr) const;
- virtual ssize_t Recv(void* buf, size_t len, TString *err = nullptr) const;
-
- virtual ssize_t WriteV(const struct iovec* iov, int iovcnt) const;
- virtual ssize_t ReadV(const struct iovec* iov, int iovcnt) const;
-
- int Connect(const TAddress& addr) const;
- int Connect(const NAddr::IRemoteAddr* addr) const;
- int Listen(int backlog) const;
- int Accept(TAddress& acceptedAddr) const;
-
- ssize_t GetUnsentQueueSize() const;
-
- void SetSendBufferSize(i32 len) const;
- ui32 GetSendBufferSize() const;
-
- virtual void Request(NActors::TPollerToken& token, bool read, bool write);
- virtual bool RequestReadNotificationAfterWouldBlock(NActors::TPollerToken& token);
- virtual bool RequestWriteNotificationAfterWouldBlock(NActors::TPollerToken& token);
-
- virtual size_t ExpectedWriteLength() const;
- };
-
- class TSecureSocketContext {
- class TImpl;
- THolder<TImpl> Impl;
-
- friend class TSecureSocket;
-
- public:
- TSecureSocketContext(const TString& certificate, const TString& privateKey, const TString& caFilePath,
- const TString& ciphers);
- ~TSecureSocketContext();
-
- public:
- using TPtr = std::shared_ptr<TSecureSocketContext>;
- };
-
- class TSecureSocket : public TStreamSocket {
- TSecureSocketContext::TPtr Context;
-
- class TImpl;
- THolder<TImpl> Impl;
-
- public:
- enum class EStatus {
- SUCCESS,
- ERROR,
- WANT_READ,
- WANT_WRITE,
- };
-
- public:
- TSecureSocket(TStreamSocket& socket, TSecureSocketContext::TPtr context);
- ~TSecureSocket();
-
- EStatus Establish(bool server, bool authOnly, TString& err) const;
- TIntrusivePtr<TStreamSocket> Detach();
-
- ssize_t Send(const void* msg, size_t len, TString *err) const override;
- ssize_t Recv(void* msg, size_t len, TString *err) const override;
-
- ssize_t WriteV(const struct iovec* iov, int iovcnt) const override;
- ssize_t ReadV(const struct iovec* iov, int iovcnt) const override;
-
- TString GetCipherName() const;
- int GetCipherBits() const;
- TString GetProtocolName() const;
- TString GetPeerCommonName() const;
-
- bool WantRead() const;
- bool WantWrite() const;
- void Request(NActors::TPollerToken& token, bool read, bool write) override;
- bool RequestReadNotificationAfterWouldBlock(NActors::TPollerToken& token) override;
- bool RequestWriteNotificationAfterWouldBlock(NActors::TPollerToken& token) override;
- size_t ExpectedWriteLength() const override;
- };
-
- class TDatagramSocket: public TSocket {
- public:
- typedef std::shared_ptr<TDatagramSocket> TPtr;
-
- TDatagramSocket(SOCKET fd);
-
- static TPtr Make(int domain);
-
- ssize_t SendTo(const void* msg, size_t len, const TAddress& toAddr) const;
- ssize_t RecvFrom(void* buf, size_t len, TAddress& fromAddr) const;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp b/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
deleted file mode 100644
index 1409f2cf0c..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_input_session.cpp
+++ /dev/null
@@ -1,1139 +0,0 @@
-#include "interconnect_tcp_session.h"
-#include "interconnect_tcp_proxy.h"
-#include <library/cpp/actors/core/probes.h>
-#include <library/cpp/actors/util/datetime.h>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- void TReceiveContext::TPerChannelContext::PrepareCatchBuffer() {
- size_t bytesToCatch = FetchOffset;
- for (auto it = XdcBuffers.begin(), end = it + FetchIndex; it != end; ++it) {
- bytesToCatch += it->size();
- }
-
- XdcCatchBuffer = TRcBuf::Uninitialized(bytesToCatch);
- XdcCatchBytesRead = 0;
- }
-
- void TReceiveContext::TPerChannelContext::ApplyCatchBuffer() {
- if (auto buffer = std::exchange(XdcCatchBuffer, {})) {
- Y_ABORT_UNLESS(XdcCatchBytesRead >= buffer.size());
-
- const size_t offset = XdcCatchBytesRead % buffer.size();
- const char *begin = buffer.data();
- const char *mid = begin + offset;
- const char *end = begin + buffer.size();
- Y_DEBUG_ABORT_UNLESS(begin <= mid && mid < end);
-
- TRope rope;
- rope.Insert(rope.End(), TRcBuf(TRcBuf::Piece, mid, end, buffer));
- if (begin != mid) {
- rope.Insert(rope.End(), TRcBuf(TRcBuf::Piece, begin, mid, buffer));
- }
-
- DropFront(&rope, buffer.size());
- } else {
- Y_DEBUG_ABORT_UNLESS(!XdcCatchBytesRead);
- }
- }
-
- void TReceiveContext::TPerChannelContext::FetchBuffers(ui16 channel, size_t numBytes,
- std::deque<std::tuple<ui16, TMutableContiguousSpan>>& outQ) {
- Y_DEBUG_ABORT_UNLESS(numBytes);
- auto it = XdcBuffers.begin() + FetchIndex;
- for (;;) {
- Y_DEBUG_ABORT_UNLESS(it != XdcBuffers.end());
- const TMutableContiguousSpan span = it->SubSpan(FetchOffset, numBytes);
- outQ.emplace_back(channel, span);
- numBytes -= span.size();
- FetchOffset += span.size();
- if (FetchOffset == it->size()) {
- ++FetchIndex;
- ++it;
- FetchOffset = 0;
- }
- if (!numBytes) {
- break;
- }
- }
- }
-
- void TReceiveContext::TPerChannelContext::DropFront(TRope *from, size_t numBytes) {
- Y_DEBUG_ABORT_UNLESS(from || !XdcCatchBuffer);
-
- size_t n = numBytes;
- for (auto& pendingEvent : PendingEvents) {
- const size_t numBytesInEvent = Min(n, pendingEvent.XdcSizeLeft);
- pendingEvent.XdcSizeLeft -= numBytesInEvent;
- n -= numBytesInEvent;
- if (!n) {
- break;
- }
- }
-
- while (numBytes) {
- Y_DEBUG_ABORT_UNLESS(!XdcBuffers.empty());
- auto& front = XdcBuffers.front();
- if (from) {
- from->ExtractFrontPlain(front.data(), Min(numBytes, front.size()));
- }
- if (numBytes < front.size()) {
- front = front.SubSpan(numBytes, Max<size_t>());
- if (!FetchIndex) { // we are sending this very buffer, adjust sending offset
- Y_DEBUG_ABORT_UNLESS(numBytes <= FetchOffset);
- FetchOffset -= numBytes;
- }
- break;
- } else {
- numBytes -= front.size();
- Y_DEBUG_ABORT_UNLESS(FetchIndex);
- --FetchIndex;
- XdcBuffers.pop_front();
- }
- }
- }
-
- TInputSessionTCP::TInputSessionTCP(const TActorId& sessionId, TIntrusivePtr<NInterconnect::TStreamSocket> socket,
- TIntrusivePtr<NInterconnect::TStreamSocket> xdcSocket, TIntrusivePtr<TReceiveContext> context,
- TInterconnectProxyCommon::TPtr common, std::shared_ptr<IInterconnectMetrics> metrics, ui32 nodeId,
- ui64 lastConfirmed, TDuration deadPeerTimeout, TSessionParams params)
- : SessionId(sessionId)
- , Socket(std::move(socket))
- , XdcSocket(std::move(xdcSocket))
- , Context(std::move(context))
- , Common(std::move(common))
- , NodeId(nodeId)
- , Params(std::move(params))
- , ConfirmedByInput(lastConfirmed)
- , Metrics(std::move(metrics))
- , DeadPeerTimeout(deadPeerTimeout)
- {
- Y_ABORT_UNLESS(Context);
- Y_ABORT_UNLESS(Socket);
- Y_ABORT_UNLESS(SessionId);
- Y_ABORT_UNLESS(!Params.UseExternalDataChannel == !XdcSocket);
-
- Metrics->SetClockSkewMicrosec(0);
-
- Context->UpdateState = EUpdateState::NONE;
-
-        // ensure that we do not spawn a new session while the previous one is still alive
- TAtomicBase sessions = AtomicIncrement(Context->NumInputSessions);
- Y_ABORT_UNLESS(sessions == 1, "sessions# %" PRIu64, ui64(sessions));
-
- // calculate number of bytes to catch
- for (auto& context : Context->ChannelArray) {
- context.PrepareCatchBuffer();
- }
- for (auto& [channel, context] : Context->ChannelMap) {
- context.PrepareCatchBuffer();
- }
-
- UsageHisto.fill(0);
- InputTrafficArray.fill(0);
-
- XXH3_64bits_reset(&XxhashXdcState);
- }
-
- void TInputSessionTCP::Bootstrap() {
- SetPrefix(Sprintf("InputSession %s [node %" PRIu32 "]", SelfId().ToString().data(), NodeId));
- Become(&TThis::WorkingState, DeadPeerTimeout, new TEvCheckDeadPeer);
- LOG_DEBUG_IC_SESSION("ICIS01", "InputSession created");
- LastReceiveTimestamp = TActivationContext::Monotonic();
- TActivationContext::Send(new IEventHandle(EvResumeReceiveData, 0, SelfId(), {}, nullptr, 0));
- }
-
- STATEFN(TInputSessionTCP::WorkingState) {
- std::unique_ptr<IEventBase> termEv;
-
- try {
- WorkingStateImpl(ev);
- } catch (const TExReestablishConnection& ex) {
- LOG_DEBUG_IC_SESSION("ICIS09", "ReestablishConnection, reason# %s", ex.Reason.ToString().data());
- termEv = std::make_unique<TEvSocketDisconnect>(std::move(ex.Reason));
- } catch (const TExDestroySession& ex) {
- LOG_DEBUG_IC_SESSION("ICIS13", "DestroySession, reason# %s", ex.Reason.ToString().data());
- termEv.reset(TInterconnectSessionTCP::NewEvTerminate(std::move(ex.Reason)));
- }
-
- if (termEv) {
- AtomicDecrement(Context->NumInputSessions);
- Send(SessionId, termEv.release());
- PassAway();
- Socket.Reset();
- }
- }
-
- void TInputSessionTCP::CloseInputSession() {
- CloseInputSessionRequested = true;
- ReceiveData();
- }
-
- void TInputSessionTCP::Handle(TEvPollerReady::TPtr ev) {
- auto *msg = ev->Get();
-
- bool useful = false;
- bool writeBlocked = false;
-
- if (msg->Socket == Socket) {
- useful = std::exchange(Context->MainReadPending, false);
- writeBlocked = Context->MainWriteBlocked;
- } else if (msg->Socket == XdcSocket) {
- useful = std::exchange(Context->XdcReadPending, false);
- writeBlocked = Context->XdcWriteBlocked;
- }
-
- if (useful) {
- Metrics->IncUsefulReadWakeups();
- } else if (!ev->Cookie) {
- Metrics->IncSpuriousReadWakeups();
- }
-
- ReceiveData();
-
- if (Params.Encryption && writeBlocked && ev->Sender != SessionId) {
- Send(SessionId, ev->Release().Release());
- }
- }
-
- void TInputSessionTCP::Handle(TEvPollerRegisterResult::TPtr ev) {
- auto *msg = ev->Get();
- if (msg->Socket == Socket) {
- PollerToken = std::move(msg->PollerToken);
- } else if (msg->Socket == XdcSocket) {
- XdcPollerToken = std::move(msg->PollerToken);
- }
- ReceiveData();
- }
-
- void TInputSessionTCP::ReceiveData() {
- TTimeLimit limit(GetMaxCyclesPerEvent());
- ui64 numDataBytes = 0;
-
- LOG_DEBUG_IC_SESSION("ICIS02", "ReceiveData called");
-
- bool enoughCpu = true;
- bool progress = false;
-
- for (;;) {
- if (progress && limit.CheckExceeded()) {
-                // we have hit the processing time limit for this message; send a notification to resume processing a bit later
- TActivationContext::Send(new IEventHandle(EvResumeReceiveData, 0, SelfId(), {}, nullptr, 0));
- enoughCpu = false;
- ++CpuStarvationEvents;
- break;
- }
-
- // clear iteration progress
- progress = false;
-
- // try to process already fetched part from IncomingData
- switch (State) {
- case EState::HEADER:
- if (IncomingData.GetSize() < sizeof(TTcpPacketHeader_v2)) {
- break;
- } else {
- ProcessHeader();
- progress = true;
- continue;
- }
-
- case EState::PAYLOAD:
- Y_DEBUG_ABORT_UNLESS(PayloadSize);
- if (!IncomingData) {
- break;
- } else {
- ProcessPayload(&numDataBytes);
- progress = true;
- continue;
- }
- }
-
- // try to read more data into buffers
- progress |= ReadMore();
- progress |= ReadXdc(&numDataBytes);
-
- if (!progress) { // no progress was made during this iteration
- PreallocateBuffers();
- break;
- }
- }
-
- SetEnoughCpu(enoughCpu);
-
- // calculate ping time
- auto it = std::min_element(PingQ.begin(), PingQ.end());
- const TDuration ping = it != PingQ.end() ? *it : TDuration::Zero();
-
- // send update to main session actor if something valuable has changed
- if (!UpdateFromInputSession) {
- UpdateFromInputSession = MakeHolder<TEvUpdateFromInputSession>(ConfirmedByInput, numDataBytes, ping);
- } else {
- Y_ABORT_UNLESS(ConfirmedByInput >= UpdateFromInputSession->ConfirmedByInput);
- UpdateFromInputSession->ConfirmedByInput = ConfirmedByInput;
- UpdateFromInputSession->NumDataBytes += numDataBytes;
- UpdateFromInputSession->Ping = Min(UpdateFromInputSession->Ping, ping);
- }
-
- for (;;) {
- EUpdateState state = Context->UpdateState;
- EUpdateState next;
-
- // calculate next state
- switch (state) {
- case EUpdateState::NONE:
- case EUpdateState::CONFIRMING:
-                    // we have no inflight messages to the session actor; we will issue one a bit later
- next = EUpdateState::INFLIGHT;
- break;
-
- case EUpdateState::INFLIGHT:
- case EUpdateState::INFLIGHT_AND_PENDING:
-                    // we already have an inflight message, so we will keep the pending message and the session actor
-                    // will issue TEvConfirmUpdate to kick processing
- next = EUpdateState::INFLIGHT_AND_PENDING;
- break;
- }
-
- if (Context->UpdateState.compare_exchange_weak(state, next)) {
- switch (next) {
- case EUpdateState::INFLIGHT:
- Send(SessionId, UpdateFromInputSession.Release());
- break;
-
- case EUpdateState::INFLIGHT_AND_PENDING:
- Y_ABORT_UNLESS(UpdateFromInputSession);
- break;
-
- default:
- Y_ABORT("unexpected state");
- }
- break;
- }
- }
-
- for (size_t channel = 0; channel < InputTrafficArray.size(); ++channel) {
- if (auto& value = InputTrafficArray[channel]) {
- Metrics->AddInputChannelsIncomingTraffic(channel, std::exchange(value, 0));
- }
- }
- for (auto& [channel, value] : std::exchange(InputTrafficMap, {})) {
- if (value) {
- Metrics->AddInputChannelsIncomingTraffic(channel, std::exchange(value, 0));
- }
- }
- }
-
- void TInputSessionTCP::ProcessHeader() {
- TTcpPacketHeader_v2 header;
- const bool success = IncomingData.ExtractFrontPlain(&header, sizeof(header));
- Y_ABORT_UNLESS(success);
- PayloadSize = header.PayloadLength;
- const ui64 serial = header.Serial;
- const ui64 confirm = header.Confirm;
- if (!Params.Encryption) {
- ChecksumExpected = std::exchange(header.Checksum, 0);
- if (Params.UseXxhash) {
- XXH3_64bits_reset(&XxhashState);
- XXH3_64bits_update(&XxhashState, &header, sizeof(header));
- if (!PayloadSize) {
- Checksum = XXH3_64bits_digest(&XxhashState);
- }
- } else {
- Checksum = Crc32cExtendMSanCompatible(0, &header, sizeof(header)); // start calculating checksum now
- }
- if (!PayloadSize && Checksum != ChecksumExpected) {
- LOG_ERROR_IC_SESSION("ICIS10", "payload checksum error");
- throw TExReestablishConnection{TDisconnectReason::ChecksumError()};
- }
- }
- if (PayloadSize >= 65536) {
- LOG_CRIT_IC_SESSION("ICIS07", "payload is way too big");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
- if (ConfirmedByInput < confirm) {
- ConfirmedByInput = confirm;
- if (AtomicGet(Context->ControlPacketId) <= confirm && !NewPingProtocol) {
- ui64 sendTime = AtomicGet(Context->ControlPacketSendTimer);
- TDuration duration = CyclesToDuration(GetCycleCountFast() - sendTime);
- const auto durationUs = duration.MicroSeconds();
- Metrics->UpdatePingTimeHistogram(durationUs);
- PingQ.push_back(duration);
- if (PingQ.size() > 16) {
- PingQ.pop_front();
- }
- AtomicSet(Context->ControlPacketId, 0ULL);
- }
- }
- if (PayloadSize) {
- const ui64 expectedMin = Context->GetLastPacketSerialToConfirm() + 1;
- const ui64 expectedMax = Context->LastProcessedSerial + 1;
- Y_DEBUG_ABORT_UNLESS(expectedMin <= expectedMax);
- if (CurrentSerial ? serial != CurrentSerial + 1 : (serial == 0 || serial > expectedMin)) {
- LOG_CRIT_IC_SESSION("ICIS06", "%s", TString(TStringBuilder()
- << "packet serial number mismatch"
- << " Serial# " << serial
- << " ExpectedMin# " << expectedMin
- << " ExpectedMax# " << expectedMax
- << " CurrentSerial# " << CurrentSerial
- ).data());
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
- IgnorePayload = serial != expectedMax;
- CurrentSerial = serial;
- State = EState::PAYLOAD;
- Y_DEBUG_ABORT_UNLESS(!Payload);
- } else if (serial & TTcpPacketBuf::PingRequestMask) {
- Send(SessionId, new TEvProcessPingRequest(serial & ~TTcpPacketBuf::PingRequestMask));
- } else if (serial & TTcpPacketBuf::PingResponseMask) {
- const ui64 sent = serial & ~TTcpPacketBuf::PingResponseMask;
- const ui64 received = GetCycleCountFast();
- HandlePingResponse(CyclesToDuration(received - sent));
- } else if (serial & TTcpPacketBuf::ClockMask) {
- HandleClock(TInstant::MicroSeconds(serial & ~TTcpPacketBuf::ClockMask));
- }
- if (!PayloadSize) {
- ++PacketsReadFromSocket;
- }
- }
-
- void TInputSessionTCP::ProcessPayload(ui64 *numDataBytes) {
- const size_t numBytes = Min(PayloadSize, IncomingData.GetSize());
- IncomingData.ExtractFront(numBytes, &Payload);
- *numDataBytes += numBytes;
- PayloadSize -= numBytes;
- if (PayloadSize) {
- return; // there is still some data to receive in the Payload rope
- }
- InboundPacketQ.push_back(TInboundPacket{CurrentSerial, 0});
- State = EState::HEADER;
- if (!Params.Encryption) { // see if we are checksumming packet body
- for (const auto&& [data, size] : Payload) {
- if (Params.UseXxhash) {
- XXH3_64bits_update(&XxhashState, data, size);
- } else {
- Checksum = Crc32cExtendMSanCompatible(Checksum, data, size);
- }
- }
- if (Params.UseXxhash) {
- Checksum = XXH3_64bits_digest(&XxhashState);
- }
- if (Checksum != ChecksumExpected) { // validate payload checksum
- LOG_ERROR_IC_SESSION("ICIS04", "payload checksum error");
- throw TExReestablishConnection{TDisconnectReason::ChecksumError()};
- }
- }
- while (Payload) {
- // extract channel part header from the payload stream
- TChannelPart part;
- if (!Payload.ExtractFrontPlain(&part, sizeof(part))) {
- LOG_CRIT_IC_SESSION("ICIS14", "missing TChannelPart header in payload");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- } else if (Payload.GetSize() < part.Size) {
- LOG_CRIT_IC_SESSION("ICIS08", "payload format error ChannelPart# %s", part.ToString().data());
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
-
- const ui16 channel = part.GetChannel();
- auto& context = GetPerChannelContext(channel);
- auto& pendingEvent = context.PendingEvents.empty() || context.PendingEvents.back().EventData
- ? context.PendingEvents.emplace_back()
- : context.PendingEvents.back();
-
- if (part.IsXdc()) { // external data channel command packet
- XdcCommands.resize(part.Size);
- const bool success = Payload.ExtractFrontPlain(XdcCommands.data(), XdcCommands.size());
- Y_ABORT_UNLESS(success);
- ProcessXdcCommand(channel, context);
- } else if (IgnorePayload) { // throw payload out
- Payload.EraseFront(part.Size);
- } else if (!part.IsLastPart()) { // just ordinary inline event data
- Payload.ExtractFront(part.Size, &pendingEvent.InternalPayload);
- } else { // event final block
- TEventDescr2 v2;
-
- if (part.Size != sizeof(v2)) {
- LOG_CRIT_IC_SESSION("ICIS11", "incorrect last part of an event");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
-
- const bool success = Payload.ExtractFrontPlain(&v2, sizeof(v2));
- Y_ABORT_UNLESS(success);
-
- pendingEvent.EventData = TEventData{
- v2.Type,
- v2.Flags,
- v2.Recipient,
- v2.Sender,
- v2.Cookie,
- NWilson::TTraceId(v2.TraceId),
- v2.Checksum,
-#if IC_FORCE_HARDENED_PACKET_CHECKS
- v2.Len
-#endif
- };
-
- Metrics->IncInputChannelsIncomingEvents(channel);
- ProcessEvents(context);
- }
-
- const ui32 traffic = sizeof(part) + part.Size;
- if (channel < InputTrafficArray.size()) {
- InputTrafficArray[channel] += traffic;
- } else {
- InputTrafficMap[channel] += traffic;
- }
- }
-
- // mark packet as processed
- if (IgnorePayload) {
- Y_DEBUG_ABORT_UNLESS(CurrentSerial <= Context->LastProcessedSerial);
- } else {
- ++Context->LastProcessedSerial;
- Y_DEBUG_ABORT_UNLESS(CurrentSerial == Context->LastProcessedSerial);
- }
- XdcCatchStream.Ready = Context->LastProcessedSerial == CurrentSerial;
- ApplyXdcCatchStream();
- ProcessInboundPacketQ(0);
-
- ++PacketsReadFromSocket;
- ++DataPacketsReadFromSocket;
- IgnoredDataPacketsFromSocket += IgnorePayload;
- }
-
- void TInputSessionTCP::ProcessInboundPacketQ(ui64 numXdcBytesRead) {
- for (; !InboundPacketQ.empty(); InboundPacketQ.pop_front()) {
- auto& front = InboundPacketQ.front();
-
- const size_t n = Min(numXdcBytesRead, front.XdcUnreadBytes);
- front.XdcUnreadBytes -= n;
- numXdcBytesRead -= n;
-
- if (front.XdcUnreadBytes) { // we haven't finished this packet yet
- Y_ABORT_UNLESS(!numXdcBytesRead);
- break;
- }
-
- Y_DEBUG_ABORT_UNLESS(front.Serial + InboundPacketQ.size() - 1 <= Context->LastProcessedSerial,
- "front.Serial# %" PRIu64 " LastProcessedSerial# %" PRIu64 " InboundPacketQ.size# %zu",
- front.Serial, Context->LastProcessedSerial, InboundPacketQ.size());
-
- if (Context->GetLastPacketSerialToConfirm() < front.Serial && !Context->AdvanceLastPacketSerialToConfirm(front.Serial)) {
- throw TExReestablishConnection{TDisconnectReason::NewSession()};
- }
- }
- }
-
- void TInputSessionTCP::ProcessXdcCommand(ui16 channel, TReceiveContext::TPerChannelContext& context) {
- const char *ptr = XdcCommands.data();
- const char *end = ptr + XdcCommands.size();
- while (ptr != end) {
- switch (const auto cmd = static_cast<EXdcCommand>(*ptr++)) {
- case EXdcCommand::DECLARE_SECTION:
- case EXdcCommand::DECLARE_SECTION_INLINE: {
- // extract and validate command parameters
- const ui64 headroom = NInterconnect::NDetail::DeserializeNumber(&ptr, end);
- const ui64 size = NInterconnect::NDetail::DeserializeNumber(&ptr, end);
- const ui64 tailroom = NInterconnect::NDetail::DeserializeNumber(&ptr, end);
- const ui64 alignment = NInterconnect::NDetail::DeserializeNumber(&ptr, end);
- if (headroom == Max<ui64>() || size == Max<ui64>() || tailroom == Max<ui64>() || alignment == Max<ui64>()) {
- LOG_CRIT_IC_SESSION("ICIS00", "XDC command format error");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
-
- if (!IgnorePayload) { // process command if packet is being applied
- auto& pendingEvent = context.PendingEvents.back();
- const bool isInline = cmd == EXdcCommand::DECLARE_SECTION_INLINE;
- pendingEvent.SerializationInfo.Sections.push_back(TEventSectionInfo{headroom, size, tailroom,
- alignment, isInline});
-
- Y_ABORT_UNLESS(!isInline || Params.UseXdcShuffle);
- if (!isInline) {
- // allocate buffer and push it into the payload
- auto buffer = TRcBuf::Uninitialized(size, headroom, tailroom);
- if (size) {
- context.XdcBuffers.push_back(buffer.GetContiguousSpanMut());
- }
- pendingEvent.ExternalPayload.Insert(pendingEvent.ExternalPayload.End(), TRope(std::move(buffer)));
- pendingEvent.XdcSizeLeft += size;
- ++XdcSections;
- }
- }
- continue;
- }
-
- case EXdcCommand::PUSH_DATA: {
- const size_t cmdLen = sizeof(ui16) + (Params.Encryption ? 0 : sizeof(ui32));
- if (static_cast<size_t>(end - ptr) < cmdLen) {
- LOG_CRIT_IC_SESSION("ICIS18", "XDC command format error");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
-
- auto size = *reinterpret_cast<const ui16*>(ptr);
- if (!size) {
- LOG_CRIT_IC_SESSION("ICIS03", "XDC empty payload");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
-
- if (!Params.Encryption) {
- const ui32 checksumExpected = *reinterpret_cast<const ui32*>(ptr + sizeof(ui16));
- XdcChecksumQ.emplace_back(size, checksumExpected);
- }
-
- // account channel and number of bytes in XDC for this packet
- auto& packet = InboundPacketQ.back();
- packet.XdcUnreadBytes += size;
-
- if (IgnorePayload) {
- // this packet was already marked as 'processed', all commands had been executed, but we must
- // parse XDC stream from this packet correctly
- const bool apply = Context->GetLastPacketSerialToConfirm() < CurrentSerial &&
- GetPerChannelContext(channel).XdcCatchBuffer;
- XdcCatchStream.BytesPending += size;
- XdcCatchStream.Markup.emplace_back(channel, apply, size);
- } else {
- // find buffers and acquire data buffer pointers
- context.FetchBuffers(channel, size, XdcInputQ);
- }
-
- ptr += cmdLen;
- ++XdcRefs;
- continue;
- }
- }
-
- LOG_CRIT_IC_SESSION("ICIS15", "unexpected XDC command");
- throw TExDestroySession{TDisconnectReason::FormatError()};
- }
- }
-
- void TInputSessionTCP::ProcessEvents(TReceiveContext::TPerChannelContext& context) {
- for (; !context.PendingEvents.empty(); context.PendingEvents.pop_front()) {
- auto& pendingEvent = context.PendingEvents.front();
- if (!pendingEvent.EventData || pendingEvent.XdcSizeLeft) {
- break; // event is not ready yet
- }
- auto& descr = *pendingEvent.EventData;
-
- // create aggregated payload
- TRope payload;
- if (!pendingEvent.SerializationInfo.Sections.empty()) {
- // unshuffle inline and external payloads into single event content
- TRope *prev = nullptr;
- size_t accumSize = 0;
- for (const auto& s : pendingEvent.SerializationInfo.Sections) {
- TRope *rope = s.IsInline
- ? &pendingEvent.InternalPayload
- : &pendingEvent.ExternalPayload;
- if (rope != prev) {
- if (accumSize) {
- prev->ExtractFront(accumSize, &payload);
- }
- prev = rope;
- accumSize = 0;
- }
- accumSize += s.Size;
- }
- if (accumSize) {
- prev->ExtractFront(accumSize, &payload);
- }
-
- if (pendingEvent.InternalPayload || pendingEvent.ExternalPayload) {
- LOG_CRIT_IC_SESSION("ICIS19", "unprocessed payload remains after shuffling"
- " Type# 0x%08" PRIx32 " InternalPayload.size# %zu ExternalPayload.size# %zu",
- descr.Type, pendingEvent.InternalPayload.size(), pendingEvent.ExternalPayload.size());
- Y_DEBUG_ABORT_UNLESS(false);
- throw TExReestablishConnection{TDisconnectReason::FormatError()};
- }
- }
-
- // we add any remains of internal payload to the end
- if (auto& rope = pendingEvent.InternalPayload) {
- rope.ExtractFront(rope.size(), &payload);
- }
- // and ensure there is no unprocessed external payload
- Y_ABORT_UNLESS(!pendingEvent.ExternalPayload);
-
-#if IC_FORCE_HARDENED_PACKET_CHECKS
- if (descr.Len != payload.GetSize()) {
- LOG_CRIT_IC_SESSION("ICIS17", "event length mismatch Type# 0x%08" PRIx32 " received# %zu expected# %" PRIu32,
- descr.Type, payload.GetSize(), descr.Len);
- throw TExReestablishConnection{TDisconnectReason::ChecksumError()};
- }
-#endif
- if (descr.Checksum) {
- ui32 checksum = 0;
- for (const auto&& [data, size] : payload) {
- checksum = Crc32cExtendMSanCompatible(checksum, data, size);
- }
- if (checksum != descr.Checksum) {
- LOG_CRIT_IC_SESSION("ICIS05", "event checksum error Type# 0x%08" PRIx32, descr.Type);
- throw TExReestablishConnection{TDisconnectReason::ChecksumError()};
- }
- }
- pendingEvent.SerializationInfo.IsExtendedFormat = descr.Flags & IEventHandle::FlagExtendedFormat;
- auto ev = std::make_unique<IEventHandle>(SessionId,
- descr.Type,
- descr.Flags & ~IEventHandle::FlagExtendedFormat,
- descr.Recipient,
- descr.Sender,
- MakeIntrusive<TEventSerializedData>(std::move(payload), std::move(pendingEvent.SerializationInfo)),
- descr.Cookie,
- Params.PeerScopeId,
- std::move(descr.TraceId));
- if (Common->EventFilter && !Common->EventFilter->CheckIncomingEvent(*ev, Common->LocalScopeId)) {
- LOG_CRIT_IC_SESSION("ICIC03", "Event dropped due to scope error LocalScopeId# %s PeerScopeId# %s Type# 0x%08" PRIx32,
- ScopeIdToString(Common->LocalScopeId).data(), ScopeIdToString(Params.PeerScopeId).data(), descr.Type);
- ev.reset();
- }
- if (ev) {
- TActivationContext::Send(ev.release());
- }
- }
- }
-
- void TInputSessionTCP::HandleConfirmUpdate() {
- for (;;) {
- switch (EUpdateState state = Context->UpdateState) {
- case EUpdateState::NONE:
- case EUpdateState::INFLIGHT:
- case EUpdateState::INFLIGHT_AND_PENDING:
- // here we may have a race
- return;
-
- case EUpdateState::CONFIRMING:
- Y_ABORT_UNLESS(UpdateFromInputSession);
- if (Context->UpdateState.compare_exchange_weak(state, EUpdateState::INFLIGHT)) {
- Send(SessionId, UpdateFromInputSession.Release());
- return;
- }
- }
- }
- }
-
- ssize_t TInputSessionTCP::Read(NInterconnect::TStreamSocket& socket, const TPollerToken::TPtr& token,
- bool *readPending, const TIoVec *iov, size_t num) {
- for (;;) {
- ssize_t recvres = 0;
- TString err;
- LWPROBE_IF_TOO_LONG(SlowICReadFromSocket, ms) {
- do {
- const ui64 begin = GetCycleCountFast();
- if (num == 1) {
- recvres = socket.Recv(iov->Data, iov->Size, &err);
- } else {
- recvres = socket.ReadV(reinterpret_cast<const iovec*>(iov), num);
- }
- const ui64 end = GetCycleCountFast();
- Metrics->IncRecvSyscalls((end - begin) * 1'000'000 / GetCyclesPerMillisecond());
- } while (recvres == -EINTR);
- }
-
- LOG_DEBUG_IC_SESSION("ICIS12", "Read recvres# %zd num# %zu err# %s", recvres, num, err.data());
-
- if (recvres <= 0 || CloseInputSessionRequested) {
- if ((-recvres != EAGAIN && -recvres != EWOULDBLOCK) || CloseInputSessionRequested) {
- TString message = CloseInputSessionRequested ? "connection closed by debug command"
- : recvres == 0 ? "connection closed by peer"
- : err ? err
- : Sprintf("socket: %s", strerror(-recvres));
- LOG_NOTICE_NET(NodeId, "%s", message.data());
- throw TExReestablishConnection{CloseInputSessionRequested ? TDisconnectReason::Debug() :
- recvres == 0 ? TDisconnectReason::EndOfStream() : TDisconnectReason::FromErrno(-recvres)};
- } else if (token && !*readPending) {
- if (socket.RequestReadNotificationAfterWouldBlock(*token)) {
- continue; // can try again
- } else {
- *readPending = true;
- }
- }
- return -1;
- }
-
- return recvres;
- }
- }
-
- constexpr ui64 GetUsageCountClearMask(size_t items, int bits) {
- ui64 mask = 0;
- for (size_t i = 0; i < items; ++i) {
- mask |= ui64(1 << bits - 2) << i * bits;
- }
- return mask;
- }
-
- bool TInputSessionTCP::ReadMore() {
- PreallocateBuffers();
-
- TStackVec<TIoVec, MaxBuffers> buffs;
- for (auto& item : Buffers) {
- buffs.push_back({item.GetDataMut(), item.size()});
- if (Params.Encryption) {
- break; // do not put more than one buffer in queue to prevent using ReadV
- }
-#ifdef _win_
- break; // do the same thing for Windows build
-#endif
- }
-
- ssize_t recvres = Read(*Socket, PollerToken, &Context->MainReadPending, buffs.data(), buffs.size());
- if (recvres == -1) {
- return false;
- }
-
- Y_ABORT_UNLESS(recvres > 0);
- Metrics->AddTotalBytesRead(recvres);
- BytesReadFromSocket += recvres;
-
- size_t numBuffersCovered = 0;
-
- while (recvres) {
- Y_ABORT_UNLESS(!Buffers.empty());
- auto& buffer = Buffers.front();
- const size_t bytes = Min<size_t>(recvres, buffer.size());
- recvres -= bytes;
- if (const size_t newSize = buffer.size() - bytes) {
- IncomingData.Insert(IncomingData.End(), TRcBuf(TRcBuf::Piece, buffer.data(), bytes, buffer));
- buffer.TrimFront(newSize);
- } else {
- IncomingData.Insert(IncomingData.End(), std::move(buffer));
- Buffers.pop_front();
- }
- ++numBuffersCovered;
- }
-
- if (Buffers.empty()) { // we have read all the data, increase number of buffers
- CurrentBuffers = Min(CurrentBuffers * 2, MaxBuffers);
- } else {
- Y_DEBUG_ABORT_UNLESS(numBuffersCovered);
-
- const size_t index = numBuffersCovered - 1;
-
- static constexpr ui64 itemMask = (1 << BitsPerUsageCount) - 1;
-
- const size_t word = index / ItemsPerUsageCount;
- const size_t offset = index % ItemsPerUsageCount * BitsPerUsageCount;
-
- if ((UsageHisto[word] >> offset & itemMask) == itemMask) { // time to shift
- for (ui64& w : UsageHisto) {
- static constexpr ui64 mask = GetUsageCountClearMask(ItemsPerUsageCount, BitsPerUsageCount);
- w = (w & mask) >> 1;
- }
- }
- UsageHisto[word] += ui64(1) << offset;
-
- while (CurrentBuffers > 1) {
- const size_t index = CurrentBuffers - 1;
- if (UsageHisto[index / ItemsPerUsageCount] >> index % ItemsPerUsageCount * BitsPerUsageCount & itemMask) {
- break;
- } else {
- --CurrentBuffers;
- }
- }
- }
-
- LastReceiveTimestamp = TActivationContext::Monotonic();
-
- return true;
- }
-
- bool TInputSessionTCP::ReadXdcCatchStream(ui64 *numDataBytes) {
- bool progress = false;
-
- while (XdcCatchStream.BytesPending) {
- if (!XdcCatchStream.Buffer) {
- XdcCatchStream.Buffer = TRcBuf::Uninitialized(64 * 1024);
- }
-
- const size_t numBytesToRead = Min<size_t>(XdcCatchStream.BytesPending, XdcCatchStream.Buffer.size());
-
- TIoVec iov{XdcCatchStream.Buffer.GetDataMut(), numBytesToRead};
- ssize_t recvres = Read(*XdcSocket, XdcPollerToken, &Context->XdcReadPending, &iov, 1);
- if (recvres == -1) {
- return progress;
- }
-
- HandleXdcChecksum({XdcCatchStream.Buffer.data(), static_cast<size_t>(recvres)});
-
- XdcCatchStream.BytesPending -= recvres;
- XdcCatchStream.BytesProcessed += recvres;
- *numDataBytes += recvres;
- BytesReadFromXdcSocket += recvres;
-
- // scatter read data
- const char *in = XdcCatchStream.Buffer.data();
- while (recvres) {
- Y_DEBUG_ABORT_UNLESS(!XdcCatchStream.Markup.empty());
- auto& [channel, apply, bytes] = XdcCatchStream.Markup.front();
- size_t bytesInChannel = Min<size_t>(recvres, bytes);
- bytes -= bytesInChannel;
- recvres -= bytesInChannel;
-
- if (apply) {
- auto& context = GetPerChannelContext(channel);
- while (bytesInChannel) {
- const size_t offset = context.XdcCatchBytesRead % context.XdcCatchBuffer.size();
- TMutableContiguousSpan out = context.XdcCatchBuffer.GetContiguousSpanMut().SubSpan(offset, bytesInChannel);
- memcpy(out.data(), in, out.size());
- context.XdcCatchBytesRead += out.size();
- in += out.size();
- bytesInChannel -= out.size();
- }
- } else {
- in += bytesInChannel;
- }
-
- if (!bytes) {
- XdcCatchStream.Markup.pop_front();
- }
- }
-
- progress = true;
- }
-
- ApplyXdcCatchStream();
-
- return progress;
- }
-
- void TInputSessionTCP::ApplyXdcCatchStream() {
- if (!XdcCatchStream.Applied && XdcCatchStream.Ready && !XdcCatchStream.BytesPending) {
- Y_DEBUG_ABORT_UNLESS(XdcCatchStream.Markup.empty());
-
- auto process = [&](auto& context) {
- context.ApplyCatchBuffer();
- ProcessEvents(context);
- };
- for (auto& context : Context->ChannelArray) {
- process(context);
- }
- for (auto& [channel, context] : Context->ChannelMap) {
- process(context);
- }
-
- ProcessInboundPacketQ(XdcCatchStream.BytesProcessed);
-
- XdcCatchStream.Buffer = {};
- XdcCatchStream.Applied = true;
- }
- }
-
- bool TInputSessionTCP::ReadXdc(ui64 *numDataBytes) {
- bool progress = ReadXdcCatchStream(numDataBytes);
-
- // exit if we have no work to do
- if (XdcInputQ.empty() || !XdcCatchStream.Applied) {
- return progress;
- }
-
- TStackVec<TIoVec, 64> buffs;
- size_t size = 0;
- for (auto& [channel, span] : XdcInputQ) {
- buffs.push_back(TIoVec{span.data(), span.size()});
- size += span.size();
- if (buffs.size() == 64 || size >= 1024 * 1024 || Params.Encryption) {
- break;
- }
- }
-
- ssize_t recvres = Read(*XdcSocket, XdcPollerToken, &Context->XdcReadPending, buffs.data(), buffs.size());
- if (recvres == -1) {
- return progress;
- }
-
- // calculate stream checksums
- {
- size_t bytesToChecksum = recvres;
- for (const auto& iov : buffs) {
- const size_t n = Min(bytesToChecksum, iov.Size);
- HandleXdcChecksum({static_cast<const char*>(iov.Data), n});
- bytesToChecksum -= n;
- if (!bytesToChecksum) {
- break;
- }
- }
- }
-
- Y_ABORT_UNLESS(recvres > 0);
- Metrics->AddTotalBytesRead(recvres);
- *numDataBytes += recvres;
- BytesReadFromXdcSocket += recvres;
-
- // cut the XdcInputQ deque
- for (size_t bytesToCut = recvres; bytesToCut; ) {
- Y_ABORT_UNLESS(!XdcInputQ.empty());
- auto& [channel, span] = XdcInputQ.front();
- size_t n = Min(bytesToCut, span.size());
- bytesToCut -= n;
- if (n == span.size()) {
- XdcInputQ.pop_front();
- } else {
- span = span.SubSpan(n, Max<size_t>());
- Y_ABORT_UNLESS(!bytesToCut);
- }
-
- Y_DEBUG_ABORT_UNLESS(n);
- auto& context = GetPerChannelContext(channel);
- context.DropFront(nullptr, n);
- ProcessEvents(context);
- }
-
- // drop fully processed inbound packets
- ProcessInboundPacketQ(recvres);
-
- LastReceiveTimestamp = TActivationContext::Monotonic();
-
- return true;
- }
-
- void TInputSessionTCP::HandleXdcChecksum(TContiguousSpan span) {
- if (Params.Encryption) {
- return;
- }
- while (span.size()) {
- Y_DEBUG_ABORT_UNLESS(!XdcChecksumQ.empty());
- auto& [size, expected] = XdcChecksumQ.front();
- const size_t n = Min<size_t>(size, span.size());
- if (Params.UseXxhash) {
- XXH3_64bits_update(&XxhashXdcState, span.data(), n);
- } else {
- XdcCurrentChecksum = Crc32cExtendMSanCompatible(XdcCurrentChecksum, span.data(), n);
- }
- span = span.SubSpan(n, Max<size_t>());
- size -= n;
- if (!size) {
- if (Params.UseXxhash) {
- XdcCurrentChecksum = XXH3_64bits_digest(&XxhashXdcState);
- XXH3_64bits_reset(&XxhashXdcState);
- }
- if (XdcCurrentChecksum != expected) {
- LOG_ERROR_IC_SESSION("ICIS16", "payload checksum error");
- throw TExReestablishConnection{TDisconnectReason::ChecksumError()};
- }
- XdcChecksumQ.pop_front();
- XdcCurrentChecksum = 0;
- }
- }
- }
-
- void TInputSessionTCP::PreallocateBuffers() {
-        // ensure that the queue contains exactly CurrentBuffers preallocated buffers
- LWPROBE_IF_TOO_LONG(SlowICReadLoopAdjustSize, ms) {
- while (Buffers.size() < CurrentBuffers) {
- Buffers.push_back(TRcBuf::Uninitialized(Common->Settings.PreallocatedBufferSize));
- }
- }
- }
-
- void TInputSessionTCP::PassAway() {
- Metrics->SetClockSkewMicrosec(0);
- TActorBootstrapped::PassAway();
- }
-
- void TInputSessionTCP::HandleCheckDeadPeer() {
- const TMonotonic now = TActivationContext::Monotonic();
- if (now >= LastReceiveTimestamp + DeadPeerTimeout) {
- ReceiveData();
- if (Socket && now >= LastReceiveTimestamp + DeadPeerTimeout) {
- // nothing has changed, terminate session
- throw TExDestroySession{TDisconnectReason::DeadPeer()};
- }
- }
- Schedule(LastReceiveTimestamp + DeadPeerTimeout, new TEvCheckDeadPeer);
- }
-
- void TInputSessionTCP::HandlePingResponse(TDuration passed) {
- PingQ.push_back(passed);
- if (PingQ.size() > 16) {
- PingQ.pop_front();
- }
- const TDuration ping = *std::min_element(PingQ.begin(), PingQ.end());
- const auto pingUs = ping.MicroSeconds();
- Context->PingRTT_us = pingUs;
- NewPingProtocol = true;
- Metrics->UpdatePingTimeHistogram(pingUs);
- }
-
- void TInputSessionTCP::HandleClock(TInstant clock) {
- const TInstant here = TInstant::Now(); // wall clock
- const TInstant remote = clock + TDuration::MicroSeconds(Context->PingRTT_us / 2);
- i64 skew = remote.MicroSeconds() - here.MicroSeconds();
- SkewQ.push_back(skew);
- if (SkewQ.size() > 16) {
- SkewQ.pop_front();
- }
- i64 clockSkew = SkewQ.front();
- for (i64 skew : SkewQ) {
- if (abs(skew) < abs(clockSkew)) {
- clockSkew = skew;
- }
- }
- Context->ClockSkew_us = clockSkew;
- Metrics->SetClockSkewMicrosec(clockSkew);
- }
-
- TReceiveContext::TPerChannelContext& TInputSessionTCP::GetPerChannelContext(ui16 channel) const {
- return channel < std::size(Context->ChannelArray)
- ? Context->ChannelArray[channel]
- : Context->ChannelMap[channel];
- }
-
- void TInputSessionTCP::GenerateHttpInfo(NMon::TEvHttpInfoRes::TPtr ev) {
- TStringStream str;
- ev->Get()->Output(str);
-
- HTML(str) {
- DIV_CLASS("panel panel-info") {
- DIV_CLASS("panel-heading") {
- str << "Input Session";
- }
- DIV_CLASS("panel-body") {
- TABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Sensor";
- }
- TABLEH() {
- str << "Value";
- }
- }
- }
-#define MON_VAR(KEY) \
- TABLER() { \
- TABLED() { str << #KEY; } \
- TABLED() { str << (KEY); } \
- }
-
- TABLEBODY() {
- MON_VAR(BytesReadFromSocket)
- MON_VAR(PacketsReadFromSocket)
- MON_VAR(DataPacketsReadFromSocket)
- MON_VAR(IgnoredDataPacketsFromSocket)
-
- MON_VAR(BytesReadFromXdcSocket)
- MON_VAR(XdcSections)
- MON_VAR(XdcRefs)
- MON_VAR(CpuStarvationEvents)
-
- MON_VAR(PayloadSize)
- MON_VAR(InboundPacketQ.size())
- MON_VAR(XdcInputQ.size())
- MON_VAR(Buffers.size())
- MON_VAR(IncomingData.GetSize())
- MON_VAR(Payload.GetSize())
- MON_VAR(CurrentBuffers)
-
- MON_VAR(Context->LastProcessedSerial)
- MON_VAR(ConfirmedByInput)
- }
- }
- }
- }
- }
-
- TActivationContext::Send(new IEventHandle(ev->Recipient, ev->Sender, new NMon::TEvHttpInfoRes(str.Str())));
- }
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp b/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
deleted file mode 100644
index 514bfb0b84..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_proxy.cpp
+++ /dev/null
@@ -1,944 +0,0 @@
-#include "interconnect_tcp_proxy.h"
-#include "interconnect_handshake.h"
-#include "interconnect_tcp_session.h"
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/monlib/service/pages/templates.h>
-#include <util/system/getpid.h>
-
-namespace NActors {
- static constexpr TDuration GetNodeRequestTimeout = TDuration::Seconds(5);
-
- static constexpr TDuration FirstErrorSleep = TDuration::MilliSeconds(10);
- static constexpr TDuration MaxErrorSleep = TDuration::Seconds(10);
- static constexpr ui32 SleepRetryMultiplier = 4;
-
- static TString PeerNameForHuman(ui32 nodeNum, const TString& longName, ui16 port) {
- TStringBuf token;
- TStringBuf(longName).NextTok('.', token);
- return ToString<ui32>(nodeNum) + ":" + (token.size() > 0 ? TString(token) : longName) + ":" + ToString<ui16>(port);
- }
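PeerNameForHuman builds a short human-readable peer label of the form nodeId:host:port, where host is the first dot-separated label of the FQDN (or the full name when it has no dot). A plain-STL sketch of the same formatting, under a hypothetical name:

    #include <cstdint>
    #include <string>

    // Sketch of the nodeId:shortHost:port label; equivalent to PeerNameForHuman
    // for non-empty host names.
    std::string PeerLabel(uint32_t nodeNum, const std::string& longName, uint16_t port) {
        const size_t dot = longName.find('.');
        const std::string shortName = dot == std::string::npos ? longName : longName.substr(0, dot);
        return std::to_string(nodeNum) + ":" + (shortName.empty() ? longName : shortName) + ":" + std::to_string(port);
    }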
-
- TInterconnectProxyTCP::TInterconnectProxyTCP(const ui32 node, TInterconnectProxyCommon::TPtr common,
- IActor **dynamicPtr)
- : TActor(&TThis::StateInit)
- , PeerNodeId(node)
- , DynamicPtr(dynamicPtr)
- , Common(std::move(common))
- , SecureContext(new NInterconnect::TSecureSocketContext(Common->Settings.Certificate, Common->Settings.PrivateKey,
- Common->Settings.CaFilePath, Common->Settings.CipherList))
- {
- Y_ABORT_UNLESS(Common);
- Y_ABORT_UNLESS(Common->NameserviceId);
- if (DynamicPtr) {
- Y_ABORT_UNLESS(!*DynamicPtr);
- *DynamicPtr = this;
- }
- }
-
- void TInterconnectProxyTCP::Bootstrap() {
- SetPrefix(Sprintf("Proxy %s [node %" PRIu32 "]", SelfId().ToString().data(), PeerNodeId));
-
- SwitchToInitialState();
-
- LOG_INFO_IC("ICP01", "ready to work");
- }
-
- void TInterconnectProxyTCP::Registered(TActorSystem* sys, const TActorId& owner) {
- if (!DynamicPtr) {
- // perform usual bootstrap for static nodes
- sys->Send(new IEventHandle(TEvents::TSystem::Bootstrap, 0, SelfId(), owner, nullptr, 0));
- }
- if (const auto& mon = Common->RegisterMonPage) {
- TString path = Sprintf("peer%04" PRIu32, PeerNodeId);
- TString title = Sprintf("Peer #%04" PRIu32, PeerNodeId);
- mon(path, title, sys, SelfId());
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingActivation
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- void TInterconnectProxyTCP::RequestNodeInfo(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(!IncomingHandshakeActor && !OutgoingHandshakeActor && !PendingIncomingHandshakeEvents && !PendingSessionEvents);
- EnqueueSessionEvent(ev);
- StartConfiguring();
- }
-
- void TInterconnectProxyTCP::RequestNodeInfoForIncomingHandshake(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- if (!Terminated) {
- Y_ABORT_UNLESS(!IncomingHandshakeActor && !OutgoingHandshakeActor && !PendingIncomingHandshakeEvents && !PendingSessionEvents);
- EnqueueIncomingHandshakeEvent(ev);
- StartConfiguring();
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingNodeInfo
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- void TInterconnectProxyTCP::StartConfiguring() {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(!IncomingHandshakeActor && !OutgoingHandshakeActor);
-
- // issue node info request
- Send(Common->NameserviceId, new TEvInterconnect::TEvGetNode(PeerNodeId));
-
-        // arm the configure timer; store a pointer to the event to ensure that we handle the correct one if there are
-        // any other wakeup events in flight
- SwitchToState(__LINE__, "PendingNodeInfo", &TThis::PendingNodeInfo, GetNodeRequestTimeout,
- ConfigureTimeoutCookie = new TEvents::TEvWakeup);
- }
-
- void TInterconnectProxyTCP::Configure(TEvInterconnect::TEvNodeInfo::TPtr& ev) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(!IncomingHandshakeActor && !OutgoingHandshakeActor && !Session);
-
- if (!ev->Get()->Node) {
- TransitToErrorState("cannot get node info");
- } else {
- auto& info = *ev->Get()->Node;
- TString name = PeerNameForHuman(PeerNodeId, info.Host, info.Port);
- TechnicalPeerHostName = info.Host;
- if (!Metrics) {
- Metrics = Common->Metrics ? CreateInterconnectMetrics(Common) : CreateInterconnectCounters(Common);
- }
- Metrics->SetPeerInfo(name, info.Location.GetDataCenterId());
-
- LOG_DEBUG_IC("ICP02", "configured for host %s", name.data());
-
- ProcessConfigured();
- }
- }
-
- void TInterconnectProxyTCP::ConfigureTimeout(TEvents::TEvWakeup::TPtr& ev) {
- ICPROXY_PROFILED;
-
- if (ev->Get() == ConfigureTimeoutCookie) {
- TransitToErrorState("timed out while waiting for node info");
- }
- }
-
- void TInterconnectProxyTCP::ProcessConfigured() {
- ICPROXY_PROFILED;
-
- // if the request was initiated by some activity involving Interconnect, then we are expected to start handshake
- if (PendingSessionEvents) {
- StartInitialHandshake();
- }
-
- // process incoming handshake requests; all failures were ejected from the queue along with the matching initiation requests
- for (THolder<IEventHandle>& ev : PendingIncomingHandshakeEvents) {
- TAutoPtr<IEventHandle> x(ev.Release());
- IncomingHandshake(x);
- }
- PendingIncomingHandshakeEvents.clear();
-
-        // possible situation -- an incoming handshake arrives, but it is not satisfied and gets rejected; in this case
-        // we return to the initial state as we have nothing to do
- if (!IncomingHandshakeActor && !OutgoingHandshakeActor) {
- SwitchToInitialState();
- }
- }
-
- void TInterconnectProxyTCP::StartInitialHandshake() {
- ICPROXY_PROFILED;
-
- // since we are starting initial handshake for some reason, we'll drop any existing handshakes, if any
- DropHandshakes();
-
- // create and register handshake actor
- OutgoingHandshakeActor = Register(CreateOutgoingHandshakeActor(Common, GenerateSessionVirtualId(),
- TActorId(), PeerNodeId, 0, TechnicalPeerHostName, TSessionParams()), TMailboxType::ReadAsFilled);
- OutgoingHandshakeActorCreated = TActivationContext::Now();
-
- // prepare for new handshake
- PrepareNewSessionHandshake();
- }
-
- void TInterconnectProxyTCP::StartResumeHandshake(ui64 inputCounter) {
- ICPROXY_PROFILED;
-
- // drop outgoing handshake if we have one; keep incoming handshakes as they may be useful
- DropOutgoingHandshake();
-
- // ensure that we have session
- Y_ABORT_UNLESS(Session);
-
- // ensure that we have both virtual ids
- Y_ABORT_UNLESS(SessionVirtualId);
- Y_ABORT_UNLESS(RemoteSessionVirtualId);
-
- // create and register handshake actor
- OutgoingHandshakeActor = Register(CreateOutgoingHandshakeActor(Common, SessionVirtualId,
- RemoteSessionVirtualId, PeerNodeId, inputCounter, TechnicalPeerHostName, Session->Params),
- TMailboxType::ReadAsFilled);
- OutgoingHandshakeActorCreated = TActivationContext::Now();
- }
-
- void TInterconnectProxyTCP::IssueIncomingHandshakeReply(const TActorId& handshakeId, ui64 peerLocalId,
- THolder<IEventBase> event) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(!IncomingHandshakeActor);
- IncomingHandshakeActor = handshakeId;
- IncomingHandshakeActorFilledIn = TActivationContext::Now();
- Y_ABORT_UNLESS(!LastSerialFromIncomingHandshake || *LastSerialFromIncomingHandshake <= peerLocalId);
- LastSerialFromIncomingHandshake = peerLocalId;
-
- if (OutgoingHandshakeActor && SelfId().NodeId() < PeerNodeId) {
-            // Both outgoing and incoming handshakes are in progress. To prevent a race condition during simultaneous
-            // handshakes, the incoming handshake reply must be held until the outgoing handshake completes or fails.
- LOG_DEBUG_IC("ICP06", "reply for incoming handshake (actor %s) is held", IncomingHandshakeActor.ToString().data());
- HeldHandshakeReply = std::move(event);
-
- // Check that we are in one of acceptable states that would properly handle handshake statuses.
- const auto state = CurrentStateFunc();
- Y_ABORT_UNLESS(state == &TThis::PendingConnection || state == &TThis::StateWork, "invalid handshake request in state# %s", State);
- } else {
- LOG_DEBUG_IC("ICP07", "issued incoming handshake reply");
-
- // No race, so we can send reply immediately.
- Y_ABORT_UNLESS(!HeldHandshakeReply);
- Send(IncomingHandshakeActor, event.Release());
-
- // Start waiting for handshake reply, if not yet started; also, if session is already created, then we don't
- // switch from working state.
- if (!Session) {
- LOG_INFO_IC("ICP08", "No active sessions, becoming PendingConnection");
- SwitchToState(__LINE__, "PendingConnection", &TThis::PendingConnection);
- } else {
- Y_ABORT_UNLESS(CurrentStateFunc() == &TThis::StateWork);
- }
- }
- }
-
- void TInterconnectProxyTCP::IncomingHandshake(TEvHandshakeAsk::TPtr& ev) {
- ICPROXY_PROFILED;
-
- TEvHandshakeAsk *msg = ev->Get();
-
- // TEvHandshakeAsk is only applicable for continuation requests
- LOG_DEBUG_IC("ICP09", "(actor %s) from: %s for: %s", ev->Sender.ToString().data(),
- ev->Get()->Self.ToString().data(), ev->Get()->Peer.ToString().data());
-
- if (!Session) {
- // if there is no open session, report error -- continuation request works only with open sessions
- LOG_NOTICE_IC("ICP12", "(actor %s) peer tries to resume nonexistent session Self# %s Peer# %s",
- ev->Sender.ToString().data(), msg->Self.ToString().data(), msg->Peer.ToString().data());
- } else if (SessionVirtualId != ev->Get()->Peer || RemoteSessionVirtualId != ev->Get()->Self) {
- // check session virtual ids for continuation
- LOG_NOTICE_IC("ICP13", "(actor %s) virtual id mismatch with existing session (Peer: %s Self: %s"
- " SessionVirtualId: %s RemoteSessionVirtualId: %s)", ev->Sender.ToString().data(),
- ev->Get()->Peer.ToString().data(), ev->Get()->Self.ToString().data(), SessionVirtualId.ToString().data(),
- RemoteSessionVirtualId.ToString().data());
- } else {
- // if we already have incoming handshake, then terminate existing one
- DropIncomingHandshake();
-
-            // issue a reply to the sender, possibly holding it while the outgoing handshake race is being resolved
- THolder<IEventBase> reply = IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::ProcessHandshakeRequest, ev);
- return IssueIncomingHandshakeReply(ev->Sender, RemoteSessionVirtualId.LocalId(), std::move(reply));
- }
-
- // error case -- report error to the handshake actor
- Send(ev->Sender, new TEvHandshakeNak);
- }
-
- void TInterconnectProxyTCP::IncomingHandshake(TEvHandshakeRequest::TPtr& ev) {
- ICPROXY_PROFILED;
-
- LOG_DEBUG_IC("ICP17", "incoming handshake (actor %s)", ev->Sender.ToString().data());
-
- const auto& record = ev->Get()->Record;
- ui64 remotePID = record.GetProgramPID();
- ui64 remoteStartTime = record.GetProgramStartTime();
- ui64 remoteSerial = record.GetSerial();
-
- if (RemoteProgramInfo && remotePID == RemoteProgramInfo->PID && remoteStartTime == RemoteProgramInfo->StartTime) {
- if (remoteSerial < RemoteProgramInfo->Serial) {
- LOG_INFO_IC("ICP18", "handshake (actor %s) is too old", ev->Sender.ToString().data());
- Send(ev->Sender, new TEvents::TEvPoisonPill);
- return;
- } else {
- RemoteProgramInfo->Serial = remoteSerial;
- }
- } else {
- const auto ptr = new TProgramInfo;
- ptr->PID = remotePID;
- ptr->StartTime = remoteStartTime;
- ptr->Serial = remoteSerial;
- RemoteProgramInfo.Reset(ptr);
- }
-
- /* Let's check peer technical hostname */
- if (record.HasSenderHostName() && TechnicalPeerHostName != record.GetSenderHostName()) {
- Send(ev->Sender, new TEvHandshakeReplyError("host name mismatch"));
- return;
- }
-
-        // check the sender actor id (serial) and make sure it is not too old
- if (LastSerialFromIncomingHandshake) {
- const ui64 serial = record.GetSerial();
- if (serial < *LastSerialFromIncomingHandshake) {
- LOG_NOTICE_IC("ICP15", "Handshake# %s has duplicate serial# %" PRIu64
- " LastSerialFromIncomingHandshake# %" PRIu64, ev->Sender.ToString().data(),
- serial, *LastSerialFromIncomingHandshake);
- Send(ev->Sender, new TEvHandshakeReplyError("duplicate serial"));
- return;
- } else if (serial == *LastSerialFromIncomingHandshake) {
- LOG_NOTICE_IC("ICP00", "Handshake# %s is obsolete, serial# %" PRIu64
- " LastSerialFromIncomingHandshake# %" PRIu64, ev->Sender.ToString().data(),
- serial, *LastSerialFromIncomingHandshake);
- Send(ev->Sender, new TEvents::TEvPoisonPill);
- return;
- }
- }
-
- // drop incoming handshake as this is definitely more recent
- DropIncomingHandshake();
-
- // prepare for new session
- PrepareNewSessionHandshake();
-
- auto event = MakeHolder<TEvHandshakeReplyOK>();
- auto* pb = event->Record.MutableSuccess();
- const TActorId virtualId = GenerateSessionVirtualId();
- pb->SetProtocol(INTERCONNECT_PROTOCOL_VERSION);
- pb->SetSenderActorId(virtualId.ToString());
- pb->SetProgramPID(GetPID());
- pb->SetProgramStartTime(Common->StartTime);
- pb->SetSerial(virtualId.LocalId());
-
- IssueIncomingHandshakeReply(ev->Sender, 0, std::move(event));
- }
-
- void TInterconnectProxyTCP::HandleHandshakeStatus(TEvHandshakeDone::TPtr& ev) {
- ICPROXY_PROFILED;
-
- TEvHandshakeDone *msg = ev->Get();
-
- // Terminate handshake actor working in opposite direction, if set up.
- if (ev->Sender == IncomingHandshakeActor) {
- LOG_INFO_IC("ICP19", "incoming handshake succeeded");
- DropIncomingHandshake(false);
- DropOutgoingHandshake();
- } else if (ev->Sender == OutgoingHandshakeActor) {
- LOG_INFO_IC("ICP20", "outgoing handshake succeeded");
- DropIncomingHandshake();
- DropOutgoingHandshake(false);
- } else {
- /* It seems to be an old handshake. */
- return;
- }
-
- // drop any pending XDC subscriptions
- ConnectionSubscriptions.clear();
-
- Y_ABORT_UNLESS(!IncomingHandshakeActor && !OutgoingHandshakeActor);
- SwitchToState(__LINE__, "StateWork", &TThis::StateWork);
-
- if (Session) {
- // this is continuation request, check that virtual ids match
- Y_ABORT_UNLESS(SessionVirtualId == msg->Self && RemoteSessionVirtualId == msg->Peer);
- } else {
- // this is initial request, check that we have virtual ids not filled in
- Y_ABORT_UNLESS(!SessionVirtualId && !RemoteSessionVirtualId);
- }
-
- auto error = [&](const char* description) {
- TransitToErrorState(description);
- };
-
- // If session is not created, then create new one.
- if (!Session) {
- RemoteProgramInfo = std::move(msg->ProgramInfo);
- if (!RemoteProgramInfo) {
- // we have received resume handshake, but session was closed concurrently while handshaking
- return error("Session continuation race");
- }
-
- // Create new session actor.
- SessionID = RegisterWithSameMailbox(Session = new TInterconnectSessionTCP(this, msg->Params));
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Init);
- SessionVirtualId = msg->Self;
- RemoteSessionVirtualId = msg->Peer;
- LOG_INFO_IC("ICP22", "created new session: %s", SessionID.ToString().data());
- }
-
- // ensure that we have session local/peer virtual ids
- Y_ABORT_UNLESS(Session && SessionVirtualId && RemoteSessionVirtualId);
-
- // Set up new connection for the session.
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::SetNewConnection, ev);
-
- // Reset retry timer
- HoldByErrorWakeupDuration = TDuration::Zero();
-
- /* Forward all held events */
- ProcessPendingSessionEvents();
- }
-
- void TInterconnectProxyTCP::HandleHandshakeStatus(TEvHandshakeFail::TPtr& ev) {
- ICPROXY_PROFILED;
-
-        // update the error state log; this failure is inconclusive unless this is the last pending handshake
- const bool inconclusive = (ev->Sender != IncomingHandshakeActor && ev->Sender != OutgoingHandshakeActor) ||
- (IncomingHandshakeActor && OutgoingHandshakeActor);
- LogHandshakeFail(ev, inconclusive);
-
- if (ev->Sender == IncomingHandshakeActor) {
- LOG_NOTICE_IC("ICP24", "incoming handshake failed, temporary: %" PRIu32 " explanation: %s outgoing: %s",
- ui32(ev->Get()->Temporary), ev->Get()->Explanation.data(), OutgoingHandshakeActor.ToString().data());
- DropIncomingHandshake(false);
- } else if (ev->Sender == OutgoingHandshakeActor) {
- LOG_NOTICE_IC("ICP25", "outgoing handshake failed, temporary: %" PRIu32 " explanation: %s incoming: %s held: %s",
- ui32(ev->Get()->Temporary), ev->Get()->Explanation.data(), IncomingHandshakeActor.ToString().data(),
- HeldHandshakeReply ? "yes" : "no");
- DropOutgoingHandshake(false);
-
- if (IEventBase* reply = HeldHandshakeReply.Release()) {
- Y_ABORT_UNLESS(IncomingHandshakeActor);
- LOG_DEBUG_IC("ICP26", "sent held handshake reply to %s", IncomingHandshakeActor.ToString().data());
- Send(IncomingHandshakeActor, reply);
- }
-
- // if we have no current session, then we have to drop all pending events as the outgoing handshake has failed
- ProcessPendingSessionEvents();
- } else {
- /* It seems to be an old fail, just ignore it */
- LOG_NOTICE_IC("ICP27", "obsolete handshake fail ignored");
- return;
- }
-
- if (Metrics) {
- Metrics->IncHandshakeFails();
- }
-
- if (IncomingHandshakeActor || OutgoingHandshakeActor) {
- // one of handshakes is still going on
- LOG_DEBUG_IC("ICP28", "other handshake is still going on");
- return;
- }
-
- switch (ev->Get()->Temporary) {
- case TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT:
- if (!Session) {
- if (PendingSessionEvents) {
- // try to start outgoing handshake as we have some events enqueued
- StartInitialHandshake();
- } else {
- // return back to initial state as we have no session and no pending handshakes
- SwitchToInitialState();
- }
- } else if (Session->Socket) {
- // try to reestablish connection -- meaning restart handshake from the last known position
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::ReestablishConnectionWithHandshake,
- TDisconnectReason::HandshakeFailTransient());
- } else {
- // we have no active connection in that session, so just restart handshake from last known position
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::StartHandshake);
- }
- break;
-
- case TEvHandshakeFail::HANDSHAKE_FAIL_SESSION_MISMATCH:
- StartInitialHandshake();
- break;
-
- case TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT:
- TString timeExplanation = " LastSessionDieTime# " + LastSessionDieTime.ToString();
- if (Session) {
- InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate,
- TDisconnectReason::HandshakeFailPermanent());
- }
- TransitToErrorState(ev->Get()->Explanation + timeExplanation, false);
- break;
- }
- }
-
- void TInterconnectProxyTCP::LogHandshakeFail(TEvHandshakeFail::TPtr& ev, bool inconclusive) {
- ICPROXY_PROFILED;
-
- TString kind = "unknown";
- switch (ev->Get()->Temporary) {
- case TEvHandshakeFail::HANDSHAKE_FAIL_TRANSIENT:
- kind = Session ? "transient w/session" : "transient w/o session";
- break;
-
- case TEvHandshakeFail::HANDSHAKE_FAIL_SESSION_MISMATCH:
- kind = "session_mismatch";
- break;
-
- case TEvHandshakeFail::HANDSHAKE_FAIL_PERMANENT:
- kind = "permanent";
- break;
- }
- if (inconclusive) {
- kind += " inconclusive";
- }
- UpdateErrorStateLog(TActivationContext::Now(), kind, ev->Get()->Explanation);
- }
-
- void TInterconnectProxyTCP::ProcessPendingSessionEvents() {
- ICPROXY_PROFILED;
-
- while (PendingSessionEvents) {
- TPendingSessionEvent ev = std::move(PendingSessionEvents.front());
- PendingSessionEventsSize -= ev.Size;
- TAutoPtr<IEventHandle> event(ev.Event.Release());
- PendingSessionEvents.pop_front();
-
- if (Session) {
- ForwardSessionEventToSession(event);
- } else {
- DropSessionEvent(event);
- }
- }
- }
-
- void TInterconnectProxyTCP::DropSessionEvent(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- ValidateEvent(ev, "DropSessionEvent");
- switch (ev->GetTypeRewrite()) {
- case TEvInterconnect::EvForward:
- if (ev->Flags & IEventHandle::FlagSubscribeOnSession) {
- Send(ev->Sender, new TEvInterconnect::TEvNodeDisconnected(PeerNodeId), 0, ev->Cookie);
- }
- TActivationContext::Send(IEventHandle::ForwardOnNondelivery(ev, TEvents::TEvUndelivered::Disconnected));
- break;
-
- case TEvInterconnect::TEvConnectNode::EventType:
- case TEvents::TEvSubscribe::EventType:
- Send(ev->Sender, new TEvInterconnect::TEvNodeDisconnected(PeerNodeId), 0, ev->Cookie);
- break;
-
- case TEvents::TEvUnsubscribe::EventType:
- /* Do nothing */
- break;
-
- default:
- Y_ABORT("Unexpected type of event in held event queue");
- }
- }
-
- void TInterconnectProxyTCP::UnregisterSession(TInterconnectSessionTCP* session) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(Session && Session == session && SessionID);
-
- LOG_INFO_IC("ICP30", "unregister session Session# %s VirtualId# %s", SessionID.ToString().data(),
- SessionVirtualId.ToString().data());
-
- Session = nullptr;
- SessionID = TActorId();
-
- // drop all pending events as we are closed
- ProcessPendingSessionEvents();
-
- // reset virtual ids as this session is terminated
- SessionVirtualId = TActorId();
- RemoteSessionVirtualId = TActorId();
-
- if (Metrics) {
- Metrics->IncSessionDeaths();
- }
- LastSessionDieTime = TActivationContext::Now();
-
- if (IncomingHandshakeActor || OutgoingHandshakeActor) {
- PrepareNewSessionHandshake();
- } else {
- SwitchToInitialState();
- }
- }
-
- void TInterconnectProxyTCP::EnqueueSessionEvent(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- ValidateEvent(ev, "EnqueueSessionEvent");
- const ui32 size = ev->GetSize();
- PendingSessionEventsSize += size;
- PendingSessionEvents.emplace_back(TActivationContext::Monotonic() + Common->Settings.MessagePendingTimeout, size, ev);
- ScheduleCleanupEventQueue();
- CleanupEventQueue();
- }
-
- void TInterconnectProxyTCP::EnqueueIncomingHandshakeEvent(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- // enqueue handshake request
- Y_UNUSED();
- PendingIncomingHandshakeEvents.emplace_back(ev);
- }
-
- void TInterconnectProxyTCP::EnqueueIncomingHandshakeEvent(TEvHandshakeDone::TPtr& /*ev*/) {
- ICPROXY_PROFILED;
-
-        // TEvHandshakeDone can't get into the queue, because we have to process the handshake request first; this may be
-        // a race with previous handshakes, so simply ignore it
- }
-
- void TInterconnectProxyTCP::EnqueueIncomingHandshakeEvent(TEvHandshakeFail::TPtr& ev) {
- ICPROXY_PROFILED;
-
- for (auto it = PendingIncomingHandshakeEvents.begin(); it != PendingIncomingHandshakeEvents.end(); ++it) {
- THolder<IEventHandle>& pendingEvent = *it;
- if (pendingEvent->Sender == ev->Sender) {
-                // we have found a cancellation request for the pending handshake request, so simply remove it from the
-                // deque, as we are not interested in the failure reason; most likely it happened because of a handshake timeout
- if (pendingEvent->GetTypeRewrite() == TEvHandshakeFail::EventType) {
- TEvHandshakeFail::TPtr tmp(static_cast<TEventHandle<TEvHandshakeFail>*>(pendingEvent.Release()));
- LogHandshakeFail(tmp, true);
- }
- PendingIncomingHandshakeEvents.erase(it);
- break;
- }
- }
- }
-
- void TInterconnectProxyTCP::ForwardSessionEventToSession(STATEFN_SIG) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(Session && SessionID);
- ValidateEvent(ev, "ForwardSessionEventToSession");
- InvokeOtherActor(*Session, &TInterconnectSessionTCP::Receive, ev);
- }
-
- void TInterconnectProxyTCP::GenerateHttpInfo(NMon::TEvHttpInfo::TPtr& ev) {
- ICPROXY_PROFILED;
-
- LOG_INFO_IC("ICP31", "proxy http called");
-
- TStringStream str;
-
- HTML(str) {
- DIV_CLASS("panel panel-info") {
- DIV_CLASS("panel-heading") {
- str << "Proxy";
- }
- DIV_CLASS("panel-body") {
- TABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Sensor";
- }
- TABLEH() {
- str << "Value";
- }
- }
- }
-#define MON_VAR(NAME) \
- TABLER() { \
- TABLED() { \
- str << #NAME; \
- } \
- TABLED() { \
- str << NAME; \
- } \
- }
-
- TABLEBODY() {
- MON_VAR(TActivationContext::Now())
- MON_VAR(SessionID)
- MON_VAR(LastSessionDieTime)
- MON_VAR(IncomingHandshakeActor)
- MON_VAR(IncomingHandshakeActorFilledIn)
- MON_VAR(IncomingHandshakeActorReset)
- MON_VAR(OutgoingHandshakeActor)
- MON_VAR(OutgoingHandshakeActorCreated)
- MON_VAR(OutgoingHandshakeActorReset)
- MON_VAR(State)
- MON_VAR(StateSwitchTime)
- }
- }
- }
- }
-
- DIV_CLASS("panel panel-info") {
- DIV_CLASS("panel-heading") {
- str << "Error Log";
- }
- DIV_CLASS("panel-body") {
- TABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Timestamp";
- }
- TABLEH() {
- str << "Elapsed";
- }
- TABLEH() {
- str << "Kind";
- }
- TABLEH() {
- str << "Explanation";
- }
- }
- }
- TABLEBODY() {
- const TInstant now = TActivationContext::Now();
- const TInstant barrier = now - TDuration::Minutes(1);
- for (auto it = ErrorStateLog.rbegin(); it != ErrorStateLog.rend(); ++it) {
- auto wrapper = [&](const auto& lambda) {
- if (std::get<0>(*it) > barrier) {
- str << "<strong>";
- lambda();
- str << "</strong>";
- } else {
- lambda();
- }
- };
- TABLER() {
- TABLED() {
- wrapper([&] {
- str << std::get<0>(*it);
- });
- }
- TABLED() {
- wrapper([&] {
- str << now - std::get<0>(*it);
- });
- }
- TABLED() {
- wrapper([&] {
- str << std::get<1>(*it);
- });
- }
- TABLED() {
- wrapper([&] {
- str << std::get<2>(*it);
- });
-
- ui32 rep = std::get<3>(*it);
- if (rep != 1) {
- str << " <strong>x" << rep << "</strong>";
- }
- }
- }
- }
- }
- }
- }
- }
- }
-
- TAutoPtr<IEventHandle> h(new IEventHandle(ev->Sender, ev->Recipient, new NMon::TEvHttpInfoRes(str.Str())));
- if (Session) {
- switch (auto& ev = h; ev->GetTypeRewrite()) {
- hFunc(NMon::TEvHttpInfoRes, Session->GenerateHttpInfo);
- default:
- Y_ABORT();
- }
- } else {
- TActivationContext::Send(h.Release());
- }
- }
-
- void TInterconnectProxyTCP::TransitToErrorState(TString explanation, bool updateErrorLog) {
- ICPROXY_PROFILED;
-
- LOG_NOTICE_IC("ICP32", "transit to hold-by-error state Explanation# %s", explanation.data());
- LOG_INFO(*TlsActivationContext, NActorsServices::INTERCONNECT_STATUS, "[%u] error state: %s", PeerNodeId, explanation.data());
-
- if (updateErrorLog) {
- UpdateErrorStateLog(TActivationContext::Now(), "permanent conclusive", explanation);
- }
-
- Y_ABORT_UNLESS(Session == nullptr);
- Y_ABORT_UNLESS(!SessionID);
-
-        // recalculate the wakeup timeout -- if this is the first failure, then we sleep for the default timeout; otherwise
-        // we sleep N times longer than on the previous try, but not longer than the desired maximum
- HoldByErrorWakeupDuration = HoldByErrorWakeupDuration != TDuration::Zero()
- ? Min(HoldByErrorWakeupDuration * SleepRetryMultiplier, MaxErrorSleep)
- : FirstErrorSleep;
-
- // transit to required state and arm wakeup timer
- if (Terminated) {
- // switch to this state permanently
- SwitchToState(__LINE__, "HoldByError", &TThis::HoldByError);
- HoldByErrorWakeupCookie = nullptr;
- } else {
- SwitchToState(__LINE__, "HoldByError", &TThis::HoldByError, HoldByErrorWakeupDuration,
- HoldByErrorWakeupCookie = new TEvents::TEvWakeup);
- }
-
- /* Process all pending events. */
- ProcessPendingSessionEvents();
-
- /* Terminate handshakes */
- DropHandshakes();
-
- /* Terminate pending incoming handshake requests. */
- for (auto& ev : PendingIncomingHandshakeEvents) {
- Send(ev->Sender, new TEvents::TEvPoisonPill);
- if (ev->GetTypeRewrite() == TEvHandshakeFail::EventType) {
- TEvHandshakeFail::TPtr tmp(static_cast<TEventHandle<TEvHandshakeFail>*>(ev.Release()));
- LogHandshakeFail(tmp, true);
- }
- }
- PendingIncomingHandshakeEvents.clear();
- }
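The retry delay used by TransitToErrorState grows geometrically: the first failure sleeps FirstErrorSleep (10 ms), each following failure multiplies the previous delay by SleepRetryMultiplier (4), and the result is capped at MaxErrorSleep (10 s). A small sketch of that schedule, with std::chrono standing in for TDuration:

    #include <algorithm>
    #include <chrono>

    // Backoff progression: 10 ms, 40 ms, 160 ms, ... capped at 10 s.
    std::chrono::milliseconds NextErrorSleep(std::chrono::milliseconds current) {
        constexpr std::chrono::milliseconds first{10};
        constexpr std::chrono::milliseconds cap{10000};
        constexpr int multiplier = 4;
        if (current == std::chrono::milliseconds::zero()) {
            return first;
        }
        return std::min(current * multiplier, cap);
    }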
-
- void TInterconnectProxyTCP::WakeupFromErrorState(TEvents::TEvWakeup::TPtr& ev) {
- ICPROXY_PROFILED;
-
- LOG_INFO_IC("ICP33", "wake up from error state");
-
- if (ev->Get() == HoldByErrorWakeupCookie) {
- SwitchToInitialState();
- }
- }
-
- void TInterconnectProxyTCP::Disconnect() {
- ICPROXY_PROFILED;
-
- // terminate handshakes (if any)
- DropHandshakes();
-
- if (Session) {
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate, TDisconnectReason::UserRequest());
- } else {
- TransitToErrorState("forced disconnect");
- }
- }
-
- void TInterconnectProxyTCP::ScheduleCleanupEventQueue() {
- ICPROXY_PROFILED;
-
- if (!CleanupEventQueueScheduled && PendingSessionEvents) {
- // apply batching at 50 ms granularity
- Schedule(Max(TDuration::MilliSeconds(50), PendingSessionEvents.front().Deadline - TActivationContext::Monotonic()), new TEvCleanupEventQueue);
- CleanupEventQueueScheduled = true;
- }
- }
-
- void TInterconnectProxyTCP::HandleCleanupEventQueue() {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(CleanupEventQueueScheduled);
- CleanupEventQueueScheduled = false;
- CleanupEventQueue();
- ScheduleCleanupEventQueue();
- }
-
- void TInterconnectProxyTCP::CleanupEventQueue() {
- ICPROXY_PROFILED;
-
- const TMonotonic now = TActivationContext::Monotonic();
- while (PendingSessionEvents) {
- TPendingSessionEvent& ev = PendingSessionEvents.front();
- if (now >= ev.Deadline || PendingSessionEventsSize > Common->Settings.MessagePendingSize) {
- TAutoPtr<IEventHandle> event(ev.Event.Release());
- PendingSessionEventsSize -= ev.Size;
- DropSessionEvent(event);
- PendingSessionEvents.pop_front();
- } else {
- break;
- }
- }
- }
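EnqueueSessionEvent and CleanupEventQueue together keep a deadline- and size-bounded queue of events held while no session exists: each entry carries its own deadline, an aggregate byte counter tracks the total, and the cleanup pass drops entries from the front while they are expired or the total exceeds MessagePendingSize. A stand-alone sketch of that policy; TPending and the string payload are illustrative stand-ins for TPendingSessionEvent and the held IEventHandle.

    #include <chrono>
    #include <cstddef>
    #include <deque>
    #include <string>
    #include <utility>

    using TClock = std::chrono::steady_clock;

    struct TPending {
        TClock::time_point Deadline;
        size_t Size;
        std::string Payload; // placeholder for the held event
    };

    class TPendingQueue {
    public:
        explicit TPendingQueue(size_t maxTotalSize)
            : MaxTotalSize(maxTotalSize)
        {}

        void Enqueue(TPending ev) {
            TotalSize += ev.Size;
            Queue.push_back(std::move(ev));
            Cleanup(TClock::now());
        }

        // Drop expired entries and shrink until the size budget is respected again.
        void Cleanup(TClock::time_point now) {
            while (!Queue.empty() &&
                   (now >= Queue.front().Deadline || TotalSize > MaxTotalSize)) {
                TotalSize -= Queue.front().Size;
                Queue.pop_front();
            }
        }

    private:
        const size_t MaxTotalSize;
        size_t TotalSize = 0;
        std::deque<TPending> Queue;
    };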
-
- void TInterconnectProxyTCP::HandleClosePeerSocket() {
- ICPROXY_PROFILED;
-
- if (Session && Session->Socket) {
- LOG_INFO_IC("ICP34", "closed connection by debug command");
- Session->Socket->Shutdown(SHUT_RDWR);
- }
- }
-
- void TInterconnectProxyTCP::HandleCloseInputSession() {
- ICPROXY_PROFILED;
-
- if (Session) {
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::CloseInputSession);
- }
- }
-
- void TInterconnectProxyTCP::HandlePoisonSession() {
- ICPROXY_PROFILED;
-
- if (Session) {
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate, TDisconnectReason::Debug());
- }
- }
-
- void TInterconnectProxyTCP::HandleSessionBufferSizeRequest(TEvSessionBufferSizeRequest::TPtr& ev) {
- ICPROXY_PROFILED;
-
- ui64 bufSize = 0;
- if (Session) {
- bufSize = Session->TotalOutputQueueSize;
- }
-
- Send(ev->Sender, new TEvSessionBufferSizeResponse(SessionID, bufSize));
- }
-
- void TInterconnectProxyTCP::Handle(TEvQueryStats::TPtr& ev) {
- ICPROXY_PROFILED;
-
- TProxyStats stats;
- stats.Path = Sprintf("peer%04" PRIu32, PeerNodeId);
- stats.State = State;
- stats.PeerScopeId = Session ? Session->Params.PeerScopeId : TScopeId();
- stats.LastSessionDieTime = LastSessionDieTime;
- stats.TotalOutputQueueSize = Session ? Session->TotalOutputQueueSize : 0;
- stats.Connected = Session ? (bool)Session->Socket : false;
- stats.ExternalDataChannel = Session && Session->XdcSocket;
- stats.Host = TechnicalPeerHostName;
- stats.Port = 0;
- ui32 rep = 0;
- std::tie(stats.LastErrorTimestamp, stats.LastErrorKind, stats.LastErrorExplanation, rep) = ErrorStateLog
- ? ErrorStateLog.back()
- : std::make_tuple(TInstant(), TString(), TString(), 1U);
- if (rep != 1) {
- stats.LastErrorExplanation += Sprintf(" x%" PRIu32, rep);
- }
- stats.Ping = Session ? Session->GetPingRTT() : TDuration::Zero();
- stats.ClockSkew = Session ? Session->GetClockSkew() : 0;
- if (Session) {
- if (auto *x = dynamic_cast<NInterconnect::TSecureSocket*>(Session->Socket.Get())) {
- stats.Encryption = Sprintf("%s/%u", x->GetCipherName().data(), x->GetCipherBits());
- } else {
- stats.Encryption = "none";
- }
- }
-
- auto response = MakeHolder<TEvStats>();
- response->PeerNodeId = PeerNodeId;
- response->ProxyStats = std::move(stats);
- Send(ev->Sender, response.Release());
- }
-
- void TInterconnectProxyTCP::HandleTerminate() {
- ICPROXY_PROFILED;
-
- if (Session) {
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate, TDisconnectReason());
- }
- Terminated = true;
- TransitToErrorState("terminated");
- }
-
- void TInterconnectProxyTCP::PassAway() {
- if (Session) {
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate, TDisconnectReason());
- }
- if (DynamicPtr) {
- Y_ABORT_UNLESS(*DynamicPtr == this);
- *DynamicPtr = nullptr;
- }
- // TODO: unregister actor mon page
- TActor::PassAway();
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_proxy.h b/library/cpp/actors/interconnect/interconnect_tcp_proxy.h
deleted file mode 100644
index 81f043a2e9..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_proxy.h
+++ /dev/null
@@ -1,570 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-
-#include "interconnect_common.h"
-#include "interconnect_counters.h"
-#include "interconnect_tcp_session.h"
-#include "profiler.h"
-
-#define ICPROXY_PROFILED TFunction func(*this, __func__, __LINE__)
-
-namespace NActors {
-
-
- /* WARNING: all proxy actors should be alive during actorsystem activity */
- class TInterconnectProxyTCP
- : public TActor<TInterconnectProxyTCP>
- , public TInterconnectLoggingBase
- , public TProfiled
- {
- enum {
- EvCleanupEventQueue = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvQueryStats,
- EvStats,
- EvPassAwayIfNeeded,
- };
-
- struct TEvCleanupEventQueue : TEventLocal<TEvCleanupEventQueue, EvCleanupEventQueue> {};
-
- public:
- struct TEvQueryStats : TEventLocal<TEvQueryStats, EvQueryStats> {};
-
- struct TProxyStats {
- TString Path;
- TString State;
- TScopeId PeerScopeId;
- TInstant LastSessionDieTime;
- ui64 TotalOutputQueueSize;
- bool Connected;
- bool ExternalDataChannel;
- TString Host;
- ui16 Port;
- TInstant LastErrorTimestamp;
- TString LastErrorKind;
- TString LastErrorExplanation;
- TDuration Ping;
- i64 ClockSkew;
- TString Encryption;
- };
-
- struct TEvStats : TEventLocal<TEvStats, EvStats> {
- ui32 PeerNodeId;
- TProxyStats ProxyStats;
- };
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::INTERCONNECT_PROXY_TCP;
- }
-
- TInterconnectProxyTCP(const ui32 node, TInterconnectProxyCommon::TPtr common, IActor **dynamicPtr = nullptr);
-
- STFUNC(StateInit) {
- Bootstrap();
- if (ev->Type != TEvents::TSystem::Bootstrap) { // for dynamic nodes we do not receive Bootstrap event
- Receive(ev);
- }
- }
-
- void Bootstrap();
- void Registered(TActorSystem* sys, const TActorId& owner) override;
-
- private:
- friend class TInterconnectSessionTCP;
- friend class TInterconnectSessionTCPv0;
- friend class THandshake;
- friend class TInputSessionTCP;
-
- void UnregisterSession(TInterconnectSessionTCP* session);
-
-#define SESSION_EVENTS(HANDLER) \
- fFunc(TEvInterconnect::EvForward, HANDLER) \
- fFunc(TEvInterconnect::TEvConnectNode::EventType, HANDLER) \
- fFunc(TEvents::TEvSubscribe::EventType, HANDLER) \
- fFunc(TEvents::TEvUnsubscribe::EventType, HANDLER)
-
-#define INCOMING_HANDSHAKE_EVENTS(HANDLER) \
- fFunc(TEvHandshakeAsk::EventType, HANDLER) \
- fFunc(TEvHandshakeRequest::EventType, HANDLER)
-
-#define HANDSHAKE_STATUS_EVENTS(HANDLER) \
- hFunc(TEvHandshakeDone, HANDLER) \
- hFunc(TEvHandshakeFail, HANDLER)
-
-#define PROXY_STFUNC(STATE, SESSION_HANDLER, INCOMING_HANDSHAKE_HANDLER, \
- HANDSHAKE_STATUS_HANDLER, DISCONNECT_HANDLER, \
- WAKEUP_HANDLER, NODE_INFO_HANDLER) \
- STATEFN(STATE) { \
- const ui32 type = ev->GetTypeRewrite(); \
- const bool profiled = type != TEvInterconnect::EvForward \
- && type != TEvInterconnect::EvConnectNode \
- && type != TEvents::TSystem::Subscribe \
- && type != TEvents::TSystem::Unsubscribe; \
- if (profiled) { \
- TProfiled::Start(); \
- } \
- { \
- TProfiled::TFunction func(*this, __func__, __LINE__); \
- switch (type) { \
- SESSION_EVENTS(SESSION_HANDLER) \
- INCOMING_HANDSHAKE_EVENTS(INCOMING_HANDSHAKE_HANDLER) \
- HANDSHAKE_STATUS_EVENTS(HANDSHAKE_STATUS_HANDLER) \
- cFunc(TEvInterconnect::EvDisconnect, DISCONNECT_HANDLER) \
- hFunc(TEvents::TEvWakeup, WAKEUP_HANDLER) \
- hFunc(TEvGetSecureSocket, Handle) \
- hFunc(NMon::TEvHttpInfo, GenerateHttpInfo) \
- cFunc(EvCleanupEventQueue, HandleCleanupEventQueue) \
- hFunc(TEvInterconnect::TEvNodeInfo, NODE_INFO_HANDLER) \
- cFunc(TEvInterconnect::EvClosePeerSocket, HandleClosePeerSocket) \
- cFunc(TEvInterconnect::EvCloseInputSession, HandleCloseInputSession) \
- cFunc(TEvInterconnect::EvPoisonSession, HandlePoisonSession) \
- hFunc(TEvSessionBufferSizeRequest, HandleSessionBufferSizeRequest) \
- hFunc(TEvQueryStats, Handle) \
- cFunc(TEvInterconnect::EvTerminate, HandleTerminate) \
- cFunc(EvPassAwayIfNeeded, HandlePassAwayIfNeeded) \
- hFunc(TEvSubscribeForConnection, Handle); \
- hFunc(TEvReportConnection, Handle); \
- default: \
- Y_ABORT("unexpected event Type# 0x%08" PRIx32, type); \
- } \
- } \
- if (profiled) { \
- if (TProfiled::Duration() >= TDuration::MilliSeconds(16)) { \
- const TString report = TProfiled::Format(); \
- LOG_ERROR_IC("ICP35", "event processing took too much time %s", report.data()); \
- } \
- TProfiled::Finish(); \
- } \
- }
-
- template <typename T>
- void Ignore(T& /*ev*/) {
- ICPROXY_PROFILED;
- }
-
- void Ignore() {
- ICPROXY_PROFILED;
- }
-
- void Ignore(TEvHandshakeDone::TPtr& ev) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(ev->Sender != IncomingHandshakeActor);
- Y_ABORT_UNLESS(ev->Sender != OutgoingHandshakeActor);
- }
-
- void Ignore(TEvHandshakeFail::TPtr& ev) {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(ev->Sender != IncomingHandshakeActor);
- Y_ABORT_UNLESS(ev->Sender != OutgoingHandshakeActor);
- LogHandshakeFail(ev, true);
- }
-
- const char* State = nullptr;
- TInstant StateSwitchTime;
-
- template <typename... TArgs>
- void SwitchToState(int line, const char* name, TArgs&&... args) {
- ICPROXY_PROFILED;
-
- LOG_DEBUG_IC("ICP77", "@%d %s -> %s", line, State, name);
- State = name;
- StateSwitchTime = TActivationContext::Now();
- Become(std::forward<TArgs>(args)...);
- Y_ABORT_UNLESS(!Terminated || CurrentStateFunc() == &TThis::HoldByError); // ensure we never escape this state
- if (CurrentStateFunc() != &TThis::PendingActivation) {
- PassAwayTimestamp = TMonotonic::Max();
- } else if (DynamicPtr) {
- PassAwayTimestamp = TActivationContext::Monotonic() + TDuration::Seconds(15);
- if (!PassAwayScheduled) {
- TActivationContext::Schedule(PassAwayTimestamp, new IEventHandle(EvPassAwayIfNeeded, 0, SelfId(),
- {}, nullptr, 0));
- PassAwayScheduled = true;
- }
- }
- }
-
- TMonotonic PassAwayTimestamp;
- bool PassAwayScheduled = false;
-
- void SwitchToInitialState() {
- ICPROXY_PROFILED;
-
- Y_ABORT_UNLESS(!PendingSessionEvents && !PendingIncomingHandshakeEvents, "%s PendingSessionEvents# %zu"
- " PendingIncomingHandshakeEvents# %zu State# %s", LogPrefix.data(), PendingSessionEvents.size(),
- PendingIncomingHandshakeEvents.size(), State);
- SwitchToState(__LINE__, "PendingActivation", &TThis::PendingActivation);
- }
-
- void HandlePassAwayIfNeeded() {
- Y_ABORT_UNLESS(PassAwayScheduled);
- const TMonotonic now = TActivationContext::Monotonic();
- if (now >= PassAwayTimestamp) {
- PassAway();
- } else if (PassAwayTimestamp != TMonotonic::Max()) {
- TActivationContext::Schedule(PassAwayTimestamp, new IEventHandle(EvPassAwayIfNeeded, 0, SelfId(),
- {}, nullptr, 0));
- } else {
- PassAwayScheduled = false;
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingActivation
- //
- // In this state we are just waiting for some activities, which may include:
- // * an external Session event
- // * incoming handshake request
- //
-    // Upon receiving such an event, we put it into the corresponding queue and initiate start-up by calling IssueGetNodeRequest,
-    // which, as the name says, issues TEvGetNode to the nameservice and arms a timer to handle the timeout (which should not
-    // occur, but we want to be sure we don't hang on this), and then switches to the PendingNodeInfo state.
-
- PROXY_STFUNC(PendingActivation,
- RequestNodeInfo, // Session events
- RequestNodeInfoForIncomingHandshake, // Incoming handshake requests
- Ignore, // Handshake status
- Ignore, // Disconnect request
- Ignore, // Wakeup
- Ignore // Node info
- )
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingNodeInfo
- //
-    // This state is entered when we have asked the nameserver to provide a description of the peer node we are working
-    // with. All external Session events and incoming handshake requests are enqueued into their respective queues;
-    // TEvNodeInfo is the main event that triggers processing. On success, we try to initiate an outgoing handshake if
-    // needed, or process incoming handshakes. On error, we enter the HoldByError state.
-    //
-    // NOTE: handshake status events are also enqueued, as the handshake actor may have generated a failure event due to
-    // a timeout or some other reason without waiting for an acknowledgement, and it must be processed correctly to
-    // prevent a session hang
-
- PROXY_STFUNC(PendingNodeInfo,
- EnqueueSessionEvent, // Session events
- EnqueueIncomingHandshakeEvent, // Incoming handshake requests
- EnqueueIncomingHandshakeEvent, // Handshake status
- Disconnect, // Disconnect request
- ConfigureTimeout, // Wakeup
- Configure // Node info
- )
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingConnection
- //
-    // Here we have issued an outgoing handshake, accepted an incoming handshake, or possibly both, and we are waiting
-    // for the handshake status. When one of the handshakes finishes, we use its status to establish the connection (or
-    // to go to the error state). When one handshake terminates with an error while the other is still running, we wait
-    // for the second one to finish.
-
- PROXY_STFUNC(PendingConnection,
- EnqueueSessionEvent, // Session events
- IncomingHandshake, // Incoming handshake requests
- HandleHandshakeStatus, // Handshake status
- Disconnect, // Disconnect request
- Ignore, // Wakeup
- Ignore // Node info
- )
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // StateWork
- //
-    // We have an accepted session and process any incoming messages through it. Incoming handshakes are accepted
-    // concurrently and applied when finished.
-
- PROXY_STFUNC(StateWork,
- ForwardSessionEventToSession, // Session events
- IncomingHandshake, // Incoming handshake requests
- HandleHandshakeStatus, // Handshake status
- Disconnect, // Disconnect request
- Ignore, // Wakeup
- Ignore // Node info
- )
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // HoldByError
- //
-    // When something bad happens with the connection, we sleep in this state. After waking up we go back to
-    // PendingActivation.
-
- PROXY_STFUNC(HoldByError,
- DropSessionEvent, // Session events
- RequestNodeInfoForIncomingHandshake, // Incoming handshake requests
- Ignore, // Handshake status
- Ignore, // Disconnect request
- WakeupFromErrorState, // Wakeup
- Ignore // Node info
- )
-
-#undef SESSION_EVENTS
-#undef INCOMING_HANDSHAKE_EVENTS
-#undef HANDSHAKE_STATUS_EVENTS
-#undef PROXY_STFUNC
-
- void ForwardSessionEventToSession(STATEFN_SIG);
- void EnqueueSessionEvent(STATEFN_SIG);
-
- // Incoming handshake handlers, including special wrapper when the IncomingHandshake is used as fFunc
- void IncomingHandshake(STATEFN_SIG) {
- switch (ev->GetTypeRewrite()) {
- hFunc(TEvHandshakeAsk, IncomingHandshake);
- hFunc(TEvHandshakeRequest, IncomingHandshake);
- default:
- Y_ABORT();
- }
- }
- void IncomingHandshake(TEvHandshakeAsk::TPtr& ev);
- void IncomingHandshake(TEvHandshakeRequest::TPtr& ev);
-
- void RequestNodeInfo(STATEFN_SIG);
- void RequestNodeInfoForIncomingHandshake(STATEFN_SIG);
-
- void StartInitialHandshake();
- void StartResumeHandshake(ui64 inputCounter);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Incoming handshake event queue processing
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- void EnqueueIncomingHandshakeEvent(STATEFN_SIG);
- void EnqueueIncomingHandshakeEvent(TEvHandshakeDone::TPtr& ev);
- void EnqueueIncomingHandshakeEvent(TEvHandshakeFail::TPtr& ev);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // PendingNodeInfo
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- IEventBase* ConfigureTimeoutCookie; // pointer to the scheduled event used to match sent and received events
-
- void StartConfiguring();
- void Configure(TEvInterconnect::TEvNodeInfo::TPtr& ev);
- void ConfigureTimeout(TEvents::TEvWakeup::TPtr& ev);
- void ProcessConfigured();
-
- void HandleHandshakeStatus(TEvHandshakeDone::TPtr& ev);
- void HandleHandshakeStatus(TEvHandshakeFail::TPtr& ev);
-
- void TransitToErrorState(TString Explanation, bool updateErrorLog = true);
- void WakeupFromErrorState(TEvents::TEvWakeup::TPtr& ev);
- void Disconnect();
-
- const ui32 PeerNodeId;
- IActor **DynamicPtr;
-
- void ValidateEvent(TAutoPtr<IEventHandle>& ev, const char* func) {
- if (SelfId().NodeId() == PeerNodeId) {
- TString msg = Sprintf("Event Type# 0x%08" PRIx32 " TypeRewrite# 0x%08" PRIx32
- " from Sender# %s sent to the proxy for the node itself via Interconnect;"
- " THIS IS NOT A BUG IN INTERCONNECT, check the event sender instead",
- ev->Type, ev->GetTypeRewrite(), ev->Sender.ToString().data());
- LOG_ERROR_IC("ICP03", "%s", msg.data());
- Y_DEBUG_ABORT_UNLESS(false, "%s", msg.data());
- }
-
- Y_ABORT_UNLESS(ev->GetTypeRewrite() != TEvInterconnect::EvForward || ev->Recipient.NodeId() == PeerNodeId,
- "Recipient/Proxy NodeId mismatch Recipient# %s Type# 0x%08" PRIx32 " PeerNodeId# %" PRIu32 " Func# %s",
- ev->Recipient.ToString().data(), ev->Type, PeerNodeId, func);
- }
-
- // Common with helpers
- // All proxy actors share the same information in the object
- // read only
- TInterconnectProxyCommon::TPtr const Common;
-
- const TActorId& GetNameserviceId() const {
- return Common->NameserviceId;
- }
-
- TString TechnicalPeerHostName;
-
- std::shared_ptr<IInterconnectMetrics> Metrics;
-
- void HandleClosePeerSocket();
- void HandleCloseInputSession();
- void HandlePoisonSession();
-
- void HandleSessionBufferSizeRequest(TEvSessionBufferSizeRequest::TPtr& ev);
-
- bool CleanupEventQueueScheduled = false;
- void ScheduleCleanupEventQueue();
- void HandleCleanupEventQueue();
- void CleanupEventQueue();
-
- // hold all events before connection is established
- struct TPendingSessionEvent {
- TMonotonic Deadline;
- ui32 Size;
- THolder<IEventHandle> Event;
-
- TPendingSessionEvent(TMonotonic deadline, ui32 size, TAutoPtr<IEventHandle> event)
- : Deadline(deadline)
- , Size(size)
- , Event(event)
- {}
- };
- TDeque<TPendingSessionEvent> PendingSessionEvents;
- ui64 PendingSessionEventsSize = 0;
- void ProcessPendingSessionEvents();
- void DropSessionEvent(STATEFN_SIG);
-
- TInterconnectSessionTCP* Session = nullptr;
- TActorId SessionID;
-
-        // virtual ids used during handshake to check whether the connection belongs to the same session or to find out
-        // the latest handshake; they are virtual because the session actor appears only after a successful handshake
- TActorId SessionVirtualId;
- TActorId RemoteSessionVirtualId;
-
- TActorId GenerateSessionVirtualId() {
- ICPROXY_PROFILED;
-
- const ui64 localId = TlsActivationContext->ExecutorThread.ActorSystem->AllocateIDSpace(1);
- return NActors::TActorId(SelfId().NodeId(), 0, localId, 0);
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- TActorId IncomingHandshakeActor;
- TInstant IncomingHandshakeActorFilledIn;
- TInstant IncomingHandshakeActorReset;
- TMaybe<ui64> LastSerialFromIncomingHandshake;
- THolder<IEventBase> HeldHandshakeReply;
-
- void DropIncomingHandshake(bool poison = true) {
- ICPROXY_PROFILED;
-
- if (const TActorId& actorId = std::exchange(IncomingHandshakeActor, TActorId())) {
- LOG_DEBUG_IC("ICP111", "dropped incoming handshake: %s poison: %s", actorId.ToString().data(),
- poison ? "true" : "false");
- if (poison) {
- Send(actorId, new TEvents::TEvPoisonPill);
- }
- LastSerialFromIncomingHandshake.Clear();
- HeldHandshakeReply.Reset();
- IncomingHandshakeActorReset = TActivationContext::Now();
- }
- }
-
- void DropOutgoingHandshake(bool poison = true) {
- ICPROXY_PROFILED;
-
- if (const TActorId& actorId = std::exchange(OutgoingHandshakeActor, TActorId())) {
- LOG_DEBUG_IC("ICP052", "dropped outgoing handshake: %s poison: %s", actorId.ToString().data(),
- poison ? "true" : "false");
- if (poison) {
- Send(actorId, new TEvents::TEvPoisonPill);
- }
- OutgoingHandshakeActorReset = TActivationContext::Now();
- }
- }
-
- void DropHandshakes() {
- ICPROXY_PROFILED;
-
- DropIncomingHandshake();
- DropOutgoingHandshake();
- }
-
- void PrepareNewSessionHandshake() {
- ICPROXY_PROFILED;
-
- // drop existing session if we have one
- if (Session) {
- LOG_INFO_IC("ICP04", "terminating current session as we are negotiating a new one");
- IActor::InvokeOtherActor(*Session, &TInterconnectSessionTCP::Terminate, TDisconnectReason::NewSession());
- }
-
- // ensure we have no current session
- Y_ABORT_UNLESS(!Session);
-
- // switch to pending connection state -- we wait for handshakes, we want more handshakes!
- SwitchToState(__LINE__, "PendingConnection", &TThis::PendingConnection);
- }
-
- void IssueIncomingHandshakeReply(const TActorId& handshakeId, ui64 peerLocalId,
- THolder<IEventBase> event);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- std::unordered_map<TString, TActorId> ConnectionSubscriptions;
-
- void Handle(TEvSubscribeForConnection::TPtr ev) {
- auto& msg = *ev->Get();
- if (msg.Subscribe) {
- if (const auto [it, inserted] = ConnectionSubscriptions.emplace(msg.HandshakeId, ev->Sender); !inserted) {
- Y_DEBUG_ABORT_UNLESS(false);
- ConnectionSubscriptions.erase(it); // collision happened somehow?
- }
- } else {
- ConnectionSubscriptions.erase(msg.HandshakeId);
- }
- }
-
- void Handle(TEvReportConnection::TPtr ev) {
- if (auto nh = ConnectionSubscriptions.extract(ev->Get()->HandshakeId)) {
- TActivationContext::Send(IEventHandle::Forward(ev, nh.mapped()));
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- TActorId OutgoingHandshakeActor;
- TInstant OutgoingHandshakeActorCreated;
- TInstant OutgoingHandshakeActorReset;
-
- TInstant LastSessionDieTime;
-
- void GenerateHttpInfo(NMon::TEvHttpInfo::TPtr& ev);
-
- void Handle(TEvQueryStats::TPtr& ev);
-
- TDuration HoldByErrorWakeupDuration = TDuration::Zero();
- TEvents::TEvWakeup* HoldByErrorWakeupCookie;
-
- THolder<TProgramInfo> RemoteProgramInfo;
- NInterconnect::TSecureSocketContext::TPtr SecureContext;
-
- void Handle(TEvGetSecureSocket::TPtr ev) {
- auto socket = MakeIntrusive<NInterconnect::TSecureSocket>(*ev->Get()->Socket, SecureContext);
- Send(ev->Sender, new TEvSecureSocket(std::move(socket)));
- }
-
- TDeque<THolder<IEventHandle>> PendingIncomingHandshakeEvents;
-
- TDeque<std::tuple<TInstant, TString, TString, ui32>> ErrorStateLog;
-
- void UpdateErrorStateLog(TInstant now, TString kind, TString explanation) {
- ICPROXY_PROFILED;
-
- if (ErrorStateLog) {
- auto& back = ErrorStateLog.back();
- TString lastKind, lastExpl;
- if (kind == std::get<1>(back) && explanation == std::get<2>(back)) {
- std::get<0>(back) = now;
- ++std::get<3>(back);
- return;
- }
- }
-
- ErrorStateLog.emplace_back(now, std::move(kind), std::move(explanation), 1);
- if (ErrorStateLog.size() > 20) {
- ErrorStateLog.pop_front();
- }
- }
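UpdateErrorStateLog coalesces consecutive identical errors: when the new (kind, explanation) pair matches the last entry, only the timestamp is refreshed and a repeat counter is bumped; otherwise a new entry is appended and the log is trimmed to the 20 most recent entries. A stand-alone sketch of that coalescing log, using std::chrono in place of TInstant:

    #include <chrono>
    #include <cstdint>
    #include <deque>
    #include <string>
    #include <tuple>
    #include <utility>

    // Entry layout mirrors ErrorStateLog: (timestamp, kind, explanation, repeat count).
    class TErrorLog {
    public:
        using TEntry = std::tuple<std::chrono::system_clock::time_point, std::string, std::string, uint32_t>;

        void Add(std::chrono::system_clock::time_point now, std::string kind, std::string explanation) {
            if (!Log.empty()) {
                TEntry& back = Log.back();
                if (kind == std::get<1>(back) && explanation == std::get<2>(back)) {
                    std::get<0>(back) = now;   // refresh the timestamp of the repeated error
                    ++std::get<3>(back);       // and bump its repeat counter
                    return;
                }
            }
            Log.emplace_back(now, std::move(kind), std::move(explanation), 1);
            if (Log.size() > 20) {
                Log.pop_front();
            }
        }

        const std::deque<TEntry>& Entries() const { return Log; }

    private:
        std::deque<TEntry> Log;
    };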
-
- void LogHandshakeFail(TEvHandshakeFail::TPtr& ev, bool inconclusive);
-
- bool Terminated = false;
- void HandleTerminate();
-
- void PassAway() override;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_server.cpp b/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
deleted file mode 100644
index df0c172dc1..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_server.cpp
+++ /dev/null
@@ -1,119 +0,0 @@
-#include "interconnect_tcp_server.h"
-#include "interconnect_handshake.h"
-
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-
-#include "interconnect_common.h"
-
-namespace NActors {
- TInterconnectListenerTCP::TInterconnectListenerTCP(const TString& address, ui16 port, TInterconnectProxyCommon::TPtr common, const TMaybe<SOCKET>& socket)
- : TActor(&TThis::Initial)
- , TInterconnectLoggingBase(Sprintf("ICListener: %s", SelfId().ToString().data()))
- , Address(address)
- , Port(port)
- , Listener(
- socket
- ? new NInterconnect::TStreamSocket(*socket)
- : nullptr)
- , ExternalSocket(!!Listener)
- , ProxyCommonCtx(std::move(common))
- {
- if (ExternalSocket) {
- SetNonBlock(*Listener);
- }
- }
-
- TAutoPtr<IEventHandle> TInterconnectListenerTCP::AfterRegister(const TActorId& self, const TActorId& parentId) {
- return new IEventHandle(self, parentId, new TEvents::TEvBootstrap, 0);
- }
-
- void TInterconnectListenerTCP::Die(const TActorContext& ctx) {
- LOG_DEBUG_IC("ICL08", "Dying");
- TActor::Die(ctx);
- }
-
- int TInterconnectListenerTCP::Bind() {
- auto doTry = [&](NInterconnect::TAddress addr) {
- int error;
- Listener = NInterconnect::TStreamSocket::Make(addr.GetFamily(), &error);
- if (*Listener == -1) {
- return error;
- }
- SetNonBlock(*Listener);
- Listener->SetSendBufferSize(ProxyCommonCtx->Settings.GetSendBufferSize()); // TODO(alexvru): WTF?
- SetSockOpt(*Listener, SOL_SOCKET, SO_REUSEADDR, 1);
- if (addr.GetFamily() == AF_INET6) {
- SetSockOpt(*Listener, IPPROTO_IPV6, IPV6_V6ONLY, 0);
- }
- const ui32 backlog = ProxyCommonCtx->Settings.SocketBacklogSize;
- if (const auto e = -Listener->Bind(addr)) {
- return e;
- } else if (const auto e = -Listener->Listen(backlog ? backlog : SOMAXCONN)) {
- return e;
- } else {
- return 0;
- }
- };
-
- if (Address) {
- NInterconnect::TAddress addr(Address, Port);
- if (ProxyCommonCtx->Settings.BindOnAllAddresses) {
- addr = addr.GetFamily() == AF_INET ? NInterconnect::TAddress::AnyIPv4(Port) :
- addr.GetFamily() == AF_INET6 ? NInterconnect::TAddress::AnyIPv6(Port) : addr;
- }
- return doTry(addr);
- } else {
- int error = doTry(NInterconnect::TAddress::AnyIPv6(Port));
- if (error == EAFNOSUPPORT || error == EPROTONOSUPPORT) {
- error = doTry(NInterconnect::TAddress::AnyIPv4(Port));
- }
- return error;
- }
- }
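Bind above prefers a single dual-stack listener: it binds the IPv6 any-address with IPV6_V6ONLY turned off (so IPv4 clients are accepted too) and falls back to a plain IPv4 listener only when the platform reports EAFNOSUPPORT or EPROTONOSUPPORT. A raw-POSIX sketch of the same fallback, without NInterconnect::TStreamSocket or the send-buffer/backlog tuning; the function name is hypothetical.

    #include <arpa/inet.h>
    #include <cerrno>
    #include <cstdint>
    #include <netinet/in.h>
    #include <sys/socket.h>
    #include <unistd.h>

    // Returns a listening fd, or -1 with errno describing the failure.
    static int ListenDualStack(uint16_t port, int backlog) {
        int fd = socket(AF_INET6, SOCK_STREAM, 0);
        if (fd >= 0) {
            int on = 1, off = 0;
            setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
            setsockopt(fd, IPPROTO_IPV6, IPV6_V6ONLY, &off, sizeof(off)); // accept IPv4 as well
            sockaddr_in6 addr6{};
            addr6.sin6_family = AF_INET6;
            addr6.sin6_addr = in6addr_any;
            addr6.sin6_port = htons(port);
            if (bind(fd, reinterpret_cast<sockaddr*>(&addr6), sizeof(addr6)) == 0 &&
                listen(fd, backlog) == 0) {
                return fd;
            }
            const int err = errno;
            close(fd);
            errno = err;
        }
        if (errno != EAFNOSUPPORT && errno != EPROTONOSUPPORT) {
            return -1; // a real error, do not mask it by retrying with IPv4
        }
        // IPv6 is unavailable on this host -- fall back to an IPv4-only listener.
        fd = socket(AF_INET, SOCK_STREAM, 0);
        if (fd < 0) {
            return -1;
        }
        int on = 1;
        setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &on, sizeof(on));
        sockaddr_in addr4{};
        addr4.sin_family = AF_INET;
        addr4.sin_addr.s_addr = htonl(INADDR_ANY);
        addr4.sin_port = htons(port);
        if (bind(fd, reinterpret_cast<sockaddr*>(&addr4), sizeof(addr4)) != 0 ||
            listen(fd, backlog) != 0) {
            const int err = errno;
            close(fd);
            errno = err;
            return -1;
        }
        return fd;
    }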
-
- void TInterconnectListenerTCP::Bootstrap(const TActorContext& ctx) {
- if (!Listener) {
- if (const int err = Bind()) {
- LOG_ERROR_IC("ICL01", "Bind failed: %s (%s:%u)", strerror(err), Address.data(), Port);
- Listener.Reset();
- Become(&TThis::Initial, TDuration::Seconds(1), new TEvents::TEvBootstrap);
- return;
- }
- }
- if (const auto& callback = ProxyCommonCtx->InitWhiteboard) {
- callback(Port, TlsActivationContext->ExecutorThread.ActorSystem);
- }
- const bool success = ctx.Send(MakePollerActorId(), new TEvPollerRegister(Listener, SelfId(), {}));
- Y_ABORT_UNLESS(success);
- Become(&TThis::Listen);
- }
-
- void TInterconnectListenerTCP::Handle(TEvPollerRegisterResult::TPtr ev, const TActorContext& ctx) {
- PollerToken = std::move(ev->Get()->PollerToken);
- Process(ctx);
- }
-
- void TInterconnectListenerTCP::Process(const TActorContext& ctx) {
- for (;;) {
- NInterconnect::TAddress address;
- const int r = Listener->Accept(address);
- if (r >= 0) {
- LOG_DEBUG_IC("ICL04", "Accepted from: %s", address.ToString().data());
- auto socket = MakeIntrusive<NInterconnect::TStreamSocket>(static_cast<SOCKET>(r));
- ctx.Register(CreateIncomingHandshakeActor(ProxyCommonCtx, std::move(socket)));
- continue;
- } else if (-r != EAGAIN && -r != EWOULDBLOCK) {
- Y_ABORT_UNLESS(-r != ENFILE && -r != EMFILE && !ExternalSocket);
- LOG_ERROR_IC("ICL06", "Listen failed: %s (%s:%u)", strerror(-r), Address.data(), Port);
- Listener.Reset();
- PollerToken.Reset();
- Become(&TThis::Initial, TDuration::Seconds(1), new TEvents::TEvBootstrap);
- } else if (PollerToken && PollerToken->RequestReadNotificationAfterWouldBlock()) {
- continue;
- }
- break;
- }
- }
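Process above drains the listening socket in a loop: accept connections until the call would block, spawn an incoming-handshake actor for each one, and stop only once a read notification has been re-armed with the poller (retrying if data arrived in the meantime). A generic sketch of that pattern; handOff and pollerRearm are hypothetical callbacks standing in for actor registration and PollerToken->RequestReadNotificationAfterWouldBlock().

    #include <cerrno>
    #include <sys/socket.h>

    // Non-blocking accept loop: drain, hand off, then arm a readiness notification.
    template <typename THandOff, typename TRearm>
    void AcceptLoop(int listenFd, THandOff&& handOff, TRearm&& pollerRearm) {
        for (;;) {
            const int fd = accept(listenFd, nullptr, nullptr);
            if (fd >= 0) {
                handOff(fd);        // e.g. start an incoming-handshake actor for this connection
                continue;
            }
            if (errno != EAGAIN && errno != EWOULDBLOCK) {
                return;             // hard error: the caller recreates the listener
            }
            if (pollerRearm()) {
                continue;           // the socket became readable while re-arming, try again
            }
            break;                  // notification armed; wait for the next poller event
        }
    }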
-
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_server.h b/library/cpp/actors/interconnect/interconnect_tcp_server.h
deleted file mode 100644
index 41f46ab6d4..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_server.h
+++ /dev/null
@@ -1,58 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/events.h>
-
-#include "interconnect_common.h"
-#include "poller_actor.h"
-#include "events_local.h"
-
-namespace NActors {
- class TInterconnectListenerTCP: public TActor<TInterconnectListenerTCP>, public TInterconnectLoggingBase {
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::INTERCONNECT_COMMON;
- }
-
- TInterconnectListenerTCP(const TString& address, ui16 port, TInterconnectProxyCommon::TPtr common, const TMaybe<SOCKET>& socket = Nothing());
- int Bind();
-
- private:
- STFUNC(Initial) {
- switch (ev->GetTypeRewrite()) {
- CFunc(TEvents::TEvBootstrap::EventType, Bootstrap);
- CFunc(TEvents::TEvPoisonPill::EventType, Die);
- }
- }
-
- STFUNC(Listen) {
- switch (ev->GetTypeRewrite()) {
- CFunc(TEvents::TEvPoisonPill::EventType, Die);
- HFunc(TEvPollerRegisterResult, Handle);
- CFunc(TEvPollerReady::EventType, Process);
- }
- }
-
- TAutoPtr<IEventHandle> AfterRegister(const TActorId& self, const TActorId& parentId) override;
-
- void Die(const TActorContext& ctx) override;
-
- void Bootstrap(const TActorContext& ctx);
- void Handle(TEvPollerRegisterResult::TPtr ev, const TActorContext& ctx);
-
- void Process(const TActorContext& ctx);
-
- const TString Address;
- const ui16 Port;
- TIntrusivePtr<NInterconnect::TStreamSocket> Listener;
- const bool ExternalSocket;
- TPollerToken::TPtr PollerToken;
- TInterconnectProxyCommon::TPtr const ProxyCommonCtx;
- };
-
- static inline TActorId MakeInterconnectListenerActorId(bool dynamic) {
- char x[12] = {'I', 'C', 'L', 'i', 's', 't', 'e', 'n', 'e', 'r', '/', dynamic ? 'D' : 'S'};
- return TActorId(0, TStringBuf(x, 12));
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_session.cpp b/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
deleted file mode 100644
index 7d6f8d012f..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_session.cpp
+++ /dev/null
@@ -1,1322 +0,0 @@
-#include "interconnect_tcp_proxy.h"
-#include "interconnect_tcp_session.h"
-#include "interconnect_handshake.h"
-
-#include <library/cpp/actors/core/probes.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/interconnect.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/monlib/service/pages/templates.h>
-
-namespace NActors {
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- template<typename T>
- T Coalesce(T&& x) {
- return x;
- }
-
- template<typename T, typename T2, typename... TRest>
- typename std::common_type<T, T2, TRest...>::type Coalesce(T&& first, T2&& mid, TRest&&... rest) {
- if (first != typename std::remove_reference<T>::type()) {
- return first;
- } else {
- return Coalesce(std::forward<T2>(mid), std::forward<TRest>(rest)...);
- }
- }
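-    // Coalesce() picks the first argument that differs from a default-constructed value of its type; if all of them
-    // are default, the last one is returned. A rough illustration (hypothetical values, not taken from any config):
-    //
-    //     Coalesce(TDuration::Zero(), TDuration::Seconds(10));      // -> 10s, the first argument is "unset"
-    //     Coalesce(TDuration::Seconds(3), TDuration::Seconds(10));  // -> 3s
-    //
-    // It is used further below to fall back to the DEFAULT_* constants when the corresponding settings are unset.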
-
- TInterconnectSessionTCP::TInterconnectSessionTCP(TInterconnectProxyTCP* const proxy, TSessionParams params)
- : TActor(&TInterconnectSessionTCP::StateFunc)
- , Created(TInstant::Now())
- , Proxy(proxy)
- , CloseOnIdleWatchdog(GetCloseOnIdleTimeout(), std::bind(&TThis::OnCloseOnIdleTimerHit, this))
- , LostConnectionWatchdog(GetLostConnectionTimeout(), std::bind(&TThis::OnLostConnectionTimerHit, this))
- , Params(std::move(params))
- , TotalOutputQueueSize(0)
- , OutputStuckFlag(false)
- , OutputQueueUtilization(16)
- , OutputCounter(0ULL)
- {
- Proxy->Metrics->SetConnected(0);
- ReceiveContext.Reset(new TReceiveContext);
- }
-
- TInterconnectSessionTCP::~TInterconnectSessionTCP() {
- // close socket ASAP when actor system is being shut down
- if (Socket) {
- Socket->Shutdown(SHUT_RDWR);
- }
- if (XdcSocket) {
- XdcSocket->Shutdown(SHUT_RDWR);
- }
- }
-
- void TInterconnectSessionTCP::Init() {
- auto destroyCallback = [as = TlsActivationContext->ExecutorThread.ActorSystem, id = Proxy->Common->DestructorId](THolder<IEventBase> event) {
- as->Send(id, event.Release());
- };
- Pool.ConstructInPlace(Proxy->Common, std::move(destroyCallback));
- ChannelScheduler.ConstructInPlace(Proxy->PeerNodeId, Proxy->Common->ChannelsConfig, Proxy->Metrics, *Pool,
- Proxy->Common->Settings.MaxSerializedEventSize, Params);
-
- LOG_INFO(*TlsActivationContext, NActorsServices::INTERCONNECT_STATUS, "[%u] session created", Proxy->PeerNodeId);
- SetPrefix(Sprintf("Session %s [node %" PRIu32 "]", SelfId().ToString().data(), Proxy->PeerNodeId));
- SendUpdateToWhiteboard();
- }
-
- void TInterconnectSessionTCP::CloseInputSession() {
- Send(ReceiverId, new TEvInterconnect::TEvCloseInputSession);
- }
-
- void TInterconnectSessionTCP::Handle(TEvTerminate::TPtr& ev) {
- Terminate(ev->Get()->Reason);
- }
-
- void TInterconnectSessionTCP::HandlePoison() {
- Terminate(TDisconnectReason());
- }
-
- void TInterconnectSessionTCP::Terminate(TDisconnectReason reason) {
- LOG_INFO_IC_SESSION("ICS01", "socket: %" PRIi64 " reason# %s", (Socket ? i64(*Socket) : -1), reason.ToString().data());
-
- IActor::InvokeOtherActor(*Proxy, &TInterconnectProxyTCP::UnregisterSession, this);
- ShutdownSocket(std::move(reason));
-
- for (const auto& kv : Subscribers) {
- Send(kv.first, new TEvInterconnect::TEvNodeDisconnected(Proxy->PeerNodeId), 0, kv.second);
- }
- Proxy->Metrics->SubSubscribersCount(Subscribers.size());
- Subscribers.clear();
-
- ChannelScheduler->ForEach([&](TEventOutputChannel& channel) {
- channel.NotifyUndelivered();
- });
-
- if (ReceiverId) {
- Send(ReceiverId, new TEvents::TEvPoisonPill);
- }
-
- SendUpdateToWhiteboard(false);
-
- Proxy->Metrics->SubOutputBuffersTotalSize(TotalOutputQueueSize);
- Proxy->Metrics->SubInflightDataAmount(InflightDataAmount);
-
- LOG_INFO(*TlsActivationContext, NActorsServices::INTERCONNECT_STATUS, "[%u] session destroyed", Proxy->PeerNodeId);
-
- if (!Subscribers.empty()) {
- Proxy->Metrics->SubSubscribersCount(Subscribers.size());
- }
-
- TActor::PassAway();
- }
-
- void TInterconnectSessionTCP::PassAway() {
- Y_ABORT("TInterconnectSessionTCP::PassAway() can't be called directly");
- }
-
- void TInterconnectSessionTCP::Forward(STATEFN_SIG) {
- Proxy->ValidateEvent(ev, "Forward");
-
- LOG_DEBUG_IC_SESSION("ICS02", "send event from: %s to: %s", ev->Sender.ToString().data(), ev->Recipient.ToString().data());
- ++MessagesGot;
-
- if (ev->Flags & IEventHandle::FlagSubscribeOnSession) {
- Subscribe(ev);
- }
-
- ui16 evChannel = ev->GetChannel();
- auto& oChannel = ChannelScheduler->GetOutputChannel(evChannel);
- const bool wasWorking = oChannel.IsWorking();
-
- const auto [dataSize, event] = oChannel.Push(*ev);
- LWTRACK(ForwardEvent, event->Orbit, Proxy->PeerNodeId, event->Descr.Type, event->Descr.Flags, LWACTORID(event->Descr.Recipient), LWACTORID(event->Descr.Sender), event->Descr.Cookie, event->EventSerializedSize);
-
- TotalOutputQueueSize += dataSize;
- Proxy->Metrics->AddOutputBuffersTotalSize(dataSize);
- if (!wasWorking) {
-            // this channel has returned to work -- it was empty, and we have just put the first event into its queue
- ChannelScheduler->AddToHeap(oChannel, EqualizeCounter);
- }
-
- SetOutputStuckFlag(true);
- ++NumEventsInQueue;
- RearmCloseOnIdle();
-
- LWTRACK(EnqueueEvent, event->Orbit, Proxy->PeerNodeId, NumEventsInQueue, GetWriteBlockedTotal(), evChannel, oChannel.GetQueueSize(), oChannel.GetBufferedAmountOfData());
-
- // check for overloaded queues
- ui64 sendBufferDieLimit = Proxy->Common->Settings.SendBufferDieLimitInMB * ui64(1 << 20);
- if (sendBufferDieLimit != 0 && TotalOutputQueueSize > sendBufferDieLimit) {
- LOG_ERROR_IC_SESSION("ICS03", "socket: %" PRIi64 " output queue is overloaded, actual %" PRIu64 " bytes, limit is %" PRIu64,
- Socket ? i64(*Socket) : -1, TotalOutputQueueSize, sendBufferDieLimit);
- return Terminate(TDisconnectReason::QueueOverload());
- }
-
- ui64 outputBuffersTotalSizeLimit = Proxy->Common->Settings.OutputBuffersTotalSizeLimitInMB * ui64(1 << 20);
- if (outputBuffersTotalSizeLimit != 0 && static_cast<ui64>(Proxy->Metrics->GetOutputBuffersTotalSize()) > outputBuffersTotalSizeLimit) {
- LOG_ERROR_IC_SESSION("ICS77", "Exceeded total limit on output buffers size");
- if (AtomicTryLock(&Proxy->Common->StartedSessionKiller)) {
- CreateSessionKillingActor(Proxy->Common);
- }
- }
-
- IssueRam(true);
- }
-
- void TInterconnectSessionTCP::Subscribe(STATEFN_SIG) {
- LOG_DEBUG_IC_SESSION("ICS04", "subscribe for session state for %s", ev->Sender.ToString().data());
- const auto [it, inserted] = Subscribers.emplace(ev->Sender, ev->Cookie);
- if (inserted) {
- Proxy->Metrics->IncSubscribersCount();
- } else {
- it->second = ev->Cookie;
- }
- Send(ev->Sender, new TEvInterconnect::TEvNodeConnected(Proxy->PeerNodeId), 0, ev->Cookie);
- }
-
- void TInterconnectSessionTCP::Unsubscribe(STATEFN_SIG) {
- LOG_DEBUG_IC_SESSION("ICS05", "unsubscribe for session state for %s", ev->Sender.ToString().data());
-        Proxy->Metrics->SubSubscribersCount(Subscribers.erase(ev->Sender));
- }
-
- THolder<TEvHandshakeAck> TInterconnectSessionTCP::ProcessHandshakeRequest(TEvHandshakeAsk::TPtr& ev) {
- TEvHandshakeAsk *msg = ev->Get();
-
- // close existing input session, if any, and do nothing upon its destruction
- ReestablishConnection({}, false, TDisconnectReason::NewSession());
- const ui64 lastInputSerial = ReceiveContext->LockLastPacketSerialToConfirm();
-
- LOG_INFO_IC_SESSION("ICS08", "incoming handshake Self# %s Peer# %s Counter# %" PRIu64 " LastInputSerial# %" PRIu64,
- msg->Self.ToString().data(), msg->Peer.ToString().data(), msg->Counter, lastInputSerial);
-
- return MakeHolder<TEvHandshakeAck>(msg->Peer, lastInputSerial, Params);
- }
-
- void TInterconnectSessionTCP::SetNewConnection(TEvHandshakeDone::TPtr& ev) {
- if (ReceiverId) {
- // upon destruction of input session actor invoke this callback again
- ReestablishConnection(std::move(ev), false, TDisconnectReason::NewSession());
- return;
- }
-
- LOG_INFO_IC_SESSION("ICS09", "handshake done sender: %s self: %s peer: %s socket: %" PRIi64,
- ev->Sender.ToString().data(), ev->Get()->Self.ToString().data(), ev->Get()->Peer.ToString().data(),
- i64(*ev->Get()->Socket));
-
- NewConnectionSet = TActivationContext::Now();
- BytesWrittenToSocket = 0;
-
- SendBufferSize = ev->Get()->Socket->GetSendBufferSize();
- Socket = std::move(ev->Get()->Socket);
- XdcSocket = std::move(ev->Get()->XdcSocket);
-
-        // there may be a race between the serial reported by the peer and the one we have confirmed locally, so take the maximum
- const ui64 nextPacket = Max(LastConfirmed, ev->Get()->NextPacket);
-
- // arm watchdogs
- RearmCloseOnIdle();
-
- // reset activity timestamps
- LastInputActivityTimestamp = LastPayloadActivityTimestamp = TActivationContext::Monotonic();
-
- LOG_INFO_IC_SESSION("ICS10", "traffic start");
-
- // reset parameters to initial values
- WriteBlockedByFullSendBuffer = false;
- ReceiveContext->MainWriteBlocked = false;
- ReceiveContext->XdcWriteBlocked = false;
- ReceiveContext->MainReadPending = false;
- ReceiveContext->XdcReadPending = false;
-
- // create input session actor
- ReceiveContext->UnlockLastPacketSerialToConfirm();
- auto actor = MakeHolder<TInputSessionTCP>(SelfId(), Socket, XdcSocket, ReceiveContext, Proxy->Common,
- Proxy->Metrics, Proxy->PeerNodeId, nextPacket, GetDeadPeerTimeout(), Params);
- ReceiverId = RegisterWithSameMailbox(actor.Release());
-
- // register our socket in poller actor
- LOG_DEBUG_IC_SESSION("ICS11", "registering socket in PollerActor");
- const bool success = Send(MakePollerActorId(), new TEvPollerRegister(Socket, ReceiverId, SelfId()));
- Y_ABORT_UNLESS(success);
- if (XdcSocket) {
- const bool success = Send(MakePollerActorId(), new TEvPollerRegister(XdcSocket, ReceiverId, SelfId()));
- Y_ABORT_UNLESS(success);
- }
-
- LostConnectionWatchdog.Disarm();
- Proxy->Metrics->SetConnected(1);
- LOG_INFO(*TlsActivationContext, NActorsServices::INTERCONNECT_STATUS, "[%u] connected", Proxy->PeerNodeId);
-
- // arm pinger timer
- ResetFlushLogic();
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // REINITIALIZE SEND QUEUE
- //
-        // scan through the send queue and keep only those packets that carry data -- we will simply resend them; drop all
-        // other auxiliary packets; also reset packet metrics to zero and rewind the send queue so that sending starts
-        // from the beginning
-
- // drop confirmed packets first as we do not need unwanted retransmissions
- OutgoingStream.RewindToEnd();
- XdcStream.RewindToEnd();
- XdcOffset = Max<size_t>();
- OutgoingOffset = 0;
- OutgoingIndex = SendQueue.size();
- DropConfirmed(nextPacket);
- OutgoingStream.Rewind();
- OutOfBandStream = {};
- XdcStream.Rewind();
- OutgoingOffset = XdcOffset = 0;
- OutgoingIndex = 0;
- ForcedWriteLength = 0;
-
- const ui64 serial = OutputCounter - SendQueue.size() + 1;
- Y_ABORT_UNLESS(serial > LastConfirmed, "%s serial# %" PRIu64 " LastConfirmed# %" PRIu64, LogPrefix.data(), serial, LastConfirmed);
- LOG_DEBUG_IC_SESSION("ICS06", "rewind SendQueue size# %zu LastConfirmed# %" PRIu64 " NextSerial# %" PRIu64,
- SendQueue.size(), LastConfirmed, serial);
-
- SwitchStuckPeriod();
-
- LastHandshakeDone = TActivationContext::Now();
-
- GenerateTraffic();
- }
-
- void TInterconnectSessionTCP::Handle(TEvUpdateFromInputSession::TPtr& ev) {
- if (ev->Sender == ReceiverId) {
- TEvUpdateFromInputSession& msg = *ev->Get();
-
- // update ping time
- Ping = msg.Ping;
- LWPROBE(UpdateFromInputSession, Proxy->PeerNodeId, Ping.MillisecondsFloat());
-
- bool needConfirm = false;
-
- // update activity timer for dead peer checker
- LastInputActivityTimestamp = TActivationContext::Monotonic();
-
- if (msg.NumDataBytes) {
- UnconfirmedBytes += msg.NumDataBytes;
- if (UnconfirmedBytes >= GetTotalInflightAmountOfData() / 4) {
- needConfirm = true;
- } else {
- SetForcePacketTimestamp(Proxy->Common->Settings.ForceConfirmPeriod);
- }
-
- // reset payload watchdog that controls close-on-idle behaviour
- LastPayloadActivityTimestamp = TActivationContext::Monotonic();
- RearmCloseOnIdle();
- }
-
- LWPROBE_IF_TOO_LONG(SlowICDropConfirmed, Proxy->PeerNodeId, ms) {
- DropConfirmed(msg.ConfirmedByInput);
- }
-
- // if we haven't generated any packets, then make a lone Flush packet without any data
- if (needConfirm && Socket) {
- ++ConfirmPacketsForcedBySize;
- MakePacket(false);
- }
-
- GenerateTraffic();
-
- for (;;) {
- switch (EUpdateState state = ReceiveContext->UpdateState) {
- case EUpdateState::NONE:
- case EUpdateState::CONFIRMING:
- Y_ABORT("unexpected state");
-
- case EUpdateState::INFLIGHT:
- // this message we are processing was the only one in flight, so we can reset state to NONE here
- if (ReceiveContext->UpdateState.compare_exchange_weak(state, EUpdateState::NONE)) {
- return;
- }
- break;
-
- case EUpdateState::INFLIGHT_AND_PENDING:
-                        // there are more messages pending from the input session actor, so we have to tell it to
-                        // release the next one
- if (ReceiveContext->UpdateState.compare_exchange_weak(state, EUpdateState::CONFIRMING)) {
- Send(ev->Sender, new TEvConfirmUpdate);
- return;
- }
- break;
- }
- }
- }
- }
-
- void TInterconnectSessionTCP::IssueRam(bool batching) {
- const auto& batchPeriod = Proxy->Common->Settings.BatchPeriod;
- if (!RamInQueue || (!batching && RamInQueue->Batching && batchPeriod != TDuration())) {
- auto ev = std::make_unique<TEvRam>(batching);
- RamInQueue = ev.get();
- auto handle = std::make_unique<IEventHandle>(SelfId(), SelfId(), ev.release());
- if (batching && batchPeriod != TDuration()) {
- TActivationContext::Schedule(batchPeriod, handle.release());
- } else {
- TActivationContext::Send(handle.release());
- }
- LWPROBE(StartRam, Proxy->PeerNodeId);
- RamStartedCycles = GetCycleCountFast();
- }
- }
-
- void TInterconnectSessionTCP::HandleRam(TEvRam::TPtr& ev) {
- if (ev->Get() == RamInQueue) {
- LWPROBE(FinishRam, Proxy->PeerNodeId, NHPTimer::GetSeconds(GetCycleCountFast() - ev->SendTime) * 1000.0);
- RamInQueue = nullptr;
- GenerateTraffic();
- }
- }
-
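-    // A note on the TEvRam ("resume") mechanism used above and in GenerateTraffic() below: when the per-event CPU
-    // budget is exhausted (TimeLimit->CheckExceeded()), the session does not keep writing; instead it sends itself a
-    // TEvRam and continues in HandleRam() on the next mailbox pass. When issued with batching == true (from Forward()),
-    // the event may additionally be delayed by Settings.BatchPeriod so that several enqueued events are flushed in a
-    // single GenerateTraffic() pass. Only the most recently issued TEvRam (tracked via RamInQueue) is honoured.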
- void TInterconnectSessionTCP::GenerateTraffic() {
- if (!TimeLimit) {
- TimeLimit.emplace(GetMaxCyclesPerEvent());
- }
-
- // generate ping request, if needed
- IssuePingRequest();
-
- while (Socket) {
- ProducePackets();
- if (!Socket) {
- return;
- }
-
- WriteData();
- if (!Socket) {
- return;
- }
-
- bool canProducePackets;
- bool canWriteData;
-
- canProducePackets = NumEventsInQueue && InflightDataAmount < GetTotalInflightAmountOfData() &&
- GetUnsentSize() < GetUnsentLimit();
-
- canWriteData = ((OutgoingStream || OutOfBandStream) && !ReceiveContext->MainWriteBlocked) ||
- (XdcStream && !ReceiveContext->XdcWriteBlocked);
-
- if (!canProducePackets && !canWriteData) {
- SetEnoughCpu(true); // we do not starve
- break;
- } else if (TimeLimit->CheckExceeded()) {
- SetEnoughCpu(false);
- IssueRam(false);
- break;
- }
- }
-
- // account traffic changes
- ChannelScheduler->ForEach([](TEventOutputChannel& channel) {
- channel.AccountTraffic();
- });
-
- // equalize channel weights
- EqualizeCounter += ChannelScheduler->Equalize();
- }
-
- void TInterconnectSessionTCP::ProducePackets() {
-        // first, we create as many data packets as we can under the current conditions: there must be events in the
-        // channel queues, and the amount of data in flight must fit into the requested limit; as soon as one of these
-        // conditions no longer holds, we exit the loop
- static constexpr ui32 maxBytesToProduce = 64 * 1024;
- ui32 bytesProduced = 0;
- while (NumEventsInQueue && InflightDataAmount < GetTotalInflightAmountOfData() && GetUnsentSize() < GetUnsentLimit()) {
- if ((bytesProduced && TimeLimit->CheckExceeded()) || bytesProduced >= maxBytesToProduce) {
- break;
- }
- try {
- bytesProduced += MakePacket(true);
- } catch (const TExSerializedEventTooLarge& ex) {
- // terminate session if the event can't be serialized properly
- LOG_CRIT_IC("ICS31", "serialized event Type# 0x%08" PRIx32 " is too large", ex.Type);
- return Terminate(TDisconnectReason::EventTooLarge());
- }
- }
- }
-
- void TInterconnectSessionTCP::StartHandshake() {
- LOG_INFO_IC_SESSION("ICS15", "start handshake");
- IActor::InvokeOtherActor(*Proxy, &TInterconnectProxyTCP::StartResumeHandshake, ReceiveContext->LockLastPacketSerialToConfirm());
- }
-
- void TInterconnectSessionTCP::ReestablishConnectionWithHandshake(TDisconnectReason reason) {
- ReestablishConnection({}, true, std::move(reason));
- }
-
- void TInterconnectSessionTCP::ReestablishConnection(TEvHandshakeDone::TPtr&& ev, bool startHandshakeOnSessionClose,
- TDisconnectReason reason) {
- if (Socket) {
- LOG_INFO_IC_SESSION("ICS13", "reestablish connection");
- ShutdownSocket(std::move(reason)); // stop sending/receiving on socket
- PendingHandshakeDoneEvent = std::move(ev);
- StartHandshakeOnSessionClose = startHandshakeOnSessionClose;
- if (!ReceiverId) {
- ReestablishConnectionExecute();
- }
- }
- }
-
- void TInterconnectSessionTCP::OnDisconnect(TEvSocketDisconnect::TPtr& ev) {
- if (ev->Sender == ReceiverId) {
- if (ev->Get()->Reason == TDisconnectReason::EndOfStream() && !NumEventsInQueue && OutputCounter == LastConfirmed) {
- return Terminate(ev->Get()->Reason);
- }
-
- const bool wasConnected(Socket);
- LOG_INFO_IC_SESSION("ICS07", "socket disconnect %" PRIi64 " reason# %s", Socket ? i64(*Socket) : -1, ev->Get()->Reason.ToString().data());
-            ReceiverId = TActorId(); // reset receiver actor id as we no longer have a receiver
- if (wasConnected) {
-                // we were successfully connected and did not expect failure, so it arrived from the input side; we should
-                // restart the handshake process, closing our side of the socket first
- ShutdownSocket(ev->Get()->Reason);
- StartHandshake();
- } else {
- ReestablishConnectionExecute();
- }
- }
- }
-
- void TInterconnectSessionTCP::ShutdownSocket(TDisconnectReason reason) {
- if (Socket) {
- if (const TString& s = reason.ToString()) {
- Proxy->Metrics->IncDisconnectByReason(s);
- }
-
- LOG_INFO_IC_SESSION("ICS25", "shutdown socket, reason# %s", reason.ToString().data());
- Proxy->UpdateErrorStateLog(TActivationContext::Now(), "close_socket", reason.ToString().data());
- Socket->Shutdown(SHUT_RDWR);
- Socket.Reset();
- Proxy->Metrics->IncDisconnections();
- CloseOnIdleWatchdog.Disarm();
- LostConnectionWatchdog.Rearm(SelfId());
- Proxy->Metrics->SetConnected(0);
- LOG_INFO(*TlsActivationContext, NActorsServices::INTERCONNECT_STATUS, "[%u] disconnected", Proxy->PeerNodeId);
- }
- if (XdcSocket) {
- XdcSocket->Shutdown(SHUT_RDWR);
- XdcSocket.Reset();
- }
- }
-
- void TInterconnectSessionTCP::ReestablishConnectionExecute() {
- bool startHandshakeOnSessionClose = std::exchange(StartHandshakeOnSessionClose, false);
- TEvHandshakeDone::TPtr ev = std::move(PendingHandshakeDoneEvent);
-
- if (startHandshakeOnSessionClose) {
- StartHandshake();
- } else if (ev) {
- SetNewConnection(ev);
- }
- }
-
- void TInterconnectSessionTCP::Handle(TEvPollerReady::TPtr& ev) {
- LOG_DEBUG_IC_SESSION("ICS29", "HandleReadyWrite WriteBlockedByFullSendBuffer# %s",
- WriteBlockedByFullSendBuffer ? "true" : "false");
-
- auto *msg = ev->Get();
- bool useful = false;
- bool readPending = false;
-
- if (msg->Socket == Socket) {
- useful = std::exchange(ReceiveContext->MainWriteBlocked, false);
- readPending = ReceiveContext->MainReadPending;
- } else if (msg->Socket == XdcSocket) {
- useful = std::exchange(ReceiveContext->XdcWriteBlocked, false);
- readPending = ReceiveContext->XdcReadPending;
- }
-
- if (useful) {
- Proxy->Metrics->IncUsefulWriteWakeups();
- } else if (!ev->Cookie) {
- Proxy->Metrics->IncSpuriousWriteWakeups();
- }
-
- if (Params.Encryption && readPending && ev->Sender != ReceiverId) {
- Send(ReceiverId, ev->Release().Release());
- }
-
- GenerateTraffic();
- }
-
- void TInterconnectSessionTCP::Handle(TEvPollerRegisterResult::TPtr ev) {
- auto *msg = ev->Get();
- bool sendPollerReady = false;
-
- if (msg->Socket == Socket) {
- PollerToken = std::move(msg->PollerToken);
- sendPollerReady = ReceiveContext->MainWriteBlocked;
- } else if (msg->Socket == XdcSocket) {
- XdcPollerToken = std::move(msg->PollerToken);
- sendPollerReady = ReceiveContext->XdcWriteBlocked;
- }
-
- if (sendPollerReady) {
- Send(SelfId(), new TEvPollerReady(msg->Socket, false, true));
- }
- }
-
- void TInterconnectSessionTCP::WriteData() {
- // total bytes written during this call
- ui64 written = 0;
-
- auto process = [&](NInterconnect::TOutgoingStream& stream, const TIntrusivePtr<NInterconnect::TStreamSocket>& socket,
- const TPollerToken::TPtr& token, bool *writeBlocked, size_t maxBytes) {
- size_t totalWritten = 0;
-
- if (stream && socket && !*writeBlocked) {
- for (;;) {
- if (const ssize_t r = Write(stream, *socket, maxBytes); r > 0) {
- stream.Advance(r);
- totalWritten += r;
- } else if (r == -1) {
- if (token && socket->RequestWriteNotificationAfterWouldBlock(*token)) {
- continue; // we can try again
- }
- *writeBlocked = true;
- } else if (r == 0) {
- // error condition
- } else {
- Y_UNREACHABLE();
- }
- break;
- }
- }
-
- written += totalWritten;
- return totalWritten;
- };
-
- auto sendQueueIt = SendQueue.begin() + OutgoingIndex;
- static constexpr size_t maxBytesAtOnce = 256 * 1024;
- size_t bytesToSendInMain = maxBytesAtOnce;
-
- Y_DEBUG_ABORT_UNLESS(OutgoingIndex < SendQueue.size() || (OutgoingIndex == SendQueue.size() && !OutgoingOffset && !OutgoingStream));
-
- if (OutOfBandStream) {
- bytesToSendInMain = 0;
-
- if (!ForcedWriteLength && OutgoingOffset) {
- ForcedWriteLength = 1; // send at least one byte from current packet
- }
-
- // align send up to packet boundary
- size_t offset = OutgoingOffset;
- for (auto it = sendQueueIt; ForcedWriteLength; ++it, offset = 0) {
- Y_DEBUG_ABORT_UNLESS(it != SendQueue.end());
- bytesToSendInMain += it->PacketSize - offset; // send remainder of current packet
- ForcedWriteLength -= Min(it->PacketSize - offset, ForcedWriteLength);
- }
- }
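-        // In other words: while out-of-band data is pending, the main stream is drained only up to the next packet
-        // boundary (ForcedWriteLength tracks the remainder of a partially written packet), so the out-of-band stream
-        // is interleaved between whole main-stream packets rather than in the middle of one.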
-
- if (bytesToSendInMain) {
- const size_t w = process(OutgoingStream, Socket, PollerToken, &ReceiveContext->MainWriteBlocked, bytesToSendInMain);
-
- // adjust sending queue iterator
- for (OutgoingOffset += w; OutgoingOffset && sendQueueIt->PacketSize <= OutgoingOffset; ++sendQueueIt, ++OutgoingIndex) {
- OutgoingOffset -= sendQueueIt->PacketSize;
- }
-
- BytesWrittenToSocket += w;
-
- if (OutOfBandStream) {
- BytesAlignedForOutOfBand += w;
- bytesToSendInMain -= w;
- }
-
- ForcedWriteLength = Socket ? Socket->ExpectedWriteLength() : 0;
- }
-
- if (!bytesToSendInMain && !ForcedWriteLength) {
- if (const size_t w = process(OutOfBandStream, Socket, PollerToken, &ReceiveContext->MainWriteBlocked, maxBytesAtOnce)) {
- OutOfBandStream.DropFront(w);
- BytesWrittenToSocket += w;
- OutOfBandBytesSent += w;
- }
- }
-
- if (const size_t w = process(XdcStream, XdcSocket, XdcPollerToken, &ReceiveContext->XdcWriteBlocked, maxBytesAtOnce)) {
- XdcBytesSent += w;
- XdcOffset += w;
- }
-
- if (written) {
- Proxy->Metrics->AddTotalBytesWritten(written);
- }
-
- DropConfirmed(LastConfirmed);
-
- const bool writeBlockedByFullSendBuffer = ReceiveContext->MainWriteBlocked || ReceiveContext->XdcWriteBlocked;
- if (WriteBlockedByFullSendBuffer < writeBlockedByFullSendBuffer) { // became blocked
- WriteBlockedCycles = GetCycleCountFast();
- LOG_DEBUG_IC_SESSION("ICS18", "hit send buffer limit");
- } else if (writeBlockedByFullSendBuffer < WriteBlockedByFullSendBuffer) { // became unblocked
- WriteBlockedTotal += TDuration::Seconds(NHPTimer::GetSeconds(GetCycleCountFast() - WriteBlockedCycles));
- }
- WriteBlockedByFullSendBuffer = writeBlockedByFullSendBuffer;
- }
-
- ssize_t TInterconnectSessionTCP::Write(NInterconnect::TOutgoingStream& stream, NInterconnect::TStreamSocket& socket,
- size_t maxBytes) {
- LWPROBE_IF_TOO_LONG(SlowICWriteData, Proxy->PeerNodeId, ms) {
- constexpr ui32 iovLimit = 256;
-
- ui32 maxElementsInIOV;
- if (Params.Encryption) {
- maxElementsInIOV = 1;
- } else {
-#if defined(_win_)
- maxElementsInIOV = 1;
-#elif defined(_linux_)
- maxElementsInIOV = Min<ui32>(iovLimit, sysconf(_SC_IOV_MAX));
-#else
- maxElementsInIOV = 64;
-#endif
- }
-
- TStackVec<TConstIoVec, iovLimit> wbuffers;
-
- stream.ProduceIoVec(wbuffers, maxElementsInIOV, maxBytes);
- Y_ABORT_UNLESS(!wbuffers.empty());
-
- TString err;
- ssize_t r = 0;
- { // issue syscall with timing
- const ui64 begin = GetCycleCountFast();
-
- do {
- if (wbuffers.size() == 1) {
- auto& front = wbuffers.front();
- r = socket.Send(front.Data, front.Size, &err);
- } else {
- r = socket.WriteV(reinterpret_cast<const iovec*>(wbuffers.data()), wbuffers.size());
- }
- } while (r == -EINTR);
-
- const ui64 end = GetCycleCountFast();
- Proxy->Metrics->IncSendSyscalls((end - begin) * 1'000'000 / GetCyclesPerMillisecond());
- }
-
- if (r > 0) {
- return r;
- } else if (-r != EAGAIN && -r != EWOULDBLOCK) {
- const TString message = r == 0 ? "connection closed by peer"
- : err ? err
- : Sprintf("socket: %s", strerror(-r));
- LOG_NOTICE_NET(Proxy->PeerNodeId, "%s", message.data());
- if (r == 0 && !NumEventsInQueue && LastConfirmed == OutputCounter) {
- Terminate(TDisconnectReason::EndOfStream());
- } else {
- ReestablishConnectionWithHandshake(r == 0 ? TDisconnectReason::EndOfStream() : TDisconnectReason::FromErrno(-r));
- }
- return 0; // error indicator
- } else {
- return -1; // temporary error
- }
- }
-
- Y_UNREACHABLE();
- }
-
- void TInterconnectSessionTCP::SetForcePacketTimestamp(TDuration period) {
- if (period != TDuration::Max()) {
- // randomize period a bit
- period = TDuration::FromValue(period.GetValue() - RandomNumber<ui64>(period.GetValue() / 10));
- const TMonotonic when = TActivationContext::Monotonic() + period;
- if (when < ForcePacketTimestamp) {
- ForcePacketTimestamp = when;
- ScheduleFlush();
- }
- }
- }
-
- void TInterconnectSessionTCP::ScheduleFlush() {
- if (FlushSchedule.empty() || ForcePacketTimestamp < FlushSchedule.top()) {
- Schedule(ForcePacketTimestamp, new TEvFlush);
- FlushSchedule.push(ForcePacketTimestamp);
- MaxFlushSchedule = Max(MaxFlushSchedule, FlushSchedule.size());
- ++FlushEventsScheduled;
- }
- }
-
- void TInterconnectSessionTCP::HandleFlush() {
- const TMonotonic now = TActivationContext::Monotonic();
- while (FlushSchedule && now >= FlushSchedule.top()) {
- FlushSchedule.pop();
- }
- if (Socket) {
- if (now >= ForcePacketTimestamp) {
- ++ConfirmPacketsForcedByTimeout;
- ++FlushEventsProcessed;
- MakePacket(false); // just generate confirmation packet if we have preconditions for this
- } else if (ForcePacketTimestamp != TMonotonic::Max()) {
- ScheduleFlush();
- }
- GenerateTraffic();
- }
- }
-
- void TInterconnectSessionTCP::ResetFlushLogic() {
- ForcePacketTimestamp = TMonotonic::Max();
- UnconfirmedBytes = 0;
- const TDuration ping = Proxy->Common->Settings.PingPeriod;
- if (ping != TDuration::Zero() && !NumEventsInQueue) {
- SetForcePacketTimestamp(ping);
- }
- }
-
- ui32 TInterconnectSessionTCP::MakePacket(bool data, TMaybe<ui64> pingMask) {
- NInterconnect::TOutgoingStream& stream = data ? OutgoingStream : OutOfBandStream;
-
-#ifndef NDEBUG
- const size_t outgoingStreamSizeBefore = stream.CalculateOutgoingSize();
- const size_t xdcStreamSizeBefore = XdcStream.CalculateOutgoingSize();
-#endif
-
- stream.Align();
- XdcStream.Align();
-
- TTcpPacketOutTask packet(Params, stream, XdcStream);
- ui64 serial = 0;
-
- if (data) {
- // generate serial for this data packet
- serial = ++OutputCounter;
-
- // fill the data packet
- Y_ABORT_UNLESS(NumEventsInQueue);
- LWPROBE_IF_TOO_LONG(SlowICFillSendingBuffer, Proxy->PeerNodeId, ms) {
- FillSendingBuffer(packet, serial);
- }
- Y_ABORT_UNLESS(!packet.IsEmpty());
-
- InflightDataAmount += packet.GetDataSize();
- Proxy->Metrics->AddInflightDataAmount(packet.GetDataSize());
- if (InflightDataAmount > GetTotalInflightAmountOfData()) {
- Proxy->Metrics->IncInflyLimitReach();
- }
-
- if (AtomicGet(ReceiveContext->ControlPacketId) == 0) {
- AtomicSet(ReceiveContext->ControlPacketSendTimer, GetCycleCountFast());
- AtomicSet(ReceiveContext->ControlPacketId, OutputCounter);
- }
-
- // update payload activity timer
- LastPayloadActivityTimestamp = TActivationContext::Monotonic();
- } else if (pingMask) {
- serial = *pingMask;
- }
-
- const ui64 lastInputSerial = ReceiveContext->GetLastPacketSerialToConfirm();
-
- packet.Finish(serial, lastInputSerial);
-
- // count number of bytes pending for write
- const size_t packetSize = packet.GetPacketSize();
-
-#ifndef NDEBUG
- const size_t outgoingStreamSizeAfter = stream.CalculateOutgoingSize();
- const size_t xdcStreamSizeAfter = XdcStream.CalculateOutgoingSize();
-
- Y_ABORT_UNLESS(outgoingStreamSizeAfter == outgoingStreamSizeBefore + packetSize &&
- xdcStreamSizeAfter == xdcStreamSizeBefore + packet.GetExternalSize(),
- "outgoingStreamSizeBefore# %zu outgoingStreamSizeAfter# %zu packetSize# %zu"
- " xdcStreamSizeBefore# %zu xdcStreamSizeAfter# %zu externalSize# %" PRIu32,
- outgoingStreamSizeBefore, outgoingStreamSizeAfter, packetSize,
- xdcStreamSizeBefore, xdcStreamSizeAfter, packet.GetExternalSize());
-#endif
-
- // put outgoing packet metadata here
- if (data) {
- SendQueue.push_back(TOutgoingPacket{
- static_cast<ui32>(packetSize),
- static_cast<ui32>(packet.GetExternalSize())
- });
- }
-
- LOG_DEBUG_IC_SESSION("ICS22", "outgoing packet Serial# %" PRIu64 " Confirm# %" PRIu64 " DataSize# %" PRIu32
- " InflightDataAmount# %" PRIu64, serial, lastInputSerial, packet.GetDataSize(), InflightDataAmount);
-
- // reset forced packet sending timestamp as we have confirmed all received data
- ResetFlushLogic();
-
- ++PacketsGenerated;
-
- return packetSize;
- }
-
- void TInterconnectSessionTCP::DropConfirmed(ui64 confirm) {
- LOG_DEBUG_IC_SESSION("ICS23", "confirm count: %" PRIu64, confirm);
-
- Y_ABORT_UNLESS(LastConfirmed <= confirm && confirm <= OutputCounter,
- "%s confirm# %" PRIu64 " LastConfirmed# %" PRIu64 " OutputCounter# %" PRIu64,
- LogPrefix.data(), confirm, LastConfirmed, OutputCounter);
- LastConfirmed = confirm;
-
- std::optional<ui64> lastDroppedSerial;
- ui32 numDropped = 0;
-
- // drop confirmed packets; this also includes any auxiliary packets as their serial is set to zero, effectively
- // making Serial <= confirm true
- size_t bytesDropped = 0;
- size_t bytesDroppedFromXdc = 0;
- ui64 frontPacketSerial = OutputCounter - SendQueue.size() + 1;
- Y_DEBUG_ABORT_UNLESS(OutgoingIndex < SendQueue.size() || (OutgoingIndex == SendQueue.size() && !OutgoingOffset && !OutgoingStream),
- "OutgoingIndex# %zu SendQueue.size# %zu OutgoingOffset# %zu Unsent# %zu Total# %zu",
- OutgoingIndex, SendQueue.size(), OutgoingOffset, OutgoingStream.CalculateUnsentSize(),
- OutgoingStream.CalculateOutgoingSize());
- while (OutgoingIndex && frontPacketSerial <= confirm && SendQueue.front().ExternalSize <= XdcOffset) {
- auto& front = SendQueue.front();
- lastDroppedSerial.emplace(frontPacketSerial);
- XdcOffset -= front.ExternalSize;
- bytesDropped += front.PacketSize;
- bytesDroppedFromXdc += front.ExternalSize;
- ++numDropped;
-
- ++frontPacketSerial;
- SendQueue.pop_front();
- --OutgoingIndex;
- }
-
- if (!numDropped) {
- return;
- }
-
- const ui64 droppedDataAmount = bytesDropped + bytesDroppedFromXdc - sizeof(TTcpPacketHeader_v2) * numDropped;
- OutgoingStream.DropFront(bytesDropped);
- XdcStream.DropFront(bytesDroppedFromXdc);
- if (lastDroppedSerial) {
- ChannelScheduler->ForEach([&](TEventOutputChannel& channel) {
- channel.DropConfirmed(*lastDroppedSerial);
- });
- }
-
- PacketsConfirmed += numDropped;
- InflightDataAmount -= droppedDataAmount;
- Proxy->Metrics->SubInflightDataAmount(droppedDataAmount);
- LWPROBE(DropConfirmed, Proxy->PeerNodeId, droppedDataAmount, InflightDataAmount);
-
- LOG_DEBUG_IC_SESSION("ICS24", "exit InflightDataAmount: %" PRIu64 " bytes droppedDataAmount: %" PRIu64 " bytes"
- " dropped %" PRIu32 " packets", InflightDataAmount, droppedDataAmount, numDropped);
-
- Pool->Trim(); // send any unsent free requests
-
- RearmCloseOnIdle();
- }
-
- void TInterconnectSessionTCP::FillSendingBuffer(TTcpPacketOutTask& task, ui64 serial) {
- ui32 bytesGenerated = 0;
-
- Y_ABORT_UNLESS(NumEventsInQueue);
- while (NumEventsInQueue) {
- TEventOutputChannel *channel = ChannelScheduler->PickChannelWithLeastConsumedWeight();
- Y_DEBUG_ABORT_UNLESS(!channel->IsEmpty());
-
- // generate some data within this channel
- const ui64 netBefore = channel->GetBufferedAmountOfData();
- ui64 gross = 0;
- const bool eventDone = channel->FeedBuf(task, serial, &gross);
- channel->UnaccountedTraffic += gross;
- const ui64 netAfter = channel->GetBufferedAmountOfData();
- Y_DEBUG_ABORT_UNLESS(netAfter <= netBefore); // net amount should shrink
- const ui64 net = netBefore - netAfter; // number of net bytes serialized
-
- // adjust metrics for local and global queue size
- TotalOutputQueueSize -= net;
- Proxy->Metrics->SubOutputBuffersTotalSize(net);
- bytesGenerated += gross;
- Y_DEBUG_ABORT_UNLESS(!!net == !!gross && gross >= net, "net# %" PRIu64 " gross# %" PRIu64, net, gross);
-
- // return it back to queue or delete, depending on whether this channel is still working or not
- ChannelScheduler->FinishPick(gross, EqualizeCounter);
-
- // update some stats if the packet was fully serialized
- if (eventDone) {
- ++MessagesWrittenToBuffer;
-
- Y_ABORT_UNLESS(NumEventsInQueue);
- --NumEventsInQueue;
-
- if (!NumEventsInQueue) {
- SetOutputStuckFlag(false);
- }
- }
-
- if (!gross) { // no progress -- almost full packet buffer
- break;
- }
- }
-
- Y_ABORT_UNLESS(bytesGenerated); // ensure we are not stalled in serialization
- }
-
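-    // CalculateQueueUtilization() returns the share of time the output queue was "stuck" over the recorded window,
-    // expressed in parts per million; the thresholds used in SendUpdateToWhiteboard() below (875000 and 500000)
-    // therefore correspond to 7/8 and 1/2 of the observed period.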
- ui32 TInterconnectSessionTCP::CalculateQueueUtilization() {
- SwitchStuckPeriod();
- ui64 sumBusy = 0, sumPeriod = 0;
- for (auto iter = OutputQueueUtilization.begin(); iter != OutputQueueUtilization.end() - 1; ++iter) {
- sumBusy += iter->first;
- sumPeriod += iter->second;
- }
- return sumBusy * 1000000 / sumPeriod;
- }
-
- void TInterconnectSessionTCP::SendUpdateToWhiteboard(bool connected) {
- const ui32 utilization = Socket ? CalculateQueueUtilization() : 0;
-
- if (const auto& callback = Proxy->Common->UpdateWhiteboard) {
- enum class EFlag {
- GREEN,
- YELLOW,
- ORANGE,
- RED,
- };
- EFlag flagState = EFlag::RED;
-
- if (Socket) {
- flagState = EFlag::GREEN;
-
- do {
- auto lastInputDelay = TActivationContext::Monotonic() - LastInputActivityTimestamp;
- if (lastInputDelay * 4 >= GetDeadPeerTimeout() * 3) {
- flagState = EFlag::ORANGE;
- break;
- } else if (lastInputDelay * 2 >= GetDeadPeerTimeout()) {
- flagState = EFlag::YELLOW;
- }
-
- // check utilization
- if (utilization > 875000) { // 7/8
- flagState = EFlag::ORANGE;
- break;
- } else if (utilization > 500000) { // 1/2
- flagState = EFlag::YELLOW;
- }
- } while (false);
- }
-
- callback({TlsActivationContext->ExecutorThread.ActorSystem,
- Proxy->PeerNodeId,
- Proxy->Metrics->GetHumanFriendlyPeerHostName(),
- connected,
- flagState == EFlag::GREEN,
- flagState == EFlag::YELLOW,
- flagState == EFlag::ORANGE,
- flagState == EFlag::RED,
- ReceiveContext->ClockSkew_us.load()});
- }
-
- if (connected) {
- Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup);
- }
- }
-
- void TInterconnectSessionTCP::SetOutputStuckFlag(bool state) {
- if (OutputStuckFlag == state)
- return;
-
- if (OutputQueueUtilization.Size() == 0)
- return;
-
- auto& lastpair = OutputQueueUtilization.Last();
- if (state)
- lastpair.first -= GetCycleCountFast();
- else
- lastpair.first += GetCycleCountFast();
-
- OutputStuckFlag = state;
- }
-
- void TInterconnectSessionTCP::SwitchStuckPeriod() {
- auto now = GetCycleCountFast();
- if (OutputQueueUtilization.Size() != 0) {
- auto& lastpair = OutputQueueUtilization.Last();
- lastpair.second = now - lastpair.second;
- if (OutputStuckFlag)
- lastpair.first += now;
- }
-
- OutputQueueUtilization.Push(std::pair<ui64, ui64>(0, now));
- if (OutputStuckFlag)
- OutputQueueUtilization.Last().first -= now;
- }
-
- TDuration TInterconnectSessionTCP::GetDeadPeerTimeout() const {
- return Coalesce(Proxy->Common->Settings.DeadPeer, DEFAULT_DEADPEER_TIMEOUT);
- }
-
- TDuration TInterconnectSessionTCP::GetCloseOnIdleTimeout() const {
- return Proxy->Common->Settings.CloseOnIdle;
- }
-
- TDuration TInterconnectSessionTCP::GetLostConnectionTimeout() const {
- return Coalesce(Proxy->Common->Settings.LostConnection, DEFAULT_LOST_CONNECTION_TIMEOUT);
- }
-
- ui32 TInterconnectSessionTCP::GetTotalInflightAmountOfData() const {
- return Coalesce(Proxy->Common->Settings.TotalInflightAmountOfData, DEFAULT_TOTAL_INFLIGHT_DATA);
- }
-
- ui64 TInterconnectSessionTCP::GetMaxCyclesPerEvent() const {
- return DurationToCycles(TDuration::MicroSeconds(50));
- }
-
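-    // Ping/clock support: both functions below emit auxiliary (non-data) packets whose serial field carries the
-    // payload -- a fast cycle counter or a wall-clock microsecond timestamp -- tagged with TTcpPacketBuf::PingRequestMask,
-    // PingResponseMask or ClockMask, so the receiving side can presumably tell them apart from ordinary confirmations
-    // (see the pingMask parameter of MakePacket()).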
- void TInterconnectSessionTCP::IssuePingRequest() {
- const TMonotonic now = TActivationContext::Monotonic();
- if (now >= LastPingTimestamp + PingPeriodicity) {
- LOG_DEBUG_IC_SESSION("ICS00", "Issuing ping request");
- if (Socket) {
- MakePacket(false, GetCycleCountFast() | TTcpPacketBuf::PingRequestMask);
- MakePacket(false, TInstant::Now().MicroSeconds() | TTcpPacketBuf::ClockMask);
- }
- LastPingTimestamp = now;
- }
- }
-
- void TInterconnectSessionTCP::Handle(TEvProcessPingRequest::TPtr ev) {
- if (Socket) {
- MakePacket(false, ev->Get()->Payload | TTcpPacketBuf::PingResponseMask);
- GenerateTraffic();
- }
- }
-
- void TInterconnectSessionTCP::GenerateHttpInfo(NMon::TEvHttpInfoRes::TPtr& ev) {
- TStringStream str;
- ev->Get()->Output(str);
-
- HTML(str) {
- DIV_CLASS("panel panel-info") {
- DIV_CLASS("panel-heading") {
- str << "Session";
- }
- DIV_CLASS("panel-body") {
- TABLE_CLASS("table") {
- TABLEHEAD() {
- TABLER() {
- TABLEH() {
- str << "Sensor";
- }
- TABLEH() {
- str << "Value";
- }
- }
- }
- TABLEBODY() {
- TABLER() {
- TABLED() {
- str << "Encryption";
- }
- TABLED() {
- str << (Params.Encryption ? "<font color=green>Enabled</font>" : "<font color=red>Disabled</font>");
- }
- }
- if (auto *x = dynamic_cast<NInterconnect::TSecureSocket*>(Socket.Get())) {
- TABLER() {
- TABLED() {
- str << "Cipher name";
- }
- TABLED() {
- str << x->GetCipherName();
- }
- }
- TABLER() {
- TABLED() {
- str << "Cipher bits";
- }
- TABLED() {
- str << x->GetCipherBits();
- }
- }
- TABLER() {
- TABLED() {
- str << "Protocol";
- }
- TABLED() {
- str << x->GetProtocolName();
- }
- }
- TABLER() {
- TABLED() {
- str << "Peer CN";
- }
- TABLED() {
- str << x->GetPeerCommonName();
- }
- }
- }
- TABLER() {
- TABLED() { str << "AuthOnly CN"; }
- TABLED() { str << Params.AuthCN; }
- }
- TABLER() {
- TABLED() {
- str << "Local scope id";
- }
- TABLED() {
- str << ScopeIdToString(Proxy->Common->LocalScopeId);
- }
- }
- TABLER() {
- TABLED() {
- str << "Peer scope id";
- }
- TABLED() {
- str << ScopeIdToString(Params.PeerScopeId);
- }
- }
- TABLER() {
- TABLED() {
- str << "This page generated at";
- }
- TABLED() {
- str << TActivationContext::Now() << " / " << Now();
- }
- }
- TABLER() {
- TABLED() {
- str << "SelfID";
- }
- TABLED() {
- str << SelfId().ToString();
- }
- }
- TABLER() {
- TABLED() { str << "Frame version/Checksum"; }
- TABLED() { str << (Params.Encryption ? "v2/none" : Params.UseXxhash ? "v2/xxhash" : "v2/crc32c"); }
- }
-#define MON_VAR(NAME) \
- TABLER() { \
- TABLED() { \
- str << #NAME; \
- } \
- TABLED() { \
- str << NAME; \
- } \
- }
-
- MON_VAR(Created)
- MON_VAR(Params.UseExternalDataChannel)
- MON_VAR(NewConnectionSet)
- MON_VAR(ReceiverId)
- MON_VAR(MessagesGot)
- MON_VAR(MessagesWrittenToBuffer)
- MON_VAR(PacketsGenerated)
- MON_VAR(PacketsConfirmed)
- MON_VAR(ConfirmPacketsForcedBySize)
- MON_VAR(ConfirmPacketsForcedByTimeout)
-
- TABLER() {
- TABLED() {
- str << "Virtual self ID";
- }
- TABLED() {
- str << Proxy->SessionVirtualId.ToString();
- }
- }
- TABLER() {
- TABLED() {
- str << "Virtual peer ID";
- }
- TABLED() {
- str << Proxy->RemoteSessionVirtualId.ToString();
- }
- }
- TABLER() {
- TABLED() {
- str << "Socket";
- }
- TABLED() {
- str << (Socket ? i64(*Socket) : -1);
- }
- }
- TABLER() {
- TABLED() {
- str << "XDC socket";
- }
- TABLED() {
- str << (XdcSocket ? i64(*XdcSocket) : -1);
- }
- }
-
- ui32 unsentQueueSize = Socket ? Socket->GetUnsentQueueSize() : 0;
-
- const TMonotonic now = TActivationContext::Monotonic();
-
- MON_VAR(OutputStuckFlag)
- MON_VAR(SendQueue.size())
- MON_VAR(NumEventsInQueue)
- MON_VAR(TotalOutputQueueSize)
- MON_VAR(InflightDataAmount)
- MON_VAR(unsentQueueSize)
- MON_VAR(SendBufferSize)
- MON_VAR(now - LastInputActivityTimestamp)
- MON_VAR(now - LastPayloadActivityTimestamp)
- MON_VAR(LastHandshakeDone)
- MON_VAR(OutputCounter)
- MON_VAR(LastConfirmed)
- MON_VAR(FlushSchedule.size())
- MON_VAR(MaxFlushSchedule)
- MON_VAR(FlushEventsScheduled)
- MON_VAR(FlushEventsProcessed)
-
- MON_VAR(GetWriteBlockedTotal())
-
- MON_VAR(BytesWrittenToSocket)
- MON_VAR(XdcBytesSent)
-
- MON_VAR(OutgoingStream.CalculateOutgoingSize())
- MON_VAR(OutgoingStream.CalculateUnsentSize())
- MON_VAR(OutgoingStream.GetSendQueueSize())
- MON_VAR(OutgoingOffset)
- MON_VAR(OutgoingIndex)
-
- MON_VAR(OutOfBandStream.CalculateOutgoingSize())
- MON_VAR(OutOfBandStream.CalculateUnsentSize())
- MON_VAR(OutOfBandStream.GetSendQueueSize())
- MON_VAR(BytesAlignedForOutOfBand)
- MON_VAR(OutOfBandBytesSent)
-
- MON_VAR(XdcStream.CalculateOutgoingSize())
- MON_VAR(XdcStream.CalculateUnsentSize())
- MON_VAR(XdcStream.GetSendQueueSize())
- MON_VAR(XdcOffset)
-
- MON_VAR(CpuStarvationEvents)
- MON_VAR(CpuStarvationEventsOnWriteData)
-
- TString clockSkew;
- i64 x = GetClockSkew();
- if (x < 0) {
- clockSkew = Sprintf("-%s", TDuration::MicroSeconds(-x).ToString().data());
- } else {
- clockSkew = Sprintf("+%s", TDuration::MicroSeconds(x).ToString().data());
- }
-
- MON_VAR(now - LastPingTimestamp)
- MON_VAR(GetPingRTT())
- MON_VAR(clockSkew)
-
- MON_VAR(GetDeadPeerTimeout())
- MON_VAR(GetTotalInflightAmountOfData())
- MON_VAR(GetCloseOnIdleTimeout())
- MON_VAR(Subscribers.size())
- }
- }
- }
- }
- }
-
- auto h = std::make_unique<IEventHandle>(ev->Recipient, ev->Sender, new NMon::TEvHttpInfoRes(str.Str()));
- if (ReceiverId) {
- h->Rewrite(h->Type, ReceiverId);
- }
- TActivationContext::Send(h.release());
- }
-
- void CreateSessionKillingActor(TInterconnectProxyCommon::TPtr common) {
- TlsActivationContext->ExecutorThread.ActorSystem->Register(new TInterconnectSessionKiller(common));
- }
-}
diff --git a/library/cpp/actors/interconnect/interconnect_tcp_session.h b/library/cpp/actors/interconnect/interconnect_tcp_session.h
deleted file mode 100644
index 64519b2667..0000000000
--- a/library/cpp/actors/interconnect/interconnect_tcp_session.h
+++ /dev/null
@@ -1,692 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/util/rope.h>
-#include <library/cpp/actors/util/funnel_queue.h>
-#include <library/cpp/actors/util/recentwnd.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-
-#define XXH_INLINE_ALL
-#include <contrib/libs/xxhash/xxhash.h>
-
-#include <util/generic/queue.h>
-#include <util/generic/deque.h>
-#include <util/datetime/cputimer.h>
-
-#include "interconnect_impl.h"
-#include "poller_tcp.h"
-#include "poller_actor.h"
-#include "interconnect_channel.h"
-#include "logging.h"
-#include "watchdog_timer.h"
-#include "event_holder_pool.h"
-#include "channel_scheduler.h"
-#include "outgoing_stream.h"
-
-#include <unordered_set>
-#include <unordered_map>
-
-namespace NActors {
- class TSlowPathChecker {
- using TTraceCallback = std::function<void(double)>;
- TTraceCallback Callback;
- const NHPTimer::STime Start;
-
- public:
- TSlowPathChecker(TTraceCallback&& callback)
- : Callback(std::move(callback))
- , Start(GetCycleCountFast())
- {
- }
-
- ~TSlowPathChecker() {
- const NHPTimer::STime end = GetCycleCountFast();
- const NHPTimer::STime elapsed = end - Start;
- if (elapsed > 1000000) {
- Callback(NHPTimer::GetSeconds(elapsed) * 1000);
- }
- }
-
- operator bool() const {
- return false;
- }
- };
-
-#define LWPROBE_IF_TOO_LONG(...) \
- if (auto __x = TSlowPathChecker{[&](double ms) { LWPROBE(__VA_ARGS__); }}) \
- ; \
- else
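-    // TSlowPathChecker converts to false, so the statement following `else` becomes the body guarded by the macro;
-    // when the checker goes out of scope, its destructor reports the elapsed time (in milliseconds) through the
-    // callback if the body took longer than ~1M cycles. Typical use, as in interconnect_tcp_session.cpp:
-    //
-    //     LWPROBE_IF_TOO_LONG(SlowICWriteData, Proxy->PeerNodeId, ms) {
-    //         // ... the potentially slow code being measured ...
-    //     }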
-
- class TTimeLimit {
- public:
- TTimeLimit(ui64 limitInCycles)
- : UpperLimit(limitInCycles == 0 ? 0 : GetCycleCountFast() + limitInCycles)
- {
- }
-
- TTimeLimit(ui64 startTS, ui64 limitInCycles)
- : UpperLimit(limitInCycles == 0 ? 0 : startTS + limitInCycles)
- {
- }
-
- bool CheckExceeded() {
- return UpperLimit != 0 && GetCycleCountFast() > UpperLimit;
- }
-
- const ui64 UpperLimit;
- };
-
- static constexpr TDuration DEFAULT_DEADPEER_TIMEOUT = TDuration::Seconds(10);
- static constexpr TDuration DEFAULT_LOST_CONNECTION_TIMEOUT = TDuration::Seconds(10);
- static constexpr ui32 DEFAULT_MAX_INFLIGHT_DATA = 10240 * 1024;
- static constexpr ui32 DEFAULT_TOTAL_INFLIGHT_DATA = 4 * 10240 * 1024;
-
- class TInterconnectProxyTCP;
-
- enum class EUpdateState : ui8 {
- NONE, // no updates generated by input session yet
- INFLIGHT, // one update is inflight, and no more pending
- INFLIGHT_AND_PENDING, // one update is inflight, and one is pending
- CONFIRMING, // confirmation inflight
- };
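-    // As can be seen from TInterconnectSessionTCP::Handle(TEvUpdateFromInputSession) in interconnect_tcp_session.cpp,
-    // the session side of this protocol resets INFLIGHT back to NONE once the update has been processed, or switches
-    // INFLIGHT_AND_PENDING to CONFIRMING and replies with TEvConfirmUpdate; the remaining transitions are performed by
-    // the input session actor when it generates updates.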
-
- struct TReceiveContext: public TAtomicRefCount<TReceiveContext> {
-        /* All accesses to these fields should be thread-safe */
-
- ui64 ControlPacketSendTimer = 0;
- ui64 ControlPacketId = 0;
-
- // last processed packet by input session
- std::atomic_uint64_t LastPacketSerialToConfirm = 0;
- static constexpr uint64_t LastPacketSerialToConfirmLockBit = uint64_t(1) << 63;
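-        // The topmost bit of LastPacketSerialToConfirm doubles as a lock flag: LockLastPacketSerialToConfirm() sets it
-        // and returns the serial with the flag stripped, AdvanceLastPacketSerialToConfirm() reports failure while it is
-        // set, and UnlockLastPacketSerialToConfirm() clears it; the lock is taken for the duration of a handshake
-        // (see ProcessHandshakeRequest()/StartHandshake() in the .cpp).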
-
- // for hardened checks
- TAtomic NumInputSessions = 0;
-
- NHPTimer::STime StartTime;
-
- std::atomic<ui64> PingRTT_us = 0;
- std::atomic<i64> ClockSkew_us = 0;
-
- std::atomic<EUpdateState> UpdateState;
- static_assert(std::atomic<EUpdateState>::is_always_lock_free);
-
- bool MainWriteBlocked = false;
- bool XdcWriteBlocked = false;
- bool MainReadPending = false;
- bool XdcReadPending = false;
-
- struct TPerChannelContext {
- struct TPendingEvent {
- TEventSerializationInfo SerializationInfo;
- TRope InternalPayload;
- TRope ExternalPayload;
- std::optional<TEventData> EventData;
-
- // number of bytes remaining through XDC channel
- size_t XdcSizeLeft = 0;
- };
-
- std::deque<TPendingEvent> PendingEvents;
- std::deque<TMutableContiguousSpan> XdcBuffers; // receive queue for current channel
- size_t FetchIndex = 0;
- size_t FetchOffset = 0;
-
- ui64 XdcCatchBytesRead = 0; // number of bytes actually read into cyclic buffer
- TRcBuf XdcCatchBuffer;
-
- void PrepareCatchBuffer();
- void ApplyCatchBuffer();
- void FetchBuffers(ui16 channel, size_t numBytes, std::deque<std::tuple<ui16, TMutableContiguousSpan>>& outQ);
- void DropFront(TRope *from, size_t numBytes);
- };
-
- std::array<TPerChannelContext, 16> ChannelArray;
- std::unordered_map<ui16, TPerChannelContext> ChannelMap;
- ui64 LastProcessedSerial = 0;
-
- TReceiveContext() {
- GetTimeFast(&StartTime);
- }
-
-        // returns false if the session needs to be terminated
- bool AdvanceLastPacketSerialToConfirm(ui64 nextValue) {
- for (;;) {
- uint64_t value = LastPacketSerialToConfirm.load();
- if (value & LastPacketSerialToConfirmLockBit) {
- return false;
- }
- Y_DEBUG_ABORT_UNLESS(value + 1 == nextValue);
- if (LastPacketSerialToConfirm.compare_exchange_weak(value, nextValue)) {
- return true;
- }
- }
- }
-
- ui64 LockLastPacketSerialToConfirm() {
- for (;;) {
- uint64_t value = LastPacketSerialToConfirm.load();
- if (value & LastPacketSerialToConfirmLockBit) {
- return value & ~LastPacketSerialToConfirmLockBit;
- }
- if (LastPacketSerialToConfirm.compare_exchange_strong(value, value | LastPacketSerialToConfirmLockBit)) {
- return value;
- }
- }
- }
-
- void UnlockLastPacketSerialToConfirm() {
- LastPacketSerialToConfirm &= ~LastPacketSerialToConfirmLockBit;
- }
-
- ui64 GetLastPacketSerialToConfirm() {
- return LastPacketSerialToConfirm.load() & ~LastPacketSerialToConfirmLockBit;
- }
- };
-
- class TInputSessionTCP
- : public TActorBootstrapped<TInputSessionTCP>
- , public TInterconnectLoggingBase
- {
- enum {
- EvCheckDeadPeer = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvResumeReceiveData,
- };
-
- struct TEvCheckDeadPeer : TEventLocal<TEvCheckDeadPeer, EvCheckDeadPeer> {};
-
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::INTERCONNECT_SESSION_TCP;
- }
-
- TInputSessionTCP(const TActorId& sessionId,
- TIntrusivePtr<NInterconnect::TStreamSocket> socket,
- TIntrusivePtr<NInterconnect::TStreamSocket> xdcSocket,
- TIntrusivePtr<TReceiveContext> context,
- TInterconnectProxyCommon::TPtr common,
- std::shared_ptr<IInterconnectMetrics> metrics,
- ui32 nodeId,
- ui64 lastConfirmed,
- TDuration deadPeerTimeout,
- TSessionParams params);
-
- private:
- friend class TActorBootstrapped<TInputSessionTCP>;
-
- void Bootstrap();
-
- struct TExReestablishConnection {
- TDisconnectReason Reason;
- };
-
- struct TExDestroySession {
- TDisconnectReason Reason;
- };
-
- STATEFN(WorkingState);
-
- STRICT_STFUNC(WorkingStateImpl,
- cFunc(TEvents::TSystem::PoisonPill, PassAway)
- hFunc(TEvPollerReady, Handle)
- hFunc(TEvPollerRegisterResult, Handle)
- cFunc(EvResumeReceiveData, ReceiveData)
- cFunc(TEvInterconnect::TEvCloseInputSession::EventType, CloseInputSession)
- cFunc(EvCheckDeadPeer, HandleCheckDeadPeer)
- cFunc(TEvConfirmUpdate::EventType, HandleConfirmUpdate)
- hFunc(NMon::TEvHttpInfoRes, GenerateHttpInfo)
- )
-
- private:
- TRope IncomingData;
-
- const TActorId SessionId;
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
- TIntrusivePtr<NInterconnect::TStreamSocket> XdcSocket;
- TPollerToken::TPtr PollerToken;
- TPollerToken::TPtr XdcPollerToken;
- TIntrusivePtr<TReceiveContext> Context;
- TInterconnectProxyCommon::TPtr Common;
- const ui32 NodeId;
- const TSessionParams Params;
- XXH3_state_t XxhashState;
- XXH3_state_t XxhashXdcState;
-
- size_t PayloadSize;
- ui32 ChecksumExpected, Checksum;
- bool IgnorePayload;
- TRope Payload;
- enum class EState {
- HEADER,
- PAYLOAD,
- };
- EState State = EState::HEADER;
- ui64 CurrentSerial = 0;
-
- std::vector<char> XdcCommands;
-
- struct TInboundPacket {
- ui64 Serial;
- size_t XdcUnreadBytes; // number of unread bytes from XDC stream for this exact unprocessed packet
- };
- std::deque<TInboundPacket> InboundPacketQ;
- std::deque<std::tuple<ui16, TMutableContiguousSpan>> XdcInputQ; // target buffers for the XDC stream with channel reference
- std::deque<std::tuple<ui16, ui32>> XdcChecksumQ; // (size, expectedChecksum)
- ui32 XdcCurrentChecksum = 0;
-
- // catch stream -- used after TCP reconnect to match XDC stream with main packet stream
- struct TXdcCatchStream {
- TRcBuf Buffer;
- ui64 BytesPending = 0;
- ui64 BytesProcessed = 0;
- std::deque<std::tuple<ui16, bool, size_t>> Markup; // a queue of tuples (channel, apply, bytes)
- bool Ready = false;
- bool Applied = false;
- };
- TXdcCatchStream XdcCatchStream;
-
- THolder<TEvUpdateFromInputSession> UpdateFromInputSession;
-
- ui64 ConfirmedByInput;
-
- std::shared_ptr<IInterconnectMetrics> Metrics;
- std::array<ui32, 16> InputTrafficArray;
- THashMap<ui16, ui32> InputTrafficMap;
-
- bool CloseInputSessionRequested = false;
-
- void CloseInputSession();
-
- void Handle(TEvPollerReady::TPtr ev);
- void Handle(TEvPollerRegisterResult::TPtr ev);
- void HandleConfirmUpdate();
- void ReceiveData();
- void ProcessHeader();
- void ProcessPayload(ui64 *numDataBytes);
- void ProcessInboundPacketQ(ui64 numXdcBytesRead);
- void ProcessXdcCommand(ui16 channel, TReceiveContext::TPerChannelContext& context);
- void ProcessEvents(TReceiveContext::TPerChannelContext& context);
- ssize_t Read(NInterconnect::TStreamSocket& socket, const TPollerToken::TPtr& token, bool *readPending,
- const TIoVec *iov, size_t num);
- bool ReadMore();
- bool ReadXdcCatchStream(ui64 *numDataBytes);
- void ApplyXdcCatchStream();
- bool ReadXdc(ui64 *numDataBytes);
- void HandleXdcChecksum(TContiguousSpan span);
-
- TReceiveContext::TPerChannelContext& GetPerChannelContext(ui16 channel) const;
-
- void PassAway() override;
-
- TDeque<TRcBuf> Buffers;
-
- size_t CurrentBuffers = 1; // number of buffers currently required to allocate
- static constexpr size_t MaxBuffers = 72; // maximum buffers possible
- static constexpr int BitsPerUsageCount = 5;
- static constexpr size_t ItemsPerUsageCount = sizeof(ui64) * CHAR_BIT / BitsPerUsageCount;
- std::array<ui64, (MaxBuffers + ItemsPerUsageCount - 1) / ItemsPerUsageCount> UsageHisto; // read count histogram
-
- void PreallocateBuffers();
-
- inline ui64 GetMaxCyclesPerEvent() const {
- return DurationToCycles(TDuration::MicroSeconds(500));
- }
-
- const TDuration DeadPeerTimeout;
- TMonotonic LastReceiveTimestamp;
- void HandleCheckDeadPeer();
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // pinger logic
-
- bool NewPingProtocol = false;
- TDeque<TDuration> PingQ; // last N ping samples
- TDeque<i64> SkewQ; // last N calculated clock skew samples
-
- void HandlePingResponse(TDuration passed);
- void HandleClock(TInstant clock);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Stats
-
- ui64 BytesReadFromSocket = 0;
- ui64 PacketsReadFromSocket = 0;
- ui64 DataPacketsReadFromSocket = 0;
- ui64 IgnoredDataPacketsFromSocket = 0;
-
- ui64 BytesReadFromXdcSocket = 0;
- ui64 XdcSections = 0;
- ui64 XdcRefs = 0;
-
- ui64 CpuStarvationEvents = 0;
-
- void GenerateHttpInfo(NMon::TEvHttpInfoRes::TPtr ev);
- };
-
- class TInterconnectSessionTCP
- : public TActor<TInterconnectSessionTCP>
- , public TInterconnectLoggingBase
- {
- enum {
- EvCheckCloseOnIdle = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvCheckLostConnection,
- EvRam,
- EvTerminate,
- EvFreeItems,
- };
-
- struct TEvCheckCloseOnIdle : TEventLocal<TEvCheckCloseOnIdle, EvCheckCloseOnIdle> {};
- struct TEvCheckLostConnection : TEventLocal<TEvCheckLostConnection, EvCheckLostConnection> {};
-
- struct TEvRam : TEventLocal<TEvRam, EvRam> {
- const bool Batching;
- TEvRam(bool batching) : Batching(batching) {}
- };
-
- struct TEvTerminate : TEventLocal<TEvTerminate, EvTerminate> {
- TDisconnectReason Reason;
-
- TEvTerminate(TDisconnectReason reason)
- : Reason(std::move(reason))
- {}
- };
-
- const TInstant Created;
- TInstant NewConnectionSet;
- ui64 MessagesGot = 0;
- ui64 MessagesWrittenToBuffer = 0;
- ui64 PacketsGenerated = 0;
- ui64 BytesWrittenToSocket = 0;
- ui64 PacketsConfirmed = 0;
- ui64 BytesAlignedForOutOfBand = 0;
- ui64 OutOfBandBytesSent = 0;
- ui64 CpuStarvationEvents = 0;
- ui64 CpuStarvationEventsOnWriteData = 0;
-
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::INTERCONNECT_SESSION_TCP;
- }
-
- TInterconnectSessionTCP(TInterconnectProxyTCP* const proxy, TSessionParams params);
- ~TInterconnectSessionTCP();
-
- void Init();
- void CloseInputSession();
-
- static TEvTerminate* NewEvTerminate(TDisconnectReason reason) {
- return new TEvTerminate(std::move(reason));
- }
-
- TDuration GetPingRTT() const {
- return TDuration::MicroSeconds(ReceiveContext->PingRTT_us);
- }
-
- i64 GetClockSkew() const {
- return ReceiveContext->ClockSkew_us;
- }
-
- private:
- friend class TInterconnectProxyTCP;
-
- void Handle(TEvTerminate::TPtr& ev);
- void HandlePoison();
- void Terminate(TDisconnectReason reason);
- void PassAway() override;
-
- void Forward(STATEFN_SIG);
- void Subscribe(STATEFN_SIG);
- void Unsubscribe(STATEFN_SIG);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- std::optional<TTimeLimit> TimeLimit;
-
- STATEFN(StateFunc) {
- TimeLimit.emplace(GetMaxCyclesPerEvent());
- STRICT_STFUNC_BODY(
- fFunc(TEvInterconnect::EvForward, Forward)
- cFunc(TEvents::TEvPoisonPill::EventType, HandlePoison)
- fFunc(TEvInterconnect::TEvConnectNode::EventType, Subscribe)
- fFunc(TEvents::TEvSubscribe::EventType, Subscribe)
- fFunc(TEvents::TEvUnsubscribe::EventType, Unsubscribe)
- cFunc(TEvFlush::EventType, HandleFlush)
- hFunc(TEvPollerReady, Handle)
- hFunc(TEvPollerRegisterResult, Handle)
- hFunc(TEvUpdateFromInputSession, Handle)
- hFunc(TEvRam, HandleRam)
- hFunc(TEvCheckCloseOnIdle, CloseOnIdleWatchdog)
- hFunc(TEvCheckLostConnection, LostConnectionWatchdog)
- cFunc(TEvents::TSystem::Wakeup, SendUpdateToWhiteboard)
- hFunc(TEvSocketDisconnect, OnDisconnect)
- hFunc(TEvTerminate, Handle)
- hFunc(TEvProcessPingRequest, Handle)
- )
- }
-
- void Handle(TEvUpdateFromInputSession::TPtr& ev);
-
- void OnDisconnect(TEvSocketDisconnect::TPtr& ev);
-
- THolder<TEvHandshakeAck> ProcessHandshakeRequest(TEvHandshakeAsk::TPtr& ev);
- void SetNewConnection(TEvHandshakeDone::TPtr& ev);
-
- TEvRam* RamInQueue = nullptr;
- ui64 RamStartedCycles = 0;
- void IssueRam(bool batching);
- void HandleRam(TEvRam::TPtr& ev);
- void GenerateTraffic();
- void ProducePackets();
-
- size_t GetUnsentSize() const {
- return OutgoingStream.CalculateUnsentSize() + OutOfBandStream.CalculateUnsentSize() +
- XdcStream.CalculateUnsentSize();
- }
-
- size_t GetUnsentLimit() const {
- return 128 * 1024;
- }
-
- void SendUpdateToWhiteboard(bool connected = true);
- ui32 CalculateQueueUtilization();
-
- void Handle(TEvPollerReady::TPtr& ev);
- void Handle(TEvPollerRegisterResult::TPtr ev);
- void WriteData();
- ssize_t Write(NInterconnect::TOutgoingStream& stream, NInterconnect::TStreamSocket& socket, size_t maxBytes);
-
- ui32 MakePacket(bool data, TMaybe<ui64> pingMask = {});
- void FillSendingBuffer(TTcpPacketOutTask& packet, ui64 serial);
- void DropConfirmed(ui64 confirm);
- void ShutdownSocket(TDisconnectReason reason);
-
- void StartHandshake();
- void ReestablishConnection(TEvHandshakeDone::TPtr&& ev, bool startHandshakeOnSessionClose,
- TDisconnectReason reason);
- void ReestablishConnectionWithHandshake(TDisconnectReason reason);
- void ReestablishConnectionExecute();
-
- TInterconnectProxyTCP* const Proxy;
-
-        // accessors for various connection settings
- TDuration GetDeadPeerTimeout() const;
- TDuration GetCloseOnIdleTimeout() const;
- TDuration GetLostConnectionTimeout() const;
- ui32 GetTotalInflightAmountOfData() const;
- ui64 GetMaxCyclesPerEvent() const;
-
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // pinger
-
- TMonotonic LastPingTimestamp;
- static constexpr TDuration PingPeriodicity = TDuration::Seconds(1);
- void IssuePingRequest();
- void Handle(TEvProcessPingRequest::TPtr ev);
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- TMonotonic LastInputActivityTimestamp;
- TMonotonic LastPayloadActivityTimestamp;
- TWatchdogTimer<TEvCheckCloseOnIdle> CloseOnIdleWatchdog;
- TWatchdogTimer<TEvCheckLostConnection> LostConnectionWatchdog;
-
- void OnCloseOnIdleTimerHit() {
- LOG_INFO_IC("ICS27", "CloseOnIdle timer hit, session terminated");
- Terminate(TDisconnectReason::CloseOnIdle());
- }
-
- void OnLostConnectionTimerHit() {
- LOG_ERROR_IC("ICS28", "LostConnection timer hit, session terminated");
- Terminate(TDisconnectReason::LostConnection());
- }
-
- void RearmCloseOnIdle() {
- if (!NumEventsInQueue && OutputCounter == LastConfirmed) {
- CloseOnIdleWatchdog.Rearm(SelfId());
- } else {
- CloseOnIdleWatchdog.Disarm();
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- const TSessionParams Params;
- TMaybe<TEventHolderPool> Pool;
- TMaybe<TChannelScheduler> ChannelScheduler;
- ui64 TotalOutputQueueSize;
- bool OutputStuckFlag;
- TRecentWnd<std::pair<ui64, ui64>> OutputQueueUtilization;
- size_t NumEventsInQueue = 0;
-
- void SetOutputStuckFlag(bool state);
- void SwitchStuckPeriod();
-
- NInterconnect::TOutgoingStream OutgoingStream;
- NInterconnect::TOutgoingStream OutOfBandStream;
- NInterconnect::TOutgoingStream XdcStream;
-
- struct TOutgoingPacket {
- ui32 PacketSize; // including header
- ui32 ExternalSize;
- };
- std::deque<TOutgoingPacket> SendQueue; // packet boundaries
- size_t OutgoingOffset = 0;
- size_t XdcOffset = 0;
-        size_t OutgoingIndex = 0; // index of the current packet in SendQueue
- size_t ForcedWriteLength = 0;
-
- ui64 XdcBytesSent = 0;
-
-        ui64 WriteBlockedCycles = 0; // cycle counter value at the start of the current blocked period
-        TDuration WriteBlockedTotal; // total accumulated time the session has spent blocked
- bool WriteBlockedByFullSendBuffer = false;
-
- TDuration GetWriteBlockedTotal() const {
- return WriteBlockedTotal + (WriteBlockedByFullSendBuffer
- ? TDuration::Seconds(NHPTimer::GetSeconds(GetCycleCountFast() - WriteBlockedCycles))
- : TDuration::Zero());
- }
-
- ui64 OutputCounter;
-
- TInstant LastHandshakeDone;
-
- TIntrusivePtr<NInterconnect::TStreamSocket> Socket;
- TIntrusivePtr<NInterconnect::TStreamSocket> XdcSocket;
- TPollerToken::TPtr PollerToken;
- TPollerToken::TPtr XdcPollerToken;
- ui32 SendBufferSize;
- ui64 InflightDataAmount = 0;
-
- std::unordered_map<TActorId, ui64, TActorId::THash> Subscribers;
-
- // time at which we want to send confirmation packet even if there was no outgoing data
- ui64 UnconfirmedBytes = 0;
- TMonotonic ForcePacketTimestamp = TMonotonic::Max();
- TPriorityQueue<TMonotonic, TVector<TMonotonic>, std::greater<TMonotonic>> FlushSchedule;
- size_t MaxFlushSchedule = 0;
- ui64 FlushEventsScheduled = 0;
- ui64 FlushEventsProcessed = 0;
-
- void SetForcePacketTimestamp(TDuration period);
- void ScheduleFlush();
- void HandleFlush();
- void ResetFlushLogic();
-
- void GenerateHttpInfo(NMon::TEvHttpInfoRes::TPtr& ev);
-
- TIntrusivePtr<TReceiveContext> ReceiveContext;
- TActorId ReceiverId;
- TDuration Ping;
-
- ui64 ConfirmPacketsForcedBySize = 0;
- ui64 ConfirmPacketsForcedByTimeout = 0;
-
- ui64 LastConfirmed = 0;
-
- TEvHandshakeDone::TPtr PendingHandshakeDoneEvent;
- bool StartHandshakeOnSessionClose = false;
-
- ui64 EqualizeCounter = 0;
- };
-
- class TInterconnectSessionKiller
- : public TActorBootstrapped<TInterconnectSessionKiller> {
- ui32 RepliesReceived = 0;
- ui32 RepliesNumber = 0;
- TActorId LargestSession = TActorId();
- ui64 MaxBufferSize = 0;
- TInterconnectProxyCommon::TPtr Common;
-
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::INTERCONNECT_SESSION_KILLER;
- }
-
- TInterconnectSessionKiller(TInterconnectProxyCommon::TPtr common)
- : Common(common)
- {
- }
-
- void Bootstrap() {
- auto sender = SelfId();
- const auto eventFabric = [&sender](const TActorId& recp) -> IEventHandle* {
- auto ev = new TEvSessionBufferSizeRequest();
- return new IEventHandle(recp, sender, ev, IEventHandle::FlagTrackDelivery);
- };
- RepliesNumber = TlsActivationContext->ExecutorThread.ActorSystem->BroadcastToProxies(eventFabric);
- Become(&TInterconnectSessionKiller::StateFunc);
- }
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvSessionBufferSizeResponse, ProcessResponse)
- cFunc(TEvents::TEvUndelivered::EventType, ProcessUndelivered)
- )
-
- void ProcessResponse(TEvSessionBufferSizeResponse::TPtr& ev) {
- RepliesReceived++;
- if (MaxBufferSize < ev->Get()->BufferSize) {
- MaxBufferSize = ev->Get()->BufferSize;
- LargestSession = ev->Get()->SessionID;
- }
- if (RepliesReceived == RepliesNumber) {
- Send(LargestSession, new TEvents::TEvPoisonPill);
- AtomicUnlock(&Common->StartedSessionKiller);
- PassAway();
- }
- }
-
- void ProcessUndelivered() {
- RepliesReceived++;
- }
- };
-
- void CreateSessionKillingActor(TInterconnectProxyCommon::TPtr common);
-
-}
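
The receiver class above sizes its per-buffer read-count histogram from MaxBuffers = 72 and BitsPerUsageCount = 5: 64 / 5 = 12 five-bit counters fit in one ui64, so ceil(72 / 12) = 6 words are enough. The snippet below is a standalone sketch of that packing arithmetic, including one plausible saturating increment for a packed counter; it mirrors the constant names for readability only and is not the library's update code.

    // Standalone sketch: the UsageHisto packing implied by the declarations above.
    #include <array>
    #include <cassert>
    #include <climits>
    #include <cstdint>

    int main() {
        constexpr size_t MaxBuffers = 72;
        constexpr int BitsPerUsageCount = 5;
        constexpr size_t ItemsPerUsageCount = sizeof(uint64_t) * CHAR_BIT / BitsPerUsageCount; // 12 counters per word
        constexpr size_t Words = (MaxBuffers + ItemsPerUsageCount - 1) / ItemsPerUsageCount;   // 6 words
        static_assert(ItemsPerUsageCount == 12 && Words == 6, "matches the declarations above");

        std::array<uint64_t, Words> histo{};
        auto increment = [&](size_t bucket) { // one plausible saturating +1 on a 5-bit packed counter
            uint64_t& word = histo[bucket / ItemsPerUsageCount];
            const unsigned shift = BitsPerUsageCount * (bucket % ItemsPerUsageCount);
            const uint64_t mask = ((uint64_t(1) << BitsPerUsageCount) - 1) << shift;
            if ((word & mask) != mask) {
                word += uint64_t(1) << shift;
            }
        };
        increment(71);                        // the last bucket occupies bits 55..59 of word 5
        assert(histo[5] == uint64_t(1) << 55);
        return 0;
    }
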
diff --git a/library/cpp/actors/interconnect/load.cpp b/library/cpp/actors/interconnect/load.cpp
deleted file mode 100644
index 20ca0ef8a9..0000000000
--- a/library/cpp/actors/interconnect/load.cpp
+++ /dev/null
@@ -1,405 +0,0 @@
-#include "load.h"
-#include "interconnect_common.h"
-#include "events_local.h"
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <util/generic/queue.h>
-
-namespace NInterconnect {
- using namespace NActors;
-
- enum {
- EvGenerateMessages = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvPublishResults,
- EvQueryTrafficCounter,
- EvTrafficCounter,
- };
-
- struct TEvQueryTrafficCounter : TEventLocal<TEvQueryTrafficCounter, EvQueryTrafficCounter> {};
-
- struct TEvTrafficCounter : TEventLocal<TEvTrafficCounter, EvTrafficCounter> {
- std::shared_ptr<std::atomic_uint64_t> Traffic;
-
- TEvTrafficCounter(std::shared_ptr<std::atomic_uint64_t> traffic)
- : Traffic(std::move(traffic))
- {}
- };
-
- class TLoadResponderActor : public TActor<TLoadResponderActor> {
- STRICT_STFUNC(StateFunc,
- HFunc(TEvLoadMessage, Handle);
- CFunc(TEvents::TSystem::PoisonPill, Die);
- )
-
- void Handle(TEvLoadMessage::TPtr& ev, const TActorContext& ctx) {
- ui64 bytes = ev->Get()->CalculateSerializedSizeCached();
- auto& record = ev->Get()->Record;
- auto *hops = record.MutableHops();
- while (!hops->empty() && !hops->begin()->HasNextHop()) {
- record.ClearPayload();
- ev->Get()->StripPayload();
- hops->erase(hops->begin());
- }
- if (!hops->empty()) {
- // extract actor id of the next hop
- const TActorId nextHopActorId = ActorIdFromProto(hops->begin()->GetNextHop());
- hops->erase(hops->begin());
-
- // forward message to next hop; preserve flags and cookie
- auto msg = MakeHolder<TEvLoadMessage>();
- record.Swap(&msg->Record);
- bytes += msg->CalculateSerializedSizeCached();
- ctx.Send(nextHopActorId, msg.Release(), ev->Flags, ev->Cookie);
- }
- *Traffic += bytes;
- }
-
- public:
- TLoadResponderActor(std::shared_ptr<std::atomic_uint64_t> traffic)
- : TActor(&TLoadResponderActor::StateFunc)
- , Traffic(std::move(traffic))
- {}
-
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::INTERCONNECT_LOAD_RESPONDER;
- }
-
- private:
- std::shared_ptr<std::atomic_uint64_t> Traffic;
- };
-
- class TLoadResponderMasterActor : public TActorBootstrapped<TLoadResponderMasterActor> {
- TVector<TActorId> Slaves;
- ui32 SlaveIndex = 0;
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvLoadMessage, Handle);
- HFunc(TEvQueryTrafficCounter, Handle);
- CFunc(TEvents::TSystem::PoisonPill, Die);
- )
-
- void Handle(TEvLoadMessage::TPtr& ev, const TActorContext& ctx) {
- ctx.ExecutorThread.ActorSystem->Send(ev->Forward(Slaves[SlaveIndex]));
- if (++SlaveIndex == Slaves.size()) {
- SlaveIndex = 0;
- }
- }
-
- void Handle(TEvQueryTrafficCounter::TPtr ev, const TActorContext& ctx) {
- ctx.Send(ev->Sender, new TEvTrafficCounter(Traffic));
- }
-
- void Die(const TActorContext& ctx) override {
- for (const TActorId& actorId : Slaves) {
- ctx.Send(actorId, new TEvents::TEvPoisonPill);
- }
- TActorBootstrapped::Die(ctx);
- }
-
- public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::INTERCONNECT_LOAD_RESPONDER;
- }
-
- TLoadResponderMasterActor()
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TLoadResponderMasterActor::StateFunc);
- while (Slaves.size() < 10) {
- Slaves.push_back(ctx.Register(new TLoadResponderActor(Traffic)));
- }
- }
-
- private:
- std::shared_ptr<std::atomic_uint64_t> Traffic = std::make_shared<std::atomic_uint64_t>();
- };
-
- IActor* CreateLoadResponderActor() {
- return new TLoadResponderMasterActor();
- }
-
- TActorId MakeLoadResponderActorId(ui32 nodeId) {
- char x[12] = {'I', 'C', 'L', 'o', 'a', 'd', 'R', 'e', 's', 'p', 'A', 'c'};
- return TActorId(nodeId, TStringBuf(x, 12));
- }
-
- class TLoadActor: public TActorBootstrapped<TLoadActor> {
- struct TEvGenerateMessages : TEventLocal<TEvGenerateMessages, EvGenerateMessages> {};
- struct TEvPublishResults : TEventLocal<TEvPublishResults, EvPublishResults> {};
-
- struct TMessageInfo {
- TInstant SendTimestamp;
-
- TMessageInfo(const TInstant& sendTimestamp)
- : SendTimestamp(sendTimestamp)
- {
- }
- };
-
- const TLoadParams Params;
- TInstant NextMessageTimestamp;
- THashMap<TString, TMessageInfo> InFly;
- ui64 NextId = 1;
- TVector<TActorId> Hops;
- TActorId FirstHop;
- ui64 NumDropped = 0;
- std::shared_ptr<std::atomic_uint64_t> Traffic;
-
- public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::INTERCONNECT_LOAD_ACTOR;
- }
-
- TLoadActor(const TLoadParams& params)
- : Params(params)
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TLoadActor::QueryTrafficCounter);
- ctx.Send(MakeLoadResponderActorId(SelfId().NodeId()), new TEvQueryTrafficCounter);
- }
-
- void Handle(TEvTrafficCounter::TPtr ev, const TActorContext& ctx) {
- Traffic = std::move(ev->Get()->Traffic);
-
- for (const ui32 nodeId : Params.NodeHops) {
- const TActorId& actorId = nodeId ? MakeLoadResponderActorId(nodeId) : TActorId();
- if (!FirstHop) {
- FirstHop = actorId;
- } else {
- Hops.push_back(actorId);
- }
- }
-
- Hops.push_back(ctx.SelfID);
-
- Become(&TLoadActor::StateFunc);
- NextMessageTimestamp = ctx.Now();
- ResetThroughput(NextMessageTimestamp, *Traffic);
- GenerateMessages(ctx);
- ctx.Schedule(Params.Duration, new TEvents::TEvPoisonPill);
- SchedulePublishResults(ctx);
- }
-
- void GenerateMessages(const TActorContext& ctx) {
- while (InFly.size() < Params.InFlyMax && ctx.Now() >= NextMessageTimestamp) {
- // generate payload
- const ui32 size = Params.SizeMin + RandomNumber(Params.SizeMax - Params.SizeMin + 1);
-
- // generate message id
- const ui64 cookie = NextId++;
- TString id = Sprintf("%" PRIu64, cookie);
-
- // create message and send it to the first hop
- THolder<TEvLoadMessage> ev;
- if (Params.UseProtobufWithPayload && size) {
- auto buffer = TRopeAlignedBuffer::Allocate(size);
- memset(buffer->GetBuffer(), '*', size);
- ev.Reset(new TEvLoadMessage(Hops, id, TRope(buffer)));
- } else {
- TString payload;
- if (size) {
- payload = TString::Uninitialized(size);
- memset(payload.Detach(), '*', size);
- }
- ev.Reset(new TEvLoadMessage(Hops, id, payload ? &payload : nullptr));
- }
- UpdateThroughput(ev->CalculateSerializedSizeCached());
- ctx.Send(FirstHop, ev.Release(), IEventHandle::MakeFlags(Params.Channel, 0), cookie);
-
- // register in the map
- InFly.emplace(id, TMessageInfo(ctx.Now()));
-
- // put item into timeout queue
- PutTimeoutQueueItem(ctx, id);
-
- const TDuration duration = TDuration::MicroSeconds(Params.IntervalMin.GetValue() +
- RandomNumber(Params.IntervalMax.GetValue() - Params.IntervalMin.GetValue() + 1));
- if (Params.SoftLoad) {
- NextMessageTimestamp += duration;
- } else {
- NextMessageTimestamp = ctx.Now() + duration;
- }
- }
-
- // schedule next generate messages call
- if (NextMessageTimestamp > ctx.Now() && InFly.size() < Params.InFlyMax) {
- ctx.Schedule(NextMessageTimestamp - ctx.Now(), new TEvGenerateMessages);
- }
- }
-
- void Handle(TEvLoadMessage::TPtr& ev, const TActorContext& ctx) {
- const auto& record = ev->Get()->Record;
- auto it = InFly.find(record.GetId());
- if (it != InFly.end()) {
- // record message rtt
- const TDuration rtt = ctx.Now() - it->second.SendTimestamp;
- UpdateHistogram(ctx.Now(), rtt);
-
- // update throughput
- UpdateThroughput(ev->Get()->CalculateSerializedSizeCached());
-
- // remove message from the in fly map
- InFly.erase(it);
- } else {
- ++NumDropped;
- }
- GenerateMessages(ctx);
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // RTT HISTOGRAM
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- const TDuration AggregationPeriod = TDuration::Seconds(20);
- TDeque<std::pair<TInstant, TDuration>> Histogram;
-
- void UpdateHistogram(TInstant when, TDuration rtt) {
- Histogram.emplace_back(when, rtt);
-
- const TInstant barrier = when - AggregationPeriod;
- while (Histogram && Histogram.front().first < barrier) {
- Histogram.pop_front();
- }
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // THROUGHPUT
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- TInstant ThroughputFirstSample = TInstant::Zero();
- ui64 ThroughputSamples = 0;
- ui64 ThroughputBytes = 0;
- ui64 TrafficAtBegin = 0;
-
- void UpdateThroughput(ui64 bytes) {
- ThroughputBytes += bytes;
- ++ThroughputSamples;
- }
-
- void ResetThroughput(TInstant when, ui64 traffic) {
- ThroughputFirstSample = when;
- ThroughputSamples = 0;
- ThroughputBytes = 0;
- TrafficAtBegin = traffic;
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // TIMEOUT QUEUE OPERATIONS
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- TQueue<std::pair<TInstant, TString>> TimeoutQueue;
-
- void PutTimeoutQueueItem(const TActorContext& ctx, TString id) {
- TimeoutQueue.emplace(ctx.Now() + TDuration::Minutes(1), std::move(id));
- if (TimeoutQueue.size() == 1) {
- ScheduleWakeup(ctx);
- }
- }
-
- void ScheduleWakeup(const TActorContext& ctx) {
- ctx.Schedule(TimeoutQueue.front().first - ctx.Now(), new TEvents::TEvWakeup);
- }
-
- void HandleWakeup(const TActorContext& ctx) {
- // ui32 numDropped = 0;
-
- while (TimeoutQueue && TimeoutQueue.front().first <= ctx.Now()) {
- /*numDropped += */InFly.erase(TimeoutQueue.front().second);
- TimeoutQueue.pop();
- }
- if (TimeoutQueue) {
- // we still have some elements in timeout queue, so schedule next wake up to tidy up
- ScheduleWakeup(ctx);
- }
-
- GenerateMessages(ctx);
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // RESULT PUBLISHING
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- const TDuration ResultPublishPeriod = TDuration::Seconds(15);
-
- void SchedulePublishResults(const TActorContext& ctx) {
- ctx.Schedule(ResultPublishPeriod, new TEvPublishResults);
- }
-
- void PublishResults(const TActorContext& ctx, bool schedule = true) {
- const TInstant now = ctx.Now();
-
- TStringStream msg;
-
- msg << "Load# '" << Params.Name << "'";
-
- msg << " Throughput# ";
- const TDuration duration = now - ThroughputFirstSample;
- const ui64 traffic = *Traffic;
- msg << "{window# " << duration
- << " bytes# " << ThroughputBytes
- << " samples# " << ThroughputSamples
- << " b/s# " << ui64(ThroughputBytes * 1000000 / duration.MicroSeconds())
- << " common# " << ui64((traffic - TrafficAtBegin) * 1000000 / duration.MicroSeconds())
- << "}";
- ResetThroughput(now, traffic);
-
- msg << " RTT# ";
- if (Histogram) {
- const TDuration duration = Histogram.back().first - Histogram.front().first;
- msg << "{window# " << duration << " samples# " << Histogram.size();
- TVector<TDuration> v;
- v.reserve(Histogram.size());
- for (const auto& item : Histogram) {
- v.push_back(item.second);
- }
- std::sort(v.begin(), v.end());
- for (double q : {0.5, 0.9, 0.99, 0.999, 0.9999, 1.0}) {
- const size_t pos = q * (v.size() - 1);
- msg << Sprintf(" %.4f# %s", q, v[pos].ToString().data());
- }
- msg << "}";
- } else {
- msg << "<empty>";
- }
-
- msg << " NumDropped# " << NumDropped;
-
- if (!schedule) {
- msg << " final";
- }
-
- LOG_NOTICE(ctx, NActorsServices::INTERCONNECT_SPEED_TEST, "%s", msg.Str().data());
-
- if (schedule) {
- SchedulePublishResults(ctx);
- }
- }
-
- STRICT_STFUNC(QueryTrafficCounter,
- HFunc(TEvTrafficCounter, Handle);
- )
-
- STRICT_STFUNC(StateFunc,
- CFunc(TEvents::TSystem::PoisonPill, Die);
- CFunc(TEvents::TSystem::Wakeup, HandleWakeup);
- CFunc(EvPublishResults, PublishResults);
- CFunc(EvGenerateMessages, GenerateMessages);
- HFunc(TEvLoadMessage, Handle);
- )
-
- void Die(const TActorContext& ctx) override {
- PublishResults(ctx, false);
- TActorBootstrapped::Die(ctx);
- }
- };
-
- IActor* CreateLoadActor(const TLoadParams& params) {
- return new TLoadActor(params);
- }
-
-}
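
PublishResults above reports RTT quantiles by sorting the collected samples and indexing the sorted vector at q * (N - 1), with the product truncated to an integer. A minimal standalone rendition of just that indexing rule follows; the sample values are made up.

    // Standalone sketch of the quantile indexing used in PublishResults (pos = q * (N - 1), truncated).
    #include <algorithm>
    #include <cstdio>
    #include <vector>

    int main() {
        std::vector<double> rttUs = {120, 95, 230, 110, 400, 105, 150, 99, 310, 101}; // fabricated RTTs, microseconds
        std::sort(rttUs.begin(), rttUs.end());
        for (double q : {0.5, 0.9, 0.99, 0.999, 0.9999, 1.0}) {
            const size_t pos = static_cast<size_t>(q * (rttUs.size() - 1)); // truncation, as in the code above
            std::printf(" %.4f# %.0fus", q, rttUs[pos]);
        }
        std::printf("\n");
        return 0;
    }
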
diff --git a/library/cpp/actors/interconnect/load.h b/library/cpp/actors/interconnect/load.h
deleted file mode 100644
index 0a01a0dc04..0000000000
--- a/library/cpp/actors/interconnect/load.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-
-namespace NInterconnect {
- // load responder -- lives on every node as a service actor
- NActors::IActor* CreateLoadResponderActor();
- NActors::TActorId MakeLoadResponderActorId(ui32 node);
-
- // load actor -- generates load with specific parameters
- struct TLoadParams {
- TString Name;
- ui32 Channel;
- TVector<ui32> NodeHops; // node ids for the message route
- ui32 SizeMin, SizeMax; // min and max size for payloads
-        ui32 InFlyMax; // maximum number of in-flight messages
- TDuration IntervalMin, IntervalMax; // min and max intervals between sending messages
-        bool SoftLoad; // soft load: advance the send schedule by the interval instead of resetting it from the current time
- TDuration Duration; // test duration
-        bool UseProtobufWithPayload; // store the payload separately as a rope attached to the protobuf event, not as an inline string
- };
- NActors::IActor* CreateLoadActor(const TLoadParams& params);
-
-}
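
For reference, a hypothetical caller of the API declared above would fill TLoadParams and hand the actor returned by CreateLoadActor to the actor system. All values below are illustrative, and actually running the actor requires a live actor system with the load responder service registered, which this sketch does not set up.

    // Illustrative parameter set for the deleted load actor; values are invented.
    #include <library/cpp/actors/interconnect/load.h>
    #include <util/datetime/base.h>

    NActors::IActor* MakeExampleLoadActor() {
        NInterconnect::TLoadParams params;
        params.Name = "example-load";
        params.Channel = 0;
        params.NodeHops = {2, 3};                          // responders on nodes 2 and 3; the sender is appended as the final hop
        params.SizeMin = 1024;
        params.SizeMax = 4096;
        params.InFlyMax = 16;
        params.IntervalMin = TDuration::MicroSeconds(100);
        params.IntervalMax = TDuration::MicroSeconds(500);
        params.SoftLoad = true;                            // keep the precomputed send schedule instead of resetting it
        params.Duration = TDuration::Seconds(60);
        params.UseProtobufWithPayload = false;
        return NInterconnect::CreateLoadActor(params);
    }
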
diff --git a/library/cpp/actors/interconnect/logging.h b/library/cpp/actors/interconnect/logging.h
deleted file mode 100644
index 010a4aa93b..0000000000
--- a/library/cpp/actors/interconnect/logging.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-
-#define LOG_LOG_IC_X(component, marker, priority, ...) \
- do { \
- LOG_LOG(this->GetActorContext(), (priority), (component), "%s " marker " %s", LogPrefix.data(), Sprintf(__VA_ARGS__).data()); \
- } while (false)
-
-#define LOG_LOG_NET_X(priority, NODE_ID, FMT, ...) \
- do { \
- const TActorContext& ctx = this->GetActorContext(); \
- LOG_LOG(ctx, (priority), ::NActorsServices::INTERCONNECT_NETWORK, "[%" PRIu32 " <-> %" PRIu32 "] %s", \
- ctx.SelfID.NodeId(), (NODE_ID), Sprintf(FMT, __VA_ARGS__).data()); \
- } while (false)
-
-#define LOG_LOG_IC(component, marker, priority, ...) \
- do { \
- LOG_LOG(::NActors::TActivationContext::AsActorContext(), (priority), (component), "%s " marker " %s", LogPrefix.data(), Sprintf(__VA_ARGS__).data()); \
- } while (false)
-
-#define LOG_LOG_NET(priority, NODE_ID, FMT, ...) \
- do { \
- const TActorContext& ctx = ::NActors::TActivationContext::AsActorContext(); \
- LOG_LOG(ctx, (priority), ::NActorsServices::INTERCONNECT_NETWORK, "[%" PRIu32 " <-> %" PRIu32 "] %s", \
- ctx.SelfID.NodeId(), (NODE_ID), Sprintf(FMT, __VA_ARGS__).data()); \
- } while (false)
-
-#define LOG_EMER_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_EMER, __VA_ARGS__)
-#define LOG_ALERT_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_ALERT, __VA_ARGS__)
-#define LOG_CRIT_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_CRIT, __VA_ARGS__)
-#define LOG_ERROR_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_ERROR, __VA_ARGS__)
-#define LOG_WARN_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_WARN, __VA_ARGS__)
-#define LOG_NOTICE_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_NOTICE, __VA_ARGS__)
-#define LOG_INFO_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_INFO, __VA_ARGS__)
-#define LOG_DEBUG_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_DEBUG, __VA_ARGS__)
-#define LOG_TRACE_IC(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT, marker, ::NActors::NLog::PRI_TRACE, __VA_ARGS__)
-
-#define LOG_EMER_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_EMER, __VA_ARGS__)
-#define LOG_ALERT_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_ALERT, __VA_ARGS__)
-#define LOG_CRIT_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_CRIT, __VA_ARGS__)
-#define LOG_ERROR_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_ERROR, __VA_ARGS__)
-#define LOG_WARN_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_WARN, __VA_ARGS__)
-#define LOG_NOTICE_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_NOTICE, __VA_ARGS__)
-#define LOG_INFO_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_INFO, __VA_ARGS__)
-#define LOG_DEBUG_IC_SESSION(marker, ...) LOG_LOG_IC(::NActorsServices::INTERCONNECT_SESSION, marker, ::NActors::NLog::PRI_DEBUG, __VA_ARGS__)
-
-#define LOG_NOTICE_NET(NODE_ID, FMT, ...) LOG_LOG_NET(::NActors::NLog::PRI_NOTICE, NODE_ID, FMT, __VA_ARGS__)
-#define LOG_DEBUG_NET(NODE_ID, FMT, ...) LOG_LOG_NET(::NActors::NLog::PRI_DEBUG, NODE_ID, FMT, __VA_ARGS__)
-
-namespace NActors {
- class TInterconnectLoggingBase {
- protected:
- const TString LogPrefix;
-
- public:
- TInterconnectLoggingBase() = default;
-
- TInterconnectLoggingBase(const TString& prefix)
- : LogPrefix(prefix)
- {
- }
-
- void SetPrefix(TString logPrefix) const {
- logPrefix.swap(const_cast<TString&>(LogPrefix));
- }
- };
-}
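
The macros above produce two line shapes: the IC/IC_SESSION family prepends the owning object's LogPrefix and a short marker to the formatted message, while the NET family prepends the local and peer node ids. The real macros route through LOG_LOG and the activation context; the snippet below only reproduces the resulting text layout with plain printf.

    // Standalone rendering of the two log-line shapes produced by the macros above.
    #include <cinttypes>
    #include <cstdint>
    #include <cstdio>

    int main() {
        const char* logPrefix = "Session {peer# 2}";       // invented prefix text
        // LOG_*_IC / LOG_*_IC_SESSION: "<LogPrefix> <marker> <formatted message>"
        std::printf("%s %s %s\n", logPrefix, "ICS27", "CloseOnIdle timer hit, session terminated");
        // LOG_*_NET: "[<self node> <-> <peer node>] <formatted message>"
        std::printf("[%" PRIu32 " <-> %" PRIu32 "] %s\n", uint32_t(1), uint32_t(2), "connection closed");
        return 0;
    }
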
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/mock/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index c29d87b0ce..0000000000
--- a/library/cpp/actors/interconnect/mock/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-interconnect-mock)
-target_link_libraries(actors-interconnect-mock PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-interconnect
-)
-target_sources(actors-interconnect-mock PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/mock/ic_mock.cpp
-)
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/mock/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index c29d87b0ce..0000000000
--- a/library/cpp/actors/interconnect/mock/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-interconnect-mock)
-target_link_libraries(actors-interconnect-mock PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-interconnect
-)
-target_sources(actors-interconnect-mock PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/mock/ic_mock.cpp
-)
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/mock/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 85d0cf4696..0000000000
--- a/library/cpp/actors/interconnect/mock/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-interconnect-mock)
-target_link_libraries(actors-interconnect-mock PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-interconnect
-)
-target_sources(actors-interconnect-mock PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/mock/ic_mock.cpp
-)
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/mock/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 85d0cf4696..0000000000
--- a/library/cpp/actors/interconnect/mock/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-interconnect-mock)
-target_link_libraries(actors-interconnect-mock PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-interconnect
-)
-target_sources(actors-interconnect-mock PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/mock/ic_mock.cpp
-)
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/mock/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index c29d87b0ce..0000000000
--- a/library/cpp/actors/interconnect/mock/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,18 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-interconnect-mock)
-target_link_libraries(actors-interconnect-mock PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-interconnect
-)
-target_sources(actors-interconnect-mock PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/mock/ic_mock.cpp
-)
diff --git a/library/cpp/actors/interconnect/mock/ic_mock.cpp b/library/cpp/actors/interconnect/mock/ic_mock.cpp
deleted file mode 100644
index 81e181b673..0000000000
--- a/library/cpp/actors/interconnect/mock/ic_mock.cpp
+++ /dev/null
@@ -1,385 +0,0 @@
-#include "ic_mock.h"
-#include <library/cpp/actors/core/interconnect.h>
-#include <util/system/yield.h>
-#include <thread>
-#include <deque>
-
-namespace NActors {
-
- class TInterconnectMock::TImpl {
- enum {
- EvInject = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvCheckSession,
- EvRam,
- };
-
- struct TEvInject : TEventLocal<TEvInject, EvInject> {
- std::deque<std::unique_ptr<IEventHandle>> Messages;
- const TScopeId OriginScopeId;
- const ui64 SenderSessionId;
-
- TEvInject(std::deque<std::unique_ptr<IEventHandle>>&& messages, const TScopeId& originScopeId, ui64 senderSessionId)
- : Messages(std::move(messages))
- , OriginScopeId(originScopeId)
- , SenderSessionId(senderSessionId)
- {}
- };
-
- class TProxyMockActor;
-
- class TConnectionState {
- struct TPeerInfo {
- TRWMutex Mutex;
- TActorSystem *ActorSystem = nullptr;
- TActorId ProxyId;
- };
-
- const ui64 Key;
- TPeerInfo PeerInfo[2];
- std::atomic_uint64_t SessionId = 0;
-
- public:
- TConnectionState(ui64 key)
- : Key(key)
- {}
-
- void Attach(ui32 nodeId, TActorSystem *as, const TActorId& actorId) {
- TPeerInfo *peer = GetPeer(nodeId);
- auto guard = TWriteGuard(peer->Mutex);
- Y_ABORT_UNLESS(!peer->ActorSystem);
- peer->ActorSystem = as;
- peer->ProxyId = actorId;
- as->DeferPreStop([peer] {
- auto guard = TWriteGuard(peer->Mutex);
- peer->ActorSystem = nullptr;
- });
- }
-
- void Inject(ui32 peerNodeId, std::deque<std::unique_ptr<IEventHandle>>&& messages,
- const TScopeId& originScopeId, ui64 senderSessionId) {
- TPeerInfo *peer = GetPeer(peerNodeId);
- auto guard = TReadGuard(peer->Mutex);
- if (peer->ActorSystem) {
- peer->ActorSystem->Send(new IEventHandle(peer->ProxyId, TActorId(), new TEvInject(std::move(messages),
- originScopeId, senderSessionId)));
- } else {
- for (auto&& ev : messages) {
- TActivationContext::Send(IEventHandle::ForwardOnNondelivery(std::move(ev), TEvents::TEvUndelivered::Disconnected));
- }
- }
- }
-
- ui64 GetValidSessionId() const {
- return SessionId;
- }
-
- void InvalidateSessionId(ui32 peerNodeId) {
- ++SessionId;
- TPeerInfo *peer = GetPeer(peerNodeId);
- auto guard = TReadGuard(peer->Mutex);
- if (peer->ActorSystem) {
- peer->ActorSystem->Send(new IEventHandle(EvCheckSession, 0, peer->ProxyId, {}, nullptr, 0));
- }
- }
-
- private:
- TPeerInfo *GetPeer(ui32 nodeId) {
- if (nodeId == ui32(Key)) {
- return PeerInfo;
- } else if (nodeId == ui32(Key >> 32)) {
- return PeerInfo + 1;
- } else {
- Y_ABORT();
- }
- }
- };
-
- class TProxyMockActor : public TActor<TProxyMockActor> {
- class TSessionMockActor : public TActor<TSessionMockActor> {
- std::map<TActorId, ui64> Subscribers;
- TProxyMockActor* const Proxy;
- std::deque<std::unique_ptr<IEventHandle>> Queue;
-
- public:
- const ui64 SessionId;
-
- public:
- TSessionMockActor(TProxyMockActor *proxy, ui64 sessionId)
- : TActor(&TThis::StateFunc)
- , Proxy(proxy)
- , SessionId(sessionId)
- {}
-
- static constexpr char ActorName[] = "SESSION_MOCK_ACTOR";
-
- void Terminate() {
- for (auto&& ev : std::exchange(Queue, {})) {
- TActivationContext::Send(IEventHandle::ForwardOnNondelivery(std::move(ev), TEvents::TEvUndelivered::Disconnected));
- }
- for (const auto& kv : Subscribers) {
- Send(kv.first, new TEvInterconnect::TEvNodeDisconnected(Proxy->PeerNodeId), 0, kv.second);
- }
- Y_ABORT_UNLESS(Proxy->Session == this);
- Proxy->Session = nullptr;
- PassAway();
- }
-
- void HandleForward(TAutoPtr<IEventHandle> ev) {
- if (CheckNodeStatus(ev)) {
- if (ev->Flags & IEventHandle::FlagSubscribeOnSession) {
- Subscribe(ev->Sender, ev->Cookie);
- }
- if (Queue.empty()) {
- TActivationContext::Send(new IEventHandle(EvRam, 0, SelfId(), {}, {}, 0));
- }
- Queue.emplace_back(ev.Release());
- }
- }
-
- void HandleRam() {
- if (SessionId != Proxy->State.GetValidSessionId()) {
- Terminate();
- } else {
- Proxy->PeerInject(std::exchange(Queue, {}));
- }
- }
-
- void Handle(TEvInterconnect::TEvConnectNode::TPtr ev) {
- if (CheckNodeStatus(ev)) {
- Subscribe(ev->Sender, ev->Cookie);
- }
- }
-
- void Handle(TEvents::TEvSubscribe::TPtr ev) {
- if (CheckNodeStatus(ev)) {
- Subscribe(ev->Sender, ev->Cookie);
- }
- }
-
- void Handle(TEvents::TEvUnsubscribe::TPtr ev) {
- if (CheckNodeStatus(ev)) {
- Subscribers.erase(ev->Sender);
- }
- }
-
- void HandlePoison() {
- Proxy->Disconnect();
- }
-
- STRICT_STFUNC(StateFunc,
- fFunc(TEvInterconnect::EvForward, HandleForward)
- hFunc(TEvInterconnect::TEvConnectNode, Handle)
- hFunc(TEvents::TEvSubscribe, Handle)
- hFunc(TEvents::TEvUnsubscribe, Handle)
- hFunc(TEvInterconnect::TEvNodeInfo, HandleNodeInfo)
- cFunc(TEvents::TSystem::Poison, HandlePoison)
- cFunc(EvRam, HandleRam)
- )
-
- private:
- enum EPeerNodeStatus {
- UNKNOWN,
- EXISTS,
- MISSING
- };
-
- bool IsWaitingForNodeInfo = false;
- std::deque<std::unique_ptr<IEventHandle>> WaitingConnections;
- EPeerNodeStatus PeerNodeStatus = EPeerNodeStatus::UNKNOWN;
-
- void Subscribe(const TActorId& actorId, ui64 cookie) {
- Subscribers[actorId] = cookie;
- Send(actorId, new TEvInterconnect::TEvNodeConnected(Proxy->PeerNodeId), 0, cookie);
- }
-
- template <typename TEvent>
- bool CheckNodeStatus(TAutoPtr<TEventHandle<TEvent>>& ev) {
- if (PeerNodeStatus != EPeerNodeStatus::EXISTS) {
- std::unique_ptr<IEventHandle> tmp(ev.Release());
- CheckNonexistentNode(tmp);
- return false;
- }
- return true;
- }
-
- bool CheckNodeStatus(TAutoPtr<IEventHandle>& ev) {
- if (PeerNodeStatus != EPeerNodeStatus::EXISTS) {
- std::unique_ptr<IEventHandle> tmp(ev.Release());
- CheckNonexistentNode(tmp);
- return false;
- }
- return true;
- }
-
- void CheckNonexistentNode(std::unique_ptr<IEventHandle>& ev) {
- if (PeerNodeStatus == EPeerNodeStatus::UNKNOWN) {
- WaitingConnections.emplace_back(ev.release());
- if (!IsWaitingForNodeInfo) {
- Send(Proxy->Common->NameserviceId, new TEvInterconnect::TEvGetNode(Proxy->PeerNodeId));
- IsWaitingForNodeInfo = true;
- }
- } else if (PeerNodeStatus == EPeerNodeStatus::MISSING) {
- switch (ev->GetTypeRewrite()) {
- case TEvInterconnect::EvForward:
- if (ev->Flags & IEventHandle::FlagSubscribeOnSession) {
- Send(ev->Sender, new TEvInterconnect::TEvNodeDisconnected(Proxy->PeerNodeId), 0, ev->Cookie);
- }
- TActivationContext::Send(IEventHandle::ForwardOnNondelivery(std::move(ev), TEvents::TEvUndelivered::Disconnected));
- break;
-
- case TEvents::TEvSubscribe::EventType:
- case TEvInterconnect::TEvConnectNode::EventType:
- Send(ev->Sender, new TEvInterconnect::TEvNodeDisconnected(Proxy->PeerNodeId), 0, ev->Cookie);
- break;
-
- case TEvents::TEvUnsubscribe::EventType:
- break;
-
- default:
- Y_ABORT();
- }
- }
- }
-
- void HandleNodeInfo(TEvInterconnect::TEvNodeInfo::TPtr ev) {
- Y_ABORT_UNLESS(IsWaitingForNodeInfo);
- if (!ev->Get()->Node) {
- PeerNodeStatus = EPeerNodeStatus::MISSING;
- } else {
- PeerNodeStatus = EPeerNodeStatus::EXISTS;
- }
- IsWaitingForNodeInfo = false;
- while (!WaitingConnections.empty()) {
- TAutoPtr<IEventHandle> tmp(WaitingConnections.front().release());
- WaitingConnections.pop_front();
- Receive(tmp);
- }
- }
- };
-
- friend class TSessionMockActor;
-
- const ui32 NodeId;
- const ui32 PeerNodeId;
- TConnectionState& State;
- const TInterconnectProxyCommon::TPtr Common;
- TSessionMockActor *Session = nullptr;
-
- public:
- TProxyMockActor(ui32 nodeId, ui32 peerNodeId, TConnectionState& state, TInterconnectProxyCommon::TPtr common)
- : TActor(&TThis::StateFunc)
- , NodeId(nodeId)
- , PeerNodeId(peerNodeId)
- , State(state)
- , Common(std::move(common))
- {}
-
- static constexpr char ActorName[] = "PROXY_MOCK_ACTOR";
-
- void Registered(TActorSystem *as, const TActorId& parent) override {
- TActor::Registered(as, parent);
- State.Attach(NodeId, as, SelfId());
- }
-
- void Handle(TEvInject::TPtr ev) {
- auto *msg = ev->Get();
- if (Session && Session->SessionId != msg->SenderSessionId) {
- return; // drop messages from other sessions
- }
- if (auto *session = GetSession()) {
- for (auto&& ev : ev->Get()->Messages) {
- auto fw = std::make_unique<IEventHandle>(
- session->SelfId(),
- ev->Type,
- ev->Flags & ~IEventHandle::FlagForwardOnNondelivery,
- ev->Recipient,
- ev->Sender,
- ev->ReleaseChainBuffer(),
- ev->Cookie,
- msg->OriginScopeId,
- std::move(ev->TraceId)
- );
- if (!Common->EventFilter || Common->EventFilter->CheckIncomingEvent(*fw, Common->LocalScopeId)) {
- TActivationContext::Send(fw.release());
- }
- }
- }
- }
-
- void PassAway() override {
- Disconnect();
- TActor::PassAway();
- }
-
- TSessionMockActor *GetSession() {
- CheckSession();
- if (!Session) {
- Session = new TSessionMockActor(this, State.GetValidSessionId());
- RegisterWithSameMailbox(Session);
- }
- return Session;
- }
-
- void HandleSessionEvent(TAutoPtr<IEventHandle> ev) {
- auto *session = GetSession();
- InvokeOtherActor(*session, &TSessionMockActor::Receive, ev);
- }
-
- void Disconnect() {
- State.InvalidateSessionId(PeerNodeId);
- if (Session) {
- Session->Terminate();
- }
- }
-
- void CheckSession() {
- if (Session && Session->SessionId != State.GetValidSessionId()) {
- Session->Terminate();
- }
- }
-
- void PeerInject(std::deque<std::unique_ptr<IEventHandle>>&& messages) {
- Y_ABORT_UNLESS(Session);
- return State.Inject(PeerNodeId, std::move(messages), Common->LocalScopeId, Session->SessionId);
- }
-
- STRICT_STFUNC(StateFunc,
- cFunc(TEvents::TSystem::Poison, PassAway)
- fFunc(TEvInterconnect::EvForward, HandleSessionEvent)
- fFunc(TEvInterconnect::EvConnectNode, HandleSessionEvent)
- fFunc(TEvents::TSystem::Subscribe, HandleSessionEvent)
- fFunc(TEvents::TSystem::Unsubscribe, HandleSessionEvent)
- cFunc(TEvInterconnect::EvDisconnect, Disconnect)
- IgnoreFunc(TEvInterconnect::TEvClosePeerSocket)
- IgnoreFunc(TEvInterconnect::TEvCloseInputSession)
- cFunc(TEvInterconnect::EvPoisonSession, Disconnect)
- hFunc(TEvInject, Handle)
- cFunc(EvCheckSession, CheckSession)
- )
- };
-
- std::unordered_map<ui64, TConnectionState> States;
-
- public:
- IActor *CreateProxyMock(ui32 nodeId, ui32 peerNodeId, TInterconnectProxyCommon::TPtr common) {
- Y_ABORT_UNLESS(nodeId != peerNodeId);
- Y_ABORT_UNLESS(nodeId);
- Y_ABORT_UNLESS(peerNodeId);
- const ui64 key = std::min(nodeId, peerNodeId) | ui64(std::max(nodeId, peerNodeId)) << 32;
- auto it = States.try_emplace(key, key).first;
- return new TProxyMockActor(nodeId, peerNodeId, it->second, std::move(common));
- }
- };
-
- TInterconnectMock::TInterconnectMock()
- : Impl(std::make_unique<TImpl>())
- {}
-
- TInterconnectMock::~TInterconnectMock()
- {}
-
- IActor *TInterconnectMock::CreateProxyMock(ui32 nodeId, ui32 peerNodeId, TInterconnectProxyCommon::TPtr common) {
- return Impl->CreateProxyMock(nodeId, peerNodeId, std::move(common));
- }
-
-} // NActors
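
CreateProxyMock above keys every node pair by packing the smaller node id into the low 32 bits and the larger one into the high 32 bits, and TConnectionState::GetPeer recovers the slot by comparing against those two halves. A standalone check of that arithmetic:

    // Standalone check of the connection-key packing used by CreateProxyMock/GetPeer above.
    #include <algorithm>
    #include <cassert>
    #include <cstdint>

    int main() {
        const uint32_t nodeId = 7, peerNodeId = 3;
        const uint64_t key = std::min(nodeId, peerNodeId) | uint64_t(std::max(nodeId, peerNodeId)) << 32;
        assert(uint32_t(key) == 3);       // GetPeer(3) resolves to PeerInfo[0]
        assert(uint32_t(key >> 32) == 7); // GetPeer(7) resolves to PeerInfo[1]
        return 0;
    }
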
diff --git a/library/cpp/actors/interconnect/mock/ic_mock.h b/library/cpp/actors/interconnect/mock/ic_mock.h
deleted file mode 100644
index 636bdc2b7f..0000000000
--- a/library/cpp/actors/interconnect/mock/ic_mock.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-
-#include <library/cpp/actors/interconnect/interconnect_common.h>
-
-namespace NActors {
-
- class TInterconnectMock {
- class TImpl;
- std::unique_ptr<TImpl> Impl;
-
- public:
- TInterconnectMock();
- ~TInterconnectMock();
- IActor *CreateProxyMock(ui32 nodeId, ui32 peerNodeId, TInterconnectProxyCommon::TPtr common);
- };
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/mock/ya.make b/library/cpp/actors/interconnect/mock/ya.make
deleted file mode 100644
index d097e3f094..0000000000
--- a/library/cpp/actors/interconnect/mock/ya.make
+++ /dev/null
@@ -1,14 +0,0 @@
-LIBRARY()
-
-SRCS(
- ic_mock.cpp
- ic_mock.h
-)
-
-SUPPRESSIONS(tsan.supp)
-
-PEERDIR(
- library/cpp/actors/interconnect
-)
-
-END()
diff --git a/library/cpp/actors/interconnect/outgoing_stream.h b/library/cpp/actors/interconnect/outgoing_stream.h
deleted file mode 100644
index 304fa925a8..0000000000
--- a/library/cpp/actors/interconnect/outgoing_stream.h
+++ /dev/null
@@ -1,272 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/event_load.h>
-#include <library/cpp/actors/util/rc_buf.h>
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-#include <deque>
-
-namespace NInterconnect {
-
- template<size_t TotalSize>
- class TOutgoingStreamT {
- static constexpr size_t BufferSize = TotalSize - sizeof(ui32) * 2;
-
- struct TBuffer {
- char Data[BufferSize];
- ui32 RefCount;
- ui32 Index;
-
- struct TDeleter {
- void operator ()(TBuffer *buffer) const {
- free(buffer);
- }
- };
- };
-
- static_assert(sizeof(TBuffer) == TotalSize);
-
- struct TSendChunk {
- TContiguousSpan Span;
- TBuffer *Buffer;
- };
-
- std::vector<std::unique_ptr<TBuffer, typename TBuffer::TDeleter>> Buffers;
- TBuffer *AppendBuffer = nullptr;
-        size_t AppendOffset = BufferSize; // write offset into the last buffer
- std::deque<TSendChunk> SendQueue;
- size_t SendQueuePos = 0;
- size_t SendOffset = 0;
- size_t UnsentBytes = 0;
-
- public:
- operator bool() const {
- return SendQueuePos != SendQueue.size();
- }
-
- size_t CalculateOutgoingSize() const {
- size_t res = 0;
- for (const TSendChunk& chunk : SendQueue) {
- res += chunk.Span.size();
- }
- return res;
- }
-
- size_t CalculateUnsentSize() const {
-#ifndef NDEBUG
- size_t res = 0;
- for (auto it = SendQueue.begin() + SendQueuePos; it != SendQueue.end(); ++it) {
- res += it->Span.size();
- }
- Y_ABORT_UNLESS(UnsentBytes == res - SendOffset);
-#endif
- return UnsentBytes;
- }
-
- size_t GetSendQueueSize() const {
- return SendQueue.size();
- }
-
- TMutableContiguousSpan AcquireSpanForWriting(size_t maxLen) {
- if (!maxLen) {
- return {nullptr, 0};
- }
- if (AppendOffset == BufferSize) { // we have no free buffer, allocate one
- Buffers.emplace_back(static_cast<TBuffer*>(malloc(sizeof(TBuffer))));
- AppendBuffer = Buffers.back().get();
- Y_ABORT_UNLESS(AppendBuffer);
- AppendBuffer->RefCount = 1; // through AppendBuffer pointer
- AppendBuffer->Index = Buffers.size() - 1;
- AppendOffset = 0;
- }
- return {AppendBuffer->Data + AppendOffset, Min(maxLen, BufferSize - AppendOffset)};
- }
-
- void Align() {
- if (AppendOffset != BufferSize) {
- AppendOffset += -(reinterpret_cast<uintptr_t>(AppendBuffer->Data) + AppendOffset) & 63;
- if (AppendOffset > BufferSize) {
- AppendOffset = BufferSize;
- DropBufferReference(std::exchange(AppendBuffer, nullptr));
- }
- }
- }
-
- void Append(TContiguousSpan span) {
- if (AppendBuffer && span.data() == AppendBuffer->Data + AppendOffset) { // the only valid case to use previously acquired span
- AppendAcquiredSpan(span);
- } else {
-#ifndef NDEBUG
- // ensure this span does not point into any existing buffer part
- const char *begin = span.data();
- const char *end = span.data() + span.size();
- for (const auto& buffer : Buffers) {
- const char *bufferBegin = buffer->Data;
- const char *bufferEnd = bufferBegin + BufferSize;
- if (bufferBegin < end && begin < bufferEnd) {
- Y_ABORT();
- }
- }
-#endif
- AppendSpanWithGlueing(span, nullptr);
- }
- }
-
- void Write(TContiguousSpan in) {
- while (in.size()) {
- auto outChunk = AcquireSpanForWriting(in.size());
- memcpy(outChunk.data(), in.data(), outChunk.size());
- AppendAcquiredSpan(outChunk);
- in = in.SubSpan(outChunk.size(), Max<size_t>());
- }
- }
-
- using TBookmark = TStackVec<TMutableContiguousSpan, 2>;
-
- TBookmark Bookmark(size_t len) {
- TBookmark bookmark;
-
- while (len) {
- const auto span = AcquireSpanForWriting(len);
- AppendAcquiredSpan(span);
- bookmark.push_back(span);
- len -= span.size();
- }
-
- return bookmark;
- }
-
- void WriteBookmark(TBookmark&& bookmark, TContiguousSpan in) {
- for (auto& outChunk : bookmark) {
- Y_DEBUG_ABORT_UNLESS(outChunk.size() <= in.size());
- memcpy(outChunk.data(), in.data(), outChunk.size());
- in = in.SubSpan(outChunk.size(), Max<size_t>());
- }
- }
-
- void Rewind() {
- SendQueuePos = 0;
- SendOffset = 0;
- UnsentBytes = 0;
- for (const auto& item : SendQueue) {
- UnsentBytes += item.Span.size();
- }
- }
-
- void RewindToEnd() {
- SendQueuePos = SendQueue.size();
- SendOffset = 0;
- UnsentBytes = 0;
- }
-
- template<typename T>
- void ProduceIoVec(T& container, size_t maxItems, size_t maxBytes) {
- size_t offset = SendOffset;
- for (auto it = SendQueue.begin() + SendQueuePos; it != SendQueue.end() && std::size(container) < maxItems && maxBytes; ++it) {
- const TContiguousSpan span = it->Span.SubSpan(offset, maxBytes);
- container.push_back(NActors::TConstIoVec{span.data(), span.size()});
- offset = 0;
- maxBytes -= span.size();
- }
- }
-
- void Advance(size_t numBytes) { // called when numBytes portion of data has been sent
- Y_DEBUG_ABORT_UNLESS(numBytes == 0 || SendQueuePos != SendQueue.size());
- Y_DEBUG_ABORT_UNLESS(numBytes <= UnsentBytes);
- SendOffset += numBytes;
- UnsentBytes -= numBytes;
- for (auto it = SendQueue.begin() + SendQueuePos; SendOffset && it->Span.size() <= SendOffset; ++SendQueuePos, ++it) {
- SendOffset -= it->Span.size();
- Y_DEBUG_ABORT_UNLESS(SendOffset == 0 || SendQueuePos != SendQueue.size() - 1);
- }
- }
-
- void DropFront(size_t numBytes) { // drops first numBytes from the queue, freeing buffers when necessary
- while (numBytes) {
- Y_DEBUG_ABORT_UNLESS(!SendQueue.empty());
- auto& front = SendQueue.front();
- if (numBytes < front.Span.size()) {
- front.Span = front.Span.SubSpan(numBytes, Max<size_t>());
- if (SendQueuePos == 0) {
- Y_DEBUG_ABORT_UNLESS(numBytes <= SendOffset, "numBytes# %zu SendOffset# %zu SendQueuePos# %zu"
- " SendQueue.size# %zu CalculateUnsentSize# %zu", numBytes, SendOffset, SendQueuePos,
- SendQueue.size(), CalculateUnsentSize());
- SendOffset -= numBytes;
- }
- break;
- } else {
- numBytes -= front.Span.size();
- }
- Y_DEBUG_ABORT_UNLESS(!front.Buffer || (front.Span.data() >= front.Buffer->Data &&
- front.Span.data() + front.Span.size() <= front.Buffer->Data + BufferSize));
- DropBufferReference(front.Buffer);
- SendQueue.pop_front();
- if (SendQueuePos) {
- --SendQueuePos;
- } else {
- SendOffset = 0;
- }
- }
- }
-
- template<typename T>
- void ScanLastBytes(size_t numBytes, T&& callback) const {
- auto it = SendQueue.end();
- ssize_t offset = -numBytes;
- while (offset < 0) {
- Y_DEBUG_ABORT_UNLESS(it != SendQueue.begin());
- const TSendChunk& chunk = *--it;
- offset += chunk.Span.size();
- }
- for (; it != SendQueue.end(); ++it, offset = 0) {
- callback(it->Span.SubSpan(offset, Max<size_t>()));
- }
- }
-
- private:
- void AppendAcquiredSpan(TContiguousSpan span) {
- TBuffer *buffer = AppendBuffer;
- Y_DEBUG_ABORT_UNLESS(buffer);
- Y_DEBUG_ABORT_UNLESS(span.data() == AppendBuffer->Data + AppendOffset);
- AppendOffset += span.size();
- Y_DEBUG_ABORT_UNLESS(AppendOffset <= BufferSize);
- if (AppendOffset == BufferSize) {
- AppendBuffer = nullptr;
- } else {
- ++buffer->RefCount;
- }
- AppendSpanWithGlueing(span, buffer);
- }
-
- void AppendSpanWithGlueing(TContiguousSpan span, TBuffer *buffer) {
- UnsentBytes += span.size();
- if (!SendQueue.empty()) {
- auto& back = SendQueue.back();
- if (back.Span.data() + back.Span.size() == span.data()) { // check if it is possible just to extend the last span
- Y_DEBUG_ABORT_UNLESS(buffer == back.Buffer);
- if (SendQueuePos == SendQueue.size()) {
- --SendQueuePos;
- SendOffset = back.Span.size();
- }
- back.Span = {back.Span.data(), back.Span.size() + span.size()};
- DropBufferReference(buffer);
- return;
- }
- }
- SendQueue.push_back(TSendChunk{span, buffer});
- }
-
- void DropBufferReference(TBuffer *buffer) {
- if (buffer && !--buffer->RefCount) {
- const size_t index = buffer->Index;
- auto& cell = Buffers[index];
- Y_DEBUG_ABORT_UNLESS(cell.get() == buffer);
- std::swap(cell, Buffers.back());
- cell->Index = index;
- Buffers.pop_back();
- }
- }
- };
-
- using TOutgoingStream = TOutgoingStreamT<262144>;
-
-} // NInterconnect
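
TOutgoingStream above carves each 262144-byte allocation into 262144 - 2*sizeof(ui32) = 262136 bytes of payload plus the RefCount/Index fields, and Align() rounds the append offset up to the next 64-byte boundary with the expression -(address) & 63. The snippet below demonstrates only that rounding idiom; it is not the stream implementation.

    // Standalone illustration of the 64-byte rounding idiom used by Align() above.
    #include <cassert>
    #include <cstdint>

    int main() {
        const uintptr_t addrs[] = {0x1000, 0x1001, 0x103F, 0x1040};
        for (uintptr_t addr : addrs) {
            const uintptr_t pad = -addr & 63;   // bytes needed to reach the next 64-byte boundary
            const uintptr_t aligned = addr + pad;
            assert(aligned % 64 == 0 && pad < 64);
        }
        return 0;
    }
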
diff --git a/library/cpp/actors/interconnect/packet.cpp b/library/cpp/actors/interconnect/packet.cpp
deleted file mode 100644
index 9ba173e330..0000000000
--- a/library/cpp/actors/interconnect/packet.cpp
+++ /dev/null
@@ -1,31 +0,0 @@
-#include "packet.h"
-
-#include <library/cpp/actors/core/probes.h>
-
-#include <util/system/datetime.h>
-
-LWTRACE_USING(ACTORLIB_PROVIDER);
-
-ui32 TEventHolder::Fill(IEventHandle& ev) {
- Serial = 0;
- Descr.Type = ev.Type;
- Descr.Flags = ev.Flags;
- Descr.Recipient = ev.Recipient;
- Descr.Sender = ev.Sender;
- Descr.Cookie = ev.Cookie;
- ForwardRecipient = ev.GetForwardOnNondeliveryRecipient();
- EventActuallySerialized = 0;
- Descr.Checksum = 0;
-
- if (ev.HasBuffer()) {
- Buffer = ev.ReleaseChainBuffer();
- EventSerializedSize = Buffer->GetSize();
- } else if (ev.HasEvent()) {
- Event.Reset(ev.ReleaseBase());
- EventSerializedSize = Event->CalculateSerializedSize();
- } else {
- EventSerializedSize = 0;
- }
-
- return EventSerializedSize;
-}
diff --git a/library/cpp/actors/interconnect/packet.h b/library/cpp/actors/interconnect/packet.h
deleted file mode 100644
index 0a748cda2a..0000000000
--- a/library/cpp/actors/interconnect/packet.h
+++ /dev/null
@@ -1,304 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/event_pb.h>
-#include <library/cpp/actors/core/event_load.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-#include <library/cpp/actors/util/rope.h>
-#include <library/cpp/actors/prof/tag.h>
-#include <library/cpp/actors/wilson/wilson_span.h>
-#include <library/cpp/digest/crc32c/crc32c.h>
-#include <library/cpp/lwtrace/shuttle.h>
-#include <util/generic/string.h>
-#include <util/generic/list.h>
-
-#define XXH_INLINE_ALL
-#include <contrib/libs/xxhash/xxhash.h>
-
-#include "types.h"
-#include "outgoing_stream.h"
-
-#ifndef FORCE_EVENT_CHECKSUM
-#define FORCE_EVENT_CHECKSUM 0
-#endif
-
-// WARNING: turning this feature on will make protocol incompatible with ordinary Interconnect, use with caution
-#define IC_FORCE_HARDENED_PACKET_CHECKS 0
-
-#if IC_FORCE_HARDENED_PACKET_CHECKS
-#undef FORCE_EVENT_CHECKSUM
-#define FORCE_EVENT_CHECKSUM 1
-#endif
-
-Y_FORCE_INLINE ui32 Crc32cExtendMSanCompatible(ui32 checksum, const void *data, size_t len) {
- if constexpr (NSan::MSanIsOn()) {
- const char *begin = static_cast<const char*>(data);
- const char *end = begin + len;
- begin -= reinterpret_cast<uintptr_t>(begin) & 15;
- end += -reinterpret_cast<uintptr_t>(end) & 15;
- NSan::Unpoison(begin, end - begin);
- }
- return Crc32cExtend(checksum, data, len);
-}
-
-#pragma pack(push, 1)
-struct TTcpPacketHeader_v2 {
- ui64 Confirm;
- ui64 Serial;
- ui32 Checksum; // for the whole frame
- ui16 PayloadLength;
-};
-#pragma pack(pop)
-
-struct TTcpPacketBuf {
- static constexpr ui64 PingRequestMask = 0x8000000000000000ULL;
- static constexpr ui64 PingResponseMask = 0x4000000000000000ULL;
- static constexpr ui64 ClockMask = 0x2000000000000000ULL;
-
- static constexpr size_t PacketDataLen = 4096 * 2 - 96 - sizeof(TTcpPacketHeader_v2);
-};
-
-struct TEventData {
- ui32 Type;
- ui32 Flags;
- TActorId Recipient;
- TActorId Sender;
- ui64 Cookie;
- NWilson::TTraceId TraceId;
- ui32 Checksum;
-#if IC_FORCE_HARDENED_PACKET_CHECKS
- ui32 Len;
-#endif
-};
-
-#pragma pack(push, 1)
-
-struct TEventDescr2 {
- ui32 Type;
- ui32 Flags;
- TActorId Recipient;
- TActorId Sender;
- ui64 Cookie;
- NWilson::TTraceId::TSerializedTraceId TraceId;
- ui32 Checksum;
-#if IC_FORCE_HARDENED_PACKET_CHECKS
- ui32 Len;
-#endif
-};
-
-#pragma pack(pop)
-
-struct TEventHolder : TNonCopyable {
- TEventData Descr;
- TActorId ForwardRecipient;
- THolder<IEventBase> Event;
- TIntrusivePtr<TEventSerializedData> Buffer;
- ui64 Serial;
- ui32 EventSerializedSize;
- ui32 EventActuallySerialized;
- mutable NLWTrace::TOrbit Orbit;
- NWilson::TSpan Span;
-
- ui32 Fill(IEventHandle& ev);
-
- void InitChecksum() {
- Descr.Checksum = 0;
- }
-
- void UpdateChecksum(const void *buffer, size_t len) {
- if (FORCE_EVENT_CHECKSUM) {
- Descr.Checksum = Crc32cExtendMSanCompatible(Descr.Checksum, buffer, len);
- }
- }
-
- void ForwardOnNondelivery(bool unsure) {
- TEventData& d = Descr;
- const TActorId& r = d.Recipient;
- const TActorId& s = d.Sender;
- const TActorId *f = ForwardRecipient ? &ForwardRecipient : nullptr;
- Span.EndError("nondelivery");
- auto ev = Event
- ? std::make_unique<IEventHandle>(r, s, Event.Release(), d.Flags, d.Cookie, f, Span.GetTraceId())
- : std::make_unique<IEventHandle>(d.Type, d.Flags, r, s, std::move(Buffer), d.Cookie, f, Span.GetTraceId());
- NActors::TActivationContext::Send(IEventHandle::ForwardOnNondelivery(std::move(ev), NActors::TEvents::TEvUndelivered::Disconnected, unsure));
- }
-
- void Clear() {
- Event.Reset();
- Buffer.Reset();
- Orbit.Reset();
- Span = {};
- }
-};
-
-namespace NActors {
- class TEventOutputChannel;
-}
-
-struct TTcpPacketOutTask : TNonCopyable {
- const TSessionParams& Params;
- NInterconnect::TOutgoingStream& OutgoingStream;
- NInterconnect::TOutgoingStream& XdcStream;
- NInterconnect::TOutgoingStream::TBookmark HeaderBookmark;
- ui32 InternalSize = 0;
- ui32 ExternalSize = 0;
-
- ui32 PreBookmarkChecksum = 0;
- ui32 InternalChecksum = 0;
- ui32 InternalChecksumLen = 0;
- bool InsideBookmark = false;
-
- ui32 ExternalChecksum = 0;
-
- TTcpPacketOutTask(const TSessionParams& params, NInterconnect::TOutgoingStream& outgoingStream,
- NInterconnect::TOutgoingStream& xdcStream)
- : Params(params)
- , OutgoingStream(outgoingStream)
- , XdcStream(xdcStream)
- , HeaderBookmark(OutgoingStream.Bookmark(sizeof(TTcpPacketHeader_v2)))
- {}
-
- // Preallocate some space to fill it later.
- NInterconnect::TOutgoingStream::TBookmark Bookmark(size_t len) {
- if (ChecksummingCrc32c()) {
- Y_DEBUG_ABORT_UNLESS(!InsideBookmark);
- InsideBookmark = true;
- PreBookmarkChecksum = std::exchange(InternalChecksum, 0);
- InternalChecksumLen = 0;
- }
- Y_DEBUG_ABORT_UNLESS(len <= GetInternalFreeAmount());
- InternalSize += len;
- return OutgoingStream.Bookmark(len);
- }
-
- // Write previously bookmarked space.
- void WriteBookmark(NInterconnect::TOutgoingStream::TBookmark&& bookmark, const void *buffer, size_t len) {
- if (ChecksummingCrc32c()) {
- Y_DEBUG_ABORT_UNLESS(InsideBookmark);
- InsideBookmark = false;
- const ui32 bookmarkChecksum = Crc32cExtendMSanCompatible(PreBookmarkChecksum, buffer, len);
- InternalChecksum = Crc32cCombine(bookmarkChecksum, InternalChecksum, InternalChecksumLen);
- }
- OutgoingStream.WriteBookmark(std::move(bookmark), {static_cast<const char*>(buffer), len});
- }
-
- // Acquire raw pointer to write some data.
- template<bool External>
- TMutableContiguousSpan AcquireSpanForWriting() {
- if (External) {
- return XdcStream.AcquireSpanForWriting(GetExternalFreeAmount());
- } else {
- return OutgoingStream.AcquireSpanForWriting(GetInternalFreeAmount());
- }
- }
-
- // Append reference to some data (acquired previously or external pointer).
- template<bool External>
- void Append(const void *buffer, size_t len) {
- Y_DEBUG_ABORT_UNLESS(len <= (External ? GetExternalFreeAmount() : GetInternalFreeAmount()));
- (External ? ExternalSize : InternalSize) += len;
- (External ? XdcStream : OutgoingStream).Append({static_cast<const char*>(buffer), len});
- ProcessChecksum<External>(buffer, len);
- }
-
- // Write some data with copying.
- template<bool External>
- void Write(const void *buffer, size_t len) {
- Y_DEBUG_ABORT_UNLESS(len <= (External ? GetExternalFreeAmount() : GetInternalFreeAmount()));
- (External ? ExternalSize : InternalSize) += len;
- (External ? XdcStream : OutgoingStream).Write({static_cast<const char*>(buffer), len});
- ProcessChecksum<External>(buffer, len);
- }
-
- template<bool External>
- void ProcessChecksum(const void *buffer, size_t len) {
- if (ChecksummingCrc32c()) {
- if (External) {
- ExternalChecksum = Crc32cExtendMSanCompatible(ExternalChecksum, buffer, len);
- } else {
- InternalChecksum = Crc32cExtendMSanCompatible(InternalChecksum, buffer, len);
- InternalChecksumLen += len;
- }
- }
- }
-
- void Finish(ui64 serial, ui64 confirm) {
- Y_ABORT_UNLESS(InternalSize <= Max<ui16>());
-
- TTcpPacketHeader_v2 header{
- confirm,
- serial,
- 0,
- static_cast<ui16>(InternalSize)
- };
-
- if (ChecksummingXxhash()) {
- // write header with zero checksum to calculate whole packet checksum correctly
- OutgoingStream.WriteBookmark(NInterconnect::TOutgoingStream::TBookmark(HeaderBookmark),
- {reinterpret_cast<const char*>(&header), sizeof(header)});
-
- // calculate packet checksum
- XXH3_state_t state;
- XXH3_64bits_reset(&state);
- OutgoingStream.ScanLastBytes(GetPacketSize(), [&state](TContiguousSpan span) {
- XXH3_64bits_update(&state, span.data(), span.size());
- });
- header.Checksum = XXH3_64bits_digest(&state);
- } else if (ChecksummingCrc32c()) {
- Y_DEBUG_ABORT_UNLESS(!InsideBookmark);
- const ui32 headerChecksum = Crc32cExtendMSanCompatible(0, &header, sizeof(header));
- header.Checksum = Crc32cCombine(headerChecksum, InternalChecksum, InternalSize);
- }
-
- OutgoingStream.WriteBookmark(std::exchange(HeaderBookmark, {}), {reinterpret_cast<const char*>(&header),
- sizeof(header)});
- }
-
- bool ChecksummingCrc32c() const {
- return !Params.Encryption && !Params.UseXxhash;
- }
-
- bool ChecksummingXxhash() const {
- return !Params.Encryption && Params.UseXxhash;
- }
-
- bool IsEmpty() const { return GetDataSize() == 0; }
- ui32 GetDataSize() const { return InternalSize + ExternalSize; }
- ui32 GetPacketSize() const { return sizeof(TTcpPacketHeader_v2) + InternalSize; }
- ui32 GetInternalFreeAmount() const { return TTcpPacketBuf::PacketDataLen - InternalSize; }
- ui32 GetExternalFreeAmount() const { return 16384 - ExternalSize; }
- ui32 GetExternalSize() const { return ExternalSize; }
-};
-
-namespace NInterconnect::NDetail {
- static constexpr size_t MaxNumberBytes = (sizeof(ui64) * CHAR_BIT + 6) / 7;
-
- inline size_t SerializeNumber(ui64 num, char *buffer) {
- char *begin = buffer;
- do {
- *buffer++ = (num & 0x7F) | (num >= 128 ? 0x80 : 0x00);
- num >>= 7;
- } while (num);
- return buffer - begin;
- }
-
- inline ui64 DeserializeNumber(const char **ptr, const char *end) {
- const char *p = *ptr;
-        ui64 res = 0;
- size_t offset = 0;
- for (;;) {
- if (p == end) {
- return Max<ui64>();
- }
- const char byte = *p++;
-            res |= (static_cast<ui64>(byte) & 0x7F) << offset;
- offset += 7;
- if (!(byte & 0x80)) {
- break;
- }
- }
- *ptr = p;
- return res;
- }
-}
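
The NInterconnect::NDetail helpers removed above are a plain 7-bits-per-byte varint codec: SerializeNumber() emits the low 7 bits per byte and sets 0x80 while more bytes follow, DeserializeNumber() reads until a byte without 0x80 and returns Max<ui64>() on a truncated buffer, and MaxNumberBytes caps an encoded ui64 at 10 bytes. A minimal standalone round-trip sketch of the same scheme, using standard integer types in place of the util typedefs:

#include <cstdint>
#include <cstdio>
#include <limits>

// Mirrors the removed SerializeNumber(): 7 data bits per byte,
// bit 0x80 set while more bytes follow.
size_t SerializeNumber(uint64_t num, char* buffer) {
    char* begin = buffer;
    do {
        *buffer++ = (num & 0x7F) | (num >= 128 ? 0x80 : 0x00);
        num >>= 7;
    } while (num);
    return buffer - begin;
}

// Mirrors the removed DeserializeNumber(): returns UINT64_MAX on a
// truncated buffer, otherwise advances *ptr past the encoded value.
uint64_t DeserializeNumber(const char** ptr, const char* end) {
    const char* p = *ptr;
    uint64_t res = 0;
    size_t offset = 0;
    for (;;) {
        if (p == end) {
            return std::numeric_limits<uint64_t>::max();
        }
        const char byte = *p++;
        res |= (static_cast<uint64_t>(byte) & 0x7F) << offset;
        offset += 7;
        if (!(byte & 0x80)) {
            break;
        }
    }
    *ptr = p;
    return res;
}

int main() {
    char buf[10];                                   // MaxNumberBytes for a 64-bit value
    const size_t len = SerializeNumber(300, buf);   // encodes as 0xAC 0x02
    const char* p = buf;
    std::printf("%zu bytes, value %llu\n", len,
                static_cast<unsigned long long>(DeserializeNumber(&p, buf + len)));
}
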
diff --git a/library/cpp/actors/interconnect/poller.h b/library/cpp/actors/interconnect/poller.h
deleted file mode 100644
index ff7979369f..0000000000
--- a/library/cpp/actors/interconnect/poller.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#pragma once
-
-#include <functional>
-#include <library/cpp/actors/core/events.h>
-
-namespace NActors {
- class TSharedDescriptor: public TThrRefBase {
- public:
- virtual int GetDescriptor() = 0;
- };
-
- using TDelegate = std::function<void()>;
- using TFDDelegate = std::function<TDelegate(const TIntrusivePtr<TSharedDescriptor>&)>;
-
- class IPoller: public TThrRefBase {
- public:
- virtual ~IPoller() = default;
-
- virtual void StartRead(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) = 0;
- virtual void StartWrite(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) = 0;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/poller_actor.cpp b/library/cpp/actors/interconnect/poller_actor.cpp
deleted file mode 100644
index 040bdf8651..0000000000
--- a/library/cpp/actors/interconnect/poller_actor.cpp
+++ /dev/null
@@ -1,318 +0,0 @@
-#include "poller_actor.h"
-#include "interconnect_common.h"
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/probes.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/actors/util/funnel_queue.h>
-
-#include <util/generic/intrlist.h>
-#include <util/system/thread.h>
-#include <util/system/event.h>
-#include <util/system/pipe.h>
-
-#include <variant>
-
-namespace NActors {
-
- LWTRACE_USING(ACTORLIB_PROVIDER);
-
- namespace {
- int LastSocketError() {
-#if defined(_win_)
- return WSAGetLastError();
-#else
- return errno;
-#endif
- }
- }
-
- struct TSocketRecord : TThrRefBase {
- const TIntrusivePtr<TSharedDescriptor> Socket;
- const TActorId ReadActorId;
- const TActorId WriteActorId;
- std::atomic_uint32_t Flags = 0;
-
- TSocketRecord(TEvPollerRegister& ev)
- : Socket(std::move(ev.Socket))
- , ReadActorId(ev.ReadActorId)
- , WriteActorId(ev.WriteActorId)
- {}
- };
-
- template<typename TDerived>
- class TPollerThreadBase : public ISimpleThread {
- protected:
-        struct TPollerExitThread {}; // issued when we need to terminate the poller thread
-
- struct TPollerWakeup {};
-
- struct TPollerUnregisterSocket {
- TIntrusivePtr<TSharedDescriptor> Socket;
-
- TPollerUnregisterSocket(TIntrusivePtr<TSharedDescriptor> socket)
- : Socket(std::move(socket))
- {}
- };
-
- using TPollerSyncOperation = std::variant<TPollerExitThread, TPollerWakeup, TPollerUnregisterSocket>;
-
- struct TPollerSyncOperationWrapper {
- TPollerSyncOperation Operation;
- TManualEvent Event;
-
- TPollerSyncOperationWrapper(TPollerSyncOperation&& operation)
- : Operation(std::move(operation))
- {}
-
- void Wait() {
- Event.WaitI();
- }
-
- void SignalDone() {
- Event.Signal();
- }
- };
-
- TActorSystem *ActorSystem;
- TPipeHandle ReadEnd, WriteEnd; // pipe for sync event processor
- TFunnelQueue<TPollerSyncOperationWrapper*> SyncOperationsQ; // operation queue
-
- public:
- TPollerThreadBase(TActorSystem *actorSystem)
- : ActorSystem(actorSystem)
- {
- // create a pipe for notifications
- try {
- TPipeHandle::Pipe(ReadEnd, WriteEnd, CloseOnExec);
- } catch (const TFileError& err) {
- Y_ABORT("failed to create pipe");
- }
-
- // switch the read/write ends to nonblocking mode
- SetNonBlock(ReadEnd);
- SetNonBlock(WriteEnd);
- }
-
- void UnregisterSocket(const TIntrusivePtr<TSocketRecord>& record) {
- ExecuteSyncOperation(TPollerUnregisterSocket(record->Socket));
- }
-
- protected:
- void Notify(TSocketRecord *record, bool read, bool write) {
- auto issue = [&](const TActorId& recipient) {
- ActorSystem->Send(new IEventHandle(recipient, {}, new TEvPollerReady(record->Socket, read, write)));
- };
- if (read && record->ReadActorId) {
- issue(record->ReadActorId);
- if (write && record->WriteActorId && record->WriteActorId != record->ReadActorId) {
- issue(record->WriteActorId);
- }
- } else if (write && record->WriteActorId) {
- issue(record->WriteActorId);
- }
- }
-
- void Stop() {
- // signal poller thread to stop and wait for the thread
- ExecuteSyncOperation(TPollerExitThread());
- ISimpleThread::Join();
- }
-
- void ExecuteSyncOperation(TPollerSyncOperation&& op) {
- TPollerSyncOperationWrapper wrapper(std::move(op));
- if (SyncOperationsQ.Push(&wrapper)) {
- // this was the first entry, so we push notification through the pipe
- for (;;) {
- char buffer = '\x00';
- ssize_t nwritten = WriteEnd.Write(&buffer, sizeof(buffer));
- if (nwritten < 0) {
- const int err = LastSocketError();
- if (err == EINTR) {
- continue;
- } else {
- Y_ABORT("WriteEnd.Write() failed with %s", strerror(err));
- }
- } else {
- Y_ABORT_UNLESS(nwritten);
- break;
- }
- }
- }
- // wait for operation to complete
- wrapper.Wait();
- }
-
- void DrainReadEnd() {
- char buffer[4096];
- for (;;) {
- ssize_t n = ReadEnd.Read(buffer, sizeof(buffer));
- if (n < 0) {
- const int error = LastSocketError();
- if (error == EINTR) {
- continue;
- } else if (error == EAGAIN || error == EWOULDBLOCK) {
- break;
- } else {
- Y_ABORT("read() failed with %s", strerror(errno));
- }
- } else {
- Y_ABORT_UNLESS(n);
- }
- }
- }
-
- bool ProcessSyncOpQueue() {
- Y_ABORT_UNLESS(!SyncOperationsQ.IsEmpty());
- do {
- TPollerSyncOperationWrapper *op = SyncOperationsQ.Top();
- if (auto *unregister = std::get_if<TPollerUnregisterSocket>(&op->Operation)) {
- static_cast<TDerived&>(*this).UnregisterSocketInLoop(unregister->Socket);
- op->SignalDone();
- } else if (std::get_if<TPollerExitThread>(&op->Operation)) {
- op->SignalDone();
- return false; // terminate the thread
- } else if (std::get_if<TPollerWakeup>(&op->Operation)) {
- op->SignalDone();
- } else {
- Y_ABORT();
- }
- } while (SyncOperationsQ.Pop());
- return true;
- }
-
- void *ThreadProc() override {
- SetCurrentThreadName("network poller");
- for (;;) {
- if (static_cast<TDerived&>(*this).ProcessEventsInLoop()) { // need to process the queue
- DrainReadEnd();
- if (!ProcessSyncOpQueue()) {
- break;
- }
- }
- }
- return nullptr;
- }
- };
-
-} // namespace NActors
-
-#if defined(_linux_)
-# include "poller_actor_linux.h"
-#elif defined(_darwin_)
-# include "poller_actor_darwin.h"
-#elif defined(_win_)
-# include "poller_actor_win.h"
-#else
-# error "Unsupported platform"
-#endif
-
-namespace NActors {
-
- class TPollerToken::TImpl {
- std::weak_ptr<TPollerThread> Thread;
- TIntrusivePtr<TSocketRecord> Record; // valid only when Thread is held locked
-
- public:
- TImpl(std::shared_ptr<TPollerThread> thread, TIntrusivePtr<TSocketRecord> record)
- : Thread(thread)
- , Record(std::move(record))
- {
- thread->RegisterSocket(Record);
- }
-
- ~TImpl() {
- if (auto thread = Thread.lock()) {
- thread->UnregisterSocket(Record);
- }
- }
-
- void Request(bool read, bool write) {
- if (auto thread = Thread.lock()) {
- thread->Request(Record, read, write, false, false);
- }
- }
-
- bool RequestReadNotificationAfterWouldBlock() {
- if (auto thread = Thread.lock()) {
- return thread->Request(Record, true, false, true, true);
- } else {
- return false;
- }
- }
-
- bool RequestWriteNotificationAfterWouldBlock() {
- if (auto thread = Thread.lock()) {
- return thread->Request(Record, false, true, true, true);
- } else {
- return false;
- }
- }
-
- const TIntrusivePtr<TSharedDescriptor>& Socket() const {
- return Record->Socket;
- }
- };
-
- class TPollerActor: public TActorBootstrapped<TPollerActor> {
- // poller thread
- std::shared_ptr<TPollerThread> PollerThread;
-
- public:
- static constexpr IActor::EActivityType ActorActivityType() {
- return IActor::EActivityType::INTERCONNECT_POLLER;
- }
-
- void Bootstrap() {
- PollerThread = std::make_shared<TPollerThread>(TlsActivationContext->ExecutorThread.ActorSystem);
- Become(&TPollerActor::StateFunc);
- }
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvPollerRegister, Handle);
- cFunc(TEvents::TSystem::Poison, PassAway);
- )
-
- void Handle(TEvPollerRegister::TPtr& ev) {
- auto *msg = ev->Get();
- auto impl = std::make_unique<TPollerToken::TImpl>(PollerThread, MakeIntrusive<TSocketRecord>(*msg));
- auto socket = impl->Socket();
- TPollerToken::TPtr token(new TPollerToken(std::move(impl)));
- if (msg->ReadActorId && msg->WriteActorId && msg->WriteActorId != msg->ReadActorId) {
- Send(msg->ReadActorId, new TEvPollerRegisterResult(socket, token));
- Send(msg->WriteActorId, new TEvPollerRegisterResult(socket, std::move(token)));
- } else if (msg->ReadActorId) {
- Send(msg->ReadActorId, new TEvPollerRegisterResult(socket, std::move(token)));
- } else if (msg->WriteActorId) {
- Send(msg->WriteActorId, new TEvPollerRegisterResult(socket, std::move(token)));
- }
- }
- };
-
- TPollerToken::TPollerToken(std::unique_ptr<TImpl> impl)
- : Impl(std::move(impl))
- {}
-
- TPollerToken::~TPollerToken()
- {}
-
- void TPollerToken::Request(bool read, bool write) {
- Impl->Request(read, write);
- }
-
- bool TPollerToken::RequestReadNotificationAfterWouldBlock() {
- return Impl->RequestReadNotificationAfterWouldBlock();
- }
-
- bool TPollerToken::RequestWriteNotificationAfterWouldBlock() {
- return Impl->RequestWriteNotificationAfterWouldBlock();
- }
-
- IActor* CreatePollerActor() {
- return new TPollerActor();
- }
-
-}
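
TPollerThreadBase removed above wakes its poller thread through a self-pipe: ExecuteSyncOperation() pushes the request onto a funnel queue and writes one byte to the pipe only when the queue becomes non-empty, and the thread drains the pipe (DrainReadEnd) before processing the queue. A minimal POSIX-only sketch of that self-pipe wakeup; Wake(), Drain() and SetNonBlock() are illustrative names, not part of the library:

#include <cerrno>
#include <cstdio>
#include <fcntl.h>
#include <poll.h>
#include <unistd.h>

static int WakeFd[2]; // [0] read end polled by the poller thread, [1] write end used by producers

static void SetNonBlock(int fd) {
    const int flags = fcntl(fd, F_GETFL, 0);
    fcntl(fd, F_SETFL, flags | O_NONBLOCK);
}

// Producer side: poke the poller thread; EAGAIN here only means a wakeup
// byte is already pending, which is just as good.
static void Wake() {
    const char byte = 0;
    while (write(WakeFd[1], &byte, 1) < 0 && errno == EINTR) {
    }
}

// Poller side: swallow all pending wakeup bytes before scanning the queue.
static void Drain() {
    char buf[4096];
    for (;;) {
        const ssize_t n = read(WakeFd[0], buf, sizeof(buf));
        if (n > 0) {
            continue;
        }
        if (n == 0 || errno != EINTR) {
            return; // closed, fully drained (EAGAIN), or a real error
        }
    }
}

int main() {
    if (pipe(WakeFd) != 0) {
        return 1;
    }
    SetNonBlock(WakeFd[0]);
    SetNonBlock(WakeFd[1]);

    Wake(); // pretend another thread just queued a sync operation

    pollfd pfd{};
    pfd.fd = WakeFd[0];
    pfd.events = POLLIN;
    if (poll(&pfd, 1, 1000) > 0 && (pfd.revents & POLLIN)) {
        Drain();
        std::printf("woken up, sync operation queue would be processed here\n");
    }
}
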
diff --git a/library/cpp/actors/interconnect/poller_actor.h b/library/cpp/actors/interconnect/poller_actor.h
deleted file mode 100644
index 4fb2d49a65..0000000000
--- a/library/cpp/actors/interconnect/poller_actor.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#pragma once
-
-#include "events_local.h"
-#include "poller.h"
-#include <library/cpp/actors/core/actor.h>
-
-namespace NActors {
- struct TEvPollerRegister : TEventLocal<TEvPollerRegister, ui32(ENetwork::EvPollerRegister)> {
-        const TIntrusivePtr<TSharedDescriptor> Socket; // socket to watch
- const TActorId ReadActorId; // actor id to notify about read availability
- const TActorId WriteActorId; // actor id to notify about write availability; may be the same as the ReadActorId
-
- TEvPollerRegister(TIntrusivePtr<TSharedDescriptor> socket, const TActorId& readActorId, const TActorId& writeActorId)
- : Socket(std::move(socket))
- , ReadActorId(readActorId)
- , WriteActorId(writeActorId)
- {}
- };
-
- // poller token is sent in response to TEvPollerRegister; it allows requesting poll when read/write returns EAGAIN
- class TPollerToken : public TThrRefBase {
- class TImpl;
- std::unique_ptr<TImpl> Impl;
-
- friend class TPollerActor;
- TPollerToken(std::unique_ptr<TImpl> impl);
-
- public:
- ~TPollerToken();
- void Request(bool read, bool write);
- bool RequestReadNotificationAfterWouldBlock();
- bool RequestWriteNotificationAfterWouldBlock();
-
- bool RequestNotificationAfterWouldBlock(bool read, bool write) {
- bool status = false;
- status |= read && RequestReadNotificationAfterWouldBlock();
- status |= write && RequestWriteNotificationAfterWouldBlock();
- return status;
- }
-
- using TPtr = TIntrusivePtr<TPollerToken>;
- };
-
- struct TEvPollerRegisterResult : TEventLocal<TEvPollerRegisterResult, ui32(ENetwork::EvPollerRegisterResult)> {
- TIntrusivePtr<TSharedDescriptor> Socket;
- TPollerToken::TPtr PollerToken;
-
- TEvPollerRegisterResult(TIntrusivePtr<TSharedDescriptor> socket, TPollerToken::TPtr pollerToken)
- : Socket(std::move(socket))
- , PollerToken(std::move(pollerToken))
- {}
- };
-
- struct TEvPollerReady : TEventLocal<TEvPollerReady, ui32(ENetwork::EvPollerReady)> {
- TIntrusivePtr<TSharedDescriptor> Socket;
- const bool Read, Write;
-
- TEvPollerReady(TIntrusivePtr<TSharedDescriptor> socket, bool read, bool write)
- : Socket(std::move(socket))
- , Read(read)
- , Write(write)
- {}
- };
-
- IActor* CreatePollerActor();
-
- inline TActorId MakePollerActorId() {
- char x[12] = {'I', 'C', 'P', 'o', 'l', 'l', 'e', 'r', '\xDE', '\xAD', '\xBE', '\xEF'};
- return TActorId(0, TStringBuf(std::begin(x), std::end(x)));
- }
-
-}
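
The poller token above exists to support edge-triggered I/O: an actor works on its nonblocking socket until the syscall returns EAGAIN/EWOULDBLOCK, only then calls Request*NotificationAfterWouldBlock(), and resumes when TEvPollerReady arrives. A sketch of that consumption pattern, assuming a Linux-style socketpair and using a std::function as a stand-in for the token call:

#include <cerrno>
#include <cstdio>
#include <functional>
#include <sys/socket.h>
#include <unistd.h>

// Read everything currently available; on EAGAIN ask the (stand-in) poller
// token for a notification and stop until it fires again.
void ConsumeAll(int fd, const std::function<void()>& requestReadNotification) {
    char buf[4096];
    for (;;) {
        const ssize_t n = read(fd, buf, sizeof(buf));
        if (n > 0) {
            std::printf("got %zd bytes\n", n);
        } else if (n == 0) {
            return; // peer closed the socket
        } else if (errno == EINTR) {
            continue;
        } else if (errno == EAGAIN || errno == EWOULDBLOCK) {
            requestReadNotification(); // resume when TEvPollerReady arrives
            return;
        } else {
            std::perror("read");
            return;
        }
    }
}

int main() {
    int sp[2];
    if (socketpair(AF_UNIX, SOCK_STREAM | SOCK_NONBLOCK, 0, sp) != 0) {
        return 1;
    }
    write(sp[1], "hello", 5);
    ConsumeAll(sp[0], [] { std::printf("would request a read notification here\n"); });
    close(sp[0]);
    close(sp[1]);
}
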
diff --git a/library/cpp/actors/interconnect/poller_actor_darwin.h b/library/cpp/actors/interconnect/poller_actor_darwin.h
deleted file mode 100644
index cd763ac589..0000000000
--- a/library/cpp/actors/interconnect/poller_actor_darwin.h
+++ /dev/null
@@ -1,103 +0,0 @@
-#pragma once
-
-#include <sys/event.h>
-
-namespace NActors {
-
- class TKqueueThread : public TPollerThreadBase<TKqueueThread> {
- // KQueue file descriptor
- int KqDescriptor;
-
- void SafeKevent(const struct kevent* ev, int size) {
- int rc;
- do {
- rc = kevent(KqDescriptor, ev, size, nullptr, 0, nullptr);
- } while (rc == -1 && errno == EINTR);
- Y_ABORT_UNLESS(rc != -1, "kevent() failed with %s", strerror(errno));
- }
-
- public:
- TKqueueThread(TActorSystem *actorSystem)
- : TPollerThreadBase(actorSystem)
- {
- // create kqueue
- KqDescriptor = kqueue();
- Y_ABORT_UNLESS(KqDescriptor != -1, "kqueue() failed with %s", strerror(errno));
-
-            // set close-on-exec flag
- {
- int flags = fcntl(KqDescriptor, F_GETFD);
- Y_ABORT_UNLESS(flags >= 0, "fcntl(F_GETFD) failed with %s", strerror(errno));
- int rc = fcntl(KqDescriptor, F_SETFD, flags | FD_CLOEXEC);
- Y_ABORT_UNLESS(rc != -1, "fcntl(F_SETFD, +FD_CLOEXEC) failed with %s", strerror(errno));
- }
-
- // register pipe's read end in poller
- struct kevent ev;
- EV_SET(&ev, (int)ReadEnd, EVFILT_READ, EV_ADD | EV_ENABLE, 0, 0, nullptr);
- SafeKevent(&ev, 1);
-
- ISimpleThread::Start(); // start poller thread
- }
-
- ~TKqueueThread() {
- Stop();
- close(KqDescriptor);
- }
-
- bool ProcessEventsInLoop() {
- std::array<struct kevent, 256> events;
-
- int numReady = kevent(KqDescriptor, nullptr, 0, events.data(), events.size(), nullptr);
- if (numReady == -1) {
- if (errno == EINTR) {
- return false;
- } else {
- Y_ABORT("kevent() failed with %s", strerror(errno));
- }
- }
-
- bool res = false;
-
- for (int i = 0; i < numReady; ++i) {
- const struct kevent& ev = events[i];
- if (ev.udata) {
- TSocketRecord *it = static_cast<TSocketRecord*>(ev.udata);
- const bool error = ev.flags & (EV_EOF | EV_ERROR);
- const bool read = error || ev.filter == EVFILT_READ;
- const bool write = error || ev.filter == EVFILT_WRITE;
- Notify(it, read, write);
- } else {
- res = true;
- }
- }
-
- return res;
- }
-
- void UnregisterSocketInLoop(const TIntrusivePtr<TSharedDescriptor>& socket) {
- struct kevent ev[2];
- const int fd = socket->GetDescriptor();
- EV_SET(&ev[0], fd, EVFILT_READ, EV_DELETE, 0, 0, nullptr);
- EV_SET(&ev[1], fd, EVFILT_WRITE, EV_DELETE, 0, 0, nullptr);
- SafeKevent(ev, 2);
- }
-
- void RegisterSocket(const TIntrusivePtr<TSocketRecord>& record) {
- int flags = EV_ADD | EV_CLEAR | EV_ENABLE;
- struct kevent ev[2];
- const int fd = record->Socket->GetDescriptor();
- EV_SET(&ev[0], fd, EVFILT_READ, flags, 0, 0, record.Get());
- EV_SET(&ev[1], fd, EVFILT_WRITE, flags, 0, 0, record.Get());
- SafeKevent(ev, 2);
- }
-
- bool Request(const TIntrusivePtr<TSocketRecord>& /*socket*/, bool /*read*/, bool /*write*/, bool /*suppressNotify*/,
- bool /*afterWouldBlock*/) {
- return false; // no special processing here as we use kqueue in edge-triggered mode
- }
- };
-
- using TPollerThread = TKqueueThread;
-
-}
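
TKqueueThread above registers every descriptor once with EV_ADD | EV_CLEAR | EV_ENABLE, i.e. kqueue's edge-triggered ("clear") mode, which is why its Request() needs no extra bookkeeping. A minimal BSD/macOS-only sketch of that registration and wait, using a pipe in place of a socket:

#include <sys/types.h>
#include <sys/event.h>
#include <sys/time.h>
#include <unistd.h>
#include <cstdio>

int main() {
    const int kq = kqueue();
    int fds[2];
    if (kq == -1 || pipe(fds) != 0) {
        return 1;
    }

    // Register the read end in edge-triggered mode, as the removed
    // TKqueueThread::RegisterSocket() does for sockets.
    struct kevent ev;
    EV_SET(&ev, fds[0], EVFILT_READ, EV_ADD | EV_CLEAR | EV_ENABLE, 0, 0, nullptr);
    if (kevent(kq, &ev, 1, nullptr, 0, nullptr) == -1) {
        return 1;
    }

    write(fds[1], "x", 1); // make the read end readable

    struct kevent out;
    const int n = kevent(kq, nullptr, 0, &out, 1, nullptr);
    std::printf("events: %d, filter: %d, bytes pending: %lld\n",
                n, static_cast<int>(out.filter), static_cast<long long>(out.data));

    close(fds[0]);
    close(fds[1]);
    close(kq);
}
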
diff --git a/library/cpp/actors/interconnect/poller_actor_linux.h b/library/cpp/actors/interconnect/poller_actor_linux.h
deleted file mode 100644
index 2cd557e347..0000000000
--- a/library/cpp/actors/interconnect/poller_actor_linux.h
+++ /dev/null
@@ -1,132 +0,0 @@
-#pragma once
-
-#include <sys/epoll.h>
-
-namespace NActors {
-
- enum {
- ReadExpected = 1,
- ReadHit = 2,
- WriteExpected = 4,
- WriteHit = 8,
- };
-
- class TEpollThread : public TPollerThreadBase<TEpollThread> {
- // epoll file descriptor
- int EpollDescriptor;
-
- public:
- TEpollThread(TActorSystem *actorSystem)
- : TPollerThreadBase(actorSystem)
- {
- EpollDescriptor = epoll_create1(EPOLL_CLOEXEC);
- Y_ABORT_UNLESS(EpollDescriptor != -1, "epoll_create1() failed with %s", strerror(errno));
-
- epoll_event event;
- event.data.ptr = nullptr;
- event.events = EPOLLIN;
- if (epoll_ctl(EpollDescriptor, EPOLL_CTL_ADD, ReadEnd, &event) == -1) {
- Y_ABORT("epoll_ctl(EPOLL_CTL_ADD) failed with %s", strerror(errno));
- }
-
- ISimpleThread::Start(); // start poller thread
- }
-
- ~TEpollThread() {
- Stop();
- close(EpollDescriptor);
- }
-
- bool ProcessEventsInLoop() {
- // preallocated array for events
- std::array<epoll_event, 256> events;
-
- // wait indefinitely for event to arrive
- LWPROBE(EpollStartWaitIn);
- int numReady = epoll_wait(EpollDescriptor, events.data(), events.size(), -1);
- LWPROBE(EpollFinishWaitIn, numReady);
-
- // check return status for any errors
- if (numReady == -1) {
- if (errno == EINTR) {
- return false; // restart the call a bit later
- } else {
- Y_ABORT("epoll_wait() failed with %s", strerror(errno));
- }
- }
-
- bool res = false;
-
- for (int i = 0; i < numReady; ++i) {
- const epoll_event& ev = events[i];
- if (auto *record = static_cast<TSocketRecord*>(ev.data.ptr)) {
- const bool read = ev.events & (EPOLLIN | EPOLLHUP | EPOLLRDHUP | EPOLLERR);
- const bool write = ev.events & (EPOLLOUT | EPOLLERR);
- UpdateFlags(record, (read ? ReadHit : 0) | (write ? WriteHit : 0), false /*suppressNotify*/,
- false /*checkQueues*/);
- } else {
- res = true;
- }
- }
-
- return res;
- }
-
- bool UpdateFlags(TSocketRecord *record, ui32 addMask, bool suppressNotify, bool checkQueues) {
- ui32 flags = record->Flags.load(std::memory_order_acquire);
- for (;;) {
- ui32 updated = flags | addMask;
- static constexpr ui32 fullRead = ReadExpected | ReadHit;
- static constexpr ui32 fullWrite = WriteExpected | WriteHit;
- bool read = (updated & fullRead) == fullRead;
- bool write = (updated & fullWrite) == fullWrite;
- updated &= ~((read ? fullRead : 0) | (write ? fullWrite : 0));
- if (record->Flags.compare_exchange_weak(flags, updated, std::memory_order_acq_rel)) {
- if (suppressNotify) {
- return read || write;
- } else {
- if (checkQueues) {
- pollfd fd;
- fd.fd = record->Socket->GetDescriptor();
- const bool queryRead = updated & ReadExpected && !read;
- const bool queryWrite = updated & WriteExpected && !write;
- if (queryRead || queryWrite) {
- fd.events = (queryRead ? POLLIN : 0) | (queryWrite ? POLLOUT : 0);
- if (poll(&fd, 1, 0) != -1) {
- read = queryRead && fd.revents & (POLLIN | POLLHUP | POLLRDHUP | POLLERR);
- write = queryWrite && fd.revents & (POLLOUT | POLLERR);
- }
- }
- }
- Notify(record, read, write);
- return false;
- }
- }
- }
- }
-
- void UnregisterSocketInLoop(const TIntrusivePtr<TSharedDescriptor>& socket) {
- if (epoll_ctl(EpollDescriptor, EPOLL_CTL_DEL, socket->GetDescriptor(), nullptr) == -1) {
- Y_ABORT("epoll_ctl(EPOLL_CTL_DEL) failed with %s", strerror(errno));
- }
- }
-
- void RegisterSocket(const TIntrusivePtr<TSocketRecord>& record) {
- epoll_event event;
- event.events = EPOLLET | EPOLLRDHUP | EPOLLIN | EPOLLOUT;
- event.data.ptr = record.Get();
- if (epoll_ctl(EpollDescriptor, EPOLL_CTL_ADD, record->Socket->GetDescriptor(), &event) == -1) {
- Y_ABORT("epoll_ctl(EPOLL_CTL_ADD) failed with %s", strerror(errno));
- }
- }
-
- bool Request(const TIntrusivePtr<TSocketRecord>& record, bool read, bool write, bool suppressNotify,
- bool afterWouldBlock) {
- return UpdateFlags(record.Get(), (read ? ReadExpected : 0) | (write ? WriteExpected : 0), suppressNotify,
- !afterWouldBlock);
- }
- };
-
- using TPollerThread = TEpollThread;
-
-} // namespace NActors
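
TEpollThread above keeps every descriptor registered permanently in edge-triggered mode (EPOLLET) and folds two facts into one atomic word per socket: the actor asked for a notification (ReadExpected/WriteExpected) and epoll reported readiness (ReadHit/WriteHit). A notification is due only when both bits of a pair are set, and the pair is consumed atomically, so whichever side arrives second triggers exactly one notification. A standalone sketch of that flag protocol with the same bit values:

#include <atomic>
#include <cstdint>
#include <cstdio>

// Same bit layout as the removed poller_actor_linux.h.
enum : uint32_t {
    ReadExpected  = 1,
    ReadHit       = 2,
    WriteExpected = 4,
    WriteHit      = 8,
};

std::atomic<uint32_t> Flags{0};

// Merge addMask into the flag word; report (and atomically consume) any
// Expected+Hit pair that is now complete.
bool UpdateFlags(uint32_t addMask, bool& read, bool& write) {
    uint32_t flags = Flags.load(std::memory_order_acquire);
    for (;;) {
        uint32_t updated = flags | addMask;
        constexpr uint32_t fullRead = ReadExpected | ReadHit;
        constexpr uint32_t fullWrite = WriteExpected | WriteHit;
        read = (updated & fullRead) == fullRead;
        write = (updated & fullWrite) == fullWrite;
        updated &= ~((read ? fullRead : 0u) | (write ? fullWrite : 0u));
        if (Flags.compare_exchange_weak(flags, updated, std::memory_order_acq_rel)) {
            return read || write; // true => Notify() the owning actor
        }
    }
}

int main() {
    bool read = false, write = false;
    // epoll reports readability first: nothing to deliver yet.
    std::printf("hit only -> notify=%d\n", UpdateFlags(ReadHit, read, write));
    // The actor later asks for a read notification: the pair is complete.
    std::printf("expected -> notify=%d (read=%d)\n", UpdateFlags(ReadExpected, read, write), read);
}
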
diff --git a/library/cpp/actors/interconnect/poller_actor_win.h b/library/cpp/actors/interconnect/poller_actor_win.h
deleted file mode 100644
index a4b213ff8c..0000000000
--- a/library/cpp/actors/interconnect/poller_actor_win.h
+++ /dev/null
@@ -1,111 +0,0 @@
-#pragma once
-
-namespace NActors {
-
- class TSelectThread : public TPollerThreadBase<TSelectThread> {
- TMutex Mutex;
- std::unordered_map<SOCKET, TIntrusivePtr<TSocketRecord>> Descriptors;
-
- enum {
- READ = 1,
- WRITE = 2,
- };
-
- public:
- TSelectThread(TActorSystem *actorSystem)
- : TPollerThreadBase(actorSystem)
- {
- Descriptors.emplace(ReadEnd, nullptr);
- ISimpleThread::Start();
- }
-
- ~TSelectThread() {
- Stop();
- }
-
- bool ProcessEventsInLoop() {
- fd_set readfds, writefds, exceptfds;
-
- FD_ZERO(&readfds);
- FD_ZERO(&writefds);
- FD_ZERO(&exceptfds);
- int nfds = 0;
- with_lock (Mutex) {
- for (const auto& [key, record] : Descriptors) {
- const int fd = key;
- auto add = [&](auto& set) {
- FD_SET(fd, &set);
- nfds = Max<int>(nfds, fd + 1);
- };
- if (!record || (record->Flags & READ)) {
- add(readfds);
- }
- if (!record || (record->Flags & WRITE)) {
- add(writefds);
- }
- add(exceptfds);
- }
- }
-
- int res = select(nfds, &readfds, &writefds, &exceptfds, nullptr);
- if (res == -1) {
- const int err = LastSocketError();
- if (err == EINTR) {
- return false; // try a bit later
- } else {
- Y_ABORT("select() failed with %s", strerror(err));
- }
- }
-
- bool flag = false;
-
- with_lock (Mutex) {
- for (const auto& [fd, record] : Descriptors) {
- if (record) {
- const bool error = FD_ISSET(fd, &exceptfds);
- const bool read = error || FD_ISSET(fd, &readfds);
- const bool write = error || FD_ISSET(fd, &writefds);
- if (read) {
- record->Flags &= ~READ;
- }
- if (write) {
- record->Flags &= ~WRITE;
- }
- Notify(record.Get(), read, write);
- } else {
- flag = true;
- }
- }
- }
-
- return flag;
- }
-
- void UnregisterSocketInLoop(const TIntrusivePtr<TSharedDescriptor>& socket) {
- with_lock (Mutex) {
- Descriptors.erase(socket->GetDescriptor());
- }
- }
-
- void RegisterSocket(const TIntrusivePtr<TSocketRecord>& record) {
- with_lock (Mutex) {
- Descriptors.emplace(record->Socket->GetDescriptor(), record);
- }
- ExecuteSyncOperation(TPollerWakeup());
- }
-
- bool Request(const TIntrusivePtr<TSocketRecord>& record, bool read, bool write, bool /*suppressNotify*/,
- bool /*afterWouldBlock*/) {
- with_lock (Mutex) {
- const auto it = Descriptors.find(record->Socket->GetDescriptor());
- Y_ABORT_UNLESS(it != Descriptors.end());
- it->second->Flags |= (read ? READ : 0) | (write ? WRITE : 0);
- }
- ExecuteSyncOperation(TPollerWakeup());
- return false;
- }
- };
-
- using TPollerThread = TSelectThread;
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/poller_tcp.cpp b/library/cpp/actors/interconnect/poller_tcp.cpp
deleted file mode 100644
index ab9b7c85ea..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp.cpp
+++ /dev/null
@@ -1,35 +0,0 @@
-#include "poller_tcp.h"
-
-namespace NInterconnect {
- TPollerThreads::TPollerThreads(size_t units, bool useSelect)
- : Units(units)
- {
- Y_DEBUG_ABORT_UNLESS(!Units.empty());
- for (auto& unit : Units)
- unit = TPollerUnit::Make(useSelect);
- }
-
- TPollerThreads::~TPollerThreads() {
- }
-
- void TPollerThreads::Start() {
- for (const auto& unit : Units)
- unit->Start();
- }
-
- void TPollerThreads::Stop() {
- for (const auto& unit : Units)
- unit->Stop();
- }
-
- void TPollerThreads::StartRead(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) {
- auto& unit = Units[THash<SOCKET>()(s->GetDescriptor()) % Units.size()];
- unit->StartReadOperation(s, std::move(operation));
- }
-
- void TPollerThreads::StartWrite(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) {
- auto& unit = Units[THash<SOCKET>()(s->GetDescriptor()) % Units.size()];
- unit->StartWriteOperation(s, std::move(operation));
- }
-
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp.h b/library/cpp/actors/interconnect/poller_tcp.h
deleted file mode 100644
index 310265eccd..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#pragma once
-
-#include "poller_tcp_unit.h"
-#include "poller.h"
-
-#include <util/generic/vector.h>
-#include <util/generic/hash.h>
-
-namespace NInterconnect {
- class TPollerThreads: public NActors::IPoller {
- public:
- TPollerThreads(size_t units = 1U, bool useSelect = false);
- ~TPollerThreads();
-
- void Start();
- void Stop();
-
- void StartRead(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) override;
- void StartWrite(const TIntrusivePtr<TSharedDescriptor>& s, TFDDelegate&& operation) override;
-
- private:
- TVector<TPollerUnit::TPtr> Units;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit.cpp b/library/cpp/actors/interconnect/poller_tcp_unit.cpp
deleted file mode 100644
index 994d907004..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit.cpp
+++ /dev/null
@@ -1,126 +0,0 @@
-#include "poller_tcp_unit.h"
-
-#if !defined(_win_) && !defined(_darwin_)
-#include "poller_tcp_unit_epoll.h"
-#endif
-
-#include "poller_tcp_unit_select.h"
-#include "poller.h"
-
-#include <library/cpp/actors/prof/tag.h>
-#include <library/cpp/actors/util/intrinsics.h>
-
-#if defined _linux_
-#include <pthread.h>
-#endif
-
-namespace NInterconnect {
- TPollerUnit::TPtr
- TPollerUnit::Make(bool useSelect) {
-#if defined(_win_) || defined(_darwin_)
- Y_UNUSED(useSelect);
- return TPtr(new TPollerUnitSelect);
-#else
- return useSelect ? TPtr(new TPollerUnitSelect) : TPtr(new TPollerUnitEpoll);
-#endif
- }
-
- TPollerUnit::TPollerUnit()
- : StopFlag(true)
- , ReadLoop(TThread::TParams(IdleThread<false>, this).SetName("network read"))
- , WriteLoop(TThread::TParams(IdleThread<true>, this).SetName("network write"))
- {
- }
-
- TPollerUnit::~TPollerUnit() {
- if (!AtomicLoad(&StopFlag))
- Stop();
- }
-
- void
- TPollerUnit::Start() {
- AtomicStore(&StopFlag, false);
- ReadLoop.Start();
- WriteLoop.Start();
- }
-
- void
- TPollerUnit::Stop() {
- AtomicStore(&StopFlag, true);
- ReadLoop.Join();
- WriteLoop.Join();
- }
-
- template <>
- TPollerUnit::TSide&
- TPollerUnit::GetSide<false>() {
- return Read;
- }
-
- template <>
- TPollerUnit::TSide&
- TPollerUnit::GetSide<true>() {
- return Write;
- }
-
- void
- TPollerUnit::StartReadOperation(
- const TIntrusivePtr<TSharedDescriptor>& stream,
- TFDDelegate&& operation) {
- Y_DEBUG_ABORT_UNLESS(stream);
- if (AtomicLoad(&StopFlag))
- return;
- GetSide<false>().InputQueue.Push(TSide::TItem(stream, std::move(operation)));
- }
-
- void
- TPollerUnit::StartWriteOperation(
- const TIntrusivePtr<TSharedDescriptor>& stream,
- TFDDelegate&& operation) {
- Y_DEBUG_ABORT_UNLESS(stream);
- if (AtomicLoad(&StopFlag))
- return;
- GetSide<true>().InputQueue.Push(TSide::TItem(stream, std::move(operation)));
- }
-
- template <bool IsWrite>
- void*
- TPollerUnit::IdleThread(void* param) {
- // TODO: musl-libc version of `sched_param` struct is for some reason different from pthread
- // version in Ubuntu 12.04
-#if defined(_linux_) && !defined(_musl_)
- pthread_t threadSelf = pthread_self();
- sched_param sparam = {20};
- pthread_setschedparam(threadSelf, SCHED_FIFO, &sparam);
-#endif
-
- static_cast<TPollerUnit*>(param)->RunLoop<IsWrite>();
- return nullptr;
- }
-
- template <>
- void
- TPollerUnit::RunLoop<false>() {
- NProfiling::TMemoryTagScope tag("INTERCONNECT_RECEIVED_DATA");
- while (!AtomicLoad(&StopFlag))
- ProcessRead();
- }
-
- template <>
- void
- TPollerUnit::RunLoop<true>() {
- NProfiling::TMemoryTagScope tag("INTERCONNECT_SEND_DATA");
- while (!AtomicLoad(&StopFlag))
- ProcessWrite();
- }
-
- void
- TPollerUnit::TSide::ProcessInput() {
- if (!InputQueue.IsEmpty())
- do {
- auto sock = InputQueue.Top().first->GetDescriptor();
- if (!Operations.emplace(sock, std::move(InputQueue.Top())).second)
- Y_ABORT("Descriptor is already in pooler.");
- } while (InputQueue.Pop());
- }
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit.h b/library/cpp/actors/interconnect/poller_tcp_unit.h
deleted file mode 100644
index 692168b968..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#pragma once
-
-#include <util/system/thread.h>
-#include <library/cpp/actors/util/funnel_queue.h>
-
-#include "interconnect_stream.h"
-
-#include <memory>
-#include <functional>
-#include <unordered_map>
-
-namespace NInterconnect {
- using NActors::TFDDelegate;
- using NActors::TSharedDescriptor;
-
- class TPollerUnit {
- public:
- typedef std::unique_ptr<TPollerUnit> TPtr;
-
- static TPtr Make(bool useSelect);
-
- void Start();
- void Stop();
-
- virtual void StartReadOperation(
- const TIntrusivePtr<TSharedDescriptor>& stream,
- TFDDelegate&& operation);
-
- virtual void StartWriteOperation(
- const TIntrusivePtr<TSharedDescriptor>& stream,
- TFDDelegate&& operation);
-
- virtual ~TPollerUnit();
-
- private:
- virtual void ProcessRead() = 0;
- virtual void ProcessWrite() = 0;
-
- template <bool IsWrite>
- static void* IdleThread(void* param);
-
- template <bool IsWrite>
- void RunLoop();
-
- volatile bool StopFlag;
- TThread ReadLoop, WriteLoop;
-
- protected:
- TPollerUnit();
-
- struct TSide {
- using TOperations =
- std::unordered_map<SOCKET,
- std::pair<TIntrusivePtr<TSharedDescriptor>, TFDDelegate>>;
-
- TOperations Operations;
- using TItem = TOperations::mapped_type;
- TFunnelQueue<TItem> InputQueue;
-
- void ProcessInput();
- } Read, Write;
-
- template <bool IsWrite>
- TSide& GetSide();
- };
-
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp b/library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp
deleted file mode 100644
index aac6d52bb4..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit_epoll.cpp
+++ /dev/null
@@ -1,124 +0,0 @@
-#include "poller_tcp_unit_epoll.h"
-#if !defined(_win_) && !defined(_darwin_)
-#include <unistd.h>
-#include <sys/epoll.h>
-
-#include <csignal>
-#include <cerrno>
-
-namespace NInterconnect {
- namespace {
- void
- DeleteEpoll(int epoll, SOCKET stream) {
- ::epoll_event event = {0, {.fd = stream}};
- if (::epoll_ctl(epoll, EPOLL_CTL_DEL, stream, &event)) {
- Cerr << "epoll_ctl errno: " << errno << Endl;
- Y_ABORT("epoll delete error!");
- }
- }
-
- template <ui32 Events>
- void
- AddEpoll(int epoll, SOCKET stream) {
- ::epoll_event event = {.events = Events};
- event.data.fd = stream;
- if (::epoll_ctl(epoll, EPOLL_CTL_ADD, stream, &event)) {
- Cerr << "epoll_ctl errno: " << errno << Endl;
- Y_ABORT("epoll add error!");
- }
- }
-
- int
- Initialize() {
- const auto epoll = ::epoll_create(10000);
- Y_DEBUG_ABORT_UNLESS(epoll > 0);
- return epoll;
- }
-
- }
-
- TPollerUnitEpoll::TPollerUnitEpoll()
- : ReadDescriptor(Initialize())
- , WriteDescriptor(Initialize())
- {
-        // Signals to block while waiting in ::epoll_pwait().
- ::sigemptyset(&sigmask);
- ::sigaddset(&sigmask, SIGPIPE);
- ::sigaddset(&sigmask, SIGTERM);
- }
-
- TPollerUnitEpoll::~TPollerUnitEpoll() {
- ::close(ReadDescriptor);
- ::close(WriteDescriptor);
- }
-
- template <>
- int TPollerUnitEpoll::GetDescriptor<false>() const {
- return ReadDescriptor;
- }
-
- template <>
- int TPollerUnitEpoll::GetDescriptor<true>() const {
- return WriteDescriptor;
- }
-
- void
- TPollerUnitEpoll::StartReadOperation(
- const TIntrusivePtr<TSharedDescriptor>& s,
- TFDDelegate&& operation) {
- TPollerUnit::StartReadOperation(s, std::move(operation));
- AddEpoll<EPOLLRDHUP | EPOLLIN>(ReadDescriptor, s->GetDescriptor());
- }
-
- void
- TPollerUnitEpoll::StartWriteOperation(
- const TIntrusivePtr<TSharedDescriptor>& s,
- TFDDelegate&& operation) {
- TPollerUnit::StartWriteOperation(s, std::move(operation));
- AddEpoll<EPOLLRDHUP | EPOLLOUT>(WriteDescriptor, s->GetDescriptor());
- }
-
- constexpr int EVENTS_BUF_SIZE = 128;
-
- template <bool WriteOp>
- void
- TPollerUnitEpoll::Process() {
- ::epoll_event events[EVENTS_BUF_SIZE];
-
- const int epoll = GetDescriptor<WriteOp>();
-
-        /* Timeout just to check StopFlag periodically */
- const int result =
- ::epoll_pwait(epoll, events, EVENTS_BUF_SIZE, 200, &sigmask);
-
- if (result == -1 && errno != EINTR)
- Y_ABORT("epoll wait error!");
-
- auto& side = GetSide<WriteOp>();
- side.ProcessInput();
-
- for (int i = 0; i < result; ++i) {
- const auto it = side.Operations.find(events[i].data.fd);
- if (side.Operations.end() == it)
- continue;
- if (const auto& finalizer = it->second.second(it->second.first)) {
- DeleteEpoll(epoll, it->first);
- side.Operations.erase(it);
- finalizer();
- }
- }
- }
-
- void
- TPollerUnitEpoll::ProcessRead() {
- Process<false>();
- }
-
- void
- TPollerUnitEpoll::ProcessWrite() {
- Process<true>();
- }
-
-}
-
-#endif
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit_epoll.h b/library/cpp/actors/interconnect/poller_tcp_unit_epoll.h
deleted file mode 100644
index ff7893eba2..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit_epoll.h
+++ /dev/null
@@ -1,33 +0,0 @@
-#pragma once
-
-#include "poller_tcp_unit.h"
-
-namespace NInterconnect {
- class TPollerUnitEpoll: public TPollerUnit {
- public:
- TPollerUnitEpoll();
- virtual ~TPollerUnitEpoll();
-
- private:
- virtual void StartReadOperation(
- const TIntrusivePtr<TSharedDescriptor>& s,
- TFDDelegate&& operation) override;
-
- virtual void StartWriteOperation(
- const TIntrusivePtr<TSharedDescriptor>& s,
- TFDDelegate&& operation) override;
-
- virtual void ProcessRead() override;
- virtual void ProcessWrite() override;
-
- template <bool Write>
- void Process();
-
- template <bool Write>
- int GetDescriptor() const;
-
- const int ReadDescriptor, WriteDescriptor;
- ::sigset_t sigmask;
- };
-
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp b/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
deleted file mode 100644
index 1615d4679d..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit_select.cpp
+++ /dev/null
@@ -1,86 +0,0 @@
-#include "poller_tcp_unit_select.h"
-
-#include <csignal>
-
-#if defined(_win_)
-#include <winsock2.h>
-#define SOCKET_ERROR_SOURCE ::WSAGetLastError()
-#elif defined(_darwin_)
-#include <cerrno>
-#define SOCKET_ERROR_SOURCE errno
-typedef timeval TIMEVAL;
-#else
-#include <cerrno>
-#define SOCKET_ERROR_SOURCE errno
-#endif
-
-namespace NInterconnect {
- TPollerUnitSelect::TPollerUnitSelect() {
- }
-
- TPollerUnitSelect::~TPollerUnitSelect() {
- }
-
- template <bool IsWrite>
- void
- TPollerUnitSelect::Process() {
- auto& side = GetSide<IsWrite>();
- side.ProcessInput();
-
- enum : size_t { R,
- W,
- E };
- static const auto O = IsWrite ? W : R;
-
- ::fd_set sets[3];
-
- FD_ZERO(&sets[R]);
- FD_ZERO(&sets[W]);
- FD_ZERO(&sets[E]);
-
- for (const auto& operation : side.Operations) {
- FD_SET(operation.first, &sets[O]);
- FD_SET(operation.first, &sets[E]);
- }
-
-#if defined(_win_)
- ::TIMEVAL timeout = {0L, 99991L};
- const auto numberEvents = !side.Operations.empty() ? ::select(FD_SETSIZE, &sets[R], &sets[W], &sets[E], &timeout)
- : (::Sleep(100), 0);
-#elif defined(_darwin_)
- ::TIMEVAL timeout = {0L, 99991L};
- const auto numberEvents = ::select(FD_SETSIZE, &sets[R], &sets[W], &sets[E], &timeout);
-#else
- ::sigset_t sigmask;
- ::sigemptyset(&sigmask);
- ::sigaddset(&sigmask, SIGPIPE);
- ::sigaddset(&sigmask, SIGTERM);
-
- struct ::timespec timeout = {0L, 99999989L};
- const auto numberEvents = ::pselect(FD_SETSIZE, &sets[R], &sets[W], &sets[E], &timeout, &sigmask);
-#endif
-
- Y_DEBUG_ABORT_UNLESS(numberEvents >= 0);
-
- for (auto it = side.Operations.cbegin(); side.Operations.cend() != it;) {
- if (FD_ISSET(it->first, &sets[O]) || FD_ISSET(it->first, &sets[E]))
- if (const auto& finalizer = it->second.second(it->second.first)) {
- side.Operations.erase(it++);
- finalizer();
- continue;
- }
- ++it;
- }
- }
-
- void
- TPollerUnitSelect::ProcessRead() {
- Process<false>();
- }
-
- void
- TPollerUnitSelect::ProcessWrite() {
- Process<true>();
- }
-
-}
diff --git a/library/cpp/actors/interconnect/poller_tcp_unit_select.h b/library/cpp/actors/interconnect/poller_tcp_unit_select.h
deleted file mode 100644
index 0c15217796..0000000000
--- a/library/cpp/actors/interconnect/poller_tcp_unit_select.h
+++ /dev/null
@@ -1,19 +0,0 @@
-#pragma once
-
-#include "poller_tcp_unit.h"
-
-namespace NInterconnect {
- class TPollerUnitSelect: public TPollerUnit {
- public:
- TPollerUnitSelect();
- virtual ~TPollerUnitSelect();
-
- private:
- virtual void ProcessRead() override;
- virtual void ProcessWrite() override;
-
- template <bool IsWrite>
- void Process();
- };
-
-}
diff --git a/library/cpp/actors/interconnect/profiler.h b/library/cpp/actors/interconnect/profiler.h
deleted file mode 100644
index 11dac077ea..0000000000
--- a/library/cpp/actors/interconnect/profiler.h
+++ /dev/null
@@ -1,142 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/util/datetime.h>
-
-namespace NActors {
-
- class TProfiled {
- enum class EType : ui32 {
- ENTRY,
- EXIT,
- };
-
- struct TItem {
-            EType Type; // record kind: ENTRY or EXIT
- int Line;
- const char *Marker; // name of the profiled function/part
- ui64 Timestamp; // cycles
- };
-
- bool Enable = false;
- mutable TDeque<TItem> Items;
-
- friend class TFunction;
-
- public:
- class TFunction {
- const TProfiled& Profiled;
-
- public:
- TFunction(const TProfiled& profiled, const char *name, int line)
- : Profiled(profiled)
- {
- Log(EType::ENTRY, name, line);
- }
-
- ~TFunction() {
- Log(EType::EXIT, nullptr, 0);
- }
-
- private:
- void Log(EType type, const char *marker, int line) {
- if (Profiled.Enable) {
- Profiled.Items.push_back(TItem{
- type,
- line,
- marker,
- GetCycleCountFast()
- });
- }
- }
- };
-
- public:
- void Start() {
- Enable = true;
- }
-
- void Finish() {
- Items.clear();
- Enable = false;
- }
-
- TDuration Duration() const {
- return CyclesToDuration(Items ? Items.back().Timestamp - Items.front().Timestamp : 0);
- }
-
- TString Format() const {
- TDeque<TItem>::iterator it = Items.begin();
- TString res = FormatLevel(it);
- Y_ABORT_UNLESS(it == Items.end());
- return res;
- }
-
- private:
- TString FormatLevel(TDeque<TItem>::iterator& it) const {
- struct TRecord {
- TString Marker;
- ui64 Duration;
- TString Interior;
-
- bool operator <(const TRecord& other) const {
- return Duration < other.Duration;
- }
- };
- TVector<TRecord> records;
-
- while (it != Items.end() && it->Type != EType::EXIT) {
- Y_ABORT_UNLESS(it->Type == EType::ENTRY);
- const TString marker = Sprintf("%s:%d", it->Marker, it->Line);
- const ui64 begin = it->Timestamp;
- ++it;
- const TString interior = FormatLevel(it);
- Y_ABORT_UNLESS(it != Items.end());
- Y_ABORT_UNLESS(it->Type == EType::EXIT);
- const ui64 end = it->Timestamp;
- records.push_back(TRecord{marker, end - begin, interior});
- ++it;
- }
-
- TStringStream s;
- const ui64 cyclesPerMs = GetCyclesPerMillisecond();
-
- if (records.size() <= 10) {
- bool first = true;
- for (const TRecord& record : records) {
- if (first) {
- first = false;
- } else {
- s << " ";
- }
- s << record.Marker << "(" << (record.Duration * 1000000 / cyclesPerMs) << "ns)";
- if (record.Interior) {
- s << " {" << record.Interior << "}";
- }
- }
- } else {
- TMap<TString, TVector<TRecord>> m;
- for (TRecord& r : records) {
- const TString key = r.Marker;
- m[key].push_back(std::move(r));
- }
-
- s << "unordered ";
- for (auto& [key, value] : m) {
- auto i = std::max_element(value.begin(), value.end());
- ui64 sum = 0;
- for (const auto& item : value) {
- sum += item.Duration;
- }
- sum = sum * 1000000 / cyclesPerMs;
- s << key << " num# " << value.size() << " sum# " << sum << "ns max# " << (i->Duration * 1000000 / cyclesPerMs) << "ns";
- if (i->Interior) {
- s << " {" << i->Interior << "}";
- }
- }
- }
-
- return s.Str();
- }
- };
-
-} // NActors
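
TProfiled above collects ENTRY/EXIT records through the RAII TFunction helper and renders the nesting afterwards. A minimal sketch of the same RAII-marker idea, using std::chrono wall time instead of GetCycleCountFast(); the names here are illustrative, not the library's:

#include <chrono>
#include <cstdio>
#include <string>
#include <vector>

// Scoped entry/exit recorder in the spirit of the removed TProfiled.
struct TProfile {
    struct TItem {
        bool Entry;
        std::string Marker;
        std::chrono::steady_clock::time_point Ts;
    };

    bool Enable = false;
    std::vector<TItem> Items;

    struct TFunction {
        TProfile& P;
        TFunction(TProfile& p, const char* name)
            : P(p)
        {
            if (P.Enable) {
                P.Items.push_back({true, name, std::chrono::steady_clock::now()});
            }
        }
        ~TFunction() { // EXIT record on scope exit
            if (P.Enable) {
                P.Items.push_back({false, {}, std::chrono::steady_clock::now()});
            }
        }
    };
};

static TProfile Profiled;

void DoWork() {
    TProfile::TFunction func(Profiled, "DoWork"); // ENTRY here, EXIT on return
    volatile long sink = 0;
    for (long i = 0; i < 1000000; ++i) {
        sink += i;
    }
}

int main() {
    Profiled.Enable = true;
    DoWork();
    const auto ns = std::chrono::duration_cast<std::chrono::nanoseconds>(
        Profiled.Items.back().Ts - Profiled.Items.front().Ts).count();
    std::printf("%s took %lldns\n", Profiled.Items.front().Marker.c_str(),
                static_cast<long long>(ns));
}
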
diff --git a/library/cpp/actors/interconnect/slowpoke_actor.h b/library/cpp/actors/interconnect/slowpoke_actor.h
deleted file mode 100644
index 4b02e5da48..0000000000
--- a/library/cpp/actors/interconnect/slowpoke_actor.h
+++ /dev/null
@@ -1,47 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-
-namespace NActors {
-
- class TSlowpokeActor : public TActorBootstrapped<TSlowpokeActor> {
- const TDuration Duration;
- const TDuration SleepMin;
- const TDuration SleepMax;
- const TDuration RescheduleMin;
- const TDuration RescheduleMax;
-
- public:
- static constexpr NKikimrServices::TActivity::EType ActorActivityType() {
- return NKikimrServices::TActivity::INTERCONNECT_COMMON;
- }
-
- TSlowpokeActor(TDuration duration, TDuration sleepMin, TDuration sleepMax, TDuration rescheduleMin, TDuration rescheduleMax)
- : Duration(duration)
- , SleepMin(sleepMin)
- , SleepMax(sleepMax)
- , RescheduleMin(rescheduleMin)
- , RescheduleMax(rescheduleMax)
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TThis::StateFunc, ctx, Duration, new TEvents::TEvPoisonPill);
- HandleWakeup(ctx);
- }
-
- void HandleWakeup(const TActorContext& ctx) {
- Sleep(RandomDuration(SleepMin, SleepMax));
- ctx.Schedule(RandomDuration(RescheduleMin, RescheduleMax), new TEvents::TEvWakeup);
- }
-
- static TDuration RandomDuration(TDuration min, TDuration max) {
- return min + TDuration::FromValue(RandomNumber<ui64>(max.GetValue() - min.GetValue() + 1));
- }
-
- STRICT_STFUNC(StateFunc,
- CFunc(TEvents::TSystem::PoisonPill, Die)
- CFunc(TEvents::TSystem::Wakeup, HandleWakeup)
- )
- };
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/types.cpp b/library/cpp/actors/interconnect/types.cpp
deleted file mode 100644
index 979c55f277..0000000000
--- a/library/cpp/actors/interconnect/types.cpp
+++ /dev/null
@@ -1,564 +0,0 @@
-#include "types.h"
-#include <util/string/printf.h>
-#include <util/generic/vector.h>
-#include <errno.h>
-
-namespace NActors {
-
- TVector<const char*> TDisconnectReason::Reasons = {
- "EndOfStream",
- "CloseOnIdle",
- "LostConnection",
- "DeadPeer",
- "NewSession",
- "HandshakeFailTransient",
- "HandshakeFailPermanent",
- "UserRequest",
- "Debug",
- "ChecksumError",
- "FormatError",
- "EventTooLarge",
- "QueueOverload",
- "E2BIG",
- "EACCES",
- "EADDRINUSE",
- "EADDRNOTAVAIL",
- "EADV",
- "EAFNOSUPPORT",
- "EAGAIN",
- "EALREADY",
- "EBADE",
- "EBADF",
- "EBADFD",
- "EBADMSG",
- "EBADR",
- "EBADRQC",
- "EBADSLT",
- "EBFONT",
- "EBUSY",
- "ECANCELED",
- "ECHILD",
- "ECHRNG",
- "ECOMM",
- "ECONNABORTED",
- "ECONNREFUSED",
- "ECONNRESET",
- "EDEADLK",
- "EDEADLOCK",
- "EDESTADDRREQ",
- "EDOM",
- "EDOTDOT",
- "EDQUOT",
- "EEXIST",
- "EFAULT",
- "EFBIG",
- "EHOSTDOWN",
- "EHOSTUNREACH",
- "EHWPOISON",
- "EIDRM",
- "EILSEQ",
- "EINPROGRESS",
- "EINTR",
- "EINVAL",
- "EIO",
- "EISCONN",
- "EISDIR",
- "EISNAM",
- "EKEYEXPIRED",
- "EKEYREJECTED",
- "EKEYREVOKED",
- "EL2HLT",
- "EL2NSYNC",
- "EL3HLT",
- "EL3RST",
- "ELIBACC",
- "ELIBBAD",
- "ELIBEXEC",
- "ELIBMAX",
- "ELIBSCN",
- "ELNRNG",
- "ELOOP",
- "EMEDIUMTYPE",
- "EMFILE",
- "EMLINK",
- "EMSGSIZE",
- "EMULTIHOP",
- "ENAMETOOLONG",
- "ENAVAIL",
- "ENETDOWN",
- "ENETRESET",
- "ENETUNREACH",
- "ENFILE",
- "ENOANO",
- "ENOBUFS",
- "ENOCSI",
- "ENODATA",
- "ENODEV",
- "ENOENT",
- "ENOEXEC",
- "ENOKEY",
- "ENOLCK",
- "ENOLINK",
- "ENOMEDIUM",
- "ENOMEM",
- "ENOMSG",
- "ENONET",
- "ENOPKG",
- "ENOPROTOOPT",
- "ENOSPC",
- "ENOSR",
- "ENOSTR",
- "ENOSYS",
- "ENOTBLK",
- "ENOTCONN",
- "ENOTDIR",
- "ENOTEMPTY",
- "ENOTNAM",
- "ENOTRECOVERABLE",
- "ENOTSOCK",
- "ENOTTY",
- "ENOTUNIQ",
- "ENXIO",
- "EOPNOTSUPP",
- "EOVERFLOW",
- "EOWNERDEAD",
- "EPERM",
- "EPFNOSUPPORT",
- "EPIPE",
- "EPROTO",
- "EPROTONOSUPPORT",
- "EPROTOTYPE",
- "ERANGE",
- "EREMCHG",
- "EREMOTE",
- "EREMOTEIO",
- "ERESTART",
- "ERFKILL",
- "EROFS",
- "ESHUTDOWN",
- "ESOCKTNOSUPPORT",
- "ESPIPE",
- "ESRCH",
- "ESRMNT",
- "ESTALE",
- "ESTRPIPE",
- "ETIME",
- "ETIMEDOUT",
- "ETOOMANYREFS",
- "ETXTBSY",
- "EUCLEAN",
- "EUNATCH",
- "EUSERS",
- "EWOULDBLOCK",
- "EXDEV",
- "EXFULL",
- };
-
- TDisconnectReason TDisconnectReason::FromErrno(int err) {
- switch (err) {
-#define REASON(ERRNO) case ERRNO: return TDisconnectReason(TString(#ERRNO))
-#if defined(E2BIG)
- REASON(E2BIG);
-#endif
-#if defined(EACCES)
- REASON(EACCES);
-#endif
-#if defined(EADDRINUSE)
- REASON(EADDRINUSE);
-#endif
-#if defined(EADDRNOTAVAIL)
- REASON(EADDRNOTAVAIL);
-#endif
-#if defined(EADV)
- REASON(EADV);
-#endif
-#if defined(EAFNOSUPPORT)
- REASON(EAFNOSUPPORT);
-#endif
-#if defined(EAGAIN)
- REASON(EAGAIN);
-#endif
-#if defined(EALREADY)
- REASON(EALREADY);
-#endif
-#if defined(EBADE)
- REASON(EBADE);
-#endif
-#if defined(EBADF)
- REASON(EBADF);
-#endif
-#if defined(EBADFD)
- REASON(EBADFD);
-#endif
-#if defined(EBADMSG)
- REASON(EBADMSG);
-#endif
-#if defined(EBADR)
- REASON(EBADR);
-#endif
-#if defined(EBADRQC)
- REASON(EBADRQC);
-#endif
-#if defined(EBADSLT)
- REASON(EBADSLT);
-#endif
-#if defined(EBFONT)
- REASON(EBFONT);
-#endif
-#if defined(EBUSY)
- REASON(EBUSY);
-#endif
-#if defined(ECANCELED)
- REASON(ECANCELED);
-#endif
-#if defined(ECHILD)
- REASON(ECHILD);
-#endif
-#if defined(ECHRNG)
- REASON(ECHRNG);
-#endif
-#if defined(ECOMM)
- REASON(ECOMM);
-#endif
-#if defined(ECONNABORTED)
- REASON(ECONNABORTED);
-#endif
-#if defined(ECONNREFUSED)
- REASON(ECONNREFUSED);
-#endif
-#if defined(ECONNRESET)
- REASON(ECONNRESET);
-#endif
-#if defined(EDEADLK)
- REASON(EDEADLK);
-#endif
-#if defined(EDEADLOCK) && (!defined(EDEADLK) || EDEADLOCK != EDEADLK)
- REASON(EDEADLOCK);
-#endif
-#if defined(EDESTADDRREQ)
- REASON(EDESTADDRREQ);
-#endif
-#if defined(EDOM)
- REASON(EDOM);
-#endif
-#if defined(EDOTDOT)
- REASON(EDOTDOT);
-#endif
-#if defined(EDQUOT)
- REASON(EDQUOT);
-#endif
-#if defined(EEXIST)
- REASON(EEXIST);
-#endif
-#if defined(EFAULT)
- REASON(EFAULT);
-#endif
-#if defined(EFBIG)
- REASON(EFBIG);
-#endif
-#if defined(EHOSTDOWN)
- REASON(EHOSTDOWN);
-#endif
-#if defined(EHOSTUNREACH)
- REASON(EHOSTUNREACH);
-#endif
-#if defined(EHWPOISON)
- REASON(EHWPOISON);
-#endif
-#if defined(EIDRM)
- REASON(EIDRM);
-#endif
-#if defined(EILSEQ)
- REASON(EILSEQ);
-#endif
-#if defined(EINPROGRESS)
- REASON(EINPROGRESS);
-#endif
-#if defined(EINTR)
- REASON(EINTR);
-#endif
-#if defined(EINVAL)
- REASON(EINVAL);
-#endif
-#if defined(EIO)
- REASON(EIO);
-#endif
-#if defined(EISCONN)
- REASON(EISCONN);
-#endif
-#if defined(EISDIR)
- REASON(EISDIR);
-#endif
-#if defined(EISNAM)
- REASON(EISNAM);
-#endif
-#if defined(EKEYEXPIRED)
- REASON(EKEYEXPIRED);
-#endif
-#if defined(EKEYREJECTED)
- REASON(EKEYREJECTED);
-#endif
-#if defined(EKEYREVOKED)
- REASON(EKEYREVOKED);
-#endif
-#if defined(EL2HLT)
- REASON(EL2HLT);
-#endif
-#if defined(EL2NSYNC)
- REASON(EL2NSYNC);
-#endif
-#if defined(EL3HLT)
- REASON(EL3HLT);
-#endif
-#if defined(EL3RST)
- REASON(EL3RST);
-#endif
-#if defined(ELIBACC)
- REASON(ELIBACC);
-#endif
-#if defined(ELIBBAD)
- REASON(ELIBBAD);
-#endif
-#if defined(ELIBEXEC)
- REASON(ELIBEXEC);
-#endif
-#if defined(ELIBMAX)
- REASON(ELIBMAX);
-#endif
-#if defined(ELIBSCN)
- REASON(ELIBSCN);
-#endif
-#if defined(ELNRNG)
- REASON(ELNRNG);
-#endif
-#if defined(ELOOP)
- REASON(ELOOP);
-#endif
-#if defined(EMEDIUMTYPE)
- REASON(EMEDIUMTYPE);
-#endif
-#if defined(EMFILE)
- REASON(EMFILE);
-#endif
-#if defined(EMLINK)
- REASON(EMLINK);
-#endif
-#if defined(EMSGSIZE)
- REASON(EMSGSIZE);
-#endif
-#if defined(EMULTIHOP)
- REASON(EMULTIHOP);
-#endif
-#if defined(ENAMETOOLONG)
- REASON(ENAMETOOLONG);
-#endif
-#if defined(ENAVAIL)
- REASON(ENAVAIL);
-#endif
-#if defined(ENETDOWN)
- REASON(ENETDOWN);
-#endif
-#if defined(ENETRESET)
- REASON(ENETRESET);
-#endif
-#if defined(ENETUNREACH)
- REASON(ENETUNREACH);
-#endif
-#if defined(ENFILE)
- REASON(ENFILE);
-#endif
-#if defined(ENOANO)
- REASON(ENOANO);
-#endif
-#if defined(ENOBUFS)
- REASON(ENOBUFS);
-#endif
-#if defined(ENOCSI)
- REASON(ENOCSI);
-#endif
-#if defined(ENODATA)
- REASON(ENODATA);
-#endif
-#if defined(ENODEV)
- REASON(ENODEV);
-#endif
-#if defined(ENOENT)
- REASON(ENOENT);
-#endif
-#if defined(ENOEXEC)
- REASON(ENOEXEC);
-#endif
-#if defined(ENOKEY)
- REASON(ENOKEY);
-#endif
-#if defined(ENOLCK)
- REASON(ENOLCK);
-#endif
-#if defined(ENOLINK)
- REASON(ENOLINK);
-#endif
-#if defined(ENOMEDIUM)
- REASON(ENOMEDIUM);
-#endif
-#if defined(ENOMEM)
- REASON(ENOMEM);
-#endif
-#if defined(ENOMSG)
- REASON(ENOMSG);
-#endif
-#if defined(ENONET)
- REASON(ENONET);
-#endif
-#if defined(ENOPKG)
- REASON(ENOPKG);
-#endif
-#if defined(ENOPROTOOPT)
- REASON(ENOPROTOOPT);
-#endif
-#if defined(ENOSPC)
- REASON(ENOSPC);
-#endif
-#if defined(ENOSR)
- REASON(ENOSR);
-#endif
-#if defined(ENOSTR)
- REASON(ENOSTR);
-#endif
-#if defined(ENOSYS)
- REASON(ENOSYS);
-#endif
-#if defined(ENOTBLK)
- REASON(ENOTBLK);
-#endif
-#if defined(ENOTCONN)
- REASON(ENOTCONN);
-#endif
-#if defined(ENOTDIR)
- REASON(ENOTDIR);
-#endif
-#if defined(ENOTEMPTY)
- REASON(ENOTEMPTY);
-#endif
-#if defined(ENOTNAM)
- REASON(ENOTNAM);
-#endif
-#if defined(ENOTRECOVERABLE)
- REASON(ENOTRECOVERABLE);
-#endif
-#if defined(ENOTSOCK)
- REASON(ENOTSOCK);
-#endif
-#if defined(ENOTTY)
- REASON(ENOTTY);
-#endif
-#if defined(ENOTUNIQ)
- REASON(ENOTUNIQ);
-#endif
-#if defined(ENXIO)
- REASON(ENXIO);
-#endif
-#if defined(EOPNOTSUPP)
- REASON(EOPNOTSUPP);
-#endif
-#if defined(EOVERFLOW)
- REASON(EOVERFLOW);
-#endif
-#if defined(EOWNERDEAD)
- REASON(EOWNERDEAD);
-#endif
-#if defined(EPERM)
- REASON(EPERM);
-#endif
-#if defined(EPFNOSUPPORT)
- REASON(EPFNOSUPPORT);
-#endif
-#if defined(EPIPE)
- REASON(EPIPE);
-#endif
-#if defined(EPROTO)
- REASON(EPROTO);
-#endif
-#if defined(EPROTONOSUPPORT)
- REASON(EPROTONOSUPPORT);
-#endif
-#if defined(EPROTOTYPE)
- REASON(EPROTOTYPE);
-#endif
-#if defined(ERANGE)
- REASON(ERANGE);
-#endif
-#if defined(EREMCHG)
- REASON(EREMCHG);
-#endif
-#if defined(EREMOTE)
- REASON(EREMOTE);
-#endif
-#if defined(EREMOTEIO)
- REASON(EREMOTEIO);
-#endif
-#if defined(ERESTART)
- REASON(ERESTART);
-#endif
-#if defined(ERFKILL)
- REASON(ERFKILL);
-#endif
-#if defined(EROFS)
- REASON(EROFS);
-#endif
-#if defined(ESHUTDOWN)
- REASON(ESHUTDOWN);
-#endif
-#if defined(ESOCKTNOSUPPORT)
- REASON(ESOCKTNOSUPPORT);
-#endif
-#if defined(ESPIPE)
- REASON(ESPIPE);
-#endif
-#if defined(ESRCH)
- REASON(ESRCH);
-#endif
-#if defined(ESRMNT)
- REASON(ESRMNT);
-#endif
-#if defined(ESTALE)
- REASON(ESTALE);
-#endif
-#if defined(ESTRPIPE)
- REASON(ESTRPIPE);
-#endif
-#if defined(ETIME)
- REASON(ETIME);
-#endif
-#if defined(ETIMEDOUT)
- REASON(ETIMEDOUT);
-#endif
-#if defined(ETOOMANYREFS)
- REASON(ETOOMANYREFS);
-#endif
-#if defined(ETXTBSY)
- REASON(ETXTBSY);
-#endif
-#if defined(EUCLEAN)
- REASON(EUCLEAN);
-#endif
-#if defined(EUNATCH)
- REASON(EUNATCH);
-#endif
-#if defined(EUSERS)
- REASON(EUSERS);
-#endif
-#if defined(EWOULDBLOCK) && (!defined(EAGAIN) || EWOULDBLOCK != EAGAIN)
- REASON(EWOULDBLOCK);
-#endif
-#if defined(EXDEV)
- REASON(EXDEV);
-#endif
-#if defined(EXFULL)
- REASON(EXFULL);
-#endif
- default:
-                return TDisconnectReason(Sprintf("errno=%d", err));
- }
- }
-
-} // NActors
diff --git a/library/cpp/actors/interconnect/types.h b/library/cpp/actors/interconnect/types.h
deleted file mode 100644
index 14b1a1c7a6..0000000000
--- a/library/cpp/actors/interconnect/types.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/defs.h>
-#include <library/cpp/actors/core/actorid.h>
-#include <library/cpp/actors/core/event.h>
-
-#include <util/generic/string.h>
-
-namespace NActors {
-
- class TDisconnectReason {
- TString Text;
-
- private:
- explicit TDisconnectReason(TString text)
- : Text(std::move(text))
- {}
-
- public:
- TDisconnectReason() = default;
- TDisconnectReason(const TDisconnectReason&) = default;
- TDisconnectReason(TDisconnectReason&&) = default;
-
- static TDisconnectReason FromErrno(int err);
-
- static TDisconnectReason EndOfStream() { return TDisconnectReason("EndOfStream"); }
- static TDisconnectReason CloseOnIdle() { return TDisconnectReason("CloseOnIdle"); }
- static TDisconnectReason LostConnection() { return TDisconnectReason("LostConnection"); }
- static TDisconnectReason DeadPeer() { return TDisconnectReason("DeadPeer"); }
- static TDisconnectReason NewSession() { return TDisconnectReason("NewSession"); }
- static TDisconnectReason HandshakeFailTransient() { return TDisconnectReason("HandshakeFailTransient"); }
- static TDisconnectReason HandshakeFailPermanent() { return TDisconnectReason("HandshakeFailPermanent"); }
- static TDisconnectReason UserRequest() { return TDisconnectReason("UserRequest"); }
- static TDisconnectReason Debug() { return TDisconnectReason("Debug"); }
- static TDisconnectReason ChecksumError() { return TDisconnectReason("ChecksumError"); }
- static TDisconnectReason FormatError() { return TDisconnectReason("FormatError"); }
- static TDisconnectReason EventTooLarge() { return TDisconnectReason("EventTooLarge"); }
- static TDisconnectReason QueueOverload() { return TDisconnectReason("QueueOverload"); }
-
- TString ToString() const {
- return Text;
- }
-
- friend bool operator ==(const TDisconnectReason& x, const TDisconnectReason& y) { return x.Text == y.Text; }
-
- static TVector<const char*> Reasons;
- };
-
- struct TProgramInfo {
- ui64 PID = 0;
- ui64 StartTime = 0;
- ui64 Serial = 0;
- };
-
- struct TSessionParams {
- bool Encryption = {};
- bool AuthOnly = {};
- bool UseExternalDataChannel = {};
- bool UseXxhash = {};
- bool UseXdcShuffle = {};
- TString AuthCN;
- NActors::TScopeId PeerScopeId;
- };
-
-} // NActors
-
-using NActors::IEventBase;
-using NActors::IEventHandle;
-using NActors::TActorId;
-using NActors::TConstIoVec;
-using NActors::TEventSerializedData;
-using NActors::TSessionParams;
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 0b2d5cfe5c..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,85 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(lib)
-add_subdirectory(protos)
-
-add_executable(library-cpp-actors-interconnect-ut)
-target_link_libraries(library-cpp-actors-interconnect-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-actors-testlib
- cpp-digest-md5
- cpp-testing-unittest
-)
-target_link_options(library-cpp-actors-interconnect-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/large.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/sticking_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut
- TEST_TARGET
- library-cpp-actors-interconnect-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 7519ee7ba9..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,86 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(lib)
-add_subdirectory(protos)
-
-add_executable(library-cpp-actors-interconnect-ut)
-target_link_libraries(library-cpp-actors-interconnect-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-actors-testlib
- cpp-digest-md5
- cpp-testing-unittest
-)
-target_link_options(library-cpp-actors-interconnect-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/large.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/sticking_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut
- TEST_TARGET
- library-cpp-actors-interconnect-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 1d488c4550..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,89 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(lib)
-add_subdirectory(protos)
-
-add_executable(library-cpp-actors-interconnect-ut)
-target_link_libraries(library-cpp-actors-interconnect-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-actors-testlib
- cpp-digest-md5
- cpp-testing-unittest
-)
-target_link_options(library-cpp-actors-interconnect-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/large.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/sticking_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut
- TEST_TARGET
- library-cpp-actors-interconnect-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 3ee5b5f656..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,91 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(lib)
-add_subdirectory(protos)
-
-add_executable(library-cpp-actors-interconnect-ut)
-target_link_libraries(library-cpp-actors-interconnect-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-actors-testlib
- cpp-digest-md5
- cpp-testing-unittest
-)
-target_link_options(library-cpp-actors-interconnect-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/large.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/sticking_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut
- TEST_TARGET
- library-cpp-actors-interconnect-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.txt b/library/cpp/actors/interconnect/ut/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/interconnect/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index b928771974..0000000000
--- a/library/cpp/actors/interconnect/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(lib)
-add_subdirectory(protos)
-
-add_executable(library-cpp-actors-interconnect-ut)
-target_link_libraries(library-cpp-actors-interconnect-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-actors-testlib
- cpp-digest-md5
- cpp-testing-unittest
-)
-target_sources(library-cpp-actors-interconnect-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/large.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/sticking_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut
- TEST_TARGET
- library-cpp-actors-interconnect-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- PROCESSORS
- 1
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp b/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
deleted file mode 100644
index 98e81d7781..0000000000
--- a/library/cpp/actors/interconnect/ut/channel_scheduler_ut.cpp
+++ /dev/null
@@ -1,117 +0,0 @@
-#include <library/cpp/actors/interconnect/channel_scheduler.h>
-#include <library/cpp/actors/interconnect/events_local.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-
-Y_UNIT_TEST_SUITE(ChannelScheduler) {
-
- Y_UNIT_TEST(PriorityTraffic) {
- auto common = MakeIntrusive<TInterconnectProxyCommon>();
- common->MonCounters = MakeIntrusive<NMonitoring::TDynamicCounters>();
- std::shared_ptr<IInterconnectMetrics> ctr = CreateInterconnectCounters(common);
- ctr->SetPeerInfo("peer", "1");
- auto callback = [](THolder<IEventBase>) {};
- TEventHolderPool pool(common, callback);
- TSessionParams p;
- TChannelScheduler scheduler(1, {}, ctr, pool, 64 << 20, p);
-
- ui32 numEvents = 0;
-
- auto pushEvent = [&](size_t size, int channel) {
- TString payload(size, 'X');
- auto ev = MakeHolder<IEventHandle>(1, 0, TActorId(), TActorId(), MakeIntrusive<TEventSerializedData>(payload, TEventSerializationInfo{}), 0);
- auto& ch = scheduler.GetOutputChannel(channel);
- const bool wasWorking = ch.IsWorking();
- ch.Push(*ev);
- if (!wasWorking) {
- scheduler.AddToHeap(ch, 0);
- }
- ++numEvents;
- };
-
- for (ui32 i = 0; i < 100; ++i) {
- pushEvent(10000, 1);
- }
-
- for (ui32 i = 0; i < 1000; ++i) {
- pushEvent(1000, 2);
- }
-
- std::map<ui16, ui32> run;
- ui32 step = 0;
-
- std::deque<std::map<ui16, ui32>> window;
-
- NInterconnect::TOutgoingStream stream;
-
- for (; numEvents; ++step) {
- TTcpPacketOutTask task(p, stream, stream);
-
- if (step == 100) {
- for (ui32 i = 0; i < 200; ++i) {
- pushEvent(1000, 3);
- }
- }
-
- std::map<ui16, ui32> ch;
-
- while (numEvents) {
- TEventOutputChannel *channel = scheduler.PickChannelWithLeastConsumedWeight();
- ui32 before = task.GetDataSize();
- ui64 weightConsumed = 0;
- numEvents -= channel->FeedBuf(task, 0, &weightConsumed);
- ui32 after = task.GetDataSize();
- Y_ABORT_UNLESS(after >= before);
- scheduler.FinishPick(weightConsumed, 0);
- const ui32 bytesAdded = after - before;
- if (!bytesAdded) {
- break;
- }
- ch[channel->ChannelId] += bytesAdded;
- }
-
- scheduler.Equalize();
-
- for (const auto& [key, value] : ch) {
- run[key] += value;
- }
- window.push_back(ch);
-
- if (window.size() == 32) {
- for (const auto& [key, value] : window.front()) {
- run[key] -= value;
- if (!run[key]) {
- run.erase(key);
- }
- }
- window.pop_front();
- }
-
- double mean = 0.0;
- for (const auto& [key, value] : run) {
- mean += value;
- }
- mean /= run.size();
-
- double dev = 0.0;
- for (const auto& [key, value] : run) {
- dev += (value - mean) * (value - mean);
- }
- dev = sqrt(dev / run.size());
-
- double devToMean = dev / mean;
-
- Cerr << step << ": ";
- for (const auto& [key, value] : run) {
- Cerr << "ch" << key << "=" << value << " ";
- }
- Cerr << "mean# " << mean << " dev# " << dev << " part# " << devToMean;
-
- Cerr << Endl;
-
- UNIT_ASSERT(devToMean < 1);
- }
- }
-
-}
diff --git a/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp b/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
deleted file mode 100644
index 4cc6a3cc99..0000000000
--- a/library/cpp/actors/interconnect/ut/dynamic_proxy_ut.cpp
+++ /dev/null
@@ -1,179 +0,0 @@
-#include <library/cpp/actors/interconnect/ut/lib/node.h>
-#include <library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-TActorId MakeResponderServiceId(ui32 nodeId) {
- return TActorId(nodeId, TStringBuf("ResponderAct", 12));
-}
-
-class TArriveQueue {
- struct TArrivedItem {
- ui32 QueueId;
- ui32 Index;
- bool Success;
- };
-
- TMutex Lock;
- std::size_t Counter = 0;
- std::vector<TArrivedItem> Items;
-
-public:
- TArriveQueue(size_t capacity)
- : Items(capacity)
- {}
-
- bool Done() const {
- with_lock (Lock) {
- return Counter == Items.size();
- }
- }
-
- void Push(ui64 cookie, bool success) {
- with_lock (Lock) {
- const size_t pos = Counter++;
- TArrivedItem item{.QueueId = static_cast<ui32>(cookie >> 32), .Index = static_cast<ui32>(cookie & 0xffff'ffff),
- .Success = success};
- memcpy(&Items[pos], &item, sizeof(TArrivedItem));
- }
- }
-
- void Check() {
- struct TPerQueueState {
- std::vector<ui32> Ok, Error;
- };
- std::unordered_map<ui32, TPerQueueState> state;
- for (const TArrivedItem& item : Items) {
- auto& st = state[item.QueueId];
- auto& v = item.Success ? st.Ok : st.Error;
- v.push_back(item.Index);
- }
- for (const auto& [queueId, st] : state) {
- ui32 expected = 0;
- for (const ui32 index : st.Ok) {
- Y_ABORT_UNLESS(index == expected);
- ++expected;
- }
- for (const ui32 index : st.Error) {
- Y_ABORT_UNLESS(index == expected);
- ++expected;
- }
- if (st.Error.size()) {
- Cerr << "Error.size# " << st.Error.size() << Endl;
- }
- }
- }
-};
-
-class TResponder : public TActor<TResponder> {
- TArriveQueue& ArriveQueue;
-
-public:
- TResponder(TArriveQueue& arriveQueue)
- : TActor(&TResponder::StateFunc)
- , ArriveQueue(arriveQueue)
- {}
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvents::TEvPing, Handle);
- )
-
- void Handle(TEvents::TEvPing::TPtr ev) {
- ArriveQueue.Push(ev->Cookie, true);
- }
-};
-
-class TSender : public TActor<TSender> {
- TArriveQueue& ArriveQueue;
-
-public:
- TSender(TArriveQueue& arriveQueue)
- : TActor(&TThis::StateFunc)
- , ArriveQueue(arriveQueue)
- {}
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvents::TEvUndelivered, Handle);
- )
-
- void Handle(TEvents::TEvUndelivered::TPtr ev) {
- ArriveQueue.Push(ev->Cookie, false);
- }
-};
-
-void SenderThread(TMutex& lock, TActorSystem *as, ui32 nodeId, ui32 queueId, ui32 count, TArriveQueue& arriveQueue) {
- const TActorId sender = as->Register(new TSender(arriveQueue));
- with_lock(lock) {}
- const TActorId target = MakeResponderServiceId(nodeId);
- for (ui32 i = 0; i < count; ++i) {
- const ui32 flags = IEventHandle::FlagTrackDelivery;
- as->Send(new IEventHandle(TEvents::THelloWorld::Ping, flags, target, sender, nullptr, ((ui64)queueId << 32) | i));
- }
-}
-
-void RaceTestIter(ui32 numThreads, ui32 count) {
- TPortManager portman;
- THashMap<ui32, ui16> nodeToPort;
- const ui32 numNodes = 6; // total
- const ui32 numDynamicNodes = 3;
- for (ui32 i = 1; i <= numNodes; ++i) {
- nodeToPort.emplace(i, portman.GetPort());
- }
-
- NMonitoring::TDynamicCounterPtr counters = new NMonitoring::TDynamicCounters;
- std::list<TNode> nodes;
- for (ui32 i = 1; i <= numNodes; ++i) {
- nodes.emplace_back(i, numNodes, nodeToPort, "127.1.0.0", counters->GetSubgroup("nodeId", TStringBuilder() << i),
- TDuration::Seconds(10), TChannelsConfig(), numDynamicNodes, numThreads);
- }
-
- const ui32 numSenders = 10;
- TArriveQueue arriveQueue(numSenders * numNodes * (numNodes - 1) * count);
- for (TNode& node : nodes) {
- node.RegisterServiceActor(MakeResponderServiceId(node.GetActorSystem()->NodeId), new TResponder(arriveQueue));
- }
-
- TMutex lock;
- std::list<TThread> threads;
- ui32 queueId = 0;
- with_lock(lock) {
- for (TNode& from : nodes) {
- for (ui32 toId = 1; toId <= numNodes; ++toId) {
- if (toId == from.GetActorSystem()->NodeId) {
- continue;
- }
- for (ui32 i = 0; i < numSenders; ++i) {
- threads.emplace_back([=, &lock, &from, &arriveQueue] {
- SenderThread(lock, from.GetActorSystem(), toId, queueId, count, arriveQueue);
- });
- ++queueId;
- }
- }
- }
- for (auto& thread : threads) {
- thread.Start();
- }
- }
- for (auto& thread : threads) {
- thread.Join();
- }
-
- for (THPTimer timer; !arriveQueue.Done(); TDuration::MilliSeconds(10)) {
- Y_ABORT_UNLESS(timer.Passed() < 10);
- }
-
- nodes.clear();
- arriveQueue.Check();
-}
-
-Y_UNIT_TEST_SUITE(DynamicProxy) {
- Y_UNIT_TEST(RaceCheck1) {
- for (ui32 iteration = 0; iteration < 100; ++iteration) {
- RaceTestIter(1 + iteration % 5, 1);
- }
- }
- Y_UNIT_TEST(RaceCheck10) {
- for (ui32 iteration = 0; iteration < 100; ++iteration) {
- RaceTestIter(1 + iteration % 5, 10);
- }
- }
-}
diff --git a/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp b/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
deleted file mode 100644
index 7b45793e26..0000000000
--- a/library/cpp/actors/interconnect/ut/event_holder_pool_ut.cpp
+++ /dev/null
@@ -1,59 +0,0 @@
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/interconnect/interconnect_common.h>
-#include <library/cpp/monlib/dynamic_counters/counters.h>
-#include <library/cpp/actors/interconnect/event_holder_pool.h>
-
-#include <atomic>
-
-using namespace NActors;
-
-template<typename T>
-TEventHolderPool Setup(T&& callback) {
- auto common = MakeIntrusive<TInterconnectProxyCommon>();
- common->DestructorQueueSize = std::make_shared<std::atomic<TAtomicBase>>();
- common->MaxDestructorQueueSize = 1024 * 1024;
- return TEventHolderPool(common, callback);
-}
-
-Y_UNIT_TEST_SUITE(EventHolderPool) {
-
- Y_UNIT_TEST(Overflow) {
- TDeque<THolder<IEventBase>> freeQ;
- auto callback = [&](THolder<IEventBase> event) {
- freeQ.push_back(std::move(event));
- };
- auto pool = Setup(std::move(callback));
-
- std::list<TEventHolder> q;
-
- auto& ev1 = pool.Allocate(q);
- ev1.Buffer = MakeIntrusive<TEventSerializedData>(TString::Uninitialized(512 * 1024), TEventSerializationInfo{});
-
- auto& ev2 = pool.Allocate(q);
- ev2.Buffer = MakeIntrusive<TEventSerializedData>(TString::Uninitialized(512 * 1024), TEventSerializationInfo{});
-
- auto& ev3 = pool.Allocate(q);
- ev3.Buffer = MakeIntrusive<TEventSerializedData>(TString::Uninitialized(512 * 1024), TEventSerializationInfo{});
-
- auto& ev4 = pool.Allocate(q);
- ev4.Buffer = MakeIntrusive<TEventSerializedData>(TString::Uninitialized(512 * 1024), TEventSerializationInfo{});
-
- pool.Release(q, q.begin());
- pool.Release(q, q.begin());
- pool.Trim();
- UNIT_ASSERT_VALUES_EQUAL(freeQ.size(), 1);
-
- pool.Release(q, q.begin());
- UNIT_ASSERT_VALUES_EQUAL(freeQ.size(), 1);
-
- freeQ.clear();
- pool.Release(q, q.begin());
- pool.Trim();
- UNIT_ASSERT_VALUES_EQUAL(freeQ.size(), 1);
-
- freeQ.clear(); // if we don't do this, we may crash due to the order of object destruction
- }
-
-}
diff --git a/library/cpp/actors/interconnect/ut/interconnect_ut.cpp b/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
deleted file mode 100644
index bc9e86545a..0000000000
--- a/library/cpp/actors/interconnect/ut/interconnect_ut.cpp
+++ /dev/null
@@ -1,177 +0,0 @@
-#include <library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <library/cpp/digest/md5/md5.h>
-#include <util/random/fast.h>
-
-using namespace NActors;
-
-class TSenderActor : public TActorBootstrapped<TSenderActor> {
- const TActorId Recipient;
- using TSessionToCookie = std::unordered_multimap<TActorId, ui64, THash<TActorId>>;
- TSessionToCookie SessionToCookie;
- std::unordered_map<ui64, std::pair<TSessionToCookie::iterator, TString>> InFlight;
- std::unordered_map<ui64, TString> Tentative;
- ui64 NextCookie = 0;
- TActorId SessionId;
- bool SubscribeInFlight = false;
-
-public:
- TSenderActor(TActorId recipient)
- : Recipient(recipient)
- {}
-
- void Bootstrap() {
- Become(&TThis::StateFunc);
- Subscribe();
- }
-
- void Subscribe() {
- Cerr << (TStringBuilder() << "Subscribe" << Endl);
- Y_ABORT_UNLESS(!SubscribeInFlight);
- SubscribeInFlight = true;
- Send(TActivationContext::InterconnectProxy(Recipient.NodeId()), new TEvents::TEvSubscribe);
- }
-
- void IssueQueries() {
- if (!SessionId) {
- return;
- }
- while (InFlight.size() < 10) {
- size_t len = RandomNumber<size_t>(65536) + 1;
- TString data = TString::Uninitialized(len);
- TReallyFastRng32 rng(RandomNumber<ui32>());
- char *p = data.Detach();
- for (size_t i = 0; i < len; ++i) {
- p[i] = rng();
- }
- const TSessionToCookie::iterator s2cIt = SessionToCookie.emplace(SessionId, NextCookie);
- InFlight.emplace(NextCookie, std::make_tuple(s2cIt, MD5::CalcRaw(data)));
- TActivationContext::Send(new IEventHandle(TEvents::THelloWorld::Ping, IEventHandle::FlagTrackDelivery, Recipient,
- SelfId(), MakeIntrusive<TEventSerializedData>(std::move(data), TEventSerializationInfo{}), NextCookie));
-// Cerr << (TStringBuilder() << "Send# " << NextCookie << Endl);
- ++NextCookie;
- }
- }
-
- void HandlePong(TAutoPtr<IEventHandle> ev) {
-// Cerr << (TStringBuilder() << "Receive# " << ev->Cookie << Endl);
- if (const auto it = InFlight.find(ev->Cookie); it != InFlight.end()) {
- auto& [s2cIt, hash] = it->second;
- Y_ABORT_UNLESS(hash == ev->GetChainBuffer()->GetString());
- SessionToCookie.erase(s2cIt);
- InFlight.erase(it);
- } else if (const auto it = Tentative.find(ev->Cookie); it != Tentative.end()) {
- Y_ABORT_UNLESS(it->second == ev->GetChainBuffer()->GetString());
- Tentative.erase(it);
- } else {
- Y_ABORT("Cookie# %" PRIu64, ev->Cookie);
- }
- IssueQueries();
- }
-
- void Handle(TEvInterconnect::TEvNodeConnected::TPtr ev) {
- Cerr << (TStringBuilder() << "TEvNodeConnected" << Endl);
- Y_ABORT_UNLESS(SubscribeInFlight);
- SubscribeInFlight = false;
- Y_ABORT_UNLESS(!SessionId);
- SessionId = ev->Sender;
- IssueQueries();
- }
-
- void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr ev) {
- Cerr << (TStringBuilder() << "TEvNodeDisconnected" << Endl);
- SubscribeInFlight = false;
- if (SessionId) {
- Y_ABORT_UNLESS(SessionId == ev->Sender);
- auto r = SessionToCookie.equal_range(SessionId);
- for (auto it = r.first; it != r.second; ++it) {
- const auto inFlightIt = InFlight.find(it->second);
- Y_ABORT_UNLESS(inFlightIt != InFlight.end());
- Tentative.emplace(inFlightIt->first, inFlightIt->second.second);
- InFlight.erase(it->second);
- }
- SessionToCookie.erase(r.first, r.second);
- SessionId = TActorId();
- }
- Schedule(TDuration::MilliSeconds(100), new TEvents::TEvWakeup);
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr ev) {
- Cerr << (TStringBuilder() << "TEvUndelivered Cookie# " << ev->Cookie << Endl);
- if (const auto it = InFlight.find(ev->Cookie); it != InFlight.end()) {
- auto& [s2cIt, hash] = it->second;
- Tentative.emplace(it->first, hash);
- SessionToCookie.erase(s2cIt);
- InFlight.erase(it);
- IssueQueries();
- }
- }
-
- STRICT_STFUNC(StateFunc,
- fFunc(TEvents::THelloWorld::Pong, HandlePong);
- hFunc(TEvInterconnect::TEvNodeConnected, Handle);
- hFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
- hFunc(TEvents::TEvUndelivered, Handle);
- cFunc(TEvents::TSystem::Wakeup, Subscribe);
- )
-};
-
-class TRecipientActor : public TActor<TRecipientActor> {
-public:
- TRecipientActor()
- : TActor(&TThis::StateFunc)
- {}
-
- void HandlePing(TAutoPtr<IEventHandle>& ev) {
- const TString& data = ev->GetChainBuffer()->GetString();
- const TString& response = MD5::CalcRaw(data);
- TActivationContext::Send(new IEventHandle(TEvents::THelloWorld::Pong, 0, ev->Sender, SelfId(),
- MakeIntrusive<TEventSerializedData>(response, TEventSerializationInfo{}), ev->Cookie));
- }
-
- STRICT_STFUNC(StateFunc,
- fFunc(TEvents::THelloWorld::Ping, HandlePing);
- )
-};
-
-Y_UNIT_TEST_SUITE(Interconnect) {
-
- Y_UNIT_TEST(SessionContinuation) {
- TTestICCluster cluster(2);
- const TActorId recipient = cluster.RegisterActor(new TRecipientActor, 1);
- cluster.RegisterActor(new TSenderActor(recipient), 2);
- for (ui32 i = 0; i < 100; ++i) {
- const ui32 nodeId = 1 + RandomNumber(2u);
- const ui32 peerNodeId = 3 - nodeId;
- const ui32 action = RandomNumber(3u);
- auto *node = cluster.GetNode(nodeId);
- TActorId proxyId = node->InterconnectProxy(peerNodeId);
-
- switch (action) {
- case 0:
- node->Send(proxyId, new TEvInterconnect::TEvClosePeerSocket);
- Cerr << (TStringBuilder() << "nodeId# " << nodeId << " peerNodeId# " << peerNodeId
- << " TEvClosePeerSocket" << Endl);
- break;
-
- case 1:
- node->Send(proxyId, new TEvInterconnect::TEvCloseInputSession);
- Cerr << (TStringBuilder() << "nodeId# " << nodeId << " peerNodeId# " << peerNodeId
- << " TEvCloseInputSession" << Endl);
- break;
-
- case 2:
- node->Send(proxyId, new TEvInterconnect::TEvPoisonSession);
- Cerr << (TStringBuilder() << "nodeId# " << nodeId << " peerNodeId# " << peerNodeId
- << " TEvPoisonSession" << Endl);
- break;
-
- default:
- Y_ABORT();
- }
-
- Sleep(TDuration::MilliSeconds(RandomNumber<ui32>(500) + 100));
- }
- }
-
-}
diff --git a/library/cpp/actors/interconnect/ut/large.cpp b/library/cpp/actors/interconnect/ut/large.cpp
deleted file mode 100644
index 88207f816b..0000000000
--- a/library/cpp/actors/interconnect/ut/large.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-#include "lib/ic_test_cluster.h"
-#include "lib/test_events.h"
-#include "lib/test_actors.h"
-
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-
-#include <library/cpp/testing/unittest/tests_data.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/system/event.h>
-#include <util/system/sanitizers.h>
-
-Y_UNIT_TEST_SUITE(LargeMessage) {
- using namespace NActors;
-
- class TProducer: public TActorBootstrapped<TProducer> {
- const TActorId RecipientActorId;
-
- public:
- TProducer(const TActorId& recipientActorId)
- : RecipientActorId(recipientActorId)
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TThis::StateFunc);
- ctx.Send(RecipientActorId, new TEvTest(1, "hello"), IEventHandle::FlagTrackDelivery, 1);
- ctx.Send(RecipientActorId, new TEvTest(2, TString(150 * 1024 * 1024, 'X')), IEventHandle::FlagTrackDelivery, 2);
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr ev, const TActorContext& ctx) {
- if (ev->Cookie == 2) {
- Cerr << "TEvUndelivered\n";
- ctx.Send(RecipientActorId, new TEvTest(3, "hello"), IEventHandle::FlagTrackDelivery, 3);
- }
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvents::TEvUndelivered, Handle)
- )
- };
-
- class TConsumer : public TActorBootstrapped<TConsumer> {
- TManualEvent& Done;
- TActorId SessionId;
-
- public:
- TConsumer(TManualEvent& done)
- : Done(done)
- {
- }
-
- void Bootstrap(const TActorContext& /*ctx*/) {
- Become(&TThis::StateFunc);
- }
-
- void Handle(TEvTest::TPtr ev, const TActorContext& /*ctx*/) {
- const auto& record = ev->Get()->Record;
- Cerr << "RECEIVED TEvTest\n";
- if (record.GetSequenceNumber() == 1) {
- Y_ABORT_UNLESS(!SessionId);
- SessionId = ev->InterconnectSession;
- } else if (record.GetSequenceNumber() == 3) {
- Y_ABORT_UNLESS(SessionId != ev->InterconnectSession);
- Done.Signal();
- } else {
- Y_ABORT("incorrect sequence number");
- }
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvTest, Handle)
- )
- };
-
- Y_UNIT_TEST(Test) {
- TTestICCluster testCluster(2);
-
- TManualEvent done;
- TConsumer* consumer = new TConsumer(done);
- const TActorId recp = testCluster.RegisterActor(consumer, 1);
- testCluster.RegisterActor(new TProducer(recp), 2);
- done.WaitI();
- }
-
-}
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.txt b/library/cpp/actors/interconnect/ut/lib/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h b/library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h
deleted file mode 100644
index dd2557e25e..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h
+++ /dev/null
@@ -1,87 +0,0 @@
-#pragma once
-
-#include "node.h"
-#include "interrupter.h"
-
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/testing/unittest/tests_data.h>
-
-#include <util/generic/noncopyable.h>
-
-class TTestICCluster: public TNonCopyable {
-public:
- struct TTrafficInterrupterSettings {
- TDuration RejectingTrafficTimeout;
- double BandWidth;
- bool Disconnect;
- };
-
-private:
- const ui32 NumNodes;
- const TString Address = "::1";
- TDuration DeadPeerTimeout = TDuration::Seconds(2);
- NMonitoring::TDynamicCounterPtr Counters;
- THashMap<ui32, THolder<TNode>> Nodes;
- TList<TTrafficInterrupter> interrupters;
- NActors::TChannelsConfig ChannelsConfig;
- TPortManager PortManager;
- TIntrusivePtr<NLog::TSettings> LoggerSettings;
-
-public:
- TTestICCluster(ui32 numNodes = 1, NActors::TChannelsConfig channelsConfig = NActors::TChannelsConfig(),
- TTrafficInterrupterSettings* tiSettings = nullptr, TIntrusivePtr<NLog::TSettings> loggerSettings = nullptr)
- : NumNodes(numNodes)
- , Counters(new NMonitoring::TDynamicCounters)
- , ChannelsConfig(channelsConfig)
- , LoggerSettings(loggerSettings)
- {
- THashMap<ui32, ui16> nodeToPortMap;
- THashMap<ui32, THashMap<ui32, ui16>> specificNodePortMap;
-
- for (ui32 i = 1; i <= NumNodes; ++i) {
- nodeToPortMap.emplace(i, PortManager.GetPort());
- }
-
- if (tiSettings) {
- ui32 nodeId;
- ui16 listenPort;
- ui16 forwardPort;
- for (auto& item : nodeToPortMap) {
- nodeId = item.first;
- listenPort = item.second;
- forwardPort = PortManager.GetPort();
-
- specificNodePortMap[nodeId] = nodeToPortMap;
- specificNodePortMap[nodeId].at(nodeId) = forwardPort;
- interrupters.emplace_back(Address, listenPort, forwardPort, tiSettings->RejectingTrafficTimeout, tiSettings->BandWidth, tiSettings->Disconnect);
- interrupters.back().Start();
- }
- }
-
- for (ui32 i = 1; i <= NumNodes; ++i) {
- auto& portMap = tiSettings ? specificNodePortMap[i] : nodeToPortMap;
- Nodes.emplace(i, MakeHolder<TNode>(i, NumNodes, portMap, Address, Counters, DeadPeerTimeout, ChannelsConfig,
- /*numDynamicNodes=*/0, /*numThreads=*/1, LoggerSettings));
- }
- }
-
- TNode* GetNode(ui32 id) {
- return Nodes[id].Get();
- }
-
- ~TTestICCluster() {
- }
-
- TActorId RegisterActor(NActors::IActor* actor, ui32 nodeId) {
- return Nodes[nodeId]->RegisterActor(actor);
- }
-
- TActorId InterconnectProxy(ui32 peerNodeId, ui32 nodeId) {
- return Nodes[nodeId]->InterconnectProxy(peerNodeId);
- }
-
- void KillActor(ui32 nodeId, const TActorId& id) {
- Nodes[nodeId]->Send(id, new NActors::TEvents::TEvPoisonPill);
- }
-};
diff --git a/library/cpp/actors/interconnect/ut/lib/interrupter.h b/library/cpp/actors/interconnect/ut/lib/interrupter.h
deleted file mode 100644
index b00985573a..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/interrupter.h
+++ /dev/null
@@ -1,249 +0,0 @@
-#pragma once
-
-#include <library/cpp/testing/unittest/tests_data.h>
-
-#include <util/network/sock.h>
-#include <util/network/poller.h>
-#include <util/system/thread.h>
-#include <util/system/hp_timer.h>
-#include <util/generic/list.h>
-#include <util/generic/set.h>
-#include <util/generic/vector.h>
-#include <util/generic/deque.h>
-#include <util/random/random.h>
-
-#include <iterator>
-
-class TTrafficInterrupter
- : public ISimpleThread {
- const TString Address;
- const ui16 ForwardPort;
- TInet6StreamSocket ListenSocket;
-
- struct TConnectionDescriptor;
- struct TDelayedPacket {
- TInet6StreamSocket* ForwardSocket = nullptr;
- TVector<char> Data;
- };
- struct TCompare {
- bool operator()(const std::pair<TInstant, TDelayedPacket>& x, const std::pair<TInstant, TDelayedPacket>& y) const {
- return x.first > y.first;
- };
- };
-
- struct TDirectedConnection {
- TInet6StreamSocket* Source = nullptr;
- TInet6StreamSocket* Destination = nullptr;
- TList<TConnectionDescriptor>::iterator ListIterator;
- TInstant Timestamp;
- TPriorityQueue<std::pair<TInstant, TDelayedPacket>, TVector<std::pair<TInstant, TDelayedPacket>>, TCompare> DelayedQueue;
-
- TDirectedConnection(TInet6StreamSocket* source, TInet6StreamSocket* destination)
- : Source(source)
- , Destination(destination)
- {
- }
- };
-
- struct TConnectionDescriptor {
- std::unique_ptr<TInet6StreamSocket> FirstSocket;
- std::unique_ptr<TInet6StreamSocket> SecondSocket;
- TDirectedConnection ForwardConnection;
- TDirectedConnection BackwardConnection;
-
- TConnectionDescriptor(std::unique_ptr<TInet6StreamSocket> firstSock,
- std::unique_ptr<TInet6StreamSocket> secondSock)
- : FirstSocket(std::move(firstSock))
- , SecondSocket(std::move(secondSock))
- , ForwardConnection(FirstSocket.get(), SecondSocket.get())
- , BackwardConnection(SecondSocket.get(), FirstSocket.get())
- {
- }
- };
-
- template <class It = TList<TConnectionDescriptor>::iterator>
- class TCustomListIteratorCompare {
- public:
- bool operator()(const It& it1, const It& it2) const {
- return (&(*it1) < &(*it2));
- }
- };
-
- TList<TConnectionDescriptor> Connections;
- TSet<TList<TConnectionDescriptor>::iterator, TCustomListIteratorCompare<>> DroppedConnections;
-
-public:
- TTrafficInterrupter(TString address, ui16 listenPort, ui16 forwardPort, TDuration rejectingTrafficTimeout, double bandwidth, bool disconnect = true)
- : Address(std::move(address))
- , ForwardPort(forwardPort)
- , ListenSocket()
- , RejectingTrafficTimeout(rejectingTrafficTimeout)
- , CurrentRejectingTimeout(rejectingTrafficTimeout)
- , RejectingStateTimer()
- , Bandwidth(bandwidth)
- , Disconnect(disconnect)
- , RejectingTraffic(false)
- {
- SetReuseAddressAndPort(ListenSocket);
- TSockAddrInet6 addr(Address.data(), listenPort);
- Y_ABORT_UNLESS(ListenSocket.Bind(&addr) == 0);
- Y_ABORT_UNLESS(ListenSocket.Listen(5) == 0);
-
- DelayTraffic = (Bandwidth == 0.0) ? false : true;
-
- ForwardAddrress.Reset(new TSockAddrInet6(Address.data(), ForwardPort));
- const ui32 BufSize = DelayTraffic ? 4096 : 65536 + 4096;
- Buf.resize(BufSize);
- }
-
- ~TTrafficInterrupter() {
- AtomicSet(Running, 0);
- this->Join();
- }
-
-private:
- TAtomic Running = 1;
- TVector<char> Buf;
- TSocketPoller SocketPoller;
- THolder<TSockAddrInet6> ForwardAddrress;
- TVector<void*> Events;
- TDuration RejectingTrafficTimeout;
- TDuration CurrentRejectingTimeout;
- TDuration DefaultPollTimeout = TDuration::MilliSeconds(100);
- TDuration DisconnectTimeout = TDuration::MilliSeconds(100);
- THPTimer RejectingStateTimer;
- THPTimer DisconnectTimer;
- double Bandwidth;
- const bool Disconnect;
- bool RejectingTraffic;
- bool DelayTraffic;
-
- void UpdateRejectingState() {
- if (TDuration::Seconds(std::abs(RejectingStateTimer.Passed())) > CurrentRejectingTimeout) {
- RejectingStateTimer.Reset();
- CurrentRejectingTimeout = (RandomNumber<ui32>(1) ? RejectingTrafficTimeout + TDuration::Seconds(1.0) : RejectingTrafficTimeout - TDuration::Seconds(0.2));
- RejectingTraffic = !RejectingTraffic;
- }
- }
-
- void RandomlyDisconnect() {
- if (TDuration::Seconds(std::abs(DisconnectTimer.Passed())) > DisconnectTimeout) {
- DisconnectTimer.Reset();
- if (RandomNumber<ui32>(100) > 90) {
- if (!Connections.empty()) {
- auto it = Connections.begin();
- std::advance(it, RandomNumber<ui32>(Connections.size()));
- SocketPoller.Unwait(static_cast<SOCKET>(*it->FirstSocket.get()));
- SocketPoller.Unwait(static_cast<SOCKET>(*it->SecondSocket.get()));
- Connections.erase(it);
- }
- }
- }
- }
-
- void* ThreadProc() override {
- int pollReadyCount = 0;
- SocketPoller.WaitRead(static_cast<SOCKET>(ListenSocket), &ListenSocket);
- Events.resize(10);
-
- while (AtomicGet(Running)) {
- if (RejectingTrafficTimeout != TDuration::Zero()) {
- UpdateRejectingState();
- }
- if (Disconnect) {
- RandomlyDisconnect();
- }
- if (!RejectingTraffic) {
- TDuration timeout = DefaultPollTimeout;
- auto updateTimout = [&timeout](TDirectedConnection& conn) {
- if (conn.DelayedQueue) {
- timeout = Min(timeout, conn.DelayedQueue.top().first - TInstant::Now());
- }
- };
- for (auto& it : Connections) {
- updateTimout(it.ForwardConnection);
- updateTimout(it.BackwardConnection);
- }
- pollReadyCount = SocketPoller.WaitT(Events.data(), Events.size(), timeout);
- if (pollReadyCount > 0) {
- for (int i = 0; i < pollReadyCount; i++) {
- HandleSocketPollEvent(Events[i]);
- }
- for (auto it : DroppedConnections) {
- Connections.erase(it);
- }
- DroppedConnections.clear();
- }
- }
- if (DelayTraffic) { // process packets from DelayQueues
- auto processDelayedPackages = [](TDirectedConnection& conn) {
- while (!conn.DelayedQueue.empty()) {
- auto& frontPackage = conn.DelayedQueue.top();
- if (TInstant::Now() >= frontPackage.first) {
- TInet6StreamSocket* sock = frontPackage.second.ForwardSocket;
- if (sock) {
- sock->Send(frontPackage.second.Data.data(), frontPackage.second.Data.size());
- }
- conn.DelayedQueue.pop();
- } else {
- break;
- }
- }
- };
- for (auto& it : Connections) {
- processDelayedPackages(it.ForwardConnection);
- processDelayedPackages(it.BackwardConnection);
- }
- }
- }
- ListenSocket.Close();
- return nullptr;
- }
-
- void HandleSocketPollEvent(void* ev) {
- if (ev == static_cast<void*>(&ListenSocket)) {
- TSockAddrInet6 origin;
- Connections.emplace_back(TConnectionDescriptor(std::unique_ptr<TInet6StreamSocket>(new TInet6StreamSocket), std::unique_ptr<TInet6StreamSocket>(new TInet6StreamSocket)));
- int err = ListenSocket.Accept(Connections.back().FirstSocket.get(), &origin);
- if (!err) {
- err = Connections.back().SecondSocket->Connect(ForwardAddrress.Get());
- if (!err) {
- Connections.back().ForwardConnection.ListIterator = --Connections.end();
- Connections.back().BackwardConnection.ListIterator = --Connections.end();
- SocketPoller.WaitRead(static_cast<SOCKET>(*Connections.back().FirstSocket), &Connections.back().ForwardConnection);
- SocketPoller.WaitRead(static_cast<SOCKET>(*Connections.back().SecondSocket), &Connections.back().BackwardConnection);
- } else {
- Connections.back().FirstSocket->Close();
- }
- } else {
- Connections.pop_back();
- }
- } else {
- TDirectedConnection* directedConnection = static_cast<TDirectedConnection*>(ev);
- int recvSize = 0;
- do {
- recvSize = directedConnection->Source->Recv(Buf.data(), Buf.size());
- } while (recvSize == -EINTR);
-
- if (recvSize > 0) {
- if (DelayTraffic) {
- // put packet into DelayQueue
- const TDuration baseDelay = TDuration::MicroSeconds(recvSize * 1e6 / Bandwidth);
- const TInstant now = TInstant::Now();
- directedConnection->Timestamp = Max(now, directedConnection->Timestamp) + baseDelay;
- TDelayedPacket pkt;
- pkt.ForwardSocket = directedConnection->Destination;
- pkt.Data.resize(recvSize);
- memcpy(pkt.Data.data(), Buf.data(), recvSize);
- directedConnection->DelayedQueue.emplace(directedConnection->Timestamp, std::move(pkt));
- } else {
- directedConnection->Destination->Send(Buf.data(), recvSize);
- }
- } else {
- SocketPoller.Unwait(static_cast<SOCKET>(*directedConnection->Source));
- SocketPoller.Unwait(static_cast<SOCKET>(*directedConnection->Destination));
- DroppedConnections.emplace(directedConnection->ListIterator);
- }
- }
- }
-};
diff --git a/library/cpp/actors/interconnect/ut/lib/node.h b/library/cpp/actors/interconnect/ut/lib/node.h
deleted file mode 100644
index e63a95c31b..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/node.h
+++ /dev/null
@@ -1,149 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/executor_pool_io.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/core/mailbox.h>
-#include <library/cpp/actors/dnsresolver/dnsresolver.h>
-
-#include <library/cpp/actors/interconnect/handshake_broker.h>
-#include <library/cpp/actors/interconnect/interconnect_tcp_server.h>
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-#include <library/cpp/actors/interconnect/interconnect_proxy_wrapper.h>
-
-using namespace NActors;
-
-class TNode {
- THolder<TActorSystem> ActorSystem;
-
-public:
- TNode(ui32 nodeId, ui32 numNodes, const THashMap<ui32, ui16>& nodeToPort, const TString& address,
- NMonitoring::TDynamicCounterPtr counters, TDuration deadPeerTimeout,
- TChannelsConfig channelsSettings = TChannelsConfig(),
- ui32 numDynamicNodes = 0, ui32 numThreads = 1,
- TIntrusivePtr<NLog::TSettings> loggerSettings = nullptr, ui32 inflight = 512 * 1024) {
- TActorSystemSetup setup;
- setup.NodeId = nodeId;
- setup.ExecutorsCount = 2;
- setup.Executors.Reset(new TAutoPtr<IExecutorPool>[setup.ExecutorsCount]);
- setup.Executors[0].Reset(new TBasicExecutorPool(0, numThreads, 20 /* magic number */));
- setup.Executors[1].Reset(new TIOExecutorPool(1, 1));
- setup.Scheduler.Reset(new TBasicSchedulerThread());
- const ui32 interconnectPoolId = 0;
-
- auto common = MakeIntrusive<TInterconnectProxyCommon>();
- common->NameserviceId = GetNameserviceActorId();
- common->MonCounters = counters->GetSubgroup("nodeId", ToString(nodeId));
- common->ChannelsConfig = channelsSettings;
- common->ClusterUUID = "cluster";
- common->AcceptUUID = {common->ClusterUUID};
- common->TechnicalSelfHostName = address;
- common->Settings.Handshake = TDuration::Seconds(1);
- common->Settings.DeadPeer = deadPeerTimeout;
- common->Settings.CloseOnIdle = TDuration::Minutes(1);
- common->Settings.SendBufferDieLimitInMB = 512;
- common->Settings.TotalInflightAmountOfData = inflight;
- common->Settings.TCPSocketBufferSize = 2048 * 1024;
- common->OutgoingHandshakeInflightLimit = 3;
-
- setup.Interconnect.ProxyActors.resize(numNodes + 1 - numDynamicNodes);
- setup.Interconnect.ProxyWrapperFactory = CreateProxyWrapperFactory(common, interconnectPoolId);
-
- for (ui32 i = 1; i <= numNodes; ++i) {
- if (i == nodeId) {
- // create listener actor for local node "nodeId"
- setup.LocalServices.emplace_back(TActorId(), TActorSetupCmd(new TInterconnectListenerTCP(address,
- nodeToPort.at(nodeId), common), TMailboxType::ReadAsFilled, interconnectPoolId));
- } else if (i <= numNodes - numDynamicNodes) {
- // create proxy actor to reach node "i"
- setup.Interconnect.ProxyActors[i] = {new TInterconnectProxyTCP(i, common),
- TMailboxType::ReadAsFilled, interconnectPoolId};
- }
- }
-
- setup.LocalServices.emplace_back(MakePollerActorId(), TActorSetupCmd(CreatePollerActor(),
- TMailboxType::ReadAsFilled, 0));
-
- const TActorId loggerActorId = loggerSettings ? loggerSettings->LoggerActorId : TActorId(0, "logger");
-
- if (!loggerSettings) {
- constexpr ui32 LoggerComponentId = NActorsServices::LOGGER;
- loggerSettings = MakeIntrusive<NLog::TSettings>(
- loggerActorId,
- (NLog::EComponent)LoggerComponentId,
- NLog::PRI_INFO,
- NLog::PRI_DEBUG,
- 0U);
-
- loggerSettings->Append(
- NActorsServices::EServiceCommon_MIN,
- NActorsServices::EServiceCommon_MAX,
- NActorsServices::EServiceCommon_Name
- );
-
- constexpr ui32 WilsonComponentId = 430; // NKikimrServices::WILSON
- static const TString WilsonComponentName = "WILSON";
-
- loggerSettings->Append(
- (NLog::EComponent)WilsonComponentId,
- (NLog::EComponent)WilsonComponentId + 1,
- [](NLog::EComponent) -> const TString & { return WilsonComponentName; });
- }
-
- // register nameserver table
- auto names = MakeIntrusive<TTableNameserverSetup>();
- for (ui32 i = 1; i <= numNodes; ++i) {
- names->StaticNodeTable[i] = TTableNameserverSetup::TNodeInfo(address, address, nodeToPort.at(i));
- }
- setup.LocalServices.emplace_back(
- NDnsResolver::MakeDnsResolverActorId(),
- TActorSetupCmd(
- NDnsResolver::CreateOnDemandDnsResolver(),
- TMailboxType::ReadAsFilled, interconnectPoolId));
- setup.LocalServices.emplace_back(GetNameserviceActorId(), TActorSetupCmd(
- CreateNameserverTable(names, interconnectPoolId), TMailboxType::ReadAsFilled,
- interconnectPoolId));
-
- // register logger
- setup.LocalServices.emplace_back(loggerActorId, TActorSetupCmd(new TLoggerActor(loggerSettings,
- CreateStderrBackend(), counters->GetSubgroup("subsystem", "logger")),
- TMailboxType::ReadAsFilled, 1));
-
- if (common->OutgoingHandshakeInflightLimit) {
- // create handshake broker actor
- setup.LocalServices.emplace_back(MakeHandshakeBrokerOutId(), TActorSetupCmd(
- CreateHandshakeBroker(*common->OutgoingHandshakeInflightLimit),
- TMailboxType::ReadAsFilled, interconnectPoolId));
- }
-
- auto sp = MakeHolder<TActorSystemSetup>(std::move(setup));
- ActorSystem.Reset(new TActorSystem(sp, nullptr, loggerSettings));
- ActorSystem->Start();
- }
-
- ~TNode() {
- ActorSystem->Stop();
- }
-
- bool Send(const TActorId& recipient, IEventBase* ev) {
- return ActorSystem->Send(recipient, ev);
- }
-
- TActorId RegisterActor(IActor* actor) {
- return ActorSystem->Register(actor);
- }
-
- TActorId InterconnectProxy(ui32 peerNodeId) {
- return ActorSystem->InterconnectProxy(peerNodeId);
- }
-
- void RegisterServiceActor(const TActorId& serviceId, IActor* actor) {
- const TActorId actorId = ActorSystem->Register(actor);
- ActorSystem->RegisterLocalService(serviceId, actorId);
- }
-
- TActorSystem *GetActorSystem() const {
- return ActorSystem.Get();
- }
-};
diff --git a/library/cpp/actors/interconnect/ut/lib/test_actors.h b/library/cpp/actors/interconnect/ut/lib/test_actors.h
deleted file mode 100644
index 7591200471..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/test_actors.h
+++ /dev/null
@@ -1,83 +0,0 @@
-#pragma once
-
-namespace NActors {
- class TSenderBaseActor: public TActorBootstrapped<TSenderBaseActor> {
- protected:
- const TActorId RecipientActorId;
- const ui32 Preload;
- ui64 SequenceNumber = 0;
- ui32 InFlySize = 0;
-
- public:
- TSenderBaseActor(const TActorId& recipientActorId, ui32 preload = 1)
- : RecipientActorId(recipientActorId)
- , Preload(preload)
- {
- }
-
- virtual ~TSenderBaseActor() {
- }
-
- virtual void Bootstrap(const TActorContext& ctx) {
- Become(&TSenderBaseActor::StateFunc);
- ctx.Send(ctx.ExecutorThread.ActorSystem->InterconnectProxy(RecipientActorId.NodeId()), new TEvInterconnect::TEvConnectNode);
- }
-
- virtual void SendMessagesIfPossible(const TActorContext& ctx) {
- while (InFlySize < Preload) {
- SendMessage(ctx);
- }
- }
-
- virtual void SendMessage(const TActorContext& /*ctx*/) {
- ++SequenceNumber;
- }
-
- virtual void Handle(TEvents::TEvUndelivered::TPtr& /*ev*/, const TActorContext& ctx) {
- SendMessage(ctx);
- }
-
- virtual void Handle(TEvTestResponse::TPtr& /*ev*/, const TActorContext& ctx) {
- SendMessagesIfPossible(ctx);
- }
-
- void Handle(TEvInterconnect::TEvNodeConnected::TPtr& /*ev*/, const TActorContext& ctx) {
- SendMessagesIfPossible(ctx);
- }
-
- void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr& /*ev*/, const TActorContext& /*ctx*/) {
- }
-
- virtual void Handle(TEvents::TEvPoisonPill::TPtr& /*ev*/, const TActorContext& ctx) {
- Die(ctx);
- }
-
- virtual STRICT_STFUNC(StateFunc,
- HFunc(TEvTestResponse, Handle)
- HFunc(TEvents::TEvUndelivered, Handle)
- HFunc(TEvents::TEvPoisonPill, Handle)
- HFunc(TEvInterconnect::TEvNodeConnected, Handle)
- HFunc(TEvInterconnect::TEvNodeDisconnected, Handle)
- )
- };
-
- class TReceiverBaseActor: public TActor<TReceiverBaseActor> {
- protected:
- ui64 ReceivedCount = 0;
-
- public:
- TReceiverBaseActor()
- : TActor(&TReceiverBaseActor::StateFunc)
- {
- }
-
- virtual ~TReceiverBaseActor() {
- }
-
- virtual STRICT_STFUNC(StateFunc,
- HFunc(TEvTest, Handle)
- )
-
- virtual void Handle(TEvTest::TPtr& /*ev*/, const TActorContext& /*ctx*/) {}
- };
-}
diff --git a/library/cpp/actors/interconnect/ut/lib/test_events.h b/library/cpp/actors/interconnect/ut/lib/test_events.h
deleted file mode 100644
index 1bb5eb7d38..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/test_events.h
+++ /dev/null
@@ -1,54 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/interconnect/ut/protos/interconnect_test.pb.h>
-
-namespace NActors {
- enum {
- EvTest = EventSpaceBegin(TEvents::ES_PRIVATE),
- EvTestChan,
- EvTestSmall,
- EvTestLarge,
- EvTestResponse,
- EvTestStartPolling,
- };
-
- struct TEvTest : TEventPB<TEvTest, NInterconnectTest::TEvTest, EvTest> {
- TEvTest() = default;
-
- TEvTest(ui64 sequenceNumber, const TString& payload) {
- Record.SetSequenceNumber(sequenceNumber);
- Record.SetPayload(payload);
- }
- };
-
- struct TEvTestLarge : TEventPB<TEvTestLarge, NInterconnectTest::TEvTestLarge, EvTestLarge> {
- TEvTestLarge() = default;
-
- TEvTestLarge(ui64 sequenceNumber, const TString& payload) {
- Record.SetSequenceNumber(sequenceNumber);
- Record.SetPayload(payload);
- }
- };
-
- struct TEvTestSmall : TEventPB<TEvTestSmall, NInterconnectTest::TEvTestSmall, EvTestSmall> {
- TEvTestSmall() = default;
-
- TEvTestSmall(ui64 sequenceNumber, const TString& payload) {
- Record.SetSequenceNumber(sequenceNumber);
- Record.SetPayload(payload);
- }
- };
-
- struct TEvTestResponse : TEventPB<TEvTestResponse, NInterconnectTest::TEvTestResponse, EvTestResponse> {
- TEvTestResponse() = default;
-
- TEvTestResponse(ui64 confirmedSequenceNumber) {
- Record.SetConfirmedSequenceNumber(confirmedSequenceNumber);
- }
- };
-
- struct TEvTestStartPolling : TEventPB<TEvTestStartPolling, NInterconnectTest::TEvTestStartPolling, EvTestStartPolling> {
- TEvTestStartPolling() = default;
- };
-
-}
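The deleted header above follows one recurring pattern: each test event derives from TEventPB<TEv..., NInterconnectTest::TEv..., EvId> with an id taken from EventSpaceBegin(TEvents::ES_PRIVATE), and its constructor fills the protobuf Record. A minimal usage sketch, illustrative only and not part of the deleted sources — it assumes an actor context `ctx` and a recipient TActorId, the same way the sender actors elsewhere in these tests send TEvTest:

// Illustrative sketch: send one TEvTest with delivery tracking, as the sender
// actors in these tests do. Assumes test_events.h (above) is included.
void SendOnePing(const NActors::TActorContext& ctx, const NActors::TActorId& recipient, ui64 seqNo) {
    auto* ev = new NActors::TEvTest(seqNo, TString("payload"));   // fills Record.SequenceNumber / Record.Payload
    const ui32 flags = NActors::IEventHandle::FlagTrackDelivery;  // request TEvUndelivered on delivery failure
    ctx.Send(recipient, ev, flags, /*cookie=*/seqNo);             // cookie lets the sender match TEvUndelivered later
}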
diff --git a/library/cpp/actors/interconnect/ut/lib/ya.make b/library/cpp/actors/interconnect/ut/lib/ya.make
deleted file mode 100644
index 615c6a0e54..0000000000
--- a/library/cpp/actors/interconnect/ut/lib/ya.make
+++ /dev/null
@@ -1,10 +0,0 @@
-LIBRARY()
-
-SRCS(
- node.h
- test_events.h
- test_actors.h
- ic_test_cluster.h
-)
-
-END()
diff --git a/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp b/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
deleted file mode 100644
index 4834e48765..0000000000
--- a/library/cpp/actors/interconnect/ut/outgoing_stream_ut.cpp
+++ /dev/null
@@ -1,147 +0,0 @@
-#include <library/cpp/actors/interconnect/outgoing_stream.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/entropy.h>
-#include <util/stream/null.h>
-
-#define Ctest Cnull
-
-Y_UNIT_TEST_SUITE(OutgoingStream) {
- Y_UNIT_TEST(Basic) {
- std::vector<char> buffer;
- buffer.resize(4 << 20);
-
- TReallyFastRng32 rng(EntropyPool());
- for (char *p = buffer.data(); p != buffer.data() + buffer.size(); p += sizeof(ui32)) {
- *reinterpret_cast<ui32*>(p) = rng();
- }
-
- for (ui32 nIter = 0; nIter < 10; ++nIter) {
- Cerr << "nIter# " << nIter << Endl;
-
- size_t base = 0; // number of dropped bytes
- size_t sendOffset = 0; // offset to base
- size_t pending = 0; // number of bytes in queue
-
- NInterconnect::TOutgoingStreamT<4096> stream;
-
- size_t numRewindsRemain = 10;
-
- while (base != buffer.size()) {
- const size_t bytesToEnd = buffer.size() - (base + sendOffset);
-
- Ctest << "base# " << base << " sendOffset# " << sendOffset << " pending# " << pending
- << " bytesToEnd# " << bytesToEnd;
-
- UNIT_ASSERT_VALUES_EQUAL(stream.CalculateOutgoingSize(), pending + sendOffset);
- UNIT_ASSERT_VALUES_EQUAL(stream.CalculateUnsentSize(), pending);
-
- const size_t maxBuffers = 128;
- std::vector<NActors::TConstIoVec> iov;
- stream.ProduceIoVec(iov, maxBuffers, Max<size_t>());
- size_t offset = base + sendOffset;
- for (const auto& [ptr, len] : iov) {
- UNIT_ASSERT(memcmp(buffer.data() + offset, ptr, len) == 0);
- offset += len;
- }
- UNIT_ASSERT(iov.size() == maxBuffers || offset == base + sendOffset + pending);
-
- const char *nextData = buffer.data() + base + sendOffset + pending;
- const size_t nextDataMaxLen = bytesToEnd - pending;
- const size_t nextDataLen = nextDataMaxLen ? rng() % Min<size_t>(16384, nextDataMaxLen) + 1 : 0;
-
- if (size_t bytesToScan = sendOffset + pending) {
- bytesToScan = rng() % bytesToScan + 1;
- size_t offset = base + sendOffset + pending - bytesToScan;
- stream.ScanLastBytes(bytesToScan, [&](TContiguousSpan span) {
- UNIT_ASSERT(offset + span.size() <= base + sendOffset + pending);
- UNIT_ASSERT(memcmp(buffer.data() + offset, span.data(), span.size()) == 0);
- offset += span.size();
- });
- UNIT_ASSERT_VALUES_EQUAL(offset, base + sendOffset + pending);
- }
-
- enum class EAction {
- COPY_APPEND,
- WRITE,
- REF_APPEND,
- ADVANCE,
- REWIND,
- DROP,
- BOOKMARK
- };
-
- std::vector<EAction> actions;
- if (nextDataLen) {
- actions.push_back(EAction::COPY_APPEND);
- actions.push_back(EAction::WRITE);
- actions.push_back(EAction::REF_APPEND);
- actions.push_back(EAction::BOOKMARK);
- }
- if (numRewindsRemain && sendOffset > 65536) {
- actions.push_back(EAction::REWIND);
- }
- actions.push_back(EAction::ADVANCE);
- actions.push_back(EAction::DROP);
-
- switch (actions[rng() % actions.size()]) {
- case EAction::COPY_APPEND: {
- Ctest << " COPY_APPEND nextDataLen# " << nextDataLen;
- auto span = stream.AcquireSpanForWriting(nextDataLen);
- UNIT_ASSERT(span.size() != 0);
- memcpy(span.data(), nextData, span.size());
- stream.Append(span);
- pending += span.size();
- break;
- }
-
- case EAction::WRITE:
- Ctest << " WRITE nextDataLen# " << nextDataLen;
- stream.Write({nextData, nextDataLen});
- pending += nextDataLen;
- break;
-
- case EAction::REF_APPEND:
- Ctest << " REF_APPEND nextDataLen# " << nextDataLen;
- stream.Append({nextData, nextDataLen});
- pending += nextDataLen;
- break;
-
- case EAction::ADVANCE: {
- const size_t advance = rng() % Min<size_t>(4096, pending + 1);
- Ctest << " ADVANCE advance# " << advance;
- stream.Advance(advance);
- sendOffset += advance;
- pending -= advance;
- break;
- }
-
- case EAction::REWIND:
- Ctest << " REWIND";
- stream.Rewind();
- pending += sendOffset;
- sendOffset = 0;
- --numRewindsRemain;
- break;
-
- case EAction::DROP: {
- const size_t drop = rng() % Min<size_t>(65536, sendOffset + 1);
- Ctest << " DROP drop# " << drop;
- stream.DropFront(drop);
- base += drop;
- sendOffset -= drop;
- break;
- }
-
- case EAction::BOOKMARK:
- Ctest << " BOOKMARK nextDataLen# " << nextDataLen;
- auto bookmark = stream.Bookmark(nextDataLen);
- stream.WriteBookmark(std::move(bookmark), {nextData, nextDataLen});
- pending += nextDataLen;
- break;
- }
-
- Ctest << Endl;
- }
- }
- }
-}
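The randomized Basic test above keeps a three-counter reference model alongside the stream: `base` (bytes already dropped from the front), `sendOffset` (bytes advanced past but not yet dropped) and `pending` (bytes appended but not yet advanced past), and after every step asserts CalculateOutgoingSize() == pending + sendOffset and CalculateUnsentSize() == pending. The same bookkeeping written out as a plain struct, purely illustrative — the real class under test is NInterconnect::TOutgoingStreamT<> from outgoing_stream.h:

#include <cstddef>

// Reference model mirrored by the test's base/sendOffset/pending counters.
struct TStreamModel {
    size_t Base = 0;        // dropped from the front via DropFront()
    size_t SendOffset = 0;  // advanced past via Advance(), held until dropped
    size_t Pending = 0;     // appended but not yet advanced past

    void Append(size_t len)  { Pending += len; }                        // COPY_APPEND / WRITE / REF_APPEND / BOOKMARK
    void Advance(size_t len) { SendOffset += len; Pending -= len; }     // ADVANCE
    void Drop(size_t len)    { Base += len; SendOffset -= len; }        // DROP
    void Rewind()            { Pending += SendOffset; SendOffset = 0; } // REWIND: resend everything not yet dropped

    size_t OutgoingSize() const { return SendOffset + Pending; }        // == CalculateOutgoingSize()
    size_t UnsentSize() const   { return Pending; }                     // == CalculateUnsentSize()
};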
diff --git a/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp b/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
deleted file mode 100644
index 38b9b5a0b6..0000000000
--- a/library/cpp/actors/interconnect/ut/poller_actor_ut.cpp
+++ /dev/null
@@ -1,264 +0,0 @@
-#include <library/cpp/actors/interconnect/poller_actor.h>
-#include <library/cpp/actors/testlib/test_runtime.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/network/pair.h>
-#include <util/network/socket.h>
-
-using namespace NActors;
-
-class TTestSocket: public TSharedDescriptor {
-public:
- explicit TTestSocket(SOCKET fd)
- : Fd_(fd)
- {
- }
-
- int GetDescriptor() override {
- return Fd_;
- }
-
-private:
- SOCKET Fd_;
-};
-using TTestSocketPtr = TIntrusivePtr<TTestSocket>;
-
-// create pair of connected, non-blocking sockets
-std::pair<TTestSocketPtr, TTestSocketPtr> NonBlockSockets() {
- SOCKET fds[2];
- SocketPair(fds);
- SetNonBlock(fds[0]);
- SetNonBlock(fds[1]);
- return {MakeIntrusive<TTestSocket>(fds[0]), MakeIntrusive<TTestSocket>(fds[1])};
-}
-
-std::pair<TTestSocketPtr, TTestSocketPtr> TcpSockets() {
- // create server (listening) socket
- SOCKET server = socket(AF_INET, SOCK_STREAM, 0);
- Y_ABORT_UNLESS(server != -1, "socket() failed with %s", strerror(errno));
-
- // bind it to local address with automatically picked port
- sockaddr_in addr;
- addr.sin_family = AF_INET;
- addr.sin_port = 0;
- addr.sin_addr.s_addr = htonl(INADDR_LOOPBACK);
- if (bind(server, (sockaddr*)&addr, sizeof(addr)) == -1) {
- Y_ABORT("bind() failed with %s", strerror(errno));
- } else if (listen(server, 1) == -1) {
- Y_ABORT("listen() failed with %s", strerror(errno));
- }
-
- // obtain local address for client
- socklen_t len = sizeof(addr);
- if (getsockname(server, (sockaddr*)&addr, &len) == -1) {
- Y_ABORT("getsockname() failed with %s", strerror(errno));
- }
-
- // create client socket
- SOCKET client = socket(AF_INET, SOCK_STREAM, 0);
- Y_ABORT_UNLESS(client != -1, "socket() failed with %s", strerror(errno));
-
- // connect to server
- if (connect(client, (sockaddr*)&addr, len) == -1) {
- Y_ABORT("connect() failed with %s", strerror(errno));
- }
-
- // accept connection from the other side
- SOCKET accepted = accept(server, nullptr, nullptr);
- Y_ABORT_UNLESS(accepted != -1, "accept() failed with %s", strerror(errno));
-
- // close server socket
- closesocket(server);
-
- return std::make_pair(MakeIntrusive<TTestSocket>(client), MakeIntrusive<TTestSocket>(accepted));
-}
-
-class TPollerActorTest: public TTestBase {
- UNIT_TEST_SUITE(TPollerActorTest);
- UNIT_TEST(Registration)
- UNIT_TEST(ReadNotification)
- UNIT_TEST(WriteNotification)
- UNIT_TEST(HangupNotification)
- UNIT_TEST_SUITE_END();
-
-public:
- void SetUp() override {
- ActorSystem_ = MakeHolder<TTestActorRuntimeBase>();
- ActorSystem_->Initialize();
-
- PollerId_ = ActorSystem_->Register(CreatePollerActor());
-
- TDispatchOptions opts;
- opts.FinalEvents.emplace_back(TEvents::TSystem::Bootstrap, 1);
- ActorSystem_->DispatchEvents(opts);
- }
-
- void Registration() {
- auto [s1, s2] = NonBlockSockets();
- auto readerId = ActorSystem_->AllocateEdgeActor();
- auto writerId = ActorSystem_->AllocateEdgeActor();
-
- RegisterSocket(s1, readerId, writerId);
-
- // reader should receive event after socket registration
- TPollerToken::TPtr token;
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerRegisterResult>(readerId);
- token = ev->Get()->PollerToken;
- }
-
- // writer should receive event after socket registration
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerRegisterResult>(writerId);
- UNIT_ASSERT_EQUAL(token, ev->Get()->PollerToken);
- }
- }
-
- void ReadNotification() {
- auto [r, w] = NonBlockSockets();
- auto clientId = ActorSystem_->AllocateEdgeActor();
- RegisterSocket(r, clientId, {});
-
- // notification after registration
- TPollerToken::TPtr token;
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerRegisterResult>(clientId);
- token = ev->Get()->PollerToken;
- }
-
- char buf;
-
- // data not ready yet for read
- UNIT_ASSERT(read(r->GetDescriptor(), &buf, sizeof(buf)) == -1);
- UNIT_ASSERT(errno == EWOULDBLOCK);
-
- // request read poll
- token->Request(true, false);
-
- // write data
- UNIT_ASSERT(write(w->GetDescriptor(), "x", 1) == 1);
-
- // notification after socket become readable
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerReady>(clientId);
- UNIT_ASSERT_EQUAL(ev->Get()->Socket, r);
- UNIT_ASSERT(ev->Get()->Read);
- UNIT_ASSERT(!ev->Get()->Write);
- }
-
- // read data
- UNIT_ASSERT(read(r->GetDescriptor(), &buf, sizeof(buf)) == 1);
- UNIT_ASSERT_EQUAL('x', buf);
-
- // no more data to read
- UNIT_ASSERT(read(r->GetDescriptor(), &buf, sizeof(buf)) == -1);
- UNIT_ASSERT(errno == EWOULDBLOCK);
- }
-
- void WriteNotification() {
- auto [r, w] = TcpSockets();
- auto clientId = ActorSystem_->AllocateEdgeActor();
- SetNonBlock(w->GetDescriptor());
- RegisterSocket(w, TActorId{}, clientId);
-
- // notification after registration
- TPollerToken::TPtr token;
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerRegisterResult>(clientId);
- token = ev->Get()->PollerToken;
- }
-
- char buffer[4096];
- memset(buffer, 'x', sizeof(buffer));
-
- for (int i = 0; i < 1000; ++i) {
- // write as much as possible to send buffer
- ssize_t written = 0;
- for (;;) {
- ssize_t res = send(w->GetDescriptor(), buffer, sizeof(buffer), 0);
- if (res > 0) {
- written += res;
- } else if (res == 0) {
- UNIT_FAIL("unexpected zero return from send()");
- } else {
- UNIT_ASSERT(res == -1);
- if (errno == EINTR) {
- continue;
- } else if (errno == EWOULDBLOCK || errno == EAGAIN) {
- token->Request(false, true);
- break;
- } else {
- UNIT_FAIL("unexpected error from send()");
- }
- }
- }
- Cerr << "written " << written << " bytes" << Endl;
-
- // read all written data from the read end
- for (;;) {
- char buffer[4096];
- ssize_t res = recv(r->GetDescriptor(), buffer, sizeof(buffer), 0);
- if (res > 0) {
- UNIT_ASSERT(written >= res);
- written -= res;
- if (!written) {
- break;
- }
- } else if (res == 0) {
- UNIT_FAIL("unexpected zero return from recv()");
- } else {
- UNIT_ASSERT(res == -1);
- if (errno == EINTR) {
- continue;
- } else {
- UNIT_FAIL("unexpected error from recv()");
- }
- }
- }
-
- // wait for notification after socket becomes writable again
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerReady>(clientId);
- UNIT_ASSERT_EQUAL(ev->Get()->Socket, w);
- UNIT_ASSERT(!ev->Get()->Read);
- UNIT_ASSERT(ev->Get()->Write);
- }
- }
- }
-
- void HangupNotification() {
- auto [r, w] = NonBlockSockets();
- auto clientId = ActorSystem_->AllocateEdgeActor();
- RegisterSocket(r, clientId, TActorId{});
-
- // notification after registration
- TPollerToken::TPtr token;
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerRegisterResult>(clientId);
- token = ev->Get()->PollerToken;
- }
-
- token->Request(true, false);
- ShutDown(w->GetDescriptor(), SHUT_RDWR);
-
- // notification after peer shuts down its socket
- {
- auto ev = ActorSystem_->GrabEdgeEvent<TEvPollerReady>(clientId);
- UNIT_ASSERT_EQUAL(ev->Get()->Socket, r);
- UNIT_ASSERT(ev->Get()->Read);
- }
- }
-
-private:
- void RegisterSocket(TTestSocketPtr socket, TActorId readActorId, TActorId writeActorId) {
- auto ev = new TEvPollerRegister{socket, readActorId, writeActorId};
- ActorSystem_->Send(new IEventHandle(PollerId_, TActorId{}, ev));
- }
-
-private:
- THolder<TTestActorRuntimeBase> ActorSystem_;
- TActorId PollerId_;
-};
-
-UNIT_TEST_SUITE_REGISTRATION(TPollerActorTest);
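The three poller tests above share one edge-triggered pattern: registering a socket via TEvPollerRegister yields a TEvPollerRegisterResult carrying a shared TPollerToken, and the owner calls token->Request(read, write) only after an I/O call returns EWOULDBLOCK/EAGAIN, then waits for TEvPollerReady before retrying. A condensed sketch of the read side, assuming a non-blocking descriptor `fd` and the token obtained at registration (as in ReadNotification); TPollerToken comes from poller_actor.h:

#include <unistd.h>
#include <cerrno>

// Edge-triggered read helper (sketch). Returns whatever read() returned; when
// the socket is drained it arms the poller so a TEvPollerReady with Read == true
// arrives once data becomes available.
ssize_t ReadOrArm(int fd, const NActors::TPollerToken::TPtr& token, char* buf, size_t len) {
    const ssize_t res = read(fd, buf, len);
    if (res == -1 && (errno == EWOULDBLOCK || errno == EAGAIN)) {
        token->Request(/*read=*/true, /*write=*/false);
    }
    return res;
}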
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 01f1765c08..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(interconnect-ut-protos)
-target_link_libraries(interconnect-ut-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(interconnect-ut-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
-)
-target_proto_addincls(interconnect-ut-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(interconnect-ut-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 01f1765c08..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(interconnect-ut-protos)
-target_link_libraries(interconnect-ut-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(interconnect-ut-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
-)
-target_proto_addincls(interconnect-ut-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(interconnect-ut-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 10cf33244c..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(interconnect-ut-protos)
-target_link_libraries(interconnect-ut-protos PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(interconnect-ut-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
-)
-target_proto_addincls(interconnect-ut-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(interconnect-ut-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 10cf33244c..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,44 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(interconnect-ut-protos)
-target_link_libraries(interconnect-ut-protos PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(interconnect-ut-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
-)
-target_proto_addincls(interconnect-ut-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(interconnect-ut-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 01f1765c08..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,43 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(interconnect-ut-protos)
-target_link_libraries(interconnect-ut-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(interconnect-ut-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
-)
-target_proto_addincls(interconnect-ut-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(interconnect-ut-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto b/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
deleted file mode 100644
index b74d068a8b..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/interconnect_test.proto
+++ /dev/null
@@ -1,28 +0,0 @@
-package NInterconnectTest;
-
-message TEvTest {
- optional uint64 SequenceNumber = 1;
- optional bytes Payload = 2;
-}
-
-message TEvTestChan {
- optional uint64 SequenceNumber = 1;
- optional uint64 Payload = 2;
-}
-
-message TEvTestLarge {
- optional uint64 SequenceNumber = 1;
- optional bytes Payload = 2;
-}
-
-message TEvTestSmall {
- optional uint64 SequenceNumber = 1;
- optional bytes Payload = 2;
-}
-
-message TEvTestResponse {
- optional uint64 ConfirmedSequenceNumber = 1;
-}
-
-message TEvTestStartPolling {
-}
diff --git a/library/cpp/actors/interconnect/ut/protos/ya.make b/library/cpp/actors/interconnect/ut/protos/ya.make
deleted file mode 100644
index a7ffcd6bd0..0000000000
--- a/library/cpp/actors/interconnect/ut/protos/ya.make
+++ /dev/null
@@ -1,9 +0,0 @@
-PROTO_LIBRARY()
-
-SRCS(
- interconnect_test.proto
-)
-
-EXCLUDE_TAGS(GO_PROTO)
-
-END()
diff --git a/library/cpp/actors/interconnect/ut/sticking_ut.cpp b/library/cpp/actors/interconnect/ut/sticking_ut.cpp
deleted file mode 100644
index 2fa3d0933e..0000000000
--- a/library/cpp/actors/interconnect/ut/sticking_ut.cpp
+++ /dev/null
@@ -1,108 +0,0 @@
-#include <library/cpp/actors/interconnect/ut/lib/node.h>
-#include <library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NActors;
-
-struct TEvPing : TEventBase<TEvPing, TEvents::THelloWorld::Ping> {
- TString Data;
-
- TEvPing(TString data)
- : Data(data)
- {}
-
- TEvPing() = default;
-
- ui32 CalculateSerializedSize() const override { return Data.size(); }
- bool IsSerializable() const override { return true; }
- bool SerializeToArcadiaStream(TChunkSerializer *serializer) const override { serializer->WriteAliasedRaw(Data.data(), Data.size()); return true; }
- TString ToStringHeader() const override { return {}; }
-};
-
-class TPonger : public TActor<TPonger> {
-public:
- TPonger()
- : TActor(&TThis::StateFunc)
- {}
-
- void Handle(TEvPing::TPtr ev) {
- Send(ev->Sender, new TEvents::TEvPong(), 0, ev->Cookie);
- }
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvPing, Handle);
- )
-};
-
-class TPinger : public TActorBootstrapped<TPinger> {
- ui32 PingInFlight = 0;
- TActorId PongerId;
- TDuration MaxRTT;
-
-public:
- TPinger(TActorId pongerId)
- : PongerId(pongerId)
- {}
-
- void Bootstrap() {
- Become(&TThis::StateFunc);
- Action();
- }
-
- void Action() {
- if (PingInFlight) {
- return;
- }
- const ui32 max = 1 + RandomNumber(10u);
- while (PingInFlight < max) {
- IssuePing();
- }
- }
-
- void IssuePing() {
- TString data = TString::Uninitialized(RandomNumber<size_t>(256 * 1024) + 1);
- memset(data.Detach(), 0, data.size());
- Send(PongerId, new TEvPing(data), 0, GetCycleCountFast());
- ++PingInFlight;
- }
-
- void Handle(TEvents::TEvPong::TPtr ev) {
- const TDuration rtt = CyclesToDuration(GetCycleCountFast() - ev->Cookie);
- if (MaxRTT < rtt) {
- MaxRTT = rtt;
- Cerr << "Updated MaxRTT# " << MaxRTT << Endl;
- Y_ABORT_UNLESS(MaxRTT <= TDuration::MilliSeconds(500));
- }
- --PingInFlight;
- Action();
- }
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvents::TEvPong, Handle);
- )
-};
-
-Y_UNIT_TEST_SUITE(Sticking) {
- Y_UNIT_TEST(Check) {
- TPortManager portman;
- THashMap<ui32, ui16> nodeToPort;
- nodeToPort.emplace(1, portman.GetPort());
- nodeToPort.emplace(2, portman.GetPort());
-
- NMonitoring::TDynamicCounterPtr counters = new NMonitoring::TDynamicCounters;
- std::list<TNode> nodes;
- for (auto [nodeId, _] : nodeToPort) {
- nodes.emplace_back(nodeId, nodeToPort.size(), nodeToPort, "127.1.0.0",
- counters->GetSubgroup("nodeId", TStringBuilder() << nodeId), TDuration::Seconds(10),
- TChannelsConfig(), 0, 1, nullptr, 40 << 20);
- }
-
- auto& node1 = *nodes.begin();
- auto& node2 = *++nodes.begin();
-
- const TActorId ponger = node2.RegisterActor(new TPonger());
- node1.RegisterActor(new TPinger(ponger));
-
- Sleep(TDuration::Seconds(10));
- }
-}
diff --git a/library/cpp/actors/interconnect/ut/ya.make b/library/cpp/actors/interconnect/ut/ya.make
deleted file mode 100644
index e5b838635f..0000000000
--- a/library/cpp/actors/interconnect/ut/ya.make
+++ /dev/null
@@ -1,33 +0,0 @@
-UNITTEST()
-
-IF (SANITIZER_TYPE == "thread")
- TIMEOUT(1200)
- SIZE(LARGE)
- TAG(ya:fat)
-ELSE()
- TIMEOUT(600)
- SIZE(MEDIUM)
-ENDIF()
-
-SRCS(
- channel_scheduler_ut.cpp
- event_holder_pool_ut.cpp
- interconnect_ut.cpp
- large.cpp
- outgoing_stream_ut.cpp
- poller_actor_ut.cpp
- dynamic_proxy_ut.cpp
- sticking_ut.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/interconnect
- library/cpp/actors/interconnect/ut/lib
- library/cpp/actors/interconnect/ut/protos
- library/cpp/actors/testlib
- library/cpp/digest/md5
- library/cpp/testing/unittest
-)
-
-END()
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index ad106fc729..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_fat)
-target_link_libraries(library-cpp-actors-interconnect-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- actors-interconnect-mock
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-deprecated-atomic
-)
-target_link_options(library-cpp-actors-interconnect-ut_fat PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_fat/main.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_fat
- TEST_TARGET
- library-cpp-actors-interconnect-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-interconnect-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 4285513499..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_fat)
-target_link_libraries(library-cpp-actors-interconnect-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- actors-interconnect-mock
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-deprecated-atomic
-)
-target_link_options(library-cpp-actors-interconnect-ut_fat PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_fat/main.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_fat
- TEST_TARGET
- library-cpp-actors-interconnect-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-interconnect-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 5365ba4e24..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_fat)
-target_link_libraries(library-cpp-actors-interconnect-ut_fat PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- actors-interconnect-mock
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-deprecated-atomic
-)
-target_link_options(library-cpp-actors-interconnect-ut_fat PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_fat/main.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_fat
- TEST_TARGET
- library-cpp-actors-interconnect-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-interconnect-ut_fat
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 86ef393bab..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_fat)
-target_link_libraries(library-cpp-actors-interconnect-ut_fat PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- actors-interconnect-mock
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-deprecated-atomic
-)
-target_link_options(library-cpp-actors-interconnect-ut_fat PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_fat/main.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_fat
- TEST_TARGET
- library-cpp-actors-interconnect-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-interconnect-ut_fat
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 3841949a28..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,63 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_fat)
-target_link_libraries(library-cpp-actors-interconnect-ut_fat PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- actors-interconnect-mock
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-deprecated-atomic
-)
-target_sources(library-cpp-actors-interconnect-ut_fat PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_fat/main.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_fat
- TEST_TARGET
- library-cpp-actors-interconnect-ut_fat
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- LABELS
- LARGE
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_fat
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-interconnect-ut_fat
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/interconnect/ut_fat/main.cpp b/library/cpp/actors/interconnect/ut_fat/main.cpp
deleted file mode 100644
index abd1cd289a..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/main.cpp
+++ /dev/null
@@ -1,133 +0,0 @@
-
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-#include <library/cpp/actors/interconnect/ut/protos/interconnect_test.pb.h>
-#include <library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h>
-#include <library/cpp/actors/interconnect/ut/lib/interrupter.h>
-#include <library/cpp/actors/interconnect/ut/lib/test_events.h>
-#include <library/cpp/actors/interconnect/ut/lib/test_actors.h>
-#include <library/cpp/actors/interconnect/ut/lib/node.h>
-
-#include <library/cpp/testing/unittest/tests_data.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/network/sock.h>
-#include <util/network/poller.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/generic/set.h>
-
-Y_UNIT_TEST_SUITE(InterconnectUnstableConnection) {
- using namespace NActors;
-
- class TSenderActor: public TSenderBaseActor {
- TDeque<ui64> InFly;
- ui16 SendFlags;
-
- public:
- TSenderActor(const TActorId& recipientActorId, ui16 sendFlags)
- : TSenderBaseActor(recipientActorId, 32)
- , SendFlags(sendFlags)
- {
- }
-
- ~TSenderActor() override {
- Cerr << "Sent " << SequenceNumber << " messages\n";
- }
-
- void SendMessage(const TActorContext& ctx) override {
- const ui32 flags = IEventHandle::MakeFlags(0, SendFlags);
- const ui64 cookie = SequenceNumber;
- const TString payload('@', RandomNumber<size_t>(65536) + 4096);
- ctx.Send(RecipientActorId, new TEvTest(SequenceNumber, payload), flags, cookie);
- InFly.push_back(SequenceNumber);
- ++InFlySize;
- ++SequenceNumber;
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr& ev, const TActorContext& ctx) override {
- auto record = std::find(InFly.begin(), InFly.end(), ev->Cookie);
- if (SendFlags & IEventHandle::FlagGenerateUnsureUndelivered) {
- if (record != InFly.end()) {
- InFly.erase(record);
- --InFlySize;
- SendMessage(ctx);
- }
- } else {
- Y_ABORT_UNLESS(record != InFly.end());
- }
- }
-
- void Handle(TEvTestResponse::TPtr& ev, const TActorContext& ctx) override {
- Y_ABORT_UNLESS(InFly);
- const NInterconnectTest::TEvTestResponse& record = ev->Get()->Record;
- Y_ABORT_UNLESS(record.HasConfirmedSequenceNumber());
- if (!(SendFlags & IEventHandle::FlagGenerateUnsureUndelivered)) {
- while (record.GetConfirmedSequenceNumber() != InFly.front()) {
- InFly.pop_front();
- --InFlySize;
- }
- }
- Y_ABORT_UNLESS(record.GetConfirmedSequenceNumber() == InFly.front(), "got# %" PRIu64 " expected# %" PRIu64,
- record.GetConfirmedSequenceNumber(), InFly.front());
- InFly.pop_front();
- --InFlySize;
- SendMessagesIfPossible(ctx);
- }
- };
-
- class TReceiverActor: public TReceiverBaseActor {
- ui64 ReceivedCount = 0;
- TNode* SenderNode = nullptr;
-
- public:
- TReceiverActor(TNode* senderNode)
- : TReceiverBaseActor()
- , SenderNode(senderNode)
- {
- }
-
- void Handle(TEvTest::TPtr& ev, const TActorContext& /*ctx*/) override {
- const NInterconnectTest::TEvTest& m = ev->Get()->Record;
- Y_ABORT_UNLESS(m.HasSequenceNumber());
- Y_ABORT_UNLESS(m.GetSequenceNumber() >= ReceivedCount, "got #%" PRIu64 " expected at least #%" PRIu64,
- m.GetSequenceNumber(), ReceivedCount);
- ++ReceivedCount;
- SenderNode->Send(ev->Sender, new TEvTestResponse(m.GetSequenceNumber()));
- }
-
- ~TReceiverActor() override {
- Cerr << "Received " << ReceivedCount << " messages\n";
- }
- };
-
- Y_UNIT_TEST(InterconnectTestWithProxyUnsureUndelivered) {
- ui32 numNodes = 2;
- double bandWidth = 1000000;
- ui16 flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagGenerateUnsureUndelivered;
- TTestICCluster::TTrafficInterrupterSettings interrupterSettings{TDuration::Seconds(2), bandWidth, true};
-
- TTestICCluster testCluster(numNodes, TChannelsConfig(), &interrupterSettings);
-
- TReceiverActor* receiverActor = new TReceiverActor(testCluster.GetNode(1));
- const TActorId recipient = testCluster.RegisterActor(receiverActor, 2);
- TSenderActor* senderActor = new TSenderActor(recipient, flags);
- testCluster.RegisterActor(senderActor, 1);
-
- NanoSleep(30ULL * 1000 * 1000 * 1000);
- }
-
- Y_UNIT_TEST(InterconnectTestWithProxy) {
- ui32 numNodes = 2;
- double bandWidth = 1000000;
- ui16 flags = IEventHandle::FlagTrackDelivery;
- TTestICCluster::TTrafficInterrupterSettings interrupterSettings{TDuration::Seconds(2), bandWidth, true};
-
- TTestICCluster testCluster(numNodes, TChannelsConfig(), &interrupterSettings);
-
- TReceiverActor* receiverActor = new TReceiverActor(testCluster.GetNode(1));
- const TActorId recipient = testCluster.RegisterActor(receiverActor, 2);
- TSenderActor* senderActor = new TSenderActor(recipient, flags);
- testCluster.RegisterActor(senderActor, 1);
-
- NanoSleep(30ULL * 1000 * 1000 * 1000);
- }
-}
diff --git a/library/cpp/actors/interconnect/ut_fat/ya.make b/library/cpp/actors/interconnect/ut_fat/ya.make
deleted file mode 100644
index 8361c5d9f7..0000000000
--- a/library/cpp/actors/interconnect/ut_fat/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-UNITTEST()
-
-SIZE(LARGE)
-
-TAG(ya:fat)
-
-SRCS(
- main.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/interconnect
- library/cpp/actors/interconnect/mock
- library/cpp/actors/interconnect/ut/lib
- library/cpp/actors/interconnect/ut/protos
- library/cpp/testing/unittest
- library/cpp/deprecated/atomic
-)
-
-END()
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index b91e1530bb..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_huge_cluster)
-target_link_libraries(library-cpp-actors-interconnect-ut_huge_cluster PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- PROCESSORS
- 4
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut_huge_cluster
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 45fd7e2060..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,76 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_huge_cluster)
-target_link_libraries(library-cpp-actors-interconnect-ut_huge_cluster PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- PROCESSORS
- 4
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut_huge_cluster
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 51c8af6a26..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,79 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_huge_cluster)
-target_link_libraries(library-cpp-actors-interconnect-ut_huge_cluster PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- PROCESSORS
- 4
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut_huge_cluster
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 97a04cc102..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,81 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_huge_cluster)
-target_link_libraries(library-cpp-actors-interconnect-ut_huge_cluster PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-actors-testlib
-)
-target_link_options(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- PROCESSORS
- 4
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut_huge_cluster
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt b/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 7155e17cd5..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,69 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-interconnect-ut_huge_cluster)
-target_link_libraries(library-cpp-actors-interconnect-ut_huge_cluster PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-core
- cpp-actors-interconnect
- interconnect-ut-lib
- interconnect-ut-protos
- cpp-testing-unittest
- cpp-actors-testlib
-)
-target_sources(library-cpp-actors-interconnect-ut_huge_cluster PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_TARGET
- library-cpp-actors-interconnect-ut_huge_cluster
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- LABELS
- MEDIUM
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- PROCESSORS
- 4
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-interconnect-ut_huge_cluster
- PROPERTY
- TIMEOUT
- 600
-)
-target_allocator(library-cpp-actors-interconnect-ut_huge_cluster
- system_allocator
-)
-vcs_info(library-cpp-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp b/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
deleted file mode 100644
index cb46a62ed9..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+++ /dev/null
@@ -1,167 +0,0 @@
-#include <library/cpp/actors/interconnect/ut/lib/ic_test_cluster.h>
-#include <library/cpp/actors/interconnect/ut/lib/test_events.h>
-#include <library/cpp/actors/interconnect/ut/lib/test_actors.h>
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <vector>
-
-Y_UNIT_TEST_SUITE(HugeCluster) {
- using namespace NActors;
-
- class TPoller: public TActor<TPoller> {
- const std::vector<TActorId>& Targets;
- std::unordered_map<TActorId, TManualEvent>& Connected;
-
- public:
- TPoller(const std::vector<TActorId>& targets, std::unordered_map<TActorId, TManualEvent>& events)
- : TActor(&TPoller::StateFunc)
- , Targets(targets)
- , Connected(events)
- {}
-
- void Handle(TEvTestStartPolling::TPtr /*ev*/, const TActorContext& ctx) {
- for (ui32 i = 0; i < Targets.size(); ++i) {
- ctx.Send(Targets[i], new TEvTest(), IEventHandle::FlagTrackDelivery, i);
- }
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr ev, const TActorContext& ctx) {
- const ui32 cookie = ev->Cookie;
- // Cerr << "TEvUndelivered ping from node# " << SelfId().NodeId() << " to node# " << cookie + 1 << Endl;
- ctx.Send(Targets[cookie], new TEvTest(), IEventHandle::FlagTrackDelivery, cookie);
- }
-
- void Handle(TEvTest::TPtr ev, const TActorContext& /*ctx*/) {
- // Cerr << "Polled from " << ev->Sender.ToString() << Endl;
- Connected[ev->Sender].Signal();
- }
-
- void Handle(TEvents::TEvPoisonPill::TPtr& /*ev*/, const TActorContext& ctx) {
- Die(ctx);
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvents::TEvUndelivered, Handle)
- HFunc(TEvTestStartPolling, Handle)
- HFunc(TEvTest, Handle)
- HFunc(TEvents::TEvPoisonPill, Handle)
- )
- };
-
- class TStartPollers : public TActorBootstrapped<TStartPollers> {
- const std::vector<TActorId>& Pollers;
-
- public:
- TStartPollers(const std::vector<TActorId>& pollers)
- : Pollers(pollers)
- {}
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TThis::StateFunc);
- for (ui32 i = 0; i < Pollers.size(); ++i) {
- ctx.Send(Pollers[i], new TEvTestStartPolling(), IEventHandle::FlagTrackDelivery, i);
- }
- }
-
- void Handle(TEvents::TEvUndelivered::TPtr ev, const TActorContext& ctx) {
- const ui32 cookie = ev->Cookie;
- // Cerr << "TEvUndelivered start poller message to node# " << cookie + 1 << Endl;
- ctx.Send(Pollers[cookie], new TEvTestStartPolling(), IEventHandle::FlagTrackDelivery, cookie);
- }
-
- void Handle(TEvents::TEvPoisonPill::TPtr& /*ev*/, const TActorContext& ctx) {
- Die(ctx);
- }
-
- STRICT_STFUNC(StateFunc,
- HFunc(TEvents::TEvUndelivered, Handle)
- HFunc(TEvents::TEvPoisonPill, Handle)
- )
- };
-
- TIntrusivePtr<NLog::TSettings> MakeLogConfigs(NLog::EPriority priority) {
- // custom logger settings
- auto loggerSettings = MakeIntrusive<NLog::TSettings>(
- TActorId(0, "logger"),
- NActorsServices::LOGGER,
- priority,
- priority,
- 0U);
-
- loggerSettings->Append(
- NActorsServices::EServiceCommon_MIN,
- NActorsServices::EServiceCommon_MAX,
- NActorsServices::EServiceCommon_Name
- );
-
- constexpr ui32 WilsonComponentId = 430; // NKikimrServices::WILSON
- static const TString WilsonComponentName = "WILSON";
-
- loggerSettings->Append(
- (NLog::EComponent)WilsonComponentId,
- (NLog::EComponent)WilsonComponentId + 1,
- [](NLog::EComponent) -> const TString & { return WilsonComponentName; });
-
- return loggerSettings;
- }
-
- Y_UNIT_TEST(AllToAll) {
- ui32 nodesNum = 120;
- std::vector<TActorId> pollers(nodesNum);
- std::vector<std::unordered_map<TActorId, TManualEvent>> events(nodesNum);
-
- // Must destroy actor system before shared arrays
- {
- TTestICCluster testCluster(nodesNum, NActors::TChannelsConfig(), nullptr, MakeLogConfigs(NLog::PRI_EMERG));
-
- for (ui32 i = 0; i < nodesNum; ++i) {
- pollers[i] = testCluster.RegisterActor(new TPoller(pollers, events[i]), i + 1);
- }
-
- for (ui32 i = 0; i < nodesNum; ++i) {
- for (const auto& actor : pollers) {
- events[i][actor] = TManualEvent();
- }
- }
-
- testCluster.RegisterActor(new TStartPollers(pollers), 1);
-
- for (ui32 i = 0; i < nodesNum; ++i) {
- for (auto& [_, ev] : events[i]) {
- ev.WaitI();
- }
- }
- }
- }
-
-
- Y_UNIT_TEST(AllToOne) {
- ui32 nodesNum = 500;
- std::vector<TActorId> listeners;
- std::vector<TActorId> pollers(nodesNum - 1);
- std::unordered_map<TActorId, TManualEvent> events;
- std::unordered_map<TActorId, TManualEvent> emptyEventList;
-
- // Must destroy actor system before shared arrays
- {
- TTestICCluster testCluster(nodesNum, NActors::TChannelsConfig(), nullptr, MakeLogConfigs(NLog::PRI_EMERG));
-
- const TActorId listener = testCluster.RegisterActor(new TPoller({}, events), nodesNum);
- listeners = { listener };
- for (ui32 i = 0; i < nodesNum - 1; ++i) {
- pollers[i] = testCluster.RegisterActor(new TPoller(listeners, emptyEventList), i + 1);
- }
-
- for (const auto& actor : pollers) {
- events[actor] = TManualEvent();
- }
-
- testCluster.RegisterActor(new TStartPollers(pollers), 1);
-
- for (auto& [_, ev] : events) {
- ev.WaitI();
- }
- }
- }
-}
diff --git a/library/cpp/actors/interconnect/ut_huge_cluster/ya.make b/library/cpp/actors/interconnect/ut_huge_cluster/ya.make
deleted file mode 100644
index 828783323d..0000000000
--- a/library/cpp/actors/interconnect/ut_huge_cluster/ya.make
+++ /dev/null
@@ -1,34 +0,0 @@
-UNITTEST()
-
-IF (SANITIZER_TYPE OR WITH_VALGRIND)
- TIMEOUT(3600)
- SIZE(LARGE)
- TAG(ya:fat)
-ELSE()
- TIMEOUT(600)
- SIZE(MEDIUM)
-ENDIF()
-
-IF (BUILD_TYPE == "RELEASE" OR BUILD_TYPE == "RELWITHDEBINFO")
- SRCS(
- huge_cluster.cpp
- )
-ELSE ()
-    MESSAGE(WARNING "It takes too much time to run the test in DEBUG mode, so some tests are skipped")
-ENDIF ()
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/interconnect
- library/cpp/actors/interconnect/ut/lib
- library/cpp/actors/interconnect/ut/protos
- library/cpp/testing/unittest
- library/cpp/actors/testlib
-)
-
-REQUIREMENTS(
- cpu:4
- ram:32
-)
-
-END()
diff --git a/library/cpp/actors/interconnect/watchdog_timer.h b/library/cpp/actors/interconnect/watchdog_timer.h
deleted file mode 100644
index 2a5860f84c..0000000000
--- a/library/cpp/actors/interconnect/watchdog_timer.h
+++ /dev/null
@@ -1,69 +0,0 @@
-#pragma once
-
-namespace NActors {
- template<typename TEvent>
- class TWatchdogTimer {
- using TCallback = std::function<void()>;
-
- const TDuration Timeout;
- const TCallback Callback;
-
- TMonotonic TriggerTimestamp = TMonotonic::Max();
- bool EventScheduled = false;
- ui32 Iteration;
-
- static constexpr ui32 NumIterationsBeforeFiring = 2;
-
- public:
- TWatchdogTimer(TDuration timeout, TCallback callback)
- : Timeout(timeout)
- , Callback(std::move(callback))
- {}
-
- void Rearm(const TActorIdentity& actor) {
- if (Timeout != TDuration::Zero() && Timeout != TDuration::Max()) {
- TriggerTimestamp = TActivationContext::Monotonic() + Timeout;
- Iteration = 0;
- Schedule(actor);
- }
- }
-
- void Disarm() {
- TriggerTimestamp = TMonotonic::Max();
- }
-
- bool Armed() const {
- return TriggerTimestamp != TMonotonic::Max();
- }
-
- void operator()(typename TEvent::TPtr& ev) {
- Y_DEBUG_ABORT_UNLESS(EventScheduled);
- EventScheduled = false;
- if (!Armed()) {
- // just do nothing
- } else if (TActivationContext::Monotonic() < TriggerTimestamp) {
- // the time hasn't come yet
- Schedule(TActorIdentity(ev->Recipient));
- } else if (Iteration < NumIterationsBeforeFiring) {
-                // the time has come, but we still give the actor a chance to process some messages and rearm the timer
- ++Iteration;
- TActivationContext::Send(ev.Release()); // send this event into queue once more
- EventScheduled = true;
- } else {
- // no chance to disarm, fire callback
- Disarm();
- Callback();
- }
- }
-
- private:
- void Schedule(const TActorIdentity& actor) {
- Y_DEBUG_ABORT_UNLESS(Armed());
- if (!EventScheduled) {
- actor.Schedule(TriggerTimestamp, new TEvent);
- EventScheduled = true;
- }
- }
- };
-
-}
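A minimal usage sketch of the TWatchdogTimer interface removed above, assuming the NActors facilities it relies on (an actor whose SelfId() yields a TActorIdentity and which receives the scheduled event); the session-actor fragments, the TEvWatchdog event and the callback are hypothetical:

    // Fragments of a hypothetical session actor that tracks peer inactivity.
    TWatchdogTimer<TEvWatchdog> DeadPeerTimer{TDuration::Seconds(5), [this] {
        CloseSessionDueToInactivity();      // hypothetical reaction to the timeout
    }};

    void OnIncomingPacket() {
        DeadPeerTimer.Rearm(SelfId());      // push the deadline forward on activity
    }

    void Handle(TEvWatchdog::TPtr& ev) {
        DeadPeerTimer(ev);                  // reschedule, grant a grace iteration, or fire
    }

    void OnSessionClosed() {
        DeadPeerTimer.Disarm();             // stop the watchdog once the session is gone
    }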
diff --git a/library/cpp/actors/interconnect/ya.make b/library/cpp/actors/interconnect/ya.make
deleted file mode 100644
index 9cb5bc7d8c..0000000000
--- a/library/cpp/actors/interconnect/ya.make
+++ /dev/null
@@ -1,97 +0,0 @@
-LIBRARY()
-
-NO_WSHADOW()
-
-IF (PROFILE_MEMORY_ALLOCATIONS)
- CFLAGS(-DPROFILE_MEMORY_ALLOCATIONS)
-ENDIF()
-
-SRCS(
- channel_scheduler.h
- event_filter.h
- event_holder_pool.h
- events_local.h
- interconnect_address.cpp
- interconnect_address.h
- interconnect_channel.cpp
- interconnect_channel.h
- interconnect_common.h
- interconnect_counters.cpp
- interconnect.h
- interconnect_handshake.cpp
- interconnect_handshake.h
- interconnect_impl.h
- interconnect_mon.cpp
- interconnect_mon.h
- interconnect_nameserver_dynamic.cpp
- interconnect_nameserver_table.cpp
- interconnect_proxy_wrapper.cpp
- interconnect_proxy_wrapper.h
- interconnect_resolve.cpp
- interconnect_stream.cpp
- interconnect_stream.h
- interconnect_tcp_input_session.cpp
- interconnect_tcp_proxy.cpp
- interconnect_tcp_proxy.h
- interconnect_tcp_server.cpp
- interconnect_tcp_server.h
- interconnect_tcp_session.cpp
- interconnect_tcp_session.h
- load.cpp
- load.h
- logging.h
- packet.cpp
- packet.h
- poller_actor.cpp
- poller_actor.h
- poller.h
- poller_tcp.cpp
- poller_tcp.h
- poller_tcp_unit.cpp
- poller_tcp_unit.h
- poller_tcp_unit_select.cpp
- poller_tcp_unit_select.h
- profiler.h
- slowpoke_actor.h
- types.cpp
- types.h
- watchdog_timer.h
-)
-
-IF (OS_LINUX)
- SRCS(
- poller_tcp_unit_epoll.cpp
- poller_tcp_unit_epoll.h
- )
-ENDIF()
-
-PEERDIR(
- contrib/libs/libc_compat
- contrib/libs/openssl
- contrib/libs/xxhash
- library/cpp/actors/core
- library/cpp/actors/dnscachelib
- library/cpp/actors/dnsresolver
- library/cpp/actors/helpers
- library/cpp/actors/prof
- library/cpp/actors/protos
- library/cpp/actors/util
- library/cpp/actors/wilson
- library/cpp/digest/crc32c
- library/cpp/json
- library/cpp/lwtrace
- library/cpp/monlib/dynamic_counters
- library/cpp/monlib/metrics
- library/cpp/monlib/service/pages/resources
- library/cpp/monlib/service/pages/tablesorter
- library/cpp/openssl/init
- library/cpp/packedtypes
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
- ut_fat
- ut_huge_cluster
-)
diff --git a/library/cpp/actors/log_backend/CMakeLists.darwin-arm64.txt b/library/cpp/actors/log_backend/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 2845e78e35..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-log_backend)
-target_link_libraries(cpp-actors-log_backend PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- library-cpp-logger
-)
-target_sources(cpp-actors-log_backend PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/log_backend/actor_log_backend.cpp
-)
diff --git a/library/cpp/actors/log_backend/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/log_backend/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 2845e78e35..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-log_backend)
-target_link_libraries(cpp-actors-log_backend PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- library-cpp-logger
-)
-target_sources(cpp-actors-log_backend PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/log_backend/actor_log_backend.cpp
-)
diff --git a/library/cpp/actors/log_backend/CMakeLists.linux-aarch64.txt b/library/cpp/actors/log_backend/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 15786d6c74..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-log_backend)
-target_link_libraries(cpp-actors-log_backend PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- library-cpp-logger
-)
-target_sources(cpp-actors-log_backend PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/log_backend/actor_log_backend.cpp
-)
diff --git a/library/cpp/actors/log_backend/CMakeLists.linux-x86_64.txt b/library/cpp/actors/log_backend/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 15786d6c74..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,20 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-log_backend)
-target_link_libraries(cpp-actors-log_backend PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- library-cpp-logger
-)
-target_sources(cpp-actors-log_backend PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/log_backend/actor_log_backend.cpp
-)
diff --git a/library/cpp/actors/log_backend/CMakeLists.txt b/library/cpp/actors/log_backend/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/log_backend/CMakeLists.windows-x86_64.txt b/library/cpp/actors/log_backend/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 2845e78e35..0000000000
--- a/library/cpp/actors/log_backend/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-log_backend)
-target_link_libraries(cpp-actors-log_backend PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- library-cpp-logger
-)
-target_sources(cpp-actors-log_backend PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/log_backend/actor_log_backend.cpp
-)
diff --git a/library/cpp/actors/log_backend/actor_log_backend.cpp b/library/cpp/actors/log_backend/actor_log_backend.cpp
deleted file mode 100644
index a6fdd20c7b..0000000000
--- a/library/cpp/actors/log_backend/actor_log_backend.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-#include "actor_log_backend.h"
-
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/logger/record.h>
-
-namespace {
-
-NActors::NLog::EPriority GetActorLogPriority(ELogPriority priority) {
- switch (priority) {
- case TLOG_EMERG:
- return NActors::NLog::PRI_EMERG;
- case TLOG_ALERT:
- return NActors::NLog::PRI_ALERT;
- case TLOG_CRIT:
- return NActors::NLog::PRI_CRIT;
- case TLOG_ERR:
- return NActors::NLog::PRI_ERROR;
- case TLOG_WARNING:
- return NActors::NLog::PRI_WARN;
- case TLOG_NOTICE:
- return NActors::NLog::PRI_NOTICE;
- case TLOG_INFO:
- return NActors::NLog::PRI_INFO;
- case TLOG_DEBUG:
- return NActors::NLog::PRI_DEBUG;
- default:
- return NActors::NLog::PRI_TRACE;
- }
-}
-
-}
-
-TActorLogBackend::TActorLogBackend(NActors::TActorSystem* actorSystem, int logComponent)
- : ActorSystem(actorSystem)
- , LogComponent(logComponent)
-{
-}
-
-void TActorLogBackend::WriteData(const TLogRecord& rec) {
- LOG_LOG(*ActorSystem, GetActorLogPriority(rec.Priority), LogComponent, TString(rec.Data, rec.Len));
-}
diff --git a/library/cpp/actors/log_backend/actor_log_backend.h b/library/cpp/actors/log_backend/actor_log_backend.h
deleted file mode 100644
index a51427d498..0000000000
--- a/library/cpp/actors/log_backend/actor_log_backend.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-#include <library/cpp/logger/backend.h>
-
-namespace NActors {
-class TActorSystem;
-} // namespace NActors
-
-class TActorLogBackend : public TLogBackend {
-public:
- TActorLogBackend(NActors::TActorSystem* actorSystem, int logComponent);
-
- void WriteData(const TLogRecord& rec) override;
-
- void ReopenLog() override {
- }
-
-private:
- NActors::TActorSystem* const ActorSystem;
- const int LogComponent;
-};
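A sketch of plugging this backend into the generic logger, assuming TLog from library/cpp/logger accepts a THolder<TLogBackend>; the helper function and its arguments are illustrative:

    #include <library/cpp/actors/log_backend/actor_log_backend.h>
    #include <library/cpp/logger/log.h>
    #include <util/generic/ptr.h>

    TLog CreateActorBackedLog(NActors::TActorSystem* actorSystem, int logComponent) {
        // Every record written to the returned TLog is forwarded to the actor system
        // logger, with ELogPriority mapped to NActors::NLog::EPriority by the backend.
        return TLog(MakeHolder<TActorLogBackend>(actorSystem, logComponent));
    }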
diff --git a/library/cpp/actors/log_backend/ya.make b/library/cpp/actors/log_backend/ya.make
deleted file mode 100644
index ce9f049e9a..0000000000
--- a/library/cpp/actors/log_backend/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/logger
-)
-
-SRCS(
- actor_log_backend.cpp
-)
-
-END()
diff --git a/library/cpp/actors/memory_log/CMakeLists.darwin-arm64.txt b/library/cpp/actors/memory_log/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 5109f0fcce..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-memory_log)
-target_link_libraries(cpp-actors-memory_log PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-threading-queue
- contrib-libs-linuxvdso
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-memory_log PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/memlog.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/mmap.cpp
-)
diff --git a/library/cpp/actors/memory_log/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/memory_log/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 5109f0fcce..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-memory_log)
-target_link_libraries(cpp-actors-memory_log PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-threading-queue
- contrib-libs-linuxvdso
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-memory_log PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/memlog.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/mmap.cpp
-)
diff --git a/library/cpp/actors/memory_log/CMakeLists.linux-aarch64.txt b/library/cpp/actors/memory_log/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 1fa79db3ff..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-memory_log)
-target_link_libraries(cpp-actors-memory_log PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-threading-queue
- contrib-libs-linuxvdso
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-memory_log PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/memlog.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/mmap.cpp
-)
diff --git a/library/cpp/actors/memory_log/CMakeLists.linux-x86_64.txt b/library/cpp/actors/memory_log/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 1fa79db3ff..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-memory_log)
-target_link_libraries(cpp-actors-memory_log PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-threading-queue
- contrib-libs-linuxvdso
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-memory_log PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/memlog.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/mmap.cpp
-)
diff --git a/library/cpp/actors/memory_log/CMakeLists.txt b/library/cpp/actors/memory_log/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/memory_log/CMakeLists.windows-x86_64.txt b/library/cpp/actors/memory_log/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 5109f0fcce..0000000000
--- a/library/cpp/actors/memory_log/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,21 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(cpp-actors-memory_log)
-target_link_libraries(cpp-actors-memory_log PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-threading-queue
- contrib-libs-linuxvdso
- cpp-deprecated-atomic
-)
-target_sources(cpp-actors-memory_log PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/memlog.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/memory_log/mmap.cpp
-)
diff --git a/library/cpp/actors/memory_log/memlog.cpp b/library/cpp/actors/memory_log/memlog.cpp
deleted file mode 100644
index 263c5c5079..0000000000
--- a/library/cpp/actors/memory_log/memlog.cpp
+++ /dev/null
@@ -1,367 +0,0 @@
-#include "memlog.h"
-
-#include <library/cpp/actors/util/datetime.h>
-
-#include <util/system/info.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/align.h>
-
-#include <contrib/libs/linuxvdso/interface.h>
-
-#if (defined(_i386_) || defined(_x86_64_)) && defined(_linux_)
-#define HAVE_VDSO_GETCPU 1
-#include <contrib/libs/linuxvdso/interface.h>
-static int (*FastGetCpu)(unsigned* cpu, unsigned* node, void* unused);
-#endif
-
-#if defined(_unix_)
-#include <sched.h>
-#elif defined(_win_)
-#include <WinBase.h>
-#else
-#error NO IMPLEMENTATION FOR THE PLATFORM
-#endif
-
-const char TMemoryLog::DEFAULT_LAST_MARK[16] = {
- 'c',
- 'b',
- '7',
- 'B',
- '6',
- '8',
- 'a',
- '8',
- 'A',
- '5',
- '6',
- '1',
- '6',
- '4',
- '5',
- '\n',
-};
-
-const char TMemoryLog::CLEAR_MARK[16] = {
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- ' ',
- '\n',
-};
-
-unsigned TMemoryLog::GetSelfCpu() noexcept {
-#if defined(_unix_)
-#if HAVE_VDSO_GETCPU
- unsigned cpu;
- if (Y_LIKELY(FastGetCpu != nullptr)) {
- auto result = FastGetCpu(&cpu, nullptr, nullptr);
- Y_ABORT_UNLESS(result == 0);
- return cpu;
- } else {
- return 0;
- }
-
-#elif defined(_x86_64_) || defined(_i386_)
-
-#define CPUID(func, eax, ebx, ecx, edx) \
- __asm__ __volatile__( \
- "cpuid" \
- : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx) \
- : "a"(func));
-
- int a = 0, b = 0, c = 0, d = 0;
- CPUID(0x1, a, b, c, d);
- int acpiID = (b >> 24);
- return acpiID;
-
-#elif defined(__GNUC__)
- return sched_getcpu();
-#else
- return 0;
-#endif
-
-#elif defined(_win_)
- return GetCurrentProcessorNumber();
-#else
- return 0;
-#endif
-}
-
-TMemoryLog* TMemoryLog::MemLogBuffer = nullptr;
-Y_POD_THREAD(TThread::TId)
-TMemoryLog::LogThreadId;
-char* TMemoryLog::LastMarkIsHere = nullptr;
-
-std::atomic<bool> TMemoryLog::PrintLastMark(true);
-
-TMemoryLog::TMemoryLog(size_t totalSize, size_t grainSize)
- : GrainSize(grainSize)
- , FreeGrains(DEFAULT_TOTAL_SIZE / DEFAULT_GRAIN_SIZE * 2)
- , Buf(totalSize)
-{
- Y_ABORT_UNLESS(DEFAULT_TOTAL_SIZE % DEFAULT_GRAIN_SIZE == 0);
- NumberOfGrains = DEFAULT_TOTAL_SIZE / DEFAULT_GRAIN_SIZE;
-
- for (size_t i = 0; i < NumberOfGrains; ++i) {
- new (GetGrain(i)) TGrain;
- }
-
- NumberOfCpus = NSystemInfo::NumberOfCpus();
- Y_ABORT_UNLESS(NumberOfGrains > NumberOfCpus);
- ActiveGrains.Reset(new TGrain*[NumberOfCpus]);
- for (size_t i = 0; i < NumberOfCpus; ++i) {
- ActiveGrains[i] = GetGrain(i);
- }
-
- for (size_t i = NumberOfCpus; i < NumberOfGrains; ++i) {
- FreeGrains.StubbornPush(GetGrain(i));
- }
-
-#if HAVE_VDSO_GETCPU
- auto vdsoFunc = (decltype(FastGetCpu))
- NVdso::Function("__vdso_getcpu", "LINUX_2.6");
- AtomicSet(FastGetCpu, vdsoFunc);
-#endif
-}
-
-void* TMemoryLog::GetWriteBuffer(size_t amount) noexcept {
- // alignment required by NoCacheMemcpy
- amount = AlignUp<size_t>(amount, MemcpyAlignment);
-
- for (ui16 tries = MAX_GET_BUFFER_TRIES; tries-- > 0;) {
- auto myCpu = GetSelfCpu();
-
- TGrain* grain = AtomicGet(ActiveGrains[myCpu]);
-
- if (grain != nullptr) {
- auto mine = AtomicGetAndAdd(grain->WritePointer, amount);
- if (mine + amount <= GrainSize - sizeof(TGrain)) {
- return &grain->Data[mine];
- }
-
- if (!AtomicCas(&ActiveGrains[myCpu], 0, grain)) {
- continue;
- }
-
- FreeGrains.StubbornPush(grain);
- }
-
- grain = (TGrain*)FreeGrains.Pop();
-
- if (grain == nullptr) {
- return nullptr;
- }
-
- grain->WritePointer = 0;
-
- if (!AtomicCas(&ActiveGrains[myCpu], grain, 0)) {
- FreeGrains.StubbornPush(grain);
- continue;
- }
- }
-
- return nullptr;
-}
-
-void ClearAlignedTail(char* tail) noexcept {
- auto aligned = AlignUp(tail, TMemoryLog::MemcpyAlignment);
- if (aligned > tail) {
- memset(tail, 0, aligned - tail);
- }
-}
-
-#if defined(_x86_64_) || defined(_i386_)
-#include <xmmintrin.h>
-// the main motivation is to avoid polluting the CPU cache
-NO_SANITIZE_THREAD
-void NoCacheMemcpy(char* dst, const char* src, size_t size) noexcept {
- while (size >= sizeof(__m128) * 2) {
- __m128 a = _mm_load_ps((float*)(src + 0 * sizeof(__m128)));
- __m128 b = _mm_load_ps((float*)(src + 1 * sizeof(__m128)));
- _mm_stream_ps((float*)(dst + 0 * sizeof(__m128)), a);
- _mm_stream_ps((float*)(dst + 1 * sizeof(__m128)), b);
-
- size -= sizeof(__m128) * 2;
- src += sizeof(__m128) * 2;
- dst += sizeof(__m128) * 2;
- }
- memcpy(dst, src, size);
-}
-
-NO_SANITIZE_THREAD
-void NoWCacheMemcpy(char* dst, const char* src, size_t size) noexcept {
- constexpr ui16 ITEMS_COUNT = 1024;
- alignas(TMemoryLog::MemcpyAlignment) __m128 buf[ITEMS_COUNT];
- while (size >= sizeof(buf)) {
- memcpy(&buf, src, sizeof(buf));
-
- for (ui16 i = 0; i < ITEMS_COUNT; ++i) {
- _mm_stream_ps((float*)dst, buf[i]);
- dst += sizeof(__m128);
- }
-
- size -= sizeof(buf);
- src += sizeof(buf);
- }
-
- memcpy(&buf, src, size);
-    // it is fine to copy a few bytes more
- size = AlignUp(size, sizeof(__m128));
- for (ui16 i = 0; i < size / sizeof(__m128); ++i) {
- _mm_stream_ps((float*)dst, buf[i]);
- dst += sizeof(__m128);
- }
-}
-
-#endif
-
-NO_SANITIZE_THREAD
-char* BareMemLogWrite(const char* begin, size_t msgSize, bool isLast) noexcept {
- bool lastMark =
- isLast && TMemoryLog::PrintLastMark.load(std::memory_order_acquire);
- size_t amount = lastMark ? msgSize + TMemoryLog::LAST_MARK_SIZE : msgSize;
-
- char* buffer = (char*)TMemoryLog::GetWriteBufferStatic(amount);
- if (buffer == nullptr) {
- return nullptr;
- }
-
-#if defined(_x86_64_) || defined(_i386_)
- if (AlignDown(begin, TMemoryLog::MemcpyAlignment) == begin) {
- NoCacheMemcpy(buffer, begin, msgSize);
- } else {
- NoWCacheMemcpy(buffer, begin, msgSize);
- }
-#else
- memcpy(buffer, begin, msgSize);
-#endif
-
- if (lastMark) {
- TMemoryLog::ChangeLastMark(buffer + msgSize);
- }
-
- ClearAlignedTail(buffer + amount);
- return buffer;
-}
-
-NO_SANITIZE_THREAD
-bool MemLogWrite(const char* begin, size_t msgSize, bool addLF) noexcept {
- bool lastMark = TMemoryLog::PrintLastMark.load(std::memory_order_acquire);
- size_t amount = lastMark ? msgSize + TMemoryLog::LAST_MARK_SIZE : msgSize;
-
-    // Construct the prolog with a timestamp and the thread id
- auto threadId = TMemoryLog::GetTheadId();
-
- // alignment required by NoCacheMemcpy
-    // must stay in sync with the snprintf format below
- constexpr size_t prologSize = 48;
- alignas(TMemoryLog::MemcpyAlignment) char prolog[prologSize + 1];
- Y_ABORT_UNLESS(AlignDown(&prolog, TMemoryLog::MemcpyAlignment) == &prolog);
-
- int snprintfResult = snprintf(prolog, prologSize + 1,
- "TS %020" PRIu64 " TI %020" PRIu64 " ", GetCycleCountFast(), threadId);
-
- if (snprintfResult < 0) {
- return false;
- }
- Y_ABORT_UNLESS(snprintfResult == prologSize);
-
- amount += prologSize;
- if (addLF) {
- ++amount; // add 1 byte for \n at the end of the message
- }
-
- char* buffer = (char*)TMemoryLog::GetWriteBufferStatic(amount);
- if (buffer == nullptr) {
- return false;
- }
-
-#if defined(_x86_64_) || defined(_i386_)
-    // warning: copy the prolog first so that the prolog tail
-    // does not corrupt the message
- NoCacheMemcpy(buffer, prolog, prologSize);
- if (AlignDown(begin + prologSize, TMemoryLog::MemcpyAlignment) == begin + prologSize) {
- NoCacheMemcpy(buffer + prologSize, begin, msgSize);
- } else {
- NoWCacheMemcpy(buffer + prologSize, begin, msgSize);
- }
-#else
- memcpy(buffer, prolog, prologSize);
- memcpy(buffer + prologSize, begin, msgSize);
-#endif
-
- if (addLF) {
- buffer[prologSize + msgSize] = '\n';
- }
-
- if (lastMark) {
- TMemoryLog::ChangeLastMark(buffer + prologSize + msgSize + (int)addLF);
- }
-
- ClearAlignedTail(buffer + amount);
- return true;
-}
-
-NO_SANITIZE_THREAD
-void TMemoryLog::ChangeLastMark(char* buffer) noexcept {
- memcpy(buffer, DEFAULT_LAST_MARK, LAST_MARK_SIZE);
- auto oldMark = AtomicSwap(&LastMarkIsHere, buffer);
- if (Y_LIKELY(oldMark != nullptr)) {
- memcpy(oldMark, CLEAR_MARK, LAST_MARK_SIZE);
- }
- if (AtomicGet(LastMarkIsHere) != buffer) {
- memcpy(buffer, CLEAR_MARK, LAST_MARK_SIZE);
- AtomicBarrier();
- }
-}
-
-bool MemLogVPrintF(const char* format, va_list params) noexcept {
- auto logger = TMemoryLog::GetMemoryLogger();
- if (logger == nullptr) {
- return false;
- }
-
- auto threadId = TMemoryLog::GetTheadId();
-
- // alignment required by NoCacheMemcpy
- alignas(TMemoryLog::MemcpyAlignment) char buf[TMemoryLog::MAX_MESSAGE_SIZE];
- Y_ABORT_UNLESS(AlignDown(&buf, TMemoryLog::MemcpyAlignment) == &buf);
-
- int prologSize = snprintf(buf,
- TMemoryLog::MAX_MESSAGE_SIZE - 2,
- "TS %020" PRIu64 " TI %020" PRIu64 " ",
- GetCycleCountFast(),
- threadId);
-
- if (Y_UNLIKELY(prologSize < 0)) {
- return false;
- }
- Y_ABORT_UNLESS((ui32)prologSize <= TMemoryLog::MAX_MESSAGE_SIZE);
-
- int add = vsnprintf(
- &buf[prologSize],
- TMemoryLog::MAX_MESSAGE_SIZE - prologSize - 2,
- format, params);
-
- if (Y_UNLIKELY(add < 0)) {
- return false;
- }
- Y_ABORT_UNLESS(add >= 0);
- auto totalSize = prologSize + add;
-
- buf[totalSize++] = '\n';
- Y_ABORT_UNLESS((ui32)totalSize <= TMemoryLog::MAX_MESSAGE_SIZE);
-
- return BareMemLogWrite(buf, totalSize) != nullptr;
-}
diff --git a/library/cpp/actors/memory_log/memlog.h b/library/cpp/actors/memory_log/memlog.h
deleted file mode 100644
index bf4e115c49..0000000000
--- a/library/cpp/actors/memory_log/memlog.h
+++ /dev/null
@@ -1,211 +0,0 @@
-#pragma once
-
-#include <library/cpp/threading/queue/mpmc_unordered_ring.h>
-#include <util/generic/string.h>
-#include <util/string/printf.h>
-#include <util/system/datetime.h>
-#include <util/system/thread.h>
-#include <util/system/types.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/align.h>
-#include <util/system/tls.h>
-
-#include <atomic>
-#include <cstdio>
-
-#ifdef _win_
-#include <util/system/winint.h>
-#endif
-
-#ifndef NO_SANITIZE_THREAD
-#define NO_SANITIZE_THREAD
-#if defined(__has_feature)
-#if __has_feature(thread_sanitizer)
-#undef NO_SANITIZE_THREAD
-#define NO_SANITIZE_THREAD __attribute__((no_sanitize_thread))
-#endif
-#endif
-#endif
-
-class TMemoryLog {
-public:
- static constexpr size_t DEFAULT_TOTAL_SIZE = 10 * 1024 * 1024;
- static constexpr size_t DEFAULT_GRAIN_SIZE = 1024 * 64;
- static constexpr size_t MAX_MESSAGE_SIZE = 1024;
- static constexpr ui16 MAX_GET_BUFFER_TRIES = 4;
- static constexpr ui16 MemcpyAlignment = 16;
-
- // search for cb7B68a8A561645
- static const char DEFAULT_LAST_MARK[16];
- static const char CLEAR_MARK[16];
-
- static constexpr size_t LAST_MARK_SIZE = sizeof(DEFAULT_LAST_MARK);
-
- inline static TMemoryLog* GetMemoryLogger() noexcept {
- return AtomicGet(MemLogBuffer);
- }
-
- void* GetWriteBuffer(size_t amount) noexcept;
-
- inline static void* GetWriteBufferStatic(size_t amount) noexcept {
- auto logger = GetMemoryLogger();
- if (logger == nullptr) {
- return nullptr;
- }
- return logger->GetWriteBuffer(amount);
- }
-
- size_t GetGlobalBufferSize() const noexcept {
- return Buf.GetSize();
- }
-
- inline static void CreateMemoryLogBuffer(
- size_t totalSize = DEFAULT_TOTAL_SIZE,
- size_t grainSize = DEFAULT_GRAIN_SIZE)
- Y_COLD {
- if (AtomicGet(MemLogBuffer) != nullptr) {
- return;
- }
-
- AtomicSet(MemLogBuffer, new TMemoryLog(totalSize, grainSize));
- }
-
- static std::atomic<bool> PrintLastMark;
-
- // buffer must be at least 16 bytes
- static void ChangeLastMark(char* buffer) noexcept;
-
- inline static TThread::TId GetTheadId() noexcept {
- if (LogThreadId == 0) {
- LogThreadId = TThread::CurrentThreadId();
- }
- return LogThreadId;
- }
-
-private:
- TMemoryLog(size_t totalSize, size_t grainSize) Y_COLD;
-
- struct TGrain {
- TAtomic WritePointer = 0;
- char Padding[MemcpyAlignment - sizeof(TAtomic)];
- char Data[];
- };
-
- size_t NumberOfCpus;
- size_t GrainSize;
- size_t NumberOfGrains;
- TArrayPtr<TGrain*> ActiveGrains;
- NThreading::TMPMCUnorderedRing FreeGrains;
-
- TGrain* GetGrain(size_t grainIndex) const noexcept {
- return (TGrain*)((char*)GetGlobalBuffer() + GrainSize * grainIndex);
- }
-
- class TMMapArea {
- public:
- TMMapArea(size_t amount) Y_COLD {
- MMap(amount);
- }
-
- TMMapArea(const TMMapArea&) = delete;
- TMMapArea& operator=(const TMMapArea& copy) = delete;
-
- TMMapArea(TMMapArea&& move) Y_COLD {
- BufPtr = move.BufPtr;
- Size = move.Size;
-
- move.BufPtr = nullptr;
- move.Size = 0;
- }
-
- TMMapArea& operator=(TMMapArea&& move) Y_COLD {
- BufPtr = move.BufPtr;
- Size = move.Size;
-
- move.BufPtr = nullptr;
- move.Size = 0;
- return *this;
- }
-
- void Reset(size_t amount) Y_COLD {
- MUnmap();
- MMap(amount);
- }
-
- ~TMMapArea() noexcept Y_COLD {
- MUnmap();
- }
-
- size_t GetSize() const noexcept {
- return Size;
- }
-
- void* GetPtr() const noexcept {
- return BufPtr;
- }
-
- private:
- void* BufPtr;
- size_t Size;
-#ifdef _win_
- HANDLE Mapping;
-#endif
-
- void MMap(size_t amount);
- void MUnmap();
- };
-
- TMMapArea Buf;
-
- void* GetGlobalBuffer() const noexcept {
- return Buf.GetPtr();
- }
-
- static unsigned GetSelfCpu() noexcept;
-
- static TMemoryLog* MemLogBuffer;
- static Y_POD_THREAD(TThread::TId) LogThreadId;
- static char* LastMarkIsHere;
-};
-
-// there is no point in sanitizing this function
-NO_SANITIZE_THREAD
-char* BareMemLogWrite(
- const char* begin, size_t msgSize, bool isLast = true) noexcept;
-
-// there is no point in sanitizing this function
-NO_SANITIZE_THREAD
-bool MemLogWrite(
- const char* begin, size_t msgSize, bool addLF = false) noexcept;
-
-Y_WRAPPER inline bool MemLogWrite(const char* begin, const char* end) noexcept {
- if (end <= begin) {
- return false;
- }
-
- size_t msgSize = end - begin;
- return MemLogWrite(begin, msgSize);
-}
-
-template <typename TObj>
-bool MemLogWriteStruct(const TObj* obj) noexcept {
- auto begin = (const char*)(const void*)obj;
- return MemLogWrite(begin, begin + sizeof(TObj));
-}
-
-Y_PRINTF_FORMAT(1, 0)
-bool MemLogVPrintF(const char* format, va_list params) noexcept;
-
-Y_PRINTF_FORMAT(1, 2)
-Y_WRAPPER
-inline bool MemLogPrintF(const char* format, ...) noexcept {
- va_list params;
- va_start(params, format);
- auto result = MemLogVPrintF(format, params);
- va_end(params);
- return result;
-}
-
-Y_WRAPPER inline bool MemLogWriteNullTerm(const char* str) noexcept {
- return MemLogWrite(str, strlen(str));
-}
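A sketch of the intended call pattern for the in-memory log declared above; the buffer sizes are the header defaults and the message content is illustrative:

    #include <library/cpp/actors/memory_log/memlog.h>

    #include <inttypes.h>

    void LogRequest(ui64 requestId) {
        // One-time global setup; later calls are no-ops once the buffer exists.
        TMemoryLog::CreateMemoryLogBuffer(TMemoryLog::DEFAULT_TOTAL_SIZE,
                                          TMemoryLog::DEFAULT_GRAIN_SIZE);

        // Formatted write: the prolog (timestamp, thread id) is prepended automatically.
        MemLogPrintF("request %" PRIu64 " accepted", requestId);

        // Raw write of an already formatted, null-terminated string.
        MemLogWriteNullTerm("checkpoint reached");
    }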
diff --git a/library/cpp/actors/memory_log/mmap.cpp b/library/cpp/actors/memory_log/mmap.cpp
deleted file mode 100644
index 1fe734235e..0000000000
--- a/library/cpp/actors/memory_log/mmap.cpp
+++ /dev/null
@@ -1,63 +0,0 @@
-#include "memlog.h"
-
-#if defined(_unix_)
-#include <sys/mman.h>
-#elif defined(_win_)
-#include <util/system/winint.h>
-#else
-#error NO IMPLEMENTATION FOR THE PLATFORM
-#endif
-
-void TMemoryLog::TMMapArea::MMap(size_t amount) {
- Y_ABORT_UNLESS(amount > 0);
-
-#if defined(_unix_)
- constexpr int mmapProt = PROT_READ | PROT_WRITE;
-#if defined(_linux_)
- constexpr int mmapFlags = MAP_PRIVATE | MAP_ANON | MAP_POPULATE;
-#else
- constexpr int mmapFlags = MAP_PRIVATE | MAP_ANON;
-#endif
-
- BufPtr = ::mmap(nullptr, amount, mmapProt, mmapFlags, -1, 0);
- if (BufPtr == MAP_FAILED) {
- throw std::bad_alloc();
- }
-
-#elif defined(_win_)
- Mapping = ::CreateFileMapping(
- (HANDLE)-1, nullptr, PAGE_READWRITE, 0, amount, nullptr);
- if (Mapping == NULL) {
- throw std::bad_alloc();
- }
- BufPtr = ::MapViewOfFile(Mapping, FILE_MAP_WRITE, 0, 0, amount);
- if (BufPtr == NULL) {
- throw std::bad_alloc();
- }
-#endif
-
- Size = amount;
-}
-
-void TMemoryLog::TMMapArea::MUnmap() {
- if (BufPtr == nullptr) {
- return;
- }
-
-#if defined(_unix_)
- int result = ::munmap(BufPtr, Size);
- Y_ABORT_UNLESS(result == 0);
-
-#elif defined(_win_)
- BOOL result = ::UnmapViewOfFile(BufPtr);
- Y_ABORT_UNLESS(result != 0);
-
- result = ::CloseHandle(Mapping);
- Y_ABORT_UNLESS(result != 0);
-
- Mapping = 0;
-#endif
-
- BufPtr = nullptr;
- Size = 0;
-}
diff --git a/library/cpp/actors/memory_log/ya.make b/library/cpp/actors/memory_log/ya.make
deleted file mode 100644
index ae766a5464..0000000000
--- a/library/cpp/actors/memory_log/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-LIBRARY()
-
-SRCS(
- memlog.cpp
- memlog.h
- mmap.cpp
-)
-
-PEERDIR(
- library/cpp/threading/queue
- contrib/libs/linuxvdso
- library/cpp/deprecated/atomic
-)
-
-END()
diff --git a/library/cpp/actors/prof/CMakeLists.darwin-arm64.txt b/library/cpp/actors/prof/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index c641cf5a52..0000000000
--- a/library/cpp/actors/prof/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-prof)
-target_link_libraries(cpp-actors-prof PUBLIC
- contrib-libs-cxxsupp
- yutil
- libs-tcmalloc-malloc_extension
- library-cpp-charset
- cpp-containers-atomizer
-)
-target_sources(cpp-actors-prof PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tag.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tcmalloc.cpp
-)
diff --git a/library/cpp/actors/prof/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/prof/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index c641cf5a52..0000000000
--- a/library/cpp/actors/prof/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-prof)
-target_link_libraries(cpp-actors-prof PUBLIC
- contrib-libs-cxxsupp
- yutil
- libs-tcmalloc-malloc_extension
- library-cpp-charset
- cpp-containers-atomizer
-)
-target_sources(cpp-actors-prof PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tag.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tcmalloc.cpp
-)
diff --git a/library/cpp/actors/prof/CMakeLists.linux-aarch64.txt b/library/cpp/actors/prof/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index fa76970f57..0000000000
--- a/library/cpp/actors/prof/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-prof)
-target_link_libraries(cpp-actors-prof PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- libs-tcmalloc-malloc_extension
- library-cpp-charset
- cpp-containers-atomizer
-)
-target_sources(cpp-actors-prof PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tag.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tcmalloc.cpp
-)
diff --git a/library/cpp/actors/prof/CMakeLists.linux-x86_64.txt b/library/cpp/actors/prof/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index fa76970f57..0000000000
--- a/library/cpp/actors/prof/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-prof)
-target_link_libraries(cpp-actors-prof PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- libs-tcmalloc-malloc_extension
- library-cpp-charset
- cpp-containers-atomizer
-)
-target_sources(cpp-actors-prof PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tag.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tcmalloc.cpp
-)
diff --git a/library/cpp/actors/prof/CMakeLists.txt b/library/cpp/actors/prof/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/prof/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/prof/CMakeLists.windows-x86_64.txt b/library/cpp/actors/prof/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index c641cf5a52..0000000000
--- a/library/cpp/actors/prof/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,22 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-prof)
-target_link_libraries(cpp-actors-prof PUBLIC
- contrib-libs-cxxsupp
- yutil
- libs-tcmalloc-malloc_extension
- library-cpp-charset
- cpp-containers-atomizer
-)
-target_sources(cpp-actors-prof PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tag.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/tcmalloc.cpp
-)
diff --git a/library/cpp/actors/prof/tag.cpp b/library/cpp/actors/prof/tag.cpp
deleted file mode 100644
index 99248a135f..0000000000
--- a/library/cpp/actors/prof/tag.cpp
+++ /dev/null
@@ -1,132 +0,0 @@
-#include "tag.h"
-#include "tcmalloc.h"
-
-#include <library/cpp/charset/ci_string.h>
-#include <library/cpp/containers/atomizer/atomizer.h>
-#include <library/cpp/malloc/api/malloc.h>
-
-#if defined(PROFILE_MEMORY_ALLOCATIONS)
-#include <library/cpp/lfalloc/dbg_info/dbg_info.h>
-#include <library/cpp/ytalloc/api/ytalloc.h>
-#include <library/cpp/yt/memory/memory_tag.h>
-#endif
-
-#include <util/generic/singleton.h>
-#include <util/generic/string.h>
-#include <util/generic/vector.h>
-#include <util/system/mutex.h>
-#include <library/cpp/actors/util/local_process_key.h>
-#include <library/cpp/actors/actor_type/index_constructor.h>
-
-namespace NProfiling {
- class TStringAtoms {
- private:
- TMutex Mutex;
- atomizer<ci_hash, ci_equal_to> Tags;
-
- public:
- static TStringAtoms& Instance() {
- return *Singleton<TStringAtoms>();
- }
-
- ui32 MakeTag(const char* s) {
- Y_ABORT_UNLESS(s);
- with_lock (Mutex) {
- return Tags.string_to_atom(s);
- }
- }
-
- ui32 MakeTags(const TVector<const char*>& ss) {
- Y_ABORT_UNLESS(ss);
- with_lock (Mutex) {
- ui32 baseTag = Tags.string_to_atom(ss[0]);
- ui32 nextTag = baseTag + 1;
- for (auto i = ss.begin() + 1; i != ss.end(); ++i, ++nextTag) {
- Y_ABORT_UNLESS(*i);
- ui32 ctag = Tags.string_to_atom(*i);
- Y_ABORT_UNLESS(ctag == nextTag);
- }
- return baseTag;
- }
- }
-
- const char* GetTag(ui32 tag) const {
- with_lock (Mutex) {
- return Tags.get_atom_name(tag);
- }
- }
-
- size_t GetTagsCount() const {
- with_lock (Mutex) {
- return Tags.size();
- }
- }
- };
-
- ui32 MakeTag(const char* s) {
- return TStringAtoms::Instance().MakeTag(s);
- }
-
- ui32 MakeTags(const TVector<const char*>& ss) {
- return TStringAtoms::Instance().MakeTags(ss);
- }
-
- const char* GetTag(ui32 tag) {
- return TStringAtoms::Instance().GetTag(tag);
- }
-
- size_t GetTagsCount() {
- return TStringAtoms::Instance().GetTagsCount();
- }
-
- static ui32 SetThreadAllocTag_Default(ui32 tag) {
- Y_UNUSED(tag);
- return 0;
- }
-
-#if defined(PROFILE_MEMORY_ALLOCATIONS)
- static ui32 SetThreadAllocTag_YT(ui32 tag) {
- auto prev = NYT::GetCurrentMemoryTag();
- NYT::SetCurrentMemoryTag(tag);
- return prev;
- }
-
- static TSetThreadAllocTag* SetThreadAllocTagFn() {
- const auto& info = NMalloc::MallocInfo();
-
- TStringBuf name(info.Name);
- if (name.StartsWith("lf")) {
- return (TSetThreadAllocTag*)NAllocDbg::SetThreadAllocTag;
- } else if (name.StartsWith("yt")) {
- return SetThreadAllocTag_YT;
- } else if (name.StartsWith("tc")) {
- return SetTCMallocThreadAllocTag;
- } else {
- return SetThreadAllocTag_Default;
- }
- }
-#else
- static TSetThreadAllocTag* SetThreadAllocTagFn() {
- const auto& info = NMalloc::MallocInfo();
-
- TStringBuf name(info.Name);
- if (name.StartsWith("tc")) {
- return SetTCMallocThreadAllocTag;
- } else {
- return SetThreadAllocTag_Default;
- }
- }
-#endif
-
- TSetThreadAllocTag* SetThreadAllocTag = SetThreadAllocTagFn();
-}
-
-TMemoryProfileGuard::TMemoryProfileGuard(const TString& id)
- : Id(id)
-{
- NProfiling::TMemoryTagScope::Reset(TLocalProcessKeyState<NActors::TActorActivityTag>::GetInstance().Register(Id + "-Start"));
-}
-
-TMemoryProfileGuard::~TMemoryProfileGuard() {
- NProfiling::TMemoryTagScope::Reset(TLocalProcessKeyState<NActors::TActorActivityTag>::GetInstance().Register(Id + "-Finish"));
-}
diff --git a/library/cpp/actors/prof/tag.h b/library/cpp/actors/prof/tag.h
deleted file mode 100644
index 1624d9d1e0..0000000000
--- a/library/cpp/actors/prof/tag.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#pragma once
-
-#include <util/generic/string.h>
-#include <util/generic/noncopyable.h>
-#include <util/generic/vector.h>
-
-/*
-    A common registry of tags for the memory profiler.
-    Register a new tag with MakeTag using a unique string.
-    Use registered tags via the SetThreadAllocTag function of the allocator API.
-*/
-
-namespace NProfiling {
- ui32 MakeTag(const char* s);
-
-    // Pass only unique, previously unregistered tag names; Y_ABORT_UNLESS inside.
- ui32 MakeTags(const TVector<const char*>& ss);
-
- const char* GetTag(ui32 tag);
- size_t GetTagsCount();
-
- using TSetThreadAllocTag = ui32(ui32 tag);
- extern TSetThreadAllocTag* SetThreadAllocTag;
-
- class TMemoryTagScope {
- public:
- explicit TMemoryTagScope(ui32 tag)
- : RestoreTag(SetThreadAllocTag(tag))
- {
- }
-
- explicit TMemoryTagScope(const char* tagName) {
- ui32 newTag = MakeTag(tagName);
- RestoreTag = SetThreadAllocTag(newTag);
- }
-
- TMemoryTagScope(TMemoryTagScope&& move)
- : RestoreTag(move.RestoreTag)
- , Released(move.Released)
- {
- move.Released = true;
- }
-
- TMemoryTagScope& operator=(TMemoryTagScope&& move) {
- RestoreTag = move.RestoreTag;
- Released = move.Released;
- move.Released = true;
- return *this;
- }
-
- static void Reset(ui32 tag) {
- SetThreadAllocTag(tag);
- }
-
- void Release() {
- if (!Released) {
- SetThreadAllocTag(RestoreTag);
- Released = true;
- }
- }
-
- ~TMemoryTagScope() {
- if (!Released) {
- SetThreadAllocTag(RestoreTag);
- }
- }
-
- protected:
- TMemoryTagScope(const TMemoryTagScope&) = delete;
- void operator=(const TMemoryTagScope&) = delete;
-
- ui32 RestoreTag = 0;
- bool Released = false;
- };
-}
-
-class TMemoryProfileGuard: TNonCopyable {
-private:
- const TString Id;
-public:
- TMemoryProfileGuard(const TString& id);
- ~TMemoryProfileGuard();
-
-};
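A sketch of combining the tag registry with TMemoryTagScope declared above; the tag name and the surrounding function are illustrative:

    #include <library/cpp/actors/prof/tag.h>

    void ProcessBatch() {
        // Register (or look up) the tag once; MakeTag returns the same id for the same string.
        static const ui32 BatchTag = NProfiling::MakeTag("BatchProcessing");

        // While the scope is alive, SetThreadAllocTag points the current thread at BatchTag;
        // the destructor restores the previously active tag.
        NProfiling::TMemoryTagScope tagScope(BatchTag);

        // ... allocations done here are attributed to BatchTag by a supporting allocator ...
    }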
diff --git a/library/cpp/actors/prof/tcmalloc.cpp b/library/cpp/actors/prof/tcmalloc.cpp
deleted file mode 100644
index 3d4f203dbb..0000000000
--- a/library/cpp/actors/prof/tcmalloc.cpp
+++ /dev/null
@@ -1,32 +0,0 @@
-#include "tcmalloc.h"
-
-#include <contrib/libs/tcmalloc/tcmalloc/malloc_extension.h>
-
-namespace NProfiling {
-
-static thread_local ui32 AllocationTag = 0;
-
-static struct TInitTCMallocCallbacks {
- static void* CreateTag() {
- return reinterpret_cast<void*>(AllocationTag);
- }
- static void* CopyTag(void* tag) {
- return tag;
- }
- static void DestroyTag(void* tag) {
- Y_UNUSED(tag);
- }
-
- TInitTCMallocCallbacks() {
- tcmalloc::MallocExtension::SetSampleUserDataCallbacks(
- CreateTag, CopyTag, DestroyTag);
- }
-} InitTCMallocCallbacks;
-
-ui32 SetTCMallocThreadAllocTag(ui32 tag) {
- ui32 prev = AllocationTag;
- AllocationTag = tag;
- return prev;
-}
-
-}
diff --git a/library/cpp/actors/prof/tcmalloc.h b/library/cpp/actors/prof/tcmalloc.h
deleted file mode 100644
index 659fb4eaf3..0000000000
--- a/library/cpp/actors/prof/tcmalloc.h
+++ /dev/null
@@ -1,9 +0,0 @@
-#pragma once
-
-#include <util/generic/fwd.h>
-
-namespace NProfiling {
-
-ui32 SetTCMallocThreadAllocTag(ui32 tag);
-
-}
diff --git a/library/cpp/actors/prof/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/prof/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 44995c4d4b..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,66 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-prof-ut)
-target_include_directories(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof
-)
-target_link_libraries(library-cpp-actors-prof-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-prof
-)
-target_link_options(library-cpp-actors-prof-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/ut/tag_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-prof-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-prof-ut
- TEST_TARGET
- library-cpp-actors-prof-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-prof-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-prof-ut)
diff --git a/library/cpp/actors/prof/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/prof/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 0ec56d8762..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-prof-ut)
-target_include_directories(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof
-)
-target_link_libraries(library-cpp-actors-prof-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-prof
-)
-target_link_options(library-cpp-actors-prof-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/ut/tag_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-prof-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-prof-ut
- TEST_TARGET
- library-cpp-actors-prof-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-prof-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-prof-ut)
diff --git a/library/cpp/actors/prof/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/prof/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 591055b744..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,70 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-prof-ut)
-target_include_directories(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof
-)
-target_link_libraries(library-cpp-actors-prof-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-prof
-)
-target_link_options(library-cpp-actors-prof-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/ut/tag_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-prof-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-prof-ut
- TEST_TARGET
- library-cpp-actors-prof-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-prof-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-prof-ut)
diff --git a/library/cpp/actors/prof/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/prof/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 7c84eda1b0..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,72 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-prof-ut)
-target_include_directories(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof
-)
-target_link_libraries(library-cpp-actors-prof-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-prof
-)
-target_link_options(library-cpp-actors-prof-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/ut/tag_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-prof-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-prof-ut
- TEST_TARGET
- library-cpp-actors-prof-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-prof-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-prof-ut)
diff --git a/library/cpp/actors/prof/ut/CMakeLists.txt b/library/cpp/actors/prof/ut/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/prof/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/prof/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 3165130380..0000000000
--- a/library/cpp/actors/prof/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,60 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-prof-ut)
-target_include_directories(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof
-)
-target_link_libraries(library-cpp-actors-prof-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-prof
-)
-target_sources(library-cpp-actors-prof-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/prof/ut/tag_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-prof-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-prof-ut
- TEST_TARGET
- library-cpp-actors-prof-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-prof-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-prof-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-prof-ut)
diff --git a/library/cpp/actors/prof/ut/tag_ut.cpp b/library/cpp/actors/prof/ut/tag_ut.cpp
deleted file mode 100644
index accf3921ab..0000000000
--- a/library/cpp/actors/prof/ut/tag_ut.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-#include "tag.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-using namespace NProfiling;
-
-class TAtomTagsTest: public TTestBase {
-private:
- UNIT_TEST_SUITE(TAtomTagsTest);
- UNIT_TEST(Test_MakeTag);
- UNIT_TEST(Test_Make2Tags);
- UNIT_TEST(Test_MakeTagTwice);
-
- UNIT_TEST(Test_MakeAndGetTag);
-
- UNIT_TEST(Test_MakeVector);
- UNIT_TEST_SUITE_END();
-
-public:
- void Test_MakeTag();
- void Test_Make2Tags();
- void Test_MakeTagTwice();
- void Test_MakeAndGetTag();
- void Test_MakeVector();
-};
-
-UNIT_TEST_SUITE_REGISTRATION(TAtomTagsTest);
-
-void TAtomTagsTest::Test_MakeTag() {
- ui32 tag = MakeTag("a tag");
- UNIT_ASSERT(tag != 0);
-}
-
-void TAtomTagsTest::Test_Make2Tags() {
- ui32 tag1 = MakeTag("a tag 1");
- ui32 tag2 = MakeTag("a tag 2");
- UNIT_ASSERT(tag1 != 0);
- UNIT_ASSERT(tag2 != 0);
- UNIT_ASSERT(tag1 != tag2);
-}
-
-void TAtomTagsTest::Test_MakeTagTwice() {
- ui32 tag1 = MakeTag("a tag twice");
- ui32 tag2 = MakeTag("a tag twice");
- UNIT_ASSERT(tag1 != 0);
- UNIT_ASSERT(tag1 == tag2);
-}
-
-void TAtomTagsTest::Test_MakeAndGetTag() {
- const char* makeStr = "tag to get";
- ui32 tag = MakeTag(makeStr);
- const char* tagStr = GetTag(tag);
- UNIT_ASSERT_STRINGS_EQUAL(makeStr, tagStr);
-}
-
-void TAtomTagsTest::Test_MakeVector() {
- TVector<const char*> strs = {
- "vector tag 0",
- "vector tag 1",
- "vector tag 3",
- "vector tag 4"};
- ui32 baseTag = MakeTags(strs);
- UNIT_ASSERT(baseTag != 0);
- for (ui32 i = 0; i < strs.size(); ++i) {
- const char* str = GetTag(baseTag + i);
- UNIT_ASSERT_STRINGS_EQUAL(str, strs[i]);
- }
-}
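
The removed test exercises a small string-to-tag registry (MakeTag, MakeTags, GetTag). A minimal usage sketch, with the header path assumed from the library/cpp/actors/prof sources above:

```cpp
// Illustrative sketch of the tag API covered by the removed test: MakeTag is
// idempotent per string and GetTag maps a tag back to its registered name.
#include <library/cpp/actors/prof/tag.h> // assumed header path

#include <util/system/yassert.h>

#include <cstring>

void TagExample() {
    const ui32 tag = NProfiling::MakeTag("example subsystem");
    Y_ABORT_UNLESS(NProfiling::MakeTag("example subsystem") == tag); // same string -> same tag
    Y_ABORT_UNLESS(std::strcmp(NProfiling::GetTag(tag), "example subsystem") == 0);
}
```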
diff --git a/library/cpp/actors/prof/ut/ya.make b/library/cpp/actors/prof/ut/ya.make
deleted file mode 100644
index e439856698..0000000000
--- a/library/cpp/actors/prof/ut/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/prof)
-
-SRCS(
- tag_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/prof/ya.make b/library/cpp/actors/prof/ya.make
deleted file mode 100644
index 4bae10a443..0000000000
--- a/library/cpp/actors/prof/ya.make
+++ /dev/null
@@ -1,28 +0,0 @@
-LIBRARY()
-
-SRCS(
- tag.cpp
- tcmalloc.cpp
-)
-
-PEERDIR(
- contrib/libs/tcmalloc/malloc_extension
- library/cpp/charset
- library/cpp/containers/atomizer
-)
-
-IF (PROFILE_MEMORY_ALLOCATIONS)
- CFLAGS(-DPROFILE_MEMORY_ALLOCATIONS)
- PEERDIR(
- library/cpp/malloc/api
- library/cpp/lfalloc/dbg_info
- library/cpp/ytalloc/api
- library/cpp/yt/memory
- )
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/actors/protos/CMakeLists.darwin-arm64.txt b/library/cpp/actors/protos/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index be3257b127..0000000000
--- a/library/cpp/actors/protos/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(cpp-actors-protos)
-target_link_libraries(cpp-actors-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(cpp-actors-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/actors.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/interconnect.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/services_common.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/unittests.proto
-)
-target_proto_addincls(cpp-actors-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(cpp-actors-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/protos/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/protos/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index be3257b127..0000000000
--- a/library/cpp/actors/protos/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(cpp-actors-protos)
-target_link_libraries(cpp-actors-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(cpp-actors-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/actors.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/interconnect.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/services_common.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/unittests.proto
-)
-target_proto_addincls(cpp-actors-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(cpp-actors-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/protos/CMakeLists.linux-aarch64.txt b/library/cpp/actors/protos/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index a9da706400..0000000000
--- a/library/cpp/actors/protos/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(cpp-actors-protos)
-target_link_libraries(cpp-actors-protos PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(cpp-actors-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/actors.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/interconnect.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/services_common.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/unittests.proto
-)
-target_proto_addincls(cpp-actors-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(cpp-actors-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/protos/CMakeLists.linux-x86_64.txt b/library/cpp/actors/protos/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index a9da706400..0000000000
--- a/library/cpp/actors/protos/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,83 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(cpp-actors-protos)
-target_link_libraries(cpp-actors-protos PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(cpp-actors-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/actors.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/interconnect.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/services_common.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/unittests.proto
-)
-target_proto_addincls(cpp-actors-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(cpp-actors-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/protos/CMakeLists.txt b/library/cpp/actors/protos/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/protos/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/protos/CMakeLists.windows-x86_64.txt b/library/cpp/actors/protos/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index be3257b127..0000000000
--- a/library/cpp/actors/protos/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,82 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-get_built_tool_path(
- TOOL_protoc_bin
- TOOL_protoc_dependency
- contrib/tools/protoc/bin
- protoc
-)
-get_built_tool_path(
- TOOL_cpp_styleguide_bin
- TOOL_cpp_styleguide_dependency
- contrib/tools/protoc/plugins/cpp_styleguide
- cpp_styleguide
-)
-
-add_library(cpp-actors-protos)
-target_link_libraries(cpp-actors-protos PUBLIC
- contrib-libs-cxxsupp
- yutil
- contrib-libs-protobuf
-)
-target_proto_messages(cpp-actors-protos PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/actors.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/interconnect.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/services_common.proto
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/protos/unittests.proto
-)
-target_proto_addincls(cpp-actors-protos
- ./
- ${CMAKE_SOURCE_DIR}/
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
- ${CMAKE_BINARY_DIR}
- ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
-)
-target_proto_outs(cpp-actors-protos
- --cpp_out=${CMAKE_BINARY_DIR}/
- --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
-)
diff --git a/library/cpp/actors/protos/actors.proto b/library/cpp/actors/protos/actors.proto
deleted file mode 100644
index 329eb998df..0000000000
--- a/library/cpp/actors/protos/actors.proto
+++ /dev/null
@@ -1,41 +0,0 @@
-package NActorsProto;
-option java_package = "ru.yandex.kikimr.proto";
-option java_outer_classname = "NActorsBaseProto";
-
-message TActorId {
- required fixed64 RawX1 = 1;
- required fixed64 RawX2 = 2;
-}
-
-message TTraceId {
- optional bytes Data = 1;
-}
-
-message TCallbackException {
- required TActorId ActorId = 1;
- required string ExceptionMessage = 2;
-}
-
-message TRemoteHttpInfo {
- message TQueryParam {
- optional string Key = 1;
- optional string Value = 2;
- }
-
- message THeader {
- optional string Name = 1;
- optional string Value = 2;
- }
-
- optional uint32 Method = 1; // HTTP_METHOD enum
- optional string Path = 2;
- repeated TQueryParam QueryParams = 3;
- repeated TQueryParam PostParams = 4;
- optional bytes PostContent = 8;
- repeated THeader Headers = 9;
- optional string RemoteAddr = 7;
-
- // for compatibility reasons (incorrect field types merged in 21-4)
- reserved 5;
- reserved 6;
-}
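
For reference, the removed messages are consumed through ordinary protoc-generated C++ accessors (protoc lower-cases the field names). A hedged sketch, assuming the conventional location of the generated header:

```cpp
// Illustrative only: filling the removed NActorsProto::TRemoteHttpInfo with
// standard protobuf-generated accessors.
#include <library/cpp/actors/protos/actors.pb.h> // assumed generated header path

NActorsProto::TRemoteHttpInfo MakeHttpInfo() {
    NActorsProto::TRemoteHttpInfo info;
    info.set_method(1);                 // HTTP_METHOD enum value, per the comment in the .proto
    info.set_path("/counters");
    auto* param = info.add_queryparams();
    param->set_key("format");
    param->set_value("json");
    return info;
}
```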
diff --git a/library/cpp/actors/protos/interconnect.proto b/library/cpp/actors/protos/interconnect.proto
deleted file mode 100644
index 0e88f3bce5..0000000000
--- a/library/cpp/actors/protos/interconnect.proto
+++ /dev/null
@@ -1,133 +0,0 @@
-import "library/cpp/actors/protos/actors.proto";
-import "google/protobuf/descriptor.proto";
-
-package NActorsInterconnect;
-option java_package = "ru.yandex.kikimr.proto";
-
-message TEvResolveNode {
- optional uint32 NodeId = 1;
- optional uint64 Deadline = 2;
-}
-
-message TEvNodeInfo {
- optional uint32 NodeId = 1;
- optional string Address = 2;
- optional uint32 Port = 3;
-}
-
-extend google.protobuf.FieldOptions {
- optional string PrintName = 50376;
-}
-
-message TNodeLocation {
- // compatibility section -- will be removed in future versions
- optional uint32 DataCenterNum = 1 [deprecated=true];
- optional uint32 RoomNum = 2 [deprecated=true];
- optional uint32 RackNum = 3 [deprecated=true];
- optional uint32 BodyNum = 4 [deprecated=true];
- optional uint32 Body = 100500 [deprecated=true]; // for compatibility with WalleLocation
-
- optional string DataCenter = 10 [(PrintName) = "DC"];
- optional string Module = 20 [(PrintName) = "M"];
- optional string Rack = 30 [(PrintName) = "R"];
- optional string Unit = 40 [(PrintName) = "U"];
-}
-
-message TClusterUUIDs {
- optional string ClusterUUID = 1;
- repeated string AcceptUUID = 2;
-}
-
-message TScopeId {
- optional fixed64 X1 = 1;
- optional fixed64 X2 = 2;
-}
-
-message THandshakeRequest {
- required uint64 Protocol = 1;
-
- required uint64 ProgramPID = 2;
- required uint64 ProgramStartTime = 3;
- required uint64 Serial = 4;
-
- required uint32 ReceiverNodeId = 5;
- required string SenderActorId = 6;
-
- optional string SenderHostName = 7;
- optional string ReceiverHostName = 8;
- optional string UUID = 9;
- optional TClusterUUIDs ClusterUUIDs = 13;
-
- optional bytes Ballast = 10;
-
- optional string VersionTag = 11;
- repeated string AcceptedVersionTags = 12;
-
- optional bool RequireEncryption = 14;
- optional TScopeId ClientScopeId = 15;
-
- optional string Cookie = 16;
- optional bool DoCheckCookie = 17;
-
- optional bool RequestModernFrame = 18;
- optional bool RequestAuthOnly = 19;
- optional bool RequestExtendedTraceFmt = 20;
- optional bool RequestExternalDataChannel = 21;
- optional bool RequestXxhash = 24;
- optional bool RequestXdcShuffle = 25;
-
- optional bytes CompatibilityInfo = 22;
-
- optional bytes HandshakeId = 23;
-}
-
-message THandshakeSuccess {
- required uint64 Protocol = 1;
-
- required uint64 ProgramPID = 2;
- required uint64 ProgramStartTime = 3;
- required uint64 Serial = 4;
-
- required string SenderActorId = 5;
-
- optional string VersionTag = 6;
- repeated string AcceptedVersionTags = 7;
-
- optional TClusterUUIDs ClusterUUIDs = 8;
-
- optional bool StartEncryption = 9;
- optional TScopeId ServerScopeId = 10;
-
- optional bool UseModernFrame = 11;
- optional bool AuthOnly = 12;
- optional bool UseExtendedTraceFmt = 13;
- optional bool UseExternalDataChannel = 14;
- optional bool UseXxhash = 16;
- optional bool UseXdcShuffle = 17;
-
- optional bytes CompatibilityInfo = 15;
-}
-
-message THandshakeReply {
- optional THandshakeSuccess Success = 1;
- optional string ErrorExplaination = 2;
- optional bool CookieCheckResult = 3;
-}
-
-message TEvLoadMessage {
- message THop {
- optional NActorsProto.TActorId NextHop = 1; // if zero, then the payload is trimmed out of the message
- }
-
- repeated THop Hops = 1; // the route for the message
- optional string Id = 3; // message identifier
- optional bytes Payload = 4; // data payload
-}
-
-message TContinuationParams {
- optional bytes HandshakeId = 1;
-}
-
-message TExternalDataChannelParams {
- optional bytes HandshakeId = 1;
-}
diff --git a/library/cpp/actors/protos/services_common.proto b/library/cpp/actors/protos/services_common.proto
deleted file mode 100644
index 1191859f03..0000000000
--- a/library/cpp/actors/protos/services_common.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-package NActorsServices;
-option java_package = "ru.yandex.kikimr.proto";
-
-// 0-255 range
-enum EServiceCommon {
- // WARN: This must be the smallest value in the enumeration
-
- GLOBAL = 0;
- INTERCONNECT = 1;
- TEST = 2;
- PROTOCOLS = 3;
- INTERCONNECT_SPEED_TEST = 4;
- INTERCONNECT_STATUS = 5;
- INTERCONNECT_NETWORK = 6;
- INTERCONNECT_SESSION = 7;
- HTTP = 8;
- LOGGER = 9;
-
-    // This value is a reserved boundary. It must not be aliased with any values
-    // TODO: use reserved values upon protobuf update
- // COMMON_END = 256;
-};
diff --git a/library/cpp/actors/protos/unittests.proto b/library/cpp/actors/protos/unittests.proto
deleted file mode 100644
index 4b1af85e01..0000000000
--- a/library/cpp/actors/protos/unittests.proto
+++ /dev/null
@@ -1,20 +0,0 @@
-option cc_enable_arenas = true;
-
-message TSimple {
- required string Str1 = 1;
- optional string Str2 = 2;
- optional uint64 Number1 = 3;
-}
-
-message TBigMessage {
- repeated TSimple Simples = 1;
- repeated string ManyStr = 2;
- optional string OneMoreStr = 3;
- optional uint64 YANumber = 4;
-}
-
-message TMessageWithPayload {
- optional string Meta = 1;
- repeated uint32 PayloadId = 2;
- repeated bytes SomeData = 3;
-}
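
The removed file enables cc_enable_arenas, so the generated classes can be arena-allocated and released in one shot. A sketch under the assumption that the generated header sits next to the .proto:

```cpp
// Illustrative only: arena allocation of the removed TSimple message.
#include <library/cpp/actors/protos/unittests.pb.h> // assumed generated header path

#include <google/protobuf/arena.h>

void ArenaExample() {
    google::protobuf::Arena arena;
    TSimple* msg = google::protobuf::Arena::CreateMessage<TSimple>(&arena);
    msg->set_str1("hello");   // required field
    msg->set_number1(42);
    // No delete: the arena owns `msg` and frees it when `arena` is destroyed.
}
```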
diff --git a/library/cpp/actors/protos/ya.make b/library/cpp/actors/protos/ya.make
deleted file mode 100644
index c9139191ec..0000000000
--- a/library/cpp/actors/protos/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-PROTO_LIBRARY()
-
-SRCS(
- actors.proto
- interconnect.proto
- services_common.proto
- unittests.proto
-)
-
-EXCLUDE_TAGS(GO_PROTO)
-
-END()
diff --git a/library/cpp/actors/testlib/CMakeLists.darwin-arm64.txt b/library/cpp/actors/testlib/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 4f1c8d01a2..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-testlib)
-target_link_libraries(cpp-actors-testlib PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- actors-interconnect-mock
- cpp-actors-protos
- library-cpp-random_provider
- library-cpp-time_provider
-)
-target_sources(cpp-actors-testlib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/test_runtime.cpp
-)
diff --git a/library/cpp/actors/testlib/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/testlib/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 4f1c8d01a2..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-testlib)
-target_link_libraries(cpp-actors-testlib PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- actors-interconnect-mock
- cpp-actors-protos
- library-cpp-random_provider
- library-cpp-time_provider
-)
-target_sources(cpp-actors-testlib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/test_runtime.cpp
-)
diff --git a/library/cpp/actors/testlib/CMakeLists.linux-aarch64.txt b/library/cpp/actors/testlib/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 40a5c9c26f..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-testlib)
-target_link_libraries(cpp-actors-testlib PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- actors-interconnect-mock
- cpp-actors-protos
- library-cpp-random_provider
- library-cpp-time_provider
-)
-target_sources(cpp-actors-testlib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/test_runtime.cpp
-)
diff --git a/library/cpp/actors/testlib/CMakeLists.linux-x86_64.txt b/library/cpp/actors/testlib/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 40a5c9c26f..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,24 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-testlib)
-target_link_libraries(cpp-actors-testlib PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- actors-interconnect-mock
- cpp-actors-protos
- library-cpp-random_provider
- library-cpp-time_provider
-)
-target_sources(cpp-actors-testlib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/test_runtime.cpp
-)
diff --git a/library/cpp/actors/testlib/CMakeLists.txt b/library/cpp/actors/testlib/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/testlib/CMakeLists.windows-x86_64.txt b/library/cpp/actors/testlib/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 4f1c8d01a2..0000000000
--- a/library/cpp/actors/testlib/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,23 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-testlib)
-target_link_libraries(cpp-actors-testlib PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- actors-interconnect-mock
- cpp-actors-protos
- library-cpp-random_provider
- library-cpp-time_provider
-)
-target_sources(cpp-actors-testlib PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/test_runtime.cpp
-)
diff --git a/library/cpp/actors/testlib/decorator_ut.cpp b/library/cpp/actors/testlib/decorator_ut.cpp
deleted file mode 100644
index fe5c769290..0000000000
--- a/library/cpp/actors/testlib/decorator_ut.cpp
+++ /dev/null
@@ -1,327 +0,0 @@
-#include "test_runtime.h"
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/testing/unittest/registar.h>
-
-
-using namespace NActors;
-
-
-Y_UNIT_TEST_SUITE(TesTTestDecorator) {
-
- bool IsVerbose = false;
- void Write(TString msg) {
- if (IsVerbose) {
- Cerr << (TStringBuilder() << msg << Endl);
- }
- }
-
- struct TDyingChecker : TTestDecorator {
- TActorId MasterId;
-
- TDyingChecker(THolder<IActor> &&actor, TActorId masterId)
- : TTestDecorator(std::move(actor))
- , MasterId(masterId)
- {
- Write("TDyingChecker::Construct\n");
- }
-
- virtual ~TDyingChecker() {
- Write("TDyingChecker::~TDyingChecker");
- TActivationContext::Send(new IEventHandle(MasterId, SelfId(), new TEvents::TEvPing()));
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle> &/*ev*/, const TActorContext &/*ctx*/) override {
- Write("TDyingChecker::DoBeforeReceiving");
- return true;
- }
-
- void DoAfterReceiving(const TActorContext &/*ctx*/) override {
- Write("TDyingChecker::DoAfterReceiving");
- }
- };
-
- struct TTestMasterActor : TActorBootstrapped<TTestMasterActor> {
- friend TActorBootstrapped<TTestMasterActor>;
-
- TSet<TActorId> ActorIds;
- TVector<THolder<IActor>> Actors;
- TActorId EdgeActor;
-
- TTestMasterActor(TVector<THolder<IActor>> &&actors, TActorId edgeActor)
- : TActorBootstrapped()
- , Actors(std::move(actors))
- , EdgeActor(edgeActor)
- {
- }
-
- void Bootstrap()
- {
- Write("Start master actor");
- for (auto &actor : Actors) {
-            THolder<IActor> decoratedActor = MakeHolder<TDyingChecker>(std::move(actor), SelfId());
-            TActorId id = Register(decoratedActor.Release());
- Write("Register test actor");
- UNIT_ASSERT(ActorIds.insert(id).second);
- }
- Become(&TTestMasterActor::State);
- }
-
- STATEFN(State) {
- auto it = ActorIds.find(ev->Sender);
- UNIT_ASSERT(it != ActorIds.end());
- Write("End test actor");
- ActorIds.erase(it);
- if (!ActorIds) {
- Send(EdgeActor, new TEvents::TEvPing());
- PassAway();
- }
- }
- };
-
- enum {
- Begin = EventSpaceBegin(TEvents::ES_USERSPACE),
- EvWords
- };
-
- struct TEvWords : TEventLocal<TEvWords, EvWords> {
- TVector<TString> Words;
-
- TEvWords()
- : TEventLocal()
- {
- }
- };
-
- struct TFizzBuzzToFooBar : TTestDecorator {
- TFizzBuzzToFooBar(THolder<IActor> &&actor)
- : TTestDecorator(std::move(actor))
- {
- }
-
- bool DoBeforeSending(TAutoPtr<IEventHandle> &ev) override {
- if (ev->Type == TEvents::TSystem::Bootstrap) {
- return true;
- }
- Write("TFizzBuzzToFooBar::DoBeforeSending");
- TEventHandle<TEvWords> *handle = reinterpret_cast<TEventHandle<TEvWords>*>(ev.Get());
- UNIT_ASSERT(handle);
- TEvWords *event = handle->Get();
- TVector<TString> &words = event->Words;
- TStringBuilder wordsMsg;
- for (auto &word : words) {
- wordsMsg << word << ';';
- }
- Write(TStringBuilder() << "Send# " << wordsMsg);
- if (words.size() == 2 && words[0] == "Fizz" && words[1] == "Buzz") {
- words[0] = "Foo";
- words[1] = "Bar";
- }
- return true;
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle> &/*ev*/, const TActorContext &/*ctx*/) override {
- Write("TFizzBuzzToFooBar::DoBeforeReceiving");
- return true;
- }
-
- void DoAfterReceiving(const TActorContext &/*ctx*/) override {
- Write("TFizzBuzzToFooBar::DoAfterReceiving");
- }
- };
-
- struct TWordEraser : TTestDecorator {
- TString ErasingWord;
-
- TWordEraser(THolder<IActor> &&actor, TString word)
- : TTestDecorator(std::move(actor))
- , ErasingWord(word)
- {
- }
-
- bool DoBeforeSending(TAutoPtr<IEventHandle> &ev) override {
- if (ev->Type == TEvents::TSystem::Bootstrap) {
- return true;
- }
- Write("TWordEraser::DoBeforeSending");
- TEventHandle<TEvWords> *handle = reinterpret_cast<TEventHandle<TEvWords>*>(ev.Get());
- UNIT_ASSERT(handle);
- TEvWords *event = handle->Get();
- TVector<TString> &words = event->Words;
- auto it = Find(words.begin(), words.end(), ErasingWord);
- if (it != words.end()) {
- words.erase(it);
- }
- return true;
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle> &/*ev*/, const TActorContext &/*ctx*/) override {
- Write("TWordEraser::DoBeforeReceiving");
- return true;
- }
-
- void DoAfterReceiving(const TActorContext &/*ctx*/) override {
- Write("TWordEraser::DoAfterReceiving");
- }
- };
-
- struct TWithoutWordsDroper : TTestDecorator {
- TWithoutWordsDroper(THolder<IActor> &&actor)
- : TTestDecorator(std::move(actor))
- {
- }
-
- bool DoBeforeSending(TAutoPtr<IEventHandle> &ev) override {
- if (ev->Type == TEvents::TSystem::Bootstrap) {
- return true;
- }
- Write("TWithoutWordsDroper::DoBeforeSending");
- TEventHandle<TEvWords> *handle = reinterpret_cast<TEventHandle<TEvWords>*>(ev.Get());
- UNIT_ASSERT(handle);
- TEvWords *event = handle->Get();
- return bool(event->Words);
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle> &/*ev*/, const TActorContext &/*ctx*/) override {
- Write("TWithoutWordsDroper::DoBeforeReceiving");
- return true;
- }
-
- void DoAfterReceiving(const TActorContext &/*ctx*/) override {
- Write("TWithoutWordsDroper::DoAfterReceiving");
- }
- };
-
- struct TFooBarReceiver : TActorBootstrapped<TFooBarReceiver> {
- TActorId MasterId;
- ui64 Counter = 0;
-
- TFooBarReceiver(TActorId masterId)
- : TActorBootstrapped()
- , MasterId(masterId)
- {
- }
-
- void Bootstrap()
- {
- Become(&TFooBarReceiver::State);
- }
-
- STATEFN(State) {
- TEventHandle<TEvWords> *handle = reinterpret_cast<TEventHandle<TEvWords>*>(ev.Get());
- UNIT_ASSERT(handle);
- UNIT_ASSERT(handle->Sender == MasterId);
- TEvWords *event = handle->Get();
- TVector<TString> &words = event->Words;
- UNIT_ASSERT(words.size() == 2 && words[0] == "Foo" && words[1] == "Bar");
- Write(TStringBuilder() << "Receive# " << Counter + 1 << '/' << 2);
- if (++Counter == 2) {
- PassAway();
- }
- }
- };
-
- struct TFizzBuzzSender : TActorBootstrapped<TFizzBuzzSender> {
- TActorId SlaveId;
-
- TFizzBuzzSender()
- : TActorBootstrapped()
- {
- Write("TFizzBuzzSender::Construct");
- }
-
- void Bootstrap() {
- Write("TFizzBuzzSender::Bootstrap");
- THolder<IActor> actor = MakeHolder<TFooBarReceiver>(SelfId());
- THolder<IActor> decoratedActor = MakeHolder<TDyingChecker>(std::move(actor), SelfId());
- SlaveId = Register(decoratedActor.Release());
- for (ui64 idx = 1; idx <= 30; ++idx) {
- THolder<TEvWords> ev = MakeHolder<TEvWords>();
- if (idx % 3 == 0) {
- ev->Words.push_back("Fizz");
- }
- if (idx % 5 == 0) {
- ev->Words.push_back("Buzz");
- }
- Send(SlaveId, ev.Release());
- Write("TFizzBuzzSender::Send words");
- }
- Become(&TFizzBuzzSender::State);
- }
-
- STATEFN(State) {
- UNIT_ASSERT(ev->Sender == SlaveId);
- PassAway();
- }
- };
-
- struct TCounters {
- ui64 SendedCount = 0;
- ui64 RecievedCount = 0;
- };
-
- struct TCountingDecorator : TTestDecorator {
- TCounters *Counters;
-
- TCountingDecorator(THolder<IActor> &&actor, TCounters *counters)
- : TTestDecorator(std::move(actor))
- , Counters(counters)
- {
- }
-
- bool DoBeforeSending(TAutoPtr<IEventHandle> &ev) override {
- if (ev->Type == TEvents::TSystem::Bootstrap) {
- return true;
- }
- Write("TCountingDecorator::DoBeforeSending");
- Counters->SendedCount++;
- return true;
- }
-
- bool DoBeforeReceiving(TAutoPtr<IEventHandle> &/*ev*/, const TActorContext &/*ctx*/) override {
- Write("TCountingDecorator::DoBeforeReceiving");
- Counters->RecievedCount++;
- return true;
- }
- };
-
- bool ScheduledFilterFunc(NActors::TTestActorRuntimeBase& runtime, TAutoPtr<NActors::IEventHandle>& event,
- TDuration delay, TInstant& deadline) {
- if (runtime.IsScheduleForActorEnabled(event->GetRecipientRewrite())) {
- deadline = runtime.GetTimeProvider()->Now() + delay;
- return false;
- }
- return true;
- }
-
- THolder<IActor> CreateFizzBuzzSender() {
- THolder<IActor> actor = MakeHolder<TFizzBuzzSender>();
- THolder<IActor> foobar = MakeHolder<TFizzBuzzToFooBar>(std::move(actor));
- THolder<IActor> fizzEraser = MakeHolder<TWordEraser>(std::move(foobar), "Fizz");
- THolder<IActor> buzzEraser = MakeHolder<TWordEraser>(std::move(fizzEraser), "Buzz");
- return MakeHolder<TWithoutWordsDroper>(std::move(buzzEraser));
- }
-
- Y_UNIT_TEST(Basic) {
- TTestActorRuntimeBase runtime(1, false);
-
- runtime.SetScheduledEventFilter(&ScheduledFilterFunc);
- runtime.SetEventFilter([](NActors::TTestActorRuntimeBase&, TAutoPtr<NActors::IEventHandle>&) {
- return false;
- });
- runtime.Initialize();
-
- TActorId edgeActor = runtime.AllocateEdgeActor();
- TVector<THolder<IActor>> actors(1);
- actors[0] = CreateFizzBuzzSender();
- //actors[1] = CreateFizzBuzzSender();
- THolder<IActor> testActor = MakeHolder<TTestMasterActor>(std::move(actors), edgeActor);
- Write("Start test");
- runtime.Register(testActor.Release());
-
- TAutoPtr<IEventHandle> handle;
- auto ev = runtime.GrabEdgeEventRethrow<TEvents::TEvPing>(handle);
- UNIT_ASSERT(ev);
- Write("Stop test");
- }
-}
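
The removed suite builds chains of TTestDecorator wrappers that can observe or rewrite events before the wrapped actor sees them. A reduced sketch of the same pattern; the include paths are assumptions based on the headers used above:

```cpp
// Illustrative sketch: a pass-through TTestDecorator that counts forwarded
// events, mirroring the DoBeforeSending override used by the removed test.
#include <library/cpp/actors/core/actor.h>   // assumed location of TTestDecorator
#include <library/cpp/actors/core/events.h>  // TEvents::TSystem::Bootstrap

using namespace NActors;

struct TEventCounter: TTestDecorator {
    ui64* Counter;

    TEventCounter(THolder<IActor>&& actor, ui64* counter)
        : TTestDecorator(std::move(actor))
        , Counter(counter)
    {
    }

    bool DoBeforeSending(TAutoPtr<IEventHandle>& ev) override {
        if (ev->Type != TEvents::TSystem::Bootstrap) {
            ++*Counter;   // count only user events, as the decorators above do
        }
        return true;      // true forwards the event to the wrapped actor
    }
};

// Usage, analogous to CreateFizzBuzzSender above:
//   THolder<IActor> counted = MakeHolder<TEventCounter>(std::move(inner), &counter);
```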
diff --git a/library/cpp/actors/testlib/test_runtime.cpp b/library/cpp/actors/testlib/test_runtime.cpp
deleted file mode 100644
index 18e58c21de..0000000000
--- a/library/cpp/actors/testlib/test_runtime.cpp
+++ /dev/null
@@ -1,1968 +0,0 @@
-#include "test_runtime.h"
-
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/callstack.h>
-#include <library/cpp/actors/core/executor_pool_basic.h>
-#include <library/cpp/actors/core/executor_pool_io.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/scheduler_basic.h>
-#include <library/cpp/actors/util/datetime.h>
-#include <library/cpp/actors/protos/services_common.pb.h>
-#include <library/cpp/random_provider/random_provider.h>
-#include <library/cpp/actors/interconnect/interconnect.h>
-#include <library/cpp/actors/interconnect/interconnect_tcp_proxy.h>
-#include <library/cpp/actors/interconnect/interconnect_proxy_wrapper.h>
-
-#include <util/generic/maybe.h>
-#include <util/generic/bt_exception.h>
-#include <util/random/mersenne.h>
-#include <util/string/printf.h>
-#include <typeinfo>
-
-bool VERBOSE = false;
-const bool PRINT_EVENT_BODY = false;
-
-namespace {
-
- TString MakeClusterId() {
- pid_t pid = getpid();
- TStringBuilder uuid;
- uuid << "Cluster for process with id: " << pid;
- return uuid;
- }
-}
-
-namespace NActors {
- ui64 TScheduledEventQueueItem::NextUniqueId = 0;
-
- void PrintEvent(TAutoPtr<IEventHandle>& ev, const TTestActorRuntimeBase* runtime) {
- Cerr << "mailbox: " << ev->GetRecipientRewrite().Hint() << ", type: " << Sprintf("%08x", ev->GetTypeRewrite())
- << ", from " << ev->Sender.LocalId();
- TString name = runtime->GetActorName(ev->Sender);
- if (!name.empty())
- Cerr << " \"" << name << "\"";
- Cerr << ", to " << ev->GetRecipientRewrite().LocalId();
- name = runtime->GetActorName(ev->GetRecipientRewrite());
- if (!name.empty())
- Cerr << " \"" << name << "\"";
- Cerr << ", ";
- if (ev->HasEvent())
- Cerr << " : " << (PRINT_EVENT_BODY ? ev->ToString() : ev->GetTypeName());
- else if (ev->HasBuffer())
- Cerr << " : BUFFER";
- else
- Cerr << " : EMPTY";
-
- Cerr << "\n";
- }
-
- TTestActorRuntimeBase::TNodeDataBase::TNodeDataBase() {
- ActorSystemTimestamp = nullptr;
- ActorSystemMonotonic = nullptr;
- }
-
- void TTestActorRuntimeBase::TNodeDataBase::Stop() {
- if (Poller)
- Poller->Stop();
-
- if (MailboxTable) {
- for (ui32 round = 0; !MailboxTable->Cleanup(); ++round)
- Y_ABORT_UNLESS(round < 10, "cyclic event/actor spawn while trying to shutdown actorsystem stub");
- }
-
- if (ActorSystem)
- ActorSystem->Stop();
-
- ActorSystem.Destroy();
- Poller.Reset();
- }
-
- TTestActorRuntimeBase::TNodeDataBase::~TNodeDataBase() {
- Stop();
- }
-
-
- class TTestActorRuntimeBase::TEdgeActor : public TActor<TEdgeActor> {
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::TEST_ACTOR_RUNTIME;
- }
-
- TEdgeActor(TTestActorRuntimeBase* runtime)
- : TActor(&TEdgeActor::StateFunc)
- , Runtime(runtime)
- {
- }
-
- STFUNC(StateFunc) {
- TGuard<TMutex> guard(Runtime->Mutex);
- bool verbose = (Runtime->CurrentDispatchContext ? !Runtime->CurrentDispatchContext->Options->Quiet : true) && VERBOSE;
- if (Runtime->BlockedOutput.find(ev->Sender) != Runtime->BlockedOutput.end()) {
- verbose = false;
- }
-
- if (verbose) {
- Cerr << "Got event at " << TInstant::MicroSeconds(Runtime->CurrentTimestamp) << ", ";
- PrintEvent(ev, Runtime);
- }
-
- if (!Runtime->EventFilterFunc(*Runtime, ev)) {
- ui32 nodeId = ev->GetRecipientRewrite().NodeId();
- Y_ABORT_UNLESS(nodeId != 0);
- ui32 mailboxHint = ev->GetRecipientRewrite().Hint();
- Runtime->GetMailbox(nodeId, mailboxHint).Send(ev);
- Runtime->MailboxesHasEvents.Signal();
- if (verbose)
- Cerr << "Event was added to sent queue\n";
- }
- else {
- if (verbose)
- Cerr << "Event was dropped\n";
- }
- }
-
- private:
- TTestActorRuntimeBase* Runtime;
- };
-
- void TEventMailBox::Send(TAutoPtr<IEventHandle> ev) {
- IEventHandle* ptr = ev.Get();
- Y_ABORT_UNLESS(ptr);
-#ifdef DEBUG_ORDER_EVENTS
- ui64 counter = NextToSend++;
- TrackSent[ptr] = counter;
-#endif
- Sent.push_back(ev);
- }
-
- TAutoPtr<IEventHandle> TEventMailBox::Pop() {
- TAutoPtr<IEventHandle> result = Sent.front();
- Sent.pop_front();
-#ifdef DEBUG_ORDER_EVENTS
- auto it = TrackSent.find(result.Get());
- if (it != TrackSent.end()) {
- Y_ABORT_UNLESS(ExpectedReceive == it->second);
- TrackSent.erase(result.Get());
- ++ExpectedReceive;
- }
-#endif
- return result;
- }
-
- bool TEventMailBox::IsEmpty() const {
- return Sent.empty();
- }
-
- void TEventMailBox::Capture(TEventsList& evList) {
- evList.insert(evList.end(), Sent.begin(), Sent.end());
- Sent.clear();
- }
-
- void TEventMailBox::PushFront(TAutoPtr<IEventHandle>& ev) {
- Sent.push_front(ev);
- }
-
- void TEventMailBox::PushFront(TEventsList& evList) {
- for (auto rit = evList.rbegin(); rit != evList.rend(); ++rit) {
- if (*rit) {
- Sent.push_front(*rit);
- }
- }
- }
-
- void TEventMailBox::CaptureScheduled(TScheduledEventsList& evList) {
- for (auto it = Scheduled.begin(); it != Scheduled.end(); ++it) {
- evList.insert(*it);
- }
-
- Scheduled.clear();
- }
-
- void TEventMailBox::PushScheduled(TScheduledEventsList& evList) {
- for (auto it = evList.begin(); it != evList.end(); ++it) {
- if (it->Event) {
- Scheduled.insert(*it);
- }
- }
-
- evList.clear();
- }
-
- bool TEventMailBox::IsActive(const TInstant& currentTime) const {
- return currentTime >= InactiveUntil;
- }
-
- void TEventMailBox::Freeze(const TInstant& deadline) {
- if (deadline > InactiveUntil)
- InactiveUntil = deadline;
- }
-
- TInstant TEventMailBox::GetInactiveUntil() const {
- return InactiveUntil;
- }
-
- void TEventMailBox::Schedule(const TScheduledEventQueueItem& item) {
- Scheduled.insert(item);
- }
-
- bool TEventMailBox::IsScheduledEmpty() const {
- return Scheduled.empty();
- }
-
- TInstant TEventMailBox::GetFirstScheduleDeadline() const {
- return Scheduled.begin()->Deadline;
- }
-
- ui64 TEventMailBox::GetSentEventCount() const {
- return Sent.size();
- }
-
- class TTestActorRuntimeBase::TTimeProvider : public ITimeProvider {
- public:
- TTimeProvider(TTestActorRuntimeBase& runtime)
- : Runtime(runtime)
- {
- }
-
- TInstant Now() override {
- return Runtime.GetCurrentTime();
- }
-
- private:
- TTestActorRuntimeBase& Runtime;
- };
-
- class TTestActorRuntimeBase::TMonotonicTimeProvider : public IMonotonicTimeProvider {
- public:
- TMonotonicTimeProvider(TTestActorRuntimeBase& runtime)
- : Runtime(runtime)
- { }
-
- TMonotonic Now() override {
- return Runtime.GetCurrentMonotonicTime();
- }
-
- private:
- TTestActorRuntimeBase& Runtime;
- };
-
- class TTestActorRuntimeBase::TSchedulerThreadStub : public ISchedulerThread {
- public:
- TSchedulerThreadStub(TTestActorRuntimeBase* runtime, TTestActorRuntimeBase::TNodeDataBase* node)
- : Runtime(runtime)
- , Node(node)
- {
- Y_UNUSED(Runtime);
- }
-
- void Prepare(TActorSystem *actorSystem, volatile ui64 *currentTimestamp, volatile ui64 *currentMonotonic) override {
- Y_UNUSED(actorSystem);
- Node->ActorSystemTimestamp = currentTimestamp;
- Node->ActorSystemMonotonic = currentMonotonic;
- }
-
- void PrepareSchedules(NSchedulerQueue::TReader **readers, ui32 scheduleReadersCount) override {
- Y_UNUSED(readers);
- Y_UNUSED(scheduleReadersCount);
- }
-
- void Start() override {
- }
-
- void PrepareStop() override {
- }
-
- void Stop() override {
- }
-
- private:
- TTestActorRuntimeBase* Runtime;
- TTestActorRuntimeBase::TNodeDataBase* Node;
- };
-
- class TTestActorRuntimeBase::TExecutorPoolStub : public IExecutorPool {
- public:
- TExecutorPoolStub(TTestActorRuntimeBase* runtime, ui32 nodeIndex, TTestActorRuntimeBase::TNodeDataBase* node, ui32 poolId)
- : IExecutorPool(poolId)
- , Runtime(runtime)
- , NodeIndex(nodeIndex)
- , Node(node)
- {
- }
-
- TTestActorRuntimeBase* GetRuntime() {
- return Runtime;
- }
-
- // for threads
- ui32 GetReadyActivation(TWorkerContext& wctx, ui64 revolvingCounter) override {
- Y_UNUSED(wctx);
- Y_UNUSED(revolvingCounter);
- Y_ABORT();
- }
-
- void ReclaimMailbox(TMailboxType::EType mailboxType, ui32 hint, TWorkerId workerId, ui64 revolvingCounter) override {
- Y_UNUSED(workerId);
- Node->MailboxTable->ReclaimMailbox(mailboxType, hint, revolvingCounter);
- }
-
- TMailboxHeader *ResolveMailbox(ui32 hint) override {
- return Node->MailboxTable->Get(hint);
- }
-
- void Schedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie *cookie, TWorkerId workerId) override {
- DoSchedule(deadline, ev, cookie, workerId);
- }
-
- void Schedule(TMonotonic deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie *cookie, TWorkerId workerId) override {
- DoSchedule(TInstant::FromValue(deadline.GetValue()), ev, cookie, workerId);
- }
-
- void Schedule(TDuration delay, TAutoPtr<IEventHandle> ev, ISchedulerCookie *cookie, TWorkerId workerId) override {
- TInstant deadline = Runtime->GetTimeProvider()->Now() + delay;
- DoSchedule(deadline, ev, cookie, workerId);
- }
-
- void DoSchedule(TInstant deadline, TAutoPtr<IEventHandle> ev, ISchedulerCookie *cookie, TWorkerId workerId) {
- Y_UNUSED(workerId);
-
- TGuard<TMutex> guard(Runtime->Mutex);
- bool verbose = (Runtime->CurrentDispatchContext ? !Runtime->CurrentDispatchContext->Options->Quiet : true) && VERBOSE;
- if (Runtime->BlockedOutput.find(ev->Sender) != Runtime->BlockedOutput.end()) {
- verbose = false;
- }
-
- if (verbose) {
- Cerr << "Got scheduled event at " << TInstant::MicroSeconds(Runtime->CurrentTimestamp) << ", ";
- PrintEvent(ev, Runtime);
- }
-
- auto now = Runtime->GetTimeProvider()->Now();
- if (deadline < now) {
- deadline = now; // avoid going backwards in time
- }
- TDuration delay = (deadline - now);
-
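- // By default (NopFilterFunc) scheduled events are only enqueued in single-system mode;
- // a custom ScheduledEventFilterFunc can let them through, otherwise the event is dropped and its cookie detached.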
- if (Runtime->SingleSysEnv || !Runtime->ScheduledEventFilterFunc(*Runtime, ev, delay, deadline)) {
- ui32 mailboxHint = ev->GetRecipientRewrite().Hint();
- Runtime->GetMailbox(Runtime->FirstNodeId + NodeIndex, mailboxHint).Schedule(TScheduledEventQueueItem(deadline, ev, cookie));
- Runtime->MailboxesHasEvents.Signal();
- if (verbose)
- Cerr << "Event was added to scheduled queue\n";
- } else {
- if (cookie) {
- cookie->Detach();
- }
- if (verbose) {
- Cerr << "Scheduled event for " << ev->GetRecipientRewrite().ToString() << " was dropped\n";
- }
- }
- }
-
- // for actorsystem
- bool SpecificSend(TAutoPtr<IEventHandle>& ev) override {
- return Send(ev);
- }
-
- bool Send(TAutoPtr<IEventHandle>& ev) override {
- TGuard<TMutex> guard(Runtime->Mutex);
- bool verbose = (Runtime->CurrentDispatchContext ? !Runtime->CurrentDispatchContext->Options->Quiet : true) && VERBOSE;
- if (Runtime->BlockedOutput.find(ev->Sender) != Runtime->BlockedOutput.end()) {
- verbose = false;
- }
-
- if (verbose) {
- Cerr << "Got event at " << TInstant::MicroSeconds(Runtime->CurrentTimestamp) << ", ";
- PrintEvent(ev, Runtime);
- }
-
- if (!Runtime->EventFilterFunc(*Runtime, ev)) {
- ui32 nodeId = ev->GetRecipientRewrite().NodeId();
- Y_ABORT_UNLESS(nodeId != 0);
- TNodeDataBase* node = Runtime->Nodes[nodeId].Get();
-
- if (!AllowSendFrom(node, ev)) {
- return true;
- }
-
- ui32 mailboxHint = ev->GetRecipientRewrite().Hint();
- if (ev->GetTypeRewrite() == ui32(NActors::NLog::EEv::Log)) {
- const NActors::TActorId loggerActorId = NActors::TActorId(nodeId, "logger");
- TActorId logger = node->ActorSystem->LookupLocalService(loggerActorId);
- if (ev->GetRecipientRewrite() == logger) {
- TMailboxHeader* mailbox = node->MailboxTable->Get(mailboxHint);
- IActor* recipientActor = mailbox->FindActor(ev->GetRecipientRewrite().LocalId());
- if (recipientActor) {
- TActorContext ctx(*mailbox, *node->ExecutorThread, GetCycleCountFast(), ev->GetRecipientRewrite());
- TActivationContext *prevTlsActivationContext = TlsActivationContext;
- TlsActivationContext = &ctx;
- recipientActor->Receive(ev);
- TlsActivationContext = prevTlsActivationContext;
- // we expect the logger to never die in tests
- }
- }
- } else {
- Runtime->GetMailbox(nodeId, mailboxHint).Send(ev);
- Runtime->MailboxesHasEvents.Signal();
- }
- if (verbose)
- Cerr << "Event was added to sent queue\n";
- } else {
- if (verbose)
- Cerr << "Event was dropped\n";
- }
- return true;
- }
-
- void ScheduleActivation(ui32 activation) override {
- Y_UNUSED(activation);
- }
-
- void SpecificScheduleActivation(ui32 activation) override {
- Y_UNUSED(activation);
- }
-
- void ScheduleActivationEx(ui32 activation, ui64 revolvingCounter) override {
- Y_UNUSED(activation);
- Y_UNUSED(revolvingCounter);
- }
-
- TActorId Register(IActor *actor, TMailboxType::EType mailboxType, ui64 revolvingCounter,
- const TActorId& parentId) override {
- return Runtime->Register(actor, NodeIndex, PoolId, mailboxType, revolvingCounter, parentId);
- }
-
- TActorId Register(IActor *actor, TMailboxHeader *mailbox, ui32 hint, const TActorId& parentId) override {
- return Runtime->Register(actor, NodeIndex, PoolId, mailbox, hint, parentId);
- }
-
- // lifecycle stuff
- void Prepare(TActorSystem *actorSystem, NSchedulerQueue::TReader **scheduleReaders, ui32 *scheduleSz) override {
- Y_UNUSED(actorSystem);
- Y_UNUSED(scheduleReaders);
- Y_UNUSED(scheduleSz);
- }
-
- void Start() override {
- }
-
- void PrepareStop() override {
- }
-
- void Shutdown() override {
- }
-
- bool Cleanup() override {
- return true;
- }
-
- // generic
- TAffinity* Affinity() const override {
- Y_ABORT();
- }
-
- private:
- TTestActorRuntimeBase* const Runtime;
- const ui32 NodeIndex;
- TTestActorRuntimeBase::TNodeDataBase* const Node;
- };
-
- IExecutorPool* TTestActorRuntimeBase::CreateExecutorPoolStub(TTestActorRuntimeBase* runtime, ui32 nodeIndex, TTestActorRuntimeBase::TNodeDataBase* node, ui32 poolId) {
- return new TExecutorPoolStub{runtime, nodeIndex, node, poolId};
- }
-
-
- ui32 TTestActorRuntimeBase::NextNodeId = 1;
-
- TTestActorRuntimeBase::TTestActorRuntimeBase(THeSingleSystemEnv)
- : TTestActorRuntimeBase(1, 1, false)
- {
- SingleSysEnv = true;
- }
-
- TTestActorRuntimeBase::TTestActorRuntimeBase(ui32 nodeCount, ui32 dataCenterCount, bool useRealThreads)
- : ScheduledCount(0)
- , ScheduledLimit(100000)
- , MainThreadId(TThread::CurrentThreadId())
- , ClusterUUID(MakeClusterId())
- , FirstNodeId(NextNodeId)
- , NodeCount(nodeCount)
- , DataCenterCount(dataCenterCount)
- , UseRealThreads(useRealThreads)
- , LocalId(0)
- , DispatchCyclesCount(0)
- , DispatchedEventsCount(0)
- , NeedMonitoring(false)
- , RandomProvider(CreateDeterministicRandomProvider(DefaultRandomSeed))
- , TimeProvider(new TTimeProvider(*this))
- , MonotonicTimeProvider(new TMonotonicTimeProvider(*this))
- , ShouldContinue()
- , CurrentTimestamp(0)
- , DispatchTimeout(DEFAULT_DISPATCH_TIMEOUT)
- , ReschedulingDelay(TDuration::MicroSeconds(0))
- , ObserverFunc(&TTestActorRuntimeBase::DefaultObserverFunc)
- , ScheduledEventsSelectorFunc(&CollapsedTimeScheduledEventsSelector)
- , EventFilterFunc(&TTestActorRuntimeBase::DefaultFilterFunc)
- , ScheduledEventFilterFunc(&TTestActorRuntimeBase::NopFilterFunc)
- , RegistrationObserver(&TTestActorRuntimeBase::DefaultRegistrationObserver)
- , CurrentDispatchContext(nullptr)
- {
- SetDispatcherRandomSeed(TInstant::Now(), 0);
- EnableActorCallstack();
- }
-
- void TTestActorRuntimeBase::InitNode(TNodeDataBase* node, size_t nodeIndex) {
- const NActors::TActorId loggerActorId = NActors::TActorId(FirstNodeId + nodeIndex, "logger");
- node->LogSettings = new NActors::NLog::TSettings(loggerActorId, NActorsServices::LOGGER,
- NActors::NLog::PRI_WARN, NActors::NLog::PRI_WARN, 0);
- node->LogSettings->SetAllowDrop(false);
- node->LogSettings->SetThrottleDelay(TDuration::Zero());
- node->DynamicCounters = new NMonitoring::TDynamicCounters;
-
- InitNodeImpl(node, nodeIndex);
- }
-
- void TTestActorRuntimeBase::InitNodeImpl(TNodeDataBase* node, size_t nodeIndex) {
- node->LogSettings->Append(
- NActorsServices::EServiceCommon_MIN,
- NActorsServices::EServiceCommon_MAX,
- NActorsServices::EServiceCommon_Name
- );
-
- if (!UseRealThreads) {
- node->SchedulerPool.Reset(CreateExecutorPoolStub(this, nodeIndex, node, 0));
- node->MailboxTable.Reset(new TMailboxTable());
- node->ActorSystem = MakeActorSystem(nodeIndex, node);
- node->ExecutorThread.Reset(new TExecutorThread(0, 0, node->ActorSystem.Get(), node->SchedulerPool.Get(), node->MailboxTable.Get(), "TestExecutor"));
- } else {
- node->ActorSystem = MakeActorSystem(nodeIndex, node);
- }
-
- node->ActorSystem->Start();
- }
-
- bool TTestActorRuntimeBase::AllowSendFrom(TNodeDataBase* node, TAutoPtr<IEventHandle>& ev) {
- ui64 senderLocalId = ev->Sender.LocalId();
- ui64 senderMailboxHint = ev->Sender.Hint();
- TMailboxHeader* senderMailbox = node->MailboxTable->Get(senderMailboxHint);
- if (senderMailbox) {
- IActor* senderActor = senderMailbox->FindActor(senderLocalId);
- TTestDecorator *decorator = dynamic_cast<TTestDecorator*>(senderActor);
- return !decorator || decorator->BeforeSending(ev);
- }
- return true;
- }
-
- TTestActorRuntimeBase::TTestActorRuntimeBase(ui32 nodeCount, ui32 dataCenterCount)
- : TTestActorRuntimeBase(nodeCount, dataCenterCount, false) {
- }
-
- TTestActorRuntimeBase::TTestActorRuntimeBase(ui32 nodeCount, bool useRealThreads)
- : TTestActorRuntimeBase(nodeCount, nodeCount, useRealThreads) {
- }
-
- TTestActorRuntimeBase::~TTestActorRuntimeBase() {
- CleanupNodes();
- Cerr.Flush();
- Clog.Flush();
-
- DisableActorCallstack();
- }
-
- void TTestActorRuntimeBase::CleanupNodes() {
- Nodes.clear();
- }
-
- bool TTestActorRuntimeBase::IsRealThreads() const {
- return UseRealThreads;
- }
-
- TTestActorRuntimeBase::EEventAction TTestActorRuntimeBase::DefaultObserverFunc(TAutoPtr<IEventHandle>& event) {
- Y_UNUSED(event);
- return EEventAction::PROCESS;
- }
-
- void TTestActorRuntimeBase::DroppingScheduledEventsSelector(TTestActorRuntimeBase& runtime, TScheduledEventsList& scheduledEvents, TEventsList& queue) {
- Y_UNUSED(runtime);
- Y_UNUSED(queue);
- scheduledEvents.clear();
- }
-
- bool TTestActorRuntimeBase::DefaultFilterFunc(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
- Y_UNUSED(runtime);
- Y_UNUSED(event);
- return false;
- }
-
- bool TTestActorRuntimeBase::NopFilterFunc(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event, TDuration delay, TInstant& deadline) {
- Y_UNUSED(runtime);
- Y_UNUSED(delay);
- Y_UNUSED(event);
- Y_UNUSED(deadline);
- return true;
- }
-
-
- void TTestActorRuntimeBase::DefaultRegistrationObserver(TTestActorRuntimeBase& runtime, const TActorId& parentId, const TActorId& actorId) {
- if (runtime.ScheduleWhiteList.find(parentId) != runtime.ScheduleWhiteList.end()) {
- runtime.ScheduleWhiteList.insert(actorId);
- runtime.ScheduleWhiteListParent[actorId] = parentId;
- }
- }
-
- class TScheduledTreeItem {
- public:
- TString Name;
- ui64 Count;
- TVector<TScheduledTreeItem> Children;
-
- TScheduledTreeItem(const TString& name)
- : Name(name)
- , Count(0)
- {}
-
- TScheduledTreeItem* GetItem(const TString& name) {
- TScheduledTreeItem* item = nullptr;
- for (TScheduledTreeItem& i : Children) {
- if (i.Name == name) {
- item = &i;
- break;
- }
- }
- if (item != nullptr)
- return item;
- Children.emplace_back(name);
- return &Children.back();
- }
-
- void RecursiveSort() {
- Sort(Children, [](const TScheduledTreeItem& a, const TScheduledTreeItem& b) -> bool { return a.Count > b.Count; });
- for (TScheduledTreeItem& item : Children) {
- item.RecursiveSort();
- }
- }
-
- void Print(IOutputStream& stream, const TString& prefix) {
- for (auto it = Children.begin(); it != Children.end(); ++it) {
- bool lastChild = (std::next(it) == Children.end());
- TString connectionPrefix = lastChild ? "└─ " : "├─ ";
- TString subChildPrefix = lastChild ? " " : "│ ";
- stream << prefix << connectionPrefix << it->Name << " (" << it->Count << ")\n";
- it->Print(stream, prefix + subChildPrefix);
- }
- }
-
- void Print(IOutputStream& stream) {
- stream << Name << " (" << Count << ")\n";
- Print(stream, TString());
- }
- };
-
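- // Collapses virtual time: moves every scheduled event sharing the earliest deadline into the dispatch queue
- // and then advances the runtime clock to that deadline.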
- void TTestActorRuntimeBase::CollapsedTimeScheduledEventsSelector(TTestActorRuntimeBase& runtime, TScheduledEventsList& scheduledEvents, TEventsList& queue) {
- if (scheduledEvents.empty())
- return;
-
- TInstant time = scheduledEvents.begin()->Deadline;
- while (!scheduledEvents.empty() && scheduledEvents.begin()->Deadline == time) {
-// static THashMap<std::pair<TActorId, TString>, ui64> eventTypes;
- auto& item = *scheduledEvents.begin();
- TString name = item.Event->GetTypeName();
-// eventTypes[std::make_pair(item.Event->Recipient, name)]++;
- runtime.ScheduledCount++;
- if (runtime.ScheduledCount > runtime.ScheduledLimit) {
-// TScheduledTreeItem root("Root");
-// TVector<TString> path;
-// for (const auto& pr : eventTypes) {
-// path.clear();
-// path.push_back(runtime.GetActorName(pr.first.first));
-// for (auto it = runtime.ScheduleWhiteListParent.find(pr.first.first); it != runtime.ScheduleWhiteListParent.end(); it = runtime.ScheduleWhiteListParent.find(it->second)) {
-// path.insert(path.begin(), runtime.GetActorName(it->second));
-// }
-// path.push_back("<" + pr.first.second + ">"); // event name;
-// ui64 count = pr.second;
-// TScheduledTreeItem* item = &root;
-// item->Count += count;
-// for (TString name : path) {
-// item = item->GetItem(name);
-// item->Count += count;
-// }
-// }
-// root.RecursiveSort();
-// root.Print(Cerr);
-
- ythrow TSchedulingLimitReachedException(runtime.ScheduledLimit);
- }
- if (item.Cookie->Get()) {
- if (item.Cookie->Detach()) {
- queue.push_back(item.Event);
- }
- } else {
- queue.push_back(item.Event);
- }
-
- scheduledEvents.erase(scheduledEvents.begin());
- }
-
- runtime.UpdateCurrentTime(time);
- }
-
- TTestActorRuntimeBase::TEventObserver TTestActorRuntimeBase::SetObserverFunc(TEventObserver observerFunc) {
- TGuard<TMutex> guard(Mutex);
- auto result = ObserverFunc;
- ObserverFunc = observerFunc;
- return result;
- }
-
- TTestActorRuntimeBase::TScheduledEventsSelector TTestActorRuntimeBase::SetScheduledEventsSelectorFunc(TScheduledEventsSelector scheduledEventsSelectorFunc) {
- TGuard<TMutex> guard(Mutex);
- auto result = ScheduledEventsSelectorFunc;
- ScheduledEventsSelectorFunc = scheduledEventsSelectorFunc;
- return result;
- }
-
- TTestActorRuntimeBase::TEventFilter TTestActorRuntimeBase::SetEventFilter(TEventFilter filterFunc) {
- TGuard<TMutex> guard(Mutex);
- auto result = EventFilterFunc;
- EventFilterFunc = filterFunc;
- return result;
- }
-
- TTestActorRuntimeBase::TScheduledEventFilter TTestActorRuntimeBase::SetScheduledEventFilter(TScheduledEventFilter filterFunc) {
- TGuard<TMutex> guard(Mutex);
- auto result = ScheduledEventFilterFunc;
- ScheduledEventFilterFunc = filterFunc;
- return result;
- }
-
- TTestActorRuntimeBase::TRegistrationObserver TTestActorRuntimeBase::SetRegistrationObserverFunc(TRegistrationObserver observerFunc) {
- TGuard<TMutex> guard(Mutex);
- auto result = RegistrationObserver;
- RegistrationObserver = observerFunc;
- return result;
- }
-
- bool TTestActorRuntimeBase::IsVerbose() {
- return VERBOSE;
- }
-
- void TTestActorRuntimeBase::SetVerbose(bool verbose) {
- VERBOSE = verbose;
- }
-
- void TTestActorRuntimeBase::AddLocalService(const TActorId& actorId, TActorSetupCmd cmd, ui32 nodeIndex) {
- Y_ABORT_UNLESS(!IsInitialized);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- auto node = Nodes[nodeIndex + FirstNodeId];
- if (!node) {
- node = GetNodeFactory().CreateNode();
- Nodes[nodeIndex + FirstNodeId] = node;
- }
-
- node->LocalServicesActors[actorId] = cmd.Actor.get();
- node->LocalServices.push_back(std::make_pair(actorId, TTestActorSetupCmd(std::move(cmd))));
- }
-
- void TTestActorRuntimeBase::InitNodes() {
- NextNodeId += NodeCount;
- Y_ABORT_UNLESS(NodeCount > 0);
-
- for (ui32 nodeIndex = 0; nodeIndex < NodeCount; ++nodeIndex) {
- auto nodeIt = Nodes.emplace(FirstNodeId + nodeIndex, GetNodeFactory().CreateNode()).first;
- TNodeDataBase* node = nodeIt->second.Get();
- InitNode(node, nodeIndex);
- }
-
- }
-
- void TTestActorRuntimeBase::Initialize() {
- InitNodes();
- IsInitialized = true;
- }
-
- void SetupCrossDC() {
-
- }
-
- TDuration TTestActorRuntimeBase::SetDispatchTimeout(TDuration timeout) {
- TGuard<TMutex> guard(Mutex);
- TDuration oldTimeout = DispatchTimeout;
- DispatchTimeout = timeout;
- return oldTimeout;
- }
-
- TDuration TTestActorRuntimeBase::SetReschedulingDelay(TDuration delay) {
- TGuard<TMutex> guard(Mutex);
- TDuration oldDelay = ReschedulingDelay;
- ReschedulingDelay = delay;
- return oldDelay;
- }
-
- void TTestActorRuntimeBase::SetLogBackend(const TAutoPtr<TLogBackend> logBackend) {
- Y_ABORT_UNLESS(!IsInitialized);
- TGuard<TMutex> guard(Mutex);
- LogBackend = logBackend;
- }
-
- void TTestActorRuntimeBase::SetLogPriority(NActors::NLog::EComponent component, NActors::NLog::EPriority priority) {
- TGuard<TMutex> guard(Mutex);
- for (ui32 nodeIndex = 0; nodeIndex < NodeCount; ++nodeIndex) {
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndex].Get();
- TString explanation;
- auto status = node->LogSettings->SetLevel(priority, component, explanation);
- if (status) {
- Y_ABORT("SetLogPriority failed: %s", explanation.c_str());
- }
- }
- }
-
- TInstant TTestActorRuntimeBase::GetCurrentTime() const {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(!UseRealThreads);
- return TInstant::MicroSeconds(CurrentTimestamp);
- }
-
- TMonotonic TTestActorRuntimeBase::GetCurrentMonotonicTime() const {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(!UseRealThreads);
- return TMonotonic::MicroSeconds(CurrentTimestamp);
- }
-
- void TTestActorRuntimeBase::UpdateCurrentTime(TInstant newTime) {
- static int counter = 0;
- ++counter;
- if (VERBOSE) {
- Cerr << "UpdateCurrentTime(" << counter << "," << newTime << ")\n";
- }
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(!UseRealThreads);
- if (newTime.MicroSeconds() > CurrentTimestamp) {
- CurrentTimestamp = newTime.MicroSeconds();
- for (auto& kv : Nodes) {
- AtomicStore(kv.second->ActorSystemTimestamp, CurrentTimestamp);
- AtomicStore(kv.second->ActorSystemMonotonic, CurrentTimestamp);
- }
- }
- }
-
- void TTestActorRuntimeBase::AdvanceCurrentTime(TDuration duration) {
- UpdateCurrentTime(GetCurrentTime() + duration);
- }
-
- TIntrusivePtr<ITimeProvider> TTestActorRuntimeBase::GetTimeProvider() {
- Y_ABORT_UNLESS(!UseRealThreads);
- return TimeProvider;
- }
-
- TIntrusivePtr<IMonotonicTimeProvider> TTestActorRuntimeBase::GetMonotonicTimeProvider() {
- Y_ABORT_UNLESS(!UseRealThreads);
- return MonotonicTimeProvider;
- }
-
- ui32 TTestActorRuntimeBase::GetNodeId(ui32 index) const {
- Y_ABORT_UNLESS(index < NodeCount);
- return FirstNodeId + index;
- }
-
- ui32 TTestActorRuntimeBase::GetNodeCount() const {
- return NodeCount;
- }
-
- ui64 TTestActorRuntimeBase::AllocateLocalId() {
- TGuard<TMutex> guard(Mutex);
- ui64 nextId = ++LocalId;
- if (VERBOSE) {
- Cerr << "Allocated id: " << nextId << "\n";
- }
-
- return nextId;
- }
-
- ui32 TTestActorRuntimeBase::InterconnectPoolId() const {
- if (UseRealThreads && NSan::TSanIsOn()) {
- // Interconnect coroutines may move across threads
- // Use a special single-threaded pool to avoid that
- return 4;
- }
- return 0;
- }
-
- TString TTestActorRuntimeBase::GetTempDir() {
- if (!TmpDir)
- TmpDir.Reset(new TTempDir());
- return (*TmpDir)();
- }
-
- TActorId TTestActorRuntimeBase::Register(IActor* actor, ui32 nodeIndex, ui32 poolId, TMailboxType::EType mailboxType,
- ui64 revolvingCounter, const TActorId& parentId) {
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- TGuard<TMutex> guard(Mutex);
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndex].Get();
- if (UseRealThreads) {
- Y_ABORT_UNLESS(poolId < node->ExecutorPools.size());
- return node->ExecutorPools[poolId]->Register(actor, mailboxType, revolvingCounter, parentId);
- }
-
- // first step - find a good enough mailbox
- ui32 hint = 0;
- TMailboxHeader *mailbox = nullptr;
-
- {
- ui32 hintBackoff = 0;
-
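- // Allocate a mailbox and lock it from the free state; if locking fails, remember the hint for
- // reclaiming and retry with a fresh allocation.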
- while (hint == 0) {
- hint = node->MailboxTable->AllocateMailbox(mailboxType, ++revolvingCounter);
- mailbox = node->MailboxTable->Get(hint);
-
- if (!mailbox->LockFromFree()) {
- node->MailboxTable->ReclaimMailbox(mailboxType, hintBackoff, ++revolvingCounter);
- hintBackoff = hint;
- hint = 0;
- }
- }
-
- node->MailboxTable->ReclaimMailbox(mailboxType, hintBackoff, ++revolvingCounter);
- }
-
- const ui64 localActorId = AllocateLocalId();
- if (VERBOSE) {
- Cerr << "Register actor " << TypeName(*actor) << " as " << localActorId << ", mailbox: " << hint << "\n";
- }
-
- // ok, got mailbox
- mailbox->AttachActor(localActorId, actor);
-
- // do init
- const TActorId actorId(FirstNodeId + nodeIndex, poolId, localActorId, hint);
- ActorNames[actorId] = TypeName(*actor);
- RegistrationObserver(*this, parentId ? parentId : CurrentRecipient, actorId);
- DoActorInit(node->ActorSystem.Get(), actor, actorId, parentId ? parentId : CurrentRecipient);
-
- switch (mailboxType) {
- case TMailboxType::Simple:
- UnlockFromExecution((TMailboxTable::TSimpleMailbox *)mailbox, node->ExecutorPools[0], false, hint, MaxWorkers, ++revolvingCounter);
- break;
- case TMailboxType::Revolving:
- UnlockFromExecution((TMailboxTable::TRevolvingMailbox *)mailbox, node->ExecutorPools[0], false, hint, MaxWorkers, ++revolvingCounter);
- break;
- case TMailboxType::HTSwap:
- UnlockFromExecution((TMailboxTable::THTSwapMailbox *)mailbox, node->ExecutorPools[0], false, hint, MaxWorkers, ++revolvingCounter);
- break;
- case TMailboxType::ReadAsFilled:
- UnlockFromExecution((TMailboxTable::TReadAsFilledMailbox *)mailbox, node->ExecutorPools[0], false, hint, MaxWorkers, ++revolvingCounter);
- break;
- case TMailboxType::TinyReadAsFilled:
- UnlockFromExecution((TMailboxTable::TTinyReadAsFilledMailbox *)mailbox, node->ExecutorPools[0], false, hint, MaxWorkers, ++revolvingCounter);
- break;
- default:
- Y_ABORT("Unsupported mailbox type");
- }
-
- return actorId;
- }
-
- TActorId TTestActorRuntimeBase::Register(IActor *actor, ui32 nodeIndex, ui32 poolId, TMailboxHeader *mailbox, ui32 hint,
- const TActorId& parentId) {
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- TGuard<TMutex> guard(Mutex);
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndex].Get();
- if (UseRealThreads) {
- Y_ABORT_UNLESS(poolId < node->ExecutorPools.size());
- return node->ExecutorPools[poolId]->Register(actor, mailbox, hint, parentId);
- }
-
- const ui64 localActorId = AllocateLocalId();
- if (VERBOSE) {
- Cerr << "Register actor " << TypeName(*actor) << " as " << localActorId << "\n";
- }
-
- mailbox->AttachActor(localActorId, actor);
- const TActorId actorId(FirstNodeId + nodeIndex, poolId, localActorId, hint);
- ActorNames[actorId] = TypeName(*actor);
- RegistrationObserver(*this, parentId ? parentId : CurrentRecipient, actorId);
- DoActorInit(node->ActorSystem.Get(), actor, actorId, parentId ? parentId : CurrentRecipient);
-
- return actorId;
- }
-
- TActorId TTestActorRuntimeBase::RegisterService(const TActorId& serviceId, const TActorId& actorId, ui32 nodeIndex) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndex].Get();
- if (!UseRealThreads) {
- IActor* actor = FindActor(actorId, node);
- node->LocalServicesActors[serviceId] = actor;
- node->ActorToActorId[actor] = actorId;
- }
-
- return node->ActorSystem->RegisterLocalService(serviceId, actorId);
- }
-
- TActorId TTestActorRuntimeBase::AllocateEdgeActor(ui32 nodeIndex) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- TActorId edgeActor = Register(new TEdgeActor(this), nodeIndex);
- EdgeActors.insert(edgeActor);
- EdgeActorByMailbox[TEventMailboxId(edgeActor.NodeId(), edgeActor.Hint())] = edgeActor;
- return edgeActor;
- }
-
- TEventsList TTestActorRuntimeBase::CaptureEvents() {
- TGuard<TMutex> guard(Mutex);
- TEventsList result;
- for (auto& mbox : Mailboxes) {
- mbox.second->Capture(result);
- }
-
- return result;
- }
-
- TEventsList TTestActorRuntimeBase::CaptureMailboxEvents(ui32 hint, ui32 nodeId) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeId >= FirstNodeId && nodeId < FirstNodeId + NodeCount);
- TEventsList result;
- GetMailbox(nodeId, hint).Capture(result);
- return result;
- }
-
- void TTestActorRuntimeBase::PushFront(TAutoPtr<IEventHandle>& ev) {
- TGuard<TMutex> guard(Mutex);
- ui32 nodeId = ev->GetRecipientRewrite().NodeId();
- Y_ABORT_UNLESS(nodeId != 0);
- GetMailbox(nodeId, ev->GetRecipientRewrite().Hint()).PushFront(ev);
- }
-
- void TTestActorRuntimeBase::PushEventsFront(TEventsList& events) {
- TGuard<TMutex> guard(Mutex);
- for (auto rit = events.rbegin(); rit != events.rend(); ++rit) {
- if (*rit) {
- auto& ev = *rit;
- ui32 nodeId = ev->GetRecipientRewrite().NodeId();
- Y_ABORT_UNLESS(nodeId != 0);
- GetMailbox(nodeId, ev->GetRecipientRewrite().Hint()).PushFront(ev);
- }
- }
-
- events.clear();
- }
-
- void TTestActorRuntimeBase::PushMailboxEventsFront(ui32 hint, ui32 nodeId, TEventsList& events) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeId >= FirstNodeId && nodeId < FirstNodeId + NodeCount);
- GetMailbox(nodeId, hint).PushFront(events);
- events.clear();
- }
-
- TScheduledEventsList TTestActorRuntimeBase::CaptureScheduledEvents() {
- TGuard<TMutex> guard(Mutex);
- TScheduledEventsList result;
- for (auto& mbox : Mailboxes) {
- mbox.second->CaptureScheduled(result);
- }
-
- return result;
- }
-
- bool TTestActorRuntimeBase::DispatchEvents(const TDispatchOptions& options) {
- return DispatchEvents(options, TInstant::Max());
- }
-
- bool TTestActorRuntimeBase::DispatchEvents(const TDispatchOptions& options, TDuration simTimeout) {
- return DispatchEvents(options, TInstant::MicroSeconds(CurrentTimestamp) + simTimeout);
- }
-
- bool TTestActorRuntimeBase::DispatchEvents(const TDispatchOptions& options, TInstant simDeadline) {
- TGuard<TMutex> guard(Mutex);
- return DispatchEventsInternal(options, simDeadline);
- }
-
- // Mutex must be locked by caller!
- bool TTestActorRuntimeBase::DispatchEventsInternal(const TDispatchOptions& options, TInstant simDeadline) {
- TDispatchContext localContext;
- localContext.Options = &options;
- localContext.PrevContext = nullptr;
- bool verbose = !options.Quiet && VERBOSE;
-
- struct TDispatchContextSetter {
- TDispatchContextSetter(TTestActorRuntimeBase& runtime, TDispatchContext& lastContext)
- : Runtime(runtime)
- {
- lastContext.PrevContext = Runtime.CurrentDispatchContext;
- Runtime.CurrentDispatchContext = &lastContext;
- }
-
- ~TDispatchContextSetter() {
- Runtime.CurrentDispatchContext = Runtime.CurrentDispatchContext->PrevContext;
- }
-
- TTestActorRuntimeBase& Runtime;
- } DispatchContextSetter(*this, localContext);
-
- TInstant dispatchTime = TInstant::MicroSeconds(0);
- TInstant deadline = dispatchTime + DispatchTimeout;
- const TDuration scheduledEventsInspectInterval = TDuration::MilliSeconds(10);
- TInstant inspectScheduledEventsAt = dispatchTime + scheduledEventsInspectInterval;
- if (verbose) {
- Cerr << "Start dispatch at " << TInstant::MicroSeconds(CurrentTimestamp) << ", deadline is " << deadline << "\n";
- }
-
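- // Temporarily captures any events pending in edge-actor mailboxes and pushes them back once dispatch completes.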
- struct TTempEdgeEventsCaptor {
- TTempEdgeEventsCaptor(TTestActorRuntimeBase& runtime)
- : Runtime(runtime)
- , HasEvents(false)
- {
- for (auto edgeActor : Runtime.EdgeActors) {
- TEventsList events;
- Runtime.GetMailbox(edgeActor.NodeId(), edgeActor.Hint()).Capture(events);
- auto mboxId = TEventMailboxId(edgeActor.NodeId(), edgeActor.Hint());
- auto storeIt = Store.find(mboxId);
- Y_ABORT_UNLESS(storeIt == Store.end());
- storeIt = Store.insert(std::make_pair(mboxId, new TEventMailBox)).first;
- storeIt->second->PushFront(events);
- if (!events.empty())
- HasEvents = true;
- }
- }
-
- ~TTempEdgeEventsCaptor() {
- for (auto edgeActor : Runtime.EdgeActors) {
- auto mboxId = TEventMailboxId(edgeActor.NodeId(), edgeActor.Hint());
- auto storeIt = Store.find(mboxId);
- if (storeIt == Store.end()) {
- continue;
- }
-
- TEventsList events;
- storeIt->second->Capture(events);
- Runtime.GetMailbox(edgeActor.NodeId(), edgeActor.Hint()).PushFront(events);
- }
- }
-
- TTestActorRuntimeBase& Runtime;
- TEventMailBoxList Store;
- bool HasEvents;
- };
-
- TEventMailBoxList restrictedMailboxes;
- const bool useRestrictedMailboxes = !options.OnlyMailboxes.empty();
- for (auto mailboxId : options.OnlyMailboxes) {
- auto it = Mailboxes.find(mailboxId);
- if (it == Mailboxes.end()) {
- it = Mailboxes.insert(std::make_pair(mailboxId, new TEventMailBox())).first;
- }
-
- restrictedMailboxes.insert(std::make_pair(mailboxId, it->second));
- }
-
- TAutoPtr<TTempEdgeEventsCaptor> tempEdgeEventsCaptor;
- if (!restrictedMailboxes) {
- tempEdgeEventsCaptor.Reset(new TTempEdgeEventsCaptor(*this));
- }
-
- TEventMailBoxList& currentMailboxes = useRestrictedMailboxes ? restrictedMailboxes : Mailboxes;
- while (!currentMailboxes.empty()) {
- bool hasProgress = true;
- while (hasProgress) {
- ++DispatchCyclesCount;
- hasProgress = false;
-
- ui64 eventsToDispatch = 0;
- for (auto mboxIt = currentMailboxes.begin(); mboxIt != currentMailboxes.end(); ++mboxIt) {
- if (mboxIt->second->IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
- eventsToDispatch += mboxIt->second->GetSentEventCount();
- }
- }
- ui32 eventsDispatched = 0;
-
- // TODO: count events before each cycle, break after dispatching that many events
- bool isEmpty = false;
- while (!isEmpty && eventsDispatched < eventsToDispatch) {
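- // Start from a randomly chosen mailbox and walk the mailbox ring until we wrap around to the starting point.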
- ui64 mailboxCount = currentMailboxes.size();
- ui64 startWith = mailboxCount ? DispatcherRandomProvider->GenRand64() % mailboxCount : 0ull;
- auto startWithMboxIt = currentMailboxes.begin();
- for (ui64 i = 0; i < startWith; ++i) {
- ++startWithMboxIt;
- }
- auto endWithMboxIt = startWithMboxIt;
-
- isEmpty = true;
- auto mboxIt = startWithMboxIt;
- TDeque<TEventMailboxId> suspectedBoxes;
- while (true) {
- auto& mbox = *mboxIt;
- bool isIgnored = true;
- if (!mbox.second->IsEmpty()) {
- HandleNonEmptyMailboxesForEachContext(mbox.first);
- if (mbox.second->IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
-
- bool isEdgeMailbox = false;
- if (EdgeActorByMailbox.FindPtr(TEventMailboxId(mbox.first.NodeId, mbox.first.Hint))) {
- isEdgeMailbox = true;
- TEventsList events;
- mbox.second->Capture(events);
-
- TEventsList eventsToPush;
- for (auto& ev : events) {
- TInverseGuard<TMutex> inverseGuard(Mutex);
-
- for (auto observer : ObserverFuncs) {
- observer(ev);
- if (!ev) break;
- }
-
- if (ev && ObserverFunc(ev) != EEventAction::DROP && ev)
- eventsToPush.push_back(ev);
- }
- mbox.second->PushFront(eventsToPush);
- }
-
- if (!isEdgeMailbox) {
- isEmpty = false;
- isIgnored = false;
- ++eventsDispatched;
- ++DispatchedEventsCount;
- if (DispatchedEventsCount > DispatchedEventsLimit) {
- ythrow TWithBackTrace<yexception>() << "Dispatched "
- << DispatchedEventsLimit << " events, limit reached.";
- }
-
- auto ev = mbox.second->Pop();
- if (BlockedOutput.find(ev->Sender) == BlockedOutput.end()) {
- //UpdateCurrentTime(TInstant::MicroSeconds(CurrentTimestamp + 10));
- if (verbose) {
- Cerr << "Process event at " << TInstant::MicroSeconds(CurrentTimestamp) << ", ";
- PrintEvent(ev, this);
- }
- }
-
- hasProgress = true;
- EEventAction action = EEventAction::PROCESS;
- {
- TInverseGuard<TMutex> inverseGuard(Mutex);
-
- for (auto observer : ObserverFuncs) {
- observer(ev);
- if (!ev) break;
- }
-
- if (ev)
- action = ObserverFunc(ev);
- }
-
- if (ev) {
- switch (action) {
- case EEventAction::PROCESS:
- UpdateFinalEventsStatsForEachContext(*ev);
- SendInternal(ev.Release(), mbox.first.NodeId - FirstNodeId, false);
- break;
- case EEventAction::DROP:
- // do nothing
- break;
- case EEventAction::RESCHEDULE: {
- TInstant deadline = TInstant::MicroSeconds(CurrentTimestamp) + ReschedulingDelay;
- mbox.second->Freeze(deadline);
- mbox.second->PushFront(ev);
- break;
- }
- default:
- Y_ABORT("Unknown action");
- }
- }
- }
- }
-
- }
- Y_ABORT_UNLESS(mboxIt != currentMailboxes.end());
- if (!isIgnored && !CurrentDispatchContext->PrevContext && !restrictedMailboxes &&
- mboxIt->second->IsEmpty() &&
- mboxIt->second->IsScheduledEmpty() &&
- mboxIt->second->IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
- suspectedBoxes.push_back(mboxIt->first);
- }
- ++mboxIt;
- if (mboxIt == currentMailboxes.end()) {
- mboxIt = currentMailboxes.begin();
- }
- Y_ABORT_UNLESS(endWithMboxIt != currentMailboxes.end());
- if (mboxIt == endWithMboxIt) {
- break;
- }
- }
-
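- // Drop mailboxes that remained empty and have nothing scheduled from the working set for this pass.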
- for (auto id : suspectedBoxes) {
- auto it = currentMailboxes.find(id);
- if (it != currentMailboxes.end() && it->second->IsEmpty() && it->second->IsScheduledEmpty() &&
- it->second->IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
- currentMailboxes.erase(it);
- }
- }
- }
- }
-
- if (localContext.FinalEventFound) {
- return true;
- }
-
- if (!localContext.FoundNonEmptyMailboxes.empty())
- return true;
-
- if (options.CustomFinalCondition && options.CustomFinalCondition())
- return true;
-
- if (options.FinalEvents.empty()) {
- for (auto& mbox : currentMailboxes) {
- if (!mbox.second->IsActive(TInstant::MicroSeconds(CurrentTimestamp)))
- continue;
-
- if (!mbox.second->IsEmpty()) {
- if (verbose) {
- Cerr << "Dispatch complete with non-empty queue at " << TInstant::MicroSeconds(CurrentTimestamp) << "\n";
- }
-
- return true;
- }
- }
- }
-
- if (TInstant::MicroSeconds(CurrentTimestamp) > simDeadline) {
- return false;
- }
-
- if (dispatchTime >= deadline) {
- if (verbose) {
- Cerr << "Reached deadline at " << TInstant::MicroSeconds(CurrentTimestamp) << "\n";
- }
-
- ythrow TWithBackTrace<TEmptyEventQueueException>();
- }
-
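- // Periodically inspect scheduled (timer) events: feed those with the earliest deadline back into their
- // mailboxes, or fast-forward virtual time to the nearest frozen-mailbox deadline when nothing is selected.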
- if (!options.Quiet && dispatchTime >= inspectScheduledEventsAt) {
- inspectScheduledEventsAt = dispatchTime + scheduledEventsInspectInterval;
- bool isEmpty = true;
- TMaybe<TInstant> nearestMailboxDeadline;
- TVector<TIntrusivePtr<TEventMailBox>> nextScheduleMboxes;
- TMaybe<TInstant> nextScheduleDeadline;
- for (auto& mbox : currentMailboxes) {
- if (!mbox.second->IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
- if (!nearestMailboxDeadline.Defined() || *nearestMailboxDeadline.Get() > mbox.second->GetInactiveUntil()) {
- nearestMailboxDeadline = mbox.second->GetInactiveUntil();
- }
-
- continue;
- }
-
- if (mbox.second->IsScheduledEmpty())
- continue;
-
- auto firstScheduleDeadline = mbox.second->GetFirstScheduleDeadline();
- if (!nextScheduleDeadline || firstScheduleDeadline < *nextScheduleDeadline) {
- nextScheduleMboxes.clear();
- nextScheduleMboxes.emplace_back(mbox.second);
- nextScheduleDeadline = firstScheduleDeadline;
- } else if (firstScheduleDeadline == *nextScheduleDeadline) {
- nextScheduleMboxes.emplace_back(mbox.second);
- }
- }
-
- for (const auto& nextScheduleMbox : nextScheduleMboxes) {
- TEventsList selectedEvents;
- TScheduledEventsList capturedScheduledEvents;
- nextScheduleMbox->CaptureScheduled(capturedScheduledEvents);
- ScheduledEventsSelectorFunc(*this, capturedScheduledEvents, selectedEvents);
- nextScheduleMbox->PushScheduled(capturedScheduledEvents);
- for (auto& event : selectedEvents) {
- if (verbose && (BlockedOutput.find(event->Sender) == BlockedOutput.end())) {
- Cerr << "Selected scheduled event at " << TInstant::MicroSeconds(CurrentTimestamp) << ", ";
- PrintEvent(event, this);
- }
-
- nextScheduleMbox->Send(event);
- isEmpty = false;
- }
- }
-
- if (!isEmpty) {
- if (verbose) {
- Cerr << "Process selected events at " << TInstant::MicroSeconds(CurrentTimestamp) << "\n";
- }
-
- deadline = dispatchTime + DispatchTimeout;
- continue;
- }
-
- if (nearestMailboxDeadline.Defined()) {
- if (verbose) {
- Cerr << "Forward time to " << *nearestMailboxDeadline.Get() << "\n";
- }
-
- UpdateCurrentTime(*nearestMailboxDeadline.Get());
- continue;
- }
- }
-
- TDuration waitDelay = TDuration::MilliSeconds(10);
- dispatchTime += waitDelay;
- MailboxesHasEvents.WaitT(Mutex, waitDelay);
- }
- return false;
- }
-
- void TTestActorRuntimeBase::HandleNonEmptyMailboxesForEachContext(TEventMailboxId mboxId) {
- TDispatchContext* context = CurrentDispatchContext;
- while (context) {
- const auto& nonEmptyMailboxes = context->Options->NonEmptyMailboxes;
- if (Find(nonEmptyMailboxes.begin(), nonEmptyMailboxes.end(), mboxId) != nonEmptyMailboxes.end()) {
- context->FoundNonEmptyMailboxes.insert(mboxId);
- }
-
- context = context->PrevContext;
- }
- }
-
- void TTestActorRuntimeBase::UpdateFinalEventsStatsForEachContext(IEventHandle& ev) {
- TDispatchContext* context = CurrentDispatchContext;
- while (context) {
- for (const auto& finalEvent : context->Options->FinalEvents) {
- if (finalEvent.EventCheck(ev)) {
- auto& freq = context->FinalEventFrequency[&finalEvent];
- if (++freq >= finalEvent.RequiredCount) {
- context->FinalEventFound = true;
- }
- }
- }
-
- context = context->PrevContext;
- }
- }
-
- void TTestActorRuntimeBase::Send(const TActorId& recipient, const TActorId& sender, TAutoPtr<IEventBase> ev, ui32 senderNodeIndex, bool viaActorSystem) {
- Send(new IEventHandle(recipient, sender, ev.Release()), senderNodeIndex, viaActorSystem);
- }
-
- void TTestActorRuntimeBase::Send(TAutoPtr<IEventHandle> ev, ui32 senderNodeIndex, bool viaActorSystem) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(senderNodeIndex < NodeCount, "senderNodeIndex# %" PRIu32 " < NodeCount# %" PRIu32,
- senderNodeIndex, NodeCount);
- SendInternal(ev, senderNodeIndex, viaActorSystem);
- }
-
- void TTestActorRuntimeBase::SendAsync(TAutoPtr<IEventHandle> ev, ui32 senderNodeIndex) {
- Send(ev, senderNodeIndex, true);
- }
-
- void TTestActorRuntimeBase::Schedule(TAutoPtr<IEventHandle> ev, const TDuration& duration, ui32 nodeIndex) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- ui32 nodeId = FirstNodeId + nodeIndex;
- ui32 mailboxHint = ev->GetRecipientRewrite().Hint();
- TInstant deadline = TInstant::MicroSeconds(CurrentTimestamp) + duration;
- GetMailbox(nodeId, mailboxHint).Schedule(TScheduledEventQueueItem(deadline, ev, nullptr));
- if (VERBOSE)
- Cerr << "Event was added to scheduled queue\n";
- }
-
- void TTestActorRuntimeBase::ClearCounters() {
- TGuard<TMutex> guard(Mutex);
- EvCounters.clear();
- }
-
- ui64 TTestActorRuntimeBase::GetCounter(ui32 evType) const {
- TGuard<TMutex> guard(Mutex);
- auto it = EvCounters.find(evType);
- if (it == EvCounters.end())
- return 0;
-
- return it->second;
- }
-
- TActorId TTestActorRuntimeBase::GetLocalServiceId(const TActorId& serviceId, ui32 nodeIndex) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndex].Get();
- return node->ActorSystem->LookupLocalService(serviceId);
- }
-
- void TTestActorRuntimeBase::WaitForEdgeEvents(TEventFilter filter, const TSet<TActorId>& edgeFilter, TDuration simTimeout) {
- TGuard<TMutex> guard(Mutex);
- ui32 dispatchCount = 0;
- if (!edgeFilter.empty()) {
- for (auto edgeActor : edgeFilter) {
- Y_ABORT_UNLESS(EdgeActors.contains(edgeActor), "%s is not an edge actor", ToString(edgeActor).data());
- }
- }
- const TSet<TActorId>& edgeActors = edgeFilter.empty() ? EdgeActors : edgeFilter;
- TInstant deadline = TInstant::MicroSeconds(CurrentTimestamp) + simTimeout;
- for (;;) {
- for (auto edgeActor : edgeActors) {
- TEventsList events;
- auto& mbox = GetMailbox(edgeActor.NodeId(), edgeActor.Hint());
- bool foundEvent = false;
- mbox.Capture(events);
- for (auto& ev : events) {
- if (filter(*this, ev)) {
- foundEvent = true;
- break;
- }
- }
-
- mbox.PushFront(events);
- if (foundEvent)
- return;
- }
-
- ++dispatchCount;
- {
- if (!DispatchEventsInternal(TDispatchOptions(), deadline)) {
- return; // Timed out; event was not found
- }
- }
-
- Y_ABORT_UNLESS(dispatchCount < 1000, "Hard limit to prevent endless loop");
- }
- }
-
- TActorId TTestActorRuntimeBase::GetInterconnectProxy(ui32 nodeIndexFrom, ui32 nodeIndexTo) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndexFrom < NodeCount);
- Y_ABORT_UNLESS(nodeIndexTo < NodeCount);
- Y_ABORT_UNLESS(nodeIndexFrom != nodeIndexTo);
- TNodeDataBase* node = Nodes[FirstNodeId + nodeIndexFrom].Get();
- return node->ActorSystem->InterconnectProxy(FirstNodeId + nodeIndexTo);
- }
-
- void TTestActorRuntimeBase::BlockOutputForActor(const TActorId& actorId) {
- TGuard<TMutex> guard(Mutex);
- BlockedOutput.insert(actorId);
- }
-
- void TTestActorRuntimeBase::SetDispatcherRandomSeed(TInstant time, ui64 iteration) {
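- // Derive a deterministic seed from the day number and the dispatch iteration so reruns are reproducible.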
- ui64 days = (time.Hours() / 24);
- DispatcherRandomSeed = (days << 32) ^ iteration;
- DispatcherRandomProvider = CreateDeterministicRandomProvider(DispatcherRandomSeed);
- }
-
- IActor* TTestActorRuntimeBase::FindActor(const TActorId& actorId, ui32 nodeIndex) const {
- TGuard<TMutex> guard(Mutex);
- if (nodeIndex == Max<ui32>()) {
- Y_ABORT_UNLESS(actorId.NodeId());
- nodeIndex = actorId.NodeId() - FirstNodeId;
- }
-
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- auto nodeIt = Nodes.find(FirstNodeId + nodeIndex);
- Y_ABORT_UNLESS(nodeIt != Nodes.end());
- TNodeDataBase* node = nodeIt->second.Get();
- return FindActor(actorId, node);
- }
-
- void TTestActorRuntimeBase::EnableScheduleForActor(const TActorId& actorId, bool allow) {
- TGuard<TMutex> guard(Mutex);
- if (allow) {
- if (VERBOSE) {
- Cerr << "Actor " << actorId << " added to schedule whitelist\n";
- }
- ScheduleWhiteList.insert(actorId);
- } else {
- if (VERBOSE) {
- Cerr << "Actor " << actorId << " removed from schedule whitelist\n";
- }
- ScheduleWhiteList.erase(actorId);
- }
- }
-
- bool TTestActorRuntimeBase::IsScheduleForActorEnabled(const TActorId& actorId) const {
- TGuard<TMutex> guard(Mutex);
- return ScheduleWhiteList.find(actorId) != ScheduleWhiteList.end();
- }
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> TTestActorRuntimeBase::GetDynamicCounters(ui32 nodeIndex) {
- TGuard<TMutex> guard(Mutex);
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- ui32 nodeId = FirstNodeId + nodeIndex;
- TNodeDataBase* node = Nodes[nodeId].Get();
- return node->DynamicCounters;
- }
-
- void TTestActorRuntimeBase::SetupMonitoring() {
- NeedMonitoring = true;
- }
-
- void TTestActorRuntimeBase::SendInternal(TAutoPtr<IEventHandle> ev, ui32 nodeIndex, bool viaActorSystem) {
- Y_ABORT_UNLESS(nodeIndex < NodeCount);
- ui32 nodeId = FirstNodeId + nodeIndex;
- TNodeDataBase* node = Nodes[nodeId].Get();
- ui32 targetNode = ev->GetRecipientRewrite().NodeId();
- ui32 targetNodeIndex;
- if (targetNode == 0) {
- targetNodeIndex = nodeIndex;
- } else {
- targetNodeIndex = targetNode - FirstNodeId;
- Y_ABORT_UNLESS(targetNodeIndex < NodeCount);
- }
-
- if (viaActorSystem || UseRealThreads || ev->GetRecipientRewrite().IsService() || (targetNodeIndex != nodeIndex)) {
- node->ActorSystem->Send(ev);
- return;
- }
-
- Y_ABORT_UNLESS(!ev->GetRecipientRewrite().IsService() && (targetNodeIndex == nodeIndex));
-
- if (!AllowSendFrom(node, ev)) {
- return;
- }
-
- ui32 mailboxHint = ev->GetRecipientRewrite().Hint();
- TEventMailBox& mbox = GetMailbox(nodeId, mailboxHint);
- if (!mbox.IsActive(TInstant::MicroSeconds(CurrentTimestamp))) {
- mbox.PushFront(ev);
- return;
- }
-
- ui64 recipientLocalId = ev->GetRecipientRewrite().LocalId();
- if ((BlockedOutput.find(ev->Sender) == BlockedOutput.end()) && VERBOSE) {
- Cerr << "Send event, ";
- PrintEvent(ev, this);
- }
-
- EvCounters[ev->GetTypeRewrite()]++;
-
- TMailboxHeader* mailbox = node->MailboxTable->Get(mailboxHint);
- IActor* recipientActor = mailbox->FindActor(recipientLocalId);
- if (recipientActor) {
- // Save actorId by value in order to prevent ctx from being invalidated during another Send call.
- TActorId actorId = ev->GetRecipientRewrite();
- node->ActorToActorId[recipientActor] = ev->GetRecipientRewrite();
- TActorContext ctx(*mailbox, *node->ExecutorThread, GetCycleCountFast(), actorId);
- TActivationContext *prevTlsActivationContext = TlsActivationContext;
- TlsActivationContext = &ctx;
- CurrentRecipient = actorId;
- {
- TInverseGuard<TMutex> inverseGuard(Mutex);
-#ifdef USE_ACTOR_CALLSTACK
- TCallstack::GetTlsCallstack() = ev->Callstack;
- TCallstack::GetTlsCallstack().SetLinesToSkip();
-#endif
- recipientActor->Receive(ev);
- node->ExecutorThread->DropUnregistered();
- }
- CurrentRecipient = TActorId();
- TlsActivationContext = prevTlsActivationContext;
- } else {
- if (VERBOSE) {
- Cerr << "Failed to find actor with local id: " << recipientLocalId << "\n";
- }
-
- auto fw = IEventHandle::ForwardOnNondelivery(ev, TEvents::TEvUndelivered::ReasonActorUnknown);
- node->ActorSystem->Send(fw);
- }
- }
-
- IActor* TTestActorRuntimeBase::FindActor(const TActorId& actorId, TNodeDataBase* node) const {
- ui32 mailboxHint = actorId.Hint();
- ui64 localId = actorId.LocalId();
- TMailboxHeader* mailbox = node->MailboxTable->Get(mailboxHint);
- IActor* actor = mailbox->FindActor(localId);
- return actor;
- }
-
- THolder<TActorSystemSetup> TTestActorRuntimeBase::MakeActorSystemSetup(ui32 nodeIndex, TNodeDataBase* node) {
- THolder<TActorSystemSetup> setup(new TActorSystemSetup);
- setup->NodeId = FirstNodeId + nodeIndex;
-
- if (UseRealThreads) {
- setup->ExecutorsCount = 5;
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[5]);
- setup->Executors[0].Reset(new TBasicExecutorPool(0, 2, 20));
- setup->Executors[1].Reset(new TBasicExecutorPool(1, 2, 20));
- setup->Executors[2].Reset(new TIOExecutorPool(2, 1));
- setup->Executors[3].Reset(new TBasicExecutorPool(3, 2, 20));
- setup->Executors[4].Reset(new TBasicExecutorPool(4, 1, 20));
- setup->Scheduler.Reset(new TBasicSchedulerThread(TSchedulerConfig(512, 100)));
- } else {
- setup->ExecutorsCount = 1;
- setup->Scheduler.Reset(new TSchedulerThreadStub(this, node));
- setup->Executors.Reset(new TAutoPtr<IExecutorPool>[1]);
- setup->Executors[0].Reset(new TExecutorPoolStub(this, nodeIndex, node, 0));
- }
-
- InitActorSystemSetup(*setup);
-
- return setup;
- }
-
- THolder<TActorSystem> TTestActorRuntimeBase::MakeActorSystem(ui32 nodeIndex, TNodeDataBase* node) {
- auto setup = MakeActorSystemSetup(nodeIndex, node);
-
- node->ExecutorPools.resize(setup->ExecutorsCount);
- for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
- node->ExecutorPools[i] = setup->Executors[i].Get();
- }
-
- const auto& interconnectCounters = GetCountersForComponent(node->DynamicCounters, "interconnect");
-
- for (const auto& cmd : node->LocalServices) {
- setup->LocalServices.emplace_back(cmd.first, TActorSetupCmd(cmd.second.Actor, cmd.second.MailboxType, cmd.second.PoolId));
- }
- setup->Interconnect.ProxyActors.resize(FirstNodeId + NodeCount);
- const TActorId nameserviceId = GetNameserviceActorId();
-
- TIntrusivePtr<TInterconnectProxyCommon> common;
- common.Reset(new TInterconnectProxyCommon);
- common->NameserviceId = nameserviceId;
- common->MonCounters = interconnectCounters;
- common->TechnicalSelfHostName = "::1";
-
- if (!UseRealThreads) {
- common->Settings.DeadPeer = TDuration::Max();
- common->Settings.CloseOnIdle = TDuration::Max();
- common->Settings.PingPeriod = TDuration::Max();
- common->Settings.ForceConfirmPeriod = TDuration::Max();
- common->Settings.Handshake = TDuration::Max();
- }
-
- common->ClusterUUID = ClusterUUID;
- common->AcceptUUID = {ClusterUUID};
-
- if (ICCommonSetupper) {
- ICCommonSetupper(nodeIndex, common);
- }
-
- for (ui32 proxyNodeIndex = 0; proxyNodeIndex < NodeCount; ++proxyNodeIndex) {
- if (proxyNodeIndex == nodeIndex)
- continue;
-
- const ui32 peerNodeId = FirstNodeId + proxyNodeIndex;
-
- IActor *proxyActor = UseRealInterconnect
- ? new TInterconnectProxyTCP(peerNodeId, common)
- : InterconnectMock.CreateProxyMock(setup->NodeId, peerNodeId, common);
-
- setup->Interconnect.ProxyActors[peerNodeId] = {proxyActor, TMailboxType::ReadAsFilled, InterconnectPoolId()};
- }
-
- setup->Interconnect.ProxyWrapperFactory = CreateProxyWrapperFactory(common, InterconnectPoolId(), &InterconnectMock);
-
- if (UseRealInterconnect) {
- setup->LocalServices.emplace_back(MakePollerActorId(), NActors::TActorSetupCmd(CreatePollerActor(),
- NActors::TMailboxType::Simple, InterconnectPoolId()));
- }
-
- if (!SingleSysEnv) { // Single system env should do this itself
- TAutoPtr<TLogBackend> logBackend = LogBackend ? LogBackend : NActors::CreateStderrBackend();
- NActors::TLoggerActor *loggerActor = new NActors::TLoggerActor(node->LogSettings,
- logBackend, GetCountersForComponent(node->DynamicCounters, "utils"));
- NActors::TActorSetupCmd loggerActorCmd(loggerActor, NActors::TMailboxType::Simple, node->GetLoggerPoolId());
- std::pair<NActors::TActorId, NActors::TActorSetupCmd> loggerActorPair(node->LogSettings->LoggerActorId, std::move(loggerActorCmd));
- setup->LocalServices.push_back(std::move(loggerActorPair));
- }
-
- return THolder<TActorSystem>(new TActorSystem(setup, node->GetAppData(), node->LogSettings));
- }
-
- TActorSystem* TTestActorRuntimeBase::SingleSys() const {
- Y_ABORT_UNLESS(Nodes.size() == 1, "Works only for single system env");
-
- return Nodes.begin()->second->ActorSystem.Get();
- }
-
- TActorSystem* TTestActorRuntimeBase::GetAnyNodeActorSystem() {
- for (auto& x : Nodes) {
- return x.second->ActorSystem.Get();
- }
- Y_ABORT("Don't use this method.");
- }
-
- TActorSystem* TTestActorRuntimeBase::GetActorSystem(ui32 nodeId) {
- auto it = Nodes.find(GetNodeId(nodeId));
- Y_ABORT_UNLESS(it != Nodes.end());
- return it->second->ActorSystem.Get();
- }
-
-
- TEventMailBox& TTestActorRuntimeBase::GetMailbox(ui32 nodeId, ui32 hint) {
- TGuard<TMutex> guard(Mutex);
- auto mboxId = TEventMailboxId(nodeId, hint);
- auto it = Mailboxes.find(mboxId);
- if (it == Mailboxes.end()) {
- it = Mailboxes.insert(std::make_pair(mboxId, new TEventMailBox())).first;
- }
-
- return *it->second;
- }
-
- void TTestActorRuntimeBase::ClearMailbox(ui32 nodeId, ui32 hint) {
- TGuard<TMutex> guard(Mutex);
- auto mboxId = TEventMailboxId(nodeId, hint);
- Mailboxes.erase(mboxId);
- }
-
- TString TTestActorRuntimeBase::GetActorName(const TActorId& actorId) const {
- auto it = ActorNames.find(actorId);
- if (it != ActorNames.end())
- return it->second;
- return actorId.ToString();
- }
-
- struct TStrandingActorDecoratorContext : public TThrRefBase {
- TStrandingActorDecoratorContext()
- : Queue(new TQueueType)
- {
- }
-
- typedef TOneOneQueueInplace<IEventHandle*, 32> TQueueType;
- TAutoPtr<TQueueType, TQueueType::TPtrCleanDestructor> Queue;
- };
-
- class TStrandingActorDecorator : public TActorBootstrapped<TStrandingActorDecorator> {
- public:
- class TReplyActor : public TActor<TReplyActor> {
- public:
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::TEST_ACTOR_RUNTIME;
- }
-
- TReplyActor(TStrandingActorDecorator* owner)
- : TActor(&TReplyActor::StateFunc)
- , Owner(owner)
- {
- }
-
- STFUNC(StateFunc);
-
- private:
- TStrandingActorDecorator* const Owner;
- };
-
- static constexpr EActivityType ActorActivityType() {
- return EActivityType::TEST_ACTOR_RUNTIME;
- }
-
- TStrandingActorDecorator(const TActorId& delegatee, bool isSync, const TVector<TActorId>& additionalActors,
- TSimpleSharedPtr<TStrandingActorDecoratorContext> context, TTestActorRuntimeBase* runtime,
- TReplyCheckerCreator createReplyChecker)
- : Delegatee(delegatee)
- , IsSync(isSync)
- , AdditionalActors(additionalActors)
- , Context(context)
- , HasReply(false)
- , Runtime(runtime)
- , ReplyChecker(createReplyChecker())
- {
- if (IsSync) {
- Y_ABORT_UNLESS(!runtime->IsRealThreads());
- }
- }
-
- void Bootstrap(const TActorContext& ctx) {
- Become(&TStrandingActorDecorator::StateFunc);
- ReplyId = ctx.RegisterWithSameMailbox(new TReplyActor(this));
- DelegateeOptions.OnlyMailboxes.push_back(TEventMailboxId(Delegatee.NodeId(), Delegatee.Hint()));
- for (const auto& actor : AdditionalActors) {
- DelegateeOptions.OnlyMailboxes.push_back(TEventMailboxId(actor.NodeId(), actor.Hint()));
- }
-
- DelegateeOptions.OnlyMailboxes.push_back(TEventMailboxId(ReplyId.NodeId(), ReplyId.Hint()));
- DelegateeOptions.NonEmptyMailboxes.push_back(TEventMailboxId(ReplyId.NodeId(), ReplyId.Hint()));
- DelegateeOptions.Quiet = true;
- }
-
- STFUNC(StateFunc) {
- bool wasEmpty = !Context->Queue->Head();
- Context->Queue->Push(ev.Release());
- if (wasEmpty) {
- SendHead(ActorContext());
- }
- }
-
- STFUNC(Reply) {
- Y_ABORT_UNLESS(!HasReply);
- IEventHandle *requestEv = Context->Queue->Head();
- TActorId originalSender = requestEv->Sender;
- HasReply = !ReplyChecker->IsWaitingForMoreResponses(ev.Get());
- if (HasReply) {
- delete Context->Queue->Pop();
- }
- auto ctx(ActorContext());
- ctx.ExecutorThread.Send(IEventHandle::Forward(ev, originalSender));
- if (!IsSync && Context->Queue->Head()) {
- SendHead(ctx);
- }
- }
-
- private:
- void SendHead(const TActorContext& ctx) {
- if (!IsSync) {
- ctx.ExecutorThread.Send(GetForwardedEvent().Release());
- } else {
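- // Synchronous mode: forward one queued request at a time and pump the runtime until the reply
- // arrives, retrying a bounded number of times when the event queue stays empty.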
- while (Context->Queue->Head()) {
- HasReply = false;
- ctx.ExecutorThread.Send(GetForwardedEvent().Release());
- int count = 100;
- while (!HasReply && count > 0) {
- try {
- Runtime->DispatchEvents(DelegateeOptions);
- } catch (TEmptyEventQueueException&) {
- count--;
- Cerr << "No reply" << Endl;
- }
- }
-
- Runtime->UpdateCurrentTime(Runtime->GetCurrentTime() + TDuration::MicroSeconds(1000));
- }
- }
- }
-
- TAutoPtr<IEventHandle> GetForwardedEvent() {
- IEventHandle* ev = Context->Queue->Head();
- ReplyChecker->OnRequest(ev);
- TAutoPtr<IEventHandle> forwardedEv = ev->HasEvent()
- ? new IEventHandle(Delegatee, ReplyId, ev->ReleaseBase().Release(), ev->Flags, ev->Cookie)
- : new IEventHandle(ev->GetTypeRewrite(), ev->Flags, Delegatee, ReplyId, ev->ReleaseChainBuffer(), ev->Cookie);
-
- return forwardedEv;
- }
- private:
- const TActorId Delegatee;
- const bool IsSync;
- const TVector<TActorId> AdditionalActors;
- TSimpleSharedPtr<TStrandingActorDecoratorContext> Context;
- TActorId ReplyId;
- bool HasReply;
- TDispatchOptions DelegateeOptions;
- TTestActorRuntimeBase* Runtime;
- THolder<IReplyChecker> ReplyChecker;
- };
-
- void TStrandingActorDecorator::TReplyActor::StateFunc(STFUNC_SIG) {
- Owner->Reply(ev);
- }
-
- class TStrandingDecoratorFactory : public IStrandingDecoratorFactory {
- public:
- TStrandingDecoratorFactory(TTestActorRuntimeBase* runtime,
- TReplyCheckerCreator createReplyChecker)
- : Context(new TStrandingActorDecoratorContext())
- , Runtime(runtime)
- , CreateReplyChecker(createReplyChecker)
- {
- }
-
- IActor* Wrap(const TActorId& delegatee, bool isSync, const TVector<TActorId>& additionalActors) override {
- return new TStrandingActorDecorator(delegatee, isSync, additionalActors, Context, Runtime,
- CreateReplyChecker);
- }
-
- private:
- TSimpleSharedPtr<TStrandingActorDecoratorContext> Context;
- TTestActorRuntimeBase* Runtime;
- TReplyCheckerCreator CreateReplyChecker;
- };
-
- TAutoPtr<IStrandingDecoratorFactory> CreateStrandingDecoratorFactory(TTestActorRuntimeBase* runtime,
- TReplyCheckerCreator createReplyChecker) {
- return TAutoPtr<IStrandingDecoratorFactory>(new TStrandingDecoratorFactory(runtime, createReplyChecker));
- }
-
- ui64 DefaultRandomSeed = 9999;
-}
diff --git a/library/cpp/actors/testlib/test_runtime.h b/library/cpp/actors/testlib/test_runtime.h
deleted file mode 100644
index f46d3906e4..0000000000
--- a/library/cpp/actors/testlib/test_runtime.h
+++ /dev/null
@@ -1,814 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/actorsystem.h>
-#include <library/cpp/actors/core/log.h>
-#include <library/cpp/actors/core/events.h>
-#include <library/cpp/actors/core/executor_thread.h>
-#include <library/cpp/actors/core/mailbox.h>
-#include <library/cpp/actors/core/monotonic_provider.h>
-#include <library/cpp/actors/util/should_continue.h>
-#include <library/cpp/actors/interconnect/poller_tcp.h>
-#include <library/cpp/actors/interconnect/mock/ic_mock.h>
-#include <library/cpp/random_provider/random_provider.h>
-#include <library/cpp/time_provider/time_provider.h>
-#include <library/cpp/testing/unittest/tests_data.h>
-
-#include <util/datetime/base.h>
-#include <util/folder/tempdir.h>
-#include <util/generic/deque.h>
-#include <util/generic/hash.h>
-#include <util/generic/noncopyable.h>
-#include <util/generic/ptr.h>
-#include <util/generic/queue.h>
-#include <util/generic/set.h>
-#include <util/generic/vector.h>
-#include <util/system/defaults.h>
-#include <util/system/mutex.h>
-#include <util/system/condvar.h>
-#include <util/system/thread.h>
-#include <util/system/sanitizers.h>
-#include <util/system/valgrind.h>
-#include <utility>
-
-#include <functional>
-
-const TDuration DEFAULT_DISPATCH_TIMEOUT = NSan::PlainOrUnderSanitizer(
- NValgrind::PlainOrUnderValgrind(TDuration::Seconds(60), TDuration::Seconds(120)),
- TDuration::Seconds(120)
-);
-
-
-namespace NActors {
- struct THeSingleSystemEnv { };
-
- struct TTestActorSetupCmd { // like TActorSetupCmd, but not owning the Actor
- TTestActorSetupCmd(IActor* actor, TMailboxType::EType mailboxType, ui32 poolId)
- : MailboxType(mailboxType)
- , PoolId(poolId)
- , Actor(actor)
- {
- }
-
- TTestActorSetupCmd(TActorSetupCmd cmd)
- : MailboxType(cmd.MailboxType)
- , PoolId(cmd.PoolId)
- , Actor(cmd.Actor.release())
- {
- }
-
- TMailboxType::EType MailboxType;
- ui32 PoolId;
- IActor* Actor;
- };
-
- struct TEventMailboxId {
- TEventMailboxId()
- : NodeId(0)
- , Hint(0)
- {
- }
-
- TEventMailboxId(ui32 nodeId, ui32 hint)
- : NodeId(nodeId)
- , Hint(hint)
- {
- }
-
- bool operator<(const TEventMailboxId& other) const {
- return (NodeId < other.NodeId) || ((NodeId == other.NodeId) && (Hint < other.Hint));
- }
-
- bool operator==(const TEventMailboxId& other) const {
- return (NodeId == other.NodeId) && (Hint == other.Hint);
- }
-
- struct THash {
- ui64 operator()(const TEventMailboxId& mboxId) const noexcept {
- return mboxId.NodeId * 31ULL + mboxId.Hint;
- }
- };
-
- ui32 NodeId;
- ui32 Hint;
- };
-
- struct TDispatchOptions {
- struct TFinalEventCondition {
- std::function<bool(IEventHandle& ev)> EventCheck;
- ui32 RequiredCount;
-
- TFinalEventCondition(ui32 eventType, ui32 requiredCount = 1)
- : EventCheck([eventType](IEventHandle& ev) -> bool { return ev.GetTypeRewrite() == eventType; })
- , RequiredCount(requiredCount)
- {
- }
-
- TFinalEventCondition(std::function<bool(IEventHandle& ev)> eventCheck, ui32 requiredCount = 1)
- : EventCheck(eventCheck)
- , RequiredCount(requiredCount)
- {
- }
- };
-
- TVector<TFinalEventCondition> FinalEvents;
- TVector<TEventMailboxId> NonEmptyMailboxes;
- TVector<TEventMailboxId> OnlyMailboxes;
- std::function<bool()> CustomFinalCondition;
- bool Quiet = false;
- };
-
- struct TScheduledEventQueueItem {
- TInstant Deadline;
- TAutoPtr<IEventHandle> Event;
- TAutoPtr<TSchedulerCookieHolder> Cookie;
- ui64 UniqueId;
-
- TScheduledEventQueueItem(TInstant deadline, TAutoPtr<IEventHandle> event, ISchedulerCookie* cookie)
- : Deadline(deadline)
- , Event(event)
- , Cookie(new TSchedulerCookieHolder(cookie))
- , UniqueId(++NextUniqueId)
- {}
-
- bool operator<(const TScheduledEventQueueItem& other) const {
- if (Deadline < other.Deadline)
- return true;
-
- if (Deadline > other.Deadline)
- return false;
-
- return UniqueId < other.UniqueId;
- }
-
- static ui64 NextUniqueId;
- };
-
- typedef TDeque<TAutoPtr<IEventHandle>> TEventsList;
- typedef TSet<TScheduledEventQueueItem> TScheduledEventsList;
-
- class TEventMailBox : public TThrRefBase {
- public:
- TEventMailBox()
- : InactiveUntil(TInstant::MicroSeconds(0))
-#ifdef DEBUG_ORDER_EVENTS
- , ExpectedReceive(0)
- , NextToSend(0)
-#endif
- {
- }
-
- void Send(TAutoPtr<IEventHandle> ev);
- bool IsEmpty() const;
- TAutoPtr<IEventHandle> Pop();
- void Capture(TEventsList& evList);
- void PushFront(TAutoPtr<IEventHandle>& ev);
- void PushFront(TEventsList& evList);
- void CaptureScheduled(TScheduledEventsList& evList);
- void PushScheduled(TScheduledEventsList& evList);
- bool IsActive(const TInstant& currentTime) const;
- void Freeze(const TInstant& deadline);
- TInstant GetInactiveUntil() const;
- void Schedule(const TScheduledEventQueueItem& item);
- bool IsScheduledEmpty() const;
- TInstant GetFirstScheduleDeadline() const;
- ui64 GetSentEventCount() const;
-
- private:
- TScheduledEventsList Scheduled;
- TInstant InactiveUntil;
- TEventsList Sent;
-#ifdef DEBUG_ORDER_EVENTS
- TMap<IEventHandle*, ui64> TrackSent;
- ui64 ExpectedReceive;
- ui64 NextToSend;
-#endif
- };
-
- typedef THashMap<TEventMailboxId, TIntrusivePtr<TEventMailBox>, TEventMailboxId::THash> TEventMailBoxList;
-
- class TEmptyEventQueueException : public yexception {
- public:
- TEmptyEventQueueException() {
- Append("Event queue is still empty.");
- }
- };
-
- class TSchedulingLimitReachedException : public yexception {
- public:
- TSchedulingLimitReachedException(ui64 limit) {
- TStringStream str;
- str << "TestActorRuntime Processed over " << limit << " events.";
- Append(str.Str());
- }
- };
-
- class TTestActorRuntimeBase: public TNonCopyable {
- public:
- class TEdgeActor;
- class TSchedulerThreadStub;
- class TExecutorPoolStub;
- class TTimeProvider;
- class TMonotonicTimeProvider;
-
- enum class EEventAction {
- PROCESS,
- DROP,
- RESCHEDULE
- };
-
- typedef std::function<EEventAction(TAutoPtr<IEventHandle>& event)> TEventObserver;
- typedef std::function<void(TTestActorRuntimeBase& runtime, TScheduledEventsList& scheduledEvents, TEventsList& queue)> TScheduledEventsSelector;
- typedef std::function<bool(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event)> TEventFilter;
- typedef std::function<bool(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event, TDuration delay, TInstant& deadline)> TScheduledEventFilter;
- typedef std::function<void(TTestActorRuntimeBase& runtime, const TActorId& parentId, const TActorId& actorId)> TRegistrationObserver;
-
-
- TTestActorRuntimeBase(THeSingleSystemEnv);
- TTestActorRuntimeBase(ui32 nodeCount, ui32 dataCenterCount, bool useRealThreads);
- TTestActorRuntimeBase(ui32 nodeCount, ui32 dataCenterCount);
- TTestActorRuntimeBase(ui32 nodeCount = 1, bool useRealThreads = false);
- virtual ~TTestActorRuntimeBase();
- bool IsRealThreads() const;
- static EEventAction DefaultObserverFunc(TAutoPtr<IEventHandle>& event);
- static void DroppingScheduledEventsSelector(TTestActorRuntimeBase& runtime, TScheduledEventsList& scheduledEvents, TEventsList& queue);
- static void CollapsedTimeScheduledEventsSelector(TTestActorRuntimeBase& runtime, TScheduledEventsList& scheduledEvents, TEventsList& queue);
- static bool DefaultFilterFunc(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event);
- static bool NopFilterFunc(TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event, TDuration delay, TInstant& deadline);
- static void DefaultRegistrationObserver(TTestActorRuntimeBase& runtime, const TActorId& parentId, const TActorId& actorId);
- TEventObserver SetObserverFunc(TEventObserver observerFunc); // deprecated, use AddObserver
- TScheduledEventsSelector SetScheduledEventsSelectorFunc(TScheduledEventsSelector scheduledEventsSelectorFunc);
- TEventFilter SetEventFilter(TEventFilter filterFunc);
- TScheduledEventFilter SetScheduledEventFilter(TScheduledEventFilter filterFunc);
- TRegistrationObserver SetRegistrationObserverFunc(TRegistrationObserver observerFunc);
- static bool IsVerbose();
- static void SetVerbose(bool verbose);
- TDuration SetDispatchTimeout(TDuration timeout);
- void SetDispatchedEventsLimit(ui64 limit) {
- DispatchedEventsLimit = limit;
- }
- TDuration SetReschedulingDelay(TDuration delay);
- void SetLogBackend(const TAutoPtr<TLogBackend> logBackend);
- void SetLogPriority(NActors::NLog::EComponent component, NActors::NLog::EPriority priority);
- TIntrusivePtr<ITimeProvider> GetTimeProvider();
- TIntrusivePtr<IMonotonicTimeProvider> GetMonotonicTimeProvider();
- TInstant GetCurrentTime() const;
- TMonotonic GetCurrentMonotonicTime() const;
- void UpdateCurrentTime(TInstant newTime);
- void AdvanceCurrentTime(TDuration duration);
- void AddLocalService(const TActorId& actorId, TActorSetupCmd cmd, ui32 nodeIndex = 0);
- virtual void Initialize();
- ui32 GetNodeId(ui32 index = 0) const;
- ui32 GetNodeCount() const;
- ui64 AllocateLocalId();
- ui32 InterconnectPoolId() const;
- TString GetTempDir();
- TActorId Register(IActor* actor, ui32 nodeIndex = 0, ui32 poolId = 0,
- TMailboxType::EType mailboxType = TMailboxType::Simple, ui64 revolvingCounter = 0,
- const TActorId& parentid = TActorId());
- TActorId Register(IActor *actor, ui32 nodeIndex, ui32 poolId, TMailboxHeader *mailbox, ui32 hint,
- const TActorId& parentid = TActorId());
- TActorId RegisterService(const TActorId& serviceId, const TActorId& actorId, ui32 nodeIndex = 0);
- TActorId AllocateEdgeActor(ui32 nodeIndex = 0);
- TEventsList CaptureEvents();
- TEventsList CaptureMailboxEvents(ui32 hint, ui32 nodeId);
- TScheduledEventsList CaptureScheduledEvents();
- void PushFront(TAutoPtr<IEventHandle>& ev);
- void PushEventsFront(TEventsList& events);
- void PushMailboxEventsFront(ui32 hint, ui32 nodeId, TEventsList& events);
- // doesn't dispatch events for edge actors
- bool DispatchEvents(const TDispatchOptions& options = TDispatchOptions());
- bool DispatchEvents(const TDispatchOptions& options, TDuration simTimeout);
- bool DispatchEvents(const TDispatchOptions& options, TInstant simDeadline);
- void Send(const TActorId& recipient, const TActorId& sender, TAutoPtr<IEventBase> ev, ui32 senderNodeIndex = 0, bool viaActorSystem = false);
- void Send(TAutoPtr<IEventHandle> ev, ui32 senderNodeIndex = 0, bool viaActorSystem = false);
- void SendAsync(TAutoPtr<IEventHandle> ev, ui32 senderNodeIndex = 0);
- void Schedule(TAutoPtr<IEventHandle> ev, const TDuration& duration, ui32 nodeIndex = 0);
- void ClearCounters();
- ui64 GetCounter(ui32 evType) const;
- TActorId GetLocalServiceId(const TActorId& serviceId, ui32 nodeIndex = 0);
- void WaitForEdgeEvents(TEventFilter filter, const TSet<TActorId>& edgeFilter = {}, TDuration simTimeout = TDuration::Max());
- TActorId GetInterconnectProxy(ui32 nodeIndexFrom, ui32 nodeIndexTo);
- void BlockOutputForActor(const TActorId& actorId);
- IActor* FindActor(const TActorId& actorId, ui32 nodeIndex = Max<ui32>()) const;
- void EnableScheduleForActor(const TActorId& actorId, bool allow = true);
- bool IsScheduleForActorEnabled(const TActorId& actorId) const;
- TIntrusivePtr<NMonitoring::TDynamicCounters> GetDynamicCounters(ui32 nodeIndex = 0);
- void SetupMonitoring();
-
- using TEventObserverCollection = std::list<std::function<void(TAutoPtr<IEventHandle>& event)>>;
- class TEventObserverHolder {
- public:
- TEventObserverHolder(TEventObserverCollection& list, TEventObserverCollection::iterator&& iter)
- : List(list)
- , Iter(iter)
- {
- }
-
- ~TEventObserverHolder()
- {
- Remove();
- }
-
- void Remove()
- {
- if (Iter == List.end()) {
- return;
- }
-
- List.erase(Iter);
- Iter = List.end();
- }
- private:
- TEventObserverCollection& List;
- TEventObserverCollection::iterator Iter;
- };
-
- // An example of using AddObserver in unit tests
- /*
- auto observerHolder = runtime.AddObserver<TEvDataShard::TEvRead>([&](TEvDataShard::TEvRead::TPtr& event) {
- // Do something with the event inside the callback
- Cout << "An event is observed " << event->Get()->Record.ShortDebugString() << Endl;
-
- // Optionally reset the event, all subsequent handlers of this event will not be called
- event.Reset();
- });
-
- // Do something inside the main code of the unit test
-
- // Optionally remove the observer, otherwise it will be destroyed in its destructor
- observerHolder.Remove();
- */
-
- template <typename TEvType>
- TEventObserverHolder AddObserver(std::function<void(typename TEvType::TPtr&)> observerFunc)
- {
- auto baseFunc = [observerFunc](TAutoPtr<IEventHandle>& event) {
- if (event && event->GetTypeRewrite() == TEvType::EventType)
- observerFunc(*(reinterpret_cast<typename TEvType::TPtr*>(&event)));
- };
-
- auto iter = ObserverFuncs.insert(ObserverFuncs.end(), baseFunc);
- return TEventObserverHolder(ObserverFuncs, std::move(iter));
- }
-
- TEventObserverHolder AddObserver(std::function<void(TAutoPtr<IEventHandle>&)> observerFunc)
- {
- auto iter = ObserverFuncs.insert(ObserverFuncs.end(), observerFunc);
- return TEventObserverHolder(ObserverFuncs, std::move(iter));
- }
-
- template<typename T>
- void AppendToLogSettings(NLog::EComponent minVal, NLog::EComponent maxVal, T func) {
- Y_ABORT_UNLESS(!IsInitialized);
-
- for (const auto& pair : Nodes) {
- pair.second->LogSettings->Append(minVal, maxVal, func);
- }
- }
-
- TIntrusivePtr<NLog::TSettings> GetLogSettings(ui32 nodeIdx)
- {
- return Nodes[FirstNodeId + nodeIdx]->LogSettings;
- }
-
- TActorSystem* SingleSys() const;
- TActorSystem* GetAnyNodeActorSystem();
- TActorSystem* GetActorSystem(ui32 nodeId);
- template <typename TEvent>
- TEvent* GrabEdgeEventIf(TAutoPtr<IEventHandle>& handle, std::function<bool(const TEvent&)> predicate, TDuration simTimeout = TDuration::Max()) {
- handle.Destroy();
- const ui32 eventType = TEvent::EventType;
- WaitForEdgeEvents([&](TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
- Y_UNUSED(runtime);
- if (event->GetTypeRewrite() != eventType)
- return false;
-
- TEvent* typedEvent = event->Get<TEvent>();
- if (predicate(*typedEvent)) {
- handle = event;
- return true;
- }
-
- return false;
- }, {}, simTimeout);
-
- if (simTimeout == TDuration::Max())
- Y_ABORT_UNLESS(handle);
-
- if (handle) {
- return handle->Get<TEvent>();
- } else {
- return nullptr;
- }
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEventIf(
- const TSet<TActorId>& edgeFilter,
- const std::function<bool(const typename TEvent::TPtr&)>& predicate,
- TDuration simTimeout = TDuration::Max())
- {
- typename TEvent::TPtr handle;
- const ui32 eventType = TEvent::EventType;
- WaitForEdgeEvents([&](TTestActorRuntimeBase& runtime, TAutoPtr<IEventHandle>& event) {
- Y_UNUSED(runtime);
- if (event->GetTypeRewrite() != eventType)
- return false;
-
- typename TEvent::TPtr* typedEvent = reinterpret_cast<typename TEvent::TPtr*>(&event);
- if (predicate(*typedEvent)) {
- handle = *typedEvent;
- return true;
- }
-
- return false;
- }, edgeFilter, simTimeout);
-
- if (simTimeout == TDuration::Max())
- Y_ABORT_UNLESS(handle);
-
- return handle;
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEventIf(
- const TActorId& edgeActor,
- const std::function<bool(const typename TEvent::TPtr&)>& predicate,
- TDuration simTimeout = TDuration::Max())
- {
- TSet<TActorId> edgeFilter{edgeActor};
- return GrabEdgeEventIf<TEvent>(edgeFilter, predicate, simTimeout);
- }
-
- template <typename TEvent>
- TEvent* GrabEdgeEvent(TAutoPtr<IEventHandle>& handle, TDuration simTimeout = TDuration::Max()) {
- std::function<bool(const TEvent&)> truth = [](const TEvent&) { return true; };
- return GrabEdgeEventIf(handle, truth, simTimeout);
- }
-
- template <typename TEvent>
- THolder<TEvent> GrabEdgeEvent(TDuration simTimeout = TDuration::Max()) {
- TAutoPtr<IEventHandle> handle;
- std::function<bool(const TEvent&)> truth = [](const TEvent&) { return true; };
- GrabEdgeEventIf(handle, truth, simTimeout);
- if (handle) {
- return THolder<TEvent>(handle->Release<TEvent>());
- }
- return {};
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEvent(const TSet<TActorId>& edgeFilter, TDuration simTimeout = TDuration::Max()) {
- return GrabEdgeEventIf<TEvent>(edgeFilter, [](const typename TEvent::TPtr&) { return true; }, simTimeout);
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEvent(const TActorId& edgeActor, TDuration simTimeout = TDuration::Max()) {
- TSet<TActorId> edgeFilter{edgeActor};
- return GrabEdgeEvent<TEvent>(edgeFilter, simTimeout);
- }
-
- // replace with std::variant<>
- template <typename... TEvents>
- std::tuple<TEvents*...> GrabEdgeEvents(TAutoPtr<IEventHandle>& handle, TDuration simTimeout = TDuration::Max()) {
- handle.Destroy();
- auto eventTypes = { TEvents::EventType... };
- WaitForEdgeEvents([&](TTestActorRuntimeBase&, TAutoPtr<IEventHandle>& event) {
- if (std::find(std::begin(eventTypes), std::end(eventTypes), event->GetTypeRewrite()) == std::end(eventTypes))
- return false;
- handle = event;
- return true;
- }, {}, simTimeout);
- if (simTimeout == TDuration::Max())
- Y_ABORT_UNLESS(handle);
- if (handle) {
- return std::make_tuple(handle->Type == TEvents::EventType
- ? handle->Get<TEvents>()
- : static_cast<TEvents*>(nullptr)...);
- }
- return {};
- }
-
- template <typename TEvent>
- TEvent* GrabEdgeEventRethrow(TAutoPtr<IEventHandle>& handle, TDuration simTimeout = TDuration::Max()) {
- try {
- return GrabEdgeEvent<TEvent>(handle, simTimeout);
- } catch (...) {
- ythrow TWithBackTrace<yexception>() << "Exception occurred while waiting for " << TypeName<TEvent>() << ": " << CurrentExceptionMessage();
- }
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEventRethrow(const TSet<TActorId>& edgeFilter, TDuration simTimeout = TDuration::Max()) {
- try {
- return GrabEdgeEvent<TEvent>(edgeFilter, simTimeout);
- } catch (...) {
- ythrow TWithBackTrace<yexception>() << "Exception occurred while waiting for " << TypeName<TEvent>() << ": " << CurrentExceptionMessage();
- }
- }
-
- template<class TEvent>
- typename TEvent::TPtr GrabEdgeEventRethrow(const TActorId& edgeActor, TDuration simTimeout = TDuration::Max()) {
- try {
- return GrabEdgeEvent<TEvent>(edgeActor, simTimeout);
- } catch (...) {
- ythrow TWithBackTrace<yexception>() << "Exception occurred while waiting for " << TypeName<TEvent>() << ": " << CurrentExceptionMessage();
- }
- }
-
- template <typename... TEvents>
- static TString TypeNames() {
- static TString names[] = { TypeName<TEvents>()... };
- TString result;
- for (const TString& s : names) {
- if (result.empty()) {
- result += '<';
- } else {
- result += ',';
- }
- result += s;
- }
- if (!result.empty()) {
- result += '>';
- }
- return result;
- }
-
- template <typename... TEvents>
- std::tuple<TEvents*...> GrabEdgeEventsRethrow(TAutoPtr<IEventHandle>& handle, TDuration simTimeout = TDuration::Max()) {
- try {
- return GrabEdgeEvents<TEvents...>(handle, simTimeout);
- } catch (...) {
- ythrow TWithBackTrace<yexception>() << "Exception occurred while waiting for " << TypeNames<TEvents...>() << ": " << CurrentExceptionMessage();
- }
- }
-
- void ResetScheduledCount() {
- ScheduledCount = 0;
- }
-
- void SetScheduledLimit(ui64 limit) {
- ScheduledLimit = limit;
- }
-
- void SetDispatcherRandomSeed(TInstant time, ui64 iteration);
- TString GetActorName(const TActorId& actorId) const;
-
- const TVector<ui64>& GetTxAllocatorTabletIds() const { return TxAllocatorTabletIds; }
- void SetTxAllocatorTabletIds(const TVector<ui64>& ids) { TxAllocatorTabletIds = ids; }
-
- void SetUseRealInterconnect() {
- UseRealInterconnect = true;
- }
-
- void SetICCommonSetupper(std::function<void(ui32, TIntrusivePtr<TInterconnectProxyCommon>)>&& icCommonSetupper) {
- ICCommonSetupper = std::move(icCommonSetupper);
- }
-
- protected:
- struct TNodeDataBase;
- TNodeDataBase* GetRawNode(ui32 node) const {
- return Nodes.at(FirstNodeId + node).Get();
- }
-
- static IExecutorPool* CreateExecutorPoolStub(TTestActorRuntimeBase* runtime, ui32 nodeIndex, TNodeDataBase* node, ui32 poolId);
- virtual TIntrusivePtr<NMonitoring::TDynamicCounters> GetCountersForComponent(TIntrusivePtr<NMonitoring::TDynamicCounters> counters, const char* component) {
- Y_UNUSED(counters);
- Y_UNUSED(component);
-
- // do nothing, just return the existing counters
- return counters;
- }
-
- THolder<TActorSystemSetup> MakeActorSystemSetup(ui32 nodeIndex, TNodeDataBase* node);
- THolder<TActorSystem> MakeActorSystem(ui32 nodeIndex, TNodeDataBase* node);
- virtual void InitActorSystemSetup(TActorSystemSetup& setup) {
- Y_UNUSED(setup);
- }
-
- private:
- IActor* FindActor(const TActorId& actorId, TNodeDataBase* node) const;
- void SendInternal(TAutoPtr<IEventHandle> ev, ui32 nodeIndex, bool viaActorSystem);
- TEventMailBox& GetMailbox(ui32 nodeId, ui32 hint);
- void ClearMailbox(ui32 nodeId, ui32 hint);
- void HandleNonEmptyMailboxesForEachContext(TEventMailboxId mboxId);
- void UpdateFinalEventsStatsForEachContext(IEventHandle& ev);
- bool DispatchEventsInternal(const TDispatchOptions& options, TInstant simDeadline);
-
- private:
- ui64 ScheduledCount;
- ui64 ScheduledLimit;
- THolder<TTempDir> TmpDir;
- const TThread::TId MainThreadId;
-
- protected:
- bool UseRealInterconnect = false;
- TInterconnectMock InterconnectMock;
- bool IsInitialized = false;
- bool SingleSysEnv = false;
- const TString ClusterUUID;
- const ui32 FirstNodeId;
- const ui32 NodeCount;
- const ui32 DataCenterCount;
- const bool UseRealThreads;
- std::function<void(ui32, TIntrusivePtr<TInterconnectProxyCommon>)> ICCommonSetupper;
-
- ui64 LocalId;
- TMutex Mutex;
- TCondVar MailboxesHasEvents;
- TEventMailBoxList Mailboxes;
- TMap<ui32, ui64> EvCounters;
- ui64 DispatchCyclesCount;
- ui64 DispatchedEventsCount;
- ui64 DispatchedEventsLimit = 2'500'000;
- TActorId CurrentRecipient;
- ui64 DispatcherRandomSeed;
- TIntrusivePtr<IRandomProvider> DispatcherRandomProvider;
- TAutoPtr<TLogBackend> LogBackend;
- bool NeedMonitoring;
-
- TIntrusivePtr<IRandomProvider> RandomProvider;
- TIntrusivePtr<ITimeProvider> TimeProvider;
- TIntrusivePtr<IMonotonicTimeProvider> MonotonicTimeProvider;
-
- protected:
- struct TNodeDataBase: public TThrRefBase {
- TNodeDataBase();
- void Stop();
- virtual ~TNodeDataBase();
- virtual ui64 GetLoggerPoolId() const {
- return 0;
- }
-
- template <typename T = void>
- T* GetAppData() {
- return static_cast<T*>(AppData0.get());
- }
-
- template <typename T = void>
- const T* GetAppData() const {
- return static_cast<T*>(AppData0.get());
- }
-
- TIntrusivePtr<NMonitoring::TDynamicCounters> DynamicCounters;
- TIntrusivePtr<NActors::NLog::TSettings> LogSettings;
- TIntrusivePtr<NInterconnect::TPollerThreads> Poller;
- volatile ui64* ActorSystemTimestamp;
- volatile ui64* ActorSystemMonotonic;
- TVector<std::pair<TActorId, TTestActorSetupCmd>> LocalServices;
- TMap<TActorId, IActor*> LocalServicesActors;
- TMap<IActor*, TActorId> ActorToActorId;
- THolder<TMailboxTable> MailboxTable;
- std::shared_ptr<void> AppData0;
- THolder<TActorSystem> ActorSystem;
- THolder<IExecutorPool> SchedulerPool;
- TVector<IExecutorPool*> ExecutorPools;
- THolder<TExecutorThread> ExecutorThread;
- };
-
- struct INodeFactory {
- virtual ~INodeFactory() = default;
- virtual TIntrusivePtr<TNodeDataBase> CreateNode() = 0;
- };
-
- struct TDefaultNodeFactory final: INodeFactory {
- virtual TIntrusivePtr<TNodeDataBase> CreateNode() override {
- return new TNodeDataBase();
- }
- };
-
- INodeFactory& GetNodeFactory() {
- return *NodeFactory;
- }
-
- virtual TNodeDataBase* GetNodeById(size_t idx) {
- return Nodes[idx].Get();
- }
-
- void InitNodes();
- void CleanupNodes();
- virtual void InitNodeImpl(TNodeDataBase*, size_t);
-
- static bool AllowSendFrom(TNodeDataBase* node, TAutoPtr<IEventHandle>& ev);
-
- protected:
- THolder<INodeFactory> NodeFactory{new TDefaultNodeFactory};
-
- private:
- void InitNode(TNodeDataBase* node, size_t idx);
-
- struct TDispatchContext {
- const TDispatchOptions* Options;
- TDispatchContext* PrevContext;
-
- TMap<const TDispatchOptions::TFinalEventCondition*, ui32> FinalEventFrequency;
- TSet<TEventMailboxId> FoundNonEmptyMailboxes;
- bool FinalEventFound = false;
- };
-
- TProgramShouldContinue ShouldContinue;
- TMap<ui32, TIntrusivePtr<TNodeDataBase>> Nodes;
- ui64 CurrentTimestamp;
- TSet<TActorId> EdgeActors;
- THashMap<TEventMailboxId, TActorId, TEventMailboxId::THash> EdgeActorByMailbox;
- TDuration DispatchTimeout;
- TDuration ReschedulingDelay;
- TEventObserver ObserverFunc;
- TEventObserverCollection ObserverFuncs;
- TScheduledEventsSelector ScheduledEventsSelectorFunc;
- TEventFilter EventFilterFunc;
- TScheduledEventFilter ScheduledEventFilterFunc;
- TRegistrationObserver RegistrationObserver;
- TSet<TActorId> BlockedOutput;
- TSet<TActorId> ScheduleWhiteList;
- THashMap<TActorId, TActorId> ScheduleWhiteListParent;
- THashMap<TActorId, TString> ActorNames;
- TDispatchContext* CurrentDispatchContext;
- TVector<ui64> TxAllocatorTabletIds;
-
- static ui32 NextNodeId;
- };
-
- template <typename TEvent>
- TEvent* FindEvent(TEventsList& events) {
- for (auto& event : events) {
- if (event && event->GetTypeRewrite() == TEvent::EventType) {
- return event->CastAsLocal<TEvent>();
- }
- }
-
- return nullptr;
- }
-
- template <typename TEvent>
- TEvent* FindEvent(TEventsList& events, const std::function<bool(const TEvent&)>& predicate) {
- for (auto& event : events) {
- if (event && event->GetTypeRewrite() == TEvent::EventType && predicate(*event->CastAsLocal<TEvent>())) {
- return event->CastAsLocal<TEvent>();
- }
- }
-
- return nullptr;
- }
-
- template <typename TEvent>
- TEvent* GrabEvent(TEventsList& events, TAutoPtr<IEventHandle>& ev) {
- ev.Destroy();
- for (auto& event : events) {
- if (event && event->GetTypeRewrite() == TEvent::EventType) {
- ev = event;
- return ev->CastAsLocal<TEvent>();
- }
- }
-
- return nullptr;
- }
-
- template <typename TEvent>
- TEvent* GrabEvent(TEventsList& events, TAutoPtr<IEventHandle>& ev,
- const std::function<bool(const typename TEvent::TPtr&)>& predicate) {
- ev.Destroy();
- for (auto& event : events) {
- if (event && event->GetTypeRewrite() == TEvent::EventType) {
- if (predicate(reinterpret_cast<const typename TEvent::TPtr&>(event))) {
- ev = event;
- return ev->CastAsLocal<TEvent>();
- }
- }
- }
-
- return nullptr;
- }
-
- class IStrandingDecoratorFactory {
- public:
- virtual ~IStrandingDecoratorFactory() {}
- virtual IActor* Wrap(const TActorId& delegatee, bool isSync, const TVector<TActorId>& additionalActors) = 0;
- };
-
- struct IReplyChecker {
- virtual ~IReplyChecker() {}
- virtual void OnRequest(IEventHandle *request) = 0;
- virtual bool IsWaitingForMoreResponses(IEventHandle *response) = 0;
- };
-
- struct TNoneReplyChecker : IReplyChecker {
- void OnRequest(IEventHandle*) override {
- }
-
- bool IsWaitingForMoreResponses(IEventHandle*) override {
- return false;
- }
- };
-
- using TReplyCheckerCreator = std::function<THolder<IReplyChecker>(void)>;
-
- inline THolder<IReplyChecker> CreateNoneReplyChecker() {
- return MakeHolder<TNoneReplyChecker>();
- }
-
- TAutoPtr<IStrandingDecoratorFactory> CreateStrandingDecoratorFactory(TTestActorRuntimeBase* runtime,
- TReplyCheckerCreator createReplyChecker = CreateNoneReplyChecker);
- extern ui64 DefaultRandomSeed;
-}
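A minimal sketch of how the deleted test runtime header above was typically driven from a unit test, assuming the actor under test reacts to a `TEvWakeup`; the actor instance itself is supplied by the caller, and only `TTestActorRuntimeBase` methods declared in the header are used. This is illustrative only, not code from the removed file.

```cpp
#include <library/cpp/actors/testlib/test_runtime.h>

// Sketch: drive an actor through the simulated runtime and observe its traffic.
void ExampleDispatchTest(NActors::IActor* actorUnderTest) {
    NActors::TTestActorRuntimeBase runtime(/* nodeCount = */ 1);
    runtime.Initialize();

    // Register the actor under test and an edge actor that acts as the test's endpoint.
    const NActors::TActorId worker = runtime.Register(actorUnderTest);
    const NActors::TActorId edge = runtime.AllocateEdgeActor();

    // Count every event the runtime dispatches while the observer is alive.
    ui64 observed = 0;
    auto holder = runtime.AddObserver([&](TAutoPtr<NActors::IEventHandle>&) { ++observed; });

    // Kick the actor and pump mailboxes until the wakeup has been delivered
    // or the simulated timeout expires.
    runtime.Send(worker, edge, new NActors::TEvents::TEvWakeup());
    NActors::TDispatchOptions options;
    options.FinalEvents.emplace_back(NActors::TEvents::TSystem::Wakeup, /* requiredCount = */ 1);
    runtime.DispatchEvents(options, TDuration::Seconds(5));
}
```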
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/testlib/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index ae1df12ed6..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-testlib-ut)
-target_include_directories(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib
-)
-target_link_libraries(library-cpp-actors-testlib-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-testlib-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/decorator_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-testlib-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-testlib-ut
- TEST_TARGET
- library-cpp-actors-testlib-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-testlib-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-testlib-ut)
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/testlib/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 03d106c63f..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-testlib-ut)
-target_include_directories(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib
-)
-target_link_libraries(library-cpp-actors-testlib-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-testlib-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/decorator_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-testlib-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-testlib-ut
- TEST_TARGET
- library-cpp-actors-testlib-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-testlib-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-testlib-ut)
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/testlib/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 598b6e53de..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-testlib-ut)
-target_include_directories(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib
-)
-target_link_libraries(library-cpp-actors-testlib-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-testlib-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/decorator_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-testlib-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-testlib-ut
- TEST_TARGET
- library-cpp-actors-testlib-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-testlib-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-testlib-ut)
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/testlib/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 42713842da..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,73 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-testlib-ut)
-target_include_directories(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib
-)
-target_link_libraries(library-cpp-actors-testlib-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-testlib
- cpp-actors-core
-)
-target_link_options(library-cpp-actors-testlib-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/decorator_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-testlib-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-testlib-ut
- TEST_TARGET
- library-cpp-actors-testlib-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-testlib-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-testlib-ut)
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.txt b/library/cpp/actors/testlib/ut/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/testlib/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/testlib/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 077ccae7fb..0000000000
--- a/library/cpp/actors/testlib/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,61 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-testlib-ut)
-target_include_directories(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib
-)
-target_link_libraries(library-cpp-actors-testlib-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-testlib
- cpp-actors-core
-)
-target_sources(library-cpp-actors-testlib-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/testlib/decorator_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-testlib-ut
- PROPERTY
- SPLIT_FACTOR
- 10
-)
-add_yunittest(
- NAME
- library-cpp-actors-testlib-ut
- TEST_TARGET
- library-cpp-actors-testlib-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-testlib-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-testlib-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-testlib-ut)
diff --git a/library/cpp/actors/testlib/ut/ya.make b/library/cpp/actors/testlib/ut/ya.make
deleted file mode 100644
index ea6aef37a6..0000000000
--- a/library/cpp/actors/testlib/ut/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/testlib)
-
-FORK_SUBTESTS()
-SIZE(SMALL)
-
-
-PEERDIR(
- library/cpp/actors/core
-)
-
-SRCS(
- decorator_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/testlib/ya.make b/library/cpp/actors/testlib/ya.make
deleted file mode 100644
index 0bd44ddd57..0000000000
--- a/library/cpp/actors/testlib/ya.make
+++ /dev/null
@@ -1,23 +0,0 @@
-LIBRARY()
-
-SRCS(
- test_runtime.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/interconnect/mock
- library/cpp/actors/protos
- library/cpp/random_provider
- library/cpp/time_provider
-)
-
-IF (GCC)
- CFLAGS(-fno-devirtualize-speculatively)
-ENDIF()
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/actors/util/CMakeLists.darwin-arm64.txt b/library/cpp/actors/util/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index be68d418f7..0000000000
--- a/library/cpp/actors/util/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-util)
-target_link_libraries(cpp-actors-util PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-containers-absl_flat_hash
- cpp-deprecated-atomic
- library-cpp-pop_count
-)
-target_sources(cpp-actors-util PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/affinity.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_track.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/should_continue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/threadparkpad.cpp
-)
diff --git a/library/cpp/actors/util/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/util/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index be68d418f7..0000000000
--- a/library/cpp/actors/util/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-util)
-target_link_libraries(cpp-actors-util PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-containers-absl_flat_hash
- cpp-deprecated-atomic
- library-cpp-pop_count
-)
-target_sources(cpp-actors-util PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/affinity.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_track.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/should_continue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/threadparkpad.cpp
-)
diff --git a/library/cpp/actors/util/CMakeLists.linux-aarch64.txt b/library/cpp/actors/util/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 9c5183c2bd..0000000000
--- a/library/cpp/actors/util/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-util)
-target_link_libraries(cpp-actors-util PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-containers-absl_flat_hash
- cpp-deprecated-atomic
- library-cpp-pop_count
-)
-target_sources(cpp-actors-util PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/affinity.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_track.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/should_continue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/threadparkpad.cpp
-)
diff --git a/library/cpp/actors/util/CMakeLists.linux-x86_64.txt b/library/cpp/actors/util/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 9c5183c2bd..0000000000
--- a/library/cpp/actors/util/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,29 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-util)
-target_link_libraries(cpp-actors-util PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-containers-absl_flat_hash
- cpp-deprecated-atomic
- library-cpp-pop_count
-)
-target_sources(cpp-actors-util PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/affinity.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_track.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/should_continue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/threadparkpad.cpp
-)
diff --git a/library/cpp/actors/util/CMakeLists.txt b/library/cpp/actors/util/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/util/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/util/CMakeLists.windows-x86_64.txt b/library/cpp/actors/util/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index be68d418f7..0000000000
--- a/library/cpp/actors/util/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,28 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(ut)
-
-add_library(cpp-actors-util)
-target_link_libraries(cpp-actors-util PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-containers-absl_flat_hash
- cpp-deprecated-atomic
- library-cpp-pop_count
-)
-target_sources(cpp-actors-util PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/affinity.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_track.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/should_continue.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/threadparkpad.cpp
-)
diff --git a/library/cpp/actors/util/README.md b/library/cpp/actors/util/README.md
deleted file mode 100644
index ff2d573fe8..0000000000
--- a/library/cpp/actors/util/README.md
+++ /dev/null
@@ -1,99 +0,0 @@
-## Memory tracker
-
-https://a.yandex-team.ru/arc/trunk/arcadia/library/cpp/actors/util/memory_track.h
-
-Usage:
-
-* tracking allocations of instances of a specific class via new/delete and new[]/delete[]
-* tracking allocations inside containers
-* manual tracking of allocation/deallocation points
-
-----
-
-### Tracking class allocations via new/delete
-
-Usage with an automatically generated label:
-
-```cpp
-#include <library/cpp/actors/util/memory_track.h>
-
-struct TTypeLabeled
- : public NActors::NMemory::TTrack<TTypeLabeled>
-{
- char payload[16];
-};
-```
-
-Usage with a user-defined label name:
-
-```cpp
-#include <library/cpp/actors/util/memory_track.h>
-
-static const char NamedLabel[] = "NamedLabel";
-
-struct TNameLabeled
- : public NActors::NMemory::TTrack<TNameLabeled, NamedLabel>
-{
- char payload[32];
-};
-```
-
-----
-
-### Tracking allocations in containers
-
-```cpp
-#include <library/cpp/actors/util/memory_track.h>
-
-static const char InContainerLabel[] = "InContainerLabel";
-
-struct TInContainer {
- char payload[16];
-};
-
-std::vector<TInContainer, NActors::NMemory::TAlloc<TInContainer>> vecT;
-
-std::vector<TInContainer, NActors::NMemory::TAlloc<TInContainer, InContainerLabel>> vecN;
-
-using TKey = int;
-
-std::map<TKey, TInContainer, std::less<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>>> mapT;
-
-std::map<TKey, TInContainer, std::less<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>, InContainerLabel>> mapN;
-
-std::unordered_map<TKey, TInContainer, std::hash<TKey>, std::equal_to<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>>> umapT;
-
-std::unordered_map<TKey, TInContainer, std::hash<TKey>, std::equal_to<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>, InContainerLabel>> umapN;
-```
-
-----
-
-### Manual tracking of allocations/deallocations
-
-```cpp
-#include <library/cpp/actors/util/memory_track.h>
-
-static const char ManualLabel[] = "ManualLabel";
-
-...
-NActors::NMemory::TLabel<ManualLabel>::Add(size);
-
-...
-NActors::NMemory::TLabel<ManualLabel>::Sub(size);
-```
-
-----
-
-### Collected metrics
-
-Service **utils**, user-defined label **label**, sensors:
-
-- MT/Count: current number of live allocations
-- MT/Memory: currently allocated memory
-- MT/PeakCount: peak number of allocations (sampled at a fixed rate)
-- MT/PeakMemory: peak amount of allocated memory
-
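The manual-tracking section of the removed README pairs `TLabel<...>::Add` with a matching `Sub`. A possible RAII wrapper built on those two calls is sketched below; `TScopedLabel` is a hypothetical helper, not part of the library.

```cpp
#include <library/cpp/actors/util/memory_track.h>

// Hypothetical convenience wrapper: keeps the Add/Sub calls shown above balanced
// by tying the deallocation side to scope exit.
static const char ManualLabel[] = "ManualLabel";

class TScopedLabel {
public:
    explicit TScopedLabel(size_t size)
        : Size(size)
    {
        NActors::NMemory::TLabel<ManualLabel>::Add(Size);   // account the allocation
    }

    ~TScopedLabel() {
        NActors::NMemory::TLabel<ManualLabel>::Sub(Size);   // release the accounting
    }

private:
    const size_t Size;
};
```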
diff --git a/library/cpp/actors/util/affinity.cpp b/library/cpp/actors/util/affinity.cpp
deleted file mode 100644
index 5851105ae7..0000000000
--- a/library/cpp/actors/util/affinity.cpp
+++ /dev/null
@@ -1,93 +0,0 @@
-#include "affinity.h"
-
-#ifdef _linux_
-#include <sched.h>
-#endif
-
-class TAffinity::TImpl {
-#ifdef _linux_
- cpu_set_t Mask;
-#endif
-public:
- TImpl() {
-#ifdef _linux_
- int ar = sched_getaffinity(0, sizeof(cpu_set_t), &Mask);
- Y_DEBUG_ABORT_UNLESS(ar == 0);
-#endif
- }
-
- explicit TImpl(const ui8* cpus, ui32 size) {
-#ifdef _linux_
- CPU_ZERO(&Mask);
- for (ui32 i = 0; i != size; ++i) {
- if (cpus[i]) {
- CPU_SET(i, &Mask);
- }
- }
-#else
- Y_UNUSED(cpus);
- Y_UNUSED(size);
-#endif
- }
-
- void Set() const {
-#ifdef _linux_
- int ar = sched_setaffinity(0, sizeof(cpu_set_t), &Mask);
- Y_DEBUG_ABORT_UNLESS(ar == 0);
-#endif
- }
-
- operator TCpuMask() const {
- TCpuMask result;
-#ifdef _linux_
- for (ui32 i = 0; i != CPU_SETSIZE; ++i) {
- result.Cpus.emplace_back(CPU_ISSET(i, &Mask));
- }
- result.RemoveTrailingZeros();
-#endif
- return result;
- }
-
-};
-
-TAffinity::TAffinity() {
-}
-
-TAffinity::~TAffinity() {
-}
-
-TAffinity::TAffinity(const ui8* x, ui32 sz) {
- if (x && sz) {
- Impl.Reset(new TImpl(x, sz));
- }
-}
-
-TAffinity::TAffinity(const TCpuMask& mask) {
- if (!mask.IsEmpty()) {
- static_assert(sizeof(ui8) == sizeof(mask.Cpus[0]));
- const ui8* x = reinterpret_cast<const ui8*>(&mask.Cpus[0]);
- const ui32 sz = mask.Size();
- Impl.Reset(new TImpl(x, sz));
- }
-}
-
-void TAffinity::Current() {
- Impl.Reset(new TImpl());
-}
-
-void TAffinity::Set() const {
- if (!!Impl) {
- Impl->Set();
- }
-}
-
-bool TAffinity::Empty() const {
- return !Impl;
-}
-
-TAffinity::operator TCpuMask() const {
- if (!!Impl) {
- return *Impl;
- }
- return TCpuMask();
-}
diff --git a/library/cpp/actors/util/affinity.h b/library/cpp/actors/util/affinity.h
deleted file mode 100644
index ae106ed180..0000000000
--- a/library/cpp/actors/util/affinity.h
+++ /dev/null
@@ -1,49 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "cpumask.h"
-
-// Platform-specific class to set or get thread affinity
-class TAffinity: public TThrRefBase, TNonCopyable {
- class TImpl;
- THolder<TImpl> Impl;
-
-public:
- TAffinity();
- TAffinity(const ui8* cpus, ui32 size);
- explicit TAffinity(const TCpuMask& mask);
- ~TAffinity();
-
- void Current();
- void Set() const;
- bool Empty() const;
-
- operator TCpuMask() const;
-};
-
-// Scoped affinity setter
-class TAffinityGuard : TNonCopyable {
- bool Stacked;
- TAffinity OldAffinity;
-
-public:
- TAffinityGuard(const TAffinity* affinity) {
- Stacked = false;
- if (affinity && !affinity->Empty()) {
- OldAffinity.Current();
- affinity->Set();
- Stacked = true;
- }
- }
-
- ~TAffinityGuard() {
- Release();
- }
-
- void Release() {
- if (Stacked) {
- OldAffinity.Set();
- Stacked = false;
- }
- }
-};
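A short sketch of how the affinity classes declared above were meant to be combined: pin the current thread to a CPU subset for the duration of a scope and restore the previous mask on exit. The CPU indices are illustrative; only the constructors and methods shown in the deleted header are used, and the mask only takes effect on Linux.

```cpp
#include <library/cpp/actors/util/affinity.h>

// Sketch: run a CPU-bound section pinned to CPUs 0 and 1.
void RunPinnedSection() {
    const ui8 cpus[] = {1, 1, 0, 0};          // per-CPU flags: CPUs 0 and 1 enabled
    TAffinity affinity(cpus, sizeof(cpus));   // build the affinity mask from the flags

    {
        TAffinityGuard guard(&affinity);      // saves the current mask and applies the new one
        // ... CPU-bound work runs here, restricted to CPUs 0-1 on Linux ...
    }                                         // guard destructor restores the previous affinity
}
```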
diff --git a/library/cpp/actors/util/cpu_load_log.h b/library/cpp/actors/util/cpu_load_log.h
deleted file mode 100644
index 225f7148da..0000000000
--- a/library/cpp/actors/util/cpu_load_log.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <library/cpp/pop_count/popcount.h>
-
-static constexpr ui64 BitDurationNs = 131'072; // A power of 2
-
-template <ui64 DataSize>
-struct TCpuLoadLog {
- static constexpr ui64 BitsSize = DataSize * 64;
- TAtomic LastTimeNs = 0;
- ui64 Data[DataSize];
-
- TCpuLoadLog() {
- LastTimeNs = 0;
- for (size_t i = 0; i < DataSize; ++i) {
- Data[i] = 0;
- }
- }
-
- TCpuLoadLog(ui64 timeNs) {
- LastTimeNs = timeNs;
- for (size_t i = 0; i < DataSize; ++i) {
- Data[i] = 0;
- }
- }
-
- void RegisterBusyPeriod(ui64 timeNs) {
- RegisterBusyPeriod<true>(timeNs, AtomicGet(LastTimeNs));
- }
-
- template <bool ModifyLastTime>
- void RegisterBusyPeriod(ui64 timeNs, ui64 lastTimeNs) {
- timeNs |= 1ull;
- if (timeNs < lastTimeNs) {
- for (ui64 i = 0; i < DataSize; ++i) {
- AtomicSet(Data[i], ~0ull);
- }
- if (ModifyLastTime) {
- AtomicSet(LastTimeNs, timeNs);
- }
- return;
- }
- const ui64 lastIdx = timeNs / BitDurationNs;
- const ui64 curIdx = lastTimeNs / BitDurationNs;
- ui64 firstElementIdx = curIdx / 64;
- const ui64 firstBitIdx = curIdx % 64;
- const ui64 lastElementIdx = lastIdx / 64;
- const ui64 lastBitIdx = lastIdx % 64;
- if (firstElementIdx == lastElementIdx) {
- ui64 prevValue = 0;
- if (firstBitIdx != 0) {
- prevValue = AtomicGet(Data[firstElementIdx % DataSize]);
- }
- const ui64 bits = (((~0ull) << (firstBitIdx + (63-lastBitIdx))) >> (63-lastBitIdx));
- const ui64 newValue = prevValue | bits;
- AtomicSet(Data[firstElementIdx % DataSize], newValue);
- if (ModifyLastTime) {
- AtomicSet(LastTimeNs, timeNs);
- }
- return;
- }
- // process the first element
- ui64 prevValue = 0;
- if (firstBitIdx != 0) {
- prevValue = AtomicGet(Data[firstElementIdx % DataSize]);
- }
- const ui64 bits = ((~0ull) << firstBitIdx);
- const ui64 newValue = (prevValue | bits);
- AtomicSet(Data[firstElementIdx % DataSize], newValue);
- ++firstElementIdx;
- // process the fully filled elements
- const ui64 firstLoop = firstElementIdx / DataSize;
- const ui64 lastLoop = lastElementIdx / DataSize;
- const ui64 lastOffset = lastElementIdx % DataSize;
- if (firstLoop < lastLoop) {
- for (ui64 i = firstElementIdx % DataSize; i < DataSize; ++i) {
- AtomicSet(Data[i], ~0ull);
- }
- for (ui64 i = 0; i < lastOffset; ++i) {
- AtomicSet(Data[i], ~0ull);
- }
- } else {
- for (ui64 i = firstElementIdx % DataSize; i < lastOffset; ++i) {
- AtomicSet(Data[i], ~0ull);
- }
- }
- // process the last element
- const ui64 newValue2 = ((~0ull) >> (63-lastBitIdx));
- AtomicSet(Data[lastOffset], newValue2);
- if (ModifyLastTime) {
- AtomicSet(LastTimeNs, timeNs);
- }
- }
-
- void RegisterIdlePeriod(ui64 timeNs) {
- timeNs &= ~1ull;
- ui64 lastTimeNs = AtomicGet(LastTimeNs);
- if (timeNs < lastTimeNs) {
- // Fast check first, slower check later
- if ((timeNs | 1ull) < lastTimeNs) {
- // Time goes back, don't panic, just mark the whole array 'busy'
- for (ui64 i = 0; i < DataSize; ++i) {
- AtomicSet(Data[i], ~0ull);
- }
- AtomicSet(LastTimeNs, timeNs);
- return;
- }
- }
- const ui64 curIdx = lastTimeNs / BitDurationNs;
- const ui64 lastIdx = timeNs / BitDurationNs;
- ui64 firstElementIdx = curIdx / 64;
- const ui64 lastElementIdx = lastIdx / 64;
- if (firstElementIdx >= lastElementIdx) {
- AtomicSet(LastTimeNs, timeNs);
- return;
- }
- // process the first partially filled element
- ++firstElementIdx;
- // process all other elements
- const ui64 firstLoop = firstElementIdx / DataSize;
- const ui64 lastLoop = lastElementIdx / DataSize;
- const ui64 lastOffset = lastElementIdx % DataSize;
- if (firstLoop < lastLoop) {
- for (ui64 i = firstElementIdx % DataSize; i < DataSize; ++i) {
- AtomicSet(Data[i], 0);
- }
- for (ui64 i = 0; i <= lastOffset; ++i) {
- AtomicSet(Data[i], 0);
- }
- } else {
- for (ui64 i = firstElementIdx % DataSize; i <= lastOffset; ++i) {
- AtomicSet(Data[i], 0);
- }
- }
- AtomicSet(LastTimeNs, timeNs);
- }
-};
-
-template <ui64 DataSize>
-struct TMinusOneCpuEstimator {
- static constexpr ui64 BitsSize = DataSize * 64;
- ui64 BeginDelayIdx;
- ui64 EndDelayIdx;
- ui64 Idle;
- ui64 Delay[BitsSize];
-
- ui64 MaxLatencyIncreaseWithOneLessCpu(TCpuLoadLog<DataSize>** logs, i64 logCount, ui64 timeNs, ui64 periodNs) {
- Y_ABORT_UNLESS(logCount > 0);
- ui64 endTimeNs = timeNs;
-
- ui64 lastTimeNs = timeNs;
- for (i64 log_idx = 0; log_idx < logCount; ++log_idx) {
- ui64 x = AtomicGet(logs[log_idx]->LastTimeNs);
- if ((x & 1) == 1) {
- lastTimeNs = Min(lastTimeNs, x);
- } else {
- logs[log_idx]->template RegisterBusyPeriod<false>(endTimeNs, x);
- }
- }
- const ui64 beginTimeNs = periodNs < timeNs ? timeNs - periodNs : 0;
-
- ui64 beginIdx = beginTimeNs / BitDurationNs;
- ui64 lastIdx = lastTimeNs / BitDurationNs;
- ui64 beginElementIdx = beginIdx / 64;
- ui64 lastElementIdx = lastIdx / 64;
-
- BeginDelayIdx = 0;
- EndDelayIdx = 0;
- Idle = 0;
- ui64 maxDelay = 0;
- ui64 bucket = 0;
- for (ui64 idx = beginElementIdx; idx <= lastElementIdx; ++idx) {
- ui64 i = idx % DataSize;
- ui64 input = AtomicGet(logs[0]->Data[i]);
- ui64 all_busy = ~0ull;
- for (i64 log_idx = 1; log_idx < logCount; ++log_idx) {
- ui64 x = AtomicGet(logs[log_idx]->Data[i]);
- all_busy &= x;
- }
- if (!input) {
- if (!bucket) {
- Idle += 64 - PopCount(all_busy);
- continue;
- }
- }
- for (i64 bit_idx = 0; bit_idx < 64; ++bit_idx) {
- ui64 x = (1ull << bit_idx);
- if (all_busy & x) {
- if (input & x) {
- // Push into the queue
- bucket++;
- Delay[EndDelayIdx] = EndDelayIdx;
- ++EndDelayIdx;
- } else {
- // All busy
- }
- } else {
- if (input & x) {
- // Move success
- } else {
- if (bucket) {
- // Remove from the queue
- bucket--;
- ui64 stored = Delay[BeginDelayIdx];
- ++BeginDelayIdx;
- ui64 delay = EndDelayIdx - stored;
- maxDelay = Max(maxDelay, delay);
- //Cerr << "bit_idx: " << bit_idx << " stored: " << stored << " delay: " << delay << Endl;
- } else {
- Idle++;
- }
- }
- }
- }
- }
- if (bucket) {
- ui64 stored = Delay[BeginDelayIdx];
- ui64 delay = EndDelayIdx - stored;
- maxDelay = Max(maxDelay, delay);
- //Cerr << "last stored: " << stored << " delay: " << delay << Endl;
- }
- return maxDelay * BitDurationNs;
- }
-};
-
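A sketch of how the two structs in the deleted header fit together: each worker thread keeps a `TCpuLoadLog` of busy/idle transitions, and `TMinusOneCpuEstimator` scans the logs to estimate how much queueing latency would grow with one fewer CPU. The timestamps and look-back window are illustrative, and `nowNs` is assumed to be a recent monotonic timestamp in nanoseconds (large enough that the subtractions do not underflow).

```cpp
#include <library/cpp/actors/util/cpu_load_log.h>

// Sketch: feed two per-thread load logs into the minus-one-CPU estimator.
ui64 EstimateExtraLatencyNs(ui64 nowNs) {
    TCpuLoadLog<5> log0(nowNs - 10 * BitDurationNs);
    TCpuLoadLog<5> log1(nowNs - 10 * BitDurationNs);

    // Worker threads record transitions as they happen: log0 was busy, then idle;
    // log1 has been busy the whole time.
    log0.RegisterBusyPeriod(nowNs - 5 * BitDurationNs);
    log0.RegisterIdlePeriod(nowNs);
    log1.RegisterBusyPeriod(nowNs);

    TCpuLoadLog<5>* logs[] = {&log0, &log1};
    TMinusOneCpuEstimator<5> estimator;
    // Look back over the last 8 bit-periods and return the worst-case added delay.
    return estimator.MaxLatencyIncreaseWithOneLessCpu(logs, 2, nowNs, 8 * BitDurationNs);
}
```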
diff --git a/library/cpp/actors/util/cpu_load_log_ut.cpp b/library/cpp/actors/util/cpu_load_log_ut.cpp
deleted file mode 100644
index 7109123c6e..0000000000
--- a/library/cpp/actors/util/cpu_load_log_ut.cpp
+++ /dev/null
@@ -1,275 +0,0 @@
-#include "cpu_load_log.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-#include <util/system/hp_timer.h>
-#include <util/system/sanitizers.h>
-#include <util/system/thread.h>
-
-Y_UNIT_TEST_SUITE(CpuLoadLog) {
-
- TString PrintBits(ui64 x) {
- TStringStream str;
- for (ui64 i = 0; i < 64; ++i) {
- if (x & (1ull << i)) {
- str << "1";
- } else {
- str << "0";
- }
- }
- return str.Str();
- }
-
- Y_UNIT_TEST(FillAll) {
- TCpuLoadLog<5> log(100*BitDurationNs);
- log.RegisterBusyPeriod(101*BitDurationNs);
- log.RegisterBusyPeriod(163*BitDurationNs);
- log.RegisterBusyPeriod(164*BitDurationNs);
- log.RegisterBusyPeriod(165*BitDurationNs);
- log.RegisterBusyPeriod(331*BitDurationNs);
- log.RegisterBusyPeriod(340*BitDurationNs);
- log.RegisterBusyPeriod(420*BitDurationNs);
- log.RegisterBusyPeriod(511*BitDurationNs);
- //for (ui64 i = 0; i < 5; ++i) {
- // Cerr << "i: " << i << " bits: " << PrintBits(log.Data[i]) << Endl;
- //}
- for (ui64 i = 0; i < 5; ++i) {
- UNIT_ASSERT_C((ui64(log.Data[i]) == ~ui64(0)), "Unequal at " << i << "\n got: " << PrintBits(log.Data[i])
- << "\n expected: " << PrintBits(~ui64(0)));
- }
- }
-
- Y_UNIT_TEST(PartialFill) {
- TCpuLoadLog<5> log(0*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b0ull));
- log.RegisterBusyPeriod(0*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b1ull));
- log.RegisterBusyPeriod(0*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b1ull));
- log.RegisterBusyPeriod(1*BitDurationNs/2);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b1ull));
- log.RegisterBusyPeriod(1*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b11ull));
- log.RegisterIdlePeriod(3*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b11ull));
- log.RegisterBusyPeriod(3*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0b1011ull));
- log.RegisterBusyPeriod(63*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits((~0ull)^0b0100ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0b0ull));
- log.RegisterBusyPeriod(128*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits((~0ull)^0b0100ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0b1ull));
- log.RegisterBusyPeriod(1*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterBusyPeriod(2*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterBusyPeriod(64*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterIdlePeriod(128*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterIdlePeriod(192*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterBusyPeriod(192*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(~0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(~0ull));
- log.RegisterIdlePeriod((192+5*64-1)*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(0b1ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(0ull));
- log.RegisterIdlePeriod((192+15*64)*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[0]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[1]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[2]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[3]), PrintBits(0ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log.Data[4]), PrintBits(0ull));
- }
-
- Y_UNIT_TEST(Estimator) {
- TCpuLoadLog<5> *log[10];
- log[0] = new TCpuLoadLog<5>(0*BitDurationNs);
- log[1] = new TCpuLoadLog<5>(0*BitDurationNs);
- TMinusOneCpuEstimator<5> estimator;
-
-
- for (ui64 i = 0; i < 5*64; i+=2) {
- log[0]->RegisterIdlePeriod(i*BitDurationNs);
- log[0]->RegisterBusyPeriod(i*BitDurationNs);
- }
- log[0]->RegisterIdlePeriod((5*64-2)*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[0]->Data[0]),
- PrintBits(0b0101010101010101010101010101010101010101010101010101010101010101ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[0]->Data[4]),
- PrintBits(0b0101010101010101010101010101010101010101010101010101010101010101ull));
- for (ui64 i = 0; i < 5*64-1; i+=2) {
- log[1]->RegisterIdlePeriod((i+1)*BitDurationNs);
- log[1]->RegisterBusyPeriod((i+1)*BitDurationNs);
- }
- log[1]->RegisterIdlePeriod((5*64-2+1)*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[1]->Data[0]),
- PrintBits(0b1010101010101010101010101010101010101010101010101010101010101010ull));
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[1]->Data[4]),
- PrintBits(0b1010101010101010101010101010101010101010101010101010101010101010ull));
-
- ui64 value = estimator.MaxLatencyIncreaseWithOneLessCpu(log, 2, (5*64)*BitDurationNs-1, 3*64*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(value/BitDurationNs, 1);
-
- value = estimator.MaxLatencyIncreaseWithOneLessCpu(log, 2, (5*64+10)*BitDurationNs, 3*64*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(value/BitDurationNs, 12);
-
- delete log[0];
- delete log[1];
- }
-
- Y_UNIT_TEST(Estimator2) {
- TCpuLoadLog<5> *log[2];
- log[0] = new TCpuLoadLog<5>(0*BitDurationNs);
- log[1] = new TCpuLoadLog<5>(0*BitDurationNs);
- TMinusOneCpuEstimator<5> estimator;
-
- for (ui64 i = 0; i < 5*64; i+=2) {
- log[0]->RegisterIdlePeriod(i*BitDurationNs);
- log[0]->RegisterBusyPeriod(i*BitDurationNs);
- }
- for (ui64 i = 0; i < 5; ++i) {
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[0]->Data[i]),
- PrintBits(0b0101010101010101010101010101010101010101010101010101010101010101ull));
- }
- for (ui64 i = 0; i < 5*64-1; i+=2) {
- log[1]->RegisterIdlePeriod((i+1)*BitDurationNs);
- log[1]->RegisterBusyPeriod((i+1)*BitDurationNs);
- }
- for (ui64 i = 0; i < 5; ++i) {
- UNIT_ASSERT_VALUES_EQUAL(PrintBits(log[1]->Data[i]),
- PrintBits(0b1010101010101010101010101010101010101010101010101010101010101010ull));
- }
-
- log[0]->Data[2] = ~0ull;
- ui64 value = estimator.MaxLatencyIncreaseWithOneLessCpu(log, 2, (5*64-1)*BitDurationNs, 3*64*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(value/BitDurationNs, 32);
-
- delete log[0];
- delete log[1];
- }
-
- Y_UNIT_TEST(Estimator3) {
- TCpuLoadLog<5> *log[3];
- log[0] = new TCpuLoadLog<5>(0*BitDurationNs);
- log[1] = new TCpuLoadLog<5>(0*BitDurationNs);
- log[2] = new TCpuLoadLog<5>(0*BitDurationNs);
- TMinusOneCpuEstimator<5> estimator;
-
- for (ui64 i = 0; i < 5*64; i+=8) {
- log[0]->RegisterIdlePeriod(i*BitDurationNs);
- log[0]->RegisterBusyPeriod((i+3)*BitDurationNs);
- log[1]->RegisterIdlePeriod(i*BitDurationNs);
- log[1]->RegisterBusyPeriod((i+3)*BitDurationNs);
- log[2]->RegisterIdlePeriod(i*BitDurationNs);
- log[2]->RegisterBusyPeriod((i+3)*BitDurationNs);
- }
- for (ui64 i = 0; i < 5; ++i) {
- for (ui64 n = 0; n < 3; ++n) {
- UNIT_ASSERT_VALUES_EQUAL_C(PrintBits(log[n]->Data[i]),
- PrintBits(0b0000111100001111000011110000111100001111000011110000111100001111ull),
- " i: " << i << " n: " << n);
- }
- }
-
- ui64 value = estimator.MaxLatencyIncreaseWithOneLessCpu(log, 3, (5*64-5)*BitDurationNs, 3*64*BitDurationNs);
- UNIT_ASSERT_VALUES_EQUAL(value/BitDurationNs, 4);
-
- delete log[0];
- delete log[1];
- delete log[2];
- }
- /*
- class TWorkerThread : public ISimpleThread {
- private:
- std::function<void()> Func;
- double Time = 0.0;
-
- public:
- TWorkerThread(std::function<void()> func)
- : Func(std::move(func))
- { }
-
- double GetTime() const {
- return Time;
- }
-
- static THolder<TWorkerThread> Spawn(std::function<void()> func) {
- THolder<TWorkerThread> thread = MakeHolder<TWorkerThread>(std::move(func));
- thread->Start();
- return thread;
- }
-
- private:
- void* ThreadProc() noexcept override {
- THPTimer timer;
- Func();
- Time = timer.Passed();
- return nullptr;
- }
- };
-
- void DoConcurrentPushPop(size_t threads, ui64 perThreadCount) {
- // Concurrency factor 4 is up to 16 threads
-
- auto workerFunc = [&](size_t threadIndex) {
- };
-
- TVector<THolder<TWorkerThread>> workers(threads);
- for (size_t i = 0; i < threads; ++i) {
- workers[i] = TWorkerThread::Spawn([workerFunc, i]() {
- workerFunc(i);
- });
- }
-
- double maxTime = 0;
- for (size_t i = 0; i < threads; ++i) {
- workers[i]->Join();
- maxTime = Max(maxTime, workers[i]->GetTime());
- }
-
- UNIT_ASSERT_VALUES_EQUAL(popped, 0u);
-
- Cerr << "Concurrent with " << threads << " threads: " << maxTime << " seconds" << Endl;
- }
-
- void DoConcurrentPushPop_3times(size_t threads, ui64 perThreadCount) {
- for (size_t i = 0; i < 3; ++i) {
- DoConcurrentPushPop(threads, perThreadCount);
- }
- }
-
- static constexpr ui64 PER_THREAD_COUNT = NSan::PlainOrUnderSanitizer(1000000, 100000);
-
- Y_UNIT_TEST(ConcurrentPushPop_1thread) { DoConcurrentPushPop_3times(1, PER_THREAD_COUNT); }
- */
-}
diff --git a/library/cpp/actors/util/cpumask.h b/library/cpp/actors/util/cpumask.h
deleted file mode 100644
index 29741aa1d6..0000000000
--- a/library/cpp/actors/util/cpumask.h
+++ /dev/null
@@ -1,133 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include <library/cpp/containers/stack_vector/stack_vec.h>
-
-#include <util/string/split.h>
-#include <util/generic/yexception.h>
-
-using TCpuId = ui32;
-
-// Simple data structure for operating on a set of CPUs
-struct TCpuMask {
- TStackVec<bool, 1024> Cpus;
-
- // Creates empty mask
- TCpuMask() {}
-
- // Creates mask with single cpu set
- explicit TCpuMask(TCpuId cpuId) {
- Set(cpuId);
- }
-
- // Initialize mask from raw boolean array
- template <class T>
- TCpuMask(const T* cpus, TCpuId size) {
- Cpus.reserve(size);
- for (TCpuId i = 0; i != size; ++i) {
- Cpus.emplace_back(bool(cpus[i]));
- }
- }
-
- // Parse a numerical list of processors. The numbers are separated by commas and may include ranges. For example: 0,5,7,9-11
- explicit TCpuMask(const TString& cpuList) {
- try {
- for (TStringBuf s : StringSplitter(cpuList).Split(',')) {
- TCpuId l, r;
- if (s.find('-') != TString::npos) {
- StringSplitter(s).Split('-').CollectInto(&l, &r);
- } else {
- l = r = FromString<TCpuId>(s);
- }
- if (r >= Cpus.size()) {
- Cpus.resize(r + 1, false);
- }
- for (TCpuId cpu = l; cpu <= r; cpu++) {
- Cpus[cpu] = true;
- }
- }
- } catch (...) {
- ythrow TWithBackTrace<yexception>() << "Exception occurred while parsing cpu list '" << cpuList << "': " << CurrentExceptionMessage();
- }
- }
-
- // Returns size of underlying vector
- TCpuId Size() const {
- return Cpus.size();
- }
-
- // Returns number of set bits in mask
- TCpuId CpuCount() const {
- TCpuId result = 0;
- for (bool value : Cpus) {
- result += value;
- }
- return result;
- }
-
- bool IsEmpty() const {
- for (bool value : Cpus) {
- if (value) {
- return false;
- }
- }
- return true;
- }
-
- bool IsSet(TCpuId cpu) const {
- return cpu < Cpus.size() && Cpus[cpu];
- }
-
- void Set(TCpuId cpu) {
- if (cpu >= Cpus.size()) {
- Cpus.resize(cpu + 1, false);
- }
- Cpus[cpu] = true;
- }
-
- void Reset(TCpuId cpu) {
- if (cpu < Cpus.size()) {
- Cpus[cpu] = false;
- }
- }
-
- void RemoveTrailingZeros() {
- while (!Cpus.empty() && !Cpus.back()) {
- Cpus.pop_back();
- }
- }
-
- explicit operator bool() const {
- return !IsEmpty();
- }
-
- TCpuMask operator &(const TCpuMask& rhs) const {
- TCpuMask result;
- TCpuId size = Max(Size(), rhs.Size());
- result.Cpus.reserve(size);
- for (TCpuId cpu = 0; cpu < size; cpu++) {
- result.Cpus.emplace_back(IsSet(cpu) && rhs.IsSet(cpu));
- }
- return result;
- }
-
- TCpuMask operator |(const TCpuMask& rhs) const {
- TCpuMask result;
- TCpuId size = Max(Size(), rhs.Size());
- result.Cpus.reserve(size);
- for (TCpuId cpu = 0; cpu < size; cpu++) {
- result.Cpus.emplace_back(IsSet(cpu) || rhs.IsSet(cpu));
- }
- return result;
- }
-
- TCpuMask operator -(const TCpuMask& rhs) const {
- TCpuMask result;
- result.Cpus.reserve(Size());
- for (TCpuId cpu = 0; cpu < Size(); cpu++) {
- result.Cpus.emplace_back(IsSet(cpu) && !rhs.IsSet(cpu));
- }
- return result;
- }
-};
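A minimal usage sketch for the mask operations above, assuming the header is on the include path; the cpu-list constructor accepts comma-separated values and ranges such as "0,5,7,9-11":

    bool CheckMaskSketch() {
        TCpuMask allowed("0,5,7,9-11");        // sets cpus 0, 5, 7, 9, 10, 11
        TCpuMask reserved(TCpuId(5));          // mask with the single cpu 5 set
        TCpuMask usable = allowed - reserved;  // cpus 0, 7, 9, 10, 11 remain set
        return usable.IsSet(7) && !usable.IsSet(5) && usable.CpuCount() == 5;
    }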
diff --git a/library/cpp/actors/util/datetime.h b/library/cpp/actors/util/datetime.h
deleted file mode 100644
index cbec5965d6..0000000000
--- a/library/cpp/actors/util/datetime.h
+++ /dev/null
@@ -1,82 +0,0 @@
-#pragma once
-
-#include <util/system/defaults.h>
-#include <util/system/hp_timer.h>
-#include <util/system/platform.h>
-
-#if defined(_win_)
-#include <intrin.h>
-#pragma intrinsic(__rdtsc)
-#endif // _win_
-
-#if defined(_darwin_) && !defined(_x86_)
-#include <mach/mach_time.h>
-#endif
-
-// GetCycleCount() from util/system/datetime.h uses rdtscp, which is more accurate than rdtsc,
-// but rdtscp waits for all preceding instructions to complete, so it can be slower
-Y_FORCE_INLINE ui64 GetCycleCountFast() {
-#if defined(_MSC_VER)
- // Generates the rdtsc instruction, which returns the processor time stamp.
- // The processor time stamp records the number of clock cycles since the last reset.
- return __rdtsc();
-#elif defined(__clang__) && !defined(_arm64_)
- return __builtin_readcyclecounter();
-#elif defined(_x86_64_)
- unsigned hi, lo;
- __asm__ __volatile__("rdtsc"
- : "=a"(lo), "=d"(hi));
- return ((unsigned long long)lo) | (((unsigned long long)hi) << 32);
-#elif defined(_i386_)
- ui64 x;
- __asm__ volatile("rdtsc\n\t"
- : "=A"(x));
- return x;
-#elif defined(_darwin_)
- return mach_absolute_time();
-#elif defined(_arm32_)
- return MicroSeconds();
-#elif defined(_arm64_)
- ui64 x;
-
- __asm__ __volatile__("isb; mrs %0, cntvct_el0"
- : "=r"(x));
-
- return x;
-#else
-#error "unsupported arch"
-#endif
-}
-
-// Fast analog of NHPTimer::GetTime
-Y_FORCE_INLINE void GetTimeFast(NHPTimer::STime* pTime) noexcept {
- *pTime = GetCycleCountFast();
-}
-
-namespace NActors {
- inline double Ts2Ns(ui64 ts) {
- return NHPTimer::GetSeconds(ts) * 1e9;
- }
-
- inline double Ts2Us(ui64 ts) {
- return NHPTimer::GetSeconds(ts) * 1e6;
- }
-
- inline double Ts2Ms(ui64 ts) {
- return NHPTimer::GetSeconds(ts) * 1e3;
- }
-
- inline ui64 Us2Ts(double us) {
- return ui64(NHPTimer::GetClockRate() * us / 1e6);
- }
-
- struct TTimeTracker {
- ui64 Ts;
- TTimeTracker(): Ts(GetCycleCountFast()) {}
- ui64 Elapsed() {
- ui64 ts = GetCycleCountFast();
- std::swap(Ts, ts);
- return Ts - ts;
- }
- };
-}
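A minimal timing sketch built on the helpers above; DoWork is a hypothetical placeholder for the code being measured:

    void TimeWorkSketch() {
        NActors::TTimeTracker tracker;       // captures GetCycleCountFast() at construction
        DoWork();                            // hypothetical workload
        ui64 cycles = tracker.Elapsed();     // cycles since construction (also resets the tracker)
        Cerr << "work took " << NActors::Ts2Us(cycles) << " us" << Endl;
    }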
diff --git a/library/cpp/actors/util/defs.h b/library/cpp/actors/util/defs.h
deleted file mode 100644
index 70f969753e..0000000000
--- a/library/cpp/actors/util/defs.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#pragma once
-
-// unique tag to fix pragma once gcc glueing: ./library/actors/util/defs.h
-
-#include <util/system/defaults.h>
-#include <util/generic/bt_exception.h>
-#include <util/generic/noncopyable.h>
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/generic/yexception.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/align.h>
-#include <util/generic/vector.h>
-#include <util/datetime/base.h>
-#include <util/generic/ylimits.h>
-#include "intrinsics.h"
diff --git a/library/cpp/actors/util/funnel_queue.h b/library/cpp/actors/util/funnel_queue.h
deleted file mode 100644
index 15af57b121..0000000000
--- a/library/cpp/actors/util/funnel_queue.h
+++ /dev/null
@@ -1,240 +0,0 @@
-#pragma once
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/generic/noncopyable.h>
-
-template <typename ElementType>
-class TFunnelQueue: private TNonCopyable {
-public:
- TFunnelQueue() noexcept
- : Front(nullptr)
- , Back(nullptr)
- {
- }
-
- virtual ~TFunnelQueue() noexcept {
- for (auto entry = Front; entry; entry = DeleteEntry(entry))
- continue;
- }
-
- /// Push an element. Can be used from many threads. Returns true if it is the first element.
- bool
- Push(ElementType&& element) noexcept {
- TEntry* const next = NewEntry(static_cast<ElementType&&>(element));
- TEntry* const prev = AtomicSwap(&Back, next);
- AtomicSet(prev ? prev->Next : Front, next);
- return !prev;
- }
-
- /// Extract the top element. Must be used from only one thread. Returns true if more elements remain.
- bool
- Pop() noexcept {
- if (TEntry* const top = AtomicGet(Front)) {
- const auto last = AtomicCas(&Back, nullptr, top);
- if (last) // This is the last element in the queue. The queue is now empty.
- AtomicCas(&Front, nullptr, top);
- else // This element is not the last one.
- for (;;) {
- if (const auto next = AtomicGet(top->Next)) {
- AtomicSet(Front, next);
- break;
- }
- // But Next is still null. Spin until the producer publishes it.
- }
-
- DeleteEntry(top);
- return !last;
- }
-
- return false;
- }
-
- /// Peek at the top element. Must be used from only one thread.
- ElementType&
- Top() const noexcept {
- return AtomicGet(Front)->Data;
- }
-
- bool
- IsEmpty() const noexcept {
- return !AtomicGet(Front);
- }
-
-protected:
- class TEntry: private TNonCopyable {
- friend class TFunnelQueue;
-
- private:
- explicit TEntry(ElementType&& element) noexcept
- : Data(static_cast<ElementType&&>(element))
- , Next(nullptr)
- {
- }
-
- ~TEntry() noexcept {
- }
-
- public:
- ElementType Data;
- TEntry* volatile Next;
- };
-
- TEntry* volatile Front;
- TEntry* volatile Back;
-
- virtual TEntry* NewEntry(ElementType&& element) noexcept {
- return new TEntry(static_cast<ElementType&&>(element));
- }
-
- virtual TEntry* DeleteEntry(TEntry* entry) noexcept {
- const auto next = entry->Next;
- delete entry;
- return next;
- }
-
-protected:
- struct TEntryIter {
- TEntry* ptr;
-
- ElementType& operator*() {
- return ptr->Data;
- }
-
- ElementType* operator->() {
- return &ptr->Data;
- }
-
- TEntryIter& operator++() {
- ptr = AtomicGet(ptr->Next);
- return *this;
- }
-
- bool operator!=(const TEntryIter& other) const {
- return ptr != other.ptr;
- }
-
- bool operator==(const TEntryIter& other) const {
- return ptr == other.ptr;
- }
- };
-
- struct TConstEntryIter {
- const TEntry* ptr;
-
- const ElementType& operator*() {
- return ptr->Data;
- }
-
- const ElementType* operator->() {
- return &ptr->Data;
- }
-
- TEntryIter& operator++() {
- ptr = AtomicGet(ptr->Next);
- return *this;
- }
-
- bool operator!=(const TConstEntryIter& other) const {
- return ptr != other.ptr;
- }
-
- bool operator==(const TConstEntryIter& other) const {
- return ptr == other.ptr;
- }
- };
-
-public:
- using const_iterator = TConstEntryIter;
- using iterator = TEntryIter;
-
- iterator begin() {
- return {AtomicGet(Front)};
- }
- const_iterator cbegin() {
- return {AtomicGet(Front)};
- }
- const_iterator begin() const {
- return {AtomicGet(Front)};
- }
-
- iterator end() {
- return {nullptr};
- }
- const_iterator cend() {
- return {nullptr};
- }
- const_iterator end() const {
- return {nullptr};
- }
-};
-
-template <typename ElementType>
-class TPooledFunnelQueue: public TFunnelQueue<ElementType> {
-public:
- TPooledFunnelQueue() noexcept
- : Stack(nullptr)
- {
- }
-
- virtual ~TPooledFunnelQueue() noexcept override {
- for (auto entry = TBase::Front; entry; entry = TBase::DeleteEntry(entry))
- continue;
- for (auto entry = Stack; entry; entry = TBase::DeleteEntry(entry))
- continue;
- TBase::Back = TBase::Front = Stack = nullptr;
- }
-
-private:
- typedef TFunnelQueue<ElementType> TBase;
-
- typename TBase::TEntry* volatile Stack;
-
-protected:
- virtual typename TBase::TEntry* NewEntry(ElementType&& element) noexcept override {
- while (const auto top = AtomicGet(Stack))
- if (AtomicCas(&Stack, top->Next, top)) {
- top->Data = static_cast<ElementType&&>(element);
- AtomicSet(top->Next, nullptr);
- return top;
- }
-
- return TBase::NewEntry(static_cast<ElementType&&>(element));
- }
-
- virtual typename TBase::TEntry* DeleteEntry(typename TBase::TEntry* entry) noexcept override {
- entry->Data = ElementType();
- const auto next = entry->Next;
- do
- AtomicSet(entry->Next, AtomicGet(Stack));
- while (!AtomicCas(&Stack, entry, entry->Next));
- return next;
- }
-};
-
-template <typename ElementType, template <typename T> class TQueueType = TFunnelQueue>
-class TCountedFunnelQueue: public TQueueType<ElementType> {
-public:
- TCountedFunnelQueue() noexcept
- : Count(0)
- {
- }
-
- TAtomicBase GetSize() const noexcept {
- return AtomicGet(Count);
- }
-
-private:
- typedef TQueueType<ElementType> TBase;
-
- virtual typename TBase::TEntry* NewEntry(ElementType&& element) noexcept override {
- AtomicAdd(Count, 1);
- return TBase::NewEntry(static_cast<ElementType&&>(element));
- }
-
- virtual typename TBase::TEntry* DeleteEntry(typename TBase::TEntry* entry) noexcept override {
- AtomicSub(Count, 1);
- return TBase::DeleteEntry(entry);
- }
-
- TAtomic Count;
-};
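A minimal multi-producer / single-consumer sketch for TFunnelQueue; Push may be called from many threads, while Top and Pop must stay on one consumer thread. ProduceValue and HandleValue are hypothetical helpers:

    TFunnelQueue<int> Queue;               // element type is illustrative

    void ProducerThread() {                // any number of producer threads
        if (Queue.Push(ProduceValue())) {  // ProduceValue(): hypothetical, returns the next int
            // true: the queue was empty before this push, e.g. wake the consumer here
        }
    }

    void ConsumerThread() {                // exactly one consumer thread
        while (!Queue.IsEmpty()) {
            HandleValue(Queue.Top());      // hypothetical handler; Top() peeks without removing
            Queue.Pop();                   // returns true while more elements remain
        }
    }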
diff --git a/library/cpp/actors/util/futex.h b/library/cpp/actors/util/futex.h
deleted file mode 100644
index c193f8d128..0000000000
--- a/library/cpp/actors/util/futex.h
+++ /dev/null
@@ -1,13 +0,0 @@
-#pragma once
-
-#ifdef _linux_
-
-#include <linux/futex.h>
-#include <unistd.h>
-#include <sys/syscall.h>
-
-static long SysFutex(void* addr1, int op, int val1, struct timespec* timeout, void* addr2, int val3) {
- return syscall(SYS_futex, addr1, op, val1, timeout, addr2, val3);
-}
-
-#endif
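A sketch of the usual wait/wake pair on top of SysFutex (Linux only); FUTEX_WAIT sleeps while *addr still equals the expected value, and FUTEX_WAKE wakes up to the requested number of waiters:

    #ifdef _linux_
    static void FutexWait(int* addr, int expected) {
        // Returns immediately if *addr != expected, otherwise blocks until woken (or a spurious wakeup).
        SysFutex(addr, FUTEX_WAIT, expected, nullptr, nullptr, 0);
    }

    static void FutexWakeOne(int* addr) {
        SysFutex(addr, FUTEX_WAKE, 1, nullptr, nullptr, 0);
    }
    #endif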
diff --git a/library/cpp/actors/util/intrinsics.h b/library/cpp/actors/util/intrinsics.h
deleted file mode 100644
index c02b633b70..0000000000
--- a/library/cpp/actors/util/intrinsics.h
+++ /dev/null
@@ -1,97 +0,0 @@
-#pragma once
-
-#include <util/system/defaults.h>
-#include <library/cpp/deprecated/atomic/atomic.h>
-#include <util/system/spinlock.h>
-
-#include <library/cpp/sse/sse.h> // The header chooses appropriate SSE support
-
-static_assert(sizeof(TAtomic) == 8, "expect sizeof(TAtomic) == 8");
-
-// We need explicit 32-bit operations to keep cache-line-friendly packs,
-// so we define some atomics in addition to the Arcadia ones.
-#ifdef _win_
-#pragma intrinsic(_InterlockedCompareExchange)
-#pragma intrinsic(_InterlockedExchangeAdd)
-#pragma intrinsic(_InterlockedIncrement)
-#pragma intrinsic(_InterlockedDecrement)
-#endif
-
-inline bool AtomicUi32Cas(volatile ui32* a, ui32 exchange, ui32 compare) {
-#ifdef _win_
- return _InterlockedCompareExchange((volatile long*)a, exchange, compare) == (long)compare;
-#else
- ui32 expected = compare;
- return __atomic_compare_exchange_n(a, &expected, exchange, false, __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
-#endif
-}
-
-inline ui32 AtomicUi32Add(volatile ui32* a, ui32 add) {
-#ifdef _win_
- return _InterlockedExchangeAdd((volatile long*)a, add) + add;
-#else
- return __atomic_add_fetch(a, add, __ATOMIC_SEQ_CST);
-#endif
-}
-
-inline ui32 AtomicUi32Sub(volatile ui32* a, ui32 sub) {
-#ifdef _win_
- return _InterlockedExchangeAdd((volatile long*)a, -(long)sub) - sub;
-#else
- return __atomic_sub_fetch(a, sub, __ATOMIC_SEQ_CST);
-#endif
-}
-
-inline ui32 AtomicUi32Increment(volatile ui32* a) {
-#ifdef _win_
- return _InterlockedIncrement((volatile long*)a);
-#else
- return __atomic_add_fetch(a, 1, __ATOMIC_SEQ_CST);
-#endif
-}
-
-inline ui32 AtomicUi32Decrement(volatile ui32* a) {
-#ifdef _win_
- return _InterlockedDecrement((volatile long*)a);
-#else
- return __atomic_sub_fetch(a, 1, __ATOMIC_SEQ_CST);
-#endif
-}
-
-template <typename T>
-inline void AtomicStore(volatile T* a, T x) {
- static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-#ifdef _win_
- *a = x;
-#else
- __atomic_store_n(a, x, __ATOMIC_RELEASE);
-#endif
-}
-
-template <typename T>
-inline void RelaxedStore(volatile T* a, T x) {
- static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-#ifdef _win_
- *a = x;
-#else
- __atomic_store_n(a, x, __ATOMIC_RELAXED);
-#endif
-}
-
-template <typename T>
-inline T AtomicLoad(volatile T* a) {
-#ifdef _win_
- return *a;
-#else
- return __atomic_load_n(a, __ATOMIC_ACQUIRE);
-#endif
-}
-
-template <typename T>
-inline T RelaxedLoad(volatile T* a) {
-#ifdef _win_
- return *a;
-#else
- return __atomic_load_n(a, __ATOMIC_RELAXED);
-#endif
-}
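A small sketch of how the explicit 32-bit helpers above are typically used: two counters packed into a single 8-byte slot (the cache-line-friendly layout the comment refers to), plus a CAS-based claim flag. The type and field names are illustrative:

    struct TPackedCounters {
        volatile ui32 Enqueued = 0;
        volatile ui32 Dequeued = 0;   // both counters share one 8-byte pack
    };

    void OnEnqueue(TPackedCounters& counters) {
        AtomicUi32Increment(&counters.Enqueued);
    }

    bool TryClaim(volatile ui32* flag) {
        // Atomically flips *flag from 0 to 1; returns false if another thread already claimed it.
        return AtomicUi32Cas(flag, /*exchange=*/1, /*compare=*/0);
    }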
diff --git a/library/cpp/actors/util/local_process_key.h b/library/cpp/actors/util/local_process_key.h
deleted file mode 100644
index bff8bef81b..0000000000
--- a/library/cpp/actors/util/local_process_key.h
+++ /dev/null
@@ -1,157 +0,0 @@
-#pragma once
-
-#include <util/string/builder.h>
-#include <util/system/mutex.h>
-#include <util/generic/strbuf.h>
-#include <util/generic/vector.h>
-#include <util/generic/hash.h>
-#include <util/generic/singleton.h>
-#include <util/generic/serialized_enum.h>
-
-class TLocalProcessKeyStateIndexLimiter {
-public:
- static constexpr ui32 GetMaxKeysCount() {
- return 10000;
- }
-};
-
-template <class T>
-class TLocalProcessKeyStateIndexConstructor {
-public:
-};
-
-template <typename T>
-class TLocalProcessKeyState {
-
-template <typename U, const char* Name>
-friend class TLocalProcessKey;
-template <typename U, class TClass, ui32 KeyLengthLimit>
-friend class TLocalProcessExtKey;
-template <typename U, typename EnumT>
-friend class TEnumProcessKey;
-
-public:
- static TLocalProcessKeyState& GetInstance() {
- return *Singleton<TLocalProcessKeyState<T>>();
- }
-
- ui32 GetCount() const {
- return MaxKeysCount;
- }
-
- TStringBuf GetNameByIndex(size_t index) const {
- Y_ABORT_UNLESS(index < Names.size());
- return Names[index];
- }
-
- size_t GetIndexByName(TStringBuf name) const {
- TGuard<TMutex> g(Mutex);
- auto it = Map.find(name);
- Y_ENSURE(it != Map.end());
- return it->second;
- }
-
- TLocalProcessKeyState() {
- Names.resize(MaxKeysCount);
- }
-
- size_t Register(TStringBuf name) {
- TGuard<TMutex> g(Mutex);
- auto it = Map.find(name);
- if (it != Map.end()) {
- return it->second;
- }
- const ui32 index = TLocalProcessKeyStateIndexConstructor<T>::BuildCurrentIndex(name, Names.size());
- auto x = Map.emplace(name, index);
- if (x.second) {
- Y_ABORT_UNLESS(index < Names.size(), "too many actors or tags registered for memory monitoring");
- Names[index] = name;
- }
-
- return x.first->second;
- }
-
-private:
-
- static constexpr ui32 MaxKeysCount = TLocalProcessKeyStateIndexLimiter::GetMaxKeysCount();
-
-private:
- TVector<TString> Names;
- THashMap<TString, size_t> Map;
- TMutex Mutex;
-};
-
-template <typename T, const char* Name>
-class TLocalProcessKey {
-public:
- static TStringBuf GetName() {
- return Name;
- }
-
- static size_t GetIndex() {
- return Index;
- }
-
-private:
- inline static size_t Index = TLocalProcessKeyState<T>::GetInstance().Register(Name);
-};
-
-template <typename T, class TClass, ui32 KeyLengthLimit = 0>
-class TLocalProcessExtKey {
-public:
- static TStringBuf GetName() {
- return Name;
- }
-
- static size_t GetIndex() {
- return Index;
- }
-
-private:
-
- static TString TypeNameRobust() {
- const TString className = TypeName<TClass>();
- if (KeyLengthLimit && className.size() > KeyLengthLimit) {
- return className.substr(0, KeyLengthLimit - 3) + "...";
- } else {
- return className;
- }
- }
-
- static const inline TString Name = TypeName<TClass>();
- inline static size_t Index = TLocalProcessKeyState<T>::GetInstance().Register(TypeNameRobust());
-};
-
-template <typename T, typename EnumT>
-class TEnumProcessKey {
-public:
- static TStringBuf GetName(const EnumT key) {
- return TLocalProcessKeyState<T>::GetInstance().GetNameByIndex(GetIndex(key));
- }
-
- static size_t GetIndex(const EnumT key) {
- ui32 index = static_cast<ui32>(key);
- Y_ABORT_UNLESS(index < Enum2Index.size());
- return Enum2Index[index];
- }
-
-private:
- inline static TVector<size_t> RegisterAll() {
- static_assert(std::is_enum<EnumT>::value, "Enum is required");
-
- TVector<size_t> enum2Index;
- auto names = GetEnumNames<EnumT>();
- ui32 maxId = 0;
- for (const auto& [k, v] : names) {
- maxId = Max(maxId, static_cast<ui32>(k));
- }
- enum2Index.resize(maxId + 1);
- for (const auto& [k, v] : names) {
- ui32 enumId = static_cast<ui32>(k);
- enum2Index[enumId] = TLocalProcessKeyState<T>::GetInstance().Register(v);
- }
- return enum2Index;
- }
-
- inline static TVector<size_t> Enum2Index = RegisterAll();
-};
diff --git a/library/cpp/actors/util/memory_track.cpp b/library/cpp/actors/util/memory_track.cpp
deleted file mode 100644
index 5f422116be..0000000000
--- a/library/cpp/actors/util/memory_track.cpp
+++ /dev/null
@@ -1,38 +0,0 @@
-#include "memory_track.h"
-#include "memory_tracker.h"
-
-namespace NActors {
-namespace NMemory {
-
-namespace NPrivate {
-
-TThreadLocalInfo::TThreadLocalInfo()
- : Metrics(TMemoryTracker::Instance()->GetCount())
-{
- TMemoryTracker::Instance()->OnCreateThread(this);
-}
-
-TThreadLocalInfo::~TThreadLocalInfo() {
- TMemoryTracker::Instance()->OnDestroyThread(this);
-}
-
-TMetric* TThreadLocalInfo::GetMetric(size_t index) {
- if (Y_UNLIKELY(index >= Metrics.size())) {
- return &Null;
- }
- return &Metrics[index];
-}
-
-const std::vector<TMetric>& TThreadLocalInfo::GetMetrics() const {
- return Metrics;
-}
-
-size_t TBaseLabel::RegisterStaticMemoryLabel(const char* name, bool hasSensor) {
- return TMemoryTracker::Instance()->RegisterStaticMemoryLabel(name, hasSensor);
-}
-
-}
-
-}
-}
-
diff --git a/library/cpp/actors/util/memory_track.h b/library/cpp/actors/util/memory_track.h
deleted file mode 100644
index 6035333eeb..0000000000
--- a/library/cpp/actors/util/memory_track.h
+++ /dev/null
@@ -1,293 +0,0 @@
-#pragma once
-
-#include <vector>
-
-#include <util/system/type_name.h>
-#include <util/thread/singleton.h>
-
-#define ENABLE_MEMORY_TRACKING
-
-namespace NActors {
-namespace NMemory {
-
-namespace NPrivate {
-
-class TMetric {
- std::atomic<ssize_t> Memory;
- std::atomic<ssize_t> Count;
-
- void Copy(const TMetric& other) {
- Memory.store(other.GetMemory(), std::memory_order_relaxed);
- Count.store(other.GetCount(), std::memory_order_relaxed);
- }
-
-public:
- TMetric()
- : Memory(0)
- , Count(0)
- {}
-
- inline TMetric(const TMetric& other) {
- Copy(other);
- }
-
- inline TMetric(TMetric&& other) {
- Copy(other);
- }
-
- inline TMetric& operator=(const TMetric& other) {
- Copy(other);
- return *this;
- }
-
- inline TMetric& operator=(TMetric&& other) {
- Copy(other);
- return *this;
- }
-
- inline ssize_t GetMemory() const {
- return Memory.load(std::memory_order_relaxed);
- }
- inline void SetMemory(ssize_t value) {
- Memory.store(value, std::memory_order_relaxed);
- }
-
- inline ssize_t GetCount() const {
- return Count.load(std::memory_order_relaxed);
- }
- inline void SetCount(ssize_t value) {
- Count.store(value, std::memory_order_relaxed);
- }
-
- inline void operator+=(const TMetric& other) {
- SetMemory(GetMemory() + other.GetMemory());
- SetCount(GetCount() + other.GetCount());
- }
-
- inline void CalculatePeak(const TMetric& other) {
- SetMemory(Max(GetMemory(), other.GetMemory()));
- SetCount(Max(GetCount(), other.GetCount()));
- }
-
- inline void Add(size_t size) {
- SetMemory(GetMemory() + size);
- SetCount(GetCount() + 1);
- }
-
- inline void Sub(size_t size) {
- SetMemory(GetMemory() - size);
- SetCount(GetCount() - 1);
- }
-};
-
-
-class TThreadLocalInfo {
-public:
- TThreadLocalInfo();
- ~TThreadLocalInfo();
-
- TMetric* GetMetric(size_t index);
- const std::vector<TMetric>& GetMetrics() const;
-
-private:
- std::vector<TMetric> Metrics;
-
- inline static TMetric Null = {};
-};
-
-
-class TBaseLabel {
-protected:
- static size_t RegisterStaticMemoryLabel(const char* name, bool hasSensor);
-
- inline static TMetric* GetLocalMetric(size_t index) {
- return FastTlsSingleton<TThreadLocalInfo>()->GetMetric(index);
- }
-};
-
-
-template <const char* Name>
-class TNameLabel
- : TBaseLabel
-{
-public:
- static void Add(size_t size) {
-#if defined(ENABLE_MEMORY_TRACKING)
- Y_UNUSED(MetricInit);
-
- if (Y_UNLIKELY(!Metric)) {
- Metric = GetLocalMetric(Index);
- }
-
- Metric->Add(size);
-#else
- Y_UNUSED(size);
-#endif
- }
-
- static void Sub(size_t size) {
-#if defined(ENABLE_MEMORY_TRACKING)
- Y_UNUSED(MetricInit);
-
- if (Y_UNLIKELY(!Metric)) {
- Metric = GetLocalMetric(Index);
- }
-
- Metric->Sub(size);
-#else
- Y_UNUSED(size);
-#endif
- }
-
-private:
-#if defined(ENABLE_MEMORY_TRACKING)
- inline static size_t Index = Max<size_t>();
- inline static struct TMetricInit {
- TMetricInit() {
- Index = RegisterStaticMemoryLabel(Name, true);
- }
- } MetricInit;
-
- inline static thread_local TMetric* Metric = nullptr;
-#endif
-};
-
-
-template <typename TType>
-class TTypeLabel
- : TBaseLabel
-{
-public:
- static void Add(size_t size) {
-#if defined(ENABLE_MEMORY_TRACKING)
- Y_UNUSED(MetricInit);
-
- if (Y_UNLIKELY(!Metric)) {
- Metric = GetLocalMetric(Index);
- }
-
- Metric->Add(size);
-#else
- Y_UNUSED(size);
-#endif
- }
-
- static void Sub(size_t size) {
-#if defined(ENABLE_MEMORY_TRACKING)
- Y_UNUSED(MetricInit);
-
- if (Y_UNLIKELY(!Metric)) {
- Metric = GetLocalMetric(Index);
- }
-
- Metric->Sub(size);
-#else
- Y_UNUSED(size);
-#endif
- }
-
-private:
-#if defined(ENABLE_MEMORY_TRACKING)
- inline static size_t Index = Max<size_t>();
- inline static struct TMetricInit {
- TMetricInit() {
- Index = RegisterStaticMemoryLabel(TypeName<TType>().c_str(), false);
- }
- } MetricInit;
-
- inline static thread_local TMetric* Metric = nullptr;
-#endif
-};
-
-
-template <typename T>
-struct TTrackHelper {
-#if defined(ENABLE_MEMORY_TRACKING)
- void* operator new(size_t size) {
- T::Add(size);
- return malloc(size);
- }
-
- void* operator new[](size_t size) {
- T::Add(size);
- return malloc(size);
- }
-
- void operator delete(void* ptr, size_t size) {
- T::Sub(size);
- free(ptr);
- }
-
- void operator delete[](void* ptr, size_t size) {
- T::Sub(size);
- free(ptr);
- }
-#endif
-};
-
-template <typename TType, typename T>
-struct TAllocHelper {
- typedef size_t size_type;
- typedef TType value_type;
- typedef TType* pointer;
- typedef const TType* const_pointer;
-
- struct propagate_on_container_copy_assignment : public std::false_type {};
- struct propagate_on_container_move_assignment : public std::false_type {};
- struct propagate_on_container_swap : public std::false_type {};
-
- pointer allocate(size_type n, const void* hint = nullptr) {
- Y_UNUSED(hint);
- auto size = n * sizeof(TType);
- T::Add(size);
- return (pointer)malloc(size);
- }
-
- void deallocate(pointer ptr, size_t n) {
- auto size = n * sizeof(TType);
- T::Sub(size);
- free((void*)ptr);
- }
-};
-
-} // NPrivate
-
-
-template <const char* Name>
-using TLabel = NPrivate::TNameLabel<Name>;
-
-template <typename TType, const char* Name = nullptr>
-struct TTrack
- : public NPrivate::TTrackHelper<NPrivate::TNameLabel<Name>>
-{
-};
-
-template <typename TType>
-struct TTrack<TType, nullptr>
- : public NPrivate::TTrackHelper<NPrivate::TTypeLabel<TType>>
-{
-};
-
-template <typename TType, const char* Name = nullptr>
-struct TAlloc
- : public NPrivate::TAllocHelper<TType, NPrivate::TNameLabel<Name>>
-{
- template<typename U>
- struct rebind {
- typedef TAlloc<U, Name> other;
- };
-};
-
-template <typename TType>
-struct TAlloc<TType, nullptr>
- : public NPrivate::TAllocHelper<TType, NPrivate::TTypeLabel<TType>>
-{
- template<typename U>
- struct rebind {
- typedef TAlloc<U> other;
- };
-};
-
-}
-}
-
diff --git a/library/cpp/actors/util/memory_tracker.cpp b/library/cpp/actors/util/memory_tracker.cpp
deleted file mode 100644
index 8a12452c71..0000000000
--- a/library/cpp/actors/util/memory_tracker.cpp
+++ /dev/null
@@ -1,103 +0,0 @@
-#include "memory_tracker.h"
-
-#include <util/generic/xrange.h>
-
-namespace NActors {
-namespace NMemory {
-
-namespace NPrivate {
-
-TMemoryTracker* TMemoryTracker::Instance() {
- return SingletonWithPriority<TMemoryTracker, 0>();
-}
-
-void TMemoryTracker::Initialize() {
- GlobalMetrics.resize(Indices.size());
-}
-
-const std::map<TString, size_t>& TMemoryTracker::GetMetricIndices() const {
- return Indices;
-}
-
-const std::unordered_set<size_t>& TMemoryTracker::GetSensors() const {
- return Sensors;
-}
-
-TString TMemoryTracker::GetName(size_t index) const {
- return Names[index];
-}
-
-size_t TMemoryTracker::GetCount() const {
- return Indices.size();
-}
-
-void TMemoryTracker::GatherMetrics(std::vector<TMetric>& metrics) const {
- metrics.resize(0);
- auto count = GetCount();
-
- if (!count || GlobalMetrics.size() != count) {
- return;
- }
-
- TReadGuard guard(LockThreadInfo);
-
- metrics.resize(count);
- for (size_t i : xrange(count)) {
- metrics[i] += GlobalMetrics[i];
- }
-
- for (auto info : ThreadInfo) {
- auto& localMetrics = info->GetMetrics();
- if (localMetrics.size() == count) {
- for (size_t i : xrange(count)) {
- metrics[i] += localMetrics[i];
- }
- }
- }
-}
-
-size_t TMemoryTracker::RegisterStaticMemoryLabel(const char* name, bool hasSensor) {
- size_t index = 0;
- auto found = Indices.find(name);
- if (found == Indices.end()) {
- TString str(name);
- auto next = Names.size();
- Indices.emplace(str, next);
- Names.push_back(str);
- index = next;
- } else {
- index = found->second;
- }
-
- if (hasSensor) {
- Sensors.emplace(index);
- }
- return index;
-}
-
-void TMemoryTracker::OnCreateThread(TThreadLocalInfo* info) {
- TWriteGuard guard(LockThreadInfo);
- ThreadInfo.insert(info);
-}
-
-void TMemoryTracker::OnDestroyThread(TThreadLocalInfo* info) {
- TWriteGuard guard(LockThreadInfo);
-
- auto count = GetCount();
- if (count && GlobalMetrics.size() == count) {
- const auto& localMetrics = info->GetMetrics();
- if (localMetrics.size() == count) {
- for (size_t i : xrange(count)) {
- GlobalMetrics[i] += localMetrics[i];
- }
- }
- }
-
- ThreadInfo.erase(info);
-}
-
-}
-
-}
-}
-
diff --git a/library/cpp/actors/util/memory_tracker.h b/library/cpp/actors/util/memory_tracker.h
deleted file mode 100644
index e74508191b..0000000000
--- a/library/cpp/actors/util/memory_tracker.h
+++ /dev/null
@@ -1,53 +0,0 @@
-#pragma once
-
-#include "memory_track.h"
-
-#include <map>
-#include <unordered_map>
-#include <unordered_set>
-
-#include <util/system/rwlock.h>
-
-namespace NActors {
-namespace NMemory {
-
-namespace NPrivate {
-
-class TMemoryTracker {
-public:
- static TMemoryTracker* Instance();
-
- void Initialize();
-
- const std::map<TString, size_t>& GetMetricIndices() const;
- const std::unordered_set<size_t>& GetSensors() const;
- TString GetName(size_t index) const;
- size_t GetCount() const;
-
- void GatherMetrics(std::vector<TMetric>& metrics) const;
-
-private:
- size_t RegisterStaticMemoryLabel(const char* name, bool hasSensor);
-
- void OnCreateThread(TThreadLocalInfo* info);
- void OnDestroyThread(TThreadLocalInfo* info);
-
-private:
- std::map<TString, size_t> Indices;
- std::vector<TString> Names;
-
- std::vector<TMetric> GlobalMetrics;
-
- std::unordered_set<size_t> Sensors;
-
- std::unordered_set<TThreadLocalInfo*> ThreadInfo;
- TRWMutex LockThreadInfo;
-
- friend class TThreadLocalInfo;
- friend class TBaseLabel;
-};
-
-}
-
-}
-}
diff --git a/library/cpp/actors/util/memory_tracker_ut.cpp b/library/cpp/actors/util/memory_tracker_ut.cpp
deleted file mode 100644
index 1b8eff7cc5..0000000000
--- a/library/cpp/actors/util/memory_tracker_ut.cpp
+++ /dev/null
@@ -1,263 +0,0 @@
-#include "memory_tracker.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/system/hp_timer.h>
-#include <util/system/thread.h>
-
-namespace NActors {
-namespace NMemory {
-
-Y_UNIT_TEST_SUITE(TMemoryTrackerTest) {
-
-#if defined(ENABLE_MEMORY_TRACKING)
-
-using namespace NPrivate;
-
-size_t FindLabelIndex(const char* label) {
- auto indices = TMemoryTracker::Instance()->GetMetricIndices();
- auto it = indices.find(label);
- UNIT_ASSERT(it != indices.end());
- return it->second;
-}
-
-
-struct TTypeLabeled
- : public NActors::NMemory::TTrack<TTypeLabeled>
-{
- char payload[16];
-};
-
-static constexpr char NamedLabel[] = "NamedLabel";
-
-struct TNameLabeled
- : public NActors::NMemory::TTrack<TNameLabeled, NamedLabel>
-{
- char payload[32];
-};
-
-#ifndef _win_
-Y_UNIT_TEST(Gathering)
-{
- TMemoryTracker::Instance()->Initialize();
-
- auto* typed = new TTypeLabeled;
- auto* typedArray = new TTypeLabeled[3];
-
- auto* named = new TNameLabeled;
- auto* namedArray = new TNameLabeled[5];
- NActors::NMemory::TLabel<NamedLabel>::Add(100);
-
- std::vector<TMetric> metrics;
- TMemoryTracker::Instance()->GatherMetrics(metrics);
-
- auto typeIndex = FindLabelIndex(TypeName<TTypeLabeled>().c_str());
- UNIT_ASSERT(typeIndex < metrics.size());
- UNIT_ASSERT(metrics[typeIndex].GetMemory() == sizeof(TTypeLabeled) * 4 + sizeof(size_t));
- UNIT_ASSERT(metrics[typeIndex].GetCount() == 2);
-
- auto nameIndex = FindLabelIndex(NamedLabel);
- UNIT_ASSERT(nameIndex < metrics.size());
- UNIT_ASSERT(metrics[nameIndex].GetMemory() == sizeof(TNameLabeled) * 6 + sizeof(size_t) + 100);
- UNIT_ASSERT(metrics[nameIndex].GetCount() == 3);
-
- NActors::NMemory::TLabel<NamedLabel>::Sub(100);
- delete [] namedArray;
- delete named;
-
- delete [] typedArray;
- delete typed;
-
- TMemoryTracker::Instance()->GatherMetrics(metrics);
-
- UNIT_ASSERT(metrics[typeIndex].GetMemory() == 0);
- UNIT_ASSERT(metrics[typeIndex].GetCount() == 0);
-
- UNIT_ASSERT(metrics[nameIndex].GetMemory() == 0);
- UNIT_ASSERT(metrics[nameIndex].GetCount() == 0);
-}
-#endif
-
-static constexpr char InContainerLabel[] = "InContainerLabel";
-
-struct TInContainer {
- char payload[16];
-};
-
-Y_UNIT_TEST(Containers) {
- TMemoryTracker::Instance()->Initialize();
-
- std::vector<TInContainer, NActors::NMemory::TAlloc<TInContainer>> vecT;
- vecT.resize(5);
-
- std::vector<TInContainer, NActors::NMemory::TAlloc<TInContainer, InContainerLabel>> vecN;
- vecN.resize(7);
-
- using TKey = int;
-
- std::map<TKey, TInContainer, std::less<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>>> mapT;
- mapT.emplace(0, TInContainer());
- mapT.emplace(1, TInContainer());
-
- std::map<TKey, TInContainer, std::less<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>, InContainerLabel>> mapN;
- mapN.emplace(0, TInContainer());
-
- std::unordered_map<TKey, TInContainer, std::hash<TKey>, std::equal_to<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>>> umapT;
- umapT.emplace(0, TInContainer());
-
- std::unordered_map<TKey, TInContainer, std::hash<TKey>, std::equal_to<TKey>,
- NActors::NMemory::TAlloc<std::pair<const TKey, TInContainer>, InContainerLabel>> umapN;
- umapN.emplace(0, TInContainer());
- umapN.emplace(1, TInContainer());
-
- std::vector<TMetric> metrics;
- TMemoryTracker::Instance()->GatherMetrics(metrics);
-
- auto indices = TMemoryTracker::Instance()->GetMetricIndices();
- for (auto& [name, index] : indices) {
- Cerr << "---- " << name
- << ": memory = " << metrics[index].GetMemory()
- << ", count = " << metrics[index].GetCount() << Endl;
- }
-
- auto vecTIndex = FindLabelIndex(TypeName<TInContainer>().c_str());
- UNIT_ASSERT(metrics[vecTIndex].GetMemory() >= ssize_t(sizeof(TInContainer) * 5));
- UNIT_ASSERT(metrics[vecTIndex].GetCount() == 1);
-
- auto labelIndex = FindLabelIndex(InContainerLabel);
- UNIT_ASSERT(metrics[labelIndex].GetCount() == 5);
- UNIT_ASSERT(metrics[labelIndex].GetMemory() >= ssize_t(
- sizeof(TInContainer) * 7 +
- sizeof(decltype(mapN)::value_type) +
- sizeof(decltype(umapN)::value_type) * 2));
-}
-
-
-static constexpr char InThreadLabel[] = "InThreadLabel";
-
-struct TInThread
- : public NActors::NMemory::TTrack<TInThread, InThreadLabel>
-{
- char payload[16];
-};
-
-void* ThreadProc(void*) {
- return new TInThread;
-}
-
-Y_UNIT_TEST(Threads) {
- TMemoryTracker::Instance()->Initialize();
-
- auto index = FindLabelIndex(InThreadLabel);
-
- auto* object1 = new TInThread;
-
- std::vector<TMetric> metrics;
- TMemoryTracker::Instance()->GatherMetrics(metrics);
- UNIT_ASSERT(metrics[index].GetMemory() == sizeof(TInThread));
- UNIT_ASSERT(metrics[index].GetCount() == 1);
-
- TThread thread(&ThreadProc, nullptr);
- thread.Start();
- auto* object2 = static_cast<TInThread*>(thread.Join());
-
- TMemoryTracker::Instance()->GatherMetrics(metrics);
- UNIT_ASSERT(metrics[index].GetMemory() == sizeof(TInThread) * 2);
- UNIT_ASSERT(metrics[index].GetCount() == 2);
-
- delete object2;
-
- TMemoryTracker::Instance()->GatherMetrics(metrics);
- UNIT_ASSERT(metrics[index].GetMemory() == sizeof(TInThread));
- UNIT_ASSERT(metrics[index].GetCount() == 1);
-
- delete object1;
-}
-
-
-struct TNotTracked {
- char payload[16];
-};
-
-struct TTracked
- : public NActors::NMemory::TTrack<TTracked>
-{
- char payload[16];
-};
-
-template <typename T>
-double MeasureAllocations() {
- constexpr size_t objectsCount = 4 << 20;
-
- std::vector<T*> objects;
- objects.resize(objectsCount);
-
- THPTimer timer;
-
- for (size_t i = 0; i < objectsCount; ++i) {
- objects[i] = new T;
- }
-
- for (size_t i = 0; i < objectsCount; ++i) {
- delete objects[i];
- }
-
- auto seconds = timer.Passed();
- Cerr << "---- objects: " << objectsCount << ", time: " << seconds << Endl;
- return seconds;
-}
-
-Y_UNIT_TEST(Performance) {
- TMemoryTracker::Instance()->Initialize();
-
- constexpr size_t Runs = 16;
-
- Cerr << "---- warmup" << Endl;
- MeasureAllocations<TNotTracked>();
- MeasureAllocations<TTracked>();
-
- std::vector<double> noTrack;
- std::vector<double> track;
-
- for (size_t run = 0; run < Runs; ++run) {
- Cerr << "---- no track" << Endl;
- auto time = MeasureAllocations<TNotTracked>();
- noTrack.push_back(time);
-
- Cerr << "---- track" << Endl;
- time = MeasureAllocations<TTracked>();
- track.push_back(time);
- }
-
- double meanNoTrack = 0, stddevNoTrack = 0;
- double meanTrack = 0, stddevTrack = 0;
- for (size_t i = 0; i < Runs; ++i) {
- meanNoTrack += noTrack[i];
- meanTrack += track[i];
- }
- meanNoTrack /= Runs;
- meanTrack /= Runs;
-
- auto sqr = [](double val) { return val * val; };
-
- for (size_t i = 0; i < Runs; ++i) {
- stddevNoTrack += sqr(noTrack[i] - meanNoTrack);
- stddevTrack += sqr(track[i] - meanTrack);
- }
- stddevNoTrack = sqrt(stddevNoTrack / (Runs - 1));
- stddevTrack = sqrt(stddevTrack / (Runs - 1));
-
- Cerr << "---- no track - mean: " << meanNoTrack << ", stddev: " << stddevNoTrack << Endl;
- Cerr << "---- track - mean: " << meanTrack << ", stddev: " << stddevTrack << Endl;
- Cerr << "---- tracking is slower by " << int((meanTrack / meanNoTrack - 1.0) * 100) << "%" << Endl;
-}
-
-#endif
-
-}
-
-}
-}
diff --git a/library/cpp/actors/util/named_tuple.h b/library/cpp/actors/util/named_tuple.h
deleted file mode 100644
index 67f185bba8..0000000000
--- a/library/cpp/actors/util/named_tuple.h
+++ /dev/null
@@ -1,30 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-template <typename TDerived>
-struct TNamedTupleBase {
- friend bool operator==(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() == y.ConvertToTuple();
- }
-
- friend bool operator!=(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() != y.ConvertToTuple();
- }
-
- friend bool operator<(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() < y.ConvertToTuple();
- }
-
- friend bool operator<=(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() <= y.ConvertToTuple();
- }
-
- friend bool operator>(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() > y.ConvertToTuple();
- }
-
- friend bool operator>=(const TDerived& x, const TDerived& y) {
- return x.ConvertToTuple() >= y.ConvertToTuple();
- }
-};
diff --git a/library/cpp/actors/util/queue_chunk.h b/library/cpp/actors/util/queue_chunk.h
deleted file mode 100644
index 8a4e02d8cb..0000000000
--- a/library/cpp/actors/util/queue_chunk.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-template <typename T, ui32 TSize, typename TDerived>
-struct TQueueChunkDerived {
- static const ui32 EntriesCount = (TSize - sizeof(TQueueChunkDerived*)) / sizeof(T);
- static_assert(EntriesCount > 0, "expect EntriesCount > 0");
-
- volatile T Entries[EntriesCount];
- TDerived* volatile Next;
-
- TQueueChunkDerived() {
- memset(this, 0, sizeof(TQueueChunkDerived));
- }
-};
-
-template <typename T, ui32 TSize>
-struct TQueueChunk {
- static const ui32 EntriesCount = (TSize - sizeof(TQueueChunk*)) / sizeof(T);
- static_assert(EntriesCount > 0, "expect EntriesCount > 0");
-
- volatile T Entries[EntriesCount];
- TQueueChunk* volatile Next;
-
- TQueueChunk() {
- memset(this, 0, sizeof(TQueueChunk));
- }
-};
diff --git a/library/cpp/actors/util/queue_oneone_inplace.h b/library/cpp/actors/util/queue_oneone_inplace.h
deleted file mode 100644
index 288011955a..0000000000
--- a/library/cpp/actors/util/queue_oneone_inplace.h
+++ /dev/null
@@ -1,118 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "queue_chunk.h"
-
-template <typename T, ui32 TSize, typename TChunk = TQueueChunk<T, TSize>>
-class TOneOneQueueInplace : TNonCopyable {
- static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-
- TChunk* ReadFrom;
- ui32 ReadPosition;
- ui32 WritePosition;
- TChunk* WriteTo;
-
- friend class TReadIterator;
-
-public:
- class TReadIterator {
- TChunk* ReadFrom;
- ui32 ReadPosition;
-
- public:
- TReadIterator(TChunk* readFrom, ui32 readPosition)
- : ReadFrom(readFrom)
- , ReadPosition(readPosition)
- {
- }
-
- inline T Next() {
- TChunk* head = ReadFrom;
- if (ReadPosition != TChunk::EntriesCount) {
- return AtomicLoad(&head->Entries[ReadPosition++]);
- } else if (TChunk* next = AtomicLoad(&head->Next)) {
- ReadFrom = next;
- ReadPosition = 0;
- return Next();
- }
- return T{};
- }
- };
-
- TOneOneQueueInplace()
- : ReadFrom(new TChunk())
- , ReadPosition(0)
- , WritePosition(0)
- , WriteTo(ReadFrom)
- {
- }
-
- ~TOneOneQueueInplace() {
- Y_DEBUG_ABORT_UNLESS(Head() == 0);
- delete ReadFrom;
- }
-
- struct TPtrCleanDestructor {
- static inline void Destroy(TOneOneQueueInplace<T, TSize>* x) noexcept {
- while (T head = x->Pop())
- delete head;
- delete x;
- }
- };
-
- struct TCleanDestructor {
- static inline void Destroy(TOneOneQueueInplace<T, TSize>* x) noexcept {
- while (x->Pop() != nullptr)
- continue;
- delete x;
- }
- };
-
- struct TPtrCleanInplaceMallocDestructor {
- template <typename TPtrVal>
- static inline void Destroy(TOneOneQueueInplace<TPtrVal*, TSize>* x) noexcept {
- while (TPtrVal* head = x->Pop()) {
- head->~TPtrVal();
- free(head);
- }
- delete x;
- }
- };
-
- void Push(T x) noexcept {
- if (WritePosition != TChunk::EntriesCount) {
- AtomicStore(&WriteTo->Entries[WritePosition], x);
- ++WritePosition;
- } else {
- TChunk* next = new TChunk();
- next->Entries[0] = x;
- AtomicStore(&WriteTo->Next, next);
- WriteTo = next;
- WritePosition = 1;
- }
- }
-
- T Head() {
- TChunk* head = ReadFrom;
- if (ReadPosition != TChunk::EntriesCount) {
- return AtomicLoad(&head->Entries[ReadPosition]);
- } else if (TChunk* next = AtomicLoad(&head->Next)) {
- ReadFrom = next;
- delete head;
- ReadPosition = 0;
- return Head();
- }
- return T{};
- }
-
- T Pop() {
- T ret = Head();
- if (ret)
- ++ReadPosition;
- return ret;
- }
-
- TReadIterator Iterator() {
- return TReadIterator(ReadFrom, ReadPosition);
- }
-};
diff --git a/library/cpp/actors/util/rc_buf.cpp b/library/cpp/actors/util/rc_buf.cpp
deleted file mode 100644
index 946c9846ee..0000000000
--- a/library/cpp/actors/util/rc_buf.cpp
+++ /dev/null
@@ -1,6 +0,0 @@
-#include "rc_buf.h"
-
-template<>
-void Out<TRcBuf>(IOutputStream& s, const TRcBuf& x) {
- s.Write(TStringBuf(x));
-}
diff --git a/library/cpp/actors/util/rc_buf.h b/library/cpp/actors/util/rc_buf.h
deleted file mode 100644
index db0f7deff5..0000000000
--- a/library/cpp/actors/util/rc_buf.h
+++ /dev/null
@@ -1,1120 +0,0 @@
-#pragma once
-
-#include <atomic>
-#include <new>
-
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/generic/hash_set.h>
-#include <util/generic/scope.h>
-#include <util/stream/str.h>
-#include <util/system/sanitizers.h>
-#include <util/system/valgrind.h>
-#include <util/generic/array_ref.h>
-#include <util/system/sys_alloc.h>
-
-#include "shared_data.h"
-#include "rc_buf_backend.h"
-
-#ifdef KIKIMR_TRACE_CONTIGUOUS_DATA_GROW
-#include "shared_data_backtracing_owner.h"
-#endif
-
-namespace NContiguousDataDetails {
- template<typename TContainer>
- struct TContainerTraits {
- static char* UnsafeGetDataMut(const TContainer& backend) {
- return const_cast<char*>(backend.data());
- }
- };
-} // NContiguousDataDetails
-
-class TContiguousSpan
-{
-private:
- const char *Data_ = nullptr;
- size_t Size_ = 0;
-
-public:
- TContiguousSpan() = default;
-
- TContiguousSpan(const char *data, size_t size)
- : Data_(data)
- , Size_(size)
- {}
-
- TContiguousSpan(const TString& str)
- : Data_(str.data())
- , Size_(str.size())
- {}
-
- TContiguousSpan(const TStringBuf& str)
- : Data_(str.data())
- , Size_(str.size())
- {}
-
- TContiguousSpan(const TArrayRef<char>& ref)
- : Data_(ref.data())
- , Size_(ref.size())
- {}
-
- TContiguousSpan(const TArrayRef<const char>& ref)
- : Data_(ref.data())
- , Size_(ref.size())
- {}
-
- TContiguousSpan(const NActors::TSharedData& data)
- : Data_(data.data())
- , Size_(data.size())
- {}
-
- const char& operator[](size_t index) const {
- Y_DEBUG_ABORT_UNLESS(index < Size_);
- return Data_[index];
- }
-
- const char *data() const noexcept {
- return Data_;
- }
-
- size_t size() const noexcept {
- return Size_;
- }
-
- const char *GetData() const noexcept {
- return Data_;
- }
-
- size_t GetSize() const noexcept {
- return Size_;
- }
-
- const char *Data() const noexcept {
- return Data_;
- }
-
- size_t Size() const noexcept {
- return Size_;
- }
-
- TContiguousSpan SubSpan(size_t pos, size_t n) const noexcept {
- pos = Min(pos, size());
- n = Min(n, size() - pos);
- return TContiguousSpan(data() + pos, n);
- }
-
- template<std::size_t Index>
- auto get() const noexcept
- {
- static_assert(Index < 2,
- "Index out of bounds for TContiguousSpan");
- if constexpr (Index == 0) return Data_;
- if constexpr (Index == 1) return Size_;
- }
-
- friend bool operator==(const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) == 0; }
- friend bool operator!=(const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) != 0; }
- friend bool operator< (const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) < 0; }
- friend bool operator<=(const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) <= 0; }
- friend bool operator> (const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) > 0; }
- friend bool operator>=(const TContiguousSpan& x, const TContiguousSpan& y) { return Compare(x, y) >= 0; }
-
-private:
- static int Compare(const TContiguousSpan& x, const TContiguousSpan& y) {
- int res = 0;
- if (const size_t common = std::min(x.size(), y.size())) {
- res = std::memcmp(x.data(), y.data(), common);
- }
- return res ? res : x.size() - y.size();
- }
-};
-
-
-
-namespace std
-{
- template<>
- struct tuple_size<::TContiguousSpan>
- : integral_constant<size_t, 2> {};
-
- template<>
- struct tuple_element<0, ::TContiguousSpan>
- {
- using type = const char *;
- };
-
- template<>
- struct tuple_element<1, ::TContiguousSpan>
- {
- using type = size_t;
- };
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator==(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) == TContiguousSpan(rhs);
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator!=(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) != TContiguousSpan(rhs);
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator<(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) < TContiguousSpan(rhs);
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator<=(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) <= TContiguousSpan(rhs);
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator>(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) > TContiguousSpan(rhs);
-}
-
-template <
- class TLeft,
- class TRight,
- typename std::enable_if<std::is_convertible<TLeft,TContiguousSpan>::value>::type* = nullptr,
- typename std::enable_if<std::is_convertible<TRight,TContiguousSpan>::value>::type* = nullptr
- >
-bool operator>=(const TLeft& lhs, const TRight& rhs) {
- return TContiguousSpan(lhs) >= TContiguousSpan(rhs);
-}
-
-
-class TMutableContiguousSpan
-{
-private:
- char *Data = nullptr;
- size_t Size = 0;
-
-public:
- TMutableContiguousSpan() = default;
-
- TMutableContiguousSpan(char *data, size_t size)
- : Data(data)
- , Size(size)
- {}
-
- char *data() noexcept {
- return Data;
- }
-
- char *GetData() noexcept {
- return Data;
- }
-
- TMutableContiguousSpan SubSpan(size_t pos, size_t n) noexcept {
- pos = Min(pos, size());
- n = Min(n, size() - pos);
- return TMutableContiguousSpan(data() + pos, n);
- }
-
- const char *data() const noexcept {
- return Data;
- }
-
- size_t size() const noexcept {
- return Size;
- }
-
- const char *GetData() const noexcept {
- return Data;
- }
-
- size_t GetSize() const noexcept {
- return Size;
- }
-
- TContiguousSpan SubSpan(size_t pos, size_t n) const noexcept {
- pos = Min(pos, size());
- n = Min(n, size() - pos);
- return TContiguousSpan(data() + pos, n);
- }
-
- operator TContiguousSpan() const noexcept {
- return TContiguousSpan(Data, Size);
- }
-};
-
-struct IContiguousChunk : TThrRefBase {
- using TPtr = TIntrusivePtr<IContiguousChunk>;
-
- virtual ~IContiguousChunk() = default;
-
- /**
- * Should give immutable access to data
- */
- virtual TContiguousSpan GetData() const = 0;
-
- /**
- * Should give mutable access to underlying data
-     * If the data is shared, it should be copied first
- * E.g. for TString str.Detach() should be used
- * Possibly invalidates previous *GetData*() calls
- */
- virtual TMutableContiguousSpan GetDataMut() = 0;
-
- /**
-     * Should give mutable access to underlying data as fast as possible
-     * Even if the data is shared, the sharing should be ignored (no copy is made)
-     * E.g. for TString const_cast<char *>(str.data()) should be used
- * Possibly invalidates previous *GetData*() calls
- */
- virtual TMutableContiguousSpan UnsafeGetDataMut() {
- return GetDataMut();
- }
-
- /**
- * Should return true if GetDataMut() would not copy contents when called.
- */
- virtual bool IsPrivate() const {
- return true;
- }
-
- virtual size_t GetOccupiedMemorySize() const = 0;
-};
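// Editor's note: a minimal hedged sketch (not part of the original sources) of a custom
// chunk satisfying the IContiguousChunk contract documented above. The class name is
// illustrative and std::vector<char> (i.e. <vector>) is assumed to be reachable here;
// because the buffer is privately owned, GetDataMut() never needs to copy and the
// default IsPrivate() == true stays accurate.
class TPlainChunk : public IContiguousChunk {
    std::vector<char> Buffer;

public:
    explicit TPlainChunk(size_t size)
        : Buffer(size)
    {}

    TContiguousSpan GetData() const override {
        return {Buffer.data(), Buffer.size()};
    }

    TMutableContiguousSpan GetDataMut() override {
        return {Buffer.data(), Buffer.size()};
    }

    size_t GetOccupiedMemorySize() const override {
        return Buffer.capacity();
    }
};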
-
-class TRope;
-class TRopeArena;
-
-class TRcBuf {
- friend class TRope;
- friend class TRopeArena;
-
- using TInternalBackend = NDetail::TRcBufInternalBackend;
-
- class TBackend {
- enum class EType : uintptr_t {
- STRING,
- SHARED_DATA,
- INTERNAL_BACKEND,
- EXTERNAL_BACKEND,
- };
-
- struct TBackendHolder {
- uintptr_t Data[2];
- explicit operator bool() const noexcept {
- return Data[0] || Data[1];
- }
- friend bool operator ==(const TBackendHolder& x, const TBackendHolder& y) {
- return x.Data[0] == y.Data[0] && x.Data[1] == y.Data[1];
- }
- };
-
- constexpr static TBackendHolder Empty = {0, 0};
-
-#ifndef TSTRING_IS_STD_STRING
- static_assert(sizeof(TBackendHolder) >= sizeof(TString));
-#endif
- static_assert(sizeof(TBackendHolder) >= sizeof(NActors::TSharedData));
- static_assert(sizeof(TBackendHolder) >= sizeof(TInternalBackend));
-
- TBackendHolder Owner = TBackend::Empty; // lower bits contain type of the owner
-
- public:
- using TCookies = TInternalBackend::TCookies;
-
- static constexpr struct TControlToken {} ControlToken;
- static constexpr size_t CookiesSize = sizeof(TCookies);
-
- TBackend() = default;
-
- TBackend(const TBackend& other)
- : Owner(other.Owner ? Clone(other.Owner) : TBackend::Empty)
- {}
-
- TBackend(TBackend&& other)
- : Owner(std::exchange(other.Owner, TBackend::Empty))
- {}
-
- TBackend(TString s)
- : Owner(Construct<TString>(EType::STRING, std::move(s)))
- {}
-
- TBackend(NActors::TSharedData s)
- : Owner(Construct<NActors::TSharedData>(EType::SHARED_DATA, std::move(s)))
- {}
-
- TBackend(TInternalBackend backend)
- : Owner(Construct<TInternalBackend>(EType::INTERNAL_BACKEND, std::move(backend)))
- {}
-
- TBackend(IContiguousChunk::TPtr backend)
- : Owner(Construct<IContiguousChunk::TPtr>(EType::EXTERNAL_BACKEND, std::move(backend)))
- {}
-
- ~TBackend() {
- if (Owner) {
- Destroy(Owner);
- }
- }
-
- TBackend& operator =(const TBackend& other) {
- if (Y_UNLIKELY(this == &other)) {
- return *this;
- }
-
- if (Owner) {
- Destroy(Owner);
- }
- if (other.Owner) {
- Owner = Clone(other.Owner);
- } else {
- Owner = TBackend::Empty;
- }
- return *this;
- }
-
- TBackend& operator =(TBackend&& other) {
- if (Y_UNLIKELY(this == &other)) {
- return *this;
- }
-
- if (Owner) {
- Destroy(Owner);
- }
- Owner = std::exchange(other.Owner, TBackend::Empty);
- return *this;
- }
-
- bool operator ==(const TBackend& other) const {
- return Owner == other.Owner;
- }
-
- const void *UniqueId() const {
- return reinterpret_cast<const void*>(Owner.Data[0]);
- }
-
- TCookies* GetCookies() {
- if(!Owner) {
- return nullptr;
- }
- return Visit(Owner, [](EType, auto& value) -> TCookies* {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TInternalBackend>) {
- return value.GetCookies();
- } else {
- return nullptr;
- }
- });
- }
-
- const TCookies* GetCookies() const {
- return const_cast<TBackend&>(*this).GetCookies();
- }
-
- bool IsPrivate() const {
- if(!Owner) {
- return true;
- }
- return Visit(Owner, [](EType, auto& value) -> bool {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, NActors::TSharedData> || std::is_same_v<T, TInternalBackend>) {
- return value.IsPrivate();
- } else if constexpr (std::is_same_v<T, TString>) {
- return value.IsDetached();
- } else if constexpr (std::is_same_v<T, IContiguousChunk::TPtr>) {
- return value.RefCount() == 1 && value->IsPrivate();
- } else {
- static_assert(TDependentFalse<T>);
- }
- });
- }
-
- TContiguousSpan GetData() const {
- if (!Owner) {
- return TContiguousSpan();
- }
- return Visit(Owner, [](EType, auto& value) -> TContiguousSpan {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TString> || std::is_same_v<T, NActors::TSharedData> || std::is_same_v<T, TInternalBackend>) {
- return {value.data(), value.size()};
- } else if constexpr (std::is_same_v<T, IContiguousChunk::TPtr>) {
- return value->GetData();
- } else {
- static_assert(TDependentFalse<T>, "unexpected type");
- }
- });
- }
-
- TMutableContiguousSpan GetDataMut() {
- if (!Owner) {
- return TMutableContiguousSpan();
- }
- return Visit(Owner, [](EType, auto& value) -> TMutableContiguousSpan {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TString>) {
- return {value.Detach(), value.size()};
- } else if constexpr (std::is_same_v<T, NActors::TSharedData>) {
- if (value.IsShared()) {
- value = NActors::TSharedData::Copy(value.data(), value.size());
- }
- return {value.mutable_data(), value.size()};
- } else if constexpr (std::is_same_v<T, TInternalBackend>) {
- if (value.IsShared()) {
- value = TInternalBackend::Copy(value.data(), value.size());
- }
- return {value.mutable_data(), value.size()};
- } else if constexpr (std::is_same_v<T, IContiguousChunk::TPtr>) {
- return value->GetDataMut();
- } else {
- static_assert(TDependentFalse<T>, "unexpected type");
- }
- });
- }
-
- TMutableContiguousSpan UnsafeGetDataMut() const {
- if (!Owner) {
- return TMutableContiguousSpan();
- }
- return Visit(Owner, [](EType, auto& value) -> TMutableContiguousSpan {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TString>) {
- return {const_cast<char*>(value.data()), value.size()};
- } else if constexpr (std::is_same_v<T, NActors::TSharedData> || std::is_same_v<T, TInternalBackend>) {
- return {const_cast<char*>(value.data()), value.size()};
- } else if constexpr (std::is_same_v<T, IContiguousChunk::TPtr>) {
- return value->UnsafeGetDataMut();
- } else {
- static_assert(TDependentFalse<T>, "unexpected type");
- }
- });
- }
-
- size_t GetOccupiedMemorySize() const {
- if (!Owner) {
- return 0;
- }
- return Visit(Owner, [](EType, auto& value) {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TString>) {
- return value.capacity();
- } else if constexpr (std::is_same_v<T, NActors::TSharedData> || std::is_same_v<T, TInternalBackend>) {
- return value.size(); // There is no capacity
- } else if constexpr (std::is_same_v<T, IContiguousChunk::TPtr>) {
- return value->GetOccupiedMemorySize();
- } else {
- static_assert(TDependentFalse<T>, "unexpected type");
- }
- });
- }
-
- template <class TType>
- bool ContainsNativeType() const {
- if (!Owner) {
- return false;
- }
- return Visit(Owner, [](EType, auto& value) {
- using T = std::decay_t<decltype(value)>;
- return std::is_same_v<T, TType>;
- });
- }
-
- bool CanGrowFront(const char* begin) const {
- if (!Owner) {
- return false;
- }
- const TCookies* cookies = GetCookies();
- return cookies && (IsPrivate() || cookies->Begin.load() == begin);
- }
-
- bool CanGrowBack(const char* end) const {
- if (!Owner) {
- return false;
- }
- const TCookies* cookies = GetCookies();
- return cookies && (IsPrivate() || cookies->End.load() == end);
- }
-
- void UpdateCookiesUnsafe(const char* contBegin, const char* contEnd) {
- if (!Owner) {
- return;
- }
- TCookies* cookies = GetCookies();
- if (cookies) {
- cookies->Begin.store(contBegin);
- cookies->End.store(contEnd);
- }
- }
-
- bool UpdateCookiesBegin(const char* curBegin, const char* contBegin) {
- if (!Owner) {
- return false;
- }
-
- TCookies* cookies = GetCookies();
- if (cookies) {
- return cookies->Begin.compare_exchange_weak(curBegin, contBegin);
- }
- return false;
- }
-
- bool UpdateCookiesEnd(const char* curEnd, const char* contEnd) {
- if (!Owner) {
- return false;
- }
-
- TCookies* cookies = GetCookies();
- if (cookies) {
- return cookies->End.compare_exchange_weak(curEnd, contEnd);
- }
- return false;
- }
-
- template <typename TResult, typename TCallback>
- std::invoke_result_t<TCallback, const TResult*> ApplySpecificValue(TCallback&& callback) const {
- static_assert(std::is_same_v<TResult, TString> ||
- std::is_same_v<TResult, NActors::TSharedData> ||
- std::is_same_v<TResult, TInternalBackend> ||
- std::is_same_v<TResult, IContiguousChunk::TPtr>);
-
- if (!Owner) {
- return callback(nullptr);
- }
- return Visit(Owner, [&](EType, auto& value) {
- using T = std::decay_t<decltype(value)>;
- if constexpr (std::is_same_v<T, TResult>) {
- return callback(&value);
- } else {
- return callback(nullptr);
- }
- });
- }
-
- explicit operator bool() const {
- return static_cast<bool>(Owner);
- }
-
- private:
- static constexpr uintptr_t TypeMask = (1 << 3) - 1;
- static constexpr uintptr_t ValueMask = ~TypeMask;
-
- template<typename T>
- struct TObjectHolder {
- struct TWrappedObject : TThrRefBase {
- T Value;
- TWrappedObject(T&& value)
- : Value(std::move(value))
- {}
- };
- TIntrusivePtr<TWrappedObject> Object;
-
- TObjectHolder(T&& object)
- : Object(MakeIntrusive<TWrappedObject>(std::move(object)))
- {}
- };
-
- template<typename TObject>
- static TBackendHolder Construct(EType type, TObject&& object) {
- if constexpr (sizeof(TObject) <= sizeof(TBackendHolder)) {
- TBackendHolder res = TBackend::Empty;
- new(&res) std::decay_t<TObject>(std::forward<TObject>(object));
- Y_DEBUG_ABORT_UNLESS((res.Data[0] & ValueMask) == res.Data[0]);
- res.Data[0] = res.Data[0] | static_cast<uintptr_t>(type);
- return res;
- } else {
- return Construct<TObjectHolder<TObject>>(type, TObjectHolder<TObject>(std::forward<TObject>(object)));
- }
- }
-
- template<typename TOwner, typename TCallback, bool IsConst = std::is_const_v<TOwner>>
- static std::invoke_result_t<TCallback, EType, std::conditional_t<IsConst, const TString&, TString&>> VisitRaw(TOwner& origValue, TCallback&& callback) {
- Y_DEBUG_ABORT_UNLESS(origValue);
- const EType type = static_cast<EType>(origValue.Data[0] & TypeMask);
- TBackendHolder value(origValue);
- value.Data[0] = value.Data[0] & ValueMask;
- // bring object type back
- Y_SCOPE_EXIT(&value, &origValue, type){
- if constexpr(!IsConst) {
- value.Data[0] = value.Data[0] | static_cast<uintptr_t>(type);
- origValue = value;
- } else {
- Y_UNUSED(value);
- Y_UNUSED(origValue);
- Y_UNUSED(type);
- }
- };
- auto caller = [&](auto& value) { return std::invoke(std::forward<TCallback>(callback), type, value); };
- auto wrapper = [&](auto& value) {
- using T = std::decay_t<decltype(value)>;
- if constexpr (sizeof(T) <= sizeof(TBackendHolder)) {
- return caller(value);
- } else {
- return caller(reinterpret_cast<std::conditional_t<IsConst, const TObjectHolder<T>&, TObjectHolder<T>&>>(value));
- }
- };
- switch (type) {
- case EType::STRING: return wrapper(reinterpret_cast<std::conditional_t<IsConst, const TString&, TString&>>(value));
- case EType::SHARED_DATA: return wrapper(reinterpret_cast<std::conditional_t<IsConst, const NActors::TSharedData&, NActors::TSharedData&>>(value));
- case EType::INTERNAL_BACKEND: return wrapper(reinterpret_cast<std::conditional_t<IsConst, const TInternalBackend&, TInternalBackend&>>(value));
- case EType::EXTERNAL_BACKEND: return wrapper(reinterpret_cast<std::conditional_t<IsConst, const IContiguousChunk::TPtr&, IContiguousChunk::TPtr&>>(value));
- }
- Y_ABORT("Unexpected type# %" PRIu64, static_cast<ui64>(type));
- }
-
- template<typename TOwner, typename TCallback, bool IsConst = std::is_const_v<TOwner>>
- static std::invoke_result_t<TCallback, EType, std::conditional_t<IsConst, const TString&, TString&>> Visit(TOwner& value, TCallback&& callback) {
- return VisitRaw(value, [&](EType type, auto& value) {
- return std::invoke(std::forward<TCallback>(callback), type, Unwrap(value));
- });
- }
-
- template<typename T> static T& Unwrap(T& object) { return object; }
- template<typename T> static T& Unwrap(TObjectHolder<T>& holder) { return holder.Object->Value; }
- template<typename T> static const T& Unwrap(const TObjectHolder<T>& holder) { return holder.Object->Value; }
-
- template<typename TOwner>
- static TBackendHolder Clone(TOwner& value) {
- return VisitRaw(value, [](EType type, auto& value) { return Construct(type, value); });
- }
-
- template<typename TOwner>
- static void Destroy(TOwner& value) {
- VisitRaw(value, [](EType, auto& value) { CallDtor(value); });
- }
-
- template<typename T>
- static void CallDtor(T& value) {
- value.~T();
- }
- };
-
- static constexpr struct TOwnedPiece {} OwnedPiece{};
-
- TBackend Backend; // who actually holds the data
- const char *Begin; // data start
- const char *End; // data end
-
- explicit TRcBuf(TInternalBackend s, const char *data, size_t size)
- : Backend(std::move(s))
- {
- Y_ABORT_UNLESS(Backend.GetData().data() == nullptr ||
- (Backend.GetCookies() && Backend.GetCookies()->Begin == data && Backend.GetCookies()->End == data + size));
- Begin = data;
- End = data + size;
- }
-
- explicit TRcBuf(TInternalBackend s)
- : Backend(std::move(s))
- {
- auto span = Backend.GetData();
- Begin = span.data();
- End = Begin + span.size();
- }
-
- TRcBuf(TOwnedPiece, const char *data, size_t size, const TRcBuf& from)
- : TRcBuf(from.Backend, {data, size})
- {
- Y_ABORT_UNLESS(data >= from.GetData());
- Y_ABORT_UNLESS(data < from.GetData() + from.GetSize());
- Y_ABORT_UNLESS(data + size <= from.GetData() + from.GetSize());
- Backend.UpdateCookiesUnsafe(Begin, End);
- }
-
- TRcBuf(TOwnedPiece, const char *begin, const char *end, const TRcBuf& from)
- : TRcBuf(OwnedPiece, begin, end - begin, from)
- {}
-
-public:
- static constexpr struct TPiece {} Piece{};
-
- enum class EResizeResult {
- NoAlloc,
- Alloc,
- };
-
- enum class EResizeStrategy {
- KeepRooms,
- FailOnCopy,
- // SaveAllocs, // Move data if there is enough space in (headroom + size + tailroom)
- };
-
- TRcBuf()
- : Begin(nullptr)
- , End(nullptr)
- {}
-
- template<typename T>
- TRcBuf(T&& backend, const TContiguousSpan& data)
- : Backend(std::forward<T>(backend))
- , Begin(data.data())
- , End(Begin + data.size())
- {}
-
- explicit TRcBuf(TString s)
- : Backend(std::move(s))
- {
- auto span = Backend.GetData();
- Begin = span.data();
- End = Begin + span.size();
- }
-
- explicit TRcBuf(NActors::TSharedData s)
- : Backend(std::move(s))
- {
- auto span = Backend.GetData();
- Begin = span.data();
- End = Begin + span.size();
- }
-
- TRcBuf(IContiguousChunk::TPtr backend)
- : TRcBuf(backend, backend->GetData())
- {}
-
- TRcBuf(TPiece, const char *data, size_t size, const TRcBuf& from)
- : TRcBuf(from.Backend, {data, size})
- {
- Y_ABORT_UNLESS(data >= from.GetData());
- Y_ABORT_UNLESS(data < from.GetData() + from.GetSize());
- Y_ABORT_UNLESS(data + size <= from.GetData() + from.GetSize());
- }
-
- TRcBuf(TPiece, const char *begin, const char *end, const TRcBuf& from)
- : TRcBuf(Piece, begin, end - begin, from)
- {}
-
- TRcBuf(const TRcBuf& other)
- : Backend(other.Backend)
- , Begin(other.Begin)
- , End(other.End)
- {}
-
- TRcBuf(TRcBuf&& other)
- : Backend(std::move(other.Backend))
- , Begin(other.Begin)
- , End(other.End)
- {}
-
- TRcBuf& operator =(const TRcBuf&) = default;
- TRcBuf& operator =(TRcBuf&&) = default;
-
- static TRcBuf Uninitialized(size_t size, size_t headroom = 0, size_t tailroom = 0)
- {
- if (size == 0) {
- return TRcBuf();
- }
-
- if (headroom == 0 && tailroom == 0) {
- TInternalBackend res = TInternalBackend::Uninitialized(size);
- return TRcBuf(
- OwnedPiece,
- res.data(),
- res.data() + res.size(),
- TRcBuf(res));
- }
-
- TInternalBackend res = TInternalBackend::Uninitialized(size, headroom, tailroom);
- return TRcBuf(res, res.data() + headroom, size);
- }
-
- static TRcBuf Copy(TContiguousSpan data, size_t headroom = 0, size_t tailroom = 0) {
- TRcBuf res = Uninitialized(data.size(), headroom, tailroom);
- std::memcpy(res.UnsafeGetDataMut(), data.GetData(), data.GetSize());
- return res;
- }
-
- static TRcBuf Copy(const char* data, size_t size, size_t headroom = 0, size_t tailroom = 0) {
- return Copy({data, size}, headroom, tailroom);
- }
-
- template <class TType>
- bool ContainsNativeType() const {
- return Backend.ContainsNativeType<TType>();
- }
-
- bool CanGrowFront() const noexcept {
- return Backend.CanGrowFront(Begin);
- }
-
- bool CanGrowBack() const noexcept {
- return Backend.CanGrowBack(End);
- }
-
- size_t GetSize() const {
- return End - Begin;
- }
-
- size_t Size() const {
- return End - Begin;
- }
-
- size_t GetOccupiedMemorySize() const {
- return Backend.GetOccupiedMemorySize();
- }
-
- const char* GetData() const {
- return Begin;
- }
-
- char* GetDataMut(size_t headroom = 0, size_t tailroom = 0) {
- const TContiguousSpan backendData = Backend.GetData();
- if (IsPrivate() || (backendData.data() == GetData() && backendData.size() == GetSize())) { // if we own container or reference it whole
- const char* oldBegin = backendData.data();
- ptrdiff_t offset = Begin - oldBegin;
- size_t size = GetSize();
- char* newBegin = Backend.GetDataMut().data();
- Begin = newBegin + offset;
- End = Begin + size;
- return newBegin + offset;
- } else { // make a copy of referenced data
- *this = Copy(GetContiguousSpan(), headroom, tailroom);
- return Backend.GetDataMut().data();
- }
- }
-
- char* UnsafeGetDataMut() {
- const char* oldBegin = Backend.GetData().data();
- ptrdiff_t offset = Begin - oldBegin;
- size_t size = GetSize();
- char* newBegin = Backend.UnsafeGetDataMut().data();
- Begin = newBegin + offset;
- End = Begin + size;
- return newBegin + offset;
- }
-
- template <class TResult>
- TResult ExtractUnderlyingContainerOrCopy() const {
- static_assert(std::is_same_v<TResult, TString> ||
- std::is_same_v<TResult, NActors::TSharedData> ||
- std::is_same_v<TResult, TInternalBackend>);
-
- constexpr bool isSharedData = std::is_same_v<TResult, NActors::TSharedData>;
- TResult res;
-
- const bool found = Backend.ApplySpecificValue<TResult>([&](const TResult *raw) {
- if (raw && raw->data() == Begin && (isSharedData ? End <= Begin + raw->size() : End == Begin + raw->size())) {
- if constexpr (isSharedData) {
- raw->TrimBack(size());
- }
- res = TResult(*raw);
- return true;
- }
- return false;
- });
-
- if (!found) {
- res = TResult::Uninitialized(GetSize());
- char* data = NContiguousDataDetails::TContainerTraits<TResult>::UnsafeGetDataMut(res);
- std::memcpy(data, GetData(), GetSize());
- }
-
- return res;
- }
-
- TContiguousSpan GetContiguousSpan() const {
- return {GetData(), GetSize()};
- }
-
- TStringBuf Slice(size_t pos = 0, size_t len = Max<size_t>()) const noexcept {
- pos = Min(pos, size());
- len = Min(len, size() - pos);
- return {const_cast<TRcBuf*>(this)->UnsafeGetDataMut() + pos, len};
- }
-
- explicit operator TStringBuf() const noexcept {
- return Slice();
- }
-
- TMutableContiguousSpan GetContiguousSpanMut() {
- return {GetDataMut(), GetSize()};
- }
-
- TMutableContiguousSpan UnsafeGetContiguousSpanMut() {
- return {UnsafeGetDataMut(), GetSize()};
- }
-
- bool HasBuffer() const {
- return static_cast<bool>(Backend);
- }
-
- size_t size() const {
- return GetSize();
- }
-
- bool empty() const {
- return !static_cast<bool>(Backend);
- }
-
- operator bool() const {
- return !empty();
- }
-
- const char* data() const {
- return GetData();
- }
-
- const char* Data() const {
- return GetData();
- }
-
- const char* begin() const {
- return Begin;
- }
-
- const char* end() const {
- return End;
- }
-
- char& operator[](size_t pos) {
- return UnsafeGetDataMut()[pos];
- }
-
- const char& operator[](size_t pos) const {
- return GetData()[pos];
- }
-
- void reserve(size_t size) {
- ReserveTailroom(size);
- }
-
- void ReserveHeadroom(size_t size) {
- if (Headroom() >= size) {
- return;
- }
- auto newData = TRcBuf::Uninitialized(GetSize(), size, UnsafeTailroom());
- if (auto data = GetData(); data) {
- std::memcpy(newData.UnsafeGetDataMut(), GetData(), GetSize());
- }
- *this = std::move(newData);
- }
-
- void ReserveTailroom(size_t size) {
- if (Tailroom() >= size) {
- return;
- }
- auto newData = TRcBuf::Uninitialized(GetSize(), UnsafeHeadroom(), size);
- if (auto data = GetData(); data) {
- std::memcpy(newData.UnsafeGetDataMut(), GetData(), GetSize());
- }
- *this = std::move(newData);
- }
-
- void ReserveBidi(size_t headroom, size_t tailroom) {
- if (Headroom() >= headroom && Tailroom() >= tailroom) {
- return;
- }
- auto newData = TRcBuf::Uninitialized(
- GetSize(),
- std::max(UnsafeHeadroom(), headroom),
- std::max(UnsafeTailroom(), tailroom));
- if (auto data = GetData(); data) {
- std::memcpy(newData.UnsafeGetDataMut(), GetData(), GetSize());
- }
- *this = std::move(newData);
- }
-
- EResizeResult GrowFront(size_t size, EResizeStrategy strategy = EResizeStrategy::KeepRooms) {
- if (Headroom() >= size && Backend.UpdateCookiesBegin(Begin, Begin - size)) {
- Begin -= size;
- return EResizeResult::NoAlloc;
- } else {
- if (strategy == EResizeStrategy::FailOnCopy && static_cast<bool>(Backend)) {
- Y_ABORT("Fail on grow");
- }
- auto newData = TRcBuf::Uninitialized(size + GetSize(), UnsafeHeadroom() > size ? UnsafeHeadroom() - size : 0, UnsafeTailroom());
- if (auto data = GetData(); data) {
- std::memcpy(newData.UnsafeGetDataMut() + size, GetData(), GetSize());
- }
- *this = std::move(newData);
- return EResizeResult::Alloc;
- }
- }
-
- EResizeResult GrowBack(size_t size, EResizeStrategy strategy = EResizeStrategy::KeepRooms) {
- if (Tailroom() > size && Backend.UpdateCookiesEnd(End, End + size)) {
- End += size;
- return EResizeResult::NoAlloc;
- } else {
- if (strategy == EResizeStrategy::FailOnCopy && static_cast<bool>(Backend)) {
- Y_ABORT("Fail on grow");
- }
- auto newData = TRcBuf::Uninitialized(size + GetSize(), UnsafeHeadroom(), UnsafeTailroom() > size ? UnsafeTailroom() - size : 0);
- if (auto data = GetData(); data) {
- std::memcpy(newData.UnsafeGetDataMut(), GetData(), GetSize());
- }
- *this = std::move(newData);
- return EResizeResult::Alloc;
- }
- }
-
- void TrimBack(size_t size) {
- Y_ABORT_UNLESS(size <= GetSize());
- End = End - (GetSize() - size);
- }
-
- void TrimFront(size_t size) {
- Y_ABORT_UNLESS(size <= GetSize());
- Begin = Begin + (GetSize() - size);
- }
-
- char* Detach() {
- return GetDataMut();
- }
-
- bool IsPrivate() const {
- return Backend.IsPrivate();
- }
-
- size_t UnsafeHeadroom() const {
- return Begin - Backend.GetData().data();
- }
-
- size_t UnsafeTailroom() const {
- auto span = Backend.GetData();
- return (span.GetData() + span.GetSize()) - End;
- }
-
- size_t Headroom() const {
- if (Backend.CanGrowFront(Begin)) {
- return UnsafeHeadroom();
- }
-
- return 0;
- }
-
- size_t Tailroom() const {
- if (Backend.CanGrowBack(End)) {
- return UnsafeTailroom();
- }
-
- return 0;
- }
-
- operator TContiguousSpan() const noexcept {
- return TContiguousSpan(GetData(), GetSize());
- }
-
- explicit operator TMutableContiguousSpan() noexcept {
- return TMutableContiguousSpan(GetDataMut(), GetSize());
- }
-};
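Editor's note: the TRcBuf removed above combines copy-on-write sharing with headroom/tailroom "cookies" that let a buffer grow in place. A minimal usage sketch follows; it relies only on the APIs visible in the deleted header, while the wrapper function name, buffer contents and sizes are illustrative, and the usual util includes (memcpy, Y_UNUSED) are assumed to be present.

    void TRcBufExample() {
        // Reserve 16 bytes of headroom and tailroom so later Grow* calls can reuse the slack.
        TRcBuf buf = TRcBuf::Copy("payload", 7, /*headroom=*/16, /*tailroom=*/16);
        if (buf.GrowFront(4) == TRcBuf::EResizeResult::NoAlloc) {
            memcpy(buf.UnsafeGetDataMut(), "hdr:", 4); // the four new front bytes came out of the headroom
        }
        TRcBuf shared = buf;             // reference-counted share, no copy
        char* mut = shared.GetDataMut(); // copy-on-write: detaches because the backend is now shared
        Y_UNUSED(mut);
    }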
diff --git a/library/cpp/actors/util/rc_buf_backend.h b/library/cpp/actors/util/rc_buf_backend.h
deleted file mode 100644
index 9cb8616554..0000000000
--- a/library/cpp/actors/util/rc_buf_backend.h
+++ /dev/null
@@ -1,230 +0,0 @@
-#pragma once
-
-#include <atomic>
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-
-#include <util/system/types.h>
-#include <util/system/compiler.h>
-#include <util/generic/array_ref.h>
-#include <util/system/sys_alloc.h>
-
-namespace NDetail {
-
-struct TRcBufInternalBackend {
-public:
- struct TCookies {
- using TSelf = TCookies;
- std::atomic<const char*> Begin;
- std::atomic<const char*> End;
-
- static size_t BytesToAligned(size_t size) {
- bool misaligned = size % alignof(TSelf);
- return misaligned ? alignof(TSelf) - size % alignof(TSelf) : 0;
- }
-
- static size_t BytesToAlloc(size_t size) {
- return size + BytesToAligned(size) + sizeof(TSelf);
- }
- };
-private:
- // to be binary compatible with TSharedData
- struct THeader : public TCookies {
- TAtomic RefCount;
- ui64 Zero = 0;
- };
-
- enum : size_t {
- HeaderSize = sizeof(THeader),
- OverheadSize = HeaderSize,
- MaxDataSize = (std::numeric_limits<size_t>::max() - OverheadSize)
- };
-
-public:
- TRcBufInternalBackend() noexcept
- : Data_(nullptr)
- , Size_(0)
- { }
-
- ~TRcBufInternalBackend() noexcept {
- Release();
- }
-
- TRcBufInternalBackend(const TRcBufInternalBackend& other) noexcept
- : Data_(other.Data_)
- , Size_(other.Size_)
- {
- AddRef();
- }
-
- TRcBufInternalBackend(TRcBufInternalBackend&& other) noexcept
- : Data_(other.Data_)
- , Size_(other.Size_)
- {
- other.Data_ = nullptr;
- other.Size_ = 0;
- }
-
- TRcBufInternalBackend& operator=(const TRcBufInternalBackend& other) noexcept {
- if (this != &other) {
- Release();
- Data_ = other.Data_;
- Size_ = other.Size_;
- AddRef();
- }
- return *this;
- }
-
- TRcBufInternalBackend& operator=(TRcBufInternalBackend&& other) noexcept {
- if (this != &other) {
- Release();
- Data_ = other.Data_;
- Size_ = other.Size_;
- other.Data_ = nullptr;
- other.Size_ = 0;
- }
- return *this;
- }
-
- Y_FORCE_INLINE explicit operator bool() const { return Size_ > 0; }
-
- Y_FORCE_INLINE char* mutable_data() { return Data(); }
- Y_FORCE_INLINE char* mutable_begin() { return Data(); }
- Y_FORCE_INLINE char* mutable_end() { return Data() + Size_; }
-
- Y_FORCE_INLINE const char* data() const { return Data(); }
- Y_FORCE_INLINE const char* begin() const { return Data(); }
- Y_FORCE_INLINE const char* end() const { return Data() + Size_; }
-
- Y_FORCE_INLINE size_t size() const { return Size_; }
-
- /**
-     * Copies data to a newly allocated buffer if the data is shared
-     * The container then drops its reference to the original owner
-     * Returns a pointer to the mutable buffer
- */
- char* Detach() {
- if (IsShared()) {
- *this = TRcBufInternalBackend::Copy(data(), size());
- }
- return Data_;
- }
-
- bool IsPrivate() const {
- return Data_ ? IsPrivate(Header()) : true;
- }
-
- bool IsShared() const {
- return !IsPrivate();
- }
-
- TString ToString() const {
- return TString(data(), size());
- }
-
- TCookies* GetCookies() {
- return Header();
- }
-
- /**
- * Attach to pre-allocated data with a preceding THeader
- */
- static TRcBufInternalBackend AttachUnsafe(char* data, size_t size) noexcept {
- TRcBufInternalBackend result;
- result.Data_ = data;
- result.Size_ = size;
- return result;
- }
-
- /**
- * Make uninitialized buffer of the specified size
- */
- static TRcBufInternalBackend Uninitialized(size_t size, size_t headroom = 0, size_t tailroom = 0) {
- size_t fullSize = checkedSum(size, checkedSum(headroom, tailroom));
- return AttachUnsafe(Allocate(size, headroom, tailroom), fullSize);
- }
-
- /**
- * Make a copy of the specified data
- */
- static TRcBufInternalBackend Copy(const void* data, size_t size) {
- TRcBufInternalBackend result = Uninitialized(size);
- if (size) {
- ::memcpy(result.Data(), data, size);
- }
- return result;
- }
-
-private:
- Y_FORCE_INLINE THeader* Header() const noexcept {
- Y_DEBUG_ABORT_UNLESS(Data_);
- return reinterpret_cast<THeader*>(Data_);
- }
-
- Y_FORCE_INLINE char* Data() const noexcept {
- Y_DEBUG_ABORT_UNLESS(Data_);
- return Data_ + OverheadSize;
- }
-
- static bool IsPrivate(THeader* header) noexcept {
- return 1 == AtomicGet(header->RefCount);
- }
-
- void AddRef() noexcept {
- if (Data_) {
- AtomicIncrement(Header()->RefCount);
- }
- }
-
- void Release() noexcept {
- if (Data_) {
- auto* header = Header();
- if (IsPrivate(header) || 0 == AtomicDecrement(header->RefCount)) {
- Deallocate(Data_);
- }
- }
- }
-
-private:
- static size_t checkedSum(size_t a, size_t b) {
- if (a > std::numeric_limits<size_t>::max() - b) {
- throw std::length_error("Allocate size overflow");
- }
- return a + b;
- }
-
- static char* Allocate(size_t size, size_t headroom = 0, size_t tailroom = 0) {
- char* data = nullptr;
- size_t fullSize = checkedSum(size, checkedSum(headroom, tailroom));
- if (fullSize > 0) {
- if (fullSize >= MaxDataSize) {
- throw std::length_error("Allocate size overflow");
- }
- auto allocSize = OverheadSize + fullSize;
- char* raw = reinterpret_cast<char*>(y_allocate(allocSize));
-
- auto* header = reinterpret_cast<THeader*>(raw);
- header->Begin = raw + OverheadSize + headroom;
- header->End = raw + allocSize - tailroom;
- header->RefCount = 1;
-
- data = raw;
- }
-
- return data;
- }
-
- static void Deallocate(char* data) noexcept {
- if (data) {
- char* raw = data;
-
- y_deallocate(raw);
- }
- }
-
-private:
- char* Data_;
- size_t Size_;
-};
-
-} // namespace NDetail
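Editor's note: the deleted backend above keeps a single reference-counted heap allocation whose THeader (the cookie pointers plus the counter) precedes the payload, which is what makes it binary compatible with TSharedData. A short hedged sketch of the sharing semantics, using only calls shown above; the function name is illustrative and the usual util assertion macro (Y_ABORT_UNLESS) is assumed to be available.

    void TInternalBackendExample() {
        using NDetail::TRcBufInternalBackend;

        TRcBufInternalBackend a = TRcBufInternalBackend::Copy("abc", 3);
        TRcBufInternalBackend b = a;      // bumps the reference counter; a and b share one buffer
        Y_ABORT_UNLESS(a.IsShared());
        char* p = b.Detach();             // the data is shared, so Detach() copies into a private buffer for b
        Y_UNUSED(p);
        Y_ABORT_UNLESS(a.IsPrivate() && b.IsPrivate());
    }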
diff --git a/library/cpp/actors/util/rc_buf_ut.cpp b/library/cpp/actors/util/rc_buf_ut.cpp
deleted file mode 100644
index c23e8b68d0..0000000000
--- a/library/cpp/actors/util/rc_buf_ut.cpp
+++ /dev/null
@@ -1,207 +0,0 @@
-#include "rc_buf.h"
-#include "ut_helpers.h"
-#include "rope.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-
-Y_UNIT_TEST_SUITE(TRcBuf) {
- Y_UNIT_TEST(TypeSize) {
- UNIT_ASSERT_EQUAL(sizeof(TRcBuf), 4 * sizeof(uintptr_t));
- }
-
- Y_UNIT_TEST(Slice) {
- auto data = TRcBuf::Copy("Hello", 5);
- UNIT_ASSERT_VALUES_EQUAL(TString(TStringBuf(data)), TString("Hello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice()), TString("Hello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1)), TString("ello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1, 3)), TString("ell"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1, 100)), TString("ello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(0, 4)), TString("Hell"));
- }
-
- Y_UNIT_TEST(CrossCompare) {
- TString str = "some very long string";
- const TString constStr(str);
- TStringBuf strbuf = str;
- const TStringBuf constStrbuf = str;
- TContiguousSpan span(str);
- const TContiguousSpan constSpan(str);
- TMutableContiguousSpan mutableSpan(const_cast<char*>(str.data()), str.size());
- const TMutableContiguousSpan constMutableSpan(const_cast<char*>(str.data()), str.size());
- TRcBuf data(str);
- const TRcBuf constData(str);
- TArrayRef<char> arrRef(const_cast<char*>(str.data()), str.size());
- const TArrayRef<char> constArrRef(const_cast<char*>(str.data()), str.size());
- TArrayRef<const char> arrConstRef(const_cast<char*>(str.data()), str.size());
- const TArrayRef<const char> constArrConstRef(const_cast<char*>(str.data()), str.size());
- NActors::TSharedData sharedData = NActors::TSharedData::Copy(str.data(), str.size());
- const NActors::TSharedData constSharedData(sharedData);
-
- Permutate(
- [](auto& arg1, auto& arg2) {
- UNIT_ASSERT(arg1 == arg2);
- },
- str,
- constStr,
- strbuf,
- constStrbuf,
- span,
- constSpan,
- mutableSpan,
- constMutableSpan,
- data,
- constData,
- arrRef,
- constArrRef,
- arrConstRef,
- constArrConstRef,
- sharedData,
- constSharedData);
- }
-
- Y_UNIT_TEST(Detach) {
- TRcBuf data = TRcBuf::Copy(TString("test"));
- TRcBuf data2 = data;
- char* res = data2.Detach();
- UNIT_ASSERT_UNEQUAL(data.GetData(), data2.GetData());
- UNIT_ASSERT_EQUAL(res, data2.GetData());
- UNIT_ASSERT_EQUAL(::memcmp(res, "test", 4), 0);
- UNIT_ASSERT_EQUAL(::memcmp(data.GetData(), "test", 4), 0);
- }
-
- Y_UNIT_TEST(Resize) {
- TRcBuf data = TRcBuf::Uninitialized(10, 20, 30);
- UNIT_ASSERT_EQUAL(data.size(), 10);
- UNIT_ASSERT_EQUAL(data.Headroom(), 20);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), data.size() + data.Headroom() + data.Tailroom());
- data.GrowFront(5);
- UNIT_ASSERT_EQUAL(data.size(), 15);
- UNIT_ASSERT_EQUAL(data.Headroom(), 15);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), data.size() + data.Headroom() + data.Tailroom());
- data.GrowBack(5);
- UNIT_ASSERT_EQUAL(data.size(), 20);
- UNIT_ASSERT_EQUAL(data.Headroom(), 15);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 25);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), data.size() + data.Headroom() + data.Tailroom());
- data.GrowFront(21);
- UNIT_ASSERT_EQUAL(data.size(), 41);
- UNIT_ASSERT_EQUAL(data.Headroom(), 0);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 25);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 66);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), data.size() + data.Headroom() + data.Tailroom());
- data.GrowBack(32);
- UNIT_ASSERT_EQUAL(data.size(), 73);
- UNIT_ASSERT_EQUAL(data.Headroom(), 0);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 0);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 73);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), data.size() + data.Headroom() + data.Tailroom());
- }
-
- Y_UNIT_TEST(ResizeUnshare) {
- TRcBuf data = TRcBuf::Uninitialized(10, 20, 30);
- TRcBuf otherData(data);
- UNIT_ASSERT_EQUAL(data.data(), otherData.data());
- UNIT_ASSERT_EQUAL(data.size(), 10);
- UNIT_ASSERT_EQUAL(data.Headroom(), 20);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- UNIT_ASSERT_EQUAL(otherData.size(), 10);
- UNIT_ASSERT_EQUAL(otherData.Headroom(), 20);
- UNIT_ASSERT_EQUAL(otherData.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(otherData.GetOccupiedMemorySize(), 60);
- data.GrowFront(5);
- data.GrowBack(5);
- UNIT_ASSERT_EQUAL(data.data() + 5, otherData.data());
- UNIT_ASSERT_EQUAL(data.size(), 20);
- UNIT_ASSERT_EQUAL(data.Headroom(), 15);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 25);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- otherData.GrowFront(5);
- UNIT_ASSERT_UNEQUAL(data.data(), otherData.data());
- UNIT_ASSERT_EQUAL(otherData.size(), 15);
- UNIT_ASSERT_EQUAL(otherData.Headroom(), 15);
- UNIT_ASSERT_EQUAL(otherData.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(otherData.GetOccupiedMemorySize(), 60);
- data.TrimBack(15);
- data.TrimFront(10);
- UNIT_ASSERT_EQUAL(data.size(), 10);
- UNIT_ASSERT_EQUAL(data.Headroom(), 20);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 30);
- UNIT_ASSERT_EQUAL(data.GetOccupiedMemorySize(), 60);
- }
-
- Y_UNIT_TEST(Trim) {
- TRcBuf data = TRcBuf::Uninitialized(10, 20, 30);
- TRcBuf otherData(data);
- otherData.TrimBack(5);
- UNIT_ASSERT_EQUAL(data.data(), otherData.data());
- UNIT_ASSERT_EQUAL(otherData.Headroom(), 20);
- UNIT_ASSERT_EQUAL(otherData.Tailroom(), 0);
- TRcBuf otherData2(data);
- otherData2.TrimBack(2);
- otherData2.TrimFront(1);
- UNIT_ASSERT_EQUAL(data.data() + 1, otherData2.data());
- UNIT_ASSERT_EQUAL(otherData2.Headroom(), 0);
- UNIT_ASSERT_EQUAL(otherData2.Tailroom(), 0);
- otherData.TrimBack(2);
- otherData.TrimFront(1);
- UNIT_ASSERT_EQUAL(otherData.data(), otherData2.data());
- data.GrowFront(5);
- data.GrowBack(5);
- UNIT_ASSERT_EQUAL(data.data() + 6, otherData2.data());
- UNIT_ASSERT_EQUAL(data.data() + 6, otherData.data());
- otherData.GrowFront(1);
- UNIT_ASSERT_UNEQUAL(data.data() + 7, otherData.data());
- otherData2.GrowBack(1);
- UNIT_ASSERT_UNEQUAL(data.data() + 6, otherData2.data());
- data = TRcBuf::Uninitialized(10);
- otherData = data;
- data.TrimBack(5);
- UNIT_ASSERT_EQUAL(data.data(), otherData.data());
- UNIT_ASSERT_EQUAL(data.size(), 5);
- }
-
- Y_UNIT_TEST(SliceUnshare) {
- TRcBuf data = TRcBuf::Uninitialized(10, 20, 30);
- TRcBuf otherData(TRcBuf::Piece, data.data() + 1, data.size() - 2, data);
- UNIT_ASSERT_EQUAL(otherData.Headroom(), 0);
- UNIT_ASSERT_EQUAL(otherData.Tailroom(), 0);
- }
-
- Y_UNIT_TEST(Reserve) {
- TRcBuf data = TRcBuf::Copy("test", 4, 5, 6);
- TRcBuf data2 = data;
- data.reserve(1);
- data.ReserveTailroom(6);
- UNIT_ASSERT_EQUAL(data.GetData(), data2.GetData());
- UNIT_ASSERT_EQUAL(data.GetSize(), data2.GetSize());
- UNIT_ASSERT_EQUAL(data.Tailroom(), 6);
- data.ReserveHeadroom(5);
- UNIT_ASSERT_EQUAL(data.GetData(), data2.GetData());
- UNIT_ASSERT_EQUAL(data.GetSize(), data2.GetSize());
- UNIT_ASSERT_EQUAL(data.Headroom(), 5);
- data.ReserveBidi(5, 6);
- UNIT_ASSERT_EQUAL(data.GetData(), data2.GetData());
- UNIT_ASSERT_EQUAL(data.GetSize(), data2.GetSize());
- UNIT_ASSERT_EQUAL(data.Headroom(), 5);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 6);
- data.ReserveHeadroom(6);
- UNIT_ASSERT_EQUAL(data.Headroom(), 6);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 6);
- UNIT_ASSERT_EQUAL(::memcmp(data.GetData(), "test", 4), 0);
- data.ReserveTailroom(7);
- UNIT_ASSERT_EQUAL(data.Headroom(), 6);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 7);
- UNIT_ASSERT_EQUAL(::memcmp(data.GetData(), "test", 4), 0);
- data.ReserveBidi(7, 8);
- UNIT_ASSERT_EQUAL(data.Headroom(), 7);
- UNIT_ASSERT_EQUAL(data.Tailroom(), 8);
- UNIT_ASSERT_EQUAL(::memcmp(data.GetData(), "test", 4), 0);
- }
-}
diff --git a/library/cpp/actors/util/recentwnd.h b/library/cpp/actors/util/recentwnd.h
deleted file mode 100644
index 0f5ee17fa0..0000000000
--- a/library/cpp/actors/util/recentwnd.h
+++ /dev/null
@@ -1,67 +0,0 @@
-#pragma once
-
-#include <util/generic/deque.h>
-
-template <typename TElem,
- template <typename, typename...> class TContainer = TDeque>
-class TRecentWnd {
-public:
- TRecentWnd(ui32 wndSize)
- : MaxWndSize_(wndSize)
- {
- }
-
- void Push(const TElem& elem) {
- if (Window_.size() == MaxWndSize_)
- Window_.erase(Window_.begin());
- Window_.emplace_back(elem);
- }
-
- void Push(TElem&& elem) {
- if (Window_.size() == MaxWndSize_)
- Window_.erase(Window_.begin());
- Window_.emplace_back(std::move(elem));
- }
-
- TElem& Last() {
- return Window_.back();
- }
- const TElem& Last() const {
- return Window_.back();
- }
- bool Full() const {
- return Window_.size() == MaxWndSize_;
- }
- ui64 Size() const {
- return Window_.size();
- }
-
- using const_iterator = typename TContainer<TElem>::const_iterator;
-
- const_iterator begin() {
- return Window_.begin();
- }
- const_iterator end() {
- return Window_.end();
- }
-
- void Reset(ui32 wndSize = 0) {
- Window_.clear();
- if (wndSize != 0) {
- MaxWndSize_ = wndSize;
- }
- }
-
- void ResetWnd(ui32 wndSize) {
- Y_ABORT_UNLESS(wndSize != 0);
- MaxWndSize_ = wndSize;
- if (Window_.size() > MaxWndSize_) {
- Window_.erase(Window_.begin(),
- Window_.begin() + Window_.size() - MaxWndSize_);
- }
- }
-
-private:
- TContainer<TElem> Window_;
- ui32 MaxWndSize_;
-};
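Editor's note: TRecentWnd above is a fixed-capacity sliding window that evicts its oldest element when a new one is pushed into a full window. A brief hedged usage sketch (the element type and sample values are illustrative):

    TRecentWnd<ui64> window(3);
    for (ui64 sample : {10, 20, 30, 40}) {
        window.Push(sample); // once the window holds three elements, the oldest one is dropped
    }
    // window now contains {20, 30, 40}: Full() is true, Size() == 3, Last() == 40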
diff --git a/library/cpp/actors/util/rope.cpp b/library/cpp/actors/util/rope.cpp
deleted file mode 100644
index 0927774099..0000000000
--- a/library/cpp/actors/util/rope.cpp
+++ /dev/null
@@ -1,13 +0,0 @@
-#include "rope.h"
-#include <library/cpp/containers/absl_flat_hash/flat_hash_set.h>
-
-size_t TRope::GetOccupiedMemorySize() const {
- size_t res = 0;
- absl::flat_hash_set<const void*> chunks;
- for (const auto& chunk : Chain) {
- if (const auto [it, inserted] = chunks.insert(chunk.Backend.UniqueId()); inserted) {
- res += chunk.Backend.GetOccupiedMemorySize();
- }
- }
- return res;
-}
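Editor's note: TRope::GetOccupiedMemorySize() above deduplicates chunks by Backend.UniqueId() so that several TRcBuf slices referencing the same backend are charged only once. A hedged illustration using only the APIs from the deleted headers (the buffer contents and slice bounds are illustrative):

    TRcBuf whole = TRcBuf::Copy("0123456789", 10);
    TRope rope;
    rope.Insert(rope.End(), TRope(TRcBuf(TRcBuf::Piece, whole.data(), 4, whole)));
    rope.Insert(rope.End(), TRope(TRcBuf(TRcBuf::Piece, whole.data() + 6, 4, whole)));
    // Both chunks share whole's backend, so GetOccupiedMemorySize() counts that buffer once,
    // even though the rope holds two separate 4-byte chunks.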
diff --git a/library/cpp/actors/util/rope.h b/library/cpp/actors/util/rope.h
deleted file mode 100644
index b092d502cd..0000000000
--- a/library/cpp/actors/util/rope.h
+++ /dev/null
@@ -1,1148 +0,0 @@
-#pragma once
-
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/generic/hash_set.h>
-#include <util/generic/scope.h>
-#include <util/stream/zerocopy.h>
-#include <util/stream/str.h>
-#include <util/system/sanitizers.h>
-#include <util/system/valgrind.h>
-
-// exactly one of them must be included
-#include "rope_cont_embedded_list.h"
-//#include "rope_cont_list.h"
-//#include "rope_cont_deque.h"
-
-#include "rc_buf.h"
-
-class TRopeAlignedBuffer : public IContiguousChunk {
- static constexpr size_t Alignment = 16;
- static constexpr size_t MallocAlignment = sizeof(size_t);
-
- ui32 Size;
- const ui32 Capacity;
- const ui32 Offset;
- alignas(Alignment) char Data[];
-
- TRopeAlignedBuffer(size_t size)
- : Size(size)
- , Capacity(size)
- , Offset((Alignment - reinterpret_cast<uintptr_t>(Data)) & (Alignment - 1))
- {
- Y_ABORT_UNLESS(Offset <= Alignment - MallocAlignment);
- }
-
-public:
- static TIntrusivePtr<TRopeAlignedBuffer> Allocate(size_t size) {
- return new(malloc(sizeof(TRopeAlignedBuffer) + size + Alignment - MallocAlignment)) TRopeAlignedBuffer(size);
- }
-
- void *operator new(size_t) {
- Y_ABORT();
- }
-
- void *operator new(size_t, void *ptr) {
- return ptr;
- }
-
- void operator delete(void *ptr) {
- free(ptr);
- }
-
- void operator delete(void* p, void* ptr) {
- Y_UNUSED(p);
- Y_UNUSED(ptr);
- }
-
- TContiguousSpan GetData() const override {
- return {Data + Offset, Size};
- }
-
- TMutableContiguousSpan GetDataMut() override {
- return {Data + Offset, Size};
- }
-
- size_t GetOccupiedMemorySize() const override {
- return Capacity;
- }
-
- size_t GetCapacity() const {
- return Capacity;
- }
-
- char *GetBuffer() {
- return Data + Offset;
- }
-};
-
-namespace NRopeDetails {
-
- template<bool IsConst, typename TRope, typename TList>
- struct TIteratorTraits;
-
- template<typename TRope, typename TList>
- struct TIteratorTraits<true, TRope, TList> {
- using TRopePtr = const TRope*;
- using TListIterator = typename TList::const_iterator;
- };
-
- template<typename TRope, typename TList>
- struct TIteratorTraits<false, TRope, TList> {
- using TRopePtr = TRope*;
- using TListIterator = typename TList::iterator;
- };
-
-} // NRopeDetails
-
-class TRopeArena;
-
-template<typename T>
-struct always_false : std::false_type {};
-
-class TRope {
- friend class TRopeArena;
-
- using TChunkList = NRopeDetails::TChunkList<TRcBuf>;
-
-private:
-    // we use a list here to store chain items because we have to keep iterators valid across erase/insert operations;
-    // TRope's iterator wraps the underlying container's iterator, so the container must keep iterators valid on
-    // deletion; hence the list
- TChunkList Chain;
- size_t Size = 0;
-
-private:
- template<bool IsConst>
- class TIteratorImpl {
- using TTraits = NRopeDetails::TIteratorTraits<IsConst, TRope, TChunkList>;
-
- typename TTraits::TRopePtr Rope;
- typename TTraits::TListIterator Iter;
- const char *Ptr; // ptr is always nullptr when iterator is positioned at the rope end
-
-#ifndef NDEBUG
- ui32 ValidityToken;
-#endif
-
- private:
- TIteratorImpl(typename TTraits::TRopePtr rope, typename TTraits::TListIterator iter, const char *ptr = nullptr)
- : Rope(rope)
- , Iter(iter)
- , Ptr(ptr)
-#ifndef NDEBUG
- , ValidityToken(Rope->GetValidityToken())
-#endif
- {}
-
- public:
- TIteratorImpl()
- : Rope(nullptr)
- , Ptr(nullptr)
- {}
-
- template<bool IsOtherConst>
- TIteratorImpl(const TIteratorImpl<IsOtherConst>& other)
- : Rope(other.Rope)
- , Iter(other.Iter)
- , Ptr(other.Ptr)
-#ifndef NDEBUG
- , ValidityToken(other.ValidityToken)
-#endif
- {}
-
- void CheckValid() const {
-#ifndef NDEBUG
- Y_ABORT_UNLESS(ValidityToken == Rope->GetValidityToken());
- Y_ABORT_UNLESS(Iter == Rope->Chain.end() || Iter->Backend);
-#endif
- }
-
- TIteratorImpl& operator +=(size_t amount) {
- CheckValid();
-
- while (amount) {
- Y_DEBUG_ABORT_UNLESS(Valid());
- const size_t max = ContiguousSize();
- const size_t num = std::min(amount, max);
- amount -= num;
- Ptr += num;
- if (Ptr == Iter->End) {
- AdvanceToNextContiguousBlock();
- }
- }
-
- return *this;
- }
-
- TIteratorImpl operator +(size_t amount) const {
- CheckValid();
-
- return TIteratorImpl(*this) += amount;
- }
-
- TIteratorImpl& operator -=(size_t amount) {
- CheckValid();
-
- while (amount) {
- const size_t num = Ptr ? std::min<size_t>(amount, Ptr - Iter->Begin) : 0;
- amount -= num;
- Ptr -= num;
- if (amount) {
- Y_DEBUG_ABORT_UNLESS(Iter != GetChainBegin());
- --Iter;
- Ptr = Iter->End;
- }
- }
-
- return *this;
- }
-
- TIteratorImpl operator -(size_t amount) const {
- CheckValid();
- return TIteratorImpl(*this) -= amount;
- }
-
- std::pair<const char*, size_t> operator *() const {
- return {ContiguousData(), ContiguousSize()};
- }
-
- TIteratorImpl& operator ++() {
- AdvanceToNextContiguousBlock();
- return *this;
- }
-
- TIteratorImpl operator ++(int) const {
- auto it(*this);
- it.AdvanceToNextContiguousBlock();
- return it;
- }
-
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
- // Operation with contiguous data
- ////////////////////////////////////////////////////////////////////////////////////////////////////////////////
-
- // Get the pointer to the contiguous block of data; valid locations are [Data; Data + Size).
- const char *ContiguousData() const {
- CheckValid();
- return Ptr;
- }
-
- template<bool Mut = !IsConst, std::enable_if_t<Mut, bool> = true>
- char *ContiguousDataMut() {
- CheckValid();
- return GetChunk().GetDataMut();
- }
-
- template<bool Mut = !IsConst, std::enable_if_t<Mut, bool> = true>
- char *UnsafeContiguousDataMut() {
- CheckValid();
- return GetChunk().UnsafeGetDataMut();
- }
-
-        // Get the number of bytes remaining in the current contiguous block.
- size_t ContiguousSize() const {
- CheckValid();
- return Ptr ? Iter->End - Ptr : 0;
- }
-
- size_t ChunkOffset() const {
- return Ptr ? Ptr - Iter->Begin : 0;
- }
-
- // Advance to next contiguous block of data.
- void AdvanceToNextContiguousBlock() {
- CheckValid();
- Y_DEBUG_ABORT_UNLESS(Valid());
- ++Iter;
- Ptr = Iter != GetChainEnd() ? Iter->Begin : nullptr;
- }
-
-        // Extract some data and advance. Size is not checked here, so it must be valid.
- void ExtractPlainDataAndAdvance(void *buffer, size_t len) {
- CheckValid();
-
- while (len) {
- Y_DEBUG_ABORT_UNLESS(Ptr);
-
- // calculate amount of bytes we need to move
- const size_t max = ContiguousSize();
- const size_t num = std::min(len, max);
-
- // copy data to the buffer and advance buffer pointers
- memcpy(buffer, Ptr, num);
- buffer = static_cast<char*>(buffer) + num;
- len -= num;
-
- // advance iterator itself
- Ptr += num;
- if (Ptr == Iter->End) {
- AdvanceToNextContiguousBlock();
- }
- }
- }
-
- // Checks if the iterator points to the end of the rope or not.
- bool Valid() const {
- CheckValid();
- return Ptr;
- }
-
- template<bool IsOtherConst>
- bool operator ==(const TIteratorImpl<IsOtherConst>& other) const {
- Y_DEBUG_ABORT_UNLESS(Rope == other.Rope);
- CheckValid();
- other.CheckValid();
- return Iter == other.Iter && Ptr == other.Ptr;
- }
-
- template<bool IsOtherConst>
- bool operator !=(const TIteratorImpl<IsOtherConst>& other) const {
- CheckValid();
- other.CheckValid();
- return !(*this == other);
- }
-
- private:
- friend class TRope;
-
- typename TTraits::TListIterator operator ->() const {
- CheckValid();
- return Iter;
- }
-
- const TRcBuf& GetChunk() const {
- CheckValid();
- return *Iter;
- }
-
- template<bool Mut = !IsConst, std::enable_if_t<Mut, bool> = true>
- TRcBuf& GetChunk() {
- CheckValid();
- return *Iter;
- }
-
- typename TTraits::TListIterator GetChainBegin() const {
- CheckValid();
- return Rope->Chain.begin();
- }
-
- typename TTraits::TListIterator GetChainEnd() const {
- CheckValid();
- return Rope->Chain.end();
- }
-
- bool PointsToChunkMiddle() const {
- CheckValid();
- return Ptr && Ptr != Iter->Begin;
- }
- };
-
-public:
-#ifndef NDEBUG
- ui32 ValidityToken = 0;
- ui32 GetValidityToken() const { return ValidityToken; }
- void InvalidateIterators() { ++ValidityToken; }
-#else
- void InvalidateIterators() {}
-#endif
-
-public:
- using TConstIterator = TIteratorImpl<true>;
- using TIterator = TIteratorImpl<false>;
-
-public:
- TRope() = default;
- TRope(const TRope& rope) = default;
-
- TRope(const TRcBuf& data) {
- if(!data.HasBuffer()) {
- return;
- }
- Size = data.GetSize();
- Chain.PutToEnd(data);
- }
-
- TRope(TRcBuf&& data) {
- if(!data.HasBuffer()) {
- return;
- }
- Size = data.GetSize();
- Chain.PutToEnd(std::move(data));
- }
-
- TRope(TRope&& rope)
- : Chain(std::move(rope.Chain))
- , Size(std::exchange(rope.Size, 0))
- {
- rope.InvalidateIterators();
- }
-
- explicit TRope(TString s) {
- if (s) {
- Size = s.size();
- if (s.capacity() < 32) {
- s.reserve(32);
- }
- Chain.PutToEnd(std::move(s));
- }
- }
-
- explicit TRope(NActors::TSharedData s) {
- Size = s.size();
- Chain.PutToEnd(std::move(s));
- }
-
- TRope(IContiguousChunk::TPtr item) {
- Size = item->GetData().size();
- Chain.PutToEnd(std::move(item));
- }
-
- TRope(TConstIterator begin, TConstIterator end) {
- Y_DEBUG_ABORT_UNLESS(begin.Rope == end.Rope);
- if (begin.Rope == this) {
- TRope temp(begin, end);
- *this = std::move(temp);
- return;
- }
-
- while (begin.Iter != end.Iter) {
- const size_t size = begin.ContiguousSize();
- Chain.PutToEnd(TRcBuf::Piece, begin.ContiguousData(), size, begin.GetChunk());
- begin.AdvanceToNextContiguousBlock();
- Size += size;
- }
-
- if (begin != end && end.PointsToChunkMiddle()) {
- Chain.PutToEnd(TRcBuf::Piece, begin.Ptr, end.Ptr, begin.GetChunk());
- Size += end.Ptr - begin.Ptr;
- }
- }
-
- ~TRope() {
- }
-
-    // creates a copy of the rope; chunks with an inefficient storage ratio are re-copied via the arena allocator
- static TRope CopySpaceOptimized(TRope&& origin, size_t worstRatioPer1k, TRopeArena& arena);
-
- TRope& operator=(const TRope& other) {
- Chain = other.Chain;
- Size = other.Size;
- return *this;
- }
-
- TRope& operator=(TRope&& other) {
- Chain = std::move(other.Chain);
- Size = std::exchange(other.Size, 0);
- InvalidateIterators();
- other.InvalidateIterators();
- return *this;
- }
-
- size_t GetSize() const {
- return Size;
- }
-
- size_t size() const {
- return Size;
- }
-
- size_t capacity() const {
- return Size;
- }
-
- bool IsEmpty() const {
- return !Size;
- }
-
- bool empty() const {
- return IsEmpty();
- }
-
- operator bool() const {
- return Chain;
- }
-
- TIterator Begin() {
- return *this ? TIterator(this, Chain.begin(), Chain.GetFirstChunk().Begin) : End();
- }
-
- TIterator End() {
- return TIterator(this, Chain.end());
- }
-
- TIterator Iterator(TChunkList::iterator it) {
- return TIterator(this, it, it != Chain.end() ? it->Begin : nullptr);
- }
-
- TIterator Position(size_t index) {
- return Begin() + index;
- }
-
- TConstIterator Begin() const {
- return *this ? TConstIterator(this, Chain.begin(), Chain.GetFirstChunk().Begin) : End();
- }
-
- TConstIterator End() const {
- return TConstIterator(this, Chain.end());
- }
-
- TConstIterator Position(size_t index) const {
- return Begin() + index;
- }
-
- TConstIterator begin() const { return Begin(); }
- TConstIterator end() const { return End(); }
-
- void Erase(TIterator begin, TIterator end) {
- Cut(begin, end, nullptr);
- }
-
- TRope Extract(TIterator begin, TIterator end) {
- TRope res;
- Cut(begin, end, &res);
- return res;
- }
-
- void ExtractFront(size_t num, TRope *dest) {
- Y_ABORT_UNLESS(Size >= num);
- if (num == Size && !*dest) {
- *dest = std::move(*this);
- return;
- }
- Size -= num;
- dest->Size += num;
-
- TChunkList::iterator first = Chain.begin();
-
- if (num >= first->GetSize() && dest->Chain) { // see if we can glue first chunk to the destination rope
- auto& last = dest->Chain.GetLastChunk();
- if (last.Backend == first->Backend && last.End == first->Begin) {
- last.End = first->End;
- num -= first->GetSize();
- first = Chain.Erase(first);
- }
- }
-
- TChunkList::iterator it;
- for (it = first; num && num >= it->GetSize(); ++it) {
- num -= it->GetSize();
- }
- first = dest->Chain.Splice(dest->Chain.end(), Chain, first, it);
-
- if (num) { // still more data to extract
- if (dest->Chain) {
- auto& last = dest->Chain.GetLastChunk();
- if (last.Backend == first->Backend && last.End == first->Begin) {
- first->Begin += num;
- last.End = first->Begin;
- return;
- }
- }
- dest->Chain.PutToEnd(TRcBuf::Piece, first->Begin, first->Begin + num, *first);
- first->Begin += num;
- }
- }
-
- void Insert(TIterator pos, TRope&& rope) {
- Y_DEBUG_ABORT_UNLESS(this == pos.Rope);
- Y_DEBUG_ABORT_UNLESS(this != &rope);
-
- if (!rope) {
- return; // do nothing for empty rope
- }
-
- // adjust size
- Size += std::exchange(rope.Size, 0);
-
- // check if we have to split the block
- if (pos.PointsToChunkMiddle()) {
- pos.Iter = Chain.InsertBefore(pos.Iter, TRcBuf::Piece, pos->Begin, pos.Ptr, pos.GetChunk());
- ++pos.Iter;
- pos->Begin = pos.Ptr;
- }
-
- // perform glueing if possible
- TRcBuf *ropeLeft = &rope.Chain.GetFirstChunk();
- TRcBuf *ropeRight = &rope.Chain.GetLastChunk();
- bool gluedLeft = false, gluedRight = false;
- if (pos.Iter != Chain.begin()) { // glue left part whenever possible
- // obtain iterator to previous chunk
- auto prev(pos.Iter);
- --prev;
- if (prev->End == ropeLeft->Begin && prev->Backend == ropeLeft->Backend) { // it is glueable
- prev->End = ropeLeft->End;
- gluedLeft = true;
- }
- }
- if (pos.Iter != Chain.end() && ropeRight->End == pos->Begin && ropeRight->Backend == pos->Backend) {
- pos->Begin = ropeRight->Begin;
- gluedRight = true;
- }
- if (gluedLeft) {
- rope.Chain.EraseFront();
- }
- if (gluedRight) {
- if (rope) {
- rope.Chain.EraseBack();
-            } else { // it looks like double-glueing of the same chunk; we have to drop the previous one
- auto prev(pos.Iter);
- --prev;
- pos->Begin = prev->Begin;
- pos.Iter = Chain.Erase(prev);
- }
- }
- if (rope) { // insert remains
- Chain.Splice(pos.Iter, rope.Chain, rope.Chain.begin(), rope.Chain.end());
- }
- Y_DEBUG_ABORT_UNLESS(!rope);
- InvalidateIterators();
- }
-
- void EraseFront(size_t len) {
- Y_DEBUG_ABORT_UNLESS(Size >= len);
- Size -= len;
-
- while (len) {
- Y_DEBUG_ABORT_UNLESS(Chain);
- TRcBuf& item = Chain.GetFirstChunk();
- const size_t itemSize = item.GetSize();
- if (len >= itemSize) {
- Chain.EraseFront();
- len -= itemSize;
- } else {
- item.Begin += len;
- break;
- }
- }
-
- InvalidateIterators();
- }
-
- void EraseBack(size_t len) {
- Y_DEBUG_ABORT_UNLESS(Size >= len);
- Size -= len;
-
- while (len) {
- Y_DEBUG_ABORT_UNLESS(Chain);
- TRcBuf& item = Chain.GetLastChunk();
- const size_t itemSize = item.GetSize();
- if (len >= itemSize) {
- Chain.EraseBack();
- len -= itemSize;
- } else {
- item.End -= len;
- break;
- }
- }
-
- InvalidateIterators();
- }
-
- bool ExtractFrontPlain(void *buffer, size_t len) {
- // check if we have enough data in the rope
- if (Size < len) {
- return false;
- }
- Size -= len;
- while (len) {
- auto& chunk = Chain.GetFirstChunk();
- Y_DEBUG_ABORT_UNLESS(chunk.Backend);
- const size_t num = Min(len, chunk.GetSize());
- memcpy(buffer, chunk.Begin, num);
- buffer = static_cast<char*>(buffer) + num;
- len -= num;
- chunk.Begin += num;
- if (chunk.Begin == chunk.End) {
- Chain.EraseFront();
- }
- }
- InvalidateIterators();
- return true;
- }
-
- bool FetchFrontPlain(char **ptr, size_t *remain) {
- const size_t num = Min(*remain, Size);
- ExtractFrontPlain(*ptr, num);
- *ptr += num;
- *remain -= num;
- return !*remain;
- }
-
- static int Compare(const TRope& x, const TRope& y) {
- TConstIterator xIter = x.Begin(), yIter = y.Begin();
- while (xIter.Valid() && yIter.Valid()) {
- const size_t step = std::min(xIter.ContiguousSize(), yIter.ContiguousSize());
- if (int res = memcmp(xIter.ContiguousData(), yIter.ContiguousData(), step)) {
- return res;
- }
- xIter += step;
- yIter += step;
- }
- return xIter.Valid() - yIter.Valid();
- }
-
- static int Compare(const TRope& x, const TContiguousSpan& y) {
- TConstIterator xIter = x.Begin();
- const char* yData = y.data();
- size_t yOffset = 0;
- while (xIter.Valid() && yOffset != y.size()) {
- const size_t step = std::min(xIter.ContiguousSize(), y.size() - yOffset);
- if (int res = memcmp(xIter.ContiguousData(), yData + yOffset, step)) {
- return res;
- }
- xIter += step;
- yOffset += step;
- }
- return xIter.Valid() - (yOffset != y.size());
- }
-
- static int Compare(const TContiguousSpan& x, const TRope& y) {
- return -Compare(y, x);
- }
-
- // Use this method carefully -- it may significantly reduce performance when misused.
- TString ConvertToString() const {
- return ExtractUnderlyingContainerOrCopy<TString>();
- }
-
- /**
-     * WARN: zero-copy extraction is supported only for natively supported container types; for any other type the data *will* be copied
- */
- template <class TResult>
- TResult ExtractUnderlyingContainerOrCopy() const {
- if (Chain.begin() != Chain.end() && ++Chain.begin() == Chain.end()) {
- return Chain.GetFirstChunk().ExtractUnderlyingContainerOrCopy<TResult>();
- }
-
- const size_t size = GetSize();
- TResult res = TResult::Uninitialized(size);
- char* data = NContiguousDataDetails::TContainerTraits<TResult>::UnsafeGetDataMut(res);
- Begin().ExtractPlainDataAndAdvance(data, size);
- return res;
- }
-
- void clear() {
- Erase(Begin(), End());
- }
-
- bool IsContiguous() const {
- if(Begin() == End() || (++Begin() == End())) {
- return true;
- }
- return false;
- }
-
- void Compact(size_t headroom = 0, size_t tailroom = 0) {
- if(!IsContiguous()) {
-            // TODO(innokentii): use a better container once most outside users stop using TString
- TRcBuf res = TRcBuf::Uninitialized(GetSize(), headroom, tailroom);
- Begin().ExtractPlainDataAndAdvance(res.UnsafeGetDataMut(), res.size());
- Erase(Begin(), End());
- Insert(End(), TRope(res));
- }
- }
-
- static TRope Uninitialized(size_t size)
- {
- TRcBuf res = TRcBuf::Uninitialized(size);
- return TRope(res);
- }
-
- /**
-     * Compacts the data and calls GetData() on the underlying container
- * WARN: Will copy if data isn't contiguous
- */
- TContiguousSpan GetContiguousSpan() {
- if(Begin() == End()) {
- return {nullptr, 0};
- }
- Compact();
- return Begin()->GetContiguousSpan();
- }
-
- /**
-     * Compacts the data and calls GetDataMut() on the underlying container
- * WARN: Will copy if data isn't contiguous
- */
- TMutableContiguousSpan GetContiguousSpanMut() {
- if(Begin() == End()) {
- return {nullptr, 0};
- }
- Compact();
- return Begin()->GetContiguousSpanMut();
- }
-
- /**
-     * Compacts the data and calls UnsafeGetDataMut() on the underlying container
-     * WARN: Will copy if data isn't contiguous
-     * WARN: Even if the underlying container is shared, this returns a reference to its underlying data
- */
- TMutableContiguousSpan UnsafeGetContiguousSpanMut() {
- if(Begin() == End()) {
- return {nullptr, 0};
- }
- Compact();
- return Begin()->UnsafeGetContiguousSpanMut();
- }
-
- TString DebugString() const {
- TStringStream s;
- s << "{Size# " << Size;
- for (const auto& chunk : Chain) {
-            const char *data = chunk.Backend.GetData().data();
- s << " [" << chunk.Begin - data << ", " << chunk.End - data << ")@" << chunk.Backend.UniqueId();
- }
- s << "}";
- return s.Str();
- }
-
- explicit operator TRcBuf() {
- if(GetSize() == 0) {
- return TRcBuf();
- }
- Compact();
- return TRcBuf(Begin().GetChunk());
- }
-
- size_t GetOccupiedMemorySize() const;
-
- friend bool operator==(const TRope& x, const TRope& y) { return Compare(x, y) == 0; }
- friend bool operator!=(const TRope& x, const TRope& y) { return Compare(x, y) != 0; }
- friend bool operator< (const TRope& x, const TRope& y) { return Compare(x, y) < 0; }
- friend bool operator<=(const TRope& x, const TRope& y) { return Compare(x, y) <= 0; }
- friend bool operator> (const TRope& x, const TRope& y) { return Compare(x, y) > 0; }
- friend bool operator>=(const TRope& x, const TRope& y) { return Compare(x, y) >= 0; }
-
- friend bool operator==(const TRope& x, const TContiguousSpan& y) { return Compare(x, y) == 0; }
- friend bool operator!=(const TRope& x, const TContiguousSpan& y) { return Compare(x, y) != 0; }
- friend bool operator< (const TRope& x, const TContiguousSpan& y) { return Compare(x, y) < 0; }
- friend bool operator<=(const TRope& x, const TContiguousSpan& y) { return Compare(x, y) <= 0; }
- friend bool operator> (const TRope& x, const TContiguousSpan& y) { return Compare(x, y) > 0; }
- friend bool operator>=(const TRope& x, const TContiguousSpan& y) { return Compare(x, y) >= 0; }
-
- friend bool operator==(const TContiguousSpan& x, const TRope& y) { return Compare(x, y) == 0; }
- friend bool operator!=(const TContiguousSpan& x, const TRope& y) { return Compare(x, y) != 0; }
- friend bool operator< (const TContiguousSpan& x, const TRope& y) { return Compare(x, y) < 0; }
- friend bool operator<=(const TContiguousSpan& x, const TRope& y) { return Compare(x, y) <= 0; }
- friend bool operator> (const TContiguousSpan& x, const TRope& y) { return Compare(x, y) > 0; }
- friend bool operator>=(const TContiguousSpan& x, const TRope& y) { return Compare(x, y) >= 0; }
-
- // FIXME(innokentii) temporary hack
- friend bool operator==(const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) == 0; }
- friend bool operator!=(const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) != 0; }
- friend bool operator< (const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) < 0; }
- friend bool operator<=(const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) <= 0; }
- friend bool operator> (const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) > 0; }
- friend bool operator>=(const TRope& x, const TRcBuf& y) { return Compare(x, y.GetContiguousSpan()) >= 0; }
-
- friend bool operator==(const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) == 0; }
- friend bool operator!=(const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) != 0; }
- friend bool operator< (const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) < 0; }
- friend bool operator<=(const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) <= 0; }
- friend bool operator> (const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) > 0; }
- friend bool operator>=(const TRcBuf& x, const TRope& y) { return Compare(x.GetContiguousSpan(), y) >= 0; }
-
-
-private:
- void Cut(TIterator begin, TIterator end, TRope *target) {
-        // ensure all iterators belong to us
- Y_DEBUG_ABORT_UNLESS(this == begin.Rope && this == end.Rope);
-
- // if begin and end are equal, we do nothing -- checking this case allows us to find out that begin does not
- // point to End(), for example
- if (begin == end) {
- return;
- }
-
- auto addBlock = [&](const TRcBuf& from, const char *begin, const char *end) {
- if (target) {
- target->Chain.PutToEnd(TRcBuf::Piece, begin, end, from);
- target->Size += end - begin;
- }
- Size -= end - begin;
- };
-
- // consider special case -- when begin and end point to the same block; in this case we have to split up this
- // block into two parts
- if (begin.Iter == end.Iter) {
- TRcBuf chunkToSplit = begin.GetChunk();
- addBlock(chunkToSplit, begin.Ptr, end.Ptr);
- const char *firstChunkBegin = begin.PointsToChunkMiddle() ? begin->Begin : nullptr;
- begin->Begin = end.Ptr; // this affects both begin and end iterator pointed values
- if (firstChunkBegin) {
- Chain.InsertBefore(begin.Iter, TRcBuf::Piece, firstChunkBegin, begin.Ptr, chunkToSplit);
- }
- } else {
-            // check the first iterator -- if it does not start at the beginning of the block, we have to adjust the
-            // end of the first block to match the begin iterator and switch to the next block
- if (begin.PointsToChunkMiddle()) {
- addBlock(begin.GetChunk(), begin.Ptr, begin->End);
- begin->End = begin.Ptr;
- begin.AdvanceToNextContiguousBlock();
- }
-
- // now drop full blocks
- size_t rangeSize = 0;
- for (auto it = begin.Iter; it != end.Iter; ++it) {
- Y_DEBUG_ABORT_UNLESS(it->GetSize());
- rangeSize += it->GetSize();
- }
- if (rangeSize) {
- if (target) {
- end.Iter = target->Chain.Splice(target->Chain.end(), Chain, begin.Iter, end.Iter);
- target->Size += rangeSize;
- } else {
- end.Iter = Chain.Erase(begin.Iter, end.Iter);
- }
- Size -= rangeSize;
- }
-
- // and cut the last block if necessary
- if (end.PointsToChunkMiddle()) {
- addBlock(end.GetChunk(), end->Begin, end.Ptr);
- end->Begin = end.Ptr;
- }
- }
-
- InvalidateIterators();
- }
-};
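
For reference, a minimal usage sketch of the interface above, in the spirit of the unit tests removed later in this diff; the function name, buffer contents and include path are illustrative assumptions:

#include <library/cpp/actors/util/rope.h>

// Chain two buffers, trim both ends, then pull the front out as plain bytes.
TString RopeSketch() {
    TRope rope(TString("HELLO "));
    rope.Insert(rope.End(), TRope(TString("WORLD")));  // "HELLO WORLD"; chunks are spliced, not copied
    rope.EraseFront(1);                                // "ELLO WORLD"
    rope.EraseBack(1);                                 // "ELLO WORL"
    char buf[4];
    rope.ExtractFrontPlain(buf, sizeof(buf));          // copies "ELLO" into buf and consumes it
    return rope.ConvertToString();                     // " WORL"
}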
-
-class TRopeArena {
- using TAllocateCallback = std::function<TIntrusivePtr<IContiguousChunk>()>;
-
- TAllocateCallback Allocator;
- TRope Arena;
-
-public:
- TRopeArena(TAllocateCallback&& allocator)
- : Allocator(std::move(allocator))
- {}
-
- TRope CreateRope(const void *buffer, size_t len) {
- TRope res;
-
- while (len) {
- if (Arena) {
- auto iter = Arena.Begin();
- Y_DEBUG_ABORT_UNLESS(iter.Valid());
- char *dest = const_cast<char*>(iter.ContiguousData());
- const size_t bytesToCopy = std::min(len, iter.ContiguousSize());
- memcpy(dest, buffer, bytesToCopy);
- buffer = static_cast<const char*>(buffer) + bytesToCopy;
- len -= bytesToCopy;
- res.Insert(res.End(), Arena.Extract(Arena.Begin(), Arena.Position(bytesToCopy)));
- } else {
- Arena.Insert(Arena.End(), TRope(Allocator()));
- }
- }
-
- // align arena on 8-byte boundary
- const size_t align = 8;
- if (const size_t padding = Arena.GetSize() % align) {
- Arena.EraseFront(padding);
- }
-
- return res;
- }
-};
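
A hedged sketch of wiring the arena to a backing allocator; it reuses the TRopeSharedDataBackend declared further down in this diff, and the 4 KiB chunk size, function name and include paths are assumptions:

#include <library/cpp/actors/util/rope.h>
#include <library/cpp/actors/util/shared_data_rope_backend.h>

// Whenever the arena runs dry it asks the callback for a fresh backing chunk;
// CreateRope then copies the caller's bytes into arena space and returns a
// rope referencing that space.
TRope CopyIntoArena(const TString& payload) {
    TRopeArena arena([] {
        return MakeIntrusive<NActors::TRopeSharedDataBackend>(
            NActors::TSharedData::Uninitialized(4096));
    });
    return arena.CreateRope(payload.data(), payload.size());
}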
-
-struct TRopeUtils {
- static void Memset(TRope::TConstIterator dst, char c, size_t size) {
- while (size) {
- Y_DEBUG_ABORT_UNLESS(dst.Valid());
- size_t len = std::min(size, dst.ContiguousSize());
- memset(const_cast<char*>(dst.ContiguousData()), c, len);
- dst += len;
- size -= len;
- }
- }
-
- static void Memcpy(TRope::TConstIterator dst, TRope::TConstIterator src, size_t size) {
- while (size) {
- Y_DEBUG_ABORT_UNLESS(dst.Valid() && src.Valid(),
- "Invalid iterator in memcpy: dst.Valid() - %" PRIu32 ", src.Valid() - %" PRIu32,
- (ui32)dst.Valid(), (ui32)src.Valid());
- size_t len = std::min(size, std::min(dst.ContiguousSize(), src.ContiguousSize()));
- memcpy(const_cast<char*>(dst.ContiguousData()), src.ContiguousData(), len);
- dst += len;
- src += len;
- size -= len;
- }
- }
-
- static void Memcpy(TRope::TConstIterator dst, const char* src, size_t size) {
- while (size) {
- Y_DEBUG_ABORT_UNLESS(dst.Valid());
- size_t len = std::min(size, dst.ContiguousSize());
- memcpy(const_cast<char*>(dst.ContiguousData()), src, len);
- size -= len;
- dst += len;
- src += len;
- }
- }
-
- static void Memcpy(char* dst, TRope::TConstIterator src, size_t size) {
- while (size) {
- Y_DEBUG_ABORT_UNLESS(src.Valid());
- size_t len = std::min(size, src.ContiguousSize());
- memcpy(dst, src.ContiguousData(), len);
- size -= len;
- dst += len;
- src += len;
- }
- }
-
-    // copies at most sizeBound bytes, while src remains valid
- static size_t SafeMemcpy(char* dst, TRope::TIterator src, size_t sizeBound) {
- size_t origSize = sizeBound;
- while (sizeBound && src.Valid()) {
- size_t len = Min(sizeBound, src.ContiguousSize());
- memcpy(dst, src.ContiguousData(), len);
- sizeBound -= len;
- dst += len;
- src += len;
- }
- return origSize - sizeBound;
- }
-};
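
A small sketch of the helpers above; it assumes the rope exclusively owns its chunks, since Memset and Memcpy write through what is formally a const iterator, and the function name is illustrative:

#include <library/cpp/actors/util/rope.h>
#include <algorithm>

// Zero the first n bytes of a rope in place, then gather them back into a flat buffer.
void ScrubFront(TRope& rope, char* out, size_t n) {
    n = std::min(n, rope.GetSize());
    TRopeUtils::Memset(rope.Begin(), 0, n);    // spans chunk boundaries transparently
    TRopeUtils::Memcpy(out, rope.Begin(), n);  // scatter-gather copy into contiguous memory
}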
-
-template<size_t BLOCK, size_t ALIGN = 16>
-class TRopeSlideView {
-    alignas(ALIGN) char Slide[BLOCK]; // used when the contiguous data left in the current chunk is shorter than BLOCK
-    TRope::TIterator Position; // current position in the rope
-    size_t Size;
-    char* Head; // points to the data, either inside the current rope chunk or into Slide
-
-private:
- void FillBlock() {
- size_t chunkSize = Position.ContiguousSize();
- if (chunkSize >= BLOCK) {
- Size = chunkSize;
- Head = const_cast<char*>(Position.ContiguousData());
- } else {
- Size = TRopeUtils::SafeMemcpy(Slide, Position, BLOCK);
- Head = Slide;
- }
- }
-
-public:
- TRopeSlideView(TRope::TIterator position)
- : Position(position)
- {
- FillBlock();
- }
-
- TRopeSlideView(TRope &rope)
- : TRopeSlideView(rope.Begin())
- {}
-
-    // if the view is backed by the slide buffer, copy its contents back into the rope
- void FlushBlock() {
- if (Head == Slide) {
- TRopeUtils::Memcpy(Position, Head, Size);
- }
- }
-
- TRope::TIterator operator+=(size_t amount) {
- Position += amount;
- FillBlock();
- return Position;
- }
-
- TRope::TIterator GetPosition() const {
- return Position;
- }
-
- char* GetHead() const {
- return Head;
- }
-
- ui8* GetUi8Head() const {
- return reinterpret_cast<ui8*>(Head);
- }
-
- size_t ContiguousSize() const {
- return Size;
- }
-
- bool IsOnChunk() const {
- return Head != Slide;
- }
-};
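
A sketch of the sliding-window pattern this class is built for; the 64-byte window, the function name and the in-place mutation (which assumes the rope exclusively owns its data) are illustrative:

#include <library/cpp/actors/util/rope.h>
#include <algorithm>
#include <cstring>

// Walk a rope in windows of up to 64 bytes; a window that straddles a chunk
// boundary is staged in the view's internal buffer, and FlushBlock writes the
// mutation back into the rope.
void ZeroRopeInWindows(TRope& rope) {
    TRopeSlideView<64> view(rope);
    size_t left = rope.GetSize();
    while (left) {
        const size_t n = std::min<size_t>(64, view.ContiguousSize());
        memset(view.GetHead(), 0, n);  // mutate the current window
        view.FlushBlock();             // no-op when the window points directly into a chunk
        view += n;
        left -= n;
    }
}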
-
-class TRopeZeroCopyInput : public IZeroCopyInput {
- TRope::TConstIterator Iter;
- const char* Data = nullptr;
- size_t Len = 0;
-
-private:
- size_t DoNext(const void** ptr, size_t len) override {
- Y_DEBUG_ABORT_UNLESS(ptr);
- if (Len == 0) {
- if (Iter.Valid()) {
- Data = Iter.ContiguousData();
- Len = Iter.ContiguousSize();
- Y_DEBUG_ABORT_UNLESS(Len);
- Y_DEBUG_ABORT_UNLESS(Data);
- ++Iter;
- } else {
- Data = nullptr;
- }
- }
-
- size_t chunk = std::min(Len, len);
- *ptr = Data;
- Data += chunk;
- Len -= chunk;
- return chunk;
- }
-
-public:
- explicit TRopeZeroCopyInput(TRope::TConstIterator iter)
- : Iter(iter)
- {
- }
-};
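
A short sketch of draining a rope through the zero-copy stream interface, matching the RopeZeroCopyInput unit tests removed below; the function name is illustrative:

#include <library/cpp/actors/util/rope.h>

// Each Next() call exposes (the tail of) one chunk without copying it;
// a zero return value signals the end of the rope.
TString Drain(const TRope& rope) {
    TRopeZeroCopyInput input(rope.Begin());
    TString out;
    const char* data = nullptr;
    while (size_t len = input.Next(&data)) {
        out.append(data, len);
    }
    return out;
}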
-
-inline TRope TRope::CopySpaceOptimized(TRope&& origin, size_t worstRatioPer1k, TRopeArena& arena) {
- TRope res;
- for (TRcBuf& chunk : origin.Chain) {
- size_t ratio = chunk.GetSize() * 1024 / chunk.GetOccupiedMemorySize();
- if (ratio < 1024 - worstRatioPer1k) {
- res.Insert(res.End(), arena.CreateRope(chunk.Begin, chunk.GetSize()));
- } else {
- res.Chain.PutToEnd(std::move(chunk));
- }
- }
- res.Size = origin.Size;
- origin = TRope();
- return res;
-}
-
-
-#if defined(WITH_VALGRIND) || defined(_msan_enabled_)
-
-inline void CheckRopeIsDefined(TRope::TConstIterator begin, ui64 size) {
- while (size) {
- ui64 contiguousSize = Min(size, begin.ContiguousSize());
-# if defined(WITH_VALGRIND)
- VALGRIND_CHECK_MEM_IS_DEFINED(begin.ContiguousData(), contiguousSize);
-# endif
-# if defined(_msan_enabled_)
- NSan::CheckMemIsInitialized(begin.ContiguousData(), contiguousSize);
-# endif
- size -= contiguousSize;
- begin += contiguousSize;
- }
-}
-
-# define CHECK_ROPE_IS_DEFINED(begin, size) CheckRopeIsDefined(begin, size)
-
-#else
-
-# define CHECK_ROPE_IS_DEFINED(begin, size) do {} while (false)
-
-#endif
diff --git a/library/cpp/actors/util/rope_cont_embedded_list.h b/library/cpp/actors/util/rope_cont_embedded_list.h
deleted file mode 100644
index 294599538f..0000000000
--- a/library/cpp/actors/util/rope_cont_embedded_list.h
+++ /dev/null
@@ -1,391 +0,0 @@
-#pragma once
-
-#include <util/generic/intrlist.h>
-
-#include <util/random/random.h>
-
-namespace NRopeDetails {
-
-template<typename TChunk>
-class TChunkList {
- struct TItem : TChunk {
- TItem *Next = nullptr;
- TItem *Prev = nullptr;
-#ifndef NDEBUG
- ui64 ValidityToken = RandomNumber<ui64>();
-#endif
-
- template<typename... TArgs> TItem(TArgs&&... args) : TChunk(std::forward<TArgs>(args)...) {}
-
- ~TItem() {
- Invalidate();
- if (IsInUse()) {
- Unlink();
- }
- }
-
- void LinkBefore(TItem *item) {
- Next = item;
- Prev = item->Prev;
- Next->Prev = Prev->Next = this;
- }
-
- void Unlink() {
- Next->Prev = Prev;
- Prev->Next = Next;
- }
-
- bool IsInUse() const {
- return Next != nullptr;
- }
-
- void ClearSingleItem() {
- Y_DEBUG_ABORT_UNLESS(Next == this && Prev == this);
- static_cast<TChunk&>(*this) = {};
- Next = Prev = nullptr;
- }
-
- template<typename... TArgs>
- TItem *PrepareForUse(TArgs&&... args) {
- Y_DEBUG_ABORT_UNLESS(!IsInUse());
- static_cast<TChunk&>(*this) = TChunk(std::forward<TArgs>(args)...);
- Next = Prev = this;
- Invalidate();
- return this;
- }
-
- static void TransferRange(TItem *insertBefore, TItem *first, TItem *last) { // [first, last] -> insertBefore
- first->Prev->Next = last->Next;
- last->Next->Prev = first->Prev;
- first->Prev = insertBefore->Prev;
- last->Next = insertBefore;
- first->Prev->Next = first;
- last->Next->Prev = last;
- }
-
- void Invalidate() {
-#ifndef NDEBUG
- ValidityToken = RandomNumber<ui64>();
-#endif
- }
- };
-
-    // There are three possible states for the list:
-    // 1. It is empty. Next = Prev = nullptr, TChunk is default-constructed.
-    // 2. It contains a single item. Next = Prev = &Root, TChunk contains the data.
-    // 3. It has more than one item. Next and Prev make up a doubly linked list starting with the Root item; TChunk
-    //    contains the first item.
-    // This container scheme leads to the following properties:
-    // 1. Deleting the first item in the list invalidates iterators to the first two items.
-    // 2. Inserting something before the first item also invalidates iterators to the first two items.
-    // This happens because Root is always the first element of the list, so when inserting before Root we have to
-    // shift the original Root element into the newly allocated item and replace Root with the newly inserted value.
-    // This also makes right-to-left traversal more efficient in some cases.
- TItem Root;
-
- template<typename... TArgs>
- TItem *AllocateItem(TArgs&&... args) {
- return Root.IsInUse()
- ? new TItem{std::forward<TArgs>(args)...}
- : Root.PrepareForUse(std::forward<TArgs>(args)...);
- }
-
-private:
- template<bool IsConst>
- class TIterator {
- friend class TChunkList;
-
- using TChunkListType = std::conditional_t<IsConst, const TChunkList, TChunkList>;
- using TItemType = std::conditional_t<IsConst, const TItem, TItem>;
- using TChunkType = std::conditional_t<IsConst, const TChunk, TChunk>;
-
- TChunkListType *Cont = nullptr;
- TItemType *Item = nullptr;
-#ifndef NDEBUG
- ui64 ValidityToken = 0;
-#endif
-
- private:
- TIterator(TChunkListType *cont, TItemType *item)
- : Cont(cont)
- , Item(item)
- {
- UpdateValidityToken();
- }
-
- public:
- TIterator() = default;
-
- template<bool OtherIsConst, typename = std::enable_if_t<OtherIsConst <= IsConst>>
- TIterator(const TIterator<OtherIsConst>& other)
- : Cont(other.Cont)
- , Item(other.Item)
- {
- UpdateValidityToken();
- }
-
- TChunkType& operator *() const {
- CheckValid();
- return *Item;
- }
-
- TChunkType *operator ->() const {
- CheckValid();
- return Item;
- }
-
- TIterator& operator++() {
- CheckValid();
- Y_DEBUG_ABORT_UNLESS(Item);
- Item = Item->Next;
- if (Item == &Cont->Root) {
- Item = nullptr; // make it end
- }
- UpdateValidityToken();
- return *this;
- }
-
- TIterator operator ++(int) {
- TIterator res(*this);
- ++*this;
- return res;
- }
-
- TIterator& operator--() {
- CheckValid();
- if (!Item) {
- Y_DEBUG_ABORT_UNLESS(*Cont);
- Item = Cont->Root.Prev;
- } else {
- Y_DEBUG_ABORT_UNLESS(Item != &Cont->Root);
- Item = Item->Prev;
- }
- UpdateValidityToken();
- return *this;
- }
-
- TIterator operator --(int) {
- TIterator res(*this);
- --*this;
- return res;
- }
-
- friend bool operator ==(const TIterator& x, const TIterator& y) {
- Y_DEBUG_ABORT_UNLESS(x.Cont == y.Cont);
- x.CheckValid();
- y.CheckValid();
- return x.Item == y.Item;
- }
-
- friend bool operator !=(const TIterator& x, const TIterator& y) {
- return !(x == y);
- }
-
- private:
- void CheckValid() const {
-#ifndef NDEBUG
- Y_DEBUG_ABORT_UNLESS(ValidityToken == (Item ? Item->ValidityToken : 0));
- Y_DEBUG_ABORT_UNLESS(Cont && (Item != &Cont->Root || *Cont));
-#endif
- }
-
- void UpdateValidityToken() {
-#ifndef NDEBUG
- ValidityToken = Item ? Item->ValidityToken : 0;
-#endif
- CheckValid();
- }
- };
-
-public:
- using iterator = TIterator<false>;
- using const_iterator = TIterator<true>;
-
-public:
- TChunkList()
- {}
-
- ~TChunkList() {
- Erase(begin(), end());
- Y_DEBUG_ABORT_UNLESS(!*this);
- }
-
- TChunkList(const TChunkList& other) {
- *this = other;
- }
-
- TChunkList(TChunkList&& other) {
- *this = std::move(other);
- }
-
- TChunkList& operator=(const TChunkList& other) {
- if (this != &other) {
- Erase(begin(), end());
- for (const TChunk& chunk : other) {
- PutToEnd(TChunk(chunk));
- }
- }
- return *this;
- }
-
- TChunkList& operator=(TChunkList&& other) {
- if (this != &other) {
- Erase(begin(), end());
- Y_DEBUG_ABORT_UNLESS(!*this);
- if (other.Root.IsInUse()) { // do we have something to move?
- Root.PrepareForUse(std::move(static_cast<TChunk&>(other.Root)));
- if (other.Root.Next != &other.Root) { // does other contain more than one item?
- TItem::TransferRange(&Root, other.Root.Next, other.Root.Prev);
- }
- other.Root.ClearSingleItem();
- }
- }
- return *this;
- }
-
- template<typename... TArgs>
- void PutToEnd(TArgs&&... args) {
- InsertBefore(end(), std::forward<TArgs>(args)...);
- }
-
- template<typename... TArgs>
- iterator InsertBefore(iterator pos, TArgs&&... args) {
- TItem *item = AllocateItem<TArgs...>(std::forward<TArgs>(args)...);
- if (item == &Root) {
- // this is the first item, we don't do anything about it
- } else if (pos.Item != &Root) {
- item->LinkBefore(pos.Item ? pos.Item : &Root);
- } else {
- item->LinkBefore(Root.Next);
- std::swap(static_cast<TChunk&>(*item), static_cast<TChunk&>(Root));
- item = &Root;
- Root.Invalidate();
- }
- return {this, item};
- }
-
- iterator Erase(iterator pos) {
- Pop(pos);
- return pos;
- }
-
- iterator Erase(iterator first, iterator last) {
- if (first == last) {
- return last;
- }
- for (;;) {
- if (last == begin()) {
- EraseFront();
- return begin();
- } else if (--last == first) {
- return Erase(last);
- } else {
- last = Erase(last);
- }
- }
- }
-
- void EraseFront() {
- PopFront();
- }
-
- void EraseBack() {
- Y_DEBUG_ABORT_UNLESS(*this);
- if (Root.Prev != &Root) {
- delete Root.Prev;
- } else {
- EraseFront();
- }
- }
-
- // Splice moves elements from the 'from' list in the range [first, last) to *this, inserting them before 'pos'. It
-    // returns an iterator to the next remaining item in the 'from' list.
- iterator Splice(iterator pos, TChunkList& from, iterator first, iterator last) {
- if (first == last) { // the source range is empty
- return last;
- }
-
- const bool fromBegin = first == from.begin();
- if (fromBegin) { // remember we have to transfer the first item before returning
- ++first;
- }
-
-        // 'first' here either equals 'last' or points into the middle of the 'from' list
-
- const bool toBegin = pos == begin();
- if (toBegin && first != last) {
-            // we are inserting at the beginning of the list, so move the first item of the range separately; it is
-            // important here that the 'last' iterator does not get invalidated
- pos = InsertBefore(begin(), from.Pop(first));
- ++pos;
- }
-
- const auto temp = last;
- if (first != last) {
-            --last; // make 'last' point to the actual last element of the source range
-
- Y_DEBUG_ABORT_UNLESS(first.Item != &from.Root);
- Y_DEBUG_ABORT_UNLESS(pos.Item != &Root);
-
- TItem* const firstItem = first.Item;
- TItem* const lastItem = last.Item;
- TItem* const posItem = pos.Item ? pos.Item : &Root;
-
- TItem::TransferRange(posItem, firstItem, lastItem);
-
- // adjust 'pos' to point to the first inserted item
- pos = {this, firstItem};
- }
-
- if (fromBegin) {
- InsertBefore(toBegin ? begin() : pos, from.PopFront());
- return from.begin();
- } else {
- return temp;
- }
- }
-
- operator bool() const { return Root.IsInUse(); }
-
- TChunk& GetFirstChunk() { Y_DEBUG_ABORT_UNLESS(*this); return Root; }
- const TChunk& GetFirstChunk() const { Y_DEBUG_ABORT_UNLESS(*this); return Root; }
- TChunk& GetLastChunk() { Y_DEBUG_ABORT_UNLESS(*this); return *Root.Prev; }
-
- iterator begin() { return *this ? iterator(this, &Root) : end(); }
- const_iterator begin() const { return *this ? const_iterator(this, &Root) : end(); }
- iterator end() { return {this, nullptr}; }
- const_iterator end() const { return {this, nullptr}; }
-
-private:
- TChunk Pop(iterator& pos) {
- pos.CheckValid();
- Y_DEBUG_ABORT_UNLESS(pos.Item);
-
- if (pos.Item == &Root) {
- TChunk res = PopFront();
- pos = begin();
- return res;
- } else {
- Y_DEBUG_ABORT_UNLESS(pos != end());
- TItem* const item = pos++.Item;
- TChunk res = std::move(static_cast<TChunk&>(*item));
- delete item;
- return res;
- }
- }
-
- TChunk PopFront() {
- Y_DEBUG_ABORT_UNLESS(*this);
- TChunk res = std::move(static_cast<TChunk&>(Root));
- if (Root.Next != &Root) {
- static_cast<TChunk&>(Root) = std::move(static_cast<TChunk&>(*Root.Next));
- delete Root.Next;
- Root.Invalidate();
- } else {
- Root.ClearSingleItem();
- }
- return res;
- }
-};
-
-} // NRopeDetails
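
The container above is written for TRope's chunk type, but a minimal sketch with a hypothetical payload shows the contract it expects from TChunk (default-constructible, assignable, constructible from forwarded arguments); TBlock and SumBlocks are illustrative names:

#include <library/cpp/actors/util/rope_cont_embedded_list.h>

// Hypothetical chunk type; the real user of this container is TRope's TRcBuf.
struct TBlock {
    int Value = 0;
    TBlock() = default;
    explicit TBlock(int value) : Value(value) {}
};

int SumBlocks() {
    NRopeDetails::TChunkList<TBlock> list;
    for (int i = 1; i <= 3; ++i) {
        list.PutToEnd(i);              // the first item lives inside Root, the rest on the heap
    }
    int sum = 0;
    for (const TBlock& block : list) { // iteration starts at Root and stops once it wraps around
        sum += block.Value;
    }
    return sum;                        // 6
}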
diff --git a/library/cpp/actors/util/rope_ut.cpp b/library/cpp/actors/util/rope_ut.cpp
deleted file mode 100644
index 0ff85d6c59..0000000000
--- a/library/cpp/actors/util/rope_ut.cpp
+++ /dev/null
@@ -1,418 +0,0 @@
-#include "rope.h"
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-#include "ut_helpers.h"
-
-class TRopeStringBackend : public IContiguousChunk {
- TString Buffer;
-
-public:
- TRopeStringBackend(TString buffer)
- : Buffer(std::move(buffer))
- {}
-
- TContiguousSpan GetData() const override {
- return {Buffer.data(), Buffer.size()};
- }
-
- TMutableContiguousSpan GetDataMut() override {
- return {Buffer.Detach(), Buffer.size()};
- }
-
- TMutableContiguousSpan UnsafeGetDataMut() override {
- return {const_cast<char*>(Buffer.data()), Buffer.size()};
- }
-
- size_t GetOccupiedMemorySize() const override {
- return Buffer.capacity();
- }
-};
-
-TRope CreateRope(TString s, size_t sliceSize) {
- TRope res;
- for (size_t i = 0; i < s.size(); ) {
- size_t len = std::min(sliceSize, s.size() - i);
- if (i % 2) {
- res.Insert(res.End(), TRope(MakeIntrusive<TRopeStringBackend>(s.substr(i, len))));
- } else {
- res.Insert(res.End(), TRope(s.substr(i, len)));
- }
- i += len;
- }
- return res;
-}
-
-TString RopeToString(const TRope& rope) {
- TString res;
- auto iter = rope.Begin();
- while (iter != rope.End()) {
- res.append(iter.ContiguousData(), iter.ContiguousSize());
- iter.AdvanceToNextContiguousBlock();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(rope.GetSize(), res.size());
-
- TString temp = TString::Uninitialized(rope.GetSize());
- rope.Begin().ExtractPlainDataAndAdvance(temp.Detach(), temp.size());
- UNIT_ASSERT_VALUES_EQUAL(temp, res);
-
- return res;
-}
-
-TString Text = "No elements are copied or moved, only the internal pointers of the list nodes are re-pointed.";
-
-Y_UNIT_TEST_SUITE(TRope) {
- Y_UNIT_TEST(StringCompare) {
- TRope rope = CreateRope(Text, 10);
- UNIT_ASSERT_EQUAL(rope, Text);
- UNIT_ASSERT_EQUAL(Text, rope);
- rope.Erase(rope.Begin() + 10, rope.Begin() + 11);
- UNIT_ASSERT_UNEQUAL(rope, Text);
- UNIT_ASSERT_UNEQUAL(Text, rope);
- TString str("aa");
- rope = TRope(TString("ab"));
- UNIT_ASSERT_LT(str, rope);
- UNIT_ASSERT_GT(rope, str);
- str = TString("aa");
- rope = TRope(TString("a"));
- UNIT_ASSERT_LT(rope, str);
- UNIT_ASSERT_GT(str, rope);
- str = TString("a");
- rope = TRope(TString("aa"));
- UNIT_ASSERT_LT(str, rope);
- UNIT_ASSERT_GT(rope, str);
- }
-
- Y_UNIT_TEST(Leak) {
- const size_t begin = 10, end = 20;
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- }
-
- Y_UNIT_TEST(Compacted) {
- TRope rope = CreateRope(Text, 10);
- UNIT_ASSERT_EQUAL(rope.UnsafeGetContiguousSpanMut(), Text);
- UNIT_ASSERT(rope.IsContiguous());
- }
-
-#ifndef TSTRING_IS_STD_STRING
- Y_UNIT_TEST(ExtractZeroCopy) {
- TString str = Text;
- TRope packed(str);
- TString extracted = packed.ExtractUnderlyingContainerOrCopy<TString>();
- UNIT_ASSERT_EQUAL(str.data(), extracted.data());
- }
-
- Y_UNIT_TEST(ExtractZeroCopySlice) {
- TString str = Text;
- TRope sliced(str);
- sliced.EraseFront(1);
- TString extracted = sliced.ExtractUnderlyingContainerOrCopy<TString>();
- UNIT_ASSERT_UNEQUAL(str.data(), extracted.data());
- TRope sliced2(str);
- sliced2.EraseBack(1);
- TString extracted2 = sliced2.ExtractUnderlyingContainerOrCopy<TString>();
- UNIT_ASSERT_UNEQUAL(str.data(), extracted2.data());
- }
-
- Y_UNIT_TEST(TStringDetach) {
- TRope pf;
- TRope rope;
- TString string = TString(Text.data(), Text.size());
- rope = TRope(string);
- pf = rope;
- pf.GetContiguousSpanMut();
- UNIT_ASSERT(!string.IsDetached());
- rope.GetContiguousSpanMut();
- UNIT_ASSERT(string.IsDetached());
- }
-
- Y_UNIT_TEST(TStringUnsafeShared) {
- TRope pf;
- TRope rope;
- TString string = TString(Text.data(), Text.size());
- rope = TRope(string);
- pf = rope;
- UNIT_ASSERT(pf.IsContiguous());
- UNIT_ASSERT_EQUAL(pf.UnsafeGetContiguousSpanMut().data(), string.data());
- UNIT_ASSERT(!string.IsDetached());
- }
-
- Y_UNIT_TEST(ContiguousDataInterop) {
- TString string = "Some long-long text needed for not sharing data and testing";
- TRcBuf data(string);
- UNIT_ASSERT_EQUAL(data.UnsafeGetDataMut(), &(*string.cbegin()));
- TRope rope(data); // check operator TRope
- UNIT_ASSERT_EQUAL(rope.UnsafeGetContiguousSpanMut().data(), &(*string.cbegin()));
- TRcBuf otherData(rope);
- UNIT_ASSERT_EQUAL(otherData.UnsafeGetDataMut(), &(*string.cbegin()));
- TString extractedBack = otherData.ExtractUnderlyingContainerOrCopy<TString>();
- UNIT_ASSERT_EQUAL(extractedBack.data(), &(*string.cbegin()));
- }
-#endif
- Y_UNIT_TEST(CrossCompare) {
- TString str = "some very long string";
- const TString constStr(str);
- TStringBuf strbuf = str;
- const TStringBuf constStrbuf = str;
- TContiguousSpan span(str);
- const TContiguousSpan constSpan(str);
- TMutableContiguousSpan mutableSpan(const_cast<char*>(str.data()), str.size());
- const TMutableContiguousSpan constMutableSpan(const_cast<char*>(str.data()), str.size());
- TRcBuf data(str);
- const TRcBuf constData(str);
- TArrayRef<char> arrRef(const_cast<char*>(str.data()), str.size());
- const TArrayRef<char> constArrRef(const_cast<char*>(str.data()), str.size());
- TArrayRef<const char> arrConstRef(const_cast<char*>(str.data()), str.size());
- const TArrayRef<const char> constArrConstRef(const_cast<char*>(str.data()), str.size());
- NActors::TSharedData sharedData = NActors::TSharedData::Copy(str.data(), str.size());
- const NActors::TSharedData constSharedData(sharedData);
- TRope rope(str);
- const TRope constRope(str);
-
- Permutate(
- [](auto& arg1, auto& arg2) {
- UNIT_ASSERT(arg1 == arg2);
- },
- str,
- constStr,
- strbuf,
- constStrbuf,
- span,
- constSpan,
- mutableSpan,
- constMutableSpan,
- data,
- constData,
- arrRef,
- constArrRef,
- arrConstRef,
- constArrConstRef,
- sharedData,
- constSharedData,
- rope,
- constRope);
- }
-
- Y_UNIT_TEST(BasicRange) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope::TIterator rBegin = rope.Begin() + begin;
- TRope::TIterator rEnd = rope.Begin() + end;
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(TRope(rBegin, rEnd)), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(Erase) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- }
- }
- }
-
- Y_UNIT_TEST(Insert) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope part = TRope(rope.Begin() + begin, rope.Begin() + end);
- for (size_t where = 0; where <= Text.size(); ++where) {
- TRope x(rope);
- x.Insert(x.Begin() + where, TRope(part));
- UNIT_ASSERT_VALUES_EQUAL(x.GetSize(), rope.GetSize() + part.GetSize());
- TString text = Text;
- text.insert(text.begin() + where, Text.begin() + begin, Text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(x), text);
- }
- }
- }
- }
-
- Y_UNIT_TEST(Extract) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- TRope part = rope.Extract(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(part), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(EraseFront) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseFront(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(pos));
- }
- }
-
- Y_UNIT_TEST(EraseBack) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseBack(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(0, Text.size() - pos));
- }
- }
-
- Y_UNIT_TEST(ExtractFront) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TRope out;
- while (const size_t len = Min(step, rope.GetSize())) {
- rope.ExtractFront(len, &out);
- UNIT_ASSERT(rope.GetSize() + out.GetSize() == Text.size());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(out), Text.substr(0, out.GetSize()));
- }
- }
- }
-
- Y_UNIT_TEST(ExtractFrontPlain) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TString buffer = Text;
- size_t remain = rope.GetSize();
- while (const size_t len = Min(step, remain)) {
- TString data = TString::Uninitialized(len);
- rope.ExtractFrontPlain(data.Detach(), data.size());
- UNIT_ASSERT_VALUES_EQUAL(data, buffer.substr(0, len));
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), buffer.substr(len));
- buffer = buffer.substr(len);
- remain -= len;
- }
- }
- }
-
- Y_UNIT_TEST(FetchFrontPlain) {
- char s[10];
- char *data = s;
- size_t remain = sizeof(s);
- TRope rope = TRope(TString("HELLO"));
- UNIT_ASSERT(!rope.FetchFrontPlain(&data, &remain));
- UNIT_ASSERT(!rope);
- rope.Insert(rope.End(), TRope(TString("WORLD!!!")));
- UNIT_ASSERT(rope.FetchFrontPlain(&data, &remain));
- UNIT_ASSERT(!remain);
- UNIT_ASSERT(rope.GetSize() == 3);
- UNIT_ASSERT_VALUES_EQUAL(rope.ConvertToString(), "!!!");
- UNIT_ASSERT(!strncmp(s, "HELLOWORLD", 10));
- }
-
- Y_UNIT_TEST(Glueing) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin <= Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TString repr = rope.DebugString();
- TRope temp = rope.Extract(rope.Position(begin), rope.Position(end));
- rope.Insert(rope.Position(begin), std::move(temp));
- UNIT_ASSERT_VALUES_EQUAL(repr, rope.DebugString());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text);
- }
- }
- }
-
- Y_UNIT_TEST(IterWalk) {
- TRope rope = CreateRope(Text, 10);
- for (size_t step1 = 0; step1 <= rope.GetSize(); ++step1) {
- for (size_t step2 = 0; step2 <= step1; ++step2) {
- TRope::TConstIterator iter = rope.Begin();
- iter += step1;
- iter -= step2;
- UNIT_ASSERT(iter == rope.Position(step1 - step2));
- }
- }
- }
-
- Y_UNIT_TEST(Compare) {
- auto check = [](const TString& x, const TString& y) {
- const TRope xRope = CreateRope(x, 7);
- const TRope yRope = CreateRope(y, 11);
- UNIT_ASSERT_VALUES_EQUAL(xRope == yRope, x == y);
- UNIT_ASSERT_VALUES_EQUAL(xRope == y, x == y);
- UNIT_ASSERT_VALUES_EQUAL(x == yRope, x == y);
- UNIT_ASSERT_VALUES_EQUAL(xRope != yRope, x != y);
- UNIT_ASSERT_VALUES_EQUAL(xRope != y, x != y);
- UNIT_ASSERT_VALUES_EQUAL(x != yRope, x != y);
- UNIT_ASSERT_VALUES_EQUAL(xRope < yRope, x < y);
- UNIT_ASSERT_VALUES_EQUAL(xRope < y, x < y);
- UNIT_ASSERT_VALUES_EQUAL(x < yRope, x < y);
- UNIT_ASSERT_VALUES_EQUAL(xRope <= yRope, x <= y);
- UNIT_ASSERT_VALUES_EQUAL(xRope <= y, x <= y);
- UNIT_ASSERT_VALUES_EQUAL(x <= yRope, x <= y);
- UNIT_ASSERT_VALUES_EQUAL(xRope > yRope, x > y);
- UNIT_ASSERT_VALUES_EQUAL(xRope > y, x > y);
- UNIT_ASSERT_VALUES_EQUAL(x > yRope, x > y);
- UNIT_ASSERT_VALUES_EQUAL(xRope >= yRope, x >= y);
- UNIT_ASSERT_VALUES_EQUAL(xRope >= y, x >= y);
- UNIT_ASSERT_VALUES_EQUAL(x >= yRope, x >= y);
- };
-
- TVector<TString> pool;
- for (size_t k = 0; k < 10; ++k) {
- size_t len = RandomNumber<size_t>(100) + 100;
- TString s = TString::Uninitialized(len);
- char *p = s.Detach();
- for (size_t j = 0; j < len; ++j) {
- *p++ = RandomNumber<unsigned char>();
- }
- pool.push_back(std::move(s));
- }
-
- for (const TString& x : pool) {
- for (const TString& y : pool) {
- check(x, y);
- }
- }
- }
-
- Y_UNIT_TEST(RopeZeroCopyInputBasic) {
- TRope rope = CreateRope(Text, 3);
- TRopeZeroCopyInput input(rope.Begin());
-
- TString result;
- TStringOutput output(result);
- TransferData(&input, &output);
- UNIT_ASSERT_EQUAL(result, Text);
- }
-
- Y_UNIT_TEST(RopeZeroCopyInput) {
- TRope rope;
- rope.Insert(rope.End(), TRope{"abc"});
- rope.Insert(rope.End(), TRope{TString{}});
- rope.Insert(rope.End(), TRope{"de"});
- rope.Insert(rope.End(), TRope{TString{}});
- rope.Insert(rope.End(), TRope{TString{}});
- rope.Insert(rope.End(), TRope{"fghi"});
-
- TRopeZeroCopyInput input(rope.Begin());
-
- const char* data = nullptr;
- size_t len;
-
- len = input.Next(&data, 2);
- UNIT_ASSERT_EQUAL("ab", TStringBuf(data, len));
-
- len = input.Next(&data, 3);
- UNIT_ASSERT_EQUAL("c", TStringBuf(data, len));
-
- len = input.Next(&data, 3);
- UNIT_ASSERT_EQUAL("de", TStringBuf(data, len));
-
- len = input.Next(&data);
- UNIT_ASSERT_EQUAL("fghi", TStringBuf(data, len));
-
- len = input.Next(&data);
- UNIT_ASSERT_EQUAL(len, 0);
-
- len = input.Next(&data);
- UNIT_ASSERT_EQUAL(len, 0);
- }
-}
diff --git a/library/cpp/actors/util/shared_data.cpp b/library/cpp/actors/util/shared_data.cpp
deleted file mode 100644
index 51311ce7a3..0000000000
--- a/library/cpp/actors/util/shared_data.cpp
+++ /dev/null
@@ -1,49 +0,0 @@
-#include "shared_data.h"
-
-#include "memory_tracker.h"
-
-#include <util/system/sys_alloc.h>
-#include <util/system/sanitizers.h>
-
-namespace NActors {
-
- static constexpr char MemoryLabelSharedData[] = "Tablet/TSharedData/Buffers";
-
- char* TSharedData::Allocate(size_t size) {
- char* data = nullptr;
- if (size > 0) {
- if (size >= MaxDataSize) {
- throw std::length_error("Allocate size overflow");
- }
- auto allocSize = OverheadSize + size;
- char* raw = reinterpret_cast<char*>(y_allocate(allocSize));
-
- auto* privateHeader = reinterpret_cast<TPrivateHeader*>(raw);
- privateHeader->AllocSize = allocSize;
- NActors::NMemory::TLabel<MemoryLabelSharedData>::Add(allocSize);
-
- auto* header = reinterpret_cast<THeader*>(raw + PrivateHeaderSize);
- header->RefCount = 1;
- header->Owner = nullptr;
-
- data = raw + OverheadSize;
- NSan::Poison(data, size);
- }
- return data;
- }
-
- void TSharedData::Deallocate(char* data) noexcept {
- if (data) {
- char* raw = data - OverheadSize;
-
- auto* privateHeader = reinterpret_cast<TPrivateHeader*>(raw);
- NActors::NMemory::TLabel<MemoryLabelSharedData>::Sub(privateHeader->AllocSize);
-
- auto* header = reinterpret_cast<THeader*>(raw + PrivateHeaderSize);
- Y_DEBUG_ABORT_UNLESS(header->Owner == nullptr);
-
- y_deallocate(raw);
- }
- }
-
-}
diff --git a/library/cpp/actors/util/shared_data.h b/library/cpp/actors/util/shared_data.h
deleted file mode 100644
index bd9afb00a5..0000000000
--- a/library/cpp/actors/util/shared_data.h
+++ /dev/null
@@ -1,227 +0,0 @@
-#pragma once
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-
-#include <util/system/types.h>
-#include <util/system/compiler.h>
-#include <util/generic/array_ref.h>
-
-namespace NActors {
-
- class TSharedData {
- public:
- class IOwner {
- public:
- virtual ~IOwner() = default;
-
- virtual void Deallocate(char*) noexcept = 0;
- };
-
- struct TPrivateHeader {
- size_t AllocSize;
- size_t Pad;
- };
-
- static_assert(sizeof(TPrivateHeader) == 16, "TPrivateHeader has an unexpected size");
-
- struct THeader {
- TAtomic RefCount;
- IOwner* Owner;
- };
-
- static_assert(sizeof(THeader) == 16, "THeader has an unexpected size");
-
- enum : size_t {
- PrivateHeaderSize = sizeof(TPrivateHeader),
- HeaderSize = sizeof(THeader),
- OverheadSize = PrivateHeaderSize + HeaderSize,
- MaxDataSize = (std::numeric_limits<size_t>::max() - OverheadSize)
- };
-
- public:
- TSharedData() noexcept
- : Data_(nullptr)
- , Size_(0)
- { }
-
- ~TSharedData() noexcept {
- Release();
- }
-
- TSharedData(const TSharedData& other) noexcept
- : Data_(other.Data_)
- , Size_(other.Size_)
- {
- AddRef();
- }
-
- TSharedData(TSharedData&& other) noexcept
- : Data_(other.Data_)
- , Size_(other.Size_)
- {
- other.Data_ = nullptr;
- other.Size_ = 0;
- }
-
- TSharedData& operator=(const TSharedData& other) noexcept {
- if (this != &other) {
- Release();
- Data_ = other.Data_;
- Size_ = other.Size_;
- AddRef();
- }
- return *this;
- }
-
- TSharedData& operator=(TSharedData&& other) noexcept {
- if (this != &other) {
- Release();
- Data_ = other.Data_;
- Size_ = other.Size_;
- other.Data_ = nullptr;
- other.Size_ = 0;
- }
- return *this;
- }
-
- Y_FORCE_INLINE explicit operator bool() const { return Size_ > 0; }
-
- Y_FORCE_INLINE char* mutable_data() { Y_DEBUG_ABORT_UNLESS(IsPrivate()); return Data_; }
- Y_FORCE_INLINE char* mutable_begin() { Y_DEBUG_ABORT_UNLESS(IsPrivate()); return Data_; }
- Y_FORCE_INLINE char* mutable_end() { Y_DEBUG_ABORT_UNLESS(IsPrivate()); return Data_ + Size_; }
-
- Y_FORCE_INLINE const char* data() const { return Data_; }
- Y_FORCE_INLINE const char* begin() const { return Data_; }
- Y_FORCE_INLINE const char* end() const { return Data_ + Size_; }
-
- Y_FORCE_INLINE size_t size() const { return Size_; }
-
- /**
- * Trims data to the specified size
- * Underlying data is not reallocated
- * Returns trimmed amount in bytes
- */
- size_t TrimBack(size_t size) noexcept {
- size_t trimmed = 0;
- if (Size_ > size) {
- trimmed = Size_ - size;
- if (!size) {
- Release();
- Data_ = nullptr;
- }
- Size_ = size;
- }
- return trimmed;
- }
-
- /**
-         * Copies the data to a newly allocated buffer if the data is shared
-         * The new container loses the original owner
-         * Returns a pointer to the mutable buffer
- */
- char* Detach() {
- if (IsShared()) {
- *this = TSharedData::Copy(data(), size());
- }
- return Data_;
- }
-
- /**
-         * Returns a view of the underlying data starting at pos and spanning at most len bytes
- */
- TStringBuf Slice(size_t pos = 0, size_t len = -1) const noexcept {
- pos = Min(pos, Size_);
- len = Min(len, Size_ - pos);
- return { Data_ + pos, len };
- }
-
- explicit operator TStringBuf() const noexcept {
- return Slice();
- }
-
- bool IsPrivate() const {
- return Data_ ? IsPrivate(Header()) : true;
- }
-
- bool IsShared() const {
- return !IsPrivate();
- }
-
- TString ToString() const {
- return TString(data(), size());
- }
-
- /**
- * Attach to pre-allocated data with a preceding THeader
- */
- static TSharedData AttachUnsafe(char* data, size_t size) noexcept {
- TSharedData result;
- result.Data_ = data;
- result.Size_ = size;
- return result;
- }
-
- /**
- * Make uninitialized buffer of the specified size
- */
- static TSharedData Uninitialized(size_t size) {
- return AttachUnsafe(Allocate(size), size);
- }
-
- /**
- * Make a copy of the specified data
- */
- static TSharedData Copy(const void* data, size_t size) {
- TSharedData result = Uninitialized(size);
- if (size) {
- ::memcpy(result.Data_, data, size);
- }
- return result;
- }
-
- /**
- * Make a copy of the specified data
- */
- static TSharedData Copy(TArrayRef<const char> data) {
- return Copy(data.data(), data.size());
- }
-
- private:
- Y_FORCE_INLINE THeader* Header() const noexcept {
- Y_DEBUG_ABORT_UNLESS(Data_);
- return reinterpret_cast<THeader*>(Data_ - sizeof(THeader));
- }
-
- static bool IsPrivate(THeader* header) noexcept {
- return 1 == AtomicGet(header->RefCount);
- }
-
- void AddRef() noexcept {
- if (Data_) {
- AtomicIncrement(Header()->RefCount);
- }
- }
-
- void Release() noexcept {
- if (Data_) {
- auto* header = Header();
- if (IsPrivate(header) || 0 == AtomicDecrement(header->RefCount)) {
- if (auto* owner = header->Owner) {
- owner->Deallocate(Data_);
- } else {
- Deallocate(Data_);
- }
- }
- }
- }
-
- private:
- static char* Allocate(size_t size);
- static void Deallocate(char* data) noexcept;
-
- private:
- char* Data_;
- size_t Size_;
- };
-
-}
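
A brief sketch of the sharing semantics defined above; the buffer contents and sizes are illustrative:

#include <library/cpp/actors/util/shared_data.h>

// Handles share one allocation until a writer detaches its own private copy.
void SharedDataSketch() {
    using NActors::TSharedData;

    TSharedData a = TSharedData::Copy("payload", 7);
    TSharedData b = a;                             // same buffer, RefCount == 2
    Y_DEBUG_ABORT_UNLESS(a.IsShared() && b.IsShared());

    b.Detach();                                    // b now owns a private copy
    Y_DEBUG_ABORT_UNLESS(a.IsPrivate() && b.IsPrivate());

    a.TrimBack(4);                                 // logical size shrinks, nothing is reallocated
    Y_DEBUG_ABORT_UNLESS(a.ToString() == "payl");
}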
diff --git a/library/cpp/actors/util/shared_data_backtracing_owner.h b/library/cpp/actors/util/shared_data_backtracing_owner.h
deleted file mode 100644
index ea479d5fd1..0000000000
--- a/library/cpp/actors/util/shared_data_backtracing_owner.h
+++ /dev/null
@@ -1,88 +0,0 @@
-#pragma once
-
-#include <util/system/sys_alloc.h>
-#include <util/system/backtrace.h>
-
-#include "shared_data.h"
-
-class TBackTracingOwner : public NActors::TSharedData::IOwner {
- using THeader = NActors::TSharedData::THeader;
- using TSelf = TBackTracingOwner;
- using IOwner = NActors::TSharedData::IOwner;
-
- static constexpr size_t PrivateHeaderSize = NActors::TSharedData::PrivateHeaderSize;
- static constexpr size_t HeaderSize = NActors::TSharedData::HeaderSize;
- static constexpr size_t OverheadSize = NActors::TSharedData::OverheadSize;
-
- IOwner* RealOwner = nullptr;
- TBackTrace BackTrace;
- const char* Info;
-public:
-
- static constexpr const char* INFO_FROM_SHARED_DATA = "FROM_SHARED_DATA";
- static constexpr const char* INFO_COPIED_STRING = "COPIED_STRING";
- static constexpr const char* INFO_ALLOC_UNINITIALIZED = "ALLOC_UNINITIALIZED";
- static constexpr const char* INFO_ALLOC_UNINIT_ROOMS = "ALLOC_UNINIT_ROOMS";
-
- static char* Allocate(size_t size, const char* info = nullptr) {
- char* raw = reinterpret_cast<char*>(y_allocate(OverheadSize + size));
- THeader* header = reinterpret_cast<THeader*>(raw + PrivateHeaderSize);
- TSelf* btOwner = new TSelf;
- btOwner->BackTrace.Capture();
- btOwner->Info = info;
- header->RefCount = 1;
- header->Owner = btOwner;
- char* data = raw + OverheadSize;
- return data;
- }
-
- static void FakeOwner(const NActors::TSharedData& data, const char* info = nullptr) {
- THeader* header = Header(data);
- if (header) {
- TSelf* btOwner = new TSelf();
- btOwner->BackTrace.Capture();
- btOwner->Info = info;
- if (header->Owner) {
- btOwner->RealOwner = header->Owner;
- }
- header->Owner = btOwner;
- }
- }
-
- static void UnsafePrintBackTrace(NActors::TSharedData& data) {
- THeader* header = Header(data);
- if(header->Owner) {
- TSelf* owner = static_cast<TSelf*>(header->Owner);
- owner->PrintBackTrace();
- }
- }
-
- void Deallocate(char* data) noexcept override {
- if (!RealOwner) {
- char* raw = data - OverheadSize;
- y_deallocate(raw);
- } else {
- RealOwner->Deallocate(data);
- }
-
- delete this;
- }
-
- IOwner* GetRealOwner() const {
- return RealOwner;
- }
-
- void PrintBackTrace() {
- Cerr << "Deallocate TSharedData with info# " << Info << Endl;
- BackTrace.PrintTo(Cerr);
- }
-private:
- static Y_FORCE_INLINE THeader* Header(const NActors::TSharedData& d) noexcept {
- char* data = const_cast<char*>(d.data());
- if (data) {
- return reinterpret_cast<THeader*>(data - HeaderSize);
- } else {
- return nullptr;
- }
- }
-};
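
A hedged sketch of the debugging pattern this helper enables; the function name is illustrative and the buffer is assumed to be non-empty:

#include "shared_data_backtracing_owner.h"

// FakeOwner captures a backtrace of the wrapping site inside the buffer's
// header; UnsafePrintBackTrace dumps that backtrace later on demand, for
// example while tracking down who still holds a reference.
void TagBuffer(NActors::TSharedData& data) {
    TBackTracingOwner::FakeOwner(data, TBackTracingOwner::INFO_FROM_SHARED_DATA);
    TBackTracingOwner::UnsafePrintBackTrace(data);
}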
diff --git a/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp b/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
deleted file mode 100644
index 4939403454..0000000000
--- a/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
+++ /dev/null
@@ -1,231 +0,0 @@
-#include <library/cpp/actors/util/rope.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-
-#include "shared_data_rope_backend.h"
-
-namespace NActors {
-
- namespace {
-
- TRope CreateRope(TString s, size_t sliceSize) {
- TRope res;
- for (size_t i = 0; i < s.size(); ) {
- size_t len = std::min(sliceSize, s.size() - i);
- if (i % 2) {
- auto str = s.substr(i, len);
- res.Insert(res.End(), TRope(
- TSharedData::Copy(str.data(), str.size())));
- } else {
- res.Insert(res.End(), TRope(s.substr(i, len)));
- }
- i += len;
- }
- return res;
- }
-
- TString RopeToString(const TRope& rope) {
- TString res;
- auto iter = rope.Begin();
- while (iter != rope.End()) {
- res.append(iter.ContiguousData(), iter.ContiguousSize());
- iter.AdvanceToNextContiguousBlock();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(rope.GetSize(), res.size());
-
- TString temp = TString::Uninitialized(rope.GetSize());
- rope.Begin().ExtractPlainDataAndAdvance(temp.Detach(), temp.size());
- UNIT_ASSERT_VALUES_EQUAL(temp, res);
-
- return res;
- }
-
- TString Text = "No elements are copied or moved, only the internal pointers of the list nodes are re-pointed.";
-
- }
-
- Y_UNIT_TEST_SUITE(TRopeSharedDataNativeBackend) {
-
- // Same tests as in TRope but with new CreateRope using TSharedData backend
-
- Y_UNIT_TEST(Leak) {
- const size_t begin = 10, end = 20;
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- }
-
- Y_UNIT_TEST(BasicRange) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope::TIterator rBegin = rope.Begin() + begin;
- TRope::TIterator rEnd = rope.Begin() + end;
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(TRope(rBegin, rEnd)), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(Erase) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- }
- }
- }
-
- Y_UNIT_TEST(Insert) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope part = TRope(rope.Begin() + begin, rope.Begin() + end);
- for (size_t where = 0; where <= Text.size(); ++where) {
- TRope x(rope);
- x.Insert(x.Begin() + where, TRope(part));
- UNIT_ASSERT_VALUES_EQUAL(x.GetSize(), rope.GetSize() + part.GetSize());
- TString text = Text;
- text.insert(text.begin() + where, Text.begin() + begin, Text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(x), text);
- }
- }
- }
- }
-
- Y_UNIT_TEST(Extract) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- TRope part = rope.Extract(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(part), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(EraseFront) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseFront(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(pos));
- }
- }
-
- Y_UNIT_TEST(EraseBack) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseBack(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(0, Text.size() - pos));
- }
- }
-
- Y_UNIT_TEST(ExtractFront) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TRope out;
- while (const size_t len = Min(step, rope.GetSize())) {
- rope.ExtractFront(len, &out);
- UNIT_ASSERT(rope.GetSize() + out.GetSize() == Text.size());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(out), Text.substr(0, out.GetSize()));
- }
- }
- }
-
- Y_UNIT_TEST(ExtractFrontPlain) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TString buffer = Text;
- auto it = rope.Begin();
- size_t remain = rope.GetSize();
- while (const size_t len = Min(step, remain)) {
- TString data = TString::Uninitialized(len);
- it.ExtractPlainDataAndAdvance(data.Detach(), data.size());
- UNIT_ASSERT_VALUES_EQUAL(data, buffer.substr(0, len));
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(TRope(it, rope.End())), buffer.substr(len));
- buffer = buffer.substr(len);
- remain -= len;
- }
- }
- }
-
- Y_UNIT_TEST(Glueing) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin <= Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TString repr = rope.DebugString();
- TRope temp = rope.Extract(rope.Position(begin), rope.Position(end));
- rope.Insert(rope.Position(begin), std::move(temp));
- UNIT_ASSERT_VALUES_EQUAL(repr, rope.DebugString());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text);
- }
- }
- }
-
- Y_UNIT_TEST(IterWalk) {
- TRope rope = CreateRope(Text, 10);
- for (size_t step1 = 0; step1 <= rope.GetSize(); ++step1) {
- for (size_t step2 = 0; step2 <= step1; ++step2) {
- TRope::TConstIterator iter = rope.Begin();
- iter += step1;
- iter -= step2;
- UNIT_ASSERT(iter == rope.Position(step1 - step2));
- }
- }
- }
-
- Y_UNIT_TEST(Compare) {
- auto check = [](const TString& x, const TString& y) {
- const TRope xRope = CreateRope(x, 7);
- const TRope yRope = CreateRope(y, 11);
- UNIT_ASSERT_VALUES_EQUAL(xRope == yRope, x == y);
- UNIT_ASSERT_VALUES_EQUAL(xRope != yRope, x != y);
- UNIT_ASSERT_VALUES_EQUAL(xRope < yRope, x < y);
- UNIT_ASSERT_VALUES_EQUAL(xRope <= yRope, x <= y);
- UNIT_ASSERT_VALUES_EQUAL(xRope > yRope, x > y);
- UNIT_ASSERT_VALUES_EQUAL(xRope >= yRope, x >= y);
- };
-
- TVector<TString> pool;
- for (size_t k = 0; k < 10; ++k) {
- size_t len = RandomNumber<size_t>(100) + 100;
- TString s = TString::Uninitialized(len);
- char *p = s.Detach();
- for (size_t j = 0; j < len; ++j) {
- *p++ = RandomNumber<unsigned char>();
- }
- pool.push_back(std::move(s));
- }
-
- for (const TString& x : pool) {
- for (const TString& y : pool) {
- check(x, y);
- }
- }
- }
-
- // Specific TSharedDataRopeBackend tests
-
- Y_UNIT_TEST(RopeOnlyBorrows) {
- TSharedData data = TSharedData::Copy(Text.data(), Text.size());
- {
- TRope rope;
- rope.Insert(rope.End(), TRope(data));
- UNIT_ASSERT(data.IsShared());
- TSharedData dataCopy = data;
- UNIT_ASSERT(dataCopy.IsShared());
- UNIT_ASSERT_EQUAL(dataCopy.data(), data.data());
- rope.Insert(rope.End(), TRope(data));
- rope.Insert(rope.End(), TRope(data));
- dataCopy.TrimBack(10);
- UNIT_ASSERT_EQUAL(rope.GetSize(), data.size() * 3);
- }
- UNIT_ASSERT(data.IsPrivate());
- }
- }
-
-} // namespace NActors
diff --git a/library/cpp/actors/util/shared_data_rope_backend.h b/library/cpp/actors/util/shared_data_rope_backend.h
deleted file mode 100644
index a221ae668b..0000000000
--- a/library/cpp/actors/util/shared_data_rope_backend.h
+++ /dev/null
@@ -1,41 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/util/rc_buf.h>
-
-#include "shared_data.h"
-
-namespace NActors {
-
-class TRopeSharedDataBackend : public IContiguousChunk {
- TSharedData Buffer;
-
-public:
- TRopeSharedDataBackend(TSharedData buffer)
- : Buffer(std::move(buffer))
- {}
-
- TContiguousSpan GetData() const override {
- return {Buffer.data(), Buffer.size()};
- }
-
- TMutableContiguousSpan GetDataMut() override {
- if(Buffer.IsShared()) {
- Buffer = TSharedData::Copy(Buffer.data(), Buffer.size());
- }
- return {Buffer.mutable_data(), Buffer.size()};
- }
-
- TMutableContiguousSpan UnsafeGetDataMut() override {
- return {const_cast<char *>(Buffer.data()), Buffer.size()};
- }
-
- bool IsPrivate() const override {
- return Buffer.IsPrivate();
- }
-
- size_t GetOccupiedMemorySize() const override {
- return Buffer.size();
- }
-};
-
-} // namespace NActors
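
A one-line sketch of what this backend is for, matching how the unit tests below build their ropes; the function name is illustrative:

#include <library/cpp/actors/util/rope.h>
#include "shared_data_rope_backend.h"

// Wrap an existing TSharedData in a rope without copying the bytes; the rope
// merely takes another reference to the shared buffer.
TRope WrapShared(const NActors::TSharedData& data) {
    return TRope(MakeIntrusive<NActors::TRopeSharedDataBackend>(data));
}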
diff --git a/library/cpp/actors/util/shared_data_rope_backend_ut.cpp b/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
deleted file mode 100644
index b2b4e04634..0000000000
--- a/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
+++ /dev/null
@@ -1,231 +0,0 @@
-#include <library/cpp/actors/util/rope.h>
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-
-#include "shared_data_rope_backend.h"
-
-namespace NActors {
-
- namespace {
-
- TRope CreateRope(TString s, size_t sliceSize) {
- TRope res;
- for (size_t i = 0; i < s.size(); ) {
- size_t len = std::min(sliceSize, s.size() - i);
- if (i % 2) {
- auto str = s.substr(i, len);
- res.Insert(res.End(), TRope(MakeIntrusive<TRopeSharedDataBackend>(
- TSharedData::Copy(str.data(), str.size()))));
- } else {
- res.Insert(res.End(), TRope(s.substr(i, len)));
- }
- i += len;
- }
- return res;
- }
-
- TString RopeToString(const TRope& rope) {
- TString res;
- auto iter = rope.Begin();
- while (iter != rope.End()) {
- res.append(iter.ContiguousData(), iter.ContiguousSize());
- iter.AdvanceToNextContiguousBlock();
- }
-
- UNIT_ASSERT_VALUES_EQUAL(rope.GetSize(), res.size());
-
- TString temp = TString::Uninitialized(rope.GetSize());
- rope.Begin().ExtractPlainDataAndAdvance(temp.Detach(), temp.size());
- UNIT_ASSERT_VALUES_EQUAL(temp, res);
-
- return res;
- }
-
- TString Text = "No elements are copied or moved, only the internal pointers of the list nodes are re-pointed.";
-
- }
-
- Y_UNIT_TEST_SUITE(TRopeSharedDataBackend) {
-
- // Same tests as in TRope but with new CreateRope using TSharedData backend
-
- Y_UNIT_TEST(Leak) {
- const size_t begin = 10, end = 20;
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- }
-
- Y_UNIT_TEST(BasicRange) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope::TIterator rBegin = rope.Begin() + begin;
- TRope::TIterator rEnd = rope.Begin() + end;
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(TRope(rBegin, rEnd)), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(Erase) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- rope.Erase(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- }
- }
- }
-
- Y_UNIT_TEST(Insert) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope part = TRope(rope.Begin() + begin, rope.Begin() + end);
- for (size_t where = 0; where <= Text.size(); ++where) {
- TRope x(rope);
- x.Insert(x.Begin() + where, TRope(part));
- UNIT_ASSERT_VALUES_EQUAL(x.GetSize(), rope.GetSize() + part.GetSize());
- TString text = Text;
- text.insert(text.begin() + where, Text.begin() + begin, Text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(x), text);
- }
- }
- }
- }
-
- Y_UNIT_TEST(Extract) {
- for (size_t begin = 0; begin < Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TRope rope = CreateRope(Text, 10);
- TRope part = rope.Extract(rope.Begin() + begin, rope.Begin() + end);
- TString text = Text;
- text.erase(text.begin() + begin, text.begin() + end);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), text);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(part), Text.substr(begin, end - begin));
- }
- }
- }
-
- Y_UNIT_TEST(EraseFront) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseFront(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(pos));
- }
- }
-
- Y_UNIT_TEST(EraseBack) {
- for (size_t pos = 0; pos <= Text.size(); ++pos) {
- TRope rope = CreateRope(Text, 10);
- rope.EraseBack(pos);
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text.substr(0, Text.size() - pos));
- }
- }
-
- Y_UNIT_TEST(ExtractFront) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TRope out;
- while (const size_t len = Min(step, rope.GetSize())) {
- rope.ExtractFront(len, &out);
- UNIT_ASSERT(rope.GetSize() + out.GetSize() == Text.size());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(out), Text.substr(0, out.GetSize()));
- }
- }
- }
-
- Y_UNIT_TEST(ExtractFrontPlain) {
- for (size_t step = 1; step <= Text.size(); ++step) {
- TRope rope = CreateRope(Text, 10);
- TString buffer = Text;
- auto it = rope.Begin();
- size_t remain = rope.GetSize();
- while (const size_t len = Min(step, remain)) {
- TString data = TString::Uninitialized(len);
- it.ExtractPlainDataAndAdvance(data.Detach(), data.size());
- UNIT_ASSERT_VALUES_EQUAL(data, buffer.substr(0, len));
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(TRope(it, rope.End())), buffer.substr(len));
- buffer = buffer.substr(len);
- remain -= len;
- }
- }
- }
-
- Y_UNIT_TEST(Glueing) {
- TRope rope = CreateRope(Text, 10);
- for (size_t begin = 0; begin <= Text.size(); ++begin) {
- for (size_t end = begin; end <= Text.size(); ++end) {
- TString repr = rope.DebugString();
- TRope temp = rope.Extract(rope.Position(begin), rope.Position(end));
- rope.Insert(rope.Position(begin), std::move(temp));
- UNIT_ASSERT_VALUES_EQUAL(repr, rope.DebugString());
- UNIT_ASSERT_VALUES_EQUAL(RopeToString(rope), Text);
- }
- }
- }
-
- Y_UNIT_TEST(IterWalk) {
- TRope rope = CreateRope(Text, 10);
- for (size_t step1 = 0; step1 <= rope.GetSize(); ++step1) {
- for (size_t step2 = 0; step2 <= step1; ++step2) {
- TRope::TConstIterator iter = rope.Begin();
- iter += step1;
- iter -= step2;
- UNIT_ASSERT(iter == rope.Position(step1 - step2));
- }
- }
- }
-
- Y_UNIT_TEST(Compare) {
- auto check = [](const TString& x, const TString& y) {
- const TRope xRope = CreateRope(x, 7);
- const TRope yRope = CreateRope(y, 11);
- UNIT_ASSERT_VALUES_EQUAL(xRope == yRope, x == y);
- UNIT_ASSERT_VALUES_EQUAL(xRope != yRope, x != y);
- UNIT_ASSERT_VALUES_EQUAL(xRope < yRope, x < y);
- UNIT_ASSERT_VALUES_EQUAL(xRope <= yRope, x <= y);
- UNIT_ASSERT_VALUES_EQUAL(xRope > yRope, x > y);
- UNIT_ASSERT_VALUES_EQUAL(xRope >= yRope, x >= y);
- };
-
- TVector<TString> pool;
- for (size_t k = 0; k < 10; ++k) {
- size_t len = RandomNumber<size_t>(100) + 100;
- TString s = TString::Uninitialized(len);
- char *p = s.Detach();
- for (size_t j = 0; j < len; ++j) {
- *p++ = RandomNumber<unsigned char>();
- }
- pool.push_back(std::move(s));
- }
-
- for (const TString& x : pool) {
- for (const TString& y : pool) {
- check(x, y);
- }
- }
- }
-
- // Specific TSharedDataRopeBackend tests
-
- Y_UNIT_TEST(RopeOnlyBorrows) {
- TSharedData data = TSharedData::Copy(Text.data(), Text.size());
- {
- TRope rope;
- rope.Insert(rope.End(), TRope(MakeIntrusive<TRopeSharedDataBackend>(data)));
- UNIT_ASSERT(data.IsShared());
- TSharedData dataCopy = data;
- UNIT_ASSERT(dataCopy.IsShared());
- UNIT_ASSERT_EQUAL(dataCopy.data(), data.data());
- rope.Insert(rope.End(), TRope(MakeIntrusive<TRopeSharedDataBackend>(data)));
- rope.Insert(rope.End(), TRope(MakeIntrusive<TRopeSharedDataBackend>(data)));
- dataCopy.TrimBack(10);
- UNIT_ASSERT_EQUAL(rope.GetSize(), data.size() * 3);
- }
- UNIT_ASSERT(data.IsPrivate());
- }
- }
-
-} // namespace NActors
diff --git a/library/cpp/actors/util/shared_data_ut.cpp b/library/cpp/actors/util/shared_data_ut.cpp
deleted file mode 100644
index 2f7dc2ccc8..0000000000
--- a/library/cpp/actors/util/shared_data_ut.cpp
+++ /dev/null
@@ -1,205 +0,0 @@
-#include "shared_data.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/generic/hash.h>
-#include <util/generic/deque.h>
-#include <util/system/sys_alloc.h>
-
-namespace NActors {
-
- Y_UNIT_TEST_SUITE(TSharedDataTest) {
-
- Y_UNIT_TEST(BasicBehavior) {
- auto data = TSharedData::Copy("Hello", 5);
- UNIT_ASSERT(data.IsPrivate());
- UNIT_ASSERT(!data.IsShared());
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 5u);
- UNIT_ASSERT_VALUES_EQUAL(data.end() - data.begin(), 5u);
- UNIT_ASSERT_VALUES_EQUAL(data.mutable_end() - data.mutable_begin(), 5u);
- UNIT_ASSERT(data.begin() == data.data());
- UNIT_ASSERT(data.mutable_data() == data.data());
- UNIT_ASSERT(data.mutable_begin() == data.mutable_data());
-
- UNIT_ASSERT_VALUES_EQUAL(data.ToString(), TString("Hello"));
- UNIT_ASSERT_VALUES_EQUAL(::memcmp(data.data(), "Hello", 5), 0);
-
- auto link = data;
- UNIT_ASSERT(!link.IsPrivate());
- UNIT_ASSERT(!data.IsPrivate());
- UNIT_ASSERT(link.IsShared());
- UNIT_ASSERT(data.IsShared());
- UNIT_ASSERT(link.data() == data.data());
- UNIT_ASSERT(link.size() == data.size());
-
- link = { };
- UNIT_ASSERT(link.IsPrivate());
- UNIT_ASSERT(data.IsPrivate());
- UNIT_ASSERT(!link.IsShared());
- UNIT_ASSERT(!data.IsShared());
-
- UNIT_ASSERT_VALUES_EQUAL(TString(TStringBuf(data)), TString("Hello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice()), TString("Hello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1)), TString("ello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1, 3)), TString("ell"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(1, 100)), TString("ello"));
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice(0, 4)), TString("Hell"));
-
- link = data;
- UNIT_ASSERT(link.data() == data.data());
- UNIT_ASSERT_VALUES_UNEQUAL(link.Detach(), data.data());
- UNIT_ASSERT_EQUAL(data.size(), link.size());
- UNIT_ASSERT_VALUES_EQUAL(TString(data.Slice()), TString(link.Slice()));
- }
-
- Y_UNIT_TEST(TrimBehavior) {
- auto data = TSharedData::Uninitialized(42);
-
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 42u);
- UNIT_ASSERT(data.data() != nullptr);
-
- // Trim to non-zero does not change addresses
- const char* ptr1 = data.data();
- data.TrimBack(31);
- const char* ptr2 = data.data();
-
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 31u);
- UNIT_ASSERT(ptr1 == ptr2);
-
- // Trim to zero releases underlying data
- data.TrimBack(0);
-
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 0u);
- UNIT_ASSERT(data.data() == nullptr);
- }
-
- class TCustomOwner : public TSharedData::IOwner {
- using THeader = TSharedData::THeader;
-
- public:
- TSharedData Allocate(size_t size) {
- char* raw = reinterpret_cast<char*>(y_allocate(sizeof(THeader) + size));
- THeader* header = reinterpret_cast<THeader*>(raw);
- header->RefCount = 1;
- header->Owner = this;
- char* data = raw + sizeof(THeader);
- Y_ABORT_UNLESS(Allocated_.insert(data).second);
- return TSharedData::AttachUnsafe(data, size);
- }
-
- void Deallocate(char* data) noexcept {
- Y_ABORT_UNLESS(Allocated_.erase(data) > 0);
- char* raw = data - sizeof(THeader);
- y_deallocate(raw);
- Deallocated_.push_back(data);
- }
-
- char* NextDeallocated() {
- char* result = nullptr;
- if (Deallocated_) {
- result = Deallocated_.front();
- Deallocated_.pop_front();
- }
- return result;
- }
-
- private:
- THashSet<void*> Allocated_;
- TDeque<char*> Deallocated_;
- };
-
- Y_UNIT_TEST(CustomOwner) {
- TCustomOwner owner;
- const char* ptr;
-
- // Test destructor releases data
- {
- auto data = owner.Allocate(42);
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 42u);
- ptr = data.data();
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- // Test assignment releases data
- {
- auto data = owner.Allocate(42);
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 42u);
- ptr = data.data();
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- data = { };
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- // Test copies keep references correctly
- {
- auto data = owner.Allocate(42);
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 42u);
- ptr = data.data();
- auto copy = data;
- UNIT_ASSERT_VALUES_EQUAL(copy.size(), 42u);
- UNIT_ASSERT(copy.data() == ptr);
- data = { };
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 0u);
- UNIT_ASSERT(data.data() == nullptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- // Test assignment releases correct data
- {
- auto data1 = owner.Allocate(42);
- UNIT_ASSERT_VALUES_EQUAL(data1.size(), 42u);
- auto data2 = owner.Allocate(31);
- UNIT_ASSERT_VALUES_EQUAL(data2.size(), 31u);
- ptr = data1.data();
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- data1 = data2;
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- ptr = data2.data();
- UNIT_ASSERT_VALUES_EQUAL(data1.size(), 31u);
- UNIT_ASSERT(data1.data() == ptr);
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- // Test moves don't produce dangling references
- {
- auto data = owner.Allocate(42);
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 42u);
- ptr = data.data();
- auto moved = std::move(data);
- UNIT_ASSERT_VALUES_EQUAL(moved.size(), 42u);
- UNIT_ASSERT(moved.data() == ptr);
- UNIT_ASSERT_VALUES_EQUAL(data.size(), 0u);
- UNIT_ASSERT(data.data() == nullptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- // Test Detach copies correctly and doesn't affect owned data
- {
- auto data = owner.Allocate(42);
- auto disowned = data;
- disowned.Detach();
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
- }
-
- UNIT_ASSERT(owner.NextDeallocated() == ptr);
- UNIT_ASSERT(owner.NextDeallocated() == nullptr);
-
- }
-
- }
-
-}
diff --git a/library/cpp/actors/util/should_continue.cpp b/library/cpp/actors/util/should_continue.cpp
deleted file mode 100644
index 258e6a0aff..0000000000
--- a/library/cpp/actors/util/should_continue.cpp
+++ /dev/null
@@ -1,23 +0,0 @@
-#include "should_continue.h"
-
-void TProgramShouldContinue::ShouldRestart() {
- AtomicSet(State, Restart);
-}
-
-void TProgramShouldContinue::ShouldStop(int returnCode) {
- AtomicSet(ReturnCode, returnCode);
- AtomicSet(State, Stop);
-}
-
-TProgramShouldContinue::EState TProgramShouldContinue::PollState() {
- return static_cast<EState>(AtomicGet(State));
-}
-
-int TProgramShouldContinue::GetReturnCode() {
- return static_cast<int>(AtomicGet(ReturnCode));
-}
-
-void TProgramShouldContinue::Reset() {
- AtomicSet(ReturnCode, 0);
- AtomicSet(State, Continue);
-}
diff --git a/library/cpp/actors/util/should_continue.h b/library/cpp/actors/util/should_continue.h
deleted file mode 100644
index 76acc40dc4..0000000000
--- a/library/cpp/actors/util/should_continue.h
+++ /dev/null
@@ -1,22 +0,0 @@
-#pragma once
-#include "defs.h"
-
-class TProgramShouldContinue {
-public:
- enum EState {
- Continue,
- Stop,
- Restart,
- };
-
- void ShouldRestart();
- void ShouldStop(int returnCode = 0);
-
- EState PollState();
- int GetReturnCode();
-
- void Reset();
-private:
- TAtomic ReturnCode = 0;
- TAtomic State = Continue;
-};
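A hedged usage sketch of the removed class: the poll loop suggested by the interface above (the surrounding function and the work placeholder are illustrative).

    #include "should_continue.h"

    int RunUntilStopped(TProgramShouldContinue& shouldContinue) {
        // Poll the shared state once per iteration of the main loop.
        while (shouldContinue.PollState() == TProgramShouldContinue::Continue) {
            // ... perform one unit of work ...
        }
        if (shouldContinue.PollState() == TProgramShouldContinue::Restart) {
            shouldContinue.Reset(); // back to Continue with return code 0
            // ... re-initialize and enter the loop again ...
        }
        return shouldContinue.GetReturnCode();
    }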
diff --git a/library/cpp/actors/util/thread.h b/library/cpp/actors/util/thread.h
deleted file mode 100644
index d742c8c585..0000000000
--- a/library/cpp/actors/util/thread.h
+++ /dev/null
@@ -1,26 +0,0 @@
-#pragma once
-
-#include <util/generic/strbuf.h>
-#include <util/stream/str.h>
-#include <util/system/execpath.h>
-#include <util/system/thread.h>
-#include <util/system/thread.h>
-#include <time.h>
-
-inline void SetCurrentThreadName(const TString& name,
- const ui32 maxCharsFromProcessName = 8) {
-#if defined(_linux_)
- // Linux limits thread names to 15 characters plus '\0'
-
- TStringBuf procName(GetExecPath());
- procName = procName.RNextTok('/');
- procName = procName.SubStr(0, maxCharsFromProcessName);
-
- TStringStream linuxName;
- linuxName << procName << "." << name;
- TThread::SetCurrentThreadName(linuxName.Str().data());
-#else
- Y_UNUSED(maxCharsFromProcessName);
- TThread::SetCurrentThreadName(name.data());
-#endif
-}
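A one-line usage sketch of the removed helper (the thread name is illustrative): on Linux the resulting name becomes "<up to 8 characters of the executable name>.<name>", subject to the 15-character limit noted in the comment above; on other platforms the name is set as-is.

    #include "thread.h"

    // Illustrative: call from inside the worker thread that should be renamed.
    void LabelWorkerThread() {
        SetCurrentThreadName("Worker");
    }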
diff --git a/library/cpp/actors/util/thread_load_log.h b/library/cpp/actors/util/thread_load_log.h
deleted file mode 100644
index 132e99a52d..0000000000
--- a/library/cpp/actors/util/thread_load_log.h
+++ /dev/null
@@ -1,363 +0,0 @@
-#pragma once
-
-#include "defs.h"
-
-#include <util/system/types.h>
-
-#include <type_traits>
-#include <algorithm>
-#include <atomic>
-#include <limits>
-#include <queue>
-
-template <ui64 TIME_SLOT_COUNT, ui64 TIME_SLOT_LENGTH_NS = 131'072, typename Type = std::uint8_t>
-class TThreadLoad {
-public:
- using TimeSlotType = Type;
-
-private:
- static constexpr auto TIME_SLOT_MAX_VALUE = std::numeric_limits<TimeSlotType>::max();
- static constexpr ui64 TIME_SLOT_PART_COUNT = TIME_SLOT_MAX_VALUE + 1;
- static constexpr auto TIME_SLOT_PART_LENGTH_NS = TIME_SLOT_LENGTH_NS / TIME_SLOT_PART_COUNT;
-
- template <typename T>
- static void AtomicAddBound(std::atomic<T>& val, i64 inc) {
- if (inc == 0) {
- return;
- }
-
- auto newVal = val.load();
- auto oldVal = newVal;
-
- do {
- static constexpr auto MAX_VALUE = std::numeric_limits<T>::max();
-
- if (oldVal >= MAX_VALUE) {
- return;
- }
- newVal = std::min<i64>(MAX_VALUE, static_cast<i64>(oldVal) + inc);
- } while (!val.compare_exchange_weak(oldVal, newVal));
- }
-
- template <typename T>
- static void AtomicSubBound(std::atomic<T>& val, i64 sub) {
- if (sub == 0) {
- return;
- }
-
- auto newVal = val.load();
- auto oldVal = newVal;
-
- do {
- if (oldVal == 0) {
- return;
- }
- newVal = std::max<i64>(0, static_cast<i64>(oldVal) - sub);
- } while (!val.compare_exchange_weak(oldVal, newVal));
- }
-
- void UpdateCompleteTimeSlots(ui64 firstSlotNumber, ui64 lastSlotNumber, TimeSlotType timeSlotValue) {
- ui32 firstSlotIndex = firstSlotNumber % TIME_SLOT_COUNT;
- ui32 lastSlotIndex = lastSlotNumber % TIME_SLOT_COUNT;
-
- const ui64 firstTimeSlotsPass = firstSlotNumber / TIME_SLOT_COUNT;
- const ui64 lastTimeSlotsPass = lastSlotNumber / TIME_SLOT_COUNT;
-
- if (firstTimeSlotsPass == lastTimeSlotsPass) {
- // first and last time slots are in the same pass
- for (auto slotNumber = firstSlotNumber + 1; slotNumber < lastSlotNumber; ++slotNumber) {
- auto slotIndex = slotNumber % TIME_SLOT_COUNT;
- TimeSlots[slotIndex] = timeSlotValue;
- }
- } else if (firstTimeSlotsPass + 1 == lastTimeSlotsPass) {
- for (auto slotIndex = (firstSlotNumber + 1) % TIME_SLOT_COUNT; firstSlotIndex < slotIndex && slotIndex < TIME_SLOT_COUNT; ++slotIndex) {
- TimeSlots[slotIndex] = timeSlotValue;
- }
- for (auto slotIndex = 0u; slotIndex < lastSlotIndex; ++slotIndex) {
- TimeSlots[slotIndex] = timeSlotValue;
- }
- } else {
- for (auto slotIndex = 0u; slotIndex < TIME_SLOT_COUNT; ++slotIndex) {
- TimeSlots[slotIndex] = timeSlotValue;
- }
- }
- }
-
-public:
- std::atomic<ui64> LastTimeNs;
- std::atomic<TimeSlotType> TimeSlots[TIME_SLOT_COUNT];
- std::atomic<bool> LastRegisteredPeriodIsBusy = false;
-
- explicit TThreadLoad(ui64 timeNs = 0) {
- static_assert(std::is_unsigned<TimeSlotType>::value);
-
- LastTimeNs = timeNs;
- for (size_t i = 0; i < TIME_SLOT_COUNT; ++i) {
- TimeSlots[i] = 0;
- }
- }
-
- static constexpr auto GetTimeSlotCount() {
- return TIME_SLOT_COUNT;
- }
-
- static constexpr auto GetTimeSlotLengthNs() {
- return TIME_SLOT_LENGTH_NS;
- }
-
- static constexpr auto GetTimeSlotPartLengthNs() {
- return TIME_SLOT_PART_LENGTH_NS;
- }
-
- static constexpr auto GetTimeSlotPartCount() {
- return TIME_SLOT_PART_COUNT;
- }
-
- static constexpr auto GetTimeSlotMaxValue() {
- return TIME_SLOT_MAX_VALUE;
- }
-
- static constexpr auto GetTimeWindowLengthNs() {
- return TIME_SLOT_COUNT * TIME_SLOT_LENGTH_NS;
- }
-
- void RegisterBusyPeriod(ui64 timeNs) {
- RegisterBusyPeriod<true>(timeNs, LastTimeNs.load());
- }
-
- template <bool ModifyLastTime>
- void RegisterBusyPeriod(ui64 timeNs, ui64 lastTimeNs) {
- LastRegisteredPeriodIsBusy = true;
-
- if (timeNs < lastTimeNs) {
- // when time goes back, mark all time slots as 'free'
- for (size_t i = 0u; i < TIME_SLOT_COUNT; ++i) {
- TimeSlots[i] = 0;
- }
-
- if (ModifyLastTime) {
- LastTimeNs = timeNs;
- }
-
- return;
- }
-
- // lastTimeNs <= timeNs
- ui64 firstSlotNumber = lastTimeNs / TIME_SLOT_LENGTH_NS;
- ui32 firstSlotIndex = firstSlotNumber % TIME_SLOT_COUNT;
- ui64 lastSlotNumber = timeNs / TIME_SLOT_LENGTH_NS;
- ui32 lastSlotIndex = lastSlotNumber % TIME_SLOT_COUNT;
-
- if (firstSlotNumber == lastSlotNumber) {
- ui32 slotLengthNs = timeNs - lastTimeNs;
- ui32 slotPartsCount = (slotLengthNs + TIME_SLOT_PART_LENGTH_NS - 1) / TIME_SLOT_PART_LENGTH_NS;
- AtomicAddBound(TimeSlots[firstSlotIndex], slotPartsCount);
-
- if (ModifyLastTime) {
- LastTimeNs = timeNs;
- }
- return;
- }
-
- ui32 firstSlotLengthNs = TIME_SLOT_LENGTH_NS - (lastTimeNs % TIME_SLOT_LENGTH_NS);
- ui32 firstSlotPartsCount = (firstSlotLengthNs + TIME_SLOT_PART_LENGTH_NS - 1) / TIME_SLOT_PART_LENGTH_NS;
- ui32 lastSlotLengthNs = timeNs % TIME_SLOT_LENGTH_NS;
- ui32 lastSlotPartsCount = (lastSlotLengthNs + TIME_SLOT_PART_LENGTH_NS - 1) / TIME_SLOT_PART_LENGTH_NS;
-
- // process first time slot
- AtomicAddBound(TimeSlots[firstSlotIndex], firstSlotPartsCount);
-
- // process complete time slots
- UpdateCompleteTimeSlots(firstSlotNumber, lastSlotNumber, TIME_SLOT_MAX_VALUE);
-
- // process last time slot
- AtomicAddBound(TimeSlots[lastSlotIndex], lastSlotPartsCount);
-
- if (ModifyLastTime) {
- LastTimeNs = timeNs;
- }
- }
-
- void RegisterIdlePeriod(ui64 timeNs) {
- LastRegisteredPeriodIsBusy = false;
-
- ui64 lastTimeNs = LastTimeNs.load();
- if (timeNs < lastTimeNs) {
- // when time goes back, mark all time slots as 'busy'
- for (size_t i = 0u; i < TIME_SLOT_COUNT; ++i) {
- TimeSlots[i] = TIME_SLOT_MAX_VALUE;
- }
- LastTimeNs = timeNs;
- return;
- }
-
- // lastTimeNs <= timeNs
- ui64 firstSlotNumber = lastTimeNs / TIME_SLOT_LENGTH_NS;
- ui32 firstSlotIndex = firstSlotNumber % TIME_SLOT_COUNT;
- ui64 lastSlotNumber = timeNs / TIME_SLOT_LENGTH_NS;
- ui32 lastSlotIndex = lastSlotNumber % TIME_SLOT_COUNT;
-
- if (firstSlotNumber == lastSlotNumber) {
- ui32 slotLengthNs = timeNs - lastTimeNs;
- ui32 slotPartsCount = slotLengthNs / TIME_SLOT_PART_LENGTH_NS;
-
- AtomicSubBound(TimeSlots[firstSlotIndex], slotPartsCount);
-
- LastTimeNs = timeNs;
- return;
- }
-
- ui32 firstSlotLengthNs = TIME_SLOT_LENGTH_NS - (lastTimeNs % TIME_SLOT_LENGTH_NS);
- ui32 firstSlotPartsCount = (firstSlotLengthNs + TIME_SLOT_PART_LENGTH_NS - 1) / TIME_SLOT_PART_LENGTH_NS;
- ui32 lastSlotLengthNs = timeNs % TIME_SLOT_LENGTH_NS;
- ui32 lastSlotPartsCount = (lastSlotLengthNs + TIME_SLOT_PART_LENGTH_NS - 1) / TIME_SLOT_PART_LENGTH_NS;
-
- // process first time slot
- AtomicSubBound(TimeSlots[firstSlotIndex], firstSlotPartsCount);
-
- // process complete time slots
- UpdateCompleteTimeSlots(firstSlotNumber, lastSlotNumber, 0);
-
- // process last time slot
- AtomicSubBound(TimeSlots[lastSlotIndex], lastSlotPartsCount);
-
- LastTimeNs = timeNs;
- }
-};
-
-class TMinusOneThreadEstimator {
-private:
- template <typename T, int MaxSize>
- class TArrayQueue {
- public:
- bool empty() const {
- return FrontIndex == -1;
- }
-
- bool full() const {
- return (RearIndex + 1) % MaxSize == FrontIndex;
- }
-
- T& front() {
- return Data[FrontIndex];
- }
-
- bool push(T &&t) {
- if (full()) {
- return false;
- }
-
- if (FrontIndex == -1) {
- FrontIndex = 0;
- }
-
- RearIndex = (RearIndex + 1) % MaxSize;
- Data[RearIndex] = std::move(t);
- return true;
- }
-
- bool pop() {
- if (empty()) {
- return false;
- }
-
- if (FrontIndex == RearIndex) {
- FrontIndex = RearIndex = -1;
- } else {
- FrontIndex = (FrontIndex + 1) % MaxSize;
- }
-
- return true;
- }
-
- private:
- int FrontIndex = -1;
- int RearIndex = -1;
- T Data[MaxSize];
- };
-
-public:
- template <typename T>
- ui64 MaxLatencyIncreaseWithOneLessCpu(T **threadLoads, ui32 threadCount, ui64 timeNs, ui64 periodNs) {
- Y_ABORT_UNLESS(threadCount > 0);
-
- struct TTimeSlotData {
- typename T::TimeSlotType Load;
- ui64 Index;
- };
-
- ui64 lastTimeNs = timeNs;
- for (auto threadIndex = 0u; threadIndex < threadCount; ++threadIndex) {
- if (threadLoads[threadIndex]->LastRegisteredPeriodIsBusy.load()) {
- lastTimeNs = std::min(lastTimeNs, threadLoads[threadIndex]->LastTimeNs.load());
- } else {
- // make interval [lastTimeNs, timeNs] 'busy'
- threadLoads[threadIndex]->template RegisterBusyPeriod<false>(timeNs, threadLoads[threadIndex]->LastTimeNs.load());
- }
- }
-
- periodNs = std::min(T::GetTimeWindowLengthNs(), periodNs);
-
- ui64 beginTimeNs = periodNs < timeNs ? timeNs - periodNs : 0;
-
- ui64 firstSlotNumber = beginTimeNs / T::GetTimeSlotLengthNs();
- ui64 lastSlotNumber = (lastTimeNs + T::GetTimeSlotLengthNs() - 1) / T::GetTimeSlotLengthNs();
-
- ui64 maxTimeSlotShiftCount = 0u;
- TArrayQueue<TTimeSlotData, T::GetTimeSlotCount()> firstThreadLoadDataQueue;
-
- for (auto slotNumber = firstSlotNumber; slotNumber < lastSlotNumber; ++slotNumber) {
- ui64 slotIndex = slotNumber % T::GetTimeSlotCount();
-
- typename T::TimeSlotType firstThreadTimeSlotValue = threadLoads[0]->TimeSlots[slotIndex].load();
-
- // distribute the previous load of the first thread among the other threads
- auto foundIdleThread = false;
-
- for (auto threadIndex = 1u; threadIndex < threadCount; ++threadIndex) {
- typename T::TimeSlotType thisThreadAvailableTimeSlotLoad = threadLoads[threadIndex]->GetTimeSlotMaxValue() - threadLoads[threadIndex]->TimeSlots[slotIndex].load();
-
- while (!firstThreadLoadDataQueue.empty() && thisThreadAvailableTimeSlotLoad > 0) {
- auto& firstThreadLoadData = firstThreadLoadDataQueue.front();
-
- auto distributedLoad = std::min(thisThreadAvailableTimeSlotLoad, firstThreadLoadData.Load);
-
- thisThreadAvailableTimeSlotLoad -= distributedLoad;
- firstThreadLoadData.Load -= distributedLoad;
-
- if (firstThreadLoadData.Load == 0) {
- auto timeSlotShiftCount = slotIndex - firstThreadLoadData.Index;
- maxTimeSlotShiftCount = std::max(maxTimeSlotShiftCount, timeSlotShiftCount);
- auto res = firstThreadLoadDataQueue.pop();
- Y_ABORT_UNLESS(res);
- }
- }
-
- if (thisThreadAvailableTimeSlotLoad == threadLoads[threadIndex]->GetTimeSlotMaxValue()) {
- foundIdleThread = true;
- }
- }
-
- // distribute the current load of the first thread among the other threads
- if (firstThreadTimeSlotValue > 0) {
- if (foundIdleThread) {
- // The current load of the first thread can be
- // moved to the idle thread, so there is nothing to do
- } else {
- // The current load of the first thread can instead be
- // absorbed by the following time slots of other threads
- auto res = firstThreadLoadDataQueue.push({firstThreadTimeSlotValue, slotIndex});
- Y_ABORT_UNLESS(res);
- }
- }
- }
-
- if (!firstThreadLoadDataQueue.empty()) {
- const auto& timeSlotData = firstThreadLoadDataQueue.front();
- auto timeSlotShiftCount = T::GetTimeSlotCount() - timeSlotData.Index;
- maxTimeSlotShiftCount = std::max(maxTimeSlotShiftCount, timeSlotShiftCount);
- }
-
- return maxTimeSlotShiftCount * T::GetTimeSlotLengthNs();
- }
-};
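To make the slot arithmetic above concrete, a small sketch under the default parameters (the function name and the scenario are illustrative; the constants follow from the template defaults): with a 131'072 ns slot and an 8-bit slot type there are 256 parts per slot, i.e. one part per 512 ns of busy time.

    #include "thread_load_log.h"

    // Illustrative: record some load on two threads and ask how much latency
    // would be added if one of the two CPUs were taken away.
    ui64 EstimateShiftNs() {
        using TLoad = TThreadLoad<38400>; // 38400 slots * 131'072 ns ~= 5 s window
        static_assert(TLoad::GetTimeSlotPartLengthNs() == 512, "512 ns per slot part");

        TLoad* loads[2] = {new TLoad, new TLoad};
        loads[0]->RegisterBusyPeriod(2 * TLoad::GetTimeSlotLengthNs()); // slots 0,1 busy
        loads[1]->RegisterIdlePeriod(2 * TLoad::GetTimeSlotLengthNs()); // slots 0,1 free

        TMinusOneThreadEstimator estimator;
        ui64 shiftNs = estimator.MaxLatencyIncreaseWithOneLessCpu(
            loads, 2, TLoad::GetTimeWindowLengthNs(), TLoad::GetTimeWindowLengthNs());

        delete loads[0];
        delete loads[1];
        return shiftNs;
    }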
diff --git a/library/cpp/actors/util/thread_load_log_ut.cpp b/library/cpp/actors/util/thread_load_log_ut.cpp
deleted file mode 100644
index 20e776cff6..0000000000
--- a/library/cpp/actors/util/thread_load_log_ut.cpp
+++ /dev/null
@@ -1,966 +0,0 @@
-#include "thread_load_log.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-
-#include <util/random/random.h>
-#include <util/system/hp_timer.h>
-#include <util/system/thread.h>
-#include <util/system/types.h>
-#include <util/system/sanitizers.h>
-
-#include <limits>
-
-Y_UNIT_TEST_SUITE(ThreadLoadLog) {
-
- Y_UNIT_TEST(TThreadLoad8BitSlotType) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using TSlotType = std::uint8_t;
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, TSlotType>;
-
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeWindowLengthNs(), timeWindowLengthNs);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotLengthNs(), timeSlotLengthNs);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotCount(), timeSlotCount);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotMaxValue(), std::numeric_limits<TSlotType>::max());
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotPartCount(), (ui64)std::numeric_limits<TSlotType>::max() + 1);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotPartLengthNs(), T::GetTimeSlotLengthNs() / T::GetTimeSlotPartCount());
- }
-
- Y_UNIT_TEST(TThreadLoad16BitSlotType) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using TSlotType = std::uint16_t;
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, TSlotType>;
-
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeWindowLengthNs(), timeWindowLengthNs);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotLengthNs(), timeSlotLengthNs);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotCount(), timeSlotCount);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotMaxValue(), std::numeric_limits<TSlotType>::max());
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotPartCount(), (ui64)std::numeric_limits<TSlotType>::max() + 1);
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotPartLengthNs(), T::GetTimeSlotLengthNs() / T::GetTimeSlotPartCount());
- }
-
- Y_UNIT_TEST(TThreadLoad8BitSlotTypeWindowBusy) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using TSlotType = std::uint8_t;
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, TSlotType>;
-
- T threadLoad;
- threadLoad.RegisterBusyPeriod(T::GetTimeWindowLengthNs());
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), T::GetTimeWindowLengthNs());
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), T::GetTimeSlotMaxValue());
- }
- }
-
- Y_UNIT_TEST(TThreadLoad16BitSlotTypeWindowBusy) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using TSlotType = std::uint16_t;
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, TSlotType>;
-
- T threadLoad;
- threadLoad.RegisterBusyPeriod(T::GetTimeWindowLengthNs());
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), T::GetTimeWindowLengthNs());
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), T::GetTimeSlotMaxValue());
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTimeSlot1) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTimeSlot2) {
- using T = TThreadLoad<38400>;
-
- ui32 startNs = 2 * T::GetTimeSlotPartLengthNs();
- T threadLoad(startNs);
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 3 * T::GetTimeSlotPartLengthNs() - 1;
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
-
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTimeSlot3) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTimeSlot4) {
- using T = TThreadLoad<38400>;
-
- ui32 startNs = 2 * T::GetTimeSlotPartLengthNs();
- T threadLoad(startNs);
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 3 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), (timeNs - startNs) / T::GetTimeSlotPartLengthNs());
-
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTwoTimeSlots1) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 2 * threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstTwoTimeSlots2) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 2 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstThreeTimeSlots1) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 3 * threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 3u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstThreeTimeSlots2) {
- TThreadLoad<38400> threadLoad;
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
-
- for (auto slotIndex = 3u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterBusyPeriodFirstThreeTimeSlots3) {
- using T = TThreadLoad<38400>;
-
- ui32 startNs = 3 * T::GetTimeSlotPartLengthNs();
- T threadLoad(startNs);
-
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 0;
- threadLoad.RegisterBusyPeriod(timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTimeSlot1) {
- using T = TThreadLoad<38400>;
-
- ui64 timeNs = T::GetTimeSlotPartLengthNs();
- T threadLoad(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 3 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 0);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 4 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTimeSlot2) {
- using T = TThreadLoad<38400>;
-
- ui64 timeNs = T::GetTimeSlotPartLengthNs();
- T threadLoad(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 3 * T::GetTimeSlotPartLengthNs() - 1;
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 4 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 3);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTimeSlot3) {
- using T = TThreadLoad<38400>;
-
- ui64 timeNs = T::GetTimeSlotPartLengthNs();
- T threadLoad(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 3 * T::GetTimeSlotPartLengthNs() - 1;
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 4 * T::GetTimeSlotPartLengthNs() - 2;
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 5 * T::GetTimeSlotPartLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 3);
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTwoTimeSlots1) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTwoTimeSlots2) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstTwoTimeSlots3) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = 2 * threadLoad.GetTimeSlotLengthNs() - 1;
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 1);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstThreeTimeSlots1) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- timeNs = 2 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- timeNs = 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 3u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstThreeTimeSlots2) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- timeNs = 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 1u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstThreeTimeSlots3) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- timeNs = 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 3u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstThreeTimeSlots4) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = threadLoad.GetTimeSlotLengthNs() + 2 * threadLoad.GetTimeSlotPartLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- timeNs = 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotPartCount() - 2);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 3u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodFirstThreeTimeSlots5) {
- using T = TThreadLoad<38400>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 2 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 2u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = timeNs + threadLoad.GetTimeWindowLengthNs() + threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(TThreadLoadRegisterIdlePeriodOverTimeWindow) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint8_t>;
-
- T threadLoad;
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), 0);
- for (auto slotIndex = 0u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- ui64 timeNs = 5 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[3].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[4].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 5u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
-
- timeNs = timeNs + threadLoad.GetTimeWindowLengthNs() - 3 * threadLoad.GetTimeSlotLengthNs();
- threadLoad.RegisterIdlePeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.LastTimeNs.load(), timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[0].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[1].load(), 0);
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[2].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[3].load(), threadLoad.GetTimeSlotMaxValue());
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[4].load(), threadLoad.GetTimeSlotMaxValue());
- for (auto slotIndex = 5u; slotIndex < threadLoad.GetTimeSlotCount(); ++slotIndex) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoad.TimeSlots[slotIndex].load(), 0);
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsZeroShiftNs) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- UNIT_ASSERT_VALUES_EQUAL(T::GetTimeSlotPartCount(), (ui64)std::numeric_limits<std::uint16_t>::max() + 1);
-
- T *threadLoads[2];
- threadLoads[0] = new T;
- threadLoads[1] = new T;
-
- for (ui64 i = 1; i < timeSlotCount; i += 2) {
- threadLoads[0]->RegisterIdlePeriod(i * T::GetTimeSlotLengthNs());
- threadLoads[0]->RegisterBusyPeriod((i + 1) * T::GetTimeSlotLengthNs());
- }
-
- for (ui64 i = 1; i < timeSlotCount; i += 2) {
- threadLoads[1]->RegisterBusyPeriod(i * T::GetTimeSlotLengthNs());
- threadLoads[1]->RegisterIdlePeriod((i + 1) * T::GetTimeSlotLengthNs());
- }
-
- TMinusOneThreadEstimator estimator;
- ui64 value = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, 2, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
- UNIT_ASSERT_VALUES_EQUAL(value, 0);
-
- delete threadLoads[0];
- delete threadLoads[1];
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsOneTimeSlotShift1) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 2;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
-
- for (ui64 i = 2; i < threadLoads[t]->GetTimeSlotCount(); i += 2) {
- threadLoads[t]->RegisterIdlePeriod((i - 1) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterBusyPeriod(i * T::GetTimeSlotLengthNs());
- }
-
- threadLoads[t]->RegisterIdlePeriod((threadLoads[t]->GetTimeSlotCount() - 1) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterBusyPeriod(threadLoads[t]->GetTimeSlotCount() * T::GetTimeSlotLengthNs());
-
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 2 == 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- TMinusOneThreadEstimator estimator;
- auto result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
-
- for (ui64 t = 0; t < threadCount; ++t) {
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 2 == 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, T::GetTimeSlotLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsOneTimeSlotShift2) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 2;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
-
- for (ui64 i = 2; i < threadLoads[t]->GetTimeSlotCount(); i += 2) {
- threadLoads[t]->RegisterBusyPeriod((i - 1) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterIdlePeriod(i * T::GetTimeSlotLengthNs());
- }
-
- threadLoads[t]->RegisterBusyPeriod((threadLoads[t]->GetTimeSlotCount() - 1) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterIdlePeriod(threadLoads[t]->GetTimeSlotCount() * T::GetTimeSlotLengthNs());
-
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 2 == 0) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- TMinusOneThreadEstimator estimator;
- auto result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
-
- for (ui64 t = 0; t < threadCount; ++t) {
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 2 == 0) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, T::GetTimeSlotLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsTwoTimeSlotsShift1) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 2;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
-
- for (ui64 i = 4; i < threadLoads[t]->GetTimeSlotCount(); i += 4) {
- threadLoads[t]->RegisterIdlePeriod((i - 2) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterBusyPeriod(i * T::GetTimeSlotLengthNs());
- }
-
- threadLoads[t]->RegisterIdlePeriod((threadLoads[t]->GetTimeSlotCount() - 2) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterBusyPeriod(threadLoads[t]->GetTimeSlotCount() * T::GetTimeSlotLengthNs());
-
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 4 == 2 || s % 4 == 3) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- TMinusOneThreadEstimator estimator;
- auto result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
-
- for (ui64 t = 0; t < threadCount; ++t) {
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 4 == 2 || s % 4 == 3) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->TimeSlots[s].load(), 0);
- }
- }
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, 2 * T::GetTimeSlotLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsTwoTimeSlotsShift2) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 2;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
-
- for (ui64 i = 4; i < threadLoads[t]->GetTimeSlotCount(); i += 4) {
- threadLoads[t]->RegisterBusyPeriod((i - 2) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterIdlePeriod(i * T::GetTimeSlotLengthNs());
- }
-
- threadLoads[t]->RegisterBusyPeriod((threadLoads[t]->GetTimeSlotCount() - 2) * T::GetTimeSlotLengthNs());
- threadLoads[t]->RegisterIdlePeriod(threadLoads[t]->GetTimeSlotCount() * T::GetTimeSlotLengthNs());
-
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 4 == 0 || s % 4 == 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- TMinusOneThreadEstimator estimator;
- auto result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
-
- for (ui64 t = 0; t < threadCount; ++t) {
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- if (s % 4 == 0 || s % 4 == 1) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- } else {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
- }
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, 2 * T::GetTimeSlotLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimatorTwoThreadLoadsTwoTimeSlotsShift3) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 2;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
-
- auto timeNs = T::GetTimeWindowLengthNs() - 1.5 * T::GetTimeSlotLengthNs();
- threadLoads[t]->RegisterIdlePeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->LastTimeNs.load(), timeNs);
-
- timeNs = T::GetTimeWindowLengthNs();
- threadLoads[t]->RegisterBusyPeriod(timeNs);
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->LastTimeNs.load(), timeNs);
-
- for (ui64 s = 0; s + 2 < threadLoads[t]->GetTimeSlotCount(); ++s) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->TimeSlots[timeSlotCount - 2].load(), T::GetTimeSlotPartCount() / 2);
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->TimeSlots[timeSlotCount - 1].load(), T::GetTimeSlotMaxValue());
- }
-
- TMinusOneThreadEstimator estimator;
- auto result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- for (ui64 s = 0; s + 2 < threadLoads[t]->GetTimeSlotCount(); ++s) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), 0, ToString(s).c_str());
- }
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->TimeSlots[timeSlotCount - 2].load(), T::GetTimeSlotPartCount() / 2);
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->TimeSlots[timeSlotCount - 1].load(), T::GetTimeSlotMaxValue());
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, 2 * T::GetTimeSlotLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
-
- Y_UNIT_TEST(MinusOneThreadEstimator16ThreadLoadsAllTimeSlots) {
- constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2 ^ 30 ~5 sec
- constexpr auto timeSlotLengthNs = 524288ull; // 2 ^ 19 ns ~ 512 usec
- constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;
- constexpr auto threadCount = 16;
- constexpr auto estimatesCount = 16;
-
- using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;
-
- for (auto e = 0u; e < estimatesCount; ++e) {
- T *threadLoads[threadCount];
-
- for (auto t = 0u; t < threadCount; ++t) {
- threadLoads[t] = new T;
- auto timeNs = threadLoads[t]->GetTimeWindowLengthNs();
- threadLoads[t]->RegisterBusyPeriod(timeNs);
-
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->LastTimeNs.load(), timeNs);
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- }
- }
-
- ui64 result = 0;
- {
- THPTimer timer;
- TMinusOneThreadEstimator estimator;
- result = estimator.MaxLatencyIncreaseWithOneLessCpu(threadLoads, threadCount, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
- // output in microseconds
- auto passed = timer.Passed() * 1000000;
- Y_UNUSED(passed);
- // Cerr << "timer : " << passed << " " << __LINE__ << Endl;
- }
-
- for (ui64 t = 0; t < threadCount; ++t) {
- UNIT_ASSERT_VALUES_EQUAL(threadLoads[t]->LastTimeNs.load(), T::GetTimeWindowLengthNs());
- for (ui64 s = 0; s < threadLoads[t]->GetTimeSlotCount(); ++s) {
- UNIT_ASSERT_VALUES_EQUAL_C(threadLoads[t]->TimeSlots[s].load(), T::GetTimeSlotMaxValue(), ToString(s).c_str());
- }
- }
-
- UNIT_ASSERT_VALUES_EQUAL(result, T::GetTimeWindowLengthNs());
-
- for (auto t = 0u; t < threadCount; ++t) {
- delete threadLoads[t];
- }
- }
- }
-}
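
For reference, the tests above exercise TThreadLoad and TMinusOneThreadEstimator through a small surface: RegisterIdlePeriod()/RegisterBusyPeriod() fill the per-slot counters, and MaxLatencyIncreaseWithOneLessCpu() reports how much latency would grow with one core removed. A minimal sketch of that call pattern, assuming thread_load_log.h (listed in the ya.make removed further below) is still on the include path; the function name and load shape here are illustrative only:

    #include "thread_load_log.h"      // TThreadLoad, TMinusOneThreadEstimator
    #include <util/system/types.h>    // ui64
    #include <cstdint>

    ui64 EstimateLatencyIncreaseNs() {
        constexpr auto timeWindowLengthNs = 5368709120ull; // 5 * 2^30 ns, ~5 sec
        constexpr auto timeSlotLengthNs = 524288ull;       // 2^19 ns
        constexpr auto timeSlotCount = timeWindowLengthNs / timeSlotLengthNs;

        using T = TThreadLoad<timeSlotCount, timeSlotLengthNs, std::uint16_t>;

        // Two synthetic loads: idle for the first half of the window, busy afterwards.
        T loadA, loadB;
        for (T* load : {&loadA, &loadB}) {
            load->RegisterIdlePeriod(T::GetTimeWindowLengthNs() / 2);
            load->RegisterBusyPeriod(T::GetTimeWindowLengthNs());
        }

        T* threadLoads[2] = {&loadA, &loadB};
        TMinusOneThreadEstimator estimator;
        return estimator.MaxLatencyIncreaseWithOneLessCpu(
            threadLoads, 2, T::GetTimeWindowLengthNs(), T::GetTimeWindowLengthNs());
    }
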
diff --git a/library/cpp/actors/util/threadparkpad.cpp b/library/cpp/actors/util/threadparkpad.cpp
deleted file mode 100644
index b939d6b61a..0000000000
--- a/library/cpp/actors/util/threadparkpad.cpp
+++ /dev/null
@@ -1,152 +0,0 @@
-#include "threadparkpad.h"
-#include <util/system/winint.h>
-
-#ifdef _linux_
-
-#include "futex.h"
-
-namespace NActors {
- class TThreadParkPad::TImpl {
- volatile bool Interrupted;
- int Futex;
-
- public:
- TImpl()
- : Interrupted(false)
- , Futex(0)
- {
- }
- ~TImpl() {
- }
-
- bool Park() noexcept {
- __atomic_fetch_sub(&Futex, 1, __ATOMIC_SEQ_CST);
- while (__atomic_load_n(&Futex, __ATOMIC_ACQUIRE) == -1)
- SysFutex(&Futex, FUTEX_WAIT_PRIVATE, -1, nullptr, nullptr, 0);
- return IsInterrupted();
- }
-
- void Unpark() noexcept {
- const int old = __atomic_fetch_add(&Futex, 1, __ATOMIC_SEQ_CST);
- if (old == -1)
- SysFutex(&Futex, FUTEX_WAKE_PRIVATE, -1, nullptr, nullptr, 0);
- }
-
- void Interrupt() noexcept {
- __atomic_store_n(&Interrupted, true, __ATOMIC_SEQ_CST);
- Unpark();
- }
-
- bool IsInterrupted() const noexcept {
- return __atomic_load_n(&Interrupted, __ATOMIC_ACQUIRE);
- }
- };
-
-#elif defined _win32_
-#include <library/cpp/deprecated/atomic/atomic.h>
-
-#include <util/generic/bt_exception.h>
-#include <util/generic/yexception.h>
-
-namespace NActors {
- class TThreadParkPad::TImpl {
- TAtomic Interrupted;
- HANDLE EvHandle;
-
- public:
- TImpl()
- : Interrupted(false)
- {
- EvHandle = ::CreateEvent(0, false, false, 0);
- if (!EvHandle)
- ythrow TWithBackTrace<yexception>() << "::CreateEvent failed";
- }
- ~TImpl() {
- if (EvHandle)
- ::CloseHandle(EvHandle);
- }
-
- bool Park() noexcept {
- ::WaitForSingleObject(EvHandle, INFINITE);
- return AtomicGet(Interrupted);
- }
-
- void Unpark() noexcept {
- ::SetEvent(EvHandle);
- }
-
- void Interrupt() noexcept {
- AtomicSet(Interrupted, true);
- Unpark();
- }
-
- bool IsInterrupted() const noexcept {
- return AtomicGet(Interrupted);
- }
- };
-
-#else
-
-#include <library/cpp/deprecated/atomic/atomic.h>
-
-#include <util/system/event.h>
-
-namespace NActors {
- class TThreadParkPad::TImpl {
- TAtomic Interrupted;
- TSystemEvent Ev;
-
- public:
- TImpl()
- : Interrupted(false)
- , Ev(TSystemEvent::rAuto)
- {
- }
- ~TImpl() {
- }
-
- bool Park() noexcept {
- Ev.Wait();
- return AtomicGet(Interrupted);
- }
-
- void Unpark() noexcept {
- Ev.Signal();
- }
-
- void Interrupt() noexcept {
- AtomicSet(Interrupted, true);
- Unpark();
- }
-
- bool IsInterrupted() const noexcept {
- return AtomicGet(Interrupted);
- }
- };
-#endif
-
- TThreadParkPad::TThreadParkPad()
- : Impl(new TThreadParkPad::TImpl())
- {
- }
-
- TThreadParkPad::~TThreadParkPad() {
- }
-
- bool TThreadParkPad::Park() noexcept {
- return Impl->Park();
- }
-
- void TThreadParkPad::Unpark() noexcept {
- Impl->Unpark();
- }
-
- void TThreadParkPad::Interrupt() noexcept {
- Impl->Interrupt();
- }
-
- bool TThreadParkPad::Interrupted() const noexcept {
- return Impl->IsInterrupted();
- }
-
-}
diff --git a/library/cpp/actors/util/threadparkpad.h b/library/cpp/actors/util/threadparkpad.h
deleted file mode 100644
index 5b574ccf34..0000000000
--- a/library/cpp/actors/util/threadparkpad.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include <util/generic/ptr.h>
-
-namespace NActors {
- class TThreadParkPad {
- private:
- class TImpl;
- THolder<TImpl> Impl;
-
- public:
- TThreadParkPad();
- ~TThreadParkPad();
-
- bool Park() noexcept;
- void Unpark() noexcept;
- void Interrupt() noexcept;
- bool Interrupted() const noexcept;
- };
-
-}
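
The pad above is a small blocking primitive: Park() blocks the calling thread until another thread calls Unpark() or Interrupt(), and returns whether the pad was interrupted. A usage sketch under that contract (the consumer/producer function names are illustrative, not part of the library):

    #include "threadparkpad.h"

    void ConsumerLoop(NActors::TThreadParkPad& pad) {
        for (;;) {
            // ... drain whatever work is currently queued ...
            if (pad.Park()) {
                return;        // Park() returned true: Interrupt() was called, shut down
            }
            // Woken by Unpark(): loop around and pick up the new work.
        }
    }

    void PublishWork(NActors::TThreadParkPad& pad) {
        // ... enqueue an item, then wake the parked consumer ...
        pad.Unpark();
    }

    void RequestShutdown(NActors::TThreadParkPad& pad) {
        pad.Interrupt();       // wakes the consumer and makes its Park() return true
    }
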
diff --git a/library/cpp/actors/util/ticket_lock.h b/library/cpp/actors/util/ticket_lock.h
deleted file mode 100644
index 30355c3390..0000000000
--- a/library/cpp/actors/util/ticket_lock.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#pragma once
-
-#include "intrinsics.h"
-#include <util/system/guard.h>
-#include <util/system/yassert.h>
-
-class TTicketLock : TNonCopyable {
- ui32 TicketIn;
- ui32 TicketOut;
-
-public:
- TTicketLock()
- : TicketIn(0)
- , TicketOut(0)
- {
- }
-
- void Release() noexcept {
- AtomicUi32Increment(&TicketOut);
- }
-
- ui32 Acquire() noexcept {
- ui32 revolves = 0;
- const ui32 ticket = AtomicUi32Increment(&TicketIn) - 1;
- while (ticket != AtomicLoad(&TicketOut)) {
- Y_DEBUG_ABORT_UNLESS(ticket >= AtomicLoad(&TicketOut));
- SpinLockPause();
- ++revolves;
- }
- return revolves;
- }
-
- bool TryAcquire() noexcept {
- const ui32 x = AtomicLoad(&TicketOut);
- if (x == AtomicLoad(&TicketIn) && AtomicUi32Cas(&TicketIn, x + 1, x))
- return true;
- else
- return false;
- }
-
- bool IsLocked() noexcept {
- const ui32 ticketIn = AtomicLoad(&TicketIn);
- const ui32 ticketOut = AtomicLoad(&TicketOut);
- return (ticketIn != ticketOut);
- }
-
- typedef ::TGuard<TTicketLock> TGuard;
-};
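
TTicketLock hands out tickets in arrival order, so waiters acquire strictly FIFO. A sketch of the two usual ways to take it, through the nested TGuard typedef (RAII) or through explicit Acquire()/Release(); the counter is just an example payload:

    #include "ticket_lock.h"
    #include <util/system/types.h>

    static TTicketLock CounterLock;
    static ui64 Counter = 0;

    void IncrementWithGuard() {
        TTicketLock::TGuard guard(CounterLock);   // Acquire() in ctor, Release() in dtor
        ++Counter;
    }

    void IncrementManually() {
        const ui32 spins = CounterLock.Acquire(); // returns how many spin iterations it took
        ++Counter;
        CounterLock.Release();
        (void)spins;                              // could feed a contention metric
    }
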
diff --git a/library/cpp/actors/util/timerfd.h b/library/cpp/actors/util/timerfd.h
deleted file mode 100644
index 78ae27e2ee..0000000000
--- a/library/cpp/actors/util/timerfd.h
+++ /dev/null
@@ -1,65 +0,0 @@
-#pragma once
-
-#include "datetime.h"
-
-#include <util/generic/noncopyable.h>
-
-#ifdef _linux_
-
-#include <util/system/yassert.h>
-#include <errno.h>
-#include <sys/timerfd.h>
-
-struct TTimerFd: public TNonCopyable {
- int Fd;
-
- TTimerFd() {
- Fd = timerfd_create(CLOCK_MONOTONIC, 0);
- Y_ABORT_UNLESS(Fd != -1, "timerfd_create(CLOCK_MONOTONIC, 0) -> -1; errno:%d: %s", int(errno), strerror(errno));
- }
-
- ~TTimerFd() {
- close(Fd);
- }
-
- void Set(ui64 ts) {
- ui64 now = GetCycleCountFast();
- Arm(now >= ts? 1: NHPTimer::GetSeconds(ts - now) * 1e9);
- }
-
- void Reset() {
- Arm(0); // disarm timer
- }
-
- void Wait() {
- ui64 expirations;
- ssize_t s = read(Fd, &expirations, sizeof(ui64));
- Y_UNUSED(s); // Y_ABORT_UNLESS(s == sizeof(ui64));
- }
-
- void Wake() {
- Arm(1);
- }
-private:
- void Arm(ui64 ns) {
- struct itimerspec spec;
- spec.it_value.tv_sec = ns / 1'000'000'000;
- spec.it_value.tv_nsec = ns % 1'000'000'000;
- spec.it_interval.tv_sec = 0;
- spec.it_interval.tv_nsec = 0;
- int ret = timerfd_settime(Fd, 0, &spec, nullptr);
- Y_ABORT_UNLESS(ret != -1, "timerfd_settime(%d, 0, %" PRIu64 "ns, 0) -> %d; errno:%d: %s", Fd, ns, ret, int(errno), strerror(errno));
- }
-};
-
-#else
-
-struct TTimerFd: public TNonCopyable {
- int Fd = 0;
- void Set(ui64) {}
- void Reset() {}
- void Wait() {}
- void Wake() {}
-};
-
-#endif
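
TTimerFd::Set() takes an absolute deadline in fast cycle counts (it is compared against GetCycleCountFast() and converted via NHPTimer::GetSeconds()), so a caller converts a relative delay first. A sketch, assuming NHPTimer::GetClockRate() from util/system/hp_timer.h for the cycles-per-second conversion; note that on non-Linux platforms the stub above makes Wait() return immediately:

    #include "timerfd.h"
    #include "datetime.h"               // GetCycleCountFast()
    #include <util/system/hp_timer.h>   // NHPTimer::GetClockRate() (assumed helper)

    void SleepAboutTenMs() {
        TTimerFd timer;
        const ui64 now = GetCycleCountFast();
        const ui64 deadline = now + static_cast<ui64>(NHPTimer::GetClockRate() * 0.010);
        timer.Set(deadline);   // arm for ~10 ms from now
        timer.Wait();          // blocks until expiration (or a Wake() from another thread)
    }
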
diff --git a/library/cpp/actors/util/unordered_cache.h b/library/cpp/actors/util/unordered_cache.h
deleted file mode 100644
index 40794fc04b..0000000000
--- a/library/cpp/actors/util/unordered_cache.h
+++ /dev/null
@@ -1,201 +0,0 @@
-#pragma once
-
-#include "defs.h"
-#include "queue_chunk.h"
-
-template <typename T, ui32 Size = 512, ui32 ConcurrencyFactor = 1, typename TChunk = TQueueChunk<T, Size>>
-class TUnorderedCache : TNonCopyable {
- static_assert(std::is_integral<T>::value || std::is_pointer<T>::value, "expect std::is_integral<T>::value || std::is_pointer<T>::value");
-
-public:
- static constexpr ui32 Concurrency = ConcurrencyFactor * 4;
-
-private:
- struct TReadSlot {
- TChunk* volatile ReadFrom;
- volatile ui32 ReadPosition;
- char Padding[64 - sizeof(TChunk*) - sizeof(ui32)]; // 1 slot per cache line
- };
-
- struct TWriteSlot {
- TChunk* volatile WriteTo;
- volatile ui32 WritePosition;
- char Padding[64 - sizeof(TChunk*) - sizeof(ui32)]; // 1 slot per cache line
- };
-
- static_assert(sizeof(TReadSlot) == 64, "expect sizeof(TReadSlot) == 64");
- static_assert(sizeof(TWriteSlot) == 64, "expect sizeof(TWriteSlot) == 64");
-
-private:
- TReadSlot ReadSlots[Concurrency];
- TWriteSlot WriteSlots[Concurrency];
-
- static_assert(sizeof(TChunk*) == sizeof(TAtomic), "expect sizeof(TChunk*) == sizeof(TAtomic)");
-
-private:
- struct TLockedWriter {
- TWriteSlot* Slot;
- TChunk* WriteTo;
-
- TLockedWriter()
- : Slot(nullptr)
- , WriteTo(nullptr)
- { }
-
- TLockedWriter(TWriteSlot* slot, TChunk* writeTo)
- : Slot(slot)
- , WriteTo(writeTo)
- { }
-
- ~TLockedWriter() noexcept {
- Drop();
- }
-
- void Drop() {
- if (Slot) {
- AtomicStore(&Slot->WriteTo, WriteTo);
- Slot = nullptr;
- }
- }
-
- TLockedWriter(const TLockedWriter&) = delete;
- TLockedWriter& operator=(const TLockedWriter&) = delete;
-
- TLockedWriter(TLockedWriter&& rhs)
- : Slot(rhs.Slot)
- , WriteTo(rhs.WriteTo)
- {
- rhs.Slot = nullptr;
- }
-
- TLockedWriter& operator=(TLockedWriter&& rhs) {
- if (Y_LIKELY(this != &rhs)) {
- Drop();
- Slot = rhs.Slot;
- WriteTo = rhs.WriteTo;
- rhs.Slot = nullptr;
- }
- return *this;
- }
- };
-
-private:
- TLockedWriter LockWriter(ui64 writerRotation) {
- ui32 cycle = 0;
- for (;;) {
- TWriteSlot* slot = &WriteSlots[writerRotation % Concurrency];
- if (AtomicLoad(&slot->WriteTo) != nullptr) {
- if (TChunk* writeTo = AtomicSwap(&slot->WriteTo, nullptr)) {
- return TLockedWriter(slot, writeTo);
- }
- }
- ++writerRotation;
-
- // Do a spinlock pause after a full cycle
- if (++cycle == Concurrency) {
- SpinLockPause();
- cycle = 0;
- }
- }
- }
-
- void WriteOne(TLockedWriter& lock, T x) {
- Y_DEBUG_ABORT_UNLESS(x != 0);
-
- const ui32 pos = AtomicLoad(&lock.Slot->WritePosition);
- if (pos != TChunk::EntriesCount) {
- AtomicStore(&lock.Slot->WritePosition, pos + 1);
- AtomicStore(&lock.WriteTo->Entries[pos], x);
- } else {
- TChunk* next = new TChunk();
- AtomicStore(&next->Entries[0], x);
- AtomicStore(&lock.Slot->WritePosition, 1u);
- AtomicStore(&lock.WriteTo->Next, next);
- lock.WriteTo = next;
- }
- }
-
-public:
- TUnorderedCache() {
- for (ui32 i = 0; i < Concurrency; ++i) {
- ReadSlots[i].ReadFrom = new TChunk();
- ReadSlots[i].ReadPosition = 0;
-
- WriteSlots[i].WriteTo = ReadSlots[i].ReadFrom;
- WriteSlots[i].WritePosition = 0;
- }
- }
-
- ~TUnorderedCache() {
- Y_ABORT_UNLESS(!Pop(0));
-
- for (ui64 i = 0; i < Concurrency; ++i) {
- if (ReadSlots[i].ReadFrom) {
- delete ReadSlots[i].ReadFrom;
- ReadSlots[i].ReadFrom = nullptr;
- }
- WriteSlots[i].WriteTo = nullptr;
- }
- }
-
- T Pop(ui64 readerRotation) noexcept {
- ui64 readerIndex = readerRotation;
- const ui64 endIndex = readerIndex + Concurrency;
- for (; readerIndex != endIndex; ++readerIndex) {
- TReadSlot* slot = &ReadSlots[readerIndex % Concurrency];
- if (AtomicLoad(&slot->ReadFrom) != nullptr) {
- if (TChunk* readFrom = AtomicSwap(&slot->ReadFrom, nullptr)) {
- const ui32 pos = AtomicLoad(&slot->ReadPosition);
- if (pos != TChunk::EntriesCount) {
- if (T ret = AtomicLoad(&readFrom->Entries[pos])) {
- AtomicStore(&slot->ReadPosition, pos + 1);
- AtomicStore(&slot->ReadFrom, readFrom); // release lock with same chunk
- return ret; // found, return
- } else {
- AtomicStore(&slot->ReadFrom, readFrom); // release lock with same chunk
- }
- } else if (TChunk* next = AtomicLoad(&readFrom->Next)) {
- if (T ret = AtomicLoad(&next->Entries[0])) {
- AtomicStore(&slot->ReadPosition, 1u);
- AtomicStore(&slot->ReadFrom, next); // release lock with next chunk
- delete readFrom;
- return ret;
- } else {
- AtomicStore(&slot->ReadPosition, 0u);
- AtomicStore(&slot->ReadFrom, next); // release lock with new chunk
- delete readFrom;
- }
- } else {
- // nothing in old chunk and no next chunk, just release lock with old chunk
- AtomicStore(&slot->ReadFrom, readFrom);
- }
- }
- }
- }
-
- return 0; // got nothing after full cycle, return
- }
-
- void Push(T x, ui64 writerRotation) {
- TLockedWriter lock = LockWriter(writerRotation);
- WriteOne(lock, x);
- }
-
- void PushBulk(T* x, ui32 xcount, ui64 writerRotation) {
- for (;;) {
-            // Fill no more than one queue chunk per round
- const ui32 xround = Min(xcount, (ui32)TChunk::EntriesCount);
-
- {
- TLockedWriter lock = LockWriter(writerRotation++);
- for (T* end = x + xround; x != end; ++x)
- WriteOne(lock, *x);
- }
-
- if (xcount <= TChunk::EntriesCount)
- break;
-
- xcount -= TChunk::EntriesCount;
- }
- }
-};
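
TUnorderedCache is a relaxed multi-producer/multi-consumer buffer for non-zero integral or pointer values: it gives no ordering guarantees, and each caller keeps its own monotonically growing rotation counter so traffic spreads across the Concurrency read/write slots. Pop() returns 0 when nothing was found on that rotation. A sketch mirroring the unit tests removed below:

    #include "unordered_cache.h"

    void CacheRoundTrip() {
        TUnorderedCache<ui64> cache;       // Size = 512, ConcurrencyFactor = 1 by default
        ui64 writeRotation = 0;
        ui64 readRotation = 0;

        cache.Push(42, writeRotation++);   // stored values must be non-zero
        if (ui64 value = cache.Pop(readRotation++)) {
            // got 42 back; with several threads the retrieval order is unspecified
            (void)value;
        }
    }
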
diff --git a/library/cpp/actors/util/unordered_cache_ut.cpp b/library/cpp/actors/util/unordered_cache_ut.cpp
deleted file mode 100644
index 37865f2f91..0000000000
--- a/library/cpp/actors/util/unordered_cache_ut.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-#include "unordered_cache.h"
-
-#include <library/cpp/testing/unittest/registar.h>
-#include <util/random/random.h>
-#include <util/system/hp_timer.h>
-#include <util/system/sanitizers.h>
-#include <util/system/thread.h>
-
-Y_UNIT_TEST_SUITE(UnorderedCache) {
-
- void DoOnePushOnePop(ui64 count) {
- TUnorderedCache<ui64> queue;
-
- ui64 readRotation = 0;
- ui64 writeRotation = 0;
-
- auto popped = queue.Pop(readRotation++);
- UNIT_ASSERT_VALUES_EQUAL(popped, 0u);
-
- for (ui64 i = 0; i < count; ++i) {
- queue.Push(i + 1, writeRotation++);
- popped = queue.Pop(readRotation++);
- UNIT_ASSERT_VALUES_EQUAL(popped, i + 1);
-
- popped = queue.Pop(readRotation++);
- UNIT_ASSERT_VALUES_EQUAL(popped, 0u);
- }
- }
-
- Y_UNIT_TEST(OnePushOnePop) {
- DoOnePushOnePop(1);
- }
-
- Y_UNIT_TEST(OnePushOnePop_Repeat1M) {
- DoOnePushOnePop(1000000);
- }
-
- /**
- * Simplified thread spawning for testing
- */
- class TWorkerThread : public ISimpleThread {
- private:
- std::function<void()> Func;
- double Time = 0.0;
-
- public:
- TWorkerThread(std::function<void()> func)
- : Func(std::move(func))
- { }
-
- double GetTime() const {
- return Time;
- }
-
- static THolder<TWorkerThread> Spawn(std::function<void()> func) {
- THolder<TWorkerThread> thread = MakeHolder<TWorkerThread>(std::move(func));
- thread->Start();
- return thread;
- }
-
- private:
- void* ThreadProc() noexcept override {
- THPTimer timer;
- Func();
- Time = timer.Passed();
- return nullptr;
- }
- };
-
- void DoConcurrentPushPop(size_t threads, ui64 perThreadCount) {
- // Concurrency factor 4 is up to 16 threads
- TUnorderedCache<ui64, 512, 4> queue;
-
- auto workerFunc = [&](size_t threadIndex) {
- ui64 readRotation = 0;
- ui64 writeRotation = 0;
- ui64 readsDone = 0;
- ui64 writesDone = 0;
- for (;;) {
- bool canRead = readsDone < writesDone;
- bool canWrite = writesDone < perThreadCount;
- if (!canRead && !canWrite) {
- break;
- }
- if (canRead && canWrite) {
- // Randomly choose between read and write
- if (RandomNumber<ui64>(2)) {
- canRead = false;
- } else {
- canWrite = false;
- }
- }
- if (canRead) {
- ui64 popped = queue.Pop(readRotation++);
- if (popped) {
- ++readsDone;
- }
- }
- if (canWrite) {
- queue.Push(1 + writesDone * threads + threadIndex, writeRotation++);
- ++writesDone;
- }
- }
- };
-
- TVector<THolder<TWorkerThread>> workers(threads);
- for (size_t i = 0; i < threads; ++i) {
- workers[i] = TWorkerThread::Spawn([workerFunc, i]() {
- workerFunc(i);
- });
- }
-
- double maxTime = 0;
- for (size_t i = 0; i < threads; ++i) {
- workers[i]->Join();
- maxTime = Max(maxTime, workers[i]->GetTime());
- }
-
- auto popped = queue.Pop(0);
- UNIT_ASSERT_VALUES_EQUAL(popped, 0u);
-
- Cerr << "Concurrent with " << threads << " threads: " << maxTime << " seconds" << Endl;
- }
-
- void DoConcurrentPushPop_3times(size_t threads, ui64 perThreadCount) {
- for (size_t i = 0; i < 3; ++i) {
- DoConcurrentPushPop(threads, perThreadCount);
- }
- }
-
- static constexpr ui64 PER_THREAD_COUNT = NSan::PlainOrUnderSanitizer(1000000, 100000);
-
- Y_UNIT_TEST(ConcurrentPushPop_1thread) { DoConcurrentPushPop_3times(1, PER_THREAD_COUNT); }
- Y_UNIT_TEST(ConcurrentPushPop_2threads) { DoConcurrentPushPop_3times(2, PER_THREAD_COUNT); }
- Y_UNIT_TEST(ConcurrentPushPop_4threads) { DoConcurrentPushPop_3times(4, PER_THREAD_COUNT); }
- Y_UNIT_TEST(ConcurrentPushPop_8threads) { DoConcurrentPushPop_3times(8, PER_THREAD_COUNT); }
- Y_UNIT_TEST(ConcurrentPushPop_16threads) { DoConcurrentPushPop_3times(16, PER_THREAD_COUNT); }
-}
diff --git a/library/cpp/actors/util/ut/CMakeLists.darwin-arm64.txt b/library/cpp/actors/util/ut/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 9b02cd1836..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-util-ut)
-target_include_directories(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util
-)
-target_link_libraries(library-cpp-actors-util-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-util
-)
-target_link_options(library-cpp-actors-util-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/cpu_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/thread_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/unordered_cache_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-util-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-util-ut
- TEST_TARGET
- library-cpp-actors-util-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-util-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-util-ut)
diff --git a/library/cpp/actors/util/ut/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/util/ut/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index f02b2d926c..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,75 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-util-ut)
-target_include_directories(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util
-)
-target_link_libraries(library-cpp-actors-util-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-util
-)
-target_link_options(library-cpp-actors-util-ut PRIVATE
- -Wl,-platform_version,macos,11.0,11.0
- -fPIC
- -fPIC
- -framework
- CoreFoundation
-)
-target_sources(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/cpu_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/thread_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/unordered_cache_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-util-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-util-ut
- TEST_TARGET
- library-cpp-actors-util-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-util-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-util-ut)
diff --git a/library/cpp/actors/util/ut/CMakeLists.linux-aarch64.txt b/library/cpp/actors/util/ut/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 27ff864fef..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,78 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-util-ut)
-target_include_directories(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util
-)
-target_link_libraries(library-cpp-actors-util-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-testing-unittest_main
- cpp-actors-util
-)
-target_link_options(library-cpp-actors-util-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/cpu_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/thread_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/unordered_cache_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-util-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-util-ut
- TEST_TARGET
- library-cpp-actors-util-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-util-ut
- cpp-malloc-jemalloc
-)
-vcs_info(library-cpp-actors-util-ut)
diff --git a/library/cpp/actors/util/ut/CMakeLists.linux-x86_64.txt b/library/cpp/actors/util/ut/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index d1143a475b..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,80 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-util-ut)
-target_include_directories(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util
-)
-target_link_libraries(library-cpp-actors-util-ut PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-util
-)
-target_link_options(library-cpp-actors-util-ut PRIVATE
- -ldl
- -lrt
- -Wl,--no-as-needed
- -fPIC
- -fPIC
- -lpthread
- -lrt
- -ldl
-)
-target_sources(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/cpu_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/thread_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/unordered_cache_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-util-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-util-ut
- TEST_TARGET
- library-cpp-actors-util-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-util-ut
- cpp-malloc-tcmalloc
- libs-tcmalloc-no_percpu_cache
-)
-vcs_info(library-cpp-actors-util-ut)
diff --git a/library/cpp/actors/util/ut/CMakeLists.txt b/library/cpp/actors/util/ut/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/util/ut/CMakeLists.windows-x86_64.txt b/library/cpp/actors/util/ut/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 3af5d98ef0..0000000000
--- a/library/cpp/actors/util/ut/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,68 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_executable(library-cpp-actors-util-ut)
-target_include_directories(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util
-)
-target_link_libraries(library-cpp-actors-util-ut PUBLIC
- contrib-libs-cxxsupp
- yutil
- library-cpp-cpuid_check
- cpp-testing-unittest_main
- cpp-actors-util
-)
-target_sources(library-cpp-actors-util-ut PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/cpu_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/memory_tracker_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/thread_load_log_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rope_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/rc_buf_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/shared_data_native_rope_backend_ut.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/util/unordered_cache_ut.cpp
-)
-set_property(
- TARGET
- library-cpp-actors-util-ut
- PROPERTY
- SPLIT_FACTOR
- 1
-)
-add_yunittest(
- NAME
- library-cpp-actors-util-ut
- TEST_TARGET
- library-cpp-actors-util-ut
- TEST_ARG
- --print-before-suite
- --print-before-test
- --fork-tests
- --print-times
- --show-fails
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- LABELS
- SMALL
-)
-set_yunittest_property(
- TEST
- library-cpp-actors-util-ut
- PROPERTY
- PROCESSORS
- 1
-)
-target_allocator(library-cpp-actors-util-ut
- system_allocator
-)
-vcs_info(library-cpp-actors-util-ut)
diff --git a/library/cpp/actors/util/ut/ya.make b/library/cpp/actors/util/ut/ya.make
deleted file mode 100644
index 9ac8504751..0000000000
--- a/library/cpp/actors/util/ut/ya.make
+++ /dev/null
@@ -1,20 +0,0 @@
-UNITTEST_FOR(library/cpp/actors/util)
-
-IF (WITH_VALGRIND)
- TIMEOUT(600)
- SIZE(MEDIUM)
-ENDIF()
-
-SRCS(
- cpu_load_log_ut.cpp
- memory_tracker_ut.cpp
- thread_load_log_ut.cpp
- rope_ut.cpp
- rc_buf_ut.cpp
- shared_data_ut.cpp
- shared_data_rope_backend_ut.cpp
- shared_data_native_rope_backend_ut.cpp
- unordered_cache_ut.cpp
-)
-
-END()
diff --git a/library/cpp/actors/util/ut_helpers.h b/library/cpp/actors/util/ut_helpers.h
deleted file mode 100644
index d3fe873233..0000000000
--- a/library/cpp/actors/util/ut_helpers.h
+++ /dev/null
@@ -1,12 +0,0 @@
-#pragma once
-
-// calls TCallback for every ordered pair of args, including identical pairs (x, x)
-template <class TCallback, class... TArgs>
-void Permutate(TCallback&& fn, TArgs&&... args)
-{
- auto forAll = [&](auto& arg){
- (fn(std::forward<decltype(arg)>(arg), std::forward<decltype(args)>(args)), ...);
- };
-
- (forAll(std::forward<decltype(args)>(args)), ...);
-}
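
Permutate() above invokes the callback on every ordered pair of its arguments, identical pairs included. A small illustration (Cout/Endl come from util/stream/output.h):

    #include "ut_helpers.h"
    #include <util/stream/output.h>

    void PrintAllPairs() {
        int a = 1, b = 2;
        Permutate([](int x, int y) {
            Cout << x << "," << y << Endl;   // prints 1,1  1,2  2,1  2,2
        }, a, b);
    }
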
diff --git a/library/cpp/actors/util/ya.make b/library/cpp/actors/util/ya.make
deleted file mode 100644
index 48d595c156..0000000000
--- a/library/cpp/actors/util/ya.make
+++ /dev/null
@@ -1,50 +0,0 @@
-LIBRARY()
-
-SRCS(
- affinity.cpp
- affinity.h
- cpu_load_log.h
- cpumask.h
- datetime.h
- defs.h
- funnel_queue.h
- futex.h
- intrinsics.h
- local_process_key.h
- named_tuple.h
- queue_chunk.h
- queue_oneone_inplace.h
- memory_track.cpp
- memory_track.h
- memory_tracker.cpp
- memory_tracker.h
- recentwnd.h
- rope.cpp
- rope.h
- rc_buf.cpp
- rc_buf.h
- shared_data.h
- shared_data.cpp
- shared_data_rope_backend.h
- should_continue.cpp
- should_continue.h
- thread.h
- threadparkpad.cpp
- threadparkpad.h
- thread_load_log.h
- ticket_lock.h
- timerfd.h
- unordered_cache.h
-)
-
-PEERDIR(
- library/cpp/containers/absl_flat_hash
- library/cpp/deprecated/atomic
- library/cpp/pop_count
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/actors/wilson/CMakeLists.darwin-arm64.txt b/library/cpp/actors/wilson/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 45704ccd5a..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(protos)
-
-add_library(cpp-actors-wilson)
-target_link_libraries(cpp-actors-wilson PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-protos
- actors-wilson-protos
-)
-target_sources(cpp-actors-wilson PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_profile_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_trace.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_uploader.cpp
-)
diff --git a/library/cpp/actors/wilson/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/wilson/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 45704ccd5a..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(protos)
-
-add_library(cpp-actors-wilson)
-target_link_libraries(cpp-actors-wilson PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-protos
- actors-wilson-protos
-)
-target_sources(cpp-actors-wilson PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_profile_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_trace.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_uploader.cpp
-)
diff --git a/library/cpp/actors/wilson/CMakeLists.linux-aarch64.txt b/library/cpp/actors/wilson/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index ccc87489ae..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(protos)
-
-add_library(cpp-actors-wilson)
-target_link_libraries(cpp-actors-wilson PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-protos
- actors-wilson-protos
-)
-target_sources(cpp-actors-wilson PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_profile_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_trace.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_uploader.cpp
-)
diff --git a/library/cpp/actors/wilson/CMakeLists.linux-x86_64.txt b/library/cpp/actors/wilson/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index ccc87489ae..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,26 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(protos)
-
-add_library(cpp-actors-wilson)
-target_link_libraries(cpp-actors-wilson PUBLIC
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-protos
- actors-wilson-protos
-)
-target_sources(cpp-actors-wilson PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_profile_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_trace.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_uploader.cpp
-)
diff --git a/library/cpp/actors/wilson/CMakeLists.txt b/library/cpp/actors/wilson/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/wilson/CMakeLists.windows-x86_64.txt b/library/cpp/actors/wilson/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 45704ccd5a..0000000000
--- a/library/cpp/actors/wilson/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,25 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-add_subdirectory(protos)
-
-add_library(cpp-actors-wilson)
-target_link_libraries(cpp-actors-wilson PUBLIC
- contrib-libs-cxxsupp
- yutil
- cpp-actors-core
- cpp-actors-protos
- actors-wilson-protos
-)
-target_sources(cpp-actors-wilson PRIVATE
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_event.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_profile_span.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_trace.cpp
- ${CMAKE_SOURCE_DIR}/library/cpp/actors/wilson/wilson_uploader.cpp
-)
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.darwin-arm64.txt b/library/cpp/actors/wilson/protos/CMakeLists.darwin-arm64.txt
deleted file mode 100644
index 92afc01bb4..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.darwin-arm64.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-wilson-protos INTERFACE)
-target_link_libraries(actors-wilson-protos INTERFACE
- contrib-libs-cxxsupp
- yutil
- contrib-libs-opentelemetry-proto
-)
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.darwin-x86_64.txt b/library/cpp/actors/wilson/protos/CMakeLists.darwin-x86_64.txt
deleted file mode 100644
index 92afc01bb4..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.darwin-x86_64.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-wilson-protos INTERFACE)
-target_link_libraries(actors-wilson-protos INTERFACE
- contrib-libs-cxxsupp
- yutil
- contrib-libs-opentelemetry-proto
-)
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.linux-aarch64.txt b/library/cpp/actors/wilson/protos/CMakeLists.linux-aarch64.txt
deleted file mode 100644
index 101316f4fc..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.linux-aarch64.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-wilson-protos INTERFACE)
-target_link_libraries(actors-wilson-protos INTERFACE
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-opentelemetry-proto
-)
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.linux-x86_64.txt b/library/cpp/actors/wilson/protos/CMakeLists.linux-x86_64.txt
deleted file mode 100644
index 101316f4fc..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.linux-x86_64.txt
+++ /dev/null
@@ -1,16 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-wilson-protos INTERFACE)
-target_link_libraries(actors-wilson-protos INTERFACE
- contrib-libs-linux-headers
- contrib-libs-cxxsupp
- yutil
- contrib-libs-opentelemetry-proto
-)
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.txt b/library/cpp/actors/wilson/protos/CMakeLists.txt
deleted file mode 100644
index 2dce3a77fe..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.txt
+++ /dev/null
@@ -1,19 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-if (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "aarch64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-aarch64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64")
- include(CMakeLists.darwin-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Darwin" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "arm64")
- include(CMakeLists.darwin-arm64.txt)
-elseif (WIN32 AND CMAKE_SYSTEM_PROCESSOR STREQUAL "AMD64" AND NOT HAVE_CUDA)
- include(CMakeLists.windows-x86_64.txt)
-elseif (CMAKE_SYSTEM_NAME STREQUAL "Linux" AND CMAKE_SYSTEM_PROCESSOR STREQUAL "x86_64" AND NOT HAVE_CUDA)
- include(CMakeLists.linux-x86_64.txt)
-endif()
diff --git a/library/cpp/actors/wilson/protos/CMakeLists.windows-x86_64.txt b/library/cpp/actors/wilson/protos/CMakeLists.windows-x86_64.txt
deleted file mode 100644
index 92afc01bb4..0000000000
--- a/library/cpp/actors/wilson/protos/CMakeLists.windows-x86_64.txt
+++ /dev/null
@@ -1,15 +0,0 @@
-
-# This file was generated by the build system used internally in the Yandex monorepo.
-# Only simple modifications are allowed (adding source-files to targets, adding simple properties
-# like target_include_directories). These modifications will be ported to original
-# ya.make files by maintainers. Any complex modifications which can't be ported back to the
-# original buildsystem will not be accepted.
-
-
-
-add_library(actors-wilson-protos INTERFACE)
-target_link_libraries(actors-wilson-protos INTERFACE
- contrib-libs-cxxsupp
- yutil
- contrib-libs-opentelemetry-proto
-)
diff --git a/library/cpp/actors/wilson/protos/ya.make b/library/cpp/actors/wilson/protos/ya.make
deleted file mode 100644
index e9db290efd..0000000000
--- a/library/cpp/actors/wilson/protos/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-PROTO_LIBRARY()
-
- PEERDIR(
- contrib/libs/opentelemetry-proto
- )
-
- EXCLUDE_TAGS(
- GO_PROTO
- JAVA_PROTO
- )
-
-END()
diff --git a/library/cpp/actors/wilson/wilson_event.cpp b/library/cpp/actors/wilson/wilson_event.cpp
deleted file mode 100644
index ad51550d91..0000000000
--- a/library/cpp/actors/wilson/wilson_event.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "wilson_event.h"
-
-namespace NWilson {
-}
diff --git a/library/cpp/actors/wilson/wilson_event.h b/library/cpp/actors/wilson/wilson_event.h
deleted file mode 100644
index 4b6a7612c0..0000000000
--- a/library/cpp/actors/wilson/wilson_event.h
+++ /dev/null
@@ -1,21 +0,0 @@
-#pragma once
-
-#include "wilson_trace.h"
-
-#include <library/cpp/string_utils/base64/base64.h>
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/log.h>
-
-namespace NWilson {
-
- // stub for NBS
- template<typename TActorSystem>
- inline bool TraceEnabled(const TActorSystem&) {
- return false;
- }
-
- template<typename TActorSystem, typename TEvent>
- inline void TraceEvent(const TActorSystem&, TTraceId*, TEvent&&, TInstant)
- {}
-
-} // NWilson
diff --git a/library/cpp/actors/wilson/wilson_profile_span.cpp b/library/cpp/actors/wilson/wilson_profile_span.cpp
deleted file mode 100644
index e908ed2b8a..0000000000
--- a/library/cpp/actors/wilson/wilson_profile_span.cpp
+++ /dev/null
@@ -1,178 +0,0 @@
-#include "wilson_profile_span.h"
-#include <library/cpp/json/writer/json.h>
-
-namespace NWilson {
-
-void TProfileSpan::AddMax(const TString& eventId, const TString& /*info*/) {
- if (!Enabled) {
- return;
- }
- auto it = PairInstances.find(eventId);
- if (it == PairInstances.end()) {
- PairInstances.emplace(eventId, TMinMaxPair::BuildMax(Now()));
- } else {
- it->second.AddMax(Now());
- }
-}
-
-void TProfileSpan::AddMin(const TString& eventId, const TString& /*info*/) {
- if (!Enabled) {
- return;
- }
- auto it = PairInstances.find(eventId);
- if (it == PairInstances.end()) {
- PairInstances.emplace(eventId, TMinMaxPair::BuildMin(Now()));
- } else {
- it->second.AddMin(Now());
- }
-}
-
-TProfileSpan::TProfileSpan(const ui8 verbosity, TTraceId parentId, std::optional<TString> name)
- : TBase(verbosity, std::move(parentId), name, NWilson::EFlags::AUTO_END)
-{
-
-}
-
-TProfileSpan::~TProfileSpan() {
- if (Enabled && (ResultTimes.GetMapSafe().size() || PairInstances.size())) {
- TBase::Attribute("profile", ProfileToString());
- }
-}
-
-NWilson::TProfileSpan TProfileSpan::BuildChildrenSpan(std::optional<TString> name, const ui8 verbosity) const {
- TTraceId parentTraceId = TBase::GetTraceId();
- const ui8 newVerbosity = verbosity ? verbosity : parentTraceId.GetVerbosity();
- return TProfileSpan(newVerbosity, std::move(parentTraceId), name);
-}
-
-TString TProfileSpan::ProfileToString() const {
- if (!Enabled) {
- return "DISABLED";
- }
- TStringBuilder sb;
- FlushNoGuards();
- {
- NJsonWriter::TBuf sout;
- ResultTimes.InsertValue("-current_guards_count", CurrentJsonPath.size());
- ResultTimes.InsertValue("-duration", (Now() - StartTime).MicroSeconds() * 0.000001);
- sout.WriteJsonValue(&ResultTimes, true, EFloatToStringMode::PREC_POINT_DIGITS, 6);
- sb << sout.Str();
- }
- sb << ";";
- sb << "Pairs:{";
- for (auto&& i : PairInstances) {
- sb << i.first << ":" << i.second.ToString() << ";";
- }
- sb << "}";
- return sb;
-}
-
-void TProfileSpan::FlushNoGuards() const {
- if (!Enabled) {
- return;
- }
- if (CurrentJsonPath.empty()) {
- NJson::TJsonValue* currentNodeOutside;
- if (!ResultTimes.GetValuePointer("--outside_duration", &currentNodeOutside)) {
- currentNodeOutside = &ResultTimes.InsertValue("--outside_duration", 0);
- currentNodeOutside->SetType(NJson::JSON_DOUBLE);
- }
- currentNodeOutside->SetValue(currentNodeOutside->GetDoubleRobust() + (Now() - LastNoGuards).MicroSeconds() * 0.000001);
- LastNoGuards = Now();
- }
-}
-
-NWilson::TProfileSpan::TMinMaxPair TProfileSpan::TMinMaxPair::BuildMin(const TInstant value) {
- TMinMaxPair result;
- result.MinMinInstance = value;
- result.MaxMinInstance = value;
- return result;
-}
-
-NWilson::TProfileSpan::TMinMaxPair TProfileSpan::TMinMaxPair::BuildMax(const TInstant value) {
- TMinMaxPair result;
- result.MaxInstance = value;
- return result;
-}
-
-void TProfileSpan::TMinMaxPair::AddMax(const TInstant instance) {
- if (!MaxInstance) {
- MaxInstance = instance;
- } else {
- MaxInstance = Max(*MaxInstance, instance);
- }
-}
-
-void TProfileSpan::TMinMaxPair::AddMin(const TInstant instance) {
- if (!MinMinInstance) {
- MinMinInstance = instance;
- } else {
- MinMinInstance = Min(*MinMinInstance, instance);
- }
- if (!MaxMinInstance) {
- MaxMinInstance = instance;
- } else {
- MaxMinInstance = Max(*MaxMinInstance, instance);
- }
-}
-
-TString TProfileSpan::TMinMaxPair::ToString() const {
- TStringBuilder sb;
- sb << "[";
- if (MinMinInstance) {
- sb << MinMinInstance->MicroSeconds();
- } else {
- sb << "UNDEFINED";
- }
- sb << "-";
- if (MaxMinInstance) {
- sb << MaxMinInstance->MicroSeconds();
- } else {
- sb << "UNDEFINED";
- }
- sb << ",";
- if (MaxInstance) {
- sb << MaxInstance->MicroSeconds();
- } else {
- sb << "UNDEFINED";
- }
- if (MaxInstance && MinMinInstance) {
- sb << ",";
- sb << *MaxInstance - *MaxMinInstance << "-" << *MaxInstance - *MinMinInstance;
- }
- sb << "]";
- return sb;
-}
-
-TProfileSpan::TGuard::~TGuard() {
- if (!Owner.Enabled) {
- return;
- }
- Y_ABORT_UNLESS(CurrentNodeDuration->IsDouble());
- CurrentNodeDuration->SetValue((Now() - Start).MicroSeconds() * 0.000001 + CurrentNodeDuration->GetDoubleRobust());
- Y_ABORT_UNLESS(Owner.CurrentJsonPath.size());
- Owner.CurrentJsonPath.pop_back();
- if (Owner.CurrentJsonPath.empty()) {
- Owner.LastNoGuards = Now();
- }
-}
-
-TProfileSpan::TGuard::TGuard(const TString& event, TProfileSpan& owner, const TString& /*info*/)
- : Owner(owner) {
- if (!Owner.Enabled) {
- return;
- }
- Owner.FlushNoGuards();
- NJson::TJsonValue* currentNode = Owner.CurrentJsonPath.empty() ? &Owner.ResultTimes : Owner.CurrentJsonPath.back();
- NJson::TJsonValue* currentNodeParent;
- if (!currentNode->GetValuePointer(event, &currentNodeParent)) {
- currentNodeParent = &currentNode->InsertValue(event, NJson::JSON_MAP);
- }
- Owner.CurrentJsonPath.emplace_back(currentNodeParent);
- if (!currentNodeParent->GetValuePointer("--duration", &CurrentNodeDuration)) {
- CurrentNodeDuration = &currentNodeParent->InsertValue("--duration", 0);
- CurrentNodeDuration->SetType(NJson::JSON_DOUBLE);
- }
-}
-
-} // NWilson
diff --git a/library/cpp/actors/wilson/wilson_profile_span.h b/library/cpp/actors/wilson/wilson_profile_span.h
deleted file mode 100644
index f12747e4ac..0000000000
--- a/library/cpp/actors/wilson/wilson_profile_span.h
+++ /dev/null
@@ -1,75 +0,0 @@
-#pragma once
-#include "wilson_span.h"
-#include <library/cpp/json/writer/json_value.h>
-
-namespace NWilson {
-
-class TProfileSpan: public TSpan {
-private:
- using TBase = TSpan;
- class TMinMaxPair {
- private:
- std::optional<TInstant> MinMinInstance;
- std::optional<TInstant> MaxMinInstance;
- std::optional<TInstant> MaxInstance;
- public:
- static TMinMaxPair BuildMin(const TInstant value);
- static TMinMaxPair BuildMax(const TInstant value);
- void AddMax(const TInstant instance);
- void AddMin(const TInstant instance);
- TString ToString() const;
- };
- mutable NJson::TJsonValue ResultTimes = NJson::JSON_MAP;
- std::map<TString, TMinMaxPair> PairInstances;
- std::vector<NJson::TJsonValue*> CurrentJsonPath;
- mutable TInstant LastNoGuards = Now();
- const TInstant StartTime = Now();
- bool Enabled = true;
-
- void FlushNoGuards() const;
- TProfileSpan() = default;
-public:
- TProfileSpan(const ui8 verbosity, TTraceId parentId, std::optional<TString> name);
- ~TProfileSpan();
-
- TProfileSpan BuildChildrenSpan(std::optional<TString> name, const ui8 verbosity = 0) const;
-
- using TBase::TBase;
- TString ProfileToString() const;
-
- TProfileSpan& SetEnabled(const bool value) {
- Enabled = value;
- return *this;
- }
-
- class TGuard {
- private:
- TProfileSpan& Owner;
- const TInstant Start = Now();
- NJson::TJsonValue* CurrentNodeDuration;
- public:
- TGuard(const TString& event, TProfileSpan& owner, const TString& info);
- ~TGuard();
- };
-
- template <class TEventId, class T = TString>
- TGuard StartStackTimeGuard(const TEventId event, const T& info = Default<T>()) {
- return TGuard(::ToString(event), *this, ::ToString(info));
- }
-
- template <class TEventId, class T = TString>
- void AddMin(const TEventId event, const T& info = Default<T>()) {
- AddMin(::ToString(event), ::ToString(info));
- }
-
- template <class TEventId, class T = TString>
- void AddMax(const TEventId event, const T& info = Default<T>()) {
- AddMax(::ToString(event), ::ToString(info));
- }
-
- void AddMin(const TString& eventId, const TString& info);
- void AddMax(const TString& eventId, const TString& info);
-
-};
-
-} // NWilson
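
The removed TProfileSpan layers a small JSON profile on top of TSpan: TGuard accumulates a "--duration" counter for the current event on a stack of JSON nodes, while AddMin/AddMax remember the earliest and latest instants seen per event id, and the destructor attaches the result as a "profile" attribute. A usage sketch against the deleted interface (it must run where an activation context is available, since the underlying TSpan reads the node id from the actor system; the event names are illustrative):

```cpp
#include <library/cpp/actors/wilson/wilson_profile_span.h>

void ProfileOneRequest() {
    // verbosity fits into 4 bits (0..15), time-to-live into 12 bits (0..4095)
    NWilson::TTraceId traceId = NWilson::TTraceId::NewTraceId(15, 4095);
    NWilson::TProfileSpan span(15, std::move(traceId), "request");

    span.AddMin("chunk_seen");                 // earliest instant for this event id
    {
        auto guard = span.StartStackTimeGuard("parse_phase");
        // ... work measured into the "parse_phase"/"--duration" JSON node ...
    }
    span.AddMax("chunk_seen");                 // latest instant for the same event id
    // ~TProfileSpan serializes the accumulated JSON and attaches it as the "profile" attribute.
}
```
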
diff --git a/library/cpp/actors/wilson/wilson_span.cpp b/library/cpp/actors/wilson/wilson_span.cpp
deleted file mode 100644
index dcd458be7c..0000000000
--- a/library/cpp/actors/wilson/wilson_span.cpp
+++ /dev/null
@@ -1,62 +0,0 @@
-#include "wilson_span.h"
-#include "wilson_uploader.h"
-#include <library/cpp/actors/core/log.h>
-#include <google/protobuf/text_format.h>
-
-namespace NWilson {
-
- using namespace NActors;
-
- void SerializeValue(TAttributeValue value, NCommonProto::AnyValue *pb) {
- switch (value.index()) {
- case 0:
- pb->set_string_value(std::get<0>(std::move(value)));
- break;
-
- case 1:
- pb->set_bool_value(std::get<1>(value));
- break;
-
- case 2:
- pb->set_int_value(std::get<2>(value));
- break;
-
- case 3:
- pb->set_double_value(std::get<3>(std::move(value)));
- break;
-
- case 4: {
- auto *array = pb->mutable_array_value();
- for (auto&& item : std::get<4>(std::move(value))) {
- SerializeValue(std::move(item), array->add_values());
- }
- break;
- }
-
- case 5: {
- auto *kv = pb->mutable_kvlist_value();
- for (auto&& [key, value] : std::get<5>(std::move(value))) {
- SerializeKeyValue(std::move(key), std::move(value), kv->add_values());
- }
- break;
- }
-
- case 6:
- pb->set_bytes_value(std::get<6>(std::move(value)));
- break;
- }
- }
-
- void SerializeKeyValue(TString key, TAttributeValue value, NCommonProto::KeyValue *pb) {
- pb->set_key(std::move(key));
- SerializeValue(std::move(value), pb->mutable_value());
- }
-
- void TSpan::Send() {
- if (TlsActivationContext) {
- TActivationContext::Send(new IEventHandle(MakeWilsonUploaderId(), {}, new TEvWilson(&Data->Span)));
- }
- Data->Sent = true;
- }
-
-} // NWilson
diff --git a/library/cpp/actors/wilson/wilson_span.h b/library/cpp/actors/wilson/wilson_span.h
deleted file mode 100644
index 2ebf837cda..0000000000
--- a/library/cpp/actors/wilson/wilson_span.h
+++ /dev/null
@@ -1,244 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/actorsystem.h>
-#include <opentelemetry/proto/trace/v1/trace.pb.h>
-#include <util/generic/hash.h>
-#include <util/datetime/cputimer.h>
-
-#include "wilson_trace.h"
-
-namespace NWilson {
-
- enum class ERelation {
- FollowsFrom,
- ChildOf,
- };
-
- namespace NTraceProto = opentelemetry::proto::trace::v1;
- namespace NCommonProto = opentelemetry::proto::common::v1;
-
- struct TArrayValue;
- struct TKeyValueList;
- struct TBytes;
-
- using TAttributeValue = std::variant<
- TString,
- bool,
- i64,
- double,
- TArrayValue,
- TKeyValueList,
- TBytes
- >;
-
- struct TArrayValue : std::vector<TAttributeValue> {};
- struct TKeyValueList : THashMap<TString, TAttributeValue> {};
- struct TBytes : TString {};
-
- void SerializeKeyValue(TString key, TAttributeValue value, NCommonProto::KeyValue *pb);
-
- enum class EFlags : ui32 {
- NONE = 0,
- AUTO_END = 1,
- };
-
- Y_DECLARE_FLAGS(TFlags, EFlags);
- Y_DECLARE_OPERATORS_FOR_FLAGS(TFlags);
-
- class TSpan {
- struct TData {
- const TInstant StartTime;
- const ui64 StartCycles;
- const TTraceId TraceId;
- NTraceProto::Span Span;
- TFlags Flags;
- int UncaughtExceptions = std::uncaught_exceptions();
- bool Sent = false;
- bool Ignored = false;
-
- TData(TInstant startTime, ui64 startCycles, TTraceId traceId, TFlags flags)
- : StartTime(startTime)
- , StartCycles(startCycles)
- , TraceId(std::move(traceId))
- , Flags(flags)
- {}
-
- ~TData() {
- Y_DEBUG_ABORT_UNLESS(Sent || Ignored);
- }
- };
-
- std::unique_ptr<TData> Data;
-
- public:
- TSpan() = default;
- TSpan(const TSpan&) = delete;
- TSpan(TSpan&&) = default;
-
- TSpan(ui8 verbosity, TTraceId parentId, std::optional<TString> name, TFlags flags = EFlags::NONE)
- : Data(parentId
- ? std::make_unique<TData>(TInstant::Now(), GetCycleCount(), parentId.Span(verbosity), flags)
- : nullptr)
- {
- if (Y_UNLIKELY(*this)) {
- if (verbosity <= parentId.GetVerbosity()) {
- if (!parentId.IsRoot()) {
- Data->Span.set_parent_span_id(parentId.GetSpanIdPtr(), parentId.GetSpanIdSize());
- }
- Data->Span.set_start_time_unix_nano(Data->StartTime.NanoSeconds());
- Data->Span.set_kind(opentelemetry::proto::trace::v1::Span::SPAN_KIND_INTERNAL);
-
- if (name) {
- Name(std::move(*name));
- }
-
- Attribute("node_id", NActors::TActivationContext::ActorSystem()->NodeId);
- } else {
- Data->Ignored = true; // ignore this span due to verbosity mismatch, still allowing child spans to be created
- }
- }
- }
-
- ~TSpan() {
- if (Y_UNLIKELY(*this)) {
- if (std::uncaught_exceptions() != Data->UncaughtExceptions) {
- EndError("span terminated due to stack unwinding");
- } else if (Data->Flags & EFlags::AUTO_END) {
- End();
- } else {
- EndError("unterminated span");
- }
- }
- }
-
- TSpan& operator =(const TSpan&) = delete;
-
- TSpan& operator =(TSpan&& other) {
- if (this != &other) {
- if (Y_UNLIKELY(*this)) {
- EndError("TSpan instance incorrectly overwritten");
- }
- Data = std::exchange(other.Data, nullptr);
- }
- return *this;
- }
-
- explicit operator bool() const {
- return Data && !Data->Sent && !Data->Ignored;
- }
-
- TSpan& EnableAutoEnd() {
- if (Y_UNLIKELY(*this)) {
- Data->Flags |= EFlags::AUTO_END;
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- TSpan& Relation(ERelation /*relation*/) {
- if (Y_UNLIKELY(*this)) {
- // update relation in data somehow
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- TSpan& Name(TString name) {
- if (Y_UNLIKELY(*this)) {
- Data->Span.set_name(std::move(name));
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- TSpan& Attribute(TString name, TAttributeValue value) {
- if (Y_UNLIKELY(*this)) {
- SerializeKeyValue(std::move(name), std::move(value), Data->Span.add_attributes());
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- TSpan& Event(TString name, TKeyValueList attributes) {
- if (Y_UNLIKELY(*this)) {
- auto *event = Data->Span.add_events();
- event->set_time_unix_nano(TimeUnixNano());
- event->set_name(std::move(name));
- for (auto&& [key, value] : attributes) {
- SerializeKeyValue(std::move(key), std::move(value), event->add_attributes());
- }
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- TSpan& Link(const TTraceId& traceId, TKeyValueList attributes) {
- if (Y_UNLIKELY(*this)) {
- auto *link = Data->Span.add_links();
- link->set_trace_id(traceId.GetTraceIdPtr(), traceId.GetTraceIdSize());
- link->set_span_id(traceId.GetSpanIdPtr(), traceId.GetSpanIdSize());
- for (auto&& [key, value] : attributes) {
- SerializeKeyValue(std::move(key), std::move(value), link->add_attributes());
- }
- } else {
- VerifyNotSent();
- }
- return *this;
- }
-
- void EndOk() {
- if (Y_UNLIKELY(*this)) {
- auto *status = Data->Span.mutable_status();
- status->set_code(NTraceProto::Status::STATUS_CODE_OK);
- End();
- } else {
- VerifyNotSent();
- }
- }
-
- void EndError(TString error) {
- if (Y_UNLIKELY(*this)) {
- auto *status = Data->Span.mutable_status();
- status->set_code(NTraceProto::Status::STATUS_CODE_ERROR);
- status->set_message(std::move(error));
- End();
- } else {
- VerifyNotSent();
- }
- }
-
- void End() {
- if (Y_UNLIKELY(*this)) {
- Data->Span.set_trace_id(Data->TraceId.GetTraceIdPtr(), Data->TraceId.GetTraceIdSize());
- Data->Span.set_span_id(Data->TraceId.GetSpanIdPtr(), Data->TraceId.GetSpanIdSize());
- Data->Span.set_end_time_unix_nano(TimeUnixNano());
- Send();
- } else {
- VerifyNotSent();
- }
- }
-
- TTraceId GetTraceId() const {
- return Data ? TTraceId(Data->TraceId) : TTraceId();
- }
-
- private:
- void Send();
-
- ui64 TimeUnixNano() const {
- const TInstant now = Data->StartTime + CyclesToDuration(GetCycleCount() - Data->StartCycles);
- return now.NanoSeconds();
- }
-
- void VerifyNotSent() {
- Y_DEBUG_ABORT_UNLESS(!Data || !Data->Sent, "span has been ended");
- }
- };
-
-} // NWilson
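
TSpan is a move-only handle around an OpenTelemetry span: it is recorded only if its verbosity does not exceed the verbosity carried by the trace id, AUTO_END finishes it from the destructor, and a span dropped without that flag is reported as "unterminated". A usage sketch against the deleted interface (meant to run inside an actor, since the constructor reads the node id from the activation context; names and attribute values are illustrative):

```cpp
#include <library/cpp/actors/wilson/wilson_span.h>

void TraceRequest(NWilson::TTraceId traceId) {
    // Recorded only if 12 <= the verbosity stored in traceId; otherwise silently ignored.
    NWilson::TSpan span(12, std::move(traceId), "HandleRequest");
    span.Attribute("table", TString("events"))
        .Attribute("shard", i64(42));

    // A child operation derives its parent from this span's trace id.
    NWilson::TSpan read(12, span.GetTraceId(), "ReadStep", NWilson::EFlags::AUTO_END);
    // ... do the read; ~TSpan ends `read` automatically because of AUTO_END ...

    span.EndOk();  // without AUTO_END a span must be ended explicitly (EndOk/EndError/End)
}
```
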
diff --git a/library/cpp/actors/wilson/wilson_trace.cpp b/library/cpp/actors/wilson/wilson_trace.cpp
deleted file mode 100644
index 73bed31da3..0000000000
--- a/library/cpp/actors/wilson/wilson_trace.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "wilson_trace.h"
-
-namespace NWilson {
-}
diff --git a/library/cpp/actors/wilson/wilson_trace.h b/library/cpp/actors/wilson/wilson_trace.h
deleted file mode 100644
index 41a6505134..0000000000
--- a/library/cpp/actors/wilson/wilson_trace.h
+++ /dev/null
@@ -1,234 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/monotonic.h>
-#include <library/cpp/actors/protos/actors.pb.h>
-
-#include <library/cpp/string_utils/base64/base64.h>
-
-#include <util/stream/output.h>
-#include <util/random/random.h>
-#include <util/random/fast.h>
-
-#include <util/string/printf.h>
-
-#include <array>
-
-namespace NWilson {
- class TTraceId {
- using TTrace = std::array<ui64, 2>;
-
- TTrace TraceId; // Random id of topmost client request
- ui64 SpanId;
- union {
- struct {
- ui32 Verbosity : 4;
- ui32 TimeToLive : 12;
- };
- ui32 Raw;
- };
-
- private:
- TTraceId(TTrace traceId, ui64 spanId, ui8 verbosity, ui32 timeToLive)
- : TraceId(traceId)
- {
- if (timeToLive == Max<ui32>()) {
- timeToLive = 4095;
- }
- Y_ABORT_UNLESS(verbosity <= 15);
- Y_ABORT_UNLESS(timeToLive <= 4095);
- SpanId = spanId;
- Verbosity = verbosity;
- TimeToLive = timeToLive;
- }
-
- static TTrace GenerateTraceId() {
- for (;;) {
- TTrace res;
- ui32 *p = reinterpret_cast<ui32*>(res.data());
-
- TReallyFastRng32 rng(RandomNumber<ui64>());
- p[0] = rng();
- p[1] = rng();
- p[2] = rng();
- p[3] = rng();
-
- if (res[0] || res[1]) {
- return res;
- }
- }
- }
-
- static ui64 GenerateSpanId() {
- for (;;) {
- if (const ui64 res = RandomNumber<ui64>(); res) { // SpanId can't be zero
- return res;
- }
- }
- }
-
- public:
- using TSerializedTraceId = char[sizeof(TTrace) + sizeof(ui64) + sizeof(ui32)];
-
- public:
- TTraceId(ui64) // NBS stub
- : TTraceId()
- {}
-
- TTraceId() {
- TraceId.fill(0);
- SpanId = 0;
- Raw = 0;
- }
-
- explicit TTraceId(TTrace traceId)
- : TraceId(traceId)
- {
- SpanId = 0;
- Raw = 0;
- }
-
- // allow move semantic
- TTraceId(TTraceId&& other)
- : TraceId(other.TraceId)
- , SpanId(other.SpanId)
- , Raw(other.Raw)
- {
- other.TraceId.fill(0);
- other.SpanId = 1; // make it explicitly invalid
- other.Raw = 0;
- }
-
- // explicit copy
- explicit TTraceId(const TTraceId& other)
- : TraceId(other.TraceId)
- , SpanId(other.SpanId)
- , Raw(other.Raw)
- {
- // validate trace id only when we are making a copy
- other.Validate();
- }
-
- TTraceId(const TSerializedTraceId& in) {
- const char *p = in;
- memcpy(TraceId.data(), p, sizeof(TraceId));
- p += sizeof(TraceId);
- memcpy(&SpanId, p, sizeof(SpanId));
- p += sizeof(SpanId);
- memcpy(&Raw, p, sizeof(Raw));
- p += sizeof(Raw);
- Y_DEBUG_ABORT_UNLESS(p - in == sizeof(TSerializedTraceId));
- }
-
- TTraceId(const NActorsProto::TTraceId& pb)
- : TTraceId()
- {
- if (pb.HasData()) {
- const auto& data = pb.GetData();
- if (data.size() == sizeof(TSerializedTraceId)) {
- *this = *reinterpret_cast<const TSerializedTraceId*>(data.data());
- }
- }
- }
-
- void Serialize(TSerializedTraceId *out) const {
- char *p = *out;
- memcpy(p, TraceId.data(), sizeof(TraceId));
- p += sizeof(TraceId);
- memcpy(p, &SpanId, sizeof(SpanId));
- p += sizeof(SpanId);
- memcpy(p, &Raw, sizeof(Raw));
- p += sizeof(Raw);
- Y_DEBUG_ABORT_UNLESS(p - *out == sizeof(TSerializedTraceId));
- }
-
- void Serialize(NActorsProto::TTraceId *pb) const {
- if (*this) {
- TSerializedTraceId data;
- Serialize(&data);
- pb->SetData(reinterpret_cast<const char*>(&data), sizeof(data));
- }
- }
-
- TTraceId& operator=(TTraceId&& other) {
- if (this != &other) {
- TraceId = other.TraceId;
- SpanId = other.SpanId;
- Raw = other.Raw;
- other.TraceId.fill(0);
- other.SpanId = 1; // make it explicitly invalid
- other.Raw = 0;
- }
- return *this;
- }
-
- // do not allow implicit copy of trace id
- TTraceId& operator=(const TTraceId& other) = delete;
-
- static TTraceId NewTraceId(ui8 verbosity, ui32 timeToLive) {
- return TTraceId(GenerateTraceId(), 0, verbosity, timeToLive);
- }
-
- static TTraceId NewTraceIdThrottled(ui8 verbosity, ui32 timeToLive, std::atomic<NActors::TMonotonic>& counter,
- NActors::TMonotonic now, TDuration periodBetweenSamples) {
- static_assert(std::atomic<NActors::TMonotonic>::is_always_lock_free);
- for (;;) {
- NActors::TMonotonic ts = counter.load();
- if (now < ts) {
- return {};
- } else if (counter.compare_exchange_strong(ts, now + periodBetweenSamples)) {
- return NewTraceId(verbosity, timeToLive);
- }
- }
- }
-
- static TTraceId NewTraceId() { // NBS stub
- return TTraceId();
- }
-
- TTraceId Span(ui8 verbosity) const {
- Validate();
- if (!*this || !TimeToLive) {
- return TTraceId();
- } else if (verbosity <= Verbosity) {
- return TTraceId(TraceId, GenerateSpanId(), Verbosity, TimeToLive - 1);
- } else {
- return TTraceId(TraceId, SpanId, Verbosity, TimeToLive - 1);
- }
- }
-
- TTraceId Span() const { // compatibility stub
- return {};
- }
-
- // Check if request tracing is enabled
- explicit operator bool() const {
- return TraceId[0] || TraceId[1];
- }
-
- bool IsRoot() const {
- return !SpanId;
- }
-
- friend bool operator==(const TTraceId& x, const TTraceId& y) {
- return x.TraceId == y.TraceId && x.SpanId == y.SpanId && x.Raw == y.Raw;
- }
-
- ui8 GetVerbosity() const {
- return Verbosity;
- }
-
- const void *GetTraceIdPtr() const { return TraceId.data(); }
- static constexpr size_t GetTraceIdSize() { return sizeof(TTrace); }
- const void *GetSpanIdPtr() const { return &SpanId; }
- static constexpr size_t GetSpanIdSize() { return sizeof(ui64); }
-
- void Validate() const {
- Y_DEBUG_ABORT_UNLESS(*this || !SpanId);
- }
-
- // for compatibility with NBS
- TTraceId Clone() const { return NWilson::TTraceId(*this); }
- ui64 GetTraceId() const { return 0; }
- void OutputSpanId(IOutputStream&) const {}
- };
-}
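
TTraceId packs a 128-bit trace id, a 64-bit span id, a 4-bit verbosity and a 12-bit time-to-live; Span() decrements the TTL and mints a fresh span id only when the requested verbosity is low enough, and the Serialize overloads carry the id across actor or node boundaries. A small round-trip sketch (values are illustrative):

```cpp
#include <library/cpp/actors/wilson/wilson_trace.h>

void RoundTripTraceId() {
    NWilson::TTraceId root = NWilson::TTraceId::NewTraceId(10 /*verbosity*/, 4095 /*timeToLive*/);

    NWilson::TTraceId sampled = root.Span(8);    // 8 <= 10: new span id, TTL decremented
    NWilson::TTraceId skipped = root.Span(12);   // 12 > 10: keeps the parent span id
    Y_UNUSED(skipped);

    // Ship a trace id inside a protobuf event payload and restore it on the other side.
    NActorsProto::TTraceId pb;
    sampled.Serialize(&pb);
    NWilson::TTraceId restored(pb);
    Y_ABORT_UNLESS(restored == sampled);
}
```
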
diff --git a/library/cpp/actors/wilson/wilson_uploader.cpp b/library/cpp/actors/wilson/wilson_uploader.cpp
deleted file mode 100644
index 3599d66809..0000000000
--- a/library/cpp/actors/wilson/wilson_uploader.cpp
+++ /dev/null
@@ -1,193 +0,0 @@
-#include "wilson_uploader.h"
-#include <library/cpp/actors/core/actor_bootstrapped.h>
-#include <library/cpp/actors/core/hfunc.h>
-#include <library/cpp/actors/core/log.h>
-#include <opentelemetry/proto/collector/trace/v1/trace_service.pb.h>
-#include <opentelemetry/proto/collector/trace/v1/trace_service.grpc.pb.h>
-#include <util/stream/file.h>
-#include <util/string/hex.h>
-#include <grpc++/grpc++.h>
-#include <chrono>
-
-namespace NWilson {
-
- using namespace NActors;
-
- namespace NServiceProto = opentelemetry::proto::collector::trace::v1;
- namespace NTraceProto = opentelemetry::proto::trace::v1;
-
- namespace {
-
- class TWilsonUploader
- : public TActorBootstrapped<TWilsonUploader>
- {
- TString Host;
- ui16 Port;
- TString RootCA;
- TString ServiceName;
-
- std::shared_ptr<grpc::Channel> Channel;
- std::unique_ptr<NServiceProto::TraceService::Stub> Stub;
- grpc::CompletionQueue CQ;
-
- std::unique_ptr<grpc::ClientContext> Context;
- std::unique_ptr<grpc::ClientAsyncResponseReader<NServiceProto::ExportTraceServiceResponse>> Reader;
- NServiceProto::ExportTraceServiceResponse Response;
- grpc::Status Status;
-
- struct TSpanQueueItem {
- TMonotonic ExpirationTimestamp;
- NTraceProto::Span Span;
- ui32 Size;
- };
-
- std::deque<TSpanQueueItem> Spans;
- ui64 SpansSize = 0;
- TMonotonic NextSendTimestamp;
- ui32 MaxSpansAtOnce = 25;
- ui32 MaxSpansPerSecond = 10;
- TDuration MaxSpanTimeInQueue = TDuration::Seconds(60);
-
- bool WakeupScheduled = false;
-
- public:
- TWilsonUploader(TString host, ui16 port, TString rootCA, TString serviceName)
- : Host(std::move(host))
- , Port(std::move(port))
- , RootCA(std::move(rootCA))
- , ServiceName(std::move(serviceName))
- {}
-
- ~TWilsonUploader() {
- CQ.Shutdown();
- }
-
- static constexpr char ActorName[] = "WILSON_UPLOADER_ACTOR";
-
- void Bootstrap() {
- Become(&TThis::StateFunc);
-
- Channel = grpc::CreateChannel(TStringBuilder() << Host << ":" << Port, RootCA ? grpc::SslCredentials({
- .pem_root_certs = TFileInput(RootCA).ReadAll(),
- }) : grpc::InsecureChannelCredentials());
- Stub = NServiceProto::TraceService::NewStub(Channel);
-
- LOG_INFO_S(*TlsActivationContext, 430 /* NKikimrServices::WILSON */, "TWilsonUploader::Bootstrap");
- }
-
- void Handle(TEvWilson::TPtr ev) {
- if (SpansSize >= 100'000'000) {
- LOG_ERROR_S(*TlsActivationContext, 430 /* NKikimrServices::WILSON */, "dropped span due to overflow");
- } else {
- const TMonotonic expirationTimestamp = TActivationContext::Monotonic() + MaxSpanTimeInQueue;
- auto& span = ev->Get()->Span;
- const ui32 size = span.ByteSizeLong();
- Spans.push_back(TSpanQueueItem{expirationTimestamp, std::move(span), size});
- SpansSize += size;
- CheckIfDone();
- TryToSend();
- }
- }
-
- void TryToSend() {
- const TMonotonic now = TActivationContext::Monotonic();
-
- ui32 numSpansDropped = 0;
- while (!Spans.empty()) {
- const TSpanQueueItem& item = Spans.front();
- if (item.ExpirationTimestamp <= now) {
- SpansSize -= item.Size;
- Spans.pop_front();
- ++numSpansDropped;
- } else {
- break;
- }
- }
-
- if (numSpansDropped) {
- LOG_ERROR_S(*TlsActivationContext, 430 /* NKikimrServices::WILSON */,
- "dropped " << numSpansDropped << " span(s) due to expiration");
- }
-
- if (Context || Spans.empty()) {
- return;
- } else if (now < NextSendTimestamp) {
- ScheduleWakeup(NextSendTimestamp);
- return;
- }
-
- NServiceProto::ExportTraceServiceRequest request;
- auto *rspan = request.add_resource_spans();
- auto *serviceNameAttr = rspan->mutable_resource()->add_attributes();
- serviceNameAttr->set_key("service.name");
- serviceNameAttr->mutable_value()->set_string_value(ServiceName);
- auto *sspan = rspan->add_scope_spans();
-
- NextSendTimestamp = now;
- for (ui32 i = 0; i < MaxSpansAtOnce && !Spans.empty(); ++i, Spans.pop_front()) {
- auto& item = Spans.front();
- auto& s = item.Span;
-
- LOG_DEBUG_S(*TlsActivationContext, 430 /* NKikimrServices::WILSON */, "exporting span"
- << " TraceId# " << HexEncode(s.trace_id())
- << " SpanId# " << HexEncode(s.span_id())
- << " ParentSpanId# " << HexEncode(s.parent_span_id())
- << " Name# " << s.name());
-
- SpansSize -= item.Size;
- s.Swap(sspan->add_spans());
- NextSendTimestamp += TDuration::MicroSeconds(1'000'000 / MaxSpansPerSecond);
- }
-
- Context = std::make_unique<grpc::ClientContext>();
- Reader = Stub->AsyncExport(Context.get(), std::move(request), &CQ);
- Reader->Finish(&Response, &Status, nullptr);
- }
-
- void CheckIfDone() {
- if (Context) {
- void *tag;
- bool ok;
- if (CQ.AsyncNext(&tag, &ok, std::chrono::system_clock::now()) == grpc::CompletionQueue::GOT_EVENT) {
- if (!Status.ok()) {
- LOG_ERROR_S(*TlsActivationContext, 430 /* NKikimrServices::WILSON */,
- "failed to commit traces: " << Status.error_message());
- }
-
- Reader.reset();
- Context.reset();
- } else {
- ScheduleWakeup(TDuration::MilliSeconds(100));
- }
- }
- }
-
- template<typename T>
- void ScheduleWakeup(T&& deadline) {
- if (!WakeupScheduled) {
- TActivationContext::Schedule(deadline, new IEventHandle(TEvents::TSystem::Wakeup, 0, SelfId(), {},
- nullptr, 0));
- WakeupScheduled = true;
- }
- }
-
- void HandleWakeup() {
- Y_ABORT_UNLESS(WakeupScheduled);
- WakeupScheduled = false;
- CheckIfDone();
- TryToSend();
- }
-
- STRICT_STFUNC(StateFunc,
- hFunc(TEvWilson, Handle);
- cFunc(TEvents::TSystem::Wakeup, HandleWakeup);
- );
- };
-
- } // anonymous
-
- IActor *CreateWilsonUploader(TString host, ui16 port, TString rootCA, TString serviceName) {
- return new TWilsonUploader(std::move(host), port, std::move(rootCA), std::move(serviceName));
- }
-
-} // NWilson
diff --git a/library/cpp/actors/wilson/wilson_uploader.h b/library/cpp/actors/wilson/wilson_uploader.h
deleted file mode 100644
index 86c23ccefe..0000000000
--- a/library/cpp/actors/wilson/wilson_uploader.h
+++ /dev/null
@@ -1,24 +0,0 @@
-#pragma once
-
-#include <library/cpp/actors/core/actor.h>
-#include <library/cpp/actors/core/event_local.h>
-#include <library/cpp/actors/core/events.h>
-#include <opentelemetry/proto/trace/v1/trace.pb.h>
-
-namespace NWilson {
-
- struct TEvWilson : NActors::TEventLocal<TEvWilson, NActors::TEvents::TSystem::Wilson> {
- opentelemetry::proto::trace::v1::Span Span;
-
- TEvWilson(opentelemetry::proto::trace::v1::Span *span) {
- Span.Swap(span);
- }
- };
-
- inline NActors::TActorId MakeWilsonUploaderId() {
- return NActors::TActorId(0, TStringBuf("WilsonUpload", 12));
- }
-
- NActors::IActor *CreateWilsonUploader(TString host, ui16 port, TString rootCA, TString serviceName);
-
-} // NWilson
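
TSpan::Send addresses the fixed service id from MakeWilsonUploaderId(), so the uploader has to be registered under that id in each node's actor system. A wiring sketch; it assumes the usual Register/RegisterLocalService calls of NActors::TActorSystem, and the endpoint, CA and service name are placeholders:

```cpp
#include <library/cpp/actors/wilson/wilson_uploader.h>
#include <library/cpp/actors/core/actorsystem.h>

void StartWilsonUploader(NActors::TActorSystem& actorSystem) {
    NActors::IActor* uploader = NWilson::CreateWilsonUploader(
        "otel-collector.example.net",  // OTLP/gRPC collector host (placeholder)
        4317,                          // conventional OTLP gRPC port
        /*rootCA=*/"",                 // empty -> insecure channel in the deleted implementation
        /*serviceName=*/"ydb");
    const NActors::TActorId actorId = actorSystem.Register(uploader);
    actorSystem.RegisterLocalService(NWilson::MakeWilsonUploaderId(), actorId);
}
```
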
diff --git a/library/cpp/actors/wilson/ya.make b/library/cpp/actors/wilson/ya.make
deleted file mode 100644
index 0dc8ba79ad..0000000000
--- a/library/cpp/actors/wilson/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-LIBRARY()
-
-SRCS(
- wilson_event.cpp
- wilson_span.cpp
- wilson_profile_span.cpp
- wilson_trace.cpp
- wilson_uploader.cpp
-)
-
-PEERDIR(
- library/cpp/actors/core
- library/cpp/actors/protos
- library/cpp/actors/wilson/protos
-)
-
-END()
-
-RECURSE(
- protos
-)
diff --git a/library/cpp/deprecated/autoarray/README.md b/library/cpp/deprecated/autoarray/README.md
deleted file mode 100644
index 1d83147cee..0000000000
--- a/library/cpp/deprecated/autoarray/README.md
+++ /dev/null
@@ -1,3 +0,0 @@
-Pre-C++11 vector-like container.
-
-Just use std::vector. If you need to fill your vector with custom-constructed data, use reserve+emplace_back (but make sure that your elements are movable).
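
The recommendation maps directly onto autoarray's main use case, a fixed-size buffer of non-default-constructible elements. A minimal sketch of the reserve+emplace_back pattern in standard C++, with a hypothetical TRecord element type standing in for whatever the autoarray held:

```cpp
#include <string>
#include <vector>

struct TRecord {
    std::string Key;
    int Value;
    TRecord(std::string key, int value) : Key(std::move(key)), Value(value) {}
};

std::vector<TRecord> BuildRecords(size_t count) {
    std::vector<TRecord> records;
    records.reserve(count);                              // one allocation up front, like autoarray's AllocBuf
    for (size_t i = 0; i < count; ++i) {
        records.emplace_back("key", static_cast<int>(i)); // constructed in place, no default ctor required
    }
    return records;                                       // TRecord only needs to be movable, as noted above
}
```
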
diff --git a/library/cpp/deprecated/autoarray/autoarray.cpp b/library/cpp/deprecated/autoarray/autoarray.cpp
deleted file mode 100644
index 15167f27f6..0000000000
--- a/library/cpp/deprecated/autoarray/autoarray.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "autoarray.h"
diff --git a/library/cpp/deprecated/autoarray/autoarray.h b/library/cpp/deprecated/autoarray/autoarray.h
deleted file mode 100644
index 2aa12c5916..0000000000
--- a/library/cpp/deprecated/autoarray/autoarray.h
+++ /dev/null
@@ -1,264 +0,0 @@
-#pragma once
-
-#include <util/system/compat.h>
-#include <util/system/yassert.h>
-#include <util/system/defaults.h>
-#include <util/system/sys_alloc.h>
-
-#include <util/generic/typetraits.h>
-#include <utility>
-
-#include <new>
-#include <util/generic/noncopyable.h>
-
-struct autoarray_getindex {
- autoarray_getindex() = default;
-};
-
-struct aarr_b0 {
- aarr_b0() = default;
-};
-
-struct aarr_nofill {
- aarr_nofill() = default;
-};
-
-template <typename T>
-struct ynd_type_traits {
- enum {
- empty_destructor = TTypeTraits<T>::IsPod,
- };
-};
-
-template <class T>
-class autoarray : TNonCopyable {
-protected:
- T* arr;
- size_t _size;
-
-private:
- void AllocBuf(size_t siz) {
- arr = nullptr;
- _size = 0;
- if (siz) {
- arr = (T*)y_allocate(sizeof(T) * siz);
- _size = siz;
- }
- }
-
-public:
- using value_type = T;
- using iterator = T*;
- using const_iterator = const T*;
-
- autoarray()
- : arr(nullptr)
- , _size(0)
- {
- }
- autoarray(size_t siz) {
- AllocBuf(siz);
- T* curr = arr;
- try {
- for (T* end = arr + _size; curr != end; ++curr)
- new (curr) T();
- } catch (...) {
- for (--curr; curr >= arr; --curr)
- curr->~T();
- y_deallocate(arr);
- throw;
- }
- }
- template <class A>
- explicit autoarray(size_t siz, A& fill) {
- AllocBuf(siz);
- T* curr = arr;
- try {
- for (T* end = arr + _size; curr != end; ++curr)
- new (curr) T(fill);
- } catch (...) {
- for (--curr; curr >= arr; --curr)
- curr->~T();
- y_deallocate(arr);
- throw;
- }
- }
- explicit autoarray(size_t siz, autoarray_getindex) {
- AllocBuf(siz);
- size_t nCurrent = 0;
- try {
- for (nCurrent = 0; nCurrent < _size; ++nCurrent)
- new (&arr[nCurrent]) T(nCurrent);
- } catch (...) {
- for (size_t n = 0; n < nCurrent; ++n)
- arr[n].~T();
- y_deallocate(arr);
- throw;
- }
- }
- explicit autoarray(size_t siz, aarr_b0) {
- AllocBuf(siz);
- memset(arr, 0, _size * sizeof(T));
- }
- explicit autoarray(size_t siz, aarr_nofill) {
- AllocBuf(siz);
- }
- template <class A>
- explicit autoarray(const A* fill, size_t siz) {
- AllocBuf(siz);
- size_t nCurrent = 0;
- try {
- for (nCurrent = 0; nCurrent < _size; ++nCurrent)
- new (&arr[nCurrent]) T(fill[nCurrent]);
- } catch (...) {
- for (size_t n = 0; n < nCurrent; ++n)
- arr[n].~T();
- y_deallocate(arr);
- throw;
- }
- }
- template <class A, class B>
- explicit autoarray(const A* fill, const B* cfill, size_t siz) {
- AllocBuf(siz);
- size_t nCurrent = 0;
- try {
- for (nCurrent = 0; nCurrent < _size; ++nCurrent)
- new (&arr[nCurrent]) T(fill[nCurrent], cfill);
- } catch (...) {
- for (size_t n = 0; n < nCurrent; ++n)
- arr[n].~T();
- y_deallocate(arr);
- throw;
- }
- }
- template <class A>
- explicit autoarray(const A* fill, size_t initsiz, size_t fullsiz) {
- AllocBuf(fullsiz);
- size_t nCurrent = 0;
- try {
- for (nCurrent = 0; nCurrent < ((initsiz < _size) ? initsiz : _size); ++nCurrent)
- new (&arr[nCurrent]) T(fill[nCurrent]);
- for (; nCurrent < _size; ++nCurrent)
- new (&arr[nCurrent]) T();
- } catch (...) {
- for (size_t n = 0; n < nCurrent; ++n)
- arr[n].~T();
- y_deallocate(arr);
- throw;
- }
- }
- template <class A>
- explicit autoarray(const A* fill, size_t initsiz, size_t fullsiz, const T& dummy) {
- AllocBuf(fullsiz);
- size_t nCurrent = 0;
- try {
- for (nCurrent = 0; nCurrent < ((initsiz < _size) ? initsiz : _size); ++nCurrent)
- new (&arr[nCurrent]) T(fill[nCurrent]);
- for (; nCurrent < _size; ++nCurrent)
- new (&arr[nCurrent]) T(dummy);
- } catch (...) {
- for (size_t n = 0; n < nCurrent; ++n)
- arr[n].~T();
- y_deallocate(arr);
- throw;
- }
- }
-
- template <class... R>
- explicit autoarray(size_t siz, R&&... fill) {
- AllocBuf(siz);
- T* curr = arr;
- try {
- for (T* end = arr + _size; curr != end; ++curr)
- new (curr) T(std::forward<R>(fill)...);
- } catch (...) {
- for (--curr; curr >= arr; --curr)
- curr->~T();
- y_deallocate(arr);
- throw;
- }
- }
- ~autoarray() {
- if (_size) {
- if (!ynd_type_traits<T>::empty_destructor)
- for (T *curr = arr, *end = arr + _size; curr != end; ++curr)
- curr->~T();
- y_deallocate(arr);
- }
- }
- T& operator[](size_t pos) {
- Y_ASSERT(pos < _size);
- return arr[pos];
- }
- const T& operator[](size_t pos) const {
- Y_ASSERT(pos < _size);
- return arr[pos];
- }
- size_t size() const {
- return _size;
- }
- void swap(autoarray& with) {
- T* tmp_arr = arr;
- size_t tmp_size = _size;
- arr = with.arr;
- _size = with._size;
- with.arr = tmp_arr;
- with._size = tmp_size;
- }
- void resize(size_t siz) {
- autoarray<T> tmp(arr, _size, siz);
- swap(tmp);
- }
- void resize(size_t siz, const T& dummy) {
- autoarray<T> tmp(arr, _size, siz, dummy);
- swap(tmp);
- }
- T* rawpointer() {
- return arr;
- }
- const T* operator~() const {
- return arr;
- }
- T* begin() {
- return arr;
- }
- T* end() {
- return arr + _size;
- }
- T& back() {
- Y_ASSERT(_size);
- return arr[_size - 1];
- }
- bool empty() const {
- return !_size;
- }
- bool operator!() const {
- return !_size;
- }
- size_t operator+() const {
- return _size;
- }
- const T* begin() const {
- return arr;
- }
- const T* end() const {
- return arr + _size;
- }
- const T& back() const {
- Y_ASSERT(_size);
- return arr[_size - 1];
- }
- //operator T*() { return arr; }
-};
-
-template <class T>
-inline bool operator==(const autoarray<T>& a, const autoarray<T>& b) {
- size_t count = a.size();
- if (count != b.size())
- return false;
- for (size_t i = 0; i < count; ++i) {
- if (a[i] != b[i])
- return false;
- }
- return true;
-}
diff --git a/library/cpp/deprecated/autoarray/ya.make b/library/cpp/deprecated/autoarray/ya.make
deleted file mode 100644
index 4b055f8c29..0000000000
--- a/library/cpp/deprecated/autoarray/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-LIBRARY()
-
-SRCS(
- autoarray.cpp
-)
-
-END()
diff --git a/library/cpp/deprecated/fgood/README.md b/library/cpp/deprecated/fgood/README.md
deleted file mode 100644
index 4f66289657..0000000000
--- a/library/cpp/deprecated/fgood/README.md
+++ /dev/null
@@ -1,15 +0,0 @@
-Some ancient wrappers on top of FILE*, and some string manipulation functions.
-
-Alternatives are as follows.
-
-For TFILEPtr. Use TIFStream or TOFStream if you need IO. For some rare use cases a TFileMap might also do.
-
-For fput/fget/getline. Use streams API.
-
-For struct ffb and struct prnstr. Just don't use them. Even if you can figure out what they do.
-
-For the sf family of functions and TLineSplitter. Just use Split* from util/string/split.h.
-
-For TSFReader. Use TMapTsvFile.
-
-For read_or_die family of functions. Use streams API.
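
For the sf()/TLineSplitter cases, a sketch of the two most common replacements, assuming the StringSplitter interface from util/string/split.h (Split, SplitBySet, SkipEmpty, Collect):

```cpp
#include <util/generic/strbuf.h>
#include <util/generic/vector.h>
#include <util/string/split.h>

// Tab-separated fields: the plain sf(fb, buf, sz) use case.
TVector<TStringBuf> SplitTsvLine(TStringBuf line) {
    TVector<TStringBuf> fields;
    StringSplitter(line).Split('\t').Collect(&fields);
    return fields;
}

// awk-like splitting (any whitespace separates, empty fields dropped),
// the behaviour sf() provides when the separator is ' '.
TVector<TStringBuf> SplitBlankSeparated(TStringBuf line) {
    TVector<TStringBuf> fields;
    StringSplitter(line).SplitBySet(" \t\n\v\f\r").SkipEmpty().Collect(&fields);
    return fields;
}
```
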
diff --git a/library/cpp/deprecated/fgood/ffb.cpp b/library/cpp/deprecated/fgood/ffb.cpp
deleted file mode 100644
index aa9da861a6..0000000000
--- a/library/cpp/deprecated/fgood/ffb.cpp
+++ /dev/null
@@ -1,407 +0,0 @@
-#include "ffb.h"
-
-#include <util/string/util.h> // str_spn
-#include <util/system/compat.h>
-#include <util/generic/yexception.h>
-
-#include <cstdio>
-#include <algorithm>
-
-#include <ctype.h>
-
-#ifdef _win_
-#include <io.h>
-#else
-#include <unistd.h>
-#endif
-
-ffb::ffb(FILE* file)
- : TFILEPtr(file)
-{
- if (file && !isatty(fileno(file)) && BUFSIZ < 512 * 1024)
- setvbuf(file, nullptr, _IOFBF, 512 * 1024);
-}
-
-void ffb::operator=(FILE* f) {
- TFILEPtr::operator=(f);
- if (f && !isatty(fileno(f)) && BUFSIZ < 512 * 1024)
- setvbuf(f, nullptr, _IOFBF, 512 * 1024);
-}
-
-void ffb::open(const char* name, const char* mode) {
- TFILEPtr::open(name, mode);
- if (!isatty(fileno(*this)) && BUFSIZ < 512 * 1024)
- setvbuf(*this, nullptr, _IOFBF, 512 * 1024);
-}
-
-int sf(char** fb, char* buf) { //don't want to call sf(fb, buf, 32)
- if (!(*buf && *buf != 10)) {
- *fb = nullptr;
- return 0;
- }
- int n = 1;
- fb[0] = buf;
- while (*buf && *buf != 10 && n < 31) {
- if (*buf == '\t') {
- *buf++ = 0;
- fb[n++] = buf;
- continue;
- }
- buf++;
- }
- if (*buf == 10 && buf[-1] == 13)
- buf[-1] = 0;
- *buf = 0;
- fb[n] = nullptr;
- return n;
-}
-
-int sf(char** fb, char* buf, size_t fb_sz) {
- if (!(*buf && *buf != 10)) {
- *fb = nullptr;
- return 0;
- }
- fb_sz--;
- int n = 1;
- fb[0] = buf;
- while (*buf && *buf != 10 && n < (int)fb_sz) {
- if (*buf == '\t') {
- *buf++ = 0;
- fb[n++] = buf;
- continue;
- }
- buf++;
- }
- if (*buf == 10 && buf[-1] == 13)
- buf[-1] = 0;
- *buf = 0;
- fb[n] = nullptr;
- return n;
-}
-
-inline int sf_blank(char** fb, char* buf, size_t fb_sz) {
- while (isspace((ui8)*buf))
- buf++;
- if (!*buf) {
- *fb = nullptr;
- return 0;
- }
- fb_sz--;
- int n = 1;
- fb[0] = buf;
- while (*buf && *buf != 10 && n < (int)fb_sz) {
- if (isspace((ui8)*buf)) {
- *buf++ = 0;
- while (isspace((ui8)*buf))
- buf++;
- if (*buf)
- fb[n++] = buf;
- continue;
- }
- buf++;
- }
- if (*buf == 10 && buf[-1] == 13)
- buf[-1] = 0;
- *buf = 0;
- fb[n] = nullptr;
- return n;
-}
-
-int sf(char fs, char** fb, char* buf, size_t fb_sz) {
- if (fs == ' ')
- return sf_blank(fb, buf, fb_sz);
- while (*buf == fs)
- buf++;
- if (!(*buf && *buf != 10)) {
- *fb = nullptr;
- return 0;
- }
- fb_sz--;
- int n = 1;
- fb[0] = buf;
- while (*buf && *buf != 10 && n < (int)fb_sz) {
- if (*buf == fs) {
- *buf++ = 0;
- while (*buf == fs)
- buf++;
- fb[n++] = buf;
- continue;
- }
- buf++;
- }
- if (*buf == 10 && buf[-1] == 13)
- buf[-1] = 0;
- *buf = 0;
- fb[n] = nullptr;
- return n;
-}
-
-int sf(const char* fs, char** fb, char* buf, size_t fb_sz) {
- if (!(*buf && *buf != 10)) {
- *fb = nullptr;
- return 0;
- }
- int fs_len = strlen(fs);
- fb_sz--;
- int n = 1;
- fb[0] = buf;
- while (*buf && *buf != 10 && n < (int)fb_sz) {
- if (*buf == *fs && !strncmp(buf + 1, fs + 1, fs_len - 1)) {
- *buf = 0;
- buf += fs_len;
- fb[n++] = buf;
- continue;
- }
- buf++;
- }
- if (*buf == 10 && buf[-1] == 13)
- buf[-1] = 0;
- *buf = 0;
- fb[n] = nullptr;
- return n;
-}
-
-inline bool is_end(const char* p) {
- return !p || !p[0];
-}
-
-int sf(const char* seps, char* buf, char** fb, size_t fb_sz) {
- if (fb_sz < 1 || is_end(buf)) {
- *fb = nullptr;
- return 0;
- }
- str_spn sseps(seps);
- fb[0] = nullptr;
- int n = 0;
- // skip leading delimiters
- buf = sseps.cbrk(buf);
- if (is_end(buf))
- return 0;
- // store fields
- while (n < (int)fb_sz) {
- fb[n++] = buf;
- // find delimiters
- buf = sseps.brk(buf + 1);
- if (is_end(buf))
- break;
- *buf = 0;
- // skip delimiters
- buf = sseps.cbrk(buf + 1);
- if (is_end(buf))
- break;
- }
- fb[n] = nullptr;
- return n;
-}
-
-void TLineSplitter::operator()(char* p, TVector<char*>& fields) const {
- if (!p || !*p)
- return;
- char* q = p;
- while (1) {
- p = Sep.brk(p);
- if (q && (p - q || !SkipEmpty()))
- fields.push_back(q);
- q = nullptr;
- if (!*p)
- break;
- if (SepStrLen == 1 || (SepStrLen > 1 && !strncmp(p + 1, SepStr + 1, SepStrLen - 1))) {
- *p = 0;
- p += SepStrLen;
- q = p;
- } else
- p++;
- }
-}
-
-void TLineSplitter::operator()(const char* p, TVector<std::pair<const char*, size_t>>& fields) const {
- if (!p || !*p)
- return;
- const char* q = p;
- while (1) {
- p = Sep.brk(p);
- if (q && (p - q || !SkipEmpty()))
- fields.push_back(std::make_pair(q, p - q));
- q = nullptr;
- if (!*p)
- break;
- if (SepStrLen == 1 || (SepStrLen > 1 && !strncmp(p + 1, SepStr + 1, SepStrLen - 1))) {
- p += SepStrLen;
- q = p;
- } else
- p++;
- }
-}
-
-TSFReader::TSFReader(const char* fname, char sep, i32 nfrq) // if sep == ' ' isspace will be imitated (for compat)
- : Split(str_spn(sep == ' ' ? "\t\n\v\f\r " : TString(1, sep).data()), sep == ' ')
- , OpenPipe(false)
-{
- Open(fname, nfrq);
-}
-
-TSFReader::TSFReader(const char* fname, const char* sep, i32 nfrq)
- : Split(sep, false)
- , OpenPipe(false)
-{
- Open(fname, nfrq);
-}
-
-TSFReader::TSFReader(const char* fname, const TLineSplitter& spl, i32 nfrq)
- : Split(spl)
- , OpenPipe(false)
-{
- Open(fname, nfrq);
-}
-
-void TSFReader::Open(const char* fname, i32 nfrq, size_t vbuf_size) {
- FieldsRequired = nfrq;
- NF = NR = 0;
-
- if (IsOpen())
- File.close();
-
- if (!fname)
- return;
-
- if (!strcmp(fname, "/dev/stdin")) {
- File.assign(stdin, "/dev/stdin");
- } else {
- if (OpenPipe)
- File.popen(fname, "r");
- else
- File.open(fname, "r");
- }
- OpenPipe = false;
- if (!isatty(fileno(File)))
- setvbuf(File, nullptr, _IOFBF, vbuf_size);
-}
-
-void TSFReader::Popen(const char* pname, i32 nfrq, size_t vbuf_size) {
- OpenPipe = true;
- Open(pname, nfrq, vbuf_size);
-}
-
-bool TSFReader::NextLine(segmented_string_pool* pool) {
- size_t line_len = 0;
-
-#ifdef __FreeBSD__
- char* ptr = fgetln(File, &line_len);
- if (!ptr)
- return false;
- if (!line_len || ptr[line_len - 1] != '\n') { // last line w/o newline
- Buf.AssignNoAlias(ptr, line_len);
- ptr = Buf.begin();
- } else {
- // can safely replace newline with \0
- ptr[line_len - 1] = 0;
- --line_len;
- }
-#else
- if (!getline(File, Buf))
- return false;
- char* ptr = Buf.begin();
- line_len = Buf.size();
-#endif
- if (line_len && ptr[line_len - 1] == '\r')
- ptr[line_len - 1] = 0;
-
- if (pool) {
- char* nptr = pool->append(ptr);
- Y_ASSERT(!strcmp(ptr, nptr));
- ptr = nptr;
- }
-
- ++NR;
- Fields.clear();
- Split(ptr, Fields);
- NF = Fields.size();
-
- if (FieldsRequired != -1 && FieldsRequired != (int)NF)
- ythrow yexception() << File.name() << " line " << NR << ": " << NF << " fields, expected " << FieldsRequired;
-
- return true;
-}
-
-int prnstr::f(const char* c, ...) {
- va_list params;
- int n = asize - pos, k;
- va_start(params, c);
- while ((k = vsnprintf(buf + pos, n, c, params)) >= n) {
- n += asize, asize *= 2;
- while (k + pos >= n)
- n += asize, asize *= 2;
- char* t = new char[asize];
- memcpy(t, buf, pos);
- delete[] buf;
- buf = t;
- va_end(params);
- va_start(params, c);
- }
- pos += k;
- va_end(params);
- return k;
-}
-int prnstr::s(const char* c, size_t k) {
- if (!c)
- return 0;
- size_t n = asize - pos;
- if (k >= n) {
- n += asize, asize *= 2;
- while (k + pos >= n)
- n += asize, asize *= 2;
- char* t = new char[asize];
- memcpy(t, buf, pos);
- delete[] buf;
- buf = t;
- }
- memcpy(buf + pos, c, k);
- pos += k;
- buf[pos] = 0;
- return k;
-}
-void prnstr::clear() {
- pos = 0;
- if (asize > 32768) {
- asize = 32768;
- delete[] buf;
- buf = new char[asize];
- }
-}
-
-void prnstr::swap(prnstr& w) {
- std::swap(buf, w.buf);
- std::swap(pos, w.pos);
- std::swap(asize, w.asize);
-}
-
-FILE* read_or_die(const char* fname) {
- FILE* f = fopen(fname, "rb");
- if (!f)
- err(1, "%s", fname);
- return f;
-}
-FILE* write_or_die(const char* fname) {
- FILE* f = fopen(fname, "wb");
- if (!f)
- err(1, "%s", fname);
- return f;
-}
-FILE* fopen_or_die(const char* fname, const char* mode) {
- FILE* f = fopen(fname, mode);
- if (!f)
- err(1, "%s (mode '%s')", fname, mode);
- return f;
-}
-
-FILE* fopen_chk(const char* fname, const char* mode) {
- FILE* f = fopen(fname, mode);
- if (!f)
- ythrow yexception() << fname << " (mode '" << mode << "'): " << LastSystemErrorText();
- return f;
-}
-
-void fclose_chk(FILE* f, const char* fname) {
- if (fclose(f))
- ythrow yexception() << "file " << fname << ": " << LastSystemErrorText();
-}
diff --git a/library/cpp/deprecated/fgood/ffb.h b/library/cpp/deprecated/fgood/ffb.h
deleted file mode 100644
index ca229eb65a..0000000000
--- a/library/cpp/deprecated/fgood/ffb.h
+++ /dev/null
@@ -1,264 +0,0 @@
-#pragma once
-
-#include "fgood.h"
-
-#include <util/string/util.h> // str_spn
-#include <util/string/split.h> // str_spn
-#include <util/memory/segmented_string_pool.h>
-#include <util/generic/string.h>
-#include <util/generic/vector.h>
-#include <util/generic/noncopyable.h>
-
-#include <utility>
-
-#include <cstdarg>
-#include <cstring>
-
-struct ffb: public TFILEPtr {
- ffb() {
- }
- ffb(FILE* file);
- ffb(const char* name, const char* mode) {
- open(name, mode);
- }
- void operator=(FILE* f); // take ownership
- void open(const char* name, const char* mode);
- int f(const char* c, ...) {
- va_list args;
- va_start(args, c);
- return vfprintf(*this, c, args);
- }
- void s(const char* c) {
- fsput(c, strlen(c));
- }
- void b(const void* cc, int n) {
- fsput((const char*)cc, n);
- }
- void B(const void* cc, int N) {
- fsput((const char*)cc, N);
- }
- void c(char c) {
- fputc(c);
- }
- void cbe(wchar16 c) { // big endian utf-16
- fputc(char(c >> 8)); //Hi8
- fputc(char(c & 255)); //Lo8
- }
- void sbe(const wchar16* c) {
- for (; *c; c++)
- cbe(*c);
- }
- void fclose() {
- close();
- }
-};
-
-// split fields of tab-delimited line of text
-// here and below the actual size of fb must be fb_sz + 1 so that fb[fb_sz] can be zero
-int sf(char** fb, char* buf, size_t fb_sz);
-int sf(char** fb, char* buf /* fb_sz == 32 */);
-
-// split fields of char-delimited line of text
-// Warning: delim = ' ' imitates awk: initial separators are skipped,
-// repeated seps treated as one, all chars less than ' ' treated as separators.
-int sf(char fs, char** fb, char* buf, size_t fb_sz = 32);
-
-// split fields of string-delimited line of text (fs is NOT a regexp)
-// (usually fs is "@@")
-int sf(const char* fs, char** fb, char* buf, size_t fb_sz = 32);
-
-// split fields of char-delimited line of text, set of char-separators is given
-// Warning: repeated seps are treated as one, initial seps are skipped
-// newlines are NOT ignored.
-int sf(const char* seps, char* buf, char** fb, size_t fb_sz = 32);
-
-inline char* chomp(char* buf) {
- char* c = buf + strlen(buf);
- if (c > buf && c[-1] == '\n') {
- *--c = 0;
-#ifdef _win32_
- if (c > buf && c[-1] == '\r')
- *--c = 0;
-#endif
- }
- return buf;
-}
-
-inline char* chomp_cr(char* buf) {
- char* c = buf + strlen(buf);
- if (c > buf && c[-1] == '\n')
- *--c = 0;
- if (c > buf && c[-1] == '\r')
- *--c = 0;
- return buf;
-}
-
-class TLineSplitter {
-protected:
- enum { // Default: Split string by SepStr
- SplitByAnySep = 1, // Split string by Sep
- NoEmptyFields = 2 // Skip all empty fields between separators
- };
-
-private:
- ui32 Flags;
- const str_spn Sep; // collection of separators
- const char* SepStr; // pointer exact string to separate by
- size_t SepStrLen; // length of separator string
-
-public:
- TLineSplitter(const char* sep, bool noEmpty)
- : Flags(noEmpty ? NoEmptyFields : 0)
- , Sep(TString(sep, 1).data())
- , SepStr(sep)
- , SepStrLen(strlen(sep))
- {
- }
- TLineSplitter(const str_spn& sep, bool noEmpty = false)
- : Flags(SplitByAnySep | (noEmpty ? NoEmptyFields : 0))
- , Sep(sep)
- , SepStr(nullptr)
- , SepStrLen(1)
- {
- }
- bool AnySep() const {
- return Flags & SplitByAnySep;
- }
- bool SkipEmpty() const {
- return Flags & NoEmptyFields;
- }
- /// Separates a string into tokens
- /// Expecting a zero-terminated string
- /// By default returns empty fields between sequential separators
- void operator()(char* p, TVector<char*>& fields) const;
- /// Same, but for const string - fills vector of pairs (pointer, length)
- void operator()(const char* p, TVector<std::pair<const char*, size_t>>& fields) const;
-};
-
-/**
- * Use library/cpp/map_text_file/map_tsv_file.h instead.
- */
-class TSFReader {
- TString Buf; // buffer used for non-'\n'-terminated string and for non-freebsd work
- TLineSplitter Split;
- TVector<char*> Fields;
- size_t NF; // Fields.size()
- size_t NR;
-
- TFILEPtr File;
-
- bool OpenPipe; // internal flag that turns open() to popen()
-
- i32 FieldsRequired; // if != -1, != nf, terminate program
-
-public:
- // char separator
- // Warning: delim = ' ' imitates awk: initial separators are skipped,
- // all chars less than ' ' treated as separators.
- TSFReader(const char* fname = nullptr, char sep = '\t', i32 nf_reqired = -1);
- // exact string separator
- TSFReader(const char* fname, const char* sep, i32 nf_reqired = -1);
- // fully customizable
- TSFReader(const char* fname, const TLineSplitter& spl, i32 nf_reqired = -1);
-
- void Open(const char* fname, i32 nf_reqired = -1, size_t vbufsize = 1u << 21); // use "/dev/stdin" for stdin
- void Popen(const char* pname, i32 nf_reqired = -1, size_t vbufsize = 1u << 21);
-
- bool NextLine(segmented_string_pool* pool = nullptr);
-
- bool IsOpen() const {
- return (FILE*)File != nullptr;
- }
- bool IsEof() const {
- return feof(File);
- }
- void Close() {
- File.close();
- }
- void Rewind() {
- File.seek(0, SEEK_SET);
- }
- void Seek(i64 offset, int mode = SEEK_SET) {
- File.seek(offset, mode);
- }
- i64 Tell() const {
- return ftell(File);
- }
- char*& operator[](size_t ind) {
- //if (ind >= NF)
- // throw yexception("Can't return reference to unexisting field %" PRISZT, ind);
- return Fields[ind];
- }
- const char* operator[](size_t ind) const {
- if (ind >= NF)
- return nullptr;
- return Fields[ind];
- }
- operator int() const { // note: empty input line makes 0 fields
- return (int)NF;
- }
- const char* Name() const {
- return File.name().data();
- }
- size_t Line() const {
- return NR;
- }
- const TVector<char*>& GetFields() const {
- return Fields;
- }
-};
-
-struct prnstr {
- char* buf;
- int pos;
- int asize;
- prnstr()
- : pos(0)
- {
- asize = 32;
- buf = new char[asize];
- }
- explicit prnstr(int asz)
- : pos(0)
- {
- asize = asz;
- buf = new char[asize];
- }
- int f(const char* c, ...);
- int s(const char* c1, const char* c2);
- int s(const char* c1, const char* c2, const char* c3);
- int s(const char* c, size_t len);
- //int s(const char *c);
- int s(const char* c) {
- return c ? s(c, strlen(c)) : 0;
- }
- int s(const TString& c);
- int s_htmesc(const char* c, bool enc_utf = false);
- int s_htmesc_w(const char* c);
- int c(char c);
- int cu(wchar32 c); //for utf-8
- void restart() {
- *buf = 0;
- pos = 0;
- }
- const char* operator~() const {
- return buf;
- }
- int operator+() const {
- return pos;
- }
- ~prnstr() {
- delete[] buf;
- }
- void clear();
- void swap(prnstr& w);
-};
-
-// functions that terminate program upon failure
-FILE* read_or_die(const char* fname);
-FILE* write_or_die(const char* fname);
-FILE* fopen_or_die(const char* fname, const char* mode);
-
-// functions that throw upon failure
-FILE* fopen_chk(const char* fname, const char* mode);
-void fclose_chk(FILE* f, const char* fname_dbg);
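
The sf() contract is easy to get wrong: the output array must hold fb_sz + 1 pointers (so fb[fb_sz] can be the terminating nullptr) and the input buffer is modified in place. A minimal sketch of the tab-separated case declared above:

```cpp
#include <library/cpp/deprecated/fgood/ffb.h>

#include <cstdio>

void ParseTsvLine() {
    char line[] = "key\tvalue\t42\n";   // sf() writes '\0' over separators and the newline
    char* fields[32 + 1];               // fb_sz + 1 slots; fields[n] ends up nullptr
    int n = sf(fields, line, 32);
    for (int i = 0; i < n; ++i) {
        printf("field %d: %s\n", i, fields[i]);  // each field points into the modified line buffer
    }
}
```
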
diff --git a/library/cpp/deprecated/fgood/fgood.cpp b/library/cpp/deprecated/fgood/fgood.cpp
deleted file mode 100644
index 5d4725bfae..0000000000
--- a/library/cpp/deprecated/fgood/fgood.cpp
+++ /dev/null
@@ -1,70 +0,0 @@
-#include "fgood.h"
-
-#include <util/generic/cast.h>
-#include <util/string/cast.h>
-#include <util/system/fstat.h>
-
-#ifdef _win32_
-#include <io.h>
-#endif
-
-i64 TFILEPtr::length() const {
-#ifdef _win32_
- FHANDLE fd = (FHANDLE)_get_osfhandle(fileno(m_file));
-#else
- FHANDLE fd = fileno(m_file);
-#endif
- i64 rv = GetFileLength(fd);
- if (rv < 0)
- ythrow yexception() << "TFILEPtr::length() " << Name.data() << ": " << LastSystemErrorText();
- return rv;
-}
-
-FILE* OpenFILEOrFail(const TString& name, const char* mode) {
- FILE* res = ::fopen(name.data(), mode);
- if (!res) {
- ythrow yexception() << "can't open \'" << name << "\' with mode \'" << mode << "\': " << LastSystemErrorText();
- }
- return res;
-}
-
-void TFILECloser::Destroy(FILE* file) {
- ::fclose(file);
-}
-
-#ifdef _freebsd_ // fgetln
-#define getline getline_alt_4test
-#endif // _freebsd_
-
-bool getline(TFILEPtr& f, TString& s) {
- char buf[4096];
- char* buf_ptr;
- if (s.capacity() > sizeof(buf)) {
- s.resize(s.capacity());
- if ((buf_ptr = fgets(s.begin(), IntegerCast<int>(s.capacity()), f)) == nullptr)
- return false;
- } else {
- if ((buf_ptr = fgets(buf, sizeof(buf), f)) == nullptr)
- return false;
- }
- size_t buf_len = strlen(buf_ptr);
- bool line_complete = buf_len && buf_ptr[buf_len - 1] == '\n';
- if (line_complete)
- buf_len--;
- if (buf_ptr == s.begin())
- s.resize(buf_len);
- else
- s.AssignNoAlias(buf, buf_len);
- if (line_complete)
- return true;
- while (fgets(buf, sizeof(buf), f)) {
- size_t buf_len2 = strlen(buf);
- if (buf_len2 && buf[buf_len2 - 1] == '\n') {
- buf[buf_len2 - 1] = 0;
- s.append(buf, buf_len2 - 1);
- return true;
- }
- s.append(buf, buf_len2);
- }
- return true;
-}
diff --git a/library/cpp/deprecated/fgood/fgood.h b/library/cpp/deprecated/fgood/fgood.h
deleted file mode 100644
index 0aaf910c0f..0000000000
--- a/library/cpp/deprecated/fgood/fgood.h
+++ /dev/null
@@ -1,328 +0,0 @@
-#pragma once
-
-#include <util/system/yassert.h>
-#include <util/system/defaults.h>
-#include <util/generic/string.h>
-#include <util/generic/yexception.h>
-#include <util/generic/ptr.h>
-
-#include "fput.h"
-
-#include <cstdio>
-
-#include <fcntl.h>
-
-#ifdef _unix_
-extern "C" int __ungetc(int, FILE*);
-#endif
-
-#if (!defined(__FreeBSD__) && !defined(__linux__) && !defined(_darwin_) && !defined(_cygwin_)) || defined(_bionic_)
-#define feof_unlocked(_stream) feof(_stream)
-#define ferror_unlocked(_stream) ferror(_stream)
-#endif
-
-#ifndef _unix_
-#if defined(_MSC_VER) && (_MSC_VER < 1900)
-#define getc_unlocked(_stream) (--(_stream)->_cnt >= 0 ? 0xff & *(_stream)->_ptr++ : _filbuf(_stream))
-#define putc_unlocked(_c, _stream) (--(_stream)->_cnt >= 0 ? 0xff & (*(_stream)->_ptr++ = (char)(_c)) : _flsbuf((_c), (_stream)))
-#else
-#define getc_unlocked(_stream) getc(_stream)
-#define putc_unlocked(_c, _stream) putc(_c, _stream)
-#endif
-#endif
-
-inline bool fgood(FILE* f) {
- return !feof_unlocked(f) && !ferror_unlocked(f);
-}
-
-#ifdef _win32_
-// These functions will work only with static MSVC runtime linkage. For dynamic linkage,
-// fseeki64.c and ftelli64.c from CRT sources should be included in project
-extern "C" int __cdecl _fseeki64(FILE*, __int64, int);
-extern "C" __int64 __cdecl _ftelli64(FILE*);
-
-inline i64 ftello(FILE* stream) {
- return _ftelli64(stream);
-}
-
-inline int fseeko(FILE* stream, i64 offset, int origin) {
- return _fseeki64(stream, offset, origin);
-}
-#endif
-
-class TFILEPtr {
-private:
- enum { SHOULD_CLOSE = 1,
- IS_PIPE = 2 };
- FILE* m_file;
- int m_Flags;
- TString Name;
-
-public:
- TFILEPtr() noexcept {
- m_file = nullptr;
- m_Flags = 0;
- }
- TFILEPtr(const TString& name, const char* mode) {
- m_file = nullptr;
- m_Flags = 0;
- open(name, mode);
- }
- TFILEPtr(const TFILEPtr& src) noexcept {
- m_file = src.m_file;
- m_Flags = 0;
- }
- TFILEPtr& operator=(const TFILEPtr& src) {
- if (src.m_file != m_file) {
- close();
- m_file = src.m_file;
- m_Flags = 0;
- }
- return *this;
- }
- explicit TFILEPtr(FILE* f) noexcept { // take ownership
- m_file = f;
- m_Flags = SHOULD_CLOSE;
- }
- TFILEPtr& operator=(FILE* f) { // take ownership
- if (f != m_file) {
- close();
- m_file = f;
- m_Flags = SHOULD_CLOSE;
- }
- return *this;
- }
- const TString& name() const {
- return Name;
- }
- operator FILE*() const noexcept {
- return m_file;
- }
- FILE* operator->() const noexcept {
- return m_file;
- }
- bool operator!() const noexcept {
- return m_file == nullptr;
- }
- bool operator!=(FILE* f) const noexcept {
- return m_file != f;
- }
- bool operator==(FILE* f) const noexcept {
- return m_file == f;
- }
- ~TFILEPtr() {
- close();
- }
- void Y_PRINTF_FORMAT(2, 3) check(const char* message, ...) const {
- if (Y_UNLIKELY(!fgood(m_file))) {
- va_list args;
- va_start(args, message);
- char buf[512];
- vsnprintf(buf, 512, message, args);
- // XXX: errno is undefined here
- ythrow yexception() << buf << ": " << LastSystemErrorText() << ", " << Name.data() << " at offset " << (i64)ftell();
- }
- }
- TFILEPtr& assign(FILE* f, const char* name = nullptr) { // take ownership and have a name
- *this = f;
- if (name)
- Name = name;
- return *this;
- }
- void open(const TString& name, const char* mode) {
- Y_ASSERT(!name.empty());
- Y_ASSERT(m_file == nullptr);
- m_file = ::fopen(name.data(), mode);
- if (!m_file)
- ythrow yexception() << "can't open \'" << name << "\' with mode \'" << mode << "\': " << LastSystemErrorText();
- m_Flags = SHOULD_CLOSE;
- Name = name;
- }
- void popen(const TString& command, const char* mode) {
- Y_ASSERT(!command.empty());
- Y_ASSERT(m_file == nullptr);
- m_file = ::popen(command.data(), mode);
- if (!m_file)
- ythrow yexception() << "can't execute \'" << command << "\' with mode \'" << mode << "\': " << LastSystemErrorText();
- m_Flags = IS_PIPE | SHOULD_CLOSE;
- Name = command;
- }
- void close() {
- if (m_file != nullptr && (m_Flags & SHOULD_CLOSE)) {
- if ((m_Flags & IS_PIPE) ? ::pclose(m_file) : ::fclose(m_file)) {
- m_file = nullptr;
- m_Flags = 0;
- if (!UncaughtException())
- ythrow yexception() << "can't close file " << Name.data() << ": " << LastSystemErrorText();
- }
- }
- m_file = nullptr;
- m_Flags = 0;
- Name.clear();
- }
- size_t write(const void* buffer, size_t size, size_t count) const {
- Y_ASSERT(m_file != nullptr);
- size_t r = ::fwrite(buffer, size, count, m_file);
- check("can't write %lu bytes", (unsigned long)size * count);
- return r;
- }
- size_t read(void* buffer, size_t size, size_t count) const {
- Y_ASSERT(m_file != nullptr);
- size_t r = ::fread(buffer, size, count, m_file);
- if (ferror_unlocked(m_file))
- ythrow yexception() << "can't read " << (unsigned long)size * count << " bytes: " << LastSystemErrorText() << ", " << Name.data() << " at offset " << (i64)ftell();
- return r;
- }
- char* fgets(char* buffer, int size) const {
- Y_ASSERT(m_file != nullptr);
- char* r = ::fgets(buffer, size, m_file);
- if (ferror_unlocked(m_file))
- ythrow yexception() << "can't read string of maximum size " << size << ": " << LastSystemErrorText() << ", " << Name.data() << " at offset " << (i64)ftell();
- return r;
- }
- void Y_PRINTF_FORMAT(2, 3) fprintf(const char* format, ...) {
- Y_ASSERT(m_file != nullptr);
- va_list args;
- va_start(args, format);
- vfprintf(m_file, format, args);
- check("can't write");
- }
- void seek(i64 offset, int origin) const {
- Y_ASSERT(m_file != nullptr);
-#if defined(_unix_) || defined(_win32_)
- if (fseeko(m_file, offset, origin) != 0)
-#else
- Y_ASSERT(offset == (i64)(i32)offset);
- if (::fseek(m_file, (long)offset, origin) != 0)
-#endif
- ythrow yexception() << "can't seek " << Name.data() << " by " << offset << ": " << LastSystemErrorText();
- }
- i64 length() const; // uses various system headers -> in fileptr.cpp
-
- void setDirect() const {
-#if !defined(_win_) && !defined(_darwin_)
- if (!m_file)
- ythrow yexception() << "file not open";
- if (fcntl(fileno(m_file), F_SETFL, O_DIRECT) == -1)
- ythrow yexception() << "Cannot set O_DIRECT flag";
-#endif
- }
-
- // for convenience
-
- i64 ftell() const noexcept {
-#if defined(_unix_) || defined(_win32_)
- return ftello(m_file);
-#else
- return ftell(m_file);
-#endif
- }
- bool eof() const noexcept {
- Y_ASSERT(m_file != nullptr);
- return feof_unlocked(m_file) != 0;
- }
- int fputc(int c) {
- Y_ASSERT(m_file != nullptr);
- return putc_unlocked(c, m_file);
- }
- size_t fputs(const char* buffer) const {
- return write(buffer, strlen(buffer), 1);
- }
- int fgetc() {
- Y_ASSERT(m_file != nullptr);
- return getc_unlocked(m_file);
- }
- int ungetc(int c) {
- Y_ASSERT(m_file != nullptr);
- return ::ungetc(c, m_file);
- }
- template <class T>
- size_t fput(const T& a) {
- Y_ASSERT(m_file != nullptr);
- return ::fput(m_file, a);
- }
- template <class T>
- size_t fget(T& a) {
- Y_ASSERT(m_file != nullptr);
- return ::fget(m_file, a);
- }
- size_t fsput(const char* s, size_t l) {
- Y_ASSERT(m_file != nullptr);
- return ::fsput(m_file, s, l);
- }
- size_t fsget(char* s, size_t l) {
- Y_ASSERT(m_file != nullptr);
- return ::fsget(m_file, s, l);
- }
-
- void fflush() {
- ::fflush(m_file);
- }
-
- /* This block contains some TFile/TStream-compatible names */
- size_t Read(void* bufferIn, size_t numBytes) {
- size_t r = fsget((char*)bufferIn, numBytes);
- if (Y_UNLIKELY(ferror_unlocked(m_file)))
- ythrow yexception() << "can't read " << numBytes << " bytes: " << LastSystemErrorText() << ", " << Name << " at offset " << (i64)ftell();
- return r;
- }
- void Write(const void* buffer, size_t numBytes) {
- write(buffer, 1, numBytes);
- }
- i64 Seek(i64 offset, int origin /*SeekDir*/) {
- seek(offset, origin);
- return ftell();
- }
- i64 GetPosition() const noexcept {
- return ftell();
- }
- i64 GetLength() const noexcept {
- return length();
- }
- bool ReadLine(TString& st);
-
- /* Similar to TAutoPtr::Release: return the pointer and forget about it. */
- FILE* Release() noexcept {
- FILE* result = m_file;
- m_file = nullptr;
- m_Flags = 0;
- Name.clear();
- return result;
- }
-};
-
-inline void fclose(TFILEPtr& F) {
- F.close();
-}
-
-inline void fseek(const TFILEPtr& F, i64 offset, int whence) {
- F.seek(offset, whence);
-}
-
-#ifdef _freebsd_ // fgetln
-inline bool getline(TFILEPtr& f, TString& s) {
- size_t len;
- char* buf = fgetln(f, &len);
- if (!buf)
- return false;
- if (len && buf[len - 1] == '\n')
- len--;
- s.AssignNoAlias(buf, len);
- return true;
-}
-#else
-bool getline(TFILEPtr& f, TString& s);
-#endif //_freebsd_
-
-inline bool TFILEPtr::ReadLine(TString& st) {
- return getline(*this, st);
-}
-
-FILE* OpenFILEOrFail(const TString& name, const char* mode);
-
-// Should be used with THolder
-struct TFILECloser {
- static void Destroy(FILE* file);
-};
-
-using TFILEHolder = THolder<FILE, TFILECloser>;
diff --git a/library/cpp/deprecated/fgood/fput.h b/library/cpp/deprecated/fgood/fput.h
deleted file mode 100644
index 690b06332d..0000000000
--- a/library/cpp/deprecated/fgood/fput.h
+++ /dev/null
@@ -1,79 +0,0 @@
-#pragma once
-
-#include <util/system/defaults.h>
-#include <util/system/valgrind.h>
-
-#include <cstdio>
-
-#ifdef __FreeBSD__
-#include <cstring>
-
-template <class T>
-Y_FORCE_INLINE size_t fput(FILE* F, const T& a) {
- if (Y_LIKELY(F->_w >= int(sizeof(a)))) {
- memcpy(F->_p, &a, sizeof(a));
- F->_p += sizeof(a);
- F->_w -= sizeof(a);
- return 1;
- } else {
- return fwrite(&a, sizeof(a), 1, F);
- }
-}
-
-template <class T>
-Y_FORCE_INLINE size_t fget(FILE* F, T& a) {
- if (Y_LIKELY(F->_r >= int(sizeof(a)))) {
- memcpy(&a, F->_p, sizeof(a));
- F->_p += sizeof(a);
- F->_r -= sizeof(a);
- return 1;
- } else {
- return fread(&a, sizeof(a), 1, F);
- }
-}
-
-inline size_t fsput(FILE* F, const char* s, size_t l) {
- VALGRIND_CHECK_READABLE(s, l);
-
- if ((size_t)F->_w >= l) {
- memcpy(F->_p, s, l);
- F->_p += l;
- F->_w -= l;
- return l;
- } else {
- return fwrite(s, 1, l, F);
- }
-}
-
-inline size_t fsget(FILE* F, char* s, size_t l) {
- if ((size_t)F->_r >= l) {
- memcpy(s, F->_p, l);
- F->_p += l;
- F->_r -= l;
- return l;
- } else {
- return fread(s, 1, l, F);
- }
-}
-#else
-template <class T>
-Y_FORCE_INLINE size_t fput(FILE* F, const T& a) {
- return fwrite(&a, sizeof(a), 1, F);
-}
-
-template <class T>
-Y_FORCE_INLINE size_t fget(FILE* F, T& a) {
- return fread(&a, sizeof(a), 1, F);
-}
-
-inline size_t fsput(FILE* F, const char* s, size_t l) {
-#ifdef WITH_VALGRIND
- VALGRIND_CHECK_READABLE(s, l);
-#endif
- return fwrite(s, 1, l, F);
-}
-
-inline size_t fsget(FILE* F, char* s, size_t l) {
- return fread(s, 1, l, F);
-}
-#endif
diff --git a/library/cpp/deprecated/fgood/ya.make b/library/cpp/deprecated/fgood/ya.make
deleted file mode 100644
index 2394f9ad7a..0000000000
--- a/library/cpp/deprecated/fgood/ya.make
+++ /dev/null
@@ -1,8 +0,0 @@
-LIBRARY()
-
-SRCS(
- ffb.cpp
- fgood.cpp
-)
-
-END()
diff --git a/library/cpp/deprecated/mapped_file/mapped_file.cpp b/library/cpp/deprecated/mapped_file/mapped_file.cpp
deleted file mode 100644
index b0e4511299..0000000000
--- a/library/cpp/deprecated/mapped_file/mapped_file.cpp
+++ /dev/null
@@ -1,64 +0,0 @@
-#include "mapped_file.h"
-
-#include <util/generic/yexception.h>
-#include <util/system/defaults.h>
-#include <util/system/hi_lo.h>
-#include <util/system/filemap.h>
-
-TMappedFile::TMappedFile(TFileMap* map, const char* dbgName) {
- Map_ = map;
- i64 len = Map_->Length();
- if (Hi32(len) != 0 && sizeof(size_t) <= sizeof(ui32))
- ythrow yexception() << "File '" << dbgName << "' mapping error: " << len << " too large";
-
- Map_->Map(0, static_cast<size_t>(len));
-}
-
-TMappedFile::TMappedFile(const TFile& file, TFileMap::EOpenMode om, const char* dbgName)
- : Map_(nullptr)
-{
- init(file, om, dbgName);
-}
-
-void TMappedFile::precharge(size_t off, size_t size) const {
- if (!Map_)
- return;
-
- Map_->Precharge(off, size);
-}
-
-void TMappedFile::init(const TString& name) {
- THolder<TFileMap> map(new TFileMap(name));
- TMappedFile newFile(map.Get(), name.data());
- Y_UNUSED(map.Release());
- newFile.swap(*this);
- newFile.term();
-}
-
-void TMappedFile::init(const TString& name, size_t length, TFileMap::EOpenMode om) {
- THolder<TFileMap> map(new TFileMap(name, length, om));
- TMappedFile newFile(map.Get(), name.data());
- Y_UNUSED(map.Release());
- newFile.swap(*this);
- newFile.term();
-}
-
-void TMappedFile::init(const TFile& file, TFileMap::EOpenMode om, const char* dbgName) {
- THolder<TFileMap> map(new TFileMap(file, om));
- TMappedFile newFile(map.Get(), dbgName);
- Y_UNUSED(map.Release());
- newFile.swap(*this);
- newFile.term();
-}
-
-void TMappedFile::init(const TString& name, TFileMap::EOpenMode om) {
- THolder<TFileMap> map(new TFileMap(name, om));
- TMappedFile newFile(map.Get(), name.data());
- Y_UNUSED(map.Release());
- newFile.swap(*this);
- newFile.term();
-}
-
-void TMappedFile::flush() {
- Map_->Flush();
-}
diff --git a/library/cpp/deprecated/mapped_file/ya.make b/library/cpp/deprecated/mapped_file/ya.make
deleted file mode 100644
index 309341f1da..0000000000
--- a/library/cpp/deprecated/mapped_file/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-LIBRARY()
-
-SRCS(
- mapped_file.cpp
-)
-
-END()
diff --git a/library/cpp/eventlog/common.h b/library/cpp/eventlog/common.h
deleted file mode 100644
index 75c512c13e..0000000000
--- a/library/cpp/eventlog/common.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#pragma once
-
-template <class T>
-class TPacketInputStream {
-public:
- virtual bool Avail() const = 0;
- virtual T operator*() const = 0;
- virtual bool Next() = 0;
- virtual ~TPacketInputStream() = default;
-};
diff --git a/library/cpp/eventlog/evdecoder.cpp b/library/cpp/eventlog/evdecoder.cpp
deleted file mode 100644
index e4413a1b0e..0000000000
--- a/library/cpp/eventlog/evdecoder.cpp
+++ /dev/null
@@ -1,112 +0,0 @@
-#include <util/memory/tempbuf.h>
-#include <util/string/cast.h>
-#include <util/stream/output.h>
-
-#include "evdecoder.h"
-#include "logparser.h"
-
-static const char* const UNKNOWN_EVENT_CLASS = "Unknown event class";
-
-static inline void LogError(ui64 frameAddr, const char* msg, bool strict) {
- if (!strict) {
- Cerr << "EventDecoder warning @" << frameAddr << ": " << msg << Endl;
- } else {
- ythrow yexception() << "EventDecoder error @" << frameAddr << ": " << msg;
- }
-}
-
-static inline bool SkipData(IInputStream& s, size_t amount) {
- return (amount == s.Skip(amount));
-}
-
-// There are two log formats: one stores each event's length, so an event can be skipped without decoding it,
-// and another that requires decoding every event just to seek over the stream. needRead == true means the latter format.
-static inline THolder<TEvent> DoDecodeEvent(IInputStream& s, const TEventFilter* const filter, const bool needRead, IEventFactory* fac) {
- TEventTimestamp ts;
- TEventClass c;
- THolder<TEvent> e;
-
- ::Load(&s, ts);
- ::Load(&s, c);
-
- bool needReturn = false;
-
- if (!filter || filter->EventAllowed(c)) {
- needReturn = true;
- }
-
- if (needRead || needReturn) {
- e.Reset(fac->CreateLogEvent(c));
-
- if (!!e) {
- e->Timestamp = ts;
- e->Load(s);
- } else if (needReturn) {
- e.Reset(new TUnknownEvent(ts, c));
- }
-
- if (!needReturn) {
- e.Reset(nullptr);
- }
- }
-
- return e;
-}
-
-THolder<TEvent> DecodeFramed(IInputStream& inp, ui64 frameAddr, const TEventFilter* const filter, IEventFactory* fac, bool strict) {
- ui32 len;
- ::Load(&inp, len);
-
- if (len < sizeof(ui32)) {
- ythrow TEventDecoderError() << "invalid event length";
- }
-
- TLengthLimitedInput s(&inp, len - sizeof(ui32));
-
- try {
- THolder<TEvent> e = DoDecodeEvent(s, filter, false, fac);
- if (!!e) {
- if (!s.Left()) {
- return e;
- } else if (e->Class == 0) {
- if (!SkipData(s, s.Left())) {
- ythrow TEventDecoderError() << "cannot skip bad event";
- }
-
- return e;
- }
-
- LogError(frameAddr, "Event is not fully read", strict);
- }
- } catch (const TLoadEOF&) {
- if (s.Left()) {
- throw;
- }
-
- LogError(frameAddr, "Unexpected event end", strict);
- }
-
- if (!SkipData(s, s.Left())) {
- ythrow TEventDecoderError() << "cannot skip bad event";
- }
-
- return nullptr;
-}
-
-THolder<TEvent> DecodeEvent(IInputStream& s, bool framed, ui64 frameAddr, const TEventFilter* const filter, IEventFactory* fac, bool strict) {
- try {
- if (framed) {
- return DecodeFramed(s, frameAddr, filter, fac, strict);
- } else {
- THolder<TEvent> e = DoDecodeEvent(s, filter, true, fac);
- // A null event here means the event was skipped by the filter; that is not an error.
- if (!!e && !e->Class) {
- ythrow TEventDecoderError() << UNKNOWN_EVENT_CLASS;
- }
-
- return e;
- }
- } catch (const TLoadEOF&) {
- ythrow TEventDecoderError() << "unexpected frame end";
- }
-}
diff --git a/library/cpp/eventlog/evdecoder.h b/library/cpp/eventlog/evdecoder.h
deleted file mode 100644
index eedfc82174..0000000000
--- a/library/cpp/eventlog/evdecoder.h
+++ /dev/null
@@ -1,16 +0,0 @@
-#pragma once
-
-#include <util/generic/yexception.h>
-#include <util/generic/ptr.h>
-
-#include "eventlog.h"
-
-class TEvent;
-class IInputStream;
-class TEventFilter;
-
-struct TEventDecoderError: public yexception {
-};
-
-THolder<TEvent> DecodeEvent(IInputStream& s, bool framed, ui64 frameAddr, const TEventFilter* const filter, IEventFactory* fac, bool strict = false);
-bool AcceptableContent(TEventLogFormat);
diff --git a/library/cpp/eventlog/event_field_output.cpp b/library/cpp/eventlog/event_field_output.cpp
deleted file mode 100644
index f9d98dac9d..0000000000
--- a/library/cpp/eventlog/event_field_output.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-#include "event_field_output.h"
-
-#include <util/string/split.h>
-
-namespace {
- TString MakeSeparators(EFieldOutputFlags flags) {
- TString res;
- res.reserve(3);
-
- if (flags & EFieldOutputFlag::EscapeTab) {
- res.append('\t');
- }
- if (flags & EFieldOutputFlag::EscapeNewLine) {
- res.append('\n');
- res.append('\r');
- }
- if (flags & EFieldOutputFlag::EscapeBackSlash) {
- res.append('\\');
- }
-
- return res;
- }
-}
-
-TEventFieldOutput::TEventFieldOutput(IOutputStream& output, EFieldOutputFlags flags)
- : Output(output)
- , Flags(flags)
- , Separators(MakeSeparators(flags))
-{
-}
-
-IOutputStream& TEventFieldOutput::GetOutputStream() {
- return Output;
-}
-
-EFieldOutputFlags TEventFieldOutput::GetFlags() const {
- return Flags;
-}
-
-void TEventFieldOutput::DoWrite(const void* buf, size_t len) {
- if (!Flags) {
- Output.Write(buf, len);
- return;
- }
-
- TStringBuf chunk{static_cast<const char*>(buf), len};
-
- for (const auto part : StringSplitter(chunk).SplitBySet(Separators.data())) {
- TStringBuf token = part.Token();
- TStringBuf delim = part.Delim();
-
- if (!token.empty()) {
- Output.Write(token);
- }
- if ("\n" == delim) {
- Output.Write(TStringBuf("\\n"));
- } else if ("\r" == delim) {
- Output.Write(TStringBuf("\\r"));
- } else if ("\t" == delim) {
- Output.Write(TStringBuf("\\t"));
- } else if ("\\" == delim) {
- Output.Write(TStringBuf("\\\\"));
- } else {
- Y_ASSERT(delim.empty());
- }
- }
-}
-
diff --git a/library/cpp/eventlog/event_field_output.h b/library/cpp/eventlog/event_field_output.h
deleted file mode 100644
index ed9db0ae16..0000000000
--- a/library/cpp/eventlog/event_field_output.h
+++ /dev/null
@@ -1,29 +0,0 @@
-#pragma once
-
-#include <util/stream/output.h>
-#include <util/generic/flags.h>
-
-enum class EFieldOutputFlag {
- EscapeTab = 0x1, // escape \t in field value
- EscapeNewLine = 0x2, // escape \n in field value
- EscapeBackSlash = 0x4 // escape \ in field value
-};
-
-Y_DECLARE_FLAGS(EFieldOutputFlags, EFieldOutputFlag);
-Y_DECLARE_OPERATORS_FOR_FLAGS(EFieldOutputFlags);
-
-class TEventFieldOutput: public IOutputStream {
-public:
- TEventFieldOutput(IOutputStream& output, EFieldOutputFlags flags);
-
- IOutputStream& GetOutputStream();
- EFieldOutputFlags GetFlags() const;
-
-protected:
- void DoWrite(const void* buf, size_t len) override;
-
-private:
- IOutputStream& Output;
- EFieldOutputFlags Flags;
- TString Separators;
-};
diff --git a/library/cpp/eventlog/event_field_printer.cpp b/library/cpp/eventlog/event_field_printer.cpp
deleted file mode 100644
index 29c6b4b661..0000000000
--- a/library/cpp/eventlog/event_field_printer.cpp
+++ /dev/null
@@ -1,27 +0,0 @@
-#include "event_field_printer.h"
-
-#include <library/cpp/protobuf/json/proto2json.h>
-
-namespace {
-
- const NProtobufJson::TProto2JsonConfig PROTO_2_JSON_CONFIG = NProtobufJson::TProto2JsonConfig()
- .SetMissingRepeatedKeyMode(NProtobufJson::TProto2JsonConfig::MissingKeyDefault)
- .AddStringTransform(MakeIntrusive<NProtobufJson::TBase64EncodeBytesTransform>());
-
-} // namespace
-
-TEventProtobufMessageFieldPrinter::TEventProtobufMessageFieldPrinter(EProtobufMessageFieldPrintMode mode)
- : Mode(mode)
-{}
-
-template <>
-void TEventProtobufMessageFieldPrinter::PrintProtobufMessageFieldToOutput<google::protobuf::Message, false>(const google::protobuf::Message& field, TEventFieldOutput& output) {
- switch (Mode) {
- case EProtobufMessageFieldPrintMode::DEFAULT:
- case EProtobufMessageFieldPrintMode::JSON: {
- // Do not use field.PrintJSON() here: IGNIETFERRO-2002
- NProtobufJson::Proto2Json(field, output, PROTO_2_JSON_CONFIG);
- break;
- }
- }
-}
diff --git a/library/cpp/eventlog/event_field_printer.h b/library/cpp/eventlog/event_field_printer.h
deleted file mode 100644
index 835e8f4a85..0000000000
--- a/library/cpp/eventlog/event_field_printer.h
+++ /dev/null
@@ -1,38 +0,0 @@
-#pragma once
-
-#include "event_field_output.h"
-
-#include <google/protobuf/message.h>
-
-// NB: For historical reasons, the print code for all primitive types, repeated fields, etc. is generated by https://a.yandex-team.ru/arc/trunk/arcadia/tools/event2cpp
-
-enum class EProtobufMessageFieldPrintMode {
- // Use the <TEventProtobufMessageFieldType>::Print method for fields that have it,
- // and print JSON for the other fields
- DEFAULT = 0,
-
- JSON = 1,
-};
-
-class TEventProtobufMessageFieldPrinter {
-public:
- explicit TEventProtobufMessageFieldPrinter(EProtobufMessageFieldPrintMode mode);
-
- template <typename TEventProtobufMessageFieldType, bool HasPrintFunction>
- void PrintProtobufMessageFieldToOutput(const TEventProtobufMessageFieldType& field, TEventFieldOutput& output) {
- if constexpr (HasPrintFunction) {
- if (Mode == EProtobufMessageFieldPrintMode::DEFAULT) {
- field.Print(output.GetOutputStream(), output.GetFlags());
- return;
- }
- }
-
- PrintProtobufMessageFieldToOutput<google::protobuf::Message, false>(field, output);
- }
-
- template <>
- void PrintProtobufMessageFieldToOutput<google::protobuf::Message, false>(const google::protobuf::Message& field, TEventFieldOutput& output);
-
-private:
- EProtobufMessageFieldPrintMode Mode;
-};
diff --git a/library/cpp/eventlog/eventlog.cpp b/library/cpp/eventlog/eventlog.cpp
deleted file mode 100644
index 458a632b4a..0000000000
--- a/library/cpp/eventlog/eventlog.cpp
+++ /dev/null
@@ -1,554 +0,0 @@
-#include <util/datetime/base.h>
-#include <util/stream/zlib.h>
-#include <util/stream/length.h>
-#include <util/generic/buffer.h>
-#include <util/generic/yexception.h>
-#include <util/digest/murmur.h>
-#include <util/generic/singleton.h>
-#include <util/generic/function.h>
-#include <util/stream/output.h>
-#include <util/stream/format.h>
-#include <util/stream/null.h>
-
-#include <google/protobuf/messagext.h>
-
-#include "eventlog.h"
-#include "events_extension.h"
-#include "evdecoder.h"
-#include "logparser.h"
-#include <library/cpp/eventlog/proto/internal.pb.h>
-
-#include <library/cpp/json/json_writer.h>
-#include <library/cpp/protobuf/json/proto2json.h>
-
-
-TAtomic eventlogFrameCounter = 0;
-
-namespace {
-
- const NProtobufJson::TProto2JsonConfig PROTO_2_JSON_CONFIG = NProtobufJson::TProto2JsonConfig()
- .SetMissingRepeatedKeyMode(NProtobufJson::TProto2JsonConfig::MissingKeyDefault)
- .AddStringTransform(MakeIntrusive<NProtobufJson::TBase64EncodeBytesTransform>());
-
- ui32 GenerateFrameId() {
- return ui32(AtomicAdd(eventlogFrameCounter, 1));
- }
-
- inline const NProtoBuf::Message* UnknownEventMessage() {
- return Singleton<NEventLogInternal::TUnknownEvent>();
- }
-
-} // namespace
-
-void TEvent::Print(IOutputStream& out, const TOutputOptions& options, const TEventState& eventState) const {
- if (options.OutputFormat == TOutputFormat::TabSeparatedRaw) {
- PrintHeader(out, options, eventState);
- DoPrint(out, {});
- } else if (options.OutputFormat == TOutputFormat::TabSeparated) {
- PrintHeader(out, options, eventState);
- DoPrint(
- out,
- EFieldOutputFlags{} | EFieldOutputFlag::EscapeNewLine | EFieldOutputFlag::EscapeBackSlash);
- } else if (options.OutputFormat == TOutputFormat::Json) {
- NJson::TJsonWriterConfig jsonWriterConfig;
- jsonWriterConfig.FormatOutput = 0;
- NJson::TJsonWriter jsonWriter(&out, jsonWriterConfig);
-
- jsonWriter.OpenMap();
- PrintJsonHeader(jsonWriter);
- DoPrintJson(jsonWriter);
- jsonWriter.CloseMap();
- }
-}
-
-void TEvent::PrintHeader(IOutputStream& out, const TOutputOptions& options, const TEventState& eventState) const {
- if (options.HumanReadable) {
- out << TInstant::MicroSeconds(Timestamp).ToString() << "\t";
- if (Timestamp >= eventState.FrameStartTime)
- out << "+" << HumanReadable(TDuration::MicroSeconds(Timestamp - eventState.FrameStartTime));
- else // a bug somewhere? anyway, let's handle it in a nice fashion
- out << "-" << HumanReadable(TDuration::MicroSeconds(eventState.FrameStartTime - Timestamp));
-
- if (Timestamp >= eventState.PrevEventTime)
- out << " (+" << HumanReadable(TDuration::MicroSeconds(Timestamp - eventState.PrevEventTime)) << ")";
- // else: these events are async and out of order; a relative time diff makes no sense, so skip it
-
- out << "\tF# " << FrameId << '\t';
- } else {
- out << static_cast<TEventTimestamp>(Timestamp);
- out << '\t' << FrameId << '\t';
- }
-}
-
-void TEvent::PrintJsonHeader(NJson::TJsonWriter& jsonWriter) const {
- jsonWriter.Write("Timestamp", Timestamp);
- jsonWriter.Write("FrameId", FrameId);
-}
-
-class TProtobufEvent: public TEvent {
-public:
- TProtobufEvent(TEventTimestamp t, size_t eventId, const NProtoBuf::Message& msg)
- : TEvent(eventId, t)
- , Message_(&msg)
- , EventFactory_(NProtoBuf::TEventFactory::Instance())
- {
- }
-
- TProtobufEvent()
- : TEvent(0, 0)
- , EventFactory_(NProtoBuf::TEventFactory::Instance())
- {
- }
-
- explicit TProtobufEvent(ui32 id, NProtoBuf::TEventFactory* eventFactory = NProtoBuf::TEventFactory::Instance())
- : TEvent(id, 0)
- , EventFactory_(eventFactory)
- {
- InnerMsg_.Reset(EventFactory_->CreateEvent(Class));
- Message_ = InnerMsg_.Get();
- }
-
- ui32 Id() const {
- return Class;
- }
-
- void Load(IInputStream& in) override {
- if (!!InnerMsg_) {
- InnerMsg_->ParseFromArcadiaStream(&in);
- } else {
- TransferData(&in, &Cnull);
- }
- }
-
- void Save(IOutputStream& out) const override {
- Message_->SerializeToArcadiaStream(&out);
- }
-
- void SaveToBuffer(TBufferOutput& buf) const override {
- size_t messageSize = Message_->ByteSize();
- size_t before = buf.Buffer().Size();
- buf.Buffer().Advance(messageSize);
- Y_PROTOBUF_SUPPRESS_NODISCARD Message_->SerializeToArray(buf.Buffer().Data() + before, messageSize);
- }
-
- TStringBuf GetName() const override {
- return EventFactory_->NameById(Id());
- }
-
-private:
- void DoPrint(IOutputStream& out, EFieldOutputFlags flags) const override {
- EventFactory_->PrintEvent(Id(), Message_, out, flags);
- }
- void DoPrintJson(NJson::TJsonWriter& jsonWriter) const override {
- jsonWriter.OpenMap("EventBody");
- jsonWriter.Write("Type", GetName());
-
- jsonWriter.Write("Fields");
- NProtobufJson::Proto2Json(*GetProto(), jsonWriter, PROTO_2_JSON_CONFIG);
-
- jsonWriter.CloseMap();
- }
-
- const NProtoBuf::Message* GetProto() const override {
- if (Message_) {
- return Message_;
- }
-
- return UnknownEventMessage();
- }
-
-private:
- const NProtoBuf::Message* Message_ = nullptr;
- NProtoBuf::TEventFactory* EventFactory_;
- THolder<NProtoBuf::Message> InnerMsg_;
-
- friend class TEventLogFrame;
-};
-
-void TEventLogFrame::LogProtobufEvent(size_t eventId, const NProtoBuf::Message& ev) {
- TProtobufEvent event(Now().MicroSeconds(), eventId, ev);
-
- LogEventImpl(event);
-}
-
-void TEventLogFrame::LogProtobufEvent(TEventTimestamp timestamp, size_t eventId, const NProtoBuf::Message& ev) {
- TProtobufEvent event(timestamp, eventId, ev);
-
- LogEventImpl(event);
-}
-
-template <>
-void TEventLogFrame::DebugDump(const TProtobufEvent& ev) {
- static TMutex lock;
-
- with_lock (lock) {
- Cerr << ev.Timestamp << "\t" << ev.GetName() << "\t";
- ev.GetProto()->PrintJSON(Cerr);
- Cerr << Endl;
- }
-}
-
-#pragma pack(push, 1)
-struct TFrameHeaderData {
- char SyncField[COMPRESSED_LOG_FRAME_SYNC_DATA.size()];
- TCompressedFrameBaseHeader Header;
- TCompressedFrameHeader2 HeaderEx;
-};
-#pragma pack(pop)
-
-TEventLogFrame::TEventLogFrame(IEventLog& parentLog, bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : EvLog_(parentLog.HasNullBackend() ? nullptr : &parentLog)
- , NeedAlwaysSafeAdd_(needAlwaysSafeAdd)
- , ForceDump_(false)
- , WriteFrameCallback_(std::move(writeFrameCallback))
-{
- DoInit();
-}
-
-TEventLogFrame::TEventLogFrame(IEventLog* parentLog, bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : EvLog_(parentLog)
- , NeedAlwaysSafeAdd_(needAlwaysSafeAdd)
- , ForceDump_(false)
- , WriteFrameCallback_(std::move(writeFrameCallback))
-{
- if (EvLog_ && EvLog_->HasNullBackend()) {
- EvLog_ = nullptr;
- }
-
- DoInit();
-}
-
-TEventLogFrame::TEventLogFrame(bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : EvLog_(nullptr)
- , NeedAlwaysSafeAdd_(needAlwaysSafeAdd)
- , ForceDump_(false)
- , WriteFrameCallback_(std::move(writeFrameCallback))
-{
- DoInit();
-}
-
-void TEventLogFrame::Flush() {
- if (EvLog_ == nullptr)
- return;
-
- TBuffer& buf = Buf_.Buffer();
-
- if (buf.Empty()) {
- return;
- }
-
- EvLog_->WriteFrame(buf, StartTimestamp_, EndTimestamp_, WriteFrameCallback_, std::move(MetaFlags_));
-
- DoInit();
-
- return;
-}
-
-void TEventLogFrame::SafeFlush() {
- TGuard<TMutex> g(Mtx_);
- Flush();
-}
-
-void TEventLogFrame::AddEvent(TEventTimestamp timestamp) {
- if (timestamp < StartTimestamp_) {
- StartTimestamp_ = timestamp;
- }
-
- if (timestamp > EndTimestamp_) {
- EndTimestamp_ = timestamp;
- }
-}
-
-void TEventLogFrame::DoInit() {
- Buf_.Buffer().Clear();
-
- StartTimestamp_ = (TEventTimestamp)-1;
- EndTimestamp_ = 0;
-}
-
-void TEventLogFrame::VisitEvents(ILogFrameEventVisitor& visitor, IEventFactory* eventFactory) {
- const auto doVisit = [this, &visitor, eventFactory]() {
- TBuffer& buf = Buf_.Buffer();
-
- TBufferInput bufferInput(buf);
- TLengthLimitedInput limitedInput(&bufferInput, buf.size());
-
- TEventFilter EventFilter(false);
-
- while (limitedInput.Left()) {
- THolder<TEvent> event = DecodeEvent(limitedInput, true, 0, &EventFilter, eventFactory);
-
- visitor.Visit(*event);
- }
- };
- if (NeedAlwaysSafeAdd_) {
- TGuard<TMutex> g(Mtx_);
- doVisit();
- } else {
- doVisit();
- }
-}
-
-TSelfFlushLogFrame::TSelfFlushLogFrame(IEventLog& parentLog, bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : TEventLogFrame(parentLog, needAlwaysSafeAdd, std::move(writeFrameCallback))
-{
-}
-
-TSelfFlushLogFrame::TSelfFlushLogFrame(IEventLog* parentLog, bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : TEventLogFrame(parentLog, needAlwaysSafeAdd, std::move(writeFrameCallback))
-{
-}
-
-TSelfFlushLogFrame::TSelfFlushLogFrame(bool needAlwaysSafeAdd, TWriteFrameCallbackPtr writeFrameCallback)
- : TEventLogFrame(needAlwaysSafeAdd, std::move(writeFrameCallback))
-{
-}
-
-TSelfFlushLogFrame::~TSelfFlushLogFrame() {
- try {
- Flush();
- } catch (...) {
- }
-}
-
-IEventLog::~IEventLog() {
-}
-
-static THolder<TLogBackend> ConstructBackend(const TString& fileName, const TEventLogBackendOptions& backendOpts) {
- try {
- THolder<TLogBackend> backend;
- if (backendOpts.UseSyncPageCacheBackend) {
- backend = MakeHolder<TSyncPageCacheFileLogBackend>(fileName, backendOpts.SyncPageCacheBackendBufferSize, backendOpts.SyncPageCacheBackendMaxPendingSize);
- } else {
- backend = MakeHolder<TFileLogBackend>(fileName);
- }
- return MakeHolder<TReopenLogBackend>(std::move(backend));
- } catch (...) {
- Cdbg << "Warning: Cannot open event log '" << fileName << "': " << CurrentExceptionMessage() << "." << Endl;
- }
-
- return MakeHolder<TNullLogBackend>();
-}
-
-TEventLog::TEventLog(const TString& fileName, TEventLogFormat contentFormat, const TEventLogBackendOptions& backendOpts, TMaybe<TEventLogFormat> logFormat)
- : Log_(ConstructBackend(fileName, backendOpts))
- , ContentFormat_(contentFormat)
- , LogFormat_(logFormat.Defined() ? *logFormat : COMPRESSED_LOG_FORMAT_V4)
- , HasNullBackend_(Log_.IsNullLog())
- , Lz4hcCodec_(NBlockCodecs::Codec("lz4hc"))
- , ZstdCodec_(NBlockCodecs::Codec("zstd_1"))
-{
- Y_ENSURE(LogFormat_ == COMPRESSED_LOG_FORMAT_V4 || LogFormat_ == COMPRESSED_LOG_FORMAT_V5);
-
- if (contentFormat & 0xff000000) {
- ythrow yexception() << "wrong compressed event log content format code (" << contentFormat << ")";
- }
-}
-
-TEventLog::TEventLog(const TString& fileName, TEventLogFormat contentFormat, const TEventLogBackendOptions& backendOpts)
- : TEventLog(fileName, contentFormat, backendOpts, COMPRESSED_LOG_FORMAT_V4)
-{
-}
-
-TEventLog::TEventLog(const TLog& log, TEventLogFormat contentFormat, TEventLogFormat logFormat)
- : Log_(log)
- , ContentFormat_(contentFormat)
- , LogFormat_(logFormat)
- , HasNullBackend_(Log_.IsNullLog())
- , Lz4hcCodec_(NBlockCodecs::Codec("lz4hc"))
- , ZstdCodec_(NBlockCodecs::Codec("zstd_1"))
-{
- if (contentFormat & 0xff000000) {
- ythrow yexception() << "wrong compressed event log content format code (" << contentFormat << ")";
- }
-}
-
-TEventLog::TEventLog(TEventLogFormat contentFormat, TEventLogFormat logFormat)
- : Log_(MakeHolder<TNullLogBackend>())
- , ContentFormat_(contentFormat)
- , LogFormat_(logFormat)
- , HasNullBackend_(true)
- , Lz4hcCodec_(NBlockCodecs::Codec("lz4hc"))
- , ZstdCodec_(NBlockCodecs::Codec("zstd_1"))
-{
- if (contentFormat & 0xff000000) {
- ythrow yexception() << "wrong compressed event log content format code (" << contentFormat << ")";
- }
-}
-
-TEventLog::~TEventLog() {
-}
-
-void TEventLog::ReopenLog() {
- Log_.ReopenLog();
-}
-
-void TEventLog::CloseLog() {
- Log_.CloseLog();
-}
-
-void TEventLog::Flush() {
-}
-
-namespace {
- class TOnExceptionAction {
- public:
- TOnExceptionAction(std::function<void()>&& f)
- : F_(std::move(f))
- {
- }
-
- ~TOnExceptionAction() {
- if (F_ && UncaughtException()) {
- try {
- F_();
- } catch (...) {
- }
- }
- }
-
- private:
- std::function<void()> F_;
- };
-}
-
-void TEventLog::WriteFrame(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- TWriteFrameCallbackPtr writeFrameCallback,
- TLogRecord::TMetaFlags metaFlags) {
- Y_ENSURE(LogFormat_ == COMPRESSED_LOG_FORMAT_V4 || LogFormat_ == COMPRESSED_LOG_FORMAT_V5);
-
- TBuffer& b1 = buffer;
-
- size_t maxCompressedLength = (LogFormat_ == COMPRESSED_LOG_FORMAT_V4) ? b1.Size() + 256 : ZstdCodec_->MaxCompressedLength(b1);
-
- // Reserve enough memory to minimize reallocs
- TBufferOutput outbuf(sizeof(TFrameHeaderData) + maxCompressedLength);
- TBuffer& b2 = outbuf.Buffer();
- b2.Proceed(sizeof(TFrameHeaderData));
-
- {
- TFrameHeaderData& hdr = *reinterpret_cast<TFrameHeaderData*>(b2.data());
-
- memcpy(hdr.SyncField, COMPRESSED_LOG_FRAME_SYNC_DATA.data(), COMPRESSED_LOG_FRAME_SYNC_DATA.size());
- hdr.Header.Format = (LogFormat_ << 24) | (ContentFormat_ & 0xffffff);
- hdr.Header.FrameId = GenerateFrameId();
- hdr.HeaderEx.UncompressedDatalen = (ui32)b1.Size();
- hdr.HeaderEx.StartTimestamp = startTimestamp;
- hdr.HeaderEx.EndTimestamp = endTimestamp;
- hdr.HeaderEx.PayloadChecksum = 0;
- hdr.HeaderEx.CompressorVersion = 0;
- }
-
- if (LogFormat_ == COMPRESSED_LOG_FORMAT_V4) {
- TBuffer encoded(b1.Size() + sizeof(TFrameHeaderData) + 256);
- Lz4hcCodec_->Encode(b1, encoded);
-
- TZLibCompress compr(&outbuf, ZLib::ZLib, 6, 2048);
- compr.Write(encoded.data(), encoded.size());
- compr.Finish();
- } else {
- b2.Advance(ZstdCodec_->Compress(b1, b2.Pos()));
- }
-
- {
- const size_t k = sizeof(TCompressedFrameBaseHeader) + COMPRESSED_LOG_FRAME_SYNC_DATA.size();
- TFrameHeaderData& hdr = *reinterpret_cast<TFrameHeaderData*>(b2.data());
- hdr.Header.Length = static_cast<ui32>(b2.size() - k);
- hdr.HeaderEx.PayloadChecksum = MurmurHash<ui32>(b2.data() + sizeof(TFrameHeaderData), b2.size() - sizeof(TFrameHeaderData));
-
- const size_t n = sizeof(TFrameHeaderData) - (COMPRESSED_LOG_FRAME_SYNC_DATA.size() + sizeof(hdr.HeaderEx.HeaderChecksum));
- hdr.HeaderEx.HeaderChecksum = MurmurHash<ui32>(b2.data() + COMPRESSED_LOG_FRAME_SYNC_DATA.size(), n);
- }
-
- const TBuffer& frameData = outbuf.Buffer();
-
- TOnExceptionAction actionCallback([this] {
- if (ErrorCallback_) {
- ErrorCallback_->OnWriteError();
- }
- });
-
- if (writeFrameCallback) {
- writeFrameCallback->OnAfterCompress(frameData, startTimestamp, endTimestamp);
- }
-
- Log_.Write(frameData.Data(), frameData.Size(), std::move(metaFlags));
- if (SuccessCallback_) {
- SuccessCallback_->OnWriteSuccess(frameData);
- }
-}
-
-TEvent* TProtobufEventFactory::CreateLogEvent(TEventClass c) {
- return new TProtobufEvent(c, EventFactory_);
-}
-
-TEventClass TProtobufEventFactory::ClassByName(TStringBuf name) const {
- return EventFactory_->IdByName(name);
-}
-
-TEventClass TProtobufEventFactory::EventClassBegin() const {
- const auto& items = EventFactory_->FactoryItems();
-
- if (items.empty()) {
- return static_cast<TEventClass>(0);
- }
-
- return static_cast<TEventClass>(items.begin()->first);
-}
-
-TEventClass TProtobufEventFactory::EventClassEnd() const {
- const auto& items = EventFactory_->FactoryItems();
-
- if (items.empty()) {
- return static_cast<TEventClass>(0);
- }
-
- return static_cast<TEventClass>(items.rbegin()->first + 1);
-}
-
-namespace NEvClass {
- IEventFactory* Factory() {
- return Singleton<TProtobufEventFactory>();
- }
-
- IEventProcessor* Processor() {
- return Singleton<TProtobufEventProcessor>();
- }
-}
-
-const NProtoBuf::Message* TUnknownEvent::GetProto() const {
- return UnknownEventMessage();
-}
-
-TStringBuf TUnknownEvent::GetName() const {
- return TStringBuf("UnknownEvent");
-}
-
-void TUnknownEvent::DoPrintJson(NJson::TJsonWriter& jsonWriter) const {
- jsonWriter.OpenMap("EventBody");
- jsonWriter.Write("Type", GetName());
- jsonWriter.Write("EventId", (size_t)Class);
- jsonWriter.CloseMap();
-}
-
-TStringBuf TEndOfFrameEvent::GetName() const {
- return TStringBuf("EndOfFrame");
-}
-
-const NProtoBuf::Message* TEndOfFrameEvent::GetProto() const {
- return Singleton<NEventLogInternal::TEndOfFrameEvent>();
-}
-
-void TEndOfFrameEvent::DoPrintJson(NJson::TJsonWriter& jsonWriter) const {
- jsonWriter.OpenMap("EventBody");
- jsonWriter.Write("Type", GetName());
- jsonWriter.OpenMap("Fields");
- jsonWriter.CloseMap();
- jsonWriter.CloseMap();
-}
-
-THolder<TEvent> MakeProtobufLogEvent(TEventTimestamp ts, TEventClass eventId, google::protobuf::Message& ev) {
- return MakeHolder<TProtobufEvent>(ts, eventId, ev);
-}
diff --git a/library/cpp/eventlog/eventlog.h b/library/cpp/eventlog/eventlog.h
deleted file mode 100644
index 45c2dfb17f..0000000000
--- a/library/cpp/eventlog/eventlog.h
+++ /dev/null
@@ -1,623 +0,0 @@
-#pragma once
-
-#include "eventlog_int.h"
-#include "event_field_output.h"
-#include "events_extension.h"
-
-#include <library/cpp/blockcodecs/codecs.h>
-#include <library/cpp/logger/all.h>
-
-#include <google/protobuf/message.h>
-
-#include <util/datetime/base.h>
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/stream/output.h>
-#include <util/stream/buffer.h>
-#include <util/stream/str.h>
-#include <util/system/mutex.h>
-#include <util/stream/output.h>
-#include <util/system/env.h>
-#include <util/system/unaligned_mem.h>
-#include <util/ysaveload.h>
-
-#include <cstdlib>
-
-namespace NJson {
- class TJsonWriter;
-}
-
-class IEventLog;
-
-class TEvent : public TThrRefBase {
-public:
- enum class TOutputFormat {
- TabSeparated,
- TabSeparatedRaw, // disables escaping
- Json
- };
-
- struct TOutputOptions {
- TOutputFormat OutputFormat = TOutputFormat::TabSeparated;
- // Dump some fields (e.g. the timestamp) in a more human-readable format
- bool HumanReadable = false;
-
- TOutputOptions(TOutputFormat outputFormat = TOutputFormat::TabSeparated)
- : OutputFormat(outputFormat)
- {
- }
-
- TOutputOptions(TOutputFormat outputFormat, bool humanReadable)
- : OutputFormat(outputFormat)
- , HumanReadable(humanReadable)
- {
- }
- };
-
- struct TEventState {
- TEventTimestamp FrameStartTime = 0;
- TEventTimestamp PrevEventTime = 0;
- TEventState() {
- }
- };
-
- TEvent(TEventClass c, TEventTimestamp t)
- : Class(c)
- , Timestamp(t)
- {
- }
-
- virtual ~TEvent() = default;
-
- // Note that descendants MUST have Save() & Load() methods that alter
- // only their own new variables, not the base class!
- virtual void Save(IOutputStream& out) const = 0;
- virtual void SaveToBuffer(TBufferOutput& out) const {
- Save(out);
- }
-
- // Note that descendants MUST have Save() & Load() methods that alter
- // only their own new variables, not the base class!
- virtual void Load(IInputStream& i) = 0;
-
- virtual TStringBuf GetName() const = 0;
- virtual const NProtoBuf::Message* GetProto() const = 0;
-
- void Print(IOutputStream& out, const TOutputOptions& options = TOutputOptions(), const TEventState& eventState = TEventState()) const;
- void PrintHeader(IOutputStream& out, const TOutputOptions& options, const TEventState& eventState) const;
-
- TString ToString() const {
- TStringStream buff;
- Print(buff);
- return buff.Str();
- }
-
- void FullSaveToBuffer(TBufferOutput& buf) const {
- SaveMessageHeader(buf);
- this->SaveToBuffer(buf);
- }
-
- void FullSave(IOutputStream& o) const {
- SaveMessageHeader(o);
- this->Save(o);
- }
-
- void FullLoad(IInputStream& i) {
- ::Load(&i, Timestamp);
- ::Load(&i, Class);
- this->Load(i);
- }
-
- template <class T>
- const T* Get() const {
- return static_cast<const T*>(this->GetProto());
- }
-
- TEventClass Class;
- TEventTimestamp Timestamp;
- ui32 FrameId = 0;
-
-private:
- void SaveMessageHeader(IOutputStream& out) const {
- ::Save(&out, Timestamp);
- ::Save(&out, Class);
- }
-
- virtual void DoPrint(IOutputStream& out, EFieldOutputFlags flags) const = 0;
- virtual void DoPrintJson(NJson::TJsonWriter& jsonWriter) const = 0;
-
- void PrintJsonHeader(NJson::TJsonWriter& jsonWriter) const;
-};
-
-using TEventPtr = TIntrusivePtr<TEvent>;
-using TConstEventPtr = TIntrusiveConstPtr<TEvent>;
-
-class IEventProcessor {
-public:
- virtual void SetOptions(const TEvent::TOutputOptions& options) {
- Options_ = options;
- }
- virtual void ProcessEvent(const TEvent* ev) = 0;
- virtual bool CheckedProcessEvent(const TEvent* ev) {
- ProcessEvent(ev);
- return true;
- }
- virtual ~IEventProcessor() = default;
-
-protected:
- TEvent::TOutputOptions Options_;
-};
-
-class IEventFactory {
-public:
- virtual TEvent* CreateLogEvent(TEventClass c) = 0;
- virtual TEventLogFormat CurrentFormat() = 0;
- virtual TEventClass ClassByName(TStringBuf name) const = 0;
- virtual TEventClass EventClassBegin() const = 0;
- virtual TEventClass EventClassEnd() const = 0;
- virtual ~IEventFactory() = default;
-};
-
-class TUnknownEvent: public TEvent {
-public:
- TUnknownEvent(TEventTimestamp ts, TEventClass cls)
- : TEvent(cls, ts)
- {
- }
-
- ~TUnknownEvent() override = default;
-
- void Save(IOutputStream& /* o */) const override {
- ythrow yexception() << "TUnknownEvent cannot be saved";
- }
-
- void Load(IInputStream& /* i */) override {
- ythrow yexception() << "TUnknownEvent cannot be loaded";
- }
-
- TStringBuf GetName() const override;
-
-private:
- void DoPrint(IOutputStream& out, EFieldOutputFlags) const override {
- out << GetName() << "\t" << (size_t)Class;
- }
-
- void DoPrintJson(NJson::TJsonWriter& jsonWriter) const override;
-
- const NProtoBuf::Message* GetProto() const override;
-};
-
-class TEndOfFrameEvent: public TEvent {
-public:
- enum {
- EventClass = 0
- };
-
- TEndOfFrameEvent(TEventTimestamp ts)
- : TEvent(TEndOfFrameEvent::EventClass, ts)
- {
- }
-
- ~TEndOfFrameEvent() override = default;
-
- void Save(IOutputStream& o) const override {
- (void)o;
- ythrow yexception() << "TEndOfFrameEvent cannot be saved";
- }
-
- void Load(IInputStream& i) override {
- (void)i;
- ythrow yexception() << "TEndOfFrameEvent cannot be loaded";
- }
-
- TStringBuf GetName() const override;
-
-private:
- void DoPrint(IOutputStream& out, EFieldOutputFlags) const override {
- out << GetName();
- }
- void DoPrintJson(NJson::TJsonWriter& jsonWriter) const override;
-
- const NProtoBuf::Message* GetProto() const override;
-};
-
-class ILogFrameEventVisitor {
-public:
- virtual ~ILogFrameEventVisitor() = default;
-
- virtual void Visit(const TEvent& event) = 0;
-};
-
-class IWriteFrameCallback : public TAtomicRefCount<IWriteFrameCallback> {
-public:
- virtual ~IWriteFrameCallback() = default;
-
- virtual void OnAfterCompress(const TBuffer& compressedFrame, TEventTimestamp startTimestamp, TEventTimestamp endTimestamp) = 0;
-};
-
-using TWriteFrameCallbackPtr = TIntrusivePtr<IWriteFrameCallback>;
-
-class TEventLogFrame {
-public:
- TEventLogFrame(bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
- TEventLogFrame(IEventLog& parentLog, bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
- TEventLogFrame(IEventLog* parentLog, bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
-
- virtual ~TEventLogFrame() = default;
-
- void Flush();
- void SafeFlush();
-
- void ForceDump() {
- ForceDump_ = true;
- }
-
- template <class T>
- inline void LogEvent(const T& ev) {
- if (NeedAlwaysSafeAdd_) {
- SafeLogEvent(ev);
- } else {
- UnSafeLogEvent(ev);
- }
- }
-
- template <class T>
- inline void LogEvent(TEventTimestamp timestamp, const T& ev) {
- if (NeedAlwaysSafeAdd_) {
- SafeLogEvent(timestamp, ev);
- } else {
- UnSafeLogEvent(timestamp, ev);
- }
- }
-
- template <class T>
- inline void UnSafeLogEvent(const T& ev) {
- if (!IsEventIgnored(ev.ID))
- LogProtobufEvent(ev.ID, ev);
- }
-
- template <class T>
- inline void UnSafeLogEvent(TEventTimestamp timestamp, const T& ev) {
- if (!IsEventIgnored(ev.ID))
- LogProtobufEvent(timestamp, ev.ID, ev);
- }
-
- template <class T>
- inline void SafeLogEvent(const T& ev) {
- if (!IsEventIgnored(ev.ID)) {
- TGuard<TMutex> g(Mtx_);
- LogProtobufEvent(ev.ID, ev);
- }
- }
-
- template <class T>
- inline void SafeLogEvent(TEventTimestamp timestamp, const T& ev) {
- if (!IsEventIgnored(ev.ID)) {
- TGuard<TMutex> g(Mtx_);
- LogProtobufEvent(timestamp, ev.ID, ev);
- }
- }
-
- void VisitEvents(ILogFrameEventVisitor& visitor, IEventFactory* eventFactory);
-
- inline bool IsEventIgnored(size_t eventId) const {
- Y_UNUSED(eventId); // in the future we might want to selectively discard only some kinds of messages
- return !IsDebugModeEnabled() && EvLog_ == nullptr && !ForceDump_;
- }
-
- void Enable(IEventLog& evLog) {
- EvLog_ = &evLog;
- }
-
- void Disable() {
- EvLog_ = nullptr;
- }
-
- void SetNeedAlwaysSafeAdd(bool val) {
- NeedAlwaysSafeAdd_ = val;
- }
-
- void SetWriteFrameCallback(TWriteFrameCallbackPtr writeFrameCallback) {
- WriteFrameCallback_ = writeFrameCallback;
- }
-
- void AddMetaFlag(const TString& key, const TString& value) {
- if (NeedAlwaysSafeAdd_) {
- TGuard<TMutex> g(Mtx_);
- MetaFlags_.emplace_back(key, value);
- } else {
- MetaFlags_.emplace_back(key, value);
- }
- }
-
-protected:
- void LogProtobufEvent(size_t eventId, const NProtoBuf::Message& ev);
- void LogProtobufEvent(TEventTimestamp timestamp, size_t eventId, const NProtoBuf::Message& ev);
-
-private:
- static bool IsDebugModeEnabled() {
- static struct TSelector {
- bool Flag;
-
- TSelector()
- : Flag(GetEnv("EVLOG_DEBUG") == TStringBuf("1"))
- {
- }
- } selector;
-
- return selector.Flag;
- }
-
- template <class T>
- void DebugDump(const T& ev);
-
- // T must be a descendant of NEvClass::TEvent
- template <class T>
- inline void LogEventImpl(const T& ev) {
- if (EvLog_ != nullptr || ForceDump_) {
- TBuffer& b = Buf_.Buffer();
- size_t lastSize = b.size();
- ::Save(&Buf_, ui32(0));
- ev.FullSaveToBuffer(Buf_);
- WriteUnaligned<ui32>(b.data() + lastSize, (ui32)(b.size() - lastSize));
- AddEvent(ev.Timestamp);
- }
-
- if (IsDebugModeEnabled()) {
- DebugDump(ev);
- }
- }
-
- void AddEvent(TEventTimestamp timestamp);
- void DoInit();
-
-private:
- TBufferOutput Buf_;
- TEventTimestamp StartTimestamp_, EndTimestamp_;
- IEventLog* EvLog_;
- TMutex Mtx_;
- bool NeedAlwaysSafeAdd_;
- bool ForceDump_;
- TWriteFrameCallbackPtr WriteFrameCallback_;
- TLogRecord::TMetaFlags MetaFlags_;
- friend class TEventRecord;
-};
-
-class TSelfFlushLogFrame: public TEventLogFrame, public TAtomicRefCount<TSelfFlushLogFrame> {
-public:
- TSelfFlushLogFrame(bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
- TSelfFlushLogFrame(IEventLog& parentLog, bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
- TSelfFlushLogFrame(IEventLog* parentLog, bool needAlwaysSafeAdd = false, TWriteFrameCallbackPtr writeFrameCallback = nullptr);
-
- virtual ~TSelfFlushLogFrame();
-};
-
-using TSelfFlushLogFramePtr = TIntrusivePtr<TSelfFlushLogFrame>;
-
-class IEventLog: public TAtomicRefCount<IEventLog> {
-public:
- class IErrorCallback {
- public:
- virtual ~IErrorCallback() {
- }
-
- virtual void OnWriteError() = 0;
- };
-
- class ISuccessCallback {
- public:
- virtual ~ISuccessCallback() {
- }
-
- virtual void OnWriteSuccess(const TBuffer& frameData) = 0;
- };
-
- virtual ~IEventLog();
-
- virtual void ReopenLog() = 0;
- virtual void CloseLog() = 0;
- virtual void Flush() = 0;
- virtual void SetErrorCallback(IErrorCallback*) {
- }
- virtual void SetSuccessCallback(ISuccessCallback*) {
- }
-
- template <class T>
- void LogEvent(const T& ev) {
- TEventLogFrame frame(*this);
- frame.LogEvent(ev);
- frame.Flush();
- }
-
- virtual bool HasNullBackend() const = 0;
-
- virtual void WriteFrame(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- TWriteFrameCallbackPtr writeFrameCallback = nullptr,
- TLogRecord::TMetaFlags metaFlags = {}) = 0;
-};
-
-struct TEventLogBackendOptions {
- bool UseSyncPageCacheBackend = false;
- size_t SyncPageCacheBackendBufferSize = 0;
- size_t SyncPageCacheBackendMaxPendingSize = 0;
-};
-
-class TEventLog: public IEventLog {
-public:
- /*
- * The contentFormat parameter specifies the format of the log content, for example which event
- * classes may appear in the log, what parameters those events carry, and so on.
- * The most significant byte of the parameter must be zero.
- */
- TEventLog(const TString& fileName, TEventLogFormat contentFormat, const TEventLogBackendOptions& backendOpts, TMaybe<TEventLogFormat> logFormat);
- TEventLog(const TString& fileName, TEventLogFormat contentFormat, const TEventLogBackendOptions& backendOpts = {});
- TEventLog(const TLog& log, TEventLogFormat contentFormat, TEventLogFormat logFormat = COMPRESSED_LOG_FORMAT_V4);
- TEventLog(TEventLogFormat contentFormat, TEventLogFormat logFormat = COMPRESSED_LOG_FORMAT_V4);
-
- ~TEventLog() override;
-
- void ReopenLog() override;
- void CloseLog() override;
- void Flush() override;
- void SetErrorCallback(IErrorCallback* errorCallback) override {
- ErrorCallback_ = errorCallback;
- }
- void SetSuccessCallback(ISuccessCallback* successCallback) override {
- SuccessCallback_ = successCallback;
- }
-
- template <class T>
- void LogEvent(const T& ev) {
- TEventLogFrame frame(*this);
- frame.LogEvent(ev);
- frame.Flush();
- }
-
- bool HasNullBackend() const override {
- return HasNullBackend_;
- }
-
- void WriteFrame(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- TWriteFrameCallbackPtr writeFrameCallback = nullptr,
- TLogRecord::TMetaFlags metaFlags = {}) override;
-
-private:
- mutable TLog Log_;
- TEventLogFormat ContentFormat_;
- const TEventLogFormat LogFormat_;
- bool HasNullBackend_;
- const NBlockCodecs::ICodec* const Lz4hcCodec_;
- const NBlockCodecs::ICodec* const ZstdCodec_;
- IErrorCallback* ErrorCallback_ = nullptr;
- ISuccessCallback* SuccessCallback_ = nullptr;
-};
-
-using TEventLogPtr = TIntrusivePtr<IEventLog>;
-
-class TEventLogWithSlave: public IEventLog {
-public:
- TEventLogWithSlave(IEventLog& parentLog)
- : Slave_(&parentLog)
- {
- }
-
- TEventLogWithSlave(const TEventLogPtr& parentLog)
- : SlavePtr_(parentLog)
- , Slave_(SlavePtr_.Get())
- {
- }
-
- ~TEventLogWithSlave() override {
- try {
- Slave().Flush();
- } catch (...) {
- }
- }
-
- void Flush() override {
- Slave().Flush();
- }
-
- void ReopenLog() override {
- return Slave().ReopenLog();
- }
- void CloseLog() override {
- return Slave().CloseLog();
- }
-
- bool HasNullBackend() const override {
- return Slave().HasNullBackend();
- }
-
- void WriteFrame(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- TWriteFrameCallbackPtr writeFrameCallback = nullptr,
- TLogRecord::TMetaFlags metaFlags = {}) override {
- Slave().WriteFrame(buffer, startTimestamp, endTimestamp, writeFrameCallback, std::move(metaFlags));
- }
-
- void SetErrorCallback(IErrorCallback* errorCallback) override {
- Slave().SetErrorCallback(errorCallback);
- }
-
- void SetSuccessCallback(ISuccessCallback* successCallback) override {
- Slave().SetSuccessCallback(successCallback);
- }
-
-protected:
- inline IEventLog& Slave() const {
- return *Slave_;
- }
-
-private:
- TEventLogPtr SlavePtr_;
- IEventLog* Slave_ = nullptr;
-};
-
-extern TAtomic eventlogFrameCounter;
-
-class TProtobufEventProcessor: public IEventProcessor {
-public:
- void ProcessEvent(const TEvent* ev) override final {
- ProcessEvent(ev, &Cout);
- }
-
- void ProcessEvent(const TEvent* ev, IOutputStream *out) {
- UpdateEventState(ev);
- DoProcessEvent(ev, out);
- EventState_.PrevEventTime = ev->Timestamp;
- }
-protected:
- virtual void DoProcessEvent(const TEvent * ev, IOutputStream *out) {
- ev->Print(*out, Options_, EventState_);
- (*out) << Endl;
- }
- ui32 CurrentFrameId_ = Max<ui32>();
- TEvent::TEventState EventState_;
-
-private:
- void UpdateEventState(const TEvent *ev) {
- if (ev->FrameId != CurrentFrameId_) {
- EventState_.FrameStartTime = ev->Timestamp;
- EventState_.PrevEventTime = ev->Timestamp;
- CurrentFrameId_ = ev->FrameId;
- }
- }
-};
-
-class TProtobufEventFactory: public IEventFactory {
-public:
- TProtobufEventFactory(NProtoBuf::TEventFactory* factory = NProtoBuf::TEventFactory::Instance())
- : EventFactory_(factory)
- {
- }
-
- TEvent* CreateLogEvent(TEventClass c) override;
-
- TEventLogFormat CurrentFormat() override {
- return 0;
- }
-
- TEventClass ClassByName(TStringBuf name) const override;
-
- TEventClass EventClassBegin() const override;
-
- TEventClass EventClassEnd() const override;
-
- ~TProtobufEventFactory() override = default;
-
-private:
- NProtoBuf::TEventFactory* EventFactory_;
-};
-
-THolder<TEvent> MakeProtobufLogEvent(TEventTimestamp ts, TEventClass eventId, google::protobuf::Message& ev);
-
-namespace NEvClass {
- IEventFactory* Factory();
- IEventProcessor* Processor();
-}
diff --git a/library/cpp/eventlog/eventlog_int.cpp b/library/cpp/eventlog/eventlog_int.cpp
deleted file mode 100644
index faa8c42cbe..0000000000
--- a/library/cpp/eventlog/eventlog_int.cpp
+++ /dev/null
@@ -1,12 +0,0 @@
-#include "eventlog_int.h"
-
-#include <util/string/cast.h>
-
-TMaybe<TEventLogFormat> ParseEventLogFormat(TStringBuf str) {
- EEventLogFormat format;
- if (TryFromString(str, format)) {
- return static_cast<TEventLogFormat>(format);
- } else {
- return {};
- }
-}
diff --git a/library/cpp/eventlog/eventlog_int.h b/library/cpp/eventlog/eventlog_int.h
deleted file mode 100644
index eb00fecfab..0000000000
--- a/library/cpp/eventlog/eventlog_int.h
+++ /dev/null
@@ -1,72 +0,0 @@
-#pragma once
-
-#include <util/stream/output.h>
-#include <util/generic/maybe.h>
-#include <util/generic/utility.h>
-#include <util/generic/yexception.h>
-#include <util/ysaveload.h>
-
-using TEventClass = ui32;
-using TEventLogFormat = ui32;
-using TEventTimestamp = ui64;
-
-constexpr TStringBuf COMPRESSED_LOG_FRAME_SYNC_DATA =
- "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00"
- "\x00\x00\x00\x00\xfe\x00\x00\xff\xff\x00\x00\xff\xff\x00"
- "\x00\xff\xff\x00\x00\xff\xff\x00\x00\xff\xff\x00\x00\xff"
- "\xff\x00\x00\xff\xff\x00\x00\xff"sv;
-
-static_assert(COMPRESSED_LOG_FRAME_SYNC_DATA.size() == 64);
-
-/*
- * Log format codes. The log format refers to the format of the log's internal service structures,
- * for example the header layout, whether compression is used, and so on.
- * Only the least significant byte is meaningful.
- */
-
-enum EEventLogFormat : TEventLogFormat {
- // Format version 1. Uses the LZQ compressor.
- COMPRESSED_LOG_FORMAT_V1 = 1,
-
- // Format version 2. Uses the ZLIB compressor. Adds header and payload checksums
- // and a compressor-type field.
- COMPRESSED_LOG_FORMAT_V2 = 2,
-
- // Format version 3. Uses the ZLIB compressor. Inside the frame payload, each event is prefixed with its size.
- COMPRESSED_LOG_FORMAT_V3 = 3,
-
- // Lz4hc codec + zlib
- COMPRESSED_LOG_FORMAT_V4 = 4 /* "zlib_lz4" */,
-
- // zstd
- COMPRESSED_LOG_FORMAT_V5 = 5 /* "zstd" */,
-};
-
-TMaybe<TEventLogFormat> ParseEventLogFormat(TStringBuf str);
-
-#pragma pack(push, 1)
-
-struct TCompressedFrameBaseHeader {
- TEventLogFormat Format;
- ui32 Length; // Length of the remainder of the frame, in bytes, following this header
- ui32 FrameId;
-};
-
-struct TCompressedFrameHeader {
- TEventTimestamp StartTimestamp;
- TEventTimestamp EndTimestamp;
- ui32 UncompressedDatalen; // Length of the data before compression
- ui32 PayloadChecksum; // Not used in log format version 1
-};
-
-struct TCompressedFrameHeader2: public TCompressedFrameHeader {
- ui8 CompressorVersion; // Currently unused
- ui32 HeaderChecksum;
-};
-
-#pragma pack(pop)
-
-Y_DECLARE_PODTYPE(TCompressedFrameBaseHeader);
-Y_DECLARE_PODTYPE(TCompressedFrameHeader);
-Y_DECLARE_PODTYPE(TCompressedFrameHeader2);
diff --git a/library/cpp/eventlog/events_extension.h b/library/cpp/eventlog/events_extension.h
deleted file mode 100644
index 0cf062f959..0000000000
--- a/library/cpp/eventlog/events_extension.h
+++ /dev/null
@@ -1,161 +0,0 @@
-#pragma once
-
-#include "event_field_output.h"
-
-#include <google/protobuf/descriptor.h>
-#include <google/protobuf/message.h>
-
-#include <library/cpp/threading/atomic/bool.h>
-#include <library/cpp/string_utils/base64/base64.h>
-
-#include <util/generic/map.h>
-#include <util/generic/deque.h>
-#include <util/generic/singleton.h>
-#include <util/string/hex.h>
-#include <util/system/guard.h>
-#include <util/system/mutex.h>
-
-namespace NProtoBuf {
- class TEventFactory {
- public:
- typedef ::google::protobuf::Message Message;
- typedef void (*TEventSerializer)(const Message* event, IOutputStream& output, EFieldOutputFlags flags);
- typedef void (*TRegistrationFunc)();
-
- private:
- class TFactoryItem {
- public:
- TFactoryItem(const Message* prototype, const TEventSerializer serializer)
- : Prototype_(prototype)
- , Serializer_(serializer)
- {
- }
-
- TStringBuf GetName() const {
- return Prototype_->GetDescriptor()->name();
- }
-
- Message* Create() const {
- return Prototype_->New();
- }
-
- void PrintEvent(const Message* event, IOutputStream& out, EFieldOutputFlags flags) const {
- (*Serializer_)(event, out, flags);
- }
-
- private:
- const Message* Prototype_;
- const TEventSerializer Serializer_;
- };
-
- typedef TMap<size_t, TFactoryItem> TFactoryMap;
-
- public:
- TEventFactory()
- : FactoryItems_()
- {
- }
-
- void ScheduleRegistration(TRegistrationFunc func) {
- EventRegistrators_.push_back(func);
- }
-
- void RegisterEvent(size_t eventId, const Message* prototype, const TEventSerializer serializer) {
- FactoryItems_.insert(std::make_pair(eventId, TFactoryItem(prototype, serializer)));
- }
-
- size_t IdByName(TStringBuf eventname) {
- DelayedRegistration();
- for (TFactoryMap::const_iterator it = FactoryItems_.begin(); it != FactoryItems_.end(); ++it) {
- if (it->second.GetName() == eventname)
- return it->first;
- }
-
- ythrow yexception() << "do not know event '" << eventname << "'";
- }
-
- TStringBuf NameById(size_t id) {
- DelayedRegistration();
- TFactoryMap::const_iterator it = FactoryItems_.find(id);
- return it != FactoryItems_.end() ? it->second.GetName() : TStringBuf();
- }
-
- Message* CreateEvent(size_t eventId) {
- DelayedRegistration();
- TFactoryMap::const_iterator it = FactoryItems_.find(eventId);
-
- if (it != FactoryItems_.end()) {
- return it->second.Create();
- }
-
- return nullptr;
- }
-
- const TMap<size_t, TFactoryItem>& FactoryItems() {
- DelayedRegistration();
- return FactoryItems_;
- }
-
- void PrintEvent(
- size_t eventId,
- const Message* event,
- IOutputStream& output,
- EFieldOutputFlags flags = {}) {
- DelayedRegistration();
- TFactoryMap::const_iterator it = FactoryItems_.find(eventId);
-
- if (it != FactoryItems_.end()) {
- it->second.PrintEvent(event, output, flags);
- }
- }
-
- static TEventFactory* Instance() {
- return Singleton<TEventFactory>();
- }
-
- private:
- void DelayedRegistration() {
- if (!DelayedRegistrationDone_) {
- TGuard<TMutex> guard(MutexEventRegistrators_);
- Y_UNUSED(guard);
- while (!EventRegistrators_.empty()) {
- EventRegistrators_.front()();
- EventRegistrators_.pop_front();
- }
- DelayedRegistrationDone_ = true;
- }
- }
-
- private:
- TMap<size_t, TFactoryItem> FactoryItems_;
- TDeque<TRegistrationFunc> EventRegistrators_;
- NAtomic::TBool DelayedRegistrationDone_ = false;
- TMutex MutexEventRegistrators_;
- };
-
- template <typename T>
- void PrintAsBytes(const T& obj, IOutputStream& output) {
- const ui8* b = reinterpret_cast<const ui8*>(&obj);
- const ui8* e = b + sizeof(T);
- const char* delim = "";
-
- while (b != e) {
- output << delim;
- output << (int)*b++;
- delim = ".";
- }
- }
-
- template <typename T>
- void PrintAsHex(const T& obj, IOutputStream& output) {
- output << "0x";
- output << HexEncode(&obj, sizeof(T));
- }
-
- inline void PrintAsBase64(TStringBuf data, IOutputStream& output) {
- if (!data.empty()) {
- output << Base64Encode(data);
- }
- }
-
-}
diff --git a/library/cpp/eventlog/iterator.cpp b/library/cpp/eventlog/iterator.cpp
deleted file mode 100644
index 71f955bca8..0000000000
--- a/library/cpp/eventlog/iterator.cpp
+++ /dev/null
@@ -1,88 +0,0 @@
-#include "iterator.h"
-
-#include <library/cpp/streams/growing_file_input/growing_file_input.h>
-
-#include <util/string/cast.h>
-#include <util/string/split.h>
-#include <util/string/type.h>
-#include <util/stream/file.h>
-
-using namespace NEventLog;
-
-namespace {
- inline TIntrusivePtr<TEventFilter> ConstructEventFilter(bool enableEvents, const TString& evList, IEventFactory* fac) {
- if (evList.empty()) {
- return nullptr;
- }
-
- TVector<TString> events;
-
- StringSplitter(evList).Split(',').SkipEmpty().Collect(&events);
- if (events.empty()) {
- return nullptr;
- }
-
- TIntrusivePtr<TEventFilter> filter(new TEventFilter(enableEvents));
-
- for (const auto& event : events) {
- if (IsNumber(event))
- filter->AddEventClass(FromString<size_t>(event));
- else
- filter->AddEventClass(fac->ClassByName(event));
- }
-
- return filter;
- }
-
- struct TIterator: public IIterator {
- inline TIterator(const TOptions& o, IEventFactory* fac)
- : First(true)
- {
- if (o.FileName.size()) {
- if (o.ForceStreamMode || o.TailFMode) {
- FileInput.Reset(o.TailFMode ? (IInputStream*)new TGrowingFileInput(o.FileName) : (IInputStream*)new TUnbufferedFileInput(o.FileName));
- FrameStream.Reset(new TFrameStreamer(*FileInput, fac, o.FrameFilter));
- } else {
- FrameStream.Reset(new TFrameStreamer(o.FileName, o.StartTime, o.EndTime, o.MaxRequestDuration, fac, o.FrameFilter));
- }
- } else {
- FrameStream.Reset(new TFrameStreamer(*o.Input, fac, o.FrameFilter));
- }
-
- EvFilter = ConstructEventFilter(o.EnableEvents, o.EvList, fac);
- EventStream.Reset(new TEventStreamer(*FrameStream, o.StartTime, o.EndTime, o.ForceStrongOrdering, EvFilter, o.ForceLosslessStrongOrdering));
- }
-
- TConstEventPtr Next() override {
- if (First) {
- First = false;
-
- if (!EventStream->Avail()) {
- return nullptr;
- }
- } else {
- if (!EventStream->Next()) {
- return nullptr;
- }
- }
-
- return **EventStream;
- }
-
- THolder<IInputStream> FileInput;
- THolder<TFrameStreamer> FrameStream;
- TIntrusivePtr<TEventFilter> EvFilter;
- THolder<TEventStreamer> EventStream;
- bool First;
- };
-}
-
-IIterator::~IIterator() = default;
-
-THolder<IIterator> NEventLog::CreateIterator(const TOptions& o, IEventFactory* fac) {
- return MakeHolder<TIterator>(o, fac);
-}
-
-THolder<IIterator> NEventLog::CreateIterator(const TOptions& o) {
- return MakeHolder<TIterator>(o, NEvClass::Factory());
-}
diff --git a/library/cpp/eventlog/iterator.h b/library/cpp/eventlog/iterator.h
deleted file mode 100644
index 71a61ed549..0000000000
--- a/library/cpp/eventlog/iterator.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#pragma once
-
-#include <util/stream/input.h>
-#include <util/generic/ptr.h>
-#include <util/generic/string.h>
-#include <util/generic/iterator.h>
-
-#include "eventlog.h"
-#include "logparser.h"
-
-namespace NEventLog {
- struct TOptions {
- inline TOptions& SetFileName(const TString& fileName) {
- FileName = fileName;
-
- return *this;
- }
-
- inline TOptions& SetForceStrongOrdering(bool v) {
- if(!ForceLosslessStrongOrdering) {
- ForceStrongOrdering = v;
- }
-
- return *this;
- }
-
- ui64 StartTime = MIN_START_TIME;
- ui64 EndTime = MAX_END_TIME;
- ui64 MaxRequestDuration = MAX_REQUEST_DURATION;
- TString FileName;
- bool ForceStrongOrdering = false;
- bool ForceWeakOrdering = false;
- bool EnableEvents = true;
- TString EvList;
- bool ForceStreamMode = false;
- bool ForceLosslessStrongOrdering = false;
- bool TailFMode = false;
- IInputStream* Input = &Cin;
- IFrameFilterRef FrameFilter;
- };
-
- class IIterator: public TInputRangeAdaptor<IIterator> {
- public:
- virtual ~IIterator();
-
- virtual TConstEventPtr Next() = 0;
- };
-
- THolder<IIterator> CreateIterator(const TOptions& o);
- THolder<IIterator> CreateIterator(const TOptions& o, IEventFactory* fac);
-}
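
For readers skimming this removal, a minimal usage sketch of the iterator API declared above (not taken from the deleted sources; the file name and the numeric event class ids are hypothetical placeholders):

#include <library/cpp/eventlog/iterator.h>

#include <util/stream/output.h>

int main() {
    NEventLog::TOptions opts;
    opts.SetFileName("events.log")      // read from a file instead of the default Cin
        .SetForceStrongOrdering(true);  // deliver events in timestamp order
    opts.EvList = "302,303";            // keep only these event class ids (hypothetical values)

    // The single-argument overload uses the default NEvClass::Factory().
    THolder<NEventLog::IIterator> it = NEventLog::CreateIterator(opts);

    // Next() returns a null pointer once the stream is depleted.
    while (TConstEventPtr ev = it->Next()) {
        Cout << ev->Timestamp << " class=" << ev->Class << Endl;
    }
    return 0;
}
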
diff --git a/library/cpp/eventlog/logparser.cpp b/library/cpp/eventlog/logparser.cpp
deleted file mode 100644
index 6f8959f788..0000000000
--- a/library/cpp/eventlog/logparser.cpp
+++ /dev/null
@@ -1,814 +0,0 @@
-#include "logparser.h"
-#include "evdecoder.h"
-
-#include <util/stream/output.h>
-#include <util/stream/zlib.h>
-#include <util/digest/murmur.h>
-#include <util/generic/algorithm.h>
-#include <util/generic/scope.h>
-#include <util/generic/hash_set.h>
-#include <util/string/split.h>
-#include <util/string/cast.h>
-#include <util/string/escape.h>
-#include <util/string/builder.h>
-
-#include <contrib/libs/re2/re2/re2.h>
-
-#include <algorithm>
-#include <array>
-
-namespace {
- bool FastforwardUntilSyncHeader(IInputStream* in) {
- // Usually this function finds the correct header on the first attempt
- std::array<char, COMPRESSED_LOG_FRAME_SYNC_DATA.size()> buffer;
- if (in->Load(buffer.data(), buffer.size()) != buffer.size()) {
- return false;
- }
-
- auto begin = buffer.begin();
-
- for (;;) {
- if (std::mismatch(
- begin, buffer.end(),
- COMPRESSED_LOG_FRAME_SYNC_DATA.begin()).first == buffer.end() &&
- std::mismatch(
- buffer.begin(), begin,
- COMPRESSED_LOG_FRAME_SYNC_DATA.begin() + (buffer.end() - begin)).first == begin) {
- return true;
- }
- if (!in->ReadChar(*begin)) {
- return false;
- }
- ++begin;
- if (begin == buffer.end()) {
- begin = buffer.begin();
- }
- }
- }
-
- bool HasCorrectChecksum(const TFrameHeader& header) {
- // Compute the hash over all fields of the header that has been read, except the field holding the hash of the header itself.
- const size_t baseSize = sizeof(TCompressedFrameBaseHeader) + sizeof(TCompressedFrameHeader2) - sizeof(ui32);
- const ui32 checksum = MurmurHash<ui32>(&header.Basehdr, baseSize);
- return checksum == header.Framehdr.HeaderChecksum;
- }
-
- TMaybe<TFrameHeader> FindNextFrameHeader(IInputStream* in) {
- for (;;) {
- if (FastforwardUntilSyncHeader(in)) {
- try {
- return TFrameHeader(*in);
- } catch (const TFrameLoadError& err) {
- Cdbg << err.what() << Endl;
- in->Skip(err.SkipAfter);
- }
- } else {
- return Nothing();
- }
- }
- }
-
- std::pair<TMaybe<TFrameHeader>, TStringBuf> FindNextFrameHeader(TStringBuf span) {
- for (;;) {
- auto iter = std::search(
- span.begin(), span.end(),
- COMPRESSED_LOG_FRAME_SYNC_DATA.begin(), COMPRESSED_LOG_FRAME_SYNC_DATA.end());
- const size_t offset = iter - span.begin();
-
- if (offset != span.size()) {
- span = span.substr(offset);
- try {
- TMemoryInput in(
- span.data() + COMPRESSED_LOG_FRAME_SYNC_DATA.size(),
- span.size() - COMPRESSED_LOG_FRAME_SYNC_DATA.size());
- return {TFrameHeader(in), span};
- } catch (const TFrameLoadError& err) {
- Cdbg << err.what() << Endl;
- span = span.substr(err.SkipAfter);
- }
- } else {
- return {Nothing(), {}};
- }
- }
- }
-
- size_t FindFrames(const TStringBuf span, ui64 start, ui64 end, ui64 maxRequestDuration) {
- Y_ENSURE(start <= end);
-
- const auto leftTimeBound = start - Min(start, maxRequestDuration);
- const auto rightTimeBound = end + Min(maxRequestDuration, Max<ui64>() - end);
-
- TStringBuf subspan = span;
- TMaybe<TFrameHeader> maybeLeftFrame;
- std::tie(maybeLeftFrame, subspan) = FindNextFrameHeader(subspan);
-
- if (!maybeLeftFrame || maybeLeftFrame->EndTime() > rightTimeBound) {
- return span.size();
- }
-
- if (maybeLeftFrame->StartTime() > leftTimeBound) {
- return 0;
- }
-
- while (subspan.size() > maybeLeftFrame->FullLength()) {
- const auto mid = subspan.data() + subspan.size() / 2;
- auto [midFrame, rightHalfSpan] = FindNextFrameHeader({mid, subspan.data() + subspan.size()});
- if (!midFrame) {
- // If mid falls inside the last frame, we lose that frame here, which means
- // the previous frame will be found as the result.
- // This is fine, because we will iterate over the frames starting from it.
- subspan = subspan.substr(0, subspan.size() / 2);
- continue;
- }
- if (midFrame->StartTime() <= leftTimeBound) {
- maybeLeftFrame = midFrame;
- subspan = rightHalfSpan;
- } else {
- subspan = subspan.substr(0, subspan.size() / 2);
- }
- }
-
- return subspan.data() - span.data();
- }
-}
-
-TFrameHeader::TFrameHeader(IInputStream& in) {
- try {
- ::Load(&in, Basehdr);
-
- Y_ENSURE(Basehdr.Length, "Empty frame additional data");
-
- ::Load(&in, Framehdr);
- switch (LogFormat()) {
- case COMPRESSED_LOG_FORMAT_V1:
- break;
-
- case COMPRESSED_LOG_FORMAT_V2:
- case COMPRESSED_LOG_FORMAT_V3:
- case COMPRESSED_LOG_FORMAT_V4:
- case COMPRESSED_LOG_FORMAT_V5:
- Y_ENSURE(!Framehdr.CompressorVersion, "Wrong compressor");
-
- Y_ENSURE(HasCorrectChecksum(*this), "Wrong header checksum");
- break;
-
- default:
- ythrow yexception() << "Unsupported log structure format";
- };
-
- Y_ENSURE(Framehdr.StartTimestamp <= Framehdr.EndTimestamp, "Wrong start/end timestamps");
-
- // Each frame must contain at least one event.
- Y_ENSURE(Framehdr.UncompressedDatalen, "Empty frame payload");
- } catch (...) {
- TString location = "";
- if (const auto* cnt = dynamic_cast<TCountingInput *>(&in)) {
- location = "@ " + ToString(cnt->Counter());
- }
- ythrow TFrameLoadError(FrameLength()) << "Frame Load Error" << location << ": " << CurrentExceptionMessage();
- }
-}
-
-TFrame::TFrame(IInputStream& in, TFrameHeader header, IEventFactory* fac)
- : TFrameHeader(header)
- , Limiter_(MakeHolder<TLengthLimitedInput>(&in, header.FrameLength()))
- , Fac_(fac)
-{
- if (auto* cnt = dynamic_cast<TCountingInput *>(&in)) {
- Address_ = cnt->Counter() - sizeof(TFrameHeader);
- } else {
- Address_ = 0;
- }
-}
-
-TFrame::TIterator TFrame::GetIterator(TIntrusiveConstPtr<TEventFilter> eventFilter) const {
- if (EventsCache_.empty()) {
- for (TFrameDecoder decoder{*this, eventFilter.Get()}; decoder.Avail(); decoder.Next()) {
- EventsCache_.emplace_back(*decoder);
- }
- }
-
- return TIterator(*this, eventFilter);
-}
-
-void TFrame::ClearEventsCache() const {
- EventsCache_.clear();
-}
-
-TString TFrame::GetCompressedFrame() const {
- const auto left = Limiter_->Left();
- TString payload = Limiter_->ReadAll();
- Y_ENSURE(payload.size() == left, "Could not read frame payload: premature end of stream");
- const ui32 checksum = MurmurHash<ui32>(payload.data(), payload.size());
- Y_ENSURE(checksum == Framehdr.PayloadChecksum, "Invalid frame checksum");
-
- return payload;
-}
-
-TString TFrame::GetRawFrame() const {
- TString frameBuf = GetCompressedFrame();
- TStringInput sin(frameBuf);
- return TZLibDecompress{&sin}.ReadAll();
-}
-
-TFrame::TIterator::TIterator(const TFrame& frame, TIntrusiveConstPtr<TEventFilter> filter)
- : Frame_(frame)
- , Size_(frame.EventsCache_.size())
- , Filter_(filter)
- , Index_(0)
-{
- SkipToValidEvent();
-}
-
-TConstEventPtr TFrame::TIterator::operator*() const {
- return Frame_.GetEvent(Index_);
-}
-
-bool TFrame::TIterator::Next() {
- Index_++;
- SkipToValidEvent();
- return Index_ < Size_;
-}
-
-void TFrame::TIterator::SkipToValidEvent() {
- if (!Filter_) {
- return;
- }
-
- for (; Index_ < Size_; ++Index_) {
- if (Filter_->EventAllowed(Frame_.GetEvent(Index_)->Class)) {
- break;
- }
- }
-}
-
-TMaybe<TFrame> FindNextFrame(IInputStream* in, IEventFactory* eventFactory) {
- if (auto header = FindNextFrameHeader(in)) {
- return TFrame{*in, *header, eventFactory};
- } else {
- return Nothing();
- }
-}
-
-TContainsEventFrameFilter::TContainsEventFrameFilter(const TString& unparsedMatchGroups, const IEventFactory* eventFactory) {
- TVector<TStringBuf> tokens;
-
- SplitWithEscaping(tokens, unparsedMatchGroups, "/");
-
- // Amount of match groups
- size_t size = tokens.size();
- MatchGroups.resize(size);
-
- for (size_t i = 0; i < size; i++) {
- TMatchGroup& group = MatchGroups[i];
- TVector<TStringBuf> groupTokens;
- SplitWithEscaping(groupTokens, tokens[i], ":");
-
- Y_ENSURE(groupTokens.size() == 3);
-
- try {
- group.EventID = eventFactory->ClassByName(groupTokens[0]);
- } catch (yexception& e) {
- if (!TryFromString<TEventClass>(groupTokens[0], group.EventID)) {
- e << "\nAppend:\n" << "Cannot derive EventId from EventType: " << groupTokens[0];
- throw e;
- }
- }
-
- group.FieldName = groupTokens[1];
- group.ValueToMatch = UnescapeCharacters(groupTokens[2], "/:");
- }
-}
-
-bool TContainsEventFrameFilter::FrameAllowed(const TFrame& frame) const {
- THashSet<size_t> toMatchSet;
- for (size_t i = 0; i < MatchGroups.size(); i++) {
- toMatchSet.insert(i);
- }
-
- for (auto it = frame.GetIterator(); it.Avail(); it.Next()) {
- TConstEventPtr event(*it);
- TVector<size_t> indicesToErase;
-
- if (!toMatchSet.empty()) {
- const NProtoBuf::Message* message = event->GetProto();
- const google::protobuf::Descriptor* descriptor = message->GetDescriptor();
- const google::protobuf::Reflection* reflection = message->GetReflection();
-
- Y_ENSURE(descriptor);
- Y_ENSURE(reflection);
-
- for (size_t groupIndex : toMatchSet) {
- const TMatchGroup& group = MatchGroups[groupIndex];
-
- if (event->Class == group.EventID) {
- TVector<TString> parts = StringSplitter(group.FieldName).Split('.').ToList<TString>();
- TString lastPart = std::move(parts.back());
- parts.pop_back();
-
- for (auto part : parts) {
- auto fieldDescriptor = descriptor->FindFieldByName(part);
- Y_ENSURE(fieldDescriptor, "Cannot find field \"" + part + "\". Full fieldname is \"" + group.FieldName + "\".");
-
- message = &reflection->GetMessage(*message, fieldDescriptor);
- descriptor = message->GetDescriptor();
- reflection = message->GetReflection();
-
- Y_ENSURE(descriptor);
- Y_ENSURE(reflection);
- }
-
- const google::protobuf::FieldDescriptor* fieldDescriptor = descriptor->FindFieldByName(lastPart);
- Y_ENSURE(fieldDescriptor, "Cannot find field \"" + lastPart + "\". Full fieldname is \"" + group.FieldName + "\".");
-
- TString fieldValue = GetEventFieldAsString(message, fieldDescriptor, reflection);
- if (re2::RE2::FullMatch(fieldValue, group.ValueToMatch)) {
- indicesToErase.push_back(groupIndex);
- }
- }
- }
-
- for (size_t idx : indicesToErase) {
- toMatchSet.erase(idx);
- }
-
- if (toMatchSet.empty()) {
- return true;
- }
- }
- }
-
- return toMatchSet.empty();
-}
-
-void SplitWithEscaping(TVector<TStringBuf>& tokens, const TStringBuf& stringToSplit, const TStringBuf& externalCharacterSet) {
- size_t tokenStart = 0;
- const TString characterSet = TString::Join("\\", externalCharacterSet);
-
- for (size_t position = stringToSplit.find_first_of(characterSet); position != TString::npos; position = stringToSplit.find_first_of(characterSet, position + 1)) {
- if (stringToSplit[position] == '\\') {
- position++;
- } else {
- if (tokenStart != position) {
- tokens.push_back(TStringBuf(stringToSplit, tokenStart, position - tokenStart));
- }
- tokenStart = position + 1;
- }
- }
-
- if (tokenStart < stringToSplit.size()) {
- tokens.push_back(TStringBuf(stringToSplit, tokenStart, stringToSplit.size() - tokenStart));
- }
-}
-
-TString UnescapeCharacters(const TStringBuf& stringToUnescape, const TStringBuf& characterSet) {
- TStringBuilder stringBuilder;
- size_t tokenStart = 0;
-
- for (size_t position = stringToUnescape.find('\\', 0u); position != TString::npos; position = stringToUnescape.find('\\', position + 2)) {
- if (position + 1 < stringToUnescape.size() && characterSet.find(stringToUnescape[position + 1]) != TString::npos) {
- stringBuilder << TStringBuf(stringToUnescape, tokenStart, position - tokenStart);
- tokenStart = position + 1;
- }
- }
-
- if (tokenStart < stringToUnescape.size()) {
- stringBuilder << TStringBuf(stringToUnescape, tokenStart, stringToUnescape.size() - tokenStart);
- }
-
- return stringBuilder;
-}
-
-TString GetEventFieldAsString(const NProtoBuf::Message* message, const google::protobuf::FieldDescriptor* fieldDescriptor, const google::protobuf::Reflection* reflection) {
- Y_ENSURE(message);
- Y_ENSURE(fieldDescriptor);
- Y_ENSURE(reflection);
-
- TString result;
- switch (fieldDescriptor->type()) {
- case google::protobuf::FieldDescriptor::Type::TYPE_DOUBLE:
- result = ToString(reflection->GetDouble(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_FLOAT:
- result = ToString(reflection->GetFloat(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_BOOL:
- result = ToString(reflection->GetBool(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_INT32:
- result = ToString(reflection->GetInt32(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_UINT32:
- result = ToString(reflection->GetUInt32(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_INT64:
- result = ToString(reflection->GetInt64(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_UINT64:
- result = ToString(reflection->GetUInt64(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_STRING:
- result = ToString(reflection->GetString(*message, fieldDescriptor));
- break;
- case google::protobuf::FieldDescriptor::Type::TYPE_ENUM:
- {
- const NProtoBuf::EnumValueDescriptor* enumValueDescriptor = reflection->GetEnum(*message, fieldDescriptor);
- result = ToString(enumValueDescriptor->name());
- }
- break;
- default:
- throw yexception() << "GetEventFieldAsString for type " << fieldDescriptor->type_name() << " is not implemented.";
- }
- return result;
-}
-
-TFrameStreamer::TFrameStreamer(IInputStream& s, IEventFactory* fac, IFrameFilterRef ff)
- : In_(&s)
- , FrameFilter_(ff)
- , EventFactory_(fac)
-{
- Frame_ = FindNextFrame(&In_, EventFactory_);
-
- SkipToAllowedFrame();
-}
-
-TFrameStreamer::TFrameStreamer(
- const TString& fileName,
- ui64 startTime,
- ui64 endTime,
- ui64 maxRequestDuration,
- IEventFactory* fac,
- IFrameFilterRef ff)
- : File_(TBlob::FromFile(fileName))
- , MemoryIn_(File_.Data(), File_.Size())
- , In_(&MemoryIn_)
- , StartTime_(startTime)
- , EndTime_(endTime)
- , CutoffTime_(endTime + Min(maxRequestDuration, Max<ui64>() - endTime))
- , FrameFilter_(ff)
- , EventFactory_(fac)
-{
- In_.Skip(FindFrames(File_.AsStringBuf(), startTime, endTime, maxRequestDuration));
- Frame_ = FindNextFrame(&In_, fac);
- SkipToAllowedFrame();
-}
-
-TFrameStreamer::~TFrameStreamer() = default;
-
-bool TFrameStreamer::Avail() const {
- return Frame_.Defined();
-}
-
-const TFrame& TFrameStreamer::operator*() const {
- Y_ENSURE(Frame_, "Frame streamer depleted");
-
- return *Frame_;
-}
-
-bool TFrameStreamer::Next() {
- DoNext();
- SkipToAllowedFrame();
-
- return Frame_.Defined();
-}
-
-bool TFrameStreamer::AllowedTimeRange(const TFrame& frame) const {
- const bool allowedStartTime = (StartTime_ == 0) || ((StartTime_ <= frame.StartTime()) && (frame.StartTime() <= EndTime_));
- const bool allowedEndTime = (EndTime_ == 0) || ((StartTime_ <= frame.EndTime()) && (frame.EndTime() <= EndTime_));
- return allowedStartTime || allowedEndTime;
-}
-
-bool TFrameStreamer::DoNext() {
- if (!Frame_) {
- return false;
- }
- In_.Skip(Frame_->Limiter_->Left());
- Frame_ = FindNextFrame(&In_, EventFactory_);
-
- if (Frame_ && CutoffTime_ > 0 && Frame_->EndTime() > CutoffTime_) {
- Frame_.Clear();
- }
-
- return Frame_.Defined();
-}
-
-namespace {
- struct TDecodeBuffer {
- TDecodeBuffer(const TString codec, IInputStream& src, size_t bs) {
- TBuffer from(bs);
-
- {
- TBufferOutput b(from);
- TransferData(&src, &b);
- }
-
- NBlockCodecs::Codec(codec)->Decode(from, DecodeBuffer);
- }
-
- explicit TDecodeBuffer(IInputStream& src) {
- TBufferOutput b(DecodeBuffer);
- TransferData(&src, &b);
- }
-
- TBuffer DecodeBuffer;
- };
-
- class TBlockCodecStream: private TDecodeBuffer, public TBufferInput {
- public:
- TBlockCodecStream(const TString codec, IInputStream& src, size_t bs)
- : TDecodeBuffer(codec, src, bs)
- , TBufferInput(DecodeBuffer)
- {}
-
- explicit TBlockCodecStream(IInputStream& src)
- : TDecodeBuffer(src)
- , TBufferInput(DecodeBuffer)
- {}
- };
-}
-
-TFrameDecoder::TFrameDecoder(const TFrame& fr, const TEventFilter* const filter, bool strict, bool withRawData)
- : Frame_(fr)
- , Event_(nullptr)
- , Flt_(filter)
- , Fac_(fr.Fac_)
- , EndOfFrame_(new TEndOfFrameEvent(Frame_.EndTime()))
- , Strict_(strict)
- , WithRawData_(withRawData)
-{
- switch (fr.LogFormat()) {
- case COMPRESSED_LOG_FORMAT_V2:
- case COMPRESSED_LOG_FORMAT_V3:
- case COMPRESSED_LOG_FORMAT_V4:
- case COMPRESSED_LOG_FORMAT_V5: {
- const auto payload = fr.GetCompressedFrame();
- TMemoryInput payloadInput{payload};
-
- if (fr.LogFormat() == COMPRESSED_LOG_FORMAT_V5) {
- Decompressor_.Reset(new TBlockCodecStream("zstd_1", payloadInput, payload.size()));
- } else {
- TZLibDecompress zlib(&payloadInput);
- Decompressor_.Reset(new TBlockCodecStream(zlib));
- if (fr.LogFormat() == COMPRESSED_LOG_FORMAT_V4) {
- Decompressor_.Reset(new TBlockCodecStream("lz4hc", *Decompressor_, payload.size()));
- }
- }
-
- break;
- }
-
- default:
- ythrow yexception() << "unsupported log format: " << fr.LogFormat() << Endl;
- break;
- };
-
- if (WithRawData_) {
- TBufferOutput out(UncompressedData_);
- TLengthLimitedInput limiter(Decompressor_.Get(), fr.Framehdr.UncompressedDatalen);
-
- TransferData(&limiter, &out);
- Decompressor_.Reset(new TMemoryInput(UncompressedData_.data(), UncompressedData_.size()));
- }
-
- Limiter_.Reset(new TLengthLimitedInput(Decompressor_.Get(), fr.Framehdr.UncompressedDatalen));
-
- Decode();
-}
-
-TFrameDecoder::~TFrameDecoder() = default;
-
-bool TFrameDecoder::Avail() const {
- return HaveData();
-}
-
-TConstEventPtr TFrameDecoder::operator*() const {
- Y_ENSURE(HaveData(), "Decoder depleted");
-
- return Event_;
-}
-
-bool TFrameDecoder::Next() {
- if (HaveData()) {
- Decode();
- }
-
- return HaveData();
-}
-
-void TFrameDecoder::Decode() {
- Event_ = nullptr;
- const bool framed = (Frame_.LogFormat() == COMPRESSED_LOG_FORMAT_V3) || (Frame_.LogFormat() == COMPRESSED_LOG_FORMAT_V4 || Frame_.LogFormat() == COMPRESSED_LOG_FORMAT_V5);
-
- size_t evBegin = 0;
- size_t evEnd = 0;
- if (WithRawData_)
- evBegin = UncompressedData_.Size() - Limiter_->Left();
-
- while (Limiter_->Left() && !(Event_ = DecodeEvent(*Limiter_, framed, Frame_.Address(), Flt_, Fac_, Strict_).Release())) {
- }
-
- if (WithRawData_) {
- evEnd = UncompressedData_.Size() - Limiter_->Left();
- RawEventData_ = TStringBuf(UncompressedData_.data() + evBegin, UncompressedData_.data() + evEnd);
- }
-
- if (!Event_ && (!Flt_ || (Flt_->EventAllowed(TEndOfFrameEvent::EventClass)))) {
- Event_ = EndOfFrame_.Release();
- }
-
- if (!!Event_) {
- Event_->FrameId = Frame_.FrameId();
- }
-}
-
-const TStringBuf TFrameDecoder::GetRawEvent() const {
- return RawEventData_;
-}
-
-TEventStreamer::TEventStreamer(TFrameStream& fs, ui64 s, ui64 e, bool strongOrdering, TIntrusivePtr<TEventFilter> filter, bool losslessStrongOrdering)
- : Frames_(fs)
- , Start_(s)
- , End_(e)
- , MaxEndTimestamp_(0)
- , Frontier_(0)
- , StrongOrdering_(strongOrdering)
- , LosslessStrongOrdering_(losslessStrongOrdering)
- , EventFilter_(filter)
-{
-
- if (Start_ > End_) {
- ythrow yexception() << "Wrong main interval";
- }
-
- TEventStreamer::Next();
-}
-
-TEventStreamer::~TEventStreamer() = default;
-
-bool TEventStreamer::Avail() const {
- return Events_.Avail() && (*Events_)->Timestamp <= Frontier_;
-}
-
-TConstEventPtr TEventStreamer::operator*() const {
- Y_ENSURE(TEventStreamer::Avail(), "Event streamer depleted");
-
- return *Events_;
-}
-
-bool TEventStreamer::Next() {
- if (Events_.Avail() && Events_.Next() && (*Events_)->Timestamp <= Frontier_) {
- return true;
- }
-
- for (;;) {
- if (!LoadMoreEvents()) {
- return false;
- }
-
- if (TEventStreamer::Avail()) {
- return true;
- }
- }
-}
-
-/*
-Two parameters are used in the function:
-Frontier - the moment in time up to which, inclusively, all the log events have made their way
- into the buffer (and might have already been extracted out of it).
-Horizon - the moment in time equal to Frontier + MAX_REQUEST_DURATION.
-To get all the log events up to the Frontier inclusively,
- frames need to be read until the "end time" of the current frame exceeds the Horizon.
-*/
-bool TEventStreamer::LoadMoreEvents() {
- if (!Frames_.Avail()) {
- return false;
- }
-
- const TFrame& fr1 = *Frames_;
- const ui64 maxRequestDuration = (StrongOrdering_ ? MAX_REQUEST_DURATION : 0);
-
- if (fr1.EndTime() <= Frontier_ + maxRequestDuration) {
- ythrow yexception() << "Wrong frame stream state";
- }
-
- if (Frontier_ >= End_) {
- return false;
- }
-
- const ui64 old_frontier = Frontier_;
- Frontier_ = fr1.EndTime();
-
- {
- Y_DEFER {
- Events_.Reorder(StrongOrdering_);
- };
-
- for (; Frames_.Avail(); Frames_.Next()) {
- const TFrame& fr2 = *Frames_;
-
- // Frames need to start later than the Frontier.
- if (StrongOrdering_ && fr2.StartTime() <= old_frontier) {
- Cdbg << "Invalid frame encountered" << Endl;
- continue;
- }
-
- if (fr2.EndTime() > MaxEndTimestamp_) {
- MaxEndTimestamp_ = fr2.EndTime();
- }
-
- if (fr2.EndTime() > Frontier_ + maxRequestDuration && !LosslessStrongOrdering_) {
- return true;
- }
-
- // Check that the frame falls within the main time interval.
- if (fr2.EndTime() >= Start_ && fr2.StartTime() <= End_) {
- TransferEvents(fr2);
- }
- }
- }
-
- Frontier_ = MaxEndTimestamp_;
-
- return true;
-}
-
-void TEventStreamer::TransferEvents(const TFrame& fr) {
- Events_.SetCheckpoint();
-
- try {
- for (auto it = fr.GetIterator(EventFilter_); it.Avail(); it.Next()) {
- TConstEventPtr ev = *it;
-
- if (ev->Timestamp > fr.EndTime() || ev->Timestamp < fr.StartTime()) {
- ythrow TInvalidEventTimestamps() << "Event timestamp out of frame range";
- }
-
- if (ev->Timestamp >= Start_ && ev->Timestamp <= End_) {
- Events_.Append(ev, StrongOrdering_);
- }
- }
- } catch (const TInvalidEventTimestamps& err) {
- Events_.Rollback();
- Cdbg << "EventsTransfer error: InvalidEventTimestamps: " << err.what() << Endl;
- } catch (const TFrameLoadError& err) {
- Events_.Rollback();
- Cdbg << "EventsTransfer error: " << err.what() << Endl;
- } catch (const TEventDecoderError& err) {
- Events_.Rollback();
- Cdbg << "EventsTransfer error: EventDecoder error: " << err.what() << Endl;
- } catch (const TZLibDecompressorError& err) {
- Events_.Rollback();
- Cdbg << "EventsTransfer error: ZLibDecompressor error: " << err.what() << Endl;
- } catch (...) {
- Events_.Rollback();
- throw;
- }
-}
-
-void TEventStreamer::TEventBuffer::SetCheckpoint() {
- BufLen_ = Buffer_.size();
-}
-
-void TEventStreamer::TEventBuffer::Rollback() {
- Buffer_.resize(BufLen_);
-}
-
-void TEventStreamer::TEventBuffer::Reorder(bool strongOrdering) {
- SetCheckpoint();
-
- std::reverse(Buffer_.begin(), Buffer_.end());
-
- if (strongOrdering) {
- StableSort(Buffer_.begin(), Buffer_.end(), [&](const auto& a, const auto& b) {
- return (a->Timestamp > b->Timestamp) ||
- ((a->Timestamp == b->Timestamp) && !a->Class && b->Class);
- });
- }
-}
-
-void TEventStreamer::TEventBuffer::Append(TConstEventPtr ev, bool strongOrdering) {
- // Events in the buffer output must be in ascending order.
- Y_ENSURE(!strongOrdering || ev->Timestamp >= LastTimestamp_, "Trying to append out-of-order event");
-
- Buffer_.push_back(std::move(ev));
-}
-
-bool TEventStreamer::TEventBuffer::Avail() const {
- return !Buffer_.empty();
-}
-
-TConstEventPtr TEventStreamer::TEventBuffer::operator*() const {
- Y_ENSURE(!Buffer_.empty(), "Event buffer is empty");
-
- return Buffer_.back();
-}
-
-bool TEventStreamer::TEventBuffer::Next() {
- if (!Buffer_.empty()) {
- LastTimestamp_ = Buffer_.back()->Timestamp;
- Buffer_.pop_back();
- return !Buffer_.empty();
- } else {
- return false;
- }
-}
diff --git a/library/cpp/eventlog/logparser.h b/library/cpp/eventlog/logparser.h
deleted file mode 100644
index f819e72589..0000000000
--- a/library/cpp/eventlog/logparser.h
+++ /dev/null
@@ -1,343 +0,0 @@
-#pragma once
-
-#include <util/generic/ptr.h>
-#include <util/generic/yexception.h>
-#include <util/generic/vector.h>
-#include <util/generic/set.h>
-#include <util/generic/maybe.h>
-#include <util/memory/blob.h>
-#include <util/stream/length.h>
-#include <util/stream/mem.h>
-
-#include "eventlog_int.h"
-#include "eventlog.h"
-#include "common.h"
-
-class IInputStream;
-
-static const ui64 MAX_REQUEST_DURATION = 60'000'000;
-static const ui64 MIN_START_TIME = MAX_REQUEST_DURATION;
-static const ui64 MAX_END_TIME = ((ui64)-1) - MAX_REQUEST_DURATION;
-
-class TEventFilter: public TSet<TEventClass>, public TSimpleRefCount<TEventFilter> {
-public:
- TEventFilter(bool enableEvents)
- : Enable_(enableEvents)
- {
- }
-
- void AddEventClass(TEventClass cls) {
- insert(cls);
- }
-
- bool EventAllowed(TEventClass cls) const {
- bool found = (find(cls) != end());
-
- return Enable_ == found;
- }
-
-private:
- bool Enable_;
-};
-
-using TEventStream = TPacketInputStream<TConstEventPtr>;
-
-struct TFrameHeader {
- // Reads header from the stream. The caller must make sure that the
- // sync data is present just befor the stream position.
- explicit TFrameHeader(IInputStream& in);
-
- ui64 StartTime() const {
- return Framehdr.StartTimestamp;
- }
-
- ui64 EndTime() const {
- return Framehdr.EndTimestamp;
- }
-
- ui32 FrameId() const {
- return Basehdr.FrameId;
- }
-
- ui64 Duration() const {
- return EndTime() - StartTime();
- }
-
- TEventLogFormat ContentFormat() const {
- return Basehdr.Format & 0xffffff;
- }
-
- TEventLogFormat LogFormat() const {
- return Basehdr.Format >> 24;
- }
-
- ui64 FrameLength() const {
- return Basehdr.Length - sizeof(TCompressedFrameHeader2);
- }
-
- // Length including the header
- ui64 FullLength() const {
- return sizeof(*this) + FrameLength();
- }
-
- TCompressedFrameBaseHeader Basehdr;
- TCompressedFrameHeader2 Framehdr;
-};
-
-struct TFrameLoadError: public yexception {
- explicit TFrameLoadError(size_t skipAfter)
- : SkipAfter(skipAfter)
- {}
-
- size_t SkipAfter;
-};
-
-class TFrame : public TFrameHeader {
-public:
- // Reads the frame after the header has been read.
- TFrame(IInputStream& in, TFrameHeader header, IEventFactory*);
-
- TString GetRawFrame() const;
- TString GetCompressedFrame() const;
-
- ui64 Address() const { return Address_; }
-
-private:
- const TConstEventPtr& GetEvent(size_t index) const {
- return EventsCache_[index];
- }
-
- void ClearEventsCache() const;
-
- THolder<TLengthLimitedInput> Limiter_;
- mutable TVector<TConstEventPtr> EventsCache_;
-
- IEventFactory* Fac_;
- ui64 Address_;
-
- friend class TFrameDecoder;
- friend class TFrameStreamer;
-
-private:
- class TIterator: TEventStream {
- public:
- TIterator(const TFrame& frame, TIntrusiveConstPtr<TEventFilter> filter);
- ~TIterator() override = default;
-
- bool Avail() const override {
- return Index_ < Size_;
- }
-
- TConstEventPtr operator*() const override;
- bool Next() override;
-
- private:
- void SkipToValidEvent();
-
- const TFrame& Frame_;
- size_t Size_;
- TIntrusiveConstPtr<TEventFilter> Filter_;
- size_t Index_;
- };
-
-public:
- TFrame::TIterator GetIterator(TIntrusiveConstPtr<TEventFilter> eventFilter = nullptr) const;
-};
-
-// If `in` is derived from TCountingInput, the frame's address will
-// be set according to in->Counter(). Otherwise it will be zero.
-TMaybe<TFrame> FindNextFrame(IInputStream* in, IEventFactory*);
-
-using TFrameStream = TPacketInputStream<const TFrame&>;
-
-class IFrameFilter: public TSimpleRefCount<IFrameFilter> {
-public:
- IFrameFilter() {
- }
-
- virtual ~IFrameFilter() = default;
-
- virtual bool FrameAllowed(const TFrame& frame) const = 0;
-};
-
-using IFrameFilterRef = TIntrusivePtr<IFrameFilter>;
-
-class TDurationFrameFilter: public IFrameFilter {
-public:
- TDurationFrameFilter(ui64 minFrameDuration, ui64 maxFrameDuration = Max<ui64>())
- : MinDuration_(minFrameDuration)
- , MaxDuration_(maxFrameDuration)
- {
- }
-
- bool FrameAllowed(const TFrame& frame) const override {
- return frame.Duration() >= MinDuration_ && frame.Duration() <= MaxDuration_;
- }
-
-private:
- const ui64 MinDuration_;
- const ui64 MaxDuration_;
-};
-
-class TFrameIdFrameFilter: public IFrameFilter {
-public:
- TFrameIdFrameFilter(ui32 frameId)
- : FrameId_(frameId)
- {
- }
-
- bool FrameAllowed(const TFrame& frame) const override {
- return frame.FrameId() == FrameId_;
- }
-
-private:
- const ui32 FrameId_;
-};
-
-class TContainsEventFrameFilter: public IFrameFilter {
-public:
- TContainsEventFrameFilter(const TString& args, const IEventFactory* fac);
-
- bool FrameAllowed(const TFrame& frame) const override;
-
-private:
- struct TMatchGroup {
- TEventClass EventID;
- TString FieldName;
- TString ValueToMatch;
- };
-
- TVector<TMatchGroup> MatchGroups;
-};
-
-void SplitWithEscaping(TVector<TStringBuf>& tokens, const TStringBuf& stringToSplit, const TStringBuf& externalCharacterSet);
-
-TString UnescapeCharacters(const TStringBuf& stringToUnescape, const TStringBuf& characterSet);
-
-TString GetEventFieldAsString(const NProtoBuf::Message* message, const google::protobuf::FieldDescriptor* fieldDescriptor, const google::protobuf::Reflection* reflection);
-
-class TFrameStreamer: public TFrameStream {
-public:
- TFrameStreamer(IInputStream&, IEventFactory* fac, IFrameFilterRef ff = nullptr);
- TFrameStreamer(
- const TString& fileName,
- ui64 startTime,
- ui64 endTime,
- ui64 maxRequestDuration,
- IEventFactory* fac,
- IFrameFilterRef ff = nullptr);
- ~TFrameStreamer() override;
-
- bool Avail() const override;
- const TFrame& operator*() const override;
- bool Next() override;
-
-private:
- bool DoNext();
- bool AllowedTimeRange(const TFrame& frame) const;
-
- bool AllowedFrame(const TFrame& frame) const {
- return AllowedTimeRange(frame) && (!FrameFilter_ || FrameFilter_->FrameAllowed(frame));
- }
-
- void SkipToAllowedFrame() {
- if (Frame_) {
- while (!AllowedFrame(*Frame_) && DoNext()) {
- //do nothing
- }
- }
- }
-
- TBlob File_;
- TMemoryInput MemoryIn_;
- TCountingInput In_;
- THolder<IInputStream> Stream_;
- ui64 StartTime_ = 0;
- ui64 EndTime_ = 0;
- ui64 CutoffTime_ = 0;
- TMaybe<TFrame> Frame_;
- IFrameFilterRef FrameFilter_;
- IEventFactory* EventFactory_;
-};
-
-class TFrameDecoder: TEventStream {
-public:
- TFrameDecoder(const TFrame&, const TEventFilter* const filter, bool strict = false, bool withRawData = false);
- ~TFrameDecoder() override;
-
- bool Avail() const override;
-
- TConstEventPtr operator*() const override;
- bool Next() override;
-
- const TStringBuf GetRawEvent() const;
-
-private:
- TFrameDecoder(const TFrameDecoder&);
- void operator=(const TFrameDecoder&);
-
- inline bool HaveData() const {
- return Event_ != nullptr;
- }
-
- void Decode();
-
-private:
- const TFrame& Frame_;
- THolder<IInputStream> Decompressor_;
- THolder<TLengthLimitedInput> Limiter_;
- TEventPtr Event_;
- const TEventFilter* const Flt_;
- IEventFactory* Fac_;
- THolder<TEvent> EndOfFrame_;
- bool Strict_;
- TBuffer UncompressedData_;
- TStringBuf RawEventData_;
- bool WithRawData_;
-};
-
-class TEventStreamer: public TEventStream {
-public:
- TEventStreamer(TFrameStream&, ui64 start, ui64 end, bool strongOrdering, TIntrusivePtr<TEventFilter> filter, bool losslessStrongOrdering = false);
- ~TEventStreamer() override;
-
- bool Avail() const override;
- TConstEventPtr operator*() const override;
- bool Next() override;
-
-private:
- class TEventBuffer: public TEventStream {
- public:
- void SetCheckpoint();
- void Rollback();
- void Reorder(bool strongOrdering);
- void Append(TConstEventPtr event, bool strongOrdering);
-
- bool Avail() const override;
- TConstEventPtr operator*() const override;
- bool Next() override;
-
- private:
- TVector<TConstEventPtr> Buffer_;
- size_t BufLen_ = 0;
- ui64 LastTimestamp_ = 0;
- };
-
-private:
- struct TInvalidEventTimestamps: public yexception {
- };
-
- bool LoadMoreEvents();
- void TransferEvents(const TFrame&);
-
-private:
- TFrameStream& Frames_;
- TEventBuffer Events_;
-
- ui64 Start_, End_;
- ui64 MaxEndTimestamp_;
- ui64 Frontier_;
- bool StrongOrdering_;
- bool LosslessStrongOrdering_;
- TIntrusivePtr<TEventFilter> EventFilter_;
-};
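
A minimal sketch (not from the deleted sources) of plugging a custom filter into the IFrameFilter interface declared above; the class name and the id range are made up for illustration:

#include <library/cpp/eventlog/logparser.h>

// Accepts only frames whose id lies in a half-open [min, max) range.
class TFrameIdRangeFilter: public IFrameFilter {
public:
    TFrameIdRangeFilter(ui32 minId, ui32 maxId)
        : MinId_(minId)
        , MaxId_(maxId)
    {
    }

    bool FrameAllowed(const TFrame& frame) const override {
        return frame.FrameId() >= MinId_ && frame.FrameId() < MaxId_;
    }

private:
    const ui32 MinId_;
    const ui32 MaxId_;
};

// Such a filter is passed as the IFrameFilterRef argument of TFrameStreamer
// or via NEventLog::TOptions::FrameFilter:
//   IFrameFilterRef filter = MakeIntrusive<TFrameIdRangeFilter>(100, 200);
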
diff --git a/library/cpp/eventlog/proto/events_extension.proto b/library/cpp/eventlog/proto/events_extension.proto
deleted file mode 100644
index 7db1af3a59..0000000000
--- a/library/cpp/eventlog/proto/events_extension.proto
+++ /dev/null
@@ -1,22 +0,0 @@
-import "google/protobuf/descriptor.proto";
-
-option go_package = "github.com/ydb-platform/ydb/library/cpp/eventlog/proto;extensions";
-option java_package = "NEventLogEventsExtension";
-
-extend google.protobuf.MessageOptions {
- optional uint32 message_id = 50001;
- optional string realm_name = 50002;
-}
-
-message Repr {
- enum ReprType {
- none = 0;
- as_bytes = 1; // Only for primitive types
- as_hex = 2; // Only for primitive types
- as_base64 = 3; // Only for 'string' and 'bytes' fields
- };
-}
-
-extend google.protobuf.FieldOptions {
- optional Repr.ReprType repr = 55003 [default = none];
-}
diff --git a/library/cpp/eventlog/proto/internal.proto b/library/cpp/eventlog/proto/internal.proto
deleted file mode 100644
index 8070a09685..0000000000
--- a/library/cpp/eventlog/proto/internal.proto
+++ /dev/null
@@ -1,9 +0,0 @@
-option go_package = "github.com/ydb-platform/ydb/library/cpp/eventlog/proto;extensions";
-
-package NEventLogInternal;
-
-message TUnknownEvent {
-};
-
-message TEndOfFrameEvent {
-};
diff --git a/library/cpp/eventlog/proto/ya.make b/library/cpp/eventlog/proto/ya.make
deleted file mode 100644
index fbf5a6c619..0000000000
--- a/library/cpp/eventlog/proto/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-PROTO_LIBRARY()
-
-IF (NOT PY_PROTOS_FOR)
- INCLUDE_TAGS(GO_PROTO)
-ENDIF()
-
-SRCS(
- events_extension.proto
- internal.proto
-)
-
-END()
diff --git a/library/cpp/eventlog/threaded_eventlog.cpp b/library/cpp/eventlog/threaded_eventlog.cpp
deleted file mode 100644
index 67839063fb..0000000000
--- a/library/cpp/eventlog/threaded_eventlog.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "threaded_eventlog.h"
diff --git a/library/cpp/eventlog/threaded_eventlog.h b/library/cpp/eventlog/threaded_eventlog.h
deleted file mode 100644
index 52382b856d..0000000000
--- a/library/cpp/eventlog/threaded_eventlog.h
+++ /dev/null
@@ -1,154 +0,0 @@
-#pragma once
-
-#include "eventlog.h"
-
-#include <util/generic/string.h>
-#include <util/thread/pool.h>
-
-class TThreadedEventLog: public TEventLogWithSlave {
-public:
- class TWrapper;
- using TOverflowCallback = std::function<void(TWrapper& wrapper)>;
-
- enum class EDegradationResult {
- ShouldWrite,
- ShouldDrop,
- };
- using TDegradationCallback = std::function<EDegradationResult(float fillFactor)>;
-
-public:
- TThreadedEventLog(
- IEventLog& parentLog,
- size_t threadCount,
- size_t queueSize,
- TOverflowCallback cb,
- TDegradationCallback degradationCallback = {})
- : TEventLogWithSlave(parentLog)
- , LogSaver(TThreadPoolParams().SetThreadName("ThreadedEventLog"))
- , ThreadCount(threadCount)
- , QueueSize(queueSize)
- , OverflowCallback(std::move(cb))
- , DegradationCallback(std::move(degradationCallback))
- {
- Init();
- }
-
- TThreadedEventLog(
- const TEventLogPtr& parentLog,
- size_t threadCount,
- size_t queueSize,
- TOverflowCallback cb,
- TDegradationCallback degradationCallback = {})
- : TEventLogWithSlave(parentLog)
- , LogSaver(TThreadPoolParams().SetThreadName("ThreadedEventLog"))
- , ThreadCount(threadCount)
- , QueueSize(queueSize)
- , OverflowCallback(std::move(cb))
- , DegradationCallback(std::move(degradationCallback))
- {
- Init();
- }
-
- TThreadedEventLog(IEventLog& parentLog)
- : TThreadedEventLog(parentLog, 1, 0, TOverflowCallback())
- {
- }
-
- TThreadedEventLog(const TEventLogPtr& parentLog)
- : TThreadedEventLog(parentLog, 1, 0, TOverflowCallback())
- {
- }
-
- ~TThreadedEventLog() override {
- try {
- LogSaver.Stop();
- } catch (...) {
- }
- }
-
- void ReopenLog() override {
- TEventLogWithSlave::ReopenLog();
- }
-
- void CloseLog() override {
- LogSaver.Stop();
- TEventLogWithSlave::CloseLog();
- }
-
- void WriteFrame(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- TWriteFrameCallbackPtr writeFrameCallback = nullptr,
- TLogRecord::TMetaFlags metaFlags = {}) override {
- float fillFactor = 0.0f;
- if (Y_LIKELY(LogSaver.GetMaxQueueSize() > 0)) {
- fillFactor = static_cast<float>(LogSaver.Size()) / LogSaver.GetMaxQueueSize();
- }
-
- EDegradationResult status = EDegradationResult::ShouldWrite;
- if (DegradationCallback) {
- status = DegradationCallback(fillFactor);
- }
- if (Y_UNLIKELY(status == EDegradationResult::ShouldDrop)) {
- return;
- }
-
- THolder<TWrapper> wrapped;
- wrapped.Reset(new TWrapper(buffer, startTimestamp, endTimestamp, Slave(), writeFrameCallback, std::move(metaFlags)));
-
- if (LogSaver.Add(wrapped.Get())) {
- Y_UNUSED(wrapped.Release());
- } else if (OverflowCallback) {
- OverflowCallback(*wrapped);
- }
- }
-
-private:
- void Init() {
- LogSaver.Start(ThreadCount, QueueSize);
- }
-
-public:
- class TWrapper: public IObjectInQueue {
- public:
- TWrapper(TBuffer& buffer,
- TEventTimestamp startTimestamp,
- TEventTimestamp endTimestamp,
- IEventLog& slave,
- TWriteFrameCallbackPtr writeFrameCallback = nullptr,
- TLogRecord::TMetaFlags metaFlags = {})
- : StartTimestamp(startTimestamp)
- , EndTimestamp(endTimestamp)
- , Slave(&slave)
- , WriteFrameCallback(writeFrameCallback)
- , MetaFlags(std::move(metaFlags))
- {
- Buffer.Swap(buffer);
- }
-
- void Process(void*) override {
- THolder<TWrapper> holder(this);
-
- WriteFrame();
- }
-
- void WriteFrame() {
- Slave->WriteFrame(Buffer, StartTimestamp, EndTimestamp, WriteFrameCallback, std::move(MetaFlags));
- }
-
- private:
- TBuffer Buffer;
- TEventTimestamp StartTimestamp;
- TEventTimestamp EndTimestamp;
- IEventLog* Slave;
- TWriteFrameCallbackPtr WriteFrameCallback;
- TLogRecord::TMetaFlags MetaFlags;
- };
-
-private:
- TThreadPool LogSaver;
- const size_t ThreadCount;
- const size_t QueueSize;
- const TOverflowCallback OverflowCallback;
- const TDegradationCallback DegradationCallback;
-};
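
A minimal wiring sketch for the TThreadedEventLog declared above (not from the deleted sources); the thread count, queue size, and the 90% threshold are arbitrary illustration values:

#include <library/cpp/eventlog/threaded_eventlog.h>

void SetupThreadedLog(const TEventLogPtr& parentLog) {
    // If the queue overflows, fall back to writing the frame synchronously
    // in the caller's thread instead of dropping it.
    auto onOverflow = [](TThreadedEventLog::TWrapper& wrapper) {
        wrapper.WriteFrame();
    };

    // Start dropping frames once the queue is 90% full.
    auto onDegradation = [](float fillFactor) {
        return fillFactor > 0.9f
            ? TThreadedEventLog::EDegradationResult::ShouldDrop
            : TThreadedEventLog::EDegradationResult::ShouldWrite;
    };

    TThreadedEventLog log(parentLog, /*threadCount=*/2, /*queueSize=*/1000,
                          onOverflow, onDegradation);
    // Frames written through `log` are handed to the parent log from the
    // worker pool; the pool is stopped when `log` goes out of scope, so a
    // real setup would keep it alive for the lifetime of the service.
}
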
diff --git a/library/cpp/eventlog/ya.make b/library/cpp/eventlog/ya.make
deleted file mode 100644
index fbbc1eff00..0000000000
--- a/library/cpp/eventlog/ya.make
+++ /dev/null
@@ -1,29 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- library/cpp/blockcodecs
- library/cpp/eventlog/proto
- library/cpp/json
- library/cpp/logger
- library/cpp/protobuf/json
- library/cpp/streams/growing_file_input
- library/cpp/string_utils/base64
- contrib/libs/re2
-)
-
-SRCS(
- common.h
- evdecoder.cpp
- event_field_output.cpp
- event_field_printer.cpp
- eventlog.cpp
- eventlog_int.cpp
- iterator.cpp
- logparser.cpp
- threaded_eventlog.cpp
-)
-
-GENERATE_ENUM_SERIALIZATION(eventlog.h)
-GENERATE_ENUM_SERIALIZATION(eventlog_int.h)
-
-END()
diff --git a/library/cpp/fieldcalc/field_calc.cpp b/library/cpp/fieldcalc/field_calc.cpp
deleted file mode 100644
index 1066b5b5e6..0000000000
--- a/library/cpp/fieldcalc/field_calc.cpp
+++ /dev/null
@@ -1,1136 +0,0 @@
-#include <cstdio>
-
-#include <util/str_stl.h>
-#include <util/string/subst.h>
-#include <util/string/util.h>
-#include <util/string/cast.h>
-#include <util/stream/printf.h>
-
-#include "field_calc_int.h"
-
-using namespace std;
-
-enum Operators {
- OP_ADD,
- OP_SUBSTRACT,
- OP_MULTIPLY,
- OP_DIVIDE,
- OP_MODULUS,
- OP_REGEXP,
- OP_REGEXP_NOT,
- OP_LEFT_SHIFT,
- OP_RIGHT_SHIFT,
- OP_EQUAL,
- OP_NOT_EQUAL,
- OP_LESS,
- OP_LESS_OR_EQUAL,
- OP_GREATER,
- OP_GREATER_OR_EQUAL,
- OP_XOR,
- OP_BITWISE_OR,
- OP_BITWISE_AND,
- OP_LOGICAL_OR,
- OP_LOGICAL_AND,
- OP_UNARY_NOT,
- OP_UNARY_COMPLEMENT,
- OP_UNARY_MINUS,
- OP_LOG,
- OP_LOG10,
- OP_ROUND,
- OP_ASSIGN,
- OP_QUESTION,
- OP_COLON,
-
- OP_UNKNOWN,
-};
-
-struct calc_op;
-
-struct calc_elem {
- dump_item item;
- char oper;
- int op_prio;
-};
-
-struct calc_op {
- dump_item Left, Right;
- char Oper;
- bool force_long;
- bool unary;
- bool is_variable;
- bool string_op; // TODO -> bitop
-
- // for local vars
- mutable bool calculated;
- mutable eval_res_type result;
-
- calc_op(calc_elem& left, calc_elem& right)
- : Left(left.item)
- , Right(right.item)
- , Oper(right.oper)
- , is_variable(false)
- , calculated(false)
- , result(false)
- {
- force_long = Oper == OP_XOR || Oper == OP_BITWISE_OR || Oper == OP_BITWISE_AND ||
- Oper == OP_LOGICAL_OR || Oper == OP_LOGICAL_AND || Oper == OP_UNARY_NOT ||
- Oper == OP_UNARY_COMPLEMENT || Oper == OP_LEFT_SHIFT || Oper == OP_RIGHT_SHIFT ||
- Oper == OP_MODULUS;
- unary = Oper == OP_UNARY_NOT || Oper == OP_UNARY_COMPLEMENT || Oper == OP_UNARY_MINUS ||
- Oper == OP_LOG || Oper == OP_LOG10 || Oper == OP_ROUND;
- string_op = IsStringType(Left.type) && IsStringType(Right.type) &&
- (Oper == OP_REGEXP || Oper == OP_REGEXP_NOT || Oper == OP_EQUAL || Oper == OP_NOT_EQUAL ||
- Oper == OP_LESS || Oper == OP_LESS_OR_EQUAL || Oper == OP_GREATER || Oper == OP_GREATER_OR_EQUAL);
- if (Oper == OP_REGEXP || Oper == OP_REGEXP_NOT) {
- if (!string_op)
- ythrow yexception() << "calc-expr: regexp requested for non-strings";
- ythrow yexception() << "calc-expr: regexps currently not supported";
- }
- }
-
- Y_FORCE_INLINE void eval(const char** dd) const {
- if (is_variable) {
- if (!calculated) {
- do_eval(dd);
- calculated = true;
- }
- } else {
- do_eval(dd);
- }
- }
-
-private:
- Y_FORCE_INLINE void do_eval(const char** dd) const;
-};
-
-void calc_op::do_eval(const char** dd) const {
- eval_res_type left1 = unary ? (eval_res_type) false : Left.eval(dd);
- if (Oper == OP_QUESTION) {
- left1.to_long();
- if (left1.res_long) {
- result = Right.eval(dd);
- } else {
- result = eval_res_type(); // null
- }
- return;
- } else if (Oper == OP_COLON) {
- if (left1.is_null()) {
- result = Right.eval(dd);
- } else {
- result = left1;
- }
- return;
- }
-
- if (Y_UNLIKELY(string_op)) {
- TStringBuf left2 = Left.GetStrBuf(dd);
- TStringBuf right2 = Right.GetStrBuf(dd);
- switch (Oper) {
- case OP_REGEXP:
- result = false;
- break;
- case OP_REGEXP_NOT:
- result = false;
- break;
- case OP_EQUAL:
- result = left2 == right2;
- break;
- case OP_NOT_EQUAL:
- result = left2 != right2;
- break;
- case OP_LESS:
- result = left2 < right2;
- break;
- case OP_LESS_OR_EQUAL:
- result = left2 <= right2;
- break;
- case OP_GREATER:
- result = left2 > right2;
- break;
- case OP_GREATER_OR_EQUAL:
- result = left2 >= right2;
- break;
- default:
- assert(false);
- }
- return;
- }
-
- eval_res_type right1 = Right.eval(dd);
- if (force_long) { // logical ops will be all long
- left1.to_long();
- right1.to_long();
- }
- switch (Oper) {
- case OP_ADD:
- result = left1 + right1;
- break;
- case OP_SUBSTRACT:
- result = left1 - right1;
- break;
- case OP_MULTIPLY:
- result = left1 * right1;
- break;
- case OP_DIVIDE:
- result = left1 / right1;
- break;
- case OP_MODULUS:
- result = left1.res_long ? left1.res_long % right1.res_long : 0;
- break;
- case OP_LEFT_SHIFT:
- result = left1.res_long << right1.res_long;
- break;
- case OP_RIGHT_SHIFT:
- result = left1.res_long >> right1.res_long;
- break;
- case OP_EQUAL:
- result = left1 == right1;
- break;
- case OP_NOT_EQUAL:
- result = !(left1 == right1);
- break;
- case OP_LESS:
- result = left1 < right1;
- break;
- case OP_LESS_OR_EQUAL:
- result = !(right1 < left1);
- break; // <=
- case OP_GREATER:
- result = right1 < left1;
- break;
- case OP_GREATER_OR_EQUAL:
- result = !(left1 < right1);
- break; // >=
- case OP_XOR:
- result = left1.res_long ^ right1.res_long;
- break;
- case OP_BITWISE_OR:
- result = left1.res_long | right1.res_long;
- break;
- case OP_BITWISE_AND:
- result = left1.res_long & right1.res_long;
- break;
- case OP_LOGICAL_OR:
- result = left1.res_long || right1.res_long;
- break;
- case OP_LOGICAL_AND:
- result = left1.res_long && right1.res_long;
- break;
- case OP_UNARY_NOT:
- result = !right1.res_long;
- break;
- case OP_UNARY_COMPLEMENT:
- result = ~right1.res_long;
- break;
- case OP_UNARY_MINUS:
- result = Minus(right1);
- break;
- case OP_LOG:
- result = Log(right1);
- break;
- case OP_LOG10:
- result = Log10(right1);
- break;
- case OP_ROUND:
- result = Round(right1);
- break;
- default:
- assert(false);
- }
-}
-
-namespace {
- // copy-paste of fcat(TString)
- // we don't want it to be too slow, yet we don't want to slow down our
- // main functionality, libc fprintf, even a little
- size_t Y_PRINTF_FORMAT(2, 3) fprintf(TString* s, const char* c, ...) {
- TStringOutput so(*s);
-
- va_list params;
- va_start(params, c);
- const size_t ret = Printf(so, c, params);
- va_end(params);
-
- return ret;
- }
- size_t Y_PRINTF_FORMAT(2, 3) fprintf(IOutputStream* s, const char* c, ...) {
- va_list params;
- va_start(params, c);
- const size_t ret = Printf(*s, c, params);
- va_end(params);
-
- return ret;
- }
-}
-
-template <class TOut>
-void dump_item::print(TOut* p, const char** dd) const {
- const char* d = dd[pack_id];
- const fake* f = reinterpret_cast<const fake*>(d);
-
- switch (type) {
- case DIT_FAKE_ITEM:
- assert(false);
- break;
- case DIT_MATH_RESULT:
- assert(false);
- break; // must call eval instead
- case DIT_NAME:
- assert(false);
- break; // no op
-
- case DIT_BOOL_FIELD:
- fprintf(p, *(bool*)(d + field_offset) ? "true" : "false");
- break;
- case DIT_UI8_FIELD:
- fprintf(p, "%u", *(ui8*)(d + field_offset));
- break;
- case DIT_UI16_FIELD:
- fprintf(p, "%u", *(ui16*)(d + field_offset));
- break;
- case DIT_UI32_FIELD:
- fprintf(p, "%u", *(ui32*)(d + field_offset));
- break;
- case DIT_I64_FIELD:
- fprintf(p, "%" PRId64, *(i64*)(d + field_offset));
- break;
- case DIT_UI64_FIELD:
- fprintf(p, "%" PRIu64, *(ui64*)(d + field_offset));
- break;
- case DIT_FLOAT_FIELD:
- fprintf(p, "%.4f", *(float*)(d + field_offset));
- break;
- case DIT_DOUBLE_FIELD:
- fprintf(p, "%.7f", *(double*)(d + field_offset));
- break;
- case DIT_TIME_T32_FIELD:
- fprintf(p, "%ld", (long)*(time_t32*)(d + field_offset));
- break;
- case DIT_PF16UI32_FIELD:
- fprintf(p, "%u", (ui32) * (pf16ui32*)(d + field_offset));
- break;
- case DIT_PF16FLOAT_FIELD:
- fprintf(p, "%.4f", (float)*(pf16float*)(d + field_offset));
- break;
- case DIT_SF16FLOAT_FIELD:
- fprintf(p, "%.4f", (float)*(sf16float*)(d + field_offset));
- break;
- case DIT_STRING_FIELD:
- fprintf(p, "%s", (d + field_offset));
- break;
-
- case DIT_LONG_CONST:
- fprintf(p, "%ld", long_const);
- break;
- case DIT_FLOAT_CONST:
- fprintf(p, "%.4f", float_const);
- break;
- case DIT_STR_CONST:
- fprintf(p, "%.*s", (int)the_buf.size(), the_buf.data());
- break;
-
- case DIT_INT_FUNCTION:
- fprintf(p, "%d", (f->*int_fn)());
- break;
- case DIT_FLOAT_FUNCTION:
- fprintf(p, "%.4f", (f->*float_fn)());
- break;
- case DIT_BOOL_FUNCTION:
- fprintf(p, "%d", (f->*bool_fn)());
- break;
- case DIT_STR_FUNCTION:
- fprintf(p, "%s", (f->*str_fn)());
- break;
- case DIT_STRBUF_FUNCTION:
- the_buf.clear();
- fprintf(p, "%s", (f->*strbuf_2_fn)(the_buf, nullptr));
- break;
-
- case DIT_UI8_EXT_FUNCTION:
- fprintf(p, "%u", (*ui8_ext_fn)(f));
- break;
- case DIT_UI16_EXT_FUNCTION:
- fprintf(p, "%u", (*ui16_ext_fn)(f));
- break;
- case DIT_UI32_EXT_FUNCTION:
- fprintf(p, "%u", (*ui32_ext_fn)(f));
- break;
- case DIT_UI64_EXT_FUNCTION:
- fprintf(p, "%" PRIu64, (*ui64_ext_fn)(f));
- break;
-
- case DIT_UI8_ENUM_EQ:
- fprintf(p, "%d", *(ui8*)(d + field_offset) == enum_val);
- break;
- case DIT_UI8_ENUM_SET:
- fprintf(p, "%d", !!(*(ui8*)(d + field_offset) & enum_val));
- break;
-
- case DIT_UI16_ENUM_EQ:
- fprintf(p, "%d", *(ui16*)(d + field_offset) == enum_val);
- break;
- case DIT_UI16_ENUM_SET:
- fprintf(p, "%d", !!(*(ui16*)(d + field_offset) & enum_val));
- break;
-
- case DIT_UI32_ENUM_EQ:
- fprintf(p, "%d", *(ui32*)(d + field_offset) == enum_val);
- break;
- case DIT_UI32_ENUM_SET:
- fprintf(p, "%d", !!(*(ui32*)(d + field_offset) & enum_val));
- break;
-
- case DIT_INT_ENUM_FUNCTION_EQ:
- fprintf(p, "%d", (ui32)(f->*int_enum_fn)() == enum_val);
- break;
- case DIT_INT_ENUM_FUNCTION_SET:
- fprintf(p, "%d", !!(ui32)((f->*int_enum_fn)() & enum_val));
- break;
-
- case DIT_BOOL_FUNC_FIXED_STR:
- fprintf(p, "%u", (ui32)(f->*bool_strbuf_fn)(the_buf));
- break;
- case DIT_UI8_FUNC_FIXED_STR:
- fprintf(p, "%u", (ui32)(f->*ui8_strbuf_fn)(the_buf));
- break;
- case DIT_UI16_FUNC_FIXED_STR:
- fprintf(p, "%u", (ui32)(f->*ui16_strbuf_fn)(the_buf));
- break;
- case DIT_UI32_FUNC_FIXED_STR:
- fprintf(p, "%u", (f->*ui32_strbuf_fn)(the_buf));
- break;
- case DIT_I64_FUNC_FIXED_STR:
- fprintf(p, "%" PRId64, (f->*i64_strbuf_fn)(the_buf));
- break;
- case DIT_UI64_FUNC_FIXED_STR:
- fprintf(p, "%" PRIu64, (f->*ui64_strbuf_fn)(the_buf));
- break;
- case DIT_FLOAT_FUNC_FIXED_STR:
- fprintf(p, "%.4f", (f->*float_strbuf_fn)(the_buf));
- break;
- case DIT_DOUBLE_FUNC_FIXED_STR:
- fprintf(p, "%.7f", (f->*double_strbuf_fn)(the_buf));
- break;
-
- case DIT_RESOLVE_BY_NAME:
- fprintf(p, "%s", (f->*resolve_fn)(the_buf).data());
- break;
-
- default:
- assert(false);
- break;
- }
-}
-
-// instantiate, just in case
-template void dump_item::print<FILE>(FILE* p, const char** dd) const;
-template void dump_item::print<TString>(TString* p, const char** dd) const;
-template void dump_item::print<IOutputStream>(IOutputStream* p, const char** dd) const;
-
-TStringBuf dump_item::GetStrBuf(const char** dd) const {
- const char* d = dd[pack_id];
- const fake* f = reinterpret_cast<const fake*>(d);
- switch (type) {
- case DIT_STRING_FIELD:
- return d + field_offset;
- case DIT_STR_CONST:
- return the_buf;
- case DIT_STR_FUNCTION:
- return (f->*str_fn)();
- case DIT_STRBUF_FUNCTION:
- the_buf.clear();
- return (f->*strbuf_2_fn)(the_buf, nullptr);
- case DIT_RESOLVE_BY_NAME:
- return (f->*resolve_fn)(the_buf);
- default:
- assert(false);
- return TStringBuf();
- }
-}
-
-// recursive
-eval_res_type dump_item::eval(const char** dd) const {
- const char* d = dd[pack_id];
- const fake* f = reinterpret_cast<const fake*>(d);
-
- switch (type) {
- case DIT_FAKE_ITEM:
- assert(false);
- return (long int)0;
- case DIT_MATH_RESULT:
- this->op->eval(dd);
- return this->op->result;
- case DIT_NAME:
- assert(false);
- return (long int)0;
-
- case DIT_BOOL_FIELD:
- return (ui32) * (bool*)(d + field_offset);
- case DIT_UI8_FIELD:
- return (ui32) * (ui8*)(d + field_offset);
- case DIT_UI16_FIELD:
- return (ui32) * (ui16*)(d + field_offset);
- case DIT_UI32_FIELD:
- return (ui32) * (ui32*)(d + field_offset);
- case DIT_I64_FIELD:
- return (long)*(i64*)(d + field_offset); // TODO: 64 bit support in calculator?
- case DIT_UI64_FIELD:
- return (long)*(ui64*)(d + field_offset); // TODO: 64 bit support in calculator?
- case DIT_FLOAT_FIELD:
- return (float)*(float*)(d + field_offset);
- case DIT_DOUBLE_FIELD:
- return *(double*)(d + field_offset);
- case DIT_TIME_T32_FIELD:
- return (long)*(time_t32*)(d + field_offset);
- case DIT_PF16UI32_FIELD:
- return (ui32) * (pf16ui32*)(d + field_offset);
- case DIT_PF16FLOAT_FIELD:
- return (float)*(pf16float*)(d + field_offset);
- case DIT_SF16FLOAT_FIELD:
- return (float)*(sf16float*)(d + field_offset);
- case DIT_STRING_FIELD:
- return !!d[field_offset]; // we don't have any string functions, just 0 if empty
-
- case DIT_LONG_CONST:
- return long_const;
- case DIT_FLOAT_CONST:
- return float_const;
- case DIT_STR_CONST:
- return !!the_buf;
-
- case DIT_INT_FUNCTION:
- return (long)(f->*int_fn)();
- case DIT_FLOAT_FUNCTION:
- return (float)(f->*float_fn)();
- case DIT_BOOL_FUNCTION:
- return (long)(f->*bool_fn)();
- case DIT_STR_FUNCTION:
- return !!*(f->*str_fn)(); // string -> int
- case DIT_STRBUF_FUNCTION:
- the_buf.clear();
- return !!*(f->*strbuf_2_fn)(the_buf, nullptr); // string -> 0/1
-
- case DIT_UI8_EXT_FUNCTION:
- return (ui32)(*ui8_ext_fn)(f);
- case DIT_UI16_EXT_FUNCTION:
- return (ui32)(*ui16_ext_fn)(f);
- case DIT_UI32_EXT_FUNCTION:
- return (ui32)(*ui32_ext_fn)(f);
- case DIT_UI64_EXT_FUNCTION:
- return (long)(*ui64_ext_fn)(f); // TODO: 64 bit support in calculator?
-
- case DIT_UI8_ENUM_EQ:
- return (ui32)(*(ui8*)(d + field_offset) == enum_val);
- case DIT_UI8_ENUM_SET:
- return !!(ui32)(*(ui8*)(d + field_offset) & enum_val);
-
- case DIT_UI16_ENUM_EQ:
- return (ui32)(*(ui16*)(d + field_offset) == enum_val);
- case DIT_UI16_ENUM_SET:
- return !!(ui32)(*(ui16*)(d + field_offset) & enum_val);
-
- case DIT_UI32_ENUM_EQ:
- return (ui32)(*(ui32*)(d + field_offset) == enum_val);
- case DIT_UI32_ENUM_SET:
- return !!(ui32)(*(ui32*)(d + field_offset) & enum_val);
-
- case DIT_INT_ENUM_FUNCTION_EQ:
- return (ui32)((ui32)(f->*int_enum_fn)() == enum_val);
- case DIT_INT_ENUM_FUNCTION_SET:
- return !!(ui32)((ui32)(f->*int_enum_fn)() & enum_val);
-
- case DIT_BOOL_FUNC_FIXED_STR:
- return (ui32)(f->*bool_strbuf_fn)(the_buf);
- case DIT_UI8_FUNC_FIXED_STR:
- return (ui32)(f->*ui8_strbuf_fn)(the_buf);
- case DIT_UI16_FUNC_FIXED_STR:
- return (ui32)(f->*ui16_strbuf_fn)(the_buf);
- case DIT_UI32_FUNC_FIXED_STR:
- return (ui32)(f->*ui32_strbuf_fn)(the_buf);
- case DIT_I64_FUNC_FIXED_STR:
- return (long)(f->*i64_strbuf_fn)(the_buf);
- case DIT_UI64_FUNC_FIXED_STR:
- return (long)(f->*ui64_strbuf_fn)(the_buf);
- case DIT_FLOAT_FUNC_FIXED_STR:
- return (float)(f->*float_strbuf_fn)(the_buf);
- case DIT_DOUBLE_FUNC_FIXED_STR:
- return (double)(f->*double_strbuf_fn)(the_buf);
-
- case DIT_RESOLVE_BY_NAME:
- return !!(f->*resolve_fn)(the_buf);
-
- default:
- assert(false);
- break;
- }
-
- // unreached
- return eval_res_type(false);
-}
-
-void dump_item::set_arrind(int arrind) {
- switch (type) {
- case DIT_BOOL_FIELD:
- field_offset += arrind * sizeof(bool);
- break;
- case DIT_UI8_FIELD:
- field_offset += arrind * sizeof(ui8);
- break;
- case DIT_UI16_FIELD:
- field_offset += arrind * sizeof(ui16);
- break;
- case DIT_UI32_FIELD:
- field_offset += arrind * sizeof(ui32);
- break;
- case DIT_I64_FIELD:
- field_offset += arrind * sizeof(i64);
- break;
- case DIT_UI64_FIELD:
- field_offset += arrind * sizeof(ui64);
- break;
- case DIT_FLOAT_FIELD:
- field_offset += arrind * sizeof(float);
- break;
- case DIT_DOUBLE_FIELD:
- field_offset += arrind * sizeof(double);
- break;
- case DIT_TIME_T32_FIELD:
- field_offset += arrind * sizeof(time_t32);
- break;
- case DIT_PF16UI32_FIELD:
- field_offset += arrind * sizeof(pf16ui32);
- break;
- case DIT_PF16FLOAT_FIELD:
- field_offset += arrind * sizeof(pf16float);
- break;
- case DIT_SF16FLOAT_FIELD:
- field_offset += arrind * sizeof(sf16float);
- break;
- default:
- break;
- }
-}
-
-static str_spn FieldNameChars("a-zA-Z0-9_$", true);
-static str_spn MathOpChars("-+=*%/&|<>()!~^?:#", true);
-static str_spn SpaceChars("\t\n\r ", true);
-
-TFieldCalculatorBase::TFieldCalculatorBase() {
-}
-
-TFieldCalculatorBase::~TFieldCalculatorBase() = default;
-
-bool TFieldCalculatorBase::item_by_name(dump_item& it, const char* name) const {
- for (size_t i = 0; i < named_dump_items.size(); i++) {
- const named_dump_item* list = named_dump_items[i].first;
- size_t sz = named_dump_items[i].second;
- for (unsigned int n = 0; n < sz; n++) {
- if (!stricmp(name, list[n].name)) {
- it = list[n].item;
- it.pack_id = i;
- return true;
- }
- }
- }
- return false;
-}
-
-bool TFieldCalculatorBase::get_local_var(dump_item& dst, char* var_name) {
- TMap<const char*, dump_item>::const_iterator it = local_vars.find(var_name);
- if (it == local_vars.end()) {
- // New local variable
- dst.type = DIT_LOCAL_VARIABLE;
- dst.local_var_name = pool.append(var_name);
- return false;
- } else {
- dst = it->second;
- return true;
- }
-}
-
-char* TFieldCalculatorBase::get_field(dump_item& dst, char* s) {
- if (!stricmp(s, "name")) {
- dst.type = DIT_NAME;
-        return s + 4; // points at the terminating '\0'
- }
-
- if (*s == '"' || *s == '\'') {
- char* end = strchr(s + 1, *s);
- bool hasEsc = false;
- while (end && end > s + 1 && end[-1] == '\\') {
- end = strchr(end + 1, *s);
- hasEsc = true;
- }
- if (!end)
- ythrow yexception() << "calc-expr: unterminated string constant at " << s;
- dst.type = DIT_STR_CONST;
- dst.the_buf.assign(s + 1, end);
- if (hasEsc)
- SubstGlobal(dst.the_buf, *s == '"' ? "\\\"" : "\\'", *s == '"' ? "\"" : "'");
-        dst.set_arrind(0); // just in case
- return end + 1;
- }
-
- bool is_number = isdigit((ui8)*s) || (*s == '+' || *s == '-') && isdigit((ui8)s[1]), is_float = false;
- char* end = FieldNameChars.cbrk(s + is_number);
- if (is_number && *end == '.') {
- is_float = true;
- end = FieldNameChars.cbrk(end + 1);
- }
- char* next = SpaceChars.cbrk(end);
- int arr_index = 0;
- bool has_arr_index = false;
- if (*next == '[') {
- arr_index = atoi(next + 1);
- has_arr_index = true;
- next = strchr(next, ']');
- if (!next)
- ythrow yexception() << "calc-expr: No closing ']' for '" << s << "'";
- next = SpaceChars.cbrk(next + 1);
- }
- char end_sav = *end;
- *end = 0;
-
- if (!item_by_name(dst, s)) {
- if (!is_number) {
- get_local_var(dst, s);
- } else if (is_float) {
- dst = (float)strtod(s, nullptr);
- } else
- dst = strtol(s, nullptr, 10);
-
- dst.pack_id = 0;
- *end = end_sav;
- return next;
- }
-
- // check array/not array
- if (has_arr_index && !dst.is_array_field())
- ythrow yexception() << "calc-expr: field " << s << " is not an array";
-
- //if (!has_arr_index && dst.is_array_field())
- // yexception("calc-expr: field %s is array, index required", s);
-
- if (has_arr_index && (arr_index < 0 || arr_index >= dst.arr_length))
- ythrow yexception() << "calc-expr: array index [" << arr_index << "] is out of range for field " << s << " (length is " << dst.arr_length << ")";
-
- *end = end_sav;
- dst.set_arrind(arr_index);
- return next;
-}
-
-// BEGIN Stack calculator functions
-inline char* skipspace(char* c, int& bracket_depth) {
- while ((ui8)*c <= ' ' && *c || *c == '(' || *c == ')') {
- if (*c == '(')
- bracket_depth++;
- else if (*c == ')')
- bracket_depth--;
- c++;
- }
- return c;
-}
-
-void ensure_defined(const dump_item& item) {
- if (item.type == DIT_LOCAL_VARIABLE) {
- ythrow yexception() << "Usage of non-defined field or local variable '" << item.local_var_name << "'";
- }
-}
-
-void TFieldCalculatorBase::emit_op(TVector<calc_op>& ops, calc_elem& left, calc_elem& right) {
- int out_op = ops.size();
- char oper = right.oper;
- ensure_defined(right.item);
- if (oper == OP_ASSIGN) {
- if (left.item.type != DIT_LOCAL_VARIABLE) {
- ythrow yexception() << "Assignment only to local variables is allowed";
- }
- if (local_vars.find(left.item.local_var_name) != local_vars.end()) {
- ythrow yexception() << "Reassignment to the local variable " << left.item.local_var_name << " is not allowed";
- }
- local_vars[left.item.local_var_name] = right.item;
- if (right.item.type == DIT_MATH_RESULT) {
- calc_ops[right.item.arr_ind].is_variable = true;
- }
- left = right;
- } else {
- ensure_defined(left.item);
- ops.push_back(calc_op(left, right));
- left.item.type = DIT_MATH_RESULT;
- left.item.arr_ind = out_op;
- }
-}
-
-inline int get_op_prio(char c) {
- switch (c) {
- case OP_ASSIGN:
- return 1;
- case OP_QUESTION:
- case OP_COLON:
- return 2;
- case OP_LOGICAL_OR:
- return 3;
- case OP_LOGICAL_AND:
- return 4;
- case OP_BITWISE_OR:
- return 5;
- case OP_XOR:
- return 6;
- case OP_BITWISE_AND:
- return 7;
- case OP_EQUAL:
- case OP_NOT_EQUAL:
- return 8;
- case OP_LESS:
- case OP_LESS_OR_EQUAL:
- case OP_GREATER:
- case OP_GREATER_OR_EQUAL:
- return 9;
- case OP_LEFT_SHIFT:
- case OP_RIGHT_SHIFT:
- return 10;
- case OP_ADD:
- case OP_SUBSTRACT:
- return 11;
- case OP_MULTIPLY:
- case OP_DIVIDE:
- case OP_MODULUS:
- return 12;
- case OP_REGEXP:
- case OP_REGEXP_NOT:
- return 13;
- case OP_UNARY_NOT:
- case OP_UNARY_COMPLEMENT:
- case OP_UNARY_MINUS:
- case OP_LOG:
- case OP_LOG10:
- case OP_ROUND:
- return 14;
- default:
- return 0;
- }
-}
-
-Operators get_oper(char*& c, bool unary_op_near) {
- Operators cur_oper = OP_UNKNOWN;
- switch (*c++) {
- case '&':
- if (*c == '&')
- cur_oper = OP_LOGICAL_AND, c++;
- else
- cur_oper = OP_BITWISE_AND;
- break;
- case '|':
- if (*c == '|')
- cur_oper = OP_LOGICAL_OR, c++;
- else
- cur_oper = OP_BITWISE_OR;
- break;
- case '<':
- if (*c == '=')
- cur_oper = OP_LESS_OR_EQUAL, c++;
- else if (*c == '<')
- cur_oper = OP_LEFT_SHIFT, c++;
- else
- cur_oper = OP_LESS;
- break;
- case '>':
- if (*c == '=')
- cur_oper = OP_GREATER_OR_EQUAL, c++;
- else if (*c == '>')
- cur_oper = OP_RIGHT_SHIFT, c++;
- else
- cur_oper = OP_GREATER;
- break;
- case '!':
- if (*c == '=')
- cur_oper = OP_NOT_EQUAL, c++;
- else if (*c == '~')
- cur_oper = OP_REGEXP_NOT, c++;
- else
- cur_oper = OP_UNARY_NOT;
- break;
- case '=':
- if (*c == '=')
- cur_oper = OP_EQUAL, c++;
- else if (*c == '~')
- cur_oper = OP_REGEXP, c++;
- else
- cur_oper = OP_ASSIGN;
- break;
- case '-':
- if (unary_op_near)
- cur_oper = OP_UNARY_MINUS;
- else
- cur_oper = OP_SUBSTRACT;
- break;
- case '#':
- if (!strncmp(c, "LOG#", 4)) {
- cur_oper = OP_LOG;
- c += 4;
- } else if (!strncmp(c, "LOG10#", 6)) {
- cur_oper = OP_LOG10;
- c += 6;
- } else if (!strncmp(c, "ROUND#", 6)) {
- cur_oper = OP_ROUND;
- c += 6;
- }
- break;
- case '+':
- cur_oper = OP_ADD;
- break;
- case '*':
- cur_oper = OP_MULTIPLY;
- break;
- case '/':
- cur_oper = OP_DIVIDE;
- break;
- case '%':
- cur_oper = OP_MODULUS;
- break;
- case '^':
- cur_oper = OP_XOR;
- break;
- case '~':
- cur_oper = OP_UNARY_COMPLEMENT;
- break;
- case '?':
- cur_oper = OP_QUESTION;
- break;
- case ':':
- cur_oper = OP_COLON;
- break;
- }
- return cur_oper;
-}
-// END Stack calculator functions
-
-void TFieldCalculatorBase::Compile(char** field_names, int field_count) {
- out_el = 0, out_cond = 0;
- autoarray<dump_item>(field_count).swap(printouts);
- autoarray<dump_item>(field_count).swap(conditions);
- local_vars.clear();
-
- // parse arguments into calculator's "pseudo-code"
- for (int el = 0; el < field_count; el++) {
- char* c = field_names[el];
- bool is_expr = !!*MathOpChars.brk(c), is_cond = *c == '?';
- if (is_cond)
- c++;
- if (!is_expr && !is_cond) {
- get_field(printouts[out_el], c);
- ensure_defined(printouts[out_el]);
- ++out_el;
- continue;
- } else { // Stack Calculator
- const int maxstack = 64;
- calc_elem fstack[maxstack]; // calculator's stack
- int bdepth = 0; // brackets depth
- int stack_cur = -1;
-            bool unary_op_near = false; // indicates that the next operator is unary
-            bool had_assignment_out_of_brackets = false;
-            int uop_seq = 0; // maintains right-to-left order for unary operators
- while (*(c = skipspace(c, bdepth))) {
- /** https://wiki.yandex.ru/JandeksPoisk/Antispam/OwnersData/attselect#calc */
- //printf("1.%i c = '%s'\n", unary_op_near, c);
- Operators cur_oper = OP_UNKNOWN;
- int op_prio = 0;
- if (stack_cur >= 0) {
- cur_oper = get_oper(c, unary_op_near);
- op_prio = get_op_prio(cur_oper);
- if (!op_prio)
- ythrow yexception() << "calc-expr: Unsupported operator '" << c[-1] << "'";
- op_prio += bdepth * 256 + uop_seq;
- if (unary_op_near)
- uop_seq += 20;
- while (op_prio <= fstack[stack_cur].op_prio && stack_cur > 0) {
- emit_op(calc_ops, fstack[stack_cur - 1], fstack[stack_cur]);
- stack_cur--;
- }
- }
- //printf("2.%i c = '%s'\n", unary_op_near, c);
- had_assignment_out_of_brackets |= (bdepth == 0 && cur_oper == OP_ASSIGN);
- c = skipspace(c, bdepth);
- unary_op_near = *c == '-' && !isdigit((ui8)c[1]) || *c == '~' || (*c == '!' && c[1] != '=') ||
- !strncmp(c, "#LOG#", 5) || !strncmp(c, "#LOG10#", 7) || !strncmp(c, "#ROUND#", 7);
- if (!unary_op_near)
- uop_seq = 0;
- if (stack_cur >= maxstack - 1)
- ythrow yexception() << "calc-expr: Math eval stack overflow!\n";
- stack_cur++;
- fstack[stack_cur].oper = cur_oper;
- fstack[stack_cur].op_prio = op_prio;
- //printf("3.%i c = '%s'\n", unary_op_near, c);
- if (unary_op_near)
- fstack[stack_cur].item = dump_item();
- else
- c = get_field(fstack[stack_cur].item, c);
- }
- while (stack_cur > 0) {
- emit_op(calc_ops, fstack[stack_cur - 1], fstack[stack_cur]);
- stack_cur--;
- }
- ensure_defined(fstack[0].item);
- if (is_cond) {
- if (had_assignment_out_of_brackets)
- ythrow yexception() << "Assignment in condition. (Did you mean '==' instead of '='?)";
- if (fstack[0].item.type != DIT_FAKE_ITEM) // Skip empty conditions: "?()".
- conditions[out_cond++] = fstack[0].item;
- } else if (!had_assignment_out_of_brackets) {
- printouts[out_el++] = fstack[0].item;
- }
- }
- }
- // calc_ops will not grow any more, so arr_ind -> op
- for (int n = 0; n < out_cond; n++)
- conditions[n].rewrite_op(calc_ops.data());
- for (int n = 0; n < out_el; n++)
- printouts[n].rewrite_op(calc_ops.data());
- for (auto& local_var : local_vars) {
- local_var.second.rewrite_op(calc_ops.data());
- }
- for (int n = 0; n < (int)calc_ops.size(); n++) {
- calc_ops[n].Left.rewrite_op(calc_ops.data());
- calc_ops[n].Right.rewrite_op(calc_ops.data());
- }
-}
-
-void dump_item::rewrite_op(const calc_op* ops) {
- if (type == DIT_MATH_RESULT)
- op = ops + arr_ind;
-}
-
-void TFieldCalculatorBase::MarkLocalVarsAsUncalculated() {
- for (auto& local_var : local_vars) {
- if (local_var.second.type == DIT_MATH_RESULT) {
- local_var.second.op->calculated = false;
- }
- }
-}
-
-bool TFieldCalculatorBase::Cond(const char** d) {
- MarkLocalVarsAsUncalculated();
- for (int n = 0; n < out_cond; n++) {
- /** https://wiki.yandex.ru/JandeksPoisk/Antispam/OwnersData/attselect#conditions */
- eval_res_type res = conditions[n].eval(d);
- bool is_true = res.type == 0 ? !!res.res_ui32 : res.type == 1 ? !!res.res_long : !!res.res_dbl;
- if (!is_true)
- return false;
- }
- return true;
-}
-
-bool TFieldCalculatorBase::CondById(const char** d, int condNumber) {
- MarkLocalVarsAsUncalculated();
- if (condNumber >= out_cond)
- return false;
- eval_res_type res = conditions[condNumber].eval(d);
- bool is_true = res.type == 0 ? !!res.res_ui32 : res.type == 1 ? !!res.res_long : !!res.res_dbl;
- if (!is_true)
- return false;
- return true;
-}
-
-void TFieldCalculatorBase::Print(FILE* p, const char** d, const char* Name) {
- for (int n = 0; n < out_el; n++) {
- if (printouts[n].type == DIT_NAME) {
- fprintf(p, "%s", Name);
- } else if (printouts[n].type == DIT_MATH_RESULT) { // calculate
- eval_res_type res = printouts[n].eval(d);
- switch (res.type) {
- case 0:
- fprintf(p, "%u", res.res_ui32);
- break;
- case 1:
- fprintf(p, "%ld", res.res_long);
- break;
- case 2:
- fprintf(p, "%f", res.res_dbl);
- break;
- }
- } else {
- printouts[n].print(p, d);
- }
- fprintf(p, n != out_el - 1 ? "\t" : "\n");
- }
-}
-
-void TFieldCalculatorBase::CalcAll(const char** d, TVector<float>& result) const {
- result.clear();
- for (int n = 0; n < out_el; ++n) {
- if (printouts[n].type == DIT_MATH_RESULT || printouts[n].type == DIT_FLOAT_FIELD) {
- eval_res_type res = printouts[n].eval(d);
- result.push_back(res.res_dbl);
- }
- }
-}
-
-void TFieldCalculatorBase::SelfTest() {
- if (out_el < 1)
- ythrow yexception() << "Please specify conditions for test mode";
- const char* dummy = "";
- eval_res_type res = printouts[0].eval(&dummy);
- switch (res.type) {
- case 0:
- printf("%u\n", res.res_ui32);
- break;
- case 1:
- printf("%ld\n", res.res_long);
- break;
- case 2:
- printf("%f\n", res.res_dbl);
- break;
- }
-}
-
-void TFieldCalculatorBase::PrintDiff(const char* rec1, const char* rec2) {
- for (size_t n = 0; n < named_dump_items[0].second; n++) {
- const dump_item& field = named_dump_items[0].first[n].item;
- if (!field.is_field())
- continue; // not really a field
- for (int ind = 0, arrsz = field.is_array_field() ? field.arr_length : 1; ind < arrsz; ind++) {
- intptr_t sav_field_offset = field.field_offset;
- const_cast<dump_item&>(field).set_arrind(ind);
- if (field.eval(&rec1) == field.eval(&rec2)) {
- const_cast<dump_item&>(field).field_offset = sav_field_offset;
- continue;
- }
- if (field.is_array_field())
- printf("\t%s[%i]: ", named_dump_items[0].first[n].name, ind);
- else
- printf("\t%s: ", named_dump_items[0].first[n].name);
- field.print(stdout, &rec1);
- printf(" -> ");
- field.print(stdout, &rec2);
- const_cast<dump_item&>(field).field_offset = sav_field_offset;
- }
- }
-}
-
-void TFieldCalculatorBase::DumpAll(IOutputStream& s, const char** d, const TStringBuf& delim) {
- bool firstPrinted = false;
- for (size_t k = 0; k < named_dump_items.size(); k++) {
- const named_dump_item* fields = named_dump_items[k].first;
- size_t numFields = named_dump_items[k].second;
- const char* obj = d[k];
- for (size_t n = 0; n < numFields; n++) {
- const dump_item& field = fields[n].item;
- if (!field.is_field())
- continue;
- for (int ind = 0, arrsz = field.is_array_field() ? field.arr_length : 1; ind < arrsz; ind++) {
- if (firstPrinted)
- s << delim;
- else
- firstPrinted = true;
- s << fields[n].name;
- if (field.is_array_field())
- Printf(s, "[%i]", ind);
- s << "=";
- intptr_t sav_field_offset = field.field_offset;
- const_cast<dump_item&>(field).set_arrind(ind);
- field.print(&s, &obj);
- const_cast<dump_item&>(field).field_offset = sav_field_offset;
- }
- }
- }
-}
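
A minimal usage sketch of the calculator interface whose implementation is removed above (the class definitions follow in field_calc.h below). The record type TMyRecord, its fields, and the offsetof-based registration in get_named_dump_items<TMyRecord>() are invented for illustration; only Compile, Cond and Print come from the library.

    #include <cstddef>
    #include <utility>

    #include "field_calc_int.h" // brings in field_calc.h, dump_item and named_dump_item

    // Hypothetical record type and its name -> field registration.
    struct TMyRecord {
        ui32 Clicks;
        float Weight;
    };

    template <>
    std::pair<const named_dump_item*, size_t> get_named_dump_items<TMyRecord>() {
        // dump_item keeps the member offset; eval() later adds it to the record address.
        static named_dump_item items[] = {
            {"clicks", dump_item((ui32*)offsetof(TMyRecord, Clicks))},
            {"weight", dump_item((float*)offsetof(TMyRecord, Weight))},
        };
        return {items, sizeof(items) / sizeof(items[0])};
    }

    void PrintInterestingRecords(const TMyRecord& rec) {
        TFieldCalculator<TMyRecord> calc;
        // Expressions are parsed in place, so the strings must be writable;
        // a leading '?' marks a condition rather than a printout.
        char expr[] = "clicks * weight";
        char cond[] = "?clicks > 0";
        char* fields[] = {expr, cond};
        calc.Compile(fields, 2);
        if (calc.Cond(rec)) {
            calc.Print(rec, "my_record"); // tab-separated values to stdout
        }
    }
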
diff --git a/library/cpp/fieldcalc/field_calc.h b/library/cpp/fieldcalc/field_calc.h
deleted file mode 100644
index 46bf371a60..0000000000
--- a/library/cpp/fieldcalc/field_calc.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#pragma once
-
-#include <cstdio>
-
-#include <library/cpp/deprecated/autoarray/autoarray.h>
-#include <util/generic/map.h>
-#include <util/generic/vector.h>
-#include <util/memory/segmented_string_pool.h>
-
-struct dump_item;
-struct calc_op;
-struct named_dump_item;
-struct calc_elem;
-class IOutputStream;
-
-template <class T>
-std::pair<const named_dump_item*, size_t> get_named_dump_items();
-
-class TFieldCalculatorBase {
-private:
- segmented_string_pool pool;
- void emit_op(TVector<calc_op>& ops, calc_elem& left, calc_elem& right);
- void MarkLocalVarsAsUncalculated();
-
-protected:
- autoarray<dump_item> printouts, conditions;
- int out_el, out_cond;
-    TVector<calc_op> calc_ops; // calculator operations, indexed by arr_ind for DIT_MATH_RESULT
-
- TVector<std::pair<const named_dump_item*, size_t>> named_dump_items;
- TMap<const char*, dump_item> local_vars;
-
- char* get_field(dump_item& dst, char* s);
- bool get_local_var(dump_item& dst, char* s);
- virtual bool item_by_name(dump_item& it, const char* name) const;
-
- TFieldCalculatorBase();
- virtual ~TFieldCalculatorBase();
-
- bool Cond(const char** d);
- bool CondById(const char** d, int condNumber);
- void Print(FILE* p, const char** d, const char* Name);
- void Compile(char** field_names, int field_count);
- void SelfTest();
- void PrintDiff(const char* d1, const char* d2);
- void CalcAll(const char** d, TVector<float>& result) const;
- void DumpAll(IOutputStream& s, const char** d, const TStringBuf& delim);
-};
-
-template <class T>
-class TFieldCalculator: protected TFieldCalculatorBase {
-public:
- TFieldCalculator() {
- named_dump_items.push_back(get_named_dump_items<T>());
- }
-
- ~TFieldCalculator() override = default;
-
- bool Cond(const T& d) {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::Cond(&dd);
- }
-
- bool CondById(const T& d, int condNumber) {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::CondById(&dd, condNumber);
- }
-
- void Print(const T& d, const char* Name) {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::Print(stdout, &dd, Name);
- }
-
- void Print(FILE* p, const T& d, const char* Name) {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::Print(p, &dd, Name);
- }
-
- size_t Compile(char** field_names, int field_count) {
- TFieldCalculatorBase::Compile(field_names, field_count);
- return out_el; // number of fields printed
- }
-
- void SelfTest() {
- return TFieldCalculatorBase::SelfTest();
- }
-
- void PrintDiff(const T& d1, const T& d2) {
- return TFieldCalculatorBase::PrintDiff((const char*)&d1, (const char*)&d2);
- }
-
- void CalcAll(const T& d, TVector<float>& result) const {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::CalcAll(&dd, result);
- }
-
- // it appends to `result', clear it yourself
- void DumpAll(IOutputStream& s, const T& d, const TStringBuf& delim) {
- const char* dd = reinterpret_cast<const char*>(&d);
- return TFieldCalculatorBase::DumpAll(s, &dd, delim);
- }
-};
-
-template <class T, class T2>
-class TFieldCalculator2: protected TFieldCalculator<T> {
-public:
- TFieldCalculator2() {
- TFieldCalculator<T>::named_dump_items.push_back(get_named_dump_items<T2>());
- }
-
- ~TFieldCalculator2() override = default;
-
- bool Cond(const T& d, const T2& d2) {
- const char* dd[2] = {reinterpret_cast<const char*>(&d), reinterpret_cast<const char*>(&d2)};
- return TFieldCalculatorBase::Cond(dd);
- }
-
- bool CondById(const T& d, const T2& d2, int condNumber) {
- const char* dd[2] = {reinterpret_cast<const char*>(&d), reinterpret_cast<const char*>(&d2)};
- return TFieldCalculatorBase::CondById(dd, condNumber);
- }
-
- void Print(const T& d, const T2& d2, const char* Name) {
- const char* dd[2] = {reinterpret_cast<const char*>(&d), reinterpret_cast<const char*>(&d2)};
- return TFieldCalculatorBase::Print(stdout, dd, Name);
- }
-
- void Print(FILE* p, const T& d, const T2& d2, const char* Name) {
- const char* dd[2] = {reinterpret_cast<const char*>(&d), reinterpret_cast<const char*>(&d2)};
- return TFieldCalculatorBase::Print(p, dd, Name);
- }
-
- size_t Compile(char** field_names, int field_count) {
- return TFieldCalculator<T>::Compile(field_names, field_count);
- }
-};
diff --git a/library/cpp/fieldcalc/field_calc_int.h b/library/cpp/fieldcalc/field_calc_int.h
deleted file mode 100644
index 5f71fafbda..0000000000
--- a/library/cpp/fieldcalc/field_calc_int.h
+++ /dev/null
@@ -1,593 +0,0 @@
-#pragma once
-
-#include <cmath>
-
-#include <util/system/defaults.h>
-#include <util/system/yassert.h>
-#include <util/memory/alloc.h>
-#include <util/generic/yexception.h>
-
-#include "lossy_types.h"
-#include "field_calc.h"
-
-// eval_res_type
-struct eval_res_type {
- union {
- ui32 res_ui32;
- long res_long;
- double res_dbl;
- };
- int type;
- eval_res_type(ui32 v)
- : res_ui32(v)
- , type(0)
- {
- }
- eval_res_type(long v)
- : res_long(v)
- , type(1)
- {
- }
- eval_res_type(bool v)
- : res_long(v)
- , type(1)
- {
- }
- eval_res_type(double v)
- : res_dbl(v)
- , type(2)
- {
- }
- // a special null value for ternary operator
- explicit eval_res_type()
- : type(3)
- {
- }
- operator ui32() const;
- operator long() const;
- operator double() const;
- void to_long();
- bool is_null() const;
-};
-
-inline bool eval_res_type::is_null() const {
- return type == 3;
-}
-
-inline void eval_res_type::to_long() {
- if (type == 0)
- res_long = res_ui32;
- else if (type == 2)
- res_long = (long)res_dbl;
- type = 1;
-}
-
-inline eval_res_type::operator ui32() const {
- assert(type == 0);
- return res_ui32;
-}
-
-inline eval_res_type::operator long() const {
- assert(type == 0 || type == 1);
- return type == 1 ? res_long : res_ui32;
-}
-
-inline eval_res_type::operator double() const {
- return type == 2 ? res_dbl : type == 1 ? (double)res_long : (double)res_ui32;
-}
-
-inline eval_res_type operator+(const eval_res_type& a, const eval_res_type& b) {
- switch (std::max(a.type, b.type)) {
- case 0:
- return (ui32)a + (ui32)b;
- case 1:
- return (long)a + (long)b;
- /*case 2*/ default:
- return (double)a + (double)b;
- }
-}
-
-inline eval_res_type operator-(const eval_res_type& a, const eval_res_type& b) {
- switch (std::max(a.type, b.type)) {
- case 0:
- case 1:
- return (long)a - (long)b;
- /*case 2*/ default:
- return (double)a - (double)b;
- }
-}
-
-inline eval_res_type Minus(const eval_res_type& a) {
- switch (a.type) {
- case 0:
- return -(long)a.res_ui32;
- case 1:
- return -a.res_long;
- /*case 2*/ default:
- return -a.res_dbl;
- }
-}
-
-inline eval_res_type Log(const eval_res_type& a) {
- switch (a.type) {
- case 0:
- return log(a.res_ui32);
- case 1:
- return log(a.res_long);
- /*case 2*/ default:
- return log(a.res_dbl);
- }
-}
-
-inline eval_res_type Log10(const eval_res_type& a) {
- switch (a.type) {
- case 0:
- return log10(a.res_ui32);
- case 1:
- return log10(a.res_long);
- /*case 2*/ default:
- return log10(a.res_dbl);
- }
-}
-
-inline eval_res_type Round(const eval_res_type& a) {
- switch (a.type) {
- case 0:
- return a.res_ui32;
- case 1:
- return a.res_long;
- /*case 2*/ default:
- return round(a.res_dbl);
- }
-}
-
-inline bool operator==(const eval_res_type& a, const eval_res_type& b) {
- switch (std::max(a.type, b.type)) {
- case 0:
- return (ui32)a == (ui32)b;
- case 1:
- return (long)a == (long)b;
- /*case 2*/ default:
- return (double)a == (double)b;
- }
-}
-
-inline bool operator<(const eval_res_type& a, const eval_res_type& b) {
- switch (std::max(a.type, b.type)) {
- case 0:
- return (ui32)a < (ui32)b;
- case 1:
- return (long)a < (long)b;
- /*case 2*/ default:
- return (double)a < (double)b;
- }
-}
-
-inline eval_res_type operator*(const eval_res_type& a, const eval_res_type& b) {
- switch (std::max(a.type, b.type)) {
- case 0:
- return (ui32)a * (ui32)b;
- case 1:
- return (long)a * (long)b;
- /*case 2*/ default:
- return (double)a * (double)b;
- }
-}
-
-inline double operator/(const eval_res_type& a, const eval_res_type& b) {
- double a1 = a, b1 = b;
- if (b1 == 0) {
- if (a1 == 0)
-            return 0.; // treat 0/0 as 0
- ythrow yexception() << "Division by zero"; // TODO: show parameter names
- }
- return a1 / b1;
-}
-
-// dump_item
-enum EDumpItemType {
- DIT_FAKE_ITEM, // fake item - value never used
- DIT_MATH_RESULT, // eval result
- DIT_NAME,
-
- DIT_FIELDS_START, // Start of item types for real fields
-
- DIT_BOOL_FIELD,
- DIT_UI8_FIELD,
- DIT_UI16_FIELD,
- DIT_UI32_FIELD,
- DIT_I64_FIELD,
- DIT_UI64_FIELD,
- DIT_FLOAT_FIELD,
- DIT_DOUBLE_FIELD,
- DIT_TIME_T32_FIELD,
- DIT_PF16UI32_FIELD,
- DIT_PF16FLOAT_FIELD,
- DIT_SF16FLOAT_FIELD,
- DIT_STRING_FIELD, // new
-
- DIT_FIELDS_END, // End of item types for real fields
-
- DIT_LONG_CONST,
- DIT_FLOAT_CONST,
- DIT_STR_CONST,
-
- DIT_INT_FUNCTION,
- DIT_FLOAT_FUNCTION,
- DIT_BOOL_FUNCTION,
- DIT_STR_FUNCTION, // new
- DIT_STRBUF_FUNCTION, // new
-
- DIT_UI8_EXT_FUNCTION,
- DIT_UI16_EXT_FUNCTION,
- DIT_UI32_EXT_FUNCTION,
- DIT_UI64_EXT_FUNCTION,
-
- DIT_UI8_ENUM_EQ,
- DIT_UI8_ENUM_SET,
- DIT_UI16_ENUM_EQ,
- DIT_UI16_ENUM_SET,
- DIT_UI32_ENUM_EQ,
- DIT_UI32_ENUM_SET,
- DIT_INT_ENUM_FUNCTION_EQ,
- DIT_INT_ENUM_FUNCTION_SET,
-
- DIT_BOOL_FUNC_FIXED_STR,
- DIT_UI8_FUNC_FIXED_STR,
- DIT_UI16_FUNC_FIXED_STR,
- DIT_UI32_FUNC_FIXED_STR,
- DIT_I64_FUNC_FIXED_STR,
- DIT_UI64_FUNC_FIXED_STR,
- DIT_FLOAT_FUNC_FIXED_STR,
- DIT_DOUBLE_FUNC_FIXED_STR,
-
-    DIT_RESOLVE_BY_NAME, // new - for external functions
-
- DIT_LOCAL_VARIABLE
-};
-
-inline bool IsStringType(EDumpItemType type) {
- return type == DIT_STRING_FIELD || type == DIT_STR_CONST || type == DIT_STR_FUNCTION || type == DIT_STRBUF_FUNCTION || type == DIT_RESOLVE_BY_NAME;
-}
-
-struct fake {};
-
-struct calc_op;
-
-typedef int (fake::*int_fn_t)() const;
-typedef float (fake::*float_fn_t)() const;
-typedef bool (fake::*bool_fn_t)() const;
-typedef ui16 (fake::*ui16_fn_t)() const;
-typedef ui32 (fake::*ui32_fn_t)() const;
-typedef bool (fake::*bool_strbuf_fn_t)(const TStringBuf&) const; // string -> bool
-typedef ui8 (fake::*ui8_strbuf_fn_t)(const TStringBuf&) const; // string -> ui8
-typedef ui16 (fake::*ui16_strbuf_fn_t)(const TStringBuf&) const; // string -> ui16
-typedef ui32 (fake::*ui32_strbuf_fn_t)(const TStringBuf&) const; // string -> ui32
-typedef i64 (fake::*i64_strbuf_fn_t)(const TStringBuf&) const; // string -> i64
-typedef ui64 (fake::*ui64_strbuf_fn_t)(const TStringBuf&) const; // string -> ui64
-typedef float (fake::*float_strbuf_fn_t)(const TStringBuf&) const; // string -> float
-typedef double (fake::*double_strbuf_fn_t)(const TStringBuf&) const; // string -> double
-typedef const char* (fake::*str_fn_t)() const;
-typedef const char* (fake::*strbuf_2_fn_t)(TString& buf, const char* nul) const;
-typedef TStringBuf (fake::*resolve_fn_t)(const TStringBuf&) const; // string -> string, $var -> "value"
-
-// note: we cannot reuse the above signatures, calling conventions may differ
-typedef ui8 (*ui8_ext_fn_t)(const fake*);
-typedef ui16 (*ui16_ext_fn_t)(const fake*);
-typedef ui32 (*ui32_ext_fn_t)(const fake*);
-typedef ui64 (*ui64_ext_fn_t)(const fake*);
-
-struct dump_item {
- EDumpItemType type;
- int pack_id = 0;
-
- union {
- // fields
- intptr_t field_offset;
-
- // constants
- long long_const;
- float float_const;
-
- // functions
- int_fn_t int_fn;
- float_fn_t float_fn;
- bool_fn_t bool_fn;
- str_fn_t str_fn;
- strbuf_2_fn_t strbuf_2_fn;
- resolve_fn_t resolve_fn;
-
- bool_strbuf_fn_t bool_strbuf_fn;
- ui8_strbuf_fn_t ui8_strbuf_fn;
- ui16_strbuf_fn_t ui16_strbuf_fn;
- ui32_strbuf_fn_t ui32_strbuf_fn;
- i64_strbuf_fn_t i64_strbuf_fn;
- ui64_strbuf_fn_t ui64_strbuf_fn;
- float_strbuf_fn_t float_strbuf_fn;
- double_strbuf_fn_t double_strbuf_fn;
-
- ui8_ext_fn_t ui8_ext_fn;
- ui16_ext_fn_t ui16_ext_fn;
- ui32_ext_fn_t ui32_ext_fn;
- ui64_ext_fn_t ui64_ext_fn;
-
- // enum
- int_fn_t int_enum_fn;
-
- // for DIT_MATH_RESULT
- const calc_op* op;
- };
-
- // for enum
- ui32 enum_val;
-
-    // for local vars; also used to mark accessor functions so they can be used in dump
- const char* local_var_name = nullptr;
-
- int arr_ind; // externally initialized!
- int arr_length;
-
-    mutable TString the_buf; // buffer for string functions; string constants are stored here too
-
- // Ctors
- dump_item()
- : type(DIT_FAKE_ITEM)
- , field_offset(0)
- {
- }
-
- dump_item(bool* ptr, int arrlen = 0)
- : type(DIT_BOOL_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(ui8* ptr, int arrlen = 0)
- : type(DIT_UI8_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(ui16* ptr, int arrlen = 0)
- : type(DIT_UI16_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(ui32* ptr, int arrlen = 0)
- : type(DIT_UI32_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(i64* ptr, int arrlen = 0)
- : type(DIT_I64_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(ui64* ptr, int arrlen = 0)
- : type(DIT_UI64_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(float* ptr, int arrlen = 0)
- : type(DIT_FLOAT_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(double* ptr, int arrlen = 0)
- : type(DIT_DOUBLE_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(time_t32* ptr, int arrlen = 0)
- : type(DIT_TIME_T32_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(pf16ui32* ptr, int arrlen = 0)
- : type(DIT_PF16UI32_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(pf16float* ptr, int arrlen = 0)
- : type(DIT_PF16FLOAT_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(sf16float* ptr, int arrlen = 0)
- : type(DIT_SF16FLOAT_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
- dump_item(char* ptr, int arrlen = 0)
- : type(DIT_STRING_FIELD)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , arr_length(arrlen)
- {
- }
-
- dump_item(long val)
- : type(DIT_LONG_CONST)
- , long_const(val)
- {
- }
- dump_item(float val)
- : type(DIT_FLOAT_CONST)
- , float_const(val)
- {
- }
- dump_item(TString& val)
- : type(DIT_STR_CONST)
- , the_buf(val)
- {
- }
-
- dump_item(int_fn_t fn)
- : type(DIT_INT_FUNCTION)
- , int_fn(fn)
- {
- }
- dump_item(float_fn_t fn)
- : type(DIT_FLOAT_FUNCTION)
- , float_fn(fn)
- {
- }
- dump_item(bool_fn_t fn)
- : type(DIT_BOOL_FUNCTION)
- , bool_fn(fn)
- {
- }
- dump_item(bool_strbuf_fn_t fn, const char* name)
- : type(DIT_BOOL_FUNC_FIXED_STR)
- , bool_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(ui8_strbuf_fn_t fn, const char* name)
- : type(DIT_UI8_FUNC_FIXED_STR)
- , ui8_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(ui16_strbuf_fn_t fn, const char* name)
- : type(DIT_UI16_FUNC_FIXED_STR)
- , ui16_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(ui32_strbuf_fn_t fn, const char* name)
- : type(DIT_UI32_FUNC_FIXED_STR)
- , ui32_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(i64_strbuf_fn_t fn, const char* name)
- : type(DIT_I64_FUNC_FIXED_STR)
- , i64_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(ui64_strbuf_fn_t fn, const char* name)
- : type(DIT_UI64_FUNC_FIXED_STR)
- , ui64_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(float_strbuf_fn_t fn, const char* name)
- : type(DIT_FLOAT_FUNC_FIXED_STR)
- , float_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(double_strbuf_fn_t fn, const char* name)
- : type(DIT_DOUBLE_FUNC_FIXED_STR)
- , double_strbuf_fn(fn)
- , the_buf(name)
- {
- }
- dump_item(str_fn_t fn)
- : type(DIT_STR_FUNCTION)
- , str_fn(fn)
- {
- }
- dump_item(strbuf_2_fn_t fn)
- : type(DIT_STRBUF_FUNCTION)
- , strbuf_2_fn(fn)
- {
- }
-
- dump_item(ui8_ext_fn_t fn, const char* lvn = nullptr)
- : type(DIT_UI8_EXT_FUNCTION)
- , ui8_ext_fn(fn)
- , local_var_name(lvn)
- {
- }
- dump_item(ui16_ext_fn_t fn, const char* lvn = nullptr)
- : type(DIT_UI16_EXT_FUNCTION)
- , ui16_ext_fn(fn)
- , local_var_name(lvn)
- {
- }
- dump_item(ui32_ext_fn_t fn, const char* lvn = nullptr)
- : type(DIT_UI32_EXT_FUNCTION)
- , ui32_ext_fn(fn)
- , local_var_name(lvn)
- {
- }
- dump_item(ui64_ext_fn_t fn, const char* lvn = nullptr)
- : type(DIT_UI64_EXT_FUNCTION)
- , ui64_ext_fn(fn)
- , local_var_name(lvn)
- {
- }
-
- dump_item(ui8* ptr, ui32 val, bool bitset)
- : type(bitset ? DIT_UI8_ENUM_SET : DIT_UI8_ENUM_EQ)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , enum_val(val)
- {
- }
-
- dump_item(ui16* ptr, ui32 val, bool bitset)
- : type(bitset ? DIT_UI16_ENUM_SET : DIT_UI16_ENUM_EQ)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , enum_val(val)
- {
- }
-
- dump_item(ui32* ptr, ui32 val, bool bitset)
- : type(bitset ? DIT_UI32_ENUM_SET : DIT_UI32_ENUM_EQ)
- , field_offset(reinterpret_cast<intptr_t>(ptr))
- , enum_val(val)
- {
- }
-
- dump_item(int_fn_t fn, ui32 val, bool bitset)
- : type(bitset ? DIT_INT_ENUM_FUNCTION_SET : DIT_INT_ENUM_FUNCTION_EQ)
- , int_enum_fn(fn)
- , enum_val(val)
- {
- }
-
- dump_item(resolve_fn_t fn, const char* name)
- : type(DIT_RESOLVE_BY_NAME)
- , resolve_fn(fn)
- , the_buf(name)
- {
-    } // the variable name is saved in the_buf
-
- // Functions
- template <class TOut> // implemented for FILE*, TString* (appends) and IOutputStream*
- void print(TOut* p, const char** dd) const;
- TStringBuf GetStrBuf(const char** dd) const; // for char-types only!
- eval_res_type eval(const char** dd) const;
- void set_arrind(int arrind);
- void rewrite_op(const calc_op* ops);
-
- bool is_accessor_func() const {
- return type >= DIT_INT_FUNCTION && type <= DIT_UI64_EXT_FUNCTION && local_var_name;
- }
-
- bool is_field() const {
- return type > DIT_FIELDS_START && type < DIT_FIELDS_END || is_accessor_func();
- }
-
- bool is_array_field() const {
- return is_field() && arr_length > 0;
- }
-};
-
-// named_dump_item
-struct named_dump_item {
- const char* name;
- dump_item item;
-};
diff --git a/library/cpp/fieldcalc/lossy_types.h b/library/cpp/fieldcalc/lossy_types.h
deleted file mode 100644
index 98acfea902..0000000000
--- a/library/cpp/fieldcalc/lossy_types.h
+++ /dev/null
@@ -1,52 +0,0 @@
-#pragma once
-
-#include <util/generic/cast.h>
-
-// although the target value is float, this type is only used as an unsigned int container
-struct pf16ui32 {
- ui16 val;
- pf16ui32()
- : val(0)
- {
- }
- void operator=(ui32 t) {
- val = static_cast<ui16>(BitCast<ui32>(static_cast<float>(t)) >> 15);
- }
- operator ui32() const {
- return (ui32)BitCast<float>((ui32)(val << 15));
- }
-};
-
-// unsigned float value
-struct pf16float {
- ui16 val;
- pf16float()
- : val(0)
- {
- }
- void operator=(float t) {
- assert(t >= 0.);
- val = static_cast<ui16>(BitCast<ui32>(t) >> 15);
- }
- operator float() const {
- return BitCast<float>((ui32)(val << 15));
- }
-};
-
-// signed float value
-struct sf16float {
- ui16 val;
- sf16float()
- : val(0)
- {
- }
- void operator=(float t) {
- assert(t >= 0.);
- val = BitCast<ui32>(t) >> 16;
- }
- operator float() const {
- return BitCast<float>((ui32)(val << 16));
- }
-};
-
-typedef i32 time_t32; // not really lossy, should be placed somewhere else
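
All three containers keep only the upper bits of the IEEE-754 representation and zero-fill the discarded low mantissa bits on the way back, trading precision for size. A tiny, hypothetical round-trip illustration (not part of the library):

    #include <cstdio>

    #include "lossy_types.h"

    int main() {
        pf16float v;
        v = 3.14159f;   // keeps the exponent and the top 8 mantissa bits, drops the sign bit
        float back = v; // the low 15 mantissa bits come back as zeros
        printf("%.6f -> %.6f\n", 3.14159, back);
        return 0;
    }
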
diff --git a/library/cpp/fieldcalc/ya.make b/library/cpp/fieldcalc/ya.make
deleted file mode 100644
index 9796592996..0000000000
--- a/library/cpp/fieldcalc/ya.make
+++ /dev/null
@@ -1,13 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- library/cpp/deprecated/autoarray
-)
-
-SRCS(
- field_calc.cpp
- lossy_types.h
- field_calc_int.h
-)
-
-END()
diff --git a/library/cpp/malloc/galloc/malloc-info.cpp b/library/cpp/malloc/galloc/malloc-info.cpp
deleted file mode 100644
index fbcfa7ee06..0000000000
--- a/library/cpp/malloc/galloc/malloc-info.cpp
+++ /dev/null
@@ -1,9 +0,0 @@
-#include <library/cpp/malloc/api/malloc.h>
-
-using namespace NMalloc;
-
-TMallocInfo NMalloc::MallocInfo() {
- TMallocInfo r;
- r.Name = "tcmalloc";
- return r;
-}
diff --git a/library/cpp/malloc/galloc/ya.make b/library/cpp/malloc/galloc/ya.make
deleted file mode 100644
index b6646a6cf6..0000000000
--- a/library/cpp/malloc/galloc/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-LIBRARY()
-
-NO_UTIL()
-ALLOCATOR_IMPL()
-
-PEERDIR(
- library/cpp/malloc/api
- contrib/deprecated/galloc
-)
-
-SRCS(
- malloc-info.cpp
-)
-
-END()
diff --git a/library/cpp/on_disk/multi_blob/multiblob.cpp b/library/cpp/on_disk/multi_blob/multiblob.cpp
deleted file mode 100644
index d92b31e613..0000000000
--- a/library/cpp/on_disk/multi_blob/multiblob.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-#include <util/generic/yexception.h>
-#include <util/system/align.h>
-
-#include <library/cpp/on_disk/chunks/reader.h>
-
-#include "multiblob.h"
-
-void TSubBlobs::ReadMultiBlob(const TBlob& multi) {
- if (multi.Size() < sizeof(TMultiBlobHeader)) {
- ythrow yexception() << "not a blob, too small";
- }
-
- Multi = multi;
- memcpy((void*)&Header, Multi.Data(), sizeof(TMultiBlobHeader));
-
- if (Header.BlobMetaSig != BLOBMETASIG) {
- if (Header.BlobRecordSig != TMultiBlobHeader::RecordSig) {
- if (ReadChunkedData(multi))
- return;
- }
- ythrow yexception() << "is not a blob, MetaSig was read: "
- << Header.BlobMetaSig
-                            << ", must be " << BLOBMETASIG;
- }
-
- if (Header.BlobRecordSig != TMultiBlobHeader::RecordSig)
- ythrow yexception() << "unknown multiblob RecordSig "
- << Header.BlobRecordSig;
-
- reserve(size() + Header.Count);
- if (Header.Flags & EMF_INTERLAY) {
- size_t pos = Header.HeaderSize();
- for (size_t i = 0; i < Header.Count; ++i) {
- pos = AlignUp<ui64>(pos, sizeof(ui64));
- ui64 size = *((ui64*)((const char*)multi.Data() + pos));
- pos = AlignUp<ui64>(pos + sizeof(ui64), Header.Align);
- push_back(multi.SubBlob(pos, pos + size));
- pos += size;
- }
- } else {
- const ui64* sizes = Header.Sizes(multi.Data());
- size_t pos = Header.HeaderSize() + Header.Count * sizeof(ui64);
- for (size_t i = 0; i < Header.Count; ++i) {
- pos = AlignUp<ui64>(pos, Header.Align);
- push_back(multi.SubBlob(pos, pos + *sizes));
- pos += *sizes;
- sizes++;
- }
- }
-}
-
-bool TSubBlobs::ReadChunkedData(const TBlob& multi) noexcept {
- Multi = multi;
- memset((void*)&Header, 0, sizeof(Header));
-
- TChunkedDataReader reader(Multi);
- Header.Count = reader.GetBlocksCount();
- resize(GetHeader()->Count);
- for (size_t i = 0; i < size(); ++i)
-        // We can use TBlob::NoCopy() because reader.GetBlock(i) returns
-        // an address inside the memory of the multi blob.
-        // This knowledge comes from the implementation of
-        // TChunkedDataReader, so we need to take care of any changes there.
- (*this)[i] = TBlob::NoCopy(reader.GetBlock(i), reader.GetBlockLen(i));
- Header.Flags |= EMF_CHUNKED_DATA_READER;
- return true;
-}
diff --git a/library/cpp/on_disk/multi_blob/multiblob.h b/library/cpp/on_disk/multi_blob/multiblob.h
deleted file mode 100644
index b40a5ae6af..0000000000
--- a/library/cpp/on_disk/multi_blob/multiblob.h
+++ /dev/null
@@ -1,77 +0,0 @@
-#pragma once
-
-#include <util/generic/vector.h>
-#include <util/memory/blob.h>
-
-#define BLOBMETASIG 0x3456789Au
-
-enum E_Multiblob_Flags {
- // if EMF_INTERLAY is clear
- // multiblob format
- // HeaderSize() bytes for TMultiBlobHeader
- // Count*sizeof(ui64) bytes for blob sizes
- // blob1
- // (alignment)
- // blob2
- // (alignment)
- // ...
- // (alignment)
- // blobn
- // if EMF_INTERLAY is set
- // multiblob format
- // HeaderSize() bytes for TMultiBlobHeader
- // size1 ui64, the size of 1st blob
- // blob1
- // (alignment)
- // size2 ui64, the size of 2nd blob
- // blob2
- // (alignment)
- // ...
- // (alignment)
- // sizen ui64, the size of n'th blob
- // blobn
- EMF_INTERLAY = 1,
-
- // Means that multiblob contains blocks in TChunkedDataReader format
- // Legacy, use it only for old files, created for TChunkedDataReader
- EMF_CHUNKED_DATA_READER = 2,
-
- // Flags that may be configured for blobbuilder in client code
- EMF_WRITEABLE = EMF_INTERLAY,
-};
-
-struct TMultiBlobHeader {
- // data
- ui32 BlobMetaSig;
- ui32 BlobRecordSig;
- ui64 Count; // count of sub blobs
- ui32 Align; // alignment for every subblob
- ui32 Flags;
- static const ui32 RecordSig = 0x23456789;
- static inline size_t HeaderSize() {
- return 4 * sizeof(ui64);
- }
- inline const ui64* Sizes(const void* Data) const {
- return (const ui64*)((const char*)Data + HeaderSize());
- }
-};
-
-class TSubBlobs: public TVector<TBlob> {
-public:
- TSubBlobs() {
- }
- TSubBlobs(const TBlob& multi) {
- ReadMultiBlob(multi);
- }
- void ReadMultiBlob(const TBlob& multi);
- const TMultiBlobHeader* GetHeader() const {
- return (const TMultiBlobHeader*)&Header;
- }
-
-protected:
- TMultiBlobHeader Header;
- TBlob Multi;
-
-private:
- bool ReadChunkedData(const TBlob& multi) noexcept;
-};
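
A short reading sketch for the layout described above; the file name is an assumption, TBlob::FromFile comes from util:

    #include <util/memory/blob.h>

    #include "multiblob.h"

    void ReadMultiBlobExample() {
        TBlob raw = TBlob::FromFile("index.multiblob"); // hypothetical file in multiblob format
        TSubBlobs parts(raw);                           // parses the header and fills the vector
        for (size_t i = 0; i < parts.size(); ++i) {
            const TBlob& part = parts[i]; // each sub-blob shares the memory of the multi blob
            // ... use part.Data() / part.Size() ...
        }
    }
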
diff --git a/library/cpp/on_disk/multi_blob/multiblob_builder.cpp b/library/cpp/on_disk/multi_blob/multiblob_builder.cpp
deleted file mode 100644
index 44aa4a6c2f..0000000000
--- a/library/cpp/on_disk/multi_blob/multiblob_builder.cpp
+++ /dev/null
@@ -1,146 +0,0 @@
-#include <util/memory/tempbuf.h>
-#include <util/system/align.h>
-
-#include "multiblob_builder.h"
-
-/*
- * TBlobSaverMemory
- */
-TBlobSaverMemory::TBlobSaverMemory(const void* ptr, size_t size)
- : Blob(TBlob::NoCopy(ptr, size))
-{
-}
-
-TBlobSaverMemory::TBlobSaverMemory(const TBlob& blob)
- : Blob(blob)
-{
-}
-
-void TBlobSaverMemory::Save(IOutputStream& output, ui32 /*flags*/) {
- output.Write((void*)Blob.Data(), Blob.Length());
-}
-
-size_t TBlobSaverMemory::GetLength() {
- return Blob.Length();
-}
-
-/*
- * TBlobSaverFile
- */
-
-TBlobSaverFile::TBlobSaverFile(TFile file)
- : File(file)
-{
- Y_ASSERT(File.IsOpen());
-}
-
-TBlobSaverFile::TBlobSaverFile(const char* filename, EOpenMode oMode)
- : File(filename, oMode)
-{
- Y_ASSERT(File.IsOpen());
-}
-
-void TBlobSaverFile::Save(IOutputStream& output, ui32 /*flags*/) {
- TTempBuf buffer(1 << 20);
- while (size_t size = File.Read((void*)buffer.Data(), buffer.Size()))
- output.Write((void*)buffer.Data(), size);
-}
-
-size_t TBlobSaverFile::GetLength() {
- return File.GetLength();
-}
-
-/*
- * TMultiBlobBuilder
- */
-
-TMultiBlobBuilder::TMultiBlobBuilder(bool isOwn)
- : IsOwner(isOwn)
-{
-}
-
-TMultiBlobBuilder::~TMultiBlobBuilder() {
- if (IsOwner)
- DeleteSubBlobs();
-}
-
-namespace {
- ui64 PadToAlign(IOutputStream& output, ui64 fromPos, ui32 align) {
- ui64 toPos = AlignUp<ui64>(fromPos, align);
- for (; fromPos < toPos; ++fromPos) {
- output << (char)0;
- }
- return toPos;
- }
-}
-
-void TMultiBlobBuilder::Save(IOutputStream& output, ui32 flags) {
- TMultiBlobHeader header;
- memset((void*)&header, 0, sizeof(header));
- header.BlobMetaSig = BLOBMETASIG;
- header.BlobRecordSig = TMultiBlobHeader::RecordSig;
- header.Count = Blobs.size();
- header.Align = ALIGN;
- header.Flags = flags & EMF_WRITEABLE;
- output.Write((void*)&header, sizeof(header));
- for (size_t i = sizeof(header); i < header.HeaderSize(); ++i)
- output << (char)0;
- ui64 pos = header.HeaderSize();
- if (header.Flags & EMF_INTERLAY) {
- for (size_t i = 0; i < Blobs.size(); ++i) {
- ui64 size = Blobs[i]->GetLength();
- pos = PadToAlign(output, pos, sizeof(ui64)); // Align size record
- output.Write((void*)&size, sizeof(ui64));
- pos = PadToAlign(output, pos + sizeof(ui64), header.Align); // Align blob
- Blobs[i]->Save(output, header.Flags);
- pos += size;
- }
- } else {
- for (size_t i = 0; i < Blobs.size(); ++i) {
- ui64 size = Blobs[i]->GetLength();
- output.Write((void*)&size, sizeof(ui64));
- }
- pos += Blobs.size() * sizeof(ui64);
- for (size_t i = 0; i < Blobs.size(); ++i) {
- pos = PadToAlign(output, pos, header.Align);
- Blobs[i]->Save(output, header.Flags);
- pos += Blobs[i]->GetLength();
- }
- }
- // Compensate for imprecise size
- for (ui64 len = GetLength(); pos < len; ++pos) {
- output << (char)0;
- }
-}
-
-size_t TMultiBlobBuilder::GetLength() {
-    // Sizes may be different with and without EMF_INTERLAY, so choose the greater of the two
- size_t resNonInter = TMultiBlobHeader::HeaderSize() + Blobs.size() * sizeof(ui64);
- size_t resInterlay = TMultiBlobHeader::HeaderSize();
- for (size_t i = 0; i < Blobs.size(); ++i) {
- resInterlay = AlignUp<ui64>(resInterlay, sizeof(ui64)) + sizeof(ui64);
- resInterlay = AlignUp<ui64>(resInterlay, ALIGN) + Blobs[i]->GetLength();
- resNonInter = AlignUp<ui64>(resNonInter, ALIGN) + Blobs[i]->GetLength();
- }
- resInterlay = AlignUp<ui64>(resInterlay, ALIGN);
- resNonInter = AlignUp<ui64>(resNonInter, ALIGN);
- return Max(resNonInter, resInterlay);
-}
-
-TMultiBlobBuilder::TSavers& TMultiBlobBuilder::GetBlobs() {
- return Blobs;
-}
-
-const TMultiBlobBuilder::TSavers& TMultiBlobBuilder::GetBlobs() const {
- return Blobs;
-}
-
-void TMultiBlobBuilder::AddBlob(IBlobSaverBase* blob) {
- Blobs.push_back(blob);
-}
-
-void TMultiBlobBuilder::DeleteSubBlobs() {
- for (size_t i = 0; i < Blobs.size(); ++i)
- delete Blobs[i];
- Blobs.clear();
-}
diff --git a/library/cpp/on_disk/multi_blob/multiblob_builder.h b/library/cpp/on_disk/multi_blob/multiblob_builder.h
deleted file mode 100644
index a8e3c6d35e..0000000000
--- a/library/cpp/on_disk/multi_blob/multiblob_builder.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#pragma once
-
-#include <util/system/align.h>
-#include <util/stream/output.h>
-#include <util/stream/file.h>
-#include <util/draft/holder_vector.h>
-
-#include "multiblob.h"
-
-class IBlobSaverBase {
-public:
- virtual ~IBlobSaverBase() {
- }
- virtual void Save(IOutputStream& output, ui32 flags = 0) = 0;
- virtual size_t GetLength() = 0;
-};
-
-inline void MultiBlobSave(IOutputStream& output, IBlobSaverBase& saver) {
- saver.Save(output);
-}
-
-class TBlobSaverMemory: public IBlobSaverBase {
-public:
- TBlobSaverMemory(const void* ptr, size_t size);
- TBlobSaverMemory(const TBlob& blob);
- void Save(IOutputStream& output, ui32 flags = 0) override;
- size_t GetLength() override;
-
-private:
- TBlob Blob;
-};
-
-class TBlobSaverFile: public IBlobSaverBase {
-public:
- TBlobSaverFile(TFile file);
- TBlobSaverFile(const char* filename, EOpenMode oMode = RdOnly);
- void Save(IOutputStream& output, ui32 flags = 0) override;
- size_t GetLength() override;
-
-protected:
- TFile File;
-};
-
-class TMultiBlobBuilder: public IBlobSaverBase {
-protected:
- // Data will be stored with default alignment DEVTOOLS-4548
- static const size_t ALIGN = 16;
-
-public:
- typedef TVector<IBlobSaverBase*> TSavers;
-
- TMultiBlobBuilder(bool isOwn = true);
- ~TMultiBlobBuilder() override;
- void Save(IOutputStream& output, ui32 flags = 0) override;
- size_t GetLength() override;
- TSavers& GetBlobs();
- const TSavers& GetBlobs() const;
- void AddBlob(IBlobSaverBase* blob);
- void DeleteSubBlobs();
-
-protected:
- TSavers Blobs;
- bool IsOwner;
-};
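
The matching writer-side sketch; the buffers and the output path are assumptions:

    #include <util/generic/buffer.h>
    #include <util/stream/file.h>

    #include "multiblob_builder.h"

    void WriteMultiBlobExample(const TBuffer& a, const TBuffer& b) {
        TMultiBlobBuilder builder; // owns the savers and deletes them in its destructor
        builder.AddBlob(new TBlobSaverMemory(a.Data(), a.Size()));
        builder.AddBlob(new TBlobSaverMemory(b.Data(), b.Size()));
        TFileOutput out("index.multiblob");
        builder.Save(out, EMF_INTERLAY); // pass 0 for the non-interleaved layout
    }
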
diff --git a/library/cpp/on_disk/multi_blob/ya.make b/library/cpp/on_disk/multi_blob/ya.make
deleted file mode 100644
index 50615fc901..0000000000
--- a/library/cpp/on_disk/multi_blob/ya.make
+++ /dev/null
@@ -1,13 +0,0 @@
-LIBRARY()
-
-SRCS(
- multiblob.cpp
- multiblob_builder.cpp
-)
-
-PEERDIR(
- library/cpp/on_disk/chunks
- util/draft
-)
-
-END()
diff --git a/library/cpp/on_disk/st_hash/fake.cpp b/library/cpp/on_disk/st_hash/fake.cpp
deleted file mode 100644
index ef5af4d432..0000000000
--- a/library/cpp/on_disk/st_hash/fake.cpp
+++ /dev/null
@@ -1,4 +0,0 @@
-#include "save_stl.h"
-#include "static_hash.h"
-#include "static_hash_map.h"
-#include "sthash_iterators.h"
diff --git a/library/cpp/on_disk/st_hash/save_stl.h b/library/cpp/on_disk/st_hash/save_stl.h
deleted file mode 100644
index 00f8f0e20d..0000000000
--- a/library/cpp/on_disk/st_hash/save_stl.h
+++ /dev/null
@@ -1,84 +0,0 @@
-#pragma once
-
-#include <util/generic/hash.h>
-#include <util/system/yassert.h>
-#include <util/stream/output.h>
-
-// this structure might be replaced with sthashtable class
-template <class HF, class Eq, class size_type>
-struct sthashtable_nvm_sv {
- sthashtable_nvm_sv() {
- if (sizeof(sthashtable_nvm_sv) != sizeof(HF) + sizeof(Eq) + 3 * sizeof(size_type)) {
- memset(this, 0, sizeof(sthashtable_nvm_sv));
- }
- }
-
- sthashtable_nvm_sv(const HF& phf, const Eq& peq, const size_type& pnb, const size_type& pne, const size_type& pnd)
- : sthashtable_nvm_sv()
- {
- hf = phf;
- eq = peq;
- num_buckets = pnb;
- num_elements = pne;
- data_end_off = pnd;
- }
-
- HF hf;
- Eq eq;
- size_type num_buckets;
- size_type num_elements;
- size_type data_end_off;
-};
-
-/**
- * Some hack to save both THashMap and sthash.
- * Working with stHash does not depend on the template parameters, because the content of stHash is not used inside this method.
- */
-template <class V, class K, class HF, class Ex, class Eq, class A>
-template <class KeySaver>
-inline int THashTable<V, K, HF, Ex, Eq, A>::save_for_st(IOutputStream* stream, KeySaver& ks, sthash<int, int, THash<int>, TEqualTo<int>, typename KeySaver::TSizeType>* stHash) const {
- Y_ASSERT(!stHash || stHash->bucket_count() == bucket_count());
- typedef sthashtable_nvm_sv<HF, Eq, typename KeySaver::TSizeType> sv_type;
- sv_type sv = {this->_get_hash_fun(), this->_get_key_eq(), static_cast<typename KeySaver::TSizeType>(buckets.size()), static_cast<typename KeySaver::TSizeType>(num_elements), 0};
-    // TODO: maybe use just the size of the corresponding object?
- typename KeySaver::TSizeType cur_off = sizeof(sv_type) +
- (sv.num_buckets + 1) * sizeof(typename KeySaver::TSizeType);
- sv.data_end_off = cur_off;
- const_iterator n;
- for (n = begin(); n != end(); ++n) {
- sv.data_end_off += static_cast<typename KeySaver::TSizeType>(ks.GetRecordSize(*n));
- }
- typename KeySaver::TSizeType* sb = stHash ? (typename KeySaver::TSizeType*)(stHash->buckets()) : nullptr;
- if (stHash)
- sv.data_end_off += static_cast<typename KeySaver::TSizeType>(sb[buckets.size()] - sb[0]);
- //saver.Align(sizeof(char*));
- stream->Write(&sv, sizeof(sv));
-
- size_type i;
-    // save the buckets vector (per-bucket offsets)
- for (i = 0; i < buckets.size(); ++i) {
- node* cur = buckets[i];
- stream->Write(&cur_off, sizeof(cur_off));
- if (cur) {
- while (!((uintptr_t)cur & 1)) {
- cur_off += static_cast<typename KeySaver::TSizeType>(ks.GetRecordSize(cur->val));
- cur = cur->next;
- }
- }
- if (stHash)
- cur_off += static_cast<typename KeySaver::TSizeType>(sb[i + 1] - sb[i]);
- }
- stream->Write(&cur_off, sizeof(cur_off)); // end mark
- for (i = 0; i < buckets.size(); ++i) {
- node* cur = buckets[i];
- if (cur) {
- while (!((uintptr_t)cur & 1)) {
- ks.SaveRecord(stream, cur->val);
- cur = cur->next;
- }
- }
- if (stHash)
- stream->Write((const char*)stHash + sb[i], sb[i + 1] - sb[i]);
- }
- return 0;
-}
diff --git a/library/cpp/on_disk/st_hash/static_hash.h b/library/cpp/on_disk/st_hash/static_hash.h
deleted file mode 100644
index ca7a6ccd36..0000000000
--- a/library/cpp/on_disk/st_hash/static_hash.h
+++ /dev/null
@@ -1,420 +0,0 @@
-#pragma once
-
-#include "save_stl.h"
-#include "sthash_iterators.h"
-
-#include <util/generic/hash.h>
-#include <util/generic/vector.h>
-#include <util/generic/buffer.h>
-#include <util/generic/cast.h>
-#include <util/generic/yexception.h> // for save/load only
-#include <util/stream/file.h>
-#include <util/stream/buffer.h>
-#include <utility>
-
-#include <memory>
-#include <algorithm>
-#include <functional>
-
-#include <cstdlib>
-#include <cstddef>
-
-#ifdef _MSC_VER
-#pragma warning(push)
-#pragma warning(disable : 4624) // 'destructor could not be generated because a base class destructor is inaccessible'
-#endif
-
-template <class HashType, class KeySaver>
-inline void SaveHashToStreamEx(HashType& hash, IOutputStream* stream) {
- KeySaver ks;
- if (hash.save_for_st(stream, ks))
- ythrow yexception() << "Could not save hash to stream";
-}
-
-template <class HashType>
-inline void SaveHashToStream(HashType& hash, IOutputStream* stream) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui64> KeySaver;
- return SaveHashToStreamEx<HashType, KeySaver>(hash, stream);
-}
-
-template <class HashType, class KeySaver>
-inline void SaveHashToFileEx(HashType& hash, const char* fileName) {
- TFileOutput output(fileName);
- SaveHashToStreamEx<HashType, KeySaver>(hash, &output);
-}
-
-template <class HashType>
-inline void SaveHashToFile(HashType& hash, const char* fileName) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui64> KeySaver;
- return SaveHashToFileEx<HashType, KeySaver>(hash, fileName);
-}
-
-template <class HashType>
-inline void SaveHashSetToFile(HashType& hash, const char* fileName) {
- typedef TSthashSetWriter<typename HashType::key_type, ui64> KeySaver;
- return SaveHashToFileEx<HashType, KeySaver>(hash, fileName);
-}
-
-template <class HashType>
-inline void SaveHashToFile32(HashType& hash, const char* fileName) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui32> KeySaver;
- return SaveHashToFileEx<HashType, KeySaver>(hash, fileName);
-}
-
-template <class HashType, class KeySaver>
-inline void SaveHashToBufferEx(HashType& hash, TBuffer& buffer, sthash<int, int, THash<int>, TEqualTo<int>, typename KeySaver::TSizeType>* stHash = nullptr) {
- TBufferOutput stream(buffer);
- KeySaver ks;
- if (hash.save_for_st(&stream, ks, stHash))
- ythrow yexception() << "Could not save hash to memory";
-}
-
-template <class HashType>
-inline void SaveHashToBuffer(HashType& hash, TBuffer& buffer) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui64> KeySaver;
- SaveHashToBufferEx<HashType, KeySaver>(hash, buffer);
-}
-
-/**
- * Some hack to save both THashMap and sthash.
- * THashMap and sthash must have the same bucket_count().
- */
-template <class HashType, class StHashType>
-inline void SaveHashToBuffer(HashType& hash, TBuffer& buffer, StHashType* stHash) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui64> KeySaver;
- typedef sthash<int, int, THash<int>, TEqualTo<int>, typename KeySaver::TSizeType>* SH;
-
- SH sh = reinterpret_cast<SH>(stHash);
- SaveHashToBufferEx<HashType, KeySaver>(hash, buffer, sh);
-}
-
-template <class HashType>
-inline void SaveHashToBuffer32(HashType& hash, TBuffer& buffer) {
- typedef TSthashWriter<typename HashType::key_type, typename HashType::mapped_type, ui32> KeySaver;
- SaveHashToBufferEx<HashType, KeySaver>(hash, buffer);
-}
-
-template <class Iter, typename size_type_f = ui64>
-class sthashtable {
-public:
- typedef typename Iter::TKeyType key_type;
- typedef typename Iter::TValueType value_type;
- typedef typename Iter::THasherType hasher;
- typedef typename Iter::TKeyEqualType key_equal;
-
- typedef size_type_f size_type;
- typedef ptrdiff_t difference_type;
- typedef const value_type* const_pointer;
- typedef const value_type& const_reference;
-
- typedef Iter const_iterator;
-
- const hasher hash_funct() const {
- return hash;
- }
- const key_equal key_eq() const {
- return equals;
- }
-
-private:
- const hasher hash;
- const key_equal equals;
-
-private:
- const_iterator iter_at_bucket(size_type bucket) const {
- return (const_iterator)(((char*)this + buckets()[bucket]));
- }
-
- const_iterator iter_at_bucket_or_end(size_type bucket) const {
- if (bucket < num_buckets)
- return (const_iterator)(((char*)this + buckets()[bucket]));
- else
- return end();
- }
-
- const size_type num_buckets;
- const size_type num_elements;
- const size_type data_end_off;
-
-protected: //shut up gcc warning
- // we can't construct/destroy this object at all!
- sthashtable();
- sthashtable(const sthashtable& ht);
- ~sthashtable();
-
-public:
- // const size_type *buckets;
- const size_type* buckets() const {
- return (size_type*)((char*)this + sizeof(*this));
- }
- const size_type buckets(size_type n) const {
- return buckets()[n];
- }
-
- size_type size() const {
- return num_elements;
- }
- size_type max_size() const {
- return size_type(-1);
- }
- bool empty() const {
- return size() == 0;
- }
-
- const_iterator begin() const {
- return num_buckets ? iter_at_bucket(0) : end();
- }
-
- const_iterator end() const {
- return (const_iterator)(((char*)this + data_end_off));
- }
-
-public:
- size_type size_in_bytes() const {
- return data_end_off;
- }
-
- size_type bucket_count() const {
- return num_buckets;
- }
-
- size_type elems_in_bucket(size_type bucket) const {
- size_type result = 0;
- const_iterator first = iter_at_bucket(bucket);
- const_iterator last = iter_at_bucket_or_end(bucket + 1);
-
- for (; first != last; ++first)
- ++result;
- return result;
- }
-
- template <class TheKey>
- const_iterator find(const TheKey& key) const {
- size_type n = bkt_num_key(key);
- const_iterator first(iter_at_bucket(n)), last(iter_at_bucket_or_end(n + 1));
- for (;
- first != last && !first.KeyEquals(equals, key);
- ++first) {
- }
- if (first != last)
- return first;
- return end();
- }
-
- size_type count(const key_type& key) const {
- const size_type n = bkt_num_key(key);
- size_type result = 0;
- const_iterator first = iter_at_bucket(n);
- const_iterator last = iter_at_bucket_or_end(n + 1);
-
- for (; first != last; ++first)
- if (first.KeyEquals(equals, key))
- ++result;
- return result;
- }
-
- std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const;
-
-private:
- template <class TheKey>
- size_type bkt_num_key(const TheKey& key) const {
- return hash(key) % num_buckets;
- }
-};
-
-template <class I, class size_type_f>
-std::pair<I, I> sthashtable<I, size_type_f>::equal_range(const key_type& key) const {
- typedef std::pair<const_iterator, const_iterator> pii;
- const size_type n = bkt_num_key(key);
- const_iterator first = iter_at_bucket(n);
- const_iterator last = iter_at_bucket_or_end(n + 1);
-
- for (; first != last; ++first) {
- if (first.KeyEquals(equals, key)) {
- const_iterator cur = first;
- ++cur;
- for (; cur != last; ++cur)
- if (!cur.KeyEquals(equals, key))
- return pii(const_iterator(first),
- const_iterator(cur));
- return pii(const_iterator(first),
- const_iterator(last));
- }
- }
- return pii(end(), end());
-}
-
-/* end __SGI_STL_HASHTABLE_H */
-
-template <class Key, class T, class HashFcn /*= hash<Key>*/,
- class EqualKey = TEqualTo<Key>, typename size_type_f = ui64>
-class sthash {
-private:
- typedef sthashtable<TSthashIterator<const Key, const T, HashFcn, EqualKey>, size_type_f> ht;
- ht rep;
-
-public:
- typedef typename ht::key_type key_type;
- typedef typename ht::value_type value_type;
- typedef typename ht::hasher hasher;
- typedef typename ht::key_equal key_equal;
- typedef T mapped_type;
-
- typedef typename ht::size_type size_type;
- typedef typename ht::difference_type difference_type;
- typedef typename ht::const_pointer const_pointer;
- typedef typename ht::const_reference const_reference;
-
- typedef typename ht::const_iterator const_iterator;
-
- const hasher hash_funct() const {
- return rep.hash_funct();
- }
- const key_equal key_eq() const {
- return rep.key_eq();
- }
-
-public:
- size_type size() const {
- return rep.size();
- }
- size_type max_size() const {
- return rep.max_size();
- }
- bool empty() const {
- return rep.empty();
- }
-
- const_iterator begin() const {
- return rep.begin();
- }
- const_iterator end() const {
- return rep.end();
- }
-
-public:
- template <class TheKey>
- const_iterator find(const TheKey& key) const {
- return rep.find(key);
- }
- template <class TheKey>
- bool has(const TheKey& key) const {
- return rep.find(key) != rep.end();
- }
-
- size_type count(const key_type& key) const {
- return rep.count(key);
- }
-
- std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
- return rep.equal_range(key);
- }
-
- size_type size_in_bytes() const {
- return rep.size_in_bytes();
- }
-
- size_type bucket_count() const {
- return rep.bucket_count();
- }
- size_type max_bucket_count() const {
- return rep.max_bucket_count();
- }
- size_type elems_in_bucket(size_type n) const {
- return rep.elems_in_bucket(n);
- }
-
- const size_type* buckets() const {
- return rep.buckets();
- }
- const size_type buckets(size_type n) const {
- return rep.buckets()[n];
- }
-};
-
-template <class Key, class HashFcn,
- class EqualKey = TEqualTo<Key>, typename size_type_f = ui64>
-class sthash_set: public sthash<Key, TEmptyValue, HashFcn, EqualKey, size_type_f> {
- typedef sthash<Key, TEmptyValue, HashFcn, EqualKey, size_type_f> Base;
-
-public:
- using Base::const_iterator;
- using Base::hasher;
- using Base::key_equal;
- using Base::key_type;
- using Base::size_type;
- using Base::value_type;
-};
-
-template <class Key, class T, class HashFcn /*= hash<Key>*/,
- class EqualKey = TEqualTo<Key>, typename size_type_f = ui64>
-class sthash_mm {
-private:
- typedef sthashtable<TSthashIterator<const Key, T, HashFcn, EqualKey>, size_type_f> ht;
- ht rep;
-
-public:
- typedef typename ht::key_type key_type;
- typedef typename ht::value_type value_type;
- typedef typename ht::hasher hasher;
- typedef typename ht::key_equal key_equal;
- typedef T mapped_type;
-
- typedef typename ht::size_type size_type;
- typedef typename ht::difference_type difference_type;
- typedef typename ht::const_pointer const_pointer;
- typedef typename ht::const_reference const_reference;
-
- typedef typename ht::const_iterator const_iterator;
-
- const hasher hash_funct() const {
- return rep.hash_funct();
- }
- const key_equal key_eq() const {
- return rep.key_eq();
- }
-
-public:
- size_type size() const {
- return rep.size();
- }
- size_type max_size() const {
- return rep.max_size();
- }
- bool empty() const {
- return rep.empty();
- }
-
- const_iterator begin() const {
- return rep.begin();
- }
- const_iterator end() const {
- return rep.end();
- }
-
- const_iterator find(const key_type& key) const {
- return rep.find(key);
- }
-
- size_type count(const key_type& key) const {
- return rep.count(key);
- }
-
- std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
- return rep.equal_range(key);
- }
-
- size_type bucket_count() const {
- return rep.bucket_count();
- }
- size_type max_bucket_count() const {
- return rep.max_bucket_count();
- }
- size_type elems_in_bucket(size_type n) const {
- return rep.elems_in_bucket(n);
- }
-};
-
-#ifdef _MSC_VER
-#pragma warning(pop)
-#endif
diff --git a/library/cpp/on_disk/st_hash/static_hash_map.h b/library/cpp/on_disk/st_hash/static_hash_map.h
deleted file mode 100644
index 5dc50abd39..0000000000
--- a/library/cpp/on_disk/st_hash/static_hash_map.h
+++ /dev/null
@@ -1,59 +0,0 @@
-#pragma once
-
-#include "static_hash.h"
-
-#include <library/cpp/deprecated/mapped_file/mapped_file.h>
-
-#include <util/system/filemap.h>
-
-template <class SH>
-struct sthash_mapped_c {
- typedef SH H;
- typedef typename H::const_iterator const_iterator;
- TMappedFile M;
- H* hsh;
- sthash_mapped_c()
- : M()
- , hsh(nullptr)
- {
- }
- sthash_mapped_c(const char* fname, bool precharge)
- : M()
- , hsh(nullptr)
- {
- Open(fname, precharge);
- }
- void Open(const char* fname, bool precharge) {
- M.init(fname);
- if (precharge)
- M.precharge();
- hsh = (H*)M.getData();
- if (M.getSize() < sizeof(H) || (ssize_t)M.getSize() != hsh->end().Data - (char*)hsh)
- ythrow yexception() << "Could not map hash: " << fname << " is damaged";
- }
- H* operator->() {
- return hsh;
- }
- const H* operator->() const {
- return hsh;
- }
- H* GetSthash() {
- return hsh;
- }
- const H* GetSthash() const {
- return hsh;
- }
-};
-
-template <class Key, class T, class Hash>
-struct sthash_mapped: public sthash_mapped_c<sthash<Key, T, Hash>> {
- typedef sthash<Key, T, Hash> H;
- sthash_mapped(const char* fname, bool precharge)
- : sthash_mapped_c<H>(fname, precharge)
- {
- }
- sthash_mapped()
- : sthash_mapped_c<H>()
- {
- }
-};
diff --git a/library/cpp/on_disk/st_hash/sthash_iterators.h b/library/cpp/on_disk/st_hash/sthash_iterators.h
deleted file mode 100644
index 6a9ebdd6c3..0000000000
--- a/library/cpp/on_disk/st_hash/sthash_iterators.h
+++ /dev/null
@@ -1,334 +0,0 @@
-#pragma once
-
-#include "save_stl.h"
-
-#include <util/system/align.h>
-
-/**
- This file provides functionality for saving a relatively simple THashMap object
- to disk in a form that can be mapped read-only (via mmap) at any address.
- The saved object is accessed through a pointer to an sthash object that must have
- the same template parameters as the original THashMap object.
-
- If either the key or the value is variable-sized (i.e. contains pointers), the user must
- write their own instantiation of TSthashIterator (read iterator for sthash) and
- TSthashWriter (write iterator for THashMap).
- An example for the <const char*, B> pair is provided below.
-**/
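
A minimal read-side usage sketch, for orientation. The write side (TSthashWriter together with the helpers in save_stl.h) is not part of this excerpt, so the sketch assumes a file that was already saved in this format; the file name and the ui32 key/value types are illustrative only, and the hash/equality functors must match the ones used when the file was written:

#include <library/cpp/on_disk/st_hash/static_hash_map.h>

#include <util/generic/hash.h> // THash

void LookupExample() {
    // Map a previously saved hash read-only; precharge=false skips pre-faulting the pages.
    sthash_mapped<ui32, ui32, THash<ui32>> mapped("index.sthash", /*precharge=*/ false);

    if (mapped->has(ui32(42))) {
        const ui32 value = mapped->find(ui32(42)).Value();
        (void)value; // use the value here
    }
}
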
-
-// TEmptyValue and SizeOfEx are helpers for sthash_set
-struct TEmptyValue {
- TEmptyValue() = default;
-};
-
-template <class T>
-inline size_t SizeOfEx() {
- return sizeof(T);
-}
-
-template <>
-inline size_t SizeOfEx<TEmptyValue>() {
- return 0;
-}
-template <>
-inline size_t SizeOfEx<const TEmptyValue>() {
- return 0;
-}
-
-template <class TKey, class TValue, class HashFcn, class EqualKey>
-struct TSthashIterator {
- // Implementation for simple types
- typedef const TKey TKeyType;
- typedef const TValue TValueType;
- typedef EqualKey TKeyEqualType;
- typedef HashFcn THasherType;
-
- const char* Data;
- TSthashIterator()
- : Data(nullptr)
- {
- }
- explicit TSthashIterator(const char* data)
- : Data(data)
- {
- }
- void operator++() {
- Data += GetLength();
- }
-
- bool operator!=(const TSthashIterator& that) const {
- return Data != that.Data;
- }
- bool operator==(const TSthashIterator& that) const {
- return Data == that.Data;
- }
- TKey& Key() const {
- return *(TKey*)Data;
- }
- TValue& Value() {
- return *(TValue*)(Data + sizeof(TKey));
- }
- const TValue& Value() const {
- return *(const TValue*)(Data + sizeof(TKey));
- }
-
- template <class AnotherKeyType>
- bool KeyEquals(const EqualKey& eq, const AnotherKeyType& key) const {
- return eq(*(TKey*)Data, key);
- }
-
- size_t GetLength() const {
- return sizeof(TKey) + SizeOfEx<TValue>();
- }
-};
-
-template <class Key, class Value, typename size_type_o = ui64>
-struct TSthashWriter {
- typedef size_type_o TSizeType;
- size_t GetRecordSize(const std::pair<const Key, const Value>&) const {
- return sizeof(Key) + SizeOfEx<Value>();
- }
- int SaveRecord(IOutputStream* stream, const std::pair<const Key, const Value>& record) const {
- stream->Write(&record.first, sizeof(Key));
- stream->Write(&record.second, SizeOfEx<Value>());
- return 0;
- }
-};
-
-// Remember that this simplified implementation makes a copy of `key' in std::make_pair.
-// It can also waste some memory on undesired alignment.
-template <class Key, typename size_type_o = ui64>
-struct TSthashSetWriter: public TSthashWriter<Key, TEmptyValue, size_type_o> {
- typedef TSthashWriter<Key, TEmptyValue, size_type_o> MapWriter;
- size_t GetRecordSize(const Key& key) const {
- return MapWriter::GetRecordSize(std::make_pair(key, TEmptyValue()));
- }
- int SaveRecord(IOutputStream* stream, const Key& key) const {
- return MapWriter::SaveRecord(stream, std::make_pair(key, TEmptyValue()));
- }
-};
-
-// we can't save something with pointers without additional tricks
-
-template <class A, class B, class HashFcn, class EqualKey>
-struct TSthashIterator<A*, B, HashFcn, EqualKey> {};
-
-template <class A, class B, class HashFcn, class EqualKey>
-struct TSthashIterator<A, B*, HashFcn, EqualKey> {};
-
-template <class A, class B, typename size_type_o>
-struct TSthashWriter<A*, B*, size_type_o> {};
-
-template <class A, class B, typename size_type_o>
-struct TSthashWriter<A*, B, size_type_o> {};
-
-template <class A, class B, typename size_type_o>
-struct TSthashWriter<A, B*, size_type_o> {};
-
-template <class T>
-inline size_t AlignForChrKey() {
- return 4; // TODO: change this (requires rebuilding a few existing files)
-}
-
-template <>
-inline size_t AlignForChrKey<TEmptyValue>() {
- return 1;
-}
-
-template <>
-inline size_t AlignForChrKey<const TEmptyValue>() {
- return AlignForChrKey<TEmptyValue>();
-}
-
-// !! Note that for char* keys, the physical placement of key and value is swapped.
-template <class TValue, class HashFcn, class EqualKey>
-struct TSthashIterator<const char* const, TValue, HashFcn, EqualKey> {
- typedef const TValue TValueType;
- typedef const char* TKeyType;
- typedef EqualKey TKeyEqualType;
- typedef HashFcn THasherType;
-
- const char* Data;
- TSthashIterator()
- : Data(nullptr)
- {
- }
- TSthashIterator(const char* data)
- : Data(data)
- {
- }
- void operator++() {
- Data += GetLength();
- }
-
- bool operator!=(const TSthashIterator& that) const {
- return Data != that.Data;
- }
- bool operator==(const TSthashIterator& that) const {
- return Data == that.Data;
- }
- const char* Key() const {
- return Data + SizeOfEx<TValue>();
- }
- TValue& Value() {
- return *(TValue*)Data;
- }
- const TValue& Value() const {
- return *(const TValue*)Data;
- }
-
- template <class K>
- bool KeyEquals(const EqualKey& eq, const K& k) const {
- return eq(Data + SizeOfEx<TValue>(), k);
- }
-
- size_t GetLength() const {
- size_t length = strlen(Data + SizeOfEx<TValue>()) + 1 + SizeOfEx<TValue>();
- length = AlignUp(length, AlignForChrKey<TValue>());
- return length;
- }
-};
-
-template <class Value, typename size_type_o>
-struct TSthashWriter<const char*, Value, size_type_o> {
- typedef size_type_o TSizeType;
- size_t GetRecordSize(const std::pair<const char*, const Value>& record) const {
- size_t length = strlen(record.first) + 1 + SizeOfEx<Value>();
- length = AlignUp(length, AlignForChrKey<Value>());
- return length;
- }
- int SaveRecord(IOutputStream* stream, const std::pair<const char*, const Value>& record) const {
- const char* alignBuffer = "qqqq";
- stream->Write(&record.second, SizeOfEx<Value>());
- size_t length = strlen(record.first) + 1;
- stream->Write(record.first, length);
- length = AlignUpSpace(length, AlignForChrKey<Value>());
- if (length)
- stream->Write(alignBuffer, length);
- return 0;
- }
-};
-
-template <class TKey, class HashFcn, class EqualKey>
-struct TSthashIterator<TKey, const char* const, HashFcn, EqualKey> {
- typedef const TKey TKeyType;
- typedef const char* TValueType;
- typedef EqualKey TKeyEqualType;
- typedef HashFcn THasherType;
-
- const char* Data;
- TSthashIterator()
- : Data(nullptr)
- {
- }
- TSthashIterator(const char* data)
- : Data(data)
- {
- }
- void operator++() {
- Data += GetLength();
- }
-
- bool operator!=(const TSthashIterator& that) const {
- return Data != that.Data;
- }
- bool operator==(const TSthashIterator& that) const {
- return Data == that.Data;
- }
- TKey& Key() {
- return *(TKey*)Data;
- }
- const char* Value() const {
- return Data + sizeof(TKey);
- }
-
- template <class K>
- bool KeyEquals(const EqualKey& eq, const K& k) const {
- return eq(*(TKey*)Data, k);
- }
-
- size_t GetLength() const {
- size_t length = strlen(Data + sizeof(TKey)) + 1 + sizeof(TKey);
- length = AlignUp(length, (size_t)4);
- return length;
- }
-};
-
-template <class Key, typename size_type_o>
-struct TSthashWriter<Key, const char*, size_type_o> {
- typedef size_type_o TSizeType;
- size_t GetRecordSize(const std::pair<const Key, const char*>& record) const {
- size_t length = strlen(record.second) + 1 + sizeof(Key);
- length = AlignUp(length, (size_t)4);
- return length;
- }
- int SaveRecord(IOutputStream* stream, const std::pair<const Key, const char*>& record) const {
- const char* alignBuffer = "qqqq";
- stream->Write(&record.first, sizeof(Key));
- size_t length = strlen(record.second) + 1;
- stream->Write(record.second, length);
- length = AlignUpSpace(length, (size_t)4);
- if (length)
- stream->Write(alignBuffer, length);
- return 0;
- }
-};
-
-template <class HashFcn, class EqualKey>
-struct TSthashIterator<const char* const, const char* const, HashFcn, EqualKey> {
- typedef const char* TKeyType;
- typedef const char* TValueType;
- typedef EqualKey TKeyEqualType;
- typedef HashFcn THasherType;
-
- const char* Data;
- TSthashIterator()
- : Data(nullptr)
- {
- }
- TSthashIterator(const char* data)
- : Data(data)
- {
- }
- void operator++() {
- Data += GetLength();
- }
-
- bool operator!=(const TSthashIterator& that) const {
- return Data != that.Data;
- }
- bool operator==(const TSthashIterator& that) const {
- return Data == that.Data;
- }
- const char* Key() const {
- return Data;
- }
- const char* Value() const {
- return Data + strlen(Data) + 1;
- }
-
- template <class K>
- bool KeyEquals(const EqualKey& eq, const K& k) const {
- return eq(Data, k);
- }
-
- size_t GetLength() const {
- size_t length = strlen(Data) + 1;
- length += strlen(Data + length) + 1;
- return length;
- }
-};
-
-template <typename size_type_o>
-struct TSthashWriter<const char*, const char*, size_type_o> {
- typedef size_type_o TSizeType;
- size_t GetRecordSize(const std::pair<const char*, const char*>& record) const {
- size_t size = strlen(record.first) + strlen(record.second) + 2;
- return size;
- }
- int SaveRecord(IOutputStream* stream, const std::pair<const char*, const char*>& record) const {
- stream->Write(record.first, strlen(record.first) + 1);
- stream->Write(record.second, strlen(record.second) + 1);
- return 0;
- }
-};
diff --git a/library/cpp/on_disk/st_hash/ya.make b/library/cpp/on_disk/st_hash/ya.make
deleted file mode 100644
index 8c6d05711c..0000000000
--- a/library/cpp/on_disk/st_hash/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-LIBRARY()
-
-SRCS(
- fake.cpp
- save_stl.h
- static_hash.h
- static_hash_map.h
- sthash_iterators.h
-)
-
-PEERDIR(
- library/cpp/deprecated/mapped_file
-)
-
-END()
diff --git a/library/cpp/remmap/remmap.cpp b/library/cpp/remmap/remmap.cpp
deleted file mode 100644
index ce72af7352..0000000000
--- a/library/cpp/remmap/remmap.cpp
+++ /dev/null
@@ -1,138 +0,0 @@
-#include <util/system/info.h>
-#include <util/system/defaults.h>
-
-#if defined(_win_)
-#include <util/system/winint.h>
-#elif defined(_unix_)
-#include <sys/types.h>
-#include <sys/mman.h>
-
-#ifndef MAP_NOCORE
-#define MAP_NOCORE 0
-#endif
-#else
-#error todo
-#endif
-
-#include "remmap.h"
-
-static const size_t REMMAP_PAGESIZE = NSystemInfo::GetPageSize();
-
-#if defined(_unix_)
-TRemmapAllocation::TRemmapAllocation()
- : Ptr_(nullptr)
- , Size_(0)
-{
-}
-
-TRemmapAllocation::TRemmapAllocation(size_t size, char* base)
- : Ptr_(nullptr)
- , Size_(0)
-{
- Alloc(size, base);
-}
-
-char* TRemmapAllocation::Alloc(size_t size, char* base) {
- assert(Ptr_ == nullptr);
-
- if (!size)
- return nullptr;
-
- const size_t HUGESIZE = size_t(16) << 30;
- Ptr_ = CommonMMap(HUGESIZE, base);
-
- if (Ptr_ != (char*)MAP_FAILED)
- munmap((void*)Ptr_, HUGESIZE);
- else
- Ptr_ = nullptr;
-
- Ptr_ = CommonMMap(AlignUp(size, REMMAP_PAGESIZE), Ptr_);
- if (Ptr_ == (char*)MAP_FAILED)
- Ptr_ = nullptr;
-
- Size_ = Ptr_ ? size : 0;
- return Ptr_;
-}
-
-char* TRemmapAllocation::Realloc(size_t newsize) {
- if (Ptr_ == nullptr)
- return Alloc(newsize);
-
- size_t realSize = AlignUp(Size_, REMMAP_PAGESIZE);
- size_t needSize = AlignUp(newsize, REMMAP_PAGESIZE);
-
- if (needSize > realSize) {
- char* part = Ptr_ + realSize;
- char* bunch = CommonMMap(needSize - realSize, part);
- if (bunch != (char*)MAP_FAILED && bunch != part)
- munmap(bunch, needSize - realSize);
- if (bunch == (char*)MAP_FAILED || bunch != part)
- return FullRealloc(newsize);
- } else if (needSize < realSize)
- munmap(Ptr_ + needSize, realSize - needSize);
-
- if ((Size_ = newsize) == 0)
- Ptr_ = nullptr;
-
- return Ptr_;
-}
-
-void TRemmapAllocation::Dealloc() {
- if (Ptr_ != nullptr)
- munmap(Ptr_, AlignUp(Size_, REMMAP_PAGESIZE));
- Ptr_ = nullptr;
- Size_ = 0;
-}
-
-char* TRemmapAllocation::FullRealloc(size_t newsize) {
- char* newPtr = CommonMMap(newsize);
- Y_ABORT_UNLESS(newPtr != MAP_FAILED, "mmap failed");
-
- size_t useful = Min(Size_, newsize), cur = 0;
-
- for (; cur + REMMAP_PAGESIZE < useful; cur += REMMAP_PAGESIZE) {
- memcpy((void*)&newPtr[cur], (void*)&Ptr_[cur], REMMAP_PAGESIZE);
- munmap((void*)&Ptr_[cur], REMMAP_PAGESIZE);
- }
-
- memcpy((void*)&newPtr[cur], (void*)&Ptr_[cur], useful - cur);
- munmap((void*)&Ptr_[cur], AlignUp(Size_ - cur, REMMAP_PAGESIZE));
-
- Size_ = newsize;
- return (Ptr_ = newPtr);
-}
-
-inline char* TRemmapAllocation::CommonMMap(size_t size, char* base) {
- return (char*)mmap((void*)base, size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANON, -1, 0);
-}
-
-#else
-TRemmapAllocation::TRemmapAllocation()
- : Allocation_(0, false, NULL)
-{
-}
-
-TRemmapAllocation::TRemmapAllocation(size_t size, char* base)
- : Allocation_(size, false, (void*)base)
-{
-}
-
-char* TRemmapAllocation::Alloc(size_t size, char* base) {
- return (char*)Allocation_.Alloc(size, (void*)base);
-}
-
-char* TRemmapAllocation::Realloc(size_t newsize) {
- return FullRealloc(newsize);
-}
-
-void TRemmapAllocation::Dealloc() {
- Allocation_.Dealloc();
-}
-
-char* TRemmapAllocation::FullRealloc(size_t newsize) {
- TMappedAllocation other(newsize);
- memcpy(other.Ptr(), Allocation_.Ptr(), Min(other.MappedSize(), Allocation_.MappedSize()));
- Allocation_.swap(other);
- return Data();
-}
-#endif
diff --git a/library/cpp/remmap/remmap.h b/library/cpp/remmap/remmap.h
deleted file mode 100644
index 7cb738f7ae..0000000000
--- a/library/cpp/remmap/remmap.h
+++ /dev/null
@@ -1,64 +0,0 @@
-#pragma once
-
-#include <util/system/yassert.h>
-#include <util/system/align.h>
-#include <util/system/info.h>
-#include <util/system/filemap.h>
-#include <util/memory/alloc.h>
-#include <util/generic/noncopyable.h>
-
-class TRemmapAllocation : TNonCopyable {
-public:
- TRemmapAllocation();
- TRemmapAllocation(size_t size, char* base = nullptr);
-
- ~TRemmapAllocation() {
- Dealloc();
- }
-
- char* Alloc(size_t size, char* base = nullptr);
- char* Realloc(size_t newsize);
- void Dealloc();
- char* FullRealloc(size_t newsize);
-
-#if defined(_unix_)
-private:
- inline char* CommonMMap(size_t size, char* base = nullptr);
-
- char* Ptr_;
- size_t Size_;
-
-public:
- inline void* Ptr() const {
- return (void*)Ptr_;
- }
- inline char* Data(ui32 pos = 0) const {
- return Ptr_ + pos;
- }
- inline size_t Size() const {
- return Size_;
- }
- inline void swap(TRemmapAllocation& other) {
- DoSwap(Ptr_, other.Ptr_);
- DoSwap(Size_, other.Size_);
- }
-
-#else
-private:
- TMappedAllocation Allocation_;
-
-public:
- inline void* Ptr() const {
- return Allocation_.Ptr();
- }
- inline char* Data(ui32 pos = 0) const {
- return Allocation_.Data(pos);
- }
- inline size_t Size() const {
- return Allocation_.MappedSize();
- }
- inline void swap(TRemmapAllocation& other) {
- Allocation_.swap(other.Allocation_);
- }
-#endif
-};
diff --git a/library/cpp/remmap/ya.make b/library/cpp/remmap/ya.make
deleted file mode 100644
index 281df6443a..0000000000
--- a/library/cpp/remmap/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-LIBRARY()
-
-SRCS(
- remmap.cpp
-)
-
-END()
diff --git a/library/cpp/sqlite3/sqlite.cpp b/library/cpp/sqlite3/sqlite.cpp
deleted file mode 100644
index 98e498f76b..0000000000
--- a/library/cpp/sqlite3/sqlite.cpp
+++ /dev/null
@@ -1,288 +0,0 @@
-#include "sqlite.h"
-
-#include <util/generic/singleton.h>
-#include <util/generic/scope.h>
-
-#include <cstdlib>
-
-using namespace NSQLite;
-
-namespace {
- struct TSQLiteInit {
- inline TSQLiteInit() {
- int ret = sqlite3_config(SQLITE_CONFIG_MULTITHREAD);
-
- if (ret != SQLITE_OK) {
- ythrow TSQLiteError(ret) << "init failure";
- }
- }
-
- static inline void Ensure() {
- Singleton<TSQLiteInit>();
- }
- };
-}
-
-namespace NSQLite {
- TSQLiteError::TSQLiteError(sqlite3* hndl)
- : ErrorCode(sqlite3_errcode(hndl))
- {
- *this << sqlite3_errmsg(hndl) << ". ";
- }
-
- TSQLiteError::TSQLiteError(int rc)
- : ErrorCode(rc)
- {
- *this << sqlite3_errstr(rc) << " (" << rc << "). ";
- }
-
- TSQLiteDB::TSQLiteDB(const TString& path) {
- TSQLiteInit::Ensure();
-
- sqlite3* db = nullptr;
- const int rc = sqlite3_open(path.data(), &db);
-
- H_.Reset(db);
-
- if (rc) {
- ythrow TSQLiteError(Handle()) << "can not init db " << path.Quote();
- }
- }
-
- TSQLiteDB::TSQLiteDB(const TString& path, int flags) {
- TSQLiteInit::Ensure();
-
- sqlite3* db = nullptr;
- const int rc = sqlite3_open_v2(path.data(), &db, flags, nullptr);
-
- H_.Reset(db);
-
- if (rc) {
- ythrow TSQLiteError(Handle()) << "can not init db " << path.Quote();
- }
- }
-
- sqlite3* TSQLiteDB::Handle() const noexcept {
- return H_.Get();
- }
-
- size_t TSQLiteDB::RowsAffected() const noexcept {
- return static_cast<size_t>(sqlite3_changes(H_.Get()));
- }
-
- TSQLiteStatement::TSQLiteStatement(TSQLiteDB& db, const TString& s)
- : S_(s)
- {
- if (!S_.empty() && S_[S_.size() - 1] != ';') {
- S_ += ';';
- }
-
- sqlite3_stmt* st = nullptr;
- const char* tail = nullptr;
- const int rc = sqlite3_prepare_v2(db.Handle(), S_.data(), S_.size() + 1, &st, &tail);
-
- H_.Reset(st);
-
- if (rc != SQLITE_OK) {
- ythrow TSQLiteError(db.Handle()) << "can not prepare " << S_.Quote();
- }
- }
-
- void TSQLiteStatement::Execute() {
- while (Step()) {
- }
-
- Reset();
- }
-
- TSQLiteStatement& TSQLiteStatement::Bind(size_t idx, i64 val) {
- sqlite3_bind_int64(Handle(), idx, val);
- return *this;
- }
-
- TSQLiteStatement& TSQLiteStatement::Bind(size_t idx, int val) {
- sqlite3_bind_int(Handle(), idx, val);
- return *this;
- }
-
- TSQLiteStatement& TSQLiteStatement::Bind(size_t idx) {
- sqlite3_bind_null(Handle(), idx);
- return *this;
- }
-
- TSQLiteStatement& TSQLiteStatement::Bind(size_t idx, double val) {
- sqlite3_bind_double(Handle(), idx, val);
- return *this;
- }
-
- void TSQLiteStatement::BindText(size_t idx, const char* text, size_t len, TFreeFunc func) {
- sqlite3_bind_text(Handle(), idx, text, len, func);
- }
-
- TSQLiteStatement& TSQLiteStatement::Bind(size_t idx, TStringBuf str) {
- BindText(idx, str.data(), str.size(), SQLITE_STATIC);
- return *this;
- }
-
- TSQLiteStatement& TSQLiteStatement::BindBlob(size_t idx, TStringBuf blob) {
- sqlite3_bind_blob(Handle(), idx, blob.data(), blob.size(), SQLITE_STATIC);
- return *this;
- }
-
- size_t TSQLiteStatement::BoundNamePosition(TStringBuf name) const noexcept {
- return sqlite3_bind_parameter_index(Handle(), name.data());
- }
-
- size_t TSQLiteStatement::BoundParameterCount() const noexcept {
- return sqlite3_bind_parameter_count(Handle());
- }
-
- const char* TSQLiteStatement::BoundParameterName(size_t idx) const noexcept {
- return sqlite3_bind_parameter_name(Handle(), idx);
- }
-
- sqlite3_stmt* TSQLiteStatement::Handle() const noexcept {
- return H_.Get();
- }
-
- bool TSQLiteStatement::Step() {
- const int rc = sqlite3_step(Handle());
-
- switch (rc) {
- case SQLITE_ROW:
- return true;
-
- case SQLITE_DONE:
- return false;
-
- default:
- break;
- }
-
- char* stmt = rc == SQLITE_CONSTRAINT ? sqlite3_expanded_sql(Handle()) : nullptr;
- Y_DEFER {
- if (stmt != nullptr) {
- sqlite3_free(reinterpret_cast<void*>(stmt));
- stmt = nullptr;
- }
- };
- if (stmt != nullptr) {
- ythrow TSQLiteError(rc) << "step failed: " << stmt;
- } else {
- ythrow TSQLiteError(rc) << "step failed";
- }
- }
-
- i64 TSQLiteStatement::ColumnInt64(size_t idx) {
- return sqlite3_column_int64(Handle(), idx);
- }
-
- double TSQLiteStatement::ColumnDouble(size_t idx) {
- return sqlite3_column_double(Handle(), idx);
- }
-
- TStringBuf TSQLiteStatement::ColumnText(size_t idx) {
- return reinterpret_cast<const char*>(sqlite3_column_text(Handle(), idx));
- }
-
- TStringBuf TSQLiteStatement::ColumnBlob(size_t idx) {
- const void* blob = sqlite3_column_blob(Handle(), idx);
- size_t size = sqlite3_column_bytes(Handle(), idx);
- return TStringBuf(static_cast<const char*>(blob), size);
- }
-
- void TSQLiteStatement::ColumnAccept(size_t idx, ISQLiteColumnVisitor& visitor) {
- const auto columnType = sqlite3_column_type(Handle(), idx);
- switch (columnType) {
- case SQLITE_INTEGER:
- visitor.OnColumnInt64(ColumnInt64(idx));
- break;
- case SQLITE_FLOAT:
- visitor.OnColumnDouble(ColumnDouble(idx));
- break;
- case SQLITE_TEXT:
- visitor.OnColumnText(ColumnText(idx));
- break;
- case SQLITE_BLOB:
- visitor.OnColumnBlob(ColumnBlob(idx));
- break;
- case SQLITE_NULL:
- visitor.OnColumnNull();
- break;
- }
- }
-
- size_t TSQLiteStatement::ColumnCount() const noexcept {
- return static_cast<size_t>(sqlite3_column_count(Handle()));
- }
-
- TStringBuf TSQLiteStatement::ColumnName(size_t idx) const noexcept {
- return sqlite3_column_name(Handle(), idx);
- }
-
- void TSQLiteStatement::Reset() {
- const int rc = sqlite3_reset(Handle());
-
- if (rc != SQLITE_OK) {
- ythrow TSQLiteError(rc) << "reset failed";
- }
- }
-
- void TSQLiteStatement::ResetHard() {
- (void)sqlite3_reset(Handle());
- }
-
- void TSQLiteStatement::ClearBindings() noexcept {
- // No error is documented.
- // sqlite3.c's code always returns SQLITE_OK.
- (void)sqlite3_clear_bindings(Handle());
- }
-
- TSQLiteTransaction::TSQLiteTransaction(TSQLiteDB& db)
- : Db(&db)
- {
- Execute("BEGIN TRANSACTION");
- }
-
- TSQLiteTransaction::~TSQLiteTransaction() {
- if (Db) {
- Rollback();
- }
- }
-
- void TSQLiteTransaction::Commit() {
- Execute("COMMIT TRANSACTION");
- Db = nullptr;
- }
-
- void TSQLiteTransaction::Rollback() {
- Execute("ROLLBACK TRANSACTION");
- Db = nullptr;
- }
-
- void TSQLiteTransaction::Execute(const TString& query) {
- Y_ENSURE(Db, "Transaction is already ended");
- TSQLiteStatement st(*Db, query);
- st.Execute();
- }
-
- TSimpleDB::TSimpleDB(const TString& path)
- : TSQLiteDB(path)
- , Start_(*this, "begin transaction")
- , End_(*this, "end transaction")
- {
- }
-
- void TSimpleDB::Execute(const TString& statement) {
- TSQLiteStatement(*this, statement).Execute();
- }
-
- void TSimpleDB::Acquire() {
- Start_.Execute();
- }
-
- void TSimpleDB::Release() {
- End_.Execute();
- }
-
-}
diff --git a/library/cpp/sqlite3/sqlite.h b/library/cpp/sqlite3/sqlite.h
deleted file mode 100644
index 8b35e2606a..0000000000
--- a/library/cpp/sqlite3/sqlite.h
+++ /dev/null
@@ -1,136 +0,0 @@
-#pragma once
-
-#include <util/generic/yexception.h>
-#include <util/generic/ptr.h>
-
-#include <contrib/libs/sqlite3/sqlite3.h>
-
-namespace NSQLite {
- class TSQLiteError: public yexception {
- public:
- TSQLiteError(sqlite3* hndl);
- TSQLiteError(int rc);
-
- int GetErrorCode() const {
- return ErrorCode;
- }
-
- private:
- int ErrorCode;
- };
-
- template <class T, int (*Func)(T*)>
- struct TCFree {
- static void Destroy(T* t) {
- Func(t);
- }
- };
-
- class TSQLiteDB {
- public:
- TSQLiteDB(const TString& path, int flags);
- TSQLiteDB(const TString& path);
-
- sqlite3* Handle() const noexcept;
- size_t RowsAffected() const noexcept;
-
- private:
- THolder<sqlite3, TCFree<sqlite3, sqlite3_close>> H_;
- };
-
- class ISQLiteColumnVisitor {
- public:
- virtual ~ISQLiteColumnVisitor() = default;
-
- virtual void OnColumnInt64(i64 value) = 0;
- virtual void OnColumnDouble(double value) = 0;
- virtual void OnColumnText(TStringBuf value) = 0;
- virtual void OnColumnBlob(TStringBuf value) = 0;
- virtual void OnColumnNull() = 0;
- };
-
- class TSQLiteStatement {
- public:
- TSQLiteStatement(TSQLiteDB& db, const TString& s);
-
- void Execute();
- TSQLiteStatement& Bind(size_t idx, i64 val);
- TSQLiteStatement& Bind(size_t idx, int val);
- TSQLiteStatement& Bind(size_t idx);
- TSQLiteStatement& Bind(size_t idx, double val);
- TSQLiteStatement& Bind(size_t idx, TStringBuf str);
- TSQLiteStatement& BindBlob(size_t idx, TStringBuf blob);
- template <typename Value>
- TSQLiteStatement& Bind(TStringBuf name, Value val) {
- size_t idx = BoundNamePosition(name);
- Y_ASSERT(idx > 0);
- return Bind(idx, val);
- }
- TSQLiteStatement& BindBlob(TStringBuf name, TStringBuf blob) {
- size_t idx = BoundNamePosition(name);
- Y_ASSERT(idx > 0);
- return BindBlob(idx, blob);
- }
- TSQLiteStatement& Bind(TStringBuf name) {
- size_t idx = BoundNamePosition(name);
- Y_ASSERT(idx > 0);
- return Bind(idx);
- }
- size_t BoundNamePosition(TStringBuf name) const noexcept;
- size_t BoundParameterCount() const noexcept;
- const char* BoundParameterName(size_t idx) const noexcept;
-
- sqlite3_stmt* Handle() const noexcept;
- bool Step();
- i64 ColumnInt64(size_t idx);
- double ColumnDouble(size_t idx);
- TStringBuf ColumnText(size_t idx);
- TStringBuf ColumnBlob(size_t idx);
- void ColumnAccept(size_t idx, ISQLiteColumnVisitor& visitor);
- size_t ColumnCount() const noexcept;
- TStringBuf ColumnName(size_t idx) const noexcept;
- void Reset();
- // Ignore last error on this statement
- void ResetHard();
- void ClearBindings() noexcept;
-
- private:
- typedef void (*TFreeFunc)(void*);
- void BindText(size_t col, const char* text, size_t len, TFreeFunc func);
-
- private:
- TString S_;
- THolder<sqlite3_stmt, TCFree<sqlite3_stmt, sqlite3_finalize>> H_;
- };
-
- /**
- * Forces the user to commit the transaction explicitly, so that no exception is thrown from the destructor (with all the consequences that would entail).
- */
- class TSQLiteTransaction: private TNonCopyable {
- private:
- TSQLiteDB* Db;
-
- public:
- TSQLiteTransaction(TSQLiteDB& db);
- ~TSQLiteTransaction();
-
- void Commit();
- void Rollback();
-
- private:
- void Execute(const TString& query);
- };
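
A minimal usage sketch of the classes above; the table, columns and values are made up. SQLite bind parameter indices are 1-based, and without an explicit Commit() the transaction destructor issues a rollback:

#include <library/cpp/sqlite3/sqlite.h>

#include <util/generic/strbuf.h>

void InsertExample(NSQLite::TSQLiteDB& db) {
    NSQLite::TSQLiteTransaction tx(db);

    NSQLite::TSQLiteStatement st(db, "INSERT INTO kv(k, v) VALUES(?1, ?2)");
    st.Bind(1, TStringBuf("answer")).Bind(2, 42);
    st.Execute();

    tx.Commit(); // otherwise ~TSQLiteTransaction() rolls the transaction back
}
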
-
- class TSimpleDB: public TSQLiteDB {
- public:
- TSimpleDB(const TString& path);
-
- void Execute(const TString& statement);
- void Acquire();
- void Release();
-
- private:
- TSQLiteStatement Start_;
- TSQLiteStatement End_;
- };
-}
diff --git a/library/cpp/sqlite3/ya.make b/library/cpp/sqlite3/ya.make
deleted file mode 100644
index 15417e278d..0000000000
--- a/library/cpp/sqlite3/ya.make
+++ /dev/null
@@ -1,13 +0,0 @@
-LIBRARY()
-
-SRCS(
- sqlite.cpp
-)
-
-PEERDIR(
- contrib/libs/sqlite3
-)
-
-END()
-
-RECURSE_FOR_TESTS(ut)
diff --git a/library/cpp/streams/growing_file_input/growing_file_input.cpp b/library/cpp/streams/growing_file_input/growing_file_input.cpp
deleted file mode 100644
index 0bbfa5ade9..0000000000
--- a/library/cpp/streams/growing_file_input/growing_file_input.cpp
+++ /dev/null
@@ -1,40 +0,0 @@
-#include "growing_file_input.h"
-
-#include <util/datetime/base.h>
-#include <util/generic/yexception.h>
-
-TGrowingFileInput::TGrowingFileInput(const TString& path)
- : File_(path, OpenExisting | RdOnly | Seq)
-{
- if (!File_.IsOpen()) {
- ythrow TIoException() << "file " << path << " not open";
- }
-
- File_.Seek(0, sEnd);
-}
-
-TGrowingFileInput::TGrowingFileInput(const TFile& file)
- : File_(file)
-{
- if (!File_.IsOpen()) {
- ythrow TIoException() << "file (" << file.GetName() << ") not open";
- }
-
- File_.Seek(0, sEnd);
-}
-
-size_t TGrowingFileInput::DoRead(void* buf, size_t len) {
- for (int sleepTime = 1;;) {
- size_t rr = File_.Read(buf, len);
-
- if (rr != 0) {
- return rr;
- }
-
- NanoSleep((ui64)sleepTime * 1000000);
-
- if (sleepTime < 2000) {
- sleepTime <<= 1;
- }
- }
-}
diff --git a/library/cpp/streams/growing_file_input/growing_file_input.h b/library/cpp/streams/growing_file_input/growing_file_input.h
deleted file mode 100644
index 9054a5f3da..0000000000
--- a/library/cpp/streams/growing_file_input/growing_file_input.h
+++ /dev/null
@@ -1,23 +0,0 @@
-#pragma once
-
-#include <util/stream/input.h>
-#include <util/system/file.h>
-
-/**
- * Growing file input stream.
- *
- * The file descriptor is positioned at the end of the file when the object is created.
- *
- * Each read blocks until at least one byte has been read.
- */
-class TGrowingFileInput: public IInputStream {
-public:
- TGrowingFileInput(const TFile& file);
- TGrowingFileInput(const TString& path);
-
-private:
- size_t DoRead(void* buf, size_t len) override;
-
-private:
- TFile File_;
-};
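
A minimal tail-style usage sketch (the log path is illustrative). The stream starts at the current end of the file, and each read blocks until the file grows:

#include <library/cpp/streams/growing_file_input/growing_file_input.h>

#include <util/stream/output.h>

void TailExample() {
    TGrowingFileInput in("/var/log/app.log");

    char buf[4096];
    for (;;) {
        const size_t n = in.Read(buf, sizeof(buf)); // returns only after at least one byte was read
        Cout.Write(buf, n);
        Cout.Flush();
    }
}
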
diff --git a/library/cpp/streams/growing_file_input/ya.make b/library/cpp/streams/growing_file_input/ya.make
deleted file mode 100644
index 69c56fea46..0000000000
--- a/library/cpp/streams/growing_file_input/ya.make
+++ /dev/null
@@ -1,11 +0,0 @@
-LIBRARY()
-
-SRCS(
- growing_file_input.cpp
-)
-
-END()
-
-RECURSE_FOR_TESTS(
- ut
-)
diff --git a/library/cpp/string_utils/subst_buf/substbuf.cpp b/library/cpp/string_utils/subst_buf/substbuf.cpp
deleted file mode 100644
index f23cb24b19..0000000000
--- a/library/cpp/string_utils/subst_buf/substbuf.cpp
+++ /dev/null
@@ -1 +0,0 @@
-#include "substbuf.h"
diff --git a/library/cpp/string_utils/subst_buf/substbuf.h b/library/cpp/string_utils/subst_buf/substbuf.h
deleted file mode 100644
index 357ee68ae3..0000000000
--- a/library/cpp/string_utils/subst_buf/substbuf.h
+++ /dev/null
@@ -1,63 +0,0 @@
-#pragma once
-
-#include <util/generic/vector.h>
-#include <util/generic/strbuf.h>
-#include <util/string/subst.h>
-
-/// Replaces occurrences of one substring in the string with another.
-template <class TBuf, class TPool>
-size_t SubstGlobal(TBuf& s, const TBuf& from, const TBuf& to, TPool& pool) {
- if (from.empty())
- return 0;
-
- TVector<size_t> offs;
- for (size_t off = 0; (off = s.find(from, off)) != TBuf::npos; off += from.length())
- offs.push_back(off);
- if (offs.empty())
- return 0;
-
- size_t dstSize = s.size() + ssize_t(offs.size()) * ssize_t(to.size() - from.size());
- const size_t charTypeSz = sizeof(typename TBuf::char_type);
- typename TBuf::char_type* dst = (typename TBuf::char_type*)pool.Allocate((dstSize + 1) * charTypeSz);
- dst[dstSize] = 0;
-
- typename TBuf::char_type* p = dst;
- size_t lastSrc = 0;
- for (auto off : offs) {
- memcpy(p, s.data() + lastSrc, (off - lastSrc) * charTypeSz);
- p += off - lastSrc;
- lastSrc = off + from.size();
- memcpy(p, to.data(), to.size() * charTypeSz);
- p += to.size();
- }
- memcpy(p, s.data() + lastSrc, (s.size() - lastSrc) * charTypeSz);
- p += s.size() - lastSrc;
- Y_ASSERT(p - dst == (ssize_t)dstSize);
-
- s = TBuf(dst, dstSize);
- return offs.size();
-}
-
-template <class TPool>
-size_t SubstGlobal(TStringBuf& s, const TStringBuf& from, const TStringBuf& to, TPool& pool) {
- return SubstGlobal<TStringBuf, TPool>(s, from, to, pool);
-}
-
-/// Replaces occurrences of one character in the string with another.
-template <class TBuf, class TPool>
-inline size_t SubstGlobal(TBuf& s, typename TBuf::char_type from, typename TBuf::char_type to, TPool& pool) {
- size_t result = 0;
- size_t off = s.find(from);
- if (off == TBuf::npos)
- return 0;
-
- s = TBuf(pool.Append(s), s.size());
-
- for (typename TBuf::char_type* it = const_cast<typename TBuf::char_type*>(s.begin()) + off; it != s.end(); ++it) {
- if (*it == from) {
- *it = to;
- ++result;
- }
- }
- return result;
-}
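
A minimal usage sketch of the pool-based SubstGlobal above, assuming TMemoryPool from util/memory/pool.h as the pool type (any allocator with a compatible Allocate(size_t) should work):

#include <library/cpp/string_utils/subst_buf/substbuf.h>

#include <util/memory/pool.h>

void SubstExample() {
    TMemoryPool pool(4096);

    TStringBuf s = "a-b-c";
    const size_t replaced = SubstGlobal(s, TStringBuf("-"), TStringBuf("+"), pool);
    // s now points into pool-owned memory and reads "a+b+c"; replaced == 2.
    (void)replaced;
}
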
diff --git a/library/cpp/string_utils/subst_buf/ya.make b/library/cpp/string_utils/subst_buf/ya.make
deleted file mode 100644
index 8b8793f5b3..0000000000
--- a/library/cpp/string_utils/subst_buf/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-LIBRARY()
-
-SRCS(
- substbuf.cpp
-)
-
-END()
diff --git a/library/cpp/ucompress/README.md b/library/cpp/ucompress/README.md
deleted file mode 100644
index 5a6e9d8f42..0000000000
--- a/library/cpp/ucompress/README.md
+++ /dev/null
@@ -1 +0,0 @@
-A compatible implementation of library/python/compress (also known as "uc", the uber compressor: tools/uc, ya tool uc).
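
A minimal round-trip sketch of the C++ writer/reader in this directory. The file name is illustrative, and "snappy" is just an example of a block codec registered in library/cpp/blockcodecs (an unknown name makes NBlockCodecs::Codec() throw):

#include <library/cpp/ucompress/reader.h>
#include <library/cpp/ucompress/writer.h>

#include <library/cpp/blockcodecs/codecs.h>

#include <util/stream/file.h>
#include <util/stream/output.h>

void RoundTrip() {
    {
        TFileOutput rawOut("data.uc");
        NUCompress::TCodedOutput out(&rawOut, NBlockCodecs::Codec("snappy"));
        out << "hello, uc";
        out.Finish(); // flushes the last block and writes the zero-length EOF marker
    }

    TFileInput rawIn("data.uc");
    NUCompress::TDecodedInput in(&rawIn);
    Cout << in.ReadAll() << Endl;
}
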
diff --git a/library/cpp/ucompress/common.h b/library/cpp/ucompress/common.h
deleted file mode 100644
index d59cde9cf1..0000000000
--- a/library/cpp/ucompress/common.h
+++ /dev/null
@@ -1,8 +0,0 @@
-#pragma once
-
-
-namespace NUCompress {
- // These limitations come from the original implementation, library/python/compress.
- using TBlockLen = ui32;
- constexpr TBlockLen MaxCompressedLen = 100000000;
-}
diff --git a/library/cpp/ucompress/reader.cpp b/library/cpp/ucompress/reader.cpp
deleted file mode 100644
index 45a8ca8da2..0000000000
--- a/library/cpp/ucompress/reader.cpp
+++ /dev/null
@@ -1,58 +0,0 @@
-#include "reader.h"
-#include "common.h"
-
-#include <library/cpp/blockcodecs/codecs.h>
-#include <library/cpp/json/json_reader.h>
-
-#include <util/system/byteorder.h>
-
-
-using namespace NUCompress;
-
-TDecodedInput::TDecodedInput(IInputStream* in)
- : S_(in)
-{
- Y_ENSURE_EX(S_, TBadArgumentException() << "Null output stream");
-}
-
-TDecodedInput::~TDecodedInput() = default;
-
-size_t TDecodedInput::DoUnboundedNext(const void** ptr) {
- if (!C_) {
- TBlockLen blockLen = 0;
- S_->LoadOrFail(&blockLen, sizeof(blockLen));
- blockLen = LittleToHost(blockLen);
- Y_ENSURE(blockLen <= MaxCompressedLen, "broken stream");
-
- TString buf = TString::Uninitialized(blockLen);
- S_->LoadOrFail(buf.Detach(), blockLen);
-
- NJson::TJsonValue hdr;
- Y_ENSURE(NJson::ReadJsonTree(buf, &hdr), "cannot parse header, suspect old format");
-
- auto& codecName = hdr["codec"].GetString();
- Y_ENSURE(codecName, "header does not have codec info");
-
- // Throws TNotFound
- C_ = NBlockCodecs::Codec(codecName);
- Y_ASSERT(C_);
- }
-
- TBlockLen blockLen = 0;
- size_t actualRead = S_->Load(&blockLen, sizeof(blockLen));
- if (!actualRead) {
- // End of stream
- return 0;
- }
- Y_ENSURE(actualRead == sizeof(blockLen), "broken stream: cannot read block length");
- blockLen = LittleToHost(blockLen);
- Y_ENSURE(blockLen <= MaxCompressedLen, "broken stream");
-
- TBuffer block;
- block.Resize(blockLen);
- S_->LoadOrFail(block.Data(), blockLen);
-
- C_->Decode(block, D_);
- *ptr = D_.Data();
- return D_.Size();
-}
diff --git a/library/cpp/ucompress/reader.h b/library/cpp/ucompress/reader.h
deleted file mode 100644
index 5a5d1c9a89..0000000000
--- a/library/cpp/ucompress/reader.h
+++ /dev/null
@@ -1,25 +0,0 @@
-#pragma once
-
-#include <util/generic/buffer.h>
-#include <util/stream/walk.h>
-
-
-namespace NBlockCodecs {
- struct ICodec;
-}
-
-namespace NUCompress {
- class TDecodedInput: public IWalkInput {
- public:
- TDecodedInput(IInputStream* in);
- ~TDecodedInput() override;
-
- private:
- size_t DoUnboundedNext(const void** ptr) override;
-
- private:
- IInputStream* const S_;
- const NBlockCodecs::ICodec* C_ = nullptr;
- TBuffer D_;
- };
-}
diff --git a/library/cpp/ucompress/writer.cpp b/library/cpp/ucompress/writer.cpp
deleted file mode 100644
index 40f8b12108..0000000000
--- a/library/cpp/ucompress/writer.cpp
+++ /dev/null
@@ -1,95 +0,0 @@
-#include "writer.h"
-#include "common.h"
-
-#include <library/cpp/blockcodecs/codecs.h>
-#include <library/cpp/json/writer/json.h>
-
-#include <util/generic/scope.h>
-#include <util/generic/yexception.h>
-#include <util/system/byteorder.h>
-
-
-using namespace NUCompress;
-
-TCodedOutput::TCodedOutput(IOutputStream* out, const NBlockCodecs::ICodec* c, size_t bufLen)
- : C_(c)
- , D_(bufLen)
- , S_(out)
-{
- Y_ENSURE_EX(C_, TBadArgumentException() << "Null codec");
- Y_ENSURE_EX(S_, TBadArgumentException() << "Null output stream");
- D_.Resize(bufLen);
- Y_ENSURE_EX(C_->MaxCompressedLength(D_) <= MaxCompressedLen, TBadArgumentException() << "Too big buffer size: " << bufLen);
- D_.Clear();
-}
-
-TCodedOutput::~TCodedOutput() {
- try {
- Finish();
- } catch (...) {
- }
-}
-
-void TCodedOutput::DoWrite(const void* buf, size_t len) {
- Y_ENSURE(S_, "Stream finished already");
- const char* in = static_cast<const char*>(buf);
-
- while (len) {
- const size_t avail = D_.Avail();
- if (len < avail) {
- D_.Append(in, len);
- return;
- }
-
- D_.Append(in, avail);
- Y_ASSERT(!D_.Avail());
- in += avail;
- len -= avail;
-
- FlushImpl();
- }
-}
-
-void TCodedOutput::FlushImpl() {
- if (!HdrWritten) {
- NJsonWriter::TBuf jBuf;
- jBuf.BeginObject();
- jBuf.WriteKey("codec");
- jBuf.WriteString(C_->Name());
- jBuf.EndObject();
-
- TString jStr = jBuf.Str() + '\n';
- const TBlockLen lenToSave = HostToLittle(jStr.length());
- S_->Write(&lenToSave, sizeof(lenToSave));
- S_->Write(jStr.Detach(), jStr.length());
- HdrWritten = true;
- }
-
- O_.Reserve(C_->MaxCompressedLength(D_));
- const size_t oLen = C_->Compress(D_, O_.Data());
- Y_ASSERT(oLen <= MaxCompressedLen);
-
- const TBlockLen lenToSave = HostToLittle(oLen);
- S_->Write(&lenToSave, sizeof(lenToSave));
- S_->Write(O_.Data(), oLen);
-
- D_.Clear();
- O_.Clear();
-}
-
-void TCodedOutput::DoFlush() {
- if (S_ && D_) {
- FlushImpl();
- }
-}
-
-void TCodedOutput::DoFinish() {
- if (S_) {
- Y_DEFER {
- S_ = nullptr;
- };
- FlushImpl();
- // Write zero-length block as EOF marker.
- FlushImpl();
- }
-}
diff --git a/library/cpp/ucompress/writer.h b/library/cpp/ucompress/writer.h
deleted file mode 100644
index 4d3ae71093..0000000000
--- a/library/cpp/ucompress/writer.h
+++ /dev/null
@@ -1,31 +0,0 @@
-#pragma once
-
-#include <util/generic/buffer.h>
-#include <util/stream/output.h>
-
-
-namespace NBlockCodecs {
- struct ICodec;
-}
-
-namespace NUCompress {
- class TCodedOutput: public IOutputStream {
- public:
- TCodedOutput(IOutputStream* out, const NBlockCodecs::ICodec* c, size_t bufLen = 16 << 20);
- ~TCodedOutput() override;
-
- private:
- void DoWrite(const void* buf, size_t len) override;
- void DoFlush() override;
- void DoFinish() override;
-
- void FlushImpl();
-
- private:
- const NBlockCodecs::ICodec* const C_;
- TBuffer D_;
- TBuffer O_;
- IOutputStream* S_;
- bool HdrWritten = false;
- };
-}
diff --git a/library/cpp/ucompress/ya.make b/library/cpp/ucompress/ya.make
deleted file mode 100644
index 6582dd9a41..0000000000
--- a/library/cpp/ucompress/ya.make
+++ /dev/null
@@ -1,18 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- library/cpp/blockcodecs
- library/cpp/json
-)
-
-SRCS(
- reader.cpp
- writer.cpp
-)
-
-END()
-
-RECURSE(
- tests
- ut
-)
diff --git a/library/cpp/zipatch/reader.cpp b/library/cpp/zipatch/reader.cpp
deleted file mode 100644
index 03ac365da1..0000000000
--- a/library/cpp/zipatch/reader.cpp
+++ /dev/null
@@ -1,173 +0,0 @@
-#include "reader.h"
-
-#include <library/cpp/json/json_reader.h>
-#include <library/cpp/json/json_value.h>
-
-#include <util/generic/hash.h>
-#include <util/memory/tempbuf.h>
-
-#include <contrib/libs/libarchive/libarchive/archive.h>
-#include <contrib/libs/libarchive/libarchive/archive_entry.h>
-
-using namespace NJson;
-
-namespace NZipatch {
-
-class TReader::TImpl {
-
- using TEntry = archive_entry;
-
-public:
- TImpl() {
- if ((Archive_ = archive_read_new()) == nullptr) {
- ythrow yexception() << "can't create archive object";
- }
- }
-
- TImpl(const TFsPath& path)
- : TImpl()
- {
- archive_read_support_filter_all(Archive_);
- archive_read_support_format_zip(Archive_);
-
- if (ARCHIVE_OK != archive_read_open_filename(Archive_, TString(path).c_str(), 10240)) {
- ythrow yexception() << "can't open archive path = " << path;
- }
-
- Read();
- }
-
- TImpl(const TStringBuf buf)
- : TImpl()
- {
- archive_read_support_filter_all(Archive_);
- archive_read_support_format_zip(Archive_);
-
- if (ARCHIVE_OK != archive_read_open_memory(Archive_, buf.data(), buf.size())) {
- ythrow yexception() << "can't open in-memory archive";
- }
-
- Read();
- }
-
- ~TImpl() {
- for (const auto& item : Files_) {
- archive_entry_free(item.second.first);
- }
- if (Archive_) {
- archive_read_free(Archive_);
- }
- }
-
- void Enumerate(TOnEvent cb) const {
- for (const auto& item : Actions_) {
- TEvent event;
-
- event.Action = GetTypeFromString(item["type"].GetStringSafe(TString()));
- event.Path = item["path"].GetStringSafe(TString());
- event.Executable = item["executable"].GetBooleanSafe(false);
- event.Symlink = false;
-
- if (event.Action == Copy || event.Action == Move) {
- event.Source.Path = item["orig_path"].GetStringSafe(TString());
- event.Source.Revision = item["orig_revision"].GetUIntegerRobust();
- }
- if (event.Action == StoreFile) {
- auto fi = Files_.find(event.Path);
- if (fi == Files_.end()) {
- ythrow yexception() << "can't find file; path = " << event.Path;
- }
-
- event.Data = fi->second.second;
- event.Symlink = archive_entry_filetype(fi->second.first) == AE_IFLNK;
- }
-
- if (event.Path) {
- cb(event);
- }
- }
- }
-
-private:
- EAction GetTypeFromString(const TString& type) const {
- if (type == "store_file") {
- return StoreFile;
- }
- if (type == "mkdir") {
- return MkDir;
- }
- if (type == "remove_file" || type == "remove_tree") {
- return Remove;
- }
- if (type == "svn_copy") {
- return Copy;
- }
- return Unknown;
- }
-
- void Read() {
- TEntry* current = nullptr;
-
- while (archive_read_next_header(Archive_, &current) == ARCHIVE_OK) {
- const TStringBuf path(archive_entry_pathname(current));
-
- if (path == "actions.json") {
- TJsonValue value;
- ReadJsonFastTree(GetData(current), &value, true);
-
- for (const auto& item : value.GetArraySafe()) {
- Actions_.push_back(item);
- }
- } else if (AsciiHasPrefix(path, "files/")) {
- TEntry* entry = archive_entry_clone(current);
-
- Files_.emplace(path.substr(6), std::make_pair(entry, GetData(current)));
- }
- }
-
- archive_read_close(Archive_);
- }
-
- TString GetData(TEntry* current) const {
- if (archive_entry_filetype(current) == AE_IFLNK) {
- return archive_entry_symlink(current);
- }
-
- if (const auto size = archive_entry_size(current)) {
- TTempBuf data(size);
-
- if (archive_read_data(Archive_, data.Data(), size) != size) {
- ythrow yexception() << "can't read entry";
- }
-
- return TString(data.Data(), size);
- }
-
- return TString();
- }
-
-private:
- struct archive* Archive_;
- TVector<TJsonValue> Actions_;
- THashMap<TString, std::pair<TEntry*, TString>> Files_;
-};
-
-TReader::TReader(const TFsPath& path)
- : Impl_(new TImpl(path))
-{
-}
-
-TReader::TReader(const TStringBuf buf)
- : Impl_(new TImpl(buf))
-{
-}
-
-TReader::~TReader()
-{ }
-
-void TReader::Enumerate(TOnEvent cb) const {
- Impl_->Enumerate(cb);
-}
-
-} // namespace NZipatch
-
diff --git a/library/cpp/zipatch/reader.h b/library/cpp/zipatch/reader.h
deleted file mode 100644
index a94bc79b71..0000000000
--- a/library/cpp/zipatch/reader.h
+++ /dev/null
@@ -1,48 +0,0 @@
-#pragma once
-
-#include <util/folder/path.h>
-#include <util/generic/ptr.h>
-
-namespace NZipatch {
-
-class TReader {
-public:
- enum EAction {
- Unknown = 0,
- Copy,
- MkDir,
- Move,
- Remove,
- StoreFile,
- };
-
- struct TSource {
- TString Path;
- ui64 Revision;
- };
-
- struct TEvent {
- EAction Action;
- TString Path;
- TStringBuf Data;
- TSource Source;
- bool Executable;
- bool Symlink;
- };
-
- using TOnEvent = std::function<void(const TEvent&)>;
-
-public:
- TReader(const TFsPath& path);
- TReader(const TStringBuf buf);
- ~TReader();
-
- void Enumerate(TOnEvent cb) const;
-
-private:
- class TImpl;
- THolder<TImpl> Impl_;
-};
-
-} // namespace NZipatch
-
diff --git a/library/cpp/zipatch/writer.cpp b/library/cpp/zipatch/writer.cpp
deleted file mode 100644
index a9ca451b01..0000000000
--- a/library/cpp/zipatch/writer.cpp
+++ /dev/null
@@ -1,232 +0,0 @@
-#include "writer.h"
-
-#include <library/cpp/json/json_value.h>
-#include <library/cpp/json/json_writer.h>
-
-#include <util/string/join.h>
-
-#include <contrib/libs/libarchive/libarchive/archive.h>
-#include <contrib/libs/libarchive/libarchive/archive_entry.h>
-
-using namespace NJson;
-
-namespace NZipatch {
-
-class TWriter::TImpl {
-public:
- TImpl(const TFsPath& path)
- : Actions_(new TJsonValue(JSON_ARRAY))
- , Meta_(new TJsonValue(JSON_MAP))
- , Revprops_(new TJsonValue(JSON_MAP))
- , Archive_(nullptr)
- {
- Archive_ = archive_write_new();
- if (!Archive_) {
- ythrow yexception() << "can't create archive object";
- }
- archive_write_set_format_zip(Archive_);
- archive_write_zip_set_compression_deflate(Archive_);
-
- if (ARCHIVE_OK != archive_write_open_filename(Archive_, TString(path).c_str())) {
- ythrow yexception() << "can't open archive path = " << path;
- }
- }
-
- ~TImpl() {
- if (Actions_ || Meta_ || Revprops_) {
- Finish();
- }
- if (Archive_) {
- archive_write_free(Archive_);
- }
- }
-
- void Finish() {
- if (Actions_) {
- if (Archive_) {
- WriteEntry("actions.json", WriteJson(Actions_.Get(), true, false));
- }
-
- Actions_.Destroy();
- }
-
- if (Meta_) {
- if (Archive_) {
- WriteEntry("meta.json", WriteJson(Meta_.Get(), true));
- }
-
- Meta_.Destroy();
- }
-
- if (Revprops_) {
- if (Archive_) {
- WriteEntry("revprops.json", WriteJson(Revprops_.Get(), true));
- }
-
- Revprops_.Destroy();
- }
-
- if (Archive_) {
- archive_write_close(Archive_);
- }
- }
-
- void Copy(const TString& path, const TOrigin& origin) {
- Y_ASSERT(origin.Path);
- Y_ASSERT(origin.Revision);
-
- if (Actions_) {
- TJsonValue item;
- item["type"] = "svn_copy";
- item["path"] = path;
- item["orig_path"] = origin.Path;
- item["orig_revision"] = origin.Revision;
- Actions_->AppendValue(item);
- }
- }
-
- void MkDir(const TString& path) {
- if (Actions_) {
- TJsonValue item;
- item["type"] = "mkdir";
- item["path"] = path;
- Actions_->AppendValue(item);
- }
- }
-
- void RemoveFile(const TString& path) {
- if (Actions_) {
- TJsonValue item;
- item["type"] = "remove_file";
- item["path"] = path;
- Actions_->AppendValue(item);
- }
- }
-
- void RemoveTree(const TString& path) {
- if (Actions_) {
- TJsonValue item;
- item["type"] = "remove_tree";
- item["path"] = path;
- Actions_->AppendValue(item);
- }
- }
-
- void StoreFile(
- const TString& path,
- const TString& data,
- const bool execute,
- const bool symlink,
- const TMaybe<bool> binaryHint,
- const TMaybe<bool> encrypted)
- {
- if (Actions_) {
- const TString file = Join("/", "files", path);
- TJsonValue item;
- item["type"] = "store_file";
- item["executable"] = execute;
- item["path"] = path;
- item["file"] = file;
- if (binaryHint.Defined()) {
- item["binary_hint"] = *binaryHint;
- }
- if (encrypted.Defined()) {
- item["encrypted"] = *encrypted;
- }
- Actions_->AppendValue(item);
- WriteEntry(file, data, symlink);
- }
- }
-
- void SetBaseSvnRevision(ui64 revision) {
- if (Meta_) {
- (*Meta_)["base_svn_revision"] = revision;
- }
- }
-
- void AddRevprop(const TString& prop, const TString& value) {
- if (Revprops_) {
- (*Revprops_)[prop] = value;
- }
- }
-
-private:
- void WriteEntry(
- const TString& path,
- const TString& data,
- const bool symlink = false)
- {
- struct archive_entry* const entry = archive_entry_new();
- // Write header.
- archive_entry_set_pathname(entry, path.c_str());
- archive_entry_set_size(entry, data.size());
- archive_entry_set_filetype(entry, symlink ? AE_IFLNK : AE_IFREG);
- archive_entry_set_perm(entry, 0644);
- if (symlink) {
- archive_entry_set_symlink(entry, data.c_str());
- }
- archive_write_header(Archive_, entry);
- // Write data.
- // If the entry is a symlink, its size becomes zero.
- if (archive_entry_size(entry) > 0) {
- archive_write_data(Archive_, data.data(), data.size());
- }
- archive_entry_free(entry);
- }
-
-private:
- THolder<NJson::TJsonValue> Actions_;
- THolder<NJson::TJsonValue> Meta_;
- THolder<NJson::TJsonValue> Revprops_;
- struct archive* Archive_;
-};
-
-TWriter::TWriter(const TFsPath& path)
- : Impl_(new TImpl(path))
-{
-}
-
-TWriter::~TWriter()
-{ }
-
-void TWriter::Finish() {
- Impl_->Finish();
-}
-
-void TWriter::SetBaseSvnRevision(ui64 revision) {
- Impl_->SetBaseSvnRevision(revision);
-}
-
-void TWriter::AddRevprop(const TString& prop, const TString& value) {
- Impl_->AddRevprop(prop, value);
-}
-
-void TWriter::Copy(const TString& path, const TOrigin& origin) {
- Impl_->Copy(path, origin);
-}
-
-void TWriter::MkDir(const TString& path) {
- Impl_->MkDir(path);
-}
-
-void TWriter::RemoveFile(const TString& path) {
- Impl_->RemoveFile(path);
-}
-
-void TWriter::RemoveTree(const TString& path) {
- Impl_->RemoveTree(path);
-}
-
-void TWriter::StoreFile(
- const TString& path,
- const TString& data,
- const bool execute,
- const bool symlink,
- const TMaybe<bool> binaryHint,
- const TMaybe<bool> encrypted)
-{
- Impl_->StoreFile(path, data, execute, symlink, binaryHint, encrypted);
-}
-
-} // namespace NZipatch
-
diff --git a/library/cpp/zipatch/writer.h b/library/cpp/zipatch/writer.h
deleted file mode 100644
index 75cbe49777..0000000000
--- a/library/cpp/zipatch/writer.h
+++ /dev/null
@@ -1,51 +0,0 @@
-#pragma once
-
-#include <util/folder/path.h>
-#include <util/generic/ptr.h>
-#include <util/generic/maybe.h>
-
-namespace NZipatch {
-
-class TWriter {
-public:
- struct TOrigin {
- TString Path;
- ui64 Revision;
-
- inline TOrigin(const TString& path, const ui64 revision)
- : Path(path)
- , Revision(revision)
- { }
- };
-
- TWriter(const TFsPath& path);
- ~TWriter();
-
- void Finish();
-
- void SetBaseSvnRevision(ui64 revision);
-
- void AddRevprop(const TString& prop, const TString& value);
-
- void Copy(const TString& path, const TOrigin& origin);
-
- void MkDir(const TString& path);
-
- void RemoveFile(const TString& path);
-
- void RemoveTree(const TString& path);
-
- void StoreFile(const TString& path,
- const TString& data,
- const bool execute,
- const bool symlink,
- const TMaybe<bool> binaryHint = Nothing(),
- const TMaybe<bool> encrypted = Nothing());
-
-private:
- class TImpl;
- THolder<TImpl> Impl_;
-};
-
-} // namespace NZipatch
-
diff --git a/library/cpp/zipatch/ya.make b/library/cpp/zipatch/ya.make
deleted file mode 100644
index f8fd6006b2..0000000000
--- a/library/cpp/zipatch/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-LIBRARY()
-
-SRCS(
- reader.cpp
- writer.cpp
-)
-
-PEERDIR(
- contrib/libs/libarchive
- library/cpp/json
-)
-
-GENERATE_ENUM_SERIALIZATION(reader.h)
-
-END()
-
diff --git a/library/python/mlockall/__init__.py b/library/python/mlockall/__init__.py
deleted file mode 100644
index 4d867d692e..0000000000
--- a/library/python/mlockall/__init__.py
+++ /dev/null
@@ -1,10 +0,0 @@
-import sys
-
-
-def mlockall_current():
- if not sys.platform.startswith('linux'):
- return -1
-
- import library.python.mlockall.mlockall as ml
-
- return ml.mlockall_current()
diff --git a/library/python/mlockall/mlockall.pyx b/library/python/mlockall/mlockall.pyx
deleted file mode 100644
index b35d661a42..0000000000
--- a/library/python/mlockall/mlockall.pyx
+++ /dev/null
@@ -1,19 +0,0 @@
-cdef extern from "<util/system/error.h>":
- int LastSystemError()
-
-cdef extern from "<util/system/mlock.h>":
- cdef enum ELockAllMemoryFlag:
- LockCurrentMemory
- LockFutureMemory
- cppclass ELockAllMemoryFlags:
- operator=(ELockAllMemoryFlag)
- void LockAllMemory(ELockAllMemoryFlags flags) except+
-
-def mlockall_current():
- cdef ELockAllMemoryFlags flags
- try:
- flags = LockCurrentMemory
- LockAllMemory(flags)
- return 0
- except Exception:
- return LastSystemError()
diff --git a/library/python/mlockall/ya.make b/library/python/mlockall/ya.make
deleted file mode 100644
index 96eba051ab..0000000000
--- a/library/python/mlockall/ya.make
+++ /dev/null
@@ -1,14 +0,0 @@
-PY23_LIBRARY()
-
-PY_SRCS(
- __init__.py
-)
-
-IF (OS_LINUX)
- PY_SRCS(
- mlockall.pyx
- )
-ENDIF()
-
-END()
-
diff --git a/library/python/nstools/__init__.py b/library/python/nstools/__init__.py
deleted file mode 100644
index 34cc0f9574..0000000000
--- a/library/python/nstools/__init__.py
+++ /dev/null
@@ -1,6 +0,0 @@
-from .nstools import unshare_ns, move_to_ns
-
-__all__ = [
- 'unshare_ns',
- 'move_to_ns'
-]
diff --git a/library/python/nstools/nstools.pyx b/library/python/nstools/nstools.pyx
deleted file mode 100644
index 5ef30373ff..0000000000
--- a/library/python/nstools/nstools.pyx
+++ /dev/null
@@ -1,28 +0,0 @@
-from cpython.exc cimport PyErr_SetFromErrno
-
-cdef extern from "<sched.h>" nogil:
- int setns(int fd, int mode)
- int unshare(int flags)
-
- cpdef enum:
- Fs "CLONE_FS"
- # Cgroup "CLONE_NEWCGROUP"
- Ipc "CLONE_NEWIPC"
- Network "CLONE_NEWNET"
- Mount "CLONE_NEWNS"
- Pid "CLONE_NEWPID"
- User "CLONE_NEWUSER"
- Uts "CLONE_NEWUTS"
-
-def unshare_ns(int flags):
- cdef int ret = unshare(flags)
- if ret != 0:
- PyErr_SetFromErrno(OSError)
-
-def move_to_ns(object fileobject, int mode):
- if not isinstance(fileobject, int):
- fileobject = fileobject.fileno()
-
- cdef int ret = setns(fileobject, mode)
- if ret != 0:
- PyErr_SetFromErrno(OSError)
diff --git a/library/python/nstools/ya.make b/library/python/nstools/ya.make
deleted file mode 100644
index 49eddeb919..0000000000
--- a/library/python/nstools/ya.make
+++ /dev/null
@@ -1,14 +0,0 @@
-PY23_LIBRARY()
-
-IF(OS_LINUX)
-PY_SRCS(
- __init__.py
- nstools.pyx
-)
-ELSE()
-PY_SRCS(
- nstools.py
-)
-ENDIF()
-
-END()
diff --git a/library/python/symbols/libmagic/syms.cpp b/library/python/symbols/libmagic/syms.cpp
deleted file mode 100644
index 839441ae14..0000000000
--- a/library/python/symbols/libmagic/syms.cpp
+++ /dev/null
@@ -1,19 +0,0 @@
-#include <contrib/libs/libmagic/src/magic.h>
-
-#include <library/python/symbols/registry/syms.h>
-
-BEGIN_SYMS("magic")
-SYM(magic_open)
-SYM(magic_close)
-SYM(magic_error)
-SYM(magic_errno)
-SYM(magic_file)
-SYM(magic_buffer)
-SYM(magic_load)
-SYM(magic_setflags)
-SYM(magic_check)
-SYM(magic_compile)
-SYM(magic_descriptor)
-SYM(magic_list)
-SYM(magic_version)
-END_SYMS()
diff --git a/library/python/symbols/libmagic/ya.make b/library/python/symbols/libmagic/ya.make
deleted file mode 100644
index a248603a41..0000000000
--- a/library/python/symbols/libmagic/ya.make
+++ /dev/null
@@ -1,12 +0,0 @@
-LIBRARY()
-
-PEERDIR(
- contrib/libs/libmagic
- library/python/symbols/registry
-)
-
-SRCS(
- GLOBAL syms.cpp
-)
-
-END()
diff --git a/library/python/testing/coverage_utils/__init__.py b/library/python/testing/coverage_utils/__init__.py
deleted file mode 100644
index 3313eee7b5..0000000000
--- a/library/python/testing/coverage_utils/__init__.py
+++ /dev/null
@@ -1,14 +0,0 @@
-import re
-
-
-def make_filter(prefix_filter, exclude_regexp):
- filters = []
- if prefix_filter:
- filters.append(lambda x: x.startswith(prefix_filter))
- if exclude_regexp:
- regexp = re.compile(exclude_regexp)
- filters.append(lambda x: not regexp.search(x))
-
- if filters:
- return lambda x: all(pred(x) for pred in filters)
- return lambda x: True
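
make_filter() above composes an optional prefix whitelist and an optional exclusion regexp into a single predicate, with "keep everything" as the default. A short usage sketch (the paths are made up for illustration):

    # Every configured predicate must pass for a file to be kept.
    keep = make_filter('library/python/', r'/tests?/')

    assert keep('library/python/foo/bar.py')
    assert not keep('contrib/python/foo.py')            # wrong prefix
    assert not keep('library/python/foo/tests/t.py')    # matches exclude_regexp

    # With neither filter configured, everything passes.
    assert make_filter(None, None)('anything')
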
diff --git a/library/python/testing/coverage_utils/ya.make b/library/python/testing/coverage_utils/ya.make
deleted file mode 100644
index 3582136180..0000000000
--- a/library/python/testing/coverage_utils/ya.make
+++ /dev/null
@@ -1,5 +0,0 @@
-PY23_LIBRARY()
-
-PY_SRCS(__init__.py)
-
-END()
diff --git a/library/python/testing/system_info/__init__.py b/library/python/testing/system_info/__init__.py
deleted file mode 100644
index 8bad854d97..0000000000
--- a/library/python/testing/system_info/__init__.py
+++ /dev/null
@@ -1,204 +0,0 @@
-import collections
-import psutil
-from functools import wraps
-
-
-def safe(name):
- def decorator_safe(func):
- """
- Decorator for try-catch on string assembly
- """
-
- @wraps(func)
- def wrap_safe(*args, **kwargs):
- try:
- return func(*args, **kwargs)
- except Exception as e:
- return "Failed to get {}: {}".format(name, e)
-
- return wrap_safe
-
- return decorator_safe
-
-
-def get_proc_attrib(attr):
- if callable(attr):
- try:
- return attr()
- except psutil.Error:
- return None
- else:
- return attr
-
-
-@safe("cpu/mem info")
-def _cpu_mem_str():
- vm = psutil.virtual_memory()
- cpu_tp = psutil.cpu_times_percent(0.1)
-
- str_items = []
- str_items.append(
- "CPU: Idle: {}% User: {}% System: {}% IOwait: {}%\n".format(
- cpu_tp.idle, cpu_tp.user, cpu_tp.system, cpu_tp.iowait
- )
- )
-
- str_items.append(
- "MEM: total {} Gb available: {} Gb used: {} Gb free: {} Gb active: {} Gb inactive: {} Gb shared: {} Gb\n".format(
- round(vm.total / 1e9, 2),
- round(vm.available / 1e9, 2),
- round(vm.used / 1e9, 2),
- round(vm.free / 1e9, 2),
- round(vm.active / 1e9, 2),
- round(vm.inactive / 1e9, 2),
- round(vm.shared / 1e9, 2),
- )
- )
-
- str_items.append("Used swap: {}%\n".format(psutil.swap_memory().percent))
-
- return "".join(str_items)
-
-
-@safe("processes tree")
-def _proc_tree_str():
- tree = collections.defaultdict(list)
- for p in psutil.process_iter():
- try:
- tree[p.ppid()].append(p.pid)
- except (psutil.NoSuchProcess, psutil.ZombieProcess):
- pass
- # on systems supporting PID 0, PID 0's parent is usually 0
- if 0 in tree and 0 in tree[0]:
- tree[0].remove(0)
-
- return _print_proc_tree(min(tree), tree)
-
-
-def _print_proc_tree(parent_root, tree, indent_root=''):
- stack = [(parent_root, indent_root, "")]
- str_items = list()
-
- while len(stack) > 0:
- try:
- parent, indent, prefix = stack.pop()
- p = psutil.Process(parent)
- name = get_proc_attrib(p.name)
- str_items.append("{}({}, '{}'".format(prefix, parent, name if name else '?'))
-
- exe = get_proc_attrib(p.exe)
- if exe:
- str_items.append(" [{}]".format(exe))
-
- str_items.append(") ")
- str_items.append(" st: {}".format(p.status()))
- str_items.append(" mem: {}%".format(round(p.memory_percent(), 2)))
-
- ndfs = get_proc_attrib(p.num_fds)
- if ndfs and ndfs > 0:
- str_items.append(" fds: {}".format(ndfs))
-
- conns = get_proc_attrib(p.connections)
- if conns and len(conns) > 1:
- str_items.append(" num con: {}".format(len(conns)))
-
- ths = get_proc_attrib(p.num_threads)
- if ths and ths > 1:
- str_items.append(" threads: {}".format(ths))
-
- str_items.append("\n")
- except psutil.Error:
- name = "?"
- str_items.append("({}, '{}')\n".format(parent, name))
-
- if parent not in tree:
- continue
-
- child = tree[parent][-1]
- stack.append((child, indent + " ", indent + "`_ "))
-
- children = tree[parent][:-1]
- children.reverse()
- for child in children:
- stack.append((child, indent + "| ", indent + "|- "))
-
- return "".join(str_items)
-
-
-@safe("network info")
-def _network_conn_str():
- str_items = list()
-
- counters = psutil.net_io_counters()
- str_items.append(
- "\nPackSent: {} PackRecv: {} ErrIn: {} ErrOut: {} DropIn: {} DropOut: {}\n\n".format(
- counters.packets_sent,
- counters.packets_recv,
- counters.errin,
- counters.errout,
- counters.dropin,
- counters.dropout,
- )
- )
-
- ifaces = psutil.net_if_addrs()
- conns = psutil.net_connections()
- list_ip = collections.defaultdict(list)
- for con in conns:
- list_ip[con.laddr.ip].append(con)
-
- for name, addrs in ifaces.items():
- str_items.append("{}:\n".format(name))
-
- for ip in addrs:
- str_items.append(" {}".format(ip.address))
- if ip.netmask:
- str_items.append(" mask={}".format(ip.netmask))
- if ip.broadcast:
- str_items.append(" bc={}".format(ip.broadcast))
- str_items.append("\n")
-
- for con in list_ip[ip.address]:
- str_items.append(" {}".format(con.laddr.port))
- if con.raddr:
- str_items.append(" <--> {} : {}".format(con.raddr.ip, con.raddr.port))
- str_items.append(" (stat: {}".format(con.status))
- if con.pid:
- str_items.append(" proc: {} (pid={})".format(psutil.Process(con.pid).exe(), con.pid))
- str_items.append(")\n")
-
- del list_ip[ip.address]
-
- str_items.append("***\n")
- for ip, conns in list_ip.items():
- str_items.append(" {}\n".format(ip))
-
- for con in conns:
- str_items.append(" {}".format(con.laddr.port))
- if con.raddr:
- str_items.append(" <--> {} : {}".format(con.raddr.ip, con.raddr.port))
- str_items.append(" (stat: {}".format(con.status))
- if con.pid:
- str_items.append(" proc: {} (pid={})".format(psutil.Process(con.pid).exe(), con.pid))
- str_items.append(")\n")
-
- return "".join(str_items)
-
-
-@safe("info")
-def get_system_info():
- str_items = list()
-
- str_items.append("\n --- CPU MEM --- \n")
- str_items.append(_cpu_mem_str())
- str_items.append("\n")
-
- str_items.append("\n --- PROCESSES TREE --- \n")
- str_items.append(_proc_tree_str())
- str_items.append("\n")
-
- str_items.append("\n --- NETWORK INFO --- \n")
- str_items.append(_network_conn_str())
- str_items.append("\n")
-
- return "".join(str_items)
diff --git a/library/python/testing/system_info/ya.make b/library/python/testing/system_info/ya.make
deleted file mode 100644
index f655db8ebe..0000000000
--- a/library/python/testing/system_info/ya.make
+++ /dev/null
@@ -1,15 +0,0 @@
-PY23_LIBRARY()
-
-PY_SRCS(__init__.py)
-
-PEERDIR(
- contrib/python/psutil
-)
-
-STYLE_PYTHON()
-
-END()
-
-RECURSE_FOR_TESTS(
- test
-)
diff --git a/library/recipes/docker_compose/example/Dockerfile b/library/recipes/docker_compose/example/Dockerfile
deleted file mode 100644
index 8e67d74e89..0000000000
--- a/library/recipes/docker_compose/example/Dockerfile
+++ /dev/null
@@ -1,5 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-RUN pip install -r requirements.txt
-CMD ["python", "app.py"]
diff --git a/library/recipes/docker_compose/example/app.py b/library/recipes/docker_compose/example/app.py
deleted file mode 100644
index 77afcc9b6a..0000000000
--- a/library/recipes/docker_compose/example/app.py
+++ /dev/null
@@ -1,17 +0,0 @@
-import time
-
-import redis
-from flask import Flask
-
-
-app = Flask(__name__)
-cache = redis.Redis(host='redis', port=6379)
-
-
-@app.route('/')
-def hello():
- return 'Hello World!'
-
-
-if __name__ == "__main__":
- app.run(host="0.0.0.0", debug=True)
diff --git a/library/recipes/docker_compose/example/docker-compose.yml b/library/recipes/docker_compose/example/docker-compose.yml
deleted file mode 100644
index 780f263945..0000000000
--- a/library/recipes/docker_compose/example/docker-compose.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: '3.4'
-services:
- web:
- build:
- context: .
- network: host
- ports:
- - "5000:5000"
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/example/requirements.txt b/library/recipes/docker_compose/example/requirements.txt
deleted file mode 100644
index 1a5dc97b12..0000000000
--- a/library/recipes/docker_compose/example/requirements.txt
+++ /dev/null
@@ -1,2 +0,0 @@
-flask
-redis
diff --git a/library/recipes/docker_compose/example/test.py b/library/recipes/docker_compose/example/test.py
deleted file mode 100644
index c6769eab7f..0000000000
--- a/library/recipes/docker_compose/example/test.py
+++ /dev/null
@@ -1,7 +0,0 @@
-import urllib.request
-
-
-def test_compose_works():
- request = urllib.request.urlopen("http://localhost:5000")
- response = request.read().decode(request.headers.get_content_charset())
- assert 'Hello World!' in response
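
The example test relies on the docker_compose recipe having already brought the web service up on localhost:5000. A slightly more defensive variant — an illustrative sketch, not the shipped test — would poll the endpoint while the Flask container finishes starting:

    import time
    import urllib.error
    import urllib.request


    def fetch_with_retry(url="http://localhost:5000", attempts=10, delay=1.0):
        # compose "up" can return before the application inside the container
        # is ready to accept connections, so retry a few times.
        last_error = None
        for _ in range(attempts):
            try:
                request = urllib.request.urlopen(url)
                return request.read().decode(request.headers.get_content_charset())
            except (urllib.error.URLError, ConnectionError) as e:
                last_error = e
                time.sleep(delay)
        raise AssertionError("service did not come up: {}".format(last_error))


    def test_compose_works():
        assert 'Hello World!' in fetch_with_retry()
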
diff --git a/library/recipes/docker_compose/example/ya.make b/library/recipes/docker_compose/example/ya.make
deleted file mode 100644
index 96029ca075..0000000000
--- a/library/recipes/docker_compose/example/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-PY3TEST()
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
-)
-
-END()
diff --git a/library/recipes/docker_compose/example_network_go/Dockerfile b/library/recipes/docker_compose/example_network_go/Dockerfile
deleted file mode 100644
index 96fa38bd48..0000000000
--- a/library/recipes/docker_compose/example_network_go/Dockerfile
+++ /dev/null
@@ -1 +0,0 @@
-FROM ubuntu:xenial
diff --git a/library/recipes/docker_compose/example_network_go/docker-compose.yml b/library/recipes/docker_compose/example_network_go/docker-compose.yml
deleted file mode 100644
index 4122b00537..0000000000
--- a/library/recipes/docker_compose/example_network_go/docker-compose.yml
+++ /dev/null
@@ -1,13 +0,0 @@
-version: '3.4'
-services:
- test:
- build:
- context: .
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
- hostname: redis
-
-networks:
- default:
- external:
- name: example_network_go_test
diff --git a/library/recipes/docker_compose/example_network_go/go_test.go b/library/recipes/docker_compose/example_network_go/go_test.go
deleted file mode 100644
index 76d37ede76..0000000000
--- a/library/recipes/docker_compose/example_network_go/go_test.go
+++ /dev/null
@@ -1,21 +0,0 @@
-package example
-
-import (
- "context"
- "testing"
-
- "github.com/go-redis/redis/v8"
- "github.com/stretchr/testify/require"
-)
-
-func TestFoo(t *testing.T) {
- c := redis.NewUniversalClient(
- &redis.UniversalOptions{
- Addrs: []string{"redis:6379"},
- },
- )
-
- sc := c.Ping(context.Background())
- require.NoError(t, sc.Err())
- t.Log(sc)
-}
diff --git a/library/recipes/docker_compose/example_network_go/recipe-config.yml b/library/recipes/docker_compose/example_network_go/recipe-config.yml
deleted file mode 100644
index cef53ef20c..0000000000
--- a/library/recipes/docker_compose/example_network_go/recipe-config.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-test-host: test
-
-networks:
- example_network_go_test:
- ipv6: true
diff --git a/library/recipes/docker_compose/example_network_go/ya.make b/library/recipes/docker_compose/example_network_go/ya.make
deleted file mode 100644
index 569d6290f4..0000000000
--- a/library/recipes/docker_compose/example_network_go/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-GO_TEST()
-
-GO_TEST_SRCS(go_test.go)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(RECIPE_CONFIG_FILE library/recipes/docker_compose/example_network_go/recipe-config.yml)
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/example_test_container/Dockerfile b/library/recipes/docker_compose/example_test_container/Dockerfile
deleted file mode 100644
index be2ca6ee27..0000000000
--- a/library/recipes/docker_compose/example_test_container/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /code
diff --git a/library/recipes/docker_compose/example_test_container/docker-compose.yml b/library/recipes/docker_compose/example_test_container/docker-compose.yml
deleted file mode 100644
index 6a3af8615b..0000000000
--- a/library/recipes/docker_compose/example_test_container/docker-compose.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: '3.4'
-services:
- test:
- build:
- context: .
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/example_test_container/test.py b/library/recipes/docker_compose/example_test_container/test.py
deleted file mode 100644
index 980943f48a..0000000000
--- a/library/recipes/docker_compose/example_test_container/test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-
-def test_compose_works():
- assert os.path.exists("/code")
diff --git a/library/recipes/docker_compose/example_test_container/ya.make b/library/recipes/docker_compose/example_test_container/ya.make
deleted file mode 100644
index 89f6b21a97..0000000000
--- a/library/recipes/docker_compose/example_test_container/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-PY3TEST()
-
-TEST_SRCS(test.py)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_TEST_HOST test)
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/example_test_container_go/Dockerfile b/library/recipes/docker_compose/example_test_container_go/Dockerfile
deleted file mode 100644
index be2ca6ee27..0000000000
--- a/library/recipes/docker_compose/example_test_container_go/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /code
diff --git a/library/recipes/docker_compose/example_test_container_go/docker-compose.yml b/library/recipes/docker_compose/example_test_container_go/docker-compose.yml
deleted file mode 100644
index 6a3af8615b..0000000000
--- a/library/recipes/docker_compose/example_test_container_go/docker-compose.yml
+++ /dev/null
@@ -1,7 +0,0 @@
-version: '3.4'
-services:
- test:
- build:
- context: .
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/example_test_container_go/go_test.go b/library/recipes/docker_compose/example_test_container_go/go_test.go
deleted file mode 100644
index 1771ccfc38..0000000000
--- a/library/recipes/docker_compose/example_test_container_go/go_test.go
+++ /dev/null
@@ -1,13 +0,0 @@
-package example
-
-import (
- "os"
- "testing"
-
- "github.com/stretchr/testify/require"
-)
-
-func TestFoo(t *testing.T) {
- _, err := os.Stat("/code")
- require.NoError(t, err)
-}
diff --git a/library/recipes/docker_compose/example_test_container_go/ya.make b/library/recipes/docker_compose/example_test_container_go/ya.make
deleted file mode 100644
index dd31291009..0000000000
--- a/library/recipes/docker_compose/example_test_container_go/ya.make
+++ /dev/null
@@ -1,19 +0,0 @@
-GO_TEST()
-
-GO_TEST_SRCS(go_test.go)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_TEST_HOST test)
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/example_with_context/docker-compose.yml b/library/recipes/docker_compose/example_with_context/docker-compose.yml
deleted file mode 100644
index d92d094247..0000000000
--- a/library/recipes/docker_compose/example_with_context/docker-compose.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-version: '3.4'
-services:
- sweb:
- build:
- context: $main
- network: host
- ports:
- - "5000:5000"
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/example_with_context/docker-context.yml b/library/recipes/docker_compose/example_with_context/docker-context.yml
deleted file mode 100644
index 19adc96fad..0000000000
--- a/library/recipes/docker_compose/example_with_context/docker-context.yml
+++ /dev/null
@@ -1,5 +0,0 @@
-main:
- - build://devtools/dummy_arcadia/hello_world/hello_world: bin/hello
- - arcadia://library/recipes/docker_compose/example/Dockerfile: Dockerfile
- - arcadia://library/recipes/docker_compose/example/app.py: app.py
- - arcadia://library/recipes/docker_compose/example/requirements.txt: requirements.txt
diff --git a/library/recipes/docker_compose/example_with_context/test.py b/library/recipes/docker_compose/example_with_context/test.py
deleted file mode 100644
index b7f13fb105..0000000000
--- a/library/recipes/docker_compose/example_with_context/test.py
+++ /dev/null
@@ -1,9 +0,0 @@
-import urllib.request
-
-
-def test_compose_works():
- # import pdb; pdb.set_trace()
-
- request = urllib.request.urlopen("http://localhost:5000")
- response = request.read().decode(request.headers.get_content_charset())
- assert 'Hello World!' in response
diff --git a/library/recipes/docker_compose/example_with_context/ya.make b/library/recipes/docker_compose/example_with_context/ya.make
deleted file mode 100644
index b1dde5e827..0000000000
--- a/library/recipes/docker_compose/example_with_context/ya.make
+++ /dev/null
@@ -1,29 +0,0 @@
-PY3TEST()
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_CONTEXT_FILE library/recipes/docker_compose/example_with_context/docker-context.yml)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
-)
-
-DATA(
- arcadia/library/recipes/docker_compose/example
-)
-
-DEPENDS(
- devtools/dummy_arcadia/hello_world
-)
-
-END()
diff --git a/library/recipes/docker_compose/example_with_recipe_config/Dockerfile b/library/recipes/docker_compose/example_with_recipe_config/Dockerfile
deleted file mode 100644
index 1079e205b5..0000000000
--- a/library/recipes/docker_compose/example_with_recipe_config/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /app
diff --git a/library/recipes/docker_compose/example_with_recipe_config/docker-compose.yml b/library/recipes/docker_compose/example_with_recipe_config/docker-compose.yml
deleted file mode 100644
index 20419b32eb..0000000000
--- a/library/recipes/docker_compose/example_with_recipe_config/docker-compose.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-version: '3.4'
-services:
- test:
- build:
- context: $main
-
diff --git a/library/recipes/docker_compose/example_with_recipe_config/recipe-config.yml b/library/recipes/docker_compose/example_with_recipe_config/recipe-config.yml
deleted file mode 100644
index 278ceb6521..0000000000
--- a/library/recipes/docker_compose/example_with_recipe_config/recipe-config.yml
+++ /dev/null
@@ -1,10 +0,0 @@
-test-host: test # name of container to execute test in
-
-context: # contexts to build images
- main:
- - build://devtools/dummy_arcadia/hello_world/hello_world: bin/hello
- - arcadia://library/recipes/docker_compose/example_with_recipe_config/Dockerfile: Dockerfile
-
-save:
- test:
- - /tmp/output
\ No newline at end of file
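
recipe-config.yml above carries three sections: test-host (the compose service that hosts the test), context (build contexts assembled from arcadia:// and build:// sources) and save (paths to copy out of a container after the run); the example_network_go recipe config earlier also uses a networks section. A hedged sketch of reading such a file with PyYAML — the load_recipe_config helper is hypothetical, not the recipe's real parser:

    import yaml


    def load_recipe_config(path):
        # Hypothetical sanity check over the keys seen in the examples above.
        with open(path) as f:
            cfg = yaml.safe_load(f) or {}
        unknown = set(cfg) - {"test-host", "context", "save", "networks"}
        if unknown:
            raise ValueError("unexpected recipe-config keys: {}".format(sorted(unknown)))
        return cfg


    cfg = load_recipe_config("recipe-config.yml")
    print(cfg.get("test-host"))  # e.g. "test"
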
diff --git a/library/recipes/docker_compose/example_with_recipe_config/test.py b/library/recipes/docker_compose/example_with_recipe_config/test.py
deleted file mode 100644
index 08c4d7b6e5..0000000000
--- a/library/recipes/docker_compose/example_with_recipe_config/test.py
+++ /dev/null
@@ -1,12 +0,0 @@
-import os
-import logging
-
-import yatest.common
-
-
-def test():
- os.makedirs("/tmp/output")
- with open("/tmp/output/out.txt", "w") as f:
- res = yatest.common.execute("/app/bin/hello")
- f.write("/bin/hello stdout: {}".format(res.std_out))
- logging.info("out: %s", res.std_out)
diff --git a/library/recipes/docker_compose/example_with_recipe_config/ya.make b/library/recipes/docker_compose/example_with_recipe_config/ya.make
deleted file mode 100644
index 3ab2889a01..0000000000
--- a/library/recipes/docker_compose/example_with_recipe_config/ya.make
+++ /dev/null
@@ -1,27 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(RECIPE_CONFIG_FILE library/recipes/docker_compose/example_with_recipe_config/recipe-config.yml)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
-)
-
-DEPENDS(
- devtools/dummy_arcadia/hello_world
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/Dockerfile
deleted file mode 100644
index be2ca6ee27..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /code
diff --git a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/docker-compose.yml b/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/docker-compose.yml
deleted file mode 100644
index 5eef027d27..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/docker-compose.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: '3.4'
-services:
- test:
- user: $CURRENT_UID
- build:
- context: .
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/test.py b/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/test.py
deleted file mode 100644
index 980943f48a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-
-def test_compose_works():
- assert os.path.exists("/code")
diff --git a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/ya.make b/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/ya.make
deleted file mode 100644
index 4e623da45d..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/invalid_test_container_name/ya.make
+++ /dev/null
@@ -1,24 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_TEST_HOST not_existing_container_name)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/Dockerfile
deleted file mode 100644
index be2ca6ee27..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /code
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/docker-compose.yml b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/docker-compose.yml
deleted file mode 100644
index 39a25fc869..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/docker-compose.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-version: '3.4'
-services:
- test:
- user: $CURRENT_UID
- build:
- context: .
- command: 'echo "say hello"'
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/test.py b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/test.py
deleted file mode 100644
index 980943f48a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-
-def test_compose_works():
- assert os.path.exists("/code")
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/ya.make b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/ya.make
deleted file mode 100644
index 6ea9fdfc8a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_command/ya.make
+++ /dev/null
@@ -1,24 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_TEST_HOST test)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/Dockerfile
deleted file mode 100644
index be2ca6ee27..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/Dockerfile
+++ /dev/null
@@ -1,2 +0,0 @@
-FROM ubuntu:xenial
-ADD . /code
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/docker-compose.yml b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/docker-compose.yml
deleted file mode 100644
index 798539866c..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/docker-compose.yml
+++ /dev/null
@@ -1,8 +0,0 @@
-version: '3.4'
-services:
- test:
- build:
- context: .
- user: 'root:root'
- redis:
- image: "redis:alpine@sha256:66ccc75f079ab9059c900e9545bbd271bff78a66f94b45827e6901f57fb973f1"
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/test.py b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/test.py
deleted file mode 100644
index 980943f48a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/test.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import os
-
-
-def test_compose_works():
- assert os.path.exists("/code")
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/ya.make b/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/ya.make
deleted file mode 100644
index 6ea9fdfc8a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_container_with_existing_user/ya.make
+++ /dev/null
@@ -1,24 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-SET(DOCKER_TEST_HOST test)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/docker-compose.yml b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/docker-compose.yml
deleted file mode 100644
index 73f93866e9..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/docker-compose.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-version: '3.4'
-services:
- srv1:
- build: srv1
- srv2:
- build: srv2
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/Dockerfile
deleted file mode 100644
index 3b871be284..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-CMD ["python", "app.py"]
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/app.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/app.py
deleted file mode 100644
index 244a421fe3..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv1/app.py
+++ /dev/null
@@ -1,2 +0,0 @@
-if __name__ == "__main__":
- pass
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/Dockerfile
deleted file mode 100644
index 3b871be284..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-CMD ["python", "app.py"]
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/app.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/app.py
deleted file mode 100644
index cfff70f109..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/srv2/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import time
-
-
-if __name__ == "__main__":
- while True:
- time.sleep(1)
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/test.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/test.py
deleted file mode 100644
index fe819e6c1a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/test.py
+++ /dev/null
@@ -1,2 +0,0 @@
-def test_simple():
- return
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/ya.make b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/ya.make
deleted file mode 100644
index 01260fb7e5..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_exit_0/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/docker-compose.yml b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/docker-compose.yml
deleted file mode 100644
index 73f93866e9..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/docker-compose.yml
+++ /dev/null
@@ -1,6 +0,0 @@
-version: '3.4'
-services:
- srv1:
- build: srv1
- srv2:
- build: srv2
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/Dockerfile
deleted file mode 100644
index 3b871be284..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-CMD ["python", "app.py"]
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/app.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/app.py
deleted file mode 100644
index 55b1ce720e..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv1/app.py
+++ /dev/null
@@ -1,5 +0,0 @@
-import sys
-
-
-if __name__ == "__main__":
- sys.exit(5)
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/Dockerfile b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/Dockerfile
deleted file mode 100644
index 3b871be284..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/Dockerfile
+++ /dev/null
@@ -1,4 +0,0 @@
-FROM python:3.4-alpine
-ADD . /code
-WORKDIR /code
-CMD ["python", "app.py"]
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/app.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/app.py
deleted file mode 100644
index cfff70f109..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/srv2/app.py
+++ /dev/null
@@ -1,6 +0,0 @@
-import time
-
-
-if __name__ == "__main__":
- while True:
- time.sleep(1)
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/test.py b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/test.py
deleted file mode 100644
index fe819e6c1a..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/test.py
+++ /dev/null
@@ -1,2 +0,0 @@
-def test_simple():
- return
diff --git a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/ya.make b/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/ya.make
deleted file mode 100644
index 01260fb7e5..0000000000
--- a/library/recipes/docker_compose/test/acceptance/data/test_recipe_container_fail/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-PY3TEST()
-
-OWNER(g:yatool)
-
-TEST_SRCS(
- test.py
-)
-
-# To use docker-compose.yml from another directory, set DOCKER_COMPOSE_FILE variable with Arcadia relative path to the file
-# and do not forget to add the directory to the DATA macro, e.g.:
-# SET(DOCKER_COMPOSE_FILE library/recipes/docker_compose/test/docker-compose-1.yml)
-# DATA(arcadia/library/recipes/docker_compose/test)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/acceptance/test_docker_compose.py b/library/recipes/docker_compose/test/acceptance/test_docker_compose.py
deleted file mode 100644
index 8253f63392..0000000000
--- a/library/recipes/docker_compose/test/acceptance/test_docker_compose.py
+++ /dev/null
@@ -1,92 +0,0 @@
-# coding: utf-8
-
-import os
-
-import test.tests.common as tests_common
-import yatest.common
-
-
-class TestDockerCompose(tests_common.YaTest):
-
- def test_run(self):
- res = self.run_ya_make_test(cwd=yatest.common.source_path("library/recipes/docker_compose/example"), args=["-A", "--test-type", "py3test"])
- assert res.get_tests_count() == 1
- assert res.get_suites_count() == 1
-
- res.verify_test("test.py", "test_compose_works", "OK")
- for output_type in ['std_out', 'std_err']:
- assert os.path.exists(os.path.join(
- res.output_root, "library/recipes/docker_compose/example",
- "test-results", "py3test", "testing_out_stuff", "containers", "example_redis_1", "container_{}.log".format(output_type)
- ))
- assert os.path.exists(os.path.join(
- res.output_root, "library/recipes/docker_compose/example",
- "test-results", "py3test", "testing_out_stuff", "containers", "example_web_1", "container_{}.log".format(output_type)
- ))
-
- def test_run_with_context(self):
- res = self.run_ya_make_test(cwd=yatest.common.source_path("library/recipes/docker_compose/example_with_context"), args=["-A", "--test-type", "py3test"])
- assert res.get_tests_count() == 1
- assert res.get_suites_count() == 1
-
- res.verify_test("test.py", "test_compose_works", "OK")
-
- def test_run_test_in_container(self):
- with open("stdin", "wb") as stdin:
- # need to pass stdin as docker-compose exec needs it (it runs `docker exec --interactive`)
- res = self.run_ya_make_test(
- cwd=yatest.common.source_path("library/recipes/docker_compose/example_test_container"), args=["-A", "--test-type", "py3test"], stdin=stdin)
- assert res.get_tests_count() == 1
- assert res.get_suites_count() == 1
-
- res.verify_test("test.py", "test_compose_works", "OK")
-
- def test_invalid_test_container_name(self):
- res = self.run_ya_make_test(cwd=yatest.common.test_source_path("data/invalid_test_container_name"), args=["-A", "--test-type", "py3test"])
- assert res.get_tests_count() == 0
- assert res.get_suites_count() == 1
- assert "Service with name 'not_existing_container_name' was not found to be setup as a host for running test" in res.err
-
- def test_container_with_existing_command(self):
- res = self.run_ya_make_test(cwd=yatest.common.test_source_path("data/test_container_with_existing_command"), args=["-A", "--test-type", "py3test"])
- assert res.get_tests_count() == 0
- assert res.get_suites_count() == 1
- assert "Test hosting service 'test' has `command` section which is not supported by testing framework" in res.err
-
- def test_container_with_existing_user(self):
- res = self.run_ya_make_test(cwd=yatest.common.test_source_path("data/test_container_with_existing_user"), args=["-A", "--test-type", "py3test"])
- assert res.get_tests_count() == 0
- assert res.get_suites_count() == 1
- assert "Test hosting service 'test' has `user` section which is not supported by testing framework" in res.err
-
- def test_run_with_recipe_config(self):
- with open("stdin", "wb") as stdin:
- # need to pass stdin as docker-compose exec needs it (it runs `docker exec --interactive`)
- res = self.run_ya_make_test(
- cwd=yatest.common.source_path("library/recipes/docker_compose/example_with_recipe_config"),
- args=["-A", "--test-type", "py3test"],
- stdin=stdin
- )
-
- assert res.get_tests_count() == 1
- assert res.get_suites_count() == 1
-
- res.verify_test("test.py", "test", "OK")
-
- assert os.path.exists(os.path.join(
- res.output_root,
- "library/recipes/docker_compose/example_with_recipe_config/test-results/py3test/testing_out_stuff/containers/py3test_test_1/output/",
- "out.txt",
- ))
-
- def test_recipe_container_exit_0(self):
- res = self.run_ya_make_test(cwd=yatest.common.test_source_path("data/test_recipe_container_exit_0"),
- args=["-A", "--test-type", "py3test"])
- res.verify_test("test.py", "test_simple", "OK")
-
- def test_recipe_container_fail(self):
- res = self.run_ya_make_test(cwd=yatest.common.test_source_path("data/test_recipe_container_fail"),
- args=["-A", "--test-type", "py3test"])
- assert "DockerComposeRecipeException" in res.err
- assert "Has failed containers" in res.err
- assert "srv1" in res.err
diff --git a/library/recipes/docker_compose/test/acceptance/ya.make b/library/recipes/docker_compose/test/acceptance/ya.make
deleted file mode 100644
index 069ac4257c..0000000000
--- a/library/recipes/docker_compose/test/acceptance/ya.make
+++ /dev/null
@@ -1,40 +0,0 @@
-
-PY3TEST()
-
-TEST_SRCS(
- test_docker_compose.py
-)
-
-INCLUDE(${ARCADIA_ROOT}/library/recipes/docker_compose/large.inc)
-INCLUDE(${ARCADIA_ROOT}/devtools/ya/chameleon_bin/recipe.inc)
-
-TAG(
- ya:external
- ya:force_sandbox
- ya:dirty
-)
-
-REQUIREMENTS(
- container:4467981730 # bionic with fuse allowed
- cpu:all
- dns:dns64
-)
-
-DATA(
- arcadia/library/recipes/docker_compose/example
- arcadia/library/recipes/docker_compose/example_with_context
- arcadia/library/recipes/docker_compose/example_test_container
- arcadia/library/recipes/docker_compose/example_with_recipe_config
-)
-
-PEERDIR(
- devtools/ya/test/tests/lib/common
-)
-
-DEPENDS(
- devtools/ya/test/programs/test_tool/bin
- devtools/ya/test/programs/test_tool/bin3
- devtools/ymake/bin
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/ut/context.yml b/library/recipes/docker_compose/test/ut/context.yml
deleted file mode 100644
index 655bd657aa..0000000000
--- a/library/recipes/docker_compose/test/ut/context.yml
+++ /dev/null
@@ -1,9 +0,0 @@
-context1:
- - arcadia://library/recipes/docker_compose/test/ut/data/dir1: dir1
- - arcadia://library/recipes/docker_compose/test/ut/data/dir2: dir1/dir2
- - arcadia://library/recipes/docker_compose/test/ut/data/file1.txt: file1.txt
- - arcadia://library/recipes/docker_compose/test/ut/data/file2.txt: dir1/file2.txt
- - build://devtools/dummy_arcadia/hello_world/hello_world: dir1/hello
-
-context2:
- - arcadia://library/recipes/docker_compose/test/ut/data/file1.txt: file1.txt
\ No newline at end of file
diff --git a/library/recipes/docker_compose/test/ut/data/dir1/file3.txt b/library/recipes/docker_compose/test/ut/data/dir1/file3.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/library/recipes/docker_compose/test/ut/data/dir1/file3.txt
+++ /dev/null
diff --git a/library/recipes/docker_compose/test/ut/data/dir2/file4.txt b/library/recipes/docker_compose/test/ut/data/dir2/file4.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/library/recipes/docker_compose/test/ut/data/dir2/file4.txt
+++ /dev/null
diff --git a/library/recipes/docker_compose/test/ut/data/file1.txt b/library/recipes/docker_compose/test/ut/data/file1.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/library/recipes/docker_compose/test/ut/data/file1.txt
+++ /dev/null
diff --git a/library/recipes/docker_compose/test/ut/data/file2.txt b/library/recipes/docker_compose/test/ut/data/file2.txt
deleted file mode 100644
index e69de29bb2..0000000000
--- a/library/recipes/docker_compose/test/ut/data/file2.txt
+++ /dev/null
diff --git a/library/recipes/docker_compose/test/ut/init_dir/init.txt b/library/recipes/docker_compose/test/ut/init_dir/init.txt
deleted file mode 100644
index a0a41ab1c0..0000000000
--- a/library/recipes/docker_compose/test/ut/init_dir/init.txt
+++ /dev/null
@@ -1 +0,0 @@
-init.txt
\ No newline at end of file
diff --git a/library/recipes/docker_compose/test/ut/test_docker_context.py b/library/recipes/docker_compose/test/ut/test_docker_context.py
deleted file mode 100644
index 2e8008afb6..0000000000
--- a/library/recipes/docker_compose/test/ut/test_docker_context.py
+++ /dev/null
@@ -1,31 +0,0 @@
-import os
-import yaml
-import yatest.common
-
-import library.recipes.docker_compose.lib as lib
-
-
-def test_create_context():
- root = yatest.common.work_path("context_root")
- with open(yatest.common.test_source_path("context.yml")) as f:
- ctx = yaml.safe_load(f)
- context = lib._create_context(ctx, yatest.common.test_source_path("init_dir"), root)
- assert "context1" in context
- expected_context_paths = {
- "context1": [
- "init.txt",
- "dir1/file3.txt",
- "dir1/dir2/file4.txt",
- "file1.txt",
- "dir1/file2.txt",
- "dir1/hello",
- ],
- "context2": [
- "init.txt",
- "file1.txt",
- ]
- }
- for c, expected_paths in expected_context_paths.iteritems():
- assert c in context
- for p in expected_paths:
- assert os.path.exists(os.path.join(root, c, p))
diff --git a/library/recipes/docker_compose/test/ut/ya.make b/library/recipes/docker_compose/test/ut/ya.make
deleted file mode 100644
index 008bf2beed..0000000000
--- a/library/recipes/docker_compose/test/ut/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-PY2TEST()
-
-TEST_SRCS(
- test_docker_context.py
-)
-
-PEERDIR(
- contrib/python/PyYAML
- library/recipes/docker_compose/lib
-)
-
-DEPENDS(
- devtools/dummy_arcadia/hello_world
-)
-
-END()
diff --git a/library/recipes/docker_compose/test/ya.make b/library/recipes/docker_compose/test/ya.make
deleted file mode 100644
index 1dd36ee3f6..0000000000
--- a/library/recipes/docker_compose/test/ya.make
+++ /dev/null
@@ -1,4 +0,0 @@
-RECURSE(
- acceptance
- ut
-)
diff --git a/library/recipes/docker_compose/ya.make b/library/recipes/docker_compose/ya.make
index c4a05a80cf..c15b87945f 100644
--- a/library/recipes/docker_compose/ya.make
+++ b/library/recipes/docker_compose/ya.make
@@ -12,11 +12,15 @@ PY_SRCS(
END()
-RECURSE_FOR_TESTS(
- example
- example_network_go
- example_test_container
- example_test_container_go
- example_with_context
- test
-)
+
+IF (NOT OPENSOURCE OR OPENSOURCE_PROJECT == "ya" OR AUTOCHECK)
+ # Don't export tests and examples to customers
+ RECURSE_FOR_TESTS(
+ example
+ example_network_go
+ example_test_container
+ example_test_container_go
+ example_with_context
+ test
+ )
+ENDIF()
diff --git a/tools/event2cpp/bin/ya.make b/tools/event2cpp/bin/ya.make
deleted file mode 100644
index c56449821b..0000000000
--- a/tools/event2cpp/bin/ya.make
+++ /dev/null
@@ -1,21 +0,0 @@
-SET(IDE_FOLDER "_Builders")
-
-PROGRAM(event2cpp)
-
-PEERDIR(
- ADDINCL contrib/libs/protobuf
- contrib/libs/protoc
- library/cpp/eventlog/proto
-)
-
-SRCDIR(
- tools/event2cpp
-)
-
-SRCS(
- proto_events.cpp
-)
-
-INCLUDE(${ARCADIA_ROOT}/build/prebuilt/tools/event2cpp/ya.make.induced_deps)
-
-END()
diff --git a/tools/event2cpp/proto_events.cpp b/tools/event2cpp/proto_events.cpp
deleted file mode 100644
index 66e9296d2c..0000000000
--- a/tools/event2cpp/proto_events.cpp
+++ /dev/null
@@ -1,893 +0,0 @@
-#include <google/protobuf/compiler/cpp/cpp_helpers.h>
-#include <google/protobuf/io/zero_copy_stream.h>
-#include <google/protobuf/io/printer.h>
-#include <google/protobuf/stubs/strutil.h>
-#include <google/protobuf/stubs/common.h>
-#include <google/protobuf/descriptor.h>
-#include <google/protobuf/descriptor.pb.h>
-
-#include <util/string/cast.h>
-#include <util/generic/singleton.h>
-#include <util/generic/yexception.h>
-
-#include <library/cpp/eventlog/proto/events_extension.pb.h>
-
-#include "proto_events.h"
-
-namespace NProtoBuf::NCompiler::NPlugins {
-
-namespace NInternal {
- using namespace google::protobuf;
- using namespace google::protobuf::compiler;
- using namespace google::protobuf::compiler::cpp;
-
- typedef std::map<TProtoStringType, TProtoStringType> TVariables;
-
- void CheckMessageId(size_t id, const TProtoStringType& name) {
- typedef std::map<size_t, TProtoStringType> TMessageIds;
- TMessageIds* ids = Singleton<TMessageIds>();
- TMessageIds::const_iterator it = ids->find(id);
-
- if (it != ids->end()) {
- throw yexception() << "Duplicate message_id = " << id
- << " in messages " << name
- << " and " << it->second << Endl;
- }
-
- (*ids)[id] = name;
- }
-
- void SetCommonFieldVariables(const FieldDescriptor* descriptor, TVariables* variables) {
- (*variables)["rname"] = descriptor->name();
- (*variables)["name"] = FieldName(descriptor);
- }
-
- TProtoStringType HeaderFileName(const FileDescriptor* file) {
- TProtoStringType basename = cpp::StripProto(file->name());
-
- return basename.append(".pb.h");
- }
-
- TProtoStringType SourceFileName(const FileDescriptor* file) {
- TProtoStringType basename = cpp::StripProto(file->name());
-
- return basename.append(".pb.cc");
- }
-
- void GeneratePrintingCycle(TVariables vars, TProtoStringType printTemplate, io::Printer* printer) {
- printer->Print("\n{\n");
- printer->Indent();
- printer->Print(vars,
- "NProtoBuf::$repeated_field_type$< $type$ >::const_iterator b = $name$().begin();\n"
- "NProtoBuf::$repeated_field_type$< $type$ >::const_iterator e = $name$().end();\n\n");
- printer->Print("output << \"[\";\n");
- printer->Print("if (b != e) {\n");
- vars["obj"] = "(*b++)";
- printer->Print(vars, printTemplate.c_str());
- printer->Print(";\n");
- printer->Print(vars,
- "for (NProtoBuf::$repeated_field_type$< $type$ >::const_iterator it = b; it != e; ++it) {\n");
- printer->Indent();
- printer->Print("output << \",\";\n");
- vars["obj"] = "(*it)";
- printer->Print(vars, printTemplate.c_str());
- printer->Print(";\n");
- printer->Outdent();
- printer->Print("}\n}\n");
- printer->Print("output << \"]\";\n");
- printer->Outdent();
- printer->Print("}\n");
- }
-
- class TFieldExtGenerator {
- public:
- TFieldExtGenerator(const FieldDescriptor* field)
- : Descriptor_(field)
- {
- SetCommonFieldVariables(Descriptor_, &Variables_);
- }
-
- virtual ~TFieldExtGenerator() {
- }
-
- virtual bool NeedProtobufMessageFieldPrinter() const {
- return false;
- }
-
- virtual void GenerateCtorArgument(io::Printer* printer) = 0;
- virtual void GenerateInitializer(io::Printer* printer, const TString& prefix) = 0;
- virtual void GeneratePrintingCode(io::Printer* printer) = 0;
- protected:
- const FieldDescriptor* Descriptor_;
- TVariables Variables_;
- };
-
- class TMessageFieldExtGenerator: public TFieldExtGenerator {
- public:
- TMessageFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = ClassName(Descriptor_->message_type(), true);
- Variables_["has_print_function"] = Descriptor_->message_type()->options().HasExtension(message_id) ? "true" : "false";
- }
-
- bool NeedProtobufMessageFieldPrinter() const override {
- return true;
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "const $type$& arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$mutable_$name$()->CopyFrom(arg_$name$);\n");
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- printer->Print("output << \"{\";\n");
- printer->Print(Variables_,
- "protobufMessageFieldPrinter.PrintProtobufMessageFieldToOutput<$type$, $has_print_function$>($name$(), escapedOutput);\n");
- printer->Print("output << \"}\";\n");
- }
- };
-
- class TMapFieldExtGenerator: public TFieldExtGenerator {
- public:
- TMapFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- }
-
- void GenerateCtorArgument(io::Printer* /* printer */) override {
- }
-
- void GenerateInitializer(io::Printer* /* printer */, const TString& /* prefix */) override {
- }
-
- void GeneratePrintingCode(io::Printer* /* printer */) override {
- }
- };
-
- class TRepeatedMessageFieldExtGenerator: public TFieldExtGenerator {
- public:
- TRepeatedMessageFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = ClassName(Descriptor_->message_type(), true);
- Variables_["repeated_field_type"] = "RepeatedPtrField";
- Variables_["has_print_function"] = Descriptor_->message_type()->options().HasExtension(message_id) ? "true" : "false";
- }
-
- bool NeedProtobufMessageFieldPrinter() const override {
- return true;
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "const $type$& arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$add_$name$()->CopyFrom(arg_$name$);\n");
- }
- void GeneratePrintingCode(io::Printer* printer) override {
- GeneratePrintingCycle(Variables_, "protobufMessageFieldPrinter.PrintProtobufMessageFieldToOutput<$type$, $has_print_function$>($obj$, escapedOutput)", printer);
- }
- };
-
- class TStringFieldExtGenerator: public TFieldExtGenerator {
- public:
- TStringFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["pointer_type"] = Descriptor_->type() == FieldDescriptor::TYPE_BYTES ? "void" : "char";
- Variables_["type"] = "TProtoStringType";
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- (Descriptor_->type() == FieldDescriptor::TYPE_BYTES ?
- "const $pointer_type$* arg_$name$, size_t arg_$name$_size" : "const $type$& arg_$name$")
- );
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(
- Variables_,
- Descriptor_->type() == FieldDescriptor::TYPE_BYTES ?
- "$prefix$set_$name$(arg_$name$, arg_$name$_size);\n" :
- "$prefix$set_$name$(arg_$name$);\n"
- );
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- Repr::ReprType fmt = Repr::none;
-
- if (Descriptor_->options().HasExtension(repr)) {
- fmt = Descriptor_->options().GetExtension(repr);
- }
-
- switch (fmt) {
- case Repr::as_base64:
- printer->Print(Variables_, "NProtoBuf::PrintAsBase64($name$(), output);\n");
- break;
-
- case Repr::none:
- /* TODO: proper error handling?*/
- default:
- printer->Print(Variables_, "escapedOutput << $name$();\n");
- break;
- }
- }
- };
-
- class TRepeatedStringFieldExtGenerator: public TFieldExtGenerator {
- public:
- TRepeatedStringFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["pointer_type"] = Descriptor_->type() == FieldDescriptor::TYPE_BYTES ? "void" : "char";
- Variables_["type"] = "TProtoStringType";
- Variables_["repeated_field_type"] = "RepeatedPtrField";
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- (Descriptor_->type() == FieldDescriptor::TYPE_BYTES ?
- "const $pointer_type$* arg_$name$, size_t arg_$name$_size": "const $type$& arg_$name$")
- );
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(
- Variables_,
- Descriptor_->type() == FieldDescriptor::TYPE_BYTES ?
- "$prefix$add_$name$(arg_$name$, arg_$name$_size);\n" :
- "$prefix$add_$name$(arg_$name$);\n"
- );
- }
- void GeneratePrintingCode(io::Printer* printer) override {
- GeneratePrintingCycle(Variables_, "output << \"\\\"\" << $obj$ << \"\\\"\"", printer);
- }
- };
-
- class TEnumFieldExtGenerator: public TFieldExtGenerator {
- public:
- TEnumFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = ClassName(Descriptor_->enum_type(), true);
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "$type$ arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$set_$name$(arg_$name$);\n");
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- printer->Print(Variables_,
- "output << $type$_Name($name$());\n");
- }
- };
-
- class TRepeatedEnumFieldExtGenerator: public TFieldExtGenerator {
- public:
- TRepeatedEnumFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = ClassName(Descriptor_->enum_type(), true);
- Variables_["repeated_field_type"] = "RepeatedField";
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "$type$ arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$add_$name$(arg_$name$);\n");
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- TStringStream pattern;
-
- TProtoStringType type = Variables_["type"];
- pattern << "output << " << type << "_Name(" << type << "($obj$))";
- Variables_["type"] = "int";
- GeneratePrintingCycle(Variables_, pattern.Str(), printer);
- Variables_["type"] = type;
- }
- };
-
- class TPrimitiveFieldExtGenerator: public TFieldExtGenerator {
- public:
- TPrimitiveFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = PrimitiveTypeName(Descriptor_->cpp_type());
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "$type$ arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$set_$name$(arg_$name$);\n");
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- Repr::ReprType fmt = Repr::none;
-
- if (Descriptor_->options().HasExtension(repr)) {
- fmt = Descriptor_->options().GetExtension(repr);
- }
-
- switch (fmt) {
- case Repr::as_bytes:
- printer->Print(Variables_, "NProtoBuf::PrintAsBytes($name$(), output);\n");
- break;
-
- case Repr::as_hex:
- printer->Print(Variables_, "NProtoBuf::PrintAsHex($name$(), output);\n");
- break;
-
- case Repr::none:
- /* TODO: proper error handling? */
- default:
- printer->Print(Variables_, "output << $name$();\n");
- break;
- }
- }
- };
-
- class TRepeatedPrimitiveFieldExtGenerator: public TFieldExtGenerator {
- public:
- TRepeatedPrimitiveFieldExtGenerator(const FieldDescriptor* field)
- : TFieldExtGenerator(field)
- {
- Variables_["type"] = PrimitiveTypeName(Descriptor_->cpp_type());
- Variables_["repeated_field_type"] = "RepeatedField";
- }
-
- void GenerateCtorArgument(io::Printer* printer) override {
- printer->Print(Variables_,
- "$type$ arg_$name$");
- }
-
- void GenerateInitializer(io::Printer* printer, const TString& prefix) override {
- Variables_["prefix"] = prefix;
- printer->Print(Variables_,
- "$prefix$add_$name$(arg_$name$);\n");
- }
-
- void GeneratePrintingCode(io::Printer* printer) override {
- GeneratePrintingCycle(Variables_, "output << $obj$", printer);
- }
- };
-
- std::unique_ptr<TFieldExtGenerator> MakeGenerator(const FieldDescriptor* field) {
- if (field->is_map()) {
- return std::make_unique<TMapFieldExtGenerator>(field);
- } else if (field->is_repeated()) {
- switch (field->cpp_type()) {
- case FieldDescriptor::CPPTYPE_MESSAGE:
- return std::make_unique<TRepeatedMessageFieldExtGenerator>(field);
- case FieldDescriptor::CPPTYPE_STRING:
- switch (field->options().ctype()) {
-                    default: // TRepeatedStringFieldExtGenerator handles unknown ctypes.
- case FieldOptions::STRING:
- return std::make_unique<TRepeatedStringFieldExtGenerator>(field);
- }
- case FieldDescriptor::CPPTYPE_ENUM:
- return std::make_unique<TRepeatedEnumFieldExtGenerator>(field);
- default:
- return std::make_unique<TRepeatedPrimitiveFieldExtGenerator>(field);
- }
- } else {
- switch (field->cpp_type()) {
- case FieldDescriptor::CPPTYPE_MESSAGE:
- return std::make_unique<TMessageFieldExtGenerator>(field);
- case FieldDescriptor::CPPTYPE_STRING:
- switch (field->options().ctype()) {
-                    default: // TStringFieldExtGenerator handles unknown ctypes.
- case FieldOptions::STRING:
- return std::make_unique<TStringFieldExtGenerator>(field);
- }
- case FieldDescriptor::CPPTYPE_ENUM:
- return std::make_unique<TEnumFieldExtGenerator>(field);
- default:
- return std::make_unique<TPrimitiveFieldExtGenerator>(field);
- }
- }
- }
-
- class TMessageExtGenerator {
- public:
- TMessageExtGenerator(const Descriptor* descriptor, OutputDirectory* outputDirectory)
- : Descriptor_(descriptor)
- , HasMessageId_(Descriptor_->options().HasExtension(message_id))
- , ClassName_(ClassName(Descriptor_, false))
- , OutputDirectory_(outputDirectory)
- , HasGeneratorWithProtobufMessageFieldPrinter_(false)
- , CanGenerateSpecialConstructor_(false)
- {
- NestedGenerators_.reserve(descriptor->nested_type_count());
- for (int i = 0; i < descriptor->nested_type_count(); i++) {
- NestedGenerators_.emplace_back(descriptor->nested_type(i), OutputDirectory_);
- }
-
- if (HasMessageId_) {
- FieldGenerators_.reserve(descriptor->field_count());
- for (int i = 0; i < descriptor->field_count(); i++) {
- FieldGenerators_.emplace_back(MakeGenerator(descriptor->field(i)));
- HasGeneratorWithProtobufMessageFieldPrinter_ |= FieldGenerators_.back()->NeedProtobufMessageFieldPrinter();
- }
- }
-
- {
- size_t intFieldCount = 0;
- size_t mapFieldCount = 0;
- size_t nonMapFieldCount = 0;
- for (int i = 0; i < Descriptor_->field_count(); ++i) {
- const FieldDescriptor* field = Descriptor_->field(i);
- if (field->is_map()) {
- ++mapFieldCount;
- } else {
- ++nonMapFieldCount;
- }
- switch (field->cpp_type()) {
- case FieldDescriptor::CPPTYPE_INT32:
- case FieldDescriptor::CPPTYPE_INT64:
- case FieldDescriptor::CPPTYPE_UINT32:
- case FieldDescriptor::CPPTYPE_UINT64:
- ++intFieldCount;
- break;
- default:
- break;
- }
- }
-
- CanGenerateSpecialConstructor_ = (
- // Certain field combinations would result in ambiguous constructor generation.
-                // Do not generate a special constructor for such combinations.
- (intFieldCount != nonMapFieldCount || nonMapFieldCount != 2) &&
-
-                // Generate the special constructor only if there is at least one non-map field.
- nonMapFieldCount > 0
- );
- }
-
- }
-
- void GenerateClassDefinitionExtension() {
- if (Descriptor_->options().HasExtension(realm_name) || Descriptor_->options().HasExtension(message_id)) {
- GeneratePseudonim();
- }
-
- if (!HasMessageId_) {
- return;
- }
-
- CheckMessageId(Descriptor_->options().GetExtension(message_id), ClassName_);
-
- TProtoStringType fileName = HeaderFileName(Descriptor_->file());
- TProtoStringType scope = "class_scope:" + Descriptor_->full_name();
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, scope));
- io::Printer printer(output.get(), '$');
-
- printer.Print("//Yandex events extension.\n");
- GenerateHeaderImpl(&printer);
-
- for (auto& nestedGenerator: NestedGenerators_) {
- nestedGenerator.GenerateClassDefinitionExtension();
- }
- }
-
- bool GenerateClassExtension() {
- TProtoStringType fileName = SourceFileName(Descriptor_->file());
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "namespace_scope"));
- io::Printer printer(output.get(), '$');
-
- bool hasEventExtension = GenerateSourceImpl(&printer);
-
- for (auto& nestedGenerator: NestedGenerators_) {
- hasEventExtension |= nestedGenerator.GenerateSourceImpl(&printer);
- }
-
- return hasEventExtension;
- }
-
- void GenerateRegistration(io::Printer* printer) {
- if (!HasMessageId_) {
- return;
- }
-
- TVariables vars;
- vars["classname"] = ClassName_;
-
- printer->Print(vars, "NProtoBuf::TEventFactory::Instance()->RegisterEvent($classname$::descriptor()->options().GetExtension(message_id), factory->GetPrototype($classname$::descriptor()), $classname$::Print);\n");
- }
-
- private:
- void GenerateHeaderImpl(io::Printer* printer) {
- TVariables vars;
- TProtoStringType mId(ToString(Descriptor_->options().GetExtension(message_id)));
- vars["classname"] = ClassName_;
- vars["messageid"] = mId.data();
- vars["superclass"] = SuperClassName(Descriptor_, Options{});
-
- printer->Print(vars,
- "enum {ID = $messageid$};\n\n");
-
- {
- /*
-             * Unconditionally generate the FromFields() factory method,
-             * so it can be used in template code.
- */
- printer->Print(vars, "static $classname$ FromFields(\n");
- GenerateCtorArgs(printer);
- printer->Print(");\n");
- }
-
- if (CanGenerateSpecialConstructor_) {
- printer->Print(vars, "$classname$(\n");
- GenerateCtorArgs(printer);
- printer->Print(");\n");
- }
-
- {
- printer->Print("void Print(IOutputStream& output, EFieldOutputFlags outputFlags = {}) const;\n");
- printer->Print("static void Print(const google::protobuf::Message* ev, IOutputStream& output, EFieldOutputFlags outputFlags = {});\n");
- }
- }
-
- void GeneratePseudonim() {
- TProtoStringType fileName = HeaderFileName(Descriptor_->file());
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "namespace_scope"));
- io::Printer printer(output.get(), '$');
-
- std::vector<TProtoStringType> realm_parts;
-
- if (Descriptor_->options().HasExtension(realm_name)) {
- SplitStringUsing(Descriptor_->options().GetExtension(realm_name), ".", &realm_parts);
- }
-
- if (realm_parts.size() > 0) printer.Print("\n");
-
- for (size_t i = 0; i < realm_parts.size(); ++i) {
- printer.Print("namespace $part$ {\n",
- "part", realm_parts[i]);
- }
-
- printer.Print("typedef $fullclassname$ T$classname$;\n",
- "fullclassname", FullClassName(Descriptor_),
- "classname", ClassName_);
-
- for (size_t i = realm_parts.size(); i > 0; --i) {
- printer.Print("} // namespace $part$\n",
- "part", realm_parts[i - 1]);
- }
- }
-
- TProtoStringType FullClassName(const Descriptor* descriptor) {
- TProtoStringType result;
- std::vector<TProtoStringType> parts;
-
- SplitStringUsing(descriptor->file()->package(), ".", &parts);
- for (size_t i = 0; i < parts.size(); ++i) {
- result += "::" + parts[i];
- }
-
- result += "::" + ClassName(descriptor, false);
-
- return result;
- }
-
- bool GenerateSourceImpl(io::Printer* printer) {
- if (!HasMessageId_) {
- return false;
- }
-
- TVariables vars;
- vars["classname"] = ClassName_;
-
- {
- // Generate static $classname$::FromFields impl.
- printer->Print(vars, "$classname$ $classname$::FromFields(\n");
- GenerateCtorArgs(printer);
- printer->Print(")\n");
-
- printer->Print("{\n");
-
- printer->Indent();
- printer->Print(vars, "$classname$ result;\n");
- GenerateFieldInitializers(printer, /* prefix = */ "result.");
- printer->Print("return result;\n");
- printer->Outdent();
-
- printer->Print("}\n\n");
- }
-
- if (CanGenerateSpecialConstructor_) {
- // Generate special constructor impl.
- printer->Print(vars, "$classname$::$classname$(\n");
- GenerateCtorArgs(printer);
- printer->Print(")\n");
-
- printer->Print("{\n");
-
- printer->Indent();
- printer->Print("SharedCtor();\n");
- GenerateFieldInitializers(printer, /* prefix = */ "");
- printer->Outdent();
-
- printer->Print("}\n\n");
- }
-
- {
- // Generate $classname$::Print impl.
- const size_t fieldCount = Descriptor_->field_count();
- if (fieldCount > 0) {
- printer->Print(vars,
- "void $classname$::Print(IOutputStream& output, EFieldOutputFlags outputFlags) const {\n");
- printer->Indent();
- printer->Print(
- "TEventFieldOutput escapedOutput{output, outputFlags};\n"
- "Y_UNUSED(escapedOutput);\n");
-
- if (HasGeneratorWithProtobufMessageFieldPrinter_) {
- printer->Print(
- "TEventProtobufMessageFieldPrinter protobufMessageFieldPrinter(EProtobufMessageFieldPrintMode::DEFAULT);\n");
- }
- } else {
- printer->Print(vars,
- "void $classname$::Print(IOutputStream& output, EFieldOutputFlags) const {\n");
- printer->Indent();
- }
-
- printer->Print(vars,
- "output << \"$classname$\";\n");
-
- for (size_t i = 0; i < fieldCount; ++i) {
- printer->Print("output << \"\\t\";\n");
- FieldGenerators_[i]->GeneratePrintingCode(printer);
- }
-
- printer->Outdent();
- printer->Print("}\n\n");
- }
-
- {
- // Generate static $classname$::Print impl.
- printer->Print(vars,
- "void $classname$::Print(const google::protobuf::Message* ev, IOutputStream& output, EFieldOutputFlags outputFlags) {\n");
- printer->Indent();
- printer->Print(vars,
- "const $classname$* This(CheckedCast<const $classname$*>(ev));\n");
- printer->Print(
- "This->Print(output, outputFlags);\n");
- printer->Outdent();
- printer->Print("}\n\n");
- }
-
- return true;
- }
-
- void GenerateCtorArgs(io::Printer* printer) {
- printer->Indent();
- const size_t fieldCount = Descriptor_->field_count();
- bool isFirst = true;
- for (size_t i = 0; i < fieldCount; ++i) {
- if (Descriptor_->field(i)->is_map()) {
- continue;
- }
- const char* delimiter = isFirst ? "" : ", ";
- isFirst = false;
- printer->Print(delimiter);
- FieldGenerators_[i]->GenerateCtorArgument(printer);
- }
- printer->Outdent();
- }
-
- void GenerateFieldInitializers(io::Printer* printer, const TString& prefix) {
- for (auto& fieldGeneratorHolder: FieldGenerators_) {
- fieldGeneratorHolder->GenerateInitializer(printer, prefix);
- }
- }
-
- private:
- const Descriptor* Descriptor_;
- const bool HasMessageId_;
- TProtoStringType ClassName_;
- OutputDirectory* OutputDirectory_;
- bool HasGeneratorWithProtobufMessageFieldPrinter_;
- bool CanGenerateSpecialConstructor_;
- std::vector<std::unique_ptr<TFieldExtGenerator>> FieldGenerators_;
- std::vector<TMessageExtGenerator> NestedGenerators_;
- };
-
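For orientation, GeneratePseudonim() above inserts a namespace alias into the generated header whenever realm_name is set. A hypothetical illustration (package, realm, and message names are invented; the shape follows the Print calls in that method and in FullClassName):

    // Hypothetical: message MyEvent in package NMyPackage with
    //     option (realm_name) = "NRealm.NSub";
    // produces roughly this alias at the header's namespace scope:
    namespace NRealm {
    namespace NSub {
    typedef ::NMyPackage::MyEvent TMyEvent;
    } // namespace NSub
    } // namespace NRealm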
- class TFileExtGenerator {
- public:
- TFileExtGenerator(const FileDescriptor* file, OutputDirectory* output_directory)
- : OutputDirectory_(output_directory)
- , File_(file)
- {
- MessageGenerators_.reserve(file->message_type_count());
- for (int i = 0; i < file->message_type_count(); i++) {
- MessageGenerators_.emplace_back(file->message_type(i), OutputDirectory_);
- }
- }
-
- void GenerateHeaderExtensions() {
- TProtoStringType fileName = HeaderFileName(File_);
-
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "includes"));
- io::Printer printer(output.get(), '$');
-
- printer.Print("#include <library/cpp/eventlog/event_field_output.h>\n");
- printer.Print("#include <library/cpp/eventlog/event_field_printer.h>\n");
-
- for (auto& messageGenerator: MessageGenerators_) {
- messageGenerator.GenerateClassDefinitionExtension();
- }
- }
-
- void GenerateSourceExtensions() {
- TProtoStringType fileName = SourceFileName(File_);
-
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "includes"));
- io::Printer printer(output.get(), '$');
- printer.Print("#include <google/protobuf/io/printer.h>\n");
- printer.Print("#include <google/protobuf/io/zero_copy_stream_impl_lite.h>\n");
- printer.Print("#include <google/protobuf/stubs/strutil.h>\n");
- printer.Print("#include <library/cpp/eventlog/events_extension.h>\n");
- printer.Print("#include <util/generic/cast.h>\n");
- printer.Print("#include <util/stream/output.h>\n");
-
- bool hasEventExtension = false;
-
- for (auto& messageGenerator: MessageGenerators_) {
- hasEventExtension |= messageGenerator.GenerateClassExtension();
- }
-
- if (hasEventExtension) {
- GenerateEventRegistrations();
- }
- }
-
- void GenerateEventRegistrations() {
- TVariables vars;
- TProtoStringType fileId = FilenameIdentifier(File_->name());
- vars["regfunction"] = "regevent_" + fileId;
- vars["regclassname"] = "TRegister_" + fileId;
- vars["regvarname"] = "registrator_" + fileId ;
- vars["filename"] = File_->name();
-
- {
- TProtoStringType fileName = SourceFileName(File_);
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "namespace_scope"));
- io::Printer printer(output.get(), '$');
-
- GenerateRegistrationFunction(vars, printer);
- GenerateRegistratorDefinition(vars, printer);
- }
-
- {
-
- TProtoStringType fileName = HeaderFileName(File_);
- std::unique_ptr<io::ZeroCopyOutputStream> output(
- OutputDirectory_->OpenForInsert(fileName, "namespace_scope"));
- io::Printer printer(output.get(), '$');
- GenerateRegistratorDeclaration(vars, printer);
- }
- }
-
- void GenerateRegistrationFunction(const TVariables& vars, io::Printer& printer) {
- printer.Print(vars,
- "void $regfunction$() {\n");
- printer.Indent();
-
- printer.Print("google::protobuf::MessageFactory* factory = google::protobuf::MessageFactory::generated_factory();\n\n");
- for (auto& messageGenerator: MessageGenerators_) {
- messageGenerator.GenerateRegistration(&printer);
- }
- printer.Outdent();
- printer.Print("}\n\n");
- }
-
- void GenerateRegistratorDeclaration(const TVariables& vars, io::Printer& printer) {
- printer.Print(vars, "\nclass $regclassname$ {\n");
- printer.Print("public:\n");
- printer.Indent();
- printer.Print(vars, "$regclassname$();\n");
- printer.Outdent();
- printer.Print("private:\n");
- printer.Indent();
- printer.Print("static bool Registered;\n");
- printer.Outdent();
- printer.Print(vars, "};\n");
- printer.Print(vars, "static $regclassname$ $regvarname$;\n\n");
- }
-
- void GenerateRegistratorDefinition(const TVariables& vars, io::Printer& printer) {
- printer.Print(vars, "$regclassname$::$regclassname$() {\n");
- printer.Indent();
- printer.Print("if (!Registered) {\n");
- printer.Indent();
- printer.Print(vars, "NProtoBuf::TEventFactory::Instance()->ScheduleRegistration(&$regfunction$);\n");
- printer.Print("Registered = true;\n");
- printer.Outdent();
- printer.Print("}\n");
- printer.Outdent();
- printer.Print("}\n\n");
- printer.Print(vars, "bool $regclassname$::Registered;\n\n");
- }
- private:
- OutputDirectory* OutputDirectory_;
- const FileDescriptor* File_;
- std::vector<TMessageExtGenerator> MessageGenerators_;
- };
-}
-
- bool TProtoEventExtensionGenerator::Generate(const google::protobuf::FileDescriptor* file,
- const TProtoStringType& parameter,
- google::protobuf::compiler::OutputDirectory* outputDirectory,
- TProtoStringType* error) const {
- Y_UNUSED(parameter);
- Y_UNUSED(error);
-
- NInternal::TFileExtGenerator fileGenerator(file, outputDirectory);
-
- // Generate header.
- fileGenerator.GenerateHeaderExtensions();
-
- // Generate cc file.
- fileGenerator.GenerateSourceExtensions();
-
- return true;
- }
-
-} // namespace NProtoBuf::NCompiler::NPlugins
-
-int main(int argc, char* argv[]) {
-#ifdef _MSC_VER
- // Don't print a silly message or stick a modal dialog box in my face,
- // please.
- _set_abort_behavior(0u, ~0u);
-#endif // _MSC_VER
-
- try {
- NProtoBuf::NCompiler::NPlugins::TProtoEventExtensionGenerator generator;
- return google::protobuf::compiler::PluginMain(argc, argv, &generator);
- } catch (yexception& e) {
- Cerr << e.what() << Endl;
- } catch (...) {
- Cerr << "Unknown error in TProtoEventExtensionGenerator" << Endl;
- }
-
- return 1;
-}
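To make the generated output easier to picture, here is a hypothetical sketch of what this plugin inserts into the header for an event message (message name, field name, and message id are all invented; the shapes mirror the Print calls in GenerateHeaderImpl and GenerateRegistration):

    // Hypothetical class_scope insertion for a message MyEvent carrying
    //     option (message_id) = 4242;
    // and a single string field `text`:
    enum {ID = 4242};

    static MyEvent FromFields(
        const TProtoStringType& arg_text);
    MyEvent(
        const TProtoStringType& arg_text);
    void Print(IOutputStream& output, EFieldOutputFlags outputFlags = {}) const;
    static void Print(const google::protobuf::Message* ev, IOutputStream& output, EFieldOutputFlags outputFlags = {});

    // The matching registration line emitted into the file's registration function:
    NProtoBuf::TEventFactory::Instance()->RegisterEvent(MyEvent::descriptor()->options().GetExtension(message_id), factory->GetPrototype(MyEvent::descriptor()), MyEvent::Print);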
diff --git a/tools/event2cpp/proto_events.h b/tools/event2cpp/proto_events.h
deleted file mode 100644
index 628b4856af..0000000000
--- a/tools/event2cpp/proto_events.h
+++ /dev/null
@@ -1,20 +0,0 @@
-#pragma once
-
-#include <google/protobuf/compiler/plugin.h>
-#include <google/protobuf/compiler/code_generator.h>
-#include <google/protobuf/stubs/common.h>
-
-namespace NProtoBuf::NCompiler::NPlugins {
-
-class TProtoEventExtensionGenerator : public google::protobuf::compiler::CodeGenerator {
- public:
- TProtoEventExtensionGenerator() {}
- ~TProtoEventExtensionGenerator() override {}
-
- bool Generate(const google::protobuf::FileDescriptor* file,
- const TProtoStringType& parameter,
- google::protobuf::compiler::OutputDirectory* output_directory,
- TProtoStringType* error) const override;
-};
-
-} // namespace NProtoBuf::NCompiler::NPlugins
diff --git a/tools/event2cpp/ya.make b/tools/event2cpp/ya.make
deleted file mode 100644
index 9a89909346..0000000000
--- a/tools/event2cpp/ya.make
+++ /dev/null
@@ -1,13 +0,0 @@
-SET(IDE_FOLDER "_Builders")
-
-IF (USE_PREBUILT_TOOLS)
- INCLUDE(${ARCADIA_ROOT}/build/prebuilt/tools/event2cpp/ya.make.prebuilt)
-ENDIF()
-
-IF (NOT PREBUILT)
- INCLUDE(${ARCADIA_ROOT}/tools/event2cpp/bin/ya.make)
-ENDIF()
-
-RECURSE(
- bin
-)
diff --git a/tools/struct2fieldcalc/parsestruct.rl b/tools/struct2fieldcalc/parsestruct.rl
deleted file mode 100644
index eec9022a40..0000000000
--- a/tools/struct2fieldcalc/parsestruct.rl
+++ /dev/null
@@ -1,669 +0,0 @@
-#include <library/cpp/getopt/small/opt2.h>
-
-#include <library/cpp/deprecated/fgood/fgood.h>
-#include <util/generic/hash.h>
-#include <util/string/split.h>
-#include <util/string/printf.h>
-
-#include <map>
-#include <set>
-#include <stdarg.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <string.h>
-#include <vector>
-
-#include <fcntl.h>
-
-// For ragel
-static char *tokstart, *tokend;
-static int act, cs;
-
-static int curly_level = 0;
-static int wanted_curly_level = -1;
-static int wanted_curly_level2 = -1; // for nested anonymous union / struct
-
-static char *dmp_name_start = 0;
-static char *dmp_name_end = 0;
-static char *dmp_from_start = 0;
-static char *dmp_from_end = 0;
-static char *dmp_to_start = 0;
-static char *dmp_to_end = 0;
-static char *dmp_mode_start = 0;
-
-// Parser state
-static TString current_struct;
-static TString current_enum;
-static TString last_expression;
-
-int line;
-int last_entry_line;
-TString current_file;
-
-// List of all emitted callbacks
-typedef std::vector<std::pair<TString, int> > callback_list;
-static callback_list getvals;
-static callback_list prints;
-
-// Maps enum name to the list of its enumerator names
-typedef std::vector<TString> enumerators_list;
-typedef std::map<TString, enumerators_list> enums_map;
-static enums_map enums;
-
-struct dump_items_list : public std::vector<std::pair<TString, TString> > {
- std::set<TString> wanted_enums;
- std::vector<TString> efunctions;
-};
-typedef std::map<TString, dump_items_list> structs_map;
-static structs_map structs;
-
-// Token types for parser
-enum TokenType {
- TT_IDENTIFIER,
- TT_ENUM,
- TT_STRUCT,
- TT_UNION,
- TT_CONST,
- TT_PUBLIC,
- TT_DECIMAL,
- TT_COMMA,
- TT_EQUALS,
- TT_SEMICOLON,
- TT_COLON,
- TT_OPENSQUARE,
- TT_CLOSESQUARE,
- TT_OPENPAREN,
- TT_CLOSEPAREN,
- TT_OPENCURLY,
- TT_CLOSECURLY,
- TT_PIPE,
- TT_COMPLEXTYPE,
-
- TT_OTHER,
-};
-
-// Structure for single token found by ragel
-struct Token {
- Token(int t, char *begin, int length): type(t), string(begin, length) {}
-
- bool check(int t) {
- return t == type;
- }
-
- int type;
- TString string;
-};
-
-// Stack of currently accumulated tokens
-typedef std::vector<Token> TokenStack;
-TokenStack token_stack;
-
-// Parser state
-static enum {
- DEFAULT,
- ENUM,
- STRUCT,
-} state = DEFAULT;
-
-void errexit(const char *fmt, ...) {
- fprintf(stderr, "%s:%d: ", current_file.data(), line);
-
- va_list ap;
- va_start(ap, fmt);
- vfprintf(stderr, fmt, ap);
- va_end(ap);
-
- fprintf(stderr, "\n");
-
- exit(1);
-}
-
-void emit_entry(const TString &name, bool nosave, const char *fmt, ...) {
- char buffer[1024];
-
- TString expression;
-
- va_list ap;
- va_start(ap, fmt);
- if (vsnprintf(buffer, sizeof(buffer) - 1, fmt, ap) >= (int)sizeof(buffer) - 1)
- errexit("expression buffer overflow");
- va_end(ap);
-
- expression = buffer;
-
- if (!nosave)
- last_expression = expression;
-
- last_entry_line = line;
- structs[current_struct].push_back(std::make_pair(name, expression));
-}
-
-void emit_message(const char *fmt, ...) {
- char buffer[1024];
-
- va_list ap;
- va_start(ap, fmt);
- if (vsnprintf(buffer, sizeof(buffer) - 1, fmt, ap) >= (int)sizeof(buffer) - 1)
- errexit("expression buffer overflow");
- va_end(ap);
-
- structs[current_struct].push_back(std::make_pair(TString(), buffer));
-}
-
-// Checks whether the tail of the token stack matches the given token type pattern
-bool check_token_stack(int count, ...) {
- int len = token_stack.size();
- if (len < count)
- return false;
-
- va_list ap;
- va_start(ap, count);
-
- for (int i = 0; i < count; i++) {
- if (!token_stack[i+len-count].check(va_arg(ap, int))) {
- va_end(ap);
- return false;
- }
- }
-
- va_end(ap);
- return true;
-}
-
-// Test all available patterns when parsing enums
-bool check_enum_patterns() {
- int len = token_stack.size();
-
- /* enumerator: IDENTIFIER = DECIMAL */
- if (check_token_stack(3, TT_IDENTIFIER, TT_EQUALS, TT_DECIMAL)) {
- enums[current_enum].push_back(token_stack[len - 3].string);
- return true;
- }
-
- /* enumerator: IDENTIFIER = IDENTIFIER [|] IDENTIFIER [[|] IDENTIFIER ...]*/
- if (check_token_stack(4, TT_IDENTIFIER, TT_PIPE, TT_IDENTIFIER, TT_COMMA)) {
- int i = token_stack.size() - 3;
- for (; i > 0; i--)
- if (!token_stack[i].check(TT_IDENTIFIER) && !token_stack[i].check(TT_PIPE))
- break;
- if (i > 0 && token_stack[i].check(TT_EQUALS) && token_stack[i - 1].check(TT_IDENTIFIER)) {
- enums[current_enum].push_back(token_stack[i - 1].string);
- return true;
- }
- }
-
- return false;
-}
-
-// Test all available patterns when parsing structs
-bool check_struct_patterns() {
- int len = token_stack.size();
-
- /* field: IDENTIFIER [IDENTIFIER ...] IDENTIFIER */
- if (check_token_stack(3, TT_IDENTIFIER, TT_IDENTIFIER, TT_SEMICOLON)) {
- emit_entry(token_stack[len - 2].string, false, "&((%s*)0)->%s", current_struct.data(), token_stack[len - 2].string.c_str());
- return true;
- }
-
- /* bitfield: IDENTIFIER [IDENTIFIER ...] IDENTIFIER : DECIMAL ; */
- if (check_token_stack(5, TT_IDENTIFIER, TT_IDENTIFIER, TT_COLON, TT_DECIMAL, TT_SEMICOLON)) {
- const TString &type = token_stack[len - 5].string;
- const TString &name = token_stack[len - 4].string;
- structs[current_struct].efunctions.push_back(Sprintf("%s Get%sFrom%s(const %s *ptr){ return ptr->%s; }", type.data(), name.data(), current_struct.data(), current_struct.data(), name.data()));
- emit_entry(token_stack[len - 4].string, false, "dump_item((%s_ext_fn_t)Get%sFrom%s, \"%s\")", type.data(), name.data(), current_struct.data(), name.data());
- //errexit("bitfields currently unsupported");
- return true;
- }
-
- /* array field: IDENTIFIER [IDENTIFIER ...] IDENTIFIER [ DECIMAL ] ; */
- if (check_token_stack(6, TT_IDENTIFIER, TT_IDENTIFIER, TT_OPENSQUARE, TT_DECIMAL, TT_CLOSESQUARE, TT_SEMICOLON)) {
- emit_entry(token_stack[len - 5].string, false, "dump_item(&((%s*)0)->%s[0], %s)", current_struct.data(), token_stack[len - 5].string.c_str(), token_stack[len - 3].string.c_str());
- return true;
- }
-
- /* array field: char [IDENTIFIER ...] IDENTIFIER [ IDENTIFIER */
- if (check_token_stack(4, TT_IDENTIFIER, TT_IDENTIFIER, TT_OPENSQUARE, TT_IDENTIFIER) && token_stack[len - 4].string == "char") {
- emit_entry(token_stack[len - 3].string, false, "dump_item(&((%s*)0)->%s[0])", current_struct.data(), token_stack[len - 3].string.c_str());
- return true;
- }
-
- /* simple function: IDENTIFIER [IDENTIFIER ...] IDENTIFIER ( ) const { */
- if (check_token_stack(6, TT_IDENTIFIER, TT_IDENTIFIER, TT_OPENPAREN, TT_CLOSEPAREN, TT_CONST, TT_OPENCURLY)) {
- emit_entry(token_stack[len - 5].string, false, "(%s_fn_t)&%s::%s", token_stack[len-6].string.c_str(), current_struct.data(), token_stack[len - 5].string.c_str());
- return true;
- }
-
- /* special function: COMPLEXTYPE IDENTIFIER ( ) const { */
- if (check_token_stack(6, TT_COMPLEXTYPE, TT_IDENTIFIER, TT_OPENPAREN, TT_CLOSEPAREN, TT_CONST, TT_OPENCURLY)) {
- emit_entry(token_stack[len - 5].string, false, "(%s_fn_t)&%s::%s", /*token_stack[len-6].string.c_str()*/"str", current_struct.data(), token_stack[len - 5].string.c_str());
- return true;
- }
-
- /* special function: COMPLEXTYPE IDENTIFIER (TString &buf, COMPLEXTYPE IDENTIFIER) const { */
- if (check_token_stack(12, TT_COMPLEXTYPE, TT_IDENTIFIER, TT_OPENPAREN, TT_IDENTIFIER, TT_OTHER, TT_IDENTIFIER, TT_COMMA, TT_COMPLEXTYPE, TT_IDENTIFIER, TT_CLOSEPAREN, TT_CONST, TT_SEMICOLON)) {
- emit_entry(token_stack[len - 11].string, false, "(%s_fn_t)&%s::%s", /*token_stack[len-12].string.c_str()*/"strbuf_2", current_struct.data(), token_stack[len - 11].string.c_str());
- return true;
- }
-
- return false;
-}
-
-// Process single token
-void process_token(int type) {
- // Skip stuff with higher nesting level than we need
- if (wanted_curly_level > 0 && curly_level > wanted_curly_level2)
- return;
-
- // Add token to stack
- token_stack.push_back(Token(type, tokstart, tokend - tokstart));
- int len = token_stack.size();
-
- // Match end of token stack with patterns depending on mode
- switch(state) {
- case ENUM:
- if (check_enum_patterns())
- token_stack.clear();
- return;
-
- case STRUCT:
- if (check_struct_patterns())
- token_stack.clear();
- if (check_token_stack(2, TT_STRUCT, TT_OPENCURLY) || check_token_stack(2, TT_UNION, TT_OPENCURLY))
- wanted_curly_level2 = curly_level + 1;
- return;
-
- default: // state == DEFAULT
- {
- // enum encountered
- if (check_token_stack(3, TT_ENUM, TT_IDENTIFIER, TT_OPENCURLY)) {
- state = ENUM;
- current_enum = token_stack[len - 2].string;
- current_struct = "";
- wanted_curly_level = wanted_curly_level2 = curly_level + 1;
- token_stack.clear();
- return;
- }
-
- // struct encountered
- int id_pos = 0, base_pos = 0;
- if (check_token_stack(3, TT_STRUCT, TT_IDENTIFIER, TT_OPENCURLY))
- id_pos = len - 2;
- if (check_token_stack(5, TT_STRUCT, TT_IDENTIFIER, TT_COLON, TT_IDENTIFIER, TT_OPENCURLY))
- id_pos = len - 4, base_pos = len - 2;
- if (check_token_stack(6, TT_STRUCT, TT_IDENTIFIER, TT_COLON, TT_PUBLIC, TT_IDENTIFIER, TT_OPENCURLY))
- id_pos = len - 5, base_pos = len - 2;
-
- if (id_pos) {
- state = STRUCT;
- current_enum = "";
- current_struct = token_stack[id_pos].string;
-
- wanted_curly_level = wanted_curly_level2 = curly_level+1;
-
- if (structs.find(current_struct) != structs.end())
-                errexit("struct %s was already defined", current_struct.data());
-
- if (base_pos) {
- TString base_struct = token_stack[base_pos].string;
-
- // May duplicate dumped enums
- structs_map::iterator base = structs.find(token_stack[base_pos].string);
-
- if (base == structs.end())
- errexit("for struct %s, base struct %s was not found!\n", current_struct.data(), base_struct.data());
-
- // Copy members of base struct
- structs[current_struct].insert(structs[current_struct].end(), base->second.begin(), base->second.end());
- structs[current_struct].wanted_enums.insert(base->second.wanted_enums.begin(), base->second.wanted_enums.end());
- }
- emit_message("// members of struct %s...", current_struct.data());
- token_stack.clear();
- return;
- }
- current_enum = current_struct = "";
- }
- }
-}
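Before the scanner definition itself, a hypothetical input header may help to keep the patterns above concrete (all struct, enum, and field names are invented); the trailing comments are the special `@dmp`/`@nodmp` markers handled by the machine below:

    // Hypothetical header fed to struct2fieldcalc.
    enum EColor {
        COLOR_RED = 1,
        COLOR_BLUE = 2,
    };

    struct TDoc {
        int Size;
        int Color;         // @dmp val EColor eff_name s/^COLOR_/Color/
        int InternalField; // @nodmp
    };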
-
-%%{
- machine Scanner;
- write data nofinal;
-
- # Floating literals.
- fract_const = digit* '.' digit+ | digit+ '.';
- exponent = [eE] [+\-]? digit+;
- float_suffix = [flFL];
- identifier = [a-zA-Z_] [a-zA-Z0-9_]*;
- chrtype = "const" space+ "char" space* "*";
-
- c_comment :=
- ('\n' %{line++;} | any)* :>> '*/' @{ fgoto main; };
-
- main := |*
-
- # high priority single-char tokens
- '{' {
- process_token(TT_OPENCURLY);
- curly_level++;
- };
- '}' {
- curly_level--;
- process_token(TT_CLOSECURLY);
- if (curly_level < wanted_curly_level) {
- state = DEFAULT;
- wanted_curly_level = wanted_curly_level2 = -1;
- }
- if (curly_level < wanted_curly_level2) {
- wanted_curly_level2 = curly_level;
- }
- };
- ',' { process_token(TT_COMMA); };
- '=' { process_token(TT_EQUALS); };
- ';' { process_token(TT_SEMICOLON); };
- ':' { process_token(TT_COLON); };
- '[' { process_token(TT_OPENSQUARE); };
- ']' { process_token(TT_CLOSESQUARE); };
- '(' { process_token(TT_OPENPAREN); };
- ')' { process_token(TT_CLOSEPAREN); };
- '|' { process_token(TT_PIPE); };
-
- # Single and double literals.
- 'L'? "'" ( [^'\\\n] | /\\./ )* "'" {
- process_token(TT_OTHER);
- };
- 'L'? '"' ( [^"\\\n] | /\\./ )* '"' {
- process_token(TT_OTHER);
- };
-
- # Identifiers & reserved words
- "enum" { process_token(TT_ENUM); };
- "struct" { process_token(TT_STRUCT); };
- "union" { process_token(TT_UNION); };
- "const" { process_token(TT_CONST); };
- "public" { process_token(TT_PUBLIC); };
- chrtype { process_token(TT_COMPLEXTYPE); };
- identifier { process_token(TT_IDENTIFIER); };
-
- # Floating literals.
- fract_const exponent? float_suffix? | digit+ exponent float_suffix? {
- process_token(TT_OTHER);
- };
-
- # Integer decimal. Leading part buffered by float.
- ( '0' | [1-9] [0-9]* ) [ulUL]{0,3} {
- process_token(TT_DECIMAL);
- };
-
- # Integer octal. Leading part buffered by float.
- '0' [0-9]+ [ulUL]{0,2} {
- process_token(TT_OTHER);
- };
-
- # Integer hex. Leading 0 buffered by float.
- '0' ( 'x' [0-9a-fA-F]+ [ulUL]{0,2} ) {
- process_token(TT_OTHER);
- };
-
-    # Only buffer the second item, first buffered by symbol.
- '::' { process_token(TT_OTHER); };
- '==' { process_token(TT_OTHER); };
- '!=' { process_token(TT_OTHER); };
- '&&' { process_token(TT_OTHER); };
- '||' { process_token(TT_OTHER); };
- '*=' { process_token(TT_OTHER); };
- '/=' { process_token(TT_OTHER); };
- '%=' { process_token(TT_OTHER); };
- '+=' { process_token(TT_OTHER); };
- '-=' { process_token(TT_OTHER); };
- '&=' { process_token(TT_OTHER); };
- '^=' { process_token(TT_OTHER); };
- '|=' { process_token(TT_OTHER); };
- '++' { process_token(TT_OTHER); };
- '--' { process_token(TT_OTHER); };
- '->' { process_token(TT_OTHER); };
- '->*' { process_token(TT_OTHER); };
- '.*' { process_token(TT_OTHER); };
-
-    # Three char compounds, first item already buffered.
- '...' { process_token(TT_OTHER); };
-
- # Single char symbols.
- ( punct - [_"'] ) { process_token(TT_OTHER); };
-
- # Comments with special meaning
- '//' [^\n]* '@nodmp' [^\n]* '\n' {
- if (line != last_entry_line)
- errexit("cannot find which entry @nodmp belongs to (last entry was at line %d)", last_entry_line);
-
- structs[current_struct].pop_back();
- line++;
- };
-
- '//' space*
- ( '@dmp'
- >{
- dmp_name_start = dmp_from_start = dmp_to_start =
- dmp_name_end = dmp_from_end = dmp_to_end =
- dmp_mode_start = 0;
- }
- )
- space* ( ( 'bitmask' | 'val' ) > { dmp_mode_start = p; } ) space*
- ( identifier
- >{ dmp_name_start = p; }
- %{ dmp_name_end = p-1; }
- )
- space* 'eff_name' space* 's/^'
- ( [a-zA-Z0-9_]*
- >{ dmp_from_start = p; }
- %{ dmp_from_end = p-1; }
- )
- '/'
- ( [a-zA-Z0-9_]*
- >{ dmp_to_start = p; }
- %{ dmp_to_end = p-1; }
- )
- '/' [^\n]* '\n'
- {
- if (line != last_entry_line)
- errexit("cannot find which entry @dmp belongs to (last entry was at line %d)", last_entry_line);
-
- // @dmp processor
- TString enum_name = "";
- TString prefix_del = "";
- TString prefix_add = "";
-
- // Put enum name for current item to TString
- if (dmp_name_start && dmp_name_end && dmp_name_start < dmp_name_end)
- enum_name = TString(dmp_name_start, dmp_name_end-dmp_name_start+1);
-
- // Put prefix for current item to TString
- if (dmp_from_start && dmp_from_end && dmp_from_start < dmp_from_end)
- prefix_del = TString(dmp_from_start, dmp_from_end-dmp_from_start+1);
-
- // Put replace prefix for current item to TString
- if (dmp_to_start && dmp_to_end && dmp_to_start < dmp_to_end)
- prefix_add = TString(dmp_to_start, dmp_to_end-dmp_to_start+1);
-
- // Find corresponding enum
- enums_map::iterator en = enums.find(enum_name);
- if (en == enums.end())
- errexit("bad enum name: %s", enum_name.c_str());
-
- if (prefix_del == "" && prefix_add == "")
- errexit("enum name should be changed, /^// does nothing");
-
- if (!dmp_mode_start || (*dmp_mode_start != 'b' && *dmp_mode_start != 'v'))
- errexit("unknown enum alias mode");
-
- emit_message("// enum-aliases for last field: enum=%s, prefix=%s, replace=%s",
- enum_name.c_str(), prefix_del.c_str(), prefix_add.c_str());
-
- // Make sure we have enum constants dumped
- structs[current_struct].wanted_enums.insert(enum_name);
-
- // Emit a callback for all enumerators
- for (enumerators_list::iterator i = en->second.begin(); i != en->second.end(); ++i) {
- TString enumerator_name = *i;
-
-
- if (!enumerator_name.StartsWith(prefix_del))
- errexit("bad prefix %s for enumerator %s", prefix_del.c_str(), enumerator_name.c_str());
-
- enumerator_name.replace(0, prefix_del.length(), prefix_add);
-
- emit_entry(enumerator_name, true, "dump_item(%s, %s, %s)", last_expression.data(), i->data(), *dmp_mode_start == 'b' ? "true" : "false");
- }
- emit_message("// end enum-aliases");
-
- line++;
- };
-
- # Comments and whitespace.
- '/*' { fgoto c_comment; };
- '//' [^\n]* '\n' { line++; };
-
- '\n' { line++; };
- ( any - 33..126 );
-
- *|;
-}%%
-
-int main(int argc, char **argv) {
- TVector<TString> wanted_structures;
- TVector<TString> wanted_enums_v;
- TVector<TString> tmp;
-
- Opt2 opt(argc, argv, "s:e:ESR:");
- for (auto s : opt.MArg('s', "<name1,name2,...> - structs to process")) {
- Split(s, ",", tmp);
- wanted_structures.insert(wanted_structures.end(), tmp.begin(), tmp.end());
- }
- for (auto s : opt.MArg('e', "<name1,name2,...> - enums to process")) {
- Split(s, ",", tmp);
- wanted_enums_v.insert(wanted_enums_v.end(), tmp.begin(), tmp.end());
- }
- TString srcRoot = opt.Arg('R', "<dir> - project source root (to make includes relative to)", 0);
- bool dump_all_enums = opt.Has('E', " - dump all enums (don't need -e)");
- bool add_stream_out = opt.Has('S', " - generate Out<>() for IOutputStream output (slow)");
- opt.AutoUsageErr("include.h ...");
- if (wanted_structures.empty() && wanted_enums_v.empty() && !dump_all_enums)
- warnx("nothing to dump");
-
- if (srcRoot && srcRoot.back() != '/')
- srcRoot += '/';
- // Header
- printf("// THIS IS GENERATED FILE, DO NOT EDIT!\n");
- printf("// Generated by struct2fieldcalc with command line:");
- for (int i = 0; i < argc; i++) {
- TStringBuf arg = argv[i];
- if (srcRoot && arg.StartsWith(srcRoot)) {
- arg.Skip(srcRoot.size());
- printf(" ${ARCADIA_ROOT}/%.*s", (int)arg.size(), arg.data());
- } else
- printf(" %.*s", (int)arg.size(), arg.data());
- }
- printf("\n\n");
- printf("#include <library/cpp/fieldcalc/field_calc_int.h>\n\n");
-
- for (size_t arg = 0; arg < opt.Pos.size(); arg++) {
- current_file = opt.Pos[arg];
- line = 0;
- last_entry_line = 0;
-
- %% write init;
-
- // Open input file
- TFILEPtr f(opt.Pos[arg], "rb");
- TVector<char> buf(f.length());
- if (f.read(buf.begin(), 1, buf.size()) != buf.size())
- errexit("short read in input file");
- f.close();
-
- char *p = buf.begin();
- char *pe = buf.end();
-
- TStringBuf arc_path = opt.Pos[arg];
- if (srcRoot && arc_path.StartsWith(srcRoot))
- arc_path.Skip(srcRoot.size());
-
- printf("#include <%.*s>\n", (int)arc_path.size(), arc_path.data());
-
- line = 1;
-
- // Parse file, emits functions
- %% write exec;
-
- // Done
- if (cs == Scanner_error)
- errexit("parse error");
- }
- printf("\n");
-
- if (wanted_structures.empty())
- for (structs_map::iterator s = structs.begin(); s != structs.end(); ++s)
- wanted_structures.push_back(s->first);
-
- for (TVector<TString>::iterator ws = wanted_structures.begin(); ws != wanted_structures.end(); ++ws) {
- structs_map::iterator s = structs.find(*ws);
- if (s == structs.end())
- errx(1, "Structure %s not found in input files", ws->data());
-
- if (!s->second.efunctions.empty()) {
- printf("namespace {\n");
- for (auto f : s->second.efunctions)
- printf(" %s\n", f.data());
- printf("}\n");
- }
-
- printf("template <> std::pair<const named_dump_item*, size_t> get_named_dump_items<%s>() {\n", s->first.c_str());
- printf(" static named_dump_item items[] = {\n");
-
- std::set<TString> avail_names;
-
- // dump structure fields
- for (dump_items_list::iterator i = s->second.begin(); i != s->second.end(); ++i) {
- if (!i->first)
- printf(" %s\n", i->second.data());
- else {
- if (avail_names.find(i->first) != avail_names.end()) {
- warnx("Identifier %s is already used (while processing struct %s) - NOT DUMPING!", i->first.data(), ws->data());
- continue;
- }
- avail_names.insert(i->first);
- printf(" { \"%s\", %s },\n", i->first.data(), i->second.data());
- }
- }
-
- std::set<TString> &wanted_enums = s->second.wanted_enums;
- if (dump_all_enums)
- for (enums_map::iterator en = enums.begin(); en != enums.end(); ++en)
- wanted_enums.insert(en->first);
- else
- wanted_enums.insert(wanted_enums_v.begin(), wanted_enums_v.end());
-
- // dump enum values for the structure
- for (std::set<TString>::iterator wen = wanted_enums.begin(); wen != wanted_enums.end(); ++wen) {
- enums_map::iterator en = enums.find(*wen);
- if (en == enums.end())
- errx(1, "Unexpected: enum %s not found (while processing struct %s)", wen->data(), ws->data());
-
- printf(" // members of enum %s\n", en->first.c_str());
- for (enumerators_list::iterator e = en->second.begin(); e != en->second.end(); ++e) {
- if (avail_names.find(*e) != avail_names.end())
- errx(1, "Identifier %s is already used (while processing enum %s for struct %s)", e->data(), wen->data(), ws->data());
- printf(" { \"%s\", (long)%s },\n", e->data(), e->data());
- }
- }
- printf(" };\n\n");
- printf(" return std::make_pair(items, sizeof(items)/sizeof(*items));\n");
- printf("}\n\n");
- if (add_stream_out) {
- printf("template<>"
- "void Out<%s>(IOutputStream& os, TTypeTraits<%s>::TFuncParam s) {\n"
- " TFieldCalculator<%s>().DumpAll(os, s, \" \");\n"
- "}\n\n", s->first.data(), s->first.data(), s->first.data());
- }
- }
-
- return 0;
-}
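For reference, the generator's output is one get_named_dump_items<T>() specialization per requested struct. A hypothetical, abridged example of what the printf calls above produce for a plain `struct TDoc { int Size; int Weight; };` processed with `-s TDoc` (the include path is invented):

    // THIS IS GENERATED FILE, DO NOT EDIT!
    // Generated by struct2fieldcalc with command line: ...

    #include <library/cpp/fieldcalc/field_calc_int.h>

    #include <some/dir/doc.h>

    template <> std::pair<const named_dump_item*, size_t> get_named_dump_items<TDoc>() {
        static named_dump_item items[] = {
            // members of struct TDoc...
            { "Size", &((TDoc*)0)->Size },
            { "Weight", &((TDoc*)0)->Weight },
        };

        return std::make_pair(items, sizeof(items)/sizeof(*items));
    }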
diff --git a/tools/struct2fieldcalc/ya.make b/tools/struct2fieldcalc/ya.make
deleted file mode 100644
index a75d06b306..0000000000
--- a/tools/struct2fieldcalc/ya.make
+++ /dev/null
@@ -1,16 +0,0 @@
-PROGRAM()
-
-PEERDIR(
- library/cpp/getopt/small
- library/cpp/deprecated/fgood
-)
-
-INDUCED_DEPS(h+cpp
- ${ARCADIA_ROOT}/library/cpp/fieldcalc/field_calc_int.h
-)
-
-SRCS(
- parsestruct.rl
-)
-
-END()
diff --git a/vendor/github.com/dgryski/go-rendezvous/rdv.go b/vendor/github.com/dgryski/go-rendezvous/rdv.go
deleted file mode 100644
index 7a6f8203c6..0000000000
--- a/vendor/github.com/dgryski/go-rendezvous/rdv.go
+++ /dev/null
@@ -1,79 +0,0 @@
-package rendezvous
-
-type Rendezvous struct {
- nodes map[string]int
- nstr []string
- nhash []uint64
- hash Hasher
-}
-
-type Hasher func(s string) uint64
-
-func New(nodes []string, hash Hasher) *Rendezvous {
- r := &Rendezvous{
- nodes: make(map[string]int, len(nodes)),
- nstr: make([]string, len(nodes)),
- nhash: make([]uint64, len(nodes)),
- hash: hash,
- }
-
- for i, n := range nodes {
- r.nodes[n] = i
- r.nstr[i] = n
- r.nhash[i] = hash(n)
- }
-
- return r
-}
-
-func (r *Rendezvous) Lookup(k string) string {
- // short-circuit if we're empty
- if len(r.nodes) == 0 {
- return ""
- }
-
- khash := r.hash(k)
-
- var midx int
- var mhash = xorshiftMult64(khash ^ r.nhash[0])
-
- for i, nhash := range r.nhash[1:] {
- if h := xorshiftMult64(khash ^ nhash); h > mhash {
- midx = i + 1
- mhash = h
- }
- }
-
- return r.nstr[midx]
-}
-
-func (r *Rendezvous) Add(node string) {
- r.nodes[node] = len(r.nstr)
- r.nstr = append(r.nstr, node)
- r.nhash = append(r.nhash, r.hash(node))
-}
-
-func (r *Rendezvous) Remove(node string) {
- // find index of node to remove
- nidx := r.nodes[node]
-
-	// remove from the slices by swapping with the last element
-	l := len(r.nstr) - 1
- r.nstr[nidx] = r.nstr[l]
- r.nstr = r.nstr[:l]
-
- r.nhash[nidx] = r.nhash[l]
- r.nhash = r.nhash[:l]
-
- // update the map
- delete(r.nodes, node)
- moved := r.nstr[nidx]
- r.nodes[moved] = nidx
-}
-
-func xorshiftMult64(x uint64) uint64 {
- x ^= x >> 12 // a
- x ^= x << 25 // b
- x ^= x >> 27 // c
- return x * 2685821657736338717
-}
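Lookup above is plain highest-random-weight (rendezvous) hashing: each node's score for a key is xorshiftMult64(keyHash ^ nodeHash) and the highest score wins, so adding or removing a node only remaps the keys that node owns. A minimal C++ sketch of the same selection rule, for illustration only (the xorshift constants mirror the Go code; the hasher is supplied by the caller, as in the Go API):

    #include <cstdint>
    #include <string>
    #include <vector>

    // Same 64-bit finalizer as xorshiftMult64 above.
    static uint64_t XorshiftMult64(uint64_t x) {
        x ^= x >> 12;
        x ^= x << 25;
        x ^= x >> 27;
        return x * 2685821657736338717ULL;
    }

    // Returns the node with the highest combined score for the key,
    // or an empty string if there are no nodes.
    std::string RendezvousLookup(const std::vector<std::string>& nodes,
                                 const std::string& key,
                                 uint64_t (*hash)(const std::string&)) {
        if (nodes.empty()) {
            return {};
        }
        const uint64_t khash = hash(key);
        size_t best = 0;
        uint64_t bestScore = XorshiftMult64(khash ^ hash(nodes[0]));
        for (size_t i = 1; i < nodes.size(); ++i) {
            const uint64_t score = XorshiftMult64(khash ^ hash(nodes[i]));
            if (score > bestScore) {
                best = i;
                bestScore = score;
            }
        }
        return nodes[best];
    }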
diff --git a/vendor/github.com/dgryski/go-rendezvous/ya.make b/vendor/github.com/dgryski/go-rendezvous/ya.make
deleted file mode 100644
index 7cc1e13a7d..0000000000
--- a/vendor/github.com/dgryski/go-rendezvous/ya.make
+++ /dev/null
@@ -1,11 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(MIT)
-
-SRCS(rdv.go)
-
-GO_TEST_SRCS(rdv_test.go)
-
-END()
-
-RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/cluster.go b/vendor/github.com/go-redis/redis/v8/cluster.go
deleted file mode 100644
index a54f2f37ed..0000000000
--- a/vendor/github.com/go-redis/redis/v8/cluster.go
+++ /dev/null
@@ -1,1750 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "fmt"
- "math"
- "net"
- "runtime"
- "sort"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-var errClusterNoNodes = fmt.Errorf("redis: cluster has no nodes")
-
-// ClusterOptions are used to configure a cluster client and should be
-// passed to NewClusterClient.
-type ClusterOptions struct {
- // A seed list of host:port addresses of cluster nodes.
- Addrs []string
-
- // NewClient creates a cluster node client with provided name and options.
- NewClient func(opt *Options) *Client
-
- // The maximum number of retries before giving up. Command is retried
- // on network errors and MOVED/ASK redirects.
- // Default is 3 retries.
- MaxRedirects int
-
- // Enables read-only commands on slave nodes.
- ReadOnly bool
- // Allows routing read-only commands to the closest master or slave node.
- // It automatically enables ReadOnly.
- RouteByLatency bool
-	// Allows routing read-only commands to a random master or slave node.
- // It automatically enables ReadOnly.
- RouteRandomly bool
-
- // Optional function that returns cluster slots information.
-	// It is useful for manually creating a cluster of standalone Redis servers
-	// and load-balancing read/write operations between master and slave nodes.
-	// It can use a service like ZooKeeper to maintain configuration information
- // and Cluster.ReloadState to manually trigger state reloading.
- ClusterSlots func(context.Context) ([]ClusterSlot, error)
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
-
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- // PoolSize applies per cluster node and not for the whole cluster.
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-}
-
-func (opt *ClusterOptions) init() {
- if opt.MaxRedirects == -1 {
- opt.MaxRedirects = 0
- } else if opt.MaxRedirects == 0 {
- opt.MaxRedirects = 3
- }
-
- if opt.RouteByLatency || opt.RouteRandomly {
- opt.ReadOnly = true
- }
-
- if opt.PoolSize == 0 {
- opt.PoolSize = 5 * runtime.GOMAXPROCS(0)
- }
-
- switch opt.ReadTimeout {
- case -1:
- opt.ReadTimeout = 0
- case 0:
- opt.ReadTimeout = 3 * time.Second
- }
- switch opt.WriteTimeout {
- case -1:
- opt.WriteTimeout = 0
- case 0:
- opt.WriteTimeout = opt.ReadTimeout
- }
-
- if opt.MaxRetries == 0 {
- opt.MaxRetries = -1
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-
- if opt.NewClient == nil {
- opt.NewClient = NewClient
- }
-}
-
-func (opt *ClusterOptions) clientOptions() *Options {
- const disableIdleCheck = -1
-
- return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: disableIdleCheck,
-
- TLSConfig: opt.TLSConfig,
- // If ClusterSlots is populated, then we probably have an artificial
- // cluster whose nodes are not in clustering mode (otherwise there isn't
- // much use for ClusterSlots config). This means we cannot execute the
- // READONLY command against that node -- setting readOnly to false in such
- // situations in the options below will prevent that from happening.
- readOnly: opt.ReadOnly && opt.ClusterSlots == nil,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNode struct {
- Client *Client
-
- latency uint32 // atomic
- generation uint32 // atomic
- failing uint32 // atomic
-}
-
-func newClusterNode(clOpt *ClusterOptions, addr string) *clusterNode {
- opt := clOpt.clientOptions()
- opt.Addr = addr
- node := clusterNode{
- Client: clOpt.NewClient(opt),
- }
-
- node.latency = math.MaxUint32
- if clOpt.RouteByLatency {
- go node.updateLatency()
- }
-
- return &node
-}
-
-func (n *clusterNode) String() string {
- return n.Client.String()
-}
-
-func (n *clusterNode) Close() error {
- return n.Client.Close()
-}
-
-func (n *clusterNode) updateLatency() {
- const numProbe = 10
- var dur uint64
-
- for i := 0; i < numProbe; i++ {
- time.Sleep(time.Duration(10+rand.Intn(10)) * time.Millisecond)
-
- start := time.Now()
- n.Client.Ping(context.TODO())
- dur += uint64(time.Since(start) / time.Microsecond)
- }
-
- latency := float64(dur) / float64(numProbe)
- atomic.StoreUint32(&n.latency, uint32(latency+0.5))
-}
-
-func (n *clusterNode) Latency() time.Duration {
- latency := atomic.LoadUint32(&n.latency)
- return time.Duration(latency) * time.Microsecond
-}
-
-func (n *clusterNode) MarkAsFailing() {
- atomic.StoreUint32(&n.failing, uint32(time.Now().Unix()))
-}
-
-func (n *clusterNode) Failing() bool {
- const timeout = 15 // 15 seconds
-
- failing := atomic.LoadUint32(&n.failing)
- if failing == 0 {
- return false
- }
- if time.Now().Unix()-int64(failing) < timeout {
- return true
- }
- atomic.StoreUint32(&n.failing, 0)
- return false
-}
-
-func (n *clusterNode) Generation() uint32 {
- return atomic.LoadUint32(&n.generation)
-}
-
-func (n *clusterNode) SetGeneration(gen uint32) {
- for {
- v := atomic.LoadUint32(&n.generation)
- if gen < v || atomic.CompareAndSwapUint32(&n.generation, v, gen) {
- break
- }
- }
-}
-
-//------------------------------------------------------------------------------
-
-type clusterNodes struct {
- opt *ClusterOptions
-
- mu sync.RWMutex
- addrs []string
- nodes map[string]*clusterNode
- activeAddrs []string
- closed bool
-
- _generation uint32 // atomic
-}
-
-func newClusterNodes(opt *ClusterOptions) *clusterNodes {
- return &clusterNodes{
- opt: opt,
-
- addrs: opt.Addrs,
- nodes: make(map[string]*clusterNode),
- }
-}
-
-func (c *clusterNodes) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
- c.closed = true
-
- var firstErr error
- for _, node := range c.nodes {
- if err := node.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- c.nodes = nil
- c.activeAddrs = nil
-
- return firstErr
-}
-
-func (c *clusterNodes) Addrs() ([]string, error) {
- var addrs []string
-
- c.mu.RLock()
- closed := c.closed //nolint:ifshort
- if !closed {
- if len(c.activeAddrs) > 0 {
- addrs = c.activeAddrs
- } else {
- addrs = c.addrs
- }
- }
- c.mu.RUnlock()
-
- if closed {
- return nil, pool.ErrClosed
- }
- if len(addrs) == 0 {
- return nil, errClusterNoNodes
- }
- return addrs, nil
-}
-
-func (c *clusterNodes) NextGeneration() uint32 {
- return atomic.AddUint32(&c._generation, 1)
-}
-
-// GC removes unused nodes.
-func (c *clusterNodes) GC(generation uint32) {
- //nolint:prealloc
- var collected []*clusterNode
-
- c.mu.Lock()
-
- c.activeAddrs = c.activeAddrs[:0]
- for addr, node := range c.nodes {
- if node.Generation() >= generation {
- c.activeAddrs = append(c.activeAddrs, addr)
- if c.opt.RouteByLatency {
- go node.updateLatency()
- }
- continue
- }
-
- delete(c.nodes, addr)
- collected = append(collected, node)
- }
-
- c.mu.Unlock()
-
- for _, node := range collected {
- _ = node.Client.Close()
- }
-}
-
-func (c *clusterNodes) GetOrCreate(addr string) (*clusterNode, error) {
- node, err := c.get(addr)
- if err != nil {
- return nil, err
- }
- if node != nil {
- return node, nil
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- node, ok := c.nodes[addr]
- if ok {
- return node, nil
- }
-
- node = newClusterNode(c.opt, addr)
-
- c.addrs = appendIfNotExists(c.addrs, addr)
- c.nodes[addr] = node
-
- return node, nil
-}
-
-func (c *clusterNodes) get(addr string) (*clusterNode, error) {
- var node *clusterNode
- var err error
- c.mu.RLock()
- if c.closed {
- err = pool.ErrClosed
- } else {
- node = c.nodes[addr]
- }
- c.mu.RUnlock()
- return node, err
-}
-
-func (c *clusterNodes) All() ([]*clusterNode, error) {
- c.mu.RLock()
- defer c.mu.RUnlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- cp := make([]*clusterNode, 0, len(c.nodes))
- for _, node := range c.nodes {
- cp = append(cp, node)
- }
- return cp, nil
-}
-
-func (c *clusterNodes) Random() (*clusterNode, error) {
- addrs, err := c.Addrs()
- if err != nil {
- return nil, err
- }
-
- n := rand.Intn(len(addrs))
- return c.GetOrCreate(addrs[n])
-}
-
-//------------------------------------------------------------------------------
-
-type clusterSlot struct {
- start, end int
- nodes []*clusterNode
-}
-
-type clusterSlotSlice []*clusterSlot
-
-func (p clusterSlotSlice) Len() int {
- return len(p)
-}
-
-func (p clusterSlotSlice) Less(i, j int) bool {
- return p[i].start < p[j].start
-}
-
-func (p clusterSlotSlice) Swap(i, j int) {
- p[i], p[j] = p[j], p[i]
-}
-
-type clusterState struct {
- nodes *clusterNodes
- Masters []*clusterNode
- Slaves []*clusterNode
-
- slots []*clusterSlot
-
- generation uint32
- createdAt time.Time
-}
-
-func newClusterState(
- nodes *clusterNodes, slots []ClusterSlot, origin string,
-) (*clusterState, error) {
- c := clusterState{
- nodes: nodes,
-
- slots: make([]*clusterSlot, 0, len(slots)),
-
- generation: nodes.NextGeneration(),
- createdAt: time.Now(),
- }
-
- originHost, _, _ := net.SplitHostPort(origin)
- isLoopbackOrigin := isLoopback(originHost)
-
- for _, slot := range slots {
- var nodes []*clusterNode
- for i, slotNode := range slot.Nodes {
- addr := slotNode.Addr
- if !isLoopbackOrigin {
- addr = replaceLoopbackHost(addr, originHost)
- }
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return nil, err
- }
-
- node.SetGeneration(c.generation)
- nodes = append(nodes, node)
-
- if i == 0 {
- c.Masters = appendUniqueNode(c.Masters, node)
- } else {
- c.Slaves = appendUniqueNode(c.Slaves, node)
- }
- }
-
- c.slots = append(c.slots, &clusterSlot{
- start: slot.Start,
- end: slot.End,
- nodes: nodes,
- })
- }
-
- sort.Sort(clusterSlotSlice(c.slots))
-
- time.AfterFunc(time.Minute, func() {
- nodes.GC(c.generation)
- })
-
- return &c, nil
-}
-
-func replaceLoopbackHost(nodeAddr, originHost string) string {
- nodeHost, nodePort, err := net.SplitHostPort(nodeAddr)
- if err != nil {
- return nodeAddr
- }
-
- nodeIP := net.ParseIP(nodeHost)
- if nodeIP == nil {
- return nodeAddr
- }
-
- if !nodeIP.IsLoopback() {
- return nodeAddr
- }
-
-	// Use the origin host, which is not a loopback address, together with the node port.
- return net.JoinHostPort(originHost, nodePort)
-}
-
-func isLoopback(host string) bool {
- ip := net.ParseIP(host)
- if ip == nil {
- return true
- }
- return ip.IsLoopback()
-}
-
-func (c *clusterState) slotMasterNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) > 0 {
- return nodes[0], nil
- }
- return c.nodes.Random()
-}
-
-func (c *clusterState) slotSlaveNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- switch len(nodes) {
- case 0:
- return c.nodes.Random()
- case 1:
- return nodes[0], nil
- case 2:
- if slave := nodes[1]; !slave.Failing() {
- return slave, nil
- }
- return nodes[0], nil
- default:
- var slave *clusterNode
- for i := 0; i < 10; i++ {
- n := rand.Intn(len(nodes)-1) + 1
- slave = nodes[n]
- if !slave.Failing() {
- return slave, nil
- }
- }
-
- // All slaves are loading - use master.
- return nodes[0], nil
- }
-}
-
-func (c *clusterState) slotClosestNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) == 0 {
- return c.nodes.Random()
- }
-
- var node *clusterNode
- for _, n := range nodes {
- if n.Failing() {
- continue
- }
- if node == nil || n.Latency() < node.Latency() {
- node = n
- }
- }
- if node != nil {
- return node, nil
- }
-
-	// If all nodes are failing, return a random node.
- return c.nodes.Random()
-}
-
-func (c *clusterState) slotRandomNode(slot int) (*clusterNode, error) {
- nodes := c.slotNodes(slot)
- if len(nodes) == 0 {
- return c.nodes.Random()
- }
- if len(nodes) == 1 {
- return nodes[0], nil
- }
- randomNodes := rand.Perm(len(nodes))
- for _, idx := range randomNodes {
- if node := nodes[idx]; !node.Failing() {
- return node, nil
- }
- }
- return nodes[randomNodes[0]], nil
-}
-
-func (c *clusterState) slotNodes(slot int) []*clusterNode {
- i := sort.Search(len(c.slots), func(i int) bool {
- return c.slots[i].end >= slot
- })
- if i >= len(c.slots) {
- return nil
- }
- x := c.slots[i]
- if slot >= x.start && slot <= x.end {
- return x.nodes
- }
- return nil
-}
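slotNodes relies on c.slots being kept sorted by range start (see newClusterState above): a binary search finds the first range whose end is >= slot, and a bounds check then confirms the slot really falls inside it. The same idea in a short C++ sketch (types and field names are made up for illustration):

    #include <algorithm>
    #include <vector>

    struct TSlotRange {
        int Start = 0;
        int End = 0;
        int NodeId = 0; // stand-in for the node list attached to the range
    };

    // Returns the owner of `slot`, or -1 if no range covers it.
    // `ranges` must be sorted by Start (ranges are disjoint, so also by End).
    int FindSlotOwner(const std::vector<TSlotRange>& ranges, int slot) {
        auto it = std::lower_bound(
            ranges.begin(), ranges.end(), slot,
            [](const TSlotRange& r, int s) { return r.End < s; });
        if (it == ranges.end() || slot < it->Start || slot > it->End) {
            return -1;
        }
        return it->NodeId;
    }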
-
-//------------------------------------------------------------------------------
-
-type clusterStateHolder struct {
- load func(ctx context.Context) (*clusterState, error)
-
- state atomic.Value
- reloading uint32 // atomic
-}
-
-func newClusterStateHolder(fn func(ctx context.Context) (*clusterState, error)) *clusterStateHolder {
- return &clusterStateHolder{
- load: fn,
- }
-}
-
-func (c *clusterStateHolder) Reload(ctx context.Context) (*clusterState, error) {
- state, err := c.load(ctx)
- if err != nil {
- return nil, err
- }
- c.state.Store(state)
- return state, nil
-}
-
-func (c *clusterStateHolder) LazyReload() {
- if !atomic.CompareAndSwapUint32(&c.reloading, 0, 1) {
- return
- }
- go func() {
- defer atomic.StoreUint32(&c.reloading, 0)
-
- _, err := c.Reload(context.Background())
- if err != nil {
- return
- }
- time.Sleep(200 * time.Millisecond)
- }()
-}
-
-func (c *clusterStateHolder) Get(ctx context.Context) (*clusterState, error) {
- v := c.state.Load()
- if v == nil {
- return c.Reload(ctx)
- }
-
- state := v.(*clusterState)
- if time.Since(state.createdAt) > 10*time.Second {
- c.LazyReload()
- }
- return state, nil
-}
-
-func (c *clusterStateHolder) ReloadOrGet(ctx context.Context) (*clusterState, error) {
- state, err := c.Reload(ctx)
- if err == nil {
- return state, nil
- }
- return c.Get(ctx)
-}
-
-//------------------------------------------------------------------------------
-
-type clusterClient struct {
- opt *ClusterOptions
- nodes *clusterNodes
- state *clusterStateHolder //nolint:structcheck
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
-// ClusterClient is a Redis Cluster client representing a pool of zero
-// or more underlying connections. It's safe for concurrent use by
-// multiple goroutines.
-type ClusterClient struct {
- *clusterClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClusterClient returns a Redis Cluster client as described in
-// http://redis.io/topics/cluster-spec.
-func NewClusterClient(opt *ClusterOptions) *ClusterClient {
- opt.init()
-
- c := &ClusterClient{
- clusterClient: &clusterClient{
- opt: opt,
- nodes: newClusterNodes(opt),
- },
- ctx: context.Background(),
- }
- c.state = newClusterStateHolder(c.loadState)
- c.cmdsInfoCache = newCmdsInfoCache(c.cmdsInfo)
- c.cmdable = c.Process
-
- if opt.IdleCheckFrequency > 0 {
- go c.reaper(opt.IdleCheckFrequency)
- }
-
- return c
-}
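// A minimal usage sketch for NewClusterClient, assuming user code that imports
// "context" and "github.com/go-redis/redis/v8"; the addresses below are placeholders.
func exampleNewClusterClient(ctx context.Context) error {
	rdb := redis.NewClusterClient(&redis.ClusterOptions{
		Addrs: []string{":7000", ":7001", ":7002"},
	})
	defer rdb.Close()

	// SET is routed to the master that owns the key's hash slot.
	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		return err
	}
	// With ClusterOptions.ReadOnly, read-only commands may be served by replicas.
	_, err := rdb.Get(ctx, "key").Result()
	return err
}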
-
-func (c *ClusterClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *ClusterClient) WithContext(ctx context.Context) *ClusterClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *ClusterClient) Options() *ClusterOptions {
- return c.opt
-}
-
-// ReloadState reloads the cluster state. If available, it calls the ClusterSlots
-// func to get the cluster slots information.
-func (c *ClusterClient) ReloadState(ctx context.Context) {
- c.state.LazyReload()
-}
-
-// Close closes the cluster client, releasing any open resources.
-//
-// It is rare to Close a ClusterClient, as the ClusterClient is meant
-// to be long-lived and shared between many goroutines.
-func (c *ClusterClient) Close() error {
- return c.nodes.Close()
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *ClusterClient) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *ClusterClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
-}
-
-func (c *ClusterClient) process(ctx context.Context, cmd Cmder) error {
- cmdInfo := c.cmdInfo(cmd.Name())
- slot := c.cmdSlot(cmd)
-
- var node *clusterNode
- var ask bool
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- if node == nil {
- var err error
- node, err = c.cmdNode(ctx, cmdInfo, slot)
- if err != nil {
- return err
- }
- }
-
- if ask {
- pipe := node.Client.Pipeline()
- _ = pipe.Process(ctx, NewCmd(ctx, "asking"))
- _ = pipe.Process(ctx, cmd)
- _, lastErr = pipe.Exec(ctx)
- _ = pipe.Close()
- ask = false
- } else {
- lastErr = node.Client.Process(ctx, cmd)
- }
-
- // If there is no error - we are done.
- if lastErr == nil {
- return nil
- }
- if isReadOnly := isReadOnlyError(lastErr); isReadOnly || lastErr == pool.ErrClosed {
- if isReadOnly {
- c.state.LazyReload()
- }
- node = nil
- continue
- }
-
- // If slave is loading - pick another node.
- if c.opt.ReadOnly && isLoadingError(lastErr) {
- node.MarkAsFailing()
- node = nil
- continue
- }
-
- var moved bool
- var addr string
- moved, ask, addr = isMovedError(lastErr)
- if moved || ask {
- c.state.LazyReload()
-
- var err error
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
- continue
- }
-
- if shouldRetry(lastErr, cmd.readTimeout() == nil) {
-			// First, retry the same node.
- if attempt == 0 {
- continue
- }
-
-			// Second, try another node.
- node.MarkAsFailing()
- node = nil
- continue
- }
-
- return lastErr
- }
- return lastErr
-}
-
-// ForEachMaster concurrently calls the fn on each master node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachMaster(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- for _, master := range state.Masters {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(master)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
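// A short sketch of ForEachMaster, assuming the same imports as the
// NewClusterClient sketch above and a *redis.ClusterClient named rdb; the
// callback runs concurrently against every master and the first error wins.
func exampleForEachMaster(ctx context.Context, rdb *redis.ClusterClient) error {
	return rdb.ForEachMaster(ctx, func(ctx context.Context, master *redis.Client) error {
		// Flush each master's keyspace; any per-node command works here.
		return master.FlushDB(ctx).Err()
	})
}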
-
-// ForEachSlave concurrently calls the fn on each slave node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachSlave(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- for _, slave := range state.Slaves {
- wg.Add(1)
- go func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(slave)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// ForEachShard concurrently calls the fn on each known node in the cluster.
-// It returns the first error if any.
-func (c *ClusterClient) ForEachShard(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- state, err := c.state.ReloadOrGet(ctx)
- if err != nil {
- return err
- }
-
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
-
- worker := func(node *clusterNode) {
- defer wg.Done()
- err := fn(ctx, node.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }
-
- for _, node := range state.Masters {
- wg.Add(1)
- go worker(node)
- }
- for _, node := range state.Slaves {
- wg.Add(1)
- go worker(node)
- }
-
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-// PoolStats returns accumulated connection pool stats.
-func (c *ClusterClient) PoolStats() *PoolStats {
- var acc PoolStats
-
- state, _ := c.state.Get(context.TODO())
- if state == nil {
- return &acc
- }
-
- for _, node := range state.Masters {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- acc.StaleConns += s.StaleConns
- }
-
- for _, node := range state.Slaves {
- s := node.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
-
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- acc.StaleConns += s.StaleConns
- }
-
- return &acc
-}
-
-func (c *ClusterClient) loadState(ctx context.Context) (*clusterState, error) {
- if c.opt.ClusterSlots != nil {
- slots, err := c.opt.ClusterSlots(ctx)
- if err != nil {
- return nil, err
- }
- return newClusterState(c.nodes, slots, "")
- }
-
- addrs, err := c.nodes.Addrs()
- if err != nil {
- return nil, err
- }
-
- var firstErr error
-
- for _, idx := range rand.Perm(len(addrs)) {
- addr := addrs[idx]
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- slots, err := node.Client.ClusterSlots(ctx).Result()
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- return newClusterState(c.nodes, slots, node.Client.opt.Addr)
- }
-
-	/*
-	 * No node is reachable. It's possible that the IP addresses of all nodes have
-	 * changed. Clear activeAddrs so that the client can reconnect using the initial
-	 * address list (e.g. [redis-cluster-0:6379, redis-cluster-1:6379]), which may
-	 * resolve the domain names to updated IP addresses.
-	 */
- c.nodes.mu.Lock()
- c.nodes.activeAddrs = nil
- c.nodes.mu.Unlock()
-
- return nil, firstErr
-}
-
-// reaper closes idle connections to the cluster.
-func (c *ClusterClient) reaper(idleCheckFrequency time.Duration) {
- ticker := time.NewTicker(idleCheckFrequency)
- defer ticker.Stop()
-
- for range ticker.C {
- nodes, err := c.nodes.All()
- if err != nil {
- break
- }
-
- for _, node := range nodes {
- _, err := node.Client.connPool.(*pool.ConnPool).ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(c.Context(), "ReapStaleConns failed: %s", err)
- }
- }
- }
-}
-
-func (c *ClusterClient) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *ClusterClient) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
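// A sketch of Pipelined on a cluster client (same assumptions as above); the
// queued commands are grouped by owning node and each batch is written in one
// round trip, as _processPipeline below shows.
func examplePipelined(ctx context.Context, rdb *redis.ClusterClient) error {
	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "pageviews:home")
		pipe.Incr(ctx, "pageviews:about")
		return nil
	})
	if err != nil {
		return err
	}
	for _, cmd := range cmds {
		// Incr queues *redis.IntCmd values, so the assertion is safe here.
		_ = cmd.(*redis.IntCmd).Val()
	}
	return nil
}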
-
-func (c *ClusterClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c._processPipeline)
-}
-
-func (c *ClusterClient) _processPipeline(ctx context.Context, cmds []Cmder) error {
- cmdsMap := newCmdsMap()
- err := c.mapCmdsByNode(ctx, cmdsMap, cmds)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- }
-
- failedCmds := newCmdsMap()
- var wg sync.WaitGroup
-
- for node, cmds := range cmdsMap.m {
- wg.Add(1)
- go func(node *clusterNode, cmds []Cmder) {
- defer wg.Done()
-
- err := c._processPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
- }(node, cmds)
- }
-
- wg.Wait()
- if len(failedCmds.m) == 0 {
- break
- }
- cmdsMap = failedCmds
- }
-
- return cmdsFirstErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsByNode(ctx context.Context, cmdsMap *cmdsMap, cmds []Cmder) error {
- state, err := c.state.Get(ctx)
- if err != nil {
- return err
- }
-
- if c.opt.ReadOnly && c.cmdsAreReadOnly(cmds) {
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := c.slotReadOnlyNode(state, slot)
- if err != nil {
- return err
- }
- cmdsMap.Add(node, cmd)
- }
- return nil
- }
-
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- node, err := state.slotMasterNode(slot)
- if err != nil {
- return err
- }
- cmdsMap.Add(node, cmd)
- }
- return nil
-}
-
-func (c *ClusterClient) cmdsAreReadOnly(cmds []Cmder) bool {
- for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(cmd.Name())
- if cmdInfo == nil || !cmdInfo.ReadOnly {
- return false
- }
- }
- return true
-}
-
-func (c *ClusterClient) _processPipelineNode(
- ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
-) error {
- return node.Client.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
-
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return c.pipelineReadCmds(ctx, node, rd, cmds, failedCmds)
- })
- })
- })
-}
-
-func (c *ClusterClient) pipelineReadCmds(
- ctx context.Context,
- node *clusterNode,
- rd *proto.Reader,
- cmds []Cmder,
- failedCmds *cmdsMap,
-) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
-
- if err == nil {
- continue
- }
-
- if c.checkMovedErr(ctx, cmd, err, failedCmds) {
- continue
- }
-
- if c.opt.ReadOnly && isLoadingError(err) {
- node.MarkAsFailing()
- return err
- }
- if isRedisError(err) {
- continue
- }
- return err
- }
- return nil
-}
-
-func (c *ClusterClient) checkMovedErr(
- ctx context.Context, cmd Cmder, err error, failedCmds *cmdsMap,
-) bool {
- moved, ask, addr := isMovedError(err)
- if !moved && !ask {
- return false
- }
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return false
- }
-
- if moved {
- c.state.LazyReload()
- failedCmds.Add(node, cmd)
- return true
- }
-
- if ask {
- failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
- return true
- }
-
- panic("not reached")
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *ClusterClient) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *ClusterClient) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
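// A sketch of TxPipelined (same assumptions as above). As _processTxPipeline
// below shows, commands are grouped by hash slot and each group is wrapped in
// its own MULTI/EXEC on that slot's master; the {user:1} hash tag keeps both
// keys in a single transaction.
func exampleTxPipelined(ctx context.Context, rdb *redis.ClusterClient) error {
	_, err := rdb.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Incr(ctx, "{user:1}:visits")
		pipe.Set(ctx, "{user:1}:last_page", "/home", 0)
		return nil
	})
	return err
}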
-
-func (c *ClusterClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c._processTxPipeline)
-}
-
-func (c *ClusterClient) _processTxPipeline(ctx context.Context, cmds []Cmder) error {
- // Trim multi .. exec.
- cmds = cmds[1 : len(cmds)-1]
-
- state, err := c.state.Get(ctx)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- cmdsMap := c.mapCmdsBySlot(cmds)
- for slot, cmds := range cmdsMap {
- node, err := state.slotMasterNode(slot)
- if err != nil {
- setCmdsErr(cmds, err)
- continue
- }
-
- cmdsMap := map[*clusterNode][]Cmder{node: cmds}
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- }
-
- failedCmds := newCmdsMap()
- var wg sync.WaitGroup
-
- for node, cmds := range cmdsMap {
- wg.Add(1)
- go func(node *clusterNode, cmds []Cmder) {
- defer wg.Done()
-
- err := c._processTxPipelineNode(ctx, node, cmds, failedCmds)
- if err == nil {
- return
- }
-
- if attempt < c.opt.MaxRedirects {
- if err := c.mapCmdsByNode(ctx, failedCmds, cmds); err != nil {
- setCmdsErr(cmds, err)
- }
- } else {
- setCmdsErr(cmds, err)
- }
- }(node, cmds)
- }
-
- wg.Wait()
- if len(failedCmds.m) == 0 {
- break
- }
- cmdsMap = failedCmds.m
- }
- }
-
- return cmdsFirstErr(cmds)
-}
-
-func (c *ClusterClient) mapCmdsBySlot(cmds []Cmder) map[int][]Cmder {
- cmdsMap := make(map[int][]Cmder)
- for _, cmd := range cmds {
- slot := c.cmdSlot(cmd)
- cmdsMap[slot] = append(cmdsMap[slot], cmd)
- }
- return cmdsMap
-}
-
-func (c *ClusterClient) _processTxPipelineNode(
- ctx context.Context, node *clusterNode, cmds []Cmder, failedCmds *cmdsMap,
-) error {
- return node.Client.hooks.processTxPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return node.Client.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return err
- }
-
- return cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := c.txPipelineReadQueued(ctx, rd, statusCmd, cmds, failedCmds)
- if err != nil {
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- return c.cmdsMoved(ctx, cmds, moved, ask, addr, failedCmds)
- }
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- })
- })
-}
-
-func (c *ClusterClient) txPipelineReadQueued(
- ctx context.Context,
- rd *proto.Reader,
- statusCmd *StatusCmd,
- cmds []Cmder,
- failedCmds *cmdsMap,
-) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for _, cmd := range cmds {
- err := statusCmd.readReply(rd)
- if err == nil || c.checkMovedErr(ctx, cmd, err, failedCmds) || isRedisError(err) {
- continue
- }
- return err
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- return fmt.Errorf("redis: expected '*', but got line %q", line)
- }
-
- return nil
-}
-
-func (c *ClusterClient) cmdsMoved(
- ctx context.Context, cmds []Cmder,
- moved, ask bool,
- addr string,
- failedCmds *cmdsMap,
-) error {
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
-
- if moved {
- c.state.LazyReload()
- for _, cmd := range cmds {
- failedCmds.Add(node, cmd)
- }
- return nil
- }
-
- if ask {
- for _, cmd := range cmds {
- failedCmds.Add(node, NewCmd(ctx, "asking"), cmd)
- }
- return nil
- }
-
- return nil
-}
-
-func (c *ClusterClient) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- if len(keys) == 0 {
- return fmt.Errorf("redis: Watch requires at least one key")
- }
-
- slot := hashtag.Slot(keys[0])
- for _, key := range keys[1:] {
- if hashtag.Slot(key) != slot {
- err := fmt.Errorf("redis: Watch requires all keys to be in the same slot")
- return err
- }
- }
-
- node, err := c.slotMasterNode(ctx, slot)
- if err != nil {
- return err
- }
-
- for attempt := 0; attempt <= c.opt.MaxRedirects; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- err = node.Client.Watch(ctx, fn, keys...)
- if err == nil {
- break
- }
-
- moved, ask, addr := isMovedError(err)
- if moved || ask {
- node, err = c.nodes.GetOrCreate(addr)
- if err != nil {
- return err
- }
- continue
- }
-
- if isReadOnly := isReadOnlyError(err); isReadOnly || err == pool.ErrClosed {
- if isReadOnly {
- c.state.LazyReload()
- }
- node, err = c.slotMasterNode(ctx, slot)
- if err != nil {
- return err
- }
- continue
- }
-
- if shouldRetry(err, true) {
- continue
- }
-
- return err
- }
-
- return err
-}
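// A sketch of optimistic locking with Watch (same assumptions as above). All
// watched keys must hash to the same slot; the {order:42} hash tag would keep
// additional keys in that slot if more were watched.
func exampleWatch(ctx context.Context, rdb *redis.ClusterClient) error {
	const key = "{order:42}:qty"
	return rdb.Watch(ctx, func(tx *redis.Tx) error {
		n, err := tx.Get(ctx, key).Int()
		if err != nil && err != redis.Nil {
			return err
		}
		// The transaction fails (and can be retried) if the key changed since WATCH.
		_, err = tx.TxPipelined(ctx, func(pipe redis.Pipeliner) error {
			pipe.Set(ctx, key, n+1, 0)
			return nil
		})
		return err
	}, key)
}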
-
-func (c *ClusterClient) pubSub() *PubSub {
- var node *clusterNode
- pubsub := &PubSub{
- opt: c.opt.clientOptions(),
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- if node != nil {
- panic("node != nil")
- }
-
- var err error
- if len(channels) > 0 {
- slot := hashtag.Slot(channels[0])
- node, err = c.slotMasterNode(ctx, slot)
- } else {
- node, err = c.nodes.Random()
- }
- if err != nil {
- return nil, err
- }
-
- cn, err := node.Client.newConn(context.TODO())
- if err != nil {
- node = nil
-
- return nil, err
- }
-
- return cn, nil
- },
- closeConn: func(cn *pool.Conn) error {
- err := node.Client.connPool.CloseConn(cn)
- node = nil
- return err
- },
- }
- pubsub.init()
-
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create an empty subscription.
-func (c *ClusterClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
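// A sketch of Subscribe on a cluster client (same assumptions as above); as
// pubSub above shows, the first channel's hash slot selects the node that
// carries the subscription.
func exampleSubscribe(ctx context.Context, rdb *redis.ClusterClient) error {
	pubsub := rdb.Subscribe(ctx, "mychannel")
	defer pubsub.Close()

	msg, err := pubsub.ReceiveMessage(ctx)
	if err != nil {
		return err
	}
	_ = msg.Payload
	return nil
}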
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create an empty subscription.
-func (c *ClusterClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-func (c *ClusterClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *ClusterClient) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- // Try 3 random nodes.
- const nodeLimit = 3
-
- addrs, err := c.nodes.Addrs()
- if err != nil {
- return nil, err
- }
-
- var firstErr error
-
- perm := rand.Perm(len(addrs))
- if len(perm) > nodeLimit {
- perm = perm[:nodeLimit]
- }
-
- for _, idx := range perm {
- addr := addrs[idx]
-
- node, err := c.nodes.GetOrCreate(addr)
- if err != nil {
- if firstErr == nil {
- firstErr = err
- }
- continue
- }
-
- info, err := node.Client.Command(ctx).Result()
- if err == nil {
- return info, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- }
-
- if firstErr == nil {
- panic("not reached")
- }
- return nil, firstErr
-}
-
-func (c *ClusterClient) cmdInfo(name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(c.ctx)
- if err != nil {
- return nil
- }
-
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(c.Context(), "info for cmd=%s not found", name)
- }
- return info
-}
-
-func (c *ClusterClient) cmdSlot(cmd Cmder) int {
- args := cmd.Args()
- if args[0] == "cluster" && args[1] == "getkeysinslot" {
- return args[2].(int)
- }
-
- cmdInfo := c.cmdInfo(cmd.Name())
- return cmdSlot(cmd, cmdFirstKeyPos(cmd, cmdInfo))
-}
-
-func cmdSlot(cmd Cmder, pos int) int {
- if pos == 0 {
- return hashtag.RandomSlot()
- }
- firstKey := cmd.stringArg(pos)
- return hashtag.Slot(firstKey)
-}
-
-func (c *ClusterClient) cmdNode(
- ctx context.Context,
- cmdInfo *CommandInfo,
- slot int,
-) (*clusterNode, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if c.opt.ReadOnly && cmdInfo != nil && cmdInfo.ReadOnly {
- return c.slotReadOnlyNode(state, slot)
- }
- return state.slotMasterNode(slot)
-}
-
-func (c *clusterClient) slotReadOnlyNode(state *clusterState, slot int) (*clusterNode, error) {
- if c.opt.RouteByLatency {
- return state.slotClosestNode(slot)
- }
- if c.opt.RouteRandomly {
- return state.slotRandomNode(slot)
- }
- return state.slotSlaveNode(slot)
-}
-
-func (c *ClusterClient) slotMasterNode(ctx context.Context, slot int) (*clusterNode, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
- return state.slotMasterNode(slot)
-}
-
-// SlaveForKey returns a client for a replica node so that any command can be run on it.
-// This is especially useful for running a particular Lua script that contains only
-// read-only commands on a replica.
-// Other Redis commands generally carry a flag marking them as read-only, so they are
-// routed to replica nodes automatically when the ClusterOptions.ReadOnly flag is set
-// to true.
-func (c *ClusterClient) SlaveForKey(ctx context.Context, key string) (*Client, error) {
- state, err := c.state.Get(ctx)
- if err != nil {
- return nil, err
- }
- slot := hashtag.Slot(key)
- node, err := c.slotReadOnlyNode(state, slot)
- if err != nil {
- return nil, err
- }
- return node.Client, err
-}
-
-// MasterForKey returns a client for the master node that serves a particular key.
-func (c *ClusterClient) MasterForKey(ctx context.Context, key string) (*Client, error) {
- slot := hashtag.Slot(key)
- node, err := c.slotMasterNode(ctx, slot)
- if err != nil {
- return nil, err
- }
- return node.Client, err
-}
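// A sketch of MasterForKey and SlaveForKey (same assumptions as above); both
// return the *redis.Client for a node serving the key's hash slot.
func exampleForKey(ctx context.Context, rdb *redis.ClusterClient) error {
	master, err := rdb.MasterForKey(ctx, "user:1")
	if err != nil {
		return err
	}
	if err := master.Set(ctx, "user:1", "alice", 0).Err(); err != nil {
		return err
	}

	replica, err := rdb.SlaveForKey(ctx, "user:1")
	if err != nil {
		return err
	}
	_, err = replica.Get(ctx, "user:1").Result()
	return err
}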
-
-func appendUniqueNode(nodes []*clusterNode, node *clusterNode) []*clusterNode {
- for _, n := range nodes {
- if n == node {
- return nodes
- }
- }
- return append(nodes, node)
-}
-
-func appendIfNotExists(ss []string, es ...string) []string {
-loop:
- for _, e := range es {
- for _, s := range ss {
- if s == e {
- continue loop
- }
- }
- ss = append(ss, e)
- }
- return ss
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsMap struct {
- mu sync.Mutex
- m map[*clusterNode][]Cmder
-}
-
-func newCmdsMap() *cmdsMap {
- return &cmdsMap{
- m: make(map[*clusterNode][]Cmder),
- }
-}
-
-func (m *cmdsMap) Add(node *clusterNode, cmds ...Cmder) {
- m.mu.Lock()
- m.m[node] = append(m.m[node], cmds...)
- m.mu.Unlock()
-}
diff --git a/vendor/github.com/go-redis/redis/v8/cluster_commands.go b/vendor/github.com/go-redis/redis/v8/cluster_commands.go
deleted file mode 100644
index 085bce83d5..0000000000
--- a/vendor/github.com/go-redis/redis/v8/cluster_commands.go
+++ /dev/null
@@ -1,109 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
- "sync/atomic"
-)
-
-func (c *ClusterClient) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- var size int64
- err := c.ForEachMaster(ctx, func(ctx context.Context, master *Client) error {
- n, err := master.DBSize(ctx).Result()
- if err != nil {
- return err
- }
- atomic.AddInt64(&size, n)
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- } else {
- cmd.val = size
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- val, err := shard.ScriptLoad(ctx, script).Result()
- if err != nil {
- return err
- }
-
- mu.Lock()
- if cmd.Val() == "" {
- cmd.val = val
- }
- mu.Unlock()
-
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- return shard.ScriptFlush(ctx).Err()
- })
- if err != nil {
- cmd.SetErr(err)
- }
- return nil
- })
- return cmd
-}
-
-func (c *ClusterClient) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
-
- result := make([]bool, len(hashes))
- for i := range result {
- result[i] = true
- }
-
- _ = c.hooks.process(ctx, cmd, func(ctx context.Context, _ Cmder) error {
- mu := &sync.Mutex{}
- err := c.ForEachShard(ctx, func(ctx context.Context, shard *Client) error {
- val, err := shard.ScriptExists(ctx, hashes...).Result()
- if err != nil {
- return err
- }
-
- mu.Lock()
- for i, v := range val {
- result[i] = result[i] && v
- }
- mu.Unlock()
-
- return nil
- })
- if err != nil {
- cmd.SetErr(err)
- } else {
- cmd.val = result
- }
- return nil
- })
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v8/command.go b/vendor/github.com/go-redis/redis/v8/command.go
deleted file mode 100644
index 4bb12a85be..0000000000
--- a/vendor/github.com/go-redis/redis/v8/command.go
+++ /dev/null
@@ -1,3478 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "net"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hscan"
- "github.com/go-redis/redis/v8/internal/proto"
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type Cmder interface {
- Name() string
- FullName() string
- Args() []interface{}
- String() string
- stringArg(int) string
- firstKeyPos() int8
- SetFirstKeyPos(int8)
-
- readTimeout() *time.Duration
- readReply(rd *proto.Reader) error
-
- SetErr(error)
- Err() error
-}
-
-func setCmdsErr(cmds []Cmder, e error) {
- for _, cmd := range cmds {
- if cmd.Err() == nil {
- cmd.SetErr(e)
- }
- }
-}
-
-func cmdsFirstErr(cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := cmd.Err(); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmds(wr *proto.Writer, cmds []Cmder) error {
- for _, cmd := range cmds {
- if err := writeCmd(wr, cmd); err != nil {
- return err
- }
- }
- return nil
-}
-
-func writeCmd(wr *proto.Writer, cmd Cmder) error {
- return wr.WriteArgs(cmd.Args())
-}
-
-func cmdFirstKeyPos(cmd Cmder, info *CommandInfo) int {
- if pos := cmd.firstKeyPos(); pos != 0 {
- return int(pos)
- }
-
- switch cmd.Name() {
- case "eval", "evalsha":
- if cmd.stringArg(2) != "0" {
- return 3
- }
-
- return 0
- case "publish":
- return 1
- case "memory":
- // https://github.com/redis/redis/issues/7493
- if cmd.stringArg(1) == "usage" {
- return 2
- }
- }
-
- if info != nil {
- return int(info.FirstKeyPos)
- }
- return 0
-}
-
-func cmdString(cmd Cmder, val interface{}) string {
- b := make([]byte, 0, 64)
-
- for i, arg := range cmd.Args() {
- if i > 0 {
- b = append(b, ' ')
- }
- b = internal.AppendArg(b, arg)
- }
-
- if err := cmd.Err(); err != nil {
- b = append(b, ": "...)
- b = append(b, err.Error()...)
- } else if val != nil {
- b = append(b, ": "...)
- b = internal.AppendArg(b, val)
- }
-
- return internal.String(b)
-}
-
-//------------------------------------------------------------------------------
-
-type baseCmd struct {
- ctx context.Context
- args []interface{}
- err error
- keyPos int8
-
- _readTimeout *time.Duration
-}
-
-var _ Cmder = (*Cmd)(nil)
-
-func (cmd *baseCmd) Name() string {
- if len(cmd.args) == 0 {
- return ""
- }
- // Cmd name must be lower cased.
- return internal.ToLower(cmd.stringArg(0))
-}
-
-func (cmd *baseCmd) FullName() string {
- switch name := cmd.Name(); name {
- case "cluster", "command":
- if len(cmd.args) == 1 {
- return name
- }
- if s2, ok := cmd.args[1].(string); ok {
- return name + " " + s2
- }
- return name
- default:
- return name
- }
-}
-
-func (cmd *baseCmd) Args() []interface{} {
- return cmd.args
-}
-
-func (cmd *baseCmd) stringArg(pos int) string {
- if pos < 0 || pos >= len(cmd.args) {
- return ""
- }
- arg := cmd.args[pos]
- switch v := arg.(type) {
- case string:
- return v
- default:
- // TODO: consider using appendArg
- return fmt.Sprint(v)
- }
-}
-
-func (cmd *baseCmd) firstKeyPos() int8 {
- return cmd.keyPos
-}
-
-func (cmd *baseCmd) SetFirstKeyPos(keyPos int8) {
- cmd.keyPos = keyPos
-}
-
-func (cmd *baseCmd) SetErr(e error) {
- cmd.err = e
-}
-
-func (cmd *baseCmd) Err() error {
- return cmd.err
-}
-
-func (cmd *baseCmd) readTimeout() *time.Duration {
- return cmd._readTimeout
-}
-
-func (cmd *baseCmd) setReadTimeout(d time.Duration) {
- cmd._readTimeout = &d
-}
-
-//------------------------------------------------------------------------------
-
-type Cmd struct {
- baseCmd
-
- val interface{}
-}
-
-func NewCmd(ctx context.Context, args ...interface{}) *Cmd {
- return &Cmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *Cmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *Cmd) SetVal(val interface{}) {
- cmd.val = val
-}
-
-func (cmd *Cmd) Val() interface{} {
- return cmd.val
-}
-
-func (cmd *Cmd) Result() (interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *Cmd) Text() (string, error) {
- if cmd.err != nil {
- return "", cmd.err
- }
- return toString(cmd.val)
-}
-
-func toString(val interface{}) (string, error) {
- switch val := val.(type) {
- case string:
- return val, nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for String", val)
- return "", err
- }
-}
-
-func (cmd *Cmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- switch val := cmd.val.(type) {
- case int64:
- return int(val), nil
- case string:
- return strconv.Atoi(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toInt64(cmd.val)
-}
-
-func toInt64(val interface{}) (int64, error) {
- switch val := val.(type) {
- case int64:
- return val, nil
- case string:
- return strconv.ParseInt(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Int64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toUint64(cmd.val)
-}
-
-func toUint64(val interface{}) (uint64, error) {
- switch val := val.(type) {
- case int64:
- return uint64(val), nil
- case string:
- return strconv.ParseUint(val, 10, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Uint64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat32(cmd.val)
-}
-
-func toFloat32(val interface{}) (float32, error) {
- switch val := val.(type) {
- case int64:
- return float32(val), nil
- case string:
- f, err := strconv.ParseFloat(val, 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float32", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return toFloat64(cmd.val)
-}
-
-func toFloat64(val interface{}) (float64, error) {
- switch val := val.(type) {
- case int64:
- return float64(val), nil
- case string:
- return strconv.ParseFloat(val, 64)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Float64", val)
- return 0, err
- }
-}
-
-func (cmd *Cmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return toBool(cmd.val)
-}
-
-func toBool(val interface{}) (bool, error) {
- switch val := val.(type) {
- case int64:
- return val != 0, nil
- case string:
- return strconv.ParseBool(val)
- default:
- err := fmt.Errorf("redis: unexpected type=%T for Bool", val)
- return false, err
- }
-}
-
-func (cmd *Cmd) Slice() ([]interface{}, error) {
- if cmd.err != nil {
- return nil, cmd.err
- }
- switch val := cmd.val.(type) {
- case []interface{}:
- return val, nil
- default:
- return nil, fmt.Errorf("redis: unexpected type=%T for Slice", val)
- }
-}
-
-func (cmd *Cmd) StringSlice() ([]string, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- ss := make([]string, len(slice))
- for i, iface := range slice {
- val, err := toString(iface)
- if err != nil {
- return nil, err
- }
- ss[i] = val
- }
- return ss, nil
-}
-
-func (cmd *Cmd) Int64Slice() ([]int64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]int64, len(slice))
- for i, iface := range slice {
- val, err := toInt64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Uint64Slice() ([]uint64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- nums := make([]uint64, len(slice))
- for i, iface := range slice {
- val, err := toUint64(iface)
- if err != nil {
- return nil, err
- }
- nums[i] = val
- }
- return nums, nil
-}
-
-func (cmd *Cmd) Float32Slice() ([]float32, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float32, len(slice))
- for i, iface := range slice {
- val, err := toFloat32(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) Float64Slice() ([]float64, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- floats := make([]float64, len(slice))
- for i, iface := range slice {
- val, err := toFloat64(iface)
- if err != nil {
- return nil, err
- }
- floats[i] = val
- }
- return floats, nil
-}
-
-func (cmd *Cmd) BoolSlice() ([]bool, error) {
- slice, err := cmd.Slice()
- if err != nil {
- return nil, err
- }
-
- bools := make([]bool, len(slice))
- for i, iface := range slice {
- val, err := toBool(iface)
- if err != nil {
- return nil, err
- }
- bools[i] = val
- }
- return bools, nil
-}
-
-func (cmd *Cmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadReply(sliceParser)
- return err
-}
-
-// sliceParser implements proto.MultiBulkParse.
-func sliceParser(rd *proto.Reader, n int64) (interface{}, error) {
- vals := make([]interface{}, n)
- for i := 0; i < len(vals); i++ {
- v, err := rd.ReadReply(sliceParser)
- if err != nil {
- if err == Nil {
- vals[i] = nil
- continue
- }
- if err, ok := err.(proto.RedisError); ok {
- vals[i] = err
- continue
- }
- return nil, err
- }
- vals[i] = v
- }
- return vals, nil
-}
-
-//------------------------------------------------------------------------------
-
-type SliceCmd struct {
- baseCmd
-
- val []interface{}
-}
-
-var _ Cmder = (*SliceCmd)(nil)
-
-func NewSliceCmd(ctx context.Context, args ...interface{}) *SliceCmd {
- return &SliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SliceCmd) SetVal(val []interface{}) {
- cmd.val = val
-}
-
-func (cmd *SliceCmd) Val() []interface{} {
- return cmd.val
-}
-
-func (cmd *SliceCmd) Result() ([]interface{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *SliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the reply values into a destination struct. The field names taken
-// from the command's arguments are matched to the struct fields by the
-// `redis:"field"` tag.
-func (cmd *SliceCmd) Scan(dst interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
-	// Pass the list of keys and values.
-	// Skip the first two args for HMGET: the command name and the key.
- var args []interface{}
- if cmd.args[0] == "hmget" {
- args = cmd.args[2:]
- } else {
- // Otherwise, it's: MGET field field ...
- args = cmd.args[1:]
- }
-
- return hscan.Scan(dst, args, cmd.val)
-}
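// A sketch of SliceCmd.Scan (same import assumptions as the sketches above),
// using HMGET, which returns a *redis.SliceCmd; the field names here are
// hypothetical and are mapped to the struct via the `redis` tag as described
// in the comment above.
type exampleUser struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func exampleSliceScan(ctx context.Context, rdb *redis.ClusterClient) (exampleUser, error) {
	var u exampleUser
	err := rdb.HMGet(ctx, "user:1", "name", "age").Scan(&u)
	return u, err
}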
-
-func (cmd *SliceCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(sliceParser)
- if err != nil {
- return err
- }
- cmd.val = v.([]interface{})
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type StatusCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StatusCmd)(nil)
-
-func NewStatusCmd(ctx context.Context, args ...interface{}) *StatusCmd {
- return &StatusCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StatusCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StatusCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StatusCmd) Result() (string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StatusCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StatusCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntCmd struct {
- baseCmd
-
- val int64
-}
-
-var _ Cmder = (*IntCmd)(nil)
-
-func NewIntCmd(ctx context.Context, args ...interface{}) *IntCmd {
- return &IntCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntCmd) SetVal(val int64) {
- cmd.val = val
-}
-
-func (cmd *IntCmd) Val() int64 {
- return cmd.val
-}
-
-func (cmd *IntCmd) Result() (int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntCmd) Uint64() (uint64, error) {
- return uint64(cmd.val), cmd.err
-}
-
-func (cmd *IntCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadIntReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type IntSliceCmd struct {
- baseCmd
-
- val []int64
-}
-
-var _ Cmder = (*IntSliceCmd)(nil)
-
-func NewIntSliceCmd(ctx context.Context, args ...interface{}) *IntSliceCmd {
- return &IntSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *IntSliceCmd) SetVal(val []int64) {
- cmd.val = val
-}
-
-func (cmd *IntSliceCmd) Val() []int64 {
- return cmd.val
-}
-
-func (cmd *IntSliceCmd) Result() ([]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *IntSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *IntSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]int64, n)
- for i := 0; i < len(cmd.val); i++ {
- num, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = num
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type DurationCmd struct {
- baseCmd
-
- val time.Duration
- precision time.Duration
-}
-
-var _ Cmder = (*DurationCmd)(nil)
-
-func NewDurationCmd(ctx context.Context, precision time.Duration, args ...interface{}) *DurationCmd {
- return &DurationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- precision: precision,
- }
-}
-
-func (cmd *DurationCmd) SetVal(val time.Duration) {
- cmd.val = val
-}
-
-func (cmd *DurationCmd) Val() time.Duration {
- return cmd.val
-}
-
-func (cmd *DurationCmd) Result() (time.Duration, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *DurationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *DurationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadIntReply()
- if err != nil {
- return err
- }
- switch n {
- // -2 if the key does not exist
- // -1 if the key exists but has no associated expire
- case -2, -1:
- cmd.val = time.Duration(n)
- default:
- cmd.val = time.Duration(n) * cmd.precision
- }
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type TimeCmd struct {
- baseCmd
-
- val time.Time
-}
-
-var _ Cmder = (*TimeCmd)(nil)
-
-func NewTimeCmd(ctx context.Context, args ...interface{}) *TimeCmd {
- return &TimeCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *TimeCmd) SetVal(val time.Time) {
- cmd.val = val
-}
-
-func (cmd *TimeCmd) Val() time.Time {
- return cmd.val
-}
-
-func (cmd *TimeCmd) Result() (time.Time, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *TimeCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *TimeCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d elements, expected 2", n)
- }
-
- sec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- microsec, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- cmd.val = time.Unix(sec, microsec*1000)
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolCmd struct {
- baseCmd
-
- val bool
-}
-
-var _ Cmder = (*BoolCmd)(nil)
-
-func NewBoolCmd(ctx context.Context, args ...interface{}) *BoolCmd {
- return &BoolCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolCmd) SetVal(val bool) {
- cmd.val = val
-}
-
-func (cmd *BoolCmd) Val() bool {
- return cmd.val
-}
-
-func (cmd *BoolCmd) Result() (bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(nil)
- // `SET key value NX` returns nil when key already exists. But
- // `SETNX key value` returns bool (0/1). So convert nil to bool.
- if err == Nil {
- cmd.val = false
- return nil
- }
- if err != nil {
- return err
- }
- switch v := v.(type) {
- case int64:
- cmd.val = v == 1
- return nil
- case string:
- cmd.val = v == "OK"
- return nil
- default:
- return fmt.Errorf("got %T, wanted int64 or string", v)
- }
-}
-
-//------------------------------------------------------------------------------
-
-type StringCmd struct {
- baseCmd
-
- val string
-}
-
-var _ Cmder = (*StringCmd)(nil)
-
-func NewStringCmd(ctx context.Context, args ...interface{}) *StringCmd {
- return &StringCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringCmd) SetVal(val string) {
- cmd.val = val
-}
-
-func (cmd *StringCmd) Val() string {
- return cmd.val
-}
-
-func (cmd *StringCmd) Result() (string, error) {
- return cmd.Val(), cmd.err
-}
-
-func (cmd *StringCmd) Bytes() ([]byte, error) {
- return util.StringToBytes(cmd.val), cmd.err
-}
-
-func (cmd *StringCmd) Bool() (bool, error) {
- if cmd.err != nil {
- return false, cmd.err
- }
- return strconv.ParseBool(cmd.val)
-}
-
-func (cmd *StringCmd) Int() (int, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.Atoi(cmd.Val())
-}
-
-func (cmd *StringCmd) Int64() (int64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseInt(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Uint64() (uint64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseUint(cmd.Val(), 10, 64)
-}
-
-func (cmd *StringCmd) Float32() (float32, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- f, err := strconv.ParseFloat(cmd.Val(), 32)
- if err != nil {
- return 0, err
- }
- return float32(f), nil
-}
-
-func (cmd *StringCmd) Float64() (float64, error) {
- if cmd.err != nil {
- return 0, cmd.err
- }
- return strconv.ParseFloat(cmd.Val(), 64)
-}
-
-func (cmd *StringCmd) Time() (time.Time, error) {
- if cmd.err != nil {
- return time.Time{}, cmd.err
- }
- return time.Parse(time.RFC3339Nano, cmd.Val())
-}
-
-func (cmd *StringCmd) Scan(val interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
- return proto.Scan([]byte(cmd.val), val)
-}
-
-func (cmd *StringCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadString()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatCmd struct {
- baseCmd
-
- val float64
-}
-
-var _ Cmder = (*FloatCmd)(nil)
-
-func NewFloatCmd(ctx context.Context, args ...interface{}) *FloatCmd {
- return &FloatCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatCmd) SetVal(val float64) {
- cmd.val = val
-}
-
-func (cmd *FloatCmd) Val() float64 {
- return cmd.val
-}
-
-func (cmd *FloatCmd) Result() (float64, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *FloatCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatCmd) readReply(rd *proto.Reader) (err error) {
- cmd.val, err = rd.ReadFloatReply()
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type FloatSliceCmd struct {
- baseCmd
-
- val []float64
-}
-
-var _ Cmder = (*FloatSliceCmd)(nil)
-
-func NewFloatSliceCmd(ctx context.Context, args ...interface{}) *FloatSliceCmd {
- return &FloatSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *FloatSliceCmd) SetVal(val []float64) {
- cmd.val = val
-}
-
-func (cmd *FloatSliceCmd) Val() []float64 {
- return cmd.val
-}
-
-func (cmd *FloatSliceCmd) Result() ([]float64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *FloatSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *FloatSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]float64, n)
- for i := 0; i < len(cmd.val); i++ {
- switch num, err := rd.ReadFloatReply(); {
- case err == Nil:
- cmd.val[i] = 0
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = num
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringSliceCmd struct {
- baseCmd
-
- val []string
-}
-
-var _ Cmder = (*StringSliceCmd)(nil)
-
-func NewStringSliceCmd(ctx context.Context, args ...interface{}) *StringSliceCmd {
- return &StringSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringSliceCmd) SetVal(val []string) {
- cmd.val = val
-}
-
-func (cmd *StringSliceCmd) Val() []string {
- return cmd.val
-}
-
-func (cmd *StringSliceCmd) Result() ([]string, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *StringSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringSliceCmd) ScanSlice(container interface{}) error {
- return proto.ScanSlice(cmd.Val(), container)
-}
-
-func (cmd *StringSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]string, n)
- for i := 0; i < len(cmd.val); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.val[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.val[i] = s
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type BoolSliceCmd struct {
- baseCmd
-
- val []bool
-}
-
-var _ Cmder = (*BoolSliceCmd)(nil)
-
-func NewBoolSliceCmd(ctx context.Context, args ...interface{}) *BoolSliceCmd {
- return &BoolSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *BoolSliceCmd) SetVal(val []bool) {
- cmd.val = val
-}
-
-func (cmd *BoolSliceCmd) Val() []bool {
- return cmd.val
-}
-
-func (cmd *BoolSliceCmd) Result() ([]bool, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *BoolSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *BoolSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]bool, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.val[i] = n == 1
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStringMapCmd struct {
- baseCmd
-
- val map[string]string
-}
-
-var _ Cmder = (*StringStringMapCmd)(nil)
-
-func NewStringStringMapCmd(ctx context.Context, args ...interface{}) *StringStringMapCmd {
- return &StringStringMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStringMapCmd) SetVal(val map[string]string) {
- cmd.val = val
-}
-
-func (cmd *StringStringMapCmd) Val() map[string]string {
- return cmd.val
-}
-
-func (cmd *StringStringMapCmd) Result() (map[string]string, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStringMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-// Scan scans the results from the map into a destination struct. The map keys
-// are matched to the struct fields by the `redis:"field"` tag.
-func (cmd *StringStringMapCmd) Scan(dest interface{}) error {
- if cmd.err != nil {
- return cmd.err
- }
-
- strct, err := hscan.Struct(dest)
- if err != nil {
- return err
- }
-
- for k, v := range cmd.val {
- if err := strct.Scan(k, v); err != nil {
- return err
- }
- }
-
- return nil
-}
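// A sketch of StringStringMapCmd.Scan (same assumptions as the SliceCmd.Scan
// sketch above), using HGETALL, which returns this command type; the hash
// fields are hypothetical and are matched to struct fields by the `redis` tag.
type exampleProfile struct {
	Name string `redis:"name"`
	Age  int    `redis:"age"`
}

func exampleMapScan(ctx context.Context, rdb *redis.ClusterClient) (exampleProfile, error) {
	var p exampleProfile
	err := rdb.HGetAll(ctx, "user:1").Scan(&p)
	return p, err
}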
-
-func (cmd *StringStringMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]string, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = value
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringIntMapCmd struct {
- baseCmd
-
- val map[string]int64
-}
-
-var _ Cmder = (*StringIntMapCmd)(nil)
-
-func NewStringIntMapCmd(ctx context.Context, args ...interface{}) *StringIntMapCmd {
- return &StringIntMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringIntMapCmd) SetVal(val map[string]int64) {
- cmd.val = val
-}
-
-func (cmd *StringIntMapCmd) Val() map[string]int64 {
- return cmd.val
-}
-
-func (cmd *StringIntMapCmd) Result() (map[string]int64, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringIntMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringIntMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]int64, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- n, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[key] = n
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type StringStructMapCmd struct {
- baseCmd
-
- val map[string]struct{}
-}
-
-var _ Cmder = (*StringStructMapCmd)(nil)
-
-func NewStringStructMapCmd(ctx context.Context, args ...interface{}) *StringStructMapCmd {
- return &StringStructMapCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *StringStructMapCmd) SetVal(val map[string]struct{}) {
- cmd.val = val
-}
-
-func (cmd *StringStructMapCmd) Val() map[string]struct{} {
- return cmd.val
-}
-
-func (cmd *StringStructMapCmd) Result() (map[string]struct{}, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *StringStructMapCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *StringStructMapCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]struct{}, n)
- for i := int64(0); i < n; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- cmd.val[key] = struct{}{}
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XMessage struct {
- ID string
- Values map[string]interface{}
-}
-
-type XMessageSliceCmd struct {
- baseCmd
-
- val []XMessage
-}
-
-var _ Cmder = (*XMessageSliceCmd)(nil)
-
-func NewXMessageSliceCmd(ctx context.Context, args ...interface{}) *XMessageSliceCmd {
- return &XMessageSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XMessageSliceCmd) SetVal(val []XMessage) {
- cmd.val = val
-}
-
-func (cmd *XMessageSliceCmd) Val() []XMessage {
- return cmd.val
-}
-
-func (cmd *XMessageSliceCmd) Result() ([]XMessage, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XMessageSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XMessageSliceCmd) readReply(rd *proto.Reader) error {
- var err error
- cmd.val, err = readXMessageSlice(rd)
- return err
-}
-
-func readXMessageSlice(rd *proto.Reader) ([]XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- msgs := make([]XMessage, n)
- for i := 0; i < n; i++ {
- var err error
- msgs[i], err = readXMessage(rd)
- if err != nil {
- return nil, err
- }
- }
- return msgs, nil
-}
-
-func readXMessage(rd *proto.Reader) (XMessage, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return XMessage{}, err
- }
- if n != 2 {
- return XMessage{}, fmt.Errorf("got %d, wanted 2", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return XMessage{}, err
- }
-
- var values map[string]interface{}
-
- v, err := rd.ReadArrayReply(stringInterfaceMapParser)
- if err != nil {
- if err != proto.Nil {
- return XMessage{}, err
- }
- } else {
- values = v.(map[string]interface{})
- }
-
- return XMessage{
- ID: id,
- Values: values,
- }, nil
-}
-
-// stringInterfaceMapParser implements proto.MultiBulkParse.
-func stringInterfaceMapParser(rd *proto.Reader, n int64) (interface{}, error) {
- m := make(map[string]interface{}, n/2)
- for i := int64(0); i < n; i += 2 {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- value, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- m[key] = value
- }
- return m, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XStream struct {
- Stream string
- Messages []XMessage
-}
-
-type XStreamSliceCmd struct {
- baseCmd
-
- val []XStream
-}
-
-var _ Cmder = (*XStreamSliceCmd)(nil)
-
-func NewXStreamSliceCmd(ctx context.Context, args ...interface{}) *XStreamSliceCmd {
- return &XStreamSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XStreamSliceCmd) SetVal(val []XStream) {
- cmd.val = val
-}
-
-func (cmd *XStreamSliceCmd) Val() []XStream {
- return cmd.val
-}
-
-func (cmd *XStreamSliceCmd) Result() ([]XStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XStreamSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XStreamSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XStream, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- stream, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- msgs, err := readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = XStream{
- Stream: stream,
- Messages: msgs,
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPending struct {
- Count int64
- Lower string
- Higher string
- Consumers map[string]int64
-}
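-
-// Illustrative usage sketch for the XPENDING summary form (assumes an
-// initialized *Client named rdb, a ctx, and example stream/group names):
-//
-//  summary, err := rdb.XPending(ctx, "mystream", "mygroup").Result()
-//  if err == nil {
-//      fmt.Println(summary.Count, summary.Lower, summary.Higher, summary.Consumers)
-//  }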
-
-type XPendingCmd struct {
- baseCmd
- val *XPending
-}
-
-var _ Cmder = (*XPendingCmd)(nil)
-
-func NewXPendingCmd(ctx context.Context, args ...interface{}) *XPendingCmd {
- return &XPendingCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingCmd) SetVal(val *XPending) {
- cmd.val = val
-}
-
-func (cmd *XPendingCmd) Val() *XPending {
- return cmd.val
-}
-
-func (cmd *XPendingCmd) Result() (*XPending, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- count, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- lower, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- higher, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = &XPending{
- Count: count,
- Lower: lower,
- Higher: higher,
- }
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- for i := int64(0); i < n; i++ {
- _, err = rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
-
- consumerName, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumerPending, err := rd.ReadInt()
- if err != nil {
- return nil, err
- }
-
- if cmd.val.Consumers == nil {
- cmd.val.Consumers = make(map[string]int64)
- }
- cmd.val.Consumers[consumerName] = consumerPending
-
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- if err != nil && err != Nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XPendingExt struct {
- ID string
- Consumer string
- Idle time.Duration
- RetryCount int64
-}
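-
-// Illustrative usage sketch for the extended XPENDING form (assumes an
-// initialized *Client named rdb, a ctx, and example names; XPendingExtArgs
-// fields are as defined in this package version):
-//
-//  entries, err := rdb.XPendingExt(ctx, &redis.XPendingExtArgs{
-//      Stream: "mystream",
-//      Group:  "mygroup",
-//      Start:  "-",
-//      End:    "+",
-//      Count:  10,
-//  }).Result()
-//  if err == nil {
-//      for _, e := range entries {
-//          fmt.Println(e.ID, e.Consumer, e.Idle, e.RetryCount)
-//      }
-//  }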
-
-type XPendingExtCmd struct {
- baseCmd
- val []XPendingExt
-}
-
-var _ Cmder = (*XPendingExtCmd)(nil)
-
-func NewXPendingExtCmd(ctx context.Context, args ...interface{}) *XPendingExtCmd {
- return &XPendingExtCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XPendingExtCmd) SetVal(val []XPendingExt) {
- cmd.val = val
-}
-
-func (cmd *XPendingExtCmd) Val() []XPendingExt {
- return cmd.val
-}
-
-func (cmd *XPendingExtCmd) Result() ([]XPendingExt, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XPendingExtCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XPendingExtCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]XPendingExt, 0, n)
- for i := int64(0); i < n; i++ {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 4 {
- return nil, fmt.Errorf("got %d, wanted 4", n)
- }
-
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- consumer, err := rd.ReadString()
- if err != nil && err != Nil {
- return nil, err
- }
-
- idle, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- retryCount, err := rd.ReadIntReply()
- if err != nil && err != Nil {
- return nil, err
- }
-
- cmd.val = append(cmd.val, XPendingExt{
- ID: id,
- Consumer: consumer,
- Idle: time.Duration(idle) * time.Millisecond,
- RetryCount: retryCount,
- })
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimCmd struct {
- baseCmd
-
- start string
- val []XMessage
-}
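-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// and example names; XAutoClaimArgs fields are as defined in this package
-// version):
-//
-//  msgs, start, err := rdb.XAutoClaim(ctx, &redis.XAutoClaimArgs{
-//      Stream:   "mystream",
-//      Group:    "mygroup",
-//      Consumer: "consumer-1",
-//      MinIdle:  30 * time.Second,
-//      Start:    "0-0",
-//  }).Result()
-//  if err == nil {
-//      fmt.Println(len(msgs), "claimed; next start:", start)
-//  }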
-
-var _ Cmder = (*XAutoClaimCmd)(nil)
-
-func NewXAutoClaimCmd(ctx context.Context, args ...interface{}) *XAutoClaimCmd {
- return &XAutoClaimCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimCmd) SetVal(val []XMessage, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimCmd) Val() (messages []XMessage, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimCmd) Result() (messages []XMessage, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val, err = readXMessageSlice(rd)
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XAutoClaimJustIDCmd struct {
- baseCmd
-
- start string
- val []string
-}
-
-var _ Cmder = (*XAutoClaimJustIDCmd)(nil)
-
-func NewXAutoClaimJustIDCmd(ctx context.Context, args ...interface{}) *XAutoClaimJustIDCmd {
- return &XAutoClaimJustIDCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XAutoClaimJustIDCmd) SetVal(val []string, start string) {
- cmd.val = val
- cmd.start = start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Val() (ids []string, start string) {
- return cmd.val, cmd.start
-}
-
-func (cmd *XAutoClaimJustIDCmd) Result() (ids []string, start string, err error) {
- return cmd.val, cmd.start, cmd.err
-}
-
-func (cmd *XAutoClaimJustIDCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XAutoClaimJustIDCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 2 {
- return nil, fmt.Errorf("got %d, wanted 2", n)
- }
- var err error
-
- cmd.start, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- cmd.val = make([]string, nn)
- for i := 0; i < nn; i++ {
- cmd.val[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoConsumersCmd struct {
- baseCmd
- val []XInfoConsumer
-}
-
-type XInfoConsumer struct {
- Name string
- Pending int64
- Idle int64
-}
-
-var _ Cmder = (*XInfoConsumersCmd)(nil)
-
-func NewXInfoConsumersCmd(ctx context.Context, stream string, group string) *XInfoConsumersCmd {
- return &XInfoConsumersCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "consumers", stream, group},
- },
- }
-}
-
-func (cmd *XInfoConsumersCmd) SetVal(val []XInfoConsumer) {
- cmd.val = val
-}
-
-func (cmd *XInfoConsumersCmd) Val() []XInfoConsumer {
- return cmd.val
-}
-
-func (cmd *XInfoConsumersCmd) Result() ([]XInfoConsumer, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoConsumersCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoConsumersCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoConsumer, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXConsumerInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXConsumerInfo(rd *proto.Reader) (XInfoConsumer, error) {
- var consumer XInfoConsumer
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return consumer, err
- }
- if n != 6 {
- return consumer, fmt.Errorf("redis: got %d elements in XINFO CONSUMERS reply, wanted 6", n)
- }
-
- for i := 0; i < 3; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return consumer, err
- }
-
- switch key {
- case "name":
- consumer.Name = val
- case "pending":
- consumer.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- case "idle":
- consumer.Idle, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return consumer, err
- }
- default:
- return consumer, fmt.Errorf("redis: unexpected content %s in XINFO CONSUMERS reply", key)
- }
- }
-
- return consumer, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoGroupsCmd struct {
- baseCmd
- val []XInfoGroup
-}
-
-type XInfoGroup struct {
- Name string
- Consumers int64
- Pending int64
- LastDeliveredID string
-}
-
-var _ Cmder = (*XInfoGroupsCmd)(nil)
-
-func NewXInfoGroupsCmd(ctx context.Context, stream string) *XInfoGroupsCmd {
- return &XInfoGroupsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "groups", stream},
- },
- }
-}
-
-func (cmd *XInfoGroupsCmd) SetVal(val []XInfoGroup) {
- cmd.val = val
-}
-
-func (cmd *XInfoGroupsCmd) Val() []XInfoGroup {
- return cmd.val
-}
-
-func (cmd *XInfoGroupsCmd) Result() ([]XInfoGroup, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoGroupsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoGroupsCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]XInfoGroup, n)
-
- for i := 0; i < n; i++ {
- cmd.val[i], err = readXGroupInfo(rd)
- if err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func readXGroupInfo(rd *proto.Reader) (XInfoGroup, error) {
- var group XInfoGroup
-
- n, err := rd.ReadArrayLen()
- if err != nil {
- return group, err
- }
- if n != 8 {
- return group, fmt.Errorf("redis: got %d elements in XINFO GROUPS reply, wanted 8", n)
- }
-
- for i := 0; i < 4; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- val, err := rd.ReadString()
- if err != nil {
- return group, err
- }
-
- switch key {
- case "name":
- group.Name = val
- case "consumers":
- group.Consumers, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "pending":
- group.Pending, err = strconv.ParseInt(val, 0, 64)
- if err != nil {
- return group, err
- }
- case "last-delivered-id":
- group.LastDeliveredID = val
- default:
- return group, fmt.Errorf("redis: unexpected content %s in XINFO GROUPS reply", key)
- }
- }
-
- return group, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamCmd struct {
- baseCmd
- val *XInfoStream
-}
-
-type XInfoStream struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- Groups int64
- LastGeneratedID string
- FirstEntry XMessage
- LastEntry XMessage
-}
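-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// and an example stream name):
-//
-//  info, err := rdb.XInfoStream(ctx, "mystream").Result()
-//  if err == nil {
-//      fmt.Println(info.Length, info.Groups, info.LastGeneratedID)
-//  }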
-
-var _ Cmder = (*XInfoStreamCmd)(nil)
-
-func NewXInfoStreamCmd(ctx context.Context, stream string) *XInfoStreamCmd {
- return &XInfoStreamCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: []interface{}{"xinfo", "stream", stream},
- },
- }
-}
-
-func (cmd *XInfoStreamCmd) SetVal(val *XInfoStream) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamCmd) Val() *XInfoStream {
- return cmd.val
-}
-
-func (cmd *XInfoStreamCmd) Result() (*XInfoStream, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadReply(xStreamInfoParser)
- if err != nil {
- return err
- }
- cmd.val = v.(*XInfoStream)
- return nil
-}
-
-func xStreamInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 14 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 14", n)
- }
- var info XInfoStream
- for i := 0; i < 7; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- switch key {
- case "length":
- info.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- info.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- info.RadixTreeNodes, err = rd.ReadIntReply()
- case "groups":
- info.Groups, err = rd.ReadIntReply()
- case "last-generated-id":
- info.LastGeneratedID, err = rd.ReadString()
- case "first-entry":
- info.FirstEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- case "last-entry":
- info.LastEntry, err = readXMessage(rd)
- if err == Nil {
- err = nil
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return nil, err
- }
- }
- return &info, nil
-}
-
-//------------------------------------------------------------------------------
-
-type XInfoStreamFullCmd struct {
- baseCmd
- val *XInfoStreamFull
-}
-
-type XInfoStreamFull struct {
- Length int64
- RadixTreeKeys int64
- RadixTreeNodes int64
- LastGeneratedID string
- Entries []XMessage
- Groups []XInfoStreamGroup
-}
-
-type XInfoStreamGroup struct {
- Name string
- LastDeliveredID string
- PelCount int64
- Pending []XInfoStreamGroupPending
- Consumers []XInfoStreamConsumer
-}
-
-type XInfoStreamGroupPending struct {
- ID string
- Consumer string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-type XInfoStreamConsumer struct {
- Name string
- SeenTime time.Time
- PelCount int64
- Pending []XInfoStreamConsumerPending
-}
-
-type XInfoStreamConsumerPending struct {
- ID string
- DeliveryTime time.Time
- DeliveryCount int64
-}
-
-var _ Cmder = (*XInfoStreamFullCmd)(nil)
-
-func NewXInfoStreamFullCmd(ctx context.Context, args ...interface{}) *XInfoStreamFullCmd {
- return &XInfoStreamFullCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *XInfoStreamFullCmd) SetVal(val *XInfoStreamFull) {
- cmd.val = val
-}
-
-func (cmd *XInfoStreamFullCmd) Val() *XInfoStreamFull {
- return cmd.val
-}
-
-func (cmd *XInfoStreamFullCmd) Result() (*XInfoStreamFull, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *XInfoStreamFullCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *XInfoStreamFullCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if n != 12 {
- return fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 12", n)
- }
-
- cmd.val = &XInfoStreamFull{}
-
- for i := 0; i < 6; i++ {
- key, err := rd.ReadString()
- if err != nil {
- return err
- }
-
- switch key {
- case "length":
- cmd.val.Length, err = rd.ReadIntReply()
- case "radix-tree-keys":
- cmd.val.RadixTreeKeys, err = rd.ReadIntReply()
- case "radix-tree-nodes":
- cmd.val.RadixTreeNodes, err = rd.ReadIntReply()
- case "last-generated-id":
- cmd.val.LastGeneratedID, err = rd.ReadString()
- case "entries":
- cmd.val.Entries, err = readXMessageSlice(rd)
- case "groups":
- cmd.val.Groups, err = readStreamGroups(rd)
- default:
- return fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
- if err != nil {
- return err
- }
- }
- return nil
-}
-
-func readStreamGroups(rd *proto.Reader) ([]XInfoStreamGroup, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- groups := make([]XInfoStreamGroup, 0, n)
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 10 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 10", nn)
- }
-
- group := XInfoStreamGroup{}
-
- for f := 0; f < 5; f++ {
- key, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch key {
- case "name":
- group.Name, err = rd.ReadString()
- case "last-delivered-id":
- group.LastDeliveredID, err = rd.ReadString()
- case "pel-count":
- group.PelCount, err = rd.ReadIntReply()
- case "pending":
- group.Pending, err = readXInfoStreamGroupPending(rd)
- case "consumers":
- group.Consumers, err = readXInfoStreamConsumers(rd)
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", key)
- }
-
- if err != nil {
- return nil, err
- }
- }
-
- groups = append(groups, group)
- }
-
- return groups, nil
-}
-
-func readXInfoStreamGroupPending(rd *proto.Reader) ([]XInfoStreamGroupPending, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- pending := make([]XInfoStreamGroupPending, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 4 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 4", nn)
- }
-
- p := XInfoStreamGroupPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- p.Consumer, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- pending = append(pending, p)
- }
-
- return pending, nil
-}
-
-func readXInfoStreamConsumers(rd *proto.Reader) ([]XInfoStreamConsumer, error) {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- consumers := make([]XInfoStreamConsumer, 0, n)
-
- for i := 0; i < n; i++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 8 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM FULL reply,"+
- "wanted 8", nn)
- }
-
- c := XInfoStreamConsumer{}
-
- for f := 0; f < 4; f++ {
- cKey, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- switch cKey {
- case "name":
- c.Name, err = rd.ReadString()
- case "seen-time":
- seen, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- c.SeenTime = time.Unix(seen/1000, seen%1000*int64(time.Millisecond))
- case "pel-count":
- c.PelCount, err = rd.ReadIntReply()
- case "pending":
- pendingNumber, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
-
- c.Pending = make([]XInfoStreamConsumerPending, 0, pendingNumber)
-
- for pn := 0; pn < pendingNumber; pn++ {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if nn != 3 {
- return nil, fmt.Errorf("redis: got %d elements in XINFO STREAM reply,"+
- "wanted 3", nn)
- }
-
- p := XInfoStreamConsumerPending{}
-
- p.ID, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- delivery, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- p.DeliveryTime = time.Unix(delivery/1000, delivery%1000*int64(time.Millisecond))
-
- p.DeliveryCount, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- c.Pending = append(c.Pending, p)
- }
- default:
- return nil, fmt.Errorf("redis: unexpected content %s "+
- "in XINFO STREAM reply", cKey)
- }
- if err != nil {
- return nil, err
- }
- }
- consumers = append(consumers, c)
- }
-
- return consumers, nil
-}
-
-//------------------------------------------------------------------------------
-
-type ZSliceCmd struct {
- baseCmd
-
- val []Z
-}
-
-var _ Cmder = (*ZSliceCmd)(nil)
-
-func NewZSliceCmd(ctx context.Context, args ...interface{}) *ZSliceCmd {
- return &ZSliceCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZSliceCmd) SetVal(val []Z) {
- cmd.val = val
-}
-
-func (cmd *ZSliceCmd) Val() []Z {
- return cmd.val
-}
-
-func (cmd *ZSliceCmd) Result() ([]Z, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *ZSliceCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZSliceCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]Z, n/2)
- for i := 0; i < len(cmd.val); i++ {
- member, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- score, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = Z{
- Member: member,
- Score: score,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ZWithKeyCmd struct {
- baseCmd
-
- val *ZWithKey
-}
-
-var _ Cmder = (*ZWithKeyCmd)(nil)
-
-func NewZWithKeyCmd(ctx context.Context, args ...interface{}) *ZWithKeyCmd {
- return &ZWithKeyCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ZWithKeyCmd) SetVal(val *ZWithKey) {
- cmd.val = val
-}
-
-func (cmd *ZWithKeyCmd) Val() *ZWithKey {
- return cmd.val
-}
-
-func (cmd *ZWithKeyCmd) Result() (*ZWithKey, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ZWithKeyCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ZWithKeyCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- if n != 3 {
- return nil, fmt.Errorf("got %d elements, expected 3", n)
- }
-
- cmd.val = &ZWithKey{}
- var err error
-
- cmd.val.Key, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Member, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- cmd.val.Score, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type ScanCmd struct {
- baseCmd
-
- page []string
- cursor uint64
-
- process cmdable
-}
-
-var _ Cmder = (*ScanCmd)(nil)
-
-func NewScanCmd(ctx context.Context, process cmdable, args ...interface{}) *ScanCmd {
- return &ScanCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- process: process,
- }
-}
-
-func (cmd *ScanCmd) SetVal(page []string, cursor uint64) {
- cmd.page = page
- cmd.cursor = cursor
-}
-
-func (cmd *ScanCmd) Val() (keys []string, cursor uint64) {
- return cmd.page, cmd.cursor
-}
-
-func (cmd *ScanCmd) Result() (keys []string, cursor uint64, err error) {
- return cmd.page, cmd.cursor, cmd.err
-}
-
-func (cmd *ScanCmd) String() string {
- return cmdString(cmd, cmd.page)
-}
-
-func (cmd *ScanCmd) readReply(rd *proto.Reader) (err error) {
- cmd.page, cmd.cursor, err = rd.ReadScanReply()
- return err
-}
-
-// Iterator creates a new ScanIterator.
-func (cmd *ScanCmd) Iterator() *ScanIterator {
- return &ScanIterator{
- cmd: cmd,
- }
-}
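-
-// Illustrative usage sketch: ScanIterator hides cursor handling behind
-// Next/Val (assumes an initialized *Client named rdb, a ctx, and an example
-// key pattern):
-//
-//  iter := rdb.Scan(ctx, 0, "prefix:*", 0).Iterator()
-//  for iter.Next(ctx) {
-//      fmt.Println("key:", iter.Val())
-//  }
-//  if err := iter.Err(); err != nil {
-//      panic(err)
-//  }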
-
-//------------------------------------------------------------------------------
-
-type ClusterNode struct {
- ID string
- Addr string
-}
-
-type ClusterSlot struct {
- Start int
- End int
- Nodes []ClusterNode
-}
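-
-// Illustrative usage sketch (assumes an initialized client named rdb
-// connected to a Redis Cluster and a ctx):
-//
-//  slots, err := rdb.ClusterSlots(ctx).Result()
-//  if err == nil {
-//      for _, s := range slots {
-//          fmt.Println(s.Start, s.End, len(s.Nodes))
-//      }
-//  }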
-
-type ClusterSlotsCmd struct {
- baseCmd
-
- val []ClusterSlot
-}
-
-var _ Cmder = (*ClusterSlotsCmd)(nil)
-
-func NewClusterSlotsCmd(ctx context.Context, args ...interface{}) *ClusterSlotsCmd {
- return &ClusterSlotsCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *ClusterSlotsCmd) SetVal(val []ClusterSlot) {
- cmd.val = val
-}
-
-func (cmd *ClusterSlotsCmd) Val() []ClusterSlot {
- return cmd.val
-}
-
-func (cmd *ClusterSlotsCmd) Result() ([]ClusterSlot, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *ClusterSlotsCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *ClusterSlotsCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]ClusterSlot, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 2 {
- err := fmt.Errorf("redis: got %d elements in cluster info, expected at least 2", n)
- return nil, err
- }
-
- start, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- end, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- nodes := make([]ClusterNode, n-2)
- for j := 0; j < len(nodes); j++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 && n != 3 {
- err := fmt.Errorf("got %d elements in cluster info address, expected 2 or 3", n)
- return nil, err
- }
-
- ip, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- port, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- nodes[j].Addr = net.JoinHostPort(ip, port)
-
- if n == 3 {
- id, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- nodes[j].ID = id
- }
- }
-
- cmd.val[i] = ClusterSlot{
- Start: int(start),
- End: int(end),
- Nodes: nodes,
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-// GeoLocation is used with GeoAdd to add geospatial location.
-type GeoLocation struct {
- Name string
- Longitude, Latitude, Dist float64
- GeoHash int64
-}
-
-// GeoRadiusQuery is used with GeoRadius to query geospatial index.
-type GeoRadiusQuery struct {
- Radius float64
- // Can be m, km, ft, or mi. Default is km.
- Unit string
- WithCoord bool
- WithDist bool
- WithGeoHash bool
- Count int
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Store string
- StoreDist string
-}
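-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// and an example geo key with members):
-//
-//  locs, err := rdb.GeoRadius(ctx, "Sicily", 15, 37, &redis.GeoRadiusQuery{
-//      Radius:    200,
-//      Unit:      "km",
-//      WithCoord: true,
-//      WithDist:  true,
-//  }).Result()
-//  if err == nil {
-//      for _, l := range locs {
-//          fmt.Println(l.Name, l.Dist, l.Longitude, l.Latitude)
-//      }
-//  }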
-
-type GeoLocationCmd struct {
- baseCmd
-
- q *GeoRadiusQuery
- locations []GeoLocation
-}
-
-var _ Cmder = (*GeoLocationCmd)(nil)
-
-func NewGeoLocationCmd(ctx context.Context, q *GeoRadiusQuery, args ...interface{}) *GeoLocationCmd {
- return &GeoLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: geoLocationArgs(q, args...),
- },
- q: q,
- }
-}
-
-func geoLocationArgs(q *GeoRadiusQuery, args ...interface{}) []interface{} {
- args = append(args, q.Radius)
- if q.Unit != "" {
- args = append(args, q.Unit)
- } else {
- args = append(args, "km")
- }
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithGeoHash {
- args = append(args, "withhash")
- }
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- }
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
- if q.Store != "" {
- args = append(args, "store")
- args = append(args, q.Store)
- }
- if q.StoreDist != "" {
- args = append(args, "storedist")
- args = append(args, q.StoreDist)
- }
- return args
-}
-
-func (cmd *GeoLocationCmd) SetVal(locations []GeoLocation) {
- cmd.locations = locations
-}
-
-func (cmd *GeoLocationCmd) Val() []GeoLocation {
- return cmd.locations
-}
-
-func (cmd *GeoLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.locations, cmd.err
-}
-
-func (cmd *GeoLocationCmd) String() string {
- return cmdString(cmd, cmd.locations)
-}
-
-func (cmd *GeoLocationCmd) readReply(rd *proto.Reader) error {
- v, err := rd.ReadArrayReply(newGeoLocationSliceParser(cmd.q))
- if err != nil {
- return err
- }
- cmd.locations = v.([]GeoLocation)
- return nil
-}
-
-func newGeoLocationSliceParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- locs := make([]GeoLocation, 0, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(newGeoLocationParser(q))
- if err != nil {
- return nil, err
- }
- switch vv := v.(type) {
- case string:
- locs = append(locs, GeoLocation{
- Name: vv,
- })
- case *GeoLocation:
- // TODO: avoid copying
- locs = append(locs, *vv)
- default:
- return nil, fmt.Errorf("got %T, expected string or *GeoLocation", v)
- }
- }
- return locs, nil
- }
-}
-
-func newGeoLocationParser(q *GeoRadiusQuery) proto.MultiBulkParse {
- return func(rd *proto.Reader, n int64) (interface{}, error) {
- var loc GeoLocation
- var err error
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- if q.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithGeoHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- }
- if q.WithCoord {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n != 2 {
- return nil, fmt.Errorf("got %d coordinates, expected 2", n)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
- }
-
- return &loc, nil
- }
-}
-
-//------------------------------------------------------------------------------
-
-// GeoSearchQuery is used for GEOSearch/GEOSearchStore command query.
-type GeoSearchQuery struct {
- Member string
-
- // Latitude and Longitude when using FromLonLat option.
- Longitude float64
- Latitude float64
-
- // Distance and unit when using ByRadius option.
- // Can use m, km, ft, or mi. Default is km.
- Radius float64
- RadiusUnit string
-
- // Height, width and unit when using ByBox option.
- // Can be m, km, ft, or mi. Default is km.
- BoxWidth float64
- BoxHeight float64
- BoxUnit string
-
- // Can be ASC or DESC. Default is no sort order.
- Sort string
- Count int
- CountAny bool
-}
-
-type GeoSearchLocationQuery struct {
- GeoSearchQuery
-
- WithCoord bool
- WithDist bool
- WithHash bool
-}
-
-type GeoSearchStoreQuery struct {
- GeoSearchQuery
-
- // When using the StoreDist option, the command stores the items in a
- // sorted set populated with their distance from the center of the circle or box,
- // as a floating-point number, in the same unit specified for that shape.
- StoreDist bool
-}
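-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// an example geo key, and that the matching GeoSearchLocation command is
-// available in this client version):
-//
-//  locs, err := rdb.GeoSearchLocation(ctx, "Sicily", &redis.GeoSearchLocationQuery{
-//      GeoSearchQuery: redis.GeoSearchQuery{
-//          Longitude:  15,
-//          Latitude:   37,
-//          Radius:     200,
-//          RadiusUnit: "km",
-//      },
-//      WithCoord: true,
-//      WithDist:  true,
-//  }).Result()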
-
-func geoSearchLocationArgs(q *GeoSearchLocationQuery, args []interface{}) []interface{} {
- args = geoSearchArgs(&q.GeoSearchQuery, args)
-
- if q.WithCoord {
- args = append(args, "withcoord")
- }
- if q.WithDist {
- args = append(args, "withdist")
- }
- if q.WithHash {
- args = append(args, "withhash")
- }
-
- return args
-}
-
-func geoSearchArgs(q *GeoSearchQuery, args []interface{}) []interface{} {
- if q.Member != "" {
- args = append(args, "frommember", q.Member)
- } else {
- args = append(args, "fromlonlat", q.Longitude, q.Latitude)
- }
-
- if q.Radius > 0 {
- if q.RadiusUnit == "" {
- q.RadiusUnit = "km"
- }
- args = append(args, "byradius", q.Radius, q.RadiusUnit)
- } else {
- if q.BoxUnit == "" {
- q.BoxUnit = "km"
- }
- args = append(args, "bybox", q.BoxWidth, q.BoxHeight, q.BoxUnit)
- }
-
- if q.Sort != "" {
- args = append(args, q.Sort)
- }
-
- if q.Count > 0 {
- args = append(args, "count", q.Count)
- if q.CountAny {
- args = append(args, "any")
- }
- }
-
- return args
-}
-
-type GeoSearchLocationCmd struct {
- baseCmd
-
- opt *GeoSearchLocationQuery
- val []GeoLocation
-}
-
-var _ Cmder = (*GeoSearchLocationCmd)(nil)
-
-func NewGeoSearchLocationCmd(
- ctx context.Context, opt *GeoSearchLocationQuery, args ...interface{},
-) *GeoSearchLocationCmd {
- return &GeoSearchLocationCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- opt: opt,
- }
-}
-
-func (cmd *GeoSearchLocationCmd) SetVal(val []GeoLocation) {
- cmd.val = val
-}
-
-func (cmd *GeoSearchLocationCmd) Val() []GeoLocation {
- return cmd.val
-}
-
-func (cmd *GeoSearchLocationCmd) Result() ([]GeoLocation, error) {
- return cmd.val, cmd.err
-}
-
-func (cmd *GeoSearchLocationCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoSearchLocationCmd) readReply(rd *proto.Reader) error {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- cmd.val = make([]GeoLocation, n)
- for i := 0; i < n; i++ {
- _, err = rd.ReadArrayLen()
- if err != nil {
- return err
- }
-
- var loc GeoLocation
-
- loc.Name, err = rd.ReadString()
- if err != nil {
- return err
- }
- if cmd.opt.WithDist {
- loc.Dist, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithHash {
- loc.GeoHash, err = rd.ReadIntReply()
- if err != nil {
- return err
- }
- }
- if cmd.opt.WithCoord {
- nn, err := rd.ReadArrayLen()
- if err != nil {
- return err
- }
- if nn != 2 {
- return fmt.Errorf("got %d coordinates, expected 2", nn)
- }
-
- loc.Longitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- loc.Latitude, err = rd.ReadFloatReply()
- if err != nil {
- return err
- }
- }
-
- cmd.val[i] = loc
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-type GeoPos struct {
- Longitude, Latitude float64
-}
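-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// and an example geo key); entries are nil for members that do not exist:
-//
-//  positions, err := rdb.GeoPos(ctx, "Sicily", "Palermo", "NonExisting").Result()
-//  if err == nil {
-//      for _, p := range positions {
-//          if p != nil {
-//              fmt.Println(p.Longitude, p.Latitude)
-//          }
-//      }
-//  }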
-
-type GeoPosCmd struct {
- baseCmd
-
- val []*GeoPos
-}
-
-var _ Cmder = (*GeoPosCmd)(nil)
-
-func NewGeoPosCmd(ctx context.Context, args ...interface{}) *GeoPosCmd {
- return &GeoPosCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *GeoPosCmd) SetVal(val []*GeoPos) {
- cmd.val = val
-}
-
-func (cmd *GeoPosCmd) Val() []*GeoPos {
- return cmd.val
-}
-
-func (cmd *GeoPosCmd) Result() ([]*GeoPos, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *GeoPosCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *GeoPosCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]*GeoPos, n)
- for i := 0; i < len(cmd.val); i++ {
- i := i
- _, err := rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- longitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- latitude, err := rd.ReadFloatReply()
- if err != nil {
- return nil, err
- }
-
- cmd.val[i] = &GeoPos{
- Longitude: longitude,
- Latitude: latitude,
- }
- return nil, nil
- })
- if err != nil {
- if err == Nil {
- cmd.val[i] = nil
- continue
- }
- return nil, err
- }
- }
- return nil, nil
- })
- return err
-}
-
-//------------------------------------------------------------------------------
-
-type CommandInfo struct {
- Name string
- Arity int8
- Flags []string
- ACLFlags []string
- FirstKeyPos int8
- LastKeyPos int8
- StepCount int8
- ReadOnly bool
-}
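-
-// Illustrative usage sketch (assumes an initialized *Client named rdb and a
-// ctx); the resulting map is keyed by command name as reported by the server:
-//
-//  infos, err := rdb.Command(ctx).Result()
-//  if err == nil {
-//      if info, ok := infos["get"]; ok {
-//          fmt.Println(info.Arity, info.Flags, info.ReadOnly)
-//      }
-//  }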
-
-type CommandsInfoCmd struct {
- baseCmd
-
- val map[string]*CommandInfo
-}
-
-var _ Cmder = (*CommandsInfoCmd)(nil)
-
-func NewCommandsInfoCmd(ctx context.Context, args ...interface{}) *CommandsInfoCmd {
- return &CommandsInfoCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *CommandsInfoCmd) SetVal(val map[string]*CommandInfo) {
- cmd.val = val
-}
-
-func (cmd *CommandsInfoCmd) Val() map[string]*CommandInfo {
- return cmd.val
-}
-
-func (cmd *CommandsInfoCmd) Result() (map[string]*CommandInfo, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *CommandsInfoCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *CommandsInfoCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make(map[string]*CommandInfo, n)
- for i := int64(0); i < n; i++ {
- v, err := rd.ReadReply(commandInfoParser)
- if err != nil {
- return nil, err
- }
- vv := v.(*CommandInfo)
- cmd.val[vv.Name] = vv
- }
- return nil, nil
- })
- return err
-}
-
-func commandInfoParser(rd *proto.Reader, n int64) (interface{}, error) {
- const numArgRedis5 = 6
- const numArgRedis6 = 7
-
- switch n {
- case numArgRedis5, numArgRedis6:
- // continue
- default:
- return nil, fmt.Errorf("redis: got %d elements in COMMAND reply, wanted 7", n)
- }
-
- var cmd CommandInfo
- var err error
-
- cmd.Name, err = rd.ReadString()
- if err != nil {
- return nil, err
- }
-
- arity, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.Arity = int8(arity)
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.Flags = make([]string, n)
- for i := 0; i < len(cmd.Flags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.Flags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.Flags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- firstKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.FirstKeyPos = int8(firstKeyPos)
-
- lastKeyPos, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.LastKeyPos = int8(lastKeyPos)
-
- stepCount, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- cmd.StepCount = int8(stepCount)
-
- for _, flag := range cmd.Flags {
- if flag == "readonly" {
- cmd.ReadOnly = true
- break
- }
- }
-
- if n == numArgRedis5 {
- return &cmd, nil
- }
-
- _, err = rd.ReadReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.ACLFlags = make([]string, n)
- for i := 0; i < len(cmd.ACLFlags); i++ {
- switch s, err := rd.ReadString(); {
- case err == Nil:
- cmd.ACLFlags[i] = ""
- case err != nil:
- return nil, err
- default:
- cmd.ACLFlags[i] = s
- }
- }
- return nil, nil
- })
- if err != nil {
- return nil, err
- }
-
- return &cmd, nil
-}
-
-//------------------------------------------------------------------------------
-
-type cmdsInfoCache struct {
- fn func(ctx context.Context) (map[string]*CommandInfo, error)
-
- once internal.Once
- cmds map[string]*CommandInfo
-}
-
-func newCmdsInfoCache(fn func(ctx context.Context) (map[string]*CommandInfo, error)) *cmdsInfoCache {
- return &cmdsInfoCache{
- fn: fn,
- }
-}
-
-func (c *cmdsInfoCache) Get(ctx context.Context) (map[string]*CommandInfo, error) {
- err := c.once.Do(func() error {
- cmds, err := c.fn(ctx)
- if err != nil {
- return err
- }
-
- // Extensions have cmd names in upper case. Convert them to lower case.
- for k, v := range cmds {
- lower := internal.ToLower(k)
- if lower != k {
- cmds[lower] = v
- }
- }
-
- c.cmds = cmds
- return nil
- })
- return c.cmds, err
-}
-
-//------------------------------------------------------------------------------
-
-type SlowLog struct {
- ID int64
- Time time.Time
- Duration time.Duration
- Args []string
- // The following fields are optional and are emitted only by Redis 4.0 or greater:
- // https://redis.io/commands/slowlog#output-format
- ClientAddr string
- ClientName string
-}
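-
-// Illustrative usage sketch (assumes an initialized *Client named rdb, a ctx,
-// and that this client version exposes SlowLogGet):
-//
-//  logs, err := rdb.SlowLogGet(ctx, 10).Result()
-//  if err == nil {
-//      for _, l := range logs {
-//          fmt.Println(l.ID, l.Time, l.Duration, l.Args)
-//      }
-//  }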
-
-type SlowLogCmd struct {
- baseCmd
-
- val []SlowLog
-}
-
-var _ Cmder = (*SlowLogCmd)(nil)
-
-func NewSlowLogCmd(ctx context.Context, args ...interface{}) *SlowLogCmd {
- return &SlowLogCmd{
- baseCmd: baseCmd{
- ctx: ctx,
- args: args,
- },
- }
-}
-
-func (cmd *SlowLogCmd) SetVal(val []SlowLog) {
- cmd.val = val
-}
-
-func (cmd *SlowLogCmd) Val() []SlowLog {
- return cmd.val
-}
-
-func (cmd *SlowLogCmd) Result() ([]SlowLog, error) {
- return cmd.Val(), cmd.Err()
-}
-
-func (cmd *SlowLogCmd) String() string {
- return cmdString(cmd, cmd.val)
-}
-
-func (cmd *SlowLogCmd) readReply(rd *proto.Reader) error {
- _, err := rd.ReadArrayReply(func(rd *proto.Reader, n int64) (interface{}, error) {
- cmd.val = make([]SlowLog, n)
- for i := 0; i < len(cmd.val); i++ {
- n, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if n < 4 {
- err := fmt.Errorf("redis: got %d elements in slowlog get, expected at least 4", n)
- return nil, err
- }
-
- id, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
-
- createdAt, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- createdAtTime := time.Unix(createdAt, 0)
-
- costs, err := rd.ReadIntReply()
- if err != nil {
- return nil, err
- }
- costsDuration := time.Duration(costs) * time.Microsecond
-
- cmdLen, err := rd.ReadArrayLen()
- if err != nil {
- return nil, err
- }
- if cmdLen < 1 {
- err := fmt.Errorf("redis: got %d elements commands reply in slowlog get, expected at least 1", cmdLen)
- return nil, err
- }
-
- cmdString := make([]string, cmdLen)
- for i := 0; i < cmdLen; i++ {
- cmdString[i], err = rd.ReadString()
- if err != nil {
- return nil, err
- }
- }
-
- var address, name string
- for i := 4; i < n; i++ {
- str, err := rd.ReadString()
- if err != nil {
- return nil, err
- }
- if i == 4 {
- address = str
- } else if i == 5 {
- name = str
- }
- }
-
- cmd.val[i] = SlowLog{
- ID: id,
- Time: createdAtTime,
- Duration: costsDuration,
- Args: cmdString,
- ClientAddr: address,
- ClientName: name,
- }
- }
- return nil, nil
- })
- return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/commands.go b/vendor/github.com/go-redis/redis/v8/commands.go
deleted file mode 100644
index bbfe089df1..0000000000
--- a/vendor/github.com/go-redis/redis/v8/commands.go
+++ /dev/null
@@ -1,3475 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "io"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
-// otherwise the server returns an error: (error) ERR syntax error.
-// For example:
-//
-// rdb.Set(ctx, key, value, redis.KeepTTL)
-const KeepTTL = -1
-
-func usePrecise(dur time.Duration) bool {
- return dur < time.Second || dur%time.Second != 0
-}
-
-func formatMs(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Millisecond {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1ms",
- dur, time.Millisecond,
- )
- return 1
- }
- return int64(dur / time.Millisecond)
-}
-
-func formatSec(ctx context.Context, dur time.Duration) int64 {
- if dur > 0 && dur < time.Second {
- internal.Logger.Printf(
- ctx,
- "specified duration is %s, but minimal supported value is %s - truncating to 1s",
- dur, time.Second,
- )
- return 1
- }
- return int64(dur / time.Second)
-}
-
-func appendArgs(dst, src []interface{}) []interface{} {
- if len(src) == 1 {
- return appendArg(dst, src[0])
- }
-
- dst = append(dst, src...)
- return dst
-}
-
-func appendArg(dst []interface{}, arg interface{}) []interface{} {
- switch arg := arg.(type) {
- case []string:
- for _, s := range arg {
- dst = append(dst, s)
- }
- return dst
- case []interface{}:
- dst = append(dst, arg...)
- return dst
- case map[string]interface{}:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- case map[string]string:
- for k, v := range arg {
- dst = append(dst, k, v)
- }
- return dst
- default:
- return append(dst, arg)
- }
-}
-
-type Cmdable interface {
- Pipeline() Pipeliner
- Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
-
- TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error)
- TxPipeline() Pipeliner
-
- Command(ctx context.Context) *CommandsInfoCmd
- ClientGetName(ctx context.Context) *StringCmd
- Echo(ctx context.Context, message interface{}) *StringCmd
- Ping(ctx context.Context) *StatusCmd
- Quit(ctx context.Context) *StatusCmd
- Del(ctx context.Context, keys ...string) *IntCmd
- Unlink(ctx context.Context, keys ...string) *IntCmd
- Dump(ctx context.Context, key string) *StringCmd
- Exists(ctx context.Context, keys ...string) *IntCmd
- Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- Keys(ctx context.Context, pattern string) *StringSliceCmd
- Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd
- Move(ctx context.Context, key string, db int) *BoolCmd
- ObjectRefCount(ctx context.Context, key string) *IntCmd
- ObjectEncoding(ctx context.Context, key string) *StringCmd
- ObjectIdleTime(ctx context.Context, key string) *DurationCmd
- Persist(ctx context.Context, key string) *BoolCmd
- PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd
- PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd
- PTTL(ctx context.Context, key string) *DurationCmd
- RandomKey(ctx context.Context) *StringCmd
- Rename(ctx context.Context, key, newkey string) *StatusCmd
- RenameNX(ctx context.Context, key, newkey string) *BoolCmd
- Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd
- Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd
- SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd
- SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd
- Touch(ctx context.Context, keys ...string) *IntCmd
- TTL(ctx context.Context, key string) *DurationCmd
- Type(ctx context.Context, key string) *StatusCmd
- Append(ctx context.Context, key, value string) *IntCmd
- Decr(ctx context.Context, key string) *IntCmd
- DecrBy(ctx context.Context, key string, decrement int64) *IntCmd
- Get(ctx context.Context, key string) *StringCmd
- GetRange(ctx context.Context, key string, start, end int64) *StringCmd
- GetSet(ctx context.Context, key string, value interface{}) *StringCmd
- GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd
- GetDel(ctx context.Context, key string) *StringCmd
- Incr(ctx context.Context, key string) *IntCmd
- IncrBy(ctx context.Context, key string, value int64) *IntCmd
- IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd
- MGet(ctx context.Context, keys ...string) *SliceCmd
- MSet(ctx context.Context, values ...interface{}) *StatusCmd
- MSetNX(ctx context.Context, values ...interface{}) *BoolCmd
- Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd
- // TODO: rename to SetEx
- SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd
- SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd
- SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd
- StrLen(ctx context.Context, key string) *IntCmd
- Copy(ctx context.Context, sourceKey string, destKey string, db int, replace bool) *IntCmd
-
- GetBit(ctx context.Context, key string, offset int64) *IntCmd
- SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd
- BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd
- BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd
- BitOpNot(ctx context.Context, destKey string, key string) *IntCmd
- BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd
- BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd
-
- Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd
- ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd
- SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
- ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd
-
- HDel(ctx context.Context, key string, fields ...string) *IntCmd
- HExists(ctx context.Context, key, field string) *BoolCmd
- HGet(ctx context.Context, key, field string) *StringCmd
- HGetAll(ctx context.Context, key string) *StringStringMapCmd
- HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd
- HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd
- HKeys(ctx context.Context, key string) *StringSliceCmd
- HLen(ctx context.Context, key string) *IntCmd
- HMGet(ctx context.Context, key string, fields ...string) *SliceCmd
- HSet(ctx context.Context, key string, values ...interface{}) *IntCmd
- HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd
- HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd
- HVals(ctx context.Context, key string) *StringSliceCmd
- HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd
-
- BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd
- BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd
- LIndex(ctx context.Context, key string, index int64) *StringCmd
- LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd
- LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd
- LLen(ctx context.Context, key string) *IntCmd
- LPop(ctx context.Context, key string) *StringCmd
- LPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- LPos(ctx context.Context, key string, value string, args LPosArgs) *IntCmd
- LPosCount(ctx context.Context, key string, value string, count int64, args LPosArgs) *IntSliceCmd
- LPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd
- LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd
- LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd
- RPop(ctx context.Context, key string) *StringCmd
- RPopCount(ctx context.Context, key string, count int) *StringSliceCmd
- RPopLPush(ctx context.Context, source, destination string) *StringCmd
- RPush(ctx context.Context, key string, values ...interface{}) *IntCmd
- RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd
- LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd
- BLMove(ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration) *StringCmd
-
- SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd
- SCard(ctx context.Context, key string) *IntCmd
- SDiff(ctx context.Context, keys ...string) *StringSliceCmd
- SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SInter(ctx context.Context, keys ...string) *StringSliceCmd
- SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd
- SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd
- SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd
- SMembers(ctx context.Context, key string) *StringSliceCmd
- SMembersMap(ctx context.Context, key string) *StringStructMapCmd
- SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd
- SPop(ctx context.Context, key string) *StringCmd
- SPopN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRandMember(ctx context.Context, key string) *StringCmd
- SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd
- SRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- SUnion(ctx context.Context, keys ...string) *StringSliceCmd
- SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- XAdd(ctx context.Context, a *XAddArgs) *StringCmd
- XDel(ctx context.Context, stream string, ids ...string) *IntCmd
- XLen(ctx context.Context, stream string) *IntCmd
- XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd
- XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd
- XRevRange(ctx context.Context, stream string, start, stop string) *XMessageSliceCmd
- XRevRangeN(ctx context.Context, stream string, start, stop string, count int64) *XMessageSliceCmd
- XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd
- XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd
- XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd
- XGroupDestroy(ctx context.Context, stream, group string) *IntCmd
- XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd
- XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd
- XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd
- XPending(ctx context.Context, stream, group string) *XPendingCmd
- XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd
- XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd
- XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd
- XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd
- XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd
-
- // TODO: XTrim and XTrimApprox remove in v9.
- XTrim(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd
- XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd
- XTrimMinID(ctx context.Context, key string, minID string) *IntCmd
- XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd
- XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd
- XInfoStream(ctx context.Context, key string) *XInfoStreamCmd
- XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd
- XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd
-
- BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
- BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd
-
- // TODO: remove
- // ZAddCh
- // ZIncr
- // ZAddNXCh
- // ZAddXXCh
- // ZIncrNX
- // ZIncrXX
- // in v9.
- // use ZAddArgs and ZAddArgsIncr.
-
- ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd
- ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd
- ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd
- ZIncr(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd
- ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd
- ZCard(ctx context.Context, key string) *IntCmd
- ZCount(ctx context.Context, key, min, max string) *IntCmd
- ZLexCount(ctx context.Context, key, min, max string) *IntCmd
- ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd
- ZInter(ctx context.Context, store *ZStore) *StringSliceCmd
- ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd
- ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd
- ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd
- ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd
- ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd
- ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd
- ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd
- ZRank(ctx context.Context, key, member string) *IntCmd
- ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd
- ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd
- ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd
- ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd
- ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd
- ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd
- ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd
- ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd
- ZRevRank(ctx context.Context, key, member string) *IntCmd
- ZScore(ctx context.Context, key, member string) *FloatCmd
- ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd
- ZUnion(ctx context.Context, store ZStore) *StringSliceCmd
- ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd
- ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd
- ZDiff(ctx context.Context, keys ...string) *StringSliceCmd
- ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd
- ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd
-
- PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd
- PFCount(ctx context.Context, keys ...string) *IntCmd
- PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd
-
- BgRewriteAOF(ctx context.Context) *StatusCmd
- BgSave(ctx context.Context) *StatusCmd
- ClientKill(ctx context.Context, ipPort string) *StatusCmd
- ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd
- ClientList(ctx context.Context) *StringCmd
- ClientPause(ctx context.Context, dur time.Duration) *BoolCmd
- ClientID(ctx context.Context) *IntCmd
- ConfigGet(ctx context.Context, parameter string) *SliceCmd
- ConfigResetStat(ctx context.Context) *StatusCmd
- ConfigSet(ctx context.Context, parameter, value string) *StatusCmd
- ConfigRewrite(ctx context.Context) *StatusCmd
- DBSize(ctx context.Context) *IntCmd
- FlushAll(ctx context.Context) *StatusCmd
- FlushAllAsync(ctx context.Context) *StatusCmd
- FlushDB(ctx context.Context) *StatusCmd
- FlushDBAsync(ctx context.Context) *StatusCmd
- Info(ctx context.Context, section ...string) *StringCmd
- LastSave(ctx context.Context) *IntCmd
- Save(ctx context.Context) *StatusCmd
- Shutdown(ctx context.Context) *StatusCmd
- ShutdownSave(ctx context.Context) *StatusCmd
- ShutdownNoSave(ctx context.Context) *StatusCmd
- SlaveOf(ctx context.Context, host, port string) *StatusCmd
- Time(ctx context.Context) *TimeCmd
- DebugObject(ctx context.Context, key string) *StringCmd
- ReadOnly(ctx context.Context) *StatusCmd
- ReadWrite(ctx context.Context) *StatusCmd
- MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd
-
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptFlush(ctx context.Context) *StatusCmd
- ScriptKill(ctx context.Context) *StatusCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-
- Publish(ctx context.Context, channel string, message interface{}) *IntCmd
- PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd
- PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd
- PubSubNumPat(ctx context.Context) *IntCmd
-
- ClusterSlots(ctx context.Context) *ClusterSlotsCmd
- ClusterNodes(ctx context.Context) *StringCmd
- ClusterMeet(ctx context.Context, host, port string) *StatusCmd
- ClusterForget(ctx context.Context, nodeID string) *StatusCmd
- ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd
- ClusterResetSoft(ctx context.Context) *StatusCmd
- ClusterResetHard(ctx context.Context) *StatusCmd
- ClusterInfo(ctx context.Context) *StringCmd
- ClusterKeySlot(ctx context.Context, key string) *IntCmd
- ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd
- ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd
- ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd
- ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd
- ClusterSaveConfig(ctx context.Context) *StatusCmd
- ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd
- ClusterFailover(ctx context.Context) *StatusCmd
- ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd
- ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd
-
- GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd
- GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd
- GeoRadius(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusStore(ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery) *IntCmd
- GeoRadiusByMember(ctx context.Context, key, member string, query *GeoRadiusQuery) *GeoLocationCmd
- GeoRadiusByMemberStore(ctx context.Context, key, member string, query *GeoRadiusQuery) *IntCmd
- GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd
- GeoSearchLocation(ctx context.Context, key string, q *GeoSearchLocationQuery) *GeoSearchLocationCmd
- GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd
- GeoDist(ctx context.Context, key string, member1, member2, unit string) *FloatCmd
- GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd
-}
-
-type StatefulCmdable interface {
- Cmdable
- Auth(ctx context.Context, password string) *StatusCmd
- AuthACL(ctx context.Context, username, password string) *StatusCmd
- Select(ctx context.Context, index int) *StatusCmd
- SwapDB(ctx context.Context, index1, index2 int) *StatusCmd
- ClientSetName(ctx context.Context, name string) *BoolCmd
-}
-
-var (
- _ Cmdable = (*Client)(nil)
- _ Cmdable = (*Tx)(nil)
- _ Cmdable = (*Ring)(nil)
- _ Cmdable = (*ClusterClient)(nil)
-)
-
-type cmdable func(ctx context.Context, cmd Cmder) error
-
-type statefulCmdable func(ctx context.Context, cmd Cmder) error
-
-//------------------------------------------------------------------------------
-
-func (c statefulCmdable) Auth(ctx context.Context, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// AuthACL performs an AUTH command using the given username and password.
-// It should be used to authenticate the current connection as one of the users defined in the ACL list
-// when connecting to a Redis instance of version 6.0 or greater that is using the Redis ACL system.
-func (c statefulCmdable) AuthACL(ctx context.Context, username, password string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "auth", username, password)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Wait(ctx context.Context, numSlaves int, timeout time.Duration) *IntCmd {
- cmd := NewIntCmd(ctx, "wait", numSlaves, int(timeout/time.Millisecond))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) Select(ctx context.Context, index int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "select", index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c statefulCmdable) SwapDB(ctx context.Context, index1, index2 int) *StatusCmd {
- cmd := NewStatusCmd(ctx, "swapdb", index1, index2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientSetName assigns a name to the connection.
-func (c statefulCmdable) ClientSetName(ctx context.Context, name string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "setname", name)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Command(ctx context.Context) *CommandsInfoCmd {
- cmd := NewCommandsInfoCmd(ctx, "command")
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientGetName returns the name of the connection.
-func (c cmdable) ClientGetName(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "getname")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Echo(ctx context.Context, message interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "echo", message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Ping(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "ping")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Quit(_ context.Context) *StatusCmd {
- panic("not implemented")
-}
-
-func (c cmdable) Del(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "del"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Unlink(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unlink"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Dump(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "dump", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Exists(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "exists"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Expire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "")
-}
-
-func (c cmdable) ExpireNX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "NX")
-}
-
-func (c cmdable) ExpireXX(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "XX")
-}
-
-func (c cmdable) ExpireGT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "GT")
-}
-
-func (c cmdable) ExpireLT(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- return c.expire(ctx, key, expiration, "LT")
-}
-
-func (c cmdable) expire(
- ctx context.Context, key string, expiration time.Duration, mode string,
-) *BoolCmd {
- args := make([]interface{}, 3, 4)
- args[0] = "expire"
- args[1] = key
- args[2] = formatSec(ctx, expiration)
- if mode != "" {
- args = append(args, mode)
- }
-
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
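-
-// Illustrative sketch, not part of the original file: assuming this sits in the
-// same package and rdb is an initialized *Client, the mode-specific wrappers
-// above simply append NX/XX/GT/LT to EXPIRE (the modes need redis-server >= 7.0);
-// the key name is made up for the example.
-func exampleExpireGT(ctx context.Context, rdb *Client) (bool, error) {
-	// Sends: expire session 600 GT (only ever extends the TTL, never shortens it).
-	return rdb.ExpireGT(ctx, "session", 10 * time.Minute).Result()
-}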
-
-func (c cmdable) ExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(ctx, "expireat", key, tm.Unix())
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Keys(ctx context.Context, pattern string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "keys", pattern)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Migrate(ctx context.Context, host, port, key string, db int, timeout time.Duration) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "migrate",
- host,
- port,
- key,
- db,
- formatMs(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Move(ctx context.Context, key string, db int) *BoolCmd {
- cmd := NewBoolCmd(ctx, "move", key, db)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectRefCount(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "object", "refcount", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectEncoding(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "object", "encoding", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ObjectIdleTime(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "object", "idletime", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Persist(ctx context.Context, key string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "persist", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpire(ctx context.Context, key string, expiration time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "pexpire", key, formatMs(ctx, expiration))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PExpireAt(ctx context.Context, key string, tm time.Time) *BoolCmd {
- cmd := NewBoolCmd(
- ctx,
- "pexpireat",
- key,
- tm.UnixNano()/int64(time.Millisecond),
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PTTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Millisecond, "pttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RandomKey(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "randomkey")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Rename(ctx context.Context, key, newkey string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "rename", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RenameNX(ctx context.Context, key, newkey string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "renamenx", key, newkey)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Restore(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RestoreReplace(ctx context.Context, key string, ttl time.Duration, value string) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "restore",
- key,
- formatMs(ctx, ttl),
- value,
- "replace",
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type Sort struct {
- By string
- Offset, Count int64
- Get []string
- Order string
- Alpha bool
-}
-
-func (sort *Sort) args(key string) []interface{} {
- args := []interface{}{"sort", key}
- if sort.By != "" {
- args = append(args, "by", sort.By)
- }
- if sort.Offset != 0 || sort.Count != 0 {
- args = append(args, "limit", sort.Offset, sort.Count)
- }
- for _, get := range sort.Get {
- args = append(args, "get", get)
- }
- if sort.Order != "" {
- args = append(args, sort.Order)
- }
- if sort.Alpha {
- args = append(args, "alpha")
- }
- return args
-}
-
-func (c cmdable) Sort(ctx context.Context, key string, sort *Sort) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
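-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client in this package, a Sort call combining BY, LIMIT, GET,
-// ORDER and ALPHA; the key and patterns below are made up for the example.
-func exampleSort(ctx context.Context, rdb *Client) ([]string, error) {
-	// Sends: sort mylist by weight_* limit 0 10 get object_* desc alpha
-	return rdb.Sort(ctx, "mylist", &Sort{
-		By:    "weight_*",
-		Count: 10,
-		Get:   []string{"object_*"},
-		Order: "desc",
-		Alpha: true,
-	}).Result()
-}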
-
-func (c cmdable) SortStore(ctx context.Context, key, store string, sort *Sort) *IntCmd {
- args := sort.args(key)
- if store != "" {
- args = append(args, "store", store)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SortInterfaces(ctx context.Context, key string, sort *Sort) *SliceCmd {
- cmd := NewSliceCmd(ctx, sort.args(key)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Touch(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, len(keys)+1)
- args[0] = "touch"
- for i, key := range keys {
- args[i+1] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) TTL(ctx context.Context, key string) *DurationCmd {
- cmd := NewDurationCmd(ctx, time.Second, "ttl", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Type(ctx context.Context, key string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "type", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Append(ctx context.Context, key, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "append", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Decr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "decr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DecrBy(ctx context.Context, key string, decrement int64) *IntCmd {
- cmd := NewIntCmd(ctx, "decrby", key, decrement)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Get Redis `GET key` command. It returns the redis.Nil error when the key does not exist.
-func (c cmdable) Get(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "get", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetRange(ctx context.Context, key string, start, end int64) *StringCmd {
- cmd := NewStringCmd(ctx, "getrange", key, start, end)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GetSet(ctx context.Context, key string, value interface{}) *StringCmd {
- cmd := NewStringCmd(ctx, "getset", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GetEx An expiration of zero removes the TTL associated with the key (i.e. GETEX key persist).
-// Requires Redis >= 6.2.0.
-func (c cmdable) GetEx(ctx context.Context, key string, expiration time.Duration) *StringCmd {
- args := make([]interface{}, 0, 4)
- args = append(args, "getex", key)
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == 0 {
- args = append(args, "persist")
- }
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
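-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, GetEx with a positive expiration refreshes the TTL,
-// while an expiration of zero sends GETEX key PERSIST; the key is made up.
-func exampleGetEx(ctx context.Context, rdb *Client) (string, error) {
-	// Sends: getex token ex 60
-	if _, err := rdb.GetEx(ctx, "token", time.Minute).Result(); err != nil {
-		return "", err
-	}
-	// Sends: getex token persist (keeps the value, removes the TTL).
-	return rdb.GetEx(ctx, "token", 0).Result()
-}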
-
-// GetDel Redis `GETDEL key` command; requires redis-server version >= 6.2.0.
-func (c cmdable) GetDel(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "getdel", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Incr(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "incr", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrBy(ctx context.Context, key string, value int64) *IntCmd {
- cmd := NewIntCmd(ctx, "incrby", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) IncrByFloat(ctx context.Context, key string, value float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "incrbyfloat", key, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MGet(ctx context.Context, keys ...string) *SliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "mget"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// MSet is like Set but accepts multiple values:
-// - MSet("key1", "value1", "key2", "value2")
-// - MSet([]string{"key1", "value1", "key2", "value2"})
-// - MSet(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSet(ctx context.Context, values ...interface{}) *StatusCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "mset"
- args = appendArgs(args, values)
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
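-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, the three argument shapes listed above produce an
-// equivalent MSET command (the map form does not guarantee pair order);
-// the keys and values are made up.
-func exampleMSet(ctx context.Context, rdb *Client) error {
-	if err := rdb.MSet(ctx, "k1", "v1", "k2", "v2").Err(); err != nil {
-		return err
-	}
-	if err := rdb.MSet(ctx, []string{"k1", "v1", "k2", "v2"}).Err(); err != nil {
-		return err
-	}
-	return rdb.MSet(ctx, map[string]interface{}{"k1": "v1", "k2": "v2"}).Err()
-}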
-
-// MSetNX is like SetNX but accepts multiple values:
-// - MSetNX("key1", "value1", "key2", "value2")
-// - MSetNX([]string{"key1", "value1", "key2", "value2"})
-// - MSetNX(map[string]interface{}{"key1": "value1", "key2": "value2"})
-func (c cmdable) MSetNX(ctx context.Context, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 1, 1+len(values))
- args[0] = "msetnx"
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Set Redis `SET key value [expiration]` command.
-// Use expiration for `SETEX`-like behavior.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) Set(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- args := make([]interface{}, 3, 5)
- args[0] = "set"
- args[1] = key
- args[2] = value
- if expiration > 0 {
- if usePrecise(expiration) {
- args = append(args, "px", formatMs(ctx, expiration))
- } else {
- args = append(args, "ex", formatSec(ctx, expiration))
- }
- } else if expiration == KeepTTL {
- args = append(args, "keepttl")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
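-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, how the expiration argument of Set maps onto the wire
-// command; the key and values are made up for the example.
-func exampleSet(ctx context.Context, rdb *Client) error {
-	// 10*time.Second needs no sub-second precision, so "set k v ex 10" is sent.
-	if err := rdb.Set(ctx, "k", "v", 10 * time.Second).Err(); err != nil {
-		return err
-	}
-	// 1500*time.Millisecond needs millisecond precision, so "px 1500" is sent.
-	if err := rdb.Set(ctx, "k", "v", 1500 * time.Millisecond).Err(); err != nil {
-		return err
-	}
-	// KeepTTL keeps the existing TTL (redis-server >= 6.0): "set k v keepttl".
-	return rdb.Set(ctx, "k", "v", KeepTTL).Err()
-}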
-
-// SetArgs provides arguments for the SetArgs function.
-type SetArgs struct {
- // Mode can be `NX` or `XX` or empty.
- Mode string
-
- // Zero `TTL` or `Expiration` means that the key has no expiration time.
- TTL time.Duration
- ExpireAt time.Time
-
- // When Get is true, the command returns the old value stored at key, or nil when key did not exist.
- Get bool
-
-	// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
- // otherwise you will receive an error: (error) ERR syntax error.
- KeepTTL bool
-}
-
-// SetArgs supports all the options that the SET command supports.
-// It is the alternative to the Set function when you want
-// to have more control over the options.
-func (c cmdable) SetArgs(ctx context.Context, key string, value interface{}, a SetArgs) *StatusCmd {
- args := []interface{}{"set", key, value}
-
- if a.KeepTTL {
- args = append(args, "keepttl")
- }
-
- if !a.ExpireAt.IsZero() {
- args = append(args, "exat", a.ExpireAt.Unix())
- }
- if a.TTL > 0 {
- if usePrecise(a.TTL) {
- args = append(args, "px", formatMs(ctx, a.TTL))
- } else {
- args = append(args, "ex", formatSec(ctx, a.TTL))
- }
- }
-
- if a.Mode != "" {
- args = append(args, a.Mode)
- }
-
- if a.Get {
- args = append(args, "get")
- }
-
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
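-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, SetArgs combining an NX mode with a TTL in a single SET
-// call; the key and value are made up for the example.
-func exampleSetArgs(ctx context.Context, rdb *Client) error {
-	// Sends: set lock owner-1 ex 30 nx
-	return rdb.SetArgs(ctx, "lock", "owner-1", SetArgs{
-		Mode: "nx",
-		TTL:  30 * time.Second,
-	}).Err()
-}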
-
-// SetEX Redis `SETEX key expiration value` command.
-func (c cmdable) SetEX(ctx context.Context, key string, value interface{}, expiration time.Duration) *StatusCmd {
- cmd := NewStatusCmd(ctx, "setex", key, formatSec(ctx, expiration), value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetNX Redis `SET key value [expiration] NX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetNX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- // Use old `SETNX` to support old Redis versions.
- cmd = NewBoolCmd(ctx, "setnx", key, value)
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "nx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "nx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "nx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SetXX Redis `SET key value [expiration] XX` command.
-//
-// Zero expiration means the key has no expiration time.
-// KeepTTL is a Redis KEEPTTL option to keep the existing TTL. It requires redis-server version >= 6.0;
-// otherwise you will receive an error: (error) ERR syntax error.
-func (c cmdable) SetXX(ctx context.Context, key string, value interface{}, expiration time.Duration) *BoolCmd {
- var cmd *BoolCmd
- switch expiration {
- case 0:
- cmd = NewBoolCmd(ctx, "set", key, value, "xx")
- case KeepTTL:
- cmd = NewBoolCmd(ctx, "set", key, value, "keepttl", "xx")
- default:
- if usePrecise(expiration) {
- cmd = NewBoolCmd(ctx, "set", key, value, "px", formatMs(ctx, expiration), "xx")
- } else {
- cmd = NewBoolCmd(ctx, "set", key, value, "ex", formatSec(ctx, expiration), "xx")
- }
- }
-
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetRange(ctx context.Context, key string, offset int64, value string) *IntCmd {
- cmd := NewIntCmd(ctx, "setrange", key, offset, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) StrLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "strlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Copy(ctx context.Context, sourceKey, destKey string, db int, replace bool) *IntCmd {
- args := []interface{}{"copy", sourceKey, destKey, "DB", db}
- if replace {
- args = append(args, "REPLACE")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GetBit(ctx context.Context, key string, offset int64) *IntCmd {
- cmd := NewIntCmd(ctx, "getbit", key, offset)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SetBit(ctx context.Context, key string, offset int64, value int) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "setbit",
- key,
- offset,
- value,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-type BitCount struct {
- Start, End int64
-}
-
-func (c cmdable) BitCount(ctx context.Context, key string, bitCount *BitCount) *IntCmd {
- args := []interface{}{"bitcount", key}
- if bitCount != nil {
- args = append(
- args,
- bitCount.Start,
- bitCount.End,
- )
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) bitOp(ctx context.Context, op, destKey string, keys ...string) *IntCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "bitop"
- args[1] = op
- args[2] = destKey
- for i, key := range keys {
- args[3+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitOpAnd(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "and", destKey, keys...)
-}
-
-func (c cmdable) BitOpOr(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "or", destKey, keys...)
-}
-
-func (c cmdable) BitOpXor(ctx context.Context, destKey string, keys ...string) *IntCmd {
- return c.bitOp(ctx, "xor", destKey, keys...)
-}
-
-func (c cmdable) BitOpNot(ctx context.Context, destKey string, key string) *IntCmd {
- return c.bitOp(ctx, "not", destKey, key)
-}
-
-func (c cmdable) BitPos(ctx context.Context, key string, bit int64, pos ...int64) *IntCmd {
- args := make([]interface{}, 3+len(pos))
- args[0] = "bitpos"
- args[1] = key
- args[2] = bit
- switch len(pos) {
- case 0:
- case 1:
- args[3] = pos[0]
- case 2:
- args[3] = pos[0]
- args[4] = pos[1]
- default:
- panic("too many arguments")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BitField(ctx context.Context, key string, args ...interface{}) *IntSliceCmd {
- a := make([]interface{}, 0, 2+len(args))
- a = append(a, "bitfield")
- a = append(a, key)
- a = append(a, args...)
- cmd := NewIntSliceCmd(ctx, a...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Scan(ctx context.Context, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
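-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, driving the SCAN cursor by hand until the server
-// returns cursor 0; the match pattern is made up for the example.
-func exampleScan(ctx context.Context, rdb *Client) ([]string, error) {
-	var keys []string
-	var cursor uint64
-	for {
-		page, next, err := rdb.Scan(ctx, cursor, "user:*", 100).Result()
-		if err != nil {
-			return nil, err
-		}
-		keys = append(keys, page...)
-		if next == 0 {
-			return keys, nil
-		}
-		cursor = next
-	}
-}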
-
-func (c cmdable) ScanType(ctx context.Context, cursor uint64, match string, count int64, keyType string) *ScanCmd {
- args := []interface{}{"scan", cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- if keyType != "" {
- args = append(args, "type", keyType)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"sscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"hscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScan(ctx context.Context, key string, cursor uint64, match string, count int64) *ScanCmd {
- args := []interface{}{"zscan", key, cursor}
- if match != "" {
- args = append(args, "match", match)
- }
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewScanCmd(ctx, c, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) HDel(ctx context.Context, key string, fields ...string) *IntCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hdel"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HExists(ctx context.Context, key, field string) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hexists", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGet(ctx context.Context, key, field string) *StringCmd {
- cmd := NewStringCmd(ctx, "hget", key, field)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HGetAll(ctx context.Context, key string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "hgetall", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrBy(ctx context.Context, key, field string, incr int64) *IntCmd {
- cmd := NewIntCmd(ctx, "hincrby", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HIncrByFloat(ctx context.Context, key, field string, incr float64) *FloatCmd {
- cmd := NewFloatCmd(ctx, "hincrbyfloat", key, field, incr)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HKeys(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hkeys", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "hlen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HMGet returns the values for the specified fields in the hash stored at key.
-// It returns an interface{} to distinguish between empty string and nil value.
-func (c cmdable) HMGet(ctx context.Context, key string, fields ...string) *SliceCmd {
- args := make([]interface{}, 2+len(fields))
- args[0] = "hmget"
- args[1] = key
- for i, field := range fields {
- args[2+i] = field
- }
- cmd := NewSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HSet accepts values in following formats:
-// - HSet("myhash", "key1", "value1", "key2", "value2")
-// - HSet("myhash", []string{"key1", "value1", "key2", "value2"})
-// - HSet("myhash", map[string]interface{}{"key1": "value1", "key2": "value2"})
-//
-// Note that it requires Redis v4 for multiple field/value pairs support.
-func (c cmdable) HSet(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
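-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, the map form of HSet; note that a map does not preserve
-// field order on the wire. The hash name and fields are made up.
-func exampleHSet(ctx context.Context, rdb *Client) (int64, error) {
-	// Sends: hset user:1 name alice age 30 (field order may vary).
-	return rdb.HSet(ctx, "user:1", map[string]interface{}{
-		"name": "alice",
-		"age":  30,
-	}).Result()
-}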
-
-// HMSet is a deprecated version of HSet left for compatibility with Redis 3.
-func (c cmdable) HMSet(ctx context.Context, key string, values ...interface{}) *BoolCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "hmset"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewBoolCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HSetNX(ctx context.Context, key, field string, value interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "hsetnx", key, field, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) HVals(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "hvals", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// HRandField Redis `HRANDFIELD key count [WITHVALUES]` command; requires redis-server version >= 6.2.0.
-func (c cmdable) HRandField(ctx context.Context, key string, count int, withValues bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "hrandfield", key, count)
- if withValues {
- args = append(args, "withvalues")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BLPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "blpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPop(ctx context.Context, timeout time.Duration, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "brpop"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(keys)+1] = formatSec(ctx, timeout)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BRPopLPush(ctx context.Context, source, destination string, timeout time.Duration) *StringCmd {
- cmd := NewStringCmd(
- ctx,
- "brpoplpush",
- source,
- destination,
- formatSec(ctx, timeout),
- )
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LIndex(ctx context.Context, key string, index int64) *StringCmd {
- cmd := NewStringCmd(ctx, "lindex", key, index)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsert(ctx context.Context, key, op string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, op, pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertBefore(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "before", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LInsertAfter(ctx context.Context, key string, pivot, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "linsert", key, "after", pivot, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LLen(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "llen", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "lpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "lpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type LPosArgs struct {
- Rank, MaxLen int64
-}
-
-func (c cmdable) LPos(ctx context.Context, key string, value string, a LPosArgs) *IntCmd {
- args := []interface{}{"lpos", key, value}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
-
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPosCount(ctx context.Context, key string, value string, count int64, a LPosArgs) *IntSliceCmd {
- args := []interface{}{"lpos", key, value, "count", count}
- if a.Rank != 0 {
- args = append(args, "rank", a.Rank)
- }
- if a.MaxLen != 0 {
- args = append(args, "maxlen", a.MaxLen)
- }
- cmd := NewIntSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "lpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(
- ctx,
- "lrange",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LRem(ctx context.Context, key string, count int64, value interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "lrem", key, count, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LSet(ctx context.Context, key string, index int64, value interface{}) *StatusCmd {
- cmd := NewStatusCmd(ctx, "lset", key, index, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LTrim(ctx context.Context, key string, start, stop int64) *StatusCmd {
- cmd := NewStatusCmd(
- ctx,
- "ltrim",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopCount(ctx context.Context, key string, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "rpop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPopLPush(ctx context.Context, source, destination string) *StringCmd {
- cmd := NewStringCmd(ctx, "rpoplpush", source, destination)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPush(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpush"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) RPushX(ctx context.Context, key string, values ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(values))
- args[0] = "rpushx"
- args[1] = key
- args = appendArgs(args, values)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LMove(ctx context.Context, source, destination, srcpos, destpos string) *StringCmd {
- cmd := NewStringCmd(ctx, "lmove", source, destination, srcpos, destpos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BLMove(
- ctx context.Context, source, destination, srcpos, destpos string, timeout time.Duration,
-) *StringCmd {
- cmd := NewStringCmd(ctx, "blmove", source, destination, srcpos, destpos, formatSec(ctx, timeout))
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) SAdd(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "sadd"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "scard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sdiff"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sdiffstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInter(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sinter"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SInterStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sinterstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SIsMember(ctx context.Context, key string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "sismember", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMIsMember Redis `SMISMEMBER key member [member ...]` command.
-func (c cmdable) SMIsMember(ctx context.Context, key string, members ...interface{}) *BoolSliceCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "smismember"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembers Redis `SMEMBERS key` command output as a slice.
-func (c cmdable) SMembers(ctx context.Context, key string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SMembersMap Redis `SMEMBERS key` command output as a map.
-func (c cmdable) SMembersMap(ctx context.Context, key string) *StringStructMapCmd {
- cmd := NewStringStructMapCmd(ctx, "smembers", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SMove(ctx context.Context, source, destination string, member interface{}) *BoolCmd {
- cmd := NewBoolCmd(ctx, "smove", source, destination, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPop Redis `SPOP key` command.
-func (c cmdable) SPop(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "spop", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SPopN Redis `SPOP key count` command.
-func (c cmdable) SPopN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "spop", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMember Redis `SRANDMEMBER key` command.
-func (c cmdable) SRandMember(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "srandmember", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// SRandMemberN Redis `SRANDMEMBER key count` command.
-func (c cmdable) SRandMemberN(ctx context.Context, key string, count int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "srandmember", key, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "srem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnion(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "sunion"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SUnionStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "sunionstore"
- args[1] = destination
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// XAddArgs accepts values in the following formats:
-// - XAddArgs.Values = []interface{}{"key1", "value1", "key2", "value2"}
-//   - XAddArgs.Values = []string{"key1", "value1", "key2", "value2"}
-// - XAddArgs.Values = map[string]interface{}{"key1": "value1", "key2": "value2"}
-//
-// Note that map will not preserve the order of key-value pairs.
-// MaxLen/MaxLenApprox and MinID are mutually exclusive; only one of them can be used.
-type XAddArgs struct {
- Stream string
- NoMkStream bool
- MaxLen int64 // MAXLEN N
-
- // Deprecated: use MaxLen+Approx, remove in v9.
- MaxLenApprox int64 // MAXLEN ~ N
-
- MinID string
- // Approx causes MaxLen and MinID to use "~" matcher (instead of "=").
- Approx bool
- Limit int64
- ID string
- Values interface{}
-}
-
-// XAdd Note: a.Limit maps to the server-side LIMIT option, which has a known bug; review the issue below before relying on it.
-// issue: https://github.com/redis/redis/issues/9046
-func (c cmdable) XAdd(ctx context.Context, a *XAddArgs) *StringCmd {
- args := make([]interface{}, 0, 11)
- args = append(args, "xadd", a.Stream)
- if a.NoMkStream {
- args = append(args, "nomkstream")
- }
- switch {
- case a.MaxLen > 0:
- if a.Approx {
- args = append(args, "maxlen", "~", a.MaxLen)
- } else {
- args = append(args, "maxlen", a.MaxLen)
- }
- case a.MaxLenApprox > 0:
- // TODO remove in v9.
- args = append(args, "maxlen", "~", a.MaxLenApprox)
- case a.MinID != "":
- if a.Approx {
- args = append(args, "minid", "~", a.MinID)
- } else {
- args = append(args, "minid", a.MinID)
- }
- }
- if a.Limit > 0 {
- args = append(args, "limit", a.Limit)
- }
- if a.ID != "" {
- args = append(args, a.ID)
- } else {
- args = append(args, "*")
- }
- args = appendArg(args, a.Values)
-
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
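-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, XAdd with an auto-generated ID ("*") and approximate
-// MAXLEN trimming via MaxLen together with Approx; the stream name is made up.
-func exampleXAdd(ctx context.Context, rdb *Client) (string, error) {
-	// Sends: xadd events maxlen ~ 1000 * temp 21.5
-	return rdb.XAdd(ctx, &XAddArgs{
-		Stream: "events",
-		MaxLen: 1000,
-		Approx: true,
-		Values: map[string]interface{}{"temp": 21.5},
-	}).Result()
-}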
-
-func (c cmdable) XDel(ctx context.Context, stream string, ids ...string) *IntCmd {
- args := []interface{}{"xdel", stream}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XLen(ctx context.Context, stream string) *IntCmd {
- cmd := NewIntCmd(ctx, "xlen", stream)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRange(ctx context.Context, stream, start, stop string) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XRevRangeN(ctx context.Context, stream, start, stop string, count int64) *XMessageSliceCmd {
- cmd := NewXMessageSliceCmd(ctx, "xrevrange", stream, start, stop, "count", count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadArgs struct {
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
-}
-
-func (c cmdable) XRead(ctx context.Context, a *XReadArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 6+len(a.Streams))
- args = append(args, "xread")
-
- keyPos := int8(1)
- if a.Count > 0 {
- args = append(args, "count")
- args = append(args, a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block")
- args = append(args, int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
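-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, a blocking XRead that waits up to five seconds for
-// entries newer than the "$" cursor; the stream name is made up.
-func exampleXRead(ctx context.Context, rdb *Client) ([]XStream, error) {
-	// Sends: xread count 10 block 5000 streams events $
-	return rdb.XRead(ctx, &XReadArgs{
-		Streams: []string{"events", "$"},
-		Count:   10,
-		Block:   5 * time.Second,
-	}).Result()
-}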
-
-func (c cmdable) XReadStreams(ctx context.Context, streams ...string) *XStreamSliceCmd {
- return c.XRead(ctx, &XReadArgs{
- Streams: streams,
- Block: -1,
- })
-}
-
-func (c cmdable) XGroupCreate(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateMkStream(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "create", stream, group, start, "mkstream")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupSetID(ctx context.Context, stream, group, start string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "xgroup", "setid", stream, group, start)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDestroy(ctx context.Context, stream, group string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "destroy", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupCreateConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "createconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XGroupDelConsumer(ctx context.Context, stream, group, consumer string) *IntCmd {
- cmd := NewIntCmd(ctx, "xgroup", "delconsumer", stream, group, consumer)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XReadGroupArgs struct {
- Group string
- Consumer string
- Streams []string // list of streams and ids, e.g. stream1 stream2 id1 id2
- Count int64
- Block time.Duration
- NoAck bool
-}
-
-func (c cmdable) XReadGroup(ctx context.Context, a *XReadGroupArgs) *XStreamSliceCmd {
- args := make([]interface{}, 0, 10+len(a.Streams))
- args = append(args, "xreadgroup", "group", a.Group, a.Consumer)
-
- keyPos := int8(4)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- keyPos += 2
- }
- if a.Block >= 0 {
- args = append(args, "block", int64(a.Block/time.Millisecond))
- keyPos += 2
- }
- if a.NoAck {
- args = append(args, "noack")
- keyPos++
- }
- args = append(args, "streams")
- keyPos++
- for _, s := range a.Streams {
- args = append(args, s)
- }
-
- cmd := NewXStreamSliceCmd(ctx, args...)
- if a.Block >= 0 {
- cmd.setReadTimeout(a.Block)
- }
- cmd.SetFirstKeyPos(keyPos)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAck(ctx context.Context, stream, group string, ids ...string) *IntCmd {
- args := []interface{}{"xack", stream, group}
- for _, id := range ids {
- args = append(args, id)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XPending(ctx context.Context, stream, group string) *XPendingCmd {
- cmd := NewXPendingCmd(ctx, "xpending", stream, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XPendingExtArgs struct {
- Stream string
- Group string
- Idle time.Duration
- Start string
- End string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XPendingExt(ctx context.Context, a *XPendingExtArgs) *XPendingExtCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "xpending", a.Stream, a.Group)
- if a.Idle != 0 {
- args = append(args, "idle", formatMs(ctx, a.Idle))
- }
- args = append(args, a.Start, a.End, a.Count)
- if a.Consumer != "" {
- args = append(args, a.Consumer)
- }
- cmd := NewXPendingExtCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-type XAutoClaimArgs struct {
- Stream string
- Group string
- MinIdle time.Duration
- Start string
- Count int64
- Consumer string
-}
-
-func (c cmdable) XAutoClaim(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimCmd {
- args := xAutoClaimArgs(ctx, a)
- cmd := NewXAutoClaimCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XAutoClaimJustID(ctx context.Context, a *XAutoClaimArgs) *XAutoClaimJustIDCmd {
- args := xAutoClaimArgs(ctx, a)
- args = append(args, "justid")
- cmd := NewXAutoClaimJustIDCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xAutoClaimArgs(ctx context.Context, a *XAutoClaimArgs) []interface{} {
- args := make([]interface{}, 0, 8)
- args = append(args, "xautoclaim", a.Stream, a.Group, a.Consumer, formatMs(ctx, a.MinIdle), a.Start)
- if a.Count > 0 {
- args = append(args, "count", a.Count)
- }
- return args
-}
-
-type XClaimArgs struct {
- Stream string
- Group string
- Consumer string
- MinIdle time.Duration
- Messages []string
-}
-
-func (c cmdable) XClaim(ctx context.Context, a *XClaimArgs) *XMessageSliceCmd {
- args := xClaimArgs(a)
- cmd := NewXMessageSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XClaimJustID(ctx context.Context, a *XClaimArgs) *StringSliceCmd {
- args := xClaimArgs(a)
- args = append(args, "justid")
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func xClaimArgs(a *XClaimArgs) []interface{} {
- args := make([]interface{}, 0, 5+len(a.Messages))
- args = append(args,
- "xclaim",
- a.Stream,
- a.Group, a.Consumer,
- int64(a.MinIdle/time.Millisecond))
- for _, id := range a.Messages {
- args = append(args, id)
- }
- return args
-}
-
-// xTrim If approx is true, the "~" argument is added; otherwise the default exact ("=") trimming is used.
-// example:
-// XTRIM key MAXLEN/MINID threshold LIMIT limit.
-// XTRIM key MAXLEN/MINID ~ threshold LIMIT limit.
-// If the redis-server version is lower than 6.2, limit must be set to 0.
-func (c cmdable) xTrim(
- ctx context.Context, key, strategy string,
- approx bool, threshold interface{}, limit int64,
-) *IntCmd {
- args := make([]interface{}, 0, 7)
- args = append(args, "xtrim", key, strategy)
- if approx {
- args = append(args, "~")
- }
- args = append(args, threshold)
- if limit > 0 {
- args = append(args, "limit", limit)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// Deprecated: use XTrimMaxLen, remove in v9.
-func (c cmdable) XTrim(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// Deprecated: use XTrimMaxLenApprox, remove in v9.
-func (c cmdable) XTrimApprox(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, 0)
-}
-
-// XTrimMaxLen trims the stream to an exact maximum length (no `~`); `limit` cannot be used.
-// cmd: XTRIM key MAXLEN maxLen
-func (c cmdable) XTrimMaxLen(ctx context.Context, key string, maxLen int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", false, maxLen, 0)
-}
-
-// XTrimMaxLenApprox LIMIT has a known server-side bug; review the issue below before relying on it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MAXLEN ~ maxLen LIMIT limit
-func (c cmdable) XTrimMaxLenApprox(ctx context.Context, key string, maxLen, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "maxlen", true, maxLen, limit)
-}
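-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, approximate trimming to roughly 10000 entries while
-// deleting at most 100 entries per call; the stream name is made up.
-func exampleXTrim(ctx context.Context, rdb *Client) (int64, error) {
-	// Sends: xtrim events maxlen ~ 10000 limit 100 (redis-server >= 6.2)
-	return rdb.XTrimMaxLenApprox(ctx, "events", 10000, 100).Result()
-}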
-
-// XTrimMinID trims the stream by an exact minimum ID (no `~`); `limit` cannot be used.
-// cmd: XTRIM key MINID minID
-func (c cmdable) XTrimMinID(ctx context.Context, key string, minID string) *IntCmd {
- return c.xTrim(ctx, key, "minid", false, minID, 0)
-}
-
-// XTrimMinIDApprox LIMIT has a known server-side bug; review the issue below before relying on it.
-// issue: https://github.com/redis/redis/issues/9046
-// cmd: XTRIM key MINID ~ minID LIMIT limit
-func (c cmdable) XTrimMinIDApprox(ctx context.Context, key string, minID string, limit int64) *IntCmd {
- return c.xTrim(ctx, key, "minid", true, minID, limit)
-}
-
-func (c cmdable) XInfoConsumers(ctx context.Context, key string, group string) *XInfoConsumersCmd {
- cmd := NewXInfoConsumersCmd(ctx, key, group)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoGroups(ctx context.Context, key string) *XInfoGroupsCmd {
- cmd := NewXInfoGroupsCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) XInfoStream(ctx context.Context, key string) *XInfoStreamCmd {
- cmd := NewXInfoStreamCmd(ctx, key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// XInfoStreamFull XINFO STREAM FULL [COUNT count]
-// redis-server >= 6.0.
-func (c cmdable) XInfoStreamFull(ctx context.Context, key string, count int) *XInfoStreamFullCmd {
- args := make([]interface{}, 0, 6)
- args = append(args, "xinfo", "stream", key, "full")
- if count > 0 {
- args = append(args, "count", count)
- }
- cmd := NewXInfoStreamFullCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Z represents a sorted set member.
-type Z struct {
- Score float64
- Member interface{}
-}
-
-// ZWithKey represents a sorted set member, including the name of the key it was popped from.
-type ZWithKey struct {
- Z
- Key string
-}
-
-// ZStore is used as an arg to ZInter/ZInterStore and ZUnion/ZUnionStore.
-type ZStore struct {
- Keys []string
- Weights []float64
- // Can be SUM, MIN or MAX.
- Aggregate string
-}
-
-func (z ZStore) len() (n int) {
- n = len(z.Keys)
- if len(z.Weights) > 0 {
- n += 1 + len(z.Weights)
- }
- if z.Aggregate != "" {
- n += 2
- }
- return n
-}
-
-func (z ZStore) appendArgs(args []interface{}) []interface{} {
- for _, key := range z.Keys {
- args = append(args, key)
- }
- if len(z.Weights) > 0 {
- args = append(args, "weights")
- for _, weights := range z.Weights {
- args = append(args, weights)
- }
- }
- if z.Aggregate != "" {
- args = append(args, "aggregate", z.Aggregate)
- }
- return args
-}
-
-// BZPopMax Redis `BZPOPMAX key [key ...] timeout` command.
-func (c cmdable) BZPopMax(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmax"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// BZPopMin Redis `BZPOPMIN key [key ...] timeout` command.
-func (c cmdable) BZPopMin(ctx context.Context, timeout time.Duration, keys ...string) *ZWithKeyCmd {
- args := make([]interface{}, 1+len(keys)+1)
- args[0] = "bzpopmin"
- for i, key := range keys {
- args[1+i] = key
- }
- args[len(args)-1] = formatSec(ctx, timeout)
- cmd := NewZWithKeyCmd(ctx, args...)
- cmd.setReadTimeout(timeout)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAddArgs WARN: The GT, LT and NX options are mutually exclusive.
-type ZAddArgs struct {
- NX bool
- XX bool
- LT bool
- GT bool
- Ch bool
- Members []Z
-}
-
-func (c cmdable) zAddArgs(key string, args ZAddArgs, incr bool) []interface{} {
- a := make([]interface{}, 0, 6+2*len(args.Members))
- a = append(a, "zadd", key)
-
- // The GT, LT and NX options are mutually exclusive.
- if args.NX {
- a = append(a, "nx")
- } else {
- if args.XX {
- a = append(a, "xx")
- }
- if args.GT {
- a = append(a, "gt")
- } else if args.LT {
- a = append(a, "lt")
- }
- }
- if args.Ch {
- a = append(a, "ch")
- }
- if incr {
- a = append(a, "incr")
- }
- for _, m := range args.Members {
- a = append(a, m.Score)
- a = append(a, m.Member)
- }
- return a
-}
-
-func (c cmdable) ZAddArgs(ctx context.Context, key string, args ZAddArgs) *IntCmd {
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZAddArgsIncr(ctx context.Context, key string, args ZAddArgs) *FloatCmd {
- cmd := NewFloatCmd(ctx, c.zAddArgs(key, args, true)...)
- _ = c(ctx, cmd)
- return cmd
-}
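-
-// Illustrative sketch, not part of the original file: assuming rdb is an
-// initialized *Client, ZAddArgs with GT and CH, so existing scores are only
-// ever raised and the reply counts changed members; the key and members are
-// made up for the example.
-func exampleZAddArgs(ctx context.Context, rdb *Client) (int64, error) {
-	// Sends: zadd board gt ch 42 alice 7 bob
-	return rdb.ZAddArgs(ctx, "board", ZAddArgs{
-		GT: true,
-		Ch: true,
-		Members: []Z{
-			{Score: 42, Member: "alice"},
-			{Score: 7, Member: "bob"},
-		},
-	}).Result()
-}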
-
-// TODO: kept for compatibility with the v8 API; will be removed in v9.
-func (c cmdable) zAdd(ctx context.Context, key string, args ZAddArgs, members ...*Z) *IntCmd {
- args.Members = make([]Z, len(members))
- for i, m := range members {
- args.Members[i] = *m
- }
- cmd := NewIntCmd(ctx, c.zAddArgs(key, args, false)...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZAdd Redis `ZADD key score member [score member ...]` command.
-func (c cmdable) ZAdd(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{}, members...)
-}
-
-// ZAddNX Redis `ZADD key NX score member [score member ...]` command.
-func (c cmdable) ZAddNX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- }, members...)
-}
-
-// ZAddXX Redis `ZADD key XX score member [score member ...]` command.
-func (c cmdable) ZAddXX(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- }, members...)
-}
-
-// ZAddCh Redis `ZADD key CH score member [score member ...]` command.
-// Deprecated: Use
-//	client.ZAddArgs(ctx, key, ZAddArgs{
-//		Ch:      true,
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZAddCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- Ch: true,
- }, members...)
-}
-
-// ZAddNXCh Redis `ZADD key NX CH score member [score member ...]` command.
-// Deprecated: Use
-//	client.ZAddArgs(ctx, key, ZAddArgs{
-//		NX:      true,
-//		Ch:      true,
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZAddNXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- NX: true,
- Ch: true,
- }, members...)
-}
-
-// ZAddXXCh Redis `ZADD key XX CH score member [score member ...]` command.
-// Deprecated: Use
-//	client.ZAddArgs(ctx, key, ZAddArgs{
-//		XX:      true,
-//		Ch:      true,
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZAddXXCh(ctx context.Context, key string, members ...*Z) *IntCmd {
- return c.zAdd(ctx, key, ZAddArgs{
- XX: true,
- Ch: true,
- }, members...)
-}
-
-// ZIncr Redis `ZADD key INCR score member` command.
-// Deprecated: Use
-//	client.ZAddArgsIncr(ctx, key, ZAddArgs{
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZIncr(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- Members: []Z{*member},
- })
-}
-
-// ZIncrNX Redis `ZADD key NX INCR score member` command.
-// Deprecated: Use
-//	client.ZAddArgsIncr(ctx, key, ZAddArgs{
-//		NX:      true,
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZIncrNX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- NX: true,
- Members: []Z{*member},
- })
-}
-
-// ZIncrXX Redis `ZADD key XX INCR score member` command.
-// Deprecated: Use
-//	client.ZAddArgsIncr(ctx, key, ZAddArgs{
-//		XX:      true,
-//		Members: []Z{...},
-//	})
-// instead. It will be removed in v9.
-func (c cmdable) ZIncrXX(ctx context.Context, key string, member *Z) *FloatCmd {
- return c.ZAddArgsIncr(ctx, key, ZAddArgs{
- XX: true,
- Members: []Z{*member},
- })
-}
-
-func (c cmdable) ZCard(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcard", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZLexCount(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zlexcount", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZIncrBy(ctx context.Context, key string, increment float64, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zincrby", key, increment, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterStore(ctx context.Context, destination string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinterstore", destination, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInter(ctx context.Context, store *ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZInterWithScores(ctx context.Context, store *ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zinter", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZMScore(ctx context.Context, key string, members ...string) *FloatSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "zmscore"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewFloatSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMax(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmax",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZPopMin(ctx context.Context, key string, count ...int64) *ZSliceCmd {
- args := []interface{}{
- "zpopmin",
- key,
- }
-
- switch len(count) {
- case 0:
- break
- case 1:
- args = append(args, count[0])
- default:
- panic("too many arguments")
- }
-
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRangeArgs holds all the options of the ZRange command.
-// With redis-server 6.2.0 and higher, ZRANGE can replace the following commands:
-//	ZREVRANGE,
-//	ZRANGEBYSCORE,
-//	ZREVRANGEBYSCORE,
-//	ZRANGEBYLEX,
-//	ZREVRANGEBYLEX.
-// Please pay attention to your redis-server version.
-//
-// The Rev, ByScore, ByLex and Offset+Count options require redis-server 6.2.0 or higher.
-type ZRangeArgs struct {
- Key string
-
-	// When the ByScore option is provided, an open (exclusive) interval can be set.
-	// By default, the score intervals specified by <Start> and <Stop> are closed (inclusive).
-	// It is similar to the ZRangeByScore command, which is deprecated as of 6.2.0.
- // For example:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "(3",
- // Stop: 8,
- // ByScore: true,
- // }
- // cmd: "ZRange example-key (3 8 ByScore" (3 < score <= 8).
- //
-	// The ByLex option is similar to the ZRangeByLex command, which is deprecated as of 6.2.0.
- // You can set the <Start> and <Stop> options as follows:
- // ZRangeArgs{
- // Key: "example-key",
- // Start: "[abc",
- // Stop: "(def",
- // ByLex: true,
- // }
- // cmd: "ZRange example-key [abc (def ByLex"
- //
- // For normal cases (ByScore==false && ByLex==false), <Start> and <Stop> should be set to the index range (int).
- // You can read the documentation for more information: https://redis.io/commands/zrange
- Start interface{}
- Stop interface{}
-
- // The ByScore and ByLex options are mutually exclusive.
- ByScore bool
- ByLex bool
-
- Rev bool
-
- // limit offset count.
- Offset int64
- Count int64
-}
-
-func (z ZRangeArgs) appendArgs(args []interface{}) []interface{} {
- // For Rev+ByScore/ByLex, we need to adjust the position of <Start> and <Stop>.
- if z.Rev && (z.ByScore || z.ByLex) {
- args = append(args, z.Key, z.Stop, z.Start)
- } else {
- args = append(args, z.Key, z.Start, z.Stop)
- }
-
- if z.ByScore {
- args = append(args, "byscore")
- } else if z.ByLex {
- args = append(args, "bylex")
- }
- if z.Rev {
- args = append(args, "rev")
- }
- if z.Offset != 0 || z.Count != 0 {
- args = append(args, "limit", z.Offset, z.Count)
- }
- return args
-}
-
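A hypothetical sketch of the ZRangeArgs options above, going through the ZRangeArgs method defined just below; note how appendArgs swaps Start and Stop when Rev is combined with ByScore or ByLex, so the caller still puts the lower bound in Start. The key and bounds are placeholders:

	package main

	import (
		"context"
		"fmt"

		"github.com/go-redis/redis/v8"
	)

	func main() {
		ctx := context.Background()
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

		// Sends: zrange example-key 8 (3 byscore rev limit 0 10
		// i.e. scores in (3, 8], highest first, at most 10 members.
		vals, err := rdb.ZRangeArgs(ctx, redis.ZRangeArgs{
			Key:     "example-key",
			Start:   "(3",
			Stop:    8,
			ByScore: true,
			Rev:     true,
			Count:   10,
		}).Result()
		if err != nil {
			panic(err) // example only
		}
		fmt.Println(vals)
	}
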
-func (c cmdable) ZRangeArgs(ctx context.Context, z ZRangeArgs) *StringSliceCmd {
- args := make([]interface{}, 0, 9)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeArgsWithScores(ctx context.Context, z ZRangeArgs) *ZSliceCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrange")
- args = z.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- return c.ZRangeArgs(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-func (c cmdable) ZRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- return c.ZRangeArgsWithScores(ctx, ZRangeArgs{
- Key: key,
- Start: start,
- Stop: stop,
- })
-}
-
-type ZRangeBy struct {
- Min, Max string
- Offset, Count int64
-}
-
-func (c cmdable) zRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy, withScores bool) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Min, opt.Max}
- if withScores {
- args = append(args, "withscores")
- }
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebyscore", key, opt, false)
-}
-
-func (c cmdable) ZRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRangeBy(ctx, "zrangebylex", key, opt, false)
-}
-
-func (c cmdable) ZRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrangebyscore", key, opt.Min, opt.Max, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRangeStore(ctx context.Context, dst string, z ZRangeArgs) *IntCmd {
- args := make([]interface{}, 0, 10)
- args = append(args, "zrangestore", dst)
- args = z.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRem(ctx context.Context, key string, members ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(members))
- args[0] = "zrem"
- args[1] = key
- args = appendArgs(args, members)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByRank(ctx context.Context, key string, start, stop int64) *IntCmd {
- cmd := NewIntCmd(
- ctx,
- "zremrangebyrank",
- key,
- start,
- stop,
- )
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByScore(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebyscore", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRemRangeByLex(ctx context.Context, key, min, max string) *IntCmd {
- cmd := NewIntCmd(ctx, "zremrangebylex", key, min, max)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRange(ctx context.Context, key string, start, stop int64) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "zrevrange", key, start, stop)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeWithScores(ctx context.Context, key string, start, stop int64) *ZSliceCmd {
- cmd := NewZSliceCmd(ctx, "zrevrange", key, start, stop, "withscores")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) zRevRangeBy(ctx context.Context, zcmd, key string, opt *ZRangeBy) *StringSliceCmd {
- args := []interface{}{zcmd, key, opt.Max, opt.Min}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRangeByScore(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebyscore", key, opt)
-}
-
-func (c cmdable) ZRevRangeByLex(ctx context.Context, key string, opt *ZRangeBy) *StringSliceCmd {
- return c.zRevRangeBy(ctx, "zrevrangebylex", key, opt)
-}
-
-func (c cmdable) ZRevRangeByScoreWithScores(ctx context.Context, key string, opt *ZRangeBy) *ZSliceCmd {
- args := []interface{}{"zrevrangebyscore", key, opt.Max, opt.Min, "withscores"}
- if opt.Offset != 0 || opt.Count != 0 {
- args = append(
- args,
- "limit",
- opt.Offset,
- opt.Count,
- )
- }
- cmd := NewZSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZRevRank(ctx context.Context, key, member string) *IntCmd {
- cmd := NewIntCmd(ctx, "zrevrank", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZScore(ctx context.Context, key, member string) *FloatCmd {
- cmd := NewFloatCmd(ctx, "zscore", key, member)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnion(ctx context.Context, store ZStore) *StringSliceCmd {
- args := make([]interface{}, 0, 2+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionWithScores(ctx context.Context, store ZStore) *ZSliceCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunion", len(store.Keys))
- args = store.appendArgs(args)
- args = append(args, "withscores")
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ZUnionStore(ctx context.Context, dest string, store *ZStore) *IntCmd {
- args := make([]interface{}, 0, 3+store.len())
- args = append(args, "zunionstore", dest, len(store.Keys))
- args = store.appendArgs(args)
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZRandMember requires redis-server version >= 6.2.0.
-func (c cmdable) ZRandMember(ctx context.Context, key string, count int, withScores bool) *StringSliceCmd {
- args := make([]interface{}, 0, 4)
-
- // Although count=0 is meaningless, redis accepts count=0.
- args = append(args, "zrandmember", key, count)
- if withScores {
- args = append(args, "withscores")
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiff requires redis-server version >= 6.2.0.
-func (c cmdable) ZDiff(ctx context.Context, keys ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
-
- cmd := NewStringSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffWithScores requires redis-server version >= 6.2.0.
-func (c cmdable) ZDiffWithScores(ctx context.Context, keys ...string) *ZSliceCmd {
- args := make([]interface{}, 3+len(keys))
- args[0] = "zdiff"
- args[1] = len(keys)
- for i, key := range keys {
- args[i+2] = key
- }
- args[len(keys)+2] = "withscores"
-
- cmd := NewZSliceCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ZDiffStore requires redis-server version >= 6.2.0.
-func (c cmdable) ZDiffStore(ctx context.Context, destination string, keys ...string) *IntCmd {
- args := make([]interface{}, 0, 3+len(keys))
- args = append(args, "zdiffstore", destination, len(keys))
- for _, key := range keys {
- args = append(args, key)
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) PFAdd(ctx context.Context, key string, els ...interface{}) *IntCmd {
- args := make([]interface{}, 2, 2+len(els))
- args[0] = "pfadd"
- args[1] = key
- args = appendArgs(args, els)
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFCount(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "pfcount"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PFMerge(ctx context.Context, dest string, keys ...string) *StatusCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "pfmerge"
- args[1] = dest
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) BgRewriteAOF(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgrewriteaof")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) BgSave(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "bgsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientKill(ctx context.Context, ipPort string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "client", "kill", ipPort)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// ClientKillByFilter uses the new-style syntax, while ClientKill uses the old one:
-//
-// CLIENT KILL <option> [value] ... <option> [value]
-func (c cmdable) ClientKillByFilter(ctx context.Context, keys ...string) *IntCmd {
- args := make([]interface{}, 2+len(keys))
- args[0] = "client"
- args[1] = "kill"
- for i, key := range keys {
- args[2+i] = key
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientList(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "client", "list")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientPause(ctx context.Context, dur time.Duration) *BoolCmd {
- cmd := NewBoolCmd(ctx, "client", "pause", formatMs(ctx, dur))
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientID(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "id")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblock(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClientUnblockWithError(ctx context.Context, id int64) *IntCmd {
- cmd := NewIntCmd(ctx, "client", "unblock", id, "error")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigGet(ctx context.Context, parameter string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "config", "get", parameter)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigResetStat(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "resetstat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigSet(ctx context.Context, parameter, value string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "set", parameter, value)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ConfigRewrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "config", "rewrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DBSize(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "dbsize")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAll(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushAllAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushall", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDB(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) FlushDBAsync(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "flushdb", "async")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Info(ctx context.Context, section ...string) *StringCmd {
- args := []interface{}{"info"}
- if len(section) > 0 {
- args = append(args, section[0])
- }
- cmd := NewStringCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) LastSave(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "lastsave")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Save(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "save")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) shutdown(ctx context.Context, modifier string) *StatusCmd {
- var args []interface{}
- if modifier == "" {
- args = []interface{}{"shutdown"}
- } else {
- args = []interface{}{"shutdown", modifier}
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- if err := cmd.Err(); err != nil {
- if err == io.EOF {
- // Server quit as expected.
- cmd.err = nil
- }
- } else {
- // Server did not quit. String reply contains the reason.
- cmd.err = errors.New(cmd.val)
- cmd.val = ""
- }
- return cmd
-}
-
-func (c cmdable) Shutdown(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "")
-}
-
-func (c cmdable) ShutdownSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "save")
-}
-
-func (c cmdable) ShutdownNoSave(ctx context.Context) *StatusCmd {
- return c.shutdown(ctx, "nosave")
-}
-
-func (c cmdable) SlaveOf(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "slaveof", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) SlowLogGet(ctx context.Context, num int64) *SlowLogCmd {
-	cmd := NewSlowLogCmd(ctx, "slowlog", "get", num)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) Sync(_ context.Context) {
- panic("not implemented")
-}
-
-func (c cmdable) Time(ctx context.Context) *TimeCmd {
- cmd := NewTimeCmd(ctx, "time")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) DebugObject(ctx context.Context, key string) *StringCmd {
- cmd := NewStringCmd(ctx, "debug", "object", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadOnly(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readonly")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ReadWrite(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "readwrite")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) MemoryUsage(ctx context.Context, key string, samples ...int) *IntCmd {
- args := []interface{}{"memory", "usage", key}
- if len(samples) > 0 {
- if len(samples) != 1 {
- panic("MemoryUsage expects single sample count")
- }
- args = append(args, "SAMPLES", samples[0])
- }
- cmd := NewIntCmd(ctx, args...)
- cmd.SetFirstKeyPos(2)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "eval"
- cmdArgs[1] = script
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
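A small sketch of the EVAL argument layout built above (script, number of keys, the keys, then any extra arguments passed to the script as ARGV); the script, key and value are placeholders:

	package main

	import (
		"context"
		"fmt"

		"github.com/go-redis/redis/v8"
	)

	func main() {
		ctx := context.Background()
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

		// Sends: eval "return redis.call('set', KEYS[1], ARGV[1])" 1 greeting hello
		script := "return redis.call('set', KEYS[1], ARGV[1])"
		res, err := rdb.Eval(ctx, script, []string{"greeting"}, "hello").Result()
		if err != nil {
			panic(err) // example only
		}
		fmt.Println(res)
	}
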
-func (c cmdable) EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd {
- cmdArgs := make([]interface{}, 3+len(keys), 3+len(keys)+len(args))
- cmdArgs[0] = "evalsha"
- cmdArgs[1] = sha1
- cmdArgs[2] = len(keys)
- for i, key := range keys {
- cmdArgs[3+i] = key
- }
- cmdArgs = appendArgs(cmdArgs, args)
- cmd := NewCmd(ctx, cmdArgs...)
- cmd.SetFirstKeyPos(3)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd {
- args := make([]interface{}, 2+len(hashes))
- args[0] = "script"
- args[1] = "exists"
- for i, hash := range hashes {
- args[2+i] = hash
- }
- cmd := NewBoolSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptFlush(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "flush")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptKill(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "script", "kill")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ScriptLoad(ctx context.Context, script string) *StringCmd {
- cmd := NewStringCmd(ctx, "script", "load", script)
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-// Publish posts the message to the channel.
-func (c cmdable) Publish(ctx context.Context, channel string, message interface{}) *IntCmd {
- cmd := NewIntCmd(ctx, "publish", channel, message)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubChannels(ctx context.Context, pattern string) *StringSliceCmd {
- args := []interface{}{"pubsub", "channels"}
- if pattern != "*" {
- args = append(args, pattern)
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumSub(ctx context.Context, channels ...string) *StringIntMapCmd {
- args := make([]interface{}, 2+len(channels))
- args[0] = "pubsub"
- args[1] = "numsub"
- for i, channel := range channels {
- args[2+i] = channel
- }
- cmd := NewStringIntMapCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) PubSubNumPat(ctx context.Context) *IntCmd {
- cmd := NewIntCmd(ctx, "pubsub", "numpat")
- _ = c(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) ClusterSlots(ctx context.Context) *ClusterSlotsCmd {
- cmd := NewClusterSlotsCmd(ctx, "cluster", "slots")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterNodes(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "nodes")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterMeet(ctx context.Context, host, port string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "meet", host, port)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterForget(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "forget", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterReplicate(ctx context.Context, nodeID string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "replicate", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetSoft(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "soft")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterResetHard(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "reset", "hard")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterInfo(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "cluster", "info")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterKeySlot(ctx context.Context, key string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "keyslot", key)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterGetKeysInSlot(ctx context.Context, slot int, count int) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "getkeysinslot", slot, count)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountFailureReports(ctx context.Context, nodeID string) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "count-failure-reports", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterCountKeysInSlot(ctx context.Context, slot int) *IntCmd {
- cmd := NewIntCmd(ctx, "cluster", "countkeysinslot", slot)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "delslots"
- for i, slot := range slots {
- args[2+i] = slot
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterDelSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterDelSlots(ctx, slots...)
-}
-
-func (c cmdable) ClusterSaveConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "saveconfig")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterSlaves(ctx context.Context, nodeID string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "cluster", "slaves", nodeID)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterFailover(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "cluster", "failover")
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlots(ctx context.Context, slots ...int) *StatusCmd {
- args := make([]interface{}, 2+len(slots))
- args[0] = "cluster"
- args[1] = "addslots"
- for i, num := range slots {
- args[2+i] = num
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) ClusterAddSlotsRange(ctx context.Context, min, max int) *StatusCmd {
- size := max - min + 1
- slots := make([]int, size)
- for i := 0; i < size; i++ {
- slots[i] = min + i
- }
- return c.ClusterAddSlots(ctx, slots...)
-}
-
-//------------------------------------------------------------------------------
-
-func (c cmdable) GeoAdd(ctx context.Context, key string, geoLocation ...*GeoLocation) *IntCmd {
- args := make([]interface{}, 2+3*len(geoLocation))
- args[0] = "geoadd"
- args[1] = key
- for i, eachLoc := range geoLocation {
- args[2+3*i] = eachLoc.Longitude
- args[2+3*i+1] = eachLoc.Latitude
- args[2+3*i+2] = eachLoc.Name
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadius is a read-only GEORADIUS_RO command.
-func (c cmdable) GeoRadius(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadius_ro", key, longitude, latitude)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadius does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusStore is a writing GEORADIUS command.
-func (c cmdable) GeoRadiusStore(
- ctx context.Context, key string, longitude, latitude float64, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadius", key, longitude, latitude)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMember is a read-only GEORADIUSBYMEMBER_RO command.
-func (c cmdable) GeoRadiusByMember(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *GeoLocationCmd {
- cmd := NewGeoLocationCmd(ctx, query, "georadiusbymember_ro", key, member)
- if query.Store != "" || query.StoreDist != "" {
- cmd.SetErr(errors.New("GeoRadiusByMember does not support Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
-// GeoRadiusByMemberStore is a writing GEORADIUSBYMEMBER command.
-func (c cmdable) GeoRadiusByMemberStore(
- ctx context.Context, key, member string, query *GeoRadiusQuery,
-) *IntCmd {
- args := geoLocationArgs(query, "georadiusbymember", key, member)
- cmd := NewIntCmd(ctx, args...)
- if query.Store == "" && query.StoreDist == "" {
- cmd.SetErr(errors.New("GeoRadiusByMemberStore requires Store or StoreDist"))
- return cmd
- }
- _ = c(ctx, cmd)
- return cmd
-}
-
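A sketch of the split above between the read-only and the writing GEORADIUS variants: GeoRadius rejects Store/StoreDist, while GeoRadiusStore requires one of them. The key, coordinates and destination are placeholders:

	package main

	import (
		"context"
		"fmt"

		"github.com/go-redis/redis/v8"
	)

	func main() {
		ctx := context.Background()
		rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // placeholder address

		// Read-only variant (GEORADIUS_RO): Store/StoreDist must be empty.
		locs, err := rdb.GeoRadius(ctx, "cities", 13.4, 52.5, &redis.GeoRadiusQuery{
			Radius: 200,
			Unit:   "km",
		}).Result()
		if err != nil {
			panic(err) // example only
		}
		fmt.Println(len(locs))

		// Writing variant (GEORADIUS): Store or StoreDist is mandatory.
		stored, err := rdb.GeoRadiusStore(ctx, "cities", 13.4, 52.5, &redis.GeoRadiusQuery{
			Radius: 200,
			Unit:   "km",
			Store:  "cities:nearby",
		}).Result()
		if err != nil {
			panic(err) // example only
		}
		fmt.Println(stored)
	}
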
-func (c cmdable) GeoSearch(ctx context.Context, key string, q *GeoSearchQuery) *StringSliceCmd {
- args := make([]interface{}, 0, 13)
- args = append(args, "geosearch", key)
- args = geoSearchArgs(q, args)
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchLocation(
- ctx context.Context, key string, q *GeoSearchLocationQuery,
-) *GeoSearchLocationCmd {
- args := make([]interface{}, 0, 16)
- args = append(args, "geosearch", key)
- args = geoSearchLocationArgs(q, args)
- cmd := NewGeoSearchLocationCmd(ctx, q, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoSearchStore(ctx context.Context, key, store string, q *GeoSearchStoreQuery) *IntCmd {
- args := make([]interface{}, 0, 15)
- args = append(args, "geosearchstore", store, key)
- args = geoSearchArgs(&q.GeoSearchQuery, args)
- if q.StoreDist {
- args = append(args, "storedist")
- }
- cmd := NewIntCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoDist(
- ctx context.Context, key string, member1, member2, unit string,
-) *FloatCmd {
- if unit == "" {
- unit = "km"
- }
- cmd := NewFloatCmd(ctx, "geodist", key, member1, member2, unit)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoHash(ctx context.Context, key string, members ...string) *StringSliceCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geohash"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewStringSliceCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
-
-func (c cmdable) GeoPos(ctx context.Context, key string, members ...string) *GeoPosCmd {
- args := make([]interface{}, 2+len(members))
- args[0] = "geopos"
- args[1] = key
- for i, member := range members {
- args[2+i] = member
- }
- cmd := NewGeoPosCmd(ctx, args...)
- _ = c(ctx, cmd)
- return cmd
-}
diff --git a/vendor/github.com/go-redis/redis/v8/doc.go b/vendor/github.com/go-redis/redis/v8/doc.go
deleted file mode 100644
index 55262533a6..0000000000
--- a/vendor/github.com/go-redis/redis/v8/doc.go
+++ /dev/null
@@ -1,4 +0,0 @@
-/*
-Package redis implements a Redis client.
-*/
-package redis
diff --git a/vendor/github.com/go-redis/redis/v8/error.go b/vendor/github.com/go-redis/redis/v8/error.go
deleted file mode 100644
index 521594bbd0..0000000000
--- a/vendor/github.com/go-redis/redis/v8/error.go
+++ /dev/null
@@ -1,144 +0,0 @@
-package redis
-
-import (
- "context"
- "io"
- "net"
- "strings"
-
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// ErrClosed is returned when any operation is performed on a closed client.
-var ErrClosed = pool.ErrClosed
-
-type Error interface {
- error
-
- // RedisError is a no-op function but
- // serves to distinguish types that are Redis
- // errors from ordinary errors: a type is a
- // Redis error if it has a RedisError method.
- RedisError()
-}
-
-var _ Error = proto.RedisError("")
-
-func shouldRetry(err error, retryTimeout bool) bool {
- switch err {
- case io.EOF, io.ErrUnexpectedEOF:
- return true
- case nil, context.Canceled, context.DeadlineExceeded:
- return false
- }
-
- if v, ok := err.(timeoutError); ok {
- if v.Timeout() {
- return retryTimeout
- }
- return true
- }
-
- s := err.Error()
- if s == "ERR max number of clients reached" {
- return true
- }
- if strings.HasPrefix(s, "LOADING ") {
- return true
- }
- if strings.HasPrefix(s, "READONLY ") {
- return true
- }
- if strings.HasPrefix(s, "CLUSTERDOWN ") {
- return true
- }
- if strings.HasPrefix(s, "TRYAGAIN ") {
- return true
- }
-
- return false
-}
-
-func isRedisError(err error) bool {
- _, ok := err.(proto.RedisError)
- return ok
-}
-
-func isBadConn(err error, allowTimeout bool, addr string) bool {
- switch err {
- case nil:
- return false
- case context.Canceled, context.DeadlineExceeded:
- return true
- }
-
- if isRedisError(err) {
- switch {
- case isReadOnlyError(err):
-			// Close connections in read-only state in case a domain address is used
-			// and the domain resolves to a different Redis server. See #790.
- return true
- case isMovedSameConnAddr(err, addr):
-			// Close connections when we are asked to move to the same addr
-			// as the connection. Force a DNS resolution when all connections
-			// of the pool are recycled.
- return true
- default:
- return false
- }
- }
-
- if allowTimeout {
- if netErr, ok := err.(net.Error); ok && netErr.Timeout() {
- return !netErr.Temporary()
- }
- }
-
- return true
-}
-
-func isMovedError(err error) (moved bool, ask bool, addr string) {
- if !isRedisError(err) {
- return
- }
-
- s := err.Error()
- switch {
- case strings.HasPrefix(s, "MOVED "):
- moved = true
- case strings.HasPrefix(s, "ASK "):
- ask = true
- default:
- return
- }
-
- ind := strings.LastIndex(s, " ")
- if ind == -1 {
- return false, false, ""
- }
- addr = s[ind+1:]
- return
-}
-
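A self-contained sketch that mirrors the MOVED/ASK parsing in isMovedError above, applied to a typical (made-up) cluster redirect reply:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		// A typical cluster redirect reply: "MOVED <slot> <host:port>".
		s := "MOVED 3999 127.0.0.1:6381"

		moved := strings.HasPrefix(s, "MOVED ")
		ask := strings.HasPrefix(s, "ASK ")

		// Same extraction as isMovedError: everything after the last space is the address.
		addr := ""
		if ind := strings.LastIndex(s, " "); ind != -1 {
			addr = s[ind+1:]
		}
		fmt.Println(moved, ask, addr) // true false 127.0.0.1:6381
	}
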
-func isLoadingError(err error) bool {
- return strings.HasPrefix(err.Error(), "LOADING ")
-}
-
-func isReadOnlyError(err error) bool {
- return strings.HasPrefix(err.Error(), "READONLY ")
-}
-
-func isMovedSameConnAddr(err error, addr string) bool {
- redisError := err.Error()
- if !strings.HasPrefix(redisError, "MOVED ") {
- return false
- }
- return strings.HasSuffix(redisError, " "+addr)
-}
-
-//------------------------------------------------------------------------------
-
-type timeoutError interface {
- Timeout() bool
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/arg.go b/vendor/github.com/go-redis/redis/v8/internal/arg.go
deleted file mode 100644
index b97fa0d685..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/arg.go
+++ /dev/null
@@ -1,56 +0,0 @@
-package internal
-
-import (
- "fmt"
- "strconv"
- "time"
-)
-
-func AppendArg(b []byte, v interface{}) []byte {
- switch v := v.(type) {
- case nil:
- return append(b, "<nil>"...)
- case string:
- return appendUTF8String(b, Bytes(v))
- case []byte:
- return appendUTF8String(b, v)
- case int:
- return strconv.AppendInt(b, int64(v), 10)
- case int8:
- return strconv.AppendInt(b, int64(v), 10)
- case int16:
- return strconv.AppendInt(b, int64(v), 10)
- case int32:
- return strconv.AppendInt(b, int64(v), 10)
- case int64:
- return strconv.AppendInt(b, v, 10)
- case uint:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint8:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint16:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint32:
- return strconv.AppendUint(b, uint64(v), 10)
- case uint64:
- return strconv.AppendUint(b, v, 10)
- case float32:
- return strconv.AppendFloat(b, float64(v), 'f', -1, 64)
- case float64:
- return strconv.AppendFloat(b, v, 'f', -1, 64)
- case bool:
- if v {
- return append(b, "true"...)
- }
- return append(b, "false"...)
- case time.Time:
- return v.AppendFormat(b, time.RFC3339Nano)
- default:
- return append(b, fmt.Sprint(v)...)
- }
-}
-
-func appendUTF8String(dst []byte, src []byte) []byte {
- dst = append(dst, src...)
- return dst
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go b/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
deleted file mode 100644
index b3a4f211e3..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/hashtag.go
+++ /dev/null
@@ -1,78 +0,0 @@
-package hashtag
-
-import (
- "strings"
-
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-const slotNumber = 16384
-
-// CRC16 implementation according to CCITT standards.
-// Copyright 2001-2010 Georges Menie (www.menie.org)
-// Copyright 2013 The Go Authors. All rights reserved.
-// http://redis.io/topics/cluster-spec#appendix-a-crc16-reference-implementation-in-ansi-c
-var crc16tab = [256]uint16{
- 0x0000, 0x1021, 0x2042, 0x3063, 0x4084, 0x50a5, 0x60c6, 0x70e7,
- 0x8108, 0x9129, 0xa14a, 0xb16b, 0xc18c, 0xd1ad, 0xe1ce, 0xf1ef,
- 0x1231, 0x0210, 0x3273, 0x2252, 0x52b5, 0x4294, 0x72f7, 0x62d6,
- 0x9339, 0x8318, 0xb37b, 0xa35a, 0xd3bd, 0xc39c, 0xf3ff, 0xe3de,
- 0x2462, 0x3443, 0x0420, 0x1401, 0x64e6, 0x74c7, 0x44a4, 0x5485,
- 0xa56a, 0xb54b, 0x8528, 0x9509, 0xe5ee, 0xf5cf, 0xc5ac, 0xd58d,
- 0x3653, 0x2672, 0x1611, 0x0630, 0x76d7, 0x66f6, 0x5695, 0x46b4,
- 0xb75b, 0xa77a, 0x9719, 0x8738, 0xf7df, 0xe7fe, 0xd79d, 0xc7bc,
- 0x48c4, 0x58e5, 0x6886, 0x78a7, 0x0840, 0x1861, 0x2802, 0x3823,
- 0xc9cc, 0xd9ed, 0xe98e, 0xf9af, 0x8948, 0x9969, 0xa90a, 0xb92b,
- 0x5af5, 0x4ad4, 0x7ab7, 0x6a96, 0x1a71, 0x0a50, 0x3a33, 0x2a12,
- 0xdbfd, 0xcbdc, 0xfbbf, 0xeb9e, 0x9b79, 0x8b58, 0xbb3b, 0xab1a,
- 0x6ca6, 0x7c87, 0x4ce4, 0x5cc5, 0x2c22, 0x3c03, 0x0c60, 0x1c41,
- 0xedae, 0xfd8f, 0xcdec, 0xddcd, 0xad2a, 0xbd0b, 0x8d68, 0x9d49,
- 0x7e97, 0x6eb6, 0x5ed5, 0x4ef4, 0x3e13, 0x2e32, 0x1e51, 0x0e70,
- 0xff9f, 0xefbe, 0xdfdd, 0xcffc, 0xbf1b, 0xaf3a, 0x9f59, 0x8f78,
- 0x9188, 0x81a9, 0xb1ca, 0xa1eb, 0xd10c, 0xc12d, 0xf14e, 0xe16f,
- 0x1080, 0x00a1, 0x30c2, 0x20e3, 0x5004, 0x4025, 0x7046, 0x6067,
- 0x83b9, 0x9398, 0xa3fb, 0xb3da, 0xc33d, 0xd31c, 0xe37f, 0xf35e,
- 0x02b1, 0x1290, 0x22f3, 0x32d2, 0x4235, 0x5214, 0x6277, 0x7256,
- 0xb5ea, 0xa5cb, 0x95a8, 0x8589, 0xf56e, 0xe54f, 0xd52c, 0xc50d,
- 0x34e2, 0x24c3, 0x14a0, 0x0481, 0x7466, 0x6447, 0x5424, 0x4405,
- 0xa7db, 0xb7fa, 0x8799, 0x97b8, 0xe75f, 0xf77e, 0xc71d, 0xd73c,
- 0x26d3, 0x36f2, 0x0691, 0x16b0, 0x6657, 0x7676, 0x4615, 0x5634,
- 0xd94c, 0xc96d, 0xf90e, 0xe92f, 0x99c8, 0x89e9, 0xb98a, 0xa9ab,
- 0x5844, 0x4865, 0x7806, 0x6827, 0x18c0, 0x08e1, 0x3882, 0x28a3,
- 0xcb7d, 0xdb5c, 0xeb3f, 0xfb1e, 0x8bf9, 0x9bd8, 0xabbb, 0xbb9a,
- 0x4a75, 0x5a54, 0x6a37, 0x7a16, 0x0af1, 0x1ad0, 0x2ab3, 0x3a92,
- 0xfd2e, 0xed0f, 0xdd6c, 0xcd4d, 0xbdaa, 0xad8b, 0x9de8, 0x8dc9,
- 0x7c26, 0x6c07, 0x5c64, 0x4c45, 0x3ca2, 0x2c83, 0x1ce0, 0x0cc1,
- 0xef1f, 0xff3e, 0xcf5d, 0xdf7c, 0xaf9b, 0xbfba, 0x8fd9, 0x9ff8,
- 0x6e17, 0x7e36, 0x4e55, 0x5e74, 0x2e93, 0x3eb2, 0x0ed1, 0x1ef0,
-}
-
-func Key(key string) string {
- if s := strings.IndexByte(key, '{'); s > -1 {
- if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
- return key[s+1 : s+e+1]
- }
- }
- return key
-}
-
-func RandomSlot() int {
- return rand.Intn(slotNumber)
-}
-
-// Slot returns a consistent slot number between 0 and 16383
-// for any given string key.
-func Slot(key string) int {
- if key == "" {
- return RandomSlot()
- }
- key = Key(key)
- return int(crc16sum(key)) % slotNumber
-}
-
-func crc16sum(key string) (crc uint16) {
- for i := 0; i < len(key); i++ {
- crc = (crc << 8) ^ crc16tab[(byte(crc>>8)^key[i])&0x00ff]
- }
- return
-}
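A small standalone sketch of the hash-tag rule implemented by Key above: if a key contains a {...} section, only that section is hashed, so keys sharing a tag map to the same slot. The helper is re-implemented here because internal/ packages cannot be imported from outside the go-redis module:

	package main

	import (
		"fmt"
		"strings"
	)

	// hashTag mirrors hashtag.Key: return the substring between the first '{'
	// and the following '}', if any, otherwise the whole key.
	func hashTag(key string) string {
		if s := strings.IndexByte(key, '{'); s > -1 {
			if e := strings.IndexByte(key[s+1:], '}'); e > 0 {
				return key[s+1 : s+e+1]
			}
		}
		return key
	}

	func main() {
		fmt.Println(hashTag("{user1000}.following")) // user1000
		fmt.Println(hashTag("{user1000}.followers")) // user1000 -> same slot as above
		fmt.Println(hashTag("plain-key"))            // plain-key
	}
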
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make b/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make
deleted file mode 100644
index ada1b318a8..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hashtag/ya.make
+++ /dev/null
@@ -1,11 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(hashtag.go)
-
-GO_TEST_SRCS(hashtag_test.go)
-
-END()
-
-RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
deleted file mode 100644
index 852c8bd525..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hscan/hscan.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package hscan
-
-import (
- "errors"
- "fmt"
- "reflect"
- "strconv"
-)
-
-// decoderFunc represents decoding functions for default built-in types.
-type decoderFunc func(reflect.Value, string) error
-
-var (
-	// List of built-in decoders indexed by their numeric constant values (e.g. reflect.Bool = 1).
- decoders = []decoderFunc{
- reflect.Bool: decodeBool,
- reflect.Int: decodeInt,
- reflect.Int8: decodeInt8,
- reflect.Int16: decodeInt16,
- reflect.Int32: decodeInt32,
- reflect.Int64: decodeInt64,
- reflect.Uint: decodeUint,
- reflect.Uint8: decodeUint8,
- reflect.Uint16: decodeUint16,
- reflect.Uint32: decodeUint32,
- reflect.Uint64: decodeUint64,
- reflect.Float32: decodeFloat32,
- reflect.Float64: decodeFloat64,
- reflect.Complex64: decodeUnsupported,
- reflect.Complex128: decodeUnsupported,
- reflect.Array: decodeUnsupported,
- reflect.Chan: decodeUnsupported,
- reflect.Func: decodeUnsupported,
- reflect.Interface: decodeUnsupported,
- reflect.Map: decodeUnsupported,
- reflect.Ptr: decodeUnsupported,
- reflect.Slice: decodeSlice,
- reflect.String: decodeString,
- reflect.Struct: decodeUnsupported,
- reflect.UnsafePointer: decodeUnsupported,
- }
-
- // Global map of struct field specs that is populated once for every new
- // struct type that is scanned. This caches the field types and the corresponding
- // decoder functions to avoid iterating through struct fields on subsequent scans.
- globalStructMap = newStructMap()
-)
-
-func Struct(dst interface{}) (StructValue, error) {
- v := reflect.ValueOf(dst)
-
- // The destination to scan into should be a struct pointer.
- if v.Kind() != reflect.Ptr || v.IsNil() {
- return StructValue{}, fmt.Errorf("redis.Scan(non-pointer %T)", dst)
- }
-
- v = v.Elem()
- if v.Kind() != reflect.Struct {
- return StructValue{}, fmt.Errorf("redis.Scan(non-struct %T)", dst)
- }
-
- return StructValue{
- spec: globalStructMap.get(v.Type()),
- value: v,
- }, nil
-}
-
-// Scan scans a key-value Redis map result set into a destination struct.
-// The Redis keys are matched to the struct fields with the `redis` tag.
-func Scan(dst interface{}, keys []interface{}, vals []interface{}) error {
- if len(keys) != len(vals) {
- return errors.New("args should have the same number of keys and vals")
- }
-
- strct, err := Struct(dst)
- if err != nil {
- return err
- }
-
- // Iterate through the (key, value) sequence.
- for i := 0; i < len(vals); i++ {
- key, ok := keys[i].(string)
- if !ok {
- continue
- }
-
- val, ok := vals[i].(string)
- if !ok {
- continue
- }
-
- if err := strct.Scan(key, val); err != nil {
- return err
- }
- }
-
- return nil
-}
-
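A sketch of feeding a flattened field/value sequence into Scan. Since internal/ packages are only importable from inside the go-redis module, this is written as a hypothetical in-module example test rather than application code:

	package hscan_test

	import (
		"fmt"

		"github.com/go-redis/redis/v8/internal/hscan"
	)

	type user struct {
		Name string `redis:"name"`
		Age  int    `redis:"age"`
	}

	func Example() {
		// Keys and values as they would arrive from a flattened hash reply.
		// Fields without a matching `redis` tag are silently skipped.
		keys := []interface{}{"name", "age", "ignored-field"}
		vals := []interface{}{"alice", "42", "whatever"}

		var u user
		if err := hscan.Scan(&u, keys, vals); err != nil {
			panic(err) // example only
		}
		fmt.Printf("%+v\n", u)
		// Output: {Name:alice Age:42}
	}
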
-func decodeBool(f reflect.Value, s string) error {
- b, err := strconv.ParseBool(s)
- if err != nil {
- return err
- }
- f.SetBool(b)
- return nil
-}
-
-func decodeInt8(f reflect.Value, s string) error {
- return decodeNumber(f, s, 8)
-}
-
-func decodeInt16(f reflect.Value, s string) error {
- return decodeNumber(f, s, 16)
-}
-
-func decodeInt32(f reflect.Value, s string) error {
- return decodeNumber(f, s, 32)
-}
-
-func decodeInt64(f reflect.Value, s string) error {
- return decodeNumber(f, s, 64)
-}
-
-func decodeInt(f reflect.Value, s string) error {
- return decodeNumber(f, s, 0)
-}
-
-func decodeNumber(f reflect.Value, s string, bitSize int) error {
- v, err := strconv.ParseInt(s, 10, bitSize)
- if err != nil {
- return err
- }
- f.SetInt(v)
- return nil
-}
-
-func decodeUint8(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 8)
-}
-
-func decodeUint16(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 16)
-}
-
-func decodeUint32(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 32)
-}
-
-func decodeUint64(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 64)
-}
-
-func decodeUint(f reflect.Value, s string) error {
- return decodeUnsignedNumber(f, s, 0)
-}
-
-func decodeUnsignedNumber(f reflect.Value, s string, bitSize int) error {
- v, err := strconv.ParseUint(s, 10, bitSize)
- if err != nil {
- return err
- }
- f.SetUint(v)
- return nil
-}
-
-func decodeFloat32(f reflect.Value, s string) error {
- v, err := strconv.ParseFloat(s, 32)
- if err != nil {
- return err
- }
- f.SetFloat(v)
- return nil
-}
-
-// ParseFloat always returns a float64, but we specify the bit size explicitly.
-func decodeFloat64(f reflect.Value, s string) error {
- v, err := strconv.ParseFloat(s, 64)
- if err != nil {
- return err
- }
- f.SetFloat(v)
- return nil
-}
-
-func decodeString(f reflect.Value, s string) error {
- f.SetString(s)
- return nil
-}
-
-func decodeSlice(f reflect.Value, s string) error {
- // []byte slice ([]uint8).
- if f.Type().Elem().Kind() == reflect.Uint8 {
- f.SetBytes([]byte(s))
- }
- return nil
-}
-
-func decodeUnsupported(v reflect.Value, s string) error {
- return fmt.Errorf("redis.Scan(unsupported %s)", v.Type())
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go b/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
deleted file mode 100644
index 6839412ba2..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hscan/structmap.go
+++ /dev/null
@@ -1,93 +0,0 @@
-package hscan
-
-import (
- "fmt"
- "reflect"
- "strings"
- "sync"
-)
-
-// structMap contains the map of struct fields for target structs
-// indexed by the struct type.
-type structMap struct {
- m sync.Map
-}
-
-func newStructMap() *structMap {
- return new(structMap)
-}
-
-func (s *structMap) get(t reflect.Type) *structSpec {
- if v, ok := s.m.Load(t); ok {
- return v.(*structSpec)
- }
-
- spec := newStructSpec(t, "redis")
- s.m.Store(t, spec)
- return spec
-}
-
-//------------------------------------------------------------------------------
-
-// structSpec contains the list of all fields in a target struct.
-type structSpec struct {
- m map[string]*structField
-}
-
-func (s *structSpec) set(tag string, sf *structField) {
- s.m[tag] = sf
-}
-
-func newStructSpec(t reflect.Type, fieldTag string) *structSpec {
- numField := t.NumField()
- out := &structSpec{
- m: make(map[string]*structField, numField),
- }
-
- for i := 0; i < numField; i++ {
- f := t.Field(i)
-
- tag := f.Tag.Get(fieldTag)
- if tag == "" || tag == "-" {
- continue
- }
-
- tag = strings.Split(tag, ",")[0]
- if tag == "" {
- continue
- }
-
- // Use the built-in decoder.
- out.set(tag, &structField{index: i, fn: decoders[f.Type.Kind()]})
- }
-
- return out
-}
-
-//------------------------------------------------------------------------------
-
-// structField represents a single field in a target struct.
-type structField struct {
- index int
- fn decoderFunc
-}
-
-//------------------------------------------------------------------------------
-
-type StructValue struct {
- spec *structSpec
- value reflect.Value
-}
-
-func (s StructValue) Scan(key string, value string) error {
- field, ok := s.spec.m[key]
- if !ok {
- return nil
- }
- if err := field.fn(s.value.Field(field.index), value); err != nil {
- t := s.value.Type()
- return fmt.Errorf("cannot scan redis.result %s into struct field %s.%s of type %s, error-%s",
- value, t.Name(), t.Field(field.index).Name, t.Field(field.index).Type, err.Error())
- }
- return nil
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make b/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make
deleted file mode 100644
index b193b03b81..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/hscan/ya.make
+++ /dev/null
@@ -1,14 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- hscan.go
- structmap.go
-)
-
-GO_TEST_SRCS(hscan_test.go)
-
-END()
-
-RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/internal/internal.go b/vendor/github.com/go-redis/redis/v8/internal/internal.go
deleted file mode 100644
index 4a59c599be..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/internal.go
+++ /dev/null
@@ -1,29 +0,0 @@
-package internal
-
-import (
- "time"
-
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-func RetryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
- if retry < 0 {
- panic("not reached")
- }
- if minBackoff == 0 {
- return 0
- }
-
- d := minBackoff << uint(retry)
- if d < minBackoff {
- return maxBackoff
- }
-
- d = minBackoff + time.Duration(rand.Int63n(int64(d)))
-
- if d > maxBackoff || d < minBackoff {
- d = maxBackoff
- }
-
- return d
-}
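A standalone sketch that mirrors the backoff formula above: exponential growth from minBackoff, full jitter added on top, clamped to maxBackoff; math/rand stands in for the internal rand helper:

	package main

	import (
		"fmt"
		"math/rand"
		"time"
	)

	// retryBackoff mirrors internal.RetryBackoff above.
	func retryBackoff(retry int, minBackoff, maxBackoff time.Duration) time.Duration {
		if retry < 0 {
			panic("not reached")
		}
		if minBackoff == 0 {
			return 0
		}
		d := minBackoff << uint(retry)
		if d < minBackoff { // overflow guard
			return maxBackoff
		}
		d = minBackoff + time.Duration(rand.Int63n(int64(d)))
		if d > maxBackoff || d < minBackoff {
			d = maxBackoff
		}
		return d
	}

	func main() {
		for retry := 0; retry < 5; retry++ {
			// e.g. retry 3 with min=8ms yields 8ms plus jitter in [0, 64ms), capped at 512ms.
			fmt.Println(retry, retryBackoff(retry, 8*time.Millisecond, 512*time.Millisecond))
		}
	}
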
diff --git a/vendor/github.com/go-redis/redis/v8/internal/log.go b/vendor/github.com/go-redis/redis/v8/internal/log.go
deleted file mode 100644
index c8b9213de4..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/log.go
+++ /dev/null
@@ -1,26 +0,0 @@
-package internal
-
-import (
- "context"
- "fmt"
- "log"
- "os"
-)
-
-type Logging interface {
- Printf(ctx context.Context, format string, v ...interface{})
-}
-
-type logger struct {
- log *log.Logger
-}
-
-func (l *logger) Printf(ctx context.Context, format string, v ...interface{}) {
- _ = l.log.Output(2, fmt.Sprintf(format, v...))
-}
-
-// Logger calls Output to print to the stderr.
-// Arguments are handled in the manner of fmt.Print.
-var Logger Logging = &logger{
- log: log.New(os.Stderr, "redis: ", log.LstdFlags|log.Lshortfile),
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/once.go b/vendor/github.com/go-redis/redis/v8/internal/once.go
deleted file mode 100644
index 64f46272ae..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/once.go
+++ /dev/null
@@ -1,60 +0,0 @@
-/*
-Copyright 2014 The Camlistore Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
- http://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
-*/
-
-package internal
-
-import (
- "sync"
- "sync/atomic"
-)
-
-// A Once will perform a successful action exactly once.
-//
-// Unlike a sync.Once, this Once's func returns an error
-// and is re-armed on failure.
-type Once struct {
- m sync.Mutex
- done uint32
-}
-
-// Do calls the function f if and only if Do has not been invoked
-// without error for this instance of Once. In other words, given
-// var once Once
-// if once.Do(f) is called multiple times, only the first call will
-// invoke f, even if f has a different value in each invocation unless
-// f returns an error. A new instance of Once is required for each
-// function to execute.
-//
-// Do is intended for initialization that must be run exactly once. Since f
-// is niladic, it may be necessary to use a function literal to capture the
-// arguments to a function to be invoked by Do:
-// err := config.once.Do(func() error { return config.init(filename) })
-func (o *Once) Do(f func() error) error {
- if atomic.LoadUint32(&o.done) == 1 {
- return nil
- }
- // Slow-path.
- o.m.Lock()
- defer o.m.Unlock()
- var err error
- if o.done == 0 {
- err = f()
- if err == nil {
- atomic.StoreUint32(&o.done, 1)
- }
- }
- return err
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go b/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
deleted file mode 100644
index 5661659865..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/conn.go
+++ /dev/null
@@ -1,121 +0,0 @@
-package pool
-
-import (
- "bufio"
- "context"
- "net"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-var noDeadline = time.Time{}
-
-type Conn struct {
- usedAt int64 // atomic
- netConn net.Conn
-
- rd *proto.Reader
- bw *bufio.Writer
- wr *proto.Writer
-
- Inited bool
- pooled bool
- createdAt time.Time
-}
-
-func NewConn(netConn net.Conn) *Conn {
- cn := &Conn{
- netConn: netConn,
- createdAt: time.Now(),
- }
- cn.rd = proto.NewReader(netConn)
- cn.bw = bufio.NewWriter(netConn)
- cn.wr = proto.NewWriter(cn.bw)
- cn.SetUsedAt(time.Now())
- return cn
-}
-
-func (cn *Conn) UsedAt() time.Time {
- unix := atomic.LoadInt64(&cn.usedAt)
- return time.Unix(unix, 0)
-}
-
-func (cn *Conn) SetUsedAt(tm time.Time) {
- atomic.StoreInt64(&cn.usedAt, tm.Unix())
-}
-
-func (cn *Conn) SetNetConn(netConn net.Conn) {
- cn.netConn = netConn
- cn.rd.Reset(netConn)
- cn.bw.Reset(netConn)
-}
-
-func (cn *Conn) Write(b []byte) (int, error) {
- return cn.netConn.Write(b)
-}
-
-func (cn *Conn) RemoteAddr() net.Addr {
- if cn.netConn != nil {
- return cn.netConn.RemoteAddr()
- }
- return nil
-}
-
-func (cn *Conn) WithReader(ctx context.Context, timeout time.Duration, fn func(rd *proto.Reader) error) error {
- if err := cn.netConn.SetReadDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
- }
- return fn(cn.rd)
-}
-
-func (cn *Conn) WithWriter(
- ctx context.Context, timeout time.Duration, fn func(wr *proto.Writer) error,
-) error {
- if err := cn.netConn.SetWriteDeadline(cn.deadline(ctx, timeout)); err != nil {
- return err
- }
-
- if cn.bw.Buffered() > 0 {
- cn.bw.Reset(cn.netConn)
- }
-
- if err := fn(cn.wr); err != nil {
- return err
- }
-
- return cn.bw.Flush()
-}
-
-func (cn *Conn) Close() error {
- return cn.netConn.Close()
-}
-
-func (cn *Conn) deadline(ctx context.Context, timeout time.Duration) time.Time {
- tm := time.Now()
- cn.SetUsedAt(tm)
-
- if timeout > 0 {
- tm = tm.Add(timeout)
- }
-
- if ctx != nil {
- deadline, ok := ctx.Deadline()
- if ok {
- if timeout == 0 {
- return deadline
- }
- if deadline.Before(tm) {
- return deadline
- }
- return tm
- }
- }
-
- if timeout > 0 {
- return tm
- }
-
- return noDeadline
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
deleted file mode 100644
index 44a4e779df..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool.go
+++ /dev/null
@@ -1,557 +0,0 @@
-package pool
-
-import (
- "context"
- "errors"
- "net"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
-)
-
-var (
-	// ErrClosed is returned when any operation is performed on a closed client.
- ErrClosed = errors.New("redis: client is closed")
-
-	// ErrPoolTimeout is returned when the pool times out waiting for a free connection.
- ErrPoolTimeout = errors.New("redis: connection pool timeout")
-)
-
-var timers = sync.Pool{
- New: func() interface{} {
- t := time.NewTimer(time.Hour)
- t.Stop()
- return t
- },
-}
-
-// Stats contains pool state information and accumulated stats.
-type Stats struct {
- Hits uint32 // number of times free connection was found in the pool
- Misses uint32 // number of times free connection was NOT found in the pool
- Timeouts uint32 // number of times a wait timeout occurred
-
- TotalConns uint32 // number of total connections in the pool
- IdleConns uint32 // number of idle connections in the pool
- StaleConns uint32 // number of stale connections removed from the pool
-}
-
-type Pooler interface {
- NewConn(context.Context) (*Conn, error)
- CloseConn(*Conn) error
-
- Get(context.Context) (*Conn, error)
- Put(context.Context, *Conn)
- Remove(context.Context, *Conn, error)
-
- Len() int
- IdleLen() int
- Stats() *Stats
-
- Close() error
-}
-
-type Options struct {
- Dialer func(context.Context) (net.Conn, error)
- OnClose func(*Conn) error
-
- PoolFIFO bool
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-}
-
-type lastDialErrorWrap struct {
- err error
-}
-
-type ConnPool struct {
- opt *Options
-
- dialErrorsNum uint32 // atomic
-
- lastDialError atomic.Value
-
- queue chan struct{}
-
- connsMu sync.Mutex
- conns []*Conn
- idleConns []*Conn
- poolSize int
- idleConnsLen int
-
- stats Stats
-
- _closed uint32 // atomic
- closedCh chan struct{}
-}
-
-var _ Pooler = (*ConnPool)(nil)
-
-func NewConnPool(opt *Options) *ConnPool {
- p := &ConnPool{
- opt: opt,
-
- queue: make(chan struct{}, opt.PoolSize),
- conns: make([]*Conn, 0, opt.PoolSize),
- idleConns: make([]*Conn, 0, opt.PoolSize),
- closedCh: make(chan struct{}),
- }
-
- p.connsMu.Lock()
- p.checkMinIdleConns()
- p.connsMu.Unlock()
-
- if opt.IdleTimeout > 0 && opt.IdleCheckFrequency > 0 {
- go p.reaper(opt.IdleCheckFrequency)
- }
-
- return p
-}
-
-func (p *ConnPool) checkMinIdleConns() {
- if p.opt.MinIdleConns == 0 {
- return
- }
- for p.poolSize < p.opt.PoolSize && p.idleConnsLen < p.opt.MinIdleConns {
- p.poolSize++
- p.idleConnsLen++
-
- go func() {
- err := p.addIdleConn()
- if err != nil && err != ErrClosed {
- p.connsMu.Lock()
- p.poolSize--
- p.idleConnsLen--
- p.connsMu.Unlock()
- }
- }()
- }
-}
-
-func (p *ConnPool) addIdleConn() error {
- cn, err := p.dialConn(context.TODO(), true)
- if err != nil {
- return err
- }
-
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
- _ = cn.Close()
- return ErrClosed
- }
-
- p.conns = append(p.conns, cn)
- p.idleConns = append(p.idleConns, cn)
- return nil
-}
-
-func (p *ConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.newConn(ctx, false)
-}
-
-func (p *ConnPool) newConn(ctx context.Context, pooled bool) (*Conn, error) {
- cn, err := p.dialConn(ctx, pooled)
- if err != nil {
- return nil, err
- }
-
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- // It is not allowed to add new connections to the closed connection pool.
- if p.closed() {
- _ = cn.Close()
- return nil, ErrClosed
- }
-
- p.conns = append(p.conns, cn)
- if pooled {
- // If the pool is full, remove the cn on the next Put.
- if p.poolSize >= p.opt.PoolSize {
- cn.pooled = false
- } else {
- p.poolSize++
- }
- }
-
- return cn, nil
-}
-
-func (p *ConnPool) dialConn(ctx context.Context, pooled bool) (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
-
- if atomic.LoadUint32(&p.dialErrorsNum) >= uint32(p.opt.PoolSize) {
- return nil, p.getLastDialError()
- }
-
- netConn, err := p.opt.Dialer(ctx)
- if err != nil {
- p.setLastDialError(err)
- if atomic.AddUint32(&p.dialErrorsNum, 1) == uint32(p.opt.PoolSize) {
- go p.tryDial()
- }
- return nil, err
- }
-
- cn := NewConn(netConn)
- cn.pooled = pooled
- return cn, nil
-}
-
-func (p *ConnPool) tryDial() {
- for {
- if p.closed() {
- return
- }
-
- conn, err := p.opt.Dialer(context.Background())
- if err != nil {
- p.setLastDialError(err)
- time.Sleep(time.Second)
- continue
- }
-
- atomic.StoreUint32(&p.dialErrorsNum, 0)
- _ = conn.Close()
- return
- }
-}
-
-func (p *ConnPool) setLastDialError(err error) {
- p.lastDialError.Store(&lastDialErrorWrap{err: err})
-}
-
-func (p *ConnPool) getLastDialError() error {
- err, _ := p.lastDialError.Load().(*lastDialErrorWrap)
- if err != nil {
- return err.err
- }
- return nil
-}
-
-// Get returns an existing connection from the pool or creates a new one.
-func (p *ConnPool) Get(ctx context.Context) (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
-
- if err := p.waitTurn(ctx); err != nil {
- return nil, err
- }
-
- for {
- p.connsMu.Lock()
- cn, err := p.popIdle()
- p.connsMu.Unlock()
-
- if err != nil {
- return nil, err
- }
-
- if cn == nil {
- break
- }
-
- if p.isStaleConn(cn) {
- _ = p.CloseConn(cn)
- continue
- }
-
- atomic.AddUint32(&p.stats.Hits, 1)
- return cn, nil
- }
-
- atomic.AddUint32(&p.stats.Misses, 1)
-
- newcn, err := p.newConn(ctx, true)
- if err != nil {
- p.freeTurn()
- return nil, err
- }
-
- return newcn, nil
-}
-
-func (p *ConnPool) getTurn() {
- p.queue <- struct{}{}
-}
-
-func (p *ConnPool) waitTurn(ctx context.Context) error {
- select {
- case <-ctx.Done():
- return ctx.Err()
- default:
- }
-
- select {
- case p.queue <- struct{}{}:
- return nil
- default:
- }
-
- timer := timers.Get().(*time.Timer)
- timer.Reset(p.opt.PoolTimeout)
-
- select {
- case <-ctx.Done():
- if !timer.Stop() {
- <-timer.C
- }
- timers.Put(timer)
- return ctx.Err()
- case p.queue <- struct{}{}:
- if !timer.Stop() {
- <-timer.C
- }
- timers.Put(timer)
- return nil
- case <-timer.C:
- timers.Put(timer)
- atomic.AddUint32(&p.stats.Timeouts, 1)
- return ErrPoolTimeout
- }
-}
-
-func (p *ConnPool) freeTurn() {
- <-p.queue
-}
-
-func (p *ConnPool) popIdle() (*Conn, error) {
- if p.closed() {
- return nil, ErrClosed
- }
- n := len(p.idleConns)
- if n == 0 {
- return nil, nil
- }
-
- var cn *Conn
- if p.opt.PoolFIFO {
- cn = p.idleConns[0]
- copy(p.idleConns, p.idleConns[1:])
- p.idleConns = p.idleConns[:n-1]
- } else {
- idx := n - 1
- cn = p.idleConns[idx]
- p.idleConns = p.idleConns[:idx]
- }
- p.idleConnsLen--
- p.checkMinIdleConns()
- return cn, nil
-}
-
-func (p *ConnPool) Put(ctx context.Context, cn *Conn) {
- if cn.rd.Buffered() > 0 {
- internal.Logger.Printf(ctx, "Conn has unread data")
- p.Remove(ctx, cn, BadConnError{})
- return
- }
-
- if !cn.pooled {
- p.Remove(ctx, cn, nil)
- return
- }
-
- p.connsMu.Lock()
- p.idleConns = append(p.idleConns, cn)
- p.idleConnsLen++
- p.connsMu.Unlock()
- p.freeTurn()
-}
-
-func (p *ConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- p.removeConnWithLock(cn)
- p.freeTurn()
- _ = p.closeConn(cn)
-}
-
-func (p *ConnPool) CloseConn(cn *Conn) error {
- p.removeConnWithLock(cn)
- return p.closeConn(cn)
-}
-
-func (p *ConnPool) removeConnWithLock(cn *Conn) {
- p.connsMu.Lock()
- p.removeConn(cn)
- p.connsMu.Unlock()
-}
-
-func (p *ConnPool) removeConn(cn *Conn) {
- for i, c := range p.conns {
- if c == cn {
- p.conns = append(p.conns[:i], p.conns[i+1:]...)
- if cn.pooled {
- p.poolSize--
- p.checkMinIdleConns()
- }
- return
- }
- }
-}
-
-func (p *ConnPool) closeConn(cn *Conn) error {
- if p.opt.OnClose != nil {
- _ = p.opt.OnClose(cn)
- }
- return cn.Close()
-}
-
-// Len returns total number of connections.
-func (p *ConnPool) Len() int {
- p.connsMu.Lock()
- n := len(p.conns)
- p.connsMu.Unlock()
- return n
-}
-
-// IdleLen returns number of idle connections.
-func (p *ConnPool) IdleLen() int {
- p.connsMu.Lock()
- n := p.idleConnsLen
- p.connsMu.Unlock()
- return n
-}
-
-func (p *ConnPool) Stats() *Stats {
- idleLen := p.IdleLen()
- return &Stats{
- Hits: atomic.LoadUint32(&p.stats.Hits),
- Misses: atomic.LoadUint32(&p.stats.Misses),
- Timeouts: atomic.LoadUint32(&p.stats.Timeouts),
-
- TotalConns: uint32(p.Len()),
- IdleConns: uint32(idleLen),
- StaleConns: atomic.LoadUint32(&p.stats.StaleConns),
- }
-}
-
-func (p *ConnPool) closed() bool {
- return atomic.LoadUint32(&p._closed) == 1
-}
-
-func (p *ConnPool) Filter(fn func(*Conn) bool) error {
- p.connsMu.Lock()
- defer p.connsMu.Unlock()
-
- var firstErr error
- for _, cn := range p.conns {
- if fn(cn) {
- if err := p.closeConn(cn); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- }
- return firstErr
-}
-
-func (p *ConnPool) Close() error {
- if !atomic.CompareAndSwapUint32(&p._closed, 0, 1) {
- return ErrClosed
- }
- close(p.closedCh)
-
- var firstErr error
- p.connsMu.Lock()
- for _, cn := range p.conns {
- if err := p.closeConn(cn); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- p.conns = nil
- p.poolSize = 0
- p.idleConns = nil
- p.idleConnsLen = 0
- p.connsMu.Unlock()
-
- return firstErr
-}
-
-func (p *ConnPool) reaper(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- for {
- select {
- case <-ticker.C:
- // It is possible that the ticker and closedCh fire together and
- // select pseudo-randomly picks the ticker case, so we double-check
- // here to prevent running after the pool has been closed.
- if p.closed() {
- return
- }
- _, err := p.ReapStaleConns()
- if err != nil {
- internal.Logger.Printf(context.Background(), "ReapStaleConns failed: %s", err)
- continue
- }
- case <-p.closedCh:
- return
- }
- }
-}
-
-func (p *ConnPool) ReapStaleConns() (int, error) {
- var n int
- for {
- p.getTurn()
-
- p.connsMu.Lock()
- cn := p.reapStaleConn()
- p.connsMu.Unlock()
-
- p.freeTurn()
-
- if cn != nil {
- _ = p.closeConn(cn)
- n++
- } else {
- break
- }
- }
- atomic.AddUint32(&p.stats.StaleConns, uint32(n))
- return n, nil
-}
-
-func (p *ConnPool) reapStaleConn() *Conn {
- if len(p.idleConns) == 0 {
- return nil
- }
-
- cn := p.idleConns[0]
- if !p.isStaleConn(cn) {
- return nil
- }
-
- p.idleConns = append(p.idleConns[:0], p.idleConns[1:]...)
- p.idleConnsLen--
- p.removeConn(cn)
-
- return cn
-}
-
-func (p *ConnPool) isStaleConn(cn *Conn) bool {
- if p.opt.IdleTimeout == 0 && p.opt.MaxConnAge == 0 {
- return false
- }
-
- now := time.Now()
- if p.opt.IdleTimeout > 0 && now.Sub(cn.UsedAt()) >= p.opt.IdleTimeout {
- return true
- }
- if p.opt.MaxConnAge > 0 && now.Sub(cn.createdAt) >= p.opt.MaxConnAge {
- return true
- }
-
- return false
-}
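The deleted ConnPool above bounds concurrency with a buffered channel used as a counting semaphore: waitTurn sends to acquire a slot (with a pooled timer enforcing PoolTimeout) and freeTurn receives to release it. A minimal sketch of that pattern in isolation (illustrative names; the timer pooling of the vendored code is omitted):

package main

import (
	"errors"
	"fmt"
	"time"
)

var errPoolTimeout = errors.New("pool timeout")

// slots caps how many callers may hold a connection at once,
// mirroring the ConnPool.queue channel above.
type slots chan struct{}

func (s slots) acquire(timeout time.Duration) error {
	select {
	case s <- struct{}{}: // fast path: a slot is free
		return nil
	default:
	}
	t := time.NewTimer(timeout)
	defer t.Stop()
	select {
	case s <- struct{}{}:
		return nil
	case <-t.C:
		return errPoolTimeout
	}
}

func (s slots) release() { <-s }

func main() {
	sem := make(slots, 2) // like PoolSize = 2
	for i := 0; i < 3; i++ {
		// Slots are deliberately not released, so the third acquire times out.
		if err := sem.acquire(100 * time.Millisecond); err != nil {
			fmt.Println("caller", i, "->", err)
			continue
		}
		fmt.Println("caller", i, "-> got a slot")
	}
}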
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
deleted file mode 100644
index 5a3fde191b..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_single.go
+++ /dev/null
@@ -1,58 +0,0 @@
-package pool
-
-import "context"
-
-type SingleConnPool struct {
- pool Pooler
- cn *Conn
- stickyErr error
-}
-
-var _ Pooler = (*SingleConnPool)(nil)
-
-func NewSingleConnPool(pool Pooler, cn *Conn) *SingleConnPool {
- return &SingleConnPool{
- pool: pool,
- cn: cn,
- }
-}
-
-func (p *SingleConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.pool.NewConn(ctx)
-}
-
-func (p *SingleConnPool) CloseConn(cn *Conn) error {
- return p.pool.CloseConn(cn)
-}
-
-func (p *SingleConnPool) Get(ctx context.Context) (*Conn, error) {
- if p.stickyErr != nil {
- return nil, p.stickyErr
- }
- return p.cn, nil
-}
-
-func (p *SingleConnPool) Put(ctx context.Context, cn *Conn) {}
-
-func (p *SingleConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- p.cn = nil
- p.stickyErr = reason
-}
-
-func (p *SingleConnPool) Close() error {
- p.cn = nil
- p.stickyErr = ErrClosed
- return nil
-}
-
-func (p *SingleConnPool) Len() int {
- return 0
-}
-
-func (p *SingleConnPool) IdleLen() int {
- return 0
-}
-
-func (p *SingleConnPool) Stats() *Stats {
- return &Stats{}
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go b/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
deleted file mode 100644
index 3adb99bc82..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/pool_sticky.go
+++ /dev/null
@@ -1,201 +0,0 @@
-package pool
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
-)
-
-const (
- stateDefault = 0
- stateInited = 1
- stateClosed = 2
-)
-
-type BadConnError struct {
- wrapped error
-}
-
-var _ error = (*BadConnError)(nil)
-
-func (e BadConnError) Error() string {
- s := "redis: Conn is in a bad state"
- if e.wrapped != nil {
- s += ": " + e.wrapped.Error()
- }
- return s
-}
-
-func (e BadConnError) Unwrap() error {
- return e.wrapped
-}
-
-//------------------------------------------------------------------------------
-
-type StickyConnPool struct {
- pool Pooler
- shared int32 // atomic
-
- state uint32 // atomic
- ch chan *Conn
-
- _badConnError atomic.Value
-}
-
-var _ Pooler = (*StickyConnPool)(nil)
-
-func NewStickyConnPool(pool Pooler) *StickyConnPool {
- p, ok := pool.(*StickyConnPool)
- if !ok {
- p = &StickyConnPool{
- pool: pool,
- ch: make(chan *Conn, 1),
- }
- }
- atomic.AddInt32(&p.shared, 1)
- return p
-}
-
-func (p *StickyConnPool) NewConn(ctx context.Context) (*Conn, error) {
- return p.pool.NewConn(ctx)
-}
-
-func (p *StickyConnPool) CloseConn(cn *Conn) error {
- return p.pool.CloseConn(cn)
-}
-
-func (p *StickyConnPool) Get(ctx context.Context) (*Conn, error) {
- // In the worst case this races with Close, which is not a very common operation.
- for i := 0; i < 1000; i++ {
- switch atomic.LoadUint32(&p.state) {
- case stateDefault:
- cn, err := p.pool.Get(ctx)
- if err != nil {
- return nil, err
- }
- if atomic.CompareAndSwapUint32(&p.state, stateDefault, stateInited) {
- return cn, nil
- }
- p.pool.Remove(ctx, cn, ErrClosed)
- case stateInited:
- if err := p.badConnError(); err != nil {
- return nil, err
- }
- cn, ok := <-p.ch
- if !ok {
- return nil, ErrClosed
- }
- return cn, nil
- case stateClosed:
- return nil, ErrClosed
- default:
- panic("not reached")
- }
- }
- return nil, fmt.Errorf("redis: StickyConnPool.Get: infinite loop")
-}
-
-func (p *StickyConnPool) Put(ctx context.Context, cn *Conn) {
- defer func() {
- if recover() != nil {
- p.freeConn(ctx, cn)
- }
- }()
- p.ch <- cn
-}
-
-func (p *StickyConnPool) freeConn(ctx context.Context, cn *Conn) {
- if err := p.badConnError(); err != nil {
- p.pool.Remove(ctx, cn, err)
- } else {
- p.pool.Put(ctx, cn)
- }
-}
-
-func (p *StickyConnPool) Remove(ctx context.Context, cn *Conn, reason error) {
- defer func() {
- if recover() != nil {
- p.pool.Remove(ctx, cn, ErrClosed)
- }
- }()
- p._badConnError.Store(BadConnError{wrapped: reason})
- p.ch <- cn
-}
-
-func (p *StickyConnPool) Close() error {
- if shared := atomic.AddInt32(&p.shared, -1); shared > 0 {
- return nil
- }
-
- for i := 0; i < 1000; i++ {
- state := atomic.LoadUint32(&p.state)
- if state == stateClosed {
- return ErrClosed
- }
- if atomic.CompareAndSwapUint32(&p.state, state, stateClosed) {
- close(p.ch)
- cn, ok := <-p.ch
- if ok {
- p.freeConn(context.TODO(), cn)
- }
- return nil
- }
- }
-
- return errors.New("redis: StickyConnPool.Close: infinite loop")
-}
-
-func (p *StickyConnPool) Reset(ctx context.Context) error {
- if p.badConnError() == nil {
- return nil
- }
-
- select {
- case cn, ok := <-p.ch:
- if !ok {
- return ErrClosed
- }
- p.pool.Remove(ctx, cn, ErrClosed)
- p._badConnError.Store(BadConnError{wrapped: nil})
- default:
- return errors.New("redis: StickyConnPool does not have a Conn")
- }
-
- if !atomic.CompareAndSwapUint32(&p.state, stateInited, stateDefault) {
- state := atomic.LoadUint32(&p.state)
- return fmt.Errorf("redis: invalid StickyConnPool state: %d", state)
- }
-
- return nil
-}
-
-func (p *StickyConnPool) badConnError() error {
- if v := p._badConnError.Load(); v != nil {
- if err := v.(BadConnError); err.wrapped != nil {
- return err
- }
- }
- return nil
-}
-
-func (p *StickyConnPool) Len() int {
- switch atomic.LoadUint32(&p.state) {
- case stateDefault:
- return 0
- case stateInited:
- return 1
- case stateClosed:
- return 0
- default:
- panic("not reached")
- }
-}
-
-func (p *StickyConnPool) IdleLen() int {
- return len(p.ch)
-}
-
-func (p *StickyConnPool) Stats() *Stats {
- return &Stats{}
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make b/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make
deleted file mode 100644
index 31e095aba1..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/pool/ya.make
+++ /dev/null
@@ -1,22 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- conn.go
- pool.go
- pool_single.go
- pool_sticky.go
-)
-
-GO_TEST_SRCS(export_test.go)
-
-GO_XTEST_SRCS(
- bench_test.go
- main_test.go
- pool_test.go
-)
-
-END()
-
-RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go b/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
deleted file mode 100644
index 0e6ca779b1..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/reader.go
+++ /dev/null
@@ -1,332 +0,0 @@
-package proto
-
-import (
- "bufio"
- "fmt"
- "io"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// Redis RESP protocol data types.
-const (
- ErrorReply = '-'
- StatusReply = '+'
- IntReply = ':'
- StringReply = '$'
- ArrayReply = '*'
-)
-
-//------------------------------------------------------------------------------
-
-const Nil = RedisError("redis: nil") // nolint:errname
-
-type RedisError string
-
-func (e RedisError) Error() string { return string(e) }
-
-func (RedisError) RedisError() {}
-
-//------------------------------------------------------------------------------
-
-type MultiBulkParse func(*Reader, int64) (interface{}, error)
-
-type Reader struct {
- rd *bufio.Reader
- _buf []byte
-}
-
-func NewReader(rd io.Reader) *Reader {
- return &Reader{
- rd: bufio.NewReader(rd),
- _buf: make([]byte, 64),
- }
-}
-
-func (r *Reader) Buffered() int {
- return r.rd.Buffered()
-}
-
-func (r *Reader) Peek(n int) ([]byte, error) {
- return r.rd.Peek(n)
-}
-
-func (r *Reader) Reset(rd io.Reader) {
- r.rd.Reset(rd)
-}
-
-func (r *Reader) ReadLine() ([]byte, error) {
- line, err := r.readLine()
- if err != nil {
- return nil, err
- }
- if isNilReply(line) {
- return nil, Nil
- }
- return line, nil
-}
-
-// readLine reads a line, returning an error if:
-// - there is a pending read error;
-// - or line does not end with \r\n.
-func (r *Reader) readLine() ([]byte, error) {
- b, err := r.rd.ReadSlice('\n')
- if err != nil {
- if err != bufio.ErrBufferFull {
- return nil, err
- }
-
- full := make([]byte, len(b))
- copy(full, b)
-
- b, err = r.rd.ReadBytes('\n')
- if err != nil {
- return nil, err
- }
-
- full = append(full, b...) //nolint:makezero
- b = full
- }
- if len(b) <= 2 || b[len(b)-1] != '\n' || b[len(b)-2] != '\r' {
- return nil, fmt.Errorf("redis: invalid reply: %q", b)
- }
- return b[:len(b)-2], nil
-}
-
-func (r *Reader) ReadReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
-
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- case StringReply:
- return r.readStringReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- if m == nil {
- err := fmt.Errorf("redis: got %.100q, but multi bulk parser is nil", line)
- return nil, err
- }
- return m(r, n)
- }
- return nil, fmt.Errorf("redis: can't parse %.100q", line)
-}
-
-func (r *Reader) ReadIntReply() (int64, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case IntReply:
- return util.ParseInt(line[1:], 10, 64)
- default:
- return 0, fmt.Errorf("redis: can't parse int reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadString() (string, error) {
- line, err := r.ReadLine()
- if err != nil {
- return "", err
- }
- switch line[0] {
- case ErrorReply:
- return "", ParseErrorReply(line)
- case StringReply:
- return r.readStringReply(line)
- case StatusReply:
- return string(line[1:]), nil
- case IntReply:
- return string(line[1:]), nil
- default:
- return "", fmt.Errorf("redis: can't parse reply=%.100q reading string", line)
- }
-}
-
-func (r *Reader) readStringReply(line []byte) (string, error) {
- if isNilReply(line) {
- return "", Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return "", err
- }
-
- b := make([]byte, replyLen+2)
- _, err = io.ReadFull(r.rd, b)
- if err != nil {
- return "", err
- }
-
- return util.BytesToString(b[:replyLen]), nil
-}
-
-func (r *Reader) ReadArrayReply(m MultiBulkParse) (interface{}, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return nil, err
- }
- return m(r, n)
- default:
- return nil, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadArrayLen() (int, error) {
- line, err := r.ReadLine()
- if err != nil {
- return 0, err
- }
- switch line[0] {
- case ErrorReply:
- return 0, ParseErrorReply(line)
- case ArrayReply:
- n, err := parseArrayLen(line)
- if err != nil {
- return 0, err
- }
- return int(n), nil
- default:
- return 0, fmt.Errorf("redis: can't parse array reply: %.100q", line)
- }
-}
-
-func (r *Reader) ReadScanReply() ([]string, uint64, error) {
- n, err := r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
- if n != 2 {
- return nil, 0, fmt.Errorf("redis: got %d elements in scan reply, expected 2", n)
- }
-
- cursor, err := r.ReadUint()
- if err != nil {
- return nil, 0, err
- }
-
- n, err = r.ReadArrayLen()
- if err != nil {
- return nil, 0, err
- }
-
- keys := make([]string, n)
-
- for i := 0; i < n; i++ {
- key, err := r.ReadString()
- if err != nil {
- return nil, 0, err
- }
- keys[i] = key
- }
-
- return keys, cursor, err
-}
-
-func (r *Reader) ReadInt() (int64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseInt(b, 10, 64)
-}
-
-func (r *Reader) ReadUint() (uint64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseUint(b, 10, 64)
-}
-
-func (r *Reader) ReadFloatReply() (float64, error) {
- b, err := r.readTmpBytesReply()
- if err != nil {
- return 0, err
- }
- return util.ParseFloat(b, 64)
-}
-
-func (r *Reader) readTmpBytesReply() ([]byte, error) {
- line, err := r.ReadLine()
- if err != nil {
- return nil, err
- }
- switch line[0] {
- case ErrorReply:
- return nil, ParseErrorReply(line)
- case StringReply:
- return r._readTmpBytesReply(line)
- case StatusReply:
- return line[1:], nil
- default:
- return nil, fmt.Errorf("redis: can't parse string reply: %.100q", line)
- }
-}
-
-func (r *Reader) _readTmpBytesReply(line []byte) ([]byte, error) {
- if isNilReply(line) {
- return nil, Nil
- }
-
- replyLen, err := util.Atoi(line[1:])
- if err != nil {
- return nil, err
- }
-
- buf := r.buf(replyLen + 2)
- _, err = io.ReadFull(r.rd, buf)
- if err != nil {
- return nil, err
- }
-
- return buf[:replyLen], nil
-}
-
-func (r *Reader) buf(n int) []byte {
- if n <= cap(r._buf) {
- return r._buf[:n]
- }
- d := n - cap(r._buf)
- r._buf = append(r._buf, make([]byte, d)...)
- return r._buf
-}
-
-func isNilReply(b []byte) bool {
- return len(b) == 3 &&
- (b[0] == StringReply || b[0] == ArrayReply) &&
- b[1] == '-' && b[2] == '1'
-}
-
-func ParseErrorReply(line []byte) error {
- return RedisError(string(line[1:]))
-}
-
-func parseArrayLen(line []byte) (int64, error) {
- if isNilReply(line) {
- return 0, Nil
- }
- return util.ParseInt(line[1:], 10, 64)
-}
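The deleted Reader above frames RESP replies by dispatching on the first byte of each CRLF-terminated line ('+' status, ':' integer, '$' bulk string, '*' array, '-' error). A self-contained sketch of that framing against a canned stream, using only the standard library so it runs without the internal package:

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

// readReply parses one RESP reply the same way Reader.ReadReply above does:
// the first byte selects the reply kind, the rest of the line carries the
// payload or its length.
func readReply(rd *bufio.Reader) (interface{}, error) {
	line, err := rd.ReadString('\n')
	if err != nil {
		return nil, err
	}
	line = strings.TrimSuffix(line, "\r\n")
	switch line[0] {
	case '+': // status reply
		return line[1:], nil
	case ':': // integer reply
		return strconv.ParseInt(line[1:], 10, 64)
	case '$': // bulk string reply; "$-1" is the RESP nil reply
		n, err := strconv.Atoi(line[1:])
		if err != nil || n < 0 {
			return nil, err
		}
		buf := make([]byte, n+2) // payload plus trailing CRLF
		if _, err := io.ReadFull(rd, buf); err != nil {
			return nil, err
		}
		return string(buf[:n]), nil
	default:
		return nil, fmt.Errorf("unsupported reply: %q", line)
	}
}

func main() {
	rd := bufio.NewReader(strings.NewReader("+OK\r\n:42\r\n$5\r\nhello\r\n"))
	for i := 0; i < 3; i++ {
		v, err := readReply(rd)
		fmt.Println(v, err) // OK <nil>, then 42 <nil>, then hello <nil>
	}
}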
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go b/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
deleted file mode 100644
index 0e994765fe..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/scan.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package proto
-
-import (
- "encoding"
- "fmt"
- "reflect"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-// Scan parses bytes `b` into `v` using the appropriate type.
-//nolint:gocyclo
-func Scan(b []byte, v interface{}) error {
- switch v := v.(type) {
- case nil:
- return fmt.Errorf("redis: Scan(nil)")
- case *string:
- *v = util.BytesToString(b)
- return nil
- case *[]byte:
- *v = b
- return nil
- case *int:
- var err error
- *v, err = util.Atoi(b)
- return err
- case *int8:
- n, err := util.ParseInt(b, 10, 8)
- if err != nil {
- return err
- }
- *v = int8(n)
- return nil
- case *int16:
- n, err := util.ParseInt(b, 10, 16)
- if err != nil {
- return err
- }
- *v = int16(n)
- return nil
- case *int32:
- n, err := util.ParseInt(b, 10, 32)
- if err != nil {
- return err
- }
- *v = int32(n)
- return nil
- case *int64:
- n, err := util.ParseInt(b, 10, 64)
- if err != nil {
- return err
- }
- *v = n
- return nil
- case *uint:
- n, err := util.ParseUint(b, 10, 64)
- if err != nil {
- return err
- }
- *v = uint(n)
- return nil
- case *uint8:
- n, err := util.ParseUint(b, 10, 8)
- if err != nil {
- return err
- }
- *v = uint8(n)
- return nil
- case *uint16:
- n, err := util.ParseUint(b, 10, 16)
- if err != nil {
- return err
- }
- *v = uint16(n)
- return nil
- case *uint32:
- n, err := util.ParseUint(b, 10, 32)
- if err != nil {
- return err
- }
- *v = uint32(n)
- return nil
- case *uint64:
- n, err := util.ParseUint(b, 10, 64)
- if err != nil {
- return err
- }
- *v = n
- return nil
- case *float32:
- n, err := util.ParseFloat(b, 32)
- if err != nil {
- return err
- }
- *v = float32(n)
- return err
- case *float64:
- var err error
- *v, err = util.ParseFloat(b, 64)
- return err
- case *bool:
- *v = len(b) == 1 && b[0] == '1'
- return nil
- case *time.Time:
- var err error
- *v, err = time.Parse(time.RFC3339Nano, util.BytesToString(b))
- return err
- case *time.Duration:
- n, err := util.ParseInt(b, 10, 64)
- if err != nil {
- return err
- }
- *v = time.Duration(n)
- return nil
- case encoding.BinaryUnmarshaler:
- return v.UnmarshalBinary(b)
- default:
- return fmt.Errorf(
- "redis: can't unmarshal %T (consider implementing BinaryUnmarshaler)", v)
- }
-}
-
-func ScanSlice(data []string, slice interface{}) error {
- v := reflect.ValueOf(slice)
- if !v.IsValid() {
- return fmt.Errorf("redis: ScanSlice(nil)")
- }
- if v.Kind() != reflect.Ptr {
- return fmt.Errorf("redis: ScanSlice(non-pointer %T)", slice)
- }
- v = v.Elem()
- if v.Kind() != reflect.Slice {
- return fmt.Errorf("redis: ScanSlice(non-slice %T)", slice)
- }
-
- next := makeSliceNextElemFunc(v)
- for i, s := range data {
- elem := next()
- if err := Scan([]byte(s), elem.Addr().Interface()); err != nil {
- err = fmt.Errorf("redis: ScanSlice index=%d value=%q failed: %w", i, s, err)
- return err
- }
- }
-
- return nil
-}
-
-func makeSliceNextElemFunc(v reflect.Value) func() reflect.Value {
- elemType := v.Type().Elem()
-
- if elemType.Kind() == reflect.Ptr {
- elemType = elemType.Elem()
- return func() reflect.Value {
- if v.Len() < v.Cap() {
- v.Set(v.Slice(0, v.Len()+1))
- elem := v.Index(v.Len() - 1)
- if elem.IsNil() {
- elem.Set(reflect.New(elemType))
- }
- return elem.Elem()
- }
-
- elem := reflect.New(elemType)
- v.Set(reflect.Append(v, elem))
- return elem.Elem()
- }
- }
-
- zero := reflect.Zero(elemType)
- return func() reflect.Value {
- if v.Len() < v.Cap() {
- v.Set(v.Slice(0, v.Len()+1))
- return v.Index(v.Len() - 1)
- }
-
- v.Set(reflect.Append(v, zero))
- return v.Index(v.Len() - 1)
- }
-}
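Scan above is essentially one large type switch that decodes a raw reply into the caller's destination pointer, and ScanSlice applies it element by element through reflection. A trimmed-down, runnable analogue of the type-switch idea (only a few destination types, implemented with strconv so it stands alone):

package main

import (
	"fmt"
	"strconv"
	"time"
)

// scan decodes raw bytes into the value pointed to by v, mirroring the
// shape of proto.Scan above for a handful of destination types.
func scan(b []byte, v interface{}) error {
	switch v := v.(type) {
	case *string:
		*v = string(b)
		return nil
	case *int64:
		n, err := strconv.ParseInt(string(b), 10, 64)
		if err != nil {
			return err
		}
		*v = n
		return nil
	case *bool:
		*v = len(b) == 1 && b[0] == '1'
		return nil
	case *time.Duration:
		n, err := strconv.ParseInt(string(b), 10, 64)
		if err != nil {
			return err
		}
		*v = time.Duration(n)
		return nil
	default:
		return fmt.Errorf("scan: unsupported destination %T", v)
	}
}

func main() {
	var s string
	var n int64
	var ok bool
	fmt.Println(scan([]byte("hello"), &s), s) // <nil> hello
	fmt.Println(scan([]byte("42"), &n), n)    // <nil> 42
	fmt.Println(scan([]byte("1"), &ok), ok)   // <nil> true
}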
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go b/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
deleted file mode 100644
index c4260981ed..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/writer.go
+++ /dev/null
@@ -1,155 +0,0 @@
-package proto
-
-import (
- "encoding"
- "fmt"
- "io"
- "strconv"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-type writer interface {
- io.Writer
- io.ByteWriter
- // io.StringWriter
- WriteString(s string) (n int, err error)
-}
-
-type Writer struct {
- writer
-
- lenBuf []byte
- numBuf []byte
-}
-
-func NewWriter(wr writer) *Writer {
- return &Writer{
- writer: wr,
-
- lenBuf: make([]byte, 64),
- numBuf: make([]byte, 64),
- }
-}
-
-func (w *Writer) WriteArgs(args []interface{}) error {
- if err := w.WriteByte(ArrayReply); err != nil {
- return err
- }
-
- if err := w.writeLen(len(args)); err != nil {
- return err
- }
-
- for _, arg := range args {
- if err := w.WriteArg(arg); err != nil {
- return err
- }
- }
-
- return nil
-}
-
-func (w *Writer) writeLen(n int) error {
- w.lenBuf = strconv.AppendUint(w.lenBuf[:0], uint64(n), 10)
- w.lenBuf = append(w.lenBuf, '\r', '\n')
- _, err := w.Write(w.lenBuf)
- return err
-}
-
-func (w *Writer) WriteArg(v interface{}) error {
- switch v := v.(type) {
- case nil:
- return w.string("")
- case string:
- return w.string(v)
- case []byte:
- return w.bytes(v)
- case int:
- return w.int(int64(v))
- case int8:
- return w.int(int64(v))
- case int16:
- return w.int(int64(v))
- case int32:
- return w.int(int64(v))
- case int64:
- return w.int(v)
- case uint:
- return w.uint(uint64(v))
- case uint8:
- return w.uint(uint64(v))
- case uint16:
- return w.uint(uint64(v))
- case uint32:
- return w.uint(uint64(v))
- case uint64:
- return w.uint(v)
- case float32:
- return w.float(float64(v))
- case float64:
- return w.float(v)
- case bool:
- if v {
- return w.int(1)
- }
- return w.int(0)
- case time.Time:
- w.numBuf = v.AppendFormat(w.numBuf[:0], time.RFC3339Nano)
- return w.bytes(w.numBuf)
- case time.Duration:
- return w.int(v.Nanoseconds())
- case encoding.BinaryMarshaler:
- b, err := v.MarshalBinary()
- if err != nil {
- return err
- }
- return w.bytes(b)
- default:
- return fmt.Errorf(
- "redis: can't marshal %T (implement encoding.BinaryMarshaler)", v)
- }
-}
-
-func (w *Writer) bytes(b []byte) error {
- if err := w.WriteByte(StringReply); err != nil {
- return err
- }
-
- if err := w.writeLen(len(b)); err != nil {
- return err
- }
-
- if _, err := w.Write(b); err != nil {
- return err
- }
-
- return w.crlf()
-}
-
-func (w *Writer) string(s string) error {
- return w.bytes(util.StringToBytes(s))
-}
-
-func (w *Writer) uint(n uint64) error {
- w.numBuf = strconv.AppendUint(w.numBuf[:0], n, 10)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) int(n int64) error {
- w.numBuf = strconv.AppendInt(w.numBuf[:0], n, 10)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) float(f float64) error {
- w.numBuf = strconv.AppendFloat(w.numBuf[:0], f, 'f', -1, 64)
- return w.bytes(w.numBuf)
-}
-
-func (w *Writer) crlf() error {
- if err := w.WriteByte('\r'); err != nil {
- return err
- }
- return w.WriteByte('\n')
-}
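The deleted Writer above encodes every command as a RESP array header followed by one bulk string per argument. A standalone sketch that produces the same wire format with a bytes.Buffer, to make the framing concrete (the helper name is illustrative):

package main

import (
	"bytes"
	"fmt"
	"strconv"
)

// writeCommand emits a Redis command in the same form Writer.WriteArgs
// above produces: "*<argc>\r\n" then "$<len>\r\n<arg>\r\n" per argument.
func writeCommand(buf *bytes.Buffer, args ...string) {
	buf.WriteByte('*')
	buf.WriteString(strconv.Itoa(len(args)))
	buf.WriteString("\r\n")
	for _, a := range args {
		buf.WriteByte('$')
		buf.WriteString(strconv.Itoa(len(a)))
		buf.WriteString("\r\n")
		buf.WriteString(a)
		buf.WriteString("\r\n")
	}
}

func main() {
	var buf bytes.Buffer
	writeCommand(&buf, "SET", "key", "value")
	fmt.Printf("%q\n", buf.String())
	// Output: "*3\r\n$3\r\nSET\r\n$3\r\nkey\r\n$5\r\nvalue\r\n"
}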
diff --git a/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make b/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make
deleted file mode 100644
index 6ba8a5f13e..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/proto/ya.make
+++ /dev/null
@@ -1,20 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- reader.go
- scan.go
- writer.go
-)
-
-GO_XTEST_SRCS(
- proto_test.go
- reader_test.go
- scan_test.go
- writer_test.go
-)
-
-END()
-
-RECURSE(gotest)
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go b/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
deleted file mode 100644
index 2edccba94f..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/rand.go
+++ /dev/null
@@ -1,50 +0,0 @@
-package rand
-
-import (
- "math/rand"
- "sync"
-)
-
-// Int returns a non-negative pseudo-random int.
-func Int() int { return pseudo.Int() }
-
-// Intn returns, as an int, a non-negative pseudo-random number in [0,n).
-// It panics if n <= 0.
-func Intn(n int) int { return pseudo.Intn(n) }
-
-// Int63n returns, as an int64, a non-negative pseudo-random number in [0,n).
-// It panics if n <= 0.
-func Int63n(n int64) int64 { return pseudo.Int63n(n) }
-
-// Perm returns, as a slice of n ints, a pseudo-random permutation of the integers [0,n).
-func Perm(n int) []int { return pseudo.Perm(n) }
-
-// Seed uses the provided seed value to initialize the default Source to a
-// deterministic state. If Seed is not called, the generator behaves as if
-// seeded by Seed(1).
-func Seed(n int64) { pseudo.Seed(n) }
-
-var pseudo = rand.New(&source{src: rand.NewSource(1)})
-
-type source struct {
- src rand.Source
- mu sync.Mutex
-}
-
-func (s *source) Int63() int64 {
- s.mu.Lock()
- n := s.src.Int63()
- s.mu.Unlock()
- return n
-}
-
-func (s *source) Seed(seed int64) {
- s.mu.Lock()
- s.src.Seed(seed)
- s.mu.Unlock()
-}
-
-// Shuffle pseudo-randomizes the order of elements.
-// n is the number of elements.
-// swap swaps the elements with indexes i and j.
-func Shuffle(n int, swap func(i, j int)) { pseudo.Shuffle(n, swap) }
diff --git a/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make b/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make
deleted file mode 100644
index eb2f7feb15..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/rand/ya.make
+++ /dev/null
@@ -1,7 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(rand.go)
-
-END()
diff --git a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
deleted file mode 100644
index 9f2e418f79..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/unsafe.go
+++ /dev/null
@@ -1,21 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package internal
-
-import "unsafe"
-
-// String converts byte slice to string.
-func String(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// Bytes converts string to byte slice.
-func Bytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util.go b/vendor/github.com/go-redis/redis/v8/internal/util.go
deleted file mode 100644
index e34a7f0326..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util.go
+++ /dev/null
@@ -1,46 +0,0 @@
-package internal
-
-import (
- "context"
- "time"
-
- "github.com/go-redis/redis/v8/internal/util"
-)
-
-func Sleep(ctx context.Context, dur time.Duration) error {
- t := time.NewTimer(dur)
- defer t.Stop()
-
- select {
- case <-t.C:
- return nil
- case <-ctx.Done():
- return ctx.Err()
- }
-}
-
-func ToLower(s string) string {
- if isLower(s) {
- return s
- }
-
- b := make([]byte, len(s))
- for i := range b {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- c += 'a' - 'A'
- }
- b[i] = c
- }
- return util.BytesToString(b)
-}
-
-func isLower(s string) bool {
- for i := 0; i < len(s); i++ {
- c := s[i]
- if c >= 'A' && c <= 'Z' {
- return false
- }
- }
- return true
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go b/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
deleted file mode 100644
index db5033802a..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/strconv.go
+++ /dev/null
@@ -1,19 +0,0 @@
-package util
-
-import "strconv"
-
-func Atoi(b []byte) (int, error) {
- return strconv.Atoi(BytesToString(b))
-}
-
-func ParseInt(b []byte, base int, bitSize int) (int64, error) {
- return strconv.ParseInt(BytesToString(b), base, bitSize)
-}
-
-func ParseUint(b []byte, base int, bitSize int) (uint64, error) {
- return strconv.ParseUint(BytesToString(b), base, bitSize)
-}
-
-func ParseFloat(b []byte, bitSize int) (float64, error) {
- return strconv.ParseFloat(BytesToString(b), bitSize)
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go b/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
deleted file mode 100644
index daa8d7692a..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/unsafe.go
+++ /dev/null
@@ -1,23 +0,0 @@
-//go:build !appengine
-// +build !appengine
-
-package util
-
-import (
- "unsafe"
-)
-
-// BytesToString converts byte slice to string.
-func BytesToString(b []byte) string {
- return *(*string)(unsafe.Pointer(&b))
-}
-
-// StringToBytes converts string to byte slice.
-func StringToBytes(s string) []byte {
- return *(*[]byte)(unsafe.Pointer(
- &struct {
- string
- Cap int
- }{s, len(s)},
- ))
-}
diff --git a/vendor/github.com/go-redis/redis/v8/internal/util/ya.make b/vendor/github.com/go-redis/redis/v8/internal/util/ya.make
deleted file mode 100644
index 3497e3cf34..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/util/ya.make
+++ /dev/null
@@ -1,10 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- strconv.go
- unsafe.go
-)
-
-END()
diff --git a/vendor/github.com/go-redis/redis/v8/internal/ya.make b/vendor/github.com/go-redis/redis/v8/internal/ya.make
deleted file mode 100644
index 1cf123fac4..0000000000
--- a/vendor/github.com/go-redis/redis/v8/internal/ya.make
+++ /dev/null
@@ -1,26 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- arg.go
- internal.go
- log.go
- once.go
- unsafe.go
- util.go
-)
-
-GO_TEST_SRCS(internal_test.go)
-
-END()
-
-RECURSE(
- gotest
- hashtag
- hscan
- pool
- proto
- rand
- util
-)
diff --git a/vendor/github.com/go-redis/redis/v8/iterator.go b/vendor/github.com/go-redis/redis/v8/iterator.go
deleted file mode 100644
index 2f8bc2beda..0000000000
--- a/vendor/github.com/go-redis/redis/v8/iterator.go
+++ /dev/null
@@ -1,77 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
-)
-
-// ScanIterator is used to incrementally iterate over a collection of elements.
-// It's safe for concurrent use by multiple goroutines.
-type ScanIterator struct {
- mu sync.Mutex // protects Scanner and pos
- cmd *ScanCmd
- pos int
-}
-
-// Err returns the last iterator error, if any.
-func (it *ScanIterator) Err() error {
- it.mu.Lock()
- err := it.cmd.Err()
- it.mu.Unlock()
- return err
-}
-
-// Next advances the cursor and returns true if more values can be read.
-func (it *ScanIterator) Next(ctx context.Context) bool {
- it.mu.Lock()
- defer it.mu.Unlock()
-
- // Return immediately on errors.
- if it.cmd.Err() != nil {
- return false
- }
-
- // Advance cursor, check if we are still within range.
- if it.pos < len(it.cmd.page) {
- it.pos++
- return true
- }
-
- for {
- // Return if there is no more data to fetch.
- if it.cmd.cursor == 0 {
- return false
- }
-
- // Fetch next page.
- switch it.cmd.args[0] {
- case "scan", "qscan":
- it.cmd.args[1] = it.cmd.cursor
- default:
- it.cmd.args[2] = it.cmd.cursor
- }
-
- err := it.cmd.process(ctx, it.cmd)
- if err != nil {
- return false
- }
-
- it.pos = 1
-
- // Redis can occasionally return an empty page.
- if len(it.cmd.page) > 0 {
- return true
- }
- }
-}
-
-// Val returns the key/field at the current cursor position.
-func (it *ScanIterator) Val() string {
- var v string
- it.mu.Lock()
- if it.cmd.Err() == nil && it.pos > 0 && it.pos <= len(it.cmd.page) {
- v = it.cmd.page[it.pos-1]
- }
- it.mu.Unlock()
- return v
-}
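For context, the ScanIterator above is normally obtained from the client's ScanCmd. A hedged usage sketch (it assumes the public go-redis/v8 client API, including ScanCmd.Iterator, and a Redis server reachable at localhost:6379, none of which is part of this diff):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// Iterator wraps the ScanCmd in the ScanIterator shown above; Next
	// fetches further pages transparently until the cursor returns to 0.
	iter := rdb.Scan(ctx, 0, "user:*", 10).Iterator()
	for iter.Next(ctx) {
		fmt.Println("key:", iter.Val())
	}
	if err := iter.Err(); err != nil {
		fmt.Println("scan failed:", err)
	}
}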
diff --git a/vendor/github.com/go-redis/redis/v8/options.go b/vendor/github.com/go-redis/redis/v8/options.go
deleted file mode 100644
index a4abe32c3a..0000000000
--- a/vendor/github.com/go-redis/redis/v8/options.go
+++ /dev/null
@@ -1,429 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "net/url"
- "runtime"
- "sort"
- "strconv"
- "strings"
- "time"
-
- "github.com/go-redis/redis/v8/internal/pool"
-)
-
-// Limiter is the interface of a rate limiter or a circuit breaker.
-type Limiter interface {
- // Allow returns nil if the operation is allowed or an error otherwise.
- // If the operation is allowed, the client must call ReportResult with the
- // result of the operation, whether it is a success or a failure.
- Allow() error
- // ReportResult reports the result of the previously allowed operation.
- // nil indicates a success, non-nil error usually indicates a failure.
- ReportResult(result error)
-}
-
-// Options keeps the settings to set up a Redis connection.
-type Options struct {
- // The network type, either tcp or unix.
- // Default is tcp.
- Network string
- // host:port address.
- Addr string
-
- // Dialer creates a new network connection and has priority over
- // Network and Addr options.
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
-
- // Hook that is called when a new connection is established.
- OnConnect func(ctx context.Context, cn *Conn) error
-
- // Use the specified Username to authenticate the current connection
- // with one of the connections defined in the ACL list when connecting
- // to a Redis 6.0 instance, or greater, that is using the Redis ACL system.
- Username string
- // Optional password. Must match the password specified in the
- // requirepass server configuration option (if connecting to a Redis 5.0 instance, or lower),
- // or the User Password when connecting to a Redis 6.0 instance, or greater,
- // that is using the Redis ACL system.
- Password string
-
- // Database to be selected after connecting to the server.
- DB int
-
- // Maximum number of retries before giving up.
- // Default is 3 retries; -1 (not 0) disables retries.
- MaxRetries int
- // Minimum backoff between each retry.
- // Default is 8 milliseconds; -1 disables backoff.
- MinRetryBackoff time.Duration
- // Maximum backoff between each retry.
- // Default is 512 milliseconds; -1 disables backoff.
- MaxRetryBackoff time.Duration
-
- // Dial timeout for establishing new connections.
- // Default is 5 seconds.
- DialTimeout time.Duration
- // Timeout for socket reads. If reached, commands will fail
- // with a timeout instead of blocking. Use value -1 for no timeout and 0 for default.
- // Default is 3 seconds.
- ReadTimeout time.Duration
- // Timeout for socket writes. If reached, commands will fail
- // with a timeout instead of blocking.
- // Default is ReadTimeout.
- WriteTimeout time.Duration
-
- // Type of connection pool.
- // true for FIFO pool, false for LIFO pool.
- // Note that fifo has higher overhead compared to lifo.
- PoolFIFO bool
- // Maximum number of socket connections.
- // Default is 10 connections per every available CPU as reported by runtime.GOMAXPROCS.
- PoolSize int
- // Minimum number of idle connections, which is useful when establishing
- // a new connection is slow.
- MinIdleConns int
- // Connection age at which client retires (closes) the connection.
- // Default is to not close aged connections.
- MaxConnAge time.Duration
- // Amount of time client waits for connection if all connections
- // are busy before returning an error.
- // Default is ReadTimeout + 1 second.
- PoolTimeout time.Duration
- // Amount of time after which client closes idle connections.
- // Should be less than server's timeout.
- // Default is 5 minutes. -1 disables idle timeout check.
- IdleTimeout time.Duration
- // Frequency of idle checks made by idle connections reaper.
- // Default is 1 minute. -1 disables idle connections reaper,
- // but idle connections are still discarded by the client
- // if IdleTimeout is set.
- IdleCheckFrequency time.Duration
-
- // Enables read only queries on slave nodes.
- readOnly bool
-
- // TLS Config to use. When set, TLS will be negotiated.
- TLSConfig *tls.Config
-
- // Limiter interface used to implement a circuit breaker or rate limiter.
- Limiter Limiter
-}
-
-func (opt *Options) init() {
- if opt.Addr == "" {
- opt.Addr = "localhost:6379"
- }
- if opt.Network == "" {
- if strings.HasPrefix(opt.Addr, "/") {
- opt.Network = "unix"
- } else {
- opt.Network = "tcp"
- }
- }
- if opt.DialTimeout == 0 {
- opt.DialTimeout = 5 * time.Second
- }
- if opt.Dialer == nil {
- opt.Dialer = func(ctx context.Context, network, addr string) (net.Conn, error) {
- netDialer := &net.Dialer{
- Timeout: opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, opt.TLSConfig)
- }
- }
- if opt.PoolSize == 0 {
- opt.PoolSize = 10 * runtime.GOMAXPROCS(0)
- }
- switch opt.ReadTimeout {
- case -1:
- opt.ReadTimeout = 0
- case 0:
- opt.ReadTimeout = 3 * time.Second
- }
- switch opt.WriteTimeout {
- case -1:
- opt.WriteTimeout = 0
- case 0:
- opt.WriteTimeout = opt.ReadTimeout
- }
- if opt.PoolTimeout == 0 {
- opt.PoolTimeout = opt.ReadTimeout + time.Second
- }
- if opt.IdleTimeout == 0 {
- opt.IdleTimeout = 5 * time.Minute
- }
- if opt.IdleCheckFrequency == 0 {
- opt.IdleCheckFrequency = time.Minute
- }
-
- if opt.MaxRetries == -1 {
- opt.MaxRetries = 0
- } else if opt.MaxRetries == 0 {
- opt.MaxRetries = 3
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-}
-
-func (opt *Options) clone() *Options {
- clone := *opt
- return &clone
-}
-
-// ParseURL parses a URL into Options that can be used to connect to Redis.
-// Scheme is required.
-// There are two connection types: by tcp socket and by unix socket.
-// Tcp connection:
-// redis://<user>:<password>@<host>:<port>/<db_number>
-// Unix connection:
-// unix://<user>:<password>@</path/to/redis.sock>?db=<db_number>
-// Most Option fields can be set using query parameters, with the following restrictions:
-// - field names are mapped using snake-case conversion: to set MaxRetries, use max_retries
-// - only scalar type fields are supported (bool, int, time.Duration)
-// - for time.Duration fields, values must be a valid input for time.ParseDuration();
-// additionally a plain integer as value (i.e. without unit) is interpreted as seconds
-// - to disable a duration field, use value less than or equal to 0; to use the default
-// value, leave the value blank or remove the parameter
-// - only the last value is interpreted if a parameter is given multiple times
-// - fields "network", "addr", "username" and "password" can only be set using other
-// URL attributes (scheme, host, userinfo, resp.), query parameters using these
-// names will be treated as unknown parameters
-// - unknown parameter names will result in an error
-// Examples:
-// redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2
-// is equivalent to:
-// &Options{
-// Network: "tcp",
-// Addr: "localhost:6789",
-// DB: 1, // path "/3" was overridden by "&db=1"
-// DialTimeout: 3 * time.Second, // no time unit = seconds
-// ReadTimeout: 6 * time.Second,
-// MaxRetries: 2,
-// }
-func ParseURL(redisURL string) (*Options, error) {
- u, err := url.Parse(redisURL)
- if err != nil {
- return nil, err
- }
-
- switch u.Scheme {
- case "redis", "rediss":
- return setupTCPConn(u)
- case "unix":
- return setupUnixConn(u)
- default:
- return nil, fmt.Errorf("redis: invalid URL scheme: %s", u.Scheme)
- }
-}
-
-func setupTCPConn(u *url.URL) (*Options, error) {
- o := &Options{Network: "tcp"}
-
- o.Username, o.Password = getUserPassword(u)
-
- h, p, err := net.SplitHostPort(u.Host)
- if err != nil {
- h = u.Host
- }
- if h == "" {
- h = "localhost"
- }
- if p == "" {
- p = "6379"
- }
- o.Addr = net.JoinHostPort(h, p)
-
- f := strings.FieldsFunc(u.Path, func(r rune) bool {
- return r == '/'
- })
- switch len(f) {
- case 0:
- o.DB = 0
- case 1:
- if o.DB, err = strconv.Atoi(f[0]); err != nil {
- return nil, fmt.Errorf("redis: invalid database number: %q", f[0])
- }
- default:
- return nil, fmt.Errorf("redis: invalid URL path: %s", u.Path)
- }
-
- if u.Scheme == "rediss" {
- o.TLSConfig = &tls.Config{ServerName: h}
- }
-
- return setupConnParams(u, o)
-}
-
-func setupUnixConn(u *url.URL) (*Options, error) {
- o := &Options{
- Network: "unix",
- }
-
- if strings.TrimSpace(u.Path) == "" { // path is required with unix connection
- return nil, errors.New("redis: empty unix socket path")
- }
- o.Addr = u.Path
- o.Username, o.Password = getUserPassword(u)
- return setupConnParams(u, o)
-}
-
-type queryOptions struct {
- q url.Values
- err error
-}
-
-func (o *queryOptions) string(name string) string {
- vs := o.q[name]
- if len(vs) == 0 {
- return ""
- }
- delete(o.q, name) // enable detection of unknown parameters
- return vs[len(vs)-1]
-}
-
-func (o *queryOptions) int(name string) int {
- s := o.string(name)
- if s == "" {
- return 0
- }
- i, err := strconv.Atoi(s)
- if err == nil {
- return i
- }
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s number: %s", name, err)
- }
- return 0
-}
-
-func (o *queryOptions) duration(name string) time.Duration {
- s := o.string(name)
- if s == "" {
- return 0
- }
- // try plain number first
- if i, err := strconv.Atoi(s); err == nil {
- if i <= 0 {
- // disable timeouts
- return -1
- }
- return time.Duration(i) * time.Second
- }
- dur, err := time.ParseDuration(s)
- if err == nil {
- return dur
- }
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s duration: %w", name, err)
- }
- return 0
-}
-
-func (o *queryOptions) bool(name string) bool {
- switch s := o.string(name); s {
- case "true", "1":
- return true
- case "false", "0", "":
- return false
- default:
- if o.err == nil {
- o.err = fmt.Errorf("redis: invalid %s boolean: expected true/false/1/0 or an empty string, got %q", name, s)
- }
- return false
- }
-}
-
-func (o *queryOptions) remaining() []string {
- if len(o.q) == 0 {
- return nil
- }
- keys := make([]string, 0, len(o.q))
- for k := range o.q {
- keys = append(keys, k)
- }
- sort.Strings(keys)
- return keys
-}
-
-// setupConnParams converts query parameters in u to option values in o.
-func setupConnParams(u *url.URL, o *Options) (*Options, error) {
- q := queryOptions{q: u.Query()}
-
- // compat: a future major release may use q.int("db")
- if tmp := q.string("db"); tmp != "" {
- db, err := strconv.Atoi(tmp)
- if err != nil {
- return nil, fmt.Errorf("redis: invalid database number: %w", err)
- }
- o.DB = db
- }
-
- o.MaxRetries = q.int("max_retries")
- o.MinRetryBackoff = q.duration("min_retry_backoff")
- o.MaxRetryBackoff = q.duration("max_retry_backoff")
- o.DialTimeout = q.duration("dial_timeout")
- o.ReadTimeout = q.duration("read_timeout")
- o.WriteTimeout = q.duration("write_timeout")
- o.PoolFIFO = q.bool("pool_fifo")
- o.PoolSize = q.int("pool_size")
- o.MinIdleConns = q.int("min_idle_conns")
- o.MaxConnAge = q.duration("max_conn_age")
- o.PoolTimeout = q.duration("pool_timeout")
- o.IdleTimeout = q.duration("idle_timeout")
- o.IdleCheckFrequency = q.duration("idle_check_frequency")
- if q.err != nil {
- return nil, q.err
- }
-
- // any parameters left?
- if r := q.remaining(); len(r) > 0 {
- return nil, fmt.Errorf("redis: unexpected option: %s", strings.Join(r, ", "))
- }
-
- return o, nil
-}
-
-func getUserPassword(u *url.URL) (string, string) {
- var user, password string
- if u.User != nil {
- user = u.User.Username()
- if p, ok := u.User.Password(); ok {
- password = p
- }
- }
- return user, password
-}
-
-func newConnPool(opt *Options) *pool.ConnPool {
- return pool.NewConnPool(&pool.Options{
- Dialer: func(ctx context.Context) (net.Conn, error) {
- return opt.Dialer(ctx, opt.Network, opt.Addr)
- },
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- })
-}
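The ParseURL documentation above already lists the mapping rules, and the call itself needs no server, so a small runnable check of the parsing behaviour looks like this (it assumes the public go-redis/v8 import path; redis.NewClient, which is not part of this diff, would then accept the returned Options):

package main

import (
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	opt, err := redis.ParseURL("redis://user:password@localhost:6789/3?dial_timeout=3&db=1&read_timeout=6s&max_retries=2")
	if err != nil {
		fmt.Println("parse failed:", err)
		return
	}
	// Matches the equivalence shown in the ParseURL comment above:
	// path "/3" is overridden by "&db=1", and a plain integer means seconds.
	fmt.Println(opt.Addr)        // localhost:6789
	fmt.Println(opt.DB)          // 1
	fmt.Println(opt.DialTimeout) // 3s
	fmt.Println(opt.ReadTimeout) // 6s
	fmt.Println(opt.MaxRetries)  // 2
}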
diff --git a/vendor/github.com/go-redis/redis/v8/pipeline.go b/vendor/github.com/go-redis/redis/v8/pipeline.go
deleted file mode 100644
index 31bab971e6..0000000000
--- a/vendor/github.com/go-redis/redis/v8/pipeline.go
+++ /dev/null
@@ -1,147 +0,0 @@
-package redis
-
-import (
- "context"
- "sync"
-
- "github.com/go-redis/redis/v8/internal/pool"
-)
-
-type pipelineExecer func(context.Context, []Cmder) error
-
-// Pipeliner is a mechanism to realise the Redis Pipeline technique.
-//
-// Pipelining is a technique to greatly speed up processing by packing
-// operations into batches, sending them at once to Redis, and reading the
-// replies in a single step.
-// See https://redis.io/topics/pipelining
-//
-// Note that Pipeline is not a transaction, so you can get unexpected
-// results in case of big pipelines and small read/write timeouts.
-// The Redis client has retransmission logic in case of timeouts, so a pipeline
-// can be retransmitted and commands can be executed more than once.
-// To avoid this, it is a good idea to use reasonably bigger read/write timeouts
-// depending on your batch size, and/or to use TxPipeline.
-type Pipeliner interface {
- StatefulCmdable
- Len() int
- Do(ctx context.Context, args ...interface{}) *Cmd
- Process(ctx context.Context, cmd Cmder) error
- Close() error
- Discard() error
- Exec(ctx context.Context) ([]Cmder, error)
-}
-
-var _ Pipeliner = (*Pipeline)(nil)
-
-// Pipeline implements pipelining as described in
-// http://redis.io/topics/pipelining. It's safe for concurrent use
-// by multiple goroutines.
-type Pipeline struct {
- cmdable
- statefulCmdable
-
- ctx context.Context
- exec pipelineExecer
-
- mu sync.Mutex
- cmds []Cmder
- closed bool
-}
-
-func (c *Pipeline) init() {
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
-}
-
-// Len returns the number of queued commands.
-func (c *Pipeline) Len() int {
- c.mu.Lock()
- ln := len(c.cmds)
- c.mu.Unlock()
- return ln
-}
-
-// Do queues the custom command for later execution.
-func (c *Pipeline) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Process queues the cmd for later execution.
-func (c *Pipeline) Process(ctx context.Context, cmd Cmder) error {
- c.mu.Lock()
- c.cmds = append(c.cmds, cmd)
- c.mu.Unlock()
- return nil
-}
-
-// Close closes the pipeline, releasing any open resources.
-func (c *Pipeline) Close() error {
- c.mu.Lock()
- _ = c.discard()
- c.closed = true
- c.mu.Unlock()
- return nil
-}
-
-// Discard resets the pipeline and discards queued commands.
-func (c *Pipeline) Discard() error {
- c.mu.Lock()
- err := c.discard()
- c.mu.Unlock()
- return err
-}
-
-func (c *Pipeline) discard() error {
- if c.closed {
- return pool.ErrClosed
- }
- c.cmds = c.cmds[:0]
- return nil
-}
-
-// Exec executes all previously queued commands using one
-// client-server roundtrip.
-//
-// Exec always returns the list of commands and the error of the first failed
-// command, if any.
-func (c *Pipeline) Exec(ctx context.Context) ([]Cmder, error) {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil, pool.ErrClosed
- }
-
- if len(c.cmds) == 0 {
- return nil, nil
- }
-
- cmds := c.cmds
- c.cmds = nil
-
- return cmds, c.exec(ctx, cmds)
-}
-
-func (c *Pipeline) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- if err := fn(c); err != nil {
- return nil, err
- }
- cmds, err := c.Exec(ctx)
- _ = c.Close()
- return cmds, err
-}
-
-func (c *Pipeline) Pipeline() Pipeliner {
- return c
-}
-
-func (c *Pipeline) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipelined(ctx, fn)
-}
-
-func (c *Pipeline) TxPipeline() Pipeliner {
- return c
-}
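A hedged usage sketch for the Pipeline above, going through the public client's Pipelined helper (rdb, redis.NewClient, Set, and Incr come from the public go-redis/v8 API, which is not shown in this diff; a Redis server at localhost:6379 is assumed):

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	defer rdb.Close()

	// The callback only queues commands (Pipeline.Process above); they are
	// sent in a single round trip when the callback returns.
	cmds, err := rdb.Pipelined(ctx, func(pipe redis.Pipeliner) error {
		pipe.Set(ctx, "key", "value", 0)
		pipe.Incr(ctx, "counter")
		return nil
	})
	if err != nil {
		fmt.Println("pipeline failed:", err)
		return
	}
	for _, cmd := range cmds {
		fmt.Println(cmd.Name(), "->", cmd.Err())
	}
}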
diff --git a/vendor/github.com/go-redis/redis/v8/pubsub.go b/vendor/github.com/go-redis/redis/v8/pubsub.go
deleted file mode 100644
index efc2354af0..0000000000
--- a/vendor/github.com/go-redis/redis/v8/pubsub.go
+++ /dev/null
@@ -1,668 +0,0 @@
-package redis
-
-import (
- "context"
- "fmt"
- "strings"
- "sync"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// PubSub implements Pub/Sub commands as described in
-// http://redis.io/topics/pubsub. Message receiving is NOT safe
-// for concurrent use by multiple goroutines.
-//
-// PubSub automatically reconnects to Redis Server and resubscribes
-// to the channels in case of network errors.
-type PubSub struct {
- opt *Options
-
- newConn func(ctx context.Context, channels []string) (*pool.Conn, error)
- closeConn func(*pool.Conn) error
-
- mu sync.Mutex
- cn *pool.Conn
- channels map[string]struct{}
- patterns map[string]struct{}
-
- closed bool
- exit chan struct{}
-
- cmd *Cmd
-
- chOnce sync.Once
- msgCh *channel
- allCh *channel
-}
-
-func (c *PubSub) init() {
- c.exit = make(chan struct{})
-}
-
-func (c *PubSub) String() string {
- channels := mapKeys(c.channels)
- channels = append(channels, mapKeys(c.patterns)...)
- return fmt.Sprintf("PubSub(%s)", strings.Join(channels, ", "))
-}
-
-func (c *PubSub) connWithLock(ctx context.Context) (*pool.Conn, error) {
- c.mu.Lock()
- cn, err := c.conn(ctx, nil)
- c.mu.Unlock()
- return cn, err
-}
-
-func (c *PubSub) conn(ctx context.Context, newChannels []string) (*pool.Conn, error) {
- if c.closed {
- return nil, pool.ErrClosed
- }
- if c.cn != nil {
- return c.cn, nil
- }
-
- channels := mapKeys(c.channels)
- channels = append(channels, newChannels...)
-
- cn, err := c.newConn(ctx, channels)
- if err != nil {
- return nil, err
- }
-
- if err := c.resubscribe(ctx, cn); err != nil {
- _ = c.closeConn(cn)
- return nil, err
- }
-
- c.cn = cn
- return cn, nil
-}
-
-func (c *PubSub) writeCmd(ctx context.Context, cn *pool.Conn, cmd Cmder) error {
- return cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
-}
-
-func (c *PubSub) resubscribe(ctx context.Context, cn *pool.Conn) error {
- var firstErr error
-
- if len(c.channels) > 0 {
- firstErr = c._subscribe(ctx, cn, "subscribe", mapKeys(c.channels))
- }
-
- if len(c.patterns) > 0 {
- err := c._subscribe(ctx, cn, "psubscribe", mapKeys(c.patterns))
- if err != nil && firstErr == nil {
- firstErr = err
- }
- }
-
- return firstErr
-}
-
-func mapKeys(m map[string]struct{}) []string {
- s := make([]string, len(m))
- i := 0
- for k := range m {
- s[i] = k
- i++
- }
- return s
-}
-
-func (c *PubSub) _subscribe(
- ctx context.Context, cn *pool.Conn, redisCmd string, channels []string,
-) error {
- args := make([]interface{}, 0, 1+len(channels))
- args = append(args, redisCmd)
- for _, channel := range channels {
- args = append(args, channel)
- }
- cmd := NewSliceCmd(ctx, args...)
- return c.writeCmd(ctx, cn, cmd)
-}
-
-func (c *PubSub) releaseConnWithLock(
- ctx context.Context,
- cn *pool.Conn,
- err error,
- allowTimeout bool,
-) {
- c.mu.Lock()
- c.releaseConn(ctx, cn, err, allowTimeout)
- c.mu.Unlock()
-}
-
-func (c *PubSub) releaseConn(ctx context.Context, cn *pool.Conn, err error, allowTimeout bool) {
- if c.cn != cn {
- return
- }
- if isBadConn(err, allowTimeout, c.opt.Addr) {
- c.reconnect(ctx, err)
- }
-}
-
-func (c *PubSub) reconnect(ctx context.Context, reason error) {
- _ = c.closeTheCn(reason)
- _, _ = c.conn(ctx, nil)
-}
-
-func (c *PubSub) closeTheCn(reason error) error {
- if c.cn == nil {
- return nil
- }
- if !c.closed {
- internal.Logger.Printf(c.getContext(), "redis: discarding bad PubSub connection: %s", reason)
- }
- err := c.closeConn(c.cn)
- c.cn = nil
- return err
-}
-
-func (c *PubSub) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return pool.ErrClosed
- }
- c.closed = true
- close(c.exit)
-
- return c.closeTheCn(pool.ErrClosed)
-}
-
-// Subscribe subscribes the client to the specified channels. It returns
-// an empty subscription if there are no channels.
-func (c *PubSub) Subscribe(ctx context.Context, channels ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- err := c.subscribe(ctx, "subscribe", channels...)
- if c.channels == nil {
- c.channels = make(map[string]struct{})
- }
- for _, s := range channels {
- c.channels[s] = struct{}{}
- }
- return err
-}
-
-// PSubscribe subscribes the client to the given patterns. It returns
-// an empty subscription if there are no patterns.
-func (c *PubSub) PSubscribe(ctx context.Context, patterns ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- err := c.subscribe(ctx, "psubscribe", patterns...)
- if c.patterns == nil {
- c.patterns = make(map[string]struct{})
- }
- for _, s := range patterns {
- c.patterns[s] = struct{}{}
- }
- return err
-}
-
-// Unsubscribe the client from the given channels, or from all of
-// them if none is given.
-func (c *PubSub) Unsubscribe(ctx context.Context, channels ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, channel := range channels {
- delete(c.channels, channel)
- }
- err := c.subscribe(ctx, "unsubscribe", channels...)
- return err
-}
-
-// PUnsubscribe the client from the given patterns, or from all of
-// them if none is given.
-func (c *PubSub) PUnsubscribe(ctx context.Context, patterns ...string) error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- for _, pattern := range patterns {
- delete(c.patterns, pattern)
- }
- err := c.subscribe(ctx, "punsubscribe", patterns...)
- return err
-}
-
-func (c *PubSub) subscribe(ctx context.Context, redisCmd string, channels ...string) error {
- cn, err := c.conn(ctx, channels)
- if err != nil {
- return err
- }
-
- err = c._subscribe(ctx, cn, redisCmd, channels)
- c.releaseConn(ctx, cn, err, false)
- return err
-}
-
-func (c *PubSub) Ping(ctx context.Context, payload ...string) error {
- args := []interface{}{"ping"}
- if len(payload) == 1 {
- args = append(args, payload[0])
- }
- cmd := NewCmd(ctx, args...)
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- cn, err := c.conn(ctx, nil)
- if err != nil {
- return err
- }
-
- err = c.writeCmd(ctx, cn, cmd)
- c.releaseConn(ctx, cn, err, false)
- return err
-}
-
-// Subscription is a message received after a successful subscription to a channel.
-type Subscription struct {
- // Can be "subscribe", "unsubscribe", "psubscribe" or "punsubscribe".
- Kind string
- // Channel name we have subscribed to.
- Channel string
- // Number of channels we are currently subscribed to.
- Count int
-}
-
-func (m *Subscription) String() string {
- return fmt.Sprintf("%s: %s", m.Kind, m.Channel)
-}
-
-// Message is received as a result of a PUBLISH command issued by another client.
-type Message struct {
- Channel string
- Pattern string
- Payload string
- PayloadSlice []string
-}
-
-func (m *Message) String() string {
- return fmt.Sprintf("Message<%s: %s>", m.Channel, m.Payload)
-}
-
-// Pong is received as a result of a PING command issued by another client.
-type Pong struct {
- Payload string
-}
-
-func (p *Pong) String() string {
- if p.Payload != "" {
- return fmt.Sprintf("Pong<%s>", p.Payload)
- }
- return "Pong"
-}
-
-func (c *PubSub) newMessage(reply interface{}) (interface{}, error) {
- switch reply := reply.(type) {
- case string:
- return &Pong{
- Payload: reply,
- }, nil
- case []interface{}:
- switch kind := reply[0].(string); kind {
- case "subscribe", "unsubscribe", "psubscribe", "punsubscribe":
- // Can be nil in case of "unsubscribe".
- channel, _ := reply[1].(string)
- return &Subscription{
- Kind: kind,
- Channel: channel,
- Count: int(reply[2].(int64)),
- }, nil
- case "message":
- switch payload := reply[2].(type) {
- case string:
- return &Message{
- Channel: reply[1].(string),
- Payload: payload,
- }, nil
- case []interface{}:
- ss := make([]string, len(payload))
- for i, s := range payload {
- ss[i] = s.(string)
- }
- return &Message{
- Channel: reply[1].(string),
- PayloadSlice: ss,
- }, nil
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message payload: %T", payload)
- }
- case "pmessage":
- return &Message{
- Pattern: reply[1].(string),
- Channel: reply[2].(string),
- Payload: reply[3].(string),
- }, nil
- case "pong":
- return &Pong{
- Payload: reply[1].(string),
- }, nil
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message: %q", kind)
- }
- default:
- return nil, fmt.Errorf("redis: unsupported pubsub message: %#v", reply)
- }
-}
-
-// ReceiveTimeout acts like Receive but returns an error if a message
-// is not received in time. This is a low-level API and in most cases
-// Channel should be used instead.
-func (c *PubSub) ReceiveTimeout(ctx context.Context, timeout time.Duration) (interface{}, error) {
- if c.cmd == nil {
- c.cmd = NewCmd(ctx)
- }
-
- // Don't hold the lock to allow subscriptions and pings.
-
- cn, err := c.connWithLock(ctx)
- if err != nil {
- return nil, err
- }
-
- err = cn.WithReader(ctx, timeout, func(rd *proto.Reader) error {
- return c.cmd.readReply(rd)
- })
-
- c.releaseConnWithLock(ctx, cn, err, timeout > 0)
-
- if err != nil {
- return nil, err
- }
-
- return c.newMessage(c.cmd.Val())
-}
-
-// Receive returns a message as a Subscription, Message, Pong or error.
-// See the PubSub example for details. This is a low-level API and in most
-// cases Channel should be used instead.
-func (c *PubSub) Receive(ctx context.Context) (interface{}, error) {
- return c.ReceiveTimeout(ctx, 0)
-}
-
-// ReceiveMessage returns a Message or an error, ignoring Subscription and
-// Pong messages. This is a low-level API and in most cases Channel should
-// be used instead.
-func (c *PubSub) ReceiveMessage(ctx context.Context) (*Message, error) {
- for {
- msg, err := c.Receive(ctx)
- if err != nil {
- return nil, err
- }
-
- switch msg := msg.(type) {
- case *Subscription:
- // Ignore.
- case *Pong:
- // Ignore.
- case *Message:
- return msg, nil
- default:
- err := fmt.Errorf("redis: unknown message: %T", msg)
- return nil, err
- }
- }
-}
-
-func (c *PubSub) getContext() context.Context {
- if c.cmd != nil {
- return c.cmd.ctx
- }
- return context.Background()
-}
-
-//------------------------------------------------------------------------------
-
-// Channel returns a Go channel for concurrently receiving messages.
-// The channel is closed together with the PubSub. If the Go channel
-// is full for 30 seconds, the message is dropped.
-// The Receive* APIs can not be used after the channel is created.
-//
-// go-redis periodically sends ping messages to test connection health
-// and re-subscribes if a ping reply is not received within 30 seconds.
-func (c *PubSub) Channel(opts ...ChannelOption) <-chan *Message {
- c.chOnce.Do(func() {
- c.msgCh = newChannel(c, opts...)
- c.msgCh.initMsgChan()
- })
- if c.msgCh == nil {
- err := fmt.Errorf("redis: Channel can't be called after ChannelWithSubscriptions")
- panic(err)
- }
- return c.msgCh.msgCh
-}
-
-// ChannelSize is like Channel, but creates a Go channel
-// with specified buffer size.
-//
-// Deprecated: use Channel(WithChannelSize(size)), remove in v9.
-func (c *PubSub) ChannelSize(size int) <-chan *Message {
- return c.Channel(WithChannelSize(size))
-}
-
-// ChannelWithSubscriptions is like Channel, but the message type can be
-// either *Subscription or *Message. Subscription messages can be used to
-// detect reconnections.
-//
-// ChannelWithSubscriptions cannot be used together with Channel or ChannelSize.
-func (c *PubSub) ChannelWithSubscriptions(_ context.Context, size int) <-chan interface{} {
- c.chOnce.Do(func() {
- c.allCh = newChannel(c, WithChannelSize(size))
- c.allCh.initAllChan()
- })
- if c.allCh == nil {
- err := fmt.Errorf("redis: ChannelWithSubscriptions can't be called after Channel")
- panic(err)
- }
- return c.allCh.allCh
-}
-
-type ChannelOption func(c *channel)
-
-// WithChannelSize specifies the Go chan size that is used to buffer incoming messages.
-//
-// The default is 100 messages.
-func WithChannelSize(size int) ChannelOption {
- return func(c *channel) {
- c.chanSize = size
- }
-}
-
-// WithChannelHealthCheckInterval specifies the health check interval.
-// PubSub will ping Redis Server if it does not receive any messages within the interval.
-// To disable health check, use zero interval.
-//
-// The default is 3 seconds.
-func WithChannelHealthCheckInterval(d time.Duration) ChannelOption {
- return func(c *channel) {
- c.checkInterval = d
- }
-}
-
-// WithChannelSendTimeout specifies the channel send timeout after which
-// the message is dropped.
-//
-// The default is 60 seconds.
-func WithChannelSendTimeout(d time.Duration) ChannelOption {
- return func(c *channel) {
- c.chanSendTimeout = d
- }
-}
-
-type channel struct {
- pubSub *PubSub
-
- msgCh chan *Message
- allCh chan interface{}
- ping chan struct{}
-
- chanSize int
- chanSendTimeout time.Duration
- checkInterval time.Duration
-}
-
-func newChannel(pubSub *PubSub, opts ...ChannelOption) *channel {
- c := &channel{
- pubSub: pubSub,
-
- chanSize: 100,
- chanSendTimeout: time.Minute,
- checkInterval: 3 * time.Second,
- }
- for _, opt := range opts {
- opt(c)
- }
- if c.checkInterval > 0 {
- c.initHealthCheck()
- }
- return c
-}
-
-func (c *channel) initHealthCheck() {
- ctx := context.TODO()
- c.ping = make(chan struct{}, 1)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- for {
- timer.Reset(c.checkInterval)
- select {
- case <-c.ping:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- if pingErr := c.pubSub.Ping(ctx); pingErr != nil {
- c.pubSub.mu.Lock()
- c.pubSub.reconnect(ctx, pingErr)
- c.pubSub.mu.Unlock()
- }
- case <-c.pubSub.exit:
- return
- }
- }
- }()
-}
-
-// initMsgChan must be in sync with initAllChan.
-func (c *channel) initMsgChan() {
- ctx := context.TODO()
- c.msgCh = make(chan *Message, c.chanSize)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- var errCount int
- for {
- msg, err := c.pubSub.Receive(ctx)
- if err != nil {
- if err == pool.ErrClosed {
- close(c.msgCh)
- return
- }
- if errCount > 0 {
- time.Sleep(100 * time.Millisecond)
- }
- errCount++
- continue
- }
-
- errCount = 0
-
- // Any message is as good as a ping.
- select {
- case c.ping <- struct{}{}:
- default:
- }
-
- switch msg := msg.(type) {
- case *Subscription:
- // Ignore.
- case *Pong:
- // Ignore.
- case *Message:
- timer.Reset(c.chanSendTimeout)
- select {
- case c.msgCh <- msg:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- internal.Logger.Printf(
- ctx, "redis: %s channel is full for %s (message is dropped)",
- c, c.chanSendTimeout)
- }
- default:
- internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
- }
- }
- }()
-}
-
-// initAllChan must be in sync with initMsgChan.
-func (c *channel) initAllChan() {
- ctx := context.TODO()
- c.allCh = make(chan interface{}, c.chanSize)
-
- go func() {
- timer := time.NewTimer(time.Minute)
- timer.Stop()
-
- var errCount int
- for {
- msg, err := c.pubSub.Receive(ctx)
- if err != nil {
- if err == pool.ErrClosed {
- close(c.allCh)
- return
- }
- if errCount > 0 {
- time.Sleep(100 * time.Millisecond)
- }
- errCount++
- continue
- }
-
- errCount = 0
-
- // Any message is as good as a ping.
- select {
- case c.ping <- struct{}{}:
- default:
- }
-
- switch msg := msg.(type) {
- case *Pong:
- // Ignore.
- case *Subscription, *Message:
- timer.Reset(c.chanSendTimeout)
- select {
- case c.allCh <- msg:
- if !timer.Stop() {
- <-timer.C
- }
- case <-timer.C:
- internal.Logger.Printf(
- ctx, "redis: %s channel is full for %s (message is dropped)",
- c, c.chanSendTimeout)
- }
- default:
- internal.Logger.Printf(ctx, "redis: unknown message type: %T", msg)
- }
- }
- }()
-}
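
For reference, a minimal usage sketch of the PubSub API removed above, assuming the go-redis v8 surface shown in the deleted file; the address "localhost:6379" and channel name "news" are illustrative, not taken from this change.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Subscribe returns a *PubSub; Channel starts the receive loop
	// described above and delivers *Message values until Close.
	pubsub := rdb.Subscribe(ctx, "news")
	defer pubsub.Close()

	for msg := range pubsub.Channel() {
		fmt.Println(msg.Channel, msg.Payload)
	}
}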
diff --git a/vendor/github.com/go-redis/redis/v8/redis.go b/vendor/github.com/go-redis/redis/v8/redis.go
deleted file mode 100644
index bcf8a2a94b..0000000000
--- a/vendor/github.com/go-redis/redis/v8/redis.go
+++ /dev/null
@@ -1,773 +0,0 @@
-package redis
-
-import (
- "context"
- "errors"
- "fmt"
- "sync/atomic"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// Nil reply returned by Redis when key does not exist.
-const Nil = proto.Nil
-
-func SetLogger(logger internal.Logging) {
- internal.Logger = logger
-}
-
-//------------------------------------------------------------------------------
-
-type Hook interface {
- BeforeProcess(ctx context.Context, cmd Cmder) (context.Context, error)
- AfterProcess(ctx context.Context, cmd Cmder) error
-
- BeforeProcessPipeline(ctx context.Context, cmds []Cmder) (context.Context, error)
- AfterProcessPipeline(ctx context.Context, cmds []Cmder) error
-}
-
-type hooks struct {
- hooks []Hook
-}
-
-func (hs *hooks) lock() {
- hs.hooks = hs.hooks[:len(hs.hooks):len(hs.hooks)]
-}
-
-func (hs hooks) clone() hooks {
- clone := hs
- clone.lock()
- return clone
-}
-
-func (hs *hooks) AddHook(hook Hook) {
- hs.hooks = append(hs.hooks, hook)
-}
-
-func (hs hooks) process(
- ctx context.Context, cmd Cmder, fn func(context.Context, Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmd)
- cmd.SetErr(err)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcess(ctx, cmd)
- if retErr != nil {
- cmd.SetErr(retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmd)
- cmd.SetErr(retErr)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcess(ctx, cmd); err != nil {
- retErr = err
- cmd.SetErr(retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- if len(hs.hooks) == 0 {
- err := fn(ctx, cmds)
- return err
- }
-
- var hookIndex int
- var retErr error
-
- for ; hookIndex < len(hs.hooks) && retErr == nil; hookIndex++ {
- ctx, retErr = hs.hooks[hookIndex].BeforeProcessPipeline(ctx, cmds)
- if retErr != nil {
- setCmdsErr(cmds, retErr)
- }
- }
-
- if retErr == nil {
- retErr = fn(ctx, cmds)
- }
-
- for hookIndex--; hookIndex >= 0; hookIndex-- {
- if err := hs.hooks[hookIndex].AfterProcessPipeline(ctx, cmds); err != nil {
- retErr = err
- setCmdsErr(cmds, retErr)
- }
- }
-
- return retErr
-}
-
-func (hs hooks) processTxPipeline(
- ctx context.Context, cmds []Cmder, fn func(context.Context, []Cmder) error,
-) error {
- cmds = wrapMultiExec(ctx, cmds)
- return hs.processPipeline(ctx, cmds, fn)
-}
-
-//------------------------------------------------------------------------------
-
-type baseClient struct {
- opt *Options
- connPool pool.Pooler
-
- onClose func() error // hook called when client is closed
-}
-
-func newBaseClient(opt *Options, connPool pool.Pooler) *baseClient {
- return &baseClient{
- opt: opt,
- connPool: connPool,
- }
-}
-
-func (c *baseClient) clone() *baseClient {
- clone := *c
- return &clone
-}
-
-func (c *baseClient) withTimeout(timeout time.Duration) *baseClient {
- opt := c.opt.clone()
- opt.ReadTimeout = timeout
- opt.WriteTimeout = timeout
-
- clone := c.clone()
- clone.opt = opt
-
- return clone
-}
-
-func (c *baseClient) String() string {
- return fmt.Sprintf("Redis<%s db:%d>", c.getAddr(), c.opt.DB)
-}
-
-func (c *baseClient) newConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.NewConn(ctx)
- if err != nil {
- return nil, err
- }
-
- err = c.initConn(ctx, cn)
- if err != nil {
- _ = c.connPool.CloseConn(cn)
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) getConn(ctx context.Context) (*pool.Conn, error) {
- if c.opt.Limiter != nil {
- err := c.opt.Limiter.Allow()
- if err != nil {
- return nil, err
- }
- }
-
- cn, err := c._getConn(ctx)
- if err != nil {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) _getConn(ctx context.Context) (*pool.Conn, error) {
- cn, err := c.connPool.Get(ctx)
- if err != nil {
- return nil, err
- }
-
- if cn.Inited {
- return cn, nil
- }
-
- if err := c.initConn(ctx, cn); err != nil {
- c.connPool.Remove(ctx, cn, err)
- if err := errors.Unwrap(err); err != nil {
- return nil, err
- }
- return nil, err
- }
-
- return cn, nil
-}
-
-func (c *baseClient) initConn(ctx context.Context, cn *pool.Conn) error {
- if cn.Inited {
- return nil
- }
- cn.Inited = true
-
- if c.opt.Password == "" &&
- c.opt.DB == 0 &&
- !c.opt.readOnly &&
- c.opt.OnConnect == nil {
- return nil
- }
-
- connPool := pool.NewSingleConnPool(c.connPool, cn)
- conn := newConn(ctx, c.opt, connPool)
-
- _, err := conn.Pipelined(ctx, func(pipe Pipeliner) error {
- if c.opt.Password != "" {
- if c.opt.Username != "" {
- pipe.AuthACL(ctx, c.opt.Username, c.opt.Password)
- } else {
- pipe.Auth(ctx, c.opt.Password)
- }
- }
-
- if c.opt.DB > 0 {
- pipe.Select(ctx, c.opt.DB)
- }
-
- if c.opt.readOnly {
- pipe.ReadOnly(ctx)
- }
-
- return nil
- })
- if err != nil {
- return err
- }
-
- if c.opt.OnConnect != nil {
- return c.opt.OnConnect(ctx, conn)
- }
- return nil
-}
-
-func (c *baseClient) releaseConn(ctx context.Context, cn *pool.Conn, err error) {
- if c.opt.Limiter != nil {
- c.opt.Limiter.ReportResult(err)
- }
-
- if isBadConn(err, false, c.opt.Addr) {
- c.connPool.Remove(ctx, cn, err)
- } else {
- c.connPool.Put(ctx, cn)
- }
-}
-
-func (c *baseClient) withConn(
- ctx context.Context, fn func(context.Context, *pool.Conn) error,
-) error {
- cn, err := c.getConn(ctx)
- if err != nil {
- return err
- }
-
- defer func() {
- c.releaseConn(ctx, cn, err)
- }()
-
- done := ctx.Done() //nolint:ifshort
-
- if done == nil {
- err = fn(ctx, cn)
- return err
- }
-
- errc := make(chan error, 1)
- go func() { errc <- fn(ctx, cn) }()
-
- select {
- case <-done:
- _ = cn.Close()
- // Wait for the goroutine to finish and send something.
- <-errc
-
- err = ctx.Err()
- return err
- case err = <-errc:
- return err
- }
-}
-
-func (c *baseClient) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- attempt := attempt
-
- retry, err := c._process(ctx, cmd, attempt)
- if err == nil || !retry {
- return err
- }
-
- lastErr = err
- }
- return lastErr
-}
-
-func (c *baseClient) _process(ctx context.Context, cmd Cmder, attempt int) (bool, error) {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return false, err
- }
- }
-
- retryTimeout := uint32(1)
- err := c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmd(wr, cmd)
- })
- if err != nil {
- return err
- }
-
- err = cn.WithReader(ctx, c.cmdTimeout(cmd), cmd.readReply)
- if err != nil {
- if cmd.readTimeout() == nil {
- atomic.StoreUint32(&retryTimeout, 1)
- }
- return err
- }
-
- return nil
- })
- if err == nil {
- return false, nil
- }
-
- retry := shouldRetry(err, atomic.LoadUint32(&retryTimeout) == 1)
- return retry, err
-}
-
-func (c *baseClient) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-func (c *baseClient) cmdTimeout(cmd Cmder) time.Duration {
- if timeout := cmd.readTimeout(); timeout != nil {
- t := *timeout
- if t == 0 {
- return 0
- }
- return t + 10*time.Second
- }
- return c.opt.ReadTimeout
-}
-
-// Close closes the client, releasing any open resources.
-//
-// It is rare to Close a Client, as the Client is meant to be
-// long-lived and shared between many goroutines.
-func (c *baseClient) Close() error {
- var firstErr error
- if c.onClose != nil {
- if err := c.onClose(); err != nil {
- firstErr = err
- }
- }
- if err := c.connPool.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- return firstErr
-}
-
-func (c *baseClient) getAddr() string {
- return c.opt.Addr
-}
-
-func (c *baseClient) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.pipelineProcessCmds)
-}
-
-func (c *baseClient) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, c.txPipelineProcessCmds)
-}
-
-type pipelineProcessor func(context.Context, *pool.Conn, []Cmder) (bool, error)
-
-func (c *baseClient) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- err := c._generalProcessPipeline(ctx, cmds, p)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
- return cmdsFirstErr(cmds)
-}
-
-func (c *baseClient) _generalProcessPipeline(
- ctx context.Context, cmds []Cmder, p pipelineProcessor,
-) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- var canRetry bool
- lastErr = c.withConn(ctx, func(ctx context.Context, cn *pool.Conn) error {
- var err error
- canRetry, err = p(ctx, cn, cmds)
- return err
- })
- if lastErr == nil || !canRetry || !shouldRetry(lastErr, true) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *baseClient) pipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- return pipelineReadCmds(rd, cmds)
- })
- return true, err
-}
-
-func pipelineReadCmds(rd *proto.Reader, cmds []Cmder) error {
- for _, cmd := range cmds {
- err := cmd.readReply(rd)
- cmd.SetErr(err)
- if err != nil && !isRedisError(err) {
- return err
- }
- }
- return nil
-}
-
-func (c *baseClient) txPipelineProcessCmds(
- ctx context.Context, cn *pool.Conn, cmds []Cmder,
-) (bool, error) {
- err := cn.WithWriter(ctx, c.opt.WriteTimeout, func(wr *proto.Writer) error {
- return writeCmds(wr, cmds)
- })
- if err != nil {
- return true, err
- }
-
- err = cn.WithReader(ctx, c.opt.ReadTimeout, func(rd *proto.Reader) error {
- statusCmd := cmds[0].(*StatusCmd)
- // Trim multi and exec.
- cmds = cmds[1 : len(cmds)-1]
-
- err := txPipelineReadQueued(rd, statusCmd, cmds)
- if err != nil {
- return err
- }
-
- return pipelineReadCmds(rd, cmds)
- })
- return false, err
-}
-
-func wrapMultiExec(ctx context.Context, cmds []Cmder) []Cmder {
- if len(cmds) == 0 {
- panic("not reached")
- }
- cmdCopy := make([]Cmder, len(cmds)+2)
- cmdCopy[0] = NewStatusCmd(ctx, "multi")
- copy(cmdCopy[1:], cmds)
- cmdCopy[len(cmdCopy)-1] = NewSliceCmd(ctx, "exec")
- return cmdCopy
-}
-
-func txPipelineReadQueued(rd *proto.Reader, statusCmd *StatusCmd, cmds []Cmder) error {
- // Parse queued replies.
- if err := statusCmd.readReply(rd); err != nil {
- return err
- }
-
- for range cmds {
- if err := statusCmd.readReply(rd); err != nil && !isRedisError(err) {
- return err
- }
- }
-
- // Parse number of replies.
- line, err := rd.ReadLine()
- if err != nil {
- if err == Nil {
- err = TxFailedErr
- }
- return err
- }
-
- switch line[0] {
- case proto.ErrorReply:
- return proto.ParseErrorReply(line)
- case proto.ArrayReply:
- // ok
- default:
- err := fmt.Errorf("redis: expected '*', but got line %q", line)
- return err
- }
-
- return nil
-}
-
-//------------------------------------------------------------------------------
-
-// Client is a Redis client representing a pool of zero or more
-// underlying connections. It's safe for concurrent use by multiple
-// goroutines.
-type Client struct {
- *baseClient
- cmdable
- hooks
- ctx context.Context
-}
-
-// NewClient returns a client to the Redis Server specified by Options.
-func NewClient(opt *Options) *Client {
- opt.init()
-
- c := Client{
- baseClient: newBaseClient(opt, newConnPool(opt)),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
-
- return &c
-}
-
-func (c *Client) clone() *Client {
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- return &clone
-}
-
-func (c *Client) WithTimeout(timeout time.Duration) *Client {
- clone := c.clone()
- clone.baseClient = c.baseClient.withTimeout(timeout)
- return clone
-}
-
-func (c *Client) Context() context.Context {
- return c.ctx
-}
-
-func (c *Client) WithContext(ctx context.Context) *Client {
- if ctx == nil {
- panic("nil context")
- }
- clone := c.clone()
- clone.ctx = ctx
- return clone
-}
-
-func (c *Client) Conn(ctx context.Context) *Conn {
- return newConn(ctx, c.opt, pool.NewStickyConnPool(c.connPool))
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Client) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Client) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Client) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Client) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Client) Options() *Options {
- return c.opt
-}
-
-type PoolStats pool.Stats
-
-// PoolStats returns connection pool stats.
-func (c *Client) PoolStats() *PoolStats {
- stats := c.connPool.Stats()
- return (*PoolStats)(stats)
-}
-
-func (c *Client) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Client) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Client) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Client) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create an empty subscription.
-// Note that this method does not wait on a response from Redis, so the
-// subscription may not be active immediately. To force the connection to wait,
-// you may call the Receive() method on the returned *PubSub like so:
-//
-// sub := client.Subscribe(queryResp)
-// iface, err := sub.Receive()
-// if err != nil {
-// // handle error
-// }
-//
-// // Should be *Subscription, but others are possible if other actions have been
-// // taken on sub since it was created.
-// switch iface.(type) {
-// case *Subscription:
-// // subscribe succeeded
-// case *Message:
-// // received first message
-// case *Pong:
-// // pong received
-// default:
-// // handle error
-// }
-//
-// ch := sub.Channel()
-func (c *Client) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create an empty subscription.
-func (c *Client) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-//------------------------------------------------------------------------------
-
-type conn struct {
- baseClient
- cmdable
- statefulCmdable
- hooks // TODO: inherit hooks
-}
-
-// Conn represents a single Redis connection rather than a pool of connections.
-// Prefer running commands from Client unless there is a specific need
-// for a continuous single Redis connection.
-type Conn struct {
- *conn
- ctx context.Context
-}
-
-func newConn(ctx context.Context, opt *Options, connPool pool.Pooler) *Conn {
- c := Conn{
- conn: &conn{
- baseClient: baseClient{
- opt: opt,
- connPool: connPool,
- },
- },
- ctx: ctx,
- }
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
- return &c
-}
-
-func (c *Conn) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *Conn) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
-}
-
-func (c *Conn) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
-}
-
-func (c *Conn) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Conn) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Conn) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline acts like Pipeline, but wraps queued commands with MULTI/EXEC.
-func (c *Conn) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
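
A brief sketch of how the Hook interface removed above is typically wired into a Client; the loggingHook type is a hypothetical example, while the four methods mirror the Hook interface defined in the deleted redis.go.

package main

import (
	"context"
	"log"

	"github.com/go-redis/redis/v8"
)

// loggingHook is a hypothetical Hook implementation that logs each
// command after it has been processed.
type loggingHook struct{}

func (loggingHook) BeforeProcess(ctx context.Context, cmd redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcess(ctx context.Context, cmd redis.Cmder) error {
	log.Printf("cmd=%s err=%v", cmd.Name(), cmd.Err())
	return nil
}

func (loggingHook) BeforeProcessPipeline(ctx context.Context, cmds []redis.Cmder) (context.Context, error) {
	return ctx, nil
}

func (loggingHook) AfterProcessPipeline(ctx context.Context, cmds []redis.Cmder) error {
	return nil
}

func main() {
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})
	rdb.AddHook(loggingHook{})

	ctx := context.Background()
	if err := rdb.Do(ctx, "set", "key", "value").Err(); err != nil {
		log.Println(err)
	}
}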
diff --git a/vendor/github.com/go-redis/redis/v8/result.go b/vendor/github.com/go-redis/redis/v8/result.go
deleted file mode 100644
index 24cfd49940..0000000000
--- a/vendor/github.com/go-redis/redis/v8/result.go
+++ /dev/null
@@ -1,180 +0,0 @@
-package redis
-
-import "time"
-
-// NewCmdResult returns a Cmd initialised with val and err for testing.
-func NewCmdResult(val interface{}, err error) *Cmd {
- var cmd Cmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewSliceResult returns a SliceCmd initialised with val and err for testing.
-func NewSliceResult(val []interface{}, err error) *SliceCmd {
- var cmd SliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStatusResult returns a StatusCmd initialised with val and err for testing.
-func NewStatusResult(val string, err error) *StatusCmd {
- var cmd StatusCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewIntResult returns an IntCmd initialised with val and err for testing.
-func NewIntResult(val int64, err error) *IntCmd {
- var cmd IntCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewDurationResult returns a DurationCmd initialised with val and err for testing.
-func NewDurationResult(val time.Duration, err error) *DurationCmd {
- var cmd DurationCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewBoolResult returns a BoolCmd initialised with val and err for testing.
-func NewBoolResult(val bool, err error) *BoolCmd {
- var cmd BoolCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringResult returns a StringCmd initialised with val and err for testing.
-func NewStringResult(val string, err error) *StringCmd {
- var cmd StringCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewFloatResult returns a FloatCmd initialised with val and err for testing.
-func NewFloatResult(val float64, err error) *FloatCmd {
- var cmd FloatCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringSliceResult returns a StringSliceCmd initialised with val and err for testing.
-func NewStringSliceResult(val []string, err error) *StringSliceCmd {
- var cmd StringSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewBoolSliceResult returns a BoolSliceCmd initialised with val and err for testing.
-func NewBoolSliceResult(val []bool, err error) *BoolSliceCmd {
- var cmd BoolSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringStringMapResult returns a StringStringMapCmd initialised with val and err for testing.
-func NewStringStringMapResult(val map[string]string, err error) *StringStringMapCmd {
- var cmd StringStringMapCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewStringIntMapCmdResult returns a StringIntMapCmd initialised with val and err for testing.
-func NewStringIntMapCmdResult(val map[string]int64, err error) *StringIntMapCmd {
- var cmd StringIntMapCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewTimeCmdResult returns a TimeCmd initialised with val and err for testing.
-func NewTimeCmdResult(val time.Time, err error) *TimeCmd {
- var cmd TimeCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewZSliceCmdResult returns a ZSliceCmd initialised with val and err for testing.
-func NewZSliceCmdResult(val []Z, err error) *ZSliceCmd {
- var cmd ZSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewZWithKeyCmdResult returns a NewZWithKeyCmd initialised with val and err for testing.
-func NewZWithKeyCmdResult(val *ZWithKey, err error) *ZWithKeyCmd {
- var cmd ZWithKeyCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewScanCmdResult returns a ScanCmd initialised with val and err for testing.
-func NewScanCmdResult(keys []string, cursor uint64, err error) *ScanCmd {
- var cmd ScanCmd
- cmd.page = keys
- cmd.cursor = cursor
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewClusterSlotsCmdResult returns a ClusterSlotsCmd initialised with val and err for testing.
-func NewClusterSlotsCmdResult(val []ClusterSlot, err error) *ClusterSlotsCmd {
- var cmd ClusterSlotsCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewGeoLocationCmdResult returns a GeoLocationCmd initialised with val and err for testing.
-func NewGeoLocationCmdResult(val []GeoLocation, err error) *GeoLocationCmd {
- var cmd GeoLocationCmd
- cmd.locations = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewGeoPosCmdResult returns a GeoPosCmd initialised with val and err for testing.
-func NewGeoPosCmdResult(val []*GeoPos, err error) *GeoPosCmd {
- var cmd GeoPosCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewCommandsInfoCmdResult returns a CommandsInfoCmd initialised with val and err for testing.
-func NewCommandsInfoCmdResult(val map[string]*CommandInfo, err error) *CommandsInfoCmd {
- var cmd CommandsInfoCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewXMessageSliceCmdResult returns a XMessageSliceCmd initialised with val and err for testing.
-func NewXMessageSliceCmdResult(val []XMessage, err error) *XMessageSliceCmd {
- var cmd XMessageSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
-
-// NewXStreamSliceCmdResult returns a XStreamSliceCmd initialised with val and err for testing.
-func NewXStreamSliceCmdResult(val []XStream, err error) *XStreamSliceCmd {
- var cmd XStreamSliceCmd
- cmd.val = val
- cmd.SetErr(err)
- return &cmd
-}
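
The constructors deleted above exist purely for tests; a hedged sketch of how one of them might be used to stub a command result without a running server (the package and value names are illustrative).

package cache_test

import (
	"testing"

	"github.com/go-redis/redis/v8"
)

func TestStubbedResult(t *testing.T) {
	// NewStringResult builds a *StringCmd that already carries a value
	// and an error, so code under test can consume it directly.
	cmd := redis.NewStringResult("cached-value", nil)

	val, err := cmd.Result()
	if err != nil || val != "cached-value" {
		t.Fatalf("unexpected result: %q, %v", val, err)
	}
}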
diff --git a/vendor/github.com/go-redis/redis/v8/ring.go b/vendor/github.com/go-redis/redis/v8/ring.go
deleted file mode 100644
index 4df00fc857..0000000000
--- a/vendor/github.com/go-redis/redis/v8/ring.go
+++ /dev/null
@@ -1,736 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "fmt"
- "net"
- "strconv"
- "sync"
- "sync/atomic"
- "time"
-
- "github.com/cespare/xxhash/v2"
- rendezvous "github.com/dgryski/go-rendezvous" //nolint
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/hashtag"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-var errRingShardsDown = errors.New("redis: all ring shards are down")
-
-//------------------------------------------------------------------------------
-
-type ConsistentHash interface {
- Get(string) string
-}
-
-type rendezvousWrapper struct {
- *rendezvous.Rendezvous
-}
-
-func (w rendezvousWrapper) Get(key string) string {
- return w.Lookup(key)
-}
-
-func newRendezvous(shards []string) ConsistentHash {
- return rendezvousWrapper{rendezvous.New(shards, xxhash.Sum64String)}
-}
-
-//------------------------------------------------------------------------------
-
-// RingOptions are used to configure a ring client and should be
-// passed to NewRing.
-type RingOptions struct {
- // Map of name => host:port addresses of ring shards.
- Addrs map[string]string
-
- // NewClient creates a shard client with provided name and options.
- NewClient func(name string, opt *Options) *Client
-
- // Frequency of PING commands sent to check shard availability.
- // A shard is considered down after 3 consecutive failed checks.
- HeartbeatFrequency time.Duration
-
- // NewConsistentHash returns a consistent hash that is used
- // to distribute keys across the shards.
- //
- // See https://medium.com/@dgryski/consistent-hashing-algorithmic-tradeoffs-ef6b8e2fcae8
- // for consistent hashing algorithmic tradeoffs.
- NewConsistentHash func(shards []string) ConsistentHash
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- DB int
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
- Limiter Limiter
-}
-
-func (opt *RingOptions) init() {
- if opt.NewClient == nil {
- opt.NewClient = func(name string, opt *Options) *Client {
- return NewClient(opt)
- }
- }
-
- if opt.HeartbeatFrequency == 0 {
- opt.HeartbeatFrequency = 500 * time.Millisecond
- }
-
- if opt.NewConsistentHash == nil {
- opt.NewConsistentHash = newRendezvous
- }
-
- if opt.MaxRetries == -1 {
- opt.MaxRetries = 0
- } else if opt.MaxRetries == 0 {
- opt.MaxRetries = 3
- }
- switch opt.MinRetryBackoff {
- case -1:
- opt.MinRetryBackoff = 0
- case 0:
- opt.MinRetryBackoff = 8 * time.Millisecond
- }
- switch opt.MaxRetryBackoff {
- case -1:
- opt.MaxRetryBackoff = 0
- case 0:
- opt.MaxRetryBackoff = 512 * time.Millisecond
- }
-}
-
-func (opt *RingOptions) clientOptions() *Options {
- return &Options{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
- DB: opt.DB,
-
- MaxRetries: -1,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
-
- TLSConfig: opt.TLSConfig,
- Limiter: opt.Limiter,
- }
-}
-
-//------------------------------------------------------------------------------
-
-type ringShard struct {
- Client *Client
- down int32
-}
-
-func newRingShard(opt *RingOptions, name, addr string) *ringShard {
- clopt := opt.clientOptions()
- clopt.Addr = addr
-
- return &ringShard{
- Client: opt.NewClient(name, clopt),
- }
-}
-
-func (shard *ringShard) String() string {
- var state string
- if shard.IsUp() {
- state = "up"
- } else {
- state = "down"
- }
- return fmt.Sprintf("%s is %s", shard.Client, state)
-}
-
-func (shard *ringShard) IsDown() bool {
- const threshold = 3
- return atomic.LoadInt32(&shard.down) >= threshold
-}
-
-func (shard *ringShard) IsUp() bool {
- return !shard.IsDown()
-}
-
-// Vote votes to set shard state and returns true if state was changed.
-func (shard *ringShard) Vote(up bool) bool {
- if up {
- changed := shard.IsDown()
- atomic.StoreInt32(&shard.down, 0)
- return changed
- }
-
- if shard.IsDown() {
- return false
- }
-
- atomic.AddInt32(&shard.down, 1)
- return shard.IsDown()
-}
-
-//------------------------------------------------------------------------------
-
-type ringShards struct {
- opt *RingOptions
-
- mu sync.RWMutex
- hash ConsistentHash
- shards map[string]*ringShard // read only
- list []*ringShard // read only
- numShard int
- closed bool
-}
-
-func newRingShards(opt *RingOptions) *ringShards {
- shards := make(map[string]*ringShard, len(opt.Addrs))
- list := make([]*ringShard, 0, len(shards))
-
- for name, addr := range opt.Addrs {
- shard := newRingShard(opt, name, addr)
- shards[name] = shard
-
- list = append(list, shard)
- }
-
- c := &ringShards{
- opt: opt,
-
- shards: shards,
- list: list,
- }
- c.rebalance()
-
- return c
-}
-
-func (c *ringShards) List() []*ringShard {
- var list []*ringShard
-
- c.mu.RLock()
- if !c.closed {
- list = c.list
- }
- c.mu.RUnlock()
-
- return list
-}
-
-func (c *ringShards) Hash(key string) string {
- key = hashtag.Key(key)
-
- var hash string
-
- c.mu.RLock()
- if c.numShard > 0 {
- hash = c.hash.Get(key)
- }
- c.mu.RUnlock()
-
- return hash
-}
-
-func (c *ringShards) GetByKey(key string) (*ringShard, error) {
- key = hashtag.Key(key)
-
- c.mu.RLock()
-
- if c.closed {
- c.mu.RUnlock()
- return nil, pool.ErrClosed
- }
-
- if c.numShard == 0 {
- c.mu.RUnlock()
- return nil, errRingShardsDown
- }
-
- hash := c.hash.Get(key)
- if hash == "" {
- c.mu.RUnlock()
- return nil, errRingShardsDown
- }
-
- shard := c.shards[hash]
- c.mu.RUnlock()
-
- return shard, nil
-}
-
-func (c *ringShards) GetByName(shardName string) (*ringShard, error) {
- if shardName == "" {
- return c.Random()
- }
-
- c.mu.RLock()
- shard := c.shards[shardName]
- c.mu.RUnlock()
- return shard, nil
-}
-
-func (c *ringShards) Random() (*ringShard, error) {
- return c.GetByKey(strconv.Itoa(rand.Int()))
-}
-
-// Heartbeat monitors the state of each shard in the ring.
-func (c *ringShards) Heartbeat(frequency time.Duration) {
- ticker := time.NewTicker(frequency)
- defer ticker.Stop()
-
- ctx := context.Background()
- for range ticker.C {
- var rebalance bool
-
- for _, shard := range c.List() {
- err := shard.Client.Ping(ctx).Err()
- isUp := err == nil || err == pool.ErrPoolTimeout
- if shard.Vote(isUp) {
- internal.Logger.Printf(context.Background(), "ring shard state changed: %s", shard)
- rebalance = true
- }
- }
-
- if rebalance {
- c.rebalance()
- }
- }
-}
-
-// rebalance removes dead shards from the Ring.
-func (c *ringShards) rebalance() {
- c.mu.RLock()
- shards := c.shards
- c.mu.RUnlock()
-
- liveShards := make([]string, 0, len(shards))
-
- for name, shard := range shards {
- if shard.IsUp() {
- liveShards = append(liveShards, name)
- }
- }
-
- hash := c.opt.NewConsistentHash(liveShards)
-
- c.mu.Lock()
- c.hash = hash
- c.numShard = len(liveShards)
- c.mu.Unlock()
-}
-
-func (c *ringShards) Len() int {
- c.mu.RLock()
- l := c.numShard
- c.mu.RUnlock()
- return l
-}
-
-func (c *ringShards) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.closed {
- return nil
- }
- c.closed = true
-
- var firstErr error
- for _, shard := range c.shards {
- if err := shard.Client.Close(); err != nil && firstErr == nil {
- firstErr = err
- }
- }
- c.hash = nil
- c.shards = nil
- c.list = nil
-
- return firstErr
-}
-
-//------------------------------------------------------------------------------
-
-type ring struct {
- opt *RingOptions
- shards *ringShards
- cmdsInfoCache *cmdsInfoCache //nolint:structcheck
-}
-
-// Ring is a Redis client that uses consistent hashing to distribute
-// keys across multiple Redis servers (shards). It's safe for
-// concurrent use by multiple goroutines.
-//
-// Ring monitors the state of each shard and removes dead shards from
-// the ring. When a shard comes online it is added back to the ring. This
-// gives you maximum availability and partition tolerance, but no
-// consistency between different shards or even clients. Each client
-// uses shards that are available to the client and does not do any
-// coordination when shard state is changed.
-//
-// Ring should be used when you need multiple Redis servers for caching
-// and can tolerate losing data when one of the servers dies.
-// Otherwise you should use Redis Cluster.
-type Ring struct {
- *ring
- cmdable
- hooks
- ctx context.Context
-}
-
-func NewRing(opt *RingOptions) *Ring {
- opt.init()
-
- ring := Ring{
- ring: &ring{
- opt: opt,
- shards: newRingShards(opt),
- },
- ctx: context.Background(),
- }
-
- ring.cmdsInfoCache = newCmdsInfoCache(ring.cmdsInfo)
- ring.cmdable = ring.Process
-
- go ring.shards.Heartbeat(opt.HeartbeatFrequency)
-
- return &ring
-}
-
-func (c *Ring) Context() context.Context {
- return c.ctx
-}
-
-func (c *Ring) WithContext(ctx context.Context) *Ring {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.cmdable = clone.Process
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-// Do creates a Cmd from the args and processes the cmd.
-func (c *Ring) Do(ctx context.Context, args ...interface{}) *Cmd {
- cmd := NewCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *Ring) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.process)
-}
-
-// Options returns read-only Options that were used to create the client.
-func (c *Ring) Options() *RingOptions {
- return c.opt
-}
-
-func (c *Ring) retryBackoff(attempt int) time.Duration {
- return internal.RetryBackoff(attempt, c.opt.MinRetryBackoff, c.opt.MaxRetryBackoff)
-}
-
-// PoolStats returns accumulated connection pool stats.
-func (c *Ring) PoolStats() *PoolStats {
- shards := c.shards.List()
- var acc PoolStats
- for _, shard := range shards {
- s := shard.Client.connPool.Stats()
- acc.Hits += s.Hits
- acc.Misses += s.Misses
- acc.Timeouts += s.Timeouts
- acc.TotalConns += s.TotalConns
- acc.IdleConns += s.IdleConns
- }
- return &acc
-}
-
-// Len returns the current number of shards in the ring.
-func (c *Ring) Len() int {
- return c.shards.Len()
-}
-
-// Subscribe subscribes the client to the specified channels.
-func (c *Ring) Subscribe(ctx context.Context, channels ...string) *PubSub {
- if len(channels) == 0 {
- panic("at least one channel is required")
- }
-
- shard, err := c.shards.GetByKey(channels[0])
- if err != nil {
- // TODO: return PubSub with sticky error
- panic(err)
- }
- return shard.Client.Subscribe(ctx, channels...)
-}
-
-// PSubscribe subscribes the client to the given patterns.
-func (c *Ring) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- if len(channels) == 0 {
- panic("at least one channel is required")
- }
-
- shard, err := c.shards.GetByKey(channels[0])
- if err != nil {
- // TODO: return PubSub with sticky error
- panic(err)
- }
- return shard.Client.PSubscribe(ctx, channels...)
-}
-
-// ForEachShard concurrently calls the fn on each live shard in the ring.
-// It returns the first error if any.
-func (c *Ring) ForEachShard(
- ctx context.Context,
- fn func(ctx context.Context, client *Client) error,
-) error {
- shards := c.shards.List()
- var wg sync.WaitGroup
- errCh := make(chan error, 1)
- for _, shard := range shards {
- if shard.IsDown() {
- continue
- }
-
- wg.Add(1)
- go func(shard *ringShard) {
- defer wg.Done()
- err := fn(ctx, shard.Client)
- if err != nil {
- select {
- case errCh <- err:
- default:
- }
- }
- }(shard)
- }
- wg.Wait()
-
- select {
- case err := <-errCh:
- return err
- default:
- return nil
- }
-}
-
-func (c *Ring) cmdsInfo(ctx context.Context) (map[string]*CommandInfo, error) {
- shards := c.shards.List()
- var firstErr error
- for _, shard := range shards {
- cmdsInfo, err := shard.Client.Command(ctx).Result()
- if err == nil {
- return cmdsInfo, nil
- }
- if firstErr == nil {
- firstErr = err
- }
- }
- if firstErr == nil {
- return nil, errRingShardsDown
- }
- return nil, firstErr
-}
-
-func (c *Ring) cmdInfo(ctx context.Context, name string) *CommandInfo {
- cmdsInfo, err := c.cmdsInfoCache.Get(ctx)
- if err != nil {
- return nil
- }
- info := cmdsInfo[name]
- if info == nil {
- internal.Logger.Printf(ctx, "info for cmd=%s not found", name)
- }
- return info
-}
-
-func (c *Ring) cmdShard(ctx context.Context, cmd Cmder) (*ringShard, error) {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- pos := cmdFirstKeyPos(cmd, cmdInfo)
- if pos == 0 {
- return c.shards.Random()
- }
- firstKey := cmd.stringArg(pos)
- return c.shards.GetByKey(firstKey)
-}
-
-func (c *Ring) process(ctx context.Context, cmd Cmder) error {
- var lastErr error
- for attempt := 0; attempt <= c.opt.MaxRetries; attempt++ {
- if attempt > 0 {
- if err := internal.Sleep(ctx, c.retryBackoff(attempt)); err != nil {
- return err
- }
- }
-
- shard, err := c.cmdShard(ctx, cmd)
- if err != nil {
- return err
- }
-
- lastErr = shard.Client.Process(ctx, cmd)
- if lastErr == nil || !shouldRetry(lastErr, cmd.readTimeout() == nil) {
- return lastErr
- }
- }
- return lastErr
-}
-
-func (c *Ring) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-func (c *Ring) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Ring) processPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, false)
- })
-}
-
-func (c *Ring) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-func (c *Ring) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: c.processTxPipeline,
- }
- pipe.init()
- return &pipe
-}
-
-func (c *Ring) processTxPipeline(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, func(ctx context.Context, cmds []Cmder) error {
- return c.generalProcessPipeline(ctx, cmds, true)
- })
-}
-
-func (c *Ring) generalProcessPipeline(
- ctx context.Context, cmds []Cmder, tx bool,
-) error {
- cmdsMap := make(map[string][]Cmder)
- for _, cmd := range cmds {
- cmdInfo := c.cmdInfo(ctx, cmd.Name())
- hash := cmd.stringArg(cmdFirstKeyPos(cmd, cmdInfo))
- if hash != "" {
- hash = c.shards.Hash(hash)
- }
- cmdsMap[hash] = append(cmdsMap[hash], cmd)
- }
-
- var wg sync.WaitGroup
- for hash, cmds := range cmdsMap {
- wg.Add(1)
- go func(hash string, cmds []Cmder) {
- defer wg.Done()
-
- _ = c.processShardPipeline(ctx, hash, cmds, tx)
- }(hash, cmds)
- }
-
- wg.Wait()
- return cmdsFirstErr(cmds)
-}
-
-func (c *Ring) processShardPipeline(
- ctx context.Context, hash string, cmds []Cmder, tx bool,
-) error {
- // TODO: retry?
- shard, err := c.shards.GetByName(hash)
- if err != nil {
- setCmdsErr(cmds, err)
- return err
- }
-
- if tx {
- return shard.Client.processTxPipeline(ctx, cmds)
- }
- return shard.Client.processPipeline(ctx, cmds)
-}
-
-func (c *Ring) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- if len(keys) == 0 {
- return fmt.Errorf("redis: Watch requires at least one key")
- }
-
- var shards []*ringShard
- for _, key := range keys {
- if key != "" {
- shard, err := c.shards.GetByKey(hashtag.Key(key))
- if err != nil {
- return err
- }
-
- shards = append(shards, shard)
- }
- }
-
- if len(shards) == 0 {
- return fmt.Errorf("redis: Watch requires at least one shard")
- }
-
- if len(shards) > 1 {
- for _, shard := range shards[1:] {
- if shard.Client != shards[0].Client {
- err := fmt.Errorf("redis: Watch requires all keys to be in the same shard")
- return err
- }
- }
- }
-
- return shards[0].Client.Watch(ctx, fn, keys...)
-}
-
-// Close closes the ring client, releasing any open resources.
-//
-// It is rare to Close a Ring, as the Ring is meant to be long-lived
-// and shared between many goroutines.
-func (c *Ring) Close() error {
- return c.shards.Close()
-}
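
A minimal sketch of the Ring client whose implementation is removed above; the shard names and addresses are illustrative assumptions.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

func main() {
	ctx := context.Background()

	// Keys are distributed across the shards using the consistent hash
	// configured in RingOptions (rendezvous hashing by default).
	rdb := redis.NewRing(&redis.RingOptions{
		Addrs: map[string]string{
			"shard1": "localhost:7000",
			"shard2": "localhost:7001",
		},
	})
	defer rdb.Close()

	if err := rdb.Set(ctx, "key", "value", 0).Err(); err != nil {
		fmt.Println("set failed:", err)
	}
}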
diff --git a/vendor/github.com/go-redis/redis/v8/script.go b/vendor/github.com/go-redis/redis/v8/script.go
deleted file mode 100644
index 5cab18d617..0000000000
--- a/vendor/github.com/go-redis/redis/v8/script.go
+++ /dev/null
@@ -1,65 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/sha1"
- "encoding/hex"
- "io"
- "strings"
-)
-
-type Scripter interface {
- Eval(ctx context.Context, script string, keys []string, args ...interface{}) *Cmd
- EvalSha(ctx context.Context, sha1 string, keys []string, args ...interface{}) *Cmd
- ScriptExists(ctx context.Context, hashes ...string) *BoolSliceCmd
- ScriptLoad(ctx context.Context, script string) *StringCmd
-}
-
-var (
- _ Scripter = (*Client)(nil)
- _ Scripter = (*Ring)(nil)
- _ Scripter = (*ClusterClient)(nil)
-)
-
-type Script struct {
- src, hash string
-}
-
-func NewScript(src string) *Script {
- h := sha1.New()
- _, _ = io.WriteString(h, src)
- return &Script{
- src: src,
- hash: hex.EncodeToString(h.Sum(nil)),
- }
-}
-
-func (s *Script) Hash() string {
- return s.hash
-}
-
-func (s *Script) Load(ctx context.Context, c Scripter) *StringCmd {
- return c.ScriptLoad(ctx, s.src)
-}
-
-func (s *Script) Exists(ctx context.Context, c Scripter) *BoolSliceCmd {
- return c.ScriptExists(ctx, s.hash)
-}
-
-func (s *Script) Eval(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- return c.Eval(ctx, s.src, keys, args...)
-}
-
-func (s *Script) EvalSha(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- return c.EvalSha(ctx, s.hash, keys, args...)
-}
-
-// Run optimistically uses EVALSHA to run the script. If the script does not
-// exist, it is retried using EVAL.
-func (s *Script) Run(ctx context.Context, c Scripter, keys []string, args ...interface{}) *Cmd {
- r := s.EvalSha(ctx, c, keys, args...)
- if err := r.Err(); err != nil && strings.HasPrefix(err.Error(), "NOSCRIPT ") {
- return s.Eval(ctx, c, keys, args...)
- }
- return r
-}
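
A short sketch of the Script helper deleted above: Run tries EVALSHA first and falls back to EVAL on a NOSCRIPT error, as the comment describes. The Lua body and key name are illustrative.

package main

import (
	"context"
	"fmt"

	"github.com/go-redis/redis/v8"
)

var incrBy = redis.NewScript(`
local value = redis.call("INCRBY", KEYS[1], ARGV[1])
return value
`)

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"})

	// Run uses EVALSHA and transparently retries with EVAL if the
	// script has not been loaded on the server yet.
	n, err := incrBy.Run(ctx, rdb, []string{"counter"}, 2).Int()
	fmt.Println(n, err)
}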
diff --git a/vendor/github.com/go-redis/redis/v8/sentinel.go b/vendor/github.com/go-redis/redis/v8/sentinel.go
deleted file mode 100644
index ec6221dc83..0000000000
--- a/vendor/github.com/go-redis/redis/v8/sentinel.go
+++ /dev/null
@@ -1,796 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "errors"
- "net"
- "strings"
- "sync"
- "time"
-
- "github.com/go-redis/redis/v8/internal"
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/rand"
-)
-
-//------------------------------------------------------------------------------
-
-// FailoverOptions are used to configure a failover client and should
-// be passed to NewFailoverClient.
-type FailoverOptions struct {
- // The master name.
- MasterName string
- // A seed list of host:port addresses of sentinel nodes.
- SentinelAddrs []string
-
- // If specified with SentinelPassword, enables ACL-based authentication (via
- // AUTH <user> <pass>).
- SentinelUsername string
- // Sentinel password from "requirepass <password>" (if enabled) in Sentinel
- // configuration, or, if SentinelUsername is also supplied, used for ACL-based
- // authentication.
- SentinelPassword string
-
- // Allows routing read-only commands to the closest master or slave node.
- // This option only works with NewFailoverClusterClient.
- RouteByLatency bool
- // Allows routing read-only commands to a random master or slave node.
- // This option only works with NewFailoverClusterClient.
- RouteRandomly bool
-
- // Route all commands to slave read-only nodes.
- SlaveOnly bool
-
- // Use slaves that are disconnected from the master when no connected slaves are available.
- // Currently, this option only works in the RandomSlaveAddr function.
- UseDisconnectedSlaves bool
-
- // Following options are copied from Options struct.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- DB int
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-}
-
-func (opt *FailoverOptions) clientOptions() *Options {
- return &Options{
- Addr: "FailoverClient",
-
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- DB: opt.DB,
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-func (opt *FailoverOptions) sentinelOptions(addr string) *Options {
- return &Options{
- Addr: addr,
-
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- DB: 0,
- Username: opt.SentinelUsername,
- Password: opt.SentinelPassword,
-
- MaxRetries: opt.MaxRetries,
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-func (opt *FailoverOptions) clusterOptions() *ClusterOptions {
- return &ClusterOptions{
- Dialer: opt.Dialer,
- OnConnect: opt.OnConnect,
-
- Username: opt.Username,
- Password: opt.Password,
-
- MaxRedirects: opt.MaxRetries,
-
- RouteByLatency: opt.RouteByLatency,
- RouteRandomly: opt.RouteRandomly,
-
- MinRetryBackoff: opt.MinRetryBackoff,
- MaxRetryBackoff: opt.MaxRetryBackoff,
-
- DialTimeout: opt.DialTimeout,
- ReadTimeout: opt.ReadTimeout,
- WriteTimeout: opt.WriteTimeout,
-
- PoolFIFO: opt.PoolFIFO,
- PoolSize: opt.PoolSize,
- PoolTimeout: opt.PoolTimeout,
- IdleTimeout: opt.IdleTimeout,
- IdleCheckFrequency: opt.IdleCheckFrequency,
- MinIdleConns: opt.MinIdleConns,
- MaxConnAge: opt.MaxConnAge,
-
- TLSConfig: opt.TLSConfig,
- }
-}
-
-// NewFailoverClient returns a Redis client that uses Redis Sentinel
-// for automatic failover. It's safe for concurrent use by multiple
-// goroutines.
-func NewFailoverClient(failoverOpt *FailoverOptions) *Client {
- if failoverOpt.RouteByLatency {
- panic("to route commands by latency, use NewFailoverClusterClient")
- }
- if failoverOpt.RouteRandomly {
- panic("to route commands randomly, use NewFailoverClusterClient")
- }
-
- sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
- copy(sentinelAddrs, failoverOpt.SentinelAddrs)
-
- rand.Shuffle(len(sentinelAddrs), func(i, j int) {
- sentinelAddrs[i], sentinelAddrs[j] = sentinelAddrs[j], sentinelAddrs[i]
- })
-
- failover := &sentinelFailover{
- opt: failoverOpt,
- sentinelAddrs: sentinelAddrs,
- }
-
- opt := failoverOpt.clientOptions()
- opt.Dialer = masterSlaveDialer(failover)
- opt.init()
-
- connPool := newConnPool(opt)
-
- failover.mu.Lock()
- failover.onFailover = func(ctx context.Context, addr string) {
- _ = connPool.Filter(func(cn *pool.Conn) bool {
- return cn.RemoteAddr().String() != addr
- })
- }
- failover.mu.Unlock()
-
- c := Client{
- baseClient: newBaseClient(opt, connPool),
- ctx: context.Background(),
- }
- c.cmdable = c.Process
- c.onClose = failover.Close
-
- return &c
-}
-
-func masterSlaveDialer(
- failover *sentinelFailover,
-) func(ctx context.Context, network, addr string) (net.Conn, error) {
- return func(ctx context.Context, network, _ string) (net.Conn, error) {
- var addr string
- var err error
-
- if failover.opt.SlaveOnly {
- addr, err = failover.RandomSlaveAddr(ctx)
- } else {
- addr, err = failover.MasterAddr(ctx)
- if err == nil {
- failover.trySwitchMaster(ctx, addr)
- }
- }
- if err != nil {
- return nil, err
- }
- if failover.opt.Dialer != nil {
- return failover.opt.Dialer(ctx, network, addr)
- }
-
- netDialer := &net.Dialer{
- Timeout: failover.opt.DialTimeout,
- KeepAlive: 5 * time.Minute,
- }
- if failover.opt.TLSConfig == nil {
- return netDialer.DialContext(ctx, network, addr)
- }
- return tls.DialWithDialer(netDialer, network, addr, failover.opt.TLSConfig)
- }
-}
-
-//------------------------------------------------------------------------------
-
-// SentinelClient is a client for a Redis Sentinel.
-type SentinelClient struct {
- *baseClient
- hooks
- ctx context.Context
-}
-
-func NewSentinelClient(opt *Options) *SentinelClient {
- opt.init()
- c := &SentinelClient{
- baseClient: &baseClient{
- opt: opt,
- connPool: newConnPool(opt),
- },
- ctx: context.Background(),
- }
- return c
-}
-
-func (c *SentinelClient) Context() context.Context {
- return c.ctx
-}
-
-func (c *SentinelClient) WithContext(ctx context.Context) *SentinelClient {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.ctx = ctx
- return &clone
-}
-
-func (c *SentinelClient) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-func (c *SentinelClient) pubSub() *PubSub {
- pubsub := &PubSub{
- opt: c.opt,
-
- newConn: func(ctx context.Context, channels []string) (*pool.Conn, error) {
- return c.newConn(ctx)
- },
- closeConn: c.connPool.CloseConn,
- }
- pubsub.init()
- return pubsub
-}
-
-// Ping is used to test if a connection is still alive, or to
-// measure latency.
-func (c *SentinelClient) Ping(ctx context.Context) *StringCmd {
- cmd := NewStringCmd(ctx, "ping")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Subscribe subscribes the client to the specified channels.
-// Channels can be omitted to create empty subscription.
-func (c *SentinelClient) Subscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.Subscribe(ctx, channels...)
- }
- return pubsub
-}
-
-// PSubscribe subscribes the client to the given patterns.
-// Patterns can be omitted to create empty subscription.
-func (c *SentinelClient) PSubscribe(ctx context.Context, channels ...string) *PubSub {
- pubsub := c.pubSub()
- if len(channels) > 0 {
- _ = pubsub.PSubscribe(ctx, channels...)
- }
- return pubsub
-}
-
-func (c *SentinelClient) GetMasterAddrByName(ctx context.Context, name string) *StringSliceCmd {
- cmd := NewStringSliceCmd(ctx, "sentinel", "get-master-addr-by-name", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-func (c *SentinelClient) Sentinels(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "sentinels", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Failover forces a failover as if the master was not reachable, and without
-// asking for agreement to other Sentinels.
-func (c *SentinelClient) Failover(ctx context.Context, name string) *StatusCmd {
- cmd := NewStatusCmd(ctx, "sentinel", "failover", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Reset resets all the masters with matching name. The pattern argument is a
-// glob-style pattern. The reset process clears any previous state in a master
-// (including a failover in progress), and removes every slave and sentinel
-// already discovered and associated with the master.
-func (c *SentinelClient) Reset(ctx context.Context, pattern string) *IntCmd {
- cmd := NewIntCmd(ctx, "sentinel", "reset", pattern)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// FlushConfig forces Sentinel to rewrite its configuration on disk, including
-// the current Sentinel state.
-func (c *SentinelClient) FlushConfig(ctx context.Context) *StatusCmd {
- cmd := NewStatusCmd(ctx, "sentinel", "flushconfig")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Master shows the state and info of the specified master.
-func (c *SentinelClient) Master(ctx context.Context, name string) *StringStringMapCmd {
- cmd := NewStringStringMapCmd(ctx, "sentinel", "master", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Masters shows a list of monitored masters and their state.
-func (c *SentinelClient) Masters(ctx context.Context) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "masters")
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Slaves shows a list of slaves for the specified master and their state.
-func (c *SentinelClient) Slaves(ctx context.Context, name string) *SliceCmd {
- cmd := NewSliceCmd(ctx, "sentinel", "slaves", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// CkQuorum checks if the current Sentinel configuration is able to reach the
-// quorum needed to failover a master, and the majority needed to authorize the
-// failover. This command should be used in monitoring systems to check if a
-// Sentinel deployment is ok.
-func (c *SentinelClient) CkQuorum(ctx context.Context, name string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "ckquorum", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Monitor tells the Sentinel to start monitoring a new master with the specified
-// name, ip, port, and quorum.
-func (c *SentinelClient) Monitor(ctx context.Context, name, ip, port, quorum string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "monitor", name, ip, port, quorum)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Set is used in order to change configuration parameters of a specific master.
-func (c *SentinelClient) Set(ctx context.Context, name, option, value string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "set", name, option, value)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Remove is used in order to remove the specified master: the master will no
-// longer be monitored, and will totally be removed from the internal state of
-// the Sentinel.
-func (c *SentinelClient) Remove(ctx context.Context, name string) *StringCmd {
- cmd := NewStringCmd(ctx, "sentinel", "remove", name)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-//------------------------------------------------------------------------------
-
-type sentinelFailover struct {
- opt *FailoverOptions
-
- sentinelAddrs []string
-
- onFailover func(ctx context.Context, addr string)
- onUpdate func(ctx context.Context)
-
- mu sync.RWMutex
- _masterAddr string
- sentinel *SentinelClient
- pubsub *PubSub
-}
-
-func (c *sentinelFailover) Close() error {
- c.mu.Lock()
- defer c.mu.Unlock()
- if c.sentinel != nil {
- return c.closeSentinel()
- }
- return nil
-}
-
-func (c *sentinelFailover) closeSentinel() error {
- firstErr := c.pubsub.Close()
- c.pubsub = nil
-
- err := c.sentinel.Close()
- if err != nil && firstErr == nil {
- firstErr = err
- }
- c.sentinel = nil
-
- return firstErr
-}
-
-func (c *sentinelFailover) RandomSlaveAddr(ctx context.Context) (string, error) {
- if c.opt == nil {
- return "", errors.New("opt is nil")
- }
-
- addresses, err := c.slaveAddrs(ctx, false)
- if err != nil {
- return "", err
- }
-
- if len(addresses) == 0 && c.opt.UseDisconnectedSlaves {
- addresses, err = c.slaveAddrs(ctx, true)
- if err != nil {
- return "", err
- }
- }
-
- if len(addresses) == 0 {
- return c.MasterAddr(ctx)
- }
- return addresses[rand.Intn(len(addresses))], nil
-}
-
-func (c *sentinelFailover) MasterAddr(ctx context.Context) (string, error) {
- c.mu.RLock()
- sentinel := c.sentinel
- c.mu.RUnlock()
-
- if sentinel != nil {
- addr := c.getMasterAddr(ctx, sentinel)
- if addr != "" {
- return addr, nil
- }
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.sentinel != nil {
- addr := c.getMasterAddr(ctx, c.sentinel)
- if addr != "" {
- return addr, nil
- }
- _ = c.closeSentinel()
- }
-
- for i, sentinelAddr := range c.sentinelAddrs {
- sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
-
- masterAddr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName master=%q failed: %s",
- c.opt.MasterName, err)
- _ = sentinel.Close()
- continue
- }
-
- // Push working sentinel to the top.
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
- c.setSentinel(ctx, sentinel)
-
- addr := net.JoinHostPort(masterAddr[0], masterAddr[1])
- return addr, nil
- }
-
- return "", errors.New("redis: all sentinels specified in configuration are unreachable")
-}
-
-func (c *sentinelFailover) slaveAddrs(ctx context.Context, useDisconnected bool) ([]string, error) {
- c.mu.RLock()
- sentinel := c.sentinel
- c.mu.RUnlock()
-
- if sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, sentinel)
- if len(addrs) > 0 {
- return addrs, nil
- }
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if c.sentinel != nil {
- addrs := c.getSlaveAddrs(ctx, c.sentinel)
- if len(addrs) > 0 {
- return addrs, nil
- }
- _ = c.closeSentinel()
- }
-
- var sentinelReachable bool
-
- for i, sentinelAddr := range c.sentinelAddrs {
- sentinel := NewSentinelClient(c.opt.sentinelOptions(sentinelAddr))
-
- slaves, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves master=%q failed: %s",
- c.opt.MasterName, err)
- _ = sentinel.Close()
- continue
- }
- sentinelReachable = true
- addrs := parseSlaveAddrs(slaves, useDisconnected)
- if len(addrs) == 0 {
- continue
- }
- // Push working sentinel to the top.
- c.sentinelAddrs[0], c.sentinelAddrs[i] = c.sentinelAddrs[i], c.sentinelAddrs[0]
- c.setSentinel(ctx, sentinel)
-
- return addrs, nil
- }
-
- if sentinelReachable {
- return []string{}, nil
- }
- return []string{}, errors.New("redis: all sentinels specified in configuration are unreachable")
-}
-
-func (c *sentinelFailover) getMasterAddr(ctx context.Context, sentinel *SentinelClient) string {
- addr, err := sentinel.GetMasterAddrByName(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: GetMasterAddrByName name=%q failed: %s",
- c.opt.MasterName, err)
- return ""
- }
- return net.JoinHostPort(addr[0], addr[1])
-}
-
-func (c *sentinelFailover) getSlaveAddrs(ctx context.Context, sentinel *SentinelClient) []string {
- addrs, err := sentinel.Slaves(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Slaves name=%q failed: %s",
- c.opt.MasterName, err)
- return []string{}
- }
- return parseSlaveAddrs(addrs, false)
-}
-
-func parseSlaveAddrs(addrs []interface{}, keepDisconnected bool) []string {
- nodes := make([]string, 0, len(addrs))
- for _, node := range addrs {
- ip := ""
- port := ""
- flags := []string{}
- lastkey := ""
- isDown := false
-
- for _, key := range node.([]interface{}) {
- switch lastkey {
- case "ip":
- ip = key.(string)
- case "port":
- port = key.(string)
- case "flags":
- flags = strings.Split(key.(string), ",")
- }
- lastkey = key.(string)
- }
-
- for _, flag := range flags {
- switch flag {
- case "s_down", "o_down":
- isDown = true
- case "disconnected":
- if !keepDisconnected {
- isDown = true
- }
- }
- }
-
- if !isDown {
- nodes = append(nodes, net.JoinHostPort(ip, port))
- }
- }
-
- return nodes
-}
-
-func (c *sentinelFailover) trySwitchMaster(ctx context.Context, addr string) {
- c.mu.RLock()
- currentAddr := c._masterAddr //nolint:ifshort
- c.mu.RUnlock()
-
- if addr == currentAddr {
- return
- }
-
- c.mu.Lock()
- defer c.mu.Unlock()
-
- if addr == c._masterAddr {
- return
- }
- c._masterAddr = addr
-
- internal.Logger.Printf(ctx, "sentinel: new master=%q addr=%q",
- c.opt.MasterName, addr)
- if c.onFailover != nil {
- c.onFailover(ctx, addr)
- }
-}
-
-func (c *sentinelFailover) setSentinel(ctx context.Context, sentinel *SentinelClient) {
- if c.sentinel != nil {
- panic("not reached")
- }
- c.sentinel = sentinel
- c.discoverSentinels(ctx)
-
- c.pubsub = sentinel.Subscribe(ctx, "+switch-master", "+slave-reconf-done")
- go c.listen(c.pubsub)
-}
-
-func (c *sentinelFailover) discoverSentinels(ctx context.Context) {
- sentinels, err := c.sentinel.Sentinels(ctx, c.opt.MasterName).Result()
- if err != nil {
- internal.Logger.Printf(ctx, "sentinel: Sentinels master=%q failed: %s", c.opt.MasterName, err)
- return
- }
- for _, sentinel := range sentinels {
- vals := sentinel.([]interface{})
- var ip, port string
- for i := 0; i < len(vals); i += 2 {
- key := vals[i].(string)
- switch key {
- case "ip":
- ip = vals[i+1].(string)
- case "port":
- port = vals[i+1].(string)
- }
- }
- if ip != "" && port != "" {
- sentinelAddr := net.JoinHostPort(ip, port)
- if !contains(c.sentinelAddrs, sentinelAddr) {
- internal.Logger.Printf(ctx, "sentinel: discovered new sentinel=%q for master=%q",
- sentinelAddr, c.opt.MasterName)
- c.sentinelAddrs = append(c.sentinelAddrs, sentinelAddr)
- }
- }
- }
-}
-
-func (c *sentinelFailover) listen(pubsub *PubSub) {
- ctx := context.TODO()
-
- if c.onUpdate != nil {
- c.onUpdate(ctx)
- }
-
- ch := pubsub.Channel()
- for msg := range ch {
- if msg.Channel == "+switch-master" {
- parts := strings.Split(msg.Payload, " ")
- if parts[0] != c.opt.MasterName {
- internal.Logger.Printf(pubsub.getContext(), "sentinel: ignore addr for master=%q", parts[0])
- continue
- }
- addr := net.JoinHostPort(parts[3], parts[4])
- c.trySwitchMaster(pubsub.getContext(), addr)
- }
-
- if c.onUpdate != nil {
- c.onUpdate(ctx)
- }
- }
-}
-
-func contains(slice []string, str string) bool {
- for _, s := range slice {
- if s == str {
- return true
- }
- }
- return false
-}
-
-//------------------------------------------------------------------------------
-
-// NewFailoverClusterClient returns a client that supports routing read-only commands
-// to a slave node.
-func NewFailoverClusterClient(failoverOpt *FailoverOptions) *ClusterClient {
- sentinelAddrs := make([]string, len(failoverOpt.SentinelAddrs))
- copy(sentinelAddrs, failoverOpt.SentinelAddrs)
-
- failover := &sentinelFailover{
- opt: failoverOpt,
- sentinelAddrs: sentinelAddrs,
- }
-
- opt := failoverOpt.clusterOptions()
- opt.ClusterSlots = func(ctx context.Context) ([]ClusterSlot, error) {
- masterAddr, err := failover.MasterAddr(ctx)
- if err != nil {
- return nil, err
- }
-
- nodes := []ClusterNode{{
- Addr: masterAddr,
- }}
-
- slaveAddrs, err := failover.slaveAddrs(ctx, false)
- if err != nil {
- return nil, err
- }
-
- for _, slaveAddr := range slaveAddrs {
- nodes = append(nodes, ClusterNode{
- Addr: slaveAddr,
- })
- }
-
- slots := []ClusterSlot{
- {
- Start: 0,
- End: 16383,
- Nodes: nodes,
- },
- }
- return slots, nil
- }
-
- c := NewClusterClient(opt)
-
- failover.mu.Lock()
- failover.onUpdate = func(ctx context.Context) {
- c.ReloadState(ctx)
- }
- failover.mu.Unlock()
-
- return c
-}
diff --git a/vendor/github.com/go-redis/redis/v8/tx.go b/vendor/github.com/go-redis/redis/v8/tx.go
deleted file mode 100644
index 8c9d87202a..0000000000
--- a/vendor/github.com/go-redis/redis/v8/tx.go
+++ /dev/null
@@ -1,149 +0,0 @@
-package redis
-
-import (
- "context"
-
- "github.com/go-redis/redis/v8/internal/pool"
- "github.com/go-redis/redis/v8/internal/proto"
-)
-
-// TxFailedErr transaction redis failed.
-const TxFailedErr = proto.RedisError("redis: transaction failed")
-
-// Tx implements Redis transactions as described in
-// http://redis.io/topics/transactions. It's NOT safe for concurrent use
-// by multiple goroutines, because Exec resets list of watched keys.
-//
-// If you don't need WATCH, use Pipeline instead.
-type Tx struct {
- baseClient
- cmdable
- statefulCmdable
- hooks
- ctx context.Context
-}
-
-func (c *Client) newTx(ctx context.Context) *Tx {
- tx := Tx{
- baseClient: baseClient{
- opt: c.opt,
- connPool: pool.NewStickyConnPool(c.connPool),
- },
- hooks: c.hooks.clone(),
- ctx: ctx,
- }
- tx.init()
- return &tx
-}
-
-func (c *Tx) init() {
- c.cmdable = c.Process
- c.statefulCmdable = c.Process
-}
-
-func (c *Tx) Context() context.Context {
- return c.ctx
-}
-
-func (c *Tx) WithContext(ctx context.Context) *Tx {
- if ctx == nil {
- panic("nil context")
- }
- clone := *c
- clone.init()
- clone.hooks.lock()
- clone.ctx = ctx
- return &clone
-}
-
-func (c *Tx) Process(ctx context.Context, cmd Cmder) error {
- return c.hooks.process(ctx, cmd, c.baseClient.process)
-}
-
-// Watch prepares a transaction and marks the keys to be watched
-// for conditional execution if there are any keys.
-//
-// The transaction is automatically closed when fn exits.
-func (c *Client) Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error {
- tx := c.newTx(ctx)
- defer tx.Close(ctx)
- if len(keys) > 0 {
- if err := tx.Watch(ctx, keys...).Err(); err != nil {
- return err
- }
- }
- return fn(tx)
-}
-
-// Close closes the transaction, releasing any open resources.
-func (c *Tx) Close(ctx context.Context) error {
- _ = c.Unwatch(ctx).Err()
- return c.baseClient.Close()
-}
-
-// Watch marks the keys to be watched for conditional execution
-// of a transaction.
-func (c *Tx) Watch(ctx context.Context, keys ...string) *StatusCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "watch"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Unwatch flushes all the previously watched keys for a transaction.
-func (c *Tx) Unwatch(ctx context.Context, keys ...string) *StatusCmd {
- args := make([]interface{}, 1+len(keys))
- args[0] = "unwatch"
- for i, key := range keys {
- args[1+i] = key
- }
- cmd := NewStatusCmd(ctx, args...)
- _ = c.Process(ctx, cmd)
- return cmd
-}
-
-// Pipeline creates a pipeline. Usually it is more convenient to use Pipelined.
-func (c *Tx) Pipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processPipeline(ctx, cmds, c.baseClient.processPipeline)
- },
- }
- pipe.init()
- return &pipe
-}
-
-// Pipelined executes commands queued in the fn outside of the transaction.
-// Use TxPipelined if you need transactional behavior.
-func (c *Tx) Pipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.Pipeline().Pipelined(ctx, fn)
-}
-
-// TxPipelined executes commands queued in the fn in the transaction.
-//
-// When using WATCH, EXEC will execute commands only if the watched keys
-// were not modified, allowing for a check-and-set mechanism.
-//
-// Exec always returns list of commands. If transaction fails
-// TxFailedErr is returned. Otherwise Exec returns an error of the first
-// failed command or nil.
-func (c *Tx) TxPipelined(ctx context.Context, fn func(Pipeliner) error) ([]Cmder, error) {
- return c.TxPipeline().Pipelined(ctx, fn)
-}
-
-// TxPipeline creates a pipeline. Usually it is more convenient to use TxPipelined.
-func (c *Tx) TxPipeline() Pipeliner {
- pipe := Pipeline{
- ctx: c.ctx,
- exec: func(ctx context.Context, cmds []Cmder) error {
- return c.hooks.processTxPipeline(ctx, cmds, c.baseClient.processTxPipeline)
- },
- }
- pipe.init()
- return &pipe
-}
diff --git a/vendor/github.com/go-redis/redis/v8/universal.go b/vendor/github.com/go-redis/redis/v8/universal.go
deleted file mode 100644
index c89b3e5d74..0000000000
--- a/vendor/github.com/go-redis/redis/v8/universal.go
+++ /dev/null
@@ -1,215 +0,0 @@
-package redis
-
-import (
- "context"
- "crypto/tls"
- "net"
- "time"
-)
-
-// UniversalOptions information is required by UniversalClient to establish
-// connections.
-type UniversalOptions struct {
- // Either a single address or a seed list of host:port addresses
- // of cluster/sentinel nodes.
- Addrs []string
-
- // Database to be selected after connecting to the server.
- // Only single-node and failover clients.
- DB int
-
- // Common options.
-
- Dialer func(ctx context.Context, network, addr string) (net.Conn, error)
- OnConnect func(ctx context.Context, cn *Conn) error
-
- Username string
- Password string
- SentinelUsername string
- SentinelPassword string
-
- MaxRetries int
- MinRetryBackoff time.Duration
- MaxRetryBackoff time.Duration
-
- DialTimeout time.Duration
- ReadTimeout time.Duration
- WriteTimeout time.Duration
-
- // PoolFIFO uses FIFO mode for each node connection pool GET/PUT (default LIFO).
- PoolFIFO bool
-
- PoolSize int
- MinIdleConns int
- MaxConnAge time.Duration
- PoolTimeout time.Duration
- IdleTimeout time.Duration
- IdleCheckFrequency time.Duration
-
- TLSConfig *tls.Config
-
- // Only cluster clients.
-
- MaxRedirects int
- ReadOnly bool
- RouteByLatency bool
- RouteRandomly bool
-
- // The sentinel master name.
- // Only failover clients.
-
- MasterName string
-}
-
-// Cluster returns cluster options created from the universal options.
-func (o *UniversalOptions) Cluster() *ClusterOptions {
- if len(o.Addrs) == 0 {
- o.Addrs = []string{"127.0.0.1:6379"}
- }
-
- return &ClusterOptions{
- Addrs: o.Addrs,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- Username: o.Username,
- Password: o.Password,
-
- MaxRedirects: o.MaxRedirects,
- ReadOnly: o.ReadOnly,
- RouteByLatency: o.RouteByLatency,
- RouteRandomly: o.RouteRandomly,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// Failover returns failover options created from the universal options.
-func (o *UniversalOptions) Failover() *FailoverOptions {
- if len(o.Addrs) == 0 {
- o.Addrs = []string{"127.0.0.1:26379"}
- }
-
- return &FailoverOptions{
- SentinelAddrs: o.Addrs,
- MasterName: o.MasterName,
-
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- DB: o.DB,
- Username: o.Username,
- Password: o.Password,
- SentinelUsername: o.SentinelUsername,
- SentinelPassword: o.SentinelPassword,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
-
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// Simple returns basic options created from the universal options.
-func (o *UniversalOptions) Simple() *Options {
- addr := "127.0.0.1:6379"
- if len(o.Addrs) > 0 {
- addr = o.Addrs[0]
- }
-
- return &Options{
- Addr: addr,
- Dialer: o.Dialer,
- OnConnect: o.OnConnect,
-
- DB: o.DB,
- Username: o.Username,
- Password: o.Password,
-
- MaxRetries: o.MaxRetries,
- MinRetryBackoff: o.MinRetryBackoff,
- MaxRetryBackoff: o.MaxRetryBackoff,
-
- DialTimeout: o.DialTimeout,
- ReadTimeout: o.ReadTimeout,
- WriteTimeout: o.WriteTimeout,
-
- PoolFIFO: o.PoolFIFO,
- PoolSize: o.PoolSize,
- MinIdleConns: o.MinIdleConns,
- MaxConnAge: o.MaxConnAge,
- PoolTimeout: o.PoolTimeout,
- IdleTimeout: o.IdleTimeout,
- IdleCheckFrequency: o.IdleCheckFrequency,
-
- TLSConfig: o.TLSConfig,
- }
-}
-
-// --------------------------------------------------------------------
-
-// UniversalClient is an abstract client which - based on the provided options -
-// represents either a ClusterClient, a FailoverClient, or a single-node Client.
-// This can be useful for testing cluster-specific applications locally or having different
-// clients in different environments.
-type UniversalClient interface {
- Cmdable
- Context() context.Context
- AddHook(Hook)
- Watch(ctx context.Context, fn func(*Tx) error, keys ...string) error
- Do(ctx context.Context, args ...interface{}) *Cmd
- Process(ctx context.Context, cmd Cmder) error
- Subscribe(ctx context.Context, channels ...string) *PubSub
- PSubscribe(ctx context.Context, channels ...string) *PubSub
- Close() error
- PoolStats() *PoolStats
-}
-
-var (
- _ UniversalClient = (*Client)(nil)
- _ UniversalClient = (*ClusterClient)(nil)
- _ UniversalClient = (*Ring)(nil)
-)
-
-// NewUniversalClient returns a new multi client. The type of the returned client depends
-// on the following conditions:
-//
-// 1. If the MasterName option is specified, a sentinel-backed FailoverClient is returned.
-// 2. if the number of Addrs is two or more, a ClusterClient is returned.
-// 3. Otherwise, a single-node Client is returned.
-func NewUniversalClient(opts *UniversalOptions) UniversalClient {
- if opts.MasterName != "" {
- return NewFailoverClient(opts.Failover())
- } else if len(opts.Addrs) > 1 {
- return NewClusterClient(opts.Cluster())
- }
- return NewClient(opts.Simple())
-}
diff --git a/vendor/github.com/go-redis/redis/v8/version.go b/vendor/github.com/go-redis/redis/v8/version.go
deleted file mode 100644
index 112c9a2da0..0000000000
--- a/vendor/github.com/go-redis/redis/v8/version.go
+++ /dev/null
@@ -1,6 +0,0 @@
-package redis
-
-// Version is the current release version.
-func Version() string {
- return "8.11.5"
-}
diff --git a/vendor/github.com/go-redis/redis/v8/ya.make b/vendor/github.com/go-redis/redis/v8/ya.make
deleted file mode 100644
index 80ccb486c6..0000000000
--- a/vendor/github.com/go-redis/redis/v8/ya.make
+++ /dev/null
@@ -1,58 +0,0 @@
-GO_LIBRARY()
-
-LICENSE(BSD-2-Clause)
-
-SRCS(
- cluster.go
- cluster_commands.go
- command.go
- commands.go
- doc.go
- error.go
- iterator.go
- options.go
- pipeline.go
- pubsub.go
- redis.go
- result.go
- ring.go
- script.go
- sentinel.go
- tx.go
- universal.go
- version.go
-)
-
-GO_TEST_SRCS(
- bench_decode_test.go
- export_test.go
- internal_test.go
- options_test.go
-)
-
-GO_XTEST_SRCS(
- bench_test.go
- cluster_test.go
- command_test.go
- commands_test.go
- example_instrumentation_test.go
- example_test.go
- iterator_test.go
- main_test.go
- pipeline_test.go
- pool_test.go
- pubsub_test.go
- race_test.go
- redis_test.go
- ring_test.go
- sentinel_test.go
- tx_test.go
- universal_test.go
-)
-
-END()
-
-RECURSE(
- # gotest
- internal
-)
diff --git a/ydb/library/actors/CMakeLists.txt b/ydb/library/actors/CMakeLists.txt
index 77b760d34a..becd73cd24 100644
--- a/ydb/library/actors/CMakeLists.txt
+++ b/ydb/library/actors/CMakeLists.txt
@@ -8,8 +8,10 @@
add_subdirectory(actor_type)
add_subdirectory(core)
+add_subdirectory(cppcoro)
add_subdirectory(dnscachelib)
add_subdirectory(dnsresolver)
+add_subdirectory(examples)
add_subdirectory(helpers)
add_subdirectory(http)
add_subdirectory(interconnect)
diff --git a/ydb/library/actors/README.md b/ydb/library/actors/README.md
new file mode 100644
index 0000000000..c94b96c7a4
--- /dev/null
+++ b/ydb/library/actors/README.md
@@ -0,0 +1,107 @@
+## Actor library
+
+### Part one: introduction.
+Sometimes you have to build asynchronous, heavily parallel, and in places distributed programs. Sometimes the internal logic is also non-trivial and heterogeneous, and gets written by different teams over many years. Everything we love. Humanity has come up with only a handful of ways to organize the internal structure and code of such programs. Most of them are bad (it is precisely the bad approaches that gave asynchronous, multithreaded programming its grim reputation). Some are better. And, as usual, there is no silver bullet.
+
+When we started developing Yandex Database (still KiKiMR back then), it was clear from the start that simple improvised solutions would not do (at least not if we wanted the result to be something we would not be ashamed of). As the foundation we chose message passing and the actor model, and we have not regretted it. Over time this approach spread to neighboring projects.
+
+### Basic concepts.
+Stripping away the husk, we model a service (or a program, when an isolated binary is run) as an ensemble of independent agents that interact by sending asynchronous messages within a shared environment. Every word here matters:
+
+- Independent: they share neither state nor a thread of execution.
+- Message passing: we formalize protocols, not interfaces.
+- Asynchronous: we never block on sending a message.
+- Shared environment: all agents share a common pool of resources, and any agent that knows an address can send a message to any other.
+
+In trendier terms this looks a lot like co-located microservices, just one level lower. And yes, we deliberately chose not to hide asynchrony and parallelism from the developer; the raw meat is on display.
+
+### IActor.
+https://a.yandex-team.ru/arcadia/ydb/library/actors/core/actor.h?rev=r11291267#L310
+The base class of every agent; it is usually not used directly. Instead you instantiate either TActor or TActorBootstrapped. In practice all of a program's useful code lives in actors.
+(An important note: in the code you will see methods both with and without TActorContext, similar in name and purpose. The TActorContext variants are by now deprecated; new code should be written without them.)
+Important methods:
+
+- PassAway – the only correct way for a registered actor to die. It may be called only from inside a message handler.
+- Send – send a message to a known recipient address. The actor exposes a helper that takes the message itself; the base call that takes a full event handle is available on the context.
+- Become – install the message handler function that will be used when the next message arrives.
+- Register – register a new actor in the actor system, allocating a new mailbox for it. Important: from the moment of the call ownership of the actor is transferred to the actor system, i.e. by the time the call returns the actor may already be running on another thread; you must neither touch it via direct calls nor even assume it is still alive.
+- Schedule – register a message to be sent after no less than the requested delay. The actor exposes a helper that wraps the message into a handle addressed to itself; on the context you can pass a full handle.
+- SelfId – obtain the actor's own address. The returned TActorIdentity object can be passed around when you need to delegate sending messages on behalf of the actor (for example, when useful code is written with passive objects).
+
+Sending messages is cheap; there is no need to be overly frugal with it (but it is not free either, so sending messages purely for the sake of sending messages is not worth it).
+
+Instantiating actors is just as cheap; an actor per request or per request phase is perfectly normal practice, and multiplexing the processing of several requests inside one actor is perfectly normal too. Our code has plenty of examples of both. Use common sense and your own taste.
+Because an actor occupies a thread from the actor system's pool for as long as it processes a message, long computations are better moved into a separate, dedicated actor (either evicted to a separate actor-system pool, or with their parallelism controlled by a resource broker); making blocking calls is almost always a mistake. The urge to write a mutex is heresy and the work of the evil one.
+Actors are identified by their TActorID, which is unique. You must never conjure one out of thin air: you either get it from registration (for actors you spawned) or it is told to you by someone who legitimately knows it.
+
+Sending to a non-existent (already dead) actor is safe; the message is simply dropped at processing time (how to handle message non-delivery in protocols is described below).
+
+Besides regular TActorIDs there are also service IDs (composed of a string and a node number). A real actor can be registered under such an ID, and when a message arrives at a service address the system tries to forward it to the current actual actor. This lets you place well-known services at well-known addresses without building a parallel discovery machinery.
+
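+A minimal sketch of how such a service address might be set up, assuming the TActorId(nodeId, serviceName) constructor and TActorSystem::RegisterLocalService as they exist in ydb/library/actors/core (names and signatures may differ between revisions):
+
+```cpp
+#include <ydb/library/actors/core/actorid.h>
+#include <ydb/library/actors/core/actorsystem.h>
+
+using namespace NActors;
+
+// Make `realActor` reachable under a well-known service address on this node.
+void PublishService(TActorSystem& actorSystem, ui32 nodeId, const TActorId& realActor) {
+    // Service ids are built from a short string and a node number.
+    const TActorId serviceId(nodeId, "my_svc");
+    // From now on messages sent to serviceId are forwarded to realActor.
+    actorSystem.RegisterLocalService(serviceId, realActor);
+}
+```
+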
+Whether to build a finite state machine out of an actor by switching the handler function is a per-case choice: sometimes it is more convenient, sometimes it is easier to dump everything into a single state, and sometimes a hybrid works best (part of the life cycle – usually initialization and shutdown – is expressed as state transitions, and the rest is not).
+Less talk, more action – this is already enough to read the simplest example: https://a.yandex-team.ru/arc/trunk/arcadia/ydb/library/actors/examples/01_ping_pong
+It shows the simplest possible actor, which bounces messages back and forth and uses all the basic calls. By turning the various knobs (the number of threads in the thread pool, the number of actor pairs exchanging messages) you can also watch how the behavior of the system changes (hint: in scenarios this simple, peak performance is reached with a single thread per thread pool).
+
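+A minimal standalone sketch of an actor using the calls described above (header paths and the TEvents constants are assumed from ydb/library/actors/core and may differ slightly between revisions):
+
+```cpp
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/core/events.h>
+#include <ydb/library/actors/core/hfunc.h>
+#include <util/datetime/base.h>
+
+using namespace NActors;
+
+// Periodically notifies its owner, then dies after a fixed number of ticks.
+class TTicker : public TActorBootstrapped<TTicker> {
+    const TActorId Owner;
+    ui32 TicksLeft = 3;
+public:
+    explicit TTicker(const TActorId& owner) : Owner(owner) {}
+
+    void Bootstrap() {
+        Become(&TTicker::StateWork);                               // handler for subsequent messages
+        Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup()); // timer message to ourselves
+    }
+
+    STFUNC(StateWork) {
+        switch (ev->GetTypeRewrite()) {
+            hFunc(TEvents::TEvWakeup, Handle);
+        }
+    }
+
+    void Handle(TEvents::TEvWakeup::TPtr& /*ev*/) {
+        Send(Owner, new TEvents::TEvWakeup());                     // plain asynchronous send by address
+        if (--TicksLeft == 0) {
+            PassAway();                                            // the only correct way to die
+            return;
+        }
+        Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup());
+    }
+};
+
+// Inside the owner actor: ownership passes to the actor system right here.
+// const TActorId ticker = Register(new TTicker(SelfId()));
+```
+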
+### Event and Event Handle.
+The message payload is wrapped into a descendant of IEventBase, which has two important methods: serialization and loading. Serialization is virtual, but loading is not, so to parse a message out of a byte sequence the receiving side has to match the numeric event-type identifier against a C++ type. That is exactly what the macros from hfunc.h do. In practice events are created either as descendants of TEventLocal<> (for strictly local messages) or as descendants of TEventPB<> (for messages that may be forwarded over the network, typed by a protobuf message).
+
+Besides the event itself (as a structure or as a byte string), forwarding a message requires a set of additional fields:
+
+- Recipient
+- Sender
+- Message type
+- Cookie
+- Flags
+
+Message + additional fields = IEventHandle, and it is handles that the actor system operates on. <event-type>::TPtr in the example above is precisely a pointer to such a typed handle.
+
+Technically the message type may be any number that the receiver and the sender have agreed to treat as the message identifier. The established practice is to allocate ranges of identifiers with the EventSpaceBegin macro (effectively in blocks of 64k), starting from the ES_USERSPACE block.
+The cookie is an uninterpreted ui64 number carried along with the handle. It is good practice for a service's reply to copy the cookie of the original message, especially for services that may be used concurrently.
+
+Within the flags, a few bits are reserved for flags declaring how special situations must be handled, and 12 bits hold the number of the interconnect channel over which the message will be forwarded (for local messages the channel number is irrelevant in the existing implementations, although one can imagine an implementation with independent queues per channel).
+
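+A sketch of declaring a pair of strictly local events (the event names and fields are illustrative; TEventLocal, EventSpaceBegin and ES_USERSPACE are used as described above):
+
+```cpp
+#include <ydb/library/actors/core/event_local.h>
+#include <ydb/library/actors/core/events.h>
+#include <util/generic/string.h>
+#include <utility>
+
+using namespace NActors;
+
+// A private event space for this component; ids are allocated in 64k blocks
+// starting from ES_USERSPACE.
+enum EEv {
+    EvRequest = EventSpaceBegin(TEvents::ES_USERSPACE),
+    EvResponse,
+    EvEnd
+};
+
+// Strictly local events: they never leave the process, so no serialization is defined.
+struct TEvRequest : TEventLocal<TEvRequest, EvRequest> {
+    TString Query;
+    explicit TEvRequest(TString query) : Query(std::move(query)) {}
+};
+
+struct TEvResponse : TEventLocal<TEvResponse, EvResponse> {
+    TString Result;
+    explicit TEvResponse(TString result) : Result(std::move(result)) {}
+};
+
+// On the receiving side TEvRequest::TPtr is the typed IEventHandle; a handler
+// would typically reply to ev->Sender and echo ev->Cookie back:
+//   Send(ev->Sender, new TEvResponse("done"), 0, ev->Cookie);
+```
+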
+### Thread pools and mailboxes.
+Several independent thread pools can coexist within a single actor system; every actor is registered on a specific one and cannot migrate during its lifetime (though it may create new actors on an arbitrary pool). This is used for coarse-grained resource separation, either between different activities (over here we process one class of requests, over there another) or between different activity profiles (fast requests here, slow ones here, and batch work over there). For example, YDB runs a system thread pool (for the actors YDB itself needs to function, where we make sure handlers never block for long), a user thread pool (where requests are processed and handlers may occasionally take longer without affecting the infrastructure), a batch thread pool (where long-running work is offloaded: disk compactions, table scans and the like) and, on fat nodes, an interconnect thread pool (interconnect being the most latency-sensitive part).
+Message passing between actors of different thread pools but the same local actor system stays local; the message is not forcibly serialized.
+
+On registration an actor is attached to a mailbox (typically to a mailbox of its own, but when really needed you can, while processing a message, attach the spawned actor to the currently active mailbox – see RegisterWithSameMailbox (formerly RegisterLocal) – in which case the absence of concurrent message processing is guaranteed). Send itself is nothing more than wrapping the event into a handle, putting the handle into the mailbox queue and adding the mailbox to the thread pool's activation queue. Within a single mailbox processing is FIFO; between mailboxes there is no such guarantee, although we do try to activate mailboxes roughly in the order in which messages appeared in them.
+
+When registering an actor you can choose the mailbox type; the types differ slightly in the cost of enqueueing – either cheap but somewhat worse under contention, or almost wait-free but a bit more expensive. See the comments on TMailboxType for up-to-date hints on which is which.
+
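+A sketch of the registration variants mentioned above (the worker factory and the pool id are assumptions; Register and RegisterWithSameMailbox are used as declared in ydb/library/actors/core, where signatures may differ between revisions):
+
+```cpp
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <util/system/compiler.h>
+
+using namespace NActors;
+
+IActor* CreateWorker(); // assumed factory for some worker actor
+
+class TSpawner : public TActorBootstrapped<TSpawner> {
+public:
+    void Bootstrap() {
+        // Default mailbox type, registered on the same pool as this actor.
+        const TActorId a = Register(CreateWorker());
+
+        // Explicit mailbox type and an explicit executor pool
+        // (pool id 1 is an assumption; real ids come from your actor system setup).
+        const TActorId b = Register(CreateWorker(), TMailboxType::HTSwap, /*poolId=*/ 1);
+
+        // Attach the child to the current mailbox: parent and child are then
+        // guaranteed never to process messages concurrently.
+        const TActorId c = RegisterWithSameMailbox(CreateWorker());
+
+        Y_UNUSED(a);
+        Y_UNUSED(b);
+        Y_UNUSED(c);
+        PassAway();
+    }
+};
+```
+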
+Useful helpers:
+
+- STFUNC – declares a state function; I recommend always using exactly this form for declarations, since it makes them easier to grep for later.
+- hFunc – a handler-selection macro that passes the event into the handler.
+- cFunc – a handler-selection macro that does not pass the event into the handler.
+
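+A sketch of a state function built with these helpers, switching states via Become (TEvents::TEvWakeup and TEvents::TEvPoison are standard system events; the rest of the names are illustrative):
+
+```cpp
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/core/events.h>
+#include <ydb/library/actors/core/hfunc.h>
+
+using namespace NActors;
+
+class TGate : public TActorBootstrapped<TGate> {
+public:
+    void Bootstrap() {
+        Become(&TGate::StateClosed);
+    }
+
+    // STFUNC declares a state function taking the incoming event handle.
+    STFUNC(StateClosed) {
+        switch (ev->GetTypeRewrite()) {
+            // cFunc: the handler does not need the event itself.
+            cFunc(TEvents::TEvWakeup::EventType, Open);
+            cFunc(TEvents::TEvPoison::EventType, PassAway);
+        }
+    }
+
+    STFUNC(StateOpen) {
+        switch (ev->GetTypeRewrite()) {
+            // hFunc: the typed event is passed into the handler.
+            hFunc(TEvents::TEvWakeup, Handle);
+            cFunc(TEvents::TEvPoison::EventType, PassAway);
+        }
+    }
+
+    void Open() {
+        Become(&TGate::StateOpen); // the next message will be dispatched by StateOpen
+    }
+
+    void Handle(TEvents::TEvWakeup::TPtr& /*ev*/) {
+        // ev->Get() would give access to the typed event here.
+    }
+};
+```
+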
+### Failure handling.
+Within a local actor system message delivery is guaranteed. If for some reason a message is not delivered (important: not delivered – whether it was actually processed is on the conscience of the receiving actor), one of the following happens:
+
+If the FlagForwardOnNondelivery flag is set, the message is forwarded to the actor that was passed as forwardOnNondelivery when the handle was constructed. This is useful, for example, when some services are created on demand and messages to not-yet-created services should be routed to a router. It works only within the local actor system.
+
+Otherwise, if the FlagTrackDelivery flag is set, a TEvUndelivered event is generated for the sender on behalf of the unreachable actor. Receiving such a message guarantees that the original event was not processed and no side effects took place. Generation and delivery of the notification is guaranteed within a local actor system; in a distributed one it is a matter of luck and the notification may get lost.
+
+Otherwise, if no flags are set, the message is simply dropped.
+
+Since in a distributed system delivery of non-delivery notifications is not guaranteed, reliable failure handling needs one more mechanism: with the FlagSubscribeOnSession flag, when a message crosses a node boundary the sender is subscribed to a notification about the teardown of the network session within which the message was sent. When the network session breaks, the sender thus learns that the message may not have been delivered (or may have been – we do not know) and can react. Do not forget to unsubscribe from the session-break notifications, otherwise they keep accumulating until the next break (which may be a long time coming).
+
+To summarize: to track delivery within a local actor system, set the FlagTrackDelivery flag and handle TEvUndelivered. For a distributed system, additionally set FlagSubscribeOnSession and handle TEvNodeDisconnected as well, not forgetting to drop subscriptions that are no longer needed.
+
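+A sketch of that pattern (the target address is an assumption, and TEvWakeup stands in for a real request event; the flags, TEvUndelivered and TEvInterconnect::TEvNodeDisconnected are used as declared in ydb/library/actors/core, where names may vary slightly between revisions):
+
+```cpp
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/core/events.h>
+#include <ydb/library/actors/core/hfunc.h>
+#include <ydb/library/actors/core/interconnect.h>
+
+using namespace NActors;
+
+// Sends a request to a possibly remote actor and reacts to delivery failures.
+class TCaller : public TActorBootstrapped<TCaller> {
+    const TActorId Target;
+public:
+    explicit TCaller(const TActorId& target) : Target(target) {}
+
+    void Bootstrap() {
+        const ui32 flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession;
+        Send(Target, new TEvents::TEvWakeup(), flags, /*cookie=*/ 42);
+        Become(&TCaller::StateWait);
+    }
+
+    STFUNC(StateWait) {
+        switch (ev->GetTypeRewrite()) {
+            hFunc(TEvents::TEvUndelivered, Handle);
+            hFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
+        }
+    }
+
+    void Handle(TEvents::TEvUndelivered::TPtr& /*ev*/) {
+        // The original event was definitely not processed; retry or report here.
+        PassAway();
+    }
+
+    void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr& /*ev*/) {
+        // The session broke: the request may or may not have been delivered.
+        // A longer-lived actor would also drop the subscription explicitly, e.g. by
+        // sending TEvents::TEvUnsubscribe to the interconnect proxy of that node.
+        PassAway();
+    }
+};
+```
+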
+### Interconnect.
+A local actor system is only half of the pie; the ability to join several of them into a distributed one is the other half. The interconnect implementation comes out of the box and can:
+- transfer messages over a single TCP connection;
+- multiplex different streams (a.k.a. channels) within one connection, guaranteeing ordering within a channel;
+and it tries to do all of this well.
+In a distributed system every local actor system has to be assigned a unique number (for example via a table, or by implementing dynamic node-number assignment), and a local nameservice has to be started inside every local actor system (for example driven by a table mapping node numbers to network addresses, or acting as a cache of a reference nameservice).
+Have a look at the second example: https://a.yandex-team.ru/arcadia/ydb/library/actors/examples/02_discovery
+It configures a distributed actor system across five nodes (in the example all five are started inside one binary, but they could just as well be started separately). On every node it starts a replica for publishing strings and an endpoint actor (each with its own port). The endpoints use a publisher actor to publish their coordinates to the distributed storage (with handling of abnormal situations and with keeping the data up to date). Next to it lies an implementation of a query to that storage which lists what has been published, by majority. This is essentially a simplified version, cleaned of specifics, of the code used in YDB to publish and discover the current endpoints of a user database.
diff --git a/ydb/library/actors/core/CMakeLists.darwin-arm64.txt b/ydb/library/actors/core/CMakeLists.darwin-arm64.txt
index acad464783..d606663430 100644
--- a/ydb/library/actors/core/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/core/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,8 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
get_built_tool_path(
TOOL_enum_parser_bin
TOOL_enum_parser_dependency
diff --git a/ydb/library/actors/core/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/core/CMakeLists.darwin-x86_64.txt
index acad464783..d606663430 100644
--- a/ydb/library/actors/core/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/core/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,8 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
get_built_tool_path(
TOOL_enum_parser_bin
TOOL_enum_parser_dependency
diff --git a/ydb/library/actors/core/CMakeLists.linux-aarch64.txt b/ydb/library/actors/core/CMakeLists.linux-aarch64.txt
index 9c381c07ba..0c23452507 100644
--- a/ydb/library/actors/core/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/core/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,8 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
get_built_tool_path(
TOOL_enum_parser_bin
TOOL_enum_parser_dependency
diff --git a/ydb/library/actors/core/CMakeLists.linux-x86_64.txt b/ydb/library/actors/core/CMakeLists.linux-x86_64.txt
index 9c381c07ba..0c23452507 100644
--- a/ydb/library/actors/core/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/core/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,8 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
get_built_tool_path(
TOOL_enum_parser_bin
TOOL_enum_parser_dependency
diff --git a/ydb/library/actors/core/CMakeLists.windows-x86_64.txt b/ydb/library/actors/core/CMakeLists.windows-x86_64.txt
index acad464783..d606663430 100644
--- a/ydb/library/actors/core/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/core/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,8 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
get_built_tool_path(
TOOL_enum_parser_bin
TOOL_enum_parser_dependency
diff --git a/library/cpp/actors/core/tsan.supp b/ydb/library/actors/core/tsan.supp
index ca5be0c0f0..ca5be0c0f0 100644
--- a/library/cpp/actors/core/tsan.supp
+++ b/ydb/library/actors/core/tsan.supp
diff --git a/ydb/library/actors/core/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/core/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..72ca15e72e
--- /dev/null
+++ b/ydb/library/actors/core/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,88 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut)
+target_include_directories(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core
+)
+target_link_libraries(ydb-library-actors-core-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-core-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_coroutine_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/benchmark_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actorsystem_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/performance_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ask_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/balancer_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_payload_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_basic_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_united_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/mon_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/scheduler_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut
+ TEST_TARGET
+ ydb-library-actors-core-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-core-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut)
diff --git a/ydb/library/actors/core/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/core/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..784612191d
--- /dev/null
+++ b/ydb/library/actors/core/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,89 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut)
+target_include_directories(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core
+)
+target_link_libraries(ydb-library-actors-core-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-core-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_coroutine_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/benchmark_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actorsystem_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/performance_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ask_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/balancer_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_payload_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_basic_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_united_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/mon_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/scheduler_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut
+ TEST_TARGET
+ ydb-library-actors-core-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-core-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut)
diff --git a/ydb/library/actors/core/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/core/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..260aa2596f
--- /dev/null
+++ b/ydb/library/actors/core/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,92 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut)
+target_include_directories(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core
+)
+target_link_libraries(ydb-library-actors-core-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-core-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_coroutine_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/benchmark_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actorsystem_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/performance_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ask_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/balancer_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_payload_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_basic_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_united_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/mon_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/scheduler_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut
+ TEST_TARGET
+ ydb-library-actors-core-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-core-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-core-ut)
diff --git a/ydb/library/actors/core/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/core/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..4adb4cbe7e
--- /dev/null
+++ b/ydb/library/actors/core/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,94 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut)
+target_include_directories(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core
+)
+target_link_libraries(ydb-library-actors-core-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-core-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_coroutine_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/benchmark_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actorsystem_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/performance_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ask_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/balancer_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_payload_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_basic_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_united_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/mon_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/scheduler_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut
+ TEST_TARGET
+ ydb-library-actors-core-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-core-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-core-ut)
diff --git a/library/cpp/actors/actor_type/CMakeLists.txt b/ydb/library/actors/core/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/actor_type/CMakeLists.txt
+++ b/ydb/library/actors/core/ut/CMakeLists.txt
diff --git a/ydb/library/actors/core/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/core/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..56c30fa8ef
--- /dev/null
+++ b/ydb/library/actors/core/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,82 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut)
+target_include_directories(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core
+)
+target_link_libraries(ydb-library-actors-core-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ library-actors-testlib
+)
+target_sources(ydb-library-actors-core-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_coroutine_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/benchmark_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/actorsystem_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/performance_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ask_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/balancer_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_payload_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/event_pb_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_basic_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/executor_pool_united_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/mon_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/scheduler_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut
+ TEST_TARGET
+ ydb-library-actors-core-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-core-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut)
diff --git a/ydb/library/actors/core/ut_fat/CMakeLists.darwin-arm64.txt b/ydb/library/actors/core/ut_fat/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..3450db8c63
--- /dev/null
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut_fat)
+target_link_libraries(ydb-library-actors-core-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+)
+target_link_options(ydb-library-actors-core-ut_fat PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-core-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ut_fat/actor_benchmark.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 20
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut_fat
+ TEST_TARGET
+ ydb-library-actors-core-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ TIMEOUT
+ 1200
+)
+target_allocator(ydb-library-actors-core-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut_fat)
diff --git a/ydb/library/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..52d1619cea
--- /dev/null
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut_fat)
+target_link_libraries(ydb-library-actors-core-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+)
+target_link_options(ydb-library-actors-core-ut_fat PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-core-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ut_fat/actor_benchmark.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 20
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut_fat
+ TEST_TARGET
+ ydb-library-actors-core-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ TIMEOUT
+ 1200
+)
+target_allocator(ydb-library-actors-core-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut_fat)
diff --git a/ydb/library/actors/core/ut_fat/CMakeLists.linux-aarch64.txt b/ydb/library/actors/core/ut_fat/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..1efe7c2285
--- /dev/null
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,74 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut_fat)
+target_link_libraries(ydb-library-actors-core-ut_fat PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+)
+target_link_options(ydb-library-actors-core-ut_fat PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-core-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ut_fat/actor_benchmark.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 20
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut_fat
+ TEST_TARGET
+ ydb-library-actors-core-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ TIMEOUT
+ 1200
+)
+target_allocator(ydb-library-actors-core-ut_fat
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-core-ut_fat)
diff --git a/ydb/library/actors/core/ut_fat/CMakeLists.linux-x86_64.txt b/ydb/library/actors/core/ut_fat/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..1163a0cc02
--- /dev/null
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,76 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut_fat)
+target_link_libraries(ydb-library-actors-core-ut_fat PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+)
+target_link_options(ydb-library-actors-core-ut_fat PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-core-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ut_fat/actor_benchmark.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 20
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut_fat
+ TEST_TARGET
+ ydb-library-actors-core-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ TIMEOUT
+ 1200
+)
+target_allocator(ydb-library-actors-core-ut_fat
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-core-ut_fat)
diff --git a/library/cpp/actors/core/CMakeLists.txt b/ydb/library/actors/core/ut_fat/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/core/CMakeLists.txt
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.txt
diff --git a/ydb/library/actors/core/ut_fat/CMakeLists.windows-x86_64.txt b/ydb/library/actors/core/ut_fat/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..d700cdd9e0
--- /dev/null
+++ b/ydb/library/actors/core/ut_fat/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,64 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-core-ut_fat)
+target_link_libraries(ydb-library-actors-core-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+)
+target_sources(ydb-library-actors-core-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/core/ut_fat/actor_benchmark.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 20
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-core-ut_fat
+ TEST_TARGET
+ ydb-library-actors-core-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-core-ut_fat
+ PROPERTY
+ TIMEOUT
+ 1200
+)
+target_allocator(ydb-library-actors-core-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-core-ut_fat)
diff --git a/ydb/library/actors/cppcoro/CMakeLists.darwin-arm64.txt b/ydb/library/actors/cppcoro/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..8e2847d70b
--- /dev/null
+++ b/ydb/library/actors/cppcoro/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,24 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(corobenchmark)
+add_subdirectory(ut)
+
+add_library(library-actors-cppcoro)
+target_link_libraries(library-actors-cppcoro PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_sources(library-actors-cppcoro PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/await_callback.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_group.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_result.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task.cpp
+)
diff --git a/ydb/library/actors/cppcoro/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/cppcoro/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..8e2847d70b
--- /dev/null
+++ b/ydb/library/actors/cppcoro/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,24 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(corobenchmark)
+add_subdirectory(ut)
+
+add_library(library-actors-cppcoro)
+target_link_libraries(library-actors-cppcoro PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_sources(library-actors-cppcoro PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/await_callback.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_group.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_result.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task.cpp
+)
diff --git a/ydb/library/actors/cppcoro/CMakeLists.linux-aarch64.txt b/ydb/library/actors/cppcoro/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..1e83180813
--- /dev/null
+++ b/ydb/library/actors/cppcoro/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,25 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(corobenchmark)
+add_subdirectory(ut)
+
+add_library(library-actors-cppcoro)
+target_link_libraries(library-actors-cppcoro PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_sources(library-actors-cppcoro PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/await_callback.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_group.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_result.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task.cpp
+)
diff --git a/ydb/library/actors/cppcoro/CMakeLists.linux-x86_64.txt b/ydb/library/actors/cppcoro/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..1e83180813
--- /dev/null
+++ b/ydb/library/actors/cppcoro/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,25 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(corobenchmark)
+add_subdirectory(ut)
+
+add_library(library-actors-cppcoro)
+target_link_libraries(library-actors-cppcoro PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_sources(library-actors-cppcoro PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/await_callback.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_group.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_result.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task.cpp
+)
diff --git a/library/cpp/actors/core/ut/CMakeLists.txt b/ydb/library/actors/cppcoro/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/core/ut/CMakeLists.txt
+++ b/ydb/library/actors/cppcoro/CMakeLists.txt
diff --git a/ydb/library/actors/cppcoro/CMakeLists.windows-x86_64.txt b/ydb/library/actors/cppcoro/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..8e2847d70b
--- /dev/null
+++ b/ydb/library/actors/cppcoro/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,24 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(corobenchmark)
+add_subdirectory(ut)
+
+add_library(library-actors-cppcoro)
+target_link_libraries(library-actors-cppcoro PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_sources(library-actors-cppcoro PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/await_callback.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_group.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_result.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task.cpp
+)
diff --git a/library/cpp/actors/cppcoro/await_callback.cpp b/ydb/library/actors/cppcoro/await_callback.cpp
index 5132131a8e..5132131a8e 100644
--- a/library/cpp/actors/cppcoro/await_callback.cpp
+++ b/ydb/library/actors/cppcoro/await_callback.cpp
diff --git a/library/cpp/actors/cppcoro/await_callback.h b/ydb/library/actors/cppcoro/await_callback.h
index fcb2eb78f9..fcb2eb78f9 100644
--- a/library/cpp/actors/cppcoro/await_callback.h
+++ b/ydb/library/actors/cppcoro/await_callback.h
diff --git a/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..c4ae691e51
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,30 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(corobenchmark)
+target_link_libraries(corobenchmark PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ testing-benchmark-main
+ library-actors-cppcoro
+)
+target_link_options(corobenchmark PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(corobenchmark PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/corobenchmark/main.cpp
+)
+target_allocator(corobenchmark
+ system_allocator
+)
+vcs_info(corobenchmark)
diff --git a/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..ffebcc68c8
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,31 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(corobenchmark)
+target_link_libraries(corobenchmark PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ testing-benchmark-main
+ library-actors-cppcoro
+)
+target_link_options(corobenchmark PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(corobenchmark PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/corobenchmark/main.cpp
+)
+target_allocator(corobenchmark
+ system_allocator
+)
+vcs_info(corobenchmark)
diff --git a/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..8ae4d82601
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(corobenchmark)
+target_link_libraries(corobenchmark PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ testing-benchmark-main
+ library-actors-cppcoro
+)
+target_link_options(corobenchmark PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(corobenchmark PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/corobenchmark/main.cpp
+)
+target_allocator(corobenchmark
+ cpp-malloc-jemalloc
+)
+vcs_info(corobenchmark)
diff --git a/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..7160c3cdfe
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,36 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(corobenchmark)
+target_link_libraries(corobenchmark PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ testing-benchmark-main
+ library-actors-cppcoro
+)
+target_link_options(corobenchmark PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(corobenchmark PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/corobenchmark/main.cpp
+)
+target_allocator(corobenchmark
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(corobenchmark)
diff --git a/library/cpp/actors/core/ut_fat/CMakeLists.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/core/ut_fat/CMakeLists.txt
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.txt
diff --git a/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..a9186c56c3
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,24 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(corobenchmark)
+target_link_libraries(corobenchmark PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ testing-benchmark-main
+ library-actors-cppcoro
+)
+target_sources(corobenchmark PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/corobenchmark/main.cpp
+)
+target_allocator(corobenchmark
+ system_allocator
+)
+vcs_info(corobenchmark)
diff --git a/ydb/library/actors/cppcoro/corobenchmark/main.cpp b/ydb/library/actors/cppcoro/corobenchmark/main.cpp
new file mode 100644
index 0000000000..36bbf242d3
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/main.cpp
@@ -0,0 +1,76 @@
+#include <ydb/library/actors/cppcoro/task.h>
+#include <ydb/library/actors/cppcoro/await_callback.h>
+#include <library/cpp/testing/benchmark/bench.h>
+
+using namespace NActors;
+
+namespace {
+
+ int LastValue = 0;
+
+ Y_NO_INLINE int NextFuncValue() {
+ return ++LastValue;
+ }
+
+ Y_NO_INLINE void IterateFuncValues(size_t iterations) {
+ for (size_t i = 0; i < iterations; ++i) {
+ int value = NextFuncValue();
+ Y_DO_NOT_OPTIMIZE_AWAY(value);
+ }
+ }
+
+ Y_NO_INLINE TTask<int> NextTaskValue() {
+ co_return ++LastValue;
+ }
+
+ Y_NO_INLINE TTask<void> IterateTaskValues(size_t iterations) {
+ for (size_t i = 0; i < iterations; ++i) {
+ int value = co_await NextTaskValue();
+ Y_DO_NOT_OPTIMIZE_AWAY(value);
+ }
+ }
+
+ std::coroutine_handle<> Paused;
+
+ struct {
+ static bool await_ready() noexcept {
+ return false;
+ }
+ static void await_suspend(std::coroutine_handle<> h) noexcept {
+ Paused = h;
+ }
+ static int await_resume() noexcept {
+ return ++LastValue;
+ }
+ } Pause;
+
+ Y_NO_INLINE TTask<void> IteratePauseValues(size_t iterations) {
+ for (size_t i = 0; i < iterations; ++i) {
+ int value = co_await Pause;
+ Y_DO_NOT_OPTIMIZE_AWAY(value);
+ }
+ }
+
+} // namespace
+
+Y_CPU_BENCHMARK(FuncCalls, iface) {
+ IterateFuncValues(iface.Iterations());
+}
+
+Y_CPU_BENCHMARK(TaskCalls, iface) {
+ bool finished = false;
+ AwaitThenCallback(IterateTaskValues(iface.Iterations()), [&]{
+ finished = true;
+ });
+ Y_ABORT_UNLESS(finished);
+}
+
+Y_CPU_BENCHMARK(CoroAwaits, iface) {
+ bool finished = false;
+ AwaitThenCallback(IteratePauseValues(iface.Iterations()), [&]{
+ finished = true;
+ });
+ while (!finished) {
+ std::exchange(Paused, {}).resume();
+ }
+}
diff --git a/ydb/library/actors/cppcoro/corobenchmark/ya.make b/ydb/library/actors/cppcoro/corobenchmark/ya.make
new file mode 100644
index 0000000000..1b78f71740
--- /dev/null
+++ b/ydb/library/actors/cppcoro/corobenchmark/ya.make
@@ -0,0 +1,11 @@
+Y_BENCHMARK()
+
+PEERDIR(
+ ydb/library/actors/cppcoro
+)
+
+SRCS(
+ main.cpp
+)
+
+END()
diff --git a/library/cpp/actors/cppcoro/task.cpp b/ydb/library/actors/cppcoro/task.cpp
index 204c27c573..204c27c573 100644
--- a/library/cpp/actors/cppcoro/task.cpp
+++ b/ydb/library/actors/cppcoro/task.cpp
diff --git a/library/cpp/actors/cppcoro/task.h b/ydb/library/actors/cppcoro/task.h
index bb5a385db2..bb5a385db2 100644
--- a/library/cpp/actors/cppcoro/task.h
+++ b/ydb/library/actors/cppcoro/task.h
diff --git a/ydb/library/actors/cppcoro/task_actor.cpp b/ydb/library/actors/cppcoro/task_actor.cpp
new file mode 100644
index 0000000000..78699bd521
--- /dev/null
+++ b/ydb/library/actors/cppcoro/task_actor.cpp
@@ -0,0 +1,183 @@
+#include "task_actor.h"
+#include "await_callback.h"
+#include <ydb/library/actors/core/actor.h>
+#include <ydb/library/actors/core/hfunc.h>
+
+namespace NActors {
+
+ class TTaskActorImpl;
+
+ static Y_POD_THREAD(TTaskActorImpl*) TlsCurrentTaskActor{nullptr};
+
+ struct TCurrentTaskActorGuard {
+ TCurrentTaskActorGuard(TTaskActorImpl* current) noexcept {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor == nullptr);
+ TlsCurrentTaskActor = current;
+ }
+
+ ~TCurrentTaskActorGuard() noexcept {
+ TlsCurrentTaskActor = nullptr;
+ }
+ };
+
+ enum : ui32 {
+ EvResumeTask = EventSpaceBegin(TEvents::ES_SYSTEM) + 256,
+ };
+
+ struct TEvResumeTask : public TEventLocal<TEvResumeTask, EvResumeTask> {
+ std::coroutine_handle<> Handle;
+ TTaskResult<void>* Result;
+
+ explicit TEvResumeTask(std::coroutine_handle<> handle, TTaskResult<void>* result) noexcept
+ : Handle(handle)
+ , Result(result)
+ {}
+
+ ~TEvResumeTask() noexcept {
+ if (Handle) {
+ Result->SetException(std::make_exception_ptr(TTaskCancelled()));
+ Handle.resume();
+ }
+ }
+ };
+
+ class TTaskActorResult final : public TAtomicRefCount<TTaskActorResult> {
+ public:
+ bool Finished = false;
+ };
+
+ class TTaskActorImpl : public TActor<TTaskActorImpl> {
+ friend class TTaskActor;
+ friend class TAfterAwaiter;
+ friend class TBindAwaiter;
+
+ public:
+ TTaskActorImpl(TTask<void>&& task)
+ : TActor(&TThis::StateBoot)
+ , Task(std::move(task))
+ {
+ Y_ABORT_UNLESS(Task);
+ }
+
+ ~TTaskActorImpl() {
+ Stopped = true;
+ while (EventAwaiter) {
+                // Keep resuming the event awaiter until the task stops waiting for new events
+ TCurrentTaskActorGuard guard(this);
+ std::exchange(EventAwaiter, {}).resume();
+ }
+ }
+
+ void Registered(TActorSystem* sys, const TActorId& parent) override {
+ ParentId = parent;
+ sys->Send(new IEventHandle(TEvents::TSystem::Bootstrap, 0, SelfId(), SelfId(), {}, 0));
+ }
+
+ STATEFN(StateBoot) {
+ Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvents::TSystem::Bootstrap, "Expected bootstrap event");
+ TCurrentTaskActorGuard guard(this);
+ Become(&TThis::StateWork);
+ AwaitThenCallback(std::move(Task).WhenDone(),
+ [result = Result](TTaskResult<void>&& outcome) noexcept {
+ result->Finished = true;
+ try {
+ outcome.Value();
+ } catch (TTaskCancelled&) {
+ // ignore
+ }
+ });
+ Check();
+ }
+
+ STATEFN(StateWork) {
+ TCurrentTaskActorGuard guard(this);
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvResumeTask, Handle);
+ default:
+ Y_ABORT_UNLESS(EventAwaiter);
+ Event.reset(ev.Release());
+ std::exchange(EventAwaiter, {}).resume();
+ }
+ Check();
+ }
+
+ void Handle(TEvResumeTask::TPtr& ev) {
+ auto* msg = ev->Get();
+ msg->Result->SetValue();
+ std::exchange(msg->Handle, {}).resume();
+ }
+
+ bool Check() {
+ if (Result->Finished) {
+ Y_ABORT_UNLESS(!EventAwaiter, "Task terminated while waiting for the next event");
+ PassAway();
+ return false;
+ }
+
+ Y_ABORT_UNLESS(EventAwaiter, "Task suspended without waiting for the next event");
+ return true;
+ }
+
+ void WaitForEvent(std::coroutine_handle<> h) noexcept {
+ Y_ABORT_UNLESS(!EventAwaiter, "Task cannot have multiple awaiters for the next event");
+ EventAwaiter = h;
+ }
+
+ std::unique_ptr<IEventHandle> FinishWaitForEvent() {
+ if (Stopped) {
+ throw TTaskCancelled();
+ }
+ Y_ABORT_UNLESS(Event, "Task does not have current event");
+ return std::move(Event);
+ }
+
+ private:
+ TIntrusivePtr<TTaskActorResult> Result = MakeIntrusive<TTaskActorResult>();
+ TTask<void> Task;
+ TActorId ParentId;
+ std::coroutine_handle<> EventAwaiter;
+ std::unique_ptr<IEventHandle> Event;
+ bool Stopped = false;
+ };
+
+ void TTaskActorNextEvent::await_suspend(std::coroutine_handle<> h) noexcept {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
+ TlsCurrentTaskActor->WaitForEvent(h);
+ }
+
+ std::unique_ptr<IEventHandle> TTaskActorNextEvent::await_resume() {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
+ return TlsCurrentTaskActor->FinishWaitForEvent();
+ }
+
+ IActor* TTaskActor::Create(TTask<void>&& task) {
+ return new TTaskActorImpl(std::move(task));
+ }
+
+ TActorIdentity TTaskActor::SelfId() noexcept {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
+ return TlsCurrentTaskActor->SelfId();
+ }
+
+ TActorId TTaskActor::ParentId() noexcept {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
+ return TlsCurrentTaskActor->ParentId;
+ }
+
+ void TAfterAwaiter::await_suspend(std::coroutine_handle<> h) noexcept {
+ Y_ABORT_UNLESS(TlsCurrentTaskActor, "Not in a task actor context");
+ TlsCurrentTaskActor->Schedule(Duration, new TEvResumeTask(h, &Result));
+ }
+
+ bool TBindAwaiter::await_ready() noexcept {
+ if (TlsCurrentTaskActor && TlsCurrentTaskActor->SelfId() == ActorId) {
+ return true;
+ }
+ return false;
+ }
+
+ void TBindAwaiter::await_suspend(std::coroutine_handle<> h) noexcept {
+ Sys->Send(new IEventHandle(ActorId, ActorId, new TEvResumeTask(h, &Result)));
+ }
+
+} // namespace NActors
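
The destructor of TEvResumeTask above is a deliberate safety net: if the resume event is destroyed without ever being delivered (for example, because the target actor is already gone), the suspended coroutine is woken with TTaskCancelled instead of being leaked in a suspended state. The standalone C++20 sketch below illustrates the same idiom outside the actor framework; every name in it (TSimpleTask, TResumeMessage, TWaitForResume, Waiter) is hypothetical and only mirrors the pattern, not the actual NActors types.

    #include <coroutine>
    #include <exception>
    #include <iostream>
    #include <memory>
    #include <stdexcept>
    #include <utility>

    // Minimal eager coroutine type, just enough to drive the example.
    struct TSimpleTask {
        struct promise_type {
            TSimpleTask get_return_object() { return {}; }
            std::suspend_never initial_suspend() noexcept { return {}; }
            std::suspend_never final_suspend() noexcept { return {}; }
            void return_void() {}
            void unhandled_exception() { std::terminate(); }
        };
    };

    // Analogue of TEvResumeTask: if the message is destroyed undelivered, its
    // destructor wakes the suspended coroutine with an error instead of leaking it.
    struct TResumeMessage {
        std::coroutine_handle<> Handle;
        std::exception_ptr* Error;

        ~TResumeMessage() {
            if (Handle) {
                *Error = std::make_exception_ptr(std::runtime_error("resume message dropped"));
                std::exchange(Handle, {}).resume();
            }
        }
    };

    // Awaiter that parks the coroutine as a message in a caller-provided mailbox slot.
    struct TWaitForResume {
        std::exception_ptr Error;
        std::unique_ptr<TResumeMessage>* Slot;

        bool await_ready() const noexcept { return false; }
        void await_suspend(std::coroutine_handle<> h) {
            Slot->reset(new TResumeMessage{h, &Error});
        }
        void await_resume() {
            if (Error) {
                std::rethrow_exception(Error);
            }
        }
    };

    TSimpleTask Waiter(std::unique_ptr<TResumeMessage>* mailbox) {
        try {
            co_await TWaitForResume{{}, mailbox};
            std::cout << "resumed normally" << std::endl;
        } catch (const std::exception& e) {
            std::cout << "woken with error: " << e.what() << std::endl;
        }
    }

    int main() {
        std::unique_ptr<TResumeMessage> mailbox;
        Waiter(&mailbox);   // runs until it suspends, leaving a message in the mailbox
        mailbox.reset();    // drop the message undelivered: Waiter takes the error path
        return 0;
    }

Dropping the message instead of delivering it takes the error path, which is what happens in the diff above when a TEvResumeTask cannot reach its target actor.
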
diff --git a/ydb/library/actors/cppcoro/task_actor.h b/ydb/library/actors/cppcoro/task_actor.h
new file mode 100644
index 0000000000..55736451d8
--- /dev/null
+++ b/ydb/library/actors/cppcoro/task_actor.h
@@ -0,0 +1,107 @@
+#include <ydb/library/actors/core/actor.h>
+#include "task.h"
+
+namespace NActors {
+
+ struct TTaskActorNextEvent {
+ static constexpr bool await_ready() noexcept { return false; }
+
+ static void await_suspend(std::coroutine_handle<> h) noexcept;
+
+ static std::unique_ptr<IEventHandle> await_resume();
+ };
+
+ class TAfterAwaiter {
+ public:
+ TAfterAwaiter(TDuration duration)
+ : Duration(duration)
+ {}
+
+ static constexpr bool await_ready() noexcept { return false; }
+
+ void await_suspend(std::coroutine_handle<> h) noexcept;
+
+ void await_resume() {
+ Result.Value();
+ }
+
+ private:
+ TDuration Duration;
+ TTaskResult<void> Result;
+ };
+
+ class TBindAwaiter {
+ public:
+ TBindAwaiter(TActorSystem* sys, const TActorId& actorId)
+ : Sys(sys)
+ , ActorId(actorId)
+ {}
+
+ bool await_ready() noexcept;
+
+ void await_suspend(std::coroutine_handle<> h) noexcept;
+
+ void await_resume() {
+ Result.Value();
+ }
+
+ private:
+ TActorSystem* Sys;
+ TActorId ActorId;
+ TTaskResult<void> Result;
+ };
+
+ class TTaskActor {
+ public:
+ /**
+ * Creates a new actor that will run the specified task.
+ */
+ static IActor* Create(TTask<void>&& task);
+
+ /**
+ * Returns the next actor event when awaited
+ */
+ static constexpr TTaskActorNextEvent NextEvent{};
+
+ /**
+         * Returns the identity of the current task actor.
+ */
+ static TActorIdentity SelfId() noexcept;
+
+ /**
+         * Returns the actor id of the actor that registered the current task actor.
+ */
+ static TActorId ParentId() noexcept;
+
+ /**
+         * Returns an awaiter that completes after the specified timeout.
+ */
+ static TAfterAwaiter After(TDuration duration) noexcept {
+ return TAfterAwaiter{ duration };
+ }
+
+ /**
+         * Returns an awaiter that completes on the actor thread when awaited.
+ */
+ static TBindAwaiter Bind() noexcept {
+ TActorId actorId = SelfId();
+ TActorSystem* sys = TActivationContext::ActorSystem();
+ return TBindAwaiter{ sys, actorId };
+ }
+
+ /**
+ * Returns a task that runs the specified task, but binds the result
+ * back to the actor thread. Useful when the specified task may be
+ * working with non-actor coroutines.
+ */
+ template<class T>
+ static TTask<T> Bind(TTask<T>&& task) {
+ return [](TTask<T> task, TBindAwaiter bindTask) -> TTask<T> {
+ auto result = co_await std::move(task).WhenDone();
+ co_await bindTask;
+ co_return std::move(result).Value();
+ }(std::move(task), Bind());
+ }
+ };
+
+} // namespace NActors
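
As a quick orientation for the API declared above, here is a minimal, hypothetical usage sketch. It mirrors the patterns exercised in task_actor_ut.cpp below; the event types, the Ponger task, and the registration line are illustrative only, and the necessary declarations are assumed to come in through task_actor.h, as they do in the test.

    #include <ydb/library/actors/cppcoro/task_actor.h>

    using namespace NActors;

    enum : ui32 {
        EvPing = EventSpaceBegin(TEvents::ES_USERSPACE),
        EvPong,
    };

    struct TEvPing: public TEventLocal<TEvPing, EvPing> {};
    struct TEvPong: public TEventLocal<TEvPong, EvPong> {};

    // Answer every TEvPing with a TEvPong; stop on any other event.
    TTask<void> Ponger() {
        for (;;) {
            auto ev = co_await TTaskActor::NextEvent;           // next mailbox event
            if (ev->GetTypeRewrite() != TEvPing::EventType) {
                co_return;
            }
            TTaskActor::SelfId().Send(ev->Sender, new TEvPong); // reply as this actor
        }
    }

    // Wrapping and registration on a running actor system, as in task_actor_ut.cpp:
    //     TActorId ponger = actorSystem.Register(TTaskActor::Create(Ponger()));
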
diff --git a/ydb/library/actors/cppcoro/task_actor_ut.cpp b/ydb/library/actors/cppcoro/task_actor_ut.cpp
new file mode 100644
index 0000000000..4274ef6671
--- /dev/null
+++ b/ydb/library/actors/cppcoro/task_actor_ut.cpp
@@ -0,0 +1,93 @@
+#include "task_actor.h"
+#include <ydb/library/actors/core/executor_pool_basic.h>
+#include <ydb/library/actors/core/scheduler_basic.h>
+
+#include <library/cpp/testing/unittest/registar.h>
+
+Y_UNIT_TEST_SUITE(TaskActor) {
+
+ using namespace NActors;
+
+ enum : ui32 {
+ EvBegin = EventSpaceBegin(TEvents::ES_USERSPACE),
+ EvRequest,
+ EvResponse,
+ EvStop,
+ };
+
+ struct TEvRequest: public TEventLocal<TEvRequest, EvRequest> {
+ };
+
+ struct TEvResponse: public TEventLocal<TEvResponse, EvResponse> {
+ };
+
+ struct TEvStop: public TEventLocal<TEvStop, EvStop> {
+ };
+
+ TTask<void> SimpleResponder() {
+ for (;;) {
+ auto ev = co_await TTaskActor::NextEvent;
+ Y_ABORT_UNLESS(ev->GetTypeRewrite() == TEvRequest::EventType);
+ auto* msg = ev->Get<TEvRequest>();
+ Y_UNUSED(msg);
+ TTaskActor::SelfId().Send(ev->Sender, new TEvResponse);
+ }
+ }
+
+ TTask<void> SimpleRequester(TActorId responder, TManualEvent& doneEvent, std::atomic<int>& itemsProcessed) {
+        // Note: capturing by reference is safe here because the captured variables outlive this coroutine
+ auto singleRequest = [&]() -> TTask<bool> {
+ TTaskActor::SelfId().Send(responder, new TEvRequest);
+ auto ev = co_await TTaskActor::NextEvent;
+ switch (ev->GetTypeRewrite()) {
+ case TEvResponse::EventType:
+ co_return true;
+ case TEvStop::EventType:
+ co_return false;
+ default:
+ Y_ABORT("Unexpected event");
+ }
+ };
+ while (co_await singleRequest()) {
+ ++itemsProcessed;
+ }
+ doneEvent.Signal();
+ }
+
+ void Check(TDuration duration, std::unique_ptr<IEventBase> stopEvent) {
+ THolder<TActorSystemSetup> setup = MakeHolder<TActorSystemSetup>();
+ setup->NodeId = 0;
+ setup->ExecutorsCount = 1;
+ setup->Executors.Reset(new TAutoPtr<IExecutorPool>[setup->ExecutorsCount]);
+ for (ui32 i = 0; i < setup->ExecutorsCount; ++i) {
+ setup->Executors[i] = new TBasicExecutorPool(i, 5, 10, "basic");
+ }
+ setup->Scheduler = new TBasicSchedulerThread;
+
+ TActorSystem actorSystem(setup);
+
+ actorSystem.Start();
+
+ TManualEvent doneEvent;
+ std::atomic<int> itemsProcessed{0};
+
+ auto responder = actorSystem.Register(TTaskActor::Create(SimpleResponder()));
+ auto requester = actorSystem.Register(TTaskActor::Create(SimpleRequester(responder, doneEvent, itemsProcessed)));
+ auto deadline = TMonotonic::Now() + duration;
+ while (itemsProcessed.load() < 10) {
+ UNIT_ASSERT_C(TMonotonic::Now() < deadline, "cannot observe 10 responses in " << duration);
+ Sleep(TDuration::MilliSeconds(100));
+ }
+ actorSystem.Send(requester, stopEvent.release());
+ doneEvent.WaitI();
+
+ UNIT_ASSERT_GE(itemsProcessed.load(), 10);
+
+ actorSystem.Stop();
+ }
+
+ Y_UNIT_TEST(Basic) {
+ Check(TDuration::Seconds(10), std::make_unique<TEvStop>());
+ }
+
+} // Y_UNIT_TEST_SUITE(TaskActor)
diff --git a/library/cpp/actors/cppcoro/task_group.cpp b/ydb/library/actors/cppcoro/task_group.cpp
index 9ddd30d707..9ddd30d707 100644
--- a/library/cpp/actors/cppcoro/task_group.cpp
+++ b/ydb/library/actors/cppcoro/task_group.cpp
diff --git a/library/cpp/actors/cppcoro/task_group.h b/ydb/library/actors/cppcoro/task_group.h
index 1de0cf5c1e..1de0cf5c1e 100644
--- a/library/cpp/actors/cppcoro/task_group.h
+++ b/ydb/library/actors/cppcoro/task_group.h
diff --git a/library/cpp/actors/cppcoro/task_result.cpp b/ydb/library/actors/cppcoro/task_result.cpp
index bb1a1dc5ca..bb1a1dc5ca 100644
--- a/library/cpp/actors/cppcoro/task_result.cpp
+++ b/ydb/library/actors/cppcoro/task_result.cpp
diff --git a/library/cpp/actors/cppcoro/task_result.h b/ydb/library/actors/cppcoro/task_result.h
index da78c53b7a..da78c53b7a 100644
--- a/library/cpp/actors/cppcoro/task_result.h
+++ b/ydb/library/actors/cppcoro/task_result.h
diff --git a/library/cpp/actors/cppcoro/task_ut.cpp b/ydb/library/actors/cppcoro/task_ut.cpp
index a1ed5426fc..a1ed5426fc 100644
--- a/library/cpp/actors/cppcoro/task_ut.cpp
+++ b/ydb/library/actors/cppcoro/task_ut.cpp
diff --git a/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..c277a4b75b
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-cppcoro-ut)
+target_include_directories(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro
+)
+target_link_libraries(ydb-library-actors-cppcoro-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-cppcoro
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-cppcoro-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-cppcoro-ut
+ TEST_TARGET
+ ydb-library-actors-cppcoro-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-cppcoro-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-cppcoro-ut)
diff --git a/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..4166f65445
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-cppcoro-ut)
+target_include_directories(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro
+)
+target_link_libraries(ydb-library-actors-cppcoro-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-cppcoro
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-cppcoro-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-cppcoro-ut
+ TEST_TARGET
+ ydb-library-actors-cppcoro-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-cppcoro-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-cppcoro-ut)
diff --git a/ydb/library/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..a91773f80f
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,72 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-cppcoro-ut)
+target_include_directories(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro
+)
+target_link_libraries(ydb-library-actors-cppcoro-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-cppcoro
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-cppcoro-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-cppcoro-ut
+ TEST_TARGET
+ ydb-library-actors-cppcoro-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-cppcoro-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-cppcoro-ut)
diff --git a/ydb/library/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..41f3d81b2d
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,74 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-cppcoro-ut)
+target_include_directories(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro
+)
+target_link_libraries(ydb-library-actors-cppcoro-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-cppcoro
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-cppcoro-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-cppcoro-ut
+ TEST_TARGET
+ ydb-library-actors-cppcoro-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-cppcoro-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-cppcoro-ut)
diff --git a/library/cpp/actors/cppcoro/CMakeLists.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/cppcoro/CMakeLists.txt
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.txt
diff --git a/ydb/library/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..ddb5bbb146
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,62 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-cppcoro-ut)
+target_include_directories(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro
+)
+target_link_libraries(ydb-library-actors-cppcoro-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-cppcoro
+ library-actors-testlib
+)
+target_sources(ydb-library-actors-cppcoro-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/cppcoro/task_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-cppcoro-ut
+ TEST_TARGET
+ ydb-library-actors-cppcoro-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-cppcoro-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-cppcoro-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-cppcoro-ut)
diff --git a/ydb/library/actors/cppcoro/ut/ya.make b/ydb/library/actors/cppcoro/ut/ya.make
new file mode 100644
index 0000000000..4f8f1da5ed
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ut/ya.make
@@ -0,0 +1,12 @@
+UNITTEST_FOR(ydb/library/actors/cppcoro)
+
+PEERDIR(
+ ydb/library/actors/testlib
+)
+
+SRCS(
+ task_ut.cpp
+ task_actor_ut.cpp
+)
+
+END()
diff --git a/ydb/library/actors/cppcoro/ya.make b/ydb/library/actors/cppcoro/ya.make
new file mode 100644
index 0000000000..4c93e7d60d
--- /dev/null
+++ b/ydb/library/actors/cppcoro/ya.make
@@ -0,0 +1,25 @@
+LIBRARY()
+
+PEERDIR(
+ ydb/library/actors/core
+)
+
+SRCS(
+ await_callback.cpp
+ await_callback.h
+ task_actor.cpp
+ task_actor.h
+ task_group.cpp
+ task_group.h
+ task_result.cpp
+ task_result.h
+ task.cpp
+ task.h
+)
+
+END()
+
+RECURSE_FOR_TESTS(
+ corobenchmark
+ ut
+)
diff --git a/ydb/library/actors/dnsresolver/CMakeLists.darwin-arm64.txt b/ydb/library/actors/dnsresolver/CMakeLists.darwin-arm64.txt
index 466c5c1a39..0239ed699b 100644
--- a/ydb/library/actors/dnsresolver/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/dnsresolver/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-dnsresolver)
target_link_libraries(library-actors-dnsresolver PUBLIC
diff --git a/ydb/library/actors/dnsresolver/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/dnsresolver/CMakeLists.darwin-x86_64.txt
index 466c5c1a39..0239ed699b 100644
--- a/ydb/library/actors/dnsresolver/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/dnsresolver/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-dnsresolver)
target_link_libraries(library-actors-dnsresolver PUBLIC
diff --git a/ydb/library/actors/dnsresolver/CMakeLists.linux-aarch64.txt b/ydb/library/actors/dnsresolver/CMakeLists.linux-aarch64.txt
index f525afb544..7ae8839137 100644
--- a/ydb/library/actors/dnsresolver/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/dnsresolver/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-dnsresolver)
target_link_libraries(library-actors-dnsresolver PUBLIC
diff --git a/ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt b/ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt
index f525afb544..7ae8839137 100644
--- a/ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/dnsresolver/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-dnsresolver)
target_link_libraries(library-actors-dnsresolver PUBLIC
diff --git a/ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt b/ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt
index 466c5c1a39..0239ed699b 100644
--- a/ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/dnsresolver/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-dnsresolver)
target_link_libraries(library-actors-dnsresolver PUBLIC
diff --git a/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..d39e015be6
--- /dev/null
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-dnsresolver-ut)
+target_include_directories(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver
+ ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
+)
+target_link_libraries(ydb-library-actors-dnsresolver-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-dnsresolver
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-dnsresolver-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-dnsresolver-ut
+ TEST_TARGET
+ ydb-library-actors-dnsresolver-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-dnsresolver-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-dnsresolver-ut)
diff --git a/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..0ec77485fa
--- /dev/null
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-dnsresolver-ut)
+target_include_directories(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver
+ ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
+)
+target_link_libraries(ydb-library-actors-dnsresolver-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-dnsresolver
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-dnsresolver-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-dnsresolver-ut
+ TEST_TARGET
+ ydb-library-actors-dnsresolver-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-dnsresolver-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-dnsresolver-ut)
diff --git a/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..f0ef871fd6
--- /dev/null
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,74 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-dnsresolver-ut)
+target_include_directories(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver
+ ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
+)
+target_link_libraries(ydb-library-actors-dnsresolver-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-dnsresolver
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-dnsresolver-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-dnsresolver-ut
+ TEST_TARGET
+ ydb-library-actors-dnsresolver-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-dnsresolver-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-dnsresolver-ut)
diff --git a/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..28de710f26
--- /dev/null
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,76 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-dnsresolver-ut)
+target_include_directories(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver
+ ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
+)
+target_link_libraries(ydb-library-actors-dnsresolver-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-dnsresolver
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-dnsresolver-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-dnsresolver-ut
+ TEST_TARGET
+ ydb-library-actors-dnsresolver-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-dnsresolver-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-dnsresolver-ut)
diff --git a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/cppcoro/corobenchmark/CMakeLists.txt
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.txt
diff --git a/ydb/library/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..e604ef5b88
--- /dev/null
+++ b/ydb/library/actors/dnsresolver/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,64 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-dnsresolver-ut)
+target_include_directories(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver
+ ${CMAKE_SOURCE_DIR}/contrib/libs/c-ares/include
+)
+target_link_libraries(ydb-library-actors-dnsresolver-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-dnsresolver
+ library-actors-testlib
+)
+target_sources(ydb-library-actors-dnsresolver-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_caching_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ondemand_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/dnsresolver/dnsresolver_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-dnsresolver-ut
+ TEST_TARGET
+ ydb-library-actors-dnsresolver-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-dnsresolver-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-dnsresolver-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-dnsresolver-ut)
diff --git a/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..9d2cf5c700
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,29 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(example_01_ping_pong)
+target_link_libraries(example_01_ping_pong PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_link_options(example_01_ping_pong PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(example_01_ping_pong PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/01_ping_pong/main.cpp
+)
+target_allocator(example_01_ping_pong
+ library-cpp-lfalloc
+)
+vcs_info(example_01_ping_pong)
diff --git a/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..bb175a877a
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,30 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(example_01_ping_pong)
+target_link_libraries(example_01_ping_pong PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+)
+target_link_options(example_01_ping_pong PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(example_01_ping_pong PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/01_ping_pong/main.cpp
+)
+target_allocator(example_01_ping_pong
+ library-cpp-lfalloc
+)
+vcs_info(example_01_ping_pong)
diff --git a/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..73dc0b020d
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,33 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(example_01_ping_pong)
+target_link_libraries(example_01_ping_pong PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+)
+target_link_options(example_01_ping_pong PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(example_01_ping_pong PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/01_ping_pong/main.cpp
+)
+target_allocator(example_01_ping_pong
+ library-cpp-lfalloc
+)
+vcs_info(example_01_ping_pong)
diff --git a/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..f30012ff41
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,34 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(example_01_ping_pong)
+target_link_libraries(example_01_ping_pong PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+)
+target_link_options(example_01_ping_pong PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(example_01_ping_pong PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/01_ping_pong/main.cpp
+)
+target_allocator(example_01_ping_pong
+ library-cpp-lfalloc
+)
+vcs_info(example_01_ping_pong)
diff --git a/library/cpp/actors/cppcoro/ut/CMakeLists.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/cppcoro/ut/CMakeLists.txt
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.txt
diff --git a/ydb/library/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt b/ydb/library/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..d25574b9c2
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,23 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(example_01_ping_pong)
+target_link_libraries(example_01_ping_pong PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+)
+target_sources(example_01_ping_pong PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/01_ping_pong/main.cpp
+)
+target_allocator(example_01_ping_pong
+ library-cpp-lfalloc
+)
+vcs_info(example_01_ping_pong)
diff --git a/ydb/library/actors/examples/01_ping_pong/main.cpp b/ydb/library/actors/examples/01_ping_pong/main.cpp
new file mode 100644
index 0000000000..78a2db8e90
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/main.cpp
@@ -0,0 +1,129 @@
+#include <ydb/library/actors/core/actorsystem.h>
+#include <ydb/library/actors/core/executor_pool_basic.h>
+#include <ydb/library/actors/core/scheduler_basic.h>
+#include <ydb/library/actors/core/log.h>
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/util/should_continue.h>
+#include <util/system/sigset.h>
+#include <util/generic/xrange.h>
+
+using namespace NActors;
+
+static TProgramShouldContinue ShouldContinue;
+
+void OnTerminate(int) {
+ ShouldContinue.ShouldStop();
+}
+
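+// TPingActor either initiates the ping/pong exchange (when constructed with a target) and prints per-second throughput stats, or simply answers incoming pings.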
+class TPingActor : public TActorBootstrapped<TPingActor> {
+ const TActorId Target;
+ ui64 HandledEvents;
+ TInstant PeriodStart;
+
+ void Handle(TEvents::TEvPing::TPtr &ev) {
+ Send(ev->Sender, new TEvents::TEvPong());
+ Send(ev->Sender, new TEvents::TEvPing());
+ Become(&TThis::StatePing);
+ }
+
+ void Handle(TEvents::TEvPong::TPtr &ev) {
+ Y_UNUSED(ev);
+ Become(&TThis::StateWait);
+ }
+
+ void PrintStats() {
+ const i64 ms = (TInstant::Now() - PeriodStart).MilliSeconds();
+ Cout << "Handled " << 2 * HandledEvents << " over " << ms << "ms" << Endl;
+ ScheduleStats();
+ }
+
+ void ScheduleStats() {
+ HandledEvents = 0;
+ PeriodStart = TInstant::Now();
+ Schedule(TDuration::Seconds(1), new TEvents::TEvWakeup());
+ }
+
+public:
+ TPingActor(TActorId target)
+ : Target(target)
+ , HandledEvents(0)
+ , PeriodStart(TInstant::Now())
+ {}
+
+ STFUNC(StateWait) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvents::TEvPing, Handle);
+ sFunc(TEvents::TEvWakeup, PrintStats);
+ }
+
+ ++HandledEvents;
+ }
+
+ STFUNC(StatePing) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvents::TEvPong, Handle);
+ sFunc(TEvents::TEvWakeup, PrintStats);
+ }
+
+ ++HandledEvents;
+ }
+
+ void Bootstrap() {
+ if (Target) {
+ Become(&TThis::StatePing);
+ Send(Target, new TEvents::TEvPing());
+ ScheduleStats();
+ }
+ else {
+ Become(&TThis::StateWait);
+        }
+ }
+};
+
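+// Builds a minimal actor system setup: 'pools' basic executor pools with 'threads' threads each and a basic scheduler.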
+THolder<TActorSystemSetup> BuildActorSystemSetup(ui32 threads, ui32 pools) {
+ Y_ABORT_UNLESS(threads > 0 && threads < 100);
+ Y_ABORT_UNLESS(pools > 0 && pools < 10);
+
+ auto setup = MakeHolder<TActorSystemSetup>();
+
+ setup->NodeId = 1;
+
+ setup->ExecutorsCount = pools;
+ setup->Executors.Reset(new TAutoPtr<IExecutorPool>[pools]);
+ for (ui32 idx : xrange(pools)) {
+ setup->Executors[idx] = new TBasicExecutorPool(idx, threads, 50);
+ }
+
+ setup->Scheduler = new TBasicSchedulerThread(TSchedulerConfig(512, 0));
+
+ return setup;
+}
+
+int main(int argc, char **argv) {
+ Y_UNUSED(argc);
+ Y_UNUSED(argv);
+
+#ifdef _unix_
+ signal(SIGPIPE, SIG_IGN);
+#endif
+ signal(SIGINT, &OnTerminate);
+ signal(SIGTERM, &OnTerminate);
+
+ THolder<TActorSystemSetup> actorSystemSetup = BuildActorSystemSetup(2, 1);
+ TActorSystem actorSystem(actorSystemSetup);
+
+ actorSystem.Start();
+
+ const TActorId a = actorSystem.Register(new TPingActor(TActorId()));
+ const TActorId b = actorSystem.Register(new TPingActor(a));
+ Y_UNUSED(b);
+
+ while (ShouldContinue.PollState() == TProgramShouldContinue::Continue) {
+ Sleep(TDuration::MilliSeconds(200));
+ }
+
+ actorSystem.Stop();
+ actorSystem.Cleanup();
+
+ return ShouldContinue.GetReturnCode();
+}
diff --git a/ydb/library/actors/examples/01_ping_pong/ya.make b/ydb/library/actors/examples/01_ping_pong/ya.make
new file mode 100644
index 0000000000..16d53f2ab9
--- /dev/null
+++ b/ydb/library/actors/examples/01_ping_pong/ya.make
@@ -0,0 +1,13 @@
+PROGRAM(example_01_ping_pong)
+
+ALLOCATOR(LF)
+
+SRCS(
+ main.cpp
+)
+
+PEERDIR(
+ ydb/library/actors/core
+)
+
+END()
diff --git a/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..c5783b9062
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,66 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_executable(example_02_discovery)
+target_link_libraries(example_02_discovery PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+ library-actors-dnsresolver
+ library-actors-interconnect
+ library-actors-http
+ contrib-libs-protobuf
+)
+target_link_options(example_02_discovery PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_proto_messages(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/protocol.proto
+)
+target_sources(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/lookup.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/main.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/publish.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/replica.cpp
+)
+target_allocator(example_02_discovery
+ library-cpp-lfalloc
+)
+target_proto_addincls(example_02_discovery
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(example_02_discovery
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
+vcs_info(example_02_discovery)
diff --git a/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..51c6fb2446
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,67 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_executable(example_02_discovery)
+target_link_libraries(example_02_discovery PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+ library-actors-dnsresolver
+ library-actors-interconnect
+ library-actors-http
+ contrib-libs-protobuf
+)
+target_link_options(example_02_discovery PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_proto_messages(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/protocol.proto
+)
+target_sources(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/lookup.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/main.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/publish.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/replica.cpp
+)
+target_allocator(example_02_discovery
+ library-cpp-lfalloc
+)
+target_proto_addincls(example_02_discovery
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(example_02_discovery
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
+vcs_info(example_02_discovery)
diff --git a/ydb/library/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..457698edea
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_executable(example_02_discovery)
+target_link_libraries(example_02_discovery PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-actors-core
+ library-actors-dnsresolver
+ library-actors-interconnect
+ library-actors-http
+ contrib-libs-protobuf
+)
+target_link_options(example_02_discovery PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_proto_messages(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/protocol.proto
+)
+target_sources(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/lookup.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/main.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/publish.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/replica.cpp
+)
+target_allocator(example_02_discovery
+ library-cpp-lfalloc
+)
+target_proto_addincls(example_02_discovery
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(example_02_discovery
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
+vcs_info(example_02_discovery)
diff --git a/ydb/library/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..2efa983d14
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_executable(example_02_discovery)
+target_link_libraries(example_02_discovery PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+ library-actors-dnsresolver
+ library-actors-interconnect
+ library-actors-http
+ contrib-libs-protobuf
+)
+target_link_options(example_02_discovery PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_proto_messages(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/protocol.proto
+)
+target_sources(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/lookup.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/main.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/publish.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/replica.cpp
+)
+target_allocator(example_02_discovery
+ library-cpp-lfalloc
+)
+target_proto_addincls(example_02_discovery
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(example_02_discovery
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
+vcs_info(example_02_discovery)
diff --git a/library/cpp/actors/dnscachelib/CMakeLists.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/dnscachelib/CMakeLists.txt
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.txt
diff --git a/ydb/library/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt b/ydb/library/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..e8a950fdf7
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,60 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_executable(example_02_discovery)
+target_link_libraries(example_02_discovery PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ library-actors-core
+ library-actors-dnsresolver
+ library-actors-interconnect
+ library-actors-http
+ contrib-libs-protobuf
+)
+target_proto_messages(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/protocol.proto
+)
+target_sources(example_02_discovery PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/endpoint.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/lookup.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/main.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/publish.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/examples/02_discovery/replica.cpp
+)
+target_allocator(example_02_discovery
+ library-cpp-lfalloc
+)
+target_proto_addincls(example_02_discovery
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_SOURCE_DIR}/contrib/libs/opentelemetry-proto
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(example_02_discovery
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
+vcs_info(example_02_discovery)
diff --git a/ydb/library/actors/examples/02_discovery/endpoint.cpp b/ydb/library/actors/examples/02_discovery/endpoint.cpp
new file mode 100644
index 0000000000..000ffb5415
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/endpoint.cpp
@@ -0,0 +1,116 @@
+#include "services.h"
+
+#include <ydb/library/actors/core/hfunc.h>
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <ydb/library/actors/protos/services_common.pb.h>
+
+#include <ydb/library/actors/http/http.h>
+#include <ydb/library/actors/http/http_proxy.h>
+
+#include <util/system/hostname.h>
+#include <util/string/builder.h>
+
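+// Serves a single incoming HTTP request: starts a lookup actor for the publish key and replies with the discovered payloads, one per line.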
+class TExampleHttpRequest : public TActor<TExampleHttpRequest> {
+ TIntrusivePtr<TExampleStorageConfig> Config;
+ const TString PublishKey;
+
+ TActorId HttpProxy;
+ NHttp::THttpIncomingRequestPtr Request;
+
+ void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr &ev) {
+ Request = std::move(ev->Get()->Request);
+ HttpProxy = ev->Sender;
+
+ Register(CreateLookupActor(Config.Get(), PublishKey, SelfId()));
+ }
+
+ void Handle(TEvExample::TEvInfo::TPtr &ev) {
+ auto *msg = ev->Get();
+
+ TStringBuilder body;
+ for (const auto &x : msg->Payloads)
+ body << x << Endl;
+
+ auto response = Request->CreateResponseOK(body, "application/text; charset=utf-8");
+ Send(HttpProxy, new NHttp::TEvHttpProxy::TEvHttpOutgoingResponse(response));
+
+ PassAway();
+ }
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExampleHttpRequest(TExampleStorageConfig *config, const TString &publishKey)
+ : TActor(&TThis::StateWork)
+ , Config(config)
+ , PublishKey(publishKey)
+ {}
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
+ hFunc(TEvExample::TEvInfo, Handle);
+ }
+ }
+};
+
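+// Publishes its own HTTP port under the publish key and serves "/list" by spawning a TExampleHttpRequest for every incoming request.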
+class TExampleHttpEndpoint : public TActorBootstrapped<TExampleHttpEndpoint> {
+ TIntrusivePtr<TExampleStorageConfig> Config;
+ const TString PublishKey;
+ const ui16 HttpPort;
+
+ TActorId PublishActor;
+ TActorId HttpProxy;
+
+ std::shared_ptr<NMonitoring::TMetricRegistry> SensorsRegistry = std::make_shared<NMonitoring::TMetricRegistry>();
+
+ void PassAway() override {
+ Send(PublishActor, new TEvents::TEvPoison());
+ Send(HttpProxy, new TEvents::TEvPoison());
+
+ return TActor::PassAway();
+ }
+
+ void Handle(NHttp::TEvHttpProxy::TEvHttpIncomingRequest::TPtr &ev) {
+ const TActorId reqActor = Register(new TExampleHttpRequest(Config.Get(), PublishKey));
+ TlsActivationContext->Send(ev->Forward(reqActor));
+ }
+
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExampleHttpEndpoint(TExampleStorageConfig *config, const TString &publishKey, ui16 port)
+ : Config(config)
+ , PublishKey(publishKey)
+ , HttpPort(port)
+ {
+ }
+
+ void Bootstrap() {
+ const TString publishPayload = ToString(HttpPort);
+ PublishActor = Register(CreatePublishActor(Config.Get(), PublishKey, publishPayload));
+ HttpProxy = Register(NHttp::CreateHttpProxy(SensorsRegistry));
+
+ Send(HttpProxy, new NHttp::TEvHttpProxy::TEvAddListeningPort(HttpPort, FQDNHostName()));
+ Send(HttpProxy, new NHttp::TEvHttpProxy::TEvRegisterHandler("/list", SelfId()));
+
+ Become(&TThis::StateWork);
+ }
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(NHttp::TEvHttpProxy::TEvHttpIncomingRequest, Handle);
+ default:
+ break;
+ }
+ }
+};
+
+IActor* CreateEndpointActor(TExampleStorageConfig *config, const TString &publishKey, ui16 port) {
+ return new TExampleHttpEndpoint(config, publishKey, port);
+}
diff --git a/ydb/library/actors/examples/02_discovery/lookup.cpp b/ydb/library/actors/examples/02_discovery/lookup.cpp
new file mode 100644
index 0000000000..22ee998478
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/lookup.cpp
@@ -0,0 +1,132 @@
+#include "services.h"
+
+#include <ydb/library/actors/core/actorsystem.h>
+#include <ydb/library/actors/core/hfunc.h>
+#include <ydb/library/actors/core/interconnect.h>
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <util/generic/set.h>
+#include <util/generic/vector.h>
+
+class TExampleLookupRequestActor : public TActor<TExampleLookupRequestActor> {
+ const TActorId Owner;
+ const TActorId Replica;
+ const TString Key;
+
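+    // send the lookup right after registration, requesting delivery tracking and interconnect session subscription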
+ void Registered(TActorSystem* sys, const TActorId&) override {
+ const auto flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession;
+ sys->Send(new IEventHandle(Replica, SelfId(), new TEvExample::TEvReplicaLookup(Key), flags));
+ }
+
+ void PassAway() override {
+ const ui32 replicaNode = Replica.NodeId();
+ if (replicaNode != SelfId().NodeId()) {
+ const TActorId &interconnectProxy = TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(Replica.NodeId());
+ Send(interconnectProxy, new TEvents::TEvUnsubscribe());
+ }
+ return IActor::PassAway();
+ }
+
+ void Handle(TEvExample::TEvReplicaInfo::TPtr &ev) {
+ Send(Owner, ev->Release().Release());
+ return PassAway();
+ }
+
+ void HandleUndelivered() {
+ Send(Owner, new TEvExample::TEvReplicaInfo(Key));
+ return PassAway();
+ }
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExampleLookupRequestActor(TActorId owner, TActorId replica, const TString &key)
+ : TActor(&TThis::StateWork)
+ , Owner(owner)
+ , Replica(replica)
+ , Key(key)
+ {}
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvExample::TEvReplicaInfo, Handle);
+ sFunc(TEvents::TEvUndelivered, HandleUndelivered);
+ sFunc(TEvInterconnect::TEvNodeDisconnected, HandleUndelivered);
+ default:
+ break;
+ }
+ }
+};
+
+class TExampleLookupActor : public TActorBootstrapped<TExampleLookupActor> {
+ TIntrusiveConstPtr<TExampleStorageConfig> Config;
+ const TString Key;
+ const TActorId ReplyTo;
+ TVector<TActorId> RequestActors;
+
+ ui32 TotalReplicas = 0;
+ ui32 RepliedSuccess = 0;
+ ui32 RepliedError = 0;
+
+ TSet<TString> Payloads;
+
+ void Handle(TEvExample::TEvReplicaInfo::TPtr &ev) {
+ NActorsExample::TEvReplicaInfo &record = ev->Get()->Record;
+ if (record.PayloadSize()) {
+ ++RepliedSuccess;
+ for (const TString &payload : record.GetPayload()) {
+ Payloads.insert(payload);
+ }
+ }
+ else {
+ ++RepliedError;
+ }
+
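+        // reply once a majority of replicas answered successfully, or when every replica has replied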
+ const ui32 majority = (TotalReplicas / 2 + 1);
+ if (RepliedSuccess == majority || (RepliedSuccess + RepliedError == TotalReplicas))
+ return ReplyAndDie();
+ }
+
+ void ReplyAndDie() {
+ TVector<TString> replyPayloads(Payloads.begin(), Payloads.end());
+ Send(ReplyTo, new TEvExample::TEvInfo(Key, std::move(replyPayloads)));
+ return PassAway();
+ }
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExampleLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo)
+ : Config(config)
+ , Key(key)
+ , ReplyTo(replyTo)
+ {}
+
+ void Bootstrap() {
+ Y_ABORT_UNLESS(Config->Replicas.size() > 0);
+
+ TotalReplicas = Config->Replicas.size();
+ RequestActors.reserve(TotalReplicas);
+ for (const auto &replica : Config->Replicas) {
+ const TActorId requestActor = Register(new TExampleLookupRequestActor(SelfId(), replica, Key));
+ RequestActors.emplace_back(requestActor);
+ }
+
+ Become(&TThis::StateWork);
+ }
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvExample::TEvReplicaInfo, Handle);
+ default:
+ break;
+ }
+ }
+};
+
+IActor* CreateLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo) {
+ return new TExampleLookupActor(config, key, replyTo);
+}
diff --git a/ydb/library/actors/examples/02_discovery/main.cpp b/ydb/library/actors/examples/02_discovery/main.cpp
new file mode 100644
index 0000000000..3e092f63f6
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/main.cpp
@@ -0,0 +1,136 @@
+#include "services.h"
+
+#include <ydb/library/actors/core/actorsystem.h>
+#include <ydb/library/actors/core/executor_pool_basic.h>
+#include <ydb/library/actors/core/scheduler_basic.h>
+#include <ydb/library/actors/core/log.h>
+#include <ydb/library/actors/dnsresolver/dnsresolver.h>
+#include <ydb/library/actors/interconnect/interconnect.h>
+#include <ydb/library/actors/interconnect/interconnect_common.h>
+#include <ydb/library/actors/interconnect/interconnect_tcp_proxy.h>
+#include <ydb/library/actors/interconnect/interconnect_tcp_server.h>
+#include <ydb/library/actors/interconnect/poller_actor.h>
+#include <ydb/library/actors/interconnect/poller_tcp.h>
+#include <ydb/library/actors/util/should_continue.h>
+
+#include <util/system/sigset.h>
+#include <util/generic/xrange.h>
+
+using namespace NActors;
+using namespace NActors::NDnsResolver;
+
+static const ui32 CfgTotalReplicaNodes = 5;
+static const ui16 CfgBasePort = 13300;
+static const ui16 CfgHttpPort = 8881;
+static const TString PublishKey = "endpoint";
+
+static TProgramShouldContinue ShouldContinue;
+
+void OnTerminate(int) {
+ ShouldContinue.ShouldStop();
+}
+
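+// Builds the per-node actor system setup: one basic executor pool, a poller, an on-demand DNS resolver, a static nameservice table covering all replica nodes, and TCP interconnect proxies plus a listener for the local node.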
+THolder<TActorSystemSetup> BuildActorSystemSetup(ui32 nodeId, ui32 threads, NMonitoring::TDynamicCounters &counters) {
+ Y_ABORT_UNLESS(threads > 0 && threads < 100);
+
+ auto setup = MakeHolder<TActorSystemSetup>();
+
+ setup->NodeId = nodeId;
+
+ setup->ExecutorsCount = 1;
+ setup->Executors.Reset(new TAutoPtr<IExecutorPool>[1]);
+ setup->Executors[0] = new TBasicExecutorPool(0, threads, 50);
+ setup->Scheduler = new TBasicSchedulerThread(TSchedulerConfig(512, 0));
+
+ setup->LocalServices.emplace_back(MakePollerActorId(), TActorSetupCmd(CreatePollerActor(), TMailboxType::ReadAsFilled, 0));
+
+ TIntrusivePtr<TTableNameserverSetup> nameserverTable = new TTableNameserverSetup();
+ for (ui32 xnode : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
+ nameserverTable->StaticNodeTable[xnode] = std::make_pair("127.0.0.1", CfgBasePort + xnode);
+ }
+
+ setup->LocalServices.emplace_back(
+ MakeDnsResolverActorId(),
+ TActorSetupCmd(CreateOnDemandDnsResolver(), TMailboxType::ReadAsFilled, 0)
+ );
+
+ setup->LocalServices.emplace_back(
+ GetNameserviceActorId(),
+ TActorSetupCmd(CreateNameserverTable(nameserverTable), TMailboxType::ReadAsFilled, 0)
+ );
+
+ TIntrusivePtr<TInterconnectProxyCommon> icCommon = new TInterconnectProxyCommon();
+ icCommon->NameserviceId = GetNameserviceActorId();
+ icCommon->MonCounters = counters.GetSubgroup("counters", "interconnect");
+ icCommon->TechnicalSelfHostName = "127.0.0.1";
+
+ setup->Interconnect.ProxyActors.resize(CfgTotalReplicaNodes + 1);
+ for (ui32 xnode : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
+ if (xnode != nodeId) {
+ IActor *actor = new TInterconnectProxyTCP(xnode, icCommon);
+ setup->Interconnect.ProxyActors[xnode] = TActorSetupCmd(actor, TMailboxType::ReadAsFilled, 0);
+ }
+ else {
+ IActor *listener = new TInterconnectListenerTCP("127.0.0.1", CfgBasePort + xnode, icCommon);
+ setup->LocalServices.emplace_back(
+ MakeInterconnectListenerActorId(false),
+ TActorSetupCmd(listener, TMailboxType::ReadAsFilled, 0)
+ );
+ }
+ }
+
+ return setup;
+}
+
+int main(int argc, char **argv) {
+ Y_UNUSED(argc);
+ Y_UNUSED(argv);
+
+#ifdef _unix_
+ signal(SIGPIPE, SIG_IGN);
+#endif
+ signal(SIGINT, &OnTerminate);
+ signal(SIGTERM, &OnTerminate);
+
+ TIntrusivePtr<TExampleStorageConfig> config = new TExampleStorageConfig();
+ for (ui32 nodeid : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
+ config->Replicas.push_back(MakeReplicaId(nodeid));
+ }
+
+ TVector<THolder<TActorSystem>> actorSystemHolder;
+ TVector<TIntrusivePtr<NMonitoring::TDynamicCounters>> countersHolder;
+ for (ui32 nodeid : xrange<ui32>(1, CfgTotalReplicaNodes + 1)) {
+ countersHolder.emplace_back(new NMonitoring::TDynamicCounters());
+ THolder<TActorSystemSetup> actorSystemSetup = BuildActorSystemSetup(nodeid, 2, *countersHolder.back());
+ actorSystemSetup->LocalServices.emplace_back(
+ TActorId(),
+ TActorSetupCmd(CreateEndpointActor(config.Get(), PublishKey, CfgHttpPort + nodeid), TMailboxType::HTSwap, 0)
+ );
+
+ actorSystemSetup->LocalServices.emplace_back(
+ MakeReplicaId(nodeid),
+ TActorSetupCmd(CreateReplica(), TMailboxType::ReadAsFilled, 0)
+ );
+
+ actorSystemHolder.emplace_back(new TActorSystem(actorSystemSetup));
+ }
+
+ for (auto &xh : actorSystemHolder)
+ xh->Start();
+
+ while (ShouldContinue.PollState() == TProgramShouldContinue::Continue) {
+ Sleep(TDuration::MilliSeconds(200));
+ }
+
+    // stop the actor systems so they do not generate new requests for external services;
+    // no events will be processed anymore
+ for (auto &xh : actorSystemHolder)
+ xh->Stop();
+
+    // and then clean up the actor systems;
+    // from this moment on, using the actor systems is prohibited
+ for (auto &xh : actorSystemHolder)
+ xh->Cleanup();
+
+ return ShouldContinue.GetReturnCode();
+}
diff --git a/library/cpp/actors/examples/02_discovery/protocol.proto b/ydb/library/actors/examples/02_discovery/protocol.proto
index 41cc2cc9c8..41cc2cc9c8 100644
--- a/library/cpp/actors/examples/02_discovery/protocol.proto
+++ b/ydb/library/actors/examples/02_discovery/protocol.proto
diff --git a/ydb/library/actors/examples/02_discovery/publish.cpp b/ydb/library/actors/examples/02_discovery/publish.cpp
new file mode 100644
index 0000000000..559b083d18
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/publish.cpp
@@ -0,0 +1,110 @@
+#include "services.h"
+
+#include <ydb/library/actors/core/actorsystem.h>
+#include <ydb/library/actors/core/hfunc.h>
+#include <ydb/library/actors/core/interconnect.h>
+#include <ydb/library/actors/core/actor_bootstrapped.h>
+#include <util/generic/set.h>
+#include <util/generic/vector.h>
+
+class TExamplePublishReplicaActor : public TActorBootstrapped<TExamplePublishReplicaActor> {
+ const TActorId Owner;
+ const TActorId Replica;
+ const TString Key;
+ const TString Payload;
+
+ void PassAway() override {
+ const ui32 replicaNode = Replica.NodeId();
+ if (replicaNode != SelfId().NodeId()) {
+ const TActorId &interconnectProxy = TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(Replica.NodeId());
+ Send(interconnectProxy, new TEvents::TEvUnsubscribe());
+ }
+ return IActor::PassAway();
+ }
+
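+    // on delivery failure or node disconnect, back off for 250ms and retry publishing from Bootstrap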
+ void SomeSleep() {
+ Become(&TThis::StateSleep, TDuration::MilliSeconds(250), new TEvents::TEvWakeup());
+ }
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExamplePublishReplicaActor(TActorId owner, TActorId replica, const TString &key, const TString &payload)
+ : Owner(owner)
+ , Replica(replica)
+ , Key(key)
+ , Payload(payload)
+ {}
+
+ void Bootstrap() {
+ const ui32 flags = IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession;
+ Send(Replica, new TEvExample::TEvReplicaPublish(Key, Payload), flags);
+ Become(&TThis::StatePublish);
+ }
+
+ STFUNC(StatePublish) {
+ switch (ev->GetTypeRewrite()) {
+ sFunc(TEvents::TEvPoison, PassAway);
+ sFunc(TEvents::TEvUndelivered, SomeSleep);
+ sFunc(TEvInterconnect::TEvNodeDisconnected, SomeSleep);
+ default:
+ break;
+ }
+ }
+
+ STFUNC(StateSleep) {
+ switch (ev->GetTypeRewrite()) {
+ sFunc(TEvents::TEvPoison, PassAway);
+ sFunc(TEvents::TEvWakeup, Bootstrap);
+ default:
+ break;
+ }
+ }
+};
+
+class TExamplePublishActor : public TActorBootstrapped<TExamplePublishActor> {
+ TIntrusiveConstPtr<TExampleStorageConfig> Config;
+ const TString Key;
+ const TString Payload;
+ TVector<TActorId> PublishActors;
+
+ void PassAway() override {
+ for (const auto &x : PublishActors)
+ Send(x, new TEvents::TEvPoison());
+ return IActor::PassAway();
+ }
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExamplePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what)
+ : Config(config)
+ , Key(key)
+ , Payload(what)
+ {}
+
+ void Bootstrap() {
+ for (auto &replica : Config->Replicas) {
+ const TActorId x = Register(new TExamplePublishReplicaActor(SelfId(), replica, Key, Payload));
+ PublishActors.emplace_back(x);
+ }
+
+ Become(&TThis::StateWork);
+ }
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ sFunc(TEvents::TEvPoison, PassAway);
+ default:
+ break;
+ }
+ }
+};
+
+IActor* CreatePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what) {
+ return new TExamplePublishActor(config, key, what);
+}
diff --git a/ydb/library/actors/examples/02_discovery/replica.cpp b/ydb/library/actors/examples/02_discovery/replica.cpp
new file mode 100644
index 0000000000..2c251ef691
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/replica.cpp
@@ -0,0 +1,181 @@
+#include "services.h"
+#include <ydb/library/actors/core/actorsystem.h>
+#include <ydb/library/actors/core/hfunc.h>
+#include <ydb/library/actors/core/interconnect.h>
+#include <util/generic/set.h>
+#include <util/generic/hash_set.h>
+#include <util/generic/vector.h>
+
+class TExampleReplicaActor : public TActor<TExampleReplicaActor> {
+ using TOwnerIndex = TMap<TActorId, ui32, TActorId::TOrderedCmp>;
+ using TKeyIndex = THashMap<TString, TSet<ui32>>;
+
+ struct TEntry {
+ TString Payload;
+ TActorId Owner;
+ TOwnerIndex::iterator OwnerIt;
+ TKeyIndex::iterator KeyIt;
+ };
+
+ TVector<TEntry> Entries;
+ TVector<ui32> AvailableEntries;
+
+ TOwnerIndex IndexOwner;
+ TKeyIndex IndexKey;
+
+ ui32 AllocateEntry() {
+ ui32 ret;
+ if (AvailableEntries) {
+ ret = AvailableEntries.back();
+ AvailableEntries.pop_back();
+ }
+ else {
+ ret = Entries.size();
+ Entries.emplace_back();
+ }
+
+ return ret;
+ }
+
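+    // true when this owner is the only remaining publisher from its node, so the interconnect session may be unsubscribed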
+ bool IsLastEntryOnNode(TOwnerIndex::iterator ownerIt) {
+ const ui32 ownerNodeId = ownerIt->first.NodeId();
+ if (ownerIt != IndexOwner.begin()) {
+ auto x = ownerIt;
+ --x;
+ if (x->first.NodeId() == ownerNodeId)
+ return false;
+ }
+
+ ++ownerIt;
+ if (ownerIt != IndexOwner.end()) {
+ if (ownerIt->first.NodeId() == ownerNodeId)
+ return false;
+ }
+
+ return true;
+ }
+
+ void CleanupEntry(ui32 entryIndex) {
+ TEntry &entry = Entries[entryIndex];
+ entry.KeyIt->second.erase(entryIndex);
+ if (entry.KeyIt->second.empty())
+ IndexKey.erase(entry.KeyIt);
+
+ if (IsLastEntryOnNode(entry.OwnerIt))
+ Send(TlsActivationContext->ExecutorThread.ActorSystem->InterconnectProxy(entry.OwnerIt->first.NodeId()), new TEvents::TEvUnsubscribe());
+
+ IndexOwner.erase(entry.OwnerIt);
+
+ TString().swap(entry.Payload);
+ entry.Owner = TActorId();
+ entry.KeyIt = IndexKey.end();
+ entry.OwnerIt = IndexOwner.end();
+
+ AvailableEntries.emplace_back(entryIndex);
+ }
+
+ void Handle(TEvExample::TEvReplicaLookup::TPtr &ev) {
+ auto &record = ev->Get()->Record;
+ const auto &key = record.GetKey();
+
+ auto keyIt = IndexKey.find(key);
+ if (keyIt == IndexKey.end()) {
+ Send(ev->Sender, new TEvExample::TEvReplicaInfo(key), 0, ev->Cookie);
+ return;
+ }
+
+ auto reply = MakeHolder<TEvExample::TEvReplicaInfo>(key);
+ reply->Record.MutablePayload()->Reserve(keyIt->second.size());
+ for (ui32 entryIndex : keyIt->second) {
+ const TEntry &entry = Entries[entryIndex];
+ reply->Record.AddPayload(entry.Payload);
+ }
+
+ Send(ev->Sender, std::move(reply), 0, ev->Cookie);
+ }
+
+ void Handle(TEvExample::TEvReplicaPublish::TPtr &ev) {
+ auto &record = ev->Get()->Record;
+ const TString &key = record.GetKey();
+ const TString &payload = record.GetPayload();
+ const TActorId &owner = ev->Sender;
+
+ auto ownerIt = IndexOwner.find(owner);
+ if (ownerIt != IndexOwner.end()) {
+ const ui32 entryIndex = ownerIt->second;
+ TEntry &entry = Entries[entryIndex];
+ if (entry.KeyIt->first != key) {
+                // do not reply: the owner is trying to publish under a different key, which is suspicious
+ return;
+ }
+
+ entry.Payload = payload;
+ }
+ else {
+ const ui32 entryIndex = AllocateEntry();
+ TEntry &entry = Entries[entryIndex];
+
+ entry.Payload = payload;
+ entry.Owner = owner;
+
+ entry.OwnerIt = IndexOwner.emplace(owner, entryIndex).first;
+ entry.KeyIt = IndexKey.emplace(std::make_pair(key, TSet<ui32>())).first;
+ entry.KeyIt->second.emplace(entryIndex);
+
+ Send(owner, new TEvExample::TEvReplicaPublishAck(), IEventHandle::FlagTrackDelivery | IEventHandle::FlagSubscribeOnSession, ev->Cookie);
+ }
+ }
+
+ void Handle(TEvents::TEvUndelivered::TPtr &ev) {
+ auto ownerIt = IndexOwner.find(ev->Sender);
+ if (ownerIt == IndexOwner.end())
+ return;
+
+ CleanupEntry(ownerIt->second);
+ }
+
+ void Handle(TEvInterconnect::TEvNodeDisconnected::TPtr &ev) {
+ auto *msg = ev->Get();
+ const ui32 nodeId = msg->NodeId;
+ auto ownerIt = IndexOwner.lower_bound(TActorId(nodeId, 0, 0, 0));
+ while (ownerIt != IndexOwner.end() && ownerIt->first.NodeId() == nodeId) {
+ const ui32 idx = ownerIt->second;
+ ++ownerIt;
+ CleanupEntry(idx);
+ }
+ }
+
+public:
+ static constexpr IActor::EActivityType ActorActivityType() {
+        // define an app-specific activity tag to track elapsed CPU, handled events and actor count in Solomon
+ return EActorActivity::ACTORLIB_COMMON;
+ }
+
+ TExampleReplicaActor()
+ : TActor(&TThis::StateWork)
+ {}
+
+ STFUNC(StateWork) {
+ switch (ev->GetTypeRewrite()) {
+ hFunc(TEvExample::TEvReplicaLookup, Handle);
+ hFunc(TEvExample::TEvReplicaPublish, Handle);
+ hFunc(TEvents::TEvUndelivered, Handle);
+ hFunc(TEvInterconnect::TEvNodeDisconnected, Handle);
+
+ IgnoreFunc(TEvInterconnect::TEvNodeConnected);
+ default:
+            // this is the place to log a message about unknown events if needed
+ break;
+ }
+ }
+};
+
+IActor* CreateReplica() {
+ return new TExampleReplicaActor();
+}
+
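+// Builds the well-known service ActorId of the replica on a node: a 12-byte id that starts with "rpl" and embeds the node id.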
+TActorId MakeReplicaId(ui32 nodeid) {
+ char x[12] = { 'r', 'p', 'l' };
+ memcpy(x + 5, &nodeid, sizeof(ui32));
+ return TActorId(nodeid, TStringBuf(x, 12));
+}
diff --git a/ydb/library/actors/examples/02_discovery/services.h b/ydb/library/actors/examples/02_discovery/services.h
new file mode 100644
index 0000000000..d1ae6f240a
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/services.h
@@ -0,0 +1,85 @@
+#pragma once
+#include <ydb/library/actors/examples/02_discovery/protocol.pb.h>
+
+#include <ydb/library/actors/core/actor.h>
+#include <ydb/library/actors/core/events.h>
+#include <ydb/library/actors/core/event_pb.h>
+#include <ydb/library/actors/core/event_local.h>
+
+#include <util/generic/vector.h>
+
+using namespace NActors;
+
+struct TExampleStorageConfig : public TThrRefBase {
+ TVector<TActorId> Replicas;
+};
+
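+// Event space of the discovery example: protobuf-backed replica lookup/publish events plus a local TEvInfo reply carrying the collected payloads.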
+struct TEvExample {
+ enum EEv {
+ EvReplicaLookup = EventSpaceBegin(TEvents::ES_USERSPACE + 1),
+ EvReplicaPublish,
+
+ EvReplicaInfo = EventSpaceBegin(TEvents::ES_USERSPACE + 2),
+ EvReplicaPublishAck,
+
+ EvInfo = EventSpaceBegin(TEvents::ES_USERSPACE + 3),
+ };
+
+ struct TEvReplicaLookup : public TEventPB<TEvReplicaLookup, NActorsExample::TEvReplicaLookup, EvReplicaLookup> {
+ TEvReplicaLookup()
+ {}
+
+ TEvReplicaLookup(const TString &key)
+ {
+ Record.SetKey(key);
+ }
+ };
+
+ struct TEvReplicaPublish : public TEventPB<TEvReplicaPublish, NActorsExample::TEvReplicaPublish, EvReplicaPublish> {
+ TEvReplicaPublish()
+ {}
+
+ TEvReplicaPublish(const TString &key, const TString &payload)
+ {
+ Record.SetKey(key);
+ Record.SetPayload(payload);
+ }
+ };
+
+ struct TEvReplicaInfo : public TEventPB<TEvReplicaInfo, NActorsExample::TEvReplicaInfo, EvReplicaInfo> {
+ TEvReplicaInfo()
+ {}
+
+ TEvReplicaInfo(const TString &key)
+ {
+ Record.SetKey(key);
+ }
+ };
+
+ struct TEvReplicaPublishAck : public TEventPB<TEvReplicaPublishAck, NActorsExample::TEvReplicaPublishAck, EvReplicaPublishAck> {
+ TEvReplicaPublishAck()
+ {}
+
+ TEvReplicaPublishAck(const TString &key)
+ {
+ Record.SetKey(key);
+ }
+ };
+
+ struct TEvInfo : public TEventLocal<TEvInfo, EvInfo> {
+ const TString Key;
+ const TVector<TString> Payloads;
+
+ TEvInfo(const TString &key, TVector<TString> &&payloads)
+ : Key(key)
+            , Payloads(std::move(payloads))
+ {}
+ };
+};
+
+IActor* CreateReplica();
+IActor* CreatePublishActor(TExampleStorageConfig *config, const TString &key, const TString &what);
+IActor* CreateLookupActor(TExampleStorageConfig *config, const TString &key, TActorId replyTo);
+IActor* CreateEndpointActor(TExampleStorageConfig *config, const TString &publishKey, ui16 httpPort);
+
+TActorId MakeReplicaId(ui32 nodeid);
diff --git a/ydb/library/actors/examples/02_discovery/ya.make b/ydb/library/actors/examples/02_discovery/ya.make
new file mode 100644
index 0000000000..d7b972897d
--- /dev/null
+++ b/ydb/library/actors/examples/02_discovery/ya.make
@@ -0,0 +1,25 @@
+PROGRAM(example_02_discovery)
+
+ALLOCATOR(LF)
+
+SRCS(
+ endpoint.cpp
+ lookup.cpp
+ main.cpp
+ publish.cpp
+ replica.cpp
+ services.h
+)
+
+SRCS(
+ protocol.proto
+)
+
+PEERDIR(
+ ydb/library/actors/core
+ ydb/library/actors/dnsresolver
+ ydb/library/actors/interconnect
+ ydb/library/actors/http
+)
+
+END()
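
The second SRCS block runs protocol.proto through the protobuf codegen, producing the protocol.pb.h that services.h includes; each TEventPB wrapper above simply carries one of the generated NActorsExample messages as its Record. A small sketch of that generated API, with accessor names inferred from the Set* calls in services.h (the helper MakePublishRecord is illustrative only):

#include <ydb/library/actors/examples/02_discovery/protocol.pb.h>

// Sketch only: the generated message behind TEvExample::TEvReplicaPublish.
// SetKey/SetPayload mirror the calls made in the wrapper's constructor above.
inline NActorsExample::TEvReplicaPublish MakePublishRecord(const TString &key, const TString &payload) {
    NActorsExample::TEvReplicaPublish record;
    record.SetKey(key);
    record.SetPayload(payload);
    return record;
}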
diff --git a/library/cpp/actors/examples/CMakeLists.txt b/ydb/library/actors/examples/CMakeLists.txt
index bcda1cfeef..bcda1cfeef 100644
--- a/library/cpp/actors/examples/CMakeLists.txt
+++ b/ydb/library/actors/examples/CMakeLists.txt
diff --git a/library/cpp/actors/examples/ya.make b/ydb/library/actors/examples/ya.make
index 0a98074b47..0a98074b47 100644
--- a/library/cpp/actors/examples/ya.make
+++ b/ydb/library/actors/examples/ya.make
diff --git a/ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt b/ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt
index a1cfad5ae6..4392ee98f6 100644
--- a/ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/helpers/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-helpers)
target_link_libraries(library-actors-helpers PUBLIC
diff --git a/ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt
index a1cfad5ae6..4392ee98f6 100644
--- a/ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/helpers/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-helpers)
target_link_libraries(library-actors-helpers PUBLIC
diff --git a/ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt b/ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt
index 6d69ee9378..b9a2080232 100644
--- a/ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/helpers/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-helpers)
target_link_libraries(library-actors-helpers PUBLIC
diff --git a/ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt b/ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt
index 6d69ee9378..b9a2080232 100644
--- a/ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/helpers/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-helpers)
target_link_libraries(library-actors-helpers PUBLIC
diff --git a/ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt b/ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt
index a1cfad5ae6..4392ee98f6 100644
--- a/ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/helpers/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-helpers)
target_link_libraries(library-actors-helpers PUBLIC
diff --git a/ydb/library/actors/helpers/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/helpers/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..eb490e93de
--- /dev/null
+++ b/ydb/library/actors/helpers/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,76 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-helpers-ut)
+target_include_directories(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers
+)
+target_link_libraries(ydb-library-actors-helpers-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-helpers
+ library-actors-interconnect
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-helpers-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers/selfping_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-helpers-ut
+ TEST_TARGET
+ ydb-library-actors-helpers-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-helpers-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-helpers-ut)
diff --git a/ydb/library/actors/helpers/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/helpers/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..613b811455
--- /dev/null
+++ b/ydb/library/actors/helpers/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,77 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-helpers-ut)
+target_include_directories(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers
+)
+target_link_libraries(ydb-library-actors-helpers-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-helpers
+ library-actors-interconnect
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-helpers-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers/selfping_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-helpers-ut
+ TEST_TARGET
+ ydb-library-actors-helpers-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-helpers-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-helpers-ut)
diff --git a/ydb/library/actors/helpers/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/helpers/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..7ad8585da6
--- /dev/null
+++ b/ydb/library/actors/helpers/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,80 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-helpers-ut)
+target_include_directories(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers
+)
+target_link_libraries(ydb-library-actors-helpers-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-helpers
+ library-actors-interconnect
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-helpers-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers/selfping_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-helpers-ut
+ TEST_TARGET
+ ydb-library-actors-helpers-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-helpers-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-helpers-ut)
diff --git a/ydb/library/actors/helpers/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/helpers/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..937dab9a47
--- /dev/null
+++ b/ydb/library/actors/helpers/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,82 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-helpers-ut)
+target_include_directories(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers
+)
+target_link_libraries(ydb-library-actors-helpers-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-helpers
+ library-actors-interconnect
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-helpers-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers/selfping_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-helpers-ut
+ TEST_TARGET
+ ydb-library-actors-helpers-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-helpers-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-helpers-ut)
diff --git a/library/cpp/actors/dnsresolver/CMakeLists.txt b/ydb/library/actors/helpers/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/dnsresolver/CMakeLists.txt
+++ b/ydb/library/actors/helpers/ut/CMakeLists.txt
diff --git a/ydb/library/actors/helpers/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/helpers/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..e50e892332
--- /dev/null
+++ b/ydb/library/actors/helpers/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-helpers-ut)
+target_include_directories(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers
+)
+target_link_libraries(ydb-library-actors-helpers-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-helpers
+ library-actors-interconnect
+ library-actors-testlib
+ library-actors-core
+)
+target_sources(ydb-library-actors-helpers-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/helpers/selfping_actor_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-helpers-ut
+ TEST_TARGET
+ ydb-library-actors-helpers-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-helpers-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-helpers-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-helpers-ut)
diff --git a/ydb/library/actors/http/CMakeLists.darwin-arm64.txt b/ydb/library/actors/http/CMakeLists.darwin-arm64.txt
index 003ae208e0..c1852d9c0e 100644
--- a/ydb/library/actors/http/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/http/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-http)
target_link_libraries(library-actors-http PUBLIC
diff --git a/ydb/library/actors/http/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/http/CMakeLists.darwin-x86_64.txt
index 003ae208e0..c1852d9c0e 100644
--- a/ydb/library/actors/http/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/http/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-http)
target_link_libraries(library-actors-http PUBLIC
diff --git a/ydb/library/actors/http/CMakeLists.linux-aarch64.txt b/ydb/library/actors/http/CMakeLists.linux-aarch64.txt
index 4503650b08..eb987fc90c 100644
--- a/ydb/library/actors/http/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/http/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-http)
target_link_libraries(library-actors-http PUBLIC
diff --git a/ydb/library/actors/http/CMakeLists.linux-x86_64.txt b/ydb/library/actors/http/CMakeLists.linux-x86_64.txt
index 4503650b08..eb987fc90c 100644
--- a/ydb/library/actors/http/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/http/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-http)
target_link_libraries(library-actors-http PUBLIC
diff --git a/ydb/library/actors/http/CMakeLists.windows-x86_64.txt b/ydb/library/actors/http/CMakeLists.windows-x86_64.txt
index 003ae208e0..c1852d9c0e 100644
--- a/ydb/library/actors/http/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/http/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-http)
target_link_libraries(library-actors-http PUBLIC
diff --git a/ydb/library/actors/http/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/http/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..42ca8a551d
--- /dev/null
+++ b/ydb/library/actors/http/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,67 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-http-ut)
+target_include_directories(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http
+)
+target_link_libraries(ydb-library-actors-http-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-http
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-http-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http/http_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-http-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-http-ut
+ TEST_TARGET
+ ydb-library-actors-http-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-http-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-http-ut)
diff --git a/ydb/library/actors/http/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/http/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..a7563e08fd
--- /dev/null
+++ b/ydb/library/actors/http/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-http-ut)
+target_include_directories(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http
+)
+target_link_libraries(ydb-library-actors-http-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-http
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-http-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http/http_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-http-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-http-ut
+ TEST_TARGET
+ ydb-library-actors-http-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-http-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-http-ut)
diff --git a/ydb/library/actors/http/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/http/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..b8acf5c261
--- /dev/null
+++ b/ydb/library/actors/http/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-http-ut)
+target_include_directories(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http
+)
+target_link_libraries(ydb-library-actors-http-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-http
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-http-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http/http_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-http-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-http-ut
+ TEST_TARGET
+ ydb-library-actors-http-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-http-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-http-ut)
diff --git a/ydb/library/actors/http/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/http/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..e33b1f6c20
--- /dev/null
+++ b/ydb/library/actors/http/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,73 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-http-ut)
+target_include_directories(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http
+)
+target_link_libraries(ydb-library-actors-http-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-http
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-http-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http/http_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-http-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-http-ut
+ TEST_TARGET
+ ydb-library-actors-http-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-http-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-http-ut)
diff --git a/library/cpp/actors/dnsresolver/ut/CMakeLists.txt b/ydb/library/actors/http/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/dnsresolver/ut/CMakeLists.txt
+++ b/ydb/library/actors/http/ut/CMakeLists.txt
diff --git a/ydb/library/actors/http/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/http/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..f1adc52bda
--- /dev/null
+++ b/ydb/library/actors/http/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,58 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-http-ut)
+target_include_directories(ydb-library-actors-http-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/http
+)
+target_link_libraries(ydb-library-actors-http-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-http
+ library-actors-testlib
+)
+set_property(
+ TARGET
+ ydb-library-actors-http-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-http-ut
+ TEST_TARGET
+ ydb-library-actors-http-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-http-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-http-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-http-ut)
diff --git a/ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt
index 158bcccff5..aa0d0be89c 100644
--- a/ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/interconnect/CMakeLists.darwin-arm64.txt
@@ -7,6 +7,9 @@
add_subdirectory(mock)
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
+add_subdirectory(ut_huge_cluster)
add_library(library-actors-interconnect)
target_link_libraries(library-actors-interconnect PUBLIC
diff --git a/ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt
index 158bcccff5..aa0d0be89c 100644
--- a/ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/interconnect/CMakeLists.darwin-x86_64.txt
@@ -7,6 +7,9 @@
add_subdirectory(mock)
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
+add_subdirectory(ut_huge_cluster)
add_library(library-actors-interconnect)
target_link_libraries(library-actors-interconnect PUBLIC
diff --git a/ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt
index 9e53effeff..0d7462732c 100644
--- a/ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/interconnect/CMakeLists.linux-aarch64.txt
@@ -7,6 +7,9 @@
add_subdirectory(mock)
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
+add_subdirectory(ut_huge_cluster)
add_library(library-actors-interconnect)
target_link_libraries(library-actors-interconnect PUBLIC
diff --git a/ydb/library/actors/interconnect/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/CMakeLists.linux-x86_64.txt
index 9e53effeff..0d7462732c 100644
--- a/ydb/library/actors/interconnect/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/interconnect/CMakeLists.linux-x86_64.txt
@@ -7,6 +7,9 @@
add_subdirectory(mock)
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
+add_subdirectory(ut_huge_cluster)
add_library(library-actors-interconnect)
target_link_libraries(library-actors-interconnect PUBLIC
diff --git a/ydb/library/actors/interconnect/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/CMakeLists.windows-x86_64.txt
index 158bcccff5..aa0d0be89c 100644
--- a/ydb/library/actors/interconnect/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/interconnect/CMakeLists.windows-x86_64.txt
@@ -7,6 +7,9 @@
add_subdirectory(mock)
+add_subdirectory(ut)
+add_subdirectory(ut_fat)
+add_subdirectory(ut_huge_cluster)
add_library(library-actors-interconnect)
target_link_libraries(library-actors-interconnect PUBLIC
diff --git a/ydb/library/actors/interconnect/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..b8d9f5448f
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,85 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(lib)
+add_subdirectory(protos)
+
+add_executable(ydb-library-actors-interconnect-ut)
+target_link_libraries(ydb-library-actors-interconnect-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ library-actors-testlib
+ cpp-digest-md5
+ cpp-testing-unittest
+)
+target_link_options(ydb-library-actors-interconnect-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/large.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/sticking_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut)
diff --git a/ydb/library/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..86b4d1f2cc
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,86 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(lib)
+add_subdirectory(protos)
+
+add_executable(ydb-library-actors-interconnect-ut)
+target_link_libraries(ydb-library-actors-interconnect-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ library-actors-testlib
+ cpp-digest-md5
+ cpp-testing-unittest
+)
+target_link_options(ydb-library-actors-interconnect-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/large.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/sticking_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut)
diff --git a/ydb/library/actors/interconnect/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..279a83d646
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,89 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(lib)
+add_subdirectory(protos)
+
+add_executable(ydb-library-actors-interconnect-ut)
+target_link_libraries(ydb-library-actors-interconnect-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ library-actors-testlib
+ cpp-digest-md5
+ cpp-testing-unittest
+)
+target_link_options(ydb-library-actors-interconnect-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/large.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/sticking_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-interconnect-ut)
diff --git a/ydb/library/actors/interconnect/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..183c47e61b
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,91 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(lib)
+add_subdirectory(protos)
+
+add_executable(ydb-library-actors-interconnect-ut)
+target_link_libraries(ydb-library-actors-interconnect-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ library-actors-testlib
+ cpp-digest-md5
+ cpp-testing-unittest
+)
+target_link_options(ydb-library-actors-interconnect-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/large.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/sticking_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-interconnect-ut)
diff --git a/library/cpp/actors/examples/01_ping_pong/CMakeLists.txt b/ydb/library/actors/interconnect/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/examples/01_ping_pong/CMakeLists.txt
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.txt
diff --git a/ydb/library/actors/interconnect/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..a9e78893c4
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,79 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+add_subdirectory(lib)
+add_subdirectory(protos)
+
+add_executable(ydb-library-actors-interconnect-ut)
+target_link_libraries(ydb-library-actors-interconnect-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ library-actors-testlib
+ cpp-digest-md5
+ cpp-testing-unittest
+)
+target_sources(ydb-library-actors-interconnect-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/channel_scheduler_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/event_holder_pool_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/interconnect_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/large.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/outgoing_stream_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/poller_actor_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/dynamic_proxy_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/sticking_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut)
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt
index a6a86ac09b..a6a86ac09b 100644
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-arm64.txt
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt
index a6a86ac09b..a6a86ac09b 100644
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.darwin-x86_64.txt
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt
index b20c3b0de9..b20c3b0de9 100644
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-aarch64.txt
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt
index b20c3b0de9..b20c3b0de9 100644
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.linux-x86_64.txt
diff --git a/library/cpp/actors/examples/02_discovery/CMakeLists.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/examples/02_discovery/CMakeLists.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.txt
diff --git a/library/cpp/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt
index a6a86ac09b..a6a86ac09b 100644
--- a/library/cpp/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/interconnect/ut/lib/CMakeLists.windows-x86_64.txt
diff --git a/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..bf5d35cd2f
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,43 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_library(interconnect-ut-protos)
+target_link_libraries(interconnect-ut-protos PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+)
+target_proto_messages(interconnect-ut-protos PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/protos/interconnect_test.proto
+)
+target_proto_addincls(interconnect-ut-protos
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(interconnect-ut-protos
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
diff --git a/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..bf5d35cd2f
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,43 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_library(interconnect-ut-protos)
+target_link_libraries(interconnect-ut-protos PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+)
+target_proto_messages(interconnect-ut-protos PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/protos/interconnect_test.proto
+)
+target_proto_addincls(interconnect-ut-protos
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(interconnect-ut-protos
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
diff --git a/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..b78993ebda
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,44 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_library(interconnect-ut-protos)
+target_link_libraries(interconnect-ut-protos PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+)
+target_proto_messages(interconnect-ut-protos PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/protos/interconnect_test.proto
+)
+target_proto_addincls(interconnect-ut-protos
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(interconnect-ut-protos
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
diff --git a/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..b78993ebda
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,44 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_library(interconnect-ut-protos)
+target_link_libraries(interconnect-ut-protos PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+)
+target_proto_messages(interconnect-ut-protos PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/protos/interconnect_test.proto
+)
+target_proto_addincls(interconnect-ut-protos
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(interconnect-ut-protos
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
diff --git a/library/cpp/actors/helpers/CMakeLists.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/helpers/CMakeLists.txt
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.txt
diff --git a/ydb/library/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..bf5d35cd2f
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut/protos/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,43 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+get_built_tool_path(
+ TOOL_protoc_bin
+ TOOL_protoc_dependency
+ contrib/tools/protoc/bin
+ protoc
+)
+get_built_tool_path(
+ TOOL_cpp_styleguide_bin
+ TOOL_cpp_styleguide_dependency
+ contrib/tools/protoc/plugins/cpp_styleguide
+ cpp_styleguide
+)
+
+add_library(interconnect-ut-protos)
+target_link_libraries(interconnect-ut-protos PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ contrib-libs-protobuf
+)
+target_proto_messages(interconnect-ut-protos PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut/protos/interconnect_test.proto
+)
+target_proto_addincls(interconnect-ut-protos
+ ./
+ ${CMAKE_SOURCE_DIR}/
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+ ${CMAKE_BINARY_DIR}
+ ${CMAKE_SOURCE_DIR}/contrib/libs/protobuf/src
+)
+target_proto_outs(interconnect-ut-protos
+ --cpp_out=${CMAKE_BINARY_DIR}/
+ --cpp_styleguide_out=${CMAKE_BINARY_DIR}/
+)
diff --git a/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..dc5e934b3d
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_fat)
+target_link_libraries(ydb-library-actors-interconnect-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ actors-interconnect-mock
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ cpp-deprecated-atomic
+)
+target_link_options(ydb-library-actors-interconnect-ut_fat PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_fat/main.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_fat
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-interconnect-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_fat)
diff --git a/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..7dada11537
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_fat)
+target_link_libraries(ydb-library-actors-interconnect-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ actors-interconnect-mock
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ cpp-deprecated-atomic
+)
+target_link_options(ydb-library-actors-interconnect-ut_fat PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_fat/main.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_fat
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-interconnect-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_fat)
diff --git a/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..7551d04caf
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,73 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_fat)
+target_link_libraries(ydb-library-actors-interconnect-ut_fat PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ actors-interconnect-mock
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ cpp-deprecated-atomic
+)
+target_link_options(ydb-library-actors-interconnect-ut_fat PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_fat/main.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_fat
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-interconnect-ut_fat
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-interconnect-ut_fat)
diff --git a/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..22b569c1e7
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,75 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_fat)
+target_link_libraries(ydb-library-actors-interconnect-ut_fat PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ actors-interconnect-mock
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ cpp-deprecated-atomic
+)
+target_link_options(ydb-library-actors-interconnect-ut_fat PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_fat/main.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_fat
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-interconnect-ut_fat
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-interconnect-ut_fat)
diff --git a/library/cpp/actors/helpers/ut/CMakeLists.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/helpers/ut/CMakeLists.txt
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.txt
diff --git a/ydb/library/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..bcd9e01547
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_fat/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,63 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_fat)
+target_link_libraries(ydb-library-actors-interconnect-ut_fat PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ actors-interconnect-mock
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ cpp-deprecated-atomic
+)
+target_sources(ydb-library-actors-interconnect-ut_fat PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_fat/main.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_fat
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_fat
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ LABELS
+ LARGE
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_fat
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-interconnect-ut_fat
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_fat)
diff --git a/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..728322815b
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,75 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_huge_cluster)
+target_link_libraries(ydb-library-actors-interconnect-ut_huge_cluster PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ PROCESSORS
+ 4
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut_huge_cluster
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_huge_cluster)
diff --git a/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..82afb6345b
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,76 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_huge_cluster)
+target_link_libraries(ydb-library-actors-interconnect-ut_huge_cluster PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ PROCESSORS
+ 4
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut_huge_cluster
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_huge_cluster)
diff --git a/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..0e692cf153
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,79 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_huge_cluster)
+target_link_libraries(ydb-library-actors-interconnect-ut_huge_cluster PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ PROCESSORS
+ 4
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut_huge_cluster
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-interconnect-ut_huge_cluster)
diff --git a/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..2718ff5569
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,81 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_huge_cluster)
+target_link_libraries(ydb-library-actors-interconnect-ut_huge_cluster PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ library-actors-testlib
+)
+target_link_options(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ PROCESSORS
+ 4
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut_huge_cluster
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-interconnect-ut_huge_cluster)
diff --git a/library/cpp/actors/http/CMakeLists.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/http/CMakeLists.txt
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.txt
diff --git a/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..ac8a111e4b
--- /dev/null
+++ b/ydb/library/actors/interconnect/ut_huge_cluster/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,69 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-interconnect-ut_huge_cluster)
+target_link_libraries(ydb-library-actors-interconnect-ut_huge_cluster PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-core
+ library-actors-interconnect
+ interconnect-ut-lib
+ interconnect-ut-protos
+ cpp-testing-unittest
+ library-actors-testlib
+)
+target_sources(ydb-library-actors-interconnect-ut_huge_cluster PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/interconnect/ut_huge_cluster/huge_cluster.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_TARGET
+ ydb-library-actors-interconnect-ut_huge_cluster
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ LABELS
+ MEDIUM
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ PROCESSORS
+ 4
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-interconnect-ut_huge_cluster
+ PROPERTY
+ TIMEOUT
+ 600
+)
+target_allocator(ydb-library-actors-interconnect-ut_huge_cluster
+ system_allocator
+)
+vcs_info(ydb-library-actors-interconnect-ut_huge_cluster)
diff --git a/ydb/library/actors/prof/CMakeLists.darwin-arm64.txt b/ydb/library/actors/prof/CMakeLists.darwin-arm64.txt
index 496e629e0b..da58c952e7 100644
--- a/ydb/library/actors/prof/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/prof/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-prof)
target_link_libraries(library-actors-prof PUBLIC
diff --git a/ydb/library/actors/prof/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/prof/CMakeLists.darwin-x86_64.txt
index 496e629e0b..da58c952e7 100644
--- a/ydb/library/actors/prof/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/prof/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-prof)
target_link_libraries(library-actors-prof PUBLIC
diff --git a/ydb/library/actors/prof/CMakeLists.linux-aarch64.txt b/ydb/library/actors/prof/CMakeLists.linux-aarch64.txt
index db7556efc9..f750fe5943 100644
--- a/ydb/library/actors/prof/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/prof/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-prof)
target_link_libraries(library-actors-prof PUBLIC
diff --git a/ydb/library/actors/prof/CMakeLists.linux-x86_64.txt b/ydb/library/actors/prof/CMakeLists.linux-x86_64.txt
index db7556efc9..f750fe5943 100644
--- a/ydb/library/actors/prof/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/prof/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-prof)
target_link_libraries(library-actors-prof PUBLIC
diff --git a/ydb/library/actors/prof/CMakeLists.windows-x86_64.txt b/ydb/library/actors/prof/CMakeLists.windows-x86_64.txt
index 496e629e0b..da58c952e7 100644
--- a/ydb/library/actors/prof/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/prof/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-prof)
target_link_libraries(library-actors-prof PUBLIC
diff --git a/ydb/library/actors/prof/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/prof/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..bbe8c2ad2b
--- /dev/null
+++ b/ydb/library/actors/prof/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,66 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-prof-ut)
+target_include_directories(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof
+)
+target_link_libraries(ydb-library-actors-prof-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-prof
+)
+target_link_options(ydb-library-actors-prof-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof/ut/tag_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-prof-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-prof-ut
+ TEST_TARGET
+ ydb-library-actors-prof-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-prof-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-prof-ut)
diff --git a/ydb/library/actors/prof/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/prof/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..d404ef6bce
--- /dev/null
+++ b/ydb/library/actors/prof/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,67 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-prof-ut)
+target_include_directories(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof
+)
+target_link_libraries(ydb-library-actors-prof-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-prof
+)
+target_link_options(ydb-library-actors-prof-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof/ut/tag_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-prof-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-prof-ut
+ TEST_TARGET
+ ydb-library-actors-prof-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-prof-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-prof-ut)
diff --git a/ydb/library/actors/prof/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/prof/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..d8a0e6e13d
--- /dev/null
+++ b/ydb/library/actors/prof/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,70 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-prof-ut)
+target_include_directories(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof
+)
+target_link_libraries(ydb-library-actors-prof-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-prof
+)
+target_link_options(ydb-library-actors-prof-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof/ut/tag_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-prof-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-prof-ut
+ TEST_TARGET
+ ydb-library-actors-prof-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-prof-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-prof-ut)
diff --git a/ydb/library/actors/prof/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/prof/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..cceb96b672
--- /dev/null
+++ b/ydb/library/actors/prof/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,72 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-prof-ut)
+target_include_directories(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof
+)
+target_link_libraries(ydb-library-actors-prof-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-prof
+)
+target_link_options(ydb-library-actors-prof-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof/ut/tag_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-prof-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-prof-ut
+ TEST_TARGET
+ ydb-library-actors-prof-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-prof-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-prof-ut)
diff --git a/library/cpp/actors/http/ut/CMakeLists.txt b/ydb/library/actors/prof/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/http/ut/CMakeLists.txt
+++ b/ydb/library/actors/prof/ut/CMakeLists.txt
diff --git a/ydb/library/actors/prof/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/prof/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..bfb38a55a6
--- /dev/null
+++ b/ydb/library/actors/prof/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,60 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-prof-ut)
+target_include_directories(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof
+)
+target_link_libraries(ydb-library-actors-prof-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-prof
+)
+target_sources(ydb-library-actors-prof-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/prof/ut/tag_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-prof-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-prof-ut
+ TEST_TARGET
+ ydb-library-actors-prof-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-prof-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-prof-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-prof-ut)
diff --git a/ydb/library/actors/testlib/CMakeLists.darwin-arm64.txt b/ydb/library/actors/testlib/CMakeLists.darwin-arm64.txt
index e755f2870e..e65674383b 100644
--- a/ydb/library/actors/testlib/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/testlib/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-testlib)
target_link_libraries(library-actors-testlib PUBLIC
diff --git a/ydb/library/actors/testlib/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/testlib/CMakeLists.darwin-x86_64.txt
index e755f2870e..e65674383b 100644
--- a/ydb/library/actors/testlib/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/testlib/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-testlib)
target_link_libraries(library-actors-testlib PUBLIC
diff --git a/ydb/library/actors/testlib/CMakeLists.linux-aarch64.txt b/ydb/library/actors/testlib/CMakeLists.linux-aarch64.txt
index 7b41c533aa..143acbad0e 100644
--- a/ydb/library/actors/testlib/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/testlib/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-testlib)
target_link_libraries(library-actors-testlib PUBLIC
diff --git a/ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt b/ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt
index 7b41c533aa..143acbad0e 100644
--- a/ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/testlib/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-testlib)
target_link_libraries(library-actors-testlib PUBLIC
diff --git a/ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt b/ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt
index e755f2870e..e65674383b 100644
--- a/ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/testlib/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-testlib)
target_link_libraries(library-actors-testlib PUBLIC
diff --git a/ydb/library/actors/testlib/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/testlib/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..34c0be7c70
--- /dev/null
+++ b/ydb/library/actors/testlib/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,67 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-testlib-ut)
+target_include_directories(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib
+)
+target_link_libraries(ydb-library-actors-testlib-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-testlib-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib/decorator_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-testlib-ut
+ TEST_TARGET
+ ydb-library-actors-testlib-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-testlib-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-testlib-ut)
diff --git a/ydb/library/actors/testlib/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/testlib/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..d1e9a26dd5
--- /dev/null
+++ b/ydb/library/actors/testlib/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-testlib-ut)
+target_include_directories(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib
+)
+target_link_libraries(ydb-library-actors-testlib-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-testlib-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib/decorator_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-testlib-ut
+ TEST_TARGET
+ ydb-library-actors-testlib-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-testlib-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-testlib-ut)
diff --git a/ydb/library/actors/testlib/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/testlib/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..8e8d2153a9
--- /dev/null
+++ b/ydb/library/actors/testlib/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,71 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-testlib-ut)
+target_include_directories(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib
+)
+target_link_libraries(ydb-library-actors-testlib-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-testlib-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib/decorator_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-testlib-ut
+ TEST_TARGET
+ ydb-library-actors-testlib-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-testlib-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-testlib-ut)
diff --git a/ydb/library/actors/testlib/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/testlib/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..7360d63cef
--- /dev/null
+++ b/ydb/library/actors/testlib/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,73 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-testlib-ut)
+target_include_directories(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib
+)
+target_link_libraries(ydb-library-actors-testlib-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-testlib
+ library-actors-core
+)
+target_link_options(ydb-library-actors-testlib-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib/decorator_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-testlib-ut
+ TEST_TARGET
+ ydb-library-actors-testlib-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-testlib-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-testlib-ut)
diff --git a/library/cpp/actors/interconnect/CMakeLists.txt b/ydb/library/actors/testlib/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/interconnect/CMakeLists.txt
+++ b/ydb/library/actors/testlib/ut/CMakeLists.txt
diff --git a/ydb/library/actors/testlib/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/testlib/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..a156d09b75
--- /dev/null
+++ b/ydb/library/actors/testlib/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,61 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-testlib-ut)
+target_include_directories(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib
+)
+target_link_libraries(ydb-library-actors-testlib-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-testlib
+ library-actors-core
+)
+target_sources(ydb-library-actors-testlib-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/testlib/decorator_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 10
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-testlib-ut
+ TEST_TARGET
+ ydb-library-actors-testlib-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-testlib-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-testlib-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-testlib-ut)
diff --git a/ydb/library/actors/util/CMakeLists.darwin-arm64.txt b/ydb/library/actors/util/CMakeLists.darwin-arm64.txt
index b4047e9283..5e9051b086 100644
--- a/ydb/library/actors/util/CMakeLists.darwin-arm64.txt
+++ b/ydb/library/actors/util/CMakeLists.darwin-arm64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-util)
target_link_libraries(library-actors-util PUBLIC
diff --git a/ydb/library/actors/util/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/util/CMakeLists.darwin-x86_64.txt
index b4047e9283..5e9051b086 100644
--- a/ydb/library/actors/util/CMakeLists.darwin-x86_64.txt
+++ b/ydb/library/actors/util/CMakeLists.darwin-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-util)
target_link_libraries(library-actors-util PUBLIC
diff --git a/ydb/library/actors/util/CMakeLists.linux-aarch64.txt b/ydb/library/actors/util/CMakeLists.linux-aarch64.txt
index 5439fb4740..ff623584ba 100644
--- a/ydb/library/actors/util/CMakeLists.linux-aarch64.txt
+++ b/ydb/library/actors/util/CMakeLists.linux-aarch64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-util)
target_link_libraries(library-actors-util PUBLIC
diff --git a/ydb/library/actors/util/CMakeLists.linux-x86_64.txt b/ydb/library/actors/util/CMakeLists.linux-x86_64.txt
index 5439fb4740..ff623584ba 100644
--- a/ydb/library/actors/util/CMakeLists.linux-x86_64.txt
+++ b/ydb/library/actors/util/CMakeLists.linux-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-util)
target_link_libraries(library-actors-util PUBLIC
diff --git a/ydb/library/actors/util/CMakeLists.windows-x86_64.txt b/ydb/library/actors/util/CMakeLists.windows-x86_64.txt
index b4047e9283..5e9051b086 100644
--- a/ydb/library/actors/util/CMakeLists.windows-x86_64.txt
+++ b/ydb/library/actors/util/CMakeLists.windows-x86_64.txt
@@ -6,6 +6,7 @@
# original buildsystem will not be accepted.
+add_subdirectory(ut)
add_library(library-actors-util)
target_link_libraries(library-actors-util PUBLIC
diff --git a/ydb/library/actors/util/ut/CMakeLists.darwin-arm64.txt b/ydb/library/actors/util/ut/CMakeLists.darwin-arm64.txt
new file mode 100644
index 0000000000..77a22c8dc6
--- /dev/null
+++ b/ydb/library/actors/util/ut/CMakeLists.darwin-arm64.txt
@@ -0,0 +1,74 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-util-ut)
+target_include_directories(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util
+)
+target_link_libraries(ydb-library-actors-util-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-util
+)
+target_link_options(ydb-library-actors-util-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/cpu_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/memory_tracker_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/thread_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rope_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rc_buf_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_native_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/unordered_cache_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-util-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-util-ut
+ TEST_TARGET
+ ydb-library-actors-util-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-util-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-util-ut)
diff --git a/ydb/library/actors/util/ut/CMakeLists.darwin-x86_64.txt b/ydb/library/actors/util/ut/CMakeLists.darwin-x86_64.txt
new file mode 100644
index 0000000000..1e4d43ea8d
--- /dev/null
+++ b/ydb/library/actors/util/ut/CMakeLists.darwin-x86_64.txt
@@ -0,0 +1,75 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-util-ut)
+target_include_directories(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util
+)
+target_link_libraries(ydb-library-actors-util-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-util
+)
+target_link_options(ydb-library-actors-util-ut PRIVATE
+ -Wl,-platform_version,macos,11.0,11.0
+ -fPIC
+ -fPIC
+ -framework
+ CoreFoundation
+)
+target_sources(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/cpu_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/memory_tracker_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/thread_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rope_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rc_buf_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_native_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/unordered_cache_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-util-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-util-ut
+ TEST_TARGET
+ ydb-library-actors-util-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-util-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-util-ut)
diff --git a/ydb/library/actors/util/ut/CMakeLists.linux-aarch64.txt b/ydb/library/actors/util/ut/CMakeLists.linux-aarch64.txt
new file mode 100644
index 0000000000..6964e48984
--- /dev/null
+++ b/ydb/library/actors/util/ut/CMakeLists.linux-aarch64.txt
@@ -0,0 +1,78 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-util-ut)
+target_include_directories(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util
+)
+target_link_libraries(ydb-library-actors-util-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ cpp-testing-unittest_main
+ library-actors-util
+)
+target_link_options(ydb-library-actors-util-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/cpu_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/memory_tracker_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/thread_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rope_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rc_buf_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_native_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/unordered_cache_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-util-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-util-ut
+ TEST_TARGET
+ ydb-library-actors-util-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-util-ut
+ cpp-malloc-jemalloc
+)
+vcs_info(ydb-library-actors-util-ut)
diff --git a/ydb/library/actors/util/ut/CMakeLists.linux-x86_64.txt b/ydb/library/actors/util/ut/CMakeLists.linux-x86_64.txt
new file mode 100644
index 0000000000..a058eff709
--- /dev/null
+++ b/ydb/library/actors/util/ut/CMakeLists.linux-x86_64.txt
@@ -0,0 +1,80 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-util-ut)
+target_include_directories(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util
+)
+target_link_libraries(ydb-library-actors-util-ut PUBLIC
+ contrib-libs-linux-headers
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-util
+)
+target_link_options(ydb-library-actors-util-ut PRIVATE
+ -ldl
+ -lrt
+ -Wl,--no-as-needed
+ -fPIC
+ -fPIC
+ -lpthread
+ -lrt
+ -ldl
+)
+target_sources(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/cpu_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/memory_tracker_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/thread_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rope_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rc_buf_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_native_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/unordered_cache_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-util-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-util-ut
+ TEST_TARGET
+ ydb-library-actors-util-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-util-ut
+ cpp-malloc-tcmalloc
+ libs-tcmalloc-no_percpu_cache
+)
+vcs_info(ydb-library-actors-util-ut)
diff --git a/library/cpp/actors/interconnect/mock/CMakeLists.txt b/ydb/library/actors/util/ut/CMakeLists.txt
index 2dce3a77fe..2dce3a77fe 100644
--- a/library/cpp/actors/interconnect/mock/CMakeLists.txt
+++ b/ydb/library/actors/util/ut/CMakeLists.txt
diff --git a/ydb/library/actors/util/ut/CMakeLists.windows-x86_64.txt b/ydb/library/actors/util/ut/CMakeLists.windows-x86_64.txt
new file mode 100644
index 0000000000..b3c2fd0724
--- /dev/null
+++ b/ydb/library/actors/util/ut/CMakeLists.windows-x86_64.txt
@@ -0,0 +1,68 @@
+
+# This file was generated by the build system used internally in the Yandex monorepo.
+# Only simple modifications are allowed (adding source-files to targets, adding simple properties
+# like target_include_directories). These modifications will be ported to original
+# ya.make files by maintainers. Any complex modifications which can't be ported back to the
+# original buildsystem will not be accepted.
+
+
+
+add_executable(ydb-library-actors-util-ut)
+target_include_directories(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util
+)
+target_link_libraries(ydb-library-actors-util-ut PUBLIC
+ contrib-libs-cxxsupp
+ yutil
+ library-cpp-cpuid_check
+ cpp-testing-unittest_main
+ library-actors-util
+)
+target_sources(ydb-library-actors-util-ut PRIVATE
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/cpu_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/memory_tracker_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/thread_load_log_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rope_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/rc_buf_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/shared_data_native_rope_backend_ut.cpp
+ ${CMAKE_SOURCE_DIR}/ydb/library/actors/util/unordered_cache_ut.cpp
+)
+set_property(
+ TARGET
+ ydb-library-actors-util-ut
+ PROPERTY
+ SPLIT_FACTOR
+ 1
+)
+add_yunittest(
+ NAME
+ ydb-library-actors-util-ut
+ TEST_TARGET
+ ydb-library-actors-util-ut
+ TEST_ARG
+ --print-before-suite
+ --print-before-test
+ --fork-tests
+ --print-times
+ --show-fails
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ LABELS
+ SMALL
+)
+set_yunittest_property(
+ TEST
+ ydb-library-actors-util-ut
+ PROPERTY
+ PROCESSORS
+ 1
+)
+target_allocator(ydb-library-actors-util-ut
+ system_allocator
+)
+vcs_info(ydb-library-actors-util-ut)
diff --git a/library/cpp/actors/ya.make b/ydb/library/actors/ya.make
index 00d7801798..00d7801798 100644
--- a/library/cpp/actors/ya.make
+++ b/ydb/library/actors/ya.make